Diffstat (limited to 'scripts/gdb/linux/stackdepot.py')
0 files changed, 0 insertions, 0 deletions
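
For reference, the per-path summary above is the same data git produces locally; a minimal sketch, assuming a kernel checkout, with <commit> as a hypothetical stand-in for the commit this page renders (not a value taken from this page):

    # show the commit header plus a diffstat limited to one path;
    # if the commit does not touch the file, no stat rows are printed
    # (cgit renders that case as "0 files changed, 0 insertions, 0 deletions")
    git show --oneline --stat <commit> -- scripts/gdb/linux/stackdepot.py

The full, unfiltered diffstat of the commit follows below.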
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-frequency-admv101338
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb9
-rw-r--r--Documentation/ABI/testing/sysfs-bus-vdpa57
-rw-r--r--Documentation/ABI/testing/sysfs-class-power1
-rw-r--r--Documentation/ABI/testing/sysfs-devices-system-cpu15
-rw-r--r--Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing6
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs12
-rw-r--r--Documentation/accounting/delay-accounting.rst55
-rw-r--r--Documentation/accounting/psi.rst3
-rw-r--r--Documentation/admin-guide/cgroup-v1/hugetlb.rst4
-rw-r--r--Documentation/admin-guide/cgroup-v2.rst11
-rw-r--r--Documentation/admin-guide/cputopology.rst25
-rw-r--r--Documentation/admin-guide/gpio/index.rst1
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt20
-rw-r--r--Documentation/admin-guide/mm/damon/reclaim.rst25
-rw-r--r--Documentation/admin-guide/mm/damon/usage.rst225
-rw-r--r--Documentation/admin-guide/mm/numa_memory_policy.rst16
-rw-r--r--Documentation/admin-guide/sysctl/vm.rst2
-rw-r--r--Documentation/arm/marvell.rst2
-rw-r--r--Documentation/arm64/silicon-errata.rst14
-rw-r--r--Documentation/block/index.rst1
-rw-r--r--Documentation/block/queue-sysfs.rst321
-rw-r--r--Documentation/core-api/kernel-api.rst4
-rw-r--r--Documentation/core-api/kobject.rst16
-rw-r--r--Documentation/dev-tools/kselftest.rst8
-rw-r--r--Documentation/dev-tools/kunit/usage.rst2
-rw-r--r--Documentation/devicetree/bindings/Makefile4
-rw-r--r--Documentation/devicetree/bindings/arm/arm,cci-400.yaml10
-rw-r--r--Documentation/devicetree/bindings/arm/arm-dsu-pmu.txt27
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.yaml7
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt1
-rw-r--r--Documentation/devicetree/bindings/arm/omap/omap.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/pmu.yaml8
-rw-r--r--Documentation/devicetree/bindings/arm/ux500.yaml30
-rw-r--r--Documentation/devicetree/bindings/arm/xen.txt14
-rw-r--r--Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt45
-rw-r--r--Documentation/devicetree/bindings/ata/brcm,sata-brcm.yaml90
-rw-r--r--Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt34
-rw-r--r--Documentation/devicetree/bindings/bus/brcm,gisb-arb.yaml66
-rw-r--r--Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml4
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5260-clock.txt190
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5410-clock.txt50
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5433-clock.txt507
-rw-r--r--Documentation/devicetree/bindings/clock/exynos7-clock.txt108
-rw-r--r--Documentation/devicetree/bindings/clock/imx5-clock.yaml9
-rw-r--r--Documentation/devicetree/bindings/clock/microchip,lan966x-gck.yaml60
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,gcc-msm8976.yaml97
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml2
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml1
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos5260-clock.yaml382
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos5410-clock.yaml66
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos5433-clock.yaml524
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos7-clock.yaml272
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos7885-clock.yaml166
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml38
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.txt77
-rw-r--r--Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.yaml79
-rw-r--r--Documentation/devicetree/bindings/clock/toshiba,tmpv770x-pipllct.yaml57
-rw-r--r--Documentation/devicetree/bindings/clock/toshiba,tmpv770x-pismu.yaml52
-rw-r--r--Documentation/devicetree/bindings/crypto/qcom,prng.txt19
-rw-r--r--Documentation/devicetree/bindings/crypto/qcom,prng.yaml43
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/bridge/google,cros-ec-anx7688.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/bridge/ps8640.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/msm/dpu-sdm845.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml7
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml1
-rw-r--r--Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/tpo,td.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml8
-rw-r--r--Documentation/devicetree/bindings/display/simple-framebuffer.yaml12
-rw-r--r--Documentation/devicetree/bindings/display/st,stm32-dsi.yaml3
-rw-r--r--Documentation/devicetree/bindings/dma/arm,pl330.yaml83
-rw-r--r--Documentation/devicetree/bindings/dma/arm-pl08x.yaml4
-rw-r--r--Documentation/devicetree/bindings/dma/arm-pl330.txt49
-rw-r--r--Documentation/devicetree/bindings/dma/dma-controller.yaml8
-rw-r--r--Documentation/devicetree/bindings/dma/ingenic,dma.yaml42
-rw-r--r--Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml5
-rw-r--r--Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml2
-rw-r--r--Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml3
-rw-r--r--Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml1
-rw-r--r--Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml1
-rw-r--r--Documentation/devicetree/bindings/eeprom/at24.yaml29
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml1
-rw-r--r--Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt83
-rw-r--r--Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml104
-rw-r--r--Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml1
-rw-r--r--Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml45
-rw-r--r--Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml1
-rw-r--r--Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt22
-rw-r--r--Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.yaml54
-rw-r--r--Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml1
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-exynos5.txt53
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml133
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-gate.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt80
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-gpio.yaml104
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt93
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.yaml103
-rw-r--r--Documentation/devicetree/bindings/i2c/nvidia,tegra186-bpmp-i2c.txt42
-rw-r--r--Documentation/devicetree/bindings/i2c/nvidia,tegra186-bpmp-i2c.yaml45
-rw-r--r--Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt87
-rw-r--r--Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml192
-rw-r--r--Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml6
-rw-r--r--Documentation/devicetree/bindings/iio/adc/fsl,vf610-adc.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml3
-rw-r--r--Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml227
-rw-r--r--Documentation/devicetree/bindings/iio/addac/adi,ad74413r.yaml158
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml217
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad5755.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/dac/adi,ad7293.yaml61
-rw-r--r--Documentation/devicetree/bindings/iio/filter/adi,admv8818.yaml66
-rw-r--r--Documentation/devicetree/bindings/iio/frequency/adi,admv1013.yaml91
-rw-r--r--Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml7
-rw-r--r--Documentation/devicetree/bindings/iio/light/liteon,ltr501.yaml7
-rw-r--r--Documentation/devicetree/bindings/iio/magnetometer/yamaha,yas530.yaml2
-rw-r--r--Documentation/devicetree/bindings/iio/proximity/google,cros-ec-mkbp-proximity.yaml1
-rw-r--r--Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml22
-rw-r--r--Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml1
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys.yaml6
-rw-r--r--Documentation/devicetree/bindings/input/pwm-vibrator.txt66
-rw-r--r--Documentation/devicetree/bindings/input/pwm-vibrator.yaml57
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml115
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/zinitix.txt40
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml1
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,qcm2290.yaml137
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml143
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml11
-rw-r--r--Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml185
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml3
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt39
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt61
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.yaml91
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt88
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.yaml151
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt31
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.yaml72
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml1
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml33
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu.yaml2
-rw-r--r--Documentation/devicetree/bindings/leds/leds-bcm6328.txt319
-rw-r--r--Documentation/devicetree/bindings/leds/leds-bcm6328.yaml404
-rw-r--r--Documentation/devicetree/bindings/leds/leds-mt6360.yaml159
-rw-r--r--Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml12
-rw-r--r--Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml9
-rw-r--r--Documentation/devicetree/bindings/media/i2c/maxim,max96712.yaml111
-rw-r--r--Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml12
-rw-r--r--Documentation/devicetree/bindings/media/nxp,imx8mq-mipi-csi2.yaml12
-rw-r--r--Documentation/devicetree/bindings/memory-controllers/ti,gpmc.yaml5
-rw-r--r--Documentation/devicetree/bindings/mfd/cirrus,madera.yaml4
-rw-r--r--Documentation/devicetree/bindings/mfd/google,cros-ec.yaml1
-rw-r--r--Documentation/devicetree/bindings/mips/loongson/ls2k-reset.yaml38
-rw-r--r--Documentation/devicetree/bindings/mmc/arm,pl18x.yaml6
-rw-r--r--Documentation/devicetree/bindings/mux/gpio-mux.yaml11
-rw-r--r--Documentation/devicetree/bindings/mux/mux-consumer.yaml21
-rw-r--r--Documentation/devicetree/bindings/mux/mux-controller.yaml26
-rw-r--r--Documentation/devicetree/bindings/net/actions,owl-emac.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml25
-rw-r--r--Documentation/devicetree/bindings/net/brcm,amac.txt30
-rw-r--r--Documentation/devicetree/bindings/net/brcm,amac.yaml88
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bcm6368-mdio-mux.yaml26
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bcmgenet.txt125
-rw-r--r--Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml145
-rw-r--r--Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt62
-rw-r--r--Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.yaml80
-rw-r--r--Documentation/devicetree/bindings/net/brcm,systemport.txt38
-rw-r--r--Documentation/devicetree/bindings/net/brcm,systemport.yaml86
-rw-r--r--Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml3
-rw-r--r--Documentation/devicetree/bindings/net/can/bosch,m_can.yaml52
-rw-r--r--Documentation/devicetree/bindings/net/can/tcan4x5x.txt2
-rw-r--r--Documentation/devicetree/bindings/net/cdns,macb.yaml159
-rw-r--r--Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/dsa/qca8k.yaml23
-rw-r--r--Documentation/devicetree/bindings/net/ethernet-controller.yaml54
-rw-r--r--Documentation/devicetree/bindings/net/fsl,fec.yaml3
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fman.txt9
-rw-r--r--Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/intel,ixp4xx-ethernet.yaml4
-rw-r--r--Documentation/devicetree/bindings/net/lantiq,etop-xway.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/lantiq,xrx200-net.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/litex,liteeth.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/macb.txt60
-rw-r--r--Documentation/devicetree/bindings/net/mdio-mux.yaml7
-rw-r--r--Documentation/devicetree/bindings/net/mdio.yaml8
-rw-r--r--Documentation/devicetree/bindings/net/mediatek,star-emac.yaml5
-rw-r--r--Documentation/devicetree/bindings/net/oxnas-dwmac.txt3
-rw-r--r--Documentation/devicetree/bindings/net/qca,ar71xx.yaml16
-rw-r--r--Documentation/devicetree/bindings/net/qcom,ipa.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/snps,dwmac.yaml6
-rw-r--r--Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml1
-rw-r--r--Documentation/devicetree/bindings/net/stm32-dwmac.yaml10
-rw-r--r--Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml7
-rw-r--r--Documentation/devicetree/bindings/net/ti,dp83869.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml2
-rw-r--r--Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml4
-rw-r--r--Documentation/devicetree/bindings/nvmem/brcm,nvram.yaml3
-rw-r--r--Documentation/devicetree/bindings/nvmem/mtk-efuse.txt2
-rw-r--r--Documentation/devicetree/bindings/nvmem/nvmem.yaml17
-rw-r--r--Documentation/devicetree/bindings/nvmem/rmem.yaml3
-rw-r--r--Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml3
-rw-r--r--Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt133
-rw-r--r--Documentation/devicetree/bindings/pci/brcm,iproc-pcie.yaml184
-rw-r--r--Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml30
-rw-r--r--Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml1
-rw-r--r--Documentation/devicetree/bindings/pci/cdns-pcie-ep.yaml1
-rw-r--r--Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml6
-rw-r--r--Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml13
-rw-r--r--Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml3
-rw-r--r--Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml4
-rw-r--r--Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml18
-rw-r--r--Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml5
-rw-r--r--Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml6
-rw-r--r--Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/ti,am65-pci-ep.yaml10
-rw-r--r--Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml20
-rw-r--r--Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml2
-rw-r--r--Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml1
-rw-r--r--Documentation/devicetree/bindings/perf/arm,dsu-pmu.yaml45
-rw-r--r--Documentation/devicetree/bindings/phy/amlogic,meson8-hdmi-tx-phy.yaml65
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.txt47
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.yaml76
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,mdio-mux-bus-pci.txt27
-rw-r--r--Documentation/devicetree/bindings/phy/brcm,ns2-pcie-phy.yaml41
-rw-r--r--Documentation/devicetree/bindings/phy/fsl,imx8-pcie-phy.yaml92
-rw-r--r--Documentation/devicetree/bindings/phy/intel,phy-thunderbay-emmc.yaml46
-rw-r--r--Documentation/devicetree/bindings/phy/mediatek,tphy.yaml18
-rw-r--r--Documentation/devicetree/bindings/phy/microchip,lan966x-serdes.yaml59
-rw-r--r--Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml9
-rw-r--r--Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml4
-rw-r--r--Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml44
-rw-r--r--Documentation/devicetree/bindings/phy/phy-tegra194-p2u.txt28
-rw-r--r--Documentation/devicetree/bindings/phy/phy-tegra194-p2u.yaml44
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml67
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml6
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml21
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/actions,s500-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm6318-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm63268-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm6328-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm6358-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm6362-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm6368-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,ns-pinmux.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml7
-rw-r--r--Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.yaml12
-rw-r--r--Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml13
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imx8ulp-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/fsl,imxrt1050.yaml79
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.yaml8
-rw-r--r--Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/intel,pinctrl-thunderbay.yaml119
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt65xx-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt6779-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt6797-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt41
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml341
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl.yaml45
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,lpass-lpi-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,mdm9607-pinctrl.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,msm8226-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,msm8953-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,qcm2290-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sc8180x-pinctrl.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sdx55-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sdx65-pinctrl.yaml191
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm6115-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm6125-pinctrl.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm6350-pinctrl.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm8350-pinctrl.yaml1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,sm8450-pinctrl.yaml143
-rw-r--r--Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinmux.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rza1-ports.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rza2-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt1
-rw-r--r--Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.yaml44
-rw-r--r--Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml5
-rw-r--r--Documentation/devicetree/bindings/pinctrl/xlnx,zynq-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml3
-rw-r--r--Documentation/devicetree/bindings/power/reset/gpio-restart.txt54
-rw-r--r--Documentation/devicetree/bindings/power/reset/gpio-restart.yaml86
-rw-r--r--Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml4
-rw-r--r--Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.txt20
-rw-r--r--Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.yaml43
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm.yaml2
-rw-r--r--Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml28
-rw-r--r--Documentation/devicetree/bindings/remoteproc/renesas,rcar-rproc.yaml65
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ti,k3-dsp-rproc.yaml3
-rw-r--r--Documentation/devicetree/bindings/remoteproc/ti,k3-r5f-rproc.yaml8
-rw-r--r--Documentation/devicetree/bindings/reserved-memory/nvidia,tegra210-emc-table.yaml31
-rw-r--r--Documentation/devicetree/bindings/reserved-memory/qcom,cmd-db.txt37
-rw-r--r--Documentation/devicetree/bindings/reserved-memory/qcom,cmd-db.yaml46
-rw-r--r--Documentation/devicetree/bindings/reserved-memory/qcom,rmtfs-mem.txt51
-rw-r--r--Documentation/devicetree/bindings/reserved-memory/qcom,rmtfs-mem.yaml53
-rw-r--r--Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt27
-rw-r--r--Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.yaml48
-rw-r--r--Documentation/devicetree/bindings/rng/apm,rng.txt17
-rw-r--r--Documentation/devicetree/bindings/rng/apm,x-gene-rng.yaml47
-rw-r--r--Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml51
-rw-r--r--Documentation/devicetree/bindings/rng/atmel-trng.txt16
-rw-r--r--Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt16
-rw-r--r--Documentation/devicetree/bindings/rng/brcm,iproc-rng200.yaml30
-rw-r--r--Documentation/devicetree/bindings/rng/ks-sa-rng.txt21
-rw-r--r--Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.txt12
-rw-r--r--Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.yaml35
-rw-r--r--Documentation/devicetree/bindings/rng/omap3_rom_rng.txt27
-rw-r--r--Documentation/devicetree/bindings/rng/st,rng.txt15
-rw-r--r--Documentation/devicetree/bindings/rng/st,rng.yaml35
-rw-r--r--Documentation/devicetree/bindings/rng/ti,keystone-rng.yaml50
-rw-r--r--Documentation/devicetree/bindings/rng/ti,omap-rom-rng.yaml41
-rw-r--r--Documentation/devicetree/bindings/rng/timeriomem_rng.txt25
-rw-r--r--Documentation/devicetree/bindings/rng/timeriomem_rng.yaml48
-rw-r--r--Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt20
-rw-r--r--Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.yaml44
-rw-r--r--Documentation/devicetree/bindings/rtc/epson,rx8900.yaml1
-rw-r--r--Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml9
-rw-r--r--Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml1
-rw-r--r--Documentation/devicetree/bindings/rtc/sunplus,sp7021-rtc.yaml56
-rw-r--r--Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml2
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-lpuart.yaml8
-rw-r--r--Documentation/devicetree/bindings/serial/pl011.yaml3
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,sci.yaml48
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,scif.yaml15
-rw-r--r--Documentation/devicetree/bindings/sound/ak4375.yaml57
-rw-r--r--Documentation/devicetree/bindings/sound/amlogic,aiu.yaml5
-rw-r--r--Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml5
-rw-r--r--Documentation/devicetree/bindings/sound/amlogic,t9015.yaml5
-rw-r--r--Documentation/devicetree/bindings/sound/audio-graph-port.yaml9
-rw-r--r--Documentation/devicetree/bindings/sound/cirrus,cs42l42.yaml225
-rw-r--r--Documentation/devicetree/bindings/sound/cs42l42.txt115
-rw-r--r--Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml1
-rw-r--r--Documentation/devicetree/bindings/sound/linux,spdif-dit.yaml5
-rw-r--r--Documentation/devicetree/bindings/sound/mt8195-afe-pcm.yaml8
-rw-r--r--Documentation/devicetree/bindings/sound/mt8195-mt6359-rt1011-rt5682.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/mt8195-mt6359-rt1019-rt5682.yaml14
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.txt48
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.yaml74
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-common.yaml83
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml10
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt53
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.yaml97
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.txt52
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.yaml85
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5677.txt67
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5677.yaml100
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-sgtl5000.txt42
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-sgtl5000.yaml67
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-trimslice.txt21
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-trimslice.yaml33
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.txt40
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.yaml79
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt62
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.yaml93
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm9712.txt60
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm9712.yaml76
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra20-i2s.txt30
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra20-i2s.yaml77
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra20-spdif.yaml85
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml2
-rw-r--r--Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml41
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt96
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,sm8250.yaml152
-rw-r--r--Documentation/devicetree/bindings/sound/realtek,rt5682s.yaml4
-rw-r--r--Documentation/devicetree/bindings/sound/samsung-i2s.yaml6
-rw-r--r--Documentation/devicetree/bindings/sound/simple-audio-amplifier.yaml8
-rw-r--r--Documentation/devicetree/bindings/sound/ti,tlv320adc3xxx.yaml137
-rw-r--r--Documentation/devicetree/bindings/sound/wlf,wm8903.yaml116
-rw-r--r--Documentation/devicetree/bindings/sound/wm8903.txt82
-rw-r--r--Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml5
-rw-r--r--Documentation/devicetree/bindings/spmi/mtk,spmi-mtk-pmif.yaml76
-rw-r--r--Documentation/devicetree/bindings/spmi/spmi.yaml3
-rw-r--r--Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt23
-rw-r--r--Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml56
-rw-r--r--Documentation/devicetree/bindings/thermal/thermal-zones.yaml9
-rw-r--r--Documentation/devicetree/bindings/timer/cdns,ttc.yaml3
-rw-r--r--Documentation/devicetree/bindings/timer/mstar,msc313e-timer.yaml46
-rw-r--r--Documentation/devicetree/bindings/timer/nxp,tpm-timer.yaml6
-rw-r--r--Documentation/devicetree/bindings/timer/renesas,ostm.yaml20
-rw-r--r--Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml1
-rw-r--r--Documentation/devicetree/bindings/trivial-devices.yaml17
-rw-r--r--Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt1
-rw-r--r--Documentation/devicetree/bindings/usb/brcm,bdc.txt29
-rw-r--r--Documentation/devicetree/bindings/usb/brcm,bdc.yaml50
-rw-r--r--Documentation/devicetree/bindings/usb/dwc2.yaml13
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3-xilinx.txt56
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml131
-rw-r--r--Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml3
-rw-r--r--Documentation/devicetree/bindings/usb/qcom,dwc3.yaml4
-rw-r--r--Documentation/devicetree/bindings/usb/snps,dwc3.yaml6
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml16
-rw-r--r--Documentation/devicetree/bindings/watchdog/atmel,sama5d4-wdt.yaml3
-rw-r--r--Documentation/devicetree/bindings/watchdog/brcm,bcm7038-wdt.txt19
-rw-r--r--Documentation/devicetree/bindings/watchdog/brcm,bcm7038-wdt.yaml43
-rw-r--r--Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml7
-rw-r--r--Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml2
-rw-r--r--Documentation/devicetree/bindings/watchdog/realtek,otto-wdt.yaml91
-rw-r--r--Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml75
-rw-r--r--Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml48
-rw-r--r--Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml6
-rw-r--r--Documentation/driver-api/auxiliary_bus.rst236
-rw-r--r--Documentation/driver-api/dmaengine/dmatest.rst17
-rw-r--r--Documentation/driver-api/dmaengine/provider.rst23
-rw-r--r--Documentation/driver-api/firewire.rst4
-rw-r--r--Documentation/driver-api/fpga/fpga-bridge.rst6
-rw-r--r--Documentation/driver-api/fpga/fpga-mgr.rst38
-rw-r--r--Documentation/driver-api/fpga/fpga-region.rst12
-rw-r--r--Documentation/driver-api/generic-counter.rst10
-rw-r--r--Documentation/driver-api/pci/pci.rst2
-rw-r--r--Documentation/driver-api/serial/index.rst1
-rw-r--r--Documentation/driver-api/serial/n_gsm.rst8
-rw-r--r--Documentation/driver-api/serial/tty.rst328
-rw-r--r--Documentation/driver-api/usb/writing_usb_driver.rst32
-rw-r--r--Documentation/filesystems/caching/backend-api.rst850
-rw-r--r--Documentation/filesystems/caching/cachefiles.rst6
-rw-r--r--Documentation/filesystems/caching/fscache.rst525
-rw-r--r--Documentation/filesystems/caching/index.rst4
-rw-r--r--Documentation/filesystems/caching/netfs-api.rst1136
-rw-r--r--Documentation/filesystems/caching/object.rst313
-rw-r--r--Documentation/filesystems/caching/operations.rst210
-rw-r--r--Documentation/filesystems/ceph.rst25
-rw-r--r--Documentation/filesystems/dax.rst20
-rw-r--r--Documentation/filesystems/f2fs.rst1
-rw-r--r--Documentation/filesystems/netfs_library.rst32
-rw-r--r--Documentation/filesystems/proc.rst6
-rw-r--r--Documentation/gpu/todo.rst24
-rw-r--r--Documentation/index.rst2
-rw-r--r--Documentation/kbuild/kconfig-language.rst2
-rw-r--r--Documentation/kernel-hacking/locking.rst2
-rw-r--r--Documentation/livepatch/api.rst30
-rw-r--r--Documentation/livepatch/index.rst1
-rw-r--r--Documentation/livepatch/shadow-vars.rst4
-rw-r--r--Documentation/livepatch/system-state.rst4
-rw-r--r--Documentation/process/changes.rst2
-rw-r--r--Documentation/riscv/vm-layout.rst12
-rw-r--r--Documentation/staging/tee.rst4
-rw-r--r--Documentation/tools/index.rst20
-rw-r--r--Documentation/tools/rtla/Makefile41
-rw-r--r--Documentation/tools/rtla/common_appendix.rst12
-rw-r--r--Documentation/tools/rtla/common_hist_options.rst23
-rw-r--r--Documentation/tools/rtla/common_options.rst28
-rw-r--r--Documentation/tools/rtla/common_osnoise_description.rst8
-rw-r--r--Documentation/tools/rtla/common_osnoise_options.rst17
-rw-r--r--Documentation/tools/rtla/common_timerlat_description.rst10
-rw-r--r--Documentation/tools/rtla/common_timerlat_options.rst16
-rw-r--r--Documentation/tools/rtla/common_top_options.rst3
-rw-r--r--Documentation/tools/rtla/index.rst26
-rw-r--r--Documentation/tools/rtla/rtla-osnoise-hist.rst66
-rw-r--r--Documentation/tools/rtla/rtla-osnoise-top.rst61
-rw-r--r--Documentation/tools/rtla/rtla-osnoise.rst59
-rw-r--r--Documentation/tools/rtla/rtla-timerlat-hist.rst106
-rw-r--r--Documentation/tools/rtla/rtla-timerlat-top.rst145
-rw-r--r--Documentation/tools/rtla/rtla-timerlat.rst57
-rw-r--r--Documentation/tools/rtla/rtla.rst48
-rw-r--r--Documentation/trace/coresight/coresight-config.rst78
-rw-r--r--Documentation/trace/events.rst19
-rw-r--r--Documentation/trace/ftrace.rst2
-rw-r--r--Documentation/translations/zh_CN/core-api/kernel-api.rst2
-rw-r--r--Documentation/translations/zh_CN/core-api/kobject.rst12
-rw-r--r--Documentation/tty/index.rst63
-rw-r--r--Documentation/tty/n_tty.rst22
-rw-r--r--Documentation/tty/tty_buffer.rst46
-rw-r--r--Documentation/tty/tty_driver.rst128
-rw-r--r--Documentation/tty/tty_internals.rst31
-rw-r--r--Documentation/tty/tty_ldisc.rst85
-rw-r--r--Documentation/tty/tty_port.rst70
-rw-r--r--Documentation/tty/tty_struct.rst81
-rw-r--r--Documentation/usb/gadget-testing.rst2
-rw-r--r--Documentation/userspace-api/ioctl/ioctl-number.rst4
-rw-r--r--Documentation/virt/kvm/api.rst91
-rw-r--r--Documentation/virt/kvm/mmu.rst8
-rw-r--r--Documentation/vm/arch_pgtable_helpers.rst20
-rw-r--r--Documentation/vm/cleancache.rst296
-rw-r--r--Documentation/vm/frontswap.rst31
-rw-r--r--Documentation/vm/index.rst3
-rw-r--r--Documentation/vm/page_migration.rst12
-rw-r--r--Documentation/vm/page_table_check.rst56
-rw-r--r--Documentation/vm/vmalloced-kernel-stacks.rst153
-rw-r--r--LICENSES/preferred/LGPL-2.12
-rw-r--r--MAINTAINERS339
-rw-r--r--Makefile34
-rw-r--r--arch/Kconfig10
-rw-r--r--arch/alpha/include/asm/bitops.h2
-rw-r--r--arch/alpha/kernel/rtc.c7
-rw-r--r--arch/alpha/kernel/srm_env.c4
-rw-r--r--arch/alpha/kernel/srmcons.c2
-rw-r--r--arch/alpha/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/alpha/kernel/traps.c6
-rw-r--r--arch/alpha/mm/fault.c18
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/Makefile4
-rw-r--r--arch/arc/boot/dts/Makefile4
-rw-r--r--arch/arc/include/asm/bitops.h1
-rw-r--r--arch/arc/include/asm/irqflags-compact.h8
-rw-r--r--arch/arc/include/asm/perf_event.h162
-rw-r--r--arch/arc/include/asm/thread_info.h4
-rw-r--r--arch/arc/kernel/perf_event.c166
-rw-r--r--arch/arc/kernel/unwind.c11
-rw-r--r--arch/arc/mm/dma.c2
-rw-r--r--arch/arc/mm/fault.c3
-rw-r--r--arch/arc/plat-axs10x/axs10x.c2
-rw-r--r--arch/arc/plat-hsdk/platform.c2
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/Kconfig.debug2
-rw-r--r--arch/arm/boot/compressed/Makefile8
-rw-r--r--arch/arm/boot/dts/Makefile1
-rw-r--r--arch/arm/boot/dts/am335x-wega.dtsi2
-rw-r--r--arch/arm/boot/dts/dra7.dtsi20
-rw-r--r--arch/arm/boot/dts/imx23-evk.dts1
-rw-r--r--arch/arm/boot/dts/imx6qdl-udoo.dtsi5
-rw-r--r--arch/arm/boot/dts/imx7ulp.dtsi2
-rw-r--r--arch/arm/boot/dts/meson.dtsi8
-rw-r--r--arch/arm/boot/dts/meson8.dtsi24
-rw-r--r--arch/arm/boot/dts/meson8b.dtsi24
-rw-r--r--arch/arm/boot/dts/omap3-beagle-ab4.dts47
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts33
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts50
-rw-r--r--arch/arm/boot/dts/spear320-hmi.dts1
-rw-r--r--arch/arm/boot/dts/ste-ux500-samsung-skomer.dts4
-rw-r--r--arch/arm/configs/bcm2835_defconfig1
-rw-r--r--arch/arm/configs/qcom_defconfig1
-rw-r--r--arch/arm/crypto/blake2s-shash.c4
-rw-r--r--arch/arm/include/asm/assembler.h2
-rw-r--r--arch/arm/include/asm/bitops.h1
-rw-r--r--arch/arm/include/asm/io.h5
-rw-r--r--arch/arm/include/asm/processor.h1
-rw-r--r--arch/arm/include/asm/uaccess.h10
-rw-r--r--arch/arm/include/debug/pl01x.S7
-rw-r--r--arch/arm/kernel/atags_proc.c2
-rw-r--r--arch/arm/kernel/perf_callchain.c28
-rw-r--r--arch/arm/kernel/traps.c2
-rw-r--r--arch/arm/mach-dove/pcie.c9
-rw-r--r--arch/arm/mach-iop32x/pci.c5
-rw-r--r--arch/arm/mach-mv78xx0/pcie.c5
-rw-r--r--arch/arm/mach-omap2/display.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c4
-rw-r--r--arch/arm/mach-orion5x/pci.c10
-rw-r--r--arch/arm/mach-socfpga/Kconfig2
-rw-r--r--arch/arm/mm/alignment.c2
-rw-r--r--arch/arm/mm/fault.c4
-rw-r--r--arch/arm/mm/ioremap.c16
-rw-r--r--arch/arm/probes/kprobes/Makefile3
-rw-r--r--arch/arm/tools/syscall.tbl1
-rw-r--r--arch/arm/xen/enlighten.c132
-rw-r--r--arch/arm64/Kconfig119
-rw-r--r--arch/arm64/Kconfig.platforms3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts8
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts8
-rw-r--r--arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi4
-rw-r--r--arch/arm64/boot/dts/freescale/imx8mq.dtsi10
-rw-r--r--arch/arm64/boot/dts/freescale/mba8mx.dtsi2
-rw-r--r--arch/arm64/boot/dts/nvidia/tegra194.dtsi5
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts14
-rw-r--r--arch/arm64/boot/dts/ti/k3-j721s2.dtsi22
-rw-r--r--arch/arm64/include/asm/atomic_lse.h2
-rw-r--r--arch/arm64/include/asm/bitops.h1
-rw-r--r--arch/arm64/include/asm/cmpxchg.h2
-rw-r--r--arch/arm64/include/asm/cputype.h4
-rw-r--r--arch/arm64/include/asm/el2_setup.h2
-rw-r--r--arch/arm64/include/asm/hyperv-tlfs.h9
-rw-r--r--arch/arm64/include/asm/kvm_asm.h1
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h4
-rw-r--r--arch/arm64/include/asm/kvm_host.h58
-rw-r--r--arch/arm64/include/asm/kvm_hyp.h1
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h2
-rw-r--r--arch/arm64/include/asm/kvm_pgtable.h30
-rw-r--r--arch/arm64/include/asm/kvm_pkvm.h71
-rw-r--r--arch/arm64/include/asm/mmu.h1
-rw-r--r--arch/arm64/include/asm/sysreg.h1
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h2
-rw-r--r--arch/arm64/kernel/asm-offsets.c1
-rw-r--r--arch/arm64/kernel/cpu_errata.c37
-rw-r--r--arch/arm64/kernel/cpufeature.c3
-rw-r--r--arch/arm64/kernel/fpsimd.c6
-rw-r--r--arch/arm64/kernel/image-vars.h2
-rw-r--r--arch/arm64/kernel/module.c4
-rw-r--r--arch/arm64/kernel/perf_callchain.c13
-rw-r--r--arch/arm64/kernel/stacktrace.c5
-rw-r--r--arch/arm64/kernel/traps.c2
-rw-r--r--arch/arm64/kernel/vdso/Makefile5
-rw-r--r--arch/arm64/kvm/.gitignore (renamed from arch/mips/txx9/rbtx4938/Makefile)2
-rw-r--r--arch/arm64/kvm/Kconfig2
-rw-r--r--arch/arm64/kvm/Makefile24
-rw-r--r--arch/arm64/kvm/arch_timer.c13
-rw-r--r--arch/arm64/kvm/arm.c191
-rw-r--r--arch/arm64/kvm/fpsimd.c79
-rw-r--r--arch/arm64/kvm/handle_exit.c13
-rw-r--r--arch/arm64/kvm/hyp/Makefile2
-rw-r--r--arch/arm64/kvm/hyp/exception.c5
-rw-r--r--arch/arm64/kvm/hyp/fpsimd.S6
-rw-r--r--arch/arm64/kvm/hyp/hyp-constants.c10
-rw-r--r--arch/arm64/kvm/hyp/include/hyp/switch.h53
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/mem_protect.h6
-rw-r--r--arch/arm64/kvm/hyp/include/nvhe/mm.h59
-rw-r--r--arch/arm64/kvm/hyp/nvhe/early_alloc.c5
-rw-r--r--arch/arm64/kvm/hyp/nvhe/hyp-main.c8
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mem_protect.c505
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mm.c4
-rw-r--r--arch/arm64/kvm/hyp/nvhe/page_alloc.c2
-rw-r--r--arch/arm64/kvm/hyp/nvhe/setup.c25
-rw-r--r--arch/arm64/kvm/hyp/nvhe/switch.c1
-rw-r--r--arch/arm64/kvm/hyp/pgtable.c126
-rw-r--r--arch/arm64/kvm/hyp/vgic-v3-sr.c3
-rw-r--r--arch/arm64/kvm/hyp/vhe/switch.c1
-rw-r--r--arch/arm64/kvm/mmu.c177
-rw-r--r--arch/arm64/kvm/perf.c59
-rw-r--r--arch/arm64/kvm/pkvm.c (renamed from arch/arm64/kvm/hyp/reserved_mem.c)8
-rw-r--r--arch/arm64/kvm/pmu-emul.c5
-rw-r--r--arch/arm64/kvm/psci.c10
-rw-r--r--arch/arm64/kvm/reset.c30
-rw-r--r--arch/arm64/kvm/vgic/vgic-init.c12
-rw-r--r--arch/arm64/kvm/vgic/vgic-kvm-device.c2
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio-v2.c3
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio-v3.c15
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio.c4
-rw-r--r--arch/arm64/kvm/vgic/vgic-mmio.h2
-rw-r--r--arch/arm64/kvm/vgic/vgic-v2.c9
-rw-r--r--arch/arm64/kvm/vgic/vgic-v3.c27
-rw-r--r--arch/arm64/kvm/vgic/vgic-v4.c5
-rw-r--r--arch/arm64/kvm/vgic/vgic.c2
-rw-r--r--arch/arm64/mm/extable.c4
-rw-r--r--arch/arm64/mm/fault.c8
-rw-r--r--arch/arm64/mm/init.c2
-rw-r--r--arch/arm64/tools/cpucaps4
-rw-r--r--arch/csky/abiv1/alignment.c2
-rw-r--r--arch/csky/include/asm/bitops.h1
-rw-r--r--arch/csky/kernel/perf_callchain.c10
-rw-r--r--arch/csky/kernel/traps.c2
-rw-r--r--arch/csky/mm/fault.c2
-rw-r--r--arch/h8300/boot/compressed/Makefile4
-rw-r--r--arch/h8300/boot/dts/Makefile6
-rw-r--r--arch/h8300/include/asm/bitops.h1
-rw-r--r--arch/h8300/kernel/traps.c3
-rw-r--r--arch/h8300/mm/fault.c2
-rw-r--r--arch/hexagon/include/asm/bitops.h1
-rw-r--r--arch/hexagon/kernel/traps.c2
-rw-r--r--arch/hexagon/mm/vm_fault.c8
-rw-r--r--arch/ia64/Kconfig11
-rw-r--r--arch/ia64/include/asm/bitops.h2
-rw-r--r--arch/ia64/kernel/mca_drv.c2
-rw-r--r--arch/ia64/kernel/module.c6
-rw-r--r--arch/ia64/kernel/salinfo.c10
-rw-r--r--arch/ia64/kernel/setup.c5
-rw-r--r--arch/ia64/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/ia64/kernel/topology.c3
-rw-r--r--arch/ia64/kernel/traps.c2
-rw-r--r--arch/ia64/kernel/uncached.c2
-rw-r--r--arch/ia64/mm/fault.c18
-rw-r--r--arch/ia64/pci/fixup.c4
-rw-r--r--arch/m68k/configs/amiga_defconfig1
-rw-r--r--arch/m68k/configs/apollo_defconfig1
-rw-r--r--arch/m68k/configs/atari_defconfig1
-rw-r--r--arch/m68k/configs/bvme6000_defconfig1
-rw-r--r--arch/m68k/configs/hp300_defconfig1
-rw-r--r--arch/m68k/configs/mac_defconfig1
-rw-r--r--arch/m68k/configs/multi_defconfig1
-rw-r--r--arch/m68k/configs/mvme147_defconfig1
-rw-r--r--arch/m68k/configs/mvme16x_defconfig1
-rw-r--r--arch/m68k/configs/q40_defconfig1
-rw-r--r--arch/m68k/configs/sun3_defconfig1
-rw-r--r--arch/m68k/configs/sun3x_defconfig1
-rw-r--r--arch/m68k/include/asm/bitops.h2
-rw-r--r--arch/m68k/kernel/ptrace.c12
-rw-r--r--arch/m68k/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/m68k/kernel/traps.c2
-rw-r--r--arch/m68k/mm/fault.c20
-rw-r--r--arch/microblaze/Makefile8
-rw-r--r--arch/microblaze/kernel/exceptions.c4
-rw-r--r--arch/microblaze/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/microblaze/mm/fault.c22
-rw-r--r--arch/mips/Kconfig33
-rw-r--r--arch/mips/Makefile2
-rw-r--r--arch/mips/alchemy/common/gpiolib.c2
-rw-r--r--arch/mips/ath79/setup.c10
-rw-r--r--arch/mips/bcm47xx/Platform1
-rw-r--r--arch/mips/bcm47xx/board.c6
-rw-r--r--arch/mips/bcm47xx/buttons.c44
-rw-r--r--arch/mips/bcm47xx/leds.c21
-rw-r--r--arch/mips/bcm63xx/clk.c6
-rw-r--r--arch/mips/bcm63xx/dev-wdt.c8
-rw-r--r--arch/mips/bmips/dma.c106
-rw-r--r--arch/mips/boot/compressed/Makefile18
-rw-r--r--arch/mips/boot/compressed/clz_ctz.c2
-rw-r--r--arch/mips/boot/dts/brcm/bcm7425.dtsi30
-rw-r--r--arch/mips/boot/dts/brcm/bcm7435.dtsi30
-rw-r--r--arch/mips/boot/dts/brcm/bcm97425svmb.dts9
-rw-r--r--arch/mips/boot/dts/brcm/bcm97435svmb.dts9
-rw-r--r--arch/mips/boot/dts/ingenic/ci20.dts61
-rw-r--r--arch/mips/boot/dts/ingenic/jz4725b.dtsi2
-rw-r--r--arch/mips/boot/dts/ingenic/jz4740.dtsi2
-rw-r--r--arch/mips/boot/dts/ingenic/jz4770.dtsi2
-rw-r--r--arch/mips/boot/dts/ingenic/jz4780.dtsi40
-rw-r--r--arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi5
-rw-r--r--arch/mips/cavium-octeon/octeon-memcpy.S2
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-usb.c1
-rw-r--r--arch/mips/configs/ci20_defconfig6
-rw-r--r--arch/mips/configs/rbtx49xx_defconfig4
-rw-r--r--arch/mips/dec/prom/init.c2
-rw-r--r--arch/mips/generic/Platform3
-rw-r--r--arch/mips/generic/init.c11
-rw-r--r--arch/mips/include/asm/asm.h26
-rw-r--r--arch/mips/include/asm/atomic.h11
-rw-r--r--arch/mips/include/asm/bitops.h25
-rw-r--r--arch/mips/include/asm/cmpxchg.h9
-rw-r--r--arch/mips/include/asm/ftrace.h4
-rw-r--r--arch/mips/include/asm/kgdb.h2
-rw-r--r--arch/mips/include/asm/kvm_host.h14
-rw-r--r--arch/mips/include/asm/llsc.h39
-rw-r--r--arch/mips/include/asm/local.h63
-rw-r--r--arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h4
-rw-r--r--arch/mips/include/asm/mach-loongson64/kernel-entry-init.h4
-rw-r--r--arch/mips/include/asm/mach-tx49xx/mangle-port.h8
-rw-r--r--arch/mips/include/asm/mips-cps.h19
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootinfo.h4
-rw-r--r--arch/mips/include/asm/r4kcache.h4
-rw-r--r--arch/mips/include/asm/sibyte/sb1250_mc.h2
-rw-r--r--arch/mips/include/asm/smp-ops.h3
-rw-r--r--arch/mips/include/asm/txx9/boards.h6
-rw-r--r--arch/mips/include/asm/txx9/rbtx4938.h145
-rw-r--r--arch/mips/include/asm/txx9/rbtx4939.h142
-rw-r--r--arch/mips/include/asm/txx9/spi.h34
-rw-r--r--arch/mips/include/asm/txx9/tx4939.h524
-rw-r--r--arch/mips/include/asm/unaligned-emul.h176
-rw-r--r--arch/mips/kernel/mips-cpc.c3
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c104
-rw-r--r--arch/mips/kernel/r2300_fpu.S6
-rw-r--r--arch/mips/kernel/r4k_fpu.S2
-rw-r--r--arch/mips/kernel/relocate_kernel.S22
-rw-r--r--arch/mips/kernel/scall32-o32.S10
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-n64.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S10
-rw-r--r--arch/mips/kernel/signal.c27
-rw-r--r--arch/mips/kernel/syscall.c8
-rw-r--r--arch/mips/kernel/syscalls/syscall_n32.tbl1
-rw-r--r--arch/mips/kernel/syscalls/syscall_n64.tbl1
-rw-r--r--arch/mips/kernel/syscalls/syscall_o32.tbl1
-rw-r--r--arch/mips/kernel/traps.c2
-rw-r--r--arch/mips/kvm/Kconfig1
-rw-r--r--arch/mips/kvm/Makefile3
-rw-r--r--arch/mips/kvm/emulate.c2
-rw-r--r--arch/mips/kvm/loongson_ipi.c4
-rw-r--r--arch/mips/kvm/mips.c82
-rw-r--r--arch/mips/kvm/vz.c12
-rw-r--r--arch/mips/lantiq/clk.c6
-rw-r--r--arch/mips/lantiq/falcon/sysctrl.c2
-rw-r--r--arch/mips/lib/csum_partial.S4
-rw-r--r--arch/mips/lib/memcpy.S4
-rw-r--r--arch/mips/lib/memset.S2
-rw-r--r--arch/mips/lib/strncpy_user.S4
-rw-r--r--arch/mips/lib/strnlen_user.S2
-rw-r--r--arch/mips/loongson2ef/Platform19
-rw-r--r--arch/mips/loongson64/vbios_quirk.c9
-rw-r--r--arch/mips/mm/c-octeon.c4
-rw-r--r--arch/mips/mm/fault.c19
-rw-r--r--arch/mips/mm/init.c14
-rw-r--r--arch/mips/pci/Makefile2
-rw-r--r--arch/mips/pci/fixup-rbtx4938.c53
-rw-r--r--arch/mips/pci/msi-octeon.c32
-rw-r--r--arch/mips/pci/pci-rt3883.c4
-rw-r--r--arch/mips/pci/pci-tx4939.c107
-rw-r--r--arch/mips/ralink/ill_acc.c1
-rw-r--r--arch/mips/ralink/mt7621.c31
-rw-r--r--arch/mips/ralink/of.c19
-rw-r--r--arch/mips/sgi-ip22/Platform5
-rw-r--r--arch/mips/txx9/Kconfig51
-rw-r--r--arch/mips/txx9/Makefile2
-rw-r--r--arch/mips/txx9/generic/7segled.c123
-rw-r--r--arch/mips/txx9/generic/Makefile3
-rw-r--r--arch/mips/txx9/generic/irq_tx4939.c216
-rw-r--r--arch/mips/txx9/generic/setup.c53
-rw-r--r--arch/mips/txx9/generic/setup_tx4939.c568
-rw-r--r--arch/mips/txx9/generic/spi_eeprom.c104
-rw-r--r--arch/mips/txx9/rbtx4938/irq.c157
-rw-r--r--arch/mips/txx9/rbtx4938/prom.c22
-rw-r--r--arch/mips/txx9/rbtx4938/setup.c372
-rw-r--r--arch/mips/txx9/rbtx4939/Makefile2
-rw-r--r--arch/mips/txx9/rbtx4939/irq.c95
-rw-r--r--arch/mips/txx9/rbtx4939/prom.c29
-rw-r--r--arch/mips/txx9/rbtx4939/setup.c554
-rw-r--r--arch/nds32/Makefile6
-rw-r--r--arch/nds32/boot/dts/Makefile7
-rw-r--r--arch/nds32/kernel/fpu.c2
-rw-r--r--arch/nds32/kernel/perf_event_cpu.c29
-rw-r--r--arch/nds32/kernel/traps.c8
-rw-r--r--arch/nds32/mm/fault.c18
-rw-r--r--arch/nios2/boot/dts/Makefile2
-rw-r--r--arch/nios2/kernel/traps.c4
-rw-r--r--arch/nios2/mm/fault.c18
-rw-r--r--arch/openrisc/Kconfig1
-rw-r--r--arch/openrisc/boot/dts/Makefile7
-rw-r--r--arch/openrisc/include/asm/bitops.h1
-rw-r--r--arch/openrisc/include/asm/syscalls.h2
-rw-r--r--arch/openrisc/kernel/entry.S27
-rw-r--r--arch/openrisc/kernel/time.c4
-rw-r--r--arch/openrisc/kernel/traps.c2
-rw-r--r--arch/openrisc/mm/fault.c18
-rw-r--r--arch/parisc/boot/compressed/Makefile24
-rw-r--r--arch/parisc/include/asm/bitops.h9
-rw-r--r--arch/parisc/include/asm/processor.h1
-rw-r--r--arch/parisc/include/asm/uaccess.h29
-rw-r--r--arch/parisc/kernel/setup.c15
-rw-r--r--arch/parisc/kernel/syscalls/syscall.tbl1
-rw-r--r--arch/parisc/kernel/toc.c3
-rw-r--r--arch/parisc/kernel/traps.c2
-rw-r--r--arch/parisc/lib/iomap.c18
-rw-r--r--arch/parisc/mm/fault.c18
-rw-r--r--arch/parisc/mm/init.c9
-rw-r--r--arch/powerpc/Kconfig27
-rw-r--r--arch/powerpc/Makefile9
-rw-r--r--arch/powerpc/boot/Makefile2
-rw-r--r--arch/powerpc/boot/crt0.S33
-rw-r--r--arch/powerpc/boot/dts/bluestone.dts25
-rw-r--r--arch/powerpc/boot/dts/canyonlands.dts18
-rw-r--r--arch/powerpc/boot/dts/digsy_mtc.dts8
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi2
-rw-r--r--arch/powerpc/boot/dts/katmai.dts18
-rw-r--r--arch/powerpc/boot/dts/kilauea.dts28
-rw-r--r--arch/powerpc/boot/dts/o2d.dtsi6
-rw-r--r--arch/powerpc/boot/dts/redwood.dts19
-rw-r--r--arch/powerpc/boot/dts/wii.dts5
-rw-r--r--arch/powerpc/boot/zImage.lds.S7
-rw-r--r--arch/powerpc/configs/gamecube_defconfig2
-rw-r--r--arch/powerpc/configs/microwatt_defconfig3
-rw-r--r--arch/powerpc/configs/ppc64_defconfig1
-rw-r--r--arch/powerpc/configs/pseries_defconfig1
-rw-r--r--arch/powerpc/configs/wii_defconfig2
-rw-r--r--arch/powerpc/crypto/md5-asm.S10
-rw-r--r--arch/powerpc/crypto/sha1-powerpc-asm.S6
-rw-r--r--arch/powerpc/include/asm/asm-prototypes.h5
-rw-r--r--arch/powerpc/include/asm/atomic.h151
-rw-r--r--arch/powerpc/include/asm/bitops.h91
-rw-r--r--arch/powerpc/include/asm/book3s/32/kup.h108
-rw-r--r--arch/powerpc/include/asm/book3s/32/mmu-hash.h84
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgtable.h1
-rw-r--r--arch/powerpc/include/asm/book3s/64/hash.h4
-rw-r--r--arch/powerpc/include/asm/book3s/64/kup.h56
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu-hash.h8
-rw-r--r--arch/powerpc/include/asm/book3s/64/mmu.h38
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush-hash.h6
-rw-r--r--arch/powerpc/include/asm/book3s/64/tlbflush.h4
-rw-r--r--arch/powerpc/include/asm/book3s/pgtable.h4
-rw-r--r--arch/powerpc/include/asm/btext.h10
-rw-r--r--arch/powerpc/include/asm/code-patching.h40
-rw-r--r--arch/powerpc/include/asm/cpm2.h6
-rw-r--r--arch/powerpc/include/asm/cpuidle.h2
-rw-r--r--arch/powerpc/include/asm/cputhreads.h33
-rw-r--r--arch/powerpc/include/asm/eeh.h2
-rw-r--r--arch/powerpc/include/asm/exception-64e.h4
-rw-r--r-- arch/powerpc/include/asm/fadump-internal.h | 6
-rw-r--r-- arch/powerpc/include/asm/firmware.h | 8
-rw-r--r-- arch/powerpc/include/asm/fixmap.h | 6
-rw-r--r-- arch/powerpc/include/asm/floppy.h | 8
-rw-r--r-- arch/powerpc/include/asm/head-64.h | 12
-rw-r--r-- arch/powerpc/include/asm/hugetlb.h | 2
-rw-r--r-- arch/powerpc/include/asm/hw_breakpoint.h | 5
-rw-r--r-- arch/powerpc/include/asm/hw_irq.h | 107
-rw-r--r-- arch/powerpc/include/asm/i8259.h | 2
-rw-r--r-- arch/powerpc/include/asm/inst.h | 95
-rw-r--r-- arch/powerpc/include/asm/interrupt.h | 73
-rw-r--r-- arch/powerpc/include/asm/iommu.h | 2
-rw-r--r-- arch/powerpc/include/asm/ipic.h | 2
-rw-r--r-- arch/powerpc/include/asm/irq.h | 2
-rw-r--r-- arch/powerpc/include/asm/kexec.h | 2
-rw-r--r-- arch/powerpc/include/asm/kup.h | 122
-rw-r--r-- arch/powerpc/include/asm/kvm_asm.h | 1
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s.h | 6
-rw-r--r-- arch/powerpc/include/asm/kvm_book3s_64.h | 6
-rw-r--r-- arch/powerpc/include/asm/kvm_guest.h | 2
-rw-r--r-- arch/powerpc/include/asm/kvm_host.h | 10
-rw-r--r-- arch/powerpc/include/asm/kvm_ppc.h | 18
-rw-r--r-- arch/powerpc/include/asm/machdep.h | 2
-rw-r--r-- arch/powerpc/include/asm/mmu.h | 16
-rw-r--r-- arch/powerpc/include/asm/mmu_context.h | 4
-rw-r--r-- arch/powerpc/include/asm/mpic.h | 2
-rw-r--r-- arch/powerpc/include/asm/nohash/32/kup-8xx.h | 50
-rw-r--r-- arch/powerpc/include/asm/nohash/32/mmu-44x.h | 1
-rw-r--r-- arch/powerpc/include/asm/nohash/32/mmu-8xx.h | 6
-rw-r--r-- arch/powerpc/include/asm/nohash/32/pgtable.h | 1
-rw-r--r-- arch/powerpc/include/asm/nohash/64/pgtable.h | 7
-rw-r--r-- arch/powerpc/include/asm/nohash/kup-booke.h | 110
-rw-r--r-- arch/powerpc/include/asm/opal-api.h | 1
-rw-r--r-- arch/powerpc/include/asm/opal.h | 2
-rw-r--r-- arch/powerpc/include/asm/paca.h | 8
-rw-r--r-- arch/powerpc/include/asm/pci.h | 2
-rw-r--r-- arch/powerpc/include/asm/perf_event_server.h | 2
-rw-r--r-- arch/powerpc/include/asm/ppc-opcode.h | 7
-rw-r--r-- arch/powerpc/include/asm/ppc_asm.h | 43
-rw-r--r-- arch/powerpc/include/asm/processor.h | 18
-rw-r--r-- arch/powerpc/include/asm/ptrace.h | 2
-rw-r--r-- arch/powerpc/include/asm/reg.h | 16
-rw-r--r-- arch/powerpc/include/asm/rtas.h | 2
-rw-r--r-- arch/powerpc/include/asm/sections.h | 16
-rw-r--r-- arch/powerpc/include/asm/setup.h | 9
-rw-r--r-- arch/powerpc/include/asm/smu.h | 2
-rw-r--r-- arch/powerpc/include/asm/sstep.h | 4
-rw-r--r-- arch/powerpc/include/asm/switch_to.h | 3
-rw-r--r-- arch/powerpc/include/asm/syscall.h | 4
-rw-r--r-- arch/powerpc/include/asm/task_size_64.h | 6
-rw-r--r-- arch/powerpc/include/asm/thread_info.h | 2
-rw-r--r-- arch/powerpc/include/asm/time.h | 19
-rw-r--r-- arch/powerpc/include/asm/udbg.h | 10
-rw-r--r-- arch/powerpc/include/asm/uprobes.h | 1
-rw-r--r-- arch/powerpc/include/asm/xics.h | 4
-rw-r--r-- arch/powerpc/include/asm/xmon.h | 2
-rw-r--r-- arch/powerpc/kernel/Makefile | 1
-rw-r--r-- arch/powerpc/kernel/align.c | 4
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 5
-rw-r--r-- arch/powerpc/kernel/btext.c | 16
-rw-r--r-- arch/powerpc/kernel/cacheinfo.c | 5
-rw-r--r-- arch/powerpc/kernel/cpu_setup_power.c | 12
-rw-r--r-- arch/powerpc/kernel/dbell.c | 3
-rw-r--r-- arch/powerpc/kernel/dt_cpu_ftrs.c | 24
-rw-r--r-- arch/powerpc/kernel/eeh_cache.c | 2
-rw-r--r-- arch/powerpc/kernel/eeh_driver.c | 162
-rw-r--r-- arch/powerpc/kernel/entry_32.S | 54
-rw-r--r-- arch/powerpc/kernel/entry_64.S | 4
-rw-r--r-- arch/powerpc/kernel/epapr_paravirt.c | 2
-rw-r--r-- arch/powerpc/kernel/exceptions-64e.S | 14
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 174
-rw-r--r-- arch/powerpc/kernel/fadump.c | 26
-rw-r--r-- arch/powerpc/kernel/fpu.S | 5
-rw-r--r-- arch/powerpc/kernel/head_32.h | 9
-rw-r--r-- arch/powerpc/kernel/head_40x.S | 17
-rw-r--r-- arch/powerpc/kernel/head_44x.S | 26
-rw-r--r-- arch/powerpc/kernel/head_64.S | 20
-rw-r--r-- arch/powerpc/kernel/head_book3s_32.S | 8
-rw-r--r-- arch/powerpc/kernel/head_booke.h | 3
-rw-r--r-- arch/powerpc/kernel/head_fsl_booke.S | 13
-rw-r--r-- arch/powerpc/kernel/hw_breakpoint.c | 4
-rw-r--r-- arch/powerpc/kernel/hw_breakpoint_constraints.c | 4
-rw-r--r-- arch/powerpc/kernel/idle.c | 2
-rw-r--r-- arch/powerpc/kernel/idle_6xx.S | 2
-rw-r--r-- arch/powerpc/kernel/interrupt.c | 3
-rw-r--r-- arch/powerpc/kernel/interrupt_64.S | 48
-rw-r--r-- arch/powerpc/kernel/irq.c | 5
-rw-r--r-- arch/powerpc/kernel/kgdb.c | 4
-rw-r--r-- arch/powerpc/kernel/kprobes.c | 4
-rw-r--r-- arch/powerpc/kernel/l2cr_6xx.S | 6
-rw-r--r-- arch/powerpc/kernel/mce.c | 2
-rw-r--r-- arch/powerpc/kernel/mce_power.c | 18
-rw-r--r-- arch/powerpc/kernel/module.c | 11
-rw-r--r-- arch/powerpc/kernel/module_32.c | 33
-rw-r--r-- arch/powerpc/kernel/nvram_64.c | 6
-rw-r--r-- arch/powerpc/kernel/optprobes.c | 12
-rw-r--r-- arch/powerpc/kernel/optprobes_head.S | 4
-rw-r--r-- arch/powerpc/kernel/paca.c | 18
-rw-r--r-- arch/powerpc/kernel/pci-common.c | 2
-rw-r--r-- arch/powerpc/kernel/pci_32.c | 4
-rw-r--r-- arch/powerpc/kernel/proc_powerpc.c | 4
-rw-r--r-- arch/powerpc/kernel/process.c | 58
-rw-r--r-- arch/powerpc/kernel/prom.c | 33
-rw-r--r-- arch/powerpc/kernel/prom_init.c | 14
-rw-r--r-- arch/powerpc/kernel/rtas.c | 104
-rw-r--r-- arch/powerpc/kernel/rtasd.c | 6
-rw-r--r-- arch/powerpc/kernel/security.c | 4
-rw-r--r-- arch/powerpc/kernel/setup-common.c | 2
-rw-r--r-- arch/powerpc/kernel/setup.h | 2
-rw-r--r-- arch/powerpc/kernel/setup_32.c | 4
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 120
-rw-r--r-- arch/powerpc/kernel/signal_32.c | 14
-rw-r--r-- arch/powerpc/kernel/smp.c | 47
-rw-r--r-- arch/powerpc/kernel/swsusp_32.S | 2
-rw-r--r-- arch/powerpc/kernel/swsusp_asm64.S | 2
-rw-r--r-- arch/powerpc/kernel/syscalls/syscall.tbl | 1
-rw-r--r-- arch/powerpc/kernel/sysfs.c | 10
-rw-r--r-- arch/powerpc/kernel/time.c | 90
-rw-r--r-- arch/powerpc/kernel/tm.S | 15
-rw-r--r-- arch/powerpc/kernel/trace/ftrace.c | 107
-rw-r--r-- arch/powerpc/kernel/trace/ftrace_32.S | 118
-rw-r--r-- arch/powerpc/kernel/trace/ftrace_64_mprofile.S | 15
-rw-r--r-- arch/powerpc/kernel/traps.c | 8
-rw-r--r-- arch/powerpc/kernel/udbg_16550.c | 10
-rw-r--r-- arch/powerpc/kernel/vecemu.c | 2
-rw-r--r-- arch/powerpc/kernel/vector.S | 10
-rw-r--r-- arch/powerpc/kernel/vmlinux.lds.S | 16
-rw-r--r-- arch/powerpc/kernel/watchdog.c | 223
-rw-r--r-- arch/powerpc/kexec/core.c | 2
-rw-r--r-- arch/powerpc/kexec/core_64.c | 4
-rw-r--r-- arch/powerpc/kexec/ranges.c | 2
-rw-r--r-- arch/powerpc/kvm/Kconfig | 17
-rw-r--r-- arch/powerpc/kvm/Makefile | 8
-rw-r--r-- arch/powerpc/kvm/book3s.c | 14
-rw-r--r-- arch/powerpc/kvm/book3s_32_mmu.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_64_entry.S | 11
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_hv.c | 4
-rw-r--r-- arch/powerpc/kvm/book3s_64_mmu_radix.c | 4
-rw-r--r-- arch/powerpc/kvm/book3s_hv.c | 895
-rw-r--r-- arch/powerpc/kvm/book3s_hv.h | 42
-rw-r--r-- arch/powerpc/kvm/book3s_hv_builtin.c | 55
-rw-r--r-- arch/powerpc/kvm/book3s_hv_hmi.c | 7
-rw-r--r-- arch/powerpc/kvm/book3s_hv_interrupts.S | 13
-rw-r--r-- arch/powerpc/kvm/book3s_hv_nested.c | 16
-rw-r--r-- arch/powerpc/kvm/book3s_hv_p9_entry.c | 911
-rw-r--r-- arch/powerpc/kvm/book3s_hv_ras.c | 54
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 6
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 78
-rw-r--r-- arch/powerpc/kvm/book3s_hv_uvmem.c | 14
-rw-r--r-- arch/powerpc/kvm/book3s_pr.c | 13
-rw-r--r-- arch/powerpc/kvm/book3s_pr_papr.c | 2
-rw-r--r-- arch/powerpc/kvm/book3s_xics.c | 6
-rw-r--r-- arch/powerpc/kvm/book3s_xics.h | 2
-rw-r--r-- arch/powerpc/kvm/book3s_xive.c | 15
-rw-r--r-- arch/powerpc/kvm/book3s_xive.h | 4
-rw-r--r-- arch/powerpc/kvm/book3s_xive_native.c | 8
-rw-r--r-- arch/powerpc/kvm/booke.c | 9
-rw-r--r-- arch/powerpc/kvm/e500_emulate.c | 2
-rw-r--r-- arch/powerpc/kvm/powerpc.c | 24
-rw-r--r-- arch/powerpc/lib/Makefile | 7
-rw-r--r-- arch/powerpc/lib/code-patching.c | 497
-rw-r--r-- arch/powerpc/lib/feature-fixups.c | 30
-rw-r--r-- arch/powerpc/lib/sstep.c | 6
-rw-r--r-- arch/powerpc/lib/test-code-patching.c | 362
-rw-r--r-- arch/powerpc/lib/test_code-patching.S | 20
-rw-r--r-- arch/powerpc/lib/test_emulate_step.c | 10
-rw-r--r-- arch/powerpc/lib/test_emulate_step_exec_instr.S | 8
-rw-r--r-- arch/powerpc/mm/book3s32/Makefile | 1
-rw-r--r-- arch/powerpc/mm/book3s32/kuap.c | 5
-rw-r--r-- arch/powerpc/mm/book3s32/kuep.c | 20
-rw-r--r-- arch/powerpc/mm/book3s32/mmu.c | 15
-rw-r--r-- arch/powerpc/mm/book3s32/mmu_context.c | 15
-rw-r--r-- arch/powerpc/mm/book3s64/Makefile | 19
-rw-r--r-- arch/powerpc/mm/book3s64/hash_native.c | 108
-rw-r--r-- arch/powerpc/mm/book3s64/hash_pgtable.c | 1
-rw-r--r-- arch/powerpc/mm/book3s64/hash_utils.c | 117
-rw-r--r-- arch/powerpc/mm/book3s64/hugetlbpage.c (renamed from arch/powerpc/mm/book3s64/hash_hugetlbpage.c) | 4
-rw-r--r-- arch/powerpc/mm/book3s64/mmu_context.c | 34
-rw-r--r-- arch/powerpc/mm/book3s64/pgtable.c | 32
-rw-r--r-- arch/powerpc/mm/book3s64/pkeys.c | 2
-rw-r--r-- arch/powerpc/mm/book3s64/radix_pgtable.c | 40
-rw-r--r-- arch/powerpc/mm/book3s64/slb.c | 16
-rw-r--r-- arch/powerpc/mm/book3s64/trace.c | 8
-rw-r--r-- arch/powerpc/mm/copro_fault.c | 2
-rw-r--r-- arch/powerpc/mm/fault.c | 30
-rw-r--r-- arch/powerpc/mm/hugetlbpage.c | 16
-rw-r--r-- arch/powerpc/mm/init-common.c | 21
-rw-r--r-- arch/powerpc/mm/init_64.c | 59
-rw-r--r-- arch/powerpc/mm/ioremap.c | 20
-rw-r--r-- arch/powerpc/mm/kasan/book3s_32.c | 58
-rw-r--r-- arch/powerpc/mm/maccess.c | 17
-rw-r--r-- arch/powerpc/mm/mem.c | 2
-rw-r--r-- arch/powerpc/mm/mmap.c | 40
-rw-r--r-- arch/powerpc/mm/mmu_context.c | 11
-rw-r--r-- arch/powerpc/mm/nohash/44x.c | 20
-rw-r--r-- arch/powerpc/mm/nohash/8xx.c | 33
-rw-r--r-- arch/powerpc/mm/nohash/Makefile | 2
-rw-r--r-- arch/powerpc/mm/nohash/book3e_pgtable.c | 15
-rw-r--r-- arch/powerpc/mm/nohash/fsl_book3e.c | 10
-rw-r--r-- arch/powerpc/mm/nohash/kaslr_booke.c | 4
-rw-r--r-- arch/powerpc/mm/nohash/kup.c | 33
-rw-r--r-- arch/powerpc/mm/nohash/mmu_context.c | 6
-rw-r--r-- arch/powerpc/mm/nohash/tlb.c | 13
-rw-r--r-- arch/powerpc/mm/nohash/tlb_low_64e.S | 40
-rw-r--r-- arch/powerpc/mm/numa.c | 6
-rw-r--r-- arch/powerpc/mm/pgtable.c | 18
-rw-r--r-- arch/powerpc/mm/pgtable_64.c | 14
-rw-r--r-- arch/powerpc/mm/ptdump/Makefile | 2
-rw-r--r-- arch/powerpc/mm/ptdump/ptdump.c | 6
-rw-r--r-- arch/powerpc/mm/slice.c | 20
-rw-r--r-- arch/powerpc/net/bpf_jit.h | 17
-rw-r--r-- arch/powerpc/net/bpf_jit_comp.c | 97
-rw-r--r-- arch/powerpc/net/bpf_jit_comp32.c | 110
-rw-r--r-- arch/powerpc/net/bpf_jit_comp64.c | 101
-rw-r--r-- arch/powerpc/perf/8xx-pmu.c | 2
-rw-r--r-- arch/powerpc/perf/core-book3s.c | 135
-rw-r--r-- arch/powerpc/perf/generic-compat-pmu.c | 2
-rw-r--r-- arch/powerpc/perf/hv-24x7.c | 2
-rw-r--r-- arch/powerpc/perf/internal.h | 18
-rw-r--r-- arch/powerpc/perf/isa207-common.c | 60
-rw-r--r-- arch/powerpc/perf/power10-pmu.c | 2
-rw-r--r-- arch/powerpc/perf/power5+-pmu.c | 2
-rw-r--r-- arch/powerpc/perf/power5-pmu.c | 2
-rw-r--r-- arch/powerpc/perf/power6-pmu.c | 2
-rw-r--r-- arch/powerpc/perf/power7-pmu.c | 2
-rw-r--r-- arch/powerpc/perf/power8-pmu.c | 2
-rw-r--r-- arch/powerpc/perf/power9-pmu.c | 2
-rw-r--r-- arch/powerpc/perf/ppc970-pmu.c | 2
-rw-r--r-- arch/powerpc/platforms/40x/Kconfig | 1
-rw-r--r-- arch/powerpc/platforms/44x/Kconfig | 4
-rw-r--r-- arch/powerpc/platforms/44x/fsp2.c | 4
-rw-r--r-- arch/powerpc/platforms/4xx/Makefile | 1
-rw-r--r-- arch/powerpc/platforms/4xx/cpm.c | 4
-rw-r--r-- arch/powerpc/platforms/4xx/hsta_msi.c | 7
-rw-r--r-- arch/powerpc/platforms/4xx/msi.c | 281
-rw-r--r-- arch/powerpc/platforms/4xx/pci.c | 2
-rw-r--r-- arch/powerpc/platforms/512x/clock-commonclk.c | 52
-rw-r--r-- arch/powerpc/platforms/512x/mpc512x.h | 4
-rw-r--r-- arch/powerpc/platforms/512x/mpc512x_shared.c | 4
-rw-r--r-- arch/powerpc/platforms/52xx/Kconfig | 2
-rw-r--r-- arch/powerpc/platforms/83xx/km83xx.c | 2
-rw-r--r-- arch/powerpc/platforms/83xx/mpc834x_mds.c | 2
-rw-r--r-- arch/powerpc/platforms/83xx/mpc837x_mds.c | 2
-rw-r--r-- arch/powerpc/platforms/83xx/mpc837x_rdb.c | 2
-rw-r--r-- arch/powerpc/platforms/83xx/mpc83xx.h | 6
-rw-r--r-- arch/powerpc/platforms/83xx/usb.c | 6
-rw-r--r-- arch/powerpc/platforms/85xx/c293pcie.c | 2
-rw-r--r-- arch/powerpc/platforms/85xx/ge_imp3a.c | 2
-rw-r--r-- arch/powerpc/platforms/85xx/mpc85xx_cds.c | 2
-rw-r--r-- arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c | 2
-rw-r--r-- arch/powerpc/platforms/85xx/smp.c | 4
-rw-r--r-- arch/powerpc/platforms/85xx/socrates_fpga_pic.c | 2
-rw-r--r-- arch/powerpc/platforms/85xx/socrates_fpga_pic.h | 2
-rw-r--r-- arch/powerpc/platforms/85xx/xes_mpc85xx.c | 4
-rw-r--r-- arch/powerpc/platforms/Kconfig | 4
-rw-r--r-- arch/powerpc/platforms/Kconfig.cputype | 58
-rw-r--r-- arch/powerpc/platforms/cell/Kconfig | 3
-rw-r--r-- arch/powerpc/platforms/cell/axon_msi.c | 12
-rw-r--r-- arch/powerpc/platforms/cell/cbe_regs.c | 2
-rw-r--r-- arch/powerpc/platforms/cell/iommu.c | 15
-rw-r--r-- arch/powerpc/platforms/cell/pervasive.c | 1
-rw-r--r-- arch/powerpc/platforms/cell/spu_base.c | 6
-rw-r--r-- arch/powerpc/platforms/cell/spu_manage.c | 16
-rw-r--r-- arch/powerpc/platforms/cell/spufs/inode.c | 2
-rw-r--r-- arch/powerpc/platforms/chrp/Kconfig | 2
-rw-r--r-- arch/powerpc/platforms/chrp/pegasos_eth.c | 2
-rw-r--r-- arch/powerpc/platforms/embedded6xx/Kconfig | 2
-rw-r--r-- arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 5
-rw-r--r-- arch/powerpc/platforms/embedded6xx/hlwd-pic.h | 2
-rw-r--r-- arch/powerpc/platforms/embedded6xx/holly.c | 2
-rw-r--r-- arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c | 4
-rw-r--r-- arch/powerpc/platforms/embedded6xx/wii.c | 2
-rw-r--r-- arch/powerpc/platforms/maple/Kconfig | 3
-rw-r--r-- arch/powerpc/platforms/microwatt/Kconfig | 1
-rw-r--r-- arch/powerpc/platforms/microwatt/rng.c | 2
-rw-r--r-- arch/powerpc/platforms/pasemi/Kconfig | 3
-rw-r--r-- arch/powerpc/platforms/pasemi/dma_lib.c | 4
-rw-r--r-- arch/powerpc/platforms/pasemi/msi.c | 11
-rw-r--r-- arch/powerpc/platforms/pasemi/pasemi.h | 2
-rw-r--r-- arch/powerpc/platforms/pasemi/pci.c | 2
-rw-r--r-- arch/powerpc/platforms/pasemi/setup.c | 2
-rw-r--r-- arch/powerpc/platforms/powermac/Kconfig | 3
-rw-r--r-- arch/powerpc/platforms/powermac/cache.S | 4
-rw-r--r-- arch/powerpc/platforms/powermac/feature.c | 2
-rw-r--r-- arch/powerpc/platforms/powermac/low_i2c.c | 3
-rw-r--r-- arch/powerpc/platforms/powermac/nvram.c | 2
-rw-r--r-- arch/powerpc/platforms/powermac/pfunc_base.c | 6
-rw-r--r-- arch/powerpc/platforms/powermac/pic.c | 6
-rw-r--r-- arch/powerpc/platforms/powermac/setup.c | 6
-rw-r--r-- arch/powerpc/platforms/powermac/smp.c | 4
-rw-r--r-- arch/powerpc/platforms/powermac/udbg_scc.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/Kconfig | 2
-rw-r--r-- arch/powerpc/platforms/powernv/idle.c | 27
-rw-r--r-- arch/powerpc/platforms/powernv/opal-core.c | 6
-rw-r--r-- arch/powerpc/platforms/powernv/opal-dump.c | 3
-rw-r--r-- arch/powerpc/platforms/powernv/opal-elog.c | 3
-rw-r--r-- arch/powerpc/platforms/powernv/opal-fadump.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/opal-imc.c | 6
-rw-r--r-- arch/powerpc/platforms/powernv/opal-lpc.c | 1
-rw-r--r-- arch/powerpc/platforms/powernv/opal-msglog.c | 4
-rw-r--r-- arch/powerpc/platforms/powernv/opal-power.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/opal-powercap.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/opal-rtc.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/opal-sensor-groups.c | 4
-rw-r--r-- arch/powerpc/platforms/powernv/opal.c | 8
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 8
-rw-r--r-- arch/powerpc/platforms/powernv/powernv.h | 4
-rw-r--r-- arch/powerpc/platforms/powernv/rng.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/setup.c | 18
-rw-r--r-- arch/powerpc/platforms/ps3/gelic_udbg.c | 2
-rw-r--r-- arch/powerpc/platforms/ps3/mm.c | 4
-rw-r--r-- arch/powerpc/platforms/ps3/os-area.c | 4
-rw-r--r-- arch/powerpc/platforms/ps3/platform.h | 14
-rw-r--r-- arch/powerpc/platforms/ps3/repository.c | 20
-rw-r--r-- arch/powerpc/platforms/ps3/smp.c | 2
-rw-r--r-- arch/powerpc/platforms/ps3/spu.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/Kconfig | 5
-rw-r--r-- arch/powerpc/platforms/pseries/Makefile | 1
-rw-r--r-- arch/powerpc/platforms/pseries/event_sources.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/hotplug-cpu.c | 9
-rw-r--r-- arch/powerpc/platforms/pseries/iommu.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/lpar.c | 71
-rw-r--r-- arch/powerpc/platforms/pseries/lparcfg.c | 5
-rw-r--r-- arch/powerpc/platforms/pseries/mobility.c | 4
-rw-r--r-- arch/powerpc/platforms/pseries/msi.c | 38
-rw-r--r-- arch/powerpc/platforms/pseries/pseries.h | 7
-rw-r--r-- arch/powerpc/platforms/pseries/ras.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/rtas-fadump.c | 6
-rw-r--r-- arch/powerpc/platforms/pseries/scanlog.c | 195
-rw-r--r-- arch/powerpc/platforms/pseries/setup.c | 10
-rw-r--r-- arch/powerpc/platforms/pseries/vas.c | 13
-rw-r--r-- arch/powerpc/platforms/pseries/vio.c | 6
-rw-r--r-- arch/powerpc/sysdev/Kconfig | 6
-rw-r--r-- arch/powerpc/sysdev/cpm2.c | 6
-rw-r--r-- arch/powerpc/sysdev/dart_iommu.c | 2
-rw-r--r-- arch/powerpc/sysdev/fsl_mpic_err.c | 4
-rw-r--r-- arch/powerpc/sysdev/fsl_msi.c | 8
-rw-r--r-- arch/powerpc/sysdev/fsl_pci.c | 2
-rw-r--r-- arch/powerpc/sysdev/fsl_pci.h | 2
-rw-r--r-- arch/powerpc/sysdev/i8259.c | 2
-rw-r--r-- arch/powerpc/sysdev/ipic.c | 2
-rw-r--r-- arch/powerpc/sysdev/mpic.c | 5
-rw-r--r-- arch/powerpc/sysdev/mpic.h | 10
-rw-r--r-- arch/powerpc/sysdev/mpic_msi.c | 6
-rw-r--r-- arch/powerpc/sysdev/mpic_timer.c | 6
-rw-r--r-- arch/powerpc/sysdev/mpic_u3msi.c | 11
-rw-r--r-- arch/powerpc/sysdev/tsi108_dev.c | 3
-rw-r--r-- arch/powerpc/sysdev/tsi108_pci.c | 2
-rw-r--r-- arch/powerpc/sysdev/udbg_memcons.c | 2
-rw-r--r-- arch/powerpc/sysdev/xics/icp-hv.c | 2
-rw-r--r-- arch/powerpc/sysdev/xics/icp-opal.c | 2
-rw-r--r-- arch/powerpc/sysdev/xics/xics-common.c | 2
-rw-r--r-- arch/powerpc/sysdev/xive/common.c | 214
-rw-r--r-- arch/powerpc/sysdev/xive/native.c | 19
-rw-r--r-- arch/powerpc/sysdev/xive/spapr.c | 47
-rw-r--r-- arch/powerpc/sysdev/xive/xive-internal.h | 2
-rw-r--r-- arch/powerpc/xmon/xmon.c | 38
-rw-r--r-- arch/powerpc/xmon/xmon_bpts.h | 4
-rw-r--r-- arch/riscv/Kconfig | 64
-rw-r--r-- arch/riscv/Makefile | 6
-rw-r--r-- arch/riscv/boot/dts/canaan/Makefile | 4
-rw-r--r-- arch/riscv/boot/dts/canaan/k210.dtsi | 23
-rw-r--r-- arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts | 2
-rw-r--r-- arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts | 2
-rw-r--r-- arch/riscv/boot/dts/canaan/sipeed_maix_go.dts | 2
-rw-r--r-- arch/riscv/boot/dts/canaan/sipeed_maixduino.dts | 2
-rw-r--r-- arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts | 4
-rw-r--r-- arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi | 60
-rw-r--r-- arch/riscv/boot/dts/sifive/fu540-c000.dtsi | 40
-rw-r--r-- arch/riscv/boot/dts/sifive/fu740-c000.dtsi | 14
-rw-r--r-- arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts | 5
-rw-r--r-- arch/riscv/configs/defconfig | 13
-rw-r--r-- arch/riscv/configs/nommu_k210_defconfig | 3
-rw-r--r-- arch/riscv/configs/nommu_k210_sdcard_defconfig | 4
-rw-r--r-- arch/riscv/configs/nommu_virt_defconfig | 3
-rw-r--r-- arch/riscv/configs/rv32_defconfig | 6
-rw-r--r-- arch/riscv/errata/alternative.c | 3
-rw-r--r-- arch/riscv/include/asm/Kbuild | 1
-rw-r--r-- arch/riscv/include/asm/asm-extable.h | 65
-rw-r--r-- arch/riscv/include/asm/bitops.h | 1
-rw-r--r-- arch/riscv/include/asm/cpu_ops.h | 2
-rw-r--r-- arch/riscv/include/asm/cpu_ops_sbi.h | 25
-rw-r--r-- arch/riscv/include/asm/csr.h | 3
-rw-r--r-- arch/riscv/include/asm/extable.h | 48
-rw-r--r-- arch/riscv/include/asm/fixmap.h | 1
-rw-r--r-- arch/riscv/include/asm/futex.h | 30
-rw-r--r-- arch/riscv/include/asm/gpr-num.h | 77
-rw-r--r-- arch/riscv/include/asm/kasan.h | 11
-rw-r--r-- arch/riscv/include/asm/kvm_host.h | 12
-rw-r--r-- arch/riscv/include/asm/kvm_types.h | 2
-rw-r--r-- arch/riscv/include/asm/kvm_vcpu_sbi.h | 33
-rw-r--r-- arch/riscv/include/asm/page.h | 16
-rw-r--r-- arch/riscv/include/asm/pgalloc.h | 40
-rw-r--r-- arch/riscv/include/asm/pgtable-64.h | 108
-rw-r--r-- arch/riscv/include/asm/pgtable-bits.h | 2
-rw-r--r-- arch/riscv/include/asm/pgtable.h | 85
-rw-r--r-- arch/riscv/include/asm/sbi.h | 52
-rw-r--r-- arch/riscv/include/asm/smp.h | 10
-rw-r--r-- arch/riscv/include/asm/sparsemem.h | 6
-rw-r--r-- arch/riscv/include/asm/uaccess.h | 163
-rw-r--r-- arch/riscv/kernel/Makefile | 3
-rw-r--r-- arch/riscv/kernel/asm-offsets.c | 3
-rw-r--r-- arch/riscv/kernel/cpu-hotplug.c | 10
-rw-r--r-- arch/riscv/kernel/cpu.c | 23
-rw-r--r-- arch/riscv/kernel/cpu_ops.c | 26
-rw-r--r-- arch/riscv/kernel/cpu_ops_sbi.c | 26
-rw-r--r-- arch/riscv/kernel/cpu_ops_spinwait.c | 27
-rw-r--r-- arch/riscv/kernel/head.S | 62
-rw-r--r-- arch/riscv/kernel/head.h | 6
-rw-r--r-- arch/riscv/kernel/kexec_relocate.S | 20
-rw-r--r-- arch/riscv/kernel/machine_kexec.c | 3
-rw-r--r-- arch/riscv/kernel/perf_callchain.c | 10
-rw-r--r-- arch/riscv/kernel/ptrace.c | 4
-rw-r--r-- arch/riscv/kernel/sbi.c | 266
-rw-r--r-- arch/riscv/kernel/smp.c | 10
-rw-r--r-- arch/riscv/kernel/smpboot.c | 2
-rw-r--r-- arch/riscv/kernel/stacktrace.c | 9
-rw-r--r-- arch/riscv/kernel/traps.c | 2
-rw-r--r-- arch/riscv/kernel/vmlinux-xip.lds.S | 1
-rw-r--r-- arch/riscv/kernel/vmlinux.lds.S | 3
-rw-r--r-- arch/riscv/kvm/Makefile | 10
-rw-r--r-- arch/riscv/kvm/main.c | 8
-rw-r--r-- arch/riscv/kvm/mmu.c | 106
-rw-r--r-- arch/riscv/kvm/vcpu.c | 76
-rw-r--r-- arch/riscv/kvm/vcpu_exit.c | 2
-rw-r--r-- arch/riscv/kvm/vcpu_fp.c | 2
-rw-r--r-- arch/riscv/kvm/vcpu_sbi.c | 213
-rw-r--r-- arch/riscv/kvm/vcpu_sbi_base.c | 100
-rw-r--r-- arch/riscv/kvm/vcpu_sbi_hsm.c | 105
-rw-r--r-- arch/riscv/kvm/vcpu_sbi_replace.c | 132
-rw-r--r-- arch/riscv/kvm/vcpu_sbi_v01.c | 123
-rw-r--r-- arch/riscv/kvm/vm.c | 13
-rw-r--r-- arch/riscv/kvm/vmid.c | 6
-rw-r--r-- arch/riscv/lib/uaccess.S | 28
-rw-r--r-- arch/riscv/mm/cacheflush.c | 5
-rw-r--r-- arch/riscv/mm/context.c | 4
-rw-r--r-- arch/riscv/mm/extable.c | 66
-rw-r--r-- arch/riscv/mm/fault.c | 6
-rw-r--r-- arch/riscv/mm/init.c | 400
-rw-r--r-- arch/riscv/mm/kasan_init.c | 248
-rw-r--r-- arch/riscv/mm/tlbflush.c | 9
-rw-r--r-- arch/riscv/net/bpf_jit_comp64.c | 11
-rw-r--r-- arch/s390/Kconfig | 16
-rw-r--r-- arch/s390/boot/compressed/Makefile | 28
-rw-r--r-- arch/s390/configs/debug_defconfig | 21
-rw-r--r-- arch/s390/configs/defconfig | 17
-rw-r--r-- arch/s390/configs/zfcpdump_defconfig | 3
-rw-r--r-- arch/s390/hypfs/hypfs_vm.c | 6
-rw-r--r-- arch/s390/include/asm/bitops.h | 1
-rw-r--r-- arch/s390/include/asm/cpu_mf.h | 4
-rw-r--r-- arch/s390/include/asm/kvm_host.h | 2
-rw-r--r-- arch/s390/include/asm/uaccess.h | 120
-rw-r--r-- arch/s390/include/asm/uv.h | 34
-rw-r--r-- arch/s390/kernel/dumpstack.c | 2
-rw-r--r-- arch/s390/kernel/module.c | 42
-rw-r--r-- arch/s390/kernel/nmi.c | 29
-rw-r--r-- arch/s390/kernel/perf_cpum_cf_common.c | 4
-rw-r--r-- arch/s390/kernel/perf_cpum_cf_events.c | 6
-rw-r--r-- arch/s390/kernel/perf_cpum_sf.c | 2
-rw-r--r-- arch/s390/kernel/syscalls/syscall.tbl | 1
-rw-r--r-- arch/s390/kvm/Kconfig | 1
-rw-r--r-- arch/s390/kvm/Makefile | 8
-rw-r--r-- arch/s390/kvm/gaccess.c | 158
-rw-r--r-- arch/s390/kvm/interrupt.c | 12
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 165
-rw-r--r-- arch/s390/kvm/kvm-s390.h | 19
-rw-r--r-- arch/s390/kvm/pv.c | 4
-rw-r--r-- arch/s390/kvm/sigp.c | 28
-rw-r--r-- arch/s390/lib/Makefile | 3
-rw-r--r-- arch/s390/lib/test_modules.c | 32
-rw-r--r-- arch/s390/lib/test_modules.h | 53
-rw-r--r-- arch/s390/lib/test_modules_helpers.c | 13
-rw-r--r-- arch/s390/lib/uaccess.c | 24
-rw-r--r-- arch/s390/mm/fault.c | 28
-rw-r--r-- arch/s390/pci/pci_irq.c | 10
-rw-r--r-- arch/sh/boot/Makefile | 16
-rw-r--r-- arch/sh/boot/compressed/Makefile | 22
-rw-r--r-- arch/sh/boot/dts/Makefile | 4
-rw-r--r-- arch/sh/include/asm/bitops.h | 1
-rw-r--r-- arch/sh/kernel/cpu/sh4/sq.c | 3
-rw-r--r-- arch/sh/kernel/syscalls/syscall.tbl | 1
-rw-r--r-- arch/sh/kernel/traps.c | 2
-rw-r--r-- arch/sh/mm/alignment.c | 4
-rw-r--r-- arch/sh/mm/fault.c | 20
-rw-r--r-- arch/sparc/Kconfig | 12
-rw-r--r-- arch/sparc/include/asm/bitops_32.h | 1
-rw-r--r-- arch/sparc/include/asm/bitops_64.h | 2
-rw-r--r-- arch/sparc/kernel/led.c | 8
-rw-r--r-- arch/sparc/kernel/pci_msi.c | 4
-rw-r--r-- arch/sparc/kernel/smp_64.c | 103
-rw-r--r-- arch/sparc/kernel/syscalls/syscall.tbl | 1
-rw-r--r-- arch/sparc/kernel/traps_32.c | 4
-rw-r--r-- arch/sparc/kernel/traps_64.c | 4
-rw-r--r-- arch/sparc/mm/fault_32.c | 16
-rw-r--r-- arch/sparc/mm/fault_64.c | 16
-rw-r--r-- arch/um/drivers/virt-pci.c | 2
-rw-r--r-- arch/um/kernel/trap.c | 8
-rw-r--r-- arch/x86/Kconfig | 36
-rw-r--r-- arch/x86/Kconfig.cpu | 4
-rw-r--r-- arch/x86/Makefile | 16
-rw-r--r-- arch/x86/boot/compressed/Makefile | 12
-rw-r--r-- arch/x86/boot/compressed/efi_thunk_64.S | 2
-rw-r--r-- arch/x86/boot/compressed/head_64.S | 8
-rw-r--r-- arch/x86/boot/compressed/mem_encrypt.S | 6
-rw-r--r-- arch/x86/crypto/aegis128-aesni-asm.S | 48
-rw-r--r-- arch/x86/crypto/aes_ctrby8_avx-x86_64.S | 2
-rw-r--r-- arch/x86/crypto/aesni-intel_asm.S | 56
-rw-r--r-- arch/x86/crypto/aesni-intel_avx-x86_64.S | 40
-rw-r--r-- arch/x86/crypto/blake2s-core.S | 4
-rw-r--r-- arch/x86/crypto/blake2s-shash.c | 4
-rw-r--r-- arch/x86/crypto/blowfish-x86_64-asm_64.S | 12
-rw-r--r-- arch/x86/crypto/camellia-aesni-avx-asm_64.S | 14
-rw-r--r-- arch/x86/crypto/camellia-aesni-avx2-asm_64.S | 14
-rw-r--r-- arch/x86/crypto/camellia-x86_64-asm_64.S | 12
-rw-r--r-- arch/x86/crypto/cast5-avx-x86_64-asm_64.S | 12
-rw-r--r-- arch/x86/crypto/cast6-avx-x86_64-asm_64.S | 10
-rw-r--r-- arch/x86/crypto/chacha-avx2-x86_64.S | 6
-rw-r--r-- arch/x86/crypto/chacha-avx512vl-x86_64.S | 6
-rw-r--r-- arch/x86/crypto/chacha-ssse3-x86_64.S | 8
-rw-r--r-- arch/x86/crypto/crc32-pclmul_asm.S | 2
-rw-r--r-- arch/x86/crypto/crc32c-pcl-intel-asm_64.S | 2
-rw-r--r-- arch/x86/crypto/crct10dif-pcl-asm_64.S | 2
-rw-r--r-- arch/x86/crypto/des3_ede-asm_64.S | 4
-rw-r--r-- arch/x86/crypto/ghash-clmulni-intel_asm.S | 6
-rw-r--r-- arch/x86/crypto/nh-avx2-x86_64.S | 2
-rw-r--r-- arch/x86/crypto/nh-sse2-x86_64.S | 2
-rw-r--r-- arch/x86/crypto/serpent-avx-x86_64-asm_64.S | 10
-rw-r--r-- arch/x86/crypto/serpent-avx2-asm_64.S | 10
-rw-r--r-- arch/x86/crypto/serpent-sse2-i586-asm_32.S | 6
-rw-r--r-- arch/x86/crypto/serpent-sse2-x86_64-asm_64.S | 6
-rw-r--r-- arch/x86/crypto/sha1_avx2_x86_64_asm.S | 2
-rw-r--r-- arch/x86/crypto/sha1_ni_asm.S | 2
-rw-r--r-- arch/x86/crypto/sha1_ssse3_asm.S | 2
-rw-r--r-- arch/x86/crypto/sha256-avx-asm.S | 2
-rw-r--r-- arch/x86/crypto/sha256-avx2-asm.S | 2
-rw-r--r-- arch/x86/crypto/sha256-ssse3-asm.S | 2
-rw-r--r-- arch/x86/crypto/sha256_ni_asm.S | 2
-rw-r--r-- arch/x86/crypto/sha512-avx-asm.S | 2
-rw-r--r-- arch/x86/crypto/sha512-avx2-asm.S | 2
-rw-r--r-- arch/x86/crypto/sha512-ssse3-asm.S | 2
-rw-r--r-- arch/x86/crypto/sm4-aesni-avx-asm_64.S | 12
-rw-r--r-- arch/x86/crypto/sm4-aesni-avx2-asm_64.S | 8
-rw-r--r-- arch/x86/crypto/twofish-avx-x86_64-asm_64.S | 10
-rw-r--r-- arch/x86/crypto/twofish-i586-asm_32.S | 4
-rw-r--r-- arch/x86/crypto/twofish-x86_64-asm_64-3way.S | 6
-rw-r--r-- arch/x86/crypto/twofish-x86_64-asm_64.S | 4
-rw-r--r-- arch/x86/entry/entry_32.S | 43
-rw-r--r-- arch/x86/entry/entry_64.S | 29
-rw-r--r-- arch/x86/entry/syscalls/syscall_32.tbl | 1
-rw-r--r-- arch/x86/entry/syscalls/syscall_64.tbl | 1
-rw-r--r-- arch/x86/entry/thunk_32.S | 2
-rw-r--r-- arch/x86/entry/thunk_64.S | 2
-rw-r--r-- arch/x86/entry/vdso/vdso-layout.lds.S | 1
-rw-r--r-- arch/x86/entry/vdso/vdso32/system_call.S | 2
-rw-r--r-- arch/x86/entry/vdso/vsgx.S | 2
-rw-r--r-- arch/x86/entry/vsyscall/vsyscall_emu_64.S | 6
-rw-r--r-- arch/x86/events/core.c | 13
-rw-r--r-- arch/x86/events/intel/core.c | 33
-rw-r--r-- arch/x86/events/intel/lbr.c | 168
-rw-r--r-- arch/x86/events/intel/pt.c | 5
-rw-r--r-- arch/x86/events/intel/uncore.c | 2
-rw-r--r-- arch/x86/events/intel/uncore.h | 3
-rw-r--r-- arch/x86/events/intel/uncore_discovery.c | 4
-rw-r--r-- arch/x86/events/intel/uncore_discovery.h | 2
-rw-r--r-- arch/x86/events/intel/uncore_snb.c | 214
-rw-r--r-- arch/x86/events/intel/uncore_snbep.c | 2
-rw-r--r-- arch/x86/events/perf_event.h | 10
-rw-r--r-- arch/x86/events/rapl.c | 9
-rw-r--r-- arch/x86/hyperv/hv_init.c | 14
-rw-r--r-- arch/x86/hyperv/irqdomain.c | 55
-rw-r--r-- arch/x86/hyperv/ivm.c | 28
-rw-r--r-- arch/x86/hyperv/mmu.c | 19
-rw-r--r-- arch/x86/include/asm/asm.h | 37
-rw-r--r-- arch/x86/include/asm/bitops.h | 2
-rw-r--r-- arch/x86/include/asm/bug.h | 20
-rw-r--r-- arch/x86/include/asm/cpufeatures.h | 2
-rw-r--r-- arch/x86/include/asm/extable.h | 6
-rw-r--r-- arch/x86/include/asm/extable_fixup_types.h | 59
-rw-r--r-- arch/x86/include/asm/fpu/api.h | 11
-rw-r--r-- arch/x86/include/asm/fpu/types.h | 32
-rw-r--r-- arch/x86/include/asm/futex.h | 28
-rw-r--r-- arch/x86/include/asm/hyperv-tlfs.h | 33
-rw-r--r-- arch/x86/include/asm/insn-eval.h | 2
-rw-r--r-- arch/x86/include/asm/kvm-x86-ops.h | 6
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 93
-rw-r--r-- arch/x86/include/asm/kvm_page_track.h | 6
-rw-r--r-- arch/x86/include/asm/linkage.h | 14
-rw-r--r-- arch/x86/include/asm/mmx.h | 15
-rw-r--r-- arch/x86/include/asm/mshyperv.h | 9
-rw-r--r-- arch/x86/include/asm/msr-index.h | 1
-rw-r--r-- arch/x86/include/asm/msr.h | 26
-rw-r--r-- arch/x86/include/asm/page_32.h | 14
-rw-r--r-- arch/x86/include/asm/paravirt.h | 2
-rw-r--r-- arch/x86/include/asm/pgtable.h | 31
-rw-r--r-- arch/x86/include/asm/qspinlock_paravirt.h | 4
-rw-r--r-- arch/x86/include/asm/required-features.h | 4
-rw-r--r-- arch/x86/include/asm/segment.h | 9
-rw-r--r-- arch/x86/include/asm/sgx.h | 18
-rw-r--r-- arch/x86/include/asm/static_call.h | 2
-rw-r--r-- arch/x86/include/asm/string_32.h | 33
-rw-r--r-- arch/x86/include/asm/svm.h | 36
-rw-r--r-- arch/x86/include/asm/uaccess.h | 39
-rw-r--r-- arch/x86/include/asm/word-at-a-time.h | 66
-rw-r--r-- arch/x86/include/asm/x86_init.h | 6
-rw-r--r-- arch/x86/include/asm/xen/cpuid.h | 7
-rw-r--r-- arch/x86/include/asm/xen/hypervisor.h | 14
-rw-r--r-- arch/x86/include/asm/xen/page.h | 14
-rw-r--r-- arch/x86/include/uapi/asm/kvm.h | 19
-rw-r--r-- arch/x86/include/uapi/asm/prctl.h | 26
-rw-r--r-- arch/x86/kernel/acpi/wakeup_32.S | 6
-rw-r--r-- arch/x86/kernel/alternative.c | 51
-rw-r--r-- arch/x86/kernel/apic/msi.c | 11
-rw-r--r-- arch/x86/kernel/apic/vector.c | 4
-rw-r--r-- arch/x86/kernel/cc_platform.c | 8
-rw-r--r-- arch/x86/kernel/cpu/mce/amd.c | 2
-rw-r--r-- arch/x86/kernel/cpu/mce/intel.c | 1
-rw-r--r-- arch/x86/kernel/cpu/mshyperv.c | 15
-rw-r--r-- arch/x86/kernel/cpu/sgx/encl.c | 2
-rw-r--r-- arch/x86/kernel/cpu/sgx/encls.h | 36
-rw-r--r-- arch/x86/kernel/cpu/sgx/main.c | 10
-rw-r--r-- arch/x86/kernel/dumpstack.c | 4
-rw-r--r-- arch/x86/kernel/early-quirks.c | 10
-rw-r--r-- arch/x86/kernel/fpu/core.c | 99
-rw-r--r-- arch/x86/kernel/fpu/legacy.h | 6
-rw-r--r-- arch/x86/kernel/fpu/regset.c | 9
-rw-r--r-- arch/x86/kernel/fpu/xstate.c | 147
-rw-r--r-- arch/x86/kernel/fpu/xstate.h | 25
-rw-r--r-- arch/x86/kernel/ftrace.c | 2
-rw-r--r-- arch/x86/kernel/ftrace_32.S | 6
-rw-r--r-- arch/x86/kernel/ftrace_64.S | 8
-rw-r--r-- arch/x86/kernel/head_32.S | 2
-rw-r--r-- arch/x86/kernel/hpet.c | 8
-rw-r--r-- arch/x86/kernel/irqflags.S | 2
-rw-r--r-- arch/x86/kernel/kprobes/core.c | 2
-rw-r--r-- arch/x86/kernel/kvm.c | 6
-rw-r--r-- arch/x86/kernel/kvmclock.c | 2
-rw-r--r-- arch/x86/kernel/module.c | 7
-rw-r--r-- arch/x86/kernel/paravirt.c | 4
-rw-r--r-- arch/x86/kernel/process.c | 2
-rw-r--r-- arch/x86/kernel/ptrace.c | 4
-rw-r--r-- arch/x86/kernel/relocate_kernel_32.S | 10
-rw-r--r-- arch/x86/kernel/relocate_kernel_64.S | 10
-rw-r--r-- arch/x86/kernel/setup_percpu.c | 66
-rw-r--r-- arch/x86/kernel/sev_verify_cbit.S | 2
-rw-r--r-- arch/x86/kernel/static_call.c | 5
-rw-r--r-- arch/x86/kernel/verify_cpu.S | 4
-rw-r--r-- arch/x86/kernel/vmlinux.lds.S | 1
-rw-r--r-- arch/x86/kernel/x86_init.c | 12
-rw-r--r-- arch/x86/kvm/Kconfig | 4
-rw-r--r-- arch/x86/kvm/Makefile | 7
-rw-r--r-- arch/x86/kvm/cpuid.c | 256
-rw-r--r-- arch/x86/kvm/cpuid.h | 2
-rw-r--r-- arch/x86/kvm/debugfs.c | 6
-rw-r--r-- arch/x86/kvm/emulate.c | 71
-rw-r--r-- arch/x86/kvm/hyperv.c | 9
-rw-r--r-- arch/x86/kvm/i8254.c | 2
-rw-r--r-- arch/x86/kvm/i8259.c | 5
-rw-r--r-- arch/x86/kvm/ioapic.c | 4
-rw-r--r-- arch/x86/kvm/irq_comm.c | 19
-rw-r--r-- arch/x86/kvm/kvm_cache_regs.h | 20
-rw-r--r-- arch/x86/kvm/kvm_emulate.h | 1
-rw-r--r-- arch/x86/kvm/kvm_onhyperv.c | 3
-rw-r--r-- arch/x86/kvm/lapic.c | 78
-rw-r--r-- arch/x86/kvm/mmu.h | 16
-rw-r--r-- arch/x86/kvm/mmu/mmu.c | 182
-rw-r--r-- arch/x86/kvm/mmu/mmu_internal.h | 9
-rw-r--r-- arch/x86/kvm/mmu/mmutrace.h | 2
-rw-r--r-- arch/x86/kvm/mmu/page_track.c | 8
-rw-r--r-- arch/x86/kvm/mmu/paging_tmpl.h | 48
-rw-r--r-- arch/x86/kvm/mmu/spte.c | 7
-rw-r--r-- arch/x86/kvm/mmu/spte.h | 44
-rw-r--r-- arch/x86/kvm/mmu/tdp_mmu.c | 8
-rw-r--r-- arch/x86/kvm/pmu.c | 168
-rw-r--r-- arch/x86/kvm/pmu.h | 5
-rw-r--r-- arch/x86/kvm/svm/avic.c | 166
-rw-r--r-- arch/x86/kvm/svm/nested.c | 297
-rw-r--r-- arch/x86/kvm/svm/pmu.c | 23
-rw-r--r-- arch/x86/kvm/svm/sev.c | 18
-rw-r--r-- arch/x86/kvm/svm/svm.c | 799
-rw-r--r-- arch/x86/kvm/svm/svm.h | 108
-rw-r--r-- arch/x86/kvm/svm/svm_onhyperv.h | 12
-rw-r--r-- arch/x86/kvm/svm/vmenter.S | 4
-rw-r--r-- arch/x86/kvm/trace.h | 24
-rw-r--r-- arch/x86/kvm/vmx/capabilities.h | 14
-rw-r--r-- arch/x86/kvm/vmx/evmcs.c | 4
-rw-r--r-- arch/x86/kvm/vmx/evmcs.h | 48
-rw-r--r-- arch/x86/kvm/vmx/nested.c | 147
-rw-r--r-- arch/x86/kvm/vmx/pmu_intel.c | 63
-rw-r--r-- arch/x86/kvm/vmx/posted_intr.c | 268
-rw-r--r-- arch/x86/kvm/vmx/posted_intr.h | 14
-rw-r--r-- arch/x86/kvm/vmx/vmcs.h | 5
-rw-r--r-- arch/x86/kvm/vmx/vmcs12.c | 4
-rw-r--r-- arch/x86/kvm/vmx/vmcs12.h | 6
-rw-r--r-- arch/x86/kvm/vmx/vmenter.S | 14
-rw-r--r-- arch/x86/kvm/vmx/vmx.c | 372
-rw-r--r-- arch/x86/kvm/vmx/vmx.h | 47
-rw-r--r-- arch/x86/kvm/vmx/vmx_ops.h | 43
-rw-r--r-- arch/x86/kvm/x86.c | 639
-rw-r--r-- arch/x86/kvm/x86.h | 64
-rw-r--r-- arch/x86/kvm/xen.c | 432
-rw-r--r-- arch/x86/kvm/xen.h | 9
-rw-r--r-- arch/x86/lib/Makefile | 1
-rw-r--r-- arch/x86/lib/atomic64_386_32.S | 86
-rw-r--r-- arch/x86/lib/atomic64_cx8_32.S | 16
-rw-r--r-- arch/x86/lib/checksum_32.S | 27
-rw-r--r-- arch/x86/lib/clear_page_64.S | 6
-rw-r--r-- arch/x86/lib/cmpxchg16b_emu.S | 4
-rw-r--r-- arch/x86/lib/cmpxchg8b_emu.S | 4
-rw-r--r-- arch/x86/lib/copy_mc_64.S | 18
-rw-r--r-- arch/x86/lib/copy_page_64.S | 4
-rw-r--r-- arch/x86/lib/copy_user_64.S | 42
-rw-r--r-- arch/x86/lib/csum-copy_64.S | 2
-rw-r--r-- arch/x86/lib/csum-partial_64.c | 183
-rw-r--r-- arch/x86/lib/error-inject.c | 3
-rw-r--r-- arch/x86/lib/getuser.S | 22
-rw-r--r-- arch/x86/lib/hweight.S | 6
-rw-r--r-- arch/x86/lib/insn-eval.c | 71
-rw-r--r-- arch/x86/lib/iomap_copy_64.S | 2
-rw-r--r-- arch/x86/lib/memcpy_32.c | 4
-rw-r--r-- arch/x86/lib/memcpy_64.S | 12
-rw-r--r-- arch/x86/lib/memmove_64.S | 4
-rw-r--r-- arch/x86/lib/memset_64.S | 6
-rw-r--r-- arch/x86/lib/mmx_32.c | 388
-rw-r--r-- arch/x86/lib/msr-reg.S | 4
-rw-r--r-- arch/x86/lib/putuser.S | 6
-rw-r--r-- arch/x86/lib/retpoline.S | 4
-rw-r--r-- arch/x86/lib/usercopy_32.c | 67
-rw-r--r-- arch/x86/lib/usercopy_64.c | 8
-rw-r--r-- arch/x86/math-emu/div_Xsig.S | 2
-rw-r--r-- arch/x86/math-emu/div_small.S | 2
-rw-r--r-- arch/x86/math-emu/mul_Xsig.S | 6
-rw-r--r-- arch/x86/math-emu/polynom_Xsig.S | 2
-rw-r--r-- arch/x86/math-emu/reg_norm.S | 6
-rw-r--r-- arch/x86/math-emu/reg_round.S | 2
-rw-r--r-- arch/x86/math-emu/reg_u_add.S | 2
-rw-r--r-- arch/x86/math-emu/reg_u_div.S | 2
-rw-r--r-- arch/x86/math-emu/reg_u_mul.S | 2
-rw-r--r-- arch/x86/math-emu/reg_u_sub.S | 2
-rw-r--r-- arch/x86/math-emu/round_Xsig.S | 4
-rw-r--r-- arch/x86/math-emu/shr_Xsig.S | 8
-rw-r--r-- arch/x86/math-emu/wm_shrx.S | 16
-rw-r--r-- arch/x86/mm/extable.c | 113
-rw-r--r-- arch/x86/mm/fault.c | 3
-rw-r--r-- arch/x86/mm/mem_encrypt_boot.S | 4
-rw-r--r-- arch/x86/net/bpf_jit_comp.c | 2
-rw-r--r-- arch/x86/pci/acpi.c | 2
-rw-r--r-- arch/x86/pci/fixup.c | 4
-rw-r--r-- arch/x86/pci/xen.c | 38
-rw-r--r-- arch/x86/platform/efi/efi_stub_32.S | 2
-rw-r--r-- arch/x86/platform/efi/efi_stub_64.S | 2
-rw-r--r-- arch/x86/platform/efi/efi_thunk_64.S | 2
-rw-r--r-- arch/x86/platform/olpc/xo1-wakeup.S | 6
-rw-r--r-- arch/x86/power/hibernate_asm_32.S | 4
-rw-r--r-- arch/x86/power/hibernate_asm_64.S | 4
-rw-r--r-- arch/x86/um/Kconfig | 1
-rw-r--r-- arch/x86/um/checksum_32.S | 4
-rw-r--r-- arch/x86/um/setjmp_32.S | 2
-rw-r--r-- arch/x86/um/setjmp_64.S | 2
-rw-r--r-- arch/x86/xen/Kconfig | 1
-rw-r--r-- arch/x86/xen/enlighten_hvm.c | 22
-rw-r--r-- arch/x86/xen/enlighten_pv.c | 4
-rw-r--r-- arch/x86/xen/pmu.c | 32
-rw-r--r-- arch/x86/xen/smp_pv.c | 26
-rw-r--r-- arch/x86/xen/vga.c | 12
-rw-r--r-- arch/x86/xen/xen-asm.S | 12
-rw-r--r-- arch/x86/xen/xen-head.S | 2
-rw-r--r-- arch/xtensa/Makefile | 2
-rw-r--r-- arch/xtensa/boot/dts/Makefile | 5
-rw-r--r-- arch/xtensa/include/asm/bitops.h | 1
-rw-r--r-- arch/xtensa/kernel/entry.S | 2
-rw-r--r-- arch/xtensa/kernel/syscalls/syscall.tbl | 1
-rw-r--r-- arch/xtensa/kernel/traps.c | 2
-rw-r--r-- arch/xtensa/mm/fault.c | 17
-rw-r--r-- arch/xtensa/platforms/iss/simdisk.c | 4
-rw-r--r-- block/Kconfig | 3
-rw-r--r-- block/Kconfig.iosched | 1
-rw-r--r-- block/Makefile | 2
-rw-r--r-- block/bdev.c | 29
-rw-r--r-- block/bfq-iosched.c | 306
-rw-r--r-- block/bfq-iosched.h | 35
-rw-r--r-- block/bio-integrity.c | 2
-rw-r--r-- block/bio.c | 38
-rw-r--r-- block/blk-cgroup.c | 1
-rw-r--r-- block/blk-core.c | 426
-rw-r--r-- block/blk-crypto-profile.c | 5
-rw-r--r-- block/blk-exec.c | 116
-rw-r--r-- block/blk-flush.c | 18
-rw-r--r-- block/blk-ia-ranges.c | 2
-rw-r--r-- block/blk-integrity.c | 2
-rw-r--r-- block/blk-ioc.c | 318
-rw-r--r-- block/blk-ioprio.c | 13
-rw-r--r-- block/blk-map.c | 2
-rw-r--r-- block/blk-merge.c | 18
-rw-r--r-- block/blk-mq-debugfs.c | 5
-rw-r--r-- block/blk-mq-sched.c | 29
-rw-r--r-- block/blk-mq-sched.h | 2
-rw-r--r-- block/blk-mq-sysfs.c | 2
-rw-r--r-- block/blk-mq-tag.c | 107
-rw-r--r-- block/blk-mq-tag.h | 2
-rw-r--r-- block/blk-mq.c | 983
-rw-r--r-- block/blk-mq.h | 22
-rw-r--r-- block/blk-pm.c | 22
-rw-r--r-- block/blk-stat.c | 39
-rw-r--r-- block/blk-stat.h | 2
-rw-r--r-- block/blk-sysfs.c | 17
-rw-r--r-- block/blk-throttle.c | 1
-rw-r--r-- block/blk.h | 115
-rw-r--r-- block/bsg-lib.c | 2
-rw-r--r-- block/elevator.c | 12
-rw-r--r-- block/fops.c | 42
-rw-r--r-- block/genhd.c | 74
-rw-r--r-- block/ioctl.c | 31
-rw-r--r-- block/ioprio.c | 32
-rw-r--r-- block/kyber-iosched.c | 1
-rw-r--r-- block/mq-deadline.c | 4
-rw-r--r-- block/partitions/core.c | 24
-rw-r--r-- certs/.gitignore | 1
-rw-r--r-- certs/Kconfig | 4
-rw-r--r-- certs/Makefile | 125
-rw-r--r-- certs/default_x509.genkey | 17
-rw-r--r-- certs/extract-cert.c (renamed from scripts/extract-cert.c) | 2
-rw-r--r-- crypto/Kconfig | 2
-rw-r--r-- crypto/af_alg.c | 3
-rw-r--r-- crypto/algapi.c | 1
-rw-r--r-- crypto/algboss.c | 4
-rw-r--r-- crypto/api.c | 1
-rw-r--r-- crypto/blake2s_generic.c | 4
-rw-r--r-- drivers/accessibility/speakup/speakup_acntpc.c | 2
-rw-r--r-- drivers/accessibility/speakup/speakup_dectlk.c | 1
-rw-r--r-- drivers/accessibility/speakup/speakup_dtlk.c | 2
-rw-r--r-- drivers/accessibility/speakup/speakup_keypc.c | 2
-rw-r--r-- drivers/accessibility/speakup/spk_ttyio.c | 4
-rw-r--r-- drivers/acpi/Kconfig | 26
-rw-r--r-- drivers/acpi/Makefile | 3
-rw-r--r-- drivers/acpi/acpi_apd.c | 13
-rw-r--r-- drivers/acpi/acpi_pcc.c | 2
-rw-r--r-- drivers/acpi/arm64/iort.c | 14
-rw-r--r-- drivers/acpi/bus.c | 1
-rw-r--r-- drivers/acpi/cppc_acpi.c | 11
-rw-r--r-- drivers/acpi/dptf/dptf_pch_fivr.c | 1
-rw-r--r-- drivers/acpi/dptf/dptf_power.c | 2
-rw-r--r-- drivers/acpi/dptf/int340x_thermal.c | 6
-rw-r--r-- drivers/acpi/ec.c | 10
-rw-r--r-- drivers/acpi/fan.h | 1
-rw-r--r-- drivers/acpi/internal.h | 2
-rw-r--r-- drivers/acpi/nfit/core.c | 4
-rw-r--r-- drivers/acpi/numa/srat.c | 59
-rw-r--r-- drivers/acpi/pfr_telemetry.c | 435
-rw-r--r-- drivers/acpi/pfr_update.c | 575
-rw-r--r-- drivers/acpi/proc.c | 2
-rw-r--r-- drivers/acpi/processor_idle.c | 5
-rw-r--r-- drivers/acpi/scan.c | 92
-rw-r--r-- drivers/acpi/sleep.c | 15
-rw-r--r-- drivers/acpi/spcr.c | 9
-rw-r--r-- drivers/acpi/tables.c | 89
-rw-r--r-- drivers/acpi/x86/s2idle.c | 12
-rw-r--r-- drivers/android/binder.c | 437
-rw-r--r-- drivers/ata/Kconfig | 44
-rw-r--r-- drivers/ata/acard-ahci.c | 4
-rw-r--r-- drivers/ata/ahci.c | 24
-rw-r--r-- drivers/ata/ahci_brcm.c | 4
-rw-r--r-- drivers/ata/ahci_ceva.c | 5
-rw-r--r-- drivers/ata/ahci_qoriq.c | 4
-rw-r--r-- drivers/ata/ahci_xgene.c | 12
-rw-r--r-- drivers/ata/ata_piix.c | 11
-rw-r--r-- drivers/ata/libahci.c | 33
-rw-r--r-- drivers/ata/libahci_platform.c | 14
-rw-r--r-- drivers/ata/libata-acpi.c | 69
-rw-r--r-- drivers/ata/libata-core.c | 256
-rw-r--r-- drivers/ata/libata-eh.c | 72
-rw-r--r-- drivers/ata/libata-pmp.c | 8
-rw-r--r-- drivers/ata/libata-sata.c | 11
-rw-r--r-- drivers/ata/libata-scsi.c | 170
-rw-r--r-- drivers/ata/libata-sff.c | 88
-rw-r--r-- drivers/ata/libata-trace.c | 47
-rw-r--r-- drivers/ata/libata-transport.c | 48
-rw-r--r-- drivers/ata/libata.h | 5
-rw-r--r-- drivers/ata/pata_ali.c | 4
-rw-r--r-- drivers/ata/pata_arasan_cf.c | 3
-rw-r--r-- drivers/ata/pata_atp867x.c | 105
-rw-r--r-- drivers/ata/pata_cmd640.c | 2
-rw-r--r-- drivers/ata/pata_cmd64x.c | 4
-rw-r--r-- drivers/ata/pata_cs5520.c | 4
-rw-r--r-- drivers/ata/pata_cs5536.c | 4
-rw-r--r-- drivers/ata/pata_cypress.c | 2
-rw-r--r-- drivers/ata/pata_ep93xx.c | 1
-rw-r--r-- drivers/ata/pata_hpt366.c | 5
-rw-r--r-- drivers/ata/pata_hpt37x.c | 20
-rw-r--r-- drivers/ata/pata_hpt3x2n.c | 12
-rw-r--r-- drivers/ata/pata_it821x.c | 66
-rw-r--r-- drivers/ata/pata_ixp4xx_cf.c | 6
-rw-r--r-- drivers/ata/pata_marvell.c | 9
-rw-r--r-- drivers/ata/pata_netcell.c | 5
-rw-r--r-- drivers/ata/pata_octeon_cf.c | 54
-rw-r--r-- drivers/ata/pata_of_platform.c | 15
-rw-r--r-- drivers/ata/pata_pdc2027x.c | 71
-rw-r--r-- drivers/ata/pata_pdc202xx_old.c | 2
-rw-r--r-- drivers/ata/pata_platform.c | 2
-rw-r--r-- drivers/ata/pata_rz1000.c | 4
-rw-r--r-- drivers/ata/pata_serverworks.c | 4
-rw-r--r-- drivers/ata/pata_sil680.c | 9
-rw-r--r-- drivers/ata/pata_via.c | 12
-rw-r--r-- drivers/ata/pdc_adma.c | 33
-rw-r--r-- drivers/ata/sata_dwc_460ex.c | 165
-rw-r--r-- drivers/ata/sata_fsl.c | 224
-rw-r--r-- drivers/ata/sata_gemini.c | 4
-rw-r--r-- drivers/ata/sata_inic162x.c | 4
-rw-r--r-- drivers/ata/sata_mv.c | 132
-rw-r--r-- drivers/ata/sata_nv.c | 54
-rw-r--r-- drivers/ata/sata_promise.c | 31
-rw-r--r-- drivers/ata/sata_qstor.c | 15
-rw-r--r-- drivers/ata/sata_rcar.c | 26
-rw-r--r-- drivers/ata/sata_sil.c | 1
-rw-r--r-- drivers/ata/sata_sil24.c | 5
-rw-r--r-- drivers/ata/sata_sx4.c | 148
-rw-r--r-- drivers/atm/iphase.c | 4
-rw-r--r-- drivers/base/Kconfig | 11
-rw-r--r-- drivers/base/arch_numa.c | 68
-rw-r--r-- drivers/base/auxiliary.c | 152
-rw-r--r-- drivers/base/bus.c | 4
-rw-r--r-- drivers/base/core.c | 34
-rw-r--r-- drivers/base/dd.c | 7
-rw-r--r-- drivers/base/devtmpfs.c | 17
-rw-r--r-- drivers/base/firmware_loader/builtin/Makefile | 4
-rw-r--r-- drivers/base/firmware_loader/fallback.c | 7
-rw-r--r-- drivers/base/firmware_loader/fallback.h | 11
-rw-r--r-- drivers/base/firmware_loader/fallback_table.c | 25
-rw-r--r-- drivers/base/platform-msi.c | 234
-rw-r--r-- drivers/base/platform.c | 9
-rw-r--r-- drivers/base/power/trace.c | 6
-rw-r--r-- drivers/base/power/wakeup.c | 41
-rw-r--r-- drivers/base/property.c | 29
-rw-r--r-- drivers/base/test/test_async_driver_probe.c | 14
-rw-r--r-- drivers/base/topology.c | 28
-rw-r--r-- drivers/block/Kconfig | 11
-rw-r--r-- drivers/block/Makefile | 1
-rw-r--r-- drivers/block/amiflop.c | 3
-rw-r--r-- drivers/block/aoe/aoecmd.c | 2
-rw-r--r-- drivers/block/ataflop.c | 7
-rw-r--r-- drivers/block/brd.c | 74
-rw-r--r-- drivers/block/drbd/drbd_main.c | 4
-rw-r--r-- drivers/block/drbd/drbd_protocol.h | 6
-rw-r--r-- drivers/block/drbd/drbd_receiver.c | 3
-rw-r--r-- drivers/block/floppy.c | 13
-rw-r--r-- drivers/block/loop.c | 74
-rw-r--r-- drivers/block/mtip32xx/mtip32xx.c | 90
-rw-r--r-- drivers/block/n64cart.c | 2
-rw-r--r-- drivers/block/null_blk/main.c | 13
-rw-r--r-- drivers/block/null_blk/trace.h | 2
-rw-r--r-- drivers/block/paride/bpck.c | 1
-rw-r--r-- drivers/block/paride/pcd.c | 5
-rw-r--r-- drivers/block/paride/pd.c | 6
-rw-r--r-- drivers/block/paride/pf.c | 5
-rw-r--r-- drivers/block/pktcdvd.c | 310
-rw-r--r-- drivers/block/ps3vram.c | 1
-rw-r--r-- drivers/block/rbd.c | 13
-rw-r--r-- drivers/block/rnbd/rnbd-clt-sysfs.c | 3
-rw-r--r-- drivers/block/rnbd/rnbd-clt.c | 10
-rw-r--r-- drivers/block/rnbd/rnbd-clt.h | 2
-rw-r--r-- drivers/block/rnbd/rnbd-srv.c | 16
-rw-r--r-- drivers/block/rnbd/rnbd-srv.h | 2
-rw-r--r-- drivers/block/rsxx/Makefile | 3
-rw-r--r-- drivers/block/rsxx/config.c | 197
-rw-r--r-- drivers/block/rsxx/core.c | 1126
-rw-r--r-- drivers/block/rsxx/cregs.c | 789
-rw-r--r-- drivers/block/rsxx/dev.c | 306
-rw-r--r-- drivers/block/rsxx/dma.c | 1085
-rw-r--r-- drivers/block/rsxx/rsxx.h | 33
-rw-r--r-- drivers/block/rsxx/rsxx_cfg.h | 58
-rw-r--r-- drivers/block/rsxx/rsxx_priv.h | 418
-rw-r--r-- drivers/block/sunvdc.c | 19
-rw-r--r-- drivers/block/swim.c | 1
-rw-r--r-- drivers/block/swim3.c | 2
-rw-r--r-- drivers/block/sx8.c | 4
-rw-r--r-- drivers/block/virtio_blk.c | 7
-rw-r--r-- drivers/block/xen-blkback/xenbus.c | 2
-rw-r--r-- drivers/block/xen-blkfront.c | 28
-rw-r--r-- drivers/block/z2ram.c | 1
-rw-r--r-- drivers/block/zram/zram_drv.c | 12
-rw-r--r-- drivers/bluetooth/hci_ldisc.c | 5
-rw-r--r-- drivers/bluetooth/virtio_bt.c | 2
-rw-r--r-- drivers/bus/fsl-mc/dprc-driver.c | 8
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-allocator.c | 9
-rw-r--r-- drivers/bus/fsl-mc/fsl-mc-msi.c | 79
-rw-r--r-- drivers/bus/mhi/core/boot.c | 2
-rw-r--r-- drivers/bus/mhi/core/init.c | 4
-rw-r--r-- drivers/bus/mhi/core/internal.h | 9
-rw-r--r-- drivers/bus/mhi/core/main.c | 24
-rw-r--r-- drivers/bus/mhi/core/pm.c | 39
-rw-r--r-- drivers/bus/mhi/pci_generic.c | 58
-rw-r--r-- drivers/bus/mvebu-mbus.c | 5
-rw-r--r-- drivers/cdrom/cdrom.c | 23
-rw-r--r-- drivers/cdrom/gdrom.c | 1
-rw-r--r-- drivers/char/agp/amd64-agp.c | 24
-rw-r--r-- drivers/char/agp/sis-agp.c | 25
-rw-r--r-- drivers/char/agp/via-agp.c | 25
-rw-r--r-- drivers/char/applicom.c | 4
-rw-r--r-- drivers/char/hpet.c | 22
-rw-r--r-- drivers/char/hw_random/Kconfig | 13
-rw-r--r-- drivers/char/hw_random/Makefile | 1
-rw-r--r-- drivers/char/hw_random/tx4939-rng.c | 157
-rw-r--r-- drivers/char/hw_random/virtio-rng.c | 2
-rw-r--r-- drivers/char/mwave/3780i.h | 2
-rw-r--r-- drivers/char/random.c | 676
-rw-r--r-- drivers/char/virtio_console.c | 4
-rw-r--r-- drivers/clk/Kconfig | 26
-rw-r--r-- drivers/clk/Makefile | 5
-rw-r--r-- drivers/clk/clk-bm1880.c | 20
-rw-r--r-- drivers/clk/clk-gate.c | 35
-rw-r--r-- drivers/clk/clk-gemini.c | 2
-rw-r--r-- drivers/clk/clk-lan966x.c | 293
-rw-r--r-- drivers/clk/clk-si5341.c | 2
-rw-r--r-- drivers/clk/clk-stm32f4.c | 4
-rw-r--r-- drivers/clk/clk-stm32mp1.c | 2
-rw-r--r-- drivers/clk/clk-tps68470.c | 261
-rw-r--r-- drivers/clk/clk.c | 80
-rw-r--r-- drivers/clk/imx/clk-imx8mn.c | 6
-rw-r--r-- drivers/clk/imx/clk-imx8mp.c | 2
-rw-r--r-- drivers/clk/imx/clk-imx8ulp.c | 1
-rw-r--r-- drivers/clk/imx/clk-pllv1.c | 17
-rw-r--r-- drivers/clk/imx/clk-pllv3.c | 6
-rw-r--r-- drivers/clk/ingenic/jz4760-cgu.c | 10
-rw-r--r-- drivers/clk/ingenic/jz4770-cgu.c | 5
-rw-r--r-- drivers/clk/mediatek/Kconfig | 17
-rw-r--r-- drivers/clk/mediatek/Makefile | 4
-rw-r--r-- drivers/clk/mediatek/clk-gate.c | 24
-rw-r--r-- drivers/clk/mediatek/clk-mt7986-apmixed.c | 100
-rw-r--r-- drivers/clk/mediatek/clk-mt7986-eth.c | 132
-rw-r--r-- drivers/clk/mediatek/clk-mt7986-infracfg.c | 224
-rw-r--r-- drivers/clk/mediatek/clk-mt7986-topckgen.c | 342
-rw-r--r-- drivers/clk/meson/gxbb.c | 44
-rw-r--r-- drivers/clk/qcom/Kconfig | 24
-rw-r--r-- drivers/clk/qcom/Makefile | 3
-rw-r--r-- drivers/clk/qcom/clk-alpha-pll.c | 166
-rw-r--r-- drivers/clk/qcom/clk-alpha-pll.h | 3
-rw-r--r-- drivers/clk/qcom/clk-rpmh.c | 52
-rw-r--r-- drivers/clk/qcom/clk-smd-rpm.c | 31
-rw-r--r-- drivers/clk/qcom/gcc-msm8976.c | 4155
-rw-r--r-- drivers/clk/qcom/gcc-msm8994.c | 1
-rw-r--r-- drivers/clk/qcom/gcc-sc7280.c | 2
-rw-r--r-- drivers/clk/qcom/gcc-sdx65.c | 1611
-rw-r--r-- drivers/clk/qcom/gcc-sm6350.c | 1
-rw-r--r-- drivers/clk/qcom/gcc-sm8350.c | 1
-rw-r--r-- drivers/clk/qcom/gcc-sm8450.c | 3304
-rw-r--r-- drivers/clk/qcom/lpasscc-sc7280.c | 1
-rw-r--r-- drivers/clk/qcom/lpasscc-sdm845.c | 1
-rw-r--r-- drivers/clk/qcom/mmcc-apq8084.c | 1
-rw-r--r-- drivers/clk/qcom/q6sstop-qcs404.c | 1
-rw-r--r-- drivers/clk/qcom/turingcc-qcs404.c | 1
-rw-r--r-- drivers/clk/renesas/Kconfig | 13
-rw-r--r-- drivers/clk/renesas/Makefile | 2
-rw-r--r-- drivers/clk/renesas/r8a774a1-cpg-mssr.c | 12
-rw-r--r-- drivers/clk/renesas/r8a774b1-cpg-mssr.c | 12
-rw-r--r-- drivers/clk/renesas/r8a774c0-cpg-mssr.c | 9
-rw-r--r-- drivers/clk/renesas/r8a774e1-cpg-mssr.c | 12
-rw-r--r-- drivers/clk/renesas/r8a7795-cpg-mssr.c | 12
-rw-r--r-- drivers/clk/renesas/r8a7796-cpg-mssr.c | 12
-rw-r--r-- drivers/clk/renesas/r8a77965-cpg-mssr.c | 12
-rw-r--r-- drivers/clk/renesas/r8a77980-cpg-mssr.c | 3
-rw-r--r-- drivers/clk/renesas/r8a77990-cpg-mssr.c | 9
-rw-r--r-- drivers/clk/renesas/r8a77995-cpg-mssr.c | 3
-rw-r--r-- drivers/clk/renesas/r8a779a0-cpg-mssr.c | 343
-rw-r--r-- drivers/clk/renesas/r8a779f0-cpg-mssr.c | 183
-rw-r--r-- drivers/clk/renesas/r9a07g044-cpg.c | 81
-rw-r--r-- drivers/clk/renesas/rcar-cpg-lib.c | 211
-rw-r--r-- drivers/clk/renesas/rcar-cpg-lib.h | 7
-rw-r--r-- drivers/clk/renesas/rcar-gen3-cpg.c | 24
-rw-r--r-- drivers/clk/renesas/rcar-gen3-cpg.h | 4
-rw-r--r-- drivers/clk/renesas/rcar-gen4-cpg.c | 305
-rw-r--r-- drivers/clk/renesas/rcar-gen4-cpg.h | 76
-rw-r--r-- drivers/clk/renesas/renesas-cpg-mssr.c | 60
-rw-r--r-- drivers/clk/renesas/renesas-cpg-mssr.h | 3
-rw-r--r-- drivers/clk/renesas/rzg2l-cpg.c | 18
-rw-r--r-- drivers/clk/renesas/rzg2l-cpg.h | 9
-rw-r--r-- drivers/clk/samsung/Makefile | 2
-rw-r--r-- drivers/clk/samsung/clk-cpu.c | 2
-rw-r--r-- drivers/clk/samsung/clk-cpu.h | 7
-rw-r--r-- drivers/clk/samsung/clk-exynos-arm64.c | 94
-rw-r--r-- drivers/clk/samsung/clk-exynos-arm64.h | 20
-rw-r--r-- drivers/clk/samsung/clk-exynos3250.c | 54
-rw-r--r-- drivers/clk/samsung/clk-exynos4.c | 41
-rw-r--r-- drivers/clk/samsung/clk-exynos5250.c | 21
-rw-r--r-- drivers/clk/samsung/clk-exynos5420.c | 29
-rw-r--r-- drivers/clk/samsung/clk-exynos7885.c | 597
-rw-r--r-- drivers/clk/samsung/clk-exynos850.c | 366
-rw-r--r-- drivers/clk/samsung/clk-pll.c | 1
-rw-r--r-- drivers/clk/samsung/clk-pll.h | 1
-rw-r--r-- drivers/clk/samsung/clk-s3c2410.c | 6
-rw-r--r-- drivers/clk/samsung/clk-s3c64xx.c | 8
-rw-r--r-- drivers/clk/samsung/clk-s5pv210.c | 8
-rw-r--r-- drivers/clk/samsung/clk.c | 14
-rw-r--r-- drivers/clk/samsung/clk.h | 36
-rw-r--r-- drivers/clk/socfpga/clk-agilex.c | 4
-rw-r--r-- drivers/clk/socfpga/clk-gate.c | 4
-rw-r--r-- drivers/clk/socfpga/clk-pll-s10.c | 2
-rw-r--r-- drivers/clk/socfpga/clk-s10.c | 4
-rw-r--r-- drivers/clk/st/clkgen-fsyn.c | 13
-rw-r--r-- drivers/clk/st/clkgen-mux.c | 11
-rw-r--r-- drivers/clk/sunxi-ng/Kconfig | 49
-rw-r--r-- drivers/clk/sunxi-ng/Makefile | 101
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c | 140
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun20i-d1-r.h | 17
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun20i-d1.c | 1390
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun20i-d1.h | 15
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun4i-a10.c | 58
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c | 4
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-a100.c | 4
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-a64.c | 7
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c | 56
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-h6.c | 7
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun50i-h616.c | 33
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun6i-a31.c | 40
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun8i-a23.c | 35
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun8i-a33.c | 40
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun8i-a83t.c | 7
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun8i-de2.c | 9
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun8i-h3.c | 62
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun8i-r.c | 65
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun8i-r40.c | 6
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun8i-v3s.c | 57
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c | 7
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c | 7
-rw-r--r-- drivers/clk/sunxi-ng/ccu-sun9i-a80.c | 7
-rw-r--r-- drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c | 40
-rw-r--r-- drivers/clk/sunxi-ng/ccu_common.c | 6
-rw-r--r-- drivers/clk/sunxi-ng/ccu_div.c | 1
-rw-r--r-- drivers/clk/sunxi-ng/ccu_div.h | 78
-rw-r--r-- drivers/clk/sunxi-ng/ccu_frac.c | 6
-rw-r--r-- drivers/clk/sunxi-ng/ccu_gate.c | 4
-rw-r--r-- drivers/clk/sunxi-ng/ccu_gate.h | 32
-rw-r--r-- drivers/clk/sunxi-ng/ccu_mp.c | 2
-rw-r--r-- drivers/clk/sunxi-ng/ccu_mp.h | 49
-rw-r--r-- drivers/clk/sunxi-ng/ccu_mult.c | 1
-rw-r--r-- drivers/clk/sunxi-ng/ccu_mux.c | 6
-rw-r--r-- drivers/clk/sunxi-ng/ccu_mux.h | 33
-rw-r--r-- drivers/clk/sunxi-ng/ccu_nk.c | 1
-rw-r--r-- drivers/clk/sunxi-ng/ccu_nkm.c | 1
-rw-r--r-- drivers/clk/sunxi-ng/ccu_nkmp.c | 1
-rw-r--r-- drivers/clk/sunxi-ng/ccu_nm.c | 1
-rw-r--r-- drivers/clk/sunxi-ng/ccu_phase.c | 1
-rw-r--r-- drivers/clk/sunxi-ng/ccu_reset.c | 1
-rw-r--r-- drivers/clk/sunxi-ng/ccu_sdm.c | 6
-rw-r--r-- drivers/clk/tegra/Makefile | 1
-rw-r--r-- drivers/clk/tegra/clk-device.c | 199
-rw-r--r-- drivers/clk/tegra/clk-pll.c | 2
-rw-r--r-- drivers/clk/tegra/clk-super.c | 2
-rw-r--r-- drivers/clk/tegra/clk-tegra114.c | 2
-rw-r--r-- drivers/clk/tegra/clk-tegra20.c | 77
-rw-r--r-- drivers/clk/tegra/clk-tegra30.c | 116
-rw-r--r-- drivers/clk/tegra/clk.c | 75
-rw-r--r-- drivers/clk/tegra/clk.h | 2
-rw-r--r-- drivers/clk/ti/adpll.c | 2
-rw-r--r-- drivers/clk/visconti/Kconfig | 9
-rw-r--r-- drivers/clk/visconti/Makefile | 5
-rw-r--r-- drivers/clk/visconti/clkc-tmpv770x.c | 291
-rw-r--r-- drivers/clk/visconti/clkc.c | 206
-rw-r--r-- drivers/clk/visconti/clkc.h | 76
-rw-r--r-- drivers/clk/visconti/pll-tmpv770x.c | 85
-rw-r--r-- drivers/clk/visconti/pll.c | 339
-rw-r--r-- drivers/clk/visconti/pll.h | 62
-rw-r--r-- drivers/clk/visconti/reset.c | 107
-rw-r--r-- drivers/clk/visconti/reset.h | 36
-rw-r--r-- drivers/clk/x86/clk-fch.c | 48
-rw-r--r-- drivers/clk/zynq/pll.c | 12
-rw-r--r-- drivers/clocksource/Kconfig | 12
-rw-r--r-- drivers/clocksource/Makefile | 1
-rw-r--r-- drivers/clocksource/exynos_mct.c | 52
-rw-r--r-- drivers/clocksource/renesas-ostm.c | 39
-rw-r--r-- drivers/clocksource/timer-imx-sysctr.c | 6
-rw-r--r-- drivers/clocksource/timer-msc313e.c | 253
-rw-r--r-- drivers/clocksource/timer-pistachio.c | 3
-rw-r--r-- drivers/clocksource/timer-ti-dm-systimer.c | 2
-rw-r--r-- drivers/comedi/comedi_buf.c | 3
-rw-r--r-- drivers/comedi/comedi_fops.c | 2
-rw-r--r-- drivers/comedi/comedi_pci.c | 3
-rw-r--r-- drivers/comedi/comedi_pcmcia.c | 3
-rw-r--r-- drivers/comedi/comedi_usb.c | 3
-rw-r--r-- drivers/comedi/drivers.c | 3
-rw-r--r-- drivers/comedi/drivers/8255.c | 5
-rw-r--r-- drivers/comedi/drivers/8255_pci.c | 6
-rw-r--r-- drivers/comedi/drivers/addi_apci_1032.c | 2
-rw-r--r-- drivers/comedi/drivers/addi_apci_1500.c | 2
-rw-r--r-- drivers/comedi/drivers/addi_apci_1516.c | 2
-rw-r--r-- drivers/comedi/drivers/addi_apci_1564.c | 2
-rw-r--r-- drivers/comedi/drivers/addi_apci_16xx.c | 3
-rw-r--r-- drivers/comedi/drivers/addi_apci_2032.c | 2
-rw-r--r-- drivers/comedi/drivers/addi_apci_2200.c | 2
-rw-r--r-- drivers/comedi/drivers/addi_apci_3120.c | 2
-rw-r--r-- drivers/comedi/drivers/addi_apci_3501.c | 2
-rw-r--r-- drivers/comedi/drivers/addi_apci_3xxx.c | 3
-rw-r--r-- drivers/comedi/drivers/addi_watchdog.c | 2
-rw-r--r-- drivers/comedi/drivers/adl_pci6208.c | 3
-rw-r--r-- drivers/comedi/drivers/adl_pci7x3x.c | 3
-rw-r--r-- drivers/comedi/drivers/adl_pci8164.c | 3
-rw-r--r-- drivers/comedi/drivers/adl_pci9111.c | 5
-rw-r--r-- drivers/comedi/drivers/adl_pci9118.c | 5
-rw-r--r-- drivers/comedi/drivers/adq12b.c | 3
-rw-r--r-- drivers/comedi/drivers/adv_pci1710.c | 5
-rw-r--r-- drivers/comedi/drivers/adv_pci1720.c | 3
-rw-r--r-- drivers/comedi/drivers/adv_pci1723.c | 3
-rw-r--r-- drivers/comedi/drivers/adv_pci1724.c | 3
-rw-r--r-- drivers/comedi/drivers/adv_pci1760.c | 3
-rw-r--r-- drivers/comedi/drivers/adv_pci_dio.c | 8
-rw-r--r-- drivers/comedi/drivers/aio_aio12_8.c | 7
-rw-r--r-- drivers/comedi/drivers/aio_iiro_16.c | 3
-rw-r--r-- drivers/comedi/drivers/amplc_dio200.c | 2
-rw-r--r-- drivers/comedi/drivers/amplc_dio200_common.c | 7
-rw-r--r-- drivers/comedi/drivers/amplc_dio200_pci.c | 3
-rw-r--r-- drivers/comedi/drivers/amplc_pc236.c | 3
-rw-r--r-- drivers/comedi/drivers/amplc_pc236_common.c | 5
-rw-r--r-- drivers/comedi/drivers/amplc_pc263.c | 2
-rw-r--r-- drivers/comedi/drivers/amplc_pci224.c | 6
-rw-r--r-- drivers/comedi/drivers/amplc_pci230.c | 8
-rw-r--r-- drivers/comedi/drivers/amplc_pci236.c | 3
-rw-r--r-- drivers/comedi/drivers/amplc_pci263.c | 3
-rw-r--r-- drivers/comedi/drivers/c6xdigio.c | 3
-rw-r--r-- drivers/comedi/drivers/cb_das16_cs.c | 6
-rw-r--r-- drivers/comedi/drivers/cb_pcidas.c | 7
-rw-r--r-- drivers/comedi/drivers/cb_pcidas64.c | 5
-rw-r--r-- drivers/comedi/drivers/cb_pcidda.c | 6
-rw-r--r-- drivers/comedi/drivers/cb_pcimdas.c | 7
-rw-r--r-- drivers/comedi/drivers/cb_pcimdda.c | 6
-rw-r--r-- drivers/comedi/drivers/comedi_8254.c | 6
-rw-r--r-- drivers/comedi/drivers/comedi_8255.c | 5
-rw-r--r-- drivers/comedi/drivers/comedi_bond.c | 6
-rw-r--r-- drivers/comedi/drivers/comedi_isadma.c | 6
-rw-r--r-- drivers/comedi/drivers/comedi_parport.c | 3
-rw-r--r-- drivers/comedi/drivers/comedi_test.c | 4
-rw-r--r-- drivers/comedi/drivers/contec_pci_dio.c | 3
-rw-r--r-- drivers/comedi/drivers/dac02.c | 3
-rw-r--r-- drivers/comedi/drivers/daqboard2000.c | 5
-rw-r--r-- drivers/comedi/drivers/das08.c | 7
-rw-r--r-- drivers/comedi/drivers/das08_cs.c | 3
-rw-r--r-- drivers/comedi/drivers/das08_isa.c | 2
-rw-r--r-- drivers/comedi/drivers/das08_pci.c | 3
-rw-r--r-- drivers/comedi/drivers/das16.c | 10
-rw-r--r-- drivers/comedi/drivers/das16m1.c | 7
-rw-r--r-- drivers/comedi/drivers/das1800.c | 8
-rw-r--r-- drivers/comedi/drivers/das6402.c | 6
-rw-r--r-- drivers/comedi/drivers/das800.c | 6
-rw-r--r-- drivers/comedi/drivers/dmm32at.c | 5
-rw-r--r-- drivers/comedi/drivers/dt2801.c | 2
-rw-r--r-- drivers/comedi/drivers/dt2811.c | 3
-rw-r--r-- drivers/comedi/drivers/dt2814.c | 3
-rw-r--r-- drivers/comedi/drivers/dt2815.c | 3
-rw-r--r-- drivers/comedi/drivers/dt2817.c | 2
-rw-r--r-- drivers/comedi/drivers/dt282x.c | 6
-rw-r--r-- drivers/comedi/drivers/dt3000.c | 3
-rw-r--r-- drivers/comedi/drivers/dt9812.c | 3
-rw-r--r-- drivers/comedi/drivers/dyna_pci10xx.c | 3
-rw-r--r-- drivers/comedi/drivers/fl512.c | 3
-rw-r--r-- drivers/comedi/drivers/gsc_hpdi.c | 3
-rw-r--r-- drivers/comedi/drivers/icp_multi.c | 3
-rw-r--r-- drivers/comedi/drivers/ii_pci20kc.c | 2
-rw-r--r-- drivers/comedi/drivers/jr3_pci.c | 3
-rw-r--r-- drivers/comedi/drivers/ke_counter.c | 3
-rw-r--r-- drivers/comedi/drivers/me4000.c | 5
-rw-r--r-- drivers/comedi/drivers/me_daq.c | 3
-rw-r--r-- drivers/comedi/drivers/mf6x4.c | 3
-rw-r--r-- drivers/comedi/drivers/mite.c | 3
-rw-r--r-- drivers/comedi/drivers/mpc624.c | 3
-rw-r--r-- drivers/comedi/drivers/multiq3.c | 3
-rw-r--r-- drivers/comedi/drivers/ni_6527.c | 3
-rw-r--r-- drivers/comedi/drivers/ni_65xx.c | 3
-rw-r--r-- drivers/comedi/drivers/ni_660x.c | 3
-rw-r--r-- drivers/comedi/drivers/ni_670x.c | 3
-rw-r--r-- drivers/comedi/drivers/ni_at_a2150.c | 8
-rw-r--r-- drivers/comedi/drivers/ni_at_ao.c | 6
-rw-r--r-- drivers/comedi/drivers/ni_atmio.c | 5
-rw-r--r-- drivers/comedi/drivers/ni_atmio16d.c | 5
-rw-r--r-- drivers/comedi/drivers/ni_daq_700.c | 3
-rw-r--r-- drivers/comedi/drivers/ni_daq_dio24.c | 5
-rw-r--r-- drivers/comedi/drivers/ni_labpc.c | 3
-rw-r--r-- drivers/comedi/drivers/ni_labpc_common.c | 7
-rw-r--r-- drivers/comedi/drivers/ni_labpc_cs.c | 3
-rw-r--r-- drivers/comedi/drivers/ni_labpc_isadma.c | 5
-rw-r--r-- drivers/comedi/drivers/ni_labpc_pci.c | 3
-rw-r--r-- drivers/comedi/drivers/ni_mio_common.c | 2
-rw-r--r-- drivers/comedi/drivers/ni_mio_cs.c | 4
-rw-r--r-- drivers/comedi/drivers/ni_pcidio.c | 3
-rw-r--r-- drivers/comedi/drivers/ni_pcimio.c | 4
-rw-r--r-- drivers/comedi/drivers/ni_routes.c | 3
-rw-r--r-- drivers/comedi/drivers/ni_routes.h | 2
-rw-r--r-- drivers/comedi/drivers/ni_routing/ni_route_values.h | 2
-rw-r--r-- drivers/comedi/drivers/ni_routing/tools/.gitignore | 1
-rw-r--r-- drivers/comedi/drivers/ni_routing/tools/Makefile | 29
-rw-r--r-- drivers/comedi/drivers/ni_tio.h | 2
-rw-r--r-- drivers/comedi/drivers/ni_usb6501.c | 3
-rw-r--r-- drivers/comedi/drivers/pcl711.c | 6
-rw-r--r-- drivers/comedi/drivers/pcl724.c | 5
-rw-r--r-- drivers/comedi/drivers/pcl726.c | 3
-rw-r--r-- drivers/comedi/drivers/pcl730.c | 2
-rw-r--r-- drivers/comedi/drivers/pcl812.c | 8
-rw-r--r-- drivers/comedi/drivers/pcl816.c | 8
-rw-r--r-- drivers/comedi/drivers/pcl818.c | 8
-rw-r--r-- drivers/comedi/drivers/pcm3724.c | 5
-rw-r--r-- drivers/comedi/drivers/pcmad.c | 2
-rw-r--r-- drivers/comedi/drivers/pcmda12.c | 2
-rw-r--r-- drivers/comedi/drivers/pcmmio.c | 3
-rw-r--r-- drivers/comedi/drivers/pcmuio.c | 3
-rw-r--r-- drivers/comedi/drivers/quatech_daqp_cs.c | 3
-rw-r--r-- drivers/comedi/drivers/rtd520.c | 5
-rw-r--r-- drivers/comedi/drivers/rti800.c | 2
-rw-r--r-- drivers/comedi/drivers/rti802.c | 2
-rw-r--r-- drivers/comedi/drivers/s526.c | 2
-rw-r--r-- drivers/comedi/drivers/s626.c | 3
-rw-r--r-- drivers/comedi/drivers/ssv_dnp.c | 2
-rw-r--r-- drivers/comedi/drivers/usbdux.c | 3
-rw-r--r-- drivers/comedi/drivers/usbduxfast.c | 2
-rw-r--r-- drivers/comedi/drivers/usbduxsigma.c | 3
-rw-r--r-- drivers/comedi/drivers/vmk80xx.c | 3
-rw-r--r-- drivers/comedi/kcomedilib/kcomedilib_main.c | 6
-rw-r--r-- drivers/comedi/proc.c | 2
-rw-r--r-- drivers/comedi/range.c | 2
-rw-r--r-- drivers/connector/cn_proc.c | 2
-rw-r--r-- drivers/counter/104-quad-8.c | 175
-rw-r--r-- drivers/counter/counter-core.c | 183
-rw-r--r-- drivers/counter/ftm-quaddec.c | 36
-rw-r--r-- drivers/counter/intel-qep.c | 46
-rw-r--r-- drivers/counter/interrupt-cnt.c | 38
-rw-r--r-- drivers/counter/microchip-tcb-capture.c | 44
-rw-r--r-- drivers/counter/stm32-lptimer-cnt.c | 51
-rw-r--r-- drivers/counter/stm32-timer-cnt.c | 48
-rw-r--r-- drivers/counter/ti-eqep.c | 52
-rw-r--r-- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 3
-rw-r--r-- drivers/crypto/virtio/virtio_crypto_core.c | 8
-rw-r--r-- drivers/cxl/Kconfig | 1
-rw-r--r-- drivers/cxl/acpi.c | 237
-rw-r--r-- drivers/cxl/core/Makefile | 2
-rw-r--r-- drivers/cxl/core/bus.c | 26
-rw-r--r-- drivers/cxl/core/mbox.c | 186
-rw-r--r-- drivers/cxl/core/memdev.c | 55
-rw-r--r-- drivers/cxl/core/pmem.c | 20
-rw-r--r-- drivers/cxl/core/regs.c | 8
-rw-r--r-- drivers/cxl/cxl.h | 10
-rw-r--r-- drivers/cxl/cxlmem.h | 37
-rw-r--r-- drivers/cxl/pci.c | 120
-rw-r--r-- drivers/cxl/pmem.c | 85
-rw-r--r-- drivers/dax/Kconfig | 13
-rw-r--r-- drivers/dax/Makefile | 3
-rw-r--r-- drivers/dax/bus.c | 62
-rw-r--r-- drivers/dax/bus.h | 14
-rw-r--r-- drivers/dax/device.c | 132
-rw-r--r-- drivers/dax/pmem.c (renamed from drivers/dax/pmem/core.c) | 36
-rw-r--r-- drivers/dax/pmem/Makefile | 1
-rw-r--r-- drivers/dax/pmem/compat.c | 72
-rw-r--r-- drivers/dax/pmem/pmem.c | 30
-rw-r--r-- drivers/dax/super.c | 272
-rw-r--r-- drivers/dma-buf/dma-buf-sysfs-stats.c | 2
-rw-r--r-- drivers/dma-buf/dma-heap.c | 2
-rw-r--r-- drivers/dma-buf/heaps/cma_heap.c | 6
-rw-r--r-- drivers/dma/at_xdmac.c | 196
-rw-r--r-- drivers/dma/dma-jz4780.c | 118
-rw-r--r-- drivers/dma/dmaengine.c | 7
-rw-r--r-- drivers/dma/idxd/device.c | 222
-rw-r--r-- drivers/dma/idxd/dma.c | 40
-rw-r--r-- drivers/dma/idxd/idxd.h | 67
-rw-r--r-- drivers/dma/idxd/init.c | 196
-rw-r--r-- drivers/dma/idxd/irq.c | 239
-rw-r--r-- drivers/dma/idxd/registers.h | 15
-rw-r--r-- drivers/dma/idxd/submit.c | 69
-rw-r--r-- drivers/dma/idxd/sysfs.c | 215
-rw-r--r-- drivers/dma/ioat/sysfs.c | 3
-rw-r--r-- drivers/dma/mmp_pdma.c | 6
-rw-r--r-- drivers/dma/mv_xor_v2.c | 16
-rw-r--r-- drivers/dma/pch_dma.c | 2
-rw-r--r-- drivers/dma/ppc4xx/adma.c | 3
-rw-r--r-- drivers/dma/ptdma/ptdma-dev.c | 17
-rw-r--r-- drivers/dma/pxa_dma.c | 7
-rw-r--r-- drivers/dma/qcom/gpi.c | 4
-rw-r--r-- drivers/dma/qcom/hidma.c | 44
-rw-r--r-- drivers/dma/qcom/qcom_adm.c | 56
-rw-r--r-- drivers/dma/sh/rcar-dmac.c | 26
-rw-r--r-- drivers/dma/sh/shdma-base.c | 18
-rw-r--r-- drivers/dma/sprd-dma.c | 3
-rw-r--r-- drivers/dma/stm32-dmamux.c | 4
-rw-r--r-- drivers/dma/stm32-mdma.c | 78
-rw-r--r-- drivers/dma/tegra20-apb-dma.c | 6
-rw-r--r-- drivers/dma/ti/Makefile | 3
-rw-r--r-- drivers/dma/ti/edma.c | 3
-rw-r--r-- drivers/dma/ti/k3-psil-j721s2.c | 167
-rw-r--r-- drivers/dma/ti/k3-psil-priv.h | 1
-rw-r--r-- drivers/dma/ti/k3-psil.c | 1
-rw-r--r-- drivers/dma/ti/k3-udma-private.c | 6
-rw-r--r-- drivers/dma/ti/k3-udma.c | 15
-rw-r--r-- drivers/dma/uniphier-xdmac.c | 5
-rw-r--r-- drivers/dma/xilinx/xilinx_dma.c | 133
-rw-r--r-- drivers/dma/xilinx/xilinx_dpdma.c | 17
-rw-r--r-- drivers/edac/altera_edac.c | 2
-rw-r--r-- drivers/edac/edac_mc.c | 2
-rw-r--r-- drivers/edac/xgene_edac.c | 2
-rw-r--r-- drivers/extcon/extcon-usb-gpio.c | 2
-rw-r--r-- drivers/extcon/extcon.c | 14
-rw-r--r-- drivers/firmware/arm_scmi/virtio.c | 2
-rw-r--r-- drivers/firmware/cirrus/cs_dsp.c | 156
-rw-r--r-- drivers/firmware/dmi-sysfs.c | 7
-rw-r--r-- drivers/firmware/edd.c | 9
-rw-r--r-- drivers/firmware/efi/efi-init.c | 5
-rw-r--r-- drivers/firmware/efi/efi.c | 7
-rw-r--r-- drivers/firmware/efi/libstub/arm64-stub.c | 6
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub.c | 2
-rw-r--r-- drivers/firmware/google/Kconfig | 6
-rw-r--r-- drivers/firmware/memmap.c | 3
-rw-r--r-- drivers/firmware/qemu_fw_cfg.c | 26
-rw-r--r-- drivers/firmware/sysfb_simplefb.c | 8
-rw-r--r-- drivers/firmware/xilinx/zynqmp.c | 40
-rw-r--r-- drivers/fpga/altera-cvp.c | 12
-rw-r--r-- drivers/fpga/altera-fpga2sdram.c | 12
-rw-r--r-- drivers/fpga/altera-freeze-bridge.c | 10
-rw-r--r-- drivers/fpga/altera-hps2fpga.c | 12
-rw-r--r-- drivers/fpga/altera-pr-ip-core.c | 7
-rw-r--r-- drivers/fpga/altera-ps-spi.c | 9
-rw-r--r-- drivers/fpga/dfl-fme-br.c | 10
-rw-r--r-- drivers/fpga/dfl-fme-mgr.c | 22
-rw-r--r-- drivers/fpga/dfl-fme-region.c | 17
-rw-r--r-- drivers/fpga/dfl.c | 12
-rw-r--r-- drivers/fpga/fpga-bridge.c | 122
-rw-r--r-- drivers/fpga/fpga-mgr.c | 215
-rw-r--r-- drivers/fpga/fpga-region.c | 119
-rw-r--r-- drivers/fpga/ice40-spi.c | 9
-rw-r--r-- drivers/fpga/machxo2-spi.c | 9
-rw-r--r-- drivers/fpga/of-fpga-region.c | 12
-rw-r--r-- drivers/fpga/socfpga-a10.c | 16
-rw-r--r-- drivers/fpga/socfpga.c | 9
-rw-r--r-- drivers/fpga/stratix10-soc.c | 18
-rw-r--r-- drivers/fpga/ts73xx-fpga.c | 9
-rw-r--r-- drivers/fpga/versal-fpga.c | 9
-rw-r--r-- drivers/fpga/xilinx-pr-decoupler.c | 17
-rw-r--r-- drivers/fpga/xilinx-spi.c | 11
-rw-r--r-- drivers/fpga/zynq-fpga.c | 16
-rw-r--r-- drivers/fpga/zynqmp-fpga.c | 9
-rw-r--r-- drivers/gnss/Kconfig | 11
-rw-r--r-- drivers/gnss/Makefile | 3
-rw-r--r-- drivers/gnss/mtk.c | 2
-rw-r--r-- drivers/gnss/serial.c | 2
-rw-r--r-- drivers/gnss/sirf.c | 2
-rw-r--r-- drivers/gnss/ubx.c | 2
-rw-r--r-- drivers/gnss/usb.c | 214
-rw-r--r-- drivers/gpio/gpio-aggregator.c | 18
-rw-r--r-- drivers/gpio/gpio-idt3243x.c | 6
-rw-r--r-- drivers/gpio/gpio-mockup.c | 23
-rw-r--r-- drivers/gpio/gpio-mpc8xxx.c | 6
-rw-r--r-- drivers/gpio/gpio-sifive.c | 2
-rw-r--r-- drivers/gpio/gpio-sim.c | 19
-rw-r--r-- drivers/gpio/gpio-virtio.c | 2
-rw-r--r-- drivers/gpio/gpiolib-cdev.c | 6
-rw-r--r-- drivers/gpio/gpiolib-sysfs.c | 7
-rw-r--r-- drivers/gpio/gpiolib.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 10
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 37
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 81
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 31
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 103
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 84
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 14
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 12
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 29
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 40
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/cik.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c | 7
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 17
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c | 11
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h | 2
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/vi.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 9
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 23
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 46
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c42
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c145
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c87
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c114
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.h4
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c32
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h8
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c16
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c9
-rw-r--r--drivers/gpu/drm/ast/ast_tables.h2
-rw-r--r--drivers/gpu/drm/bridge/nwl-dsi.c12
-rw-r--r--drivers/gpu/drm/drm_atomic.c12
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c14
-rw-r--r--drivers/gpu/drm/drm_atomic_uapi.c14
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c1
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c1
-rw-r--r--drivers/gpu/drm/drm_mipi_dbi.c2
-rw-r--r--drivers/gpu/drm/drm_mm.c4
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c9
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c12
-rw-r--r--drivers/gpu/drm/drm_privacy_screen.c2
-rw-r--r--drivers/gpu/drm/drm_privacy_screen_x86.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c4
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c4
-rw-r--r--drivers/gpu/drm/i915/Kconfig1
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pages.c11
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c33
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c14
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c108
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.h2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c114
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c2
-rw-r--r--drivers/gpu/drm/i915/i915_mm.h1
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c22
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h22
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c3
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c147
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h1
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c1
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c26
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h2
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee.c5
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c167
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c11
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c7
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c7
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c5
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h3
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c21
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_kms.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c2
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c22
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c8
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c4
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c1
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c14
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c29
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c33
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/gpu/drm/xlnx/zynqmp_disp.c9
-rw-r--r--drivers/greybus/es2.c2
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.c76
-rw-r--r--drivers/hid/amd-sfh-hid/amd_sfh_pcie.h4
-rw-r--r--drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c4
-rw-r--r--drivers/hid/hid-apple.c16
-rw-r--r--drivers/hid/hid-elo.c1
-rw-r--r--drivers/hid/hid-ids.h2
-rw-r--r--drivers/hid/hid-input.c2
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-vivaldi.c41
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-of-goodix.c28
-rw-r--r--drivers/hid/uhid.c49
-rw-r--r--drivers/hid/wacom_wac.c39
-rw-r--r--drivers/hv/channel_mgmt.c2
-rw-r--r--drivers/hv/hv_balloon.c7
-rw-r--r--drivers/hv/hv_common.c15
-rw-r--r--drivers/hv/hv_utils_transport.c2
-rw-r--r--drivers/hv/vmbus_drv.c9
-rw-r--r--drivers/hwmon/adt7470.c3
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c4
-rw-r--r--drivers/hwmon/lm90.c21
-rw-r--r--drivers/hwmon/ltc2992.c3
-rw-r--r--drivers/hwmon/nct6775.c6
-rw-r--r--drivers/hwmon/pmbus/ir38064.c2
-rw-r--r--drivers/hwspinlock/stm32_hwspinlock.c58
-rw-r--r--drivers/hwtracing/coresight/coresight-cfg-preload.c9
-rw-r--r--drivers/hwtracing/coresight/coresight-config.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c11
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c10
-rw-r--r--drivers/hwtracing/coresight/coresight-syscfg-configfs.c87
-rw-r--r--drivers/hwtracing/coresight/coresight-syscfg-configfs.h4
-rw-r--r--drivers/hwtracing/coresight/coresight-syscfg.c315
-rw-r--r--drivers/hwtracing/coresight/coresight-syscfg.h39
-rw-r--r--drivers/i2c/busses/Kconfig24
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-aspeed.c2
-rw-r--r--drivers/i2c/busses/i2c-bcm2835.c22
-rw-r--r--drivers/i2c/busses/i2c-brcmstb.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h13
-rw-r--r--drivers/i2c/busses/i2c-designware-master.c7
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c51
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c2
-rw-r--r--drivers/i2c/busses/i2c-exynos5.c110
-rw-r--r--drivers/i2c/busses/i2c-i801.c288
-rw-r--r--drivers/i2c/busses/i2c-imx.c92
-rw-r--r--drivers/i2c/busses/i2c-mpc.c23
-rw-r--r--drivers/i2c/busses/i2c-qcom-cci.c16
-rw-r--r--drivers/i2c/busses/i2c-rcar.c26
-rw-r--r--drivers/i2c/busses/i2c-riic.c10
-rw-r--r--drivers/i2c/busses/i2c-rk3x.c7
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c60
-rw-r--r--drivers/i2c/busses/i2c-stm32f7.c14
-rw-r--r--drivers/i2c/busses/i2c-tegra.c69
-rw-r--r--drivers/i2c/busses/i2c-virtio.c2
-rw-r--r--drivers/i2c/busses/i2c-xlp9xx.c7
-rw-r--r--drivers/i2c/busses/i2c-xlr.c470
-rw-r--r--drivers/i2c/i2c-core-base.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c53
-rw-r--r--drivers/i3c/master.c3
-rw-r--r--drivers/i3c/master/dw-i3c-master.c4
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c2
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dat_v1.c4
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c2
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci.h2
-rw-r--r--drivers/i3c/master/svc-i3c-master.c341
-rw-r--r--drivers/iio/Kconfig2
-rw-r--r--drivers/iio/Makefile2
-rw-r--r--drivers/iio/accel/bma180.c4
-rw-r--r--drivers/iio/accel/bma220_spi.c6
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c2
-rw-r--r--drivers/iio/accel/kxcjk-1013.c5
-rw-r--r--drivers/iio/accel/mma7455_core.c3
-rw-r--r--drivers/iio/accel/mma7660.c8
-rw-r--r--drivers/iio/accel/mma8452.c2
-rw-r--r--drivers/iio/accel/mma9553.c2
-rw-r--r--drivers/iio/accel/sca3000.c17
-rw-r--r--drivers/iio/accel/stk8312.c2
-rw-r--r--drivers/iio/accel/stk8ba50.c3
-rw-r--r--drivers/iio/adc/Kconfig21
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/ad7124.c2
-rw-r--r--drivers/iio/adc/ad7192.c3
-rw-r--r--drivers/iio/adc/ad7266.c3
-rw-r--r--drivers/iio/adc/ad7606.h2
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c4
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c38
-rw-r--r--drivers/iio/adc/axp20x_adc.c45
-rw-r--r--drivers/iio/adc/envelope-detector.c3
-rw-r--r--drivers/iio/adc/hi8435.c2
-rw-r--r--drivers/iio/adc/imx7d_adc.c5
-rw-r--r--drivers/iio/adc/ina2xx-adc.c15
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c6
-rw-r--r--drivers/iio/adc/max9611.c20
-rw-r--r--drivers/iio/adc/mcp3911.c9
-rw-r--r--drivers/iio/adc/rcar-gyroadc.c3
-rw-r--r--drivers/iio/adc/rzg2l_adc.c4
-rw-r--r--drivers/iio/adc/stm32-adc.c3
-rw-r--r--drivers/iio/adc/stmpe-adc.c5
-rw-r--r--drivers/iio/adc/ti-adc081c.c22
-rw-r--r--drivers/iio/adc/ti-adc12138.c14
-rw-r--r--drivers/iio/adc/ti-ads1015.c10
-rw-r--r--drivers/iio/adc/ti-ads124s08.c3
-rw-r--r--drivers/iio/adc/ti-ads8688.c4
-rw-r--r--drivers/iio/adc/xilinx-ams.c1451
-rw-r--r--drivers/iio/adc/xilinx-xadc-core.c64
-rw-r--r--drivers/iio/addac/Kconfig20
-rw-r--r--drivers/iio/addac/Makefile7
-rw-r--r--drivers/iio/addac/ad74413r.c1475
-rw-r--r--drivers/iio/amplifiers/hmc425a.c2
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c2
-rw-r--r--drivers/iio/chemical/atlas-sensor.c4
-rw-r--r--drivers/iio/chemical/sunrise_co2.c4
-rw-r--r--drivers/iio/chemical/vz89x.c2
-rw-r--r--drivers/iio/common/scmi_sensors/scmi_iio.c57
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c4
-rw-r--r--drivers/iio/dac/Kconfig22
-rw-r--r--drivers/iio/dac/Makefile2
-rw-r--r--drivers/iio/dac/ad3552r.c1138
-rw-r--r--drivers/iio/dac/ad5064.c4
-rw-r--r--drivers/iio/dac/ad5380.c2
-rw-r--r--drivers/iio/dac/ad5446.c2
-rw-r--r--drivers/iio/dac/ad5504.c2
-rw-r--r--drivers/iio/dac/ad5624r_spi.c2
-rw-r--r--drivers/iio/dac/ad5686.c2
-rw-r--r--drivers/iio/dac/ad5755.c152
-rw-r--r--drivers/iio/dac/ad5758.c3
-rw-r--r--drivers/iio/dac/ad5766.c13
-rw-r--r--drivers/iio/dac/ad5791.c2
-rw-r--r--drivers/iio/dac/ad7293.c934
-rw-r--r--drivers/iio/dac/dpot-dac.c2
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c3
-rw-r--r--drivers/iio/dac/max5821.c2
-rw-r--r--drivers/iio/dac/mcp4725.c10
-rw-r--r--drivers/iio/dac/stm32-dac.c2
-rw-r--r--drivers/iio/dac/ti-dac082s085.c2
-rw-r--r--drivers/iio/dac/ti-dac5571.c2
-rw-r--r--drivers/iio/dac/ti-dac7311.c2
-rw-r--r--drivers/iio/dummy/iio_simple_dummy_buffer.c2
-rw-r--r--drivers/iio/filter/Kconfig18
-rw-r--r--drivers/iio/filter/Makefile7
-rw-r--r--drivers/iio/filter/admv8818.c665
-rw-r--r--drivers/iio/frequency/Kconfig10
-rw-r--r--drivers/iio/frequency/Makefile1
-rw-r--r--drivers/iio/frequency/admv1013.c656
-rw-r--r--drivers/iio/health/afe4403.c5
-rw-r--r--drivers/iio/health/afe4404.c5
-rw-r--r--drivers/iio/iio_core.h2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c4
-rw-r--r--drivers/iio/industrialio-buffer.c34
-rw-r--r--drivers/iio/industrialio-core.c6
-rw-r--r--drivers/iio/industrialio-trigger.c36
-rw-r--r--drivers/iio/light/cm3605.c6
-rw-r--r--drivers/iio/light/gp2ap020a00f.c5
-rw-r--r--drivers/iio/light/ltr501.c42
-rw-r--r--drivers/iio/magnetometer/ak8975.c2
-rw-r--r--drivers/iio/magnetometer/hmc5843_core.c4
-rw-r--r--drivers/iio/magnetometer/mag3110.c6
-rw-r--r--drivers/iio/potentiometer/mcp41010.c6
-rw-r--r--drivers/iio/potentiostat/lmp91000.c4
-rw-r--r--drivers/iio/pressure/bmp280-core.c11
-rw-r--r--drivers/iio/pressure/bmp280-i2c.c2
-rw-r--r--drivers/iio/pressure/bmp280-spi.c2
-rw-r--r--drivers/iio/pressure/mpl3115.c16
-rw-r--r--drivers/iio/pressure/ms5611.h6
-rw-r--r--drivers/iio/pressure/ms5611_core.c7
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c11
-rw-r--r--drivers/iio/pressure/ms5611_spi.c17
-rw-r--r--drivers/iio/proximity/as3935.c6
-rw-r--r--drivers/iio/test/iio-test-format.c123
-rw-r--r--drivers/iio/trigger/iio-trig-interrupt.c4
-rw-r--r--drivers/iio/trigger/iio-trig-sysfs.c4
-rw-r--r--drivers/iio/trigger/stm32-timer-trigger.c4
-rw-r--r--drivers/infiniband/core/cache.c12
-rw-r--r--drivers/infiniband/core/cm.c2
-rw-r--r--drivers/infiniband/core/cma.c40
-rw-r--r--drivers/infiniband/core/device.c3
-rw-r--r--drivers/infiniband/core/sysfs.c3
-rw-r--r--drivers/infiniband/core/ucma.c34
-rw-r--r--drivers/infiniband/core/umem_odp.c3
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c1
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c9
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c3
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c11
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c12
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c50
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h7
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c99
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h9
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c5
-rw-r--r--drivers/infiniband/hw/cxgb4/id_table.c17
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c8
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c1
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib.h2
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_main.c27
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c38
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c8
-rw-r--r--drivers/infiniband/hw/hns/Kconfig17
-rw-r--r--drivers/infiniband/hw/hns/Makefile5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_ah.c5
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_alloc.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.c11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_common.h202
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c13
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_db.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h108
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c1
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c4675
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.h1147
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c49
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h22
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c85
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c32
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_pd.c17
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c93
-rw-r--r--drivers/infiniband/hw/irdma/hw.c20
-rw-r--r--drivers/infiniband/hw/irdma/i40iw_if.c2
-rw-r--r--drivers/infiniband/hw/irdma/main.c4
-rw-r--r--drivers/infiniband/hw/irdma/pble.h2
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c31
-rw-r--r--drivers/infiniband/hw/mlx4/main.c36
-rw-r--r--drivers/infiniband/hw/mlx5/cq.c5
-rw-r--r--drivers/infiniband/hw/mlx5/mad.c23
-rw-r--r--drivers/infiniband/hw/mlx5/main.c8
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_allocator.c15
-rw-r--r--drivers/infiniband/hw/mthca/mthca_mr.c25
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c20
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c16
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c17
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c18
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h1
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c4
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_sysfs.c3
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_verbs.c8
-rw-r--r--drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c10
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c2
-rw-r--r--drivers/infiniband/sw/rxe/Makefile1
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c24
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h10
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mcast.c11
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mw.c21
-rw-r--r--drivers/infiniband/sw/rxe/rxe_net.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_opcode.c739
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c177
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h54
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c9
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c16
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_sysfs.c119
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c34
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h24
-rw-r--r--drivers/infiniband/sw/siw/siw.h7
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c7
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c20
-rw-r--r--drivers/infiniband/sw/siw/siw_verbs.c9
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c76
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h23
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c106
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c58
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c138
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c8
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c145
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c1089
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.h41
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h18
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c121
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c684
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h16
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c98
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.h34
-rw-r--r--drivers/input/ff-core.c2
-rw-r--r--drivers/input/input.c6
-rw-r--r--drivers/input/keyboard/gpio_keys.c2
-rw-r--r--drivers/input/misc/axp20x-pek.c72
-rw-r--r--drivers/input/misc/palmas-pwrbutton.c9
-rw-r--r--drivers/input/mouse/byd.c2
-rw-r--r--drivers/input/mouse/psmouse-smbus.c10
-rw-r--r--drivers/input/serio/serport.c5
-rw-r--r--drivers/input/touchscreen/goodix.c127
-rw-r--r--drivers/input/touchscreen/goodix.h1
-rw-r--r--drivers/input/touchscreen/silead.c172
-rw-r--r--drivers/input/touchscreen/ti_am335x_tsc.c20
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c4
-rw-r--r--drivers/input/touchscreen/wacom_i2c.c44
-rw-r--r--drivers/input/touchscreen/wm97xx-core.c12
-rw-r--r--drivers/input/touchscreen/zinitix.c34
-rw-r--r--drivers/interconnect/qcom/Kconfig27
-rw-r--r--drivers/interconnect/qcom/Makefile6
-rw-r--r--drivers/interconnect/qcom/icc-rpm.c64
-rw-r--r--drivers/interconnect/qcom/icc-rpm.h15
-rw-r--r--drivers/interconnect/qcom/icc-rpmh.c10
-rw-r--r--drivers/interconnect/qcom/msm8916.c4
-rw-r--r--drivers/interconnect/qcom/msm8939.c5
-rw-r--r--drivers/interconnect/qcom/msm8996.c2110
-rw-r--r--drivers/interconnect/qcom/msm8996.h149
-rw-r--r--drivers/interconnect/qcom/osm-l3.c20
-rw-r--r--drivers/interconnect/qcom/qcm2290.c1363
-rw-r--r--drivers/interconnect/qcom/sc7280.h2
-rw-r--r--drivers/interconnect/qcom/sdm660.c7
-rw-r--r--drivers/interconnect/qcom/sm8150.c1
-rw-r--r--drivers/interconnect/qcom/sm8250.c1
-rw-r--r--drivers/interconnect/qcom/sm8350.c1
-rw-r--r--drivers/interconnect/qcom/sm8450.c1987
-rw-r--r--drivers/interconnect/qcom/sm8450.h169
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h2
-rw-r--r--drivers/iommu/amd/init.c111
-rw-r--r--drivers/iommu/amd/io_pgtable.c110
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c2
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c23
-rw-r--r--drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h5
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c3
-rw-r--r--drivers/iommu/dma-iommu.c274
-rw-r--r--drivers/iommu/intel/iommu.c111
-rw-r--r--drivers/iommu/intel/irq_remapping.c13
-rw-r--r--drivers/iommu/io-pgtable-arm-v7s.c6
-rw-r--r--drivers/iommu/io-pgtable-arm.c9
-rw-r--r--drivers/iommu/ioasid.c1
-rw-r--r--drivers/iommu/iommu.c36
-rw-r--r--drivers/iommu/iova.c209
-rw-r--r--drivers/iommu/omap-iommu.c2
-rw-r--r--drivers/iommu/virtio-iommu.c117
-rw-r--r--drivers/irqchip/irq-apple-aic.c1
-rw-r--r--drivers/irqchip/irq-gic-v2m.c3
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c1
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c206
-rw-r--r--drivers/irqchip/irq-gic-v3-mbi.c1
-rw-r--r--drivers/irqchip/irq-gic-v3.c17
-rw-r--r--drivers/irqchip/irq-imx-gpcv2.c2
-rw-r--r--drivers/irqchip/irq-ingenic-tcu.c4
-rw-r--r--drivers/irqchip/irq-loongson-pch-msi.c2
-rw-r--r--drivers/irqchip/irq-mbigen.c4
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c12
-rw-r--r--drivers/irqchip/irq-realtek-rtl.c18
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c9
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c9
-rw-r--r--drivers/irqchip/irq-sifive-plic.c1
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c2
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/leds/Kconfig7
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/blink/leds-lgm-sso.c1
-rw-r--r--drivers/leds/flash/Kconfig13
-rw-r--r--drivers/leds/flash/Makefile1
-rw-r--r--drivers/leds/flash/leds-ktd2692.c2
-rw-r--r--drivers/leds/flash/leds-mt6360.c910
-rw-r--r--drivers/leds/led-class.c6
-rw-r--r--drivers/leds/leds-fsg.c193
-rw-r--r--drivers/leds/leds-lp50xx.c1
-rw-r--r--drivers/leds/leds-lp55xx-common.c4
-rw-r--r--drivers/leds/leds-tca6507.c7
-rw-r--r--drivers/macintosh/mac_hid.c24
-rw-r--r--drivers/macintosh/mediabay.c2
-rw-r--r--drivers/mailbox/apple-mailbox.c4
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c13
-rw-r--r--drivers/mailbox/hi3660-mailbox.c18
-rw-r--r--drivers/mailbox/imx-mailbox.c4
-rw-r--r--drivers/mailbox/mailbox-mpfs.c2
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c15
-rw-r--r--drivers/mailbox/pcc.c10
-rw-r--r--drivers/mailbox/qcom-ipcc.c99
-rw-r--r--drivers/mailbox/zynqmp-ipi-mailbox.c1
-rw-r--r--drivers/md/dm-integrity.c9
-rw-r--r--drivers/md/dm-linear.c63
-rw-r--r--drivers/md/dm-log-writes.c110
-rw-r--r--drivers/md/dm-mpath.c1
-rw-r--r--drivers/md/dm-stripe.c75
-rw-r--r--drivers/md/dm-sysfs.c3
-rw-r--r--drivers/md/dm-table.c22
-rw-r--r--drivers/md/dm-writecache.c2
-rw-r--r--drivers/md/dm.c112
-rw-r--r--drivers/md/dm.h4
-rw-r--r--drivers/md/md-cluster.c2
-rw-r--r--drivers/md/md.c58
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c173
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c12
-rw-r--r--drivers/md/persistent-data/dm-btree.c8
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c5
-rw-r--r--drivers/md/raid0.c38
-rw-r--r--drivers/md/raid1-10.c6
-rw-r--r--drivers/md/raid1.c83
-rw-r--r--drivers/md/raid10.c107
-rw-r--r--drivers/md/raid5.c67
-rw-r--r--drivers/md/raid5.h4
-rw-r--r--drivers/media/cec/core/cec-core.c2
-rw-r--r--drivers/media/mc/mc-devnode.c2
-rw-r--r--drivers/media/tuners/si2157.c34
-rw-r--r--drivers/message/fusion/mptbase.c155
-rw-r--r--drivers/message/fusion/mptctl.c82
-rw-r--r--drivers/message/fusion/mptlan.c90
-rw-r--r--drivers/message/fusion/mptsas.c94
-rw-r--r--drivers/mfd/Kconfig10
-rw-r--r--drivers/mfd/Makefile1
-rw-r--r--drivers/mfd/intel_pmt.c261
-rw-r--r--drivers/misc/cxl/Kconfig1
-rw-r--r--drivers/misc/cxl/sysfs.c3
-rw-r--r--drivers/misc/eeprom/at24.c68
-rw-r--r--drivers/misc/eeprom/at25.c217
-rw-r--r--drivers/misc/eeprom/ee1004.c3
-rw-r--r--drivers/misc/fastrpc.c9
-rw-r--r--drivers/misc/habanalabs/common/command_buffer.c46
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c389
-rw-r--r--drivers/misc/habanalabs/common/context.c39
-rw-r--r--drivers/misc/habanalabs/common/debugfs.c97
-rw-r--r--drivers/misc/habanalabs/common/device.c387
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c253
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h301
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c150
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c195
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c5
-rw-r--r--drivers/misc/habanalabs/common/hwmon.c209
-rw-r--r--drivers/misc/habanalabs/common/irq.c14
-rw-r--r--drivers/misc/habanalabs/common/memory.c78
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu.c25
-rw-r--r--drivers/misc/habanalabs/common/mmu/mmu_v1.c18
-rw-r--r--drivers/misc/habanalabs/common/sysfs.c56
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c313
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h4
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c4
-rw-r--r--drivers/misc/habanalabs/goya/goya.c165
-rw-r--r--drivers/misc/habanalabs/goya/goyaP.h14
-rw-r--r--drivers/misc/habanalabs/goya/goya_coresight.c4
-rw-r--r--drivers/misc/habanalabs/goya/goya_hwmgr.c31
-rw-r--r--drivers/misc/habanalabs/include/common/cpucp_if.h62
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h4
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h19
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h18
-rw-r--r--drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h20
-rw-r--r--drivers/misc/lattice-ecp3-config.c12
-rw-r--r--drivers/misc/lkdtm/Makefile4
-rw-r--r--drivers/misc/lkdtm/bugs.c16
-rw-r--r--drivers/misc/lkdtm/core.c8
-rw-r--r--drivers/misc/mei/client.c4
-rw-r--r--drivers/misc/mei/hbm.c20
-rw-r--r--drivers/misc/mei/hw-txe.c6
-rw-r--r--drivers/misc/mei/init.c1
-rw-r--r--drivers/misc/ocxl/file.c4
-rw-r--r--drivers/misc/pci_endpoint_test.c2
-rw-r--r--drivers/misc/sram.c1
-rw-r--r--drivers/misc/uacce/uacce.c12
-rw-r--r--drivers/misc/vmw_vmci/vmci_context.c6
-rw-r--r--drivers/misc/vmw_vmci/vmci_event.c3
-rw-r--r--drivers/mmc/core/block.c66
-rw-r--r--drivers/mmc/core/sd.c8
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/bcm2835.c2
-rw-r--r--drivers/mmc/host/jz4740_mmc.c4
-rw-r--r--drivers/mmc/host/moxart-mmc.c2
-rw-r--r--drivers/mmc/host/mxcmmc.c2
-rw-r--r--drivers/mmc/host/renesas_sdhi.h4
-rw-r--r--drivers/mmc/host/renesas_sdhi_core.c47
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c21
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c8
-rw-r--r--drivers/mmc/host/sh_mmcif.c3
-rw-r--r--drivers/most/most_usb.c4
-rw-r--r--drivers/mtd/devices/phram.c12
-rw-r--r--drivers/mtd/maps/Kconfig6
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c133
-rw-r--r--drivers/mtd/mtd_blkdevs.c26
-rw-r--r--drivers/mtd/nand/raw/Kconfig5
-rw-r--r--drivers/mtd/nand/raw/brcmnand/brcmnand.c2
-rw-r--r--drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c3
-rw-r--r--drivers/mtd/nand/raw/ingenic/ingenic_ecc.c7
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c28
-rw-r--r--drivers/mtd/parsers/qcomsmempart.c36
-rw-r--r--drivers/mtd/ubi/block.c7
-rw-r--r--drivers/net/bonding/bond_3ad.c33
-rw-r--r--drivers/net/bonding/bond_main.c43
-rw-r--r--drivers/net/bonding/bond_procfs.c8
-rw-r--r--drivers/net/caif/caif_virtio.c2
-rw-r--r--drivers/net/can/flexcan/flexcan-core.c1
-rw-r--r--drivers/net/can/flexcan/flexcan.h2
-rw-r--r--drivers/net/can/m_can/m_can.c6
-rw-r--r--drivers/net/can/m_can/tcan4x5x-regmap.c2
-rw-r--r--drivers/net/can/slcan.c4
-rw-r--r--drivers/net/dsa/Kconfig2
-rw-r--r--drivers/net/dsa/bcm_sf2.c7
-rw-r--r--drivers/net/dsa/lan9303-core.c13
-rw-r--r--drivers/net/dsa/lantiq_gswip.c14
-rw-r--r--drivers/net/dsa/mt7530.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c22
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c4
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c5
-rw-r--r--drivers/net/dsa/qca/ar9331.c3
-rw-r--r--drivers/net/ethernet/3com/typhoon.c6
-rw-r--r--drivers/net/ethernet/8390/etherh.c6
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c31
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.h18
-rw-r--r--drivers/net/ethernet/amd/declance.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c14
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-pci.c3
-rw-r--r--drivers/net/ethernet/apple/bmac.c5
-rw-r--r--drivers/net/ethernet/apple/mace.c16
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c6
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c23
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c10
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c4
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h2
-rw-r--r--drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c10
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c5
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c12
-rw-r--r--drivers/net/ethernet/freescale/xgmac_mdio.c28
-rw-r--r--drivers/net/ethernet/google/gve/gve.h2
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c2
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c6
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c4
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c4
-rw-r--r--drivers/net/ethernet/i825xx/ether1.c4
-rw-r--r--drivers/net/ethernet/i825xx/sni_82596.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c180
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h4
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c20
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c39
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c83
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_register.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c103
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h1
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c34
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c28
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c13
-rw-r--r--drivers/net/ethernet/litex/Kconfig2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h70
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c66
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c7
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c22
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c7
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c1
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router.c24
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.c40
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_router_hw.h3
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c9
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_mac.c11
-rw-r--r--drivers/net/ethernet/microchip/lan966x/lan966x_main.c6
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c2
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_packet.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c30
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c44
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c12
-rw-r--r--drivers/net/ethernet/seeq/ether3.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c101
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c51
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c45
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c3
-rw-r--r--drivers/net/ethernet/ti/cpsw.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_priv.c4
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c35
-rw-r--r--drivers/net/ethernet/vertexcom/Kconfig2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c135
-rw-r--r--drivers/net/hamradio/6pack.c4
-rw-r--r--drivers/net/hamradio/mkiss.c4
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hyperv/hyperv_net.h5
-rw-r--r--drivers/net/hyperv/netvsc.c136
-rw-r--r--drivers/net/hyperv/netvsc_drv.c1
-rw-r--r--drivers/net/hyperv/rndis_filter.c2
-rw-r--r--drivers/net/ieee802154/at86rf230.c13
-rw-r--r--drivers/net/ieee802154/ca8210.c5
-rw-r--r--drivers/net/ieee802154/mac802154_hwsim.c1
-rw-r--r--drivers/net/ieee802154/mcr20a.c4
-rw-r--r--drivers/net/ipa/ipa_endpoint.c28
-rw-r--r--drivers/net/ipa/ipa_endpoint.h17
-rw-r--r--drivers/net/ipa/ipa_power.c52
-rw-r--r--drivers/net/ipa/ipa_power.h7
-rw-r--r--drivers/net/ipa/ipa_uc.c5
-rw-r--r--drivers/net/macsec.c33
-rw-r--r--drivers/net/mctp/mctp-serial.c9
-rw-r--r--drivers/net/mdio/mdio-aspeed.c1
-rw-r--r--drivers/net/netdevsim/fib.c4
-rw-r--r--drivers/net/phy/at803x.c28
-rw-r--r--drivers/net/phy/broadcom.c1
-rw-r--r--drivers/net/phy/marvell.c73
-rw-r--r--drivers/net/phy/mediatek-ge.c3
-rw-r--r--drivers/net/phy/micrel.c36
-rw-r--r--drivers/net/phy/phy_device.c6
-rw-r--r--drivers/net/phy/sfp-bus.c5
-rw-r--r--drivers/net/phy/sfp.c25
-rw-r--r--drivers/net/ppp/ppp_async.c3
-rw-r--r--drivers/net/ppp/ppp_synctty.c3
-rw-r--r--drivers/net/slip/slip.c4
-rw-r--r--drivers/net/usb/ax88179_178a.c68
-rw-r--r--drivers/net/usb/cdc_ether.c12
-rw-r--r--drivers/net/usb/cdc_mbim.c5
-rw-r--r--drivers/net/usb/cdc_ncm.c8
-rw-r--r--drivers/net/usb/ipheth.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c4
-rw-r--r--drivers/net/usb/smsc95xx.c3
-rw-r--r--drivers/net/usb/zaurus.c12
-rw-r--r--drivers/net/veth.c13
-rw-r--r--drivers/net/virtio_net.c6
-rw-r--r--drivers/net/wireguard/noise.c45
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c2
-rw-r--r--drivers/net/wireless/cisco/airo.c22
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Kconfig13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/commands.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/filter.h88
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/main.c45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/net.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c203
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c240
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c3
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_ap.c16
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_download.c2
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_proc.c24
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c15
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_coex.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c2
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c4
-rw-r--r--drivers/nfc/pn544/i2c.c2
-rw-r--r--drivers/nfc/st21nfca/se.c10
-rw-r--r--drivers/ntb/hw/amd/ntb_hw_amd.c2
-rw-r--r--drivers/ntb/hw/mscc/ntb_hw_switchtec.c26
-rw-r--r--drivers/ntb/msi.c22
-rw-r--r--drivers/nubus/proc.c36
-rw-r--r--drivers/nvdimm/Kconfig2
-rw-r--r--drivers/nvdimm/pmem.c38
-rw-r--r--drivers/nvdimm/virtio_pmem.c2
-rw-r--r--drivers/nvme/host/core.c23
-rw-r--r--drivers/nvme/host/fabrics.c25
-rw-r--r--drivers/nvme/host/fabrics.h1
-rw-r--r--drivers/nvme/host/fault_inject.c2
-rw-r--r--drivers/nvme/host/multipath.c43
-rw-r--r--drivers/nvme/host/nvme.h8
-rw-r--r--drivers/nvme/host/pci.c150
-rw-r--r--drivers/nvme/host/rdma.c1
-rw-r--r--drivers/nvme/host/tcp.c11
-rw-r--r--drivers/nvme/host/trace.h6
-rw-r--r--drivers/nvme/target/passthru.c3
-rw-r--r--drivers/nvmem/core.c2
-rw-r--r--drivers/nvmem/mtk-efuse.c13
-rw-r--r--drivers/of/base.c169
-rw-r--r--drivers/of/device.c2
-rw-r--r--drivers/of/fdt.c150
-rw-r--r--drivers/of/property.c17
-rw-r--r--drivers/of/unittest.c175
-rw-r--r--drivers/parisc/ccio-dma.c3
-rw-r--r--drivers/parisc/led.c4
-rw-r--r--drivers/parisc/pdc_stable.c4
-rw-r--r--drivers/parisc/sba_iommu.c3
-rw-r--r--drivers/pci/Kconfig2
-rw-r--r--drivers/pci/Makefile3
-rw-r--r--drivers/pci/access.c36
-rw-r--r--drivers/pci/controller/Kconfig10
-rw-r--r--drivers/pci/controller/cadence/pci-j721e.c103
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence-plat.c6
-rw-r--r--drivers/pci/controller/cadence/pcie-cadence.h2
-rw-r--r--drivers/pci/controller/dwc/pci-dra7xx.c8
-rw-r--r--drivers/pci/controller/dwc/pci-exynos.c4
-rw-r--r--drivers/pci/controller/dwc/pci-imx6.c81
-rw-r--r--drivers/pci/controller/dwc/pci-keystone.c37
-rw-r--r--drivers/pci/controller/dwc/pci-layerscape.c152
-rw-r--r--drivers/pci/controller/dwc/pcie-artpec6.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-plat.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-designware.c7
-rw-r--r--drivers/pci/controller/dwc/pcie-hisi.c32
-rw-r--r--drivers/pci/controller/dwc/pcie-histb.c4
-rw-r--r--drivers/pci/controller/dwc/pcie-intel-gw.c204
-rw-r--r--drivers/pci/controller/dwc/pcie-kirin.c37
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom-ep.c6
-rw-r--r--drivers/pci/controller/dwc/pcie-qcom.c14
-rw-r--r--drivers/pci/controller/dwc/pcie-spear13xx.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c222
-rw-r--r--drivers/pci/controller/dwc/pcie-uniphier.c147
-rw-r--r--drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c84
-rw-r--r--drivers/pci/controller/pci-aardvark.c79
-rw-r--r--drivers/pci/controller/pci-hyperv.c342
-rw-r--r--drivers/pci/controller/pci-mvebu.c542
-rw-r--r--drivers/pci/controller/pci-rcar-gen2.c14
-rw-r--r--drivers/pci/controller/pci-thunder-ecam.c46
-rw-r--r--drivers/pci/controller/pci-thunder-pem.c4
-rw-r--r--drivers/pci/controller/pci-xgene-msi.c6
-rw-r--r--drivers/pci/controller/pci-xgene.c58
-rw-r--r--drivers/pci/controller/pcie-altera.c12
-rw-r--r--drivers/pci/controller/pcie-apple.c10
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c386
-rw-r--r--drivers/pci/controller/pcie-iproc-bcma.c22
-rw-r--r--drivers/pci/controller/pcie-iproc-platform.c16
-rw-r--r--drivers/pci/controller/pcie-iproc.c4
-rw-r--r--drivers/pci/controller/pcie-mediatek-gen3.c382
-rw-r--r--drivers/pci/controller/pcie-mediatek.c18
-rw-r--r--drivers/pci/controller/pcie-microchip-host.c42
-rw-r--r--drivers/pci/controller/pcie-mt7621.c84
-rw-r--r--drivers/pci/controller/pcie-rcar-host.c14
-rw-r--r--drivers/pci/controller/pcie-rockchip-host.c4
-rw-r--r--drivers/pci/controller/pcie-xilinx-cpm.c44
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c30
-rw-r--r--drivers/pci/controller/pcie-xilinx.c158
-rw-r--r--drivers/pci/controller/vmd.c61
-rw-r--r--drivers/pci/endpoint/functions/pci-epf-ntb.c2
-rw-r--r--drivers/pci/endpoint/pci-epc-core.c2
-rw-r--r--drivers/pci/hotplug/TODO5
-rw-r--r--drivers/pci/hotplug/cpqphp_ctrl.c4
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c74
-rw-r--r--drivers/pci/hotplug/pciehp.h3
-rw-r--r--drivers/pci/hotplug/pciehp_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c38
-rw-r--r--drivers/pci/msi/Makefile7
-rw-r--r--drivers/pci/msi/irqdomain.c280
-rw-r--r--drivers/pci/msi/legacy.c80
-rw-r--r--drivers/pci/msi/msi.c (renamed from drivers/pci/msi.c)955
-rw-r--r--drivers/pci/msi/msi.h39
-rw-r--r--drivers/pci/msi/pcidev_msi.c43
-rw-r--r--drivers/pci/of.c2
-rw-r--r--drivers/pci/p2pdma.c4
-rw-r--r--drivers/pci/pci-bridge-emul.c119
-rw-r--r--drivers/pci/pci-sysfs.c7
-rw-r--r--drivers/pci/pci.c31
-rw-r--r--drivers/pci/pcie/aspm.c94
-rw-r--r--drivers/pci/pcie/dpc.c4
-rw-r--r--drivers/pci/pcie/pme.c4
-rw-r--r--drivers/pci/pcie/portdrv_core.c47
-rw-r--r--drivers/pci/probe.c36
-rw-r--r--drivers/pci/proc.c10
-rw-r--r--drivers/pci/quirks.c26
-rw-r--r--drivers/pci/setup-res.c8
-rw-r--r--drivers/pci/slot.c3
-rw-r--r--drivers/pci/switch/switchtec.c11
-rw-r--r--drivers/pci/xen-pcifront.c4
-rw-r--r--drivers/pcmcia/at91_cf.c6
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c5
-rw-r--r--drivers/phy/amlogic/Kconfig10
-rw-r--r--drivers/phy/amlogic/Makefile1
-rw-r--r--drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c3
-rw-r--r--drivers/phy/amlogic/phy-meson8-hdmi-tx.c160
-rw-r--r--drivers/phy/broadcom/Kconfig3
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb2.c54
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb.c38
-rw-r--r--drivers/phy/cadence/phy-cadence-sierra.c1345
-rw-r--r--drivers/phy/cadence/phy-cadence-torrent.c6
-rw-r--r--drivers/phy/freescale/Kconfig8
-rw-r--r--drivers/phy/freescale/Makefile1
-rw-r--r--drivers/phy/freescale/phy-fsl-imx8m-pcie.c237
-rw-r--r--drivers/phy/intel/Kconfig10
-rw-r--r--drivers/phy/intel/Makefile1
-rw-r--r--drivers/phy/intel/phy-intel-thunderbay-emmc.c509
-rw-r--r--drivers/phy/mediatek/phy-mtk-io.h38
-rw-r--r--drivers/phy/mediatek/phy-mtk-mipi-dsi.c2
-rw-r--r--drivers/phy/mediatek/phy-mtk-tphy.c608
-rw-r--r--drivers/phy/mediatek/phy-mtk-xsphy.c140
-rw-r--r--drivers/phy/microchip/Kconfig8
-rw-r--r--drivers/phy/microchip/Makefile1
-rw-r--r--drivers/phy/microchip/lan966x_serdes.c545
-rw-r--r--drivers/phy/microchip/lan966x_serdes_regs.h209
-rw-r--r--drivers/phy/phy-can-transceiver.c4
-rw-r--r--drivers/phy/phy-core-mipi-dphy.c4
-rw-r--r--drivers/phy/qualcomm/Kconfig10
-rw-r--r--drivers/phy/qualcomm/Makefile1
-rw-r--r--drivers/phy/qualcomm/phy-qcom-edp.c674
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.c313
-rw-r--r--drivers/phy/qualcomm/phy-qcom-qmp.h104
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c3
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-usb2.c260
-rw-r--r--drivers/phy/socionext/Kconfig2
-rw-r--r--drivers/phy/socionext/phy-uniphier-ahci.c201
-rw-r--r--drivers/phy/socionext/phy-uniphier-pcie.c70
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3hs.c4
-rw-r--r--drivers/phy/socionext/phy-uniphier-usb3ss.c14
-rw-r--r--drivers/phy/st/phy-stm32-usbphyc.c12
-rw-r--r--drivers/phy/tegra/xusb.c2
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c1
-rw-r--r--drivers/phy/ti/phy-omap-control.c6
-rw-r--r--drivers/phy/xilinx/phy-zynqmp.c11
-rw-r--r--drivers/pinctrl/Kconfig450
-rw-r--r--drivers/pinctrl/Makefile46
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c1
-rw-r--r--drivers/pinctrl/aspeed/Kconfig2
-rw-r--r--drivers/pinctrl/bcm/Kconfig3
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c38
-rw-r--r--drivers/pinctrl/bcm/pinctrl-iproc-gpio.c1
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns.c163
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-gpio.c1
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-lochnagar.c3
-rw-r--r--drivers/pinctrl/cirrus/pinctrl-madera-core.c5
-rw-r--r--drivers/pinctrl/freescale/Kconfig7
-rw-r--r--drivers/pinctrl/freescale/Makefile1
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c17
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imxrt1050.c349
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c131
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c64
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-moore.c7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-paris.c7
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-37xx.c85
-rw-r--r--drivers/pinctrl/pinconf-generic.c2
-rw-r--r--drivers/pinctrl/pinctrl-amd.c3
-rw-r--r--drivers/pinctrl/pinctrl-apple-gpio.c105
-rw-r--r--drivers/pinctrl/pinctrl-as3722.c13
-rw-r--r--drivers/pinctrl/pinctrl-at91-pio4.c1
-rw-r--r--drivers/pinctrl/pinctrl-at91.c1
-rw-r--r--drivers/pinctrl/pinctrl-da9062.c6
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c5
-rw-r--r--drivers/pinctrl/pinctrl-keembay.c87
-rw-r--r--drivers/pinctrl/pinctrl-max77620.c11
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08.c1
-rw-r--r--drivers/pinctrl/pinctrl-microchip-sgpio.c46
-rw-r--r--drivers/pinctrl/pinctrl-ocelot.c486
-rw-r--r--drivers/pinctrl/pinctrl-oxnas.c1
-rw-r--r--drivers/pinctrl/pinctrl-pic32.c2
-rw-r--r--drivers/pinctrl/pinctrl-rk805.c12
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c161
-rw-r--r--drivers/pinctrl/pinctrl-st.c116
-rw-r--r--drivers/pinctrl/pinctrl-stmfx.c1
-rw-r--r--drivers/pinctrl/pinctrl-sx150x.c3
-rw-r--r--drivers/pinctrl/pinctrl-thunderbay.c1302
-rw-r--r--drivers/pinctrl/pinctrl-xway.c1
-rw-r--r--drivers/pinctrl/pinmux.c2
-rw-r--r--drivers/pinctrl/pinmux.h4
-rw-r--r--drivers/pinctrl/qcom/Kconfig18
-rw-r--r--drivers/pinctrl/qcom/Makefile2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c16
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h10
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sc7280.c75
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sdx65.c967
-rw-r--r--drivers/pinctrl/qcom/pinctrl-sm8450.c1689
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c1
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c1
-rw-r--r--drivers/pinctrl/renesas/pfc-r8a779a0.c4
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza1.c6
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rza2.c1
-rw-r--r--drivers/pinctrl/renesas/pinctrl-rzg2l.c310
-rw-r--r--drivers/pinctrl/renesas/pinctrl.c9
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos-arm64.c81
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c11
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.h1
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c149
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c10
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h12
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c8
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c8
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c1
-rw-r--r--drivers/platform/mips/Kconfig6
-rw-r--r--drivers/platform/mips/Makefile1
-rw-r--r--drivers/platform/mips/ls2k-reset.c53
-rw-r--r--drivers/platform/surface/Kconfig1
-rw-r--r--drivers/platform/x86/amd-pmc.c15
-rw-r--r--drivers/platform/x86/asus-tf103c-dock.c4
-rw-r--r--drivers/platform/x86/asus-wmi.c2
-rw-r--r--drivers/platform/x86/intel/Kconfig11
-rw-r--r--drivers/platform/x86/intel/Makefile2
-rw-r--r--drivers/platform/x86/intel/crystal_cove_charger.c26
-rw-r--r--drivers/platform/x86/intel/int3472/tps68470_board_data.c3
-rw-r--r--drivers/platform/x86/intel/pmt/Kconfig4
-rw-r--r--drivers/platform/x86/intel/pmt/class.c21
-rw-r--r--drivers/platform/x86/intel/pmt/class.h5
-rw-r--r--drivers/platform/x86/intel/pmt/crashlog.c47
-rw-r--r--drivers/platform/x86/intel/pmt/telemetry.c46
-rw-r--r--drivers/platform/x86/intel/speed_select_if/isst_if_common.c97
-rw-r--r--drivers/platform/x86/intel/vsec.c408
-rw-r--r--drivers/platform/x86/intel/vsec.h43
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c30
-rw-r--r--drivers/platform/x86/toshiba_acpi.c16
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c24
-rw-r--r--drivers/platform/x86/x86-android-tablets.c105
-rw-r--r--drivers/pnp/isapnp/proc.c2
-rw-r--r--drivers/pnp/pnpbios/core.c6
-rw-r--r--drivers/pnp/pnpbios/proc.c4
-rw-r--r--drivers/power/supply/bq256xx_charger.c3
-rw-r--r--drivers/power/supply/cw2015_battery.c2
-rw-r--r--drivers/pwm/core.c139
-rw-r--r--drivers/pwm/pwm-img.c35
-rw-r--r--drivers/pwm/pwm-twl.c62
-rw-r--r--drivers/pwm/pwm-vt8500.c57
-rw-r--r--drivers/rapidio/switches/Kconfig11
-rw-r--r--drivers/rapidio/switches/Makefile2
-rw-r--r--drivers/rapidio/switches/tsi568.c195
-rw-r--r--drivers/rapidio/switches/tsi57x.c365
-rw-r--r--drivers/regulator/core.c13
-rw-r--r--drivers/regulator/max20086-regulator.c3
-rw-r--r--drivers/remoteproc/Kconfig15
-rw-r--r--drivers/remoteproc/Makefile1
-rw-r--r--drivers/remoteproc/imx_rproc.c9
-rw-r--r--drivers/remoteproc/ingenic_rproc.c5
-rw-r--r--drivers/remoteproc/mtk_scp_ipi.c4
-rw-r--r--drivers/remoteproc/qcom_pil_info.c2
-rw-r--r--drivers/remoteproc/qcom_q6v5.c1
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c38
-rw-r--r--drivers/remoteproc/rcar_rproc.c224
-rw-r--r--drivers/remoteproc/remoteproc_core.c4
-rw-r--r--drivers/remoteproc/remoteproc_coredump.c2
-rw-r--r--drivers/remoteproc/st_slim_rproc.c2
-rw-r--r--drivers/remoteproc/stm32_rproc.c2
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c1
-rw-r--r--drivers/remoteproc/ti_k3_r5_remoteproc.c5
-rw-r--r--drivers/rpmsg/qcom_glink_native.c2
-rw-r--r--drivers/rpmsg/qcom_smd.c2
-rw-r--r--drivers/rpmsg/rpmsg_char.c29
-rw-r--r--drivers/rpmsg/rpmsg_core.c44
-rw-r--r--drivers/rpmsg/virtio_rpmsg_bus.c4
-rw-r--r--drivers/rtc/Kconfig24
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/dev.c6
-rw-r--r--drivers/rtc/rtc-cmos.c201
-rw-r--r--drivers/rtc/rtc-da9063.c16
-rw-r--r--drivers/rtc/rtc-ftrtc010.c8
-rw-r--r--drivers/rtc/rtc-gamecube.c377
-rw-r--r--drivers/rtc/rtc-mc146818-lib.c182
-rw-r--r--drivers/rtc/rtc-pcf2127.c2
-rw-r--r--drivers/rtc/rtc-pcf85063.c97
-rw-r--r--drivers/rtc/rtc-pxa.c4
-rw-r--r--drivers/rtc/rtc-rs5c372.c185
-rw-r--r--drivers/rtc/rtc-rv8803.c6
-rw-r--r--drivers/rtc/rtc-sunplus.c362
-rw-r--r--drivers/s390/block/Kconfig2
-rw-r--r--drivers/s390/block/dcssblk.c26
-rw-r--r--drivers/s390/char/keyboard.h4
-rw-r--r--drivers/s390/cio/device.c2
-rw-r--r--drivers/s390/scsi/zfcp_fc.c13
-rw-r--r--drivers/scsi/3w-sas.c4
-rw-r--r--drivers/scsi/53c700.c1
-rw-r--r--drivers/scsi/a100u2w.c2
-rw-r--r--drivers/scsi/aacraid/aachba.c2
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c6
-rw-r--r--drivers/scsi/atp870u.c1
-rw-r--r--drivers/scsi/bfa/bfad.c6
-rw-r--r--drivers/scsi/bfa/bfad_attr.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c41
-rw-r--r--drivers/scsi/ch.c8
-rw-r--r--drivers/scsi/dc395x.c3
-rw-r--r--drivers/scsi/elx/efct/efct_driver.c13
-rw-r--r--drivers/scsi/elx/efct/efct_hw.c10
-rw-r--r--drivers/scsi/elx/efct/efct_io.c2
-rw-r--r--drivers/scsi/elx/libefc/efc_cmds.c4
-rw-r--r--drivers/scsi/elx/libefc/efc_els.c12
-rw-r--r--drivers/scsi/elx/libefc_sli/sli4.c14
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h7
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c406
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c37
-rw-r--r--drivers/scsi/hosts.c15
-rw-r--r--drivers/scsi/hpsa.c2
-rw-r--r--drivers/scsi/initio.c5
-rw-r--r--drivers/scsi/libsas/sas_discover.c1
-rw-r--r--drivers/scsi/libsas/sas_event.c77
-rw-r--r--drivers/scsi/libsas/sas_expander.c3
-rw-r--r--drivers/scsi/libsas/sas_init.c49
-rw-r--r--drivers/scsi/libsas/sas_internal.h2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c7
-rw-r--r--drivers/scsi/lpfc/lpfc.h111
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c69
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c27
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c42
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h29
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c47
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c11
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c56
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c83
-rw-r--r--drivers/scsi/megaraid.c84
-rw-r--r--drivers/scsi/megaraid/megaraid_mbox.c1
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c27
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h603
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_image.h59
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_init.h15
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_ioc.h128
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_pci.h44
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_sas.h14
-rw-r--r--drivers/scsi/mpi3mr/mpi/mpi30_transport.h31
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr.h126
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_debug.h133
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_fw.c1428
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c771
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c21
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.h4
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c87
-rw-r--r--drivers/scsi/mvsas/mv_sas.c5
-rw-r--r--drivers/scsi/myrb.c2
-rw-r--r--drivers/scsi/myrs.c13
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c3
-rw-r--r--drivers/scsi/pm8001/Makefile7
-rw-r--r--drivers/scsi/pm8001/pm8001_ctl.c24
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c51
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.c28
-rw-r--r--drivers/scsi/pm8001/pm8001_sas.h3
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c92
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.h6
-rw-r--r--drivers/scsi/pm8001/pm80xx_tracepoints.c10
-rw-r--r--drivers/scsi/pm8001/pm80xx_tracepoints.h113
-rw-r--r--drivers/scsi/pmcraid.c5
-rw-r--r--drivers/scsi/qedf/qedf_io.c1
-rw-r--r--drivers/scsi/qedf/qedf_main.c9
-rw-r--r--drivers/scsi/qedi/qedi_fw.c6
-rw-r--r--drivers/scsi/qedi/qedi_main.c8
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c5
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c4
-rw-r--r--drivers/scsi/scsi.c5
-rw-r--r--drivers/scsi/scsi_bsg.c2
-rw-r--r--drivers/scsi/scsi_debugfs.c1
-rw-r--r--drivers/scsi/scsi_error.c114
-rw-r--r--drivers/scsi/scsi_ioctl.c43
-rw-r--r--drivers/scsi/scsi_lib.c55
-rw-r--r--drivers/scsi/scsi_logging.c4
-rw-r--r--drivers/scsi/scsi_pm.c3
-rw-r--r--drivers/scsi/scsi_priv.h3
-rw-r--r--drivers/scsi/scsi_proc.c4
-rw-r--r--drivers/scsi/scsi_scan.c59
-rw-r--r--drivers/scsi/scsi_sysfs.c7
-rw-r--r--drivers/scsi/sd.c30
-rw-r--r--drivers/scsi/sd_zbc.c14
-rw-r--r--drivers/scsi/sg.c41
-rw-r--r--drivers/scsi/snic/snic_disc.c2
-rw-r--r--drivers/scsi/sr.c19
-rw-r--r--drivers/scsi/sr_vendor.c4
-rw-r--r--drivers/scsi/st.c4
-rw-r--r--drivers/scsi/storvsc_drv.c54
-rw-r--r--drivers/scsi/ufs/Kconfig15
-rw-r--r--drivers/scsi/ufs/tc-dwc-g210-pci.c1
-rw-r--r--drivers/scsi/ufs/ufs-exynos.c4
-rw-r--r--drivers/scsi/ufs/ufs-hisi.c8
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c2
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c9
-rw-r--r--drivers/scsi/ufs/ufshcd.c358
-rw-r--r--drivers/scsi/ufs/ufshcd.h20
-rw-r--r--drivers/scsi/ufs/ufshci.h3
-rw-r--r--drivers/scsi/ufs/ufshpb.c5
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c7
-rw-r--r--drivers/soc/canaan/Kconfig1
-rw-r--r--drivers/soc/fsl/dpio/dpio-driver.c8
-rw-r--r--drivers/soc/fsl/qbman/bman_portal.c2
-rw-r--r--drivers/soc/fsl/qbman/qman_portal.c2
-rw-r--r--drivers/soc/samsung/Kconfig2
-rw-r--r--drivers/soc/ti/k3-ringacc.c10
-rw-r--r--drivers/soc/ti/ti_sci_inta_msi.c94
-rw-r--r--drivers/soc/xilinx/Kconfig10
-rw-r--r--drivers/soc/xilinx/Makefile1
-rw-r--r--drivers/soc/xilinx/xlnx_event_manager.c600
-rw-r--r--drivers/soc/xilinx/zynqmp_power.c54
-rw-r--r--drivers/soundwire/cadence_master.c36
-rw-r--r--drivers/soundwire/cadence_master.h14
-rw-r--r--drivers/soundwire/intel.c261
-rw-r--r--drivers/soundwire/intel_init.c2
-rw-r--r--drivers/soundwire/qcom.c14
-rw-r--r--drivers/soundwire/stream.c4
-rw-r--r--drivers/spi/spi-bcm-qspi.c2
-rw-r--r--drivers/spi/spi-meson-spicc.c5
-rw-r--r--drivers/spi/spi-mt65xx.c2
-rw-r--r--drivers/spi/spi-pic32.c2
-rw-r--r--drivers/spi/spi-stm32-qspi.c47
-rw-r--r--drivers/spi/spi-stm32.c7
-rw-r--r--drivers/spi/spi-uniphier.c18
-rw-r--r--drivers/spmi/Kconfig11
-rw-r--r--drivers/spmi/Makefile1
-rw-r--r--drivers/spmi/spmi-mtk-pmif.c542
-rw-r--r--drivers/spmi/spmi-pmic-arb.c193
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c11
-rw-r--r--drivers/staging/fbtft/Kconfig6
-rw-r--r--drivers/staging/fbtft/Makefile1
-rw-r--r--drivers/staging/fbtft/fb_sh1106.c7
-rw-r--r--drivers/staging/fbtft/fb_watterott.c302
-rw-r--r--drivers/staging/fbtft/fbtft.h46
-rw-r--r--drivers/staging/greybus/audio_manager_module.c3
-rw-r--r--drivers/staging/greybus/audio_topology.c107
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_input_system_local.h2
-rw-r--r--drivers/staging/most/dim2/dim2.c28
-rw-r--r--drivers/staging/mt7621-dma/Kconfig7
-rw-r--r--drivers/staging/mt7621-dma/Makefile4
-rw-r--r--drivers/staging/mt7621-dma/TODO5
-rw-r--r--drivers/staging/mt7621-dma/hsdma-mt7621.c758
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi38
-rw-r--r--drivers/staging/pi433/pi433_if.c2
-rw-r--r--drivers/staging/pi433/rf69.c62
-rw-r--r--drivers/staging/pi433/rf69_enum.h12
-rw-r--r--drivers/staging/r8188eu/Makefile3
-rw-r--r--drivers/staging/r8188eu/core/rtw_ap.c11
-rw-r--r--drivers/staging/r8188eu/core/rtw_br_ext.c11
-rw-r--r--drivers/staging/r8188eu/core/rtw_cmd.c36
-rw-r--r--drivers/staging/r8188eu/core/rtw_efuse.c221
-rw-r--r--drivers/staging/r8188eu/core/rtw_ieee80211.c73
-rw-r--r--drivers/staging/r8188eu/core/rtw_ioctl_set.c19
-rw-r--r--drivers/staging/r8188eu/core/rtw_led.c285
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme.c16
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c102
-rw-r--r--drivers/staging/r8188eu/core/rtw_p2p.c6
-rw-r--r--drivers/staging/r8188eu/core/rtw_pwrctrl.c22
-rw-r--r--drivers/staging/r8188eu/core/rtw_rf.c2
-rw-r--r--drivers/staging/r8188eu/core/rtw_security.c22
-rw-r--r--drivers/staging/r8188eu/core/rtw_sta_mgt.c2
-rw-r--r--drivers/staging/r8188eu/core/rtw_wlan_util.c23
-rw-r--r--drivers/staging/r8188eu/core/rtw_xmit.c2
-rw-r--r--drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c100
-rw-r--r--drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c27
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c1
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c1
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c1
-rw-r--r--drivers/staging/r8188eu/hal/HalPhyRf_8188e.c401
-rw-r--r--drivers/staging/r8188eu/hal/HalPwrSeqCmd.c84
-rw-r--r--drivers/staging/r8188eu/hal/odm.c903
-rw-r--r--drivers/staging/r8188eu/hal/odm_HWConfig.c20
-rw-r--r--drivers/staging/r8188eu/hal/odm_RTL8188E.c176
-rw-r--r--drivers/staging/r8188eu/hal/odm_RegConfig8188E.c60
-rw-r--r--drivers/staging/r8188eu/hal/odm_debug.c2
-rw-r--r--drivers/staging/r8188eu/hal/odm_interface.c93
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_cmd.c13
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_dm.c25
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_hal_init.c1010
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_phycfg.c197
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rf6052.c65
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c2
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_sreset.c37
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_led.c94
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_xmit.c4
-rw-r--r--drivers/staging/r8188eu/hal/usb_halinit.c263
-rw-r--r--drivers/staging/r8188eu/hal/usb_ops_linux.c32
-rw-r--r--drivers/staging/r8188eu/include/Hal8188EPhyCfg.h60
-rw-r--r--drivers/staging/r8188eu/include/Hal8188EPwrSeq.h148
-rw-r--r--drivers/staging/r8188eu/include/HalPhyRf_8188e.h5
-rw-r--r--drivers/staging/r8188eu/include/HalPwrSeqCmd.h53
-rw-r--r--drivers/staging/r8188eu/include/HalVerDef.h13
-rw-r--r--drivers/staging/r8188eu/include/drv_types.h19
-rw-r--r--drivers/staging/r8188eu/include/hal_intf.h30
-rw-r--r--drivers/staging/r8188eu/include/ieee80211.h3
-rw-r--r--drivers/staging/r8188eu/include/ieee80211_ext.h271
-rw-r--r--drivers/staging/r8188eu/include/odm.h216
-rw-r--r--drivers/staging/r8188eu/include/odm_RTL8188E.h6
-rw-r--r--drivers/staging/r8188eu/include/odm_RegConfig8188E.h3
-rw-r--r--drivers/staging/r8188eu/include/odm_interface.h59
-rw-r--r--drivers/staging/r8188eu/include/odm_precomp.h54
-rw-r--r--drivers/staging/r8188eu/include/osdep_service.h32
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_dm.h7
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_hal.h100
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_led.h16
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_recv.h3
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_spec.h61
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_sreset.h13
-rw-r--r--drivers/staging/r8188eu/include/rtw_cmd.h1
-rw-r--r--drivers/staging/r8188eu/include/rtw_debug.h12
-rw-r--r--drivers/staging/r8188eu/include/rtw_eeprom.h31
-rw-r--r--drivers/staging/r8188eu/include/rtw_efuse.h104
-rw-r--r--drivers/staging/r8188eu/include/rtw_io.h2
-rw-r--r--drivers/staging/r8188eu/include/rtw_led.h116
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme.h12
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme_ext.h5
-rw-r--r--drivers/staging/r8188eu/include/rtw_pwrctrl.h7
-rw-r--r--drivers/staging/r8188eu/include/rtw_recv.h1
-rw-r--r--drivers/staging/r8188eu/include/rtw_rf.h34
-rw-r--r--drivers/staging/r8188eu/include/usb_osintf.h1
-rw-r--r--drivers/staging/r8188eu/include/wifi.h59
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c235
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c69
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c42
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_ops_linux.c6
-rw-r--r--drivers/staging/ralink-gdma/Kconfig8
-rw-r--r--drivers/staging/ralink-gdma/Makefile4
-rw-r--r--drivers/staging/ralink-gdma/ralink-gdma.c917
-rw-r--r--drivers/staging/rtl8192e/rtllib.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c17
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c6
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c4
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c13
-rw-r--r--drivers/staging/rtl8712/rtl8712_efuse.c28
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c3
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_efuse.c14
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c5
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c2
-rw-r--r--drivers/staging/rts5208/rtsx.c16
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c2
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c8
-rw-r--r--drivers/staging/vc04_services/bcm2835-audio/bcm2835.c12
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c89
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h12
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/controls.c143
-rw-r--r--drivers/staging/vc04_services/interface/TODO8
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c22
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-common.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-encodings.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg-port.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c2
-rw-r--r--drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h2
-rw-r--r--drivers/staging/vt6655/card.c36
-rw-r--r--drivers/staging/vt6655/channel.c2
-rw-r--r--drivers/staging/vt6655/device.h10
-rw-r--r--drivers/staging/vt6655/device_main.c12
-rw-r--r--drivers/staging/vt6655/dpc.c2
-rw-r--r--drivers/staging/vt6655/rf.c54
-rw-r--r--drivers/staging/vt6655/rf.h2
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c8
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c3
-rw-r--r--drivers/target/target_core_pscsi.c2
-rw-r--r--drivers/tee/optee/core.c1
-rw-r--r--drivers/tee/optee/ffa_abi.c92
-rw-r--r--drivers/tee/optee/notif.c2
-rw-r--r--drivers/tee/optee/optee_private.h5
-rw-r--r--drivers/tee/optee/smc_abi.c58
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c1
-rw-r--r--drivers/thunderbolt/acpi.c15
-rw-r--r--drivers/thunderbolt/icm.c7
-rw-r--r--drivers/thunderbolt/lc.c24
-rw-r--r--drivers/thunderbolt/path.c42
-rw-r--r--drivers/thunderbolt/retimer.c28
-rw-r--r--drivers/thunderbolt/switch.c493
-rw-r--r--drivers/thunderbolt/tb.c91
-rw-r--r--drivers/thunderbolt/tb.h106
-rw-r--r--drivers/thunderbolt/tb_msgs.h47
-rw-r--r--drivers/thunderbolt/tb_regs.h113
-rw-r--r--drivers/thunderbolt/tmu.c337
-rw-r--r--drivers/thunderbolt/tunnel.c27
-rw-r--r--drivers/thunderbolt/tunnel.h9
-rw-r--r--drivers/thunderbolt/usb4.c52
-rw-r--r--drivers/thunderbolt/xdomain.c16
-rw-r--r--drivers/tty/goldfish.c12
-rw-r--r--drivers/tty/mips_ejtag_fdc.c22
-rw-r--r--drivers/tty/moxa.c4
-rw-r--r--drivers/tty/mxser.c306
-rw-r--r--drivers/tty/n_gsm.c15
-rw-r--r--drivers/tty/n_hdlc.c5
-rw-r--r--drivers/tty/n_tty.c704
-rw-r--r--drivers/tty/rpmsg_tty.c40
-rw-r--r--drivers/tty/serial/8250/8250.h12
-rw-r--r--drivers/tty/serial/8250/8250_alpha.c21
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c23
-rw-r--r--drivers/tty/serial/8250/8250_core.c9
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c2
-rw-r--r--drivers/tty/serial/8250/8250_of.c11
-rw-r--r--drivers/tty/serial/8250/8250_pci.c507
-rw-r--r--drivers/tty/serial/8250/8250_pericom.c214
-rw-r--r--drivers/tty/serial/8250/8250_port.c4
-rw-r--r--drivers/tty/serial/8250/Kconfig8
-rw-r--r--drivers/tty/serial/8250/Makefile3
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/altera_jtaguart.c11
-rw-r--r--drivers/tty/serial/altera_uart.c9
-rw-r--r--drivers/tty/serial/amba-pl010.c3
-rw-r--r--drivers/tty/serial/amba-pl011.c84
-rw-r--r--drivers/tty/serial/ar933x_uart.c12
-rw-r--r--drivers/tty/serial/atmel_serial.c16
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c10
-rw-r--r--drivers/tty/serial/fsl_lpuart.c12
-rw-r--r--drivers/tty/serial/imx.c13
-rw-r--r--drivers/tty/serial/lantiq.c34
-rw-r--r--drivers/tty/serial/liteuart.c2
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c2
-rw-r--r--drivers/tty/serial/meson_uart.c45
-rw-r--r--drivers/tty/serial/msm_serial.c15
-rw-r--r--drivers/tty/serial/pmac_zilog.c12
-rw-r--r--drivers/tty/serial/pxa.c12
-rw-r--r--drivers/tty/serial/samsung_tty.c78
-rw-r--r--drivers/tty/serial/serial_core.c49
-rw-r--r--drivers/tty/serial/sh-sci.c91
-rw-r--r--drivers/tty/serial/stm32-usart.c88
-rw-r--r--drivers/tty/serial/stm32-usart.h2
-rw-r--r--drivers/tty/serial/sunsu.c3
-rw-r--r--drivers/tty/serial/uartlite.c2
-rw-r--r--drivers/tty/serial/vt8500_serial.c12
-rw-r--r--drivers/tty/tty_buffer.c279
-rw-r--r--drivers/tty/tty_io.c921
-rw-r--r--drivers/tty/tty_ldisc.c292
-rw-r--r--drivers/tty/tty_ldsem.c2
-rw-r--r--drivers/tty/tty_port.c223
-rw-r--r--drivers/tty/vt/keyboard.c18
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/tty/vt/vt_ioctl.c3
-rw-r--r--drivers/uio/uio.c8
-rw-r--r--drivers/uio/uio_dmem_genirq.c6
-rw-r--r--drivers/usb/atm/usbatm.c2
-rw-r--r--drivers/usb/cdns3/cdns3-plat.c14
-rw-r--r--drivers/usb/cdns3/cdnsp-gadget.c2
-rw-r--r--drivers/usb/cdns3/core.h6
-rw-r--r--drivers/usb/cdns3/drd.c6
-rw-r--r--drivers/usb/chipidea/core.c1
-rw-r--r--drivers/usb/chipidea/otg.c5
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/common/debug.c1
-rw-r--r--drivers/usb/common/ulpi.c17
-rw-r--r--drivers/usb/core/driver.c3
-rw-r--r--drivers/usb/core/generic.c2
-rw-r--r--drivers/usb/core/hcd.c25
-rw-r--r--drivers/usb/core/hub.c37
-rw-r--r--drivers/usb/core/port.c35
-rw-r--r--drivers/usb/core/urb.c12
-rw-r--r--drivers/usb/core/usb.c46
-rw-r--r--drivers/usb/dwc2/core.h6
-rw-r--r--drivers/usb/dwc2/drd.c51
-rw-r--r--drivers/usb/dwc2/gadget.c19
-rw-r--r--drivers/usb/dwc2/hcd.c7
-rw-r--r--drivers/usb/dwc2/platform.c63
-rw-r--r--drivers/usb/dwc3/core.h9
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c17
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c15
-rw-r--r--drivers/usb/dwc3/dwc3-xilinx.c25
-rw-r--r--drivers/usb/dwc3/gadget.c72
-rw-r--r--drivers/usb/dwc3/host.c45
-rw-r--r--drivers/usb/gadget/composite.c42
-rw-r--r--drivers/usb/gadget/configfs.c39
-rw-r--r--drivers/usb/gadget/function/f_fs.c60
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c2
-rw-r--r--drivers/usb/gadget/function/f_midi.c48
-rw-r--r--drivers/usb/gadget/function/f_sourcesink.c1
-rw-r--r--drivers/usb/gadget/function/f_uac2.c4
-rw-r--r--drivers/usb/gadget/function/rndis.c13
-rw-r--r--drivers/usb/gadget/function/u_audio.c28
-rw-r--r--drivers/usb/gadget/legacy/inode.c18
-rw-r--r--drivers/usb/gadget/legacy/raw_gadget.c2
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/dev.c19
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/ep0.c7
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/hub.c47
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/vhub.h1
-rw-r--r--drivers/usb/gadget/udc/at91_udc.c69
-rw-r--r--drivers/usb/gadget/udc/at91_udc.h8
-rw-r--r--drivers/usb/gadget/udc/bcm63xx_udc.c8
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c1
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c4
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c2
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c56
-rw-r--r--drivers/usb/host/Kconfig11
-rw-r--r--drivers/usb/host/Makefile1
-rw-r--r--drivers/usb/host/ehci-brcm.c6
-rw-r--r--drivers/usb/host/fotg210-hcd.c11
-rw-r--r--drivers/usb/host/ohci-omap.c2
-rw-r--r--drivers/usb/host/ohci-s3c2410.c10
-rw-r--r--drivers/usb/host/ohci-spear.c2
-rw-r--r--drivers/usb/host/ohci-tmio.c5
-rw-r--r--drivers/usb/host/u132-hcd.c1
-rw-r--r--drivers/usb/host/uhci-platform.c9
-rw-r--r--drivers/usb/host/xen-hcd.c1609
-rw-r--r--drivers/usb/host/xhci-mtk.c16
-rw-r--r--drivers/usb/host/xhci-plat.c3
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/isp1760/isp1760-if.c16
-rw-r--r--drivers/usb/misc/ehset.c58
-rw-r--r--drivers/usb/misc/ftdi-elan.c1
-rw-r--r--drivers/usb/misc/usb251xb.c4
-rw-r--r--drivers/usb/musb/am35x.c2
-rw-r--r--drivers/usb/musb/da8xx.c20
-rw-r--r--drivers/usb/musb/jz4740.c1
-rw-r--r--drivers/usb/musb/mediatek.c2
-rw-r--r--drivers/usb/musb/musb_dsps.c15
-rw-r--r--drivers/usb/musb/omap2430.c23
-rw-r--r--drivers/usb/musb/ux500.c18
-rw-r--r--drivers/usb/phy/phy-mv-usb.c5
-rw-r--r--drivers/usb/renesas_usbhs/common.c14
-rw-r--r--drivers/usb/renesas_usbhs/common.h1
-rw-r--r--drivers/usb/renesas_usbhs/mod.c14
-rw-r--r--drivers/usb/serial/ch341.c1
-rw-r--r--drivers/usb/serial/cp210x.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h3
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/storage/sierra_ms.c2
-rw-r--r--drivers/usb/storage/transport.c2
-rw-r--r--drivers/usb/storage/unusual_devs.h10
-rw-r--r--drivers/usb/typec/Makefile3
-rw-r--r--drivers/usb/typec/class.c2
-rw-r--r--drivers/usb/typec/class.h10
-rw-r--r--drivers/usb/typec/port-mapper.c281
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c26
-rw-r--r--drivers/usb/typec/tcpm/tcpci.h1
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c7
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c16
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c2
-rw-r--r--drivers/usb/usbip/usbip_event.c1
-rw-r--r--drivers/vdpa/alibaba/eni_vdpa.c28
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c41
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h9
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c40
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c160
-rw-r--r--drivers/vdpa/vdpa.c163
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c21
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c2
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c8
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c19
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c16
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c4
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c15
-rw-r--r--drivers/vfio/vfio_iommu_type1.c2
-rw-r--r--drivers/vhost/test.c1
-rw-r--r--drivers/vhost/vdpa.c12
-rw-r--r--drivers/video/console/Kconfig20
-rw-r--r--drivers/video/fbdev/core/bitblit.c16
-rw-r--r--drivers/video/fbdev/core/fbcon.c554
-rw-r--r--drivers/video/fbdev/core/fbcon.h72
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c28
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c28
-rw-r--r--drivers/video/fbdev/core/fbcon_rotate.h9
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c37
-rw-r--r--drivers/video/fbdev/core/fbmem.c2
-rw-r--r--drivers/video/fbdev/core/tileblit.c16
-rw-r--r--drivers/video/fbdev/hyperv_fb.c16
-rw-r--r--drivers/video/fbdev/skeletonfb.c12
-rw-r--r--drivers/video/fbdev/vga16fb.c24
-rw-r--r--drivers/virt/acrn/ioreq.c3
-rw-r--r--drivers/virt/nitro_enclaves/Kconfig9
-rw-r--r--drivers/virt/nitro_enclaves/ne_misc_dev.c174
-rw-r--r--drivers/virt/nitro_enclaves/ne_misc_dev_test.c157
-rw-r--r--drivers/virt/nitro_enclaves/ne_pci_dev.c1
-rw-r--r--drivers/virtio/virtio.c6
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/virtio/virtio_input.c2
-rw-r--r--drivers/virtio/virtio_mem.c114
-rw-r--r--drivers/virtio/virtio_pci_legacy.c2
-rw-r--r--drivers/virtio/virtio_pci_legacy_dev.c4
-rw-r--r--drivers/virtio/virtio_pci_modern_dev.c2
-rw-r--r--drivers/virtio/virtio_ring.c4
-rw-r--r--drivers/virtio/virtio_vdpa.c7
-rw-r--r--drivers/w1/slaves/w1_ds28e04.c26
-rw-r--r--drivers/w1/slaves/w1_therm.c7
-rw-r--r--drivers/watchdog/Kconfig98
-rw-r--r--drivers/watchdog/Makefile4
-rw-r--r--drivers/watchdog/apple_wdt.c226
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c317
-rw-r--r--drivers/watchdog/bcm7038_wdt.c15
-rw-r--r--drivers/watchdog/da9063_wdt.c12
-rw-r--r--drivers/watchdog/davinci_wdt.c2
-rw-r--r--drivers/watchdog/f71808e_wdt.c10
-rw-r--r--drivers/watchdog/meson_gxbb_wdt.c1
-rw-r--r--drivers/watchdog/msc313e_wdt.c4
-rw-r--r--drivers/watchdog/mtk_wdt.c2
-rw-r--r--drivers/watchdog/realtek_otto_wdt.c384
-rw-r--r--drivers/watchdog/rzg2l_wdt.c263
-rw-r--r--drivers/watchdog/s3c2410_wdt.c338
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/balloon.c20
-rw-r--r--drivers/xen/gntdev.c6
-rw-r--r--drivers/xen/pci.c2
-rw-r--r--drivers/xen/unpopulated-alloc.c87
-rw-r--r--drivers/zorro/proc.c2
-rw-r--r--fs/9p/cache.c195
-rw-r--r--fs/9p/cache.h25
-rw-r--r--fs/9p/fid.c9
-rw-r--r--fs/9p/v9fs.c17
-rw-r--r--fs/9p/v9fs.h13
-rw-r--r--fs/9p/vfs_addr.c62
-rw-r--r--fs/9p/vfs_dir.c13
-rw-r--r--fs/9p/vfs_file.c9
-rw-r--r--fs/9p/vfs_inode.c26
-rw-r--r--fs/9p/vfs_inode_dotl.c32
-rw-r--r--fs/9p/vfs_super.c3
-rw-r--r--fs/Kconfig12
-rw-r--r--fs/Makefile4
-rw-r--r--fs/adfs/inode.c4
-rw-r--r--fs/afs/Makefile3
-rw-r--r--fs/afs/cache.c68
-rw-r--r--fs/afs/cell.c12
-rw-r--r--fs/afs/file.c38
-rw-r--r--fs/afs/inode.c101
-rw-r--r--fs/afs/internal.h37
-rw-r--r--fs/afs/main.c14
-rw-r--r--fs/afs/proc.c6
-rw-r--r--fs/afs/super.c1
-rw-r--r--fs/afs/volume.c29
-rw-r--r--fs/afs/write.c88
-rw-r--r--fs/aio.c31
-rw-r--r--fs/binfmt_elf.c6
-rw-r--r--fs/btrfs/Kconfig3
-rw-r--r--fs/btrfs/block-group.c39
-rw-r--r--fs/btrfs/ctree.h6
-rw-r--r--fs/btrfs/extent_io.c10
-rw-r--r--fs/btrfs/ioctl.c102
-rw-r--r--fs/btrfs/qgroup.c21
-rw-r--r--fs/btrfs/send.c4
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/btrfs/transaction.c36
-rw-r--r--fs/btrfs/transaction.h2
-rw-r--r--fs/btrfs/tree-checker.c15
-rw-r--r--fs/btrfs/tree-log.c23
-rw-r--r--fs/buffer.c23
-rw-r--r--fs/cachefiles/Kconfig7
-rw-r--r--fs/cachefiles/Makefile6
-rw-r--r--fs/cachefiles/bind.c278
-rw-r--r--fs/cachefiles/cache.c383
-rw-r--r--fs/cachefiles/daemon.c189
-rw-r--r--fs/cachefiles/error_inject.c46
-rw-r--r--fs/cachefiles/interface.c747
-rw-r--r--fs/cachefiles/internal.h272
-rw-r--r--fs/cachefiles/io.c389
-rw-r--r--fs/cachefiles/key.c201
-rw-r--r--fs/cachefiles/main.c22
-rw-r--r--fs/cachefiles/namei.c1229
-rw-r--r--fs/cachefiles/rdwr.c972
-rw-r--r--fs/cachefiles/security.c2
-rw-r--r--fs/cachefiles/volume.c139
-rw-r--r--fs/cachefiles/xattr.c421
-rw-r--r--fs/ceph/addr.c107
-rw-r--r--fs/ceph/cache.c218
-rw-r--r--fs/ceph/cache.h97
-rw-r--r--fs/ceph/caps.c61
-rw-r--r--fs/ceph/file.c46
-rw-r--r--fs/ceph/inode.c22
-rw-r--r--fs/ceph/metric.c2
-rw-r--r--fs/ceph/quota.c17
-rw-r--r--fs/ceph/super.c179
-rw-r--r--fs/ceph/super.h31
-rw-r--r--fs/cifs/Makefile2
-rw-r--r--fs/cifs/cache.c105
-rw-r--r--fs/cifs/cifs_debug.c8
-rw-r--r--fs/cifs/cifs_spnego.c4
-rw-r--r--fs/cifs/cifs_spnego.h3
-rw-r--r--fs/cifs/cifs_swn.c9
-rw-r--r--fs/cifs/cifsacl.c9
-rw-r--r--fs/cifs/cifsencrypt.c6
-rw-r--r--fs/cifs/cifsfs.c25
-rw-r--r--fs/cifs/cifsfs.h3
-rw-r--r--fs/cifs/cifsglob.h86
-rw-r--r--fs/cifs/cifspdu.h2
-rw-r--r--fs/cifs/cifsproto.h39
-rw-r--r--fs/cifs/cifssmb.c94
-rw-r--r--fs/cifs/connect.c380
-rw-r--r--fs/cifs/dfs_cache.c2
-rw-r--r--fs/cifs/dir.c5
-rw-r--r--fs/cifs/file.c287
-rw-r--r--fs/cifs/fs_context.c12
-rw-r--r--fs/cifs/fscache.c409
-rw-r--r--fs/cifs/fscache.h195
-rw-r--r--fs/cifs/inode.c25
-rw-r--r--fs/cifs/misc.c49
-rw-r--r--fs/cifs/netmisc.c5
-rw-r--r--fs/cifs/ntlmssp.h32
-rw-r--r--fs/cifs/sess.c290
-rw-r--r--fs/cifs/smb1ops.c22
-rw-r--r--fs/cifs/smb2glob.h2
-rw-r--r--fs/cifs/smb2misc.c5
-rw-r--r--fs/cifs/smb2ops.c34
-rw-r--r--fs/cifs/smb2pdu.c263
-rw-r--r--fs/cifs/smb2proto.h6
-rw-r--r--fs/cifs/smb2transport.c67
-rw-r--r--fs/cifs/transport.c86
-rw-r--r--fs/cifs/xattr.c2
-rw-r--r--fs/configfs/dir.c6
-rw-r--r--fs/coredump.c80
-rw-r--r--fs/dax.c157
-rw-r--r--fs/dcache.c37
-rw-r--r--fs/debugfs/file.c2
-rw-r--r--fs/devpts/inode.c2
-rw-r--r--fs/dlm/lockspace.c3
-rw-r--r--fs/erofs/data.c11
-rw-r--r--fs/erofs/internal.h3
-rw-r--r--fs/erofs/super.c15
-rw-r--r--fs/erofs/zdata.c113
-rw-r--r--fs/erofs/zmap.c7
-rw-r--r--fs/eventpoll.c10
-rw-r--r--fs/exec.c52
-rw-r--r--fs/exfat/balloc.c2
-rw-r--r--fs/exfat/dir.c42
-rw-r--r--fs/exfat/exfat_fs.h6
-rw-r--r--fs/exfat/fatent.c4
-rw-r--r--fs/exfat/file.c18
-rw-r--r--fs/exfat/inode.c15
-rw-r--r--fs/exfat/misc.c3
-rw-r--r--fs/exfat/namei.c48
-rw-r--r--fs/exfat/nls.c2
-rw-r--r--fs/exfat/super.c11
-rw-r--r--fs/ext2/ext2.h1
-rw-r--r--fs/ext2/inode.c15
-rw-r--r--fs/ext2/super.c16
-rw-r--r--fs/ext4/acl.c8
-rw-r--r--fs/ext4/ext4.h32
-rw-r--r--fs/ext4/ext4_jbd2.h2
-rw-r--r--fs/ext4/extents.c24
-rw-r--r--fs/ext4/fast_commit.c133
-rw-r--r--fs/ext4/hash.c2
-rw-r--r--fs/ext4/indirect.c2
-rw-r--r--fs/ext4/inline.c28
-rw-r--r--fs/ext4/inode.c37
-rw-r--r--fs/ext4/ioctl.c4
-rw-r--r--fs/ext4/mballoc.c40
-rw-r--r--fs/ext4/migrate.c2
-rw-r--r--fs/ext4/namei.c16
-rw-r--r--fs/ext4/orphan.c4
-rw-r--r--fs/ext4/page-io.c9
-rw-r--r--fs/ext4/readpage.c6
-rw-r--r--fs/ext4/super.c67
-rw-r--r--fs/ext4/sysfs.c8
-rw-r--r--fs/ext4/xattr.c6
-rw-r--r--fs/f2fs/Kconfig1
-rw-r--r--fs/f2fs/checkpoint.c6
-rw-r--r--fs/f2fs/compress.c84
-rw-r--r--fs/f2fs/data.c368
-rw-r--r--fs/f2fs/dir.c10
-rw-r--r--fs/f2fs/f2fs.h47
-rw-r--r--fs/f2fs/file.c509
-rw-r--r--fs/f2fs/gc.c31
-rw-r--r--fs/f2fs/hash.c2
-rw-r--r--fs/f2fs/inline.c4
-rw-r--r--fs/f2fs/inode.c22
-rw-r--r--fs/f2fs/iostat.c40
-rw-r--r--fs/f2fs/namei.c4
-rw-r--r--fs/f2fs/node.c27
-rw-r--r--fs/f2fs/recovery.c12
-rw-r--r--fs/f2fs/segment.c19
-rw-r--r--fs/f2fs/segment.h3
-rw-r--r--fs/f2fs/super.c99
-rw-r--r--fs/f2fs/sysfs.c39
-rw-r--r--fs/f2fs/xattr.c40
-rw-r--r--fs/fat/file.c5
-rw-r--r--fs/file_table.c53
-rw-r--r--fs/fs-writeback.c32
-rw-r--r--fs/fs_context.c2
-rw-r--r--fs/fscache/Kconfig3
-rw-r--r--fs/fscache/Makefile6
-rw-r--r--fs/fscache/cache.c618
-rw-r--r--fs/fscache/cookie.c1448
-rw-r--r--fs/fscache/fsdef.c98
-rw-r--r--fs/fscache/internal.h317
-rw-r--r--fs/fscache/io.c376
-rw-r--r--fs/fscache/main.c147
-rw-r--r--fs/fscache/netfs.c74
-rw-r--r--fs/fscache/object.c1125
-rw-r--r--fs/fscache/operation.c633
-rw-r--r--fs/fscache/page.c1242
-rw-r--r--fs/fscache/proc.c47
-rw-r--r--fs/fscache/stats.c293
-rw-r--r--fs/fscache/volume.c517
-rw-r--r--fs/fuse/Kconfig2
-rw-r--r--fs/fuse/dax.c36
-rw-r--r--fs/fuse/dir.c91
-rw-r--r--fs/fuse/file.c6
-rw-r--r--fs/fuse/fuse_i.h31
-rw-r--r--fs/fuse/inode.c89
-rw-r--r--fs/fuse/virtio_fs.c40
-rw-r--r--fs/gfs2/file.c7
-rw-r--r--fs/gfs2/glock.c3
-rw-r--r--fs/gfs2/sys.c3
-rw-r--r--fs/hfsplus/hfsplus_raw.h12
-rw-r--r--fs/hfsplus/xattr.c4
-rw-r--r--fs/hugetlbfs/inode.c7
-rw-r--r--fs/inode.c88
-rw-r--r--fs/internal.h2
-rw-r--r--fs/io-wq.c97
-rw-r--r--fs/io-wq.h24
-rw-r--r--fs/io_uring.c1194
-rw-r--r--fs/ioctl.c2
-rw-r--r--fs/iomap/Makefile4
-rw-r--r--fs/iomap/buffered-io.c593
-rw-r--r--fs/iomap/direct-io.c1
-rw-r--r--fs/jbd2/commit.c21
-rw-r--r--fs/jbd2/journal.c10
-rw-r--r--fs/jbd2/transaction.c53
-rw-r--r--fs/jffs2/background.c2
-rw-r--r--fs/kernfs/dir.c118
-rw-r--r--fs/kernfs/file.c6
-rw-r--r--fs/kernfs/inode.c22
-rw-r--r--fs/kernfs/mount.c15
-rw-r--r--fs/kernfs/symlink.c5
-rw-r--r--fs/ksmbd/asn1.c142
-rw-r--r--fs/ksmbd/auth.c54
-rw-r--r--fs/ksmbd/auth.h10
-rw-r--r--fs/ksmbd/connection.c10
-rw-r--r--fs/ksmbd/connection.h12
-rw-r--r--fs/ksmbd/ksmbd_netlink.h12
-rw-r--r--fs/ksmbd/mgmt/user_config.c10
-rw-r--r--fs/ksmbd/mgmt/user_config.h1
-rw-r--r--fs/ksmbd/mgmt/user_session.h1
-rw-r--r--fs/ksmbd/smb2misc.c18
-rw-r--r--fs/ksmbd/smb2ops.c16
-rw-r--r--fs/ksmbd/smb2pdu.c253
-rw-r--r--fs/ksmbd/smb2pdu.h1
-rw-r--r--fs/ksmbd/smb_common.c5
-rw-r--r--fs/ksmbd/smb_common.h1
-rw-r--r--fs/ksmbd/transport_ipc.c2
-rw-r--r--fs/ksmbd/transport_rdma.c261
-rw-r--r--fs/ksmbd/transport_rdma.h4
-rw-r--r--fs/ksmbd/transport_tcp.c3
-rw-r--r--fs/ksmbd/vfs.h1
-rw-r--r--fs/ksmbd/vfs_cache.h10
-rw-r--r--fs/libfs.c10
-rw-r--r--fs/lockd/svc.c200
-rw-r--r--fs/lockd/svclock.c6
-rw-r--r--fs/lockd/svcsubs.c18
-rw-r--r--fs/locks.c34
-rw-r--r--fs/mpage.c7
-rw-r--r--fs/namei.c71
-rw-r--r--fs/namespace.c54
-rw-r--r--fs/netfs/read_helper.c13
-rw-r--r--fs/nfs/Makefile2
-rw-r--r--fs/nfs/callback.c36
-rw-r--r--fs/nfs/callback.h2
-rw-r--r--fs/nfs/callback_proc.c2
-rw-r--r--fs/nfs/callback_xdr.c18
-rw-r--r--fs/nfs/client.c13
-rw-r--r--fs/nfs/dir.c170
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/export.c2
-rw-r--r--fs/nfs/file.c13
-rw-r--r--fs/nfs/filelayout/filelayout.h2
-rw-r--r--fs/nfs/filelayout/filelayoutdev.c4
-rw-r--r--fs/nfs/fscache-index.c140
-rw-r--r--fs/nfs/fscache.c490
-rw-r--r--fs/nfs/fscache.h180
-rw-r--r--fs/nfs/inode.c20
-rw-r--r--fs/nfs/internal.h1
-rw-r--r--fs/nfs/nfs3proc.c5
-rw-r--r--fs/nfs/nfs42proc.c13
-rw-r--r--fs/nfs/nfs4_fs.h14
-rw-r--r--fs/nfs/nfs4client.c5
-rw-r--r--fs/nfs/nfs4namespace.c19
-rw-r--r--fs/nfs/nfs4proc.c203
-rw-r--r--fs/nfs/nfs4state.c8
-rw-r--r--fs/nfs/nfs4xdr.c49
-rw-r--r--fs/nfs/nfstrace.h1
-rw-r--r--fs/nfs/read.c25
-rw-r--r--fs/nfs/super.c28
-rw-r--r--fs/nfs/sysfs.c3
-rw-r--r--fs/nfs/write.c8
-rw-r--r--fs/nfsd/filecache.c79
-rw-r--r--fs/nfsd/filecache.h1
-rw-r--r--fs/nfsd/netns.h27
-rw-r--r--fs/nfsd/nfs3proc.c25
-rw-r--r--fs/nfsd/nfs3xdr.c69
-rw-r--r--fs/nfsd/nfs4proc.c37
-rw-r--r--fs/nfsd/nfs4state.c67
-rw-r--r--fs/nfsd/nfs4xdr.c31
-rw-r--r--fs/nfsd/nfscache.c2
-rw-r--r--fs/nfsd/nfsctl.c32
-rw-r--r--fs/nfsd/nfsd.h2
-rw-r--r--fs/nfsd/nfsfh.c66
-rw-r--r--fs/nfsd/nfsfh.h40
-rw-r--r--fs/nfsd/nfsproc.c8
-rw-r--r--fs/nfsd/nfssvc.c222
-rw-r--r--fs/nfsd/state.h5
-rw-r--r--fs/nfsd/stats.c2
-rw-r--r--fs/nfsd/stats.h4
-rw-r--r--fs/nfsd/trace.h120
-rw-r--r--fs/nfsd/vfs.c177
-rw-r--r--fs/nfsd/vfs.h7
-rw-r--r--fs/nilfs2/page.c4
-rw-r--r--fs/nilfs2/sysfs.c13
-rw-r--r--fs/notify/dnotify/dnotify.c23
-rw-r--r--fs/notify/fanotify/fanotify.c213
-rw-r--r--fs/notify/fanotify/fanotify.h142
-rw-r--r--fs/notify/fanotify/fanotify_user.c101
-rw-r--r--fs/notify/fsnotify.c53
-rw-r--r--fs/notify/group.c2
-rw-r--r--fs/notify/inotify/inotify_user.c11
-rw-r--r--fs/notify/mark.c31
-rw-r--r--fs/ntfs/attrib.c2
-rw-r--r--fs/ntfs3/ntfs_fs.h1
-rw-r--r--fs/ocfs2/alloc.c2
-rw-r--r--fs/ocfs2/aops.c26
-rw-r--r--fs/ocfs2/cluster/heartbeat.c2
-rw-r--r--fs/ocfs2/cluster/masklog.c11
-rw-r--r--fs/ocfs2/dir.c2
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c4
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c18
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c2
-rw-r--r--fs/ocfs2/dlm/dlmthread.c2
-rw-r--r--fs/ocfs2/filecheck.c3
-rw-r--r--fs/ocfs2/journal.c6
-rw-r--r--fs/ocfs2/stackglue.c36
-rw-r--r--fs/ocfs2/suballoc.c25
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/orangefs/orangefs-bufmap.c7
-rw-r--r--fs/orangefs/orangefs-sysfs.c21
-rw-r--r--fs/overlayfs/copy_up.c16
-rw-r--r--fs/pipe.c64
-rw-r--r--fs/proc/array.c9
-rw-r--r--fs/proc/base.c4
-rw-r--r--fs/proc/generic.c6
-rw-r--r--fs/proc/inode.c1
-rw-r--r--fs/proc/internal.h5
-rw-r--r--fs/proc/proc_net.c8
-rw-r--r--fs/proc/proc_sysctl.c72
-rw-r--r--fs/proc/task_mmu.c53
-rw-r--r--fs/proc/vmcore.c10
-rw-r--r--fs/quota/dquot.c11
-rw-r--r--fs/reiserfs/journal.c7
-rw-r--r--fs/remap_range.c116
-rw-r--r--fs/signalfd.c5
-rw-r--r--fs/smbfs_common/smb2pdu.h2
-rw-r--r--fs/smbfs_common/smbfsctl.h2
-rw-r--r--fs/squashfs/super.c33
-rw-r--r--fs/super.c26
-rw-r--r--fs/sync.c18
-rw-r--r--fs/sysctls.c39
-rw-r--r--fs/tracefs/inode.c24
-rw-r--r--fs/udf/ialloc.c2
-rw-r--r--fs/udf/inode.c9
-rw-r--r--fs/unicode/.gitignore2
-rw-r--r--fs/unicode/Kconfig7
-rw-r--r--fs/unicode/Makefile17
-rw-r--r--fs/unicode/mkutf8data.c24
-rw-r--r--fs/unicode/utf8-core.c109
-rw-r--r--fs/unicode/utf8-norm.c262
-rw-r--r--fs/unicode/utf8-selftest.c94
-rw-r--r--fs/unicode/utf8data.c_shipped (renamed from fs/unicode/utf8data.h_shipped)22
-rw-r--r--fs/unicode/utf8n.h81
-rw-r--r--fs/userfaultfd.c8
-rw-r--r--fs/xfs/kmem.c3
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c4
-rw-r--r--fs/xfs/libxfs/xfs_fs.h37
-rw-r--r--fs/xfs/scrub/agheader.c53
-rw-r--r--fs/xfs/scrub/agheader_repair.c12
-rw-r--r--fs/xfs/xfs_aops.c42
-rw-r--r--fs/xfs/xfs_bmap_util.c23
-rw-r--r--fs/xfs/xfs_bmap_util.h2
-rw-r--r--fs/xfs/xfs_buf.c10
-rw-r--r--fs/xfs/xfs_buf.h5
-rw-r--r--fs/xfs/xfs_dir2_readdir.c53
-rw-r--r--fs/xfs/xfs_file.c92
-rw-r--r--fs/xfs/xfs_icache.c24
-rw-r--r--fs/xfs/xfs_inode.h9
-rw-r--r--fs/xfs/xfs_ioctl.c104
-rw-r--r--fs/xfs/xfs_ioctl.h6
-rw-r--r--fs/xfs/xfs_ioctl32.c27
-rw-r--r--fs/xfs/xfs_ioctl32.h22
-rw-r--r--fs/xfs/xfs_iomap.c84
-rw-r--r--fs/xfs/xfs_iomap.h12
-rw-r--r--fs/xfs/xfs_iops.c7
-rw-r--r--fs/xfs/xfs_pnfs.c46
-rw-r--r--fs/xfs/xfs_reflink.c3
-rw-r--r--fs/xfs/xfs_super.c86
-rw-r--r--include/acpi/acpi_bus.h1
-rw-r--r--include/asm-generic/barrier.h2
-rw-r--r--include/asm-generic/bitops.h1
-rw-r--r--include/asm-generic/bitops/le.h64
-rw-r--r--include/asm-generic/hyperv-tlfs.h33
-rw-r--r--include/asm-generic/mshyperv.h6
-rw-r--r--include/asm-generic/pgalloc.h24
-rw-r--r--include/crypto/blake2s.h3
-rw-r--r--include/crypto/internal/blake2s.h40
-rw-r--r--include/dt-bindings/clock/dra7.h14
-rw-r--r--include/dt-bindings/clock/exynos4.h4
-rw-r--r--include/dt-bindings/clock/exynos5250.h4
-rw-r--r--include/dt-bindings/clock/exynos7885.h115
-rw-r--r--include/dt-bindings/clock/exynos850.h54
-rw-r--r--include/dt-bindings/clock/imx8mp-clock.h1
-rw-r--r--include/dt-bindings/clock/ingenic,jz4760-cgu.h2
-rw-r--r--include/dt-bindings/clock/ingenic,jz4770-cgu.h1
-rw-r--r--include/dt-bindings/clock/microchip,lan966x.h34
-rw-r--r--include/dt-bindings/clock/mt7986-clk.h169
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8976.h240
-rw-r--r--include/dt-bindings/clock/sun20i-d1-ccu.h156
-rw-r--r--include/dt-bindings/clock/sun20i-d1-r-ccu.h19
-rw-r--r--include/dt-bindings/clock/toshiba,tmpv770x.h181
-rw-r--r--include/dt-bindings/iio/addac/adi,ad74413r.h21
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8996.h163
-rw-r--r--include/dt-bindings/interconnect/qcom,qcm2290.h94
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8450.h171
-rw-r--r--include/dt-bindings/mailbox/qcom-ipcc.h2
-rw-r--r--include/dt-bindings/mux/ti-serdes.h22
-rw-r--r--include/dt-bindings/phy/phy-cadence.h9
-rw-r--r--include/dt-bindings/phy/phy-imx8-pcie.h14
-rw-r--r--include/dt-bindings/phy/phy-lan966x-serdes.h14
-rw-r--r--include/dt-bindings/reset/sun20i-d1-ccu.h77
-rw-r--r--include/dt-bindings/reset/sun20i-d1-r-ccu.h16
-rw-r--r--include/dt-bindings/reset/toshiba,tmpv770x.h41
-rw-r--r--include/dt-bindings/sound/rt5640.h1
-rw-r--r--include/dt-bindings/sound/tlv320adc3xxx.h28
-rw-r--r--include/kunit/assert.h2
-rw-r--r--include/kvm/arm_pmu.h19
-rw-r--r--include/kvm/arm_vgic.h4
-rw-r--r--include/linux/acpi.h34
-rw-r--r--include/linux/aio.h4
-rw-r--r--include/linux/amba/bus.h6
-rw-r--r--include/linux/ata.h2
-rw-r--r--include/linux/auxiliary_bus.h174
-rw-r--r--include/linux/bio.h60
-rw-r--r--include/linux/bitfield.h19
-rw-r--r--include/linux/bitmap.h34
-rw-r--r--include/linux/bitops.h34
-rw-r--r--include/linux/blk-mq.h105
-rw-r--r--include/linux/blk-pm.h2
-rw-r--r--include/linux/blkdev.h51
-rw-r--r--include/linux/bpf.h9
-rw-r--r--include/linux/bpf_verifier.h4
-rw-r--r--include/linux/byteorder/generic.h4
-rw-r--r--include/linux/ceph/libceph.h5
-rw-r--r--include/linux/ceph/messenger.h7
-rw-r--r--include/linux/cleancache.h124
-rw-r--r--include/linux/clk-provider.h23
-rw-r--r--include/linux/clk/sunxi-ng.h15
-rw-r--r--include/linux/comedi/comedi_8254.h (renamed from drivers/comedi/drivers/comedi_8254.h)0
-rw-r--r--include/linux/comedi/comedi_8255.h (renamed from drivers/comedi/drivers/8255.h)8
-rw-r--r--include/linux/comedi/comedi_isadma.h (renamed from drivers/comedi/drivers/comedi_isadma.h)0
-rw-r--r--include/linux/comedi/comedi_pci.h (renamed from drivers/comedi/comedi_pci.h)3
-rw-r--r--include/linux/comedi/comedi_pcmcia.h (renamed from drivers/comedi/comedi_pcmcia.h)3
-rw-r--r--include/linux/comedi/comedi_usb.h (renamed from drivers/comedi/comedi_usb.h)3
-rw-r--r--include/linux/comedi/comedidev.h (renamed from drivers/comedi/comedidev.h)3
-rw-r--r--include/linux/comedi/comedilib.h (renamed from drivers/comedi/comedilib.h)0
-rw-r--r--include/linux/compiler.h21
-rw-r--r--include/linux/coredump.h10
-rw-r--r--include/linux/counter.h55
-rw-r--r--include/linux/cpumask.h46
-rw-r--r--include/linux/cuda.h2
-rw-r--r--include/linux/damon.h89
-rw-r--r--include/linux/dax.h93
-rw-r--r--include/linux/dcache.h10
-rw-r--r--include/linux/delayacct.h107
-rw-r--r--include/linux/device-mapper.h4
-rw-r--r--include/linux/device.h31
-rw-r--r--include/linux/dma/qcom_adm.h12
-rw-r--r--include/linux/dma/xilinx_dpdma.h11
-rw-r--r--include/linux/dmaengine.h24
-rw-r--r--include/linux/dnotify.h3
-rw-r--r--include/linux/efi.h46
-rw-r--r--include/linux/elfcore-compat.h5
-rw-r--r--include/linux/elfcore.h5
-rw-r--r--include/linux/ethtool.h2
-rw-r--r--include/linux/exportfs.h2
-rw-r--r--include/linux/fanotify.h7
-rw-r--r--include/linux/fb.h2
-rw-r--r--include/linux/find.h372
-rw-r--r--include/linux/firmware/cirrus/cs_dsp.h21
-rw-r--r--include/linux/firmware/cirrus/wmfw.h1
-rw-r--r--include/linux/firmware/xlnx-event-manager.h36
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h33
-rw-r--r--include/linux/fpga/fpga-bridge.h30
-rw-r--r--include/linux/fpga/fpga-mgr.h62
-rw-r--r--include/linux/fpga/fpga-region.h36
-rw-r--r--include/linux/frontswap.h35
-rw-r--r--include/linux/fs.h31
-rw-r--r--include/linux/fs_context.h2
-rw-r--r--include/linux/fscache-cache.h614
-rw-r--r--include/linux/fscache.h1026
-rw-r--r--include/linux/fsl/mc.h4
-rw-r--r--include/linux/fsnotify.h58
-rw-r--r--include/linux/fsnotify_backend.h74
-rw-r--r--include/linux/genhd.h85
-rw-r--r--include/linux/gfp.h12
-rw-r--r--include/linux/hash.h5
-rw-r--r--include/linux/huge_mm.h14
-rw-r--r--include/linux/hugetlb.h4
-rw-r--r--include/linux/hugetlb_cgroup.h7
-rw-r--r--include/linux/hyperv.h7
-rw-r--r--include/linux/if_vlan.h6
-rw-r--r--include/linux/iio/buffer-dma.h5
-rw-r--r--include/linux/iio/iio.h5
-rw-r--r--include/linux/iio/trigger.h2
-rw-r--r--include/linux/iio/types.h1
-rw-r--r--include/linux/inotify.h3
-rw-r--r--include/linux/intel-svm.h6
-rw-r--r--include/linux/interrupt.h53
-rw-r--r--include/linux/iocontext.h49
-rw-r--r--include/linux/iomap.h10
-rw-r--r--include/linux/iommu.h3
-rw-r--r--include/linux/iova.h68
-rw-r--r--include/linux/irqchip/arm-gic-v3.h4
-rw-r--r--include/linux/irqdomain.h2
-rw-r--r--include/linux/jbd2.h13
-rw-r--r--include/linux/kasan.h4
-rw-r--r--include/linux/kernel.h10
-rw-r--r--include/linux/kernfs.h6
-rw-r--r--include/linux/kfence.h2
-rw-r--r--include/linux/kobject.h34
-rw-r--r--include/linux/kprobes.h6
-rw-r--r--include/linux/kthread.h30
-rw-r--r--include/linux/kvm_dirty_ring.h14
-rw-r--r--include/linux/kvm_host.h557
-rw-r--r--include/linux/kvm_types.h19
-rw-r--r--include/linux/libata.h139
-rw-r--r--include/linux/list.h36
-rw-r--r--include/linux/lockd/lockd.h9
-rw-r--r--include/linux/lsm_hook_defs.h2
-rw-r--r--include/linux/mc146818rtc.h6
-rw-r--r--include/linux/memblock.h2
-rw-r--r--include/linux/memcontrol.h27
-rw-r--r--include/linux/mempolicy.h1
-rw-r--r--include/linux/memremap.h29
-rw-r--r--include/linux/mfd/tps68470.h11
-rw-r--r--include/linux/mhi.h21
-rw-r--r--include/linux/migrate.h2
-rw-r--r--include/linux/mm.h181
-rw-r--r--include/linux/mm_inline.h136
-rw-r--r--include/linux/mm_types.h183
-rw-r--r--include/linux/mmzone.h9
-rw-r--r--include/linux/module.h9
-rw-r--r--include/linux/mount.h3
-rw-r--r--include/linux/msi.h289
-rw-r--r--include/linux/netdevice.h3
-rw-r--r--include/linux/netfs.h22
-rw-r--r--include/linux/nfs.h8
-rw-r--r--include/linux/nfs_fs.h12
-rw-r--r--include/linux/nfs_fs_sb.h13
-rw-r--r--include/linux/nfs_xdr.h5
-rw-r--r--include/linux/of.h422
-rw-r--r--include/linux/of_fdt.h11
-rw-r--r--include/linux/page-flags.h56
-rw-r--r--include/linux/page_idle.h1
-rw-r--r--include/linux/page_table_check.h166
-rw-r--r--include/linux/pagemap.h88
-rw-r--r--include/linux/pagevec.h68
-rw-r--r--include/linux/pci.h21
-rw-r--r--include/linux/pci_ids.h68
-rw-r--r--include/linux/percpu.h13
-rw-r--r--include/linux/perf_event.h59
-rw-r--r--include/linux/pgtable.h9
-rw-r--r--include/linux/pid_namespace.h5
-rw-r--r--include/linux/pinctrl/pinconf-generic.h3
-rw-r--r--include/linux/pipe_fs_i.h4
-rw-r--r--include/linux/pktcdvd.h12
-rw-r--r--include/linux/platform_data/ad5755.h102
-rw-r--r--include/linux/platform_data/bcm7038_wdt.h8
-rw-r--r--include/linux/platform_data/clk-fch.h2
-rw-r--r--include/linux/pm.h55
-rw-r--r--include/linux/pm_runtime.h24
-rw-r--r--include/linux/pmu.h2
-rw-r--r--include/linux/poll.h2
-rw-r--r--include/linux/printk.h4
-rw-r--r--include/linux/proc_fs.h25
-rw-r--r--include/linux/profile.h45
-rw-r--r--include/linux/property.h2
-rw-r--r--include/linux/psi.h13
-rw-r--r--include/linux/psi_types.h3
-rw-r--r--include/linux/quota.h2
-rw-r--r--include/linux/raid/pq.h2
-rw-r--r--include/linux/ref_tracker.h2
-rw-r--r--include/linux/reset.h20
-rw-r--r--include/linux/rio_ids.h13
-rw-r--r--include/linux/rwlock.h6
-rw-r--r--include/linux/rwlock_api_smp.h8
-rw-r--r--include/linux/rwlock_rt.h10
-rw-r--r--include/linux/sbitmap.h11
-rw-r--r--include/linux/scatterlist.h29
-rw-r--r--include/linux/sched.h18
-rw-r--r--include/linux/sched/mm.h26
-rw-r--r--include/linux/sched/signal.h25
-rw-r--r--include/linux/sched/sysctl.h14
-rw-r--r--include/linux/sched/task.h5
-rw-r--r--include/linux/seq_file.h2
-rw-r--r--include/linux/serial_8250.h2
-rw-r--r--include/linux/serial_s3c.h9
-rw-r--r--include/linux/shmem_fs.h3
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/slab.h3
-rw-r--r--include/linux/soc/ti/ti_sci_inta_msi.h2
-rw-r--r--include/linux/soundwire/sdw_intel.h4
-rw-r--r--include/linux/spinlock_api_up.h1
-rw-r--r--include/linux/stackdepot.h25
-rw-r--r--include/linux/stackleak.h5
-rw-r--r--include/linux/string_helpers.h4
-rw-r--r--include/linux/sunrpc/svc.h79
-rw-r--r--include/linux/suspend.h15
-rw-r--r--include/linux/swap.h6
-rw-r--r--include/linux/swapfile.h3
-rw-r--r--include/linux/swiotlb.h6
-rw-r--r--include/linux/switchtec.h2
-rw-r--r--include/linux/syscalls.h3
-rw-r--r--include/linux/sysctl.h67
-rw-r--r--include/linux/topology.h25
-rw-r--r--include/linux/trace_events.h2
-rw-r--r--include/linux/tracehook.h7
-rw-r--r--include/linux/tty.h153
-rw-r--r--include/linux/tty_driver.h572
-rw-r--r--include/linux/tty_flip.h1
-rw-r--r--include/linux/tty_ldisc.h287
-rw-r--r--include/linux/tty_port.h131
-rw-r--r--include/linux/uio.h27
-rw-r--r--include/linux/unaligned/packed_struct.h2
-rw-r--r--include/linux/unicode.h49
-rw-r--r--include/linux/usb.h9
-rw-r--r--include/linux/usb/ch9.h3
-rw-r--r--include/linux/usb/role.h6
-rw-r--r--include/linux/usb/typec.h12
-rw-r--r--include/linux/vdpa.h39
-rw-r--r--include/linux/virtio.h1
-rw-r--r--include/linux/vm_event_item.h3
-rw-r--r--include/linux/vmalloc.h7
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/linux/xarray.h18
-rw-r--r--include/net/9p/9p.h2
-rw-r--r--include/net/9p/transport.h2
-rw-r--r--include/net/addrconf.h4
-rw-r--r--include/net/ax25.h12
-rw-r--r--include/net/bond_3ad.h2
-rw-r--r--include/net/bonding.h2
-rw-r--r--include/net/dsa.h1
-rw-r--r--include/net/dst_metadata.h14
-rw-r--r--include/net/inet_frag.h11
-rw-r--r--include/net/ip.h21
-rw-r--r--include/net/ip6_fib.h12
-rw-r--r--include/net/ipv6.h5
-rw-r--r--include/net/ipv6_frag.h3
-rw-r--r--include/net/neighbour.h18
-rw-r--r--include/net/netns/ipv6.h3
-rw-r--r--include/net/pkt_cls.h4
-rw-r--r--include/net/route.h2
-rw-r--r--include/net/sch_generic.h5
-rw-r--r--include/net/tcp.h4
-rw-r--r--include/ras/ras_event.h2
-rw-r--r--include/rdma/ib_mad.h1
-rw-r--r--include/rdma/ib_smi.h12
-rw-r--r--include/rdma/ib_verbs.h17
-rw-r--r--include/scsi/libsas.h2
-rw-r--r--include/scsi/scsi_cmnd.h2
-rw-r--r--include/scsi/scsi_device.h9
-rw-r--r--include/scsi/scsi_host.h6
-rw-r--r--include/scsi/scsi_ioctl.h4
-rw-r--r--include/scsi/sg.h4
-rw-r--r--include/sound/cs35l41.h746
-rw-r--r--include/sound/dmaengine_pcm.h2
-rw-r--r--include/sound/hda_codec.h8
-rw-r--r--include/sound/hdaudio.h1
-rw-r--r--include/sound/hdaudio_ext.h27
-rw-r--r--include/sound/intel-nhlt.h37
-rw-r--r--include/sound/memalloc.h14
-rw-r--r--include/sound/pcm.h18
-rw-r--r--include/sound/rt5682s.h1
-rw-r--r--include/sound/soc-component.h4
-rw-r--r--include/sound/soc-dai.h36
-rw-r--r--include/sound/soc-dpcm.h2
-rw-r--r--include/sound/soc.h3
-rw-r--r--include/sound/sof.h22
-rw-r--r--include/sound/sof/dai-amd.h21
-rw-r--r--include/sound/sof/dai-mediatek.h23
-rw-r--r--include/sound/sof/dai.h35
-rw-r--r--include/sound/sof/debug.h2
-rw-r--r--include/sound/sof/header.h1
-rw-r--r--include/trace/bpf_probe.h16
-rw-r--r--include/trace/events/block.h8
-rw-r--r--include/trace/events/cachefiles.h592
-rw-r--r--include/trace/events/compaction.h24
-rw-r--r--include/trace/events/damon.h15
-rw-r--r--include/trace/events/error_report.h8
-rw-r--r--include/trace/events/f2fs.h27
-rw-r--r--include/trace/events/filemap.h32
-rw-r--r--include/trace/events/fscache.h642
-rw-r--r--include/trace/events/iommu.h10
-rw-r--r--include/trace/events/libata.h416
-rw-r--r--include/trace/events/netfs.h5
-rw-r--r--include/trace/events/random.h56
-rw-r--r--include/trace/events/skb.h2
-rw-r--r--include/trace/events/sunrpc.h107
-rw-r--r--include/trace/events/thp.h35
-rw-r--r--include/trace/perf.h17
-rw-r--r--include/trace/trace_events.h123
-rw-r--r--include/uapi/asm-generic/unistd.h5
-rw-r--r--include/uapi/linux/comedi.h (renamed from drivers/comedi/comedi.h)2
-rw-r--r--include/uapi/linux/cyclades.h35
-rw-r--r--include/uapi/linux/fanotify.h12
-rw-r--r--include/uapi/linux/fuse.h55
-rw-r--r--include/uapi/linux/idxd.h1
-rw-r--r--include/uapi/linux/io_uring.h4
-rw-r--r--include/uapi/linux/kfd_sysfs.h2
-rw-r--r--include/uapi/linux/kvm.h17
-rw-r--r--include/uapi/linux/magic.h6
-rw-r--r--include/uapi/linux/module.h1
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h2
-rw-r--r--include/uapi/linux/pci_regs.h142
-rw-r--r--include/uapi/linux/perf_event.h7
-rw-r--r--include/uapi/linux/pfrut.h262
-rw-r--r--include/uapi/linux/prctl.h3
-rw-r--r--include/uapi/linux/smc_diag.h11
-rw-r--r--include/uapi/linux/soundcard.h2
-rw-r--r--include/uapi/linux/taskstats.h6
-rw-r--r--include/uapi/linux/uuid.h10
-rw-r--r--include/uapi/linux/vdpa.h6
-rw-r--r--include/uapi/linux/virtio_iommu.h8
-rw-r--r--include/uapi/misc/habanalabs.h166
-rw-r--r--include/uapi/rdma/hns-abi.h2
-rw-r--r--include/uapi/sound/asound.h11
-rw-r--r--include/uapi/sound/sof/tokens.h5
-rw-r--r--include/uapi/xen/gntdev.h8
-rw-r--r--include/xen/balloon.h3
-rw-r--r--include/xen/interface/io/usbif.h405
-rw-r--r--include/xen/interface/xen.h3
-rw-r--r--include/xen/xen.h16
-rw-r--r--include/xen/xenbus_dev.h2
-rw-r--r--init/Kconfig18
-rw-r--r--init/Makefile2
-rw-r--r--init/main.c9
-rw-r--r--ipc/sem.c4
-rw-r--r--ipc/util.c2
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/async.c3
-rw-r--r--kernel/audit.c62
-rw-r--r--kernel/auditsc.c2
-rw-r--r--kernel/bpf/bpf_lsm.c2
-rw-r--r--kernel/bpf/btf.c2
-rw-r--r--kernel/bpf/inode.c14
-rw-r--r--kernel/bpf/ringbuf.c2
-rw-r--r--kernel/bpf/stackmap.c5
-rw-r--r--kernel/bpf/trampoline.c5
-rw-r--r--kernel/bpf/verifier.c81
-rw-r--r--kernel/cgroup/cgroup.c11
-rw-r--r--kernel/configs/debug.config105
-rw-r--r--kernel/cred.c9
-rw-r--r--kernel/delayacct.c49
-rw-r--r--kernel/dma/direct.c240
-rw-r--r--kernel/dma/pool.c4
-rw-r--r--kernel/dma/swiotlb.c50
-rw-r--r--kernel/events/core.c330
-rw-r--r--kernel/exit.c97
-rw-r--r--kernel/fork.c79
-rw-r--r--kernel/futex/core.c2
-rw-r--r--kernel/gcov/Kconfig1
-rw-r--r--kernel/hung_task.c81
-rw-r--r--kernel/irq/generic-chip.c2
-rw-r--r--kernel/irq/manage.c8
-rw-r--r--kernel/irq/msi.c792
-rw-r--r--kernel/irq/proc.c8
-rw-r--r--kernel/kallsyms.c1
-rw-r--r--kernel/kexec_core.c2
-rw-r--r--kernel/kprobes.c30
-rw-r--r--kernel/kthread.c121
-rw-r--r--kernel/livepatch/core.c29
-rw-r--r--kernel/livepatch/shadow.c6
-rw-r--r--kernel/locking/lockdep.c4
-rw-r--r--kernel/locking/spinlock.c10
-rw-r--r--kernel/locking/spinlock_rt.c12
-rw-r--r--kernel/module-internal.h19
-rw-r--r--kernel/module.c78
-rw-r--r--kernel/module_decompress.c273
-rw-r--r--kernel/panic.c21
-rw-r--r--kernel/params.c4
-rw-r--r--kernel/power/main.c5
-rw-r--r--kernel/power/process.c2
-rw-r--r--kernel/power/snapshot.c21
-rw-r--r--kernel/power/suspend.c2
-rw-r--r--kernel/power/wakelock.c11
-rw-r--r--kernel/printk/Makefile5
-rw-r--r--kernel/printk/internal.h8
-rw-r--r--kernel/printk/printk.c4
-rw-r--r--kernel/printk/sysctl.c85
-rw-r--r--kernel/profile.c73
-rw-r--r--kernel/ptrace.c2
-rw-r--r--kernel/rcu/rcutorture.c7
-rw-r--r--kernel/rcu/tasks.h12
-rw-r--r--kernel/resource.c4
-rw-r--r--kernel/sched/core.c54
-rw-r--r--kernel/sched/core_sched.c2
-rw-r--r--kernel/sched/fair.c118
-rw-r--r--kernel/sched/membarrier.c9
-rw-r--r--kernel/sched/pelt.h4
-rw-r--r--kernel/sched/psi.c145
-rw-r--r--kernel/seccomp.c10
-rw-r--r--kernel/signal.c66
-rw-r--r--kernel/stackleak.c31
-rw-r--r--kernel/sys.c99
-rw-r--r--kernel/sys_ni.c1
-rw-r--r--kernel/sysctl.c723
-rw-r--r--kernel/time/clocksource.c56
-rw-r--r--kernel/trace/Kconfig27
-rw-r--r--kernel/trace/blktrace.c20
-rw-r--r--kernel/trace/ftrace.c34
-rw-r--r--kernel/trace/ring_buffer.c7
-rw-r--r--kernel/trace/trace.c95
-rw-r--r--kernel/trace/trace.h83
-rw-r--r--kernel/trace/trace_eprobe.c38
-rw-r--r--kernel/trace/trace_events.c12
-rw-r--r--kernel/trace/trace_events_filter.c139
-rw-r--r--kernel/trace/trace_events_hist.c79
-rw-r--r--kernel/trace/trace_events_inject.c11
-rw-r--r--kernel/trace/trace_events_synth.c15
-rw-r--r--kernel/trace/trace_events_trigger.c424
-rw-r--r--kernel/trace/trace_hwlat.c6
-rw-r--r--kernel/trace/trace_kprobe.c43
-rw-r--r--kernel/trace/trace_osnoise.c26
-rw-r--r--kernel/trace/trace_output.c4
-rw-r--r--kernel/trace/trace_probe.c5
-rw-r--r--kernel/trace/trace_syscalls.c6
-rw-r--r--kernel/trace/trace_uprobe.c39
-rw-r--r--kernel/tsacct.c7
-rw-r--r--kernel/ucount.c5
-rw-r--r--kernel/watchdog.c101
-rw-r--r--lib/Kconfig9
-rw-r--r--lib/Kconfig.debug31
-rw-r--r--lib/Kconfig.kasan2
-rw-r--r--lib/Kconfig.ubsan13
-rw-r--r--lib/Makefile3
-rw-r--r--lib/crypto/Kconfig17
-rw-r--r--lib/crypto/blake2s-selftest.c31
-rw-r--r--lib/crypto/blake2s.c41
-rw-r--r--lib/find_bit.c21
-rw-r--r--lib/find_bit_benchmark.c21
-rw-r--r--lib/genalloc.c2
-rw-r--r--lib/iov_iter.c32
-rw-r--r--lib/kobject.c8
-rw-r--r--lib/kobject_uevent.c6
-rw-r--r--lib/kstrtox.c12
-rw-r--r--lib/kunit/try-catch.c4
-rw-r--r--lib/list_debug.c8
-rw-r--r--lib/lz4/lz4defs.h2
-rw-r--r--lib/raid6/algos.c78
-rw-r--r--lib/raid6/avx2.c8
-rw-r--r--lib/raid6/avx512.c6
-rw-r--r--lib/ref_tracker.c5
-rw-r--r--lib/sbitmap.c29
-rw-r--r--lib/sha1.c95
-rw-r--r--lib/stackdepot.c46
-rw-r--r--lib/string_helpers.c64
-rw-r--r--lib/test_bitmap.c37
-rw-r--r--lib/test_hash.c259
-rw-r--r--lib/test_hmm.c24
-rw-r--r--lib/test_kasan.c35
-rw-r--r--lib/test_meminit.c1
-rw-r--r--lib/test_sysctl.c22
-rw-r--r--lib/test_ubsan.c22
-rw-r--r--lib/vsprintf.c24
-rw-r--r--lib/xarray.c6
-rw-r--r--mm/Kconfig64
-rw-r--r--mm/Kconfig.debug24
-rw-r--r--mm/Makefile2
-rw-r--r--mm/cleancache.c315
-rw-r--r--mm/compaction.c7
-rw-r--r--mm/damon/core.c45
-rw-r--r--mm/damon/dbgfs.c18
-rw-r--r--mm/damon/paddr.c22
-rw-r--r--mm/damon/prmtv-common.h4
-rw-r--r--mm/damon/reclaim.c46
-rw-r--r--mm/damon/vaddr.c182
-rw-r--r--mm/debug.c52
-rw-r--r--mm/debug_vm_pgtable.c8
-rw-r--r--mm/dmapool.c2
-rw-r--r--mm/filemap.c1134
-rw-r--r--mm/folio-compat.c11
-rw-r--r--mm/frontswap.c259
-rw-r--r--mm/gup.c66
-rw-r--r--mm/hmm.c5
-rw-r--r--mm/huge_memory.c50
-rw-r--r--mm/hugetlb.c6
-rw-r--r--mm/hugetlb_cgroup.c133
-rw-r--r--mm/internal.h21
-rw-r--r--mm/kasan/common.c1
-rw-r--r--mm/kasan/quarantine.c11
-rw-r--r--mm/kasan/shadow.c9
-rw-r--r--mm/kfence/core.c3
-rw-r--r--mm/kfence/kfence_test.c8
-rw-r--r--mm/khugepaged.c72
-rw-r--r--mm/kmemleak.c34
-rw-r--r--mm/ksm.c5
-rw-r--r--mm/madvise.c494
-rw-r--r--mm/mapping_dirty_helpers.c1
-rw-r--r--mm/memcontrol.c54
-rw-r--r--mm/memory-failure.c193
-rw-r--r--mm/memory.c65
-rw-r--r--mm/mempolicy.c95
-rw-r--r--mm/memremap.c69
-rw-r--r--mm/migrate.c424
-rw-r--r--mm/mlock.c2
-rw-r--r--mm/mmap.c56
-rw-r--r--mm/mmu_gather.c1
-rw-r--r--mm/mprotect.c4
-rw-r--r--mm/oom_kill.c32
-rw-r--r--mm/page-writeback.c6
-rw-r--r--mm/page_alloc.c197
-rw-r--r--mm/page_counter.c1
-rw-r--r--mm/page_ext.c8
-rw-r--r--mm/page_io.c3
-rw-r--r--mm/page_owner.c6
-rw-r--r--mm/page_table_check.c269
-rw-r--r--mm/percpu-internal.h18
-rw-r--r--mm/percpu.c199
-rw-r--r--mm/pgtable-generic.c1
-rw-r--r--mm/readahead.c24
-rw-r--r--mm/rmap.c43
-rw-r--r--mm/shmem.c298
-rw-r--r--mm/slab.h11
-rw-r--r--mm/slab_common.c34
-rw-r--r--mm/swap.c28
-rw-r--r--mm/swapfile.c134
-rw-r--r--mm/truncate.c322
-rw-r--r--mm/userfaultfd.c5
-rw-r--r--mm/util.c15
-rw-r--r--mm/vmalloc.c73
-rw-r--r--mm/vmscan.c6
-rw-r--r--mm/vmstat.c3
-rw-r--r--mm/zpool.c12
-rw-r--r--mm/zsmalloc.c529
-rw-r--r--mm/zswap.c8
-rw-r--r--net/8021q/vlan.h2
-rw-r--r--net/8021q/vlan_dev.c15
-rw-r--r--net/8021q/vlan_netlink.c7
-rw-r--r--net/9p/Kconfig7
-rw-r--r--net/9p/Makefile5
-rw-r--r--net/9p/client.c7
-rw-r--r--net/9p/mod.c15
-rw-r--r--net/9p/trans_fd.c14
-rw-r--r--net/9p/trans_virtio.c4
-rw-r--r--net/9p/trans_xen.c1
-rw-r--r--net/atm/proc.c4
-rw-r--r--net/ax25/af_ax25.c28
-rw-r--r--net/ax25/ax25_dev.c28
-rw-r--r--net/ax25/ax25_route.c13
-rw-r--r--net/bluetooth/af_bluetooth.c8
-rw-r--r--net/bluetooth/bnep/core.c2
-rw-r--r--net/bluetooth/cmtp/core.c2
-rw-r--r--net/bluetooth/hidp/core.c2
-rw-r--r--net/bridge/br_if.c3
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_vlan.c9
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c8
-rw-r--r--net/can/bcm.c2
-rw-r--r--net/can/isotp.c29
-rw-r--r--net/can/proc.c2
-rw-r--r--net/ceph/buffer.c4
-rw-r--r--net/ceph/ceph_common.c52
-rw-r--r--net/ceph/crypto.c2
-rw-r--r--net/ceph/messenger.c21
-rw-r--r--net/ceph/messenger_v1.c54
-rw-r--r--net/ceph/messenger_v2.c252
-rw-r--r--net/ceph/osdmap.c12
-rw-r--r--net/core/dev.c6
-rw-r--r--net/core/drop_monitor.c11
-rw-r--r--net/core/neighbour.c24
-rw-r--r--net/core/net-procfs.c38
-rw-r--r--net/core/net_namespace.c4
-rw-r--r--net/core/of_net.c33
-rw-r--r--net/core/pktgen.c6
-rw-r--r--net/core/rtnetlink.c12
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/core/sock.c5
-rw-r--r--net/dsa/dsa.c1
-rw-r--r--net/dsa/dsa2.c25
-rw-r--r--net/dsa/dsa_priv.h1
-rw-r--r--net/dsa/tag_lan9303.c21
-rw-r--r--net/ieee802154/nl802154.c8
-rw-r--r--net/ipv4/fib_frontend.c3
-rw-r--r--net/ipv4/fib_lookup.h7
-rw-r--r--net/ipv4/fib_semantics.c82
-rw-r--r--net/ipv4/fib_trie.c22
-rw-r--r--net/ipv4/inet_fragment.c8
-rw-r--r--net/ipv4/ip_fragment.c3
-rw-r--r--net/ipv4/ip_gre.c5
-rw-r--r--net/ipv4/ip_output.c26
-rw-r--r--net/ipv4/ipmr.c2
-rw-r--r--net/ipv4/netfilter/Kconfig4
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c6
-rw-r--r--net/ipv4/ping.c12
-rw-r--r--net/ipv4/raw.c13
-rw-r--r--net/ipv4/route.c4
-rw-r--r--net/ipv4/tcp.c31
-rw-r--r--net/ipv4/tcp_input.c2
-rw-r--r--net/ipv4/tcp_ipv4.c4
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv6/addrconf.c31
-rw-r--r--net/ipv6/ip6_fib.c23
-rw-r--r--net/ipv6/ip6_flowlabel.c4
-rw-r--r--net/ipv6/ip6_tunnel.c8
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/mcast.c2
-rw-r--r--net/ipv6/netfilter/Kconfig4
-rw-r--r--net/ipv6/netfilter/Makefile3
-rw-r--r--net/ipv6/netfilter/nf_flow_table_ipv6.c0
-rw-r--r--net/ipv6/route.c21
-rw-r--r--net/ipv6/sit.c2
-rw-r--r--net/mac80211/mlme.c29
-rw-r--r--net/mctp/route.c11
-rw-r--r--net/mctp/test/route-test.c2
-rw-r--r--net/mpls/af_mpls.c2
-rw-r--r--net/mptcp/pm_netlink.c47
-rw-r--r--net/mptcp/protocol.h6
-rw-r--r--net/ncsi/ncsi-manage.c4
-rw-r--r--net/netfilter/nf_conntrack_core.c8
-rw-r--r--net/netfilter/nf_conntrack_netbios_ns.c5
-rw-r--r--net/netfilter/nf_conntrack_netlink.c3
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c9
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c59
-rw-r--r--net/netfilter/nf_tables_api.c5
-rw-r--r--net/netfilter/nft_byteorder.c12
-rw-r--r--net/netfilter/nft_connlimit.c13
-rw-r--r--net/netfilter/nft_ct.c5
-rw-r--r--net/netfilter/nft_exthdr.c2
-rw-r--r--net/netfilter/nft_last.c2
-rw-r--r--net/netfilter/nft_limit.c2
-rw-r--r--net/netfilter/nft_payload.c9
-rw-r--r--net/netfilter/nft_quota.c2
-rw-r--r--net/netfilter/nft_synproxy.c4
-rw-r--r--net/netfilter/x_tables.c10
-rw-r--r--net/netfilter/xt_hashlimit.c18
-rw-r--r--net/netfilter/xt_recent.c4
-rw-r--r--net/netfilter/xt_socket.c2
-rw-r--r--net/nfc/llcp_sock.c5
-rw-r--r--net/nfc/nci/uart.c5
-rw-r--r--net/packet/af_packet.c10
-rw-r--r--net/qrtr/mhi.c2
-rw-r--r--net/rxrpc/call_event.c8
-rw-r--r--net/rxrpc/output.c2
-rw-r--r--net/sched/act_api.c13
-rw-r--r--net/sched/cls_api.c17
-rw-r--r--net/sched/sch_api.c26
-rw-r--r--net/sched/sch_generic.c30
-rw-r--r--net/sched/sch_htb.c20
-rw-r--r--net/smc/af_smc.c202
-rw-r--r--net/smc/smc.h21
-rw-r--r--net/smc/smc_cdc.c3
-rw-r--r--net/smc/smc_clc.c2
-rw-r--r--net/smc/smc_core.c137
-rw-r--r--net/smc/smc_core.h12
-rw-r--r--net/smc/smc_diag.c8
-rw-r--r--net/smc/smc_pnet.c7
-rw-r--r--net/smc/smc_wr.h4
-rw-r--r--net/socket.c4
-rw-r--r--net/sunrpc/auth_gss/gss_generic_token.c6
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c4
-rw-r--r--net/sunrpc/cache.c24
-rw-r--r--net/sunrpc/clnt.c5
-rw-r--r--net/sunrpc/rpc_pipe.c4
-rw-r--r--net/sunrpc/stats.c2
-rw-r--r--net/sunrpc/svc.c175
-rw-r--r--net/sunrpc/svc_xprt.c11
-rw-r--r--net/sunrpc/sysfs.c52
-rw-r--r--net/sunrpc/xprtrdma/backchannel.c4
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c4
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c4
-rw-r--r--net/sunrpc/xprtrdma/transport.c4
-rw-r--r--net/sunrpc/xprtrdma/verbs.c26
-rw-r--r--net/sunrpc/xprtsock.c9
-rw-r--r--net/tipc/crypto.c2
-rw-r--r--net/tipc/link.c9
-rw-r--r--net/tipc/monitor.c2
-rw-r--r--net/tipc/name_distr.c2
-rw-r--r--net/tipc/node.c13
-rw-r--r--net/tls/tls_sw.c1
-rw-r--r--net/unix/garbage.c14
-rw-r--r--net/unix/scm.c6
-rw-r--r--net/vmw_vsock/af_vsock.c1
-rw-r--r--net/vmw_vsock/virtio_transport.c4
-rw-r--r--net/wireless/Makefile4
-rw-r--r--net/wireless/core.c17
-rw-r--r--net/xfrm/xfrm_policy.c3
-rw-r--r--samples/Kconfig9
-rw-r--r--samples/Makefile1
-rw-r--r--samples/bpf/offwaketime_kern.c4
-rw-r--r--samples/bpf/test_overhead_kprobe_kern.c11
-rw-r--r--samples/bpf/test_overhead_tp_kern.c5
-rw-r--r--samples/coresight/Makefile4
-rw-r--r--samples/coresight/coresight-cfg-sample.c73
-rw-r--r--samples/ftrace/ftrace-direct-modify.c4
-rw-r--r--samples/ftrace/ftrace-direct-too.c2
-rw-r--r--samples/ftrace/ftrace-direct.c2
-rw-r--r--samples/seccomp/dropper.c9
-rw-r--r--samples/trace_events/trace-events-sample.c3
-rw-r--r--samples/trace_events/trace-events-sample.h35
-rw-r--r--scripts/.gitignore1
-rw-r--r--scripts/Kbuild.include47
-rw-r--r--scripts/Makefile17
-rw-r--r--scripts/Makefile.build3
-rw-r--r--scripts/Makefile.extrawarn1
-rw-r--r--scripts/Makefile.lib35
-rw-r--r--scripts/Makefile.modinst4
-rw-r--r--scripts/Makefile.ubsan1
-rwxr-xr-xscripts/checkpatch.pl54
-rw-r--r--scripts/coccinelle/iterators/fen.cocci124
-rw-r--r--scripts/coccinelle/misc/bugon.cocci63
-rw-r--r--scripts/const_structs.checkpatch23
-rwxr-xr-xscripts/dtc/dtx_diff8
-rwxr-xr-xscripts/gen_autoksyms.sh11
-rwxr-xr-xscripts/get_maintainer.pl2
-rw-r--r--scripts/kconfig/Makefile3
-rw-r--r--scripts/kconfig/conf.c17
-rw-r--r--scripts/kconfig/confdata.c49
-rw-r--r--scripts/kconfig/preprocess.c2
-rwxr-xr-xscripts/kconfig/streamline_config.pl2
-rwxr-xr-xscripts/link-vmlinux.sh56
-rwxr-xr-xscripts/min-tool-version.sh2
-rw-r--r--scripts/mod/modpost.c15
-rwxr-xr-xscripts/remove-stale-files2
-rwxr-xr-xscripts/setlocalversion9
-rw-r--r--scripts/sorttable.c40
-rw-r--r--scripts/sorttable.h125
-rw-r--r--scripts/spelling.txt1
-rwxr-xr-xscripts/tags.sh126
-rw-r--r--security/integrity/digsig_asymmetric.c15
-rw-r--r--security/integrity/ima/ima_fs.c2
-rw-r--r--security/integrity/ima/ima_policy.c8
-rw-r--r--security/integrity/ima/ima_template.c10
-rw-r--r--security/integrity/integrity_audit.c2
-rw-r--r--security/security.c15
-rw-r--r--security/selinux/ss/conditional.c3
-rw-r--r--sound/core/Makefile1
-rw-r--r--sound/core/control_led.c2
-rw-r--r--sound/core/info.c4
-rw-r--r--sound/core/info_oss.c6
-rw-r--r--sound/core/init.c25
-rw-r--r--sound/core/jack.c3
-rw-r--r--sound/core/memalloc.c68
-rw-r--r--sound/core/misc.c2
-rw-r--r--sound/core/oss/pcm_oss.c2
-rw-r--r--sound/core/pcm.c6
-rw-r--r--sound/core/pcm_dmaengine.c5
-rw-r--r--sound/core/pcm_lib.c17
-rw-r--r--sound/core/pcm_native.c13
-rw-r--r--sound/core/seq/seq_queue.c14
-rw-r--r--sound/core/seq/seq_virmidi.c11
-rw-r--r--sound/core/sgbuf.c201
-rw-r--r--sound/drivers/virmidi.c3
-rw-r--r--sound/hda/ext/hdac_ext_stream.c216
-rw-r--r--sound/hda/hdac_stream.c30
-rw-r--r--sound/hda/intel-dsp-config.c53
-rw-r--r--sound/hda/intel-nhlt.c102
-rw-r--r--sound/hda/intel-sdw-acpi.c7
-rw-r--r--sound/isa/gus/gus_mem.c22
-rw-r--r--sound/pci/ac97/ac97_pcm.c2
-rw-r--r--sound/pci/hda/Kconfig33
-rw-r--r--sound/pci/hda/Makefile10
-rw-r--r--sound/pci/hda/cs35l41_hda.c542
-rw-r--r--sound/pci/hda/cs35l41_hda.h69
-rw-r--r--sound/pci/hda/cs35l41_hda_i2c.c68
-rw-r--r--sound/pci/hda/cs35l41_hda_spi.c65
-rw-r--r--sound/pci/hda/hda_auto_parser.c8
-rw-r--r--sound/pci/hda/hda_bind.c7
-rw-r--r--sound/pci/hda/hda_codec.c49
-rw-r--r--sound/pci/hda/hda_component.h20
-rw-r--r--sound/pci/hda/hda_controller.c1
-rw-r--r--sound/pci/hda/hda_generic.c17
-rw-r--r--sound/pci/hda/hda_generic.h5
-rw-r--r--sound/pci/hda/hda_intel.c17
-rw-r--r--sound/pci/hda/hda_jack.c11
-rw-r--r--sound/pci/hda/hda_jack.h1
-rw-r--r--sound/pci/hda/hda_local.h1
-rw-r--r--sound/pci/hda/hda_tegra.c43
-rw-r--r--sound/pci/hda/patch_cs8409-tables.c5
-rw-r--r--sound/pci/hda/patch_cs8409.c9
-rw-r--r--sound/pci/hda/patch_cs8409.h1
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c289
-rw-r--r--sound/pci/mixart/mixart_core.c5
-rw-r--r--sound/pci/mixart/mixart_core.h10
-rw-r--r--sound/ppc/beep.c2
-rw-r--r--sound/soc/amd/Kconfig9
-rw-r--r--sound/soc/amd/Makefile2
-rw-r--r--sound/soc/amd/acp-config.c124
-rw-r--r--sound/soc/amd/acp-da7219-max98357a.c20
-rw-r--r--sound/soc/amd/acp-pcm-dma.c15
-rw-r--r--sound/soc/amd/acp-rt5645.c4
-rw-r--r--sound/soc/amd/acp.h1
-rw-r--r--sound/soc/amd/acp/Kconfig8
-rw-r--r--sound/soc/amd/acp/acp-legacy-mach.c19
-rw-r--r--sound/soc/amd/acp/acp-mach-common.c31
-rw-r--r--sound/soc/amd/acp/acp-mach.h9
-rw-r--r--sound/soc/amd/acp/acp-sof-mach.c21
-rw-r--r--sound/soc/amd/acp3x-rt5682-max9836.c8
-rw-r--r--sound/soc/amd/mach-config.h28
-rw-r--r--sound/soc/amd/yc/acp6x-pdm-dma.c2
-rw-r--r--sound/soc/atmel/mikroe-proto.c6
-rw-r--r--sound/soc/atmel/tse850-pcm5142.c32
-rw-r--r--sound/soc/bcm/bcm63xx-i2s.h1
-rw-r--r--sound/soc/bcm/bcm63xx-pcm-whistler.c13
-rw-r--r--sound/soc/codecs/Kconfig36
-rw-r--r--sound/soc/codecs/Makefile12
-rw-r--r--sound/soc/codecs/adau1701.c94
-rw-r--r--sound/soc/codecs/ak4118.c18
-rw-r--r--sound/soc/codecs/ak4375.c610
-rw-r--r--sound/soc/codecs/cpcap.c2
-rw-r--r--sound/soc/codecs/cs35l35.c2
-rw-r--r--sound/soc/codecs/cs35l41-i2c.c19
-rw-r--r--sound/soc/codecs/cs35l41-lib.c (renamed from sound/soc/codecs/cs35l41-tables.c)578
-rw-r--r--sound/soc/codecs/cs35l41-spi.c20
-rw-r--r--sound/soc/codecs/cs35l41.c893
-rw-r--r--sound/soc/codecs/cs35l41.h749
-rw-r--r--sound/soc/codecs/cs4265.c15
-rw-r--r--sound/soc/codecs/cs42l42.c94
-rw-r--r--sound/soc/codecs/cs42l42.h6
-rw-r--r--sound/soc/codecs/es7241.c28
-rw-r--r--sound/soc/codecs/hdac_hda.c22
-rw-r--r--sound/soc/codecs/hdmi-codec.c2
-rw-r--r--sound/soc/codecs/jz4770.c9
-rw-r--r--sound/soc/codecs/lpass-rx-macro.c8
-rw-r--r--sound/soc/codecs/max9759.c31
-rw-r--r--sound/soc/codecs/max98373-sdw.c2
-rw-r--r--sound/soc/codecs/max9860.c12
-rw-r--r--sound/soc/codecs/msm8916-wcd-analog.c7
-rw-r--r--sound/soc/codecs/mt6660.c5
-rw-r--r--sound/soc/codecs/pcm3168a.c22
-rw-r--r--sound/soc/codecs/rt1308-sdw.c2
-rw-r--r--sound/soc/codecs/rt1316-sdw.c2
-rw-r--r--sound/soc/codecs/rt5640.c169
-rw-r--r--sound/soc/codecs/rt5640.h11
-rw-r--r--sound/soc/codecs/rt5663.c12
-rw-r--r--sound/soc/codecs/rt5668.c12
-rw-r--r--sound/soc/codecs/rt5682-i2c.c15
-rw-r--r--sound/soc/codecs/rt5682-sdw.c2
-rw-r--r--sound/soc/codecs/rt5682.c41
-rw-r--r--sound/soc/codecs/rt5682.h2
-rw-r--r--sound/soc/codecs/rt5682s.c46
-rw-r--r--sound/soc/codecs/rt700.c2
-rw-r--r--sound/soc/codecs/rt711-sdca.c2
-rw-r--r--sound/soc/codecs/rt711.c2
-rw-r--r--sound/soc/codecs/rt715-sdca.c2
-rw-r--r--sound/soc/codecs/rt715.c2
-rw-r--r--sound/soc/codecs/sdw-mockup.c2
-rw-r--r--sound/soc/codecs/sgtl5000.c5
-rw-r--r--sound/soc/codecs/simple-amplifier.c10
-rw-r--r--sound/soc/codecs/simple-mux.c10
-rw-r--r--sound/soc/codecs/ssm2305.c11
-rw-r--r--sound/soc/codecs/sta350.h2
-rw-r--r--sound/soc/codecs/tas2770.c7
-rw-r--r--sound/soc/codecs/tfa989x.c20
-rw-r--r--sound/soc/codecs/tlv320adc3xxx.c1317
-rw-r--r--sound/soc/codecs/tlv320aic26.h6
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c121
-rw-r--r--sound/soc/codecs/tlv320aic31xx.h2
-rw-r--r--sound/soc/codecs/wcd-mbhc-v2.c76
-rw-r--r--sound/soc/codecs/wcd9335.c17
-rw-r--r--sound/soc/codecs/wcd934x.c6
-rw-r--r--sound/soc/codecs/wcd938x.c39
-rw-r--r--sound/soc/codecs/wm_adsp.c62
-rw-r--r--sound/soc/codecs/wm_adsp.h8
-rw-r--r--sound/soc/codecs/wsa881x.c2
-rw-r--r--sound/soc/codecs/zl38060.c4
-rw-r--r--sound/soc/fsl/Kconfig1
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c15
-rw-r--r--sound/soc/fsl/fsl_asrc.c69
-rw-r--r--sound/soc/fsl/fsl_mqs.c2
-rw-r--r--sound/soc/fsl/imx-card.c49
-rw-r--r--sound/soc/fsl/imx-hdmi.c2
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c4
-rw-r--r--sound/soc/fsl/imx-spdif.c4
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c11
-rw-r--r--sound/soc/generic/audio-graph-card.c5
-rw-r--r--sound/soc/generic/audio-graph-card2.c4
-rw-r--r--sound/soc/generic/simple-card-utils.c45
-rw-r--r--sound/soc/generic/simple-card.c29
-rw-r--r--sound/soc/generic/test-component.c5
-rw-r--r--sound/soc/img/img-i2s-in.c8
-rw-r--r--sound/soc/img/img-i2s-out.c24
-rw-r--r--sound/soc/img/img-parallel-out.c24
-rw-r--r--sound/soc/img/img-spdif-in.c8
-rw-r--r--sound/soc/img/img-spdif-out.c24
-rw-r--r--sound/soc/img/pistachio-internal-dac.c9
-rw-r--r--sound/soc/intel/atom/sst-mfld-platform-pcm.c14
-rw-r--r--sound/soc/intel/boards/Kconfig20
-rw-r--r--sound/soc/intel/boards/Makefile2
-rw-r--r--sound/soc/intel/boards/bytcht_cx2072x.c2
-rw-r--r--sound/soc/intel/boards/bytcht_nocodec.c2
-rw-r--r--sound/soc/intel/boards/bytcr_rt5640.c86
-rw-r--r--sound/soc/intel/boards/hda_dsp_common.c2
-rw-r--r--sound/soc/intel/boards/sof_maxim_common.c180
-rw-r--r--sound/soc/intel/boards/sof_maxim_common.h16
-rw-r--r--sound/soc/intel/boards/sof_nau8825.c651
-rw-r--r--sound/soc/intel/boards/sof_realtek_common.c119
-rw-r--r--sound/soc/intel/boards/sof_realtek_common.h7
-rw-r--r--sound/soc/intel/boards/sof_rt5682.c179
-rw-r--r--sound/soc/intel/boards/sof_sdw.c158
-rw-r--r--sound/soc/intel/boards/sof_sdw_common.h7
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt715.c7
-rw-r--r--sound/soc/intel/boards/sof_sdw_rt715_sdca.c7
-rw-r--r--sound/soc/intel/catpt/dsp.c14
-rw-r--r--sound/soc/intel/catpt/pcm.c37
-rw-r--r--sound/soc/intel/common/soc-acpi-intel-adl-match.c48
-rw-r--r--sound/soc/intel/skylake/skl-nhlt.c102
-rw-r--r--sound/soc/intel/skylake/skl-pcm.c11
-rw-r--r--sound/soc/intel/skylake/skl-topology.c29
-rw-r--r--sound/soc/intel/skylake/skl-topology.h1
-rw-r--r--sound/soc/intel/skylake/skl.c5
-rw-r--r--sound/soc/intel/skylake/skl.h4
-rw-r--r--sound/soc/mediatek/Kconfig4
-rw-r--r--sound/soc/mediatek/mt2701/mt2701-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt6797/mt6797-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-max98090.c8
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5514.c7
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650-rt5676.c7
-rw-r--r--sound/soc/mediatek/mt8173/mt8173-rt5650.c7
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-da7219-max98357.c21
-rw-r--r--sound/soc/mediatek/mt8183/mt8183-mt6358-ts3a227-max98357.c26
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-afe-pcm.c2
-rw-r--r--sound/soc/mediatek/mt8192/mt8192-mt6359-rt1015-rt5682.c17
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-afe-clk.c12
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-afe-pcm.c9
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-dai-pcm.c73
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-mt6359-rt1011-rt5682.c147
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-mt6359-rt1019-rt5682.c470
-rw-r--r--sound/soc/mediatek/mt8195/mt8195-reg.h1
-rw-r--r--sound/soc/meson/aiu.c36
-rw-r--r--sound/soc/meson/axg-fifo.c16
-rw-r--r--sound/soc/meson/axg-pdm.c25
-rw-r--r--sound/soc/meson/axg-spdifin.c17
-rw-r--r--sound/soc/meson/axg-spdifout.c17
-rw-r--r--sound/soc/meson/axg-tdm-formatter.c50
-rw-r--r--sound/soc/meson/axg-tdm-interface.c25
-rw-r--r--sound/soc/meson/meson-card-utils.c8
-rw-r--r--sound/soc/meson/t9015.c14
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c8
-rw-r--r--sound/soc/qcom/Kconfig1
-rw-r--r--sound/soc/qcom/apq8016_sbc.c134
-rw-r--r--sound/soc/qcom/common.c20
-rw-r--r--sound/soc/qcom/lpass-platform.c8
-rw-r--r--sound/soc/qcom/qdsp6/q6apm-dai.c7
-rw-r--r--sound/soc/qcom/qdsp6/q6apm.c14
-rw-r--r--sound/soc/qcom/sc7180.c24
-rw-r--r--sound/soc/qcom/sdm845.c14
-rw-r--r--sound/soc/qcom/sm8250.c4
-rw-r--r--sound/soc/rockchip/rk3288_hdmi_analog.c10
-rw-r--r--sound/soc/samsung/aries_wm8994.c17
-rw-r--r--sound/soc/samsung/arndale.c5
-rw-r--r--sound/soc/samsung/idma.c2
-rw-r--r--sound/soc/samsung/littlemill.c5
-rw-r--r--sound/soc/samsung/lowland.c5
-rw-r--r--sound/soc/samsung/odroid.c4
-rw-r--r--sound/soc/samsung/smdk_wm8994.c4
-rw-r--r--sound/soc/samsung/smdk_wm8994pcm.c4
-rw-r--r--sound/soc/samsung/snow.c9
-rw-r--r--sound/soc/samsung/speyside.c5
-rw-r--r--sound/soc/samsung/tm2_wm5110.c3
-rw-r--r--sound/soc/samsung/tobermory.c5
-rw-r--r--sound/soc/sh/rz-ssi.c7
-rw-r--r--sound/soc/soc-acpi.c7
-rw-r--r--sound/soc/soc-component.c28
-rw-r--r--sound/soc/soc-core.c51
-rw-r--r--sound/soc/soc-dai.c40
-rw-r--r--sound/soc/soc-ops.c70
-rw-r--r--sound/soc/soc-pcm.c390
-rw-r--r--sound/soc/soc-topology.c2
-rw-r--r--sound/soc/sof/Kconfig18
-rw-r--r--sound/soc/sof/Makefile4
-rw-r--r--sound/soc/sof/amd/Kconfig33
-rw-r--r--sound/soc/sof/amd/Makefile11
-rw-r--r--sound/soc/sof/amd/acp-dsp-offset.h78
-rw-r--r--sound/soc/sof/amd/acp-ipc.c187
-rw-r--r--sound/soc/sof/amd/acp-loader.c199
-rw-r--r--sound/soc/sof/amd/acp-pcm.c82
-rw-r--r--sound/soc/sof/amd/acp-stream.c181
-rw-r--r--sound/soc/sof/amd/acp-trace.c84
-rw-r--r--sound/soc/sof/amd/acp.c446
-rw-r--r--sound/soc/sof/amd/acp.h226
-rw-r--r--sound/soc/sof/amd/pci-rn.c165
-rw-r--r--sound/soc/sof/amd/renoir.c186
-rw-r--r--sound/soc/sof/control.c61
-rw-r--r--sound/soc/sof/core.c135
-rw-r--r--sound/soc/sof/debug.c142
-rw-r--r--sound/soc/sof/imx/Kconfig46
-rw-r--r--sound/soc/sof/imx/imx-common.c28
-rw-r--r--sound/soc/sof/imx/imx-common.h11
-rw-r--r--sound/soc/sof/imx/imx-ops.h10
-rw-r--r--sound/soc/sof/imx/imx8.c220
-rw-r--r--sound/soc/sof/imx/imx8m.c260
-rw-r--r--sound/soc/sof/intel/apl.c7
-rw-r--r--sound/soc/sof/intel/atom.c64
-rw-r--r--sound/soc/sof/intel/atom.h4
-rw-r--r--sound/soc/sof/intel/bdw.c71
-rw-r--r--sound/soc/sof/intel/byt.c9
-rw-r--r--sound/soc/sof/intel/cnl.c34
-rw-r--r--sound/soc/sof/intel/hda-codec.c3
-rw-r--r--sound/soc/sof/intel/hda-ctrl.c2
-rw-r--r--sound/soc/sof/intel/hda-dai.c97
-rw-r--r--sound/soc/sof/intel/hda-dsp.c52
-rw-r--r--sound/soc/sof/intel/hda-ipc.c48
-rw-r--r--sound/soc/sof/intel/hda-loader.c104
-rw-r--r--sound/soc/sof/intel/hda-pcm.c127
-rw-r--r--sound/soc/sof/intel/hda-stream.c109
-rw-r--r--sound/soc/sof/intel/hda.c140
-rw-r--r--sound/soc/sof/intel/hda.h22
-rw-r--r--sound/soc/sof/intel/icl.c73
-rw-r--r--sound/soc/sof/intel/pci-tng.c9
-rw-r--r--sound/soc/sof/intel/shim.h11
-rw-r--r--sound/soc/sof/intel/tgl.c47
-rw-r--r--sound/soc/sof/ipc.c134
-rw-r--r--sound/soc/sof/loader.c16
-rw-r--r--sound/soc/sof/mediatek/Kconfig33
-rw-r--r--sound/soc/sof/mediatek/Makefile2
-rw-r--r--sound/soc/sof/mediatek/adsp_helper.h49
-rw-r--r--sound/soc/sof/mediatek/mt8195/Makefile3
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195-clk.c158
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195-clk.h28
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195-loader.c56
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195.c463
-rw-r--r--sound/soc/sof/mediatek/mt8195/mt8195.h158
-rw-r--r--sound/soc/sof/ops.c47
-rw-r--r--sound/soc/sof/ops.h93
-rw-r--r--sound/soc/sof/pcm.c118
-rw-r--r--sound/soc/sof/pm.c10
-rw-r--r--sound/soc/sof/sof-audio.c239
-rw-r--r--sound/soc/sof/sof-audio.h17
-rw-r--r--sound/soc/sof/sof-of-dev.c68
-rw-r--r--sound/soc/sof/sof-of-dev.h17
-rw-r--r--sound/soc/sof/sof-pci-dev.c19
-rw-r--r--sound/soc/sof/sof-priv.h82
-rw-r--r--sound/soc/sof/sof-probes.c2
-rw-r--r--sound/soc/sof/sof-probes.h2
-rw-r--r--sound/soc/sof/topology.c292
-rw-r--r--sound/soc/sof/trace.c18
-rw-r--r--sound/soc/sof/xtensa/core.c44
-rw-r--r--sound/soc/stm/stm32_adfsdm.c5
-rw-r--r--sound/soc/stm/stm32_i2s.c66
-rw-r--r--sound/soc/stm/stm32_sai.c37
-rw-r--r--sound/soc/stm/stm32_sai_sub.c29
-rw-r--r--sound/soc/stm/stm32_spdifrx.c48
-rw-r--r--sound/soc/sunxi/sun4i-codec.c3
-rw-r--r--sound/soc/sunxi/sun4i-spdif.c115
-rw-r--r--sound/soc/sunxi/sun8i-codec.c56
-rw-r--r--sound/soc/tegra/tegra20_i2s.c49
-rw-r--r--sound/soc/tegra/tegra20_spdif.c197
-rw-r--r--sound/soc/tegra/tegra20_spdif.h1
-rw-r--r--sound/soc/tegra/tegra210_mvc.c209
-rw-r--r--sound/soc/tegra/tegra210_mvc.h5
-rw-r--r--sound/soc/tegra/tegra_pcm.c6
-rw-r--r--sound/soc/tegra/tegra_pcm.h1
-rw-r--r--sound/soc/ti/davinci-mcasp.c21
-rw-r--r--sound/soc/ti/j721e-evm.c10
-rw-r--r--sound/soc/uniphier/Kconfig2
-rw-r--r--sound/soc/xilinx/xlnx_formatter_pcm.c27
-rw-r--r--sound/soc/xilinx/xlnx_spdif.c10
-rw-r--r--sound/sparc/dbri.c6
-rw-r--r--sound/usb/card.c7
-rw-r--r--sound/usb/format.c2
-rw-r--r--sound/usb/implicit.c4
-rw-r--r--sound/usb/mixer.c18
-rw-r--r--sound/usb/mixer.h2
-rw-r--r--sound/usb/mixer_maps.c29
-rw-r--r--sound/usb/mixer_quirks.c4
-rw-r--r--sound/usb/mixer_quirks.h2
-rw-r--r--sound/usb/power.h10
-rw-r--r--sound/usb/quirks-table.h2
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--sound/usb/usx2y/usbusx2y.c2
-rw-r--r--sound/virtio/virtio_card.c4
-rw-r--r--sound/x86/intel_hdmi_audio.c4
-rw-r--r--tools/accounting/getdelays.c8
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h3
-rw-r--r--tools/arch/x86/include/asm/msr-index.h17
-rw-r--r--tools/arch/x86/include/asm/required-features.h4
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h19
-rw-r--r--tools/arch/x86/include/uapi/asm/prctl.h26
-rw-r--r--tools/arch/x86/lib/memcpy_64.S12
-rw-r--r--tools/arch/x86/lib/memset_64.S6
-rw-r--r--tools/bpf/bpftool/skeleton/pid_iter.bpf.c4
-rw-r--r--tools/bpf/resolve_btfids/Makefile6
-rw-r--r--tools/bpf/runqslower/runqslower.bpf.c2
-rw-r--r--tools/bpf/runqslower/runqslower.c2
-rw-r--r--tools/bpf/runqslower/runqslower.h2
-rw-r--r--tools/build/Build.include2
-rw-r--r--tools/iio/iio_event_monitor.c1
-rw-r--r--tools/include/asm-generic/bitops.h1
-rw-r--r--tools/include/asm-generic/bitops/find.h145
-rw-r--r--tools/include/linux/bitmap.h7
-rw-r--r--tools/include/linux/find.h (renamed from include/asm-generic/bitops/find.h)54
-rw-r--r--tools/include/linux/hash.h5
-rw-r--r--tools/include/uapi/asm-generic/unistd.h5
-rw-r--r--tools/include/uapi/drm/drm.h18
-rw-r--r--tools/include/uapi/linux/kvm.h17
-rw-r--r--tools/include/uapi/linux/lirc.h229
-rw-r--r--tools/include/uapi/linux/perf_event.h7
-rw-r--r--tools/include/uapi/linux/prctl.h3
-rw-r--r--tools/include/uapi/sound/asound.h11
-rw-r--r--tools/lib/find_bit.c20
-rw-r--r--tools/lib/perf/Documentation/libperf.txt11
-rw-r--r--tools/lib/perf/cpumap.c113
-rw-r--r--tools/lib/perf/evlist.c19
-rw-r--r--tools/lib/perf/evsel.c115
-rw-r--r--tools/lib/perf/include/internal/cpumap.h14
-rw-r--r--tools/lib/perf/include/internal/evlist.h5
-rw-r--r--tools/lib/perf/include/internal/evsel.h4
-rw-r--r--tools/lib/perf/include/internal/mmap.h5
-rw-r--r--tools/lib/perf/include/perf/cpumap.h11
-rw-r--r--tools/lib/perf/include/perf/evsel.h14
-rw-r--r--tools/lib/perf/libperf.map3
-rw-r--r--tools/lib/perf/mmap.c102
-rw-r--r--tools/lib/perf/tests/test-cpumap.c11
-rw-r--r--tools/lib/perf/tests/test-evlist.c163
-rw-r--r--tools/lib/perf/tests/test-evsel.c5
-rw-r--r--tools/lib/subcmd/subcmd-util.h11
-rw-r--r--tools/lib/traceevent/event-parse.c59
-rw-r--r--tools/lib/traceevent/event-parse.h5
-rw-r--r--tools/lib/traceevent/parse-filter.c5
-rw-r--r--tools/objtool/arch/x86/decode.c13
-rw-r--r--tools/objtool/builtin-check.c3
-rw-r--r--tools/objtool/check.c32
-rw-r--r--tools/objtool/include/objtool/arch.h1
-rw-r--r--tools/objtool/include/objtool/builtin.h2
-rw-r--r--tools/perf/Documentation/perf-buildid-cache.txt5
-rw-r--r--tools/perf/Documentation/perf-config.txt9
-rw-r--r--tools/perf/Documentation/perf-list.txt48
-rw-r--r--tools/perf/Documentation/perf-record.txt15
-rw-r--r--tools/perf/Documentation/perf-stat.txt10
-rw-r--r--tools/perf/Documentation/perf-top.txt7
-rw-r--r--tools/perf/Makefile.config10
-rw-r--r--tools/perf/Makefile.perf4
-rw-r--r--tools/perf/arch/arm/include/perf_regs.h42
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c54
-rw-r--r--tools/perf/arch/arm64/include/perf_regs.h78
-rw-r--r--tools/perf/arch/arm64/util/machine.c7
-rw-r--r--tools/perf/arch/arm64/util/pmu.c2
-rw-r--r--tools/perf/arch/csky/include/perf_regs.h82
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl1
-rw-r--r--tools/perf/arch/mips/include/perf_regs.h69
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/powerpc/include/perf_regs.h66
-rw-r--r--tools/perf/arch/powerpc/util/event.c8
-rw-r--r--tools/perf/arch/riscv/include/perf_regs.h74
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/s390/include/perf_regs.h78
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl1
-rw-r--r--tools/perf/arch/x86/include/perf_regs.h82
-rw-r--r--tools/perf/arch/x86/util/evlist.c17
-rw-r--r--tools/perf/bench/epoll-ctl.c4
-rw-r--r--tools/perf/bench/epoll-wait.c4
-rw-r--r--tools/perf/bench/evlist-open-close.c4
-rw-r--r--tools/perf/bench/futex-hash.c4
-rw-r--r--tools/perf/bench/futex-lock-pi.c4
-rw-r--r--tools/perf/bench/futex-requeue.c4
-rw-r--r--tools/perf/bench/futex-wake-parallel.c4
-rw-r--r--tools/perf/bench/futex-wake.c4
-rw-r--r--tools/perf/builtin-bench.c5
-rw-r--r--tools/perf/builtin-buildid-cache.c25
-rw-r--r--tools/perf/builtin-c2c.c15
-rw-r--r--tools/perf/builtin-ftrace.c448
-rw-r--r--tools/perf/builtin-inject.c5
-rw-r--r--tools/perf/builtin-kmem.c2
-rw-r--r--tools/perf/builtin-record.c23
-rw-r--r--tools/perf/builtin-report.c4
-rw-r--r--tools/perf/builtin-sched.c71
-rw-r--r--tools/perf/builtin-script.c43
-rw-r--r--tools/perf/builtin-stat.c557
-rw-r--r--tools/perf/builtin-trace.c28
-rw-r--r--tools/perf/dlfilters/dlfilter-test-api-v0.c2
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/branch.json8
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/bus.json20
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/cache.json155
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/exception.json47
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/instruction.json143
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/memory.json38
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/other.json5
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/pipeline.json23
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/spe.json14
-rw-r--r--tools/perf/pmu-events/arch/arm64/arm/neoverse-n2/trace.json29
-rw-r--r--tools/perf/pmu-events/arch/arm64/common-and-microarch.json (renamed from tools/perf/pmu-events/arch/arm64/armv8-common-and-microarch.json)198
-rw-r--r--tools/perf/pmu-events/arch/arm64/mapfile.csv1
-rw-r--r--tools/perf/pmu-events/arch/arm64/recommended.json (renamed from tools/perf/pmu-events/arch/arm64/armv8-recommended.json)202
-rw-r--r--tools/perf/pmu-events/arch/test/test_soc/cpu/uncore.json16
-rw-r--r--tools/perf/pmu-events/jevents.c2
-rw-r--r--tools/perf/tests/Build1
-rw-r--r--tools/perf/tests/attr.c6
-rw-r--r--tools/perf/tests/attr/README2
-rw-r--r--tools/perf/tests/attr/test-record-graph-default2
-rw-r--r--tools/perf/tests/attr/test-record-graph-default-aarch649
-rw-r--r--tools/perf/tests/attr/test-record-graph-fp2
-rw-r--r--tools/perf/tests/attr/test-record-graph-fp-aarch649
-rw-r--r--tools/perf/tests/bitmap.c4
-rw-r--r--tools/perf/tests/builtin-test.c16
-rw-r--r--tools/perf/tests/cpumap.c6
-rw-r--r--tools/perf/tests/event_update.c8
-rw-r--r--tools/perf/tests/mem2node.c9
-rw-r--r--tools/perf/tests/mmap-basic.c5
-rw-r--r--tools/perf/tests/openat-syscall-all-cpus.c39
-rw-r--r--tools/perf/tests/parse-events.c49
-rw-r--r--tools/perf/tests/pmu-events.c32
-rwxr-xr-xtools/perf/tests/shell/stat_all_metricgroups.sh2
-rw-r--r--tools/perf/tests/sigtrap.c177
-rw-r--r--tools/perf/tests/stat.c3
-rw-r--r--tools/perf/tests/tests.h1
-rw-r--r--tools/perf/tests/topology.c58
-rwxr-xr-xtools/perf/trace/beauty/prctl_option.sh2
-rw-r--r--tools/perf/ui/browsers/annotate.c23
-rw-r--r--tools/perf/util/Build2
-rw-r--r--tools/perf/util/affinity.c10
-rw-r--r--tools/perf/util/annotate.c1
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.c2
-rw-r--r--tools/perf/util/arm-spe-decoder/arm-spe-decoder.h1
-rw-r--r--tools/perf/util/arm-spe.c67
-rw-r--r--tools/perf/util/arm64-frame-pointer-unwind-support.c63
-rw-r--r--tools/perf/util/arm64-frame-pointer-unwind-support.h10
-rw-r--r--tools/perf/util/auxtrace.c14
-rw-r--r--tools/perf/util/auxtrace.h5
-rw-r--r--tools/perf/util/bpf-loader.c18
-rw-r--r--tools/perf/util/bpf_counter.c29
-rw-r--r--tools/perf/util/bpf_counter.h4
-rw-r--r--tools/perf/util/bpf_counter_cgroup.c12
-rw-r--r--tools/perf/util/bpf_ftrace.c152
-rw-r--r--tools/perf/util/bpf_skel/func_latency.bpf.c114
-rw-r--r--tools/perf/util/callchain.c14
-rw-r--r--tools/perf/util/callchain.h4
-rw-r--r--tools/perf/util/counts.c8
-rw-r--r--tools/perf/util/counts.h14
-rw-r--r--tools/perf/util/cpumap.c253
-rw-r--r--tools/perf/util/cpumap.h125
-rw-r--r--tools/perf/util/cputopo.c11
-rw-r--r--tools/perf/util/cs-etm.c16
-rw-r--r--tools/perf/util/data-convert-bt.c2
-rw-r--r--tools/perf/util/debug.c2
-rw-r--r--tools/perf/util/env.c29
-rw-r--r--tools/perf/util/env.h3
-rw-r--r--tools/perf/util/evlist-hybrid.c11
-rw-r--r--tools/perf/util/evlist.c174
-rw-r--r--tools/perf/util/evlist.h52
-rw-r--r--tools/perf/util/evsel.c205
-rw-r--r--tools/perf/util/evsel.h33
-rw-r--r--tools/perf/util/expr.c37
-rw-r--r--tools/perf/util/ftrace.h81
-rw-r--r--tools/perf/util/header.c6
-rw-r--r--tools/perf/util/hist.c4
-rw-r--r--tools/perf/util/hist.h3
-rw-r--r--tools/perf/util/libunwind/arm64.c2
-rw-r--r--tools/perf/util/machine.c55
-rw-r--r--tools/perf/util/machine.h1
-rw-r--r--tools/perf/util/map_symbol.h1
-rw-r--r--tools/perf/util/mem-events.c29
-rw-r--r--tools/perf/util/metricgroup.c46
-rw-r--r--tools/perf/util/mmap.c19
-rw-r--r--tools/perf/util/mmap.h3
-rw-r--r--tools/perf/util/namespaces.c76
-rw-r--r--tools/perf/util/namespaces.h2
-rw-r--r--tools/perf/util/parse-events-hybrid.c9
-rw-r--r--tools/perf/util/parse-events.c77
-rw-r--r--tools/perf/util/parse-events.h1
-rw-r--r--tools/perf/util/parse-events.l2
-rw-r--r--tools/perf/util/parse-events.y17
-rw-r--r--tools/perf/util/perf_api_probe.c19
-rw-r--r--tools/perf/util/perf_event_attr_fprintf.c2
-rw-r--r--tools/perf/util/perf_regs.c666
-rw-r--r--tools/perf/util/perf_regs.h17
-rw-r--r--tools/perf/util/probe-event.c3
-rw-r--r--tools/perf/util/python.c12
-rw-r--r--tools/perf/util/record.c13
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c2
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c22
-rw-r--r--tools/perf/util/session.c38
-rw-r--r--tools/perf/util/smt.c73
-rw-r--r--tools/perf/util/sort.c40
-rw-r--r--tools/perf/util/sort.h3
-rw-r--r--tools/perf/util/stat-display.c157
-rw-r--r--tools/perf/util/stat-shadow.c308
-rw-r--r--tools/perf/util/stat.c47
-rw-r--r--tools/perf/util/stat.h9
-rw-r--r--tools/perf/util/svghelper.c10
-rw-r--r--tools/perf/util/synthetic-events.c43
-rw-r--r--tools/perf/util/synthetic-events.h3
-rw-r--r--tools/perf/util/top.c6
-rw-r--r--tools/perf/util/util.c15
-rw-r--r--tools/perf/util/util.h11
-rw-r--r--tools/power/acpi/.gitignore1
-rw-r--r--tools/power/acpi/Makefile16
-rw-r--r--tools/power/acpi/Makefile.rules2
-rw-r--r--tools/power/acpi/man/pfrut.8137
-rw-r--r--tools/power/acpi/tools/pfrut/Makefile23
-rw-r--r--tools/power/acpi/tools/pfrut/pfrut.c424
-rw-r--r--tools/scripts/Makefile.include2
-rw-r--r--tools/testing/cxl/Kbuild3
-rw-r--r--tools/testing/cxl/test/cxl.c68
-rw-r--r--tools/testing/cxl/test/mem.c99
-rw-r--r--tools/testing/cxl/test/mock.c30
-rw-r--r--tools/testing/cxl/test/mock.h6
-rw-r--r--tools/testing/kunit/kunit_kernel.py1
-rwxr-xr-xtools/testing/kunit/run_checks.py2
-rw-r--r--tools/testing/nvdimm/Kbuild8
-rw-r--r--tools/testing/nvdimm/dax_pmem_compat_test.c8
-rw-r--r--tools/testing/nvdimm/dax_pmem_core_test.c8
-rw-r--r--tools/testing/nvdimm/test/iomap.c43
-rw-r--r--tools/testing/nvdimm/test/ndtest.c4
-rw-r--r--tools/testing/nvdimm/test/nfit.c4
-rw-r--r--tools/testing/scatterlist/linux/mm.h3
-rw-r--r--tools/testing/selftests/Makefile3
-rw-r--r--tools/testing/selftests/alsa/.gitignore1
-rw-r--r--tools/testing/selftests/alsa/Makefile9
-rw-r--r--tools/testing/selftests/alsa/mixer-test.c705
-rw-r--r--tools/testing/selftests/arm64/fp/sve-ptrace.c11
-rw-r--r--tools/testing/selftests/bpf/prog_tests/d_path.c14
-rw-r--r--tools/testing/selftests/bpf/prog_tests/xdp_link.c61
-rw-r--r--tools/testing/selftests/bpf/progs/test_d_path_check_types.c32
-rw-r--r--tools/testing/selftests/bpf/progs/test_stacktrace_map.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_tracepoint.c6
-rw-r--r--tools/testing/selftests/bpf/progs/test_xdp_link.c6
-rw-r--r--tools/testing/selftests/bpf/test_lirc_mode2_user.c1
-rw-r--r--tools/testing/selftests/bpf/verifier/ringbuf.c95
-rw-r--r--tools/testing/selftests/bpf/verifier/spill_fill.c2
-rw-r--r--tools/testing/selftests/clone3/clone3.c2
-rwxr-xr-xtools/testing/selftests/cpufreq/main.sh2
-rw-r--r--tools/testing/selftests/exec/Makefile6
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func_set_ftrace_file.tc2
-rw-r--r--tools/testing/selftests/futex/Makefile4
-rw-r--r--tools/testing/selftests/ir/ir_loopback.c10
-rw-r--r--tools/testing/selftests/kselftest_harness.h4
-rw-r--r--tools/testing/selftests/kvm/.gitignore6
-rw-r--r--tools/testing/selftests/kvm/Makefile24
-rw-r--r--tools/testing/selftests/kvm/aarch64/arch_timer.c2
-rw-r--r--tools/testing/selftests/kvm/aarch64/get-reg-list.c50
-rw-r--r--tools/testing/selftests/kvm/aarch64/vgic_irq.c853
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/gic.h26
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/gic_v3.h (renamed from tools/testing/selftests/kvm/lib/aarch64/gic_v3.h)12
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/processor.h3
-rw-r--r--tools/testing/selftests/kvm/include/aarch64/vgic.h18
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util.h409
-rw-r--r--tools/testing/selftests/kvm/include/kvm_util_base.h398
-rw-r--r--tools/testing/selftests/kvm/include/riscv/processor.h135
-rw-r--r--tools/testing/selftests/kvm/include/ucall_common.h59
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h52
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic.c66
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic_private.h11
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/gic_v3.c206
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/processor.c82
-rw-r--r--tools/testing/selftests/kvm/lib/aarch64/vgic.c103
-rw-r--r--tools/testing/selftests/kvm/lib/guest_modes.c59
-rw-r--r--tools/testing/selftests/kvm/lib/kvm_util.c129
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/processor.c362
-rw-r--r--tools/testing/selftests/kvm/lib/riscv/ucall.c87
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/processor.c244
-rw-r--r--tools/testing/selftests/kvm/x86_64/amx_test.c450
-rw-r--r--tools/testing/selftests/kvm/x86_64/cpuid_test.c (renamed from tools/testing/selftests/kvm/x86_64/get_cpuid_test.c)30
-rw-r--r--tools/testing/selftests/kvm/x86_64/evmcs_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c434
-rw-r--r--tools/testing/selftests/kvm/x86_64/sev_migrate_tests.c59
-rw-r--r--tools/testing/selftests/kvm/x86_64/smm_test.c3
-rw-r--r--tools/testing/selftests/kvm/x86_64/state_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/tsc_msrs_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c4
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c139
-rw-r--r--tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c2
-rw-r--r--tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c218
-rwxr-xr-xtools/testing/selftests/lkdtm/stack-entropy.sh16
-rw-r--r--tools/testing/selftests/mincore/mincore_selftest.c20
-rw-r--r--tools/testing/selftests/mount_setattr/mount_setattr_test.c4
-rwxr-xr-xtools/testing/selftests/net/fcnal-test.sh3
-rw-r--r--tools/testing/selftests/net/ioam6_parser.c5
-rwxr-xr-xtools/testing/selftests/net/mptcp/mptcp_join.sh6
-rw-r--r--tools/testing/selftests/net/settings2
-rw-r--r--tools/testing/selftests/netfilter/Makefile2
-rwxr-xr-xtools/testing/selftests/netfilter/nft_concat_range.sh74
-rwxr-xr-xtools/testing/selftests/netfilter/nft_fib.sh1
-rwxr-xr-xtools/testing/selftests/netfilter/nft_nat.sh152
-rwxr-xr-xtools/testing/selftests/netfilter/nft_synproxy.sh117
-rwxr-xr-xtools/testing/selftests/netfilter/nft_zones_many.sh12
-rw-r--r--tools/testing/selftests/openat2/Makefile2
-rw-r--r--tools/testing/selftests/openat2/helpers.h12
-rw-r--r--tools/testing/selftests/openat2/openat2_test.c12
-rw-r--r--tools/testing/selftests/perf_events/sigtrap_threads.c17
-rw-r--r--tools/testing/selftests/pidfd/pidfd.h13
-rw-r--r--tools/testing/selftests/pidfd/pidfd_fdinfo_test.c22
-rw-r--r--tools/testing/selftests/pidfd/pidfd_test.c6
-rw-r--r--tools/testing/selftests/pidfd/pidfd_wait.c5
-rwxr-xr-xtools/testing/selftests/powerpc/security/mitigation-patching.sh5
-rw-r--r--tools/testing/selftests/powerpc/security/spectre_v2.c2
-rw-r--r--tools/testing/selftests/powerpc/signal/.gitignore2
-rw-r--r--tools/testing/selftests/powerpc/signal/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/signal/sigreturn_kernel.c132
-rw-r--r--tools/testing/selftests/powerpc/signal/sigreturn_unaligned.c43
-rw-r--r--tools/testing/selftests/rtc/settings2
-rw-r--r--tools/testing/selftests/seccomp/Makefile2
-rw-r--r--tools/testing/selftests/vDSO/vdso_test_abi.c135
-rw-r--r--tools/testing/selftests/vm/charge_reserved_hugetlb.sh34
-rw-r--r--tools/testing/selftests/vm/hmm-tests.c42
-rw-r--r--tools/testing/selftests/vm/hugepage-mremap.c46
-rw-r--r--tools/testing/selftests/vm/hugetlb_reparenting_test.sh21
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests.sh2
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c22
-rw-r--r--tools/testing/selftests/vm/write_hugetlb_memory.sh2
-rwxr-xr-xtools/testing/selftests/zram/zram.sh15
-rwxr-xr-xtools/testing/selftests/zram/zram01.sh33
-rwxr-xr-xtools/testing/selftests/zram/zram02.sh1
-rwxr-xr-xtools/testing/selftests/zram/zram_lib.sh134
-rw-r--r--tools/tracing/Makefile18
-rw-r--r--tools/tracing/rtla/Makefile104
-rw-r--r--tools/tracing/rtla/README.txt36
-rw-r--r--tools/tracing/rtla/src/osnoise.c878
-rw-r--r--tools/tracing/rtla/src/osnoise.h91
-rw-r--r--tools/tracing/rtla/src/osnoise_hist.c800
-rw-r--r--tools/tracing/rtla/src/osnoise_top.c578
-rw-r--r--tools/tracing/rtla/src/rtla.c87
-rw-r--r--tools/tracing/rtla/src/timerlat.c72
-rw-r--r--tools/tracing/rtla/src/timerlat.h4
-rw-r--r--tools/tracing/rtla/src/timerlat_hist.c821
-rw-r--r--tools/tracing/rtla/src/timerlat_top.c617
-rw-r--r--tools/tracing/rtla/src/trace.c192
-rw-r--r--tools/tracing/rtla/src/trace.h27
-rw-r--r--tools/tracing/rtla/src/utils.c433
-rw-r--r--tools/tracing/rtla/src/utils.h56
-rw-r--r--usr/Makefile7
-rw-r--r--usr/include/Makefile8
-rwxr-xr-xusr/include/headers_check.pl (renamed from scripts/headers_check.pl)0
-rw-r--r--virt/kvm/Kconfig6
-rw-r--r--virt/kvm/Makefile.kvm14
-rw-r--r--virt/kvm/async_pf.c2
-rw-r--r--virt/kvm/dirty_ring.c11
-rw-r--r--virt/kvm/eventfd.c8
-rw-r--r--virt/kvm/kvm_main.c1123
-rw-r--r--virt/kvm/kvm_mm.h44
-rw-r--r--virt/kvm/mmu_lock.h23
-rw-r--r--virt/kvm/pfncache.c337
5889 files changed, 183596 insertions, 106929 deletions
diff --git a/.clang-format b/.clang-format
index 15d4eaabc6b5..fa959436bcfd 100644
--- a/.clang-format
+++ b/.clang-format
@@ -216,7 +216,6 @@ ForEachMacros:
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
- - 'for_each_msi_vector'
- 'for_each_net'
- 'for_each_net_continue_reverse'
- 'for_each_netdev'
diff --git a/.mailmap b/.mailmap
index a83599921b1a..8cd44b0c6579 100644
--- a/.mailmap
+++ b/.mailmap
@@ -49,10 +49,12 @@ Andy Adamson <andros@citi.umich.edu>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@free-electrons.com>
Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
+Anup Patel <anup@brainfault.org> <anup.patel@wdc.com>
Archit Taneja <archit@ti.com>
Ard Biesheuvel <ardb@kernel.org> <ard.biesheuvel@linaro.org>
Arnaud Patard <arnaud.patard@rtp-net.org>
Arnd Bergmann <arnd@arndb.de>
+Atish Patra <atishp@atishpatra.org> <atish.patra@wdc.com>
Axel Dyks <xl@xlsigned.net>
Axel Lin <axel.lin@gmail.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
@@ -68,6 +70,7 @@ Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@bootlin.com>
Boris Brezillon <bbrezillon@kernel.org> <boris.brezillon@free-electrons.com>
Brian Avery <b.avery@hp.com>
Brian King <brking@us.ibm.com>
+Brian Silverman <bsilver16384@gmail.com> <brian.silverman@bluerivertech.com>
Changbin Du <changbin.du@intel.com> <changbin.du@gmail.com>
Changbin Du <changbin.du@intel.com> <changbin.du@intel.com>
Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
@@ -77,6 +80,9 @@ Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <cborntra@de.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntrae@de.ibm.com>
+Christian Brauner <brauner@kernel.org> <christian@brauner.io>
+Christian Brauner <brauner@kernel.org> <christian.brauner@canonical.com>
+Christian Brauner <brauner@kernel.org> <christian.brauner@ubuntu.com>
Christophe Ricard <christophe.ricard@gmail.com>
Christoph Hellwig <hch@lst.de>
Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
diff --git a/CREDITS b/CREDITS
index d8f63e8329e8..b97256d5bc24 100644
--- a/CREDITS
+++ b/CREDITS
@@ -315,6 +315,11 @@ S: Via Delle Palme, 9
S: Terni 05100
S: Italy
+N: Ohad Ben Cohen
+E: ohad@wizery.com
+D: Remote Processor (remoteproc) subsystem
+D: Remote Processor Messaging (rpmsg) subsystem
+
N: Krzysztof Benedyczak
E: golbi@mat.uni.torun.pl
W: http://www.mat.uni.torun.pl/~golbi
diff --git a/Documentation/ABI/obsolete/sysfs-class-dax b/Documentation/ABI/obsolete/sysfs-class-dax
deleted file mode 100644
index 5bcce27458e3..000000000000
--- a/Documentation/ABI/obsolete/sysfs-class-dax
+++ /dev/null
@@ -1,22 +0,0 @@
-What: /sys/class/dax/
-Date: May, 2016
-KernelVersion: v4.7
-Contact: nvdimm@lists.linux.dev
-Description: Device DAX is the device-centric analogue of Filesystem
- DAX (CONFIG_FS_DAX). It allows memory ranges to be
- allocated and mapped without need of an intervening file
- system. Device DAX is strict, precise and predictable.
- Specifically this interface:
-
- 1. Guarantees fault granularity with respect to a given
- page size (pte, pmd, or pud) set at configuration time.
-
- 2. Enforces deterministic behavior by being strict about
- what fault scenarios are supported.
-
- The /sys/class/dax/ interface enumerates all the
- device-dax instances in the system. The ABI is
- deprecated and will be removed after 2020. It is
- replaced with the DAX bus interface /sys/bus/dax/ where
- device-dax instances can be found under
- /sys/bus/dax/devices/
diff --git a/Documentation/ABI/stable/sysfs-block b/Documentation/ABI/stable/sysfs-block
new file mode 100644
index 000000000000..8dd3e84a8aad
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-block
@@ -0,0 +1,676 @@
+What: /sys/block/<disk>/alignment_offset
+Date: April 2009
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Storage devices may report a physical block size that is
+ bigger than the logical block size (for instance a drive
+ with 4KB physical sectors exposing 512-byte logical
+ blocks to the operating system). This parameter
+ indicates how many bytes the beginning of the device is
+ offset from the disk's natural alignment.
+
+
+What: /sys/block/<disk>/discard_alignment
+Date: May 2011
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Devices that support discard functionality may
+ internally allocate space in units that are bigger than
+ the exported logical block size. The discard_alignment
+ parameter indicates how many bytes the beginning of the
+ device is offset from the internal allocation unit's
+ natural alignment.
+
+
+What: /sys/block/<disk>/diskseq
+Date: February 2021
+Contact: Matteo Croce <mcroce@microsoft.com>
+Description:
+		The /sys/block/<disk>/diskseq file reports the disk
+		sequence number, which is a monotonically increasing
+		number assigned to every drive.
+		Some devices, like the loop device, refresh this number
+		every time the backing file is changed.
+		The value type is a 64-bit unsigned integer.
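+
+		A minimal illustration of reading it (the disk name "sda"
+		and the value shown are hypothetical)::
+
+		  $ cat /sys/block/sda/diskseq
+		  3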
+
+
+What: /sys/block/<disk>/inflight
+Date: October 2009
+Contact: Jens Axboe <axboe@kernel.dk>, Nikanth Karthikesan <knikanth@suse.de>
+Description:
+ Reports the number of I/O requests currently in progress
+ (pending / in flight) in a device driver. This can be less
+ than the number of requests queued in the block device queue.
+ The report contains 2 fields: one for read requests
+ and one for write requests.
+ The value type is unsigned int.
+		Cf. Documentation/block/stat.rst, which contains a single
+		value for requests in flight.
+		This is related to /sys/block/<disk>/queue/nr_requests
+		and, for SCSI devices, also to their queue_depth.
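+
+		For example, reading the file returns the two counters on a
+		single line (disk name and counts are hypothetical)::
+
+		  $ cat /sys/block/sda/inflight
+		         1        5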
+
+
+What: /sys/block/<disk>/integrity/device_is_integrity_capable
+Date: July 2014
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Indicates whether a storage device is capable of storing
+ integrity metadata. Set if the device is T10 PI-capable.
+
+
+What: /sys/block/<disk>/integrity/format
+Date: June 2008
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Metadata format for integrity capable block device.
+ E.g. T10-DIF-TYPE1-CRC.
+
+
+What: /sys/block/<disk>/integrity/protection_interval_bytes
+Date: July 2015
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Describes the number of data bytes which are protected
+ by one integrity tuple. Typically the device's logical
+ block size.
+
+
+What: /sys/block/<disk>/integrity/read_verify
+Date: June 2008
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Indicates whether the block layer should verify the
+ integrity of read requests serviced by devices that
+ support sending integrity metadata.
+
+
+What: /sys/block/<disk>/integrity/tag_size
+Date: June 2008
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Number of bytes of integrity tag space available per
+ 512 bytes of data.
+
+
+What: /sys/block/<disk>/integrity/write_generate
+Date: June 2008
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Indicates whether the block layer should automatically
+ generate checksums for write requests bound for
+ devices that support receiving integrity metadata.
+
+
+What: /sys/block/<disk>/<partition>/alignment_offset
+Date: April 2009
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Storage devices may report a physical block size that is
+ bigger than the logical block size (for instance a drive
+ with 4KB physical sectors exposing 512-byte logical
+ blocks to the operating system). This parameter
+ indicates how many bytes the beginning of the partition
+ is offset from the disk's natural alignment.
+
+
+What: /sys/block/<disk>/<partition>/discard_alignment
+Date: May 2011
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ Devices that support discard functionality may
+ internally allocate space in units that are bigger than
+ the exported logical block size. The discard_alignment
+ parameter indicates how many bytes the beginning of the
+ partition is offset from the internal allocation unit's
+ natural alignment.
+
+
+What: /sys/block/<disk>/<partition>/stat
+Date: February 2008
+Contact: Jerome Marchand <jmarchan@redhat.com>
+Description:
+ The /sys/block/<disk>/<partition>/stat files display the
+ I/O statistics of partition <partition>. The format is the
+ same as the format of /sys/block/<disk>/stat.
+
+
+What: /sys/block/<disk>/queue/add_random
+Date: June 2010
+Contact: linux-block@vger.kernel.org
+Description:
+		[RW] This file allows one to turn off the disk entropy
+		contribution. The default value of this file is '1' (on).
+
+
+What: /sys/block/<disk>/queue/chunk_sectors
+Date: September 2016
+Contact: Hannes Reinecke <hare@suse.com>
+Description:
+ [RO] chunk_sectors has different meaning depending on the type
+ of the disk. For a RAID device (dm-raid), chunk_sectors
+ indicates the size in 512B sectors of the RAID volume stripe
+ segment. For a zoned block device, either host-aware or
+ host-managed, chunk_sectors indicates the size in 512B sectors
+		of the zones of the device, with the possible exception of
+		the last zone of the device, which may be smaller.
+
+
+What: /sys/block/<disk>/queue/dax
+Date: June 2016
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] This file indicates whether the device supports Direct
+ Access (DAX), used by CPU-addressable storage to bypass the
+ pagecache. It shows '1' if true, '0' if not.
+
+
+What: /sys/block/<disk>/queue/discard_granularity
+Date: May 2011
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ [RO] Devices that support discard functionality may internally
+ allocate space using units that are bigger than the logical
+ block size. The discard_granularity parameter indicates the size
+ of the internal allocation unit in bytes if reported by the
+ device. Otherwise the discard_granularity will be set to match
+ the device's physical block size. A discard_granularity of 0
+ means that the device does not support discard functionality.
+
+
+What: /sys/block/<disk>/queue/discard_max_bytes
+Date: May 2011
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ [RW] While discard_max_hw_bytes is the hardware limit for the
+		device, this setting is the software limit. Some devices
+		exhibit large latencies when large discards are issued;
+		setting this value lower will make Linux issue smaller
+		discards and can potentially help reduce the latencies
+		induced by large discard operations.
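+
+		As a sketch, the software limit can be lowered below the
+		hardware limit with a plain write (disk name and value are
+		examples only)::
+
+		  $ echo 67108864 > /sys/block/sda/queue/discard_max_bytes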
+
+
+What: /sys/block/<disk>/queue/discard_max_hw_bytes
+Date: July 2015
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] Devices that support discard functionality may have
+ internal limits on the number of bytes that can be trimmed or
+ unmapped in a single operation. The `discard_max_hw_bytes`
+ parameter is set by the device driver to the maximum number of
+ bytes that can be discarded in a single operation. Discard
+ requests issued to the device must not exceed this limit. A
+ `discard_max_hw_bytes` value of 0 means that the device does not
+ support discard functionality.
+
+
+What: /sys/block/<disk>/queue/discard_zeroes_data
+Date: May 2011
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ [RO] Will always return 0. Don't rely on any specific behavior
+ for discards, and don't read this file.
+
+
+What: /sys/block/<disk>/queue/fua
+Date: May 2018
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] Whether or not the block driver supports the FUA flag for
+		write requests. FUA stands for Force Unit Access. If the FUA
+		flag is set, write requests must bypass the volatile cache
+		of the storage device.
+
+
+What: /sys/block/<disk>/queue/hw_sector_size
+Date: January 2008
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] This is the hardware sector size of the device, in bytes.
+
+
+What: /sys/block/<disk>/queue/independent_access_ranges/
+Date: October 2021
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] The presence of this sub-directory of the
+ /sys/block/xxx/queue/ directory indicates that the device is
+ capable of executing requests targeting different sector ranges
+ in parallel. For instance, single LUN multi-actuator hard-disks
+ will have an independent_access_ranges directory if the device
+		correctly advertises the sector ranges of its actuators.
+
+ The independent_access_ranges directory contains one directory
+ per access range, with each range described using the sector
+ (RO) attribute file to indicate the first sector of the range
+ and the nr_sectors (RO) attribute file to indicate the total
+ number of sectors in the range starting from the first sector of
+ the range. For example, a dual-actuator hard-disk will have the
+		following independent_access_ranges entries::
+
+ $ tree /sys/block/<disk>/queue/independent_access_ranges/
+ /sys/block/<disk>/queue/independent_access_ranges/
+ |-- 0
+ | |-- nr_sectors
+ | `-- sector
+ `-- 1
+ |-- nr_sectors
+ `-- sector
+
+		The sector and nr_sectors attributes use 512B sector units,
+ regardless of the actual block size of the device. Independent
+ access ranges do not overlap and include all sectors within the
+ device capacity. The access ranges are numbered in increasing
+ order of the range start sector, that is, the sector attribute
+ of range 0 always has the value 0.
+
+
+What: /sys/block/<disk>/queue/io_poll
+Date: November 2015
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] When read, this file shows whether polling is enabled (1)
+ or disabled (0). Writing '0' to this file will disable polling
+ for this device. Writing any non-zero value will enable this
+ feature.
+
+
+What: /sys/block/<disk>/queue/io_poll_delay
+Date: November 2016
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] If polling is enabled, this controls what kind of polling
+ will be performed. It defaults to -1, which is classic polling.
+ In this mode, the CPU will repeatedly ask for completions
+ without giving up any time. If set to 0, a hybrid polling mode
+ is used, where the kernel will attempt to make an educated guess
+ at when the IO will complete. Based on this guess, the kernel
+ will put the process issuing IO to sleep for an amount of time,
+ before entering a classic poll loop. This mode might be a little
+ slower than pure classic polling, but it will be more efficient.
+		If set to a value larger than 0, the kernel will put the
+		process issuing IO to sleep for this number of microseconds
+		before entering classic polling.
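+
+		For instance, the three modes described above correspond to
+		writes such as (disk name hypothetical)::
+
+		  $ echo -1 > /sys/block/sda/queue/io_poll_delay  # classic
+		  $ echo 0 > /sys/block/sda/queue/io_poll_delay   # hybrid
+		  $ echo 50 > /sys/block/sda/queue/io_poll_delay  # 50 usec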
+
+
+What: /sys/block/<disk>/queue/io_timeout
+Date: November 2018
+Contact: Weiping Zhang <zhangweiping@didiglobal.com>
+Description:
+ [RW] io_timeout is the request timeout in milliseconds. If a
+ request does not complete in this time then the block driver
+ timeout handler is invoked. That timeout handler can decide to
+ retry the request, to fail it or to start a device recovery
+ strategy.
+
+
+What: /sys/block/<disk>/queue/iostats
+Date: January 2009
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] This file is used to control (on/off) the iostats
+ accounting of the disk.
+
+
+What: /sys/block/<disk>/queue/logical_block_size
+Date: May 2009
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ [RO] This is the smallest unit the storage device can address.
+ It is typically 512 bytes.
+
+
+What: /sys/block/<disk>/queue/max_active_zones
+Date: July 2020
+Contact: Niklas Cassel <niklas.cassel@wdc.com>
+Description:
+ [RO] For zoned block devices (zoned attribute indicating
+ "host-managed" or "host-aware"), the sum of zones belonging to
+ any of the zone states: EXPLICIT OPEN, IMPLICIT OPEN or CLOSED,
+ is limited by this value. If this value is 0, there is no limit.
+
+ If the host attempts to exceed this limit, the driver should
+ report this error with BLK_STS_ZONE_ACTIVE_RESOURCE, which user
+ space may see as the EOVERFLOW errno.
+
+
+What: /sys/block/<disk>/queue/max_discard_segments
+Date: February 2017
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] The maximum number of DMA scatter/gather entries in a
+ discard request.
+
+
+What: /sys/block/<disk>/queue/max_hw_sectors_kb
+Date: September 2004
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] This is the maximum number of kilobytes supported in a
+ single data transfer.
+
+
+What: /sys/block/<disk>/queue/max_integrity_segments
+Date: September 2010
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] Maximum number of elements in a DMA scatter/gather list
+ with integrity data that will be submitted by the block layer
+ core to the associated block driver.
+
+
+What: /sys/block/<disk>/queue/max_open_zones
+Date: July 2020
+Contact: Niklas Cassel <niklas.cassel@wdc.com>
+Description:
+ [RO] For zoned block devices (zoned attribute indicating
+ "host-managed" or "host-aware"), the sum of zones belonging to
+ any of the zone states: EXPLICIT OPEN or IMPLICIT OPEN, is
+ limited by this value. If this value is 0, there is no limit.
+
+
+What: /sys/block/<disk>/queue/max_sectors_kb
+Date: September 2004
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] This is the maximum number of kilobytes that the block
+ layer will allow for a filesystem request. Must be smaller than
+ or equal to the maximum size allowed by the hardware.
+
+
+What: /sys/block/<disk>/queue/max_segment_size
+Date: March 2010
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] Maximum size in bytes of a single element in a DMA
+ scatter/gather list.
+
+
+What: /sys/block/<disk>/queue/max_segments
+Date: March 2010
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] Maximum number of elements in a DMA scatter/gather list
+ that is submitted to the associated block driver.
+
+
+What: /sys/block/<disk>/queue/minimum_io_size
+Date: April 2009
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ [RO] Storage devices may report a granularity or preferred
+ minimum I/O size which is the smallest request the device can
+ perform without incurring a performance penalty. For disk
+ drives this is often the physical block size. For RAID arrays
+ it is often the stripe chunk size. A properly aligned multiple
+ of minimum_io_size is the preferred request size for workloads
+ where a high number of I/O operations is desired.
+
+
+What: /sys/block/<disk>/queue/nomerges
+Date: January 2010
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] Standard I/O elevator operations include attempts to merge
+ contiguous I/Os. For known random I/O loads these attempts will
+ always fail and result in extra cycles being spent in the
+ kernel. This allows one to turn off this behavior in one of two
+ ways: When set to 1, complex merge checks are disabled, but the
+ simple one-shot merges with the previous I/O request are
+ enabled. When set to 2, all merge tries are disabled. The
+ default value is 0 - which enables all types of merge tries.
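+
+ For example, to disable all merge attempts on an illustrative
+ disk sda::
+
+     # echo 2 > /sys/block/sda/queue/nomerges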
+
+
+What: /sys/block/<disk>/queue/nr_requests
+Date: July 2003
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] This controls how many requests may be allocated in the
+ block layer for read or write requests. Note that the total
+ allocated number may be twice this amount, since it applies only
+ to reads or writes (not the accumulated sum).
+
+ To avoid priority inversion through request starvation, a
+ request queue maintains a separate request pool per each cgroup
+ when CONFIG_BLK_CGROUP is enabled, and this parameter applies to
+ each such per-block-cgroup request pool. IOW, if there are N
+ block cgroups, each request queue may have up to N request
+ pools, each independently regulated by nr_requests.
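+
+ For example, to allow up to 256 read and 256 write requests on
+ an illustrative disk sda::
+
+     # echo 256 > /sys/block/sda/queue/nr_requests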
+
+
+What: /sys/block/<disk>/queue/nr_zones
+Date: November 2018
+Contact: Damien Le Moal <damien.lemoal@wdc.com>
+Description:
+ [RO] nr_zones indicates the total number of zones of a zoned
+ block device ("host-aware" or "host-managed" zone model). For
+ regular block devices, the value is always 0.
+
+
+What: /sys/block/<disk>/queue/optimal_io_size
+Date: April 2009
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ [RO] Storage devices may report an optimal I/O size, which is
+ the device's preferred unit for sustained I/O. This is rarely
+ reported for disk drives. For RAID arrays it is usually the
+ stripe width or the internal track size. A properly aligned
+ multiple of optimal_io_size is the preferred request size for
+ workloads where sustained throughput is desired. If no optimal
+ I/O size is reported this file contains 0.
+
+
+What: /sys/block/<disk>/queue/physical_block_size
+Date: May 2009
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ [RO] This is the smallest unit a physical storage device can
+ write atomically. It is usually the same as the logical block
+ size but may be bigger. One example is SATA drives with 4KB
+ sectors that expose a 512-byte logical block size to the
+ operating system. For stacked block devices the
+ physical_block_size variable contains the maximum
+ physical_block_size of the component devices.
+
+
+What: /sys/block/<disk>/queue/read_ahead_kb
+Date: May 2004
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] Maximum number of kilobytes to read-ahead for filesystems
+ on this block device.
+
+
+What: /sys/block/<disk>/queue/rotational
+Date: January 2009
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] This file is used to indicate whether the device is of
+ rotational or non-rotational type.
+
+
+What: /sys/block/<disk>/queue/rq_affinity
+Date: September 2008
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] If this option is '1', the block layer will migrate request
+ completions to the cpu "group" that originally submitted the
+ request. For some workloads this provides a significant
+ reduction in CPU cycles due to caching effects.
+
+ For storage configurations that need to maximize distribution of
+ completion processing, setting this option to '2' forces the
+ completion to run on the requesting cpu (bypassing the "group"
+ aggregation logic).
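+
+ For example, to force completions to run on the requesting cpu
+ of an illustrative disk sda::
+
+     # echo 2 > /sys/block/sda/queue/rq_affinity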
+
+
+What: /sys/block/<disk>/queue/scheduler
+Date: October 2004
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] When read, this file will display the current and available
+ IO schedulers for this block device. The currently active IO
+ scheduler will be enclosed in [] brackets. Writing an IO
+ scheduler name to this file will switch control of this block
+ device to that new IO scheduler. Note that writing an IO
+ scheduler name to this file will attempt to load that IO
+ scheduler module, if it isn't already present in the system.
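+
+ For example (sda and the listed schedulers are illustrative)::
+
+     # cat /sys/block/sda/queue/scheduler
+     [none] mq-deadline kyber bfq
+     # echo bfq > /sys/block/sda/queue/scheduler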
+
+
+What: /sys/block/<disk>/queue/stable_writes
+Date: September 2020
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] This file will contain '1' if memory must not be modified
+ while it is being used in a write request to this device. When
+ this is the case and the kernel is performing writeback of a
+ page, the kernel will wait for writeback to complete before
+ allowing the page to be modified again, rather than allowing
+ immediate modification as is normally the case. This
+ restriction arises when the device accesses the memory multiple
+ times where the same data must be seen every time -- for
+ example, once to calculate a checksum and once to actually write
+ the data. If no such restriction exists, this file will contain
+ '0'. This file is writable for testing purposes.
+
+
+What: /sys/block/<disk>/queue/throttle_sample_time
+Date: March 2017
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] This is the time window, in milliseconds, over which
+ blk-throttle samples data. blk-throttle makes decisions based on
+ these samples. A lower value means cgroups see smoother
+ throughput, but higher CPU overhead. This exists only when
+ CONFIG_BLK_DEV_THROTTLING_LOW is enabled.
+
+
+What: /sys/block/<disk>/queue/virt_boundary_mask
+Date: April 2021
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] This file shows the I/O segment memory alignment mask for
+ the block device. I/O requests to this device will be split
+ between segments wherever either the memory address of the end
+ of the previous segment or the memory address of the beginning
+ of the current segment is not aligned to virt_boundary_mask + 1
+ bytes.
+
+
+What: /sys/block/<disk>/queue/wbt_lat_usec
+Date: November 2016
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] If the device is registered for writeback throttling, then
+ this file shows the target minimum read latency. If this latency
+ is exceeded in a given window of time (see wb_window_usec), then
+ the writeback throttling will start scaling back writes. Writing
+ a value of '0' to this file disables the feature. Writing a
+ value of '-1' to this file resets the value to the default
+ setting.
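+
+ For example, to set a 75 usec read latency target on an
+ illustrative disk sda and then restore the default::
+
+     # echo 75 > /sys/block/sda/queue/wbt_lat_usec
+     # echo -1 > /sys/block/sda/queue/wbt_lat_usec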
+
+
+What: /sys/block/<disk>/queue/write_cache
+Date: April 2016
+Contact: linux-block@vger.kernel.org
+Description:
+ [RW] When read, this file will display whether the device has
+ write back caching enabled or not. It will return "write back"
+ for the former case, and "write through" for the latter. Writing
+ to this file can change the kernel's view of the device, but it
+ doesn't alter the device state. This means that it might not be
+ safe to toggle the setting from "write back" to "write through",
+ since that will also eliminate cache flushes issued by the
+ kernel.
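+
+ For example (sda is an illustrative disk name)::
+
+     # cat /sys/block/sda/queue/write_cache
+     write back
+     # echo "write through" > /sys/block/sda/queue/write_cache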
+
+
+What: /sys/block/<disk>/queue/write_same_max_bytes
+Date: January 2012
+Contact: Martin K. Petersen <martin.petersen@oracle.com>
+Description:
+ [RO] Some devices support a write same operation in which a
+ single data block can be written to a range of several
+ contiguous blocks on storage. This can be used to wipe areas on
+ disk or to initialize drives in a RAID configuration.
+ write_same_max_bytes indicates how many bytes can be written in
+ a single write same command. If write_same_max_bytes is 0, write
+ same is not supported by the device.
+
+
+What: /sys/block/<disk>/queue/write_zeroes_max_bytes
+Date: November 2016
+Contact: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
+Description:
+ [RO] Some devices support a write zeroes operation, in which a
+ single request can be issued to zero out a range of contiguous
+ blocks on storage without carrying any payload in the request.
+ This can be used to optimize writing zeroes to such devices.
+ write_zeroes_max_bytes indicates how many bytes can be written
+ in a single write zeroes command. If write_zeroes_max_bytes is
+ 0, write zeroes is not supported by the device.
+
+
+What: /sys/block/<disk>/queue/zone_append_max_bytes
+Date: May 2020
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] This is the maximum number of bytes that can be written to
+ a sequential zone of a zoned block device using a zone append
+ write operation (REQ_OP_ZONE_APPEND). This value is always 0 for
+ regular block devices.
+
+
+What: /sys/block/<disk>/queue/zone_write_granularity
+Date: January 2021
+Contact: linux-block@vger.kernel.org
+Description:
+ [RO] This indicates the alignment constraint, in bytes, for
+ write operations in sequential zones of zoned block devices
+ (devices with a zoned attribute that reports "host-managed" or
+ "host-aware"). This value is always 0 for regular block devices.
+
+
+What: /sys/block/<disk>/queue/zoned
+Date: September 2016
+Contact: Damien Le Moal <damien.lemoal@wdc.com>
+Description:
+ [RO] zoned indicates if the device is a zoned block device and
+ the zone model of the device if it is indeed zoned. The
+ possible values indicated by zoned are "none" for regular block
+ devices and "host-aware" or "host-managed" for zoned block
+ devices. The characteristics of host-aware and host-managed
+ zoned block devices are described in the ZBC (Zoned Block
+ Commands) and ZAC (Zoned Device ATA Command Set) standards.
+ These standards also define the "drive-managed" zone model.
+ However, since drive-managed zoned block devices do not support
+ zone commands, they will be treated as regular block devices and
+ zoned will report "none".
+
+
+What: /sys/block/<disk>/stat
+Date: February 2008
+Contact: Jerome Marchand <jmarchan@redhat.com>
+Description:
+ The /sys/block/<disk>/stat file displays the I/O
+ statistics of disk <disk>. It contains 17 fields:
+
+ == ==============================================
+ 1 reads completed successfully
+ 2 reads merged
+ 3 sectors read
+ 4 time spent reading (ms)
+ 5 writes completed
+ 6 writes merged
+ 7 sectors written
+ 8 time spent writing (ms)
+ 9 I/Os currently in progress
+ 10 time spent doing I/Os (ms)
+ 11 weighted time spent doing I/Os (ms)
+ 12 discards completed
+ 13 discards merged
+ 14 sectors discarded
+ 15 time spent discarding (ms)
+ 16 flush requests completed
+ 17 time spent flushing (ms)
+ == ==============================================
+
+ For more details refer to Documentation/admin-guide/iostats.rst
diff --git a/Documentation/ABI/stable/sysfs-driver-dma-idxd b/Documentation/ABI/stable/sysfs-driver-dma-idxd
index df4afbccf037..0c2b613f2373 100644
--- a/Documentation/ABI/stable/sysfs-driver-dma-idxd
+++ b/Documentation/ABI/stable/sysfs-driver-dma-idxd
@@ -41,14 +41,14 @@ KernelVersion: 5.6.0
Contact: dmaengine@vger.kernel.org
Description: The maximum number of groups can be created under this device.
-What: /sys/bus/dsa/devices/dsa<m>/max_tokens
-Date: Oct 25, 2019
-KernelVersion: 5.6.0
+What: /sys/bus/dsa/devices/dsa<m>/max_read_buffers
+Date: Dec 10, 2021
+KernelVersion: 5.17.0
Contact: dmaengine@vger.kernel.org
-Description: The total number of bandwidth tokens supported by this device.
- The bandwidth tokens represent resources within the DSA
+Description: The total number of read buffers supported by this device.
+ The read buffers represent resources within the DSA
implementation, and these resources are allocated by engines to
- support operations.
+ support operations. See DSA spec v1.2 9.2.4 Total Read Buffers.
What: /sys/bus/dsa/devices/dsa<m>/max_transfer_size
Date: Oct 25, 2019
@@ -115,13 +115,13 @@ KernelVersion: 5.6.0
Contact: dmaengine@vger.kernel.org
Description: To indicate if this device is configurable or not.
-What: /sys/bus/dsa/devices/dsa<m>/token_limit
-Date: Oct 25, 2019
-KernelVersion: 5.6.0
+What: /sys/bus/dsa/devices/dsa<m>/read_buffer_limit
+Date: Dec 10, 2021
+KernelVersion: 5.17.0
Contact: dmaengine@vger.kernel.org
-Description: The maximum number of bandwidth tokens that may be in use at
+Description: The maximum number of read buffers that may be in use at
one time by operations that access low bandwidth memory in the
- device.
+ device. See DSA spec v1.2 9.2.8 GENCFG on Global Read Buffer Limit.
What: /sys/bus/dsa/devices/dsa<m>/cmd_status
Date: Aug 28, 2020
@@ -220,8 +220,38 @@ Contact: dmaengine@vger.kernel.org
Description: Show the current number of entries in this WQ if WQ Occupancy
Support bit WQ capabilities is 1.
+What: /sys/bus/dsa/devices/wq<m>.<n>/enqcmds_retries
+Date: Oct 29, 2021
+KernelVersion: 5.17.0
+Contact: dmaengine@vger.kernel.org
+Description: Indicates the number of retries for an enqcmds submission on a shared wq.
+ The maximum value that can be set is 64.
+
What: /sys/bus/dsa/devices/engine<m>.<n>/group_id
Date: Oct 25, 2019
KernelVersion: 5.6.0
Contact: dmaengine@vger.kernel.org
Description: The group that this engine belongs to.
+
+What: /sys/bus/dsa/devices/group<m>.<n>/use_read_buffer_limit
+Date: Dec 10, 2021
+KernelVersion: 5.17.0
+Contact: dmaengine@vger.kernel.org
+Description: Enable the use of global read buffer limit for the group. See DSA
+ spec v1.2 9.2.18 GRPCFG Use Global Read Buffer Limit.
+
+What: /sys/bus/dsa/devices/group<m>.<n>/read_buffers_allowed
+Date: Dec 10, 2021
+KernelVersion: 5.17.0
+Contact: dmaengine@vger.kernel.org
+Description: Indicates max number of read buffers that may be in use at one time
+ by all engines in the group. See DSA spec v1.2 9.2.18 GRPCFG Read
+ Buffers Allowed.
+
+What: /sys/bus/dsa/devices/group<m>.<n>/read_buffers_reserved
+Date: Dec 10, 2021
+KernelVersion: 5.17.0
+Contact: dmaengine@vger.kernel.org
+Description: Indicates the number of Read Buffers reserved for the use of
+ engines in the group. See DSA spec v1.2 9.2.18 GRPCFG Read Buffers
+ Reserved.
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uac1 b/Documentation/ABI/testing/configfs-usb-gadget-uac1
index b576b3d6ea6d..d4b8cf40a9e4 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uac1
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uac1
@@ -27,6 +27,6 @@ Description:
(in 1/256 dB)
p_volume_res playback volume control resolution
(in 1/256 dB)
- req_number the number of pre-allocated request
+ req_number the number of pre-allocated requests
for both capture and playback
===================== =======================================
diff --git a/Documentation/ABI/testing/configfs-usb-gadget-uac2 b/Documentation/ABI/testing/configfs-usb-gadget-uac2
index 244d96650123..7fb3dbe26857 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uac2
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uac2
@@ -30,4 +30,6 @@ Description:
(in 1/256 dB)
p_volume_res playback volume control resolution
(in 1/256 dB)
+ req_number the number of pre-allocated requests
+ for both capture and playback
===================== =======================================
diff --git a/Documentation/ABI/testing/debugfs-driver-habanalabs b/Documentation/ABI/testing/debugfs-driver-habanalabs
index 63c46d9d538f..2667cbf940f3 100644
--- a/Documentation/ABI/testing/debugfs-driver-habanalabs
+++ b/Documentation/ABI/testing/debugfs-driver-habanalabs
@@ -21,11 +21,11 @@ Description: Allow the root user to disable/enable in runtime the clock
a different engine to disable/enable its clock gating feature.
The bitmask is composed of 20 bits:
- ======= ============
+ ======= ============
0 - 7 DMA channels
8 - 11 MME engines
12 - 19 TPC engines
- ======= ============
+ ======= ============
The bit's location of a specific engine can be determined
using (1 << GAUDI_ENGINE_ID_*). GAUDI_ENGINE_ID_* values
@@ -155,6 +155,13 @@ Description: Triggers an I2C transaction that is generated by the device's
CPU. Writing to this file generates a write transaction while
reading from the file generates a read transaction
+What: /sys/kernel/debug/habanalabs/hl<n>/i2c_len
+Date: Dec 2021
+KernelVersion: 5.17
+Contact: obitton@habana.ai
+Description: Sets I2C length in bytes for I2C transaction that is generated by
+ the device's CPU
+
What: /sys/kernel/debug/habanalabs/hl<n>/i2c_reg
Date: Jan 2019
KernelVersion: 5.1
@@ -226,12 +233,6 @@ Description: Gets the state dump occurring on a CS timeout or failure.
Writing an integer X discards X state dumps, so that the
next read would return X+1-st newest state dump.
-What: /sys/kernel/debug/habanalabs/hl<n>/timeout_locked
-Date: Sep 2021
-KernelVersion: 5.16
-Contact: obitton@habana.ai
-Description: Sets the command submission timeout value in seconds.
-
What: /sys/kernel/debug/habanalabs/hl<n>/stop_on_err
Date: Mar 2020
KernelVersion: 5.6
@@ -239,6 +240,12 @@ Contact: ogabbay@kernel.org
Description: Sets the stop-on_error option for the device engines. Value of
"0" is for disable, otherwise enable.
+What: /sys/kernel/debug/habanalabs/hl<n>/timeout_locked
+Date: Sep 2021
+KernelVersion: 5.16
+Contact: obitton@habana.ai
+Description: Sets the command submission timeout value in seconds.
+
What: /sys/kernel/debug/habanalabs/hl<n>/userptr
Date: Jan 2019
KernelVersion: 5.1
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
deleted file mode 100644
index b16b0c45a272..000000000000
--- a/Documentation/ABI/testing/sysfs-block
+++ /dev/null
@@ -1,346 +0,0 @@
-What: /sys/block/<disk>/stat
-Date: February 2008
-Contact: Jerome Marchand <jmarchan@redhat.com>
-Description:
- The /sys/block/<disk>/stat files displays the I/O
- statistics of disk <disk>. They contain 11 fields:
-
- == ==============================================
- 1 reads completed successfully
- 2 reads merged
- 3 sectors read
- 4 time spent reading (ms)
- 5 writes completed
- 6 writes merged
- 7 sectors written
- 8 time spent writing (ms)
- 9 I/Os currently in progress
- 10 time spent doing I/Os (ms)
- 11 weighted time spent doing I/Os (ms)
- 12 discards completed
- 13 discards merged
- 14 sectors discarded
- 15 time spent discarding (ms)
- 16 flush requests completed
- 17 time spent flushing (ms)
- == ==============================================
-
- For more details refer Documentation/admin-guide/iostats.rst
-
-
-What: /sys/block/<disk>/inflight
-Date: October 2009
-Contact: Jens Axboe <axboe@kernel.dk>, Nikanth Karthikesan <knikanth@suse.de>
-Description:
- Reports the number of I/O requests currently in progress
- (pending / in flight) in a device driver. This can be less
- than the number of requests queued in the block device queue.
- The report contains 2 fields: one for read requests
- and one for write requests.
- The value type is unsigned int.
- Cf. Documentation/block/stat.rst which contains a single value for
- requests in flight.
- This is related to nr_requests in Documentation/block/queue-sysfs.rst
- and for SCSI device also its queue_depth.
-
-
-What: /sys/block/<disk>/diskseq
-Date: February 2021
-Contact: Matteo Croce <mcroce@microsoft.com>
-Description:
- The /sys/block/<disk>/diskseq files reports the disk
- sequence number, which is a monotonically increasing
- number assigned to every drive.
- Some devices, like the loop device, refresh such number
- every time the backing file is changed.
- The value type is 64 bit unsigned.
-
-
-What: /sys/block/<disk>/<part>/stat
-Date: February 2008
-Contact: Jerome Marchand <jmarchan@redhat.com>
-Description:
- The /sys/block/<disk>/<part>/stat files display the
- I/O statistics of partition <part>. The format is the
- same as the above-written /sys/block/<disk>/stat
- format.
-
-
-What: /sys/block/<disk>/integrity/format
-Date: June 2008
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Metadata format for integrity capable block device.
- E.g. T10-DIF-TYPE1-CRC.
-
-
-What: /sys/block/<disk>/integrity/read_verify
-Date: June 2008
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Indicates whether the block layer should verify the
- integrity of read requests serviced by devices that
- support sending integrity metadata.
-
-
-What: /sys/block/<disk>/integrity/tag_size
-Date: June 2008
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Number of bytes of integrity tag space available per
- 512 bytes of data.
-
-
-What: /sys/block/<disk>/integrity/device_is_integrity_capable
-Date: July 2014
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Indicates whether a storage device is capable of storing
- integrity metadata. Set if the device is T10 PI-capable.
-
-What: /sys/block/<disk>/integrity/protection_interval_bytes
-Date: July 2015
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Describes the number of data bytes which are protected
- by one integrity tuple. Typically the device's logical
- block size.
-
-What: /sys/block/<disk>/integrity/write_generate
-Date: June 2008
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Indicates whether the block layer should automatically
- generate checksums for write requests bound for
- devices that support receiving integrity metadata.
-
-What: /sys/block/<disk>/alignment_offset
-Date: April 2009
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Storage devices may report a physical block size that is
- bigger than the logical block size (for instance a drive
- with 4KB physical sectors exposing 512-byte logical
- blocks to the operating system). This parameter
- indicates how many bytes the beginning of the device is
- offset from the disk's natural alignment.
-
-What: /sys/block/<disk>/<partition>/alignment_offset
-Date: April 2009
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Storage devices may report a physical block size that is
- bigger than the logical block size (for instance a drive
- with 4KB physical sectors exposing 512-byte logical
- blocks to the operating system). This parameter
- indicates how many bytes the beginning of the partition
- is offset from the disk's natural alignment.
-
-What: /sys/block/<disk>/queue/logical_block_size
-Date: May 2009
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- This is the smallest unit the storage device can
- address. It is typically 512 bytes.
-
-What: /sys/block/<disk>/queue/physical_block_size
-Date: May 2009
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- This is the smallest unit a physical storage device can
- write atomically. It is usually the same as the logical
- block size but may be bigger. One example is SATA
- drives with 4KB sectors that expose a 512-byte logical
- block size to the operating system. For stacked block
- devices the physical_block_size variable contains the
- maximum physical_block_size of the component devices.
-
-What: /sys/block/<disk>/queue/minimum_io_size
-Date: April 2009
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Storage devices may report a granularity or preferred
- minimum I/O size which is the smallest request the
- device can perform without incurring a performance
- penalty. For disk drives this is often the physical
- block size. For RAID arrays it is often the stripe
- chunk size. A properly aligned multiple of
- minimum_io_size is the preferred request size for
- workloads where a high number of I/O operations is
- desired.
-
-What: /sys/block/<disk>/queue/optimal_io_size
-Date: April 2009
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Storage devices may report an optimal I/O size, which is
- the device's preferred unit for sustained I/O. This is
- rarely reported for disk drives. For RAID arrays it is
- usually the stripe width or the internal track size. A
- properly aligned multiple of optimal_io_size is the
- preferred request size for workloads where sustained
- throughput is desired. If no optimal I/O size is
- reported this file contains 0.
-
-What: /sys/block/<disk>/queue/nomerges
-Date: January 2010
-Contact:
-Description:
- Standard I/O elevator operations include attempts to
- merge contiguous I/Os. For known random I/O loads these
- attempts will always fail and result in extra cycles
- being spent in the kernel. This allows one to turn off
- this behavior on one of two ways: When set to 1, complex
- merge checks are disabled, but the simple one-shot merges
- with the previous I/O request are enabled. When set to 2,
- all merge tries are disabled. The default value is 0 -
- which enables all types of merge tries.
-
-What: /sys/block/<disk>/discard_alignment
-Date: May 2011
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Devices that support discard functionality may
- internally allocate space in units that are bigger than
- the exported logical block size. The discard_alignment
- parameter indicates how many bytes the beginning of the
- device is offset from the internal allocation unit's
- natural alignment.
-
-What: /sys/block/<disk>/<partition>/discard_alignment
-Date: May 2011
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Devices that support discard functionality may
- internally allocate space in units that are bigger than
- the exported logical block size. The discard_alignment
- parameter indicates how many bytes the beginning of the
- partition is offset from the internal allocation unit's
- natural alignment.
-
-What: /sys/block/<disk>/queue/discard_granularity
-Date: May 2011
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Devices that support discard functionality may
- internally allocate space using units that are bigger
- than the logical block size. The discard_granularity
- parameter indicates the size of the internal allocation
- unit in bytes if reported by the device. Otherwise the
- discard_granularity will be set to match the device's
- physical block size. A discard_granularity of 0 means
- that the device does not support discard functionality.
-
-What: /sys/block/<disk>/queue/discard_max_bytes
-Date: May 2011
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Devices that support discard functionality may have
- internal limits on the number of bytes that can be
- trimmed or unmapped in a single operation. Some storage
- protocols also have inherent limits on the number of
- blocks that can be described in a single command. The
- discard_max_bytes parameter is set by the device driver
- to the maximum number of bytes that can be discarded in
- a single operation. Discard requests issued to the
- device must not exceed this limit. A discard_max_bytes
- value of 0 means that the device does not support
- discard functionality.
-
-What: /sys/block/<disk>/queue/discard_zeroes_data
-Date: May 2011
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Will always return 0. Don't rely on any specific behavior
- for discards, and don't read this file.
-
-What: /sys/block/<disk>/queue/write_same_max_bytes
-Date: January 2012
-Contact: Martin K. Petersen <martin.petersen@oracle.com>
-Description:
- Some devices support a write same operation in which a
- single data block can be written to a range of several
- contiguous blocks on storage. This can be used to wipe
- areas on disk or to initialize drives in a RAID
- configuration. write_same_max_bytes indicates how many
- bytes can be written in a single write same command. If
- write_same_max_bytes is 0, write same is not supported
- by the device.
-
-What: /sys/block/<disk>/queue/write_zeroes_max_bytes
-Date: November 2016
-Contact: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
-Description:
- Devices that support write zeroes operation in which a
- single request can be issued to zero out the range of
- contiguous blocks on storage without having any payload
- in the request. This can be used to optimize writing zeroes
- to the devices. write_zeroes_max_bytes indicates how many
- bytes can be written in a single write zeroes command. If
- write_zeroes_max_bytes is 0, write zeroes is not supported
- by the device.
-
-What: /sys/block/<disk>/queue/zoned
-Date: September 2016
-Contact: Damien Le Moal <damien.lemoal@wdc.com>
-Description:
- zoned indicates if the device is a zoned block device
- and the zone model of the device if it is indeed zoned.
- The possible values indicated by zoned are "none" for
- regular block devices and "host-aware" or "host-managed"
- for zoned block devices. The characteristics of
- host-aware and host-managed zoned block devices are
- described in the ZBC (Zoned Block Commands) and ZAC
- (Zoned Device ATA Command Set) standards. These standards
- also define the "drive-managed" zone model. However,
- since drive-managed zoned block devices do not support
- zone commands, they will be treated as regular block
- devices and zoned will report "none".
-
-What: /sys/block/<disk>/queue/nr_zones
-Date: November 2018
-Contact: Damien Le Moal <damien.lemoal@wdc.com>
-Description:
- nr_zones indicates the total number of zones of a zoned block
- device ("host-aware" or "host-managed" zone model). For regular
- block devices, the value is always 0.
-
-What: /sys/block/<disk>/queue/max_active_zones
-Date: July 2020
-Contact: Niklas Cassel <niklas.cassel@wdc.com>
-Description:
- For zoned block devices (zoned attribute indicating
- "host-managed" or "host-aware"), the sum of zones belonging to
- any of the zone states: EXPLICIT OPEN, IMPLICIT OPEN or CLOSED,
- is limited by this value. If this value is 0, there is no limit.
-
-What: /sys/block/<disk>/queue/max_open_zones
-Date: July 2020
-Contact: Niklas Cassel <niklas.cassel@wdc.com>
-Description:
- For zoned block devices (zoned attribute indicating
- "host-managed" or "host-aware"), the sum of zones belonging to
- any of the zone states: EXPLICIT OPEN or IMPLICIT OPEN,
- is limited by this value. If this value is 0, there is no limit.
-
-What: /sys/block/<disk>/queue/chunk_sectors
-Date: September 2016
-Contact: Hannes Reinecke <hare@suse.com>
-Description:
- chunk_sectors has different meaning depending on the type
- of the disk. For a RAID device (dm-raid), chunk_sectors
- indicates the size in 512B sectors of the RAID volume
- stripe segment. For a zoned block device, either
- host-aware or host-managed, chunk_sectors indicates the
- size in 512B sectors of the zones of the device, with
- the eventual exception of the last zone of the device
- which may be smaller.
-
-What: /sys/block/<disk>/queue/io_timeout
-Date: November 2018
-Contact: Weiping Zhang <zhangweiping@didiglobal.com>
-Description:
- io_timeout is the request timeout in milliseconds. If a request
- does not complete in this time then the block driver timeout
- handler is invoked. That timeout handler can decide to retry
- the request, to fail it or to start a device recovery strategy.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818 b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
new file mode 100644
index 000000000000..f6c035752639
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-filter-admv8818
@@ -0,0 +1,16 @@
+What: /sys/bus/iio/devices/iio:deviceX/filter_mode_available
+KernelVersion:
+Contact: linux-iio@vger.kernel.org
+Description:
+ Reading this returns the valid values that can be written to the
+ on_altvoltage0_mode attribute:
+
+ - auto -> Adjust bandpass filter to track changes in input clock rate.
+ - manual -> Disable/unregister the clock rate notifier / input clock tracking.
+
+What: /sys/bus/iio/devices/iio:deviceX/filter_mode
+KernelVersion:
+Contact: linux-iio@vger.kernel.org
+Description:
+ This attribute configures the filter mode.
+ Reading returns the actual mode.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-frequency-admv1013 b/Documentation/ABI/testing/sysfs-bus-iio-frequency-admv1013
new file mode 100644
index 000000000000..de1e323e5d47
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-iio-frequency-admv1013
@@ -0,0 +1,38 @@
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltage0-1_i_calibphase
+KernelVersion:
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read/write unscaled value for the Local Oscillator path quadrature I phase shift.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltage0-1_q_calibphase
+KernelVersion:
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read/write unscaled value for the Local Oscillator path quadrature Q phase shift.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltage0_i_calibbias
+KernelVersion:
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read/write value for the Local Oscillator Feedthrough Offset Calibration I Positive
+ side.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltage0_q_calibbias
+KernelVersion:
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read/write value for the Local Oscillator Feedthrough Offset Calibration Q Positive side.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltage1_i_calibbias
+KernelVersion:
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read/write raw value for the Local Oscillator Feedthrough Offset Calibration I Negative
+ side.
+
+What: /sys/bus/iio/devices/iio:deviceX/in_altvoltage1_q_calibbias
+KernelVersion:
+Contact: linux-iio@vger.kernel.org
+Description:
+ Read/write raw value for the Local Oscillator Feedthrough Offset Calibration Q Negative
+ side.
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 2ebe5708b4bc..7efe31ed3a25 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -244,6 +244,15 @@ Description:
is permitted, "u2" if only u2 is permitted, "u1_u2" if both u1 and
u2 are permitted.
+What: /sys/bus/usb/devices/.../<hub_interface>/port<X>/connector
+Date: December 2021
+Contact: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:
+ Link to the USB Type-C connector when available. This link is
+ only created when USB Type-C Connector Class is enabled, and
+ only if the system firmware is capable of describing the
+ connection between a port and its connector.
+
What: /sys/bus/usb/devices/.../power/usb2_lpm_l1_timeout
Date: May 2013
Contact: Mathias Nyman <mathias.nyman@linux.intel.com>
diff --git a/Documentation/ABI/testing/sysfs-bus-vdpa b/Documentation/ABI/testing/sysfs-bus-vdpa
new file mode 100644
index 000000000000..28a6111202ba
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-vdpa
@@ -0,0 +1,57 @@
+What: /sys/bus/vdpa/driver_autoprobe
+Date: March 2020
+Contact: virtualization@lists.linux-foundation.org
+Description:
+ This file determines whether new devices are immediately bound
+ to a driver after the creation. It initially contains 1, which
+ means the kernel automatically binds devices to a compatible
+ driver immediately after they are created.
+
+ Writing "0" to this file disable this feature, any other string
+ enable it.
+
+What: /sys/bus/vdpa/driver_probe
+Date: March 2020
+Contact: virtualization@lists.linux-foundation.org
+Description:
+ Writing a device name to this file will cause the kernel to bind
+ the device to a compatible driver.
+
+ This can be useful when /sys/bus/vdpa/driver_autoprobe is
+ disabled.
+
+What: /sys/bus/vdpa/drivers/.../bind
+Date: March 2020
+Contact: virtualization@lists.linux-foundation.org
+Description:
+ Writing a device name to this file will cause the driver to
+ attempt to bind to the device. This is useful for overriding
+ default bindings.
+
+What: /sys/bus/vdpa/drivers/.../unbind
+Date: March 2020
+Contact: virtualization@lists.linux-foundation.org
+Description:
+ Writing a device name to this file will cause the driver to
+ attempt to unbind from the device. This may be useful when
+ overriding default bindings.
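+
+ For example (vdpa0 and virtio_vdpa are illustrative names)::
+
+     # echo vdpa0 > /sys/bus/vdpa/drivers/virtio_vdpa/unbind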
+
+What: /sys/bus/vdpa/devices/.../driver_override
+Date: November 2021
+Contact: virtualization@lists.linux-foundation.org
+Description:
+ This file allows the driver for a device to be specified.
+ When specified, only a driver with a name matching the value
+ written to driver_override will have an opportunity to bind to
+ the device. The override is specified by writing a string to the
+ driver_override file (echo vhost-vdpa > driver_override) and may
+ be cleared with an empty string (echo > driver_override).
+ This returns the device to the standard driver matching rules.
+ Writing to driver_override does not automatically unbind the
+ device from its current driver or make any attempt to
+ automatically load the specified driver. If no driver with a
+ matching name is currently loaded in the kernel, the device will
+ not bind to any driver. This also allows devices to opt-out of
+ driver binding using a driver_override name such as "none".
+ Only a single driver may be specified in the override; there is
+ no support for parsing delimiters.
diff --git a/Documentation/ABI/testing/sysfs-class-power b/Documentation/ABI/testing/sysfs-class-power
index fde21d900420..859501366777 100644
--- a/Documentation/ABI/testing/sysfs-class-power
+++ b/Documentation/ABI/testing/sysfs-class-power
@@ -468,6 +468,7 @@ Description:
auto: Charge normally, respect thresholds
inhibit-charge: Do not charge while AC is attached
force-discharge: Force discharge while AC is attached
+ ================ ====================================
What: /sys/class/power_supply/<supply_name>/technology
Date: May 2007
diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu
index 69c65da16dff..61f5676a7429 100644
--- a/Documentation/ABI/testing/sysfs-devices-system-cpu
+++ b/Documentation/ABI/testing/sysfs-devices-system-cpu
@@ -666,3 +666,18 @@ Description: Preferred MTE tag checking mode
================ ==============================================
See also: Documentation/arm64/memory-tagging-extension.rst
+
+What: /sys/devices/system/cpu/nohz_full
+Date: Apr 2015
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:
+ (RO) the list of CPUs that are in nohz_full mode.
+ These CPUs are set by boot parameter "nohz_full=".
+
+What: /sys/devices/system/cpu/isolated
+Date: Apr 2015
+Contact: Linux kernel mailing list <linux-kernel@vger.kernel.org>
+Description:
+ (RO) the list of CPUs that are isolated and don't
+ participate in load balancing. These CPUs are set by
+ boot parameter "isolcpus=".
diff --git a/Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing b/Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing
index b363827da437..910df0e5815a 100644
--- a/Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing
+++ b/Documentation/ABI/testing/sysfs-driver-aspeed-uart-routing
@@ -1,4 +1,4 @@
-What: /sys/bus/platform/drivers/aspeed-uart-routing/*/uart*
+What: /sys/bus/platform/drivers/aspeed-uart-routing/\*/uart\*
Date: September 2021
Contact: Oskar Senft <osk@google.com>
Chia-Wei Wang <chiawei_wang@aspeedtech.com>
@@ -9,7 +9,7 @@ Description: Selects the RX source of the UARTx device.
depends on the selected file.
e.g.
- cat /sys/bus/platform/drivers/aspeed-uart-routing/*.uart_routing/uart1
+ cat /sys/bus/platform/drivers/aspeed-uart-routing/\*.uart_routing/uart1
[io1] io2 io3 io4 uart2 uart3 uart4 io6
In this case, UART1 gets its input from IO1 (physical serial port 1).
@@ -17,7 +17,7 @@ Description: Selects the RX source of the UARTx device.
Users: OpenBMC. Proposed changes should be mailed to
openbmc@lists.ozlabs.org
-What: /sys/bus/platform/drivers/aspeed-uart-routing/*/io*
+What: /sys/bus/platform/drivers/aspeed-uart-routing/\*/io\*
Date: September 2021
Contact: Oskar Senft <osk@google.com>
Chia-Wei Wang <chiawei_wang@aspeedtech.com>
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index b268e3e18b4a..2416b03ff283 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -112,6 +112,11 @@ Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
Description: Set timeout to issue discard commands during umount.
Default: 5 secs
+What: /sys/fs/f2fs/<disk>/pending_discard
+Date: November 2021
+Contact: "Jaegeuk Kim" <jaegeuk@kernel.org>
+Description: Shows the number of pending discard commands in the queue.
+
What: /sys/fs/f2fs/<disk>/max_victim_search
Date: January 2014
Contact: "Jaegeuk Kim" <jaegeuk.kim@samsung.com>
@@ -528,3 +533,10 @@ Description: With "mode=fragment:block" mount options, we can scatter block allo
f2fs will allocate 1..<max_fragment_chunk> blocks in a chunk and make a hole
in the length of 1..<max_fragment_hole> by turns. This value can be set
between 1..512 and the default value is 4.
+
+What: /sys/fs/f2fs/<disk>/gc_urgent_high_remaining
+Date: December 2021
+Contact: "Daeho Jeong" <daehojeong@google.com>
+Description: You can set the trial count limit for GC urgent high mode with this value.
+ If the GC thread reaches the limit, the mode will turn back to GC normal mode.
+ By default, the value is zero, which means there is no limit.
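+
+ For example, to allow at most 100 trials (the value is
+ illustrative)::
+
+     # echo 100 > /sys/fs/f2fs/<disk>/gc_urgent_high_remaining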
diff --git a/Documentation/accounting/delay-accounting.rst b/Documentation/accounting/delay-accounting.rst
index 1b8b46deeb29..197fe319cbec 100644
--- a/Documentation/accounting/delay-accounting.rst
+++ b/Documentation/accounting/delay-accounting.rst
@@ -13,6 +13,8 @@ a) waiting for a CPU (while being runnable)
b) completion of synchronous block I/O initiated by the task
c) swapping in pages
d) memory reclaim
+e) thrashing page cache
+f) direct compaction
and makes these statistics available to userspace through
the taskstats interface.
@@ -41,11 +43,12 @@ generic data structure to userspace corresponding to per-pid and per-tgid
statistics. The delay accounting functionality populates specific fields of
this structure. See
- include/linux/taskstats.h
+ include/uapi/linux/taskstats.h
for a description of the fields pertaining to delay accounting.
It will generally be in the form of counters returning the cumulative
-delay seen for cpu, sync block I/O, swapin, memory reclaim etc.
+delay seen for cpu, sync block I/O, swapin, memory reclaim, thrashing page
+cache, direct compaction etc.
Taking the difference of two successive readings of a given
counter (say cpu_delay_total) for a task will give the delay
@@ -88,41 +91,37 @@ seen.
General format of the getdelays command::
- getdelays [-t tgid] [-p pid] [-c cmd...]
-
+ getdelays [-dilv] [-t tgid] [-p pid]
Get delays, since system boot, for pid 10::
- # ./getdelays -p 10
+ # ./getdelays -d -p 10
(output similar to next case)
Get sum of delays, since system boot, for all pids with tgid 5::
- # ./getdelays -t 5
-
-
- CPU count real total virtual total delay total
- 7876 92005750 100000000 24001500
- IO count delay total
- 0 0
- SWAP count delay total
- 0 0
- RECLAIM count delay total
- 0 0
+ # ./getdelays -d -t 5
+ print delayacct stats ON
+ TGID 5
-Get delays seen in executing a given simple command::
- # ./getdelays -c ls /
+ CPU count real total virtual total delay total delay average
+ 8 7000000 6872122 3382277 0.423ms
+ IO count delay total delay average
+ 0 0 0ms
+ SWAP count delay total delay average
+ 0 0 0ms
+ RECLAIM count delay total delay average
+ 0 0 0ms
+ THRASHING count delay total delay average
+ 0 0 0ms
+ COMPACT count delay total delay average
+ 0 0 0ms
- bin data1 data3 data5 dev home media opt root srv sys usr
- boot data2 data4 data6 etc lib mnt proc sbin subdomain tmp var
+Get IO accounting for pid 1 (it works only with -p)::
+ # ./getdelays -i -p 1
+ printing IO accounting
+ linuxrc: read=65536, write=0, cancelled_write=0
- CPU count real total virtual total delay total
- 6 4000250 4000000 0
- IO count delay total
- 0 0
- SWAP count delay total
- 0 0
- RECLAIM count delay total
- 0 0
+The above command can be used with -v to get more debug information.
diff --git a/Documentation/accounting/psi.rst b/Documentation/accounting/psi.rst
index f2b3439edcc2..860fe651d645 100644
--- a/Documentation/accounting/psi.rst
+++ b/Documentation/accounting/psi.rst
@@ -92,7 +92,8 @@ Triggers can be set on more than one psi metric and more than one trigger
for the same psi metric can be specified. However for each trigger a separate
file descriptor is required to be able to poll it separately from others,
therefore for each trigger a separate open() syscall should be made even
-when opening the same psi interface file.
+when opening the same psi interface file. Write operations to a file descriptor
+with an already existing psi trigger will fail with EBUSY.
Monitors activate only when system enters stall state for the monitored
psi metric and deactivates upon exit from the stall state. While system is
diff --git a/Documentation/admin-guide/cgroup-v1/hugetlb.rst b/Documentation/admin-guide/cgroup-v1/hugetlb.rst
index 338f2c7d7a1c..0fa724d82abb 100644
--- a/Documentation/admin-guide/cgroup-v1/hugetlb.rst
+++ b/Documentation/admin-guide/cgroup-v1/hugetlb.rst
@@ -29,12 +29,14 @@ Brief summary of control files::
hugetlb.<hugepagesize>.max_usage_in_bytes # show max "hugepagesize" hugetlb usage recorded
hugetlb.<hugepagesize>.usage_in_bytes # show current usage for "hugepagesize" hugetlb
hugetlb.<hugepagesize>.failcnt # show the number of allocation failure due to HugeTLB usage limit
+ hugetlb.<hugepagesize>.numa_stat # show the numa information of the hugetlb memory charged to this cgroup
For a system supporting three hugepage sizes (64k, 32M and 1G), the control
files include::
hugetlb.1GB.limit_in_bytes
hugetlb.1GB.max_usage_in_bytes
+ hugetlb.1GB.numa_stat
hugetlb.1GB.usage_in_bytes
hugetlb.1GB.failcnt
hugetlb.1GB.rsvd.limit_in_bytes
@@ -43,6 +45,7 @@ files include::
hugetlb.1GB.rsvd.failcnt
hugetlb.64KB.limit_in_bytes
hugetlb.64KB.max_usage_in_bytes
+ hugetlb.64KB.numa_stat
hugetlb.64KB.usage_in_bytes
hugetlb.64KB.failcnt
hugetlb.64KB.rsvd.limit_in_bytes
@@ -51,6 +54,7 @@ files include::
hugetlb.64KB.rsvd.failcnt
hugetlb.32MB.limit_in_bytes
hugetlb.32MB.max_usage_in_bytes
+ hugetlb.32MB.numa_stat
hugetlb.32MB.usage_in_bytes
hugetlb.32MB.failcnt
hugetlb.32MB.rsvd.limit_in_bytes
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 2aeb7ae8b393..5aa368d165da 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1268,6 +1268,9 @@ PAGE_SIZE multiple when read back.
The number of processes belonging to this cgroup
killed by any kind of OOM killer.
+ oom_group_kill
+ The number of times a group OOM has occurred.
+
memory.events.local
Similar to memory.events but the fields in the file are local
to the cgroup i.e. not hierarchical. The file modified event
@@ -1311,6 +1314,9 @@ PAGE_SIZE multiple when read back.
sock (npn)
Amount of memory used in network transmission buffers
+ vmalloc (npn)
+ Amount of memory used for vmap backed memory.
+
shmem
Amount of cached filesystem data that is swap-backed,
such as tmpfs, shm segments, shared anonymous mmap()s
@@ -2260,6 +2266,11 @@ HugeTLB Interface Files
are local to the cgroup i.e. not hierarchical. The file modified event
generated on this file reflects only the local events.
+ hugetlb.<hugepagesize>.numa_stat
+ Similar to memory.numa_stat, it shows the numa information of the
+ hugetlb pages of <hugepagesize> in this cgroup. Only hugetlb
+ pages that are actively in use are included. The per-node values
+ are in bytes.
+
Misc
----
diff --git a/Documentation/admin-guide/cputopology.rst b/Documentation/admin-guide/cputopology.rst
index 6b62e182baf4..d29cacc9b3c3 100644
--- a/Documentation/admin-guide/cputopology.rst
+++ b/Documentation/admin-guide/cputopology.rst
@@ -8,11 +8,9 @@ to /proc/cpuinfo output of some architectures. They reside in
Documentation/ABI/stable/sysfs-devices-system-cpu.
Architecture-neutral, drivers/base/topology.c, exports these attributes.
-However, the book and drawer related sysfs files will only be created if
-CONFIG_SCHED_BOOK and CONFIG_SCHED_DRAWER are selected, respectively.
-
-CONFIG_SCHED_BOOK and CONFIG_SCHED_DRAWER are currently only used on s390,
-where they reflect the cpu and cache hierarchy.
+However, the die, cluster, book, and drawer hierarchy related sysfs files will
+only be created if an architecture provides the related macros as described
+below.
For an architecture to support this feature, it must define some of
these macros in include/asm-XXX/topology.h::
@@ -43,15 +41,14 @@ not defined by include/asm-XXX/topology.h:
2) topology_die_id: -1
3) topology_cluster_id: -1
4) topology_core_id: 0
-5) topology_sibling_cpumask: just the given CPU
-6) topology_core_cpumask: just the given CPU
-7) topology_cluster_cpumask: just the given CPU
-8) topology_die_cpumask: just the given CPU
-
-For architectures that don't support books (CONFIG_SCHED_BOOK) there are no
-default definitions for topology_book_id() and topology_book_cpumask().
-For architectures that don't support drawers (CONFIG_SCHED_DRAWER) there are
-no default definitions for topology_drawer_id() and topology_drawer_cpumask().
+5) topology_book_id: -1
+6) topology_drawer_id: -1
+7) topology_sibling_cpumask: just the given CPU
+8) topology_core_cpumask: just the given CPU
+9) topology_cluster_cpumask: just the given CPU
+10) topology_die_cpumask: just the given CPU
+11) topology_book_cpumask: just the given CPU
+12) topology_drawer_cpumask: just the given CPU
Additionally, CPU topology information is provided under
/sys/devices/system/cpu and includes these files. The internal
diff --git a/Documentation/admin-guide/gpio/index.rst b/Documentation/admin-guide/gpio/index.rst
index 7db367572f30..f6861ca16ffe 100644
--- a/Documentation/admin-guide/gpio/index.rst
+++ b/Documentation/admin-guide/gpio/index.rst
@@ -10,6 +10,7 @@ gpio
gpio-aggregator
sysfs
gpio-mockup
+ gpio-sim
.. only:: subproject and html
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 685645e75706..f5a27f067db9 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -612,8 +612,8 @@
clocksource.max_cswd_read_retries= [KNL]
Number of clocksource_watchdog() retries due to
external delays before the clock will be marked
- unstable. Defaults to three retries, that is,
- four attempts to read the clock under test.
+ unstable. Defaults to two retries, that is,
+ three attempts to read the clock under test.
clocksource.verify_n_cpus= [KNL]
Limit the number of CPUs checked for clocksources
@@ -3393,7 +3393,7 @@
Disable SMAP (Supervisor Mode Access Prevention)
even if it is supported by processor.
- nosmep [X86,PPC]
+ nosmep [X86,PPC64s]
Disable SMEP (Supervisor Mode Execution Prevention)
even if it is supported by processor.
@@ -4166,6 +4166,14 @@
Override pmtimer IOPort with a hex value.
e.g. pmtmr=0x508
+ pmu_override= [PPC] Override the PMU.
+ This option takes over the PMU facility, so it is no
+ longer usable by perf. Setting this option starts the
+ PMU counters by setting MMCR0 to 0 (the FC bit is
+ cleared). If a number is given, then MMCR1 is set to
+ that number, otherwise (e.g., 'pmu_override=on'), MMCR1
+ remains 0.
+
pm_debug_messages [SUSPEND,KNL]
Enable suspend/resume debug messages during boot up.
@@ -6494,6 +6502,12 @@
controller on both pseries and powernv
platforms. Only useful on POWER9 and above.
+ xive.store-eoi=off [PPC]
+ By default on POWER10 and above, the kernel will use
+ stores for EOI handling when the XIVE interrupt mode
+ is active. This option allows the XIVE driver to use
+ loads instead, as on POWER9.
+
xhci-hcd.quirks [USB,KNL]
A hex value specifying bitmask with supplemental xhci
host controller quirks. Meaning of each bit can be
diff --git a/Documentation/admin-guide/mm/damon/reclaim.rst b/Documentation/admin-guide/mm/damon/reclaim.rst
index fb9def3a7355..0af51a9705b1 100644
--- a/Documentation/admin-guide/mm/damon/reclaim.rst
+++ b/Documentation/admin-guide/mm/damon/reclaim.rst
@@ -208,6 +208,31 @@ PID of the DAMON thread.
If DAMON_RECLAIM is enabled, this becomes the PID of the worker thread. Else,
-1.
+nr_reclaim_tried_regions
+------------------------
+
+Number of memory regions that DAMON_RECLAIM tried to reclaim.
+
+bytes_reclaim_tried_regions
+---------------------------
+
+Total bytes of the memory regions that DAMON_RECLAIM tried to reclaim.
+
+nr_reclaimed_regions
+--------------------
+
+Number of memory regions that DAMON_RECLAIM successfully reclaimed.
+
+bytes_reclaimed_regions
+-----------------------
+
+Total bytes of the memory regions that DAMON_RECLAIM successfully reclaimed.
+
+nr_quota_exceeds
+----------------
+
+Number of times that the time/space quota limits have been exceeded.
+
Example
=======
diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst
index ed96bbf0daff..59b84904a854 100644
--- a/Documentation/admin-guide/mm/damon/usage.rst
+++ b/Documentation/admin-guide/mm/damon/usage.rst
@@ -7,37 +7,40 @@ Detailed Usages
DAMON provides below three interfaces for different users.
- *DAMON user space tool.*
- This is for privileged people such as system administrators who want a
- just-working human-friendly interface. Using this, users can use the DAMON’s
- major features in a human-friendly way. It may not be highly tuned for
- special cases, though. It supports both virtual and physical address spaces
- monitoring.
+ `This <https://github.com/awslabs/damo>`_ is for privileged people such as
+ system administrators who want a just-working human-friendly interface.
+ Using this, users can use the DAMON’s major features in a human-friendly way.
+ It may not be highly tuned for special cases, though. It supports both
+ virtual and physical address spaces monitoring. For more detail, please
+ refer to its `usage document
+ <https://github.com/awslabs/damo/blob/next/USAGE.md>`_.
- *debugfs interface.*
- This is for privileged user space programmers who want more optimized use of
- DAMON. Using this, users can use DAMON’s major features by reading
- from and writing to special debugfs files. Therefore, you can write and use
- your personalized DAMON debugfs wrapper programs that reads/writes the
- debugfs files instead of you. The DAMON user space tool is also a reference
- implementation of such programs. It supports both virtual and physical
- address spaces monitoring.
+ :ref:`This <debugfs_interface>` is for privileged user space programmers who
+ want more optimized use of DAMON. Using this, users can use DAMON’s major
+ features by reading from and writing to special debugfs files. Therefore,
+ you can write and use your personalized DAMON debugfs wrapper programs that
+ read/write the debugfs files on your behalf. The `DAMON user space tool
+ <https://github.com/awslabs/damo>`_ is one example of such programs. It
+ supports both virtual and physical address spaces monitoring. Note that this
+ interface provides only simple :ref:`statistics <damos_stats>` for the
+ monitoring results. For detailed monitoring results, DAMON provides a
+ :ref:`tracepoint <tracepoint>`.
- *Kernel Space Programming Interface.*
- This is for kernel space programmers. Using this, users can utilize every
- feature of DAMON most flexibly and efficiently by writing kernel space
- DAMON application programs for you. You can even extend DAMON for various
- address spaces.
+ :doc:`This </vm/damon/api>` is for kernel space programmers. Using this,
+ users can utilize every feature of DAMON most flexibly and efficiently by
+ writing their own kernel space DAMON application programs. You can even extend
+ DAMON for various address spaces. For detail, please refer to the interface
+ :doc:`document </vm/damon/api>`.
-Nevertheless, you could write your own user space tool using the debugfs
-interface. A reference implementation is available at
-https://github.com/awslabs/damo. If you are a kernel programmer, you could
-refer to :doc:`/vm/damon/api` for the kernel space programming interface. For
-the reason, this document describes only the debugfs interface
+
+.. _debugfs_interface:
debugfs Interface
=================
-DAMON exports five files, ``attrs``, ``target_ids``, ``init_regions``,
-``schemes`` and ``monitor_on`` under its debugfs directory,
-``<debugfs>/damon/``.
+DAMON exports eight files, ``attrs``, ``target_ids``, ``init_regions``,
+``schemes``, ``monitor_on``, ``kdamond_pid``, ``mk_contexts`` and
+``rm_contexts`` under its debugfs directory, ``<debugfs>/damon/``.
Attributes
@@ -131,24 +134,38 @@ Schemes
For usual DAMON-based data access aware memory management optimizations, users
would simply want the system to apply a memory management action to a memory
-region of a specific size having a specific access frequency for a specific
-time. DAMON receives such formalized operation schemes from the user and
-applies those to the target processes. It also counts the total number and
-size of regions that each scheme is applied. This statistics can be used for
-online analysis or tuning of the schemes.
+region of a specific access pattern. DAMON receives such formalized operation
+schemes from the user and applies those to the target processes.
Users can get and set the schemes by reading from and writing to ``schemes``
debugfs file. Reading the file also shows the statistics of each scheme. To
-the file, each of the schemes should be represented in each line in below form:
+the file, each scheme should be written on its own line, in the below
+form::
+
+ <target access pattern> <action> <quota> <watermarks>
+
+You can disable schemes by simply writing an empty string to the file.
+
+Target Access Pattern
+~~~~~~~~~~~~~~~~~~~~~
+
+The ``<target access pattern>`` is constructed with three ranges, in the
+below form::
+
+ min-size max-size min-acc max-acc min-age max-age
- min-size max-size min-acc max-acc min-age max-age action
+Specifically, bytes for the size of regions (``min-size`` and ``max-size``),
+number of monitored accesses per aggregate interval for access frequency
+(``min-acc`` and ``max-acc``), and number of aggregate intervals for the age
+of regions (``min-age`` and ``max-age``) are specified. Note that the ranges
+are closed intervals.
-Note that the ranges are closed interval. Bytes for the size of regions
-(``min-size`` and ``max-size``), number of monitored accesses per aggregate
-interval for access frequency (``min-acc`` and ``max-acc``), number of
-aggregate intervals for the age of regions (``min-age`` and ``max-age``), and a
-predefined integer for memory management actions should be used. The supported
-numbers and their meanings are as below.
+Action
+~~~~~~
+
+The ``<action>`` is a predefined integer for memory management actions, which
+DAMON will apply to the regions having the target access pattern. The
+supported numbers and their meanings are as below.
- 0: Call ``madvise()`` for the region with ``MADV_WILLNEED``
- 1: Call ``madvise()`` for the region with ``MADV_COLD``
@@ -157,20 +174,82 @@ numbers and their meanings are as below.
- 4: Call ``madvise()`` for the region with ``MADV_NOHUGEPAGE``
- 5: Do nothing but count the statistics
-You can disable schemes by simply writing an empty string to the file. For
-example, below commands applies a scheme saying "If a memory region of size in
-[4KiB, 8KiB] is showing accesses per aggregate interval in [0, 5] for aggregate
-interval in [10, 20], page out the region", check the entered scheme again, and
-finally remove the scheme. ::
+Quota
+~~~~~
- # cd <debugfs>/damon
- # echo "4096 8192 0 5 10 20 2" > schemes
- # cat schemes
- 4096 8192 0 5 10 20 2 0 0
- # echo > schemes
+The optimal ``target access pattern`` for each ``action`` is workload
+dependent, so it is not easy to find. Worse yet, setting a scheme's action
+too aggressively can cause severe overhead. To avoid such overhead, users
+can limit the time and size quota for the scheme via the ``<quota>`` in the
+below form::
+
+ <ms> <sz> <reset interval> <priority weights>
+
+This makes DAMON try to use only up to ``<ms>`` milliseconds for applying
+the action to memory regions of the ``target access pattern`` within the
+``<reset interval>`` milliseconds, and to apply the action to only up to
+``<sz>`` bytes of memory regions within the ``<reset interval>``. Setting
+both ``<ms>`` and ``<sz>`` to zero disables the quota limits.
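+
+For example, a quota of ``10 $((1024*1024*1024)) 1000`` lets the scheme spend
+at most 10 milliseconds, and apply its action to at most 1 GiB of regions,
+within every one second window; this is the quota used in the full example at
+the end of this section.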
+
+When the quota limits are expected to be exceeded, DAMON prioritizes the
+found memory regions of the ``target access pattern`` based on their size,
+access frequency, and age. For personalized prioritization, users can set
+the weights for the three properties via the ``<priority weights>``, in the
+below form::
+
+ <size weight> <access frequency weight> <age weight>
+
+Watermarks
+~~~~~~~~~~
+
+Some schemes need to run only when the current value of a specific system
+metric, such as the free memory ratio, is in a given range. For such cases,
+users can specify watermarks for the condition, in the below form::
+
+ <metric> <check interval> <high mark> <middle mark> <low mark>
+
+``<metric>`` is a predefined integer for the metric to be checked. The
+supported numbers and their meanings are as below.
+
+ - 0: Ignore the watermarks
+ - 1: System's free memory rate (per thousand)
+
+The value of the metric is checked every ``<check interval>`` microseconds.
+
+If the value is higher than ``<high mark>`` or lower than ``<low mark>``, the
+scheme is deactivated. If the value is lower than ``<middle mark>``, the
+scheme is activated.
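+
+For example, with the free memory rate metric and ``600 500 300`` as the
+high, middle, and low marks, a scheme stays deactivated while more than 60%
+of the system memory is free, is activated once the rate falls below 50%,
+and is deactivated again if it falls below 30%.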
+
+.. _damos_stats:
+
+Statistics
+~~~~~~~~~~
+
+DAMON counts the total number and total bytes of the regions to which each
+scheme has tried to apply its action, the same two numbers for the regions
+to which the action was successfully applied, and the number of times the
+quota limits were exceeded. These statistics can be used for online analysis
+or tuning of the schemes.
+
+The statistics can be shown by reading the ``schemes`` file. Reading the
+file will show each scheme you entered, one per line, with the five numbers
+for the statistics added at the end of each line.
-The last two integers in the 4th line of above example is the total number and
-the total size of the regions that the scheme is applied.
+Example
+~~~~~~~
+
+The below commands apply a scheme saying "If a memory region of size in
+[4KiB, 8KiB] is showing accesses per aggregate interval in [0, 5] for an age
+in [10, 20] aggregate intervals, page out the region. For the paging out,
+use only up to 10ms per second, and do not page out more than 1GiB per
+second. Under those limits, page out memory regions having longer age first.
+Also, check the free memory rate of the system every 5 seconds, start the
+monitoring and paging out when the free memory rate becomes lower than 50%,
+but stop them if the free memory rate becomes larger than 60% or lower than
+30%"::
+
+ # cd <debugfs>/damon
+ # scheme="4096 8192 0 5 10 20 2" # target access pattern and action
+ # scheme+=" 10 $((1024*1024*1024)) 1000" # quotas
+ # scheme+=" 0 0 100" # prioritization weights
+ # scheme+=" 1 5000000 600 500 300" # watermarks
+ # echo "$scheme" > schemes
Turning On/Off
@@ -195,6 +274,54 @@ the monitoring is turned on. If you write to the files while DAMON is running,
an error code such as ``-EBUSY`` will be returned.
+Monitoring Thread PID
+---------------------
+
+DAMON does the requested monitoring with a kernel thread called ``kdamond``.
+You can get the pid of the thread by reading the ``kdamond_pid`` file. When
+the monitoring is turned off, reading the file returns ``none``. ::
+
+ # cd <debugfs>/damon
+ # cat monitor_on
+ off
+ # cat kdamond_pid
+ none
+ # echo on > monitor_on
+ # cat kdamond_pid
+ 18594
+
+
+Using Multiple Monitoring Threads
+---------------------------------
+
+One ``kdamond`` thread is created for each monitoring context. You can
+create and remove monitoring contexts for use cases requiring multiple
+``kdamond`` threads using the ``mk_contexts`` and ``rm_contexts`` files.
+
+Writing the name of the new context to the ``mk_contexts`` file creates a
+directory of that name under the DAMON debugfs directory. The directory
+will have the DAMON debugfs files for the context. ::
+
+ # cd <debugfs>/damon
+ # ls foo
+ # ls: cannot access 'foo': No such file or directory
+ # echo foo > mk_contexts
+ # ls foo
+ # attrs init_regions kdamond_pid schemes target_ids
+
+If the context is not needed anymore, you can remove it and the corresponding
+directory by writing the name of the context to the ``rm_contexts`` file. ::
+
+ # echo foo > rm_contexts
+ # ls foo
+ # ls: cannot access 'foo': No such file or directory
+
+Note that ``mk_contexts``, ``rm_contexts``, and ``monitor_on`` files are in the
+root directory only.
+
+
+.. _tracepoint:
+
Tracepoint for Monitoring Results
=================================
diff --git a/Documentation/admin-guide/mm/numa_memory_policy.rst b/Documentation/admin-guide/mm/numa_memory_policy.rst
index 64fd0ba0d057..5a6afecbb0d0 100644
--- a/Documentation/admin-guide/mm/numa_memory_policy.rst
+++ b/Documentation/admin-guide/mm/numa_memory_policy.rst
@@ -408,7 +408,7 @@ follows:
Memory Policy APIs
==================
-Linux supports 3 system calls for controlling memory policy. These APIS
+Linux supports 4 system calls for controlling memory policy. These APIs
always affect only the calling task, the calling task's address space, or
some shared object mapped into the calling task's address space.
@@ -460,6 +460,20 @@ requested via the 'flags' argument.
See the mbind(2) man page for more details.
+Set home node for a Range of Task's Address Space::
+
+ long sys_set_mempolicy_home_node(unsigned long start, unsigned long len,
+ unsigned long home_node,
+ unsigned long flags);
+
+sys_set_mempolicy_home_node sets the home node for a VMA policy present in
+the task's address range. The system call updates the home node only for
+existing mempolicy ranges; other address ranges are ignored. A home node is
+the NUMA node closest to which page allocations will come from. Specifying
+a home node overrides the default allocation policy of allocating memory
+close to the local node of the executing CPU.
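+
+For illustration, a minimal user-space sketch follows. It assumes that glibc
+provides no wrapper, so the raw syscall number (450 on recent kernels) is
+used, and that mbind() is available from libnuma; only ranges whose existing
+policy is MPOL_BIND or MPOL_PREFERRED_MANY accept a home node::
+
+ #define _GNU_SOURCE
+ #include <stdio.h>
+ #include <unistd.h>
+ #include <sys/mman.h>
+ #include <sys/syscall.h>
+ #include <numaif.h>                     /* mbind(); link with -lnuma */
+
+ #ifndef __NR_set_mempolicy_home_node
+ #define __NR_set_mempolicy_home_node 450        /* assumed syscall number */
+ #endif
+
+ int main(void)
+ {
+         unsigned long len = 2UL * 1024 * 1024;
+         unsigned long nodes = (1UL << 0) | (1UL << 1);  /* nodes 0 and 1 */
+         void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
+                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+         /* The home node is applied only where a VMA policy already
+          * exists, so install an MPOL_BIND policy first. */
+         if (mbind(buf, len, MPOL_BIND, &nodes, 8 * sizeof(nodes), 0))
+                 perror("mbind");
+
+         /* Prefer node 1 for allocations backing [buf, buf + len). */
+         if (syscall(__NR_set_mempolicy_home_node,
+                     (unsigned long)buf, len, 1UL, 0UL))
+                 perror("set_mempolicy_home_node");
+         return 0;
+ }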
+
+
Memory Policy Command Line Interface
====================================
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index 5e795202111f..f4804ce37c58 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -948,7 +948,7 @@ how much memory needs to be free before kswapd goes back to sleep.
The unit is in fractions of 10,000. The default value of 10 means the
distances between watermarks are 0.1% of the available memory in the
-node/system. The maximum value is 1000, or 10% of memory.
+node/system. The maximum value is 3000, or 30% of memory.
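+For example, on a node with 64 GiB of memory, the default value of 10 keeps
+the watermarks about 65 MiB apart, while the maximum of 3000 spreads them by
+roughly 19 GiB.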
A high rate of threads entering direct reclaim (allocstall) or kswapd
going to sleep prematurely (kswapd_low_wmark_hit_quickly) can indicate
diff --git a/Documentation/arm/marvell.rst b/Documentation/arm/marvell.rst
index 9485a5a2e2e9..2f41caa0096c 100644
--- a/Documentation/arm/marvell.rst
+++ b/Documentation/arm/marvell.rst
@@ -266,10 +266,12 @@ Avanta family
-------------
Flavors:
+ - 88F6500
- 88F6510
- 88F6530P
- 88F6550
- 88F6560
+ - 88F6601
Homepage:
https://web.archive.org/web/20181005145041/http://www.marvell.com/broadband/
diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 5342e895fb60..ea281dd75517 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -52,6 +52,12 @@ stable kernels.
| Allwinner | A64/R18 | UNKNOWN1 | SUN50I_ERRATUM_UNKNOWN1 |
+----------------+-----------------+-----------------+-----------------------------+
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #2064142 | ARM64_ERRATUM_2064142 |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #2038923 | ARM64_ERRATUM_2038923 |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #1902691 | ARM64_ERRATUM_1902691 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A53 | #826319 | ARM64_ERRATUM_826319 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A53 | #827319 | ARM64_ERRATUM_827319 |
@@ -92,12 +98,20 @@ stable kernels.
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #2051678 | ARM64_ERRATUM_2051678 |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-A510 | #2077057 | ARM64_ERRATUM_2077057 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Cortex-A710 | #2224489 | ARM64_ERRATUM_2224489 |
+----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM | Cortex-X2 | #2224489 | ARM64_ERRATUM_2224489 |
++----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 |
+----------------+-----------------+-----------------+-----------------------------+
| ARM | Neoverse-N1 | #1349291 | N/A |
diff --git a/Documentation/block/index.rst b/Documentation/block/index.rst
index 86dcf7159f99..3a41495dd77b 100644
--- a/Documentation/block/index.rst
+++ b/Documentation/block/index.rst
@@ -20,7 +20,6 @@ Block
kyber-iosched
null_blk
pr
- queue-sysfs
request
stat
switching-sched
diff --git a/Documentation/block/queue-sysfs.rst b/Documentation/block/queue-sysfs.rst
deleted file mode 100644
index 3f569d532485..000000000000
--- a/Documentation/block/queue-sysfs.rst
+++ /dev/null
@@ -1,321 +0,0 @@
-=================
-Queue sysfs files
-=================
-
-This text file will detail the queue files that are located in the sysfs tree
-for each block device. Note that stacked devices typically do not export
-any settings, since their queue merely functions as a remapping target.
-These files are the ones found in the /sys/block/xxx/queue/ directory.
-
-Files denoted with a RO postfix are readonly and the RW postfix means
-read-write.
-
-add_random (RW)
----------------
-This file allows to turn off the disk entropy contribution. Default
-value of this file is '1'(on).
-
-chunk_sectors (RO)
-------------------
-This has different meaning depending on the type of the block device.
-For a RAID device (dm-raid), chunk_sectors indicates the size in 512B sectors
-of the RAID volume stripe segment. For a zoned block device, either host-aware
-or host-managed, chunk_sectors indicates the size in 512B sectors of the zones
-of the device, with the eventual exception of the last zone of the device which
-may be smaller.
-
-dax (RO)
---------
-This file indicates whether the device supports Direct Access (DAX),
-used by CPU-addressable storage to bypass the pagecache. It shows '1'
-if true, '0' if not.
-
-discard_granularity (RO)
-------------------------
-This shows the size of internal allocation of the device in bytes, if
-reported by the device. A value of '0' means device does not support
-the discard functionality.
-
-discard_max_hw_bytes (RO)
--------------------------
-Devices that support discard functionality may have internal limits on
-the number of bytes that can be trimmed or unmapped in a single operation.
-The `discard_max_hw_bytes` parameter is set by the device driver to the
-maximum number of bytes that can be discarded in a single operation.
-Discard requests issued to the device must not exceed this limit.
-A `discard_max_hw_bytes` value of 0 means that the device does not support
-discard functionality.
-
-discard_max_bytes (RW)
-----------------------
-While discard_max_hw_bytes is the hardware limit for the device, this
-setting is the software limit. Some devices exhibit large latencies when
-large discards are issued, setting this value lower will make Linux issue
-smaller discards and potentially help reduce latencies induced by large
-discard operations.
-
-discard_zeroes_data (RO)
-------------------------
-Obsolete. Always zero.
-
-fua (RO)
---------
-Whether or not the block driver supports the FUA flag for write requests.
-FUA stands for Force Unit Access. If the FUA flag is set that means that
-write requests must bypass the volatile cache of the storage device.
-
-hw_sector_size (RO)
--------------------
-This is the hardware sector size of the device, in bytes.
-
-io_poll (RW)
-------------
-When read, this file shows whether polling is enabled (1) or disabled
-(0). Writing '0' to this file will disable polling for this device.
-Writing any non-zero value will enable this feature.
-
-io_poll_delay (RW)
-------------------
-If polling is enabled, this controls what kind of polling will be
-performed. It defaults to -1, which is classic polling. In this mode,
-the CPU will repeatedly ask for completions without giving up any time.
-If set to 0, a hybrid polling mode is used, where the kernel will attempt
-to make an educated guess at when the IO will complete. Based on this
-guess, the kernel will put the process issuing IO to sleep for an amount
-of time, before entering a classic poll loop. This mode might be a
-little slower than pure classic polling, but it will be more efficient.
-If set to a value larger than 0, the kernel will put the process issuing
-IO to sleep for this amount of microseconds before entering classic
-polling.
-
-io_timeout (RW)
----------------
-io_timeout is the request timeout in milliseconds. If a request does not
-complete in this time then the block driver timeout handler is invoked.
-That timeout handler can decide to retry the request, to fail it or to start
-a device recovery strategy.
-
-iostats (RW)
--------------
-This file is used to control (on/off) the iostats accounting of the
-disk.
-
-logical_block_size (RO)
------------------------
-This is the logical block size of the device, in bytes.
-
-max_discard_segments (RO)
--------------------------
-The maximum number of DMA scatter/gather entries in a discard request.
-
-max_hw_sectors_kb (RO)
-----------------------
-This is the maximum number of kilobytes supported in a single data transfer.
-
-max_integrity_segments (RO)
----------------------------
-Maximum number of elements in a DMA scatter/gather list with integrity
-data that will be submitted by the block layer core to the associated
-block driver.
-
-max_active_zones (RO)
----------------------
-For zoned block devices (zoned attribute indicating "host-managed" or
-"host-aware"), the sum of zones belonging to any of the zone states:
-EXPLICIT OPEN, IMPLICIT OPEN or CLOSED, is limited by this value.
-If this value is 0, there is no limit.
-
-If the host attempts to exceed this limit, the driver should report this error
-with BLK_STS_ZONE_ACTIVE_RESOURCE, which user space may see as the EOVERFLOW
-errno.
-
-max_open_zones (RO)
--------------------
-For zoned block devices (zoned attribute indicating "host-managed" or
-"host-aware"), the sum of zones belonging to any of the zone states:
-EXPLICIT OPEN or IMPLICIT OPEN, is limited by this value.
-If this value is 0, there is no limit.
-
-If the host attempts to exceed this limit, the driver should report this error
-with BLK_STS_ZONE_OPEN_RESOURCE, which user space may see as the ETOOMANYREFS
-errno.
-
-max_sectors_kb (RW)
--------------------
-This is the maximum number of kilobytes that the block layer will allow
-for a filesystem request. Must be smaller than or equal to the maximum
-size allowed by the hardware.
-
-max_segments (RO)
------------------
-Maximum number of elements in a DMA scatter/gather list that is submitted
-to the associated block driver.
-
-max_segment_size (RO)
----------------------
-Maximum size in bytes of a single element in a DMA scatter/gather list.
-
-minimum_io_size (RO)
---------------------
-This is the smallest preferred IO size reported by the device.
-
-nomerges (RW)
--------------
-This enables the user to disable the lookup logic involved with IO
-merging requests in the block layer. By default (0) all merges are
-enabled. When set to 1 only simple one-hit merges will be tried. When
-set to 2 no merge algorithms will be tried (including one-hit or more
-complex tree/hash lookups).
-
-nr_requests (RW)
-----------------
-This controls how many requests may be allocated in the block layer for
-read or write requests. Note that the total allocated number may be twice
-this amount, since it applies only to reads or writes (not the accumulated
-sum).
-
-To avoid priority inversion through request starvation, a request
-queue maintains a separate request pool per each cgroup when
-CONFIG_BLK_CGROUP is enabled, and this parameter applies to each such
-per-block-cgroup request pool. IOW, if there are N block cgroups,
-each request queue may have up to N request pools, each independently
-regulated by nr_requests.
-
-nr_zones (RO)
--------------
-For zoned block devices (zoned attribute indicating "host-managed" or
-"host-aware"), this indicates the total number of zones of the device.
-This is always 0 for regular block devices.
-
-optimal_io_size (RO)
---------------------
-This is the optimal IO size reported by the device.
-
-physical_block_size (RO)
-------------------------
-This is the physical block size of device, in bytes.
-
-read_ahead_kb (RW)
-------------------
-Maximum number of kilobytes to read-ahead for filesystems on this block
-device.
-
-rotational (RW)
----------------
-This file is used to stat if the device is of rotational type or
-non-rotational type.
-
-rq_affinity (RW)
-----------------
-If this option is '1', the block layer will migrate request completions to the
-cpu "group" that originally submitted the request. For some workloads this
-provides a significant reduction in CPU cycles due to caching effects.
-
-For storage configurations that need to maximize distribution of completion
-processing setting this option to '2' forces the completion to run on the
-requesting cpu (bypassing the "group" aggregation logic).
-
-scheduler (RW)
---------------
-When read, this file will display the current and available IO schedulers
-for this block device. The currently active IO scheduler will be enclosed
-in [] brackets. Writing an IO scheduler name to this file will switch
-control of this block device to that new IO scheduler. Note that writing
-an IO scheduler name to this file will attempt to load that IO scheduler
-module, if it isn't already present in the system.
-
-write_cache (RW)
-----------------
-When read, this file will display whether the device has write back
-caching enabled or not. It will return "write back" for the former
-case, and "write through" for the latter. Writing to this file can
-change the kernels view of the device, but it doesn't alter the
-device state. This means that it might not be safe to toggle the
-setting from "write back" to "write through", since that will also
-eliminate cache flushes issued by the kernel.
-
-write_same_max_bytes (RO)
--------------------------
-This is the number of bytes the device can write in a single write-same
-command. A value of '0' means write-same is not supported by this
-device.
-
-wbt_lat_usec (RW)
------------------
-If the device is registered for writeback throttling, then this file shows
-the target minimum read latency. If this latency is exceeded in a given
-window of time (see wb_window_usec), then the writeback throttling will start
-scaling back writes. Writing a value of '0' to this file disables the
-feature. Writing a value of '-1' to this file resets the value to the
-default setting.
-
-throttle_sample_time (RW)
--------------------------
-This is the time window that blk-throttle samples data, in millisecond.
-blk-throttle makes decision based on the samplings. Lower time means cgroups
-have more smooth throughput, but higher CPU overhead. This exists only when
-CONFIG_BLK_DEV_THROTTLING_LOW is enabled.
-
-write_zeroes_max_bytes (RO)
----------------------------
-For block drivers that support REQ_OP_WRITE_ZEROES, the maximum number of
-bytes that can be zeroed at once. The value 0 means that REQ_OP_WRITE_ZEROES
-is not supported.
-
-zone_append_max_bytes (RO)
---------------------------
-This is the maximum number of bytes that can be written to a sequential
-zone of a zoned block device using a zone append write operation
-(REQ_OP_ZONE_APPEND). This value is always 0 for regular block devices.
-
-zoned (RO)
-----------
-This indicates if the device is a zoned block device and the zone model of the
-device if it is indeed zoned. The possible values indicated by zoned are
-"none" for regular block devices and "host-aware" or "host-managed" for zoned
-block devices. The characteristics of host-aware and host-managed zoned block
-devices are described in the ZBC (Zoned Block Commands) and ZAC
-(Zoned Device ATA Command Set) standards. These standards also define the
-"drive-managed" zone model. However, since drive-managed zoned block devices
-do not support zone commands, they will be treated as regular block devices
-and zoned will report "none".
-
-zone_write_granularity (RO)
----------------------------
-This indicates the alignment constraint, in bytes, for write operations in
-sequential zones of zoned block devices (devices with a zoned attributed
-that reports "host-managed" or "host-aware"). This value is always 0 for
-regular block devices.
-
-independent_access_ranges (RO)
-------------------------------
-
-The presence of this sub-directory of the /sys/block/xxx/queue/ directory
-indicates that the device is capable of executing requests targeting
-different sector ranges in parallel. For instance, single LUN multi-actuator
-hard-disks will have an independent_access_ranges directory if the device
-correctly advertizes the sector ranges of its actuators.
-
-The independent_access_ranges directory contains one directory per access
-range, with each range described using the sector (RO) attribute file to
-indicate the first sector of the range and the nr_sectors (RO) attribute file
-to indicate the total number of sectors in the range starting from the first
-sector of the range. For example, a dual-actuator hard-disk will have the
-following independent_access_ranges entries.::
-
- $ tree /sys/block/<device>/queue/independent_access_ranges/
- /sys/block/<device>/queue/independent_access_ranges/
- |-- 0
- | |-- nr_sectors
- | `-- sector
- `-- 1
- |-- nr_sectors
- `-- sector
-
-The sector and nr_sectors attributes use 512B sector unit, regardless of
-the actual block size of the device. Independent access ranges do not
-overlap and include all sectors within the device capacity. The access
-ranges are numbered in increasing order of the range start sector,
-that is, the sector attribute of range 0 always has the value 0.
-
-Jens Axboe <jens.axboe@oracle.com>, February 2009
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 2e7186805148..d6b3f94b9f1f 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -279,6 +279,7 @@ Accounting Framework
Block Devices
=============
+.. kernel-doc:: include/linux/bio.h
.. kernel-doc:: block/blk-core.c
:export:
@@ -294,9 +295,6 @@ Block Devices
.. kernel-doc:: block/blk-settings.c
:export:
-.. kernel-doc:: block/blk-exec.c
- :export:
-
.. kernel-doc:: block/blk-flush.c
:export:
diff --git a/Documentation/core-api/kobject.rst b/Documentation/core-api/kobject.rst
index 2739f8b72575..7310247310a0 100644
--- a/Documentation/core-api/kobject.rst
+++ b/Documentation/core-api/kobject.rst
@@ -118,7 +118,7 @@ Initialization of kobjects
Code which creates a kobject must, of course, initialize that object. Some
of the internal fields are setup with a (mandatory) call to kobject_init()::
- void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
+ void kobject_init(struct kobject *kobj, const struct kobj_type *ktype);
The ktype is required for a kobject to be created properly, as every kobject
must have an associated kobj_type. After calling kobject_init(), to
@@ -156,7 +156,7 @@ kobject_name()::
There is a helper function to both initialize and add the kobject to the
kernel at the same time, called surprisingly enough kobject_init_and_add()::
- int kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype,
+ int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
struct kobject *parent, const char *fmt, ...);
The arguments are the same as the individual kobject_init() and
@@ -299,7 +299,6 @@ kobj_type::
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
- struct attribute **default_attrs;
const struct attribute_group **default_groups;
const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
const void *(*namespace)(struct kobject *kobj);
@@ -313,10 +312,10 @@ call kobject_init() or kobject_init_and_add().
The release field in struct kobj_type is, of course, a pointer to the
release() method for this type of kobject. The other two fields (sysfs_ops
-and default_attrs) control how objects of this type are represented in
+and default_groups) control how objects of this type are represented in
sysfs; they are beyond the scope of this document.
-The default_attrs pointer is a list of default attributes that will be
+The default_groups pointer is a list of default attribute groups that will be
automatically created for any kobject that is registered with this ktype.
@@ -373,10 +372,9 @@ If a kset wishes to control the uevent operations of the kobjects
associated with it, it can use the struct kset_uevent_ops to handle it::
struct kset_uevent_ops {
- int (* const filter)(struct kset *kset, struct kobject *kobj);
- const char *(* const name)(struct kset *kset, struct kobject *kobj);
- int (* const uevent)(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env);
+ int (* const filter)(struct kobject *kobj);
+ const char *(* const name)(struct kobject *kobj);
+ int (* const uevent)(struct kobject *kobj, struct kobj_uevent_env *env);
};
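For illustration, a minimal sketch of a filter written against the updated,
kset-less signatures; ``example_ktype`` and all other names here are
hypothetical::

    #include <linux/kobject.h>

    static struct kobj_type example_ktype;

    static int example_uevent_filter(struct kobject *kobj)
    {
            /* Emit uevents only for kobjects of the ktype we manage;
             * the unused name() and uevent() hooks may be left NULL. */
            return kobj->ktype == &example_ktype;
    }

    static const struct kset_uevent_ops example_uevent_ops = {
            .filter = example_uevent_filter,
    };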
diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst
index dcefee707ccd..a833ecf12fbc 100644
--- a/Documentation/dev-tools/kselftest.rst
+++ b/Documentation/dev-tools/kselftest.rst
@@ -7,6 +7,14 @@ directory. These are intended to be small tests to exercise individual code
paths in the kernel. Tests are intended to be run after building, installing
and booting a kernel.
+Kselftest from mainline can be run on older stable kernels. Running tests
+from mainline offers the best coverage. Several test rings run the mainline
+kselftest suite on stable releases. The reason is that when a new test gets
+added to regression test a bug in existing code, we should be able to run
+that test on an older kernel. Hence, it is important to keep the tests able
+to run on older kernels, and to make sure a test skips gracefully on kernels
+that lack the feature it exercises.
+
You can find additional information on Kselftest framework, how to
write new tests using the framework on Kselftest wiki:
diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst
index 76af931a332c..1c83e7d60a8a 100644
--- a/Documentation/dev-tools/kunit/usage.rst
+++ b/Documentation/dev-tools/kunit/usage.rst
@@ -242,7 +242,7 @@ example:
int rectangle_area(struct shape *this)
{
- struct rectangle *self = container_of(this, struct shape, parent);
+ struct rectangle *self = container_of(this, struct rectangle, parent);
return self->length * self->width;
};
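The second argument of container_of() must be the type that embeds the
member, not the member's own type: the macro subtracts the offset of the
named member inside the containing structure. A small stand-alone sketch of
the pattern (simplified macro; the kernel's version adds type checking)::

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct shape { int id; };

    struct rectangle {
            struct shape parent;    /* embedded base object */
            int length, width;
    };

    static int rectangle_area(struct shape *this)
    {
            /* Recover the rectangle that embeds @this. */
            struct rectangle *self = container_of(this, struct rectangle, parent);

            return self->length * self->width;
    }

    int main(void)
    {
            struct rectangle r = { .parent = { .id = 1 }, .length = 3, .width = 4 };

            printf("%d\n", rectangle_area(&r.parent)); /* prints 12 */
            return 0;
    }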
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile
index c9abfbe3f0aa..41c555181b6f 100644
--- a/Documentation/devicetree/bindings/Makefile
+++ b/Documentation/devicetree/bindings/Makefile
@@ -65,7 +65,9 @@ DT_DOCS = $(patsubst $(srctree)/%,%,$(shell $(find_all_cmd)))
override DTC_FLAGS := \
-Wno-avoid_unnecessary_addr_size \
-Wno-graph_child_address \
- -Wno-interrupt_provider
+ -Wno-interrupt_provider \
+ -Wno-unique_unit_address \
+ -Wunique_unit_address_if_enabled
# Disable undocumented compatible checks until warning free
override DT_CHECKER_FLAGS ?=
diff --git a/Documentation/devicetree/bindings/arm/arm,cci-400.yaml b/Documentation/devicetree/bindings/arm/arm,cci-400.yaml
index 4682f991a5c8..f8530a50863a 100644
--- a/Documentation/devicetree/bindings/arm/arm,cci-400.yaml
+++ b/Documentation/devicetree/bindings/arm/arm,cci-400.yaml
@@ -166,16 +166,6 @@ examples:
};
};
- dma0: dma@3000000 {
- /* compatible = "arm,pl330", "arm,primecell"; */
- cci-control-port = <&cci_control0>;
- reg = <0x0 0x3000000 0x0 0x1000>;
- interrupts = <10>;
- #dma-cells = <1>;
- #dma-channels = <8>;
- #dma-requests = <32>;
- };
-
cci@2c090000 {
compatible = "arm,cci-400";
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/arm/arm-dsu-pmu.txt b/Documentation/devicetree/bindings/arm/arm-dsu-pmu.txt
deleted file mode 100644
index 6efabba530f1..000000000000
--- a/Documentation/devicetree/bindings/arm/arm-dsu-pmu.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* ARM DynamIQ Shared Unit (DSU) Performance Monitor Unit (PMU)
-
-ARM DyanmIQ Shared Unit (DSU) integrates one or more CPU cores
-with a shared L3 memory system, control logic and external interfaces to
-form a multicore cluster. The PMU enables to gather various statistics on
-the operations of the DSU. The PMU provides independent 32bit counters that
-can count any of the supported events, along with a 64bit cycle counter.
-The PMU is accessed via CPU system registers and has no MMIO component.
-
-** DSU PMU required properties:
-
-- compatible : should be one of :
-
- "arm,dsu-pmu"
-
-- interrupts : Exactly 1 SPI must be listed.
-
-- cpus : List of phandles for the CPUs connected to this DSU instance.
-
-
-** Example:
-
-dsu-pmu-0 {
- compatible = "arm,dsu-pmu";
- interrupts = <GIC_SPI 02 IRQ_TYPE_LEVEL_HIGH>;
- cpus = <&cpu_0>, <&cpu_1>;
-};
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index dfa28e3525cb..0dcebc48ea22 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -137,6 +137,9 @@ properties:
- arm,cortex-a75
- arm,cortex-a76
- arm,cortex-a77
+ - arm,cortex-a78
+ - arm,cortex-a510
+ - arm,cortex-a710
- arm,cortex-m0
- arm,cortex-m0+
- arm,cortex-m1
@@ -145,8 +148,12 @@ properties:
- arm,cortex-r4
- arm,cortex-r5
- arm,cortex-r7
+ - arm,cortex-x1
+ - arm,cortex-x2
- arm,neoverse-e1
- arm,neoverse-n1
+ - arm,neoverse-n2
+ - arm,neoverse-v1
- brcm,brahma-b15
- brcm,brahma-b53
- brcm,vulcan
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
index ea827e8763de..3fa755866528 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,apmixedsys.txt
@@ -14,6 +14,7 @@ Required Properties:
- "mediatek,mt7622-apmixedsys"
- "mediatek,mt7623-apmixedsys", "mediatek,mt2701-apmixedsys"
- "mediatek,mt7629-apmixedsys"
+ - "mediatek,mt7986-apmixedsys"
- "mediatek,mt8135-apmixedsys"
- "mediatek,mt8167-apmixedsys", "syscon"
- "mediatek,mt8173-apmixedsys"
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt
index 6b7e8067e7aa..0502db73686b 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,ethsys.txt
@@ -10,6 +10,7 @@ Required Properties:
- "mediatek,mt7622-ethsys", "syscon"
- "mediatek,mt7623-ethsys", "mediatek,mt2701-ethsys", "syscon"
- "mediatek,mt7629-ethsys", "syscon"
+ - "mediatek,mt7986-ethsys", "syscon"
- #clock-cells: Must be 1
- #reset-cells: Must be 1
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
index eb3523c7a7be..f66bd720571d 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,infracfg.txt
@@ -15,6 +15,7 @@ Required Properties:
- "mediatek,mt7622-infracfg", "syscon"
- "mediatek,mt7623-infracfg", "mediatek,mt2701-infracfg", "syscon"
- "mediatek,mt7629-infracfg", "syscon"
+ - "mediatek,mt7986-infracfg", "syscon"
- "mediatek,mt8135-infracfg", "syscon"
- "mediatek,mt8167-infracfg", "syscon"
- "mediatek,mt8173-infracfg", "syscon"
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt
index 30cb645c0e54..29ca7a10b315 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,sgmiisys.txt
@@ -8,6 +8,8 @@ Required Properties:
- compatible: Should be:
- "mediatek,mt7622-sgmiisys", "syscon"
- "mediatek,mt7629-sgmiisys", "syscon"
+ - "mediatek,mt7986-sgmiisys_0", "syscon"
+ - "mediatek,mt7986-sgmiisys_1", "syscon"
- #clock-cells: Must be 1
The SGMIISYS controller uses the common clk binding from
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
index 5ce7578cf274..b82422bb717f 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,topckgen.txt
@@ -14,6 +14,7 @@ Required Properties:
- "mediatek,mt7622-topckgen"
- "mediatek,mt7623-topckgen", "mediatek,mt2701-topckgen"
- "mediatek,mt7629-topckgen"
+ - "mediatek,mt7986-topckgen", "syscon"
- "mediatek,mt8135-topckgen"
- "mediatek,mt8167-topckgen", "syscon"
- "mediatek,mt8173-topckgen"
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index e77635c5422c..fa8b31660cad 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -119,6 +119,9 @@ Boards (incomplete list of examples):
- OMAP3 BeagleBoard : Low cost community board
compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
+- OMAP3 BeagleBoard A to B4 : Early BeagleBoard revisions A to B4 with a timer quirk
+ compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3"
+
- OMAP3 Tobi with Overo : Commercial expansion board with daughter board
compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3"
diff --git a/Documentation/devicetree/bindings/arm/pmu.yaml b/Documentation/devicetree/bindings/arm/pmu.yaml
index e17ac049e890..981bac451698 100644
--- a/Documentation/devicetree/bindings/arm/pmu.yaml
+++ b/Documentation/devicetree/bindings/arm/pmu.yaml
@@ -44,10 +44,18 @@ properties:
- arm,cortex-a76-pmu
- arm,cortex-a77-pmu
- arm,cortex-a78-pmu
+ - arm,cortex-a510-pmu
+ - arm,cortex-a710-pmu
+ - arm,cortex-x1-pmu
+ - arm,cortex-x2-pmu
- arm,neoverse-e1-pmu
- arm,neoverse-n1-pmu
+ - arm,neoverse-n2-pmu
+ - arm,neoverse-v1-pmu
- brcm,vulcan-pmu
- cavium,thunder-pmu
+ - nvidia,denver-pmu
+ - nvidia,carmel-pmu
- qcom,krait-pmu
- qcom,scorpion-pmu
- qcom,scorpion-mp-pmu
diff --git a/Documentation/devicetree/bindings/arm/ux500.yaml b/Documentation/devicetree/bindings/arm/ux500.yaml
index 5db7cfba81a4..a46193ad94e0 100644
--- a/Documentation/devicetree/bindings/arm/ux500.yaml
+++ b/Documentation/devicetree/bindings/arm/ux500.yaml
@@ -20,6 +20,11 @@ properties:
- const: st-ericsson,mop500
- const: st-ericsson,u8500
+ - description: ST-Ericsson HREF520
+ items:
+ - const: st-ericsson,href520
+ - const: st-ericsson,u8500
+
- description: ST-Ericsson HREF (v60+)
items:
- const: st-ericsson,hrefv60+
@@ -30,9 +35,34 @@ properties:
- const: calaosystems,snowball-a9500
- const: st-ericsson,u9500
+ - description: Samsung Galaxy Ace 2 (GT-I8160)
+ items:
+ - const: samsung,codina
+ - const: st-ericsson,u8500
+
+ - description: Samsung Galaxy Beam (GT-I8530)
+ items:
+ - const: samsung,gavini
+ - const: st-ericsson,u8500
+
- description: Samsung Galaxy S III mini (GT-I8190)
items:
- const: samsung,golden
- const: st-ericsson,u8500
+ - description: Samsung Galaxy S Advance (GT-I9070)
+ items:
+ - const: samsung,janice
+ - const: st-ericsson,u8500
+
+ - description: Samsung Galaxy Amp (SGH-I407)
+ items:
+ - const: samsung,kyle
+ - const: st-ericsson,u8500
+
+ - description: Samsung Galaxy XCover 2 (GT-S7710)
+ items:
+ - const: samsung,skomer
+ - const: st-ericsson,u8500
+
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/arm/xen.txt b/Documentation/devicetree/bindings/arm/xen.txt
index db5c56db30ec..61d77acbeb5e 100644
--- a/Documentation/devicetree/bindings/arm/xen.txt
+++ b/Documentation/devicetree/bindings/arm/xen.txt
@@ -7,15 +7,17 @@ the following properties:
compatible = "xen,xen-<version>", "xen,xen";
where <version> is the version of the Xen ABI of the platform.
-- reg: specifies the base physical address and size of a region in
- memory where the grant table should be mapped to, using an
- HYPERVISOR_memory_op hypercall. The memory region is large enough to map
- the whole grant table (it is larger or equal to gnttab_max_grant_frames()).
- This property is unnecessary when booting Dom0 using ACPI.
+- reg: specifies the base physical address and size of the regions in memory
+ where the special resources should be mapped to, using a HYPERVISOR_memory_op
+ hypercall.
+ Region 0 is reserved for mapping the grant table; it must always be present.
+ The memory region is large enough to map the whole grant table (it is larger
+ than or equal to gnttab_max_grant_frames()).
+ Regions 1...N are extended regions (unused address space) for mapping foreign
+ GFNs and grants; they might be absent if there is nothing to expose.
- interrupts: the interrupt used by Xen to inject event notifications.
A GIC node is also required.
- This property is unnecessary when booting Dom0 using ACPI.
To support UEFI on Xen ARM virtual platforms, Xen populates the FDT "uefi" node
under /hypervisor with following parameters:
diff --git a/Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt b/Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt
deleted file mode 100644
index b9ae4ce4a0a0..000000000000
--- a/Documentation/devicetree/bindings/ata/brcm,sata-brcm.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-* Broadcom SATA3 AHCI Controller
-
-SATA nodes are defined to describe on-chip Serial ATA controllers.
-Each SATA controller should have its own node.
-
-Required properties:
-- compatible : should be one or more of
- "brcm,bcm7216-ahci"
- "brcm,bcm7425-ahci"
- "brcm,bcm7445-ahci"
- "brcm,bcm-nsp-ahci"
- "brcm,sata3-ahci"
- "brcm,bcm63138-ahci"
-- reg : register mappings for AHCI and SATA_TOP_CTRL
-- reg-names : "ahci" and "top-ctrl"
-- interrupts : interrupt mapping for SATA IRQ
-
-Optional properties:
-
-- reset: for "brcm,bcm7216-ahci" must be a valid reset phandle
- pointing to the RESCAL reset controller provider node.
-- reset-names: for "brcm,bcm7216-ahci", must be "rescal".
-
-Also see ahci-platform.txt.
-
-Example:
-
- sata@f045a000 {
- compatible = "brcm,bcm7445-ahci", "brcm,sata3-ahci";
- reg = <0xf045a000 0xa9c>, <0xf0458040 0x24>;
- reg-names = "ahci", "top-ctrl";
- interrupts = <0 30 0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- sata0: sata-port@0 {
- reg = <0>;
- phys = <&sata_phy 0>;
- };
-
- sata1: sata-port@1 {
- reg = <1>;
- phys = <&sata_phy 1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/ata/brcm,sata-brcm.yaml b/Documentation/devicetree/bindings/ata/brcm,sata-brcm.yaml
new file mode 100644
index 000000000000..235a93ac86b0
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/brcm,sata-brcm.yaml
@@ -0,0 +1,90 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ata/brcm,sata-brcm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom SATA3 AHCI Controller
+
+description:
+ SATA nodes are defined to describe on-chip Serial ATA controllers.
+ Each SATA controller should have its own node.
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+allOf:
+ - $ref: sata-common.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - brcm,bcm7216-ahci
+ - brcm,bcm7445-ahci
+ - brcm,bcm7425-ahci
+ - brcm,bcm63138-ahci
+ - const: brcm,sata3-ahci
+ - items:
+ - const: brcm,bcm-nsp-ahci
+
+ reg:
+ minItems: 2
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: ahci
+ - const: top-ctrl
+
+ interrupts:
+ maxItems: 1
+
+ dma-coherent: true
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,bcm7216-ahci
+ - brcm,bcm63138-ahci
+then:
+ properties:
+ resets:
+ maxItems: 1
+ reset-names:
+ enum:
+ - rescal
+ - ahci
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - "#address-cells"
+ - "#size-cells"
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sata@f045a000 {
+ compatible = "brcm,bcm7445-ahci", "brcm,sata3-ahci";
+ reg = <0xf045a000 0xa9c>, <0xf0458040 0x24>;
+ reg-names = "ahci", "top-ctrl";
+ interrupts = <0 30 0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ sata0: sata-port@0 {
+ reg = <0>;
+ phys = <&sata_phy 0>;
+ };
+
+ sata1: sata-port@1 {
+ reg = <1>;
+ phys = <&sata_phy 1>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt b/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt
deleted file mode 100644
index 10f6d0a8159d..000000000000
--- a/Documentation/devicetree/bindings/bus/brcm,gisb-arb.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-Broadcom GISB bus Arbiter controller
-
-Required properties:
-
-- compatible:
- "brcm,bcm7278-gisb-arb" for V7 28nm chips
- "brcm,gisb-arb" or "brcm,bcm7445-gisb-arb" for other 28nm chips
- "brcm,bcm7435-gisb-arb" for newer 40nm chips
- "brcm,bcm7400-gisb-arb" for older 40nm chips and all 65nm chips
- "brcm,bcm7038-gisb-arb" for 130nm chips
-- reg: specifies the base physical address and size of the registers
-- interrupts: specifies the two interrupts (timeout and TEA) to be used from
- the parent interrupt controller. A third optional interrupt may be specified
- for breakpoints.
-
-Optional properties:
-
-- brcm,gisb-arb-master-mask: 32-bits wide bitmask used to specify which GISB
- masters are valid at the system level
-- brcm,gisb-arb-master-names: string list of the litteral name of the GISB
- masters. Should match the number of bits set in brcm,gisb-master-mask and
- the order in which they appear
-
-Example:
-
-gisb-arb@f0400000 {
- compatible = "brcm,gisb-arb";
- reg = <0xf0400000 0x800>;
- interrupts = <0>, <2>;
- interrupt-parent = <&sun_l2_intc>;
-
- brcm,gisb-arb-master-mask = <0x7>;
- brcm,gisb-arb-master-names = "bsp_0", "scpu_0", "cpu_0";
-};
diff --git a/Documentation/devicetree/bindings/bus/brcm,gisb-arb.yaml b/Documentation/devicetree/bindings/bus/brcm,gisb-arb.yaml
new file mode 100644
index 000000000000..b23c3001991e
--- /dev/null
+++ b/Documentation/devicetree/bindings/bus/brcm,gisb-arb.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/bus/brcm,gisb-arb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom GISB bus Arbiter controller
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - brcm,bcm7445-gisb-arb # for other 28nm chips
+ - const: brcm,gisb-arb
+ - items:
+ - enum:
+ - brcm,bcm7278-gisb-arb # for V7 28nm chips
+ - brcm,bcm7435-gisb-arb # for newer 40nm chips
+ - brcm,bcm7400-gisb-arb # for older 40nm chips and all 65nm chips
+ - brcm,bcm7038-gisb-arb # for 130nm chips
+ - brcm,gisb-arb # fallback compatible
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 2
+ items:
+ - description: timeout interrupt line
+ - description: target abort interrupt line
+ - description: breakpoint interrupt line
+
+ brcm,gisb-arb-master-mask:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ 32-bit wide bitmask used to specify which GISB masters are valid at the
+ system level
+
+ brcm,gisb-arb-master-names:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description: >
+ String list of the literal names of the GISB masters. Should match the
+ number of bits set in brcm,gisb-arb-master-mask and the order in which they
+ appear from MSB to LSB.
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ gisb-arb@f0400000 {
+ compatible = "brcm,gisb-arb";
+ reg = <0xf0400000 0x800>;
+ interrupts = <0>, <2>;
+ interrupt-parent = <&sun_l2_intc>;
+ brcm,gisb-arb-master-mask = <0x7>;
+ brcm,gisb-arb-master-names = "bsp_0", "scpu_0", "cpu_0";
+ };
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
index c4b7243ddcf2..15ed64d35261 100644
--- a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-ccu.yaml
@@ -34,6 +34,8 @@ properties:
- allwinner,sun8i-v3-ccu
- allwinner,sun8i-v3s-ccu
- allwinner,sun9i-a80-ccu
+ - allwinner,sun20i-d1-ccu
+ - allwinner,sun20i-d1-r-ccu
- allwinner,sun50i-a64-ccu
- allwinner,sun50i-a64-r-ccu
- allwinner,sun50i-a100-ccu
@@ -79,6 +81,7 @@ if:
enum:
- allwinner,sun8i-a83t-r-ccu
- allwinner,sun8i-h3-r-ccu
+ - allwinner,sun20i-d1-r-ccu
- allwinner,sun50i-a64-r-ccu
- allwinner,sun50i-a100-r-ccu
- allwinner,sun50i-h6-r-ccu
@@ -99,6 +102,7 @@ else:
properties:
compatible:
enum:
+ - allwinner,sun20i-d1-ccu
- allwinner,sun50i-a100-ccu
- allwinner,sun50i-h6-ccu
- allwinner,sun50i-h616-ccu
diff --git a/Documentation/devicetree/bindings/clock/exynos5260-clock.txt b/Documentation/devicetree/bindings/clock/exynos5260-clock.txt
deleted file mode 100644
index c79d31f7f66e..000000000000
--- a/Documentation/devicetree/bindings/clock/exynos5260-clock.txt
+++ /dev/null
@@ -1,190 +0,0 @@
-* Samsung Exynos5260 Clock Controller
-
-Exynos5260 has 13 clock controllers which are instantiated
-independently from the device-tree. These clock controllers
-generate and supply clocks to various hardware blocks within
-the SoC.
-
-Each clock is assigned an identifier and client nodes can use
-this identifier to specify the clock which they consume. All
-available clocks are defined as preprocessor macros in
-dt-bindings/clock/exynos5260-clk.h header and can be used in
-device tree sources.
-
-External clocks:
-
-There are several clocks that are generated outside the SoC. It
-is expected that they are defined using standard clock bindings
-with following clock-output-names:
-
- - "fin_pll" - PLL input clock from XXTI
- - "xrtcxti" - input clock from XRTCXTI
- - "ioclk_pcm_extclk" - pcm external operation clock
- - "ioclk_spdif_extclk" - spdif external operation clock
- - "ioclk_i2s_cdclk" - i2s0 codec clock
-
-Phy clocks:
-
-There are several clocks which are generated by specific PHYs.
-These clocks are fed into the clock controller and then routed to
-the hardware blocks. These clocks are defined as fixed clocks in the
-driver with following names:
-
- - "phyclk_dptx_phy_ch3_txd_clk" - dp phy clock for channel 3
- - "phyclk_dptx_phy_ch2_txd_clk" - dp phy clock for channel 2
- - "phyclk_dptx_phy_ch1_txd_clk" - dp phy clock for channel 1
- - "phyclk_dptx_phy_ch0_txd_clk" - dp phy clock for channel 0
- - "phyclk_hdmi_phy_tmds_clko" - hdmi phy tmds clock
- - "phyclk_hdmi_phy_pixel_clko" - hdmi phy pixel clock
- - "phyclk_hdmi_link_o_tmds_clkhi" - hdmi phy for hdmi link
- - "phyclk_dptx_phy_o_ref_clk_24m" - dp phy reference clock
- - "phyclk_dptx_phy_clk_div2"
- - "phyclk_mipi_dphy_4l_m_rxclkesc0"
- - "phyclk_usbhost20_phy_phyclock" - usb 2.0 phy clock
- - "phyclk_usbhost20_phy_freeclk"
- - "phyclk_usbhost20_phy_clk48mohci"
- - "phyclk_usbdrd30_udrd30_pipe_pclk"
- - "phyclk_usbdrd30_udrd30_phyclock" - usb 3.0 phy clock
-
-Required Properties for Clock Controller:
-
- - compatible: should be one of the following.
- 1) "samsung,exynos5260-clock-top"
- 2) "samsung,exynos5260-clock-peri"
- 3) "samsung,exynos5260-clock-egl"
- 4) "samsung,exynos5260-clock-kfc"
- 5) "samsung,exynos5260-clock-g2d"
- 6) "samsung,exynos5260-clock-mif"
- 7) "samsung,exynos5260-clock-mfc"
- 8) "samsung,exynos5260-clock-g3d"
- 9) "samsung,exynos5260-clock-fsys"
- 10) "samsung,exynos5260-clock-aud"
- 11) "samsung,exynos5260-clock-isp"
- 12) "samsung,exynos5260-clock-gscl"
- 13) "samsung,exynos5260-clock-disp"
-
- - reg: physical base address of the controller and the length of
- memory mapped region.
-
- - #clock-cells: should be 1.
-
- - clocks: list of clock identifiers which are fed as the input to
- the given clock controller. Please refer the next section to find
- the input clocks for a given controller.
-
- - clock-names: list of names of clocks which are fed as the input
- to the given clock controller.
-
-Input clocks for top clock controller:
- - fin_pll
- - dout_mem_pll
- - dout_bus_pll
- - dout_media_pll
-
-Input clocks for peri clock controller:
- - fin_pll
- - ioclk_pcm_extclk
- - ioclk_i2s_cdclk
- - ioclk_spdif_extclk
- - phyclk_hdmi_phy_ref_cko
- - dout_aclk_peri_66
- - dout_sclk_peri_uart0
- - dout_sclk_peri_uart1
- - dout_sclk_peri_uart2
- - dout_sclk_peri_spi0_b
- - dout_sclk_peri_spi1_b
- - dout_sclk_peri_spi2_b
- - dout_aclk_peri_aud
- - dout_sclk_peri_spi0_b
-
-Input clocks for egl clock controller:
- - fin_pll
- - dout_bus_pll
-
-Input clocks for kfc clock controller:
- - fin_pll
- - dout_media_pll
-
-Input clocks for g2d clock controller:
- - fin_pll
- - dout_aclk_g2d_333
-
-Input clocks for mif clock controller:
- - fin_pll
-
-Input clocks for mfc clock controller:
- - fin_pll
- - dout_aclk_mfc_333
-
-Input clocks for g3d clock controller:
- - fin_pll
-
-Input clocks for fsys clock controller:
- - fin_pll
- - phyclk_usbhost20_phy_phyclock
- - phyclk_usbhost20_phy_freeclk
- - phyclk_usbhost20_phy_clk48mohci
- - phyclk_usbdrd30_udrd30_pipe_pclk
- - phyclk_usbdrd30_udrd30_phyclock
- - dout_aclk_fsys_200
-
-Input clocks for aud clock controller:
- - fin_pll
- - fout_aud_pll
- - ioclk_i2s_cdclk
- - ioclk_pcm_extclk
-
-Input clocks for isp clock controller:
- - fin_pll
- - dout_aclk_isp1_266
- - dout_aclk_isp1_400
- - mout_aclk_isp1_266
-
-Input clocks for gscl clock controller:
- - fin_pll
- - dout_aclk_gscl_400
- - dout_aclk_gscl_333
-
-Input clocks for disp clock controller:
- - fin_pll
- - phyclk_dptx_phy_ch3_txd_clk
- - phyclk_dptx_phy_ch2_txd_clk
- - phyclk_dptx_phy_ch1_txd_clk
- - phyclk_dptx_phy_ch0_txd_clk
- - phyclk_hdmi_phy_tmds_clko
- - phyclk_hdmi_phy_ref_clko
- - phyclk_hdmi_phy_pixel_clko
- - phyclk_hdmi_link_o_tmds_clkhi
- - phyclk_mipi_dphy_4l_m_txbyte_clkhs
- - phyclk_dptx_phy_o_ref_clk_24m
- - phyclk_dptx_phy_clk_div2
- - phyclk_mipi_dphy_4l_m_rxclkesc0
- - phyclk_hdmi_phy_ref_cko
- - ioclk_spdif_extclk
- - dout_aclk_peri_aud
- - dout_aclk_disp_222
- - dout_sclk_disp_pixel
- - dout_aclk_disp_333
-
-Example 1: An example of a clock controller node is listed below.
-
- clock_mfc: clock-controller@11090000 {
- compatible = "samsung,exynos5260-clock-mfc";
- clock = <&fin_pll>, <&clock_top TOP_DOUT_ACLK_MFC_333>;
- clock-names = "fin_pll", "dout_aclk_mfc_333";
- reg = <0x11090000 0x10000>;
- #clock-cells = <1>;
- };
-
-Example 2: UART controller node that consumes the clock generated by the
- peri clock controller. Refer to the standard clock bindings for
- information about 'clocks' and 'clock-names' property.
-
- serial@12c00000 {
- compatible = "samsung,exynos4210-uart";
- reg = <0x12C00000 0x100>;
- interrupts = <0 146 0>;
- clocks = <&clock_peri PERI_PCLK_UART0>, <&clock_peri PERI_SCLK_UART0>;
- clock-names = "uart", "clk_uart_baud0";
- };
-
diff --git a/Documentation/devicetree/bindings/clock/exynos5410-clock.txt b/Documentation/devicetree/bindings/clock/exynos5410-clock.txt
deleted file mode 100644
index 217beb27c30e..000000000000
--- a/Documentation/devicetree/bindings/clock/exynos5410-clock.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-* Samsung Exynos5410 Clock Controller
-
-The Exynos5410 clock controller generates and supplies clock to various
-controllers within the Exynos5410 SoC.
-
-Required Properties:
-
-- compatible: should be "samsung,exynos5410-clock"
-
-- reg: physical base address of the controller and length of memory mapped
- region.
-
-- #clock-cells: should be 1.
-
-- clocks: should contain an entry specifying the root clock from external
- oscillator supplied through XXTI or XusbXTI pin. This clock should be
- defined using standard clock bindings with "fin_pll" clock-output-name.
- That clock is being passed internally to the 9 PLLs.
-
-All available clocks are defined as preprocessor macros in
-dt-bindings/clock/exynos5410.h header and can be used in device
-tree sources.
-
-Example 1: An example of a clock controller node is listed below.
-
- fin_pll: xxti {
- compatible = "fixed-clock";
- clock-frequency = <24000000>;
- clock-output-names = "fin_pll";
- #clock-cells = <0>;
- };
-
- clock: clock-controller@10010000 {
- compatible = "samsung,exynos5410-clock";
- reg = <0x10010000 0x30000>;
- #clock-cells = <1>;
- clocks = <&fin_pll>;
- };
-
-Example 2: UART controller node that consumes the clock generated by the clock
- controller. Refer to the standard clock bindings for information
- about 'clocks' and 'clock-names' property.
-
- serial@12c20000 {
- compatible = "samsung,exynos4210-uart";
- reg = <0x12C00000 0x100>;
- interrupts = <0 51 0>;
- clocks = <&clock CLK_UART0>, <&clock CLK_SCLK_UART0>;
- clock-names = "uart", "clk_uart_baud0";
- };
diff --git a/Documentation/devicetree/bindings/clock/exynos5433-clock.txt b/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
deleted file mode 100644
index 183c327a7d6b..000000000000
--- a/Documentation/devicetree/bindings/clock/exynos5433-clock.txt
+++ /dev/null
@@ -1,507 +0,0 @@
-* Samsung Exynos5433 CMU (Clock Management Units)
-
-The Exynos5433 clock controller generates and supplies clock to various
-controllers within the Exynos5433 SoC.
-
-Required Properties:
-
-- compatible: should be one of the following.
- - "samsung,exynos5433-cmu-top" - clock controller compatible for CMU_TOP
- which generates clocks for IMEM/FSYS/G3D/GSCL/HEVC/MSCL/G2D/MFC/PERIC/PERIS
- domains and bus clocks.
- - "samsung,exynos5433-cmu-cpif" - clock controller compatible for CMU_CPIF
- which generates clocks for LLI (Low Latency Interface) IP.
- - "samsung,exynos5433-cmu-mif" - clock controller compatible for CMU_MIF
- which generates clocks for DRAM Memory Controller domain.
- - "samsung,exynos5433-cmu-peric" - clock controller compatible for CMU_PERIC
- which generates clocks for UART/I2C/SPI/I2S/PCM/SPDIF/PWM/SLIMBUS IPs.
- - "samsung,exynos5433-cmu-peris" - clock controller compatible for CMU_PERIS
- which generates clocks for PMU/TMU/MCT/WDT/RTC/SECKEY/TZPC IPs.
- - "samsung,exynos5433-cmu-fsys" - clock controller compatible for CMU_FSYS
- which generates clocks for USB/UFS/SDMMC/TSI/PDMA IPs.
- - "samsung,exynos5433-cmu-g2d" - clock controller compatible for CMU_G2D
- which generates clocks for G2D/MDMA IPs.
- - "samsung,exynos5433-cmu-disp" - clock controller compatible for CMU_DISP
- which generates clocks for Display (DECON/HDMI/DSIM/MIXER) IPs.
- - "samsung,exynos5433-cmu-aud" - clock controller compatible for CMU_AUD
- which generates clocks for Cortex-A5/BUS/AUDIO clocks.
- - "samsung,exynos5433-cmu-bus0", "samsung,exynos5433-cmu-bus1"
- and "samsung,exynos5433-cmu-bus2" - clock controller compatible for CMU_BUS
- which generates global data buses clock and global peripheral buses clock.
- - "samsung,exynos5433-cmu-g3d" - clock controller compatible for CMU_G3D
- which generates clocks for 3D Graphics Engine IP.
- - "samsung,exynos5433-cmu-gscl" - clock controller compatible for CMU_GSCL
- which generates clocks for GSCALER IPs.
- - "samsung,exynos5433-cmu-apollo"- clock controller compatible for CMU_APOLLO
- which generates clocks for Cortex-A53 Quad-core processor.
- - "samsung,exynos5433-cmu-atlas" - clock controller compatible for CMU_ATLAS
- which generates clocks for Cortex-A57 Quad-core processor, CoreSight and
- L2 cache controller.
- - "samsung,exynos5433-cmu-mscl" - clock controller compatible for CMU_MSCL
- which generates clocks for M2M (Memory to Memory) scaler and JPEG IPs.
- - "samsung,exynos5433-cmu-mfc" - clock controller compatible for CMU_MFC
- which generates clocks for MFC(Multi-Format Codec) IP.
- - "samsung,exynos5433-cmu-hevc" - clock controller compatible for CMU_HEVC
- which generates clocks for HEVC(High Efficiency Video Codec) decoder IP.
- - "samsung,exynos5433-cmu-isp" - clock controller compatible for CMU_ISP
- which generates clocks for FIMC-ISP/DRC/SCLC/DIS/3DNR IPs.
- - "samsung,exynos5433-cmu-cam0" - clock controller compatible for CMU_CAM0
- which generates clocks for MIPI_CSIS{0|1}/FIMC_LITE_{A|B|D}/FIMC_3AA{0|1}
- IPs.
- - "samsung,exynos5433-cmu-cam1" - clock controller compatible for CMU_CAM1
- which generates clocks for Cortex-A5/MIPI_CSIS2/FIMC-LITE_C/FIMC-FD IPs.
- - "samsung,exynos5433-cmu-imem" - clock controller compatible for CMU_IMEM
- which generates clocks for SSS (Security SubSystem) and SlimSSS IPs.
-
-- reg: physical base address of the controller and length of memory mapped
- region.
-
-- #clock-cells: should be 1.
-
-- clocks: list of the clock controller input clock identifiers,
- from common clock bindings. Please refer the next section
- to find the input clocks for a given controller.
-
-- clock-names: list of the clock controller input clock names,
- as described in clock-bindings.txt.
-
- Input clocks for top clock controller:
- - oscclk
- - sclk_mphy_pll
- - sclk_mfc_pll
- - sclk_bus_pll
-
- Input clocks for cpif clock controller:
- - oscclk
-
- Input clocks for mif clock controller:
- - oscclk
- - sclk_mphy_pll
-
- Input clocks for fsys clock controller:
- - oscclk
- - sclk_ufs_mphy
- - aclk_fsys_200
- - sclk_pcie_100_fsys
- - sclk_ufsunipro_fsys
- - sclk_mmc2_fsys
- - sclk_mmc1_fsys
- - sclk_mmc0_fsys
- - sclk_usbhost30_fsys
- - sclk_usbdrd30_fsys
-
- Input clocks for g2d clock controller:
- - oscclk
- - aclk_g2d_266
- - aclk_g2d_400
-
- Input clocks for disp clock controller:
- - oscclk
- - sclk_dsim1_disp
- - sclk_dsim0_disp
- - sclk_dsd_disp
- - sclk_decon_tv_eclk_disp
- - sclk_decon_vclk_disp
- - sclk_decon_eclk_disp
- - sclk_decon_tv_vclk_disp
- - aclk_disp_333
-
- Input clocks for audio clock controller:
- - oscclk
- - fout_aud_pll
-
- Input clocks for bus0 clock controller:
- - aclk_bus0_400
-
- Input clocks for bus1 clock controller:
- - aclk_bus1_400
-
- Input clocks for bus2 clock controller:
- - oscclk
- - aclk_bus2_400
-
- Input clocks for g3d clock controller:
- - oscclk
- - aclk_g3d_400
-
- Input clocks for gscl clock controller:
- - oscclk
- - aclk_gscl_111
- - aclk_gscl_333
-
- Input clocks for apollo clock controller:
- - oscclk
- - sclk_bus_pll_apollo
-
- Input clocks for atlas clock controller:
- - oscclk
- - sclk_bus_pll_atlas
-
- Input clocks for mscl clock controller:
- - oscclk
- - sclk_jpeg_mscl
- - aclk_mscl_400
-
- Input clocks for mfc clock controller:
- - oscclk
- - aclk_mfc_400
-
- Input clocks for hevc clock controller:
- - oscclk
- - aclk_hevc_400
-
- Input clocks for isp clock controller:
- - oscclk
- - aclk_isp_dis_400
- - aclk_isp_400
-
- Input clocks for cam0 clock controller:
- - oscclk
- - aclk_cam0_333
- - aclk_cam0_400
- - aclk_cam0_552
-
- Input clocks for cam1 clock controller:
- - oscclk
- - sclk_isp_uart_cam1
- - sclk_isp_spi1_cam1
- - sclk_isp_spi0_cam1
- - aclk_cam1_333
- - aclk_cam1_400
- - aclk_cam1_552
-
- Input clocks for imem clock controller:
- - oscclk
- - aclk_imem_sssx_266
- - aclk_imem_266
- - aclk_imem_200
-
-Optional properties:
- - power-domains: a phandle to respective power domain node as described by
- generic PM domain bindings (see power/power_domain.txt for more
- information).
-
-Each clock is assigned an identifier and client nodes can use this identifier
-to specify the clock which they consume.
-
-All available clocks are defined as preprocessor macros in
-dt-bindings/clock/exynos5433.h header and can be used in device
-tree sources.
-
-Example 1: Examples of 'oscclk' source clock node are listed below.
-
- xxti: xxti {
- compatible = "fixed-clock";
- clock-output-names = "oscclk";
- #clock-cells = <0>;
- };
-
-Example 2: Examples of clock controller nodes are listed below.
-
- cmu_top: clock-controller@10030000 {
- compatible = "samsung,exynos5433-cmu-top";
- reg = <0x10030000 0x0c04>;
- #clock-cells = <1>;
-
- clock-names = "oscclk",
- "sclk_mphy_pll",
- "sclk_mfc_pll",
- "sclk_bus_pll";
- clocks = <&xxti>,
- <&cmu_cpif CLK_SCLK_MPHY_PLL>,
- <&cmu_mif CLK_SCLK_MFC_PLL>,
- <&cmu_mif CLK_SCLK_BUS_PLL>;
- };
-
- cmu_cpif: clock-controller@10fc0000 {
- compatible = "samsung,exynos5433-cmu-cpif";
- reg = <0x10fc0000 0x0c04>;
- #clock-cells = <1>;
-
- clock-names = "oscclk";
- clocks = <&xxti>;
- };
-
- cmu_mif: clock-controller@105b0000 {
- compatible = "samsung,exynos5433-cmu-mif";
- reg = <0x105b0000 0x100c>;
- #clock-cells = <1>;
-
- clock-names = "oscclk",
- "sclk_mphy_pll";
- clocks = <&xxti>,
- <&cmu_cpif CLK_SCLK_MPHY_PLL>;
- };
-
- cmu_peric: clock-controller@14c80000 {
- compatible = "samsung,exynos5433-cmu-peric";
- reg = <0x14c80000 0x0b08>;
- #clock-cells = <1>;
- };
-
- cmu_peris: clock-controller@10040000 {
- compatible = "samsung,exynos5433-cmu-peris";
- reg = <0x10040000 0x0b20>;
- #clock-cells = <1>;
- };
-
- cmu_fsys: clock-controller@156e0000 {
- compatible = "samsung,exynos5433-cmu-fsys";
- reg = <0x156e0000 0x0b04>;
- #clock-cells = <1>;
-
- clock-names = "oscclk",
- "sclk_ufs_mphy",
- "aclk_fsys_200",
- "sclk_pcie_100_fsys",
- "sclk_ufsunipro_fsys",
- "sclk_mmc2_fsys",
- "sclk_mmc1_fsys",
- "sclk_mmc0_fsys",
- "sclk_usbhost30_fsys",
- "sclk_usbdrd30_fsys";
- clocks = <&xxti>,
- <&cmu_cpif CLK_SCLK_UFS_MPHY>,
- <&cmu_top CLK_ACLK_FSYS_200>,
- <&cmu_top CLK_SCLK_PCIE_100_FSYS>,
- <&cmu_top CLK_SCLK_UFSUNIPRO_FSYS>,
- <&cmu_top CLK_SCLK_MMC2_FSYS>,
- <&cmu_top CLK_SCLK_MMC1_FSYS>,
- <&cmu_top CLK_SCLK_MMC0_FSYS>,
- <&cmu_top CLK_SCLK_USBHOST30_FSYS>,
- <&cmu_top CLK_SCLK_USBDRD30_FSYS>;
- };
-
- cmu_g2d: clock-controller@12460000 {
- compatible = "samsung,exynos5433-cmu-g2d";
- reg = <0x12460000 0x0b08>;
- #clock-cells = <1>;
-
- clock-names = "oscclk",
- "aclk_g2d_266",
- "aclk_g2d_400";
- clocks = <&xxti>,
- <&cmu_top CLK_ACLK_G2D_266>,
- <&cmu_top CLK_ACLK_G2D_400>;
- power-domains = <&pd_g2d>;
- };
-
- cmu_disp: clock-controller@13b90000 {
- compatible = "samsung,exynos5433-cmu-disp";
- reg = <0x13b90000 0x0c04>;
- #clock-cells = <1>;
-
- clock-names = "oscclk",
- "sclk_dsim1_disp",
- "sclk_dsim0_disp",
- "sclk_dsd_disp",
- "sclk_decon_tv_eclk_disp",
- "sclk_decon_vclk_disp",
- "sclk_decon_eclk_disp",
- "sclk_decon_tv_vclk_disp",
- "aclk_disp_333";
- clocks = <&xxti>,
- <&cmu_mif CLK_SCLK_DSIM1_DISP>,
- <&cmu_mif CLK_SCLK_DSIM0_DISP>,
- <&cmu_mif CLK_SCLK_DSD_DISP>,
- <&cmu_mif CLK_SCLK_DECON_TV_ECLK_DISP>,
- <&cmu_mif CLK_SCLK_DECON_VCLK_DISP>,
- <&cmu_mif CLK_SCLK_DECON_ECLK_DISP>,
- <&cmu_mif CLK_SCLK_DECON_TV_VCLK_DISP>,
- <&cmu_mif CLK_ACLK_DISP_333>;
- power-domains = <&pd_disp>;
- };
-
- cmu_aud: clock-controller@114c0000 {
- compatible = "samsung,exynos5433-cmu-aud";
- reg = <0x114c0000 0x0b04>;
- #clock-cells = <1>;
-
- clock-names = "oscclk", "fout_aud_pll";
- clocks = <&xxti>, <&cmu_top CLK_FOUT_AUD_PLL>;
- power-domains = <&pd_aud>;
- };
-
- cmu_bus0: clock-controller@13600000 {
- compatible = "samsung,exynos5433-cmu-bus0";
- reg = <0x13600000 0x0b04>;
- #clock-cells = <1>;
-
- clock-names = "aclk_bus0_400";
- clocks = <&cmu_top CLK_ACLK_BUS0_400>;
- };
-
- cmu_bus1: clock-controller@14800000 {
- compatible = "samsung,exynos5433-cmu-bus1";
- reg = <0x14800000 0x0b04>;
- #clock-cells = <1>;
-
- clock-names = "aclk_bus1_400";
- clocks = <&cmu_top CLK_ACLK_BUS1_400>;
- };
-
- cmu_bus2: clock-controller@13400000 {
- compatible = "samsung,exynos5433-cmu-bus2";
- reg = <0x13400000 0x0b04>;
- #clock-cells = <1>;
-
- clock-names = "oscclk", "aclk_bus2_400";
- clocks = <&xxti>, <&cmu_mif CLK_ACLK_BUS2_400>;
- };
-
- cmu_g3d: clock-controller@14aa0000 {
- compatible = "samsung,exynos5433-cmu-g3d";
- reg = <0x14aa0000 0x1000>;
- #clock-cells = <1>;
-
- clock-names = "oscclk", "aclk_g3d_400";
- clocks = <&xxti>, <&cmu_top CLK_ACLK_G3D_400>;
- power-domains = <&pd_g3d>;
- };
-
- cmu_gscl: clock-controller@13cf0000 {
- compatible = "samsung,exynos5433-cmu-gscl";
- reg = <0x13cf0000 0x0b10>;
- #clock-cells = <1>;
-
- clock-names = "oscclk",
- "aclk_gscl_111",
- "aclk_gscl_333";
- clocks = <&xxti>,
- <&cmu_top CLK_ACLK_GSCL_111>,
- <&cmu_top CLK_ACLK_GSCL_333>;
- power-domains = <&pd_gscl>;
- };
-
- cmu_apollo: clock-controller@11900000 {
- compatible = "samsung,exynos5433-cmu-apollo";
- reg = <0x11900000 0x1088>;
- #clock-cells = <1>;
-
- clock-names = "oscclk", "sclk_bus_pll_apollo";
- clocks = <&xxti>, <&cmu_mif CLK_SCLK_BUS_PLL_APOLLO>;
- };
-
- cmu_atlas: clock-controller@11800000 {
- compatible = "samsung,exynos5433-cmu-atlas";
- reg = <0x11800000 0x1088>;
- #clock-cells = <1>;
-
- clock-names = "oscclk", "sclk_bus_pll_atlas";
- clocks = <&xxti>, <&cmu_mif CLK_SCLK_BUS_PLL_ATLAS>;
- };
-
- cmu_mscl: clock-controller@105d0000 {
- compatible = "samsung,exynos5433-cmu-mscl";
- reg = <0x105d0000 0x0b10>;
- #clock-cells = <1>;
-
- clock-names = "oscclk",
- "sclk_jpeg_mscl",
- "aclk_mscl_400";
- clocks = <&xxti>,
- <&cmu_top CLK_SCLK_JPEG_MSCL>,
- <&cmu_top CLK_ACLK_MSCL_400>;
- power-domains = <&pd_mscl>;
- };
-
- cmu_mfc: clock-controller@15280000 {
- compatible = "samsung,exynos5433-cmu-mfc";
- reg = <0x15280000 0x0b08>;
- #clock-cells = <1>;
-
- clock-names = "oscclk", "aclk_mfc_400";
- clocks = <&xxti>, <&cmu_top CLK_ACLK_MFC_400>;
- power-domains = <&pd_mfc>;
- };
-
- cmu_hevc: clock-controller@14f80000 {
- compatible = "samsung,exynos5433-cmu-hevc";
- reg = <0x14f80000 0x0b08>;
- #clock-cells = <1>;
-
- clock-names = "oscclk", "aclk_hevc_400";
- clocks = <&xxti>, <&cmu_top CLK_ACLK_HEVC_400>;
- power-domains = <&pd_hevc>;
- };
-
- cmu_isp: clock-controller@146d0000 {
- compatible = "samsung,exynos5433-cmu-isp";
- reg = <0x146d0000 0x0b0c>;
- #clock-cells = <1>;
-
- clock-names = "oscclk",
- "aclk_isp_dis_400",
- "aclk_isp_400";
- clocks = <&xxti>,
- <&cmu_top CLK_ACLK_ISP_DIS_400>,
- <&cmu_top CLK_ACLK_ISP_400>;
- power-domains = <&pd_isp>;
- };
-
- cmu_cam0: clock-controller@120d0000 {
- compatible = "samsung,exynos5433-cmu-cam0";
- reg = <0x120d0000 0x0b0c>;
- #clock-cells = <1>;
-
- clock-names = "oscclk",
- "aclk_cam0_333",
- "aclk_cam0_400",
- "aclk_cam0_552";
- clocks = <&xxti>,
- <&cmu_top CLK_ACLK_CAM0_333>,
- <&cmu_top CLK_ACLK_CAM0_400>,
- <&cmu_top CLK_ACLK_CAM0_552>;
- power-domains = <&pd_cam0>;
- };
-
- cmu_cam1: clock-controller@145d0000 {
- compatible = "samsung,exynos5433-cmu-cam1";
- reg = <0x145d0000 0x0b08>;
- #clock-cells = <1>;
-
- clock-names = "oscclk",
- "sclk_isp_uart_cam1",
- "sclk_isp_spi1_cam1",
- "sclk_isp_spi0_cam1",
- "aclk_cam1_333",
- "aclk_cam1_400",
- "aclk_cam1_552";
- clocks = <&xxti>,
- <&cmu_top CLK_SCLK_ISP_UART_CAM1>,
- <&cmu_top CLK_SCLK_ISP_SPI1_CAM1>,
- <&cmu_top CLK_SCLK_ISP_SPI0_CAM1>,
- <&cmu_top CLK_ACLK_CAM1_333>,
- <&cmu_top CLK_ACLK_CAM1_400>,
- <&cmu_top CLK_ACLK_CAM1_552>;
- power-domains = <&pd_cam1>;
- };
-
- cmu_imem: clock-controller@11060000 {
- compatible = "samsung,exynos5433-cmu-imem";
- reg = <0x11060000 0x1000>;
- #clock-cells = <1>;
-
- clock-names = "oscclk",
- "aclk_imem_sssx_266",
- "aclk_imem_266",
- "aclk_imem_200";
- clocks = <&xxti>,
- <&cmu_top CLK_DIV_ACLK_IMEM_SSSX_266>,
- <&cmu_top CLK_DIV_ACLK_IMEM_266>,
- <&cmu_top CLK_DIV_ACLK_IMEM_200>;
- };
-
-Example 3: UART controller node that consumes the clock generated by the clock
- controller.
-
- serial_0: serial@14c10000 {
- compatible = "samsung,exynos5433-uart";
- reg = <0x14C10000 0x100>;
- interrupts = <0 421 0>;
- clocks = <&cmu_peric CLK_PCLK_UART0>,
- <&cmu_peric CLK_SCLK_UART0>;
- clock-names = "uart", "clk_uart_baud0";
- pinctrl-names = "default";
- pinctrl-0 = <&uart0_bus>;
- };
diff --git a/Documentation/devicetree/bindings/clock/exynos7-clock.txt b/Documentation/devicetree/bindings/clock/exynos7-clock.txt
deleted file mode 100644
index 6bf1e7493f61..000000000000
--- a/Documentation/devicetree/bindings/clock/exynos7-clock.txt
+++ /dev/null
@@ -1,108 +0,0 @@
-* Samsung Exynos7 Clock Controller
-
-Exynos7 clock controller has various blocks which are instantiated
-independently from the device-tree. These clock controllers
-generate and supply clocks to various hardware blocks within
-the SoC.
-
-Each clock is assigned an identifier and client nodes can use
-this identifier to specify the clock which they consume. All
-available clocks are defined as preprocessor macros in
-dt-bindings/clock/exynos7-clk.h header and can be used in
-device tree sources.
-
-External clocks:
-
-There are several clocks that are generated outside the SoC. It
-is expected that they are defined using standard clock bindings
-with following clock-output-names:
-
- - "fin_pll" - PLL input clock from XXTI
-
-Required Properties for Clock Controller:
-
- - compatible: clock controllers will use one of the following
- compatible strings to indicate the clock controller
- functionality.
-
- - "samsung,exynos7-clock-topc"
- - "samsung,exynos7-clock-top0"
- - "samsung,exynos7-clock-top1"
- - "samsung,exynos7-clock-ccore"
- - "samsung,exynos7-clock-peric0"
- - "samsung,exynos7-clock-peric1"
- - "samsung,exynos7-clock-peris"
- - "samsung,exynos7-clock-fsys0"
- - "samsung,exynos7-clock-fsys1"
- - "samsung,exynos7-clock-mscl"
- - "samsung,exynos7-clock-aud"
-
- - reg: physical base address of the controller and the length of
- memory mapped region.
-
- - #clock-cells: should be 1.
-
- - clocks: list of clock identifiers which are fed as the input to
- the given clock controller. Please refer the next section to
- find the input clocks for a given controller.
-
-- clock-names: list of names of clocks which are fed as the input
- to the given clock controller.
-
-Input clocks for top0 clock controller:
- - fin_pll
- - dout_sclk_bus0_pll
- - dout_sclk_bus1_pll
- - dout_sclk_cc_pll
- - dout_sclk_mfc_pll
- - dout_sclk_aud_pll
-
-Input clocks for top1 clock controller:
- - fin_pll
- - dout_sclk_bus0_pll
- - dout_sclk_bus1_pll
- - dout_sclk_cc_pll
- - dout_sclk_mfc_pll
-
-Input clocks for ccore clock controller:
- - fin_pll
- - dout_aclk_ccore_133
-
-Input clocks for peric0 clock controller:
- - fin_pll
- - dout_aclk_peric0_66
- - sclk_uart0
-
-Input clocks for peric1 clock controller:
- - fin_pll
- - dout_aclk_peric1_66
- - sclk_uart1
- - sclk_uart2
- - sclk_uart3
- - sclk_spi0
- - sclk_spi1
- - sclk_spi2
- - sclk_spi3
- - sclk_spi4
- - sclk_i2s1
- - sclk_pcm1
- - sclk_spdif
-
-Input clocks for peris clock controller:
- - fin_pll
- - dout_aclk_peris_66
-
-Input clocks for fsys0 clock controller:
- - fin_pll
- - dout_aclk_fsys0_200
- - dout_sclk_mmc2
-
-Input clocks for fsys1 clock controller:
- - fin_pll
- - dout_aclk_fsys1_200
- - dout_sclk_mmc0
- - dout_sclk_mmc1
-
-Input clocks for aud clock controller:
- - fin_pll
- - fout_aud_pll
diff --git a/Documentation/devicetree/bindings/clock/imx5-clock.yaml b/Documentation/devicetree/bindings/clock/imx5-clock.yaml
index b1740d7abe68..c0e19ff92c76 100644
--- a/Documentation/devicetree/bindings/clock/imx5-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/imx5-clock.yaml
@@ -55,11 +55,4 @@ examples:
<0 72 IRQ_TYPE_LEVEL_HIGH>;
#clock-cells = <1>;
};
-
- can@53fc8000 {
- compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
- reg = <0x53fc8000 0x4000>;
- interrupts = <82>;
- clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>, <&clks IMX5_CLK_CAN1_SERIAL_GATE>;
- clock-names = "ipg", "per";
- };
+...
diff --git a/Documentation/devicetree/bindings/clock/microchip,lan966x-gck.yaml b/Documentation/devicetree/bindings/clock/microchip,lan966x-gck.yaml
new file mode 100644
index 000000000000..df2bec188706
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/microchip,lan966x-gck.yaml
@@ -0,0 +1,60 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/microchip,lan966x-gck.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip LAN966X Generic Clock Controller
+
+maintainers:
+ - Kavyasree Kotagiri <kavyasree.kotagiri@microchip.com>
+
+description: |
+ The LAN966X Generic clock controller contains 3 PLLs - cpu_clk,
+ ddr_clk and sys_clk. This clock controller generates and supplies
+ clock to various peripherals within the SoC.
+
+properties:
+ compatible:
+ const: microchip,lan966x-gck
+
+ reg:
+ minItems: 1
+ items:
+ - description: Generic clock registers
+ - description: Optional gate clock registers
+
+ clocks:
+ items:
+ - description: CPU clock source
+ - description: DDR clock source
+ - description: System clock source
+
+ clock-names:
+ items:
+ - const: cpu
+ - const: ddr
+ - const: sys
+
+ '#clock-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - '#clock-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clks: clock-controller@e00c00a8 {
+ compatible = "microchip,lan966x-gck";
+ #clock-cells = <1>;
+ clocks = <&cpu_clk>, <&ddr_clk>, <&sys_clk>;
+ clock-names = "cpu", "ddr", "sys";
+ reg = <0xe00c00a8 0x38>;
+ };
+...
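For reference, a consumer selects one of the controller's generic clocks
through the single clock cell. A minimal sketch, assuming the &clks label
from the example above; the compatible string and the index value are
illustrative, not taken from this schema:

    serial@e0040200 {
        compatible = "atmel,at91sam9260-usart"; /* illustrative consumer */
        reg = <0xe0040200 0x200>;
        clocks = <&clks 5>; /* GCK index; the value 5 is illustrative */
        clock-names = "usart";
    };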
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-msm8976.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8976.yaml
new file mode 100644
index 000000000000..f3430b159caa
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-msm8976.yaml
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/qcom,gcc-msm8976.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Global Clock & Reset Controller Binding for MSM8976
+
+maintainers:
+ - Stephen Boyd <sboyd@kernel.org>
+ - Taniya Das <tdas@codeaurora.org>
+
+description: |
+ Qualcomm global clock control module which supports the clocks, resets and
+ power domains on MSM8976.
+
+ See also:
+ - dt-bindings/clock/qcom,gcc-msm8976.h
+
+properties:
+ compatible:
+ enum:
+ - qcom,gcc-msm8976
+ - qcom,gcc-msm8976-v1.1
+
+ clocks:
+ items:
+ - description: XO source
+ - description: Always-on XO source
+ - description: Pixel clock from DSI PHY0
+ - description: Byte clock from DSI PHY0
+ - description: Pixel clock from DSI PHY1
+ - description: Byte clock from DSI PHY1
+
+ clock-names:
+ items:
+ - const: xo
+ - const: xo_a
+ - const: dsi0pll
+ - const: dsi0pllbyte
+ - const: dsi1pll
+ - const: dsi1pllbyte
+
+ vdd_gfx-supply:
+ description:
+ Phandle to voltage regulator providing power to the GX domain.
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+ '#power-domain-cells':
+ const: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - vdd_gfx-supply
+ - '#clock-cells'
+ - '#reset-cells'
+ - '#power-domain-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ clock-controller@1800000 {
+ compatible = "qcom,gcc-msm8976";
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ #power-domain-cells = <1>;
+ reg = <0x1800000 0x80000>;
+
+ clocks = <&xo_board>,
+ <&xo_board>,
+ <&dsi0_phy 1>,
+ <&dsi0_phy 0>,
+ <&dsi1_phy 1>,
+ <&dsi1_phy 0>;
+
+ clock-names = "xo",
+ "xo_a",
+ "dsi0pll",
+ "dsi0pllbyte",
+ "dsi1pll",
+ "dsi1pllbyte";
+
+ vdd_gfx-supply = <&pm8004_s5>;
+ };
+...
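A consumer refers to this controller through its one-cell clock, reset and
power-domain specifiers. A hedged clock-consumer sketch, assuming a 'gcc'
label on the controller node above; the GCC_SDCC1_* macro names are assumed
to come from dt-bindings/clock/qcom,gcc-msm8976.h:

    mmc@7824000 {
        compatible = "qcom,sdhci-msm-v4";
        reg = <0x07824000 0x800>;
        clocks = <&gcc GCC_SDCC1_AHB_CLK>,  /* assumed macro name */
                 <&gcc GCC_SDCC1_APPS_CLK>; /* assumed macro name */
        clock-names = "iface", "core";
    };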
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
index 72212970e6f5..8406dde17937 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmhcc.yaml
@@ -22,10 +22,12 @@ properties:
- qcom,sc8180x-rpmh-clk
- qcom,sdm845-rpmh-clk
- qcom,sdx55-rpmh-clk
+ - qcom,sdx65-rpmh-clk
- qcom,sm6350-rpmh-clk
- qcom,sm8150-rpmh-clk
- qcom,sm8250-rpmh-clk
- qcom,sm8350-rpmh-clk
+ - qcom,sm8450-rpmh-clk
clocks:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
index 9b414fbde6d7..e0b86214f0f5 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mssr.yaml
@@ -48,6 +48,7 @@ properties:
- renesas,r8a77990-cpg-mssr # R-Car E3
- renesas,r8a77995-cpg-mssr # R-Car D3
- renesas,r8a779a0-cpg-mssr # R-Car V3U
+ - renesas,r8a779f0-cpg-mssr # R-Car S4-8
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos5260-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos5260-clock.yaml
new file mode 100644
index 000000000000..a3fac5c6809d
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos5260-clock.yaml
@@ -0,0 +1,382 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/samsung,exynos5260-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos5260 SoC clock controller
+
+maintainers:
+ - Chanwoo Choi <cw00.choi@samsung.com>
+ - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Sylwester Nawrocki <s.nawrocki@samsung.com>
+ - Tomasz Figa <tomasz.figa@gmail.com>
+
+description: |
+ Expected external clocks, defined in DTS as fixed-rate clocks with a matching
+ name::
+ - "fin_pll" - PLL input clock from XXTI
+ - "xrtcxti" - input clock from XRTCXTI
+ - "ioclk_pcm_extclk" - pcm external operation clock
+ - "ioclk_spdif_extclk" - spdif external operation clock
+ - "ioclk_i2s_cdclk" - i2s0 codec clock
+
+ Phy clocks::
+ There are several clocks which are generated by specific PHYs. These clocks
+ are fed into the clock controller and then routed to the hardware blocks.
+    These clocks are defined as fixed clocks in the driver with the following names::
+ - "phyclk_dptx_phy_ch3_txd_clk" - dp phy clock for channel 3
+ - "phyclk_dptx_phy_ch2_txd_clk" - dp phy clock for channel 2
+ - "phyclk_dptx_phy_ch1_txd_clk" - dp phy clock for channel 1
+ - "phyclk_dptx_phy_ch0_txd_clk" - dp phy clock for channel 0
+ - "phyclk_hdmi_phy_tmds_clko" - hdmi phy tmds clock
+ - "phyclk_hdmi_phy_pixel_clko" - hdmi phy pixel clock
+ - "phyclk_hdmi_link_o_tmds_clkhi" - hdmi phy for hdmi link
+ - "phyclk_dptx_phy_o_ref_clk_24m" - dp phy reference clock
+ - "phyclk_dptx_phy_clk_div2"
+ - "phyclk_mipi_dphy_4l_m_rxclkesc0"
+ - "phyclk_usbhost20_phy_phyclock" - usb 2.0 phy clock
+ - "phyclk_usbhost20_phy_freeclk"
+ - "phyclk_usbhost20_phy_clk48mohci"
+ - "phyclk_usbdrd30_udrd30_pipe_pclk"
+ - "phyclk_usbdrd30_udrd30_phyclock" - usb 3.0 phy clock
+
+ All available clocks are defined as preprocessor macros in
+ include/dt-bindings/clock/exynos5260-clk.h header.
+
+properties:
+ compatible:
+ enum:
+ - samsung,exynos5260-clock-top
+ - samsung,exynos5260-clock-peri
+ - samsung,exynos5260-clock-egl
+ - samsung,exynos5260-clock-kfc
+ - samsung,exynos5260-clock-g2d
+ - samsung,exynos5260-clock-mif
+ - samsung,exynos5260-clock-mfc
+ - samsung,exynos5260-clock-g3d
+ - samsung,exynos5260-clock-fsys
+ - samsung,exynos5260-clock-aud
+ - samsung,exynos5260-clock-isp
+ - samsung,exynos5260-clock-gscl
+ - samsung,exynos5260-clock-disp
+
+ clocks:
+ minItems: 1
+ maxItems: 19
+
+ clock-names:
+ minItems: 1
+ maxItems: 19
+
+ "#clock-cells":
+ const: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - "#clock-cells"
+ - reg
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5260-clock-top
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ maxItems: 4
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_mem_pll
+ - const: dout_bus_pll
+ - const: dout_media_pll
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5260-clock-peri
+ then:
+ properties:
+ clocks:
+ minItems: 13
+ maxItems: 13
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: ioclk_pcm_extclk
+ - const: ioclk_i2s_cdclk
+ - const: ioclk_spdif_extclk
+ - const: phyclk_hdmi_phy_ref_cko
+ - const: dout_aclk_peri_66
+ - const: dout_sclk_peri_uart0
+ - const: dout_sclk_peri_uart1
+ - const: dout_sclk_peri_uart2
+ - const: dout_sclk_peri_spi0_b
+ - const: dout_sclk_peri_spi1_b
+ - const: dout_sclk_peri_spi2_b
+ - const: dout_aclk_peri_aud
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5260-clock-egl
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_bus_pll
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5260-clock-kfc
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_media_pll
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5260-clock-g2d
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_aclk_g2d_333
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5260-clock-mif
+ then:
+ properties:
+ clocks:
+ minItems: 1
+ maxItems: 1
+ clock-names:
+ items:
+ - const: fin_pll
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5260-clock-mfc
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_aclk_mfc_333
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5260-clock-g3d
+ then:
+ properties:
+ clocks:
+ minItems: 1
+ maxItems: 1
+ clock-names:
+ items:
+ - const: fin_pll
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5260-clock-fsys
+ then:
+ properties:
+ clocks:
+ minItems: 7
+ maxItems: 7
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: phyclk_usbhost20_phy_phyclock
+ - const: phyclk_usbhost20_phy_freeclk
+ - const: phyclk_usbhost20_phy_clk48mohci
+ - const: phyclk_usbdrd30_udrd30_pipe_pclk
+ - const: phyclk_usbdrd30_udrd30_phyclock
+ - const: dout_aclk_fsys_200
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5260-clock-aud
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ maxItems: 4
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: fout_aud_pll
+ - const: ioclk_i2s_cdclk
+ - const: ioclk_pcm_extclk
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5260-clock-isp
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ maxItems: 4
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_aclk_isp1_266
+ - const: dout_aclk_isp1_400
+ - const: mout_aclk_isp1_266
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5260-clock-gscl
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 3
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_aclk_gscl_400
+ - const: dout_aclk_gscl_333
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5260-clock-disp
+ then:
+ properties:
+ clocks:
+ minItems: 19
+ maxItems: 19
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: phyclk_dptx_phy_ch3_txd_clk
+ - const: phyclk_dptx_phy_ch2_txd_clk
+ - const: phyclk_dptx_phy_ch1_txd_clk
+ - const: phyclk_dptx_phy_ch0_txd_clk
+ - const: phyclk_hdmi_phy_tmds_clko
+ - const: phyclk_hdmi_phy_ref_clko
+ - const: phyclk_hdmi_phy_pixel_clko
+ - const: phyclk_hdmi_link_o_tmds_clkhi
+ - const: phyclk_mipi_dphy_4l_m_txbyte_clkhs
+ - const: phyclk_dptx_phy_o_ref_clk_24m
+ - const: phyclk_dptx_phy_clk_div2
+ - const: phyclk_mipi_dphy_4l_m_rxclkesc0
+ - const: phyclk_hdmi_phy_ref_cko
+ - const: ioclk_spdif_extclk
+ - const: dout_aclk_peri_aud
+ - const: dout_aclk_disp_222
+ - const: dout_sclk_disp_pixel
+ - const: dout_aclk_disp_333
+ required:
+ - clock-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos5260-clk.h>
+
+ fin_pll: clock {
+ compatible = "fixed-clock";
+ clock-output-names = "fin_pll";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+
+ clock-controller@10010000 {
+ compatible = "samsung,exynos5260-clock-top";
+ reg = <0x10010000 0x10000>;
+ #clock-cells = <1>;
+ clocks = <&fin_pll>,
+ <&clock_mif MIF_DOUT_MEM_PLL>,
+ <&clock_mif MIF_DOUT_BUS_PLL>,
+ <&clock_mif MIF_DOUT_MEDIA_PLL>;
+ clock-names = "fin_pll",
+ "dout_mem_pll",
+ "dout_bus_pll",
+ "dout_media_pll";
+ };
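The consumer side carries over from the removed text binding; its Example 2,
a UART fed by the peri clock controller, still matches this schema:

    serial@12c00000 {
        compatible = "samsung,exynos4210-uart";
        reg = <0x12C00000 0x100>;
        interrupts = <0 146 0>;
        clocks = <&clock_peri PERI_PCLK_UART0>, <&clock_peri PERI_SCLK_UART0>;
        clock-names = "uart", "clk_uart_baud0";
    };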
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos5410-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos5410-clock.yaml
new file mode 100644
index 000000000000..032862e9f55b
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos5410-clock.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/samsung,exynos5410-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos5410 SoC clock controller
+
+maintainers:
+ - Chanwoo Choi <cw00.choi@samsung.com>
+ - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Sylwester Nawrocki <s.nawrocki@samsung.com>
+ - Tomasz Figa <tomasz.figa@gmail.com>
+
+description: |
+ Expected external clocks, defined in DTS as fixed-rate clocks with a matching
+ name::
+ - "fin_pll" - PLL input clock from XXTI
+
+ All available clocks are defined as preprocessor macros in
+ include/dt-bindings/clock/exynos5410.h header.
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - samsung,exynos5410-clock
+
+ clocks:
+ description:
+      Should contain an entry specifying the root clock from the external
+      oscillator supplied through the XXTI or XusbXTI pin. This clock should be
+      defined using standard clock bindings with the "fin_pll" clock-output-name.
+      That clock is passed internally to the 9 PLLs.
+ maxItems: 1
+
+ "#clock-cells":
+ const: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - "#clock-cells"
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos5410.h>
+
+ fin_pll: osc-clock {
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ clock-output-names = "fin_pll";
+ #clock-cells = <0>;
+ };
+
+ clock-controller@10010000 {
+ compatible = "samsung,exynos5410-clock";
+ reg = <0x10010000 0x30000>;
+ #clock-cells = <1>;
+ clocks = <&fin_pll>;
+ };
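A consumer sketch following Example 2 of the removed text binding, with a
'clock' label assumed on the controller node above:

    serial@12c00000 {
        compatible = "samsung,exynos4210-uart";
        reg = <0x12C00000 0x100>;
        interrupts = <0 51 0>;
        clocks = <&clock CLK_UART0>, <&clock CLK_SCLK_UART0>;
        clock-names = "uart", "clk_uart_baud0";
    };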
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos5433-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos5433-clock.yaml
new file mode 100644
index 000000000000..edd1b4ac4334
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos5433-clock.yaml
@@ -0,0 +1,524 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/samsung,exynos5433-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos5433 SoC clock controller
+
+maintainers:
+ - Chanwoo Choi <cw00.choi@samsung.com>
+ - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Sylwester Nawrocki <s.nawrocki@samsung.com>
+ - Tomasz Figa <tomasz.figa@gmail.com>
+
+description: |
+ Expected external clocks, defined in DTS as fixed-rate clocks with a matching
+ name::
+ - "oscclk" - PLL input clock from XXTI
+
+ All available clocks are defined as preprocessor macros in
+ include/dt-bindings/clock/exynos5433.h header.
+
+properties:
+ compatible:
+ enum:
+ # CMU_TOP which generates clocks for
+ # IMEM/FSYS/G3D/GSCL/HEVC/MSCL/G2D/MFC/PERIC/PERIS domains and bus
+ # clocks
+ - samsung,exynos5433-cmu-top
+ # CMU_CPIF which generates clocks for LLI (Low Latency Interface) IP
+ - samsung,exynos5433-cmu-cpif
+ # CMU_MIF which generates clocks for DRAM Memory Controller domain
+ - samsung,exynos5433-cmu-mif
+ # CMU_PERIC which generates clocks for
+ # UART/I2C/SPI/I2S/PCM/SPDIF/PWM/SLIMBUS IPs
+ - samsung,exynos5433-cmu-peric
+ # CMU_PERIS which generates clocks for PMU/TMU/MCT/WDT/RTC/SECKEY/TZPC IPs
+ - samsung,exynos5433-cmu-peris
+ # CMU_FSYS which generates clocks for USB/UFS/SDMMC/TSI/PDMA IPs
+ - samsung,exynos5433-cmu-fsys
+ - samsung,exynos5433-cmu-g2d
+ # CMU_DISP which generates clocks for Display (DECON/HDMI/DSIM/MIXER) IPs
+ - samsung,exynos5433-cmu-disp
+ - samsung,exynos5433-cmu-aud
+ - samsung,exynos5433-cmu-bus0
+ - samsung,exynos5433-cmu-bus1
+ - samsung,exynos5433-cmu-bus2
+ - samsung,exynos5433-cmu-g3d
+ - samsung,exynos5433-cmu-gscl
+ - samsung,exynos5433-cmu-apollo
+ # CMU_ATLAS which generates clocks for Cortex-A57 Quad-core processor,
+ # CoreSight and L2 cache controller
+ - samsung,exynos5433-cmu-atlas
+ # CMU_MSCL which generates clocks for M2M (Memory to Memory) scaler and
+ # JPEG IPs
+ - samsung,exynos5433-cmu-mscl
+ - samsung,exynos5433-cmu-mfc
+ - samsung,exynos5433-cmu-hevc
+ # CMU_ISP which generates clocks for FIMC-ISP/DRC/SCLC/DIS/3DNR IPs
+ - samsung,exynos5433-cmu-isp
+ # CMU_CAM0 which generates clocks for
+ # MIPI_CSIS{0|1}/FIMC_LITE_{A|B|D}/FIMC_3AA{0|1} IPs
+ - samsung,exynos5433-cmu-cam0
+ # CMU_CAM1 which generates clocks for
+ # Cortex-A5/MIPI_CSIS2/FIMC-LITE_C/FIMC-FD IPs
+ - samsung,exynos5433-cmu-cam1
+ # CMU_IMEM which generates clocks for SSS (Security SubSystem) and
+ # SlimSSS IPs
+ - samsung,exynos5433-cmu-imem
+
+ clocks:
+ minItems: 1
+ maxItems: 10
+
+ clock-names:
+ minItems: 1
+ maxItems: 10
+
+ "#clock-cells":
+ const: 1
+
+ power-domains:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - "#clock-cells"
+ - reg
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-top
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ maxItems: 4
+ clock-names:
+ items:
+ - const: oscclk
+ - const: sclk_mphy_pll
+ - const: sclk_mfc_pll
+ - const: sclk_bus_pll
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-cpif
+ then:
+ properties:
+ clocks:
+ minItems: 1
+ maxItems: 1
+ clock-names:
+ items:
+ - const: oscclk
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-mif
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: oscclk
+ - const: sclk_mphy_pll
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-fsys
+ then:
+ properties:
+ clocks:
+ minItems: 10
+ maxItems: 10
+ clock-names:
+ items:
+ - const: oscclk
+ - const: sclk_ufs_mphy
+ - const: aclk_fsys_200
+ - const: sclk_pcie_100_fsys
+ - const: sclk_ufsunipro_fsys
+ - const: sclk_mmc2_fsys
+ - const: sclk_mmc1_fsys
+ - const: sclk_mmc0_fsys
+ - const: sclk_usbhost30_fsys
+ - const: sclk_usbdrd30_fsys
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-g2d
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 3
+ clock-names:
+ items:
+ - const: oscclk
+ - const: aclk_g2d_266
+ - const: aclk_g2d_400
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-disp
+ then:
+ properties:
+ clocks:
+ minItems: 9
+ maxItems: 9
+ clock-names:
+ items:
+ - const: oscclk
+ - const: sclk_dsim1_disp
+ - const: sclk_dsim0_disp
+ - const: sclk_dsd_disp
+ - const: sclk_decon_tv_eclk_disp
+ - const: sclk_decon_vclk_disp
+ - const: sclk_decon_eclk_disp
+ - const: sclk_decon_tv_vclk_disp
+ - const: aclk_disp_333
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-aud
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: oscclk
+ - const: fout_aud_pll
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-bus0
+ then:
+ properties:
+ clocks:
+ minItems: 1
+ maxItems: 1
+ clock-names:
+ items:
+ - const: aclk_bus0_400
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-bus1
+ then:
+ properties:
+ clocks:
+ minItems: 1
+ maxItems: 1
+ clock-names:
+ items:
+ - const: aclk_bus1_400
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-bus2
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: oscclk
+ - const: aclk_bus2_400
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-g3d
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: oscclk
+ - const: aclk_g3d_400
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-gscl
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 3
+ clock-names:
+ items:
+ - const: oscclk
+ - const: aclk_gscl_111
+ - const: aclk_gscl_333
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-apollo
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: oscclk
+ - const: sclk_bus_pll_apollo
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-atlas
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: oscclk
+ - const: sclk_bus_pll_atlas
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-mscl
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 3
+ clock-names:
+ items:
+ - const: oscclk
+ - const: sclk_jpeg_mscl
+ - const: aclk_mscl_400
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-mfc
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: oscclk
+ - const: aclk_mfc_400
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-hevc
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: oscclk
+ - const: aclk_hevc_400
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-isp
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 3
+ clock-names:
+ items:
+ - const: oscclk
+ - const: aclk_isp_dis_400
+ - const: aclk_isp_400
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-cam0
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ maxItems: 4
+ clock-names:
+ items:
+ - const: oscclk
+ - const: aclk_cam0_333
+ - const: aclk_cam0_400
+ - const: aclk_cam0_552
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-cam1
+ then:
+ properties:
+ clocks:
+ minItems: 7
+ maxItems: 7
+ clock-names:
+ items:
+ - const: oscclk
+ - const: sclk_isp_uart_cam1
+ - const: sclk_isp_spi1_cam1
+ - const: sclk_isp_spi0_cam1
+ - const: aclk_cam1_333
+ - const: aclk_cam1_400
+ - const: aclk_cam1_552
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos5433-cmu-imem
+ then:
+ properties:
+ clocks:
+ minItems: 4
+ maxItems: 4
+ clock-names:
+ items:
+ - const: oscclk
+ - const: aclk_imem_sssx_266
+ - const: aclk_imem_266
+ - const: aclk_imem_200
+ required:
+ - clock-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos5433.h>
+ xxti: clock {
+ compatible = "fixed-clock";
+ clock-output-names = "oscclk";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+
+ clock-controller@10030000 {
+ compatible = "samsung,exynos5433-cmu-top";
+ reg = <0x10030000 0x1000>;
+ #clock-cells = <1>;
+
+ clock-names = "oscclk",
+ "sclk_mphy_pll",
+ "sclk_mfc_pll",
+ "sclk_bus_pll";
+ clocks = <&xxti>,
+ <&cmu_cpif CLK_SCLK_MPHY_PLL>,
+ <&cmu_mif CLK_SCLK_MFC_PLL>,
+ <&cmu_mif CLK_SCLK_BUS_PLL>;
+ };
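Consumers are unchanged from Example 3 of the removed text binding; a UART
fed by CMU_PERIC:

    serial_0: serial@14c10000 {
        compatible = "samsung,exynos5433-uart";
        reg = <0x14C10000 0x100>;
        interrupts = <0 421 0>;
        clocks = <&cmu_peric CLK_PCLK_UART0>,
                 <&cmu_peric CLK_SCLK_UART0>;
        clock-names = "uart", "clk_uart_baud0";
        pinctrl-names = "default";
        pinctrl-0 = <&uart0_bus>;
    };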
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos7-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos7-clock.yaml
new file mode 100644
index 000000000000..599baf0b7231
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos7-clock.yaml
@@ -0,0 +1,272 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/samsung,exynos7-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos7 SoC clock controller
+
+maintainers:
+ - Chanwoo Choi <cw00.choi@samsung.com>
+ - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Sylwester Nawrocki <s.nawrocki@samsung.com>
+ - Tomasz Figa <tomasz.figa@gmail.com>
+
+description: |
+ Expected external clocks, defined in DTS as fixed-rate clocks with a matching
+ name::
+ - "fin_pll" - PLL input clock from XXTI
+
+ All available clocks are defined as preprocessor macros in
+ include/dt-bindings/clock/exynos7-clk.h header.
+
+properties:
+ compatible:
+ enum:
+ - samsung,exynos7-clock-topc
+ - samsung,exynos7-clock-top0
+ - samsung,exynos7-clock-top1
+ - samsung,exynos7-clock-ccore
+ - samsung,exynos7-clock-peric0
+ - samsung,exynos7-clock-peric1
+ - samsung,exynos7-clock-peris
+ - samsung,exynos7-clock-fsys0
+ - samsung,exynos7-clock-fsys1
+ - samsung,exynos7-clock-mscl
+ - samsung,exynos7-clock-aud
+
+ clocks:
+ minItems: 1
+ maxItems: 13
+
+ clock-names:
+ minItems: 1
+ maxItems: 13
+
+ "#clock-cells":
+ const: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - "#clock-cells"
+ - reg
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7-clock-top0
+ then:
+ properties:
+ clocks:
+ minItems: 6
+ maxItems: 6
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_sclk_bus0_pll
+ - const: dout_sclk_bus1_pll
+ - const: dout_sclk_cc_pll
+ - const: dout_sclk_mfc_pll
+ - const: dout_sclk_aud_pll
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7-clock-top1
+ then:
+ properties:
+ clocks:
+ minItems: 5
+ maxItems: 5
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_sclk_bus0_pll
+ - const: dout_sclk_bus1_pll
+ - const: dout_sclk_cc_pll
+ - const: dout_sclk_mfc_pll
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7-clock-ccore
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_aclk_ccore_133
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7-clock-peric0
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 3
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_aclk_peric0_66
+ - const: sclk_uart0
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7-clock-peric1
+ then:
+ properties:
+ clocks:
+ minItems: 13
+ maxItems: 13
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_aclk_peric1_66
+ - const: sclk_uart1
+ - const: sclk_uart2
+ - const: sclk_uart3
+ - const: sclk_spi0
+ - const: sclk_spi1
+ - const: sclk_spi2
+ - const: sclk_spi3
+ - const: sclk_spi4
+ - const: sclk_i2s1
+ - const: sclk_pcm1
+ - const: sclk_spdif
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7-clock-peris
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_aclk_peris_66
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7-clock-fsys0
+ then:
+ properties:
+ clocks:
+ minItems: 3
+ maxItems: 3
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_aclk_fsys0_200
+ - const: dout_sclk_mmc2
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7-clock-fsys1
+ then:
+ properties:
+ clocks:
+ minItems: 7
+ maxItems: 7
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: dout_aclk_fsys1_200
+ - const: dout_sclk_mmc0
+ - const: dout_sclk_mmc1
+ - const: dout_sclk_ufsunipro20
+ - const: dout_sclk_phy_fsys1
+ - const: dout_sclk_phy_fsys1_26m
+ required:
+ - clock-names
+ - clocks
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7-clock-aud
+ then:
+ properties:
+ clocks:
+ minItems: 2
+ maxItems: 2
+ clock-names:
+ items:
+ - const: fin_pll
+ - const: fout_aud_pll
+ required:
+ - clock-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos7-clk.h>
+
+ fin_pll: clock {
+ compatible = "fixed-clock";
+ clock-output-names = "fin_pll";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ };
+
+ clock-controller@105e0000 {
+ compatible = "samsung,exynos7-clock-top1";
+ reg = <0x105e0000 0xb000>;
+ #clock-cells = <1>;
+ clocks = <&fin_pll>,
+ <&clock_topc DOUT_SCLK_BUS0_PLL>,
+ <&clock_topc DOUT_SCLK_BUS1_PLL>,
+ <&clock_topc DOUT_SCLK_CC_PLL>,
+ <&clock_topc DOUT_SCLK_MFC_PLL>;
+ clock-names = "fin_pll",
+ "dout_sclk_bus0_pll",
+ "dout_sclk_bus1_pll",
+ "dout_sclk_cc_pll",
+ "dout_sclk_mfc_pll";
+ };
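The removed text binding carried no consumer example; a hedged sketch of a
UART fed by the peric0 controller, assuming PCLK_UART0 and SCLK_UART0 macros
from the exynos7-clk.h header and a 'clock_peric0' label:

    serial@13630000 {
        compatible = "samsung,exynos4210-uart";
        reg = <0x13630000 0x100>; /* unit address is illustrative */
        clocks = <&clock_peric0 PCLK_UART0>,
                 <&clock_peric0 SCLK_UART0>;
        clock-names = "uart", "clk_uart_baud0";
    };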
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos7885-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos7885-clock.yaml
new file mode 100644
index 000000000000..7e5a9cac2fd2
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos7885-clock.yaml
@@ -0,0 +1,166 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/samsung,exynos7885-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung Exynos7885 SoC clock controller
+
+maintainers:
+ - Dávid Virág <virag.david003@gmail.com>
+ - Chanwoo Choi <cw00.choi@samsung.com>
+ - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Sylwester Nawrocki <s.nawrocki@samsung.com>
+ - Tomasz Figa <tomasz.figa@gmail.com>
+
+description: |
+  The Exynos7885 clock controller is composed of several CMU units, generating
+ clocks for different domains. Those CMU units are modeled as separate device
+ tree nodes, and might depend on each other. The root clock in that root tree
+ is an external clock: OSCCLK (26 MHz). This external clock must be defined
+ as a fixed-rate clock in dts.
+
+ CMU_TOP is a top-level CMU, where all base clocks are prepared using PLLs and
+ dividers; all other leaf clocks (other CMUs) are usually derived from CMU_TOP.
+
+ Each clock is assigned an identifier and client nodes can use this identifier
+ to specify the clock which they consume. All clocks available for usage
+ in clock consumer nodes are defined as preprocessor macros in
+ 'dt-bindings/clock/exynos7885.h' header.
+
+properties:
+ compatible:
+ enum:
+ - samsung,exynos7885-cmu-top
+ - samsung,exynos7885-cmu-core
+ - samsung,exynos7885-cmu-peri
+
+ clocks:
+ minItems: 1
+ maxItems: 10
+
+ clock-names:
+ minItems: 1
+ maxItems: 10
+
+ "#clock-cells":
+ const: 1
+
+ reg:
+ maxItems: 1
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7885-cmu-top
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (26 MHz)
+
+ clock-names:
+ items:
+ - const: oscclk
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7885-cmu-core
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (26 MHz)
+ - description: CMU_CORE bus clock (from CMU_TOP)
+ - description: CCI clock (from CMU_TOP)
+ - description: G3D clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: dout_core_bus
+ - const: dout_core_cci
+ - const: dout_core_g3d
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos7885-cmu-peri
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (26 MHz)
+ - description: CMU_PERI bus clock (from CMU_TOP)
+ - description: SPI0 clock (from CMU_TOP)
+ - description: SPI1 clock (from CMU_TOP)
+ - description: UART0 clock (from CMU_TOP)
+ - description: UART1 clock (from CMU_TOP)
+ - description: UART2 clock (from CMU_TOP)
+ - description: USI0 clock (from CMU_TOP)
+ - description: USI1 clock (from CMU_TOP)
+ - description: USI2 clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: dout_peri_bus
+ - const: dout_peri_spi0
+ - const: dout_peri_spi1
+ - const: dout_peri_uart0
+ - const: dout_peri_uart1
+ - const: dout_peri_uart2
+ - const: dout_peri_usi0
+ - const: dout_peri_usi1
+ - const: dout_peri_usi2
+
+required:
+ - compatible
+ - "#clock-cells"
+ - clocks
+ - clock-names
+ - reg
+
+additionalProperties: false
+
+examples:
+ # Clock controller node for CMU_PERI
+ - |
+ #include <dt-bindings/clock/exynos7885.h>
+
+ cmu_peri: clock-controller@10010000 {
+ compatible = "samsung,exynos7885-cmu-peri";
+ reg = <0x10010000 0x8000>;
+ #clock-cells = <1>;
+
+ clocks = <&oscclk>,
+ <&cmu_top CLK_DOUT_PERI_BUS>,
+ <&cmu_top CLK_DOUT_PERI_SPI0>,
+ <&cmu_top CLK_DOUT_PERI_SPI1>,
+ <&cmu_top CLK_DOUT_PERI_UART0>,
+ <&cmu_top CLK_DOUT_PERI_UART1>,
+ <&cmu_top CLK_DOUT_PERI_UART2>,
+ <&cmu_top CLK_DOUT_PERI_USI0>,
+ <&cmu_top CLK_DOUT_PERI_USI1>,
+ <&cmu_top CLK_DOUT_PERI_USI2>;
+ clock-names = "oscclk",
+ "dout_peri_bus",
+ "dout_peri_spi0",
+ "dout_peri_spi1",
+ "dout_peri_uart0",
+ "dout_peri_uart1",
+ "dout_peri_uart2",
+ "dout_peri_usi0",
+ "dout_peri_usi1",
+ "dout_peri_usi2";
+ };
+
+...
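The CMU_PERI node above takes its inputs from &cmu_top; a matching sketch of
that CMU_TOP provider, per the top-level rule of this schema (the unit
address is illustrative):

    cmu_top: clock-controller@12060000 {
        compatible = "samsung,exynos7885-cmu-top";
        reg = <0x12060000 0x8000>;
        #clock-cells = <1>;
        clocks = <&oscclk>;
        clock-names = "oscclk";
    };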
diff --git a/Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml
index 7f8c91a29b91..80ba60838f2b 100644
--- a/Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml
+++ b/Documentation/devicetree/bindings/clock/samsung,exynos850-clock.yaml
@@ -32,6 +32,8 @@ properties:
compatible:
enum:
- samsung,exynos850-cmu-top
+ - samsung,exynos850-cmu-apm
+ - samsung,exynos850-cmu-cmgp
- samsung,exynos850-cmu-core
- samsung,exynos850-cmu-dpu
- samsung,exynos850-cmu-hsi
@@ -72,6 +74,42 @@ allOf:
properties:
compatible:
contains:
+ const: samsung,exynos850-cmu-apm
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (26 MHz)
+ - description: CMU_APM bus clock (from CMU_TOP)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: dout_clkcmu_apm_bus
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: samsung,exynos850-cmu-cmgp
+
+ then:
+ properties:
+ clocks:
+ items:
+ - description: External reference clock (26 MHz)
+ - description: CMU_CMGP bus clock (from CMU_APM)
+
+ clock-names:
+ items:
+ - const: oscclk
+ - const: gout_clkcmu_cmgp_bus
+
+ - if:
+ properties:
+ compatible:
+ contains:
const: samsung,exynos850-cmu-core
then:
diff --git a/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.txt b/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.txt
deleted file mode 100644
index a86c83bf9d4e..000000000000
--- a/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-* Samsung S5P6442/S5PC110/S5PV210 Clock Controller
-
-Samsung S5P6442, S5PC110 and S5PV210 SoCs contain integrated clock
-controller, which generates and supplies clock to various controllers
-within the SoC.
-
-Required Properties:
-
-- compatible: should be one of following:
- - "samsung,s5pv210-clock" : for clock controller of Samsung
- S5PC110/S5PV210 SoCs,
- - "samsung,s5p6442-clock" : for clock controller of Samsung
- S5P6442 SoC.
-
-- reg: physical base address of the controller and length of memory mapped
- region.
-
-- #clock-cells: should be 1.
-
-All available clocks are defined as preprocessor macros in
-dt-bindings/clock/s5pv210.h header and can be used in device tree sources.
-
-External clocks:
-
-There are several clocks that are generated outside the SoC. It is expected
-that they are defined using standard clock bindings with following
-clock-output-names:
- - "xxti": external crystal oscillator connected to XXTI and XXTO pins of
-the SoC,
- - "xusbxti": external crystal oscillator connected to XUSBXTI and XUSBXTO
-pins of the SoC,
-
-A subset of above clocks available on given board shall be specified in
-board device tree, including the system base clock, as selected by XOM[0]
-pin of the SoC. Refer to generic fixed rate clock bindings
-documentation[1] for more information how to specify these clocks.
-
-[1] Documentation/devicetree/bindings/clock/fixed-clock.yaml
-
-Example: Clock controller node:
-
- clock: clock-controller@7e00f000 {
- compatible = "samsung,s5pv210-clock";
- reg = <0x7e00f000 0x1000>;
- #clock-cells = <1>;
- };
-
-Example: Required external clocks:
-
- xxti: clock-xxti {
- compatible = "fixed-clock";
- clock-output-names = "xxti";
- clock-frequency = <24000000>;
- #clock-cells = <0>;
- };
-
- xusbxti: clock-xusbxti {
- compatible = "fixed-clock";
- clock-output-names = "xusbxti";
- clock-frequency = <24000000>;
- #clock-cells = <0>;
- };
-
-Example: UART controller node that consumes the clock generated by the clock
- controller (refer to the standard clock bindings for information about
- "clocks" and "clock-names" properties):
-
- uart0: serial@e2900000 {
- compatible = "samsung,s5pv210-uart";
- reg = <0xe2900000 0x400>;
- interrupt-parent = <&vic1>;
- interrupts = <10>;
- clock-names = "uart", "clk_uart_baud0",
- "clk_uart_baud1";
- clocks = <&clocks UART0>, <&clocks UART0>,
- <&clocks SCLK_UART0>;
- };
diff --git a/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.yaml b/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.yaml
new file mode 100644
index 000000000000..dcb29a2d1159
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/samsung,s5pv210-clock.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/samsung,s5pv210-clock.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung S5P6442/S5PC110/S5PV210 SoC clock controller
+
+maintainers:
+ - Chanwoo Choi <cw00.choi@samsung.com>
+ - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+ - Sylwester Nawrocki <s.nawrocki@samsung.com>
+ - Tomasz Figa <tomasz.figa@gmail.com>
+
+description: |
+ Expected external clocks, defined in DTS as fixed-rate clocks with a matching
+ name:
+ - "xxti" - external crystal oscillator connected to XXTI and XXTO pins of
+ the SoC,
+ - "xusbxti" - external crystal oscillator connected to XUSBXTI and XUSBXTO
+ pins of the SoC.
+
+ All available clocks are defined as preprocessor macros in
+ include/dt-bindings/clock/s5pv210.h header.
+
+properties:
+ compatible:
+ enum:
+ - samsung,s5pv210-clock
+ - samsung,s5p6442-clock
+
+ clocks:
+ items:
+ - description: xxti clock
+ - description: xusbxti clock
+
+ clock-names:
+ items:
+ - const: xxti
+ - const: xusbxti
+
+ "#clock-cells":
+ const: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - "#clock-cells"
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/s5pv210.h>
+
+ xxti: clock-0 {
+ compatible = "fixed-clock";
+ clock-frequency = <0>;
+ clock-output-names = "xxti";
+ #clock-cells = <0>;
+ };
+
+ xusbxti: clock-1 {
+ compatible = "fixed-clock";
+ clock-frequency = <0>;
+ clock-output-names = "xusbxti";
+ #clock-cells = <0>;
+ };
+
+ clock-controller@e0100000 {
+ compatible = "samsung,s5pv210-clock";
+ reg = <0xe0100000 0x10000>;
+ clock-names = "xxti", "xusbxti";
+ clocks = <&xxti>, <&xusbxti>;
+ #clock-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/toshiba,tmpv770x-pipllct.yaml b/Documentation/devicetree/bindings/clock/toshiba,tmpv770x-pipllct.yaml
new file mode 100644
index 000000000000..7b7300ce96d6
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/toshiba,tmpv770x-pipllct.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/toshiba,tmpv770x-pipllct.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Toshiba Visconti5 TMPV770X PLL Controller Device Tree Bindings
+
+maintainers:
+ - Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+
+description:
+ Toshiba Visconti5 PLL controller, which supports the PLLs on TMPV770X.
+
+properties:
+ compatible:
+ const: toshiba,tmpv7708-pipllct
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ clocks:
+ description: External reference clock (OSC2)
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ osc2_clk: osc2-clk {
+ compatible = "fixed-clock";
+ clock-frequency = <20000000>;
+ #clock-cells = <0>;
+ };
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pipllct: clock-controller@24220000 {
+ compatible = "toshiba,tmpv7708-pipllct";
+ reg = <0 0x24220000 0 0x820>;
+ #clock-cells = <1>;
+ clocks = <&osc2_clk>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/clock/toshiba,tmpv770x-pismu.yaml b/Documentation/devicetree/bindings/clock/toshiba,tmpv770x-pismu.yaml
new file mode 100644
index 000000000000..ed79f16fe6bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/toshiba,tmpv770x-pismu.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/clock/toshiba,tmpv770x-pismu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Toshiba Visconti5 TMPV770x SMU controller Device Tree Bindings
+
+maintainers:
+ - Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+
+description:
+ Toshiba Visconti5 SMU (System Management Unit), which supports the clocks
+ and resets on TMPV770x.
+
+properties:
+ compatible:
+ items:
+ - const: toshiba,tmpv7708-pismu
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 1
+
+ '#reset-cells':
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - "#clock-cells"
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ pismu: syscon@24200000 {
+ compatible = "toshiba,tmpv7708-pismu", "syscon";
+ reg = <0 0x24200000 0 0x2140>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/crypto/qcom,prng.txt b/Documentation/devicetree/bindings/crypto/qcom,prng.txt
deleted file mode 100644
index 7ee0e9eac973..000000000000
--- a/Documentation/devicetree/bindings/crypto/qcom,prng.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Qualcomm MSM pseudo random number generator.
-
-Required properties:
-
-- compatible : should be "qcom,prng" for 8916 etc
- : should be "qcom,prng-ee" for 8996 and later using EE
- (Execution Environment) slice of prng
-- reg : specifies base physical address and size of the registers map
-- clocks : phandle to clock-controller plus clock-specifier pair
-- clock-names : "core" clocks all registers, FIFO and circuits in PRNG IP block
-
-Example:
-
- rng@f9bff000 {
- compatible = "qcom,prng";
- reg = <0xf9bff000 0x200>;
- clocks = <&clock GCC_PRNG_AHB_CLK>;
- clock-names = "core";
- };
diff --git a/Documentation/devicetree/bindings/crypto/qcom,prng.yaml b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
new file mode 100644
index 000000000000..bb42f4588b40
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/qcom,prng.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/crypto/qcom,prng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Pseudo Random Number Generator
+
+maintainers:
+ - Vinod Koul <vkoul@kernel.org>
+
+properties:
+ compatible:
+ enum:
+ - qcom,prng # 8916 etc.
+ - qcom,prng-ee # 8996 and later using EE
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: core
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ rng@f9bff000 {
+ compatible = "qcom,prng";
+ reg = <0xf9bff000 0x200>;
+ clocks = <&clk 125>;
+ clock-names = "core";
+ };
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
index cf5a208f2f10..343598c9f473 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-dw-hdmi.yaml
@@ -10,6 +10,9 @@ title: Amlogic specific extensions to the Synopsys Designware HDMI Controller
maintainers:
- Neil Armstrong <narmstrong@baylibre.com>
+allOf:
+ - $ref: /schemas/sound/name-prefix.yaml#
+
description: |
The Amlogic Meson Synopsys Designware Integration is composed of
- A Synopsys DesignWare HDMI Controller IP
@@ -99,6 +102,8 @@ properties:
"#sound-dai-cells":
const: 0
+ sound-name-prefix: true
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
index 851cb0781217..047fd69e0377 100644
--- a/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
+++ b/Documentation/devicetree/bindings/display/amlogic,meson-vpu.yaml
@@ -78,6 +78,10 @@ properties:
interrupts:
maxItems: 1
+ amlogic,canvas:
+ description: should point to a canvas provider node
+ $ref: /schemas/types.yaml#/definitions/phandle
+
power-domains:
maxItems: 1
description: phandle to the associated power domain
@@ -106,6 +110,7 @@ required:
- port@1
- "#address-cells"
- "#size-cells"
+ - amlogic,canvas
additionalProperties: false
@@ -118,6 +123,7 @@ examples:
interrupts = <3>;
#address-cells = <1>;
#size-cells = <0>;
+ amlogic,canvas = <&canvas>;
/* CVBS VDAC output port */
port@0 {
diff --git a/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml b/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
index 8e13f27b28ed..bce96b5b0db0 100644
--- a/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/analogix,anx7814.yaml
@@ -7,7 +7,9 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Analogix ANX7814 SlimPort (Full-HD Transmitter)
maintainers:
- - Enric Balletbo i Serra <enric.balletbo@collabora.com>
+ - Andrzej Hajda <andrzej.hajda@intel.com>
+ - Neil Armstrong <narmstrong@baylibre.com>
+ - Robert Foss <robert.foss@linaro.org>
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/bridge/google,cros-ec-anx7688.yaml b/Documentation/devicetree/bindings/display/bridge/google,cros-ec-anx7688.yaml
index 9f7cc6b757cb..a88a5d8c7ba5 100644
--- a/Documentation/devicetree/bindings/display/bridge/google,cros-ec-anx7688.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/google,cros-ec-anx7688.yaml
@@ -8,7 +8,6 @@ title: ChromeOS EC ANX7688 HDMI to DP Converter through Type-C Port
maintainers:
- Nicolas Boichat <drinkcat@chromium.org>
- - Enric Balletbo i Serra <enric.balletbo@collabora.com>
description: |
ChromeOS EC ANX7688 is a display bridge that converts HDMI 2.0 to
diff --git a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
index cdaf7a7a8f88..186e17be51fb 100644
--- a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
@@ -8,7 +8,6 @@ title: MIPI DSI to eDP Video Format Converter Device Tree Bindings
maintainers:
- Nicolas Boichat <drinkcat@chromium.org>
- - Enric Balletbo i Serra <enric.balletbo@collabora.com>
description: |
The PS8640 is a low power MIPI-to-eDP video format converter supporting
diff --git a/Documentation/devicetree/bindings/display/msm/dpu-sdm845.yaml b/Documentation/devicetree/bindings/display/msm/dpu-sdm845.yaml
index b4ea7c92fb3d..0dca4b3d66e4 100644
--- a/Documentation/devicetree/bindings/display/msm/dpu-sdm845.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dpu-sdm845.yaml
@@ -31,13 +31,11 @@ properties:
clocks:
items:
- description: Display AHB clock from gcc
- - description: Display AXI clock
- description: Display core clock
clock-names:
items:
- const: iface
- - const: bus
- const: core
interrupts:
@@ -160,9 +158,8 @@ examples:
power-domains = <&dispcc MDSS_GDSC>;
clocks = <&gcc GCC_DISP_AHB_CLK>,
- <&gcc GCC_DISP_AXI_CLK>,
<&dispcc DISP_CC_MDSS_MDP_CLK>;
- clock-names = "iface", "bus", "core";
+ clock-names = "iface", "core";
interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
interrupt-controller;
diff --git a/Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml b/Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml
index a108029ecfab..acd2f3faa6b9 100644
--- a/Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml
@@ -6,15 +6,12 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Asia Better Technology 3.0" (320x480 pixels) 24-bit IPS LCD panel
-description: |
- The panel must obey the rules for a SPI slave device as specified in
- spi/spi-controller.yaml
-
maintainers:
- Paul Cercueil <paul@crapouillou.net>
allOf:
- $ref: panel-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml
index e89c1ea62ffa..7d221ef35443 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml
@@ -15,11 +15,9 @@ description: |
960 TFT source driver pins and 240 TFT gate driver pins, VCOM, VCOML and
VCOMH outputs.
- The panel must obey the rules for a SPI slave device as specified in
- spi/spi-controller.yaml
-
allOf:
- $ref: panel-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml b/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml
index cda36c04e85c..72788e3e6c59 100644
--- a/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml
+++ b/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml
@@ -6,15 +6,12 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Innolux EJ030NA 3.0" (320x480 pixels) 24-bit TFT LCD panel
-description: |
- The panel must obey the rules for a SPI slave device as specified in
- spi/spi-controller.yaml
-
maintainers:
- Paul Cercueil <paul@crapouillou.net>
allOf:
- $ref: panel-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml b/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml
index 4f92365e888a..63c82a4378ff 100644
--- a/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml
+++ b/Documentation/devicetree/bindings/display/panel/jdi,lt070me05000.yaml
@@ -35,6 +35,8 @@ properties:
phandle of the gpio for power ic line
Power IC supply enable, High active
+ port: true
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml
index c45c92a3d41f..2a2756d19681 100644
--- a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml
+++ b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml
@@ -6,15 +6,12 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: King Display KD035G6-54NT 3.5" (320x240 pixels) 24-bit TFT LCD panel
-description: |
- The panel must obey the rules for a SPI slave device as specified in
- spi/spi-controller.yaml
-
maintainers:
- Paul Cercueil <paul@crapouillou.net>
allOf:
- $ref: panel-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml b/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml
index 830e335ddb53..5e4e0e552c2f 100644
--- a/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml
+++ b/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml
@@ -6,15 +6,12 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: LG.Philips LB035Q02 Panel
-description: |
- The panel must obey the rules for a SPI slave device as specified in
- spi/spi-controller.yaml
-
maintainers:
- Tomi Valkeinen <tomi.valkeinen@ti.com>
allOf:
- $ref: panel-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
index ef4c0a24512d..563766d283f6 100644
--- a/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
@@ -34,7 +34,7 @@ properties:
description: phandle of gpio for reset line - This should be 8mA, gpio
can be configured using mux, pinctrl, pinctrl-names (active high)
- vddio-supply:
+ vddi0-supply:
description: phandle of the regulator that provides the supply voltage
Power IC supply
@@ -75,8 +75,6 @@ examples:
reset-gpios = <&tlmm 6 GPIO_ACTIVE_HIGH>;
- #address-cells = <1>;
- #size-cells = <0>;
port {
tianma_nt36672a_in_0: endpoint {
remote-endpoint = <&dsi0_out>;
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml b/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml
index 060ee27a4749..d525165d6d63 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml
@@ -6,15 +6,12 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung LD9040 AMOLED LCD parallel RGB panel with SPI control bus
-description: |
- The panel must obey the rules for a SPI slave device as specified in
- spi/spi-controller.yaml
-
maintainers:
- Andrzej Hajda <a.hajda@samsung.com>
allOf:
- $ref: panel-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
@@ -63,8 +60,6 @@ examples:
lcd@0 {
compatible = "samsung,ld9040";
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
vdd3-supply = <&ldo7_reg>;
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
index ea58df49263a..940f7f88526f 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
@@ -12,6 +12,7 @@ maintainers:
allOf:
- $ref: panel-common.yaml#
- $ref: /schemas/leds/backlight/common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
index fa46d151e7b3..9e1d707c2ace 100644
--- a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
@@ -6,15 +6,12 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Sitronix ST7789V RGB panel with SPI control bus
-description: |
- The panel must obey the rules for a SPI slave device as specified in
- spi/spi-controller.yaml
-
maintainers:
- Maxime Ripard <mripard@kernel.org>
allOf:
- $ref: panel-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml b/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml
index 95d053c548ab..98abdf4ddeac 100644
--- a/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml
@@ -6,15 +6,12 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Sony ACX565AKM SDI Panel
-description: |
- The panel must obey the rules for a SPI slave device as specified in
- spi/spi-controller.yaml
-
maintainers:
- Tomi Valkeinen <tomi.valkeinen@ti.com>
allOf:
- $ref: panel-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,td.yaml b/Documentation/devicetree/bindings/display/panel/tpo,td.yaml
index 4aa605613445..f902a9d74141 100644
--- a/Documentation/devicetree/bindings/display/panel/tpo,td.yaml
+++ b/Documentation/devicetree/bindings/display/panel/tpo,td.yaml
@@ -6,16 +6,13 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Toppoly TD Panels
-description: |
- The panel must obey the rules for a SPI slave device as specified in
- spi/spi-controller.yaml
-
maintainers:
- Marek Belisko <marek@goldelico.com>
- H. Nikolaus Schaller <hns@goldelico.com>
allOf:
- $ref: panel-common.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
index 008c144257cb..1a68a940d165 100644
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip,rk3066-hdmi.yaml
@@ -26,14 +26,6 @@ properties:
clock-names:
const: hclk
- pinctrl-0:
- maxItems: 2
-
- pinctrl-names:
- const: default
- description:
- Switch the iomux for the HPD/I2C pins to HDMI function.
-
power-domains:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
index c2499a7906f5..44a29d813f14 100644
--- a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
+++ b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
@@ -83,13 +83,25 @@ properties:
format:
description: >
Format of the framebuffer:
+ * `a1r5g5b5` - 16-bit pixels, d[15]=a, d[14:10]=r, d[9:5]=g, d[4:0]=b
+ * `a2r10g10b10` - 32-bit pixels, d[31:30]=a, d[29:20]=r, d[19:10]=g, d[9:0]=b
* `a8b8g8r8` - 32-bit pixels, d[31:24]=a, d[23:16]=b, d[15:8]=g, d[7:0]=r
+ * `a8r8g8b8` - 32-bit pixels, d[31:24]=a, d[23:16]=r, d[15:8]=g, d[7:0]=b
* `r5g6b5` - 16-bit pixels, d[15:11]=r, d[10:5]=g, d[4:0]=b
+ * `r5g5b5a1` - 16-bit pixels, d[15:11]=r, d[10:6]=g, d[5:1]=b, d[0]=a
+ * `r8g8b8` - 24-bit pixels, d[23:16]=r, d[15:8]=g, d[7:0]=b
+ * `x1r5g5b5` - 16-bit pixels, d[14:10]=r, d[9:5]=g, d[4:0]=b
* `x2r10g10b10` - 32-bit pixels, d[29:20]=r, d[19:10]=g, d[9:0]=b
* `x8r8g8b8` - 32-bit pixels, d[23:16]=r, d[15:8]=g, d[7:0]=b
enum:
+ - a1r5g5b5
+ - a2r10g10b10
- a8b8g8r8
+ - a8r8g8b8
- r5g6b5
+ - r5g5b5a1
+ - r8g8b8
+ - x1r5g5b5
- x2r10g10b10
- x8r8g8b8
diff --git a/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml b/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
index ce1ef93cce93..54f67cb51040 100644
--- a/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/st,stm32-dsi.yaml
@@ -110,7 +110,7 @@ examples:
};
};
- panel-dsi@0 {
+ panel@0 {
compatible = "orisetech,otm8009a";
reg = <0>;
reset-gpios = <&gpioe 4 GPIO_ACTIVE_LOW>;
@@ -125,4 +125,3 @@ examples:
};
...
-
diff --git a/Documentation/devicetree/bindings/dma/arm,pl330.yaml b/Documentation/devicetree/bindings/dma/arm,pl330.yaml
new file mode 100644
index 000000000000..decab185cf4d
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/arm,pl330.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/dma/arm,pl330.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM PrimeCell PL330 DMA Controller
+
+maintainers:
+ - Vinod Koul <vkoul@kernel.org>
+
+description:
+ The ARM PrimeCell PL330 DMA controller can move blocks of memory contents
+ between memory and peripherals or memory to memory.
+
+# We need a select here so we don't match all nodes with 'arm,primecell'
+select:
+ properties:
+ compatible:
+ contains:
+ const: arm,pl330
+ required:
+ - compatible
+
+allOf:
+ - $ref: dma-controller.yaml#
+ - $ref: /schemas/arm/primecell.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - arm,pl330
+ - const: arm,primecell
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 32
+ description: A single combined interrupt or an interrupt per event
+
+ '#dma-cells':
+ const: 1
+ description: Contains the DMA request number for the consumer
+
+ arm,pl330-broken-no-flushp:
+ type: boolean
+ description: quirk to avoid executing DMAFLUSHP
+
+ arm,pl330-periph-burst:
+ type: boolean
+ description: quirk to perform burst transfers only
+
+ dma-coherent: true
+
+ resets:
+ minItems: 1
+ maxItems: 2
+
+ reset-names:
+ minItems: 1
+ items:
+ - const: dma
+ - const: dma-ocp
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ dma-controller@12680000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0x12680000 0x1000>;
+ interrupts = <99>;
+ #dma-cells = <1>;
+ };
+...
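Since '#dma-cells' is <1>, a consumer references a channel with a single request number, as the removed text file documented for client drivers. A hypothetical client node (compatible string, address and request IDs are illustrative) would be:

    uart0: serial@12c00000 {
        compatible = "vendor,example-uart";     /* hypothetical consumer */
        reg = <0x12c00000 0x100>;
        /* &pdma0 labels a pl330 node; one request id per channel */
        dmas = <&pdma0 13>, <&pdma0 14>;
        dma-names = "tx", "rx";
    };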
diff --git a/Documentation/devicetree/bindings/dma/arm-pl08x.yaml b/Documentation/devicetree/bindings/dma/arm-pl08x.yaml
index 3bd9eea543ca..9193b18fb75f 100644
--- a/Documentation/devicetree/bindings/dma/arm-pl08x.yaml
+++ b/Documentation/devicetree/bindings/dma/arm-pl08x.yaml
@@ -10,6 +10,7 @@ maintainers:
- Vinod Koul <vkoul@kernel.org>
allOf:
+ - $ref: /schemas/arm/primecell.yaml#
- $ref: "dma-controller.yaml#"
# We need a select here so we don't match all nodes with 'arm,primecell'
@@ -89,6 +90,9 @@ properties:
- 64
 description: bus width used for memcpy in bits. FTDMAC020 also accepts 64 bits
+ resets:
+ maxItems: 1
+
required:
- reg
- interrupts
diff --git a/Documentation/devicetree/bindings/dma/arm-pl330.txt b/Documentation/devicetree/bindings/dma/arm-pl330.txt
deleted file mode 100644
index 315e90122afa..000000000000
--- a/Documentation/devicetree/bindings/dma/arm-pl330.txt
+++ /dev/null
@@ -1,49 +0,0 @@
-* ARM PrimeCell PL330 DMA Controller
-
-The ARM PrimeCell PL330 DMA controller can move blocks of memory contents
-between memory and peripherals or memory to memory.
-
-Required properties:
- - compatible: should include both "arm,pl330" and "arm,primecell".
- - reg: physical base address of the controller and length of memory mapped
- region.
- - interrupts: interrupt number to the cpu.
-
-Optional properties:
- - dma-coherent : Present if dma operations are coherent
- - #dma-cells: must be <1>. used to represent the number of integer
- cells in the dmas property of client device.
- - dma-channels: contains the total number of DMA channels supported by the DMAC
- - dma-requests: contains the total number of DMA requests supported by the DMAC
- - arm,pl330-broken-no-flushp: quirk for avoiding to execute DMAFLUSHP
- - arm,pl330-periph-burst: quirk for performing burst transfer only
- - resets: contains an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
- - reset-names: must contain at least "dma", and optional is "dma-ocp".
-
-Example:
-
- pdma0: pdma@12680000 {
- compatible = "arm,pl330", "arm,primecell";
- reg = <0x12680000 0x1000>;
- interrupts = <99>;
- #dma-cells = <1>;
- #dma-channels = <8>;
- #dma-requests = <32>;
- };
-
-Client drivers (device nodes requiring dma transfers from dev-to-mem or
-mem-to-dev) should specify the DMA channel numbers and dma channel names
-as shown below.
-
- [property name] = <[phandle of the dma controller] [dma request id]>;
- [property name] = <[dma channel name]>
-
- where 'dma request id' is the dma request number which is connected
- to the client controller. The 'property name' 'dmas' and 'dma-names'
- as required by the generic dma device tree binding helpers. The dma
- names correspond 1:1 with the dma request ids in the dmas property.
-
- Example: dmas = <&pdma0 12
- &pdma1 11>;
- dma-names = "tx", "rx";
diff --git a/Documentation/devicetree/bindings/dma/dma-controller.yaml b/Documentation/devicetree/bindings/dma/dma-controller.yaml
index 0043b91da95e..6d3727267fa8 100644
--- a/Documentation/devicetree/bindings/dma/dma-controller.yaml
+++ b/Documentation/devicetree/bindings/dma/dma-controller.yaml
@@ -24,10 +24,10 @@ examples:
dma: dma-controller@48000000 {
compatible = "ti,omap-sdma";
reg = <0x48000000 0x1000>;
- interrupts = <0 12 0x4
- 0 13 0x4
- 0 14 0x4
- 0 15 0x4>;
+ interrupts = <0 12 0x4>,
+ <0 13 0x4>,
+ <0 14 0x4>,
+ <0 15 0x4>;
#dma-cells = <1>;
dma-channels = <32>;
dma-requests = <127>;
diff --git a/Documentation/devicetree/bindings/dma/ingenic,dma.yaml b/Documentation/devicetree/bindings/dma/ingenic,dma.yaml
index dc059d6fd037..3b0b3b919af8 100644
--- a/Documentation/devicetree/bindings/dma/ingenic,dma.yaml
+++ b/Documentation/devicetree/bindings/dma/ingenic,dma.yaml
@@ -14,15 +14,23 @@ allOf:
properties:
compatible:
- enum:
- - ingenic,jz4740-dma
- - ingenic,jz4725b-dma
- - ingenic,jz4760-dma
- - ingenic,jz4760b-dma
- - ingenic,jz4770-dma
- - ingenic,jz4780-dma
- - ingenic,x1000-dma
- - ingenic,x1830-dma
+ oneOf:
+ - enum:
+ - ingenic,jz4740-dma
+ - ingenic,jz4725b-dma
+ - ingenic,jz4760-dma
+ - ingenic,jz4760-bdma
+ - ingenic,jz4760-mdma
+ - ingenic,jz4760b-dma
+ - ingenic,jz4760b-bdma
+ - ingenic,jz4760b-mdma
+ - ingenic,jz4770-dma
+ - ingenic,jz4780-dma
+ - ingenic,x1000-dma
+ - ingenic,x1830-dma
+ - items:
+ - const: ingenic,jz4770-bdma
+ - const: ingenic,jz4760b-bdma
reg:
items:
@@ -36,13 +44,19 @@ properties:
maxItems: 1
"#dma-cells":
- const: 2
+ enum: [2, 3]
description: >
DMA clients must use the format described in dma.txt, giving a phandle
- to the DMA controller plus the following 2 integer cells:
-
- - Request type: The DMA request type for transfers to/from the
- device on the allocated channel, as defined in the SoC documentation.
+ to the DMA controller plus the following integer cells:
+
+ - Request type: The DMA request type specifies the device endpoint that
+ will be the source or destination of the DMA transfer.
+ If "#dma-cells" is 2, the request type is a single cell, and the
+ direction will be unidirectional (either RX or TX but not both).
+ If "#dma-cells" is 3, the request type has two cells; the first
+ one corresponds to the host to device direction (TX), the second one
+ corresponds to the device to host direction (RX). The DMA channel is
+ then bidirectional.
- Channel: If set to 0xffffffff, any available channel will be allocated
for the client. Otherwise, the exact channel specified will be used.
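To make the two specifier formats concrete, hedged client fragments (request numbers and the &dma/&bdma labels are illustrative) could read:

    /* "#dma-cells" = <2>: one request type, unidirectional channels */
    dmas = <&dma 0x14 0xffffffff>, <&dma 0x15 0xffffffff>;
    dma-names = "tx", "rx";

    /* "#dma-cells" = <3>: TX and RX request types, one bidirectional channel */
    dmas = <&bdma 0x14 0x15 0xffffffff>;
    dma-names = "tx-rx";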
diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
index d8142cbd13d3..7c6badf39921 100644
--- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
+++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.yaml
@@ -44,6 +44,10 @@ properties:
- items:
- const: renesas,dmac-r8a779a0 # R-Car V3U
+ - items:
+ - const: renesas,dmac-r8a779f0 # R-Car S4-8
+ - const: renesas,rcar-gen4-dmac
+
reg: true
interrupts:
@@ -118,6 +122,7 @@ if:
contains:
enum:
- renesas,dmac-r8a779a0
+ - renesas,rcar-gen4-dmac
then:
properties:
reg:
diff --git a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
index d32a71b975fe..75ad898c59bc 100644
--- a/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
+++ b/Documentation/devicetree/bindings/dma/sifive,fu540-c000-pdma.yaml
@@ -50,7 +50,7 @@ examples:
dma@3000000 {
compatible = "sifive,fu540-c000-pdma";
reg = <0x3000000 0x8000>;
- interrupts = <23 24 25 26 27 28 29 30>;
+ interrupts = <23>, <24>, <25>, <26>, <27>, <28>, <29>, <30>;
#dma-cells = <1>;
};
diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
index 79e241498e25..4324a94b26b2 100644
--- a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
+++ b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.yaml
@@ -53,6 +53,9 @@ properties:
minimum: 1
maximum: 8
+ resets:
+ maxItems: 1
+
snps,dma-masters:
description: |
Number of AXI masters supported by the hardware.
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
index df29d59d13a8..08627d91e607 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
@@ -30,6 +30,7 @@ description: |
allOf:
- $ref: /schemas/dma/dma-controller.yaml#
+ - $ref: /schemas/arm/keystone/ti,k3-sci-common.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
index ea19d12a9337..507d16d84ade 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
@@ -25,6 +25,7 @@ description: |
allOf:
- $ref: /schemas/dma/dma-controller.yaml#
+ - $ref: /schemas/arm/keystone/ti,k3-sci-common.yaml#
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml
index 4c5396a9744f..6b61a8cf6137 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at24.yaml
@@ -87,6 +87,10 @@ properties:
- items:
pattern: cs1024$
- items:
+ pattern: c1025$
+ - items:
+ pattern: cs1025$
+ - items:
pattern: c2048$
- items:
pattern: cs2048$
@@ -95,18 +99,21 @@ properties:
# These are special cases that don't conform to the above pattern.
# Each requires a standard at24 model as fallback.
- items:
- - const: nxp,se97b
- - const: atmel,24c02
+ - enum:
+ - rohm,br24g01
+ - rohm,br24t01
+ - const: atmel,24c01
- items:
- - const: onnn,cat24c04
- - const: atmel,24c04
+ - enum:
+ - nxp,se97b
+ - renesas,r1ex24002
+ - const: atmel,24c02
- items:
- - const: onnn,cat24c05
+ - enum:
+ - onnn,cat24c04
+ - onnn,cat24c05
- const: atmel,24c04
- items:
- - const: renesas,r1ex24002
- - const: atmel,24c02
- - items:
- const: renesas,r1ex24016
- const: atmel,24c16
- items:
@@ -115,12 +122,6 @@ properties:
- items:
- const: renesas,r1ex24128
- const: atmel,24c128
- - items:
- - const: rohm,br24g01
- - const: atmel,24c01
- - items:
- - const: rohm,br24t01
- - const: atmel,24c01
label:
description: Descriptive name of the EEPROM.
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml b/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml
index 20e1ccfc8630..2d82b44268db 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml
+++ b/Documentation/devicetree/bindings/extcon/extcon-usbc-cros-ec.yaml
@@ -8,7 +8,6 @@ title: ChromeOS EC USB Type-C cable and accessories detection
maintainers:
- Benson Leung <bleung@chromium.org>
- - Enric Balletbo i Serra <enric.balletbo@collabora.com>
description: |
On ChromeOS systems with USB Type C ports, the ChromeOS Embedded Controller is
diff --git a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
deleted file mode 100644
index 5d468ecd1809..000000000000
--- a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-Broadcom STB "UPG GIO" GPIO controller
-
-The controller's registers are organized as sets of eight 32-bit
-registers with each set controlling a bank of up to 32 pins. A single
-interrupt is shared for all of the banks handled by the controller.
-
-Required properties:
-
-- compatible:
- Must be "brcm,brcmstb-gpio"
-
-- reg:
- Define the base and range of the I/O address space containing
- the brcmstb GPIO controller registers
-
-- #gpio-cells:
- Should be <2>. The first cell is the pin number (within the controller's
- pin space), and the second is used for the following:
- bit[0]: polarity (0 for active-high, 1 for active-low)
-
-- gpio-controller:
- Specifies that the node is a GPIO controller.
-
-- brcm,gpio-bank-widths:
- Number of GPIO lines for each bank. Number of elements must
- correspond to number of banks suggested by the 'reg' property.
-
-Optional properties:
-
-- interrupts:
- The interrupt shared by all GPIO lines for this controller.
-
-- interrupts-extended:
- Alternate form of specifying interrupts and parents that allows for
- multiple parents. This takes precedence over 'interrupts' and
- 'interrupt-parent'. Wakeup-capable GPIO controllers often route their
- wakeup interrupt lines through a different interrupt controller than the
- primary interrupt line, making this property necessary.
-
-- #interrupt-cells:
- Should be <2>. The first cell is the GPIO number, the second should specify
- flags. The following subset of flags is supported:
- - bits[3:0] trigger type and level flags
- 1 = low-to-high edge triggered
- 2 = high-to-low edge triggered
- 4 = active high level-sensitive
- 8 = active low level-sensitive
- Valid combinations are 1, 2, 3, 4, 8.
- See also Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
-
-- interrupt-controller:
- Marks the device node as an interrupt controller
-
-- wakeup-source:
- GPIOs for this controller can be used as a wakeup source
-
-Example:
- upg_gio: gpio@f040a700 {
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio";
- gpio-controller;
- interrupt-controller;
- reg = <0xf040a700 0x80>;
- interrupt-parent = <&irq0_intc>;
- interrupts = <0x6>;
- brcm,gpio-bank-widths = <32 32 32 24>;
- };
-
- upg_gio_aon: gpio@f04172c0 {
- #gpio-cells = <2>;
- #interrupt-cells = <2>;
- compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio";
- gpio-controller;
- interrupt-controller;
- reg = <0xf04172c0 0x40>;
- interrupt-parent = <&irq0_aon_intc>;
- interrupts = <0x6>;
- interrupts-extended = <&irq0_aon_intc 0x6>,
- <&aon_pm_l2_intc 0x5>;
- wakeup-source;
- brcm,gpio-bank-widths = <18 4>;
- };
diff --git a/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml
new file mode 100644
index 000000000000..4a896ff7edc5
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpio/brcm,brcmstb-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom STB "UPG GIO" GPIO controller
+
+description: >
+ The controller's registers are organized as sets of eight 32-bit
+ registers with each set controlling a bank of up to 32 pins. A single
+ interrupt is shared for all of the banks handled by the controller.
+
+maintainers:
+ - Doug Berger <opendmb@gmail.com>
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - brcm,bcm7445-gpio
+ - const: brcm,brcmstb-gpio
+
+ reg:
+ maxItems: 1
+ description: >
+ Define the base and range of the I/O address space containing
+ the brcmstb GPIO controller registers
+
+ "#gpio-cells":
+ const: 2
+ description: >
+ The first cell is the pin number (within the controller's
+ pin space), and the second is used for the following:
+ bit[0]: polarity (0 for active-high, 1 for active-low)
+
+ gpio-controller: true
+
+ brcm,gpio-bank-widths:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description: >
+ Number of GPIO lines for each bank. The number of elements must
+ correspond to the number of banks suggested by the 'reg' property.
+
+ interrupts:
+ maxItems: 1
+ description: >
+ The interrupt shared by all GPIO lines for this controller.
+
+ "#interrupt-cells":
+ const: 2
+ description: |
+ The first cell is the GPIO number, the second should specify
+ flags. The following subset of flags is supported:
+ - bits[3:0] trigger type and level flags
+ 1 = low-to-high edge triggered
+ 2 = high-to-low edge triggered
+ 4 = active high level-sensitive
+ 8 = active low level-sensitive
+ Valid combinations are 1, 2, 3, 4, 8.
+
+ interrupt-controller: true
+
+ wakeup-source:
+ type: boolean
+ description: >
+ GPIOs for this controller can be used as a wakeup source
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - "#gpio-cells"
+ - "brcm,gpio-bank-widths"
+
+additionalProperties: false
+
+examples:
+ - |
+ upg_gio: gpio@f040a700 {
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio";
+ gpio-controller;
+ interrupt-controller;
+ reg = <0xf040a700 0x80>;
+ interrupt-parent = <&irq0_intc>;
+ interrupts = <0x6>;
+ brcm,gpio-bank-widths = <32 32 32 24>;
+ };
+
+ upg_gio_aon: gpio@f04172c0 {
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ compatible = "brcm,bcm7445-gpio", "brcm,brcmstb-gpio";
+ gpio-controller;
+ interrupt-controller;
+ reg = <0xf04172c0 0x40>;
+ interrupt-parent = <&irq0_aon_intc>;
+ interrupts = <0x6>;
+ wakeup-source;
+ brcm,gpio-bank-widths = <18 4>;
+ };
diff --git a/Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml b/Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml
index 9ad470e01953..b085450b527f 100644
--- a/Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml
+++ b/Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml
@@ -43,7 +43,6 @@ required:
- gpio-controller
- interrupt-controller
- "#interrupt-cells"
- - interrupt-parent
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index 6f98dd55fb4c..63a08f3f321d 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -19,6 +19,7 @@ properties:
- amlogic,meson-g12a-mali
- mediatek,mt8183-mali
- realtek,rtd1619-mali
+ - renesas,r9a07g044-mali
- rockchip,px30-mali
- rockchip,rk3568-mali
- const: arm,mali-bifrost # Mali Bifrost GPU model/revision is fully discoverable
@@ -27,19 +28,26 @@ properties:
maxItems: 1
interrupts:
+ minItems: 3
items:
- description: Job interrupt
- description: MMU interrupt
- description: GPU interrupt
+ - description: Event interrupt
interrupt-names:
+ minItems: 3
items:
- const: job
- const: mmu
- const: gpu
+ - const: event
clocks:
- maxItems: 1
+ minItems: 1
+ maxItems: 3
+
+ clock-names: true
mali-supply: true
@@ -52,7 +60,10 @@ properties:
maxItems: 3
resets:
- maxItems: 2
+ minItems: 1
+ maxItems: 3
+
+ reset-names: true
"#cooling-cells":
const: 2
@@ -98,6 +109,36 @@ allOf:
properties:
compatible:
contains:
+ const: renesas,r9a07g044-mali
+ then:
+ properties:
+ interrupts:
+ minItems: 4
+ interrupt-names:
+ minItems: 4
+ clocks:
+ minItems: 3
+ clock-names:
+ items:
+ - const: gpu
+ - const: bus
+ - const: bus_ace
+ resets:
+ minItems: 3
+ reset-names:
+ items:
+ - const: rst
+ - const: axi_rst
+ - const: ace_rst
+ required:
+ - clock-names
+ - power-domains
+ - resets
+ - reset-names
+ - if:
+ properties:
+ compatible:
+ contains:
const: mediatek,mt8183-mali
then:
properties:
diff --git a/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml b/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
index ea643e6c3ef5..f597f73ccd87 100644
--- a/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/aspeed,i2c.yaml
@@ -63,7 +63,6 @@ examples:
i2c0: i2c-bus@40 {
#address-cells = <1>;
#size-cells = <0>;
- #interrupt-cells = <1>;
compatible = "aspeed,ast2500-i2c-bus";
reg = <0x40 0x40>;
clocks = <&syscon ASPEED_CLK_APB>;
diff --git a/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt b/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt
deleted file mode 100644
index a8a35df41951..000000000000
--- a/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-Broadcom BCM2835 I2C controller
-
-Required properties:
-- compatible : Should be one of:
- "brcm,bcm2711-i2c"
- "brcm,bcm2835-i2c"
-- reg: Should contain register location and length.
-- interrupts: Should contain interrupt.
-- clocks : The clock feeding the I2C controller.
-
-Recommended properties:
-- clock-frequency : desired I2C bus clock frequency in Hz.
-
-Example:
-
-i2c@7e205000 {
- compatible = "brcm,bcm2835-i2c";
- reg = <0x7e205000 0x1000>;
- interrupts = <2 21>;
- clocks = <&clk_i2c>;
- clock-frequency = <100000>;
-};
diff --git a/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.yaml b/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.yaml
new file mode 100644
index 000000000000..8256490a7af2
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/brcm,bcm2835-i2c.yaml
@@ -0,0 +1,54 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/brcm,bcm2835-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM2835 I2C controller
+
+maintainers:
+ - Stephen Warren <swarren@wwwdotorg.org>
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - brcm,bcm2835-i2c
+ - items:
+ - const: brcm,bcm2711-i2c
+ - const: brcm,bcm2835-i2c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clock-names:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-frequency: true
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c@7e205000 {
+ compatible = "brcm,bcm2835-i2c";
+ reg = <0x7e205000 0x1000>;
+ interrupts = <2 21>;
+ clocks = <&clk_i2c>;
+ clock-frequency = <100000>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
index b386e4128a79..6e1c70e9275e 100644
--- a/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
+++ b/Documentation/devicetree/bindings/i2c/google,cros-ec-i2c-tunnel.yaml
@@ -10,7 +10,6 @@ title: I2C bus that tunnels through the ChromeOS EC (cros-ec)
maintainers:
- Doug Anderson <dianders@chromium.org>
- Benson Leung <bleung@chromium.org>
- - Enric Balletbo i Serra <enric.balletbo@collabora.com>
description: |
On some ChromeOS board designs we've got a connection to the EC
diff --git a/Documentation/devicetree/bindings/i2c/i2c-exynos5.txt b/Documentation/devicetree/bindings/i2c/i2c-exynos5.txt
deleted file mode 100644
index 2dbc0b62daa6..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-exynos5.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-* Samsung's High Speed I2C controller
-
-The Samsung's High Speed I2C controller is used to interface with I2C devices
-at various speeds ranging from 100khz to 3.4Mhz.
-
-Required properties:
- - compatible: value should be.
- -> "samsung,exynos5-hsi2c", (DEPRECATED)
- for i2c compatible with HSI2C available
- on Exynos5250 and Exynos5420 SoCs.
- -> "samsung,exynos5250-hsi2c", for i2c compatible with HSI2C available
- on Exynos5250 and Exynos5420 SoCs.
- -> "samsung,exynos5260-hsi2c", for i2c compatible with HSI2C available
- on Exynos5260 SoCs.
- -> "samsung,exynos7-hsi2c", for i2c compatible with HSI2C available
- on Exynos7 SoCs.
-
- - reg: physical base address of the controller and length of memory mapped
- region.
- - interrupts: interrupt number to the cpu.
- - #address-cells: always 1 (for i2c addresses)
- - #size-cells: always 0
-
- - Pinctrl:
- - pinctrl-0: Pin control group to be used for this controller.
- - pinctrl-names: Should contain only one value - "default".
-
-Optional properties:
- - clock-frequency: Desired operating frequency in Hz of the bus.
- -> If not specified, the bus operates in fast-speed mode at
- at 100khz.
- -> If specified, the bus operates in high-speed mode only if the
- clock-frequency is >= 1Mhz.
-
-Example:
-
-hsi2c@12ca0000 {
- compatible = "samsung,exynos5250-hsi2c";
- reg = <0x12ca0000 0x100>;
- interrupts = <56>;
- clock-frequency = <100000>;
-
- pinctrl-0 = <&i2c4_bus>;
- pinctrl-names = "default";
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- s2mps11_pmic@66 {
- compatible = "samsung,s2mps11-pmic";
- reg = <0x66>;
- };
-};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml b/Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml
new file mode 100644
index 000000000000..19874e8b73b9
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-exynos5.yaml
@@ -0,0 +1,133 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/i2c-exynos5.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung's High Speed I2C controller
+
+maintainers:
+ - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+
+description: |
+ Samsung's High Speed I2C controller is used to interface with I2C devices
+ at various speeds ranging from 100kHz to 3.4MHz.
+
+ If the HSI2C controller is encapsulated within a USI block (as is the case,
+ e.g., for the Exynos850 and Exynos Auto V9 SoCs), it may also be necessary to
+ define a USI node in the device tree, choosing the "i2c" configuration. Please
+ see Documentation/devicetree/bindings/soc/samsung/exynos-usi.yaml for details.
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - samsung,exynos5250-hsi2c # Exynos5250 and Exynos5420
+ - samsung,exynos5260-hsi2c # Exynos5260
+ - samsung,exynos7-hsi2c # Exynos7
+ - samsung,exynosautov9-hsi2c # ExynosAutoV9 and Exynos850
+ - const: samsung,exynos5-hsi2c # Exynos5250 and Exynos5420
+ deprecated: true
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clock-frequency:
+ default: 100000
+ description:
+ Desired operating frequency in Hz of the bus.
+
+ If not specified, the bus operates in fast-speed mode at 100kHz.
+
+ If specified, the bus operates in high-speed mode only if the
+ clock-frequency is >= 1MHz.
+
+ clocks:
+ minItems: 1
+ items:
+ - description: I2C operating clock
+ - description: Bus clock (APB)
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: hsi2c
+ - const: hsi2c_pclk
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynosautov9-hsi2c
+
+ then:
+ properties:
+ clocks:
+ minItems: 2
+
+ clock-names:
+ minItems: 2
+
+ required:
+ - clock-names
+
+ else:
+ properties:
+ clocks:
+ maxItems: 1
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/exynos5420.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ hsi2c_8: i2c@12e00000 {
+ compatible = "samsung,exynos5250-hsi2c";
+ reg = <0x12e00000 0x1000>;
+ interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clock-frequency = <100000>;
+ clocks = <&clock CLK_USI4>;
+ clock-names = "hsi2c";
+
+ pmic@66 {
+ /* compatible = "samsung,s2mps11-pmic"; */
+ reg = <0x66>;
+ };
+ };
+
+ - |
+ #include <dt-bindings/clock/exynos850.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ hsi2c_2: i2c@138c0000 {
+ compatible = "samsung,exynosautov9-hsi2c";
+ reg = <0x138c0000 0xc0>;
+ interrupts = <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ clocks = <&cmu_peri CLK_GOUT_HSI2C2_IPCLK>,
+ <&cmu_peri CLK_GOUT_HSI2C2_PCLK>;
+ clock-names = "hsi2c", "hsi2c_pclk";
+
+ pmic@66 {
+ /* compatible = "samsung,s2mps11-pmic"; */
+ reg = <0x66>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-gate.yaml b/Documentation/devicetree/bindings/i2c/i2c-gate.yaml
index 66472f12a7e2..bd67b0766599 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-gate.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-gate.yaml
@@ -31,7 +31,7 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
ak8975@c {
- compatible = "ak,ak8975";
+ compatible = "asahi-kasei,ak8975";
reg = <0x0c>;
};
};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
index fe0c89edf7c1..529bea56d324 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx-lpi2c.yaml
@@ -20,7 +20,9 @@ properties:
- items:
- enum:
- fsl,imx8qxp-lpi2c
+ - fsl,imx8dxl-lpi2c
- fsl,imx8qm-lpi2c
+ - fsl,imx8ulp-lpi2c
- const: fsl,imx7ulp-lpi2c
reg:
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
deleted file mode 100644
index d4cf10582a26..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-GPIO-based I2C Bus Mux
-
-This binding describes an I2C bus multiplexer that uses GPIOs to
-route the I2C signals.
-
- +-----+ +-----+
- | dev | | dev |
- +------------+ +-----+ +-----+
- | SoC | | |
- | | /--------+--------+
- | +------+ | +------+ child bus A, on GPIO value set to 0
- | | I2C |-|--| Mux |
- | +------+ | +--+---+ child bus B, on GPIO value set to 1
- | | | \----------+--------+--------+
- | +------+ | | | | |
- | | GPIO |-|-----+ +-----+ +-----+ +-----+
- | +------+ | | dev | | dev | | dev |
- +------------+ +-----+ +-----+ +-----+
-
-Required properties:
-- compatible: i2c-mux-gpio
-- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
- port is connected to.
-- mux-gpios: list of gpios used to control the muxer
-* Standard I2C mux properties. See i2c-mux.yaml in this directory.
-* I2C child bus nodes. See i2c-mux.yaml in this directory.
-
-Optional properties:
-- idle-state: value to set the muxer to when idle. When no value is
- given, it defaults to the last value used.
-
-For each i2c child node, an I2C child bus will be created. They will
-be numbered based on their order in the device tree.
-
-Whenever an access is made to a device on a child bus, the value set
-in the relevant node's reg property will be output using the list of
-GPIOs, the first in the list holding the least-significant value.
-
-If an idle state is defined, using the idle-state (optional) property,
-whenever an access is not being made to a device on a child bus, the
-GPIOs will be set according to the idle value.
-
-If an idle state is not defined, the most recently used value will be
-left programmed into hardware whenever no access is being made to a
-device on a child bus.
-
-Example:
- i2cmux {
- compatible = "i2c-mux-gpio";
- #address-cells = <1>;
- #size-cells = <0>;
- mux-gpios = <&gpio1 22 0 &gpio1 23 0>;
- i2c-parent = <&i2c1>;
-
- i2c@1 {
- reg = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- ssd1307: oled@3c {
- compatible = "solomon,ssd1307fb-i2c";
- reg = <0x3c>;
- pwms = <&pwm 4 3000>;
- reset-gpios = <&gpio2 7 1>;
- };
- };
-
- i2c@3 {
- reg = <3>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pca9555: pca9555@20 {
- compatible = "nxp,pca9555";
- gpio-controller;
- #gpio-cells = <2>;
- reg = <0x20>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.yaml b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.yaml
new file mode 100644
index 000000000000..6e0a5686af04
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-gpio.yaml
@@ -0,0 +1,104 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/i2c-mux-gpio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: GPIO-based I2C Bus Mux
+
+maintainers:
+ - Wolfram Sang <wsa@kernel.org>
+
+description: |
+ This binding describes an I2C bus multiplexer that uses GPIOs to route the I2C signals.
+
+ +-----+ +-----+
+ | dev | | dev |
+ +------------+ +-----+ +-----+
+ | SoC | | |
+ | | /--------+--------+
+ | +------+ | +------+ child bus A, on GPIO value set to 0
+ | | I2C |-|--| Mux |
+ | +------+ | +--+---+ child bus B, on GPIO value set to 1
+ | | | \----------+--------+--------+
+ | +------+ | | | | |
+ | | GPIO |-|-----+ +-----+ +-----+ +-----+
+ | +------+ | | dev | | dev | | dev |
+ +------------+ +-----+ +-----+ +-----+
+
+ For each I2C child node, an I2C child bus will be created. They will be numbered based on their
+ order in the device tree.
+
+ Whenever an access is made to a device on a child bus, the value set in the relevant node's reg
+ property will be output using the list of GPIOs, the first in the list holding the least-
+ significant value.
+
+ If an idle state is defined, using the idle-state (optional) property, whenever an access is not
+ being made to a device on a child bus, the GPIOs will be set according to the idle value.
+
+ If an idle state is not defined, the most recently used value will be left programmed into
+ hardware whenever no access is being made to a device on a child bus.
+
+properties:
+ compatible:
+ const: i2c-mux-gpio
+
+ i2c-parent:
+ description: phandle of the I2C bus that this multiplexer's master-side port is connected to
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+
+ mux-gpios:
+ description: list of GPIOs used to control the muxer
+ minItems: 1
+ maxItems: 4 # Should be enough
+
+ idle-state:
+ description: Value to set the muxer to when idle. When no value is given, it defaults to the
+ last value used.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+
+allOf:
+ - $ref: i2c-mux.yaml
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - i2c-parent
+ - mux-gpios
+
+examples:
+ - |
+ i2cmux {
+ compatible = "i2c-mux-gpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ mux-gpios = <&gpio1 22 0>, <&gpio1 23 0>;
+ i2c-parent = <&i2c1>;
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ssd1307: oled@3c {
+ compatible = "solomon,ssd1307fb-i2c";
+ reg = <0x3c>;
+ pwms = <&pwm 4 3000>;
+ reset-gpios = <&gpio2 7 1>;
+ };
+ };
+
+ i2c@3 {
+ reg = <3>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pca9555: pca9555@20 {
+ compatible = "nxp,pca9555";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x20>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
deleted file mode 100644
index 997a287ed3f6..000000000000
--- a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-Pinctrl-based I2C Bus Mux
-
-This binding describes an I2C bus multiplexer that uses pin multiplexing to
-route the I2C signals, and represents the pin multiplexing configuration
-using the pinctrl device tree bindings.
-
- +-----+ +-----+
- | dev | | dev |
- +------------------------+ +-----+ +-----+
- | SoC | | |
- | /----|------+--------+
- | +---+ +------+ | child bus A, on first set of pins
- | |I2C|---|Pinmux| |
- | +---+ +------+ | child bus B, on second set of pins
- | \----|------+--------+--------+
- | | | | |
- +------------------------+ +-----+ +-----+ +-----+
- | dev | | dev | | dev |
- +-----+ +-----+ +-----+
-
-Required properties:
-- compatible: i2c-mux-pinctrl
-- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
- port is connected to.
-
-Also required are:
-
-* Standard pinctrl properties that specify the pin mux state for each child
- bus. See ../pinctrl/pinctrl-bindings.txt.
-
-* Standard I2C mux properties. See i2c-mux.yaml in this directory.
-
-* I2C child bus nodes. See i2c-mux.yaml in this directory.
-
-For each named state defined in the pinctrl-names property, an I2C child bus
-will be created. I2C child bus numbers are assigned based on the index into
-the pinctrl-names property.
-
-The only exception is that no bus will be created for a state named "idle". If
-such a state is defined, it must be the last entry in pinctrl-names. For
-example:
-
- pinctrl-names = "ddc", "pta", "idle" -> ddc = bus 0, pta = bus 1
- pinctrl-names = "ddc", "idle", "pta" -> Invalid ("idle" not last)
- pinctrl-names = "idle", "ddc", "pta" -> Invalid ("idle" not last)
-
-Whenever an access is made to a device on a child bus, the relevant pinctrl
-state will be programmed into hardware.
-
-If an idle state is defined, whenever an access is not being made to a device
-on a child bus, the idle pinctrl state will be programmed into hardware.
-
-If an idle state is not defined, the most recently used pinctrl state will be
-left programmed into hardware whenever no access is being made of a device on
-a child bus.
-
-Example:
-
- i2cmux {
- compatible = "i2c-mux-pinctrl";
- #address-cells = <1>;
- #size-cells = <0>;
-
- i2c-parent = <&i2c1>;
-
- pinctrl-names = "ddc", "pta", "idle";
- pinctrl-0 = <&state_i2cmux_ddc>;
- pinctrl-1 = <&state_i2cmux_pta>;
- pinctrl-2 = <&state_i2cmux_idle>;
-
- i2c@0 {
- reg = <0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- eeprom {
- compatible = "eeprom";
- reg = <0x50>;
- };
- };
-
- i2c@1 {
- reg = <1>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- eeprom {
- compatible = "eeprom";
- reg = <0x50>;
- };
- };
- };
-
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.yaml b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.yaml
new file mode 100644
index 000000000000..2e3d555eb96c
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.yaml
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/i2c-mux-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Pinctrl-based I2C Bus Mux
+
+maintainers:
+ - Wolfram Sang <wsa@kernel.org>
+
+description: |
+ This binding describes an I2C bus multiplexer that uses pin multiplexing to route the I2C
+ signals, and represents the pin multiplexing configuration using the pinctrl device tree
+ bindings.
+
+ +-----+ +-----+
+ | dev | | dev |
+ +------------------------+ +-----+ +-----+
+ | SoC | | |
+ | /----|------+--------+
+ | +---+ +------+ | child bus A, on first set of pins
+ | |I2C|---|Pinmux| |
+ | +---+ +------+ | child bus B, on second set of pins
+ | \----|------+--------+--------+
+ | | | | |
+ +------------------------+ +-----+ +-----+ +-----+
+ | dev | | dev | | dev |
+ +-----+ +-----+ +-----+
+
+ For each named state defined in the pinctrl-names property, an I2C child bus will be created.
+ I2C child bus numbers are assigned based on the index into the pinctrl-names property.
+
+ The only exception is that no bus will be created for a state named "idle". If such a state is
+ defined, it must be the last entry in pinctrl-names. For example:
+
+ pinctrl-names = "ddc", "pta", "idle" -> ddc = bus 0, pta = bus 1
+ pinctrl-names = "ddc", "idle", "pta" -> Invalid ("idle" not last)
+ pinctrl-names = "idle", "ddc", "pta" -> Invalid ("idle" not last)
+
+ Whenever an access is made to a device on a child bus, the relevant pinctrl state will be
+ programmed into hardware.
+
+ If an idle state is defined, whenever an access is not being made to a device on a child bus,
+ the idle pinctrl state will be programmed into hardware.
+
+ If an idle state is not defined, the most recently used pinctrl state will be left programmed
+ into hardware whenever no access is being made to a device on a child bus.
+
+properties:
+ compatible:
+ const: i2c-mux-pinctrl
+
+ i2c-parent:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of the I2C bus that this multiplexer's master-side port is connected
+ to.
+
+allOf:
+ - $ref: i2c-mux.yaml
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - i2c-parent
+
+examples:
+ - |
+ i2cmux {
+ compatible = "i2c-mux-pinctrl";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ i2c-parent = <&i2c1>;
+
+ pinctrl-names = "ddc", "pta", "idle";
+ pinctrl-0 = <&state_i2cmux_ddc>;
+ pinctrl-1 = <&state_i2cmux_pta>;
+ pinctrl-2 = <&state_i2cmux_idle>;
+
+ i2c@0 {
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ };
+ };
+
+ i2c@1 {
+ reg = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@50 {
+ compatible = "atmel,24c02";
+ reg = <0x50>;
+ };
+ };
+ };
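
The state-to-bus mapping in this example, spelled out (an annotated fragment of the same node, nothing new):

    /* pinctrl-names index -> child bus:
     *   0: "ddc"  -> i2c@0
     *   1: "pta"  -> i2c@1
     *   2: "idle" -> no bus; programmed while the mux is idle
     */
    pinctrl-names = "ddc", "pta", "idle";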
diff --git a/Documentation/devicetree/bindings/i2c/nvidia,tegra186-bpmp-i2c.txt b/Documentation/devicetree/bindings/i2c/nvidia,tegra186-bpmp-i2c.txt
deleted file mode 100644
index ab240e10debc..000000000000
--- a/Documentation/devicetree/bindings/i2c/nvidia,tegra186-bpmp-i2c.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-NVIDIA Tegra186 BPMP I2C controller
-
-In Tegra186, the BPMP (Boot and Power Management Processor) owns certain HW
-devices, such as the I2C controller for the power management I2C bus. Software
-running on other CPUs must perform IPC to the BPMP in order to execute
-transactions on that I2C bus. This binding describes an I2C bus that is
-accessed in such a fashion.
-
-The BPMP I2C node must be located directly inside the main BPMP node. See
-../firmware/nvidia,tegra186-bpmp.txt for details of the BPMP binding.
-
-This node represents an I2C controller. See ../i2c/i2c.txt for details of the
-core I2C binding.
-
-Required properties:
-- compatible:
- Array of strings.
- One of:
- - "nvidia,tegra186-bpmp-i2c".
-- #address-cells: Address cells for I2C device address.
- Single-cell integer.
- Must be <1>.
-- #size-cells:
- Single-cell integer.
- Must be <0>.
-- nvidia,bpmp-bus-id:
- Single-cell integer.
- Indicates the I2C bus number this DT node represent, as defined by the
- BPMP firmware.
-
-Example:
-
-bpmp {
- ...
-
- i2c {
- compatible = "nvidia,tegra186-bpmp-i2c";
- #address-cells = <1>;
- #size-cells = <0>;
- nvidia,bpmp-bus-id = <5>;
- };
-};
diff --git a/Documentation/devicetree/bindings/i2c/nvidia,tegra186-bpmp-i2c.yaml b/Documentation/devicetree/bindings/i2c/nvidia,tegra186-bpmp-i2c.yaml
new file mode 100644
index 000000000000..b8319dcf3d8a
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/nvidia,tegra186-bpmp-i2c.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/nvidia,tegra186-bpmp-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra186 (and later) BPMP I2C controller
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+
+description: |
+ In Tegra186 and later, the BPMP (Boot and Power Management Processor)
+ owns certain HW devices, such as the I2C controller for the power
+ management I2C bus. Software running on other CPUs must perform IPC to
+ the BPMP in order to execute transactions on that I2C bus. This
+ binding describes an I2C bus that is accessed in such a fashion.
+
+ The BPMP I2C node must be located directly inside the main BPMP node.
+ See ../firmware/nvidia,tegra186-bpmp.yaml for details of the BPMP
+ binding.
+
+ This node represents an I2C controller. See ../i2c/i2c.txt for details
+ of the core I2C binding.
+
+properties:
+ compatible:
+ const: nvidia,tegra186-bpmp-i2c
+
+ nvidia,bpmp-bus-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Indicates the I2C bus number this DT node represents,
+ as defined by the BPMP firmware.
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - "#address-cells"
+ - "#size-cells"
+ - nvidia,bpmp-bus-id
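
The schema itself carries no example; a usage sketch equivalent to the one in the removed text binding (the surrounding BPMP node contents are elided, as in the original):

    bpmp {
        /* ... */

        i2c {
            compatible = "nvidia,tegra186-bpmp-i2c";
            #address-cells = <1>;
            #size-cells = <0>;
            nvidia,bpmp-bus-id = <5>;
        };
    };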
diff --git a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt
deleted file mode 100644
index 3f2f990c2e62..000000000000
--- a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-NVIDIA Tegra20/Tegra30/Tegra114 I2C controller driver.
-
-Required properties:
-- compatible : For Tegra20, must be one of "nvidia,tegra20-i2c-dvc" or
- "nvidia,tegra20-i2c". For Tegra30, must be "nvidia,tegra30-i2c".
- For Tegra114, must be "nvidia,tegra114-i2c". Otherwise, must be
- "nvidia,<chip>-i2c", plus at least one of the above, where <chip> is
- tegra124, tegra132, or tegra210.
- Details of compatible are as follows:
- nvidia,tegra20-i2c-dvc: Tegra20 has specific I2C controller called as DVC I2C
- controller. This only support master mode of I2C communication. Register
- interface/offset and interrupts handling are different than generic I2C
- controller. Driver of DVC I2C controller is only compatible with
- "nvidia,tegra20-i2c-dvc".
- nvidia,tegra20-i2c: Tegra20 has 4 generic I2C controller. This can support
- master and slave mode of I2C communication. The i2c-tegra driver only
- support master mode of I2C communication. Driver of I2C controller is
- only compatible with "nvidia,tegra20-i2c".
- nvidia,tegra30-i2c: Tegra30 has 5 generic I2C controller. This controller is
- very much similar to Tegra20 I2C controller with additional feature:
- Continue Transfer Support. This feature helps to implement M_NO_START
- as per I2C core API transfer flags. Driver of I2C controller is
- compatible with "nvidia,tegra30-i2c" to enable the continue transfer
- support. This is also compatible with "nvidia,tegra20-i2c" without
- continue transfer support.
- nvidia,tegra114-i2c: Tegra114 has 5 generic I2C controller. This controller is
- very much similar to Tegra30 I2C controller with some hardware
- modification:
- - Tegra30/Tegra20 I2C controller has 2 clock source called div-clk and
- fast-clk. Tegra114 has only one clock source called as div-clk and
- hence clock mechanism is changed in I2C controller.
- - Tegra30/Tegra20 I2C controller has enabled per packet transfer by
- default and there is no way to disable it. Tegra114 has this
- interrupt disable by default and SW need to enable explicitly.
- Due to above changes, Tegra114 I2C driver makes incompatible with
- previous hardware driver. Hence, tegra114 I2C controller is compatible
- with "nvidia,tegra114-i2c".
- nvidia,tegra210-i2c-vi: Tegra210 has one I2C controller that is on host1x bus
- and is part of VE power domain and typically used for camera use-cases.
- This VI I2C controller is mostly compatible with the programming model
- of the regular I2C controllers with a few exceptions. The I2C registers
- start at an offset of 0xc00 (instead of 0), registers are 16 bytes
- apart (rather than 4) and the controller does not support slave mode.
-- reg: Should contain I2C controller registers physical address and length.
-- interrupts: Should contain I2C controller interrupts.
-- address-cells: Address cells for I2C device address.
-- size-cells: Size of the I2C device address.
-- clocks: Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names: Must include the following entries:
- Tegra20/Tegra30:
- - div-clk
- - fast-clk
- Tegra114:
- - div-clk
- Tegra210:
- - div-clk
- - slow (only for nvidia,tegra210-i2c-vi compatible node)
-- resets: Must contain an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
-- reset-names: Must include the following entries:
- - i2c
-- power-domains: Only for nvidia,tegra210-i2c-vi compatible node and must
- include venc powergate node as vi i2c is part of VE power domain.
- tegra210-i2c-vi:
- - pd_venc
-- dmas: Must contain an entry for each entry in clock-names.
- See ../dma/dma.txt for details.
-- dma-names: Must include the following entries:
- - rx
- - tx
-
-Example:
-
- i2c@7000c000 {
- compatible = "nvidia,tegra20-i2c";
- reg = <0x7000c000 0x100>;
- interrupts = <0 38 0x04>;
- #address-cells = <1>;
- #size-cells = <0>;
- clocks = <&tegra_car 12>, <&tegra_car 124>;
- clock-names = "div-clk", "fast-clk";
- resets = <&tegra_car 12>;
- reset-names = "i2c";
- dmas = <&apbdma 16>, <&apbdma 16>;
- dma-names = "rx", "tx";
- };
diff --git a/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml
new file mode 100644
index 000000000000..424a4fc218b6
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/nvidia,tegra20-i2c.yaml
@@ -0,0 +1,192 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/i2c/nvidia,tegra20-i2c.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+
+title: NVIDIA Tegra I2C controller driver
+
+properties:
+ compatible:
+ oneOf:
+ - description: Tegra20 has 4 generic I2C controllers. The hardware supports
+ both master and slave modes of I2C communication, but the i2c-tegra
+ driver supports master mode only. The driver binds only against the
+ "nvidia,tegra20-i2c" compatible.
+ const: nvidia,tegra20-i2c
+ - description: Tegra20 has a dedicated I2C controller called the DVC I2C
+ controller. It supports master mode only. Its register interface/offsets
+ and interrupt handling differ from the generic I2C controllers. The DVC
+ controller binds only against the "nvidia,tegra20-i2c-dvc" compatible.
+ - description: |
+ Tegra30 has 5 generic I2C controllers. They are very similar to the
+ Tegra20 controllers, with one additional feature: continue transfer
+ support, which implements the M_NO_START transfer flag of the I2C core
+ API. Binding against "nvidia,tegra30-i2c" enables continue transfer
+ support; the controllers are also compatible with "nvidia,tegra20-i2c",
+ without continue transfer support.
+ items:
+ - const: nvidia,tegra30-i2c
+ - const: nvidia,tegra20-i2c
+ - description: |
+ Tegra114 has 5 generic I2C controllers. They are very similar to the
+ Tegra30 controllers, with some hardware modifications:
+ - The Tegra30/Tegra20 controllers have 2 clock sources, div-clk and
+ fast-clk. Tegra114 has only one clock source, div-clk, so the
+ clocking scheme of the controller changed.
+ - The Tegra30/Tegra20 controllers have the per-packet transfer
+ interrupt enabled by default with no way to disable it. On Tegra114
+ it is disabled by default and software must enable it explicitly.
+ Due to these changes, Tegra114 is incompatible with the earlier
+ driver; hence the Tegra114 controller is compatible with
+ "nvidia,tegra114-i2c".
+ const: nvidia,tegra114-i2c
+ - description: |
+ Tegra124 has 6 generic I2C controllers. These controllers are very
+ similar to those found on Tegra114 but also contain several hardware
+ improvements and new registers.
+ const: nvidia,tegra124-i2c
+ - description: |
+ Tegra210 has 6 generic I2C controllers. These controllers are very
+ similar to those found on Tegra124.
+ items:
+ - const: nvidia,tegra210-i2c
+ - const: nvidia,tegra124-i2c
+ - description: |
+ Tegra210 has one I2C controller that is on host1x bus and is part of
+ the VE power domain and typically used for camera use-cases. This VI
+ I2C controller is mostly compatible with the programming model of
+ the regular I2C controllers with a few exceptions. The I2C registers
+ start at an offset of 0xc00 (instead of 0), registers are 16 bytes
+ apart (rather than 4) and the controller does not support slave
+ mode.
+ const: nvidia,tegra210-i2c-vi
+ - description: |
+ Tegra186 has 9 generic I2C controllers, two of which are in the AON
+ (always-on) partition of the SoC. All of these controllers are very
+ similar to those found on Tegra210.
+ const: nvidia,tegra186-i2c
+ - description: |
+ Tegra194 has 8 generic I2C controllers, two of which are in the AON
+ (always-on) partition of the SoC. All of these controllers are very
+ similar to those found on Tegra186. However, these controllers have
+ support for 64 KiB transactions whereas earlier chips supported no
+ more than 4 KiB per transaction.
+ const: nvidia,tegra194-i2c
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ clocks:
+ minItems: 1
+ maxItems: 2
+
+ clock-names:
+ minItems: 1
+ maxItems: 2
+
+ resets:
+ items:
+ - description: module reset
+
+ reset-names:
+ items:
+ - const: i2c
+
+ dmas:
+ items:
+ - description: DMA channel for the reception FIFO
+ - description: DMA channel for the transmission FIFO
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+allOf:
+ - $ref: /schemas/i2c/i2c-controller.yaml
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - nvidia,tegra20-i2c
+ - nvidia,tegra30-i2c
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: div-clk
+ - const: fast-clk
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: nvidia,tegra114-i2c
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: div-clk
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: nvidia,tegra210-i2c
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: div-clk
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: nvidia,tegra210-i2c-vi
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: div-clk
+ - const: slow
+ power-domains:
+ items:
+ - description: phandle to the VENC power domain
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c@7000c000 {
+ compatible = "nvidia,tegra20-i2c";
+ reg = <0x7000c000 0x100>;
+ interrupts = <0 38 0x04>;
+ clocks = <&tegra_car 12>, <&tegra_car 124>;
+ clock-names = "div-clk", "fast-clk";
+ resets = <&tegra_car 12>;
+ reset-names = "i2c";
+ dmas = <&apbdma 16>, <&apbdma 16>;
+ dma-names = "rx", "tx";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
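
The example covers Tegra20 with its two clocks. A Tegra114-style node, which per the conditionals above takes only div-clk, would look roughly like this (the address, interrupt and clock/reset indices are placeholders, not taken from a real board):

    i2c@7000c000 {
        compatible = "nvidia,tegra114-i2c";
        reg = <0x7000c000 0x100>;
        interrupts = <0 38 0x04>;
        clocks = <&tegra_car 12>;   /* single clock source on Tegra114 */
        clock-names = "div-clk";    /* no fast-clk entry */
        resets = <&tegra_car 12>;
        reset-names = "i2c";
        dmas = <&apbdma 16>, <&apbdma 16>;
        dma-names = "rx", "tx";

        #address-cells = <1>;
        #size-cells = <0>;
    };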
diff --git a/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
index c07289a643d8..46b62e1c9273 100644
--- a/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/st,stm32-i2c.yaml
@@ -112,6 +112,9 @@ examples:
clocks = <&rcc 0 149>;
};
+ - |
+ #include <dt-bindings/mfd/stm32f7-rcc.h>
+ #include <dt-bindings/clock/stm32fx-clock.h>
//Example 2 (with st,stm32f7-i2c compatible)
i2c@40005800 {
compatible = "st,stm32f7-i2c";
@@ -124,6 +127,9 @@ examples:
clocks = <&rcc 1 CLK_I2C1>;
};
+ - |
+ #include <dt-bindings/mfd/stm32f7-rcc.h>
+ #include <dt-bindings/clock/stm32fx-clock.h>
//Example 3 (with st,stm32mp15-i2c compatible on stm32mp)
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/stm32mp1-clks.h>
diff --git a/Documentation/devicetree/bindings/iio/adc/fsl,vf610-adc.yaml b/Documentation/devicetree/bindings/iio/adc/fsl,vf610-adc.yaml
index 1ca571056ea9..925f355cc21f 100644
--- a/Documentation/devicetree/bindings/iio/adc/fsl,vf610-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/fsl,vf610-adc.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: ADC found on Freescale vf610 and similar SoCs
maintainers:
- - Fugang Duan <fugang.duan@nxp.com>
+ - Haibo Chen <haibo.chen@nxp.com>
description:
ADCs found on vf610/i.MX6slx and upward SoCs from Freescale.
diff --git a/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml b/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml
index e759a5da708d..d6d3d8590171 100644
--- a/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/x-powers,axp209-adc.yaml
@@ -27,6 +27,7 @@ description: |
8 | batt_v
9 | batt_chrg_i
10 | batt_dischrg_i
+ 11 | ts_v
AXP22x
------
@@ -34,6 +35,7 @@ description: |
1 | batt_v
2 | batt_chrg_i
3 | batt_dischrg_i
+ 4 | ts_v
AXP813
------
@@ -42,6 +44,7 @@ description: |
2 | batt_v
3 | batt_chrg_i
4 | batt_dischrg_i
+ 5 | ts_v
properties:
diff --git a/Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml b/Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml
new file mode 100644
index 000000000000..87992db389b2
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml
@@ -0,0 +1,227 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/adc/xlnx,zynqmp-ams.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx Zynq Ultrascale AMS controller
+
+maintainers:
+ - Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+
+description: |
+ The AMS (Analog Monitoring System) includes an ADC as well as on-chip sensors
+ that can be used to sample external voltages and monitor on-die operating
+ conditions, such as temperature and supply voltage levels.
+ The AMS has two SYSMON blocks which are PL (Programmable Logic) SYSMON and
+ PS (Processing System) SYSMON.
+ All designs include the AMS registers, but the PS and PL blocks are
+ optional: the AMS controller can operate with only PS, only PL, or both.
+ Specify register regions according to your design. The devicetree must
+ always contain the AMS node itself; the PS and PL child nodes are optional.
+
+ AMS Channel Details
+ ```````````````````
+ Sysmon Block |Channel| Details |Measurement
+ |Number | |Type
+ ---------------------------------------------------------------------------------------------------------
+ AMS CTRL |0 |System PLLs voltage measurement, VCC_PSPLL. |Voltage
+ |1 |Battery voltage measurement, VCC_PSBATT. |Voltage
+ |2 |PL Internal voltage measurement, VCCINT. |Voltage
+ |3 |Block RAM voltage measurement, VCCBRAM. |Voltage
+ |4 |PL Aux voltage measurement, VCCAUX. |Voltage
+ |5 |Voltage measurement for six DDR I/O PLLs, VCC_PSDDR_PLL. |Voltage
+ |6 |VCC_PSINTFP_DDR voltage measurement. |Voltage
+ ---------------------------------------------------------------------------------------------------------
+ PS Sysmon |7 |LPD temperature measurement. |Temperature
+ |8 |FPD temperature measurement (REMOTE). |Temperature
+ |9 |VCC PS LPD voltage measurement (supply1). |Voltage
+ |10 |VCC PS FPD voltage measurement (supply2). |Voltage
+ |11 |PS Aux voltage reference (supply3). |Voltage
+ |12 |DDR I/O VCC voltage measurement. |Voltage
+ |13 |PS IO Bank 503 voltage measurement (supply5). |Voltage
+ |14 |PS IO Bank 500 voltage measurement (supply6). |Voltage
+ |15 |VCCO_PSIO1 voltage measurement. |Voltage
+ |16 |VCCO_PSIO2 voltage measurement. |Voltage
+ |17 |VCC_PS_GTR voltage measurement (VPS_MGTRAVCC). |Voltage
+ |18 |VTT_PS_GTR voltage measurement (VPS_MGTRAVTT). |Voltage
+ |19 |VCC_PSADC voltage measurement. |Voltage
+ ---------------------------------------------------------------------------------------------------------
+ PL Sysmon |20 |PL temperature measurement. |Temperature
+ |21 |PL Internal voltage measurement, VCCINT. |Voltage
+ |22 |PL Auxiliary voltage measurement, VCCAUX. |Voltage
+ |23 |ADC Reference P+ voltage measurement. |Voltage
+ |24 |ADC Reference N- voltage measurement. |Voltage
+ |25 |PL Block RAM voltage measurement, VCCBRAM. |Voltage
+ |26 |LPD Internal voltage measurement, VCC_PSINTLP (supply4). |Voltage
+ |27 |FPD Internal voltage measurement, VCC_PSINTFP (supply5). |Voltage
+ |28 |PS Auxiliary voltage measurement (supply6). |Voltage
+ |29 |PL VCCADC voltage measurement (vccams). |Voltage
+ |30 |Differential analog input signal voltage measurement. |Voltage
+ |31 |VUser0 voltage measurement (supply7). |Voltage
+ |32 |VUser1 voltage measurement (supply8). |Voltage
+ |33 |VUser2 voltage measurement (supply9). |Voltage
+ |34 |VUser3 voltage measurement (supply10). |Voltage
+ |35 |Auxiliary ch 0 voltage measurement (VAux0). |Voltage
+ |36 |Auxiliary ch 1 voltage measurement (VAux1). |Voltage
+ |37 |Auxiliary ch 2 voltage measurement (VAux2). |Voltage
+ |38 |Auxiliary ch 3 voltage measurement (VAux3). |Voltage
+ |39 |Auxiliary ch 4 voltage measurement (VAux4). |Voltage
+ |40 |Auxiliary ch 5 voltage measurement (VAux5). |Voltage
+ |41 |Auxiliary ch 6 voltage measurement (VAux6). |Voltage
+ |42 |Auxiliary ch 7 voltage measurement (VAux7). |Voltage
+ |43 |Auxiliary ch 8 voltage measurement (VAux8). |Voltage
+ |44 |Auxiliary ch 9 voltage measurement (VAux9). |Voltage
+ |45 |Auxiliary ch 10 voltage measurement (VAux10). |Voltage
+ |46 |Auxiliary ch 11 voltage measurement (VAux11). |Voltage
+ |47 |Auxiliary ch 12 voltage measurement (VAux12). |Voltage
+ |48 |Auxiliary ch 13 voltage measurement (VAux13). |Voltage
+ |49 |Auxiliary ch 14 voltage measurement (VAux14). |Voltage
+ |50 |Auxiliary ch 15 voltage measurement (VAux15). |Voltage
+ --------------------------------------------------------------------------------------------------------
+
+properties:
+ compatible:
+ enum:
+ - xlnx,zynqmp-ams
+
+ interrupts:
+ maxItems: 1
+
+ reg:
+ description: AMS Controller register space
+ maxItems: 1
+
+ ranges:
+ description:
+ Maps the child address space for PS and/or PL.
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 1
+
+ '#io-channel-cells':
+ const: 1
+
+ ams-ps@0:
+ type: object
+ description: |
+ PS (Processing System) SYSMON is memory mapped to the PS. This block has
+ built-in alarm generation logic that is used to interrupt the processor
+ based on the configured conditions.
+
+ properties:
+ compatible:
+ enum:
+ - xlnx,zynqmp-ams-ps
+
+ reg:
+ description: Register Space for PS-SYSMON
+ maxItems: 1
+
+ required:
+ - compatible
+ - reg
+
+ additionalProperties: false
+
+ ams-pl@400:
+ type: object
+ description:
+ PL-SYSMON is capable of monitoring off-chip voltage and temperature.
+ The PL-SYSMON block has DRP, JTAG and I2C interfaces for monitoring from
+ an external master; of these, only DRP is currently supported. This
+ block has alarm generation logic that is used to interrupt the
+ processor based on the configured conditions.
+
+ properties:
+ compatible:
+ items:
+ - enum:
+ - xlnx,zynqmp-ams-pl
+
+ reg:
+ description: Register Space for PL-SYSMON.
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ patternProperties:
+ "^channel@([2-4][0-9]|50)$":
+ type: object
+ description:
+ Describes the external channels connected.
+
+ properties:
+ reg:
+ description:
+ Pair of pins the channel is connected to. This value is the
+ same as the channel number of the given channel.
+ minimum: 20
+ maximum: 50
+
+ xlnx,bipolar:
+ $ref: /schemas/types.yaml#/definitions/flag
+ type: boolean
+ description:
+ Set this flag if the channel is used in bipolar mode.
+
+ required:
+ - reg
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ xilinx_ams: ams@ffa50000 {
+ compatible = "xlnx,zynqmp-ams";
+ interrupt-parent = <&gic>;
+ interrupts = <0 56 4>;
+ reg = <0x0 0xffa50000 0x0 0x800>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ #io-channel-cells = <1>;
+ ranges = <0 0 0xffa50800 0x800>;
+
+ ams_ps: ams-ps@0 {
+ compatible = "xlnx,zynqmp-ams-ps";
+ reg = <0 0x400>;
+ };
+
+ ams_pl: ams-pl@400 {
+ compatible = "xlnx,zynqmp-ams-pl";
+ reg = <0x400 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ channel@30 {
+ reg = <30>;
+ xlnx,bipolar;
+ };
+ channel@31 {
+ reg = <31>;
+ };
+ channel@38 {
+ reg = <38>;
+ xlnx,bipolar;
+ };
+ };
+ };
+ };
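
Since the AMS node is an IIO provider with #io-channel-cells = <1>, consumers select inputs by the channel numbers in the table above. A hypothetical iio-hwmon consumer (the choice of channels is an assumption):

    iio-hwmon {
        compatible = "iio-hwmon";
        io-channels = <&xilinx_ams 7>,  /* channel 7: LPD temperature */
                      <&xilinx_ams 9>;  /* channel 9: VCC PS LPD (supply1) */
    };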
diff --git a/Documentation/devicetree/bindings/iio/addac/adi,ad74413r.yaml b/Documentation/devicetree/bindings/iio/addac/adi,ad74413r.yaml
new file mode 100644
index 000000000000..baa65a521bad
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/addac/adi,ad74413r.yaml
@@ -0,0 +1,158 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/addac/adi,ad74413r.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD74412R/AD74413R device
+
+maintainers:
+ - Cosmin Tanislav <cosmin.tanislav@analog.com>
+
+description: |
+ The AD74412R and AD74413R are quad-channel software configurable input/output
+ solutions for building and process control applications. They contain
+ functionality for analog output, analog input, digital input, resistance
+ temperature detector, and thermocouple measurements integrated
+ into a single chip solution with an SPI interface.
+ The devices feature a 16-bit ADC and four configurable 13-bit DACs to provide
+ four configurable input/output channels and a suite of diagnostic functions.
+ The AD74413R differentiates itself from the AD74412R by being HART-compatible.
+ https://www.analog.com/en/products/ad74412r.html
+ https://www.analog.com/en/products/ad74413r.html
+
+properties:
+ compatible:
+ enum:
+ - adi,ad74412r
+ - adi,ad74413r
+
+ reg:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ spi-max-frequency:
+ maximum: 1000000
+
+ spi-cpol: true
+
+ interrupts:
+ maxItems: 1
+
+ refin-supply: true
+
+ shunt-resistor-micro-ohms:
+ description:
+ Shunt (sense) resistor value in micro-Ohms.
+ default: 100000000
+
+required:
+ - compatible
+ - reg
+ - spi-max-frequency
+ - spi-cpol
+ - refin-supply
+
+additionalProperties: false
+
+patternProperties:
+ "^channel@[0-3]$":
+ type: object
+ description: Represents the external channels which are connected to the device.
+
+ properties:
+ reg:
+ description: |
+ The channel number. It can have up to 4 channels numbered from 0 to 3.
+ minimum: 0
+ maximum: 3
+
+ adi,ch-func:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ Channel function.
+ HART functions are not supported on AD74412R.
+ 0 - CH_FUNC_HIGH_IMPEDANCE
+ 1 - CH_FUNC_VOLTAGE_OUTPUT
+ 2 - CH_FUNC_CURRENT_OUTPUT
+ 3 - CH_FUNC_VOLTAGE_INPUT
+ 4 - CH_FUNC_CURRENT_INPUT_EXT_POWER
+ 5 - CH_FUNC_CURRENT_INPUT_LOOP_POWER
+ 6 - CH_FUNC_RESISTANCE_INPUT
+ 7 - CH_FUNC_DIGITAL_INPUT_LOGIC
+ 8 - CH_FUNC_DIGITAL_INPUT_LOOP_POWER
+ 9 - CH_FUNC_CURRENT_INPUT_EXT_POWER_HART
+ 10 - CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART
+ minimum: 0
+ maximum: 10
+ default: 0
+
+ adi,gpo-comparator:
+ type: boolean
+ description: |
+ Whether to configure GPO as a comparator or not.
+ When not configured as a comparator, the GPO will be treated as an
+ output-only GPIO.
+
+ required:
+ - reg
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/iio/addac/adi,ad74413r.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cs-gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
+ status = "okay";
+
+ ad74413r@0 {
+ compatible = "adi,ad74413r";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ spi-cpol;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ interrupt-parent = <&gpio>;
+ interrupts = <26 IRQ_TYPE_EDGE_FALLING>;
+
+ refin-supply = <&ad74413r_refin>;
+
+ channel@0 {
+ reg = <0>;
+
+ adi,ch-func = <CH_FUNC_VOLTAGE_OUTPUT>;
+ };
+
+ channel@1 {
+ reg = <1>;
+
+ adi,ch-func = <CH_FUNC_CURRENT_OUTPUT>;
+ };
+
+ channel@2 {
+ reg = <2>;
+
+ adi,ch-func = <CH_FUNC_DIGITAL_INPUT_LOGIC>;
+ adi,gpo-comparator;
+ };
+
+ channel@3 {
+ reg = <3>;
+
+ adi,ch-func = <CH_FUNC_CURRENT_INPUT_EXT_POWER>;
+ };
+ };
+ };
+...
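
A minimal AD74412R variant showing the top-level shunt resistor property — a sketch only; the regulator phandle and the 50 Ohm value are assumptions (the property defaults to 100000000, i.e. 100 Ohm, when absent):

    ad74412r@0 {
        compatible = "adi,ad74412r";
        reg = <0>;
        spi-max-frequency = <1000000>;
        spi-cpol;
        refin-supply = <&ad74412r_refin>;
        /* assumed 50 Ohm external sense resistor instead of the 100 Ohm default */
        shunt-resistor-micro-ohms = <50000000>;
    };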
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
new file mode 100644
index 000000000000..501a463e5d88
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad3552r.yaml
@@ -0,0 +1,217 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2020 Analog Devices Inc.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/dac/adi,ad3552r.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Analog Devices AD3552R DAC device driver
+
+maintainers:
+ - Mihail Chindris <mihail.chindris@analog.com>
+
+description: |
+ Bindings for the Analog Devices AD3552R DAC device and similar.
+ Datasheet can be found here:
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad3542r.pdf
+ https://www.analog.com/media/en/technical-documentation/data-sheets/ad3552r.pdf
+
+properties:
+ compatible:
+ enum:
+ - adi,ad3542r
+ - adi,ad3552r
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 30000000
+
+ reset-gpios:
+ maxItems: 1
+
+ ldac-gpios:
+ description: |
+ LDAC pin to be used as a hardware trigger to update the DAC channels.
+ maxItems: 1
+
+ vref-supply:
+ description:
+ The regulator to use as an external reference. If it does not exist,
+ the internal reference will be used. The external reference must be 2.5V.
+
+ adi,vref-out-en:
+ description: Vref I/O driven by internal vref to 2.5V. If not set, Vref pin
+ will be floating.
+ type: boolean
+
+ adi,sdo-drive-strength:
+ description: |
+ Configure SDIO0 and SDIO1 strength levels:
+ - 0: low SDO drive strength.
+ - 1: medium low SDO drive strength.
+ - 2: medium high SDO drive strength.
+ - 3: high SDO drive strength
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+patternProperties:
+ "^channel@([0-1])$":
+ type: object
+ description: Configurations of the DAC Channels
+
+ additionalProperties: false
+
+ properties:
+ reg:
+ description: Channel number
+ enum: [0, 1]
+
+ adi,output-range-microvolt: true
+
+ custom-output-range-config:
+ type: object
+ description: Configuration of custom range when
+ adi,output-range-microvolt is not present.
+ The formulas for calculating the output voltages are
+ Vout_fs = 2.5 + [(GainN + Offset/1024) * 2.5 * Rfbx * 1.03]
+ Vout_zs = 2.5 - [(GainP + Offset/1024) * 2.5 * Rfbx * 1.03]
+
+ properties:
+ adi,gain-offset:
+ description: Gain offset used in the above formula
+ $ref: /schemas/types.yaml#/definitions/int32
+ maximum: 511
+ minimum: -511
+
+ adi,gain-scaling-p-inv-log2:
+ description: GainP = 1 / ( 2 ^ adi,gain-scaling-p-inv-log2)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+
+ adi,gain-scaling-n-inv-log2:
+ description: GainN = 1 / ( 2 ^ adi,gain-scaling-n-inv-log2)
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2, 3]
+
+ adi,rfb-ohms:
+ description: Feedback resistor value in Ohms
+
+ required:
+ - adi,gain-offset
+ - adi,gain-scaling-p-inv-log2
+ - adi,gain-scaling-n-inv-log2
+ - adi,rfb-ohms
+
+ required:
+ - reg
+
+ oneOf:
+ # If adi,output-range-microvolt is missing,
+ # custom-output-range-config must be used
+ - required:
+ - adi,output-range-microvolt
+
+ - required:
+ - custom-output-range-config
+
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: adi,ad3542r
+ then:
+ patternProperties:
+ "^channel@([0-1])$":
+ type: object
+ properties:
+ adi,output-range-microvolt:
+ description: |
+ Voltage output range of the channel as <minimum, maximum>
+ Required connections:
+ Rfb1x for: 0 to 2.5 V; 0 to 3V; 0 to 5 V;
+ Rfb2x for: 0 to 10 V; 2.5 to 7.5V; -5 to 5 V;
+ oneOf:
+ - items:
+ - const: 0
+ - enum: [2500000, 3000000, 5000000, 10000000]
+ - items:
+ - const: -2500000
+ - const: 7500000
+ - items:
+ - const: -5000000
+ - const: 5000000
+
+ required:
+ - adi,output-range-microvolt
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: adi,ad3552r
+ then:
+ patternProperties:
+ "^channel@([0-1])$":
+ type: object
+ properties:
+ adi,output-range-microvolt:
+ description: |
+ Voltage output range of the channel as <minimum, maximum>
+ Required connections:
+ Rfb1x for: 0 to 2.5 V; 0 to 5 V;
+ Rfb2x for: 0 to 10 V; -5 to 5 V;
+ Rfb4x for: -10 to 10V
+ oneOf:
+ - items:
+ - const: 0
+ - enum: [2500000, 5000000, 10000000]
+ - items:
+ - const: -5000000
+ - const: 5000000
+ - items:
+ - const: -10000000
+ - const: 10000000
+
+required:
+ - compatible
+ - reg
+ - spi-max-frequency
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ad3552r@0 {
+ compatible = "adi,ad3552r";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ channel@0 {
+ reg = <0>;
+ adi,output-range-microvolt = <0 10000000>;
+ };
+ channel@1 {
+ reg = <1>;
+ custom-output-range-config {
+ adi,gain-offset = <5>;
+ adi,gain-scaling-p-inv-log2 = <1>;
+ adi,gain-scaling-n-inv-log2 = <2>;
+ adi,rfb-ohms = <1>;
+ };
+ };
+ };
+ };
+...
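
Working the output-range formulas with the channel@1 values from the example above (annotated copy of the same node; the arithmetic takes the example's 1 Ohm adi,rfb-ohms at face value):

    custom-output-range-config {
        /* GainP = 1/2^1 = 0.5, GainN = 1/2^2 = 0.25, Offset = 5, Rfb = 1 Ohm
         * Vout_fs = 2.5 + (0.25 + 5/1024) * 2.5 * 1 * 1.03 ~= 3.16 V
         * Vout_zs = 2.5 - (0.50 + 5/1024) * 2.5 * 1 * 1.03 ~= 1.20 V
         */
        adi,gain-offset = <5>;
        adi,gain-scaling-p-inv-log2 = <1>;
        adi,gain-scaling-n-inv-log2 = <2>;
        adi,rfb-ohms = <1>;
    };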
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5755.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5755.yaml
index be419ac46caa..f866b88e1440 100644
--- a/Documentation/devicetree/bindings/iio/dac/adi,ad5755.yaml
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5755.yaml
@@ -125,7 +125,6 @@ oneOf:
examples:
- |
- #include <dt-bindings/iio/adi,ad5592r.h>
spi {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad7293.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad7293.yaml
new file mode 100644
index 000000000000..5ee80bf6aa11
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/adi,ad7293.yaml
@@ -0,0 +1,61 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/dac/adi,ad7293.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AD7293 12-Bit Power Amplifier Current Controller with ADC,
+ DACs, Temperature and Current Sensors
+
+maintainers:
+ - Antoniu Miclaus <antoniu.miclaus@analog.com>
+
+description: |
+ Power Amplifier drain current controller containing functionality
+ for general-purpose monitoring and control of current, voltage,
+ and temperature, integrated into a single chip solution with an
+ SPI-compatible interface.
+
+ https://www.analog.com/en/products/ad7293.html
+
+properties:
+ compatible:
+ enum:
+ - adi,ad7293
+
+ avdd-supply: true
+
+ vdrive-supply: true
+
+ reset-gpios:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 1000000
+
+required:
+ - compatible
+ - reg
+ - avdd-supply
+ - vdrive-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ ad7293@0 {
+ compatible = "adi,ad7293";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ avdd-supply = <&avdd>;
+ vdrive-supply = <&vdrive>;
+ reset-gpios = <&gpio 10 0>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/filter/adi,admv8818.yaml b/Documentation/devicetree/bindings/iio/filter/adi,admv8818.yaml
new file mode 100644
index 000000000000..b77e855bd594
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/filter/adi,admv8818.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/filter/adi,admv8818.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ADMV8818 Digitally Tunable, High-Pass and Low-Pass Filter
+
+maintainers:
+ - Antoniu Miclaus <antoniu.miclaus@analog.com>
+
+description: |
+ Fully monolithic microwave integrated circuit (MMIC) that
+ features a digitally selectable frequency of operation.
+ The device features four independently controlled high-pass
+ filters (HPFs) and four independently controlled low-pass filters
+ (LPFs) that span the 2 GHz to 18 GHz frequency range.
+
+ https://www.analog.com/en/products/admv8818.html
+
+properties:
+ compatible:
+ enum:
+ - adi,admv8818
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 10000000
+
+ clocks:
+ description:
+ Definition of the external clock.
+ minItems: 1
+
+ clock-names:
+ items:
+ - const: rf_in
+
+ clock-output-names:
+ maxItems: 1
+
+ '#clock-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ admv8818@0 {
+ compatible = "adi,admv8818";
+ reg = <0>;
+ spi-max-frequency = <10000000>;
+ clocks = <&admv8818_rfin>;
+ clock-names = "rf_in";
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/frequency/adi,admv1013.yaml b/Documentation/devicetree/bindings/iio/frequency/adi,admv1013.yaml
new file mode 100644
index 000000000000..23f1f3b55abb
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/frequency/adi,admv1013.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/iio/frequency/adi,admv1013.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ADMV1013 Microwave Upconverter
+
+maintainers:
+ - Antoniu Miclaus <antoniu.miclaus@analog.com>
+
+description: |
+ Wideband, microwave upconverter optimized for point to point microwave
+ radio designs operating in the 24 GHz to 44 GHz frequency range.
+
+ https://www.analog.com/en/products/admv1013.html
+
+properties:
+ compatible:
+ enum:
+ - adi,admv1013
+
+ reg:
+ maxItems: 1
+
+ spi-max-frequency:
+ maximum: 1000000
+
+ clocks:
+ description:
+ Definition of the external clock.
+ minItems: 1
+
+ clock-names:
+ items:
+ - const: lo_in
+
+ vcm-supply:
+ description:
+ Analog voltage regulator.
+
+ adi,detector-enable:
+ description:
+ Enable the Envelope Detector available at output pins VENV_P and
+ VENV_N. Disable to reduce power consumption.
+ type: boolean
+
+ adi,input-mode:
+ description:
+ Select the input mode.
+ iq - in-phase quadrature (I/Q) input
+ if - complex intermediate frequency (IF) input
+ enum: [iq, if]
+
+ adi,quad-se-mode:
+ description:
+ Switch the LO path from differential to single-ended operation.
+ se-neg - Single-Ended Mode, Negative Side Disabled.
+ se-pos - Single-Ended Mode, Positive Side Disabled.
+ diff - Differential Mode.
+ enum: [se-neg, se-pos, diff]
+
+ '#clock-cells':
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - vcm-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ admv1013@0 {
+ compatible = "adi,admv1013";
+ reg = <0>;
+ spi-max-frequency = <1000000>;
+ clocks = <&admv1013_lo>;
+ clock-names = "lo_in";
+ vcm-supply = <&vcm>;
+ adi,quad-se-mode = "diff";
+ adi,detector-enable;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml
index 7e2accc3d5ce..d1a6103fc37a 100644
--- a/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml
+++ b/Documentation/devicetree/bindings/iio/gyroscope/invensense,mpu3050.yaml
@@ -61,7 +61,7 @@ examples:
#size-cells = <0>;
magnetometer@c {
- compatible = "ak,ak8975";
+ compatible = "asahi-kasei,ak8975";
reg = <0x0c>;
};
};
diff --git a/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml b/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
index edbc2921aabd..d69595a524c1 100644
--- a/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/invensense,mpu6050.yaml
@@ -95,7 +95,7 @@ examples:
#address-cells = <1>;
#size-cells = <0>;
magnetometer@c {
- compatible = "ak,ak8975";
+ compatible = "asahi-kasei,ak8975";
reg = <0x0c>;
};
};
diff --git a/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml b/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
index d9b3213318fb..0750f700a143 100644
--- a/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
+++ b/Documentation/devicetree/bindings/iio/imu/st,lsm6dsx.yaml
@@ -61,6 +61,13 @@ properties:
type: boolean
description: enable/disable internal i2c controller pullup resistors.
+ st,disable-sensor-hub:
+ type: boolean
+ description:
+ Disable the internal i2c controller slave autoprobing at bootstrap.
+ Disabling the sensor hub is useful if the i2c controller clock/data
+ lines are connected through a pull-up with other chip lines (e.g. SDO/SA0).
+
drive-open-drain:
type: boolean
description:
diff --git a/Documentation/devicetree/bindings/iio/light/liteon,ltr501.yaml b/Documentation/devicetree/bindings/iio/light/liteon,ltr501.yaml
index db0407bc9209..c8074f180a79 100644
--- a/Documentation/devicetree/bindings/iio/light/liteon,ltr501.yaml
+++ b/Documentation/devicetree/bindings/iio/light/liteon,ltr501.yaml
@@ -9,6 +9,9 @@ title: LiteON LTR501 I2C Proximity and Light sensor
maintainers:
- Nikita Travkin <nikita@trvn.ru>
+allOf:
+ - $ref: ../common.yaml#
+
properties:
compatible:
enum:
@@ -25,6 +28,8 @@ properties:
interrupts:
maxItems: 1
+ proximity-near-level: true
+
additionalProperties: false
required:
@@ -42,6 +47,8 @@ examples:
light-sensor@23 {
compatible = "liteon,ltr559";
reg = <0x23>;
+ proximity-near-level = <75>;
+
vdd-supply = <&pm8916_l17>;
vddio-supply = <&pm8916_l6>;
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/yamaha,yas530.yaml b/Documentation/devicetree/bindings/iio/magnetometer/yamaha,yas530.yaml
index 4b0ef1ef5445..9438fffaf0ba 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/yamaha,yas530.yaml
+++ b/Documentation/devicetree/bindings/iio/magnetometer/yamaha,yas530.yaml
@@ -96,7 +96,7 @@ examples:
vdd-supply = <&ldo1_reg>;
iovdd-supply = <&ldo2_reg>;
reset-gpios = <&gpio6 12 GPIO_ACTIVE_LOW>;
- interrupts = <&gpio6 13 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <13 IRQ_TYPE_EDGE_RISING>;
};
};
diff --git a/Documentation/devicetree/bindings/iio/proximity/google,cros-ec-mkbp-proximity.yaml b/Documentation/devicetree/bindings/iio/proximity/google,cros-ec-mkbp-proximity.yaml
index 099b4be927d4..00e3b59641d2 100644
--- a/Documentation/devicetree/bindings/iio/proximity/google,cros-ec-mkbp-proximity.yaml
+++ b/Documentation/devicetree/bindings/iio/proximity/google,cros-ec-mkbp-proximity.yaml
@@ -10,7 +10,6 @@ title: ChromeOS EC MKBP Proximity Sensor
maintainers:
- Stephen Boyd <swboyd@chromium.org>
- Benson Leung <bleung@chromium.org>
- - Enric Balletbo i Serra <enric.balletbo@collabora.com>
description: |
Google's ChromeOS EC sometimes has the ability to detect user proximity.
diff --git a/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml b/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml
index 0f79d9a01c49..722781aa4697 100644
--- a/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml
+++ b/Documentation/devicetree/bindings/iio/temperature/adi,ltc2983.yaml
@@ -448,17 +448,17 @@ examples:
reg = <20>;
adi,sensor-type = <9>; //custom thermocouple
adi,single-ended;
- adi,custom-thermocouple = /bits/ 64
- <(-50220000) 0>,
- <(-30200000) 99100000>,
- <(-5300000) 135400000>,
- <0 273150000>,
- <40200000 361200000>,
- <55300000 522100000>,
- <88300000 720300000>,
- <132200000 811200000>,
- <188700000 922500000>,
- <460400000 1000000000>; //10 pairs
+ adi,custom-thermocouple =
+ /bits/ 64 <(-50220000) 0>,
+ /bits/ 64 <(-30200000) 99100000>,
+ /bits/ 64 <(-5300000) 135400000>,
+ /bits/ 64 <0 273150000>,
+ /bits/ 64 <40200000 361200000>,
+ /bits/ 64 <55300000 522100000>,
+ /bits/ 64 <88300000 720300000>,
+ /bits/ 64 <132200000 811200000>,
+ /bits/ 64 <188700000 922500000>,
+ /bits/ 64 <460400000 1000000000>; //10 pairs
};
};
diff --git a/Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml b/Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml
index 5377b232fa10..e8f137abb03c 100644
--- a/Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml
+++ b/Documentation/devicetree/bindings/input/google,cros-ec-keyb.yaml
@@ -10,7 +10,6 @@ title: ChromeOS EC Keyboard
maintainers:
- Simon Glass <sjg@chromium.org>
- Benson Leung <bleung@chromium.org>
- - Enric Balletbo i Serra <enric.balletbo@collabora.com>
description: |
Google's ChromeOS EC Keyboard is a simple matrix keyboard
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.yaml b/Documentation/devicetree/bindings/input/gpio-keys.yaml
index dbe7ecc19ccb..7fe1966ea28a 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.yaml
+++ b/Documentation/devicetree/bindings/input/gpio-keys.yaml
@@ -88,12 +88,6 @@ patternProperties:
which can be disabled to suppress events from the button.
type: boolean
- pinctrl-0:
- maxItems: 1
-
- pinctrl-names:
- maxItems: 1
-
required:
- linux,code
diff --git a/Documentation/devicetree/bindings/input/pwm-vibrator.txt b/Documentation/devicetree/bindings/input/pwm-vibrator.txt
deleted file mode 100644
index 88c775a3fe21..000000000000
--- a/Documentation/devicetree/bindings/input/pwm-vibrator.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-* PWM vibrator device tree bindings
-
-Registers a PWM device as vibrator. It is expected, that the vibrator's
-strength increases based on the duty cycle of the enable PWM channel
-(100% duty cycle meaning strongest vibration, 0% meaning no vibration).
-
-The binding supports an optional direction PWM channel, that can be
-driven at fixed duty cycle. If available this is can be used to increase
-the vibration effect of some devices.
-
-Required properties:
-- compatible: should contain "pwm-vibrator"
-- pwm-names: Should contain "enable" and optionally "direction"
-- pwms: Should contain a PWM handle for each entry in pwm-names
-
-Optional properties:
-- vcc-supply: Phandle for the regulator supplying power
-- direction-duty-cycle-ns: Duty cycle of the direction PWM channel in
- nanoseconds, defaults to 50% of the channel's
- period.
-
-Example from Motorola Droid 4:
-
-&omap4_pmx_core {
- vibrator_direction_pin: pinmux_vibrator_direction_pin {
- pinctrl-single,pins = <
- OMAP4_IOPAD(0x1ce, PIN_OUTPUT | MUX_MODE1) /* dmtimer8_pwm_evt (gpio_27) */
- >;
- };
-
- vibrator_enable_pin: pinmux_vibrator_enable_pin {
- pinctrl-single,pins = <
- OMAP4_IOPAD(0X1d0, PIN_OUTPUT | MUX_MODE1) /* dmtimer9_pwm_evt (gpio_28) */
- >;
- };
-};
-
-/ {
- pwm8: dmtimer-pwm {
- pinctrl-names = "default";
- pinctrl-0 = <&vibrator_direction_pin>;
-
- compatible = "ti,omap-dmtimer-pwm";
- #pwm-cells = <3>;
- ti,timers = <&timer8>;
- ti,clock-source = <0x01>;
- };
-
- pwm9: dmtimer-pwm {
- pinctrl-names = "default";
- pinctrl-0 = <&vibrator_enable_pin>;
-
- compatible = "ti,omap-dmtimer-pwm";
- #pwm-cells = <3>;
- ti,timers = <&timer9>;
- ti,clock-source = <0x01>;
- };
-
- vibrator {
- compatible = "pwm-vibrator";
- pwms = <&pwm9 0 1000000000 0>,
- <&pwm8 0 1000000000 0>;
- pwm-names = "enable", "direction";
- direction-duty-cycle-ns = <1000000000>;
- };
-};
diff --git a/Documentation/devicetree/bindings/input/pwm-vibrator.yaml b/Documentation/devicetree/bindings/input/pwm-vibrator.yaml
new file mode 100644
index 000000000000..a70a636ee112
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/pwm-vibrator.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/input/pwm-vibrator.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: PWM vibrator
+
+maintainers:
+ - Sebastian Reichel <sre@kernel.org>
+
+description: >
+ Registers a PWM device as a vibrator. It is expected that the vibrator's
+ strength increases with the duty cycle of the enable PWM channel
+ (100% duty cycle meaning strongest vibration, 0% meaning no vibration).
+
+ The binding supports an optional direction PWM channel that can be
+ driven at a fixed duty cycle. If available, this can be used to increase
+ the vibration effect of some devices.
+
+properties:
+ compatible:
+ const: pwm-vibrator
+
+ pwm-names:
+ items:
+ - const: enable
+ - const: direction
+ minItems: 1
+
+ pwms:
+ minItems: 1
+ maxItems: 2
+
+ vcc-supply: true
+
+ direction-duty-cycle-ns:
+ description: >
+ Duty cycle of the direction PWM channel in nanoseconds,
+ defaults to 50% of the channel's period.
+
+required:
+ - compatible
+ - pwm-names
+ - pwms
+
+additionalProperties: false
+
+examples:
+ - |
+ vibrator {
+ compatible = "pwm-vibrator";
+ pwms = <&pwm9 0 1000000000 0>,
+ <&pwm8 0 1000000000 0>;
+ pwm-names = "enable", "direction";
+ direction-duty-cycle-ns = <1000000000>;
+ };
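
Because pwm-names allows a single entry, a board without a direction channel reduces to an enable-only node; a minimal sketch (the &pwm9 handle is carried over from the example, the rest is an assumption):

    vibrator {
        compatible = "pwm-vibrator";
        /* enable channel only; no direction PWM wired on this board */
        pwms = <&pwm9 0 1000000000 0>;
        pwm-names = "enable";
    };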
diff --git a/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml b/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml
new file mode 100644
index 000000000000..b4e5ba7c0b49
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/touchscreen/zinitix,bt400.yaml
@@ -0,0 +1,115 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/input/touchscreen/zinitix,bt400.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Zinitix BT4xx and BT5xx series touchscreen controller bindings
+
+description: The Zinitix BT4xx and BT5xx series of touchscreen controllers
+ are Korea-produced touchscreens with embedded microcontrollers. The
+ BT4xx series was produced 2010-2013 and the BT5xx series 2013-2014.
+
+maintainers:
+ - Michael Srba <Michael.Srba@seznam.cz>
+ - Linus Walleij <linus.walleij@linaro.org>
+
+allOf:
+ - $ref: touchscreen.yaml#
+
+properties:
+ $nodename:
+ pattern: "^touchscreen(@.*)?$"
+
+ compatible:
+ enum:
+ - zinitix,bt402
+ - zinitix,bt403
+ - zinitix,bt404
+ - zinitix,bt412
+ - zinitix,bt413
+ - zinitix,bt431
+ - zinitix,bt432
+ - zinitix,bt531
+ - zinitix,bt532
+ - zinitix,bt538
+ - zinitix,bt541
+ - zinitix,bt548
+ - zinitix,bt554
+ - zinitix,at100
+
+ reg:
+ description: I2C address on the I2C bus
+
+ clock-frequency:
+ description: I2C bus clock frequency used by the host when
+ communicating with the device
+ minimum: 0
+ maximum: 400000
+
+ interrupts:
+ description: Interrupt to host
+ maxItems: 1
+
+ vcca-supply:
+ description: Analog power supply regulator on the VCCA pin
+
+ vdd-supply:
+ description: Digital power supply regulator on the VDD pin.
+ In older device trees this can be the accidental name for the analog
+ supply on the VCCA pin, and in that case the deprecated vddo-supply is
+ used for the digital power supply.
+
+ vddo-supply:
+ description: Deprecated name for the digital power supply, use vdd-supply
+ as this reflects the real name of the pin. If this supply is present,
+ the vdd-supply represents VCCA instead of VDD. Implementers should first
+ check for this property, and if it is present assume that the vdd-supply
+ represents the analog supply.
+ deprecated: true
+
+ reset-gpios:
+ description: Reset line for the touchscreen, should be tagged
+ as GPIO_ACTIVE_LOW
+
+ zinitix,mode:
+ description: Mode of reporting touch points. Some modes may not work
+ with a particular touchscreen firmware for unknown reasons. Available
+ modes are 1 and 2. Mode 2 is the default and preferred.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [1, 2]
+
+ touchscreen-size-x: true
+ touchscreen-size-y: true
+ touchscreen-fuzz-x: true
+ touchscreen-fuzz-y: true
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - touchscreen-size-x
+ - touchscreen-size-y
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ touchscreen@20 {
+ compatible = "zinitix,bt541";
+ reg = <0x20>;
+ interrupt-parent = <&gpio>;
+ interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
+ vcca-supply = <&reg_vcca_tsp>;
+ vdd-supply = <&reg_vdd_tsp>;
+ touchscreen-size-x = <540>;
+ touchscreen-size-y = <960>;
+ zinitix,mode = <2>;
+ };
+ };
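
For comparison, the deprecated supply scheme described under vddo-supply would look like this in an older device tree (regulator names are assumptions); note that vdd-supply feeds the analog VCCA pin here:

    touchscreen@20 {
        compatible = "zinitix,bt541";
        reg = <0x20>;
        interrupt-parent = <&gpio>;
        interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
        /* deprecated scheme: vdd-supply is the analog VCCA rail */
        vdd-supply = <&reg_vcca_tsp>;
        vddo-supply = <&reg_vdd_tsp>;
        touchscreen-size-x = <540>;
        touchscreen-size-y = <960>;
        zinitix,mode = <2>;
    };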
diff --git a/Documentation/devicetree/bindings/input/touchscreen/zinitix.txt b/Documentation/devicetree/bindings/input/touchscreen/zinitix.txt
deleted file mode 100644
index 446efb9f5f55..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/zinitix.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-Device tree bindings for Zinitx BT541 touchscreen controller
-
-Required properties:
-
- - compatible : Should be "zinitix,bt541"
- - reg : I2C address of the chip. Should be 0x20
- - interrupts : Interrupt to which the chip is connected
-
-Optional properties:
-
- - vdd-supply : Analog power supply regulator on VCCA pin
- - vddo-supply : Digital power supply regulator on VDD pin
- - zinitix,mode : Mode of reporting touch points. Some modes may not work
- with a particular ts firmware for unknown reasons. Available
- modes are 1 and 2. Mode 2 is the default and preferred.
-
-The touchscreen-* properties are documented in touchscreen.txt in this
-directory.
-
-Example:
-
- i2c@00000000 {
- /* ... */
-
- bt541@20 {
- compatible = "zinitix,bt541";
- reg = <0x20>;
- interrupt-parent = <&msmgpio>;
- interrupts = <13 IRQ_TYPE_EDGE_FALLING>;
- pinctrl-names = "default";
- pinctrl-0 = <&tsp_default>;
- vdd-supply = <&reg_vdd_tsp>;
- vddo-supply = <&pm8916_l6>;
- touchscreen-size-x = <540>;
- touchscreen-size-y = <960>;
- zinitix,mode = <2>;
- };
-
- /* ... */
- };
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
index e701524ee811..116e434d0daa 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
@@ -18,6 +18,7 @@ properties:
compatible:
enum:
- qcom,sc7180-osm-l3
+ - qcom,sc7280-epss-l3
- qcom,sc8180x-osm-l3
- qcom,sdm845-osm-l3
- qcom,sm8150-osm-l3
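For reference, a node using the new sc7280 compatible might look like the sketch below; the register address and clock specifiers are illustrative guesses following the existing OSM L3 pattern, not values taken from this patch:

    epss_l3: interconnect@18590000 {
        compatible = "qcom,sc7280-epss-l3";
        reg = <0x18590000 0x1000>;
        clocks = <&rpmhcc RPMH_CXO_CLK>, <&gcc GCC_GPLL0>;
        clock-names = "xo", "alternate";
        #interconnect-cells = <1>;
    };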
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,qcm2290.yaml b/Documentation/devicetree/bindings/interconnect/qcom,qcm2290.yaml
new file mode 100644
index 000000000000..f65a2fe846de
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,qcm2290.yaml
@@ -0,0 +1,137 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,qcm2290.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm QCM2290 Network-On-Chip interconnect
+
+maintainers:
+ - Shawn Guo <shawn.guo@linaro.org>
+
+description: |
+ The Qualcomm QCM2290 interconnect providers support adjusting the
+ bandwidth requirements between the various NoC fabrics.
+
+properties:
+ reg:
+ maxItems: 1
+
+ compatible:
+ enum:
+ - qcom,qcm2290-bimc
+ - qcom,qcm2290-cnoc
+ - qcom,qcm2290-snoc
+
+ '#interconnect-cells':
+ const: 1
+
+ clock-names:
+ items:
+ - const: bus
+ - const: bus_a
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Bus A Clock
+
+# Child node's properties
+patternProperties:
+ '^interconnect-[a-z0-9]+$':
+ type: object
+ description:
+ The interconnect providers do not have a separate QoS register space,
+ but share the parent's register space.
+
+ properties:
+ compatible:
+ enum:
+ - qcom,qcm2290-qup-virt
+ - qcom,qcm2290-mmrt-virt
+ - qcom,qcm2290-mmnrt-virt
+
+ '#interconnect-cells':
+ const: 1
+
+ clock-names:
+ items:
+ - const: bus
+ - const: bus_a
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Bus A Clock
+
+ required:
+ - compatible
+ - '#interconnect-cells'
+ - clock-names
+ - clocks
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - '#interconnect-cells'
+ - clock-names
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,rpmcc.h>
+
+ snoc: interconnect@1880000 {
+ compatible = "qcom,qcm2290-snoc";
+ reg = <0x01880000 0x60200>;
+ #interconnect-cells = <1>;
+ clock-names = "bus", "bus_a";
+ clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
+ <&rpmcc RPM_SMD_SNOC_A_CLK>;
+
+ qup_virt: interconnect-qup {
+ compatible = "qcom,qcm2290-qup-virt";
+ #interconnect-cells = <1>;
+ clock-names = "bus", "bus_a";
+ clocks = <&rpmcc RPM_SMD_QUP_CLK>,
+ <&rpmcc RPM_SMD_QUP_A_CLK>;
+ };
+
+ mmnrt_virt: interconnect-mmnrt {
+ compatible = "qcom,qcm2290-mmnrt-virt";
+ #interconnect-cells = <1>;
+ clock-names = "bus", "bus_a";
+ clocks = <&rpmcc RPM_SMD_MMNRT_CLK>,
+ <&rpmcc RPM_SMD_MMNRT_A_CLK>;
+ };
+
+ mmrt_virt: interconnect-mmrt {
+ compatible = "qcom,qcm2290-mmrt-virt";
+ #interconnect-cells = <1>;
+ clock-names = "bus", "bus_a";
+ clocks = <&rpmcc RPM_SMD_MMRT_CLK>,
+ <&rpmcc RPM_SMD_MMRT_A_CLK>;
+ };
+ };
+
+ cnoc: interconnect@1900000 {
+ compatible = "qcom,qcm2290-cnoc";
+ reg = <0x01900000 0x8200>;
+ #interconnect-cells = <1>;
+ clock-names = "bus", "bus_a";
+ clocks = <&rpmcc RPM_SMD_CNOC_CLK>,
+ <&rpmcc RPM_SMD_CNOC_A_CLK>;
+ };
+
+ bimc: interconnect@4480000 {
+ compatible = "qcom,qcm2290-bimc";
+ reg = <0x04480000 0x80000>;
+ #interconnect-cells = <1>;
+ clock-names = "bus", "bus_a";
+ clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
+ <&rpmcc RPM_SMD_BIMC_A_CLK>;
+ };
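Since #interconnect-cells is 1, a consumer names each path endpoint with a single ID cell. A hypothetical consumer sketch (the device node and the endpoint macros are illustrative; real IDs come from the QCM2290 interconnect dt-bindings header):

    usb@4e00000 {
        /* ... other device properties ... */
        interconnects = <&snoc MASTER_USB3_0 &bimc SLAVE_EBI1>;
        interconnect-names = "usb-ddr";
    };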
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
index 983d71fb5399..e4c3c2818119 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpm.yaml
@@ -27,22 +27,37 @@ properties:
- qcom,msm8939-pcnoc
- qcom,msm8939-snoc
- qcom,msm8939-snoc-mm
+ - qcom,msm8996-a0noc
+ - qcom,msm8996-a1noc
+ - qcom,msm8996-a2noc
+ - qcom,msm8996-bimc
+ - qcom,msm8996-cnoc
+ - qcom,msm8996-mnoc
+ - qcom,msm8996-pnoc
+ - qcom,msm8996-snoc
- qcom,qcs404-bimc
- qcom,qcs404-pcnoc
- qcom,qcs404-snoc
+ - qcom,sdm660-a2noc
+ - qcom,sdm660-bimc
+ - qcom,sdm660-cnoc
+ - qcom,sdm660-gnoc
+ - qcom,sdm660-mnoc
+ - qcom,sdm660-snoc
'#interconnect-cells':
const: 1
+ clocks:
+ minItems: 2
+ maxItems: 7
+
clock-names:
- items:
- - const: bus
- - const: bus_a
+ minItems: 2
+ maxItems: 7
- clocks:
- items:
- - description: Bus Clock
- - description: Bus A Clock
+ power-domains:
+ maxItems: 1
required:
- compatible
@@ -53,6 +68,120 @@ required:
additionalProperties: false
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8916-bimc
+ - qcom,msm8916-pcnoc
+ - qcom,msm8916-snoc
+ - qcom,msm8939-bimc
+ - qcom,msm8939-pcnoc
+ - qcom,msm8939-snoc
+ - qcom,msm8939-snoc-mm
+ - qcom,msm8996-a1noc
+ - qcom,msm8996-a2noc
+ - qcom,msm8996-bimc
+ - qcom,msm8996-cnoc
+ - qcom,msm8996-pnoc
+ - qcom,msm8996-snoc
+ - qcom,qcs404-bimc
+ - qcom,qcs404-pcnoc
+ - qcom,qcs404-snoc
+ - qcom,sdm660-bimc
+ - qcom,sdm660-cnoc
+ - qcom,sdm660-gnoc
+ - qcom,sdm660-snoc
+
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: bus
+ - const: bus_a
+
+ clocks:
+ items:
+ - description: Bus Clock
+ - description: Bus A Clock
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8996-mnoc
+ - qcom,sdm660-mnoc
+
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: bus
+ - const: bus_a
+ - const: iface
+
+ clocks:
+ items:
+ - description: Bus Clock.
+ - description: Bus A Clock.
+ - description: CPU-NoC High-performance Bus Clock.
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,msm8996-a0noc
+
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: aggre0_snoc_axi
+ - const: aggre0_cnoc_ahb
+ - const: aggre0_noc_mpu_cfg
+
+ clocks:
+ items:
+ - description: Aggregate0 System NoC AXI Clock.
+ - description: Aggregate0 Config NoC AHB Clock.
+ - description: Aggregate0 NoC MPU Clock.
+
+ required:
+ - power-domains
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,sdm660-a2noc
+
+ then:
+ properties:
+ clock-names:
+ items:
+ - const: bus
+ - const: bus_a
+ - const: ipa
+ - const: ufs_axi
+ - const: aggre2_ufs_axi
+ - const: aggre2_usb3_axi
+ - const: cfg_noc_usb2_axi
+
+ clocks:
+ items:
+ - description: Bus Clock.
+ - description: Bus A Clock.
+ - description: IPA Clock.
+ - description: UFS AXI Clock.
+ - description: Aggregate2 UFS AXI Clock.
+ - description: Aggregate2 USB3 AXI Clock.
+ - description: Config NoC USB2 AXI Clock.
+
examples:
- |
#include <dt-bindings/clock/qcom,rpmcc.h>
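As a concrete reading of the per-compatible rules added above, an msm8996 A0NoC node carries the three aggre0 clocks plus the now-mandatory power domain. A sketch with an illustrative address and GDSC specifier:

    a0noc: interconnect@1543000 {
        compatible = "qcom,msm8996-a0noc";
        reg = <0x01543000 0x6000>;
        #interconnect-cells = <1>;
        clock-names = "aggre0_snoc_axi",
                      "aggre0_cnoc_ahb",
                      "aggre0_noc_mpu_cfg";
        clocks = <&gcc GCC_AGGRE0_SNOC_AXI_CLK>,
                 <&gcc GCC_AGGRE0_CNOC_AHB_CLK>,
                 <&gcc GCC_AGGRE0_NOC_MPU_CFG_AHB_CLK>;
        power-domains = <&gcc AGGRE0_NOC_GDSC>;
    };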
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
index 3fd1a134162d..cbb24f9bb609 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,rpmh.yaml
@@ -104,6 +104,17 @@ properties:
- qcom,sm8350-mmss-noc
- qcom,sm8350-compute-noc
- qcom,sm8350-system-noc
+ - qcom,sm8450-aggre1-noc
+ - qcom,sm8450-aggre2-noc
+ - qcom,sm8450-clk-virt
+ - qcom,sm8450-config-noc
+ - qcom,sm8450-gem-noc
+ - qcom,sm8450-lpass-ag-noc
+ - qcom,sm8450-mc-virt
+ - qcom,sm8450-mmss-noc
+ - qcom,sm8450-nsp-noc
+ - qcom,sm8450-pcie-anoc
+ - qcom,sm8450-system-noc
'#interconnect-cells':
enum: [ 1, 2 ]
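The new sm8450 entries follow the same RPMh provider shape as the existing SoCs; the clk-virt and mc-virt providers have no registers of their own. A sketch of one such node, assuming the usual BCM voter phandle:

    clk_virt: interconnect-0 {
        compatible = "qcom,sm8450-clk-virt";
        #interconnect-cells = <2>;
        qcom,bcm-voters = <&apps_bcm_voter>;
    };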
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml b/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
deleted file mode 100644
index bcd41e491f1d..000000000000
--- a/Documentation/devicetree/bindings/interconnect/qcom,sdm660.yaml
+++ /dev/null
@@ -1,185 +0,0 @@
-# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/interconnect/qcom,sdm660.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Qualcomm SDM660 Network-On-Chip interconnect
-
-maintainers:
- - AngeloGioacchino Del Regno <kholk11@gmail.com>
-
-description: |
- The Qualcomm SDM660 interconnect providers support adjusting the
- bandwidth requirements between the various NoC fabrics.
-
-properties:
- reg:
- maxItems: 1
-
- compatible:
- enum:
- - qcom,sdm660-a2noc
- - qcom,sdm660-bimc
- - qcom,sdm660-cnoc
- - qcom,sdm660-gnoc
- - qcom,sdm660-mnoc
- - qcom,sdm660-snoc
-
- '#interconnect-cells':
- const: 1
-
- clocks:
- minItems: 1
- maxItems: 7
-
- clock-names:
- minItems: 1
- maxItems: 7
-
-required:
- - compatible
- - reg
- - '#interconnect-cells'
- - clock-names
- - clocks
-
-additionalProperties: false
-
-allOf:
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sdm660-mnoc
- then:
- properties:
- clocks:
- items:
- - description: Bus Clock.
- - description: Bus A Clock.
- - description: CPU-NoC High-performance Bus Clock.
- clock-names:
- items:
- - const: bus
- - const: bus_a
- - const: iface
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sdm660-a2noc
- then:
- properties:
- clocks:
- items:
- - description: Bus Clock.
- - description: Bus A Clock.
- - description: IPA Clock.
- - description: UFS AXI Clock.
- - description: Aggregate2 UFS AXI Clock.
- - description: Aggregate2 USB3 AXI Clock.
- - description: Config NoC USB2 AXI Clock.
- clock-names:
- items:
- - const: bus
- - const: bus_a
- - const: ipa
- - const: ufs_axi
- - const: aggre2_ufs_axi
- - const: aggre2_usb3_axi
- - const: cfg_noc_usb2_axi
-
- - if:
- properties:
- compatible:
- contains:
- enum:
- - qcom,sdm660-bimc
- - qcom,sdm660-cnoc
- - qcom,sdm660-gnoc
- - qcom,sdm660-snoc
- then:
- properties:
- clocks:
- items:
- - description: Bus Clock.
- - description: Bus A Clock.
- clock-names:
- items:
- - const: bus
- - const: bus_a
-
-examples:
- - |
- #include <dt-bindings/clock/qcom,rpmcc.h>
- #include <dt-bindings/clock/qcom,mmcc-sdm660.h>
- #include <dt-bindings/clock/qcom,gcc-sdm660.h>
-
- bimc: interconnect@1008000 {
- compatible = "qcom,sdm660-bimc";
- reg = <0x01008000 0x78000>;
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_BIMC_CLK>,
- <&rpmcc RPM_SMD_BIMC_A_CLK>;
- };
-
- cnoc: interconnect@1500000 {
- compatible = "qcom,sdm660-cnoc";
- reg = <0x01500000 0x10000>;
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_CNOC_CLK>,
- <&rpmcc RPM_SMD_CNOC_A_CLK>;
- };
-
- snoc: interconnect@1626000 {
- compatible = "qcom,sdm660-snoc";
- reg = <0x01626000 0x7090>;
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&rpmcc RPM_SMD_SNOC_CLK>,
- <&rpmcc RPM_SMD_SNOC_A_CLK>;
- };
-
- a2noc: interconnect@1704000 {
- compatible = "qcom,sdm660-a2noc";
- reg = <0x01704000 0xc100>;
- #interconnect-cells = <1>;
- clock-names = "bus",
- "bus_a",
- "ipa",
- "ufs_axi",
- "aggre2_ufs_axi",
- "aggre2_usb3_axi",
- "cfg_noc_usb2_axi";
- clocks = <&rpmcc RPM_SMD_AGGR2_NOC_CLK>,
- <&rpmcc RPM_SMD_AGGR2_NOC_A_CLK>,
- <&rpmcc RPM_SMD_IPA_CLK>,
- <&gcc GCC_UFS_AXI_CLK>,
- <&gcc GCC_AGGRE2_UFS_AXI_CLK>,
- <&gcc GCC_AGGRE2_USB3_AXI_CLK>,
- <&gcc GCC_CFG_NOC_USB2_AXI_CLK>;
- };
-
- mnoc: interconnect@1745000 {
- compatible = "qcom,sdm660-mnoc";
- reg = <0x01745000 0xa010>;
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a", "iface";
- clocks = <&rpmcc RPM_SMD_MMSSNOC_AXI_CLK>,
- <&rpmcc RPM_SMD_MMSSNOC_AXI_CLK_A>,
- <&mmcc AHB_CLK_SRC>;
- };
-
- gnoc: interconnect@17900000 {
- compatible = "qcom,sdm660-gnoc";
- reg = <0x17900000 0xe000>;
- #interconnect-cells = <1>;
- clock-names = "bus", "bus_a";
- clocks = <&xo_board>, <&xo_board>;
- };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
index c84f9fe7f254..cfb3ec27bd2b 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.yaml
@@ -239,6 +239,7 @@ examples:
};
};
+ - |
interrupt-controller@2c010000 {
compatible = "arm,gic-v3";
#interrupt-cells = <4>;
@@ -254,7 +255,7 @@ examples:
<0x2c040000 0x2000>, // GICC
<0x2c060000 0x2000>, // GICH
<0x2c080000 0x2000>; // GICV
- interrupts = <1 9 4>;
+ interrupts = <1 9 4 0>;
msi-controller@2c200000 {
compatible = "arm,gic-v3-its";
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt
deleted file mode 100644
index 37aea40d5430..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm3380-l2-intc.txt
+++ /dev/null
@@ -1,39 +0,0 @@
-Broadcom BCM3380-style Level 1 / Level 2 interrupt controller
-
-This interrupt controller shows up in various forms on many BCM338x/BCM63xx
-chipsets. It has the following properties:
-
-- outputs a single interrupt signal to its interrupt controller parent
-
-- contains one or more enable/status word pairs, which often appear at
- different offsets in different blocks
-
-- no atomic set/clear operations
-
-Required properties:
-
-- compatible: should be "brcm,bcm3380-l2-intc"
-- reg: specifies one or more enable/status pairs, in the following format:
- <enable_reg 0x4 status_reg 0x4>...
-- interrupt-controller: identifies the node as an interrupt controller
-- #interrupt-cells: specifies the number of cells needed to encode an interrupt
- source, should be 1.
-- interrupts: specifies the interrupt line in the interrupt-parent controller
- node, valid values depend on the type of parent interrupt controller
-
-Optional properties:
-
-- brcm,irq-can-wake: if present, this means the L2 controller can be used as a
- wakeup source for system suspend/resume.
-
-Example:
-
-irq0_intc: interrupt-controller@10000020 {
- compatible = "brcm,bcm3380-l2-intc";
- reg = <0x10000024 0x4 0x1000002c 0x4>,
- <0x10000020 0x4 0x10000028 0x4>;
- interrupt-controller;
- #interrupt-cells = <1>;
- interrupt-parent = <&cpu_intc>;
- interrupts = <2>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt
deleted file mode 100644
index 5ddef1dc0c1a..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-Broadcom BCM7038-style Level 1 interrupt controller
-
-This block is a first level interrupt controller that is typically connected
-directly to one of the HW INT lines on each CPU. Every BCM7xxx set-top chip
-since BCM7038 has contained this hardware.
-
-Key elements of the hardware design include:
-
-- 64, 96, 128, or 160 incoming level IRQ lines
-
-- Most onchip peripherals are wired directly to an L1 input
-
-- A separate instance of the register set for each CPU, allowing individual
- peripheral IRQs to be routed to any CPU
-
-- Atomic mask/unmask operations
-
-- No polarity/level/edge settings
-
-- No FIFO or priority encoder logic; software is expected to read all
- 2-5 status words to determine which IRQs are pending
-
-Required properties:
-
-- compatible: should be "brcm,bcm7038-l1-intc"
-- reg: specifies the base physical address and size of the registers;
- the number of supported IRQs is inferred from the size argument
-- interrupt-controller: identifies the node as an interrupt controller
-- #interrupt-cells: specifies the number of cells needed to encode an interrupt
- source, should be 1.
-- interrupts: specifies the interrupt line(s) in the interrupt-parent controller
- node; valid values depend on the type of parent interrupt controller
-
-Optional properties:
-
-- brcm,irq-can-wake: If present, this means the L1 controller can be used as a
- wakeup source for system suspend/resume.
-
-Optional properties:
-
-- brcm,int-fwd-mask: if present, a bit mask to indicate which interrupts
- have already been configured by the firmware and should be left unmanaged.
- This should have one 32-bit word per status/set/clear/mask group.
-
-If multiple reg ranges and interrupt-parent entries are present on an SMP
-system, the driver will allow IRQ SMP affinity to be set up through the
-/proc/irq/ interface. In the simplest possible configuration, only one
-reg range and one interrupt-parent is needed.
-
-Example:
-
-periph_intc: periph_intc@1041a400 {
- compatible = "brcm,bcm7038-l1-intc";
- reg = <0x1041a400 0x30 0x1041a600 0x30>;
-
- interrupt-controller;
- #interrupt-cells = <1>;
-
- interrupt-parent = <&cpu_intc>;
- interrupts = <2>, <3>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.yaml
new file mode 100644
index 000000000000..5ecb6faa70dc
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7038-l1-intc.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/brcm,bcm7038-l1-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM7038-style Level 1 interrupt controller
+
+description: >
+ This block is a first level interrupt controller that is typically connected
+ directly to one of the HW INT lines on each CPU. Every BCM7xxx set-top chip
+ since BCM7038 has contained this hardware.
+
+ Key elements of the hardware design include:
+
+ - 64, 96, 128, or 160 incoming level IRQ lines
+
+ - Most onchip peripherals are wired directly to an L1 input
+
+ - A separate instance of the register set for each CPU, allowing individual
+ peripheral IRQs to be routed to any CPU
+
+ - Atomic mask/unmask operations
+
+ - No polarity/level/edge settings
+
+ - No FIFO or priority encoder logic; software is expected to read all
+ 2-5 status words to determine which IRQs are pending
+
+ If multiple reg ranges and interrupt-parent entries are present on an SMP
+ system, the driver will allow IRQ SMP affinity to be set up through the
+ /proc/irq/ interface. In the simplest possible configuration, only one
+ reg range and one interrupt-parent is needed.
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ const: brcm,bcm7038-l1-intc
+
+ reg:
+ description: >
+ Specifies the base physical address and size of the registers;
+ the number of supported IRQs is inferred from the size argument.
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 1
+
+ interrupts:
+ description: >
+ Specifies the interrupt line(s) in the interrupt-parent controller node;
+ valid values depend on the type of parent interrupt controller
+
+ brcm,irq-can-wake:
+ type: boolean
+ description: >
+ If present, this means the L1 controller can be used as a
+ wakeup source for system suspend/resume.
+
+ brcm,int-fwd-mask:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ If present, a bit mask to indicate which interrupts have already been
+ configured by the firmware and should be left unmanaged. This should
+ have one 32-bit word per status/set/clear/mask group.
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - "#interrupt-cells"
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ periph_intc: interrupt-controller@1041a400 {
+ compatible = "brcm,bcm7038-l1-intc";
+ reg = <0x1041a400 0x30>, <0x1041a600 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt
deleted file mode 100644
index addd86b6ca2f..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-Broadcom BCM7120-style Level 2 interrupt controller
-
-This interrupt controller hardware is a second level interrupt controller that
-is hooked to a parent interrupt controller: e.g: ARM GIC for ARM-based
-platforms. It can be found on BCM7xxx products starting with BCM7120.
-
-Such an interrupt controller has the following hardware design:
-
-- outputs multiple interrupts signals towards its interrupt controller parent
-
-- controls how some of the interrupts will be flowing, whether they will
- directly output an interrupt signal towards the interrupt controller parent,
- or if they will output an interrupt signal at this 2nd level interrupt
- controller, in particular for UARTs
-
-- has one 32-bit enable word and one 32-bit status word
-
-- no atomic set/clear operations
-
-- not all bits within the interrupt controller actually map to an interrupt
-
-The typical hardware layout for this controller is represented below:
-
-2nd level interrupt line Outputs for the parent controller (e.g: ARM GIC)
-
-0 -----[ MUX ] ------------|==========> GIC interrupt 75
- \-----------\
- |
-1 -----[ MUX ] --------)---|==========> GIC interrupt 76
- \------------|
- |
-2 -----[ MUX ] --------)---|==========> GIC interrupt 77
- \------------|
- |
-3 ---------------------|
-4 ---------------------|
-5 ---------------------|
-7 ---------------------|---|===========> GIC interrupt 66
-9 ---------------------|
-10 --------------------|
-11 --------------------/
-
-6 ------------------------\
- |===========> GIC interrupt 64
-8 ------------------------/
-
-12 ........................ X
-13 ........................ X (not connected)
-..
-31 ........................ X
-
-Required properties:
-
-- compatible: should be "brcm,bcm7120-l2-intc"
-- reg: specifies the base physical address and size of the registers
-- interrupt-controller: identifies the node as an interrupt controller
-- #interrupt-cells: specifies the number of cells needed to encode an interrupt
- source, should be 1.
-- interrupts: specifies the interrupt line(s) in the interrupt-parent controller
- node, valid values depend on the type of parent interrupt controller
-- brcm,int-map-mask: 32-bits bit mask describing how many and which interrupts
- are wired to this 2nd level interrupt controller, and how they match their
- respective interrupt parents. Should match exactly the number of interrupts
- specified in the 'interrupts' property.
-
-Optional properties:
-
-- brcm,irq-can-wake: if present, this means the L2 controller can be used as a
- wakeup source for system suspend/resume.
-
-- brcm,int-fwd-mask: if present, a bit mask to configure the interrupts which
- have a mux gate, typically UARTs. Setting these bits will make their
- respective interrupt outputs bypass this 2nd level interrupt controller
- completely; it is completely transparent for the interrupt controller
- parent. This should have one 32-bit word per enable/status pair.
-
-Example:
-
-irq0_intc: interrupt-controller@f0406800 {
- compatible = "brcm,bcm7120-l2-intc";
- interrupt-parent = <&intc>;
- #interrupt-cells = <1>;
- reg = <0xf0406800 0x8>;
- interrupt-controller;
- interrupts = <0x0 0x42 0x0>, <0x0 0x40 0x0>;
- brcm,int-map-mask = <0xeb8>, <0x140>;
- brcm,int-fwd-mask = <0x7>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.yaml
new file mode 100644
index 000000000000..46b2eb3c43ee
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,bcm7120-l2-intc.yaml
@@ -0,0 +1,151 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/brcm,bcm7120-l2-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM7120-style Level 2 and Broadcom BCM3380 Level 1 / Level 2
+ interrupt controller
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+description: >
+ This interrupt controller hardware is a second level interrupt controller
+ that is hooked to a parent interrupt controller, e.g. the ARM GIC on
+ ARM-based platforms. It can be found on BCM7xxx products starting with
+ BCM7120.
+
+ Such an interrupt controller has the following hardware design:
+
+ - outputs multiple interrupts signals towards its interrupt controller parent
+
+ - controls how some of the interrupts flow, i.e. whether they directly
+ output an interrupt signal towards the interrupt controller parent, or
+ output an interrupt signal at this 2nd level interrupt controller, in
+ particular for UARTs
+
+ - has one 32-bit enable word and one 32-bit status word
+
+ - no atomic set/clear operations
+
+ - not all bits within the interrupt controller actually map to an interrupt
+
+ The typical hardware layout for this controller is represented below:
+
+ 2nd level interrupt line Outputs for the parent controller (e.g. ARM GIC)
+
+ 0 -----[ MUX ] ------------|==========> GIC interrupt 75
+ \-----------\
+ |
+ 1 -----[ MUX ] --------)---|==========> GIC interrupt 76
+ \------------|
+ |
+ 2 -----[ MUX ] --------)---|==========> GIC interrupt 77
+ \------------|
+ |
+ 3 ---------------------|
+ 4 ---------------------|
+ 5 ---------------------|
+ 7 ---------------------|---|===========> GIC interrupt 66
+ 9 ---------------------|
+ 10 --------------------|
+ 11 --------------------/
+
+ 6 ------------------------\
+ |===========> GIC interrupt 64
+ 8 ------------------------/
+
+ 12 ........................ X
+ 13 ........................ X (not connected)
+ ..
+ 31 ........................ X
+
+ The BCM3380 Level 1 / Level 2 interrupt controller shows up in various forms
+ on many BCM338x/BCM63xx chipsets. It has the following properties:
+
+ - outputs a single interrupt signal to its interrupt controller parent
+
+ - contains one or more enable/status word pairs, which often appear at
+ different offsets in different blocks
+
+ - no atomic set/clear operations
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - brcm,bcm7120-l2-intc
+ - brcm,bcm3380-l2-intc
+
+ reg:
+ minItems: 1
+ maxItems: 4
+ description: >
+ Specifies the base physical address and size of the registers
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 1
+
+ interrupts:
+ minItems: 1
+ maxItems: 32
+
+ brcm,int-map-mask:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description: >
+ 32-bit mask describing how many and which interrupts are wired to
+ this 2nd level interrupt controller, and how they match their respective
+ interrupt parents. Should match exactly the number of interrupts
+ specified in the 'interrupts' property.
+
+ brcm,irq-can-wake:
+ type: boolean
+ description: >
+ If present, this means the L2 controller can be used as a wakeup source
+ for system suspend/resume.
+
+ brcm,int-fwd-mask:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ If present, a bit mask to configure the interrupts which have a mux gate,
+ typically UARTs. Setting these bits will make their respective interrupt
+ outputs bypass this 2nd level interrupt controller completely; it is
+ completely transparent for the interrupt controller parent. This should
+ have one 32-bit word per enable/status pair.
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - "#interrupt-cells"
+ - interrupts
+
+examples:
+ - |
+ irq0_intc: interrupt-controller@f0406800 {
+ compatible = "brcm,bcm7120-l2-intc";
+ interrupt-parent = <&intc>;
+ #interrupt-cells = <1>;
+ reg = <0xf0406800 0x8>;
+ interrupt-controller;
+ interrupts = <0x0 0x42 0x0>, <0x0 0x40 0x0>;
+ brcm,int-map-mask = <0xeb8>, <0x140>;
+ brcm,int-fwd-mask = <0x7>;
+ };
+
+ - |
+ irq1_intc: interrupt-controller@10000020 {
+ compatible = "brcm,bcm3380-l2-intc";
+ reg = <0x10000024 0x4>, <0x1000002c 0x4>,
+ <0x10000020 0x4>, <0x10000028 0x4>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
deleted file mode 100644
index 021cf822395c..000000000000
--- a/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Broadcom Generic Level 2 Interrupt Controller
-
-Required properties:
-
-- compatible: should be one of:
- "brcm,hif-spi-l2-intc" or
- "brcm,upg-aux-aon-l2-intc" or
- "brcm,l2-intc" for latched interrupt controllers
- should be "brcm,bcm7271-l2-intc" for level interrupt controllers
-- reg: specifies the base physical address and size of the registers
-- interrupt-controller: identifies the node as an interrupt controller
-- #interrupt-cells: specifies the number of cells needed to encode an
- interrupt source. Should be 1.
-- interrupts: specifies the interrupt line in the interrupt-parent irq space
- to be used for cascading
-
-Optional properties:
-
-- brcm,irq-can-wake: If present, this means the L2 controller can be used as a
- wakeup source for system suspend/resume.
-
-Example:
-
-hif_intr2_intc: interrupt-controller@f0441000 {
- compatible = "brcm,l2-intc";
- reg = <0xf0441000 0x30>;
- interrupt-controller;
- #interrupt-cells = <1>;
- interrupt-parent = <&intc>;
- interrupts = <0x0 0x20 0x0>;
-};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.yaml b/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.yaml
new file mode 100644
index 000000000000..8961afca96f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/brcm,l2-intc.yaml
@@ -0,0 +1,72 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/brcm,l2-intc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Generic Level 2 Interrupt Controller
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+allOf:
+ - $ref: /schemas/interrupt-controller.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - brcm,hif-spi-l2-intc
+ - brcm,upg-aux-aon-l2-intc
+ - const: brcm,l2-intc
+ - items:
+ - enum:
+ - brcm,bcm2711-l2-intc
+ - const: brcm,l2-intc
+ - items:
+ - const: brcm,bcm7271-l2-intc
+ - items:
+ - const: brcm,l2-intc
+
+ reg:
+ maxItems: 1
+ description: >
+ Specifies the base physical address and size of the registers
+
+ interrupt-controller: true
+
+ "#interrupt-cells":
+ const: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ maxItems: 1
+
+ brcm,irq-can-wake:
+ type: boolean
+ description: >
+ If present, this means the L2 controller can be used as a wakeup source
+ for system suspend/resume.
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupt-controller
+ - "#interrupt-cells"
+ - interrupts
+
+examples:
+ - |
+ hif_intr2_intc: interrupt-controller@f0441000 {
+ compatible = "brcm,l2-intc";
+ reg = <0xf0441000 0x30>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&intc>;
+ interrupts = <0x0 0x20 0x0>;
+ };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
index 79d0358e2f61..620f01775e42 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,irqc.yaml
@@ -36,6 +36,7 @@ properties:
- renesas,intc-ex-r8a77980 # R-Car V3H
- renesas,intc-ex-r8a77990 # R-Car E3
- renesas,intc-ex-r8a77995 # R-Car D3
+ - renesas,intc-ex-r8a779a0 # R-Car V3U
- const: renesas,irqc
'#interrupt-cells':
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
index 28b6b17fe4b2..27092c6a86c4 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
@@ -35,6 +35,10 @@ description:
contains a specific memory layout, which is documented in chapter 8 of the
SiFive U5 Coreplex Series Manual <https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf>.
+ The thead,c900-plic differs from sifive,plic-1.0.0 in OpenSBI: the T-HEAD
+ PLIC implementation requires setting a delegation bit to allow access from
+ S-mode. The thead,c900-plic compatible is used to distinguish the two.
+
maintainers:
- Sagar Kadam <sagar.kadam@sifive.com>
- Paul Walmsley <paul.walmsley@sifive.com>
@@ -42,12 +46,17 @@ maintainers:
properties:
compatible:
- items:
- - enum:
- - sifive,fu540-c000-plic
- - starfive,jh7100-plic
- - canaan,k210-plic
- - const: sifive,plic-1.0.0
+ oneOf:
+ - items:
+ - enum:
+ - sifive,fu540-c000-plic
+ - starfive,jh7100-plic
+ - canaan,k210-plic
+ - const: sifive,plic-1.0.0
+ - items:
+ - enum:
+ - allwinner,sun20i-d1-plic
+ - const: thead,c900-plic
reg:
maxItems: 1
@@ -62,6 +71,7 @@ properties:
interrupts-extended:
minItems: 1
+ maxItems: 15872
description:
Specifies which contexts are connected to the PLIC, with "-1" specifying
that a context is not present. Each node pointed to should be a
@@ -90,12 +100,11 @@ examples:
#interrupt-cells = <1>;
compatible = "sifive,fu540-c000-plic", "sifive,plic-1.0.0";
interrupt-controller;
- interrupts-extended = <
- &cpu0_intc 11
- &cpu1_intc 11 &cpu1_intc 9
- &cpu2_intc 11 &cpu2_intc 9
- &cpu3_intc 11 &cpu3_intc 9
- &cpu4_intc 11 &cpu4_intc 9>;
+ interrupts-extended = <&cpu0_intc 11>,
+ <&cpu1_intc 11>, <&cpu1_intc 9>,
+ <&cpu2_intc 11>, <&cpu2_intc 9>,
+ <&cpu3_intc 11>, <&cpu3_intc 9>,
+ <&cpu4_intc 11>, <&cpu4_intc 9>;
reg = <0xc000000 0x4000000>;
riscv,ndev = <10>;
};
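A node using the new T-HEAD compatible pair would look like the sketch below; the base address, riscv,ndev count and hart context are illustrative values for an Allwinner D1-class SoC, not taken from this patch:

    plic: interrupt-controller@10000000 {
        compatible = "allwinner,sun20i-d1-plic", "thead,c900-plic";
        #address-cells = <0>;
        #interrupt-cells = <1>;
        interrupt-controller;
        interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>;
        reg = <0x10000000 0x4000000>;
        riscv,ndev = <175>;
    };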
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
index f66a3effba73..da5381c8ee11 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.yaml
@@ -38,10 +38,12 @@ properties:
- qcom,sc7280-smmu-500
- qcom,sc8180x-smmu-500
- qcom,sdm845-smmu-500
+ - qcom,sdx55-smmu-500
- qcom,sm6350-smmu-500
- qcom,sm8150-smmu-500
- qcom,sm8250-smmu-500
- qcom,sm8350-smmu-500
+ - qcom,sm8450-smmu-500
- const: arm,mmu-500
- description: Qcom Adreno GPUs implementing "arm,smmu-v2"
items:
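The new SoC entries pair with the generic arm,mmu-500 fallback exactly like the existing Qualcomm compatibles. A trimmed, illustrative sketch (real nodes list one interrupt per context bank; the address and interrupt numbers are placeholders, and the GIC macros come from dt-bindings/interrupt-controller/arm-gic.h):

    iommu@15000000 {
        compatible = "qcom,sm8450-smmu-500", "arm,mmu-500";
        reg = <0x15000000 0x100000>;
        #global-interrupts = <1>;
        #iommu-cells = <2>;
        interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>;
    };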
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6328.txt b/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
deleted file mode 100644
index a555d94084b7..000000000000
--- a/Documentation/devicetree/bindings/leds/leds-bcm6328.txt
+++ /dev/null
@@ -1,319 +0,0 @@
-LEDs connected to Broadcom BCM6328 controller
-
-This controller is present on BCM6318, BCM6328, BCM6362 and BCM63268.
-In these SoCs it's possible to control LEDs both as GPIOs or by hardware.
-However, on some devices there are Serial LEDs (LEDs connected to a 74x164
-controller), which can either be controlled by software (exporting the 74x164
-as spi-gpio. See Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml),
-or by hardware using this driver.
-Some of these Serial LEDs are hardware controlled (e.g. ethernet LEDs) and
-exporting the 74x164 as spi-gpio prevents those LEDs to be hardware
-controlled, so the only chance to keep them working is by using this driver.
-
-BCM6328 LED controller has a HWDIS register, which controls whether a LED
-should be controlled by a hardware signal instead of the MODE register value,
-with 0 meaning hardware control enabled and 1 hardware control disabled. This
-is usually 1:1 for hardware to LED signals, but through the activity/link
-registers you have some limited control over rerouting the LEDs (as
-explained later in brcm,link-signal-sources). Even if a LED is hardware
-controlled you are still able to make it blink or light it up if it isn't,
-but you can't turn it off if the hardware decides to light it up. For this
-reason, hardware controlled LEDs aren't registered as LED class devices.
-
-Required properties:
- - compatible : should be "brcm,bcm6328-leds".
- - #address-cells : must be 1.
- - #size-cells : must be 0.
- - reg : BCM6328 LED controller address and size.
-
-Optional properties:
- - brcm,serial-leds : Boolean, enables Serial LEDs.
- Default : false
- - brcm,serial-mux : Boolean, enables Serial LEDs multiplexing.
- Default : false
- - brcm,serial-clk-low : Boolean, makes clock signal active low.
- Default : false
- - brcm,serial-dat-low : Boolean, makes data signal active low.
- Default : false
- - brcm,serial-shift-inv : Boolean, inverts Serial LEDs shift direction.
- Default : false
-
-Each LED is represented as a sub-node of the brcm,bcm6328-leds device.
-
-LED sub-node required properties:
- - reg : LED pin number (only LEDs 0 to 23 are valid).
-
-LED sub-node optional properties:
- a) Optional properties for sub-nodes related to software controlled LEDs:
- - label : see Documentation/devicetree/bindings/leds/common.txt
- - active-low : Boolean, makes LED active low.
- Default : false
- - default-state : see
- Documentation/devicetree/bindings/leds/common.txt
- - linux,default-trigger : see
- Documentation/devicetree/bindings/leds/common.txt
-
- b) Optional properties for sub-nodes related to hardware controlled LEDs:
- - brcm,hardware-controlled : Boolean, makes this LED hardware controlled.
- Default : false
- - brcm,link-signal-sources : An array of hardware link
- signal sources. Up to four link hardware signals can get muxed into
- these LEDs. Only valid for LEDs 0 to 7, where LED signals 0 to 3 may
- be muxed to LEDs 0 to 3, and signals 4 to 7 may be muxed to LEDs
- 4 to 7. A signal can be muxed to more than one LED, and one LED can
- have more than one source signal.
- - brcm,activity-signal-sources : An array of hardware activity
- signal sources. Up to four activity hardware signals can get muxed into
- these LEDs. Only valid for LEDs 0 to 7, where LED signals 0 to 3 may
- be muxed to LEDs 0 to 3, and signals 4 to 7 may be muxed to LEDs
- 4 to 7. A signal can be muxed to more than one LED, and one LED can
- have more than one source signal.
-
-Examples:
-Scenario 1 : BCM6328 with 4 EPHY LEDs
- leds0: led-controller@10000800 {
- compatible = "brcm,bcm6328-leds";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x10000800 0x24>;
-
- alarm_red@2 {
- reg = <2>;
- active-low;
- label = "red:alarm";
- };
- inet_green@3 {
- reg = <3>;
- active-low;
- label = "green:inet";
- };
- power_green@4 {
- reg = <4>;
- active-low;
- label = "green:power";
- default-state = "on";
- };
- ephy0_spd@17 {
- reg = <17>;
- brcm,hardware-controlled;
- };
- ephy1_spd@18 {
- reg = <18>;
- brcm,hardware-controlled;
- };
- ephy2_spd@19 {
- reg = <19>;
- brcm,hardware-controlled;
- };
- ephy3_spd@20 {
- reg = <20>;
- brcm,hardware-controlled;
- };
- };
-
-Scenario 2 : BCM63268 with Serial/GPHY0 LEDs
- leds0: led-controller@10001900 {
- compatible = "brcm,bcm6328-leds";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x10001900 0x24>;
- brcm,serial-leds;
- brcm,serial-dat-low;
- brcm,serial-shift-inv;
-
- gphy0_spd0@0 {
- reg = <0>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <0>;
- };
- gphy0_spd1@1 {
- reg = <1>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <1>;
- };
- inet_red@2 {
- reg = <2>;
- active-low;
- label = "red:inet";
- };
- dsl_green@3 {
- reg = <3>;
- active-low;
- label = "green:dsl";
- };
- usb_green@4 {
- reg = <4>;
- active-low;
- label = "green:usb";
- };
- wps_green@7 {
- reg = <7>;
- active-low;
- label = "green:wps";
- };
- inet_green@8 {
- reg = <8>;
- active-low;
- label = "green:inet";
- };
- ephy0_act@9 {
- reg = <9>;
- brcm,hardware-controlled;
- };
- ephy1_act@10 {
- reg = <10>;
- brcm,hardware-controlled;
- };
- ephy2_act@11 {
- reg = <11>;
- brcm,hardware-controlled;
- };
- gphy0_act@12 {
- reg = <12>;
- brcm,hardware-controlled;
- };
- ephy0_spd@13 {
- reg = <13>;
- brcm,hardware-controlled;
- };
- ephy1_spd@14 {
- reg = <14>;
- brcm,hardware-controlled;
- };
- ephy2_spd@15 {
- reg = <15>;
- brcm,hardware-controlled;
- };
- power_green@20 {
- reg = <20>;
- active-low;
- label = "green:power";
- default-state = "on";
- };
- };
-
-Scenario 3 : BCM6362 with 1 LED for each EPHY
- leds0: led-controller@10001900 {
- compatible = "brcm,bcm6328-leds";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x10001900 0x24>;
-
- usb@0 {
- reg = <0>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <0>;
- brcm,activity-signal-sources = <0>;
- /* USB link/activity routed to USB LED */
- };
- inet@1 {
- reg = <1>;
- brcm,hardware-controlled;
- brcm,activity-signal-sources = <1>;
- /* INET activity routed to INET LED */
- };
- ephy0@4 {
- reg = <4>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <4>;
- /* EPHY0 link routed to EPHY0 LED */
- };
- ephy1@5 {
- reg = <5>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <5>;
- /* EPHY1 link routed to EPHY1 LED */
- };
- ephy2@6 {
- reg = <6>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <6>;
- /* EPHY2 link routed to EPHY2 LED */
- };
- ephy3@7 {
- reg = <7>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <7>;
- /* EPHY3 link routed to EPHY3 LED */
- };
- power_green@20 {
- reg = <20>;
- active-low;
- label = "green:power";
- default-state = "on";
- };
- };
-
-Scenario 4 : BCM6362 with 1 LED for all EPHYs
- leds0: led-controller@10001900 {
- compatible = "brcm,bcm6328-leds";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x10001900 0x24>;
-
- usb@0 {
- reg = <0>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <0 1>;
- brcm,activity-signal-sources = <0 1>;
- /* USB/INET link/activity routed to USB LED */
- };
- ephy@4 {
- reg = <4>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <4 5 6 7>;
- /* EPHY0/1/2/3 link routed to EPHY0 LED */
- };
- power_green@20 {
- reg = <20>;
- active-low;
- label = "green:power";
- default-state = "on";
- };
- };
-
-Scenario 5 : BCM6362 with EPHY LEDs swapped
- leds0: led-controller@10001900 {
- compatible = "brcm,bcm6328-leds";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x10001900 0x24>;
-
- usb@0 {
- reg = <0>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <0>;
- brcm,activity-signal-sources = <0 1>;
- /* USB link/act and INET act routed to USB LED */
- };
- ephy0@4 {
- reg = <4>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <7>;
- /* EPHY3 link routed to EPHY0 LED */
- };
- ephy1@5 {
- reg = <5>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <6>;
- /* EPHY2 link routed to EPHY1 LED */
- };
- ephy2@6 {
- reg = <6>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <5>;
- /* EPHY1 link routed to EPHY2 LED */
- };
- ephy3@7 {
- reg = <7>;
- brcm,hardware-controlled;
- brcm,link-signal-sources = <4>;
- /* EPHY0 link routed to EPHY3 LED */
- };
- power_green@20 {
- reg = <20>;
- active-low;
- label = "green:power";
- default-state = "on";
- };
- };
diff --git a/Documentation/devicetree/bindings/leds/leds-bcm6328.yaml b/Documentation/devicetree/bindings/leds/leds-bcm6328.yaml
new file mode 100644
index 000000000000..51cc0d82c12e
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-bcm6328.yaml
@@ -0,0 +1,404 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/leds-bcm6328.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LEDs connected to Broadcom BCM6328 controller
+
+maintainers:
+ - Álvaro Fernández Rojas <noltari@gmail.com>
+
+description: |
+ This controller is present on BCM6318, BCM6328, BCM6362 and BCM63268.
+ In these SoCs it's possible to control LEDs either as GPIOs or by hardware.
+ However, on some devices there are Serial LEDs (LEDs connected to a 74x164
+ controller), which can be controlled either by software (exporting the
+ 74x164 as spi-gpio, see
+ Documentation/devicetree/bindings/gpio/fairchild,74hc595.yaml) or by
+ hardware using this driver.
+ Some of these Serial LEDs are hardware controlled (e.g. ethernet LEDs) and
+ exporting the 74x164 as spi-gpio prevents those LEDs from being hardware
+ controlled, so the only way to keep them working is to use this driver.
+
+ The BCM6328 LED controller has a HWDIS register, which controls whether a
+ LED should be controlled by a hardware signal instead of the MODE register
+ value, with 0 meaning hardware control enabled and 1 hardware control
+ disabled. This is usually 1:1 for hardware to LED signals, but through the
+ activity/link registers there is some limited control over rerouting the
+ LEDs (as explained later in brcm,link-signal-sources). Even if a LED is
+ hardware controlled, software can still make it blink or light it up when
+ the hardware hasn't, but it can't turn the LED off once the hardware
+ decides to light it up. For this reason, hardware controlled LEDs aren't
+ registered as LED class devices.
+
+ Each LED is represented as a sub-node of the brcm,bcm6328-leds device.
+
+properties:
+ compatible:
+ const: brcm,bcm6328-leds
+
+ reg:
+ maxItems: 1
+
+ brcm,serial-leds:
+ type: boolean
+ description: Enables Serial LEDs.
+
+ brcm,serial-mux:
+ type: boolean
+ description: Enables Serial LEDs multiplexing.
+
+ brcm,serial-clk-low:
+ type: boolean
+ description: Makes clock signal active low.
+
+ brcm,serial-dat-low:
+ type: boolean
+ description: Makes data signal active low.
+
+ brcm,serial-shift-inv:
+ type: boolean
+ description: Inverts Serial LEDs shift direction.
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "@[a-f0-9]+$":
+ type: object
+
+ $ref: common.yaml#
+
+ properties:
+ reg:
+ items:
+ - maximum: 23
+ description: LED pin number (only LEDs 0 to 23 are valid).
+
+ active-low:
+ type: boolean
+ description: Makes LED active low.
+
+ brcm,hardware-controlled:
+ type: boolean
+ description: Makes this LED hardware controlled.
+
+ brcm,link-signal-sources:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 4
+ description: >
+ An array of hardware link signal sources. Up to four link hardware
+ signals can get muxed into these LEDs. Only valid for LEDs 0 to 7,
+ where LED signals 0 to 3 may be muxed to LEDs 0 to 3, and signals 4 to
+ 7 may be muxed to LEDs 4 to 7. A signal can be muxed to more than one
+ LED, and one LED can have more than one source signal.
+
+ brcm,activity-signal-sources:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 1
+ maxItems: 4
+ description: >
+ An array of hardware activity signal sources. Up to four activity
+ hardware signals can get muxed into these LEDs. Only valid for LEDs 0
+ to 7, where LED signals 0 to 3 may be muxed to LEDs 0 to 3, and
+ signals 4 to 7 may be muxed to LEDs 4 to 7. A signal can be muxed to
+ more than one LED, and one LED can have more than one source signal.
+
+ required:
+ - reg
+
+ unevaluatedProperties: false
+
+required:
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ /* BCM6328 with 4 EPHY LEDs */
+ led-controller@10000800 {
+ compatible = "brcm,bcm6328-leds";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10000800 0x24>;
+
+ alarm_red@2 {
+ reg = <2>;
+ active-low;
+ label = "red:alarm";
+ };
+
+ inet_green@3 {
+ reg = <3>;
+ active-low;
+ label = "green:inet";
+ };
+
+ power_green@4 {
+ reg = <4>;
+ active-low;
+ label = "green:power";
+ default-state = "on";
+ };
+
+ ephy0_spd@17 {
+ reg = <17>;
+ brcm,hardware-controlled;
+ };
+
+ ephy1_spd@18 {
+ reg = <18>;
+ brcm,hardware-controlled;
+ };
+
+ ephy2_spd@19 {
+ reg = <19>;
+ brcm,hardware-controlled;
+ };
+
+ ephy3_spd@20 {
+ reg = <20>;
+ brcm,hardware-controlled;
+ };
+ };
+ - |
+ /* BCM63268 with Serial/GPHY0 LEDs */
+ led-controller@10001900 {
+ compatible = "brcm,bcm6328-leds";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10001900 0x24>;
+ brcm,serial-leds;
+ brcm,serial-dat-low;
+ brcm,serial-shift-inv;
+
+ gphy0_spd0@0 {
+ reg = <0>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <0>;
+ };
+
+ gphy0_spd1@1 {
+ reg = <1>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <1>;
+ };
+
+ inet_red@2 {
+ reg = <2>;
+ active-low;
+ label = "red:inet";
+ };
+
+ dsl_green@3 {
+ reg = <3>;
+ active-low;
+ label = "green:dsl";
+ };
+
+ usb_green@4 {
+ reg = <4>;
+ active-low;
+ label = "green:usb";
+ };
+
+ wps_green@7 {
+ reg = <7>;
+ active-low;
+ label = "green:wps";
+ };
+
+ inet_green@8 {
+ reg = <8>;
+ active-low;
+ label = "green:inet";
+ };
+
+ ephy0_act@9 {
+ reg = <9>;
+ brcm,hardware-controlled;
+ };
+
+ ephy1_act@10 {
+ reg = <10>;
+ brcm,hardware-controlled;
+ };
+
+ ephy2_act@11 {
+ reg = <11>;
+ brcm,hardware-controlled;
+ };
+
+ gphy0_act@12 {
+ reg = <12>;
+ brcm,hardware-controlled;
+ };
+
+ ephy0_spd@13 {
+ reg = <13>;
+ brcm,hardware-controlled;
+ };
+
+ ephy1_spd@14 {
+ reg = <14>;
+ brcm,hardware-controlled;
+ };
+
+ ephy2_spd@15 {
+ reg = <15>;
+ brcm,hardware-controlled;
+ };
+
+ power_green@20 {
+ reg = <20>;
+ active-low;
+ label = "green:power";
+ default-state = "on";
+ };
+ };
+ - |
+ /* BCM6362 with 1 LED for each EPHY */
+ led-controller@10001900 {
+ compatible = "brcm,bcm6328-leds";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10001900 0x24>;
+
+ usb@0 {
+ reg = <0>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <0>;
+ brcm,activity-signal-sources = <0>;
+ /* USB link/activity routed to USB LED */
+ };
+
+ inet@1 {
+ reg = <1>;
+ brcm,hardware-controlled;
+ brcm,activity-signal-sources = <1>;
+ /* INET activity routed to INET LED */
+ };
+
+ ephy0@4 {
+ reg = <4>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <4>;
+ /* EPHY0 link routed to EPHY0 LED */
+ };
+
+ ephy1@5 {
+ reg = <5>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <5>;
+ /* EPHY1 link routed to EPHY1 LED */
+ };
+
+ ephy2@6 {
+ reg = <6>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <6>;
+ /* EPHY2 link routed to EPHY2 LED */
+ };
+
+ ephy3@7 {
+ reg = <7>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <7>;
+ /* EPHY3 link routed to EPHY3 LED */
+ };
+
+ power_green@20 {
+ reg = <20>;
+ active-low;
+ label = "green:power";
+ default-state = "on";
+ };
+ };
+ - |
+ /* BCM6362 with 1 LED for all EPHYs */
+ led-controller@10001900 {
+ compatible = "brcm,bcm6328-leds";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10001900 0x24>;
+
+ usb@0 {
+ reg = <0>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <0 1>;
+ brcm,activity-signal-sources = <0 1>;
+ /* USB/INET link/activity routed to USB LED */
+ };
+
+ ephy@4 {
+ reg = <4>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <4 5 6 7>;
+ /* EPHY0/1/2/3 link routed to EPHY0 LED */
+ };
+
+ power_green@20 {
+ reg = <20>;
+ active-low;
+ label = "green:power";
+ default-state = "on";
+ };
+ };
+ - |
+ /* BCM6362 with EPHY LEDs swapped */
+ led-controller@10001900 {
+ compatible = "brcm,bcm6328-leds";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x10001900 0x24>;
+
+ usb@0 {
+ reg = <0>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <0>;
+ brcm,activity-signal-sources = <0 1>;
+ /* USB link/act and INET act routed to USB LED */
+ };
+
+ ephy0@4 {
+ reg = <4>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <7>;
+ /* EPHY3 link routed to EPHY0 LED */
+ };
+
+ ephy1@5 {
+ reg = <5>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <6>;
+ /* EPHY2 link routed to EPHY1 LED */
+ };
+
+ ephy2@6 {
+ reg = <6>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <5>;
+ /* EPHY1 link routed to EPHY2 LED */
+ };
+
+ ephy3@7 {
+ reg = <7>;
+ brcm,hardware-controlled;
+ brcm,link-signal-sources = <4>;
+ /* EPHY0 link routed to EPHY3 LED */
+ };
+
+ power_green@20 {
+ reg = <20>;
+ active-low;
+ label = "green:power";
+ default-state = "on";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/leds/leds-mt6360.yaml b/Documentation/devicetree/bindings/leds/leds-mt6360.yaml
new file mode 100644
index 000000000000..b2fe6eb89389
--- /dev/null
+++ b/Documentation/devicetree/bindings/leds/leds-mt6360.yaml
@@ -0,0 +1,159 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/leds/leds-mt6360.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LED driver for MT6360 PMIC from MediaTek Integrated
+
+maintainers:
+ - Gene Chen <gene_chen@richtek.com>
+
+description: |
+ This module is part of the MT6360 MFD device; see
+ Documentation/devicetree/bindings/mfd/mt6360.yaml.
+ The MT6360 LED controller provides a 2-channel flash LED with torch/strobe
+ mode and a 4-channel RGB LED supporting register/flash/breath modes.
+
+properties:
+ compatible:
+ const: mediatek,mt6360-led
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^(multi-)?led@[0-5]$":
+ type: object
+ $ref: common.yaml#
+ description:
+ Properties for a single LED.
+
+ properties:
+ reg:
+ description: Index of the LED.
+ enum:
+ - 0 # LED output ISINK1
+ - 1 # LED output ISINK2
+ - 2 # LED output ISINK3
+ - 3 # LED output ISINKML
+ - 4 # LED output FLASH1
+ - 5 # LED output FLASH2
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/leds/common.h>
+ led-controller {
+ compatible = "mediatek,mt6360-led";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ multi-led@0 {
+ reg = <0>;
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_RGB>;
+ led-max-microamp = <24000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ led@0 {
+ reg = <0>;
+ color = <LED_COLOR_ID_RED>;
+ };
+ led@1 {
+ reg = <1>;
+ color = <LED_COLOR_ID_GREEN>;
+ };
+ led@2 {
+ reg = <2>;
+ color = <LED_COLOR_ID_BLUE>;
+ };
+ };
+ led@3 {
+ reg = <3>;
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_WHITE>;
+ led-max-microamp = <150000>;
+ };
+ led@4 {
+ reg = <4>;
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ function-enumerator = <1>;
+ led-max-microamp = <200000>;
+ flash-max-microamp = <500000>;
+ flash-max-timeout-us = <1024000>;
+ };
+ led@5 {
+ reg = <5>;
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ function-enumerator = <2>;
+ led-max-microamp = <200000>;
+ flash-max-microamp = <500000>;
+ flash-max-timeout-us = <1024000>;
+ };
+ };
+
+ - |
+ #include <dt-bindings/leds/common.h>
+
+ led-controller {
+ compatible = "mediatek,mt6360-led";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ led@0 {
+ reg = <0>;
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_RED>;
+ led-max-microamp = <24000>;
+ };
+ led@1 {
+ reg = <1>;
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_GREEN>;
+ led-max-microamp = <24000>;
+ };
+ led@2 {
+ reg = <2>;
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_BLUE>;
+ led-max-microamp = <24000>;
+ };
+ led@3 {
+ reg = <3>;
+ function = LED_FUNCTION_INDICATOR;
+ color = <LED_COLOR_ID_WHITE>;
+ led-max-microamp = <150000>;
+ };
+ led@4 {
+ reg = <4>;
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ function-enumerator = <1>;
+ led-max-microamp = <200000>;
+ flash-max-microamp = <500000>;
+ flash-max-timeout-us = <1024000>;
+ };
+ led@5 {
+ reg = <5>;
+ function = LED_FUNCTION_FLASH;
+ color = <LED_COLOR_ID_WHITE>;
+ function-enumerator = <2>;
+ led-max-microamp = <200000>;
+ flash-max-microamp = <500000>;
+ flash-max-timeout-us = <1024000>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml b/Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
index c4255f42e801..5c5c328b3134 100644
--- a/Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
+++ b/Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
@@ -27,14 +27,20 @@ properties:
for example for the display controller, the system management
controller and the NVMe coprocessor.
items:
- - const: apple,t8103-asc-mailbox
+ - enum:
+ - apple,t8103-asc-mailbox
+ - apple,t6000-asc-mailbox
+ - const: apple,asc-mailbox-v4
- description:
M3 mailboxes are an older variant with a slightly different MMIO
interface still found on the M1. It is used for the Thunderbolt
co-processors.
items:
- - const: apple,t8103-m3-mailbox
+ - enum:
+ - apple,t8103-m3-mailbox
+ - apple,t6000-m3-mailbox
+ - const: apple,m3-mailbox-v2
reg:
maxItems: 1
@@ -71,7 +77,7 @@ additionalProperties: false
examples:
- |
mailbox@77408000 {
- compatible = "apple,t8103-asc-mailbox";
+ compatible = "apple,t8103-asc-mailbox", "apple,asc-mailbox-v4";
reg = <0x77408000 0x4000>;
interrupts = <1 583 4>, <1 584 4>, <1 585 4>, <1 586 4>;
interrupt-names = "send-empty", "send-not-empty",
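For the newly supported t6000 parts, a node differs only in the first compatible entry; a minimal sketch, assuming the same register layout and interrupt wiring as the t8103 example above (the unit address is illustrative):

    mailbox@77408000 {
        compatible = "apple,t6000-asc-mailbox", "apple,asc-mailbox-v4";
        reg = <0x77408000 0x4000>;
        interrupts = <1 583 4>, <1 584 4>, <1 585 4>, <1 586 4>;
        interrupt-names = "send-empty", "send-not-empty",
                          "recv-empty", "recv-not-empty";
    };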
diff --git a/Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml b/Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml
index e864d798168d..d433e496ec6e 100644
--- a/Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml
+++ b/Documentation/devicetree/bindings/mailbox/ti,omap-mailbox.yaml
@@ -180,15 +180,6 @@ allOf:
compatible:
enum:
- ti,am654-mailbox
- then:
- required:
- - interrupt-parent
-
- - if:
- properties:
- compatible:
- enum:
- - ti,am654-mailbox
- ti,am64-mailbox
then:
properties:
diff --git a/Documentation/devicetree/bindings/media/i2c/maxim,max96712.yaml b/Documentation/devicetree/bindings/media/i2c/maxim,max96712.yaml
new file mode 100644
index 000000000000..444f24838d3d
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/maxim,max96712.yaml
@@ -0,0 +1,111 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2021 Renesas Electronics Corp.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/media/i2c/maxim,max96712.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Quad GMSL2 to CSI-2 Deserializer with GMSL1 Compatibility
+
+maintainers:
+ - Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+
+description: |
+ The MAX96712 deserializer converts GMSL2 or GMSL1 serial inputs into MIPI
+ CSI-2 D-PHY or C-PHY formatted outputs. The device allows each link to
+ simultaneously transmit bidirectional control-channel data while forward
+ video transmissions are in progress. The MAX96712 can accommodate as many as
+ four remotely located sensors using industry-standard coax or STP
+ interconnects.
+
+ Each GMSL2 serial link operates at a fixed rate of 3Gbps or 6Gbps in the
+ forward direction and 187.5Mbps in the reverse direction. In GMSL1 mode, the
+ MAX96712 can be paired with first-generation 3.12Gbps or 1.5Gbps GMSL1
+ serializers or operate up to 3.12Gbps with GMSL2 serializers in GMSL1 mode.
+
+properties:
+ compatible:
+ const: maxim,max96712
+
+ reg:
+ description: I2C device address
+ maxItems: 1
+
+ enable-gpios: true
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: GMSL Input 0
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: GMSL Input 1
+
+ port@2:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: GMSL Input 2
+
+ port@3:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: GMSL Input 3
+
+ port@4:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: CSI-2 Output
+
+ properties:
+ endpoint:
+ $ref: /schemas/media/video-interfaces.yaml#
+ unevaluatedProperties: false
+
+ properties:
+ data-lanes: true
+
+ required:
+ - data-lanes
+
+ required:
+ - port@4
+
+required:
+ - compatible
+ - reg
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c@e6508000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ reg = <0 0xe6508000>;
+
+ gmsl0: gmsl-deserializer@49 {
+ compatible = "maxim,max96712";
+ reg = <0x49>;
+ enable-gpios = <&pca9654_a 0 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@4 {
+ reg = <4>;
+ max96712_out0: endpoint {
+ clock-lanes = <0>;
+ data-lanes = <1 2 3 4>;
+ remote-endpoint = <&csi40_in>;
+ };
+ };
+ };
+ };
+ };
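The example above only connects the CSI-2 output (port@4); a GMSL input is wired the same way through one of ports 0-3. A sketch, where &serializer_out is a hypothetical endpoint in a remote GMSL serializer node:

    port@0 {
        reg = <0>;
        max96712_in0: endpoint {
            remote-endpoint = <&serializer_out>;
        };
    };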
diff --git a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
index 1ef849dc74d7..e2e6e9aa0fe6 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx7-mipi-csi2.yaml
@@ -81,14 +81,12 @@ properties:
data-lanes:
description:
Note that 'fsl,imx7-mipi-csi2' only supports up to 2 data lines.
+ minItems: 1
items:
- minItems: 1
- maxItems: 4
- items:
- - const: 1
- - const: 2
- - const: 3
- - const: 4
+ - const: 1
+ - const: 2
+ - const: 3
+ - const: 4
required:
- data-lanes
diff --git a/Documentation/devicetree/bindings/media/nxp,imx8mq-mipi-csi2.yaml b/Documentation/devicetree/bindings/media/nxp,imx8mq-mipi-csi2.yaml
index 9c04fa85ee5c..1b3e1c4b99ed 100644
--- a/Documentation/devicetree/bindings/media/nxp,imx8mq-mipi-csi2.yaml
+++ b/Documentation/devicetree/bindings/media/nxp,imx8mq-mipi-csi2.yaml
@@ -87,14 +87,12 @@ properties:
properties:
data-lanes:
+ minItems: 1
items:
- minItems: 1
- maxItems: 4
- items:
- - const: 1
- - const: 2
- - const: 3
- - const: 4
+ - const: 1
+ - const: 2
+ - const: 3
+ - const: 4
required:
- data-lanes
diff --git a/Documentation/devicetree/bindings/memory-controllers/ti,gpmc.yaml b/Documentation/devicetree/bindings/memory-controllers/ti,gpmc.yaml
index 64dc9d398d9a..e188a4bf755c 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ti,gpmc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/ti,gpmc.yaml
@@ -129,11 +129,8 @@ patternProperties:
The child device node represents the device connected to the GPMC
bus. The device can be a NAND chip, SRAM device, NOR device
or an ASIC.
+ $ref: "ti,gpmc-child.yaml"
- allOf:
- - $ref: "ti,gpmc-child.yaml"
-
- unevaluatedProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/mfd/cirrus,madera.yaml b/Documentation/devicetree/bindings/mfd/cirrus,madera.yaml
index 499c62c04daa..68c75a517c92 100644
--- a/Documentation/devicetree/bindings/mfd/cirrus,madera.yaml
+++ b/Documentation/devicetree/bindings/mfd/cirrus,madera.yaml
@@ -221,7 +221,6 @@ required:
- '#gpio-cells'
- interrupt-controller
- '#interrupt-cells'
- - interrupt-parent
- interrupts
- AVDD-supply
- DBVDD1-supply
@@ -246,8 +245,7 @@ examples:
interrupt-controller;
#interrupt-cells = <2>;
- interrupts = <&host_irq1>;
- interrupt-parent = <&gic>;
+ interrupts = <4 1 0>;
gpio-controller;
#gpio-cells = <2>;
diff --git a/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml b/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
index 0faa4da6c7c8..d1f53bd449f7 100644
--- a/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
+++ b/Documentation/devicetree/bindings/mfd/google,cros-ec.yaml
@@ -8,7 +8,6 @@ title: ChromeOS Embedded Controller
maintainers:
- Benson Leung <bleung@chromium.org>
- - Enric Balletbo i Serra <enric.balletbo@collabora.com>
- Guenter Roeck <groeck@chromium.org>
description:
diff --git a/Documentation/devicetree/bindings/mips/loongson/ls2k-reset.yaml b/Documentation/devicetree/bindings/mips/loongson/ls2k-reset.yaml
new file mode 100644
index 000000000000..20b5836efd90
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/loongson/ls2k-reset.yaml
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/mips/loongson/ls2k-reset.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Loongson 2K1000 PM Controller
+
+maintainers:
+ - Qing Zhang <zhangqing@loongson.cn>
+
+description: |
+ This controller can be found in Loongson-2K1000 SoC systems.
+
+properties:
+ compatible:
+ const: loongson,ls2k-pm
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ bus {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ pm: reset-controller@1fe07000 {
+ compatible = "loongson,ls2k-pm";
+ reg = <0 0x1fe07000 0 0x422>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/mmc/arm,pl18x.yaml b/Documentation/devicetree/bindings/mmc/arm,pl18x.yaml
index a4f74bec68a3..1e69a5a42439 100644
--- a/Documentation/devicetree/bindings/mmc/arm,pl18x.yaml
+++ b/Documentation/devicetree/bindings/mmc/arm,pl18x.yaml
@@ -185,6 +185,9 @@ examples:
clock-names = "mclk", "apb_pclk";
};
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
mmc@80126000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x80126000 0x1000>;
@@ -206,12 +209,12 @@ examples:
vqmmc-supply = <&vmmci>;
};
+ - |
mmc@101f6000 {
compatible = "arm,pl18x", "arm,primecell";
reg = <0x101f6000 0x1000>;
clocks = <&sdiclk>, <&pclksdi>;
clock-names = "mclk", "apb_pclk";
- interrupt-parent = <&vica>;
interrupts = <22>;
max-frequency = <400000>;
bus-width = <4>;
@@ -226,6 +229,7 @@ examples:
vmmc-supply = <&vmmc_regulator>;
};
+ - |
mmc@52007000 {
compatible = "arm,pl18x", "arm,primecell";
arm,primecell-periphid = <0x10153180>;
diff --git a/Documentation/devicetree/bindings/mux/gpio-mux.yaml b/Documentation/devicetree/bindings/mux/gpio-mux.yaml
index 0a7c8d64981a..ee4de9fbaf4d 100644
--- a/Documentation/devicetree/bindings/mux/gpio-mux.yaml
+++ b/Documentation/devicetree/bindings/mux/gpio-mux.yaml
@@ -26,7 +26,10 @@ properties:
List of gpios used to control the multiplexer, least significant bit first.
'#mux-control-cells':
- const: 0
+ enum: [ 0, 1 ]
+
+ '#mux-state-cells':
+ enum: [ 1, 2 ]
idle-state:
default: -1
@@ -34,7 +37,11 @@ properties:
required:
- compatible
- mux-gpios
- - "#mux-control-cells"
+anyOf:
+ - required:
+ - "#mux-control-cells"
+ - required:
+ - "#mux-state-cells"
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/mux/mux-consumer.yaml b/Documentation/devicetree/bindings/mux/mux-consumer.yaml
index 7af93298ab5c..d3d854967359 100644
--- a/Documentation/devicetree/bindings/mux/mux-consumer.yaml
+++ b/Documentation/devicetree/bindings/mux/mux-consumer.yaml
@@ -25,6 +25,17 @@ description: |
strings to label each of the mux controllers listed in the "mux-controls"
property.
+ If it is required to provide the state that the mux controller needs to
+ be set to, the property "mux-states" must be used. An optional property
+ "mux-state-names" can be used to provide a list of strings, to label
+ each of the multiplexer states listed in the "mux-states" property.
+
+ Properties "mux-controls" and "mux-states" can be used depending on how
+ the consumers want to control the mux controller. If the consumer needs
+ needs to set multiple states in a mux controller, then property
+ "mux-controls" can be used. If the consumer needs to set the mux
+ controller to a given state then property "mux-states" can be used.
+
mux-ctrl-specifier typically encodes the chip-relative mux controller number.
If the mux controller chip only provides a single mux controller, the
mux-ctrl-specifier can typically be left out.
@@ -35,12 +46,22 @@ properties:
mux-controls:
$ref: /schemas/types.yaml#/definitions/phandle-array
+ mux-states:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+
mux-control-names:
description:
Devices that use more than a single mux controller can use the
"mux-control-names" property to map the name of the requested mux
controller to an index into the list given by the "mux-controls" property.
+ mux-state-names:
+ description:
+ Devices that use more than a single multiplexer state can use the
+ "mux-state-names" property to map the name of the requested mux
+ state to an index into the list given by the "mux-states"
+ property.
+
additionalProperties: true
...
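A minimal sketch of the two consumer styles described above, assuming a mux controller labeled mux (a given provider exposes either '#mux-control-cells' = <1> or '#mux-state-cells' = <2>, so in practice the two consumers below would point at providers configured accordingly; all node names are illustrative):

    /* consumer that changes states at runtime via the mux control framework */
    consumer-a {
        mux-controls = <&mux 0>;
        mux-control-names = "adc";
    };

    /* consumer that only ever needs mux 0 set to state 1 */
    consumer-b {
        mux-states = <&mux 0 1>;
        mux-state-names = "adc";
    };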
diff --git a/Documentation/devicetree/bindings/mux/mux-controller.yaml b/Documentation/devicetree/bindings/mux/mux-controller.yaml
index 736a84c3b6a5..c855fbad3884 100644
--- a/Documentation/devicetree/bindings/mux/mux-controller.yaml
+++ b/Documentation/devicetree/bindings/mux/mux-controller.yaml
@@ -25,7 +25,9 @@ description: |
--------------------
Mux controller nodes must specify the number of cells used for the
- specifier using the '#mux-control-cells' property.
+ specifier using the '#mux-control-cells' or '#mux-state-cells' property.
+ The value of '#mux-state-cells' will always be one greater than the value
+ of '#mux-control-cells'.
Optionally, mux controller nodes can also specify the state the mux should
have when it is idle. The idle-state property is used for this. If the
@@ -67,6 +69,8 @@ select:
pattern: '^mux-controller'
- required:
- '#mux-control-cells'
+ - required:
+ - '#mux-state-cells'
properties:
$nodename:
@@ -75,6 +79,9 @@ properties:
'#mux-control-cells':
enum: [ 0, 1 ]
+ '#mux-state-cells':
+ enum: [ 1, 2 ]
+
idle-state:
$ref: /schemas/types.yaml#/definitions/int32
minimum: -2
@@ -179,4 +186,21 @@ examples:
};
};
};
+
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ mux1: mux-controller {
+ compatible = "gpio-mux";
+ #mux-state-cells = <1>;
+ mux-gpios = <&exp_som 2 GPIO_ACTIVE_HIGH>;
+ };
+
+ transceiver4: can-phy4 {
+ compatible = "ti,tcan1042";
+ #phy-cells = <0>;
+ max-bitrate = <5000000>;
+ standby-gpios = <&exp_som 7 GPIO_ACTIVE_HIGH>;
+ mux-states = <&mux1 1>;
+ };
...
diff --git a/Documentation/devicetree/bindings/net/actions,owl-emac.yaml b/Documentation/devicetree/bindings/net/actions,owl-emac.yaml
index 1626e0a821b0..d30fada2ac39 100644
--- a/Documentation/devicetree/bindings/net/actions,owl-emac.yaml
+++ b/Documentation/devicetree/bindings/net/actions,owl-emac.yaml
@@ -51,6 +51,10 @@ properties:
description:
Phandle to the device containing custom config.
+ mdio:
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
index 407586bc366b..6a4831fd3616 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
+++ b/Documentation/devicetree/bindings/net/allwinner,sun8i-a83t-emac.yaml
@@ -122,6 +122,7 @@ allOf:
mdio-mux:
type: object
+ unevaluatedProperties: false
properties:
compatible:
@@ -132,17 +133,18 @@ allOf:
description:
Phandle to EMAC MDIO.
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
mdio@1:
- type: object
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
description: Internal MDIO Bus
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
compatible:
const: allwinner,sun8i-h3-mdio-internal
@@ -168,16 +170,11 @@ allOf:
mdio@2:
- type: object
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
description: External MDIO Bus (H3 only)
properties:
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
reg:
const: 2
diff --git a/Documentation/devicetree/bindings/net/brcm,amac.txt b/Documentation/devicetree/bindings/net/brcm,amac.txt
deleted file mode 100644
index 0120ebe93262..000000000000
--- a/Documentation/devicetree/bindings/net/brcm,amac.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Broadcom AMAC Ethernet Controller Device Tree Bindings
--------------------------------------------------------------
-
-Required properties:
- - compatible: "brcm,amac"
- "brcm,nsp-amac"
- "brcm,ns2-amac"
- - reg: Address and length of the register set for the device. It
- contains the information of registers in the same order as
- described by reg-names
- - reg-names: Names of the registers.
- "amac_base": Address and length of the GMAC registers
- "idm_base": Address and length of the GMAC IDM registers
- (required for NSP and Northstar2)
- "nicpm_base": Address and length of the NIC Port Manager
- registers (required for Northstar2)
- - interrupts: Interrupt number
-
-The MAC address will be determined using the optional properties
-defined in ethernet.txt.
-
-Examples:
-
-amac0: ethernet@18022000 {
- compatible = "brcm,nsp-amac";
- reg = <0x18022000 0x1000>,
- <0x18110000 0x1000>;
- reg-names = "amac_base", "idm_base";
- interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
-};
diff --git a/Documentation/devicetree/bindings/net/brcm,amac.yaml b/Documentation/devicetree/bindings/net/brcm,amac.yaml
new file mode 100644
index 000000000000..8f031932c8af
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/brcm,amac.yaml
@@ -0,0 +1,88 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/brcm,amac.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom AMAC Ethernet Controller Device Tree Bindings
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+allOf:
+ - $ref: "ethernet-controller.yaml#"
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,amac
+ then:
+ properties:
+ reg:
+ maxItems: 2
+ reg-names:
+ maxItems: 2
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,nsp-amac
+ then:
+ properties:
+ reg:
+ minItems: 2
+ maxItems: 2
+ reg-names:
+ minItems: 2
+ maxItems: 2
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,ns2-amac
+ then:
+ properties:
+ reg:
+ minItems: 3
+ reg-names:
+ minItems: 3
+
+properties:
+ compatible:
+ enum:
+ - brcm,amac
+ - brcm,nsp-amac
+ - brcm,ns2-amac
+
+ interrupts:
+ maxItems: 1
+
+ reg:
+ minItems: 1
+ maxItems: 3
+
+ reg-names:
+ minItems: 1
+ items:
+ - const: amac_base
+ - const: idm_base
+ - const: nicpm_base
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ amac0: ethernet@18022000 {
+ compatible = "brcm,nsp-amac";
+ reg = <0x18022000 0x1000>,
+ <0x18110000 0x1000>;
+ reg-names = "amac_base", "idm_base";
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
+ };
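A Northstar2 node needs all three register ranges; a sketch under the same header include, with illustrative addresses and interrupt number:

    amac0: ethernet@61020000 {
        compatible = "brcm,ns2-amac";
        reg = <0x61020000 0x1000>,
              <0x61090000 0x1000>,
              <0x61030000 0x1000>;
        reg-names = "amac_base", "idm_base", "nicpm_base";
        interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
    };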
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm6368-mdio-mux.yaml b/Documentation/devicetree/bindings/net/brcm,bcm6368-mdio-mux.yaml
index 2f34fda55fd0..9ef28c2a0afc 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm6368-mdio-mux.yaml
+++ b/Documentation/devicetree/bindings/net/brcm,bcm6368-mdio-mux.yaml
@@ -15,18 +15,12 @@ description:
properties as well to generate desired MDIO transaction on appropriate bus.
allOf:
- - $ref: "mdio.yaml#"
+ - $ref: mdio-mux.yaml#
properties:
compatible:
const: brcm,bcm6368-mdio-mux
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
reg:
maxItems: 1
@@ -34,24 +28,6 @@ required:
- compatible
- reg
-patternProperties:
- '^mdio@[0-1]$':
- type: object
- properties:
- reg:
- maxItems: 1
-
- "#address-cells":
- const: 1
-
- "#size-cells":
- const: 0
-
- required:
- - reg
- - "#address-cells"
- - "#size-cells"
-
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt b/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
deleted file mode 100644
index 0b5994fba35f..000000000000
--- a/Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
+++ /dev/null
@@ -1,125 +0,0 @@
-* Broadcom BCM7xxx Ethernet Controller (GENET)
-
-Required properties:
-- compatible: should contain one of "brcm,genet-v1", "brcm,genet-v2",
- "brcm,genet-v3", "brcm,genet-v4", "brcm,genet-v5", "brcm,bcm2711-genet-v5" or
- "brcm,bcm7712-genet-v5".
-- reg: address and length of the register set for the device
-- interrupts and/or interrupts-extended: must be two cells, the first cell
- is the general purpose interrupt line, while the second cell is the
- interrupt for the ring RX and TX queues operating in ring mode. An
- optional third interrupt cell for Wake-on-LAN can be specified.
- See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- for information on the property specifics.
-- phy-mode: see ethernet.txt file in the same directory
-- #address-cells: should be 1
-- #size-cells: should be 1
-
-Optional properties:
-- clocks: When provided, must be two phandles to the functional clocks nodes
- of the GENET block. The first phandle is the main GENET clock used during
- normal operation, while the second phandle is the Wake-on-LAN clock.
-- clock-names: When provided, names of the functional clock phandles, first
- name should be "enet" and second should be "enet-wol".
-
-- phy-handle: See ethernet.txt file in the same directory; used to describe
- configurations where a PHY (internal or external) is used.
-
-- fixed-link: When the GENET interface is connected to a MoCA hardware block or
- when operating in a RGMII to RGMII type of connection, or when the MDIO bus is
- voluntarily disabled, this property should be used to describe the "fixed link".
- See Documentation/devicetree/bindings/net/fixed-link.txt for information on
- the property specifics
-
-Required child nodes:
-
-- mdio bus node: this node should always be present regardless of the PHY
- configuration of the GENET instance
-
-MDIO bus node required properties:
-
-- compatible: should contain one of "brcm,genet-mdio-v1", "brcm,genet-mdio-v2"
- "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", "brcm,genet-mdio-v5", the version
- has to match the parent node compatible property (e.g: brcm,genet-v4 pairs
- with brcm,genet-mdio-v4)
-- reg: address and length relative to the parent node base register address
-- #address-cells: address cell for MDIO bus addressing, should be 1
-- #size-cells: size of the cells for MDIO bus addressing, should be 0
-
-Ethernet PHY node properties:
-
-See Documentation/devicetree/bindings/net/phy.txt for the list of required and
-optional properties.
-
-Internal Gigabit PHY example:
-
-ethernet@f0b60000 {
- phy-mode = "internal";
- phy-handle = <&phy1>;
- mac-address = [ 00 10 18 36 23 1a ];
- compatible = "brcm,genet-v4";
- #address-cells = <0x1>;
- #size-cells = <0x1>;
- reg = <0xf0b60000 0xfc4c>;
- interrupts = <0x0 0x14 0x0>, <0x0 0x15 0x0>;
-
- mdio@e14 {
- compatible = "brcm,genet-mdio-v4";
- #address-cells = <0x1>;
- #size-cells = <0x0>;
- reg = <0xe14 0x8>;
-
- phy1: ethernet-phy@1 {
- max-speed = <1000>;
- reg = <0x1>;
- compatible = "ethernet-phy-ieee802.3-c22";
- };
- };
-};
-
-MoCA interface / MAC to MAC example:
-
-ethernet@f0b80000 {
- phy-mode = "moca";
- fixed-link = <1 0 1000 0 0>;
- mac-address = [ 00 10 18 36 24 1a ];
- compatible = "brcm,genet-v4";
- #address-cells = <0x1>;
- #size-cells = <0x1>;
- reg = <0xf0b80000 0xfc4c>;
- interrupts = <0x0 0x16 0x0>, <0x0 0x17 0x0>;
-
- mdio@e14 {
- compatible = "brcm,genet-mdio-v4";
- #address-cells = <0x1>;
- #size-cells = <0x0>;
- reg = <0xe14 0x8>;
- };
-};
-
-
-External MDIO-connected Gigabit PHY/switch:
-
-ethernet@f0ba0000 {
- phy-mode = "rgmii";
- phy-handle = <&phy0>;
- mac-address = [ 00 10 18 36 26 1a ];
- compatible = "brcm,genet-v4";
- #address-cells = <0x1>;
- #size-cells = <0x1>;
- reg = <0xf0ba0000 0xfc4c>;
- interrupts = <0x0 0x18 0x0>, <0x0 0x19 0x0>;
-
- mdio@e14 {
- compatible = "brcm,genet-mdio-v4";
- #address-cells = <0x1>;
- #size-cells = <0x0>;
- reg = <0xe14 0x8>;
-
- phy0: ethernet-phy@0 {
- max-speed = <1000>;
- reg = <0x0>;
- compatible = "ethernet-phy-ieee802.3-c22";
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml b/Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml
new file mode 100644
index 000000000000..e5af53508e25
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml
@@ -0,0 +1,145 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/brcm,bcmgenet.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM7xxx Ethernet Controller (GENET) binding
+
+maintainers:
+ - Doug Berger <opendmb@gmail.com>
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - brcm,genet-v1
+ - brcm,genet-v2
+ - brcm,genet-v3
+ - brcm,genet-v4
+ - brcm,genet-v5
+ - brcm,bcm2711-genet-v5
+ - brcm,bcm7712-genet-v5
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 2
+ items:
+ - description: general purpose interrupt line
+ - description: RX and TX rings interrupt line
+ - description: Wake-on-LAN interrupt line
+
+ clocks:
+ minItems: 1
+ items:
+ - description: main clock
+ - description: EEE clock
+ - description: Wake-on-LAN clock
+
+ clock-names:
+ minItems: 1
+ items:
+ - const: enet
+ - const: enet-eee
+ - const: enet-wol
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+patternProperties:
+ "^mdio@[0-9a-f]+$":
+ type: object
+ $ref: "brcm,unimac-mdio.yaml"
+
+ description:
+ GENET internal UniMAC MDIO bus
+
+required:
+ - reg
+ - interrupts
+ - phy-mode
+ - "#address-cells"
+ - "#size-cells"
+
+allOf:
+ - $ref: ethernet-controller.yaml
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ ethernet@f0b60000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v4";
+ reg = <0xf0b60000 0xfc4c>;
+ interrupts = <0x0 0x14 0x0>, <0x0 0x15 0x0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ mdio0: mdio@e14 {
+ compatible = "brcm,genet-mdio-v4";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <1000>;
+ reg = <1>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
+
+ - |
+ ethernet@f0b80000 {
+ phy-mode = "moca";
+ fixed-link = <1 0 1000 0 0>;
+ mac-address = [ 00 10 18 36 24 1a ];
+ compatible = "brcm,genet-v4";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xf0b80000 0xfc4c>;
+ interrupts = <0x0 0x16 0x0>, <0x0 0x17 0x0>;
+
+ mdio1: mdio@e14 {
+ compatible = "brcm,genet-mdio-v4";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xe14 0x8>;
+ };
+ };
+
+ - |
+ ethernet@f0ba0000 {
+ phy-mode = "rgmii";
+ phy-handle = <&phy0>;
+ mac-address = [ 00 10 18 36 26 1a ];
+ compatible = "brcm,genet-v4";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0xf0ba0000 0xfc4c>;
+ interrupts = <0x0 0x18 0x0>, <0x0 0x19 0x0>;
+
+ mdio2: mdio@e14 {
+ compatible = "brcm,genet-mdio-v4";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xe14 0x8>;
+
+ phy0: ethernet-phy@0 {
+ max-speed = <1000>;
+ reg = <0>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt
deleted file mode 100644
index deb9e852ea27..000000000000
--- a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-Properties for an MDIO bus multiplexer found in Broadcom iProc based SoCs.
-
-This MDIO bus multiplexer defines buses that could be internal as well as
-external to SoCs and could accept MDIO transaction compatible to C-22 or
-C-45 Clause. When child bus is selected, one needs to select these two
-properties as well to generate desired MDIO transaction on appropriate bus.
-
-Required properties in addition to the generic multiplexer properties:
-
-MDIO multiplexer node:
-- compatible: brcm,mdio-mux-iproc.
-
-Every non-ethernet PHY requires a compatible so that it could be probed based
-on this compatible string.
-
-Optional properties:
-- clocks: phandle of the core clock which drives the mdio block.
-
-Additional information regarding generic multiplexer properties can be found
-at- Documentation/devicetree/bindings/net/mdio-mux.yaml
-
-
-for example:
- mdio_mux_iproc: mdio-mux@66020000 {
- compatible = "brcm,mdio-mux-iproc";
- reg = <0x66020000 0x250>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- mdio@0 {
- reg = <0x0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pci_phy0: pci-phy@0 {
- compatible = "brcm,ns2-pcie-phy";
- reg = <0x0>;
- #phy-cells = <0>;
- };
- };
-
- mdio@7 {
- reg = <0x7>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pci_phy1: pci-phy@0 {
- compatible = "brcm,ns2-pcie-phy";
- reg = <0x0>;
- #phy-cells = <0>;
- };
- };
- mdio@10 {
- reg = <0x10>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- gphy0: eth-phy@10 {
- reg = <0x10>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.yaml b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.yaml
new file mode 100644
index 000000000000..af96b4fd89d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/brcm,mdio-mux-iproc.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/brcm,mdio-mux-iproc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MDIO bus multiplexer found in Broadcom iProc-based SoCs
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+description:
+ This MDIO bus multiplexer defines buses that can be internal as well as
+ external to the SoC and can accept MDIO transactions compliant with
+ Clause 22 or Clause 45. When a child bus is selected, the corresponding
+ properties must be set as well to generate the desired MDIO transaction
+ on the appropriate bus.
+
+allOf:
+ - $ref: /schemas/net/mdio-mux.yaml#
+
+properties:
+ compatible:
+ const: brcm,mdio-mux-iproc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+ description: core clock driving the MDIO block
+
+
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ mdio_mux_iproc: mdio-mux@66020000 {
+ compatible = "brcm,mdio-mux-iproc";
+ reg = <0x66020000 0x250>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mdio@0 {
+ reg = <0x0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pci_phy0: pci-phy@0 {
+ compatible = "brcm,ns2-pcie-phy";
+ reg = <0x0>;
+ #phy-cells = <0>;
+ };
+ };
+
+ mdio@7 {
+ reg = <0x7>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pci_phy1: pci-phy@0 {
+ compatible = "brcm,ns2-pcie-phy";
+ reg = <0x0>;
+ #phy-cells = <0>;
+ };
+ };
+
+ mdio@10 {
+ reg = <0x10>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ gphy0: eth-phy@10 {
+ reg = <0x10>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/brcm,systemport.txt b/Documentation/devicetree/bindings/net/brcm,systemport.txt
deleted file mode 100644
index 75736739bfdd..000000000000
--- a/Documentation/devicetree/bindings/net/brcm,systemport.txt
+++ /dev/null
@@ -1,38 +0,0 @@
-* Broadcom BCM7xxx Ethernet Systemport Controller (SYSTEMPORT)
-
-Required properties:
-- compatible: should be one of:
- "brcm,systemport-v1.00"
- "brcm,systemportlite-v1.00" or
- "brcm,systemport"
-- reg: address and length of the register set for the device.
-- interrupts: interrupts for the device, first cell must be for the rx
- interrupts, and the second cell should be for the transmit queues. An
- optional third interrupt cell for Wake-on-LAN can be specified
-- local-mac-address: Ethernet MAC address (48 bits) of this adapter
-- phy-mode: Should be a string describing the PHY interface to the
- Ethernet switch/PHY, see Documentation/devicetree/bindings/net/ethernet.txt
-- fixed-link: see Documentation/devicetree/bindings/net/fixed-link.txt for
- the property specific details
-
-Optional properties:
-- systemport,num-tier2-arb: number of tier 2 arbiters, an integer
-- systemport,num-tier1-arb: number of tier 1 arbiters, an integer
-- systemport,num-txq: number of HW transmit queues, an integer
-- systemport,num-rxq: number of HW receive queues, an integer
-- clocks: When provided, must be two phandles to the functional clocks nodes of
- the SYSTEMPORT block. The first phandle is the main SYSTEMPORT clock used
- during normal operation, while the second phandle is the Wake-on-LAN clock.
-- clock-names: When provided, names of the functional clock phandles, first
- name should be "sw_sysport" and second should be "sw_sysportwol".
-
-Example:
-ethernet@f04a0000 {
- compatible = "brcm,systemport-v1.00";
- reg = <0xf04a0000 0x4650>;
- local-mac-address = [ 00 11 22 33 44 55 ];
- fixed-link = <0 1 1000 0 0>;
- phy-mode = "gmii";
- interrupts = <0x0 0x16 0x0>,
- <0x0 0x17 0x0>;
-};
diff --git a/Documentation/devicetree/bindings/net/brcm,systemport.yaml b/Documentation/devicetree/bindings/net/brcm,systemport.yaml
new file mode 100644
index 000000000000..5fc9c9fafd85
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/brcm,systemport.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/brcm,systemport.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM7xxx Ethernet Systemport Controller (SYSTEMPORT)
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - brcm,systemport-v1.00
+ - brcm,systemportlite-v1.00
+ - brcm,systemport
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ minItems: 2
+ items:
+ - description: interrupt line for RX queues
+ - description: interrupt line for TX queues
+ - description: interrupt line for Wake-on-LAN
+
+ clocks:
+ items:
+ - description: main clock
+ - description: Wake-on-LAN clock
+
+ clock-names:
+ items:
+ - const: sw_sysport
+ - const: sw_sysportwol
+
+ systemport,num-tier2-arb:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Number of tier 2 arbiters
+
+ systemport,num-tier1-arb:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Number of tier 1 arbiters
+
+ systemport,num-txq:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 32
+ description:
+ Number of HW transmit queues
+
+ systemport,num-rxq:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ minimum: 1
+ maximum: 32
+ description:
+ Number of HW receive queues
+
+required:
+ - reg
+ - interrupts
+ - phy-mode
+
+allOf:
+ - $ref: "ethernet-controller.yaml#"
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ ethernet@f04a0000 {
+ compatible = "brcm,systemport-v1.00";
+ reg = <0xf04a0000 0x4650>;
+ local-mac-address = [ 00 11 22 33 44 55 ];
+ phy-mode = "gmii";
+ interrupts = <0x0 0x16 0x0>,
+ <0x0 0x17 0x0>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml
index f4f4c37f1d4e..0be426ee1e44 100644
--- a/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml
@@ -7,6 +7,8 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Broadcom UniMAC MDIO bus controller
maintainers:
+ - Doug Berger <opendmb@gmail.com>
+ - Florian Fainelli <f.fainelli@gmail.com>
- Rafał Miłecki <rafal@milecki.pl>
allOf:
@@ -64,7 +66,6 @@ unevaluatedProperties: false
required:
- reg
- - reg-names
- '#address-cells'
- '#size-cells'
diff --git a/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml b/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
index fb547e26c676..401ab7cdb379 100644
--- a/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
+++ b/Documentation/devicetree/bindings/net/can/bosch,m_can.yaml
@@ -76,33 +76,31 @@ properties:
M_CAN user manual for details.
$ref: /schemas/types.yaml#/definitions/int32-array
items:
- items:
- - description: The 'offset' is an address offset of the Message RAM where
- the following elements start from. This is usually set to 0x0 if
- you're using a private Message RAM.
- default: 0
- - description: 11-bit Filter 0-128 elements / 0-128 words
- minimum: 0
- maximum: 128
- - description: 29-bit Filter 0-64 elements / 0-128 words
- minimum: 0
- maximum: 64
- - description: Rx FIFO 0 0-64 elements / 0-1152 words
- minimum: 0
- maximum: 64
- - description: Rx FIFO 1 0-64 elements / 0-1152 words
- minimum: 0
- maximum: 64
- - description: Rx Buffers 0-64 elements / 0-1152 words
- minimum: 0
- maximum: 64
- - description: Tx Event FIFO 0-32 elements / 0-64 words
- minimum: 0
- maximum: 32
- - description: Tx Buffers 0-32 elements / 0-576 words
- minimum: 0
- maximum: 32
- maxItems: 1
+ - description: The 'offset' is an address offset of the Message RAM where
+ the following elements start from. This is usually set to 0x0 if
+ you're using a private Message RAM.
+ default: 0
+ - description: 11-bit Filter 0-128 elements / 0-128 words
+ minimum: 0
+ maximum: 128
+ - description: 29-bit Filter 0-64 elements / 0-128 words
+ minimum: 0
+ maximum: 64
+ - description: Rx FIFO 0 0-64 elements / 0-1152 words
+ minimum: 0
+ maximum: 64
+ - description: Rx FIFO 1 0-64 elements / 0-1152 words
+ minimum: 0
+ maximum: 64
+ - description: Rx Buffers 0-64 elements / 0-1152 words
+ minimum: 0
+ maximum: 64
+ - description: Tx Event FIFO 0-32 elements / 0-64 words
+ minimum: 0
+ maximum: 32
+ - description: Tx Buffers 0-32 elements / 0-576 words
+ minimum: 0
+ maximum: 32
power-domains:
description:
diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
index 0968b40aef1e..e3501bfa22e9 100644
--- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
+++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
@@ -31,7 +31,7 @@ tcan4x5x: tcan4x5x@0 {
#address-cells = <1>;
#size-cells = <1>;
spi-max-frequency = <10000000>;
- bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
+ bosch,mram-cfg = <0x0 0 0 16 0 0 1 1>;
interrupt-parent = <&gpio1>;
interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
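The RX FIFO 0 reduction from 32 to 16 elements follows from the part's Message RAM size; a rough sizing sketch, assuming worst-case CAN FD elements of 18 words each:

    /* TCAN4x5x MRAM: 2 KiB = 512 32-bit words.
     * 32 RX FIFO 0 elements * 18 words = 576 words -> exceeds the MRAM
     * 16 RX FIFO 0 elements * 18 words = 288 words -> fits, with room for
     * the single TX event FIFO element (2 words) and TX buffer (18 words)
     */
    bosch,mram-cfg = <0x0 0 0 16 0 0 1 1>;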
diff --git a/Documentation/devicetree/bindings/net/cdns,macb.yaml b/Documentation/devicetree/bindings/net/cdns,macb.yaml
new file mode 100644
index 000000000000..8dd06db34169
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/cdns,macb.yaml
@@ -0,0 +1,159 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/cdns,macb.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cadence MACB/GEM Ethernet controller
+
+maintainers:
+ - Nicolas Ferre <nicolas.ferre@microchip.com>
+ - Claudiu Beznea <claudiu.beznea@microchip.com>
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - cdns,at91rm9200-emac # Atmel at91rm9200 SoC
+ - const: cdns,emac # Generic
+
+ - items:
+ - enum:
+ - cdns,zynq-gem # Xilinx Zynq-7xxx SoC
+ - cdns,zynqmp-gem # Xilinx Zynq Ultrascale+ MPSoC
+ - const: cdns,gem # Generic
+
+ - items:
+ - enum:
+ - cdns,at91sam9260-macb # Atmel at91sam9 SoCs
+ - cdns,sam9x60-macb # Microchip sam9x60 SoC
+ - const: cdns,macb # Generic
+
+ - items:
+ - enum:
+ - atmel,sama5d3-macb # 10/100Mbit IP on Atmel sama5d3 SoCs
+ - enum:
+ - cdns,at91sam9260-macb # Atmel at91sam9 SoCs.
+ - const: cdns,macb # Generic
+
+ - enum:
+ - atmel,sama5d29-gem # GEM XL IP (10/100) on Atmel sama5d29 SoCs
+ - atmel,sama5d2-gem # GEM IP (10/100) on Atmel sama5d2 SoCs
+ - atmel,sama5d3-gem # Gigabit IP on Atmel sama5d3 SoCs
+ - atmel,sama5d4-gem # GEM IP (10/100) on Atmel sama5d4 SoCs
+ - cdns,at32ap7000-macb # Other 10/100 usage or use the generic form
+ - cdns,np4-macb # NP4 SoC devices
+ - microchip,sama7g5-emac # Microchip SAMA7G5 ethernet interface
+ - microchip,sama7g5-gem # Microchip SAMA7G5 gigabit ethernet interface
+ - sifive,fu540-c000-gem # SiFive FU540-C000 SoC
+ - cdns,emac # Generic
+ - cdns,gem # Generic
+ - cdns,macb # Generic
+
+ reg:
+ minItems: 1
+ items:
+ - description: Basic register set
+ - description: GEMGXL Management block registers on SiFive FU540-C000 SoC
+
+ interrupts:
+ minItems: 1
+ maxItems: 8
+ description: One interrupt per available hardware queue
+
+ clocks:
+ minItems: 1
+ maxItems: 5
+
+ clock-names:
+ minItems: 1
+ items:
+ - enum: [ ether_clk, hclk, pclk ]
+ - enum: [ hclk, pclk ]
+ - const: tx_clk
+ - enum: [ rx_clk, tsu_clk ]
+ - const: tsu_clk
+
+ local-mac-address: true
+
+ phy-mode: true
+
+ phy-handle: true
+
+ fixed-link: true
+
+ iommus:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ '#address-cells':
+ const: 1
+
+ '#size-cells':
+ const: 0
+
+ mdio:
+ type: object
+ description:
+ Node containing PHY children. If this node is not present, then PHYs will
+ be direct children.
+
+patternProperties:
+ "^ethernet-phy@[0-9a-f]$":
+ type: object
+ $ref: ethernet-phy.yaml#
+
+ properties:
+ reset-gpios: true
+
+ magic-packet:
+ description:
+ Indicates that the hardware supports waking up via magic packet.
+
+ unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+ - clock-names
+ - phy-mode
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: sifive,fu540-c000-gem
+ then:
+ properties:
+ reg:
+ maxItems: 1
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ macb0: ethernet@fffc4000 {
+ compatible = "cdns,at32ap7000-macb";
+ reg = <0xfffc4000 0x4000>;
+ interrupts = <21>;
+ phy-mode = "rmii";
+ local-mac-address = [3a 0e 03 04 05 06];
+ clock-names = "pclk", "hclk", "tx_clk";
+ clocks = <&clkc 30>, <&clkc 30>, <&clkc 13>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet-phy@1 {
+ reg = <0x1>;
+ reset-gpios = <&pioE 6 1>;
+ };
+ };
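The FU540 is the only variant allowed a second register range (the GEMGXL management block); a sketch, with addresses, interrupt, and clock wiring taken as assumptions from the SiFive FU540 memory map:

    ethernet@10090000 {
        compatible = "sifive,fu540-c000-gem";
        reg = <0x10090000 0x2000>, <0x100a0000 0x1000>;
        interrupts = <53>;
        phy-mode = "gmii";
        clocks = <&prci 2>, <&prci 2>, <&prci 2>;
        clock-names = "pclk", "hclk", "tx_clk";
    };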
diff --git a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
index 24cd733c11d1..1ea0bd490473 100644
--- a/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/nxp,sja1105.yaml
@@ -52,10 +52,8 @@ properties:
patternProperties:
"^mdio@[0-1]$":
- type: object
-
- allOf:
- - $ref: "http://devicetree.org/schemas/net/mdio.yaml#"
+ $ref: /schemas/net/mdio.yaml#
+ unevaluatedProperties: false
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.yaml b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml
index 89c21b289447..f3c88371d76c 100644
--- a/Documentation/devicetree/bindings/net/dsa/qca8k.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml
@@ -58,33 +58,14 @@ properties:
B68 on the QCA832x and B49 on the QCA833x.
mdio:
- type: object
+ $ref: /schemas/net/mdio.yaml#
+ unevaluatedProperties: false
description: Qca8k switch have an internal mdio to access switch port.
If this is not present, the legacy mapping is used and the
internal mdio access is used.
With the legacy mapping the reg corresponding to the internal
mdio is the switch reg with an offset of -1.
- properties:
- '#address-cells':
- const: 1
- '#size-cells':
- const: 0
-
- patternProperties:
- "^(ethernet-)?phy@[0-4]$":
- type: object
-
- allOf:
- - $ref: "http://devicetree.org/schemas/net/mdio.yaml#"
-
- properties:
- reg:
- maxItems: 1
-
- required:
- - reg
-
patternProperties:
"^(ethernet-)?ports$":
type: object
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index b0933a8c295a..34c5463abcec 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -17,9 +17,8 @@ properties:
description:
Specifies the MAC address that was assigned to the network device.
$ref: /schemas/types.yaml#/definitions/uint8-array
- items:
- - minItems: 6
- maxItems: 6
+ minItems: 6
+ maxItems: 6
mac-address:
description:
@@ -28,9 +27,8 @@ properties:
to the device by the boot program is different from the
local-mac-address property.
$ref: /schemas/types.yaml#/definitions/uint8-array
- items:
- - minItems: 6
- maxItems: 6
+ minItems: 6
+ maxItems: 6
max-frame-size:
$ref: /schemas/types.yaml#/definitions/uint32
@@ -69,6 +67,7 @@ properties:
- rev-mii
- rmii
- rev-rmii
+ - moca
# RX and TX delays are added by the MAC when required
- rgmii
@@ -163,33 +162,30 @@ properties:
type: array
then:
deprecated: true
- minItems: 1
- maxItems: 1
items:
- items:
- - minimum: 0
- maximum: 31
- description:
- Emulated PHY ID, choose any but unique to the all
- specified fixed-links
+ - minimum: 0
+ maximum: 31
+ description:
+ Emulated PHY ID, choose any but unique to the all
+ specified fixed-links
- - enum: [0, 1]
- description:
- Duplex configuration. 0 for half duplex or 1 for
- full duplex
+ - enum: [0, 1]
+ description:
+ Duplex configuration. 0 for half duplex or 1 for
+ full duplex
- - enum: [10, 100, 1000]
- description:
- Link speed in Mbits/sec.
+ - enum: [10, 100, 1000, 2500, 10000]
+ description:
+ Link speed in Mbits/sec.
- - enum: [0, 1]
- description:
- Pause configuration. 0 for no pause, 1 for pause
+ - enum: [0, 1]
+ description:
+ Pause configuration. 0 for no pause, 1 for pause
- - enum: [0, 1]
- description:
- Asymmetric pause configuration. 0 for no asymmetric
- pause, 1 for asymmetric pause
+ - enum: [0, 1]
+ description:
+ Asymmetric pause configuration. 0 for no asymmetric
+ pause, 1 for asymmetric pause
- if:
@@ -200,7 +196,7 @@ properties:
description:
Link speed.
$ref: /schemas/types.yaml#/definitions/uint32
- enum: [10, 100, 1000]
+ enum: [10, 100, 1000, 2500, 10000]
full-duplex:
$ref: /schemas/types.yaml#/definitions/flag
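With the speed enums extended above, a MAC-to-MAC link can now pin 2.5G or 10G; a minimal sketch using the preferred child-node form of fixed-link:

    fixed-link {
        speed = <2500>;
        full-duplex;
    };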
diff --git a/Documentation/devicetree/bindings/net/fsl,fec.yaml b/Documentation/devicetree/bindings/net/fsl,fec.yaml
index eca41443fcce..fd8371e31867 100644
--- a/Documentation/devicetree/bindings/net/fsl,fec.yaml
+++ b/Documentation/devicetree/bindings/net/fsl,fec.yaml
@@ -165,7 +165,8 @@ properties:
req_bit is the gpr bit offset for ENET stop request.
mdio:
- type: object
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
description:
Specifies the mdio bus in the FEC, used as a container for phy nodes.
diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt
index c00fb0d22c7b..020337f3c05f 100644
--- a/Documentation/devicetree/bindings/net/fsl-fman.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fman.txt
@@ -410,6 +410,15 @@ PROPERTIES
The settings and programming routines for internal/external
MDIO are different. Must be included for internal MDIO.
+- fsl,erratum-a009885
+ Usage: optional
+ Value type: <boolean>
+ Definition: Indicates the presence of the A009885
+ erratum describing that the contents of MDIO_DATA may
+ become corrupt unless it is read within 16 MDC cycles
+ of MDIO_CFG[BSY] being cleared, when performing an
+ MDIO read operation.
+
- fsl,erratum-a011043
Usage: optional
Value type: <boolean>
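Like the other erratum flags here, the new property is a bare boolean on the affected MDIO node; a sketch with an illustrative unit address:

    mdio0: mdio@fc000 {
        compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
        reg = <0xfc000 0x1000>;
        fsl,erratum-a009885;
    };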
diff --git a/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml b/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml
index 08a3f1f6aea2..52a7fa4f49a4 100644
--- a/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml
+++ b/Documentation/devicetree/bindings/net/intel,dwmac-plat.yaml
@@ -117,7 +117,7 @@ examples:
snps,mtl-tx-config = <&mtl_tx_setup>;
snps,tso;
- mdio0 {
+ mdio {
#address-cells = <1>;
#size-cells = <0>;
compatible = "snps,dwmac-mdio";
diff --git a/Documentation/devicetree/bindings/net/intel,ixp4xx-ethernet.yaml b/Documentation/devicetree/bindings/net/intel,ixp4xx-ethernet.yaml
index 378ed2d3b003..67eaf02dda80 100644
--- a/Documentation/devicetree/bindings/net/intel,ixp4xx-ethernet.yaml
+++ b/Documentation/devicetree/bindings/net/intel,ixp4xx-ethernet.yaml
@@ -48,8 +48,8 @@ properties:
and the instance to use in the second cell
mdio:
- type: object
- $ref: "mdio.yaml#"
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
description: optional node for embedded MDIO controller
required:
diff --git a/Documentation/devicetree/bindings/net/lantiq,etop-xway.yaml b/Documentation/devicetree/bindings/net/lantiq,etop-xway.yaml
index 437502c5ca96..3ce9f9a16baf 100644
--- a/Documentation/devicetree/bindings/net/lantiq,etop-xway.yaml
+++ b/Documentation/devicetree/bindings/net/lantiq,etop-xway.yaml
@@ -46,7 +46,6 @@ properties:
required:
- compatible
- reg
- - interrupt-parent
- interrupts
- interrupt-names
- lantiq,tx-burst-length
diff --git a/Documentation/devicetree/bindings/net/lantiq,xrx200-net.yaml b/Documentation/devicetree/bindings/net/lantiq,xrx200-net.yaml
index 7bc074a42369..5bc1a21ca579 100644
--- a/Documentation/devicetree/bindings/net/lantiq,xrx200-net.yaml
+++ b/Documentation/devicetree/bindings/net/lantiq,xrx200-net.yaml
@@ -38,7 +38,6 @@ properties:
required:
- compatible
- reg
- - interrupt-parent
- interrupts
- interrupt-names
- "#address-cells"
diff --git a/Documentation/devicetree/bindings/net/litex,liteeth.yaml b/Documentation/devicetree/bindings/net/litex,liteeth.yaml
index 76c164a8199a..ebf4e360f8dd 100644
--- a/Documentation/devicetree/bindings/net/litex,liteeth.yaml
+++ b/Documentation/devicetree/bindings/net/litex,liteeth.yaml
@@ -62,6 +62,7 @@ properties:
mdio:
$ref: mdio.yaml#
+ unevaluatedProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
deleted file mode 100644
index a1b06fd1962e..000000000000
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-* Cadence MACB/GEM Ethernet controller
-
-Required properties:
-- compatible: Should be "cdns,[<chip>-]{macb|gem}"
- Use "cdns,at91rm9200-emac" Atmel at91rm9200 SoC.
- Use "cdns,at91sam9260-macb" for Atmel at91sam9 SoCs.
- Use "cdns,sam9x60-macb" for Microchip sam9x60 SoC.
- Use "cdns,np4-macb" for NP4 SoC devices.
- Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb".
- Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
- Use "atmel,sama5d29-gem" for GEM XL IP (10/100) available on Atmel sama5d29 SoCs.
- Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
- Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
- Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
- Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
- Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC.
- Use "sifive,fu540-c000-gem" for SiFive FU540-C000 SoC.
- Use "microchip,sama7g5-emac" for Microchip SAMA7G5 ethernet interface.
- Use "microchip,sama7g5-gem" for Microchip SAMA7G5 gigabit ethernet interface.
- Or the generic form: "cdns,emac".
-- reg: Address and length of the register set for the device
- For "sifive,fu540-c000-gem", second range is required to specify the
- address and length of the registers for GEMGXL Management block.
-- interrupts: Should contain macb interrupt
-- phy-mode: See ethernet.txt file in the same directory.
-- clock-names: Tuple listing input clock names.
- Required elements: 'pclk', 'hclk'
- Optional elements: 'tx_clk'
- Optional elements: 'rx_clk' applies to cdns,zynqmp-gem
- Optional elements: 'tsu_clk'
-- clocks: Phandles to input clocks.
-
-Optional properties:
-- mdio: node containing PHY children. If this node is not present, then PHYs
- will be direct children.
-
-The MAC address will be determined using the optional properties
-defined in ethernet.txt.
-
-Optional properties for PHY child node:
-- reset-gpios : Should specify the gpio for phy reset
-- magic-packet : If present, indicates that the hardware supports waking
- up via magic packet.
-- phy-handle : see ethernet.txt file in the same directory
-
-Examples:
-
- macb0: ethernet@fffc4000 {
- compatible = "cdns,at32ap7000-macb";
- reg = <0xfffc4000 0x4000>;
- interrupts = <21>;
- phy-mode = "rmii";
- local-mac-address = [3a 0e 03 04 05 06];
- clock-names = "pclk", "hclk", "tx_clk";
- clocks = <&clkc 30>, <&clkc 30>, <&clkc 13>;
- ethernet-phy@1 {
- reg = <0x1>;
- reset-gpios = <&pioE 6 1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/net/mdio-mux.yaml b/Documentation/devicetree/bindings/net/mdio-mux.yaml
index d169adf5d9f4..4321c87de86f 100644
--- a/Documentation/devicetree/bindings/net/mdio-mux.yaml
+++ b/Documentation/devicetree/bindings/net/mdio-mux.yaml
@@ -15,9 +15,6 @@ description: |+
bus multiplexer/switch will have one child node for each child bus.
properties:
- $nodename:
- pattern: '^mdio-mux[\-@]?'
-
mdio-parent-bus:
$ref: /schemas/types.yaml#/definitions/phandle
description:
@@ -32,12 +29,12 @@ properties:
patternProperties:
'^mdio@[0-9a-f]+$':
- type: object
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
properties:
reg:
maxItems: 1
- description: The sub-bus number.
additionalProperties: true
diff --git a/Documentation/devicetree/bindings/net/mdio.yaml b/Documentation/devicetree/bindings/net/mdio.yaml
index 08e15fb1584f..b5706d4e7e38 100644
--- a/Documentation/devicetree/bindings/net/mdio.yaml
+++ b/Documentation/devicetree/bindings/net/mdio.yaml
@@ -59,7 +59,7 @@ properties:
type: boolean
patternProperties:
- "^ethernet-phy@[0-9a-f]+$":
+ '@[0-9a-f]+$':
type: object
properties:
@@ -76,12 +76,6 @@ patternProperties:
the turn around line low at end of the control phase of the
MDIO transaction.
- resets:
- maxItems: 1
-
- reset-names:
- const: phy
-
reset-gpios:
maxItems: 1
description:
diff --git a/Documentation/devicetree/bindings/net/mediatek,star-emac.yaml b/Documentation/devicetree/bindings/net/mediatek,star-emac.yaml
index e6a5ff208253..def994c9cbb4 100644
--- a/Documentation/devicetree/bindings/net/mediatek,star-emac.yaml
+++ b/Documentation/devicetree/bindings/net/mediatek,star-emac.yaml
@@ -48,9 +48,8 @@ properties:
to control the MII mode.
mdio:
- type: object
- description:
- Creates and registers an MDIO bus.
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/net/oxnas-dwmac.txt b/Documentation/devicetree/bindings/net/oxnas-dwmac.txt
index d7117a22fd87..27db496f1ce8 100644
--- a/Documentation/devicetree/bindings/net/oxnas-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/oxnas-dwmac.txt
@@ -9,6 +9,9 @@ Required properties on all platforms:
- compatible: For the OX820 SoC, it should be :
- "oxsemi,ox820-dwmac" to select glue
- "snps,dwmac-3.512" to select IP version.
+ For the OX810SE SoC, it should be :
+ - "oxsemi,ox810se-dwmac" to select glue
+ - "snps,dwmac-3.512" to select IP version.
- clocks: Should contain phandles to the following clocks
- clock-names: Should contain the following:
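An OX810SE node then differs from the OX820 case only in the glue compatible; a sketch, assuming the GMAC base address used by the ox810se device tree:

    etha: ethernet@40400000 {
        compatible = "oxsemi,ox810se-dwmac", "snps,dwmac-3.512";
        reg = <0x40400000 0x2000>;
    };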
diff --git a/Documentation/devicetree/bindings/net/qca,ar71xx.yaml b/Documentation/devicetree/bindings/net/qca,ar71xx.yaml
index cf4d35edaa1b..1ebf9e8c8a1d 100644
--- a/Documentation/devicetree/bindings/net/qca,ar71xx.yaml
+++ b/Documentation/devicetree/bindings/net/qca,ar71xx.yaml
@@ -34,14 +34,6 @@ properties:
interrupts:
maxItems: 1
- '#address-cells':
- description: number of address cells for the MDIO bus
- const: 1
-
- '#size-cells':
- description: number of size cells on the MDIO bus
- const: 0
-
clocks:
items:
- description: MAC main clock
@@ -62,6 +54,10 @@ properties:
- const: mac
- const: mdio
+ mdio:
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
+
required:
- compatible
- reg
@@ -85,7 +81,6 @@ examples:
reset-names = "mac", "mdio";
clocks = <&pll 1>, <&pll 2>;
clock-names = "eth", "mdio";
- qca,ethcfg = <&ethcfg>;
phy-mode = "mii";
phy-handle = <&phy_port4>;
};
@@ -111,9 +106,6 @@ examples:
#size-cells = <0>;
switch10: switch@10 {
- #address-cells = <1>;
- #size-cells = <0>;
-
compatible = "qca,ar9331-switch";
reg = <0x10>;
resets = <&rst 8>;
diff --git a/Documentation/devicetree/bindings/net/qcom,ipa.yaml b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
index b86edf67ce62..58ecc62adfaa 100644
--- a/Documentation/devicetree/bindings/net/qcom,ipa.yaml
+++ b/Documentation/devicetree/bindings/net/qcom,ipa.yaml
@@ -107,6 +107,10 @@ properties:
- const: imem
- const: config
+ qcom,qmp:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: phandle to the AOSS side-channel message RAM
+
qcom,smem-states:
$ref: /schemas/types.yaml#/definitions/phandle-array
description: State bits used in by the AP to signal the modem.
@@ -222,6 +226,8 @@ examples:
"imem",
"config";
+ qcom,qmp = <&aoss_qmp>;
+
qcom,smem-states = <&ipa_smp2p_out 0>,
<&ipa_smp2p_out 1>;
qcom,smem-state-names = "ipa-clock-enabled-valid",
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 7ae70dc27f78..7eb43707e601 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -286,7 +286,8 @@ properties:
MAC2MAC connection.
mdio:
- type: object
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
description:
Creates and registers an MDIO bus.
@@ -326,6 +327,9 @@ allOf:
- ingenic,x1600-mac
- ingenic,x1830-mac
- ingenic,x2000-mac
+ - snps,dwmac-3.50a
+ - snps,dwmac-4.10a
+ - snps,dwmac-4.20a
- snps,dwxgmac
- snps,dwxgmac-2.10
- st,spear600-gmac
diff --git a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
index 6bc61c42418f..aad5a9f3f962 100644
--- a/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
+++ b/Documentation/devicetree/bindings/net/socionext,uniphier-ave4.yaml
@@ -72,6 +72,7 @@ properties:
mdio:
$ref: mdio.yaml#
+ unevaluatedProperties: false
required:
- compatible
diff --git a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
index 577f4e284425..3d8a3b763ae6 100644
--- a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
@@ -44,6 +44,12 @@ properties:
- st,stm32-dwmac
- const: snps,dwmac-3.50a
+ reg: true
+
+ reg-names:
+ items:
+ - const: stmmaceth
+
clocks:
minItems: 3
items:
@@ -102,7 +108,7 @@ examples:
compatible = "st,stm32mp1-dwmac", "snps,dwmac-4.20a";
reg = <0x5800a000 0x2000>;
reg-names = "stmmaceth";
- interrupts = <&intc GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
clock-names = "stmmaceth",
"mac-clk-tx",
@@ -121,6 +127,7 @@ examples:
phy-mode = "rgmii";
};
+ - |
//Example 2 (MCU example)
ethernet1: ethernet@40028000 {
compatible = "st,stm32-dwmac", "snps,dwmac-3.50a";
@@ -136,6 +143,7 @@ examples:
phy-mode = "mii";
};
+ - |
//Example 3
ethernet2: ethernet@40027000 {
compatible = "st,stm32-dwmac", "snps,dwmac-4.10a";
diff --git a/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml b/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
index 5728fe23f530..dbfca5ee9139 100644
--- a/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
+++ b/Documentation/devicetree/bindings/net/ti,davinci-mdio.yaml
@@ -37,6 +37,13 @@ properties:
maximum: 2500000
description: MDIO Bus frequency
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: fck
+
ti,hwmods:
description: TI hwmod name
deprecated: true
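With clocks and clock-names now documented for the DaVinci MDIO block, a node can name its functional clock "fck" explicitly. A hypothetical sketch (the clock phandle is an assumption, not from the patch):

    mdio: mdio@4a101000 {
        compatible = "ti,davinci_mdio";
        reg = <0x4a101000 0x100>;
        #address-cells = <1>;
        #size-cells = <0>;
        bus_freq = <1000000>;
        clocks = <&gmac_main_clk>;
        clock-names = "fck";
    };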
diff --git a/Documentation/devicetree/bindings/net/ti,dp83869.yaml b/Documentation/devicetree/bindings/net/ti,dp83869.yaml
index 70a1209cb13b..1b780dce61ab 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83869.yaml
+++ b/Documentation/devicetree/bindings/net/ti,dp83869.yaml
@@ -92,7 +92,7 @@ examples:
tx-fifo-depth = <DP83869_PHYCR_FIFO_DEPTH_4_B_NIB>;
rx-fifo-depth = <DP83869_PHYCR_FIFO_DEPTH_4_B_NIB>;
ti,op-mode = <DP83869_RGMII_COPPER_ETHERNET>;
- ti,max-output-impedance = "true";
+ ti,max-output-impedance;
ti,clk-output-sel = <DP83869_CLK_O_SEL_CHN_A_RCLK>;
rx-internal-delay-ps = <2000>;
tx-internal-delay-ps = <2000>;
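The example fix above follows from how devicetree boolean flags work: the property's presence alone means true, while assigning a value such as "true" creates a string property that the schema rejects. In DTS terms:

    /* correct: presence of the flag enables maximum output impedance */
    ti,max-output-impedance;

    /* wrong: this defines a string property, not a boolean flag */
    ti,max-output-impedance = "true";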
diff --git a/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml b/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
index 59724d18e6f3..b12bfe61c67a 100644
--- a/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
@@ -71,7 +71,7 @@ examples:
phy-mode = "rgmii-id";
phy-handle = <&phy0>;
- mdio0 {
+ mdio {
#address-cells = <0x1>;
#size-cells = <0x0>;
compatible = "snps,dwmac-mdio";
diff --git a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
index 1489d3c1cd6e..269cd63fb544 100644
--- a/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/mediatek,mt76.yaml
@@ -191,9 +191,9 @@ examples:
channels = <36 48>;
rates-ofdm = /bits/ 8 <23 23 23 23 23 23 23 23>;
rates-mcs = /bits/ 8 <1 23 23 23 23 23 23 23 23 23 23>,
- <3 22 22 22 22 22 22 22 22 22 22>;
+ /bits/ 8 <3 22 22 22 22 22 22 22 22 22 22>;
rates-ru = /bits/ 8 <3 22 22 22 22 22 22 22 22 22 22 22 22>,
- <4 20 20 20 20 20 20 20 20 20 20 20 20>;
+ /bits/ 8 <4 20 20 20 20 20 20 20 20 20 20 20 20>;
};
b1 {
channels = <100 181>;
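In DTS, a /bits/ 8 prefix applies only to the bracketed cell group immediately following it, so every entry of a multi-entry array needs its own prefix; without it, the later groups silently fall back to 32-bit cells. Hence the corrected form:

    rates-mcs = /bits/ 8 <1 23 23 23 23 23 23 23 23 23 23>,
                /bits/ 8 <3 22 22 22 22 22 22 22 22 22 22>;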
diff --git a/Documentation/devicetree/bindings/nvmem/brcm,nvram.yaml b/Documentation/devicetree/bindings/nvmem/brcm,nvram.yaml
index 58ff6b0bdb1a..8c3f0cd22821 100644
--- a/Documentation/devicetree/bindings/nvmem/brcm,nvram.yaml
+++ b/Documentation/devicetree/bindings/nvmem/brcm,nvram.yaml
@@ -24,6 +24,9 @@ properties:
compatible:
const: brcm,nvram
+ reg:
+ maxItems: 1
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
index b6791702bcfc..39d529599444 100644
--- a/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
+++ b/Documentation/devicetree/bindings/nvmem/mtk-efuse.txt
@@ -8,8 +8,10 @@ Required properties:
"mediatek,mt7623-efuse", "mediatek,efuse": for MT7623
"mediatek,mt8173-efuse" or "mediatek,efuse": for MT8173
"mediatek,mt8192-efuse", "mediatek,efuse": for MT8192
+ "mediatek,mt8195-efuse", "mediatek,efuse": for MT8195
"mediatek,mt8516-efuse", "mediatek,efuse": for MT8516
- reg: Should contain registers location and length
+- bits: Should contain the bit offset and size of the data
= Data cells =
Are child nodes of MTK-EFUSE, bindings of which as described in
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
index 456fb808100a..43ed7e32e5ac 100644
--- a/Documentation/devicetree/bindings/nvmem/nvmem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
@@ -50,16 +50,15 @@ patternProperties:
Offset and size in bytes within the storage device.
bits:
- maxItems: 1
+ $ref: /schemas/types.yaml#/definitions/uint32-array
items:
- items:
- - minimum: 0
- maximum: 7
- description:
- Offset in bit within the address range specified by reg.
- - minimum: 1
- description:
- Size in bit within the address range specified by reg.
+ - minimum: 0
+ maximum: 7
+ description:
+ Offset in bits within the address range specified by reg.
+ - minimum: 1
+ description:
+ Size in bits within the address range specified by reg.
required:
- reg
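After this rework, bits is a flat uint32-array holding an (offset, size) pair rather than a nested matrix. A hypothetical cell using it:

    calib: calibration@ea {
        reg = <0xea 0x1>;
        bits = <2 3>;   /* data starts at bit 2 of byte 0xea, 3 bits wide */
    };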
diff --git a/Documentation/devicetree/bindings/nvmem/rmem.yaml b/Documentation/devicetree/bindings/nvmem/rmem.yaml
index 1d85a0a30846..a4a755dcfc43 100644
--- a/Documentation/devicetree/bindings/nvmem/rmem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/rmem.yaml
@@ -19,6 +19,9 @@ properties:
- raspberrypi,bootloader-config
- const: nvmem-rmem
+ reg:
+ maxItems: 1
+
no-map:
$ref: /schemas/types.yaml#/definitions/flag
description:
diff --git a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
index a48c8fa56bce..448a2678dc62 100644
--- a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml
@@ -24,6 +24,9 @@ properties:
- st,stm32f4-otp
- st,stm32mp15-bsec
+ reg:
+ maxItems: 1
+
patternProperties:
"^.*@[0-9a-f]+$":
type: object
diff --git a/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt b/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt
deleted file mode 100644
index df065aa53a83..000000000000
--- a/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.txt
+++ /dev/null
@@ -1,133 +0,0 @@
-* Broadcom iProc PCIe controller with the platform bus interface
-
-Required properties:
-- compatible:
- "brcm,iproc-pcie" for the first generation of PAXB based controller,
-used in SoCs including NSP, Cygnus, NS2, and Pegasus
- "brcm,iproc-pcie-paxb-v2" for the second generation of PAXB-based
-controllers, used in Stingray
- "brcm,iproc-pcie-paxc" for the first generation of PAXC based
-controller, used in NS2
- "brcm,iproc-pcie-paxc-v2" for the second generation of PAXC based
-controller, used in Stingray
- PAXB-based root complex is used for external endpoint devices. PAXC-based
-root complex is connected to emulated endpoint devices internal to the ASIC
-- reg: base address and length of the PCIe controller I/O register space
-- #interrupt-cells: set to <1>
-- interrupt-map-mask and interrupt-map, standard PCI properties to define the
- mapping of the PCIe interface to interrupt numbers
-- linux,pci-domain: PCI domain ID. Should be unique for each host controller
-- bus-range: PCI bus numbers covered
-- #address-cells: set to <3>
-- #size-cells: set to <2>
-- device_type: set to "pci"
-- ranges: ranges for the PCI memory and I/O regions
-
-Optional properties:
-- phys: phandle of the PCIe PHY device
-- phy-names: must be "pcie-phy"
-- dma-coherent: present if DMA operations are coherent
-- dma-ranges: Some PAXB-based root complexes do not have inbound mapping done
- by the ASIC after power on reset. In this case, SW is required to configure
-the mapping, based on inbound memory regions specified by this property.
-
-- brcm,pcie-ob: Some iProc SoCs do not have the outbound address mapping done
-by the ASIC after power on reset. In this case, SW needs to configure it
-
-If the brcm,pcie-ob property is present, the following properties become
-effective:
-
-Required:
-- brcm,pcie-ob-axi-offset: The offset from the AXI address to the internal
-address used by the iProc PCIe core (not the PCIe address)
-
-MSI support (optional):
-
-For older platforms without MSI integrated in the GIC, iProc PCIe core provides
-an event queue based MSI support. The iProc MSI uses host memories to store
-MSI posted writes in the event queues
-
-On newer iProc platforms, gicv2m or gicv3-its based MSI support should be used
-
-- msi-map: Maps a Requester ID to an MSI controller and associated MSI
-sideband data
-
-- msi-parent: Link to the device node of the MSI controller, used when no MSI
-sideband data is passed between the iProc PCIe controller and the MSI
-controller
-
-Refer to the following binding documents for more detailed description on
-the use of 'msi-map' and 'msi-parent':
- Documentation/devicetree/bindings/pci/pci-msi.txt
- Documentation/devicetree/bindings/interrupt-controller/msi.txt
-
-When the iProc event queue based MSI is used, one needs to define the
-following properties in the MSI device node:
-- compatible: Must be "brcm,iproc-msi"
-- msi-controller: claims itself as an MSI controller
-- interrupts: List of interrupt IDs from its parent interrupt device
-
-Optional properties:
-- brcm,pcie-msi-inten: Needs to be present for some older iProc platforms that
-require the interrupt enable registers to be set explicitly to enable MSI
-
-Example:
- pcie0: pcie@18012000 {
- compatible = "brcm,iproc-pcie";
- reg = <0x18012000 0x1000>;
-
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
-
- linux,pci-domain = <0>;
-
- bus-range = <0x00 0xff>;
-
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
- ranges = <0x81000000 0 0 0x28000000 0 0x00010000
- 0x82000000 0 0x20000000 0x20000000 0 0x04000000>;
-
- phys = <&phy 0 5>;
- phy-names = "pcie-phy";
-
- brcm,pcie-ob;
- brcm,pcie-ob-axi-offset = <0x00000000>;
-
- msi-parent = <&msi0>;
-
- /* iProc event queue based MSI */
- msi0: msi@18012000 {
- compatible = "brcm,iproc-msi";
- msi-controller;
- interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
- <GIC_SPI 97 IRQ_TYPE_NONE>,
- <GIC_SPI 98 IRQ_TYPE_NONE>,
- <GIC_SPI 99 IRQ_TYPE_NONE>,
- };
- };
-
- pcie1: pcie@18013000 {
- compatible = "brcm,iproc-pcie";
- reg = <0x18013000 0x1000>;
-
- #interrupt-cells = <1>;
- interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
-
- linux,pci-domain = <1>;
-
- bus-range = <0x00 0xff>;
-
- #address-cells = <3>;
- #size-cells = <2>;
- device_type = "pci";
- ranges = <0x81000000 0 0 0x48000000 0 0x00010000
- 0x82000000 0 0x40000000 0x40000000 0 0x04000000>;
-
- phys = <&phy 1 6>;
- phy-names = "pcie-phy";
- };
diff --git a/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.yaml b/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.yaml
new file mode 100644
index 000000000000..0972868735fc
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/brcm,iproc-pcie.yaml
@@ -0,0 +1,184 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pci/brcm,iproc-pcie.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom iProc PCIe controller with the platform bus interface
+
+maintainers:
+ - Ray Jui <ray.jui@broadcom.com>
+ - Scott Branden <scott.branden@broadcom.com>
+
+allOf:
+ - $ref: /schemas/pci/pci-bus.yaml#
+ - $ref: /schemas/interrupt-controller/msi-controller.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ # for the first generation of PAXB based controller, used in SoCs
+ # including NSP, Cygnus, NS2, and Pegasus
+ - brcm,iproc-pcie
+ # for the second generation of PAXB-based controllers, used in
+ # Stingray
+ - brcm,iproc-pcie-paxb-v2
+ # For the first generation of PAXC based controller, used in NS2
+ - brcm,iproc-pcie-paxc
+ # For the second generation of PAXC based controller, used in Stingray
+ - brcm,iproc-pcie-paxc-v2
+
+ reg:
+ maxItems: 1
+ description: >
+ Base address and length of the PCIe controller I/O register space
+
+ interrupt-map: true
+
+ interrupt-map-mask: true
+
+ "#interrupt-cells":
+ const: 1
+
+ ranges:
+ minItems: 1
+ maxItems: 2
+ description: >
+ Ranges for the PCI memory and I/O regions
+
+ phys:
+ maxItems: 1
+
+ phy-names:
+ items:
+ - const: pcie-phy
+
+ bus-range: true
+
+ dma-coherent: true
+
+ "#address-cells": true
+
+ "#size-cells": true
+
+ device_type: true
+
+ brcm,pcie-ob:
+ type: boolean
+ description: >
+ Some iProc SoCs do not have the outbound address mapping done by the
+ ASIC after power-on reset. In this case, SW needs to configure it.
+
+ brcm,pcie-ob-axi-offset:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ The offset from the AXI address to the internal address used by the
+ iProc PCIe core (not the PCIe address)
+
+ msi:
+ type: object
+ properties:
+ compatible:
+ items:
+ - const: brcm,iproc-msi
+
+ msi-parent: true
+
+ msi-controller: true
+
+ brcm,pcie-msi-inten:
+ type: boolean
+ description: >
+ Needs to be present for some older iProc platforms that require the
+ interrupt enable registers to be set explicitly to enable MSI
+
+dependencies:
+ brcm,pcie-ob-axi-offset: ["brcm,pcie-ob"]
+ brcm,pcie-msi-inten: [msi-controller]
+
+required:
+ - compatible
+ - reg
+ - ranges
+
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - brcm,iproc-pcie
+then:
+ required:
+ - interrupt-map
+ - interrupt-map-mask
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ bus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ pcie0: pcie@18012000 {
+ compatible = "brcm,iproc-pcie";
+ reg = <0x18012000 0x1000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
+
+ linux,pci-domain = <0>;
+
+ bus-range = <0x00 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ ranges = <0x81000000 0 0 0x28000000 0 0x00010000>,
+ <0x82000000 0 0x20000000 0x20000000 0 0x04000000>;
+
+ phys = <&phy 0 5>;
+ phy-names = "pcie-phy";
+
+ brcm,pcie-ob;
+ brcm,pcie-ob-axi-offset = <0x00000000>;
+
+ msi-parent = <&msi0>;
+
+ /* iProc event queue based MSI */
+ msi0: msi {
+ compatible = "brcm,iproc-msi";
+ msi-controller;
+ interrupt-parent = <&gic>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
+ <GIC_SPI 97 IRQ_TYPE_NONE>,
+ <GIC_SPI 98 IRQ_TYPE_NONE>,
+ <GIC_SPI 99 IRQ_TYPE_NONE>;
+ };
+ };
+
+ pcie1: pcie@18013000 {
+ compatible = "brcm,iproc-pcie";
+ reg = <0x18013000 0x1000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
+
+ linux,pci-domain = <1>;
+
+ bus-range = <0x00 0xff>;
+
+ #address-cells = <3>;
+ #size-cells = <2>;
+ device_type = "pci";
+ ranges = <0x81000000 0 0 0x48000000 0 0x00010000>,
+ <0x82000000 0 0x40000000 0x40000000 0 0x04000000>;
+
+ phys = <&phy 1 6>;
+ phy-names = "pcie-phy";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
index 1fe102743f82..0f064e4222f3 100644
--- a/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/brcm,stb-pcie.yaml
@@ -19,6 +19,8 @@ properties:
- brcm,bcm7278-pcie # Broadcom 7278 Arm
- brcm,bcm7216-pcie # Broadcom 7216 Arm
- brcm,bcm7445-pcie # Broadcom 7445 Arm
+ - brcm,bcm7425-pcie # Broadcom 7425 MIPS
+ - brcm,bcm7435-pcie # Broadcom 7435 MIPS
reg:
maxItems: 1
@@ -76,6 +78,7 @@ properties:
maxItems: 3
required:
+ - compatible
- reg
- ranges
- dma-ranges
@@ -143,11 +146,15 @@ examples:
#address-cells = <3>;
#size-cells = <2>;
#interrupt-cells = <1>;
- interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+ interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "pcie", "msi";
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
- interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH
+ 0 0 0 2 &gicv2 GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH
+ 0 0 0 3 &gicv2 GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH
+ 0 0 0 4 &gicv2 GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>;
+
msi-parent = <&pcie0>;
msi-controller;
ranges = <0x02000000 0x0 0xf8000000 0x6 0x00000000 0x0 0x04000000>;
@@ -155,5 +162,24 @@ examples:
<0x42000000 0x1 0x80000000 0x3 0x00000000 0x0 0x80000000>;
brcm,enable-ssc;
brcm,scb-sizes = <0x0000000080000000 0x0000000080000000>;
+
+ /* PCIe bridge, Root Port */
+ pci@0,0 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ compatible = "pciclass,0604";
+ device_type = "pci";
+ vpcie3v3-supply = <&vreg7>;
+ ranges;
+
+ /* PCIe endpoint */
+ pci-ep@0,0 {
+ assigned-addresses =
+ <0x82010000 0x0 0xf8000000 0x6 0x00000000 0x0 0x2000>;
+ reg = <0x0 0x0 0x0 0x0 0x0>;
+ compatible = "pci14e4,1688";
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml
index 651eee88989d..e6ef1012a580 100644
--- a/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/cdns,cdns-pcie-ep.yaml
@@ -11,7 +11,6 @@ maintainers:
allOf:
- $ref: "cdns-pcie-ep.yaml#"
- - $ref: "pci-ep.yaml#"
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pci/cdns-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/cdns-pcie-ep.yaml
index 21e8a8849076..baeafda36ebe 100644
--- a/Documentation/devicetree/bindings/pci/cdns-pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/cdns-pcie-ep.yaml
@@ -11,6 +11,7 @@ maintainers:
allOf:
- $ref: "cdns-pcie.yaml#"
+ - $ref: "pci-ep.yaml#"
properties:
cdns,max-outbound-regions:
diff --git a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
index acea1cd444fd..643a6333b07b 100644
--- a/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/fsl,imx6q-pcie.yaml
@@ -127,6 +127,12 @@ properties:
enum: [1, 2, 3, 4]
default: 1
+ phys:
+ maxItems: 1
+
+ phy-names:
+ const: pcie-phy
+
reset-gpio:
description: Should specify the GPIO for controlling the PCI bus device
reset signal. It's not polarity aware and defaults to active-low reset
diff --git a/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml b/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml
index cbee87802559..c9f04999c9cf 100644
--- a/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/hisilicon,kirin-pcie.yaml
@@ -37,6 +37,19 @@ properties:
minItems: 3
maxItems: 4
+ clocks: true
+
+ clock-names:
+ items:
+ - const: pcie_phy_ref
+ - const: pcie_aux
+ - const: pcie_apb_phy
+ - const: pcie_apb_sys
+ - const: pcie_aclk
+
+ phys:
+ maxItems: 1
+
hisilicon,clken-gpios:
description: |
Clock input enablement GPIOs from PCI devices like Ethernet, M.2 and
diff --git a/Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml b/Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml
index 044fa967bc8b..d60f43fd9c5a 100644
--- a/Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/mediatek,mt7621-pcie.yaml
@@ -45,6 +45,9 @@ patternProperties:
phys:
maxItems: 1
+ phy-names:
+ pattern: '^pcie-phy[0-2]$'
+
required:
- "#interrupt-cells"
- interrupt-map-mask
diff --git a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
index 742206dbd965..0499b94627ae 100644
--- a/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
+++ b/Documentation/devicetree/bindings/pci/mediatek-pcie-gen3.yaml
@@ -95,6 +95,10 @@ properties:
phys:
maxItems: 1
+ phy-names:
+ items:
+ - const: pcie-phy
+
'#interrupt-cells':
const: 1
diff --git a/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml b/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
index 7b0776457178..edb4f81253c8 100644
--- a/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
+++ b/Documentation/devicetree/bindings/pci/microchip,pcie-host.yaml
@@ -46,6 +46,24 @@ properties:
msi-parent:
description: MSI controller the device is capable of using.
+ interrupt-controller:
+ type: object
+ properties:
+ '#address-cells':
+ const: 0
+
+ '#interrupt-cells':
+ const: 1
+
+ interrupt-controller: true
+
+ required:
+ - '#address-cells'
+ - '#interrupt-cells'
+ - interrupt-controller
+
+ additionalProperties: false
+
required:
- reg
- reg-names
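The new interrupt-controller child object models the host bridge's internal interrupt controller. A minimal subnode satisfying the schema (placed inside the PCIe host node):

    interrupt-controller {
        #address-cells = <0>;
        #interrupt-cells = <1>;
        interrupt-controller;
    };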
diff --git a/Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml b/Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml
index 2b9d1d6fc661..392f0ab488c2 100644
--- a/Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/sifive,fu740-pcie.yaml
@@ -32,6 +32,8 @@ properties:
- const: config
- const: mgmt
+ dma-coherent: true
+
num-lanes:
const: 8
@@ -61,10 +63,8 @@ required:
- num-lanes
- interrupts
- interrupt-names
- - interrupt-parent
- interrupt-map-mask
- interrupt-map
- - clock-names
- clocks
- resets
- pwren-gpios
@@ -104,7 +104,6 @@ examples:
<0x0 0x0 0x0 0x2 &plic0 58>,
<0x0 0x0 0x0 0x3 &plic0 59>,
<0x0 0x0 0x0 0x4 &plic0 60>;
- clock-names = "pcie_aux";
clocks = <&prci PRCI_CLK_PCIE_AUX>;
resets = <&prci 4>;
pwren-gpios = <&gpio 5 0>;
diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml
index b5935b1b153f..e59059ab5be0 100644
--- a/Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie-ep.yaml
@@ -64,16 +64,12 @@ properties:
maxItems: 1
deprecated: true
- max-functions:
- $ref: /schemas/types.yaml#/definitions/uint32
- description: maximum number of functions that can be configured
-
required:
- reg
- reg-names
- compatible
-unevaluatedProperties: false
+additionalProperties: true
examples:
- |
diff --git a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
index 9ed0dfba7f89..a5345c494744 100644
--- a/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
+++ b/Documentation/devicetree/bindings/pci/snps,dw-pcie.yaml
@@ -73,7 +73,7 @@ properties:
does not specify it, the driver autodetects it.
deprecated: true
-unevaluatedProperties: false
+additionalProperties: true
required:
- reg
diff --git a/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml b/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml
index 144cbcd60a1c..179ab0858482 100644
--- a/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/socionext,uniphier-pcie-ep.yaml
@@ -79,7 +79,7 @@ required:
- resets
- reset-names
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/pci/ti,am65-pci-ep.yaml b/Documentation/devicetree/bindings/pci/ti,am65-pci-ep.yaml
index 78c217d362a7..74195c1f5292 100644
--- a/Documentation/devicetree/bindings/pci/ti,am65-pci-ep.yaml
+++ b/Documentation/devicetree/bindings/pci/ti,am65-pci-ep.yaml
@@ -32,8 +32,12 @@ properties:
maxItems: 1
ti,syscon-pcie-mode:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: Phandle to the SYSCON entry
+ - description: pcie_ctrl register offset within SYSCON
description: Phandle to the SYSCON entry required for configuring PCIe in RC or EP mode.
- $ref: /schemas/types.yaml#/definitions/phandle
interrupts:
minItems: 1
@@ -65,9 +69,7 @@ examples:
<0x5506000 0x1000>;
reg-names = "app", "dbics", "addr_space", "atu";
power-domains = <&k3_pds 120 TI_SCI_PD_EXCLUSIVE>;
- ti,syscon-pcie-mode = <&pcie0_mode>;
- num-ib-windows = <16>;
- num-ob-windows = <16>;
+ ti,syscon-pcie-mode = <&scm_conf 0x4060>;
max-link-speed = <2>;
dma-coherent;
interrupts = <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
diff --git a/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml b/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml
index 834dc1c1743c..a20dccbafd94 100644
--- a/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml
+++ b/Documentation/devicetree/bindings/pci/ti,am65-pci-host.yaml
@@ -29,16 +29,27 @@ properties:
- const: config
- const: atu
+ interrupts:
+ maxItems: 1
+
power-domains:
maxItems: 1
ti,syscon-pcie-id:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: Phandle to the SYSCON entry
+ - description: pcie_device_id register offset within SYSCON
description: Phandle to the SYSCON entry required for getting PCIe device/vendor ID
- $ref: /schemas/types.yaml#/definitions/phandle
ti,syscon-pcie-mode:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ items:
+ - items:
+ - description: Phandle to the SYSCON entry
+ - description: pcie_ctrl register offset within SYSCON
description: Phandle to the SYSCON entry required for configuring PCIe in RC or EP mode.
- $ref: /schemas/types.yaml#/definitions/phandle
msi-map: true
@@ -84,10 +95,9 @@ examples:
#size-cells = <2>;
ranges = <0x81000000 0 0 0x10020000 0 0x00010000>,
<0x82000000 0 0x10030000 0x10030000 0 0x07FD0000>;
- ti,syscon-pcie-id = <&pcie_devid>;
- ti,syscon-pcie-mode = <&pcie0_mode>;
+ ti,syscon-pcie-id = <&scm_conf 0x0210>;
+ ti,syscon-pcie-mode = <&scm_conf 0x4060>;
bus-range = <0x0 0xff>;
- num-viewport = <16>;
max-link-speed = <2>;
dma-coherent;
interrupts = <GIC_SPI 340 IRQ_TYPE_EDGE_RISING>;
diff --git a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
index cc900202df29..2115d5a3f0e1 100644
--- a/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
+++ b/Documentation/devicetree/bindings/pci/ti,j721e-pci-host.yaml
@@ -60,6 +60,8 @@ properties:
- const: fck
- const: pcie_refclk
+ dma-coherent: true
+
vendor-id:
const: 0x104c
diff --git a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
index a2bbc0eb7220..32f4641085bc 100644
--- a/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
+++ b/Documentation/devicetree/bindings/pci/xilinx-versal-cpm.yaml
@@ -55,7 +55,6 @@ required:
- reg-names
- "#interrupt-cells"
- interrupts
- - interrupt-parent
- interrupt-map
- interrupt-map-mask
- bus-range
diff --git a/Documentation/devicetree/bindings/perf/arm,dsu-pmu.yaml b/Documentation/devicetree/bindings/perf/arm,dsu-pmu.yaml
new file mode 100644
index 000000000000..aef63a542f34
--- /dev/null
+++ b/Documentation/devicetree/bindings/perf/arm,dsu-pmu.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright 2021 Arm Ltd.
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/perf/arm,dsu-pmu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ARM DynamIQ Shared Unit (DSU) Performance Monitor Unit (PMU)
+
+maintainers:
+ - Suzuki K Poulose <suzuki.poulose@arm.com>
+ - Robin Murphy <robin.murphy@arm.com>
+
+description:
+ ARM DynamIQ Shared Unit (DSU) integrates one or more CPU cores with a shared
+ L3 memory system, control logic and external interfaces to form a multicore
+ cluster. The PMU enables gathering various statistics on the operation of the
+ DSU. The PMU provides independent 32-bit counters that can count any of the
+ supported events, along with a 64-bit cycle counter. The PMU is accessed via
+ CPU system registers and has no MMIO component.
+
+properties:
+ compatible:
+ oneOf:
+ - const: arm,dsu-pmu
+ - items:
+ - const: arm,dsu-110-pmu
+ - const: arm,dsu-pmu
+
+ interrupts:
+ items:
+ - description: nCLUSTERPMUIRQ interrupt
+
+ cpus:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 1
+ maxItems: 12
+ description: List of phandles for the CPUs connected to this DSU instance.
+
+required:
+ - compatible
+ - interrupts
+ - cpus
+
+additionalProperties: false
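The new schema ships without an examples section; a hypothetical node satisfying it could look as follows (the interrupt number and CPU phandles are illustrative assumptions):

    dsu-pmu {
        compatible = "arm,dsu-pmu";
        interrupts = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
        cpus = <&cpu0>, <&cpu1>, <&cpu2>, <&cpu3>;
    };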
diff --git a/Documentation/devicetree/bindings/phy/amlogic,meson8-hdmi-tx-phy.yaml b/Documentation/devicetree/bindings/phy/amlogic,meson8-hdmi-tx-phy.yaml
new file mode 100644
index 000000000000..1f085cdd1c85
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/amlogic,meson8-hdmi-tx-phy.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/amlogic,meson8-hdmi-tx-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Amlogic Meson8, Meson8b and Meson8m2 HDMI TX PHY
+
+maintainers:
+ - Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+
+description: |+
+ The HDMI TX PHY node should be the child of a syscon node with the
+ required property:
+
+ compatible = "amlogic,meson-hhi-sysctrl", "simple-mfd", "syscon"
+
+ Refer to the bindings described in
+ Documentation/devicetree/bindings/mfd/syscon.yaml
+
+properties:
+ $nodename:
+ pattern: "^hdmi-phy@[0-9a-f]+$"
+
+ compatible:
+ oneOf:
+ - items:
+ - enum:
+ - amlogic,meson8b-hdmi-tx-phy
+ - amlogic,meson8m2-hdmi-tx-phy
+ - const: amlogic,meson8-hdmi-tx-phy
+ - const: amlogic,meson8-hdmi-tx-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ description:
+ HDMI TMDS clock
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ hdmi-phy@3a0 {
+ compatible = "amlogic,meson8-hdmi-tx-phy";
+ reg = <0x3a0 0xc>;
+ clocks = <&tmds_clock>;
+ #phy-cells = <0>;
+ };
+ - |
+ hdmi-phy@3a0 {
+ compatible = "amlogic,meson8b-hdmi-tx-phy", "amlogic,meson8-hdmi-tx-phy";
+ reg = <0x3a0 0xc>;
+ clocks = <&tmds_clock>;
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.txt b/Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.txt
deleted file mode 100644
index 10efff28b52b..000000000000
--- a/Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.txt
+++ /dev/null
@@ -1,47 +0,0 @@
-Broadcom Cygnus PCIe PHY
-
-Required properties:
-- compatible: must be "brcm,cygnus-pcie-phy"
-- reg: base address and length of the PCIe PHY block
-- #address-cells: must be 1
-- #size-cells: must be 0
-
-Each PCIe PHY should be represented by a child node
-
-Required properties For the child node:
-- reg: the PHY ID
-0 - PCIe RC 0
-1 - PCIe RC 1
-- #phy-cells: must be 0
-
-Example:
- pcie_phy: phy@301d0a0 {
- compatible = "brcm,cygnus-pcie-phy";
- reg = <0x0301d0a0 0x14>;
-
- pcie0_phy: phy@0 {
- reg = <0>;
- #phy-cells = <0>;
- };
-
- pcie1_phy: phy@1 {
- reg = <1>;
- #phy-cells = <0>;
- };
- };
-
- /* users of the PCIe phy */
-
- pcie0: pcie@18012000 {
- ...
- ...
- phys = <&pcie0_phy>;
- phy-names = "pcie-phy";
- };
-
- pcie1: pcie@18013000 {
- ...
- ...
- phys = <pcie1_phy>;
- phy-names = "pcie-phy";
- };
diff --git a/Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.yaml
new file mode 100644
index 000000000000..045699c65779
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,cygnus-pcie-phy.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/brcm,cygnus-pcie-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom Cygnus PCIe PHY
+
+maintainers:
+ - Ray Jui <ray.jui@broadcom.com>
+ - Scott Branden <scott.branden@broadcom.com>
+
+properties:
+ $nodename:
+ pattern: "^pcie[-|_]phy(@.*)?$"
+
+ compatible:
+ items:
+ - const: brcm,cygnus-pcie-phy
+
+ reg:
+ maxItems: 1
+ description: >
+ Base address and length of the PCIe PHY block
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+patternProperties:
+ "^pcie-phy@[0-9]+$":
+ type: object
+ description: >
+ PCIe PHY child nodes
+
+ properties:
+ reg:
+ maxItems: 1
+ description: >
+ The PCIe PHY port number
+
+ "#phy-cells":
+ const: 0
+
+ required:
+ - reg
+ - "#phy-cells"
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ pcie_phy: pcie_phy@301d0a0 {
+ compatible = "brcm,cygnus-pcie-phy";
+ reg = <0x0301d0a0 0x14>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pcie0_phy: pcie-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+
+ pcie1_phy: pcie-phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/brcm,mdio-mux-bus-pci.txt b/Documentation/devicetree/bindings/phy/brcm,mdio-mux-bus-pci.txt
deleted file mode 100644
index 5b51007c6f24..000000000000
--- a/Documentation/devicetree/bindings/phy/brcm,mdio-mux-bus-pci.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* Broadcom NS2 PCIe PHY binding document
-
-Required bus properties:
-- reg: MDIO Bus number for the MDIO interface
-- #address-cells: must be 1
-- #size-cells: must be 0
-
-Required PHY properties:
-- compatible: should be "brcm,ns2-pcie-phy"
-- reg: MDIO Phy ID for the MDIO interface
-- #phy-cells: must be 0
-
-This is a child bus node of "brcm,mdio-mux-iproc" node.
-
-Example:
-
-mdio@0 {
- reg = <0x0>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- pci_phy0: pci-phy@0 {
- compatible = "brcm,ns2-pcie-phy";
- reg = <0x0>;
- #phy-cells = <0>;
- };
-};
diff --git a/Documentation/devicetree/bindings/phy/brcm,ns2-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/brcm,ns2-pcie-phy.yaml
new file mode 100644
index 000000000000..70eb48b391c9
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/brcm,ns2-pcie-phy.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/brcm,ns2-pcie-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom NS2 PCIe PHY
+
+maintainers:
+ - Ray Jui <ray.jui@broadcom.com>
+ - Scott Branden <scott.branden@broadcom.com>
+
+properties:
+ compatible:
+ const: brcm,ns2-pcie-phy
+
+ reg:
+ maxItems: 1
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pci-phy@0 {
+ compatible = "brcm,ns2-pcie-phy";
+ reg = <0x0>;
+ #phy-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/fsl,imx8-pcie-phy.yaml
new file mode 100644
index 000000000000..b6421eedece3
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/fsl,imx8-pcie-phy.yaml
@@ -0,0 +1,92 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/fsl,imx8-pcie-phy.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale i.MX8 SoC series PCIe PHY Device Tree Bindings
+
+maintainers:
+ - Richard Zhu <hongxing.zhu@nxp.com>
+
+properties:
+ "#phy-cells":
+ const: 0
+
+ compatible:
+ enum:
+ - fsl,imx8mm-pcie-phy
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ref
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ items:
+ - const: pciephy
+
+ fsl,refclk-pad-mode:
+ description: |
+ Specifies the mode of the refclk pad used. It can be UNUSED (the PHY
+ refclock is derived from an SoC-internal source), INPUT (the PHY
+ refclock is provided externally via the refclk pad) or OUTPUT (the
+ PHY refclock is derived from an SoC-internal source and provided on
+ the refclk pad). Refer to include/dt-bindings/phy/phy-imx8-pcie.h
+ for the constants to be used.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [ 0, 1, 2 ]
+
+ fsl,tx-deemph-gen1:
+ description: Gen1 De-emphasis value (optional).
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0
+
+ fsl,tx-deemph-gen2:
+ description: Gen2 De-emphasis value (optional).
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0
+
+ fsl,clkreq-unsupported:
+ type: boolean
+ description: A boolean property indicating the CLKREQ# signal is
+ not supported in the board design (optional)
+
+required:
+ - "#phy-cells"
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - fsl,refclk-pad-mode
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/imx8mm-clock.h>
+ #include <dt-bindings/phy/phy-imx8-pcie.h>
+ #include <dt-bindings/reset/imx8mq-reset.h>
+
+ pcie_phy: pcie-phy@32f00000 {
+ compatible = "fsl,imx8mm-pcie-phy";
+ reg = <0x32f00000 0x10000>;
+ clocks = <&clk IMX8MM_CLK_PCIE1_PHY>;
+ clock-names = "ref";
+ assigned-clocks = <&clk IMX8MM_CLK_PCIE1_PHY>;
+ assigned-clock-rates = <100000000>;
+ assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_100M>;
+ resets = <&src IMX8MQ_RESET_PCIEPHY>;
+ reset-names = "pciephy";
+ fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_INPUT>;
+ #phy-cells = <0>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/phy/intel,phy-thunderbay-emmc.yaml b/Documentation/devicetree/bindings/phy/intel,phy-thunderbay-emmc.yaml
new file mode 100644
index 000000000000..34bdb5c4cae8
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/intel,phy-thunderbay-emmc.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/intel,phy-thunderbay-emmc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Thunder Bay eMMC PHY bindings
+
+maintainers:
+ - Srikandan Nandhini <nandhini.srikandan@intel.com>
+
+properties:
+ compatible:
+ const: intel,thunderbay-emmc-phy
+
+ "#phy-cells":
+ const: 0
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: emmcclk
+
+required:
+ - "#phy-cells"
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ mmc_phy@80440800 {
+ #phy-cells = <0x0>;
+ compatible = "intel,thunderbay-emmc-phy";
+ status = "okay";
+ reg = <0x80440800 0x100>;
+ clocks = <&emmc>;
+ clock-names = "emmcclk";
+ };
diff --git a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
index 9e6c0f43f1c6..05ee274b4b71 100644
--- a/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
+++ b/Documentation/devicetree/bindings/phy/mediatek,tphy.yaml
@@ -160,6 +160,24 @@ patternProperties:
- PHY_TYPE_PCIE
- PHY_TYPE_SATA
+ nvmem-cells:
+ items:
+ - description: internal R efuse for U2 PHY or U3/PCIe PHY
+ - description: rx_imp_sel efuse for U3/PCIe PHY
+ - description: tx_imp_sel efuse for U3/PCIe PHY
+ description: |
+ Phandles to the nvmem cells that contain the efuse data;
+ available only for the U2 PHY, or U3/PCIe PHYs of version 2/3.
+ All three items should be provided together for a U3/PCIe PHY
+ when software is used to load the efuse data;
+ if unspecified, the hardware auto-loads the efuse data.
+
+ nvmem-cell-names:
+ items:
+ - const: intr
+ - const: rx_imp
+ - const: tx_imp
+
# The following optional vendor properties are only for debug or HQA test
mediatek,eye-src:
description:
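For a U3/PCIe port that loads its efuse data via software, all three cells are passed together. A sketch of just the new properties on a port subnode (cell labels are hypothetical; other required port properties are omitted):

    u3port0: usb-phy@700 {
        reg = <0x700 0x900>;
        #phy-cells = <1>;
        nvmem-cells = <&u3_intr_p0>, <&u3_rx_imp_p0>, <&u3_tx_imp_p0>;
        nvmem-cell-names = "intr", "rx_imp", "tx_imp";
    };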
diff --git a/Documentation/devicetree/bindings/phy/microchip,lan966x-serdes.yaml b/Documentation/devicetree/bindings/phy/microchip,lan966x-serdes.yaml
new file mode 100644
index 000000000000..6e914fbbac56
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/microchip,lan966x-serdes.yaml
@@ -0,0 +1,59 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/phy/microchip,lan966x-serdes.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip Lan966x Serdes controller
+
+maintainers:
+ - Horatiu Vultur <horatiu.vultur@microchip.com>
+
+description: |
+ Lan966x has 7 interfaces, consisting of 2 copper transceivers (CU),
+ 3 SERDES6G and 2 RGMII interfaces. Two of the SERDES6G support QSGMII.
+ It also has 8 logical Ethernet ports which can be connected to these
+ interfaces. The SerDes controller allows these interfaces to be
+ configured and "muxed" to the different ports.
+
+ For simple selection of the interface used with a port, the macros
+ CU(X), SERDES6G(X) and RGMII(X) are defined, where X is the index of
+ that interface type. For example, CU(1) means use copper transceiver 1,
+ and SERDES6G(2) means use SERDES6G interface 2.
+
+properties:
+ $nodename:
+ pattern: "^serdes@[0-9a-f]+$"
+
+ compatible:
+ const: microchip,lan966x-serdes
+
+ reg:
+ items:
+ - description: HSIO registers
+ - description: HW_STAT register
+
+ '#phy-cells':
+ const: 2
+ description: |
+ - Input port to use for a given macro.
+ - The macro to be used. The macros are defined in
+ dt-bindings/phy/phy-lan966x-serdes.
+
+required:
+ - compatible
+ - reg
+ - '#phy-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ serdes: serdes@e2004010 {
+ compatible = "microchip,lan966x-serdes";
+ reg = <0xe202c000 0x9c>, <0xe2004010 0x4>;
+ #phy-cells = <2>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml b/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml
index e71b32c9c0d1..a9e227d8b076 100644
--- a/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-cadence-sierra.yaml
@@ -113,6 +113,15 @@ patternProperties:
minimum: 1
maximum: 16
+ cdns,ssc-mode:
+ description:
+ Specifies the Spread Spectrum Clocking mode used. It can be NO_SSC,
+ EXTERNAL_SSC or INTERNAL_SSC.
+ Refer to include/dt-bindings/phy/phy-cadence.h for the constants to be used.
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum: [0, 1, 2]
+ default: 1
+
required:
- reg
- resets
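A Sierra PHY subnode using the new property might look like this, assuming the CDNS_SERDES_* constants live in include/dt-bindings/phy/phy-cadence.h:

    #include <dt-bindings/phy/phy-cadence.h>

    pcie0_phy: phy@0 {
        reg = <0>;
        resets = <&serdes_rst 1>;
        cdns,ssc-mode = <CDNS_SERDES_EXTERNAL_SSC>;
    };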
diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
index bd9ae11c9994..2fec9e54ad0e 100644
--- a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml
@@ -202,7 +202,7 @@ examples:
#phy-cells = <0>;
cdns,phy-type = <PHY_TYPE_PCIE>;
cdns,num-lanes = <2>;
- cdns,ssc-mode = <TORRENT_SERDES_NO_SSC>;
+ cdns,ssc-mode = <CDNS_SERDES_NO_SSC>;
};
phy@2 {
@@ -211,7 +211,7 @@ examples:
#phy-cells = <0>;
cdns,phy-type = <PHY_TYPE_SGMII>;
cdns,num-lanes = <1>;
- cdns,ssc-mode = <TORRENT_SERDES_NO_SSC>;
+ cdns,ssc-mode = <CDNS_SERDES_NO_SSC>;
};
};
};
diff --git a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml
index 5bebd86bf8b6..4b75289735eb 100644
--- a/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml
+++ b/Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.yaml
@@ -18,6 +18,7 @@ properties:
- rockchip,rk3328-usb2phy
- rockchip,rk3366-usb2phy
- rockchip,rk3399-usb2phy
+ - rockchip,rk3568-usb2phy
- rockchip,rv1108-usb2phy
reg:
@@ -50,6 +51,10 @@ properties:
description:
Phandle to the extcon device providing the cable state for the otg phy.
+ interrupts:
+ description: Muxed interrupt for both ports
+ maxItems: 1
+
rockchip,usbgrf:
$ref: /schemas/types.yaml#/definitions/phandle
description:
@@ -67,6 +72,7 @@ properties:
interrupts:
description: host linestate interrupt
+ maxItems: 1
interrupt-names:
const: linestate
@@ -78,8 +84,6 @@ properties:
required:
- "#phy-cells"
- - interrupts
- - interrupt-names
otg-port:
type: object
@@ -109,8 +113,6 @@ properties:
required:
- "#phy-cells"
- - interrupts
- - interrupt-names
required:
- compatible
@@ -120,6 +122,40 @@ required:
- host-port
- otg-port
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: rockchip,rk3568-usb2phy
+
+ then:
+ properties:
+ host-port:
+ properties:
+ interrupts: false
+
+ otg-port:
+ properties:
+ interrupts: false
+
+ required:
+ - interrupts
+
+ else:
+ properties:
+ interrupts: false
+
+ host-port:
+ required:
+ - interrupts
+ - interrupt-names
+
+ otg-port:
+ required:
+ - interrupts
+ - interrupt-names
+
additionalProperties: false
examples:
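Under the new conditional, an rk3568 PHY takes one muxed interrupt at the top level and its port subnodes carry none, while older SoCs keep per-port interrupts instead. A hypothetical rk3568 sketch (register, clock and interrupt values are illustrative):

    u2phy0: usb2phy@fe8a0000 {
        compatible = "rockchip,rk3568-usb2phy";
        reg = <0xfe8a0000 0x10000>;
        clocks = <&pmucru 1>;
        clock-names = "phyclk";
        #clock-cells = <0>;
        clock-output-names = "usb480m_phy";
        interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>;

        u2phy0_host: host-port {
            #phy-cells = <0>;
        };

        u2phy0_otg: otg-port {
            #phy-cells = <0>;
        };
    };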
diff --git a/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.txt b/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.txt
deleted file mode 100644
index d23ff90baad5..000000000000
--- a/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-NVIDIA Tegra194 P2U binding
-
-Tegra194 has two PHY bricks namely HSIO (High Speed IO) and NVHS (NVIDIA High
-Speed) each interfacing with 12 and 8 P2U instances respectively.
-A P2U instance is a glue logic between Synopsys DesignWare Core PCIe IP's PIPE
-interface and PHY of HSIO/NVHS bricks. Each P2U instance represents one PCIe
-lane.
-
-Required properties:
-- compatible: For Tegra19x, must contain "nvidia,tegra194-p2u".
-- reg: Should be the physical address space and length of respective each P2U
- instance.
-- reg-names: Must include the entry "ctl".
-
-Required properties for PHY port node:
-- #phy-cells: Defined by generic PHY bindings. Must be 0.
-
-Refer to phy/phy-bindings.txt for the generic PHY binding properties.
-
-Example:
-
-p2u_hsio_0: phy@3e10000 {
- compatible = "nvidia,tegra194-p2u";
- reg = <0x03e10000 0x10000>;
- reg-names = "ctl";
-
- #phy-cells = <0>;
-};
diff --git a/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.yaml b/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.yaml
new file mode 100644
index 000000000000..9a89d05efbda
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/phy-tegra194-p2u.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/phy-tegra194-p2u.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: NVIDIA Tegra194 P2U binding
+
+maintainers:
+ - Thierry Reding <treding@nvidia.com>
+
+description: >
+ Tegra194 has two PHY bricks, namely HSIO (High Speed IO) and NVHS (NVIDIA
+ High Speed), interfacing with 12 and 8 P2U instances respectively.
+ A P2U instance is the glue logic between the Synopsys DesignWare Core PCIe
+ IP's PIPE interface and the PHY of the HSIO/NVHS bricks. Each P2U instance
+ represents one PCIe lane.
+
+properties:
+ compatible:
+ const: nvidia,tegra194-p2u
+
+ reg:
+ maxItems: 1
+ description: Physical address space and length of each P2U instance.
+
+ reg-names:
+ items:
+ - const: ctl
+
+ '#phy-cells':
+ const: 0
+
+additionalProperties: false
+
+examples:
+ - |
+ p2u_hsio_0: phy@3e10000 {
+ compatible = "nvidia,tegra194-p2u";
+ reg = <0x03e10000 0x10000>;
+ reg-names = "ctl";
+
+ #phy-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
new file mode 100644
index 000000000000..9076e19b6417
--- /dev/null
+++ b/Documentation/devicetree/bindings/phy/qcom,edp-phy.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/phy/qcom,edp-phy.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm eDP PHY
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+description:
+ The Qualcomm eDP PHY is found in a number of Qualcomm platforms and provides
+ the physical interface for Embedded Display Port.
+
+properties:
+ compatible:
+ const: qcom,sc8180x-edp-phy
+
+ reg:
+ items:
+ - description: PHY base register block
+ - description: tx0 register block
+ - description: tx1 register block
+ - description: PLL register block
+
+ clocks:
+ maxItems: 2
+
+ clock-names:
+ items:
+ - const: aux
+ - const: cfg_ahb
+
+ "#clock-cells":
+ const: 1
+
+ "#phy-cells":
+ const: 0
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - clock-names
+ - "#clock-cells"
+ - "#phy-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ phy@aec2a00 {
+ compatible = "qcom,sc8180x-edp-phy";
+ reg = <0x0aec2a00 0x1c0>,
+ <0x0aec2200 0xa0>,
+ <0x0aec2600 0xa0>,
+ <0x0aec2000 0x19c>;
+
+ clocks = <&dispcc 0>, <&dispcc 1>;
+ clock-names = "aux", "cfg_ahb";
+
+ #clock-cells = <1>;
+ #phy-cells = <0>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
index 630ceaf915e2..e417cd667997 100644
--- a/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,qmp-phy.yaml
@@ -50,6 +50,10 @@ properties:
- qcom,sm8350-qmp-ufs-phy
- qcom,sm8350-qmp-usb3-phy
- qcom,sm8350-qmp-usb3-uni-phy
+ - qcom,sm8450-qmp-gen3x1-pcie-phy
+ - qcom,sm8450-qmp-gen4x2-pcie-phy
+ - qcom,sm8450-qmp-ufs-phy
+ - qcom,sm8450-qmp-usb3-phy
- qcom,sdx55-qmp-pcie-phy
- qcom,sdx55-qmp-usb3-uni-phy
@@ -332,6 +336,8 @@ allOf:
- qcom,sm8250-qmp-gen3x1-pcie-phy
- qcom,sm8250-qmp-gen3x2-pcie-phy
- qcom,sm8250-qmp-modem-pcie-phy
+ - qcom,sm8450-qmp-gen3x1-pcie-phy
+ - qcom,sm8450-qmp-gen4x2-pcie-phy
then:
properties:
clocks:
diff --git a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
index aa2e409a1a09..e651a63a4be3 100644
--- a/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,qusb2-phy.yaml
@@ -30,6 +30,7 @@ properties:
- enum:
- qcom,sc7180-qusb2-phy
- qcom,sdm845-qusb2-phy
+ - qcom,sm6350-qusb2-phy
- const: qcom,qusb2-v2-phy
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
index 20203a8a9e41..0dfe6914ec87 100644
--- a/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
+++ b/Documentation/devicetree/bindings/phy/qcom,usb-snps-femto-v2.yaml
@@ -20,6 +20,7 @@ properties:
- qcom,sm8150-usb-hs-phy
- qcom,sm8250-usb-hs-phy
- qcom,sm8350-usb-hs-phy
+ - qcom,sm8450-usb-hs-phy
- qcom,usb-snps-femto-v2-phy
reg:
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
index 745c525ce6b9..3b400a85b44a 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml
@@ -16,6 +16,7 @@ maintainers:
properties:
compatible:
enum:
+ - socionext,uniphier-pro4-ahci-phy
- socionext,uniphier-pxs2-ahci-phy
- socionext,uniphier-pxs3-ahci-phy
@@ -26,23 +27,35 @@ properties:
const: 0
clocks:
+ minItems: 1
maxItems: 2
clock-names:
oneOf:
- items: # for PXs2
- const: link
+ - items: # for Pro4
+ - const: link
+ - const: gio
- items: # for others
- const: link
- const: phy
resets:
- maxItems: 2
+ minItems: 2
+ maxItems: 5
reset-names:
- items:
- - const: link
- - const: phy
+ oneOf:
+ - items: # for Pro4
+ - const: link
+ - const: gio
+ - const: pm
+ - const: tx
+ - const: rx
+ - items: # for others
+ - const: link
+ - const: phy
required:
- compatible
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
index 3e0566899041..fbb71d6dd531 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-pcie-phy.yaml
@@ -19,6 +19,7 @@ properties:
- socionext,uniphier-pro5-pcie-phy
- socionext,uniphier-ld20-pcie-phy
- socionext,uniphier-pxs3-pcie-phy
+ - socionext,uniphier-nx1-pcie-phy
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
index a681cbc3b4ef..33946efcac5e 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3hs-phy.yaml
@@ -22,6 +22,7 @@ properties:
- socionext,uniphier-pxs2-usb3-hsphy
- socionext,uniphier-ld20-usb3-hsphy
- socionext,uniphier-pxs3-usb3-hsphy
+ - socionext,uniphier-nx1-usb3-hsphy
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
index 41c0dd68ee25..92d46eb913a3 100644
--- a/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-usb3ss-phy.yaml
@@ -23,6 +23,7 @@ properties:
- socionext,uniphier-pxs2-usb3-ssphy
- socionext,uniphier-ld20-usb3-ssphy
- socionext,uniphier-pxs3-usb3-ssphy
+ - socionext,uniphier-nx1-usb3-ssphy
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/pinctrl/actions,s500-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/actions,s500-pinctrl.yaml
index 3f94f6944740..fb0f69ce9c16 100644
--- a/Documentation/devicetree/bindings/pinctrl/actions,s500-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/actions,s500-pinctrl.yaml
@@ -184,6 +184,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
index cce63c3cc463..bfce850c2035 100644
--- a/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/allwinner,sun4i-a10-pinctrl.yaml
@@ -147,6 +147,7 @@ allOf:
# boards are defining it at the moment so it would generate a lot of
# warnings.
+ - $ref: "pinctrl.yaml#"
- if:
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
index 572923d7023e..d3b11351ca45 100644
--- a/Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
@@ -72,6 +72,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
index 100bb6dea3ec..c689bea7ce6e 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
@@ -50,6 +50,9 @@ patternProperties:
TXD3, TXD4, UART6, USB11D1, USB11H2, USB2D1, USB2H1, USBCKI, VGABIOS_ROM,
VGAHS, VGAVS, VPI18, VPI24, VPI30, VPO12, VPO24, WDTRST1, WDTRST2]
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
index 904697bc9415..d316cc082107 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
@@ -62,6 +62,9 @@ patternProperties:
USB11BHID, USB2AD, USB2AH, USB2BD, USB2BH, USBCKI, VGABIOSROM, VGAHS,
VGAVS, VPI24, VPO, WDTRST1, WDTRST2]
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- aspeed,external-nodes
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
index ad2866c99738..57b68d6c7c70 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
@@ -83,6 +83,9 @@ patternProperties:
UART7, UART8, UART9, USBA, USBB, VB, VGAHS, VGAVS, WDTRST1, WDTRST2,
WDTRST3, WDTRST4]
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6318-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6318-pinctrl.yaml
index 08995a4f854b..ab019a1998e8 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6318-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6318-pinctrl.yaml
@@ -37,6 +37,9 @@ patternProperties:
enum: [ gpio0, gpio1, gpio2, gpio3, gpio4, gpio5, gpio6, gpio7,
gpio8, gpio9, gpio10, gpio11, gpio12, gpio13, gpio40 ]
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm63268-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/brcm,bcm63268-pinctrl.yaml
index 58ffed44b3c4..8c9d4668c8c4 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm63268-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm63268-pinctrl.yaml
@@ -41,6 +41,9 @@ patternProperties:
vdsl_phy_override_1_grp, vdsl_phy_override_2_grp,
vdsl_phy_override_3_grp, dsl_gpio8, dsl_gpio9 ]
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6328-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6328-pinctrl.yaml
index 0fd24f40afb1..a8e22ec02215 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6328-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6328-pinctrl.yaml
@@ -36,6 +36,9 @@ patternProperties:
gpio20, gpio25, gpio26, gpio27, gpio28, hsspi_cs1,
usb_port1 ]
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6358-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6358-pinctrl.yaml
index 0c3ce256aa78..35867355a47a 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6358-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6358-pinctrl.yaml
@@ -34,6 +34,9 @@ patternProperties:
enum: [ ebi_cs_grp, uart1_grp, serial_led_grp, legacy_led_grp,
led_grp, spi_cs_grp, utopia_grp, pwm_syn_clk, sys_irq_grp ]
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6362-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6362-pinctrl.yaml
index 6f68fee373bd..b584d4b27223 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6362-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6362-pinctrl.yaml
@@ -41,6 +41,9 @@ patternProperties:
gpio15, gpio16, gpio17, gpio18, gpio19, gpio20, gpio21,
gpio22, gpio23, gpio24, gpio25, gpio26, gpio27, nand_grp ]
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6368-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6368-pinctrl.yaml
index f4168b9f4460..229323d9237d 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,bcm6368-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm6368-pinctrl.yaml
@@ -42,6 +42,9 @@ patternProperties:
gpio24, gpio25, gpio26, gpio27, gpio28, gpio29, gpio30,
gpio31, uart1_grp ]
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,ns-pinmux.yaml b/Documentation/devicetree/bindings/pinctrl/brcm,ns-pinmux.yaml
index fc39e3e9f71c..8d1e5b1cdd5f 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,ns-pinmux.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,ns-pinmux.yaml
@@ -53,6 +53,7 @@ patternProperties:
additionalProperties: false
allOf:
+ - $ref: "pinctrl.yaml#"
- if:
properties:
compatible:
@@ -77,7 +78,7 @@ additionalProperties: false
examples:
- |
- pin-controller@1800c1c0 {
+ pinctrl@1800c1c0 {
compatible = "brcm,bcm4708-pinmux";
reg = <0x1800c1c0 0x24>;
reg-names = "cru_gpio_control";
diff --git a/Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml b/Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml
index 46fbc73ab26b..a44691d9c57d 100644
--- a/Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/canaan,k210-fpioa.yaml
@@ -137,6 +137,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
@@ -151,9 +154,9 @@ examples:
#include <dt-bindings/clock/k210-clk.h>
#include <dt-bindings/reset/k210-rst.h>
- fpioa: pinmux@502B0000 {
+ fpioa: pinmux@502b0000 {
compatible = "canaan,k210-fpioa";
- reg = <0x502B0000 0x100>;
+ reg = <0x502b0000 0x100>;
clocks = <&sysclk K210_CLK_FPIOA>,
<&sysclk K210_CLK_APB0>;
clock-names = "ref", "pclk";
diff --git a/Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.yaml b/Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.yaml
index a07dd197176a..5cd512b7d5ba 100644
--- a/Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.yaml
@@ -51,15 +51,6 @@ properties:
appropriate of the LOCHNAGARx_PIN_NUM_GPIOS define, see [3].
maxItems: 1
- pinctrl-0:
- description:
- A phandle to the default pinctrl state.
-
- pinctrl-names:
- description:
- A pinctrl state named "default" must be defined.
- const: default
-
pin-settings:
type: object
patternProperties:
@@ -181,6 +172,9 @@ properties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- gpio-controller
diff --git a/Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml b/Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml
index 4cb174bf31ff..c85f759ae5a3 100644
--- a/Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/cirrus,madera.yaml
@@ -30,16 +30,6 @@ description: |
Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
properties:
- pinctrl-0:
- description:
- A phandle to the node containing the subnodes containing default
- configurations.
-
- pinctrl-names:
- description:
- A pinctrl state named "default" must be defined.
- const: default
-
pin-settings:
description:
One subnode is required to contain the default settings. It
@@ -117,6 +107,9 @@ properties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- pinctrl-0
- pinctrl-names
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml
index 6d7d162e6171..6717f163390b 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mm-pinctrl.yaml
@@ -58,6 +58,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml
index 7131cfd1fc45..b1cdbb56d4e4 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mn-pinctrl.yaml
@@ -58,6 +58,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml
index d474bc1f393b..4eed3a4e153a 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mp-pinctrl.yaml
@@ -58,6 +58,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml
index 0af2b6c95c17..d4a8ea5551a5 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8mq-pinctrl.yaml
@@ -58,6 +58,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imx8ulp-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imx8ulp-pinctrl.yaml
index 86622c4f374b..693398d88223 100644
--- a/Documentation/devicetree/bindings/pinctrl/fsl,imx8ulp-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imx8ulp-pinctrl.yaml
@@ -56,6 +56,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/fsl,imxrt1050.yaml b/Documentation/devicetree/bindings/pinctrl/fsl,imxrt1050.yaml
new file mode 100644
index 000000000000..1278f7293560
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/fsl,imxrt1050.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/fsl,imxrt1050.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale IMXRT1050 IOMUX Controller
+
+maintainers:
+ - Giulio Benetti <giulio.benetti@benettiengineering.com>
+ - Jesse Taube <Mr.Bossman075@gmail.com>
+
+description:
+ Please refer to fsl,imx-pinctrl.txt and pinctrl-bindings.txt in this directory
+  for the common binding parts and usage.
+
+properties:
+ compatible:
+ const: fsl,imxrt1050-iomuxc
+
+ reg:
+ maxItems: 1
+
+# Client device subnode's properties
+patternProperties:
+ 'grp$':
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+      Client device subnodes use the standard properties below.
+
+ properties:
+ fsl,pins:
+ description:
+        Each entry consists of 6 integers and represents the mux and config
+        setting for one pin. The first 5 integers <mux_reg conf_reg input_reg
+        mux_val input_val> are specified using a PIN_FUNC_ID macro, which can
+        be found in <include/dt-bindings/pinctrl/pins-imxrt1050.h>. The last
+        integer CONFIG is the pad setting value, such as pull-up, for this pin.
+        Please refer to the i.MXRT1050 Reference Manual for detailed CONFIG
+        settings.
+ $ref: /schemas/types.yaml#/definitions/uint32-matrix
+ items:
+ items:
+ - description: |
+ "mux_reg" indicates the offset of mux register.
+ - description: |
+ "conf_reg" indicates the offset of pad configuration register.
+ - description: |
+ "input_reg" indicates the offset of select input register.
+ - description: |
+ "mux_val" indicates the mux value to be applied.
+ - description: |
+ "input_val" indicates the select input value to be applied.
+ - description: |
+ "pad_setting" indicates the pad configuration value to be applied.
+
+ required:
+ - fsl,pins
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ iomuxc: iomuxc@401f8000 {
+ compatible = "fsl,imxrt1050-iomuxc";
+ reg = <0x401f8000 0x4000>;
+
+ pinctrl_lpuart1: lpuart1grp {
+ fsl,pins =
+ <0x0EC 0x2DC 0x000 0x2 0x0 0xf1>,
+ <0x0F0 0x2E0 0x000 0x2 0x0 0xf1>;
+ };
+ };
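
For illustration, the same group can also be written with a PIN_FUNC_ID macro
in place of the first five raw integers <mux_reg conf_reg input_reg mux_val
input_val>; the macro names below are hypothetical and only sketch the usual
<dt-bindings/pinctrl/pins-imxrt1050.h> style, with the pad setting value kept
explicit:

    pinctrl_lpuart1: lpuart1grp {
        fsl,pins =
            <MXRT1050_IOMUXC_AD_B0_12_LPUART1_TXD 0xf1>, /* hypothetical macro */
            <MXRT1050_IOMUXC_AD_B0_13_LPUART1_RXD 0xf1>;
    };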
diff --git a/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.yaml
index a12d0ceb7637..c2c370448b81 100644
--- a/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/ingenic,pinctrl.yaml
@@ -28,9 +28,6 @@ maintainers:
- Paul Cercueil <paul@crapouillou.net>
properties:
- nodename:
- pattern: "^pinctrl@[0-9a-f]+$"
-
compatible:
oneOf:
- enum:
@@ -121,6 +118,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
@@ -169,7 +169,7 @@ additionalProperties:
examples:
- |
- pin-controller@10010000 {
+ pinctrl@10010000 {
compatible = "ingenic,jz4770-pinctrl";
reg = <0x10010000 0x600>;
diff --git a/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml b/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml
index 2c0acb405e6c..b42548350188 100644
--- a/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/intel,lgm-io.yaml
@@ -47,6 +47,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/intel,pinctrl-thunderbay.yaml b/Documentation/devicetree/bindings/pinctrl/intel,pinctrl-thunderbay.yaml
new file mode 100644
index 000000000000..0ec476248f21
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/intel,pinctrl-thunderbay.yaml
@@ -0,0 +1,119 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/intel,pinctrl-thunderbay.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Intel Thunder Bay pin controller Device Tree Bindings
+
+maintainers:
+ - Lakshmi Sowjanya D <lakshmi.sowjanya.d@intel.com>
+
+description: |
+ Intel Thunder Bay SoC integrates a pin controller which enables control
+ of pin directions, input/output values and configuration
+ for a total of 67 pins.
+
+properties:
+ compatible:
+ const: intel,thunderbay-pinctrl
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+
+ '#gpio-cells':
+ const: 2
+
+ gpio-ranges:
+ maxItems: 1
+
+ interrupts:
+ description:
+ Specifies the interrupt lines to be used by the controller.
+ maxItems: 2
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+ const: 2
+
+patternProperties:
+ '^gpio@[0-9a-f]*$':
+ type: object
+
+ description:
+ Child nodes can be specified to contain pin configuration information,
+ which can then be utilized by pinctrl client devices.
+ The following properties are supported.
+
+ properties:
+ pins:
+ description: |
+ The name(s) of the pins to be configured in the child node.
+ Supported pin names are "GPIO0" up to "GPIO66".
+
+ bias-disable: true
+
+ bias-pull-down: true
+
+ bias-pull-up: true
+
+ drive-strength:
+ description: Drive strength for the pad.
+ enum: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
+
+ bias-bus-hold:
+ type: boolean
+
+ input-schmitt-enable:
+ type: boolean
+
+ slew-rate:
+ description: GPIO slew rate control.
+ 0 - Slow
+ 1 - Fast
+ enum: [0, 1]
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ // Example 1
+ pinctrl0: gpio@0 {
+ compatible = "intel,thunderbay-pinctrl";
+ reg = <0x600b0000 0x88>;
+ gpio-controller;
+ #gpio-cells = <0x2>;
+ gpio-ranges = <&pinctrl0 0 0 67>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ // Example 2
+ pinctrl1: gpio@1 {
+ compatible = "intel,thunderbay-pinctrl";
+ reg = <0x600c0000 0x88>;
+ gpio-controller;
+ #gpio-cells = <0x2>;
+ gpio-ranges = <&pinctrl1 0 0 53>;
+ interrupts = <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
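
A child pin configuration node accepted by the schema above could look like
the sketch below; the node name only has to match the '^gpio@[0-9a-f]*$'
pattern, and the pin names and property values here are illustrative rather
than taken from a real board:

    uart0-pins: gpio@0 {
        pins = "GPIO12", "GPIO13";  /* any of "GPIO0" up to "GPIO66" */
        bias-pull-up;
        drive-strength = <4>;
        slew-rate = <1>;            /* 1 = fast */
    };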
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt65xx-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt65xx-pinctrl.yaml
index f8e6e138dc13..6953c958ff7c 100644
--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt65xx-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt65xx-pinctrl.yaml
@@ -64,6 +64,9 @@ required:
- gpio-controller
- "#gpio-cells"
+allOf:
+ - $ref: "pinctrl.yaml#"
+
patternProperties:
'-[0-9]+$':
type: object
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt6779-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt6779-pinctrl.yaml
index 152c151c27ad..e7601c0f5a69 100644
--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt6779-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt6779-pinctrl.yaml
@@ -59,6 +59,9 @@ properties:
"#interrupt-cells":
const: 2
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt6797-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt6797-pinctrl.yaml
index 76a6df75ed9c..d42215f59afd 100644
--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt6797-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt6797-pinctrl.yaml
@@ -45,6 +45,9 @@ properties:
"#interrupt-cells":
const: 2
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
index 0feecd376c69..78a0175cecc7 100644
--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7622-pinctrl.yaml
@@ -42,6 +42,9 @@ properties:
"#interrupt-cells":
const: 2
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml
index 7602b11e8bce..4eadea55df10 100644
--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt7986-pinctrl.yaml
@@ -56,6 +56,9 @@ properties:
"#interrupt-cells":
const: 2
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml
index cc1509e9b981..2d13a57b8961 100644
--- a/Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/mediatek,mt8183-pinctrl.yaml
@@ -56,6 +56,9 @@ properties:
"#interrupt-cells":
const: 2
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt
index db99bd95d423..3bb76487669f 100644
--- a/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/mscc,ocelot-pinctrl.txt
@@ -4,7 +4,8 @@ Microsemi Ocelot pin controller Device Tree Bindings
Required properties:
- compatible : Should be "mscc,ocelot-pinctrl",
"mscc,jaguar2-pinctrl", "microchip,sparx5-pinctrl",
- "mscc,luton-pinctrl" or "mscc,serval-pinctrl"
+ "mscc,luton-pinctrl", "mscc,serval-pinctrl" or
+ "microchip,lan966x-pinctrl"
- reg : Address and length of the register set for the device
- gpio-controller : Indicates this device is a GPIO controller
- #gpio-cells : Must be 2.
diff --git a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
index 71ed0a9def84..4b22a9e3a447 100644
--- a/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/pincfg-node.yaml
@@ -114,6 +114,9 @@ properties:
description: enable output on a pin without actively driving it
(such as enabling an output buffer)
+ output-impedance-ohms:
+ description: set the output impedance of a pin to at most X ohms
+
output-low:
type: boolean
description: set the pin to output mode with low level
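
A consumer binding that adopts the new property would use it in an ordinary
pin configuration node, for example (node and pin names are illustrative):

    sd0-pins {
        pins = "SD0_CLK";
        output-impedance-ohms = <50>;  /* drive the pad at no more than 50 ohms */
    };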
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index 9dae60acf950..6904072d3944 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -97,47 +97,8 @@ For example:
};
== Pin controller devices ==
-Required properties: See the pin controller driver specific documentation
-Optional properties:
-#pinctrl-cells: Number of pin control cells in addition to the index within the
- pin controller device instance
-
-pinctrl-use-default: Boolean. Indicates that the OS can use the boot default
- pin configuration. This allows using an OS that does not have a
- driver for the pin controller. This property can be set either
- globally for the pin controller or in child nodes for individual
- pin group control.
-
-Pin controller devices should contain the pin configuration nodes that client
-devices reference.
-
-For example:
-
- pincontroller {
- ... /* Standard DT properties for the device itself elided */
-
- state_0_node_a {
- ...
- };
- state_1_node_a {
- ...
- };
- state_1_node_b {
- ...
- };
- }
-
-The contents of each of those pin configuration child nodes is defined
-entirely by the binding for the individual pin controller device. There
-exists no common standard for this content. The pinctrl framework only
-provides generic helper bindings that the pin controller driver can use.
-
-The pin configuration nodes need not be direct children of the pin controller
-device; they may be grandchildren, for example. Whether this is legal, and
-whether there is any interaction between the child and intermediate parent
-nodes, is again defined entirely by the binding for the individual pin
-controller device.
+See pinctrl.yaml
== Generic pin multiplexing node content ==
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml
index c4c071211611..3c84676a167d 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8192.yaml
@@ -108,6 +108,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml
index 5e2bb2bf3a55..328ea59c5466 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-mt8195.yaml
@@ -55,137 +55,162 @@ properties:
Identifying i2c pins pull up/down type which is RSEL. It can support
RSEL define or si unit value(ohm) to set different resistance.
-#PIN CONFIGURATION NODES
+# PIN CONFIGURATION NODES
patternProperties:
'-pins$':
type: object
- description: |
- A pinctrl node should contain at least one subnodes representing the
- pinctrl groups available on the machine. Each subnode will list the
- pins it needs, and how they should be configured, with regard to muxer
- configuration, pullups, drive strength, input enable/disable and
- input schmitt.
- An example of using macro:
- pincontroller {
- /* GPIO0 set as multifunction GPIO0 */
- gpio_pin {
- pinmux = <PINMUX_GPIO0__FUNC_GPIO0>;
- };
- /* GPIO8 set as multifunction SDA0 */
- i2c0_pin {
- pinmux = <PINMUX_GPIO8__FUNC_SDA0>;
- };
- };
- $ref: "pinmux-node.yaml"
-
- properties:
- pinmux:
- description: |
- Integer array, represents gpio pin number and mux setting.
- Supported pin number and mux varies for different SoCs, and are defined
- as macros in dt-bindings/pinctrl/<soc>-pinfunc.h directly.
-
- drive-strength:
- enum: [2, 4, 6, 8, 10, 12, 14, 16]
-
- bias-pull-down:
- description: |
- For pull down type is normal, it don't need add RSEL & R1R0 define
- and resistance value.
- For pull down type is PUPD/R0/R1 type, it can add R1R0 define to
- set different resistance. It can support "MTK_PUPD_SET_R1R0_00" &
- "MTK_PUPD_SET_R1R0_01" & "MTK_PUPD_SET_R1R0_10" & "MTK_PUPD_SET_R1R0_11"
- define in mt8195.
- For pull down type is RSEL, it can add RSEL define & resistance value(ohm)
- to set different resistance by identifying property "mediatek,rsel_resistance_in_si_unit".
- It can support "MTK_PULL_SET_RSEL_000" & "MTK_PULL_SET_RSEL_001"
- & "MTK_PULL_SET_RSEL_010" & "MTK_PULL_SET_RSEL_011" & "MTK_PULL_SET_RSEL_100"
- & "MTK_PULL_SET_RSEL_101" & "MTK_PULL_SET_RSEL_110" & "MTK_PULL_SET_RSEL_111"
- define in mt8195. It can also support resistance value(ohm) "75000" & "5000" in mt8195.
- oneOf:
- - enum: [100, 101, 102, 103]
- - description: mt8195 pull down PUPD/R0/R1 type define value.
- - enum: [200, 201, 202, 203, 204, 205, 206, 207]
- - description: mt8195 pull down RSEL type define value.
- - enum: [75000, 5000]
- - description: mt8195 pull down RSEL type si unit value(ohm).
-
- An example of using RSEL define:
- pincontroller {
- i2c0_pin {
- pinmux = <PINMUX_GPIO8__FUNC_SDA0>;
- bias-pull-down = <MTK_PULL_SET_RSEL_001>;
- };
- };
- An example of using si unit resistance value(ohm):
- &pio {
- mediatek,rsel_resistance_in_si_unit;
- }
- pincontroller {
- i2c0_pin {
- pinmux = <PINMUX_GPIO8__FUNC_SDA0>;
- bias-pull-down = <75000>;
- };
- };
-
- bias-pull-up:
+ additionalProperties: false
+ patternProperties:
+ '^pins':
+ type: object
+ additionalProperties: false
description: |
- For pull up type is normal, it don't need add RSEL & R1R0 define
- and resistance value.
- For pull up type is PUPD/R0/R1 type, it can add R1R0 define to
- set different resistance. It can support "MTK_PUPD_SET_R1R0_00" &
- "MTK_PUPD_SET_R1R0_01" & "MTK_PUPD_SET_R1R0_10" & "MTK_PUPD_SET_R1R0_11"
- define in mt8195.
- For pull up type is RSEL, it can add RSEL define & resistance value(ohm)
- to set different resistance by identifying property "mediatek,rsel_resistance_in_si_unit".
- It can support "MTK_PULL_SET_RSEL_000" & "MTK_PULL_SET_RSEL_001"
- & "MTK_PULL_SET_RSEL_010" & "MTK_PULL_SET_RSEL_011" & "MTK_PULL_SET_RSEL_100"
- & "MTK_PULL_SET_RSEL_101" & "MTK_PULL_SET_RSEL_110" & "MTK_PULL_SET_RSEL_111"
- define in mt8195. It can also support resistance value(ohm)
- "1000" & "1500" & "2000" & "3000" & "4000" & "5000" & "10000" & "75000" in mt8195.
- oneOf:
- - enum: [100, 101, 102, 103]
- - description: mt8195 pull up PUPD/R0/R1 type define value.
- - enum: [200, 201, 202, 203, 204, 205, 206, 207]
- - description: mt8195 pull up RSEL type define value.
- - enum: [1000, 1500, 2000, 3000, 4000, 5000, 10000, 75000]
- - description: mt8195 pull up RSEL type si unit value(ohm).
- An example of using RSEL define:
+            A pinctrl node should contain at least one subnode representing the
+            pinctrl groups available on the machine. Each subnode will list the
+            pins it needs and how they should be configured, with regard to muxer
+            configuration, pull-ups, drive strength, input enable/disable and
+            input schmitt.
+            An example using macros:
pincontroller {
- i2c0_pin {
- pinmux = <PINMUX_GPIO8__FUNC_SDA0>;
- bias-pull-up = <MTK_PULL_SET_RSEL_001>;
+ /* GPIO0 set as multifunction GPIO0 */
+ gpio-pins {
+ pins {
+ pinmux = <PINMUX_GPIO0__FUNC_GPIO0>;
+          };
};
- };
- An example of using si unit resistance value(ohm):
- &pio {
- mediatek,rsel_resistance_in_si_unit;
- }
- pincontroller {
- i2c0_pin {
- pinmux = <PINMUX_GPIO8__FUNC_SDA0>;
- bias-pull-up = <1000>;
+ /* GPIO8 set as multifunction SDA0 */
+ i2c0-pins {
+ pins {
+ pinmux = <PINMUX_GPIO8__FUNC_SDA0>;
+          };
};
};
-
- bias-disable: true
-
- output-high: true
-
- output-low: true
-
- input-enable: true
-
- input-disable: true
-
- input-schmitt-enable: true
-
- input-schmitt-disable: true
-
- required:
- - pinmux
-
- additionalProperties: false
+ $ref: "pinmux-node.yaml"
+
+ properties:
+ pinmux:
+ description: |
+                Integer array; each entry represents a gpio pin number and its
+                mux setting. The supported pin numbers and mux values vary
+                between SoCs and are defined as macros in
+                dt-bindings/pinctrl/<soc>-pinfunc.h directly.
+
+ drive-strength:
+ enum: [2, 4, 6, 8, 10, 12, 14, 16]
+
+ bias-pull-down:
+ description: |
+                If the pull-down type is normal, neither an RSEL/R1R0 define
+                nor a resistance value needs to be added.
+                If the pull-down type is PUPD/R0/R1, an R1R0 define can be
+                added to select a different resistance. mt8195 supports the
+                "MTK_PUPD_SET_R1R0_00", "MTK_PUPD_SET_R1R0_01",
+                "MTK_PUPD_SET_R1R0_10" and "MTK_PUPD_SET_R1R0_11" defines.
+                If the pull-down type is RSEL, an RSEL define or a resistance
+                value (ohm) can be added to select a different resistance;
+                SI unit values require the property
+                "mediatek,rsel_resistance_in_si_unit" to be set.
+                mt8195 supports the "MTK_PULL_SET_RSEL_000" through
+                "MTK_PULL_SET_RSEL_111" defines, as well as the resistance
+                values (ohm) 75000 and 5000.
+ oneOf:
+ - enum: [100, 101, 102, 103]
+ - description: mt8195 pull down PUPD/R0/R1 type define value.
+ - enum: [200, 201, 202, 203, 204, 205, 206, 207]
+ - description: mt8195 pull down RSEL type define value.
+ - enum: [75000, 5000]
+ - description: mt8195 pull down RSEL type si unit value(ohm).
+
+                  An example using an RSEL define:
+                  pincontroller {
+                    i2c0-pins {
+                      pins {
+                        pinmux = <PINMUX_GPIO8__FUNC_SDA0>;
+                        bias-pull-down = <MTK_PULL_SET_RSEL_001>;
+                      };
+                    };
+                  };
+                  An example using an SI unit resistance value (ohm):
+                  &pio {
+                    mediatek,rsel_resistance_in_si_unit;
+                  };
+                  pincontroller {
+                    i2c0-pins {
+                      pins {
+                        pinmux = <PINMUX_GPIO8__FUNC_SDA0>;
+                        bias-pull-down = <75000>;
+                      };
+                    };
+                  };
+
+ bias-pull-up:
+ description: |
+                If the pull-up type is normal, neither an RSEL/R1R0 define
+                nor a resistance value needs to be added.
+                If the pull-up type is PUPD/R0/R1, an R1R0 define can be
+                added to select a different resistance. mt8195 supports the
+                "MTK_PUPD_SET_R1R0_00", "MTK_PUPD_SET_R1R0_01",
+                "MTK_PUPD_SET_R1R0_10" and "MTK_PUPD_SET_R1R0_11" defines.
+                If the pull-up type is RSEL, an RSEL define or a resistance
+                value (ohm) can be added to select a different resistance;
+                SI unit values require the property
+                "mediatek,rsel_resistance_in_si_unit" to be set.
+                mt8195 supports the "MTK_PULL_SET_RSEL_000" through
+                "MTK_PULL_SET_RSEL_111" defines, as well as the resistance
+                values (ohm) 1000, 1500, 2000, 3000, 4000, 5000, 10000 and
+                75000.
+ oneOf:
+ - enum: [100, 101, 102, 103]
+ - description: mt8195 pull up PUPD/R0/R1 type define value.
+ - enum: [200, 201, 202, 203, 204, 205, 206, 207]
+ - description: mt8195 pull up RSEL type define value.
+ - enum: [1000, 1500, 2000, 3000, 4000, 5000, 10000, 75000]
+ - description: mt8195 pull up RSEL type si unit value(ohm).
+            An example using an RSEL define:
+            pincontroller {
+              i2c0-pins {
+                pins {
+                  pinmux = <PINMUX_GPIO8__FUNC_SDA0>;
+                  bias-pull-up = <MTK_PULL_SET_RSEL_001>;
+                };
+              };
+            };
+            An example using an SI unit resistance value (ohm):
+            &pio {
+              mediatek,rsel_resistance_in_si_unit;
+            };
+            pincontroller {
+              i2c0-pins {
+                pins {
+                  pinmux = <PINMUX_GPIO8__FUNC_SDA0>;
+                  bias-pull-up = <1000>;
+                };
+              };
+            };
+
+ bias-disable: true
+
+ output-high: true
+
+ output-low: true
+
+ input-enable: true
+
+ input-disable: true
+
+ input-schmitt-enable: true
+
+ input-schmitt-disable: true
+
+ required:
+ - pinmux
+
+allOf:
+ - $ref: "pinctrl.yaml#"
required:
- compatible
@@ -201,30 +226,46 @@ additionalProperties: false
examples:
- |
- #include <dt-bindings/pinctrl/mt8195-pinfunc.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
- pio: pinctrl@10005000 {
- compatible = "mediatek,mt8195-pinctrl";
- reg = <0x10005000 0x1000>,
- <0x11d10000 0x1000>,
- <0x11d30000 0x1000>,
- <0x11d40000 0x1000>,
- <0x11e20000 0x1000>,
- <0x11eb0000 0x1000>,
- <0x11f40000 0x1000>,
- <0x1000b000 0x1000>;
- reg-names = "iocfg0", "iocfg_bm", "iocfg_bl",
- "iocfg_br", "iocfg_lm", "iocfg_rb",
- "iocfg_tl", "eint";
- gpio-controller;
- #gpio-cells = <2>;
- gpio-ranges = <&pio 0 0 144>;
- interrupt-controller;
- interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH 0>;
- #interrupt-cells = <2>;
-
- pio-pins {
- pinmux = <PINMUX_GPIO0__FUNC_GPIO0>;
- output-low;
- };
- };
+ #include <dt-bindings/pinctrl/mt8195-pinfunc.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ pio: pinctrl@10005000 {
+ compatible = "mediatek,mt8195-pinctrl";
+ reg = <0x10005000 0x1000>,
+ <0x11d10000 0x1000>,
+ <0x11d30000 0x1000>,
+ <0x11d40000 0x1000>,
+ <0x11e20000 0x1000>,
+ <0x11eb0000 0x1000>,
+ <0x11f40000 0x1000>,
+ <0x1000b000 0x1000>;
+ reg-names = "iocfg0", "iocfg_bm", "iocfg_bl",
+ "iocfg_br", "iocfg_lm", "iocfg_rb",
+ "iocfg_tl", "eint";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&pio 0 0 144>;
+ interrupt-controller;
+ interrupts = <GIC_SPI 225 IRQ_TYPE_LEVEL_HIGH 0>;
+ #interrupt-cells = <2>;
+
+ pio-pins {
+ pins {
+ pinmux = <PINMUX_GPIO0__FUNC_GPIO0>;
+ output-low;
+ };
+ };
+
+ spi0-pins {
+ pins-spi {
+ pinmux = <PINMUX_GPIO132__FUNC_SPIM0_CSB>,
+ <PINMUX_GPIO134__FUNC_SPIM0_MO>,
+ <PINMUX_GPIO133__FUNC_SPIM0_CLK>;
+ bias-disable;
+ };
+ pins-spi-mi {
+ pinmux = <PINMUX_GPIO135__FUNC_SPIM0_MI>;
+ bias-pull-down;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/pinctrl.yaml
new file mode 100644
index 000000000000..d471563119a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Pin controller device
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+ - Rafał Miłecki <rafal@milecki.pl>
+
+description: |
+ Pin controller devices should contain the pin configuration nodes that client
+ devices reference.
+
+ The contents of each of those pin configuration child nodes is defined
+ entirely by the binding for the individual pin controller device. There
+ exists no common standard for this content. The pinctrl framework only
+ provides generic helper bindings that the pin controller driver can use.
+
+ The pin configuration nodes need not be direct children of the pin controller
+ device; they may be grandchildren, for example. Whether this is legal, and
+ whether there is any interaction between the child and intermediate parent
+ nodes, is again defined entirely by the binding for the individual pin
+ controller device.
+
+properties:
+ $nodename:
+ pattern: "^(pinctrl|pinmux)(@[0-9a-f]+)?$"
+
+ "#pinctrl-cells":
+ description: >
+ Number of pin control cells in addition to the index within the pin
+ controller device instance.
+
+ pinctrl-use-default:
+ type: boolean
+ description: >
+ Indicates that the OS can use the boot default pin configuration. This
+ allows using an OS that does not have a driver for the pin controller.
+ This property can be set either globally for the pin controller or in
+ child nodes for individual pin group control.
+
+additionalProperties: true
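
Taken together, a minimal pin controller node that satisfies this schema
could look like the sketch below; the compatible string and addresses are
placeholders, not a real binding:

    pinctrl@1000 {
        compatible = "vendor,soc-pinctrl";
        reg = <0x1000 0x100>;
        #pinctrl-cells = <1>;
        /* keep the boot default pin setup, e.g. when the OS has no driver */
        pinctrl-use-default;
    };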
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml
index c64c93206817..b83c7f476e19 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq6018-pinctrl.yaml
@@ -118,6 +118,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,lpass-lpi-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,lpass-lpi-pinctrl.yaml
index e47ebf934daf..5c5542f1627c 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,lpass-lpi-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,lpass-lpi-pinctrl.yaml
@@ -103,6 +103,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,mdm9607-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,mdm9607-pinctrl.yaml
index 3b02dc6626ed..f7bd4be1739e 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,mdm9607-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,mdm9607-pinctrl.yaml
@@ -14,6 +14,7 @@ description: |
MDM9607 platform.
allOf:
+ - $ref: "pinctrl.yaml#"
- $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8226-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,msm8226-pinctrl.yaml
index 040d2ada3669..ab4a2b4cfda2 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8226-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8226-pinctrl.yaml
@@ -97,6 +97,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8953-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,msm8953-pinctrl.yaml
index abe9f4c9b1e3..64c0a41ca0c3 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8953-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8953-pinctrl.yaml
@@ -133,6 +133,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
index 8952b4cc1262..9400b665a46f 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.yaml
@@ -17,6 +17,7 @@ properties:
compatible:
items:
- enum:
+ - qcom,pm2250-gpio
- qcom,pm660-gpio
- qcom,pm660l-gpio
- qcom,pm6150-gpio
@@ -26,10 +27,12 @@ properties:
- qcom,pm8005-gpio
- qcom,pm8008-gpio
- qcom,pm8018-gpio
+ - qcom,pm8019-gpio
- qcom,pm8038-gpio
- qcom,pm8058-gpio
- qcom,pm8150-gpio
- qcom,pm8150b-gpio
+ - qcom,pm8226-gpio
- qcom,pm8350-gpio
- qcom,pm8350b-gpio
- qcom,pm8350c-gpio
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,qcm2290-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,qcm2290-pinctrl.yaml
index 13f338619d77..206f4f238736 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,qcm2290-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,qcm2290-pinctrl.yaml
@@ -118,6 +118,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
index 7d6a2ab10eec..6c7c3f6a140e 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sc7280-pinctrl.yaml
@@ -123,6 +123,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sc8180x-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sc8180x-pinctrl.yaml
index a82dab898395..86509172603d 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sc8180x-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sc8180x-pinctrl.yaml
@@ -14,6 +14,7 @@ description: |
SC8180X platform.
allOf:
+ - $ref: "pinctrl.yaml#"
- $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdx55-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sdx55-pinctrl.yaml
index 112dd59ce7ed..a38090b14aab 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sdx55-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdx55-pinctrl.yaml
@@ -118,6 +118,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sdx65-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sdx65-pinctrl.yaml
new file mode 100644
index 000000000000..cdfcf29dffee
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sdx65-pinctrl.yaml
@@ -0,0 +1,191 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,sdx65-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. SDX65 TLMM block
+
+maintainers:
+ - Vamsi krishna Lanka <quic_vamslank@quicinc.com>
+
+description:
+ This binding describes the Top Level Mode Multiplexer block found in the
+ SDX65 platform.
+
+properties:
+ compatible:
+ const: qcom,sdx65-tlmm
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-controller: true
+
+ '#interrupt-cells':
+    description: Specifies the pin numbers and flags, as defined in
+ include/dt-bindings/interrupt-controller/irq.h
+ const: 2
+
+ gpio-controller: true
+
+ '#gpio-cells':
+    description: Specifies the pin number and flags, as defined in
+ include/dt-bindings/gpio/gpio.h
+ const: 2
+
+ gpio-ranges:
+ maxItems: 1
+
+ gpio-reserved-ranges:
+ maxItems: 1
+
+# PIN CONFIGURATION NODES
+patternProperties:
+ '-state$':
+ oneOf:
+ - $ref: "#/$defs/qcom-sdx65-tlmm-state"
+ - patternProperties:
+ ".*":
+ $ref: "#/$defs/qcom-sdx65-tlmm-state"
+'$defs':
+ qcom-sdx65-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+      Client device subnodes use the standard properties below.
+ $ref: "qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state"
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-9][0-9]|10[0-7])$"
+ - enum: [ ufs_reset, sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd, sdc2_data, sdc1_rclk ]
+ minItems: 1
+ maxItems: 150
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins. Functions are only valid for gpio pins.
+ enum: [ blsp_uart1, blsp_spi1, blsp_i2c1, blsp_uim1, atest_tsens,
+ bimc_dte1, dac_calib0, blsp_spi8, blsp_uart8, blsp_uim8,
+ qdss_cti_trig_out_b, bimc_dte0, dac_calib1, qdss_cti_trig_in_b,
+ dac_calib2, atest_tsens2, atest_usb1, blsp_spi10, blsp_uart10,
+ blsp_uim10, atest_bbrx1, atest_usb13, atest_bbrx0, atest_usb12,
+ mdp_vsync, edp_lcd, blsp_i2c10, atest_gpsadc1, atest_usb11,
+ atest_gpsadc0, edp_hot, atest_usb10, m_voc, dac_gpio, atest_char,
+ cam_mclk, pll_bypassnl, qdss_stm7, blsp_i2c8, qdss_tracedata_b,
+ pll_reset, qdss_stm6, qdss_stm5, qdss_stm4, atest_usb2, cci_i2c,
+ qdss_stm3, dac_calib3, atest_usb23, atest_char3, dac_calib4,
+ qdss_stm2, atest_usb22, atest_char2, qdss_stm1, dac_calib5,
+ atest_usb21, atest_char1, dbg_out, qdss_stm0, dac_calib6,
+ atest_usb20, atest_char0, dac_calib10, qdss_stm10,
+ qdss_cti_trig_in_a, cci_timer4, blsp_spi6, blsp_uart6, blsp_uim6,
+ blsp2_spi, qdss_stm9, qdss_cti_trig_out_a, dac_calib11,
+ qdss_stm8, cci_timer0, qdss_stm13, dac_calib7, cci_timer1,
+ qdss_stm12, dac_calib8, cci_timer2, blsp1_spi, qdss_stm11,
+ dac_calib9, cci_timer3, cci_async, dac_calib12, blsp_i2c6,
+ qdss_tracectl_a, dac_calib13, qdss_traceclk_a, dac_calib14,
+ dac_calib15, hdmi_rcv, dac_calib16, hdmi_cec, pwr_modem,
+ dac_calib17, hdmi_ddc, pwr_nav, dac_calib18, pwr_crypto,
+ dac_calib19, hdmi_hot, dac_calib20, dac_calib21, pci_e0,
+ dac_calib22, dac_calib23, dac_calib24, tsif1_sync, dac_calib25,
+ sd_write, tsif1_error, blsp_spi2, blsp_uart2, blsp_uim2,
+ qdss_cti, blsp_i2c2, blsp_spi3, blsp_uart3, blsp_uim3, blsp_i2c3,
+ uim3, blsp_spi9, blsp_uart9, blsp_uim9, blsp10_spi, blsp_i2c9,
+ blsp_spi7, blsp_uart7, blsp_uim7, qdss_tracedata_a, blsp_i2c7,
+ qua_mi2s, gcc_gp1_clk_a, ssc_irq, uim4, blsp_spi11, blsp_uart11,
+ blsp_uim11, gcc_gp2_clk_a, gcc_gp3_clk_a, blsp_i2c11, cri_trng0,
+ cri_trng1, cri_trng, qdss_stm18, pri_mi2s, qdss_stm17, blsp_spi4,
+ blsp_uart4, blsp_uim4, qdss_stm16, qdss_stm15, blsp_i2c4,
+ qdss_stm14, dac_calib26, spkr_i2s, audio_ref, lpass_slimbus,
+ isense_dbg, tsense_pwm1, tsense_pwm2, btfm_slimbus, ter_mi2s,
+ qdss_stm22, qdss_stm21, qdss_stm20, qdss_stm19, gcc_gp1_clk_b,
+ sec_mi2s, blsp_spi5, blsp_uart5, blsp_uim5, gcc_gp2_clk_b,
+ gcc_gp3_clk_b, blsp_i2c5, blsp_spi12, blsp_uart12, blsp_uim12,
+ qdss_stm25, qdss_stm31, blsp_i2c12, qdss_stm30, qdss_stm29,
+ tsif1_clk, qdss_stm28, tsif1_en, tsif1_data, sdc4_cmd, qdss_stm27,
+ qdss_traceclk_b, tsif2_error, sdc43, vfr_1, qdss_stm26, tsif2_clk,
+ sdc4_clk, qdss_stm24, tsif2_en, sdc42, qdss_stm23, qdss_tracectl_b,
+ sd_card, tsif2_data, sdc41, tsif2_sync, sdc40, mdp_vsync_p_b,
+ ldo_en, mdp_vsync_s_b, ldo_update, blsp11_uart_tx_b, blsp11_uart_rx_b,
+ blsp11_i2c_sda_b, prng_rosc, blsp11_i2c_scl_b, uim2, uim1, uim_batt,
+ pci_e2, pa_indicator, adsp_ext, ddr_bist, qdss_tracedata_11,
+ qdss_tracedata_12, modem_tsync, nav_dr, nav_pps, pci_e1, gsm_tx,
+ qspi_cs, ssbi2, ssbi1, mss_lte, qspi_clk, qspi0, qspi1, qspi2, qspi3,
+ gpio ]
+
+ drive-strength:
+ enum: [2, 4, 6, 8, 10, 12, 14, 16]
+ default: 2
+ description:
+ Selects the drive strength for the specified pins, in mA.
+
+ bias-pull-down: true
+
+ bias-pull-up: true
+
+ bias-disable: true
+
+ output-high: true
+
+ output-low: true
+
+ required:
+ - pins
+ - function
+
+ additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - interrupt-controller
+ - '#interrupt-cells'
+ - gpio-controller
+ - '#gpio-cells'
+ - gpio-ranges
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ tlmm: pinctrl@f100000 {
+ compatible = "qcom,sdx65-tlmm";
+ reg = <0x03000000 0xdc2000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 109>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 212 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-wo-subnode-state {
+ pins = "gpio1";
+ function = "gpio";
+ };
+
+ uart-w-subnodes-state {
+ rx {
+ pins = "gpio4";
+ function = "blsp_uart1";
+ bias-pull-up;
+ };
+
+ tx {
+ pins = "gpio5";
+ function = "blsp_uart1";
+ bias-disable;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm6115-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm6115-pinctrl.yaml
index 8fc06f6a3ef4..cfcde405d30a 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sm6115-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm6115-pinctrl.yaml
@@ -121,6 +121,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm6125-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm6125-pinctrl.yaml
index 5f7adaa81f83..c8eec845ade9 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sm6125-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm6125-pinctrl.yaml
@@ -13,6 +13,7 @@ description: |
in the SM6125 platform.
allOf:
+ - $ref: "pinctrl.yaml#"
- $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm6350-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm6350-pinctrl.yaml
index 554992a681f3..898608671c4b 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sm6350-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm6350-pinctrl.yaml
@@ -14,6 +14,7 @@ description: |
in the SM6350 platform.
allOf:
+ - $ref: "pinctrl.yaml#"
- $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml
index 8508c57522fd..cfa2c50fdb93 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm8250-pinctrl.yaml
@@ -115,6 +115,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm8350-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm8350-pinctrl.yaml
index 4f2667ea2805..6b7789db2f75 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,sm8350-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm8350-pinctrl.yaml
@@ -14,6 +14,7 @@ description: |
in the SM8350 platform.
allOf:
+ - $ref: "pinctrl.yaml#"
- $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,sm8450-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,sm8450-pinctrl.yaml
new file mode 100644
index 000000000000..9c891246245b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,sm8450-pinctrl.yaml
@@ -0,0 +1,143 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pinctrl/qcom,sm8450-pinctrl.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm Technologies, Inc. SM8450 TLMM block
+
+maintainers:
+ - Vinod Koul <vkoul@kernel.org>
+
+description: |
+ This binding describes the Top Level Mode Multiplexer (TLMM) block found
+ in the SM8450 platform.
+
+allOf:
+ - $ref: /schemas/pinctrl/qcom,tlmm-common.yaml#
+
+properties:
+ compatible:
+ const: qcom,sm8450-tlmm
+
+ reg:
+ maxItems: 1
+
+ interrupts: true
+ interrupt-controller: true
+ '#interrupt-cells': true
+ gpio-controller: true
+ gpio-reserved-ranges: true
+ '#gpio-cells': true
+ gpio-ranges: true
+ wakeup-parent: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+patternProperties:
+ '-state$':
+ oneOf:
+ - $ref: "#/$defs/qcom-sm8450-tlmm-state"
+ - patternProperties:
+ ".*":
+ $ref: "#/$defs/qcom-sm8450-tlmm-state"
+
+$defs:
+ qcom-sm8450-tlmm-state:
+ type: object
+ description:
+ Pinctrl node's client devices use subnodes for desired pin configuration.
+      Client device subnodes use the standard properties below.
+ $ref: "qcom,tlmm-common.yaml#/$defs/qcom-tlmm-state"
+
+ properties:
+ pins:
+ description:
+ List of gpio pins affected by the properties specified in this
+ subnode.
+ items:
+ oneOf:
+ - pattern: "^gpio([0-9]|[1-9][0-9]|1[0-9][0-9]|20[0-9])$"
+ - enum: [ ufs_reset, sdc2_clk, sdc2_cmd, sdc2_data ]
+ minItems: 1
+ maxItems: 36
+
+ function:
+ description:
+ Specify the alternative function to be configured for the specified
+ pins.
+ enum: [ aon_cam, atest_char, atest_usb, audio_ref, cam_mclk, cci_async,
+ cci_i2c, cci_timer, cmu_rng, coex_uart1, coex_uart2, cri_trng,
+ cri_trng0, cri_trng1, dbg_out, ddr_bist, ddr_pxi0, ddr_pxi1,
+ ddr_pxi2, ddr_pxi3, dp_hot, gcc_gp1, gcc_gp2, gcc_gp3,
+ gpio, ibi_i3c, jitter_bist, mdp_vsync, mdp_vsync0, mdp_vsync1,
+ mdp_vsync2, mdp_vsync3, mi2s0_data0, mi2s0_data1, mi2s0_sck,
+ mi2s0_ws, mi2s2_data0, mi2s2_data1, mi2s2_sck, mi2s2_ws,
+ mss_grfc0, mss_grfc1, mss_grfc10, mss_grfc11, mss_grfc12,
+ mss_grfc2, mss_grfc3, mss_grfc4, mss_grfc5, mss_grfc6,
+ mss_grfc7, mss_grfc8, mss_grfc9, nav, pcie0_clkreqn,
+ pcie1_clkreqn, phase_flag, pll_bist, pll_clk, pri_mi2s,
+ prng_rosc, qdss_cti, qdss_gpio, qlink0_enable, qlink0_request,
+ qlink0_wmss, qlink1_enable, qlink1_request, qlink1_wmss,
+ qlink2_enable, qlink2_request, qlink2_wmss, qspi0, qspi1,
+ qspi2, qspi3, qspi_clk, qspi_cs, qup0, qup1, qup10, qup11,
+ qup12, qup13, qup14, qup15, qup16, qup17, qup18, qup19, qup2,
+ qup20, qup21, qup3, qup4, qup5, qup6, qup7, qup8, qup9, qup_l4,
+ qup_l5, qup_l6, sd_write, sdc40, sdc41, sdc42, sdc43, sdc4_clk,
+ sdc4_cmd, sec_mi2s, tb_trig, tgu_ch0, tgu_ch1, tgu_ch2,
+ tgu_ch3, tmess_prng0, tmess_prng1, tmess_prng2, tmess_prng3,
+ tsense_pwm1, tsense_pwm2, uim0_clk, uim0_data, uim0_present,
+ uim0_reset, uim1_clk, uim1_data, uim1_present, uim1_reset,
+ usb2phy_ac, usb_phy, vfr_0, vfr_1, vsense_trigger ]
+
+ bias-disable: true
+ bias-pull-down: true
+ bias-pull-up: true
+ drive-strength: true
+ input-enable: true
+ output-high: true
+ output-low: true
+
+ required:
+ - pins
+ - function
+
+ additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ pinctrl@f100000 {
+ compatible = "qcom,sm8450-tlmm";
+ reg = <0x0f100000 0x300000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&tlmm 0 0 211>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ interrupts = <GIC_SPI 208 IRQ_TYPE_LEVEL_HIGH>;
+
+ gpio-wo-subnode-state {
+ pins = "gpio1";
+ function = "gpio";
+ };
+
+ uart-w-subnodes-state {
+ rx {
+ pins = "gpio26";
+ function = "qup7";
+ bias-pull-up;
+ };
+
+ tx {
+ pins = "gpio27";
+ function = "qup7";
+ bias-disable;
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml b/Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml
index 3b37cf102d41..be8cb0ead62f 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,tlmm-common.yaml
@@ -51,6 +51,9 @@ properties:
should not be accessed by the OS. Please see the ../gpio/gpio.txt for more
information.
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- interrupts
- interrupt-controller
diff --git a/Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinmux.yaml b/Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinmux.yaml
index b32f2676cab5..f0c52feb24d7 100644
--- a/Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinmux.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/ralink,rt2880-pinmux.yaml
@@ -44,6 +44,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
index ac4e068aa03f..8548e3639b75 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,pfc.yaml
@@ -70,6 +70,9 @@ properties:
power-domains:
maxItems: 1
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rza1-ports.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rza1-ports.yaml
index 7f80578dc229..8ed4b98a1628 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rza1-ports.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rza1-ports.yaml
@@ -31,6 +31,9 @@ properties:
reg:
maxItems: 1
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rza2-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rza2-pinctrl.yaml
index ce1f7343788f..d761fddc2206 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rza2-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rza2-pinctrl.yaml
@@ -72,6 +72,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
index ef68dabcf4dc..b749c82edebd 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rzg2l-pinctrl.yaml
@@ -73,6 +73,8 @@ additionalProperties:
pins: true
drive-strength:
enum: [ 2, 4, 8, 12 ]
+ output-impedance-ohms:
+ enum: [ 33, 50, 66, 100 ]
power-source:
enum: [ 1800, 2500, 3300 ]
slew-rate: true
@@ -90,6 +92,9 @@ additionalProperties:
additionalProperties:
$ref: "#/additionalProperties/anyOf/0"
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
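
A pin group restricted to one of the documented impedance values could then
be written as below; the pinmux entry is only a sketch of how the existing
RZG2L_PORT_PINMUX() macro is typically used, not a tested configuration:

    i2c3-pins {
        pinmux = <RZG2L_PORT_PINMUX(18, 0, 3)>;  /* illustrative port/pin/function */
        output-impedance-ohms = <33>;            /* one of 33, 50, 66 or 100 */
    };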
diff --git a/Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.yaml
index 4a43af0d6e02..70b1788ab594 100644
--- a/Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/renesas,rzn1-pinctrl.yaml
@@ -31,6 +31,9 @@ properties:
description:
The bus clock, sometimes described as pclk, for register accesses.
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
index 07c0a98ef9c6..b0eae3a67ab1 100644
--- a/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/rockchip,pinctrl.yaml
@@ -67,6 +67,9 @@ properties:
ranges: true
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- rockchip,grf
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
index b8b475967ff9..9e70edceb21b 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -22,6 +22,7 @@ Required Properties:
- "samsung,exynos5420-pinctrl": for Exynos5420 compatible pin-controller.
- "samsung,exynos5433-pinctrl": for Exynos5433 compatible pin-controller.
- "samsung,exynos7-pinctrl": for Exynos7 compatible pin-controller.
+ - "samsung,exynos7885-pinctrl": for Exynos7885 compatible pin-controller.
- "samsung,exynos850-pinctrl": for Exynos850 compatible pin-controller.
- "samsung,exynosautov9-pinctrl": for ExynosAutov9 compatible pin-controller.
diff --git a/Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.yaml
index a804d9bc1602..14a8c0215cc6 100644
--- a/Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/socionext,uniphier-pinctrl.yaml
@@ -10,9 +10,6 @@ maintainers:
- Masahiro Yamada <yamada.masahiro@socionext.com>
properties:
- $nodename:
- pattern: "pinctrl"
-
compatible:
enum:
- socionext,uniphier-ld4-pinctrl
@@ -26,11 +23,48 @@ properties:
- socionext,uniphier-pxs3-pinctrl
- socionext,uniphier-nx1-pinctrl
+additionalProperties:
+ type: object
+
+ allOf:
+ - $ref: pincfg-node.yaml#
+ - $ref: pinmux-node.yaml#
+
+ properties:
+ phandle: true
+ function: true
+ groups: true
+ pins: true
+ bias-pull-up: true
+ bias-pull-down: true
+ bias-pull-pin-default: true
+ drive-strength: true
+
+ additionalProperties:
+ type: object
+
+ allOf:
+ - $ref: pincfg-node.yaml#
+ - $ref: pinmux-node.yaml#
+
+ properties:
+ phandle: true
+ function: true
+ groups: true
+ pins: true
+ bias-pull-up: true
+ bias-pull-down: true
+ bias-pull-pin-default: true
+ drive-strength: true
+
+ unevaluatedProperties: false
+
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
-additionalProperties: false
-
examples:
- |
// The UniPhier pinctrl should be a subnode of a "syscon" compatible node.
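
With the relaxed additionalProperties, a pin group node (and, where needed, a
nested sub-group node) now validates against the pincfg/pinmux helper schemas;
a minimal sketch, assuming the usual UniPhier group and function names:

    pinctrl {
        compatible = "socionext,uniphier-ld20-pinctrl";

        uart0-pins {
            groups = "uart0";
            function = "uart0";
        };
    };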
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index ac88e01ec430..83a18d0331b1 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -183,6 +183,9 @@ patternProperties:
required:
- pinmux
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- '#address-cells'
diff --git a/Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml
index 9f1dab0c2430..306524885a2b 100644
--- a/Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml
@@ -20,6 +20,9 @@ properties:
reg:
maxItems: 1
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
@@ -80,7 +83,7 @@ examples:
#address-cells = <2>;
#size-cells = <2>;
- pmux: pmux@24190000 {
+ pmux: pinmux@24190000 {
compatible = "toshiba,tmpv7708-pinctrl";
reg = <0 0x24190000 0 0x10000>;
diff --git a/Documentation/devicetree/bindings/pinctrl/xlnx,zynq-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/xlnx,zynq-pinctrl.yaml
index ac97dbf6998e..cfd0cc549a7b 100644
--- a/Documentation/devicetree/bindings/pinctrl/xlnx,zynq-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/xlnx,zynq-pinctrl.yaml
@@ -167,6 +167,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: "pinctrl.yaml#"
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml
index 8ef0d07d35fe..2722dc7bb03d 100644
--- a/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/xlnx,zynqmp-pinctrl.yaml
@@ -290,6 +290,9 @@ patternProperties:
additionalProperties: false
+allOf:
+ - $ref: pinctrl.yaml#
+
required:
- compatible
diff --git a/Documentation/devicetree/bindings/power/reset/gpio-restart.txt b/Documentation/devicetree/bindings/power/reset/gpio-restart.txt
deleted file mode 100644
index af3701bc15c4..000000000000
--- a/Documentation/devicetree/bindings/power/reset/gpio-restart.txt
+++ /dev/null
@@ -1,54 +0,0 @@
-Drive a GPIO line that can be used to restart the system from a restart
-handler.
-
-This binding supports level and edge triggered reset. At driver load
-time, the driver will request the given gpio line and install a restart
-handler. If the optional properties 'open-source' is not found, the GPIO line
-will be driven in the inactive state. Otherwise its not driven until
-the restart is initiated.
-
-When the system is restarted, the restart handler will be invoked in
-priority order. The gpio is configured as an output, and driven active,
-triggering a level triggered reset condition. This will also cause an
-inactive->active edge condition, triggering positive edge triggered
-reset. After a delay specified by active-delay, the GPIO is set to
-inactive, thus causing an active->inactive edge, triggering negative edge
-triggered reset. After a delay specified by inactive-delay, the GPIO
-is driven active again. After a delay specified by wait-delay, the
-restart handler completes allowing other restart handlers to be attempted.
-
-Required properties:
-- compatible : should be "gpio-restart".
-- gpios : The GPIO to set high/low, see "gpios property" in
- Documentation/devicetree/bindings/gpio/gpio.txt. If the pin should be
- low to reset the board set it to "Active Low", otherwise set
- gpio to "Active High".
-
-Optional properties:
-- open-source : Treat the GPIO as being open source and defer driving
- it to when the restart is initiated. If this optional property is not
- specified, the GPIO is initialized as an output in its inactive state.
-- priority : A priority ranging from 0 to 255 (default 128) according to
- the following guidelines:
- 0: Restart handler of last resort, with limited restart
- capabilities
- 128: Default restart handler; use if no other restart handler is
- expected to be available, and/or if restart functionality is
- sufficient to restart the entire system
- 255: Highest priority restart handler, will preempt all other
- restart handlers
-- active-delay: Delay (default 100) to wait after driving gpio active [ms]
-- inactive-delay: Delay (default 100) to wait after driving gpio inactive [ms]
-- wait-delay: Delay (default 3000) to wait after completing restart
- sequence [ms]
-
-Examples:
-
-gpio-restart {
- compatible = "gpio-restart";
- gpios = <&gpio 4 0>;
- priority = <128>;
- active-delay = <100>;
- inactive-delay = <100>;
- wait-delay = <3000>;
-};
diff --git a/Documentation/devicetree/bindings/power/reset/gpio-restart.yaml b/Documentation/devicetree/bindings/power/reset/gpio-restart.yaml
new file mode 100644
index 000000000000..a72d5c721516
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/reset/gpio-restart.yaml
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: (GPL-2.0-only or BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/reset/gpio-restart.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: GPIO controlled reset
+
+maintainers:
+ - Sebastian Reichel <sre@kernel.org>
+
+description: >
+ Drive a GPIO line that can be used to restart the system from a restart handler.
+
+ This binding supports level- and edge-triggered reset. At driver load time, the driver will
+ request the given GPIO line and install a restart handler. If the optional property
+ 'open-source' is not found, the GPIO line will be driven in the inactive state. Otherwise it is
+ not driven until the restart is initiated.
+
+ When the system is restarted, the restart handler will be invoked in priority order. The GPIO
+ is configured as an output and driven active, triggering a level-triggered reset condition.
+ This will also cause an inactive->active edge, triggering a positive-edge-triggered reset.
+ After a delay specified by active-delay, the GPIO is set to inactive, thus causing an
+ active->inactive edge, triggering a negative-edge-triggered reset. After a delay specified by
+ inactive-delay, the GPIO is driven active again. After a delay specified by wait-delay, the
+ restart handler completes, allowing other restart handlers to be attempted.
+
+properties:
+ compatible:
+ const: gpio-restart
+
+ gpios:
+ description: The GPIO to set high/low, see "gpios property" in
+ Documentation/devicetree/bindings/gpio/gpio.txt. If the pin should be low to reset the board,
+ set it to "Active Low"; otherwise set the GPIO to "Active High".
+
+ open-source:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description: Treat the GPIO as being open source and defer driving it to when the restart is
+ initiated. If this optional property is not specified, the GPIO is initialized as an output
+ in its inactive state.
+
+ priority:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: |
+ A priority ranging from 0 to 255 (default 128) according to the following guidelines:
+
+ 0: Restart handler of last resort, with limited restart capabilities.
+ 128: Default restart handler; use if no other restart handler is expected to be available,
+ and/or if restart functionality is sufficient to restart the entire system.
+ 255: Highest priority restart handler, will preempt all other restart handlers.
+ minimum: 0
+ maximum: 255
+ default: 128
+
+ active-delay:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Delay (default 100) to wait after driving gpio active [ms]
+ default: 100
+
+ inactive-delay:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Delay (default 100) to wait after driving gpio inactive [ms]
+ default: 100
+
+ wait-delay:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Delay (default 3000) to wait after completing restart sequence [ms]
+ default: 3000
+
+additionalProperties: false
+
+required:
+ - compatible
+ - gpios
+
+examples:
+ - |
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&gpio 4 0>;
+ priority = <128>;
+ active-delay = <100>;
+ inactive-delay = <100>;
+ wait-delay = <3000>;
+ };
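+
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ // A hedged variant sketch: an active-low, open-source restart line with
+ // non-default delays, exercising the sequence described above. The GPIO
+ // specifier and delay values are illustrative, not from the original text.
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&gpio 4 GPIO_ACTIVE_LOW>;
+ open-source;
+ active-delay = <200>;
+ inactive-delay = <200>;
+ wait-delay = <5000>;
+ };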
diff --git a/Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml b/Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml
index ffb344987a7b..6b4588a3253b 100644
--- a/Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml
+++ b/Documentation/devicetree/bindings/power/supply/maxim,max17040.yaml
@@ -44,7 +44,9 @@ properties:
SoC == State of Charge == Capacity.
maxim,rcomp:
- $ref: /schemas/types.yaml#/definitions/uint32
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ minItems: 1
+ maxItems: 2
description: |
A value to compensate readings for various battery chemistries and operating temperatures.
max17040,41 have 2 byte rcomp, default to 0x97 0x00.
diff --git a/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.txt b/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.txt
deleted file mode 100644
index 0e662d7f6bd1..000000000000
--- a/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Broadcom BCM7038 PWM controller (BCM7xxx Set Top Box PWM controller)
-
-Required properties:
-
-- compatible: must be "brcm,bcm7038-pwm"
-- reg: physical base address and length for this controller
-- #pwm-cells: should be 2. See pwm.yaml in this directory for a description
- of the cells format
-- clocks: a phandle to the reference clock for this block which is fed through
- its internal variable clock frequency generator
-
-
-Example:
-
- pwm: pwm@f0408000 {
- compatible = "brcm,bcm7038-pwm";
- reg = <0xf0408000 0x28>;
- #pwm-cells = <2>;
- clocks = <&upg_fixed>;
- };
diff --git a/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.yaml b/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.yaml
new file mode 100644
index 000000000000..4080e098f746
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/brcm,bcm7038-pwm.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/pwm/brcm,bcm7038-pwm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom BCM7038 PWM controller (BCM7xxx Set Top Box PWM controller)
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+allOf:
+ - $ref: pwm.yaml#
+
+properties:
+ compatible:
+ const: brcm,bcm7038-pwm
+
+ reg:
+ maxItems: 1
+
+ "#pwm-cells":
+ const: 2
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - "#pwm-cells"
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ pwm: pwm@f0408000 {
+ compatible = "brcm,bcm7038-pwm";
+ reg = <0xf0408000 0x28>;
+ #pwm-cells = <2>;
+ clocks = <&upg_fixed>;
+ };
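+
+ // A hedged consumer sketch: references the controller above using the
+ // two-cell specifier (channel index, period in nanoseconds). The node
+ // name and period are illustrative.
+ backlight {
+ compatible = "pwm-backlight";
+ pwms = <&pwm 0 50000>;
+ };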
diff --git a/Documentation/devicetree/bindings/pwm/pwm.yaml b/Documentation/devicetree/bindings/pwm/pwm.yaml
index 2effe6c0de6b..3c01f85029e5 100644
--- a/Documentation/devicetree/bindings/pwm/pwm.yaml
+++ b/Documentation/devicetree/bindings/pwm/pwm.yaml
@@ -9,6 +9,8 @@ title: PWM controllers (providers)
maintainers:
- Thierry Reding <thierry.reding@gmail.com>
+select: false
+
properties:
$nodename:
pattern: "^pwm(@.*|-[0-9a-f])*$"
diff --git a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
index 63e06d93bca3..c635c181d2c2 100644
--- a/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/qcom,adsp.yaml
@@ -33,6 +33,9 @@ properties:
- qcom,sdm845-adsp-pas
- qcom,sdm845-cdsp-pas
- qcom,sdx55-mpss-pas
+ - qcom,sm6350-adsp-pas
+ - qcom,sm6350-cdsp-pas
+ - qcom,sm6350-mpss-pas
- qcom,sm8150-adsp-pas
- qcom,sm8150-cdsp-pas
- qcom,sm8150-mpss-pas
@@ -158,6 +161,9 @@ allOf:
- qcom,sc8180x-mpss-pas
- qcom,sdm845-adsp-pas
- qcom,sdm845-cdsp-pas
+ - qcom,sm6350-adsp-pas
+ - qcom,sm6350-cdsp-pas
+ - qcom,sm6350-mpss-pas
- qcom,sm8150-adsp-pas
- qcom,sm8150-cdsp-pas
- qcom,sm8150-mpss-pas
@@ -266,6 +272,8 @@ allOf:
- qcom,sc8180x-cdsp-pas
- qcom,sdm845-adsp-pas
- qcom,sdm845-cdsp-pas
+ - qcom,sm6350-adsp-pas
+ - qcom,sm6350-cdsp-pas
- qcom,sm8150-adsp-pas
- qcom,sm8150-cdsp-pas
- qcom,sm8150-slpi-pas
@@ -301,6 +309,7 @@ allOf:
- qcom,sc7280-mpss-pas
- qcom,sc8180x-mpss-pas
- qcom,sdx55-mpss-pas
+ - qcom,sm6350-mpss-pas
- qcom,sm8150-mpss-pas
- qcom,sm8350-mpss-pas
then:
@@ -390,6 +399,23 @@ allOf:
compatible:
contains:
enum:
+ - qcom,sm6350-cdsp-pas
+ then:
+ properties:
+ power-domains:
+ items:
+ - description: CX power domain
+ - description: MX power domain
+ power-domain-names:
+ items:
+ - const: cx
+ - const: mx
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
- qcom,sm8150-adsp-pas
- qcom,sm8150-cdsp-pas
then:
@@ -405,6 +431,7 @@ allOf:
enum:
- qcom,sc7280-mpss-pas
- qcom,sdx55-mpss-pas
+ - qcom,sm6350-mpss-pas
- qcom,sm8150-mpss-pas
- qcom,sm8350-mpss-pas
then:
@@ -425,6 +452,7 @@ allOf:
enum:
- qcom,sc8180x-adsp-pas
- qcom,sc8180x-cdsp-pas
+ - qcom,sm6350-adsp-pas
- qcom,sm8150-slpi-pas
- qcom,sm8250-adsp-pas
- qcom,sm8250-slpi-pas
diff --git a/Documentation/devicetree/bindings/remoteproc/renesas,rcar-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/renesas,rcar-rproc.yaml
new file mode 100644
index 000000000000..a7d25fa920e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/remoteproc/renesas,rcar-rproc.yaml
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/remoteproc/renesas,rcar-rproc.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Renesas R-Car remote processor controller bindings
+
+maintainers:
+ - Julien Massot <julien.massot@iot.bzh>
+
+description: |
+ This document defines the bindings for the remoteproc component that loads and
+ boots firmware on the Renesas R-Car family of chipsets.
+ The R-Car Gen3 family may have a realtime processor; this processor shares
+ peripherals and RAM with the host processor, using the same address map.
+
+properties:
+ compatible:
+ const: renesas,rcar-cr7
+
+ resets:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ memory-region:
+ description:
+ List of phandles to the reserved memory regions associated with the
+ remoteproc device. This is variable and describes the memories shared with
+ the remote processor (e.g. remoteproc firmware and carveouts, rpmsg
+ vrings, ...).
+ (see ../reserved-memory/reserved-memory.yaml)
+
+required:
+ - compatible
+ - resets
+ - memory-region
+ - power-domains
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/r8a7795-cpg-mssr.h>
+ #include <dt-bindings/power/r8a7795-sysc.h>
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cr7_ram: cr7_ram@40040000 {
+ no-map;
+ reg = <0x0 0x40040000 0x0 0x1fc0000>;
+ };
+ };
+
+ cr7_rproc: cr7 {
+ compatible = "renesas,rcar-cr7";
+ memory-region = <&cr7_ram>;
+ power-domains = <&sysc R8A7795_PD_CR7>;
+ resets = <&cpg 222>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,k3-dsp-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,k3-dsp-rproc.yaml
index 5ec6505ac408..7b56497eec4d 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,k3-dsp-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/ti,k3-dsp-rproc.yaml
@@ -33,9 +33,11 @@ properties:
enum:
- ti,j721e-c66-dsp
- ti,j721e-c71-dsp
+ - ti,j721s2-c71-dsp
description:
Use "ti,j721e-c66-dsp" for C66x DSPs on K3 J721E SoCs
Use "ti,j721e-c71-dsp" for C71x DSPs on K3 J721E SoCs
+ Use "ti,j721s2-c71-dsp" for C71x DSPs on K3 J721S2 SoCs
resets:
description: |
@@ -106,6 +108,7 @@ else:
compatible:
enum:
- ti,j721e-c71-dsp
+ - ti,j721s2-c71-dsp
then:
properties:
reg:
diff --git a/Documentation/devicetree/bindings/remoteproc/ti,k3-r5f-rproc.yaml b/Documentation/devicetree/bindings/remoteproc/ti,k3-r5f-rproc.yaml
index eeef255c4045..d9c7e8c2b268 100644
--- a/Documentation/devicetree/bindings/remoteproc/ti,k3-r5f-rproc.yaml
+++ b/Documentation/devicetree/bindings/remoteproc/ti,k3-r5f-rproc.yaml
@@ -38,6 +38,7 @@ properties:
- ti,j721e-r5fss
- ti,j7200-r5fss
- ti,am64-r5fss
+ - ti,j721s2-r5fss
power-domains:
description: |
@@ -64,9 +65,9 @@ properties:
description: |
Configuration Mode for the Dual R5F cores within the R5F cluster.
Should be either a value of 1 (LockStep mode) or 0 (Split mode) on
- most SoCs (AM65x, J721E, J7200), default is LockStep mode if omitted;
- and should be either a value of 0 (Split mode) or 2 (Single-CPU mode)
- on AM64x SoCs, default is Split mode if omitted.
+ most SoCs (AM65x, J721E, J7200, J721S2), default is LockStep mode if
+ omitted; and should be either a value of 0 (Split mode) or 2
+ (Single-CPU mode) on AM64x SoCs, default is Split mode if omitted.
# R5F Processor Child Nodes:
# ==========================
@@ -104,6 +105,7 @@ patternProperties:
- ti,j721e-r5f
- ti,j7200-r5f
- ti,am64-r5f
+ - ti,j721s2-r5f
reg:
items:
diff --git a/Documentation/devicetree/bindings/reserved-memory/nvidia,tegra210-emc-table.yaml b/Documentation/devicetree/bindings/reserved-memory/nvidia,tegra210-emc-table.yaml
new file mode 100644
index 000000000000..035a50fe3ee4
--- /dev/null
+++ b/Documentation/devicetree/bindings/reserved-memory/nvidia,tegra210-emc-table.yaml
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reserved-memory/nvidia,tegra210-emc-table.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra210 EMC Frequency Table Device Tree Bindings
+
+maintainers:
+ - Thierry Reding <thierry.reding@gmail.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+
+description: On Tegra210, firmware passes a binary representation of the
+ EMC frequency table via a reserved memory region.
+
+allOf:
+ - $ref: "reserved-memory.yaml"
+
+properties:
+ compatible:
+ const: nvidia,tegra210-emc-table
+
+ reg:
+ description: region of memory reserved by firmware to pass the EMC
+ frequency table
+
+unevaluatedProperties: false
+
+required:
+ - compatible
+ - reg
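+
+examples:
+ - |
+ // A minimal, hedged sketch of the reserved region. The unit address,
+ // size and node name are illustrative; the actual range is chosen and
+ // passed by firmware.
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ emc-table@90000000 {
+ compatible = "nvidia,tegra210-emc-table";
+ reg = <0x90000000 0x10000>;
+ no-map;
+ };
+ };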
diff --git a/Documentation/devicetree/bindings/reserved-memory/qcom,cmd-db.txt b/Documentation/devicetree/bindings/reserved-memory/qcom,cmd-db.txt
deleted file mode 100644
index 68395530c0a5..000000000000
--- a/Documentation/devicetree/bindings/reserved-memory/qcom,cmd-db.txt
+++ /dev/null
@@ -1,37 +0,0 @@
-Command DB
----------
-
-Command DB is a database that provides a mapping between resource key and the
-resource address for a system resource managed by a remote processor. The data
-is stored in a shared memory region and is loaded by the remote processor.
-
-Some of the Qualcomm Technologies Inc SoC's have hardware accelerators for
-controlling shared resources. Depending on the board configuration the shared
-resource properties may change. These properties are dynamically probed by the
-remote processor and made available in the shared memory.
-
-The bindings for Command DB is specified in the reserved-memory section in
-devicetree. The devicetree representation of the command DB driver should be:
-
-Properties:
-- compatible:
- Usage: required
- Value type: <string>
- Definition: Should be "qcom,cmd-db"
-
-- reg:
- Usage: required
- Value type: <prop encoded array>
- Definition: The register address that points to the actual location of
- the Command DB in memory.
-
-Example:
-
- reserved-memory {
- [...]
- reserved-memory@85fe0000 {
- reg = <0x0 0x85fe0000 0x0 0x20000>;
- compatible = "qcom,cmd-db";
- no-map;
- };
- };
diff --git a/Documentation/devicetree/bindings/reserved-memory/qcom,cmd-db.yaml b/Documentation/devicetree/bindings/reserved-memory/qcom,cmd-db.yaml
new file mode 100644
index 000000000000..df1b5e0ed3f4
--- /dev/null
+++ b/Documentation/devicetree/bindings/reserved-memory/qcom,cmd-db.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/reserved-memory/qcom,cmd-db.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm Command DB
+
+description: |
+ Command DB is a database that provides a mapping between a resource key and the
+ resource address for a system resource managed by a remote processor. The data
+ is stored in a shared memory region and is loaded by the remote processor.
+
+ Some Qualcomm Technologies Inc. SoCs have hardware accelerators for
+ controlling shared resources. Depending on the board configuration, the shared
+ resource properties may change. These properties are dynamically probed by the
+ remote processor and made available in the shared memory.
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+allOf:
+ - $ref: "reserved-memory.yaml"
+
+properties:
+ compatible:
+ const: qcom,cmd-db
+
+required:
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ reserved-memory@85fe0000 {
+ reg = <0x85fe0000 0x20000>;
+ compatible = "qcom,cmd-db";
+ no-map;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/reserved-memory/qcom,rmtfs-mem.txt b/Documentation/devicetree/bindings/reserved-memory/qcom,rmtfs-mem.txt
deleted file mode 100644
index 8562ba1dce69..000000000000
--- a/Documentation/devicetree/bindings/reserved-memory/qcom,rmtfs-mem.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-Qualcomm Remote File System Memory binding
-
-This binding describes the Qualcomm remote filesystem memory, which serves the
-purpose of describing the shared memory region used for remote processors to
-access block device data using the Remote Filesystem protocol.
-
-- compatible:
- Usage: required
- Value type: <stringlist>
- Definition: must be:
- "qcom,rmtfs-mem"
-
-- reg:
- Usage: required for static allocation
- Value type: <prop-encoded-array>
- Definition: must specify base address and size of the memory region,
- as described in reserved-memory.txt
-
-- size:
- Usage: required for dynamic allocation
- Value type: <prop-encoded-array>
- Definition: must specify a size of the memory region, as described in
- reserved-memory.txt
-
-- qcom,client-id:
- Usage: required
- Value type: <u32>
- Definition: identifier of the client to use this region for buffers.
-
-- qcom,vmid:
- Usage: optional
- Value type: <u32>
- Definition: vmid of the remote processor, to set up memory protection.
-
-= EXAMPLE
-The following example shows the remote filesystem memory setup for APQ8016,
-with the rmtfs region for the Hexagon DSP (id #1) located at 0x86700000.
-
- reserved-memory {
- #address-cells = <2>;
- #size-cells = <2>;
- ranges;
-
- rmtfs@86700000 {
- compatible = "qcom,rmtfs-mem";
- reg = <0x0 0x86700000 0x0 0xe0000>;
- no-map;
-
- qcom,client-id = <1>;
- };
- };
diff --git a/Documentation/devicetree/bindings/reserved-memory/qcom,rmtfs-mem.yaml b/Documentation/devicetree/bindings/reserved-memory/qcom,rmtfs-mem.yaml
new file mode 100644
index 000000000000..2998f1c8f0db
--- /dev/null
+++ b/Documentation/devicetree/bindings/reserved-memory/qcom,rmtfs-mem.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/reserved-memory/qcom,rmtfs-mem.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Qualcomm Remote File System Memory
+
+description: |
+ This binding describes the Qualcomm remote filesystem memory: the shared
+ memory region used by remote processors to access block device data using
+ the Remote Filesystem protocol.
+
+maintainers:
+ - Bjorn Andersson <bjorn.andersson@linaro.org>
+
+allOf:
+ - $ref: "reserved-memory.yaml"
+
+properties:
+ compatible:
+ const: qcom,rmtfs-mem
+
+ qcom,client-id:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ identifier of the client to use this region for buffers
+
+ qcom,vmid:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: >
+ vmid of the remote processor, to set up memory protection
+
+required:
+ - qcom,client-id
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ rmtfs@86700000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0x86700000 0xe0000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ };
+ };
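+
+ - |
+ // A hedged variant that also sets up memory protection for the remote
+ // processor; the qcom,vmid value is illustrative, not from the original
+ // text.
+ reserved-memory {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ rmtfs@86700000 {
+ compatible = "qcom,rmtfs-mem";
+ reg = <0x86700000 0xe0000>;
+ no-map;
+
+ qcom,client-id = <1>;
+ qcom,vmid = <15>;
+ };
+ };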
diff --git a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt
deleted file mode 100644
index ee59409640f2..000000000000
--- a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-Broadcom STB SW_INIT-style reset controller
-===========================================
-
-Broadcom STB SoCs have a SW_INIT-style reset controller with separate
-SET/CLEAR/STATUS registers and possibly multiple banks, each of 32 bit
-reset lines.
-
-Please also refer to reset.txt in this directory for common reset
-controller binding usage.
-
-Required properties:
-- compatible: should be brcm,brcmstb-reset
-- reg: register base and length
-- #reset-cells: must be set to 1
-
-Example:
-
- reset: reset-controller@8404318 {
- compatible = "brcm,brcmstb-reset";
- reg = <0x8404318 0x30>;
- #reset-cells = <1>;
- };
-
- &ethernet_switch {
- resets = <&reset 26>;
- reset-names = "switch";
- };
diff --git a/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.yaml b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.yaml
new file mode 100644
index 000000000000..e00efa88a198
--- /dev/null
+++ b/Documentation/devicetree/bindings/reset/brcm,brcmstb-reset.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/reset/brcm,brcmstb-reset.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Broadcom STB SW_INIT-style reset controller
+
+description:
+ Broadcom STB SoCs have a SW_INIT-style reset controller with separate
+ SET/CLEAR/STATUS registers and possibly multiple banks, each with 32
+ reset lines.
+
+ Please also refer to reset.txt in this directory for common reset
+ controller binding usage.
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ compatible:
+ const: brcm,brcmstb-reset
+
+ reg:
+ maxItems: 1
+
+ "#reset-cells":
+ const: 1
+
+required:
+ - compatible
+ - reg
+ - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ reset: reset-controller@8404318 {
+ compatible = "brcm,brcmstb-reset";
+ reg = <0x8404318 0x30>;
+ #reset-cells = <1>;
+ };
+
+ ethernet_switch {
+ resets = <&reset 26>;
+ reset-names = "switch";
+ };
diff --git a/Documentation/devicetree/bindings/rng/apm,rng.txt b/Documentation/devicetree/bindings/rng/apm,rng.txt
deleted file mode 100644
index 4dde4b06cdd9..000000000000
--- a/Documentation/devicetree/bindings/rng/apm,rng.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-APM X-Gene SoC random number generator.
-
-Required properties:
-
-- compatible : should be "apm,xgene-rng"
-- reg : specifies base physical address and size of the registers map
-- clocks : phandle to clock-controller plus clock-specifier pair
-- interrupts : specify the fault interrupt for the RNG device
-
-Example:
-
- rng: rng@10520000 {
- compatible = "apm,xgene-rng";
- reg = <0x0 0x10520000 0x0 0x100>;
- interrupts = <0x0 0x41 0x4>;
- clocks = <&rngpkaclk 0>;
- };
diff --git a/Documentation/devicetree/bindings/rng/apm,x-gene-rng.yaml b/Documentation/devicetree/bindings/rng/apm,x-gene-rng.yaml
new file mode 100644
index 000000000000..02be143cc829
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/apm,x-gene-rng.yaml
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/apm,x-gene-rng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: APM X-Gene SoC Random Number Generator
+
+maintainers:
+ - Khuong Dinh <khuong@os.amperecomputing.com>
+
+properties:
+ compatible:
+ const: apm,xgene-rng
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+ - interrupts
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ rng@10520000 {
+ compatible = "apm,xgene-rng";
+ reg = <0x0 0x10520000 0x0 0x100>;
+ interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&rngpkaclk 0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml b/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml
new file mode 100644
index 000000000000..c1527637eb74
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/atmel,at91-trng.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/atmel,at91-trng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atmel AT91 True Random Number Generator
+
+maintainers:
+ - Nicolas Ferre <nicolas.ferre@microchip.com>
+ - Alexandre Belloni <alexandre.belloni@bootlin.com>
+ - Ludovic Desroches <ludovic.desroches@microchip.com>
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - atmel,at91sam9g45-trng
+ - microchip,sam9x60-trng
+ - items:
+ - enum:
+ - microchip,sama7g5-trng
+ - const: atmel,at91sam9g45-trng
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+ - interrupts
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ rng@fffcc000 {
+ compatible = "atmel,at91sam9g45-trng";
+ reg = <0xfffcc000 0x4000>;
+ interrupts = <6 IRQ_TYPE_LEVEL_HIGH 0>;
+ clocks = <&trng_clk>;
+ };
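+
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ // A hedged sketch of the SAMA7G5 form, using the at91sam9g45 fallback
+ // required by the compatible list above; the unit address, interrupt
+ // and clock values are illustrative.
+ rng@e2020000 {
+ compatible = "microchip,sama7g5-trng", "atmel,at91sam9g45-trng";
+ reg = <0xe2020000 0x100>;
+ interrupts = <53 IRQ_TYPE_LEVEL_HIGH 7>;
+ clocks = <&trng_clk>;
+ };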
diff --git a/Documentation/devicetree/bindings/rng/atmel-trng.txt b/Documentation/devicetree/bindings/rng/atmel-trng.txt
deleted file mode 100644
index 3900ee4f3532..000000000000
--- a/Documentation/devicetree/bindings/rng/atmel-trng.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-Atmel TRNG (True Random Number Generator) block
-
-Required properties:
-- compatible : Should be "atmel,at91sam9g45-trng" or "microchip,sam9x60-trng"
-- reg : Offset and length of the register set of this block
-- interrupts : the interrupt number for the TRNG block
-- clocks: should contain the TRNG clk source
-
-Example:
-
-trng@fffcc000 {
- compatible = "atmel,at91sam9g45-trng";
- reg = <0xfffcc000 0x4000>;
- interrupts = <6 IRQ_TYPE_LEVEL_HIGH 0>;
- clocks = <&trng_clk>;
-};
diff --git a/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt b/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
deleted file mode 100644
index 802523196ee5..000000000000
--- a/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-HWRNG support for the iproc-rng200 driver
-
-Required properties:
-- compatible : Must be one of:
- "brcm,bcm2711-rng200"
- "brcm,bcm7211-rng200"
- "brcm,bcm7278-rng200"
- "brcm,iproc-rng200"
-- reg : base address and size of control register block
-
-Example:
-
-rng {
- compatible = "brcm,iproc-rng200";
- reg = <0x18032000 0x28>;
-};
diff --git a/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.yaml b/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.yaml
new file mode 100644
index 000000000000..a00e9bc8b609
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/brcm,iproc-rng200.yaml
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/rng/brcm,iproc-rng200.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: HWRNG support for the iproc-rng200 driver
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - brcm,bcm2711-rng200
+ - brcm,bcm7211-rng200
+ - brcm,bcm7278-rng200
+ - brcm,iproc-rng200
+
+ reg:
+ maxItems: 1
+
+additionalProperties: false
+
+examples:
+ - |
+ rng@18032000 {
+ compatible = "brcm,iproc-rng200";
+ reg = <0x18032000 0x28>;
+ };
diff --git a/Documentation/devicetree/bindings/rng/ks-sa-rng.txt b/Documentation/devicetree/bindings/rng/ks-sa-rng.txt
deleted file mode 100644
index b7a65b487901..000000000000
--- a/Documentation/devicetree/bindings/rng/ks-sa-rng.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-Keystone SoC Hardware Random Number Generator(HWRNG) Module
-
-On Keystone SoCs HWRNG module is a submodule of the Security Accelerator.
-
-- compatible: should be "ti,keystone-rng"
-- ti,syscon-sa-cfg: phandle to syscon node of the SA configuration registers.
- This registers are shared between hwrng and crypto drivers.
-- clocks: phandle to the reference clocks for the subsystem
-- clock-names: functional clock name. Should be set to "fck"
-- reg: HWRNG module register space
-
-Example:
-/* K2HK */
-
-rng@24000 {
- compatible = "ti,keystone-rng";
- ti,syscon-sa-cfg = <&sa_config>;
- clocks = <&clksa>;
- clock-names = "fck";
- reg = <0x24000 0x1000>;
-};
diff --git a/Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.txt b/Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.txt
deleted file mode 100644
index 65c04172fc8c..000000000000
--- a/Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-NPCM SoC Random Number Generator
-
-Required properties:
-- compatible : "nuvoton,npcm750-rng" for the NPCM7XX BMC.
-- reg : Specifies physical base address and size of the registers.
-
-Example:
-
-rng: rng@f000b000 {
- compatible = "nuvoton,npcm750-rng";
- reg = <0xf000b000 0x8>;
-};
diff --git a/Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.yaml b/Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.yaml
new file mode 100644
index 000000000000..abd134c9d400
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/nuvoton,npcm-rng.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/nuvoton,npcm-rng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Nuvoton NPCM SoC Random Number Generator
+
+maintainers:
+ - Avi Fishman <avifishman70@gmail.com>
+ - Tomer Maimon <tmaimon77@gmail.com>
+ - Tali Perry <tali.perry1@gmail.com>
+ - Patrick Venture <venture@google.com>
+ - Nancy Yuen <yuenn@google.com>
+ - Benjamin Fair <benjaminfair@google.com>
+
+properties:
+ compatible:
+ const: nuvoton,npcm750-rng
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ rng@f000b000 {
+ compatible = "nuvoton,npcm750-rng";
+ reg = <0xf000b000 0x8>;
+ };
diff --git a/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt b/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt
deleted file mode 100644
index f315c9723bd2..000000000000
--- a/Documentation/devicetree/bindings/rng/omap3_rom_rng.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-OMAP ROM RNG driver binding
-
-Secure SoCs may provide RNG via secure ROM calls like Nokia N900 does. The
-implementation can depend on the SoC secure ROM used.
-
-- compatible:
- Usage: required
- Value type: <string>
- Definition: must be "nokia,n900-rom-rng"
-
-- clocks:
- Usage: required
- Value type: <prop-encoded-array>
- Definition: reference to the the RNG interface clock
-
-- clock-names:
- Usage: required
- Value type: <stringlist>
- Definition: must be "ick"
-
-Example:
-
- rom_rng: rng {
- compatible = "nokia,n900-rom-rng";
- clocks = <&rng_ick>;
- clock-names = "ick";
- };
diff --git a/Documentation/devicetree/bindings/rng/st,rng.txt b/Documentation/devicetree/bindings/rng/st,rng.txt
deleted file mode 100644
index 35734bc282e9..000000000000
--- a/Documentation/devicetree/bindings/rng/st,rng.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-STMicroelectronics HW Random Number Generator
-----------------------------------------------
-
-Required parameters:
-compatible : Should be "st,rng"
-reg : Base address and size of IP's register map.
-clocks : Phandle to device's clock (See: ../clocks/clock-bindings.txt)
-
-Example:
-
-rng@fee80000 {
- compatible = "st,rng";
- reg = <0xfee80000 0x1000>;
- clocks = <&clk_sysin>;
-}
diff --git a/Documentation/devicetree/bindings/rng/st,rng.yaml b/Documentation/devicetree/bindings/rng/st,rng.yaml
new file mode 100644
index 000000000000..ff1211ef9046
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/st,rng.yaml
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/st,rng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics Hardware Random Number Generator
+
+maintainers:
+ - Patrice Chotard <patrice.chotard@foss.st.com>
+
+properties:
+ compatible:
+ const: st,rng
+
+ clocks:
+ maxItems: 1
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - clocks
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ rng@fee80000 {
+ compatible = "st,rng";
+ reg = <0xfee80000 0x1000>;
+ clocks = <&clk_sysin>;
+ };
diff --git a/Documentation/devicetree/bindings/rng/ti,keystone-rng.yaml b/Documentation/devicetree/bindings/rng/ti,keystone-rng.yaml
new file mode 100644
index 000000000000..e749818fc193
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/ti,keystone-rng.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/ti,keystone-rng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Keystone SoC Hardware Random Number Generator
+
+maintainers:
+ - Nishanth Menon <nm@ti.com>
+ - Santosh Shilimkar <ssantosh@kernel.org>
+
+properties:
+ compatible:
+ const: ti,keystone-rng
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: fck
+
+ reg:
+ maxItems: 1
+
+ ti,syscon-sa-cfg:
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+ description: |
+ Phandle to syscon node of the SA configuration registers. These
+ registers are shared between HWRNG and crypto drivers.
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+ - reg
+ - ti,syscon-sa-cfg
+
+additionalProperties: false
+
+examples:
+ - |
+ rng@24000 {
+ compatible = "ti,keystone-rng";
+ ti,syscon-sa-cfg = <&sa_config>;
+ clocks = <&clksa>;
+ clock-names = "fck";
+ reg = <0x24000 0x1000>;
+ };
diff --git a/Documentation/devicetree/bindings/rng/ti,omap-rom-rng.yaml b/Documentation/devicetree/bindings/rng/ti,omap-rom-rng.yaml
new file mode 100644
index 000000000000..9a58440b1ab1
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/ti,omap-rom-rng.yaml
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/ti,omap-rom-rng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: OMAP ROM Random Number Generator
+
+maintainers:
+ - Pali Rohár <pali@kernel.org>
+ - Tony Lindgren <tony@atomide.com>
+
+description:
+ Secure SoCs may provide RNG via secure ROM calls, as the Nokia N900 does.
+ The implementation can depend on the SoC secure ROM used.
+
+properties:
+ compatible:
+ const: nokia,n900-rom-rng
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ items:
+ - const: ick
+
+required:
+ - compatible
+ - clocks
+ - clock-names
+
+additionalProperties: false
+
+examples:
+ - |
+ rng {
+ compatible = "nokia,n900-rom-rng";
+ clocks = <&rng_ick>;
+ clock-names = "ick";
+ };
diff --git a/Documentation/devicetree/bindings/rng/timeriomem_rng.txt b/Documentation/devicetree/bindings/rng/timeriomem_rng.txt
deleted file mode 100644
index fb4846160047..000000000000
--- a/Documentation/devicetree/bindings/rng/timeriomem_rng.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-HWRNG support for the timeriomem_rng driver
-
-Required properties:
-- compatible : "timeriomem_rng"
-- reg : base address to sample from
-- period : wait time in microseconds to use between samples
-
-Optional properties:
-- quality : estimated number of bits of true entropy per 1024 bits read from the
- rng. Defaults to zero which causes the kernel's default quality to
- be used instead. Note that the default quality is usually zero
- which disables using this rng to automatically fill the kernel's
- entropy pool.
-
-N.B. currently 'reg' must be at least four bytes wide and 32-bit aligned
-
-Example:
-
-hwrng@44 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "timeriomem_rng";
- reg = <0x44 0x04>;
- period = <1000000>;
-};
diff --git a/Documentation/devicetree/bindings/rng/timeriomem_rng.yaml b/Documentation/devicetree/bindings/rng/timeriomem_rng.yaml
new file mode 100644
index 000000000000..84bf518a5549
--- /dev/null
+++ b/Documentation/devicetree/bindings/rng/timeriomem_rng.yaml
@@ -0,0 +1,48 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rng/timeriomem_rng.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: TimerIO Random Number Generator
+
+maintainers:
+ - Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+
+properties:
+ compatible:
+ const: timeriomem_rng
+
+ period:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: wait time in microseconds to use between samples
+
+ quality:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0
+ description:
+ Estimated number of bits of true entropy per 1024 bits read from the rng.
+ Defaults to zero, which causes the kernel's default quality to be used
+ instead. Note that the default quality is usually zero, which disables
+ using this rng to automatically fill the kernel's entropy pool.
+
+ reg:
+ maxItems: 1
+ description:
+ Base address to sample from. Currently 'reg' must be at least four bytes
+ wide and 32-bit aligned.
+
+required:
+ - compatible
+ - period
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ rng@44 {
+ compatible = "timeriomem_rng";
+ reg = <0x44 0x04>;
+ period = <1000000>;
+ };
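+
+ - |
+ // A hedged variant that declares an entropy-quality estimate so the
+ // kernel can credit this rng toward its entropy pool; the quality
+ // value is illustrative.
+ rng@44 {
+ compatible = "timeriomem_rng";
+ reg = <0x44 0x04>;
+ period = <1000000>;
+ quality = <1000>;
+ };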
diff --git a/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt b/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt
deleted file mode 100644
index d946f28502b3..000000000000
--- a/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Broadcom STB wake-up Timer
-
-The Broadcom STB wake-up timer provides a 27Mhz resolution timer, with the
-ability to wake up the system from low-power suspend/standby modes.
-
-Required properties:
-- compatible : should contain "brcm,brcmstb-waketimer"
-- reg : the register start and length for the WKTMR block
-- interrupts : The TIMER interrupt
-- clocks : The phandle to the UPG fixed clock (27Mhz domain)
-
-Example:
-
-waketimer@f0411580 {
- compatible = "brcm,brcmstb-waketimer";
- reg = <0xf0411580 0x14>;
- interrupts = <0x3>;
- interrupt-parent = <&aon_pm_l2_intc>;
- clocks = <&upg_fixed>;
-};
diff --git a/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.yaml b/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.yaml
new file mode 100644
index 000000000000..9fe079917a98
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/brcm,brcmstb-waketimer.yaml
@@ -0,0 +1,44 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/brcm,brcmstb-waketimer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom STB wake-up Timer
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+description:
+ The Broadcom STB wake-up timer provides a 27 MHz resolution timer, with the
+ ability to wake up the system from low-power suspend/standby modes.
+
+allOf:
+ - $ref: "rtc.yaml#"
+
+properties:
+ compatible:
+ const: brcm,brcmstb-waketimer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ description: the TIMER interrupt
+ maxItems: 1
+
+ clocks:
+ description: clock reference in the 27 MHz domain
+ maxItems: 1
+
+additionalProperties: false
+
+examples:
+ - |
+ rtc@f0411580 {
+ compatible = "brcm,brcmstb-waketimer";
+ reg = <0xf0411580 0x14>;
+ interrupts = <0x3>;
+ interrupt-parent = <&aon_pm_l2_intc>;
+ clocks = <&upg_fixed>;
+ };
diff --git a/Documentation/devicetree/bindings/rtc/epson,rx8900.yaml b/Documentation/devicetree/bindings/rtc/epson,rx8900.yaml
index 29fe39bb08ad..d12855e7ffd7 100644
--- a/Documentation/devicetree/bindings/rtc/epson,rx8900.yaml
+++ b/Documentation/devicetree/bindings/rtc/epson,rx8900.yaml
@@ -15,6 +15,7 @@ allOf:
properties:
compatible:
enum:
+ - epson,rx8804
- epson,rx8900
- microcrystal,rv8803
diff --git a/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml b/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
index 4fba6dba16f3..6fa7d9fc2dc7 100644
--- a/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/qcom-pm8xxx-rtc.yaml
@@ -19,7 +19,14 @@ properties:
- qcom,pmk8350-rtc
reg:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
+
+ reg-names:
+ minItems: 1
+ items:
+ - const: rtc
+ - const: alarm
interrupts:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
index 2359f541b770..764717ce1873 100644
--- a/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
+++ b/Documentation/devicetree/bindings/rtc/st,stm32-rtc.yaml
@@ -127,6 +127,7 @@ examples:
st,syscfg = <&pwrcfg 0x00 0x100>;
};
+ - |
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/stm32mp1-clks.h>
rtc@5c004000 {
diff --git a/Documentation/devicetree/bindings/rtc/sunplus,sp7021-rtc.yaml b/Documentation/devicetree/bindings/rtc/sunplus,sp7021-rtc.yaml
new file mode 100644
index 000000000000..fd1b3e71ff2c
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/sunplus,sp7021-rtc.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) Sunplus Co., Ltd. 2021
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/rtc/sunplus,sp7021-rtc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sunplus SP7021 Real Time Clock controller
+
+maintainers:
+ - Vincent Shih <vincent.sunplus@gmail.com>
+
+properties:
+ compatible:
+ const: sunplus,sp7021-rtc
+
+ reg:
+ maxItems: 1
+
+ reg-names:
+ items:
+ - const: rtc
+
+ clocks:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - resets
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ rtc: serial@9c003a00 {
+ compatible = "sunplus,sp7021-rtc";
+ reg = <0x9c003a00 0x80>;
+ reg-names = "rtc";
+ clocks = <&clkc 0x12>;
+ resets = <&rstc 0x02>;
+ interrupt-parent = <&intc>;
+ interrupts = <163 IRQ_TYPE_EDGE_RISING>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
index 7487aa6ef849..72e8868db3e0 100644
--- a/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
+++ b/Documentation/devicetree/bindings/serial/amlogic,meson-uart.yaml
@@ -29,6 +29,7 @@ properties:
- amlogic,meson8-uart
- amlogic,meson8b-uart
- amlogic,meson-gx-uart
+ - amlogic,meson-s4-uart
- const: amlogic,meson-ao-uart
- description: Everything-Else power domain UART controller
enum:
@@ -36,6 +37,7 @@ properties:
- amlogic,meson8-uart
- amlogic,meson8b-uart
- amlogic,meson-gx-uart
+ - amlogic,meson-s4-uart
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml b/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
index a90c971b4f1f..6e04e3848261 100644
--- a/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
+++ b/Documentation/devicetree/bindings/serial/fsl-lpuart.yaml
@@ -21,9 +21,15 @@ properties:
- fsl,ls1028a-lpuart
- fsl,imx7ulp-lpuart
- fsl,imx8qm-lpuart
+ - fsl,imxrt1050-lpuart
- items:
- - const: fsl,imx8qxp-lpuart
+ - enum:
+ - fsl,imx8qxp-lpuart
+ - fsl,imx8ulp-lpuart
- const: fsl,imx7ulp-lpuart
+ - items:
+ - const: fsl,imx8qm-lpuart
+ - const: fsl,imx8qxp-lpuart
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/serial/pl011.yaml b/Documentation/devicetree/bindings/serial/pl011.yaml
index 5ea00f8a283d..d8aed84abcd3 100644
--- a/Documentation/devicetree/bindings/serial/pl011.yaml
+++ b/Documentation/devicetree/bindings/serial/pl011.yaml
@@ -91,6 +91,9 @@ properties:
3000ms.
default: 3000
+ resets:
+ maxItems: 1
+
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci.yaml b/Documentation/devicetree/bindings/serial/renesas,sci.yaml
index 22ed2f0b1dc3..8dda4e10e09d 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,sci.yaml
@@ -14,7 +14,15 @@ allOf:
properties:
compatible:
- const: renesas,sci
+ oneOf:
+ - items:
+ - enum:
+ - renesas,r9a07g044-sci # RZ/G2{L,LC}
+ - renesas,r9a07g054-sci # RZ/V2L
+ - const: renesas,sci # generic SCI compatible UART
+
+ - items:
+ - const: renesas,sci # generic SCI compatible UART
reg:
maxItems: 1
@@ -54,18 +62,46 @@ required:
- clocks
- clock-names
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,r9a07g044-sci
+ - renesas,r9a07g054-sci
+then:
+ properties:
+ resets:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+
+ required:
+ - resets
+ - power-domains
+
unevaluatedProperties: false
examples:
- |
+ #include <dt-bindings/clock/r9a07g044-cpg.h>
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
aliases {
serial0 = &sci0;
};
- sci0: serial@ffff78 {
- compatible = "renesas,sci";
- reg = <0xffff78 8>;
- interrupts = <88 0>, <89 0>, <90 0>, <91 0>;
- clocks = <&fclk>;
+ sci0: serial@1004d000 {
+ compatible = "renesas,r9a07g044-sci", "renesas,sci";
+ reg = <0x1004d000 0x400>;
+ interrupts = <GIC_SPI 405 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 406 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 407 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 408 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "eri", "rxi", "txi", "tei";
+ clocks = <&cpg CPG_MOD R9A07G044_SCI0_CLKP>;
clock-names = "fck";
+ power-domains = <&cpg>;
+ resets = <&cpg R9A07G044_SCI0_RST>;
};
diff --git a/Documentation/devicetree/bindings/serial/renesas,scif.yaml b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
index 6b8731f7f2fb..ba5d3e0acc63 100644
--- a/Documentation/devicetree/bindings/serial/renesas,scif.yaml
+++ b/Documentation/devicetree/bindings/serial/renesas,scif.yaml
@@ -66,7 +66,19 @@ properties:
- items:
- enum:
+ - renesas,scif-r8a779f0 # R-Car S4-8
+ - const: renesas,rcar-gen4-scif # R-Car Gen4
+ - const: renesas,scif # generic SCIF compatible UART
+
+ - items:
+ - enum:
- renesas,scif-r9a07g044 # RZ/G2{L,LC}
+ - renesas,scif-r9a07g054 # RZ/V2L
+
+ - items:
+ - enum:
+ - renesas,scif-r9a07g054 # RZ/V2L
+ - const: renesas,scif-r9a07g044 # RZ/G2{L,LC} fallback for RZ/V2L
reg:
maxItems: 1
@@ -153,6 +165,9 @@ if:
enum:
- renesas,rcar-gen2-scif
- renesas,rcar-gen3-scif
+ - renesas,rcar-gen4-scif
+ - renesas,scif-r9a07g044
+ - renesas,scif-r9a07g054
then:
required:
- resets
diff --git a/Documentation/devicetree/bindings/sound/ak4375.yaml b/Documentation/devicetree/bindings/sound/ak4375.yaml
new file mode 100644
index 000000000000..f1d5074a024d
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ak4375.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/ak4375.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: AK4375 DAC and headphone amplifier Device Tree Bindings
+
+maintainers:
+ - Vincent Knecht <vincent.knecht@mailoo.org>
+
+properties:
+ compatible:
+ const: asahi-kasei,ak4375
+
+ reg:
+ maxItems: 1
+
+ '#sound-dai-cells':
+ const: 0
+
+ avdd-supply:
+ description: regulator phandle for the AVDD power supply.
+
+ tvdd-supply:
+ description: regulator phandle for the TVDD power supply.
+
+ pdn-gpios:
+ description: optional GPIO to set the PDN pin.
+
+required:
+ - compatible
+ - reg
+ - '#sound-dai-cells'
+ - avdd-supply
+ - tvdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ headphones: audio-codec@10 {
+ compatible = "asahi-kasei,ak4375";
+ reg = <0x10>;
+ avdd-supply = <&reg_headphones_avdd>;
+ tvdd-supply = <&pm8916_l6>;
+ pdn-gpios = <&msmgpio 114 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&headphones_pdn_default>;
+ #sound-dai-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/amlogic,aiu.yaml b/Documentation/devicetree/bindings/sound/amlogic,aiu.yaml
index f50558ed914f..0705f91199a0 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,aiu.yaml
+++ b/Documentation/devicetree/bindings/sound/amlogic,aiu.yaml
@@ -9,6 +9,9 @@ title: Amlogic AIU audio output controller
maintainers:
- Jerome Brunet <jbrunet@baylibre.com>
+allOf:
+ - $ref: name-prefix.yaml#
+
properties:
$nodename:
pattern: "^audio-controller@.*"
@@ -65,6 +68,8 @@ properties:
resets:
maxItems: 1
+ sound-name-prefix: true
+
required:
- "#sound-dai-cells"
- compatible
diff --git a/Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml b/Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml
index 3c3891d17238..77469a45bb7a 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml
+++ b/Documentation/devicetree/bindings/sound/amlogic,g12a-toacodec.yaml
@@ -9,6 +9,9 @@ title: Amlogic G12a Internal DAC Control Glue
maintainers:
- Jerome Brunet <jbrunet@baylibre.com>
+allOf:
+ - $ref: name-prefix.yaml#
+
properties:
$nodename:
pattern: "^audio-controller@.*"
@@ -31,6 +34,8 @@ properties:
resets:
maxItems: 1
+ sound-name-prefix: true
+
required:
- "#sound-dai-cells"
- compatible
diff --git a/Documentation/devicetree/bindings/sound/amlogic,t9015.yaml b/Documentation/devicetree/bindings/sound/amlogic,t9015.yaml
index db7b04da0b39..580a3d040abc 100644
--- a/Documentation/devicetree/bindings/sound/amlogic,t9015.yaml
+++ b/Documentation/devicetree/bindings/sound/amlogic,t9015.yaml
@@ -9,6 +9,9 @@ title: Amlogic T9015 Internal Audio DAC
maintainers:
- Jerome Brunet <jbrunet@baylibre.com>
+allOf:
+ - $ref: name-prefix.yaml#
+
properties:
$nodename:
pattern: "^audio-controller@.*"
@@ -38,6 +41,8 @@ properties:
description:
Analogue power supply.
+ sound-name-prefix: true
+
required:
- "#sound-dai-cells"
- compatible
diff --git a/Documentation/devicetree/bindings/sound/audio-graph-port.yaml b/Documentation/devicetree/bindings/sound/audio-graph-port.yaml
index 43e7f86e3b23..476dcb49ece6 100644
--- a/Documentation/devicetree/bindings/sound/audio-graph-port.yaml
+++ b/Documentation/devicetree/bindings/sound/audio-graph-port.yaml
@@ -42,10 +42,15 @@ patternProperties:
$ref: /schemas/types.yaml#/definitions/flag
frame-master:
description: Indicates dai-link frame master.
- $ref: /schemas/types.yaml#/definitions/phandle
+ oneOf:
+ - $ref: /schemas/types.yaml#/definitions/flag
+ - $ref: /schemas/types.yaml#/definitions/phandle
bitclock-master:
description: Indicates dai-link bit clock master
- $ref: /schemas/types.yaml#/definitions/phandle
+ oneOf:
+ - $ref: /schemas/types.yaml#/definitions/flag
+ - $ref: /schemas/types.yaml#/definitions/phandle
+
dai-format:
description: audio format.
items:
diff --git a/Documentation/devicetree/bindings/sound/cirrus,cs42l42.yaml b/Documentation/devicetree/bindings/sound/cirrus,cs42l42.yaml
new file mode 100644
index 000000000000..31800f70e9d9
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/cirrus,cs42l42.yaml
@@ -0,0 +1,225 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/cirrus,cs42l42.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Cirrus Logic CS42L42 audio CODEC
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+description:
+ The CS42L42 is a low-power audio codec designed for portable applications.
+ It provides a high-dynamic range, stereo DAC for audio playback and a mono
+ high-dynamic-range ADC for audio capture. There is an integrated headset
+ detection block.
+
+properties:
+ compatible:
+ enum:
+ - cirrus,cs42l42
+
+ reg:
+ description:
+ The I2C address of the CS42L42.
+ maxItems: 1
+
+ VP-supply:
+ description:
+ VP power supply.
+
+ VCP-supply:
+ description:
+ Charge pump power supply.
+
+ VD_FILT-supply:
+ description:
+ FILT+ power supply.
+
+ VL-supply:
+ description:
+ Logic power supply.
+
+ VA-supply:
+ description:
+ Analog power supply.
+
+ reset-gpios:
+ description:
+ This pin will be asserted and then deasserted to reset the
+ CS42L42 before communication starts.
+ maxItems: 1
+
+ interrupts:
+ description:
+ Interrupt for CS42L42 IRQ line.
+ maxItems: 1
+
+ cirrus,ts-inv:
+ description: |
+ Sets the behaviour of the jack plug detect switch.
+
+ 0 - (Default) Shorted to tip when unplugged, open when plugged.
+ This is "inverted tip sense (ITS)" in the datasheet.
+
+ 1 - Open when unplugged, shorted to tip when plugged.
+ This is "normal tip sense (TS)" in the datasheet.
+
+ The CS42L42_TS_INV_* defines are available for this.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 1
+
+ cirrus,ts-dbnc-rise:
+ description: |
+ Debounce the rising edge of TIP_SENSE_PLUG. With no
+ debounce, the tip sense pin might be noisy on a plug event.
+
+ 0 - 0ms
+ 1 - 125ms
+ 2 - 250ms
+ 3 - 500ms
+ 4 - 750ms
+ 5 - 1s (Default)
+ 6 - 1.25s
+ 7 - 1.5s
+
+ The CS42L42_TS_DBNCE_* defines are available for this.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 7
+
+ cirrus,ts-dbnc-fall:
+ description: |
+ Debounce the falling edge of TIP_SENSE_UNPLUG. With no
+ debounce, the tip sense pin might be noisy on an unplug event.
+
+ 0 - 0ms
+ 1 - 125ms
+ 2 - 250ms
+ 3 - 500ms
+ 4 - 750ms
+ 5 - 1s (Default)
+ 6 - 1.25s
+ 7 - 1.5s
+
+ The CS42L42_TS_DBNCE_* defines are available for this.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 7
+
+ cirrus,btn-det-init-dbnce:
+ description: |
+ This sets how long to wait after enabling button detection
+ interrupts before servicing button interrupts, to allow the
+ HS bias time to settle. Value is in milliseconds.
+ There may be erroneous button interrupts if this debounce time
+ is too short.
+
+ 0ms - 200ms,
+ Default = 100ms
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 200
+
+ cirrus,btn-det-event-dbnce:
+ description: |
+ This sets how long to wait after receiving a button press
+ interrupt before processing it. Allows time for the button
+ press to make a clean connection with the bias resistors.
+ Value is in milliseconds.
+
+ 0ms - 20ms,
+ Default = 10ms
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 20
+
+ cirrus,bias-lvls:
+ description: |
+ For a level-detect headset button scheme, each button will bias
+ the mic pin to a certain voltage. To determine which button was
+ pressed, the voltage is compared to sequential, decreasing
+ voltages, until the compared voltage < bias voltage.
+ For different hardware setups, a designer might want to tweak this.
+ This is an array of descending values for the comparator voltage,
+ given as percent of the HSBIAS voltage.
+
+ Array of 4 values, each 0-63
+ < x1 x2 x3 x4 >
+ Default = < 15 8 4 1 >
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ minItems: 4
+ maxItems: 4
+ items:
+ minimum: 0
+ maximum: 63
+
+ cirrus,hs-bias-ramp-rate:
+ description: |
+ If present this sets the rate that the HS bias should rise and fall.
+ The actual rise and fall times depend on external hardware (the
+ datasheet gives several rise and fall time examples).
+
+ 0 - Fast rise time; slow, load-dependent fall time
+ 1 - Fast
+ 2 - Slow (default)
+ 3 - Slowest
+
+ The CS42L42_HSBIAS_RAMP_* defines are available for this.
+ $ref: "/schemas/types.yaml#/definitions/uint32"
+ minimum: 0
+ maximum: 3
+
+ cirrus,hs-bias-sense-disable:
+ description: |
+      If present, the HSBIAS sense is disabled. This configures the HSBIAS
+      output current sense through the external 2.21-kΩ resistor. HSBIAS_SENSE
+      is a hardware feature that reduces the potential pop noise when the
+      headset plug is removed slowly. On some platforms, however, ESD voltage
+      will affect it and cause plug detection to fail, especially with the
+      CTIA headset type. For different hardware setups, a designer might want
+      to tweak the default behavior.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - VP-supply
+ - VCP-supply
+ - VD_FILT-supply
+ - VL-supply
+ - VA-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/sound/cs42l42.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cs42l42: cs42l42@48 {
+ compatible = "cirrus,cs42l42";
+ reg = <0x48>;
+ VA-supply = <&dummy_vreg>;
+ VP-supply = <&dummy_vreg>;
+ VCP-supply = <&dummy_vreg>;
+ VD_FILT-supply = <&dummy_vreg>;
+ VL-supply = <&dummy_vreg>;
+
+ reset-gpios = <&axi_gpio_0 1 0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <55 8>;
+
+ cirrus,ts-inv = <CS42L42_TS_INV_DIS>;
+ cirrus,ts-dbnc-rise = <CS42L42_TS_DBNCE_1000>;
+ cirrus,ts-dbnc-fall = <CS42L42_TS_DBNCE_0>;
+ cirrus,btn-det-init-dbnce = <100>;
+ cirrus,btn-det-event-dbnce = <10>;
+ cirrus,bias-lvls = <0x0F 0x08 0x04 0x01>;
+ cirrus,hs-bias-ramp-rate = <CS42L42_HSBIAS_RAMP_SLOW>;
+ };
+ };
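To put the example's cirrus,bias-lvls values in perspective: <0x0F 0x08
0x04 0x01> is 15, 8, 4 and 1 percent of the HS bias voltage. Assuming,
purely for illustration, an HS bias of 2.7 V, the four comparator
thresholds would sit near 0.405 V, 0.216 V, 0.108 V and 0.027 V, and a
button press is identified by the first threshold that falls below the
voltage measured on the mic pin.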
diff --git a/Documentation/devicetree/bindings/sound/cs42l42.txt b/Documentation/devicetree/bindings/sound/cs42l42.txt
deleted file mode 100644
index 3b7705623980..000000000000
--- a/Documentation/devicetree/bindings/sound/cs42l42.txt
+++ /dev/null
@@ -1,115 +0,0 @@
-CS42L42 audio CODEC
-
-Required properties:
-
- - compatible : "cirrus,cs42l42"
-
- - reg : the I2C address of the device for I2C.
-
- - VP-supply, VCP-supply, VD_FILT-supply, VL-supply, VA-supply :
- power supplies for the device, as covered in
- Documentation/devicetree/bindings/regulator/regulator.txt.
-
-Optional properties:
-
- - reset-gpios : a GPIO spec for the reset pin. If specified, it will be
- deasserted before communication to the codec starts.
-
- - interrupts : IRQ line info CS42L42.
- (See Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
- for further information relating to interrupt properties)
-
- - cirrus,ts-inv : Boolean property. Sets the behaviour of the jack plug
- detect switch.
-
- 0 = (Default) Shorted to tip when unplugged, open when plugged.
- This is "inverted tip sense (ITS)" in the datasheet.
-
- 1 = Open when unplugged, shorted to tip when plugged.
- This is "normal tip sense (TS)" in the datasheet.
-
- - cirrus,ts-dbnc-rise : Debounce the rising edge of TIP_SENSE_PLUG. With no
- debounce, the tip sense pin might be noisy on a plug event.
-
- 0 - 0ms,
- 1 - 125ms,
- 2 - 250ms,
- 3 - 500ms,
- 4 - 750ms,
- 5 - (Default) 1s,
- 6 - 1.25s,
- 7 - 1.5s,
-
- - cirrus,ts-dbnc-fall : Debounce the falling edge of TIP_SENSE_UNPLUG.
- With no debounce, the tip sense pin might be noisy on an unplug event.
-
- 0 - 0ms,
- 1 - 125ms,
- 2 - 250ms,
- 3 - 500ms,
- 4 - 750ms,
- 5 - (Default) 1s,
- 6 - 1.25s,
- 7 - 1.5s,
-
- - cirrus,btn-det-init-dbnce : This sets how long the driver sleeps after
- enabling button detection interrupts. After auto-detection and before
- servicing button interrupts, the HS bias needs time to settle. If you
- don't wait, there is possibility for erroneous button interrupt.
-
- 0ms - 200ms,
- Default = 100ms
-
- - cirrus,btn-det-event-dbnce : This sets how long the driver delays after
- receiving a button press interrupt. With level detect interrupts, you want
- to wait a small amount of time to make sure the button press is making a
- clean connection with the bias resistors.
-
- 0ms - 20ms,
- Default = 10ms
-
- - cirrus,bias-lvls : For a level-detect headset button scheme, each button
- will bias the mic pin to a certain voltage. To determine which button was
- pressed, the driver will compare this biased voltage to sequential,
- decreasing voltages and will stop when a comparator is tripped,
- indicating a comparator voltage < bias voltage. This value represents a
- percentage of the internally generated HS bias voltage. For different
- hardware setups, a designer might want to tweak this. This is an array of
- descending values for the comparator voltage.
-
- Array of 4 values
- Each 0-63
- < x1 x2 x3 x4 >
- Default = < 15 8 4 1>
-
- - cirrus,hs-bias-sense-disable: This is boolean property. If present the
- HSBIAS sense is disabled. Configures HSBIAS output current sense through
- the external 2.21-k resistor. HSBIAS_SENSE is hardware feature to reduce
- the potential pop noise during the headset plug out slowly. But on some
- platforms ESD voltage will affect it causing test to fail, especially
- with CTIA headset type. For different hardware setups, a designer might
- want to tweak default behavior.
-
-Example:
-
-cs42l42: cs42l42@48 {
- compatible = "cirrus,cs42l42";
- reg = <0x48>;
- VA-supply = <&dummy_vreg>;
- VP-supply = <&dummy_vreg>;
- VCP-supply = <&dummy_vreg>;
- VD_FILT-supply = <&dummy_vreg>;
- VL-supply = <&dummy_vreg>;
-
- reset-gpios = <&axi_gpio_0 1 0>;
- interrupt-parent = <&gpio0>;
- interrupts = <55 8>
-
- cirrus,ts-inv = <0x00>;
- cirrus,ts-dbnc-rise = <0x05>;
- cirrus,ts-dbnc-fall = <0x00>;
- cirrus,btn-det-init-dbnce = <100>;
- cirrus,btn-det-event-dbnce = <10>;
- cirrus,bias-lvls = <0x0F 0x08 0x04 0x01>;
- cirrus,hs-bias-ramp-rate = <0x02>;
-};
diff --git a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
index 77adbebed824..c3e9f3485449 100644
--- a/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
+++ b/Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
@@ -8,6 +8,7 @@ title: Audio codec controlled by ChromeOS EC
maintainers:
- Cheng-Yi Chiang <cychiang@chromium.org>
+ - Tzung-Bi Shih <tzungbi@google.com>
description: |
Google's ChromeOS EC codec is a digital mic codec provided by the
diff --git a/Documentation/devicetree/bindings/sound/linux,spdif-dit.yaml b/Documentation/devicetree/bindings/sound/linux,spdif-dit.yaml
index c6b070e1d014..a4f9257e313d 100644
--- a/Documentation/devicetree/bindings/sound/linux,spdif-dit.yaml
+++ b/Documentation/devicetree/bindings/sound/linux,spdif-dit.yaml
@@ -9,6 +9,9 @@ title: Dummy SPDIF Transmitter Device Tree Bindings
maintainers:
- Mark Brown <broonie@kernel.org>
+allOf:
+ - $ref: name-prefix.yaml#
+
properties:
compatible:
const: linux,spdif-dit
@@ -16,6 +19,8 @@ properties:
"#sound-dai-cells":
const: 0
+ sound-name-prefix: true
+
required:
- "#sound-dai-cells"
- compatible
diff --git a/Documentation/devicetree/bindings/sound/mt8195-afe-pcm.yaml b/Documentation/devicetree/bindings/sound/mt8195-afe-pcm.yaml
index dcf790b053d2..6d0975b33d15 100644
--- a/Documentation/devicetree/bindings/sound/mt8195-afe-pcm.yaml
+++ b/Documentation/devicetree/bindings/sound/mt8195-afe-pcm.yaml
@@ -19,6 +19,12 @@ properties:
interrupts:
maxItems: 1
+ memory-region:
+ maxItems: 1
+ description: |
+      Shared memory region for AFE memif; must be a "shared-dma-pool".
+ See ../reserved-memory/reserved-memory.txt for details.
+
mediatek,topckgen:
$ref: "/schemas/types.yaml#/definitions/phandle"
description: The phandle of the mediatek topckgen controller
@@ -125,6 +131,7 @@ required:
- power-domains
- clocks
- clock-names
+ - memory-region
additionalProperties: false
@@ -139,6 +146,7 @@ examples:
interrupts = <GIC_SPI 822 IRQ_TYPE_LEVEL_HIGH 0>;
mediatek,topckgen = <&topckgen>;
power-domains = <&spm 7>; //MT8195_POWER_DOMAIN_AUDIO
+ memory-region = <&snd_dma_mem_reserved>;
clocks = <&clk26m>,
<&topckgen 163>, //CLK_TOP_APLL1
<&topckgen 166>, //CLK_TOP_APLL2
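The snd_dma_mem_reserved phandle above would refer to a reserved-memory
node of the "shared-dma-pool" kind. A minimal sketch, with placeholder
address and size:

    reserved-memory {
        #address-cells = <2>;
        #size-cells = <2>;
        ranges;

        /* address and size are placeholders; pick per board */
        snd_dma_mem_reserved: snd-dma-mem@60000000 {
            compatible = "shared-dma-pool";
            reg = <0 0x60000000 0 0x1100000>;
            no-map;
        };
    };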
diff --git a/Documentation/devicetree/bindings/sound/mt8195-mt6359-rt1011-rt5682.yaml b/Documentation/devicetree/bindings/sound/mt8195-mt6359-rt1011-rt5682.yaml
index d354c30d3377..cf6ad7933e23 100644
--- a/Documentation/devicetree/bindings/sound/mt8195-mt6359-rt1011-rt5682.yaml
+++ b/Documentation/devicetree/bindings/sound/mt8195-mt6359-rt1011-rt5682.yaml
@@ -16,6 +16,10 @@ properties:
compatible:
const: mediatek,mt8195_mt6359_rt1011_rt5682
+ model:
+ $ref: /schemas/types.yaml#/definitions/string
+    description: User-specified sound card name
+
mediatek,platform:
$ref: "/schemas/types.yaml#/definitions/phandle"
description: The phandle of MT8195 ASoC platform.
diff --git a/Documentation/devicetree/bindings/sound/mt8195-mt6359-rt1019-rt5682.yaml b/Documentation/devicetree/bindings/sound/mt8195-mt6359-rt1019-rt5682.yaml
index 20bc0ffd0e34..8f177e02ad35 100644
--- a/Documentation/devicetree/bindings/sound/mt8195-mt6359-rt1019-rt5682.yaml
+++ b/Documentation/devicetree/bindings/sound/mt8195-mt6359-rt1019-rt5682.yaml
@@ -16,6 +16,10 @@ properties:
compatible:
const: mediatek,mt8195_mt6359_rt1019_rt5682
+ model:
+ $ref: /schemas/types.yaml#/definitions/string
+    description: User-specified sound card name
+
mediatek,platform:
$ref: "/schemas/types.yaml#/definitions/phandle"
description: The phandle of MT8195 ASoC platform.
@@ -28,6 +32,16 @@ properties:
$ref: "/schemas/types.yaml#/definitions/phandle"
description: The phandle of MT8195 HDMI codec node.
+ mediatek,adsp:
+ $ref: "/schemas/types.yaml#/definitions/phandle"
+ description: The phandle of MT8195 ADSP platform.
+
+ mediatek,dai-link:
+ $ref: /schemas/types.yaml#/definitions/string-array
+ description:
+ A list of the desired dai-links in the sound card. Each entry is a
+ name defined in the machine driver.
+
additionalProperties: false
required:
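Taken together, the new properties let a board rename the card, hook it
to the ADSP and pick a subset of dai-links. A sketch under assumed
names (the mediatek,dai-link strings must match link names defined in
the machine driver; the ones below are placeholders):

    sound {
        compatible = "mediatek,mt8195_mt6359_rt1019_rt5682";
        model = "mt8195-demo-card";
        mediatek,platform = <&afe>;
        mediatek,adsp = <&adsp>;
        mediatek,dai-link = "ETDM1_IN_BE", "ETDM2_OUT_BE";
    };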
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.txt
deleted file mode 100644
index 57f40f93453e..000000000000
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-NVIDIA Tegra audio complex
-
-Required properties:
-- compatible : "nvidia,tegra-audio-alc5632"
-- clocks : Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names : Must include the following entries:
- - pll_a
- - pll_a_out0
- - mclk (The Tegra cdev1/extern1 clock, which feeds the CODEC's mclk)
-- nvidia,model : The user-visible name of this sound complex.
-- nvidia,audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the connection's sink,
- the second being the connection's source. Valid names for sources and
- sinks are the ALC5632's pins as documented in the binding for the device
- and:
-
- * Headset Stereophone
- * Int Spk
- * Headset Mic
- * Digital Mic
-
-- nvidia,i2s-controller : The phandle of the Tegra I2S controller
-- nvidia,audio-codec : The phandle of the ALC5632 audio codec
-
-Example:
-
-sound {
- compatible = "nvidia,tegra-audio-alc5632-paz00",
- "nvidia,tegra-audio-alc5632";
-
- nvidia,model = "Compal PAZ00";
-
- nvidia,audio-routing =
- "Int Spk", "SPK_OUTP",
- "Int Spk", "SPK_OUTN",
- "Headset Mic","MICBIAS1",
- "MIC1_N", "Headset Mic",
- "MIC1_P", "Headset Mic",
- "Headset Stereophone", "HP_OUT_R",
- "Headset Stereophone", "HP_OUT_L";
-
- nvidia,i2s-controller = <&tegra_i2s1>;
- nvidia,audio-codec = <&alc5632>;
-
- clocks = <&tegra_car 112>, <&tegra_car 113>, <&tegra_car 93>;
- clock-names = "pll_a", "pll_a_out0", "mclk";
-};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.yaml
new file mode 100644
index 000000000000..7ef774910e5c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra-audio-alc5632.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra audio complex with ALC5632 CODEC
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+allOf:
+ - $ref: nvidia,tegra-audio-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - pattern: '^[a-z0-9]+,tegra-audio-alc5632(-[a-z0-9]+)+$'
+ - const: nvidia,tegra-audio-alc5632
+
+ nvidia,audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources and
+ sinks are the pins (documented in the binding document),
+ and the jacks on the board.
+ minItems: 2
+ items:
+ enum:
+ # Board Connectors
+ - "Headset Stereophone"
+ - "Int Spk"
+ - "Headset Mic"
+ - "Digital Mic"
+
+ # CODEC Pins
+ - SPKOUT
+ - SPKOUTN
+ - MICBIAS1
+ - MIC1
+ - HPR
+ - HPL
+ - DMICDAT
+
+required:
+ - nvidia,i2s-controller
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sound {
+ compatible = "nvidia,tegra-audio-alc5632-paz00",
+ "nvidia,tegra-audio-alc5632";
+
+ nvidia,model = "Compal PAZ00";
+
+ nvidia,audio-routing = "Int Spk", "SPKOUT",
+ "Int Spk", "SPKOUTN",
+ "Headset Mic", "MICBIAS1",
+ "MIC1", "Headset Mic",
+ "Headset Stereophone", "HPR",
+ "Headset Stereophone", "HPL",
+ "DMICDAT", "Digital Mic";
+
+ nvidia,i2s-controller = <&i2s>;
+ nvidia,audio-codec = <&codec>;
+
+ clocks = <&clk 112>, <&clk 113>, <&clk 93>;
+ clock-names = "pll_a", "pll_a_out0", "mclk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-common.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-common.yaml
new file mode 100644
index 000000000000..82801b4f46dd
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-common.yaml
@@ -0,0 +1,83 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/sound/nvidia,tegra-audio-common.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Common properties for NVIDIA Tegra audio complexes
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+properties:
+ clocks:
+ items:
+ - description: PLL A clock
+ - description: PLL A OUT0 clock
+ - description: The Tegra cdev1/extern1 clock, which feeds the card's mclk
+
+ clock-names:
+ items:
+ - const: pll_a
+ - const: pll_a_out0
+ - const: mclk
+
+ nvidia,model:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: The user-visible name of this sound complex.
+
+ nvidia,audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources and
+ sinks are the pins (documented in the binding document),
+ and the jacks on the board.
+
+ nvidia,ac97-controller:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of the AC97 controller
+
+ nvidia,i2s-controller:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of the Tegra I2S controller
+
+ nvidia,audio-codec:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: The phandle of audio codec
+
+ nvidia,spkr-en-gpios:
+ maxItems: 1
+ description: The GPIO that enables the speakers
+
+ nvidia,hp-mute-gpios:
+ maxItems: 1
+ description: The GPIO that mutes the headphones
+
+ nvidia,hp-det-gpios:
+ maxItems: 1
+    description: The GPIO that detects whether headphones are plugged in
+
+ nvidia,mic-det-gpios:
+ maxItems: 1
+    description: The GPIO that detects whether a microphone is plugged in
+
+ nvidia,ear-sel-gpios:
+ maxItems: 1
+    description: The GPIO that switches between the microphones
+
+ nvidia,int-mic-en-gpios:
+ maxItems: 1
+ description: The GPIO that enables the internal microphone
+
+ nvidia,ext-mic-en-gpios:
+ maxItems: 1
+ description: The GPIO that enables the external microphone
+
+ nvidia,headset:
+ type: boolean
+    description: The Mic Jack represents the state of the headset microphone pin
+
+additionalProperties: true
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml
index 5bdd30a8a404..b4bee466d67a 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-graph-card.yaml
@@ -44,6 +44,16 @@ properties:
minItems: 1
maxItems: 3
+ interconnects:
+ items:
+ - description: APE read memory client
+ - description: APE write memory client
+
+ interconnect-names:
+ items:
+ - const: dma-mem # read
+ - const: write
+
iommus:
maxItems: 1
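The two interconnect paths correspond to the APE read and write memory
clients. A sketch assuming Tegra194 memory-controller defines (the
exact client IDs are SoC-specific and shown here only as an example):

    interconnects = <&mc TEGRA194_MEMORY_CLIENT_APEDMAR &emc>,
                    <&mc TEGRA194_MEMORY_CLIENT_APEDMAW &emc>;
    interconnect-names = "dma-mem", "write";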
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt
deleted file mode 100644
index c3495beba358..000000000000
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.txt
+++ /dev/null
@@ -1,53 +0,0 @@
-NVIDIA Tegra audio complex, with MAX98090 CODEC
-
-Required properties:
-- compatible : "nvidia,tegra-audio-max98090"
-- clocks : Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names : Must include the following entries:
- - pll_a
- - pll_a_out0
- - mclk (The Tegra cdev1/extern1 clock, which feeds the CODEC's mclk)
-- nvidia,model : The user-visible name of this sound complex.
-- nvidia,audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the connection's sink,
- the second being the connection's source. Valid names for sources and
- sinks are the MAX98090's pins (as documented in its binding), and the jacks
- on the board:
-
- * Headphones
- * Speakers
- * Mic Jack
- * Int Mic
-
-- nvidia,i2s-controller : The phandle of the Tegra I2S controller that's
- connected to the CODEC.
-- nvidia,audio-codec : The phandle of the MAX98090 audio codec.
-
-Optional properties:
-- nvidia,hp-det-gpios : The GPIO that detect headphones are plugged in
-- nvidia,mic-det-gpios : The GPIO that detect microphones are plugged in
-
-Example:
-
-sound {
- compatible = "nvidia,tegra-audio-max98090-venice2",
- "nvidia,tegra-audio-max98090";
- nvidia,model = "NVIDIA Tegra Venice2";
-
- nvidia,audio-routing =
- "Headphones", "HPR",
- "Headphones", "HPL",
- "Speakers", "SPKR",
- "Speakers", "SPKL",
- "Mic Jack", "MICBIAS",
- "IN34", "Mic Jack";
-
- nvidia,i2s-controller = <&tegra_i2s1>;
- nvidia,audio-codec = <&acodec>;
-
- clocks = <&tegra_car TEGRA124_CLK_PLL_A>,
- <&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA124_CLK_EXTERN1>;
- clock-names = "pll_a", "pll_a_out0", "mclk";
-};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.yaml
new file mode 100644
index 000000000000..ccc2ee77ca30
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-max98090.yaml
@@ -0,0 +1,97 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra-audio-max98090.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra audio complex with MAX98090 CODEC
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+allOf:
+ - $ref: nvidia,tegra-audio-common.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - pattern: '^[a-z0-9]+,tegra-audio-max98090(-[a-z0-9]+)+$'
+ - const: nvidia,tegra-audio-max98090
+ - items:
+ - enum:
+ - nvidia,tegra-audio-max98090-nyan-big
+ - nvidia,tegra-audio-max98090-nyan-blaze
+ - const: nvidia,tegra-audio-max98090-nyan
+ - const: nvidia,tegra-audio-max98090
+
+ nvidia,audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources and
+ sinks are the pins (documented in the binding document),
+ and the jacks on the board.
+ minItems: 2
+ items:
+ enum:
+ # Board Connectors
+ - "Headphones"
+ - "Speakers"
+ - "Mic Jack"
+ - "Int Mic"
+
+ # CODEC Pins
+ - MIC1
+ - MIC2
+ - DMICL
+ - DMICR
+ - IN1
+ - IN2
+ - IN3
+ - IN4
+ - IN5
+ - IN6
+ - IN12
+ - IN34
+ - IN56
+ - HPL
+ - HPR
+ - SPKL
+ - SPKR
+ - RCVL
+ - RCVR
+ - MICBIAS
+
+required:
+ - nvidia,i2s-controller
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra124-car.h>
+
+ sound {
+ compatible = "nvidia,tegra-audio-max98090-venice2",
+ "nvidia,tegra-audio-max98090";
+ nvidia,model = "NVIDIA Tegra Venice2";
+
+ nvidia,audio-routing =
+ "Headphones", "HPR",
+ "Headphones", "HPL",
+ "Speakers", "SPKR",
+ "Speakers", "SPKL",
+ "Mic Jack", "MICBIAS",
+ "IN34", "Mic Jack";
+
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,audio-codec = <&acodec>;
+
+ clocks = <&tegra_car TEGRA124_CLK_PLL_A>,
+ <&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA124_CLK_EXTERN1>;
+ clock-names = "pll_a", "pll_a_out0", "mclk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.txt
deleted file mode 100644
index 7788808dcd0b..000000000000
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-NVIDIA Tegra audio complex, with RT5640 CODEC
-
-Required properties:
-- compatible : "nvidia,tegra-audio-rt5640"
-- clocks : Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names : Must include the following entries:
- - pll_a
- - pll_a_out0
- - mclk (The Tegra cdev1/extern1 clock, which feeds the CODEC's mclk)
-- nvidia,model : The user-visible name of this sound complex.
-- nvidia,audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the connection's sink,
- the second being the connection's source. Valid names for sources and
- sinks are the RT5640's pins (as documented in its binding), and the jacks
- on the board:
-
- * Headphones
- * Speakers
- * Mic Jack
-
-- nvidia,i2s-controller : The phandle of the Tegra I2S controller that's
- connected to the CODEC.
-- nvidia,audio-codec : The phandle of the RT5640 audio codec. This binding
- assumes that AIF1 on the CODEC is connected to Tegra.
-
-Optional properties:
-- nvidia,hp-det-gpios : The GPIO that detects headphones are plugged in
-
-Example:
-
-sound {
- compatible = "nvidia,tegra-audio-rt5640-dalmore",
- "nvidia,tegra-audio-rt5640";
- nvidia,model = "NVIDIA Tegra Dalmore";
-
- nvidia,audio-routing =
- "Headphones", "HPOR",
- "Headphones", "HPOL",
- "Speakers", "SPORP",
- "Speakers", "SPORN",
- "Speakers", "SPOLP",
- "Speakers", "SPOLN";
-
- nvidia,i2s-controller = <&tegra_i2s1>;
- nvidia,audio-codec = <&rt5640>;
-
- nvidia,hp-det-gpios = <&gpio 143 0>; /* GPIO PR7 */
-
- clocks = <&tegra_car 216>, <&tegra_car 217>, <&tegra_car 120>;
- clock-names = "pll_a", "pll_a_out0", "mclk";
-};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.yaml
new file mode 100644
index 000000000000..e768fb0e9a59
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra-audio-rt5640.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra audio complex with RT5639 or RT5640 CODEC
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+allOf:
+ - $ref: nvidia,tegra-audio-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - pattern: '^[a-z0-9]+,tegra-audio-rt56(39|40)(-[a-z0-9]+)+$'
+ - const: nvidia,tegra-audio-rt5640
+
+ nvidia,audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources and
+ sinks are the pins (documented in the binding document),
+ and the jacks on the board.
+ minItems: 2
+ items:
+ enum:
+ # Board Connectors
+ - "Headphones"
+ - "Speakers"
+ - "Mic Jack"
+
+ # CODEC Pins
+ - DMIC1
+ - DMIC2
+ - MICBIAS1
+ - IN1P
+ - IN1R
+ - IN2P
+ - IN2R
+ - HPOL
+ - HPOR
+ - LOUTL
+ - LOUTR
+ - MONOP
+ - MONON
+ - SPOLP
+ - SPOLN
+ - SPORP
+ - SPORN
+
+required:
+ - nvidia,i2s-controller
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sound {
+ compatible = "nvidia,tegra-audio-rt5640-dalmore",
+ "nvidia,tegra-audio-rt5640";
+ nvidia,model = "NVIDIA Tegra Dalmore";
+
+ nvidia,audio-routing =
+ "Headphones", "HPOR",
+ "Headphones", "HPOL",
+ "Speakers", "SPORP",
+ "Speakers", "SPORN",
+ "Speakers", "SPOLP",
+ "Speakers", "SPOLN";
+
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,audio-codec = <&rt5640>;
+
+ nvidia,hp-det-gpios = <&gpio 143 0>;
+
+ clocks = <&clk 216>, <&clk 217>, <&clk 120>;
+ clock-names = "pll_a", "pll_a_out0", "mclk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5677.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5677.txt
deleted file mode 100644
index a4589cda214e..000000000000
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5677.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-NVIDIA Tegra audio complex, with RT5677 CODEC
-
-Required properties:
-- compatible : "nvidia,tegra-audio-rt5677"
-- clocks : Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names : Must include the following entries:
- - pll_a
- - pll_a_out0
- - mclk (The Tegra cdev1/extern1 clock, which feeds the CODEC's mclk)
-- nvidia,model : The user-visible name of this sound complex.
-- nvidia,audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the connection's sink,
- the second being the connection's source. Valid names for sources and
- sinks are the RT5677's pins (as documented in its binding), and the jacks
- on the board:
-
- * Headphone
- * Speaker
- * Headset Mic
- * Internal Mic 1
- * Internal Mic 2
-
-- nvidia,i2s-controller : The phandle of the Tegra I2S controller that's
- connected to the CODEC.
-- nvidia,audio-codec : The phandle of the RT5677 audio codec. This binding
- assumes that AIF1 on the CODEC is connected to Tegra.
-
-Optional properties:
-- nvidia,hp-det-gpios : The GPIO that detects headphones are plugged in
-- nvidia,hp-en-gpios : The GPIO that enables headphone amplifier
-- nvidia,mic-present-gpios: The GPIO that mic jack is plugged in
-- nvidia,dmic-clk-en-gpios : The GPIO that gates DMIC clock signal
-
-Example:
-
-sound {
- compatible = "nvidia,tegra-audio-rt5677-ryu",
- "nvidia,tegra-audio-rt5677";
- nvidia,model = "NVIDIA Tegra Ryu";
-
- nvidia,audio-routing =
- "Headphone", "LOUT2",
- "Headphone", "LOUT1",
- "Headset Mic", "MICBIAS1",
- "IN1P", "Headset Mic",
- "IN1N", "Headset Mic",
- "DMIC L1", "Internal Mic 1",
- "DMIC R1", "Internal Mic 1",
- "DMIC L2", "Internal Mic 2",
- "DMIC R2", "Internal Mic 2",
- "Speaker", "PDM1L",
- "Speaker", "PDM1R";
-
- nvidia,i2s-controller = <&tegra_i2s1>;
- nvidia,audio-codec = <&rt5677>;
-
- nvidia,hp-det-gpios = <&gpio TEGRA_GPIO(R, 7) GPIO_ACTIVE_HIGH>;
- nvidia,mic-present-gpios = <&gpio TEGRA_GPIO(O, 5) GPIO_ACTIVE_LOW>;
- nvidia,hp-en-gpios = <&rt5677 1 GPIO_ACTIVE_HIGH>;
- nvidia,dmic-clk-en-gpios = <&rt5677 2 GPIO_ACTIVE_HIGH>;
-
- clocks = <&tegra_car TEGRA124_CLK_PLL_A>,
- <&tegra_car TEGRA124_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA124_CLK_EXTERN1>;
- clock-names = "pll_a", "pll_a_out0", "mclk";
-};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5677.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5677.yaml
new file mode 100644
index 000000000000..a49997d6028b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5677.yaml
@@ -0,0 +1,100 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra-audio-rt5677.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra audio complex with RT5677 CODEC
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+allOf:
+ - $ref: nvidia,tegra-audio-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - pattern: '^[a-z0-9]+,tegra-audio-rt5677(-[a-z0-9]+)+$'
+ - const: nvidia,tegra-audio-rt5677
+
+ nvidia,audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources and
+ sinks are the pins (documented in the binding document),
+ and the jacks on the board.
+ minItems: 2
+ items:
+ enum:
+ # Board Connectors
+ - "Headphone"
+ - "Speaker"
+ - "Headset Mic"
+ - "Internal Mic 1"
+ - "Internal Mic 2"
+
+ # CODEC Pins
+ - IN1P
+ - IN1N
+ - IN2P
+ - IN2N
+ - MICBIAS1
+ - DMIC1
+ - DMIC2
+ - DMIC3
+ - DMIC4
+ - "DMIC L1"
+ - "DMIC L2"
+ - "DMIC L3"
+ - "DMIC L4"
+ - "DMIC R1"
+ - "DMIC R2"
+ - "DMIC R3"
+ - "DMIC R4"
+ - LOUT1
+ - LOUT2
+ - LOUT3
+ - PDM1L
+ - PDM1R
+ - PDM2L
+ - PDM2R
+
+required:
+ - nvidia,i2s-controller
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sound {
+ compatible = "nvidia,tegra-audio-rt5677-ryu",
+ "nvidia,tegra-audio-rt5677";
+ nvidia,model = "NVIDIA Tegra Ryu";
+
+ nvidia,audio-routing =
+ "Headphone", "LOUT2",
+ "Headphone", "LOUT1",
+ "Headset Mic", "MICBIAS1",
+ "IN1P", "Headset Mic",
+ "IN1N", "Headset Mic",
+ "DMIC L1", "Internal Mic 1",
+ "DMIC R1", "Internal Mic 1",
+ "DMIC L2", "Internal Mic 2",
+ "DMIC R2", "Internal Mic 2",
+ "Speaker", "PDM1L",
+ "Speaker", "PDM1R";
+
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,audio-codec = <&rt5677>;
+
+ nvidia,hp-det-gpios = <&gpio 143 0>;
+
+ clocks = <&clk 216>,
+ <&clk 217>,
+ <&clk 121>;
+ clock-names = "pll_a", "pll_a_out0", "mclk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-sgtl5000.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-sgtl5000.txt
deleted file mode 100644
index 5da7da4ea07a..000000000000
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-sgtl5000.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-NVIDIA Tegra audio complex, with SGTL5000 CODEC
-
-Required properties:
-- compatible : "nvidia,tegra-audio-sgtl5000"
-- clocks : Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names : Must include the following entries:
- - pll_a
- - pll_a_out0
- - mclk (The Tegra cdev1/extern1 clock, which feeds the CODEC's mclk)
-- nvidia,model : The user-visible name of this sound complex.
-- nvidia,audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the connection's sink,
- the second being the connection's source. Valid names for sources and
- sinks are the SGTL5000's pins (as documented in its binding), and the jacks
- on the board:
-
- * Headphone Jack
- * Line In Jack
- * Mic Jack
-
-- nvidia,i2s-controller : The phandle of the Tegra I2S controller that's
- connected to the CODEC.
-- nvidia,audio-codec : The phandle of the SGTL5000 audio codec.
-
-Example:
-
-sound {
- compatible = "toradex,tegra-audio-sgtl5000-apalis_t30",
- "nvidia,tegra-audio-sgtl5000";
- nvidia,model = "Toradex Apalis T30";
- nvidia,audio-routing =
- "Headphone Jack", "HP_OUT",
- "LINE_IN", "Line In Jack",
- "MIC_IN", "Mic Jack";
- nvidia,i2s-controller = <&tegra_i2s2>;
- nvidia,audio-codec = <&sgtl5000>;
- clocks = <&tegra_car TEGRA30_CLK_PLL_A>,
- <&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
- <&tegra_car TEGRA30_CLK_EXTERN1>;
- clock-names = "pll_a", "pll_a_out0", "mclk";
-};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-sgtl5000.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-sgtl5000.yaml
new file mode 100644
index 000000000000..943e7c01741c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-sgtl5000.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra-audio-sgtl5000.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra audio complex with SGTL5000 CODEC
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+allOf:
+ - $ref: nvidia,tegra-audio-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - pattern: '^[a-z0-9]+,tegra-audio-sgtl5000([-_][a-z0-9]+)+$'
+ - const: nvidia,tegra-audio-sgtl5000
+
+ nvidia,audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources and
+ sinks are the pins (documented in the binding document),
+ and the jacks on the board.
+ minItems: 2
+ items:
+ enum:
+ # Board Connectors
+ - "Headphone Jack"
+ - "Line In Jack"
+ - "Mic Jack"
+
+ # CODEC Pins
+ - HP_OUT
+ - LINE_OUT
+ - LINE_IN
+ - MIC_IN
+
+required:
+ - nvidia,i2s-controller
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/tegra30-car.h>
+
+ sound {
+ compatible = "toradex,tegra-audio-sgtl5000-apalis_t30",
+ "nvidia,tegra-audio-sgtl5000";
+ nvidia,model = "Toradex Apalis T30 SGTL5000";
+ nvidia,audio-routing =
+ "Headphone Jack", "HP_OUT",
+ "LINE_IN", "Line In Jack",
+ "MIC_IN", "Mic Jack";
+ nvidia,i2s-controller = <&tegra_i2s2>;
+ nvidia,audio-codec = <&codec>;
+ clocks = <&tegra_car TEGRA30_CLK_PLL_A>,
+ <&tegra_car TEGRA30_CLK_PLL_A_OUT0>,
+ <&tegra_car TEGRA30_CLK_EXTERN1>;
+ clock-names = "pll_a", "pll_a_out0", "mclk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-trimslice.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-trimslice.txt
deleted file mode 100644
index ef1fe7358279..000000000000
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-trimslice.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-NVIDIA Tegra audio complex for TrimSlice
-
-Required properties:
-- compatible : "nvidia,tegra-audio-trimslice"
-- clocks : Must contain an entry for each entry in clock-names.
-- clock-names : Must include the following entries:
- "pll_a" (The Tegra clock of that name),
- "pll_a_out0" (The Tegra clock of that name),
- "mclk" (The Tegra cdev1/extern1 clock, which feeds the CODEC's mclk)
-- nvidia,i2s-controller : The phandle of the Tegra I2S1 controller
-- nvidia,audio-codec : The phandle of the WM8903 audio codec
-
-Example:
-
-sound {
- compatible = "nvidia,tegra-audio-trimslice";
- nvidia,i2s-controller = <&tegra_i2s1>;
- nvidia,audio-codec = <&codec>;
- clocks = <&tegra_car 112>, <&tegra_car 113>, <&tegra_car 93>;
- clock-names = "pll_a", "pll_a_out0", "mclk";
-};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-trimslice.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-trimslice.yaml
new file mode 100644
index 000000000000..8c87cd166238
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-trimslice.yaml
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra-audio-trimslice.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra audio complex with TrimSlice CODEC
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+allOf:
+ - $ref: nvidia,tegra-audio-common.yaml#
+
+properties:
+ compatible:
+ const: nvidia,tegra-audio-trimslice
+
+required:
+ - nvidia,i2s-controller
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sound {
+ compatible = "nvidia,tegra-audio-trimslice";
+ nvidia,i2s-controller = <&tegra_i2s1>;
+ nvidia,audio-codec = <&codec>;
+ clocks = <&tegra_car 112>, <&tegra_car 113>, <&tegra_car 93>;
+ clock-names = "pll_a", "pll_a_out0", "mclk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.txt
deleted file mode 100644
index 96f6a57dd6b4..000000000000
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-NVIDIA Tegra audio complex
-
-Required properties:
-- compatible : "nvidia,tegra-audio-wm8753"
-- clocks : Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names : Must include the following entries:
- - pll_a
- - pll_a_out0
- - mclk (The Tegra cdev1/extern1 clock, which feeds the CODEC's mclk)
-- nvidia,model : The user-visible name of this sound complex.
-- nvidia,audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the connection's sink,
- the second being the connection's source. Valid names for sources and
- sinks are the WM8753's pins as documented in the binding for the WM8753,
- and the jacks on the board:
-
- * Headphone Jack
- * Mic Jack
-
-- nvidia,i2s-controller : The phandle of the Tegra I2S1 controller
-- nvidia,audio-codec : The phandle of the WM8753 audio codec
-Example:
-
-sound {
- compatible = "nvidia,tegra-audio-wm8753-whistler",
- "nvidia,tegra-audio-wm8753"
- nvidia,model = "tegra-wm8753-harmony";
-
- nvidia,audio-routing =
- "Headphone Jack", "LOUT1",
- "Headphone Jack", "ROUT1";
-
- nvidia,i2s-controller = <&i2s1>;
- nvidia,audio-codec = <&wm8753>;
-
- clocks = <&tegra_car 112>, <&tegra_car 113>, <&tegra_car 93>;
- clock-names = "pll_a", "pll_a_out0", "mclk";
-};
-
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.yaml
new file mode 100644
index 000000000000..a5b431d7d0c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.yaml
@@ -0,0 +1,79 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra-audio-wm8753.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra audio complex with WM8753 CODEC
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+allOf:
+ - $ref: nvidia,tegra-audio-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - pattern: '^[a-z0-9]+,tegra-audio-wm8753(-[a-z0-9]+)+$'
+ - const: nvidia,tegra-audio-wm8753
+
+ nvidia,audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources and
+ sinks are the pins (documented in the binding document),
+ and the jacks on the board.
+ minItems: 2
+ items:
+ enum:
+ # Board Connectors
+ - "Headphone Jack"
+ - "Mic Jack"
+
+ # CODEC Pins
+ - LOUT1
+ - LOUT2
+ - ROUT1
+ - ROUT2
+ - MONO1
+ - MONO2
+ - OUT3
+ - OUT4
+ - LINE1
+ - LINE2
+ - RXP
+ - RXN
+ - ACIN
+ - ACOP
+ - MIC1N
+ - MIC1
+ - MIC2N
+ - MIC2
+ - "Mic Bias"
+
+required:
+ - nvidia,i2s-controller
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sound {
+ compatible = "nvidia,tegra-audio-wm8753-whistler",
+ "nvidia,tegra-audio-wm8753";
+ nvidia,model = "tegra-wm8753-harmony";
+
+ nvidia,audio-routing =
+ "Headphone Jack", "LOUT1",
+ "Headphone Jack", "ROUT1";
+
+ nvidia,i2s-controller = <&i2s1>;
+ nvidia,audio-codec = <&wm8753>;
+
+ clocks = <&clk 112>, <&clk 113>, <&clk 93>;
+ clock-names = "pll_a", "pll_a_out0", "mclk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
deleted file mode 100644
index bbd581a8c5bc..000000000000
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-NVIDIA Tegra audio complex
-
-Required properties:
-- compatible : "nvidia,tegra-audio-wm8903"
-- clocks : Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names : Must include the following entries:
- - pll_a
- - pll_a_out0
- - mclk (The Tegra cdev1/extern1 clock, which feeds the CODEC's mclk)
-- nvidia,model : The user-visible name of this sound complex.
-- nvidia,audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the connection's sink,
- the second being the connection's source. Valid names for sources and
- sinks are the WM8903's pins (documented in the WM8903 binding document),
- and the jacks on the board:
-
- * Headphone Jack
- * Int Spk
- * Mic Jack
- * Int Mic
-
-- nvidia,i2s-controller : The phandle of the Tegra I2S1 controller
-- nvidia,audio-codec : The phandle of the WM8903 audio codec
-
-Optional properties:
-- nvidia,spkr-en-gpios : The GPIO that enables the speakers
-- nvidia,hp-mute-gpios : The GPIO that mutes the headphones
-- nvidia,hp-det-gpios : The GPIO that detect headphones are plugged in
-- nvidia,int-mic-en-gpios : The GPIO that enables the internal microphone
-- nvidia,ext-mic-en-gpios : The GPIO that enables the external microphone
-- nvidia,headset : The Mic Jack represents state of the headset microphone pin
-
-Example:
-
-sound {
- compatible = "nvidia,tegra-audio-wm8903-harmony",
- "nvidia,tegra-audio-wm8903"
- nvidia,model = "tegra-wm8903-harmony";
-
- nvidia,audio-routing =
- "Headphone Jack", "HPOUTR",
- "Headphone Jack", "HPOUTL",
- "Int Spk", "ROP",
- "Int Spk", "RON",
- "Int Spk", "LOP",
- "Int Spk", "LON",
- "Mic Jack", "MICBIAS",
- "IN1L", "Mic Jack";
-
- nvidia,i2s-controller = <&i2s1>;
- nvidia,audio-codec = <&wm8903>;
-
- nvidia,spkr-en-gpios = <&codec 2 0>;
- nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
- nvidia,int-mic-en-gpios = <&gpio 184 0>; /*gpio PX0 */
- nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */
-
- clocks = <&tegra_car 112>, <&tegra_car 113>, <&tegra_car 93>;
- clock-names = "pll_a", "pll_a_out0", "mclk";
-};
-
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.yaml
new file mode 100644
index 000000000000..1b836acab980
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra-audio-wm8903.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra audio complex with WM8903 CODEC
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+allOf:
+ - $ref: nvidia,tegra-audio-common.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - items:
+ - pattern: '^[a-z0-9]+,tegra-audio-wm8903(-[a-z0-9]+)+$'
+ - const: nvidia,tegra-audio-wm8903
+ - items:
+ - pattern: ad,tegra-audio-plutux
+ - const: nvidia,tegra-audio-wm8903
+
+ nvidia,audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources and
+ sinks are the pins (documented in the binding document),
+ and the jacks on the board.
+ minItems: 2
+ items:
+ enum:
+ # Board Connectors
+ - "Headphone Jack"
+ - "Int Spk"
+ - "Mic Jack"
+ - "Int Mic"
+
+ # CODEC Pins
+ - IN1L
+ - IN1R
+ - IN2L
+ - IN2R
+ - IN3L
+ - IN3R
+ - DMICDAT
+ - HPOUTL
+ - HPOUTR
+ - LINEOUTL
+ - LINEOUTR
+ - LOP
+ - LON
+ - ROP
+ - RON
+ - MICBIAS
+
+required:
+ - nvidia,i2s-controller
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sound {
+ compatible = "nvidia,tegra-audio-wm8903-harmony",
+ "nvidia,tegra-audio-wm8903";
+ nvidia,model = "tegra-wm8903-harmony";
+
+ nvidia,audio-routing =
+ "Headphone Jack", "HPOUTR",
+ "Headphone Jack", "HPOUTL",
+ "Int Spk", "ROP",
+ "Int Spk", "RON",
+ "Int Spk", "LOP",
+ "Int Spk", "LON",
+ "Mic Jack", "MICBIAS",
+ "IN1L", "Mic Jack";
+
+ nvidia,i2s-controller = <&i2s1>;
+ nvidia,audio-codec = <&wm8903>;
+
+ nvidia,spkr-en-gpios = <&codec 2 0>;
+ nvidia,hp-det-gpios = <&gpio 178 0>;
+ nvidia,int-mic-en-gpios = <&gpio 184 0>;
+ nvidia,ext-mic-en-gpios = <&gpio 185 0>;
+
+ clocks = <&clk 112>, <&clk 113>, <&clk 93>;
+ clock-names = "pll_a", "pll_a_out0", "mclk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm9712.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm9712.txt
deleted file mode 100644
index 436f6cd9d07c..000000000000
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm9712.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-NVIDIA Tegra audio complex
-
-Required properties:
-- compatible : "nvidia,tegra-audio-wm9712"
-- clocks : Must contain an entry for each entry in clock-names.
- See ../clocks/clock-bindings.txt for details.
-- clock-names : Must include the following entries:
- - pll_a
- - pll_a_out0
- - mclk (The Tegra cdev1/extern1 clock, which feeds the CODEC's mclk)
-- nvidia,model : The user-visible name of this sound complex.
-- nvidia,audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the connection's sink,
- the second being the connection's source. Valid names for sources and
- sinks are the WM9712's pins, and the jacks on the board:
-
- WM9712 pins:
-
- * MONOOUT
- * HPOUTL
- * HPOUTR
- * LOUT2
- * ROUT2
- * OUT3
- * LINEINL
- * LINEINR
- * PHONE
- * PCBEEP
- * MIC1
- * MIC2
- * Mic Bias
-
- Board connectors:
-
- * Headphone
- * LineIn
- * Mic
-
-- nvidia,ac97-controller : The phandle of the Tegra AC97 controller
-
-
-Example:
-
-sound {
- compatible = "nvidia,tegra-audio-wm9712-colibri_t20",
- "nvidia,tegra-audio-wm9712";
- nvidia,model = "Toradex Colibri T20";
-
- nvidia,audio-routing =
- "Headphone", "HPOUTL",
- "Headphone", "HPOUTR",
- "LineIn", "LINEINL",
- "LineIn", "LINEINR",
- "Mic", "MIC1";
-
- nvidia,ac97-controller = <&ac97>;
-
- clocks = <&tegra_car 112>, <&tegra_car 113>, <&tegra_car 93>;
- clock-names = "pll_a", "pll_a_out0", "mclk";
-};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm9712.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm9712.yaml
new file mode 100644
index 000000000000..a1448283344b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm9712.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra-audio-wm9712.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra audio complex with WM9712 CODEC
+
+maintainers:
+ - Jon Hunter <jonathanh@nvidia.com>
+ - Thierry Reding <thierry.reding@gmail.com>
+
+allOf:
+ - $ref: nvidia,tegra-audio-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - pattern: '^[a-z0-9]+,tegra-audio-wm9712([-_][a-z0-9]+)+$'
+ - const: nvidia,tegra-audio-wm9712
+
+ nvidia,audio-routing:
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+ description: |
+ A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources and
+ sinks are the pins (documented in the binding document),
+ and the jacks on the board.
+ minItems: 2
+ items:
+ enum:
+ # Board Connectors
+ - "Headphone"
+ - "LineIn"
+ - "Mic"
+
+ # CODEC Pins
+ - MONOOUT
+ - HPOUTL
+ - HPOUTR
+ - LOUT2
+ - ROUT2
+ - OUT3
+ - LINEINL
+ - LINEINR
+ - PHONE
+ - PCBEEP
+ - MIC1
+ - MIC2
+ - "Mic Bias"
+
+required:
+ - nvidia,ac97-controller
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ sound {
+ compatible = "nvidia,tegra-audio-wm9712-colibri_t20",
+ "nvidia,tegra-audio-wm9712";
+ nvidia,model = "Toradex Colibri T20";
+
+ nvidia,audio-routing =
+ "Headphone", "HPOUTL",
+ "Headphone", "HPOUTR",
+ "LineIn", "LINEINL",
+ "LineIn", "LINEINR",
+ "Mic", "MIC1";
+
+ nvidia,ac97-controller = <&ac97>;
+
+ clocks = <&clk 112>, <&clk 113>, <&clk 93>;
+ clock-names = "pll_a", "pll_a_out0", "mclk";
+ };
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra20-i2s.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra20-i2s.txt
deleted file mode 100644
index dc30c6bfbe95..000000000000
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra20-i2s.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-NVIDIA Tegra 20 I2S controller
-
-Required properties:
-- compatible : "nvidia,tegra20-i2s"
-- reg : Should contain I2S registers location and length
-- interrupts : Should contain I2S interrupt
-- resets : Must contain an entry for each entry in reset-names.
- See ../reset/reset.txt for details.
-- reset-names : Must include the following entries:
- - i2s
-- dmas : Must contain an entry for each entry in clock-names.
- See ../dma/dma.txt for details.
-- dma-names : Must include the following entries:
- - rx
- - tx
-- clocks : Must contain one entry, for the module clock.
- See ../clocks/clock-bindings.txt for details.
-
-Example:
-
-i2s@70002800 {
- compatible = "nvidia,tegra20-i2s";
- reg = <0x70002800 0x200>;
- interrupts = < 45 >;
- clocks = <&tegra_car 11>;
- resets = <&tegra_car 11>;
- reset-names = "i2s";
- dmas = <&apbdma 21>, <&apbdma 21>;
- dma-names = "rx", "tx";
-};
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra20-i2s.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra20-i2s.yaml
new file mode 100644
index 000000000000..68ae124eaf80
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra20-i2s.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra20-i2s.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra20 I2S Controller
+
+description: |
+ The I2S Controller streams synchronous serial audio data between system
+ memory and an external audio device. The controller supports the I2S Left
+ Justified Mode, Right Justified Mode, and DSP mode formats.
+
+maintainers:
+ - Thierry Reding <treding@nvidia.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+
+properties:
+ compatible:
+ const: nvidia,tegra20-i2s
+
+ reg:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ reset-names:
+ const: i2s
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+
+ dmas:
+ minItems: 2
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+ nvidia,fixed-parent-rate:
+ description: |
+      Specifies whether the board prefers the parent clock to stay at a
+      fixed rate. This allows multiple Tegra20 audio components to work
+      simultaneously by limiting the number of supported audio rates.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - resets
+ - reset-names
+ - interrupts
+ - clocks
+ - dmas
+ - dma-names
+
+additionalProperties: false
+
+examples:
+ - |
+ i2s@70002800 {
+ compatible = "nvidia,tegra20-i2s";
+ reg = <0x70002800 0x200>;
+ interrupts = <45>;
+ clocks = <&tegra_car 11>;
+ resets = <&tegra_car 11>;
+ reset-names = "i2s";
+ dmas = <&apbdma 21>, <&apbdma 21>;
+ dma-names = "rx", "tx";
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra20-spdif.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra20-spdif.yaml
new file mode 100644
index 000000000000..60a368a132b8
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra20-spdif.yaml
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/nvidia,tegra20-spdif.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NVIDIA Tegra20 S/PDIF Controller
+
+description: |
+ The S/PDIF controller supports both input and output in serial audio
+ digital interface format. The input controller can digitally recover
+ a clock from the received stream. The S/PDIF controller is also used
+  to generate the embedded audio for the HDMI output channel.
+
+maintainers:
+ - Thierry Reding <treding@nvidia.com>
+ - Jon Hunter <jonathanh@nvidia.com>
+
+properties:
+ compatible:
+ const: nvidia,tegra20-spdif
+
+ reg:
+ maxItems: 1
+
+ resets:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ minItems: 2
+
+ clock-names:
+ items:
+ - const: out
+ - const: in
+
+ dmas:
+ minItems: 2
+
+ dma-names:
+ items:
+ - const: rx
+ - const: tx
+
+ "#sound-dai-cells":
+ const: 0
+
+ nvidia,fixed-parent-rate:
+ description: |
+      Specifies whether the board prefers the parent clock to stay at a
+      fixed rate. This allows multiple Tegra20 audio components to work
+      simultaneously by limiting the number of supported audio rates.
+ type: boolean
+
+required:
+ - compatible
+ - reg
+ - resets
+ - interrupts
+ - clocks
+ - clock-names
+ - dmas
+ - dma-names
+ - "#sound-dai-cells"
+
+additionalProperties: false
+
+examples:
+ - |
+ spdif@70002400 {
+ compatible = "nvidia,tegra20-spdif";
+ reg = <0x70002400 0x200>;
+ interrupts = <77>;
+ clocks = <&clk 99>, <&clk 98>;
+ clock-names = "out", "in";
+ resets = <&rst 10>;
+ dmas = <&apbdma 3>, <&apbdma 3>;
+ dma-names = "rx", "tx";
+ #sound-dai-cells = <0>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml
index b55775e21de6..2c913aa44fee 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra30-hda.yaml
@@ -50,9 +50,11 @@ properties:
- const: hda2codec_2x
resets:
+ minItems: 2
maxItems: 3
reset-names:
+ minItems: 2
items:
- const: hda
- const: hda2hdmi
diff --git a/Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml b/Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml
index 7667471be1e4..b9b1dba40856 100644
--- a/Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml
+++ b/Documentation/devicetree/bindings/sound/nxp,tfa989x.yaml
@@ -24,11 +24,23 @@ properties:
'#sound-dai-cells':
const: 0
+ rcv-gpios:
+ description: optional GPIO to be asserted when receiver mode is enabled.
+
sound-name-prefix: true
vddd-supply:
description: regulator phandle for the VDDD power supply.
+if:
+ not:
+ properties:
+ compatible:
+ const: nxp,tfa9897
+then:
+ properties:
+ rcv-gpios: false
+
required:
- compatible
- reg
@@ -55,3 +67,32 @@ examples:
#sound-dai-cells = <0>;
};
};
+
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ speaker_codec_top: audio-codec@34 {
+ compatible = "nxp,tfa9897";
+ reg = <0x34>;
+ vddd-supply = <&pm8916_l6>;
+ rcv-gpios = <&msmgpio 50 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&speaker_top_default>;
+ sound-name-prefix = "Speaker Top";
+ #sound-dai-cells = <0>;
+ };
+
+ speaker_codec_bottom: audio-codec@36 {
+ compatible = "nxp,tfa9897";
+ reg = <0x36>;
+ vddd-supply = <&pm8916_l6>;
+ rcv-gpios = <&msmgpio 111 GPIO_ACTIVE_HIGH>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&speaker_bottom_default>;
+ sound-name-prefix = "Speaker Bottom";
+ #sound-dai-cells = <0>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
deleted file mode 100644
index 23998262a0a7..000000000000
--- a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
+++ /dev/null
@@ -1,96 +0,0 @@
-* Qualcomm Technologies APQ8016 SBC ASoC machine driver
-
-This node models the Qualcomm Technologies APQ8016 SBC ASoC machine driver
-
-Required properties:
-
-- compatible : "qcom,apq8016-sbc-sndcard"
-
-- pinctrl-N : One property must exist for each entry in
- pinctrl-names. See ../pinctrl/pinctrl-bindings.txt
- for details of the property values.
-- pinctrl-names : Must contain a "default" entry.
-- reg : Must contain an address for each entry in reg-names.
-- reg-names : A list which must include the following entries:
- * "mic-iomux"
- * "spkr-iomux"
-- qcom,model : Name of the sound card.
-
-- qcom,audio-routing : A list of the connections between audio components.
- Each entry is a pair of strings, the first being the
- connection's sink, the second being the connection's
- source. Valid names could be power supplies, MicBias
- of msm8x16_wcd codec and the jacks on the board:
-
- Power supplies:
- * MIC BIAS External1
- * MIC BIAS External2
- * MIC BIAS Internal1
- * MIC BIAS Internal2
-
- Board connectors:
- * Headset Mic
- * Secondary Mic
- * DMIC
- * Ext Spk
-
-Optional properties:
-
-- aux-devs : A list of phandles for auxiliary devices (e.g. analog
- amplifiers) that do not appear directly within the DAI
- links. Should be connected to another audio component
- using "qcom,audio-routing".
-
-Dai-link subnode properties and subnodes:
-
-Required dai-link subnodes:
-
-- cpu : CPU sub-node
-- codec : CODEC sub-node
-
-Required CPU/CODEC subnodes properties:
-
--link-name : Name of the dai link.
--sound-dai : phandle/s and port of CPU/CODEC
-
-Example:
-
-sound: sound {
- compatible = "qcom,apq8016-sbc-sndcard";
- reg = <0x07702000 0x4>, <0x07702004 0x4>;
- reg-names = "mic-iomux", "spkr-iomux";
- qcom,model = "DB410c";
-
- qcom,audio-routing =
- "MIC BIAS External1", "Handset Mic",
- "MIC BIAS Internal2", "Headset Mic",
- "MIC BIAS External1", "Secondary Mic",
- "AMIC1", "MIC BIAS External1",
- "AMIC2", "MIC BIAS Internal2",
- "AMIC3", "MIC BIAS External1",
- "DMIC1", "MIC BIAS Internal1",
- "MIC BIAS Internal1", "Digital Mic1",
- "DMIC2", "MIC BIAS Internal1",
- "MIC BIAS Internal1", "Digital Mic2";
-
- /* I2S - Internal codec */
- internal-dai-link@0 {
- cpu { /* PRIMARY */
- sound-dai = <&lpass MI2S_PRIMARY>;
- };
- codec {
- sound-dai = <&lpass_codec 0>, <&wcd_codec 0>;
- };
- };
-
- /* External Primary or External Secondary -ADV7533 HDMI */
- external-dai-link@0 {
- link-name = "ADV7533";
- cpu { /* QUAT */
- sound-dai = <&lpass MI2S_QUATERNARY>;
- };
- codec {
- sound-dai = <&adv_bridge 0>;
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
index 7d57eb91657a..4bfda04b4608 100644
--- a/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
+++ b/Documentation/devicetree/bindings/sound/qcom,sm8250.yaml
@@ -4,18 +4,20 @@
$id: http://devicetree.org/schemas/sound/qcom,sm8250.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Qualcomm Technologies Inc. SM8250 ASoC sound card driver
+title: Qualcomm Technologies Inc. ASoC sound card drivers
maintainers:
- Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
description:
- This bindings describes SC8250 SoC based sound cards
+  This binding describes Qualcomm SoC based sound cards
   which use the LPASS internal codec for audio.
properties:
compatible:
enum:
+ - qcom,apq8016-sbc-sndcard
+ - qcom,msm8916-qdsp6-sndcard
- qcom,sm8250-sndcard
- qcom,qrb5165-rb5-sndcard
@@ -27,10 +29,28 @@ properties:
being the connection's source. Valid names could be power supplies,
MicBias of codec and the jacks on the board.
+ aux-devs:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ List of phandles pointing to auxiliary devices, such
+ as amplifiers, to be added to the sound card.
+
model:
$ref: /schemas/types.yaml#/definitions/string
description: User visible long sound card name
+ pin-switches:
+ description: List of widget names for which pin switches should be created.
+ $ref: /schemas/types.yaml#/definitions/string-array
+
+ widgets:
+    description: User-specified audio widgets.
+ $ref: /schemas/types.yaml#/definitions/non-unique-string-array
+
+  # Only valid for some compatibles (see the allOf if/then below)
+ reg: true
+ reg-names: true
+
patternProperties:
".*-dai-link$":
description:
@@ -73,6 +93,34 @@ required:
- compatible
- model
+allOf:
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,apq8016-sbc-sndcard
+ - qcom,msm8916-qdsp6-sndcard
+ then:
+ properties:
+ reg:
+ items:
+ - description: Microphone I/O mux register address
+ - description: Speaker I/O mux register address
+ reg-names:
+ items:
+ - const: mic-iomux
+ - const: spkr-iomux
+ required:
+ - compatible
+ - model
+ - reg
+ - reg-names
+ else:
+ properties:
+ reg: false
+ reg-names: false
+
additionalProperties: false
examples:
@@ -86,10 +134,7 @@ examples:
audio-routing = "SpkrLeft IN", "WSA_SPK1 OUT",
"SpkrRight IN", "WSA_SPK2 OUT",
"VA DMIC0", "vdd-micb",
- "VA DMIC1", "vdd-micb",
- "MM_DL1", "MultiMedia1 Playback",
- "MM_DL2", "MultiMedia2 Playback",
- "MultiMedia3 Capture", "MM_UL3";
+ "VA DMIC1", "vdd-micb";
mm1-dai-link {
link-name = "MultiMedia0";
@@ -157,3 +202,98 @@ examples:
};
};
};
+
+ - |
+ #include <dt-bindings/sound/qcom,lpass.h>
+ sound@7702000 {
+ compatible = "qcom,apq8016-sbc-sndcard";
+ reg = <0x07702000 0x4>, <0x07702004 0x4>;
+ reg-names = "mic-iomux", "spkr-iomux";
+
+ model = "DB410c";
+ audio-routing =
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS External1";
+
+ pinctrl-0 = <&cdc_pdm_lines_act &ext_sec_tlmm_lines_act &ext_mclk_tlmm_lines_act>;
+ pinctrl-1 = <&cdc_pdm_lines_sus &ext_sec_tlmm_lines_sus &ext_mclk_tlmm_lines_sus>;
+ pinctrl-names = "default", "sleep";
+
+ quaternary-dai-link {
+ link-name = "ADV7533";
+ cpu {
+ sound-dai = <&lpass MI2S_QUATERNARY>;
+ };
+ codec {
+ sound-dai = <&adv_bridge 0>;
+ };
+ };
+
+ primary-dai-link {
+ link-name = "WCD";
+ cpu {
+ sound-dai = <&lpass MI2S_PRIMARY>;
+ };
+ codec {
+ sound-dai = <&lpass_codec 0>, <&wcd_codec 0>;
+ };
+ };
+
+ tertiary-dai-link {
+ link-name = "WCD-Capture";
+ cpu {
+ sound-dai = <&lpass MI2S_TERTIARY>;
+ };
+ codec {
+ sound-dai = <&lpass_codec 1>, <&wcd_codec 1>;
+ };
+ };
+ };
+
+ - |
+ #include <dt-bindings/sound/qcom,q6afe.h>
+ #include <dt-bindings/sound/qcom,q6asm.h>
+ sound@7702000 {
+ compatible = "qcom,msm8916-qdsp6-sndcard";
+ reg = <0x07702000 0x4>, <0x07702004 0x4>;
+ reg-names = "mic-iomux", "spkr-iomux";
+
+ model = "msm8916";
+ widgets =
+ "Speaker", "Speaker",
+ "Headphone", "Headphones";
+ pin-switches = "Speaker";
+ audio-routing =
+ "Speaker", "Speaker Amp OUT",
+ "Speaker Amp IN", "HPH_R",
+ "Headphones", "HPH_L",
+ "Headphones", "HPH_R",
+ "AMIC1", "MIC BIAS Internal1",
+ "AMIC2", "MIC BIAS Internal2",
+ "AMIC3", "MIC BIAS Internal3";
+ aux-devs = <&speaker_amp>;
+
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&cdc_pdm_lines_act>;
+ pinctrl-1 = <&cdc_pdm_lines_sus>;
+
+ mm1-dai-link {
+ link-name = "MultiMedia1";
+ cpu {
+ sound-dai = <&q6asmdai MSM_FRONTEND_DAI_MULTIMEDIA1>;
+ };
+ };
+
+ primary-dai-link {
+ link-name = "Primary MI2S";
+ cpu {
+ sound-dai = <&q6afedai PRIMARY_MI2S_RX>;
+ };
+ platform {
+ sound-dai = <&q6routing>;
+ };
+ codec {
+ sound-dai = <&lpass_codec 0>, <&wcd_codec 0>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/realtek,rt5682s.yaml b/Documentation/devicetree/bindings/sound/realtek,rt5682s.yaml
index 2b8b7b51fe55..d65c0ed5060c 100644
--- a/Documentation/devicetree/bindings/sound/realtek,rt5682s.yaml
+++ b/Documentation/devicetree/bindings/sound/realtek,rt5682s.yaml
@@ -61,6 +61,10 @@ properties:
description: |
Set the delay time (ms) for the requirement of the particular DMIC.
+ realtek,amic-delay-ms:
+ description: |
+      Set the delay time (ms) required by the particular platform or AMIC.
+
realtek,dmic-clk-driving-high:
type: boolean
description: |
diff --git a/Documentation/devicetree/bindings/sound/samsung-i2s.yaml b/Documentation/devicetree/bindings/sound/samsung-i2s.yaml
index 2e3628ef48df..84c4d6cba521 100644
--- a/Documentation/devicetree/bindings/sound/samsung-i2s.yaml
+++ b/Documentation/devicetree/bindings/sound/samsung-i2s.yaml
@@ -110,12 +110,6 @@ properties:
Internal DMA register base address of the audio
subsystem (used in secondary sound source).
- pinctrl-0:
- description: Should specify pin control groups used for this controller.
-
- pinctrl-names:
- const: default
-
power-domains:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/sound/simple-audio-amplifier.yaml b/Documentation/devicetree/bindings/sound/simple-audio-amplifier.yaml
index 26379377a7ac..8327846356d3 100644
--- a/Documentation/devicetree/bindings/sound/simple-audio-amplifier.yaml
+++ b/Documentation/devicetree/bindings/sound/simple-audio-amplifier.yaml
@@ -9,6 +9,9 @@ title: Simple Audio Amplifier Device Tree Bindings
maintainers:
- Jerome Brunet <jbrunet@baylibre.com>
+allOf:
+ - $ref: name-prefix.yaml#
+
properties:
compatible:
enum:
@@ -22,10 +25,7 @@ properties:
description: >
power supply for the device
- sound-name-prefix:
- $ref: /schemas/types.yaml#/definitions/string
- description: >
- See ./name-prefix.txt
+ sound-name-prefix: true
required:
- compatible
diff --git a/Documentation/devicetree/bindings/sound/ti,tlv320adc3xxx.yaml b/Documentation/devicetree/bindings/sound/ti,tlv320adc3xxx.yaml
new file mode 100644
index 000000000000..83936f594d1a
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ti,tlv320adc3xxx.yaml
@@ -0,0 +1,137 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/sound/ti,tlv320adc3xxx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Texas Instruments TLV320ADC3001/TLV320ADC3101 Stereo ADC
+
+maintainers:
+ - Ricard Wanderlof <ricardw@axis.com>
+
+description: |
+ Texas Instruments TLV320ADC3001 and TLV320ADC3101 Stereo ADC
+ https://www.ti.com/product/TLV320ADC3001
+ https://www.ti.com/product/TLV320ADC3101
+
+properties:
+ compatible:
+ enum:
+ - ti,tlv320adc3001
+ - ti,tlv320adc3101
+
+ reg:
+ maxItems: 1
+ description: I2C address
+
+ '#sound-dai-cells':
+ const: 0
+
+ '#gpio-cells':
+ const: 2
+
+ gpio-controller: true
+
+ reset-gpios:
+ maxItems: 1
+ description: GPIO pin used for codec reset (RESET pin)
+
+ clocks:
+ maxItems: 1
+ description: Master clock (MCLK)
+
+ ti,dmdin-gpio1:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # ADC3XXX_GPIO_DISABLED - I/O buffers powered down and not used
+ - 1 # ADC3XXX_GPIO_INPUT - Various non-GPIO input functions
+ - 2 # ADC3XXX_GPIO_GPI - General purpose input
+ - 3 # ADC3XXX_GPIO_GPO - General purpose output
+ - 4 # ADC3XXX_GPIO_CLKOUT - Clock source set in CLKOUT_MUX reg
+ - 5 # ADC3XXX_GPIO_INT1 - INT1 output
+ - 6 # ADC3XXX_GPIO_SECONDARY_BCLK - Codec interface secondary BCLK
+ - 7 # ADC3XXX_GPIO_SECONDARY_WCLK - Codec interface secondary WCLK
+ default: 0
+ description: |
+ Configuration for DMDIN/GPIO1 pin.
+
+      When ADC3XXX_GPIO_GPO is configured, this causes the corresponding
+      ALSA control "GPIOx Output" to appear, as a switch control.
+
+ ti,dmclk-gpio2:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # ADC3XXX_GPIO_DISABLED - I/O buffers powered down and not used
+ - 1 # ADC3XXX_GPIO_INPUT - Various non-GPIO input functions
+ - 2 # ADC3XXX_GPIO_GPI - General purpose input
+ - 3 # ADC3XXX_GPIO_GPO - General purpose output
+ - 4 # ADC3XXX_GPIO_CLKOUT - Clock source set in CLKOUT_MUX reg
+ - 5 # ADC3XXX_GPIO_INT1 - INT1 output
+ - 6 # ADC3XXX_GPIO_SECONDARY_BCLK - Codec interface secondary BCLK
+ - 7 # ADC3XXX_GPIO_SECONDARY_WCLK - Codec interface secondary WCLK
+ default: 0
+ description: |
+ Configuration for DMCLK/GPIO2 pin.
+
+      When ADC3XXX_GPIO_GPO is configured, this causes the corresponding
+      ALSA control "GPIOx Output" to appear, as a switch control.
+
+ Note that there is currently no support for reading the GPIO pins as
+ inputs.
+
+ ti,micbias1-vg:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # ADC3XXX_MICBIAS_OFF - Mic bias is powered down
+ - 1 # ADC3XXX_MICBIAS_2_0V - Mic bias is set to 2.0V
+ - 2 # ADC3XXX_MICBIAS_2_5V - Mic bias is set to 2.5V
+ - 3 # ADC3XXX_MICBIAS_AVDD - Mic bias is same as AVDD supply
+ default: 0
+ description: |
+ Mic bias voltage output on MICBIAS1 pin
+
+ ti,micbias2-vg:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ enum:
+ - 0 # ADC3XXX_MICBIAS_OFF - Mic bias is powered down
+ - 1 # ADC3XXX_MICBIAS_2_0V - Mic bias is set to 2.0V
+ - 2 # ADC3XXX_MICBIAS_2_5V - Mic bias is set to 2.5V
+ - 3 # ADC3XXX_MICBIAS_AVDD - Mic bias is same as AVDD supply
+ default: 0
+ description: |
+ Mic bias voltage output on MICBIAS2 pin
+
+required:
+ - compatible
+ - reg
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/sound/tlv320adc3xxx.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ tlv320adc3101: audio-codec@18 {
+ compatible = "ti,tlv320adc3101";
+ reg = <0x18>;
+ reset-gpios = <&gpio_pc 3 GPIO_ACTIVE_LOW>;
+ clocks = <&audio_mclk>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ ti,dmdin-gpio1 = <ADC3XXX_GPIO_GPO>;
+ ti,micbias1-vg = <ADC3XXX_MICBIAS_AVDD>;
+ };
+ };
+
+ audio_mclk: clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24576000>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/sound/wlf,wm8903.yaml b/Documentation/devicetree/bindings/sound/wlf,wm8903.yaml
new file mode 100644
index 000000000000..7105ed5fd6c7
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/wlf,wm8903.yaml
@@ -0,0 +1,116 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/sound/wlf,wm8903.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: WM8903 audio codec
+
+description: |
+ This device supports I2C only.
+ Pins on the device (for linking into audio routes):
+ * IN1L
+ * IN1R
+ * IN2L
+ * IN2R
+ * IN3L
+ * IN3R
+ * DMICDAT
+ * HPOUTL
+ * HPOUTR
+ * LINEOUTL
+ * LINEOUTR
+ * LOP
+ * LON
+ * ROP
+ * RON
+ * MICBIAS
+
+maintainers:
+ - patches@opensource.cirrus.com
+
+properties:
+ compatible:
+ const: wlf,wm8903
+
+ reg:
+ maxItems: 1
+
+ gpio-controller: true
+ '#gpio-cells':
+ const: 2
+
+ interrupts:
+ maxItems: 1
+
+ micdet-cfg:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 0
+ description: Default register value for R6 (Mic Bias).
+
+ micdet-delay:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ default: 100
+    description: The debounce delay for microphone detection in ms.
+
+ gpio-cfg:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+    minItems: 5
+    maxItems: 5
+    description: |
+      A list of GPIO configuration register values.
+ If absent, no configuration of these registers is performed.
+ If any entry has the value 0xffffffff, that GPIO's
+ configuration will not be modified.
+
+ AVDD-supply:
+ description: Analog power supply regulator on the AVDD pin.
+
+ CPVDD-supply:
+ description: Charge pump supply regulator on the CPVDD pin.
+
+ DBVDD-supply:
+ description: Digital buffer supply regulator for the DBVDD pin.
+
+ DCVDD-supply:
+ description: Digital core supply regulator for the DCVDD pin.
+
+required:
+ - compatible
+ - reg
+ - gpio-controller
+ - '#gpio-cells'
+
+additionalProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ wm8903: codec@1a {
+ compatible = "wlf,wm8903";
+ reg = <0x1a>;
+ interrupts = <347>;
+
+ AVDD-supply = <&fooreg_a>;
+ CPVDD-supply = <&fooreg_b>;
+ DBVDD-supply = <&fooreg_c>;
+ DCVDD-supply = <&fooreg_d>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ micdet-cfg = <0>;
+ micdet-delay = <100>;
+ gpio-cfg = <
+ 0x0600 /* DMIC_LR, output */
+ 0x0680 /* DMIC_DAT, input */
+ 0x0000 /* GPIO, output, low */
+ 0x0200 /* Interrupt, output */
+ 0x01a0 /* BCLK, input, active high */
+ >;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/wm8903.txt b/Documentation/devicetree/bindings/sound/wm8903.txt
deleted file mode 100644
index 6371c2434afe..000000000000
--- a/Documentation/devicetree/bindings/sound/wm8903.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-WM8903 audio CODEC
-
-This device supports I2C only.
-
-Required properties:
-
- - compatible : "wlf,wm8903"
-
- - reg : the I2C address of the device.
-
- - gpio-controller : Indicates this device is a GPIO controller.
-
- - #gpio-cells : Should be two. The first cell is the pin number and the
- second cell is used to specify optional parameters (currently unused).
-
-Optional properties:
-
- - interrupts : The interrupt line the codec is connected to.
-
- - micdet-cfg : Default register value for R6 (Mic Bias). If absent, the
- default is 0.
-
- - micdet-delay : The debounce delay for microphone detection in mS. If
- absent, the default is 100.
-
- - gpio-cfg : A list of GPIO configuration register values. The list must
- be 5 entries long. If absent, no configuration of these registers is
- performed. If any entry has the value 0xffffffff, that GPIO's
- configuration will not be modified.
-
- - AVDD-supply : Analog power supply regulator on the AVDD pin.
-
- - CPVDD-supply : Charge pump supply regulator on the CPVDD pin.
-
- - DBVDD-supply : Digital buffer supply regulator for the DBVDD pin.
-
- - DCVDD-supply : Digital core supply regulator for the DCVDD pin.
-
-Pins on the device (for linking into audio routes):
-
- * IN1L
- * IN1R
- * IN2L
- * IN2R
- * IN3L
- * IN3R
- * DMICDAT
- * HPOUTL
- * HPOUTR
- * LINEOUTL
- * LINEOUTR
- * LOP
- * LON
- * ROP
- * RON
- * MICBIAS
-
-Example:
-
-wm8903: codec@1a {
- compatible = "wlf,wm8903";
- reg = <0x1a>;
- interrupts = < 347 >;
-
- AVDD-supply = <&fooreg_a>;
- CPVDD-supply = <&fooreg_b>;
- DBVDD-supply = <&fooreg_c>;
- DCVDC-supply = <&fooreg_d>;
-
- gpio-controller;
- #gpio-cells = <2>;
-
- micdet-cfg = <0>;
- micdet-delay = <100>;
- gpio-cfg = <
- 0x0600 /* DMIC_LR, output */
- 0x0680 /* DMIC_DAT, input */
- 0x0000 /* GPIO, output, low */
- 0x0200 /* Interrupt, output */
- 0x01a0 /* BCLK, input, active high */
- >;
-};
diff --git a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
index 5dd209206e88..3ec2d7b83775 100644
--- a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
+++ b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml
@@ -23,8 +23,9 @@ properties:
minItems: 1
maxItems: 256
items:
- minimum: 0
- maximum: 256
+ items:
+ - minimum: 0
+ maximum: 256
description:
Chip select used by the device.
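With the corrected items schema each entry of reg is validated individually,
so a peripheral wired to more than one chip select can be described as below;
a minimal sketch in which the jedec,spi-nor device and the frequency are
illustrative assumptions:

    flash@0 {
            compatible = "jedec,spi-nor";
            reg = <0>, <1>; /* device is addressable on chip selects 0 and 1 */
            spi-max-frequency = <10000000>;
    };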
diff --git a/Documentation/devicetree/bindings/spmi/mtk,spmi-mtk-pmif.yaml b/Documentation/devicetree/bindings/spmi/mtk,spmi-mtk-pmif.yaml
new file mode 100644
index 000000000000..2445c5e0b0ef
--- /dev/null
+++ b/Documentation/devicetree/bindings/spmi/mtk,spmi-mtk-pmif.yaml
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/spmi/mtk,spmi-mtk-pmif.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mediatek SPMI Controller Device Tree Bindings
+
+maintainers:
+ - Hsin-Hsiung Wang <hsin-hsiung.wang@mediatek.com>
+
+description: |+
+ On MediaTek SoCs the PMIC is connected via SPMI and the controller allows
+ for multiple SoCs to control a single SPMI master.
+
+allOf:
+ - $ref: "spmi.yaml"
+
+properties:
+ compatible:
+ enum:
+ - mediatek,mt6873-spmi
+ - mediatek,mt8195-spmi
+
+ reg:
+ maxItems: 2
+
+ reg-names:
+ items:
+ - const: pmif
+ - const: spmimst
+
+ clocks:
+ minItems: 3
+ maxItems: 3
+
+ clock-names:
+ items:
+ - const: pmif_sys_ck
+ - const: pmif_tmr_ck
+ - const: spmimst_clk_mux
+
+ assigned-clocks:
+ maxItems: 1
+
+ assigned-clock-parents:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/mt8192-clk.h>
+
+ spmi: spmi@10027000 {
+ compatible = "mediatek,mt6873-spmi";
+ reg = <0x10027000 0xe00>,
+ <0x10029000 0x100>;
+ reg-names = "pmif", "spmimst";
+ clocks = <&infracfg CLK_INFRA_PMIC_AP>,
+ <&infracfg CLK_INFRA_PMIC_TMR>,
+ <&topckgen CLK_TOP_SPMI_MST_SEL>;
+ clock-names = "pmif_sys_ck",
+ "pmif_tmr_ck",
+ "spmimst_clk_mux";
+ assigned-clocks = <&topckgen CLK_TOP_PWRAP_ULPOSC_SEL>;
+ assigned-clock-parents = <&topckgen CLK_TOP_OSC_D10>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/spmi/spmi.yaml b/Documentation/devicetree/bindings/spmi/spmi.yaml
index 1d243faef2f8..c1b06fa5c631 100644
--- a/Documentation/devicetree/bindings/spmi/spmi.yaml
+++ b/Documentation/devicetree/bindings/spmi/spmi.yaml
@@ -24,9 +24,6 @@ properties:
$nodename:
pattern: "^spmi@.*"
- reg:
- maxItems: 1
-
"#address-cells":
const: 2
diff --git a/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt b/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt
deleted file mode 100644
index 74a9ef09db8b..000000000000
--- a/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-* Broadcom STB thermal management
-
-Thermal management core, provided by the AVS TMON hardware block.
-
-Required properties:
-- compatible: must be one of:
- "brcm,avs-tmon-bcm7216"
- "brcm,avs-tmon-bcm7445"
- "brcm,avs-tmon"
-- reg: address range for the AVS TMON registers
-- interrupts: temperature monitor interrupt, for high/low threshold triggers,
- required except for "brcm,avs-tmon-bcm7216"
-- interrupt-names: should be "tmon"
-
-Example:
-
- thermal@f04d1500 {
- compatible = "brcm,avs-tmon-bcm7445", "brcm,avs-tmon";
- reg = <0xf04d1500 0x28>;
- interrupts = <0x6>;
- interrupt-names = "tmon";
- interrupt-parent = <&avs_host_l2_intc>;
- };
diff --git a/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml b/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml
new file mode 100644
index 000000000000..267a0f423504
--- /dev/null
+++ b/Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/thermal/brcm,avs-tmon.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom STB thermal management
+
+description: Thermal management core, provided by the AVS TMON hardware block.
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+allOf:
+ - $ref: thermal-sensor.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - brcm,avs-tmon-bcm7216
+ - brcm,avs-tmon-bcm7445
+ - const: brcm,avs-tmon
+
+ reg:
+ maxItems: 1
+ description: >
+ Address range for the AVS TMON registers
+
+ interrupts:
+ maxItems: 1
+
+ interrupt-names:
+ items:
+ - const: tmon
+
+ "#thermal-sensor-cells":
+ const: 0
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+ - "#thermal-sensor-cells"
+
+examples:
+ - |
+ thermal@f04d1500 {
+ compatible = "brcm,avs-tmon-bcm7445", "brcm,avs-tmon";
+ reg = <0xf04d1500 0x28>;
+ interrupts = <0x6>;
+ interrupt-names = "tmon";
+ interrupt-parent = <&avs_host_l2_intc>;
+ #thermal-sensor-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
index a07de5ed0ca6..2d34f3ccb257 100644
--- a/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
+++ b/Documentation/devicetree/bindings/thermal/thermal-zones.yaml
@@ -199,12 +199,11 @@ patternProperties:
contribution:
$ref: /schemas/types.yaml#/definitions/uint32
- minimum: 0
- maximum: 100
description:
- The percentage contribution of the cooling devices at the
- specific trip temperature referenced in this map
- to this thermal zone
+ The cooling contribution to the thermal zone of the referred
+ cooling device at the referred trip point. The contribution is
+ a ratio of the sum of all cooling contributions within a
+ thermal zone.
required:
- trip
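To make the ratio semantics concrete, consider two maps for the same trip
with contributions 3 and 1: the first cooling device carries 3/(3+1) = 75%
of the mitigation effort, the second 25%. A hypothetical sketch, with fan0
and cpufreq_cdev as placeholder cooling devices and
dt-bindings/thermal/thermal.h assumed included:

    cooling-maps {
            map0 {
                    trip = <&cpu_alert>;
                    cooling-device = <&fan0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
                    contribution = <3>; /* 3 / (3 + 1) = 75% */
            };
            map1 {
                    trip = <&cpu_alert>;
                    cooling-device = <&cpufreq_cdev THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
                    contribution = <1>; /* 1 / (3 + 1) = 25% */
            };
    };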
diff --git a/Documentation/devicetree/bindings/timer/cdns,ttc.yaml b/Documentation/devicetree/bindings/timer/cdns,ttc.yaml
index 8615353f69b4..c3386076a98c 100644
--- a/Documentation/devicetree/bindings/timer/cdns,ttc.yaml
+++ b/Documentation/devicetree/bindings/timer/cdns,ttc.yaml
@@ -25,6 +25,9 @@ properties:
clocks:
maxItems: 1
+ power-domains:
+ maxItems: 1
+
timer-width:
$ref: "/schemas/types.yaml#/definitions/uint32"
description: |
diff --git a/Documentation/devicetree/bindings/timer/mstar,msc313e-timer.yaml b/Documentation/devicetree/bindings/timer/mstar,msc313e-timer.yaml
new file mode 100644
index 000000000000..03d5dba5d5b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/mstar,msc313e-timer.yaml
@@ -0,0 +1,46 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/timer/mstar,msc313e-timer.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Mstar MSC313e Timer Device Tree Bindings
+
+maintainers:
+ - Daniel Palmer <daniel@0x0f.com>
+ - Romain Perier <romain.perier@gmail.com>
+
+properties:
+ compatible:
+ enum:
+ - mstar,msc313e-timer
+ - sstar,ssd20xd-timer
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - clocks
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+
+ timer@6040 {
+ compatible = "mstar,msc313e-timer";
+ reg = <0x6040 0x40>;
+ clocks = <&xtal_div2>;
+ interrupts-extended = <&intc_fiq GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+...
diff --git a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.yaml b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.yaml
index edd9585f6726..f69773a8e4b9 100644
--- a/Documentation/devicetree/bindings/timer/nxp,tpm-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/nxp,tpm-timer.yaml
@@ -19,7 +19,11 @@ description: |
properties:
compatible:
- const: fsl,imx7ulp-tpm
+ oneOf:
+ - const: fsl,imx7ulp-tpm
+ - items:
+ - const: fsl,imx8ulp-tpm
+ - const: fsl,imx7ulp-tpm
reg:
maxItems: 1
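With the oneOf in place, an i.MX8ULP TPM node chains the new compatible in
front of the i.MX7ULP fallback; a sketch in which the unit address is an
illustrative assumption:

    tpm@29340000 {
            compatible = "fsl,imx8ulp-tpm", "fsl,imx7ulp-tpm";
            reg = <0x29340000 0x1000>;
            /* interrupts and clocks as for the i.MX7ULP TPM */
    };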
diff --git a/Documentation/devicetree/bindings/timer/renesas,ostm.yaml b/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
index 600d47ab7d58..7fa7f977b44c 100644
--- a/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
+++ b/Documentation/devicetree/bindings/timer/renesas,ostm.yaml
@@ -21,9 +21,10 @@ properties:
compatible:
items:
- enum:
- - renesas,r7s72100-ostm # RZ/A1H
- - renesas,r7s9210-ostm # RZ/A2M
- - const: renesas,ostm # Generic
+ - renesas,r7s72100-ostm # RZ/A1H
+ - renesas,r7s9210-ostm # RZ/A2M
+ - renesas,r9a07g044-ostm # RZ/G2{L,LC}
+ - const: renesas,ostm # Generic
reg:
maxItems: 1
@@ -37,6 +38,9 @@ properties:
power-domains:
maxItems: 1
+ resets:
+ maxItems: 1
+
required:
- compatible
- reg
@@ -44,6 +48,16 @@ required:
- clocks
- power-domains
+if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,r9a07g044-ostm
+then:
+ required:
+ - resets
+
additionalProperties: false
examples:
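For the RZ/G2{L,LC} entry the if/then block above makes resets mandatory;
a sketch, with the clock and reset specifiers as placeholder assumptions:

    ostm1: timer@12801400 {
            compatible = "renesas,r9a07g044-ostm", "renesas,ostm";
            reg = <0x12801400 0x400>;
            interrupts = <GIC_SPI 47 IRQ_TYPE_EDGE_RISING>;
            clocks = <&cpg 1>;
            resets = <&cpg 1>;
            power-domains = <&cpg>;
    };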
diff --git a/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml
index e26ecb5893ae..5d157d87dad5 100644
--- a/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/rockchip,rk-timer.yaml
@@ -18,7 +18,6 @@ properties:
- enum:
- rockchip,rv1108-timer
- rockchip,rk3036-timer
- - rockchip,rk3066-timer
- rockchip,rk3188-timer
- rockchip,rk3228-timer
- rockchip,rk3229-timer
diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml
index c451ae82d8d7..091792ba993e 100644
--- a/Documentation/devicetree/bindings/trivial-devices.yaml
+++ b/Documentation/devicetree/bindings/trivial-devices.yaml
@@ -31,7 +31,7 @@ properties:
- enum:
# SMBus/I2C Digital Temperature Sensor in 6-Pin SOT with SMBus Alert and Over Temperature Pin
- ad,ad7414
- # ADM9240: Complete System Hardware Monitor for uProcessor-Based Systems
+ # ADM9240: Complete System Hardware Monitor for uProcessor-Based Systems
- ad,adm9240
# AD5110 - Nonvolatile Digital Potentiometer
- adi,ad5110
@@ -43,7 +43,7 @@ properties:
- adi,adp5589
# AMS iAQ-Core VOC Sensor
- ams,iaq-core
- # i2c serial eeprom (24cxx)
+ # i2c serial eeprom (24cxx)
- at,24c08
# i2c trusted platform module (TPM)
- atmel,at97sc3204t
@@ -289,6 +289,8 @@ properties:
- sensirion,sgp30
# Sensirion gas sensor with I2C interface
- sensirion,sgp40
+ # Sensirion temperature & humidity sensor with I2C interface
+ - sensirion,sht4x
# Sensortek 3 axis accelerometer
- sensortek,stk8312
# Sensortek 3 axis accelerometer
@@ -301,9 +303,9 @@ properties:
- skyworks,sky81452
# Socionext SynQuacer TPM MMIO module
- socionext,synquacer-tpm-mmio
- # i2c serial eeprom (24cxx)
- - sparkfun,qwiic-joystick
# SparkFun Qwiic Joystick (COM-15168) with i2c interface
+ - sparkfun,qwiic-joystick
+ # i2c serial eeprom (24cxx)
- st,24c256
# Ambient Light Sensor with SMBUS/Two Wire Serial Interface
- taos,tsl2550
@@ -337,12 +339,19 @@ properties:
- ti,tmp122
# Digital Temperature Sensor
- ti,tmp275
+ # TI DC-DC converter on PMBus
+ - ti,tps40400
# TI Dual channel DCAP+ multiphase controller TPS53676 with AVSBus
- ti,tps53676
# TI Dual channel DCAP+ multiphase controller TPS53679
- ti,tps53679
# TI Dual channel DCAP+ multiphase controller TPS53688
- ti,tps53688
+ # TI DC-DC converters on PMBus
+ - ti,tps544b20
+ - ti,tps544b25
+ - ti,tps544c20
+ - ti,tps544c25
# Winbond/Nuvoton H/W Monitor
- winbond,w83793
# i2c trusted platform module (TPM)
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index d8fd4df81743..d0fee78e6203 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -16,6 +16,7 @@ Required properties:
"qcom,sm8150-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
"qcom,sm8250-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
"qcom,sm8350-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
+ "qcom,sm8450-ufshc", "qcom,ufshc", "jedec,ufs-2.0"
- interrupts : <interrupt mapping for UFS host controller IRQ>
- reg : <registers mapping>
diff --git a/Documentation/devicetree/bindings/usb/brcm,bdc.txt b/Documentation/devicetree/bindings/usb/brcm,bdc.txt
deleted file mode 100644
index c9f52b97cef1..000000000000
--- a/Documentation/devicetree/bindings/usb/brcm,bdc.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Broadcom USB Device Controller (BDC)
-====================================
-
-Required properties:
-
-- compatible: must be one of:
- "brcm,bdc-udc-v2"
- "brcm,bdc"
-- reg: the base register address and length
-- interrupts: the interrupt line for this controller
-
-Optional properties:
-
-On Broadcom STB platforms, these properties are required:
-
-- phys: phandle to one or two USB PHY blocks
- NOTE: Some SoC's have a single phy and some have
- USB 2.0 and USB 3.0 phys
-- clocks: phandle to the functional clock of this block
-
-Example:
-
- bdc@f0b02000 {
- compatible = "brcm,bdc-udc-v2";
- reg = <0xf0b02000 0xfc4>;
- interrupts = <0x0 0x60 0x0>;
- phys = <&usbphy_0 0x0>;
- clocks = <&sw_usbd>;
- };
diff --git a/Documentation/devicetree/bindings/usb/brcm,bdc.yaml b/Documentation/devicetree/bindings/usb/brcm,bdc.yaml
new file mode 100644
index 000000000000..9e561fee98f1
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/brcm,bdc.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/brcm,bdc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom USB Device Controller (BDC)
+
+maintainers:
+ - Al Cooper <alcooperx@gmail.com>
+ - Florian Fainelli <f.fainelli@gmail.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - brcm,bdc-udc-v2
+ - brcm,bdc
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ phys:
+ minItems: 1
+ items:
+ - description: USB 2.0 or 3.0 PHY
+ - description: USB 3.0 PHY if there is a dedicated 2.0 PHY
+
+ clocks:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ usb@f0b02000 {
+ compatible = "brcm,bdc-udc-v2";
+ reg = <0xf0b02000 0xfc4>;
+ interrupts = <0x0 0x60 0x0>;
+ phys = <&usbphy_0 0x0>;
+ clocks = <&sw_usbd>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/dwc2.yaml b/Documentation/devicetree/bindings/usb/dwc2.yaml
index 56a818478cd7..f00867ebc147 100644
--- a/Documentation/devicetree/bindings/usb/dwc2.yaml
+++ b/Documentation/devicetree/bindings/usb/dwc2.yaml
@@ -114,6 +114,8 @@ properties:
usb-role-switch: true
+ role-switch-default-mode: true
+
g-rx-fifo-size:
$ref: /schemas/types.yaml#/definitions/uint32
description: size of rx fifo size in gadget mode.
@@ -136,6 +138,17 @@ properties:
description: If present indicates that we need to reset the PHY when we
detect a wakeup. This is due to a hardware errata.
+ port:
+ description:
+ Any connector to the data bus of this controller should be modelled
+ using the OF graph bindings specified, if the "usb-role-switch"
+ property is used.
+ $ref: /schemas/graph.yaml#/properties/port
+
+dependencies:
+ port: [ usb-role-switch ]
+ role-switch-default-mode: [ usb-role-switch ]
+
required:
- compatible
- reg
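A sketch of how the new properties combine with usb-role-switch, where the
connector endpoint and the register/interrupt values are illustrative
assumptions:

    usb@ff400000 {
            compatible = "snps,dwc2";
            reg = <0xff400000 0x40000>;
            interrupts = <31>;
            clocks = <&usb_clk>;
            clock-names = "otg";
            usb-role-switch;
            role-switch-default-mode = "peripheral";
            port {
                    usb_ep: endpoint {
                            remote-endpoint = <&usb_c_ep>;
                    };
            };
    };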
diff --git a/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt b/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
deleted file mode 100644
index 04813a46e5d0..000000000000
--- a/Documentation/devicetree/bindings/usb/dwc3-xilinx.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-Xilinx SuperSpeed DWC3 USB SoC controller
-
-Required properties:
-- compatible: May contain "xlnx,zynqmp-dwc3" or "xlnx,versal-dwc3"
-- reg: Base address and length of the register control block
-- clocks: A list of phandles for the clocks listed in clock-names
-- clock-names: Should contain the following:
- "bus_clk" Master/Core clock, have to be >= 125 MHz for SS
- operation and >= 60MHz for HS operation
-
- "ref_clk" Clock source to core during PHY power down
-- resets: A list of phandles for resets listed in reset-names
-- reset-names:
- "usb_crst" USB core reset
- "usb_hibrst" USB hibernation reset
- "usb_apbrst" USB APB reset
-
-Required child node:
-A child node must exist to represent the core DWC3 IP block. The name of
-the node is not important. The content of the node is defined in dwc3.txt.
-
-Optional properties for snps,dwc3:
-- dma-coherent: Enable this flag if CCI is enabled in design. Adding this
- flag configures Global SoC bus Configuration Register and
- Xilinx USB 3.0 IP - USB coherency register to enable CCI.
-- interrupt-names: Should contain the following:
- "dwc_usb3" USB gadget mode interrupts
- "otg" USB OTG mode interrupts
- "hiber" USB hibernation interrupts
-
-Example device node:
-
- usb@0 {
- #address-cells = <0x2>;
- #size-cells = <0x1>;
- compatible = "xlnx,zynqmp-dwc3";
- reg = <0x0 0xff9d0000 0x0 0x100>;
- clock-names = "bus_clk", "ref_clk";
- clocks = <&clk125>, <&clk125>;
- resets = <&zynqmp_reset ZYNQMP_RESET_USB1_CORERESET>,
- <&zynqmp_reset ZYNQMP_RESET_USB1_HIBERRESET>,
- <&zynqmp_reset ZYNQMP_RESET_USB1_APB>;
- reset-names = "usb_crst", "usb_hibrst", "usb_apbrst";
- ranges;
-
- dwc3@fe200000 {
- compatible = "snps,dwc3";
- reg = <0x0 0xfe200000 0x40000>;
- interrupt-names = "dwc_usb3", "otg", "hiber";
- interrupts = <0 65 4>, <0 69 4>, <0 75 4>;
- phys = <&psgtr 2 PHY_TYPE_USB3 0 2>;
- phy-names = "usb3-phy";
- dr_mode = "host";
- dma-coherent;
- };
- };
diff --git a/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
new file mode 100644
index 000000000000..f77c16e203d5
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/dwc3-xilinx.yaml
@@ -0,0 +1,131 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/usb/dwc3-xilinx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Xilinx SuperSpeed DWC3 USB SoC controller
+
+maintainers:
+ - Manish Narani <manish.narani@xilinx.com>
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - xlnx,zynqmp-dwc3
+ - xlnx,versal-dwc3
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ enum: [ 1, 2 ]
+
+ "#size-cells":
+ enum: [ 1, 2 ]
+
+ ranges: true
+
+ power-domains:
+    description: specifies a phandle to the PM domain provider node
+ maxItems: 1
+
+ clocks:
+ description:
+ A list of phandle and clock-specifier pairs for the clocks
+ listed in clock-names.
+ items:
+ - description: Master/Core clock, has to be >= 125 MHz
+        for SS operation and >= 60 MHz for HS operation.
+ - description: Clock source to core during PHY power down.
+
+ clock-names:
+ items:
+ - const: bus_clk
+ - const: ref_clk
+
+ resets:
+ description:
+ A list of phandles for resets listed in reset-names.
+
+ items:
+ - description: USB core reset
+ - description: USB hibernation reset
+ - description: USB APB reset
+
+ reset-names:
+ items:
+ - const: usb_crst
+ - const: usb_hibrst
+ - const: usb_apbrst
+
+ phys:
+ minItems: 1
+ maxItems: 2
+
+ phy-names:
+ minItems: 1
+ maxItems: 2
+ items:
+ enum:
+ - usb2-phy
+ - usb3-phy
+
+# Required child node:
+
+patternProperties:
+ "^usb@[0-9a-f]+$":
+ $ref: snps,dwc3.yaml#
+
+required:
+ - compatible
+ - reg
+ - "#address-cells"
+ - "#size-cells"
+ - ranges
+ - power-domains
+ - clocks
+ - clock-names
+ - resets
+ - reset-names
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>
+ #include <dt-bindings/power/xlnx-zynqmp-power.h>
+ #include <dt-bindings/reset/xlnx-zynqmp-resets.h>
+ #include <dt-bindings/clock/xlnx-zynqmp-clk.h>
+ #include <dt-bindings/reset/xlnx-zynqmp-resets.h>
+ #include <dt-bindings/phy/phy.h>
+ axi {
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ usb@0 {
+ #address-cells = <0x2>;
+ #size-cells = <0x2>;
+ compatible = "xlnx,zynqmp-dwc3";
+ reg = <0x0 0xff9d0000 0x0 0x100>;
+ clocks = <&zynqmp_clk USB0_BUS_REF>, <&zynqmp_clk USB3_DUAL_REF>;
+ clock-names = "bus_clk", "ref_clk";
+ power-domains = <&zynqmp_firmware PD_USB_0>;
+ resets = <&zynqmp_reset ZYNQMP_RESET_USB1_CORERESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB1_HIBERRESET>,
+ <&zynqmp_reset ZYNQMP_RESET_USB1_APB>;
+ reset-names = "usb_crst", "usb_hibrst", "usb_apbrst";
+ phys = <&psgtr 2 PHY_TYPE_USB3 0 2>;
+ phy-names = "usb3-phy";
+ ranges;
+
+ usb@fe200000 {
+ compatible = "snps,dwc3";
+ reg = <0x0 0xfe200000 0x0 0x40000>;
+ interrupt-names = "host", "otg";
+ interrupts = <0 65 4>, <0 69 4>;
+ dr_mode = "host";
+ dma-coherent;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml b/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml
index 43b91ab62004..d3511f48cd55 100644
--- a/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml
@@ -13,6 +13,9 @@ properties:
compatible:
const: intel,keembay-dwc3
+ reg:
+ maxItems: 1
+
clocks:
maxItems: 4
diff --git a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
index 2bdaba023c01..2d23a4ff702f 100644
--- a/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/qcom,dwc3.yaml
@@ -13,7 +13,9 @@ properties:
compatible:
items:
- enum:
+ - qcom,ipq4019-dwc3
- qcom,ipq6018-dwc3
+ - qcom,ipq8064-dwc3
- qcom,msm8996-dwc3
- qcom,msm8998-dwc3
- qcom,sc7180-dwc3
@@ -23,9 +25,11 @@ properties:
- qcom,sdx55-dwc3
- qcom,sm4250-dwc3
- qcom,sm6115-dwc3
+ - qcom,sm6350-dwc3
- qcom,sm8150-dwc3
- qcom,sm8250-dwc3
- qcom,sm8350-dwc3
+ - qcom,sm8450-dwc3
- const: qcom,dwc3
reg:
diff --git a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
index 25ac2c93dc6c..d29ffcd27472 100644
--- a/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
+++ b/Documentation/devicetree/bindings/usb/snps,dwc3.yaml
@@ -36,6 +36,9 @@ properties:
- const: synopsys,dwc3
deprecated: true
+ reg:
+ maxItems: 1
+
interrupts:
description:
It's either a single common DWC3 interrupt (dwc_usb3) or individual
@@ -65,6 +68,9 @@ properties:
- enum: [bus_early, ref, suspend]
- true
+ iommus:
+ maxItems: 1
+
usb-phy:
minItems: 1
items:
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index ae2aaaba620a..294093d45a23 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -25,6 +25,8 @@ patternProperties:
# Keep list in alphabetical order.
"^70mai,.*":
description: 70mai Co., Ltd.
+ "^8dev,.*":
+ description: 8devices, UAB
"^abb,.*":
description: ABB
"^abilis,.*":
@@ -441,6 +443,8 @@ patternProperties:
description: Freescale Semiconductor
"^fujitsu,.*":
description: Fujitsu Ltd.
+ "^fxtec,.*":
+ description: FX Technology Ltd.
"^gardena,.*":
description: GARDENA GmbH
"^gateworks,.*":
@@ -515,6 +519,8 @@ patternProperties:
description: HannStar Display Co.
"^holtek,.*":
description: Holtek Semiconductor, Inc.
+ "^huawei,.*":
+ description: Huawei Technologies Co., Ltd.
"^hugsun,.*":
description: Shenzhen Hugsun Technology Co. Ltd.
"^hwacom,.*":
@@ -858,6 +864,8 @@ patternProperties:
description: OLIMEX Ltd.
"^olpc,.*":
description: One Laptop Per Child
+ "^oneplus,.*":
+ description: OnePlus Technology (Shenzhen) Co., Ltd.
"^onion,.*":
description: Onion Corporation
"^onnn,.*":
@@ -1159,6 +1167,8 @@ patternProperties:
description: Summit microelectronics
"^sunchip,.*":
description: Shenzhen Sunchip Technology Co., Ltd
+ "^sunplus,.*":
+ description: Sunplus Technology Co., Ltd.
"^SUNW,.*":
description: Sun Microsystems, Inc
"^supermicro,.*":
@@ -1197,10 +1207,14 @@ patternProperties:
description: Terasic Inc.
"^tfc,.*":
description: Three Five Corp
+ "^thead,.*":
+ description: T-Head Semiconductor Co., Ltd.
"^thine,.*":
description: THine Electronics, Inc.
"^thingyjp,.*":
description: thingy.jp
+ "^thundercomm,.*":
+ description: Thundercomm Technology Co., Ltd.
"^ti,.*":
description: Texas Instruments
"^tianma,.*":
@@ -1328,6 +1342,8 @@ patternProperties:
description: Wiligear, Ltd.
"^winbond,.*":
description: Winbond Electronics corp.
+ "^wingtech,.*":
+ description: Wingtech Technology Co., Ltd.
"^winlink,.*":
description: WinLink Co., Ltd
"^winstar,.*":
diff --git a/Documentation/devicetree/bindings/watchdog/atmel,sama5d4-wdt.yaml b/Documentation/devicetree/bindings/watchdog/atmel,sama5d4-wdt.yaml
index 9856cd76c28d..a9635c03761c 100644
--- a/Documentation/devicetree/bindings/watchdog/atmel,sama5d4-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/atmel,sama5d4-wdt.yaml
@@ -22,6 +22,9 @@ properties:
reg:
maxItems: 1
+ interrupts:
+ maxItems: 1
+
atmel,watchdog-type:
$ref: /schemas/types.yaml#/definitions/string
description: should be hardware or software.
diff --git a/Documentation/devicetree/bindings/watchdog/brcm,bcm7038-wdt.txt b/Documentation/devicetree/bindings/watchdog/brcm,bcm7038-wdt.txt
deleted file mode 100644
index 84122270be8f..000000000000
--- a/Documentation/devicetree/bindings/watchdog/brcm,bcm7038-wdt.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-BCM7038 Watchdog timer
-
-Required properties:
-
-- compatible : should be "brcm,bcm7038-wdt"
-- reg : Specifies base physical address and size of the registers.
-
-Optional properties:
-
-- clocks: The clock running the watchdog. If no clock is found the
- driver will default to 27000000 Hz.
-
-Example:
-
-watchdog@f040a7e8 {
- compatible = "brcm,bcm7038-wdt";
- clocks = <&upg_fixed>;
- reg = <0xf040a7e8 0x16>;
-};
diff --git a/Documentation/devicetree/bindings/watchdog/brcm,bcm7038-wdt.yaml b/Documentation/devicetree/bindings/watchdog/brcm,bcm7038-wdt.yaml
new file mode 100644
index 000000000000..a926809352b8
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/brcm,bcm7038-wdt.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/brcm,bcm7038-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BCM63xx and BCM7038 watchdog timer
+
+allOf:
+ - $ref: "watchdog.yaml#"
+
+maintainers:
+ - Florian Fainelli <f.fainelli@gmail.com>
+ - Justin Chen <justinpopo6@gmail.com>
+ - Rafał Miłecki <rafal@milecki.pl>
+
+properties:
+ compatible:
+ enum:
+ - brcm,bcm6345-wdt
+ - brcm,bcm7038-wdt
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+ description: >
+ The clock running the watchdog. If no clock is found the driver will
+ default to 27000000 Hz.
+
+unevaluatedProperties: false
+
+required:
+ - reg
+
+examples:
+ - |
+ watchdog@f040a7e8 {
+ compatible = "brcm,bcm7038-wdt";
+ reg = <0xf040a7e8 0x16>;
+ clocks = <&upg_fixed>;
+ };
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml b/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
index 51d6d482bbc2..fb603a20e396 100644
--- a/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/fsl-imx7ulp-wdt.yaml
@@ -14,8 +14,11 @@ allOf:
properties:
compatible:
- enum:
- - fsl,imx7ulp-wdt
+ oneOf:
+ - const: fsl,imx7ulp-wdt
+ - items:
+ - const: fsl,imx8ulp-wdt
+ - const: fsl,imx7ulp-wdt
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
index ba60bdf1fecc..16c6f82a13ca 100644
--- a/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/qcom-wdt.yaml
@@ -20,7 +20,9 @@ properties:
- qcom,apss-wdt-sc7280
- qcom,apss-wdt-sdm845
- qcom,apss-wdt-sdx55
+ - qcom,apss-wdt-sm6350
- qcom,apss-wdt-sm8150
+ - qcom,apss-wdt-sm8250
- qcom,kpss-timer
- qcom,kpss-wdt
- qcom,kpss-wdt-apq8064
diff --git a/Documentation/devicetree/bindings/watchdog/realtek,otto-wdt.yaml b/Documentation/devicetree/bindings/watchdog/realtek,otto-wdt.yaml
new file mode 100644
index 000000000000..11b220a5e0f6
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/realtek,otto-wdt.yaml
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/watchdog/realtek,otto-wdt.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Realtek Otto watchdog timer
+
+maintainers:
+ - Sander Vanheule <sander@svanheule.net>
+
+description: |
+ The timer has two timeout phases. Both phases have a maximum duration of 32
+  prescaled clock ticks, which is ca. 43s with a bus clock of 200 MHz. The
+ minimum duration of each phase is one tick. Each phase can trigger an
+ interrupt, although the phase 2 interrupt will occur with the system reset.
+ - Phase 1: During this phase, the WDT can be pinged to reset the timeout.
+ - Phase 2: Starts after phase 1 has timed out, and only serves to give the
+ system some time to clean up, or notify others that it's going to reset.
+ During this phase, pinging the WDT has no effect, and a reset is
+ unavoidable, unless the WDT is disabled.
+
+allOf:
+ - $ref: watchdog.yaml#
+
+properties:
+ compatible:
+ enum:
+ - realtek,rtl8380-wdt
+ - realtek,rtl8390-wdt
+ - realtek,rtl9300-wdt
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ maxItems: 1
+
+ interrupts:
+ items:
+ - description: interrupt specifier for pretimeout
+ - description: interrupt specifier for timeout
+
+ interrupt-names:
+ items:
+ - const: phase1
+ - const: phase2
+
+ realtek,reset-mode:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: |
+ Specify how the system is reset after a timeout. Defaults to "cpu" if
+ left unspecified.
+ oneOf:
+ - description: Reset the entire chip
+ const: soc
+ - description: |
+ Reset the CPU and IPsec engine, but leave other peripherals untouched
+ const: cpu
+ - description: |
+ Reset the execution pointer, but don't actually reset any hardware
+ const: software
+
+required:
+ - compatible
+ - reg
+ - clocks
+ - interrupts
+
+unevaluatedProperties: false
+
+dependencies:
+ interrupts: [ interrupt-names ]
+
+examples:
+ - |
+ watchdog: watchdog@3150 {
+ compatible = "realtek,rtl8380-wdt";
+ reg = <0x3150 0xc>;
+
+ realtek,reset-mode = "soc";
+
+ clocks = <&lxbus_clock>;
+ timeout-sec = <20>;
+
+ interrupt-parent = <&rtlintc>;
+ interrupt-names = "phase1", "phase2";
+ interrupts = <19>, <18>;
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
index ab66d3f0c476..91a98ccd4226 100644
--- a/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/renesas,wdt.yaml
@@ -10,9 +10,6 @@ maintainers:
- Wolfram Sang <wsa+renesas@sang-engineering.com>
- Geert Uytterhoeven <geert+renesas@glider.be>
-allOf:
- - $ref: "watchdog.yaml#"
-
properties:
compatible:
oneOf:
@@ -24,6 +21,11 @@ properties:
- items:
- enum:
+ - renesas,r9a07g044-wdt # RZ/G2{L,LC}
+ - const: renesas,rzg2l-wdt # RZ/G2L
+
+ - items:
+ - enum:
- renesas,r8a7742-wdt # RZ/G1H
- renesas,r8a7743-wdt # RZ/G1M
- renesas,r8a7744-wdt # RZ/G1N
@@ -56,11 +58,13 @@ properties:
reg:
maxItems: 1
- interrupts:
- maxItems: 1
+ interrupts: true
- clocks:
- maxItems: 1
+ interrupt-names: true
+
+ clocks: true
+
+ clock-names: true
power-domains:
maxItems: 1
@@ -75,17 +79,52 @@ required:
- reg
- clocks
-if:
- not:
- properties:
- compatible:
- contains:
- enum:
- - renesas,rza-wdt
-then:
- required:
- - power-domains
- - resets
+allOf:
+ - $ref: "watchdog.yaml#"
+
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rza-wdt
+ then:
+ required:
+ - power-domains
+ - resets
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - renesas,rzg2l-wdt
+ then:
+ properties:
+ interrupts:
+ maxItems: 2
+ interrupt-names:
+ items:
+ - const: wdt
+ - const: perrout
+ clocks:
+ items:
+ - description: Register access clock
+ - description: Main clock
+ clock-names:
+ items:
+ - const: pclk
+ - const: oscclk
+ required:
+ - clock-names
+ - interrupt-names
+ else:
+ properties:
+ interrupts:
+ maxItems: 1
+ clocks:
+ maxItems: 1
additionalProperties: false
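Under the new conditional, an RZ/G2L watchdog carries two named interrupts
and two named clocks; a sketch with placeholder specifiers:

    wdt0: watchdog@12800800 {
            compatible = "renesas,r9a07g044-wdt", "renesas,rzg2l-wdt";
            reg = <0x12800800 0x400>;
            interrupts = <GIC_SPI 49 IRQ_TYPE_LEVEL_HIGH>,
                         <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>;
            interrupt-names = "wdt", "perrout";
            clocks = <&cpg 2>, <&cpg 3>;
            clock-names = "pclk", "oscclk";
            power-domains = <&cpg>;
            resets = <&cpg 4>;
    };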
diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml
index 76cb9586ee00..b08373336b16 100644
--- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.yaml
@@ -22,25 +22,32 @@ properties:
- samsung,exynos5250-wdt # for Exynos5250
- samsung,exynos5420-wdt # for Exynos5420
- samsung,exynos7-wdt # for Exynos7
+ - samsung,exynos850-wdt # for Exynos850
reg:
maxItems: 1
clocks:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
clock-names:
- items:
- - const: watchdog
+ minItems: 1
+ maxItems: 2
interrupts:
maxItems: 1
+ samsung,cluster-index:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Index of CPU cluster on which watchdog is running (in case of Exynos850)
+
samsung,syscon-phandle:
$ref: /schemas/types.yaml#/definitions/phandle
description:
- Phandle to the PMU system controller node (in case of Exynos5250
- and Exynos5420).
+ Phandle to the PMU system controller node (in case of Exynos5250,
+ Exynos5420, Exynos7 and Exynos850).
required:
- compatible
@@ -58,9 +65,40 @@ allOf:
enum:
- samsung,exynos5250-wdt
- samsung,exynos5420-wdt
+ - samsung,exynos7-wdt
+ - samsung,exynos850-wdt
then:
required:
- samsung,syscon-phandle
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - samsung,exynos850-wdt
+ then:
+ properties:
+ clocks:
+ items:
+ - description: Bus clock, used for register interface
+ - description: Source clock (driving watchdog counter)
+ clock-names:
+ items:
+ - const: watchdog
+ - const: watchdog_src
+ samsung,cluster-index:
+ enum: [0, 1]
+ required:
+ - samsung,cluster-index
+ else:
+ properties:
+ clocks:
+ items:
+ - description: Bus clock, which is also a source clock
+ clock-names:
+ items:
+ - const: watchdog
+ samsung,cluster-index: false
unevaluatedProperties: false
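Putting the Exynos850 branch together, a node needs both clocks, the PMU
phandle and a cluster index; a sketch in which the address and clock
specifiers are assumptions:

    watchdog@10050000 {
            compatible = "samsung,exynos850-wdt";
            reg = <0x10050000 0x100>;
            interrupts = <GIC_SPI 228 IRQ_TYPE_LEVEL_HIGH>;
            clocks = <&cmu_peri 32>, <&clock_osc 0>;
            clock-names = "watchdog", "watchdog_src";
            samsung,syscon-phandle = <&pmu_system_controller>;
            samsung,cluster-index = <0>;
            timeout-sec = <30>;
    };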
diff --git a/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml b/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml
index 054584d7543a..2f33635876ff 100644
--- a/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml
+++ b/Documentation/devicetree/bindings/watchdog/ti,rti-wdt.yaml
@@ -34,12 +34,6 @@ properties:
power-domains:
maxItems: 1
- assigned-clocks:
- maxItems: 1
-
- assigned-clocks-parents:
- maxItems: 1
-
required:
- compatible
- reg
diff --git a/Documentation/driver-api/auxiliary_bus.rst b/Documentation/driver-api/auxiliary_bus.rst
index ef902daf0d68..cec84908fbc0 100644
--- a/Documentation/driver-api/auxiliary_bus.rst
+++ b/Documentation/driver-api/auxiliary_bus.rst
@@ -6,231 +6,45 @@
Auxiliary Bus
=============
-In some subsystems, the functionality of the core device (PCI/ACPI/other) is
-too complex for a single device to be managed by a monolithic driver
-(e.g. Sound Open Firmware), multiple devices might implement a common
-intersection of functionality (e.g. NICs + RDMA), or a driver may want to
-export an interface for another subsystem to drive (e.g. SIOV Physical Function
-export Virtual Function management). A split of the functionality into child-
-devices representing sub-domains of functionality makes it possible to
-compartmentalize, layer, and distribute domain-specific concerns via a Linux
-device-driver model.
-
-An example for this kind of requirement is the audio subsystem where a single
-IP is handling multiple entities such as HDMI, Soundwire, local devices such as
-mics/speakers etc. The split for the core's functionality can be arbitrary or
-be defined by the DSP firmware topology and include hooks for test/debug. This
-allows for the audio core device to be minimal and focused on hardware-specific
-control and communication.
-
-Each auxiliary_device represents a part of its parent functionality. The
-generic behavior can be extended and specialized as needed by encapsulating an
-auxiliary_device within other domain-specific structures and the use of .ops
-callbacks. Devices on the auxiliary bus do not share any structures and the use
-of a communication channel with the parent is domain-specific.
-
-Note that ops are intended as a way to augment instance behavior within a class
-of auxiliary devices, it is not the mechanism for exporting common
-infrastructure from the parent. Consider EXPORT_SYMBOL_NS() to convey
-infrastructure from the parent module to the auxiliary module(s).
-
+.. kernel-doc:: drivers/base/auxiliary.c
+ :doc: PURPOSE
When Should the Auxiliary Bus Be Used
=====================================
-The auxiliary bus is to be used when a driver and one or more kernel modules,
-who share a common header file with the driver, need a mechanism to connect and
-provide access to a shared object allocated by the auxiliary_device's
-registering driver. The registering driver for the auxiliary_device(s) and the
-kernel module(s) registering auxiliary_drivers can be from the same subsystem,
-or from multiple subsystems.
-
-The emphasis here is on a common generic interface that keeps subsystem
-customization out of the bus infrastructure.
-
-One example is a PCI network device that is RDMA-capable and exports a child
-device to be driven by an auxiliary_driver in the RDMA subsystem. The PCI
-driver allocates and registers an auxiliary_device for each physical
-function on the NIC. The RDMA driver registers an auxiliary_driver that claims
-each of these auxiliary_devices. This conveys data/ops published by the parent
-PCI device/driver to the RDMA auxiliary_driver.
-
-Another use case is for the PCI device to be split out into multiple sub
-functions. For each sub function an auxiliary_device is created. A PCI sub
-function driver binds to such devices that creates its own one or more class
-devices. A PCI sub function auxiliary device is likely to be contained in a
-struct with additional attributes such as user defined sub function number and
-optional attributes such as resources and a link to the parent device. These
-attributes could be used by systemd/udev; and hence should be initialized
-before a driver binds to an auxiliary_device.
-
-A key requirement for utilizing the auxiliary bus is that there is no
-dependency on a physical bus, device, register accesses or regmap support.
-These individual devices split from the core cannot live on the platform bus as
-they are not physical devices that are controlled by DT/ACPI. The same
-argument applies for not using MFD in this scenario as MFD relies on individual
-function devices being physical devices.
-
-Auxiliary Device
-================
-
-An auxiliary_device represents a part of its parent device's functionality. It
-is given a name that, combined with the registering drivers KBUILD_MODNAME,
-creates a match_name that is used for driver binding, and an id that combined
-with the match_name provide a unique name to register with the bus subsystem.
-
-Registering an auxiliary_device is a two-step process. First call
-auxiliary_device_init(), which checks several aspects of the auxiliary_device
-struct and performs a device_initialize(). After this step completes, any
-error state must have a call to auxiliary_device_uninit() in its resolution path.
-The second step in registering an auxiliary_device is to perform a call to
-auxiliary_device_add(), which sets the name of the device and add the device to
-the bus.
-
-Unregistering an auxiliary_device is also a two-step process to mirror the
-register process. First call auxiliary_device_delete(), then call
-auxiliary_device_uninit().
-
-.. code-block:: c
-
- struct auxiliary_device {
- struct device dev;
- const char *name;
- u32 id;
- };
-
-If two auxiliary_devices both with a match_name "mod.foo" are registered onto
-the bus, they must have unique id values (e.g. "x" and "y") so that the
-registered devices names are "mod.foo.x" and "mod.foo.y". If match_name + id
-are not unique, then the device_add fails and generates an error message.
-
-The auxiliary_device.dev.type.release or auxiliary_device.dev.release must be
-populated with a non-NULL pointer to successfully register the auxiliary_device.
-
-The auxiliary_device.dev.parent must also be populated.
+.. kernel-doc:: drivers/base/auxiliary.c
+ :doc: USAGE
+
+
+Auxiliary Device Creation
+=========================
+
+.. kernel-doc:: include/linux/auxiliary_bus.h
+ :identifiers: auxiliary_device
+
+.. kernel-doc:: drivers/base/auxiliary.c
+ :identifiers: auxiliary_device_init __auxiliary_device_add
+ auxiliary_find_device
Auxiliary Device Memory Model and Lifespan
------------------------------------------
-The registering driver is the entity that allocates memory for the
-auxiliary_device and register it on the auxiliary bus. It is important to note
-that, as opposed to the platform bus, the registering driver is wholly
-responsible for the management for the memory used for the driver object.
-
-A parent object, defined in the shared header file, contains the
-auxiliary_device. It also contains a pointer to the shared object(s), which
-also is defined in the shared header. Both the parent object and the shared
-object(s) are allocated by the registering driver. This layout allows the
-auxiliary_driver's registering module to perform a container_of() call to go
-from the pointer to the auxiliary_device, that is passed during the call to the
-auxiliary_driver's probe function, up to the parent object, and then have
-access to the shared object(s).
-
-The memory for the auxiliary_device is freed only in its release() callback
-flow as defined by its registering driver.
-
-The memory for the shared object(s) must have a lifespan equal to, or greater
-than, the lifespan of the memory for the auxiliary_device. The auxiliary_driver
-should only consider that this shared object is valid as long as the
-auxiliary_device is still registered on the auxiliary bus. It is up to the
-registering driver to manage (e.g. free or keep available) the memory for the
-shared object beyond the life of the auxiliary_device.
-
-The registering driver must unregister all auxiliary devices before its own
-driver.remove() is completed.
+.. kernel-doc:: include/linux/auxiliary_bus.h
+ :doc: DEVICE_LIFESPAN
+
Auxiliary Drivers
=================
-Auxiliary drivers follow the standard driver model convention, where
-discovery/enumeration is handled by the core, and drivers
-provide probe() and remove() methods. They support power management
-and shutdown notifications using the standard conventions.
-
-.. code-block:: c
+.. kernel-doc:: include/linux/auxiliary_bus.h
+ :identifiers: auxiliary_driver module_auxiliary_driver
- struct auxiliary_driver {
- int (*probe)(struct auxiliary_device *,
- const struct auxiliary_device_id *id);
- void (*remove)(struct auxiliary_device *);
- void (*shutdown)(struct auxiliary_device *);
- int (*suspend)(struct auxiliary_device *, pm_message_t);
- int (*resume)(struct auxiliary_device *);
- struct device_driver driver;
- const struct auxiliary_device_id *id_table;
- };
-
-Auxiliary drivers register themselves with the bus by calling
-auxiliary_driver_register(). The id_table contains the match_names of auxiliary
-devices that a driver can bind with.
+.. kernel-doc:: drivers/base/auxiliary.c
+ :identifiers: __auxiliary_driver_register auxiliary_driver_unregister
Example Usage
=============
-Auxiliary devices are created and registered by a subsystem-level core device
-that needs to break up its functionality into smaller fragments. One way to
-extend the scope of an auxiliary_device is to encapsulate it within a domain-
-pecific structure defined by the parent device. This structure contains the
-auxiliary_device and any associated shared data/callbacks needed to establish
-the connection with the parent.
-
-An example is:
-
-.. code-block:: c
-
- struct foo {
- struct auxiliary_device auxdev;
- void (*connect)(struct auxiliary_device *auxdev);
- void (*disconnect)(struct auxiliary_device *auxdev);
- void *data;
- };
-
-The parent device then registers the auxiliary_device by calling
-auxiliary_device_init(), and then auxiliary_device_add(), with the pointer to
-the auxdev member of the above structure. The parent provides a name for the
-auxiliary_device that, combined with the parent's KBUILD_MODNAME, creates a
-match_name that is be used for matching and binding with a driver.
-
-Whenever an auxiliary_driver is registered, based on the match_name, the
-auxiliary_driver's probe() is invoked for the matching devices. The
-auxiliary_driver can also be encapsulated inside custom drivers that make the
-core device's functionality extensible by adding additional domain-specific ops
-as follows:
-
-.. code-block:: c
-
- struct my_ops {
- void (*send)(struct auxiliary_device *auxdev);
- void (*receive)(struct auxiliary_device *auxdev);
- };
-
-
- struct my_driver {
- struct auxiliary_driver auxiliary_drv;
- const struct my_ops ops;
- };
-
-An example of this type of usage is:
-
-.. code-block:: c
-
- const struct auxiliary_device_id my_auxiliary_id_table[] = {
- { .name = "foo_mod.foo_dev" },
- { },
- };
-
- const struct my_ops my_custom_ops = {
- .send = my_tx,
- .receive = my_rx,
- };
-
- const struct my_driver my_drv = {
- .auxiliary_drv = {
- .name = "myauxiliarydrv",
- .id_table = my_auxiliary_id_table,
- .probe = my_probe,
- .remove = my_remove,
- .shutdown = my_shutdown,
- },
- .ops = my_custom_ops,
- };
+.. kernel-doc:: drivers/base/auxiliary.c
+ :doc: EXAMPLE
+
diff --git a/Documentation/driver-api/dmaengine/dmatest.rst b/Documentation/driver-api/dmaengine/dmatest.rst
index ee268d445d38..cf9859cd0b43 100644
--- a/Documentation/driver-api/dmaengine/dmatest.rst
+++ b/Documentation/driver-api/dmaengine/dmatest.rst
@@ -6,6 +6,16 @@ Andy Shevchenko <andriy.shevchenko@linux.intel.com>
This small document introduces how to test DMA drivers using dmatest module.
+The dmatest module tests DMA memcpy, memset, XOR and RAID6 P+Q operations using
+various lengths and various offsets into the source and destination buffers. It
+will initialize both buffers with a repeatable pattern and verify that the DMA
+engine copies the requested region and nothing more. It will also verify that
+the bytes aren't swapped around, and that the source buffer isn't modified.
+
+The dmatest module can be configured to test a specific channel. It can also
+test multiple channels at the same time, and it can start multiple threads
+competing for the same channel.
+
.. note::
The test suite works only on the channels that have at least one
capability of the following: DMA_MEMCPY (memory-to-memory), DMA_MEMSET
@@ -143,13 +153,14 @@ Part 5 - Handling channel allocation
Allocating Channels
-------------------
-Channels are required to be configured prior to starting the test run.
-Attempting to run the test without configuring the channels will fail.
+Channels do not need to be configured prior to starting a test run. Attempting
+to run the test without configuring the channels will result in testing any
+channels that are available.
Example::
% echo 1 > /sys/module/dmatest/parameters/run
- dmatest: Could not start test, no channels configured
+ dmatest: No channels configured, continue with any
Channels are registered using the "channel" parameter. Channels can be requested by their
name; once requested, the channel is registered and a pending thread is added to the test list.
diff --git a/Documentation/driver-api/dmaengine/provider.rst b/Documentation/driver-api/dmaengine/provider.rst
index ddb0a81a796c..0072c9c7efd3 100644
--- a/Documentation/driver-api/dmaengine/provider.rst
+++ b/Documentation/driver-api/dmaengine/provider.rst
@@ -162,6 +162,29 @@ Currently, the types available are:
- The device is able to do memory to memory copies
+- - DMA_MEMCPY_SG
+
+ - The device supports memory to memory scatter-gather transfers.
+
+ - Even though a plain memcpy can look like a particular case of a
+ scatter-gather transfer, with a single chunk to copy, it's a distinct
+ transaction type in the mem2mem transfer case. This is because some very
+ simple devices might be able to do contiguous single-chunk memory copies,
+ but have no support for more complex SG transfers.
+
+ - No matter what the overall size of the combined chunks for source and
+ destination is, only as many bytes as the smallest of the two will be
+ transmitted. That means the number and size of the scatter-gather buffers in
+    both lists need not be the same, and that the operation is functionally
+    equivalent to a ``strncpy`` where the ``count`` argument equals the smallest
+ total size of the two scatter-gather list buffers.
+
+ - It's usually used for copying pixel data between host memory and
+ memory-mapped GPU device memory, such as found on modern PCI video graphics
+ cards. The most immediate example is the OpenGL API function
+    ``glReadPixels()``, which might require a verbatim copy of a huge framebuffer
+ from local device memory onto host memory.
+
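+  - As a purely illustrative, hedged sketch: a client could drive this
+    capability through a prep helper along the lines of
+    dmaengine_prep_dma_memcpy_sg() (the helper name is an assumption here,
+    not guaranteed by this document), submitting the resulting descriptor in
+    the usual dmaengine way::
+
+	struct dma_async_tx_descriptor *desc;
+	dma_cookie_t cookie;
+
+	/* Only min(total dst size, total src size) bytes are copied. */
+	desc = dmaengine_prep_dma_memcpy_sg(chan, dst_sg, dst_nents,
+					    src_sg, src_nents,
+					    DMA_PREP_INTERRUPT);
+	if (!desc)
+		return -ENOMEM;
+
+	cookie = dmaengine_submit(desc);
+	dma_async_issue_pending(chan);
+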
- DMA_XOR
- The device is able to perform XOR operations on memory areas
diff --git a/Documentation/driver-api/firewire.rst b/Documentation/driver-api/firewire.rst
index 94a2d7f01d99..d3cfa73cbb2b 100644
--- a/Documentation/driver-api/firewire.rst
+++ b/Documentation/driver-api/firewire.rst
@@ -19,7 +19,7 @@ of kernel interfaces is available via exported symbols in `firewire-core` module
Firewire char device data structures
====================================
-.. include:: /ABI/stable/firewire-cdev
+.. include:: ../ABI/stable/firewire-cdev
:literal:
.. kernel-doc:: include/uapi/linux/firewire-cdev.h
@@ -28,7 +28,7 @@ Firewire char device data structures
Firewire device probing and sysfs interfaces
============================================
-.. include:: /ABI/stable/sysfs-bus-firewire
+.. include:: ../ABI/stable/sysfs-bus-firewire
:literal:
.. kernel-doc:: drivers/firewire/core-device.c
diff --git a/Documentation/driver-api/fpga/fpga-bridge.rst b/Documentation/driver-api/fpga/fpga-bridge.rst
index 8d650b4e2ce6..604208534095 100644
--- a/Documentation/driver-api/fpga/fpga-bridge.rst
+++ b/Documentation/driver-api/fpga/fpga-bridge.rst
@@ -6,8 +6,7 @@ API to implement a new FPGA bridge
* struct fpga_bridge - The FPGA Bridge structure
* struct fpga_bridge_ops - Low level Bridge driver ops
-* devm_fpga_bridge_create() - Allocate and init a bridge struct
-* fpga_bridge_register() - Register a bridge
+* fpga_bridge_register() - Create and register a bridge
* fpga_bridge_unregister() - Unregister a bridge
.. kernel-doc:: include/linux/fpga/fpga-bridge.h
@@ -17,9 +16,6 @@ API to implement a new FPGA bridge
:functions: fpga_bridge_ops
.. kernel-doc:: drivers/fpga/fpga-bridge.c
- :functions: devm_fpga_bridge_create
-
-.. kernel-doc:: drivers/fpga/fpga-bridge.c
:functions: fpga_bridge_register
.. kernel-doc:: drivers/fpga/fpga-bridge.c
diff --git a/Documentation/driver-api/fpga/fpga-mgr.rst b/Documentation/driver-api/fpga/fpga-mgr.rst
index 4d926b452cb3..42c01f396dce 100644
--- a/Documentation/driver-api/fpga/fpga-mgr.rst
+++ b/Documentation/driver-api/fpga/fpga-mgr.rst
@@ -24,7 +24,7 @@ How to support a new FPGA device
--------------------------------
To add another FPGA manager, write a driver that implements a set of ops. The
-probe function calls fpga_mgr_register(), such as::
+probe function calls fpga_mgr_register() or fpga_mgr_register_full(), such as::
static const struct fpga_manager_ops socfpga_fpga_ops = {
.write_init = socfpga_fpga_ops_configure_init,
@@ -49,14 +49,14 @@ probe function calls fpga_mgr_register(), such as::
* them in priv
*/
- mgr = devm_fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager",
- &socfpga_fpga_ops, priv);
- if (!mgr)
- return -ENOMEM;
+ mgr = fpga_mgr_register(dev, "Altera SOCFPGA FPGA Manager",
+ &socfpga_fpga_ops, priv);
+ if (IS_ERR(mgr))
+ return PTR_ERR(mgr);
platform_set_drvdata(pdev, mgr);
- return fpga_mgr_register(mgr);
+ return 0;
}
static int socfpga_fpga_remove(struct platform_device *pdev)
@@ -68,6 +68,11 @@ probe function calls fpga_mgr_register(), such as::
return 0;
}
+Alternatively, the probe function could call one of the resource managed
+register functions, devm_fpga_mgr_register() or devm_fpga_mgr_register_full().
+When these functions are used, the parameter syntax is the same, but the call
+to fpga_mgr_unregister() should be removed. In the above example, the
+socfpga_fpga_remove() function would not be required.
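+
+As a hedged illustration only (the ``myfpga`` names below are invented for
+the example, not taken from the kernel sources), a probe function using the
+resource managed variant might look like this::
+
+	static int myfpga_probe(struct platform_device *pdev)
+	{
+		struct fpga_manager *mgr;
+		struct myfpga_priv *priv;
+
+		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+		if (!priv)
+			return -ENOMEM;
+
+		mgr = devm_fpga_mgr_register(&pdev->dev, "My FPGA Manager",
+					     &myfpga_ops, priv);
+		/*
+		 * No explicit unregister call and no remove() counterpart
+		 * are needed; unregistration happens automatically when the
+		 * device is torn down.
+		 */
+		return PTR_ERR_OR_ZERO(mgr);
+	}
+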
The ops will implement whatever device specific register writes are needed to
do the programming sequence for this particular FPGA. These ops return 0 for
@@ -104,8 +109,14 @@ API for implementing a new FPGA Manager driver
* ``fpga_mgr_states`` - Values for :c:expr:`fpga_manager->state`.
* struct fpga_manager - the FPGA manager struct
* struct fpga_manager_ops - Low level FPGA manager driver ops
-* devm_fpga_mgr_create() - Allocate and init a manager struct
-* fpga_mgr_register() - Register an FPGA manager
+* struct fpga_manager_info - Parameter structure for fpga_mgr_register_full()
+* fpga_mgr_register_full() - Create and register an FPGA manager using the
+ fpga_mgr_info structure to provide the full flexibility of options
+* fpga_mgr_register() - Create and register an FPGA manager using standard
+ arguments
+* devm_fpga_mgr_register_full() - Resource managed version of
+ fpga_mgr_register_full()
+* devm_fpga_mgr_register() - Resource managed version of fpga_mgr_register()
* fpga_mgr_unregister() - Unregister an FPGA manager
.. kernel-doc:: include/linux/fpga/fpga-mgr.h
@@ -117,11 +128,20 @@ API for implementing a new FPGA Manager driver
.. kernel-doc:: include/linux/fpga/fpga-mgr.h
:functions: fpga_manager_ops
+.. kernel-doc:: include/linux/fpga/fpga-mgr.h
+ :functions: fpga_manager_info
+
.. kernel-doc:: drivers/fpga/fpga-mgr.c
- :functions: devm_fpga_mgr_create
+ :functions: fpga_mgr_register_full
.. kernel-doc:: drivers/fpga/fpga-mgr.c
:functions: fpga_mgr_register
.. kernel-doc:: drivers/fpga/fpga-mgr.c
+ :functions: devm_fpga_mgr_register_full
+
+.. kernel-doc:: drivers/fpga/fpga-mgr.c
+ :functions: devm_fpga_mgr_register
+
+.. kernel-doc:: drivers/fpga/fpga-mgr.c
:functions: fpga_mgr_unregister
diff --git a/Documentation/driver-api/fpga/fpga-region.rst b/Documentation/driver-api/fpga/fpga-region.rst
index 2636a27c11b2..dc55d60a0b4a 100644
--- a/Documentation/driver-api/fpga/fpga-region.rst
+++ b/Documentation/driver-api/fpga/fpga-region.rst
@@ -46,8 +46,11 @@ API to add a new FPGA region
----------------------------
* struct fpga_region - The FPGA region struct
-* devm_fpga_region_create() - Allocate and init a region struct
-* fpga_region_register() - Register an FPGA region
+* struct fpga_region_info - Parameter structure for fpga_region_register_full()
+* fpga_region_register_full() - Create and register an FPGA region using the
+ fpga_region_info structure to provide the full flexibility of options
+* fpga_region_register() - Create and register an FPGA region using standard
+ arguments
* fpga_region_unregister() - Unregister an FPGA region
The FPGA region's probe function will need to get a reference to the FPGA
@@ -75,8 +78,11 @@ following APIs to handle building or tearing down that list.
.. kernel-doc:: include/linux/fpga/fpga-region.h
:functions: fpga_region
+.. kernel-doc:: include/linux/fpga/fpga-region.h
+ :functions: fpga_region_info
+
.. kernel-doc:: drivers/fpga/fpga-region.c
- :functions: devm_fpga_region_create
+ :functions: fpga_region_register_full
.. kernel-doc:: drivers/fpga/fpga-region.c
:functions: fpga_region_register
diff --git a/Documentation/driver-api/generic-counter.rst b/Documentation/driver-api/generic-counter.rst
index 1b487a331467..71ccc30e586b 100644
--- a/Documentation/driver-api/generic-counter.rst
+++ b/Documentation/driver-api/generic-counter.rst
@@ -262,11 +262,11 @@ order to communicate with the device: to read and write various Signals
and Counts, and to set and get the "action mode" and "function mode" for
various Synapses and Counts respectively.
-A defined counter_device structure may be registered to the system by
-passing it to the counter_register function, and unregistered by passing
-it to the counter_unregister function. Similarly, the
-devm_counter_register function may be used if device memory-managed
-registration is desired.
+A counter_device structure is allocated using counter_alloc() and then
+registered to the system by passing it to the counter_add() function, and
+unregistered by passing it to the counter_unregister() function. There are
+device managed variants of these functions: devm_counter_alloc() and
+devm_counter_add().
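+
+As a hedged sketch of this allocate-then-add flow (the ``mycnt`` names are
+invented for illustration; check the counter headers for the exact
+signatures)::
+
+	static int mycnt_probe(struct platform_device *pdev)
+	{
+		struct counter_device *counter;
+		struct mycnt_priv *priv;
+
+		/* Allocate a counter with room for our private state. */
+		counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
+		if (!counter)
+			return -ENOMEM;
+		priv = counter_priv(counter);
+
+		counter->name = "mycnt";
+		counter->ops = &mycnt_ops;
+		/* ...describe signals, counts and synapses here... */
+
+		return devm_counter_add(&pdev->dev, counter);
+	}
+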
The struct counter_comp structure is used to define counter extensions
for Signals, Synapses, and Counts.
diff --git a/Documentation/driver-api/pci/pci.rst b/Documentation/driver-api/pci/pci.rst
index ca85e5e78b2c..4843cfad4f60 100644
--- a/Documentation/driver-api/pci/pci.rst
+++ b/Documentation/driver-api/pci/pci.rst
@@ -13,7 +13,7 @@ PCI Support Library
.. kernel-doc:: drivers/pci/search.c
:export:
-.. kernel-doc:: drivers/pci/msi.c
+.. kernel-doc:: drivers/pci/msi/msi.c
:export:
.. kernel-doc:: drivers/pci/bus.c
diff --git a/Documentation/driver-api/serial/index.rst b/Documentation/driver-api/serial/index.rst
index 8f7d7af3b90b..7eb21a695fc3 100644
--- a/Documentation/driver-api/serial/index.rst
+++ b/Documentation/driver-api/serial/index.rst
@@ -9,7 +9,6 @@ Support for Serial devices
driver
- tty
Serial drivers
==============
diff --git a/Documentation/driver-api/serial/n_gsm.rst b/Documentation/driver-api/serial/n_gsm.rst
index 8fe723ab9c67..49956509ad73 100644
--- a/Documentation/driver-api/serial/n_gsm.rst
+++ b/Documentation/driver-api/serial/n_gsm.rst
@@ -18,9 +18,12 @@ How to use it
1.1 initialize the modem in 0710 mux mode (usually AT+CMUX= command) through
its serial port. Depending on the modem used, you can pass more or less
parameters to this command.
+
1.2 switch the serial line to using the n_gsm line discipline by using
TIOCSETD ioctl.
+
1.3 configure the mux using GSMIOC_GETCONF / GSMIOC_SETCONF ioctl.
+
1.4 obtain base gsmtty number for the used serial port.
Major parts of the initialization program :
@@ -95,10 +98,13 @@ Major parts of the initialization program :
2.1 receive string "AT+CMUX= command" through its serial port, initialize
mux mode config
+
2.2 switch the serial line to using the n_gsm line discipline by using
TIOCSETD ioctl.
+
2.3 configure the mux using GSMIOC_GETCONF / GSMIOC_SETCONF ioctl.
-2.4 obtain base gsmtty number for the used serial port,
+
+2.4 obtain base gsmtty number for the used serial port::
#include <stdio.h>
#include <stdint.h>
diff --git a/Documentation/driver-api/serial/tty.rst b/Documentation/driver-api/serial/tty.rst
deleted file mode 100644
index 4b709f392713..000000000000
--- a/Documentation/driver-api/serial/tty.rst
+++ /dev/null
@@ -1,328 +0,0 @@
-=================
-The Lockronomicon
-=================
-
-Your guide to the ancient and twisted locking policies of the tty layer and
-the warped logic behind them. Beware all ye who read on.
-
-
-Line Discipline
----------------
-
-Line disciplines are registered with tty_register_ldisc() passing the
-discipline number and the ldisc structure. At the point of registration the
-discipline must be ready to use and it is possible it will get used before
-the call returns success. If the call returns an error then it won't get
-called. Do not re-use ldisc numbers as they are part of the userspace ABI
-and writing over an existing ldisc will cause demons to eat your computer.
-After the return the ldisc data has been copied so you may free your own
-copy of the structure. You must not re-register over the top of the line
-discipline even with the same data or your computer again will be eaten by
-demons.
-
-In order to remove a line discipline call tty_unregister_ldisc().
-In ancient times this always worked. In modern times the function will
-return -EBUSY if the ldisc is currently in use. Since the ldisc referencing
-code manages the module counts this should not usually be a concern.
-
-Heed this warning: the reference count field of the registered copies of the
-tty_ldisc structure in the ldisc table counts the number of lines using this
-discipline. The reference count of the tty_ldisc structure within a tty
-counts the number of active users of the ldisc at this instant. In effect it
-counts the number of threads of execution within an ldisc method (plus those
-about to enter and exit although this detail matters not).
-
-Line Discipline Methods
------------------------
-
-TTY side interfaces
-^^^^^^^^^^^^^^^^^^^
-
-======================= =======================================================
-open() Called when the line discipline is attached to
- the terminal. No other call into the line
- discipline for this tty will occur until it
- completes successfully. Should initialize any
- state needed by the ldisc, and set receive_room
- in the tty_struct to the maximum amount of data
- the line discipline is willing to accept from the
- driver with a single call to receive_buf().
- Returning an error will prevent the ldisc from
- being attached. Can sleep.
-
-close() This is called on a terminal when the line
- discipline is being unplugged. At the point of
- execution no further users will enter the
- ldisc code for this tty. Can sleep.
-
-hangup() Called when the tty line is hung up.
- The line discipline should cease I/O to the tty.
- No further calls into the ldisc code will occur.
- Can sleep.
-
-read() (optional) A process requests reading data from
- the line. Multiple read calls may occur in parallel
- and the ldisc must deal with serialization issues.
- If not defined, the process will receive an EIO
- error. May sleep.
-
-write() (optional) A process requests writing data to the
- line. Multiple write calls are serialized by the
- tty layer for the ldisc. If not defined, the
- process will receive an EIO error. May sleep.
-
-flush_buffer() (optional) May be called at any point between
- open and close, and instructs the line discipline
- to empty its input buffer.
-
-set_termios() (optional) Called on termios structure changes.
- The caller passes the old termios data and the
- current data is in the tty. Called under the
- termios semaphore so allowed to sleep. Serialized
- against itself only.
-
-poll() (optional) Check the status for the poll/select
- calls. Multiple poll calls may occur in parallel.
- May sleep.
-
-ioctl() (optional) Called when an ioctl is handed to the
- tty layer that might be for the ldisc. Multiple
- ioctl calls may occur in parallel. May sleep.
-
-compat_ioctl() (optional) Called when a 32 bit ioctl is handed
- to the tty layer that might be for the ldisc.
- Multiple ioctl calls may occur in parallel.
- May sleep.
-======================= =======================================================
-
-Driver Side Interfaces
-^^^^^^^^^^^^^^^^^^^^^^
-
-======================= =======================================================
-receive_buf() (optional) Called by the low-level driver to hand
- a buffer of received bytes to the ldisc for
- processing. The number of bytes is guaranteed not
- to exceed the current value of tty->receive_room.
- All bytes must be processed.
-
-receive_buf2() (optional) Called by the low-level driver to hand
- a buffer of received bytes to the ldisc for
- processing. Returns the number of bytes processed.
-
- If both receive_buf() and receive_buf2() are
- defined, receive_buf2() should be preferred.
-
-write_wakeup() May be called at any point between open and close.
- The TTY_DO_WRITE_WAKEUP flag indicates if a call
- is needed but always races versus calls. Thus the
- ldisc must be careful about setting order and to
- handle unexpected calls. Must not sleep.
-
- The driver is forbidden from calling this directly
- from the ->write call from the ldisc as the ldisc
- is permitted to call the driver write method from
- this function. In such a situation defer it.
-
-dcd_change() Report to the tty line the current DCD pin status
- changes and the relative timestamp. The timestamp
- cannot be NULL.
-======================= =======================================================
-
-
-Driver Access
-^^^^^^^^^^^^^
-
-Line discipline methods can call the following methods of the underlying
-hardware driver through the function pointers within the tty->driver
-structure:
-
-======================= =======================================================
-write() Write a block of characters to the tty device.
- Returns the number of characters accepted. The
- character buffer passed to this method is already
- in kernel space.
-
-put_char() Queues a character for writing to the tty device.
- If there is no room in the queue, the character is
- ignored.
-
-flush_chars() (Optional) If defined, must be called after
- queueing characters with put_char() in order to
- start transmission.
-
-write_room() Returns the numbers of characters the tty driver
- will accept for queueing to be written.
-
-ioctl() Invoke device specific ioctl.
- Expects data pointers to refer to userspace.
- Returns ENOIOCTLCMD for unrecognized ioctl numbers.
-
-set_termios() Notify the tty driver that the device's termios
- settings have changed. New settings are in
- tty->termios. Previous settings should be passed in
- the "old" argument.
-
- The API is defined such that the driver should return
- the actual modes selected. This means that the
- driver function is responsible for modifying any
- bits in the request it cannot fulfill to indicate
- the actual modes being used. A device with no
- hardware capability for change (e.g. a USB dongle or
- virtual port) can provide NULL for this method.
-
-throttle() Notify the tty driver that input buffers for the
- line discipline are close to full, and it should
- somehow signal that no more characters should be
- sent to the tty.
-
-unthrottle() Notify the tty driver that characters can now be
- sent to the tty without fear of overrunning the
- input buffers of the line disciplines.
-
-stop() Ask the tty driver to stop outputting characters
- to the tty device.
-
-start() Ask the tty driver to resume sending characters
- to the tty device.
-
-hangup() Ask the tty driver to hang up the tty device.
-
-break_ctl() (Optional) Ask the tty driver to turn on or off
- BREAK status on the RS-232 port. If state is -1,
- then the BREAK status should be turned on; if
- state is 0, then BREAK should be turned off.
- If this routine is not implemented, use ioctls
- TIOCSBRK / TIOCCBRK instead.
-
-wait_until_sent() Waits until the device has written out all of the
- characters in its transmitter FIFO.
-
-send_xchar() Send a high-priority XON/XOFF character to the device.
-======================= =======================================================
-
-
-Flags
-^^^^^
-
-Line discipline methods have access to tty->flags field containing the
-following interesting flags:
-
-======================= =======================================================
-TTY_THROTTLED Driver input is throttled. The ldisc should call
- tty->driver->unthrottle() in order to resume
- reception when it is ready to process more data.
-
-TTY_DO_WRITE_WAKEUP If set, causes the driver to call the ldisc's
- write_wakeup() method in order to resume
- transmission when it can accept more data
- to transmit.
-
-TTY_IO_ERROR If set, causes all subsequent userspace read/write
- calls on the tty to fail, returning -EIO.
-
-TTY_OTHER_CLOSED Device is a pty and the other side has closed.
-
-TTY_NO_WRITE_SPLIT Prevent driver from splitting up writes into
- smaller chunks.
-======================= =======================================================
-
-
-Locking
-^^^^^^^
-
-Callers to the line discipline functions from the tty layer are required to
-take line discipline locks. The same is true of calls from the driver side
-but not yet enforced.
-
-Three calls are now provided::
-
- ldisc = tty_ldisc_ref(tty);
-
-takes a handle to the line discipline in the tty and returns it. If no ldisc
-is currently attached or the ldisc is being closed and re-opened at this
-point then NULL is returned. While this handle is held the ldisc will not
-change or go away::
-
- tty_ldisc_deref(ldisc)
-
-Returns the ldisc reference and allows the ldisc to be closed. Returning the
-reference takes away your right to call the ldisc functions until you take
-a new reference::
-
- ldisc = tty_ldisc_ref_wait(tty);
-
-Performs the same function as tty_ldisc_ref except that it will wait for an
-ldisc change to complete and then return a reference to the new ldisc.
-
-While these functions are slightly slower than the old code they should have
-minimal impact as most receive logic uses the flip buffers and they only
-need to take a reference when they push bits up through the driver.
-
-A caution: The ldisc->open(), ldisc->close() and driver->set_ldisc
-functions are called with the ldisc unavailable. Thus tty_ldisc_ref will
-fail in this situation if used within these functions. Ldisc and driver
-code calling its own functions must be careful in this case.
-
-
-Driver Interface
-----------------
-
-======================= =======================================================
-open() Called when a device is opened. May sleep
-
-close() Called when a device is closed. At the point of
- return from this call the driver must make no
- further ldisc calls of any kind. May sleep
-
-write() Called to write bytes to the device. May not
- sleep. May occur in parallel in special cases.
- Because this includes panic paths drivers generally
- shouldn't try and do clever locking here.
-
-put_char() Stuff a single character onto the queue. The
- driver is guaranteed following up calls to
- flush_chars.
-
-flush_chars() Ask the kernel to write put_char queue
-
-write_room() Return the number of characters that can be stuffed
- into the port buffers without overflow (or less).
- The ldisc is responsible for being intelligent
- about multi-threading of write_room/write calls
-
-ioctl() Called when an ioctl may be for the driver
-
-set_termios() Called on termios change, serialized against
- itself by a semaphore. May sleep.
-
-set_ldisc() Notifier for discipline change. At the point this
- is done the discipline is not yet usable. Can now
- sleep (I think)
-
-throttle() Called by the ldisc to ask the driver to do flow
- control. Serialization including with unthrottle
- is the job of the ldisc layer.
-
-unthrottle() Called by the ldisc to ask the driver to stop flow
- control.
-
-stop() Ldisc notifier to the driver to stop output. As with
- throttle the serializations with start() are down
- to the ldisc layer.
-
-start() Ldisc notifier to the driver to start output.
-
-hangup() Ask the tty driver to cause a hangup initiated
- from the host side. [Can sleep ??]
-
-break_ctl() Send RS232 break. Can sleep. Can get called in
- parallel, driver must serialize (for now), and
- with write calls.
-
-wait_until_sent() Wait for characters to exit the hardware queue
- of the driver. Can sleep
-
-send_xchar() Send XON/XOFF and if possible jump the queue with
- it in order to get fast flow control responses.
- Cannot sleep ??
-======================= =======================================================
diff --git a/Documentation/driver-api/usb/writing_usb_driver.rst b/Documentation/driver-api/usb/writing_usb_driver.rst
index b43e1ce49f0e..95c4f5d14052 100644
--- a/Documentation/driver-api/usb/writing_usb_driver.rst
+++ b/Documentation/driver-api/usb/writing_usb_driver.rst
@@ -94,8 +94,8 @@ usually in the driver's init function, as shown here::
/* register this driver with the USB subsystem */
result = usb_register(&skel_driver);
if (result < 0) {
- err("usb_register failed for the "__FILE__ "driver."
- "Error number %d", result);
+ pr_err("usb_register failed for the %s driver. Error number %d\n",
+ skel_driver.name, result);
return -1;
}
@@ -170,8 +170,8 @@ structure. This is done so that future calls to file operations will
enable the driver to determine which device the user is addressing. All
of this is done with the following code::
- /* increment our usage count for the module */
- ++skel->open_count;
+ /* increment our usage count for the device */
+ kref_get(&dev->kref);
/* save our object in the file's private structure */
file->private_data = dev;
@@ -188,24 +188,26 @@ space, points the urb to the data and submits the urb to the USB
subsystem. This can be seen in the following code::
/* we can only write as much as 1 urb will hold */
- bytes_written = (count > skel->bulk_out_size) ? skel->bulk_out_size : count;
+ size_t writesize = min_t(size_t, count, MAX_TRANSFER);
/* copy the data from user space into our urb */
- copy_from_user(skel->write_urb->transfer_buffer, buffer, bytes_written);
+ copy_from_user(buf, user_buffer, writesize);
/* set up our urb */
- usb_fill_bulk_urb(skel->write_urb,
- skel->dev,
- usb_sndbulkpipe(skel->dev, skel->bulk_out_endpointAddr),
- skel->write_urb->transfer_buffer,
- bytes_written,
+ usb_fill_bulk_urb(urb,
+ dev->udev,
+ usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr),
+ buf,
+ writesize,
skel_write_bulk_callback,
- skel);
+ dev);
/* send the data out the bulk port */
- result = usb_submit_urb(skel->write_urb);
- if (result) {
- err("Failed submitting write urb, error %d", result);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
+ if (retval) {
+ dev_err(&dev->interface->dev,
+ "%s - failed submitting write urb, error %d\n",
+ __func__, retval);
}
diff --git a/Documentation/filesystems/caching/backend-api.rst b/Documentation/filesystems/caching/backend-api.rst
index 19fbf6b9aa36..be793c49a772 100644
--- a/Documentation/filesystems/caching/backend-api.rst
+++ b/Documentation/filesystems/caching/backend-api.rst
@@ -1,727 +1,479 @@
.. SPDX-License-Identifier: GPL-2.0
-==========================
-FS-Cache Cache backend API
-==========================
+=================
+Cache Backend API
+=================
The FS-Cache system provides an API by which actual caches can be supplied to
FS-Cache for it to then serve out to network filesystems and other interested
-parties.
+parties. This API is used by::
-This API is declared in <linux/fscache-cache.h>.
+	#include <linux/fscache-cache.h>
-Initialising and Registering a Cache
-====================================
-
-To start off, a cache definition must be initialised and registered for each
-cache the backend wants to make available. For instance, CacheFS does this in
-the fill_super() operation on mounting.
-
-The cache definition (struct fscache_cache) should be initialised by calling::
-
- void fscache_init_cache(struct fscache_cache *cache,
- struct fscache_cache_ops *ops,
- const char *idfmt,
- ...);
-
-Where:
-
- * "cache" is a pointer to the cache definition;
-
- * "ops" is a pointer to the table of operations that the backend supports on
- this cache; and
-
- * "idfmt" is a format and printf-style arguments for constructing a label
- for the cache.
-
-
-The cache should then be registered with FS-Cache by passing a pointer to the
-previously initialised cache definition to::
-
- int fscache_add_cache(struct fscache_cache *cache,
- struct fscache_object *fsdef,
- const char *tagname);
-
-Two extra arguments should also be supplied:
-
- * "fsdef" which should point to the object representation for the FS-Cache
- master index in this cache. Netfs primary index entries will be created
- here. FS-Cache keeps the caller's reference to the index object if
- successful and will release it upon withdrawal of the cache.
-
- * "tagname" which, if given, should be a text string naming this cache. If
- this is NULL, the identifier will be used instead. For CacheFS, the
- identifier is set to name the underlying block device and the tag can be
- supplied by mount.
-
-This function may return -ENOMEM if it ran out of memory or -EEXIST if the tag
-is already in use. 0 will be returned on success.
-
-
-Unregistering a Cache
-=====================
-
-A cache can be withdrawn from the system by calling this function with a
-pointer to the cache definition::
-
- void fscache_withdraw_cache(struct fscache_cache *cache);
-
-In CacheFS's case, this is called by put_super().
-
-
-Security
+Overview
========
-The cache methods are executed one of two contexts:
-
- (1) that of the userspace process that issued the netfs operation that caused
- the cache method to be invoked, or
-
- (2) that of one of the processes in the FS-Cache thread pool.
-
-In either case, this may not be an appropriate context in which to access the
-cache.
-
-The calling process's fsuid, fsgid and SELinux security identities may need to
-be masqueraded for the duration of the cache driver's access to the cache.
-This is left to the cache to handle; FS-Cache makes no effort in this regard.
-
+Interaction with the API is handled on three levels: cache, volume and data
+storage, and each level has its own type of cookie object:
-Control and Statistics Presentation
-===================================
+ ======================= =======================
+ COOKIE C TYPE
+ ======================= =======================
+ Cache cookie struct fscache_cache
+ Volume cookie struct fscache_volume
+ Data storage cookie struct fscache_cookie
+ ======================= =======================
-The cache may present data to the outside world through FS-Cache's interfaces
-in sysfs and procfs - the former for control and the latter for statistics.
+Cookies are used to provide some filesystem data to the cache, manage state and
+pin the cache during access in addition to acting as reference points for the
+API functions. Each cookie has a debugging ID that is included in trace points
+to make it easier to correlate traces. Note, though, that debugging IDs are
+simply allocated from incrementing counters and will eventually wrap.
-A sysfs directory called /sys/fs/fscache/<cachetag>/ is created if CONFIG_SYSFS
-is enabled. This is accessible through the kobject struct fscache_cache::kobj
-and is for use by the cache as it sees fit.
+The cache backend and the network filesystem can both ask for cache cookies -
+and if they ask for one of the same name, they'll get the same cookie. Volume
+and data cookies, however, are created at the behest of the filesystem only.
-Relevant Data Structures
-========================
+Cache Cookies
+=============
- * Index/Data file FS-Cache representation cookie::
+Caches are represented in the API by cache cookies. These are objects of
+type::
- struct fscache_cookie {
- struct fscache_object_def *def;
- struct fscache_netfs *netfs;
- void *netfs_data;
- ...
- };
-
- The fields that might be of use to the backend describe the object
- definition, the netfs definition and the netfs's data for this cookie.
- The object definition contain functions supplied by the netfs for loading
- and matching index entries; these are required to provide some of the
- cache operations.
-
-
- * In-cache object representation::
-
- struct fscache_object {
- int debug_id;
- enum {
- FSCACHE_OBJECT_RECYCLING,
- ...
- } state;
- spinlock_t lock
- struct fscache_cache *cache;
- struct fscache_cookie *cookie;
+ struct fscache_cache {
+ void *cache_priv;
+ unsigned int debug_id;
+ char *name;
...
};
- Structures of this type should be allocated by the cache backend and
- passed to FS-Cache when requested by the appropriate cache operation. In
- the case of CacheFS, they're embedded in CacheFS's internal object
- structures.
+There are a few fields that the cache backend might be interested in. The
+``debug_id`` can be used in tracing to match lines referring to the same cache
+and ``name`` is the name the cache was registered with. The ``cache_priv``
+member is private data provided by the cache when it is brought online. The
+other fields are for internal use.
- The debug_id is a simple integer that can be used in debugging messages
- that refer to a particular object. In such a case it should be printed
- using "OBJ%x" to be consistent with FS-Cache.
- Each object contains a pointer to the cookie that represents the object it
- is backing. An object should retired when put_object() is called if it is
- in state FSCACHE_OBJECT_RECYCLING. The fscache_object struct should be
- initialised by calling fscache_object_init(object).
+Registering a Cache
+===================
+When a cache backend wants to bring a cache online, it should first register
+the cache name and that will get it a cache cookie. This is done with::
- * FS-Cache operation record::
+ struct fscache_cache *fscache_acquire_cache(const char *name);
- struct fscache_operation {
- atomic_t usage;
- struct fscache_object *object;
- unsigned long flags;
- #define FSCACHE_OP_EXCLUSIVE
- void (*processor)(struct fscache_operation *op);
- void (*release)(struct fscache_operation *op);
- ...
- };
+This will look up and potentially create a cache cookie. The cache cookie may
+have already been created by a network filesystem looking for it, in which case
+that cache cookie will be used. If the cache cookie is not in use by another
+cache, it will be moved into the preparing state; otherwise a busy error is
+returned.
- FS-Cache has a pool of threads that it uses to give CPU time to the
- various asynchronous operations that need to be done as part of driving
- the cache. These are represented by the above structure. The processor
- method is called to give the op CPU time, and the release method to get
- rid of it when its usage count reaches 0.
+If successful, the cache backend can then start setting up the cache. In the
+event that the initialisation fails, the cache backend should call::
- An operation can be made exclusive upon an object by setting the
- appropriate flag before enqueuing it with fscache_enqueue_operation(). If
- an operation needs more processing time, it should be enqueued again.
+	void fscache_relinquish_cache(struct fscache_cache *cache);
+to reset and discard the cookie.
- * FS-Cache retrieval operation record::
- struct fscache_retrieval {
- struct fscache_operation op;
- struct address_space *mapping;
- struct list_head *to_do;
- ...
- };
+Bringing a Cache Online
+=======================
- A structure of this type is allocated by FS-Cache to record retrieval and
- allocation requests made by the netfs. This struct is then passed to the
- backend to do the operation. The backend may get extra refs to it by
- calling fscache_get_retrieval() and refs may be discarded by calling
- fscache_put_retrieval().
+Once the cache is set up, it can be brought online by calling::
- A retrieval operation can be used by the backend to do retrieval work. To
- do this, the retrieval->op.processor method pointer should be set
- appropriately by the backend and fscache_enqueue_retrieval() called to
- submit it to the thread pool. CacheFiles, for example, uses this to queue
- page examination when it detects PG_lock being cleared.
+ int fscache_add_cache(struct fscache_cache *cache,
+ const struct fscache_cache_ops *ops,
+ void *cache_priv);
- The to_do field is an empty list available for the cache backend to use as
- it sees fit.
+This stores the cache operations table pointer and cache private data into the
+cache cookie and moves the cache to the active state, thereby allowing accesses
+to take place.
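+
+As a hedged sketch of the acquire/set-up/add sequence (the ``mycache``
+driver, its ops table and its set-up helper are invented for illustration,
+and the acquire function is assumed to report failure via an ERR_PTR)::
+
+	static int mycache_bring_online(struct mycache *mc)
+	{
+		struct fscache_cache *cache;
+
+		cache = fscache_acquire_cache("mycache");
+		if (IS_ERR(cache))
+			return PTR_ERR(cache);
+
+		if (mycache_set_up_storage(mc) < 0) {
+			/* Reset and discard the cache cookie. */
+			fscache_relinquish_cache(cache);
+			return -EIO;
+		}
+
+		mc->cache = cache;
+		/* Publish the ops table and move to the active state. */
+		return fscache_add_cache(cache, &mycache_ops, mc);
+	}
+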
- * FS-Cache storage operation record::
+Withdrawing a Cache From Service
+================================
- struct fscache_storage {
- struct fscache_operation op;
- pgoff_t store_limit;
- ...
- };
+The cache backend can withdraw a cache from service by calling this function::
- A structure of this type is allocated by FS-Cache to record outstanding
- writes to be made. FS-Cache itself enqueues this operation and invokes
- the write_page() method on the object at appropriate times to effect
- storage.
+ void fscache_withdraw_cache(struct fscache_cache *cache);
+This moves the cache to the withdrawn state to prevent new cache- and
+volume-level accesses from starting and then waits for outstanding cache-level
+accesses to complete.
-Cache Operations
-================
+The cache must then go through the data storage objects it has and tell fscache
+to withdraw them, calling::
-The cache backend provides FS-Cache with a table of operations that can be
-performed on the denizens of the cache. These are held in a structure of type:
+ void fscache_withdraw_cookie(struct fscache_cookie *cookie);
- ::
+on the cookie that each object belongs to. This schedules the specified cookie
+for withdrawal; the work is offloaded to a workqueue. The cache backend can
+test for completion by calling::
- struct fscache_cache_ops
+	bool fscache_are_objects_withdrawn(struct fscache_cache *cache);
- * Name of cache provider [mandatory]::
+Once all the cookies are withdrawn, a cache backend can withdraw all the
+volumes, calling::
- const char *name
+ void fscache_withdraw_volume(struct fscache_volume *volume);
- This isn't strictly an operation, but should be pointed at a string naming
- the backend.
+to tell fscache that a volume has been withdrawn. This waits for all
+outstanding accesses on the volume to complete before returning.
+When the cache is completely withdrawn, fscache should be notified by
+calling::
- * Allocate a new object [mandatory]::
+	void fscache_relinquish_cache(struct fscache_cache *cache);
- struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
- struct fscache_cookie *cookie)
+to clear fields in the cookie and discard the caller's ref on it.
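+
+A condensed, hedged sketch of the whole sequence (the ``mycache`` structure
+and its object and volume lists are invented for illustration;
+fscache_wait_for_objects() is described under Cookie Accounting below)::
+
+	static void mycache_withdraw(struct mycache *mc)
+	{
+		struct mycache_object *obj;
+		struct mycache_volume *vol;
+
+		fscache_withdraw_cache(mc->cache);
+
+		/* Schedule withdrawal of every data storage object. */
+		list_for_each_entry(obj, &mc->objects, link)
+			fscache_withdraw_cookie(obj->cookie);
+
+		/* The withdrawals are offloaded to a workqueue; wait. */
+		fscache_wait_for_objects(mc->cache);
+
+		list_for_each_entry(vol, &mc->volumes, link)
+			fscache_withdraw_volume(vol->vcookie);
+
+		fscache_relinquish_cache(mc->cache);
+		mc->cache = NULL;
+	}
+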
- This method is used to allocate a cache object representation to back a
- cookie in a particular cache. fscache_object_init() should be called on
- the object to initialise it prior to returning.
- This function may also be used to parse the index key to be used for
- multiple lookup calls to turn it into a more convenient form. FS-Cache
- will call the lookup_complete() method to allow the cache to release the
- form once lookup is complete or aborted.
+Volume Cookies
+==============
+Within a cache, the data storage objects are organised into logical volumes.
+These are represented in the API as objects of type::
- * Look up and create object [mandatory]::
+ struct fscache_volume {
+ struct fscache_cache *cache;
+ void *cache_priv;
+ unsigned int debug_id;
+ char *key;
+ unsigned int key_hash;
+ ...
+ u8 coherency_len;
+ u8 coherency[];
+ };
- void (*lookup_object)(struct fscache_object *object)
+There are a number of fields here that are of interest to the caching backend:
- This method is used to look up an object, given that the object is already
- allocated and attached to the cookie. This should instantiate that object
- in the cache if it can.
+ * ``cache`` - The parent cache cookie.
- The method should call fscache_object_lookup_negative() as soon as
- possible if it determines the object doesn't exist in the cache. If the
- object is found to exist and the netfs indicates that it is valid then
- fscache_obtained_object() should be called once the object is in a
- position to have data stored in it. Similarly, fscache_obtained_object()
- should also be called once a non-present object has been created.
+ * ``cache_priv`` - A place for the cache to stash private data.
- If a lookup error occurs, fscache_object_lookup_error() should be called
- to abort the lookup of that object.
+ * ``debug_id`` - A debugging ID for logging in tracepoints.
+ * ``key`` - A printable string with no '/' characters in it that represents
+ the index key for the volume. The key is NUL-terminated and padded out to
+ a multiple of 4 bytes.
- * Release lookup data [mandatory]::
+ * ``key_hash`` - A hash of the index key. This should work out the same, no
+ matter the cpu arch and endianness.
- void (*lookup_complete)(struct fscache_object *object)
+ * ``coherency`` - A piece of coherency data that should be checked when the
+ volume is bound to in the cache.
- This method is called to ask the cache to release any resources it was
- using to perform a lookup.
+ * ``coherency_len`` - The amount of data in the coherency buffer.
- * Increment object refcount [mandatory]::
+Data Storage Cookies
+====================
- struct fscache_object *(*grab_object)(struct fscache_object *object)
+A volume is a logical group of data storage objects, each of which is
+represented to the network filesystem by a cookie. Cookies are represented in
+the API as objects of type::
- This method is called to increment the reference count on an object. It
- may fail (for instance if the cache is being withdrawn) by returning NULL.
- It should return the object pointer if successful.
+ struct fscache_cookie {
+ struct fscache_volume *volume;
+ void *cache_priv;
+ unsigned long flags;
+ unsigned int debug_id;
+ unsigned int inval_counter;
+ loff_t object_size;
+ u8 advice;
+ u32 key_hash;
+ u8 key_len;
+ u8 aux_len;
+ ...
+ };
+The fields in the cookie that are of interest to the cache backend are:
- * Lock/Unlock object [mandatory]::
+ * ``volume`` - The parent volume cookie.
- void (*lock_object)(struct fscache_object *object)
- void (*unlock_object)(struct fscache_object *object)
+ * ``cache_priv`` - A place for the cache to stash private data.
- These methods are used to exclusively lock an object. It must be possible
- to schedule with the lock held, so a spinlock isn't sufficient.
+ * ``flags`` - A collection of bit flags, including:
+ * FSCACHE_COOKIE_NO_DATA_TO_READ - There is no data available in the
+ cache to be read as the cookie has been created or invalidated.
- * Pin/Unpin object [optional]::
+ * FSCACHE_COOKIE_NEEDS_UPDATE - The coherency data and/or object size has
+ been changed and needs committing.
- int (*pin_object)(struct fscache_object *object)
- void (*unpin_object)(struct fscache_object *object)
+ * FSCACHE_COOKIE_LOCAL_WRITE - The netfs's data has been modified
+ locally, so the cache object may be in an incoherent state with respect
+ to the server.
- These methods are used to pin an object into the cache. Once pinned an
- object cannot be reclaimed to make space. Return -ENOSPC if there's not
- enough space in the cache to permit this.
+ * FSCACHE_COOKIE_HAVE_DATA - The backend should set this if it
+ successfully stores data into the cache.
+ * FSCACHE_COOKIE_RETIRED - The cookie was invalidated when it was
+ relinquished and the cached data should be discarded.
- * Check coherency state of an object [mandatory]::
+ * ``debug_id`` - A debugging ID for logging in tracepoints.
- int (*check_consistency)(struct fscache_object *object)
+ * ``inval_counter`` - The number of invalidations done on the cookie.
- This method is called to have the cache check the saved auxiliary data of
- the object against the netfs's idea of the state. 0 should be returned
- if they're consistent and -ESTALE otherwise. -ENOMEM and -ERESTARTSYS
- may also be returned.
+ * ``advice`` - Information about how the cookie is to be used.
- * Update object [mandatory]::
+ * ``key_hash`` - A hash of the index key. This should work out the same, no
+ matter the cpu arch and endianness.
- int (*update_object)(struct fscache_object *object)
+ * ``key_len`` - The length of the index key.
- This is called to update the index entry for the specified object. The
- new information should be in object->cookie->netfs_data. This can be
- obtained by calling object->cookie->def->get_aux()/get_attr().
+ * ``aux_len`` - The length of the coherency data buffer.
+Each cookie has an index key, which may be stored inline to the cookie or
+elsewhere. A pointer to this can be obtained by calling::
- * Invalidate data object [mandatory]::
+ void *fscache_get_key(struct fscache_cookie *cookie);
- int (*invalidate_object)(struct fscache_operation *op)
+The index key is a binary blob, the storage for which is padded out to a
+multiple of 4 bytes.
- This is called to invalidate a data object (as pointed to by op->object).
- All the data stored for this object should be discarded and an
- attr_changed operation should be performed. The caller will follow up
- with an object update operation.
+Each cookie also has a buffer for coherency data. This may also be inline or
+detached from the cookie and a pointer is obtained by calling::
- fscache_op_complete() must be called on op before returning.
+ void *fscache_get_aux(struct fscache_cookie *cookie);
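+
+As a brief, hedged illustration, a backend's lookup path might fetch both
+blobs like this (``mycache_match()`` is invented for the example)::
+
+	const void *key = fscache_get_key(cookie);
+	const void *aux = fscache_get_aux(cookie);
+
+	/* Compare against the backend's stored copies. */
+	if (!mycache_match(cookie->cache_priv, key, cookie->key_len,
+			   aux, cookie->aux_len))
+		return false;
+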
- * Discard object [mandatory]::
- void (*drop_object)(struct fscache_object *object)
+Cookie Accounting
+=================
- This method is called to indicate that an object has been unbound from its
- cookie, and that the cache should release the object's resources and
- retire it if it's in state FSCACHE_OBJECT_RECYCLING.
+Data storage cookies are counted and this is used to block cache withdrawal
+completion until all objects have been destroyed. The following functions are
+provided to the cache to deal with that::
- This method should not attempt to release any references held by the
- caller. The caller will invoke the put_object() method as appropriate.
+ void fscache_count_object(struct fscache_cache *cache);
+ void fscache_uncount_object(struct fscache_cache *cache);
+ void fscache_wait_for_objects(struct fscache_cache *cache);
+The count function records the allocation of an object in a cache and the
+uncount function records its destruction. Warning: by the time the uncount
+function returns, the cache may have been destroyed.
- * Release object reference [mandatory]::
+The wait function can be used during the withdrawal procedure to wait for
+fscache to finish withdrawing all the objects in the cache. When it completes,
+there will be no remaining objects referring to the cache object or any volume
+objects.
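+
+For example, a backend's withdrawal routine might use the wait function to
+defer teardown until fscache has finished with all the objects (a sketch;
+``my_withdraw_objects()`` is hypothetical)::
+
+	static void my_withdraw(struct fscache_cache *cache)
+	{
+		my_withdraw_objects(cache);	/* Kick off object teardown */
+		fscache_wait_for_objects(cache);
+		/* No remaining objects refer to the cache at this point. */
+	}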
- void (*put_object)(struct fscache_object *object)
- This method is used to discard a reference to an object. The object may
- be freed when all the references to it are released.
+Cache Management API
+====================
+The cache backend implements the cache management API by providing a table of
+operations that fscache can use to manage various aspects of the cache. These
+are held in a structure of type::
- * Synchronise a cache [mandatory]::
+ struct fscache_cache_ops {
+ const char *name;
+ ...
+ };
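+
+A backend typically exports a single static instance of this structure, for
+example (a sketch; the ``my_*`` method names are hypothetical)::
+
+	static const struct fscache_cache_ops my_cache_ops = {
+		.name			= "mycache",
+		.acquire_volume		= my_acquire_volume,
+		.free_volume		= my_free_volume,
+		.lookup_cookie		= my_lookup_cookie,
+		.withdraw_cookie	= my_withdraw_cookie,
+		.resize_cookie		= my_resize_cookie,
+		.invalidate_cookie	= my_invalidate_cookie,
+		.prepare_to_write	= my_prepare_to_write,
+		.begin_operation	= my_begin_operation,
+	};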
- void (*sync)(struct fscache_cache *cache)
+The structure contains a printable name for the cache backend driver plus a
+number of pointers to methods by which fscache can request management of the
+cache:
- This is called to ask the backend to synchronise a cache with its backing
- device.
+ * Set up a volume cookie [optional]::
+ void (*acquire_volume)(struct fscache_volume *volume);
- * Dissociate a cache [mandatory]::
+ This method is called when a volume cookie is being created. The caller
+ holds a cache-level access pin to prevent the cache from going away for
+ the duration. This method should set up the resources to access a volume
+ in the cache and should not return until it has done so.
- void (*dissociate_pages)(struct fscache_cache *cache)
+ If successful, it can set ``cache_priv`` to its own data.
- This is called to ask a cache to perform any page dissociations as part of
- cache withdrawal.
+ * Clean up volume cookie [optional]::
- * Notification that the attributes on a netfs file changed [mandatory]::
+ void (*free_volume)(struct fscache_volume *volume);
- int (*attr_changed)(struct fscache_object *object);
+ This method is called when a volume cookie is being released if
+ ``cache_priv`` is set.
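+
+   As a sketch, assuming hypothetical ``my_create_volume()`` and
+   ``my_destroy_volume()`` helpers, this pair of methods might look like::
+
+	static void my_acquire_volume(struct fscache_volume *volume)
+	{
+		/* Set up access to this volume's storage in the cache. */
+		volume->cache_priv = my_create_volume(volume);
+	}
+
+	static void my_free_volume(struct fscache_volume *volume)
+	{
+		my_destroy_volume(volume->cache_priv);
+		volume->cache_priv = NULL;
+	}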
- This is called to indicate to the cache that certain attributes on a netfs
- file have changed (for example the maximum size a file may reach). The
- cache can read these from the netfs by calling the cookie's get_attr()
- method.
- The cache may use the file size information to reserve space on the cache.
- It should also call fscache_set_store_limit() to indicate to FS-Cache the
- highest byte it's willing to store for an object.
+ * Look up a cookie in the cache [mandatory]::
- This method may return -ve if an error occurred or the cache object cannot
- be expanded. In such a case, the object will be withdrawn from service.
+ bool (*lookup_cookie)(struct fscache_cookie *cookie);
- This operation is run asynchronously from FS-Cache's thread pool, and
- storage and retrieval operations from the netfs are excluded during the
- execution of this operation.
+ This method is called to look up/create the resources needed to access the
+ data storage for a cookie. It is called from a worker thread with a
+ volume-level access pin in the cache to prevent it from being withdrawn.
+ True should be returned if successful and false otherwise. If false is
+ returned, the withdraw_cookie op (see below) will be called.
- * Reserve cache space for an object's data [optional]::
+ If lookup fails, but the object could still be created (e.g. it hasn't
+ been cached before), then::
- int (*reserve_space)(struct fscache_object *object, loff_t size);
+ void fscache_cookie_lookup_negative(
+ struct fscache_cookie *cookie);
- This is called to request that cache space be reserved to hold the data
- for an object and the metadata used to track it. Zero size should be
- taken as request to cancel a reservation.
+   can be called to let the network filesystem proceed and start downloading
+   data whilst the cache backend gets on with the job of creating the object.
- This should return 0 if successful, -ENOSPC if there isn't enough space
- available, or -ENOMEM or -EIO on other errors.
+ If successful, ``cookie->cache_priv`` can be set.
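+
+   Putting that together, a minimal lookup might be shaped like this (a
+   sketch; ``my_look_up_object()`` and ``my_create_object()`` are
+   hypothetical)::
+
+	static bool my_lookup_cookie(struct fscache_cookie *cookie)
+	{
+		struct my_object *obj = my_look_up_object(cookie);
+
+		if (!obj) {
+			/* Nothing cached yet; unblock the netfs early. */
+			fscache_cookie_lookup_negative(cookie);
+			obj = my_create_object(cookie);
+			if (!obj)
+				return false;
+		}
+		cookie->cache_priv = obj;
+		return true;
+	}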
- The reservation may exceed the current size of the object, thus permitting
- future expansion. If the amount of space consumed by an object would
- exceed the reservation, it's permitted to refuse requests to allocate
- pages, but not required. An object may be pruned down to its reservation
- size if larger than that already.
+ * Withdraw an object without any cookie access counts held [mandatory]::
- * Request page be read from cache [mandatory]::
+ void (*withdraw_cookie)(struct fscache_cookie *cookie);
- int (*read_or_alloc_page)(struct fscache_retrieval *op,
- struct page *page,
- gfp_t gfp)
+   This method is called to withdraw a cookie from service. It will be
+   called when the cookie is relinquished by the netfs, withdrawn or culled
+   by the cache backend, or closed by fscache after a period of non-use.
- This is called to attempt to read a netfs page from the cache, or to
- reserve a backing block if not. FS-Cache will have done as much checking
- as it can before calling, but most of the work belongs to the backend.
+ The caller doesn't hold any access pins, but it is called from a
+ non-reentrant work item to manage races between the various ways
+ withdrawal can occur.
- If there's no page in the cache, then -ENODATA should be returned if the
- backend managed to reserve a backing block; -ENOBUFS or -ENOMEM if it
- didn't.
+ The cookie will have the ``FSCACHE_COOKIE_RETIRED`` flag set on it if the
+ associated data is to be removed from the cache.
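+
+   For example (a sketch; ``my_delete_data()`` and ``my_put_object()`` are
+   hypothetical)::
+
+	static void my_withdraw_cookie(struct fscache_cookie *cookie)
+	{
+		struct my_object *obj = cookie->cache_priv;
+
+		if (test_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags))
+			my_delete_data(obj);	/* Data is to be discarded */
+		cookie->cache_priv = NULL;
+		my_put_object(obj);
+	}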
- If there is suitable data in the cache, then a read operation should be
- queued and 0 returned. When the read finishes, fscache_end_io() should be
- called.
- The fscache_mark_pages_cached() should be called for the page if any cache
- metadata is retained. This will indicate to the netfs that the page needs
- explicit uncaching. This operation takes a pagevec, thus allowing several
- pages to be marked at once.
+ * Change the size of a data storage object [mandatory]::
- The retrieval record pointed to by op should be retained for each page
- queued and released when I/O on the page has been formally ended.
- fscache_get/put_retrieval() are available for this purpose.
+ void (*resize_cookie)(struct netfs_cache_resources *cres,
+ loff_t new_size);
- The retrieval record may be used to get CPU time via the FS-Cache thread
- pool. If this is desired, the op->op.processor should be set to point to
- the appropriate processing routine, and fscache_enqueue_retrieval() should
- be called at an appropriate point to request CPU time. For instance, the
- retrieval routine could be enqueued upon the completion of a disk read.
- The to_do field in the retrieval record is provided to aid in this.
+   This method is called to inform the cache backend of a change in size of
+   the netfs file due to local truncation. The cache backend should make
+   all of the changes it needs to make before returning, as this is done
+   under the netfs inode mutex.
- If an I/O error occurs, fscache_io_error() should be called and -ENOBUFS
- returned if possible or fscache_end_io() called with a suitable error
- code.
+ The caller holds a cookie-level access pin to prevent a race with
+ withdrawal and the netfs must have the cookie marked in-use to prevent
+ garbage collection or culling from removing any resources.
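+
+   As a sketch (``my_truncate_object()`` is hypothetical)::
+
+	static void my_resize_cookie(struct netfs_cache_resources *cres,
+				     loff_t new_size)
+	{
+		struct fscache_cookie *cookie = fscache_cres_cookie(cres);
+
+		/* Must complete before returning; the caller holds the
+		 * netfs inode mutex. */
+		my_truncate_object(cookie->cache_priv, new_size);
+	}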
- fscache_put_retrieval() should be called after a page or pages are dealt
- with. This will complete the operation when all pages are dealt with.
+ * Invalidate a data storage object [mandatory]::
- * Request pages be read from cache [mandatory]::
+ bool (*invalidate_cookie)(struct fscache_cookie *cookie);
- int (*read_or_alloc_pages)(struct fscache_retrieval *op,
- struct list_head *pages,
- unsigned *nr_pages,
- gfp_t gfp)
+ This is called when the network filesystem detects a third-party
+ modification or when an O_DIRECT write is made locally. This requests
+ that the cache backend should throw away all the data in the cache for
+ this object and start afresh. It should return true if successful and
+ false otherwise.
- This is like the read_or_alloc_page() method, except it is handed a list
- of pages instead of one page. Any pages on which a read operation is
- started must be added to the page cache for the specified mapping and also
- to the LRU. Such pages must also be removed from the pages list and
- ``*nr_pages`` decremented per page.
+   On entry, new I/O operations are blocked. Once the cache is in a position
+   to accept I/O again, the backend should release the block by calling::
- If there was an error such as -ENOMEM, then that should be returned; else
- if one or more pages couldn't be read or allocated, then -ENOBUFS should
- be returned; else if one or more pages couldn't be read, then -ENODATA
- should be returned. If all the pages are dispatched then 0 should be
- returned.
+ void fscache_resume_after_invalidation(struct fscache_cookie *cookie);
+ If the method returns false, caching will be withdrawn for this cookie.
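+
+   A sketch of the shape of this method (``my_discard_data()`` is
+   hypothetical)::
+
+	static bool my_invalidate_cookie(struct fscache_cookie *cookie)
+	{
+		struct my_object *obj = cookie->cache_priv;
+
+		if (!my_discard_data(obj))
+			return false;
+		/* Ready to accept I/O again; release the block. */
+		fscache_resume_after_invalidation(cookie);
+		return true;
+	}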
- * Request page be allocated in the cache [mandatory]::
- int (*allocate_page)(struct fscache_retrieval *op,
- struct page *page,
- gfp_t gfp)
+ * Prepare to make local modifications to the cache [mandatory]::
- This is like the read_or_alloc_page() method, except that it shouldn't
- read from the cache, even if there's data there that could be retrieved.
- It should, however, set up any internal metadata required such that
- the write_page() method can write to the cache.
+ void (*prepare_to_write)(struct fscache_cookie *cookie);
- If there's no backing block available, then -ENOBUFS should be returned
- (or -ENOMEM if there were other problems). If a block is successfully
- allocated, then the netfs page should be marked and 0 returned.
+ This method is called when the network filesystem finds that it is going
+ to need to modify the contents of the cache due to local writes or
+ truncations. This gives the cache a chance to note that a cache object
+ may be incoherent with respect to the server and may need writing back
+ later. This may also cause the cached data to be scrapped on later
+ rebinding if not properly committed.
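+
+   For example (a sketch; ``my_note_dirty()`` is hypothetical)::
+
+	static void my_prepare_to_write(struct fscache_cookie *cookie)
+	{
+		struct my_object *obj = cookie->cache_priv;
+
+		/* Note that the object may now become incoherent with the
+		 * server until the changes are committed. */
+		my_note_dirty(obj);
+	}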
- * Request pages be allocated in the cache [mandatory]::
+ * Begin an operation for the netfs lib [mandatory]::
- int (*allocate_pages)(struct fscache_retrieval *op,
- struct list_head *pages,
- unsigned *nr_pages,
- gfp_t gfp)
+ bool (*begin_operation)(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state);
- This is an multiple page version of the allocate_page() method. pages and
- nr_pages should be treated as for the read_or_alloc_pages() method.
+ This method is called when an I/O operation is being set up (read, write
+ or resize). The caller holds an access pin on the cookie and must have
+ marked the cookie as in-use.
+ If it can, the backend should attach any resources it needs to keep around
+ to the netfs_cache_resources object and return true.
- * Request page be written to cache [mandatory]::
+ If it can't complete the setup, it should return false.
- int (*write_page)(struct fscache_storage *op,
- struct page *page);
+ The want_state parameter indicates the state the caller needs the cache
+ object to be in and what it wants to do during the operation:
- This is called to write from a page on which there was a previously
- successful read_or_alloc_page() call or similar. FS-Cache filters out
- pages that don't have mappings.
+ * ``FSCACHE_WANT_PARAMS`` - The caller just wants to access cache
+ object parameters; it doesn't need to do data I/O yet.
- This method is called asynchronously from the FS-Cache thread pool. It is
- not required to actually store anything, provided -ENODATA is then
- returned to the next read of this page.
+ * ``FSCACHE_WANT_READ`` - The caller wants to read data.
- If an error occurred, then a negative error code should be returned,
- otherwise zero should be returned. FS-Cache will take appropriate action
- in response to an error, such as withdrawing this object.
+ * ``FSCACHE_WANT_WRITE`` - The caller wants to write to or resize the
+ cache object.
- If this method returns success then FS-Cache will inform the netfs
- appropriately.
+ Note that there won't necessarily be anything attached to the cookie's
+ cache_priv yet if the cookie is still being created.
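+
+   As a sketch, assuming a hypothetical ``my_io_ops`` table of ``struct
+   netfs_cache_ops``::
+
+	static bool my_begin_operation(struct netfs_cache_resources *cres,
+				       enum fscache_want_state want_state)
+	{
+		struct fscache_cookie *cookie = fscache_cres_cookie(cres);
+
+		cres->ops = &my_io_ops;
+		if (want_state == FSCACHE_WANT_PARAMS)
+			return true;	/* No data I/O resources needed yet */
+		if (!cookie->cache_priv)
+			return false;	/* Still being created */
+		cres->cache_priv = cookie->cache_priv;
+		return true;
+	}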
- * Discard retained per-page metadata [mandatory]::
+Data I/O API
+============
- void (*uncache_page)(struct fscache_object *object, struct page *page)
+A cache backend provides a data I/O API through the netfs library's ``struct
+netfs_cache_ops`` attached to a ``struct netfs_cache_resources`` by the
+``begin_operation`` method described above.
- This is called when a netfs page is being evicted from the pagecache. The
- cache backend should tear down any internal representation or tracking it
- maintains for this page.
+See Documentation/filesystems/netfs_library.rst for a description.
-FS-Cache Utilities
-==================
+Miscellaneous Functions
+=======================
FS-Cache provides some utilities that a cache backend may make use of:
* Note occurrence of an I/O error in a cache::
- void fscache_io_error(struct fscache_cache *cache)
+ void fscache_io_error(struct fscache_cache *cache);
- This tells FS-Cache that an I/O error occurred in the cache. After this
- has been called, only resource dissociation operations (object and page
- release) will be passed from the netfs to the cache backend for the
- specified cache.
+ This tells FS-Cache that an I/O error occurred in the cache. This
+ prevents any new I/O from being started on the cache.
This does not actually withdraw the cache. That must be done separately.
+ * Note cessation of caching on a cookie due to failure::
- * Invoke the retrieval I/O completion function::
-
- void fscache_end_io(struct fscache_retrieval *op, struct page *page,
- int error);
-
- This is called to note the end of an attempt to retrieve a page. The
- error value should be 0 if successful and an error otherwise.
-
-
- * Record that one or more pages being retrieved or allocated have been dealt
- with::
-
- void fscache_retrieval_complete(struct fscache_retrieval *op,
- int n_pages);
-
- This is called to record the fact that one or more pages have been dealt
- with and are no longer the concern of this operation. When the number of
- pages remaining in the operation reaches 0, the operation will be
- completed.
-
-
- * Record operation completion::
-
- void fscache_op_complete(struct fscache_operation *op);
-
- This is called to record the completion of an operation. This deducts
- this operation from the parent object's run state, potentially permitting
- one or more pending operations to start running.
-
-
- * Set highest store limit::
-
- void fscache_set_store_limit(struct fscache_object *object,
- loff_t i_size);
-
- This sets the limit FS-Cache imposes on the highest byte it's willing to
- try and store for a netfs. Any page over this limit is automatically
- rejected by fscache_read_alloc_page() and co with -ENOBUFS.
-
-
- * Mark pages as being cached::
-
- void fscache_mark_pages_cached(struct fscache_retrieval *op,
- struct pagevec *pagevec);
-
- This marks a set of pages as being cached. After this has been called,
- the netfs must call fscache_uncache_page() to unmark the pages.
-
-
- * Perform coherency check on an object::
-
- enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
- const void *data,
- uint16_t datalen);
-
- This asks the netfs to perform a coherency check on an object that has
- just been looked up. The cookie attached to the object will determine the
- netfs to use. data and datalen should specify where the auxiliary data
- retrieved from the cache can be found.
-
- One of three values will be returned:
-
- FSCACHE_CHECKAUX_OKAY
- The coherency data indicates the object is valid as is.
-
- FSCACHE_CHECKAUX_NEEDS_UPDATE
- The coherency data needs updating, but otherwise the object is
- valid.
-
- FSCACHE_CHECKAUX_OBSOLETE
- The coherency data indicates that the object is obsolete and should
- be discarded.
-
-
- * Initialise a freshly allocated object::
-
- void fscache_object_init(struct fscache_object *object);
-
- This initialises all the fields in an object representation.
-
-
- * Indicate the destruction of an object::
-
- void fscache_object_destroyed(struct fscache_cache *cache);
-
- This must be called to inform FS-Cache that an object that belonged to a
- cache has been destroyed and deallocated. This will allow continuation
- of the cache withdrawal process when it is stopped pending destruction of
- all the objects.
-
-
- * Indicate negative lookup on an object::
-
- void fscache_object_lookup_negative(struct fscache_object *object);
-
- This is called to indicate to FS-Cache that a lookup process for an object
- found a negative result.
-
- This changes the state of an object to permit reads pending on lookup
- completion to go off and start fetching data from the netfs server as it's
- known at this point that there can't be any data in the cache.
-
- This may be called multiple times on an object. Only the first call is
- significant - all subsequent calls are ignored.
-
-
- * Indicate an object has been obtained::
-
- void fscache_obtained_object(struct fscache_object *object);
-
- This is called to indicate to FS-Cache that a lookup process for an object
- produced a positive result, or that an object was created. This should
- only be called once for any particular object.
-
- This changes the state of an object to indicate:
-
- (1) if no call to fscache_object_lookup_negative() has been made on
- this object, that there may be data available, and that reads can
- now go and look for it; and
-
- (2) that writes may now proceed against this object.
-
-
- * Indicate that object lookup failed::
-
- void fscache_object_lookup_error(struct fscache_object *object);
-
- This marks an object as having encountered a fatal error (usually EIO)
- and causes it to move into a state whereby it will be withdrawn as soon
- as possible.
-
-
- * Indicate that a stale object was found and discarded::
-
- void fscache_object_retrying_stale(struct fscache_object *object);
-
- This is called to indicate that the lookup procedure found an object in
- the cache that the netfs decided was stale. The object has been
- discarded from the cache and the lookup will be performed again.
-
-
- * Indicate that the caching backend killed an object::
-
- void fscache_object_mark_killed(struct fscache_object *object,
- enum fscache_why_object_killed why);
-
- This is called to indicate that the cache backend preemptively killed an
- object. The why parameter should be set to indicate the reason:
+ void fscache_caching_failed(struct fscache_cookie *cookie);
- FSCACHE_OBJECT_IS_STALE
- - the object was stale and needs discarding.
+   This notes that the caching being done on a cookie failed in some way,
+   for instance because the backing storage could not be created or an
+   invalidation failed, and that no further I/O operations should take
+   place on it until the cache is reset.
- FSCACHE_OBJECT_NO_SPACE
- - there was insufficient cache space
+ * Count I/O requests::
- FSCACHE_OBJECT_WAS_RETIRED
- - the object was retired when relinquished.
+ void fscache_count_read(void);
+ void fscache_count_write(void);
- FSCACHE_OBJECT_WAS_CULLED
- - the object was culled to make space.
+ These record reads and writes from/to the cache. The numbers are
+ displayed in /proc/fs/fscache/stats.
+ * Count out-of-space errors::
- * Get and release references on a retrieval record::
+ void fscache_count_no_write_space(void);
+ void fscache_count_no_create_space(void);
- void fscache_get_retrieval(struct fscache_retrieval *op);
- void fscache_put_retrieval(struct fscache_retrieval *op);
+ These record ENOSPC errors in the cache, divided into failures of data
+ writes and failures of filesystem object creations (e.g. mkdir).
- These two functions are used to retain a retrieval record while doing
- asynchronous data retrieval and block allocation.
+ * Count objects culled::
+ void fscache_count_culled(void);
- * Enqueue a retrieval record for processing::
+ This records the culling of an object.
- void fscache_enqueue_retrieval(struct fscache_retrieval *op);
+ * Get the cookie from a set of cache resources::
- This enqueues a retrieval record for processing by the FS-Cache thread
- pool. One of the threads in the pool will invoke the retrieval record's
- op->op.processor callback function. This function may be called from
- within the callback function.
+	struct fscache_cookie *fscache_cres_cookie(struct netfs_cache_resources *cres);
+   This pulls a pointer to the cookie from the cache resources. It may
+   return a NULL cookie if no cookie was set.
- * List of object state names::
- const char *fscache_object_states[];
+API Function Reference
+======================
- For debugging purposes, this may be used to turn the state that an object
- is in into a text string for display purposes.
+.. kernel-doc:: include/linux/fscache-cache.h
diff --git a/Documentation/filesystems/caching/cachefiles.rst b/Documentation/filesystems/caching/cachefiles.rst
index e58bc1fd312a..8bf396b76359 100644
--- a/Documentation/filesystems/caching/cachefiles.rst
+++ b/Documentation/filesystems/caching/cachefiles.rst
@@ -1,8 +1,8 @@
.. SPDX-License-Identifier: GPL-2.0
-===============================================
-CacheFiles: CACHE ON ALREADY MOUNTED FILESYSTEM
-===============================================
+===================================
+Cache on Already Mounted Filesystem
+===================================
.. Contents:
diff --git a/Documentation/filesystems/caching/fscache.rst b/Documentation/filesystems/caching/fscache.rst
index 70de86922b6a..a74d7b052dc1 100644
--- a/Documentation/filesystems/caching/fscache.rst
+++ b/Documentation/filesystems/caching/fscache.rst
@@ -10,25 +10,25 @@ Overview
This facility is a general purpose cache for network filesystems, though it
could be used for caching other things such as ISO9660 filesystems too.
-FS-Cache mediates between cache backends (such as CacheFS) and network
+FS-Cache mediates between cache backends (such as CacheFiles) and network
filesystems::
+---------+
- | | +--------------+
- | NFS |--+ | |
- | | | +-->| CacheFS |
- +---------+ | +----------+ | | /dev/hda5 |
- | | | | +--------------+
- +---------+ +-->| | |
- | | | |--+
- | AFS |----->| FS-Cache |
- | | | |--+
- +---------+ +-->| | |
- | | | | +--------------+
- +---------+ | +----------+ | | |
- | | | +-->| CacheFiles |
- | ISOFS |--+ | /var/cache |
- | | +--------------+
+ | | +--------------+
+ | NFS |--+ | |
+ | | | +-->| CacheFS |
+ +---------+ | +----------+ | | /dev/hda5 |
+ | | | | +--------------+
+ +---------+ +-------------->| | |
+ | | +-------+ | |--+
+ | AFS |----->| | | FS-Cache |
+ | | | netfs |-->| |--+
+ +---------+ +-->| lib | | | |
+ | | | | | | +--------------+
+ +---------+ | +-------+ +----------+ | | |
+ | | | +-->| CacheFiles |
+ | 9P |--+ | /var/cache |
+ | | +--------------+
+---------+
Or to look at it another way, FS-Cache is a module that provides a caching
@@ -84,101 +84,62 @@ then serving the pages out of that cache rather than the netfs inode because:
one-off access of a small portion of it (such as might be done with the
"file" program).
-It instead serves the cache out in PAGE_SIZE chunks as and when requested by
-the netfs('s) using it.
+It instead serves the cache out in chunks as and when requested by the netfs
+using it.
FS-Cache provides the following facilities:
- (1) More than one cache can be used at once. Caches can be selected
+ * More than one cache can be used at once. Caches can be selected
explicitly by use of tags.
- (2) Caches can be added / removed at any time.
+ * Caches can be added / removed at any time, even whilst being accessed.
- (3) The netfs is provided with an interface that allows either party to
+ * The netfs is provided with an interface that allows either party to
withdraw caching facilities from a file (required for (2)).
- (4) The interface to the netfs returns as few errors as possible, preferring
+ * The interface to the netfs returns as few errors as possible, preferring
rather to let the netfs remain oblivious.
- (5) Cookies are used to represent indices, files and other objects to the
- netfs. The simplest cookie is just a NULL pointer - indicating nothing
- cached there.
-
- (6) The netfs is allowed to propose - dynamically - any index hierarchy it
- desires, though it must be aware that the index search function is
- recursive, stack space is limited, and indices can only be children of
- indices.
-
- (7) Data I/O is done direct to and from the netfs's pages. The netfs
- indicates that page A is at index B of the data-file represented by cookie
- C, and that it should be read or written. The cache backend may or may
- not start I/O on that page, but if it does, a netfs callback will be
- invoked to indicate completion. The I/O may be either synchronous or
- asynchronous.
-
- (8) Cookies can be "retired" upon release. At this point FS-Cache will mark
- them as obsolete and the index hierarchy rooted at that point will get
- recycled.
-
- (9) The netfs provides a "match" function for index searches. In addition to
- saying whether a match was made or not, this can also specify that an
- entry should be updated or deleted.
-
-(10) As much as possible is done asynchronously.
-
-
-FS-Cache maintains a virtual indexing tree in which all indices, files, objects
-and pages are kept. Bits of this tree may actually reside in one or more
-caches::
-
- FSDEF
- |
- +------------------------------------+
- | |
- NFS AFS
- | |
- +--------------------------+ +-----------+
- | | | |
- homedir mirror afs.org redhat.com
- | | |
- +------------+ +---------------+ +----------+
- | | | | | |
- 00001 00002 00007 00125 vol00001 vol00002
- | | | | |
- +---+---+ +-----+ +---+ +------+------+ +-----+----+
- | | | | | | | | | | | | |
- PG0 PG1 PG2 PG0 XATTR PG0 PG1 DIRENT DIRENT DIRENT R/W R/O Bak
- | |
- PG0 +-------+
- | |
- 00001 00003
- |
- +---+---+
- | | |
- PG0 PG1 PG2
-
-In the example above, you can see two netfs's being backed: NFS and AFS. These
-have different index hierarchies:
-
- * The NFS primary index contains per-server indices. Each server index is
- indexed by NFS file handles to get data file objects. Each data file
- objects can have an array of pages, but may also have further child
- objects, such as extended attributes and directory entries. Extended
- attribute objects themselves have page-array contents.
-
- * The AFS primary index contains per-cell indices. Each cell index contains
- per-logical-volume indices. Each of volume index contains up to three
- indices for the read-write, read-only and backup mirrors of those volumes.
- Each of these contains vnode data file objects, each of which contains an
- array of pages.
-
-The very top index is the FS-Cache master index in which individual netfs's
-have entries.
-
-Any index object may reside in more than one cache, provided it only has index
-children. Any index with non-index object children will be assumed to only
-reside in one cache.
+ * There are three types of cookie: cache, volume and data file cookies.
+ Cache cookies represent the cache as a whole and are not normally visible
+ to the netfs; the netfs gets a volume cookie to represent a collection of
+ files (typically something that a netfs would get for a superblock); and
+ data file cookies are used to cache data (something that would be got for
+ an inode).
+
+ * Volumes are matched using a key. This is a printable string that is used
+ to encode all the information that might be needed to distinguish one
+ superblock, say, from another. This would be a compound of things like
+ cell name or server address, volume name or share path. It must be a
+ valid pathname.
+
+ * Cookies are matched using a key. This is a binary blob and is used to
+ represent the object within a volume (so the volume key need not form
+ part of the blob). This might include things like an inode number and
+ uniquifier or a file handle.
+
+ * Cookie resources are set up and pinned by marking the cookie in-use.
+ This prevents the backing resources from being culled. Timed garbage
+ collection is employed to eliminate cookies that haven't been used for a
+ short while, thereby reducing resource overload. This is intended to be
+ used when a file is opened or closed.
+
+   A cookie can be marked in-use multiple times simultaneously; each mark
+   must be removed by a corresponding unuse.
+
+ * Begin/end access functions are provided to delay cache withdrawal for the
+ duration of an operation and prevent structs from being freed whilst
+ we're looking at them.
+
+ * Data I/O is done by asynchronous DIO to/from a buffer described by the
+ netfs using an iov_iter.
+
+ * An invalidation facility is available to discard data from the cache and
+ to deal with I/O that's in progress that is accessing old data.
+
+ * Cookies can be "retired" upon release, thereby causing the object to be
+ removed from the cache.
The netfs API to FS-Cache can be found in:
@@ -189,11 +150,6 @@ The cache backend API to FS-Cache can be found in:
Documentation/filesystems/caching/backend-api.rst
-A description of the internal representations and object state machine can be
-found in:
-
- Documentation/filesystems/caching/object.rst
-
Statistical Information
=======================
@@ -201,333 +157,162 @@ Statistical Information
If FS-Cache is compiled with the following options enabled::
CONFIG_FSCACHE_STATS=y
- CONFIG_FSCACHE_HISTOGRAM=y
-then it will gather certain statistics and display them through a number of
-proc files.
+then it will gather certain statistics and display them through:
-/proc/fs/fscache/stats
-----------------------
+ /proc/fs/fscache/stats
- This shows counts of a number of events that can happen in FS-Cache:
+This shows counts of a number of events that can happen in FS-Cache:
+--------------+-------+-------------------------------------------------------+
|CLASS |EVENT |MEANING |
+==============+=======+=======================================================+
-|Cookies |idx=N |Number of index cookies allocated |
-+ +-------+-------------------------------------------------------+
-| |dat=N |Number of data storage cookies allocated |
+|Cookies |n=N |Number of data storage cookies allocated |
+ +-------+-------------------------------------------------------+
-| |spc=N |Number of special cookies allocated |
-+--------------+-------+-------------------------------------------------------+
-|Objects |alc=N |Number of objects allocated |
-+ +-------+-------------------------------------------------------+
-| |nal=N |Number of object allocation failures |
+| |v=N |Number of volume index cookies allocated |
+ +-------+-------------------------------------------------------+
-| |avl=N |Number of objects that reached the available state |
-+ +-------+-------------------------------------------------------+
-| |ded=N |Number of objects that reached the dead state |
-+--------------+-------+-------------------------------------------------------+
-|ChkAux |non=N |Number of objects that didn't have a coherency check |
+| |vcol=N |Number of volume index key collisions |
+ +-------+-------------------------------------------------------+
-| |ok=N |Number of objects that passed a coherency check |
-+ +-------+-------------------------------------------------------+
-| |upd=N |Number of objects that needed a coherency data update |
-+ +-------+-------------------------------------------------------+
-| |obs=N |Number of objects that were declared obsolete |
-+--------------+-------+-------------------------------------------------------+
-|Pages |mrk=N |Number of pages marked as being cached |
-| |unc=N |Number of uncache page requests seen |
+| |voom=N |Number of OOM events when allocating volume cookies |
+--------------+-------+-------------------------------------------------------+
|Acquire |n=N |Number of acquire cookie requests seen |
+ +-------+-------------------------------------------------------+
-| |nul=N |Number of acq reqs given a NULL parent |
-+ +-------+-------------------------------------------------------+
-| |noc=N |Number of acq reqs rejected due to no cache available |
-+ +-------+-------------------------------------------------------+
| |ok=N |Number of acq reqs succeeded |
+ +-------+-------------------------------------------------------+
-| |nbf=N |Number of acq reqs rejected due to error |
-+ +-------+-------------------------------------------------------+
| |oom=N |Number of acq reqs failed on ENOMEM |
+--------------+-------+-------------------------------------------------------+
-|Lookups |n=N |Number of lookup calls made on cache backends |
+|LRU |n=N |Number of cookies currently on the LRU |
+ +-------+-------------------------------------------------------+
-| |neg=N |Number of negative lookups made |
+| |exp=N |Number of cookies expired off of the LRU |
+ +-------+-------------------------------------------------------+
-| |pos=N |Number of positive lookups made |
+| |rmv=N |Number of cookies removed from the LRU |
+ +-------+-------------------------------------------------------+
-| |crt=N |Number of objects created by lookup |
+| |drp=N |Number of LRU'd cookies relinquished/withdrawn |
+ +-------+-------------------------------------------------------+
-| |tmo=N |Number of lookups timed out and requeued |
+| |at=N |Time till next LRU cull (jiffies) |
++--------------+-------+-------------------------------------------------------+
+|Invals |n=N |Number of invalidations |
+--------------+-------+-------------------------------------------------------+
|Updates |n=N |Number of update cookie requests seen |
+ +-------+-------------------------------------------------------+
-| |nul=N |Number of upd reqs given a NULL parent |
+| |rsz=N |Number of resize requests |
+ +-------+-------------------------------------------------------+
-| |run=N |Number of upd reqs granted CPU time |
+| |rsn=N |Number of skipped resize requests |
+--------------+-------+-------------------------------------------------------+
|Relinqs |n=N |Number of relinquish cookie requests seen |
+ +-------+-------------------------------------------------------+
-| |nul=N |Number of rlq reqs given a NULL parent |
+| |rtr=N |Number of rlq reqs with retire=true |
+ +-------+-------------------------------------------------------+
-| |wcr=N |Number of rlq reqs waited on completion of creation |
+| |drop=N |Number of cookies no longer blocking re-acquisition |
+--------------+-------+-------------------------------------------------------+
-|AttrChg |n=N |Number of attribute changed requests seen |
-+ +-------+-------------------------------------------------------+
-| |ok=N |Number of attr changed requests queued |
-+ +-------+-------------------------------------------------------+
-| |nbf=N |Number of attr changed rejected -ENOBUFS |
+|NoSpace |nwr=N |Number of write requests refused due to lack of space |
+ +-------+-------------------------------------------------------+
-| |oom=N |Number of attr changed failed -ENOMEM |
+| |ncr=N |Number of create requests refused due to lack of space |
+ +-------+-------------------------------------------------------+
-| |run=N |Number of attr changed ops given CPU time |
+| |cull=N |Number of objects culled to make space |
+--------------+-------+-------------------------------------------------------+
-|Allocs |n=N |Number of allocation requests seen |
+|IO |rd=N |Number of read operations in the cache |
+ +-------+-------------------------------------------------------+
-| |ok=N |Number of successful alloc reqs |
-+ +-------+-------------------------------------------------------+
-| |wt=N |Number of alloc reqs that waited on lookup completion |
-+ +-------+-------------------------------------------------------+
-| |nbf=N |Number of alloc reqs rejected -ENOBUFS |
-+ +-------+-------------------------------------------------------+
-| |int=N |Number of alloc reqs aborted -ERESTARTSYS |
-+ +-------+-------------------------------------------------------+
-| |ops=N |Number of alloc reqs submitted |
-+ +-------+-------------------------------------------------------+
-| |owt=N |Number of alloc reqs waited for CPU time |
-+ +-------+-------------------------------------------------------+
-| |abt=N |Number of alloc reqs aborted due to object death |
-+--------------+-------+-------------------------------------------------------+
-|Retrvls |n=N |Number of retrieval (read) requests seen |
-+ +-------+-------------------------------------------------------+
-| |ok=N |Number of successful retr reqs |
-+ +-------+-------------------------------------------------------+
-| |wt=N |Number of retr reqs that waited on lookup completion |
-+ +-------+-------------------------------------------------------+
-| |nod=N |Number of retr reqs returned -ENODATA |
-+ +-------+-------------------------------------------------------+
-| |nbf=N |Number of retr reqs rejected -ENOBUFS |
-+ +-------+-------------------------------------------------------+
-| |int=N |Number of retr reqs aborted -ERESTARTSYS |
-+ +-------+-------------------------------------------------------+
-| |oom=N |Number of retr reqs failed -ENOMEM |
-+ +-------+-------------------------------------------------------+
-| |ops=N |Number of retr reqs submitted |
-+ +-------+-------------------------------------------------------+
-| |owt=N |Number of retr reqs waited for CPU time |
-+ +-------+-------------------------------------------------------+
-| |abt=N |Number of retr reqs aborted due to object death |
-+--------------+-------+-------------------------------------------------------+
-|Stores |n=N |Number of storage (write) requests seen |
-+ +-------+-------------------------------------------------------+
-| |ok=N |Number of successful store reqs |
-+ +-------+-------------------------------------------------------+
-| |agn=N |Number of store reqs on a page already pending storage |
-+ +-------+-------------------------------------------------------+
-| |nbf=N |Number of store reqs rejected -ENOBUFS |
-+ +-------+-------------------------------------------------------+
-| |oom=N |Number of store reqs failed -ENOMEM |
-+ +-------+-------------------------------------------------------+
-| |ops=N |Number of store reqs submitted |
-+ +-------+-------------------------------------------------------+
-| |run=N |Number of store reqs granted CPU time |
-+ +-------+-------------------------------------------------------+
-| |pgs=N |Number of pages given store req processing time |
-+ +-------+-------------------------------------------------------+
-| |rxd=N |Number of store reqs deleted from tracking tree |
-+ +-------+-------------------------------------------------------+
-| |olm=N |Number of store reqs over store limit |
-+--------------+-------+-------------------------------------------------------+
-|VmScan |nos=N |Number of release reqs against pages with no |
-| | |pending store |
-+ +-------+-------------------------------------------------------+
-| |gon=N |Number of release reqs against pages stored by |
-| | |time lock granted |
-+ +-------+-------------------------------------------------------+
-| |bsy=N |Number of release reqs ignored due to in-progress store|
-+ +-------+-------------------------------------------------------+
-| |can=N |Number of page stores cancelled due to release req |
-+--------------+-------+-------------------------------------------------------+
-|Ops |pend=N |Number of times async ops added to pending queues |
-+ +-------+-------------------------------------------------------+
-| |run=N |Number of times async ops given CPU time |
-+ +-------+-------------------------------------------------------+
-| |enq=N |Number of times async ops queued for processing |
-+ +-------+-------------------------------------------------------+
-| |can=N |Number of async ops cancelled |
-+ +-------+-------------------------------------------------------+
-| |rej=N |Number of async ops rejected due to object |
-| | |lookup/create failure |
-+ +-------+-------------------------------------------------------+
-| |ini=N |Number of async ops initialised |
-+ +-------+-------------------------------------------------------+
-| |dfr=N |Number of async ops queued for deferred release |
-+ +-------+-------------------------------------------------------+
-| |rel=N |Number of async ops released |
-| | |(should equal ini=N when idle) |
-+ +-------+-------------------------------------------------------+
-| |gc=N |Number of deferred-release async ops garbage collected |
-+--------------+-------+-------------------------------------------------------+
-|CacheOp |alo=N |Number of in-progress alloc_object() cache ops |
-+ +-------+-------------------------------------------------------+
-| |luo=N |Number of in-progress lookup_object() cache ops |
-+ +-------+-------------------------------------------------------+
-| |luc=N |Number of in-progress lookup_complete() cache ops |
-+ +-------+-------------------------------------------------------+
-| |gro=N |Number of in-progress grab_object() cache ops |
-+ +-------+-------------------------------------------------------+
-| |upo=N |Number of in-progress update_object() cache ops |
-+ +-------+-------------------------------------------------------+
-| |dro=N |Number of in-progress drop_object() cache ops |
-+ +-------+-------------------------------------------------------+
-| |pto=N |Number of in-progress put_object() cache ops |
-+ +-------+-------------------------------------------------------+
-| |syn=N |Number of in-progress sync_cache() cache ops |
-+ +-------+-------------------------------------------------------+
-| |atc=N |Number of in-progress attr_changed() cache ops |
-+ +-------+-------------------------------------------------------+
-| |rap=N |Number of in-progress read_or_alloc_page() cache ops |
-+ +-------+-------------------------------------------------------+
-| |ras=N |Number of in-progress read_or_alloc_pages() cache ops |
-+ +-------+-------------------------------------------------------+
-| |alp=N |Number of in-progress allocate_page() cache ops |
-+ +-------+-------------------------------------------------------+
-| |als=N |Number of in-progress allocate_pages() cache ops |
-+ +-------+-------------------------------------------------------+
-| |wrp=N |Number of in-progress write_page() cache ops |
-+ +-------+-------------------------------------------------------+
-| |ucp=N |Number of in-progress uncache_page() cache ops |
-+ +-------+-------------------------------------------------------+
-| |dsp=N |Number of in-progress dissociate_pages() cache ops |
-+--------------+-------+-------------------------------------------------------+
-|CacheEv |nsp=N |Number of object lookups/creations rejected due to |
-| | |lack of space |
-+ +-------+-------------------------------------------------------+
-| |stl=N |Number of stale objects deleted |
-+ +-------+-------------------------------------------------------+
-| |rtr=N |Number of objects retired when relinquished |
-+ +-------+-------------------------------------------------------+
-| |cul=N |Number of objects culled |
+| |wr=N |Number of write operations in the cache |
+--------------+-------+-------------------------------------------------------+
+Netfslib will also add some stats counters of its own.
-/proc/fs/fscache/histogram
---------------------------
+Cache List
+==========
- ::
+FS-Cache provides a list of cache cookies:
- cat /proc/fs/fscache/histogram
- JIFS SECS OBJ INST OP RUNS OBJ RUNS RETRV DLY RETRIEVLS
- ===== ===== ========= ========= ========= ========= =========
+	/proc/fs/fscache/caches
- This shows the breakdown of the number of times each amount of time
- between 0 jiffies and HZ-1 jiffies a variety of tasks took to run. The
- columns are as follows:
+This will look something like::
- ========= =======================================================
- COLUMN TIME MEASUREMENT
- ========= =======================================================
- OBJ INST Length of time to instantiate an object
- OP RUNS Length of time a call to process an operation took
- OBJ RUNS Length of time a call to process an object event took
- RETRV DLY Time between an requesting a read and lookup completing
- RETRIEVLS Time between beginning and end of a retrieval
- ========= =======================================================
+ # cat /proc/fs/fscache/caches
+ CACHE REF VOLS OBJS ACCES S NAME
+ ======== ===== ===== ===== ===== = ===============
+ 00000001 2 1 2123 1 A default
- Each row shows the number of events that took a particular range of times.
- Each step is 1 jiffy in size. The JIFS column indicates the particular
- jiffy range covered, and the SECS field the equivalent number of seconds.
+where the columns are:
+ ======= ===============================================================
+ COLUMN DESCRIPTION
+ ======= ===============================================================
+ CACHE Cache cookie debug ID (also appears in traces)
+ REF Number of references on the cache cookie
+	VOLS	Number of volume cookies in this cache
+ OBJS Number of cache objects in use
+ ACCES Number of accesses pinning the cache
+ S State
+ NAME Name of the cache.
+ ======= ===============================================================
+
+The state can be (-) Inactive, (P)reparing, (A)ctive, (E)rror or (W)ithdrawing.
-Object List
+Volume List
===========
-If CONFIG_FSCACHE_OBJECT_LIST is enabled, the FS-Cache facility will maintain a
-list of all the objects currently allocated and allow them to be viewed
-through::
+FS-Cache provides a list of volume cookies:
- /proc/fs/fscache/objects
+ /proc/fs/fscache/volumes
This will look something like::
- [root@andromeda ~]# head /proc/fs/fscache/objects
- OBJECT PARENT STAT CHLDN OPS OOP IPR EX READS EM EV F S | NETFS_COOKIE_DEF TY FL NETFS_DATA OBJECT_KEY, AUX_DATA
- ======== ======== ==== ===== === === === == ===== == == = = | ================ == == ================ ================
- 17e4b 2 ACTV 0 0 0 0 0 0 7b 4 0 0 | NFS.fh DT 0 ffff88001dd82820 010006017edcf8bbc93b43298fdfbe71e50b57b13a172c0117f38472, e567634700000000000000000000000063f2404a000000000000000000000000c9030000000000000000000063f2404a
- 1693a 2 ACTV 0 0 0 0 0 0 7b 4 0 0 | NFS.fh DT 0 ffff88002db23380 010006017edcf8bbc93b43298fdfbe71e50b57b1e0162c01a2df0ea6, 420ebc4a000000000000000000000000420ebc4a0000000000000000000000000e1801000000000000000000420ebc4a
+ VOLUME REF nCOOK ACC FL CACHE KEY
+ ======== ===== ===== === == =============== ================
+ 00000001 55 54 1 00 default afs,example.com,100058
-where the first set of columns before the '|' describe the object:
+where the columns are:
======= ===============================================================
COLUMN DESCRIPTION
======= ===============================================================
- OBJECT Object debugging ID (appears as OBJ%x in some debug messages)
- PARENT Debugging ID of parent object
- STAT Object state
- CHLDN Number of child objects of this object
- OPS Number of outstanding operations on this object
- OOP Number of outstanding child object management operations
- IPR
- EX Number of outstanding exclusive operations
- READS Number of outstanding read operations
- EM Object's event mask
- EV Events raised on this object
- F Object flags
- S Object work item busy state mask (1:pending 2:running)
+ VOLUME The volume cookie debug ID (also appears in traces)
+ REF Number of references on the volume cookie
+ nCOOK Number of cookies in the volume
+ ACC Number of accesses pinning the cache
+ FL Flags on the volume cookie
+ CACHE Name of the cache or "-"
+ KEY The indexing key for the volume
======= ===============================================================
-and the second set of columns describe the object's cookie, if present:
-
- ================ ======================================================
- COLUMN DESCRIPTION
- ================ ======================================================
- NETFS_COOKIE_DEF Name of netfs cookie definition
- TY Cookie type (IX - index, DT - data, hex - special)
- FL Cookie flags
- NETFS_DATA Netfs private data stored in the cookie
- OBJECT_KEY Object key } 1 column, with separating comma
- AUX_DATA Object aux data } presence may be configured
- ================ ======================================================
-
-The data shown may be filtered by attaching the a key to an appropriate keyring
-before viewing the file. Something like::
-
- keyctl add user fscache:objlist <restrictions> @s
-
-where <restrictions> are a selection of the following letters:
- == =========================================================
- K Show hexdump of object key (don't show if not given)
- A Show hexdump of object aux data (don't show if not given)
- == =========================================================
+Cookie List
+===========
-and the following paired letters:
+FS-Cache provides a list of cookies:
- == =========================================================
- C Show objects that have a cookie
- c Show objects that don't have a cookie
- B Show objects that are busy
- b Show objects that aren't busy
- W Show objects that have pending writes
- w Show objects that don't have pending writes
- R Show objects that have outstanding reads
- r Show objects that don't have outstanding reads
- S Show objects that have work queued
- s Show objects that don't have work queued
- == =========================================================
+ /proc/fs/fscache/cookies
-If neither side of a letter pair is given, then both are implied. For example:
+This will look something like::
- keyctl add user fscache:objlist KB @s
+ # head /proc/fs/fscache/cookies
+ COOKIE VOLUME REF ACT ACC S FL DEF
+ ======== ======== === === === = == ================
+ 00000435 00000001 1 0 -1 - 08 0000000201d080070000000000000000, 0000000000000000
+ 00000436 00000001 1 0 -1 - 00 0000005601d080080000000000000000, 0000000000000051
+ 00000437 00000001 1 0 -1 - 08 00023b3001d0823f0000000000000000, 0000000000000000
+ 00000438 00000001 1 0 -1 - 08 0000005801d0807b0000000000000000, 0000000000000000
+ 00000439 00000001 1 0 -1 - 08 00023b3201d080a10000000000000000, 0000000000000000
+ 0000043a 00000001 1 0 -1 - 08 00023b3401d080a30000000000000000, 0000000000000000
+ 0000043b 00000001 1 0 -1 - 08 00023b3601d080b30000000000000000, 0000000000000000
+ 0000043c 00000001 1 0 -1 - 08 00023b3801d080b40000000000000000, 0000000000000000
-shows objects that are busy, and lists their object keys, but does not dump
-their auxiliary data. It also implies "CcWwRrSs", but as 'B' is given, 'b' is
-not implied.
+where the columns are:
-By default all objects and all fields will be shown.
+ ======= ===============================================================
+ COLUMN DESCRIPTION
+ ======= ===============================================================
+ COOKIE The cookie debug ID (also appears in traces)
+ VOLUME The parent volume cookie debug ID
+	REF	Number of references on the cookie
+	ACT	Number of times the cookie is marked in use
+ ACC Number of access pins in the cookie
+ S State of the cookie
+ FL Flags on the cookie
+ DEF Key, auxiliary data
+ ======= ===============================================================
Debugging
@@ -549,10 +334,8 @@ This is a bitmask of debugging streams to enable:
3 8 Cookie management Function entry trace
4 16 Function exit trace
5 32 General
- 6 64 Page handling Function entry trace
- 7 128 Function exit trace
- 8 256 General
- 9 512 Operation management Function entry trace
+ 6-8 (Not used)
+ 9 512 I/O operation management Function entry trace
10 1024 Function exit trace
11 2048 General
======= ======= =============================== =======================
@@ -560,6 +343,6 @@ This is a bitmask of debugging streams to enable:
The appropriate set of values should be OR'd together and the result written to
the control file. For example::
- echo $((1|8|64)) >/sys/module/fscache/parameters/debug
+ echo $((1|8|512)) >/sys/module/fscache/parameters/debug
will turn on all function entry debugging.
diff --git a/Documentation/filesystems/caching/index.rst b/Documentation/filesystems/caching/index.rst
index 033da7ac7c6e..df4307124b00 100644
--- a/Documentation/filesystems/caching/index.rst
+++ b/Documentation/filesystems/caching/index.rst
@@ -7,8 +7,6 @@ Filesystem Caching
:maxdepth: 2
fscache
- object
+ netfs-api
backend-api
cachefiles
- netfs-api
- operations
diff --git a/Documentation/filesystems/caching/netfs-api.rst b/Documentation/filesystems/caching/netfs-api.rst
index d9f14b8610ba..f84e9ffdf0b4 100644
--- a/Documentation/filesystems/caching/netfs-api.rst
+++ b/Documentation/filesystems/caching/netfs-api.rst
@@ -1,896 +1,452 @@
.. SPDX-License-Identifier: GPL-2.0
-===============================
-FS-Cache Network Filesystem API
-===============================
+==============================
+Network Filesystem Caching API
+==============================
-There's an API by which a network filesystem can make use of the FS-Cache
-facilities. This is based around a number of principles:
+Fscache provides an API by which a network filesystem can make use of local
+caching facilities. The API is arranged around a number of principles:
- (1) Caches can store a number of different object types. There are two main
- object types: indices and files. The first is a special type used by
- FS-Cache to make finding objects faster and to make retiring of groups of
- objects easier.
+ (1) A cache is logically organised into volumes and data storage objects
+ within those volumes.
- (2) Every index, file or other object is represented by a cookie. This cookie
- may or may not have anything associated with it, but the netfs doesn't
- need to care.
+ (2) Volumes and data storage objects are represented by various types of
+ cookie.
- (3) Barring the top-level index (one entry per cached netfs), the index
- hierarchy for each netfs is structured according the whim of the netfs.
+ (3) Cookies have keys that distinguish them from their peers.
-This API is declared in <linux/fscache.h>.
+ (4) Cookies have coherency data that allows a cache to determine if the
+ cached data is still valid.
-.. This document contains the following sections:
-
- (1) Network filesystem definition
- (2) Index definition
- (3) Object definition
- (4) Network filesystem (un)registration
- (5) Cache tag lookup
- (6) Index registration
- (7) Data file registration
- (8) Miscellaneous object registration
- (9) Setting the data file size
- (10) Page alloc/read/write
- (11) Page uncaching
- (12) Index and data file consistency
- (13) Cookie enablement
- (14) Miscellaneous cookie operations
- (15) Cookie unregistration
- (16) Index invalidation
- (17) Data file invalidation
- (18) FS-Cache specific page flags.
-
-
-Network Filesystem Definition
-=============================
-
-FS-Cache needs a description of the network filesystem. This is specified
-using a record of the following structure::
-
- struct fscache_netfs {
- uint32_t version;
- const char *name;
- struct fscache_cookie *primary_index;
- ...
- };
-
-This first two fields should be filled in before registration, and the third
-will be filled in by the registration function; any other fields should just be
-ignored and are for internal use only.
-
-The fields are:
-
- (1) The name of the netfs (used as the key in the toplevel index).
-
- (2) The version of the netfs (if the name matches but the version doesn't, the
- entire in-cache hierarchy for this netfs will be scrapped and begun
- afresh).
-
- (3) The cookie representing the primary index will be allocated according to
- another parameter passed into the registration function.
-
-For example, kAFS (linux/fs/afs/) uses the following definitions to describe
-itself::
-
- struct fscache_netfs afs_cache_netfs = {
- .version = 0,
- .name = "afs",
- };
-
-
-Index Definition
-================
-
-Indices are used for two purposes:
-
- (1) To aid the finding of a file based on a series of keys (such as AFS's
- "cell", "volume ID", "vnode ID").
-
- (2) To make it easier to discard a subset of all the files cached based around
- a particular key - for instance to mirror the removal of an AFS volume.
-
-However, since it's unlikely that any two netfs's are going to want to define
-their index hierarchies in quite the same way, FS-Cache tries to impose as few
-restraints as possible on how an index is structured and where it is placed in
-the tree. The netfs can even mix indices and data files at the same level, but
-it's not recommended.
-
-Each index entry consists of a key of indeterminate length plus some auxiliary
-data, also of indeterminate length.
-
-There are some limits on indices:
-
- (1) Any index containing non-index objects should be restricted to a single
- cache. Any such objects created within an index will be created in the
- first cache only. The cache in which an index is created can be
- controlled by cache tags (see below).
-
- (2) The entry data must be atomically journallable, so it is limited to about
- 400 bytes at present. At least 400 bytes will be available.
-
- (3) The depth of the index tree should be judged with care as the search
- function is recursive. Too many layers will run the kernel out of stack.
-
-
-Object Definition
-=================
-
-To define an object, a structure of the following type should be filled out::
-
- struct fscache_cookie_def
- {
- uint8_t name[16];
- uint8_t type;
-
- struct fscache_cache_tag *(*select_cache)(
- const void *parent_netfs_data,
- const void *cookie_netfs_data);
-
- enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size);
-
- void (*get_context)(void *cookie_netfs_data, void *context);
-
- void (*put_context)(void *cookie_netfs_data, void *context);
-
- void (*mark_pages_cached)(void *cookie_netfs_data,
- struct address_space *mapping,
- struct pagevec *cached_pvec);
- };
-
-This has the following fields:
-
- (1) The type of the object [mandatory].
-
- This is one of the following values:
-
- FSCACHE_COOKIE_TYPE_INDEX
- This defines an index, which is a special FS-Cache type.
-
- FSCACHE_COOKIE_TYPE_DATAFILE
- This defines an ordinary data file.
-
- Any other value between 2 and 255
- This defines an extraordinary object such as an XATTR.
-
- (2) The name of the object type (NUL terminated unless all 16 chars are used)
- [optional].
-
- (3) A function to select the cache in which to store an index [optional].
-
- This function is invoked when an index needs to be instantiated in a cache
- during the instantiation of a non-index object. Only the immediate index
- parent for the non-index object will be queried. Any indices above that
- in the hierarchy may be stored in multiple caches. This function does not
- need to be supplied for any non-index object or any index that will only
- have index children.
-
- If this function is not supplied or if it returns NULL then the first
- cache in the parent's list will be chosen, or failing that, the first
- cache in the master list.
-
- (4) A function to check the auxiliary data [optional].
-
- This function will be called to check that a match found in the cache for
- this object is valid. For instance with AFS it could check the auxiliary
- data against the data version number returned by the server to determine
- whether the index entry in a cache is still valid.
-
- If this function is absent, it will be assumed that matching objects in a
- cache are always valid.
-
- The function is also passed the cache's idea of the object size and may
- use this to manage coherency also.
-
- If present, the function should return one of the following values:
-
- FSCACHE_CHECKAUX_OKAY
- - the entry is okay as is
-
- FSCACHE_CHECKAUX_NEEDS_UPDATE
- - the entry requires update
-
- FSCACHE_CHECKAUX_OBSOLETE
- - the entry should be deleted
+ (5) I/O is done asynchronously where possible.
- This function can also be used to extract data from the auxiliary data in
- the cache and copy it into the netfs's structures.
+This API is used by::
- (5) A pair of functions to manage contexts for the completion callback
- [optional].
+	#include <linux/fscache.h>
- The cache read/write functions are passed a context which is then passed
- to the I/O completion callback function. To ensure this context remains
- valid until after the I/O completion is called, two functions may be
- provided: one to get an extra reference on the context, and one to drop a
- reference to it.
-
- If the context is not used or is a type of object that won't go out of
- scope, then these functions are not required. These functions are not
- required for indices as indices may not contain data. These functions may
- be called in interrupt context and so may not sleep.
-
- (6) A function to mark a page as retaining cache metadata [optional].
-
- This is called by the cache to indicate that it is retaining in-memory
- information for this page and that the netfs should uncache the page when
- it has finished. This does not indicate whether there's data on the disk
- or not. Note that several pages at once may be presented for marking.
-
- The PG_fscache bit is set on the pages before this function would be
- called, so the function need not be provided if this is sufficient.
-
- This function is not required for indices as they're not permitted data.
-
- (7) A function to unmark all the pages retaining cache metadata [mandatory].
-
- This is called by FS-Cache to indicate that a backing store is being
- unbound from a cookie and that all the marks on the pages should be
- cleared to prevent confusion. Note that the cache will have torn down all
- its tracking information so that the pages don't need to be explicitly
- uncached.
-
- This function is not required for indices as they're not permitted data.
-
-
-Network Filesystem (Un)registration
-===================================
-
-The first step is to declare the network filesystem to the cache. This also
-involves specifying the layout of the primary index (for AFS, this would be the
-"cell" level).
-
-The registration function is::
-
- int fscache_register_netfs(struct fscache_netfs *netfs);
-
-It just takes a pointer to the netfs definition. It returns 0 or an error as
-appropriate.
-
-For kAFS, registration is done as follows::
-
- ret = fscache_register_netfs(&afs_cache_netfs);
-
-The last step is, of course, unregistration::
-
- void fscache_unregister_netfs(struct fscache_netfs *netfs);
-
-
-Cache Tag Lookup
-================
-
-FS-Cache permits the use of more than one cache. To permit particular index
-subtrees to be bound to particular caches, the second step is to look up cache
-representation tags. This step is optional; it can be left entirely up to
-FS-Cache as to which cache should be used. The problem with doing that is that
-FS-Cache will always pick the first cache that was registered.
-
-To get the representation for a named tag::
-
- struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name);
-
-This takes a text string as the name and returns a representation of a tag. It
-will never return an error. It may return a dummy tag, however, if it runs out
-of memory; this will inhibit caching with this tag.
-
-Any representation so obtained must be released by passing it to this function::
-
- void fscache_release_cache_tag(struct fscache_cache_tag *tag);
+.. This document contains the following sections:
-The tag will be retrieved by FS-Cache when it calls the object definition
-operation select_cache().
+ (1) Overview
+ (2) Volume registration
+ (3) Data file registration
+ (4) Declaring a cookie to be in use
+ (5) Resizing a data file (truncation)
+ (6) Data I/O API
+ (7) Data file coherency
+ (8) Data file invalidation
+ (9) Write back resource management
+ (10) Caching of local modifications
+ (11) Page release and invalidation
+
+
+Overview
+========
+
+The fscache hierarchy is organised on two levels from a network filesystem's
+point of view. The upper level represents "volumes" and the lower level
+represents "data storage objects". These are represented by two types of
+cookie, hereafter referred to as "volume cookies" and "cookies".
+
+A network filesystem acquires a volume cookie for a volume using a volume key,
+which represents all the information that defines that volume (e.g. cell name
+or server address, volume ID or share name). This must be rendered as a
+printable string that can be used as a directory name (ie. no '/' characters
+and shouldn't begin with a '.'). The maximum name length is one less than the
+maximum size of a filename component (allowing the cache backend one char for
+its own purposes).
+
+A filesystem would typically have a volume cookie for each superblock.
+
+The filesystem then acquires a cookie for each file within that volume using an
+object key. Object keys are binary blobs and only need to be unique within
+their parent volume. The cache backend is responsible for rendering the binary
+blob into something it can use and may employ hash tables, trees or whatever to
+improve its ability to find an object. This is transparent to the network
+filesystem.
+
+A filesystem would typically have a cookie for each inode, and would acquire it
+in iget and relinquish it when evicting the inode.
+
+Once it has a cookie, the filesystem needs to mark the cookie as being in use.
+This causes fscache to send the cache backend off to look up/create resources
+for the cookie in the background, to check its coherency and, if necessary, to
+mark the object as being under modification.
+
+A filesystem would typically "use" the cookie in its file open routine and
+unuse it in file release, and it needs to use the cookie around calls that
+truncate the file locally. It *also* needs to use the cookie when the
+pagecache becomes dirty and unuse it when writeback is complete. This is
+slightly tricky, and provision is made for it.
+
+When performing a read, write or resize on a cookie, the filesystem must first
+begin an operation. This copies the resources into a holding struct and puts
+extra pins into the cache to stop cache withdrawal from tearing down the
+structures being used. The actual operation can then be issued and conflicting
+invalidations can be detected upon completion.
+
+The filesystem is expected to use netfslib to access the cache, but that's not
+actually required and it can use the fscache I/O API directly.
+
+
+Volume Registration
+===================
+
+The first step for a network filesystem is to acquire a volume cookie for the
+volume it wants to access::
+
+ struct fscache_volume *
+ fscache_acquire_volume(const char *volume_key,
+ const char *cache_name,
+ const void *coherency_data,
+ size_t coherency_len);
+
+This function creates a volume cookie with the specified volume key as its name
+and notes the coherency data.
+
+The volume key must be a printable string with no '/' characters in it. It
+should begin with the name of the filesystem and should be no longer than 254
+characters. It should uniquely represent the volume and will be matched with
+what's stored in the cache.
+
+The caller may also specify the name of the cache to use. If specified,
+fscache will look up or create a cache cookie of that name and will use a cache
+of that name if it is online or comes online. If no cache name is specified,
+it will use the first cache that comes to hand and set the name to that.
+
+The specified coherency data is stored in the cookie and will be matched
+against coherency data stored on disk. The data pointer may be NULL if no data
+is provided. If the coherency data doesn't match, the entire cache volume will
+be invalidated.
+
+This function can return errors such as EBUSY if the volume key is already in
+use by an acquired volume or ENOMEM if an allocation failure occurred. It may
+also return a NULL volume cookie if fscache is not enabled. It is safe to
+pass a NULL cookie to any function that takes a volume cookie. This will
+cause that function to do nothing.
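+
+As an illustrative sketch (the myfs names, superblock fields and key format
+here are assumptions for the example, not part of the API), a filesystem might
+acquire its volume cookie at mount time like this::
+
+	static int myfs_acquire_volume(struct myfs_sb_info *sbi)
+	{
+		char *key;
+
+		/* Render a printable volume key, e.g. "myfs,<server>,<share>". */
+		key = kasprintf(GFP_KERNEL, "myfs,%s,%s",
+				sbi->server_name, sbi->share_name);
+		if (!key)
+			return -ENOMEM;
+
+		/* A NULL cache name means "use whatever cache comes to hand". */
+		sbi->volume = fscache_acquire_volume(key, NULL, NULL, 0);
+		kfree(key);
+
+		if (IS_ERR(sbi->volume)) {
+			int ret = PTR_ERR(sbi->volume);	/* e.g. -EBUSY */
+			sbi->volume = NULL;
+			return ret;
+		}
+		/* May be NULL if fscache is disabled; that's safe to keep. */
+		return 0;
+	}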
+
+
+When the network filesystem has finished with a volume, it should relinquish it
+by calling::
+
+ void fscache_relinquish_volume(struct fscache_volume *volume,
+ const void *coherency_data,
+ bool invalidate);
+
+This will cause the volume to be committed or removed, and, if sealed, the
+coherency data will be set to the value supplied. The amount of coherency data
+must match the length specified when the volume was acquired. Note that all
+data cookies obtained in this volume must be relinquished before the volume is
+relinquished.
-Index Registration
-==================
+Data File Registration
+======================
-The third step is to inform FS-Cache about part of an index hierarchy that can
-be used to locate files. This is done by requesting a cookie for each index in
-the path to the file::
+Once it has a volume cookie, a network filesystem can use it to acquire a
+cookie for data storage::
struct fscache_cookie *
- fscache_acquire_cookie(struct fscache_cookie *parent,
- const struct fscache_object_def *def,
+ fscache_acquire_cookie(struct fscache_volume *volume,
+ u8 advice,
const void *index_key,
size_t index_key_len,
const void *aux_data,
size_t aux_data_len,
- void *netfs_data,
- loff_t object_size,
- bool enable);
+			       loff_t object_size);
-This function creates an index entry in the index represented by parent,
-filling in the index entry by calling the operations pointed to by def.
+This creates the cookie in the volume using the specified index key. The index
+key is a binary blob of the given length and must be unique for the volume.
+This is saved into the cookie. There are no restrictions on the content, but
+its length shouldn't exceed about three quarters of the maximum filename length
+to allow for encoding.
-A unique key that represents the object within the parent must be pointed to by
-index_key and is of length index_key_len.
+The caller should also pass in a piece of coherency data in aux_data. A buffer
+of size aux_data_len will be allocated and the coherency data copied in. It is
+assumed that the size is invariant over time. The coherency data is used to
+check the validity of data in the cache. Functions are provided by which the
+coherency data can be updated.
-An optional blob of auxiliary data that is to be stored within the cache can be
-pointed to with aux_data and should be of length aux_data_len. This would
-typically be used for storing coherency data.
+The file size of the object being cached should also be provided. This may be
+used to trim the data and will be stored with the coherency data.
-The netfs may pass an arbitrary value in netfs_data and this will be presented
-to it in the event of any calling back. This may also be used in tracing or
-logging of messages.
+This function never returns an error, though it may return a NULL cookie on
+allocation failure or if fscache is not enabled. It is safe to pass in a NULL
+volume cookie and pass the NULL cookie returned to any function that takes it.
+This will cause that function to do nothing.
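+
+As an illustrative sketch (the inode wrapper, file ID and coherency structure
+below are hypothetical), a filesystem might acquire the cookie from its iget
+path::
+
+	struct myfs_coherency {
+		__le64	data_version;
+		__le64	mtime;
+	};
+
+	static void myfs_get_cookie(struct myfs_inode *mi)
+	{
+		struct myfs_sb_info *sbi = myfs_sb(mi->vfs_inode.i_sb);
+		struct myfs_coherency aux = {
+			.data_version = cpu_to_le64(mi->data_version),
+			.mtime = cpu_to_le64(mi->vfs_inode.i_mtime.tv_sec),
+		};
+
+		/* The binary key need only be unique within the volume. */
+		mi->cookie = fscache_acquire_cookie(sbi->volume, 0,
+						    &mi->fid, sizeof(mi->fid),
+						    &aux, sizeof(aux),
+						    i_size_read(&mi->vfs_inode));
+	}
+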
-The cache tracks the size of the data attached to an object and this is set to
-object_size. For indices, this should be 0. This value will be passed to the
-->check_aux() callback.
-Note that this function never returns an error - all errors are handled
-internally. It may, however, return NULL to indicate no cookie. It is quite
-acceptable to pass this token back to this function as the parent to another
-acquisition (or even to the relinquish cookie, read page and write page
-functions - see below).
+When the network filesystem has finished with a cookie, it should relinquish it
+by calling::
-Note also that no indices are actually created in a cache until a non-index
-object needs to be created somewhere down the hierarchy. Furthermore, an index
-may be created in several different caches independently at different times.
-This is all handled transparently, and the netfs doesn't see any of it.
+ void fscache_relinquish_cookie(struct fscache_cookie *cookie,
+ bool retire);
-A cookie will be created in the disabled state if enabled is false. A cookie
-must be enabled to do anything with it. A disabled cookie can be enabled by
-calling fscache_enable_cookie() (see below).
+This will cause fscache to either commit the storage backing the cookie or
+delete it.
-For example, with AFS, a cell would be added to the primary index. This index
-entry would have a dependent inode containing volume mappings within this cell::
- cell->cache =
- fscache_acquire_cookie(afs_cache_netfs.primary_index,
- &afs_cell_cache_index_def,
- cell->name, strlen(cell->name),
- NULL, 0,
- cell, 0, true);
+Marking A Cookie In-Use
+=======================
-And then a particular volume could be added to that index by ID, creating
-another index for vnodes (AFS inode equivalents)::
+Once a cookie has been acquired by a network filesystem, the filesystem should
+tell fscache when it intends to use the cookie (typically done on file open)
+and should say when it has finished with it (typically on file close)::
- volume->cache =
- fscache_acquire_cookie(volume->cell->cache,
- &afs_volume_cache_index_def,
- &volume->vid, sizeof(volume->vid),
- NULL, 0,
- volume, 0, true);
+ void fscache_use_cookie(struct fscache_cookie *cookie,
+ bool will_modify);
+ void fscache_unuse_cookie(struct fscache_cookie *cookie,
+ const void *aux_data,
+ const loff_t *object_size);
+The *use* function tells fscache that it will use the cookie and, additionally,
+indicates whether the user intends to modify the contents locally. If not yet
+done, this will trigger the cache backend to go and gather the resources it
+needs to access/store data in the cache. This is done in the background, and
+so may not be complete by the time the function returns.
-Data File Registration
-======================
+The *unuse* function indicates that a filesystem has finished using a cookie.
+It optionally updates the stored coherency data and object size and then
+decreases the in-use counter. When the last user unuses the cookie, it is
+scheduled for garbage collection. If not reused within a short time, the
+resources will be released to reduce system resource consumption.
-The fourth step is to request a data file be created in the cache. This is
-identical to index cookie acquisition. The only difference is that the type in
-the object definition should be something other than index type::
+A cookie must be marked in-use before it can be accessed for read, write or
+resize - and an in-use mark must be kept whilst there is dirty data in the
+pagecache in order to avoid an oops due to trying to open a file during process
+exit.
- vnode->cache =
- fscache_acquire_cookie(volume->cache,
- &afs_vnode_cache_object_def,
- &key, sizeof(key),
- &aux, sizeof(aux),
- vnode, vnode->status.size, true);
+Note that in-use marks are cumulative. For each time a cookie is marked
+in-use, it must be unused.
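+
+A minimal open/release pairing might look like the following sketch, where
+``myfs_i()`` is an assumed container_of-style inode accessor::
+
+	static int myfs_open(struct inode *inode, struct file *file)
+	{
+		/* Pin cache resources; say if we may modify the data locally. */
+		fscache_use_cookie(myfs_i(inode)->cookie,
+				   file->f_mode & FMODE_WRITE);
+		return 0;
+	}
+
+	static int myfs_release(struct inode *inode, struct file *file)
+	{
+		loff_t i_size = i_size_read(inode);
+
+		/* Drop our in-use mark, letting fscache note the latest size. */
+		fscache_unuse_cookie(myfs_i(inode)->cookie, NULL, &i_size);
+		return 0;
+	}
+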
-Miscellaneous Object Registration
+Resizing A Data File (Truncation)
=================================
-An optional step is to request an object of miscellaneous type be created in
-the cache. This is almost identical to index cookie acquisition. The only
-difference is that the type in the object definition should be something other
-than index type. While the parent object could be an index, it's more likely
-it would be some other type of object such as a data file::
-
- xattr->cache =
- fscache_acquire_cookie(vnode->cache,
- &afs_xattr_cache_object_def,
- &xattr->name, strlen(xattr->name),
- NULL, 0,
- xattr, strlen(xattr->val), true);
-
-Miscellaneous objects might be used to store extended attributes or directory
-entries for example.
-
-
-Setting the Data File Size
-==========================
+If a network filesystem file is resized locally by truncation, the following
+should be called to notify the cache::
-The fifth step is to set the physical attributes of the file, such as its size.
-This doesn't automatically reserve any space in the cache, but permits the
-cache to adjust its metadata for data tracking appropriately::
+ void fscache_resize_cookie(struct fscache_cookie *cookie,
+ loff_t new_size);
- int fscache_attr_changed(struct fscache_cookie *cookie);
+The caller must have first marked the cookie in-use. The cookie and the new
+size are passed in and the cache is synchronously resized. This is expected to
+be called from the ``->setattr()`` inode operation under the inode lock.
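+
+For instance, the truncation arm of ``->setattr()`` might do the following (a
+pared-down sketch with locking and error handling elided; the signature is as
+of recent kernels)::
+
+	static int myfs_setattr(struct user_namespace *mnt_userns,
+				struct dentry *dentry, struct iattr *attr)
+	{
+		struct inode *inode = d_inode(dentry);
+		struct fscache_cookie *cookie = myfs_i(inode)->cookie;
+
+		if (attr->ia_valid & ATTR_SIZE) {
+			fscache_use_cookie(cookie, true);
+			truncate_setsize(inode, attr->ia_size);
+			/* Synchronously shrink/grow the cache object too. */
+			fscache_resize_cookie(cookie, attr->ia_size);
+			fscache_unuse_cookie(cookie, NULL, &attr->ia_size);
+		}
+		return 0;
+	}
+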
-The cache will return -ENOBUFS if there is no backing cache or if there is no
-space to allocate any extra metadata required in the cache.
-Note that attempts to read or write data pages in the cache over this size may
-be rebuffed with -ENOBUFS.
+Data I/O API
+============
-This operation schedules an attribute adjustment to happen asynchronously at
-some point in the future, and as such, it may happen after the function returns
-to the caller. The attribute adjustment excludes read and write operations.
+To do data I/O operations directly through a cookie, the following functions
+are available::
+ int fscache_begin_read_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie);
+ int fscache_read(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ enum netfs_read_from_hole read_hole,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv);
+ int fscache_write(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv);
-Page alloc/read/write
-=====================
+The *begin* function sets up an operation, attaching the resources required to
+the cache resources block from the cookie. Assuming it doesn't return an error
+(for instance, it will return -ENOBUFS if given a NULL cookie, but will
+otherwise do nothing), then one of the other two functions can be issued.
-And the sixth step is to store and retrieve pages in the cache. There are
-three functions that are used to do this.
+The *read* and *write* functions initiate a direct-IO operation. Both take the
+previously set up cache resources block, an indication of the start file
+position, and an I/O iterator that describes the buffer and indicates the
+amount of data.
-Note:
+The read function also takes a parameter to indicate how it should handle a
+partially populated region (a hole) in the disk content. This may be to ignore
+it, to skip over an initial hole and place zeros in the buffer, or to give an
+error.
- (1) A page should not be re-read or re-allocated without uncaching it first.
-
- (2) A read or allocated page must be uncached when the netfs page is released
- from the pagecache.
-
- (3) A page should only be written to the cache if previously read or allocated.
-
-This permits the cache to maintain its page tracking in proper order.
-
-
-PAGE READ
----------
-
-Firstly, the netfs should ask FS-Cache to examine the caches and read the
-contents cached for a particular page of a particular file if present, or else
-allocate space to store the contents if not::
+The read and write functions can be given an optional termination function that
+will be run on completion::
typedef
- void (*fscache_rw_complete_t)(struct page *page,
- void *context,
- int error);
-
- int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp);
-
-The cookie argument must specify a cookie for an object that isn't an index,
-the page specified will have the data loaded into it (and is also used to
-specify the page number), and the gfp argument is used to control how any
-memory allocations made are satisfied.
-
-If the cookie indicates the inode is not cached:
-
- (1) The function will return -ENOBUFS.
-
-Else if there's a copy of the page resident in the cache:
-
- (1) The mark_pages_cached() cookie operation will be called on that page.
+ void (*netfs_io_terminated_t)(void *priv, ssize_t transferred_or_error,
+ bool was_async);
- (2) The function will submit a request to read the data from the cache's
- backing device directly into the page specified.
+If a termination function is given, the operation will be run asynchronously
+and the termination function will be called upon completion. If not given, the
+operation will be run synchronously. Note that in the asynchronous case, it is
+possible for the operation to complete before the function returns.
- (3) The function will return 0.
+Both the read and write functions end the operation when they complete,
+detaching any pinned resources.
- (4) When the read is complete, end_io_func() will be invoked with:
+The read operation will fail with ESTALE if invalidation occurred whilst the
+operation was ongoing.
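+
+As a sketch of a synchronous read into a bio_vec buffer (the wrapper is
+hypothetical; error handling is abbreviated)::
+
+	static int myfs_read_from_cache(struct fscache_cookie *cookie,
+					struct bio_vec *bv, unsigned int nr_bvs,
+					loff_t pos, size_t len)
+	{
+		struct netfs_cache_resources cres = {};
+		struct iov_iter iter;
+		int ret;
+
+		ret = fscache_begin_read_operation(&cres, cookie);
+		if (ret < 0)
+			return ret;	/* -ENOBUFS: no cache available */
+
+		iov_iter_bvec(&iter, READ, bv, nr_bvs, len);
+
+		/* NULL term_func makes the call synchronous; an initial
+		 * hole, if any, is zero-filled. */
+		return fscache_read(&cres, pos, &iter, NETFS_READ_HOLE_CLEAR,
+				    NULL, NULL);
+	}
+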
- * The netfs data supplied when the cookie was created.
- * The page descriptor.
+Data File Coherency
+===================
- * The context argument passed to the above function. This will be
- maintained with the get_context/put_context functions mentioned above.
-
- * An argument that's 0 on success or negative for an error code.
-
- If an error occurs, it should be assumed that the page contains no usable
- data. fscache_readpages_cancel() may need to be called.
-
-     end_io_func() will be called in process context if the read results in
- an error, but it might be called in interrupt context if the read is
- successful.
-
-Otherwise, if there's not a copy available in cache, but the cache may be able
-to store the page:
-
- (1) The mark_pages_cached() cookie operation will be called on that page.
-
- (2) A block may be reserved in the cache and attached to the object at the
- appropriate place.
-
- (3) The function will return -ENODATA.
-
-This function may also return -ENOMEM or -EINTR, in which case it won't have
-read any data from the cache.
-
-
-Page Allocate
--------------
-
-Alternatively, if there's not expected to be any data in the cache for a page
-because the file has been extended, a block can simply be allocated instead::
-
- int fscache_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp);
-
-This is similar to the fscache_read_or_alloc_page() function, except that it
-never reads from the cache. It will return 0 if a block has been allocated,
-rather than -ENODATA as the other would. One or the other must be performed
-before writing to the cache.
-
-The mark_pages_cached() cookie operation will be called on the page if
-successful.
-
-
-Page Write
-----------
-
-Secondly, if the netfs changes the contents of the page (either due to an
-initial download or if a user performs a write), then the page should be
-written back to the cache::
-
- int fscache_write_page(struct fscache_cookie *cookie,
- struct page *page,
- loff_t object_size,
- gfp_t gfp);
-
-The cookie argument must specify a data file cookie, the page specified should
-contain the data to be written (and is also used to specify the page number),
-object_size is the revised size of the object and the gfp argument is used to
-control how any memory allocations made are satisfied.
-
-The page must have first been read or allocated successfully and must not have
-been uncached before writing is performed.
-
-If the cookie indicates the inode is not cached then:
-
- (1) The function will return -ENOBUFS.
-
-Else if space can be allocated in the cache to hold this page:
-
- (1) PG_fscache_write will be set on the page.
-
- (2) The function will submit a request to write the data to cache's backing
- device directly from the page specified.
-
- (3) The function will return 0.
-
- (4) When the write is complete PG_fscache_write is cleared on the page and
- anyone waiting for that bit will be woken up.
-
-Else if there's no space available in the cache, -ENOBUFS will be returned. It
-is also possible for the PG_fscache_write bit to be cleared when no write took
-place if unforeseen circumstances arose (such as a disk error).
-
-Writing takes place asynchronously.
-
-
-Multiple Page Read
-------------------
-
-A facility is provided to read several pages at once, as requested by the
-readpages() address space operation::
-
- int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
- struct address_space *mapping,
- struct list_head *pages,
- int *nr_pages,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp);
-
-This works in a similar way to fscache_read_or_alloc_page(), except:
-
- (1) Any page it can retrieve data for is removed from pages and nr_pages and
- dispatched for reading to the disk. Reads of adjacent pages on disk may
- be merged for greater efficiency.
-
- (2) The mark_pages_cached() cookie operation will be called on several pages
- at once if they're being read or allocated.
-
- (3) If there was a general error, then that error will be returned.
-
- Else if some pages couldn't be allocated or read, then -ENOBUFS will be
- returned.
-
- Else if some pages couldn't be read but were allocated, then -ENODATA will
- be returned.
-
- Otherwise, if all pages had reads dispatched, then 0 will be returned, the
- list will be empty and ``*nr_pages`` will be 0.
-
- (4) end_io_func will be called once for each page being read as the reads
- complete. It will be called in process context if error != 0, but it may
- be called in interrupt context if there is no error.
-
-Note that a return of -ENODATA, -ENOBUFS or any other error does not preclude
-some of the pages being read and some being allocated. Those pages will have
-been marked appropriately and will need uncaching.
-
-
-Cancellation of Unread Pages
-----------------------------
-
-If one or more pages are passed to fscache_read_or_alloc_pages() but not then
-read from the cache and also not read from the underlying filesystem then
-those pages will need to have any marks and reservations removed. This can be
-done by calling::
-
- void fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages);
-
-prior to returning to the caller. The cookie argument should be as passed to
-fscache_read_or_alloc_pages(). Every page in the pages list will be examined
-and any that have PG_fscache set will be uncached.
-
-
-Page Uncaching
-==============
-
-To uncache a page, this function should be called::
-
- void fscache_uncache_page(struct fscache_cookie *cookie,
- struct page *page);
-
-This function permits the cache to release any in-memory representation it
-might be holding for this netfs page. This function must be called once for
-each page on which the read or write page functions above have been called to
-make sure the cache's in-memory tracking information gets torn down.
-
-Note that pages can't be explicitly deleted from a data file. The whole
-data file must be retired (see the relinquish cookie function below).
-
-Furthermore, note that this does not cancel the asynchronous read or write
-operation started by the read/alloc and write functions, so the page
-invalidation functions must use::
-
- bool fscache_check_page_write(struct fscache_cookie *cookie,
- struct page *page);
-
-to see if a page is being written to the cache, and::
-
- void fscache_wait_on_page_write(struct fscache_cookie *cookie,
- struct page *page);
-
-to wait for it to finish if it is.
-
-
-When releasepage() is being implemented, a special FS-Cache function exists to
-manage the heuristics of coping with vmscan trying to eject pages, which may
-conflict with the cache trying to write pages to the cache (which may itself
-need to allocate memory)::
-
- bool fscache_maybe_release_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp);
-
-This takes the netfs cookie, and the page and gfp arguments as supplied to
-releasepage(). It will return false if the page cannot be released yet for
-some reason and if it returns true, the page has been uncached and can now be
-released.
-
-To make a page available for release, this function may wait for an outstanding
-storage request to complete, or it may attempt to cancel the storage request -
-in which case the page will not be stored in the cache this time.
-
-
-Bulk Image Page Uncache
------------------------
-
-A convenience routine is provided to perform an uncache on all the pages
-attached to an inode. This assumes that the pages on the inode correspond on a
-1:1 basis with the pages in the cache::
-
- void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
- struct inode *inode);
-
-This takes the netfs cookie that the pages were cached with and the inode that
-the pages are attached to. This function will wait for pages to finish being
-written to the cache and for the cache to finish with the page generally. No
-error is returned.
-
-
-Index and Data File consistency
-===============================
-
-To find out whether auxiliary data for an object is up to date within the
-cache, the following function can be called::
-
- int fscache_check_consistency(struct fscache_cookie *cookie,
- const void *aux_data);
-
-This will call back to the netfs to check whether the auxiliary data associated
-with a cookie is correct; if aux_data is non-NULL, it will update the auxiliary
-data buffer first. It returns 0 if it is and -ESTALE if it isn't; it may also
-return -ENOMEM and -ERESTARTSYS.
-
-To request an update of the index data for an index or other object, the
-following function should be called::
+To request an update of the coherency data and file size on a cookie, the
+following should be called::
void fscache_update_cookie(struct fscache_cookie *cookie,
- const void *aux_data);
-
-This function will update the cookie's auxiliary data buffer from aux_data if
-that is non-NULL and then schedule this to be stored on disk. The update
-method in the parent index definition will be called to transfer the data.
-
-Note that partial updates may happen automatically at other times, such as when
-data blocks are added to a data file object.
-
-
-Cookie Enablement
-=================
-
-Cookies exist in one of two states: enabled and disabled. If a cookie is
-disabled, it ignores all attempts to acquire child cookies; check, update or
-invalidate its state; allocate, read or write backing pages - though it is
-still possible to uncache pages and relinquish the cookie.
-
-The initial enablement state is set by fscache_acquire_cookie(), but the cookie
-can be enabled or disabled later. To disable a cookie, call::
-
- void fscache_disable_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool invalidate);
-
-If the cookie is not already disabled, this locks the cookie against other
-enable and disable ops, marks the cookie as being disabled, discards or
-invalidates any backing objects and waits for cessation of activity on any
-associated object before unlocking the cookie.
-
-All possible failures are handled internally. The caller should consider
-calling fscache_uncache_all_inode_pages() afterwards to make sure all page
-markings are cleared up.
-
-Cookies can be enabled or reenabled with::
-
- void fscache_enable_cookie(struct fscache_cookie *cookie,
const void *aux_data,
- loff_t object_size,
- bool (*can_enable)(void *data),
- void *data)
-
-If the cookie is not already enabled, this locks the cookie against other
-enable and disable ops, invokes can_enable() and, if the cookie is not an index
-cookie, will begin the procedure of acquiring backing objects.
-
-The optional can_enable() function is passed the data argument and returns a
-ruling as to whether or not enablement should actually be permitted to begin.
+ const loff_t *object_size);
-All possible failures are handled internally. The cookie will only be marked
-as enabled if provisional backing objects are allocated.
+This will update the cookie's coherency data and/or file size.
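+
+For example (the fields shown are hypothetical), after the server acknowledges
+an update::
+
+	struct myfs_coherency aux = {
+		.data_version = cpu_to_le64(mi->data_version),
+	};
+	loff_t i_size = i_size_read(&mi->vfs_inode);
+
+	fscache_update_cookie(mi->cookie, &aux, &i_size);
+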
-The object's data size is updated from object_size and is passed to the
-->check_aux() function.
-In both cases, the cookie's auxiliary data buffer is updated from aux_data if
-that is non-NULL inside the enablement lock before proceeding.
-
-
-Miscellaneous Cookie operations
-===============================
+Data File Invalidation
+======================
-There are a number of operations that can be used to control cookies:
+Sometimes it will be necessary to invalidate an object that contains data.
+Typically this will be necessary when the server informs the network filesystem
+of a remote third-party change - at which point the filesystem has to throw
+away the state and cached data that it had for a file and reload from the
+server.
- * Cookie pinning::
+To indicate that a cache object should be invalidated, the following should be
+called::
- int fscache_pin_cookie(struct fscache_cookie *cookie);
- void fscache_unpin_cookie(struct fscache_cookie *cookie);
+ void fscache_invalidate(struct fscache_cookie *cookie,
+ const void *aux_data,
+ loff_t size,
+ unsigned int flags);
- These operations permit data cookies to be pinned into the cache and to
- have the pinning removed. They are not permitted on index cookies.
+This increases the invalidation counter in the cookie to cause outstanding
+reads to fail with -ESTALE, sets the coherency data and file size from the
+information supplied, blocks new I/O on the cookie and dispatches the cache to
+go and get rid of the old data.
-     The pinning function will return 0 if successful, -ENOBUFS if the cookie
- isn't backed by a cache, -EOPNOTSUPP if the cache doesn't support pinning,
- -ENOSPC if there isn't enough space to honour the operation, -ENOMEM or
- -EIO if there's any other problem.
+Invalidation runs asynchronously in a worker thread so that it doesn't block
+too much.
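+
+For example, on receiving a third-party change notification (names
+hypothetical; a flag such as FSCACHE_INVAL_DIO_WRITE may be passed where
+appropriate, otherwise 0)::
+
+	static void myfs_remote_change(struct myfs_inode *mi, loff_t new_size,
+				       u64 new_version)
+	{
+		struct myfs_coherency aux = {
+			.data_version = cpu_to_le64(new_version),
+		};
+
+		/* Junk the old data; in-flight reads will see -ESTALE. */
+		fscache_invalidate(mi->cookie, &aux, new_size, 0);
+	}
+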
- * Data space reservation::
- int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size);
+Write-Back Resource Management
+==============================
- This permits a netfs to request cache space be reserved to store up to the
- given amount of a file. It is permitted to ask for more than the current
- size of the file to allow for future file expansion.
+To write data to the cache from network filesystem writeback, the cache
+resources required need to be pinned at the point the modification is made (for
+instance when the page is marked dirty) as it's not possible to open a file in
+a thread that's exiting.
- If size is given as zero then the reservation will be cancelled.
+The following facilities are provided to manage this:
-     The function will return 0 if successful, -ENOBUFS if the cookie isn't
- backed by a cache, -EOPNOTSUPP if the cache doesn't support reservations,
- -ENOSPC if there isn't enough space to honour the operation, -ENOMEM or
- -EIO if there's any other problem.
+ * An inode flag, ``I_PINNING_FSCACHE_WB``, is provided to indicate that an
+   in-use mark is held on the cookie for this inode. It can only be changed
+   if the inode lock is held.
- Note that this doesn't pin an object in a cache; it can still be culled to
- make space if it's not in use.
+ * A flag, ``unpinned_fscache_wb``, is placed in the ``writeback_control``
+ struct that gets set if ``__writeback_single_inode()`` clears
+ ``I_PINNING_FSCACHE_WB`` because all the dirty pages were cleared.
+To support this, the following functions are provided::
-Cookie Unregistration
-=====================
+ int fscache_set_page_dirty(struct page *page,
+ struct fscache_cookie *cookie);
+ void fscache_unpin_writeback(struct writeback_control *wbc,
+ struct fscache_cookie *cookie);
+ void fscache_clear_inode_writeback(struct fscache_cookie *cookie,
+ struct inode *inode,
+ const void *aux);
-To get rid of a cookie, this function should be called::
+The *set* function is intended to be called from the filesystem's
+``set_page_dirty`` address space operation. If ``I_PINNING_FSCACHE_WB`` is not
+set, it sets that flag and increments the use count on the cookie (the caller
+must already have called ``fscache_use_cookie()``).
- void fscache_relinquish_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool retire);
+The *unpin* function is intended to be called from the filesystem's
+``write_inode`` superblock operation. It cleans up after writing by unusing
+the cookie if ``unpinned_fscache_wb`` is set in the ``writeback_control``
+struct.
-If retire is non-zero, then the object will be marked for recycling, and all
-copies of it will be removed from all active caches in which it is present.
-Not only that but all child objects will also be retired.
+The *clear* function is intended to be called from the netfs's ``evict_inode``
+superblock operation. It must be called *after*
+``truncate_inode_pages_final()``, but *before* ``clear_inode()``. This cleans
+up any hanging ``I_PINNING_FSCACHE_WB``. It also allows the coherency data to
+be updated.
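+
+Wiring these up might look like the following sketch (operations-table
+plumbing omitted; ``myfs_i()`` is an assumed inode accessor)::
+
+	static int myfs_set_page_dirty(struct page *page)
+	{
+		struct inode *inode = page->mapping->host;
+
+		/* The first dirtying takes an extra in-use on the cookie
+		 * and sets I_PINNING_FSCACHE_WB. */
+		return fscache_set_page_dirty(page, myfs_i(inode)->cookie);
+	}
+
+	static int myfs_write_inode(struct inode *inode,
+				    struct writeback_control *wbc)
+	{
+		/* Unuses the cookie if unpinned_fscache_wb got set. */
+		fscache_unpin_writeback(wbc, myfs_i(inode)->cookie);
+		return 0;
+	}
+
+	static void myfs_evict_inode(struct inode *inode)
+	{
+		truncate_inode_pages_final(&inode->i_data);
+		fscache_clear_inode_writeback(myfs_i(inode)->cookie,
+					      inode, NULL);
+		clear_inode(inode);
+	}
+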
-If retire is zero, then the object may be available again when next the
-acquisition function is called. Retirement here will overrule the pinning on a
-cookie.
-The cookie's auxiliary data will be updated from aux_data if that is non-NULL
-so that the cache can lazily update it on disk.
+Caching of Local Modifications
+==============================
-One very important note - relinquish must NOT be called for a cookie unless all
-the cookies for "child" indices, objects and pages have been relinquished
-first.
+If a network filesystem has locally modified data that it wants to write to the
+cache, it needs to mark the pages to indicate that a write is in progress, and
+if the mark is already present, it needs to wait for it to be removed first
+(presumably due to an already in-progress operation). This prevents multiple
+competing DIO writes to the same storage in the cache.
+Firstly, the netfs should determine if caching is available by doing something
+like::
-Index Invalidation
-==================
+ bool caching = fscache_cookie_enabled(cookie);
-There is no direct way to invalidate an index subtree. To do this, the caller
-should relinquish and retire the cookie they have, and then acquire a new one.
+If caching is to be attempted, pages should be waited for and then marked using
+the following functions provided by the netfs helper library::
+ void set_page_fscache(struct page *page);
+ void wait_on_page_fscache(struct page *page);
+ int wait_on_page_fscache_killable(struct page *page);
-Data File Invalidation
-======================
+Once all the pages in the span are marked, the netfs can ask fscache to
+schedule a write of that region::
-Sometimes it will be necessary to invalidate an object that contains data.
-Typically this will be necessary when the server tells the netfs of a foreign
-change - at which point the netfs has to throw away all the state it had for an
-inode and reload from the server.
+ void fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+				    bool caching);
-To indicate that a cache object should be invalidated, the following function
-can be called::
+And if an error occurs before that point is reached, the marks can be removed
+by calling::
- void fscache_invalidate(struct fscache_cookie *cookie);
+ void fscache_clear_page_bits(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len,
+				     bool caching);
-This can be called with spinlocks held as it defers the work to a thread pool.
-All extant storage, retrieval and attribute change ops at this point are
-cancelled and discarded. Some future operations will be rejected until the
-cache has had a chance to insert a barrier in the operations queue. After
-that, operations will be queued again behind the invalidation operation.
+In both of these functions, the cookie representing the cache object to be
+written to and a pointer to the mapping to which the source pages are attached
+are passed in; start and len indicate the size of the region that's going to be
+written (it doesn't have to align to page boundaries necessarily, but it does
+have to align to DIO boundaries on the backing filesystem). The caching
+parameter indicates whether caching is to be done at all; if it is false, the
+functions do nothing.
-The invalidation operation will perform an attribute change operation and an
-auxiliary data update operation as it is very likely these will have changed.
+The write function takes some additional parameters: i_size indicates the size
+of the netfs file and term_func indicates an optional completion function, to
+which term_func_priv will be passed, along with the error or amount written.
-Using the following function, the netfs can wait for the invalidation operation
-to have reached a point at which it can start submitting ordinary operations
-once again::
+Note that the write function will always run asynchronously and will unmark all
+the pages upon completion before calling term_func.
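+
+Putting this together, a writeback tail might look like the sketch below (the
+helper and completion handler are assumptions for the example)::
+
+	static void myfs_write_done(void *priv, ssize_t transferred_or_error,
+				    bool was_async)
+	{
+		if (IS_ERR_VALUE(transferred_or_error))
+			pr_warn("myfs: cache write failed: %zd\n",
+				transferred_or_error);
+	}
+
+	static void myfs_write_span(struct myfs_inode *mi,
+				    loff_t start, size_t len)
+	{
+		bool caching = fscache_cookie_enabled(mi->cookie);
+
+		/* The pages must already carry PG_fscache marks (see above);
+		 * they are unmarked again when the write completes. */
+		fscache_write_to_cache(mi->cookie, mi->vfs_inode.i_mapping,
+				       start, len,
+				       i_size_read(&mi->vfs_inode),
+				       myfs_write_done, NULL, caching);
+	}
+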
- void fscache_wait_on_invalidate(struct fscache_cookie *cookie);
+Page Release and Invalidation
+=============================
-FS-cache Specific Page Flag
-===========================
+Fscache keeps track of whether we have any data in the cache yet for a cache
+object we've just created. It knows it doesn't have to do any reading until it
+has done a write and the page it wrote from has been released by the VM,
+after which it *has* to look in the cache.
-FS-Cache makes use of a page flag, PG_private_2, for its own purpose. This is
-given the alternative name PG_fscache.
+To inform fscache that a page might now be in the cache, the following function
+should be called from the ``releasepage`` address space op::
-PG_fscache is used to indicate that the page is known by the cache, and that
-the cache must be informed if the page is going to go away. It's an indication
-to the netfs that the cache has an interest in this page, where an interest may
-be a pointer to it, resources allocated or reserved for it, or I/O in progress
-upon it.
+ void fscache_note_page_release(struct fscache_cookie *cookie);
-The netfs can use this information in methods such as releasepage() to
-determine whether it needs to uncache a page or update it.
+if the page has been released (ie. releasepage returned true).
-Furthermore, if this bit is set, releasepage() and invalidatepage() operations
-will be called on a page to get rid of it, even if PG_private is not set. This
-allows caching to be attempted on a page before read_cache_pages() is called
-after fscache_read_or_alloc_pages(), as the former will try to release pages it
-was given under certain circumstances.
+Page release and page invalidation should also wait for any mark left on the
+page to say that a DIO write is underway from that page::
-This bit does not overlap with flags such as PG_private. This means that
-FS-Cache
-can be used with a filesystem that uses the block buffering code.
+ void wait_on_page_fscache(struct page *page);
+ int wait_on_page_fscache_killable(struct page *page);
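+
+Combining the two, a releasepage implementation might look like this sketch
+(the kswapd/GFP policy shown is one common choice, not a requirement)::
+
+	static int myfs_releasepage(struct page *page, gfp_t gfp)
+	{
+		/* A DIO write to the cache may still be using this page. */
+		if (PageFsCache(page)) {
+			if (current_is_kswapd() || !(gfp & __GFP_FS))
+				return 0;
+			wait_on_page_fscache(page);
+		}
+
+		fscache_note_page_release(myfs_i(page->mapping->host)->cookie);
+		return 1;
+	}
+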
-There are a number of operations defined on this flag::
- int PageFsCache(struct page *page);
- void SetPageFsCache(struct page *page)
- void ClearPageFsCache(struct page *page)
- int TestSetPageFsCache(struct page *page)
- int TestClearPageFsCache(struct page *page)
+API Function Reference
+======================
-These functions are bit test, bit set, bit clear, bit test and set and bit
-test and clear operations on PG_fscache.
+.. kernel-doc:: include/linux/fscache.h
diff --git a/Documentation/filesystems/caching/object.rst b/Documentation/filesystems/caching/object.rst
deleted file mode 100644
index ce0e043ccd33..000000000000
--- a/Documentation/filesystems/caching/object.rst
+++ /dev/null
@@ -1,313 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-====================================================
-In-Kernel Cache Object Representation and Management
-====================================================
-
-By: David Howells <dhowells@redhat.com>
-
-.. Contents:
-
- (*) Representation
-
- (*) Object management state machine.
-
- - Provision of cpu time.
- - Locking simplification.
-
- (*) The set of states.
-
- (*) The set of events.
-
-
-Representation
-==============
-
-FS-Cache maintains an in-kernel representation of each object that a netfs is
-currently interested in. Such objects are represented by the fscache_cookie
-struct and are referred to as cookies.
-
-FS-Cache also maintains a separate in-kernel representation of the objects that
-a cache backend is currently actively caching. Such objects are represented by
-the fscache_object struct. The cache backends allocate these upon request, and
-are expected to embed them in their own representations. These are referred to
-as objects.
-
-There is a 1:N relationship between cookies and objects. A cookie may be
-represented by multiple objects - an index may exist in more than one cache -
-or even by no objects (it may not be cached).
-
-Furthermore, both cookies and objects are hierarchical. The two hierarchies
-correspond, but the cookies tree is a superset of the union of the object trees
-of multiple caches::
-
- NETFS INDEX TREE : CACHE 1 : CACHE 2
- : :
- : +-----------+ :
- +----------->| IObject | :
- +-----------+ | : +-----------+ :
- | ICookie |-------+ : | :
- +-----------+ | : | : +-----------+
- | +------------------------------>| IObject |
- | : | : +-----------+
- | : V : |
- | : +-----------+ : |
- V +----------->| IObject | : |
- +-----------+ | : +-----------+ : |
- | ICookie |-------+ : | : V
- +-----------+ | : | : +-----------+
- | +------------------------------>| IObject |
- +-----+-----+ : | : +-----------+
- | | : | : |
- V | : V : |
- +-----------+ | : +-----------+ : |
- | ICookie |------------------------->| IObject | : |
- +-----------+ | : +-----------+ : |
- | V : | : V
- | +-----------+ : | : +-----------+
- | | ICookie |-------------------------------->| IObject |
- | +-----------+ : | : +-----------+
- V | : V : |
- +-----------+ | : +-----------+ : |
- | DCookie |------------------------->| DObject | : |
- +-----------+ | : +-----------+ : |
- | : : |
- +-------+-------+ : : |
- | | : : |
- V V : : V
- +-----------+ +-----------+ : : +-----------+
- | DCookie | | DCookie |------------------------>| DObject |
- +-----------+ +-----------+ : : +-----------+
- : :
-
-In the above illustration, ICookie and IObject represent indices and DCookie
-and DObject represent data storage objects. Indices may have representation in
-multiple caches, but currently, non-index objects may not. Objects of any type
-may also be entirely unrepresented.
-
-As far as the netfs API goes, the netfs is only actually permitted to see
-pointers to the cookies. The cookies themselves and any objects attached to
-those cookies are hidden from it.
-
-
-Object Management State Machine
-===============================
-
-Within FS-Cache, each active object is managed by its own individual state
-machine. The state for an object is kept in the fscache_object struct, in
-object->state. A cookie may point to a set of objects that are in different
-states.
-
-Each state has an action associated with it that is invoked when the machine
-wakes up in that state. There are four logical sets of states:
-
- (1) Preparation: states that wait for the parent objects to become ready. The
- representations are hierarchical, and it is expected that an object must
- be created or accessed with respect to its parent object.
-
- (2) Initialisation: states that perform lookups in the cache and validate
- what's found and that create on disk any missing metadata.
-
- (3) Normal running: states that allow netfs operations on objects to proceed
- and that update the state of objects.
-
- (4) Termination: states that detach objects from their netfs cookies, that
- delete objects from disk, that handle disk and system errors and that free
- up in-memory resources.
-
-
-In most cases, transitioning between states is in response to signalled events.
-When a state has finished processing, it will usually set the mask of events in
-which it is interested (object->event_mask) and relinquish the worker thread.
-Then when an event is raised (by calling fscache_raise_event()), if the event
-is not masked, the object will be queued for processing (by calling
-fscache_enqueue_object()).
-
-
-Provision of CPU Time
----------------------
-
-The work to be done by the various states was given CPU time by the threads of
-the slow work facility. This was used in preference to the workqueue facility
-because:
-
- (1) Threads may be completely occupied for very long periods of time by a
- particular work item. These state actions may be doing sequences of
- synchronous, journalled disk accesses (lookup, mkdir, create, setxattr,
- getxattr, truncate, unlink, rmdir, rename).
-
- (2) Threads may do little actual work, but may rather spend a lot of time
- sleeping on I/O. This means that single-threaded and 1-per-CPU-threaded
- workqueues don't necessarily have the right numbers of threads.
-
-
-Locking Simplification
-----------------------
-
-Because only one worker thread may be operating on any particular object's
-state machine at once, this simplifies the locking, particularly with respect
-to disconnecting the netfs's representation of a cache object (fscache_cookie)
-from the cache backend's representation (fscache_object) - which may be
-requested from either end.
-
-
-The Set of States
-=================
-
-The object state machine has a set of states that it can be in. There are
-preparation states in which the object sets itself up and waits for its parent
-object to transit to a state that allows access to its children:
-
- (1) State FSCACHE_OBJECT_INIT.
-
- Initialise the object and wait for the parent object to become active. In
- the cache, it is expected that it will not be possible to look an object
- up from the parent object, until that parent object itself has been looked
- up.
-
-There are initialisation states in which the object sets itself up and accesses
-disk for the object metadata:
-
- (2) State FSCACHE_OBJECT_LOOKING_UP.
-
- Look up the object on disk, using the parent as a starting point.
- FS-Cache expects the cache backend to probe the cache to see whether this
- object is represented there, and if it is, to see if it's valid (coherency
- management).
-
- The cache should call fscache_object_lookup_negative() to indicate lookup
- failure for whatever reason, and should call fscache_obtained_object() to
- indicate success.
-
- At the completion of lookup, FS-Cache will let the netfs go ahead with
- read operations, no matter whether the file is yet cached. If not yet
- cached, read operations will be immediately rejected with ENODATA until
- the first known page is uncached - as to that point there can be no data
- to be read out of the cache for that file that isn't currently also held
- in the pagecache.
-
- (3) State FSCACHE_OBJECT_CREATING.
-
- Create an object on disk, using the parent as a starting point. This
- happens if the lookup failed to find the object, or if the object's
- coherency data indicated what's on disk is out of date. In this state,
-     FS-Cache expects the cache to create the object on disk.
-
- The cache should call fscache_obtained_object() if creation completes
- successfully, fscache_object_lookup_negative() otherwise.
-
- At the completion of creation, FS-Cache will start processing write
- operations the netfs has queued for an object. If creation failed, the
- write ops will be transparently discarded, and nothing recorded in the
- cache.
-
-There are some normal running states in which the object spends its time
-servicing netfs requests:
-
- (4) State FSCACHE_OBJECT_AVAILABLE.
-
- A transient state in which pending operations are started, child objects
- are permitted to advance from FSCACHE_OBJECT_INIT state, and temporary
- lookup data is freed.
-
- (5) State FSCACHE_OBJECT_ACTIVE.
-
- The normal running state. In this state, requests the netfs makes will be
- passed on to the cache.
-
- (6) State FSCACHE_OBJECT_INVALIDATING.
-
- The object is undergoing invalidation. When the state comes here, it
- discards all pending read, write and attribute change operations as it is
- going to clear out the cache entirely and reinitialise it. It will then
- continue to the FSCACHE_OBJECT_UPDATING state.
-
- (7) State FSCACHE_OBJECT_UPDATING.
-
- The state machine comes here to update the object in the cache from the
- netfs's records. This involves updating the auxiliary data that is used
- to maintain coherency.
-
-And there are terminal states in which an object cleans itself up, deallocates
-memory and potentially deletes stuff from disk:
-
- (8) State FSCACHE_OBJECT_LC_DYING.
-
- The object comes here if it is dying because of a lookup or creation
- error. This would be due to a disk error or system error of some sort.
- Temporary data is cleaned up, and the parent is released.
-
- (9) State FSCACHE_OBJECT_DYING.
-
- The object comes here if it is dying due to an error, because its parent
- cookie has been relinquished by the netfs or because the cache is being
- withdrawn.
-
- Any child objects waiting on this one are given CPU time so that they too
- can destroy themselves. This object waits for all its children to go away
- before advancing to the next state.
-
-(10) State FSCACHE_OBJECT_ABORT_INIT.
-
- The object comes to this state if it was waiting on its parent in
- FSCACHE_OBJECT_INIT, but its parent died. The object will destroy itself
- so that the parent may proceed from the FSCACHE_OBJECT_DYING state.
-
-(11) State FSCACHE_OBJECT_RELEASING.
-(12) State FSCACHE_OBJECT_RECYCLING.
-
- The object comes to one of these two states when dying once it is rid of
- all its children, if it is dying because the netfs relinquished its
- cookie. In the first state, the cached data is expected to persist, and
- in the second it will be deleted.
-
-(13) State FSCACHE_OBJECT_WITHDRAWING.
-
- The object transits to this state if the cache decides it wants to
- withdraw the object from service, perhaps to make space, but also due to
- error or just because the whole cache is being withdrawn.
-
-(14) State FSCACHE_OBJECT_DEAD.
-
- The object transits to this state when the in-memory object record is
- ready to be deleted. The object processor shouldn't ever see an object in
- this state.
-
-
-The Set of Events
------------------
-
-There are a number of events that can be raised to an object state machine:
-
- FSCACHE_OBJECT_EV_UPDATE
- The netfs requested that an object be updated. The state machine will ask
- the cache backend to update the object, and the cache backend will ask the
- netfs for details of the change through its cookie definition ops.
-
- FSCACHE_OBJECT_EV_CLEARED
- This is signalled in two circumstances:
-
- (a) when an object's last child object is dropped and
-
- (b) when the last operation outstanding on an object is completed.
-
- This is used to proceed from the dying state.
-
- FSCACHE_OBJECT_EV_ERROR
- This is signalled when an I/O error occurs during the processing of some
- object.
-
- FSCACHE_OBJECT_EV_RELEASE, FSCACHE_OBJECT_EV_RETIRE
- These are signalled when the netfs relinquishes a cookie it was using.
- The event selected depends on whether the netfs asks for the backing
- object to be retired (deleted) or retained.
-
- FSCACHE_OBJECT_EV_WITHDRAW
- This is signalled when the cache backend wants to withdraw an object.
- This means that the object will have to be detached from the netfs's
- cookie.
-
-Because the withdrawing, releasing and retiring events are all handled by the
-object state machine, it doesn't matter if there's a collision with both ends
-trying to sever the connection at the same time. The state machine can just
-pick which one it wants to honour, and that takes care of the other as well.
diff --git a/Documentation/filesystems/caching/operations.rst b/Documentation/filesystems/caching/operations.rst
deleted file mode 100644
index 9983e1675447..000000000000
--- a/Documentation/filesystems/caching/operations.rst
+++ /dev/null
@@ -1,210 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-================================
-Asynchronous Operations Handling
-================================
-
-By: David Howells <dhowells@redhat.com>
-
-.. Contents:
-
- (*) Overview.
-
- (*) Operation record initialisation.
-
- (*) Parameters.
-
- (*) Procedure.
-
- (*) Asynchronous callback.
-
-
-Overview
-========
-
-FS-Cache has an asynchronous operations handling facility that it uses for its
-data storage and retrieval routines. Its operations are represented by
-fscache_operation structs, though these are usually embedded into some other
-structure.
-
-This facility is available to and expected to be used by the cache backends,
-and FS-Cache will create operations and pass them off to the appropriate cache
-backend for completion.
-
-To make use of this facility, <linux/fscache-cache.h> should be #included.
-
-
-Operation Record Initialisation
-===============================
-
-An operation is recorded in an fscache_operation struct::
-
- struct fscache_operation {
- union {
- struct work_struct fast_work;
- struct slow_work slow_work;
- };
- unsigned long flags;
- fscache_operation_processor_t processor;
- ...
- };
-
-Someone wanting to issue an operation should allocate something with this
-struct embedded in it. They should initialise it by calling::
-
- void fscache_operation_init(struct fscache_operation *op,
- fscache_operation_release_t release);
-
-with the operation to be initialised and the release function to use.
-
-The op->flags parameter should be set to indicate the CPU time provision and
-the exclusivity (see the Parameters section).
-
-The op->fast_work, op->slow_work and op->processor fields should be set as
-appropriate for the CPU time provision (see the Parameters section).
-
-FSCACHE_OP_WAITING may be set in op->flags prior to each submission of the
-operation and waited for afterwards.
-
-
-Parameters
-==========
-
-There are a number of parameters that can be set in the operation record's flag
-parameter. There are three options for the provision of CPU time in these
-operations:
-
- (1) The operation may be done synchronously (FSCACHE_OP_MYTHREAD). A thread
- may decide it wants to handle an operation itself without deferring it to
- another thread.
-
- This is, for example, used in read operations for calling readpages() on
- the backing filesystem in CacheFiles. Although readpages() does an
- asynchronous data fetch, the determination of whether pages exist is done
- synchronously - and the netfs does not proceed until this has been
- determined.
-
- If this option is to be used, FSCACHE_OP_WAITING must be set in op->flags
- before submitting the operation, and the operating thread must wait for it
- to be cleared before proceeding::
-
- wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
- TASK_UNINTERRUPTIBLE);
-
-
- (2) The operation may be fast asynchronous (FSCACHE_OP_FAST), in which case it
- will be given to keventd to process. Such an operation is not permitted
- to sleep on I/O.
-
- This is, for example, used by CacheFiles to copy data from a backing fs
- page to a netfs page after the backing fs has read the page in.
-
- If this option is used, op->fast_work and op->processor must be
- initialised before submitting the operation::
-
- INIT_WORK(&op->fast_work, do_some_work);
-
-
- (3) The operation may be slow asynchronous (FSCACHE_OP_SLOW), in which case it
- will be given to the slow work facility to process. Such an operation is
- permitted to sleep on I/O.
-
- This is, for example, used by FS-Cache to handle background writes of
- pages that have just been fetched from a remote server.
-
- If this option is used, op->slow_work and op->processor must be
- initialised before submitting the operation::
-
- fscache_operation_init_slow(op, processor);
-
-
-Furthermore, operations may be one of two types:
-
- (1) Exclusive (FSCACHE_OP_EXCLUSIVE). Operations of this type may not run in
- conjunction with any other operation on the object being operated upon.
-
- An example of this is the attribute change operation, in which the file
- being written to may need truncation.
-
- (2) Shareable. Operations of this type may be running simultaneously. It's
- up to the operation implementation to prevent interference between other
- operations running at the same time.
-
-
-Procedure
-=========
-
-Operations are used through the following procedure:
-
- (1) The submitting thread must allocate the operation and initialise it
- itself. Normally this would be part of a more specific structure with the
- generic op embedded within.
-
- (2) The submitting thread must then submit the operation for processing using
- one of the following two functions::
-
- int fscache_submit_op(struct fscache_object *object,
- struct fscache_operation *op);
-
- int fscache_submit_exclusive_op(struct fscache_object *object,
- struct fscache_operation *op);
-
- The first function should be used to submit non-exclusive ops and the
- second to submit exclusive ones. The caller must still set the
- FSCACHE_OP_EXCLUSIVE flag.
-
- If successful, both functions will assign the operation to the specified
- object and return 0. -ENOBUFS will be returned if the object specified is
- permanently unavailable.
-
- The operation manager will defer operations on an object that is still
- undergoing lookup or creation. The operation will also be deferred if an
- operation of conflicting exclusivity is in progress on the object.
-
- If the operation is asynchronous, the manager will retain a reference to
- it, so the caller should put their reference to it by passing it to::
-
- void fscache_put_operation(struct fscache_operation *op);
-
- (3) If the submitting thread wants to do the work itself, and has marked the
- operation with FSCACHE_OP_MYTHREAD, then it should monitor
- FSCACHE_OP_WAITING as described above and check the state of the object if
- necessary (the object might have died while the thread was waiting).
-
- When it has finished doing its processing, it should call
- fscache_op_complete() and fscache_put_operation() on it.
-
- (4) The operation holds an effective lock upon the object, preventing other
- exclusive ops conflicting until it is released. The operation can be
- enqueued for further immediate asynchronous processing by adjusting the
- CPU time provisioning option if necessary, eg::
-
- op->flags &= ~FSCACHE_OP_TYPE;
- op->flags |= FSCACHE_OP_FAST;
-
- and calling::
-
- void fscache_enqueue_operation(struct fscache_operation *op)
-
- This can be used to allow other things to have use of the worker thread
- pools.
-
-
-Asynchronous Callback
-=====================
-
-When used in asynchronous mode, the worker thread pool will invoke the
-processor method with a pointer to the operation. This should then get at the
-container struct by using container_of()::
-
- static void fscache_write_op(struct fscache_operation *_op)
- {
- struct fscache_storage *op =
- container_of(_op, struct fscache_storage, op);
- ...
- }
-
-The caller holds a reference on the operation, and will invoke
-fscache_put_operation() when the processor function returns. The processor
-function is at liberty to call fscache_enqueue_operation() or to take extra
-references.
diff --git a/Documentation/filesystems/ceph.rst b/Documentation/filesystems/ceph.rst
index 7d2ef4e27273..4942e018db85 100644
--- a/Documentation/filesystems/ceph.rst
+++ b/Documentation/filesystems/ceph.rst
@@ -82,7 +82,7 @@ Mount Syntax
The basic mount syntax is::
- # mount -t ceph monip[:port][,monip2[:port]...]:/[subdir] mnt
+ # mount -t ceph user@fsid.fs_name=/[subdir] mnt -o mon_addr=monip1[:port][/monip2[:port]]
You only need to specify a single monitor, as the client will get the
full list when it connects. (However, if the monitor you specify
@@ -90,16 +90,35 @@ happens to be down, the mount won't succeed.) The port can be left
off if the monitor is using the default. So if the monitor is at
1.2.3.4::
- # mount -t ceph 1.2.3.4:/ /mnt/ceph
+ # mount -t ceph cephuser@07fe3187-00d9-42a3-814b-72a4d5e7d5be.cephfs=/ /mnt/ceph -o mon_addr=1.2.3.4
is sufficient. If /sbin/mount.ceph is installed, a hostname can be
-used instead of an IP address.
+used instead of an IP address and the cluster FSID can be left out
+(as the mount helper will fill it in by reading the ceph configuration
+file)::
+ # mount -t ceph cephuser@cephfs=/ /mnt/ceph -o mon_addr=mon-addr
+Multiple monitor addresses can be passed by separating each address with a slash (`/`)::
+
+ # mount -t ceph cephuser@cephfs=/ /mnt/ceph -o mon_addr=192.168.1.100/192.168.1.101
+
+When using the mount helper, the monitor address can be read from the ceph
+configuration file if available. Note that the cluster FSID (passed as part
+of the device string) is validated by checking it against the FSID reported
+by the monitor.
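+
+For programmatic mounts, the same device string and options can be handed
+straight to the mount(2) system call. A minimal sketch, reusing the FSID and
+monitor addresses from the examples above and leaving the authentication
+options out for brevity::
+
+   #include <stdio.h>
+   #include <sys/mount.h>
+
+   int main(void)
+   {
+           /* The device string carries the entity name, the cluster FSID
+            * and the file system name; mon_addr bootstraps the connection. */
+           if (mount("cephuser@07fe3187-00d9-42a3-814b-72a4d5e7d5be.cephfs=/",
+                     "/mnt/ceph", "ceph", 0,
+                     "mon_addr=192.168.1.100/192.168.1.101") < 0) {
+                   perror("mount");
+                   return 1;
+           }
+           return 0;
+   }
+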
Mount Options
=============
+ mon_addr=ip_address[:port][/ip_address[:port]]
+ Monitor address of the cluster. This is used to bootstrap the
+ connection to the cluster. Once the connection is established, the
+ monitor addresses in the monitor map are followed.
+
+ fsid=cluster-id
+ FSID of the cluster (from `ceph fsid` command).
+
ip=A.B.C.D[:N]
Specify the IP and/or port the client should bind to locally.
There is normally not much reason to do this. If the IP is not
diff --git a/Documentation/filesystems/dax.rst b/Documentation/filesystems/dax.rst
index 9a1b8fd9e82b..e3b30429d703 100644
--- a/Documentation/filesystems/dax.rst
+++ b/Documentation/filesystems/dax.rst
@@ -23,8 +23,8 @@ on it as usual. The `DAX` code currently only supports files with a block
size equal to your kernel's `PAGE_SIZE`, so you may need to specify a block
size when creating the filesystem.
-Currently 3 filesystems support `DAX`: ext2, ext4 and xfs. Enabling `DAX` on them
-is different.
+Currently 4 filesystems support `DAX`: ext2, ext4, xfs and virtiofs.
+Enabling `DAX` on them is different.
Enabling DAX on ext2
--------------------
@@ -168,6 +168,22 @@ if the underlying media does not support dax and/or the filesystem is
overridden with a mount option.
+Enabling DAX on virtiofs
+----------------------------
+The semantics of DAX on virtiofs are basically the same as on ext4 and xfs,
+except that when '-o dax=inode' is specified, the virtiofs client derives the
+hint of whether DAX shall be enabled from the virtiofs server through the
+FUSE protocol, rather than from the persistent `FS_XFLAG_DAX` flag. That is,
+whether DAX shall be enabled is completely determined by the virtiofs server,
+and the server may deploy various algorithms to make this decision, e.g.
+depending on the persistent `FS_XFLAG_DAX` flag on the host.
+
+Setting or clearing the persistent `FS_XFLAG_DAX` flag inside the guest is
+still supported, but it is not guaranteed that DAX will then be enabled or
+disabled for the corresponding file. Users inside the guest still need to
+call statx(2) and check the statx attribute `STATX_ATTR_DAX` to see whether
+DAX is enabled for this file.
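+
+A minimal sketch of that check from inside the guest (the file path is
+illustrative)::
+
+   #define _GNU_SOURCE
+   #include <stdio.h>
+   #include <fcntl.h>
+   #include <sys/stat.h>
+
+   int main(void)
+   {
+           struct statx stx;
+
+           /* stx_attributes is filled in regardless of the requested mask. */
+           if (statx(AT_FDCWD, "/mnt/virtiofs/file", 0, STATX_BASIC_STATS,
+                     &stx) < 0) {
+                   perror("statx");
+                   return 1;
+           }
+           printf("DAX is %s\n",
+                  (stx.stx_attributes & STATX_ATTR_DAX) ?
+                  "enabled" : "disabled");
+           return 0;
+   }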
+
+
Implementation Tips for Block Driver Writers
--------------------------------------------
diff --git a/Documentation/filesystems/f2fs.rst b/Documentation/filesystems/f2fs.rst
index d7b84695f56a..4a2426f0485a 100644
--- a/Documentation/filesystems/f2fs.rst
+++ b/Documentation/filesystems/f2fs.rst
@@ -198,6 +198,7 @@ fault_type=%d Support configuring fault injection type, should be
FAULT_WRITE_IO 0x000004000
FAULT_SLAB_ALLOC 0x000008000
FAULT_DQUOT_INIT 0x000010000
+ FAULT_LOCK_OP 0x000020000
=================== ===========
mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random
diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst
index 375baca7edcd..4f373a8ec47b 100644
--- a/Documentation/filesystems/netfs_library.rst
+++ b/Documentation/filesystems/netfs_library.rst
@@ -454,13 +454,18 @@ operation table looks like the following::
void *term_func_priv);
int (*prepare_write)(struct netfs_cache_resources *cres,
- loff_t *_start, size_t *_len, loff_t i_size);
+ loff_t *_start, size_t *_len, loff_t i_size,
+ bool no_space_allocated_yet);
int (*write)(struct netfs_cache_resources *cres,
loff_t start_pos,
struct iov_iter *iter,
netfs_io_terminated_t term_func,
void *term_func_priv);
+
+ int (*query_occupancy)(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len);
};
With a termination handler function pointer::
@@ -515,11 +520,14 @@ The methods defined in the table are:
* ``prepare_write()``
- [Required] Called to adjust a write to the cache and check that there is
- sufficient space in the cache. The start and length values indicate the
- size of the write that netfslib is proposing, and this can be adjusted by
- the cache to respect DIO boundaries. The file size is passed for
- information.
+ [Required] Called to prepare a write to the cache to take place. This
+ involves checking to see whether the cache has sufficient space to honour
+ the write. ``*_start`` and ``*_len`` indicate the region to be written; the
+ region can be shrunk or it can be expanded to a page boundary either way as
+ necessary to align for direct I/O. i_size holds the size of the object and
+ is provided for reference. no_space_allocated_yet is set to true if the
+ caller is certain that no data has been written to that region - for example
+ if it tried to do a read from there already.
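+
+  A sketch of how a caller might drive ``prepare_write()`` together with the
+  ``write()`` method from the table above (the wrapper name and the ``false``
+  hint passed here are illustrative)::
+
+     static int cache_write_prepared(struct netfs_cache_resources *cres,
+                                     loff_t start, size_t len, loff_t i_size,
+                                     struct iov_iter *iter,
+                                     netfs_io_terminated_t term_func,
+                                     void *term_func_priv)
+     {
+             /* The cache may shrink or expand [start, start + len) to meet
+              * its direct-I/O alignment; iter is assumed to cover the
+              * adjusted region. */
+             int ret = cres->ops->prepare_write(cres, &start, &len, i_size,
+                                                false);
+
+             if (ret < 0)
+                     return ret;
+             return cres->ops->write(cres, start, iter, term_func,
+                                     term_func_priv);
+     }
+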
* ``write()``
@@ -532,6 +540,18 @@ The methods defined in the table are:
indicating whether the termination is definitely happening in the caller's
context.
+ * ``query_occupancy()``
+
+ [Required] Called to find out where the next piece of data is within a
+ particular region of the cache. The start and length of the region to be
+ queried are passed in, along with the granularity to which the answer needs
+ to be aligned. The function passes back the start and length of the data,
+ if any, available within that region. Note that there may be a hole at the
+ front.
+
+ It returns 0 if some data was found, -ENODATA if there was no usable data
+ within the region or -ENOBUFS if there is no caching on this file.
+
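+A sketch of walking the cached extents within the first megabyte of a file
+with this method (the helper name is illustrative)::
+
+   static void walk_cached_extents(struct netfs_cache_resources *cres)
+   {
+           const struct netfs_cache_ops *ops = cres->ops;
+           loff_t start = 0, end = 1024 * 1024, data_start;
+           size_t data_len;
+
+           while (start < end &&
+                  ops->query_occupancy(cres, start, end - start, PAGE_SIZE,
+                                       &data_start, &data_len) == 0) {
+                   pr_info("data at %llx-%llx\n",
+                           (unsigned long long)data_start,
+                           (unsigned long long)(data_start + data_len - 1));
+                   /* There may have been a hole in front; resume after the
+                    * extent just reported. */
+                   start = data_start + data_len;
+           }
+   }
+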
Note that these methods are passed a pointer to the cache resource structure,
not the read request structure as they could be used in other situations where
there isn't a read request structure as well, such as writing dirty data to the
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 8d7f141c6fc7..061744c436d9 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -426,12 +426,14 @@ with the memory region, as the case would be with BSS (uninitialized data).
The "pathname" shows the name associated file for this mapping. If the mapping
is not associated with a file:
- ======= ====================================
+ ============= ====================================
[heap] the heap of the program
[stack] the stack of the main process
[vdso] the "virtual dynamic shared object",
the kernel system call handler
- ======= ====================================
+ [anon:<name>] an anonymous mapping that has been
+ named by userspace
+ ============= ====================================
or if empty, the mapping is anonymous.
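+
+Such names are assigned from userspace with prctl(2). A minimal sketch,
+assuming a kernel and headers with ``CONFIG_ANON_VMA_NAME`` support::
+
+   #include <stdio.h>
+   #include <sys/mman.h>
+   #include <sys/prctl.h>
+   #include <linux/prctl.h>
+
+   int main(void)
+   {
+           size_t len = 4 * 4096;
+           void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
+                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+           if (p == MAP_FAILED)
+                   return 1;
+           /* The region now shows up as "[anon:my-heap]" in maps. */
+           if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
+                     (unsigned long)p, len, (unsigned long)"my-heap"))
+                   perror("prctl");
+           getchar();      /* pause to inspect /proc/self/maps */
+           return 0;
+   }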
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index da138dd39883..a1212b5b3026 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -300,30 +300,6 @@ Contact: Daniel Vetter, Noralf Tronnes
Level: Advanced
-Garbage collect fbdev scrolling acceleration
---------------------------------------------
-
-Scroll acceleration has been disabled in fbcon. Now it works as the old
-SCROLL_REDRAW mode. A ton of code was removed in fbcon.c and the hook bmove was
-removed from fbcon_ops.
-Remaining tasks:
-
-- a bunch of the hooks in fbcon_ops could be removed or simplified by calling
- directly instead of the function table (with a switch on p->rotate)
-
-- fb_copyarea is unused after this, and can be deleted from all drivers
-
-- after that, fb_copyarea can be deleted from fb_ops in include/linux/fb.h as
- well as cfb_copyarea
-
-Note that not all acceleration code can be deleted, since clearing and cursor
-support is still accelerated, which might be good candidates for further
-deletion projects.
-
-Contact: Daniel Vetter
-
-Level: Intermediate
-
idr_init_base()
---------------
diff --git a/Documentation/index.rst b/Documentation/index.rst
index 54ce34fd6fbd..b58692d687f6 100644
--- a/Documentation/index.rst
+++ b/Documentation/index.rst
@@ -137,6 +137,7 @@ needed).
misc-devices/index
scheduler/index
mhi/index
+ tty/index
Architecture-agnostic documentation
-----------------------------------
@@ -165,6 +166,7 @@ to ReStructured Text format, or are simply too old.
.. toctree::
:maxdepth: 2
+ tools/index
staging/index
watch_queue
diff --git a/Documentation/kbuild/kconfig-language.rst b/Documentation/kbuild/kconfig-language.rst
index 98c24183d8c3..93a5b6e1fabd 100644
--- a/Documentation/kbuild/kconfig-language.rst
+++ b/Documentation/kbuild/kconfig-language.rst
@@ -176,7 +176,7 @@ applicable everywhere (see syntax).
y y y Y/m/n
n m n N/m
m m m M/n
- y m n M/n
+ y m m M/n
y n * N
=== === ============= ==============
diff --git a/Documentation/kernel-hacking/locking.rst b/Documentation/kernel-hacking/locking.rst
index e6cd40663ea5..4cbd50edf277 100644
--- a/Documentation/kernel-hacking/locking.rst
+++ b/Documentation/kernel-hacking/locking.rst
@@ -295,7 +295,7 @@ Pete Zaitcev gives the following summary:
- If you are in a process context (any syscall) and want to lock other
process out, use a mutex. You can take a mutex and sleep
- (``copy_from_user*(`` or ``kmalloc(x,GFP_KERNEL)``).
+ (``copy_from_user()`` or ``kmalloc(x,GFP_KERNEL)``).
- Otherwise (== data can be touched in an interrupt), use
spin_lock_irqsave() and
diff --git a/Documentation/livepatch/api.rst b/Documentation/livepatch/api.rst
new file mode 100644
index 000000000000..78944b63d74b
--- /dev/null
+++ b/Documentation/livepatch/api.rst
@@ -0,0 +1,30 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+Livepatching APIs
+=================
+
+Livepatch Enablement
+====================
+
+.. kernel-doc:: kernel/livepatch/core.c
+ :export:
+
+
+Shadow Variables
+================
+
+.. kernel-doc:: kernel/livepatch/shadow.c
+ :export:
+
+System State Changes
+====================
+
+.. kernel-doc:: kernel/livepatch/state.c
+ :export:
+
+Object Types
+============
+
+.. kernel-doc:: include/linux/livepatch.h
+ :identifiers: klp_patch klp_object klp_func klp_callbacks klp_state
diff --git a/Documentation/livepatch/index.rst b/Documentation/livepatch/index.rst
index 43cce5fad705..cebf1c71d4a5 100644
--- a/Documentation/livepatch/index.rst
+++ b/Documentation/livepatch/index.rst
@@ -14,6 +14,7 @@ Kernel Livepatching
shadow-vars
system-state
reliable-stacktrace
+ api
.. only:: subproject and html
diff --git a/Documentation/livepatch/shadow-vars.rst b/Documentation/livepatch/shadow-vars.rst
index 6a7d43a8787d..7a7098bfb5c8 100644
--- a/Documentation/livepatch/shadow-vars.rst
+++ b/Documentation/livepatch/shadow-vars.rst
@@ -82,8 +82,8 @@ to do actions that can be done only once when a new variable is allocated.
- call destructor function if defined
- free shadow variable
-* klp_shadow_free_all() - detach and free all <*, id> shadow variables
- - find and remove any <*, id> references from global hashtable
+* klp_shadow_free_all() - detach and free all <_, id> shadow variables
+ - find and remove any <_, id> references from global hashtable
- if found
diff --git a/Documentation/livepatch/system-state.rst b/Documentation/livepatch/system-state.rst
index c6d127c2d9aa..7a3935fd812b 100644
--- a/Documentation/livepatch/system-state.rst
+++ b/Documentation/livepatch/system-state.rst
@@ -52,12 +52,12 @@ struct klp_state:
The state can be manipulated using two functions:
- - *klp_get_state(patch, id)*
+ - klp_get_state()
- Get struct klp_state associated with the given livepatch
and state id.
- - *klp_get_prev_state(id)*
+ - klp_get_prev_state()
- Get struct klp_state associated with the given feature id and
already installed livepatches.
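+
+A sketch of the usual take-over pattern in a livepatch callback (``patch``,
+the state id and ``alloc_loglevel_state()`` are illustrative)::
+
+   #define CONSOLE_LOGLEVEL_STATE 1
+
+   static int console_loglevel_state_init(void)
+   {
+           struct klp_state *state, *prev_state;
+
+           state = klp_get_state(&patch, CONSOLE_LOGLEVEL_STATE);
+           prev_state = klp_get_prev_state(CONSOLE_LOGLEVEL_STATE);
+           if (!prev_state) {
+                   /* No older livepatch provides this state: allocate it. */
+                   state->data = alloc_loglevel_state();
+                   return state->data ? 0 : -ENOMEM;
+           }
+           /* Take over the state data from the older livepatch. */
+           state->data = prev_state->data;
+           return 0;
+   }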
diff --git a/Documentation/process/changes.rst b/Documentation/process/changes.rst
index cf908d79666e..a337e8eabfe1 100644
--- a/Documentation/process/changes.rst
+++ b/Documentation/process/changes.rst
@@ -30,7 +30,7 @@ you probably needn't concern yourself with pcmciautils.
Program Minimal version Command to check the version
====================== =============== ========================================
GNU C 5.1 gcc --version
-Clang/LLVM (optional) 10.0.1 clang --version
+Clang/LLVM (optional) 11.0.0 clang --version
GNU make 3.81 make --version
binutils 2.23 ld -v
flex 2.5.35 flex --version
diff --git a/Documentation/riscv/vm-layout.rst b/Documentation/riscv/vm-layout.rst
index b7f98930d38d..1bd687b97104 100644
--- a/Documentation/riscv/vm-layout.rst
+++ b/Documentation/riscv/vm-layout.rst
@@ -47,12 +47,12 @@ RISC-V Linux Kernel SV39
| Kernel-space virtual memory, shared between all processes:
____________________________________________________________|___________________________________________________________
| | | |
- ffffffc000000000 | -256 GB | ffffffc7ffffffff | 32 GB | kasan
- ffffffcefee00000 | -196 GB | ffffffcefeffffff | 2 MB | fixmap
- ffffffceff000000 | -196 GB | ffffffceffffffff | 16 MB | PCI io
- ffffffcf00000000 | -196 GB | ffffffcfffffffff | 4 GB | vmemmap
- ffffffd000000000 | -192 GB | ffffffdfffffffff | 64 GB | vmalloc/ioremap space
- ffffffe000000000 | -128 GB | ffffffff7fffffff | 124 GB | direct mapping of all physical memory
+ ffffffc6fee00000 | -228 GB | ffffffc6feffffff | 2 MB | fixmap
+ ffffffc6ff000000 | -228 GB | ffffffc6ffffffff | 16 MB | PCI io
+ ffffffc700000000 | -228 GB | ffffffc7ffffffff | 4 GB | vmemmap
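+A sketch of setting such a filter programmatically (the tracefs path and the
+pattern are illustrative)::
+
+   #include <stdio.h>
+
+   int main(void)
+   {
+           FILE *f = fopen("/sys/kernel/tracing/events/syscalls/"
+                           "sys_enter_openat/filter", "w");
+
+           if (!f) {
+                   perror("fopen");
+                   return 1;
+           }
+           /* .ustring makes the kernel fetch the string from user space. */
+           fprintf(f, "filename.ustring ~ \"*password*\"\n");
+           return fclose(f) ? 1 : 0;
+   }
+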
+ ffffffc800000000 | -224 GB | ffffffd7ffffffff | 64 GB | vmalloc/ioremap space
+ ffffffd800000000 | -160 GB | fffffff6ffffffff | 124 GB | direct mapping of all physical memory
+ fffffff700000000 | -36 GB | fffffffeffffffff | 32 GB | kasan
__________________|____________|__________________|_________|____________________________________________________________
|
|
diff --git a/Documentation/staging/tee.rst b/Documentation/staging/tee.rst
index 3c63d8dcd61e..498343c7ab08 100644
--- a/Documentation/staging/tee.rst
+++ b/Documentation/staging/tee.rst
@@ -255,7 +255,7 @@ The following picture shows a high level overview of AMD-TEE::
+--------------------------+ +---------+--------------------+
At the lowest level (in x86), the AMD Secure Processor (ASP) driver uses the
-CPU to PSP mailbox regsister to submit commands to the PSP. The format of the
+CPU to PSP mailbox register to submit commands to the PSP. The format of the
command buffer is opaque to the ASP driver. Its role is to submit commands to
the secure processor and return results to AMD-TEE driver. The interface
between AMD-TEE driver and AMD Secure Processor driver can be found in [6].
@@ -290,7 +290,7 @@ cancel_req driver callback is not supported by AMD-TEE.
The GlobalPlatform TEE Client API [5] can be used by the user space (client) to
talk to AMD's TEE. AMD's TEE provides a secure environment for loading, opening
+a session, invoking commands and closing a session with the TA.
+a session, invoking commands and closing session with TA.
References
==========
diff --git a/Documentation/tools/index.rst b/Documentation/tools/index.rst
new file mode 100644
index 000000000000..0bb1e61bdcc0
--- /dev/null
+++ b/Documentation/tools/index.rst
@@ -0,0 +1,20 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
+Kernel tools
+============
+
+This book covers user-space tools that are shipped with the kernel source;
+more additions are needed here:
+
+.. toctree::
+ :maxdepth: 1
+
+ rtla/index
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/tools/rtla/Makefile b/Documentation/tools/rtla/Makefile
new file mode 100644
index 000000000000..9f2b84af1a6c
--- /dev/null
+++ b/Documentation/tools/rtla/Makefile
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Based on bpftool's Documentation Makefile
+
+INSTALL ?= install
+RM ?= rm -f
+RMDIR ?= rmdir --ignore-fail-on-non-empty
+
+PREFIX ?= /usr/share
+MANDIR ?= $(PREFIX)/man
+MAN1DIR = $(MANDIR)/man1
+
+MAN1_RST = $(wildcard rtla*.rst)
+
+_DOC_MAN1 = $(patsubst %.rst,%.1,$(MAN1_RST))
+DOC_MAN1 = $(addprefix $(OUTPUT),$(_DOC_MAN1))
+
+RST2MAN_DEP := $(shell command -v rst2man 2>/dev/null)
+RST2MAN_OPTS += --verbose
+
+$(OUTPUT)%.1: %.rst
+ifndef RST2MAN_DEP
+ $(error "rst2man not found, but required to generate man pages")
+endif
+ rst2man $(RST2MAN_OPTS) $< > $@
+
+man1: $(DOC_MAN1)
+man: man1
+
+clean:
+ $(RM) $(DOC_MAN1)
+
+install: man
+ $(INSTALL) -d -m 755 $(DESTDIR)$(MAN1DIR)
+ $(INSTALL) -m 644 $(DOC_MAN1) $(DESTDIR)$(MAN1DIR)
+
+uninstall:
+ $(RM) $(addprefix $(DESTDIR)$(MAN1DIR)/,$(_DOC_MAN1))
+ $(RMDIR) $(DESTDIR)$(MAN1DIR)
+
+.PHONY: man man1 clean install uninstall
+.DEFAULT_GOAL := man
diff --git a/Documentation/tools/rtla/common_appendix.rst b/Documentation/tools/rtla/common_appendix.rst
new file mode 100644
index 000000000000..b494084acccd
--- /dev/null
+++ b/Documentation/tools/rtla/common_appendix.rst
@@ -0,0 +1,12 @@
+REPORTING BUGS
+==============
+Report bugs to <lkml@vger.kernel.org>
+
+LICENSE
+=======
+**rtla** is Free Software licensed under the GNU GPLv2
+
+COPYING
+=======
+Copyright \(C) 2021 Red Hat, Inc. Free use of this software is granted under
+the terms of the GNU Public License (GPL).
diff --git a/Documentation/tools/rtla/common_hist_options.rst b/Documentation/tools/rtla/common_hist_options.rst
new file mode 100644
index 000000000000..0266cd08a6c9
--- /dev/null
+++ b/Documentation/tools/rtla/common_hist_options.rst
@@ -0,0 +1,23 @@
+**-b**, **--bucket-size** *N*
+
+ Set the histogram bucket size (default *1*).
+
+**-e**, **--entries** *N*
+
+ Set the number of entries of the histogram (default 256).
+
+**--no-header**
+
+ Do not print header.
+
+**--no-summary**
+
+ Do not print summary.
+
+**--no-index**
+
+ Do not print index.
+
+**--with-zeros**
+
+ Also print entries with a zero count.
diff --git a/Documentation/tools/rtla/common_options.rst b/Documentation/tools/rtla/common_options.rst
new file mode 100644
index 000000000000..721790ad984e
--- /dev/null
+++ b/Documentation/tools/rtla/common_options.rst
@@ -0,0 +1,28 @@
+**-c**, **--cpus** *cpu-list*
+
+ Set the osnoise tracer to run the sample threads in the cpu-list.
+
+**-d**, **--duration** *time[s|m|h|d]*
+
+ Set the duration of the session.
+
+**-D**, **--debug**
+
+ Print debug info.
+
+**-t**, **--trace**\[*=file*]
+
+ Save the stopped trace to [*file|osnoise_trace.txt*].
+
+**-P**, **--priority** *o:prio|r:prio|f:prio|d:runtime:period*
+
+ Set the scheduling parameters of the osnoise tracer threads; the formats to set the priority are:
+
+ - *o:prio* - use SCHED_OTHER with *prio*;
+ - *r:prio* - use SCHED_RR with *prio*;
+ - *f:prio* - use SCHED_FIFO with *prio*;
+ - *d:runtime[us|ms|s]:period[us|ms|s]* - use SCHED_DEADLINE with *runtime* and *period* in nanoseconds.
+
+**-h**, **--help**
+
+ Print help menu.
diff --git a/Documentation/tools/rtla/common_osnoise_description.rst b/Documentation/tools/rtla/common_osnoise_description.rst
new file mode 100644
index 000000000000..8973c5df888f
--- /dev/null
+++ b/Documentation/tools/rtla/common_osnoise_description.rst
@@ -0,0 +1,8 @@
+The **rtla osnoise** tool is an interface for the *osnoise* tracer. The
+*osnoise* tracer dispatches a kernel thread per-cpu. These threads read the
+time in a loop while preemption, softirqs and IRQs are enabled, thus
+allowing all the sources of operating system noise to occur during their
+execution. The *osnoise* tracer threads take note of the delta between each
+time read, along with an interference counter of all sources of interference.
+At the end of each period, the *osnoise* tracer displays a summary of
+the results.
diff --git a/Documentation/tools/rtla/common_osnoise_options.rst b/Documentation/tools/rtla/common_osnoise_options.rst
new file mode 100644
index 000000000000..d556883e4e26
--- /dev/null
+++ b/Documentation/tools/rtla/common_osnoise_options.rst
@@ -0,0 +1,17 @@
+**-p**, **--period** *us*
+
+ Set the *osnoise* tracer period in microseconds.
+
+**-r**, **--runtime** *us*
+
+ Set the *osnoise* tracer runtime in microseconds.
+
+**-s**, **--stop** *us*
+
+ Stop the trace if a single sample is higher than the argument in microseconds.
+ If **-T** is set, it will also save the trace to the output.
+
+**-S**, **--stop-total** *us*
+
+ Stop the trace if the total sample is higher than the argument in microseconds.
+ If **-T** is set, it will also save the trace to the output.
diff --git a/Documentation/tools/rtla/common_timerlat_description.rst b/Documentation/tools/rtla/common_timerlat_description.rst
new file mode 100644
index 000000000000..321201cb8597
--- /dev/null
+++ b/Documentation/tools/rtla/common_timerlat_description.rst
@@ -0,0 +1,10 @@
+The **rtla timerlat** tool is an interface for the *timerlat* tracer. The
+*timerlat* tracer dispatches a kernel thread per-cpu. These threads
+set a periodic timer to wake themselves up and go back to sleep. After
+the wakeup, they collect and generate useful information for the
+debugging of operating system timer latency.
+
+The *timerlat* tracer outputs information in two ways. It periodically
+prints the timer latency at the timer *IRQ* handler and the *Thread*
+handler. It also enables the tracing of the most relevant information via
+**osnoise:** tracepoints.
diff --git a/Documentation/tools/rtla/common_timerlat_options.rst b/Documentation/tools/rtla/common_timerlat_options.rst
new file mode 100644
index 000000000000..e9c1bfd55d48
--- /dev/null
+++ b/Documentation/tools/rtla/common_timerlat_options.rst
@@ -0,0 +1,16 @@
+**-p**, **--period** *us*
+
+ Set the *timerlat* tracer period in microseconds.
+
+**-i**, **--irq** *us*
+
+ Stop trace if the *IRQ* latency is higher than the argument in us.
+
+**-T**, **--thread** *us*
+
+ Stop trace if the *Thread* latency is higher than the argument in us.
+
+**-s**, **--stack** *us*
+
+ Save the stack trace at the *IRQ* if a *Thread* latency is higher than the
+ argument in us.
diff --git a/Documentation/tools/rtla/common_top_options.rst b/Documentation/tools/rtla/common_top_options.rst
new file mode 100644
index 000000000000..f48878938f84
--- /dev/null
+++ b/Documentation/tools/rtla/common_top_options.rst
@@ -0,0 +1,3 @@
+**-q**, **--quiet**
+
+ Print only a summary at the end of the session.
diff --git a/Documentation/tools/rtla/index.rst b/Documentation/tools/rtla/index.rst
new file mode 100644
index 000000000000..840f0bf3e803
--- /dev/null
+++ b/Documentation/tools/rtla/index.rst
@@ -0,0 +1,26 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================================
+The realtime Linux analysis tool
+================================
+
+RTLA provides a set of tools for the analysis of the kernel's realtime
+behavior on specific hardware.
+
+.. toctree::
+ :maxdepth: 1
+
+ rtla
+ rtla-osnoise
+ rtla-osnoise-hist
+ rtla-osnoise-top
+ rtla-timerlat
+ rtla-timerlat-hist
+ rtla-timerlat-top
+
+.. only:: subproject and html
+
+ Indices
+ =======
+
+ * :ref:`genindex`
diff --git a/Documentation/tools/rtla/rtla-osnoise-hist.rst b/Documentation/tools/rtla/rtla-osnoise-hist.rst
new file mode 100644
index 000000000000..52298ddd8701
--- /dev/null
+++ b/Documentation/tools/rtla/rtla-osnoise-hist.rst
@@ -0,0 +1,66 @@
+===================
+rtla-osnoise-hist
+===================
+------------------------------------------------------
+Display a histogram of the osnoise tracer samples
+------------------------------------------------------
+
+:Manual section: 1
+
+SYNOPSIS
+========
+**rtla osnoise hist** [*OPTIONS*]
+
+DESCRIPTION
+===========
+.. include:: common_osnoise_description.rst
+
+The **rtla osnoise hist** tool collects all **osnoise:sample_threshold**
+occurrences in a histogram, displaying the results in a user-friendly way.
+The tool also allows many configurations of the *osnoise* tracer and the
+collection of the tracer output.
+
+OPTIONS
+=======
+.. include:: common_osnoise_options.rst
+
+.. include:: common_hist_options.rst
+
+.. include:: common_options.rst
+
+EXAMPLE
+=======
+In the example below, *osnoise* tracer threads are set to run with real-time
+priority *FIFO:1*, on CPUs *0-11*, for *900ms* at each period (*1s* by
+default). The reason for reducing the runtime is to avoid starving the
+**rtla** tool. The tool is also set to run for *one minute*. The output
+histogram is set to group outputs in buckets of *10us* and *25* entries::
+
+ [root@f34 ~/]# rtla osnoise hist -P F:1 -c 0-11 -r 900000 -d 1M -b 10 -e 25
+ # RTLA osnoise histogram
+ # Time unit is microseconds (us)
+ # Duration: 0 00:01:00
+ Index CPU-000 CPU-001 CPU-002 CPU-003 CPU-004 CPU-005 CPU-006 CPU-007 CPU-008 CPU-009 CPU-010 CPU-011
+ 0 42982 46287 51779 53740 52024 44817 49898 36500 50408 50128 49523 52377
+ 10 12224 8356 2912 878 2667 10155 4573 18894 4214 4836 5708 2413
+ 20 8 5 12 2 13 24 20 41 29 53 39 39
+ 30 1 1 0 0 10 3 6 19 15 31 30 38
+ 40 0 0 0 0 0 4 2 7 2 3 8 11
+ 50 0 0 0 0 0 0 0 0 0 1 1 2
+ over: 0 0 0 0 0 0 0 0 0 0 0 0
+ count: 55215 54649 54703 54620 54714 55003 54499 55461 54668 55052 55309 54880
+ min: 0 0 0 0 0 0 0 0 0 0 0 0
+ avg: 0 0 0 0 0 0 0 0 0 0 0 0
+ max: 30 30 20 20 30 40 40 40 40 50 50 50
+
+SEE ALSO
+========
+**rtla-osnoise**\(1), **rtla-osnoise-top**\(1)
+
+*osnoise* tracer documentation: <https://www.kernel.org/doc/html/latest/trace/osnoise-tracer.html>
+
+AUTHOR
+======
+Written by Daniel Bristot de Oliveira <bristot@kernel.org>
+
+.. include:: common_appendix.rst
diff --git a/Documentation/tools/rtla/rtla-osnoise-top.rst b/Documentation/tools/rtla/rtla-osnoise-top.rst
new file mode 100644
index 000000000000..5d75d1394516
--- /dev/null
+++ b/Documentation/tools/rtla/rtla-osnoise-top.rst
@@ -0,0 +1,61 @@
+===================
+rtla-osnoise-top
+===================
+-----------------------------------------------
+Display a summary of the operating system noise
+-----------------------------------------------
+
+:Manual section: 1
+
+SYNOPSIS
+========
+**rtla osnoise top** [*OPTIONS*]
+
+DESCRIPTION
+===========
+.. include:: common_osnoise_description.rst
+
+**rtla osnoise top** collects the periodic summary from the *osnoise* tracer,
+including the counters of occurrences of each interference source,
+displaying the results in a user-friendly format.
+
+The tool also allows many configurations of the *osnoise* tracer and the
+collection of the tracer output.
+
+OPTIONS
+=======
+.. include:: common_osnoise_options.rst
+
+.. include:: common_top_options.rst
+
+.. include:: common_options.rst
+
+EXAMPLE
+=======
+In the example below, the **rtla osnoise top** tool is set to run with a
+real-time priority *FIFO:1*, on CPUs *0-3*, for *900ms* at each period
+(*1s* by default). The reason for reducing the runtime is to avoid starving
+the rtla tool. The tool is also set to run for *one minute* and to display
+a summary of the report at the end of the session::
+
+ [root@f34 ~]# rtla osnoise top -P F:1 -c 0-3 -r 900000 -d 1M -q
+ Operating System Noise
+ duration: 0 00:01:00 | time is in us
+ CPU Period Runtime Noise % CPU Aval Max Noise Max Single HW NMI IRQ Softirq Thread
+ 0 #59 53100000 304896 99.42580 6978 56 549 0 53111 1590 13
+ 1 #59 53100000 338339 99.36282 8092 24 399 0 53130 1448 31
+ 2 #59 53100000 290842 99.45227 6582 39 855 0 53110 1406 12
+ 3 #59 53100000 204935 99.61405 6251 33 290 0 53156 1460 12
+
+SEE ALSO
+========
+
+**rtla-osnoise**\(1), **rtla-osnoise-hist**\(1)
+
+Osnoise tracer documentation: <https://www.kernel.org/doc/html/latest/trace/osnoise-tracer.html>
+
+AUTHOR
+======
+Written by Daniel Bristot de Oliveira <bristot@kernel.org>
+
+.. include:: common_appendix.rst
diff --git a/Documentation/tools/rtla/rtla-osnoise.rst b/Documentation/tools/rtla/rtla-osnoise.rst
new file mode 100644
index 000000000000..c129b206ce34
--- /dev/null
+++ b/Documentation/tools/rtla/rtla-osnoise.rst
@@ -0,0 +1,59 @@
+===============
+rtla-osnoise
+===============
+------------------------------------------------------------------
+Measure the operating system noise
+------------------------------------------------------------------
+
+:Manual section: 1
+
+SYNOPSIS
+========
+**rtla osnoise** [*MODE*] ...
+
+DESCRIPTION
+===========
+
+.. include:: common_osnoise_description.rst
+
+The *osnoise* tracer outputs information in two ways. It periodically prints
+a summary of the noise of the operating system, including the counters of
+the occurrence of the source of interference. It also provides information
+for each noise via the **osnoise:** tracepoints. The **rtla osnoise top**
+mode displays information about the periodic summary from the *osnoise* tracer.
+The **rtla osnoise hist** mode displays information about the noise using
+the **osnoise:** tracepoints. For further details, please refer to the
+respective man page.
+
+MODES
+=====
+**top**
+
+ Prints the summary from osnoise tracer.
+
+**hist**
+
+ Prints a histogram of osnoise samples.
+
+If no MODE is given, the top mode is called, passing the arguments.
+
+OPTIONS
+=======
+
+**-h**, **--help**
+
+ Display the help text.
+
+For other options, see the man page for the corresponding mode.
+
+SEE ALSO
+========
+**rtla-osnoise-top**\(1), **rtla-osnoise-hist**\(1)
+
+Osnoise tracer documentation: <https://www.kernel.org/doc/html/latest/trace/osnoise-tracer.html>
+
+AUTHOR
+======
+Written by Daniel Bristot de Oliveira <bristot@kernel.org>
+
+.. include:: common_appendix.rst
diff --git a/Documentation/tools/rtla/rtla-timerlat-hist.rst b/Documentation/tools/rtla/rtla-timerlat-hist.rst
new file mode 100644
index 000000000000..e12eae1f3301
--- /dev/null
+++ b/Documentation/tools/rtla/rtla-timerlat-hist.rst
@@ -0,0 +1,106 @@
+=====================
+rtla-timerlat-hist
+=====================
+------------------------------------------------
+Histograms of the operating system timer latency
+------------------------------------------------
+
+:Manual section: 1
+
+SYNOPSIS
+========
+**rtla timerlat hist** [*OPTIONS*] ...
+
+DESCRIPTION
+===========
+
+.. include:: common_timerlat_description.rst
+
+The **rtla timerlat hist** tool displays a histogram of each tracer event
+occurrence. This tool uses the periodic information, and the
+**osnoise:** tracepoints are enabled when using the **-T** option.
+
+OPTIONS
+=======
+
+.. include:: common_timerlat_options.rst
+
+.. include:: common_hist_options.rst
+
+.. include:: common_options.rst
+
+EXAMPLE
+=======
+In the example below, **rtla timerlat hist** is set to run for *10* minutes,
+on the CPUs *0-4*, skipping *zero-only* lines. Moreover, **rtla timerlat
+hist** will change the priority of the *timerlat* threads to run under
+*SCHED_DEADLINE* priority, with a *100us* runtime every *1ms* period. The
+*1ms* period is also passed to the *timerlat* tracer::
+
+ [root@alien ~]# rtla timerlat hist -d 10m -c 0-4 -P d:100us:1ms -p 1ms
+ # RTLA timerlat histogram
+ # Time unit is microseconds (us)
+ # Duration: 0 00:10:00
+ Index IRQ-000 Thr-000 IRQ-001 Thr-001 IRQ-002 Thr-002 IRQ-003 Thr-003 IRQ-004 Thr-004
+ 0 276489 0 206089 0 466018 0 481102 0 205546 0
+ 1 318327 35487 388149 30024 94531 48382 83082 71078 388026 55730
+ 2 3282 122584 4019 126527 28231 109012 23311 89309 4568 98739
+ 3 940 11815 837 9863 6209 16227 6895 17196 910 9780
+ 4 444 17287 424 11574 2097 38443 2169 36736 462 13476
+ 5 206 43291 255 25581 1223 101908 1304 101137 236 28913
+ 6 132 101501 96 64584 635 213774 757 215471 99 73453
+ 7 74 169347 65 124758 350 57466 441 53639 69 148573
+ 8 53 85183 31 156751 229 9052 306 9026 39 139907
+ 9 22 10387 12 42762 161 2554 225 2689 19 26192
+ 10 13 1898 8 5770 114 1247 128 1405 13 3772
+ 11 9 560 9 924 71 686 76 765 8 713
+ 12 4 256 2 360 50 411 64 474 3 278
+ 13 2 167 2 172 43 256 53 350 4 180
+ 14 1 88 1 116 15 198 42 223 0 115
+ 15 2 63 3 94 11 139 20 150 0 58
+ 16 2 37 0 56 5 78 10 102 0 39
+ 17 0 18 0 28 4 57 8 80 0 15
+ 18 0 8 0 17 2 50 6 56 0 12
+ 19 0 9 0 5 0 19 0 48 0 18
+ 20 0 4 0 8 0 11 2 27 0 4
+ 21 0 2 0 3 1 9 1 18 0 6
+ 22 0 1 0 3 1 7 0 3 0 5
+ 23 0 2 0 4 0 2 0 7 0 2
+ 24 0 2 0 2 1 3 0 3 0 5
+ 25 0 0 0 1 0 1 0 1 0 3
+ 26 0 1 0 0 0 2 0 2 0 0
+ 27 0 0 0 3 0 1 0 0 0 1
+ 28 0 0 0 3 0 0 0 1 0 0
+ 29 0 0 0 2 0 2 0 1 0 3
+ 30 0 1 0 0 0 0 0 0 0 0
+ 31 0 1 0 0 0 0 0 2 0 2
+ 32 0 0 0 1 0 2 0 0 0 0
+ 33 0 0 0 2 0 0 0 0 0 1
+ 34 0 0 0 0 0 0 0 0 0 2
+ 35 0 1 0 1 0 0 0 0 0 1
+ 36 0 1 0 0 0 1 0 1 0 0
+ 37 0 0 0 1 0 0 0 0 0 0
+ 40 0 0 0 0 0 1 0 1 0 0
+ 41 0 0 0 0 0 0 0 0 0 1
+ 42 0 0 0 0 0 0 0 0 0 1
+ 44 0 0 0 0 0 1 0 0 0 0
+ 46 0 0 0 0 0 0 0 1 0 0
+ 47 0 0 0 0 0 0 0 0 0 1
+ 50 0 0 0 0 0 0 0 0 0 1
+ 54 0 0 0 1 0 0 0 0 0 0
+ 58 0 0 0 1 0 0 0 0 0 0
+ over: 0 0 0 0 0 0 0 0 0 0
+ count: 600002 600002 600002 600002 600002 600002 600002 600002 600002 600002
+ min: 0 1 0 1 0 1 0 1 0 1
+ avg: 0 5 0 5 0 4 0 4 0 5
+ max: 16 36 15 58 24 44 21 46 13 50
+
+SEE ALSO
+========
+**rtla-timerlat**\(1), **rtla-timerlat-top**\(1)
+
+*timerlat* tracer documentation: <https://www.kernel.org/doc/html/latest/trace/timerlat-tracer.html>
+
+AUTHOR
+======
+Written by Daniel Bristot de Oliveira <bristot@kernel.org>
diff --git a/Documentation/tools/rtla/rtla-timerlat-top.rst b/Documentation/tools/rtla/rtla-timerlat-top.rst
new file mode 100644
index 000000000000..1c321de1c171
--- /dev/null
+++ b/Documentation/tools/rtla/rtla-timerlat-top.rst
@@ -0,0 +1,145 @@
+====================
+rtla-timerlat-top
+====================
+-------------------------------------------
+Measures the operating system timer latency
+-------------------------------------------
+
+:Manual section: 1
+
+SYNOPSIS
+========
+**rtla timerlat top** [*OPTIONS*] ...
+
+DESCRIPTION
+===========
+
+.. include:: common_timerlat_description.rst
+
+The **rtla timerlat top** tool displays a summary of the periodic output
+from the *timerlat* tracer. It also provides information about each
+operating system noise event via the **osnoise:** tracepoints, which can
+be seen with the option **-T**.
+
+OPTIONS
+=======
+
+.. include:: common_timerlat_options.rst
+
+.. include:: common_top_options.rst
+
+.. include:: common_options.rst
+
+EXAMPLE
+=======
+
+In the example below, the *timerlat* tracer is set to capture the stack trace at
+the IRQ handler, printing it to the buffer if the *Thread* timer latency is
+higher than *30 us*. It is also set to stop the session if a *Thread* timer
+latency higher than *30 us* is hit. Finally, it is set to save the trace
+buffer if the stop condition is hit::
+
+ [root@alien ~]# rtla timerlat top -s 30 -T 30 -t
+ Timer Latency
+ 0 00:00:59 | IRQ Timer Latency (us) | Thread Timer Latency (us)
+ CPU COUNT | cur min avg max | cur min avg max
+ 0 #58634 | 1 0 1 10 | 11 2 10 23
+ 1 #58634 | 1 0 1 9 | 12 2 9 23
+ 2 #58634 | 0 0 1 11 | 10 2 9 23
+ 3 #58634 | 1 0 1 11 | 11 2 9 24
+ 4 #58634 | 1 0 1 10 | 11 2 9 26
+ 5 #58634 | 1 0 1 8 | 10 2 9 25
+ 6 #58634 | 12 0 1 12 | 30 2 10 30 <--- CPU with spike
+ 7 #58634 | 1 0 1 9 | 11 2 9 23
+ 8 #58633 | 1 0 1 9 | 11 2 9 26
+ 9 #58633 | 1 0 1 9 | 10 2 9 26
+ 10 #58633 | 1 0 1 13 | 11 2 9 28
+ 11 #58633 | 1 0 1 13 | 12 2 9 24
+ 12 #58633 | 1 0 1 8 | 10 2 9 23
+ 13 #58633 | 1 0 1 10 | 10 2 9 22
+ 14 #58633 | 1 0 1 18 | 12 2 9 27
+ 15 #58633 | 1 0 1 10 | 11 2 9 28
+ 16 #58633 | 0 0 1 11 | 7 2 9 26
+ 17 #58633 | 1 0 1 13 | 10 2 9 24
+ 18 #58633 | 1 0 1 9 | 13 2 9 22
+ 19 #58633 | 1 0 1 10 | 11 2 9 23
+ 20 #58633 | 1 0 1 12 | 11 2 9 28
+ 21 #58633 | 1 0 1 14 | 11 2 9 24
+ 22 #58633 | 1 0 1 8 | 11 2 9 22
+ 23 #58633 | 1 0 1 10 | 11 2 9 27
+ timerlat hit stop tracing
+ saving trace to timerlat_trace.txt
+ [root@alien bristot]# tail -60 timerlat_trace.txt
+ [...]
+ timerlat/5-79755 [005] ....... 426.271226: #58634 context thread timer_latency 10823 ns
+ sh-109404 [006] dnLh213 426.271247: #58634 context irq timer_latency 12505 ns
+ sh-109404 [006] dNLh313 426.271258: irq_noise: local_timer:236 start 426.271245463 duration 12553 ns
+ sh-109404 [006] d...313 426.271263: thread_noise: sh:109404 start 426.271245853 duration 4769 ns
+ timerlat/6-79756 [006] ....... 426.271264: #58634 context thread timer_latency 30328 ns
+ timerlat/6-79756 [006] ....1.. 426.271265: <stack trace>
+ => timerlat_irq
+ => __hrtimer_run_queues
+ => hrtimer_interrupt
+ => __sysvec_apic_timer_interrupt
+ => sysvec_apic_timer_interrupt
+ => asm_sysvec_apic_timer_interrupt
+ => _raw_spin_unlock_irqrestore <---- spinlock that disabled interrupt.
+ => try_to_wake_up
+ => autoremove_wake_function
+ => __wake_up_common
+ => __wake_up_common_lock
+ => ep_poll_callback
+ => __wake_up_common
+ => __wake_up_common_lock
+ => fsnotify_add_event
+ => inotify_handle_inode_event
+ => fsnotify
+ => __fsnotify_parent
+ => __fput
+ => task_work_run
+ => exit_to_user_mode_prepare
+ => syscall_exit_to_user_mode
+ => do_syscall_64
+ => entry_SYSCALL_64_after_hwframe
+ => 0x7265000001378c
+ => 0x10000cea7
+ => 0x25a00000204a
+ => 0x12e302d00000000
+ => 0x19b51010901b6
+ => 0x283ce00726500
+ => 0x61ea308872
+ => 0x00000fe3
+ bash-109109 [007] d..h... 426.271265: #58634 context irq timer_latency 1211 ns
+ timerlat/6-79756 [006] ....... 426.271267: timerlat_main: stop tracing hit on cpu 6
+
+In the trace, it is possible to notice that the *IRQ* timer latency was
+already high, accounting for *12505 ns*. The IRQ delay was caused by the
+*bash-109109* process that disabled IRQs in the wake-up path
+(the *try_to_wake_up()* function). The duration of the IRQ handler that woke
+up the timerlat thread, reported by the **osnoise:irq_noise** event, was
+also high, adding another *12553 ns* to the Thread latency. Finally, the
+**osnoise:thread_noise** added by the currently running thread (including
+the scheduling overhead) contributed another *4769 ns*. Summing up these
+values, the *Thread* timer latency accounted for *30328 ns*.
+
+The primary reason for this high value is the wake-up path that was hit
+twice during this case: when the *bash-109109* was waking up a thread
+and then when the *timerlat* thread was awakened. This information can
+then be used as the starting point of a more fine-grained analysis.
+
+Note that **rtla timerlat** was dispatched without changing the *timerlat*
+tracer threads' priority. That is generally not needed because these threads
+have priority *FIFO:95* by default, which is a common priority used by
+real-time kernel developers to analyze scheduling delays.
+
+SEE ALSO
+========
+**rtla-timerlat**\(1), **rtla-timerlat-hist**\(1)
+
+*timerlat* tracer documentation: <https://www.kernel.org/doc/html/latest/trace/timerlat-tracer.html>
+
+AUTHOR
+======
+Written by Daniel Bristot de Oliveira <bristot@kernel.org>
+
+.. include:: common_appendix.rst
diff --git a/Documentation/tools/rtla/rtla-timerlat.rst b/Documentation/tools/rtla/rtla-timerlat.rst
new file mode 100644
index 000000000000..44a49e6f302b
--- /dev/null
+++ b/Documentation/tools/rtla/rtla-timerlat.rst
@@ -0,0 +1,57 @@
+================
+rtla-timerlat
+================
+-------------------------------------------
+Measures the operating system timer latency
+-------------------------------------------
+
+:Manual section: 1
+
+SYNOPSIS
+========
+**rtla timerlat** [*MODE*] ...
+
+DESCRIPTION
+===========
+
+.. include:: common_timerlat_description.rst
+
+The *timerlat* tracer outputs information in two ways. It periodically
+prints the timer latency at the timer *IRQ* handler and the *Thread* handler.
+It also provides information for each noise via the **osnoise:** tracepoints.
+The **rtla timerlat top** mode displays a summary of the periodic output
+from the *timerlat* tracer. The **rtla timerlat hist** mode displays a histogram
+of each tracer event occurrence. For further details, please refer to the
+respective man page.
+
+MODES
+=====
+**top**
+
+ Prints the summary from *timerlat* tracer.
+
+**hist**
+
+ Prints a histogram of timerlat samples.
+
+If no *MODE* is given, the top mode is called, passing the arguments.
+
+OPTIONS
+=======
+**-h**, **--help**
+
+ Display the help text.
+
+For other options, see the man page for the corresponding mode.
+
+SEE ALSO
+========
+**rtla-timerlat-top**\(1), **rtla-timerlat-hist**\(1)
+
+*timerlat* tracer documentation: <https://www.kernel.org/doc/html/latest/trace/timerlat-tracer.html>
+
+AUTHOR
+======
+Written by Daniel Bristot de Oliveira <bristot@kernel.org>
+
+.. include:: common_appendix.rst
diff --git a/Documentation/tools/rtla/rtla.rst b/Documentation/tools/rtla/rtla.rst
new file mode 100644
index 000000000000..fc0d233efcd5
--- /dev/null
+++ b/Documentation/tools/rtla/rtla.rst
@@ -0,0 +1,48 @@
+=========
+rtla
+=========
+--------------------------------
+Real-time Linux Analysis tool
+--------------------------------
+
+:Manual section: 1
+
+SYNOPSIS
+========
+**rtla** *COMMAND* [*OPTIONS*]
+
+DESCRIPTION
+===========
+**rtla** is a meta-tool that includes a set of commands that aim to
+analyze the real-time properties of Linux. But instead of testing Linux
+as a black box, **rtla** leverages kernel tracing capabilities to provide
+precise information about the properties and root causes of unexpected
+results.
+
+COMMANDS
+========
+**osnoise**
+
+ Gives information about the operating system noise (osnoise).
+
+**timerlat**
+
+ Measures the IRQ and thread timer latency.
+
+OPTIONS
+=======
+**-h**, **--help**
+
+ Display the help text.
+
+For other options, see the man page for the corresponding command.
+
+SEE ALSO
+========
+**rtla-osnoise**\(1), **rtla-timerlat**\(1)
+
+AUTHOR
+======
+Daniel Bristot de Oliveira <bristot@kernel.org>
+
+.. include:: common_appendix.rst
diff --git a/Documentation/trace/coresight/coresight-config.rst b/Documentation/trace/coresight/coresight-config.rst
index a4e3ef295240..6d5ffa6f7347 100644
--- a/Documentation/trace/coresight/coresight-config.rst
+++ b/Documentation/trace/coresight/coresight-config.rst
@@ -155,14 +155,14 @@ follows::
autofdo
$ cd autofdo/
$ ls
- description preset1 preset3 preset5 preset7 preset9
- feature_refs preset2 preset4 preset6 preset8
+ description feature_refs preset1 preset3 preset5 preset7 preset9
+ enable preset preset2 preset4 preset6 preset8
$ cat description
Setup ETMs with strobing for autofdo
$ cat feature_refs
strobing
-Each preset declared has a preset<n> subdirectory declared. The values for
+Each declared preset has a 'preset<n>' subdirectory. The values for
the preset can be examined::
$ cat preset1/values
@@ -170,6 +170,9 @@ the preset can be examined::
$ cat preset2/values
strobing.window = 0x1388 strobing.period = 0x4
+The 'enable' and 'preset' files allow the control of a configuration when
+using CoreSight with sysfs.
+
The features referenced by the configuration can be examined in the features
directory::
@@ -211,19 +214,13 @@ also declared in the perf 'cs_etm' event infrastructure so that they can
be selected when running trace under perf::
$ ls /sys/devices/cs_etm
- configurations format perf_event_mux_interval_ms sinks type
- events nr_addr_filters power
-
-Key directories here are 'configurations' - which lists the loaded
-configurations, and 'events' - a generic perf directory which allows
-selection on the perf command line.::
+ cpu0 cpu2 events nr_addr_filters power subsystem uevent
+ cpu1 cpu3 format perf_event_mux_interval_ms sinks type
- $ ls configurations/
- autofdo
- $ cat configurations/autofdo
- 0xa7c3dddd
+The key directory here is 'events' - a generic perf directory which allows
+selection on the perf command line. As with the sinks entries, this provides
+a hash of the configuration name.
-As with the sinks entries, this provides a hash of the configuration name.
The entry in the 'events' directory uses perf's built-in syntax generator
to substitute the syntax for the name when evaluating the command::
@@ -242,3 +239,56 @@ A preset to override the current parameter values can also be selected::
When configurations are selected in this way, then the trace sink used is
automatically selected.
+
+Using Configurations in sysfs
+=============================
+
+CoreSight can be controlled using sysfs. When this is in use, a configuration
+can be made active for the devices that are used in the sysfs session.
+
+In a configuration there are 'enable' and 'preset' files.
+
+To enable a configuration for use with sysfs::
+
+ $ cd configurations/autofdo
+ $ echo 1 > enable
+
+This will then use any default parameter values in the features - which can be
+adjusted as described above.
+
+To use a preset<n> set of parameter values::
+
+ $ echo 3 > preset
+
+This will select preset3 for the configuration.
+The valid values for preset are 0 - to deselect presets, and any value of
+<n> where a preset<n> sub-directory is present.
+
+Note that the active sysfs configuration is a global parameter, therefore
+only a single configuration can be active for sysfs at any one time.
+Attempting to enable a second configuration will result in an error.
+Additionally, attempting to disable the configuration while in use will
+also result in an error.
+
+The use of the active configuration by sysfs is independent of the configuration
+used in perf.
+
+
+Creating and Loading Custom Configurations
+==========================================
+
+Custom configurations and / or features can be dynamically loaded into the
+system by using a loadable module.
+
+An example of a custom configuration is found in ./samples/coresight.
+
+This creates a new configuration that uses the existing built in
+strobing feature, but provides a different set of presets.
+
+When the module is loaded, then the configuration appears in the configfs
+file system and is selectable in the same way as the built in configuration
+described above.
+
+Configurations can use previously loaded features. The system will ensure
+that it is not possible to unload a feature that is currently in use, by
+enforcing the unload order as the strict reverse of the load order.
diff --git a/Documentation/trace/events.rst b/Documentation/trace/events.rst
index 8ddb9b09451c..c47f381d0c00 100644
--- a/Documentation/trace/events.rst
+++ b/Documentation/trace/events.rst
@@ -198,6 +198,15 @@ The glob (~) accepts a wild card character (\*,?) and character classes
prev_comm ~ "*sh*"
prev_comm ~ "ba*sh"
+If the field is a pointer that points into user space (for example
+"filename" from sys_enter_openat), then you have to append ".ustring" to the
+field name::
+
+ filename.ustring ~ "password"
+
+As the kernel will have to know how to retrieve the memory that the pointer
+is at from user space.
+
5.2 Setting filters
-------------------
@@ -230,6 +239,16 @@ Currently the caret ('^') for an error always appears at the beginning of
the filter string; the error message should still be useful though
even without more accurate position info.
+5.2.1 Filter limitations
+------------------------
+
+If a filter is placed on a string pointer ``(char *)`` that does not point
+to a string on the ring buffer, but instead points to kernel or user space
+memory, then, for safety reasons, at most 1024 bytes of the content are
+copied into a temporary buffer to do the compare. If the copy of the memory
+faults (the pointer points to memory that should not be accessed), then the
+string compare will be treated as not matching.
+
5.3 Clearing filters
--------------------
diff --git a/Documentation/trace/ftrace.rst b/Documentation/trace/ftrace.rst
index b3166c4a7867..45b8c56af67a 100644
--- a/Documentation/trace/ftrace.rst
+++ b/Documentation/trace/ftrace.rst
@@ -3370,7 +3370,7 @@ one of the latency tracers, you will get the following results.
Instances
---------
-In the tracefs tracing directory is a directory called "instances".
+In the tracefs tracing directory, there is a directory called "instances".
New directories can be created inside of it using mkdir, and
directories can be removed with rmdir. The directory created
with mkdir in this directory will already contain files and other
diff --git a/Documentation/translations/zh_CN/core-api/kernel-api.rst b/Documentation/translations/zh_CN/core-api/kernel-api.rst
index ab7d81889340..e45fe80d1cd8 100644
--- a/Documentation/translations/zh_CN/core-api/kernel-api.rst
+++ b/Documentation/translations/zh_CN/core-api/kernel-api.rst
@@ -292,8 +292,6 @@ block/blk-sysfs.c
block/blk-settings.c
-block/blk-exec.c
-
block/blk-flush.c
block/blk-lib.c
diff --git a/Documentation/translations/zh_CN/core-api/kobject.rst b/Documentation/translations/zh_CN/core-api/kobject.rst
index b7c37794cc7f..0747b472fdea 100644
--- a/Documentation/translations/zh_CN/core-api/kobject.rst
+++ b/Documentation/translations/zh_CN/core-api/kobject.rst
@@ -258,7 +258,6 @@ kobject_put()以避免错误的发生是一个很好的做法。
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
- struct attribute **default_attrs;
const struct attribute_group **default_groups;
const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
const void *(*namespace)(struct kobject *kobj);
@@ -271,10 +270,10 @@ kobject_init()或kobject_init_and_add()时必须指定一个指向该结构的
指针。
当然,kobj_type结构中的release字段是指向这种类型的kobject的release()
-方法的一个指针。另外两个字段(sysfs_ops 和 default_attrs)控制这种
+方法的一个指针。另外两个字段(sysfs_ops 和 default_groups)控制这种
类型的对象如何在 sysfs 中被表示;它们超出了本文的范围。
-default_attrs 指针是一个默认属性的列表,它将为任何用这个 ktype 注册
+default_groups 指针是一个默认属性的列表,它将为任何用这个 ktype 注册
的 kobject 自动创建。
@@ -325,10 +324,9 @@ ksets
结构体kset_uevent_ops来处理它::
struct kset_uevent_ops {
- int (* const filter)(struct kset *kset, struct kobject *kobj);
- const char *(* const name)(struct kset *kset, struct kobject *kobj);
- int (* const uevent)(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env);
+ int (* const filter)(struct kobject *kobj);
+ const char *(* const name)(struct kobject *kobj);
+ int (* const uevent)(struct kobject *kobj, struct kobj_uevent_env *env);
};
diff --git a/Documentation/tty/index.rst b/Documentation/tty/index.rst
new file mode 100644
index 000000000000..21ea0cb21e55
--- /dev/null
+++ b/Documentation/tty/index.rst
@@ -0,0 +1,63 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===
+TTY
+===
+
+The Teletypewriter (TTY) layer takes care of all serial devices, including
+virtual ones like the pseudoterminal (PTY).
+
+TTY structures
+==============
+
+There are several major TTY structures. Every TTY device in a system has a
+corresponding struct tty_port. These devices are maintained by a TTY driver,
+represented by struct tty_driver. This structure describes the driver and also
+contains a reference to the operations which can be performed on its TTYs:
+struct tty_operations. Then, upon open, a struct tty_struct is allocated and
+lives until the final close. During this time, several callbacks from struct
+tty_operations are invoked by the TTY layer.
+
+Every character received by the kernel (both from devices and users) is passed
+through a preselected :doc:`tty_ldisc` (in
+short ldisc; in C, struct tty_ldisc_ops). Its task is to transform characters
+as defined by a particular ldisc, or as configured by the user. The default
+one is n_tty, implementing echoing, signal handling, job control, special
+character processing, and more. The transformed characters are passed on to
+the user or the device, depending on the source.
+
+Detailed descriptions of these TTY structures can be found in separate
+documents:
+
+.. toctree::
+ :maxdepth: 2
+
+ tty_driver
+ tty_port
+ tty_struct
+ tty_ldisc
+ tty_buffer
+ n_tty
+ tty_internals
+
+Writing a TTY Driver
+====================
+
+Before one starts writing a TTY driver, the :doc:`Serial
+<../driver-api/serial/driver>` and :doc:`USB Serial <../usb/usb-serial>`
+layers should be considered first. Drivers for serial devices can often use
+one of these specific layers to implement a serial driver. Only special
+devices should be handled directly by the TTY layer. If you are about to
+write such a driver, read on.
+
+A *typical* sequence a TTY driver performs is as follows:
+
+#. Allocate and register a TTY driver (module init)
+#. Create and register TTY devices as they are probed (probe function)
+#. Handle TTY operations and events like interrupts (TTY core invokes the
+ former, the device the latter)
+#. Remove devices as they are going away (remove function)
+#. Unregister and free the TTY driver (module exit)
+
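+A minimal sketch of steps 1 and 5 might look as follows; the ``sample_*``
+names and the device count are illustrative, not part of the TTY API::
+
+	#include <linux/err.h>
+	#include <linux/module.h>
+	#include <linux/tty.h>
+	#include <linux/tty_driver.h>
+
+	static struct tty_driver *sample_driver;
+
+	static const struct tty_operations sample_tty_ops = {
+		/* .open, .close, .write, ... are filled in by a real driver */
+	};
+
+	static int __init sample_init(void)
+	{
+		int ret;
+
+		/* step 1: allocate a TTY driver for at most 4 devices */
+		sample_driver = tty_alloc_driver(4, TTY_DRIVER_REAL_RAW |
+						    TTY_DRIVER_DYNAMIC_DEV);
+		if (IS_ERR(sample_driver))
+			return PTR_ERR(sample_driver);
+
+		sample_driver->driver_name = "sample";
+		sample_driver->name = "ttySMPL";
+		tty_set_operations(sample_driver, &sample_tty_ops);
+
+		ret = tty_register_driver(sample_driver);
+		if (ret)
+			tty_driver_kref_put(sample_driver);
+		return ret;
+	}
+	module_init(sample_init);
+
+	static void __exit sample_exit(void)
+	{
+		/* step 5: unregister and free the TTY driver */
+		tty_unregister_driver(sample_driver);
+		tty_driver_kref_put(sample_driver);
+	}
+	module_exit(sample_exit);
+
+	MODULE_LICENSE("GPL");
+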
+The driver-related steps, i.e. 1, 3, and 5, are described in detail in
+:doc:`tty_driver`. For the other two (device handling), look into
+:doc:`tty_port`.
diff --git a/Documentation/tty/n_tty.rst b/Documentation/tty/n_tty.rst
new file mode 100644
index 000000000000..15b70faee72d
--- /dev/null
+++ b/Documentation/tty/n_tty.rst
@@ -0,0 +1,22 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====
+N_TTY
+=====
+
+.. contents:: :local:
+
+N_TTY is the default (and fallback) :doc:`TTY line discipline <tty_ldisc>`.
+It tries to handle characters as per POSIX.
+
+External Functions
+==================
+
+.. kernel-doc:: drivers/tty/n_tty.c
+ :export:
+
+Internal Functions
+==================
+
+.. kernel-doc:: drivers/tty/n_tty.c
+ :internal:
diff --git a/Documentation/tty/tty_buffer.rst b/Documentation/tty/tty_buffer.rst
new file mode 100644
index 000000000000..a39d4781e0d2
--- /dev/null
+++ b/Documentation/tty/tty_buffer.rst
@@ -0,0 +1,46 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========
+TTY Buffer
+==========
+
+.. contents:: :local:
+
+Here, we document the functions for taking care of tty buffers and their
+flipping. Drivers are supposed to fill the buffer by one of the functions
+below and then flip the buffer, so that the data are passed to the :doc:`line
+discipline <tty_ldisc>` for further processing.
+
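+For instance, a driver's receive path (often an interrupt handler) typically
+fills the buffer and flips it in one go. A minimal sketch, with illustrative
+names::
+
+	#include <linux/tty_flip.h>
+
+	static void sample_receive(struct tty_port *port,
+				   const unsigned char *data, size_t len)
+	{
+		/* copy the received characters into the flip buffer ... */
+		tty_insert_flip_string(port, data, len);
+		/* ... and push them to the line discipline */
+		tty_flip_buffer_push(port);
+	}
+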
+Flip Buffer Management
+======================
+
+.. kernel-doc:: drivers/tty/tty_buffer.c
+ :identifiers: tty_prepare_flip_string tty_insert_flip_string_fixed_flag
+ tty_insert_flip_string_flags __tty_insert_flip_char
+ tty_flip_buffer_push tty_ldisc_receive_buf
+
+----
+
+Other Functions
+===============
+
+.. kernel-doc:: drivers/tty/tty_buffer.c
+ :identifiers: tty_buffer_space_avail tty_buffer_set_limit
+
+----
+
+Buffer Locking
+==============
+
+These are used only in special circumstances. Avoid them.
+
+.. kernel-doc:: drivers/tty/tty_buffer.c
+ :identifiers: tty_buffer_lock_exclusive tty_buffer_unlock_exclusive
+
+----
+
+Internal Functions
+==================
+
+.. kernel-doc:: drivers/tty/tty_buffer.c
+ :internal:
diff --git a/Documentation/tty/tty_driver.rst b/Documentation/tty/tty_driver.rst
new file mode 100644
index 000000000000..cc529f863406
--- /dev/null
+++ b/Documentation/tty/tty_driver.rst
@@ -0,0 +1,128 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============================
+TTY Driver and TTY Operations
+=============================
+
+.. contents:: :local:
+
+Allocation
+==========
+
+The first thing a driver needs to do is to allocate a struct tty_driver. This
+is done by tty_alloc_driver() (or __tty_alloc_driver()). Next, the newly
+allocated structure is filled with information. See `TTY Driver Reference`_ at
+the end of this document on what actually shall be filled in.
+
+The allocation routines expect the maximum number of devices the driver can
+handle and a set of flags. Flags are those starting with ``TTY_DRIVER_``,
+listed and described in `TTY Driver Flags`_ below.
+
+When the driver is about to be freed, tty_driver_kref_put() is called on it.
+This decrements the reference count and, once it reaches zero, the driver is
+freed.
+
+For reference, both allocation and deallocation functions are explained here in
+detail:
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: __tty_alloc_driver tty_driver_kref_put
+
+TTY Driver Flags
+----------------
+
+The flags accepted by tty_alloc_driver() (or __tty_alloc_driver()) are
+documented here:
+
+.. kernel-doc:: include/linux/tty_driver.h
+ :doc: TTY Driver Flags
+
+----
+
+Registration
+============
+
+When a struct tty_driver is allocated and filled in, it can be registered using
+tty_register_driver(). It is recommended to pass ``TTY_DRIVER_DYNAMIC_DEV`` in
+flags of tty_alloc_driver(). If it is not passed, *all* devices are also
+registered during tty_register_driver() and the following paragraph of
+registering devices can be skipped for such drivers. However, the struct
+tty_port part in `Registering Devices`_ is still relevant there.
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: tty_register_driver tty_unregister_driver
+
+Registering Devices
+-------------------
+
+Every TTY device shall be backed by a struct tty_port. Usually, TTY drivers
+embed tty_port into device's private structures. Further details about handling
+tty_port can be found in :doc:`tty_port`. The driver is also recommended to use
+tty_port's reference counting by tty_port_get() and tty_port_put(). The final
+put is supposed to free the tty_port including the device's private struct.
+
+If ``TTY_DRIVER_DYNAMIC_DEV`` was passed in flags to tty_alloc_driver(), the
+TTY driver is supposed to register every device discovered in the system
+itself (this dynamic registration is the preferred approach). This is
+performed by tty_register_device(), or by tty_register_device_attr() if the
+driver wants to expose some information through struct attribute_group. Both
+of them register the ``index``'th device and, upon return, the device can be
+opened. There are also preferred tty_port variants described in `Linking
+Devices to Ports`_ later. It is up to the driver to manage free indices and
+to choose the right one. The TTY layer only refuses to register more devices
+than passed to tty_alloc_driver().
+
+When the device is opened, the TTY layer allocates struct tty_struct and starts
+calling operations from :c:member:`tty_driver.ops`, see `TTY Operations
+Reference`_.
+
+The registration routines are documented as follows:
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: tty_register_device tty_register_device_attr
+ tty_unregister_device
+
+----
+
+Linking Devices to Ports
+------------------------
+As stated earlier, every TTY device shall have a struct tty_port assigned to
+it. It must be known to the TTY layer at :c:member:`tty_driver.ops.install()`
+at the latest. There are a few helpers to *link* the two. Ideally, the driver
+uses tty_port_register_device() or tty_port_register_device_attr() instead of
+tty_register_device() and tty_register_device_attr() at registration time.
+This way, the driver need not care about linking later on.
+
+If that is not possible, the driver can still link the tty_port to a specific
+index *before* the actual registration by tty_port_link_device(). If that
+still does not fit, tty_port_install() can be used from the
+:c:member:`tty_driver.ops.install` hook as a last resort. The last option is
+dedicated mostly to in-memory devices like the PTY, where tty_ports are
+allocated on demand.
+
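+For illustration, linking at registration time in a probe function might look
+like this sketch (``sample_driver``, the private structure, and the index
+handling are illustrative)::
+
+	#include <linux/err.h>
+	#include <linux/slab.h>
+	#include <linux/tty.h>
+	#include <linux/tty_port.h>
+
+	static struct tty_driver *sample_driver;	/* set up at module init */
+
+	struct sample_dev {
+		struct tty_port port;	/* first member: freed by the final put */
+		/* device private state ... */
+	};
+
+	static int sample_probe(struct device *dev, unsigned int index)
+	{
+		struct sample_dev *sd;
+		struct device *ttydev;
+
+		sd = kzalloc(sizeof(*sd), GFP_KERNEL);
+		if (!sd)
+			return -ENOMEM;
+
+		tty_port_init(&sd->port);
+
+		/* register the index'th device and link it to the port */
+		ttydev = tty_port_register_device(&sd->port, sample_driver,
+						  index, dev);
+		if (IS_ERR(ttydev)) {
+			tty_port_put(&sd->port);
+			return PTR_ERR(ttydev);
+		}
+		return 0;
+	}
+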
+The linking routines are documented here:
+
+.. kernel-doc:: drivers/tty/tty_port.c
+ :identifiers: tty_port_link_device tty_port_register_device
+ tty_port_register_device_attr
+
+----
+
+TTY Driver Reference
+====================
+
+All members of struct tty_driver are documented here. The required members are
+noted at the end. struct tty_operations are documented next.
+
+.. kernel-doc:: include/linux/tty_driver.h
+ :identifiers: tty_driver
+
+----
+
+TTY Operations Reference
+========================
+
+When a TTY is registered, these driver hooks can be invoked by the TTY layer:
+
+.. kernel-doc:: include/linux/tty_driver.h
+ :identifiers: tty_operations
+
diff --git a/Documentation/tty/tty_internals.rst b/Documentation/tty/tty_internals.rst
new file mode 100644
index 000000000000..d0d415820300
--- /dev/null
+++ b/Documentation/tty/tty_internals.rst
@@ -0,0 +1,31 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+TTY Internals
+=============
+
+.. contents:: :local:
+
+Kopen
+=====
+
+These functions are used for opening a TTY from kernelspace:
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: tty_kopen_exclusive tty_kopen_shared tty_kclose
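+
+For example, kernel code might open a serial port by device number like this
+minimal sketch (the chosen device numbers are illustrative)::
+
+	#include <linux/err.h>
+	#include <linux/kdev_t.h>
+	#include <linux/major.h>
+	#include <linux/tty.h>
+
+	static int sample_use_tty(void)
+	{
+		struct tty_struct *tty;
+
+		/* ttyS0 is TTY_MAJOR (4), minor 64 */
+		tty = tty_kopen_exclusive(MKDEV(TTY_MAJOR, 64));
+		if (IS_ERR(tty))
+			return PTR_ERR(tty);
+
+		/* ... use the tty ... */
+
+		tty_kclose(tty);
+		return 0;
+	}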
+
+----
+
+Exported Internal Functions
+===========================
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: tty_release_struct tty_dev_name_to_number tty_get_icount
+
+----
+
+Internal Functions
+==================
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :internal:
diff --git a/Documentation/tty/tty_ldisc.rst b/Documentation/tty/tty_ldisc.rst
new file mode 100644
index 000000000000..5144751be804
--- /dev/null
+++ b/Documentation/tty/tty_ldisc.rst
@@ -0,0 +1,85 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+TTY Line Discipline
+===================
+
+.. contents:: :local:
+
+The TTY line discipline processes all incoming and outgoing characters
+from/to a tty device. The default line discipline is :doc:`N_TTY <n_tty>`.
+It is also a fallback if establishing any other discipline for a tty fails.
+If even N_TTY fails, N_NULL takes over. That never fails, but it also does
+not process any characters -- it throws them away.
+
+Registration
+============
+
+Line disciplines are registered with tty_register_ldisc() passing the ldisc
+structure. At the point of registration the discipline must be ready to use and
+it is possible it will get used before the call returns success. If the call
+returns an error then it won’t get called. Do not re-use ldisc numbers as they
+are part of the userspace ABI and writing over an existing ldisc will cause
+demons to eat your computer. You must not re-register over the top of the line
+discipline even with the same data or your computer again will be eaten by
+demons. In order to remove a line discipline call tty_unregister_ldisc().
+
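+A registration sketch, with illustrative names and an explicitly hypothetical
+ldisc number (real code must use a number reserved in the userspace ABI)::
+
+	#include <linux/module.h>
+	#include <linux/tty_ldisc.h>
+
+	/* hypothetical ldisc number for illustration only */
+	#define N_SAMPLE	29
+
+	static struct tty_ldisc_ops sample_ldisc_ops = {
+		.owner = THIS_MODULE,
+		.num = N_SAMPLE,
+		.name = "sample",
+		/* .open, .close, .receive_buf, ... */
+	};
+
+	static int __init sample_ldisc_init(void)
+	{
+		/* the ldisc can be used as soon as this succeeds */
+		return tty_register_ldisc(&sample_ldisc_ops);
+	}
+	module_init(sample_ldisc_init);
+
+	static void __exit sample_ldisc_exit(void)
+	{
+		tty_unregister_ldisc(&sample_ldisc_ops);
+	}
+	module_exit(sample_ldisc_exit);
+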
+Heed this warning: the reference count field of the registered copies of the
+tty_ldisc structure in the ldisc table counts the number of lines using this
+discipline. The reference count of the tty_ldisc structure within a tty counts
+the number of active users of the ldisc at this instant. In effect it counts
+the number of threads of execution within an ldisc method (plus those about to
+enter and exit although this detail matters not).
+
+.. kernel-doc:: drivers/tty/tty_ldisc.c
+ :identifiers: tty_register_ldisc tty_unregister_ldisc
+
+Other Functions
+===============
+
+.. kernel-doc:: drivers/tty/tty_ldisc.c
+ :identifiers: tty_set_ldisc tty_ldisc_flush
+
+Line Discipline Operations Reference
+====================================
+
+.. kernel-doc:: include/linux/tty_ldisc.h
+ :identifiers: tty_ldisc_ops
+
+Driver Access
+=============
+
+Line discipline methods can call the methods of the underlying hardware driver.
+These are documented as a part of struct tty_operations.
+
+TTY Flags
+=========
+
+Line discipline methods have access to :c:member:`tty_struct.flags` field. See
+:doc:`tty_struct`.
+
+Locking
+=======
+
+Callers to the line discipline functions from the tty layer are required to
+take line discipline locks. The same is true of calls from the driver side,
+but this is not yet enforced.
+
+.. kernel-doc:: drivers/tty/tty_ldisc.c
+ :identifiers: tty_ldisc_ref_wait tty_ldisc_ref tty_ldisc_deref
+
+While these functions are slightly slower than the old code, they should have
+minimal impact, as most receive logic uses the flip buffers and only needs to
+take a reference when it pushes bits up through the driver.
+
+A caution: The :c:member:`tty_ldisc_ops.open()`,
+:c:member:`tty_ldisc_ops.close()` and :c:member:`tty_driver.set_ldisc()`
+functions are called with the ldisc unavailable. Thus tty_ldisc_ref() will fail
+in this situation if used within these functions. Ldisc and driver code
+calling its own functions must be careful in this case.
+
+Internal Functions
+==================
+
+.. kernel-doc:: drivers/tty/tty_ldisc.c
+ :internal:
diff --git a/Documentation/tty/tty_port.rst b/Documentation/tty/tty_port.rst
new file mode 100644
index 000000000000..5cb90e954fcf
--- /dev/null
+++ b/Documentation/tty/tty_port.rst
@@ -0,0 +1,70 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+========
+TTY Port
+========
+
+.. contents:: :local:
+
+The TTY drivers are advised to use struct tty_port helpers as much as possible.
+If the drivers implement :c:member:`tty_port.ops.activate()` and
+:c:member:`tty_port.ops.shutdown()`, they can use tty_port_open(),
+tty_port_close(), and tty_port_hangup() in respective
+:c:member:`tty_struct.ops` hooks.
+
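+A sketch of this pattern, with illustrative names::
+
+	static int sample_activate(struct tty_port *port, struct tty_struct *tty)
+	{
+		/* called on the first open: power up, enable interrupts, ... */
+		return 0;
+	}
+
+	static void sample_shutdown(struct tty_port *port)
+	{
+		/* called on the final close or hangup: quiesce the device */
+	}
+
+	static const struct tty_port_operations sample_port_ops = {
+		.activate = sample_activate,
+		.shutdown = sample_shutdown,
+	};
+
+	/* in struct tty_operations */
+	static int sample_open(struct tty_struct *tty, struct file *filp)
+	{
+		return tty_port_open(tty->port, tty, filp);
+	}
+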
+The reference and details are contained in the `TTY Port Reference`_ and `TTY
+Port Operations Reference`_ sections at the bottom.
+
+TTY Port Functions
+==================
+
+Init & Destroy
+--------------
+
+.. kernel-doc:: drivers/tty/tty_port.c
+ :identifiers: tty_port_init tty_port_destroy
+ tty_port_get tty_port_put
+
+Open/Close/Hangup Helpers
+-------------------------
+
+.. kernel-doc:: drivers/tty/tty_port.c
+ :identifiers: tty_port_install tty_port_open tty_port_block_til_ready
+ tty_port_close tty_port_close_start tty_port_close_end tty_port_hangup
+ tty_port_shutdown
+
+TTY Refcounting
+---------------
+
+.. kernel-doc:: drivers/tty/tty_port.c
+ :identifiers: tty_port_tty_get tty_port_tty_set
+
+TTY Helpers
+-----------
+
+.. kernel-doc:: drivers/tty/tty_port.c
+ :identifiers: tty_port_tty_hangup tty_port_tty_wakeup
+
+
+Modem Signals
+-------------
+
+.. kernel-doc:: drivers/tty/tty_port.c
+ :identifiers: tty_port_carrier_raised tty_port_raise_dtr_rts
+ tty_port_lower_dtr_rts
+
+----
+
+TTY Port Reference
+==================
+
+.. kernel-doc:: include/linux/tty_port.h
+ :identifiers: tty_port
+
+----
+
+TTY Port Operations Reference
+=============================
+
+.. kernel-doc:: include/linux/tty_port.h
+ :identifiers: tty_port_operations
diff --git a/Documentation/tty/tty_struct.rst b/Documentation/tty/tty_struct.rst
new file mode 100644
index 000000000000..c72f5a4293b2
--- /dev/null
+++ b/Documentation/tty/tty_struct.rst
@@ -0,0 +1,81 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========
+TTY Struct
+==========
+
+.. contents:: :local:
+
+struct tty_struct is allocated by the TTY layer upon the first open of the TTY
+device and released after the last close. The TTY layer passes this structure
+to most of the struct tty_operations hooks. Members of tty_struct are documented
+in `TTY Struct Reference`_ at the bottom.
+
+Initialization
+==============
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: tty_init_termios
+
+Name
+====
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: tty_name
+
+Reference counting
+==================
+
+.. kernel-doc:: include/linux/tty.h
+ :identifiers: tty_kref_get
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: tty_kref_put
+
+Install
+=======
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: tty_standard_install
+
+Read & Write
+============
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: tty_put_char
+
+Start & Stop
+============
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: start_tty stop_tty
+
+Wakeup
+======
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: tty_wakeup
+
+Hangup
+======
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: tty_hangup tty_vhangup tty_hung_up_p
+
+Misc
+====
+
+.. kernel-doc:: drivers/tty/tty_io.c
+ :identifiers: tty_do_resize
+
+TTY Struct Flags
+================
+
+.. kernel-doc:: include/linux/tty.h
+ :doc: TTY Struct Flags
+
+TTY Struct Reference
+====================
+
+.. kernel-doc:: include/linux/tty.h
+ :identifiers: tty_struct
diff --git a/Documentation/usb/gadget-testing.rst b/Documentation/usb/gadget-testing.rst
index c18113077889..cbbd948c626f 100644
--- a/Documentation/usb/gadget-testing.rst
+++ b/Documentation/usb/gadget-testing.rst
@@ -931,7 +931,7 @@ The uac1 function provides these attributes in its function directory:
p_volume_min playback volume control min value (in 1/256 dB)
p_volume_max playback volume control max value (in 1/256 dB)
p_volume_res playback volume control resolution (in 1/256 dB)
- req_number the number of pre-allocated request for both capture
+ req_number the number of pre-allocated requests for both capture
and playback
================ ====================================================
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index cfe6cccf0f44..e6fce2cbd99e 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -115,6 +115,7 @@ Code Seq# Include File Comments
'B' 00-1F linux/cciss_ioctl.h conflict!
'B' 00-0F include/linux/pmu.h conflict!
'B' C0-FF advanced bbus <mailto:maassen@uni-freiburg.de>
+'B' 00-0F xen/xenbus_dev.h conflict!
'C' all linux/soundcard.h conflict!
'C' 01-2F linux/capi.h conflict!
'C' F0-FF drivers/net/wan/cosa.h conflict!
@@ -134,6 +135,7 @@ Code Seq# Include File Comments
'F' 80-8F linux/arcfb.h conflict!
'F' DD video/sstfb.h conflict!
'G' 00-3F drivers/misc/sgi-gru/grulib.h conflict!
+'G' 00-0F xen/gntalloc.h, xen/gntdev.h conflict!
'H' 00-7F linux/hiddev.h conflict!
'H' 00-0F linux/hidraw.h conflict!
'H' 01 linux/mei.h conflict!
@@ -176,6 +178,7 @@ Code Seq# Include File Comments
'P' 60-6F sound/sscape_ioctl.h conflict!
'P' 00-0F drivers/usb/class/usblp.c conflict!
'P' 01-09 drivers/misc/pci_endpoint_test.c conflict!
+'P' 00-0F xen/privcmd.h conflict!
'Q' all linux/soundcard.h
'R' 00-1F linux/random.h conflict!
'R' 01 linux/rfkill.h conflict!
@@ -367,6 +370,7 @@ Code Seq# Include File Comments
<mailto:aherrman@de.ibm.com>
0xE5 00-3F linux/fuse.h
0xEC 00-01 drivers/platform/chrome/cros_ec_dev.h ChromeOS EC driver
+0xEE 00-09 uapi/linux/pfrut.h Platform Firmware Runtime Update and Telemetry
0xF3 00-3F drivers/usb/misc/sisusbvga/sisusb.h sisfb (in development)
<mailto:thomas@winischhofer.net>
0xF6 all LTTng Linux Trace Toolkit Next Generation
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index aeeb071c7688..a4267104db50 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -371,6 +371,9 @@ The bits in the dirty bitmap are cleared before the ioctl returns, unless
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled. For more information,
see the description of the capability.
+Note that the Xen shared info page, if configured, shall always be assumed
+to be dirty. KVM will not explicitly mark it as such.
+
4.9 KVM_SET_MEMORY_ALIAS
------------------------
@@ -1566,6 +1569,7 @@ otherwise it will return EBUSY error.
struct kvm_xsave {
__u32 region[1024];
+ __u32 extra[0];
};
This ioctl would copy current vcpu's xsave struct to the userspace.
@@ -1574,7 +1578,7 @@ This ioctl would copy current vcpu's xsave struct to the userspace.
4.43 KVM_SET_XSAVE
------------------
-:Capability: KVM_CAP_XSAVE
+:Capability: KVM_CAP_XSAVE and KVM_CAP_XSAVE2
:Architectures: x86
:Type: vcpu ioctl
:Parameters: struct kvm_xsave (in)
@@ -1585,9 +1589,18 @@ This ioctl would copy current vcpu's xsave struct to the userspace.
struct kvm_xsave {
__u32 region[1024];
+ __u32 extra[0];
};
-This ioctl would copy userspace's xsave struct to the kernel.
+This ioctl would copy userspace's xsave struct to the kernel. It copies
+as many bytes as are returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
+when invoked on the vm file descriptor. The size value returned by
+KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) will always be at least 4096.
+Currently, it is only greater than 4096 if a dynamic feature has been
+enabled with ``arch_prctl()``, but this may change in the future.
+
+The offsets of the state save areas in struct kvm_xsave follow the
+contents of CPUID leaf 0xD on the host.
4.44 KVM_GET_XCRS
@@ -1684,6 +1697,10 @@ userspace capabilities, and with user requirements (for example, the
user may wish to constrain cpuid to emulate older hardware, or for
feature consistency across a cluster).
+Dynamically-enabled feature bits need to be requested with
+``arch_prctl()`` before calling this ioctl. Feature bits that have not
+been requested are excluded from the result.
+
Note that certain capabilities, such as KVM_CAP_X86_DISABLE_EXITS, may
expose cpuid features (e.g. MONITOR) which are not supported by kvm in
its default configuration. If userspace enables such capabilities, it
@@ -1796,6 +1813,7 @@ No flags are specified so far, the corresponding field must be set to zero.
struct kvm_irq_routing_msi msi;
struct kvm_irq_routing_s390_adapter adapter;
struct kvm_irq_routing_hv_sint hv_sint;
+ struct kvm_irq_routing_xen_evtchn xen_evtchn;
__u32 pad[8];
} u;
};
@@ -1805,6 +1823,7 @@ No flags are specified so far, the corresponding field must be set to zero.
#define KVM_IRQ_ROUTING_MSI 2
#define KVM_IRQ_ROUTING_S390_ADAPTER 3
#define KVM_IRQ_ROUTING_HV_SINT 4
+ #define KVM_IRQ_ROUTING_XEN_EVTCHN 5
flags:
@@ -1856,6 +1875,20 @@ address_hi must be zero.
__u32 sint;
};
+ struct kvm_irq_routing_xen_evtchn {
+ __u32 port;
+ __u32 vcpu;
+ __u32 priority;
+ };
+
+
+When KVM_CAP_XEN_HVM includes the KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL bit
+in its indication of supported features, routing to Xen event channels
+is supported. Although the priority field is present, only the value
+KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL is supported, which means delivery by
+2-level event channels. FIFO event channel support may be added in
+the future.
+
4.55 KVM_SET_TSC_KHZ
--------------------
@@ -3235,6 +3268,7 @@ number.
:Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+ KVM_CAP_SYS_ATTRIBUTES for system (/dev/kvm) device (no set)
:Type: device ioctl, vm ioctl, vcpu ioctl
:Parameters: struct kvm_device_attr
:Returns: 0 on success, -1 on error
@@ -3269,7 +3303,8 @@ transferred is defined by the particular attribute.
------------------------
:Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
- KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+ KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+ KVM_CAP_SYS_ATTRIBUTES for system (/dev/kvm) device
:Type: device ioctl, vm ioctl, vcpu ioctl
:Parameters: struct kvm_device_attr
:Returns: 0 on success, -1 on error
@@ -3701,7 +3736,7 @@ KVM with the currently defined set of flags.
:Architectures: s390
:Type: vm ioctl
:Parameters: struct kvm_s390_skeys
-:Returns: 0 on success, KVM_S390_GET_KEYS_NONE if guest is not using storage
+:Returns: 0 on success, KVM_S390_GET_SKEYS_NONE if guest is not using storage
keys, negative value on error
This ioctl is used to get guest storage key values on the s390
@@ -3720,7 +3755,7 @@ you want to get.
The count field is the number of consecutive frames (starting from start_gfn)
whose storage keys to get. The count field must be at least 1 and the maximum
-allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range
+allowed value is defined as KVM_S390_SKEYS_MAX. Values outside this range
will cause the ioctl to return -EINVAL.
The skeydata_addr field is the address to a buffer large enough to hold count
@@ -3744,7 +3779,7 @@ you want to set.
The count field is the number of consecutive frames (starting from start_gfn)
whose storage keys to get. The count field must be at least 1 and the maximum
-allowed value is defined as KVM_S390_SKEYS_ALLOC_MAX. Values outside this range
+allowed value is defined as KVM_S390_SKEYS_MAX. Values outside this range
will cause the ioctl to return -EINVAL.
The skeydata_addr field is the address to a buffer containing count bytes of
@@ -5134,6 +5169,15 @@ KVM_XEN_ATTR_TYPE_SHARED_INFO
not aware of the Xen CPU id which is used as the index into the
vcpu_info[] array, so cannot know the correct default location.
+ Note that the shared info page may be constantly written to by KVM;
+ it contains the event channel bitmap used to deliver interrupts to
+ a Xen guest, amongst other things. It is exempt from dirty tracking
+ mechanisms — KVM will not explicitly mark the page as dirty each
+ time an event channel interrupt is delivered to the guest! Thus,
+ userspace should always assume that the designated GFN is dirty if
+ any vCPU has been running or any event channel interrupts can be
+ routed to the guest.
+
KVM_XEN_ATTR_TYPE_UPCALL_VECTOR
Sets the exception vector used to deliver Xen event channel upcalls.
@@ -5503,6 +5547,34 @@ the trailing ``'\0'``, is indicated by ``name_size`` in the header.
The Stats Data block contains an array of 64-bit values in the same order
as the descriptors in Descriptors block.
+4.134 KVM_GET_XSAVE2
+--------------------
+
+:Capability: KVM_CAP_XSAVE2
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_xsave (out)
+:Returns: 0 on success, -1 on error
+
+
+::
+
+ struct kvm_xsave {
+ __u32 region[1024];
+ __u32 extra[0];
+ };
+
+This ioctl would copy the current vcpu's xsave struct to userspace. It
+copies as many bytes as are returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
+when invoked on the vm file descriptor. The size value returned by
+KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) will always be at least 4096.
+Currently, it is only greater than 4096 if a dynamic feature has been
+enabled with ``arch_prctl()``, but this may change in the future.
+
+The offsets of the state save areas in struct kvm_xsave follow the contents
+of CPUID leaf 0xD on the host.
+
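+In userspace, the required buffer size should be queried before invoking the
+ioctl. A minimal sketch (``vm_fd`` and ``vcpu_fd`` are assumed to be open KVM
+file descriptors; error handling is abbreviated)::
+
+	#include <err.h>
+	#include <stdlib.h>
+	#include <sys/ioctl.h>
+	#include <linux/kvm.h>
+
+	int size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
+	struct kvm_xsave *xsave;
+
+	if (size < (int)sizeof(*xsave))
+		size = sizeof(*xsave);	/* at least 4096 bytes */
+
+	xsave = calloc(1, size);
+	if (!xsave || ioctl(vcpu_fd, KVM_GET_XSAVE2, xsave) < 0)
+		err(1, "KVM_GET_XSAVE2");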
+
5. The kvm_run structure
========================
@@ -7293,7 +7365,7 @@ trap and emulate MSRs that are outside of the scope of KVM as well as
limit the attack surface on KVM's MSR emulation code.
8.28 KVM_CAP_ENFORCE_PV_FEATURE_CPUID
------------------------------
+-------------------------------------
Architectures: x86
@@ -7401,6 +7473,7 @@ PVHVM guests. Valid flags are::
#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 2)
+ #define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 3)
The KVM_XEN_HVM_CONFIG_HYPERCALL_MSR flag indicates that the KVM_XEN_HVM_CONFIG
ioctl is available, for the guest to set its hypercall page.
@@ -7420,6 +7493,10 @@ The KVM_XEN_HVM_CONFIG_RUNSTATE flag indicates that the runstate-related
features KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR/_CURRENT/_DATA/_ADJUST are
supported by the KVM_XEN_VCPU_SET_ATTR/KVM_XEN_VCPU_GET_ATTR ioctls.
+The KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL flag indicates that IRQ routing entries
+of the type KVM_IRQ_ROUTING_XEN_EVTCHN are supported, with the priority
+field set to indicate 2-level event channel delivery.
+
8.31 KVM_CAP_PPC_MULTITCE
-------------------------
diff --git a/Documentation/virt/kvm/mmu.rst b/Documentation/virt/kvm/mmu.rst
index f60f5488e121..5b1ebad24c77 100644
--- a/Documentation/virt/kvm/mmu.rst
+++ b/Documentation/virt/kvm/mmu.rst
@@ -161,7 +161,7 @@ Shadow pages contain the following information:
If clear, this page corresponds to a guest page table denoted by the gfn
field.
role.quadrant:
- When role.gpte_is_8_bytes=0, the guest uses 32-bit gptes while the host uses 64-bit
+ When role.has_4_byte_gpte=1, the guest uses 32-bit gptes while the host uses 64-bit
sptes. That means a guest page table contains more ptes than the host,
so multiple shadow pages are needed to shadow one guest page.
For first-level shadow pages, role.quadrant can be 0 or 1 and denotes the
@@ -177,9 +177,9 @@ Shadow pages contain the following information:
The page is invalid and should not be used. It is a root page that is
currently pinned (by a cpu hardware register pointing to it); once it is
unpinned it will be destroyed.
- role.gpte_is_8_bytes:
- Reflects the size of the guest PTE for which the page is valid, i.e. '1'
- if 64-bit gptes are in use, '0' if 32-bit gptes are in use.
+ role.has_4_byte_gpte:
+ Reflects the size of the guest PTE for which the page is valid, i.e. '0'
+ if direct map or 64-bit gptes are in use, '1' if 32-bit gptes are in use.
role.efer_nx:
Contains the value of efer.nx for which the page is valid.
role.cr0_wp:
diff --git a/Documentation/vm/arch_pgtable_helpers.rst b/Documentation/vm/arch_pgtable_helpers.rst
index 552567d863b8..f8b225fc9190 100644
--- a/Documentation/vm/arch_pgtable_helpers.rst
+++ b/Documentation/vm/arch_pgtable_helpers.rst
@@ -66,9 +66,11 @@ PTE Page Table Helpers
+---------------------------+--------------------------------------------------+
| pte_mknotpresent | Invalidates a mapped PTE |
+---------------------------+--------------------------------------------------+
-| ptep_get_and_clear | Clears a PTE |
+| ptep_clear | Clears a PTE |
+---------------------------+--------------------------------------------------+
-| ptep_get_and_clear_full | Clears a PTE |
+| ptep_get_and_clear | Clears and returns PTE |
++---------------------------+--------------------------------------------------+
+| ptep_get_and_clear_full | Clears and returns PTE (batched PTE unmap) |
+---------------------------+--------------------------------------------------+
| ptep_test_and_clear_young | Clears young from a PTE |
+---------------------------+--------------------------------------------------+
@@ -247,12 +249,12 @@ SWAP Page Table Helpers
| __swp_to_pmd_entry | Creates a mapped PMD from a swapped entry (arch) |
+---------------------------+--------------------------------------------------+
| is_migration_entry | Tests a migration (read or write) swapped entry |
-+---------------------------+--------------------------------------------------+
-| is_write_migration_entry | Tests a write migration swapped entry |
-+---------------------------+--------------------------------------------------+
-| make_migration_entry_read | Converts into read migration swapped entry |
-+---------------------------+--------------------------------------------------+
-| make_migration_entry | Creates a migration swapped entry (read or write)|
-+---------------------------+--------------------------------------------------+
++-------------------------------+----------------------------------------------+
+| is_writable_migration_entry | Tests a write migration swapped entry |
++-------------------------------+----------------------------------------------+
+| make_readable_migration_entry | Creates a read migration swapped entry |
++-------------------------------+----------------------------------------------+
+| make_writable_migration_entry | Creates a write migration swapped entry |
++-------------------------------+----------------------------------------------+
[1] https://lore.kernel.org/linux-mm/20181017020930.GN30832@redhat.com/
diff --git a/Documentation/vm/cleancache.rst b/Documentation/vm/cleancache.rst
deleted file mode 100644
index 68cba9131c31..000000000000
--- a/Documentation/vm/cleancache.rst
+++ /dev/null
@@ -1,296 +0,0 @@
-.. _cleancache:
-
-==========
-Cleancache
-==========
-
-Motivation
-==========
-
-Cleancache is a new optional feature provided by the VFS layer that
-potentially dramatically increases page cache effectiveness for
-many workloads in many environments at a negligible cost.
-
-Cleancache can be thought of as a page-granularity victim cache for clean
-pages that the kernel's pageframe replacement algorithm (PFRA) would like
-to keep around, but can't since there isn't enough memory. So when the
-PFRA "evicts" a page, it first attempts to use cleancache code to
-put the data contained in that page into "transcendent memory", memory
-that is not directly accessible or addressable by the kernel and is
-of unknown and possibly time-varying size.
-
-Later, when a cleancache-enabled filesystem wishes to access a page
-in a file on disk, it first checks cleancache to see if it already
-contains it; if it does, the page of data is copied into the kernel
-and a disk access is avoided.
-
-Transcendent memory "drivers" for cleancache are currently implemented
-in Xen (using hypervisor memory) and zcache (using in-kernel compressed
-memory) and other implementations are in development.
-
-:ref:`FAQs <faq>` are included below.
-
-Implementation Overview
-=======================
-
-A cleancache "backend" that provides transcendent memory registers itself
-to the kernel's cleancache "frontend" by calling cleancache_register_ops,
-passing a pointer to a cleancache_ops structure with funcs set appropriately.
-The functions provided must conform to certain semantics as follows:
-
-Most important, cleancache is "ephemeral". Pages which are copied into
-cleancache have an indefinite lifetime which is completely unknowable
-by the kernel and so may or may not still be in cleancache at any later time.
-Thus, as its name implies, cleancache is not suitable for dirty pages.
-Cleancache has complete discretion over what pages to preserve and what
-pages to discard and when.
-
-Mounting a cleancache-enabled filesystem should call "init_fs" to obtain a
-pool id which, if positive, must be saved in the filesystem's superblock;
-a negative return value indicates failure. A "put_page" will copy a
-(presumably about-to-be-evicted) page into cleancache and associate it with
-the pool id, a file key, and a page index into the file. (The combination
-of a pool id, a file key, and an index is sometimes called a "handle".)
-A "get_page" will copy the page, if found, from cleancache into kernel memory.
-An "invalidate_page" will ensure the page no longer is present in cleancache;
-an "invalidate_inode" will invalidate all pages associated with the specified
-file; and, when a filesystem is unmounted, an "invalidate_fs" will invalidate
-all pages in all files specified by the given pool id and also surrender
-the pool id.
-
-An "init_shared_fs", like init_fs, obtains a pool id but tells cleancache
-to treat the pool as shared using a 128-bit UUID as a key. On systems
-that may run multiple kernels (such as hard partitioned or virtualized
-systems) that may share a clustered filesystem, and where cleancache
-may be shared among those kernels, calls to init_shared_fs that specify the
-same UUID will receive the same pool id, thus allowing the pages to
-be shared. Note that any security requirements must be imposed outside
-of the kernel (e.g. by "tools" that control cleancache). Or a
-cleancache implementation can simply disable shared_init by always
-returning a negative value.
-
-If a get_page is successful on a non-shared pool, the page is invalidated
-(thus making cleancache an "exclusive" cache). On a shared pool, the page
-is NOT invalidated on a successful get_page so that it remains accessible to
-other sharers. The kernel is responsible for ensuring coherency between
-cleancache (shared or not), the page cache, and the filesystem, using
-cleancache invalidate operations as required.
-
-Note that cleancache must enforce put-put-get coherency and get-get
-coherency. For the former, if two puts are made to the same handle but
-with different data, say AAA by the first put and BBB by the second, a
-subsequent get can never return the stale data (AAA). For get-get coherency,
-if a get for a given handle fails, subsequent gets for that handle will
-never succeed unless preceded by a successful put with that handle.
-
-Last, cleancache provides no SMP serialization guarantees; if two
-different Linux threads are simultaneously putting and invalidating a page
-with the same handle, the results are indeterminate. Callers must
-lock the page to ensure serial behavior.
-
-Cleancache Performance Metrics
-==============================
-
-If properly configured, monitoring of cleancache is done via debugfs in
-the `/sys/kernel/debug/cleancache` directory. The effectiveness of cleancache
-can be measured (across all filesystems) with:
-
-``succ_gets``
- number of gets that were successful
-
-``failed_gets``
- number of gets that failed
-
-``puts``
- number of puts attempted (all "succeed")
-
-``invalidates``
- number of invalidates attempted
-
-A backend implementation may provide additional metrics.
-
-.. _faq:
-
-FAQ
-===
-
-* Where's the value? (Andrew Morton)
-
-Cleancache provides a significant performance benefit to many workloads
-in many environments with negligible overhead by improving the
-effectiveness of the pagecache. Clean pagecache pages are
-saved in transcendent memory (RAM that is otherwise not directly
-addressable to the kernel); fetching those pages later avoids "refaults"
-and thus disk reads.
-
-Cleancache (and its sister code "frontswap") provide interfaces for
-this transcendent memory (aka "tmem"), which conceptually lies between
-fast kernel-directly-addressable RAM and slower DMA/asynchronous devices.
-Disallowing direct kernel or userland reads/writes to tmem
-is ideal when data is transformed to a different form and size (such
-as with compression) or secretly moved (as might be useful for write-
-balancing for some RAM-like devices). Evicted page-cache pages (and
-swap pages) are a great use for this kind of slower-than-RAM-but-much-
-faster-than-disk transcendent memory, and the cleancache (and frontswap)
-"page-object-oriented" specification provides a nice way to read and
-write -- and indirectly "name" -- the pages.
-
-In the virtual case, the whole point of virtualization is to statistically
-multiplex physical resources across the varying demands of multiple
-virtual machines. This is really hard to do with RAM and efforts to
-do it well with no kernel change have essentially failed (except in some
-well-publicized special-case workloads). Cleancache -- and frontswap --
-with a fairly small impact on the kernel, provide a huge amount
-of flexibility for more dynamic, flexible RAM multiplexing.
-Specifically, the Xen Transcendent Memory backend allows otherwise
-"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple
-virtual machines, but the pages can be compressed and deduplicated to
-optimize RAM utilization. And when guest OS's are induced to surrender
-underutilized RAM (e.g. with "self-ballooning"), page cache pages
-are the first to go, and cleancache allows those pages to be
-saved and reclaimed if overall host system memory conditions allow.
-
-And the identical interface used for cleancache can be used in
-physical systems as well. The zcache driver acts as a memory-hungry
-device that stores pages of data in a compressed state. And
-the proposed "RAMster" driver shares RAM across multiple physical
-systems.
-
-* Why does cleancache have its sticky fingers so deep inside the
- filesystems and VFS? (Andrew Morton and Christoph Hellwig)
-
-The core hooks for cleancache in VFS are in most cases a single line
-and the minimum set are placed precisely where needed to maintain
-coherency (via cleancache_invalidate operations) between cleancache,
-the page cache, and disk. All hooks compile into nothingness if
-cleancache is config'ed off and turn into a function-pointer-
-compare-to-NULL if config'ed on but no backend claims the ops
-functions, or to a compare-struct-element-to-negative if a
-backend claims the ops functions but a filesystem doesn't enable
-cleancache.
-
-Some filesystems are built entirely on top of VFS and the hooks
-in VFS are sufficient, so don't require an "init_fs" hook; the
-initial implementation of cleancache didn't provide this hook.
-But for some filesystems (such as btrfs), the VFS hooks are
-incomplete and one or more hooks in fs-specific code are required.
-And for some other filesystems, such as tmpfs, cleancache may
-be counterproductive. So it seemed prudent to require a filesystem
-to "opt in" to use cleancache, which requires adding a hook in
-each filesystem. Not all filesystems are supported by cleancache
-only because they haven't been tested. The existing set should
-be sufficient to validate the concept, the opt-in approach means
-that untested filesystems are not affected, and the hooks in the
-existing filesystems should make it very easy to add more
-filesystems in the future.
-
-The total impact of the hooks to existing fs and mm files is only
-about 40 lines added (not counting comments and blank lines).
-
-* Why not make cleancache asynchronous and batched so it can more
- easily interface with real devices with DMA instead of copying each
- individual page? (Minchan Kim)
-
-The one-page-at-a-time copy semantics simplifies the implementation
-on both the frontend and backend and also allows the backend to
-do fancy things on-the-fly like page compression and
-page deduplication. And since the data is "gone" (copied into/out
-of the pageframe) before the cleancache get/put call returns,
-a great deal of race conditions and potential coherency issues
-are avoided. While the interface seems odd for a "real device"
-or for real kernel-addressable RAM, it makes perfect sense for
-transcendent memory.
-
-* Why is non-shared cleancache "exclusive"? And where is the
- page "invalidated" after a "get"? (Minchan Kim)
-
-The main reason is to free up space in transcendent memory and
-to avoid unnecessary cleancache_invalidate calls. If you want inclusive,
-the page can be "put" immediately following the "get". If
-put-after-get for inclusive becomes common, the interface could
-be easily extended to add a "get_no_invalidate" call.
-
-The invalidate is done by the cleancache backend implementation.
-
-* What's the performance impact?
-
-Performance analysis has been presented at OLS'09 and LCA'10.
-Briefly, performance gains can be significant on most workloads,
-especially when memory pressure is high (e.g. when RAM is
-overcommitted in a virtual workload); and because the hooks are
-invoked primarily in place of or in addition to a disk read/write,
-overhead is negligible even in worst case workloads. Basically
-cleancache replaces I/O with memory-copy-CPU-overhead; on older
-single-core systems with slow memory-copy speeds, cleancache
-has little value, but in newer multicore machines, especially
-consolidated/virtualized machines, it has great value.
-
-* How do I add cleancache support for filesystem X? (Boaz Harrash)
-
-Filesystems that are well-behaved and conform to certain
-restrictions can utilize cleancache simply by making a call to
-cleancache_init_fs at mount time. Unusual, misbehaving, or
-poorly layered filesystems must either add additional hooks
-and/or undergo extensive additional testing... or should just
-not enable the optional cleancache.
-
-Some points for a filesystem to consider:
-
- - The FS should be block-device-based (e.g. a ram-based FS such
- as tmpfs should not enable cleancache)
- - To ensure coherency/correctness, the FS must ensure that all
- file removal or truncation operations either go through VFS or
- add hooks to do the equivalent cleancache "invalidate" operations
- - To ensure coherency/correctness, either inode numbers must
- be unique across the lifetime of the on-disk file OR the
- FS must provide an "encode_fh" function.
- - The FS must call the VFS superblock alloc and deactivate routines
- or add hooks to do the equivalent cleancache calls done there.
- - To maximize performance, all pages fetched from the FS should
- go through the do_mpag_readpage routine or the FS should add
- hooks to do the equivalent (cf. btrfs)
- - Currently, the FS blocksize must be the same as PAGESIZE. This
- is not an architectural restriction, but no backends currently
- support anything different.
- - A clustered FS should invoke the "shared_init_fs" cleancache
- hook to get best performance for some backends.
-
-* Why not use the KVA of the inode as the key? (Christoph Hellwig)
-
-If cleancache would use the inode virtual address instead of
-inode/filehandle, the pool id could be eliminated. But, this
-won't work because cleancache retains pagecache data pages
-persistently even when the inode has been pruned from the
-inode unused list, and only invalidates the data page if the file
-gets removed/truncated. So if cleancache used the inode kva,
-there would be potential coherency issues if/when the inode
-kva is reused for a different file. Alternately, if cleancache
-invalidated the pages when the inode kva was freed, much of the value
-of cleancache would be lost because the cache of pages in cleanache
-is potentially much larger than the kernel pagecache and is most
-useful if the pages survive inode cache removal.
-
-* Why is a global variable required?
-
-The cleancache_enabled flag is checked in all of the frequently-used
-cleancache hooks. The alternative is a function call to check a static
-variable. Since cleancache is enabled dynamically at runtime, systems
-that don't enable cleancache would suffer thousands (possibly
-tens-of-thousands) of unnecessary function calls per second. So the
-global variable allows cleancache to be enabled by default at compile
-time, but have insignificant performance impact when cleancache remains
-disabled at runtime.
-
-* Does cleanache work with KVM?
-
-The memory model of KVM is sufficiently different that a cleancache
-backend may have less value for KVM. This remains to be tested,
-especially in an overcommitted system.
-
-* Does cleancache work in userspace? It sounds useful for
- memory hungry caches like web browsers. (Jamie Lokier)
-
-No plans yet, though we agree it sounds useful, at least for
-apps that bypass the page cache (e.g. O_DIRECT).
-
-Last updated: Dan Magenheimer, April 13 2011
diff --git a/Documentation/vm/frontswap.rst b/Documentation/vm/frontswap.rst
index 1979f430c1c5..feecc5e24477 100644
--- a/Documentation/vm/frontswap.rst
+++ b/Documentation/vm/frontswap.rst
@@ -8,12 +8,6 @@ Frontswap provides a "transcendent memory" interface for swap pages.
In some environments, dramatic performance savings may be obtained because
swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk.
-(Note, frontswap -- and :ref:`cleancache` (merged at 3.0) -- are the "frontends"
-and the only necessary changes to the core kernel for transcendent memory;
-all other supporting code -- the "backends" -- is implemented as drivers.
-See the LWN.net article `Transcendent memory in a nutshell`_
-for a detailed overview of frontswap and related kernel parts)
-
.. _Transcendent memory in a nutshell: https://lwn.net/Articles/454795/
Frontswap is so named because it can be thought of as the opposite of
@@ -45,12 +39,6 @@ a disk write and, if the data is later read back, a disk read are avoided.
If a store returns failure, transcendent memory has rejected the data, and the
page can be written to swap as usual.
-If a backend chooses, frontswap can be configured as a "writethrough
-cache" by calling frontswap_writethrough(). In this mode, the reduction
-in swap device writes is lost (and also a non-trivial performance advantage)
-in order to allow the backend to arbitrarily "reclaim" space used to
-store frontswap pages to more completely manage its memory usage.
-
Note that if a page is stored and the page already exists in transcendent memory
(a "duplicate" store), either the store succeeds and the data is overwritten,
or the store fails AND the page is invalidated. This ensures stale data may
@@ -87,11 +75,9 @@ This interface is ideal when data is transformed to a different form
and size (such as with compression) or secretly moved (as might be
useful for write-balancing for some RAM-like devices). Swap pages (and
evicted page-cache pages) are a great use for this kind of slower-than-RAM-
-but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and
-cleancache) interface to transcendent memory provides a nice way to read
-and write -- and indirectly "name" -- the pages.
+but-much-faster-than-disk "pseudo-RAM device".
-Frontswap -- and cleancache -- with a fairly small impact on the kernel,
+Frontswap, with a fairly small impact on the kernel,
provides a huge amount of flexibility for more dynamic, flexible RAM
utilization in various system configurations:
@@ -269,19 +255,6 @@ the old data and ensure that it is no longer accessible. Since the
swap subsystem then writes the new data to the read swap device,
this is the correct course of action to ensure coherency.
-* What is frontswap_shrink for?
-
-When the (non-frontswap) swap subsystem swaps out a page to a real
-swap device, that page is only taking up low-value pre-allocated disk
-space. But if frontswap has placed a page in transcendent memory, that
-page may be taking up valuable real estate. The frontswap_shrink
-routine allows code outside of the swap subsystem to force pages out
-of the memory managed by frontswap and back into kernel-addressable memory.
-For example, in RAMster, a "suction driver" thread will attempt
-to "repatriate" pages sent to a remote machine back to the local machine;
-this is driven using the frontswap_shrink mechanism when memory pressure
-subsides.
-
* Why does the frontswap patch create the new include file swapfile.h?
The frontswap code depends on some swap-subsystem-internal data
diff --git a/Documentation/vm/index.rst b/Documentation/vm/index.rst
index 6f5ffef4b716..44365c4574a3 100644
--- a/Documentation/vm/index.rst
+++ b/Documentation/vm/index.rst
@@ -15,7 +15,6 @@ algorithms. If you are looking for advice on simply allocating memory, see the
active_mm
arch_pgtable_helpers
balance
- cleancache
damon/index
free_page_reporting
frontswap
@@ -31,10 +30,12 @@ algorithms. If you are looking for advice on simply allocating memory, see the
page_migration
page_frags
page_owner
+ page_table_check
remap_file_pages
slub
split_page_table_lock
transhuge
unevictable-lru
+ vmalloced-kernel-stacks
z3fold
zsmalloc
diff --git a/Documentation/vm/page_migration.rst b/Documentation/vm/page_migration.rst
index 08810f549f70..8c5cb8147e55 100644
--- a/Documentation/vm/page_migration.rst
+++ b/Documentation/vm/page_migration.rst
@@ -263,15 +263,15 @@ Monitoring Migration
The following events (counters) can be used to monitor page migration.
1. PGMIGRATE_SUCCESS: Normal page migration success. Each count means that a
- page was migrated. If the page was a non-THP page, then this counter is
- increased by one. If the page was a THP, then this counter is increased by
- the number of THP subpages. For example, migration of a single 2MB THP that
- has 4KB-size base pages (subpages) will cause this counter to increase by
- 512.
+ page was migrated. If the page was a non-THP and non-hugetlb page, then
+ this counter is increased by one. If the page was a THP or hugetlb, then
+ this counter is increased by the number of THP or hugetlb subpages.
+ For example, migration of a single 2MB THP that has 4KB-size base pages
+ (subpages) will cause this counter to increase by 512.
2. PGMIGRATE_FAIL: Normal page migration failure. Same counting rules as for
PGMIGRATE_SUCCESS, above: this will be increased by the number of subpages,
- if it was a THP.
+ if it was a THP or hugetlb.
3. THP_MIGRATION_SUCCESS: A THP was migrated without being split.
diff --git a/Documentation/vm/page_table_check.rst b/Documentation/vm/page_table_check.rst
new file mode 100644
index 000000000000..1a09472f10a3
--- /dev/null
+++ b/Documentation/vm/page_table_check.rst
@@ -0,0 +1,56 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+.. _page_table_check:
+
+================
+Page Table Check
+================
+
+Introduction
+============
+
+Page table check allows hardening the kernel by ensuring that some types of
+memory corruption are prevented.
+
+Page table check performs extra verifications at the time when new pages become
+accessible from userspace by getting their page table entries (PTEs, PMDs,
+etc.) added into the table.
+
+In case of detected corruption, the kernel is crashed. There is a small
+performance and memory overhead associated with the page table check.
+Therefore, it is disabled by default, but can be optionally enabled on systems
+where the extra hardening outweighs the performance costs. Also, because page
+table check is synchronous, it can help with debugging double-map memory
+corruption issues, by crashing the kernel at the time the wrong mapping occurs
+instead of later, which is often the case with memory corruption bugs.
+
+Double mapping detection logic
+==============================
+
++-------------------+-------------------+-------------------+------------------+
+| Current Mapping | New mapping | Permissions | Rule |
++===================+===================+===================+==================+
+| Anonymous | Anonymous | Read | Allow |
++-------------------+-------------------+-------------------+------------------+
+| Anonymous | Anonymous | Read / Write | Prohibit |
++-------------------+-------------------+-------------------+------------------+
+| Anonymous | Named | Any | Prohibit |
++-------------------+-------------------+-------------------+------------------+
+| Named | Anonymous | Any | Prohibit |
++-------------------+-------------------+-------------------+------------------+
+| Named | Named | Any | Allow |
++-------------------+-------------------+-------------------+------------------+
+
+Enabling Page Table Check
+=========================
+
+Build kernel with:
+
+- PAGE_TABLE_CHECK=y
+ Note that it can only be enabled on platforms where
+ ARCH_SUPPORTS_PAGE_TABLE_CHECK is available.
+
+- Boot with 'page_table_check=on' kernel parameter.
+
+Optionally, build the kernel with PAGE_TABLE_CHECK_ENFORCED in order to have
+page table check support without the extra kernel parameter.
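+
+As an illustrative sketch (the two Kconfig symbols are the ones this feature
+introduces; the CONFIG\_ spellings below are how they appear in ``.config``),
+the relevant configuration fragment and boot command line could look like::
+
+   CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y
+   CONFIG_PAGE_TABLE_CHECK=y
+
+   # appended to the kernel command line
+   page_table_check=on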
diff --git a/Documentation/vm/vmalloced-kernel-stacks.rst b/Documentation/vm/vmalloced-kernel-stacks.rst
new file mode 100644
index 000000000000..fc8c67833af6
--- /dev/null
+++ b/Documentation/vm/vmalloced-kernel-stacks.rst
@@ -0,0 +1,153 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================================
+Virtually Mapped Kernel Stack Support
+=====================================
+
+:Author: Shuah Khan <skhan@linuxfoundation.org>
+
+.. contents:: :local:
+
+Overview
+--------
+
+This is a compilation of information from the code and the original patch
+series that introduced the `Virtually Mapped Kernel Stacks feature
+<https://lwn.net/Articles/694348/>`_.
+
+Introduction
+------------
+
+Kernel stack overflows are often hard to debug and make the kernel
+susceptible to exploits. Problems could show up at a later time, making
+them difficult to isolate and root-cause.
+
+Virtually mapped kernel stacks with guard pages cause kernel stack
+overflows to be caught immediately rather than causing difficult-to-diagnose
+corruption.
+
+HAVE_ARCH_VMAP_STACK and VMAP_STACK configuration options enable
+support for virtually mapped stacks with guard pages. This feature
+causes reliable faults when the stack overflows. The usability of
+the stack trace after overflow and response to the overflow itself
+is architecture dependent.
+
+.. note::
+ As of this writing, arm64, powerpc, riscv, s390, um, and x86 have
+ support for VMAP_STACK.
+
+HAVE_ARCH_VMAP_STACK
+--------------------
+
+Architectures that can support Virtually Mapped Kernel Stacks should
+enable this bool configuration option. The requirements are:
+
+- vmalloc space must be large enough to hold many kernel stacks. This
+ may rule out many 32-bit architectures.
+- Stacks in vmalloc space need to work reliably. For example, if
+ vmap page tables are created on demand, either this mechanism
+ needs to work while the stack points to a virtual address with
+ unpopulated page tables or arch code (switch_to() and switch_mm(),
+ most likely) needs to ensure that the stack's page table entries
+ are populated before running on a possibly unpopulated stack.
+- If the stack overflows into a guard page, something reasonable
+ should happen. The definition of "reasonable" is flexible, but
+ instantly rebooting without logging anything would be unfriendly.
+
+VMAP_STACK
+----------
+
+The VMAP_STACK bool configuration option, when enabled, allocates virtually
+mapped task stacks. This option depends on HAVE_ARCH_VMAP_STACK.
+
+- Enable this if you want to use virtually-mapped kernel stacks
+ with guard pages. This causes kernel stack overflows to be caught
+ immediately rather than causing difficult-to-diagnose corruption.
+
+.. note::
+
+ Using this feature with KASAN requires architecture support
+ for backing virtual mappings with real shadow memory, and
+ KASAN_VMALLOC must be enabled.
+
+.. note::
+
+ When VMAP_STACK is enabled, it is not possible to perform DMA on
+ stack-allocated data.
+
+Kernel configuration options and dependencies keep changing. Refer to
+the latest code base:
+
+`Kconfig <https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/arch/Kconfig>`_
+
+Allocation
+-----------
+
+When a new kernel thread is created, a thread stack is allocated from
+virtually contiguous memory pages obtained from the page-level allocator.
+These pages are mapped into contiguous kernel virtual space with PAGE_KERNEL
+protections.
+
+alloc_thread_stack_node() calls __vmalloc_node_range() to allocate stack
+with PAGE_KERNEL protections.
+
+- Allocated stacks are cached and later reused by new threads, so memcg
+ accounting is performed manually when assigning/releasing stacks to tasks.
+ Hence, __vmalloc_node_range is called without __GFP_ACCOUNT.
+- The vm_struct is cached so that the stack can be found when freeing is
+ initiated from interrupt context; free_thread_stack() can be called in
+ interrupt context.
+- On arm64, all VMAP'd stacks need to have the same alignment to ensure
+ that VMAP'd stack overflow detection works correctly. The arch-specific
+ vmap stack allocator takes care of this detail.
+- This does not address interrupt stacks, per the original patch.
+
+Thread stack allocation is initiated from clone(), fork(), vfork(), and
+kernel_thread() via kernel_clone(). Here are a few hints for searching
+the code base to understand when and how a thread stack is allocated.
+
+The bulk of the code is in:
+`kernel/fork.c <https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/kernel/fork.c>`_.
+
+The stack_vm_area pointer in task_struct keeps track of the virtually allocated
+stack, and a non-null stack_vm_area pointer serves as an indication that
+virtually mapped kernel stacks are enabled.
+
+::
+
+ struct vm_struct *stack_vm_area;
+
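+A minimal sketch of the vmalloc-backed allocation path, condensed from
+kernel/fork.c (the percpu stack cache, memcg charging, and KASAN details
+are omitted here)::
+
+   static int alloc_thread_stack_node(struct task_struct *tsk, int node)
+   {
+           void *stack;
+
+           /*
+            * THREADINFO_GFP minus __GFP_ACCOUNT: memcg accounting is done
+            * manually because cached stacks are reused across tasks.
+            */
+           stack = __vmalloc_node_range(THREAD_SIZE, THREAD_ALIGN,
+                                        VMALLOC_START, VMALLOC_END,
+                                        THREADINFO_GFP & ~__GFP_ACCOUNT,
+                                        PAGE_KERNEL, 0, node,
+                                        __builtin_return_address(0));
+           if (!stack)
+                   return -ENOMEM;
+
+           /* Cache the vm_struct so free_thread_stack() can find it. */
+           tsk->stack_vm_area = find_vm_area(stack);
+           tsk->stack = stack;
+           return 0;
+   }
+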
+Stack overflow handling
+-----------------------
+
+Leading and trailing guard pages help detect stack overflows. When the stack
+overflows into a guard page, handlers have to be careful not to overflow
+the stack again. When handlers are called, it is likely that very little
+stack space is left.
+
+On x86, this is done by handling the page fault indicating the kernel
+stack overflow on the double-fault stack.
+
+Testing VMAP allocation with guard pages
+----------------------------------------
+
+How do we ensure that VMAP_STACK is actually allocating with a leading
+and trailing guard page? The following lkdtm tests can help detect any
+regressions.
+
+::
+
+ void lkdtm_STACK_GUARD_PAGE_LEADING()
+ void lkdtm_STACK_GUARD_PAGE_TRAILING()
+
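+With CONFIG_LKDTM enabled, these crash points can be triggered one at a
+time at runtime through debugfs (assuming debugfs is mounted at its usual
+location), e.g.::
+
+   echo STACK_GUARD_PAGE_LEADING > /sys/kernel/debug/provoke-crash/DIRECT
+
+If the guard pages are in place, the write faults immediately in the guard
+page rather than silently corrupting adjacent memory.
+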
+Conclusions
+-----------
+
+- A percpu cache of vmalloced stacks appears to be a bit faster than a
+ high-order stack allocation, at least when the cache hits.
+- THREAD_INFO_IN_TASK gets rid of arch-specific thread_info entirely and
+ simply embeds the thread_info (containing only flags) and 'int cpu' into
+ task_struct.
+- The thread stack can be freed as soon as the task is dead (without
+ waiting for RCU), and then, if vmapped stacks are in use, the entire
+ stack is cached for reuse on the same cpu.
diff --git a/LICENSES/preferred/LGPL-2.1 b/LICENSES/preferred/LGPL-2.1
index 27bb4342a3e8..b73f9b6230f5 100644
--- a/LICENSES/preferred/LGPL-2.1
+++ b/LICENSES/preferred/LGPL-2.1
@@ -1,5 +1,7 @@
Valid-License-Identifier: LGPL-2.1
+Valid-License-Identifier: LGPL-2.1-only
Valid-License-Identifier: LGPL-2.1+
+Valid-License-Identifier: LGPL-2.1-or-later
SPDX-URL: https://spdx.org/licenses/LGPL-2.1.html
Usage-Guide:
To use this license in source code, put one of the following SPDX
diff --git a/MAINTAINERS b/MAINTAINERS
index 03916ac8a4ac..777cd6fa2b3d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -190,8 +190,9 @@ M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
S: Maintained
W: https://wireless.wiki.kernel.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
+Q: https://patchwork.kernel.org/project/linux-wireless/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next.git
F: Documentation/driver-api/80211/cfg80211.rst
F: Documentation/networking/regulatory.rst
F: include/linux/ieee80211.h
@@ -226,6 +227,7 @@ F: drivers/net/ethernet/8390/
M: Eric Van Hensbergen <ericvh@gmail.com>
M: Latchesar Ionkov <lucho@ionkov.net>
M: Dominique Martinet <asmadeus@codewreck.org>
+R: Christian Schoenebeck <linux_oss@crudebyte.com>
L: v9fs-developer@lists.sourceforge.net
S: Maintained
W: http://swik.net/v9fs
@@ -1077,6 +1079,15 @@ W: http://ez.analog.com/community/linux-device-drivers
F: Documentation/devicetree/bindings/iio/adc/adi,ad7780.yaml
F: drivers/iio/adc/ad7780.c
+ANALOG DEVICES INC AD74413R DRIVER
+M: Cosmin Tanislav <cosmin.tanislav@analog.com>
+L: linux-iio@vger.kernel.org
+S: Supported
+W: http://ez.analog.com/community/linux-device-drivers
+F: Documentation/devicetree/bindings/iio/addac/adi,ad74413r.yaml
+F: drivers/iio/addac/ad74413r.c
+F: include/dt-bindings/iio/addac/adi,ad74413r.h
+
ANALOG DEVICES INC AD9389B DRIVER
M: Hans Verkuil <hverkuil-cisco@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -1609,6 +1620,7 @@ M: Olof Johansson <olof@lixom.net>
M: soc@kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+C: irc://irc.libera.chat/armlinux
T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
F: arch/arm/boot/dts/Makefile
F: arch/arm64/boot/dts/Makefile
@@ -1616,6 +1628,7 @@ F: arch/arm64/boot/dts/Makefile
ARM SUB-ARCHITECTURES
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+C: irc://irc.libera.chat/armlinux
T: git git://git.kernel.org/pub/scm/linux/kernel/git/soc/soc.git
F: arch/arm/mach-*/
F: arch/arm/plat-*/
@@ -1769,6 +1782,7 @@ F: drivers/irqchip/irq-apple-aic.c
F: drivers/mailbox/apple-mailbox.c
F: drivers/pinctrl/pinctrl-apple-gpio.c
F: drivers/soc/apple/*
+F: drivers/watchdog/apple_wdt.c
F: include/dt-bindings/interrupt-controller/apple-aic.h
F: include/dt-bindings/pinctrl/apple.h
F: include/linux/apple-mailbox.h
@@ -1903,6 +1917,7 @@ F: Documentation/trace/coresight/*
F: drivers/hwtracing/coresight/*
F: include/dt-bindings/arm/coresight-cti-dt.h
F: include/linux/coresight*
+F: samples/coresight/*
F: tools/perf/arch/arm/util/auxtrace.c
F: tools/perf/arch/arm/util/cs-etm.c
F: tools/perf/arch/arm/util/cs-etm.h
@@ -2304,6 +2319,7 @@ F: Documentation/devicetree/bindings/gpio/mstar,msc313-gpio.yaml
F: arch/arm/boot/dts/mstar-*
F: arch/arm/mach-mstar/
F: drivers/clk/mstar/
+F: drivers/clocksource/timer-msc313e.c
F: drivers/gpio/gpio-msc313.c
F: drivers/rtc/rtc-msc313.c
F: drivers/watchdog/msc313e_wdt.c
@@ -2557,10 +2573,13 @@ N: rockchip
ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES
M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
+R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org
S: Maintained
+C: irc://irc.libera.chat/linux-exynos
Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/krzk/linux.git
F: Documentation/arm/samsung/
F: Documentation/devicetree/bindings/arm/samsung/
F: Documentation/devicetree/bindings/power/pd-samsung.yaml
@@ -2807,12 +2826,15 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/iwamatsu/linux-visconti.git
F: Documentation/devicetree/bindings/arm/toshiba.yaml
+F: Documentation/devicetree/bindings/clock/toshiba,tmpv770x-pipllct.yaml
+F: Documentation/devicetree/bindings/clock/toshiba,tmpv770x-pismu.yaml
F: Documentation/devicetree/bindings/net/toshiba,visconti-dwmac.yaml
F: Documentation/devicetree/bindings/gpio/toshiba,gpio-visconti.yaml
F: Documentation/devicetree/bindings/pci/toshiba,visconti-pcie.yaml
F: Documentation/devicetree/bindings/pinctrl/toshiba,visconti-pinctrl.yaml
F: Documentation/devicetree/bindings/watchdog/toshiba,visconti-wdt.yaml
F: arch/arm64/boot/dts/toshiba/
+F: drivers/clk/visconti/
F: drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
F: drivers/gpio/gpio-visconti.c
F: drivers/pci/controller/dwc/pcie-visconti.c
@@ -3117,11 +3139,9 @@ W: https://wireless.wiki.kernel.org/en/users/Drivers/ath5k
F: drivers/net/wireless/ath/ath5k/
ATHEROS ATH6KL WIRELESS DRIVER
-M: Kalle Valo <kvalo@kernel.org>
L: linux-wireless@vger.kernel.org
-S: Supported
+S: Orphan
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath6kl
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath6kl/
ATI_REMOTE2 DRIVER
@@ -3395,14 +3415,14 @@ M: Yury Norov <yury.norov@gmail.com>
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
R: Rasmus Villemoes <linux@rasmusvillemoes.dk>
S: Maintained
-F: include/asm-generic/bitops/find.h
F: include/linux/bitmap.h
+F: include/linux/find.h
F: lib/bitmap.c
F: lib/find_bit.c
F: lib/find_bit_benchmark.c
F: lib/test_bitmap.c
-F: tools/include/asm-generic/bitops/find.h
F: tools/include/linux/bitmap.h
+F: tools/include/linux/find.h
F: tools/lib/bitmap.c
F: tools/lib/find_bit.c
@@ -3416,6 +3436,8 @@ M: Jens Axboe <axboe@kernel.dk>
L: linux-block@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
+F: Documentation/ABI/stable/sysfs-block
+F: Documentation/block/
F: block/
F: drivers/block/
F: include/linux/blk*
@@ -3728,7 +3750,7 @@ M: Al Cooper <alcooperx@gmail.com>
L: linux-usb@vger.kernel.org
L: bcm-kernel-feedback-list@broadcom.com
S: Maintained
-F: Documentation/devicetree/bindings/usb/brcm,bdc.txt
+F: Documentation/devicetree/bindings/usb/brcm,bdc.yaml
F: drivers/usb/gadget/udc/bdc/
BROADCOM BMIPS CPUFREQ DRIVER
@@ -3811,7 +3833,7 @@ M: Doug Berger <opendmb@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
L: bcm-kernel-feedback-list@broadcom.com
S: Supported
-F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
+F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.yaml
F: drivers/gpio/gpio-brcmstb.c
BROADCOM BRCMSTB I2C DRIVER
@@ -3869,7 +3891,7 @@ M: Florian Fainelli <f.fainelli@gmail.com>
L: bcm-kernel-feedback-list@broadcom.com
L: netdev@vger.kernel.org
S: Supported
-F: Documentation/devicetree/bindings/net/brcm,bcmgenet.txt
+F: Documentation/devicetree/bindings/net/brcm,bcmgenet.yaml
F: Documentation/devicetree/bindings/net/brcm,unimac-mdio.yaml
F: drivers/net/ethernet/broadcom/genet/
F: drivers/net/ethernet/broadcom/unimac.h
@@ -3911,7 +3933,7 @@ M: Rafał Miłecki <rafal@milecki.pl>
M: bcm-kernel-feedback-list@broadcom.com
L: netdev@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/net/brcm,amac.txt
+F: Documentation/devicetree/bindings/net/brcm,amac.yaml
F: drivers/net/ethernet/broadcom/bgmac*
F: drivers/net/ethernet/broadcom/unimac.h
@@ -3986,7 +4008,7 @@ M: Markus Mayer <mmayer@broadcom.com>
M: bcm-kernel-feedback-list@broadcom.com
L: linux-pm@vger.kernel.org
S: Maintained
-F: Documentation/devicetree/bindings/thermal/brcm,avs-tmon.txt
+F: Documentation/devicetree/bindings/thermal/brcm,avs-tmon.yaml
F: drivers/thermal/broadcom/brcmstb*
BROADCOM STB DPFE DRIVER
@@ -4022,6 +4044,7 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bcmsysport.*
F: drivers/net/ethernet/broadcom/unimac.h
+F: Documentation/devicetree/bindings/net/brcm,systemport.yaml
BROADCOM TG3 GIGABIT ETHERNET DRIVER
M: Siva Reddy Kallam <siva.kallam@broadcom.com>
@@ -4138,9 +4161,8 @@ N: csky
K: csky
CA8210 IEEE-802.15.4 RADIO DRIVER
-M: Harry Morris <h.morris@cascoda.com>
L: linux-wpan@vger.kernel.org
-S: Maintained
+S: Orphan
W: https://github.com/Cascoda/ca8210-linux.git
F: Documentation/devicetree/bindings/net/ieee802154/ca8210.txt
F: drivers/net/ieee802154/ca8210.c
@@ -4449,7 +4471,6 @@ L: keyrings@vger.kernel.org
S: Maintained
F: Documentation/admin-guide/module-signing.rst
F: certs/
-F: scripts/extract-cert.c
F: scripts/sign-file.c
CFAG12864B LCD DRIVER
@@ -4526,6 +4547,7 @@ F: drivers/platform/chrome/
CHROMEOS EC CODEC DRIVER
M: Cheng-Yi Chiang <cychiang@chromium.org>
+M: Tzung-Bi Shih <tzungbi@google.com>
R: Guenter Roeck <groeck@chromium.org>
S: Maintained
F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
@@ -4561,9 +4583,12 @@ F: drivers/media/cec/i2c/ch7322.c
CIRRUS LOGIC AUDIO CODEC DRIVERS
M: James Schulman <james.schulman@cirrus.com>
M: David Rhodes <david.rhodes@cirrus.com>
+M: Lucas Tanure <tanureal@opensource.cirrus.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
L: patches@opensource.cirrus.com
S: Maintained
+F: Documentation/devicetree/bindings/sound/cirrus,cs*
+F: sound/pci/hda/cs*
F: sound/soc/codecs/cs*
CIRRUS LOGIC DSP FIRMWARE DRIVER
@@ -4685,13 +4710,6 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/cla
F: include/linux/cfi.h
F: kernel/cfi.c
-CLEANCACHE API
-M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L: linux-kernel@vger.kernel.org
-S: Maintained
-F: include/linux/cleancache.h
-F: mm/cleancache.c
-
CLK API
M: Russell King <linux@armlinux.org.uk>
L: linux-clk@vger.kernel.org
@@ -4724,7 +4742,6 @@ F: drivers/media/pci/cobalt/
COCCINELLE/Semantic Patches (SmPL)
M: Julia Lawall <Julia.Lawall@inria.fr>
-M: Gilles Muller <Gilles.Muller@inria.fr>
M: Nicolas Palix <nicolas.palix@imag.fr>
L: cocci@inria.fr (moderated for non-subscribers)
S: Supported
@@ -4763,6 +4780,8 @@ M: Ian Abbott <abbotti@mev.co.uk>
M: H Hartley Sweeten <hsweeten@visionengravers.com>
S: Odd Fixes
F: drivers/comedi/
+F: include/linux/comedi/
+F: include/uapi/linux/comedi.h
COMMON CLK FRAMEWORK
M: Michael Turquette <mturquette@baylibre.com>
@@ -5758,7 +5777,7 @@ F: tools/testing/selftests/dma/
DMA-BUF HEAPS FRAMEWORK
M: Sumit Semwal <sumit.semwal@linaro.org>
-R: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+R: Benjamin Gaignard <benjamin.gaignard@collabora.com>
R: Liam Mark <lmark@codeaurora.org>
R: Laura Abbott <labbott@redhat.com>
R: Brian Starkey <Brian.Starkey@arm.com>
@@ -6488,7 +6507,7 @@ F: Documentation/devicetree/bindings/display/rockchip/
F: drivers/gpu/drm/rockchip/
DRM DRIVERS FOR STI
-M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M: Alain Volmat <alain.volmat@foss.st.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -6497,8 +6516,8 @@ F: drivers/gpu/drm/sti
DRM DRIVERS FOR STM
M: Yannick Fertre <yannick.fertre@foss.st.com>
+M: Raphael Gallais-Pou <raphael.gallais-pou@foss.st.com>
M: Philippe Cornu <philippe.cornu@foss.st.com>
-M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -7074,9 +7093,7 @@ S: Maintained
F: drivers/mmc/host/cqhci*
EMULEX 10Gbps iSCSI - OneConnect DRIVER
-M: Subbu Seetharaman <subbu.seetharaman@broadcom.com>
M: Ketan Mukadam <ketan.mukadam@broadcom.com>
-M: Jitendra Bhivare <jitendra.bhivare@broadcom.com>
L: linux-scsi@vger.kernel.org
S: Supported
W: http://www.broadcom.com
@@ -7170,7 +7187,7 @@ F: drivers/net/can/usb/etas_es58x/
ETHERNET BRIDGE
M: Roopa Prabhu <roopa@nvidia.com>
-M: Nikolay Aleksandrov <nikolay@nvidia.com>
+M: Nikolay Aleksandrov <razor@blackwall.org>
L: bridge@lists.linux-foundation.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Maintained
@@ -7196,8 +7213,10 @@ F: drivers/net/mdio/of_mdio.c
F: drivers/net/pcs/
F: drivers/net/phy/
F: include/dt-bindings/net/qca-ar803x.h
+F: include/linux/linkmode.h
F: include/linux/*mdio*.h
F: include/linux/mdio/*.h
+F: include/linux/mii.h
F: include/linux/of_net.h
F: include/linux/phy.h
F: include/linux/phy_fixed.h
@@ -7396,7 +7415,6 @@ F: include/uapi/scsi/fc/
FILE LOCKING (flock() and fcntl()/lockf())
M: Jeff Layton <jlayton@kernel.org>
-M: "J. Bruce Fields" <bfields@fieldses.org>
L: linux-fsdevel@vger.kernel.org
S: Maintained
F: fs/fcntl.c
@@ -7487,12 +7505,6 @@ F: Documentation/firmware_class/
F: drivers/base/firmware_loader/
F: include/linux/firmware.h
-FLASH ADAPTER DRIVER (IBM Flash Adapter 900GB Full Height PCI Flash Card)
-M: Joshua Morris <josh.h.morris@us.ibm.com>
-M: Philip Kelleher <pjk1939@linux.ibm.com>
-S: Maintained
-F: drivers/block/rsxx/
-
FLEXTIMER FTM-QUADDEC DRIVER
M: Patrick Havelange <patrick.havelange@essensium.com>
L: linux-iio@vger.kernel.org
@@ -7568,12 +7580,19 @@ S: Maintained
W: http://floatingpoint.sourceforge.net/emulator/index.html
F: arch/x86/math-emu/
+FRAMEBUFFER CORE
+M: Daniel Vetter <daniel@ffwll.ch>
+F: drivers/video/fbdev/core/
+S: Odd Fixes
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
FRAMEBUFFER LAYER
-L: dri-devel@lists.freedesktop.org
+M: Helge Deller <deller@gmx.de>
L: linux-fbdev@vger.kernel.org
-S: Orphan
+L: dri-devel@lists.freedesktop.org
+S: Maintained
Q: http://patchwork.kernel.org/project/linux-fbdev/list/
-T: git git://anongit.freedesktop.org/drm/drm-misc
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/deller/linux-fbdev.git
F: Documentation/fb/
F: drivers/video/
F: include/linux/fb.h
@@ -9244,6 +9263,15 @@ S: Maintained
W: https://github.com/o2genum/ideapad-slidebar
F: drivers/input/misc/ideapad_slidebar.c
+IDMAPPED MOUNTS
+M: Christian Brauner <brauner@kernel.org>
+L: linux-fsdevel@vger.kernel.org
+S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
+F: Documentation/filesystems/idmappings.rst
+F: tools/testing/selftests/mount_setattr/
+F: include/linux/mnt_idmapping.h
+
IDT VersaClock 5 CLOCK DRIVER
M: Luca Ceresoli <luca@lucaceresoli.net>
S: Maintained
@@ -9768,6 +9796,13 @@ F: drivers/crypto/keembay/keembay-ocs-hcu-core.c
F: drivers/crypto/keembay/ocs-hcu.c
F: drivers/crypto/keembay/ocs-hcu.h
+INTEL THUNDER BAY EMMC PHY DRIVER
+M: Nandhini Srikandan <nandhini.srikandan@intel.com>
+M: Rashmi A <rashmi.a@intel.com>
+S: Maintained
+F: Documentation/devicetree/bindings/phy/intel,phy-thunderbay-emmc.yaml
+F: drivers/phy/intel/phy-intel-thunderbay-emmc.c
+
INTEL MANAGEMENT ENGINE (mei)
M: Tomas Winkler <tomas.winkler@intel.com>
L: linux-kernel@vger.kernel.org
@@ -9823,10 +9858,9 @@ S: Maintained
F: drivers/mfd/intel_soc_pmic*
F: include/linux/mfd/intel_soc_pmic*
-INTEL PMT DRIVER
-M: "David E. Box" <david.e.box@linux.intel.com>
-S: Maintained
-F: drivers/mfd/intel_pmt.c
+INTEL PMT DRIVERS
+M: David E. Box <david.e.box@linux.intel.com>
+S: Supported
F: drivers/platform/x86/intel/pmt/
INTEL PRO/WIRELESS 2100, 2200BG, 2915ABG NETWORK CONNECTION SUPPORT
@@ -9893,6 +9927,11 @@ L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/intel/uncore-frequency.c
+INTEL VENDOR SPECIFIC EXTENDED CAPABILITIES DRIVER
+M: David E. Box <david.e.box@linux.intel.com>
+S: Supported
+F: drivers/platform/x86/intel/vsec.*
+
INTEL VIRTUAL BUTTON DRIVER
M: AceLan Kao <acelan.kao@canonical.com>
L: platform-driver-x86@vger.kernel.org
@@ -10402,12 +10441,11 @@ S: Odd Fixes
W: http://kernelnewbies.org/KernelJanitors
KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
-M: "J. Bruce Fields" <bfields@fieldses.org>
M: Chuck Lever <chuck.lever@oracle.com>
L: linux-nfs@vger.kernel.org
S: Supported
W: http://nfs.sourceforge.net/
-T: git git://linux-nfs.org/~bfields/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux.git
F: fs/lockd/
F: fs/nfs_common/
F: fs/nfsd/
@@ -10515,8 +10553,8 @@ F: arch/powerpc/kernel/kvm*
F: arch/powerpc/kvm/
KERNEL VIRTUAL MACHINE FOR RISC-V (KVM/riscv)
-M: Anup Patel <anup.patel@wdc.com>
-R: Atish Patra <atish.patra@wdc.com>
+M: Anup Patel <anup@brainfault.org>
+R: Atish Patra <atishp@atishpatra.org>
L: kvm@vger.kernel.org
L: kvm-riscv@lists.infradead.org
L: linux-riscv@lists.infradead.org
@@ -10681,6 +10719,7 @@ F: samples/kmemleak/kmemleak-test.c
KMOD KERNEL MODULE LOADER - USERMODE HELPER
M: Luis Chamberlain <mcgrof@kernel.org>
L: linux-kernel@vger.kernel.org
+L: linux-modules@vger.kernel.org
S: Maintained
F: include/linux/kmod.h
F: kernel/kmod.c
@@ -10860,6 +10899,12 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
F: drivers/ata/pata_arasan_cf.c
F: include/linux/pata_arasan_cf_data.h
+LIBATA PATA DRIVERS
+R: Sergey Shtylyov <s.shtylyov@omp.ru>
+L: linux-ide@vger.kernel.org
+F: drivers/ata/ata_*.c
+F: drivers/ata/pata_*.c
+
LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-ide@vger.kernel.org
@@ -11349,8 +11394,9 @@ M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
S: Maintained
W: https://wireless.wiki.kernel.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
+Q: https://patchwork.kernel.org/project/linux-wireless/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next.git
F: Documentation/networking/mac80211-injection.rst
F: Documentation/networking/mac80211_hwsim/mac80211_hwsim.rst
F: drivers/net/wireless/mac80211_hwsim.[ch]
@@ -12379,7 +12425,7 @@ F: include/uapi/linux/membarrier.h
F: kernel/sched/membarrier.c
MEMBLOCK
-M: Mike Rapoport <rppt@linux.ibm.com>
+M: Mike Rapoport <rppt@kernel.org>
L: linux-mm@kvack.org
S: Maintained
F: Documentation/core-api/boot-time-mm.rst
@@ -12970,9 +13016,10 @@ F: drivers/media/dvb-frontends/mn88473*
MODULE SUPPORT
M: Luis Chamberlain <mcgrof@kernel.org>
-M: Jessica Yu <jeyu@kernel.org>
+L: linux-modules@vger.kernel.org
+L: linux-kernel@vger.kernel.org
S: Maintained
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mcgrof/linux.git modules-next
F: include/linux/module.h
F: kernel/module.c
@@ -13276,8 +13323,8 @@ W: http://www.iptables.org/
W: http://www.nftables.org/
Q: http://patchwork.ozlabs.org/project/netfilter-devel/list/
C: irc://irc.libera.chat/netfilter
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf-next.git
F: include/linux/netfilter*
F: include/linux/netfilter/
F: include/net/netfilter/
@@ -13356,9 +13403,10 @@ NETWORKING DRIVERS (WIRELESS)
M: Kalle Valo <kvalo@kernel.org>
L: linux-wireless@vger.kernel.org
S: Maintained
-Q: http://patchwork.kernel.org/project/linux-wireless/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next.git
+W: https://wireless.wiki.kernel.org/
+Q: https://patchwork.kernel.org/project/linux-wireless/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next.git
F: Documentation/devicetree/bindings/net/wireless/
F: drivers/net/wireless/
@@ -13431,7 +13479,11 @@ L: netdev@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
F: arch/x86/net/*
+F: include/linux/ip.h
+F: include/linux/ipv6*
+F: include/net/fib*
F: include/net/ip*
+F: include/net/route.h
F: net/ipv4/
F: net/ipv6/
@@ -13492,10 +13544,6 @@ F: include/net/tls.h
F: include/uapi/linux/tls.h
F: net/tls/*
-NETWORKING [WIRELESS]
-L: linux-wireless@vger.kernel.org
-Q: http://patchwork.kernel.org/project/linux-wireless/list/
-
NETXEN (1/10) GbE SUPPORT
M: Manish Chopra <manishc@marvell.com>
M: Rahul Verma <rahulv@marvell.com>
@@ -13543,7 +13591,7 @@ F: tools/testing/selftests/nci/
NFS, SUNRPC, AND LOCKD CLIENTS
M: Trond Myklebust <trond.myklebust@hammerspace.com>
-M: Anna Schumaker <anna.schumaker@netapp.com>
+M: Anna Schumaker <anna@kernel.org>
L: linux-nfs@vger.kernel.org
S: Maintained
W: http://client.linux-nfs.org
@@ -13811,12 +13859,24 @@ F: Documentation/devicetree/bindings/display/imx/nxp,imx8mq-dcss.yaml
F: drivers/gpu/drm/imx/dcss/
NXP i.MX 8QXP ADC DRIVER
-M: Cai Huoqing <caihuoqing@baidu.com>
+M: Cai Huoqing <cai.huoqing@linux.dev>
+M: Haibo Chen <haibo.chen@nxp.com>
+L: linux-imx@nxp.com
L: linux-iio@vger.kernel.org
-S: Supported
+S: Maintained
F: Documentation/devicetree/bindings/iio/adc/nxp,imx8qxp-adc.yaml
F: drivers/iio/adc/imx8qxp-adc.c
+NXP i.MX 7D/6SX/6UL AND VF610 ADC DRIVER
+M: Haibo Chen <haibo.chen@nxp.com>
+L: linux-iio@vger.kernel.org
+L: linux-imx@nxp.com
+S: Maintained
+F: Documentation/devicetree/bindings/iio/adc/fsl,imx7d-adc.yaml
+F: Documentation/devicetree/bindings/iio/adc/fsl,vf610-adc.yaml
+F: drivers/iio/adc/imx7d_adc.c
+F: drivers/iio/adc/vf610_adc.c
+
NXP PF8100/PF8121A/PF8200 PMIC REGULATOR DEVICE DRIVER
M: Jagan Teki <jagan@amarulasolutions.com>
S: Maintained
@@ -14349,6 +14409,7 @@ M: Rob Herring <robh+dt@kernel.org>
M: Frank Rowand <frowand.list@gmail.com>
L: devicetree@vger.kernel.org
S: Maintained
+C: irc://irc.libera.chat/devicetree
W: http://www.devicetree.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
F: Documentation/ABI/testing/sysfs-firmware-ofw
@@ -14360,6 +14421,7 @@ OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh+dt@kernel.org>
L: devicetree@vger.kernel.org
S: Maintained
+C: irc://irc.libera.chat/devicetree
Q: http://patchwork.ozlabs.org/project/devicetree-bindings/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/robh/linux.git
F: Documentation/devicetree/
@@ -14504,6 +14566,15 @@ F: include/net/page_pool.h
F: include/trace/events/page_pool.h
F: net/core/page_pool.c
+PAGE TABLE CHECK
+M: Pasha Tatashin <pasha.tatashin@soleen.com>
+M: Andrew Morton <akpm@linux-foundation.org>
+L: linux-mm@kvack.org
+S: Maintained
+F: Documentation/vm/page_table_check.rst
+F: include/linux/page_table_check.h
+F: mm/page_table_check.c
+
PANASONIC LAPTOP ACPI EXTRAS DRIVER
M: Kenneth Chan <kenneth.t.chan@gmail.com>
L: platform-driver-x86@vger.kernel.org
@@ -14845,6 +14916,19 @@ L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/PCI/pci-error-recovery.rst
+PCI PEER-TO-PEER DMA (P2PDMA)
+M: Bjorn Helgaas <bhelgaas@google.com>
+M: Logan Gunthorpe <logang@deltatee.com>
+L: linux-pci@vger.kernel.org
+S: Supported
+Q: https://patchwork.kernel.org/project/linux-pci/list/
+B: https://bugzilla.kernel.org
+C: irc://irc.oftc.net/linux-pci
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
+F: Documentation/driver-api/pci/p2pdma.rst
+F: drivers/pci/p2pdma.c
+F: include/linux/pci-p2pdma.h
+
PCI MSI DRIVER FOR ALTERA MSI IP
M: Joyce Ooi <joyce.ooi@intel.com>
L: linux-pci@vger.kernel.org
@@ -15071,7 +15155,7 @@ M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@kernel.org>
R: Mark Rutland <mark.rutland@arm.com>
R: Alexander Shishkin <alexander.shishkin@linux.intel.com>
-R: Jiri Olsa <jolsa@redhat.com>
+R: Jiri Olsa <jolsa@kernel.org>
R: Namhyung Kim <namhyung@kernel.org>
L: linux-perf-users@vger.kernel.org
L: linux-kernel@vger.kernel.org
@@ -15228,9 +15312,11 @@ PIN CONTROLLER - SAMSUNG
M: Tomasz Figa <tomasz.figa@gmail.com>
M: Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
+R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org
S: Maintained
+C: irc://irc.libera.chat/linux-exynos
Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/samsung.git
F: Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -15245,6 +15331,11 @@ L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/pinctrl/pinctrl-single.c
+PIN CONTROLLER - THUNDERBAY
+M: Lakshmi Sowjanya D <lakshmi.sowjanya.d@intel.com>
+S: Supported
+F: drivers/pinctrl/pinctrl-thunderbay.c
+
PKTCDVD DRIVER
M: linux-block@vger.kernel.org
S: Orphan
@@ -15829,6 +15920,7 @@ S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath10k
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath10k/
+F: Documentation/devicetree/bindings/net/wireless/qcom,ath10k.txt
QUALCOMM ATHEROS ATH11K WIRELESS DRIVER
M: Kalle Valo <kvalo@kernel.org>
@@ -15836,11 +15928,12 @@ L: ath11k@lists.infradead.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath.git
F: drivers/net/wireless/ath/ath11k/
+F: Documentation/devicetree/bindings/net/wireless/qcom,ath11k.txt
QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
-M: ath9k-devel@qca.qualcomm.com
+M: Toke Høiland-Jørgensen <toke@toke.dk>
L: linux-wireless@vger.kernel.org
-S: Supported
+S: Maintained
W: https://wireless.wiki.kernel.org/en/users/Drivers/ath9k
F: Documentation/devicetree/bindings/net/wireless/qca,ath9k.yaml
F: drivers/net/wireless/ath/ath9k/
@@ -15862,6 +15955,15 @@ F: Documentation/admin-guide/media/qcom_camss.rst
F: Documentation/devicetree/bindings/media/*camss*
F: drivers/media/platform/qcom/camss/
+QUALCOMM CLOCK DRIVERS
+M: Bjorn Andersson <bjorn.andersson@linaro.org>
+L: linux-arm-msm@vger.kernel.org
+S: Supported
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/qcom/linux.git
+F: Documentation/devicetree/bindings/clock/qcom,*
+F: drivers/clk/qcom/
+F: include/dt-bindings/clock/qcom,*
+
QUALCOMM CORE POWER REDUCTION (CPR) AVS DRIVER
M: Niklas Cassel <nks@flawful.org>
L: linux-pm@vger.kernel.org
@@ -15906,14 +16008,6 @@ F: Documentation/devicetree/bindings/misc/qcom,fastrpc.txt
F: drivers/misc/fastrpc.c
F: include/uapi/misc/fastrpc.h
-QUALCOMM GENERIC INTERFACE I2C DRIVER
-M: Akash Asthana <akashast@codeaurora.org>
-M: Mukesh Savaliya <msavaliy@codeaurora.org>
-L: linux-i2c@vger.kernel.org
-L: linux-arm-msm@vger.kernel.org
-S: Supported
-F: drivers/i2c/busses/i2c-qcom-geni.c
-
QUALCOMM HEXAGON ARCHITECTURE
M: Brian Cain <bcain@codeaurora.org>
L: linux-hexagon@vger.kernel.org
@@ -16012,11 +16106,10 @@ F: Documentation/devicetree/bindings/media/*venus*
F: drivers/media/platform/qcom/venus/
QUALCOMM WCN36XX WIRELESS DRIVER
-M: Kalle Valo <kvalo@kernel.org>
+M: Loic Poulain <loic.poulain@linaro.org>
L: wcn36xx@lists.infradead.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
-T: git git://github.com/KrasnikovEugene/wcn36xx.git
F: drivers/net/wireless/ath/wcn36xx/
QUANTENNA QTNFMAC WIRELESS DRIVER
@@ -16270,6 +16363,13 @@ S: Maintained
F: include/sound/rt*.h
F: sound/soc/codecs/rt*
+REALTEK OTTO WATCHDOG
+M: Sander Vanheule <sander@svanheule.net>
+L: linux-watchdog@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/watchdog/realtek,otto-wdt.yaml
+F: drivers/watchdog/realtek_otto_wdt.c
+
REALTEK RTL83xx SMI DSA ROUTER CHIPS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
@@ -16319,7 +16419,6 @@ S: Supported
F: fs/reiserfs/
REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
-M: Ohad Ben-Cohen <ohad@wizery.com>
M: Bjorn Andersson <bjorn.andersson@linaro.org>
M: Mathieu Poirier <mathieu.poirier@linaro.org>
L: linux-remoteproc@vger.kernel.org
@@ -16333,7 +16432,6 @@ F: include/linux/remoteproc.h
F: include/linux/remoteproc/
REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
-M: Ohad Ben-Cohen <ohad@wizery.com>
M: Bjorn Andersson <bjorn.andersson@linaro.org>
M: Mathieu Poirier <mathieu.poirier@linaro.org>
L: linux-remoteproc@vger.kernel.org
@@ -16393,6 +16491,14 @@ F: Documentation/devicetree/bindings/i2c/renesas,rmobile-iic.yaml
F: drivers/i2c/busses/i2c-rcar.c
F: drivers/i2c/busses/i2c-sh_mobile.c
+RENESAS R-CAR SATA DRIVER
+R: Sergey Shtylyov <s.shtylyov@omp.ru>
+S: Supported
+L: linux-ide@vger.kernel.org
+L: linux-renesas-soc@vger.kernel.org
+F: Documentation/devicetree/bindings/ata/renesas,rcar-sata.yaml
+F: drivers/ata/sata_rcar.c
+
RENESAS R-CAR THERMAL DRIVERS
M: Niklas Söderlund <niklas.soderlund@ragnatech.se>
L: linux-renesas-soc@vger.kernel.org
@@ -16461,8 +16567,9 @@ M: Johannes Berg <johannes@sipsolutions.net>
L: linux-wireless@vger.kernel.org
S: Maintained
W: https://wireless.wiki.kernel.org/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
+Q: https://patchwork.kernel.org/project/linux-wireless/list/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/wireless/wireless-next.git
F: Documentation/ABI/stable/sysfs-class-rfkill
F: Documentation/driver-api/rfkill.rst
F: include/linux/rfkill.h
@@ -16727,8 +16834,9 @@ F: drivers/video/fbdev/savage/
S390
M: Heiko Carstens <hca@linux.ibm.com>
M: Vasily Gorbik <gor@linux.ibm.com>
-M: Christian Borntraeger <borntraeger@linux.ibm.com>
-R: Alexander Gordeev <agordeev@linux.ibm.com>
+M: Alexander Gordeev <agordeev@linux.ibm.com>
+R: Christian Borntraeger <borntraeger@linux.ibm.com>
+R: Sven Schnelle <svens@linux.ibm.com>
L: linux-s390@vger.kernel.org
S: Supported
W: http://www.ibm.com/developerworks/linux/linux390/
@@ -16998,13 +17106,12 @@ SAMSUNG SOC CLOCK DRIVERS
M: Sylwester Nawrocki <s.nawrocki@samsung.com>
M: Tomasz Figa <tomasz.figa@gmail.com>
M: Chanwoo Choi <cw00.choi@samsung.com>
+R: Alim Akhtar <alim.akhtar@samsung.com>
L: linux-samsung-soc@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git
-F: Documentation/devicetree/bindings/clock/exynos*.txt
F: Documentation/devicetree/bindings/clock/samsung,*.yaml
F: Documentation/devicetree/bindings/clock/samsung,s3c*
-F: Documentation/devicetree/bindings/clock/samsung,s5p*
F: drivers/clk/samsung/
F: include/dt-bindings/clock/exynos*.h
F: include/dt-bindings/clock/s3c*.h
@@ -17636,6 +17743,21 @@ S: Maintained
W: http://www.winischhofer.at/linuxsisusbvga.shtml
F: drivers/usb/misc/sisusbvga/
+SL28 CPLD MFD DRIVER
+M: Michael Walle <michael@walle.cc>
+S: Maintained
+F: Documentation/devicetree/bindings/gpio/kontron,sl28cpld-gpio.yaml
+F: Documentation/devicetree/bindings/hwmon/kontron,sl28cpld-hwmon.yaml
+F: Documentation/devicetree/bindings/interrupt-controller/kontron,sl28cpld-intc.yaml
+F: Documentation/devicetree/bindings/mfd/kontron,sl28cpld.yaml
+F: Documentation/devicetree/bindings/pwm/kontron,sl28cpld-pwm.yaml
+F: Documentation/devicetree/bindings/watchdog/kontron,sl28cpld-wdt.yaml
+F: drivers/gpio/gpio-sl28cpld.c
+F: drivers/hwmon/sl28cpld-hwmon.c
+F: drivers/irqchip/irq-sl28cpld.c
+F: drivers/pwm/pwm-sl28cpld.c
+F: drivers/watchdog/sl28cpld_wdt.c
+
SLAB ALLOCATOR
M: Christoph Lameter <cl@linux.com>
M: Pekka Enberg <penberg@kernel.org>
@@ -17973,6 +18095,7 @@ F: Documentation/sound/
F: include/sound/
F: include/uapi/sound/
F: sound/
+F: tools/testing/selftests/alsa
SOUND - COMPRESSED AUDIO
M: Vinod Koul <vkoul@kernel.org>
@@ -17992,6 +18115,13 @@ F: include/sound/dmaengine_pcm.h
F: sound/core/pcm_dmaengine.c
F: sound/soc/soc-generic-dmaengine-pcm.c
+SOUND - ALSA SELFTESTS
+M: Mark Brown <broonie@kernel.org>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+L: linux-kselftest@vger.kernel.org
+S: Supported
+F: tools/testing/selftests/alsa
+
SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
M: Liam Girdwood <lgirdwood@gmail.com>
M: Mark Brown <broonie@kernel.org>
@@ -18344,7 +18474,7 @@ F: Documentation/devicetree/bindings/sound/st,sti-asoc-card.txt
F: sound/soc/sti/
STI CEC DRIVER
-M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+M: Alain Volmat <alain.volmat@foss.st.com>
S: Maintained
F: Documentation/devicetree/bindings/media/stih-cec.txt
F: drivers/media/cec/platform/sti/
@@ -18406,6 +18536,13 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/dlink/sundance.c
+SUNPLUS RTC DRIVER
+M: Vincent Shih <vincent.sunplus@gmail.com>
+L: linux-rtc@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/rtc/sunplus,sp7021-rtc.yaml
+F: drivers/rtc/rtc-sunplus.c
+
SUPERH
M: Yoshinori Sato <ysato@users.sourceforge.jp>
M: Rich Felker <dalias@libc.org>
@@ -19491,6 +19628,14 @@ F: Documentation/trace/timerlat-tracer.rst
F: Documentation/trace/hwlat_detector.rst
F: arch/*/kernel/trace.c
+Real-time Linux Analysis (RTLA) tools
+M: Daniel Bristot de Oliveira <bristot@kernel.org>
+M: Steven Rostedt <rostedt@goodmis.org>
+L: linux-trace-devel@vger.kernel.org
+S: Maintained
+F: Documentation/tools/rtla/
+F: tools/tracing/rtla/
+
TRADITIONAL CHINESE DOCUMENTATION
M: Hu Haowen <src.res@email.cn>
L: linux-doc-tw-discuss@lists.sourceforge.net
@@ -20292,6 +20437,7 @@ M: "Michael S. Tsirkin" <mst@redhat.com>
M: Jason Wang <jasowang@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
+F: Documentation/ABI/testing/sysfs-bus-vdpa
F: Documentation/devicetree/bindings/virtio/
F: drivers/block/virtio_blk.c
F: drivers/crypto/virtio/
@@ -21028,6 +21174,14 @@ F: drivers/scsi/xen-scsifront.c
F: drivers/xen/xen-scsiback.c
F: include/xen/interface/io/vscsiif.h
+XEN PVUSB DRIVER
+M: Juergen Gross <jgross@suse.com>
+L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
+L: linux-usb@vger.kernel.org
+S: Supported
+F: drivers/usb/host/xen*
+F: include/xen/interface/io/usbif.h
+
XEN SOUND FRONTEND DRIVER
M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
@@ -21060,6 +21214,13 @@ F: fs/xfs/
F: include/uapi/linux/dqblk_xfs.h
F: include/uapi/linux/fsmap.h
+XILINX AMS DRIVER
+M: Anand Ashok Dumbre <anand.ashok.dumbre@xilinx.com>
+L: linux-iio@vger.kernel.org
+S: Maintained
+F: Documentation/devicetree/bindings/iio/adc/xlnx,zynqmp-ams.yaml
+F: drivers/iio/adc/xilinx-ams.c
+
XILINX AXI ETHERNET DRIVER
M: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
S: Maintained
@@ -21128,6 +21289,12 @@ T: git https://github.com/Xilinx/linux-xlnx.git
F: Documentation/devicetree/bindings/phy/xlnx,zynqmp-psgtr.yaml
F: drivers/phy/xilinx/phy-zynqmp.c
+XILINX EVENT MANAGEMENT DRIVER
+M: Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
+S: Maintained
+F: drivers/soc/xilinx/xlnx_event_manager.c
+F: include/linux/firmware/xlnx-event-manager.h
+
XILLYBUS DRIVER
M: Eli Billauer <eli.billauer@gmail.com>
L: linux-kernel@vger.kernel.org
diff --git a/Makefile b/Makefile
index 45278d508d81..289ce2be8032 100644
--- a/Makefile
+++ b/Makefile
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
-PATCHLEVEL = 16
+PATCHLEVEL = 17
SUBLEVEL = 0
-EXTRAVERSION =
-NAME = Gobble Gobble
+EXTRAVERSION = -rc5
+NAME = Superb Owl
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
@@ -688,17 +688,6 @@ ifdef CONFIG_FUNCTION_TRACER
CC_FLAGS_FTRACE := -pg
endif
-ifdef CONFIG_CC_IS_GCC
-RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
-RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
-endif
-ifdef CONFIG_CC_IS_CLANG
-RETPOLINE_CFLAGS := -mretpoline-external-thunk
-RETPOLINE_VDSO_CFLAGS := -mretpoline
-endif
-export RETPOLINE_CFLAGS
-export RETPOLINE_VDSO_CFLAGS
-
include $(srctree)/arch/$(SRCARCH)/Makefile
ifdef need-config
@@ -789,7 +778,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
KBUILD_CFLAGS += $(stackp-flags-y)
KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror
-KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH:"%"=%)
+KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
ifdef CONFIG_CC_IS_CLANG
KBUILD_CPPFLAGS += -Qunused-arguments
@@ -1289,15 +1278,6 @@ headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts
$(Q)$(MAKE) $(hdr-inst)=include/uapi
$(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi
-# Deprecated. It is no-op now.
-PHONY += headers_check
-headers_check:
- @echo >&2 "=================== WARNING ==================="
- @echo >&2 "Since Linux 5.5, 'make headers_check' is no-op,"
- @echo >&2 "and will be removed after Linux 5.15 release."
- @echo >&2 "Please remove headers_check from your scripts."
- @echo >&2 "==============================================="
-
ifdef CONFIG_HEADERS_INSTALL
prepare: headers
endif
@@ -1508,7 +1488,7 @@ MRPROPER_FILES += include/config include/generated \
debian snap tar-install \
.config .config.old .version \
Module.symvers \
- certs/signing_key.pem certs/signing_key.x509 \
+ certs/signing_key.pem \
certs/x509.genkey \
vmlinux-gdb.py \
*.spec
@@ -1734,9 +1714,9 @@ PHONY += prepare
# now expand this into a simple variable to reduce the cost of shell evaluations
prepare: CC_VERSION_TEXT := $(CC_VERSION_TEXT)
prepare:
- @if [ "$(CC_VERSION_TEXT)" != $(CONFIG_CC_VERSION_TEXT) ]; then \
+ @if [ "$(CC_VERSION_TEXT)" != "$(CONFIG_CC_VERSION_TEXT)" ]; then \
echo >&2 "warning: the compiler differs from the one used to build the kernel"; \
- echo >&2 " The kernel was built by: "$(CONFIG_CC_VERSION_TEXT); \
+ echo >&2 " The kernel was built by: $(CONFIG_CC_VERSION_TEXT)"; \
echo >&2 " You are using: $(CC_VERSION_TEXT)"; \
fi
diff --git a/arch/Kconfig b/arch/Kconfig
index 847fde3d22cd..678a80713b21 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -648,8 +648,7 @@ config ARCH_SUPPORTS_LTO_CLANG_THIN
config HAS_LTO_CLANG
def_bool y
- # Clang >= 11: https://github.com/ClangBuiltLinux/linux/issues/510
- depends on CC_IS_CLANG && CLANG_VERSION >= 110000 && LD_IS_LLD && AS_IS_LLVM
+ depends on CC_IS_CLANG && LD_IS_LLD && AS_IS_LLVM
depends on $(success,$(NM) --help | head -n 1 | grep -qi llvm)
depends on $(success,$(AR) --help | head -n 1 | grep -qi llvm)
depends on ARCH_SUPPORTS_LTO_CLANG
@@ -998,6 +997,10 @@ config PAGE_SIZE_LESS_THAN_64KB
depends on !PAGE_SIZE_64KB
depends on !PARISC_PAGE_SIZE_64KB
depends on !PPC_64K_PAGES
+ depends on PAGE_SIZE_LESS_THAN_256KB
+
+config PAGE_SIZE_LESS_THAN_256KB
+ def_bool y
depends on !PPC_256K_PAGES
depends on !PAGE_SIZE_256KB
@@ -1297,6 +1300,9 @@ config HAVE_ARCH_PFN_VALID
config ARCH_SUPPORTS_DEBUG_PAGEALLOC
bool
+config ARCH_SUPPORTS_PAGE_TABLE_CHECK
+ bool
+
config ARCH_SPLIT_ARG64
bool
help
diff --git a/arch/alpha/include/asm/bitops.h b/arch/alpha/include/asm/bitops.h
index 5adca78830b5..e1d8483a45f2 100644
--- a/arch/alpha/include/asm/bitops.h
+++ b/arch/alpha/include/asm/bitops.h
@@ -430,8 +430,6 @@ static inline unsigned int __arch_hweight8(unsigned int w)
#endif /* __KERNEL__ */
-#include <asm-generic/bitops/find.h>
-
#ifdef __KERNEL__
/*
diff --git a/arch/alpha/kernel/rtc.c b/arch/alpha/kernel/rtc.c
index ce3077946e1d..fb3025396ac9 100644
--- a/arch/alpha/kernel/rtc.c
+++ b/arch/alpha/kernel/rtc.c
@@ -80,7 +80,12 @@ init_rtc_epoch(void)
static int
alpha_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- mc146818_get_time(tm);
+ int ret = mc146818_get_time(tm);
+
+ if (ret < 0) {
+ dev_err_ratelimited(dev, "unable to read current time\n");
+ return ret;
+ }
/* Adjust for non-default epochs. It's easier to depend on the
generic __get_rtc_time and adjust the epoch here than create
diff --git a/arch/alpha/kernel/srm_env.c b/arch/alpha/kernel/srm_env.c
index 528d2be58182..217b4dca51dd 100644
--- a/arch/alpha/kernel/srm_env.c
+++ b/arch/alpha/kernel/srm_env.c
@@ -83,14 +83,14 @@ static int srm_env_proc_show(struct seq_file *m, void *v)
static int srm_env_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, srm_env_proc_show, PDE_DATA(inode));
+ return single_open(file, srm_env_proc_show, pde_data(inode));
}
static ssize_t srm_env_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
int res;
- unsigned long id = (unsigned long)PDE_DATA(file_inode(file));
+ unsigned long id = (unsigned long)pde_data(file_inode(file));
char *buf = (char *) __get_free_page(GFP_USER);
unsigned long ret1, ret2;
diff --git a/arch/alpha/kernel/srmcons.c b/arch/alpha/kernel/srmcons.c
index 90635ef5dafa..6dc952b0df4a 100644
--- a/arch/alpha/kernel/srmcons.c
+++ b/arch/alpha/kernel/srmcons.c
@@ -59,7 +59,7 @@ srmcons_do_receive_chars(struct tty_port *port)
} while((result.bits.status & 1) && (++loops < 10));
if (count)
- tty_schedule_flip(port);
+ tty_flip_buffer_push(port);
return count;
}
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index ca5a32228cd6..3515bc4f16a4 100644
--- a/arch/alpha/kernel/syscalls/syscall.tbl
+++ b/arch/alpha/kernel/syscalls/syscall.tbl
@@ -489,3 +489,4 @@
# 557 reserved for memfd_secret
558 common process_mrelease sys_process_mrelease
559 common futex_waitv sys_futex_waitv
+560 common set_mempolicy_home_node sys_ni_syscall
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index 2ae34702456c..8a66fe544c69 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -190,7 +190,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
local_irq_enable();
while (1);
}
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
#ifndef CONFIG_MATHEMU
@@ -575,7 +575,7 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg,
printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
pc, va, opcode, reg);
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
got_exception:
/* Ok, we caught the exception, but we don't want it. Is there
@@ -630,7 +630,7 @@ got_exception:
local_irq_enable();
while (1);
}
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
/*
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index eee5102c3d88..ec20c1004abf 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -165,17 +165,15 @@ retry:
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- /* No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
- goto retry;
- }
+ goto retry;
}
mmap_read_unlock(mm);
@@ -204,7 +202,7 @@ retry:
printk(KERN_ALERT "Unable to handle kernel paging request at "
"virtual address %016lx\n", address);
die_if_kernel("Oops", regs, cause, (unsigned long*)regs - 16);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
/* We ran out of memory, or some other thing happened to us that
made us unable to handle the page fault gracefully. */
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index f74d9860a442..3c2a4753d09b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -20,7 +20,6 @@ config ARC
select COMMON_CLK
select DMA_DIRECT_REMAP
select GENERIC_ATOMIC64 if !ISA_ARCV2 || !(ARC_HAS_LL64 && ARC_HAS_LLSC)
- select GENERIC_FIND_FIRST_BIT
# for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP
select GENERIC_IRQ_SHOW
select GENERIC_PCI_IOMAP
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index f252e7b924e9..efc54f3e35e0 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -14,10 +14,10 @@ cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
tune-mcpu-def-$(CONFIG_ISA_ARCOMPACT) := -mcpu=arc700
tune-mcpu-def-$(CONFIG_ISA_ARCV2) := -mcpu=hs38
-ifeq ($(CONFIG_ARC_TUNE_MCPU),"")
+ifeq ($(CONFIG_ARC_TUNE_MCPU),)
cflags-y += $(tune-mcpu-def-y)
else
-tune-mcpu := $(shell echo $(CONFIG_ARC_TUNE_MCPU))
+tune-mcpu := $(CONFIG_ARC_TUNE_MCPU)
ifneq ($(call cc-option,$(tune-mcpu)),)
cflags-y += $(tune-mcpu)
else
diff --git a/arch/arc/boot/dts/Makefile b/arch/arc/boot/dts/Makefile
index 8483a86c743d..4237aa5de3a3 100644
--- a/arch/arc/boot/dts/Makefile
+++ b/arch/arc/boot/dts/Makefile
@@ -2,8 +2,8 @@
# Built-in dtb
builtindtb-y := nsim_700
-ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),"")
- builtindtb-y := $(patsubst "%",%,$(CONFIG_ARC_BUILTIN_DTB_NAME))
+ifneq ($(CONFIG_ARC_BUILTIN_DTB_NAME),)
+ builtindtb-y := $(CONFIG_ARC_BUILTIN_DTB_NAME)
endif
obj-y += $(builtindtb-y).dtb.o
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index a7daaf64ae34..bdb7e190a294 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -189,7 +189,6 @@ static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
diff --git a/arch/arc/include/asm/irqflags-compact.h b/arch/arc/include/asm/irqflags-compact.h
index 863d63ad18d6..0d63e568d64c 100644
--- a/arch/arc/include/asm/irqflags-compact.h
+++ b/arch/arc/include/asm/irqflags-compact.h
@@ -50,8 +50,12 @@
* are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
*
* Noted at the time of Abilis Timer List corruption
- * Orig Bug + Rejected solution : https://lkml.org/lkml/2013/3/29/67
- * Reasoning : https://lkml.org/lkml/2013/4/8/15
+ *
+ * Orig Bug + Rejected solution:
+ * https://lore.kernel.org/lkml/1364553218-31255-1-git-send-email-vgupta@synopsys.com
+ *
+ * Reasoning:
+ * https://lore.kernel.org/lkml/CA+55aFyFWjpSVQM6M266tKrG_ZXJzZ-nYejpmXYQXbrr42mGPQ@mail.gmail.com
*
******************************************************************/
diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index e1971d34ef30..4c919c0f4b30 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -63,166 +63,4 @@ struct arc_reg_cc_build {
#define PERF_COUNT_ARC_HW_MAX (PERF_COUNT_HW_MAX + 8)
-/*
- * Some ARC pct quirks:
- *
- * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
- * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
- * The ARC 700 can either measure stalls per pipeline stage, or all stalls
- * combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
- * and all pipeline flushes (e.g. caused by mispredicts, etc.) to
- * STALLED_CYCLES_FRONTEND.
- *
- * We could start multiple performance counters and combine everything
- * afterwards, but that makes it complicated.
- *
- * Note that I$ cache misses aren't counted by either of the two!
- */
-
-/*
- * ARC PCT has hardware conditions with fixed "names" but variable "indexes"
- * (based on a specific RTL build)
- * Below is the static map between perf generic/arc specific event_id and
- * h/w condition names.
- * At the time of probe, we loop thru each index and find it's name to
- * complete the mapping of perf event_id to h/w index as latter is needed
- * to program the counter really
- */
-static const char * const arc_pmu_ev_hw_map[] = {
- /* count cycles */
- [PERF_COUNT_HW_CPU_CYCLES] = "crun",
- [PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
- [PERF_COUNT_HW_BUS_CYCLES] = "crun",
-
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
-
- /* counts condition */
- [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
- /* All jump instructions that are taken */
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
-#ifdef CONFIG_ISA_ARCV2
- [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
-#else
- [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
- [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
-#endif
- [PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */
- [PERF_COUNT_ARC_STC] = "imemwrc", /* Instr: mem write cached */
-
- [PERF_COUNT_ARC_DCLM] = "dclm", /* D-cache Load Miss */
- [PERF_COUNT_ARC_DCSM] = "dcsm", /* D-cache Store Miss */
- [PERF_COUNT_ARC_ICM] = "icm", /* I-cache Miss */
- [PERF_COUNT_ARC_EDTLB] = "edtlb", /* D-TLB Miss */
- [PERF_COUNT_ARC_EITLB] = "eitlb", /* I-TLB Miss */
-
- [PERF_COUNT_HW_CACHE_REFERENCES] = "imemrdc", /* Instr: mem read cached */
- [PERF_COUNT_HW_CACHE_MISSES] = "dclm", /* D-cache Load Miss */
-};
-
-#define C(_x) PERF_COUNT_HW_CACHE_##_x
-#define CACHE_OP_UNSUPPORTED 0xffff
-
-static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
- [C(L1D)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
- [C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = PERF_COUNT_ARC_STC,
- [C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(L1I)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = PERF_COUNT_HW_INSTRUCTIONS,
- [C(RESULT_MISS)] = PERF_COUNT_ARC_ICM,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(LL)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(DTLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
- [C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB,
- },
- /* DTLB LD/ST Miss not segregated by h/w*/
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(ITLB)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = PERF_COUNT_ARC_EITLB,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(BPU)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
- [C(RESULT_MISS)] = PERF_COUNT_HW_BRANCH_MISSES,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
- [C(NODE)] = {
- [C(OP_READ)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_WRITE)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- [C(OP_PREFETCH)] = {
- [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
- [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
- },
- },
-};
-
#endif /* __ASM_PERF_EVENT_H */
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index c0942c24d401..d36863e34bfc 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -99,8 +99,8 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
/*
* _TIF_ALLWORK_MASK includes SYSCALL_TRACE, but we don't need it.
- * SYSCALL_TRACE is anyway seperately/unconditionally tested right after a
- * syscall, so all that reamins to be tested is _TIF_WORK_MASK
+ * SYSCALL_TRACE is anyway separately/unconditionally tested right after a
+ * syscall, so all that remains to be tested is _TIF_WORK_MASK
*/
#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 145722f80c9b..adff957962da 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -17,6 +17,168 @@
/* HW holds 8 symbols + one for null terminator */
#define ARCPMU_EVENT_NAME_LEN 9
+/*
+ * Some ARC PCT quirks:
+ *
+ * PERF_COUNT_HW_STALLED_CYCLES_BACKEND
+ * PERF_COUNT_HW_STALLED_CYCLES_FRONTEND
+ * The ARC 700 can either measure stalls per pipeline stage, or all stalls
+ * combined; for now we assign all stalls to STALLED_CYCLES_BACKEND
+ * and all pipeline flushes (e.g. caused by mispredicts, etc.) to
+ * STALLED_CYCLES_FRONTEND.
+ *
+ * We could start multiple performance counters and combine everything
+ * afterwards, but that makes it complicated.
+ *
+ * Note that I$ cache misses aren't counted by either of the two!
+ */
+
+/*
+ * ARC PCT has hardware conditions with fixed "names" but variable "indexes"
+ * (based on a specific RTL build).
+ * Below is the static map between perf generic/arc specific event_id and
+ * h/w condition names.
+ * At the time of probe, we loop through each index and find its name to
+ * complete the mapping of perf event_id to h/w index, as the latter is
+ * needed to actually program the counter.
+ */
+static const char * const arc_pmu_ev_hw_map[] = {
+ /* count cycles */
+ [PERF_COUNT_HW_CPU_CYCLES] = "crun",
+ [PERF_COUNT_HW_REF_CPU_CYCLES] = "crun",
+ [PERF_COUNT_HW_BUS_CYCLES] = "crun",
+
+ [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = "bflush",
+ [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = "bstall",
+
+ /* counts condition */
+ [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
+ /* All jump instructions that are taken */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
+#ifdef CONFIG_ISA_ARCV2
+ [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
+#else
+ [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
+ [PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
+#endif
+ [PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */
+ [PERF_COUNT_ARC_STC] = "imemwrc", /* Instr: mem write cached */
+
+ [PERF_COUNT_ARC_DCLM] = "dclm", /* D-cache Load Miss */
+ [PERF_COUNT_ARC_DCSM] = "dcsm", /* D-cache Store Miss */
+ [PERF_COUNT_ARC_ICM] = "icm", /* I-cache Miss */
+ [PERF_COUNT_ARC_EDTLB] = "edtlb", /* D-TLB Miss */
+ [PERF_COUNT_ARC_EITLB] = "eitlb", /* I-TLB Miss */
+
+ [PERF_COUNT_HW_CACHE_REFERENCES] = "imemrdc", /* Instr: mem read cached */
+ [PERF_COUNT_HW_CACHE_MISSES] = "dclm", /* D-cache Load Miss */
+};
+
+#define C(_x) PERF_COUNT_HW_CACHE_##_x
+#define CACHE_OP_UNSUPPORTED 0xffff
+
+static const unsigned int arc_pmu_cache_map[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_DCLM,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_STC,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_DCSM,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_HW_INSTRUCTIONS,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_ICM,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_ARC_LDC,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_EDTLB,
+ },
+ /* DTLB LD/ST Miss not segregated by h/w */
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = PERF_COUNT_ARC_EITLB,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
+ [C(RESULT_MISS)] = PERF_COUNT_HW_BRANCH_MISSES,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(NODE)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
enum arc_pmu_attr_groups {
ARCPMU_ATTR_GR_EVENTS,
ARCPMU_ATTR_GR_FORMATS,
@@ -328,7 +490,7 @@ static void arc_pmu_stop(struct perf_event *event, int flags)
}
if (!(event->hw.state & PERF_HES_STOPPED)) {
- /* stop ARC pmu here */
+ /* stop hw counter here */
write_aux_reg(ARC_REG_PCT_INDEX, idx);
/* condition code #0 is always "never" */
@@ -361,7 +523,7 @@ static int arc_pmu_add(struct perf_event *event, int flags)
{
struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
struct hw_perf_event *hwc = &event->hw;
- int idx = hwc->idx;
+ int idx;
idx = ffz(pmu_cpu->used_mask[0]);
if (idx == arc_pmu->n_counters)
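[The comment moved into perf_event.c above describes a probe-time name-to-index mapping. As a hedged illustration only — this is not the driver's actual probe routine, and arc_pmu_read_name() is a hypothetical stand-in for the CC_NAME aux-register reads the real probe performs — the matching loop looks roughly like:]

	/*
	 * Sketch of the probe-time mapping described in the comment above.
	 * arc_pmu_read_name() is a made-up helper; the real code reads the
	 * condition name for each index from the PCT aux registers.
	 */
	static void arc_pmu_map_hw_events(int n_conditions, int *ev_hw_idx)
	{
		char name[ARCPMU_EVENT_NAME_LEN];
		int i, j;

		for (i = 0; i < n_conditions; i++) {
			arc_pmu_read_name(i, name);	/* name of h/w condition i */
			for (j = 0; j < ARRAY_SIZE(arc_pmu_ev_hw_map); j++) {
				if (arc_pmu_ev_hw_map[j] &&
				    !strcmp(arc_pmu_ev_hw_map[j], name))
					ev_hw_idx[j] = i; /* event_id -> h/w index */
			}
		}
	}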
diff --git a/arch/arc/kernel/unwind.c b/arch/arc/kernel/unwind.c
index 9e28058cdba8..200270a94558 100644
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -245,14 +245,9 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
{
struct eh_frame_hdr_table_entry *e1 = p1;
struct eh_frame_hdr_table_entry *e2 = p2;
- unsigned long v;
-
- v = e1->start;
- e1->start = e2->start;
- e2->start = v;
- v = e1->fde;
- e1->fde = e2->fde;
- e2->fde = v;
+
+ swap(e1->start, e2->start);
+ swap(e1->fde, e2->fde);
}
static void init_unwind_hdr(struct unwind_table *table,
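[For reference, the swap() used here is the generic helper from <linux/minmax.h>, which expands to roughly:]

	#define swap(a, b) \
		do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

[so the open-coded temporary removed above is unnecessary.]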
diff --git a/arch/arc/mm/dma.c b/arch/arc/mm/dma.c
index 517988e60cfc..2a7fbbb83b70 100644
--- a/arch/arc/mm/dma.c
+++ b/arch/arc/mm/dma.c
@@ -32,7 +32,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
/*
* Cache operations depending on function and direction argument, inspired by
- * https://lkml.org/lkml/2018/5/18/979
+ * https://lore.kernel.org/lkml/20180518175004.GF17671@n2100.armlinux.org.uk
* "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
* dma-mapping: provide a generic dma-noncoherent implementation)"
*
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 5787c261c9a4..dad27e4d69ff 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -149,8 +149,7 @@ retry:
/*
* Fault retry nuances, mmap_lock already relinquished by core mm
*/
- if (unlikely((fault & VM_FAULT_RETRY) &&
- (flags & FAULT_FLAG_ALLOW_RETRY))) {
+ if (unlikely(fault & VM_FAULT_RETRY)) {
flags |= FAULT_FLAG_TRIED;
goto retry;
}
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index 63ea5a606ecd..b821df7b0089 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -50,7 +50,7 @@ static void __init axs10x_enable_gpio_intc_wire(void)
* Current implementation of "irq-dw-apb-ictl" driver doesn't work well
* with stacked INTCs. In particular problem happens if its master INTC
* not yet instantiated. See discussion here -
- * https://lkml.org/lkml/2015/3/4/755
+ * https://lore.kernel.org/lkml/54F6FE2C.7020309@synopsys.com
*
* So setup the first gpio block as a passive pass thru and hide it from
* DT hardware topology - connect MB intc directly to cpu intc
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index b3ea1fa11f87..c4a875b22352 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -52,7 +52,7 @@ static void __init hsdk_enable_gpio_intc_wire(void)
* Current implementation of "irq-dw-apb-ictl" driver doesn't work well
* with stacked INTCs. In particular problem happens if its master INTC
* not yet instantiated. See discussion here -
- * https://lkml.org/lkml/2015/3/4/755
+ * https://lore.kernel.org/lkml/54F6FE2C.7020309@synopsys.com
*
* So setup the first gpio block as a passive pass thru and hide it from
* DT hardware topology - connect intc directly to cpu intc
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index fabe39169b12..4c97cb40eebb 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -83,6 +83,7 @@ config ARM
select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
select HAVE_CONTEXT_TRACKING
select HAVE_C_RECORDMCOUNT
+ select HAVE_BUILDTIME_MCOUNT_SORT
select HAVE_DEBUG_KMEMLEAK if !XIP_KERNEL
select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index cb9e48dcba88..976315dea958 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -66,8 +66,6 @@ config UNWINDER_FRAME_POINTER
config UNWINDER_ARM
bool "ARM EABI stack unwinder"
depends on AEABI && !FUNCTION_GRAPH_TRACER
- # https://github.com/ClangBuiltLinux/linux/issues/732
- depends on !LD_IS_LLD || LLD_VERSION >= 110000
select ARM_UNWIND
help
This option enables stack unwinding support in the kernel
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 74d2f1401acb..954eee8a785a 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -76,10 +76,10 @@ CPPFLAGS_vmlinux.lds += -DTEXT_OFFSET="$(TEXT_OFFSET)"
CPPFLAGS_vmlinux.lds += -DMALLOC_SIZE="$(MALLOC_SIZE)"
compress-$(CONFIG_KERNEL_GZIP) = gzip
-compress-$(CONFIG_KERNEL_LZO) = lzo
-compress-$(CONFIG_KERNEL_LZMA) = lzma
-compress-$(CONFIG_KERNEL_XZ) = xzkern
-compress-$(CONFIG_KERNEL_LZ4) = lz4
+compress-$(CONFIG_KERNEL_LZO) = lzo_with_size
+compress-$(CONFIG_KERNEL_LZMA) = lzma_with_size
+compress-$(CONFIG_KERNEL_XZ) = xzkern_with_size
+compress-$(CONFIG_KERNEL_LZ4) = lz4_with_size
libfdt_objs := fdt_rw.o fdt_ro.o fdt_wip.o fdt.o
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 235ad559acb2..e41eca79c950 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -806,6 +806,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \
logicpd-som-lv-37xx-devkit.dtb \
omap3430-sdp.dtb \
omap3-beagle.dtb \
+ omap3-beagle-ab4.dtb \
omap3-beagle-xm.dtb \
omap3-beagle-xm-ab.dtb \
omap3-cm-t3517.dtb \
diff --git a/arch/arm/boot/dts/am335x-wega.dtsi b/arch/arm/boot/dts/am335x-wega.dtsi
index 673159d93a6a..f957fea8208e 100644
--- a/arch/arm/boot/dts/am335x-wega.dtsi
+++ b/arch/arm/boot/dts/am335x-wega.dtsi
@@ -55,7 +55,7 @@
2 1 0 0 /* # 0: INACTIVE, 1: TX, 2: RX */
>;
tx-num-evt = <16>;
- rt-num-evt = <16>;
+ rx-num-evt = <16>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 6b485cbed8d5..42bff117656c 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -160,7 +160,7 @@
target-module@48210000 {
compatible = "ti,sysc-omap4-simple", "ti,sysc";
power-domains = <&prm_mpu>;
- clocks = <&mpu_clkctrl DRA7_MPU_CLKCTRL 0>;
+ clocks = <&mpu_clkctrl DRA7_MPU_MPU_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
@@ -875,10 +875,10 @@
<0x58000014 4>;
reg-names = "rev", "syss";
ti,syss-mask = <1>;
- clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 0>,
- <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>,
- <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 10>,
- <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 11>;
+ clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 0>,
+ <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>,
+ <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 10>,
+ <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 11>;
clock-names = "fck", "hdmi_clk", "sys_clk", "tv_clk";
#address-cells = <1>;
#size-cells = <1>;
@@ -912,7 +912,7 @@
SYSC_OMAP2_SOFTRESET |
SYSC_OMAP2_AUTOIDLE)>;
ti,syss-mask = <1>;
- clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>;
+ clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>;
clock-names = "fck";
#address-cells = <1>;
#size-cells = <1>;
@@ -939,8 +939,8 @@
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
ti,sysc-mask = <(SYSC_OMAP4_SOFTRESET)>;
- clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>,
- <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>;
+ clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>,
+ <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>;
clock-names = "fck", "dss_clk";
#address-cells = <1>;
#size-cells = <1>;
@@ -979,7 +979,7 @@
compatible = "vivante,gc";
reg = <0x0 0x700>;
interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&dss_clkctrl DRA7_BB2D_CLKCTRL 0>;
+ clocks = <&dss_clkctrl DRA7_DSS_BB2D_CLKCTRL 0>;
clock-names = "core";
};
};
@@ -1333,7 +1333,7 @@
ti,no-reset-on-init;
ti,no-idle;
timer@0 {
- assigned-clocks = <&wkupaon_clkctrl DRA7_TIMER1_CLKCTRL 24>;
+ assigned-clocks = <&wkupaon_clkctrl DRA7_WKUPAON_TIMER1_CLKCTRL 24>;
assigned-clock-parents = <&sys_32k_ck>;
};
};
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts
index 8cbaf1c81174..3b609d987d88 100644
--- a/arch/arm/boot/dts/imx23-evk.dts
+++ b/arch/arm/boot/dts/imx23-evk.dts
@@ -79,7 +79,6 @@
MX23_PAD_LCD_RESET__GPIO_1_18
MX23_PAD_PWM3__GPIO_1_29
MX23_PAD_PWM4__GPIO_1_30
- MX23_PAD_SSP1_DETECT__SSP1_DETECT
>;
fsl,drive-strength = <MXS_DRIVE_4mA>;
fsl,voltage = <MXS_VOLTAGE_HIGH>;
diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
index d07d8f83456d..ccfa8e320be6 100644
--- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi
@@ -5,6 +5,8 @@
* Author: Fabio Estevam <fabio.estevam@freescale.com>
*/
+#include <dt-bindings/gpio/gpio.h>
+
/ {
aliases {
backlight = &backlight;
@@ -226,6 +228,7 @@
MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059
MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059
MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059
+ MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x1b0b0
>;
};
@@ -304,7 +307,7 @@
&usdhc3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usdhc3>;
- non-removable;
+ cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi
index b7ea37ad4e55..bcec98b96411 100644
--- a/arch/arm/boot/dts/imx7ulp.dtsi
+++ b/arch/arm/boot/dts/imx7ulp.dtsi
@@ -259,7 +259,7 @@
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
assigned-clocks = <&pcc2 IMX7ULP_CLK_WDG1>;
- assigned-clocks-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>;
+ assigned-clock-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>;
timeout-sec = <40>;
};
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 3be7cba603d5..26eaba3fa96f 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -59,7 +59,7 @@
};
uart_A: serial@84c0 {
- compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
+ compatible = "amlogic,meson6-uart";
reg = <0x84c0 0x18>;
interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>;
fifo-size = <128>;
@@ -67,7 +67,7 @@
};
uart_B: serial@84dc {
- compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
+ compatible = "amlogic,meson6-uart";
reg = <0x84dc 0x18>;
interrupts = <GIC_SPI 75 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
@@ -105,7 +105,7 @@
};
uart_C: serial@8700 {
- compatible = "amlogic,meson6-uart", "amlogic,meson-uart";
+ compatible = "amlogic,meson6-uart";
reg = <0x8700 0x18>;
interrupts = <GIC_SPI 93 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
@@ -228,7 +228,7 @@
};
uart_AO: serial@4c0 {
- compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart", "amlogic,meson-uart";
+ compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart";
reg = <0x4c0 0x18>;
interrupts = <GIC_SPI 90 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi
index f80ddc98d3a2..9997a5d0333a 100644
--- a/arch/arm/boot/dts/meson8.dtsi
+++ b/arch/arm/boot/dts/meson8.dtsi
@@ -736,27 +736,27 @@
};
&uart_AO {
- compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8-uart", "amlogic,meson-ao-uart";
+ clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_A {
- compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_B {
- compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_C {
- compatible = "amlogic,meson8-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&usb0 {
diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
index b49b7cbaed4e..94f1c03decce 100644
--- a/arch/arm/boot/dts/meson8b.dtsi
+++ b/arch/arm/boot/dts/meson8b.dtsi
@@ -724,27 +724,27 @@
};
&uart_AO {
- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8b-uart", "amlogic,meson-ao-uart";
+ clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_A {
- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8b-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_B {
- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8b-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&uart_C {
- compatible = "amlogic,meson8b-uart", "amlogic,meson-uart";
- clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>;
- clock-names = "baud", "xtal", "pclk";
+ compatible = "amlogic,meson8b-uart";
+ clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>;
+ clock-names = "xtal", "pclk", "baud";
};
&usb0 {
diff --git a/arch/arm/boot/dts/omap3-beagle-ab4.dts b/arch/arm/boot/dts/omap3-beagle-ab4.dts
new file mode 100644
index 000000000000..990ff2d84686
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-beagle-ab4.dts
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/dts-v1/;
+
+#include "omap3-beagle.dts"
+
+/ {
+ model = "TI OMAP3 BeagleBoard A to B4";
+ compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3";
+};
+
+/*
+ * Workaround for capacitor C70 issue, see "Boards revision A and < B5"
+ * section at https://elinux.org/BeagleBoard_Community
+ */
+
+/* Unusable as clocksource because of unreliable oscillator */
+&counter32k {
+ status = "disabled";
+};
+
+/* Unusable as clockevent because of unreliable oscillator, allow to idle */
+&timer1_target {
+ /delete-property/ti,no-reset-on-init;
+ /delete-property/ti,no-idle;
+ timer@0 {
+ /delete-property/ti,timer-alwon;
+ };
+};
+
+/* Preferred always-on timer for clocksource */
+&timer12_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ /* Always clocked by secure_32k_fck */
+ };
+};
+
+/* Preferred timer for clockevent */
+&timer2_target {
+ ti,no-reset-on-init;
+ ti,no-idle;
+ timer@0 {
+ assigned-clocks = <&gpt2_fck>;
+ assigned-clock-parents = <&sys_ck>;
+ };
+};
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index f9f34b8458e9..0548b391334f 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -304,39 +304,6 @@
phys = <0 &hsusb2_phy>;
};
-/* Unusable as clocksource because of unreliable oscillator */
-&counter32k {
- status = "disabled";
-};
-
-/* Unusable as clockevent because if unreliable oscillator, allow to idle */
-&timer1_target {
- /delete-property/ti,no-reset-on-init;
- /delete-property/ti,no-idle;
- timer@0 {
- /delete-property/ti,timer-alwon;
- };
-};
-
-/* Preferred always-on timer for clocksource */
-&timer12_target {
- ti,no-reset-on-init;
- ti,no-idle;
- timer@0 {
- /* Always clocked by secure_32k_fck */
- };
-};
-
-/* Preferred timer for clockevent */
-&timer2_target {
- ti,no-reset-on-init;
- ti,no-idle;
- timer@0 {
- assigned-clocks = <&gpt2_fck>;
- assigned-clock-parents = <&sys_ck>;
- };
-};
-
&twl_gpio {
ti,use-leds;
/* pullups: BIT(1) */
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 32335d4ce478..d40c3d2c4914 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -8,6 +8,7 @@
#include "omap34xx.dtsi"
#include <dt-bindings/input/input.h>
+#include <dt-bindings/leds/common.h>
/*
* Default secure signed bootloader (Nokia X-Loader) does not enable L3 firewall
@@ -630,63 +631,92 @@
};
lp5523: lp5523@32 {
+ #address-cells = <1>;
+ #size-cells = <0>;
compatible = "national,lp5523";
reg = <0x32>;
clock-mode = /bits/ 8 <0>; /* LP55XX_CLOCK_AUTO */
- enable-gpio = <&gpio2 9 GPIO_ACTIVE_HIGH>; /* 41 */
+ enable-gpios = <&gpio2 9 GPIO_ACTIVE_HIGH>; /* 41 */
- chan0 {
+ led@0 {
+ reg = <0>;
chan-name = "lp5523:kb1";
led-cur = /bits/ 8 <50>;
max-cur = /bits/ 8 <100>;
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_KBD_BACKLIGHT;
};
- chan1 {
+ led@1 {
+ reg = <1>;
chan-name = "lp5523:kb2";
led-cur = /bits/ 8 <50>;
max-cur = /bits/ 8 <100>;
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_KBD_BACKLIGHT;
};
- chan2 {
+ led@2 {
+ reg = <2>;
chan-name = "lp5523:kb3";
led-cur = /bits/ 8 <50>;
max-cur = /bits/ 8 <100>;
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_KBD_BACKLIGHT;
};
- chan3 {
+ led@3 {
+ reg = <3>;
chan-name = "lp5523:kb4";
led-cur = /bits/ 8 <50>;
max-cur = /bits/ 8 <100>;
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_KBD_BACKLIGHT;
};
- chan4 {
+ led@4 {
+ reg = <4>;
chan-name = "lp5523:b";
led-cur = /bits/ 8 <50>;
max-cur = /bits/ 8 <100>;
+ color = <LED_COLOR_ID_BLUE>;
+ function = LED_FUNCTION_STATUS;
};
- chan5 {
+ led@5 {
+ reg = <5>;
chan-name = "lp5523:g";
led-cur = /bits/ 8 <50>;
max-cur = /bits/ 8 <100>;
+ color = <LED_COLOR_ID_GREEN>;
+ function = LED_FUNCTION_STATUS;
};
- chan6 {
+ led@6 {
+ reg = <6>;
chan-name = "lp5523:r";
led-cur = /bits/ 8 <50>;
max-cur = /bits/ 8 <100>;
+ color = <LED_COLOR_ID_RED>;
+ function = LED_FUNCTION_STATUS;
};
- chan7 {
+ led@7 {
+ reg = <7>;
chan-name = "lp5523:kb5";
led-cur = /bits/ 8 <50>;
max-cur = /bits/ 8 <100>;
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_KBD_BACKLIGHT;
};
- chan8 {
+ led@8 {
+ reg = <8>;
chan-name = "lp5523:kb6";
led-cur = /bits/ 8 <50>;
max-cur = /bits/ 8 <100>;
+ color = <LED_COLOR_ID_WHITE>;
+ function = LED_FUNCTION_KBD_BACKLIGHT;
};
};
diff --git a/arch/arm/boot/dts/spear320-hmi.dts b/arch/arm/boot/dts/spear320-hmi.dts
index 367ba48aac3e..b587e4ec11e5 100644
--- a/arch/arm/boot/dts/spear320-hmi.dts
+++ b/arch/arm/boot/dts/spear320-hmi.dts
@@ -235,7 +235,6 @@
#address-cells = <1>;
#size-cells = <0>;
reg = <0x41>;
- irq-over-gpio;
irq-gpios = <&gpiopinctrl 29 0x4>;
id = <0>;
blocks = <0x5>;
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
index 580ca497f312..f8c5899fbdba 100644
--- a/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
+++ b/arch/arm/boot/dts/ste-ux500-samsung-skomer.dts
@@ -185,10 +185,6 @@
cap-sd-highspeed;
cap-mmc-highspeed;
/* All direction control is used */
- st,sig-dir-cmd;
- st,sig-dir-dat0;
- st,sig-dir-dat2;
- st,sig-dir-dat31;
st,sig-pin-fbclk;
full-pwr-cycle;
vmmc-supply = <&ab8500_ldo_aux3_reg>;
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index 383c632eba7b..a9ed79b7f871 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -31,7 +31,6 @@ CONFIG_ARCH_BCM2835=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_AEABI=y
CONFIG_KSM=y
-CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_SECCOMP=y
CONFIG_KEXEC=y
diff --git a/arch/arm/configs/qcom_defconfig b/arch/arm/configs/qcom_defconfig
index 0daa9c0d298e..9981566f2096 100644
--- a/arch/arm/configs/qcom_defconfig
+++ b/arch/arm/configs/qcom_defconfig
@@ -27,7 +27,6 @@ CONFIG_PCIE_QCOM=y
CONFIG_SMP=y
CONFIG_PREEMPT=y
CONFIG_HIGHMEM=y
-CONFIG_CLEANCACHE=y
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CPU_IDLE=y
diff --git a/arch/arm/crypto/blake2s-shash.c b/arch/arm/crypto/blake2s-shash.c
index 17c1c3bfe2f5..763c73beea2d 100644
--- a/arch/arm/crypto/blake2s-shash.c
+++ b/arch/arm/crypto/blake2s-shash.c
@@ -13,12 +13,12 @@
static int crypto_blake2s_update_arm(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
+ return crypto_blake2s_update(desc, in, inlen, false);
}
static int crypto_blake2s_final_arm(struct shash_desc *desc, u8 *out)
{
- return crypto_blake2s_final(desc, out, blake2s_compress);
+ return crypto_blake2s_final(desc, out, false);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \
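[The trailing argument changed from a compression-function pointer to a bool, avoiding indirect calls. Reading the reworked crypto_blake2s_update()/crypto_blake2s_final() helpers, the bool acts as a force_generic flag, so false (as above) lets the arch-optimized blake2s_compress() be used. A hedged sketch of what flipping it would mean:]

	/*
	 * Hedged sketch: passing true would force the generic C compression
	 * instead of the architecture implementation. Illustrative only.
	 */
	static int crypto_blake2s_update_force_generic(struct shash_desc *desc,
						       const u8 *in,
						       unsigned int inlen)
	{
		return crypto_blake2s_update(desc, in, inlen, true);
	}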
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 7d23d4bb2168..6fe67963ba5a 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -288,6 +288,7 @@
*/
#define ALT_UP(instr...) \
.pushsection ".alt.smp.init", "a" ;\
+ .align 2 ;\
.long 9998b - . ;\
9997: instr ;\
.if . - 9997b == 2 ;\
@@ -299,6 +300,7 @@
.popsection
#define ALT_UP_B(label) \
.pushsection ".alt.smp.init", "a" ;\
+ .align 2 ;\
.long 9998b - . ;\
W(b) . + (label - 9998b) ;\
.popsection
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index c92e42a5c8f7..8e94fe7ab5eb 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -264,7 +264,6 @@ static inline int find_next_bit_le(const void *p, int size, int offset)
#endif
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
/*
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index c576fa7d9bf8..0c70eb688a00 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -180,7 +180,10 @@ void pci_ioremap_set_mem_type(int mem_type);
static inline void pci_ioremap_set_mem_type(int mem_type) {}
#endif
-extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);
+struct resource;
+
+#define pci_remap_iospace pci_remap_iospace
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
/*
* PCI configuration space mapping function.
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 6af68edfa53a..bdc35c0e8dfb 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -96,6 +96,7 @@ unsigned long __get_wchan(struct task_struct *p);
#define __ALT_SMP_ASM(smp, up) \
"9998: " smp "\n" \
" .pushsection \".alt.smp.init\", \"a\"\n" \
+ " .align 2\n" \
" .long 9998b - .\n" \
" " up "\n" \
" .popsection\n"
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 36fbc3329252..32dbfd81f42a 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -11,6 +11,7 @@
#include <linux/string.h>
#include <asm/memory.h>
#include <asm/domain.h>
+#include <asm/unaligned.h>
#include <asm/unified.h>
#include <asm/compiler.h>
@@ -497,7 +498,10 @@ do { \
} \
default: __err = __get_user_bad(); break; \
} \
- *(type *)(dst) = __val; \
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) \
+ put_unaligned(__val, (type *)(dst)); \
+ else \
+ *(type *)(dst) = __val; /* aligned by caller */ \
if (__err) \
goto err_label; \
} while (0)
@@ -507,7 +511,9 @@ do { \
const type *__pk_ptr = (dst); \
unsigned long __dst = (unsigned long)__pk_ptr; \
int __err = 0; \
- type __val = *(type *)src; \
+ type __val = IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
+ ? get_unaligned((type *)(src)) \
+ : *(type *)(src); /* aligned by caller */ \
switch (sizeof(type)) { \
case 1: __put_user_asm_byte(__val, __dst, __err, ""); break; \
case 2: __put_user_asm_half(__val, __dst, __err, ""); break; \
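[These macros back copy_from_kernel_nofault()/copy_to_kernel_nofault(), whose dst/src pointers may be arbitrarily aligned. A hedged usage sketch — the buffer and offset are illustrative, not taken from any caller in this series:]

	/* Illustrative only: buf and kaddr are assumed names. */
	static long peek_misaligned(const void *kaddr, char *buf)
	{
		/*
		 * buf + 1 is misaligned for a long-sized store; after the
		 * change above it is handled via put_unaligned() on
		 * configurations with efficient unaligned access, instead
		 * of a plain (potentially trapping) store.
		 */
		return copy_from_kernel_nofault(buf + 1, kaddr, sizeof(long));
	}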
diff --git a/arch/arm/include/debug/pl01x.S b/arch/arm/include/debug/pl01x.S
index 0c7bfa4c10db..c7e02d0628bf 100644
--- a/arch/arm/include/debug/pl01x.S
+++ b/arch/arm/include/debug/pl01x.S
@@ -8,13 +8,6 @@
*/
#include <linux/amba/serial.h>
-#ifdef CONFIG_DEBUG_ZTE_ZX
-#undef UART01x_DR
-#undef UART01x_FR
-#define UART01x_DR 0x04
-#define UART01x_FR 0x14
-#endif
-
#ifdef CONFIG_DEBUG_UART_PHYS
.macro addruart, rp, rv, tmp
ldr \rp, =CONFIG_DEBUG_UART_PHYS
diff --git a/arch/arm/kernel/atags_proc.c b/arch/arm/kernel/atags_proc.c
index 3c2faf2bd124..3ec2afe78423 100644
--- a/arch/arm/kernel/atags_proc.c
+++ b/arch/arm/kernel/atags_proc.c
@@ -13,7 +13,7 @@ struct buffer {
static ssize_t atags_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct buffer *b = PDE_DATA(file_inode(file));
+ struct buffer *b = pde_data(file_inode(file));
return simple_read_from_buffer(buf, count, ppos, b->data, b->size);
}
diff --git a/arch/arm/kernel/perf_callchain.c b/arch/arm/kernel/perf_callchain.c
index 3b69a76d341e..bc6b246ab55e 100644
--- a/arch/arm/kernel/perf_callchain.c
+++ b/arch/arm/kernel/perf_callchain.c
@@ -64,11 +64,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
{
struct frame_tail __user *tail;
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- /* We don't support guest os callchain now */
- return;
- }
-
perf_callchain_store(entry, regs->ARM_pc);
if (!current->mm)
@@ -100,20 +95,12 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
{
struct stackframe fr;
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- /* We don't support guest os callchain now */
- return;
- }
-
arm_get_current_stackframe(regs, &fr);
walk_stackframe(&fr, callchain_trace, entry);
}
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- return perf_guest_cbs->get_guest_ip();
-
return instruction_pointer(regs);
}
@@ -121,17 +108,10 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
{
int misc = 0;
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- if (perf_guest_cbs->is_user_mode())
- misc |= PERF_RECORD_MISC_GUEST_USER;
- else
- misc |= PERF_RECORD_MISC_GUEST_KERNEL;
- } else {
- if (user_mode(regs))
- misc |= PERF_RECORD_MISC_USER;
- else
- misc |= PERF_RECORD_MISC_KERNEL;
- }
+ if (user_mode(regs))
+ misc |= PERF_RECORD_MISC_USER;
+ else
+ misc |= PERF_RECORD_MISC_KERNEL;
return misc;
}
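[Context for the deletions above: in this series the guest-state checks move into generic perf code behind static calls, so arch helpers no longer open-code them. A simplified, hedged sketch of the generic-side equivalent:]

	/*
	 * Simplified sketch of what generic code does instead; see
	 * perf_guest_state() in <linux/perf_event.h>. Not ARM-specific code.
	 */
	static int guest_misc_flags(void)
	{
		unsigned int state = perf_guest_state();

		if (!(state & PERF_GUEST_ACTIVE))
			return 0;

		return (state & PERF_GUEST_USER) ? PERF_RECORD_MISC_GUEST_USER
						 : PERF_RECORD_MISC_GUEST_KERNEL;
	}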
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index c5e25cf7219b..da04ed85855a 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -335,7 +335,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
if (panic_on_oops)
panic("Fatal exception");
if (signr)
- do_exit(signr);
+ make_task_dead(signr);
}
/*
diff --git a/arch/arm/mach-dove/pcie.c b/arch/arm/mach-dove/pcie.c
index ee91ac6b5ebf..2a493bdfffc6 100644
--- a/arch/arm/mach-dove/pcie.c
+++ b/arch/arm/mach-dove/pcie.c
@@ -38,6 +38,7 @@ static int num_pcie_ports;
static int __init dove_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct pcie_port *pp;
+ struct resource realio;
if (nr >= num_pcie_ports)
return 0;
@@ -53,10 +54,10 @@ static int __init dove_pcie_setup(int nr, struct pci_sys_data *sys)
orion_pcie_setup(pp->base);
- if (pp->index == 0)
- pci_ioremap_io(sys->busnr * SZ_64K, DOVE_PCIE0_IO_PHYS_BASE);
- else
- pci_ioremap_io(sys->busnr * SZ_64K, DOVE_PCIE1_IO_PHYS_BASE);
+ realio.start = sys->busnr * SZ_64K;
+ realio.end = realio.start + SZ_64K - 1;
+ pci_remap_iospace(&realio, pp->index == 0 ? DOVE_PCIE0_IO_PHYS_BASE :
+ DOVE_PCIE1_IO_PHYS_BASE);
/*
* IORESOURCE_MEM
diff --git a/arch/arm/mach-iop32x/pci.c b/arch/arm/mach-iop32x/pci.c
index ab0010dc3145..7a215d2ee7e2 100644
--- a/arch/arm/mach-iop32x/pci.c
+++ b/arch/arm/mach-iop32x/pci.c
@@ -185,6 +185,7 @@ iop3xx_pci_abort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
int iop3xx_pci_setup(int nr, struct pci_sys_data *sys)
{
struct resource *res;
+ struct resource realio;
if (nr != 0)
return 0;
@@ -206,7 +207,9 @@ int iop3xx_pci_setup(int nr, struct pci_sys_data *sys)
pci_add_resource_offset(&sys->resources, res, sys->mem_offset);
- pci_ioremap_io(0, IOP3XX_PCI_LOWER_IO_PA);
+ realio.start = 0;
+ realio.end = realio.start + SZ_64K - 1;
+ pci_remap_iospace(&realio, IOP3XX_PCI_LOWER_IO_PA);
return 1;
}
diff --git a/arch/arm/mach-mv78xx0/pcie.c b/arch/arm/mach-mv78xx0/pcie.c
index 636d84b40466..e15646af7f26 100644
--- a/arch/arm/mach-mv78xx0/pcie.c
+++ b/arch/arm/mach-mv78xx0/pcie.c
@@ -101,6 +101,7 @@ static void __init mv78xx0_pcie_preinit(void)
static int __init mv78xx0_pcie_setup(int nr, struct pci_sys_data *sys)
{
struct pcie_port *pp;
+ struct resource realio;
if (nr >= num_pcie_ports)
return 0;
@@ -115,7 +116,9 @@ static int __init mv78xx0_pcie_setup(int nr, struct pci_sys_data *sys)
orion_pcie_set_local_bus_nr(pp->base, sys->busnr);
orion_pcie_setup(pp->base);
- pci_ioremap_io(nr * SZ_64K, MV78XX0_PCIE_IO_PHYS_BASE(nr));
+ realio.start = nr * SZ_64K;
+ realio.end = realio.start + SZ_64K - 1;
+ pci_remap_iospace(&realio, MV78XX0_PCIE_IO_PHYS_BASE(nr));
pci_add_resource_offset(&sys->resources, &pp->res, sys->mem_offset);
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 6daaa645ae5d..21413a9b7b6c 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -263,9 +263,9 @@ static int __init omapdss_init_of(void)
}
r = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ put_device(&pdev->dev);
if (r) {
pr_err("Unable to populate DSS submodule devices\n");
- put_device(&pdev->dev);
return r;
}
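[Moving the put_device() above the error check drops the reference taken by the earlier device lookup on the success path too; previously it leaked whenever of_platform_populate() succeeded. The general shape, assuming the usual of_find_device_by_node()-style lookup earlier in the function:]

	pdev = of_find_device_by_node(node);	/* takes a device reference */
	if (!pdev)
		return -ENODEV;

	r = of_platform_populate(node, NULL, NULL, &pdev->dev);
	put_device(&pdev->dev);		/* dropped on success and failure */
	if (r)
		return r;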
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index ccb0e3732c0d..31d1a21f6041 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -752,8 +752,10 @@ static int __init _init_clkctrl_providers(void)
for_each_matching_node(np, ti_clkctrl_match_table) {
ret = _setup_clkctrl_provider(np);
- if (ret)
+ if (ret) {
+ of_node_put(np);
break;
+ }
}
return ret;
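[for_each_matching_node() holds a reference on the node it yields each iteration and drops it when advancing, so leaving the loop early leaks the current node's reference unless it is dropped explicitly — which is what the added of_node_put() does. The general pattern:]

	struct device_node *np;

	for_each_matching_node(np, match_table) {
		ret = setup_one(np);		/* hypothetical per-node setup */
		if (ret) {
			of_node_put(np);	/* drop the ref held for 'np' */
			break;
		}
	}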
diff --git a/arch/arm/mach-orion5x/pci.c b/arch/arm/mach-orion5x/pci.c
index 76951bfbacf5..92e938bba20d 100644
--- a/arch/arm/mach-orion5x/pci.c
+++ b/arch/arm/mach-orion5x/pci.c
@@ -142,6 +142,7 @@ static struct pci_ops pcie_ops = {
static int __init pcie_setup(struct pci_sys_data *sys)
{
struct resource *res;
+ struct resource realio;
int dev;
/*
@@ -164,7 +165,9 @@ static int __init pcie_setup(struct pci_sys_data *sys)
pcie_ops.read = pcie_rd_conf_wa;
}
- pci_ioremap_io(sys->busnr * SZ_64K, ORION5X_PCIE_IO_PHYS_BASE);
+ realio.start = sys->busnr * SZ_64K;
+ realio.end = realio.start + SZ_64K - 1;
+ pci_remap_iospace(&realio, ORION5X_PCIE_IO_PHYS_BASE);
/*
* Request resources.
@@ -466,6 +469,7 @@ static void __init orion5x_setup_pci_wins(void)
static int __init pci_setup(struct pci_sys_data *sys)
{
struct resource *res;
+ struct resource realio;
/*
* Point PCI unit MBUS decode windows to DRAM space.
@@ -482,7 +486,9 @@ static int __init pci_setup(struct pci_sys_data *sys)
*/
orion5x_setbits(PCI_CMD, PCI_CMD_HOST_REORDER);
- pci_ioremap_io(sys->busnr * SZ_64K, ORION5X_PCI_IO_PHYS_BASE);
+ realio.start = sys->busnr * SZ_64K;
+ realio.end = realio.start + SZ_64K - 1;
+ pci_remap_iospace(&realio, ORION5X_PCI_IO_PHYS_BASE);
/*
* Request resources
diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig
index 43ddec677c0b..594edf9bbea4 100644
--- a/arch/arm/mach-socfpga/Kconfig
+++ b/arch/arm/mach-socfpga/Kconfig
@@ -2,6 +2,7 @@
menuconfig ARCH_INTEL_SOCFPGA
bool "Altera SOCFPGA family"
depends on ARCH_MULTI_V7
+ select ARCH_HAS_RESET_CONTROLLER
select ARCH_SUPPORTS_BIG_ENDIAN
select ARM_AMBA
select ARM_GIC
@@ -18,6 +19,7 @@ menuconfig ARCH_INTEL_SOCFPGA
select PL310_ERRATA_727915
select PL310_ERRATA_753970 if PL310
select PL310_ERRATA_769419
+ select RESET_CONTROLLER
if ARCH_INTEL_SOCFPGA
config SOCFPGA_SUSPEND
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index adbb3817d0be..6f499559d193 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -1005,7 +1005,7 @@ static int __init noalign_setup(char *__unused)
__setup("noalign", noalign_setup);
/*
- * This needs to be done after sysctl_init, otherwise sys/ will be
+ * This needs to be done after sysctl_init_bases(), otherwise sys/ will be
* overwritten. Actually, this shouldn't be in sys/ at all since
* it isn't a sysctl, and it doesn't contain sysctl information.
* We now locate it in /proc/cpu/alignment instead.
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index a1cebe363ed5..a062e07516dd 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -117,7 +117,7 @@ static void die_kernel_fault(const char *msg, struct mm_struct *mm,
show_pte(KERN_ALERT, mm, addr);
die("Oops", regs, fsr);
bust_spinlocks(0);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
}
/*
@@ -322,7 +322,7 @@ retry:
return 0;
}
- if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (!(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_RETRY) {
flags |= FAULT_FLAG_TRIED;
goto retry;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 6e830b9418c9..197f8eb3a775 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -459,16 +459,20 @@ void pci_ioremap_set_mem_type(int mem_type)
pci_ioremap_mem_type = mem_type;
}
-int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
+int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
- BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);
+ unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
- return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
- PCI_IO_VIRT_BASE + offset + SZ_64K,
- phys_addr,
+ if (!(res->flags & IORESOURCE_IO))
+ return -EINVAL;
+
+ if (res->end > IO_SPACE_LIMIT)
+ return -EINVAL;
+
+ return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
__pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
-EXPORT_SYMBOL_GPL(pci_ioremap_io);
+EXPORT_SYMBOL(pci_remap_iospace);
void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
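[Unlike pci_ioremap_io(), the standard pci_remap_iospace() validates its argument: the resource must be an I/O resource ending at or below IO_SPACE_LIMIT. A hedged caller sketch — busnr and PHYS_IO_BASE are illustrative stand-ins:]

	struct resource realio = {
		.name	= "PCI I/O",
		.start	= busnr * SZ_64K,
		.end	= busnr * SZ_64K + SZ_64K - 1,
		.flags	= IORESOURCE_IO,	/* required, else -EINVAL */
	};

	ret = pci_remap_iospace(&realio, PHYS_IO_BASE);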
diff --git a/arch/arm/probes/kprobes/Makefile b/arch/arm/probes/kprobes/Makefile
index 14db56f49f0a..6159010dac4a 100644
--- a/arch/arm/probes/kprobes/Makefile
+++ b/arch/arm/probes/kprobes/Makefile
@@ -1,4 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+KASAN_SANITIZE_actions-common.o := n
+KASAN_SANITIZE_actions-arm.o := n
+KASAN_SANITIZE_actions-thumb.o := n
obj-$(CONFIG_KPROBES) += core.o actions-common.o checkers-common.o
obj-$(CONFIG_ARM_KPROBES_TEST) += test-kprobes.o
test-kprobes-objs := test-core.o
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 543100151f2b..ac964612d8b0 100644
--- a/arch/arm/tools/syscall.tbl
+++ b/arch/arm/tools/syscall.tbl
@@ -463,3 +463,4 @@
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 7619fbffcea2..ec5b082f3de6 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -59,6 +59,10 @@ unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
static __read_mostly unsigned int xen_events_irq;
+static __read_mostly phys_addr_t xen_grant_frames;
+
+#define GRANT_TABLE_INDEX 0
+#define EXT_REGION_INDEX 1
uint32_t xen_start_flags;
EXPORT_SYMBOL(xen_start_flags);
@@ -300,9 +304,115 @@ static void __init xen_acpi_guest_init(void)
#endif
}
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
+/*
+ * A type-less, Xen-specific resource which contains the extended regions
+ * (unused regions of guest physical address space provided by the hypervisor).
+ */
+static struct resource xen_resource = {
+ .name = "Xen unused space",
+};
+
+int __init arch_xen_unpopulated_init(struct resource **res)
+{
+ struct device_node *np;
+ struct resource *regs, *tmp_res;
+ uint64_t min_gpaddr = -1, max_gpaddr = 0;
+ unsigned int i, nr_reg = 0;
+ int rc;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (!acpi_disabled)
+ return -ENODEV;
+
+ np = of_find_compatible_node(NULL, NULL, "xen,xen");
+ if (WARN_ON(!np))
+ return -ENODEV;
+
+ /* Skip region 0 which is reserved for grant table space */
+ while (of_get_address(np, nr_reg + EXT_REGION_INDEX, NULL, NULL))
+ nr_reg++;
+
+ if (!nr_reg) {
+ pr_err("No extended regions are found\n");
+ return -EINVAL;
+ }
+
+ regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL);
+ if (!regs)
+ return -ENOMEM;
+
+ /*
+ * Create a resource from the extended regions provided by the hypervisor to be
+ * used as unused address space for Xen scratch pages.
+ */
+ for (i = 0; i < nr_reg; i++) {
+ rc = of_address_to_resource(np, i + EXT_REGION_INDEX, &regs[i]);
+ if (rc)
+ goto err;
+
+ if (max_gpaddr < regs[i].end)
+ max_gpaddr = regs[i].end;
+ if (min_gpaddr > regs[i].start)
+ min_gpaddr = regs[i].start;
+ }
+
+ xen_resource.start = min_gpaddr;
+ xen_resource.end = max_gpaddr;
+
+ /*
+ * Mark holes between extended regions as unavailable. The rest of that
+ * address space will be available for the allocation.
+ */
+ for (i = 1; i < nr_reg; i++) {
+ resource_size_t start, end;
+
+ /* There is an overlap between regions */
+ if (regs[i - 1].end + 1 > regs[i].start) {
+ rc = -EINVAL;
+ goto err;
+ }
+
+ /* There is no hole between regions */
+ if (regs[i - 1].end + 1 == regs[i].start)
+ continue;
+
+ start = regs[i - 1].end + 1;
+ end = regs[i].start - 1;
+
+ tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
+ if (!tmp_res) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ tmp_res->name = "Unavailable space";
+ tmp_res->start = start;
+ tmp_res->end = end;
+
+ rc = insert_resource(&xen_resource, tmp_res);
+ if (rc) {
+ pr_err("Cannot insert resource %pR (%d)\n", tmp_res, rc);
+ kfree(tmp_res);
+ goto err;
+ }
+ }
+
+ *res = &xen_resource;
+
+err:
+ kfree(regs);
+
+ return rc;
+}
+#endif
+
static void __init xen_dt_guest_init(void)
{
struct device_node *xen_node;
+ struct resource res;
xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
if (!xen_node) {
@@ -311,13 +421,19 @@ static void __init xen_dt_guest_init(void)
}
xen_events_irq = irq_of_parse_and_map(xen_node, 0);
+
+ if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
+ pr_err("Xen grant table region is not found\n");
+ return;
+ }
+ xen_grant_frames = res.start;
}
static int __init xen_guest_init(void)
{
struct xen_add_to_physmap xatp;
struct shared_info *shared_info_page = NULL;
- int cpu;
+ int rc, cpu;
if (!xen_domain())
return 0;
@@ -370,12 +486,16 @@ static int __init xen_guest_init(void)
for_each_possible_cpu(cpu)
per_cpu(xen_vcpu_id, cpu) = cpu;
- xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
- if (xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
- &xen_auto_xlat_grant_frames.vaddr,
- xen_auto_xlat_grant_frames.count)) {
+ if (!xen_grant_frames) {
+ xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
+ rc = xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
+ &xen_auto_xlat_grant_frames.vaddr,
+ xen_auto_xlat_grant_frames.count);
+ } else
+ rc = gnttab_setup_auto_xlat_frames(xen_grant_frames);
+ if (rc) {
free_percpu(xen_vcpu_info);
- return -ENOMEM;
+ return rc;
}
gnttab_init();
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f6e333b59314..09b885cc4db5 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -120,7 +120,6 @@ config ARM64
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
- select GENERIC_FIND_FIRST_BIT
select GENERIC_IDLE_POLL_SETUP
select GENERIC_IRQ_IPI
select GENERIC_IRQ_PROBE
@@ -671,15 +670,42 @@ config ARM64_ERRATUM_1508412
config ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
bool
+config ARM64_ERRATUM_2051678
+ bool "Cortex-A510: 2051678: disable Hardware Update of the page table dirty bit"
+ default y
+ help
+ This option adds the workaround for ARM Cortex-A510 erratum 2051678.
+ Affected Cortex-A510 cores might not respect the ordering rules for
+ hardware update of the page table's dirty bit. The workaround
+ is to not enable the feature on affected CPUs.
+
+ If unsure, say Y.
+
+config ARM64_ERRATUM_2077057
+ bool "Cortex-A510: 2077057: workaround software-step corrupting SPSR_EL2"
+ help
+ This option adds the workaround for ARM Cortex-A510 erratum 2077057.
+ Affected Cortex-A510 may corrupt SPSR_EL2 when a step exception is
+ expected, but a Pointer Authentication trap is taken instead. The
+ erratum causes SPSR_EL1 to be copied to SPSR_EL2, which could allow
+ EL1 to cause a return to EL2 with a guest controlled ELR_EL2.
+
+ This can only happen when EL2 is stepping EL1.
+
+ When these conditions occur, the SPSR_EL2 value is unchanged from the
+ previous guest entry, and can be restored from the in-memory copy.
+
+ If unsure, say Y.
+
config ARM64_ERRATUM_2119858
- bool "Cortex-A710: 2119858: workaround TRBE overwriting trace data in FILL mode"
+ bool "Cortex-A710/X2: 2119858: workaround TRBE overwriting trace data in FILL mode"
default y
depends on CORESIGHT_TRBE
select ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
help
- This option adds the workaround for ARM Cortex-A710 erratum 2119858.
+ This option adds the workaround for ARM Cortex-A710/X2 erratum 2119858.
- Affected Cortex-A710 cores could overwrite up to 3 cache lines of trace
+ Affected Cortex-A710/X2 cores could overwrite up to 3 cache lines of trace
data at the base of the buffer (pointed to by TRBASER_EL1) in FILL mode in
the event of a WRAP event.
@@ -762,14 +788,14 @@ config ARM64_ERRATUM_2253138
If unsure, say Y.
config ARM64_ERRATUM_2224489
- bool "Cortex-A710: 2224489: workaround TRBE writing to address out-of-range"
+ bool "Cortex-A710/X2: 2224489: workaround TRBE writing to address out-of-range"
depends on CORESIGHT_TRBE
default y
select ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
help
- This option adds the workaround for ARM Cortex-A710 erratum 2224489.
+ This option adds the workaround for ARM Cortex-A710/X2 erratum 2224489.
- Affected Cortex-A710 cores might write to an out-of-range address, not reserved
+ Affected Cortex-A710/X2 cores might write to an out-of-range address, not reserved
for TRBE. Under some conditions, the TRBE might generate a write to the next
virtually addressed page following the last page of the TRBE address space
(i.e., the TRBLIMITR_EL1.LIMIT), instead of wrapping around to the base.
@@ -779,6 +805,65 @@ config ARM64_ERRATUM_2224489
If unsure, say Y.
+config ARM64_ERRATUM_2064142
+ bool "Cortex-A510: 2064142: workaround TRBE register writes while disabled"
+ depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+ default y
+ help
+ This option adds the workaround for ARM Cortex-A510 erratum 2064142.
+
+ Affected Cortex-A510 cores might fail to write into system registers after the
+ TRBE has been disabled. Under some conditions after the TRBE has been disabled,
+ writes into TRBE registers TRBLIMITR_EL1, TRBPTR_EL1, TRBBASER_EL1, TRBSR_EL1,
+ and TRBTRG_EL1 will be ignored and will not take effect.
+
+ Work around this in the driver by executing TSB CSYNC and DSB after collection
+ is stopped and before performing a system register write to one of the affected
+ registers.
+
+ If unsure, say Y.
+
+config ARM64_ERRATUM_2038923
+ bool "Cortex-A510: 2038923: workaround TRBE corruption with enable"
+ depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+ default y
+ help
+ This option adds the workaround for ARM Cortex-A510 erratum 2038923.
+
+ Affected Cortex-A510 cores might hold an inconsistent view within the CPU of
+ whether trace is prohibited. As a result, the trace buffer or trace buffer
+ state might be corrupted. This happens after the TRBE buffer has been enabled
+ by setting TRBLIMITR_EL1.E, followed by just a single context synchronization
+ event before execution changes from a context in which trace is prohibited to
+ one where it isn't, or vice versa.
+
+ Work around this in the driver by preventing an inconsistent view of whether the
+ trace is prohibited or not based on TRBLIMITR_EL1.E by immediately following a
+ change to TRBLIMITR_EL1.E with at least one ISB instruction before an ERET, or
+ two ISB instructions if no ERET is to take place.
+
+ If unsure, say Y.
+
+config ARM64_ERRATUM_1902691
+ bool "Cortex-A510: 1902691: workaround TRBE trace corruption"
+ depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in
+ default y
+ help
+ This option adds the workaround for ARM Cortex-A510 erratum 1902691.
+
+ Affected Cortex-A510 cores might corrupt trace data as it is being written to
+ memory. Effectively the TRBE is broken and hence cannot be used to capture
+ trace data.
+
+ Work around this problem in the driver by preventing TRBE initialization on
+ affected CPUs. The firmware must have disabled access to the TRBE for the
+ kernel on such implementations; this check covers the kernel for any firmware
+ that doesn't do so already.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
@@ -1136,6 +1221,10 @@ config NUMA
select GENERIC_ARCH_NUMA
select ACPI_NUMA if ACPI
select OF_NUMA
+ select HAVE_SETUP_PER_CPU_AREA
+ select NEED_PER_CPU_EMBED_FIRST_CHUNK
+ select NEED_PER_CPU_PAGE_FIRST_CHUNK
+ select USE_PERCPU_NUMA_NODE_ID
help
Enable NUMA (Non-Uniform Memory Access) support.
@@ -1152,22 +1241,6 @@ config NODES_SHIFT
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
-config USE_PERCPU_NUMA_NODE_ID
- def_bool y
- depends on NUMA
-
-config HAVE_SETUP_PER_CPU_AREA
- def_bool y
- depends on NUMA
-
-config NEED_PER_CPU_EMBED_FIRST_CHUNK
- def_bool y
- depends on NUMA
-
-config NEED_PER_CPU_PAGE_FIRST_CHUNK
- def_bool y
- depends on NUMA
-
source "kernel/Kconfig.hz"
config ARCH_SPARSEMEM_ENABLE
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 7d5d58800170..21697449d762 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -309,9 +309,6 @@ config ARCH_VISCONTI
help
This enables support for Toshiba Visconti SoCs Family.
-config ARCH_VULCAN
- def_bool n
-
config ARCH_XGENE
bool "AppliedMicro X-Gene SOC Family"
help
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
index 517519e6e87f..f84d4b489e0b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12-common.dtsi
@@ -107,6 +107,12 @@
no-map;
};
+ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
+ secmon_reserved_bl32: secmon@5300000 {
+ reg = <0x0 0x05300000 0x0 0x2000000>;
+ no-map;
+ };
+
linux,cma {
compatible = "shared-dma-pool";
reusable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
index d8838dde0f0f..4fb31c2ba31c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dts
@@ -157,14 +157,6 @@
regulator-always-on;
};
- reserved-memory {
- /* TEE Reserved Memory */
- bl32_reserved: bl32@5000000 {
- reg = <0x0 0x05300000 0x0 0x2000000>;
- no-map;
- };
- };
-
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
index 3e968b244191..fd3fa82e4c33 100644
--- a/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-g12b-odroid-n2.dtsi
@@ -17,7 +17,7 @@
rtc1 = &vrtc;
};
- dioo2133: audio-amplifier-0 {
+ dio2133: audio-amplifier-0 {
compatible = "simple-audio-amplifier";
enable-gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>;
VCC-supply = <&vcc_5v>;
@@ -219,7 +219,7 @@
audio-widgets = "Line", "Lineout";
audio-aux-devs = <&tdmout_b>, <&tdmout_c>, <&tdmin_a>,
<&tdmin_b>, <&tdmin_c>, <&tdmin_lb>,
- <&dioo2133>;
+ <&dio2133>;
audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
"TDMOUT_B IN 1", "FRDDR_B OUT 1",
"TDMOUT_B IN 2", "FRDDR_C OUT 1",
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 6b457b2c30a4..aa14ea017a61 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -49,6 +49,12 @@
no-map;
};
+ /* 32 MiB reserved for ARM Trusted Firmware (BL32) */
+ secmon_reserved_bl32: secmon@5300000 {
+ reg = <0x0 0x05300000 0x0 0x2000000>;
+ no-map;
+ };
+
linux,cma {
compatible = "shared-dma-pool";
reusable;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
index 212c6aa5a3b8..5751c48620ed 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-bananapi-m5.dts
@@ -123,7 +123,7 @@
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
- enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>;
+ enable-gpio = <&gpio_ao GPIOE_2 GPIO_ACTIVE_HIGH>;
enable-active-high;
regulator-always-on;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
index 0bd1e98a0eef..ddb1b345397f 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-odroid.dtsi
@@ -48,7 +48,7 @@
regulator-max-microvolt = <3300000>;
vin-supply = <&vcc_5v>;
- enable-gpio = <&gpio GPIOE_2 GPIO_ACTIVE_HIGH>;
+ enable-gpio = <&gpio_ao GPIOE_2 GPIO_OPEN_DRAIN>;
enable-active-high;
regulator-always-on;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
index 427475846fc7..a5d79f2f7c19 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
@@ -203,14 +203,6 @@
regulator-always-on;
};
- reserved-memory {
- /* TEE Reserved Memory */
- bl32_reserved: bl32@5000000 {
- reg = <0x0 0x05300000 0x0 0x2000000>;
- no-map;
- };
- };
-
sdio_pwrseq: sdio-pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
index d74e738e4070..c03f4e183389 100644
--- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
+++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts
@@ -157,6 +157,10 @@
};
};
+&ftm_alarm0 {
+ status = "okay";
+};
+
&gpio1 {
gpio-line-names =
"", "", "", "", "", "", "", "",
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
index f3e3418f7edc..2d4a472af6a9 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi
@@ -1115,8 +1115,8 @@
status = "okay";
ports {
- port@1 {
- reg = <1>;
+ port@0 {
+ reg = <0>;
mipi1_sensor_ep: endpoint {
remote-endpoint = <&camera1_ep>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
index 2df2510d0118..e92ebb6147e6 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi
@@ -554,7 +554,7 @@
assigned-clock-rates = <0>, <0>, <0>, <594000000>;
status = "disabled";
- port@0 {
+ port {
lcdif_mipi_dsi: endpoint {
remote-endpoint = <&mipi_dsi_lcdif_in>;
};
@@ -1151,8 +1151,8 @@
#address-cells = <1>;
#size-cells = <0>;
- port@0 {
- reg = <0>;
+ port@1 {
+ reg = <1>;
csi1_mipi_ep: endpoint {
remote-endpoint = <&csi1_ep>;
@@ -1203,8 +1203,8 @@
#address-cells = <1>;
#size-cells = <0>;
- port@0 {
- reg = <0>;
+ port@1 {
+ reg = <1>;
csi2_mipi_ep: endpoint {
remote-endpoint = <&csi2_ep>;
diff --git a/arch/arm64/boot/dts/freescale/mba8mx.dtsi b/arch/arm64/boot/dts/freescale/mba8mx.dtsi
index f27e3c8de916..ce6d5bdba0a8 100644
--- a/arch/arm64/boot/dts/freescale/mba8mx.dtsi
+++ b/arch/arm64/boot/dts/freescale/mba8mx.dtsi
@@ -91,7 +91,7 @@
sound {
compatible = "fsl,imx-audio-tlv320aic32x4";
- model = "tqm-tlv320aic32";
+ model = "imx-audio-tlv320aic32x4";
ssi-controller = <&sai3>;
audio-codec = <&tlv320aic3x04>;
};
diff --git a/arch/arm64/boot/dts/nvidia/tegra194.dtsi b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
index 3c4acfca459d..2d48c3715fc6 100644
--- a/arch/arm64/boot/dts/nvidia/tegra194.dtsi
+++ b/arch/arm64/boot/dts/nvidia/tegra194.dtsi
@@ -995,9 +995,8 @@
<&bpmp TEGRA194_CLK_HDA2CODEC_2X>;
clock-names = "hda", "hda2hdmi", "hda2codec_2x";
resets = <&bpmp TEGRA194_RESET_HDA>,
- <&bpmp TEGRA194_RESET_HDA2HDMICODEC>,
- <&bpmp TEGRA194_RESET_HDA2CODEC_2X>;
- reset-names = "hda", "hda2hdmi", "hda2codec_2x";
+ <&bpmp TEGRA194_RESET_HDA2HDMICODEC>;
+ reset-names = "hda", "hda2hdmi";
power-domains = <&bpmp TEGRA194_POWER_DOMAIN_DISP>;
interconnects = <&mc TEGRA194_MEMORY_CLIENT_HDAR &emc>,
<&mc TEGRA194_MEMORY_CLIENT_HDAW &emc>;
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
index a5a24f9f46c5..b210cc07c539 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
+++ b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts
@@ -15,8 +15,18 @@
model = "Texas Instruments J721S2 EVM";
chosen {
- stdout-path = "serial10:115200n8";
- bootargs = "console=ttyS10,115200n8 earlycon=ns16550a,mmio32,2880000";
+ stdout-path = "serial2:115200n8";
+ bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,2880000";
+ };
+
+ aliases {
+ serial1 = &mcu_uart0;
+ serial2 = &main_uart8;
+ mmc0 = &main_sdhci0;
+ mmc1 = &main_sdhci1;
+ can0 = &main_mcan16;
+ can1 = &mcu_mcan0;
+ can2 = &mcu_mcan1;
};
evm_12v0: fixedregulator-evm12v0 {
diff --git a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
index 80d3cae03e88..fe5234c40f6c 100644
--- a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
+++ b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi
@@ -21,28 +21,6 @@
#address-cells = <2>;
#size-cells = <2>;
- aliases {
- serial0 = &wkup_uart0;
- serial1 = &mcu_uart0;
- serial2 = &main_uart0;
- serial3 = &main_uart1;
- serial4 = &main_uart2;
- serial5 = &main_uart3;
- serial6 = &main_uart4;
- serial7 = &main_uart5;
- serial8 = &main_uart6;
- serial9 = &main_uart7;
- serial10 = &main_uart8;
- serial11 = &main_uart9;
- mmc0 = &main_sdhci0;
- mmc1 = &main_sdhci1;
- can0 = &main_mcan16;
- can1 = &mcu_mcan0;
- can2 = &mcu_mcan1;
- can3 = &main_mcan3;
- can4 = &main_mcan5;
- };
-
chosen { };
cpus {
diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h
index d955ade5df7c..5d460f6b7675 100644
--- a/arch/arm64/include/asm/atomic_lse.h
+++ b/arch/arm64/include/asm/atomic_lse.h
@@ -249,7 +249,7 @@ __lse__cmpxchg_case_##name##sz(volatile void *ptr, \
" mov %" #w "[tmp], %" #w "[old]\n" \
" cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \
" mov %" #w "[ret], %" #w "[tmp]" \
- : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr), \
+ : [ret] "+r" (x0), [v] "+Q" (*(u##sz *)ptr), \
[tmp] "=&r" (tmp) \
: [old] "r" (x1), [new] "r" (x2) \
: cl); \
diff --git a/arch/arm64/include/asm/bitops.h b/arch/arm64/include/asm/bitops.h
index 81a3e519b07d..9b3c787132d2 100644
--- a/arch/arm64/include/asm/bitops.h
+++ b/arch/arm64/include/asm/bitops.h
@@ -18,7 +18,6 @@
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index f9bef42c1411..497acf134d99 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -243,7 +243,7 @@ static inline void __cmpwait_case_##sz(volatile void *ptr, \
" cbnz %" #w "[tmp], 1f\n" \
" wfe\n" \
"1:" \
- : [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr) \
+ : [tmp] "=&r" (tmp), [v] "+Q" (*(u##sz *)ptr) \
: [val] "r" (val)); \
}
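
The change here and in atomic_lse.h above is the same fix: the "+Q" output operand used to claim that a full unsigned long at *ptr is read and written, even for the 8-, 16- and 32-bit variants of the macro. Pasting u##sz yields the exact-width type instead. A small stand-alone sketch of what the token pasting expands to (the typedefs and macro below are illustrative, not from the patch):

#include <stdint.h>
#include <stdio.h>

typedef uint8_t  u8;
typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;

/* u##sz pastes the literal size into a type name, e.g. sz=16 -> u16,
 * so (*(u##sz *)ptr) covers exactly the bytes the instruction touches. */
#define ACCESS_WIDTH(sz) printf("u%-2d -> %zu byte(s)\n", sz, sizeof(u##sz))

int main(void)
{
	ACCESS_WIDTH(8);
	ACCESS_WIDTH(16);
	ACCESS_WIDTH(32);
	ACCESS_WIDTH(64);
	return 0;
}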
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 19b8441aa8f2..999b9149f856 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -73,7 +73,9 @@
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_NEOVERSE_N1 0xD0C
#define ARM_CPU_PART_CORTEX_A77 0xD0D
+#define ARM_CPU_PART_CORTEX_A510 0xD46
#define ARM_CPU_PART_CORTEX_A710 0xD47
+#define ARM_CPU_PART_CORTEX_X2 0xD48
#define ARM_CPU_PART_NEOVERSE_N2 0xD49
#define APM_CPU_PART_POTENZA 0x000
@@ -115,7 +117,9 @@
#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
#define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1)
#define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77)
+#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
+#define MIDR_CORTEX_X2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X2)
#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 3198acb2aad8..7f3c87f7a0ce 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -106,7 +106,7 @@
msr_s SYS_ICC_SRE_EL2, x0
isb // Make sure SRE is now set
mrs_s x0, SYS_ICC_SRE_EL2 // Read SRE back,
- tbz x0, #0, 1f // and check that it sticks
+ tbz x0, #0, .Lskip_gicv3_\@ // and check that it sticks
msr_s SYS_ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm
diff --git a/arch/arm64/include/asm/hyperv-tlfs.h b/arch/arm64/include/asm/hyperv-tlfs.h
index 4d964a7f02ee..bc6c7ac934a1 100644
--- a/arch/arm64/include/asm/hyperv-tlfs.h
+++ b/arch/arm64/include/asm/hyperv-tlfs.h
@@ -64,6 +64,15 @@
#define HV_REGISTER_STIMER0_CONFIG 0x000B0000
#define HV_REGISTER_STIMER0_COUNT 0x000B0001
+union hv_msi_entry {
+ u64 as_uint64[2];
+ struct {
+ u64 address;
+ u32 data;
+ u32 reserved;
+ } __packed;
+};
+
#include <asm-generic/hyperv-tlfs.h>
#endif
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 50d5e4de244c..d5b0386ef765 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -63,6 +63,7 @@ enum __kvm_host_smccc_func {
/* Hypercalls available after pKVM finalisation */
__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
+ __KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index f4871e47b2d0..d62405ce3e6d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -41,6 +41,8 @@ void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
+
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.hcr_el2 & HCR_RW);
@@ -386,7 +388,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
} else {
u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
- sctlr |= (1 << 25);
+ sctlr |= SCTLR_ELx_EE;
vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
}
}
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2a5f7f38006f..5bc01e62c08a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -26,7 +26,6 @@
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
-#include <asm/thread_info.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -298,9 +297,6 @@ struct kvm_vcpu_arch {
/* Exception Information */
struct kvm_vcpu_fault_info fault;
- /* State of various workarounds, see kvm_asm.h for bit assignment */
- u64 workaround_flags;
-
/* Miscellaneous vcpu state flags */
u64 flags;
@@ -321,8 +317,8 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state;
- struct thread_info *host_thread_info; /* hyp VA */
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
+ struct task_struct *parent_task;
struct {
/* {Break,watch}point registers */
@@ -367,9 +363,6 @@ struct kvm_vcpu_arch {
int target;
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
- /* Detect first run of a vcpu */
- bool has_run_once;
-
/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
u64 vsesr_el2;
@@ -411,20 +404,17 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_DEBUG_DIRTY (1 << 0)
#define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */
-#define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */
#define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */
#define KVM_ARM64_GUEST_HAS_SVE (1 << 5) /* SVE exposed to guest */
#define KVM_ARM64_VCPU_SVE_FINALIZED (1 << 6) /* SVE config completed */
#define KVM_ARM64_GUEST_HAS_PTRAUTH (1 << 7) /* PTRAUTH exposed to guest */
#define KVM_ARM64_PENDING_EXCEPTION (1 << 8) /* Exception pending */
+/*
+ * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
+ * set together with an exception...
+ */
+#define KVM_ARM64_INCREMENT_PC (1 << 9) /* Increment PC */
#define KVM_ARM64_EXCEPT_MASK (7 << 9) /* Target EL/MODE */
-#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */
-#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */
-
-#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
- KVM_GUESTDBG_USE_SW_BP | \
- KVM_GUESTDBG_USE_HW | \
- KVM_GUESTDBG_SINGLESTEP)
/*
* When KVM_ARM64_PENDING_EXCEPTION is set, KVM_ARM64_EXCEPT_MASK can
* take the following values:
@@ -442,11 +432,14 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_EXCEPT_AA64_EL1 (0 << 11)
#define KVM_ARM64_EXCEPT_AA64_EL2 (1 << 11)
-/*
- * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
- * set together with an exception...
- */
-#define KVM_ARM64_INCREMENT_PC (1 << 9) /* Increment PC */
+#define KVM_ARM64_DEBUG_STATE_SAVE_SPE (1 << 12) /* Save SPE context if active */
+#define KVM_ARM64_DEBUG_STATE_SAVE_TRBE (1 << 13) /* Save TRBE context if active */
+#define KVM_ARM64_FP_FOREIGN_FPSTATE (1 << 14)
+
+#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
+ KVM_GUESTDBG_USE_SW_BP | \
+ KVM_GUESTDBG_USE_HW | \
+ KVM_GUESTDBG_SINGLESTEP)
#define vcpu_has_sve(vcpu) (system_supports_sve() && \
((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
@@ -606,6 +599,8 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
+#define vcpu_has_run_once(vcpu) !!rcu_access_pointer((vcpu)->pid)
+
#ifndef __KVM_NVHE_HYPERVISOR__
#define kvm_call_hyp_nvhe(f, ...) \
({ \
@@ -675,8 +670,15 @@ unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
-int kvm_perf_init(void);
-int kvm_perf_teardown(void);
+/*
+ * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
+ * arrived in guest context. For arm64, any event that arrives while a vCPU is
+ * loaded is considered to be "in guest".
+ */
+static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+ return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
+}
long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
@@ -717,7 +719,6 @@ void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
void kvm_arm_init_debug(void);
void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
@@ -737,8 +738,10 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
/* Guest/host FPSIMD coordination helpers */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
+void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
+void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu);
static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
{
@@ -749,12 +752,7 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
-#ifdef CONFIG_KVM /* Avoid conflicts with core headers if CONFIG_KVM=n */
-static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
-{
- return kvm_arch_vcpu_run_map_fp(vcpu);
-}
-
+#ifdef CONFIG_KVM
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 5afd14ab15b9..462882f356c7 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -90,7 +90,6 @@ void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu);
void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
-void __sve_save_state(void *sve_pffr, u32 *fpsr);
void __sve_restore_state(void *sve_pffr, u32 *fpsr);
#ifndef __KVM_NVHE_HYPERVISOR__
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 02d378887743..81839e9a8a24 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -150,6 +150,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>
+int kvm_share_hyp(void *from, void *to);
+void kvm_unshare_hyp(void *from, void *to);
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
void __iomem **kaddr,
diff --git a/arch/arm64/include/asm/kvm_pgtable.h b/arch/arm64/include/asm/kvm_pgtable.h
index 027783829584..9f339dffbc1a 100644
--- a/arch/arm64/include/asm/kvm_pgtable.h
+++ b/arch/arm64/include/asm/kvm_pgtable.h
@@ -252,6 +252,27 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
enum kvm_pgtable_prot prot);
/**
+ * kvm_pgtable_hyp_unmap() - Remove a mapping from a hypervisor stage-1 page-table.
+ * @pgt: Page-table structure initialised by kvm_pgtable_hyp_init().
+ * @addr: Virtual address from which to remove the mapping.
+ * @size: Size of the mapping.
+ *
+ * The offset of @addr within a page is ignored and @size is rounded-up
+ * to the next page boundary. (There is no @phys argument here; the call
+ * only tears down the existing mapping for the range.)
+ *
+ * TLB invalidation is performed for each page-table entry cleared during the
+ * unmapping operation and the reference count for the page-table page
+ * containing the cleared entry is decremented, with unreferenced pages being
+ * freed. The unmapping operation will stop early if it encounters either an
+ * invalid page-table entry or a valid block mapping which maps beyond the range
+ * being unmapped.
+ *
+ * Return: Number of bytes unmapped, which may be 0.
+ */
+u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size);
+
+/**
* kvm_get_vtcr() - Helper to construct VTCR_EL2
* @mmfr0: Sanitized value of SYS_ID_AA64MMFR0_EL1 register.
* @mmfr1: Sanitized value of SYS_ID_AA64MMFR1_EL1 register.
@@ -270,8 +291,7 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
/**
* __kvm_pgtable_stage2_init() - Initialise a guest stage-2 page-table.
* @pgt: Uninitialised page-table structure to initialise.
- * @arch: Arch-specific KVM structure representing the guest virtual
- * machine.
+ * @mmu: S2 MMU context for this S2 translation
* @mm_ops: Memory management callbacks.
* @flags: Stage-2 configuration flags.
* @force_pte_cb: Function that returns true if page level mappings must
@@ -279,13 +299,13 @@ u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift);
*
* Return: 0 on success, negative error code on failure.
*/
-int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
+int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
struct kvm_pgtable_mm_ops *mm_ops,
enum kvm_pgtable_stage2_flags flags,
kvm_pgtable_force_pte_cb_t force_pte_cb);
-#define kvm_pgtable_stage2_init(pgt, arch, mm_ops) \
- __kvm_pgtable_stage2_init(pgt, arch, mm_ops, 0, NULL)
+#define kvm_pgtable_stage2_init(pgt, mmu, mm_ops) \
+ __kvm_pgtable_stage2_init(pgt, mmu, mm_ops, 0, NULL)
/**
* kvm_pgtable_stage2_destroy() - Destroy an unused guest stage-2 page-table.
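
A minimal usage sketch for the new unmap API, pairing it with the existing kvm_pgtable_hyp_map(); hyp_remap_example() is an invented name, and pkvm_pgtable/PAGE_HYP are assumed from the nVHE hyp code this series touches:

#include <asm/kvm_pgtable.h>

/* Hypothetical sketch: map one page at EL2, then tear it down again. */
static int hyp_remap_example(u64 va, u64 phys)
{
	u64 unmapped;
	int ret;

	ret = kvm_pgtable_hyp_map(&pkvm_pgtable, va, PAGE_SIZE, phys, PAGE_HYP);
	if (ret)
		return ret;

	/* ... use the mapping ... */

	/* Returns the number of bytes unmapped, which may be 0. */
	unmapped = kvm_pgtable_hyp_unmap(&pkvm_pgtable, va, PAGE_SIZE);
	return unmapped == PAGE_SIZE ? 0 : -EINVAL;
}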
diff --git a/arch/arm64/include/asm/kvm_pkvm.h b/arch/arm64/include/asm/kvm_pkvm.h
new file mode 100644
index 000000000000..9f4ad2a8df59
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_pkvm.h
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 - Google LLC
+ * Author: Quentin Perret <qperret@google.com>
+ */
+#ifndef __ARM64_KVM_PKVM_H__
+#define __ARM64_KVM_PKVM_H__
+
+#include <linux/memblock.h>
+#include <asm/kvm_pgtable.h>
+
+#define HYP_MEMBLOCK_REGIONS 128
+
+extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
+extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
+
+static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
+{
+ unsigned long total = 0, i;
+
+ /* Provision the worst case scenario */
+ for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
+ nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
+ total += nr_pages;
+ }
+
+ return total;
+}
+
+static inline unsigned long __hyp_pgtable_total_pages(void)
+{
+ unsigned long res = 0, i;
+
+ /* Cover all of memory with page-granularity */
+ for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
+ struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
+ res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
+ }
+
+ return res;
+}
+
+static inline unsigned long hyp_s1_pgtable_pages(void)
+{
+ unsigned long res;
+
+ res = __hyp_pgtable_total_pages();
+
+ /* Allow 1 GiB for private mappings */
+ res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+ return res;
+}
+
+static inline unsigned long host_s2_pgtable_pages(void)
+{
+ unsigned long res;
+
+ /*
+ * Include an extra 16 pages to safely upper-bound the worst case of
+ * concatenated pgds.
+ */
+ res = __hyp_pgtable_total_pages() + 16;
+
+ /* Allow 1 GiB for MMIO mappings */
+ res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+
+ return res;
+}
+
+#endif /* __ARM64_KVM_PKVM_H__ */
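
The sizing helpers above are straightforward to sanity-check outside the kernel. A stand-alone sketch of the same worst-case arithmetic, assuming 4 KiB pages (PTRS_PER_PTE = 512) and KVM_PGTABLE_MAX_LEVELS = 4:

#include <stdio.h>

#define PTRS_PER_PTE	512	/* 4 KiB pages */
#define MAX_LEVELS	4
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned long max_pgtable_pages(unsigned long nr_pages)
{
	unsigned long total = 0;

	/* Provision the worst case: one table walk level at a time. */
	for (int i = 0; i < MAX_LEVELS; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}
	return total;
}

int main(void)
{
	/* 1 GiB = 262144 pages -> 512 + 1 + 1 + 1 = 515 page-table pages */
	printf("%lu\n", max_pgtable_pages((1UL << 30) >> 12));
	return 0;
}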
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index e9c30859f80c..48f8466a4be9 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -15,6 +15,7 @@
#ifndef __ASSEMBLY__
#include <linux/refcount.h>
+#include <asm/cpufeature.h>
typedef struct {
atomic64_t id;
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 4704f5893458..898bee0004ae 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -953,6 +953,7 @@
#define ID_AA64DFR0_PMUVER_8_1 0x4
#define ID_AA64DFR0_PMUVER_8_4 0x5
#define ID_AA64DFR0_PMUVER_8_5 0x6
+#define ID_AA64DFR0_PMUVER_8_7 0x7
#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf
#define ID_AA64DFR0_PMSVER_8_2 0x1
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 6bdb5f5db438..4e65da3445c7 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -38,7 +38,7 @@
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE + 5)
#define __ARM_NR_COMPAT_END (__ARM_NR_COMPAT_BASE + 0x800)
-#define __NR_compat_syscalls 450
+#define __NR_compat_syscalls 451
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 41ea1195e44b..604a2053d006 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -905,6 +905,8 @@ __SYSCALL(__NR_landlock_restrict_self, sys_landlock_restrict_self)
__SYSCALL(__NR_process_mrelease, sys_process_mrelease)
#define __NR_futex_waitv 449
__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
+#define __NR_set_mempolicy_home_node 450
+__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
/*
* Please add new compat syscalls above this comment and update
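
For reference, a user-space sketch of calling the new syscall by number (glibc has no wrapper at this point; the fallback define mirrors the number wired up above, and the call is expected to fail unless the range already carries an MPOL_BIND or MPOL_PREFERRED_MANY policy):

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/syscall.h>

#ifndef __NR_set_mempolicy_home_node
#define __NR_set_mempolicy_home_node 450
#endif

int main(void)
{
	size_t len = 2UL << 20;
	void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	/* Ask the kernel to prefer node 0 for this range; flags must be 0. */
	long ret = syscall(__NR_set_mempolicy_home_node,
			   (unsigned long)addr, len, 0UL, 0UL);
	printf("set_mempolicy_home_node: %ld\n", ret);
	return 0;
}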
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 6d0c3afd36b8..1197e7679882 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -111,7 +111,6 @@ int main(void)
#ifdef CONFIG_KVM
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(VCPU_FAULT_DISR, offsetof(struct kvm_vcpu, arch.fault.disr_el1));
- DEFINE(VCPU_WORKAROUND_FLAGS, offsetof(struct kvm_vcpu, arch.workaround_flags));
DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs));
DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index 9e1c1aef9ebd..b217941713a8 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -347,6 +347,7 @@ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+ MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
{},
};
@@ -371,6 +372,7 @@ static struct midr_range trbe_write_out_of_range_cpus[] = {
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
+ MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
{},
};
@@ -598,6 +600,41 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_2077057
+ {
+ .desc = "ARM erratum 2077057",
+ .capability = ARM64_WORKAROUND_2077057,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2064142
+ {
+ .desc = "ARM erratum 2064142",
+ .capability = ARM64_WORKAROUND_2064142,
+
+ /* Cortex-A510 r0p0 - r0p2 */
+ ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_2038923
+ {
+ .desc = "ARM erratum 2038923",
+ .capability = ARM64_WORKAROUND_2038923,
+
+ /* Cortex-A510 r0p0 - r0p2 */
+ ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
+ },
+#endif
+#ifdef CONFIG_ARM64_ERRATUM_1902691
+ {
+ .desc = "ARM erratum 1902691",
+ .capability = ARM64_WORKAROUND_1902691,
+
+ /* Cortex-A510 r0p0 - r0p1 */
+ ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
+ },
+#endif
{
}
};
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index a46ab3b1c4d5..e5f23dab1c8d 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1646,6 +1646,9 @@ static bool cpu_has_broken_dbm(void)
/* Kryo4xx Silver (rdpe => r1p0) */
MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
+#ifdef CONFIG_ARM64_ERRATUM_2051678
+ MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
+#endif
{},
};
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index f2307d6631eb..5280e098cfb5 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -79,7 +79,11 @@
* indicate whether or not the userland FPSIMD state of the current task is
* present in the registers. The flag is set unless the FPSIMD registers of this
* CPU currently contain the most recent userland FPSIMD state of the current
- * task.
+ * task. If the task is behaving as a VMM, then this will be managed by
+ * KVM which will clear it to indicate that the vcpu FPSIMD state is currently
+ * loaded on the CPU, allowing the state to be saved if an FPSIMD-aware
+ * softirq kicks in. Upon vcpu_put(), KVM will save the vcpu FP state and
+ * flag the register state as invalid.
*
* In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
* save the task's FPSIMD context back to task_struct from softirq context.
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index c96a9a0043bf..7eaf1f7c4168 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -102,7 +102,9 @@ KVM_NVHE_ALIAS(__stop___kvm_ex_table);
KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);
/* PMU available static key */
+#ifdef CONFIG_HW_PERF_EVENTS
KVM_NVHE_ALIAS(kvm_arm_pmu_available);
+#endif
/* Position-independent library routines */
KVM_NVHE_ALIAS_HYP(clear_page, __pi_clear_page);
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index b5ec010c481f..309a27553c87 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
module_alloc_end = MODULES_END;
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
- module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
+ module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
NUMA_NO_NODE, __builtin_return_address(0));
if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
@@ -58,7 +58,7 @@ void *module_alloc(unsigned long size)
PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
- if (p && (kasan_module_alloc(p, size) < 0)) {
+ if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) {
vfree(p);
return NULL;
}
diff --git a/arch/arm64/kernel/perf_callchain.c b/arch/arm64/kernel/perf_callchain.c
index e9b7d99f4e3a..65b196e3ca6c 100644
--- a/arch/arm64/kernel/perf_callchain.c
+++ b/arch/arm64/kernel/perf_callchain.c
@@ -102,7 +102,7 @@ compat_user_backtrace(struct compat_frame_tail __user *tail,
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ if (perf_guest_state()) {
/* We don't support guest os callchain now */
return;
}
@@ -141,7 +141,7 @@ static bool callchain_trace(void *data, unsigned long pc)
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ if (perf_guest_state()) {
/* We don't support guest os callchain now */
return;
}
@@ -151,18 +151,19 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- return perf_guest_cbs->get_guest_ip();
+ if (perf_guest_state())
+ return perf_guest_get_ip();
return instruction_pointer(regs);
}
unsigned long perf_misc_flags(struct pt_regs *regs)
{
+ unsigned int guest_state = perf_guest_state();
int misc = 0;
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- if (perf_guest_cbs->is_user_mode())
+ if (guest_state) {
+ if (guest_state & PERF_GUEST_USER)
misc |= PERF_RECORD_MISC_GUEST_USER;
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
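
These hunks move from the perf_guest_cbs callback pointer to the new helpers from this series. A condensed kernel-side sketch of the consumer pattern, assuming the PERF_GUEST_ACTIVE/PERF_GUEST_USER state bits from <linux/perf_event.h> in the same series; example_sample_misc() is an invented name:

#include <linux/perf_event.h>
#include <asm/ptrace.h>

/* Sketch only: classify a sample's origin using the new API. */
static int example_sample_misc(struct pt_regs *regs)
{
	unsigned int state = perf_guest_state();

	if (!state)	/* no guest context active on this CPU */
		return user_mode(regs) ? PERF_RECORD_MISC_USER
				       : PERF_RECORD_MISC_KERNEL;

	return (state & PERF_GUEST_USER) ? PERF_RECORD_MISC_GUEST_USER
					 : PERF_RECORD_MISC_GUEST_KERNEL;
}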
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 0fb58fed54cb..e4103e085681 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -33,8 +33,8 @@
*/
-static void start_backtrace(struct stackframe *frame, unsigned long fp,
- unsigned long pc)
+static notrace void start_backtrace(struct stackframe *frame, unsigned long fp,
+ unsigned long pc)
{
frame->fp = fp;
frame->pc = pc;
@@ -55,6 +55,7 @@ static void start_backtrace(struct stackframe *frame, unsigned long fp,
frame->prev_fp = 0;
frame->prev_type = STACK_TYPE_UNKNOWN;
}
+NOKPROBE_SYMBOL(start_backtrace);
/*
* Unwind from one frame record (A) to the next frame record (B).
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index e8986e6067a9..70fc42470f13 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -235,7 +235,7 @@ void die(const char *str, struct pt_regs *regs, int err)
raw_spin_unlock_irqrestore(&die_lock, flags);
if (ret != NOTIFY_STOP)
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
static void arm64_show_signal(int signo, const char *str)
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 60813497a381..172452f79e46 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -29,8 +29,11 @@ ldflags-y := -shared -soname=linux-vdso.so.1 --hash-style=sysv \
ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
ccflags-y += -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
+# -Wmissing-prototypes and -Wmissing-declarations are removed from
+# the CFLAGS of vgettimeofday.c to make it possible to build the
+# kernel with CONFIG_WERROR enabled.
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS) \
- $(CC_FLAGS_LTO)
+ $(CC_FLAGS_LTO) -Wmissing-prototypes -Wmissing-declarations
KASAN_SANITIZE := n
KCSAN_SANITIZE := n
UBSAN_SANITIZE := n
diff --git a/arch/mips/txx9/rbtx4938/Makefile b/arch/arm64/kvm/.gitignore
index 08a02aebda5a..6182aefb8302 100644
--- a/arch/mips/txx9/rbtx4938/Makefile
+++ b/arch/arm64/kvm/.gitignore
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += prom.o setup.o irq.o
+hyp_constants.h
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 8ffcbe29395e..8a5fbbf084df 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -39,6 +39,8 @@ menuconfig KVM
select HAVE_KVM_IRQ_BYPASS
select HAVE_KVM_VCPU_RUN_PID_CHANGE
select SCHED_INFO
+ select GUEST_PERF_EVENTS if PERF_EVENTS
+ select INTERVAL_TREE
help
Support hosting virtualized guest machines.
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 989bb5dad2c8..91861fd8b897 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -5,17 +5,15 @@
ccflags-y += -I $(srctree)/$(src)
-KVM=../../../virt/kvm
+include $(srctree)/virt/kvm/Makefile.kvm
obj-$(CONFIG_KVM) += kvm.o
obj-$(CONFIG_KVM) += hyp/
-kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
- $(KVM)/vfio.o $(KVM)/irqchip.o $(KVM)/binary_stats.o \
- arm.o mmu.o mmio.o psci.o perf.o hypercalls.o pvtime.o \
+kvm-y += arm.o mmu.o mmio.o psci.o hypercalls.o pvtime.o \
inject_fault.o va_layout.o handle_exit.o \
guest.o debug.o reset.o sys_regs.o \
- vgic-sys-reg-v3.o fpsimd.o pmu.o \
+ vgic-sys-reg-v3.o fpsimd.o pmu.o pkvm.o \
arch_timer.o trng.o \
vgic/vgic.o vgic/vgic-init.o \
vgic/vgic-irqfd.o vgic/vgic-v2.o \
@@ -25,3 +23,19 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
vgic/vgic-its.o vgic/vgic-debug.o
kvm-$(CONFIG_HW_PERF_EVENTS) += pmu-emul.o
+
+always-y := hyp_constants.h hyp-constants.s
+
+define rule_gen_hyp_constants
+ $(call filechk,offsets,__HYP_CONSTANTS_H__)
+endef
+
+CFLAGS_hyp-constants.o = -I $(srctree)/$(src)/hyp/include
+$(obj)/hyp-constants.s: $(src)/hyp/hyp-constants.c FORCE
+ $(call if_changed_dep,cc_s_c)
+
+$(obj)/hyp_constants.h: $(obj)/hyp-constants.s FORCE
+ $(call if_changed_rule,gen_hyp_constants)
+
+obj-kvm := $(addprefix $(obj)/, $(kvm-y))
+$(obj-kvm): $(obj)/hyp_constants.h
diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c
index 3df67c127489..6e542e2eae32 100644
--- a/arch/arm64/kvm/arch_timer.c
+++ b/arch/arm64/kvm/arch_timer.c
@@ -467,7 +467,7 @@ out:
}
/*
- * Schedule the background timer before calling kvm_vcpu_block, so that this
+ * Schedule the background timer before calling kvm_vcpu_halt, so that this
* thread is removed from its waitqueue and made runnable when there's a timer
* interrupt to handle.
*/
@@ -649,7 +649,6 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = vcpu_timer(vcpu);
struct timer_map map;
- struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
if (unlikely(!timer->enabled))
return;
@@ -672,7 +671,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
if (map.emul_ptimer)
soft_timer_cancel(&map.emul_ptimer->hrtimer);
- if (rcuwait_active(wait))
+ if (kvm_vcpu_is_blocking(vcpu))
kvm_timer_blocking(vcpu);
/*
@@ -750,7 +749,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu)
/* Make the updates of cntvoff for all vtimer contexts atomic */
static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
- int i;
+ unsigned long i;
struct kvm *kvm = vcpu->kvm;
struct kvm_vcpu *tmp;
@@ -1189,8 +1188,8 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu)
{
- int vtimer_irq, ptimer_irq;
- int i, ret;
+ int vtimer_irq, ptimer_irq, ret;
+ unsigned long i;
vtimer_irq = vcpu_vtimer(vcpu)->irq.irq;
ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu));
@@ -1297,7 +1296,7 @@ void kvm_timer_init_vhe(void)
static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index e4727dc771bf..ecc5958e27fe 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -146,7 +146,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
if (ret)
return ret;
- ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
+ ret = kvm_share_hyp(kvm, kvm + 1);
if (ret)
goto out_free_stage2_pgd;
@@ -175,19 +175,13 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
*/
void kvm_arch_destroy_vm(struct kvm *kvm)
{
- int i;
-
bitmap_free(kvm->arch.pmu_filter);
kvm_vgic_destroy(kvm);
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_vcpu_destroy(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
- atomic_set(&kvm->online_vcpus, 0);
+ kvm_destroy_vcpus(kvm);
+
+ kvm_unshare_hyp(kvm, kvm + 1);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -342,7 +336,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (err)
return err;
- return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
+ return kvm_share_hyp(vcpu, vcpu + 1);
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -351,7 +345,7 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
+ if (vcpu_has_run_once(vcpu) && unlikely(!irqchip_in_kernel(vcpu->kvm)))
static_branch_dec(&userspace_irqchip_in_use);
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
@@ -368,27 +362,12 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
- /*
- * If we're about to block (most likely because we've just hit a
- * WFI), we need to sync back the state of the GIC CPU interface
- * so that we have the latest PMR and group enables. This ensures
- * that kvm_arch_vcpu_runnable has up-to-date data to decide
- * whether we have pending interrupts.
- *
- * For the same reason, we want to tell GICv4 that we need
- * doorbells to be signalled, should an interrupt become pending.
- */
- preempt_disable();
- kvm_vgic_vmcr_sync(vcpu);
- vgic_v4_put(vcpu, true);
- preempt_enable();
+
}
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
- preempt_disable();
- vgic_v4_load(vcpu);
- preempt_enable();
+
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
@@ -503,6 +482,13 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
return vcpu_mode_priv(vcpu);
}
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+ return *vcpu_pc(vcpu);
+}
+#endif
+
/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
@@ -584,18 +570,33 @@ static void update_vmid(struct kvm_vmid *vmid)
spin_unlock(&kvm_vmid_lock);
}
-static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.target >= 0;
+}
+
+/*
+ * Handle both the initialisation that is done when the vcpu is
+ * run for the first time and the updates that must be
+ * performed each time we get a new thread dealing with this vcpu.
+ */
+int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
struct kvm *kvm = vcpu->kvm;
- int ret = 0;
+ int ret;
- if (likely(vcpu->arch.has_run_once))
- return 0;
+ if (!kvm_vcpu_initialized(vcpu))
+ return -ENOEXEC;
if (!kvm_arm_vcpu_is_finalized(vcpu))
return -EPERM;
- vcpu->arch.has_run_once = true;
+ ret = kvm_arch_vcpu_run_map_fp(vcpu);
+ if (ret)
+ return ret;
+
+ if (likely(vcpu_has_run_once(vcpu)))
+ return 0;
kvm_arm_vcpu_init_debug(vcpu);
@@ -607,12 +608,6 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
ret = kvm_vgic_map_resources(kvm);
if (ret)
return ret;
- } else {
- /*
- * Tell the rest of the code that there are userspace irqchip
- * VMs in the wild.
- */
- static_branch_inc(&userspace_irqchip_in_use);
}
ret = kvm_timer_enable(vcpu);
@@ -620,6 +615,16 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
return ret;
ret = kvm_arm_pmu_v3_enable(vcpu);
+ if (ret)
+ return ret;
+
+ if (!irqchip_in_kernel(kvm)) {
+ /*
+ * Tell the rest of the code that there are userspace irqchip
+ * VMs in the wild.
+ */
+ static_branch_inc(&userspace_irqchip_in_use);
+ }
/*
* Initialize traps for protected VMs.
@@ -639,7 +644,7 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
void kvm_arm_halt_guest(struct kvm *kvm)
{
- int i;
+ unsigned long i;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm)
@@ -649,12 +654,12 @@ void kvm_arm_halt_guest(struct kvm *kvm)
void kvm_arm_resume_guest(struct kvm *kvm)
{
- int i;
+ unsigned long i;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.pause = false;
- rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
+ __kvm_vcpu_wake_up(vcpu);
}
}
@@ -679,9 +684,37 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
smp_rmb();
}
-static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+/**
+ * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
+ * @vcpu: The VCPU pointer
+ *
+ * Suspend execution of a vCPU until a valid wake event is detected, i.e. until
+ * the vCPU is runnable. The vCPU may or may not be scheduled out, depending
+ * on when a wake event arrives, e.g. there may already be a pending wake event.
+ */
+void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.target >= 0;
+ /*
+ * Sync back the state of the GIC CPU interface so that we have
+ * the latest PMR and group enables. This ensures that
+ * kvm_arch_vcpu_runnable has up-to-date data to decide whether
+ * we have pending interrupts, e.g. when determining if the
+ * vCPU should block.
+ *
+ * For the same reason, we want to tell GICv4 that we need
+ * doorbells to be signalled, should an interrupt become pending.
+ */
+ preempt_disable();
+ kvm_vgic_vmcr_sync(vcpu);
+ vgic_v4_put(vcpu, true);
+ preempt_enable();
+
+ kvm_vcpu_halt(vcpu);
+ kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+
+ preempt_disable();
+ vgic_v4_load(vcpu);
+ preempt_enable();
}
static void check_vcpu_requests(struct kvm_vcpu *vcpu)
@@ -764,6 +797,24 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
xfer_to_guest_mode_work_pending();
}
+/*
+ * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
+ * the vCPU is running.
+ *
+ * This must be noinstr as instrumentation may make use of RCU, and this is not
+ * safe during the EQS.
+ */
+static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guest_state_enter_irqoff();
+ ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
+ guest_state_exit_irqoff();
+
+ return ret;
+}
+
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
@@ -779,13 +830,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
struct kvm_run *run = vcpu->run;
int ret;
- if (unlikely(!kvm_vcpu_initialized(vcpu)))
- return -ENOEXEC;
-
- ret = kvm_vcpu_first_run_init(vcpu);
- if (ret)
- return ret;
-
if (run->exit_reason == KVM_EXIT_MMIO) {
ret = kvm_handle_mmio_return(vcpu);
if (ret)
@@ -849,14 +893,15 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
}
kvm_arm_setup_debug(vcpu);
+ kvm_arch_vcpu_ctxflush_fp(vcpu);
/**************************************************************
* Enter the guest
*/
trace_kvm_entry(*vcpu_pc(vcpu));
- guest_enter_irqoff();
+ guest_timing_enter_irqoff();
- ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
+ ret = kvm_arm_vcpu_enter_exit(vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
@@ -891,26 +936,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_arch_vcpu_ctxsync_fp(vcpu);
/*
- * We may have taken a host interrupt in HYP mode (ie
- * while executing the guest). This interrupt is still
- * pending, as we haven't serviced it yet!
+ * We must ensure that any pending interrupts are taken before
+ * we exit guest timing so that timer ticks are accounted as
+ * guest time. Transiently unmask interrupts so that any
+ * pending interrupts are taken.
*
- * We're now back in SVC mode, with interrupts
- * disabled. Enabling the interrupts now will have
- * the effect of taking the interrupt again, in SVC
- * mode this time.
+ * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
+ * context synchronization event) is necessary to ensure that
+ * pending interrupts are taken.
*/
local_irq_enable();
+ isb();
+ local_irq_disable();
+
+ guest_timing_exit_irqoff();
+
+ local_irq_enable();
- /*
- * We do local_irq_enable() before calling guest_exit() so
- * that if a timer interrupt hits while running the guest we
- * account that tick as being spent in the guest. We enable
- * preemption after calling guest_exit() so that if we get
- * preempted we make sure ticks after that is not counted as
- * guest time.
- */
- guest_exit();
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
/* Exit types that need handling before we can be preempted */
@@ -1123,7 +1165,7 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
* need to invalidate the I-cache though, as FWB does *not*
* imply CTR_EL0.DIC.
*/
- if (vcpu->arch.has_run_once) {
+ if (vcpu_has_run_once(vcpu)) {
if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
stage2_unmap_vm(vcpu->kvm);
else
@@ -1775,7 +1817,8 @@ static int init_subsystems(void)
if (err)
goto out;
- kvm_perf_init();
+ kvm_register_perf_callbacks(NULL);
+
kvm_sys_reg_table_init();
out:
@@ -2035,7 +2078,7 @@ static int finalize_hyp_mode(void)
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
mpidr &= MPIDR_HWID_BITMASK;
kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -2163,7 +2206,7 @@ out_err:
/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
- kvm_perf_teardown();
+ kvm_unregister_perf_callbacks();
}
static int __init early_kvm_mode_cfg(char *arg)
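
Taken together, the run-loop hunks above produce the following ordering; this is a sketch assembled from this file's changes, not code from the patch:

/*
 * local_irq_disable();
 * guest_timing_enter_irqoff();     // vtime: start accounting guest time
 *   kvm_arm_vcpu_enter_exit()      // noinstr:
 *     guest_state_enter_irqoff();  //   enter the RCU extended quiescent state
 *     __kvm_vcpu_run(vcpu);        //   the world switch itself
 *     guest_state_exit_irqoff();   //   leave the EQS
 * local_irq_enable();              // take pending IRQs...
 * isb();                           // ...forced by a context synchronization event
 * local_irq_disable();
 * guest_timing_exit_irqoff();      // ticks taken above count as guest time
 * local_irq_enable();
 */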
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index 5621020b28de..2f48fd362a8c 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -7,7 +7,6 @@
*/
#include <linux/irqflags.h>
#include <linux/sched.h>
-#include <linux/thread_info.h>
#include <linux/kvm_host.h>
#include <asm/fpsimd.h>
#include <asm/kvm_asm.h>
@@ -15,6 +14,19 @@
#include <asm/kvm_mmu.h>
#include <asm/sysreg.h>
+void kvm_vcpu_unshare_task_fp(struct kvm_vcpu *vcpu)
+{
+ struct task_struct *p = vcpu->arch.parent_task;
+ struct user_fpsimd_state *fpsimd;
+
+ if (!is_protected_kvm_enabled() || !p)
+ return;
+
+ fpsimd = &p->thread.uw.fpsimd_state;
+ kvm_unshare_hyp(fpsimd, fpsimd + 1);
+ put_task_struct(p);
+}
+
/*
* Called on entry to KVM_RUN unless this vcpu previously ran at least
* once and the most recent prior KVM_RUN for this vcpu was called from
@@ -28,36 +40,29 @@ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
int ret;
- struct thread_info *ti = &current->thread_info;
struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;
- /*
- * Make sure the host task thread flags and fpsimd state are
- * visible to hyp:
- */
- ret = create_hyp_mappings(ti, ti + 1, PAGE_HYP);
- if (ret)
- goto error;
+ kvm_vcpu_unshare_task_fp(vcpu);
- ret = create_hyp_mappings(fpsimd, fpsimd + 1, PAGE_HYP);
+ /* Make sure the host task fpsimd state is visible to hyp: */
+ ret = kvm_share_hyp(fpsimd, fpsimd + 1);
if (ret)
- goto error;
-
- if (vcpu->arch.sve_state) {
- void *sve_end;
+ return ret;
- sve_end = vcpu->arch.sve_state + vcpu_sve_state_size(vcpu);
+ vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
- ret = create_hyp_mappings(vcpu->arch.sve_state, sve_end,
- PAGE_HYP);
- if (ret)
- goto error;
+ /*
+ * We need to keep current's task_struct pinned until its data has been
+ * unshared with the hypervisor to make sure it is not re-used by the
+ * kernel and donated to someone else while already shared -- see
+ * kvm_vcpu_unshare_task_fp() for the matching put_task_struct().
+ */
+ if (is_protected_kvm_enabled()) {
+ get_task_struct(current);
+ vcpu->arch.parent_task = current;
}
- vcpu->arch.host_thread_info = kern_hyp_va(ti);
- vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
-error:
- return ret;
+ return 0;
}
/*
@@ -66,26 +71,27 @@ error:
*
* Here, we just set the correct metadata to indicate that the FPSIMD
* state in the cpu regs (if any) belongs to current on the host.
- *
- * TIF_SVE is backed up here, since it may get clobbered with guest state.
- * This flag is restored by kvm_arch_vcpu_put_fp(vcpu).
*/
void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
BUG_ON(!current->mm);
+ BUG_ON(test_thread_flag(TIF_SVE));
- vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
- KVM_ARM64_HOST_SVE_IN_USE |
- KVM_ARM64_HOST_SVE_ENABLED);
+ vcpu->arch.flags &= ~KVM_ARM64_FP_ENABLED;
vcpu->arch.flags |= KVM_ARM64_FP_HOST;
- if (test_thread_flag(TIF_SVE))
- vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
-
if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
}
+void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu)
+{
+ if (test_thread_flag(TIF_FOREIGN_FPSTATE))
+ vcpu->arch.flags |= KVM_ARM64_FP_FOREIGN_FPSTATE;
+ else
+ vcpu->arch.flags &= ~KVM_ARM64_FP_FOREIGN_FPSTATE;
+}
+
/*
* If the guest FPSIMD state was loaded, update the host's context
* tracking data, mark the CPU FPSIMD regs as dirty and belonging to vcpu
@@ -115,13 +121,11 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
unsigned long flags;
- bool host_has_sve = system_supports_sve();
- bool guest_has_sve = vcpu_has_sve(vcpu);
local_irq_save(flags);
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
- if (guest_has_sve) {
+ if (vcpu_has_sve(vcpu)) {
__vcpu_sys_reg(vcpu, ZCR_EL1) = read_sysreg_el1(SYS_ZCR);
/* Restore the VL that was saved when bound to the CPU */
@@ -131,7 +135,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
}
fpsimd_save_and_flush_cpu_state();
- } else if (has_vhe() && host_has_sve) {
+ } else if (has_vhe() && system_supports_sve()) {
/*
* The FPSIMD/SVE state in the CPU has not been touched, and we
* have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
@@ -145,8 +149,7 @@ void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
}
- update_thread_flag(TIF_SVE,
- vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+ update_thread_flag(TIF_SVE, 0);
local_irq_restore(flags);
}
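
With thread_info no longer shared with EL2, the hypervisor decides whether host FP state must be saved purely from vcpu->arch.flags. A sketch of the resulting per-run flow, reconstructed from the hunks above (ordering assumed from the callers in arm.c):

/*
 * kvm_arch_vcpu_load_fp()       // FP_HOST set, FP_ENABLED cleared,
 *                               // TIF_SVE asserted clear via BUG_ON()
 * kvm_arch_vcpu_ctxflush_fp()   // mirror TIF_FOREIGN_FPSTATE into
 *                               // KVM_ARM64_FP_FOREIGN_FPSTATE
 * <world switch>                // hyp consults only vcpu->arch.flags
 * kvm_arch_vcpu_ctxsync_fp()    // mark guest regs live if FP_ENABLED
 * kvm_arch_vcpu_put_fp()        // save/flush, TIF_SVE forced to 0
 */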
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 5abe0617f2af..e3140abd2e2e 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -82,7 +82,7 @@ static int handle_no_fpsimd(struct kvm_vcpu *vcpu)
*
* WFE: Yield the CPU and come back to this vcpu when the scheduler
* decides to.
- * WFI: Simply call kvm_vcpu_block(), which will halt execution of
+ * WFI: Simply call kvm_vcpu_halt(), which will halt execution of
* world-switches and schedule other host processes until there is an
* incoming IRQ or FIQ to the VM.
*/
@@ -95,8 +95,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu)
} else {
trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
vcpu->stat.wfi_exit_stat++;
- kvm_vcpu_block(vcpu);
- kvm_clear_request(KVM_REQ_UNHALT, vcpu);
+ kvm_vcpu_wfi(vcpu);
}
kvm_incr_pc(vcpu);
@@ -229,6 +228,14 @@ int handle_exit(struct kvm_vcpu *vcpu, int exception_index)
{
struct kvm_run *run = vcpu->run;
+ if (ARM_SERROR_PENDING(exception_index)) {
+ /*
+ * The SError is handled by handle_exit_early(). If the guest
+ * survives it will re-execute the original instruction.
+ */
+ return 1;
+ }
+
exception_index = ARM_EXCEPTION_CODE(exception_index);
switch (exception_index) {
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index b726332eec49..687598e41b21 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -10,4 +10,4 @@ subdir-ccflags-y := -I$(incdir) \
-DDISABLE_BRANCH_PROFILING \
$(DISABLE_STACKLEAK_PLUGIN)
-obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o reserved_mem.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ pgtable.o
diff --git a/arch/arm64/kvm/hyp/exception.c b/arch/arm64/kvm/hyp/exception.c
index 0418399e0a20..c5d009715402 100644
--- a/arch/arm64/kvm/hyp/exception.c
+++ b/arch/arm64/kvm/hyp/exception.c
@@ -38,7 +38,10 @@ static inline void __vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
static void __vcpu_write_spsr(struct kvm_vcpu *vcpu, u64 val)
{
- write_sysreg_el1(val, SYS_SPSR);
+ if (has_vhe())
+ write_sysreg_el1(val, SYS_SPSR);
+ else
+ __vcpu_sys_reg(vcpu, SPSR_EL1) = val;
}
static void __vcpu_write_spsr_abt(struct kvm_vcpu *vcpu, u64 val)
diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
index e950875e31ce..61e6f3ba7b7d 100644
--- a/arch/arm64/kvm/hyp/fpsimd.S
+++ b/arch/arm64/kvm/hyp/fpsimd.S
@@ -25,9 +25,3 @@ SYM_FUNC_START(__sve_restore_state)
sve_load 0, x1, x2, 3
ret
SYM_FUNC_END(__sve_restore_state)
-
-SYM_FUNC_START(__sve_save_state)
- mov x2, #1
- sve_save 0, x1, x2, 3
- ret
-SYM_FUNC_END(__sve_save_state)
diff --git a/arch/arm64/kvm/hyp/hyp-constants.c b/arch/arm64/kvm/hyp/hyp-constants.c
new file mode 100644
index 000000000000..b3742a6691e8
--- /dev/null
+++ b/arch/arm64/kvm/hyp/hyp-constants.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/kbuild.h>
+#include <nvhe/memory.h>
+
+int main(void)
+{
+ DEFINE(STRUCT_HYP_PAGE_SIZE, sizeof(struct hyp_page));
+ return 0;
+}
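
Fed through the kbuild offsets rule added in the Makefile above, this generates a header along the following lines (a sketch; the value is illustrative and depends on the actual struct hyp_page layout):

#ifndef __HYP_CONSTANTS_H__
#define __HYP_CONSTANTS_H__
#define STRUCT_HYP_PAGE_SIZE 4 /* sizeof(struct hyp_page) */
#endif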
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 96c5f3fb7838..701cfb964905 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -29,7 +29,6 @@
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
-#include <asm/thread_info.h>
struct kvm_exception_table_entry {
int insn, fixup;
@@ -49,7 +48,7 @@ static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
* trap the accesses.
*/
if (!system_supports_fpsimd() ||
- vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
+ vcpu->arch.flags & KVM_ARM64_FP_FOREIGN_FPSTATE)
vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
KVM_ARM64_FP_HOST);
@@ -143,16 +142,6 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}
-static inline void __hyp_sve_save_host(struct kvm_vcpu *vcpu)
-{
- struct thread_struct *thread;
-
- thread = container_of(vcpu->arch.host_fpsimd_state, struct thread_struct,
- uw.fpsimd_state);
-
- __sve_save_state(sve_pffr(thread), &vcpu->arch.host_fpsimd_state->fpsr);
-}
-
static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
@@ -169,21 +158,14 @@ static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
*/
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
- bool sve_guest, sve_host;
+ bool sve_guest;
u8 esr_ec;
u64 reg;
if (!system_supports_fpsimd())
return false;
- if (system_supports_sve()) {
- sve_guest = vcpu_has_sve(vcpu);
- sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
- } else {
- sve_guest = false;
- sve_host = false;
- }
-
+ sve_guest = vcpu_has_sve(vcpu);
esr_ec = kvm_vcpu_trap_get_class(vcpu);
/* Don't handle SVE traps for non-SVE vcpus here: */
@@ -207,11 +189,7 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
isb();
if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
- if (sve_host)
- __hyp_sve_save_host(vcpu);
- else
- __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
-
+ __fpsimd_save_state(vcpu->arch.host_fpsimd_state);
vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
}
@@ -424,6 +402,24 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
return false;
}
+static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+ /*
+ * Check for the conditions of Cortex-A510's #2077057. When these occur
+ * SPSR_EL2 can't be trusted, but isn't needed either as it is
+ * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
+ * Are we single-stepping the guest, and took a PAC exception from the
+ * active-not-pending state?
+ */
+ if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
+ vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
+ *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
+ ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
+ write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);
+
+ vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+}
+
/*
* Return true when we were able to fixup the guest exit and should return to
* the guest, false when we should restore the host state and return to the
@@ -435,7 +431,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
* Save PSTATE early so that we can evaluate the vcpu mode
* early on.
*/
- vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
+ synchronize_vcpu_pstate(vcpu, exit_code);
/*
* Check whether we want to repaint the state one way or
@@ -446,7 +442,8 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
- if (ARM_SERROR_PENDING(*exit_code)) {
+ if (ARM_SERROR_PENDING(*exit_code) &&
+ ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
/*
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index b58c910babaf..80e99836eac7 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -24,6 +24,11 @@ enum pkvm_page_state {
PKVM_PAGE_OWNED = 0ULL,
PKVM_PAGE_SHARED_OWNED = KVM_PGTABLE_PROT_SW0,
PKVM_PAGE_SHARED_BORROWED = KVM_PGTABLE_PROT_SW1,
+ __PKVM_PAGE_RESERVED = KVM_PGTABLE_PROT_SW0 |
+ KVM_PGTABLE_PROT_SW1,
+
+ /* Meta-states which aren't encoded directly in the PTE's SW bits */
+ PKVM_NOPAGE,
};
#define PKVM_PAGE_STATE_PROT_MASK (KVM_PGTABLE_PROT_SW0 | KVM_PGTABLE_PROT_SW1)
@@ -50,6 +55,7 @@ extern const u8 pkvm_hyp_id;
int __pkvm_prot_finalize(void);
int __pkvm_host_share_hyp(u64 pfn);
+int __pkvm_host_unshare_hyp(u64 pfn);
bool addr_is_memory(phys_addr_t phys);
int host_stage2_idmap_locked(phys_addr_t addr, u64 size, enum kvm_pgtable_prot prot);
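
The three encodable states live entirely in the two PTE software bits, which is why PKVM_NOPAGE is introduced as a meta-state outside that mask. A userspace model of the round-trip, patterned on the pkvm_mkstate()/pkvm_getstate() helpers used elsewhere in this diff (the bit positions and helper names below are assumptions for illustration):

#include <assert.h>
#include <stdint.h>

#define SW0 (1ULL << 55)	/* assumed software-bit positions */
#define SW1 (1ULL << 56)
#define STATE_MASK (SW0 | SW1)

#define PAGE_OWNED		0ULL
#define PAGE_SHARED_OWNED	SW0
#define PAGE_SHARED_BORROWED	SW1

static uint64_t mkstate(uint64_t prot, uint64_t state)
{
	return (prot & ~STATE_MASK) | state;	/* prot bits untouched */
}

static uint64_t getstate(uint64_t prot)
{
	return prot & STATE_MASK;
}

int main(void)
{
	uint64_t prot = 0x7;	/* stand-in for RWX prot bits */

	prot = mkstate(prot, PAGE_SHARED_OWNED);
	assert(getstate(prot) == PAGE_SHARED_OWNED);
	assert((prot & 0x7) == 0x7);		/* prot survives */
	prot = mkstate(prot, PAGE_OWNED);	/* unshare */
	assert(getstate(prot) == PAGE_OWNED);
	return 0;
}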
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
index c9a8f535212e..2d08510c6cc1 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -10,13 +10,8 @@
#include <nvhe/memory.h>
#include <nvhe/spinlock.h>
-#define HYP_MEMBLOCK_REGIONS 128
-extern struct memblock_region kvm_nvhe_sym(hyp_memory)[];
-extern unsigned int kvm_nvhe_sym(hyp_memblock_nr);
extern struct kvm_pgtable pkvm_pgtable;
extern hyp_spinlock_t pkvm_pgd_lock;
-extern struct hyp_pool hpool;
-extern u64 __io_map_base;
int hyp_create_idmap(u32 hyp_va_bits);
int hyp_map_vectors(void);
@@ -39,58 +34,4 @@ static inline void hyp_vmemmap_range(phys_addr_t phys, unsigned long size,
*end = ALIGN(*end, PAGE_SIZE);
}
-static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
-{
- unsigned long total = 0, i;
-
- /* Provision the worst case scenario */
- for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
- nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
- total += nr_pages;
- }
-
- return total;
-}
-
-static inline unsigned long __hyp_pgtable_total_pages(void)
-{
- unsigned long res = 0, i;
-
- /* Cover all of memory with page-granularity */
- for (i = 0; i < kvm_nvhe_sym(hyp_memblock_nr); i++) {
- struct memblock_region *reg = &kvm_nvhe_sym(hyp_memory)[i];
- res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
- }
-
- return res;
-}
-
-static inline unsigned long hyp_s1_pgtable_pages(void)
-{
- unsigned long res;
-
- res = __hyp_pgtable_total_pages();
-
- /* Allow 1 GiB for private mappings */
- res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
-
- return res;
-}
-
-static inline unsigned long host_s2_pgtable_pages(void)
-{
- unsigned long res;
-
- /*
- * Include an extra 16 pages to safely upper-bound the worst case of
- * concatenated pgds.
- */
- res = __hyp_pgtable_total_pages() + 16;
-
- /* Allow 1 GiB for MMIO mappings */
- res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
-
- return res;
-}
-
#endif /* __KVM_HYP_MM_H */
diff --git a/arch/arm64/kvm/hyp/nvhe/early_alloc.c b/arch/arm64/kvm/hyp/nvhe/early_alloc.c
index 1306c430ab87..00de04153cc6 100644
--- a/arch/arm64/kvm/hyp/nvhe/early_alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/early_alloc.c
@@ -43,6 +43,9 @@ void *hyp_early_alloc_page(void *arg)
return hyp_early_alloc_contig(1);
}
+static void hyp_early_alloc_get_page(void *addr) { }
+static void hyp_early_alloc_put_page(void *addr) { }
+
void hyp_early_alloc_init(void *virt, unsigned long size)
{
base = cur = (unsigned long)virt;
@@ -51,4 +54,6 @@ void hyp_early_alloc_init(void *virt, unsigned long size)
hyp_early_alloc_mm_ops.zalloc_page = hyp_early_alloc_page;
hyp_early_alloc_mm_ops.phys_to_virt = hyp_phys_to_virt;
hyp_early_alloc_mm_ops.virt_to_phys = hyp_virt_to_phys;
+ hyp_early_alloc_mm_ops.get_page = hyp_early_alloc_get_page;
+ hyp_early_alloc_mm_ops.put_page = hyp_early_alloc_put_page;
}
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index b096bf009144..5e2197db0d32 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -147,6 +147,13 @@ static void handle___pkvm_host_share_hyp(struct kvm_cpu_context *host_ctxt)
cpu_reg(host_ctxt, 1) = __pkvm_host_share_hyp(pfn);
}
+static void handle___pkvm_host_unshare_hyp(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(u64, pfn, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_host_unshare_hyp(pfn);
+}
+
static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(phys_addr_t, phys, host_ctxt, 1);
@@ -184,6 +191,7 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__pkvm_prot_finalize),
HANDLE_FUNC(__pkvm_host_share_hyp),
+ HANDLE_FUNC(__pkvm_host_unshare_hyp),
HANDLE_FUNC(__kvm_adjust_pc),
HANDLE_FUNC(__kvm_vcpu_run),
HANDLE_FUNC(__kvm_flush_vm_context),
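
The new unshare handler follows the fixed shape every entry in this table has: arguments come out of the caller's register file via DECLARE_REG(), the return value goes back in register 1, and dispatch is a table indexed by the hypercall id carried in register 0. A toy model of that shape (plain C; the register layout, id placement, and error value are illustrative only):

#include <stdint.h>
#include <stdio.h>

struct cpu_context { uint64_t regs[8]; };

#define DECLARE_REG(type, name, ctxt, r) \
	type name = (type)(ctxt)->regs[(r)]

static void handle_share(struct cpu_context *ctxt)
{
	DECLARE_REG(uint64_t, pfn, ctxt, 1);

	/* Stand-in for the real share work; 0 means success. */
	ctxt->regs[1] = (pfn != 0) ? 0 : (uint64_t)-22;
}

typedef void (*hcall_t)(struct cpu_context *);

static const hcall_t host_hcall[] = {
	handle_share,			/* index == hypercall id */
};

int main(void)
{
	struct cpu_context ctxt = { .regs = { 0, 42 } };
	unsigned long id = (unsigned long)ctxt.regs[0];

	host_hcall[id](&ctxt);		/* dispatch on the id from reg 0 */
	printf("ret = %lld\n", (long long)(int64_t)ctxt.regs[1]);
	return 0;
}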
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index c1a90dd022b8..674f10564373 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -9,6 +9,7 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>
#include <asm/stage2_pgtable.h>
#include <hyp/fault.h>
@@ -27,6 +28,26 @@ static struct hyp_pool host_s2_pool;
const u8 pkvm_hyp_id = 1;
+static void host_lock_component(void)
+{
+ hyp_spin_lock(&host_kvm.lock);
+}
+
+static void host_unlock_component(void)
+{
+ hyp_spin_unlock(&host_kvm.lock);
+}
+
+static void hyp_lock_component(void)
+{
+ hyp_spin_lock(&pkvm_pgd_lock);
+}
+
+static void hyp_unlock_component(void)
+{
+ hyp_spin_unlock(&pkvm_pgd_lock);
+}
+
static void *host_s2_zalloc_pages_exact(size_t size)
{
void *addr = hyp_alloc_pages(&host_s2_pool, get_order(size));
@@ -103,19 +124,19 @@ int kvm_host_prepare_stage2(void *pgt_pool_base)
prepare_host_vtcr();
hyp_spin_lock_init(&host_kvm.lock);
+ mmu->arch = &host_kvm.arch;
ret = prepare_s2_pool(pgt_pool_base);
if (ret)
return ret;
- ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, &host_kvm.arch,
+ ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
&host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
host_stage2_force_pte_cb);
if (ret)
return ret;
mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
- mmu->arch = &host_kvm.arch;
mmu->pgt = &host_kvm.pgt;
WRITE_ONCE(mmu->vmid.vmid_gen, 0);
WRITE_ONCE(mmu->vmid.vmid, 0);
@@ -338,116 +359,446 @@ static int host_stage2_idmap(u64 addr)
prot = is_memory ? PKVM_HOST_MEM_PROT : PKVM_HOST_MMIO_PROT;
- hyp_spin_lock(&host_kvm.lock);
+ host_lock_component();
ret = host_stage2_adjust_range(addr, &range);
if (ret)
goto unlock;
ret = host_stage2_idmap_locked(range.start, range.end - range.start, prot);
unlock:
- hyp_spin_unlock(&host_kvm.lock);
+ host_unlock_component();
return ret;
}
-static inline bool check_prot(enum kvm_pgtable_prot prot,
- enum kvm_pgtable_prot required,
- enum kvm_pgtable_prot denied)
+void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
- return (prot & (required | denied)) == required;
+ struct kvm_vcpu_fault_info fault;
+ u64 esr, addr;
+ int ret = 0;
+
+ esr = read_sysreg_el2(SYS_ESR);
+ BUG_ON(!__get_fault_info(esr, &fault));
+
+ addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
+ ret = host_stage2_idmap(addr);
+ BUG_ON(ret && ret != -EAGAIN);
}
-int __pkvm_host_share_hyp(u64 pfn)
+/* This corresponds to locking order */
+enum pkvm_component_id {
+ PKVM_ID_HOST,
+ PKVM_ID_HYP,
+};
+
+struct pkvm_mem_transition {
+ u64 nr_pages;
+
+ struct {
+ enum pkvm_component_id id;
+ /* Address in the initiator's address space */
+ u64 addr;
+
+ union {
+ struct {
+ /* Address in the completer's address space */
+ u64 completer_addr;
+ } host;
+ };
+ } initiator;
+
+ struct {
+ enum pkvm_component_id id;
+ } completer;
+};
+
+struct pkvm_mem_share {
+ const struct pkvm_mem_transition tx;
+ const enum kvm_pgtable_prot completer_prot;
+};
+
+struct check_walk_data {
+ enum pkvm_page_state desired;
+ enum pkvm_page_state (*get_page_state)(kvm_pte_t pte);
+};
+
+static int __check_page_state_visitor(u64 addr, u64 end, u32 level,
+ kvm_pte_t *ptep,
+ enum kvm_pgtable_walk_flags flag,
+ void * const arg)
{
- phys_addr_t addr = hyp_pfn_to_phys(pfn);
- enum kvm_pgtable_prot prot, cur;
- void *virt = __hyp_va(addr);
- enum pkvm_page_state state;
- kvm_pte_t pte;
- int ret;
+ struct check_walk_data *d = arg;
+ kvm_pte_t pte = *ptep;
- if (!addr_is_memory(addr))
+ if (kvm_pte_valid(pte) && !addr_is_memory(kvm_pte_to_phys(pte)))
return -EINVAL;
- hyp_spin_lock(&host_kvm.lock);
- hyp_spin_lock(&pkvm_pgd_lock);
+ return d->get_page_state(pte) == d->desired ? 0 : -EPERM;
+}
+
+static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
+ struct check_walk_data *data)
+{
+ struct kvm_pgtable_walker walker = {
+ .cb = __check_page_state_visitor,
+ .arg = data,
+ .flags = KVM_PGTABLE_WALK_LEAF,
+ };
+
+ return kvm_pgtable_walk(pgt, addr, size, &walker);
+}
+
+static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
+{
+ if (!kvm_pte_valid(pte) && pte)
+ return PKVM_NOPAGE;
+
+ return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
+}
+
+static int __host_check_page_state_range(u64 addr, u64 size,
+ enum pkvm_page_state state)
+{
+ struct check_walk_data d = {
+ .desired = state,
+ .get_page_state = host_get_page_state,
+ };
+
+ hyp_assert_lock_held(&host_kvm.lock);
+ return check_page_state_range(&host_kvm.pgt, addr, size, &d);
+}
+
+static int __host_set_page_state_range(u64 addr, u64 size,
+ enum pkvm_page_state state)
+{
+ enum kvm_pgtable_prot prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, state);
+
+ return host_stage2_idmap_locked(addr, size, prot);
+}
+
+static int host_request_owned_transition(u64 *completer_addr,
+ const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+ u64 addr = tx->initiator.addr;
+
+ *completer_addr = tx->initiator.host.completer_addr;
+ return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
+}
+
+static int host_request_unshare(u64 *completer_addr,
+ const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+ u64 addr = tx->initiator.addr;
+
+ *completer_addr = tx->initiator.host.completer_addr;
+ return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
+}
+
+static int host_initiate_share(u64 *completer_addr,
+ const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+ u64 addr = tx->initiator.addr;
+
+ *completer_addr = tx->initiator.host.completer_addr;
+ return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
+}
+
+static int host_initiate_unshare(u64 *completer_addr,
+ const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+ u64 addr = tx->initiator.addr;
+
+ *completer_addr = tx->initiator.host.completer_addr;
+ return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
+}
+
+static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
+{
+ if (!kvm_pte_valid(pte))
+ return PKVM_NOPAGE;
+
+ return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
+}
+
+static int __hyp_check_page_state_range(u64 addr, u64 size,
+ enum pkvm_page_state state)
+{
+ struct check_walk_data d = {
+ .desired = state,
+ .get_page_state = hyp_get_page_state,
+ };
+
+ hyp_assert_lock_held(&pkvm_pgd_lock);
+ return check_page_state_range(&pkvm_pgtable, addr, size, &d);
+}
+
+static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
+{
+ return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
+ tx->initiator.id != PKVM_ID_HOST);
+}
+
+static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
+ enum kvm_pgtable_prot perms)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+
+ if (perms != PAGE_HYP)
+ return -EPERM;
+
+ if (__hyp_ack_skip_pgtable_check(tx))
+ return 0;
+
+ return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
+}
+
+static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+
+ if (__hyp_ack_skip_pgtable_check(tx))
+ return 0;
+
+ return __hyp_check_page_state_range(addr, size,
+ PKVM_PAGE_SHARED_BORROWED);
+}
+
+static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
+ enum kvm_pgtable_prot perms)
+{
+ void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
+ enum kvm_pgtable_prot prot;
+
+ prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
+ return pkvm_create_mappings_locked(start, end, prot);
+}
+
+static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+ int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);
+
+ return (ret != size) ? -EFAULT : 0;
+}
+
+static int check_share(struct pkvm_mem_share *share)
+{
+ const struct pkvm_mem_transition *tx = &share->tx;
+ u64 completer_addr;
+ int ret;
+
+ switch (tx->initiator.id) {
+ case PKVM_ID_HOST:
+ ret = host_request_owned_transition(&completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
- ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, NULL);
if (ret)
- goto unlock;
- if (!pte)
- goto map_shared;
+ return ret;
- /*
- * Check attributes in the host stage-2 PTE. We need the page to be:
- * - mapped RWX as we're sharing memory;
- * - not borrowed, as that implies absence of ownership.
- * Otherwise, we can't let it got through
- */
- cur = kvm_pgtable_stage2_pte_prot(pte);
- prot = pkvm_mkstate(0, PKVM_PAGE_SHARED_BORROWED);
- if (!check_prot(cur, PKVM_HOST_MEM_PROT, prot)) {
- ret = -EPERM;
- goto unlock;
+ switch (tx->completer.id) {
+ case PKVM_ID_HYP:
+ ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
+ break;
+ default:
+ ret = -EINVAL;
}
- state = pkvm_getstate(cur);
- if (state == PKVM_PAGE_OWNED)
- goto map_shared;
+ return ret;
+}
- /*
- * Tolerate double-sharing the same page, but this requires
- * cross-checking the hypervisor stage-1.
- */
- if (state != PKVM_PAGE_SHARED_OWNED) {
- ret = -EPERM;
- goto unlock;
+static int __do_share(struct pkvm_mem_share *share)
+{
+ const struct pkvm_mem_transition *tx = &share->tx;
+ u64 completer_addr;
+ int ret;
+
+ switch (tx->initiator.id) {
+ case PKVM_ID_HOST:
+ ret = host_initiate_share(&completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
}
- ret = kvm_pgtable_get_leaf(&pkvm_pgtable, (u64)virt, &pte, NULL);
if (ret)
- goto unlock;
+ return ret;
- /*
- * If the page has been shared with the hypervisor, it must be
- * already mapped as SHARED_BORROWED in its stage-1.
- */
- cur = kvm_pgtable_hyp_pte_prot(pte);
- prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
- if (!check_prot(cur, prot, ~prot))
- ret = -EPERM;
- goto unlock;
+ switch (tx->completer.id) {
+ case PKVM_ID_HYP:
+ ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
+ break;
+ default:
+ ret = -EINVAL;
+ }
-map_shared:
- /*
- * If the page is not yet shared, adjust mappings in both page-tables
- * while both locks are held.
- */
- prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_SHARED_BORROWED);
- ret = pkvm_create_mappings_locked(virt, virt + PAGE_SIZE, prot);
- BUG_ON(ret);
+ return ret;
+}
+
+/*
+ * do_share():
+ *
+ * The page owner grants access to another component with a given set
+ * of permissions.
+ *
+ * Initiator: OWNED => SHARED_OWNED
+ * Completer: NOPAGE => SHARED_BORROWED
+ */
+static int do_share(struct pkvm_mem_share *share)
+{
+ int ret;
- prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_OWNED);
- ret = host_stage2_idmap_locked(addr, PAGE_SIZE, prot);
- BUG_ON(ret);
+ ret = check_share(share);
+ if (ret)
+ return ret;
-unlock:
- hyp_spin_unlock(&pkvm_pgd_lock);
- hyp_spin_unlock(&host_kvm.lock);
+ return WARN_ON(__do_share(share));
+}
+
+static int check_unshare(struct pkvm_mem_share *share)
+{
+ const struct pkvm_mem_transition *tx = &share->tx;
+ u64 completer_addr;
+ int ret;
+
+ switch (tx->initiator.id) {
+ case PKVM_ID_HOST:
+ ret = host_request_unshare(&completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ switch (tx->completer.id) {
+ case PKVM_ID_HYP:
+ ret = hyp_ack_unshare(completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
return ret;
}
-void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
+static int __do_unshare(struct pkvm_mem_share *share)
{
- struct kvm_vcpu_fault_info fault;
- u64 esr, addr;
- int ret = 0;
+ const struct pkvm_mem_transition *tx = &share->tx;
+ u64 completer_addr;
+ int ret;
- esr = read_sysreg_el2(SYS_ESR);
- BUG_ON(!__get_fault_info(esr, &fault));
+ switch (tx->initiator.id) {
+ case PKVM_ID_HOST:
+ ret = host_initiate_unshare(&completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
- addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
- ret = host_stage2_idmap(addr);
- BUG_ON(ret && ret != -EAGAIN);
+ if (ret)
+ return ret;
+
+ switch (tx->completer.id) {
+ case PKVM_ID_HYP:
+ ret = hyp_complete_unshare(completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * do_unshare():
+ *
+ * The page owner revokes access from another component for a range of
+ * pages which were previously shared using do_share().
+ *
+ * Initiator: SHARED_OWNED => OWNED
+ * Completer: SHARED_BORROWED => NOPAGE
+ */
+static int do_unshare(struct pkvm_mem_share *share)
+{
+ int ret;
+
+ ret = check_unshare(share);
+ if (ret)
+ return ret;
+
+ return WARN_ON(__do_unshare(share));
+}
+
+int __pkvm_host_share_hyp(u64 pfn)
+{
+ int ret;
+ u64 host_addr = hyp_pfn_to_phys(pfn);
+ u64 hyp_addr = (u64)__hyp_va(host_addr);
+ struct pkvm_mem_share share = {
+ .tx = {
+ .nr_pages = 1,
+ .initiator = {
+ .id = PKVM_ID_HOST,
+ .addr = host_addr,
+ .host = {
+ .completer_addr = hyp_addr,
+ },
+ },
+ .completer = {
+ .id = PKVM_ID_HYP,
+ },
+ },
+ .completer_prot = PAGE_HYP,
+ };
+
+ host_lock_component();
+ hyp_lock_component();
+
+ ret = do_share(&share);
+
+ hyp_unlock_component();
+ host_unlock_component();
+
+ return ret;
+}
+
+int __pkvm_host_unshare_hyp(u64 pfn)
+{
+ int ret;
+ u64 host_addr = hyp_pfn_to_phys(pfn);
+ u64 hyp_addr = (u64)__hyp_va(host_addr);
+ struct pkvm_mem_share share = {
+ .tx = {
+ .nr_pages = 1,
+ .initiator = {
+ .id = PKVM_ID_HOST,
+ .addr = host_addr,
+ .host = {
+ .completer_addr = hyp_addr,
+ },
+ },
+ .completer = {
+ .id = PKVM_ID_HYP,
+ },
+ },
+ .completer_prot = PAGE_HYP,
+ };
+
+ host_lock_component();
+ hyp_lock_component();
+
+ ret = do_unshare(&share);
+
+ hyp_unlock_component();
+ host_unlock_component();
+
+ return ret;
}
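
do_share()/do_unshare() above are a check-then-commit pair executed with both component locks held, so the states validated by check_share()/check_unshare() cannot change before __do_share()/__do_unshare() commit them. A compilable model of exactly the documented transitions (initiator OWNED <=> SHARED_OWNED, completer NOPAGE <=> SHARED_BORROWED), with locking elided:

#include <assert.h>

enum state { NOPAGE, OWNED, SHARED_OWNED, SHARED_BORROWED };

struct page { enum state host, hyp; };

static int check_share(const struct page *p)
{
	return (p->host == OWNED && p->hyp == NOPAGE) ? 0 : -1;
}

static int do_share(struct page *p)
{
	if (check_share(p))
		return -1;
	p->host = SHARED_OWNED;		/* initiator: OWNED => SHARED_OWNED */
	p->hyp = SHARED_BORROWED;	/* completer: NOPAGE => SHARED_BORROWED */
	return 0;
}

static int do_unshare(struct page *p)
{
	if (p->host != SHARED_OWNED || p->hyp != SHARED_BORROWED)
		return -1;
	p->host = OWNED;
	p->hyp = NOPAGE;
	return 0;
}

int main(void)
{
	struct page p = { .host = OWNED, .hyp = NOPAGE };

	assert(do_share(&p) == 0);
	assert(do_share(&p) != 0);	/* double-share is refused */
	assert(do_unshare(&p) == 0);
	assert(do_unshare(&p) != 0);	/* already unshared */
	return 0;
}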
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 2fabeceb889a..526a7d6fa86f 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -8,6 +8,7 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>
#include <asm/spectre.h>
#include <nvhe/early_alloc.h>
@@ -18,11 +19,12 @@
struct kvm_pgtable pkvm_pgtable;
hyp_spinlock_t pkvm_pgd_lock;
-u64 __io_map_base;
struct memblock_region hyp_memory[HYP_MEMBLOCK_REGIONS];
unsigned int hyp_memblock_nr;
+static u64 __io_map_base;
+
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
unsigned long phys, enum kvm_pgtable_prot prot)
{
diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
index 0bd7701ad1df..543cad6c376a 100644
--- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -241,7 +241,7 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
int i;
hyp_spin_lock_init(&pool->lock);
- pool->max_order = min(MAX_ORDER, get_order(nr_pages << PAGE_SHIFT));
+ pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
for (i = 0; i < pool->max_order; i++)
INIT_LIST_HEAD(&pool->free_area[i]);
pool->range_start = phys;
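
The bump to nr_pages + 1 matters when nr_pages is an exact power of two: the fully merged pool is then a single buddy block of order log2(nr_pages), while the init loop above only creates free lists for orders 0..max_order-1, so the old bound left no list to hold that block. A worked check of that reading, with get_order() modelled as round-up-to-order:

#include <stdio.h>

/* Smallest order o such that (1 << o) pages covers the request. */
static int get_order_pages(unsigned long pages)
{
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	unsigned long nr_pages = 16;	/* power-of-two pool */

	printf("old max_order = %d\n", get_order_pages(nr_pages));	/* 4: lists 0..3 */
	printf("new max_order = %d\n", get_order_pages(nr_pages + 1));	/* 5: lists 0..4 */
	return 0;
}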
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 578f71798c2e..27af337f9fea 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -8,6 +8,7 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
+#include <asm/kvm_pkvm.h>
#include <nvhe/early_alloc.h>
#include <nvhe/fixed_config.h>
@@ -17,7 +18,6 @@
#include <nvhe/mm.h>
#include <nvhe/trap_handler.h>
-struct hyp_pool hpool;
unsigned long hyp_nr_cpus;
#define hyp_percpu_size ((unsigned long)__per_cpu_end - \
@@ -27,6 +27,7 @@ static void *vmemmap_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
+static struct hyp_pool hpool;
static int divide_memory_pool(void *virt, unsigned long size)
{
@@ -165,6 +166,7 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
enum kvm_pgtable_walk_flags flag,
void * const arg)
{
+ struct kvm_pgtable_mm_ops *mm_ops = arg;
enum kvm_pgtable_prot prot;
enum pkvm_page_state state;
kvm_pte_t pte = *ptep;
@@ -173,6 +175,15 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
if (!kvm_pte_valid(pte))
return 0;
+ /*
+ * Fix-up the refcount for the page-table pages as the early allocator
+ * was unable to access the hyp_vmemmap and so the buddy allocator has
+ * initialised the refcount to '1'.
+ */
+ mm_ops->get_page(ptep);
+ if (flag != KVM_PGTABLE_WALK_LEAF)
+ return 0;
+
if (level != (KVM_PGTABLE_MAX_LEVELS - 1))
return -EINVAL;
@@ -205,7 +216,8 @@ static int finalize_host_mappings(void)
{
struct kvm_pgtable_walker walker = {
.cb = finalize_host_mappings_walker,
- .flags = KVM_PGTABLE_WALK_LEAF,
+ .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
+ .arg = pkvm_pgtable.mm_ops,
};
int i, ret;
@@ -240,19 +252,20 @@ void __noreturn __pkvm_init_finalise(void)
if (ret)
goto out;
- ret = finalize_host_mappings();
- if (ret)
- goto out;
-
pkvm_pgtable_mm_ops = (struct kvm_pgtable_mm_ops) {
.zalloc_page = hyp_zalloc_hyp_page,
.phys_to_virt = hyp_phys_to_virt,
.virt_to_phys = hyp_virt_to_phys,
.get_page = hpool_get_page,
.put_page = hpool_put_page,
+ .page_count = hyp_page_count,
};
pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
+ ret = finalize_host_mappings();
+ if (ret)
+ goto out;
+
out:
/*
* We tail-called to here from handle___pkvm_init() and will not return,
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index d13115a12434..6410d21d8695 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -25,7 +25,6 @@
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
-#include <asm/thread_info.h>
#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>
diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c
index f8ceebe4982e..2cb3867eb7c2 100644
--- a/arch/arm64/kvm/hyp/pgtable.c
+++ b/arch/arm64/kvm/hyp/pgtable.c
@@ -383,21 +383,6 @@ enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
return prot;
}
-static bool hyp_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
-{
- /*
- * Tolerate KVM recreating the exact same mapping, or changing software
- * bits if the existing mapping was valid.
- */
- if (old == new)
- return false;
-
- if (!kvm_pte_valid(old))
- return true;
-
- return !WARN_ON((old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW);
-}
-
static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
kvm_pte_t *ptep, struct hyp_map_data *data)
{
@@ -407,11 +392,16 @@ static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
if (!kvm_block_mapping_supported(addr, end, phys, level))
return false;
+ data->phys += granule;
new = kvm_init_valid_leaf_pte(phys, data->attr, level);
- if (hyp_pte_needs_update(old, new))
- smp_store_release(ptep, new);
+ if (old == new)
+ return true;
+ if (!kvm_pte_valid(old))
+ data->mm_ops->get_page(ptep);
+ else if (WARN_ON((old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW))
+ return false;
- data->phys += granule;
+ smp_store_release(ptep, new);
return true;
}
@@ -433,6 +423,7 @@ static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
return -ENOMEM;
kvm_set_table_pte(ptep, childp, mm_ops);
+ mm_ops->get_page(ptep);
return 0;
}
@@ -460,6 +451,69 @@ int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
return ret;
}
+struct hyp_unmap_data {
+ u64 unmapped;
+ struct kvm_pgtable_mm_ops *mm_ops;
+};
+
+static int hyp_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
+ enum kvm_pgtable_walk_flags flag, void * const arg)
+{
+ kvm_pte_t pte = *ptep, *childp = NULL;
+ u64 granule = kvm_granule_size(level);
+ struct hyp_unmap_data *data = arg;
+ struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
+
+ if (!kvm_pte_valid(pte))
+ return -EINVAL;
+
+ if (kvm_pte_table(pte, level)) {
+ childp = kvm_pte_follow(pte, mm_ops);
+
+ if (mm_ops->page_count(childp) != 1)
+ return 0;
+
+ kvm_clear_pte(ptep);
+ dsb(ishst);
+ __tlbi_level(vae2is, __TLBI_VADDR(addr, 0), level);
+ } else {
+ if (end - addr < granule)
+ return -EINVAL;
+
+ kvm_clear_pte(ptep);
+ dsb(ishst);
+ __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), level);
+ data->unmapped += granule;
+ }
+
+ dsb(ish);
+ isb();
+ mm_ops->put_page(ptep);
+
+ if (childp)
+ mm_ops->put_page(childp);
+
+ return 0;
+}
+
+u64 kvm_pgtable_hyp_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
+{
+ struct hyp_unmap_data unmap_data = {
+ .mm_ops = pgt->mm_ops,
+ };
+ struct kvm_pgtable_walker walker = {
+ .cb = hyp_unmap_walker,
+ .arg = &unmap_data,
+ .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
+ };
+
+ if (!pgt->mm_ops->page_count)
+ return 0;
+
+ kvm_pgtable_walk(pgt, addr, size, &walker);
+ return unmap_data.unmapped;
+}
+
int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
struct kvm_pgtable_mm_ops *mm_ops)
{
@@ -482,8 +536,16 @@ static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
enum kvm_pgtable_walk_flags flag, void * const arg)
{
struct kvm_pgtable_mm_ops *mm_ops = arg;
+ kvm_pte_t pte = *ptep;
+
+ if (!kvm_pte_valid(pte))
+ return 0;
+
+ mm_ops->put_page(ptep);
+
+ if (kvm_pte_table(pte, level))
+ mm_ops->put_page(kvm_pte_follow(pte, mm_ops));
- mm_ops->put_page((void *)kvm_pte_follow(*ptep, mm_ops));
return 0;
}
@@ -491,7 +553,7 @@ void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
{
struct kvm_pgtable_walker walker = {
.cb = hyp_free_walker,
- .flags = KVM_PGTABLE_WALK_TABLE_POST,
+ .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
.arg = pgt->mm_ops,
};
@@ -921,13 +983,9 @@ static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
*/
stage2_put_pte(ptep, mmu, addr, level, mm_ops);
- if (need_flush) {
- kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops);
-
- dcache_clean_inval_poc((unsigned long)pte_follow,
- (unsigned long)pte_follow +
- kvm_granule_size(level));
- }
+ if (need_flush && mm_ops->dcache_clean_inval_poc)
+ mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
+ kvm_granule_size(level));
if (childp)
mm_ops->put_page(childp);
@@ -1089,15 +1147,13 @@ static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
struct kvm_pgtable *pgt = arg;
struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
kvm_pte_t pte = *ptep;
- kvm_pte_t *pte_follow;
if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
return 0;
- pte_follow = kvm_pte_follow(pte, mm_ops);
- dcache_clean_inval_poc((unsigned long)pte_follow,
- (unsigned long)pte_follow +
- kvm_granule_size(level));
+ if (mm_ops->dcache_clean_inval_poc)
+ mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
+ kvm_granule_size(level));
return 0;
}
@@ -1116,13 +1172,13 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
}
-int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
+int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu,
struct kvm_pgtable_mm_ops *mm_ops,
enum kvm_pgtable_stage2_flags flags,
kvm_pgtable_force_pte_cb_t force_pte_cb)
{
size_t pgd_sz;
- u64 vtcr = arch->vtcr;
+ u64 vtcr = mmu->arch->vtcr;
u32 ia_bits = VTCR_EL2_IPA(vtcr);
u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;
@@ -1135,7 +1191,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
pgt->ia_bits = ia_bits;
pgt->start_level = start_level;
pgt->mm_ops = mm_ops;
- pgt->mmu = &arch->mmu;
+ pgt->mmu = mmu;
pgt->flags = flags;
pgt->force_pte_cb = force_pte_cb;
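
The get_page()/put_page() calls threaded through the map path give every table page a count of 1 (its allocation reference, per the buddy-allocator comment in the setup.c hunk earlier) plus one per valid entry; that is what lets hyp_unmap_walker() clip a table once page_count() drops back to 1. A small model of that counting (single level, fixed four entries, names illustrative):

#include <assert.h>
#include <stdlib.h>

struct table {
	int refcount;	/* 1 from allocation + one per valid entry */
	int entries[4];
};

static struct table *table_new(void)
{
	struct table *t = calloc(1, sizeof(*t));

	assert(t);
	t->refcount = 1;	/* buddy allocator hands pages out at 1 */
	return t;
}

static void map_entry(struct table *t, int idx)
{
	assert(!t->entries[idx]);
	t->entries[idx] = 1;
	t->refcount++;			/* get_page() on the table page */
}

static struct table *unmap_entry(struct table *t, int idx)
{
	assert(t->entries[idx]);
	t->entries[idx] = 0;
	t->refcount--;			/* put_page() for the dead entry */
	if (t->refcount == 1) {		/* only the allocation ref is left */
		free(t);		/* parent clears its PTE, puts the page */
		return NULL;
	}
	return t;
}

int main(void)
{
	struct table *t = table_new();

	map_entry(t, 0);
	map_entry(t, 1);
	t = unmap_entry(t, 0);
	assert(t);			/* still one live entry */
	t = unmap_entry(t, 1);
	assert(!t);			/* empty table was reclaimed */
	return 0;
}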
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 20db2f281cf2..4fb419f7b8b6 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -983,6 +983,9 @@ static void __vgic_v3_read_ctlr(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
val = ((vtr >> 29) & 7) << ICC_CTLR_EL1_PRI_BITS_SHIFT;
/* IDbits */
val |= ((vtr >> 23) & 7) << ICC_CTLR_EL1_ID_BITS_SHIFT;
+ /* SEIS */
+ if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK)
+ val |= BIT(ICC_CTLR_EL1_SEIS_SHIFT);
/* A3V */
val |= ((vtr >> 21) & 1) << ICC_CTLR_EL1_A3V_SHIFT;
/* EOImode */
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index fbb26b93c347..11d053fdd604 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -24,7 +24,6 @@
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
-#include <asm/thread_info.h>
/* VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 326cdfec74a1..bc2aba953299 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -210,13 +210,13 @@ static void stage2_flush_vm(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int idx;
+ int idx, bkt;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots)
+ kvm_for_each_memslot(memslot, bkt, slots)
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
@@ -239,6 +239,9 @@ void free_hyp_pgds(void)
static bool kvm_host_owns_hyp_mappings(void)
{
+ if (is_kernel_in_hyp_mode())
+ return false;
+
if (static_branch_likely(&kvm_protected_mode_initialized))
return false;
@@ -281,14 +284,117 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
}
}
-static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
+struct hyp_shared_pfn {
+ u64 pfn;
+ int count;
+ struct rb_node node;
+};
+
+static DEFINE_MUTEX(hyp_shared_pfns_lock);
+static struct rb_root hyp_shared_pfns = RB_ROOT;
+
+static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node,
+ struct rb_node **parent)
{
- phys_addr_t addr;
+ struct hyp_shared_pfn *this;
+
+ *node = &hyp_shared_pfns.rb_node;
+ *parent = NULL;
+ while (**node) {
+ this = container_of(**node, struct hyp_shared_pfn, node);
+ *parent = **node;
+ if (this->pfn < pfn)
+ *node = &((**node)->rb_left);
+ else if (this->pfn > pfn)
+ *node = &((**node)->rb_right);
+ else
+ return this;
+ }
+
+ return NULL;
+}
+
+static int share_pfn_hyp(u64 pfn)
+{
+ struct rb_node **node, *parent;
+ struct hyp_shared_pfn *this;
+ int ret = 0;
+
+ mutex_lock(&hyp_shared_pfns_lock);
+ this = find_shared_pfn(pfn, &node, &parent);
+ if (this) {
+ this->count++;
+ goto unlock;
+ }
+
+ this = kzalloc(sizeof(*this), GFP_KERNEL);
+ if (!this) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ this->pfn = pfn;
+ this->count = 1;
+ rb_link_node(&this->node, parent, node);
+ rb_insert_color(&this->node, &hyp_shared_pfns);
+ ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1);
+unlock:
+ mutex_unlock(&hyp_shared_pfns_lock);
+
+ return ret;
+}
+
+static int unshare_pfn_hyp(u64 pfn)
+{
+ struct rb_node **node, *parent;
+ struct hyp_shared_pfn *this;
+ int ret = 0;
+
+ mutex_lock(&hyp_shared_pfns_lock);
+ this = find_shared_pfn(pfn, &node, &parent);
+ if (WARN_ON(!this)) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ this->count--;
+ if (this->count)
+ goto unlock;
+
+ rb_erase(&this->node, &hyp_shared_pfns);
+ kfree(this);
+ ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1);
+unlock:
+ mutex_unlock(&hyp_shared_pfns_lock);
+
+ return ret;
+}
+
+int kvm_share_hyp(void *from, void *to)
+{
+ phys_addr_t start, end, cur;
+ u64 pfn;
int ret;
- for (addr = ALIGN_DOWN(start, PAGE_SIZE); addr < end; addr += PAGE_SIZE) {
- ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp,
- __phys_to_pfn(addr));
+ if (is_kernel_in_hyp_mode())
+ return 0;
+
+ /*
+ * The share hcall maps things in the 'fixed-offset' region of the hyp
+ * VA space, so we can only share physically contiguous data-structures
+ * for now.
+ */
+ if (is_vmalloc_or_module_addr(from) || is_vmalloc_or_module_addr(to))
+ return -EINVAL;
+
+ if (kvm_host_owns_hyp_mappings())
+ return create_hyp_mappings(from, to, PAGE_HYP);
+
+ start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
+ end = PAGE_ALIGN(__pa(to));
+ for (cur = start; cur < end; cur += PAGE_SIZE) {
+ pfn = __phys_to_pfn(cur);
+ ret = share_pfn_hyp(pfn);
if (ret)
return ret;
}
@@ -296,6 +402,22 @@ static int pkvm_share_hyp(phys_addr_t start, phys_addr_t end)
return 0;
}
+void kvm_unshare_hyp(void *from, void *to)
+{
+ phys_addr_t start, end, cur;
+ u64 pfn;
+
+ if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from)
+ return;
+
+ start = ALIGN_DOWN(__pa(from), PAGE_SIZE);
+ end = PAGE_ALIGN(__pa(to));
+ for (cur = start; cur < end; cur += PAGE_SIZE) {
+ pfn = __phys_to_pfn(cur);
+ WARN_ON(unshare_pfn_hyp(pfn));
+ }
+}
+
/**
* create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
* @from: The virtual kernel start address of the range
@@ -316,12 +438,8 @@ int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
if (is_kernel_in_hyp_mode())
return 0;
- if (!kvm_host_owns_hyp_mappings()) {
- if (WARN_ON(prot != PAGE_HYP))
- return -EPERM;
- return pkvm_share_hyp(kvm_kaddr_to_phys(from),
- kvm_kaddr_to_phys(to));
- }
+ if (!kvm_host_owns_hyp_mappings())
+ return -EPERM;
start = start & PAGE_MASK;
end = PAGE_ALIGN(end);
@@ -407,6 +525,9 @@ int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
unsigned long addr;
int ret;
+ if (is_protected_kvm_enabled())
+ return -EPERM;
+
*kaddr = ioremap(phys_addr, size);
if (!*kaddr)
return -ENOMEM;
@@ -516,7 +637,8 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
if (!pgt)
return -ENOMEM;
- err = kvm_pgtable_stage2_init(pgt, &kvm->arch, &kvm_s2_mm_ops);
+ mmu->arch = &kvm->arch;
+ err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops);
if (err)
goto out_free_pgtable;
@@ -529,7 +651,6 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
for_each_possible_cpu(cpu)
*per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1;
- mmu->arch = &kvm->arch;
mmu->pgt = pgt;
mmu->pgd_phys = __pa(pgt->pgd);
WRITE_ONCE(mmu->vmid.vmid_gen, 0);
@@ -595,14 +716,14 @@ void stage2_unmap_vm(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int idx;
+ int idx, bkt;
idx = srcu_read_lock(&kvm->srcu);
mmap_read_lock(current->mm);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots)
+ kvm_for_each_memslot(memslot, bkt, slots)
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
@@ -650,6 +771,9 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
KVM_PGTABLE_PROT_R |
(writable ? KVM_PGTABLE_PROT_W : 0);
+ if (is_protected_kvm_enabled())
+ return -EPERM;
+
size += offset_in_page(guest_ipa);
guest_ipa &= PAGE_MASK;
@@ -1463,7 +1587,6 @@ out:
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
@@ -1473,25 +1596,24 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
* allocated dirty_bitmap[], dirty pages will be tracked while the
* memory slot is write protected.
*/
- if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+ if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
/*
* If we're with initial-all-set, we don't need to write
* protect any pages because they're all reported as dirty.
* Huge pages and normal pages will be write protect gradually.
*/
if (!kvm_dirty_log_manual_protect_and_init_set(kvm)) {
- kvm_mmu_wp_memory_region(kvm, mem->slot);
+ kvm_mmu_wp_memory_region(kvm, new->id);
}
}
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- hva_t hva = mem->userspace_addr;
- hva_t reg_end = hva + mem->memory_size;
+ hva_t hva, reg_end;
int ret = 0;
if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
@@ -1502,9 +1624,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* Prevent userspace from creating a memory region outside of the IPA
* space addressable by the KVM guest IPA space.
*/
- if ((memslot->base_gfn + memslot->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
+ if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT))
return -EFAULT;
+ hva = new->userspace_addr;
+ reg_end = hva + (new->npages << PAGE_SHIFT);
+
mmap_read_lock(current->mm);
/*
* A memory region could potentially cover multiple VMAs, and any holes
@@ -1536,7 +1661,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
if (vma->vm_flags & VM_PFNMAP) {
/* IO region dirty page logging not allowed */
- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+ if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
ret = -EINVAL;
break;
}
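
share_pfn_hyp()/unshare_pfn_hyp() above keep a per-pfn reference count (an rb-tree in the code) so that only the first share and the last unshare actually cross into the hypervisor. A flat-array model of that behaviour, with the hypercalls reduced to a counter (sizes and stubs are illustrative):

#include <assert.h>

#define NPFNS 64

static int share_count[NPFNS];
static int hypercalls;

static int share_pfn(unsigned int pfn)
{
	if (share_count[pfn]++ == 0)
		hypercalls++;		/* first user: __pkvm_host_share_hyp */
	return 0;
}

static int unshare_pfn(unsigned int pfn)
{
	if (share_count[pfn] == 0)
		return -1;		/* never shared: the kernel WARNs here */
	if (--share_count[pfn] == 0)
		hypercalls++;		/* last user: __pkvm_host_unshare_hyp */
	return 0;
}

int main(void)
{
	share_pfn(7);
	share_pfn(7);			/* double share: refcount only */
	assert(hypercalls == 1);
	unshare_pfn(7);
	assert(hypercalls == 1);	/* hyp still borrows the page */
	unshare_pfn(7);
	assert(hypercalls == 2);	/* really unshared now */
	return 0;
}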
diff --git a/arch/arm64/kvm/perf.c b/arch/arm64/kvm/perf.c
deleted file mode 100644
index c84fe24b2ea1..000000000000
--- a/arch/arm64/kvm/perf.c
+++ /dev/null
@@ -1,59 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Based on the x86 implementation.
- *
- * Copyright (C) 2012 ARM Ltd.
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <linux/perf_event.h>
-#include <linux/kvm_host.h>
-
-#include <asm/kvm_emulate.h>
-
-DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
-
-static int kvm_is_in_guest(void)
-{
- return kvm_get_running_vcpu() != NULL;
-}
-
-static int kvm_is_user_mode(void)
-{
- struct kvm_vcpu *vcpu;
-
- vcpu = kvm_get_running_vcpu();
-
- if (vcpu)
- return !vcpu_mode_priv(vcpu);
-
- return 0;
-}
-
-static unsigned long kvm_get_guest_ip(void)
-{
- struct kvm_vcpu *vcpu;
-
- vcpu = kvm_get_running_vcpu();
-
- if (vcpu)
- return *vcpu_pc(vcpu);
-
- return 0;
-}
-
-static struct perf_guest_info_callbacks kvm_guest_cbs = {
- .is_in_guest = kvm_is_in_guest,
- .is_user_mode = kvm_is_user_mode,
- .get_guest_ip = kvm_get_guest_ip,
-};
-
-int kvm_perf_init(void)
-{
- return perf_register_guest_info_callbacks(&kvm_guest_cbs);
-}
-
-int kvm_perf_teardown(void)
-{
- return perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
-}
diff --git a/arch/arm64/kvm/hyp/reserved_mem.c b/arch/arm64/kvm/pkvm.c
index 578670e3f608..ebecb7c045f4 100644
--- a/arch/arm64/kvm/hyp/reserved_mem.c
+++ b/arch/arm64/kvm/pkvm.c
@@ -8,10 +8,9 @@
#include <linux/memblock.h>
#include <linux/sort.h>
-#include <asm/kvm_host.h>
+#include <asm/kvm_pkvm.h>
-#include <nvhe/memory.h>
-#include <nvhe/mm.h>
+#include "hyp_constants.h"
static struct memblock_region *hyp_memory = kvm_nvhe_sym(hyp_memory);
static unsigned int *hyp_memblock_nr_ptr = &kvm_nvhe_sym(hyp_memblock_nr);
@@ -82,7 +81,8 @@ void __init kvm_hyp_reserve(void)
do {
prev = nr_pages;
nr_pages = hyp_mem_pages + prev;
- nr_pages = DIV_ROUND_UP(nr_pages * sizeof(struct hyp_page), PAGE_SIZE);
+ nr_pages = DIV_ROUND_UP(nr_pages * STRUCT_HYP_PAGE_SIZE,
+ PAGE_SIZE);
nr_pages += __hyp_pgtable_max_pages(nr_pages);
} while (nr_pages != prev);
hyp_mem_pages += nr_pages;
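
The reservation loop above converges on a fixed point: pages reserved for metadata themselves need struct hyp_page entries and page-table backing, which cost further pages, and so on until the total stops growing. A standalone rendition with assumed sizes (64-byte struct hyp_page, 4 KiB pages, 512 entries per level, 4 levels; the real constants come from hyp_constants.h and the page-table geometry):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define HYP_PAGE_SZ	64UL
#define PTRS_PER_PTE	512UL
#define LEVELS		4

/* Worst-case page-table pages to map nr_pages at page granularity. */
static unsigned long pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0;

	for (int i = 0; i < LEVELS; i++) {
		nr_pages = (nr_pages + PTRS_PER_PTE - 1) / PTRS_PER_PTE;
		total += nr_pages;
	}
	return total;
}

int main(void)
{
	unsigned long hyp_mem_pages = 1UL << 18;	/* assumed base cost */
	unsigned long prev, nr_pages = 0;

	do {
		prev = nr_pages;
		nr_pages = hyp_mem_pages + prev;
		nr_pages = (nr_pages * HYP_PAGE_SZ + PAGE_SIZE - 1) / PAGE_SIZE;
		nr_pages += pgtable_max_pages(nr_pages);
	} while (nr_pages != prev);

	printf("extra pages for metadata: %lu\n", nr_pages);
	return 0;
}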
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index a5e4bbf5e68f..fbcfd4ec6f92 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -14,6 +14,8 @@
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>
+DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
+
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_update_pmc_chained(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
@@ -28,6 +30,7 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm)
case ID_AA64DFR0_PMUVER_8_1:
case ID_AA64DFR0_PMUVER_8_4:
case ID_AA64DFR0_PMUVER_8_5:
+ case ID_AA64DFR0_PMUVER_8_7:
return GENMASK(15, 0);
default: /* Shouldn't be here, just for sanity */
WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);
@@ -900,7 +903,7 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
*/
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
- int i;
+ unsigned long i;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm) {
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 74c47d420253..3eae32876897 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -46,7 +46,7 @@ static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
* specification (ARM DEN 0022A). This means all suspend states
* for KVM will preserve the register state.
*/
- kvm_vcpu_block(vcpu);
+ kvm_vcpu_halt(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
return PSCI_RET_SUCCESS;
@@ -109,7 +109,7 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
/*
* Make sure the reset request is observed if the change to
- * power_state is observed.
+ * power_off is observed.
*/
smp_wmb();
@@ -121,8 +121,8 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
- int i, matching_cpus = 0;
- unsigned long mpidr;
+ int matching_cpus = 0;
+ unsigned long i, mpidr;
unsigned long target_affinity;
unsigned long target_affinity_mask;
unsigned long lowest_affinity_level;
@@ -164,7 +164,7 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
- int i;
+ unsigned long i;
struct kvm_vcpu *tmp;
/*
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 27386f0d81e4..ecc40c8cd6f6 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -94,22 +94,31 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
void *buf;
unsigned int vl;
+ size_t reg_sz;
+ int ret;
vl = vcpu->arch.sve_max_vl;
/*
* Responsibility for these properties is shared between
- * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
+ * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
* set_sve_vls(). Double-check here just to be sure:
*/
if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
vl > VL_ARCH_MAX))
return -EIO;
- buf = kzalloc(SVE_SIG_REGS_SIZE(sve_vq_from_vl(vl)), GFP_KERNEL_ACCOUNT);
+ reg_sz = vcpu_sve_state_size(vcpu);
+ buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
if (!buf)
return -ENOMEM;
+ ret = kvm_share_hyp(buf, buf + reg_sz);
+ if (ret) {
+ kfree(buf);
+ return ret;
+ }
+
vcpu->arch.sve_state = buf;
vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
return 0;
@@ -141,7 +150,13 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
- kfree(vcpu->arch.sve_state);
+ void *sve_state = vcpu->arch.sve_state;
+
+ kvm_vcpu_unshare_task_fp(vcpu);
+ kvm_unshare_hyp(vcpu, vcpu + 1);
+ if (sve_state)
+ kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
+ kfree(sve_state);
}
static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
@@ -170,7 +185,7 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu *tmp;
bool is32bit;
- int i;
+ unsigned long i;
is32bit = vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1) && is32bit)
@@ -193,10 +208,9 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
* kvm_reset_vcpu - sets core registers and sys_regs to reset value
* @vcpu: The VCPU pointer
*
- * This function finds the right table above and sets the registers on
- * the virtual CPU struct to their architecturally defined reset
- * values, except for registers whose reset is deferred until
- * kvm_arm_vcpu_finalize().
+ * This function sets the registers on the virtual CPU struct to their
+ * architecturally defined reset values, except for registers whose reset is
+ * deferred until kvm_arm_vcpu_finalize().
*
* Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
* ioctl or as part of handling a request issued by another VCPU in the PSCI
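
The reset.c hunks pair every kvm_share_hyp() with a kvm_unshare_hyp() before the memory is freed, and free immediately when the share fails so an unshared buffer never leaks. A lifecycle sketch of that pairing (stubs stand in for the real hypercall-backed helpers):

#include <stdlib.h>

static int share_stub(void *from, void *to)    { (void)from; (void)to; return 0; }
static void unshare_stub(void *from, void *to) { (void)from; (void)to; }

static void *sve_state_alloc(size_t sz)
{
	void *buf = calloc(1, sz);

	if (!buf)
		return NULL;
	if (share_stub(buf, (char *)buf + sz)) {
		free(buf);		/* failed share: free right away */
		return NULL;
	}
	return buf;
}

static void sve_state_free(void *buf, size_t sz)
{
	if (!buf)
		return;
	unshare_stub(buf, (char *)buf + sz);	/* always before free() */
	free(buf);
}

int main(void)
{
	void *b = sve_state_alloc(4096);

	sve_state_free(b, 4096);
	return 0;
}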
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 0a06d0648970..fc00304fe7d8 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -70,8 +70,9 @@ void kvm_vgic_early_init(struct kvm *kvm)
*/
int kvm_vgic_create(struct kvm *kvm, u32 type)
{
- int i, ret;
struct kvm_vcpu *vcpu;
+ unsigned long i;
+ int ret;
if (irqchip_in_kernel(kvm))
return -EEXIST;
@@ -91,7 +92,7 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
return ret;
kvm_for_each_vcpu(i, vcpu, kvm) {
- if (vcpu->arch.has_run_once)
+ if (vcpu_has_run_once(vcpu))
goto out_unlock;
}
ret = 0;
@@ -255,7 +256,8 @@ int vgic_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
- int ret = 0, i, idx;
+ int ret = 0, i;
+ unsigned long idx;
if (vgic_initialized(kvm))
return 0;
@@ -308,7 +310,7 @@ int vgic_init(struct kvm *kvm)
goto out;
}
- kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_for_each_vcpu(idx, vcpu, kvm)
kvm_vgic_vcpu_enable(vcpu);
ret = kvm_vgic_setup_default_irq_routing(kvm);
@@ -370,7 +372,7 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
static void __kvm_vgic_destroy(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
vgic_debug_destroy(kvm);
diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c
index 0d000d2fe8d2..c6d52a1fd9c8 100644
--- a/arch/arm64/kvm/vgic/vgic-kvm-device.c
+++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c
@@ -325,7 +325,7 @@ void unlock_all_vcpus(struct kvm *kvm)
bool lock_all_vcpus(struct kvm *kvm)
{
struct kvm_vcpu *tmp_vcpu;
- int c;
+ unsigned long c;
/*
* Any time a vcpu is run, vcpu_load is called which tries to grab the
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v2.c b/arch/arm64/kvm/vgic/vgic-mmio-v2.c
index 5f9014ae595b..12e4c223e6b8 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v2.c
@@ -113,9 +113,8 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu,
int intid = val & 0xf;
int targets = (val >> 16) & 0xff;
int mode = (val >> 24) & 0x03;
- int c;
struct kvm_vcpu *vcpu;
- unsigned long flags;
+ unsigned long flags, c;
switch (mode) {
case 0x0: /* as specified by targets */
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index bf7ec4a78497..58e40b4874f8 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -754,7 +754,8 @@ static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
static int vgic_register_all_redist_iodevs(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
- int c, ret = 0;
+ unsigned long c;
+ int ret = 0;
kvm_for_each_vcpu(c, vcpu, kvm) {
ret = vgic_register_redist_iodev(vcpu);
@@ -763,10 +764,12 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
}
if (ret) {
- /* The current c failed, so we start with the previous one. */
+ /* The current c failed, so iterate over the previous ones. */
+ int i;
+
mutex_lock(&kvm->slots_lock);
- for (c--; c >= 0; c--) {
- vcpu = kvm_get_vcpu(kvm, c);
+ for (i = 0; i < c; i++) {
+ vcpu = kvm_get_vcpu(kvm, i);
vgic_unregister_redist_iodev(vcpu);
}
mutex_unlock(&kvm->slots_lock);
@@ -995,10 +998,10 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
struct kvm_vcpu *c_vcpu;
u16 target_cpus;
u64 mpidr;
- int sgi, c;
+ int sgi;
int vcpu_id = vcpu->vcpu_id;
bool broadcast;
- unsigned long flags;
+ unsigned long c, flags;
sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
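
Context for the redistributor unwind rewrite a few hunks up: once the iterator becomes unsigned long (as kvm_for_each_vcpu now requires), the old "for (c--; c >= 0; c--)" backward walk can never terminate, because c >= 0 is always true and the decrement wraps at zero, hence the forward 0..c-1 form. A short demonstration of the wrap:

#include <stdio.h>

int main(void)
{
	unsigned long c = 3;
	int steps = 0;

	/* The patch's forward replacement: visit 0 .. c-1. */
	for (unsigned long i = 0; i < c; i++)
		steps++;
	printf("unwound %d devices\n", steps);

	/* The old pattern's hazard: 0 - 1 wraps to ULONG_MAX, so a
	 * "c >= 0" guard on an unsigned c would loop forever. */
	c = 0;
	printf("c - 1 wraps to %lu\n", c - 1);
	return 0;
}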
diff --git a/arch/arm64/kvm/vgic/vgic-mmio.c b/arch/arm64/kvm/vgic/vgic-mmio.c
index 48c6067fc5ec..49837d3a3ef5 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio.c
@@ -248,6 +248,8 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
IRQCHIP_STATE_PENDING,
&val);
WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+ } else if (vgic_irq_is_mapped_level(irq)) {
+ val = vgic_get_phys_line_level(irq);
} else {
val = irq_is_pending(irq);
}
@@ -1050,7 +1052,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
return 0;
}
-struct kvm_io_device_ops kvm_io_gic_ops = {
+const struct kvm_io_device_ops kvm_io_gic_ops = {
.read = dispatch_mmio_read,
.write = dispatch_mmio_write,
};
diff --git a/arch/arm64/kvm/vgic/vgic-mmio.h b/arch/arm64/kvm/vgic/vgic-mmio.h
index fefcca2b14dc..3fa696f198a3 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio.h
+++ b/arch/arm64/kvm/vgic/vgic-mmio.h
@@ -34,7 +34,7 @@ struct vgic_register_region {
};
};
-extern struct kvm_io_device_ops kvm_io_gic_ops;
+extern const struct kvm_io_device_ops kvm_io_gic_ops;
#define VGIC_ACCESS_8bit 1
#define VGIC_ACCESS_32bit 2
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index 95a18cec14a3..645648349c99 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -293,12 +293,12 @@ int vgic_v2_map_resources(struct kvm *kvm)
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base) ||
IS_VGIC_ADDR_UNDEF(dist->vgic_cpu_base)) {
- kvm_err("Need to set vgic cpu and dist addresses first\n");
+ kvm_debug("Need to set vgic cpu and dist addresses first\n");
return -ENXIO;
}
if (!vgic_v2_check_base(dist->vgic_dist_base, dist->vgic_cpu_base)) {
- kvm_err("VGIC CPU and dist frames overlap\n");
+ kvm_debug("VGIC CPU and dist frames overlap\n");
return -EINVAL;
}
@@ -345,6 +345,11 @@ int vgic_v2_probe(const struct gic_kvm_info *info)
int ret;
u32 vtr;
+ if (is_protected_kvm_enabled()) {
+ kvm_err("GICv2 not supported in protected mode\n");
+ return -ENXIO;
+ }
+
if (!info->vctrl.start) {
kvm_err("GICH not present in the firmware table\n");
return -ENXIO;
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 04f62c4b07fb..b549af8b1dc2 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -542,24 +542,24 @@ int vgic_v3_map_resources(struct kvm *kvm)
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
int ret = 0;
- int c;
+ unsigned long c;
kvm_for_each_vcpu(c, vcpu, kvm) {
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
- kvm_debug("vcpu %d redistributor base not set\n", c);
+ kvm_debug("vcpu %ld redistributor base not set\n", c);
return -ENXIO;
}
}
if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
- kvm_err("Need to set vgic distributor addresses first\n");
+ kvm_debug("Need to set vgic distributor addresses first\n");
return -ENXIO;
}
if (!vgic_v3_check_base(kvm)) {
- kvm_err("VGIC redist and dist frames overlap\n");
+ kvm_debug("VGIC redist and dist frames overlap\n");
return -EINVAL;
}
@@ -609,6 +609,18 @@ static int __init early_gicv4_enable(char *buf)
}
early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
+static const struct midr_range broken_seis[] = {
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_ICESTORM),
+ MIDR_ALL_VERSIONS(MIDR_APPLE_M1_FIRESTORM),
+ {},
+};
+
+static bool vgic_v3_broken_seis(void)
+{
+ return ((kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) &&
+ is_midr_in_range_list(read_cpuid_id(), broken_seis));
+}
+
/**
* vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
* @info: pointer to the GIC description
@@ -651,7 +663,7 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
} else if (!PAGE_ALIGNED(info->vcpu.start)) {
pr_warn("GICV physical address 0x%llx not page aligned\n",
(unsigned long long)info->vcpu.start);
- } else {
+ } else if (kvm_get_mode() != KVM_MODE_PROTECTED) {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.can_emulate_gicv2 = true;
ret = kvm_register_vgic_device(KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -676,9 +688,10 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
group1_trap = true;
}
- if (kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_SEIS_MASK) {
- kvm_info("GICv3 with locally generated SEI\n");
+ if (vgic_v3_broken_seis()) {
+ kvm_info("GICv3 with broken locally generated SEI\n");
+ kvm_vgic_global_state.ich_vtr_el2 &= ~ICH_VTR_SEIS_MASK;
group0_trap = true;
group1_trap = true;
if (ich_vtr_el2 & ICH_VTR_TDS_MASK)
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index 772dd15a22c7..ad06ba6c9b00 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -189,7 +189,7 @@ void vgic_v4_configure_vsgis(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
kvm_arm_halt_guest(kvm);
@@ -235,7 +235,8 @@ int vgic_v4_init(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
struct kvm_vcpu *vcpu;
- int i, nr_vcpus, ret;
+ int nr_vcpus, ret;
+ unsigned long i;
if (!kvm_vgic_global_state.has_gicv4)
return 0; /* Nothing to see here... move along. */
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 5dad4996cfb2..9b98876a8a93 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -990,7 +990,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
void vgic_kick_vcpus(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
- int c;
+ unsigned long c;
/*
* We've injected an interrupt, time to find out who deserves
diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c
index c0181e60cc98..489455309695 100644
--- a/arch/arm64/mm/extable.c
+++ b/arch/arm64/mm/extable.c
@@ -40,8 +40,8 @@ static bool
ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
struct pt_regs *regs)
{
- int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->type);
- int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->type);
+ int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data);
+ int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
unsigned long data, addr, offset;
addr = pt_regs_read_reg(regs, reg_addr);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 9a9e7675b187..77341b160aca 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -304,7 +304,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
show_pte(addr);
die("Oops", regs, esr);
bust_spinlocks(0);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
}
#ifdef CONFIG_KASAN_HW_TAGS
@@ -608,10 +608,8 @@ retry:
}
if (fault & VM_FAULT_RETRY) {
- if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
- mm_flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ mm_flags |= FAULT_FLAG_TRIED;
+ goto retry;
}
mmap_read_unlock(mm);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index a8834434af99..db63cc885771 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -172,7 +172,7 @@ int pfn_is_map_memory(unsigned long pfn)
}
EXPORT_SYMBOL(pfn_is_map_memory);
-static phys_addr_t memory_limit = PHYS_ADDR_MAX;
+static phys_addr_t memory_limit __ro_after_init = PHYS_ADDR_MAX;
/*
* Limit the memory size that was specified via FDT.
diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps
index 870c39537dd0..9c65b1e25a96 100644
--- a/arch/arm64/tools/cpucaps
+++ b/arch/arm64/tools/cpucaps
@@ -55,6 +55,10 @@ WORKAROUND_1418040
WORKAROUND_1463225
WORKAROUND_1508412
WORKAROUND_1542419
+WORKAROUND_1902691
+WORKAROUND_2038923
+WORKAROUND_2064142
+WORKAROUND_2077057
WORKAROUND_TRBE_OVERWRITE_FILL_MODE
WORKAROUND_TSB_FLUSH_FAILURE
WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
diff --git a/arch/csky/abiv1/alignment.c b/arch/csky/abiv1/alignment.c
index cb2a0d94a144..2df115d0e210 100644
--- a/arch/csky/abiv1/alignment.c
+++ b/arch/csky/abiv1/alignment.c
@@ -294,7 +294,7 @@ bad_area:
__func__, opcode, rz, rx, imm, addr);
show_regs(regs);
bust_spinlocks(0);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
}
force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
diff --git a/arch/csky/include/asm/bitops.h b/arch/csky/include/asm/bitops.h
index 02b72a000767..72e1b2aa29a0 100644
--- a/arch/csky/include/asm/bitops.h
+++ b/arch/csky/include/asm/bitops.h
@@ -59,7 +59,6 @@ static __always_inline unsigned long __fls(unsigned long x)
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
diff --git a/arch/csky/kernel/perf_callchain.c b/arch/csky/kernel/perf_callchain.c
index ab55e98ee8f6..92057de08f4f 100644
--- a/arch/csky/kernel/perf_callchain.c
+++ b/arch/csky/kernel/perf_callchain.c
@@ -88,10 +88,6 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
{
unsigned long fp = 0;
- /* C-SKY does not support virtualization. */
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- return;
-
fp = regs->regs[4];
perf_callchain_store(entry, regs->pc);
@@ -112,12 +108,6 @@ void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
{
struct stackframe fr;
- /* C-SKY does not support virtualization. */
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- pr_warn("C-SKY does not support perf in guest mode!");
- return;
- }
-
fr.fp = regs->regs[4];
fr.lr = regs->lr;
walk_stackframe(&fr, entry);
diff --git a/arch/csky/kernel/traps.c b/arch/csky/kernel/traps.c
index 2020af88b636..6e426fba0119 100644
--- a/arch/csky/kernel/traps.c
+++ b/arch/csky/kernel/traps.c
@@ -109,7 +109,7 @@ void die(struct pt_regs *regs, const char *str)
if (panic_on_oops)
panic("Fatal exception");
if (ret != NOTIFY_STOP)
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c
index 466ad949818a..7215a46b6b8e 100644
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -67,7 +67,7 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
pr_alert("Unable to handle kernel paging request at virtual "
"addr 0x%08lx, pc: 0x%08lx\n", addr, regs->pc);
die(regs, "Oops");
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
}
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
diff --git a/arch/h8300/boot/compressed/Makefile b/arch/h8300/boot/compressed/Makefile
index 5942793f77a0..6ab2fa5ba105 100644
--- a/arch/h8300/boot/compressed/Makefile
+++ b/arch/h8300/boot/compressed/Makefile
@@ -30,9 +30,11 @@ $(obj)/vmlinux.bin: vmlinux FORCE
suffix-$(CONFIG_KERNEL_GZIP) := gzip
suffix-$(CONFIG_KERNEL_LZO) := lzo
+compress-$(CONFIG_KERNEL_GZIP) := gzip
+compress-$(CONFIG_KERNEL_LZO) := lzo_with_size
$(obj)/vmlinux.bin.$(suffix-y): $(obj)/vmlinux.bin FORCE
- $(call if_changed,$(suffix-y))
+ $(call if_changed,$(compress-y))
LDFLAGS_piggy.o := -r --format binary --oformat elf32-h8300-linux -T
OBJCOPYFLAGS := -O binary
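
Here gzip keeps its command name while lzo becomes lzo_with_size; the same renaming hits the MIPS boot Makefile further down. The _with_size command variants append the uncompressed size to the payload so the decompression stub knows up front how much memory the kernel will occupy. A C sketch of that trailer, assuming the usual 4-byte little-endian convention used by the kbuild size-append helper:

#include <stdio.h>

int main(void)
{
	unsigned int size = 8 * 1024 * 1024;	/* uncompressed image size in bytes */
	unsigned char trailer[4];
	int i;

	for (i = 0; i < 4; i++)
		trailer[i] = (size >> (8 * i)) & 0xff;	/* little-endian */
	printf("trailer: %02x %02x %02x %02x\n",
	       trailer[0], trailer[1], trailer[2], trailer[3]);
	return 0;
}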
diff --git a/arch/h8300/boot/dts/Makefile b/arch/h8300/boot/dts/Makefile
index 69fcd817892c..c36bbd1f2592 100644
--- a/arch/h8300/boot/dts/Makefile
+++ b/arch/h8300/boot/dts/Makefile
@@ -1,9 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-ifneq '$(CONFIG_H8300_BUILTIN_DTB)' '""'
-BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_H8300_BUILTIN_DTB)).dtb.o
-endif
-
-obj-y += $(BUILTIN_DTB)
+obj-y += $(addsuffix .dtb.o, $(CONFIG_H8300_BUILTIN_DTB))
dtb-$(CONFIG_H8300H_SIM) := h8300h_sim.dtb
dtb-$(CONFIG_H8S_SIM) := h8s_sim.dtb
diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h
index c867a80cab5b..4489e3d6edd3 100644
--- a/arch/h8300/include/asm/bitops.h
+++ b/arch/h8300/include/asm/bitops.h
@@ -168,7 +168,6 @@ static inline unsigned long __ffs(unsigned long word)
return result;
}
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
diff --git a/arch/h8300/kernel/traps.c b/arch/h8300/kernel/traps.c
index bdbe988d8dbc..a92c39e03802 100644
--- a/arch/h8300/kernel/traps.c
+++ b/arch/h8300/kernel/traps.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
#include <linux/mm_types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -106,7 +107,7 @@ void die(const char *str, struct pt_regs *fp, unsigned long err)
dump(fp);
spin_unlock_irq(&die_lock);
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
static int kstack_depth_to_print = 24;
diff --git a/arch/h8300/mm/fault.c b/arch/h8300/mm/fault.c
index d4bc9c16f2df..b465441f490d 100644
--- a/arch/h8300/mm/fault.c
+++ b/arch/h8300/mm/fault.c
@@ -51,7 +51,7 @@ asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
printk(" at virtual address %08lx\n", address);
if (!user_mode(regs))
die("Oops", regs, error_code);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
return 1;
}
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 71429f756af0..75d6ba3643b8 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -271,7 +271,6 @@ static inline unsigned long __fls(unsigned long word)
}
#include <asm-generic/bitops/lock.h>
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/sched.h>
diff --git a/arch/hexagon/kernel/traps.c b/arch/hexagon/kernel/traps.c
index edfc35dafeb1..1240f038cce0 100644
--- a/arch/hexagon/kernel/traps.c
+++ b/arch/hexagon/kernel/traps.c
@@ -214,7 +214,7 @@ int die(const char *str, struct pt_regs *regs, long err)
panic("Fatal exception");
oops_exit();
- do_exit(err);
+ make_task_dead(err);
return 0;
}
diff --git a/arch/hexagon/mm/vm_fault.c b/arch/hexagon/mm/vm_fault.c
index ef32c5a84ff3..4fac4b9eb316 100644
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -98,11 +98,9 @@ good_area:
/* The most common case -- we are done. */
if (likely(!(fault & VM_FAULT_ERROR))) {
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
}
mmap_read_unlock(mm);
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1e33666fa679..a7e01573abd8 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -32,6 +32,7 @@ config IA64
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_DYNAMIC_FTRACE if (!ITANIUM)
select HAVE_FUNCTION_TRACER
+ select HAVE_SETUP_PER_CPU_AREA
select TTY
select HAVE_ARCH_TRACEHOOK
select HAVE_VIRT_CPU_ACCOUNTING
@@ -88,9 +89,6 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
-config HAVE_SETUP_PER_CPU_AREA
- def_bool y
-
config DMI
bool
default y
@@ -292,6 +290,7 @@ config NUMA
bool "NUMA support"
depends on !FLATMEM
select SMP
+ select USE_PERCPU_NUMA_NODE_ID
help
Say Y to compile the kernel to support NUMA (Non-Uniform Memory
Access). This option is for configuring high-end multiprocessor
@@ -311,10 +310,6 @@ config HAVE_ARCH_NODEDATA_EXTENSION
def_bool y
depends on NUMA
-config USE_PERCPU_NUMA_NODE_ID
- def_bool y
- depends on NUMA
-
config HAVE_MEMORYLESS_NODES
def_bool NUMA
@@ -323,7 +318,7 @@ config ARCH_PROC_KCORE_TEXT
depends on PROC_KCORE
config IA64_MCA_RECOVERY
- tristate "MCA recovery from errors other than TLB."
+ bool "MCA recovery from errors other than TLB."
config IA64_PALINFO
tristate "/proc/pal support"
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 2f24ee6459d2..577be93c0818 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -441,8 +441,6 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)
#endif /* __KERNEL__ */
-#include <asm-generic/bitops/find.h>
-
#ifdef __KERNEL__
#include <asm-generic/bitops/le.h>
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 5bfc79be4cef..23c203639a96 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -176,7 +176,7 @@ mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr)
spin_unlock(&mca_bh_lock);
/* This process is about to be killed itself */
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
}
/**
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 2cba53c1da82..360f36b0eb3f 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -848,7 +848,7 @@ register_unwind_table (struct module *mod)
{
struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
- struct unw_table_entry tmp, *e1, *e2, *core, *init;
+ struct unw_table_entry *e1, *e2, *core, *init;
unsigned long num_init = 0, num_core = 0;
/* First, count how many init and core unwind-table entries there are. */
@@ -865,9 +865,7 @@ register_unwind_table (struct module *mod)
for (e1 = start; e1 < end; ++e1) {
for (e2 = e1 + 1; e2 < end; ++e2) {
if (e2->start_offset < e1->start_offset) {
- tmp = *e1;
- *e1 = *e2;
- *e2 = tmp;
+ swap(*e1, *e2);
}
}
}
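
The hand-rolled three-assignment exchange becomes the kernel's swap() macro. The demo below uses the same typeof-based definition the kernel headers provide, applied to a cut-down stand-in for the unwind-table entry:

#include <stdio.h>

#define swap(a, b) \
	do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)

struct entry { int start_offset; };

int main(void)
{
	struct entry e1 = { 42 }, e2 = { 7 };

	if (e2.start_offset < e1.start_offset)
		swap(e1, e2);
	printf("%d %d\n", e1.start_offset, e2.start_offset);	/* 7 42 */
	return 0;
}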
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index a25ab9b37953..bd3ba276e69c 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -282,7 +282,7 @@ salinfo_event_open(struct inode *inode, struct file *file)
static ssize_t
salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
- struct salinfo_data *data = PDE_DATA(file_inode(file));
+ struct salinfo_data *data = pde_data(file_inode(file));
char cmd[32];
size_t size;
int i, n, cpu = -1;
@@ -340,7 +340,7 @@ static const struct proc_ops salinfo_event_proc_ops = {
static int
salinfo_log_open(struct inode *inode, struct file *file)
{
- struct salinfo_data *data = PDE_DATA(inode);
+ struct salinfo_data *data = pde_data(inode);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -365,7 +365,7 @@ salinfo_log_open(struct inode *inode, struct file *file)
static int
salinfo_log_release(struct inode *inode, struct file *file)
{
- struct salinfo_data *data = PDE_DATA(inode);
+ struct salinfo_data *data = pde_data(inode);
if (data->state == STATE_NO_DATA) {
vfree(data->log_buffer);
@@ -433,7 +433,7 @@ retry:
static ssize_t
salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
- struct salinfo_data *data = PDE_DATA(file_inode(file));
+ struct salinfo_data *data = pde_data(file_inode(file));
u8 *buf;
u64 bufsize;
@@ -494,7 +494,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
static ssize_t
salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
- struct salinfo_data *data = PDE_DATA(file_inode(file));
+ struct salinfo_data *data = pde_data(file_inode(file));
char cmd[32];
size_t size;
u32 offset;
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 31fb84de2d21..5010348fa21b 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -208,10 +208,7 @@ sort_regions (struct rsvd_region *rsvd_region, int max)
while (max--) {
for (j = 0; j < max; ++j) {
if (rsvd_region[j].start > rsvd_region[j+1].start) {
- struct rsvd_region tmp;
- tmp = rsvd_region[j];
- rsvd_region[j] = rsvd_region[j + 1];
- rsvd_region[j + 1] = tmp;
+ swap(rsvd_region[j], rsvd_region[j + 1]);
}
}
}
diff --git a/arch/ia64/kernel/syscalls/syscall.tbl b/arch/ia64/kernel/syscalls/syscall.tbl
index 707ae121f6d3..78b1d03e86e1 100644
--- a/arch/ia64/kernel/syscalls/syscall.tbl
+++ b/arch/ia64/kernel/syscalls/syscall.tbl
@@ -370,3 +370,4 @@
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 3639e0a7cb3b..e4992917a24b 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -264,6 +264,7 @@ static struct attribute * cache_default_attrs[] = {
&shared_cpu_map.attr,
NULL
};
+ATTRIBUTE_GROUPS(cache_default);
#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)
@@ -284,7 +285,7 @@ static const struct sysfs_ops cache_sysfs_ops = {
static struct kobj_type cache_ktype = {
.sysfs_ops = &cache_sysfs_ops,
- .default_attrs = cache_default_attrs,
+ .default_groups = cache_default_groups,
};
static struct kobj_type cache_ktype_percpu_entry = {
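
kobj_type's default_attrs pointer is being phased out in favor of default_groups, and ATTRIBUTE_GROUPS(cache_default) manufactures the cache_default_groups array from the existing cache_default_attrs list by token pasting. A trimmed userspace sketch of that expansion; the structures here are cut-down stand-ins, not the real sysfs types:

#include <stdio.h>

struct attribute { const char *name; };
struct attribute_group { struct attribute **attrs; };

#define ATTRIBUTE_GROUPS(_name)						\
static const struct attribute_group _name##_group = {			\
	.attrs = _name##_attrs,						\
};									\
static const struct attribute_group *_name##_groups[] = {		\
	&_name##_group,							\
	NULL,								\
}

static struct attribute shared_cpu_map = { "shared_cpu_map" };
static struct attribute *cache_default_attrs[] = { &shared_cpu_map, NULL };

ATTRIBUTE_GROUPS(cache_default);

int main(void)
{
	printf("first attr: %s\n", cache_default_groups[0]->attrs[0]->name);
	return 0;
}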
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index e13cb905930f..753642366e12 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -85,7 +85,7 @@ die (const char *str, struct pt_regs *regs, long err)
if (panic_on_oops)
panic("Fatal exception");
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
return 0;
}
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index 51883a66aeb5..816803636a75 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -171,7 +171,7 @@ failed:
* @n_pages: number of contiguous pages to allocate
*
* Allocate the specified number of contiguous uncached pages on the
- * the requested node. If not enough contiguous uncached pages are available
+ * requested node. If not enough contiguous uncached pages are available
* on the requested node, roundrobin starting with the next higher node.
*/
unsigned long uncached_alloc_page(int starting_nid, int n_pages)
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 02de2e70c587..07379d1a227f 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -156,17 +156,15 @@ retry:
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- /* No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
- goto retry;
- }
+ goto retry;
}
mmap_read_unlock(mm);
@@ -259,7 +257,7 @@ retry:
regs = NULL;
bust_spinlocks(0);
if (regs)
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
return;
out_of_memory:
diff --git a/arch/ia64/pci/fixup.c b/arch/ia64/pci/fixup.c
index acb55a41260d..2bcdd7d3a1ad 100644
--- a/arch/ia64/pci/fixup.c
+++ b/arch/ia64/pci/fixup.c
@@ -76,5 +76,5 @@ static void pci_fixup_video(struct pci_dev *pdev)
}
}
}
-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index a4b6c7108465..bc9952f8be66 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -45,7 +45,6 @@ CONFIG_IOSCHED_BFQ=m
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 2db721965520..a77269c6e5ba 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -41,7 +41,6 @@ CONFIG_IOSCHED_BFQ=m
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index c266a704eecd..7a74efa6b9a1 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -48,7 +48,6 @@ CONFIG_IOSCHED_BFQ=m
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index f644f08dd6ed..a5323bf2eb33 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -38,7 +38,6 @@ CONFIG_IOSCHED_BFQ=m
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index e4924650b687..5e80aa0869d5 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -40,7 +40,6 @@ CONFIG_IOSCHED_BFQ=m
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 24113871ea76..e84326a3f62d 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -39,7 +39,6 @@ CONFIG_IOSCHED_BFQ=m
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 6a7e4be70eea..337552f43339 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -59,7 +59,6 @@ CONFIG_IOSCHED_BFQ=m
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 1d223247aff0..7b688f7d272a 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -37,7 +37,6 @@ CONFIG_IOSCHED_BFQ=m
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 961f789f96c9..7c2cb31d63dd 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -38,7 +38,6 @@ CONFIG_IOSCHED_BFQ=m
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index ff4b5e469390..ca43897af26d 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -39,7 +39,6 @@ CONFIG_IOSCHED_BFQ=m
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 5f228621d0cc..e3d515f37144 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -35,7 +35,6 @@ CONFIG_IOSCHED_BFQ=m
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index a600cb9e68c2..d601606c969b 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -35,7 +35,6 @@ CONFIG_IOSCHED_BFQ=m
CONFIG_BINFMT_AOUT=m
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
-CONFIG_CLEANCACHE=y
CONFIG_ZPOOL=m
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h
index 7b93e1fd8ffa..51283db53667 100644
--- a/arch/m68k/include/asm/bitops.h
+++ b/arch/m68k/include/asm/bitops.h
@@ -529,6 +529,4 @@ static inline int __fls(int x)
#include <asm-generic/bitops/le.h>
#endif /* __KERNEL__ */
-#include <asm-generic/bitops/find.h>
-
#endif /* _M68K_BITOPS_H */
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 94b3b274186d..aa3a0b8d07e9 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -273,17 +273,7 @@ out_eio:
asmlinkage void syscall_trace(void)
{
- ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
- ? 0x80 : 0));
- /*
- * this isn't the same as continuing with a signal, but it will do
- * for normal use. strace only continues with a signal if the
- * stopping signal is not SIGTRAP. -brl
- */
- if (current->exit_code) {
- send_sig(current->exit_code, current, 1);
- current->exit_code = 0;
- }
+ ptrace_report_syscall(0);
}
#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
diff --git a/arch/m68k/kernel/syscalls/syscall.tbl b/arch/m68k/kernel/syscalls/syscall.tbl
index 45bc32a41b90..b1f3940bc298 100644
--- a/arch/m68k/kernel/syscalls/syscall.tbl
+++ b/arch/m68k/kernel/syscalls/syscall.tbl
@@ -449,3 +449,4 @@
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 34d6458340b0..59fc63feb0dc 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -1131,7 +1131,7 @@ void die_if_kernel (char *str, struct pt_regs *fp, int nr)
pr_crit("%s: %08x\n", str, nr);
show_registers(fp);
add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
asmlinkage void set_esp0(unsigned long ssp)
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index ef46e77e97a5..1493cf5eac1e 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -48,7 +48,7 @@ int send_fault_sig(struct pt_regs *regs)
pr_alert("Unable to handle kernel access");
pr_cont(" at virtual address %p\n", addr);
die_if_kernel("Oops", regs, 0 /*error_code*/);
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
}
return 1;
@@ -153,18 +153,16 @@ good_area:
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- /*
- * No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
- goto retry;
- }
+ goto retry;
}
mmap_read_unlock(mm);
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index e775a696aa6f..1826d9ce4459 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -5,10 +5,10 @@ UTS_SYSNAME = -DUTS_SYSNAME=\"Linux\"
# What CPU version are we building for, and crack it open
# as major.minor.rev
-CPU_VER := $(shell echo $(CONFIG_XILINX_MICROBLAZE0_HW_VER))
-CPU_MAJOR := $(shell echo $(CPU_VER) | cut -d '.' -f 1)
-CPU_MINOR := $(shell echo $(CPU_VER) | cut -d '.' -f 2)
-CPU_REV := $(shell echo $(CPU_VER) | cut -d '.' -f 3)
+CPU_VER := $(CONFIG_XILINX_MICROBLAZE0_HW_VER)
+CPU_MAJOR := $(word 1, $(subst ., , $(CPU_VER)))
+CPU_MINOR := $(word 2, $(subst ., , $(CPU_VER)))
+CPU_REV := $(word 3, $(subst ., , $(CPU_VER)))
export CPU_VER CPU_MAJOR CPU_MINOR CPU_REV
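
The three $(shell echo ... | cut) pipelines are replaced with pure GNU Make text processing: $(subst ., ,$(CPU_VER)) splits "major.minor.rev" on dots and $(word N,...) selects each component, saving a shell invocation per variable. The same split expressed in C, with a made-up version string for illustration:

#include <stdio.h>

int main(void)
{
	const char *cpu_ver = "8.40.b";	/* hypothetical MicroBlaze HW version */
	char major[8], minor[8], rev[8];

	if (sscanf(cpu_ver, "%7[^.].%7[^.].%7s", major, minor, rev) == 3)
		printf("CPU_MAJOR=%s CPU_MINOR=%s CPU_REV=%s\n",
		       major, minor, rev);
	return 0;
}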
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
index 908788497b28..fd153d5fab98 100644
--- a/arch/microblaze/kernel/exceptions.c
+++ b/arch/microblaze/kernel/exceptions.c
@@ -44,10 +44,10 @@ void die(const char *str, struct pt_regs *fp, long err)
pr_warn("Oops: %s, sig: %ld\n", str, err);
show_regs(fp);
spin_unlock_irq(&die_lock);
- /* do_exit() should take care of panic'ing from an interrupt
+ /* make_task_dead() should take care of panic'ing from an interrupt
* context so we don't handle it here
*/
- do_exit(err);
+ make_task_dead(err);
}
/* for user application debugging */
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
index 2204bde3ce4a..820145e47350 100644
--- a/arch/microblaze/kernel/syscalls/syscall.tbl
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -455,3 +455,4 @@
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index b3fed2cecf84..a9626e6a68af 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -232,18 +232,16 @@ good_area:
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
-
- /*
- * No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
-
- goto retry;
- }
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
+
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
}
mmap_read_unlock(mm);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 0215dc1529e9..058446f01487 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -32,7 +32,6 @@ config MIPS
select GENERIC_ATOMIC64 if !64BIT
select GENERIC_CMOS_UPDATE
select GENERIC_CPU_AUTOPROBE
- select GENERIC_FIND_FIRST_BIT
select GENERIC_GETTIMEOFDAY
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
@@ -264,7 +263,6 @@ config BMIPS_GENERIC
bool "Broadcom Generic BMIPS kernel"
select ARCH_HAS_RESET_CONTROLLER
select ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
- select ARCH_HAS_PHYS_TO_DMA
select BOOT_RAW
select NO_EXCEPT_FILL
select USE_OF
@@ -640,9 +638,6 @@ config MACH_REALTEK_RTL
select SYS_SUPPORTS_MIPS16
select SYS_SUPPORTS_MULTITHREADING
select SYS_SUPPORTS_VPE_LOADER
- select SYS_HAS_EARLY_PRINTK
- select SYS_HAS_EARLY_PRINTK_8250
- select USE_GENERIC_EARLY_PRINTK_8250
select BOOT_RAW
select PINCTRL
select USE_OF
@@ -765,7 +760,6 @@ config SGI_IP30
select HAVE_PCI
select IRQ_MIPS_CPU
select IRQ_DOMAIN_HIERARCHY
- select NR_CPUS_DEFAULT_2
select PCI_DRIVERS_GENERIC
select PCI_XTALK_BRIDGE
select SYS_HAS_EARLY_PRINTK
@@ -1611,7 +1605,6 @@ config CPU_R4300
depends on SYS_HAS_CPU_R4300
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
- select CPU_HAS_LOAD_STORE_LR
help
MIPS Technologies R4300-series processors.
@@ -1907,6 +1900,10 @@ config SYS_HAS_CPU_MIPS64_R1
config SYS_HAS_CPU_MIPS64_R2
bool
+config SYS_HAS_CPU_MIPS64_R5
+ bool
+ select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
+
config SYS_HAS_CPU_MIPS64_R6
bool
select ARCH_HAS_SYNC_DMA_FOR_CPU if DMA_NONCOHERENT
@@ -2065,7 +2062,7 @@ config CPU_SUPPORTS_ADDRWINCFG
bool
config CPU_SUPPORTS_HUGEPAGES
bool
- depends on !(32BIT && (ARCH_PHYS_ADDR_T_64BIT || EVA))
+ depends on !(32BIT && (PHYS_ADDR_T_64BIT || EVA))
config MIPS_PGD_C0_CONTEXT
bool
depends on 64BIT
@@ -2116,6 +2113,16 @@ config MIPS_VA_BITS_48
If unsure, say N.
+config ZBOOT_LOAD_ADDRESS
+ hex "Compressed kernel load address"
+ default 0xffffffff80400000 if BCM47XX
+ default 0x0
+ depends on SYS_SUPPORTS_ZBOOT
+ help
+ The address to load compressed kernel, aka vmlinuz.
+
+ This is only used if non-zero.
+
choice
prompt "Kernel page size"
default PAGE_SIZE_4KB
@@ -2666,6 +2673,8 @@ config NUMA
bool "NUMA Support"
depends on SYS_SUPPORTS_NUMA
select SMP
+ select HAVE_SETUP_PER_CPU_AREA
+ select NEED_PER_CPU_EMBED_FIRST_CHUNK
help
Say Y to compile the kernel to support NUMA (Non-Uniform Memory
Access). This option improves performance on systems with more
@@ -2676,14 +2685,6 @@ config NUMA
config SYS_SUPPORTS_NUMA
bool
-config HAVE_SETUP_PER_CPU_AREA
- def_bool y
- depends on NUMA
-
-config NEED_PER_CPU_EMBED_FIRST_CHUNK
- def_bool y
- depends on NUMA
-
config RELOCATABLE
bool "Relocatable kernel"
depends on SYS_SUPPORTS_RELOCATABLE
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index ace7f033de07..e036fc025ccc 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -253,9 +253,7 @@ endif
#
# Board-dependent options and extra files
#
-ifdef need-compiler
include $(srctree)/arch/mips/Kbuild.platforms
-endif
ifdef CONFIG_PHYSICAL_START
load-y = $(CONFIG_PHYSICAL_START)
diff --git a/arch/mips/alchemy/common/gpiolib.c b/arch/mips/alchemy/common/gpiolib.c
index 7d5da5edd74d..a17d7a8909c4 100644
--- a/arch/mips/alchemy/common/gpiolib.c
+++ b/arch/mips/alchemy/common/gpiolib.c
@@ -23,8 +23,6 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Notes :
- * This file must ONLY be built when CONFIG_GPIOLIB=y and
- * CONFIG_ALCHEMY_GPIO_INDIRECT=n, otherwise compilation will fail!
* au1000 SoC have only one GPIO block : GPIO1
* Au1100, Au15x0, Au12x0 have a second one : GPIO2
* Au1300 is totally different: 1 block with up to 128 GPIOs
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 891f495c4c3c..0ac435fe2dc9 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -34,15 +34,6 @@
static char ath79_sys_type[ATH79_SYS_TYPE_LEN];
-static void ath79_restart(char *command)
-{
- local_irq_disable();
- ath79_device_reset_set(AR71XX_RESET_FULL_CHIP);
- for (;;)
- if (cpu_wait)
- cpu_wait();
-}
-
static void ath79_halt(void)
{
while (1)
@@ -234,7 +225,6 @@ void __init plat_mem_setup(void)
detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
- _machine_restart = ath79_restart;
_machine_halt = ath79_halt;
pm_power_off = ath79_halt;
}
diff --git a/arch/mips/bcm47xx/Platform b/arch/mips/bcm47xx/Platform
index 833b204fe5da..fe6daba3f948 100644
--- a/arch/mips/bcm47xx/Platform
+++ b/arch/mips/bcm47xx/Platform
@@ -4,4 +4,3 @@
cflags-$(CONFIG_BCM47XX) += \
-I$(srctree)/arch/mips/include/asm/mach-bcm47xx
load-$(CONFIG_BCM47XX) := 0xffffffff80001000
-zload-$(CONFIG_BCM47XX) += 0xffffffff80400000
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
index 35266a70e22a..87dc76a1f941 100644
--- a/arch/mips/bcm47xx/board.c
+++ b/arch/mips/bcm47xx/board.c
@@ -141,6 +141,7 @@ struct bcm47xx_board_type_list2 bcm47xx_board_list_boot_hw[] __initconst = {
{{BCM47XX_BOARD_LINKSYS_WRT300NV11, "Linksys WRT300N V1.1"}, "WRT300N", "1.1"},
{{BCM47XX_BOARD_LINKSYS_WRT310NV1, "Linksys WRT310N V1"}, "WRT310N", "1.0"},
{{BCM47XX_BOARD_LINKSYS_WRT310NV2, "Linksys WRT310N V2"}, "WRT310N", "2.0"},
+ {{BCM47XX_BOARD_LINKSYS_WRT320N_V1, "Linksys WRT320N V1"}, "WRT320N", "1.0"},
{{BCM47XX_BOARD_LINKSYS_WRT54G3GV2, "Linksys WRT54G3GV2-VF"}, "WRT54G3GV2-VF", "1.0"},
{{BCM47XX_BOARD_LINKSYS_WRT610NV1, "Linksys WRT610N V1"}, "WRT610N", "1.0"},
{{BCM47XX_BOARD_LINKSYS_WRT610NV2, "Linksys WRT610N V2"}, "WRT610N", "2.0"},
@@ -161,9 +162,12 @@ struct bcm47xx_board_type_list1 bcm47xx_board_list_board_id[] __initconst = {
{{BCM47XX_BOARD_LUXUL_XWR_600_V1, "Luxul XWR-600 V1"}, "luxul_xwr600_v1"},
{{BCM47XX_BOARD_LUXUL_XWR_1750_V1, "Luxul XWR-1750 V1"}, "luxul_xwr1750_v1"},
{{BCM47XX_BOARD_NETGEAR_R6200_V1, "Netgear R6200 V1"}, "U12H192T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_R6300_V1, "Netgear R6300 V1"}, "U12H218T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WGR614V8, "Netgear WGR614 V8"}, "U12H072T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WGR614V9, "Netgear WGR614 V9"}, "U12H094T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WGR614_V10, "Netgear WGR614 V10"}, "U12H139T01_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WN2500RP_V1, "Netgear WN2500RP V1"}, "U12H197T00_NETGEAR"},
+ {{BCM47XX_BOARD_NETGEAR_WN2500RP_V2, "Netgear WN2500RP V2"}, "U12H294T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNDR3300, "Netgear WNDR3300"}, "U12H093T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNDR3400V1, "Netgear WNDR3400 V1"}, "U12H155T00_NETGEAR"},
{{BCM47XX_BOARD_NETGEAR_WNDR3400V2, "Netgear WNDR3400 V2"}, "U12H187T00_NETGEAR"},
@@ -345,7 +349,7 @@ void __init bcm47xx_board_detect(void)
board_detected = bcm47xx_board_get_nvram();
bcm47xx_board.board = board_detected->board;
- strlcpy(bcm47xx_board.name, board_detected->name,
+ strscpy(bcm47xx_board.name, board_detected->name,
BCM47XX_BOARD_MAX_NAME);
}
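
strlcpy() gives way to strscpy() here: strscpy() never reads the source past what the destination can hold and returns -E2BIG on truncation rather than the full source length. A minimal userspace re-implementation to show the contract; my_strscpy() is an illustration, not the kernel's exact code:

#include <stdio.h>

#define E2BIG 7

static long my_strscpy(char *dst, const char *src, unsigned long size)
{
	unsigned long i;

	if (!size)
		return -E2BIG;
	for (i = 0; i < size - 1 && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0';
	return src[i] ? -E2BIG : (long)i;	/* truncated? */
}

int main(void)
{
	char name[8];
	long ret = my_strscpy(name, "Linksys WRT610N V2", sizeof(name));

	printf("ret=%ld name=\"%s\"\n", ret, name);	/* ret=-7 name="Linksys" */
	return 0;
}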
diff --git a/arch/mips/bcm47xx/buttons.c b/arch/mips/bcm47xx/buttons.c
index 535d84addcdb..36f0b1aafaa2 100644
--- a/arch/mips/bcm47xx/buttons.c
+++ b/arch/mips/bcm47xx/buttons.c
@@ -27,6 +27,12 @@
/* Asus */
static const struct gpio_keys_button
+bcm47xx_buttons_asus_rtn10u[] __initconst = {
+ BCM47XX_GPIO_KEY(20, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(21, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
bcm47xx_buttons_asus_rtn12[] __initconst = {
BCM47XX_GPIO_KEY(0, KEY_WPS_BUTTON),
BCM47XX_GPIO_KEY(1, KEY_RESTART),
@@ -277,6 +283,18 @@ bcm47xx_buttons_linksys_wrt310nv1[] __initconst = {
};
static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt310n_v2[] __initconst = {
+ BCM47XX_GPIO_KEY(5, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_linksys_wrt320n_v1[] __initconst = {
+ BCM47XX_GPIO_KEY(5, KEY_WPS_BUTTON),
+ BCM47XX_GPIO_KEY(8, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
bcm47xx_buttons_linksys_wrt54g3gv2[] __initconst = {
BCM47XX_GPIO_KEY(5, KEY_WIMAX),
BCM47XX_GPIO_KEY(6, KEY_RESTART),
@@ -392,6 +410,17 @@ bcm47xx_buttons_netgear_r6200_v1[] __initconst = {
};
static const struct gpio_keys_button
+bcm47xx_buttons_netgear_r6300_v1[] __initconst = {
+ BCM47XX_GPIO_KEY(6, KEY_RESTART),
+};
+
+static const struct gpio_keys_button
+bcm47xx_buttons_netgear_wn2500rp_v1[] __initconst = {
+ BCM47XX_GPIO_KEY(12, KEY_RESTART),
+ BCM47XX_GPIO_KEY(31, KEY_WPS_BUTTON),
+};
+
+static const struct gpio_keys_button
bcm47xx_buttons_netgear_wndr3400v1[] __initconst = {
BCM47XX_GPIO_KEY(4, KEY_RESTART),
BCM47XX_GPIO_KEY(6, KEY_WPS_BUTTON),
@@ -478,6 +507,9 @@ int __init bcm47xx_buttons_register(void)
int err;
switch (board) {
+ case BCM47XX_BOARD_ASUS_RTN10U:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_rtn10u);
+ break;
case BCM47XX_BOARD_ASUS_RTN12:
err = bcm47xx_copy_bdata(bcm47xx_buttons_asus_rtn12);
break;
@@ -608,6 +640,12 @@ int __init bcm47xx_buttons_register(void)
case BCM47XX_BOARD_LINKSYS_WRT310NV1:
err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt310nv1);
break;
+ case BCM47XX_BOARD_LINKSYS_WRT310NV2:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt310n_v2);
+ break;
+ case BCM47XX_BOARD_LINKSYS_WRT320N_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt320n_v1);
+ break;
case BCM47XX_BOARD_LINKSYS_WRT54G3GV2:
err = bcm47xx_copy_bdata(bcm47xx_buttons_linksys_wrt54g3gv2);
break;
@@ -674,6 +712,12 @@ int __init bcm47xx_buttons_register(void)
case BCM47XX_BOARD_NETGEAR_R6200_V1:
err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_r6200_v1);
break;
+ case BCM47XX_BOARD_NETGEAR_R6300_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_r6300_v1);
+ break;
+ case BCM47XX_BOARD_NETGEAR_WN2500RP_V1:
+ err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wn2500rp_v1);
+ break;
case BCM47XX_BOARD_NETGEAR_WNDR3400V1:
err = bcm47xx_copy_bdata(bcm47xx_buttons_netgear_wndr3400v1);
break;
diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
index 167c42c71e79..4648a302a3c0 100644
--- a/arch/mips/bcm47xx/leds.c
+++ b/arch/mips/bcm47xx/leds.c
@@ -30,6 +30,14 @@
/* Asus */
static const struct gpio_led
+bcm47xx_leds_asus_rtn10u[] __initconst = {
+ BCM47XX_GPIO_LED(5, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(6, "green", "power", 1, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(7, "green", "wps", 0, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(8, "green", "usb", 0, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
bcm47xx_leds_asus_rtn12[] __initconst = {
BCM47XX_GPIO_LED(2, "unk", "power", 1, LEDS_GPIO_DEFSTATE_ON),
BCM47XX_GPIO_LED(7, "unk", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
@@ -314,6 +322,13 @@ bcm47xx_leds_linksys_wrt310nv1[] __initconst = {
};
static const struct gpio_led
+bcm47xx_leds_linksys_wrt320n_v1[] __initconst = {
+ BCM47XX_GPIO_LED(1, "blue", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
+ BCM47XX_GPIO_LED(2, "blue", "power", 0, LEDS_GPIO_DEFSTATE_ON),
+ BCM47XX_GPIO_LED(4, "amber", "wps", 1, LEDS_GPIO_DEFSTATE_OFF),
+};
+
+static const struct gpio_led
bcm47xx_leds_linksys_wrt54g_generic[] __initconst = {
BCM47XX_GPIO_LED(0, "unk", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
BCM47XX_GPIO_LED(1, "unk", "power", 0, LEDS_GPIO_DEFSTATE_ON),
@@ -556,6 +571,9 @@ void __init bcm47xx_leds_register(void)
enum bcm47xx_board board = bcm47xx_board_get();
switch (board) {
+ case BCM47XX_BOARD_ASUS_RTN10U:
+ bcm47xx_set_pdata(bcm47xx_leds_asus_rtn10u);
+ break;
case BCM47XX_BOARD_ASUS_RTN12:
bcm47xx_set_pdata(bcm47xx_leds_asus_rtn12);
break;
@@ -689,6 +707,9 @@ void __init bcm47xx_leds_register(void)
case BCM47XX_BOARD_LINKSYS_WRT310NV1:
bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt310nv1);
break;
+ case BCM47XX_BOARD_LINKSYS_WRT320N_V1:
+ bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt320n_v1);
+ break;
case BCM47XX_BOARD_LINKSYS_WRT54G3GV2:
bcm47xx_set_pdata(bcm47xx_leds_linksys_wrt54g3gv2);
break;
diff --git a/arch/mips/bcm63xx/clk.c b/arch/mips/bcm63xx/clk.c
index 1c91064cb448..6e6756e8fa0a 100644
--- a/arch/mips/bcm63xx/clk.c
+++ b/arch/mips/bcm63xx/clk.c
@@ -387,6 +387,12 @@ struct clk *clk_get_parent(struct clk *clk)
}
EXPORT_SYMBOL(clk_get_parent);
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
unsigned long clk_get_rate(struct clk *clk)
{
if (!clk)
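
bcm63xx carries its own legacy clk implementation, so it has to provide every clk API symbol that drivers may call; on a fixed clock tree re-parenting is meaningless, hence a stub that just reports success. The pattern in isolation, with a stand-in struct clk (illustrative only):

#include <stdio.h>

struct clk { const char *name; };

/* Fixed clock topology: accept the request and do nothing. */
static int clk_set_parent_stub(struct clk *clk, struct clk *parent)
{
	(void)clk;
	(void)parent;
	return 0;
}

int main(void)
{
	struct clk periph = { "periph" }, pll = { "pll" };

	printf("clk_set_parent -> %d\n", clk_set_parent_stub(&periph, &pll));
	return 0;
}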
diff --git a/arch/mips/bcm63xx/dev-wdt.c b/arch/mips/bcm63xx/dev-wdt.c
index 2a2346a99bcb..42130914a3c2 100644
--- a/arch/mips/bcm63xx/dev-wdt.c
+++ b/arch/mips/bcm63xx/dev-wdt.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/bcm7038_wdt.h>
#include <bcm63xx_cpu.h>
static struct resource wdt_resources[] = {
@@ -19,11 +20,18 @@ static struct resource wdt_resources[] = {
},
};
+static struct bcm7038_wdt_platform_data bcm63xx_wdt_pdata = {
+ .clk_name = "periph",
+};
+
static struct platform_device bcm63xx_wdt_device = {
.name = "bcm63xx-wdt",
.id = -1,
.num_resources = ARRAY_SIZE(wdt_resources),
.resource = wdt_resources,
+ .dev = {
+ .platform_data = &bcm63xx_wdt_pdata,
+ },
};
int __init bcm63xx_wdt_register(void)
diff --git a/arch/mips/bmips/dma.c b/arch/mips/bmips/dma.c
index 915ce4b189c1..c535f9cb75ec 100644
--- a/arch/mips/bmips/dma.c
+++ b/arch/mips/bmips/dma.c
@@ -1,68 +1,8 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
- */
+// SPDX-License-Identifier: GPL-2.0+
-#define pr_fmt(fmt) "bmips-dma: " fmt
-
-#include <linux/device.h>
-#include <linux/dma-direction.h>
-#include <linux/dma-direct.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/printk.h>
-#include <linux/slab.h>
#include <linux/types.h>
#include <asm/bmips.h>
-
-/*
- * BCM338x has configurable address translation windows which allow the
- * peripherals' DMA addresses to be different from the Zephyr-visible
- * physical addresses. e.g. usb_dma_addr = zephyr_pa ^ 0x08000000
- *
- * If the "brcm,ubus" node has a "dma-ranges" property we will enable this
- * translation globally using the provided information. This implements a
- * very limited subset of "dma-ranges" support and it will probably be
- * replaced by a more generic version later.
- */
-
-struct bmips_dma_range {
- u32 child_addr;
- u32 parent_addr;
- u32 size;
-};
-
-static struct bmips_dma_range *bmips_dma_ranges;
-
-#define FLUSH_RAC 0x100
-
-dma_addr_t phys_to_dma(struct device *dev, phys_addr_t pa)
-{
- struct bmips_dma_range *r;
-
- for (r = bmips_dma_ranges; r && r->size; r++) {
- if (pa >= r->child_addr &&
- pa < (r->child_addr + r->size))
- return pa - r->child_addr + r->parent_addr;
- }
- return pa;
-}
-
-phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dma_addr)
-{
- struct bmips_dma_range *r;
-
- for (r = bmips_dma_ranges; r && r->size; r++) {
- if (dma_addr >= r->parent_addr &&
- dma_addr < (r->parent_addr + r->size))
- return dma_addr - r->parent_addr + r->child_addr;
- }
- return dma_addr;
-}
+#include <asm/io.h>
void arch_sync_dma_for_cpu_all(void)
{
@@ -79,45 +19,3 @@ void arch_sync_dma_for_cpu_all(void)
__raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
__raw_readl(cbr + BMIPS_RAC_CONFIG);
}
-
-static int __init bmips_init_dma_ranges(void)
-{
- struct device_node *np =
- of_find_compatible_node(NULL, NULL, "brcm,ubus");
- const __be32 *data;
- struct bmips_dma_range *r;
- int len;
-
- if (!np)
- return 0;
-
- data = of_get_property(np, "dma-ranges", &len);
- if (!data)
- goto out_good;
-
- len /= sizeof(*data) * 3;
- if (!len)
- goto out_bad;
-
- /* add a dummy (zero) entry at the end as a sentinel */
- bmips_dma_ranges = kcalloc(len + 1, sizeof(struct bmips_dma_range),
- GFP_KERNEL);
- if (!bmips_dma_ranges)
- goto out_bad;
-
- for (r = bmips_dma_ranges; len; len--, r++) {
- r->child_addr = be32_to_cpup(data++);
- r->parent_addr = be32_to_cpup(data++);
- r->size = be32_to_cpup(data++);
- }
-
-out_good:
- of_node_put(np);
- return 0;
-
-out_bad:
- pr_err("error parsing dma-ranges property\n");
- of_node_put(np);
- return -EINVAL;
-}
-arch_initcall(bmips_init_dma_ranges);
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index f27cf31b4140..5a15d51e8884 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -52,7 +52,7 @@ endif
vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
-vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o $(obj)/ashldi3.o
+vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o $(obj)/ashldi3.o $(obj)/clz_ctz.o
targets := $(notdir $(vmlinuzobjs-y))
@@ -64,12 +64,12 @@ $(obj)/vmlinux.bin: $(KBUILD_IMAGE) FORCE
$(call if_changed,objcopy)
tool_$(CONFIG_KERNEL_GZIP) = gzip
-tool_$(CONFIG_KERNEL_BZIP2) = bzip2
-tool_$(CONFIG_KERNEL_LZ4) = lz4
-tool_$(CONFIG_KERNEL_LZMA) = lzma
-tool_$(CONFIG_KERNEL_LZO) = lzo
-tool_$(CONFIG_KERNEL_XZ) = xzkern
-tool_$(CONFIG_KERNEL_ZSTD) = zstd22
+tool_$(CONFIG_KERNEL_BZIP2) = bzip2_with_size
+tool_$(CONFIG_KERNEL_LZ4) = lz4_with_size
+tool_$(CONFIG_KERNEL_LZMA) = lzma_with_size
+tool_$(CONFIG_KERNEL_LZO) = lzo_with_size
+tool_$(CONFIG_KERNEL_XZ) = xzkern_with_size
+tool_$(CONFIG_KERNEL_ZSTD) = zstd22_with_size
targets += vmlinux.bin.z
@@ -89,6 +89,10 @@ HOSTCFLAGS_calc_vmlinuz_load_addr.o += $(LINUXINCLUDE)
# Calculate the load address of the compressed kernel image
hostprogs := calc_vmlinuz_load_addr
+ifneq (0x0,$(CONFIG_ZBOOT_LOAD_ADDRESS))
+zload-y = $(CONFIG_ZBOOT_LOAD_ADDRESS)
+endif
+
ifneq ($(zload-y),)
VMLINUZ_LOAD_ADDRESS := $(zload-y)
else
diff --git a/arch/mips/boot/compressed/clz_ctz.c b/arch/mips/boot/compressed/clz_ctz.c
new file mode 100644
index 000000000000..b4a1b6eb2f8a
--- /dev/null
+++ b/arch/mips/boot/compressed/clz_ctz.c
@@ -0,0 +1,2 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include "../../../../lib/clz_ctz.c"
diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi
index aa0b2d39c902..62588c53d356 100644
--- a/arch/mips/boot/dts/brcm/bcm7425.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi
@@ -584,4 +584,34 @@
};
};
};
+
+ pcie_0: pcie@8b20000 {
+ status = "disabled";
+ compatible = "brcm,bcm7425-pcie";
+
+ ranges = <0x02000000 0x0 0xd0000000 0xd0000000 0x0 0x08000000
+ 0x02000000 0x0 0xd8000000 0xd8000000 0x0 0x08000000
+ 0x02000000 0x0 0xe0000000 0xe0000000 0x0 0x08000000
+ 0x02000000 0x0 0xe8000000 0xe8000000 0x0 0x08000000>;
+
+ reg = <0x10410000 0x19310>;
+ aspm-no-l0s;
+ device_type = "pci";
+ msi-controller;
+ msi-parent = <&pcie_0>;
+ #address-cells = <0x3>;
+ #size-cells = <0x2>;
+ bus-range = <0x0 0xff>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ linux,pci-domain = <0x0>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <37>, <37>;
+ interrupt-names = "pcie", "msi";
+ #interrupt-cells = <0x1>;
+ interrupt-map = <0 0 0 1 &periph_intc 0x21
+ 0 0 0 1 &periph_intc 0x22
+ 0 0 0 1 &periph_intc 0x23
+ 0 0 0 1 &periph_intc 0x24>;
+ };
};
diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi
index 8398b7f68bf4..8c001b944c8b 100644
--- a/arch/mips/boot/dts/brcm/bcm7435.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi
@@ -599,4 +599,34 @@
};
};
};
+
+ pcie_0: pcie@8b20000 {
+ status = "disabled";
+ compatible = "brcm,bcm7435-pcie";
+
+ ranges = <0x02000000 0x0 0xd0000000 0xd0000000 0x0 0x08000000
+ 0x02000000 0x0 0xd8000000 0xd8000000 0x0 0x08000000
+ 0x02000000 0x0 0xe0000000 0xe0000000 0x0 0x08000000
+ 0x02000000 0x0 0xe8000000 0xe8000000 0x0 0x08000000>;
+
+ reg = <0x10410000 0x19310>;
+ aspm-no-l0s;
+ device_type = "pci";
+ msi-controller;
+ msi-parent = <&pcie_0>;
+ #address-cells = <0x3>;
+ #size-cells = <0x2>;
+ bus-range = <0x0 0xff>;
+ interrupt-map-mask = <0x0 0x0 0x0 0x7>;
+ linux,pci-domain = <0x0>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <39>, <39>;
+ interrupt-names = "pcie", "msi";
+ #interrupt-cells = <0x1>;
+ interrupt-map = <0 0 0 1 &periph_intc 0x23
+ 0 0 0 1 &periph_intc 0x24
+ 0 0 0 1 &periph_intc 0x25
+ 0 0 0 1 &periph_intc 0x26>;
+ };
};
diff --git a/arch/mips/boot/dts/brcm/bcm97425svmb.dts b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
index 9efecfe1e05c..f38934934349 100644
--- a/arch/mips/boot/dts/brcm/bcm97425svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
@@ -152,3 +152,12 @@
&waketimer {
status = "okay";
};
+
+&pcie_0 {
+ status = "okay";
+ /* 1GB Memc0, 1GB Memc1 */
+ brcm,scb-sizes = <0 0x40000000 0 0x40000000>;
+ dma-ranges = <0x43000000 0x00000000 0x00000000 0x00000000 0x0 0x10000000
+ 0x43000000 0x00000000 0x10000000 0x20000000 0x0 0x30000000
+ 0x43000000 0x00000000 0x40000000 0x90000000 0x0 0x40000000>;
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97435svmb.dts b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
index b653c6ff74b5..a0cf53e23c07 100644
--- a/arch/mips/boot/dts/brcm/bcm97435svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
@@ -128,3 +128,12 @@
&waketimer {
status = "okay";
};
+
+&pcie_0 {
+ status = "okay";
+ /* 1GB Memc0, 1GB Memc1 */
+ brcm,scb-sizes = <0 0x40000000 0 0x40000000>;
+ dma-ranges = <0x43000000 0x00000000 0x00000000 0x00000000 0x0 0x10000000
+ 0x43000000 0x00000000 0x10000000 0x20000000 0x0 0x30000000
+ 0x43000000 0x00000000 0x40000000 0x90000000 0x0 0x40000000>;
+};
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
index b249a4f0f6b6..ab6e3dc0bc1d 100644
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -78,6 +78,20 @@
enable-active-high;
};
+ hdmi_out: connector {
+ compatible = "hdmi-connector";
+ label = "HDMI OUT";
+ type = "a";
+
+ ddc-en-gpios = <&gpa 25 GPIO_ACTIVE_HIGH>;
+
+ port {
+ hdmi_con: endpoint {
+ remote-endpoint = <&dw_hdmi_out>;
+ };
+ };
+ };
+
ir: ir {
compatible = "gpio-ir-receiver";
gpios = <&gpe 3 GPIO_ACTIVE_LOW>;
@@ -114,11 +128,12 @@
* precision.
*/
assigned-clocks = <&cgu JZ4780_CLK_OTGPHY>, <&cgu JZ4780_CLK_RTC>,
- <&cgu JZ4780_CLK_SSIPLL>, <&cgu JZ4780_CLK_SSI>;
+ <&cgu JZ4780_CLK_SSIPLL>, <&cgu JZ4780_CLK_SSI>,
+ <&cgu JZ4780_CLK_HDMI>;
assigned-clock-parents = <0>, <&cgu JZ4780_CLK_RTCLK>,
<&cgu JZ4780_CLK_MPLL>,
<&cgu JZ4780_CLK_SSIPLL>;
- assigned-clock-rates = <48000000>, <0>, <54000000>;
+ assigned-clock-rates = <48000000>, <0>, <54000000>, <0>, <27000000>;
};
&tcu {
@@ -509,6 +524,12 @@
bias-disable;
};
+ pins_hdmi_ddc: hdmi_ddc {
+ function = "hdmi-ddc";
+ groups = "hdmi-ddc";
+ bias-disable;
+ };
+
pins_nemc: nemc {
function = "nemc";
groups = "nemc-data", "nemc-cle-ale", "nemc-rd-we", "nemc-frd-fwe";
@@ -539,3 +560,39 @@
bias-disable;
};
};
+
+&hdmi {
+ status = "okay";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&pins_hdmi_ddc>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dw_hdmi_in: endpoint {
+ remote-endpoint = <&lcd_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dw_hdmi_out: endpoint {
+ remote-endpoint = <&hdmi_con>;
+ };
+ };
+ };
+};
+
+&lcdc0 {
+ status = "okay";
+
+ port {
+ lcd_out: endpoint {
+ remote-endpoint = <&dw_hdmi_in>;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/ingenic/jz4725b.dtsi b/arch/mips/boot/dts/ingenic/jz4725b.dtsi
index 0c6a5a4266f4..e9e48022f631 100644
--- a/arch/mips/boot/dts/ingenic/jz4725b.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4725b.dtsi
@@ -321,7 +321,7 @@
lcd: lcd-controller@13050000 {
compatible = "ingenic,jz4725b-lcd";
- reg = <0x13050000 0x1000>;
+ reg = <0x13050000 0x130>; /* tbc */
interrupt-parent = <&intc>;
interrupts = <31>;
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index 772542e1f266..7f76cba03a08 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -323,7 +323,7 @@
lcd: lcd-controller@13050000 {
compatible = "ingenic,jz4740-lcd";
- reg = <0x13050000 0x1000>;
+ reg = <0x13050000 0x60>; /* LCDCMD1+4 */
interrupt-parent = <&intc>;
interrupts = <30>;
diff --git a/arch/mips/boot/dts/ingenic/jz4770.dtsi b/arch/mips/boot/dts/ingenic/jz4770.dtsi
index dfe74328ae5d..bda0a3a86ed5 100644
--- a/arch/mips/boot/dts/ingenic/jz4770.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4770.dtsi
@@ -399,7 +399,7 @@
lcd: lcd-controller@13050000 {
compatible = "ingenic,jz4770-lcd";
- reg = <0x13050000 0x300>;
+ reg = <0x13050000 0x130>; /* tbc */
interrupt-parent = <&intc>;
interrupts = <31>;
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index b0a4e2e019c3..3f9ea47a10cd 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -444,6 +444,46 @@
status = "disabled";
};
+ hdmi: hdmi@10180000 {
+ compatible = "ingenic,jz4780-dw-hdmi";
+ reg = <0x10180000 0x8000>;
+ reg-io-width = <4>;
+
+ clocks = <&cgu JZ4780_CLK_AHB0>, <&cgu JZ4780_CLK_HDMI>;
+ clock-names = "iahb", "isfr";
+
+ interrupt-parent = <&intc>;
+ interrupts = <3>;
+
+ status = "disabled";
+ };
+
+ lcdc0: lcdc0@13050000 {
+ compatible = "ingenic,jz4780-lcd";
+ reg = <0x13050000 0x1800>;
+
+ clocks = <&cgu JZ4780_CLK_TVE>, <&cgu JZ4780_CLK_LCD0PIXCLK>;
+ clock-names = "lcd", "lcd_pclk";
+
+ interrupt-parent = <&intc>;
+ interrupts = <31>;
+
+ status = "disabled";
+ };
+
+ lcdc1: lcdc1@130a0000 {
+ compatible = "ingenic,jz4780-lcd";
+ reg = <0x130a0000 0x1800>;
+
+ clocks = <&cgu JZ4780_CLK_TVE>, <&cgu JZ4780_CLK_LCD1PIXCLK>;
+ clock-names = "lcd", "lcd_pclk";
+
+ interrupt-parent = <&intc>;
+ interrupts = <23>;
+
+ status = "disabled";
+ };
+
nemc: nemc@13410000 {
compatible = "ingenic,jz4780-nemc", "simple-mfd";
reg = <0x13410000 0x10000>;
diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
index bfc3d3243ee7..8143a61111e3 100644
--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
@@ -52,6 +52,11 @@
0 0x40000000 0 0x40000000 0 0x40000000
0xfe 0x00000000 0xfe 0x00000000 0 0x40000000>;
+ pm: reset-controller@1fe07000 {
+ compatible = "loongson,ls2k-pm";
+ reg = <0 0x1fe07000 0 0x422>;
+ };
+
liointc0: interrupt-controller@1fe11400 {
compatible = "loongson,liointc-2.0";
reg = <0 0x1fe11400 0 0x40>,
diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S
index 0a515cde1c18..25860fba6218 100644
--- a/arch/mips/cavium-octeon/octeon-memcpy.S
+++ b/arch/mips/cavium-octeon/octeon-memcpy.S
@@ -74,7 +74,7 @@
#define EXC(inst_reg,addr,handler) \
9: inst_reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, handler; \
.previous
/*
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index d56e9b9d2e43..a994022e32c9 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -328,6 +328,7 @@ static int __init octeon_ehci_device_init(void)
pd->dev.platform_data = &octeon_ehci_pdata;
octeon_ehci_hw_start(&pd->dev);
+ put_device(&pd->dev);
return ret;
}
@@ -391,6 +392,7 @@ static int __init octeon_ohci_device_init(void)
pd->dev.platform_data = &octeon_ohci_pdata;
octeon_ohci_hw_start(&pd->dev);
+ put_device(&pd->dev);
return ret;
}
diff --git a/arch/mips/cavium-octeon/octeon-usb.c b/arch/mips/cavium-octeon/octeon-usb.c
index 6e4d3619137a..4df919d26b08 100644
--- a/arch/mips/cavium-octeon/octeon-usb.c
+++ b/arch/mips/cavium-octeon/octeon-usb.c
@@ -537,6 +537,7 @@ static int __init dwc3_octeon_device_init(void)
devm_iounmap(&pdev->dev, base);
devm_release_mem_region(&pdev->dev, res->start,
resource_size(res));
+ put_device(&pdev->dev);
}
} while (node != NULL);
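
This put_device(), like the two added to octeon-platform.c just above, releases the reference taken when the platform device was looked up, so the device refcount no longer leaks on these paths. A loose userspace sketch of the get/put discipline being restored, with stand-in types and helpers:

#include <stdio.h>

struct device { int refcount; };

static struct device *find_device(struct device *d)	/* lookup takes a ref */
{
	d->refcount++;
	return d;
}

static void put_device(struct device *d)		/* caller must drop it */
{
	d->refcount--;
}

int main(void)
{
	struct device dev = { .refcount = 1 };
	struct device *pd = find_device(&dev);

	/* ... use pd ... */
	put_device(pd);		/* the call the patches add on each exit path */
	printf("refcount back to %d\n", dev.refcount);
	return 0;
}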
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index ab7ebb066834..cc69b215854e 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -98,7 +98,13 @@ CONFIG_RC_DEVICES=y
CONFIG_IR_GPIO_CIR=m
CONFIG_IR_GPIO_TX=m
CONFIG_MEDIA_SUPPORT=m
+CONFIG_DRM=m
+CONFIG_DRM_INGENIC=m
+CONFIG_DRM_INGENIC_DW_HDMI=m
+CONFIG_DRM_DISPLAY_CONNECTOR=m
# CONFIG_VGA_CONSOLE is not set
+CONFIG_FB=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_HID is not set
CONFIG_USB=y
CONFIG_USB_STORAGE=y
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index 69f2300107f9..f8212a813be7 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -10,9 +10,6 @@ CONFIG_EXPERT=y
CONFIG_SLAB=y
CONFIG_MACH_TX49XX=y
CONFIG_TOSHIBA_RBTX4927=y
-CONFIG_TOSHIBA_RBTX4938=y
-CONFIG_TOSHIBA_RBTX4939=y
-CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP=y
# CONFIG_SECCOMP is not set
CONFIG_PCI=y
CONFIG_MODULES=y
@@ -38,7 +35,6 @@ CONFIG_MTD_JEDECPROBE=y
CONFIG_MTD_CFI_AMDSTD=y
CONFIG_MTD_COMPLEX_MAPPINGS=y
CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_RBTX4939=y
CONFIG_MTD_RAW_NAND=m
CONFIG_MTD_NAND_TXX9NDFMC=m
CONFIG_BLK_DEV_LOOP=y
diff --git a/arch/mips/dec/prom/init.c b/arch/mips/dec/prom/init.c
index cc988bbd27fc..cb12eb211a49 100644
--- a/arch/mips/dec/prom/init.c
+++ b/arch/mips/dec/prom/init.c
@@ -113,7 +113,7 @@ void __init prom_init(void)
if ((current_cpu_type() == CPU_R4000SC) ||
(current_cpu_type() == CPU_R4400SC)) {
static const char r4k_msg[] __initconst =
- "Please recompile with \"CONFIG_CPU_R4x00 = y\".\n";
+ "Please recompile with \"CONFIG_CPU_R4X00 = y\".\n";
printk(cpu_msg);
printk(r4k_msg);
dec_machine_halt();
diff --git a/arch/mips/generic/Platform b/arch/mips/generic/Platform
index e1abc113b409..0c03623f3897 100644
--- a/arch/mips/generic/Platform
+++ b/arch/mips/generic/Platform
@@ -13,8 +13,7 @@ cflags-$(CONFIG_MACH_INGENIC_SOC) += -I$(srctree)/arch/mips/include/asm/mach-ing
cflags-$(CONFIG_MIPS_GENERIC) += -I$(srctree)/arch/mips/include/asm/mach-generic
load-$(CONFIG_MIPS_GENERIC) += 0xffffffff80100000
-zload-$(CONFIG_MIPS_GENERIC) += 0xffffffff81000000
-all-$(CONFIG_MIPS_GENERIC) := vmlinux.gz.itb
+all-$(CONFIG_MIPS_GENERIC) += vmlinux.gz.itb
its-y := vmlinux.its.S
its-$(CONFIG_FIT_IMAGE_FDT_BOSTON) += board-boston.its.S
diff --git a/arch/mips/generic/init.c b/arch/mips/generic/init.c
index 1842cddd8356..1d712eac1617 100644
--- a/arch/mips/generic/init.c
+++ b/arch/mips/generic/init.c
@@ -110,14 +110,15 @@ void __init plat_mem_setup(void)
void __init device_tree_init(void)
{
- int err;
-
unflatten_and_copy_device_tree();
mips_cpc_probe();
- err = register_cps_smp_ops();
- if (err)
- err = register_up_smp_ops();
+ if (!register_cps_smp_ops())
+ return;
+ if (!register_vsmp_smp_ops())
+ return;
+
+ register_up_smp_ops();
}
int __init apply_mips_fdt_fixups(void *fdt_out, size_t fdt_out_size,
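
The rewritten device_tree_init() tries the SMP implementations in priority order — CPS first, then vSMP, then uniprocessor — and stops at the first register_* call that returns 0, instead of chaining results through a local err. The same early-return fallback expressed as a userspace sketch with stub registrations:

#include <stdio.h>

static int register_cps_smp_ops(void)	{ return -1; /* pretend: no CPS */ }
static int register_vsmp_smp_ops(void)	{ return 0;  /* pretend: vSMP works */ }
static int register_up_smp_ops(void)	{ return 0; }

int main(void)
{
	if (!register_cps_smp_ops()) {
		puts("using CPS SMP ops");
		return 0;
	}
	if (!register_vsmp_smp_ops()) {
		puts("using vSMP ops");
		return 0;
	}
	register_up_smp_ops();
	puts("falling back to uniprocessor ops");
	return 0;
}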
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 2f8ce94ebaaf..336ac9b65235 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -19,6 +19,7 @@
#include <asm/sgidefs.h>
#include <asm/asm-eva.h>
+#include <asm/isa-rev.h>
#ifndef __VDSO__
/*
@@ -211,6 +212,8 @@ symbol = value
#define LONG_SUB sub
#define LONG_SUBU subu
#define LONG_L lw
+#define LONG_LL ll
+#define LONG_SC sc
#define LONG_S sw
#define LONG_SP swp
#define LONG_SLL sll
@@ -219,6 +222,8 @@ symbol = value
#define LONG_SRLV srlv
#define LONG_SRA sra
#define LONG_SRAV srav
+#define LONG_INS ins
+#define LONG_EXT ext
#ifdef __ASSEMBLY__
#define LONG .word
@@ -236,6 +241,8 @@ symbol = value
#define LONG_SUB dsub
#define LONG_SUBU dsubu
#define LONG_L ld
+#define LONG_LL lld
+#define LONG_SC scd
#define LONG_S sd
#define LONG_SP sdp
#define LONG_SLL dsll
@@ -244,6 +251,8 @@ symbol = value
#define LONG_SRLV dsrlv
#define LONG_SRA dsra
#define LONG_SRAV dsrav
+#define LONG_INS dins
+#define LONG_EXT dext
#ifdef __ASSEMBLY__
#define LONG .dword
@@ -276,7 +285,7 @@ symbol = value
#define PTR_SCALESHIFT 2
-#define PTR .word
+#define PTR_WD .word
#define PTRSIZE 4
#define PTRLOG 2
#endif
@@ -301,7 +310,7 @@ symbol = value
#define PTR_SCALESHIFT 3
-#define PTR .dword
+#define PTR_WD .dword
#define PTRSIZE 8
#define PTRLOG 3
#endif
@@ -320,6 +329,19 @@ symbol = value
#define SSNOP sll zero, zero, 1
+/*
+ * Using a branch-likely instruction to check the result of an sc instruction
+ * works around a bug present in R10000 CPUs prior to revision 3.0 that could
+ * cause ll-sc sequences to execute non-atomically.
+ */
+#ifdef CONFIG_WAR_R10000_LLSC
+# define SC_BEQZ beqzl
+#elif MIPS_ISA_REV >= 6
+# define SC_BEQZ beqzc
+#else
+# define SC_BEQZ beqz
+#endif
+
#ifdef CONFIG_SGI_IP28
/* Inhibit speculative stores to volatile (e.g.DMA) or invalid addresses. */
#include <asm/cacheops.h>
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index a0b9e7c1e4fc..712fb5a6a568 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -16,13 +16,12 @@
#include <linux/irqflags.h>
#include <linux/types.h>
+#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
-#include <asm/llsc.h>
#include <asm/sync.h>
-#include <asm/war.h>
#define ATOMIC_OPS(pfx, type) \
static __always_inline type arch_##pfx##_read(const pfx##_t *v) \
@@ -74,7 +73,7 @@ static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v) \
"1: " #ll " %0, %1 # " #pfx "_" #op " \n" \
" " #asm_op " %0, %2 \n" \
" " #sc " %0, %1 \n" \
- "\t" __SC_BEQZ "%0, 1b \n" \
+ "\t" __stringify(SC_BEQZ) " %0, 1b \n" \
" .set pop \n" \
: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i) : __LLSC_CLOBBER); \
@@ -104,7 +103,7 @@ arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
"1: " #ll " %1, %2 # " #pfx "_" #op "_return\n" \
" " #asm_op " %0, %1, %3 \n" \
" " #sc " %0, %2 \n" \
- "\t" __SC_BEQZ "%0, 1b \n" \
+ "\t" __stringify(SC_BEQZ) " %0, 1b \n" \
" " #asm_op " %0, %1, %3 \n" \
" .set pop \n" \
: "=&r" (result), "=&r" (temp), \
@@ -137,7 +136,7 @@ arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
"1: " #ll " %1, %2 # " #pfx "_fetch_" #op "\n" \
" " #asm_op " %0, %1, %3 \n" \
" " #sc " %0, %2 \n" \
- "\t" __SC_BEQZ "%0, 1b \n" \
+ "\t" __stringify(SC_BEQZ) " %0, 1b \n" \
" .set pop \n" \
" move %0, %1 \n" \
: "=&r" (result), "=&r" (temp), \
@@ -237,7 +236,7 @@ static __inline__ type arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" " #sc " %1, %2 \n" \
- " " __SC_BEQZ "%1, 1b \n" \
+ " " __stringify(SC_BEQZ) " %1, 1b \n" \
"2: " __SYNC(full, loongson3_war) " \n" \
" .set pop \n" \
: "=&r" (result), "=&r" (temp), \
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index dc2a6234dd3c..b4bf754f7db3 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -16,14 +16,12 @@
#include <linux/bits.h>
#include <linux/compiler.h>
#include <linux/types.h>
+#include <asm/asm.h>
#include <asm/barrier.h>
#include <asm/byteorder.h> /* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
-#include <asm/isa-rev.h>
-#include <asm/llsc.h>
#include <asm/sgidefs.h>
-#include <asm/war.h>
#define __bit_op(mem, insn, inputs...) do { \
unsigned long __temp; \
@@ -32,10 +30,10 @@
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" " __SYNC(full, loongson3_war) " \n" \
- "1: " __LL "%0, %1 \n" \
+ "1: " __stringify(LONG_LL) " %0, %1 \n" \
" " insn " \n" \
- " " __SC "%0, %1 \n" \
- " " __SC_BEQZ "%0, 1b \n" \
+ " " __stringify(LONG_SC) " %0, %1 \n" \
+ " " __stringify(SC_BEQZ) " %0, 1b \n" \
" .set pop \n" \
: "=&r"(__temp), "+" GCC_OFF_SMALL_ASM()(mem) \
: inputs \
@@ -49,10 +47,10 @@
" .set push \n" \
" .set " MIPS_ISA_LEVEL " \n" \
" " __SYNC(full, loongson3_war) " \n" \
- "1: " __LL ll_dst ", %2 \n" \
+ "1: " __stringify(LONG_LL) " " ll_dst ", %2\n" \
" " insn " \n" \
- " " __SC "%1, %2 \n" \
- " " __SC_BEQZ "%1, 1b \n" \
+ " " __stringify(LONG_SC) " %1, %2 \n" \
+ " " __stringify(SC_BEQZ) " %1, 1b \n" \
" .set pop \n" \
: "=&r"(__orig), "=&r"(__temp), \
"+" GCC_OFF_SMALL_ASM()(mem) \
@@ -98,7 +96,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
}
if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit) && (bit >= 16)) {
- __bit_op(*m, __INS "%0, %3, %2, 1", "i"(bit), "r"(~0));
+ __bit_op(*m, __stringify(LONG_INS) " %0, %3, %2, 1", "i"(bit), "r"(~0));
return;
}
@@ -126,7 +124,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
}
if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(bit)) {
- __bit_op(*m, __INS "%0, $0, %2, 1", "i"(bit));
+ __bit_op(*m, __stringify(LONG_INS) " %0, $0, %2, 1", "i"(bit));
return;
}
@@ -234,8 +232,8 @@ static inline int test_and_clear_bit(unsigned long nr,
res = __mips_test_and_clear_bit(nr, addr);
} else if ((MIPS_ISA_REV >= 2) && __builtin_constant_p(nr)) {
res = __test_bit_op(*m, "%1",
- __EXT "%0, %1, %3, 1;"
- __INS "%1, $0, %3, 1",
+ __stringify(LONG_EXT) " %0, %1, %3, 1;"
+ __stringify(LONG_INS) " %1, $0, %3, 1",
"i"(bit));
} else {
orig = __test_bit_op(*m, "%0",
@@ -446,7 +444,6 @@ static inline int ffs(int word)
}
#include <asm-generic/bitops/ffz.h>
-#include <asm-generic/bitops/find.h>
#ifdef __KERNEL__
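The MIPS_ISA_REV >= 2 fast paths above use a single ins instruction in place of a load/or/store mask sequence when the bit number is a compile-time constant. In C terms, ins dst, src, pos, 1 replaces the one-bit field of dst at position pos with the low bit of src:

/* C model of the "ins %0, %3, pos, 1" step inside set_bit()'s ll/sc
 * loop (illustration only; atomicity comes from the surrounding loop). */
static unsigned long ins_model(unsigned long word, unsigned long src,
			       unsigned int pos)
{
	unsigned long mask = 1UL << pos;

	return (word & ~mask) | ((src & 1UL) << pos);
}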
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 66a8b293fd80..7ec9493b2861 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -10,10 +10,9 @@
#include <linux/bug.h>
#include <linux/irqflags.h>
+#include <asm/asm.h>
#include <asm/compiler.h>
-#include <asm/llsc.h>
#include <asm/sync.h>
-#include <asm/war.h>
/*
* These functions don't exist, so if they are called you'll either:
@@ -48,7 +47,7 @@ extern unsigned long __xchg_called_with_bad_pointer(void)
" move $1, %z3 \n" \
" .set " MIPS_ISA_ARCH_LEVEL " \n" \
" " st " $1, %1 \n" \
- "\t" __SC_BEQZ "$1, 1b \n" \
+ "\t" __stringify(SC_BEQZ) " $1, 1b \n" \
" .set pop \n" \
: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
: GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \
@@ -127,7 +126,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
" move $1, %z4 \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
" " st " $1, %1 \n" \
- "\t" __SC_BEQZ "$1, 1b \n" \
+ "\t" __stringify(SC_BEQZ) " $1, 1b \n" \
" .set pop \n" \
"2: " __SYNC(full, loongson3_war) " \n" \
: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
@@ -282,7 +281,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
/* Attempt to store new at ptr */
" scd %L1, %2 \n"
/* If we failed, loop! */
- "\t" __SC_BEQZ "%L1, 1b \n"
+ "\t" __stringify(SC_BEQZ) " %L1, 1b \n"
"2: " __SYNC(full, loongson3_war) " \n"
" .set pop \n"
: "=&r"(ret),
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index b463f2aa5a61..db497a8167da 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -32,7 +32,7 @@ do { \
".previous\n" \
\
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR) "\t1b, 3b\n\t" \
+ STR(PTR_WD) "\t1b, 3b\n\t" \
".previous\n" \
\
: [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\
@@ -54,7 +54,7 @@ do { \
".previous\n" \
\
".section\t__ex_table,\"a\"\n\t"\
- STR(PTR) "\t1b, 3b\n\t" \
+ STR(PTR_WD) "\t1b, 3b\n\t" \
".previous\n" \
\
: [tmp_err] "=r" (error) \
diff --git a/arch/mips/include/asm/kgdb.h b/arch/mips/include/asm/kgdb.h
index 4f2302267deb..b4e210d633c2 100644
--- a/arch/mips/include/asm/kgdb.h
+++ b/arch/mips/include/asm/kgdb.h
@@ -18,7 +18,7 @@
#ifdef CONFIG_32BIT
#define KGDB_GDB_REG_SIZE 32
#define GDB_SIZEOF_REG sizeof(u32)
-#else /* CONFIG_CPU_32BIT */
+#else /* CONFIG_32BIT */
#define KGDB_GDB_REG_SIZE 64
#define GDB_SIZEOF_REG sizeof(u64)
#endif
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 696f6b009377..717716cc51c5 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -20,6 +20,7 @@
#include <linux/threads.h>
#include <linux/spinlock.h>
+#include <asm/asm.h>
#include <asm/inst.h>
#include <asm/mipsregs.h>
@@ -379,9 +380,9 @@ static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
__asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 \n"
+ " "__stringify(LONG_LL) " %0, %1 \n"
" or %0, %2 \n"
- " " __SC "%0, %1 \n"
+ " "__stringify(LONG_SC) " %0, %1 \n"
" .set pop \n"
: "=&r" (temp), "+m" (*reg)
: "r" (val));
@@ -396,9 +397,9 @@ static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
__asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 \n"
+ " "__stringify(LONG_LL) " %0, %1 \n"
" and %0, %2 \n"
- " " __SC "%0, %1 \n"
+ " "__stringify(LONG_SC) " %0, %1 \n"
" .set pop \n"
: "=&r" (temp), "+m" (*reg)
: "r" (~val));
@@ -414,10 +415,10 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
__asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
- " " __LL "%0, %1 \n"
+ " "__stringify(LONG_LL) " %0, %1 \n"
" and %0, %2 \n"
" or %0, %3 \n"
- " " __SC "%0, %1 \n"
+ " "__stringify(LONG_SC) " %0, %1 \n"
" .set pop \n"
: "=&r" (temp), "+m" (*reg)
: "r" (~change), "r" (val & change));
@@ -897,7 +898,6 @@ static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
int kvm_arch_flush_remote_tlb(struct kvm *kvm);
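The three helpers above perform atomic set/clear/change on a guest CP0 register shadow. Expressed with compiler atomics instead of ll/sc, _kvm_atomic_change_c0_guest_reg() computes the following (a sketch for illustration, not kernel code):

/* Replace the bits selected by 'change' with the matching bits of
 * 'val', atomically; models the ll/and/or/sc sequence above. */
static void change_reg_model(unsigned long *reg, unsigned long change,
			     unsigned long val)
{
	unsigned long old = __atomic_load_n(reg, __ATOMIC_RELAXED), new;

	do {
		new = (old & ~change) | (val & change);
	} while (!__atomic_compare_exchange_n(reg, &old, new, false,
					      __ATOMIC_RELAXED,
					      __ATOMIC_RELAXED));
}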
diff --git a/arch/mips/include/asm/llsc.h b/arch/mips/include/asm/llsc.h
deleted file mode 100644
index ec09fe5d6d6c..000000000000
--- a/arch/mips/include/asm/llsc.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Macros for 32/64-bit neutral inline assembler
- */
-
-#ifndef __ASM_LLSC_H
-#define __ASM_LLSC_H
-
-#include <asm/isa-rev.h>
-
-#if _MIPS_SZLONG == 32
-#define __LL "ll "
-#define __SC "sc "
-#define __INS "ins "
-#define __EXT "ext "
-#elif _MIPS_SZLONG == 64
-#define __LL "lld "
-#define __SC "scd "
-#define __INS "dins "
-#define __EXT "dext "
-#endif
-
-/*
- * Using a branch-likely instruction to check the result of an sc instruction
- * works around a bug present in R10000 CPUs prior to revision 3.0 that could
- * cause ll-sc sequences to execute non-atomically.
- */
-#ifdef CONFIG_WAR_R10000_LLSC
-# define __SC_BEQZ "beqzl "
-#elif MIPS_ISA_REV >= 6
-# define __SC_BEQZ "beqzc "
-#else
-# define __SC_BEQZ "beqz "
-#endif
-
-#endif /* __ASM_LLSC_H */
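The deleted header carried the ll/sc mnemonics as ready-made string literals; asm.h now provides the same choices as bare tokens (LONG_LL, LONG_SC, LONG_INS, LONG_EXT, SC_BEQZ) keyed off the long size and ISA revision. Old and new spellings yield identical asm text; for a configuration where SC_BEQZ resolves to plain beqz:

/* old llsc.h:  "\t" __SC_BEQZ            "%0, 1b"   ->  "\tbeqz %0, 1b"  */
/* new asm.h:   "\t" __stringify(SC_BEQZ) " %0, 1b"  ->  "\tbeqz %0, 1b"  */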
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index ecda7295ddcd..08366b1fd273 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -5,9 +5,9 @@
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
+#include <asm/asm.h>
#include <asm/cmpxchg.h>
#include <asm/compiler.h>
-#include <asm/war.h>
typedef struct
{
@@ -31,34 +31,18 @@ static __inline__ long local_add_return(long i, local_t * l)
{
unsigned long result;
- if (kernel_uses_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) {
- unsigned long temp;
-
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- __SYNC(full, loongson3_war) " \n"
- "1:" __LL "%1, %2 # local_add_return \n"
- " addu %0, %1, %3 \n"
- __SC "%0, %2 \n"
- " beqzl %0, 1b \n"
- " addu %0, %1, %3 \n"
- " .set pop \n"
- : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
- : "Ir" (i), "m" (l->a.counter)
- : "memory");
- } else if (kernel_uses_llsc) {
+ if (kernel_uses_llsc) {
unsigned long temp;
__asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
- __SYNC(full, loongson3_war) " \n"
- "1:" __LL "%1, %2 # local_add_return \n"
- " addu %0, %1, %3 \n"
- __SC "%0, %2 \n"
- " beqz %0, 1b \n"
- " addu %0, %1, %3 \n"
+ __SYNC(full, loongson3_war) " \n"
+ "1:" __stringify(LONG_LL) " %1, %2 \n"
+ __stringify(LONG_ADDU) " %0, %1, %3 \n"
+ __stringify(LONG_SC) " %0, %2 \n"
+ __stringify(SC_BEQZ) " %0, 1b \n"
+ __stringify(LONG_ADDU) " %0, %1, %3 \n"
" .set pop \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter)
@@ -80,34 +64,19 @@ static __inline__ long local_sub_return(long i, local_t * l)
{
unsigned long result;
- if (kernel_uses_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) {
- unsigned long temp;
-
- __asm__ __volatile__(
- " .set push \n"
- " .set arch=r4000 \n"
- __SYNC(full, loongson3_war) " \n"
- "1:" __LL "%1, %2 # local_sub_return \n"
- " subu %0, %1, %3 \n"
- __SC "%0, %2 \n"
- " beqzl %0, 1b \n"
- " subu %0, %1, %3 \n"
- " .set pop \n"
- : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
- : "Ir" (i), "m" (l->a.counter)
- : "memory");
- } else if (kernel_uses_llsc) {
+ if (kernel_uses_llsc) {
unsigned long temp;
__asm__ __volatile__(
" .set push \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
- __SYNC(full, loongson3_war) " \n"
- "1:" __LL "%1, %2 # local_sub_return \n"
- " subu %0, %1, %3 \n"
- __SC "%0, %2 \n"
- " beqz %0, 1b \n"
- " subu %0, %1, %3 \n"
+ __SYNC(full, loongson3_war) " \n"
+ "1:" __stringify(LONG_LL) " %1, %2 \n"
+ __stringify(LONG_SUBU) " %0, %1, %3 \n"
+ __stringify(LONG_SC) " %0, %2 \n"
+ __stringify(SC_BEQZ) " %0, 1b \n"
+ __stringify(LONG_SUBU) " %0, %1, %3 \n"
" .set pop \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter)
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
index f879be3e8099..6583639fe760 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_board.h
@@ -72,6 +72,7 @@ enum bcm47xx_board {
BCM47XX_BOARD_LINKSYS_WRT300NV11,
BCM47XX_BOARD_LINKSYS_WRT310NV1,
BCM47XX_BOARD_LINKSYS_WRT310NV2,
+ BCM47XX_BOARD_LINKSYS_WRT320N_V1,
BCM47XX_BOARD_LINKSYS_WRT54G3GV2,
BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0101,
BCM47XX_BOARD_LINKSYS_WRT54G_TYPE_0467,
@@ -99,9 +100,12 @@ enum bcm47xx_board {
BCM47XX_BOARD_MOTOROLA_WR850GV2V3,
BCM47XX_BOARD_NETGEAR_R6200_V1,
+ BCM47XX_BOARD_NETGEAR_R6300_V1,
BCM47XX_BOARD_NETGEAR_WGR614V8,
BCM47XX_BOARD_NETGEAR_WGR614V9,
BCM47XX_BOARD_NETGEAR_WGR614_V10,
+ BCM47XX_BOARD_NETGEAR_WN2500RP_V1,
+ BCM47XX_BOARD_NETGEAR_WN2500RP_V2,
BCM47XX_BOARD_NETGEAR_WNDR3300,
BCM47XX_BOARD_NETGEAR_WNDR3400V1,
BCM47XX_BOARD_NETGEAR_WNDR3400V2,
diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
index 13373c5144f8..efb41b351974 100644
--- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
@@ -32,7 +32,7 @@
nop
/* Loongson-3A R2/R3 */
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
- slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
+ slti t0, t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
bnez t0, 2f
nop
1:
@@ -63,7 +63,7 @@
nop
/* Loongson-3A R2/R3 */
andi t0, (PRID_IMP_MASK | PRID_REV_MASK)
- slti t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
+ slti t0, t0, (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0)
bnez t0, 2f
nop
1:
diff --git a/arch/mips/include/asm/mach-tx49xx/mangle-port.h b/arch/mips/include/asm/mach-tx49xx/mangle-port.h
index 98c7abf4484a..50b1b8f1e186 100644
--- a/arch/mips/include/asm/mach-tx49xx/mangle-port.h
+++ b/arch/mips/include/asm/mach-tx49xx/mangle-port.h
@@ -9,16 +9,8 @@
#define ioswabb(a, x) (x)
#define __mem_ioswabb(a, x) (x)
-#if defined(CONFIG_TOSHIBA_RBTX4939) && \
- IS_ENABLED(CONFIG_SMC91X) && \
- defined(__BIG_ENDIAN)
-#define NEEDS_TXX9_IOSWABW
-extern u16 (*ioswabw)(volatile u16 *a, u16 x);
-extern u16 (*__mem_ioswabw)(volatile u16 *a, u16 x);
-#else
#define ioswabw(a, x) le16_to_cpu((__force __le16)(x))
#define __mem_ioswabw(a, x) (x)
-#endif
#define ioswabl(a, x) le32_to_cpu((__force __le32)(x))
#define __mem_ioswabl(a, x) (x)
#define ioswabq(a, x) le64_to_cpu((__force __le64)(x))
diff --git a/arch/mips/include/asm/mips-cps.h b/arch/mips/include/asm/mips-cps.h
index fd43d876892e..c077e8d100f5 100644
--- a/arch/mips/include/asm/mips-cps.h
+++ b/arch/mips/include/asm/mips-cps.h
@@ -7,6 +7,7 @@
#ifndef __MIPS_ASM_MIPS_CPS_H__
#define __MIPS_ASM_MIPS_CPS_H__
+#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/types.h>
@@ -112,14 +113,10 @@ static inline void clear_##unit##_##name(uint##sz##_t val) \
*/
static inline unsigned int mips_cps_numclusters(void)
{
- unsigned int num_clusters;
-
if (mips_cm_revision() < CM_REV_CM3_5)
return 1;
- num_clusters = read_gcr_config() & CM_GCR_CONFIG_NUM_CLUSTERS;
- num_clusters >>= __ffs(CM_GCR_CONFIG_NUM_CLUSTERS);
- return num_clusters;
+ return FIELD_GET(CM_GCR_CONFIG_NUM_CLUSTERS, read_gcr_config());
}
/**
@@ -169,7 +166,8 @@ static inline unsigned int mips_cps_numcores(unsigned int cluster)
return 0;
/* Add one before masking to handle 0xff indicating no cores */
- return (mips_cps_cluster_config(cluster) + 1) & CM_GCR_CONFIG_PCORES;
+ return FIELD_GET(CM_GCR_CONFIG_PCORES,
+ mips_cps_cluster_config(cluster) + 1);
}
/**
@@ -181,14 +179,11 @@ static inline unsigned int mips_cps_numcores(unsigned int cluster)
*/
static inline unsigned int mips_cps_numiocu(unsigned int cluster)
{
- unsigned int num_iocu;
-
if (!mips_cm_present())
return 0;
- num_iocu = mips_cps_cluster_config(cluster) & CM_GCR_CONFIG_NUMIOCU;
- num_iocu >>= __ffs(CM_GCR_CONFIG_NUMIOCU);
- return num_iocu;
+ return FIELD_GET(CM_GCR_CONFIG_NUMIOCU,
+ mips_cps_cluster_config(cluster));
}
/**
@@ -230,7 +225,7 @@ static inline unsigned int mips_cps_numvps(unsigned int cluster, unsigned int co
mips_cm_unlock_other();
- return (cfg + 1) & CM_GCR_Cx_CONFIG_PVPE;
+ return FIELD_GET(CM_GCR_Cx_CONFIG_PVPE, cfg + 1);
}
#endif /* __MIPS_ASM_MIPS_CPS_H__ */
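FIELD_GET() from <linux/bitfield.h> folds each removed mask-then-shift pair into one expression: mask the register with the field mask, then shift right by the mask's lowest set bit, exactly what the open-coded __ffs() sequences did. A model of the operation (sketch):

/* field_get_model(0xf0, 0xab) == 0xa: mask, then shift down by the
 * mask's lowest set bit (__builtin_ctzl(mask) plays the role of
 * __ffs(mask) here). */
static unsigned long field_get_model(unsigned long mask, unsigned long reg)
{
	return (reg & mask) >> __builtin_ctzl(mask);
}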
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
index 0e6bf220db61..6c61e0a63924 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -318,7 +318,7 @@ enum cvmx_chip_types_enum {
/* Functions to return string based on type */
#define ENUM_BRD_TYPE_CASE(x) \
- case x: return(#x + 16); /* Skip CVMX_BOARD_TYPE_ */
+ case x: return (&#x[16]); /* Skip CVMX_BOARD_TYPE_ */
static inline const char *cvmx_board_type_to_string(enum
cvmx_board_types_enum type)
{
@@ -410,7 +410,7 @@ static inline const char *cvmx_board_type_to_string(enum
}
#define ENUM_CHIP_TYPE_CASE(x) \
- case x: return(#x + 15); /* Skip CVMX_CHIP_TYPE */
+ case x: return (&#x[15]); /* Skip CVMX_CHIP_TYPE */
static inline const char *cvmx_chip_type_to_string(enum
cvmx_chip_types_enum type)
{
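The stringizing trick above: #x expands to the enumerator name as a string literal, and both #x + 16 and &#x[16] denote a pointer 16 characters into it, skipping the "CVMX_BOARD_TYPE_" prefix. The array form sidesteps clang's -Wstring-plus-int warning about adding an integer to a string literal. A standalone illustration (macro name hypothetical):

#include <stdio.h>

/* Mirrors ENUM_BRD_TYPE_CASE's pointer arithmetic on the stringized name. */
#define NAME_AFTER_PREFIX(x) (&#x[16])	/* same address as (#x + 16) */

int main(void)
{
	/* "CVMX_BOARD_TYPE_" is 16 characters, so this prints "NULL". */
	puts(NAME_AFTER_PREFIX(CVMX_BOARD_TYPE_NULL));
	return 0;
}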
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index af3788589ee6..431a1c9d53fc 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -119,7 +119,7 @@ static inline void flush_scache_line(unsigned long addr)
" j 2b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
- " "STR(PTR)" 1b, 3b \n" \
+ " "STR(PTR_WD)" 1b, 3b \n" \
" .previous" \
: "+r" (__err) \
: "i" (op), "r" (addr), "i" (-EFAULT)); \
@@ -142,7 +142,7 @@ static inline void flush_scache_line(unsigned long addr)
" j 2b \n" \
" .previous \n" \
" .section __ex_table,\"a\" \n" \
- " "STR(PTR)" 1b, 3b \n" \
+ " "STR(PTR_WD)" 1b, 3b \n" \
" .previous" \
: "+r" (__err) \
: "i" (op), "r" (addr), "i" (-EFAULT)); \
diff --git a/arch/mips/include/asm/sibyte/sb1250_mc.h b/arch/mips/include/asm/sibyte/sb1250_mc.h
index c02fe823effc..61411619dff3 100644
--- a/arch/mips/include/asm/sibyte/sb1250_mc.h
+++ b/arch/mips/include/asm/sibyte/sb1250_mc.h
@@ -484,7 +484,7 @@
/*
- * Bank Address Address Bits Register (Table 6-22)
+ * Bank Address Bits Register (Table 6-22)
*/
#define S_MC_BA_RESERVED 0
diff --git a/arch/mips/include/asm/smp-ops.h b/arch/mips/include/asm/smp-ops.h
index 65618ff1280c..864aea803984 100644
--- a/arch/mips/include/asm/smp-ops.h
+++ b/arch/mips/include/asm/smp-ops.h
@@ -101,6 +101,9 @@ static inline int register_vsmp_smp_ops(void)
#ifdef CONFIG_MIPS_MT_SMP
extern const struct plat_smp_ops vsmp_smp_ops;
+ if (!cpu_has_mipsmt)
+ return -ENODEV;
+
register_smp_ops(&vsmp_smp_ops);
return 0;
diff --git a/arch/mips/include/asm/txx9/boards.h b/arch/mips/include/asm/txx9/boards.h
index d45237befd3e..70284e90dc53 100644
--- a/arch/mips/include/asm/txx9/boards.h
+++ b/arch/mips/include/asm/txx9/boards.h
@@ -6,9 +6,3 @@ BOARD_VEC(jmr3927_vec)
BOARD_VEC(rbtx4927_vec)
BOARD_VEC(rbtx4937_vec)
#endif
-#ifdef CONFIG_TOSHIBA_RBTX4938
-BOARD_VEC(rbtx4938_vec)
-#endif
-#ifdef CONFIG_TOSHIBA_RBTX4939
-BOARD_VEC(rbtx4939_vec)
-#endif
diff --git a/arch/mips/include/asm/txx9/rbtx4938.h b/arch/mips/include/asm/txx9/rbtx4938.h
deleted file mode 100644
index 9c969dd3c6eb..000000000000
--- a/arch/mips/include/asm/txx9/rbtx4938.h
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Definitions for TX4937/TX4938
- *
- * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
- */
-#ifndef __ASM_TXX9_RBTX4938_H
-#define __ASM_TXX9_RBTX4938_H
-
-#include <asm/addrspace.h>
-#include <asm/txx9irq.h>
-#include <asm/txx9/tx4938.h>
-
-/* Address map */
-#define RBTX4938_FPGA_REG_ADDR (IO_BASE + TXX9_CE(2) + 0x00000000)
-#define RBTX4938_FPGA_REV_ADDR (IO_BASE + TXX9_CE(2) + 0x00000002)
-#define RBTX4938_CONFIG1_ADDR (IO_BASE + TXX9_CE(2) + 0x00000004)
-#define RBTX4938_CONFIG2_ADDR (IO_BASE + TXX9_CE(2) + 0x00000006)
-#define RBTX4938_CONFIG3_ADDR (IO_BASE + TXX9_CE(2) + 0x00000008)
-#define RBTX4938_LED_ADDR (IO_BASE + TXX9_CE(2) + 0x00001000)
-#define RBTX4938_DIPSW_ADDR (IO_BASE + TXX9_CE(2) + 0x00001002)
-#define RBTX4938_BDIPSW_ADDR (IO_BASE + TXX9_CE(2) + 0x00001004)
-#define RBTX4938_IMASK_ADDR (IO_BASE + TXX9_CE(2) + 0x00002000)
-#define RBTX4938_IMASK2_ADDR (IO_BASE + TXX9_CE(2) + 0x00002002)
-#define RBTX4938_INTPOL_ADDR (IO_BASE + TXX9_CE(2) + 0x00002004)
-#define RBTX4938_ISTAT_ADDR (IO_BASE + TXX9_CE(2) + 0x00002006)
-#define RBTX4938_ISTAT2_ADDR (IO_BASE + TXX9_CE(2) + 0x00002008)
-#define RBTX4938_IMSTAT_ADDR (IO_BASE + TXX9_CE(2) + 0x0000200a)
-#define RBTX4938_IMSTAT2_ADDR (IO_BASE + TXX9_CE(2) + 0x0000200c)
-#define RBTX4938_SOFTINT_ADDR (IO_BASE + TXX9_CE(2) + 0x00003000)
-#define RBTX4938_PIOSEL_ADDR (IO_BASE + TXX9_CE(2) + 0x00005000)
-#define RBTX4938_SPICS_ADDR (IO_BASE + TXX9_CE(2) + 0x00005002)
-#define RBTX4938_SFPWR_ADDR (IO_BASE + TXX9_CE(2) + 0x00005008)
-#define RBTX4938_SFVOL_ADDR (IO_BASE + TXX9_CE(2) + 0x0000500a)
-#define RBTX4938_SOFTRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x00007000)
-#define RBTX4938_SOFTRESETLOCK_ADDR (IO_BASE + TXX9_CE(2) + 0x00007002)
-#define RBTX4938_PCIRESET_ADDR (IO_BASE + TXX9_CE(2) + 0x00007004)
-#define RBTX4938_ETHER_BASE (IO_BASE + TXX9_CE(2) + 0x00020000)
-
-/* Ethernet port address (Jumperless Mode (W12:Open)) */
-#define RBTX4938_ETHER_ADDR (RBTX4938_ETHER_BASE + 0x280)
-
-/* bits for ISTAT/IMASK/IMSTAT */
-#define RBTX4938_INTB_PCID 0
-#define RBTX4938_INTB_PCIC 1
-#define RBTX4938_INTB_PCIB 2
-#define RBTX4938_INTB_PCIA 3
-#define RBTX4938_INTB_RTC 4
-#define RBTX4938_INTB_ATA 5
-#define RBTX4938_INTB_MODEM 6
-#define RBTX4938_INTB_SWINT 7
-#define RBTX4938_INTF_PCID (1 << RBTX4938_INTB_PCID)
-#define RBTX4938_INTF_PCIC (1 << RBTX4938_INTB_PCIC)
-#define RBTX4938_INTF_PCIB (1 << RBTX4938_INTB_PCIB)
-#define RBTX4938_INTF_PCIA (1 << RBTX4938_INTB_PCIA)
-#define RBTX4938_INTF_RTC (1 << RBTX4938_INTB_RTC)
-#define RBTX4938_INTF_ATA (1 << RBTX4938_INTB_ATA)
-#define RBTX4938_INTF_MODEM (1 << RBTX4938_INTB_MODEM)
-#define RBTX4938_INTF_SWINT (1 << RBTX4938_INTB_SWINT)
-
-#define rbtx4938_fpga_rev_addr ((__u8 __iomem *)RBTX4938_FPGA_REV_ADDR)
-#define rbtx4938_led_addr ((__u8 __iomem *)RBTX4938_LED_ADDR)
-#define rbtx4938_dipsw_addr ((__u8 __iomem *)RBTX4938_DIPSW_ADDR)
-#define rbtx4938_bdipsw_addr ((__u8 __iomem *)RBTX4938_BDIPSW_ADDR)
-#define rbtx4938_imask_addr ((__u8 __iomem *)RBTX4938_IMASK_ADDR)
-#define rbtx4938_imask2_addr ((__u8 __iomem *)RBTX4938_IMASK2_ADDR)
-#define rbtx4938_intpol_addr ((__u8 __iomem *)RBTX4938_INTPOL_ADDR)
-#define rbtx4938_istat_addr ((__u8 __iomem *)RBTX4938_ISTAT_ADDR)
-#define rbtx4938_istat2_addr ((__u8 __iomem *)RBTX4938_ISTAT2_ADDR)
-#define rbtx4938_imstat_addr ((__u8 __iomem *)RBTX4938_IMSTAT_ADDR)
-#define rbtx4938_imstat2_addr ((__u8 __iomem *)RBTX4938_IMSTAT2_ADDR)
-#define rbtx4938_softint_addr ((__u8 __iomem *)RBTX4938_SOFTINT_ADDR)
-#define rbtx4938_piosel_addr ((__u8 __iomem *)RBTX4938_PIOSEL_ADDR)
-#define rbtx4938_spics_addr ((__u8 __iomem *)RBTX4938_SPICS_ADDR)
-#define rbtx4938_sfpwr_addr ((__u8 __iomem *)RBTX4938_SFPWR_ADDR)
-#define rbtx4938_sfvol_addr ((__u8 __iomem *)RBTX4938_SFVOL_ADDR)
-#define rbtx4938_softreset_addr ((__u8 __iomem *)RBTX4938_SOFTRESET_ADDR)
-#define rbtx4938_softresetlock_addr \
- ((__u8 __iomem *)RBTX4938_SOFTRESETLOCK_ADDR)
-#define rbtx4938_pcireset_addr ((__u8 __iomem *)RBTX4938_PCIRESET_ADDR)
-
-/*
- * IRQ mappings
- */
-
-#define RBTX4938_SOFT_INT0 0 /* not used */
-#define RBTX4938_SOFT_INT1 1 /* not used */
-#define RBTX4938_IRC_INT 2
-#define RBTX4938_TIMER_INT 7
-
-/* These are the virtual IRQ numbers, we divide all IRQ's into
- * 'spaces', the 'space' determines where and how to enable/disable
- * that particular IRQ on an RBTX4938 machine. Add new 'spaces' as new
- * IRQ hardware is supported.
- */
-#define RBTX4938_NR_IRQ_IOC 8
-
-#define RBTX4938_IRQ_IRC TXX9_IRQ_BASE
-#define RBTX4938_IRQ_IOC (TXX9_IRQ_BASE + TX4938_NUM_IR)
-#define RBTX4938_IRQ_END (RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC)
-
-#define RBTX4938_IRQ_IRC_ECCERR (RBTX4938_IRQ_IRC + TX4938_IR_ECCERR)
-#define RBTX4938_IRQ_IRC_WTOERR (RBTX4938_IRQ_IRC + TX4938_IR_WTOERR)
-#define RBTX4938_IRQ_IRC_INT(n) (RBTX4938_IRQ_IRC + TX4938_IR_INT(n))
-#define RBTX4938_IRQ_IRC_SIO(n) (RBTX4938_IRQ_IRC + TX4938_IR_SIO(n))
-#define RBTX4938_IRQ_IRC_DMA(ch, n) (RBTX4938_IRQ_IRC + TX4938_IR_DMA(ch, n))
-#define RBTX4938_IRQ_IRC_PIO (RBTX4938_IRQ_IRC + TX4938_IR_PIO)
-#define RBTX4938_IRQ_IRC_PDMAC (RBTX4938_IRQ_IRC + TX4938_IR_PDMAC)
-#define RBTX4938_IRQ_IRC_PCIC (RBTX4938_IRQ_IRC + TX4938_IR_PCIC)
-#define RBTX4938_IRQ_IRC_TMR(n) (RBTX4938_IRQ_IRC + TX4938_IR_TMR(n))
-#define RBTX4938_IRQ_IRC_NDFMC (RBTX4938_IRQ_IRC + TX4938_IR_NDFMC)
-#define RBTX4938_IRQ_IRC_PCIERR (RBTX4938_IRQ_IRC + TX4938_IR_PCIERR)
-#define RBTX4938_IRQ_IRC_PCIPME (RBTX4938_IRQ_IRC + TX4938_IR_PCIPME)
-#define RBTX4938_IRQ_IRC_ACLC (RBTX4938_IRQ_IRC + TX4938_IR_ACLC)
-#define RBTX4938_IRQ_IRC_ACLCPME (RBTX4938_IRQ_IRC + TX4938_IR_ACLCPME)
-#define RBTX4938_IRQ_IRC_PCIC1 (RBTX4938_IRQ_IRC + TX4938_IR_PCIC1)
-#define RBTX4938_IRQ_IRC_SPI (RBTX4938_IRQ_IRC + TX4938_IR_SPI)
-#define RBTX4938_IRQ_IOC_PCID (RBTX4938_IRQ_IOC + RBTX4938_INTB_PCID)
-#define RBTX4938_IRQ_IOC_PCIC (RBTX4938_IRQ_IOC + RBTX4938_INTB_PCIC)
-#define RBTX4938_IRQ_IOC_PCIB (RBTX4938_IRQ_IOC + RBTX4938_INTB_PCIB)
-#define RBTX4938_IRQ_IOC_PCIA (RBTX4938_IRQ_IOC + RBTX4938_INTB_PCIA)
-#define RBTX4938_IRQ_IOC_RTC (RBTX4938_IRQ_IOC + RBTX4938_INTB_RTC)
-#define RBTX4938_IRQ_IOC_ATA (RBTX4938_IRQ_IOC + RBTX4938_INTB_ATA)
-#define RBTX4938_IRQ_IOC_MODEM (RBTX4938_IRQ_IOC + RBTX4938_INTB_MODEM)
-#define RBTX4938_IRQ_IOC_SWINT (RBTX4938_IRQ_IOC + RBTX4938_INTB_SWINT)
-
-
-/* IOC (PCI, etc) */
-#define RBTX4938_IRQ_IOCINT (TXX9_IRQ_BASE + TX4938_IR_INT(0))
-/* Onboard 10M Ether */
-#define RBTX4938_IRQ_ETHER (TXX9_IRQ_BASE + TX4938_IR_INT(1))
-
-#define RBTX4938_RTL_8019_BASE (RBTX4938_ETHER_ADDR - mips_io_port_base)
-#define RBTX4938_RTL_8019_IRQ (RBTX4938_IRQ_ETHER)
-
-void rbtx4938_prom_init(void);
-void rbtx4938_irq_setup(void);
-struct pci_dev;
-int rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
-
-#endif /* __ASM_TXX9_RBTX4938_H */
diff --git a/arch/mips/include/asm/txx9/rbtx4939.h b/arch/mips/include/asm/txx9/rbtx4939.h
deleted file mode 100644
index 6157bfd90848..000000000000
--- a/arch/mips/include/asm/txx9/rbtx4939.h
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Definitions for RBTX4939
- *
- * (C) Copyright TOSHIBA CORPORATION 2005-2006
- * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#ifndef __ASM_TXX9_RBTX4939_H
-#define __ASM_TXX9_RBTX4939_H
-
-#include <asm/addrspace.h>
-#include <asm/txx9irq.h>
-#include <asm/txx9/generic.h>
-#include <asm/txx9/tx4939.h>
-
-/* Address map */
-#define RBTX4939_IOC_REG_ADDR (IO_BASE + TXX9_CE(1) + 0x00000000)
-#define RBTX4939_BOARD_REV_ADDR (IO_BASE + TXX9_CE(1) + 0x00000000)
-#define RBTX4939_IOC_REV_ADDR (IO_BASE + TXX9_CE(1) + 0x00000002)
-#define RBTX4939_CONFIG1_ADDR (IO_BASE + TXX9_CE(1) + 0x00000004)
-#define RBTX4939_CONFIG2_ADDR (IO_BASE + TXX9_CE(1) + 0x00000006)
-#define RBTX4939_CONFIG3_ADDR (IO_BASE + TXX9_CE(1) + 0x00000008)
-#define RBTX4939_CONFIG4_ADDR (IO_BASE + TXX9_CE(1) + 0x0000000a)
-#define RBTX4939_USTAT_ADDR (IO_BASE + TXX9_CE(1) + 0x00001000)
-#define RBTX4939_UDIPSW_ADDR (IO_BASE + TXX9_CE(1) + 0x00001002)
-#define RBTX4939_BDIPSW_ADDR (IO_BASE + TXX9_CE(1) + 0x00001004)
-#define RBTX4939_IEN_ADDR (IO_BASE + TXX9_CE(1) + 0x00002000)
-#define RBTX4939_IPOL_ADDR (IO_BASE + TXX9_CE(1) + 0x00002002)
-#define RBTX4939_IFAC1_ADDR (IO_BASE + TXX9_CE(1) + 0x00002004)
-#define RBTX4939_IFAC2_ADDR (IO_BASE + TXX9_CE(1) + 0x00002006)
-#define RBTX4939_SOFTINT_ADDR (IO_BASE + TXX9_CE(1) + 0x00003000)
-#define RBTX4939_ISASTAT_ADDR (IO_BASE + TXX9_CE(1) + 0x00004000)
-#define RBTX4939_PCISTAT_ADDR (IO_BASE + TXX9_CE(1) + 0x00004002)
-#define RBTX4939_ROME_ADDR (IO_BASE + TXX9_CE(1) + 0x00004004)
-#define RBTX4939_SPICS_ADDR (IO_BASE + TXX9_CE(1) + 0x00004006)
-#define RBTX4939_AUDI_ADDR (IO_BASE + TXX9_CE(1) + 0x00004008)
-#define RBTX4939_ISAGPIO_ADDR (IO_BASE + TXX9_CE(1) + 0x0000400a)
-#define RBTX4939_PE1_ADDR (IO_BASE + TXX9_CE(1) + 0x00005000)
-#define RBTX4939_PE2_ADDR (IO_BASE + TXX9_CE(1) + 0x00005002)
-#define RBTX4939_PE3_ADDR (IO_BASE + TXX9_CE(1) + 0x00005004)
-#define RBTX4939_VP_ADDR (IO_BASE + TXX9_CE(1) + 0x00005006)
-#define RBTX4939_VPRESET_ADDR (IO_BASE + TXX9_CE(1) + 0x00005008)
-#define RBTX4939_VPSOUT_ADDR (IO_BASE + TXX9_CE(1) + 0x0000500a)
-#define RBTX4939_VPSIN_ADDR (IO_BASE + TXX9_CE(1) + 0x0000500c)
-#define RBTX4939_7SEG_ADDR(s, ch) \
- (IO_BASE + TXX9_CE(1) + 0x00006000 + (s) * 16 + ((ch) & 3) * 2)
-#define RBTX4939_SOFTRESET_ADDR (IO_BASE + TXX9_CE(1) + 0x00007000)
-#define RBTX4939_RESETEN_ADDR (IO_BASE + TXX9_CE(1) + 0x00007002)
-#define RBTX4939_RESETSTAT_ADDR (IO_BASE + TXX9_CE(1) + 0x00007004)
-#define RBTX4939_ETHER_BASE (IO_BASE + TXX9_CE(1) + 0x00020000)
-
-/* Ethernet port address */
-#define RBTX4939_ETHER_ADDR (RBTX4939_ETHER_BASE + 0x300)
-
-/* bits for IEN/IPOL/IFAC */
-#define RBTX4938_INTB_ISA0 0
-#define RBTX4938_INTB_ISA11 1
-#define RBTX4938_INTB_ISA12 2
-#define RBTX4938_INTB_ISA15 3
-#define RBTX4938_INTB_I2S 4
-#define RBTX4938_INTB_SW 5
-#define RBTX4938_INTF_ISA0 (1 << RBTX4938_INTB_ISA0)
-#define RBTX4938_INTF_ISA11 (1 << RBTX4938_INTB_ISA11)
-#define RBTX4938_INTF_ISA12 (1 << RBTX4938_INTB_ISA12)
-#define RBTX4938_INTF_ISA15 (1 << RBTX4938_INTB_ISA15)
-#define RBTX4938_INTF_I2S (1 << RBTX4938_INTB_I2S)
-#define RBTX4938_INTF_SW (1 << RBTX4938_INTB_SW)
-
-/* bits for PE1,PE2,PE3 */
-#define RBTX4939_PE1_ATA(ch) (0x01 << (ch))
-#define RBTX4939_PE1_RMII(ch) (0x04 << (ch))
-#define RBTX4939_PE2_SIO0 0x01
-#define RBTX4939_PE2_SIO2 0x02
-#define RBTX4939_PE2_SIO3 0x04
-#define RBTX4939_PE2_CIR 0x08
-#define RBTX4939_PE2_SPI 0x10
-#define RBTX4939_PE2_GPIO 0x20
-#define RBTX4939_PE3_VP 0x01
-#define RBTX4939_PE3_VP_P 0x02
-#define RBTX4939_PE3_VP_S 0x04
-
-#define rbtx4939_board_rev_addr ((u8 __iomem *)RBTX4939_BOARD_REV_ADDR)
-#define rbtx4939_ioc_rev_addr ((u8 __iomem *)RBTX4939_IOC_REV_ADDR)
-#define rbtx4939_config1_addr ((u8 __iomem *)RBTX4939_CONFIG1_ADDR)
-#define rbtx4939_config2_addr ((u8 __iomem *)RBTX4939_CONFIG2_ADDR)
-#define rbtx4939_config3_addr ((u8 __iomem *)RBTX4939_CONFIG3_ADDR)
-#define rbtx4939_config4_addr ((u8 __iomem *)RBTX4939_CONFIG4_ADDR)
-#define rbtx4939_ustat_addr ((u8 __iomem *)RBTX4939_USTAT_ADDR)
-#define rbtx4939_udipsw_addr ((u8 __iomem *)RBTX4939_UDIPSW_ADDR)
-#define rbtx4939_bdipsw_addr ((u8 __iomem *)RBTX4939_BDIPSW_ADDR)
-#define rbtx4939_ien_addr ((u8 __iomem *)RBTX4939_IEN_ADDR)
-#define rbtx4939_ipol_addr ((u8 __iomem *)RBTX4939_IPOL_ADDR)
-#define rbtx4939_ifac1_addr ((u8 __iomem *)RBTX4939_IFAC1_ADDR)
-#define rbtx4939_ifac2_addr ((u8 __iomem *)RBTX4939_IFAC2_ADDR)
-#define rbtx4939_softint_addr ((u8 __iomem *)RBTX4939_SOFTINT_ADDR)
-#define rbtx4939_isastat_addr ((u8 __iomem *)RBTX4939_ISASTAT_ADDR)
-#define rbtx4939_pcistat_addr ((u8 __iomem *)RBTX4939_PCISTAT_ADDR)
-#define rbtx4939_rome_addr ((u8 __iomem *)RBTX4939_ROME_ADDR)
-#define rbtx4939_spics_addr ((u8 __iomem *)RBTX4939_SPICS_ADDR)
-#define rbtx4939_audi_addr ((u8 __iomem *)RBTX4939_AUDI_ADDR)
-#define rbtx4939_isagpio_addr ((u8 __iomem *)RBTX4939_ISAGPIO_ADDR)
-#define rbtx4939_pe1_addr ((u8 __iomem *)RBTX4939_PE1_ADDR)
-#define rbtx4939_pe2_addr ((u8 __iomem *)RBTX4939_PE2_ADDR)
-#define rbtx4939_pe3_addr ((u8 __iomem *)RBTX4939_PE3_ADDR)
-#define rbtx4939_vp_addr ((u8 __iomem *)RBTX4939_VP_ADDR)
-#define rbtx4939_vpreset_addr ((u8 __iomem *)RBTX4939_VPRESET_ADDR)
-#define rbtx4939_vpsout_addr ((u8 __iomem *)RBTX4939_VPSOUT_ADDR)
-#define rbtx4939_vpsin_addr ((u8 __iomem *)RBTX4939_VPSIN_ADDR)
-#define rbtx4939_7seg_addr(s, ch) \
- ((u8 __iomem *)RBTX4939_7SEG_ADDR(s, ch))
-#define rbtx4939_softreset_addr ((u8 __iomem *)RBTX4939_SOFTRESET_ADDR)
-#define rbtx4939_reseten_addr ((u8 __iomem *)RBTX4939_RESETEN_ADDR)
-#define rbtx4939_resetstat_addr ((u8 __iomem *)RBTX4939_RESETSTAT_ADDR)
-
-/*
- * IRQ mappings
- */
-#define RBTX4939_NR_IRQ_IOC 8
-
-#define RBTX4939_IRQ_IOC (TXX9_IRQ_BASE + TX4939_NUM_IR)
-#define RBTX4939_IRQ_END (RBTX4939_IRQ_IOC + RBTX4939_NR_IRQ_IOC)
-
-/* IOC (ISA, etc) */
-#define RBTX4939_IRQ_IOCINT (TXX9_IRQ_BASE + TX4939_IR_INT(0))
-/* Onboard 10M Ether */
-#define RBTX4939_IRQ_ETHER (TXX9_IRQ_BASE + TX4939_IR_INT(1))
-
-void rbtx4939_prom_init(void);
-void rbtx4939_irq_setup(void);
-
-struct mtd_partition;
-struct map_info;
-struct rbtx4939_flash_data {
- unsigned int width;
- unsigned int nr_parts;
- struct mtd_partition *parts;
- void (*map_init)(struct map_info *map);
-};
-
-#endif /* __ASM_TXX9_RBTX4939_H */
diff --git a/arch/mips/include/asm/txx9/spi.h b/arch/mips/include/asm/txx9/spi.h
deleted file mode 100644
index 0d727f354557..000000000000
--- a/arch/mips/include/asm/txx9/spi.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Definitions for TX4937/TX4938 SPI
- *
- * Copyright (C) 2000-2001 Toshiba Corporation
- *
- * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
- */
-#ifndef __ASM_TXX9_SPI_H
-#define __ASM_TXX9_SPI_H
-
-#include <linux/errno.h>
-
-#ifdef CONFIG_SPI
-int spi_eeprom_register(int busid, int chipid, int size);
-int spi_eeprom_read(int busid, int chipid,
- int address, unsigned char *buf, int len);
-#else
-static inline int spi_eeprom_register(int busid, int chipid, int size)
-{
- return -ENODEV;
-}
-static inline int spi_eeprom_read(int busid, int chipid,
- int address, unsigned char *buf, int len)
-{
- return -ENODEV;
-}
-#endif
-
-#endif /* __ASM_TXX9_SPI_H */
diff --git a/arch/mips/include/asm/txx9/tx4939.h b/arch/mips/include/asm/txx9/tx4939.h
deleted file mode 100644
index abf980af9ef4..000000000000
--- a/arch/mips/include/asm/txx9/tx4939.h
+++ /dev/null
@@ -1,524 +0,0 @@
-/*
- * Definitions for TX4939
- *
- * Copyright (C) 2000-2001,2005-2006 Toshiba Corporation
- * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#ifndef __ASM_TXX9_TX4939_H
-#define __ASM_TXX9_TX4939_H
-
-/* some controllers are compatible with 4927/4938 */
-#include <asm/txx9/tx4938.h>
-
-#ifdef CONFIG_64BIT
-#define TX4939_REG_BASE 0xffffffffff1f0000UL /* == TX4938_REG_BASE */
-#else
-#define TX4939_REG_BASE 0xff1f0000UL /* == TX4938_REG_BASE */
-#endif
-#define TX4939_REG_SIZE 0x00010000 /* == TX4938_REG_SIZE */
-
-#define TX4939_ATA_REG(ch) (TX4939_REG_BASE + 0x3000 + (ch) * 0x1000)
-#define TX4939_NDFMC_REG (TX4939_REG_BASE + 0x5000)
-#define TX4939_SRAMC_REG (TX4939_REG_BASE + 0x6000)
-#define TX4939_CRYPTO_REG (TX4939_REG_BASE + 0x6800)
-#define TX4939_PCIC1_REG (TX4939_REG_BASE + 0x7000)
-#define TX4939_DDRC_REG (TX4939_REG_BASE + 0x8000)
-#define TX4939_EBUSC_REG (TX4939_REG_BASE + 0x9000)
-#define TX4939_VPC_REG (TX4939_REG_BASE + 0xa000)
-#define TX4939_DMA_REG(ch) (TX4939_REG_BASE + 0xb000 + (ch) * 0x800)
-#define TX4939_PCIC_REG (TX4939_REG_BASE + 0xd000)
-#define TX4939_CCFG_REG (TX4939_REG_BASE + 0xe000)
-#define TX4939_IRC_REG (TX4939_REG_BASE + 0xe800)
-#define TX4939_NR_TMR 6 /* 0xf000,0xf100,0xf200,0xfd00,0xfe00,0xff00 */
-#define TX4939_TMR_REG(ch) \
- (TX4939_REG_BASE + 0xf000 + ((ch) + ((ch) >= 3) * 10) * 0x100)
-#define TX4939_NR_SIO 4 /* 0xf300, 0xf400, 0xf380, 0xf480 */
-#define TX4939_SIO_REG(ch) \
- (TX4939_REG_BASE + 0xf300 + (((ch) & 1) << 8) + (((ch) & 2) << 6))
-#define TX4939_ACLC_REG (TX4939_REG_BASE + 0xf700)
-#define TX4939_SPI_REG (TX4939_REG_BASE + 0xf800)
-#define TX4939_I2C_REG (TX4939_REG_BASE + 0xf900)
-#define TX4939_I2S_REG (TX4939_REG_BASE + 0xfa00)
-#define TX4939_RTC_REG (TX4939_REG_BASE + 0xfb00)
-#define TX4939_CIR_REG (TX4939_REG_BASE + 0xfc00)
-
-#define TX4939_RNG_REG (TX4939_CRYPTO_REG + 0xb0)
-
-struct tx4939_le_reg {
- __u32 r;
- __u32 unused;
-};
-
-struct tx4939_ddrc_reg {
- struct tx4939_le_reg ctl[47];
- __u64 unused0[17];
- __u64 winen;
- __u64 win[4];
-};
-
-struct tx4939_ccfg_reg {
- __u64 ccfg;
- __u64 crir;
- __u64 pcfg;
- __u64 toea;
- __u64 clkctr;
- __u64 unused0;
- __u64 garbc;
- __u64 unused1[2];
- __u64 ramp;
- __u64 unused2[2];
- __u64 dskwctrl;
- __u64 mclkosc;
- __u64 mclkctl;
- __u64 unused3[17];
- struct {
- __u64 mr;
- __u64 dr;
- } gpio[2];
-};
-
-struct tx4939_irc_reg {
- struct tx4939_le_reg den;
- struct tx4939_le_reg scipb;
- struct tx4939_le_reg dm[2];
- struct tx4939_le_reg lvl[16];
- struct tx4939_le_reg msk;
- struct tx4939_le_reg edc;
- struct tx4939_le_reg pnd0;
- struct tx4939_le_reg cs;
- struct tx4939_le_reg pnd1;
- struct tx4939_le_reg dm2[2];
- struct tx4939_le_reg dbr[2];
- struct tx4939_le_reg dben;
- struct tx4939_le_reg unused0[2];
- struct tx4939_le_reg flag[2];
- struct tx4939_le_reg pol;
- struct tx4939_le_reg cnt;
- struct tx4939_le_reg maskint;
- struct tx4939_le_reg maskext;
-};
-
-struct tx4939_crypto_reg {
- struct tx4939_le_reg csr;
- struct tx4939_le_reg idesptr;
- struct tx4939_le_reg cdesptr;
- struct tx4939_le_reg buserr;
- struct tx4939_le_reg cip_tout;
- struct tx4939_le_reg cir;
- union {
- struct {
- struct tx4939_le_reg data[8];
- struct tx4939_le_reg ctrl;
- } gen;
- struct {
- struct {
- struct tx4939_le_reg l;
- struct tx4939_le_reg u;
- } key[3], ini;
- struct tx4939_le_reg ctrl;
- } des;
- struct {
- struct tx4939_le_reg key[4];
- struct tx4939_le_reg ini[4];
- struct tx4939_le_reg ctrl;
- } aes;
- struct {
- struct {
- struct tx4939_le_reg l;
- struct tx4939_le_reg u;
- } cnt;
- struct tx4939_le_reg ini[5];
- struct tx4939_le_reg unused;
- struct tx4939_le_reg ctrl;
- } hash;
- } cdr;
- struct tx4939_le_reg unused0[7];
- struct tx4939_le_reg rcsr;
- struct tx4939_le_reg rpr;
- __u64 rdr;
- __u64 ror[3];
- struct tx4939_le_reg unused1[2];
- struct tx4939_le_reg xorslr;
- struct tx4939_le_reg xorsur;
-};
-
-struct tx4939_crypto_desc {
- __u32 src;
- __u32 dst;
- __u32 next;
- __u32 ctrl;
- __u32 index;
- __u32 xor;
-};
-
-struct tx4939_vpc_reg {
- struct tx4939_le_reg csr;
- struct {
- struct tx4939_le_reg ctrlA;
- struct tx4939_le_reg ctrlB;
- struct tx4939_le_reg idesptr;
- struct tx4939_le_reg cdesptr;
- } port[3];
- struct tx4939_le_reg buserr;
-};
-
-struct tx4939_vpc_desc {
- __u32 src;
- __u32 next;
- __u32 ctrl1;
- __u32 ctrl2;
-};
-
-/*
- * IRC
- */
-#define TX4939_IR_NONE 0
-#define TX4939_IR_DDR 1
-#define TX4939_IR_WTOERR 2
-#define TX4939_NUM_IR_INT 3
-#define TX4939_IR_INT(n) (3 + (n))
-#define TX4939_NUM_IR_ETH 2
-#define TX4939_IR_ETH(n) ((n) ? 43 : 6)
-#define TX4939_IR_VIDEO 7
-#define TX4939_IR_CIR 8
-#define TX4939_NUM_IR_SIO 4
-#define TX4939_IR_SIO(n) ((n) ? 43 + (n) : 9) /* 9,44-46 */
-#define TX4939_NUM_IR_DMA 4
-#define TX4939_IR_DMA(ch, n) (((ch) ? 22 : 10) + (n)) /* 10-13,22-25 */
-#define TX4939_IR_IRC 14
-#define TX4939_IR_PDMAC 15
-#define TX4939_NUM_IR_TMR 6
-#define TX4939_IR_TMR(n) (((n) >= 3 ? 45 : 16) + (n)) /* 16-18,48-50 */
-#define TX4939_NUM_IR_ATA 2
-#define TX4939_IR_ATA(n) (19 + (n))
-#define TX4939_IR_ACLC 21
-#define TX4939_IR_CIPHER 26
-#define TX4939_IR_INTA 27
-#define TX4939_IR_INTB 28
-#define TX4939_IR_INTC 29
-#define TX4939_IR_INTD 30
-#define TX4939_IR_I2C 33
-#define TX4939_IR_SPI 34
-#define TX4939_IR_PCIC 35
-#define TX4939_IR_PCIC1 36
-#define TX4939_IR_PCIERR 37
-#define TX4939_IR_PCIPME 38
-#define TX4939_IR_NDFMC 39
-#define TX4939_IR_ACLCPME 40
-#define TX4939_IR_RTC 41
-#define TX4939_IR_RND 42
-#define TX4939_IR_I2S 47
-#define TX4939_NUM_IR 64
-
-#define TX4939_IRC_INT 2 /* IP[2] in Status register */
-
-/*
- * CCFG
- */
-/* CCFG : Chip Configuration */
-#define TX4939_CCFG_PCIBOOT 0x0000040000000000ULL
-#define TX4939_CCFG_WDRST 0x0000020000000000ULL
-#define TX4939_CCFG_WDREXEN 0x0000010000000000ULL
-#define TX4939_CCFG_BCFG_MASK 0x000000ff00000000ULL
-#define TX4939_CCFG_GTOT_MASK 0x06000000
-#define TX4939_CCFG_GTOT_4096 0x06000000
-#define TX4939_CCFG_GTOT_2048 0x04000000
-#define TX4939_CCFG_GTOT_1024 0x02000000
-#define TX4939_CCFG_GTOT_512 0x00000000
-#define TX4939_CCFG_TINTDIS 0x01000000
-#define TX4939_CCFG_PCI66 0x00800000
-#define TX4939_CCFG_PCIMODE 0x00400000
-#define TX4939_CCFG_SSCG 0x00100000
-#define TX4939_CCFG_MULCLK_MASK 0x000e0000
-#define TX4939_CCFG_MULCLK_8 (0x7 << 17)
-#define TX4939_CCFG_MULCLK_9 (0x0 << 17)
-#define TX4939_CCFG_MULCLK_10 (0x1 << 17)
-#define TX4939_CCFG_MULCLK_11 (0x2 << 17)
-#define TX4939_CCFG_MULCLK_12 (0x3 << 17)
-#define TX4939_CCFG_MULCLK_13 (0x4 << 17)
-#define TX4939_CCFG_MULCLK_14 (0x5 << 17)
-#define TX4939_CCFG_MULCLK_15 (0x6 << 17)
-#define TX4939_CCFG_BEOW 0x00010000
-#define TX4939_CCFG_WR 0x00008000
-#define TX4939_CCFG_TOE 0x00004000
-#define TX4939_CCFG_PCIARB 0x00002000
-#define TX4939_CCFG_YDIVMODE_MASK 0x00001c00
-#define TX4939_CCFG_YDIVMODE_2 (0x0 << 10)
-#define TX4939_CCFG_YDIVMODE_3 (0x1 << 10)
-#define TX4939_CCFG_YDIVMODE_5 (0x6 << 10)
-#define TX4939_CCFG_YDIVMODE_6 (0x7 << 10)
-#define TX4939_CCFG_PTSEL 0x00000200
-#define TX4939_CCFG_BESEL 0x00000100
-#define TX4939_CCFG_SYSSP_MASK 0x000000c0
-#define TX4939_CCFG_ACKSEL 0x00000020
-#define TX4939_CCFG_ROMW 0x00000010
-#define TX4939_CCFG_ENDIAN 0x00000004
-#define TX4939_CCFG_ARMODE 0x00000002
-#define TX4939_CCFG_ACEHOLD 0x00000001
-
-/* PCFG : Pin Configuration */
-#define TX4939_PCFG_SIO2MODE_MASK 0xc000000000000000ULL
-#define TX4939_PCFG_SIO2MODE_GPIO 0x8000000000000000ULL
-#define TX4939_PCFG_SIO2MODE_SIO2 0x4000000000000000ULL
-#define TX4939_PCFG_SIO2MODE_SIO0 0x0000000000000000ULL
-#define TX4939_PCFG_SPIMODE 0x2000000000000000ULL
-#define TX4939_PCFG_I2CMODE 0x1000000000000000ULL
-#define TX4939_PCFG_I2SMODE_MASK 0x0c00000000000000ULL
-#define TX4939_PCFG_I2SMODE_GPIO 0x0c00000000000000ULL
-#define TX4939_PCFG_I2SMODE_I2S 0x0800000000000000ULL
-#define TX4939_PCFG_I2SMODE_I2S_ALT 0x0400000000000000ULL
-#define TX4939_PCFG_I2SMODE_ACLC 0x0000000000000000ULL
-#define TX4939_PCFG_SIO3MODE 0x0200000000000000ULL
-#define TX4939_PCFG_DMASEL3 0x0004000000000000ULL
-#define TX4939_PCFG_DMASEL3_SIO0 0x0004000000000000ULL
-#define TX4939_PCFG_DMASEL3_NDFC 0x0000000000000000ULL
-#define TX4939_PCFG_VSSMODE 0x0000200000000000ULL
-#define TX4939_PCFG_VPSMODE 0x0000100000000000ULL
-#define TX4939_PCFG_ET1MODE 0x0000080000000000ULL
-#define TX4939_PCFG_ET0MODE 0x0000040000000000ULL
-#define TX4939_PCFG_ATA1MODE 0x0000020000000000ULL
-#define TX4939_PCFG_ATA0MODE 0x0000010000000000ULL
-#define TX4939_PCFG_BP_PLL 0x0000000100000000ULL
-
-#define TX4939_PCFG_SYSCLKEN 0x08000000
-#define TX4939_PCFG_PCICLKEN_ALL 0x000f0000
-#define TX4939_PCFG_PCICLKEN(ch) (0x00010000<<(ch))
-#define TX4939_PCFG_SPEED1 0x00002000
-#define TX4939_PCFG_SPEED0 0x00001000
-#define TX4939_PCFG_ITMODE 0x00000300
-#define TX4939_PCFG_DMASEL_ALL (0x00000007 | TX4939_PCFG_DMASEL3)
-#define TX4939_PCFG_DMASEL2 0x00000004
-#define TX4939_PCFG_DMASEL2_DRQ2 0x00000000
-#define TX4939_PCFG_DMASEL2_SIO0 0x00000004
-#define TX4939_PCFG_DMASEL1 0x00000002
-#define TX4939_PCFG_DMASEL1_DRQ1 0x00000000
-#define TX4939_PCFG_DMASEL0 0x00000001
-#define TX4939_PCFG_DMASEL0_DRQ0 0x00000000
-
-/* CLKCTR : Clock Control */
-#define TX4939_CLKCTR_IOSCKD 0x8000000000000000ULL
-#define TX4939_CLKCTR_SYSCKD 0x4000000000000000ULL
-#define TX4939_CLKCTR_TM5CKD 0x2000000000000000ULL
-#define TX4939_CLKCTR_TM4CKD 0x1000000000000000ULL
-#define TX4939_CLKCTR_TM3CKD 0x0800000000000000ULL
-#define TX4939_CLKCTR_CIRCKD 0x0400000000000000ULL
-#define TX4939_CLKCTR_SIO3CKD 0x0200000000000000ULL
-#define TX4939_CLKCTR_SIO2CKD 0x0100000000000000ULL
-#define TX4939_CLKCTR_SIO1CKD 0x0080000000000000ULL
-#define TX4939_CLKCTR_VPCCKD 0x0040000000000000ULL
-#define TX4939_CLKCTR_EPCICKD 0x0020000000000000ULL
-#define TX4939_CLKCTR_ETH1CKD 0x0008000000000000ULL
-#define TX4939_CLKCTR_ATA1CKD 0x0004000000000000ULL
-#define TX4939_CLKCTR_BROMCKD 0x0002000000000000ULL
-#define TX4939_CLKCTR_NDCCKD 0x0001000000000000ULL
-#define TX4939_CLKCTR_I2CCKD 0x0000800000000000ULL
-#define TX4939_CLKCTR_ETH0CKD 0x0000400000000000ULL
-#define TX4939_CLKCTR_SPICKD 0x0000200000000000ULL
-#define TX4939_CLKCTR_SRAMCKD 0x0000100000000000ULL
-#define TX4939_CLKCTR_PCI1CKD 0x0000080000000000ULL
-#define TX4939_CLKCTR_DMA1CKD 0x0000040000000000ULL
-#define TX4939_CLKCTR_ACLCKD 0x0000020000000000ULL
-#define TX4939_CLKCTR_ATA0CKD 0x0000010000000000ULL
-#define TX4939_CLKCTR_DMA0CKD 0x0000008000000000ULL
-#define TX4939_CLKCTR_PCICCKD 0x0000004000000000ULL
-#define TX4939_CLKCTR_I2SCKD 0x0000002000000000ULL
-#define TX4939_CLKCTR_TM0CKD 0x0000001000000000ULL
-#define TX4939_CLKCTR_TM1CKD 0x0000000800000000ULL
-#define TX4939_CLKCTR_TM2CKD 0x0000000400000000ULL
-#define TX4939_CLKCTR_SIO0CKD 0x0000000200000000ULL
-#define TX4939_CLKCTR_CYPCKD 0x0000000100000000ULL
-#define TX4939_CLKCTR_IOSRST 0x80000000
-#define TX4939_CLKCTR_SYSRST 0x40000000
-#define TX4939_CLKCTR_TM5RST 0x20000000
-#define TX4939_CLKCTR_TM4RST 0x10000000
-#define TX4939_CLKCTR_TM3RST 0x08000000
-#define TX4939_CLKCTR_CIRRST 0x04000000
-#define TX4939_CLKCTR_SIO3RST 0x02000000
-#define TX4939_CLKCTR_SIO2RST 0x01000000
-#define TX4939_CLKCTR_SIO1RST 0x00800000
-#define TX4939_CLKCTR_VPCRST 0x00400000
-#define TX4939_CLKCTR_EPCIRST 0x00200000
-#define TX4939_CLKCTR_ETH1RST 0x00080000
-#define TX4939_CLKCTR_ATA1RST 0x00040000
-#define TX4939_CLKCTR_BROMRST 0x00020000
-#define TX4939_CLKCTR_NDCRST 0x00010000
-#define TX4939_CLKCTR_I2CRST 0x00008000
-#define TX4939_CLKCTR_ETH0RST 0x00004000
-#define TX4939_CLKCTR_SPIRST 0x00002000
-#define TX4939_CLKCTR_SRAMRST 0x00001000
-#define TX4939_CLKCTR_PCI1RST 0x00000800
-#define TX4939_CLKCTR_DMA1RST 0x00000400
-#define TX4939_CLKCTR_ACLRST 0x00000200
-#define TX4939_CLKCTR_ATA0RST 0x00000100
-#define TX4939_CLKCTR_DMA0RST 0x00000080
-#define TX4939_CLKCTR_PCICRST 0x00000040
-#define TX4939_CLKCTR_I2SRST 0x00000020
-#define TX4939_CLKCTR_TM0RST 0x00000010
-#define TX4939_CLKCTR_TM1RST 0x00000008
-#define TX4939_CLKCTR_TM2RST 0x00000004
-#define TX4939_CLKCTR_SIO0RST 0x00000002
-#define TX4939_CLKCTR_CYPRST 0x00000001
-
-/*
- * CRYPTO
- */
-#define TX4939_CRYPTO_CSR_SAESO 0x08000000
-#define TX4939_CRYPTO_CSR_SAESI 0x04000000
-#define TX4939_CRYPTO_CSR_SDESO 0x02000000
-#define TX4939_CRYPTO_CSR_SDESI 0x01000000
-#define TX4939_CRYPTO_CSR_INDXBST_MASK 0x00700000
-#define TX4939_CRYPTO_CSR_INDXBST(n) ((n) << 20)
-#define TX4939_CRYPTO_CSR_TOINT 0x00080000
-#define TX4939_CRYPTO_CSR_DCINT 0x00040000
-#define TX4939_CRYPTO_CSR_GBINT 0x00010000
-#define TX4939_CRYPTO_CSR_INDXAST_MASK 0x0000e000
-#define TX4939_CRYPTO_CSR_INDXAST(n) ((n) << 13)
-#define TX4939_CRYPTO_CSR_CSWAP_MASK 0x00001800
-#define TX4939_CRYPTO_CSR_CSWAP_NONE 0x00000000
-#define TX4939_CRYPTO_CSR_CSWAP_IN 0x00000800
-#define TX4939_CRYPTO_CSR_CSWAP_OUT 0x00001000
-#define TX4939_CRYPTO_CSR_CSWAP_BOTH 0x00001800
-#define TX4939_CRYPTO_CSR_CDIV_MASK 0x00000600
-#define TX4939_CRYPTO_CSR_CDIV_DIV2 0x00000000
-#define TX4939_CRYPTO_CSR_CDIV_DIV1 0x00000200
-#define TX4939_CRYPTO_CSR_CDIV_DIV2ALT 0x00000400
-#define TX4939_CRYPTO_CSR_CDIV_DIV1ALT 0x00000600
-#define TX4939_CRYPTO_CSR_PDINT_MASK 0x000000c0
-#define TX4939_CRYPTO_CSR_PDINT_ALL 0x00000000
-#define TX4939_CRYPTO_CSR_PDINT_END 0x00000040
-#define TX4939_CRYPTO_CSR_PDINT_NEXT 0x00000080
-#define TX4939_CRYPTO_CSR_PDINT_NONE 0x000000c0
-#define TX4939_CRYPTO_CSR_GINTE 0x00000008
-#define TX4939_CRYPTO_CSR_RSTD 0x00000004
-#define TX4939_CRYPTO_CSR_RSTC 0x00000002
-#define TX4939_CRYPTO_CSR_ENCR 0x00000001
-
-/* bits for tx4939_crypto_reg.cdr.gen.ctrl */
-#define TX4939_CRYPTO_CTX_ENGINE_MASK 0x00000003
-#define TX4939_CRYPTO_CTX_ENGINE_DES 0x00000000
-#define TX4939_CRYPTO_CTX_ENGINE_AES 0x00000001
-#define TX4939_CRYPTO_CTX_ENGINE_MD5 0x00000002
-#define TX4939_CRYPTO_CTX_ENGINE_SHA1 0x00000003
-#define TX4939_CRYPTO_CTX_TDMS 0x00000010
-#define TX4939_CRYPTO_CTX_CMS 0x00000020
-#define TX4939_CRYPTO_CTX_DMS 0x00000040
-#define TX4939_CRYPTO_CTX_UPDATE 0x00000080
-
-/* bits for tx4939_crypto_desc.ctrl */
-#define TX4939_CRYPTO_DESC_OB_CNT_MASK 0xffe00000
-#define TX4939_CRYPTO_DESC_OB_CNT(cnt) ((cnt) << 21)
-#define TX4939_CRYPTO_DESC_IB_CNT_MASK 0x001ffc00
-#define TX4939_CRYPTO_DESC_IB_CNT(cnt) ((cnt) << 10)
-#define TX4939_CRYPTO_DESC_START 0x00000200
-#define TX4939_CRYPTO_DESC_END 0x00000100
-#define TX4939_CRYPTO_DESC_XOR 0x00000010
-#define TX4939_CRYPTO_DESC_LAST 0x00000008
-#define TX4939_CRYPTO_DESC_ERR_MASK 0x00000006
-#define TX4939_CRYPTO_DESC_ERR_NONE 0x00000000
-#define TX4939_CRYPTO_DESC_ERR_TOUT 0x00000002
-#define TX4939_CRYPTO_DESC_ERR_DIGEST 0x00000004
-#define TX4939_CRYPTO_DESC_OWN 0x00000001
-
-/* bits for tx4939_crypto_desc.index */
-#define TX4939_CRYPTO_DESC_HASH_IDX_MASK 0x00000070
-#define TX4939_CRYPTO_DESC_HASH_IDX(idx) ((idx) << 4)
-#define TX4939_CRYPTO_DESC_ENCRYPT_IDX_MASK 0x00000007
-#define TX4939_CRYPTO_DESC_ENCRYPT_IDX(idx) ((idx) << 0)
-
-#define TX4939_CRYPTO_NR_SET 6
-
-#define TX4939_CRYPTO_RCSR_INTE 0x00000008
-#define TX4939_CRYPTO_RCSR_RST 0x00000004
-#define TX4939_CRYPTO_RCSR_FIN 0x00000002
-#define TX4939_CRYPTO_RCSR_ST 0x00000001
-
-/*
- * VPC
- */
-#define TX4939_VPC_CSR_GBINT 0x00010000
-#define TX4939_VPC_CSR_SWAPO 0x00000020
-#define TX4939_VPC_CSR_SWAPI 0x00000010
-#define TX4939_VPC_CSR_GINTE 0x00000008
-#define TX4939_VPC_CSR_RSTD 0x00000004
-#define TX4939_VPC_CSR_RSTVPC 0x00000002
-
-#define TX4939_VPC_CTRLA_VDPSN 0x00000200
-#define TX4939_VPC_CTRLA_PBUSY 0x00000100
-#define TX4939_VPC_CTRLA_DCINT 0x00000080
-#define TX4939_VPC_CTRLA_UOINT 0x00000040
-#define TX4939_VPC_CTRLA_PDINT_MASK 0x00000030
-#define TX4939_VPC_CTRLA_PDINT_ALL 0x00000000
-#define TX4939_VPC_CTRLA_PDINT_NEXT 0x00000010
-#define TX4939_VPC_CTRLA_PDINT_NONE 0x00000030
-#define TX4939_VPC_CTRLA_VDVLDP 0x00000008
-#define TX4939_VPC_CTRLA_VDMODE 0x00000004
-#define TX4939_VPC_CTRLA_VDFOR 0x00000002
-#define TX4939_VPC_CTRLA_ENVPC 0x00000001
-
-/* bits for tx4939_vpc_desc.ctrl1 */
-#define TX4939_VPC_DESC_CTRL1_ERR_MASK 0x00000006
-#define TX4939_VPC_DESC_CTRL1_OWN 0x00000001
-
-#define tx4939_ddrcptr ((struct tx4939_ddrc_reg __iomem *)TX4939_DDRC_REG)
-#define tx4939_ebuscptr tx4938_ebuscptr
-#define tx4939_ircptr \
- ((struct tx4939_irc_reg __iomem *)TX4939_IRC_REG)
-#define tx4939_pcicptr tx4938_pcicptr
-#define tx4939_pcic1ptr tx4938_pcic1ptr
-#define tx4939_ccfgptr \
- ((struct tx4939_ccfg_reg __iomem *)TX4939_CCFG_REG)
-#define tx4939_sramcptr tx4938_sramcptr
-#define tx4939_cryptoptr \
- ((struct tx4939_crypto_reg __iomem *)TX4939_CRYPTO_REG)
-#define tx4939_vpcptr ((struct tx4939_vpc_reg __iomem *)TX4939_VPC_REG)
-
-#define TX4939_REV_MAJ_MIN() \
- ((__u32)__raw_readq(&tx4939_ccfgptr->crir) & 0x00ff)
-#define TX4939_REV_PCODE() \
- ((__u32)__raw_readq(&tx4939_ccfgptr->crir) >> 16)
-#define TX4939_CCFG_BCFG() \
- ((__u32)((__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_BCFG_MASK) \
- >> 32))
-
-#define tx4939_ccfg_clear(bits) tx4938_ccfg_clear(bits)
-#define tx4939_ccfg_set(bits) tx4938_ccfg_set(bits)
-#define tx4939_ccfg_change(change, new) tx4938_ccfg_change(change, new)
-
-#define TX4939_EBUSC_CR(ch) TX4927_EBUSC_CR(ch)
-#define TX4939_EBUSC_BA(ch) TX4927_EBUSC_BA(ch)
-#define TX4939_EBUSC_SIZE(ch) TX4927_EBUSC_SIZE(ch)
-#define TX4939_EBUSC_WIDTH(ch) \
- (16 >> ((__u32)(TX4939_EBUSC_CR(ch) >> 20) & 0x1))
-
-/* SCLK0 = MSTCLK * 429/19 * 16/245 / 2 (14.745MHz for MST 20MHz) */
-#define TX4939_SCLK0(mst) \
- ((((mst) + 245/2) / 245UL * 429 * 16 + 19) / 19 / 2)
-
-void tx4939_wdt_init(void);
-void tx4939_setup(void);
-void tx4939_time_init(unsigned int tmrnr);
-void tx4939_sio_init(unsigned int sclk, unsigned int cts_mask);
-void tx4939_spi_init(int busid);
-void tx4939_ethaddr_init(unsigned char *addr0, unsigned char *addr1);
-int tx4939_report_pciclk(void);
-void tx4939_report_pci1clk(void);
-struct pci_dev;
-int tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot);
-int tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
-void tx4939_setup_pcierr_irq(void);
-void tx4939_irq_init(void);
-int tx4939_irq(void);
-void tx4939_mtd_init(int ch);
-void tx4939_ata_init(void);
-void tx4939_rtc_init(void);
-void tx4939_ndfmc_init(unsigned int hold, unsigned int spw,
- unsigned char ch_mask, unsigned char wide_mask);
-void tx4939_dmac_init(int memcpy_chan0, int memcpy_chan1);
-void tx4939_aclc_init(void);
-void tx4939_sramc_init(void);
-void tx4939_rng_init(void);
-
-#endif /* __ASM_TXX9_TX4939_H */
diff --git a/arch/mips/include/asm/unaligned-emul.h b/arch/mips/include/asm/unaligned-emul.h
index 2022b18944b9..9af0f4d3d288 100644
--- a/arch/mips/include/asm/unaligned-emul.h
+++ b/arch/mips/include/asm/unaligned-emul.h
@@ -20,8 +20,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -41,8 +41,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -74,10 +74,10 @@ do { \
"j\t10b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 11b\n\t" \
- STR(PTR)"\t2b, 11b\n\t" \
- STR(PTR)"\t3b, 11b\n\t" \
- STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR_WD)"\t1b, 11b\n\t" \
+ STR(PTR_WD)"\t2b, 11b\n\t" \
+ STR(PTR_WD)"\t3b, 11b\n\t" \
+ STR(PTR_WD)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -102,8 +102,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -125,8 +125,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -145,8 +145,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -178,10 +178,10 @@ do { \
"j\t10b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 11b\n\t" \
- STR(PTR)"\t2b, 11b\n\t" \
- STR(PTR)"\t3b, 11b\n\t" \
- STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR_WD)"\t1b, 11b\n\t" \
+ STR(PTR_WD)"\t2b, 11b\n\t" \
+ STR(PTR_WD)"\t3b, 11b\n\t" \
+ STR(PTR_WD)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -223,14 +223,14 @@ do { \
"j\t10b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 11b\n\t" \
- STR(PTR)"\t2b, 11b\n\t" \
- STR(PTR)"\t3b, 11b\n\t" \
- STR(PTR)"\t4b, 11b\n\t" \
- STR(PTR)"\t5b, 11b\n\t" \
- STR(PTR)"\t6b, 11b\n\t" \
- STR(PTR)"\t7b, 11b\n\t" \
- STR(PTR)"\t8b, 11b\n\t" \
+ STR(PTR_WD)"\t1b, 11b\n\t" \
+ STR(PTR_WD)"\t2b, 11b\n\t" \
+ STR(PTR_WD)"\t3b, 11b\n\t" \
+ STR(PTR_WD)"\t4b, 11b\n\t" \
+ STR(PTR_WD)"\t5b, 11b\n\t" \
+ STR(PTR_WD)"\t6b, 11b\n\t" \
+ STR(PTR_WD)"\t7b, 11b\n\t" \
+ STR(PTR_WD)"\t8b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -255,8 +255,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT));\
@@ -276,8 +276,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT)); \
@@ -296,8 +296,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT)); \
@@ -325,10 +325,10 @@ do { \
"j\t10b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 11b\n\t" \
- STR(PTR)"\t2b, 11b\n\t" \
- STR(PTR)"\t3b, 11b\n\t" \
- STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR_WD)"\t1b, 11b\n\t" \
+ STR(PTR_WD)"\t2b, 11b\n\t" \
+ STR(PTR_WD)"\t3b, 11b\n\t" \
+ STR(PTR_WD)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \
@@ -365,14 +365,14 @@ do { \
"j\t10b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 11b\n\t" \
- STR(PTR)"\t2b, 11b\n\t" \
- STR(PTR)"\t3b, 11b\n\t" \
- STR(PTR)"\t4b, 11b\n\t" \
- STR(PTR)"\t5b, 11b\n\t" \
- STR(PTR)"\t6b, 11b\n\t" \
- STR(PTR)"\t7b, 11b\n\t" \
- STR(PTR)"\t8b, 11b\n\t" \
+ STR(PTR_WD)"\t1b, 11b\n\t" \
+ STR(PTR_WD)"\t2b, 11b\n\t" \
+ STR(PTR_WD)"\t3b, 11b\n\t" \
+ STR(PTR_WD)"\t4b, 11b\n\t" \
+ STR(PTR_WD)"\t5b, 11b\n\t" \
+ STR(PTR_WD)"\t6b, 11b\n\t" \
+ STR(PTR_WD)"\t7b, 11b\n\t" \
+ STR(PTR_WD)"\t8b, 11b\n\t" \
".previous" \
: "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \
@@ -398,8 +398,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -419,8 +419,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -452,10 +452,10 @@ do { \
"j\t10b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 11b\n\t" \
- STR(PTR)"\t2b, 11b\n\t" \
- STR(PTR)"\t3b, 11b\n\t" \
- STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR_WD)"\t1b, 11b\n\t" \
+ STR(PTR_WD)"\t2b, 11b\n\t" \
+ STR(PTR_WD)"\t3b, 11b\n\t" \
+ STR(PTR_WD)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -481,8 +481,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -504,8 +504,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -524,8 +524,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -557,10 +557,10 @@ do { \
"j\t10b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 11b\n\t" \
- STR(PTR)"\t2b, 11b\n\t" \
- STR(PTR)"\t3b, 11b\n\t" \
- STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR_WD)"\t1b, 11b\n\t" \
+ STR(PTR_WD)"\t2b, 11b\n\t" \
+ STR(PTR_WD)"\t3b, 11b\n\t" \
+ STR(PTR_WD)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -602,14 +602,14 @@ do { \
"j\t10b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 11b\n\t" \
- STR(PTR)"\t2b, 11b\n\t" \
- STR(PTR)"\t3b, 11b\n\t" \
- STR(PTR)"\t4b, 11b\n\t" \
- STR(PTR)"\t5b, 11b\n\t" \
- STR(PTR)"\t6b, 11b\n\t" \
- STR(PTR)"\t7b, 11b\n\t" \
- STR(PTR)"\t8b, 11b\n\t" \
+ STR(PTR_WD)"\t1b, 11b\n\t" \
+ STR(PTR_WD)"\t2b, 11b\n\t" \
+ STR(PTR_WD)"\t3b, 11b\n\t" \
+ STR(PTR_WD)"\t4b, 11b\n\t" \
+ STR(PTR_WD)"\t5b, 11b\n\t" \
+ STR(PTR_WD)"\t6b, 11b\n\t" \
+ STR(PTR_WD)"\t7b, 11b\n\t" \
+ STR(PTR_WD)"\t8b, 11b\n\t" \
".previous" \
: "=&r" (value), "=r" (res) \
: "r" (addr), "i" (-EFAULT)); \
@@ -632,8 +632,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT));\
@@ -653,8 +653,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT)); \
@@ -673,8 +673,8 @@ do { \
"j\t3b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 4b\n\t" \
- STR(PTR)"\t2b, 4b\n\t" \
+ STR(PTR_WD)"\t1b, 4b\n\t" \
+ STR(PTR_WD)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT)); \
@@ -703,10 +703,10 @@ do { \
"j\t10b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 11b\n\t" \
- STR(PTR)"\t2b, 11b\n\t" \
- STR(PTR)"\t3b, 11b\n\t" \
- STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR_WD)"\t1b, 11b\n\t" \
+ STR(PTR_WD)"\t2b, 11b\n\t" \
+ STR(PTR_WD)"\t3b, 11b\n\t" \
+ STR(PTR_WD)"\t4b, 11b\n\t" \
".previous" \
: "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \
@@ -743,14 +743,14 @@ do { \
"j\t10b\n\t" \
".previous\n\t" \
".section\t__ex_table,\"a\"\n\t" \
- STR(PTR)"\t1b, 11b\n\t" \
- STR(PTR)"\t2b, 11b\n\t" \
- STR(PTR)"\t3b, 11b\n\t" \
- STR(PTR)"\t4b, 11b\n\t" \
- STR(PTR)"\t5b, 11b\n\t" \
- STR(PTR)"\t6b, 11b\n\t" \
- STR(PTR)"\t7b, 11b\n\t" \
- STR(PTR)"\t8b, 11b\n\t" \
+ STR(PTR_WD)"\t1b, 11b\n\t" \
+ STR(PTR_WD)"\t2b, 11b\n\t" \
+ STR(PTR_WD)"\t3b, 11b\n\t" \
+ STR(PTR_WD)"\t4b, 11b\n\t" \
+ STR(PTR_WD)"\t5b, 11b\n\t" \
+ STR(PTR_WD)"\t6b, 11b\n\t" \
+ STR(PTR_WD)"\t7b, 11b\n\t" \
+ STR(PTR_WD)"\t8b, 11b\n\t" \
".previous" \
: "=&r" (res) \
: "r" (value), "r" (addr), "i" (-EFAULT) \
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index 8d2535123f11..17aff13cd7ce 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -4,6 +4,7 @@
* Author: Paul Burton <paul.burton@mips.com>
*/
+#include <linux/bitfield.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/of.h>
@@ -97,7 +98,7 @@ void mips_cpc_lock_other(unsigned int core)
curr_core = cpu_core(&current_cpu_data);
spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
per_cpu(cpc_core_lock_flags, curr_core));
- write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM));
+ write_cpc_cl_other(FIELD_PREP(CPC_Cx_OTHER_CORENUM, core));
/*
* Ensure the core-other region reflects the appropriate core &
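
FIELD_PREP(mask, val) from <linux/bitfield.h> replaces the open-coded core << __ffs(CPC_Cx_OTHER_CORENUM): it shifts the value into the field described by a contiguous bitmask and masks the result. A userspace model of the idiom, with an assumed field layout for the demo (the real kernel macro additionally performs compile-time fit checks):

    #include <stdint.h>
    #include <stdio.h>

    /* Model of __ffs()/FIELD_PREP() for illustration only. */
    #define MODEL_FFS(m)             ((unsigned)__builtin_ctzll(m))
    #define MODEL_FIELD_PREP(m, v)   (((uint64_t)(v) << MODEL_FFS(m)) & (m))

    #define DEMO_CORENUM_MASK 0x0000ff00ull   /* assumed layout, demo only */

    int main(void)
    {
            /* old form: core << __ffs(mask); the new form also clamps
             * the shifted value to the field, catching overflow */
            printf("reg = %#llx\n", (unsigned long long)
                   MODEL_FIELD_PREP(DEMO_CORENUM_MASK, 3));
            return 0;
    }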
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index a39ec755e4c2..750fe569862b 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -1258,10 +1258,10 @@ fpu_emul:
" j 10b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- STR(PTR) " 1b,8b\n"
- STR(PTR) " 2b,8b\n"
- STR(PTR) " 3b,8b\n"
- STR(PTR) " 4b,8b\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1333,10 +1333,10 @@ fpu_emul:
" j 10b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- STR(PTR) " 1b,8b\n"
- STR(PTR) " 2b,8b\n"
- STR(PTR) " 3b,8b\n"
- STR(PTR) " 4b,8b\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1404,10 +1404,10 @@ fpu_emul:
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- STR(PTR) " 1b,8b\n"
- STR(PTR) " 2b,8b\n"
- STR(PTR) " 3b,8b\n"
- STR(PTR) " 4b,8b\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1474,10 +1474,10 @@ fpu_emul:
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- STR(PTR) " 1b,8b\n"
- STR(PTR) " 2b,8b\n"
- STR(PTR) " 3b,8b\n"
- STR(PTR) " 4b,8b\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1589,14 +1589,14 @@ fpu_emul:
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- STR(PTR) " 1b,8b\n"
- STR(PTR) " 2b,8b\n"
- STR(PTR) " 3b,8b\n"
- STR(PTR) " 4b,8b\n"
- STR(PTR) " 5b,8b\n"
- STR(PTR) " 6b,8b\n"
- STR(PTR) " 7b,8b\n"
- STR(PTR) " 0b,8b\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
+ STR(PTR_WD) " 5b,8b\n"
+ STR(PTR_WD) " 6b,8b\n"
+ STR(PTR_WD) " 7b,8b\n"
+ STR(PTR_WD) " 0b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1708,14 +1708,14 @@ fpu_emul:
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- STR(PTR) " 1b,8b\n"
- STR(PTR) " 2b,8b\n"
- STR(PTR) " 3b,8b\n"
- STR(PTR) " 4b,8b\n"
- STR(PTR) " 5b,8b\n"
- STR(PTR) " 6b,8b\n"
- STR(PTR) " 7b,8b\n"
- STR(PTR) " 0b,8b\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
+ STR(PTR_WD) " 5b,8b\n"
+ STR(PTR_WD) " 6b,8b\n"
+ STR(PTR_WD) " 7b,8b\n"
+ STR(PTR_WD) " 0b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1827,14 +1827,14 @@ fpu_emul:
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- STR(PTR) " 1b,8b\n"
- STR(PTR) " 2b,8b\n"
- STR(PTR) " 3b,8b\n"
- STR(PTR) " 4b,8b\n"
- STR(PTR) " 5b,8b\n"
- STR(PTR) " 6b,8b\n"
- STR(PTR) " 7b,8b\n"
- STR(PTR) " 0b,8b\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
+ STR(PTR_WD) " 5b,8b\n"
+ STR(PTR_WD) " 6b,8b\n"
+ STR(PTR_WD) " 7b,8b\n"
+ STR(PTR_WD) " 0b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1945,14 +1945,14 @@ fpu_emul:
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- STR(PTR) " 1b,8b\n"
- STR(PTR) " 2b,8b\n"
- STR(PTR) " 3b,8b\n"
- STR(PTR) " 4b,8b\n"
- STR(PTR) " 5b,8b\n"
- STR(PTR) " 6b,8b\n"
- STR(PTR) " 7b,8b\n"
- STR(PTR) " 0b,8b\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
+ STR(PTR_WD) " 5b,8b\n"
+ STR(PTR_WD) " 6b,8b\n"
+ STR(PTR_WD) " 7b,8b\n"
+ STR(PTR_WD) " 0b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -2007,7 +2007,7 @@ fpu_emul:
"j 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
- STR(PTR) " 1b,3b\n"
+ STR(PTR_WD) " 1b,3b\n"
".previous\n"
: "=&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV)
@@ -2065,7 +2065,7 @@ fpu_emul:
"j 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
- STR(PTR) " 1b,3b\n"
+ STR(PTR_WD) " 1b,3b\n"
".previous\n"
: "+&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV));
@@ -2126,7 +2126,7 @@ fpu_emul:
"j 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
- STR(PTR) " 1b,3b\n"
+ STR(PTR_WD) " 1b,3b\n"
".previous\n"
: "=&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV)
@@ -2189,7 +2189,7 @@ fpu_emul:
"j 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
- STR(PTR) " 1b,3b\n"
+ STR(PTR_WD) " 1b,3b\n"
".previous\n"
: "+&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV));
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index cbf6db98cfb3..2748c55820c2 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -23,14 +23,14 @@
#define EX(a,b) \
9: a,##b; \
.section __ex_table,"a"; \
- PTR 9b,fault; \
+ PTR_WD 9b,fault; \
.previous
#define EX2(a,b) \
9: a,##b; \
.section __ex_table,"a"; \
- PTR 9b,fault; \
- PTR 9b+4,fault; \
+ PTR_WD 9b,fault; \
+ PTR_WD 9b+4,fault; \
.previous
.set mips1
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index b91e91106475..2e687c60bc4f 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -31,7 +31,7 @@
.ex\@: \insn \reg, \src
.set pop
.section __ex_table,"a"
- PTR .ex\@, fault
+ PTR_WD .ex\@, fault
.previous
.endm
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index f3c908abdbb8..cfde14b48fd8 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -147,10 +147,10 @@ LEAF(kexec_smp_wait)
kexec_args:
EXPORT(kexec_args)
-arg0: PTR 0x0
-arg1: PTR 0x0
-arg2: PTR 0x0
-arg3: PTR 0x0
+arg0: PTR_WD 0x0
+arg1: PTR_WD 0x0
+arg2: PTR_WD 0x0
+arg3: PTR_WD 0x0
.size kexec_args,PTRSIZE*4
#ifdef CONFIG_SMP
@@ -161,10 +161,10 @@ arg3: PTR 0x0
*/
secondary_kexec_args:
EXPORT(secondary_kexec_args)
-s_arg0: PTR 0x0
-s_arg1: PTR 0x0
-s_arg2: PTR 0x0
-s_arg3: PTR 0x0
+s_arg0: PTR_WD 0x0
+s_arg1: PTR_WD 0x0
+s_arg2: PTR_WD 0x0
+s_arg3: PTR_WD 0x0
.size secondary_kexec_args,PTRSIZE*4
kexec_flag:
LONG 0x1
@@ -173,17 +173,17 @@ kexec_flag:
kexec_start_address:
EXPORT(kexec_start_address)
- PTR 0x0
+ PTR_WD 0x0
.size kexec_start_address, PTRSIZE
kexec_indirection_page:
EXPORT(kexec_indirection_page)
- PTR 0
+ PTR_WD 0
.size kexec_indirection_page, PTRSIZE
relocate_new_kernel_end:
relocate_new_kernel_size:
EXPORT(relocate_new_kernel_size)
- PTR relocate_new_kernel_end - relocate_new_kernel
+ PTR_WD relocate_new_kernel_end - relocate_new_kernel
.size relocate_new_kernel_size, PTRSIZE
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index b1b2e106f711..9bfce5f75f60 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -72,10 +72,10 @@ loads_done:
.set pop
.section __ex_table,"a"
- PTR load_a4, bad_stack_a4
- PTR load_a5, bad_stack_a5
- PTR load_a6, bad_stack_a6
- PTR load_a7, bad_stack_a7
+ PTR_WD load_a4, bad_stack_a4
+ PTR_WD load_a5, bad_stack_a5
+ PTR_WD load_a6, bad_stack_a6
+ PTR_WD load_a7, bad_stack_a7
.previous
lw t0, TI_FLAGS($28) # syscall tracing enabled?
@@ -216,7 +216,7 @@ einval: li v0, -ENOSYS
#endif /* CONFIG_MIPS_MT_FPAFF */
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
-#define __SYSCALL(nr, entry) PTR entry
+#define __SYSCALL(nr, entry) PTR_WD entry
.align 2
.type sys_call_table, @object
EXPORT(sys_call_table)
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f650c55a17dc..97456b2ca7dc 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -101,7 +101,7 @@ not_n32_scall:
END(handle_sysn32)
-#define __SYSCALL(nr, entry) PTR entry
+#define __SYSCALL(nr, entry) PTR_WD entry
.type sysn32_call_table, @object
EXPORT(sysn32_call_table)
#include <asm/syscall_table_n32.h>
diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S
index 5d7bfc65e4d0..5f6ed4b4c399 100644
--- a/arch/mips/kernel/scall64-n64.S
+++ b/arch/mips/kernel/scall64-n64.S
@@ -109,7 +109,7 @@ illegal_syscall:
j n64_syscall_exit
END(handle_sys64)
-#define __SYSCALL(nr, entry) PTR entry
+#define __SYSCALL(nr, entry) PTR_WD entry
.align 3
.type sys_call_table, @object
EXPORT(sys_call_table)
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index cedc8bd88804..d3c2616cba22 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -73,10 +73,10 @@ load_a7: lw a7, 28(t0) # argument #8 from usp
loads_done:
.section __ex_table,"a"
- PTR load_a4, bad_stack_a4
- PTR load_a5, bad_stack_a5
- PTR load_a6, bad_stack_a6
- PTR load_a7, bad_stack_a7
+ PTR_WD load_a4, bad_stack_a4
+ PTR_WD load_a5, bad_stack_a5
+ PTR_WD load_a6, bad_stack_a6
+ PTR_WD load_a7, bad_stack_a7
.previous
li t1, _TIF_WORK_SYSCALL_ENTRY
@@ -214,7 +214,7 @@ einval: li v0, -ENOSYS
END(sys32_syscall)
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
-#define __SYSCALL(nr, entry) PTR entry
+#define __SYSCALL(nr, entry) PTR_WD entry
.align 3
.type sys32_call_table,@object
EXPORT(sys32_call_table)
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index c9b2a75563e1..5bce782e694c 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -563,6 +563,13 @@ void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
sp = regs->regs[29];
/*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
+ return (void __user __force *)(-1UL);
+
+ /*
	 * FPU emulator may have its own trampoline active just
* above the user stack, 16-bytes before the next lowest
* 16 byte boundary. Try to avoid trashing it.
@@ -747,23 +754,25 @@ static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
struct pt_regs *regs, sigset_t *set)
{
struct rt_sigframe __user *frame;
- int err = 0;
frame = get_sigframe(ksig, regs, sizeof(*frame));
if (!access_ok(frame, sizeof (*frame)))
return -EFAULT;
/* Create siginfo. */
- err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);
+ if (copy_siginfo_to_user(&frame->rs_info, &ksig->info))
+ return -EFAULT;
/* Create the ucontext. */
- err |= __put_user(0, &frame->rs_uc.uc_flags);
- err |= __put_user(NULL, &frame->rs_uc.uc_link);
- err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
- err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
- err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set));
-
- if (err)
+ if (__put_user(0, &frame->rs_uc.uc_flags))
+ return -EFAULT;
+ if (__put_user(NULL, &frame->rs_uc.uc_link))
+ return -EFAULT;
+ if (__save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]))
+ return -EFAULT;
+ if (setup_sigcontext(regs, &frame->rs_uc.uc_mcontext))
+ return -EFAULT;
+ if (__copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)))
return -EFAULT;
/*
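
Two independent fixes land in signal.c: get_sigframe() now refuses to build a frame that would run off the bottom of the alternate signal stack, and setup_rt_frame() bails out on the first failing user access instead of accumulating errors into a flag. A self-contained sketch of the overflow guard, using hypothetical stand-ins for the kernel helpers:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for the task's altstack bookkeeping. */
    struct altstack { uintptr_t base; size_t size; };

    static bool on_sig_stack(const struct altstack *ss, uintptr_t sp)
    {
            /* mirrors the kernel's default check: sp in (base, base+size] */
            return sp > ss->base && sp - ss->base <= ss->size;
    }

    static void *get_sigframe_checked(const struct altstack *ss,
                                      uintptr_t sp, size_t frame_size)
    {
            /* On the altstack, but the frame would overflow it: return an
             * always-bogus address so the write faults and the task dies
             * with SIGSEGV instead of corrupting adjacent memory. */
            if (on_sig_stack(ss, sp) && !on_sig_stack(ss, sp - frame_size))
                    return (void *)-1UL;
            return (void *)(sp - frame_size);
    }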
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 5512cd586e6e..ae93a607ddf7 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -122,8 +122,8 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
- " "STR(PTR)" 1b, 4b \n"
- " "STR(PTR)" 2b, 4b \n"
+ " "STR(PTR_WD)" 1b, 4b \n"
+ " "STR(PTR_WD)" 2b, 4b \n"
" .previous \n"
" .set pop \n"
: [old] "=&r" (old),
@@ -152,8 +152,8 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
" j 3b \n"
" .previous \n"
" .section __ex_table,\"a\" \n"
- " "STR(PTR)" 1b, 5b \n"
- " "STR(PTR)" 2b, 5b \n"
+ " "STR(PTR_WD)" 1b, 5b \n"
+ " "STR(PTR_WD)" 2b, 5b \n"
" .previous \n"
" .set pop \n"
: [old] "=&r" (old),
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
index 72d02d363f36..253ff994ed2e 100644
--- a/arch/mips/kernel/syscalls/syscall_n32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -388,3 +388,4 @@
# 447 reserved for memfd_secret
448 n32 process_mrelease sys_process_mrelease
449 n32 futex_waitv sys_futex_waitv
+450 n32 set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
index e2c481fcede6..3f1886ad9d80 100644
--- a/arch/mips/kernel/syscalls/syscall_n64.tbl
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -364,3 +364,4 @@
# 447 reserved for memfd_secret
448 n64 process_mrelease sys_process_mrelease
449 n64 futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
index 3714c97b2643..8f243e35a7b2 100644
--- a/arch/mips/kernel/syscalls/syscall_o32.tbl
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -437,3 +437,4 @@
# 447 reserved for memfd_secret
448 o32 process_mrelease sys_process_mrelease
449 o32 futex_waitv sys_futex_waitv
+450 o32 set_mempolicy_home_node sys_set_mempolicy_home_node
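
All three MIPS ABIs wire up syscall table entry 450, set_mempolicy_home_node. Until libc grows a wrapper it can be invoked by number; a minimal sketch assuming the generic four-argument prototype (start, len, home_node, flags). Note that MIPS adds an ABI base to the table index (4000 for o32, 5000 for n64, 6000 for n32), so prefer the __NR_* constant from the uapi headers when available:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <sys/mman.h>

    #ifndef __NR_set_mempolicy_home_node
    #define __NR_set_mempolicy_home_node (4000 + 450)  /* o32, assumption */
    #endif

    int main(void)
    {
            size_t len = 2UL * 1024 * 1024;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            /* bind the range's home node to node 0; flags must be 0 */
            long ret = syscall(__NR_set_mempolicy_home_node,
                               (unsigned long)p, len, 0UL, 0UL);
            printf("set_mempolicy_home_node -> %ld\n", ret);
            return 0;
    }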
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d26b0fb8ea06..a486486b2355 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -422,7 +422,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
if (regs && kexec_should_crash(current))
crash_kexec(regs);
- do_exit(sig);
+ make_task_dead(sig);
}
extern struct exception_table_entry __start___dbe_table[];
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index a77297480f56..91d197bee9c0 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -27,6 +27,7 @@ config KVM
select KVM_MMIO
select MMU_NOTIFIER
select SRCU
+ select INTERVAL_TREE
help
Support for hosting Guest kernels.
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index d3710959da55..21ff75bcdbc4 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -2,9 +2,10 @@
# Makefile for KVM support for MIPS
#
+include $(srctree)/virt/kvm/Makefile.kvm
+
ccflags-y += -Ivirt/kvm -Iarch/mips/kvm
-kvm-y := $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o binary_stats.o)
kvm-$(CONFIG_CPU_HAS_MSA) += msa.o
kvm-y += mips.o emulate.o entry.o \
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 22e745e49b0a..b494d8d39290 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -952,7 +952,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
if (!vcpu->arch.pending_exceptions) {
kvm_vz_lose_htimer(vcpu);
vcpu->arch.wait = 1;
- kvm_vcpu_block(vcpu);
+ kvm_vcpu_halt(vcpu);
/*
	 * If we are runnable, then definitely go off to user space to
diff --git a/arch/mips/kvm/loongson_ipi.c b/arch/mips/kvm/loongson_ipi.c
index 3681fc8fba38..5d53f32d837c 100644
--- a/arch/mips/kvm/loongson_ipi.c
+++ b/arch/mips/kvm/loongson_ipi.c
@@ -120,7 +120,7 @@ static int loongson_vipi_write(struct loongson_kvm_ipi *ipi,
s->status |= data;
irq.cpu = id;
irq.irq = 6;
- kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq);
+ kvm_vcpu_ioctl_interrupt(kvm_get_vcpu(kvm, id), &irq);
break;
case CORE0_CLEAR_OFF:
@@ -128,7 +128,7 @@ static int loongson_vipi_write(struct loongson_kvm_ipi *ipi,
if (!s->status) {
irq.cpu = id;
irq.irq = -6;
- kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq);
+ kvm_vcpu_ioctl_interrupt(kvm_get_vcpu(kvm, id), &irq);
}
break;
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index aa20d074d388..a25e0b73ee70 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -171,25 +171,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
return 0;
}
-void kvm_mips_free_vcpus(struct kvm *kvm)
-{
- unsigned int i;
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm) {
- kvm_vcpu_destroy(vcpu);
- }
-
- mutex_lock(&kvm->lock);
-
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
- kvm->vcpus[i] = NULL;
-
- atomic_set(&kvm->online_vcpus, 0);
-
- mutex_unlock(&kvm->lock);
-}
-
static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
/* It should always be safe to remove after flushing the whole range */
@@ -199,7 +180,7 @@ static void kvm_mips_free_gpa_pt(struct kvm *kvm)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
- kvm_mips_free_vcpus(kvm);
+ kvm_destroy_vcpus(kvm);
kvm_mips_free_gpa_pt(kvm);
}
@@ -233,25 +214,20 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
int needs_flush;
- kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
- __func__, kvm, mem->slot, mem->guest_phys_addr,
- mem->memory_size, mem->userspace_addr);
-
/*
* If dirty page logging is enabled, write protect all pages in the slot
* ready for dirty logging.
@@ -438,6 +414,24 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
return -ENOIOCTLCMD;
}
+/*
+ * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
+ * the vCPU is running.
+ *
+ * This must be noinstr as instrumentation may make use of RCU, and this is not
+ * safe during the EQS.
+ */
+static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guest_state_enter_irqoff();
+ ret = kvm_mips_callbacks->vcpu_run(vcpu);
+ guest_state_exit_irqoff();
+
+ return ret;
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
int r = -EINTR;
@@ -458,7 +452,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
lose_fpu(1);
local_irq_disable();
- guest_enter_irqoff();
+ guest_timing_enter_irqoff();
trace_kvm_enter(vcpu);
/*
@@ -469,10 +463,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
*/
smp_store_mb(vcpu->mode, IN_GUEST_MODE);
- r = kvm_mips_callbacks->vcpu_run(vcpu);
+ r = kvm_mips_vcpu_enter_exit(vcpu);
+
+ /*
+ * We must ensure that any pending interrupts are taken before
+ * we exit guest timing so that timer ticks are accounted as
+ * guest time. Transiently unmask interrupts so that any
+ * pending interrupts are taken.
+ *
+ * TODO: is there a barrier which ensures that pending interrupts are
+ * recognised? Currently this just hopes that the CPU takes any pending
+ * interrupts between the enable and disable.
+ */
+ local_irq_enable();
+ local_irq_disable();
trace_kvm_out(vcpu);
- guest_exit_irqoff();
+ guest_timing_exit_irqoff();
local_irq_enable();
out:
@@ -498,7 +505,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
if (irq->cpu == -1)
dvcpu = vcpu;
else
- dvcpu = vcpu->kvm->vcpus[irq->cpu];
+ dvcpu = kvm_get_vcpu(vcpu->kvm, irq->cpu);
if (intr == 2 || intr == 3 || intr == 4 || intr == 6) {
kvm_mips_callbacks->queue_io_int(dvcpu, irq);
@@ -1192,7 +1199,7 @@ static void kvm_mips_set_c0_status(void)
/*
* Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
*/
-int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
u32 cause = vcpu->arch.host_cp0_cause;
@@ -1381,6 +1388,17 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
return ret;
}
+int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ guest_state_exit_irqoff();
+ ret = __kvm_mips_handle_exit(vcpu);
+ guest_state_enter_irqoff();
+
+ return ret;
+}
+
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
index 4adca5abbc72..c706f5890a05 100644
--- a/arch/mips/kvm/vz.c
+++ b/arch/mips/kvm/vz.c
@@ -458,8 +458,8 @@ void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
/**
* _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
* @vcpu: Virtual CPU.
- * @compare: Pointer to write compare value to.
- * @cause: Pointer to write cause value to.
+ * @out_compare: Pointer to write compare value to.
+ * @out_cause: Pointer to write cause value to.
*
* Save VZ guest timer state and switch to software emulation of guest CP0
* timer. The hard timer must already be in use, so preemption should be
@@ -1541,11 +1541,14 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
}
/**
- * kvm_trap_vz_handle_cop_unusuable() - Guest used unusable coprocessor.
+ * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
* @vcpu: Virtual CPU context.
*
* Handle when the guest attempts to use a coprocessor which hasn't been allowed
* by the root context.
+ *
+ * Return: value indicating whether to resume the host or the guest
+ * (RESUME_HOST or RESUME_GUEST)
*/
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
@@ -1592,6 +1595,9 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
*
* Handle when the guest attempts to use MSA when it is disabled in the root
* context.
+ *
+ * Return: value indicating whether to resume the host or the guest
+ * (RESUME_HOST or RESUME_GUEST)
*/
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index 4916cccf378f..7a623684d9b5 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -164,6 +164,12 @@ struct clk *clk_get_parent(struct clk *clk)
}
EXPORT_SYMBOL(clk_get_parent);
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+ return 0;
+}
+EXPORT_SYMBOL(clk_set_parent);
+
static inline u32 get_counter_resolution(void)
{
u32 res;
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
index 42222f849bd2..64726c670ca6 100644
--- a/arch/mips/lantiq/falcon/sysctrl.c
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -141,7 +141,7 @@ static void falcon_gpe_enable(void)
unsigned int freq;
unsigned int status;
- /* if if the clock is already enabled */
+ /* if the clock is already enabled */
status = sysctl_r32(SYSCTL_SYS1, SYS1_INFRAC);
if (status & (1 << (GPPC_OFFSET + 1)))
return;
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index a46db0807195..7767137c3e49 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -347,7 +347,7 @@ EXPORT_SYMBOL(csum_partial)
.if \mode == LEGACY_MODE; \
9: insn reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, .L_exc; \
+ PTR_WD 9b, .L_exc; \
.previous; \
/* This is enabled in EVA mode */ \
.else; \
@@ -356,7 +356,7 @@ EXPORT_SYMBOL(csum_partial)
((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \
- PTR 9b, .L_exc; \
+ PTR_WD 9b, .L_exc; \
.previous; \
.else; \
/* EVA without exception */ \
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 277c32296636..18a43f2e29c8 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -116,7 +116,7 @@
.if \mode == LEGACY_MODE; \
9: insn reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, handler; \
.previous; \
/* This is assembled in EVA mode */ \
.else; \
@@ -125,7 +125,7 @@
((\to == USEROP) && (type == ST_INSN)); \
9: __BUILD_EVA_INSN(insn##e, reg, addr); \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, handler; \
.previous; \
.else; \
/* \
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index b0baa3c79fad..0b342bae9a98 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -52,7 +52,7 @@
9: ___BUILD_EVA_INSN(insn, reg, addr); \
.endif; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, handler; \
.previous
.macro f_fill64 dst, offset, val, fixup, mode
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index 556acf684d7b..13aaa9927ad1 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -15,7 +15,7 @@
#define EX(insn,reg,addr,handler) \
9: insn reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, handler; \
.previous
/*
@@ -59,7 +59,7 @@ LEAF(__strncpy_from_user_asm)
jr ra
.section __ex_table,"a"
- PTR 1b, .Lfault
+ PTR_WD 1b, .Lfault
.previous
EXPORT_SYMBOL(__strncpy_from_user_asm)
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index 92b63f20ec05..6de31b616f9c 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -14,7 +14,7 @@
#define EX(insn,reg,addr,handler) \
9: insn reg, addr; \
.section __ex_table,"a"; \
- PTR 9b, handler; \
+ PTR_WD 9b, handler; \
.previous
/*
diff --git a/arch/mips/loongson2ef/Platform b/arch/mips/loongson2ef/Platform
index ae023b9a1c51..50e659aca543 100644
--- a/arch/mips/loongson2ef/Platform
+++ b/arch/mips/loongson2ef/Platform
@@ -2,12 +2,9 @@
# Loongson Processors' Support
#
-# Only gcc >= 4.4 have Loongson specific support
cflags-$(CONFIG_CPU_LOONGSON2EF) += -Wa,--trap
-cflags-$(CONFIG_CPU_LOONGSON2E) += \
- $(call cc-option,-march=loongson2e,-march=r4600)
-cflags-$(CONFIG_CPU_LOONGSON2F) += \
- $(call cc-option,-march=loongson2f,-march=r4600)
+cflags-$(CONFIG_CPU_LOONGSON2E) += -march=loongson2e
+cflags-$(CONFIG_CPU_LOONGSON2F) += -march=loongson2f
#
# Some versions of binutils, not currently mainline as of 2019/02/04, support
# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
@@ -32,16 +29,8 @@ cflags-$(CONFIG_CPU_LOONGSON2EF) += $(call as-option,-Wa$(comma)-mno-fix-loongso
# Enable the workarounds for Loongson2f
ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS
- ifeq ($(call as-option,-Wa$(comma)-mfix-loongson2f-nop,),)
- $(error only binutils >= 2.20.2 have needed option -mfix-loongson2f-nop)
- else
- cflags-$(CONFIG_CPU_NOP_WORKAROUNDS) += -Wa$(comma)-mfix-loongson2f-nop
- endif
- ifeq ($(call as-option,-Wa$(comma)-mfix-loongson2f-jump,),)
- $(error only binutils >= 2.20.2 have needed option -mfix-loongson2f-jump)
- else
- cflags-$(CONFIG_CPU_JUMP_WORKAROUNDS) += -Wa$(comma)-mfix-loongson2f-jump
- endif
+cflags-$(CONFIG_CPU_NOP_WORKAROUNDS) += -Wa,-mfix-loongson2f-nop
+cflags-$(CONFIG_CPU_JUMP_WORKAROUNDS) += -Wa,-mfix-loongson2f-jump
endif
# Some -march= flags enable MMI instructions, and GCC complains about that
diff --git a/arch/mips/loongson64/vbios_quirk.c b/arch/mips/loongson64/vbios_quirk.c
index 9a29e94d3db1..3115d4de982c 100644
--- a/arch/mips/loongson64/vbios_quirk.c
+++ b/arch/mips/loongson64/vbios_quirk.c
@@ -3,7 +3,7 @@
#include <linux/pci.h>
#include <loongson.h>
-static void pci_fixup_radeon(struct pci_dev *pdev)
+static void pci_fixup_video(struct pci_dev *pdev)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
@@ -22,8 +22,7 @@ static void pci_fixup_radeon(struct pci_dev *pdev)
res->flags = IORESOURCE_MEM | IORESOURCE_ROM_SHADOW |
IORESOURCE_PCI_FIXED;
- dev_info(&pdev->dev, "BAR %d: assigned %pR for Radeon ROM\n",
- PCI_ROM_RESOURCE, res);
+ dev_info(&pdev->dev, "Video device with shadowed ROM at %pR\n", res);
}
-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_ATI, 0x9615,
- PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_radeon);
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_ATI, 0x9615,
+ PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index ec2ae501539a..737870d8fd94 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -332,7 +332,7 @@ static void co_cache_error_call_notifiers(unsigned long val)
}
/*
- * Called when the the exception is recoverable
+ * Called when the exception is recoverable
*/
asmlinkage void cache_parity_error_octeon_recoverable(void)
@@ -341,7 +341,7 @@ asmlinkage void cache_parity_error_octeon_recoverable(void)
}
/*
- * Called when the the exception is not recoverable
+ * Called when the exception is not recoverable
*/
asmlinkage void cache_parity_error_octeon_non_recoverable(void)
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index e7abda9c013f..44f98100e84e 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -171,18 +171,17 @@ good_area:
goto do_sigbus;
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
- /*
- * No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
}
mmap_read_unlock(mm);
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 325e1552cbea..5a8002839550 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -519,17 +519,9 @@ static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
return node_distance(cpu_to_node(from), cpu_to_node(to));
}
-static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
- size_t align)
+static int __init pcpu_cpu_to_node(int cpu)
{
- return memblock_alloc_try_nid(size, align, __pa(MAX_DMA_ADDRESS),
- MEMBLOCK_ALLOC_ACCESSIBLE,
- cpu_to_node(cpu));
-}
-
-static void __init pcpu_fc_free(void *ptr, size_t size)
-{
- memblock_free(ptr, size);
+ return cpu_to_node(cpu);
}
void __init setup_per_cpu_areas(void)
@@ -545,7 +537,7 @@ void __init setup_per_cpu_areas(void)
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
pcpu_cpu_distance,
- pcpu_fc_alloc, pcpu_fc_free);
+ pcpu_cpu_to_node);
if (rc < 0)
panic("Failed to initialize percpu areas.");
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 6ddefafd00cb..9a6bc702608c 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -49,9 +49,7 @@ obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o
obj-$(CONFIG_TOSHIBA_JMR3927) += fixup-jmr3927.o
obj-$(CONFIG_SOC_TX4927) += pci-tx4927.o
obj-$(CONFIG_SOC_TX4938) += pci-tx4938.o
-obj-$(CONFIG_SOC_TX4939) += pci-tx4939.o
obj-$(CONFIG_TOSHIBA_RBTX4927) += fixup-rbtx4927.o
-obj-$(CONFIG_TOSHIBA_RBTX4938) += fixup-rbtx4938.o
obj-$(CONFIG_VICTOR_MPC30X) += fixup-mpc30x.o
obj-$(CONFIG_ZAO_CAPCELLA) += fixup-capcella.o
obj-$(CONFIG_MIKROTIK_RB532) += pci-rc32434.o ops-rc32434.o fixup-rc32434.o
diff --git a/arch/mips/pci/fixup-rbtx4938.c b/arch/mips/pci/fixup-rbtx4938.c
deleted file mode 100644
index ff22a22db73e..000000000000
--- a/arch/mips/pci/fixup-rbtx4938.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Toshiba rbtx4938 pci routines
- * Copyright (C) 2000-2001 Toshiba Corporation
- *
- * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
- */
-#include <linux/types.h>
-#include <asm/txx9/pci.h>
-#include <asm/txx9/rbtx4938.h>
-
-int rbtx4938_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- int irq = tx4938_pcic1_map_irq(dev, slot);
-
- if (irq >= 0)
- return irq;
- irq = pin;
- /* IRQ rotation */
- irq--; /* 0-3 */
- if (slot == TX4927_PCIC_IDSEL_AD_TO_SLOT(23)) {
- /* PCI CardSlot (IDSEL=A23) */
- /* PCIA => PCIA (IDSEL=A23) */
- irq = (irq + 0 + slot) % 4;
- } else {
- /* PCI Backplane */
- if (txx9_pci_option & TXX9_PCI_OPT_PICMG)
- irq = (irq + 33 - slot) % 4;
- else
- irq = (irq + 3 + slot) % 4;
- }
- irq++; /* 1-4 */
-
- switch (irq) {
- case 1:
- irq = RBTX4938_IRQ_IOC_PCIA;
- break;
- case 2:
- irq = RBTX4938_IRQ_IOC_PCIB;
- break;
- case 3:
- irq = RBTX4938_IRQ_IOC_PCIC;
- break;
- case 4:
- irq = RBTX4938_IRQ_IOC_PCID;
- break;
- }
- return irq;
-}
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index 288b58b00dc8..c2860ebbd863 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -68,6 +68,9 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
u64 search_mask;
int index;
+ if (desc->pci.msi_attrib.is_msix)
+ return -EINVAL;
+
/*
* Read the MSI config to figure out how many IRQs this device
* wants. Most devices only want 1, which will give
@@ -182,35 +185,6 @@ msi_irq_allocated:
return 0;
}
-int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- struct msi_desc *entry;
- int ret;
-
- /*
- * MSI-X is not supported.
- */
- if (type == PCI_CAP_ID_MSIX)
- return -EINVAL;
-
- /*
- * If an architecture wants to support multiple MSI, it needs to
- * override arch_setup_msi_irqs()
- */
- if (type == PCI_CAP_ID_MSI && nvec > 1)
- return 1;
-
- for_each_pci_msi_entry(entry, dev) {
- ret = arch_setup_msi_irq(dev, entry);
- if (ret < 0)
- return ret;
- if (ret > 0)
- return -ENOSPC;
- }
-
- return 0;
-}
-
/**
* Called when a device no longer needs its MSI interrupts. All
* MSI interrupts for the device are freed.
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index d3c947fa2969..e07ae098bdd8 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -102,14 +102,12 @@ static u32 rt3883_pci_read_cfg32(struct rt3883_pci_controller *rpc,
unsigned func, unsigned reg)
{
u32 address;
- u32 ret;
address = rt3883_pci_get_cfgaddr(bus, slot, func, reg);
rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR);
- ret = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);
- return ret;
+ return rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);
}
static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc,
diff --git a/arch/mips/pci/pci-tx4939.c b/arch/mips/pci/pci-tx4939.c
deleted file mode 100644
index 09a65f7dbe7c..000000000000
--- a/arch/mips/pci/pci-tx4939.c
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * Based on linux/arch/mips/txx9/rbtx4939/setup.c,
- * and RBTX49xx patch from CELF patch archive.
- *
- * Copyright 2001, 2003-2005 MontaVista Software Inc.
- * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
- * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <asm/txx9/generic.h>
-#include <asm/txx9/tx4939.h>
-
-int __init tx4939_report_pciclk(void)
-{
- int pciclk = 0;
-
- pr_info("PCIC --%s PCICLK:",
- (__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCI66) ?
- " PCI66" : "");
- if (__raw_readq(&tx4939_ccfgptr->pcfg) & TX4939_PCFG_PCICLKEN_ALL) {
- pciclk = txx9_master_clock * 20 / 6;
- if (!(__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCI66))
- pciclk /= 2;
- pr_cont("Internal(%u.%uMHz)",
- (pciclk + 50000) / 1000000,
- ((pciclk + 50000) / 100000) % 10);
- } else {
- pr_cont("External");
- pciclk = -1;
- }
- pr_cont("\n");
- return pciclk;
-}
-
-void __init tx4939_report_pci1clk(void)
-{
- unsigned int pciclk = txx9_master_clock * 20 / 6;
-
- pr_info("PCIC1 -- PCICLK:%u.%uMHz\n",
- (pciclk + 50000) / 1000000,
- ((pciclk + 50000) / 100000) % 10);
-}
-
-int tx4939_pcic1_map_irq(const struct pci_dev *dev, u8 slot)
-{
- if (get_tx4927_pcicptr(dev->bus->sysdata) == tx4939_pcic1ptr) {
- switch (slot) {
- case TX4927_PCIC_IDSEL_AD_TO_SLOT(31):
- if (__raw_readq(&tx4939_ccfgptr->pcfg) &
- TX4939_PCFG_ET0MODE)
- return TXX9_IRQ_BASE + TX4939_IR_ETH(0);
- break;
- case TX4927_PCIC_IDSEL_AD_TO_SLOT(30):
- if (__raw_readq(&tx4939_ccfgptr->pcfg) &
- TX4939_PCFG_ET1MODE)
- return TXX9_IRQ_BASE + TX4939_IR_ETH(1);
- break;
- }
- return 0;
- }
- return -1;
-}
-
-int tx4939_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-{
- int irq = tx4939_pcic1_map_irq(dev, slot);
-
- if (irq >= 0)
- return irq;
- irq = pin;
- /* IRQ rotation */
- irq--; /* 0-3 */
- irq = (irq + 33 - slot) % 4;
- irq++; /* 1-4 */
-
- switch (irq) {
- case 1:
- irq = TXX9_IRQ_BASE + TX4939_IR_INTA;
- break;
- case 2:
- irq = TXX9_IRQ_BASE + TX4939_IR_INTB;
- break;
- case 3:
- irq = TXX9_IRQ_BASE + TX4939_IR_INTC;
- break;
- case 4:
- irq = TXX9_IRQ_BASE + TX4939_IR_INTD;
- break;
- }
- return irq;
-}
-
-void __init tx4939_setup_pcierr_irq(void)
-{
- if (request_irq(TXX9_IRQ_BASE + TX4939_IR_PCIERR,
- tx4927_pcierr_interrupt,
- 0, "PCI error",
- (void *)TX4939_PCIC_REG))
- pr_warn("Failed to request irq for PCIERR\n");
-}
diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
index bdf53807d7c2..115a69fc20ca 100644
--- a/arch/mips/ralink/ill_acc.c
+++ b/arch/mips/ralink/ill_acc.c
@@ -65,6 +65,7 @@ static int __init ill_acc_of_setup(void)
}
irq = irq_of_parse_and_map(np, 0);
+ of_node_put(np);
if (!irq) {
dev_err(&pdev->dev, "failed to get irq\n");
put_device(&pdev->dev);
diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c
index bd71f5b14238..d6efffd4dd20 100644
--- a/arch/mips/ralink/mt7621.c
+++ b/arch/mips/ralink/mt7621.c
@@ -10,6 +10,8 @@
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/memblock.h>
+#include <linux/pci.h>
+#include <linux/bug.h>
#include <asm/bootinfo.h>
#include <asm/mipsregs.h>
@@ -22,6 +24,35 @@
static void *detect_magic __initdata = detect_memory_region;
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct resource_entry *entry;
+ resource_size_t mask;
+
+ entry = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
+ if (!entry) {
+ pr_err("Cannot get memory resource\n");
+ return -EINVAL;
+ }
+
+ if (mips_cps_numiocu(0)) {
+ /*
+ * Hardware doesn't accept mask values with 1s after
+		 * 0s (e.g. 0xffef), so warn if that happens
+ */
+ mask = ~(entry->res->end - entry->res->start) & CM_GCR_REGn_MASK_ADDRMASK;
+ WARN_ON(mask && BIT(ffz(~mask)) - 1 != ~mask);
+
+ write_gcr_reg1_base(entry->res->start);
+ write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0);
+ pr_info("PCI coherence region base: 0x%08llx, mask/settings: 0x%08llx\n",
+ (unsigned long long)read_gcr_reg1_base(),
+ (unsigned long long)read_gcr_reg1_mask());
+ }
+
+ return 0;
+}
+
phys_addr_t mips_cpc_default_phys_base(void)
{
panic("Cannot detect cpc address");
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 0135376c5de5..35a87a2da10b 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -53,17 +53,6 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
-static int memory_dtb;
-
-static int __init early_init_dt_find_memory(unsigned long node,
- const char *uname, int depth, void *data)
-{
- if (depth == 1 && !strcmp(uname, "memory@0"))
- memory_dtb = 1;
-
- return 0;
-}
-
void __init plat_mem_setup(void)
{
void *dtb;
@@ -77,10 +66,10 @@ void __init plat_mem_setup(void)
dtb = get_fdt();
__dt_setup_arch(dtb);
- of_scan_flat_dt(early_init_dt_find_memory, NULL);
- if (memory_dtb)
- of_scan_flat_dt(early_init_dt_scan_memory, NULL);
- else if (soc_info.mem_detect)
+ if (!early_init_dt_scan_memory())
+ return;
+
+ if (soc_info.mem_detect)
soc_info.mem_detect();
else if (soc_info.mem_size)
memblock_add(soc_info.mem_base, soc_info.mem_size * SZ_1M);
diff --git a/arch/mips/sgi-ip22/Platform b/arch/mips/sgi-ip22/Platform
index 62fa30bb959e..a4c46e33562e 100644
--- a/arch/mips/sgi-ip22/Platform
+++ b/arch/mips/sgi-ip22/Platform
@@ -23,10 +23,5 @@ endif
# be 16kb aligned or the handling of the current variable will break.
# Simplified: what IP22 does at 128MB+ in ksegN, IP28 does at 512MB+ in xkphys
#
-ifdef CONFIG_SGI_IP28
- ifeq ($(call cc-option-yn,-march=r10000 -mr10k-cache-barrier=store), n)
- $(error gcc doesn't support needed option -mr10k-cache-barrier=store)
- endif
-endif
cflags-$(CONFIG_SGI_IP28) += -mr10k-cache-barrier=store -I$(srctree)/arch/mips/include/asm/mach-ip28
load-$(CONFIG_SGI_IP28) += 0xa800000020004000
diff --git a/arch/mips/txx9/Kconfig b/arch/mips/txx9/Kconfig
index 85c4c121c71f..6c61feee6dd3 100644
--- a/arch/mips/txx9/Kconfig
+++ b/arch/mips/txx9/Kconfig
@@ -6,6 +6,7 @@ config MACH_TX39XX
config MACH_TX49XX
bool
+ select BOOT_ELF32
select MACH_TXX9
select CEVT_R4K
select CSRC_R4K
@@ -38,23 +39,6 @@ config TOSHIBA_RBTX4927
This Toshiba board is based on the TX4927 processor. Say Y here to
support this machine type
-config TOSHIBA_RBTX4938
- bool "Toshiba RBTX4938 board"
- depends on MACH_TX49XX
- select SOC_TX4938
- help
- This Toshiba board is based on the TX4938 processor. Say Y here to
- support this machine type
-
-config TOSHIBA_RBTX4939
- bool "Toshiba RBTX4939 board"
- depends on MACH_TX49XX
- select SOC_TX4939
- select TXX9_7SEGLED
- help
- This Toshiba board is based on the TX4939 processor. Say Y here to
- support this machine type
-
config SOC_TX3927
bool
select CEVT_TXX9
@@ -71,7 +55,6 @@ config SOC_TX4927
select IRQ_TXX9
select PCI_TX4927
select GPIO_TXX9
- imply HAS_TXX9_ACLC
config SOC_TX4938
bool
@@ -81,18 +64,6 @@ config SOC_TX4938
select IRQ_TXX9
select PCI_TX4927
select GPIO_TXX9
- imply HAS_TXX9_ACLC
-
-config SOC_TX4939
- bool
- select CEVT_TXX9
- imply HAS_TXX9_SERIAL
- select HAVE_PCI
- select PCI_TX4927
- imply HAS_TXX9_ACLC
-
-config TXX9_7SEGLED
- bool
config TOSHIBA_FPCIB0
bool "FPCIB0 Backplane Support"
@@ -104,25 +75,5 @@ config PICMG_PCI_BACKPLANE_DEFAULT
depends on PCI && MACH_TXX9
default y if !TOSHIBA_FPCIB0
-if TOSHIBA_RBTX4938
-
-comment "Multiplex Pin Select"
-choice
- prompt "PIO[58:61]"
- default TOSHIBA_RBTX4938_MPLEX_PIO58_61
-
-config TOSHIBA_RBTX4938_MPLEX_PIO58_61
- bool "PIO"
-config TOSHIBA_RBTX4938_MPLEX_NAND
- bool "NAND"
-config TOSHIBA_RBTX4938_MPLEX_ATA
- bool "ATA"
-config TOSHIBA_RBTX4938_MPLEX_KEEP
- bool "Keep firmware settings"
-
-endchoice
-
-endif
-
config PCI_TX4927
bool
diff --git a/arch/mips/txx9/Makefile b/arch/mips/txx9/Makefile
index 195295937282..53269910a48b 100644
--- a/arch/mips/txx9/Makefile
+++ b/arch/mips/txx9/Makefile
@@ -14,5 +14,3 @@ obj-$(CONFIG_TOSHIBA_JMR3927) += jmr3927/
# Toshiba RBTX49XX boards
#
obj-$(CONFIG_TOSHIBA_RBTX4927) += rbtx4927/
-obj-$(CONFIG_TOSHIBA_RBTX4938) += rbtx4938/
-obj-$(CONFIG_TOSHIBA_RBTX4939) += rbtx4939/
diff --git a/arch/mips/txx9/generic/7segled.c b/arch/mips/txx9/generic/7segled.c
deleted file mode 100644
index 2203c2548cb4..000000000000
--- a/arch/mips/txx9/generic/7segled.c
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * 7 Segment LED routines
- * Based on RBTX49xx patch from CELF patch archive.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * (C) Copyright TOSHIBA CORPORATION 2005-2007
- * All Rights Reserved.
- */
-#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/map_to_7segment.h>
-#include <asm/txx9/generic.h>
-
-static unsigned int tx_7segled_num;
-static void (*tx_7segled_putc)(unsigned int pos, unsigned char val);
-
-void __init txx9_7segled_init(unsigned int num,
- void (*putc)(unsigned int pos, unsigned char val))
-{
- tx_7segled_num = num;
- tx_7segled_putc = putc;
-}
-
-static SEG7_CONVERSION_MAP(txx9_seg7map, MAP_ASCII7SEG_ALPHANUM_LC);
-
-int txx9_7segled_putc(unsigned int pos, char c)
-{
- if (pos >= tx_7segled_num)
- return -EINVAL;
- c = map_to_seg7(&txx9_seg7map, c);
- if (c < 0)
- return c;
- tx_7segled_putc(pos, c);
- return 0;
-}
-
-static ssize_t ascii_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned int ch = dev->id;
- txx9_7segled_putc(ch, buf[0]);
- return size;
-}
-
-static ssize_t raw_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- unsigned int ch = dev->id;
- tx_7segled_putc(ch, buf[0]);
- return size;
-}
-
-static DEVICE_ATTR_WO(ascii);
-static DEVICE_ATTR_WO(raw);
-
-static ssize_t map_seg7_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- memcpy(buf, &txx9_seg7map, sizeof(txx9_seg7map));
- return sizeof(txx9_seg7map);
-}
-
-static ssize_t map_seg7_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- if (size != sizeof(txx9_seg7map))
- return -EINVAL;
- memcpy(&txx9_seg7map, buf, size);
- return size;
-}
-
-static DEVICE_ATTR(map_seg7, 0600, map_seg7_show, map_seg7_store);
-
-static struct bus_type tx_7segled_subsys = {
- .name = "7segled",
- .dev_name = "7segled",
-};
-
-static void tx_7segled_release(struct device *dev)
-{
- kfree(dev);
-}
-
-static int __init tx_7segled_init_sysfs(void)
-{
- int error, i;
- if (!tx_7segled_num)
- return -ENODEV;
- error = subsys_system_register(&tx_7segled_subsys, NULL);
- if (error)
- return error;
- error = device_create_file(tx_7segled_subsys.dev_root, &dev_attr_map_seg7);
- if (error)
- return error;
- for (i = 0; i < tx_7segled_num; i++) {
- struct device *dev;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- error = -ENODEV;
- break;
- }
- dev->id = i;
- dev->bus = &tx_7segled_subsys;
- dev->release = &tx_7segled_release;
- error = device_register(dev);
- if (error) {
- put_device(dev);
- return error;
- }
- device_create_file(dev, &dev_attr_ascii);
- device_create_file(dev, &dev_attr_raw);
- }
- return error;
-}
-
-device_initcall(tx_7segled_init_sysfs);
diff --git a/arch/mips/txx9/generic/Makefile b/arch/mips/txx9/generic/Makefile
index 6d00580fc81d..be5af9fe7c11 100644
--- a/arch/mips/txx9/generic/Makefile
+++ b/arch/mips/txx9/generic/Makefile
@@ -8,7 +8,4 @@ obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_SOC_TX3927) += setup_tx3927.o irq_tx3927.o
obj-$(CONFIG_SOC_TX4927) += mem_tx4927.o setup_tx4927.o irq_tx4927.o
obj-$(CONFIG_SOC_TX4938) += mem_tx4927.o setup_tx4938.o irq_tx4938.o
-obj-$(CONFIG_SOC_TX4939) += setup_tx4939.o irq_tx4939.o
obj-$(CONFIG_TOSHIBA_FPCIB0) += smsc_fdc37m81x.o
-obj-$(CONFIG_SPI) += spi_eeprom.o
-obj-$(CONFIG_TXX9_7SEGLED) += 7segled.o
diff --git a/arch/mips/txx9/generic/irq_tx4939.c b/arch/mips/txx9/generic/irq_tx4939.c
deleted file mode 100644
index 0d7267e81a8c..000000000000
--- a/arch/mips/txx9/generic/irq_tx4939.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- * TX4939 irq routines
- * Based on linux/arch/mips/kernel/irq_txx9.c,
- * and RBTX49xx patch from CELF patch archive.
- *
- * Copyright 2001, 2003-2005 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- * ahennessy@mvista.com
- * source@mvista.com
- * Copyright (C) 2000-2001,2005-2007 Toshiba Corporation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-/*
- * TX4939 defines 64 IRQs.
- * Similer to irq_txx9.c but different register layouts.
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/types.h>
-#include <asm/irq_cpu.h>
-#include <asm/txx9irq.h>
-#include <asm/txx9/tx4939.h>
-
-/* IRCER : Int. Control Enable */
-#define TXx9_IRCER_ICE 0x00000001
-
-/* IRCR : Int. Control */
-#define TXx9_IRCR_LOW 0x00000000
-#define TXx9_IRCR_HIGH 0x00000001
-#define TXx9_IRCR_DOWN 0x00000002
-#define TXx9_IRCR_UP 0x00000003
-#define TXx9_IRCR_EDGE(cr) ((cr) & 0x00000002)
-
-/* IRSCR : Int. Status Control */
-#define TXx9_IRSCR_EIClrE 0x00000100
-#define TXx9_IRSCR_EIClr_MASK 0x0000000f
-
-/* IRCSR : Int. Current Status */
-#define TXx9_IRCSR_IF 0x00010000
-
-#define irc_dlevel 0
-#define irc_elevel 1
-
-static struct {
- unsigned char level;
- unsigned char mode;
-} tx4939irq[TX4939_NUM_IR] __read_mostly;
-
-static void tx4939_irq_unmask(struct irq_data *d)
-{
- unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
- u32 __iomem *lvlp;
- int ofs;
- if (irq_nr < 32) {
- irq_nr--;
- lvlp = &tx4939_ircptr->lvl[(irq_nr % 16) / 2].r;
- } else {
- irq_nr -= 32;
- lvlp = &tx4939_ircptr->lvl[8 + (irq_nr % 16) / 2].r;
- }
- ofs = (irq_nr & 16) + (irq_nr & 1) * 8;
- __raw_writel((__raw_readl(lvlp) & ~(0xff << ofs))
- | (tx4939irq[irq_nr].level << ofs),
- lvlp);
-}
-
-static inline void tx4939_irq_mask(struct irq_data *d)
-{
- unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
- u32 __iomem *lvlp;
- int ofs;
- if (irq_nr < 32) {
- irq_nr--;
- lvlp = &tx4939_ircptr->lvl[(irq_nr % 16) / 2].r;
- } else {
- irq_nr -= 32;
- lvlp = &tx4939_ircptr->lvl[8 + (irq_nr % 16) / 2].r;
- }
- ofs = (irq_nr & 16) + (irq_nr & 1) * 8;
- __raw_writel((__raw_readl(lvlp) & ~(0xff << ofs))
- | (irc_dlevel << ofs),
- lvlp);
- mmiowb();
-}
-
-static void tx4939_irq_mask_ack(struct irq_data *d)
-{
- unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
-
- tx4939_irq_mask(d);
- if (TXx9_IRCR_EDGE(tx4939irq[irq_nr].mode)) {
- irq_nr--;
- /* clear edge detection */
- __raw_writel((TXx9_IRSCR_EIClrE | (irq_nr & 0xf))
- << (irq_nr & 0x10),
- &tx4939_ircptr->edc.r);
- }
-}
-
-static int tx4939_irq_set_type(struct irq_data *d, unsigned int flow_type)
-{
- unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
- u32 cr;
- u32 __iomem *crp;
- int ofs;
- int mode;
-
- if (flow_type & IRQF_TRIGGER_PROBE)
- return 0;
- switch (flow_type & IRQF_TRIGGER_MASK) {
- case IRQF_TRIGGER_RISING:
- mode = TXx9_IRCR_UP;
- break;
- case IRQF_TRIGGER_FALLING:
- mode = TXx9_IRCR_DOWN;
- break;
- case IRQF_TRIGGER_HIGH:
- mode = TXx9_IRCR_HIGH;
- break;
- case IRQF_TRIGGER_LOW:
- mode = TXx9_IRCR_LOW;
- break;
- default:
- return -EINVAL;
- }
- if (irq_nr < 32) {
- irq_nr--;
- crp = &tx4939_ircptr->dm[(irq_nr & 8) >> 3].r;
- } else {
- irq_nr -= 32;
- crp = &tx4939_ircptr->dm2[((irq_nr & 8) >> 3)].r;
- }
- ofs = (((irq_nr & 16) >> 1) | (irq_nr & (8 - 1))) * 2;
- cr = __raw_readl(crp);
- cr &= ~(0x3 << ofs);
- cr |= (mode & 0x3) << ofs;
- __raw_writel(cr, crp);
- tx4939irq[irq_nr].mode = mode;
- return 0;
-}
-
-static struct irq_chip tx4939_irq_chip = {
- .name = "TX4939",
- .irq_ack = tx4939_irq_mask_ack,
- .irq_mask = tx4939_irq_mask,
- .irq_mask_ack = tx4939_irq_mask_ack,
- .irq_unmask = tx4939_irq_unmask,
- .irq_set_type = tx4939_irq_set_type,
-};
-
-static int tx4939_irq_set_pri(int irc_irq, int new_pri)
-{
- int old_pri;
-
- if ((unsigned int)irc_irq >= TX4939_NUM_IR)
- return 0;
- old_pri = tx4939irq[irc_irq].level;
- tx4939irq[irc_irq].level = new_pri;
- return old_pri;
-}
-
-void __init tx4939_irq_init(void)
-{
- int i;
-
- mips_cpu_irq_init();
- /* disable interrupt control */
- __raw_writel(0, &tx4939_ircptr->den.r);
- __raw_writel(0, &tx4939_ircptr->maskint.r);
- __raw_writel(0, &tx4939_ircptr->maskext.r);
- /* irq_base + 0 is not used */
- for (i = 1; i < TX4939_NUM_IR; i++) {
- tx4939irq[i].level = 4; /* middle level */
- tx4939irq[i].mode = TXx9_IRCR_LOW;
- irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &tx4939_irq_chip,
- handle_level_irq);
- }
-
- /* mask all IRC interrupts */
- __raw_writel(0, &tx4939_ircptr->msk.r);
- for (i = 0; i < 16; i++)
- __raw_writel(0, &tx4939_ircptr->lvl[i].r);
- /* setup IRC interrupt mode (Low Active) */
- for (i = 0; i < 2; i++)
- __raw_writel(0, &tx4939_ircptr->dm[i].r);
- for (i = 0; i < 2; i++)
- __raw_writel(0, &tx4939_ircptr->dm2[i].r);
- /* enable interrupt control */
- __raw_writel(TXx9_IRCER_ICE, &tx4939_ircptr->den.r);
- __raw_writel(irc_elevel, &tx4939_ircptr->msk.r);
-
- irq_set_chained_handler(MIPS_CPU_IRQ_BASE + TX4939_IRC_INT,
- handle_simple_irq);
-
- /* raise priority for errors, timers, sio */
- tx4939_irq_set_pri(TX4939_IR_WTOERR, 7);
- tx4939_irq_set_pri(TX4939_IR_PCIERR, 7);
- tx4939_irq_set_pri(TX4939_IR_PCIPME, 7);
- for (i = 0; i < TX4939_NUM_IR_TMR; i++)
- tx4939_irq_set_pri(TX4939_IR_TMR(i), 6);
- for (i = 0; i < TX4939_NUM_IR_SIO; i++)
- tx4939_irq_set_pri(TX4939_IR_SIO(i), 5);
-}
-
-int tx4939_irq(void)
-{
- u32 csr = __raw_readl(&tx4939_ircptr->cs.r);
-
- if (likely(!(csr & TXx9_IRCSR_IF)))
- return TXX9_IRQ_BASE + (csr & (TX4939_NUM_IR - 1));
- return -1;
-}
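
The mask/unmask handlers removed above share one pattern: each 32-bit IRC level register packs four 8-bit priority fields, and changing one interrupt's level is a read-modify-write of a single field at offset (irq_nr & 16) + (irq_nr & 1) * 8. A minimal standalone sketch of that pattern, assuming a hypothetical update_level_field() helper in place of the open-coded __raw_readl()/__raw_writel() sequence:

#include <stdint.h>

/* sketch only: models the field update in tx4939_irq_unmask()/_mask() */
static void update_level_field(volatile uint32_t *lvlp, unsigned int irq_nr,
			       uint8_t level)
{
	/* bit 4 of irq_nr selects the upper half-word (ofs 16),
	   bit 0 selects the upper byte within it (ofs +8) */
	int ofs = (irq_nr & 16) + (irq_nr & 1) * 8;
	uint32_t v = *lvlp;		/* stands in for __raw_readl() */

	v &= ~(0xffu << ofs);		/* clear the old 8-bit field */
	v |= (uint32_t)level << ofs;	/* insert the new priority */
	*lvlp = v;			/* stands in for __raw_writel() */
}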
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 42ba1e97dff0..39cd1edf9d80 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -315,16 +315,6 @@ static void __init select_board(void)
txx9_board_vec = &rbtx4937_vec;
break;
#endif
-#ifdef CONFIG_TOSHIBA_RBTX4938
- case 0x4938:
- txx9_board_vec = &rbtx4938_vec;
- break;
-#endif
-#ifdef CONFIG_TOSHIBA_RBTX4939
- case 0x4939:
- txx9_board_vec = &rbtx4939_vec;
- break;
-#endif
}
#endif
}
@@ -590,21 +580,6 @@ unsigned long (*__swizzle_addr_b)(unsigned long port) = __swizzle_addr_none;
EXPORT_SYMBOL(__swizzle_addr_b);
#endif
-#ifdef NEEDS_TXX9_IOSWABW
-static u16 ioswabw_default(volatile u16 *a, u16 x)
-{
- return le16_to_cpu(x);
-}
-static u16 __mem_ioswabw_default(volatile u16 *a, u16 x)
-{
- return x;
-}
-u16 (*ioswabw)(volatile u16 *a, u16 x) = ioswabw_default;
-EXPORT_SYMBOL(ioswabw);
-u16 (*__mem_ioswabw)(volatile u16 *a, u16 x) = __mem_ioswabw_default;
-EXPORT_SYMBOL(__mem_ioswabw);
-#endif
-
void __init txx9_physmap_flash_init(int no, unsigned long addr,
unsigned long size,
const struct physmap_flash_data *pdata)
@@ -840,34 +815,6 @@ void __init txx9_aclc_init(unsigned long baseaddr, int irq,
unsigned int dma_chan_out,
unsigned int dma_chan_in)
{
-#if IS_ENABLED(CONFIG_SND_SOC_TXX9ACLC)
- unsigned int dma_base = dmac_id * TXX9_DMA_MAX_NR_CHANNELS;
- struct resource res[] = {
- {
- .start = baseaddr,
- .end = baseaddr + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- }, {
- .start = irq,
- .flags = IORESOURCE_IRQ,
- }, {
- .name = "txx9dmac-chan",
- .start = dma_base + dma_chan_out,
- .flags = IORESOURCE_DMA,
- }, {
- .name = "txx9dmac-chan",
- .start = dma_base + dma_chan_in,
- .flags = IORESOURCE_DMA,
- }
- };
- struct platform_device *pdev =
- platform_device_alloc("txx9aclc-ac97", -1);
-
- if (!pdev ||
- platform_device_add_resources(pdev, res, ARRAY_SIZE(res)) ||
- platform_device_add(pdev))
- platform_device_put(pdev);
-#endif
}
static struct bus_type txx9_sramc_subsys = {
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
deleted file mode 100644
index f5f59b7401a3..000000000000
--- a/arch/mips/txx9/generic/setup_tx4939.c
+++ /dev/null
@@ -1,568 +0,0 @@
-/*
- * TX4939 setup routines
- * Based on linux/arch/mips/txx9/generic/setup_tx4938.c,
- * and RBTX49xx patch from CELF patch archive.
- *
- * 2003-2005 (c) MontaVista Software, Inc.
- * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/netdevice.h>
-#include <linux/notifier.h>
-#include <linux/device.h>
-#include <linux/ethtool.h>
-#include <linux/param.h>
-#include <linux/ptrace.h>
-#include <linux/mtd/physmap.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/txx9/ndfmc.h>
-#include <asm/reboot.h>
-#include <asm/traps.h>
-#include <asm/txx9irq.h>
-#include <asm/txx9tmr.h>
-#include <asm/txx9/generic.h>
-#include <asm/txx9/dmac.h>
-#include <asm/txx9/tx4939.h>
-
-static void __init tx4939_wdr_init(void)
-{
- /* report watchdog reset status */
- if (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDRST)
- pr_warn("Watchdog reset detected at 0x%lx\n",
- read_c0_errorepc());
- /* clear WatchDogReset (W1C) */
- tx4939_ccfg_set(TX4939_CCFG_WDRST);
- /* do reset on watchdog */
- tx4939_ccfg_set(TX4939_CCFG_WR);
-}
-
-void __init tx4939_wdt_init(void)
-{
- txx9_wdt_init(TX4939_TMR_REG(2) & 0xfffffffffULL);
-}
-
-static void tx4939_machine_restart(char *command)
-{
- local_irq_disable();
- pr_emerg("Rebooting (with %s watchdog reset)...\n",
- (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDREXEN) ?
- "external" : "internal");
- /* clear watchdog status */
- tx4939_ccfg_set(TX4939_CCFG_WDRST); /* W1C */
- txx9_wdt_now(TX4939_TMR_REG(2) & 0xfffffffffULL);
- while (!(____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDRST))
- ;
- mdelay(10);
- if (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDREXEN) {
- pr_emerg("Rebooting (with internal watchdog reset)...\n");
- /* External WDRST failed. Do internal watchdog reset */
- tx4939_ccfg_clear(TX4939_CCFG_WDREXEN);
- }
- /* fallback */
- (*_machine_halt)();
-}
-
-void show_registers(struct pt_regs *regs);
-static int tx4939_be_handler(struct pt_regs *regs, int is_fixup)
-{
- int data = regs->cp0_cause & 4;
- console_verbose();
- pr_err("%cBE exception at %#lx\n",
- data ? 'D' : 'I', regs->cp0_epc);
- pr_err("ccfg:%llx, toea:%llx\n",
- (unsigned long long)____raw_readq(&tx4939_ccfgptr->ccfg),
- (unsigned long long)____raw_readq(&tx4939_ccfgptr->toea));
-#ifdef CONFIG_PCI
- tx4927_report_pcic_status();
-#endif
- show_registers(regs);
- panic("BusError!");
-}
-static void __init tx4939_be_init(void)
-{
- mips_set_be_handler(tx4939_be_handler);
-}
-
-static struct resource tx4939_sdram_resource[4];
-static struct resource tx4939_sram_resource;
-#define TX4939_SRAM_SIZE 0x800
-
-void __init tx4939_setup(void)
-{
- int i;
- __u32 divmode;
- __u64 pcfg;
- unsigned int cpuclk = 0;
-
- txx9_reg_res_init(TX4939_REV_PCODE(), TX4939_REG_BASE,
- TX4939_REG_SIZE);
- set_c0_config(TX49_CONF_CWFON);
-
- /* SDRAMC,EBUSC are configured by PROM */
- for (i = 0; i < 4; i++) {
- if (!(TX4939_EBUSC_CR(i) & 0x8))
- continue; /* disabled */
- txx9_ce_res[i].start = (unsigned long)TX4939_EBUSC_BA(i);
- txx9_ce_res[i].end =
- txx9_ce_res[i].start + TX4939_EBUSC_SIZE(i) - 1;
- request_resource(&iomem_resource, &txx9_ce_res[i]);
- }
-
- /* clocks */
- if (txx9_master_clock) {
- /* calculate cpu_clock from master_clock */
- divmode = (__u32)____raw_readq(&tx4939_ccfgptr->ccfg) &
- TX4939_CCFG_MULCLK_MASK;
- cpuclk = txx9_master_clock * 20 / 2;
- switch (divmode) {
- case TX4939_CCFG_MULCLK_8:
- cpuclk = cpuclk / 3 * 4 /* / 6 * 8 */; break;
- case TX4939_CCFG_MULCLK_9:
- cpuclk = cpuclk / 2 * 3 /* / 6 * 9 */; break;
- case TX4939_CCFG_MULCLK_10:
- cpuclk = cpuclk / 3 * 5 /* / 6 * 10 */; break;
- case TX4939_CCFG_MULCLK_11:
- cpuclk = cpuclk / 6 * 11; break;
- case TX4939_CCFG_MULCLK_12:
- cpuclk = cpuclk * 2 /* / 6 * 12 */; break;
- case TX4939_CCFG_MULCLK_13:
- cpuclk = cpuclk / 6 * 13; break;
- case TX4939_CCFG_MULCLK_14:
- cpuclk = cpuclk / 3 * 7 /* / 6 * 14 */; break;
- case TX4939_CCFG_MULCLK_15:
- cpuclk = cpuclk / 2 * 5 /* / 6 * 15 */; break;
- }
- txx9_cpu_clock = cpuclk;
- } else {
- if (txx9_cpu_clock == 0)
- txx9_cpu_clock = 400000000; /* 400MHz */
- /* calculate master_clock from cpu_clock */
- cpuclk = txx9_cpu_clock;
- divmode = (__u32)____raw_readq(&tx4939_ccfgptr->ccfg) &
- TX4939_CCFG_MULCLK_MASK;
- switch (divmode) {
- case TX4939_CCFG_MULCLK_8:
- txx9_master_clock = cpuclk * 6 / 8; break;
- case TX4939_CCFG_MULCLK_9:
- txx9_master_clock = cpuclk * 6 / 9; break;
- case TX4939_CCFG_MULCLK_10:
- txx9_master_clock = cpuclk * 6 / 10; break;
- case TX4939_CCFG_MULCLK_11:
- txx9_master_clock = cpuclk * 6 / 11; break;
- case TX4939_CCFG_MULCLK_12:
- txx9_master_clock = cpuclk * 6 / 12; break;
- case TX4939_CCFG_MULCLK_13:
- txx9_master_clock = cpuclk * 6 / 13; break;
- case TX4939_CCFG_MULCLK_14:
- txx9_master_clock = cpuclk * 6 / 14; break;
- case TX4939_CCFG_MULCLK_15:
- txx9_master_clock = cpuclk * 6 / 15; break;
- }
- txx9_master_clock /= 10; /* * 2 / 20 */
- }
- /* calculate gbus_clock from cpu_clock */
- divmode = (__u32)____raw_readq(&tx4939_ccfgptr->ccfg) &
- TX4939_CCFG_YDIVMODE_MASK;
- txx9_gbus_clock = txx9_cpu_clock;
- switch (divmode) {
- case TX4939_CCFG_YDIVMODE_2:
- txx9_gbus_clock /= 2; break;
- case TX4939_CCFG_YDIVMODE_3:
- txx9_gbus_clock /= 3; break;
- case TX4939_CCFG_YDIVMODE_5:
- txx9_gbus_clock /= 5; break;
- case TX4939_CCFG_YDIVMODE_6:
- txx9_gbus_clock /= 6; break;
- }
-	/* change the default value so that udelay/mdelay take a reasonable time */
- loops_per_jiffy = txx9_cpu_clock / HZ / 2;
-
- /* CCFG */
- tx4939_wdr_init();
- /* clear BusErrorOnWrite flag (W1C) */
- tx4939_ccfg_set(TX4939_CCFG_WDRST | TX4939_CCFG_BEOW);
- /* enable Timeout BusError */
- if (txx9_ccfg_toeon)
- tx4939_ccfg_set(TX4939_CCFG_TOE);
-
- /* DMA selection */
- txx9_clear64(&tx4939_ccfgptr->pcfg, TX4939_PCFG_DMASEL_ALL);
-
- /* Use external clock for external arbiter */
- if (!(____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCIARB))
- txx9_clear64(&tx4939_ccfgptr->pcfg, TX4939_PCFG_PCICLKEN_ALL);
-
- pr_info("%s -- %dMHz(M%dMHz,G%dMHz) CRIR:%08x CCFG:%llx PCFG:%llx\n",
- txx9_pcode_str,
- (cpuclk + 500000) / 1000000,
- (txx9_master_clock + 500000) / 1000000,
- (txx9_gbus_clock + 500000) / 1000000,
- (__u32)____raw_readq(&tx4939_ccfgptr->crir),
- ____raw_readq(&tx4939_ccfgptr->ccfg),
- ____raw_readq(&tx4939_ccfgptr->pcfg));
-
- pr_info("%s DDRC -- EN:%08x", txx9_pcode_str,
- (__u32)____raw_readq(&tx4939_ddrcptr->winen));
- for (i = 0; i < 4; i++) {
- __u64 win = ____raw_readq(&tx4939_ddrcptr->win[i]);
- if (!((__u32)____raw_readq(&tx4939_ddrcptr->winen) & (1 << i)))
- continue; /* disabled */
- pr_cont(" #%d:%016llx", i, win);
- tx4939_sdram_resource[i].name = "DDR SDRAM";
- tx4939_sdram_resource[i].start =
- (unsigned long)(win >> 48) << 20;
- tx4939_sdram_resource[i].end =
- ((((unsigned long)(win >> 32) & 0xffff) + 1) <<
- 20) - 1;
- tx4939_sdram_resource[i].flags = IORESOURCE_MEM;
- request_resource(&iomem_resource, &tx4939_sdram_resource[i]);
- }
- pr_cont("\n");
-
- /* SRAM */
- if (____raw_readq(&tx4939_sramcptr->cr) & 1) {
- unsigned int size = TX4939_SRAM_SIZE;
- tx4939_sram_resource.name = "SRAM";
- tx4939_sram_resource.start =
- (____raw_readq(&tx4939_sramcptr->cr) >> (39-11))
- & ~(size - 1);
- tx4939_sram_resource.end =
- tx4939_sram_resource.start + TX4939_SRAM_SIZE - 1;
- tx4939_sram_resource.flags = IORESOURCE_MEM;
- request_resource(&iomem_resource, &tx4939_sram_resource);
- }
-
- /* TMR */
- /* disable all timers */
- for (i = 0; i < TX4939_NR_TMR; i++)
- txx9_tmr_init(TX4939_TMR_REG(i) & 0xfffffffffULL);
-
- /* set PCIC1 reset (required to prevent hangup on BIST) */
- txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_PCI1RST);
- pcfg = ____raw_readq(&tx4939_ccfgptr->pcfg);
- if (pcfg & (TX4939_PCFG_ET0MODE | TX4939_PCFG_ET1MODE)) {
-		mdelay(1); /* at least 128 cpu clocks */
- /* clear PCIC1 reset */
- txx9_clear64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_PCI1RST);
- } else {
- pr_info("%s: stop PCIC1\n", txx9_pcode_str);
- /* stop PCIC1 */
- txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_PCI1CKD);
- }
- if (!(pcfg & TX4939_PCFG_ET0MODE)) {
- pr_info("%s: stop ETH0\n", txx9_pcode_str);
- txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_ETH0RST);
- txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_ETH0CKD);
- }
- if (!(pcfg & TX4939_PCFG_ET1MODE)) {
- pr_info("%s: stop ETH1\n", txx9_pcode_str);
- txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_ETH1RST);
- txx9_set64(&tx4939_ccfgptr->clkctr, TX4939_CLKCTR_ETH1CKD);
- }
-
- _machine_restart = tx4939_machine_restart;
- board_be_init = tx4939_be_init;
-}
-
-void __init tx4939_time_init(unsigned int tmrnr)
-{
- if (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_TINTDIS)
- txx9_clockevent_init(TX4939_TMR_REG(tmrnr) & 0xfffffffffULL,
- TXX9_IRQ_BASE + TX4939_IR_TMR(tmrnr),
- TXX9_IMCLK);
-}
-
-void __init tx4939_sio_init(unsigned int sclk, unsigned int cts_mask)
-{
- int i;
- unsigned int ch_mask = 0;
- __u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg);
-
-	cts_mask |= ~1; /* only SIO0 has RTS/CTS */
- if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO0)
- cts_mask |= 1 << 0; /* disable SIO0 RTS/CTS by PCFG setting */
- if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO2)
- ch_mask |= 1 << 2; /* disable SIO2 by PCFG setting */
- if (pcfg & TX4939_PCFG_SIO3MODE)
- ch_mask |= 1 << 3; /* disable SIO3 by PCFG setting */
- for (i = 0; i < 4; i++) {
- if ((1 << i) & ch_mask)
- continue;
- txx9_sio_init(TX4939_SIO_REG(i) & 0xfffffffffULL,
- TXX9_IRQ_BASE + TX4939_IR_SIO(i),
- i, sclk, (1 << i) & cts_mask);
- }
-}
-
-#if IS_ENABLED(CONFIG_TC35815)
-static u32 tx4939_get_eth_speed(struct net_device *dev)
-{
- struct ethtool_link_ksettings cmd;
-
- if (__ethtool_get_link_ksettings(dev, &cmd))
- return 100; /* default 100Mbps */
-
- return cmd.base.speed;
-}
-
-static int tx4939_netdev_event(struct notifier_block *this,
- unsigned long event,
- void *ptr)
-{
- struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-
- if (event == NETDEV_CHANGE && netif_carrier_ok(dev)) {
- __u64 bit = 0;
- if (dev->irq == TXX9_IRQ_BASE + TX4939_IR_ETH(0))
- bit = TX4939_PCFG_SPEED0;
- else if (dev->irq == TXX9_IRQ_BASE + TX4939_IR_ETH(1))
- bit = TX4939_PCFG_SPEED1;
- if (bit) {
- if (tx4939_get_eth_speed(dev) == 100)
- txx9_set64(&tx4939_ccfgptr->pcfg, bit);
- else
- txx9_clear64(&tx4939_ccfgptr->pcfg, bit);
- }
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block tx4939_netdev_notifier = {
- .notifier_call = tx4939_netdev_event,
- .priority = 1,
-};
-
-void __init tx4939_ethaddr_init(unsigned char *addr0, unsigned char *addr1)
-{
- u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg);
-
- if (addr0 && (pcfg & TX4939_PCFG_ET0MODE))
- txx9_ethaddr_init(TXX9_IRQ_BASE + TX4939_IR_ETH(0), addr0);
- if (addr1 && (pcfg & TX4939_PCFG_ET1MODE))
- txx9_ethaddr_init(TXX9_IRQ_BASE + TX4939_IR_ETH(1), addr1);
- register_netdevice_notifier(&tx4939_netdev_notifier);
-}
-#else
-void __init tx4939_ethaddr_init(unsigned char *addr0, unsigned char *addr1)
-{
-}
-#endif
-
-void __init tx4939_mtd_init(int ch)
-{
- struct physmap_flash_data pdata = {
- .width = TX4939_EBUSC_WIDTH(ch) / 8,
- };
- unsigned long start = txx9_ce_res[ch].start;
- unsigned long size = txx9_ce_res[ch].end - start + 1;
-
- if (!(TX4939_EBUSC_CR(ch) & 0x8))
- return; /* disabled */
- txx9_physmap_flash_init(ch, start, size, &pdata);
-}
-
-#define TX4939_ATA_REG_PHYS(ch) (TX4939_ATA_REG(ch) & 0xfffffffffULL)
-void __init tx4939_ata_init(void)
-{
- static struct resource ata0_res[] = {
- {
- .start = TX4939_ATA_REG_PHYS(0),
- .end = TX4939_ATA_REG_PHYS(0) + 0x1000 - 1,
- .flags = IORESOURCE_MEM,
- }, {
- .start = TXX9_IRQ_BASE + TX4939_IR_ATA(0),
- .flags = IORESOURCE_IRQ,
- },
- };
- static struct resource ata1_res[] = {
- {
- .start = TX4939_ATA_REG_PHYS(1),
- .end = TX4939_ATA_REG_PHYS(1) + 0x1000 - 1,
- .flags = IORESOURCE_MEM,
- }, {
- .start = TXX9_IRQ_BASE + TX4939_IR_ATA(1),
- .flags = IORESOURCE_IRQ,
- },
- };
- static struct platform_device ata0_dev = {
- .name = "tx4939ide",
- .id = 0,
- .num_resources = ARRAY_SIZE(ata0_res),
- .resource = ata0_res,
- };
- static struct platform_device ata1_dev = {
- .name = "tx4939ide",
- .id = 1,
- .num_resources = ARRAY_SIZE(ata1_res),
- .resource = ata1_res,
- };
- __u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg);
-
- if (pcfg & TX4939_PCFG_ATA0MODE)
- platform_device_register(&ata0_dev);
- if ((pcfg & (TX4939_PCFG_ATA1MODE |
- TX4939_PCFG_ET1MODE |
- TX4939_PCFG_ET0MODE)) == TX4939_PCFG_ATA1MODE)
- platform_device_register(&ata1_dev);
-}
-
-void __init tx4939_rtc_init(void)
-{
- static struct resource res[] = {
- {
- .start = TX4939_RTC_REG & 0xfffffffffULL,
- .end = (TX4939_RTC_REG & 0xfffffffffULL) + 0x100 - 1,
- .flags = IORESOURCE_MEM,
- }, {
- .start = TXX9_IRQ_BASE + TX4939_IR_RTC,
- .flags = IORESOURCE_IRQ,
- },
- };
- static struct platform_device rtc_dev = {
- .name = "tx4939rtc",
- .id = -1,
- .num_resources = ARRAY_SIZE(res),
- .resource = res,
- };
-
- platform_device_register(&rtc_dev);
-}
-
-void __init tx4939_ndfmc_init(unsigned int hold, unsigned int spw,
- unsigned char ch_mask, unsigned char wide_mask)
-{
- struct txx9ndfmc_platform_data plat_data = {
- .shift = 1,
- .gbus_clock = txx9_gbus_clock,
- .hold = hold,
- .spw = spw,
- .flags = NDFMC_PLAT_FLAG_NO_RSTR | NDFMC_PLAT_FLAG_HOLDADD |
- NDFMC_PLAT_FLAG_DUMMYWRITE,
- .ch_mask = ch_mask,
- .wide_mask = wide_mask,
- };
- txx9_ndfmc_init(TX4939_NDFMC_REG & 0xfffffffffULL, &plat_data);
-}
-
-void __init tx4939_dmac_init(int memcpy_chan0, int memcpy_chan1)
-{
- struct txx9dmac_platform_data plat_data = {
- .have_64bit_regs = true,
- };
- int i;
-
- for (i = 0; i < 2; i++) {
- plat_data.memcpy_chan = i ? memcpy_chan1 : memcpy_chan0;
- txx9_dmac_init(i, TX4939_DMA_REG(i) & 0xfffffffffULL,
- TXX9_IRQ_BASE + TX4939_IR_DMA(i, 0),
- &plat_data);
- }
-}
-
-void __init tx4939_aclc_init(void)
-{
- u64 pcfg = __raw_readq(&tx4939_ccfgptr->pcfg);
-
- if ((pcfg & TX4939_PCFG_I2SMODE_MASK) == TX4939_PCFG_I2SMODE_ACLC)
- txx9_aclc_init(TX4939_ACLC_REG & 0xfffffffffULL,
- TXX9_IRQ_BASE + TX4939_IR_ACLC, 1, 0, 1);
-}
-
-void __init tx4939_sramc_init(void)
-{
- if (tx4939_sram_resource.start)
- txx9_sramc_init(&tx4939_sram_resource);
-}
-
-void __init tx4939_rng_init(void)
-{
- static struct resource res = {
- .start = TX4939_RNG_REG & 0xfffffffffULL,
- .end = (TX4939_RNG_REG & 0xfffffffffULL) + 0x30 - 1,
- .flags = IORESOURCE_MEM,
- };
- static struct platform_device pdev = {
- .name = "tx4939-rng",
- .id = -1,
- .num_resources = 1,
- .resource = &res,
- };
-
- platform_device_register(&pdev);
-}
-
-static void __init tx4939_stop_unused_modules(void)
-{
- __u64 pcfg, rst = 0, ckd = 0;
- char buf[128];
-
- buf[0] = '\0';
- local_irq_disable();
- pcfg = ____raw_readq(&tx4939_ccfgptr->pcfg);
- if ((pcfg & TX4939_PCFG_I2SMODE_MASK) !=
- TX4939_PCFG_I2SMODE_ACLC) {
- rst |= TX4939_CLKCTR_ACLRST;
- ckd |= TX4939_CLKCTR_ACLCKD;
- strcat(buf, " ACLC");
- }
- if ((pcfg & TX4939_PCFG_I2SMODE_MASK) !=
- TX4939_PCFG_I2SMODE_I2S &&
- (pcfg & TX4939_PCFG_I2SMODE_MASK) !=
- TX4939_PCFG_I2SMODE_I2S_ALT) {
- rst |= TX4939_CLKCTR_I2SRST;
- ckd |= TX4939_CLKCTR_I2SCKD;
- strcat(buf, " I2S");
- }
- if (!(pcfg & TX4939_PCFG_ATA0MODE)) {
- rst |= TX4939_CLKCTR_ATA0RST;
- ckd |= TX4939_CLKCTR_ATA0CKD;
- strcat(buf, " ATA0");
- }
- if (!(pcfg & TX4939_PCFG_ATA1MODE)) {
- rst |= TX4939_CLKCTR_ATA1RST;
- ckd |= TX4939_CLKCTR_ATA1CKD;
- strcat(buf, " ATA1");
- }
- if (pcfg & TX4939_PCFG_SPIMODE) {
- rst |= TX4939_CLKCTR_SPIRST;
- ckd |= TX4939_CLKCTR_SPICKD;
- strcat(buf, " SPI");
- }
- if (!(pcfg & (TX4939_PCFG_VSSMODE | TX4939_PCFG_VPSMODE))) {
- rst |= TX4939_CLKCTR_VPCRST;
- ckd |= TX4939_CLKCTR_VPCCKD;
- strcat(buf, " VPC");
- }
- if ((pcfg & TX4939_PCFG_SIO2MODE_MASK) != TX4939_PCFG_SIO2MODE_SIO2) {
- rst |= TX4939_CLKCTR_SIO2RST;
- ckd |= TX4939_CLKCTR_SIO2CKD;
- strcat(buf, " SIO2");
- }
- if (pcfg & TX4939_PCFG_SIO3MODE) {
- rst |= TX4939_CLKCTR_SIO3RST;
- ckd |= TX4939_CLKCTR_SIO3CKD;
- strcat(buf, " SIO3");
- }
- if (rst | ckd) {
- txx9_set64(&tx4939_ccfgptr->clkctr, rst);
- txx9_set64(&tx4939_ccfgptr->clkctr, ckd);
- }
- local_irq_enable();
- if (buf[0])
- pr_info("%s: stop%s\n", txx9_pcode_str, buf);
-}
-
-static int __init tx4939_late_init(void)
-{
- if (txx9_pcode != 0x4939)
- return -ENODEV;
- tx4939_stop_unused_modules();
- return 0;
-}
-late_initcall(tx4939_late_init);
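
The clock setup removed above reduces to cpuclk = master_clock * 10 * N / 6 for multiplier mode N in 8..15; the /3*4, /2*3 and similar spellings are just reorderings of * N / 6 that avoid 32-bit overflow. A quick standalone check of that arithmetic, assuming an illustrative 24 MHz master clock (an assumed example value, not taken from this patch):

#include <stdio.h>

int main(void)
{
	unsigned int master = 24000000;		/* assumed master clock */
	unsigned int base = master * 20 / 2;	/* master * 10, as above */
	int n;

	/* base / 6 * n matches the overflow-safe forms above because
	   240 MHz divides evenly by 6 */
	for (n = 8; n <= 15; n++)		/* TX4939_CCFG_MULCLK_8..15 */
		printf("mult %2d -> cpuclk %u Hz\n", n, base / 6 * n);
	return 0;
}

With MULCLK_10 this yields the same 400 MHz that the removed code used as its fallback cpu clock.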
diff --git a/arch/mips/txx9/generic/spi_eeprom.c b/arch/mips/txx9/generic/spi_eeprom.c
deleted file mode 100644
index d833dd2c9b55..000000000000
--- a/arch/mips/txx9/generic/spi_eeprom.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * spi_eeprom.c
- * Copyright (C) 2000-2001 Toshiba Corporation
- *
- * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
- */
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/device.h>
-#include <linux/spi/spi.h>
-#include <linux/spi/eeprom.h>
-#include <asm/txx9/spi.h>
-
-#define AT250X0_PAGE_SIZE 8
-
-/* register board information for at25 driver */
-int __init spi_eeprom_register(int busid, int chipid, int size)
-{
- struct spi_board_info info = {
- .modalias = "at25",
- .max_speed_hz = 1500000, /* 1.5Mbps */
- .bus_num = busid,
- .chip_select = chipid,
- /* Mode 0: High-Active, Sample-Then-Shift */
- };
- struct spi_eeprom *eeprom;
- eeprom = kzalloc(sizeof(*eeprom), GFP_KERNEL);
- if (!eeprom)
- return -ENOMEM;
- strcpy(eeprom->name, "at250x0");
- eeprom->byte_len = size;
- eeprom->page_size = AT250X0_PAGE_SIZE;
- eeprom->flags = EE_ADDR1;
- info.platform_data = eeprom;
- return spi_register_board_info(&info, 1);
-}
-
-/* simple temporary spi driver to provide early access to seeprom. */
-
-static struct read_param {
- int busid;
- int chipid;
- int address;
- unsigned char *buf;
- int len;
-} *read_param;
-
-static int __init early_seeprom_probe(struct spi_device *spi)
-{
- int stat = 0;
- u8 cmd[2];
- int len = read_param->len;
- char *buf = read_param->buf;
- int address = read_param->address;
-
- dev_info(&spi->dev, "spiclk %u KHz.\n",
- (spi->max_speed_hz + 500) / 1000);
- if (read_param->busid != spi->master->bus_num ||
- read_param->chipid != spi->chip_select)
- return -ENODEV;
- while (len > 0) {
-		/* spi_write_then_read can only work with small chunks */
- int c = len < AT250X0_PAGE_SIZE ? len : AT250X0_PAGE_SIZE;
- cmd[0] = 0x03; /* AT25_READ */
- cmd[1] = address;
- stat = spi_write_then_read(spi, cmd, sizeof(cmd), buf, c);
- buf += c;
- len -= c;
- address += c;
- }
- return stat;
-}
-
-static struct spi_driver early_seeprom_driver __initdata = {
- .driver = {
- .name = "at25",
- },
- .probe = early_seeprom_probe,
-};
-
-int __init spi_eeprom_read(int busid, int chipid, int address,
- unsigned char *buf, int len)
-{
- int ret;
- struct read_param param = {
- .busid = busid,
- .chipid = chipid,
- .address = address,
- .buf = buf,
- .len = len
- };
-
- read_param = &param;
- ret = spi_register_driver(&early_seeprom_driver);
- if (!ret)
- spi_unregister_driver(&early_seeprom_driver);
- return ret;
-}
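
The early-read path above works by registering a short-lived driver whose probe performs the transfer synchronously before the driver is unregistered again; the framing itself is the plain AT25 READ sequence, opcode 0x03 plus one address byte, at most one 8-byte page per transfer. A sketch of just the chunking and framing, with a hypothetical spi_xfer() standing in for spi_write_then_read():

#include <stdint.h>

#define AT250X0_PAGE_SIZE 8

/* hypothetical transport: write txlen command bytes, read rxlen back */
int spi_xfer(const uint8_t *tx, int txlen, uint8_t *rx, int rxlen);

static int seeprom_read(int address, uint8_t *buf, int len)
{
	while (len > 0) {
		int c = len < AT250X0_PAGE_SIZE ? len : AT250X0_PAGE_SIZE;
		uint8_t cmd[2] = { 0x03, (uint8_t)address }; /* READ, addr */
		int stat = spi_xfer(cmd, sizeof(cmd), buf, c);

		if (stat)
			return stat;
		buf += c;
		len -= c;
		address += c;
	}
	return 0;
}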
diff --git a/arch/mips/txx9/rbtx4938/irq.c b/arch/mips/txx9/rbtx4938/irq.c
deleted file mode 100644
index 58cd7a9272cc..000000000000
--- a/arch/mips/txx9/rbtx4938/irq.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Toshiba RBTX4938 specific interrupt handlers
- * Copyright (C) 2000-2001 Toshiba Corporation
- *
- * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
- */
-
-/*
- * MIPS_CPU_IRQ_BASE+00 Software 0
- * MIPS_CPU_IRQ_BASE+01 Software 1
- * MIPS_CPU_IRQ_BASE+02 Cascade TX4938-CP0
- * MIPS_CPU_IRQ_BASE+03 Multiplexed -- do not use
- * MIPS_CPU_IRQ_BASE+04 Multiplexed -- do not use
- * MIPS_CPU_IRQ_BASE+05 Multiplexed -- do not use
- * MIPS_CPU_IRQ_BASE+06 Multiplexed -- do not use
- * MIPS_CPU_IRQ_BASE+07 CPU TIMER
- *
- * TXX9_IRQ_BASE+00
- * TXX9_IRQ_BASE+01
- * TXX9_IRQ_BASE+02 Cascade RBTX4938-IOC
- * TXX9_IRQ_BASE+03 RBTX4938 RTL-8019AS Ethernet
- * TXX9_IRQ_BASE+04
- * TXX9_IRQ_BASE+05 TX4938 ETH1
- * TXX9_IRQ_BASE+06 TX4938 ETH0
- * TXX9_IRQ_BASE+07
- * TXX9_IRQ_BASE+08 TX4938 SIO 0
- * TXX9_IRQ_BASE+09 TX4938 SIO 1
- * TXX9_IRQ_BASE+10 TX4938 DMA0
- * TXX9_IRQ_BASE+11 TX4938 DMA1
- * TXX9_IRQ_BASE+12 TX4938 DMA2
- * TXX9_IRQ_BASE+13 TX4938 DMA3
- * TXX9_IRQ_BASE+14
- * TXX9_IRQ_BASE+15
- * TXX9_IRQ_BASE+16 TX4938 PCIC
- * TXX9_IRQ_BASE+17 TX4938 TMR0
- * TXX9_IRQ_BASE+18 TX4938 TMR1
- * TXX9_IRQ_BASE+19 TX4938 TMR2
- * TXX9_IRQ_BASE+20
- * TXX9_IRQ_BASE+21
- * TXX9_IRQ_BASE+22 TX4938 PCIERR
- * TXX9_IRQ_BASE+23
- * TXX9_IRQ_BASE+24
- * TXX9_IRQ_BASE+25
- * TXX9_IRQ_BASE+26
- * TXX9_IRQ_BASE+27
- * TXX9_IRQ_BASE+28
- * TXX9_IRQ_BASE+29
- * TXX9_IRQ_BASE+30
- * TXX9_IRQ_BASE+31 TX4938 SPI
- *
- * RBTX4938_IRQ_IOC+00 PCI-D
- * RBTX4938_IRQ_IOC+01 PCI-C
- * RBTX4938_IRQ_IOC+02 PCI-B
- * RBTX4938_IRQ_IOC+03 PCI-A
- * RBTX4938_IRQ_IOC+04 RTC
- * RBTX4938_IRQ_IOC+05 ATA
- * RBTX4938_IRQ_IOC+06 MODEM
- * RBTX4938_IRQ_IOC+07 SWINT
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/txx9/generic.h>
-#include <asm/txx9/rbtx4938.h>
-
-static int toshiba_rbtx4938_irq_nested(int sw_irq)
-{
- u8 level3;
-
- level3 = readb(rbtx4938_imstat_addr);
- if (unlikely(!level3))
- return -1;
- /* must use fls so onboard ATA has priority */
- return RBTX4938_IRQ_IOC + __fls8(level3);
-}
-
-static void toshiba_rbtx4938_irq_ioc_enable(struct irq_data *d)
-{
- unsigned char v;
-
- v = readb(rbtx4938_imask_addr);
- v |= (1 << (d->irq - RBTX4938_IRQ_IOC));
- writeb(v, rbtx4938_imask_addr);
- mmiowb();
-}
-
-static void toshiba_rbtx4938_irq_ioc_disable(struct irq_data *d)
-{
- unsigned char v;
-
- v = readb(rbtx4938_imask_addr);
- v &= ~(1 << (d->irq - RBTX4938_IRQ_IOC));
- writeb(v, rbtx4938_imask_addr);
- mmiowb();
-}
-
-#define TOSHIBA_RBTX4938_IOC_NAME "RBTX4938-IOC"
-static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
- .name = TOSHIBA_RBTX4938_IOC_NAME,
- .irq_mask = toshiba_rbtx4938_irq_ioc_disable,
- .irq_unmask = toshiba_rbtx4938_irq_ioc_enable,
-};
-
-static int rbtx4938_irq_dispatch(int pending)
-{
- int irq;
-
- if (pending & STATUSF_IP7)
- irq = MIPS_CPU_IRQ_BASE + 7;
- else if (pending & STATUSF_IP2) {
- irq = txx9_irq();
- if (irq == RBTX4938_IRQ_IOCINT)
- irq = toshiba_rbtx4938_irq_nested(irq);
- } else if (pending & STATUSF_IP1)
- irq = MIPS_CPU_IRQ_BASE + 0;
- else if (pending & STATUSF_IP0)
- irq = MIPS_CPU_IRQ_BASE + 1;
- else
- irq = -1;
- return irq;
-}
-
-static void __init toshiba_rbtx4938_irq_ioc_init(void)
-{
- int i;
-
- for (i = RBTX4938_IRQ_IOC;
- i < RBTX4938_IRQ_IOC + RBTX4938_NR_IRQ_IOC; i++)
- irq_set_chip_and_handler(i, &toshiba_rbtx4938_irq_ioc_type,
- handle_level_irq);
-
- irq_set_chained_handler(RBTX4938_IRQ_IOCINT, handle_simple_irq);
-}
-
-void __init rbtx4938_irq_setup(void)
-{
- txx9_irq_dispatch = rbtx4938_irq_dispatch;
- /* Now, interrupt control disabled, */
- /* all IRC interrupts are masked, */
-	/* all IRC interrupt modes are Low Active. */
-
- /* mask all IOC interrupts */
- writeb(0, rbtx4938_imask_addr);
-
- /* clear SoftInt interrupts */
- writeb(0, rbtx4938_softint_addr);
- tx4938_irq_init();
- toshiba_rbtx4938_irq_ioc_init();
- /* Onboard 10M Ether: High Active */
- irq_set_irq_type(RBTX4938_IRQ_ETHER, IRQF_TRIGGER_HIGH);
-}
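
toshiba_rbtx4938_irq_nested() above picks among simultaneously pending IOC sources with __fls8(), so the highest set status bit wins; that is why the comment notes the onboard ATA line (bit 5) outranks the PCI lines (bits 0 to 3). The selection rule in isolation, with fls8() as a stand-in for the kernel's __fls8():

#include <stdint.h>

static int fls8(uint8_t v)	/* index of the highest set bit */
{
	int n = -1;

	while (v) {
		v >>= 1;
		n++;
	}
	return n;
}

static int ioc_irqroute(uint8_t istat, int irq_ioc_base)
{
	if (istat == 0)
		return -1;			/* nothing pending */
	return irq_ioc_base + fls8(istat);	/* highest bit wins */
}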
diff --git a/arch/mips/txx9/rbtx4938/prom.c b/arch/mips/txx9/rbtx4938/prom.c
deleted file mode 100644
index 0de84716a428..000000000000
--- a/arch/mips/txx9/rbtx4938/prom.c
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * rbtx4938 specific prom routines
- * Copyright (C) 2000-2001 Toshiba Corporation
- *
- * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
- */
-
-#include <linux/init.h>
-#include <linux/memblock.h>
-#include <asm/txx9/generic.h>
-#include <asm/txx9/rbtx4938.h>
-
-void __init rbtx4938_prom_init(void)
-{
- memblock_add(0, tx4938_get_mem_size());
- txx9_sio_putchar_init(TX4938_SIO_REG(0) & 0xfffffffffULL);
-}
diff --git a/arch/mips/txx9/rbtx4938/setup.c b/arch/mips/txx9/rbtx4938/setup.c
deleted file mode 100644
index e68eb2e7ce0c..000000000000
--- a/arch/mips/txx9/rbtx4938/setup.c
+++ /dev/null
@@ -1,372 +0,0 @@
-/*
- * Setup pointers to hardware-dependent routines.
- * Copyright (C) 2000-2001 Toshiba Corporation
- *
- * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
- */
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-#include <linux/gpio/driver.h>
-#include <linux/gpio.h>
-#include <linux/mtd/physmap.h>
-
-#include <asm/reboot.h>
-#include <asm/io.h>
-#include <asm/txx9/generic.h>
-#include <asm/txx9/pci.h>
-#include <asm/txx9/rbtx4938.h>
-#include <linux/spi/spi.h>
-#include <asm/txx9/spi.h>
-#include <asm/txx9pio.h>
-
-static void rbtx4938_machine_restart(char *command)
-{
- local_irq_disable();
- writeb(1, rbtx4938_softresetlock_addr);
- writeb(1, rbtx4938_sfvol_addr);
- writeb(1, rbtx4938_softreset_addr);
- /* fallback */
- (*_machine_halt)();
-}
-
-static void __init rbtx4938_pci_setup(void)
-{
-#ifdef CONFIG_PCI
- int extarb = !(__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCIARB);
- struct pci_controller *c = &txx9_primary_pcic;
-
- register_pci_controller(c);
-
- if (__raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCI66)
- txx9_pci_option =
- (txx9_pci_option & ~TXX9_PCI_OPT_CLK_MASK) |
- TXX9_PCI_OPT_CLK_66; /* already configured */
-
- /* Reset PCI Bus */
- writeb(0, rbtx4938_pcireset_addr);
- /* Reset PCIC */
- txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
- if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
- TXX9_PCI_OPT_CLK_66)
- tx4938_pciclk66_setup();
- mdelay(10);
- /* clear PCIC reset */
- txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
- writeb(1, rbtx4938_pcireset_addr);
- iob();
-
- tx4938_report_pciclk();
- tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
- if ((txx9_pci_option & TXX9_PCI_OPT_CLK_MASK) ==
- TXX9_PCI_OPT_CLK_AUTO &&
- txx9_pci66_check(c, 0, 0)) {
- /* Reset PCI Bus */
- writeb(0, rbtx4938_pcireset_addr);
- /* Reset PCIC */
- txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
- tx4938_pciclk66_setup();
- mdelay(10);
- /* clear PCIC reset */
- txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIRST);
- writeb(1, rbtx4938_pcireset_addr);
- iob();
- /* Reinitialize PCIC */
- tx4938_report_pciclk();
- tx4927_pcic_setup(tx4938_pcicptr, c, extarb);
- }
-
- if (__raw_readq(&tx4938_ccfgptr->pcfg) &
- (TX4938_PCFG_ETH0_SEL|TX4938_PCFG_ETH1_SEL)) {
- /* Reset PCIC1 */
- txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIC1RST);
- /* PCI1DMD==0 => PCI1CLK==GBUSCLK/2 => PCI66 */
- if (!(__raw_readq(&tx4938_ccfgptr->ccfg)
- & TX4938_CCFG_PCI1DMD))
- tx4938_ccfg_set(TX4938_CCFG_PCI1_66);
- mdelay(10);
- /* clear PCIC1 reset */
- txx9_clear64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIC1RST);
- tx4938_report_pci1clk();
-
- /* mem:64K(max), io:64K(max) (enough for ETH0,ETH1) */
- c = txx9_alloc_pci_controller(NULL, 0, 0x10000, 0, 0x10000);
- register_pci_controller(c);
- tx4927_pcic_setup(tx4938_pcic1ptr, c, 0);
- }
- tx4938_setup_pcierr_irq();
-#endif /* CONFIG_PCI */
-}
-
-/* SPI support */
-
-/* chip select for SPI devices */
-#define SEEPROM1_CS 7 /* PIO7 */
-#define SEEPROM2_CS 0 /* IOC */
-#define SEEPROM3_CS 1 /* IOC */
-#define SRTC_CS 2 /* IOC */
-#define SPI_BUSNO 0
-
-static int __init rbtx4938_ethaddr_init(void)
-{
-#ifdef CONFIG_PCI
- unsigned char dat[17];
- unsigned char sum;
- int i;
-
- /* 0-3: "MAC\0", 4-9:eth0, 10-15:eth1, 16:sum */
- if (spi_eeprom_read(SPI_BUSNO, SEEPROM1_CS, 0, dat, sizeof(dat))) {
- pr_err("seeprom: read error.\n");
- return -ENODEV;
- } else {
- if (strcmp(dat, "MAC") != 0)
- pr_warn("seeprom: bad signature.\n");
- for (i = 0, sum = 0; i < sizeof(dat); i++)
- sum += dat[i];
- if (sum)
- pr_warn("seeprom: bad checksum.\n");
- }
- tx4938_ethaddr_init(&dat[4], &dat[4 + 6]);
-#endif /* CONFIG_PCI */
- return 0;
-}
-
-static void __init rbtx4938_spi_setup(void)
-{
- /* set SPI_SEL */
- txx9_set64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_SPI_SEL);
-}
-
-static struct resource rbtx4938_fpga_resource;
-
-static void __init rbtx4938_time_init(void)
-{
- tx4938_time_init(0);
-}
-
-static void __init rbtx4938_mem_setup(void)
-{
- unsigned long long pcfg;
-
- if (txx9_master_clock == 0)
- txx9_master_clock = 25000000; /* 25MHz */
-
- tx4938_setup();
-
-#ifdef CONFIG_PCI
- txx9_alloc_pci_controller(&txx9_primary_pcic, 0, 0, 0, 0);
- txx9_board_pcibios_setup = tx4927_pcibios_setup;
-#else
- set_io_port_base(RBTX4938_ETHER_BASE);
-#endif
-
- tx4938_sio_init(7372800, 0);
-
-#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_PIO58_61
- pr_info("PIOSEL: disabling both ATA and NAND selection\n");
- txx9_clear64(&tx4938_ccfgptr->pcfg,
- TX4938_PCFG_NDF_SEL | TX4938_PCFG_ATA_SEL);
-#endif
-
-#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_NAND
- pr_info("PIOSEL: enabling NAND selection\n");
- txx9_set64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_NDF_SEL);
- txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_ATA_SEL);
-#endif
-
-#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_ATA
- pr_info("PIOSEL: enabling ATA selection\n");
- txx9_set64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_ATA_SEL);
- txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_NDF_SEL);
-#endif
-
-#ifdef CONFIG_TOSHIBA_RBTX4938_MPLEX_KEEP
- pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg);
- pr_info("PIOSEL: NAND %s, ATA %s\n",
- (pcfg & TX4938_PCFG_NDF_SEL) ? "enabled" : "disabled",
- (pcfg & TX4938_PCFG_ATA_SEL) ? "enabled" : "disabled");
-#endif
-
- rbtx4938_spi_setup();
- pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg); /* updated */
- /* fixup piosel */
- if ((pcfg & (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL)) ==
- TX4938_PCFG_ATA_SEL)
- writeb((readb(rbtx4938_piosel_addr) & 0x03) | 0x04,
- rbtx4938_piosel_addr);
- else if ((pcfg & (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL)) ==
- TX4938_PCFG_NDF_SEL)
- writeb((readb(rbtx4938_piosel_addr) & 0x03) | 0x08,
- rbtx4938_piosel_addr);
- else
- writeb(readb(rbtx4938_piosel_addr) & ~(0x08 | 0x04),
- rbtx4938_piosel_addr);
-
- rbtx4938_fpga_resource.name = "FPGA Registers";
- rbtx4938_fpga_resource.start = CPHYSADDR(RBTX4938_FPGA_REG_ADDR);
- rbtx4938_fpga_resource.end = CPHYSADDR(RBTX4938_FPGA_REG_ADDR) + 0xffff;
- rbtx4938_fpga_resource.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- if (request_resource(&txx9_ce_res[2], &rbtx4938_fpga_resource))
- pr_err("request resource for fpga failed\n");
-
- _machine_restart = rbtx4938_machine_restart;
-
- writeb(0xff, rbtx4938_led_addr);
- pr_info("RBTX4938 --- FPGA(Rev %02x) DIPSW:%02x,%02x\n",
- readb(rbtx4938_fpga_rev_addr),
- readb(rbtx4938_dipsw_addr), readb(rbtx4938_bdipsw_addr));
-}
-
-static void __init rbtx4938_ne_init(void)
-{
- struct resource res[] = {
- {
- .start = RBTX4938_RTL_8019_BASE,
- .end = RBTX4938_RTL_8019_BASE + 0x20 - 1,
- .flags = IORESOURCE_IO,
- }, {
- .start = RBTX4938_RTL_8019_IRQ,
- .flags = IORESOURCE_IRQ,
- }
- };
- platform_device_register_simple("ne", -1, res, ARRAY_SIZE(res));
-}
-
-static DEFINE_SPINLOCK(rbtx4938_spi_gpio_lock);
-
-static void rbtx4938_spi_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
-{
- u8 val;
- unsigned long flags;
- spin_lock_irqsave(&rbtx4938_spi_gpio_lock, flags);
- val = readb(rbtx4938_spics_addr);
- if (value)
- val |= 1 << offset;
- else
- val &= ~(1 << offset);
- writeb(val, rbtx4938_spics_addr);
- mmiowb();
- spin_unlock_irqrestore(&rbtx4938_spi_gpio_lock, flags);
-}
-
-static int rbtx4938_spi_gpio_dir_out(struct gpio_chip *chip,
- unsigned int offset, int value)
-{
- rbtx4938_spi_gpio_set(chip, offset, value);
- return 0;
-}
-
-static struct gpio_chip rbtx4938_spi_gpio_chip = {
- .set = rbtx4938_spi_gpio_set,
- .direction_output = rbtx4938_spi_gpio_dir_out,
- .label = "RBTX4938-SPICS",
- .base = 16,
- .ngpio = 3,
-};
-
-static int __init rbtx4938_spi_init(void)
-{
- struct spi_board_info srtc_info = {
- .modalias = "rtc-rs5c348",
- .max_speed_hz = 1000000, /* 1.0Mbps @ Vdd 2.0V */
- .bus_num = 0,
- .chip_select = 16 + SRTC_CS,
-		/* Mode 1 (High-Active, Shift-Then-Sample), High Active CS */
- .mode = SPI_MODE_1 | SPI_CS_HIGH,
- };
- spi_register_board_info(&srtc_info, 1);
- spi_eeprom_register(SPI_BUSNO, SEEPROM1_CS, 128);
- spi_eeprom_register(SPI_BUSNO, 16 + SEEPROM2_CS, 128);
- spi_eeprom_register(SPI_BUSNO, 16 + SEEPROM3_CS, 128);
- gpio_request(16 + SRTC_CS, "rtc-rs5c348");
- gpio_direction_output(16 + SRTC_CS, 0);
- gpio_request(SEEPROM1_CS, "seeprom1");
- gpio_direction_output(SEEPROM1_CS, 1);
- gpio_request(16 + SEEPROM2_CS, "seeprom2");
- gpio_direction_output(16 + SEEPROM2_CS, 1);
- gpio_request(16 + SEEPROM3_CS, "seeprom3");
- gpio_direction_output(16 + SEEPROM3_CS, 1);
- tx4938_spi_init(SPI_BUSNO);
- return 0;
-}
-
-static void __init rbtx4938_mtd_init(void)
-{
- struct physmap_flash_data pdata = {
- .width = 4,
- };
-
- switch (readb(rbtx4938_bdipsw_addr) & 7) {
- case 0:
- /* Boot */
- txx9_physmap_flash_init(0, 0x1fc00000, 0x400000, &pdata);
- /* System */
- txx9_physmap_flash_init(1, 0x1e000000, 0x1000000, &pdata);
- break;
- case 1:
- /* System */
- txx9_physmap_flash_init(0, 0x1f000000, 0x1000000, &pdata);
- /* Boot */
- txx9_physmap_flash_init(1, 0x1ec00000, 0x400000, &pdata);
- break;
- case 2:
- /* Ext */
- txx9_physmap_flash_init(0, 0x1f000000, 0x1000000, &pdata);
- /* System */
- txx9_physmap_flash_init(1, 0x1e000000, 0x1000000, &pdata);
- /* Boot */
- txx9_physmap_flash_init(2, 0x1dc00000, 0x400000, &pdata);
- break;
- case 3:
- /* Boot */
- txx9_physmap_flash_init(1, 0x1bc00000, 0x400000, &pdata);
- /* System */
- txx9_physmap_flash_init(2, 0x1a000000, 0x1000000, &pdata);
- break;
- }
-}
-
-static void __init rbtx4938_arch_init(void)
-{
- txx9_gpio_init(TX4938_PIO_REG & 0xfffffffffULL, 0, TX4938_NUM_PIO);
- gpiochip_add_data(&rbtx4938_spi_gpio_chip, NULL);
- rbtx4938_pci_setup();
- rbtx4938_spi_init();
-}
-
-static void __init rbtx4938_device_init(void)
-{
- rbtx4938_ethaddr_init();
- rbtx4938_ne_init();
- tx4938_wdt_init();
- rbtx4938_mtd_init();
- /* TC58DVM82A1FT: tDH=10ns, tWP=tRP=tREADID=35ns */
- tx4938_ndfmc_init(10, 35);
- tx4938_ata_init(RBTX4938_IRQ_IOC_ATA, 0, 1);
- tx4938_dmac_init(0, 2);
- tx4938_aclc_init();
- platform_device_register_simple("txx9aclc-generic", -1, NULL, 0);
- tx4938_sramc_init();
- txx9_iocled_init(RBTX4938_LED_ADDR - IO_BASE, -1, 8, 1, "green", NULL);
-}
-
-struct txx9_board_vec rbtx4938_vec __initdata = {
- .system = "Toshiba RBTX4938",
- .prom_init = rbtx4938_prom_init,
- .mem_setup = rbtx4938_mem_setup,
- .irq_setup = rbtx4938_irq_setup,
- .time_init = rbtx4938_time_init,
- .device_init = rbtx4938_device_init,
- .arch_init = rbtx4938_arch_init,
-#ifdef CONFIG_PCI
- .pci_map_irq = rbtx4938_pci_map_irq,
-#endif
-};
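
The SPI chip selects on this board all live in one FPGA register, so the gpio_chip set handler removed above must read-modify-write that register under a spinlock or concurrent callers would clobber each other's bits. The same discipline in a self-contained user-space sketch, where a pthread mutex models spin_lock_irqsave() and a plain variable models the register:

#include <pthread.h>
#include <stdint.h>

static pthread_mutex_t cs_lock = PTHREAD_MUTEX_INITIALIZER;
static volatile uint8_t spics_reg;	/* models rbtx4938_spics_addr */

static void spi_cs_set(unsigned int offset, int value)
{
	pthread_mutex_lock(&cs_lock);
	if (value)
		spics_reg |= 1u << offset;
	else
		spics_reg &= ~(1u << offset);
	pthread_mutex_unlock(&cs_lock);
}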
diff --git a/arch/mips/txx9/rbtx4939/Makefile b/arch/mips/txx9/rbtx4939/Makefile
deleted file mode 100644
index 840496e7a76e..000000000000
--- a/arch/mips/txx9/rbtx4939/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-y += irq.o setup.o prom.o
diff --git a/arch/mips/txx9/rbtx4939/irq.c b/arch/mips/txx9/rbtx4939/irq.c
deleted file mode 100644
index 69a80616f0c9..000000000000
--- a/arch/mips/txx9/rbtx4939/irq.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Toshiba RBTX4939 interrupt routines
- * Based on linux/arch/mips/txx9/rbtx4938/irq.c,
- * and RBTX49xx patch from CELF patch archive.
- *
- * Copyright (C) 2000-2001,2005-2006 Toshiba Corporation
- * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/txx9/rbtx4939.h>
-
-/*
- * RBTX4939 IOC controller definition
- */
-
-static void rbtx4939_ioc_irq_unmask(struct irq_data *d)
-{
- int ioc_nr = d->irq - RBTX4939_IRQ_IOC;
-
- writeb(readb(rbtx4939_ien_addr) | (1 << ioc_nr), rbtx4939_ien_addr);
-}
-
-static void rbtx4939_ioc_irq_mask(struct irq_data *d)
-{
- int ioc_nr = d->irq - RBTX4939_IRQ_IOC;
-
- writeb(readb(rbtx4939_ien_addr) & ~(1 << ioc_nr), rbtx4939_ien_addr);
- mmiowb();
-}
-
-static struct irq_chip rbtx4939_ioc_irq_chip = {
- .name = "IOC",
- .irq_mask = rbtx4939_ioc_irq_mask,
- .irq_unmask = rbtx4939_ioc_irq_unmask,
-};
-
-
-static inline int rbtx4939_ioc_irqroute(void)
-{
- unsigned char istat = readb(rbtx4939_ifac2_addr);
-
- if (unlikely(istat == 0))
- return -1;
- return RBTX4939_IRQ_IOC + __fls8(istat);
-}
-
-static int rbtx4939_irq_dispatch(int pending)
-{
- int irq;
-
- if (pending & CAUSEF_IP7)
- return MIPS_CPU_IRQ_BASE + 7;
- irq = tx4939_irq();
- if (likely(irq >= 0)) {
- /* redirect IOC interrupts */
- switch (irq) {
- case RBTX4939_IRQ_IOCINT:
- irq = rbtx4939_ioc_irqroute();
- break;
- }
- } else if (pending & CAUSEF_IP0)
- irq = MIPS_CPU_IRQ_BASE + 0;
- else if (pending & CAUSEF_IP1)
- irq = MIPS_CPU_IRQ_BASE + 1;
- else
- irq = -1;
- return irq;
-}
-
-void __init rbtx4939_irq_setup(void)
-{
- int i;
-
- /* mask all IOC interrupts */
- writeb(0, rbtx4939_ien_addr);
-
- /* clear SoftInt interrupts */
- writeb(0, rbtx4939_softint_addr);
-
- txx9_irq_dispatch = rbtx4939_irq_dispatch;
-
- tx4939_irq_init();
- for (i = RBTX4939_IRQ_IOC;
- i < RBTX4939_IRQ_IOC + RBTX4939_NR_IRQ_IOC; i++)
- irq_set_chip_and_handler(i, &rbtx4939_ioc_irq_chip,
- handle_level_irq);
-
- irq_set_chained_handler(RBTX4939_IRQ_IOCINT, handle_simple_irq);
-}
diff --git a/arch/mips/txx9/rbtx4939/prom.c b/arch/mips/txx9/rbtx4939/prom.c
deleted file mode 100644
index ba25ba1bd2ec..000000000000
--- a/arch/mips/txx9/rbtx4939/prom.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * rbtx4939 specific prom routines
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/init.h>
-#include <linux/memblock.h>
-#include <asm/txx9/generic.h>
-#include <asm/txx9/rbtx4939.h>
-
-void __init rbtx4939_prom_init(void)
-{
- unsigned long start, size;
- u64 win;
- int i;
-
- for (i = 0; i < 4; i++) {
- if (!((__u32)____raw_readq(&tx4939_ddrcptr->winen) & (1 << i)))
- continue;
- win = ____raw_readq(&tx4939_ddrcptr->win[i]);
- start = (unsigned long)(win >> 48);
- size = (((unsigned long)(win >> 32) & 0xffff) + 1) - start;
- memblock_add(start << 20, size << 20);
- }
- txx9_sio_putchar_init(TX4939_SIO_REG(0) & 0xfffffffffULL);
-}
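
The window decode above treats each DDRC window word in megabyte units: bits 63:48 hold the first MB of the window and bits 47:32 the last one, so the size is last + 1 - first. A worked decode with a made-up window value:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t win = 0x000000ff00000000ULL;	/* hypothetical value */
	unsigned long start = (unsigned long)(win >> 48);	     /* MB */
	unsigned long size =
		(((unsigned long)(win >> 32) & 0xffff) + 1) - start; /* MB */

	/* equivalent to the memblock_add(start << 20, size << 20) above:
	   base 0, size 0x10000000 (256 MB) for this value */
	printf("base %#lx size %#lx\n", start << 20, size << 20);
	return 0;
}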
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
deleted file mode 100644
index ef29a9c2ffd6..000000000000
--- a/arch/mips/txx9/rbtx4939/setup.c
+++ /dev/null
@@ -1,554 +0,0 @@
-/*
- * Toshiba RBTX4939 setup routines.
- * Based on linux/arch/mips/txx9/rbtx4938/setup.c,
- * and RBTX49xx patch from CELF patch archive.
- *
- * Copyright (C) 2000-2001,2005-2007 Toshiba Corporation
- * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/interrupt.h>
-#include <linux/smc91x.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/map.h>
-#include <asm/reboot.h>
-#include <asm/txx9/generic.h>
-#include <asm/txx9/pci.h>
-#include <asm/txx9/rbtx4939.h>
-
-static void rbtx4939_machine_restart(char *command)
-{
- local_irq_disable();
- writeb(1, rbtx4939_reseten_addr);
- writeb(1, rbtx4939_softreset_addr);
- while (1)
- ;
-}
-
-static void __init rbtx4939_time_init(void)
-{
- tx4939_time_init(0);
-}
-
-#if defined(__BIG_ENDIAN) && IS_ENABLED(CONFIG_SMC91X)
-#define HAVE_RBTX4939_IOSWAB
-#define IS_CE1_ADDR(addr) \
- ((((unsigned long)(addr) - IO_BASE) & 0xfff00000) == TXX9_CE(1))
-static u16 rbtx4939_ioswabw(volatile u16 *a, u16 x)
-{
- return IS_CE1_ADDR(a) ? x : le16_to_cpu(x);
-}
-static u16 rbtx4939_mem_ioswabw(volatile u16 *a, u16 x)
-{
- return !IS_CE1_ADDR(a) ? x : le16_to_cpu(x);
-}
-#endif /* __BIG_ENDIAN && CONFIG_SMC91X */
-
-static void __init rbtx4939_pci_setup(void)
-{
-#ifdef CONFIG_PCI
- int extarb = !(__raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_PCIARB);
- struct pci_controller *c = &txx9_primary_pcic;
-
- register_pci_controller(c);
-
- tx4939_report_pciclk();
- tx4927_pcic_setup(tx4939_pcicptr, c, extarb);
- if (!(__raw_readq(&tx4939_ccfgptr->pcfg) & TX4939_PCFG_ATA1MODE) &&
- (__raw_readq(&tx4939_ccfgptr->pcfg) &
- (TX4939_PCFG_ET0MODE | TX4939_PCFG_ET1MODE))) {
- tx4939_report_pci1clk();
-
- /* mem:64K(max), io:64K(max) (enough for ETH0,ETH1) */
- c = txx9_alloc_pci_controller(NULL, 0, 0x10000, 0, 0x10000);
- register_pci_controller(c);
- tx4927_pcic_setup(tx4939_pcic1ptr, c, 0);
- }
-
- tx4939_setup_pcierr_irq();
-#endif /* CONFIG_PCI */
-}
-
-static unsigned long long default_ebccr[] __initdata = {
- 0x01c0000000007608ULL, /* 64M ROM */
- 0x017f000000007049ULL, /* 1M IOC */
- 0x0180000000408608ULL, /* ISA */
- 0,
-};
-
-static void __init rbtx4939_ebusc_setup(void)
-{
- int i;
- unsigned int sp;
-
- /* use user-configured speed */
- sp = TX4939_EBUSC_CR(0) & 0x30;
- default_ebccr[0] |= sp;
- default_ebccr[1] |= sp;
- default_ebccr[2] |= sp;
-	/* initialise the remaining registers ourselves */
- for (i = 0; i < ARRAY_SIZE(default_ebccr); i++) {
- if (default_ebccr[i])
- ____raw_writeq(default_ebccr[i],
- &tx4939_ebuscptr->cr[i]);
- else
- ____raw_writeq(____raw_readq(&tx4939_ebuscptr->cr[i])
- & ~8,
- &tx4939_ebuscptr->cr[i]);
- }
-}
-
-static void __init rbtx4939_update_ioc_pen(void)
-{
- __u64 pcfg = ____raw_readq(&tx4939_ccfgptr->pcfg);
- __u64 ccfg = ____raw_readq(&tx4939_ccfgptr->ccfg);
- __u8 pe1 = readb(rbtx4939_pe1_addr);
- __u8 pe2 = readb(rbtx4939_pe2_addr);
- __u8 pe3 = readb(rbtx4939_pe3_addr);
- if (pcfg & TX4939_PCFG_ATA0MODE)
- pe1 |= RBTX4939_PE1_ATA(0);
- else
- pe1 &= ~RBTX4939_PE1_ATA(0);
- if (pcfg & TX4939_PCFG_ATA1MODE) {
- pe1 |= RBTX4939_PE1_ATA(1);
- pe1 &= ~(RBTX4939_PE1_RMII(0) | RBTX4939_PE1_RMII(1));
- } else {
- pe1 &= ~RBTX4939_PE1_ATA(1);
- if (pcfg & TX4939_PCFG_ET0MODE)
- pe1 |= RBTX4939_PE1_RMII(0);
- else
- pe1 &= ~RBTX4939_PE1_RMII(0);
- if (pcfg & TX4939_PCFG_ET1MODE)
- pe1 |= RBTX4939_PE1_RMII(1);
- else
- pe1 &= ~RBTX4939_PE1_RMII(1);
- }
- if (ccfg & TX4939_CCFG_PTSEL)
- pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_P |
- RBTX4939_PE3_VP_S);
- else {
- __u64 vmode = pcfg &
- (TX4939_PCFG_VSSMODE | TX4939_PCFG_VPSMODE);
- if (vmode == 0)
- pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_P |
- RBTX4939_PE3_VP_S);
- else if (vmode == TX4939_PCFG_VPSMODE) {
- pe3 |= RBTX4939_PE3_VP_P;
- pe3 &= ~(RBTX4939_PE3_VP | RBTX4939_PE3_VP_S);
- } else if (vmode == TX4939_PCFG_VSSMODE) {
- pe3 |= RBTX4939_PE3_VP | RBTX4939_PE3_VP_S;
- pe3 &= ~RBTX4939_PE3_VP_P;
- } else {
- pe3 |= RBTX4939_PE3_VP | RBTX4939_PE3_VP_P;
- pe3 &= ~RBTX4939_PE3_VP_S;
- }
- }
- if (pcfg & TX4939_PCFG_SPIMODE) {
- if (pcfg & TX4939_PCFG_SIO2MODE_GPIO)
- pe2 &= ~(RBTX4939_PE2_SIO2 | RBTX4939_PE2_SIO0);
- else {
- if (pcfg & TX4939_PCFG_SIO2MODE_SIO2) {
- pe2 |= RBTX4939_PE2_SIO2;
- pe2 &= ~RBTX4939_PE2_SIO0;
- } else {
- pe2 |= RBTX4939_PE2_SIO0;
- pe2 &= ~RBTX4939_PE2_SIO2;
- }
- }
- if (pcfg & TX4939_PCFG_SIO3MODE)
- pe2 |= RBTX4939_PE2_SIO3;
- else
- pe2 &= ~RBTX4939_PE2_SIO3;
- pe2 &= ~RBTX4939_PE2_SPI;
- } else {
- pe2 |= RBTX4939_PE2_SPI;
- pe2 &= ~(RBTX4939_PE2_SIO3 | RBTX4939_PE2_SIO2 |
- RBTX4939_PE2_SIO0);
- }
- if ((pcfg & TX4939_PCFG_I2SMODE_MASK) == TX4939_PCFG_I2SMODE_GPIO)
- pe2 |= RBTX4939_PE2_GPIO;
- else
- pe2 &= ~RBTX4939_PE2_GPIO;
- writeb(pe1, rbtx4939_pe1_addr);
- writeb(pe2, rbtx4939_pe2_addr);
- writeb(pe3, rbtx4939_pe3_addr);
-}
-
-#define RBTX4939_MAX_7SEGLEDS 8
-
-#if IS_BUILTIN(CONFIG_LEDS_CLASS)
-static u8 led_val[RBTX4939_MAX_7SEGLEDS];
-struct rbtx4939_led_data {
- struct led_classdev cdev;
- char name[32];
- unsigned int num;
-};
-
-/* Use "dot" in 7seg LEDs */
-static void rbtx4939_led_brightness_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- struct rbtx4939_led_data *led_dat =
- container_of(led_cdev, struct rbtx4939_led_data, cdev);
- unsigned int num = led_dat->num;
- unsigned long flags;
-
- local_irq_save(flags);
- led_val[num] = (led_val[num] & 0x7f) | (value ? 0x80 : 0);
- writeb(led_val[num], rbtx4939_7seg_addr(num / 4, num % 4));
- local_irq_restore(flags);
-}
-
-static int __init rbtx4939_led_probe(struct platform_device *pdev)
-{
- struct rbtx4939_led_data *leds_data;
- int i;
- static char *default_triggers[] __initdata = {
- "heartbeat",
- "disk-activity",
- "nand-disk",
- };
-
- leds_data = kcalloc(RBTX4939_MAX_7SEGLEDS, sizeof(*leds_data),
- GFP_KERNEL);
- if (!leds_data)
- return -ENOMEM;
- for (i = 0; i < RBTX4939_MAX_7SEGLEDS; i++) {
- int rc;
- struct rbtx4939_led_data *led_dat = &leds_data[i];
-
- led_dat->num = i;
- led_dat->cdev.brightness_set = rbtx4939_led_brightness_set;
- sprintf(led_dat->name, "rbtx4939:amber:%u", i);
- led_dat->cdev.name = led_dat->name;
- if (i < ARRAY_SIZE(default_triggers))
- led_dat->cdev.default_trigger = default_triggers[i];
- rc = led_classdev_register(&pdev->dev, &led_dat->cdev);
- if (rc < 0)
- return rc;
- led_dat->cdev.brightness_set(&led_dat->cdev, 0);
- }
- return 0;
-
-}
-
-static struct platform_driver rbtx4939_led_driver = {
- .driver = {
- .name = "rbtx4939-led",
- },
-};
-
-static void __init rbtx4939_led_setup(void)
-{
- platform_device_register_simple("rbtx4939-led", -1, NULL, 0);
- platform_driver_probe(&rbtx4939_led_driver, rbtx4939_led_probe);
-}
-#else
-static inline void rbtx4939_led_setup(void)
-{
-}
-#endif
-
-static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
-{
-#if IS_BUILTIN(CONFIG_LEDS_CLASS)
- unsigned long flags;
- local_irq_save(flags);
- /* bit7: reserved for LED class */
- led_val[pos] = (led_val[pos] & 0x80) | (val & 0x7f);
- val = led_val[pos];
- local_irq_restore(flags);
-#endif
- writeb(val, rbtx4939_7seg_addr(pos / 4, pos % 4));
-}
-
-static void rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
-{
- /* convert from map_to_seg7() notation */
- val = (val & 0x88) |
- ((val & 0x40) >> 6) |
- ((val & 0x20) >> 4) |
- ((val & 0x10) >> 2) |
- ((val & 0x04) << 2) |
- ((val & 0x02) << 4) |
- ((val & 0x01) << 6);
- __rbtx4939_7segled_putc(pos, val);
-}
-
-#if IS_ENABLED(CONFIG_MTD_RBTX4939)
-/* special mapping for boot rom */
-static unsigned long rbtx4939_flash_fixup_ofs(unsigned long ofs)
-{
- u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f;
- unsigned char shift;
-
- if (bdipsw & 8) {
- /* BOOT Mode: USER ROM1 / USER ROM2 */
- shift = bdipsw & 3;
- /* rotate A[23:22] */
- return (ofs & ~0xc00000) | ((((ofs >> 22) + shift) & 3) << 22);
- }
-#ifdef __BIG_ENDIAN
- if (bdipsw == 0)
- /* BOOT Mode: Monitor ROM */
- ofs ^= 0x400000; /* swap A[22] */
-#endif
- return ofs;
-}
-
-static map_word rbtx4939_flash_read16(struct map_info *map, unsigned long ofs)
-{
- map_word r;
-
- ofs = rbtx4939_flash_fixup_ofs(ofs);
- r.x[0] = __raw_readw(map->virt + ofs);
- return r;
-}
-
-static void rbtx4939_flash_write16(struct map_info *map, const map_word datum,
- unsigned long ofs)
-{
- ofs = rbtx4939_flash_fixup_ofs(ofs);
- __raw_writew(datum.x[0], map->virt + ofs);
- mb(); /* see inline_map_write() in mtd/map.h */
-}
-
-static void rbtx4939_flash_copy_from(struct map_info *map, void *to,
- unsigned long from, ssize_t len)
-{
- u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f;
- unsigned char shift;
- ssize_t curlen;
-
- from += (unsigned long)map->virt;
- if (bdipsw & 8) {
- /* BOOT Mode: USER ROM1 / USER ROM2 */
- shift = bdipsw & 3;
- while (len) {
- curlen = min_t(unsigned long, len,
- 0x400000 - (from & (0x400000 - 1)));
- memcpy(to,
- (void *)((from & ~0xc00000) |
- ((((from >> 22) + shift) & 3) << 22)),
- curlen);
- len -= curlen;
- from += curlen;
- to += curlen;
- }
- return;
- }
-#ifdef __BIG_ENDIAN
- if (bdipsw == 0) {
- /* BOOT Mode: Monitor ROM */
- while (len) {
- curlen = min_t(unsigned long, len,
- 0x400000 - (from & (0x400000 - 1)));
- memcpy(to, (void *)(from ^ 0x400000), curlen);
- len -= curlen;
- from += curlen;
- to += curlen;
- }
- return;
- }
-#endif
- memcpy(to, (void *)from, len);
-}
-
-static void rbtx4939_flash_map_init(struct map_info *map)
-{
- map->read = rbtx4939_flash_read16;
- map->write = rbtx4939_flash_write16;
- map->copy_from = rbtx4939_flash_copy_from;
-}
-
-static void __init rbtx4939_mtd_init(void)
-{
- static struct {
- struct platform_device dev;
- struct resource res;
- struct rbtx4939_flash_data data;
- } pdevs[4];
- int i;
- static char names[4][8];
- static struct mtd_partition parts[4];
- struct rbtx4939_flash_data *boot_pdata = &pdevs[0].data;
- u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f;
-
- if (bdipsw & 8) {
- /* BOOT Mode: USER ROM1 / USER ROM2 */
- boot_pdata->nr_parts = 4;
- for (i = 0; i < boot_pdata->nr_parts; i++) {
- sprintf(names[i], "img%d", 4 - i);
- parts[i].name = names[i];
- parts[i].size = 0x400000;
- parts[i].offset = MTDPART_OFS_NXTBLK;
- }
- } else if (bdipsw == 0) {
- /* BOOT Mode: Monitor ROM */
- boot_pdata->nr_parts = 2;
- strcpy(names[0], "big");
- strcpy(names[1], "little");
- for (i = 0; i < boot_pdata->nr_parts; i++) {
- parts[i].name = names[i];
- parts[i].size = 0x400000;
- parts[i].offset = MTDPART_OFS_NXTBLK;
- }
- } else {
- /* BOOT Mode: ROM Emulator */
- boot_pdata->nr_parts = 2;
- parts[0].name = "boot";
- parts[0].offset = 0xc00000;
- parts[0].size = 0x400000;
- parts[1].name = "user";
- parts[1].offset = 0;
- parts[1].size = 0xc00000;
- }
- boot_pdata->parts = parts;
- boot_pdata->map_init = rbtx4939_flash_map_init;
-
- for (i = 0; i < ARRAY_SIZE(pdevs); i++) {
- struct resource *r = &pdevs[i].res;
- struct platform_device *dev = &pdevs[i].dev;
-
- r->start = 0x1f000000 - i * 0x1000000;
- r->end = r->start + 0x1000000 - 1;
- r->flags = IORESOURCE_MEM;
- pdevs[i].data.width = 2;
- dev->num_resources = 1;
- dev->resource = r;
- dev->id = i;
- dev->name = "rbtx4939-flash";
- dev->dev.platform_data = &pdevs[i].data;
- platform_device_register(dev);
- }
-}
-#else
-static void __init rbtx4939_mtd_init(void)
-{
-}
-#endif
-
-static void __init rbtx4939_arch_init(void)
-{
- rbtx4939_pci_setup();
-}
-
-static void __init rbtx4939_device_init(void)
-{
- unsigned long smc_addr = RBTX4939_ETHER_ADDR - IO_BASE;
- struct resource smc_res[] = {
- {
- .start = smc_addr,
- .end = smc_addr + 0x10 - 1,
- .flags = IORESOURCE_MEM,
- }, {
- .start = RBTX4939_IRQ_ETHER,
- /* override default irq flag defined in smc91x.h */
- .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
- },
- };
- struct smc91x_platdata smc_pdata = {
- .flags = SMC91X_USE_16BIT,
- };
- struct platform_device *pdev;
-#if IS_ENABLED(CONFIG_TC35815)
- int i, j;
- unsigned char ethaddr[2][6];
- u8 bdipsw = readb(rbtx4939_bdipsw_addr) & 0x0f;
-
- for (i = 0; i < 2; i++) {
- unsigned long area = CKSEG1 + 0x1fff0000 + (i * 0x10);
- if (bdipsw == 0)
- memcpy(ethaddr[i], (void *)area, 6);
- else {
- u16 buf[3];
- if (bdipsw & 8)
- area -= 0x03000000;
- else
- area -= 0x01000000;
- for (j = 0; j < 3; j++)
- buf[j] = le16_to_cpup((u16 *)(area + j * 2));
- memcpy(ethaddr[i], buf, 6);
- }
- }
- tx4939_ethaddr_init(ethaddr[0], ethaddr[1]);
-#endif
- pdev = platform_device_alloc("smc91x", -1);
- if (!pdev ||
- platform_device_add_resources(pdev, smc_res, ARRAY_SIZE(smc_res)) ||
- platform_device_add_data(pdev, &smc_pdata, sizeof(smc_pdata)) ||
- platform_device_add(pdev))
- platform_device_put(pdev);
- rbtx4939_mtd_init();
- /* TC58DVM82A1FT: tDH=10ns, tWP=tRP=tREADID=35ns */
- tx4939_ndfmc_init(10, 35,
- (1 << 1) | (1 << 2),
- (1 << 2)); /* ch1:8bit, ch2:16bit */
- rbtx4939_led_setup();
- tx4939_wdt_init();
- tx4939_ata_init();
- tx4939_rtc_init();
- tx4939_dmac_init(0, 2);
- tx4939_aclc_init();
- platform_device_register_simple("txx9aclc-generic", -1, NULL, 0);
- tx4939_sramc_init();
- tx4939_rng_init();
-}
-
-static void __init rbtx4939_setup(void)
-{
- int i;
-
- rbtx4939_ebusc_setup();
- /* always enable ATA0 */
- txx9_set64(&tx4939_ccfgptr->pcfg, TX4939_PCFG_ATA0MODE);
- if (txx9_master_clock == 0)
- txx9_master_clock = 20000000;
- tx4939_setup();
- rbtx4939_update_ioc_pen();
-#ifdef HAVE_RBTX4939_IOSWAB
- ioswabw = rbtx4939_ioswabw;
- __mem_ioswabw = rbtx4939_mem_ioswabw;
-#endif
-
- _machine_restart = rbtx4939_machine_restart;
-
- txx9_7segled_init(RBTX4939_MAX_7SEGLEDS, rbtx4939_7segled_putc);
- for (i = 0; i < RBTX4939_MAX_7SEGLEDS; i++)
- txx9_7segled_putc(i, '-');
- pr_info("RBTX4939 (Rev %02x) --- FPGA(Rev %02x) DIPSW:%02x,%02x\n",
- readb(rbtx4939_board_rev_addr), readb(rbtx4939_ioc_rev_addr),
- readb(rbtx4939_udipsw_addr), readb(rbtx4939_bdipsw_addr));
-
-#ifdef CONFIG_PCI
- txx9_alloc_pci_controller(&txx9_primary_pcic, 0, 0, 0, 0);
- txx9_board_pcibios_setup = tx4927_pcibios_setup;
-#else
- set_io_port_base(RBTX4939_ETHER_BASE);
-#endif
-
- tx4939_sio_init(TX4939_SCLK0(txx9_master_clock), 0);
-}
-
-struct txx9_board_vec rbtx4939_vec __initdata = {
- .system = "Toshiba RBTX4939",
- .prom_init = rbtx4939_prom_init,
- .mem_setup = rbtx4939_setup,
- .irq_setup = rbtx4939_irq_setup,
- .time_init = rbtx4939_time_init,
- .device_init = rbtx4939_device_init,
- .arch_init = rbtx4939_arch_init,
-#ifdef CONFIG_PCI
- .pci_map_irq = tx4939_pci_map_irq,
-#endif
-};
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 797ad9b450af..b33d5d81b6ae 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -31,12 +31,6 @@ core-y += arch/nds32/kernel/ arch/nds32/mm/
core-$(CONFIG_FPU) += arch/nds32/math-emu/
libs-y += arch/nds32/lib/
-ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""'
-BUILTIN_DTB := y
-else
-BUILTIN_DTB := n
-endif
-
ifdef CONFIG_CPU_LITTLE_ENDIAN
KBUILD_CFLAGS += $(call cc-option, -EL)
KBUILD_AFLAGS += $(call cc-option, -EL)
diff --git a/arch/nds32/boot/dts/Makefile b/arch/nds32/boot/dts/Makefile
index f84bd529b6fd..4fc69562eae8 100644
--- a/arch/nds32/boot/dts/Makefile
+++ b/arch/nds32/boot/dts/Makefile
@@ -1,7 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-ifneq '$(CONFIG_NDS32_BUILTIN_DTB)' '""'
-BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_NDS32_BUILTIN_DTB)).dtb.o
-else
-BUILTIN_DTB :=
-endif
-obj-$(CONFIG_OF) += $(BUILTIN_DTB)
+obj-$(CONFIG_OF) += $(addsuffix .dtb.o, $(CONFIG_NDS32_BUILTIN_DTB))
diff --git a/arch/nds32/kernel/fpu.c b/arch/nds32/kernel/fpu.c
index 9edd7ed7d7bf..701c09a668de 100644
--- a/arch/nds32/kernel/fpu.c
+++ b/arch/nds32/kernel/fpu.c
@@ -223,7 +223,7 @@ inline void handle_fpu_exception(struct pt_regs *regs)
}
} else if (fpcsr & FPCSR_mskRIT) {
if (!user_mode(regs))
- do_exit(SIGILL);
+ make_task_dead(SIGILL);
si_signo = SIGILL;
}
diff --git a/arch/nds32/kernel/perf_event_cpu.c b/arch/nds32/kernel/perf_event_cpu.c
index 0ce6f9f307e6..a78a879e7ef1 100644
--- a/arch/nds32/kernel/perf_event_cpu.c
+++ b/arch/nds32/kernel/perf_event_cpu.c
@@ -1371,11 +1371,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry,
leaf_fp = 0;
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- /* We don't support guest os callchain now */
- return;
- }
-
perf_callchain_store(entry, regs->ipc);
fp = regs->fp;
gp = regs->gp;
@@ -1481,10 +1476,6 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
{
struct stackframe fr;
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- /* We don't support guest os callchain now */
- return;
- }
fr.fp = regs->fp;
fr.lp = regs->lp;
fr.sp = regs->sp;
@@ -1493,10 +1484,6 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- /* However, NDS32 does not support virtualization */
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- return perf_guest_cbs->get_guest_ip();
-
return instruction_pointer(regs);
}
@@ -1504,18 +1491,10 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
{
int misc = 0;
- /* However, NDS32 does not support virtualization */
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- if (perf_guest_cbs->is_user_mode())
- misc |= PERF_RECORD_MISC_GUEST_USER;
- else
- misc |= PERF_RECORD_MISC_GUEST_KERNEL;
- } else {
- if (user_mode(regs))
- misc |= PERF_RECORD_MISC_USER;
- else
- misc |= PERF_RECORD_MISC_KERNEL;
- }
+ if (user_mode(regs))
+ misc |= PERF_RECORD_MISC_USER;
+ else
+ misc |= PERF_RECORD_MISC_KERNEL;
return misc;
}
diff --git a/arch/nds32/kernel/traps.c b/arch/nds32/kernel/traps.c
index ca75d475eda4..c0a8f3344fb9 100644
--- a/arch/nds32/kernel/traps.c
+++ b/arch/nds32/kernel/traps.c
@@ -141,7 +141,7 @@ void __noreturn die(const char *str, struct pt_regs *regs, int err)
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
EXPORT_SYMBOL(die);
@@ -240,7 +240,7 @@ void unhandled_interruption(struct pt_regs *regs)
pr_emerg("unhandled_interruption\n");
show_regs(regs);
if (!user_mode(regs))
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
force_sig(SIGKILL);
}
@@ -251,7 +251,7 @@ void unhandled_exceptions(unsigned long entry, unsigned long addr,
addr, type);
show_regs(regs);
if (!user_mode(regs))
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
force_sig(SIGKILL);
}
@@ -278,7 +278,7 @@ void do_revinsn(struct pt_regs *regs)
pr_emerg("Reserved Instruction\n");
show_regs(regs);
if (!user_mode(regs))
- do_exit(SIGILL);
+ make_task_dead(SIGILL);
force_sig(SIGILL);
}
diff --git a/arch/nds32/mm/fault.c b/arch/nds32/mm/fault.c
index 1d139b117168..636977a1c8b9 100644
--- a/arch/nds32/mm/fault.c
+++ b/arch/nds32/mm/fault.c
@@ -230,16 +230,14 @@ good_area:
goto bad_area;
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
-
- /* No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
- goto retry;
- }
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
+
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+ goto retry;
}
mmap_read_unlock(mm);
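
The fault.c hunks above (and the matching ones in nios2, openrisc and parisc below) all drop the FAULT_FLAG_ALLOW_RETRY guard because handle_mm_fault() only returns VM_FAULT_RETRY when that flag was set in the first place. A minimal userspace sketch of the retry pattern these hunks converge on — the flag names are kept, but fake_handle_mm_fault() is an illustrative stand-in, not the kernel API:

#include <stdio.h>

#define FAULT_FLAG_ALLOW_RETRY	0x01
#define FAULT_FLAG_TRIED	0x02
#define VM_FAULT_RETRY		0x04

/* Pretend handler: asks for one retry, then succeeds. VM_FAULT_RETRY is
 * only ever returned when ALLOW_RETRY was set and TRIED was not, which is
 * why the outer "if (flags & FAULT_FLAG_ALLOW_RETRY)" test was redundant. */
static unsigned int fake_handle_mm_fault(unsigned int flags)
{
	if ((flags & FAULT_FLAG_ALLOW_RETRY) && !(flags & FAULT_FLAG_TRIED))
		return VM_FAULT_RETRY;
	return 0;
}

int main(void)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
	unsigned int fault;

retry:
	fault = fake_handle_mm_fault(flags);
	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;
		/* the handler already dropped the mmap lock for us */
		goto retry;
	}
	printf("fault resolved after %s\n",
	       (flags & FAULT_FLAG_TRIED) ? "one retry" : "no retries");
	return 0;
}
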
diff --git a/arch/nios2/boot/dts/Makefile b/arch/nios2/boot/dts/Makefile
index a91a0b09be63..e9e31bb40df8 100644
--- a/arch/nios2/boot/dts/Makefile
+++ b/arch/nios2/boot/dts/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y := $(patsubst "%.dts",%.dtb.o,$(CONFIG_NIOS2_DTB_SOURCE))
+obj-y := $(patsubst %.dts,%.dtb.o,$(CONFIG_NIOS2_DTB_SOURCE))
dtstree := $(srctree)/$(src)
dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c
index 596986a74a26..85ac49d64cf7 100644
--- a/arch/nios2/kernel/traps.c
+++ b/arch/nios2/kernel/traps.c
@@ -37,10 +37,10 @@ void die(const char *str, struct pt_regs *regs, long err)
show_regs(regs);
spin_unlock_irq(&die_lock);
/*
- * do_exit() should take care of panic'ing from an interrupt
+ * make_task_dead() should take care of panic'ing from an interrupt
* context so we don't handle it here
*/
- do_exit(err);
+ make_task_dead(err);
}
void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr)
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 9476feecf512..a32f14cd72f2 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -149,18 +149,16 @@ good_area:
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- /*
- * No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
- goto retry;
- }
+ goto retry;
}
mmap_read_unlock(mm);
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index c2491b295d60..f724b3f1aeed 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -10,6 +10,7 @@ config OPENRISC
select ARCH_HAS_DMA_SET_UNCACHED
select ARCH_HAS_DMA_CLEAR_UNCACHED
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ select COMMON_CLK
select OF
select OF_EARLY_FLATTREE
select IRQ_DOMAIN
diff --git a/arch/openrisc/boot/dts/Makefile b/arch/openrisc/boot/dts/Makefile
index 17dd791a833f..13db5a2aab52 100644
--- a/arch/openrisc/boot/dts/Makefile
+++ b/arch/openrisc/boot/dts/Makefile
@@ -1,9 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-ifneq '$(CONFIG_OPENRISC_BUILTIN_DTB)' '""'
-BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_OPENRISC_BUILTIN_DTB)).dtb.o
-else
-BUILTIN_DTB :=
-endif
-obj-y += $(BUILTIN_DTB)
+obj-y += $(addsuffix .dtb.o, $(CONFIG_OPENRISC_BUILTIN_DTB))
#DTC_FLAGS ?= -p 1024
diff --git a/arch/openrisc/include/asm/bitops.h b/arch/openrisc/include/asm/bitops.h
index 7f1ca35213d8..d773ed938acb 100644
--- a/arch/openrisc/include/asm/bitops.h
+++ b/arch/openrisc/include/asm/bitops.h
@@ -30,7 +30,6 @@
#include <asm/bitops/fls.h>
#include <asm/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
diff --git a/arch/openrisc/include/asm/syscalls.h b/arch/openrisc/include/asm/syscalls.h
index 3a7eeae6f56a..aa1c7e98722e 100644
--- a/arch/openrisc/include/asm/syscalls.h
+++ b/arch/openrisc/include/asm/syscalls.h
@@ -22,9 +22,11 @@ asmlinkage long sys_or1k_atomic(unsigned long type, unsigned long *v1,
asmlinkage long __sys_clone(unsigned long clone_flags, unsigned long newsp,
void __user *parent_tid, void __user *child_tid, int tls);
+asmlinkage long __sys_clone3(struct clone_args __user *uargs, size_t size);
asmlinkage long __sys_fork(void);
#define sys_clone __sys_clone
+#define sys_clone3 __sys_clone3
#define sys_fork __sys_fork
#endif /* __ASM_OPENRISC_SYSCALLS_H */
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index 59c6d3aa7081..3ca1b1f490b9 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -1001,11 +1001,10 @@ ENTRY(ret_from_fork)
l.lwz r11,PT_GPR11(r1)
/* The syscall fast path return expects call-saved registers
- * r12-r28 to be untouched, so we restore them here as they
+ * r14-r28 to be untouched, so we restore them here as they
* will have been effectively clobbered when arriving here
* via the call to switch()
*/
- l.lwz r12,PT_GPR12(r1)
l.lwz r14,PT_GPR14(r1)
l.lwz r16,PT_GPR16(r1)
l.lwz r18,PT_GPR18(r1)
@@ -1037,10 +1036,10 @@ ENTRY(ret_from_fork)
/* _switch MUST never lie on a page boundary, because it runs from
 * effective addresses and being interrupted by an iTLB miss would kill it.
- * dTLB miss seams to never accour in the bad place since data accesses
+ * dTLB miss seems to never occur in the bad place since data accesses
 * are from task structures which are always page aligned.
 *
- * The problem happens in RESTORE_ALL_NO_R11 where we first set the EPCR
+ * The problem happens in RESTORE_ALL where we first set the EPCR
 * register, then load the previous register values and only at the end call
 * the l.rfe instruction. If we get a TLB miss in between, the EPCR register gets
 * garbled and we end up calling l.rfe with the wrong EPCR. (same probably
@@ -1068,9 +1067,8 @@ ENTRY(_switch)
/* No need to store r1/PT_SP as it goes into KSP below */
l.sw PT_GPR2(r1),r2
l.sw PT_GPR9(r1),r9
- /* This is wrong, r12 shouldn't be here... but GCC is broken for the time being
- * and expects r12 to be callee-saved... */
- l.sw PT_GPR12(r1),r12
+
+ /* Save callee-saved registers to the new pt_regs */
l.sw PT_GPR14(r1),r14
l.sw PT_GPR16(r1),r16
l.sw PT_GPR18(r1),r18
@@ -1111,9 +1109,7 @@ ENTRY(_switch)
/* No need to restore r10 */
/* ...and do not restore r11 */
- /* This is wrong, r12 shouldn't be here... but GCC is broken for the time being
- * and expects r12 to be callee-saved... */
- l.lwz r12,PT_GPR12(r1)
+ /* Restore callee-saved registers */
l.lwz r14,PT_GPR14(r1)
l.lwz r16,PT_GPR16(r1)
l.lwz r18,PT_GPR18(r1)
@@ -1166,15 +1162,18 @@ _fork_save_extra_regs_and_call:
ENTRY(__sys_clone)
l.movhi r29,hi(sys_clone)
- l.ori r29,r29,lo(sys_clone)
l.j _fork_save_extra_regs_and_call
- l.nop
+ l.ori r29,r29,lo(sys_clone)
+
+ENTRY(__sys_clone3)
+ l.movhi r29,hi(sys_clone3)
+ l.j _fork_save_extra_regs_and_call
+ l.ori r29,r29,lo(sys_clone3)
ENTRY(__sys_fork)
l.movhi r29,hi(sys_fork)
- l.ori r29,r29,lo(sys_fork)
l.j _fork_save_extra_regs_and_call
- l.nop
+ l.ori r29,r29,lo(sys_fork)
ENTRY(sys_rt_sigreturn)
l.jal _sys_rt_sigreturn
diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c
index a6e69386f82a..6d18989d63d0 100644
--- a/arch/openrisc/kernel/time.c
+++ b/arch/openrisc/kernel/time.c
@@ -20,6 +20,7 @@
#include <linux/clockchips.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/of_clk.h>
#include <asm/cpuinfo.h>
@@ -169,4 +170,7 @@ void __init time_init(void)
openrisc_timer_init();
openrisc_clockevent_init();
+
+ of_clk_init(NULL);
+ timer_probe();
}
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index 0898cb159fac..0446a3c34372 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -212,7 +212,7 @@ void __noreturn die(const char *str, struct pt_regs *regs, long err)
__asm__ __volatile__("l.nop 1");
do {} while (1);
#endif
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
/* This is normally the 'Oops' routine */
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index f0fa6394a58e..80bb66ad42f6 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -177,18 +177,16 @@ good_area:
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- /*RGD modeled on Cris */
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
+	/* RGD modeled on Cris */
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- /* No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
- goto retry;
- }
+ goto retry;
}
mmap_read_unlock(mm);
diff --git a/arch/parisc/boot/compressed/Makefile b/arch/parisc/boot/compressed/Makefile
index 116bd5c1873c..a294a1b58ee7 100644
--- a/arch/parisc/boot/compressed/Makefile
+++ b/arch/parisc/boot/compressed/Makefile
@@ -50,8 +50,6 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -R .note -S
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
-vmlinux.bin.all-y := $(obj)/vmlinux.bin
-
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
suffix-$(CONFIG_KERNEL_LZ4) := lz4
@@ -59,18 +57,18 @@ suffix-$(CONFIG_KERNEL_LZMA) := lzma
suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_XZ) := xz
-$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
-$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,bzip2)
-$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,lz4)
-$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,lzma)
-$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,lzo)
-$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,xzkern)
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,bzip2_with_size)
+$(obj)/vmlinux.bin.lz4: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lz4_with_size)
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzma_with_size)
+$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzo_with_size)
+$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,xzkern_with_size)
LDFLAGS_piggy.o := -r --format binary --oformat $(LD_BFD) -T
$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
diff --git a/arch/parisc/include/asm/bitops.h b/arch/parisc/include/asm/bitops.h
index daa2afd974fb..56ffd260c669 100644
--- a/arch/parisc/include/asm/bitops.h
+++ b/arch/parisc/include/asm/bitops.h
@@ -12,6 +12,14 @@
#include <asm/barrier.h>
#include <linux/atomic.h>
+/* compiler build environment sanity checks: */
+#if !defined(CONFIG_64BIT) && defined(__LP64__)
+#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
+#endif
+#if defined(CONFIG_64BIT) && !defined(__LP64__)
+#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
+#endif
+
/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
* on use of volatile and __*_bit() (set/clear/change):
* *_bit() want use of volatile.
@@ -203,7 +211,6 @@ static __inline__ int fls(unsigned int x)
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
#include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
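
The new #error guards above cross-check the kernel's CONFIG_64BIT selection against the compiler's own __LP64__ define, so a mismatched ARCH= / toolchain combination fails immediately with a readable message instead of much later with obscure breakage. A standalone sketch of the same pattern, using a hypothetical BUILD_64BIT command-line flag in place of CONFIG_64BIT:

/* compile with -DBUILD_64BIT=0 or -DBUILD_64BIT=1 */
#ifndef BUILD_64BIT
#error "Define BUILD_64BIT to 0 or 1 on the compiler command line."
#endif

#if !BUILD_64BIT && defined(__LP64__)
#error "32-bit build requested but the compiler targets LP64."
#endif
#if BUILD_64BIT && !defined(__LP64__)
#error "64-bit build requested but the compiler does not target LP64."
#endif

int main(void) { return 0; }
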
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index b669f4b9040b..3a3d05438408 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -289,6 +289,7 @@ extern int _parisc_requires_coherency;
extern int running_on_qemu;
+extern void __noreturn toc_intr(struct pt_regs *regs);
extern void toc_handler(void);
extern unsigned int toc_handler_size;
extern unsigned int toc_handler_csum;
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index ebf8a845b017..123d5f16cd9d 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -89,8 +89,8 @@ struct exception_table_entry {
__asm__("1: " ldx " 0(" sr "%2),%0\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- : "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err)); \
+ : "=r"(__gu_val), "+r"(__gu_err) \
+ : "r"(ptr)); \
\
(val) = (__force __typeof__(*(ptr))) __gu_val; \
}
@@ -123,8 +123,8 @@ struct exception_table_entry {
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
- : "=&r"(__gu_tmp.l), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err)); \
+ : "=&r"(__gu_tmp.l), "+r"(__gu_err) \
+ : "r"(ptr)); \
\
(val) = __gu_tmp.t; \
}
@@ -135,13 +135,12 @@ struct exception_table_entry {
#define __put_user_internal(sr, x, ptr) \
({ \
ASM_EXCEPTIONTABLE_VAR(__pu_err); \
- __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \
\
switch (sizeof(*(ptr))) { \
- case 1: __put_user_asm(sr, "stb", __x, ptr); break; \
- case 2: __put_user_asm(sr, "sth", __x, ptr); break; \
- case 4: __put_user_asm(sr, "stw", __x, ptr); break; \
- case 8: STD_USER(sr, __x, ptr); break; \
+ case 1: __put_user_asm(sr, "stb", x, ptr); break; \
+ case 2: __put_user_asm(sr, "sth", x, ptr); break; \
+ case 4: __put_user_asm(sr, "stw", x, ptr); break; \
+ case 8: STD_USER(sr, x, ptr); break; \
default: BUILD_BUG(); \
} \
\
@@ -150,7 +149,9 @@ struct exception_table_entry {
#define __put_user(x, ptr) \
({ \
- __put_user_internal("%%sr3,", x, ptr); \
+ __typeof__(&*(ptr)) __ptr = ptr; \
+ __typeof__(*(__ptr)) __x = (__typeof__(*(__ptr)))(x); \
+ __put_user_internal("%%sr3,", __x, __ptr); \
})
#define __put_kernel_nofault(dst, src, type, err_label) \
@@ -180,8 +181,8 @@ struct exception_table_entry {
"1: " stx " %2,0(" sr "%1)\n" \
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
- : "=r"(__pu_err) \
- : "r"(ptr), "r"(x), "0"(__pu_err))
+ : "+r"(__pu_err) \
+ : "r"(ptr), "r"(x))
#if !defined(CONFIG_64BIT)
@@ -193,8 +194,8 @@ struct exception_table_entry {
"9:\n" \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
- : "=r"(__pu_err) \
- : "r"(ptr), "r"(__val), "0"(__pu_err)); \
+ : "+r"(__pu_err) \
+ : "r"(ptr), "r"(__val)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
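
The __put_user() change above captures the pointer in a local __typeof__ variable so that an argument with side effects is evaluated exactly once before __put_user_internal() expands it several times. A small, self-contained illustration of the hazard and of the fix — STORE_BAD/STORE_GOOD are made-up names, not parisc code:

#include <stdio.h>

/* expands its ptr argument twice */
#define STORE_BAD(x, ptr) do { *(ptr) = (x); *(ptr) += 0; } while (0)

/* evaluates ptr once, like the reworked __put_user() */
#define STORE_GOOD(x, ptr) do { \
	__typeof__(&*(ptr)) __p = (ptr); \
	*__p = (x); \
	*__p += 0; \
} while (0)

int main(void)
{
	int buf[2] = { 0, 0 };
	int *p = buf;

	STORE_BAD(1, p++);	/* p++ runs twice: p ends up advanced by 2 */
	printf("bad:  p advanced by %td\n", p - buf);

	p = buf;
	STORE_GOOD(1, p++);	/* p++ runs exactly once */
	printf("good: p advanced by %td\n", p - buf);
	return 0;
}
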
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index cceb09855e03..b91cb45ffd4e 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -48,6 +48,7 @@ struct proc_dir_entry * proc_mckinley_root __read_mostly = NULL;
void __init setup_cmdline(char **cmdline_p)
{
extern unsigned int boot_args[];
+ char *p;
/* Collect stuff passed in from the boot loader */
@@ -56,9 +57,19 @@ void __init setup_cmdline(char **cmdline_p)
/* called from hpux boot loader */
boot_command_line[0] = '\0';
} else {
- strlcpy(boot_command_line, (char *)__va(boot_args[1]),
+ strscpy(boot_command_line, (char *)__va(boot_args[1]),
COMMAND_LINE_SIZE);
+ /* autodetect console type (if not done by palo yet) */
+ p = boot_command_line;
+ if (!str_has_prefix(p, "console=") && !strstr(p, " console=")) {
+ strlcat(p, " console=", COMMAND_LINE_SIZE);
+ if (PAGE0->mem_cons.cl_class == CL_DUPLEX)
+ strlcat(p, "ttyS0", COMMAND_LINE_SIZE);
+ else
+ strlcat(p, "tty0", COMMAND_LINE_SIZE);
+ }
+
#ifdef CONFIG_BLK_DEV_INITRD
if (boot_args[2] != 0) /* did palo pass us a ramdisk? */
{
@@ -68,7 +79,7 @@ void __init setup_cmdline(char **cmdline_p)
#endif
}
- strcpy(command_line, boot_command_line);
+ strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
*cmdline_p = command_line;
}
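
The setup_cmdline() addition appends a default console= option only when the command line does not already carry one, picking ttyS0 for serial (CL_DUPLEX) consoles and tty0 otherwise. A userspace approximation — plain strncmp()/strncat() stand in for str_has_prefix()/strlcat(), and the 'duplex' flag stands in for the PAGE0->mem_cons.cl_class test:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define CMDLINE_SIZE 256

static void add_default_console(char *cmdline, bool duplex)
{
	/* only if neither "console=..." at the start nor " console=" inside */
	if (strncmp(cmdline, "console=", 8) != 0 &&
	    !strstr(cmdline, " console=")) {
		strncat(cmdline, " console=",
			CMDLINE_SIZE - strlen(cmdline) - 1);
		strncat(cmdline, duplex ? "ttyS0" : "tty0",
			CMDLINE_SIZE - strlen(cmdline) - 1);
	}
}

int main(void)
{
	char a[CMDLINE_SIZE] = "root=/dev/sda1 ro";
	char b[CMDLINE_SIZE] = "root=/dev/sda1 console=ttyB0";

	add_default_console(a, true);	/* gets " console=ttyS0" appended */
	add_default_console(b, false);	/* untouched: console= already there */
	printf("%s\n%s\n", a, b);
	return 0;
}
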
diff --git a/arch/parisc/kernel/syscalls/syscall.tbl b/arch/parisc/kernel/syscalls/syscall.tbl
index 358c00000755..68b46fe2f17c 100644
--- a/arch/parisc/kernel/syscalls/syscall.tbl
+++ b/arch/parisc/kernel/syscalls/syscall.tbl
@@ -447,3 +447,4 @@
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/parisc/kernel/toc.c b/arch/parisc/kernel/toc.c
index be9a0bebe61e..e4b48d07afbd 100644
--- a/arch/parisc/kernel/toc.c
+++ b/arch/parisc/kernel/toc.c
@@ -10,9 +10,10 @@
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/ldcw.h>
+#include <asm/processor.h>
static unsigned int __aligned(16) toc_lock = 1;
-DEFINE_PER_CPU_PAGE_ALIGNED(char [16384], toc_stack);
+DEFINE_PER_CPU_PAGE_ALIGNED(char [16384], toc_stack) __visible;
static void toc20_to_pt_regs(struct pt_regs *regs, struct pdc_toc_pim_20 *toc)
{
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index eb41fece1910..b6fdebddc8e9 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -269,7 +269,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
panic("Fatal exception");
oops_exit();
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
/* gdb uses break 4,8 */
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c
index 367f6397bda7..860385058085 100644
--- a/arch/parisc/lib/iomap.c
+++ b/arch/parisc/lib/iomap.c
@@ -346,6 +346,16 @@ u64 ioread64be(const void __iomem *addr)
return *((u64 *)addr);
}
+u64 ioread64_lo_hi(const void __iomem *addr)
+{
+ u32 low, high;
+
+ low = ioread32(addr);
+ high = ioread32(addr + sizeof(u32));
+
+ return low + ((u64)high << 32);
+}
+
u64 ioread64_hi_lo(const void __iomem *addr)
{
u32 low, high;
@@ -419,6 +429,12 @@ void iowrite64be(u64 datum, void __iomem *addr)
}
}
+void iowrite64_lo_hi(u64 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+ iowrite32(val >> 32, addr + sizeof(u32));
+}
+
void iowrite64_hi_lo(u64 val, void __iomem *addr)
{
iowrite32(val >> 32, addr + sizeof(u32));
@@ -530,6 +546,7 @@ EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(ioread32be);
EXPORT_SYMBOL(ioread64);
EXPORT_SYMBOL(ioread64be);
+EXPORT_SYMBOL(ioread64_lo_hi);
EXPORT_SYMBOL(ioread64_hi_lo);
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
@@ -538,6 +555,7 @@ EXPORT_SYMBOL(iowrite32);
EXPORT_SYMBOL(iowrite32be);
EXPORT_SYMBOL(iowrite64);
EXPORT_SYMBOL(iowrite64be);
+EXPORT_SYMBOL(iowrite64_lo_hi);
EXPORT_SYMBOL(iowrite64_hi_lo);
EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(ioread16_rep);
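
ioread64_lo_hi() and iowrite64_lo_hi() fill the gap next to the existing _hi_lo variants: a 64-bit MMIO access built from two 32-bit halves, with the low word touched first — an order that can matter for devices that latch a 64-bit register according to which half arrives first. A host-side model, with an ordinary buffer standing in for the __iomem region:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t read32(const void *addr)
{
	uint32_t v;
	memcpy(&v, addr, 4);
	return v;
}

static void write32(uint32_t v, void *addr)
{
	memcpy(addr, &v, 4);
}

static uint64_t read64_lo_hi(const void *addr)
{
	uint32_t low  = read32(addr);				/* first access: low word */
	uint32_t high = read32((const uint8_t *)addr + 4);	/* second: high word */
	return low + ((uint64_t)high << 32);
}

static void write64_hi_lo(uint64_t val, void *addr)
{
	write32(val >> 32, (uint8_t *)addr + 4);	/* high word first */
	write32(val, addr);				/* then low word */
}

int main(void)
{
	uint8_t reg[8] = { 0 };

	write64_hi_lo(0x1122334455667788ULL, reg);
	printf("0x%llx\n", (unsigned long long)read64_lo_hi(reg));
	return 0;
}
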
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 147868427b7c..e9eabf8f14d7 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -324,16 +324,14 @@ good_area:
goto bad_area;
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- /*
- * No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
- flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ if (fault & VM_FAULT_RETRY) {
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
}
mmap_read_unlock(mm);
return;
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 1ae31db9988f..1dc2e88e7b04 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -337,9 +337,9 @@ static void __init setup_bootmem(void)
static bool kernel_set_to_readonly;
-static void __init map_pages(unsigned long start_vaddr,
- unsigned long start_paddr, unsigned long size,
- pgprot_t pgprot, int force)
+static void __ref map_pages(unsigned long start_vaddr,
+ unsigned long start_paddr, unsigned long size,
+ pgprot_t pgprot, int force)
{
pmd_t *pmd;
pte_t *pg_table;
@@ -449,7 +449,7 @@ void __init set_kernel_text_rw(int enable_read_write)
flush_tlb_all();
}
-void __ref free_initmem(void)
+void free_initmem(void)
{
unsigned long init_begin = (unsigned long)__init_begin;
unsigned long init_end = (unsigned long)__init_end;
@@ -463,7 +463,6 @@ void __ref free_initmem(void)
/* The init text pages are marked R-X. We have to
* flush the icache and mark them RW-
*
- * This is tricky, because map_pages is in the init section.
* Do a dummy remap of the data section first (the data
* section is already PAGE_KERNEL) to pull in the TLB entries
* for map_kernel */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index dea74d7717c0..b779603978e1 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -55,15 +55,6 @@ config ARCH_MMAP_RND_COMPAT_BITS_MIN
default 9 if PPC_16K_PAGES # 9 = 23 (8MB) - 14 (16K)
default 11 # 11 = 23 (8MB) - 12 (4K)
-config HAVE_SETUP_PER_CPU_AREA
- def_bool PPC64
-
-config NEED_PER_CPU_EMBED_FIRST_CHUNK
- def_bool y if PPC64
-
-config NEED_PER_CPU_PAGE_FIRST_CHUNK
- def_bool y if PPC64
-
config NR_IRQS
int "Number of virtual interrupt numbers"
range 32 1048576
@@ -129,7 +120,7 @@ config PPC
select ARCH_HAS_KCOV
select ARCH_HAS_MEMBARRIER_CALLBACKS
select ARCH_HAS_MEMBARRIER_SYNC_CORE
- select ARCH_HAS_MEMREMAP_COMPAT_ALIGN
+ select ARCH_HAS_MEMREMAP_COMPAT_ALIGN if PPC_64S_HASH_MMU
select ARCH_HAS_MMIOWB if PPC64
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
select ARCH_HAS_PHYS_TO_DMA
@@ -165,6 +156,7 @@ config PPC
select BINFMT_ELF
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS
+ select CPUMASK_OFFSTACK if NR_CPUS >= 8192
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
select DMA_OPS_BYPASS if PPC64
select DMA_OPS if PPC64
@@ -205,7 +197,7 @@ config PPC
select HAVE_DEBUG_KMEMLEAK
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_DYNAMIC_FTRACE
- select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL || PPC32
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU)
select HAVE_FAST_GUP
@@ -229,7 +221,7 @@ config PPC
select HAVE_KPROBES_ON_FTRACE
select HAVE_KRETPROBES
select HAVE_LD_DEAD_CODE_DATA_ELIMINATION
- select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS && PPC64
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S)
select HAVE_OPTPROBES
@@ -240,6 +232,7 @@ config PPC
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
select HAVE_RSEQ
+ select HAVE_SETUP_PER_CPU_AREA if PPC64
select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)
select HAVE_STACKPROTECTOR if PPC64 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r13)
@@ -254,6 +247,8 @@ config PPC
select MMU_GATHER_RCU_TABLE_FREE
select MODULES_USE_ELF_RELA
select NEED_DMA_MAP_STATE if PPC64 || NOT_COHERENT_CACHE
+ select NEED_PER_CPU_EMBED_FIRST_CHUNK if PPC64
+ select NEED_PER_CPU_PAGE_FIRST_CHUNK if PPC64
select NEED_SG_DMA_LENGTH
select OF
select OF_DMA_DEFAULT_COHERENT if !NOT_COHERENT_CACHE
@@ -659,6 +654,7 @@ config NUMA
bool "NUMA Memory Allocation and Scheduler Support"
depends on PPC64 && SMP
default y if PPC_PSERIES || PPC_POWERNV
+ select USE_PERCPU_NUMA_NODE_ID
help
Enable NUMA (Non-Uniform Memory Access) support.
@@ -672,10 +668,6 @@ config NODES_SHIFT
default "4"
depends on NUMA
-config USE_PERCPU_NUMA_NODE_ID
- def_bool y
- depends on NUMA
-
config HAVE_MEMORYLESS_NODES
def_bool y
depends on NUMA
@@ -845,7 +837,7 @@ config FORCE_MAX_ZONEORDER
config PPC_SUBPAGE_PROT
bool "Support setting protections for 4k subpages (subpage_prot syscall)"
default n
- depends on PPC_BOOK3S_64 && PPC_64K_PAGES
+ depends on PPC_64S_HASH_MMU && PPC_64K_PAGES
help
This option adds support for system call to allow user programs
to set access permissions (read/write, readonly, or no access)
@@ -943,6 +935,7 @@ config PPC_MEM_KEYS
prompt "PowerPC Memory Protection Keys"
def_bool y
depends on PPC_BOOK3S_64
+ depends on PPC_64S_HASH_MMU
select ARCH_USES_HIGH_VMA_FLAGS
select ARCH_HAS_PKEYS
help
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index e02568f17334..5f16ac1583c5 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -245,7 +245,9 @@ cpu-as-$(CONFIG_E500) += -Wa,-me500
# When using '-many -mpower4' gas will first try and find a matching power4
# mnemonic and failing that it will allow any valid mnemonic that GAS knows
# about. GCC will pass -many to GAS when assembling, clang does not.
-cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4 -Wa,-many
+# LLVM IAS doesn't understand either flag: https://github.com/ClangBuiltLinux/linux/issues/675
+# but LLVM IAS only supports ISA >= 2.06 for Book3S 64 anyway...
+cpu-as-$(CONFIG_PPC_BOOK3S_64) += $(call as-option,-Wa$(comma)-mpower4) $(call as-option,-Wa$(comma)-many)
cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc)
KBUILD_AFLAGS += $(cpu-as-y)
@@ -445,10 +447,11 @@ PHONY += checkbin
# Check toolchain versions:
# - gcc-4.6 is the minimum kernel-wide version so nothing required.
checkbin:
- @if test "x${CONFIG_CPU_LITTLE_ENDIAN}" = "xy" \
- && $(LD) --version | head -1 | grep ' 2\.24$$' >/dev/null ; then \
+ @if test "x${CONFIG_LD_IS_LLD}" != "xy" -a \
+ "x$(call ld-ifversion, -le, 22400, y)" = "xy" ; then \
echo -n '*** binutils 2.24 miscompiles weak symbols ' ; \
echo 'in some circumstances.' ; \
+		echo '*** binutils 2.23 does not define the TOC symbol ' ; \
echo -n '*** Please use a different binutils version.' ; \
false ; \
fi
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 9993c6256ad2..4b4827c475c6 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -365,7 +365,7 @@ image-$(CONFIG_PPC_PMAC) += zImage.coff zImage.miboot
endif
# Allow extra targets to be added to the defconfig
-image-y += $(subst ",,$(CONFIG_EXTRA_TARGETS))
+image-y += $(CONFIG_EXTRA_TARGETS)
initrd- := $(patsubst zImage%, zImage.initrd%, $(image-))
initrd-y := $(patsubst zImage%, zImage.initrd%, \
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 1d83966f5ef6..feadee18e271 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -28,7 +28,7 @@ p_etext: .8byte _etext
p_bss_start: .8byte __bss_start
p_end: .8byte _end
-p_toc: .8byte __toc_start + 0x8000 - p_base
+p_toc: .8byte .TOC. - p_base
p_dyn: .8byte __dynamic_start - p_base
p_rela: .8byte __rela_dyn_start - p_base
p_prom: .8byte 0
@@ -226,16 +226,19 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
#ifdef __powerpc64__
#define PROM_FRAME_SIZE 512
-#define SAVE_GPR(n, base) std n,8*(n)(base)
-#define REST_GPR(n, base) ld n,8*(n)(base)
-#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
-#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
-#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
-#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
-#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
-#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
-#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
-#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
+
+.macro OP_REGS op, width, start, end, base, offset
+ .Lreg=\start
+ .rept (\end - \start + 1)
+ \op .Lreg,\offset+\width*.Lreg(\base)
+ .Lreg=.Lreg+1
+ .endr
+.endm
+
+#define SAVE_GPRS(start, end, base) OP_REGS std, 8, start, end, base, 0
+#define REST_GPRS(start, end, base) OP_REGS ld, 8, start, end, base, 0
+#define SAVE_GPR(n, base) SAVE_GPRS(n, n, base)
+#define REST_GPR(n, base) REST_GPRS(n, n, base)
/* prom handles the jump into and return from firmware. The prom args pointer
is loaded in r3. */
@@ -246,9 +249,7 @@ prom:
stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
SAVE_GPR(2, r1)
- SAVE_GPR(13, r1)
- SAVE_8GPRS(14, r1)
- SAVE_10GPRS(22, r1)
+ SAVE_GPRS(13, 31, r1)
mfcr r10
std r10,8*32(r1)
mfmsr r10
@@ -283,9 +284,7 @@ prom:
/* Restore other registers */
REST_GPR(2, r1)
- REST_GPR(13, r1)
- REST_8GPRS(14, r1)
- REST_10GPRS(22, r1)
+ REST_GPRS(13, 31, r1)
ld r10,8*32(r1)
mtcr r10
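
The OP_REGS assembler macro above replaces the old SAVE_2GPRS/SAVE_4GPRS/... ladder with a single .rept loop, so SAVE_GPRS(13, 31, r1) emits one std per register at offset 8*reg. A quick C harness (illustrative only) that prints the instruction sequence the macro expands to:

#include <stdio.h>

/* mirrors: .rept over \start..\end emitting "\op reg,offset+width*reg(base)" */
static void op_regs(const char *op, int width, int start, int end,
		    const char *base, int offset)
{
	for (int reg = start; reg <= end; reg++)
		printf("\t%s %d,%d(%s)\n", op, reg, offset + width * reg, base);
}

int main(void)
{
	/* SAVE_GPRS(13, 31, r1) */
	op_regs("std", 8, 13, 31, "r1", 0);
	return 0;
}
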
diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts
index aa1ae94cd776..6971595319c1 100644
--- a/arch/powerpc/boot/dts/bluestone.dts
+++ b/arch/powerpc/boot/dts/bluestone.dts
@@ -366,30 +366,5 @@
0x0 0x0 0x0 0x3 &UIC3 0xe 0x4 /* swizzled int C */
0x0 0x0 0x0 0x4 &UIC3 0xf 0x4 /* swizzled int D */>;
};
-
- MSI: ppc4xx-msi@C10000000 {
- compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
- reg = < 0xC 0x10000000 0x100
- 0xC 0x10000000 0x100>;
- sdr-base = <0x36C>;
- msi-data = <0x00004440>;
- msi-mask = <0x0000ffe0>;
- interrupts =<0 1 2 3 4 5 6 7>;
- interrupt-parent = <&MSI>;
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- msi-available-ranges = <0x0 0x100>;
- interrupt-map = <
- 0 &UIC3 0x18 1
- 1 &UIC3 0x19 1
- 2 &UIC3 0x1A 1
- 3 &UIC3 0x1B 1
- 4 &UIC3 0x1C 1
- 5 &UIC3 0x1D 1
- 6 &UIC3 0x1E 1
- 7 &UIC3 0x1F 1
- >;
- };
};
};
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index c5fbb08e0a6e..5db1bff6b23d 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -544,23 +544,5 @@
0x0 0x0 0x0 0x3 &UIC3 0x12 0x4 /* swizzled int C */
0x0 0x0 0x0 0x4 &UIC3 0x13 0x4 /* swizzled int D */>;
};
-
- MSI: ppc4xx-msi@C10000000 {
- compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
- reg = < 0xC 0x10000000 0x100>;
- sdr-base = <0x36C>;
- msi-data = <0x00000000>;
- msi-mask = <0x44440000>;
- interrupt-count = <3>;
- interrupts = <0 1 2 3>;
- interrupt-parent = <&UIC3>;
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- interrupt-map = <0 &UIC3 0x18 1
- 1 &UIC3 0x19 1
- 2 &UIC3 0x1A 1
- 3 &UIC3 0x1B 1>;
- };
};
};
diff --git a/arch/powerpc/boot/dts/digsy_mtc.dts b/arch/powerpc/boot/dts/digsy_mtc.dts
index 57024a4c1e7d..dfaf974c0ce6 100644
--- a/arch/powerpc/boot/dts/digsy_mtc.dts
+++ b/arch/powerpc/boot/dts/digsy_mtc.dts
@@ -25,14 +25,6 @@
status = "disabled";
};
- spi@f00 {
- msp430@0 {
- compatible = "spidev";
- spi-max-frequency = <32000>;
- reg = <0>;
- };
- };
-
psc@2000 { // PSC1
status = "disabled";
};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi
index c90702b04a53..48e5cd61599c 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-fman3l-0.dtsi
@@ -79,6 +79,7 @@ fman0: fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xfc000 0x1000>;
+ fsl,erratum-a009885;
};
xmdio0: mdio@fd000 {
@@ -86,6 +87,7 @@ fman0: fman@400000 {
#size-cells = <0>;
compatible = "fsl,fman-memac-mdio", "fsl,fman-xmdio";
reg = <0xfd000 0x1000>;
+ fsl,erratum-a009885;
};
};
diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts
index a8f353229fb7..4262b2bbd6de 100644
--- a/arch/powerpc/boot/dts/katmai.dts
+++ b/arch/powerpc/boot/dts/katmai.dts
@@ -442,24 +442,6 @@
0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>;
};
- MSI: ppc4xx-msi@400300000 {
- compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
- reg = < 0x4 0x00300000 0x100>;
- sdr-base = <0x3B0>;
- msi-data = <0x00000000>;
- msi-mask = <0x44440000>;
- interrupt-count = <3>;
- interrupts =<0 1 2 3>;
- interrupt-parent = <&UIC0>;
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- interrupt-map = <0 &UIC0 0xC 1
- 1 &UIC0 0x0D 1
- 2 &UIC0 0x0E 1
- 3 &UIC0 0x0F 1>;
- };
-
I2O: i2o@400100000 {
compatible = "ibm,i2o-440spe";
reg = <0x00000004 0x00100000 0x100>;
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index a709fb47a180..c07a7525a72c 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -403,33 +403,5 @@
0x0 0x0 0x0 0x3 &UIC2 0xd 0x4 /* swizzled int C */
0x0 0x0 0x0 0x4 &UIC2 0xe 0x4 /* swizzled int D */>;
};
-
- MSI: ppc4xx-msi@C10000000 {
- compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
- reg = <0xEF620000 0x100>;
- sdr-base = <0x4B0>;
- msi-data = <0x00000000>;
- msi-mask = <0x44440000>;
- interrupt-count = <12>;
- interrupts = <0 1 2 3 4 5 6 7 8 9 0xA 0xB 0xC 0xD>;
- interrupt-parent = <&UIC2>;
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- interrupt-map = <0 &UIC2 0x10 1
- 1 &UIC2 0x11 1
- 2 &UIC2 0x12 1
- 2 &UIC2 0x13 1
- 2 &UIC2 0x14 1
- 2 &UIC2 0x15 1
- 2 &UIC2 0x16 1
- 2 &UIC2 0x17 1
- 2 &UIC2 0x18 1
- 2 &UIC2 0x19 1
- 2 &UIC2 0x1A 1
- 2 &UIC2 0x1B 1
- 2 &UIC2 0x1C 1
- 3 &UIC2 0x1D 1>;
- };
};
};
diff --git a/arch/powerpc/boot/dts/o2d.dtsi b/arch/powerpc/boot/dts/o2d.dtsi
index b55a9e5bd828..7e52509fa506 100644
--- a/arch/powerpc/boot/dts/o2d.dtsi
+++ b/arch/powerpc/boot/dts/o2d.dtsi
@@ -34,12 +34,6 @@
#address-cells = <1>;
#size-cells = <0>;
cell-index = <0>;
-
- spidev@0 {
- compatible = "spidev";
- spi-max-frequency = <250000>;
- reg = <0>;
- };
};
psc@2200 { // PSC2
diff --git a/arch/powerpc/boot/dts/redwood.dts b/arch/powerpc/boot/dts/redwood.dts
index f38035a1f4a1..3c849e23e5f3 100644
--- a/arch/powerpc/boot/dts/redwood.dts
+++ b/arch/powerpc/boot/dts/redwood.dts
@@ -358,25 +358,6 @@
0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>;
};
- MSI: ppc4xx-msi@400300000 {
- compatible = "amcc,ppc4xx-msi", "ppc4xx-msi";
- reg = < 0x4 0x00300000 0x100
- 0x4 0x00300000 0x100>;
- sdr-base = <0x3B0>;
- msi-data = <0x00000000>;
- msi-mask = <0x44440000>;
- interrupt-count = <3>;
- interrupts =<0 1 2 3>;
- interrupt-parent = <&UIC0>;
- #interrupt-cells = <1>;
- #address-cells = <0>;
- #size-cells = <0>;
- interrupt-map = <0 &UIC0 0xC 1
- 1 &UIC0 0x0D 1
- 2 &UIC0 0x0E 1
- 3 &UIC0 0x0F 1>;
- };
-
};
diff --git a/arch/powerpc/boot/dts/wii.dts b/arch/powerpc/boot/dts/wii.dts
index e9c945b123c6..e46143c32308 100644
--- a/arch/powerpc/boot/dts/wii.dts
+++ b/arch/powerpc/boot/dts/wii.dts
@@ -168,6 +168,11 @@
interrupts = <14>;
};
+ srnprot@d800060 {
+ compatible = "nintendo,hollywood-srnprot";
+ reg = <0x0d800060 0x4>;
+ };
+
GPIO: gpio@d8000c0 {
#gpio-cells = <2>;
compatible = "nintendo,hollywood-gpio";
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index d6f072865627..d65cd55a6f38 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -36,12 +36,9 @@ SECTIONS
}
#ifdef CONFIG_PPC64_BOOT_WRAPPER
- . = ALIGN(256);
- .got :
+ .got : ALIGN(256)
{
- __toc_start = .;
- *(.got)
- *(.toc)
+ *(.got .toc)
}
#endif
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig
index 24c0e0ea5aeb..91a1b99f4e8f 100644
--- a/arch/powerpc/configs/gamecube_defconfig
+++ b/arch/powerpc/configs/gamecube_defconfig
@@ -68,7 +68,7 @@ CONFIG_SND_SEQUENCER=y
CONFIG_SND_SEQUENCER_OSS=y
# CONFIG_USB_SUPPORT is not set
CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_GENERIC=y
+CONFIG_RTC_DRV_GAMECUBE=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
CONFIG_ISO9660_FS=y
diff --git a/arch/powerpc/configs/microwatt_defconfig b/arch/powerpc/configs/microwatt_defconfig
index 07d87a4044b2..eff933ebbb9e 100644
--- a/arch/powerpc/configs/microwatt_defconfig
+++ b/arch/powerpc/configs/microwatt_defconfig
@@ -15,6 +15,8 @@ CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set
# CONFIG_SLAB_MERGE_DEFAULT is not set
CONFIG_PPC64=y
+CONFIG_POWER9_CPU=y
+# CONFIG_PPC_64S_HASH_MMU is not set
# CONFIG_PPC_KUEP is not set
# CONFIG_PPC_KUAP is not set
CONFIG_CPU_LITTLE_ENDIAN=y
@@ -27,7 +29,6 @@ CONFIG_PPC_MICROWATT=y
CONFIG_CPU_FREQ=y
CONFIG_HZ_100=y
CONFIG_PPC_4K_PAGES=y
-# CONFIG_PPC_MEM_KEYS is not set
# CONFIG_SECCOMP is not set
# CONFIG_MQ_IOSCHED_KYBER is not set
# CONFIG_COREDUMP is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 203d0b7f0bb8..c8b0e80d613b 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -26,7 +26,6 @@ CONFIG_PPC64=y
CONFIG_NR_CPUS=2048
CONFIG_PPC_SPLPAR=y
CONFIG_DTL=y
-CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
CONFIG_IBMEBUS=y
CONFIG_PPC_SVM=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index e64f2242abe1..b571d084c148 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -38,7 +38,6 @@ CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_PPC_SPLPAR=y
CONFIG_DTL=y
-CONFIG_SCANLOG=m
CONFIG_PPC_SMLPAR=y
CONFIG_IBMEBUS=y
CONFIG_PAPR_SCM=m
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig
index a0c45bf2bfb1..0ab78c51455d 100644
--- a/arch/powerpc/configs/wii_defconfig
+++ b/arch/powerpc/configs/wii_defconfig
@@ -98,7 +98,7 @@ CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_PANIC=y
CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_GENERIC=y
+CONFIG_RTC_DRV_GAMECUBE=y
CONFIG_NVMEM_NINTENDO_OTP=y
CONFIG_EXT2_FS=y
CONFIG_EXT4_FS=y
diff --git a/arch/powerpc/crypto/md5-asm.S b/arch/powerpc/crypto/md5-asm.S
index 948d100a2934..fa6bc440cf4a 100644
--- a/arch/powerpc/crypto/md5-asm.S
+++ b/arch/powerpc/crypto/md5-asm.S
@@ -38,15 +38,11 @@
#define INITIALIZE \
PPC_STLU r1,-INT_FRAME_SIZE(r1); \
- SAVE_8GPRS(14, r1); /* push registers onto stack */ \
- SAVE_4GPRS(22, r1); \
- SAVE_GPR(26, r1)
+ SAVE_GPRS(14, 26, r1) /* push registers onto stack */
#define FINALIZE \
- REST_8GPRS(14, r1); /* pop registers from stack */ \
- REST_4GPRS(22, r1); \
- REST_GPR(26, r1); \
- addi r1,r1,INT_FRAME_SIZE;
+ REST_GPRS(14, 26, r1); /* pop registers from stack */ \
+ addi r1,r1,INT_FRAME_SIZE
#ifdef __BIG_ENDIAN__
#define LOAD_DATA(reg, off) \
diff --git a/arch/powerpc/crypto/sha1-powerpc-asm.S b/arch/powerpc/crypto/sha1-powerpc-asm.S
index 23e248beff71..f0d5ed557ab1 100644
--- a/arch/powerpc/crypto/sha1-powerpc-asm.S
+++ b/arch/powerpc/crypto/sha1-powerpc-asm.S
@@ -125,8 +125,7 @@
_GLOBAL(powerpc_sha_transform)
PPC_STLU r1,-INT_FRAME_SIZE(r1)
- SAVE_8GPRS(14, r1)
- SAVE_10GPRS(22, r1)
+ SAVE_GPRS(14, 31, r1)
/* Load up A - E */
lwz RA(0),0(r3) /* A */
@@ -184,7 +183,6 @@ _GLOBAL(powerpc_sha_transform)
stw RD(0),12(r3)
stw RE(0),16(r3)
- REST_8GPRS(14, r1)
- REST_10GPRS(22, r1)
+ REST_GPRS(14, 31, r1)
addi r1,r1,INT_FRAME_SIZE
blr
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h
index 222823861a67..41b8a1e1144a 100644
--- a/arch/powerpc/include/asm/asm-prototypes.h
+++ b/arch/powerpc/include/asm/asm-prototypes.h
@@ -141,11 +141,6 @@ static inline void kvmppc_restore_tm_hv(struct kvm_vcpu *vcpu, u64 msr,
bool preserve_nv) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
-void kvmhv_save_host_pmu(void);
-void kvmhv_load_host_pmu(void);
-void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use);
-void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu);
-
void kvmppc_p9_enter_guest(struct kvm_vcpu *vcpu);
long kvmppc_h_set_dabr(struct kvm_vcpu *vcpu, unsigned long dabr);
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index fd594fdbd84d..853dc86864f4 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -37,62 +37,62 @@ static __inline__ void arch_atomic_set(atomic_t *v, int i)
__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m<>"(v->counter) : "r"(i));
}
-#define ATOMIC_OP(op, asm_op) \
+#define ATOMIC_OP(op, asm_op, suffix, sign, ...) \
static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
{ \
int t; \
\
__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "\n" \
- #asm_op " %0,%2,%0\n" \
+ #asm_op "%I2" suffix " %0,%0,%2\n" \
" stwcx. %0,0,%3 \n" \
" bne- 1b\n" \
: "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
+ : "r"#sign (a), "r" (&v->counter) \
+ : "cc", ##__VA_ARGS__); \
} \
-#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
+#define ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ...) \
static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
{ \
int t; \
\
__asm__ __volatile__( \
"1: lwarx %0,0,%3 # atomic_" #op "_return_relaxed\n" \
- #asm_op " %0,%2,%0\n" \
+ #asm_op "%I2" suffix " %0,%0,%2\n" \
" stwcx. %0,0,%3\n" \
" bne- 1b\n" \
: "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
+ : "r"#sign (a), "r" (&v->counter) \
+ : "cc", ##__VA_ARGS__); \
\
return t; \
}
-#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
+#define ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ...) \
static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
{ \
int res, t; \
\
__asm__ __volatile__( \
"1: lwarx %0,0,%4 # atomic_fetch_" #op "_relaxed\n" \
- #asm_op " %1,%3,%0\n" \
+ #asm_op "%I3" suffix " %1,%0,%3\n" \
" stwcx. %1,0,%4\n" \
" bne- 1b\n" \
: "=&r" (res), "=&r" (t), "+m" (v->counter) \
- : "r" (a), "r" (&v->counter) \
- : "cc"); \
+ : "r"#sign (a), "r" (&v->counter) \
+ : "cc", ##__VA_ARGS__); \
\
return res; \
}
-#define ATOMIC_OPS(op, asm_op) \
- ATOMIC_OP(op, asm_op) \
- ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
- ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+#define ATOMIC_OPS(op, asm_op, suffix, sign, ...) \
+ ATOMIC_OP(op, asm_op, suffix, sign, ##__VA_ARGS__) \
+ ATOMIC_OP_RETURN_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)\
+ ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign, ##__VA_ARGS__)
-ATOMIC_OPS(add, add)
-ATOMIC_OPS(sub, subf)
+ATOMIC_OPS(add, add, "c", I, "xer")
+ATOMIC_OPS(sub, sub, "c", I, "xer")
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
@@ -101,13 +101,13 @@ ATOMIC_OPS(sub, subf)
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS
-#define ATOMIC_OPS(op, asm_op) \
- ATOMIC_OP(op, asm_op) \
- ATOMIC_FETCH_OP_RELAXED(op, asm_op)
+#define ATOMIC_OPS(op, asm_op, suffix, sign) \
+ ATOMIC_OP(op, asm_op, suffix, sign) \
+ ATOMIC_FETCH_OP_RELAXED(op, asm_op, suffix, sign)
-ATOMIC_OPS(and, and)
-ATOMIC_OPS(or, or)
-ATOMIC_OPS(xor, xor)
+ATOMIC_OPS(and, and, ".", K)
+ATOMIC_OPS(or, or, "", K)
+ATOMIC_OPS(xor, xor, "", K)
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
@@ -118,71 +118,6 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP
-static __inline__ void arch_atomic_inc(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_inc\n\
- addic %0,%0,1\n"
-" stwcx. %0,0,%2 \n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-}
-#define arch_atomic_inc arch_atomic_inc
-
-static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_inc_return_relaxed\n"
-" addic %0,%0,1\n"
-" stwcx. %0,0,%2\n"
-" bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-
- return t;
-}
-
-static __inline__ void arch_atomic_dec(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_dec\n\
- addic %0,%0,-1\n"
-" stwcx. %0,0,%2\n\
- bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-}
-#define arch_atomic_dec arch_atomic_dec
-
-static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
-{
- int t;
-
- __asm__ __volatile__(
-"1: lwarx %0,0,%2 # atomic_dec_return_relaxed\n"
-" addic %0,%0,-1\n"
-" stwcx. %0,0,%2\n"
-" bne- 1b"
- : "=&r" (t), "+m" (v->counter)
- : "r" (&v->counter)
- : "cc", "xer");
-
- return t;
-}
-
-#define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
-#define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
-
#define arch_atomic_cmpxchg(v, o, n) \
(arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_cmpxchg_relaxed(v, o, n) \
@@ -241,50 +176,20 @@ static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
"1: lwarx %0,0,%1 # atomic_fetch_add_unless\n\
cmpw 0,%0,%3 \n\
beq 2f \n\
- add %0,%2,%0 \n"
+ add%I2c %0,%0,%2 \n"
" stwcx. %0,0,%1 \n\
bne- 1b \n"
PPC_ATOMIC_EXIT_BARRIER
-" subf %0,%2,%0 \n\
+" sub%I2c %0,%0,%2 \n\
2:"
: "=&r" (t)
- : "r" (&v->counter), "r" (a), "r" (u)
- : "cc", "memory");
+ : "r" (&v->counter), "rI" (a), "r" (u)
+ : "cc", "memory", "xer");
return t;
}
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
-/**
- * atomic_inc_not_zero - increment unless the number is zero
- * @v: pointer of type atomic_t
- *
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
- */
-static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
-{
- int t1, t2;
-
- __asm__ __volatile__ (
- PPC_ATOMIC_ENTRY_BARRIER
-"1: lwarx %0,0,%2 # atomic_inc_not_zero\n\
- cmpwi 0,%0,0\n\
- beq- 2f\n\
- addic %1,%0,1\n"
-" stwcx. %1,0,%2\n\
- bne- 1b\n"
- PPC_ATOMIC_EXIT_BARRIER
- "\n\
-2:"
- : "=&r" (t1), "=&r" (t2)
- : "r" (&v->counter)
- : "cc", "xer", "memory");
-
- return t1;
-}
-#define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
-
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if
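
The rewritten ATOMIC_OP templates pass the operand with an "rI" constraint and pick the mnemonic with the %I output modifier, so GCC emits addic when the argument is a signed 16-bit constant and addc otherwise (the carry-setting forms are why "xer" joins the clobber list). A stand-alone sketch of the same trick for the add case — powerpc-only, and returning the updated value like the _return variants:

static inline int add_return_sketch(int a, int *p)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%3\n"	/* load-reserve the old value */
"	add%I2c	%0,%0,%2\n"	/* renders as addic for constants, addc otherwise */
"	stwcx.	%0,0,%3\n"	/* store-conditional */
"	bne-	1b\n"		/* lost the reservation: retry */
	: "=&r" (t), "+m" (*p)
	: "rI" (a), "r" (p)
	: "cc", "xer");

	return t;
}
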
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index 11847b6a244e..ea5d27dda8cf 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -71,19 +71,61 @@ static inline void fn(unsigned long mask, \
__asm__ __volatile__ ( \
prefix \
"1:" PPC_LLARX "%0,0,%3,0\n" \
- stringify_in_c(op) "%0,%0,%2\n" \
+ #op "%I2 %0,%0,%2\n" \
PPC_STLCX "%0,0,%3\n" \
"bne- 1b\n" \
: "=&r" (old), "+m" (*p) \
- : "r" (mask), "r" (p) \
+ : "rK" (mask), "r" (p) \
: "cc", "memory"); \
}
DEFINE_BITOP(set_bits, or, "")
-DEFINE_BITOP(clear_bits, andc, "")
-DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
DEFINE_BITOP(change_bits, xor, "")
+static __always_inline bool is_rlwinm_mask_valid(unsigned long x)
+{
+ if (!x)
+ return false;
+ if (x & 1)
+ x = ~x; // make the mask non-wrapping
+ x += x & -x; // adding the low set bit results in at most one bit set
+
+ return !(x & (x - 1));
+}
+
+#define DEFINE_CLROP(fn, prefix) \
+static inline void fn(unsigned long mask, volatile unsigned long *_p) \
+{ \
+ unsigned long old; \
+ unsigned long *p = (unsigned long *)_p; \
+ \
+ if (IS_ENABLED(CONFIG_PPC32) && \
+ __builtin_constant_p(mask) && is_rlwinm_mask_valid(~mask)) {\
+ asm volatile ( \
+ prefix \
+ "1:" "lwarx %0,0,%3\n" \
+ "rlwinm %0,%0,0,%2\n" \
+ "stwcx. %0,0,%3\n" \
+ "bne- 1b\n" \
+ : "=&r" (old), "+m" (*p) \
+ : "n" (~mask), "r" (p) \
+ : "cc", "memory"); \
+ } else { \
+ asm volatile ( \
+ prefix \
+ "1:" PPC_LLARX "%0,0,%3,0\n" \
+ "andc %0,%0,%2\n" \
+ PPC_STLCX "%0,0,%3\n" \
+ "bne- 1b\n" \
+ : "=&r" (old), "+m" (*p) \
+ : "r" (mask), "r" (p) \
+ : "cc", "memory"); \
+ } \
+}
+
+DEFINE_CLROP(clear_bits, "")
+DEFINE_CLROP(clear_bits_unlock, PPC_RELEASE_BARRIER)
+
static inline void arch_set_bit(int nr, volatile unsigned long *addr)
{
set_bits(BIT_MASK(nr), addr + BIT_WORD(nr));
@@ -116,12 +158,12 @@ static inline unsigned long fn( \
__asm__ __volatile__ ( \
prefix \
"1:" PPC_LLARX "%0,0,%3,%4\n" \
- stringify_in_c(op) "%1,%0,%2\n" \
+ #op "%I2 %1,%0,%2\n" \
PPC_STLCX "%1,0,%3\n" \
"bne- 1b\n" \
postfix \
: "=&r" (old), "=&r" (t) \
- : "r" (mask), "r" (p), "i" (IS_ENABLED(CONFIG_PPC64) ? eh : 0) \
+ : "rK" (mask), "r" (p), "i" (IS_ENABLED(CONFIG_PPC64) ? eh : 0) \
: "cc", "memory"); \
return (old & mask); \
}
@@ -130,11 +172,42 @@ DEFINE_TESTOP(test_and_set_bits, or, PPC_ATOMIC_ENTRY_BARRIER,
PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_set_bits_lock, or, "",
PPC_ACQUIRE_BARRIER, 1)
-DEFINE_TESTOP(test_and_clear_bits, andc, PPC_ATOMIC_ENTRY_BARRIER,
- PPC_ATOMIC_EXIT_BARRIER, 0)
DEFINE_TESTOP(test_and_change_bits, xor, PPC_ATOMIC_ENTRY_BARRIER,
PPC_ATOMIC_EXIT_BARRIER, 0)
+static inline unsigned long test_and_clear_bits(unsigned long mask, volatile unsigned long *_p)
+{
+ unsigned long old, t;
+ unsigned long *p = (unsigned long *)_p;
+
+ if (IS_ENABLED(CONFIG_PPC32) &&
+ __builtin_constant_p(mask) && is_rlwinm_mask_valid(~mask)) {
+ asm volatile (
+ PPC_ATOMIC_ENTRY_BARRIER
+ "1:" "lwarx %0,0,%3\n"
+ "rlwinm %1,%0,0,%2\n"
+ "stwcx. %1,0,%3\n"
+ "bne- 1b\n"
+ PPC_ATOMIC_EXIT_BARRIER
+ : "=&r" (old), "=&r" (t)
+ : "n" (~mask), "r" (p)
+ : "cc", "memory");
+ } else {
+ asm volatile (
+ PPC_ATOMIC_ENTRY_BARRIER
+ "1:" PPC_LLARX "%0,0,%3,0\n"
+ "andc %1,%0,%2\n"
+ PPC_STLCX "%1,0,%3\n"
+ "bne- 1b\n"
+ PPC_ATOMIC_EXIT_BARRIER
+ : "=&r" (old), "=&r" (t)
+ : "r" (mask), "r" (p)
+ : "cc", "memory");
+ }
+
+ return (old & mask);
+}
+
static inline int arch_test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
@@ -255,8 +328,6 @@ unsigned long __arch_hweight64(__u64 w);
#include <asm-generic/bitops/hweight.h>
#endif
-#include <asm-generic/bitops/find.h>
-
/* wrappers that deal with KASAN instrumentation */
#include <asm-generic/bitops/instrumented-atomic.h>
#include <asm-generic/bitops/instrumented-lock.h>
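
is_rlwinm_mask_valid() above decides, for compile-time-constant masks, whether the complement of a clear-mask is something rlwinm's mask operand can encode: a single contiguous run of set bits, wrap-around runs included. The bit trick is worth seeing run — a standalone check in plain C, with uint32_t in place of unsigned long:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_rlwinm_mask_valid(uint32_t x)
{
	if (!x)
		return false;
	if (x & 1)
		x = ~x;		/* make the mask non-wrapping */
	x += x & -x;		/* adding the low set bit leaves at most one bit set */
	return !(x & (x - 1));
}

int main(void)
{
	/* contiguous runs, including wrap-around ones, pass ... */
	printf("%d %d %d\n",
	       is_rlwinm_mask_valid(0x00ff0000),
	       is_rlwinm_mask_valid(0x80000001),	/* wraps bit 31 -> bit 0 */
	       is_rlwinm_mask_valid(0xffffffff));
	/* ... split masks do not */
	printf("%d\n", is_rlwinm_mask_valid(0x00ff00ff));
	return 0;
}
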
diff --git a/arch/powerpc/include/asm/book3s/32/kup.h b/arch/powerpc/include/asm/book3s/32/kup.h
index 9f38040f0641..678f9c9d89b6 100644
--- a/arch/powerpc/include/asm/book3s/32/kup.h
+++ b/arch/powerpc/include/asm/book3s/32/kup.h
@@ -12,50 +12,10 @@
#include <linux/jump_label.h>
extern struct static_key_false disable_kuap_key;
-extern struct static_key_false disable_kuep_key;
-
-static __always_inline bool kuap_is_disabled(void)
-{
- return !IS_ENABLED(CONFIG_PPC_KUAP) || static_branch_unlikely(&disable_kuap_key);
-}
static __always_inline bool kuep_is_disabled(void)
{
- return !IS_ENABLED(CONFIG_PPC_KUEP) || static_branch_unlikely(&disable_kuep_key);
-}
-
-static inline void kuep_lock(void)
-{
- if (kuep_is_disabled())
- return;
-
- update_user_segments(mfsr(0) | SR_NX);
- /*
- * This isync() shouldn't be necessary as the kernel is not excepted to
- * run any instruction in userspace soon after the update of segments,
- * but hash based cores (at least G3) seem to exhibit a random
- * behaviour when the 'isync' is not there. 603 cores don't have this
- * behaviour so don't do the 'isync' as it saves several CPU cycles.
- */
- if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
- isync(); /* Context sync required after mtsr() */
-}
-
-static inline void kuep_unlock(void)
-{
- if (kuep_is_disabled())
- return;
-
- update_user_segments(mfsr(0) & ~SR_NX);
- /*
- * This isync() shouldn't be necessary as a 'rfi' will soon be executed
- * to return to userspace, but hash based cores (at least G3) seem to
- * exhibit a random behaviour when the 'isync' is not there. 603 cores
- * don't have this behaviour so don't do the 'isync' as it saves several
- * CPU cycles.
- */
- if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
- isync(); /* Context sync required after mtsr() */
+ return !IS_ENABLED(CONFIG_PPC_KUEP);
}
#ifdef CONFIG_PPC_KUAP
@@ -65,6 +25,11 @@ static inline void kuep_unlock(void)
#define KUAP_NONE (~0UL)
#define KUAP_ALL (~1UL)
+static __always_inline bool kuap_is_disabled(void)
+{
+ return static_branch_unlikely(&disable_kuap_key);
+}
+
static inline void kuap_lock_one(unsigned long addr)
{
mtsr(mfsr(addr) | SR_KS, addr);
@@ -92,7 +57,7 @@ static inline void kuap_unlock_all(void)
void kuap_lock_all_ool(void);
void kuap_unlock_all_ool(void);
-static inline void kuap_lock(unsigned long addr, bool ool)
+static inline void kuap_lock_addr(unsigned long addr, bool ool)
{
if (likely(addr != KUAP_ALL))
kuap_lock_one(addr);
@@ -112,33 +77,31 @@ static inline void kuap_unlock(unsigned long addr, bool ool)
kuap_unlock_all_ool();
}
-static inline void kuap_save_and_lock(struct pt_regs *regs)
+static inline void __kuap_lock(void)
{
- unsigned long kuap = current->thread.kuap;
+}
- if (kuap_is_disabled())
- return;
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
+{
+ unsigned long kuap = current->thread.kuap;
regs->kuap = kuap;
if (unlikely(kuap == KUAP_NONE))
return;
current->thread.kuap = KUAP_NONE;
- kuap_lock(kuap, false);
+ kuap_lock_addr(kuap, false);
}
static inline void kuap_user_restore(struct pt_regs *regs)
{
}
-static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
- if (kuap_is_disabled())
- return;
-
if (unlikely(kuap != KUAP_NONE)) {
current->thread.kuap = KUAP_NONE;
- kuap_lock(kuap, false);
+ kuap_lock_addr(kuap, false);
}
if (likely(regs->kuap == KUAP_NONE))
@@ -149,29 +112,18 @@ static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
kuap_unlock(regs->kuap, false);
}
-static inline unsigned long kuap_get_and_assert_locked(void)
+static inline unsigned long __kuap_get_and_assert_locked(void)
{
unsigned long kuap = current->thread.kuap;
- if (kuap_is_disabled())
- return KUAP_NONE;
-
WARN_ON_ONCE(IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && kuap != KUAP_NONE);
return kuap;
}
-static inline void kuap_assert_locked(void)
-{
- kuap_get_and_assert_locked();
-}
-
-static __always_inline void allow_user_access(void __user *to, const void __user *from,
- u32 size, unsigned long dir)
+static __always_inline void __allow_user_access(void __user *to, const void __user *from,
+ u32 size, unsigned long dir)
{
- if (kuap_is_disabled())
- return;
-
BUILD_BUG_ON(!__builtin_constant_p(dir));
if (!(dir & KUAP_WRITE))
@@ -181,42 +133,33 @@ static __always_inline void allow_user_access(void __user *to, const void __user
kuap_unlock_one((__force u32)to);
}
-static __always_inline void prevent_user_access(unsigned long dir)
+static __always_inline void __prevent_user_access(unsigned long dir)
{
u32 kuap = current->thread.kuap;
- if (kuap_is_disabled())
- return;
-
BUILD_BUG_ON(!__builtin_constant_p(dir));
if (!(dir & KUAP_WRITE))
return;
current->thread.kuap = KUAP_NONE;
- kuap_lock(kuap, true);
+ kuap_lock_addr(kuap, true);
}
-static inline unsigned long prevent_user_access_return(void)
+static inline unsigned long __prevent_user_access_return(void)
{
unsigned long flags = current->thread.kuap;
- if (kuap_is_disabled())
- return KUAP_NONE;
-
if (flags != KUAP_NONE) {
current->thread.kuap = KUAP_NONE;
- kuap_lock(flags, true);
+ kuap_lock_addr(flags, true);
}
return flags;
}
-static inline void restore_user_access(unsigned long flags)
+static inline void __restore_user_access(unsigned long flags)
{
- if (kuap_is_disabled())
- return;
-
if (flags != KUAP_NONE) {
current->thread.kuap = flags;
kuap_unlock(flags, true);
@@ -224,13 +167,10 @@ static inline void restore_user_access(unsigned long flags)
}
static inline bool
-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
unsigned long kuap = regs->kuap;
- if (kuap_is_disabled())
- return false;
-
if (!is_write || kuap == KUAP_ALL)
return false;
if (kuap == KUAP_NONE)
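
The hunk above splits each KUAP entry point into a generic, gated wrapper and a bare __-prefixed worker, and renames the address-based lock to kuap_lock_addr(). A minimal caller-side sketch of the resulting pattern, mirroring __kuap_kernel_restore() above (the surrounding function is hypothetical):

        /* Close a user-access window that may have been left open. */
        static void example_close_window(void)
        {
                unsigned long kuap = current->thread.kuap;

                if (kuap != KUAP_NONE) {
                        current->thread.kuap = KUAP_NONE;
                        /* re-sets SR_KS for one segment, or for all of them */
                        kuap_lock_addr(kuap, false);
                }
        }
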
diff --git a/arch/powerpc/include/asm/book3s/32/mmu-hash.h b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
index f5be185cbdf8..78c6a5fde1d6 100644
--- a/arch/powerpc/include/asm/book3s/32/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/32/mmu-hash.h
@@ -64,7 +64,82 @@ struct ppc_bat {
#define SR_KP 0x20000000 /* User key */
#define SR_KS 0x40000000 /* Supervisor key */
-#ifndef __ASSEMBLY__
+#ifdef __ASSEMBLY__
+
+#include <asm/asm-offsets.h>
+
+.macro uus_addi sr reg1 reg2 imm
+ .if NUM_USER_SEGMENTS > \sr
+ addi \reg1,\reg2,\imm
+ .endif
+.endm
+
+.macro uus_mtsr sr reg1
+ .if NUM_USER_SEGMENTS > \sr
+ mtsr \sr, \reg1
+ .endif
+.endm
+
+/*
+ * This isync() shouldn't be necessary as the kernel is not expected to run
+ * any instruction in userspace soon after the update of segments, and an
+ * 'rfi' instruction is used to return to userspace, but hash based cores
+ * (at least G3) seem to exhibit a random behaviour when the 'isync' is not
+ * there. 603 cores don't have this behaviour so don't do the 'isync' as it
+ * saves several CPU cycles.
+ */
+.macro uus_isync
+#ifdef CONFIG_PPC_BOOK3S_604
+BEGIN_MMU_FTR_SECTION
+ isync
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+#endif
+.endm
+
+.macro update_user_segments_by_4 tmp1 tmp2 tmp3 tmp4
+ uus_addi 1, \tmp2, \tmp1, 0x111
+ uus_addi 2, \tmp3, \tmp1, 0x222
+ uus_addi 3, \tmp4, \tmp1, 0x333
+
+ uus_mtsr 0, \tmp1
+ uus_mtsr 1, \tmp2
+ uus_mtsr 2, \tmp3
+ uus_mtsr 3, \tmp4
+
+ uus_addi 4, \tmp1, \tmp1, 0x444
+ uus_addi 5, \tmp2, \tmp2, 0x444
+ uus_addi 6, \tmp3, \tmp3, 0x444
+ uus_addi 7, \tmp4, \tmp4, 0x444
+
+ uus_mtsr 4, \tmp1
+ uus_mtsr 5, \tmp2
+ uus_mtsr 6, \tmp3
+ uus_mtsr 7, \tmp4
+
+ uus_addi 8, \tmp1, \tmp1, 0x444
+ uus_addi 9, \tmp2, \tmp2, 0x444
+ uus_addi 10, \tmp3, \tmp3, 0x444
+ uus_addi 11, \tmp4, \tmp4, 0x444
+
+ uus_mtsr 8, \tmp1
+ uus_mtsr 9, \tmp2
+ uus_mtsr 10, \tmp3
+ uus_mtsr 11, \tmp4
+
+ uus_addi 12, \tmp1, \tmp1, 0x444
+ uus_addi 13, \tmp2, \tmp2, 0x444
+ uus_addi 14, \tmp3, \tmp3, 0x444
+ uus_addi 15, \tmp4, \tmp4, 0x444
+
+ uus_mtsr 12, \tmp1
+ uus_mtsr 13, \tmp2
+ uus_mtsr 14, \tmp3
+ uus_mtsr 15, \tmp4
+
+ uus_isync
+.endm
+
+#else
/*
* This macro defines the mapping from contexts to VSIDs (virtual
@@ -100,9 +175,14 @@ struct hash_pte {
typedef struct {
unsigned long id;
+ unsigned long sr0;
void __user *vdso;
} mm_context_t;
+#ifdef CONFIG_PPC_KUEP
+#define INIT_MM_CONTEXT(mm) .context.sr0 = SR_NX
+#endif
+
void update_bats(void);
static inline void cleanup_cpu_mmu_context(void) { }
@@ -143,6 +223,8 @@ static __always_inline void update_user_segments(u32 val)
update_user_segment(15, val);
}
+int __init find_free_bat(void);
+unsigned int bat_block_size(unsigned long base, unsigned long top);
#endif /* !__ASSEMBLY__ */
/* We happily ignore the smaller BATs on 601, we don't actually use
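
The update_user_segments_by_4 macro added above writes all 16 segment registers using only four scratch registers by staggering the additions and mtsr's. Numerically, segment n receives base + n * 0x111: the first group uses offsets 0x111/0x222/0x333, then each scratch register steps by 0x444 per group. A C model of that arithmetic, where mtsr_model() is a hypothetical stand-in for the mtsr instruction:

        static void model_update_user_segments(u32 base)
        {
                int i;

                for (i = 0; i < 16; i++) {
                        if (i < NUM_USER_SEGMENTS)      /* mirrors the .if guard */
                                mtsr_model(i, base + i * 0x111);
                }
        }
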
diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h
index 609c80f67194..f8b94f78403f 100644
--- a/arch/powerpc/include/asm/book3s/32/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/32/pgtable.h
@@ -178,6 +178,7 @@ static inline bool pte_user(pte_t pte)
#ifndef __ASSEMBLY__
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index 674fe0e890dc..a7a0572f3846 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -99,10 +99,6 @@
* Defines the address of the vmemap area, in its own region on
* hash table CPUs.
*/
-#ifdef CONFIG_PPC_MM_SLICES
-#define HAVE_ARCH_UNMAPPED_AREA
-#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
-#endif /* CONFIG_PPC_MM_SLICES */
/* PTEIDX nibble */
#define _PTEIDX_SECONDARY 0x8
diff --git a/arch/powerpc/include/asm/book3s/64/kup.h b/arch/powerpc/include/asm/book3s/64/kup.h
index 170339969b7c..69fcf63eec94 100644
--- a/arch/powerpc/include/asm/book3s/64/kup.h
+++ b/arch/powerpc/include/asm/book3s/64/kup.h
@@ -229,6 +229,11 @@ static inline u64 current_thread_iamr(void)
#ifdef CONFIG_PPC_KUAP
+static __always_inline bool kuap_is_disabled(void)
+{
+ return !mmu_has_feature(MMU_FTR_BOOK3S_KUAP);
+}
+
static inline void kuap_user_restore(struct pt_regs *regs)
{
bool restore_amr = false, restore_iamr = false;
@@ -268,40 +273,38 @@ static inline void kuap_user_restore(struct pt_regs *regs)
*/
}
-static inline void kuap_kernel_restore(struct pt_regs *regs,
- unsigned long amr)
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
{
- if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
- if (unlikely(regs->amr != amr)) {
- isync();
- mtspr(SPRN_AMR, regs->amr);
- /*
- * No isync required here because we are about to rfi
- * back to previous context before any user accesses
- * would be made, which is a CSI.
- */
- }
- }
+ if (likely(regs->amr == amr))
+ return;
+
+ isync();
+ mtspr(SPRN_AMR, regs->amr);
/*
+ * No isync required here because we are about to rfi
+ * back to previous context before any user accesses
+ * would be made, which is a CSI.
+ *
* No need to restore IAMR when returning to kernel space.
*/
}
-static inline unsigned long kuap_get_and_assert_locked(void)
+static inline unsigned long __kuap_get_and_assert_locked(void)
{
- if (mmu_has_feature(MMU_FTR_BOOK3S_KUAP)) {
- unsigned long amr = mfspr(SPRN_AMR);
- if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
- WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
- return amr;
- }
- return 0;
+ unsigned long amr = mfspr(SPRN_AMR);
+
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) /* kuap_check_amr() */
+ WARN_ON_ONCE(amr != AMR_KUAP_BLOCKED);
+ return amr;
}
-static inline void kuap_assert_locked(void)
+/* Do nothing; book3s/64 does this locking in assembly */
+static inline void __kuap_lock(void)
+{
+}
+
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
{
- if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
- WARN_ON_ONCE(mfspr(SPRN_AMR) != AMR_KUAP_BLOCKED);
}
/*
@@ -339,11 +342,8 @@ static inline void set_kuap(unsigned long value)
isync();
}
-static inline bool bad_kuap_fault(struct pt_regs *regs, unsigned long address,
- bool is_write)
+static inline bool __bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
- if (!mmu_has_feature(MMU_FTR_BOOK3S_KUAP))
- return false;
/*
* For radix this will be a storage protection fault (DSISR_PROTFAULT).
* For hash this will be a key fault (DSISR_KEYFAULT)
diff --git a/arch/powerpc/include/asm/book3s/64/mmu-hash.h b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
index 3004f3323144..21f780942911 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu-hash.h
@@ -523,8 +523,14 @@ void slb_save_contents(struct slb_entry *slb_ptr);
void slb_dump_contents(struct slb_entry *slb_ptr);
extern void slb_vmalloc_update(void);
-extern void slb_set_size(u16 size);
void preload_new_slb_context(unsigned long start, unsigned long sp);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+void slb_set_size(u16 size);
+#else
+static inline void slb_set_size(u16 size) { }
+#endif
+
#endif /* __ASSEMBLY__ */
/*
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h
index c02f42d1031e..ba5b1becf518 100644
--- a/arch/powerpc/include/asm/book3s/64/mmu.h
+++ b/arch/powerpc/include/asm/book3s/64/mmu.h
@@ -4,6 +4,12 @@
#include <asm/page.h>
+#ifdef CONFIG_HUGETLB_PAGE
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#endif
+#define HAVE_ARCH_UNMAPPED_AREA
+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+
#ifndef __ASSEMBLY__
/*
* Page size definition
@@ -62,6 +68,9 @@ extern struct patb_entry *partition_tb;
#define PRTS_MASK 0x1f /* process table size field */
#define PRTB_MASK 0x0ffffffffffff000UL
+/* Number of supported LPID bits */
+extern unsigned int mmu_lpid_bits;
+
/* Number of supported PID bits */
extern unsigned int mmu_pid_bits;
@@ -76,10 +85,8 @@ extern unsigned long __ro_after_init radix_mem_block_size;
#define PRTB_SIZE_SHIFT (mmu_pid_bits + 4)
#define PRTB_ENTRIES (1ul << mmu_pid_bits)
-/*
- * Power9 currently only support 64K partition table size.
- */
-#define PATB_SIZE_SHIFT 16
+#define PATB_SIZE_SHIFT (mmu_lpid_bits + 4)
+#define PATB_ENTRIES (1ul << mmu_lpid_bits)
typedef unsigned long mm_context_id_t;
struct spinlock;
@@ -98,7 +105,9 @@ typedef struct {
* from EA and new context ids to build the new VAs.
*/
mm_context_id_t id;
+#ifdef CONFIG_PPC_64S_HASH_MMU
mm_context_id_t extended_id[TASK_SIZE_USER64/TASK_CONTEXT_SIZE];
+#endif
};
/* Number of bits in the mm_cpumask */
@@ -110,7 +119,9 @@ typedef struct {
/* Number of user space windows opened in process mm_context */
atomic_t vas_windows;
+#ifdef CONFIG_PPC_64S_HASH_MMU
struct hash_mm_context *hash_context;
+#endif
void __user *vdso;
/*
@@ -133,6 +144,7 @@ typedef struct {
#endif
} mm_context_t;
+#ifdef CONFIG_PPC_64S_HASH_MMU
static inline u16 mm_ctx_user_psize(mm_context_t *ctx)
{
return ctx->hash_context->user_psize;
@@ -193,8 +205,15 @@ static inline struct subpage_prot_table *mm_ctx_subpage_prot(mm_context_t *ctx)
extern int mmu_linear_psize;
extern int mmu_virtual_psize;
extern int mmu_vmalloc_psize;
-extern int mmu_vmemmap_psize;
extern int mmu_io_psize;
+#else /* CONFIG_PPC_64S_HASH_MMU */
+#ifdef CONFIG_PPC_64K_PAGES
+#define mmu_virtual_psize MMU_PAGE_64K
+#else
+#define mmu_virtual_psize MMU_PAGE_4K
+#endif
+#endif
+extern int mmu_vmemmap_psize;
/* MMU initialization */
void mmu_early_init_devtree(void);
@@ -233,12 +252,13 @@ static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
* know which translations we will pick. Hence go with hash
* restrictions.
*/
- return hash__setup_initial_memory_limit(first_memblock_base,
- first_memblock_size);
+ if (!early_radix_enabled())
+ hash__setup_initial_memory_limit(first_memblock_base,
+ first_memblock_size);
}
#ifdef CONFIG_PPC_PSERIES
-extern void radix_init_pseries(void);
+void __init radix_init_pseries(void);
#else
static inline void radix_init_pseries(void) { }
#endif
@@ -255,6 +275,7 @@ static inline void radix_init_pseries(void) { }
void cleanup_cpu_mmu_context(void);
#endif
+#ifdef CONFIG_PPC_64S_HASH_MMU
static inline int get_user_context(mm_context_t *ctx, unsigned long ea)
{
int index = ea >> MAX_EA_BITS_PER_CONTEXT;
@@ -274,6 +295,7 @@ static inline unsigned long get_user_vsid(mm_context_t *ctx,
return get_vsid(context, ea, ssize);
}
+#endif
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_BOOK3S_64_MMU_H_ */
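
With PATB_SIZE_SHIFT now derived from mmu_lpid_bits instead of a fixed 16, the partition table scales with the number of supported LPID bits. Since the shift is mmu_lpid_bits + 4, each entry occupies 1 << 4 = 16 bytes; an illustrative sizing fragment:

        unsigned long entries = 1ul << mmu_lpid_bits;       /* PATB_ENTRIES */
        unsigned long bytes   = 1ul << (mmu_lpid_bits + 4); /* 1 << PATB_SIZE_SHIFT */
        /* the old fixed behaviour corresponds to mmu_lpid_bits == 12:
           4096 entries * 16 bytes = 64K */
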
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 33e073d6b0c4..875730d5af40 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1082,6 +1082,8 @@ static inline int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t p
return hash__map_kernel_page(ea, pa, prot);
}
+void unmap_kernel_page(unsigned long va);
+
static inline int __meminit vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys)
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
index 3b95769739c7..8b762f282190 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush-hash.h
@@ -112,8 +112,14 @@ static inline void hash__flush_tlb_kernel_range(unsigned long start,
struct mmu_gather;
extern void hash__tlb_flush(struct mmu_gather *tlb);
+void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
/* Private function for use by PCI IO mapping code */
extern void __flush_hash_table_range(unsigned long start, unsigned long end);
extern void flush_tlb_pmd_range(struct mm_struct *mm, pmd_t *pmd,
unsigned long addr);
+#else
+static inline void __flush_hash_table_range(unsigned long start, unsigned long end) { }
+#endif
#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_HASH_H */
diff --git a/arch/powerpc/include/asm/book3s/64/tlbflush.h b/arch/powerpc/include/asm/book3s/64/tlbflush.h
index 215973b4cb26..d2e80f178b6d 100644
--- a/arch/powerpc/include/asm/book3s/64/tlbflush.h
+++ b/arch/powerpc/include/asm/book3s/64/tlbflush.h
@@ -14,7 +14,6 @@ enum {
TLB_INVAL_SCOPE_LPID = 1, /* invalidate TLBs for current LPID */
};
-#ifdef CONFIG_PPC_NATIVE
static inline void tlbiel_all(void)
{
/*
@@ -30,9 +29,6 @@ static inline void tlbiel_all(void)
else
hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}
-#else
-static inline void tlbiel_all(void) { BUG(); }
-#endif
static inline void tlbiel_all_lpid(bool radix)
{
diff --git a/arch/powerpc/include/asm/book3s/pgtable.h b/arch/powerpc/include/asm/book3s/pgtable.h
index ad130e15a126..e8269434ecbe 100644
--- a/arch/powerpc/include/asm/book3s/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/pgtable.h
@@ -25,6 +25,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT
+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC_64S_HASH_MMU)
/*
* This gets called at the end of handling a page fault, when
* the kernel has put a new PTE into the page table for the process.
@@ -35,6 +36,9 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
* waiting for the inevitable extra hash-table miss exception.
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep);
+#else
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {}
+#endif
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/powerpc/include/asm/btext.h b/arch/powerpc/include/asm/btext.h
index 461b0f193864..860f8868f11e 100644
--- a/arch/powerpc/include/asm/btext.h
+++ b/arch/powerpc/include/asm/btext.h
@@ -23,12 +23,12 @@ extern void btext_unmap(void);
extern void btext_drawchar(char c);
extern void btext_drawstring(const char *str);
-extern void btext_drawhex(unsigned long v);
-extern void btext_drawtext(const char *c, unsigned int len);
+void __init btext_drawhex(unsigned long v);
+void __init btext_drawtext(const char *c, unsigned int len);
-extern void btext_clearscreen(void);
-extern void btext_flushscreen(void);
-extern void btext_flushline(void);
+void __init btext_clearscreen(void);
+void __init btext_flushscreen(void);
+void __init btext_flushline(void);
#endif /* __KERNEL__ */
#endif /* __PPC_BTEXT_H */
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 4ba834599c4d..e26080539c31 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -24,20 +24,20 @@
bool is_offset_in_branch_range(long offset);
bool is_offset_in_cond_branch_range(long offset);
-int create_branch(struct ppc_inst *instr, const u32 *addr,
+int create_branch(ppc_inst_t *instr, const u32 *addr,
unsigned long target, int flags);
-int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
+int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
unsigned long target, int flags);
int patch_branch(u32 *addr, unsigned long target, int flags);
-int patch_instruction(u32 *addr, struct ppc_inst instr);
-int raw_patch_instruction(u32 *addr, struct ppc_inst instr);
+int patch_instruction(u32 *addr, ppc_inst_t instr);
+int raw_patch_instruction(u32 *addr, ppc_inst_t instr);
static inline unsigned long patch_site_addr(s32 *site)
{
return (unsigned long)site + *site;
}
-static inline int patch_instruction_site(s32 *site, struct ppc_inst instr)
+static inline int patch_instruction_site(s32 *site, ppc_inst_t instr)
{
return patch_instruction((u32 *)patch_site_addr(site), instr);
}
@@ -58,18 +58,26 @@ static inline int modify_instruction_site(s32 *site, unsigned int clr, unsigned
return modify_instruction((unsigned int *)patch_site_addr(site), clr, set);
}
-int instr_is_relative_branch(struct ppc_inst instr);
-int instr_is_relative_link_branch(struct ppc_inst instr);
+static inline unsigned int branch_opcode(ppc_inst_t instr)
+{
+ return ppc_inst_primary_opcode(instr) & 0x3F;
+}
+
+static inline int instr_is_branch_iform(ppc_inst_t instr)
+{
+ return branch_opcode(instr) == 18;
+}
+
+static inline int instr_is_branch_bform(ppc_inst_t instr)
+{
+ return branch_opcode(instr) == 16;
+}
+
+int instr_is_relative_branch(ppc_inst_t instr);
+int instr_is_relative_link_branch(ppc_inst_t instr);
unsigned long branch_target(const u32 *instr);
-int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src);
-extern bool is_conditional_branch(struct ppc_inst instr);
-#ifdef CONFIG_PPC_BOOK3E_64
-void __patch_exception(int exc, unsigned long addr);
-#define patch_exception(exc, name) do { \
- extern unsigned int name; \
- __patch_exception((exc), (unsigned long)&name); \
-} while (0)
-#endif
+int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src);
+bool is_conditional_branch(ppc_inst_t instr);
#define OP_RT_RA_MASK 0xffff0000UL
#define LIS_R2 (PPC_RAW_LIS(_R2, 0))
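
The new branch_opcode()/instr_is_branch_iform()/instr_is_branch_bform() helpers make branch classification available to every user of this header. A short sketch of how they combine with the existing branch_target() (addr is a hypothetical pointer into kernel text):

        ppc_inst_t insn = ppc_inst_read(addr);
        unsigned long target = 0;

        if (instr_is_branch_iform(insn) || instr_is_branch_bform(insn))
                target = branch_target(addr);   /* decode the branch destination */
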
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
index bda45788cfcc..9ee192a6c5d7 100644
--- a/arch/powerpc/include/asm/cpm2.h
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -1133,8 +1133,8 @@ enum cpm_clk {
CPM_CLK_DUMMY
};
-extern int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode);
-extern int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock);
+int __init cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode);
+int __init cpm2_smc_clk_setup(enum cpm_clk_target target, int clock);
#define CPM_PIN_INPUT 0
#define CPM_PIN_OUTPUT 1
@@ -1143,7 +1143,7 @@ extern int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock);
#define CPM_PIN_GPIO 4
#define CPM_PIN_OPENDRAIN 8
-void cpm2_set_pin(int port, int pin, int flags);
+void __init cpm2_set_pin(int port, int pin, int flags);
#endif /* __CPM2__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
index 9844b3ded187..0cce5dc7fb1c 100644
--- a/arch/powerpc/include/asm/cpuidle.h
+++ b/arch/powerpc/include/asm/cpuidle.h
@@ -85,7 +85,7 @@ extern struct pnv_idle_states_t *pnv_idle_states;
extern int nr_pnv_idle_states;
unsigned long pnv_cpu_offline(unsigned int cpu);
-int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags);
+int __init validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags);
static inline void report_invalid_psscr_val(u64 psscr_val, int err)
{
switch (err) {
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h
index b167186aaee4..f26c430f3982 100644
--- a/arch/powerpc/include/asm/cputhreads.h
+++ b/arch/powerpc/include/asm/cputhreads.h
@@ -32,44 +32,11 @@ extern cpumask_t threads_core_mask;
#define threads_core_mask (*get_cpu_mask(0))
#endif
-/* cpu_thread_mask_to_cores - Return a cpumask of one per cores
- * hit by the argument
- *
- * @threads: a cpumask of online threads
- *
- * This function returns a cpumask which will have one online cpu's
- * bit set for each core that has at least one thread set in the argument.
- *
- * This can typically be used for things like IPI for tlb invalidations
- * since those need to be done only once per core/TLB
- */
-static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
-{
- cpumask_t tmp, res;
- int i, cpu;
-
- cpumask_clear(&res);
- for (i = 0; i < NR_CPUS; i += threads_per_core) {
- cpumask_shift_left(&tmp, &threads_core_mask, i);
- if (cpumask_intersects(threads, &tmp)) {
- cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
- if (cpu < nr_cpu_ids)
- cpumask_set_cpu(cpu, &res);
- }
- }
- return res;
-}
-
static inline int cpu_nr_cores(void)
{
return nr_cpu_ids >> threads_shift;
}
-static inline cpumask_t cpu_online_cores_map(void)
-{
- return cpu_thread_mask_to_cores(cpu_online_mask);
-}
-
#ifdef CONFIG_SMP
int cpu_core_index_of_thread(int cpu);
int cpu_first_thread_of_core(int core);
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index b1a5bba2e0b9..bd513fd49be9 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -460,7 +460,7 @@ static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
}
-void eeh_cache_debugfs_init(void);
+void __init eeh_cache_debugfs_init(void);
#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index 40cdcb2fb057..b1ef1e92c34a 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -149,6 +149,10 @@ exc_##label##_book3e:
addi r11,r13,PACA_EXTLB; \
TLB_MISS_RESTORE(r11)
+#ifndef __ASSEMBLY__
+extern unsigned int interrupt_base_book3e;
+#endif
+
#define SET_IVOR(vector_number, vector_offset) \
LOAD_REG_ADDR(r3,interrupt_base_book3e);\
ori r3,r3,vector_offset@l; \
diff --git a/arch/powerpc/include/asm/fadump-internal.h b/arch/powerpc/include/asm/fadump-internal.h
index 8d61c8f3fec4..52189928ec08 100644
--- a/arch/powerpc/include/asm/fadump-internal.h
+++ b/arch/powerpc/include/asm/fadump-internal.h
@@ -137,10 +137,10 @@ struct fadump_ops {
};
/* Helper functions */
-s32 fadump_setup_cpu_notes_buf(u32 num_cpus);
+s32 __init fadump_setup_cpu_notes_buf(u32 num_cpus);
void fadump_free_cpu_notes_buf(void);
-u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs);
-void fadump_update_elfcore_header(char *bufp);
+u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs);
+void __init fadump_update_elfcore_header(char *bufp);
bool is_fadump_boot_mem_contiguous(void);
bool is_fadump_reserved_mem_contiguous(void);
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 97a3bd9ffeb9..9b702d2b80fb 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -80,8 +80,6 @@ enum {
FW_FEATURE_POWERNV_ALWAYS = 0,
FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
- FW_FEATURE_NATIVE_POSSIBLE = 0,
- FW_FEATURE_NATIVE_ALWAYS = 0,
FW_FEATURE_POSSIBLE =
#ifdef CONFIG_PPC_PSERIES
FW_FEATURE_PSERIES_POSSIBLE |
@@ -92,9 +90,6 @@ enum {
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_POSSIBLE |
#endif
-#ifdef CONFIG_PPC_NATIVE
- FW_FEATURE_NATIVE_ALWAYS |
-#endif
0,
FW_FEATURE_ALWAYS =
#ifdef CONFIG_PPC_PSERIES
@@ -106,9 +101,6 @@ enum {
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_ALWAYS &
#endif
-#ifdef CONFIG_PPC_NATIVE
- FW_FEATURE_NATIVE_ALWAYS &
-#endif
FW_FEATURE_POSSIBLE,
#else /* CONFIG_PPC64 */
diff --git a/arch/powerpc/include/asm/fixmap.h b/arch/powerpc/include/asm/fixmap.h
index 947b5b9c4424..a832aeafe560 100644
--- a/arch/powerpc/include/asm/fixmap.h
+++ b/arch/powerpc/include/asm/fixmap.h
@@ -111,8 +111,10 @@ static inline void __set_fixmap(enum fixed_addresses idx,
BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
else if (WARN_ON(idx >= __end_of_fixed_addresses))
return;
-
- map_kernel_page(__fix_to_virt(idx), phys, flags);
+ if (pgprot_val(flags))
+ map_kernel_page(__fix_to_virt(idx), phys, flags);
+ else
+ unmap_kernel_page(__fix_to_virt(idx));
}
#define __early_set_fixmap __set_fixmap
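
__set_fixmap() now treats an empty pgprot as a request to tear the mapping down via the new unmap_kernel_page(). Usage sketch (FIX_EXAMPLE is a hypothetical fixmap index):

        __set_fixmap(FIX_EXAMPLE, phys, PAGE_KERNEL);  /* install the mapping */
        __set_fixmap(FIX_EXAMPLE, phys, __pgprot(0));  /* pgprot 0: unmap_kernel_page() */
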
diff --git a/arch/powerpc/include/asm/floppy.h b/arch/powerpc/include/asm/floppy.h
index 7af9a68fd949..f8ce178b43b7 100644
--- a/arch/powerpc/include/asm/floppy.h
+++ b/arch/powerpc/include/asm/floppy.h
@@ -134,17 +134,19 @@ static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
int dir;
doing_vdma = 0;
- dir = (mode == DMA_MODE_READ) ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE;
+ dir = (mode == DMA_MODE_READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
if (bus_addr
&& (addr != prev_addr || size != prev_size || dir != prev_dir)) {
/* different from last time -- unmap prev */
- pci_unmap_single(isa_bridge_pcidev, bus_addr, prev_size, prev_dir);
+ dma_unmap_single(&isa_bridge_pcidev->dev, bus_addr, prev_size,
+ prev_dir);
bus_addr = 0;
}
if (!bus_addr) /* need to map it */
- bus_addr = pci_map_single(isa_bridge_pcidev, addr, size, dir);
+ bus_addr = dma_map_single(&isa_bridge_pcidev->dev, addr, size,
+ dir);
/* remember this one as prev */
prev_addr = addr;
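
The floppy DMA hunk is a straight conversion from the deprecated PCI DMA wrappers to the generic DMA API: the struct pci_dev argument becomes &pdev->dev and the PCI_DMA_* direction constants become DMA_*. The general shape of the conversion (sketch, not tied to this driver):

        /* before: bus = pci_map_single(pdev, addr, size, PCI_DMA_TODEVICE); */
        dma_addr_t bus = dma_map_single(&pdev->dev, addr, size, DMA_TO_DEVICE);

        /* before: pci_unmap_single(pdev, bus, size, PCI_DMA_TODEVICE); */
        dma_unmap_single(&pdev->dev, bus, size, DMA_TO_DEVICE);
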
diff --git a/arch/powerpc/include/asm/head-64.h b/arch/powerpc/include/asm/head-64.h
index 242204e12993..d73153b0275d 100644
--- a/arch/powerpc/include/asm/head-64.h
+++ b/arch/powerpc/include/asm/head-64.h
@@ -98,13 +98,9 @@ start_text:
. = sname##_len;
#define USE_FIXED_SECTION(sname) \
- fs_label = start_##sname; \
- fs_start = sname##_start; \
use_ftsec sname;
#define USE_TEXT_SECTION() \
- fs_label = start_text; \
- fs_start = text_start; \
.text
#define CLOSE_FIXED_SECTION(sname) \
@@ -161,13 +157,15 @@ name:
* - ABS_ADDR is used to find the absolute address of any symbol, from within
* a fixed section.
*/
-#define DEFINE_FIXED_SYMBOL(label) \
- label##_absolute = (label - fs_label + fs_start)
+// define label as being _in_ sname
+#define DEFINE_FIXED_SYMBOL(label, sname) \
+ label##_absolute = (label - start_ ## sname + sname ## _start)
#define FIXED_SYMBOL_ABS_ADDR(label) \
(label##_absolute)
-#define ABS_ADDR(label) (label - fs_label + fs_start)
+// find label from _within_ sname
+#define ABS_ADDR(label, sname) (label - start_ ## sname + sname ## _start)
#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index f18c543bc01d..962708fa1017 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -15,7 +15,7 @@
extern bool hugetlb_disabled;
-void hugetlbpage_init_default(void);
+void __init hugetlbpage_init_default(void);
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
unsigned long len);
diff --git a/arch/powerpc/include/asm/hw_breakpoint.h b/arch/powerpc/include/asm/hw_breakpoint.h
index abebfbee5b1c..84d39fd42f71 100644
--- a/arch/powerpc/include/asm/hw_breakpoint.h
+++ b/arch/powerpc/include/asm/hw_breakpoint.h
@@ -10,7 +10,6 @@
#define _PPC_BOOK3S_64_HW_BREAKPOINT_H
#include <asm/cpu_has_feature.h>
-#include <asm/inst.h>
#ifdef __KERNEL__
struct arch_hw_breakpoint {
@@ -56,11 +55,11 @@ static inline int nr_wp_slots(void)
return cpu_has_feature(CPU_FTR_DAWR1) ? 2 : 1;
}
-bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr,
+bool wp_check_constraints(struct pt_regs *regs, ppc_inst_t instr,
unsigned long ea, int type, int size,
struct arch_hw_breakpoint *info);
-void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
+void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr,
int *type, int *size, unsigned long *ea);
#ifdef CONFIG_HAVE_HW_BREAKPOINT
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 21cc571ea9c2..674e5aaafcbd 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -61,7 +61,7 @@
static inline void __hard_irq_enable(void)
{
- if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+ if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
wrtee(MSR_EE);
else if (IS_ENABLED(CONFIG_PPC_8xx))
wrtspr(SPRN_EIE);
@@ -73,7 +73,7 @@ static inline void __hard_irq_enable(void)
static inline void __hard_irq_disable(void)
{
- if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+ if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
wrtee(0);
else if (IS_ENABLED(CONFIG_PPC_8xx))
wrtspr(SPRN_EID);
@@ -85,7 +85,7 @@ static inline void __hard_irq_disable(void)
static inline void __hard_EE_RI_disable(void)
{
- if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+ if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
wrtee(0);
else if (IS_ENABLED(CONFIG_PPC_8xx))
wrtspr(SPRN_NRI);
@@ -97,7 +97,7 @@ static inline void __hard_EE_RI_disable(void)
static inline void __hard_RI_enable(void)
{
- if (IS_ENABLED(CONFIG_BOOKE) || IS_ENABLED(CONFIG_40x))
+ if (IS_ENABLED(CONFIG_BOOKE_OR_40x))
return;
if (IS_ENABLED(CONFIG_PPC_8xx))
@@ -224,6 +224,42 @@ static inline bool arch_irqs_disabled(void)
return arch_irqs_disabled_flags(arch_local_save_flags());
}
+static inline void set_pmi_irq_pending(void)
+{
+ /*
+	 * Invoked from PMU callback functions to set the PMI bit in the paca.
+	 * This has to be called with irqs disabled (via hard_irq_disable()).
+ */
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+ get_paca()->irq_happened |= PACA_IRQ_PMI;
+}
+
+static inline void clear_pmi_irq_pending(void)
+{
+ /*
+ * Invoked from PMU callback functions to clear the pending PMI bit
+ * in the paca.
+ */
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+ get_paca()->irq_happened &= ~PACA_IRQ_PMI;
+}
+
+static inline bool pmi_irq_pending(void)
+{
+ /*
+ * Invoked from PMU callback functions to check if there is a pending
+ * PMI bit in the paca.
+ */
+ if (get_paca()->irq_happened & PACA_IRQ_PMI)
+ return true;
+
+ return false;
+}
+
#ifdef CONFIG_PPC_BOOK3S
/*
* To support disabling and enabling of irq with PMI, set of
@@ -306,18 +342,57 @@ static inline bool lazy_irq_pending_nocheck(void)
return __lazy_irq_pending(local_paca->irq_happened);
}
+bool power_pmu_wants_prompt_pmi(void);
+
+/*
+ * This is called by asynchronous interrupts to check whether to
+ * conditionally re-enable hard interrupts after having cleared
+ * the source of the interrupt. They are kept disabled if there
+ * is a different soft-masked interrupt pending that requires hard
+ * masking.
+ */
+static inline bool should_hard_irq_enable(void)
+{
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+ WARN_ON(mfmsr() & MSR_EE);
+#endif
+#ifdef CONFIG_PERF_EVENTS
+ /*
+ * If the PMU is not running, there is not much reason to enable
+ * MSR[EE] in irq handlers because any interrupts would just be
+ * soft-masked.
+ *
+ * TODO: Add test for 64e
+ */
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !power_pmu_wants_prompt_pmi())
+ return false;
+
+ if (get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)
+ return false;
+
+ return true;
+#else
+ return false;
+#endif
+}
+
/*
- * This is called by asynchronous interrupts to conditionally
- * re-enable hard interrupts after having cleared the source
- * of the interrupt. They are kept disabled if there is a different
- * soft-masked interrupt pending that requires hard masking.
+ * Do the hard enabling; only call this if should_hard_irq_enable() is true.
*/
-static inline void may_hard_irq_enable(void)
+static inline void do_hard_irq_enable(void)
{
- if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK)) {
- get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
- __hard_irq_enable();
- }
+#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
+ WARN_ON(irq_soft_mask_return() == IRQS_ENABLED);
+ WARN_ON(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK);
+ WARN_ON(mfmsr() & MSR_EE);
+#endif
+ /*
+ * This allows PMI interrupts (and watchdog soft-NMIs) through.
+ * There is no other reason to enable this way.
+ */
+ get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
+ __hard_irq_enable();
}
static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
@@ -398,7 +473,7 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
return !(regs->msr & MSR_EE);
}
-static inline bool may_hard_irq_enable(void)
+static __always_inline bool should_hard_irq_enable(void)
{
return false;
}
@@ -408,6 +483,10 @@ static inline void do_hard_irq_enable(void)
BUILD_BUG();
}
+static inline void clear_pmi_irq_pending(void) { }
+static inline void set_pmi_irq_pending(void) { }
+static inline bool pmi_irq_pending(void) { return false; }
+
static inline void irq_soft_mask_regs_set_state(struct pt_regs *regs, unsigned long val)
{
}
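
may_hard_irq_enable() is split into a predicate and an action so that callers make the check explicit. The expected caller pattern for an asynchronous interrupt handler, following the comments above (the handler name is hypothetical):

        static void example_async_handler(struct pt_regs *regs)
        {
                /* ...first clear the source of the interrupt... */
                if (should_hard_irq_enable())
                        do_hard_irq_enable();   /* lets PMIs / soft-NMIs through */
                /* ...then do the remaining work, possibly with MSR[EE] set... */
        }
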
diff --git a/arch/powerpc/include/asm/i8259.h b/arch/powerpc/include/asm/i8259.h
index d7f08ae49e12..75481d363cd8 100644
--- a/arch/powerpc/include/asm/i8259.h
+++ b/arch/powerpc/include/asm/i8259.h
@@ -7,7 +7,7 @@
extern void i8259_init(struct device_node *node, unsigned long intack_addr);
extern unsigned int i8259_irq(void);
-extern struct irq_domain *i8259_get_host(void);
+struct irq_domain *__init i8259_get_host(void);
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_I8259_H */
diff --git a/arch/powerpc/include/asm/inst.h b/arch/powerpc/include/asm/inst.h
index b11c0e2f9639..80b6d74146c6 100644
--- a/arch/powerpc/include/asm/inst.h
+++ b/arch/powerpc/include/asm/inst.h
@@ -3,20 +3,21 @@
#define _ASM_POWERPC_INST_H
#include <asm/ppc-opcode.h>
-
-#ifdef CONFIG_PPC64
+#include <asm/reg.h>
+#include <asm/disassemble.h>
+#include <asm/uaccess.h>
#define ___get_user_instr(gu_op, dest, ptr) \
({ \
long __gui_ret; \
u32 __user *__gui_ptr = (u32 __user *)ptr; \
- struct ppc_inst __gui_inst; \
+ ppc_inst_t __gui_inst; \
unsigned int __prefix, __suffix; \
\
__chk_user_ptr(ptr); \
__gui_ret = gu_op(__prefix, __gui_ptr); \
if (__gui_ret == 0) { \
- if ((__prefix >> 26) == OP_PREFIX) { \
+ if (IS_ENABLED(CONFIG_PPC64) && (__prefix >> 26) == OP_PREFIX) { \
__gui_ret = gu_op(__suffix, __gui_ptr + 1); \
__gui_inst = ppc_inst_prefix(__prefix, __suffix); \
} else { \
@@ -27,13 +28,6 @@
} \
__gui_ret; \
})
-#else /* !CONFIG_PPC64 */
-#define ___get_user_instr(gu_op, dest, ptr) \
-({ \
- __chk_user_ptr(ptr); \
- gu_op((dest).val, (u32 __user *)(ptr)); \
-})
-#endif /* CONFIG_PPC64 */
#define get_user_instr(x, ptr) ___get_user_instr(get_user, x, ptr)
@@ -43,44 +37,46 @@
* Instruction data type for POWER
*/
-struct ppc_inst {
- u32 val;
-#ifdef CONFIG_PPC64
- u32 suffix;
-#endif
-} __packed;
-
-static inline u32 ppc_inst_val(struct ppc_inst x)
+#if defined(CONFIG_PPC64) || defined(__CHECKER__)
+static inline u32 ppc_inst_val(ppc_inst_t x)
{
return x.val;
}
-static inline int ppc_inst_primary_opcode(struct ppc_inst x)
+#define ppc_inst(x) ((ppc_inst_t){ .val = (x) })
+
+#else
+static inline u32 ppc_inst_val(ppc_inst_t x)
{
- return ppc_inst_val(x) >> 26;
+ return x;
}
+#define ppc_inst(x) (x)
+#endif
-#define ppc_inst(x) ((struct ppc_inst){ .val = (x) })
+static inline int ppc_inst_primary_opcode(ppc_inst_t x)
+{
+ return ppc_inst_val(x) >> 26;
+}
#ifdef CONFIG_PPC64
-#define ppc_inst_prefix(x, y) ((struct ppc_inst){ .val = (x), .suffix = (y) })
+#define ppc_inst_prefix(x, y) ((ppc_inst_t){ .val = (x), .suffix = (y) })
-static inline u32 ppc_inst_suffix(struct ppc_inst x)
+static inline u32 ppc_inst_suffix(ppc_inst_t x)
{
return x.suffix;
}
#else
-#define ppc_inst_prefix(x, y) ppc_inst(x)
+#define ppc_inst_prefix(x, y) ((void)y, ppc_inst(x))
-static inline u32 ppc_inst_suffix(struct ppc_inst x)
+static inline u32 ppc_inst_suffix(ppc_inst_t x)
{
return 0;
}
#endif /* CONFIG_PPC64 */
-static inline struct ppc_inst ppc_inst_read(const u32 *ptr)
+static inline ppc_inst_t ppc_inst_read(const u32 *ptr)
{
if (IS_ENABLED(CONFIG_PPC64) && (*ptr >> 26) == OP_PREFIX)
return ppc_inst_prefix(*ptr, *(ptr + 1));
@@ -88,17 +84,17 @@ static inline struct ppc_inst ppc_inst_read(const u32 *ptr)
return ppc_inst(*ptr);
}
-static inline bool ppc_inst_prefixed(struct ppc_inst x)
+static inline bool ppc_inst_prefixed(ppc_inst_t x)
{
return IS_ENABLED(CONFIG_PPC64) && ppc_inst_primary_opcode(x) == OP_PREFIX;
}
-static inline struct ppc_inst ppc_inst_swab(struct ppc_inst x)
+static inline ppc_inst_t ppc_inst_swab(ppc_inst_t x)
{
return ppc_inst_prefix(swab32(ppc_inst_val(x)), swab32(ppc_inst_suffix(x)));
}
-static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
+static inline bool ppc_inst_equal(ppc_inst_t x, ppc_inst_t y)
{
if (ppc_inst_val(x) != ppc_inst_val(y))
return false;
@@ -107,7 +103,7 @@ static inline bool ppc_inst_equal(struct ppc_inst x, struct ppc_inst y)
return ppc_inst_suffix(x) == ppc_inst_suffix(y);
}
-static inline int ppc_inst_len(struct ppc_inst x)
+static inline int ppc_inst_len(ppc_inst_t x)
{
return ppc_inst_prefixed(x) ? 8 : 4;
}
@@ -118,14 +114,14 @@ static inline int ppc_inst_len(struct ppc_inst x)
*/
static inline u32 *ppc_inst_next(u32 *location, u32 *value)
{
- struct ppc_inst tmp;
+ ppc_inst_t tmp;
tmp = ppc_inst_read(value);
return (void *)location + ppc_inst_len(tmp);
}
-static inline unsigned long ppc_inst_as_ulong(struct ppc_inst x)
+static inline unsigned long ppc_inst_as_ulong(ppc_inst_t x)
{
if (IS_ENABLED(CONFIG_PPC32))
return ppc_inst_val(x);
@@ -135,9 +131,17 @@ static inline unsigned long ppc_inst_as_ulong(struct ppc_inst x)
return (u64)ppc_inst_val(x) << 32 | ppc_inst_suffix(x);
}
+static inline void ppc_inst_write(u32 *ptr, ppc_inst_t x)
+{
+ if (!ppc_inst_prefixed(x))
+ *ptr = ppc_inst_val(x);
+ else
+ *(u64 *)ptr = ppc_inst_as_ulong(x);
+}
+
#define PPC_INST_STR_LEN sizeof("00000000 00000000")
-static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_inst x)
+static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], ppc_inst_t x)
{
if (ppc_inst_prefixed(x))
sprintf(str, "%08x %08x", ppc_inst_val(x), ppc_inst_suffix(x));
@@ -154,6 +158,27 @@ static inline char *__ppc_inst_as_str(char str[PPC_INST_STR_LEN], struct ppc_ins
__str; \
})
-int copy_inst_from_kernel_nofault(struct ppc_inst *inst, u32 *src);
+static inline int copy_inst_from_kernel_nofault(ppc_inst_t *inst, u32 *src)
+{
+ unsigned int val, suffix;
+
+ if (unlikely(!is_kernel_addr((unsigned long)src)))
+ return -ERANGE;
+
+/* See https://github.com/ClangBuiltLinux/linux/issues/1521 */
+#if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 140000
+ val = suffix = 0;
+#endif
+ __get_kernel_nofault(&val, src, u32, Efault);
+ if (IS_ENABLED(CONFIG_PPC64) && get_op(val) == OP_PREFIX) {
+ __get_kernel_nofault(&suffix, src + 1, u32, Efault);
+ *inst = ppc_inst_prefix(val, suffix);
+ } else {
+ *inst = ppc_inst(val);
+ }
+ return 0;
+Efault:
+ return -EFAULT;
+}
#endif /* _ASM_POWERPC_INST_H */
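
With struct ppc_inst replaced by the ppc_inst_t typedef (a plain u32 on 32-bit builds), the accessors keep working uniformly for both word and prefixed instructions. A usage sketch combining the helpers defined above (ptr is a hypothetical pointer into kernel text):

        char str[PPC_INST_STR_LEN];
        ppc_inst_t insn;

        if (!copy_inst_from_kernel_nofault(&insn, ptr)) {
                pr_debug("insn: %s (%d bytes)\n",
                         __ppc_inst_as_str(str, insn), ppc_inst_len(insn));
                if (ppc_inst_prefixed(insn))
                        /* 8-byte instruction: val is the prefix, suffix follows */
                        pr_debug("suffix: %08x\n", ppc_inst_suffix(insn));
        }
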
diff --git a/arch/powerpc/include/asm/interrupt.h b/arch/powerpc/include/asm/interrupt.h
index a1d238255f07..fc28f46d2f9d 100644
--- a/arch/powerpc/include/asm/interrupt.h
+++ b/arch/powerpc/include/asm/interrupt.h
@@ -97,6 +97,11 @@ static inline void srr_regs_clobbered(void)
local_paca->hsrr_valid = 0;
}
#else
+static inline unsigned long search_kernel_restart_table(unsigned long addr)
+{
+ return 0;
+}
+
static inline bool is_implicit_soft_masked(struct pt_regs *regs)
{
return false;
@@ -139,39 +144,68 @@ static inline void interrupt_enter_prepare(struct pt_regs *regs, struct interrup
if (!arch_irq_disabled_regs(regs))
trace_hardirqs_off();
- if (user_mode(regs)) {
- kuep_lock();
- account_cpu_user_entry();
- } else {
+ if (user_mode(regs))
+ kuap_lock();
+ else
kuap_save_and_lock(regs);
- }
+
+ if (user_mode(regs))
+ account_cpu_user_entry();
#endif
#ifdef CONFIG_PPC64
- if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
+ bool trace_enable = false;
+
+ if (IS_ENABLED(CONFIG_TRACE_IRQFLAGS)) {
+ if (irq_soft_mask_set_return(IRQS_ALL_DISABLED) == IRQS_ENABLED)
+ trace_enable = true;
+ } else {
+ irq_soft_mask_set(IRQS_ALL_DISABLED);
+ }
+
+ /*
+ * If the interrupt was taken with HARD_DIS clear, then enable MSR[EE].
+ * Asynchronous interrupts get here with HARD_DIS set (see below), so
+ * this enables MSR[EE] for synchronous interrupts. IRQs remain
+ * soft-masked. The interrupt handler may later call
+ * interrupt_cond_local_irq_enable() to achieve a regular process
+ * context.
+ */
+ if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(!(regs->msr & MSR_EE));
+ __hard_irq_enable();
+ } else {
+ __hard_RI_enable();
+ }
+
+ /* Do this when RI=1 because it can cause SLB faults */
+ if (trace_enable)
trace_hardirqs_off();
- local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
if (user_mode(regs)) {
+ kuap_lock();
CT_WARN_ON(ct_state() != CONTEXT_USER);
user_exit_irqoff();
account_cpu_user_entry();
account_stolen_time();
} else {
+ kuap_save_and_lock(regs);
/*
* CT_WARN_ON comes here via program_check_exception,
* so avoid recursion.
*/
if (TRAP(regs) != INTERRUPT_PROGRAM) {
CT_WARN_ON(ct_state() != CONTEXT_KERNEL);
- BUG_ON(is_implicit_soft_masked(regs));
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
+ BUG_ON(is_implicit_soft_masked(regs));
}
-#ifdef CONFIG_PPC_BOOK3S
+
/* Move this under a debugging check */
- if (arch_irq_disabled_regs(regs))
+ if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG) &&
+ arch_irq_disabled_regs(regs))
BUG_ON(search_kernel_restart_table(regs->nip));
-#endif
}
if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
BUG_ON(!arch_irq_disabled_regs(regs) && !(regs->msr & MSR_EE));
@@ -200,13 +234,20 @@ static inline void interrupt_exit_prepare(struct pt_regs *regs, struct interrupt
static inline void interrupt_async_enter_prepare(struct pt_regs *regs, struct interrupt_state *state)
{
+#ifdef CONFIG_PPC64
+ /* Ensure interrupt_enter_prepare does not enable MSR[EE] */
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+#endif
+ interrupt_enter_prepare(regs, state);
#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * RI=1 is set by interrupt_enter_prepare, so this thread flags access
+ * has to come afterward (it can cause SLB faults).
+ */
if (cpu_has_feature(CPU_FTR_CTRL) &&
!test_thread_local_flags(_TLF_RUNLATCH))
__ppc64_runlatch_on();
#endif
-
- interrupt_enter_prepare(regs, state);
irq_enter();
}
@@ -276,6 +317,8 @@ static inline void interrupt_nmi_enter_prepare(struct pt_regs *regs, struct inte
regs->softe = IRQS_ALL_DISABLED;
}
+ __hard_RI_enable();
+
/* Don't do any per-CPU operations until interrupt state is fixed */
if (nmi_disables_ftrace(regs)) {
@@ -373,6 +416,8 @@ interrupt_handler long func(struct pt_regs *regs) \
{ \
long ret; \
\
+ __hard_RI_enable(); \
+ \
ret = ____##func (regs); \
\
return ret; \
@@ -564,7 +609,7 @@ DECLARE_INTERRUPT_HANDLER(kernel_bad_stack);
/* slb.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_slb_fault);
-DECLARE_INTERRUPT_HANDLER(do_bad_slb_fault);
+DECLARE_INTERRUPT_HANDLER(do_bad_segment_interrupt);
/* hash_utils.c */
DECLARE_INTERRUPT_HANDLER_RAW(do_hash_fault);
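
The core of the interrupt_enter_prepare() rework is the MSR[EE]/MSR[RI] decision: synchronous interrupts arrive with PACA_IRQ_HARD_DIS clear and get hard-enabled (while IRQs stay soft-masked), whereas asynchronous ones pre-set HARD_DIS in interrupt_async_enter_prepare() and only get RI. Isolated from the hunk above for clarity:

        if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS)) {
                /* synchronous: EE back on, IRQs remain soft-masked */
                __hard_irq_enable();
        } else {
                /* async path set HARD_DIS beforehand: only set MSR[RI] */
                __hard_RI_enable();
        }
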
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index c361212ac160..d7912b66c874 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -275,7 +275,7 @@ extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
size_t size, enum dma_data_direction direction,
unsigned long attrs);
-extern void iommu_init_early_pSeries(void);
+void __init iommu_init_early_pSeries(void);
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);
diff --git a/arch/powerpc/include/asm/ipic.h b/arch/powerpc/include/asm/ipic.h
index 0524df31a7e6..b47ca7dc7199 100644
--- a/arch/powerpc/include/asm/ipic.h
+++ b/arch/powerpc/include/asm/ipic.h
@@ -65,7 +65,7 @@ enum ipic_mcp_irq {
IPIC_MCP_MU = 7,
};
-extern void ipic_set_default_priority(void);
+void __init ipic_set_default_priority(void);
extern u32 ipic_get_mcp_status(void);
extern void ipic_clear_mcp_status(u32 mask);
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 2b3278534bc1..13f0409dd617 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -36,7 +36,7 @@ extern int distribute_irqs;
struct pt_regs;
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
/*
* Per-cpu stacks for handling critical, debug and machine check
* level interrupts.
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index c6f250eca3fb..8ebdd23d987c 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -84,7 +84,7 @@ extern int crash_shutdown_register(crash_shutdown_t handler);
extern int crash_shutdown_unregister(crash_shutdown_t handler);
extern void crash_kexec_secondary(struct pt_regs *regs);
-extern int overlaps_crashkernel(unsigned long start, unsigned long size);
+int __init overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index 1df763002726..fb2237809d63 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -14,6 +14,10 @@
#include <asm/nohash/32/kup-8xx.h>
#endif
+#ifdef CONFIG_BOOKE_OR_40x
+#include <asm/nohash/kup-booke.h>
+#endif
+
#ifdef CONFIG_PPC_BOOK3S_32
#include <asm/book3s/32/kup.h>
#endif
@@ -32,34 +36,29 @@ extern bool disable_kuap;
#include <linux/pgtable.h>
-#ifdef CONFIG_PPC_KUEP
+void setup_kup(void);
void setup_kuep(bool disabled);
-#else
-static inline void setup_kuep(bool disabled) { }
-#endif /* CONFIG_PPC_KUEP */
-
-#ifndef CONFIG_PPC_BOOK3S_32
-static inline void kuep_lock(void) { }
-static inline void kuep_unlock(void) { }
-#endif
#ifdef CONFIG_PPC_KUAP
void setup_kuap(bool disabled);
#else
static inline void setup_kuap(bool disabled) { }
+static __always_inline bool kuap_is_disabled(void) { return true; }
+
static inline bool
-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
return false;
}
-static inline void kuap_assert_locked(void) { }
-static inline void kuap_save_and_lock(struct pt_regs *regs) { }
+static inline void __kuap_assert_locked(void) { }
+static inline void __kuap_lock(void) { }
+static inline void __kuap_save_and_lock(struct pt_regs *regs) { }
static inline void kuap_user_restore(struct pt_regs *regs) { }
-static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long amr) { }
-static inline unsigned long kuap_get_and_assert_locked(void)
+static inline unsigned long __kuap_get_and_assert_locked(void)
{
return 0;
}
@@ -70,20 +69,99 @@ static inline unsigned long kuap_get_and_assert_locked(void)
* platforms.
*/
#ifndef CONFIG_PPC_BOOK3S_64
-static inline void allow_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir) { }
-static inline void prevent_user_access(unsigned long dir) { }
-static inline unsigned long prevent_user_access_return(void) { return 0UL; }
-static inline void restore_user_access(unsigned long flags) { }
+static inline void __allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir) { }
+static inline void __prevent_user_access(unsigned long dir) { }
+static inline unsigned long __prevent_user_access_return(void) { return 0UL; }
+static inline void __restore_user_access(unsigned long flags) { }
#endif /* CONFIG_PPC_BOOK3S_64 */
#endif /* CONFIG_PPC_KUAP */
-static __always_inline void setup_kup(void)
+static __always_inline bool
+bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ if (kuap_is_disabled())
+ return false;
+
+ return __bad_kuap_fault(regs, address, is_write);
+}
+
+static __always_inline void kuap_assert_locked(void)
{
- setup_kuep(disable_kuep);
- setup_kuap(disable_kuap);
+ if (kuap_is_disabled())
+ return;
+
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ __kuap_get_and_assert_locked();
+}
+
+static __always_inline void kuap_lock(void)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __kuap_lock();
+}
+
+static __always_inline void kuap_save_and_lock(struct pt_regs *regs)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __kuap_save_and_lock(regs);
}
+static __always_inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long amr)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __kuap_kernel_restore(regs, amr);
+}
+
+static __always_inline unsigned long kuap_get_and_assert_locked(void)
+{
+ if (kuap_is_disabled())
+ return 0;
+
+ return __kuap_get_and_assert_locked();
+}
+
+#ifndef CONFIG_PPC_BOOK3S_64
+static __always_inline void allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __allow_user_access(to, from, size, dir);
+}
+
+static __always_inline void prevent_user_access(unsigned long dir)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __prevent_user_access(dir);
+}
+
+static __always_inline unsigned long prevent_user_access_return(void)
+{
+ if (kuap_is_disabled())
+ return 0;
+
+ return __prevent_user_access_return();
+}
+
+static __always_inline void restore_user_access(unsigned long flags)
+{
+ if (kuap_is_disabled())
+ return;
+
+ __restore_user_access(flags);
+}
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
static __always_inline void allow_read_from_user(const void __user *from, unsigned long size)
{
barrier_nospec();
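
After this refactor a platform only has to supply kuap_is_disabled() plus the bare __kuap_*() bodies; the generic wrappers above apply the is-disabled gate exactly once. What the platform side looks like (sketch; the feature bit is hypothetical, book3s/64 uses MMU_FTR_BOOK3S_KUAP in the real code):

        static __always_inline bool kuap_is_disabled(void)
        {
                return !mmu_has_feature(MMU_FTR_MYPLAT_KUAP);   /* hypothetical */
        }

        static inline void __kuap_lock(void)
        {
                /* platform-specific: block kernel access to user memory */
        }
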
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index fbbf3cec92e9..d68d71987d5c 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -79,6 +79,7 @@
#define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800
#define BOOK3S_INTERRUPT_DECREMENTER 0x900
#define BOOK3S_INTERRUPT_HV_DECREMENTER 0x980
+#define BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER 0x1980
#define BOOK3S_INTERRUPT_DOORBELL 0xa00
#define BOOK3S_INTERRUPT_SYSCALL 0xc00
#define BOOK3S_INTERRUPT_TRACE 0xd00
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 3d31f2c59e43..91c9f937edcd 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -406,6 +406,12 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return vcpu->arch.fault_dar;
}
+/* Expiry time of vcpu DEC relative to host TB */
+static inline u64 kvmppc_dec_expires_host_tb(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.dec_expires - vcpu->arch.vcore->tb_offset;
+}
+
static inline bool is_kvmppc_resume_guest(int r)
{
return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index fff391b9b97b..827038a33064 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -39,12 +39,10 @@ struct kvm_nested_guest {
pgd_t *shadow_pgtable; /* our page table for this guest */
u64 l1_gr_to_hr; /* L1's addr of part'n-scoped table */
u64 process_table; /* process table entry for this guest */
- u64 hfscr; /* HFSCR that the L1 requested for this nested guest */
long refcnt; /* number of pointers to this struct */
struct mutex tlb_lock; /* serialize page faults and tlbies */
struct kvm_nested_guest *next;
cpumask_t need_tlb_flush;
- cpumask_t cpu_in_guest;
short prev_cpu[NR_CPUS];
u8 radix; /* is this nested guest radix */
};
@@ -154,7 +152,9 @@ static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
return radix;
}
-int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr);
+unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr);
+
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb);
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
#endif
diff --git a/arch/powerpc/include/asm/kvm_guest.h b/arch/powerpc/include/asm/kvm_guest.h
index c63105d2c9e7..68e499abdb24 100644
--- a/arch/powerpc/include/asm/kvm_guest.h
+++ b/arch/powerpc/include/asm/kvm_guest.h
@@ -16,7 +16,7 @@ static inline bool is_kvm_guest(void)
return static_branch_unlikely(&kvm_guest);
}
-int check_kvm_guest(void);
+int __init check_kvm_guest(void);
#else
static inline bool is_kvm_guest(void) { return false; }
static inline int check_kvm_guest(void) { return 0; }
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index e4d23193eba7..d9bf60bf0816 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -287,7 +287,6 @@ struct kvm_arch {
u32 online_vcores;
atomic_t hpte_mod_interest;
cpumask_t need_tlb_flush;
- cpumask_t cpu_in_guest;
u8 radix;
u8 fwnmi_enabled;
u8 secure_guest;
@@ -579,6 +578,10 @@ struct kvm_vcpu_arch {
ulong cfar;
ulong ppr;
u32 pspb;
+ u8 load_ebb;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ u8 load_tm;
+#endif
ulong fscr;
ulong shadow_fscr;
ulong ebbhr;
@@ -741,7 +744,7 @@ struct kvm_vcpu_arch {
struct hrtimer dec_timer;
u64 dec_jiffies;
- u64 dec_expires;
+ u64 dec_expires; /* Relative to guest timebase. */
unsigned long pending_exceptions;
u8 ceded;
u8 prodded;
@@ -749,6 +752,7 @@ struct kvm_vcpu_arch {
u8 irq_pending; /* Used by XIVE to signal pending guest irqs */
u32 last_inst;
+ struct rcuwait wait;
struct rcuwait *waitp;
struct kvmppc_vcore *vcore;
int ret;
@@ -814,6 +818,7 @@ struct kvm_vcpu_arch {
/* For support of nested guests */
struct kvm_nested_guest *nested;
+ u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */
u32 nested_vcpu_id;
gpa_t nested_io_gpr;
#endif
@@ -864,6 +869,5 @@ static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_exit(void) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 671fbd1a765e..a14dbcd1b8ce 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -200,12 +200,11 @@ extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
@@ -274,12 +273,11 @@ struct kvmppc_ops {
int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
int (*prepare_memory_region)(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change);
void (*commit_memory_region)(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
@@ -552,8 +550,7 @@ extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);
-extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
- struct kvm_nested_guest *nested);
+extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);
#else
static inline void __init kvm_cma_reserve(void)
@@ -760,6 +757,7 @@ void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
+long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 9c3c9f04129f..e821037f74f0 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -235,8 +235,6 @@ extern struct machdep_calls *machine_id;
machine_id == &mach_##name; \
})
-extern void probe_machine(void);
-
#ifdef CONFIG_PPC_PMAC
/*
* Power macintoshes have either a CUDA, PMU or SMU controlling
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 8abe8e42e045..5f41565a1e5d 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -157,7 +157,7 @@ DECLARE_PER_CPU(int, next_tlbcam_idx);
enum {
MMU_FTRS_POSSIBLE =
-#if defined(CONFIG_PPC_BOOK3S_64) || defined(CONFIG_PPC_BOOK3S_604)
+#if defined(CONFIG_PPC_BOOK3S_604)
MMU_FTR_HPTE_TABLE |
#endif
#ifdef CONFIG_PPC_8xx
@@ -184,15 +184,18 @@ enum {
MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
#endif
#ifdef CONFIG_PPC_BOOK3S_64
+ MMU_FTR_KERNEL_RO |
+#ifdef CONFIG_PPC_64S_HASH_MMU
MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
- MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
+ MMU_FTR_68_BIT_VA | MMU_FTR_HPTE_TABLE |
#endif
#ifdef CONFIG_PPC_RADIX_MMU
MMU_FTR_TYPE_RADIX |
MMU_FTR_GTSE |
#endif /* CONFIG_PPC_RADIX_MMU */
+#endif
#ifdef CONFIG_PPC_KUAP
MMU_FTR_BOOK3S_KUAP |
#endif /* CONFIG_PPC_KUAP */
@@ -224,6 +227,13 @@ enum {
#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_FSL_E
#endif
+/* BOOK3S_64 options */
+#if defined(CONFIG_PPC_RADIX_MMU) && !defined(CONFIG_PPC_64S_HASH_MMU)
+#define MMU_FTRS_ALWAYS MMU_FTR_TYPE_RADIX
+#elif !defined(CONFIG_PPC_RADIX_MMU) && defined(CONFIG_PPC_64S_HASH_MMU)
+#define MMU_FTRS_ALWAYS MMU_FTR_HPTE_TABLE
+#endif
+
#ifndef MMU_FTRS_ALWAYS
#define MMU_FTRS_ALWAYS 0
#endif
@@ -329,7 +339,7 @@ static __always_inline bool radix_enabled(void)
return mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
-static inline bool early_radix_enabled(void)
+static __always_inline bool early_radix_enabled(void)
{
return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
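
A quick note on what the MMU_FTRS_ALWAYS selection above buys: when exactly one of the two Book3S-64 MMU types is configured, its type feature is guaranteed at compile time, so mmu_has_feature() tests fold to constants. A minimal sketch (the callees are hypothetical, named only for illustration):

	/*
	 * Sketch: with CONFIG_PPC_RADIX_MMU=y and CONFIG_PPC_64S_HASH_MMU=n,
	 * MMU_FTRS_ALWAYS contains MMU_FTR_TYPE_RADIX, so this branch is
	 * resolved at compile time and the hash path is discarded.
	 */
	if (radix_enabled())
		setup_radix_pgtables();	/* hypothetical callee */
	else
		setup_hash_pgtables();	/* compiled out in radix-only builds */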
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 9ba6b585337f..fd277b15635c 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -71,10 +71,11 @@ static inline void switch_mmu_context(struct mm_struct *prev,
}
extern int hash__alloc_context_id(void);
-extern void hash__reserve_context_id(int id);
+void __init hash__reserve_context_id(int id);
extern void __destroy_context(int context_id);
static inline void mmu_context_init(void) { }
+#ifdef CONFIG_PPC_64S_HASH_MMU
static inline int alloc_extended_context(struct mm_struct *mm,
unsigned long ea)
{
@@ -100,6 +101,7 @@ static inline bool need_extra_context(struct mm_struct *mm, unsigned long ea)
return true;
return false;
}
+#endif
#else
extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 0abf2e7fd222..58353c5bd3fb 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -472,7 +472,7 @@ extern int mpic_cpu_get_priority(void);
extern void mpic_cpu_set_priority(int prio);
/* Request IPIs on primary mpic */
-extern void mpic_request_ipis(void);
+void __init mpic_request_ipis(void);
/* Send a message (IPI) to a given target (cpu number or MSG_*) */
void smp_mpic_message_pass(int target, int msg);
diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
index 882a0bc7887a..c44d97751723 100644
--- a/arch/powerpc/include/asm/nohash/32/kup-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h
@@ -20,11 +20,12 @@ static __always_inline bool kuap_is_disabled(void)
return static_branch_unlikely(&disable_kuap_key);
}
-static inline void kuap_save_and_lock(struct pt_regs *regs)
+static inline void __kuap_lock(void)
{
- if (kuap_is_disabled())
- return;
+}
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
+{
regs->kuap = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
@@ -33,21 +34,15 @@ static inline void kuap_user_restore(struct pt_regs *regs)
{
}
-static inline void kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
{
- if (kuap_is_disabled())
- return;
-
mtspr(SPRN_MD_AP, regs->kuap);
}
-static inline unsigned long kuap_get_and_assert_locked(void)
+static inline unsigned long __kuap_get_and_assert_locked(void)
{
unsigned long kuap;
- if (kuap_is_disabled())
- return MD_APG_INIT;
-
kuap = mfspr(SPRN_MD_AP);
if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
@@ -56,36 +51,21 @@ static inline unsigned long kuap_get_and_assert_locked(void)
return kuap;
}
-static inline void kuap_assert_locked(void)
-{
- if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG) && !kuap_is_disabled())
- kuap_get_and_assert_locked();
-}
-
-static inline void allow_user_access(void __user *to, const void __user *from,
- unsigned long size, unsigned long dir)
+static inline void __allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
{
- if (kuap_is_disabled())
- return;
-
mtspr(SPRN_MD_AP, MD_APG_INIT);
}
-static inline void prevent_user_access(unsigned long dir)
+static inline void __prevent_user_access(unsigned long dir)
{
- if (kuap_is_disabled())
- return;
-
mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
-static inline unsigned long prevent_user_access_return(void)
+static inline unsigned long __prevent_user_access_return(void)
{
unsigned long flags;
- if (kuap_is_disabled())
- return MD_APG_INIT;
-
flags = mfspr(SPRN_MD_AP);
mtspr(SPRN_MD_AP, MD_APG_KUAP);
@@ -93,20 +73,14 @@ static inline unsigned long prevent_user_access_return(void)
return flags;
}
-static inline void restore_user_access(unsigned long flags)
+static inline void __restore_user_access(unsigned long flags)
{
- if (kuap_is_disabled())
- return;
-
mtspr(SPRN_MD_AP, flags);
}
static inline bool
-bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
{
- if (kuap_is_disabled())
- return false;
-
return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000);
}
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
index 43ceca128531..2d92a39d8f2e 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-44x.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h
@@ -113,7 +113,6 @@ typedef struct {
/* patch sites */
extern s32 patch__tlb_44x_hwater_D, patch__tlb_44x_hwater_I;
-extern s32 patch__tlb_44x_kuep, patch__tlb_47x_kuep;
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
index 997cec973406..0e93a4728c9e 100644
--- a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
+++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h
@@ -39,12 +39,10 @@
* 0 => Kernel => 11 (all accesses performed according as user iaw page definition)
* 1 => Kernel+Accessed => 01 (all accesses performed according to page definition)
* 2 => User => 11 (all accesses performed according as user iaw page definition)
- * 3 => User+Accessed => 00 (all accesses performed as supervisor iaw page definition) for INIT
- * => 10 (all accesses performed according to swaped page definition) for KUEP
+ * 3 => User+Accessed => 10 (all accesses performed according to swapped page definition) for KUEP
* 4-15 => Not Used
*/
-#define MI_APG_INIT 0xdc000000
-#define MI_APG_KUEP 0xde000000
+#define MI_APG_INIT 0xde000000
/* The effective page number register. When read, contains the information
* about the last instruction TLB miss. When MI_RPN is written, bits in
diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h
index b67742e2a9b2..d959c2a73fbf 100644
--- a/arch/powerpc/include/asm/nohash/32/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/32/pgtable.h
@@ -64,6 +64,7 @@ extern int icache_44x_need_flush;
#ifndef __ASSEMBLY__
int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h
index 9d2905a47410..2816d158280a 100644
--- a/arch/powerpc/include/asm/nohash/64/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/64/pgtable.h
@@ -308,11 +308,18 @@ static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
#define __swp_entry_to_pte(x) __pte((x).val)
int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot);
+void unmap_kernel_page(unsigned long va);
extern int __meminit vmemmap_create_mapping(unsigned long start,
unsigned long page_size,
unsigned long phys);
extern void vmemmap_remove_mapping(unsigned long start,
unsigned long page_size);
+void __patch_exception(int exc, unsigned long addr);
+#define patch_exception(exc, name) do { \
+ extern unsigned int name; \
+ __patch_exception((exc), (unsigned long)&name); \
+} while (0)
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */
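
The patch_exception() macro added above declares the handler symbol extern and hands its address to __patch_exception(); a hedged usage sketch (the vector number and handler name are illustrative only):

	/*
	 * Sketch: point the 0x1d00 exception vector at an alternate
	 * assembly handler; the macro supplies the extern declaration
	 * and the address-of.
	 */
	patch_exception(0x1d00, exc_debug_debug_book3e);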
diff --git a/arch/powerpc/include/asm/nohash/kup-booke.h b/arch/powerpc/include/asm/nohash/kup-booke.h
new file mode 100644
index 000000000000..49bb41ed0816
--- /dev/null
+++ b/arch/powerpc/include/asm/nohash/kup-booke.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_POWERPC_KUP_BOOKE_H_
+#define _ASM_POWERPC_KUP_BOOKE_H_
+
+#include <asm/bug.h>
+
+#ifdef CONFIG_PPC_KUAP
+
+#ifdef __ASSEMBLY__
+
+.macro kuap_check_amr gpr1, gpr2
+.endm
+
+#else
+
+#include <linux/jump_label.h>
+#include <linux/sched.h>
+
+#include <asm/reg.h>
+
+extern struct static_key_false disable_kuap_key;
+
+static __always_inline bool kuap_is_disabled(void)
+{
+ return static_branch_unlikely(&disable_kuap_key);
+}
+
+static inline void __kuap_lock(void)
+{
+ mtspr(SPRN_PID, 0);
+ isync();
+}
+
+static inline void __kuap_save_and_lock(struct pt_regs *regs)
+{
+ regs->kuap = mfspr(SPRN_PID);
+ mtspr(SPRN_PID, 0);
+ isync();
+}
+
+static inline void kuap_user_restore(struct pt_regs *regs)
+{
+ if (kuap_is_disabled())
+ return;
+
+ mtspr(SPRN_PID, current->thread.pid);
+
+ /* Context synchronisation is performed by rfi */
+}
+
+static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap)
+{
+ if (regs->kuap)
+ mtspr(SPRN_PID, current->thread.pid);
+
+ /* Context synchronisation is performed by rfi */
+}
+
+static inline unsigned long __kuap_get_and_assert_locked(void)
+{
+ unsigned long kuap = mfspr(SPRN_PID);
+
+ if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG))
+ WARN_ON_ONCE(kuap);
+
+ return kuap;
+}
+
+static inline void __allow_user_access(void __user *to, const void __user *from,
+ unsigned long size, unsigned long dir)
+{
+ mtspr(SPRN_PID, current->thread.pid);
+ isync();
+}
+
+static inline void __prevent_user_access(unsigned long dir)
+{
+ mtspr(SPRN_PID, 0);
+ isync();
+}
+
+static inline unsigned long __prevent_user_access_return(void)
+{
+ unsigned long flags = mfspr(SPRN_PID);
+
+ mtspr(SPRN_PID, 0);
+ isync();
+
+ return flags;
+}
+
+static inline void __restore_user_access(unsigned long flags)
+{
+ if (flags) {
+ mtspr(SPRN_PID, current->thread.pid);
+ isync();
+ }
+}
+
+static inline bool
+__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write)
+{
+ return !regs->kuap;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_PPC_KUAP */
+
+#endif /* _ASM_POWERPC_KUP_BOOKE_H_ */
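
For context on the new BookE KUAP hooks: user access is opened by restoring the thread's PID and closed by writing PID 0. A minimal sketch of the bracketing pattern the generic kup.h layer builds on top of these helpers (assuming the usual allow/prevent wrappers):

	/*
	 * Sketch: a user read bracketed by the KUAP window. On BookE the
	 * allow side restores current->thread.pid, the prevent side
	 * writes PID 0, each followed by an isync.
	 */
	static int read_user_word(u32 __user *uptr, u32 *val)
	{
		int err;

		allow_read_from_user(uptr, sizeof(*uptr));
		err = __get_user(*val, uptr);
		prevent_read_from_user(uptr, sizeof(*uptr));

		return err;
	}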
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
index 0b63ba7d5917..a2bc4b95e703 100644
--- a/arch/powerpc/include/asm/opal-api.h
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -1094,6 +1094,7 @@ enum {
OPAL_XIVE_IRQ_SHIFT_BUG = 0x00000008, /* P9 DD1.0 workaround */
OPAL_XIVE_IRQ_MASK_VIA_FW = 0x00000010, /* P9 DD1.0 workaround */
OPAL_XIVE_IRQ_EOI_VIA_FW = 0x00000020, /* P9 DD1.0 workaround */
+ OPAL_XIVE_IRQ_STORE_EOI2 = 0x00000040,
};
/* Flags for OPAL_XIVE_GET/SET_QUEUE_INFO */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 6ea9001de9a9..bfd3142cd0ba 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -314,7 +314,7 @@ extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
int depth, void *data);
extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
const char *uname, int depth, void *data);
-extern void opal_configure_cores(void);
+void __init opal_configure_cores(void);
extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index dc05a862e72a..295573a82c66 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -97,7 +97,9 @@ struct paca_struct {
/* this becomes non-zero. */
u8 kexec_state; /* set when kexec down has irqs off */
#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
struct slb_shadow *slb_shadow_ptr;
+#endif
struct dtl_entry *dispatch_log;
struct dtl_entry *dispatch_log_end;
#endif
@@ -110,6 +112,7 @@ struct paca_struct {
/* used for most interrupts/exceptions */
u64 exgen[EX_SIZE] __attribute__((aligned(0x80)));
+#ifdef CONFIG_PPC_64S_HASH_MMU
/* SLB related definitions */
u16 vmalloc_sllp;
u8 slb_cache_ptr;
@@ -120,6 +123,7 @@ struct paca_struct {
u32 slb_used_bitmap; /* Bitmaps for first 32 SLB entries. */
u32 slb_kern_bitmap;
u32 slb_cache[SLB_CACHE_ENTRIES];
+#endif
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_PPC_BOOK3E
@@ -149,6 +153,7 @@ struct paca_struct {
#endif /* CONFIG_PPC_BOOK3E */
#ifdef CONFIG_PPC_BOOK3S
+#ifdef CONFIG_PPC_64S_HASH_MMU
#ifdef CONFIG_PPC_MM_SLICES
unsigned char mm_ctx_low_slices_psize[BITS_PER_LONG / BITS_PER_BYTE];
unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
@@ -157,6 +162,7 @@ struct paca_struct {
u16 mm_ctx_sllp;
#endif
#endif
+#endif
/*
* then miscellaneous read-write fields
@@ -268,9 +274,11 @@ struct paca_struct {
#endif /* CONFIG_PPC_PSERIES */
#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
/* Capture SLB related old contents in MCE handler. */
struct slb_entry *mce_faulty_slbs;
u16 slb_save_cache_ptr;
+#endif
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_STACKPROTECTOR
unsigned long canary;
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index d1f53260725c..915d6ee4b40a 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -48,7 +48,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
}
#ifdef CONFIG_PCI
-extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
+void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops);
#else /* CONFIG_PCI */
#define set_pci_dma_ops(d)
#endif
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index f4c3428e816b..e2221d29fdf9 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -98,7 +98,7 @@ struct power_pmu {
#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */
-extern int register_power_pmu(struct power_pmu *);
+int __init register_power_pmu(struct power_pmu *pmu);
struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index baea657bc868..9675303b724e 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -249,6 +249,7 @@
#define PPC_INST_COPY 0x7c20060c
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
+#define PPC_INST_DSSALL 0x7e00066c
#define PPC_INST_ISEL 0x7c00001e
#define PPC_INST_ISEL_MASK 0xfc00003e
#define PPC_INST_LSWI 0x7c0004aa
@@ -393,6 +394,7 @@
(0x7c000264 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r))
#define PPC_RAW_TLBIEL(rb, rs, ric, prs, r) \
(0x7c000224 | ___PPC_RB(rb) | ___PPC_RS(rs) | ___PPC_RIC(ric) | ___PPC_PRS(prs) | ___PPC_R(r))
+#define PPC_RAW_TLBIEL_v205(rb, l) (0x7c000224 | ___PPC_RB(rb) | (l << 21))
#define PPC_RAW_TLBSRX_DOT(a, b) (0x7c0006a5 | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_RAW_TLBIVAX(a, b) (0x7c000624 | __PPC_RA0(a) | __PPC_RB(b))
#define PPC_RAW_ERATWE(s, a, w) (0x7c0001a6 | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w))
@@ -498,6 +500,7 @@
#define PPC_RAW_LDX(r, base, b) (0x7c00002a | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_LHZ(r, base, i) (0xa0000000 | ___PPC_RT(r) | ___PPC_RA(base) | IMM_L(i))
#define PPC_RAW_LHBRX(r, base, b) (0x7c00062c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
+#define PPC_RAW_LWBRX(r, base, b) (0x7c00042c | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_LDBRX(r, base, b) (0x7c000428 | ___PPC_RT(r) | ___PPC_RA(base) | ___PPC_RB(b))
#define PPC_RAW_STWCX(s, a, b) (0x7c00012d | ___PPC_RS(s) | ___PPC_RA(a) | ___PPC_RB(b))
#define PPC_RAW_CMPWI(a, i) (0x2c000000 | ___PPC_RA(a) | IMM_L(i))
@@ -566,6 +569,8 @@
#define PPC_RAW_MTSPR(spr, d) (0x7c0003a6 | ___PPC_RS(d) | __PPC_SPR(spr))
#define PPC_RAW_EIEIO() (0x7c0006ac)
+#define PPC_RAW_BRANCH(addr) (PPC_INST_BRANCH | ((addr) & 0x03fffffc))
+
/* Deal with instructions that older assemblers aren't aware of */
#define PPC_BCCTR_FLUSH stringify_in_c(.long PPC_INST_BCCTR_FLUSH)
#define PPC_CP_ABORT stringify_in_c(.long PPC_RAW_CP_ABORT)
@@ -575,6 +580,7 @@
#define PPC_DCBZL(a, b) stringify_in_c(.long PPC_RAW_DCBZL(a, b))
#define PPC_DIVDE(t, a, b) stringify_in_c(.long PPC_RAW_DIVDE(t, a, b))
#define PPC_DIVDEU(t, a, b) stringify_in_c(.long PPC_RAW_DIVDEU(t, a, b))
+#define PPC_DSSALL stringify_in_c(.long PPC_INST_DSSALL)
#define PPC_LQARX(t, a, b, eh) stringify_in_c(.long PPC_RAW_LQARX(t, a, b, eh))
#define PPC_STQCX(t, a, b) stringify_in_c(.long PPC_RAW_STQCX(t, a, b))
#define PPC_MADDHD(t, a, b, c) stringify_in_c(.long PPC_RAW_MADDHD(t, a, b, c))
@@ -602,6 +608,7 @@
stringify_in_c(.long PPC_RAW_TLBIE_5(rb, rs, ric, prs, r))
#define PPC_TLBIEL(rb,rs,ric,prs,r) \
stringify_in_c(.long PPC_RAW_TLBIEL(rb, rs, ric, prs, r))
+#define PPC_TLBIEL_v205(rb, l) stringify_in_c(.long PPC_RAW_TLBIEL_v205(rb, l))
#define PPC_TLBSRX_DOT(a, b) stringify_in_c(.long PPC_RAW_TLBSRX_DOT(a, b))
#define PPC_TLBIVAX(a, b) stringify_in_c(.long PPC_RAW_TLBIVAX(a, b))
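
A worked encoding from the new PPC_RAW_BRANCH() macro above; the value follows directly from the definitions (PPC_INST_BRANCH is 0x48000000 and the macro masks in the word-aligned 26-bit LI field):

	u32 insn = PPC_RAW_BRANCH(0x100);	/* 0x48000100, i.e. "b .+0x100" */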
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 7be24048b8d1..f21e6bde17a1 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -17,29 +17,40 @@
#define SZL (BITS_PER_LONG/8)
/*
+ * This expands to a sequence of operations with reg incrementing from
+ * start to end inclusive, of this form:
+ *
+ * op reg, (offset + (width * reg))(base)
+ *
+ * Note that offset is not the offset of the first operation unless start
+ * is zero (or width is zero).
+ */
+.macro OP_REGS op, width, start, end, base, offset
+ .Lreg=\start
+ .rept (\end - \start + 1)
+ \op .Lreg, \offset + \width * .Lreg(\base)
+ .Lreg=.Lreg+1
+ .endr
+.endm
+
+/*
* Macros for storing registers into and loading registers from
* exception frames.
*/
#ifdef __powerpc64__
-#define SAVE_GPR(n, base) std n,GPR0+8*(n)(base)
-#define REST_GPR(n, base) ld n,GPR0+8*(n)(base)
-#define SAVE_NVGPRS(base) SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
-#define REST_NVGPRS(base) REST_8GPRS(14, base); REST_10GPRS(22, base)
+#define SAVE_GPRS(start, end, base) OP_REGS std, 8, start, end, base, GPR0
+#define REST_GPRS(start, end, base) OP_REGS ld, 8, start, end, base, GPR0
+#define SAVE_NVGPRS(base) SAVE_GPRS(14, 31, base)
+#define REST_NVGPRS(base) REST_GPRS(14, 31, base)
#else
-#define SAVE_GPR(n, base) stw n,GPR0+4*(n)(base)
-#define REST_GPR(n, base) lwz n,GPR0+4*(n)(base)
-#define SAVE_NVGPRS(base) SAVE_GPR(13, base); SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
-#define REST_NVGPRS(base) REST_GPR(13, base); REST_8GPRS(14, base); REST_10GPRS(22, base)
+#define SAVE_GPRS(start, end, base) OP_REGS stw, 4, start, end, base, GPR0
+#define REST_GPRS(start, end, base) OP_REGS lwz, 4, start, end, base, GPR0
+#define SAVE_NVGPRS(base) SAVE_GPRS(13, 31, base)
+#define REST_NVGPRS(base) REST_GPRS(13, 31, base)
#endif
-#define SAVE_2GPRS(n, base) SAVE_GPR(n, base); SAVE_GPR(n+1, base)
-#define SAVE_4GPRS(n, base) SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
-#define SAVE_8GPRS(n, base) SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
-#define SAVE_10GPRS(n, base) SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
-#define REST_2GPRS(n, base) REST_GPR(n, base); REST_GPR(n+1, base)
-#define REST_4GPRS(n, base) REST_2GPRS(n, base); REST_2GPRS(n+2, base)
-#define REST_8GPRS(n, base) REST_4GPRS(n, base); REST_4GPRS(n+4, base)
-#define REST_10GPRS(n, base) REST_8GPRS(n, base); REST_2GPRS(n+8, base)
+#define SAVE_GPR(n, base) SAVE_GPRS(n, n, base)
+#define REST_GPR(n, base) REST_GPRS(n, n, base)
#define SAVE_FPR(n, base) stfd n,8*TS_FPRWIDTH*(n)(base)
#define SAVE_2FPRS(n, base) SAVE_FPR(n, base); SAVE_FPR(n+1, base)
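
For clarity, a worked expansion of the new OP_REGS-based macros: on 64-bit, SAVE_GPRS(14, 15, r1) effectively unrolls to

	std	14, GPR0 + 8*14(r1)
	std	15, GPR0 + 8*15(r1)

which is what the old SAVE_8GPRS/SAVE_10GPRS chains produced, with the start/end bounds now explicit at the call site.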
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index e39bd0ff69f3..2c8686d9e964 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -157,8 +157,12 @@ struct thread_struct {
#ifdef CONFIG_PPC_BOOK3S_32
unsigned long r0, r3, r4, r5, r6, r8, r9, r11;
unsigned long lr, ctr;
+ unsigned long sr0;
#endif
#endif /* CONFIG_PPC32 */
+#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
+ unsigned long pid; /* value written in PID reg. at interrupt exit */
+#endif
/* Debug Registers */
struct debug_reg debug;
#ifdef CONFIG_PPC_FPU_REGS
@@ -191,8 +195,10 @@ struct thread_struct {
int used_vsr; /* set if process has used VSX */
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
- unsigned long evr[32]; /* upper 32-bits of SPE regs */
- u64 acc; /* Accumulator */
+ struct_group(spe,
+ unsigned long evr[32]; /* upper 32-bits of SPE regs */
+ u64 acc; /* Accumulator */
+ );
unsigned long spefscr; /* SPE & eFP status */
unsigned long spefscr_last; /* SPEFSCR value on last prctl
call or trap return */
@@ -276,6 +282,12 @@ struct thread_struct {
#define SPEFSCR_INIT
#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+#define SR0_INIT .sr0 = IS_ENABLED(CONFIG_PPC_KUEP) ? SR_NX : 0,
+#else
+#define SR0_INIT
+#endif
+
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
#define INIT_THREAD { \
.ksp = INIT_SP, \
@@ -283,6 +295,7 @@ struct thread_struct {
.kuap = ~0UL, /* KUAP_NONE */ \
.fpexc_mode = MSR_FE0 | MSR_FE1, \
SPEFSCR_INIT \
+ SR0_INIT \
}
#elif defined(CONFIG_PPC32)
#define INIT_THREAD { \
@@ -290,6 +303,7 @@ struct thread_struct {
.pgdir = swapper_pg_dir, \
.fpexc_mode = MSR_FE0 | MSR_FE1, \
SPEFSCR_INIT \
+ SR0_INIT \
}
#else
#define INIT_THREAD { \
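
The struct_group() above mirrors evr[] and acc into a single member named spe, so the pair can be copied or cleared as one object without tripping FORTIFY bounds checking. A hedged sketch (the destination buffer is illustrative):

	/* Sketch: save the whole SPE register block in one operation. */
	memcpy(buf, &current->thread.spe, sizeof(current->thread.spe));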
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 6e560f035614..42f89e2d8f04 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -291,7 +291,7 @@ static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
static inline bool cpu_has_msr_ri(void)
{
- return !IS_ENABLED(CONFIG_BOOKE) && !IS_ENABLED(CONFIG_40x);
+ return !IS_ENABLED(CONFIG_BOOKE_OR_40x);
}
static inline bool regs_is_unrecoverable(struct pt_regs *regs)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index e9d27265253b..2835f6363228 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -18,9 +18,9 @@
#include <asm/feature-fixups.h>
/* Pickup Book E specific registers. */
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
#include <asm/reg_booke.h>
-#endif /* CONFIG_BOOKE || CONFIG_40x */
+#endif
#ifdef CONFIG_FSL_EMB_PERFMON
#include <asm/reg_fsl_emb.h>
@@ -1366,6 +1366,18 @@
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_PPC64) || defined(__CHECKER__)
+typedef struct {
+ u32 val;
+#ifdef CONFIG_PPC64
+ u32 suffix;
+#endif
+} __packed ppc_inst_t;
+#else
+typedef u32 ppc_inst_t;
+#endif
+
#define mfmsr() ({unsigned long rval; \
asm volatile("mfmsr %0" : "=r" (rval) : \
: "memory"); rval;})
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 9dc97d2f9d27..82e5b055fa2a 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -264,7 +264,7 @@ extern void rtas_get_rtc_time(struct rtc_time *rtc_time);
extern int rtas_set_rtc_time(struct rtc_time *rtc_time);
extern unsigned int rtas_busy_delay_time(int status);
-extern unsigned int rtas_busy_delay(int status);
+bool rtas_busy_delay(int status);
extern int early_init_dt_scan_rtas(unsigned long node,
const char *uname, int depth, void *data);
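
The bool return of rtas_busy_delay() above suits the usual retry idiom: true means the status was a busy/extended-delay code and the appropriate sleep has already been done. A hedged sketch (token and call arguments are illustrative):

	int rc;

	do {
		rc = rtas_call(token, 0, 1, NULL);	/* illustrative call */
	} while (rtas_busy_delay(rc));			/* true => slept, retry */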
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 79cb7a25a5fb..38f79e42bf3c 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -25,16 +25,16 @@ extern char start_virt_trampolines[];
extern char end_virt_trampolines[];
#endif
+/*
+ * This assumes the kernel is never compiled with -mcmodel=small, or
+ * that the total .toc is always less than 64k.
+ */
static inline unsigned long kernel_toc_addr(void)
{
- /* Defined by the linker, see vmlinux.lds.S */
- extern unsigned long __toc_start;
-
- /*
- * The TOC register (r2) points 32kB into the TOC, so that 64kB of
- * the TOC can be addressed using a single machine instruction.
- */
- return (unsigned long)(&__toc_start) + 0x8000UL;
+ unsigned long toc_ptr;
+
+ asm volatile("mr %0, 2" : "=r" (toc_ptr));
+ return toc_ptr;
}
static inline int overlaps_interrupt_vector_text(unsigned long start,
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 6c1a7d217d1a..d0d3dd531c7f 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -9,7 +9,6 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
extern unsigned long long memory_limit;
-extern bool init_mem_is_free;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
struct device_node;
@@ -32,7 +31,7 @@ void setup_panic(void);
extern bool pseries_enable_reloc_on_exc(void);
extern void pseries_disable_reloc_on_exc(void);
extern void pseries_big_endian_exceptions(void);
-extern void pseries_little_endian_exceptions(void);
+void __init pseries_little_endian_exceptions(void);
#else
static inline bool pseries_enable_reloc_on_exc(void) { return false; }
static inline void pseries_disable_reloc_on_exc(void) {}
@@ -55,7 +54,7 @@ void setup_entry_flush(bool enable);
void setup_uaccess_flush(bool enable);
void do_rfi_flush_fixups(enum l1d_flush_type types);
#ifdef CONFIG_PPC_BARRIER_NOSPEC
-void setup_barrier_nospec(void);
+void __init setup_barrier_nospec(void);
#else
static inline void setup_barrier_nospec(void) { }
#endif
@@ -71,11 +70,11 @@ static inline void do_barrier_nospec_fixups_range(bool enable, void *start, void
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
-void setup_spectre_v2(void);
+void __init setup_spectre_v2(void);
#else
static inline void setup_spectre_v2(void) {}
#endif
-void do_btb_flush_fixups(void);
+void __init do_btb_flush_fixups(void);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/smu.h b/arch/powerpc/include/asm/smu.h
index 4b30a0205c93..2ac6ab903023 100644
--- a/arch/powerpc/include/asm/smu.h
+++ b/arch/powerpc/include/asm/smu.h
@@ -456,7 +456,7 @@ extern void smu_poll(void);
/*
* Init routine, presence check....
*/
-extern int smu_init(void);
+int __init smu_init(void);
extern int smu_present(void);
struct platform_device;
extern struct platform_device *smu_get_ofdev(void);
diff --git a/arch/powerpc/include/asm/sstep.h b/arch/powerpc/include/asm/sstep.h
index 1df867c2e054..50950deedb87 100644
--- a/arch/powerpc/include/asm/sstep.h
+++ b/arch/powerpc/include/asm/sstep.h
@@ -145,7 +145,7 @@ union vsx_reg {
* otherwise.
*/
extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
- struct ppc_inst instr);
+ ppc_inst_t instr);
/*
* Emulate an instruction that can be executed just by updating
@@ -162,7 +162,7 @@ void emulate_update_regs(struct pt_regs *reg, struct instruction_op *op);
* 0 if it could not be emulated, or -1 for an instruction that
* should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
*/
-extern int emulate_step(struct pt_regs *regs, struct ppc_inst instr);
+int emulate_step(struct pt_regs *regs, ppc_inst_t instr);
/*
* Emulate a load or store instruction by reading/writing the
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 9d1fbd8be1c7..1f43ef696033 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -112,6 +112,9 @@ static inline void clear_task_ebb(struct task_struct *t)
#endif
}
+void kvmppc_save_user_regs(void);
+void kvmppc_save_current_sprs(void);
+
extern int set_thread_tidr(struct task_struct *t);
#endif /* _ASM_POWERPC_SWITCH_TO_H */
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 52d05b465e3e..25fc8ad9a27a 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -90,7 +90,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned long val, mask = -1UL;
unsigned int n = 6;
- if (is_32bit_task())
+ if (is_tsk_32bit_task(task))
mask = 0xffffffff;
while (n--) {
@@ -105,7 +105,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
static inline int syscall_get_arch(struct task_struct *task)
{
- if (is_32bit_task())
+ if (is_tsk_32bit_task(task))
return AUDIT_ARCH_PPC;
else if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
return AUDIT_ARCH_PPC64LE;
diff --git a/arch/powerpc/include/asm/task_size_64.h b/arch/powerpc/include/asm/task_size_64.h
index c993482237ed..38fdf8041d12 100644
--- a/arch/powerpc/include/asm/task_size_64.h
+++ b/arch/powerpc/include/asm/task_size_64.h
@@ -44,11 +44,7 @@
*/
#define TASK_SIZE_USER32 (0x0000000100000000UL - (1 * PAGE_SIZE))
-#define TASK_SIZE_OF(tsk) \
- (test_tsk_thread_flag(tsk, TIF_32BIT) ? TASK_SIZE_USER32 : \
- TASK_SIZE_USER64)
-
-#define TASK_SIZE TASK_SIZE_OF(current)
+#define TASK_SIZE (is_32bit_task() ? TASK_SIZE_USER32 : TASK_SIZE_USER64)
#define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4))
#define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(DEFAULT_MAP_WINDOW_USER64 / 4))
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 5725029aaa29..d6e649b3c70b 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -168,8 +168,10 @@ static inline bool test_thread_local_flags(unsigned int flags)
#ifdef CONFIG_COMPAT
#define is_32bit_task() (test_thread_flag(TIF_32BIT))
+#define is_tsk_32bit_task(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT))
#else
#define is_32bit_task() (IS_ENABLED(CONFIG_PPC32))
+#define is_tsk_32bit_task(tsk) (IS_ENABLED(CONFIG_PPC32))
#endif
#if defined(CONFIG_PPC64)
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 8c2c3dd4ddba..924b2157882f 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -18,6 +18,8 @@
#include <asm/vdso/timebase.h>
/* time.c */
+extern u64 decrementer_max;
+
extern unsigned long tb_ticks_per_jiffy;
extern unsigned long tb_ticks_per_usec;
extern unsigned long tb_ticks_per_sec;
@@ -97,19 +99,16 @@ extern void div128_by_32(u64 dividend_high, u64 dividend_low,
extern void secondary_cpu_time_init(void);
extern void __init time_init(void);
-#ifdef CONFIG_PPC64
-static inline unsigned long test_irq_work_pending(void)
-{
- unsigned long x;
+DECLARE_PER_CPU(u64, decrementers_next_tb);
- asm volatile("lbz %0,%1(13)"
- : "=r" (x)
- : "i" (offsetof(struct paca_struct, irq_work_pending)));
- return x;
+static inline u64 timer_get_next_tb(void)
+{
+ return __this_cpu_read(decrementers_next_tb);
}
-#endif
-DECLARE_PER_CPU(u64, decrementers_next_tb);
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void timer_rearm_host_dec(u64 now);
+#endif
/* Convert timebase ticks to nanoseconds */
unsigned long long tb_to_ns(unsigned long long tb_ticks);
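
On the new timer_get_next_tb() above: it exposes the per-cpu next-event timebase so KVM can rearm the host decrementer after running a guest. A rough sketch of the rearm logic, simplified from what timer_rearm_host_dec() is expected to do:

	u64 now = mftb();
	u64 next = timer_get_next_tb();

	if (now >= next)
		set_dec(1);		/* already expired: fire at once */
	else
		set_dec(next - now);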
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 0ea9e70ed78b..b4aa0d88ce2c 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -23,14 +23,14 @@ extern void udbg_printf(const char *fmt, ...)
__attribute__ ((format (printf, 1, 2)));
extern void udbg_progress(char *s, unsigned short hex);
-extern void udbg_uart_init_mmio(void __iomem *addr, unsigned int stride);
-extern void udbg_uart_init_pio(unsigned long port, unsigned int stride);
+void __init udbg_uart_init_mmio(void __iomem *addr, unsigned int stride);
+void __init udbg_uart_init_pio(unsigned long port, unsigned int stride);
-extern void udbg_uart_setup(unsigned int speed, unsigned int clock);
-extern unsigned int udbg_probe_uart_speed(unsigned int clock);
+void __init udbg_uart_setup(unsigned int speed, unsigned int clock);
+unsigned int __init udbg_probe_uart_speed(unsigned int clock);
struct device_node;
-extern void udbg_scc_init(int force_scc);
+void __init udbg_scc_init(int force_scc);
extern int udbg_adb_init(int force_btext);
extern void udbg_adb_init_early(void);
diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h
index fe683371336f..a7ae1860115a 100644
--- a/arch/powerpc/include/asm/uprobes.h
+++ b/arch/powerpc/include/asm/uprobes.h
@@ -11,7 +11,6 @@
#include <linux/notifier.h>
#include <asm/probes.h>
-#include <asm/inst.h>
typedef ppc_opcode_t uprobe_opcode_t;
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 0ac9bfddf704..e2e704eca5f6 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -38,13 +38,13 @@ static inline int icp_native_init(void) { return -ENODEV; }
/* PAPR ICP */
#ifdef CONFIG_PPC_ICP_HV
-extern int icp_hv_init(void);
+int __init icp_hv_init(void);
#else
static inline int icp_hv_init(void) { return -ENODEV; }
#endif
#ifdef CONFIG_PPC_POWERNV
-extern int icp_opal_init(void);
+int __init icp_opal_init(void);
extern void icp_opal_flush_interrupt(void);
#else
static inline int icp_opal_init(void) { return -ENODEV; }
diff --git a/arch/powerpc/include/asm/xmon.h b/arch/powerpc/include/asm/xmon.h
index 68bfb2361f03..f2d44b44f46c 100644
--- a/arch/powerpc/include/asm/xmon.h
+++ b/arch/powerpc/include/asm/xmon.h
@@ -12,7 +12,7 @@
#ifdef CONFIG_XMON
extern void xmon_setup(void);
-extern void xmon_register_spus(struct list_head *list);
+void __init xmon_register_spus(struct list_head *list);
struct pt_regs;
extern int xmon(struct pt_regs *excp);
extern irqreturn_t xmon_irq(int, void *);
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b039877c743d..4d7829399570 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -11,6 +11,7 @@ CFLAGS_prom_init.o += -fPIC
CFLAGS_btext.o += -fPIC
endif
+CFLAGS_early_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index bf96b954a4eb..3e37ece06739 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -105,7 +105,7 @@ static struct aligninfo spe_aligninfo[32] = {
* so we don't need the address swizzling.
*/
static int emulate_spe(struct pt_regs *regs, unsigned int reg,
- struct ppc_inst ppc_instr)
+ ppc_inst_t ppc_instr)
{
union {
u64 ll;
@@ -300,7 +300,7 @@ Efault_write:
int fix_alignment(struct pt_regs *regs)
{
- struct ppc_inst instr;
+ ppc_inst_t instr;
struct instruction_op op;
int r, type;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index cc05522f50bf..7582f3e3a330 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -54,7 +54,7 @@
#endif
#ifdef CONFIG_PPC32
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
#include "head_booke.h"
#endif
#endif
@@ -139,6 +139,7 @@ int main(void)
OFFSET(THR11, thread_struct, r11);
OFFSET(THLR, thread_struct, lr);
OFFSET(THCTR, thread_struct, ctr);
+ OFFSET(THSR0, thread_struct, sr0);
#endif
#ifdef CONFIG_SPE
OFFSET(THREAD_EVR0, thread_struct, evr[0]);
@@ -218,10 +219,12 @@ int main(void)
OFFSET(PACA_EXGEN, paca_struct, exgen);
OFFSET(PACA_EXMC, paca_struct, exmc);
OFFSET(PACA_EXNMI, paca_struct, exnmi);
+#ifdef CONFIG_PPC_64S_HASH_MMU
OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area);
+#endif
OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c
index 803c2a45b22a..9d9d56b574cc 100644
--- a/arch/powerpc/kernel/btext.c
+++ b/arch/powerpc/kernel/btext.c
@@ -161,7 +161,7 @@ void btext_map(void)
boot_text_mapped = 1;
}
-static int btext_initialize(struct device_node *np)
+static int __init btext_initialize(struct device_node *np)
{
unsigned int width, height, depth, pitch;
unsigned long address = 0;
@@ -241,8 +241,10 @@ int __init btext_find_display(int allow_nonstdout)
rc = btext_initialize(np);
printk("result: %d\n", rc);
}
- if (rc == 0)
+ if (rc == 0) {
+ of_node_put(np);
break;
+ }
}
return rc;
}
@@ -290,7 +292,7 @@ void btext_update_display(unsigned long phys, int width, int height,
}
EXPORT_SYMBOL(btext_update_display);
-void btext_clearscreen(void)
+void __init btext_clearscreen(void)
{
unsigned int *base = (unsigned int *)calc_base(0, 0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
@@ -308,7 +310,7 @@ void btext_clearscreen(void)
rmci_maybe_off();
}
-void btext_flushscreen(void)
+void __init btext_flushscreen(void)
{
unsigned int *base = (unsigned int *)calc_base(0, 0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
@@ -327,7 +329,7 @@ void btext_flushscreen(void)
__asm__ __volatile__ ("sync" ::: "memory");
}
-void btext_flushline(void)
+void __init btext_flushline(void)
{
unsigned int *base = (unsigned int *)calc_base(0, g_loc_Y << 4);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
@@ -542,7 +544,7 @@ void btext_drawstring(const char *c)
btext_drawchar(*c++);
}
-void btext_drawtext(const char *c, unsigned int len)
+void __init btext_drawtext(const char *c, unsigned int len)
{
if (!boot_text_mapped)
return;
@@ -550,7 +552,7 @@ void btext_drawtext(const char *c, unsigned int len)
btext_drawchar(*c++);
}
-void btext_drawhex(unsigned long v)
+void __init btext_drawhex(unsigned long v)
{
if (!boot_text_mapped)
return;
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index cf1be75b7833..00b0992be3e7 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -710,7 +710,7 @@ static struct kobj_attribute cache_shared_cpu_list_attr =
__ATTR(shared_cpu_list, 0444, shared_cpu_list_show, NULL);
/* Attributes which should always be created -- the kobject/sysfs core
- * does this automatically via kobj_type->default_attrs. This is the
+ * does this automatically via kobj_type->default_groups. This is the
* minimum data required to uniquely identify a cache.
*/
static struct attribute *cache_index_default_attrs[] = {
@@ -720,6 +720,7 @@ static struct attribute *cache_index_default_attrs[] = {
&cache_shared_cpu_list_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(cache_index_default);
/* Attributes which should be created if the cache device node has the
* right properties -- see cacheinfo_create_index_opt_attrs
@@ -738,7 +739,7 @@ static const struct sysfs_ops cache_index_ops = {
static struct kobj_type cache_index_type = {
.release = cache_index_release,
.sysfs_ops = &cache_index_ops,
- .default_attrs = cache_index_default_attrs,
+ .default_groups = cache_index_default_groups,
};
static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
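
For reference on the default_attrs to default_groups conversion above, ATTRIBUTE_GROUPS(cache_index_default) expands, roughly, to the following (per include/linux/sysfs.h), which is what makes cache_index_default_groups available to the kobj_type:

	static const struct attribute_group cache_index_default_group = {
		.attrs = cache_index_default_attrs,
	};
	static const struct attribute_group *cache_index_default_groups[] = {
		&cache_index_default_group,
		NULL,
	};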
diff --git a/arch/powerpc/kernel/cpu_setup_power.c b/arch/powerpc/kernel/cpu_setup_power.c
index 3cca88ee96d7..3dc61e203f37 100644
--- a/arch/powerpc/kernel/cpu_setup_power.c
+++ b/arch/powerpc/kernel/cpu_setup_power.c
@@ -109,7 +109,7 @@ static void init_PMU_HV_ISA207(void)
static void init_PMU(void)
{
mtspr(SPRN_MMCRA, 0);
- mtspr(SPRN_MMCR0, 0);
+ mtspr(SPRN_MMCR0, MMCR0_FC);
mtspr(SPRN_MMCR1, 0);
mtspr(SPRN_MMCR2, 0);
}
@@ -123,7 +123,7 @@ static void init_PMU_ISA31(void)
{
mtspr(SPRN_MMCR3, 0);
mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
- mtspr(SPRN_MMCR0, MMCR0_PMCCEXT);
+ mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
}
/*
@@ -137,6 +137,7 @@ void __setup_cpu_power7(unsigned long offset, struct cpu_spec *t)
return;
mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH);
}
@@ -150,6 +151,7 @@ void __restore_cpu_power7(void)
return;
mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA206(mfspr(SPRN_LPCR), LPCR_LPES1 >> LPCR_LPES_SH);
}
@@ -164,6 +166,7 @@ void __setup_cpu_power8(unsigned long offset, struct cpu_spec *t)
return;
mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */
init_HFSCR();
@@ -184,6 +187,7 @@ void __restore_cpu_power8(void)
return;
mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA206(mfspr(SPRN_LPCR) | LPCR_PECEDH, 0); /* LPES = 0 */
init_HFSCR();
@@ -202,6 +206,7 @@ void __setup_cpu_power9(unsigned long offset, struct cpu_spec *t)
mtspr(SPRN_PSSCR, 0);
mtspr(SPRN_LPID, 0);
mtspr(SPRN_PID, 0);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
@@ -223,6 +228,7 @@ void __restore_cpu_power9(void)
mtspr(SPRN_PSSCR, 0);
mtspr(SPRN_LPID, 0);
mtspr(SPRN_PID, 0);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
@@ -242,6 +248,7 @@ void __setup_cpu_power10(unsigned long offset, struct cpu_spec *t)
mtspr(SPRN_PSSCR, 0);
mtspr(SPRN_LPID, 0);
mtspr(SPRN_PID, 0);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
@@ -264,6 +271,7 @@ void __restore_cpu_power10(void)
mtspr(SPRN_PSSCR, 0);
mtspr(SPRN_LPID, 0);
mtspr(SPRN_PID, 0);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_PCR, PCR_MASK);
init_LPCR_ISA300((mfspr(SPRN_LPCR) | LPCR_PECEDH | LPCR_PECE_HVEE |\
LPCR_HVICE | LPCR_HEIC) & ~(LPCR_UPRT | LPCR_HR), 0);
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 5545c9cd17c1..f55c6fb34a3a 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -27,7 +27,8 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(doorbell_exception)
ppc_msgsync();
- may_hard_irq_enable();
+ if (should_hard_irq_enable())
+ do_hard_irq_enable();
kvmppc_clear_host_ipi(smp_processor_id());
__this_cpu_inc(irq_stat.doorbell_irqs);
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index ba527fb52993..7d1b2c4a4891 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -80,6 +80,7 @@ static void __restore_cpu_cpufeatures(void)
mtspr(SPRN_LPCR, system_registers.lpcr);
if (hv_mode) {
mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_HFSCR, system_registers.hfscr);
mtspr(SPRN_PCR, system_registers.pcr);
}
@@ -216,6 +217,7 @@ static int __init feat_enable_hv(struct dt_cpu_feature *f)
}
mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_AMOR, ~0);
lpcr = mfspr(SPRN_LPCR);
lpcr &= ~LPCR_LPES0; /* HV external interrupts */
@@ -271,6 +273,9 @@ static int __init feat_enable_mmu_hash(struct dt_cpu_feature *f)
{
u64 lpcr;
+ if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
+ return 0;
+
lpcr = mfspr(SPRN_LPCR);
lpcr &= ~LPCR_ISL;
@@ -290,6 +295,9 @@ static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
{
u64 lpcr;
+ if (!IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
+ return 0;
+
lpcr = mfspr(SPRN_LPCR);
lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR);
mtspr(SPRN_LPCR, lpcr);
@@ -303,15 +311,15 @@ static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
static int __init feat_enable_mmu_radix(struct dt_cpu_feature *f)
{
-#ifdef CONFIG_PPC_RADIX_MMU
+ if (!IS_ENABLED(CONFIG_PPC_RADIX_MMU))
+ return 0;
+
+ cur_cpu_spec->mmu_features |= MMU_FTR_KERNEL_RO;
cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
- cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_MMU;
return 1;
-#endif
- return 0;
}
static int __init feat_enable_dscr(struct dt_cpu_feature *f)
@@ -336,7 +344,7 @@ static int __init feat_enable_dscr(struct dt_cpu_feature *f)
return 1;
}
-static void hfscr_pmu_enable(void)
+static void __init hfscr_pmu_enable(void)
{
u64 hfscr = mfspr(SPRN_HFSCR);
hfscr |= PPC_BIT(60);
@@ -351,7 +359,7 @@ static void init_pmu_power8(void)
}
mtspr(SPRN_MMCRA, 0);
- mtspr(SPRN_MMCR0, 0);
+ mtspr(SPRN_MMCR0, MMCR0_FC);
mtspr(SPRN_MMCR1, 0);
mtspr(SPRN_MMCR2, 0);
mtspr(SPRN_MMCRS, 0);
@@ -390,7 +398,7 @@ static void init_pmu_power9(void)
mtspr(SPRN_MMCRC, 0);
mtspr(SPRN_MMCRA, 0);
- mtspr(SPRN_MMCR0, 0);
+ mtspr(SPRN_MMCR0, MMCR0_FC);
mtspr(SPRN_MMCR1, 0);
mtspr(SPRN_MMCR2, 0);
}
@@ -426,7 +434,7 @@ static void init_pmu_power10(void)
mtspr(SPRN_MMCR3, 0);
mtspr(SPRN_MMCRA, MMCRA_BHRB_DISABLE);
- mtspr(SPRN_MMCR0, MMCR0_PMCCEXT);
+ mtspr(SPRN_MMCR0, MMCR0_FC | MMCR0_PMCCEXT);
}
static int __init feat_enable_pmu_power10(struct dt_cpu_feature *f)
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index 9bdaaf7fddc9..2f9dbf8ad2ee 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -280,7 +280,7 @@ static int eeh_addr_cache_show(struct seq_file *s, void *v)
}
DEFINE_SHOW_ATTRIBUTE(eeh_addr_cache);
-void eeh_cache_debugfs_init(void)
+void __init eeh_cache_debugfs_init(void)
{
debugfs_create_file_unsafe("eeh_address_cache", 0400,
arch_debugfs_dir, NULL,
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 350dab18e137..422f80b5b27b 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -905,18 +905,19 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
}
#endif /* CONFIG_STACKTRACE */
+ eeh_for_each_pe(pe, tmp_pe)
+ eeh_pe_for_each_dev(tmp_pe, edev, tmp)
+ edev->mode &= ~EEH_DEV_NO_HANDLER;
+
eeh_pe_update_time_stamp(pe);
pe->freeze_count++;
if (pe->freeze_count > eeh_max_freezes) {
pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
pe->phb->global_number, pe->addr,
pe->freeze_count);
- result = PCI_ERS_RESULT_DISCONNECT;
- }
- eeh_for_each_pe(pe, tmp_pe)
- eeh_pe_for_each_dev(tmp_pe, edev, tmp)
- edev->mode &= ~EEH_DEV_NO_HANDLER;
+ goto recover_failed;
+ }
/* Walk the various device drivers attached to this slot through
* a reset sequence, giving each an opportunity to do what it needs
@@ -928,39 +929,38 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
* the error. Override the result if necessary to have partially
* hotplug for this case.
*/
- if (result != PCI_ERS_RESULT_DISCONNECT) {
- pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
- pe->freeze_count, eeh_max_freezes);
- pr_info("EEH: Notify device drivers to shutdown\n");
- eeh_set_channel_state(pe, pci_channel_io_frozen);
- eeh_set_irq_state(pe, false);
- eeh_pe_report("error_detected(IO frozen)", pe,
- eeh_report_error, &result);
- if ((pe->type & EEH_PE_PHB) &&
- result != PCI_ERS_RESULT_NONE &&
- result != PCI_ERS_RESULT_NEED_RESET)
- result = PCI_ERS_RESULT_NEED_RESET;
- }
+ pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
+ pe->freeze_count, eeh_max_freezes);
+ pr_info("EEH: Notify device drivers to shutdown\n");
+ eeh_set_channel_state(pe, pci_channel_io_frozen);
+ eeh_set_irq_state(pe, false);
+ eeh_pe_report("error_detected(IO frozen)", pe,
+ eeh_report_error, &result);
+ if (result == PCI_ERS_RESULT_DISCONNECT)
+ goto recover_failed;
+
+ /*
+	 * Errors logged on a PHB are always fences, which need a full
+	 * PHB reset to clear, so force that to happen.
+ */
+ if ((pe->type & EEH_PE_PHB) && result != PCI_ERS_RESULT_NONE)
+ result = PCI_ERS_RESULT_NEED_RESET;
/* Get the current PCI slot state. This can take a long time,
* sometimes over 300 seconds for certain systems.
*/
- if (result != PCI_ERS_RESULT_DISCONNECT) {
- rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
- if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
- pr_warn("EEH: Permanent failure\n");
- result = PCI_ERS_RESULT_DISCONNECT;
- }
+ rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY * 1000);
+ if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
+ pr_warn("EEH: Permanent failure\n");
+ goto recover_failed;
}
/* Since rtas may enable MMIO when posting the error log,
* don't post the error log until after all dev drivers
* have been informed.
*/
- if (result != PCI_ERS_RESULT_DISCONNECT) {
- pr_info("EEH: Collect temporary log\n");
- eeh_slot_error_detail(pe, EEH_LOG_TEMP);
- }
+ pr_info("EEH: Collect temporary log\n");
+ eeh_slot_error_detail(pe, EEH_LOG_TEMP);
/* If all device drivers were EEH-unaware, then shut
* down all of the device drivers, and hope they
@@ -970,9 +970,8 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
pr_info("EEH: Reset with hotplug activity\n");
rc = eeh_reset_device(pe, bus, NULL, false);
if (rc) {
- pr_warn("%s: Unable to reset, err=%d\n",
- __func__, rc);
- result = PCI_ERS_RESULT_DISCONNECT;
+ pr_warn("%s: Unable to reset, err=%d\n", __func__, rc);
+ goto recover_failed;
}
}
@@ -980,10 +979,10 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
pr_info("EEH: Enable I/O for affected devices\n");
rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
+ if (rc < 0)
+ goto recover_failed;
- if (rc < 0) {
- result = PCI_ERS_RESULT_DISCONNECT;
- } else if (rc) {
+ if (rc) {
result = PCI_ERS_RESULT_NEED_RESET;
} else {
pr_info("EEH: Notify device drivers to resume I/O\n");
@@ -991,15 +990,13 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
eeh_report_mmio_enabled, &result);
}
}
-
- /* If all devices reported they can proceed, then re-enable DMA */
if (result == PCI_ERS_RESULT_CAN_RECOVER) {
pr_info("EEH: Enabled DMA for affected devices\n");
rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
+ if (rc < 0)
+ goto recover_failed;
- if (rc < 0) {
- result = PCI_ERS_RESULT_DISCONNECT;
- } else if (rc) {
+ if (rc) {
result = PCI_ERS_RESULT_NEED_RESET;
} else {
/*
@@ -1017,16 +1014,15 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
pr_info("EEH: Reset without hotplug activity\n");
rc = eeh_reset_device(pe, bus, &rmv_data, true);
if (rc) {
- pr_warn("%s: Cannot reset, err=%d\n",
- __func__, rc);
- result = PCI_ERS_RESULT_DISCONNECT;
- } else {
- result = PCI_ERS_RESULT_NONE;
- eeh_set_channel_state(pe, pci_channel_io_normal);
- eeh_set_irq_state(pe, true);
- eeh_pe_report("slot_reset", pe, eeh_report_reset,
- &result);
+ pr_warn("%s: Cannot reset, err=%d\n", __func__, rc);
+ goto recover_failed;
}
+
+ result = PCI_ERS_RESULT_NONE;
+ eeh_set_channel_state(pe, pci_channel_io_normal);
+ eeh_set_irq_state(pe, true);
+ eeh_pe_report("slot_reset", pe, eeh_report_reset,
+ &result);
}
if ((result == PCI_ERS_RESULT_RECOVERED) ||
@@ -1054,45 +1050,47 @@ void eeh_handle_normal_event(struct eeh_pe *pe)
}
pr_info("EEH: Recovery successful.\n");
- } else {
- /*
- * About 90% of all real-life EEH failures in the field
- * are due to poorly seated PCI cards. Only 10% or so are
- * due to actual, failed cards.
- */
- pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
- "Please try reseating or replacing it\n",
- pe->phb->global_number, pe->addr);
+ goto out;
+ }
- eeh_slot_error_detail(pe, EEH_LOG_PERM);
+recover_failed:
+ /*
+ * About 90% of all real-life EEH failures in the field
+ * are due to poorly seated PCI cards. Only 10% or so are
+ * due to actual, failed cards.
+ */
+ pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
+ "Please try reseating or replacing it\n",
+ pe->phb->global_number, pe->addr);
- /* Notify all devices that they're about to go down. */
- eeh_set_channel_state(pe, pci_channel_io_perm_failure);
- eeh_set_irq_state(pe, false);
- eeh_pe_report("error_detected(permanent failure)", pe,
- eeh_report_failure, NULL);
+ eeh_slot_error_detail(pe, EEH_LOG_PERM);
- /* Mark the PE to be removed permanently */
- eeh_pe_state_mark(pe, EEH_PE_REMOVED);
+ /* Notify all devices that they're about to go down. */
+ eeh_set_channel_state(pe, pci_channel_io_perm_failure);
+ eeh_set_irq_state(pe, false);
+ eeh_pe_report("error_detected(permanent failure)", pe,
+ eeh_report_failure, NULL);
- /*
- * Shut down the device drivers for good. We mark
- * all removed devices correctly to avoid access
- * the their PCI config any more.
- */
- if (pe->type & EEH_PE_VF) {
- eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
- eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
- } else {
- eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
- eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
+ /* Mark the PE to be removed permanently */
+ eeh_pe_state_mark(pe, EEH_PE_REMOVED);
- pci_lock_rescan_remove();
- pci_hp_remove_devices(bus);
- pci_unlock_rescan_remove();
- /* The passed PE should no longer be used */
- return;
- }
+ /*
+ * Shut down the device drivers for good. We mark
+	 * all removed devices correctly to avoid accessing
+	 * their PCI config any more.
+ */
+ if (pe->type & EEH_PE_VF) {
+ eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
+ eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
+ } else {
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
+ eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
+
+ pci_lock_rescan_remove();
+ pci_hp_remove_devices(bus);
+ pci_unlock_rescan_remove();
+ /* The passed PE should no longer be used */
+ return;
}
out:
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 61fdd53cdd9a..7748c278d13c 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -73,13 +73,39 @@ prepare_transfer_to_handler:
_ASM_NOKPROBE_SYMBOL(prepare_transfer_to_handler)
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
+#if defined(CONFIG_PPC_KUEP) && defined(CONFIG_PPC_BOOK3S_32)
+ .globl __kuep_lock
+__kuep_lock:
+ lwz r9, THREAD+THSR0(r2)
+ update_user_segments_by_4 r9, r10, r11, r12
+ blr
+
+__kuep_unlock:
+ lwz r9, THREAD+THSR0(r2)
+ rlwinm r9,r9,0,~SR_NX
+ update_user_segments_by_4 r9, r10, r11, r12
+ blr
+
+.macro kuep_lock
+ bl __kuep_lock
+.endm
+.macro kuep_unlock
+ bl __kuep_unlock
+.endm
+#else
+.macro kuep_lock
+.endm
+.macro kuep_unlock
+.endm
+#endif
+
.globl transfer_to_syscall
transfer_to_syscall:
stw r11, GPR1(r1)
stw r11, 0(r1)
mflr r12
stw r12, _LINK(r1)
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
rlwinm r9,r9,0,14,12 /* clear MSR_WE (necessary?) */
#endif
lis r12,STACK_FRAME_REGS_MARKER@ha /* exception frame marker */
@@ -90,10 +116,10 @@ transfer_to_syscall:
stw r12,8(r1)
stw r2,_TRAP(r1)
SAVE_GPR(0, r1)
- SAVE_4GPRS(3, r1)
- SAVE_2GPRS(7, r1)
+ SAVE_GPRS(3, 8, r1)
addi r2,r10,-THREAD
SAVE_NVGPRS(r1)
+ kuep_lock
/* Calling convention has r9 = orig r0, r10 = regs */
addi r10,r1,STACK_FRAME_OVERHEAD
@@ -110,6 +136,7 @@ ret_from_syscall:
cmplwi cr0,r5,0
bne- 2f
#endif /* CONFIG_PPC_47x */
+ kuep_unlock
lwz r4,_LINK(r1)
lwz r5,_CCR(r1)
mtlr r4
@@ -139,7 +166,7 @@ syscall_exit_finish:
mtxer r5
lwz r0,GPR0(r1)
lwz r3,GPR3(r1)
- REST_8GPRS(4,r1)
+ REST_GPRS(4, 11, r1)
lwz r12,GPR12(r1)
b 1b
@@ -232,9 +259,9 @@ fast_exception_return:
beq 3f /* if not, we've got problems */
#endif
-2: REST_4GPRS(3, r11)
+2: REST_GPRS(3, 6, r11)
lwz r10,_CCR(r11)
- REST_2GPRS(1, r11)
+ REST_GPRS(1, 2, r11)
mtcr r10
lwz r10,_LINK(r11)
mtlr r10
@@ -273,6 +300,7 @@ interrupt_return:
beq .Lkernel_interrupt_return
bl interrupt_exit_user_prepare
cmpwi r3,0
+ kuep_unlock
bne- .Lrestore_nvgprs
.Lfast_user_interrupt_return:
@@ -298,16 +326,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
* the reliable stack unwinder later on. Clear it.
*/
stw r0,8(r1)
- REST_4GPRS(7, r1)
- REST_2GPRS(11, r1)
+ REST_GPRS(7, 12, r1)
mtcr r3
mtlr r4
mtctr r5
mtspr SPRN_XER,r6
- REST_4GPRS(2, r1)
- REST_GPR(6, r1)
+ REST_GPRS(2, 6, r1)
REST_GPR(0, r1)
REST_GPR(1, r1)
rfi
@@ -341,8 +367,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
lwz r6,_CCR(r1)
li r0,0
- REST_4GPRS(7, r1)
- REST_2GPRS(11, r1)
+ REST_GPRS(7, 12, r1)
mtlr r3
mtctr r4
@@ -354,7 +379,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
*/
stw r0,8(r1)
- REST_4GPRS(2, r1)
+ REST_GPRS(2, 5, r1)
bne- cr1,1f /* emulate stack store */
mtcr r6
@@ -430,8 +455,7 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return)
bne interrupt_return; \
lwz r0,GPR0(r1); \
lwz r2,GPR2(r1); \
- REST_4GPRS(3, r1); \
- REST_2GPRS(7, r1); \
+ REST_GPRS(3, 8, r1); \
lwz r10,_XER(r1); \
lwz r11,_CTR(r1); \
mtspr SPRN_XER,r10; \
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 70cff7b49e17..9581906b5ee9 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -180,7 +180,7 @@ _GLOBAL(_switch)
#endif
ld r8,KSP(r4) /* new stack pointer */
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
b 2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
@@ -232,7 +232,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
slbmte r7,r0
isync
2:
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
clrrdi r7, r8, THREAD_SHIFT /* base of new stack */
/* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
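
entry_64.S is the first of several files in this series (mce.c, paca.c,
process.c and prom.c follow below) that narrow CONFIG_PPC_BOOK3S_64 guards
to CONFIG_PPC_64S_HASH_MMU. A hedged sketch of the resulting build-time
split, assuming the new symbol is a Kconfig option set only when hash-MMU
support is built in:

	#ifdef CONFIG_PPC_64S_HASH_MMU
	static void switch_slb_context(void)	/* illustrative name */
	{
		/* SLB save/restore, needed only by the hash MMU */
	}
	#else
	static inline void switch_slb_context(void) { }	/* radix-only: no SLBs */
	#endif
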
diff --git a/arch/powerpc/kernel/epapr_paravirt.c b/arch/powerpc/kernel/epapr_paravirt.c
index 93b0f3ec8fb0..d4b8aff20815 100644
--- a/arch/powerpc/kernel/epapr_paravirt.c
+++ b/arch/powerpc/kernel/epapr_paravirt.c
@@ -37,7 +37,7 @@ static int __init early_init_dt_scan_epapr(unsigned long node,
return -1;
for (i = 0; i < (len / 4); i++) {
- struct ppc_inst inst = ppc_inst(be32_to_cpu(insts[i]));
+ ppc_inst_t inst = ppc_inst(be32_to_cpu(insts[i]));
patch_instruction(epapr_hypercall_start + i, inst);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
patch_instruction(epapr_ev_idle_start + i, inst);
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 711c66b76df1..67dc4e3179a0 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -198,8 +198,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
stdcx. r0,0,r1 /* to clear the reservation */
- REST_4GPRS(2, r1)
- REST_4GPRS(6, r1)
+ REST_GPRS(2, 9, r1)
ld r10,_CTR(r1)
ld r11,_XER(r1)
@@ -375,9 +374,7 @@ ret_from_mc_except:
exc_##n##_common: \
std r0,GPR0(r1); /* save r0 in stackframe */ \
std r2,GPR2(r1); /* save r2 in stackframe */ \
- SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
- SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
- std r9,GPR9(r1); /* save r9 in stackframe */ \
+ SAVE_GPRS(3, 9, r1); /* save r3 - r9 in stackframe */ \
std r10,_NIP(r1); /* save SRR0 to stackframe */ \
std r11,_MSR(r1); /* save SRR1 to stackframe */ \
beq 2f; /* if from kernel mode */ \
@@ -1061,9 +1058,7 @@ bad_stack_book3e:
std r11,_ESR(r1)
std r0,GPR0(r1); /* save r0 in stackframe */ \
std r2,GPR2(r1); /* save r2 in stackframe */ \
- SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
- SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \
- std r9,GPR9(r1); /* save r9 in stackframe */ \
+ SAVE_GPRS(3, 9, r1); /* save r3 - r9 in stackframe */ \
ld r3,PACA_EXGEN+EX_R10(r13);/* get back r10 */ \
ld r4,PACA_EXGEN+EX_R11(r13);/* get back r11 */ \
mfspr r5,SPRN_SPRG_GEN_SCRATCH;/* get back r13 XXX can be wrong */ \
@@ -1077,8 +1072,7 @@ bad_stack_book3e:
std r10,_LINK(r1)
std r11,_CTR(r1)
std r12,_XER(r1)
- SAVE_10GPRS(14,r1)
- SAVE_8GPRS(24,r1)
+ SAVE_GPRS(14, 31, r1)
lhz r12,PACA_TRAP_SAVE(r13)
std r12,_TRAP(r1)
addi r11,r1,INT_FRAME_SIZE
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index eaf1f72131a1..55caeee37c08 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -48,7 +48,7 @@
.balign IFETCH_ALIGN_BYTES; \
.global name; \
_ASM_NOKPROBE_SYMBOL(name); \
- DEFINE_FIXED_SYMBOL(name); \
+ DEFINE_FIXED_SYMBOL(name, text); \
name:
#define TRAMP_REAL_BEGIN(name) \
@@ -76,31 +76,18 @@ name:
ld reg,PACAKBASE(r13); /* get high part of &label */ \
ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label)
-#define __LOAD_HANDLER(reg, label) \
+#define __LOAD_HANDLER(reg, label, section) \
ld reg,PACAKBASE(r13); \
- ori reg,reg,(ABS_ADDR(label))@l
+ ori reg,reg,(ABS_ADDR(label, section))@l
/*
* Branches from unrelocated code (e.g., interrupts) to labels outside
* head-y require >64K offsets.
*/
-#define __LOAD_FAR_HANDLER(reg, label) \
+#define __LOAD_FAR_HANDLER(reg, label, section) \
ld reg,PACAKBASE(r13); \
- ori reg,reg,(ABS_ADDR(label))@l; \
- addis reg,reg,(ABS_ADDR(label))@h
-
-/*
- * Branch to label using its 0xC000 address. This results in instruction
- * address suitable for MSR[IR]=0 or 1, which allows relocation to be turned
- * on using mtmsr rather than rfid.
- *
- * This could set the 0xc bits for !RELOCATABLE as an immediate, rather than
- * load KBASE for a slight optimisation.
- */
-#define BRANCH_TO_C000(reg, label) \
- __LOAD_FAR_HANDLER(reg, label); \
- mtctr reg; \
- bctr
+ ori reg,reg,(ABS_ADDR(label, section))@l; \
+ addis reg,reg,(ABS_ADDR(label, section))@h
/*
* Interrupt code generation macros
@@ -111,9 +98,10 @@ name:
#define IAREA .L_IAREA_\name\() /* PACA save area */
#define IVIRT .L_IVIRT_\name\() /* Has virt mode entry point */
#define IISIDE .L_IISIDE_\name\() /* Uses SRR0/1 not DAR/DSISR */
+#define ICFAR .L_ICFAR_\name\() /* Uses CFAR */
+#define ICFAR_IF_HVMODE .L_ICFAR_IF_HVMODE_\name\() /* Uses CFAR if HV */
#define IDAR .L_IDAR_\name\() /* Uses DAR (or SRR0) */
#define IDSISR .L_IDSISR_\name\() /* Uses DSISR (or SRR1) */
-#define ISET_RI .L_ISET_RI_\name\() /* Run common code w/ MSR[RI]=1 */
#define IBRANCH_TO_COMMON .L_IBRANCH_TO_COMMON_\name\() /* ENTRY branch to common */
#define IREALMODE_COMMON .L_IREALMODE_COMMON_\name\() /* Common runs in realmode */
#define IMASK .L_IMASK_\name\() /* IRQ soft-mask bit */
@@ -151,15 +139,18 @@ do_define_int n
.ifndef IISIDE
IISIDE=0
.endif
+ .ifndef ICFAR
+ ICFAR=1
+ .endif
+ .ifndef ICFAR_IF_HVMODE
+ ICFAR_IF_HVMODE=0
+ .endif
.ifndef IDAR
IDAR=0
.endif
.ifndef IDSISR
IDSISR=0
.endif
- .ifndef ISET_RI
- ISET_RI=1
- .endif
.ifndef IBRANCH_TO_COMMON
IBRANCH_TO_COMMON=1
.endif
@@ -291,9 +282,21 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
HMT_MEDIUM
std r10,IAREA+EX_R10(r13) /* save r10 - r12 */
+ .if ICFAR
BEGIN_FTR_SECTION
mfspr r10,SPRN_CFAR
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ .elseif ICFAR_IF_HVMODE
+BEGIN_FTR_SECTION
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r10,SPRN_CFAR
+ END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
+FTR_SECTION_ELSE
+ BEGIN_FTR_SECTION_NESTED(69)
+ li r10,0
+ END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 69)
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+ .endif
.if \ool
.if !\virt
b tramp_real_\name
@@ -309,9 +312,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
std r9,IAREA+EX_PPR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+ .if ICFAR || ICFAR_IF_HVMODE
BEGIN_FTR_SECTION
std r10,IAREA+EX_CFAR(r13)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
+ .endif
INTERRUPT_TO_KERNEL
mfctr r10
std r10,IAREA+EX_CTR(r13)
@@ -376,7 +381,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
* This switches to virtual mode and sets MSR[RI].
*/
.macro __GEN_COMMON_ENTRY name
-DEFINE_FIXED_SYMBOL(\name\()_common_real)
+DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
.if IKVM_REAL
KVMTEST \name kvm_interrupt
@@ -399,7 +404,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
.endif
.balign IFETCH_ALIGN_BYTES
-DEFINE_FIXED_SYMBOL(\name\()_common_virt)
+DEFINE_FIXED_SYMBOL(\name\()_common_virt, text)
\name\()_common_virt:
.if IKVM_VIRT
KVMTEST \name kvm_interrupt
@@ -413,7 +418,7 @@ DEFINE_FIXED_SYMBOL(\name\()_common_virt)
* want to run in real mode.
*/
.macro __GEN_REALMODE_COMMON_ENTRY name
-DEFINE_FIXED_SYMBOL(\name\()_common_real)
+DEFINE_FIXED_SYMBOL(\name\()_common_real, text)
\name\()_common_real:
.if IKVM_REAL
KVMTEST \name kvm_interrupt
@@ -512,11 +517,6 @@ DEFINE_FIXED_SYMBOL(\name\()_common_real)
stb r10,PACASRR_VALID(r13)
.endif
- .if ISET_RI
- li r10,MSR_RI
- mtmsrd r10,1 /* Set MSR_RI */
- .endif
-
.if ISTACK
.if IKUAP
kuap_save_amr_and_lock r9, r10, cr1, cr0
@@ -568,14 +568,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
.endif
BEGIN_FTR_SECTION
+ .if ICFAR || ICFAR_IF_HVMODE
ld r10,IAREA+EX_CFAR(r13)
+ .else
+ li r10,0
+ .endif
std r10,ORIG_GPR3(r1)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
ld r10,IAREA+EX_CTR(r13)
std r10,_CTR(r1)
std r2,GPR2(r1) /* save r2 in stackframe */
- SAVE_4GPRS(3, r1) /* save r3 - r6 in stackframe */
- SAVE_2GPRS(7, r1) /* save r7, r8 in stackframe */
+ SAVE_GPRS(3, 8, r1) /* save r3 - r8 in stackframe */
mflr r9 /* Get LR, later save to stack */
ld r2,PACATOC(r13) /* get kernel TOC into r2 */
std r9,_LINK(r1)
@@ -693,8 +696,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
mtlr r9
ld r9,_CCR(r1)
mtcr r9
- REST_8GPRS(2, r1)
- REST_4GPRS(10, r1)
+ REST_GPRS(2, 13, r1)
REST_GPR(0, r1)
/* restore original r1. */
ld r1,GPR1(r1)
@@ -850,12 +852,12 @@ SOFT_MASK_TABLE(0xc000000000003000, 0xc000000000004000)
#ifdef CONFIG_RELOCATABLE
TRAMP_VIRT_BEGIN(system_call_vectored_tramp)
- __LOAD_HANDLER(r10, system_call_vectored_common)
+ __LOAD_HANDLER(r10, system_call_vectored_common, virt_trampolines)
mtctr r10
bctr
TRAMP_VIRT_BEGIN(system_call_vectored_sigill_tramp)
- __LOAD_HANDLER(r10, system_call_vectored_sigill)
+ __LOAD_HANDLER(r10, system_call_vectored_sigill, virt_trampolines)
mtctr r10
bctr
#endif
@@ -902,11 +904,6 @@ INT_DEFINE_BEGIN(system_reset)
IVEC=0x100
IAREA=PACA_EXNMI
IVIRT=0 /* no virt entry point */
- /*
- * MSR_RI is not enabled, because PACA_EXNMI and nmi stack is
- * being used, so a nested NMI exception would corrupt it.
- */
- ISET_RI=0
ISTACK=0
IKVM_REAL=1
INT_DEFINE_END(system_reset)
@@ -964,7 +961,9 @@ TRAMP_REAL_BEGIN(system_reset_idle_wake)
/* We are waking up from idle, so may clobber any volatile register */
cmpwi cr1,r5,2
bltlr cr1 /* no state loss, return to idle caller with r3=SRR1 */
- BRANCH_TO_C000(r12, DOTSYM(idle_return_gpr_loss))
+ __LOAD_FAR_HANDLER(r12, DOTSYM(idle_return_gpr_loss), real_trampolines)
+ mtctr r12
+ bctr
#endif
#ifdef CONFIG_PPC_PSERIES
@@ -979,16 +978,14 @@ TRAMP_REAL_BEGIN(system_reset_fwnmi)
EXC_COMMON_BEGIN(system_reset_common)
__GEN_COMMON_ENTRY system_reset
/*
- * Increment paca->in_nmi then enable MSR_RI. SLB or MCE will be able
- * to recover, but nested NMI will notice in_nmi and not recover
- * because of the use of the NMI stack. in_nmi reentrancy is tested in
- * system_reset_exception.
+ * Increment paca->in_nmi. When the interrupt entry wrapper later
+	 * enables MSR_RI, SLB or MCE will be able to recover, but a nested
+ * NMI will notice in_nmi and not recover because of the use of the NMI
+ * stack. in_nmi reentrancy is tested in system_reset_exception.
*/
lhz r10,PACA_IN_NMI(r13)
addi r10,r10,1
sth r10,PACA_IN_NMI(r13)
- li r10,MSR_RI
- mtmsrd r10,1
mr r10,r1
ld r1,PACA_NMI_EMERG_SP(r13)
@@ -1062,12 +1059,6 @@ INT_DEFINE_BEGIN(machine_check_early)
IAREA=PACA_EXMC
IVIRT=0 /* no virt entry point */
IREALMODE_COMMON=1
- /*
- * MSR_RI is not enabled, because PACA_EXMC is being used, so a
- * nested machine check corrupts it. machine_check_common enables
- * MSR_RI.
- */
- ISET_RI=0
ISTACK=0
IDAR=1
IDSISR=1
@@ -1078,7 +1069,6 @@ INT_DEFINE_BEGIN(machine_check)
IVEC=0x200
IAREA=PACA_EXMC
IVIRT=0 /* no virt entry point */
- ISET_RI=0
IDAR=1
IDSISR=1
IKVM_REAL=1
@@ -1148,9 +1138,6 @@ EXC_COMMON_BEGIN(machine_check_early_common)
BEGIN_FTR_SECTION
bl enable_machine_check
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
- li r10,MSR_RI
- mtmsrd r10,1
-
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_early
std r3,RESULT(r1) /* Save result */
@@ -1238,10 +1225,6 @@ EXC_COMMON_BEGIN(machine_check_common)
* save area: PACA_EXMC instead of PACA_EXGEN.
*/
GEN_COMMON machine_check
-
- /* Enable MSR_RI when finished with PACA_EXMC */
- li r10,MSR_RI
- mtmsrd r10,1
addi r3,r1,STACK_FRAME_OVERHEAD
bl machine_check_exception_async
b interrupt_return_srr
@@ -1369,11 +1352,15 @@ EXC_COMMON_BEGIN(data_access_common)
addi r3,r1,STACK_FRAME_OVERHEAD
andis. r0,r4,DSISR_DABRMATCH@h
bne- 1f
+#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
bl do_hash_fault
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+ bl do_page_fault
+#endif
b interrupt_return_srr
1: bl do_break
@@ -1416,6 +1403,7 @@ EXC_VIRT_BEGIN(data_access_slb, 0x4380, 0x80)
EXC_VIRT_END(data_access_slb, 0x4380, 0x80)
EXC_COMMON_BEGIN(data_access_slb_common)
GEN_COMMON data_access_slb
+#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -1428,9 +1416,12 @@ MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+ li r3,-EFAULT
+#endif
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_bad_slb_fault
+ bl do_bad_segment_interrupt
b interrupt_return_srr
@@ -1462,11 +1453,15 @@ EXC_VIRT_END(instruction_access, 0x4400, 0x80)
EXC_COMMON_BEGIN(instruction_access_common)
GEN_COMMON instruction_access
addi r3,r1,STACK_FRAME_OVERHEAD
+#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
bl do_hash_fault
MMU_FTR_SECTION_ELSE
bl do_page_fault
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+ bl do_page_fault
+#endif
b interrupt_return_srr
@@ -1496,6 +1491,7 @@ EXC_VIRT_BEGIN(instruction_access_slb, 0x4480, 0x80)
EXC_VIRT_END(instruction_access_slb, 0x4480, 0x80)
EXC_COMMON_BEGIN(instruction_access_slb_common)
GEN_COMMON instruction_access_slb
+#ifdef CONFIG_PPC_64S_HASH_MMU
BEGIN_MMU_FTR_SECTION
/* HPT case, do SLB fault */
addi r3,r1,STACK_FRAME_OVERHEAD
@@ -1508,9 +1504,12 @@ MMU_FTR_SECTION_ELSE
/* Radix case, access is outside page table range */
li r3,-EFAULT
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
+#else
+ li r3,-EFAULT
+#endif
std r3,RESULT(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
- bl do_bad_slb_fault
+ bl do_bad_segment_interrupt
b interrupt_return_srr
@@ -1536,6 +1535,12 @@ ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_TYPE_RADIX)
*
* If soft masked, the masked handler will note the pending interrupt for
* replay, and clear MSR[EE] in the interrupted context.
+ *
+ * CFAR is not required because this is an asynchronous interrupt that in
+ * general won't have much bearing on the state of the CPU, with the possible
+ * exception of crash/debug IPIs, but those are generally moving to use SRESET
+ * IPIs. The exception is an HV interrupt when KVM HV is possible: that may
+ * be a guest exit, which needs CFAR to be saved.
*/
INT_DEFINE_BEGIN(hardware_interrupt)
IVEC=0x500
@@ -1543,6 +1548,10 @@ INT_DEFINE_BEGIN(hardware_interrupt)
IMASK=IRQS_DISABLED
IKVM_REAL=1
IKVM_VIRT=1
+ ICFAR=0
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ ICFAR_IF_HVMODE=1
+#endif
INT_DEFINE_END(hardware_interrupt)
EXC_REAL_BEGIN(hardware_interrupt, 0x500, 0x100)
@@ -1764,6 +1773,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
* If PPC_WATCHDOG is configured, the soft masked handler will actually set
* things back up to run soft_nmi_interrupt as a regular interrupt handler
* on the emergency stack.
+ *
+ * CFAR is not required because this is asynchronous (see hardware_interrupt).
+ * A watchdog interrupt may like to have CFAR, but usually the interesting
+ * branch is long gone by that point (e.g., infinite loop).
*/
INT_DEFINE_BEGIN(decrementer)
IVEC=0x900
@@ -1771,6 +1784,7 @@ INT_DEFINE_BEGIN(decrementer)
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
+ ICFAR=0
INT_DEFINE_END(decrementer)
EXC_REAL_BEGIN(decrementer, 0x900, 0x80)
@@ -1846,6 +1860,8 @@ EXC_COMMON_BEGIN(hdecrementer_common)
* If soft masked, the masked handler will note the pending interrupt for
* replay, leaving MSR[EE] enabled in the interrupted context because the
* doorbells are edge triggered.
+ *
+ * CFAR is not required, similarly to hardware_interrupt.
*/
INT_DEFINE_BEGIN(doorbell_super)
IVEC=0xa00
@@ -1853,6 +1869,7 @@ INT_DEFINE_BEGIN(doorbell_super)
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
+ ICFAR=0
INT_DEFINE_END(doorbell_super)
EXC_REAL_BEGIN(doorbell_super, 0xa00, 0x100)
@@ -1904,6 +1921,7 @@ INT_DEFINE_BEGIN(system_call)
IVEC=0xc00
IKVM_REAL=1
IKVM_VIRT=1
+ ICFAR=0
INT_DEFINE_END(system_call)
.macro SYSTEM_CALL virt
@@ -1942,12 +1960,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
HMT_MEDIUM
.if ! \virt
- __LOAD_HANDLER(r10, system_call_common_real)
+ __LOAD_HANDLER(r10, system_call_common_real, real_vectors)
mtctr r10
bctr
.else
#ifdef CONFIG_RELOCATABLE
- __LOAD_HANDLER(r10, system_call_common)
+ __LOAD_HANDLER(r10, system_call_common, virt_vectors)
mtctr r10
bctr
#else
@@ -2001,7 +2019,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	 * Requires __LOAD_FAR_HANDLER because kvmppc_hcall lives
* outside the head section.
*/
- __LOAD_FAR_HANDLER(r10, kvmppc_hcall)
+ __LOAD_FAR_HANDLER(r10, kvmppc_hcall, real_trampolines)
mtctr r10
bctr
#else
@@ -2202,6 +2220,11 @@ EXC_COMMON_BEGIN(hmi_exception_common)
* Interrupt 0xe80 - Directed Hypervisor Doorbell Interrupt.
* This is an asynchronous interrupt in response to a msgsnd doorbell.
* Similar to the 0xa00 doorbell but for host rather than guest.
+ *
+ * CFAR is not required (similar to doorbell_interrupt), unless KVM HV
+ * is enabled, in which case it may be a guest exit. Most PowerNV kernels
+ * include KVM support, so it would be nice if this could be dynamically
+ * patched out when KVM is not currently running any guests.
*/
INT_DEFINE_BEGIN(h_doorbell)
IVEC=0xe80
@@ -2209,6 +2232,9 @@ INT_DEFINE_BEGIN(h_doorbell)
IMASK=IRQS_DISABLED
IKVM_REAL=1
IKVM_VIRT=1
+#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ ICFAR=0
+#endif
INT_DEFINE_END(h_doorbell)
EXC_REAL_BEGIN(h_doorbell, 0xe80, 0x20)
@@ -2232,6 +2258,9 @@ EXC_COMMON_BEGIN(h_doorbell_common)
* Interrupt 0xea0 - Hypervisor Virtualization Interrupt.
* This is an asynchronous interrupt in response to an "external exception".
* Similar to 0x500 but for host only.
+ *
+ * Like h_doorbell, CFAR is only required for KVM HV because this can be
+ * a guest exit.
*/
INT_DEFINE_BEGIN(h_virt_irq)
IVEC=0xea0
@@ -2239,6 +2268,9 @@ INT_DEFINE_BEGIN(h_virt_irq)
IMASK=IRQS_DISABLED
IKVM_REAL=1
IKVM_VIRT=1
+#ifndef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ ICFAR=0
+#endif
INT_DEFINE_END(h_virt_irq)
EXC_REAL_BEGIN(h_virt_irq, 0xea0, 0x20)
@@ -2275,6 +2307,8 @@ EXC_VIRT_NONE(0x4ee0, 0x20)
*
* If soft masked, the masked handler will note the pending interrupt for
* replay, and clear MSR[EE] in the interrupted context.
+ *
+ * CFAR is not used by perf interrupts so not required.
*/
INT_DEFINE_BEGIN(performance_monitor)
IVEC=0xf00
@@ -2282,6 +2316,7 @@ INT_DEFINE_BEGIN(performance_monitor)
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
IKVM_REAL=1
#endif
+ ICFAR=0
INT_DEFINE_END(performance_monitor)
EXC_REAL_BEGIN(performance_monitor, 0xf00, 0x20)
@@ -2706,6 +2741,7 @@ EXC_VIRT_NONE(0x5800, 0x100)
INT_DEFINE_BEGIN(soft_nmi)
IVEC=0x900
ISTACK=0
+ ICFAR=0
INT_DEFINE_END(soft_nmi)
/*
@@ -3025,7 +3061,7 @@ USE_FIXED_SECTION(virt_trampolines)
.align 7
.globl __end_interrupts
__end_interrupts:
-DEFINE_FIXED_SYMBOL(__end_interrupts)
+DEFINE_FIXED_SYMBOL(__end_interrupts, virt_trampolines)
CLOSE_FIXED_SECTION(real_vectors);
CLOSE_FIXED_SECTION(real_trampolines);
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index b7ceb041743c..d03e488cfe9c 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -251,7 +251,7 @@ bool is_fadump_reserved_mem_contiguous(void)
}
/* Print firmware assisted dump configurations for debugging purpose. */
-static void fadump_show_config(void)
+static void __init fadump_show_config(void)
{
int i;
@@ -353,7 +353,7 @@ static __init u64 fadump_calculate_reserve_size(void)
* Calculate the total memory size required to be reserved for
* firmware-assisted dump registration.
*/
-static unsigned long get_fadump_area_size(void)
+static unsigned long __init get_fadump_area_size(void)
{
unsigned long size = 0;
@@ -462,7 +462,7 @@ static int __init fadump_get_boot_mem_regions(void)
* with the given memory range.
* False, otherwise.
*/
-static bool overlaps_reserved_ranges(u64 base, u64 end, int *idx)
+static bool __init overlaps_reserved_ranges(u64 base, u64 end, int *idx)
{
bool ret = false;
int i;
@@ -737,7 +737,7 @@ void crash_fadump(struct pt_regs *regs, const char *str)
fw_dump.ops->fadump_trigger(fdh, str);
}
-u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
+u32 *__init fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
{
struct elf_prstatus prstatus;
@@ -752,7 +752,7 @@ u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
return buf;
}
-void fadump_update_elfcore_header(char *bufp)
+void __init fadump_update_elfcore_header(char *bufp)
{
struct elf_phdr *phdr;
@@ -770,7 +770,7 @@ void fadump_update_elfcore_header(char *bufp)
return;
}
-static void *fadump_alloc_buffer(unsigned long size)
+static void *__init fadump_alloc_buffer(unsigned long size)
{
unsigned long count, i;
struct page *page;
@@ -792,7 +792,7 @@ static void fadump_free_buffer(unsigned long vaddr, unsigned long size)
free_reserved_area((void *)vaddr, (void *)(vaddr + size), -1, NULL);
}
-s32 fadump_setup_cpu_notes_buf(u32 num_cpus)
+s32 __init fadump_setup_cpu_notes_buf(u32 num_cpus)
{
/* Allocate buffer to hold cpu crash notes. */
fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t);
@@ -1447,7 +1447,7 @@ static ssize_t release_mem_store(struct kobject *kobj,
}
/* Release the reserved memory and disable the FADump */
-static void unregister_fadump(void)
+static void __init unregister_fadump(void)
{
fadump_cleanup();
fadump_release_memory(fw_dump.reserve_dump_area_start,
@@ -1547,7 +1547,7 @@ ATTRIBUTE_GROUPS(fadump);
DEFINE_SHOW_ATTRIBUTE(fadump_region);
-static void fadump_init_files(void)
+static void __init fadump_init_files(void)
{
int rc = 0;
@@ -1641,6 +1641,14 @@ int __init setup_fadump(void)
else if (fw_dump.reserve_dump_area_size)
fw_dump.ops->fadump_init_mem_struct(&fw_dump);
+ /*
+ * In case of panic, fadump is triggered via ppc_panic_event()
+ * panic notifier. Setting crash_kexec_post_notifiers to 'true'
+ * lets panic() function take crash friendly path before panic
+ * notifiers are invoked.
+ */
+ crash_kexec_post_notifiers = true;
+
return 1;
}
subsys_initcall(setup_fadump);
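
The crash_kexec_post_notifiers setting relies on the ordering inside
panic(). A simplified sketch of that ordering (condensed from
kernel/panic.c; fadump's ppc_panic_event() sits on the notifier chain):

	void panic_ordering_sketch(void)
	{
		if (!crash_kexec_post_notifiers)
			__crash_kexec(NULL);	/* default: try kexec first */
		atomic_notifier_call_chain(&panic_notifier_list, 0, "msg");
		if (crash_kexec_post_notifiers)
			__crash_kexec(NULL);	/* fadump has been triggered by now */
	}
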
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index ba4afe3b5a9c..f71f2bbd4de6 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -81,7 +81,12 @@ EXPORT_SYMBOL(store_fp_state)
*/
_GLOBAL(load_up_fpu)
mfmsr r5
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* interrupt doesn't set MSR[RI] and HPT can fault on current access */
+ ori r5,r5,MSR_FP|MSR_RI
+#else
ori r5,r5,MSR_FP
+#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
oris r5,r5,MSR_VSX@h
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index 349c4a820231..c3286260a7d1 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -115,8 +115,7 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
stw r10,8(r1)
li r10, \trapno
stw r10,_TRAP(r1)
- SAVE_4GPRS(3, r1)
- SAVE_2GPRS(7, r1)
+ SAVE_GPRS(3, 8, r1)
SAVE_NVGPRS(r1)
stw r2,GPR2(r1)
stw r12,_NIP(r1)
@@ -136,6 +135,12 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
andi. r12,r9,MSR_PR
bne 777f
bl prepare_transfer_to_handler
+#ifdef CONFIG_PPC_KUEP
+ b 778f
+777:
+ bl __kuep_lock
+778:
+#endif
777:
#endif
.endm
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 7d72ee5ab387..b6c6d1de5fd5 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -27,6 +27,7 @@
#include <linux/init.h>
#include <linux/pgtable.h>
+#include <linux/sizes.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
@@ -297,6 +298,10 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
3:
mfspr r11,SPRN_SPRG_THREAD
lwz r11,PGDIR(r11)
+#ifdef CONFIG_PPC_KUAP
+ rlwinm. r9, r9, 0, 0xff
+ beq 5f /* Kuap fault */
+#endif
4:
tophys(r11, r11)
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
@@ -377,6 +382,10 @@ _ASM_NOKPROBE_SYMBOL(\name\()_virt)
3:
mfspr r11,SPRN_SPRG_THREAD
lwz r11,PGDIR(r11)
+#ifdef CONFIG_PPC_KUAP
+ rlwinm. r9, r9, 0, 0xff
+ beq 5f /* Kuap fault */
+#endif
4:
tophys(r11, r11)
rlwimi r11, r10, 12, 20, 29 /* Create L1 (pgdir/pmd) address */
@@ -650,7 +659,7 @@ start_here:
b . /* prevent prefetch past rfi */
/* Set up the initial MMU state so we can do the first level of
- * kernel initialization. This maps the first 16 MBytes of memory 1:1
+ * kernel initialization. This maps the first 32 MBytes of memory 1:1
* virtual to physical and more importantly sets the cache mode.
*/
initial_mmu:
@@ -687,6 +696,12 @@ initial_mmu:
tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */
tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */
+ li r0,62 /* TLB slot 62 */
+ addis r4,r4,SZ_16M@h
+ addis r3,r3,SZ_16M@h
+ tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */
+ tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */
+
isync
/* Establish the exception vector base
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 02d2928d1e01..b73a56466903 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -334,6 +334,10 @@ interrupt_base:
mfspr r12,SPRN_MMUCR
mfspr r13,SPRN_PID /* Get PID */
rlwimi r12,r13,0,24,31 /* Set TID */
+#ifdef CONFIG_PPC_KUAP
+ cmpwi r13,0
+ beq 2f /* KUAP Fault */
+#endif
4:
mtspr SPRN_MMUCR,r12
@@ -444,6 +448,10 @@ interrupt_base:
mfspr r12,SPRN_MMUCR
mfspr r13,SPRN_PID /* Get PID */
rlwimi r12,r13,0,24,31 /* Set TID */
+#ifdef CONFIG_PPC_KUAP
+ cmpwi r13,0
+ beq 2f /* KUAP Fault */
+#endif
4:
mtspr SPRN_MMUCR,r12
@@ -532,10 +540,7 @@ finish_tlb_load_44x:
andi. r10,r12,_PAGE_USER /* User page ? */
beq 1f /* nope, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
-#ifdef CONFIG_PPC_KUEP
-0: rlwinm r11,r11,0,~PPC44x_TLB_SX /* Clear SX if User page */
- patch_site 0b, patch__tlb_44x_kuep
-#endif
+ rlwinm r11,r11,0,~PPC44x_TLB_SX /* Clear SX if User page */
1: tlbwe r11,r13,PPC44x_TLB_ATTRIB /* Write ATTRIB */
/* Done...restore registers and get out of here.
@@ -575,6 +580,10 @@ finish_tlb_load_44x:
3: mfspr r11,SPRN_SPRG3
lwz r11,PGDIR(r11)
mfspr r12,SPRN_PID /* Get PID */
+#ifdef CONFIG_PPC_KUAP
+ cmpwi r12,0
+ beq 2f /* KUAP Fault */
+#endif
4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */
/* Mask of required permission bits. Note that while we
@@ -672,6 +681,10 @@ finish_tlb_load_44x:
3: mfspr r11,SPRN_SPRG_THREAD
lwz r11,PGDIR(r11)
mfspr r12,SPRN_PID /* Get PID */
+#ifdef CONFIG_PPC_KUAP
+ cmpwi r12,0
+ beq 2f /* KUAP Fault */
+#endif
4: mtspr SPRN_MMUCR,r12 /* Set MMUCR */
/* Make up the required permissions */
@@ -747,10 +760,7 @@ finish_tlb_load_47x:
andi. r10,r12,_PAGE_USER /* User page ? */
beq 1f /* nope, leave U bits empty */
rlwimi r11,r11,3,26,28 /* yes, copy S bits to U */
-#ifdef CONFIG_PPC_KUEP
-0: rlwinm r11,r11,0,~PPC47x_TLB2_SX /* Clear SX if User page */
- patch_site 0b, patch__tlb_47x_kuep
-#endif
+ rlwinm r11,r11,0,~PPC47x_TLB2_SX /* Clear SX if User page */
1: tlbwe r11,r13,2
/* Done...restore registers and get out of here.
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index f17ae2083733..5c5181e8d5f1 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -126,7 +126,7 @@ __secondary_hold_acknowledge:
. = 0x5c
.globl __run_at_load
__run_at_load:
-DEFINE_FIXED_SYMBOL(__run_at_load)
+DEFINE_FIXED_SYMBOL(__run_at_load, first_256B)
.long RUN_AT_LOAD_DEFAULT
#endif
@@ -156,7 +156,7 @@ __secondary_hold:
/* Tell the master cpu we're here */
/* Relocation is off & we are located at an address less */
/* than 0x100, so only need to grab low order offset. */
- std r24,(ABS_ADDR(__secondary_hold_acknowledge))(0)
+ std r24,(ABS_ADDR(__secondary_hold_acknowledge, first_256B))(0)
sync
li r26,0
@@ -164,7 +164,7 @@ __secondary_hold:
tovirt(r26,r26)
#endif
/* All secondary cpus wait here until told to start. */
-100: ld r12,(ABS_ADDR(__secondary_hold_spinloop))(r26)
+100: ld r12,(ABS_ADDR(__secondary_hold_spinloop, first_256B))(r26)
cmpdi 0,r12,0
beq 100b
@@ -649,15 +649,15 @@ __after_prom_start:
3:
#endif
/* # bytes of memory to copy */
- lis r5,(ABS_ADDR(copy_to_here))@ha
- addi r5,r5,(ABS_ADDR(copy_to_here))@l
+ lis r5,(ABS_ADDR(copy_to_here, text))@ha
+ addi r5,r5,(ABS_ADDR(copy_to_here, text))@l
bl copy_and_flush /* copy the first n bytes */
/* this includes the code being */
/* executed here. */
/* Jump to the copy of this code that we just made */
- addis r8,r3,(ABS_ADDR(4f))@ha
- addi r12,r8,(ABS_ADDR(4f))@l
+ addis r8,r3,(ABS_ADDR(4f, text))@ha
+ addi r12,r8,(ABS_ADDR(4f, text))@l
mtctr r12
bctr
@@ -669,8 +669,8 @@ p_end: .8byte _end - copy_to_here
* Now copy the rest of the kernel up to _end, add
* _end - copy_to_here to the copy limit and run again.
*/
- addis r8,r26,(ABS_ADDR(p_end))@ha
- ld r8,(ABS_ADDR(p_end))@l(r8)
+ addis r8,r26,(ABS_ADDR(p_end, text))@ha
+ ld r8,(ABS_ADDR(p_end, text))@l(r8)
add r5,r5,r8
5: bl copy_and_flush /* copy the rest */
@@ -904,7 +904,7 @@ _GLOBAL(relative_toc)
blr
.balign 8
-p_toc: .8byte __toc_start + 0x8000 - 0b
+p_toc: .8byte .TOC. - 0b
/*
* This is where the main kernel code starts.
diff --git a/arch/powerpc/kernel/head_book3s_32.S b/arch/powerpc/kernel/head_book3s_32.S
index 68e5c0a7e99d..b876ef8c70a7 100644
--- a/arch/powerpc/kernel/head_book3s_32.S
+++ b/arch/powerpc/kernel/head_book3s_32.S
@@ -421,14 +421,14 @@ InstructionTLBMiss:
*/
/* Get PTE (linux-style) and check access */
mfspr r3,SPRN_IMISS
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
lis r1, TASK_SIZE@h /* check if kernel address */
cmplw 0,r1,r3
#endif
mfspr r2, SPRN_SDR1
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC | _PAGE_USER
rlwinm r2, r2, 28, 0xfffff000
-#ifdef CONFIG_MODULES
+#if defined(CONFIG_MODULES) || defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
bgt- 112f
lis r2, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
li r1,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC
@@ -931,7 +931,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
_GLOBAL(load_segment_registers)
li r0, NUM_USER_SEGMENTS /* load up user segment register values */
mtctr r0 /* for context 0 */
+#ifdef CONFIG_PPC_KUEP
+ lis r3, SR_NX@h /* Kp = 0, Ks = 0, VSID = 0 */
+#else
li r3, 0 /* Kp = 0, Ks = 0, VSID = 0 */
+#endif
li r4, 0
3: mtsrin r3, r4
addi r3, r3, 0x111 /* increment VSID */
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index ef8d1b1c234e..bb6d5d0fc4ac 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -87,8 +87,7 @@ END_BTB_FLUSH_SECTION
stw r10, 8(r1)
li r10, \trapno
stw r10,_TRAP(r1)
- SAVE_4GPRS(3, r1)
- SAVE_2GPRS(7, r1)
+ SAVE_GPRS(3, 8, r1)
SAVE_NVGPRS(r1)
stw r2,GPR2(r1)
stw r12,_NIP(r1)
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 0a9a0f301474..ac2b4dcf5fd3 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -462,6 +462,12 @@ END_BTB_FLUSH_SECTION
mfspr r11,SPRN_SPRG_THREAD
lwz r11,PGDIR(r11)
+#ifdef CONFIG_PPC_KUAP
+ mfspr r12, SPRN_MAS1
+ rlwinm. r12,r12,0,0x3fff0000
+ beq 2f /* KUAP fault */
+#endif
+
4:
/* Mask of required permission bits. Note that while we
* do copy ESR:ST to _PAGE_RW position as trying to write
@@ -571,6 +577,12 @@ END_BTB_FLUSH_SECTION
mfspr r11,SPRN_SPRG_THREAD
lwz r11,PGDIR(r11)
+#ifdef CONFIG_PPC_KUAP
+ mfspr r12, SPRN_MAS1
+ rlwinm. r12,r12,0,0x3fff0000
+ beq 2f /* KUAP fault */
+#endif
+
/* Make up the required permissions for user code */
#ifdef CONFIG_PTE_64BIT
li r13,_PAGE_PRESENT | _PAGE_BAP_UX
@@ -777,6 +789,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS)
andi. r10, r11, _PAGE_USER /* Test for _PAGE_USER */
slwi r10, r12, 1
or r10, r10, r12
+ rlwinm r10, r10, 0, ~_PAGE_EXEC /* Clear SX on user pages */
iseleq r12, r12, r10
rlwimi r13, r12, 0, 20, 31 /* Get RPN from PTE, merge w/ perms */
mtspr SPRN_MAS3, r13
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 91a3be14808b..2669f80b3a49 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -523,7 +523,7 @@ static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info
static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
struct arch_hw_breakpoint **info, int *hit,
- struct ppc_inst instr)
+ ppc_inst_t instr)
{
int i;
int stepped;
@@ -616,7 +616,7 @@ int hw_breakpoint_handler(struct die_args *args)
int hit[HBP_NUM_MAX] = {0};
int nr_hit = 0;
bool ptrace_bp = false;
- struct ppc_inst instr = ppc_inst(0);
+ ppc_inst_t instr = ppc_inst(0);
int type = 0;
int size = 0;
unsigned long ea;
diff --git a/arch/powerpc/kernel/hw_breakpoint_constraints.c b/arch/powerpc/kernel/hw_breakpoint_constraints.c
index 42b967e3d85c..a74623025f3a 100644
--- a/arch/powerpc/kernel/hw_breakpoint_constraints.c
+++ b/arch/powerpc/kernel/hw_breakpoint_constraints.c
@@ -80,7 +80,7 @@ static bool check_dawrx_constraints(struct pt_regs *regs, int type,
* Return true if the event is valid wrt dawr configuration,
* including extraneous exception. Otherwise return false.
*/
-bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr,
+bool wp_check_constraints(struct pt_regs *regs, ppc_inst_t instr,
unsigned long ea, int type, int size,
struct arch_hw_breakpoint *info)
{
@@ -127,7 +127,7 @@ bool wp_check_constraints(struct pt_regs *regs, struct ppc_inst instr,
return false;
}
-void wp_get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
+void wp_get_instr_detail(struct pt_regs *regs, ppc_inst_t *instr,
int *type, int *size, unsigned long *ea)
{
struct instruction_op op;
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 1f835539fda4..4ad79eb638c6 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -82,7 +82,7 @@ void power4_idle(void)
return;
if (cpu_has_feature(CPU_FTR_ALTIVEC))
- asm volatile("DSSALL ; sync" ::: "memory");
+ asm volatile(PPC_DSSALL " ; sync" ::: "memory");
power4_idle_nap();
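
PPC_DSSALL replaces the bare DSSALL mnemonic here and in the assembly files
below. Presumably it is defined in ppc-opcode.h along these lines, emitting
the raw opcode so the instruction assembles regardless of the assembler's
AltiVec support (the exact spelling of the macros is an assumption):

	/* dssall: stop all data stream touches (AltiVec) */
	#define PPC_RAW_DSSALL		(0x7e00066c)
	#define PPC_DSSALL		stringify_in_c(.long PPC_RAW_DSSALL)
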
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 13cad9297d82..3c097356366b 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -129,7 +129,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFCLR(CPU_FTR_NO_DPM)
mtspr SPRN_HID0,r4
BEGIN_FTR_SECTION
- DSSALL
+ PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
lwz r8,TI_LOCAL_FLAGS(r2) /* set napping bit */
diff --git a/arch/powerpc/kernel/interrupt.c b/arch/powerpc/kernel/interrupt.c
index 563ebfcd16e2..7cd6ce3ec423 100644
--- a/arch/powerpc/kernel/interrupt.c
+++ b/arch/powerpc/kernel/interrupt.c
@@ -81,7 +81,7 @@ notrace long system_call_exception(long r3, long r4, long r5,
{
syscall_fn f;
- kuep_lock();
+ kuap_lock();
regs->orig_gpr3 = r3;
@@ -406,7 +406,6 @@ again:
/* Restore user access locks last */
kuap_user_restore(regs);
- kuep_unlock();
return ret;
}
diff --git a/arch/powerpc/kernel/interrupt_64.S b/arch/powerpc/kernel/interrupt_64.S
index ec950b08a8dc..7bab2d7de372 100644
--- a/arch/powerpc/kernel/interrupt_64.S
+++ b/arch/powerpc/kernel/interrupt_64.S
@@ -30,21 +30,25 @@ COMPAT_SYS_CALL_TABLE:
.ifc \srr,srr
mfspr r11,SPRN_SRR0
ld r12,_NIP(r1)
+ clrrdi r11,r11,2
+ clrrdi r12,r12,2
100: tdne r11,r12
- EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
mfspr r11,SPRN_SRR1
ld r12,_MSR(r1)
100: tdne r11,r12
- EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
.else
mfspr r11,SPRN_HSRR0
ld r12,_NIP(r1)
+ clrrdi r11,r11,2
+ clrrdi r12,r12,2
100: tdne r11,r12
- EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
mfspr r11,SPRN_HSRR1
ld r12,_MSR(r1)
100: tdne r11,r12
- EMIT_BUG_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
+ EMIT_WARN_ENTRY 100b,__FILE__,__LINE__,(BUGFLAG_WARNING | BUGFLAG_ONCE)
.endif
#endif
.endm
@@ -162,10 +166,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
* The value of AMR only matters while we're in the kernel.
*/
mtcr r2
- ld r2,GPR2(r1)
- ld r3,GPR3(r1)
- ld r13,GPR13(r1)
- ld r1,GPR1(r1)
+ REST_GPRS(2, 3, r1)
+ REST_GPR(13, r1)
+ REST_GPR(1, r1)
RFSCV_TO_USER
b . /* prevent speculative execution */
@@ -183,9 +186,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
mtctr r3
mtlr r4
mtspr SPRN_XER,r5
- REST_10GPRS(2, r1)
- REST_2GPRS(12, r1)
- ld r1,GPR1(r1)
+ REST_GPRS(2, 13, r1)
+ REST_GPR(1, r1)
RFI_TO_USER
.Lsyscall_vectored_\name\()_rst_end:
@@ -374,10 +376,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
* The value of AMR only matters while we're in the kernel.
*/
mtcr r2
- ld r2,GPR2(r1)
- ld r3,GPR3(r1)
- ld r13,GPR13(r1)
- ld r1,GPR1(r1)
+ REST_GPRS(2, 3, r1)
+ REST_GPR(13, r1)
+ REST_GPR(1, r1)
RFI_TO_USER
b . /* prevent speculative execution */
@@ -388,8 +389,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
mtctr r3
mtspr SPRN_XER,r4
ld r0,GPR0(r1)
- REST_8GPRS(4, r1)
- ld r12,GPR12(r1)
+ REST_GPRS(4, 12, r1)
b .Lsyscall_restore_regs_cont
.Lsyscall_rst_end:
@@ -518,17 +518,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
ld r6,_XER(r1)
li r0,0
- REST_4GPRS(7, r1)
- REST_2GPRS(11, r1)
- REST_GPR(13, r1)
+ REST_GPRS(7, 13, r1)
mtcr r3
mtlr r4
mtctr r5
mtspr SPRN_XER,r6
- REST_4GPRS(2, r1)
- REST_GPR(6, r1)
+ REST_GPRS(2, 6, r1)
REST_GPR(0, r1)
REST_GPR(1, r1)
.ifc \srr,srr
@@ -625,8 +622,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
ld r6,_CCR(r1)
li r0,0
- REST_4GPRS(7, r1)
- REST_2GPRS(11, r1)
+ REST_GPRS(7, 12, r1)
mtlr r3
mtctr r4
@@ -638,7 +634,7 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
*/
std r0,STACK_FRAME_OVERHEAD-16(r1)
- REST_4GPRS(2, r1)
+ REST_GPRS(2, 5, r1)
bne- cr1,1f /* emulate stack store */
mtcr r6
@@ -703,7 +699,7 @@ interrupt_return_macro hsrr
.globl __end_soft_masked
__end_soft_masked:
-DEFINE_FIXED_SYMBOL(__end_soft_masked)
+DEFINE_FIXED_SYMBOL(__end_soft_masked, text)
#endif /* CONFIG_PPC_BOOK3S */
#ifdef CONFIG_PPC_BOOK3S
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c4f1d6b7d992..2cf31a97126c 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -745,7 +745,8 @@ void __do_irq(struct pt_regs *regs)
irq = ppc_md.get_irq();
/* We can hard enable interrupts now to allow perf interrupts */
- may_hard_irq_enable();
+ if (should_hard_irq_enable())
+ do_hard_irq_enable();
/* And finally process it */
if (unlikely(!irq))
@@ -811,7 +812,7 @@ void __init init_IRQ(void)
ppc_md.init_IRQ();
}
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
void *critirq_ctx[NR_CPUS] __read_mostly;
void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
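
The old may_hard_irq_enable() combined the test with the action; the new
pair makes the decision explicit at the call site. A minimal sketch of the
call-site pattern, using only the names from the hunk (the bodies live in
asm/hw_irq.h and are not shown):

	if (should_hard_irq_enable())		/* safe to hard-enable here? */
		do_hard_irq_enable();		/* set MSR[EE] so perf IRQs can arrive */
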
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index bdee7262c080..9f8d0fa7b718 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -48,7 +48,7 @@ static struct hard_trap_info
{ 0x0800, 0x08 /* SIGFPE */ }, /* fp unavailable */
{ 0x0900, 0x0e /* SIGALRM */ }, /* decrementer */
{ 0x0c00, 0x14 /* SIGCHLD */ }, /* system call */
-#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+#ifdef CONFIG_BOOKE_OR_40x
{ 0x2002, 0x05 /* SIGTRAP */ }, /* debug */
#if defined(CONFIG_FSL_BOOKE)
{ 0x2010, 0x08 /* SIGFPE */ }, /* spe unavailable */
@@ -67,7 +67,7 @@ static struct hard_trap_info
{ 0x2010, 0x08 /* SIGFPE */ }, /* fp unavailable */
{ 0x2020, 0x08 /* SIGFPE */ }, /* ap unavailable */
#endif
-#else /* ! (defined(CONFIG_40x) || defined(CONFIG_BOOKE)) */
+#else /* !CONFIG_BOOKE_OR_40x */
{ 0x0d00, 0x05 /* SIGTRAP */ }, /* single-step */
#if defined(CONFIG_PPC_8xx)
{ 0x1000, 0x04 /* SIGILL */ }, /* software emulation */
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 86d77ff056a6..9a492fdec1df 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -124,7 +124,7 @@ int arch_prepare_kprobe(struct kprobe *p)
{
int ret = 0;
struct kprobe *prev;
- struct ppc_inst insn = ppc_inst_read(p->addr);
+ ppc_inst_t insn = ppc_inst_read(p->addr);
if ((unsigned long)p->addr & 0x03) {
printk("Attempt to register kprobe at an unaligned address\n");
@@ -244,7 +244,7 @@ NOKPROBE_SYMBOL(arch_prepare_kretprobe);
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
int ret;
- struct ppc_inst insn = ppc_inst_read(p->ainsn.insn);
+ ppc_inst_t insn = ppc_inst_read(p->ainsn.insn);
/* regs->nip is also adjusted if emulate_step returns 1 */
ret = emulate_step(regs, insn);
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S
index 225511d73bef..f2e03ed423d0 100644
--- a/arch/powerpc/kernel/l2cr_6xx.S
+++ b/arch/powerpc/kernel/l2cr_6xx.S
@@ -96,7 +96,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L2CR)
/* Stop DST streams */
BEGIN_FTR_SECTION
- DSSALL
+ PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
@@ -292,7 +292,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_L3CR)
isync
/* Stop DST streams */
- DSSALL
+ PPC_DSSALL
sync
/* Get the current enable bit of the L3CR into r4 */
@@ -401,7 +401,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
_GLOBAL(__flush_disable_L1)
/* Stop pending alitvec streams and memory accesses */
BEGIN_FTR_SECTION
- DSSALL
+ PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index fd829f7f25a4..2503dd4713b9 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -586,7 +586,7 @@ void machine_check_print_event_info(struct machine_check_event *evt,
mc_error_class[evt->error_class] : "Unknown";
printk("%sMCE: CPU%d: %s\n", level, evt->cpu, subtype);
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
/* Display faulty slb contents for SLB errors. */
if (evt->error_type == MCE_ERROR_TYPE_SLB && !in_guest)
slb_dump_contents(local_paca->mce_faulty_slbs);
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index c2f55fe7092d..71e8f2a92e36 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -77,15 +77,15 @@ static bool mce_in_guest(void)
}
/* flush SLBs and reload */
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
void flush_and_reload_slb(void)
{
- /* Invalidate all SLBs */
- slb_flush_all_realmode();
-
if (early_radix_enabled())
return;
+ /* Invalidate all SLBs */
+ slb_flush_all_realmode();
+
/*
* This probably shouldn't happen, but it may be possible it's
* called in early boot before SLB shadows are allocated.
@@ -99,7 +99,7 @@ void flush_and_reload_slb(void)
void flush_erat(void)
{
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (!early_cpu_has_feature(CPU_FTR_ARCH_300)) {
flush_and_reload_slb();
return;
@@ -114,7 +114,7 @@ void flush_erat(void)
static int mce_flush(int what)
{
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (what == MCE_FLUSH_SLB) {
flush_and_reload_slb();
return 1;
@@ -455,7 +455,7 @@ static int mce_find_instr_ea_and_phys(struct pt_regs *regs, uint64_t *addr,
* in real-mode is tricky and can lead to recursive
* faults
*/
- struct ppc_inst instr;
+ ppc_inst_t instr;
unsigned long pfn, instr_addr;
struct instruction_op op;
struct pt_regs tmp = *regs;
@@ -499,8 +499,10 @@ static int mce_handle_ierror(struct pt_regs *regs, unsigned long srr1,
/* attempt to correct the error */
switch (table[i].error_type) {
case MCE_ERROR_TYPE_SLB:
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (local_paca->in_mce == 1)
slb_save_contents(local_paca->mce_faulty_slbs);
+#endif
handled = mce_flush(MCE_FLUSH_SLB);
break;
case MCE_ERROR_TYPE_ERAT:
@@ -588,8 +590,10 @@ static int mce_handle_derror(struct pt_regs *regs,
/* attempt to correct the error */
switch (table[i].error_type) {
case MCE_ERROR_TYPE_SLB:
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (local_paca->in_mce == 1)
slb_save_contents(local_paca->mce_faulty_slbs);
+#endif
if (mce_flush(MCE_FLUSH_SLB))
handled = 1;
break;
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index ed04a3ba66fe..40a583e9d3c7 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -90,16 +90,17 @@ int module_finalize(const Elf_Ehdr *hdr,
}
static __always_inline void *
-__module_alloc(unsigned long size, unsigned long start, unsigned long end)
+__module_alloc(unsigned long size, unsigned long start, unsigned long end, bool nowarn)
{
pgprot_t prot = strict_module_rwx_enabled() ? PAGE_KERNEL : PAGE_KERNEL_EXEC;
+ gfp_t gfp = GFP_KERNEL | (nowarn ? __GFP_NOWARN : 0);
/*
* Don't do huge page allocations for modules yet until more testing
* is done. STRICT_MODULE_RWX may require extra work to support this
* too.
*/
- return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, prot,
+ return __vmalloc_node_range(size, 1, start, end, gfp, prot,
VM_FLUSH_RESET_PERMS | VM_NO_HUGE_VMAP,
NUMA_NO_NODE, __builtin_return_address(0));
}
@@ -114,13 +115,13 @@ void *module_alloc(unsigned long size)
/* First try within 32M limit from _etext to avoid branch trampolines */
if (MODULES_VADDR < PAGE_OFFSET && MODULES_END > limit)
- ptr = __module_alloc(size, limit, MODULES_END);
+ ptr = __module_alloc(size, limit, MODULES_END, true);
if (!ptr)
- ptr = __module_alloc(size, MODULES_VADDR, MODULES_END);
+ ptr = __module_alloc(size, MODULES_VADDR, MODULES_END, false);
return ptr;
#else
- return __module_alloc(size, VMALLOC_START, VMALLOC_END);
+ return __module_alloc(size, VMALLOC_START, VMALLOC_END, false);
#endif
}
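
The nowarn flag makes the two-stage allocation quiet on its expected
failure: the first attempt is confined to the 32MB window below MODULES_END
and routinely fails once that window fills, so only the final, unrestricted
attempt should warn. The pattern, restated as a sketch:

	void *p = __module_alloc(size, limit, MODULES_END, true);	/* quiet */
	if (!p)	/* window full: fall back to the whole module area */
		p = __module_alloc(size, MODULES_VADDR, MODULES_END, false);
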
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index f417afc08d33..a491ad481d85 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -273,6 +273,31 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
#ifdef CONFIG_DYNAMIC_FTRACE
+int module_trampoline_target(struct module *mod, unsigned long addr,
+ unsigned long *target)
+{
+ unsigned int jmp[4];
+
+ /* Find where the trampoline jumps to */
+ if (copy_from_kernel_nofault(jmp, (void *)addr, sizeof(jmp)))
+ return -EFAULT;
+
+ /* verify that this is what we expect it to be */
+ if ((jmp[0] & 0xffff0000) != PPC_RAW_LIS(_R12, 0) ||
+ (jmp[1] & 0xffff0000) != PPC_RAW_ADDI(_R12, _R12, 0) ||
+ jmp[2] != PPC_RAW_MTCTR(_R12) ||
+ jmp[3] != PPC_RAW_BCTR())
+ return -EINVAL;
+
+ addr = (jmp[1] & 0xffff) | ((jmp[0] & 0xffff) << 16);
+ if (addr & 0x8000)
+ addr -= 0x10000;
+
+ *target = addr;
+
+ return 0;
+}
+
int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
{
module->arch.tramp = do_plt_call(module->core_layout.base,
@@ -281,6 +306,14 @@ int module_finalize_ftrace(struct module *module, const Elf_Shdr *sechdrs)
if (!module->arch.tramp)
return -ENOENT;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ module->arch.tramp_regs = do_plt_call(module->core_layout.base,
+ (unsigned long)ftrace_regs_caller,
+ sechdrs, module);
+ if (!module->arch.tramp_regs)
+ return -ENOENT;
+#endif
+
return 0;
}
#endif
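
The sign adjustment in module_trampoline_target() compensates for addi's
sign-extended immediate. A worked example with illustrative half-words:

	/* Trampoline "lis r12,0x1001 ; addi r12,r12,0x8000" really loads
	 * 0x10010000 + (short)0x8000 = 0x10010000 - 0x8000 = 0x10008000. */
	unsigned long hi = 0x1001, lo = 0x8000;	/* jmp[0] & 0xffff, jmp[1] & 0xffff */
	unsigned long addr = lo | (hi << 16);	/* naive decode: 0x10018000 */
	if (addr & 0x8000)
		addr -= 0x10000;		/* corrected: 0x10008000 */
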
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 3c8d9bbb51cf..0d9f9cd41e13 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -540,7 +540,7 @@ static struct pstore_info nvram_pstore_info = {
.write = nvram_pstore_write,
};
-static int nvram_pstore_init(void)
+static int __init nvram_pstore_init(void)
{
int rc = 0;
@@ -562,7 +562,7 @@ static int nvram_pstore_init(void)
return rc;
}
#else
-static int nvram_pstore_init(void)
+static int __init nvram_pstore_init(void)
{
return -1;
}
@@ -755,7 +755,7 @@ static unsigned char __init nvram_checksum(struct nvram_header *p)
* Per the criteria passed via nvram_remove_partition(), should this
* partition be removed? 1=remove, 0=keep
*/
-static int nvram_can_remove_partition(struct nvram_partition *part,
+static int __init nvram_can_remove_partition(struct nvram_partition *part,
const char *name, int sig, const char *exceptions[])
{
if (part->header.signature != sig)
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index ce1903064031..3b1c2236cbee 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -153,7 +153,7 @@ static void patch_imm_load_insns(unsigned long val, int reg, kprobe_opcode_t *ad
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
{
- struct ppc_inst branch_op_callback, branch_emulate_step, temp;
+ ppc_inst_t branch_op_callback, branch_emulate_step, temp;
unsigned long op_callback_addr, emulate_step_addr;
kprobe_opcode_t *buff;
long b_offset;
@@ -228,12 +228,8 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *p)
/*
* 3. load instruction to be emulated into relevant register, and
*/
- if (IS_ENABLED(CONFIG_PPC64)) {
- temp = ppc_inst_read(p->ainsn.insn);
- patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);
- } else {
- patch_imm_load_insns((unsigned long)p->ainsn.insn, 4, buff + TMPL_INSN_IDX);
- }
+ temp = ppc_inst_read(p->ainsn.insn);
+ patch_imm_load_insns(ppc_inst_as_ulong(temp), 4, buff + TMPL_INSN_IDX);
/*
* 4. branch back from trampoline
@@ -269,7 +265,7 @@ int arch_check_optimized_kprobe(struct optimized_kprobe *op)
void arch_optimize_kprobes(struct list_head *oplist)
{
- struct ppc_inst instr;
+ ppc_inst_t instr;
struct optimized_kprobe *op;
struct optimized_kprobe *tmp;
diff --git a/arch/powerpc/kernel/optprobes_head.S b/arch/powerpc/kernel/optprobes_head.S
index 19ea3312403c..5c7f0b4b784b 100644
--- a/arch/powerpc/kernel/optprobes_head.S
+++ b/arch/powerpc/kernel/optprobes_head.S
@@ -10,8 +10,8 @@
#include <asm/asm-offsets.h>
#ifdef CONFIG_PPC64
-#define SAVE_30GPRS(base) SAVE_10GPRS(2,base); SAVE_10GPRS(12,base); SAVE_10GPRS(22,base)
-#define REST_30GPRS(base) REST_10GPRS(2,base); REST_10GPRS(12,base); REST_10GPRS(22,base)
+#define SAVE_30GPRS(base) SAVE_GPRS(2, 31, base)
+#define REST_30GPRS(base) REST_GPRS(2, 31, base)
#define TEMPLATE_FOR_IMM_LOAD_INSNS nop; nop; nop; nop; nop
#else
#define SAVE_30GPRS(base) stmw r2, GPR2(base)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 4208b4044d12..39da688a9455 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -139,8 +139,7 @@ static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
}
#endif /* CONFIG_PPC_PSERIES */
-#ifdef CONFIG_PPC_BOOK3S_64
-
+#ifdef CONFIG_PPC_64S_HASH_MMU
/*
* 3 persistent SLBs are allocated here. The buffer will be zero
 * initially, hence will all be invalid until we actually write them.
@@ -169,8 +168,7 @@ static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
return s;
}
-
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
#ifdef CONFIG_PPC_PSERIES
/**
@@ -226,7 +224,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
new_paca->kexec_state = KEXEC_STATE_NONE;
new_paca->__current = &init_task;
new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
new_paca->slb_shadow_ptr = NULL;
#endif
@@ -307,7 +305,7 @@ void __init allocate_paca(int cpu)
#ifdef CONFIG_PPC_PSERIES
paca->lppaca_ptr = new_lppaca(cpu, limit);
#endif
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
#endif
#ifdef CONFIG_PPC_PSERIES
@@ -328,7 +326,7 @@ void __init free_unused_pacas(void)
paca_nr_cpu_ids = nr_cpu_ids;
paca_ptrs_size = new_ptrs_size;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (early_radix_enabled()) {
/* Ugly fixup, see new_slb_shadow() */
memblock_phys_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
@@ -341,9 +339,9 @@ void __init free_unused_pacas(void)
paca_ptrs_size + paca_struct_size, nr_cpu_ids);
}
+#ifdef CONFIG_PPC_64S_HASH_MMU
void copy_mm_to_paca(struct mm_struct *mm)
{
-#ifdef CONFIG_PPC_BOOK3S
mm_context_t *context = &mm->context;
#ifdef CONFIG_PPC_MM_SLICES
@@ -356,7 +354,5 @@ void copy_mm_to_paca(struct mm_struct *mm)
get_paca()->mm_ctx_user_psize = context->user_psize;
get_paca()->mm_ctx_sllp = context->sllp;
#endif
-#else /* !CONFIG_PPC_BOOK3S */
- return;
-#endif
}
+#endif /* CONFIG_PPC_64S_HASH_MMU */
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 6749905932f4..8bc9cf62cd93 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -62,7 +62,7 @@ EXPORT_SYMBOL(isa_mem_base);
static const struct dma_map_ops *pci_dma_ops;
-void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
+void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops)
{
pci_dma_ops = dma_ops;
}
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index b49e1060a3bf..48537964fba1 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -37,7 +37,7 @@ int pcibios_assign_bus_offset = 1;
EXPORT_SYMBOL(isa_io_base);
EXPORT_SYMBOL(pci_dram_offset);
-void pcibios_make_OF_bus_map(void);
+void __init pcibios_make_OF_bus_map(void);
static void fixup_cpc710_pci64(struct pci_dev* dev);
static u8* pci_to_OF_bus_map;
@@ -109,7 +109,7 @@ make_one_node_map(struct device_node* node, u8 pci_bus)
}
}
-void
+void __init
pcibios_make_OF_bus_map(void)
{
int i;
diff --git a/arch/powerpc/kernel/proc_powerpc.c b/arch/powerpc/kernel/proc_powerpc.c
index 877817471e3c..6a029f2378e1 100644
--- a/arch/powerpc/kernel/proc_powerpc.c
+++ b/arch/powerpc/kernel/proc_powerpc.c
@@ -25,7 +25,7 @@ static ssize_t page_map_read( struct file *file, char __user *buf, size_t nbytes
loff_t *ppos)
{
return simple_read_from_buffer(buf, nbytes, ppos,
- PDE_DATA(file_inode(file)), PAGE_SIZE);
+ pde_data(file_inode(file)), PAGE_SIZE);
}
static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
@@ -34,7 +34,7 @@ static int page_map_mmap( struct file *file, struct vm_area_struct *vma )
return -EINVAL;
remap_pfn_range(vma, vma->vm_start,
- __pa(PDE_DATA(file_inode(file))) >> PAGE_SHIFT,
+ __pa(pde_data(file_inode(file))) >> PAGE_SHIFT,
PAGE_SIZE, vma->vm_page_prot);
return 0;
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 406d7ee9e322..984813a4d5dc 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -628,7 +628,7 @@ static void do_break_handler(struct pt_regs *regs)
{
struct arch_hw_breakpoint null_brk = {0};
struct arch_hw_breakpoint *info;
- struct ppc_inst instr = ppc_inst(0);
+ ppc_inst_t instr = ppc_inst(0);
int type = 0;
int size = 0;
unsigned long ea;
@@ -1156,6 +1156,40 @@ static inline void save_sprs(struct thread_struct *t)
#endif
}
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void kvmppc_save_user_regs(void)
+{
+ unsigned long usermsr;
+
+ if (!current->thread.regs)
+ return;
+
+ usermsr = current->thread.regs->msr;
+
+ if (usermsr & MSR_FP)
+ save_fpu(current);
+
+ if (usermsr & MSR_VEC)
+ save_altivec(current);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (usermsr & MSR_TM) {
+ current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+ current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+ current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+ current->thread.regs->msr &= ~MSR_TM;
+ }
+#endif
+}
+EXPORT_SYMBOL_GPL(kvmppc_save_user_regs);
+
+void kvmppc_save_current_sprs(void)
+{
+ save_sprs(&current->thread);
+}
+EXPORT_SYMBOL_GPL(kvmppc_save_current_sprs);
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+
static inline void restore_sprs(struct thread_struct *old_thread,
struct thread_struct *new_thread)
{
@@ -1206,7 +1240,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
{
struct thread_struct *new_thread, *old_thread;
struct task_struct *last;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
struct ppc64_tlb_batch *batch;
#endif
@@ -1215,7 +1249,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
WARN_ON(!irqs_disabled());
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
batch = this_cpu_ptr(&ppc64_tlb_batch);
if (batch->active) {
current_thread_info()->local_flags |= _TLF_LAZY_MMU;
@@ -1281,9 +1315,9 @@ struct task_struct *__switch_to(struct task_struct *prev,
set_return_regs_changed(); /* _switch changes stack (and regs) */
-#ifdef CONFIG_PPC32
- kuap_assert_locked();
-#endif
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ kuap_assert_locked();
+
last = _switch(old_thread, new_thread);
/*
@@ -1294,6 +1328,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
*/
#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
/*
* This applies to a process that was context switched while inside
* arch_enter_lazy_mmu_mode(), to re-activate the batch that was
@@ -1305,6 +1340,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
batch = this_cpu_ptr(&ppc64_tlb_batch);
batch->active = 1;
}
+#endif
/*
* Math facilities are masked out of the child MSR in copy_thread.
@@ -1655,7 +1691,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
unsigned long sp_vsid;
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
@@ -1767,6 +1803,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#if defined(CONFIG_PPC_BOOK3S_32) && defined(CONFIG_PPC_KUAP)
p->thread.kuap = KUAP_NONE;
#endif
+#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
+ p->thread.pid = MMU_NO_CONTEXT;
+#endif
setup_ksp_vsid(p, sp);
@@ -2299,10 +2338,9 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
* the heap, we can put it above 1TB so it is backed by a 1TB
* segment. Otherwise the heap will be in the bottom 1TB
* which always uses 256MB segments and this may result in a
- * performance penalty. We don't need to worry about radix. For
- * radix, mmu_highuser_ssize remains unchanged from 256MB.
+ * performance penalty.
*/
- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
+ if (!radix_enabled() && !is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif
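Illustrative sketch, not part of the patch: the calling pattern the two new exports in process.c are meant to support. A KVM HV guest-entry path (the caller name below is a hypothetical placeholder) flushes user register state into the thread struct before the CPU is handed to the guest:

	/* hypothetical KVM HV entry-path caller, for illustration only */
	static void kvm_hv_entry_example(void)
	{
		kvmppc_save_user_regs();	/* FP/VEC state, and TM SPRs, to current->thread */
		kvmppc_save_current_sprs();	/* remaining host SPRs to current->thread */
		/* ... switch MMU context and enter the guest ... */
	}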
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 2e67588f6f6e..3d30d40a0e9c 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -231,7 +231,7 @@ static void __init check_cpu_pa_features(unsigned long node)
ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
static void __init init_mmu_slb_size(unsigned long node)
{
const __be32 *slb_size_ptr;
@@ -402,7 +402,7 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */
/* Use common scan routine to determine if this is the chosen node */
- if (early_init_dt_scan_chosen(node, uname, depth, data) == 0)
+ if (early_init_dt_scan_chosen(data) < 0)
return 0;
#ifdef CONFIG_PPC64
@@ -447,7 +447,7 @@ static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
*/
#ifdef CONFIG_SPARSEMEM
-static bool validate_mem_limit(u64 base, u64 *size)
+static bool __init validate_mem_limit(u64 base, u64 *size)
{
u64 max_mem = 1UL << (MAX_PHYSMEM_BITS);
@@ -458,7 +458,7 @@ static bool validate_mem_limit(u64 base, u64 *size)
return true;
}
#else
-static bool validate_mem_limit(u64 base, u64 *size)
+static bool __init validate_mem_limit(u64 base, u64 *size)
{
return true;
}
@@ -532,19 +532,18 @@ static int __init early_init_drmem_lmb(struct drmem_lmb *lmb,
}
#endif /* CONFIG_PPC_PSERIES */
-static int __init early_init_dt_scan_memory_ppc(unsigned long node,
- const char *uname,
- int depth, void *data)
+static int __init early_init_dt_scan_memory_ppc(void)
{
#ifdef CONFIG_PPC_PSERIES
- if (depth == 1 &&
- strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0) {
+ const void *fdt = initial_boot_params;
+ int node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
+
+ if (node > 0)
walk_drmem_lmbs_early(node, NULL, early_init_drmem_lmb);
- return 0;
- }
+
#endif
-
- return early_init_dt_scan_memory(node, uname, depth, data);
+
+ return early_init_dt_scan_memory();
}
/*
@@ -748,8 +747,8 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);
/* Scan memory nodes and rebuild MEMBLOCKs */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
- of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
+ early_init_dt_scan_root();
+ early_init_dt_scan_memory_ppc();
parse_early_param();
@@ -857,8 +856,8 @@ void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
* mess the memblock.
*/
add_mem_to_memblock = 0;
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
- of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL);
+ early_init_dt_scan_root();
+ early_init_dt_scan_memory_ppc();
add_mem_to_memblock = 1;
if (size)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 18b04b08b983..0ac5faacc909 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -672,7 +672,7 @@ static inline int __init prom_getproplen(phandle node, const char *pname)
return call_prom("getproplen", 2, 1, node, ADDR(pname));
}
-static void add_string(char **str, const char *q)
+static void __init add_string(char **str, const char *q)
{
char *p = *str;
@@ -682,7 +682,7 @@ static void add_string(char **str, const char *q)
*str = p;
}
-static char *tohex(unsigned int x)
+static char *__init tohex(unsigned int x)
{
static const char digits[] __initconst = "0123456789abcdef";
static char result[9] __prombss;
@@ -728,7 +728,7 @@ static int __init prom_setprop(phandle node, const char *nodename,
#define prom_islower(c) ('a' <= (c) && (c) <= 'z')
#define prom_toupper(c) (prom_islower(c) ? ((c) - 'a' + 'A') : (c))
-static unsigned long prom_strtoul(const char *cp, const char **endp)
+static unsigned long __init prom_strtoul(const char *cp, const char **endp)
{
unsigned long result = 0, base = 10, value;
@@ -753,7 +753,7 @@ static unsigned long prom_strtoul(const char *cp, const char **endp)
return result;
}
-static unsigned long prom_memparse(const char *ptr, const char **retptr)
+static unsigned long __init prom_memparse(const char *ptr, const char **retptr)
{
unsigned long ret = prom_strtoul(ptr, retptr);
int shift = 0;
@@ -1786,7 +1786,7 @@ static void __init prom_close_stdin(void)
}
#ifdef CONFIG_PPC_SVM
-static int prom_rtas_hcall(uint64_t args)
+static int __init prom_rtas_hcall(uint64_t args)
{
register uint64_t arg1 asm("r3") = H_RTAS;
register uint64_t arg2 asm("r4") = args;
@@ -2991,7 +2991,7 @@ static void __init fixup_device_tree_efika_add_phy(void)
/* Check if the phy-handle property exists - bail if it does */
rv = prom_getprop(node, "phy-handle", prop, sizeof(prop));
- if (!rv)
+ if (rv <= 0)
return;
/*
@@ -3248,7 +3248,7 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
/*
* Perform the Enter Secure Mode ultracall.
*/
-static int enter_secure_mode(unsigned long kbase, unsigned long fdt)
+static int __init enter_secure_mode(unsigned long kbase, unsigned long fdt)
{
register unsigned long r3 asm("r3") = UV_ESM;
register unsigned long r4 asm("r4") = kbase;
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index ff80bbad22a5..733e6ef36758 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -492,8 +492,25 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
}
EXPORT_SYMBOL(rtas_call);
-/* For RTAS_BUSY (-2), delay for 1 millisecond. For an extended busy status
- * code of 990n, perform the hinted delay of 10^n (last digit) milliseconds.
+/**
+ * rtas_busy_delay_time() - From an RTAS status value, calculate the
+ * suggested delay time in milliseconds.
+ *
+ * @status: a value returned from rtas_call() or similar APIs which return
+ * the status of a RTAS function call.
+ *
+ * Context: Any context.
+ *
+ * Return:
+ * * 100000 - If @status is 9905.
+ * * 10000 - If @status is 9904.
+ * * 1000 - If @status is 9903.
+ * * 100 - If @status is 9902.
+ * * 10 - If @status is 9901.
+ * * 1 - If @status is either 9900 or -2. This is "wrong" for -2, but
+ * some callers depend on this behavior, and the worst outcome
+ * is that they will delay for longer than necessary.
+ * * 0 - If @status is not a busy or extended delay value.
*/
unsigned int rtas_busy_delay_time(int status)
{
@@ -513,17 +530,77 @@ unsigned int rtas_busy_delay_time(int status)
}
EXPORT_SYMBOL(rtas_busy_delay_time);
-/* For an RTAS busy status code, perform the hinted delay. */
-unsigned int rtas_busy_delay(int status)
+/**
+ * rtas_busy_delay() - helper for RTAS busy and extended delay statuses
+ *
+ * @status: a value returned from rtas_call() or similar APIs which return
+ * the status of a RTAS function call.
+ *
+ * Context: Process context. May sleep or schedule.
+ *
+ * Return:
+ * * true - @status is RTAS_BUSY or an extended delay hint. The
+ * caller may assume that the CPU has been yielded if necessary,
+ * and that an appropriate delay for @status has elapsed.
+ * Generally the caller should reattempt the RTAS call which
+ * yielded @status.
+ *
+ * * false - @status is neither @RTAS_BUSY nor an extended delay hint. The
+ * caller is responsible for handling @status.
+ */
+bool rtas_busy_delay(int status)
{
unsigned int ms;
+ bool ret;
- might_sleep();
- ms = rtas_busy_delay_time(status);
- if (ms && need_resched())
- msleep(ms);
+ switch (status) {
+ case RTAS_EXTENDED_DELAY_MIN...RTAS_EXTENDED_DELAY_MAX:
+ ret = true;
+ ms = rtas_busy_delay_time(status);
+ /*
+ * The extended delay hint can be as high as 100 seconds.
+ * Surely any function returning such a status is either
+ * buggy or isn't going to be significantly slowed by us
+ * polling at 1HZ. Clamp the sleep time to one second.
+ */
+ ms = clamp(ms, 1U, 1000U);
+ /*
+ * The delay hint is an order-of-magnitude suggestion, not
+ * a minimum. It is fine, possibly even advantageous, for
+ * us to pause for less time than hinted. For small values,
+ * use usleep_range() to ensure we don't sleep much longer
+ * than actually needed.
+ *
+ * See Documentation/timers/timers-howto.rst for
+ * explanation of the threshold used here. In effect we use
+ * usleep_range() for 9900 and 9901, msleep() for
+ * 9902-9905.
+ */
+ if (ms <= 20)
+ usleep_range(ms * 100, ms * 1000);
+ else
+ msleep(ms);
+ break;
+ case RTAS_BUSY:
+ ret = true;
+ /*
+ * We should call again immediately if there's no other
+ * work to do.
+ */
+ cond_resched();
+ break;
+ default:
+ ret = false;
+ /*
+ * Not a busy or extended delay status; the caller should
+ * handle @status itself. Ensure we warn on misuses in
+ * atomic context regardless.
+ */
+ might_sleep();
+ break;
+ }
- return ms;
+ return ret;
}
EXPORT_SYMBOL(rtas_busy_delay);
@@ -809,13 +886,13 @@ void rtas_os_term(char *str)
/**
* rtas_activate_firmware() - Activate a new version of firmware.
*
+ * Context: This function may sleep.
+ *
* Activate a new version of partition firmware. The OS must call this
* after resuming from a partition hibernation or migration in order
* to maintain the ability to perform live firmware updates. It's not
* catastrophic for this method to be absent or to fail; just log the
* condition in that case.
- *
- * Context: This function may sleep.
*/
void rtas_activate_firmware(void)
{
@@ -890,11 +967,12 @@ int rtas_call_reentrant(int token, int nargs, int nret, int *outputs, ...)
#endif /* CONFIG_PPC_PSERIES */
/**
- * Find a specific pseries error log in an RTAS extended event log.
+ * get_pseries_errorlog() - Find a specific pseries error log in an RTAS
+ * extended event log.
* @log: RTAS error/event log
* @section_id: two character section identifier
*
- * Returns a pointer to the specified errorlog or NULL if not found.
+ * Return: A pointer to the specified errorlog or NULL if not found.
*/
struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
uint16_t section_id)
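Illustrative sketch, not part of the patch: the retry idiom the reworked rtas_busy_delay() contract is written for. Because a true return now guarantees the hinted delay has already elapsed, callers can loop on it directly (the token and argument values are placeholders):

	int fwrc;

	do {
		fwrc = rtas_call(token, 0, 1, NULL);
	} while (rtas_busy_delay(fwrc));	/* sleeps or yields per the status hint */

	if (fwrc)
		pr_err("RTAS call failed: %d\n", fwrc);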
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c
index 32ee17753eb4..cf0f42909ddf 100644
--- a/arch/powerpc/kernel/rtasd.c
+++ b/arch/powerpc/kernel/rtasd.c
@@ -455,7 +455,7 @@ static void rtas_event_scan(struct work_struct *w)
}
#ifdef CONFIG_PPC64
-static void retrieve_nvram_error_log(void)
+static void __init retrieve_nvram_error_log(void)
{
unsigned int err_type ;
int rc ;
@@ -473,12 +473,12 @@ static void retrieve_nvram_error_log(void)
}
}
#else /* CONFIG_PPC64 */
-static void retrieve_nvram_error_log(void)
+static void __init retrieve_nvram_error_log(void)
{
}
#endif /* CONFIG_PPC64 */
-static void start_event_scan(void)
+static void __init start_event_scan(void)
{
printk(KERN_DEBUG "RTAS daemon started\n");
pr_debug("rtasd: will sleep for %d milliseconds\n",
diff --git a/arch/powerpc/kernel/security.c b/arch/powerpc/kernel/security.c
index 15fb5ea1b9ea..e159d4093d98 100644
--- a/arch/powerpc/kernel/security.c
+++ b/arch/powerpc/kernel/security.c
@@ -44,7 +44,7 @@ static void enable_barrier_nospec(bool enable)
do_barrier_nospec_fixups(enable);
}
-void setup_barrier_nospec(void)
+void __init setup_barrier_nospec(void)
{
bool enable;
@@ -132,7 +132,7 @@ early_param("nospectre_v2", handle_nospectre_v2);
#endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_PPC_FSL_BOOK3E
-void setup_spectre_v2(void)
+void __init setup_spectre_v2(void)
{
if (no_spectrev2 || cpu_mitigations_off())
do_btb_flush_fixups();
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 4f1322b65760..f8da937df918 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -582,7 +582,7 @@ static __init int add_pcspkr(void)
device_initcall(add_pcspkr);
#endif /* CONFIG_PCSPKR_PLATFORM */
-void probe_machine(void)
+static __init void probe_machine(void)
{
extern struct machdep_calls __machine_desc_start;
extern struct machdep_calls __machine_desc_end;
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
index 84058bbc8fe9..93f22da12abe 100644
--- a/arch/powerpc/kernel/setup.h
+++ b/arch/powerpc/kernel/setup.h
@@ -29,7 +29,7 @@ void setup_tlb_core_data(void);
static inline void setup_tlb_core_data(void) { }
#endif
-#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
void exc_lvl_early_init(void);
#else
static inline void exc_lvl_early_init(void) { }
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 7ec5c47fce0e..a6e9d36d7c01 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -75,7 +75,7 @@ EXPORT_SYMBOL(DMA_MODE_WRITE);
notrace void __init machine_init(u64 dt_ptr)
{
u32 *addr = (u32 *)patch_site_addr(&patch__memset_nocache);
- struct ppc_inst insn;
+ ppc_inst_t insn;
/* Configure static keys first, now that we're relocated. */
setup_feature_keys();
@@ -175,7 +175,7 @@ void __init emergency_stack_init(void)
}
#endif
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
void __init exc_lvl_early_init(void)
{
unsigned int i, hw_cpu;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 6052f5d5ded3..be8577ac9397 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -499,7 +499,7 @@ void smp_release_cpus(void)
* routines and/or provided to userland
*/
-static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
+static void __init init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize,
u32 bsize, u32 sets)
{
info->size = size;
@@ -771,50 +771,6 @@ void __init emergency_stack_init(void)
}
#ifdef CONFIG_SMP
-/**
- * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
- * @cpu: cpu to allocate for
- * @size: size allocation in bytes
- * @align: alignment
- *
- * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
- * does the right thing for NUMA regardless of the current
- * configuration.
- *
- * RETURNS:
- * Pointer to the allocated area on success, NULL on failure.
- */
-static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
- size_t align)
-{
- const unsigned long goal = __pa(MAX_DMA_ADDRESS);
-#ifdef CONFIG_NUMA
- int node = early_cpu_to_node(cpu);
- void *ptr;
-
- if (!node_online(node) || !NODE_DATA(node)) {
- ptr = memblock_alloc_from(size, align, goal);
- pr_info("cpu %d has no node %d or node-local memory\n",
- cpu, node);
- pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
- cpu, size, __pa(ptr));
- } else {
- ptr = memblock_alloc_try_nid(size, align, goal,
- MEMBLOCK_ALLOC_ACCESSIBLE, node);
- pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
- "%016lx\n", cpu, size, node, __pa(ptr));
- }
- return ptr;
-#else
- return memblock_alloc_from(size, align, goal);
-#endif
-}
-
-static void __init pcpu_free_bootmem(void *ptr, size_t size)
-{
- memblock_free(ptr, size);
-}
-
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
{
if (early_cpu_to_node(from) == early_cpu_to_node(to))
@@ -823,53 +779,13 @@ static int pcpu_cpu_distance(unsigned int from, unsigned int to)
return REMOTE_DISTANCE;
}
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(__per_cpu_offset);
-
-static void __init pcpu_populate_pte(unsigned long addr)
+static __init int pcpu_cpu_to_node(int cpu)
{
- pgd_t *pgd = pgd_offset_k(addr);
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
-
- p4d = p4d_offset(pgd, addr);
- if (p4d_none(*p4d)) {
- pud_t *new;
-
- new = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
- if (!new)
- goto err_alloc;
- p4d_populate(&init_mm, p4d, new);
- }
-
- pud = pud_offset(p4d, addr);
- if (pud_none(*pud)) {
- pmd_t *new;
-
- new = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
- if (!new)
- goto err_alloc;
- pud_populate(&init_mm, pud, new);
- }
-
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- pte_t *new;
-
- new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
- if (!new)
- goto err_alloc;
- pmd_populate_kernel(&init_mm, pmd, new);
- }
-
- return;
-
-err_alloc:
- panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ return early_cpu_to_node(cpu);
}
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
void __init setup_per_cpu_areas(void)
{
@@ -880,18 +796,27 @@ void __init setup_per_cpu_areas(void)
int rc = -EINVAL;
/*
- * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
- * to group units. For larger mappings, use 1M atom which
- * should be large enough to contain a number of units.
+ * BookE and BookS radix are historical values and should be revisited.
*/
- if (mmu_linear_psize == MMU_PAGE_4K)
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E)) {
+ atom_size = SZ_1M;
+ } else if (radix_enabled()) {
atom_size = PAGE_SIZE;
- else
- atom_size = 1 << 20;
+ } else if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU)) {
+ /*
+ * Linear mapping is one of 4K, 1M and 16M. For 4K, no need
+ * to group units. For larger mappings, use 1M atom which
+ * should be large enough to contain a number of units.
+ */
+ if (mmu_linear_psize == MMU_PAGE_4K)
+ atom_size = PAGE_SIZE;
+ else
+ atom_size = SZ_1M;
+ }
if (pcpu_chosen_fc != PCPU_FC_PAGE) {
rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance,
- pcpu_alloc_bootmem, pcpu_free_bootmem);
+ pcpu_cpu_to_node);
if (rc)
pr_warn("PERCPU: %s allocator failed (%d), "
"falling back to page size\n",
@@ -899,8 +824,7 @@ void __init setup_per_cpu_areas(void)
}
if (rc < 0)
- rc = pcpu_page_first_chunk(0, pcpu_alloc_bootmem, pcpu_free_bootmem,
- pcpu_populate_pte);
+ rc = pcpu_page_first_chunk(0, pcpu_cpu_to_node);
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 3e053e2fd6b6..d84c434b2b78 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -527,16 +527,20 @@ static long restore_user_regs(struct pt_regs *regs,
regs_set_return_msr(regs, regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1));
#ifdef CONFIG_SPE
- /* force the process to reload the spe registers from
- current->thread when it next does spe instructions */
+ /*
+ * Force the process to reload the SPE registers from
+ * current->thread when it next executes SPE instructions.
+ * Since this is user ABI, we must enforce the sizing.
+ */
+ BUILD_BUG_ON(sizeof(current->thread.spe) != ELF_NEVRREG * sizeof(u32));
regs_set_return_msr(regs, regs->msr & ~MSR_SPE);
if (msr & MSR_SPE) {
/* restore spe registers from the stack */
- unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
- ELF_NEVRREG * sizeof(u32), failed);
+ unsafe_copy_from_user(&current->thread.spe, &sr->mc_vregs,
+ sizeof(current->thread.spe), failed);
current->thread.used_spe = true;
} else if (current->thread.used_spe)
- memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
+ memset(&current->thread.spe, 0, sizeof(current->thread.spe));
/* Always get SPEFSCR back */
unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
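Illustrative sketch, not part of the patch: the compile-time ABI check used above, in isolation. The struct below is a hypothetical stand-in for the thread.spe group; BUILD_BUG_ON() fails the build if the kernel-side layout ever drifts from the size the user ABI expects:

	#include <linux/build_bug.h>
	#include <linux/types.h>

	struct spe_regs_example {	/* hypothetical layout: 32 event registers + accumulator */
		u32 evr[32];
		u64 acc;
	};

	static inline void spe_abi_size_check_example(void)
	{
		/* mirrors BUILD_BUG_ON(sizeof(...) != ELF_NEVRREG * sizeof(u32)) above */
		BUILD_BUG_ON(sizeof(struct spe_regs_example) != 34 * sizeof(u32));
	}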
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index c23ee842c4c3..b7fd6a72aa76 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -61,6 +61,7 @@
#include <asm/cpu_has_feature.h>
#include <asm/ftrace.h>
#include <asm/kup.h>
+#include <asm/fadump.h>
#ifdef DEBUG
#include <asm/udbg.h>
@@ -621,6 +622,45 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
#endif
#ifdef CONFIG_NMI_IPI
+static void crash_stop_this_cpu(struct pt_regs *regs)
+#else
+static void crash_stop_this_cpu(void *dummy)
+#endif
+{
+ /*
+ * Just busy-wait here and avoid marking the CPU offline, to ensure
+ * register data is captured appropriately.
+ */
+ while (1)
+ cpu_relax();
+}
+
+void crash_smp_send_stop(void)
+{
+ static bool stopped = false;
+
+ /*
+ * In case of fadump, register data for all CPUs is captured by
+ * firmware on the ibm,os-term RTAS call. Skip IPI callbacks to
+ * other CPUs before this RTAS call to avoid tricky post-processing
+ * of those CPUs' backtraces.
+ */
+ if (should_fadump_crash())
+ return;
+
+ if (stopped)
+ return;
+
+ stopped = true;
+
+#ifdef CONFIG_NMI_IPI
+ smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_stop_this_cpu, 1000000);
+#else
+ smp_call_function(crash_stop_this_cpu, NULL, 0);
+#endif /* CONFIG_NMI_IPI */
+}
+
+#ifdef CONFIG_NMI_IPI
static void nmi_stop_this_cpu(struct pt_regs *regs)
{
/*
@@ -896,7 +936,8 @@ out:
return tg;
}
-static int update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg, int cpu, int cpu_group_start)
+static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg,
+ int cpu, int cpu_group_start)
{
int first_thread = cpu_first_thread_sibling(cpu);
int i;
@@ -1635,12 +1676,14 @@ void start_secondary(void *unused)
BUG();
}
+#ifdef CONFIG_PROFILING
int setup_profiling_timer(unsigned int multiplier)
{
return 0;
}
+#endif
-static void fixup_topology(void)
+static void __init fixup_topology(void)
{
int i;
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S
index f73f4d72fea4..e0cbd63007f2 100644
--- a/arch/powerpc/kernel/swsusp_32.S
+++ b/arch/powerpc/kernel/swsusp_32.S
@@ -181,7 +181,7 @@ _GLOBAL(swsusp_arch_resume)
#ifdef CONFIG_ALTIVEC
/* Stop pending altivec streams and memory accesses */
BEGIN_FTR_SECTION
- DSSALL
+ PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
sync
diff --git a/arch/powerpc/kernel/swsusp_asm64.S b/arch/powerpc/kernel/swsusp_asm64.S
index 96bb20715aa9..9f1903c7f540 100644
--- a/arch/powerpc/kernel/swsusp_asm64.S
+++ b/arch/powerpc/kernel/swsusp_asm64.S
@@ -141,7 +141,7 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_LPAR)
_GLOBAL(swsusp_arch_resume)
/* Stop pending altivec streams and memory accesses */
BEGIN_FTR_SECTION
- DSSALL
+ PPC_DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
sync
diff --git a/arch/powerpc/kernel/syscalls/syscall.tbl b/arch/powerpc/kernel/syscalls/syscall.tbl
index 15109af9d075..2600b4237292 100644
--- a/arch/powerpc/kernel/syscalls/syscall.tbl
+++ b/arch/powerpc/kernel/syscalls/syscall.tbl
@@ -529,3 +529,4 @@
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
+450 nospu set_mempolicy_home_node sys_set_mempolicy_home_node
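Illustrative sketch, not part of the patch: calling the newly wired syscall from userspace before libc grows a wrapper. The 450 constant comes from the table entry above; the wrapper name is a placeholder:

	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __NR_set_mempolicy_home_node
	#define __NR_set_mempolicy_home_node 450
	#endif

	static long set_mempolicy_home_node_example(void *start, unsigned long len,
						    unsigned long home_node)
	{
		/* the final argument (flags) must currently be 0 */
		return syscall(__NR_set_mempolicy_home_node,
			       (unsigned long)start, len, home_node, 0UL);
	}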
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 08d8072d6e7a..d45a415d5374 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -214,7 +214,7 @@ static ssize_t __used store_dscr_default(struct device *dev,
static DEVICE_ATTR(dscr_default, 0600,
show_dscr_default, store_dscr_default);
-static void sysfs_create_dscr_default(void)
+static void __init sysfs_create_dscr_default(void)
{
if (cpu_has_feature(CPU_FTR_DSCR)) {
int cpu;
@@ -744,12 +744,12 @@ static ssize_t show_svm(struct device *dev, struct device_attribute *attr, char
}
static DEVICE_ATTR(svm, 0444, show_svm, NULL);
-static void create_svm_file(void)
+static void __init create_svm_file(void)
{
device_create_file(cpu_subsys.dev_root, &dev_attr_svm);
}
#else
-static void create_svm_file(void)
+static void __init create_svm_file(void)
{
}
#endif /* CONFIG_PPC_SVM */
@@ -1110,7 +1110,7 @@ EXPORT_SYMBOL_GPL(cpu_remove_dev_attr_group);
/* NUMA stuff */
#ifdef CONFIG_NUMA
-static void register_nodes(void)
+static void __init register_nodes(void)
{
int i;
@@ -1134,7 +1134,7 @@ void sysfs_remove_device_from_node(struct device *dev, int nid)
EXPORT_SYMBOL_GPL(sysfs_remove_device_from_node);
#else
-static void register_nodes(void)
+static void __init register_nodes(void)
{
return;
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index cae8f03a44fe..cd0b8b71ecdd 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -88,6 +88,7 @@ static struct clocksource clocksource_timebase = {
#define DECREMENTER_DEFAULT_MAX 0x7FFFFFFF
u64 decrementer_max = DECREMENTER_DEFAULT_MAX;
+EXPORT_SYMBOL_GPL(decrementer_max); /* for KVM HDEC */
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev);
@@ -107,6 +108,7 @@ struct clock_event_device decrementer_clockevent = {
EXPORT_SYMBOL(decrementer_clockevent);
DEFINE_PER_CPU(u64, decrementers_next_tb);
+EXPORT_SYMBOL_GPL(decrementers_next_tb);
static DEFINE_PER_CPU(struct clock_event_device, decrementers);
#define XSEC_PER_SEC (1024*1024)
@@ -496,6 +498,16 @@ EXPORT_SYMBOL(profile_pc);
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
*/
#ifdef CONFIG_PPC64
+static inline unsigned long test_irq_work_pending(void)
+{
+ unsigned long x;
+
+ asm volatile("lbz %0,%1(13)"
+ : "=r" (x)
+ : "i" (offsetof(struct paca_struct, irq_work_pending)));
+ return x;
+}
+
static inline void set_irq_work_pending_flag(void)
{
asm volatile("stb %0,%1(13)" : :
@@ -539,13 +551,44 @@ void arch_irq_work_raise(void)
preempt_enable();
}
+static void set_dec_or_work(u64 val)
+{
+ set_dec(val);
+ /* We may have raced with new irq work */
+ if (unlikely(test_irq_work_pending()))
+ set_dec(1);
+}
+
#else /* CONFIG_IRQ_WORK */
#define test_irq_work_pending() 0
#define clear_irq_work_pending()
+static void set_dec_or_work(u64 val)
+{
+ set_dec(val);
+}
#endif /* CONFIG_IRQ_WORK */
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void timer_rearm_host_dec(u64 now)
+{
+ u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
+
+ WARN_ON_ONCE(!arch_irqs_disabled());
+ WARN_ON_ONCE(mfmsr() & MSR_EE);
+
+ if (now >= *next_tb) {
+ local_paca->irq_happened |= PACA_IRQ_DEC;
+ } else {
+ now = *next_tb - now;
+ if (now <= decrementer_max)
+ set_dec_or_work(now);
+ }
+}
+EXPORT_SYMBOL_GPL(timer_rearm_host_dec);
+#endif
+
/*
* timer_interrupt - gets called when the decrementer overflows,
* with interrupts disabled.
@@ -566,22 +609,23 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
return;
}
- /* Ensure a positive value is written to the decrementer, or else
- * some CPUs will continue to take decrementer exceptions. When the
- * PPC_WATCHDOG (decrementer based) is configured, keep this at most
- * 31 bits, which is about 4 seconds on most systems, which gives
- * the watchdog a chance of catching timer interrupt hard lockups.
- */
- if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
- set_dec(0x7fffffff);
- else
- set_dec(decrementer_max);
-
- /* Conditionally hard-enable interrupts now that the DEC has been
- * bumped to its maximum value
- */
- may_hard_irq_enable();
+ /* Conditionally hard-enable interrupts. */
+ if (should_hard_irq_enable()) {
+ /*
+ * Ensure a positive value is written to the decrementer, or
+ * else some CPUs will continue to take decrementer exceptions.
+ * When the PPC_WATCHDOG (decrementer based) is configured,
+ * keep this at most 31 bits, which is about 4 seconds on most
+ * systems, which gives the watchdog a chance of catching timer
+ * interrupt hard lockups.
+ */
+ if (IS_ENABLED(CONFIG_PPC_WATCHDOG))
+ set_dec(0x7fffffff);
+ else
+ set_dec(decrementer_max);
+ do_hard_irq_enable();
+ }
#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
if (atomic_read(&ppc_n_lost_interrupts) != 0)
@@ -605,11 +649,9 @@ DEFINE_INTERRUPT_HANDLER_ASYNC(timer_interrupt)
__this_cpu_inc(irq_stat.timer_irqs_event);
} else {
now = *next_tb - now;
- if (now <= decrementer_max)
- set_dec(now);
- /* We may have raced with new irq work */
- if (test_irq_work_pending())
- set_dec(1);
+ if (now > decrementer_max)
+ now = decrementer_max;
+ set_dec_or_work(now);
__this_cpu_inc(irq_stat.timer_irqs_others);
}
@@ -730,7 +772,7 @@ static int __init get_freq(char *name, int cells, unsigned long *val)
static void start_cpu_decrementer(void)
{
-#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
+#ifdef CONFIG_BOOKE_OR_40x
unsigned int tcr;
/* Clear any pending timer interrupts */
@@ -843,11 +885,7 @@ static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
__this_cpu_write(decrementers_next_tb, get_tb() + evt);
- set_dec(evt);
-
- /* We may have raced with new irq work */
- if (test_irq_work_pending())
- set_dec(1);
+ set_dec_or_work(evt);
return 0;
}
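Illustrative sketch, not part of the patch: the race the consolidated set_dec_or_work() helper closes. Irq work raised by an NMI does set_irq_work_pending_flag() followed by set_dec(1); if that lands just before the timer path programs the decrementer, the NMI's set_dec(1) is overwritten, so the helper re-checks:

	set_dec(next_event);			/* may clobber the NMI's set_dec(1) */
	if (unlikely(test_irq_work_pending()))
		set_dec(1);			/* fire almost immediately instead of
						 * waiting out the full timer period */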
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 2b91f233b05d..3beecc32940b 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -226,11 +226,8 @@ _GLOBAL(tm_reclaim)
/* Sync the userland GPRs 2-12, 14-31 to thread->regs: */
SAVE_GPR(0, r7) /* user r0 */
- SAVE_GPR(2, r7) /* user r2 */
- SAVE_4GPRS(3, r7) /* user r3-r6 */
- SAVE_GPR(8, r7) /* user r8 */
- SAVE_GPR(9, r7) /* user r9 */
- SAVE_GPR(10, r7) /* user r10 */
+ SAVE_GPRS(2, 6, r7) /* user r2-r6 */
+ SAVE_GPRS(8, 10, r7) /* user r8-r10 */
ld r3, GPR1(r1) /* user r1 */
ld r4, GPR7(r1) /* user r7 */
ld r5, GPR11(r1) /* user r11 */
@@ -445,12 +442,8 @@ restore_gprs:
ld r6, THREAD_TM_PPR(r3)
REST_GPR(0, r7) /* GPR0 */
- REST_2GPRS(2, r7) /* GPR2-3 */
- REST_GPR(4, r7) /* GPR4 */
- REST_4GPRS(8, r7) /* GPR8-11 */
- REST_2GPRS(12, r7) /* GPR12-13 */
-
- REST_NVGPRS(r7) /* GPR14-31 */
+ REST_GPRS(2, 4, r7) /* GPR2-4 */
+ REST_GPRS(8, 31, r7) /* GPR8-31 */
/* Load up PPR and DSCR here so we don't run with user values for long */
mtspr SPRN_DSCR, r5
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c
index d89c5df4f206..80b6285769f2 100644
--- a/arch/powerpc/kernel/trace/ftrace.c
+++ b/arch/powerpc/kernel/trace/ftrace.c
@@ -41,10 +41,10 @@
#define NUM_FTRACE_TRAMPS 8
static unsigned long ftrace_tramps[NUM_FTRACE_TRAMPS];
-static struct ppc_inst
+static ppc_inst_t
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
- struct ppc_inst op;
+ ppc_inst_t op;
addr = ppc_function_entry((void *)addr);
@@ -55,9 +55,9 @@ ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
}
static int
-ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new)
+ftrace_modify_code(unsigned long ip, ppc_inst_t old, ppc_inst_t new)
{
- struct ppc_inst replaced;
+ ppc_inst_t replaced;
/*
* Note:
@@ -90,24 +90,24 @@ ftrace_modify_code(unsigned long ip, struct ppc_inst old, struct ppc_inst new)
*/
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
- struct ppc_inst op;
+ ppc_inst_t op;
addr = ppc_function_entry((void *)addr);
/* use the create_branch to verify that this offset can be branched */
return create_branch(&op, (u32 *)ip, addr, 0) == 0;
}
-static int is_bl_op(struct ppc_inst op)
+static int is_bl_op(ppc_inst_t op)
{
return (ppc_inst_val(op) & 0xfc000003) == 0x48000001;
}
-static int is_b_op(struct ppc_inst op)
+static int is_b_op(ppc_inst_t op)
{
return (ppc_inst_val(op) & 0xfc000003) == 0x48000000;
}
-static unsigned long find_bl_target(unsigned long ip, struct ppc_inst op)
+static unsigned long find_bl_target(unsigned long ip, ppc_inst_t op)
{
int offset;
@@ -127,7 +127,7 @@ __ftrace_make_nop(struct module *mod,
{
unsigned long entry, ptr, tramp;
unsigned long ip = rec->ip;
- struct ppc_inst op, pop;
+ ppc_inst_t op, pop;
/* read where this goes */
if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
@@ -221,10 +221,9 @@ static int
__ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
- struct ppc_inst op;
- unsigned int jmp[4];
+ ppc_inst_t op;
unsigned long ip = rec->ip;
- unsigned long tramp;
+ unsigned long tramp, ptr;
if (copy_from_kernel_nofault(&op, (void *)ip, MCOUNT_INSN_SIZE))
return -EFAULT;
@@ -238,41 +237,13 @@ __ftrace_make_nop(struct module *mod,
/* lets find where the pointer goes */
tramp = find_bl_target(ip, op);
- /*
- * On PPC32 the trampoline looks like:
- * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha
- * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l
- * 0x7d, 0x89, 0x03, 0xa6 mtctr r12
- * 0x4e, 0x80, 0x04, 0x20 bctr
- */
-
- pr_devel("ip:%lx jumps to %lx", ip, tramp);
-
/* Find where the trampoline jumps to */
- if (copy_from_kernel_nofault(jmp, (void *)tramp, sizeof(jmp))) {
- pr_err("Failed to read %lx\n", tramp);
+ if (module_trampoline_target(mod, tramp, &ptr)) {
+ pr_err("Failed to get trampoline target\n");
return -EFAULT;
}
- pr_devel(" %08x %08x ", jmp[0], jmp[1]);
-
- /* verify that this is what we expect it to be */
- if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
- ((jmp[1] & 0xffff0000) != 0x398c0000) ||
- (jmp[2] != 0x7d8903a6) ||
- (jmp[3] != 0x4e800420)) {
- pr_err("Not a trampoline\n");
- return -EINVAL;
- }
-
- tramp = (jmp[1] & 0xffff) |
- ((jmp[0] & 0xffff) << 16);
- if (tramp & 0x8000)
- tramp -= 0x10000;
-
- pr_devel(" %lx ", tramp);
-
- if (tramp != addr) {
+ if (ptr != addr) {
pr_err("Trampoline location %08lx does not match addr\n",
tramp);
return -EINVAL;
@@ -291,7 +262,7 @@ __ftrace_make_nop(struct module *mod,
static unsigned long find_ftrace_tramp(unsigned long ip)
{
int i;
- struct ppc_inst instr;
+ ppc_inst_t instr;
/*
* We have the compiler generated long_branch tramps at the end
@@ -329,9 +300,9 @@ static int add_ftrace_tramp(unsigned long tramp)
static int setup_mcount_compiler_tramp(unsigned long tramp)
{
int i;
- struct ppc_inst op;
+ ppc_inst_t op;
unsigned long ptr;
- struct ppc_inst instr;
+ ppc_inst_t instr;
static unsigned long ftrace_plt_tramps[NUM_FTRACE_TRAMPS];
/* Is this a known long jump tramp? */
@@ -396,7 +367,7 @@ static int setup_mcount_compiler_tramp(unsigned long tramp)
static int __ftrace_make_nop_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long tramp, ip = rec->ip;
- struct ppc_inst op;
+ ppc_inst_t op;
/* Read where this goes */
if (copy_inst_from_kernel_nofault(&op, (void *)ip)) {
@@ -436,7 +407,7 @@ int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
- struct ppc_inst old, new;
+ ppc_inst_t old, new;
/*
* If the calling address is more that 24 bits away,
@@ -489,7 +460,7 @@ int ftrace_make_nop(struct module *mod,
*/
#ifndef CONFIG_MPROFILE_KERNEL
static int
-expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
+expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
/*
* We expect to see:
@@ -507,7 +478,7 @@ expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
}
#else
static int
-expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
+expected_nop_sequence(void *ip, ppc_inst_t op0, ppc_inst_t op1)
{
/* look for patched "NOP" on ppc64 with -mprofile-kernel */
if (!ppc_inst_equal(op0, ppc_inst(PPC_RAW_NOP())))
@@ -519,8 +490,8 @@ expected_nop_sequence(void *ip, struct ppc_inst op0, struct ppc_inst op1)
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
- struct ppc_inst op[2];
- struct ppc_inst instr;
+ ppc_inst_t op[2];
+ ppc_inst_t instr;
void *ip = (void *)rec->ip;
unsigned long entry, ptr, tramp;
struct module *mod = rec->arch.mod;
@@ -588,8 +559,10 @@ static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
int err;
- struct ppc_inst op;
+ ppc_inst_t op;
u32 *ip = (u32 *)rec->ip;
+ struct module *mod = rec->arch.mod;
+ unsigned long tramp;
/* read where this goes */
if (copy_inst_from_kernel_nofault(&op, ip))
@@ -602,13 +575,23 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
}
/* If we never set up a trampoline to ftrace_caller, then bail */
- if (!rec->arch.mod->arch.tramp) {
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ if (!mod->arch.tramp || !mod->arch.tramp_regs) {
+#else
+ if (!mod->arch.tramp) {
+#endif
pr_err("No ftrace trampoline\n");
return -EINVAL;
}
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ if (rec->flags & FTRACE_FL_REGS)
+ tramp = mod->arch.tramp_regs;
+ else
+#endif
+ tramp = mod->arch.tramp;
/* create the branch to the trampoline */
- err = create_branch(&op, ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
+ err = create_branch(&op, ip, tramp, BRANCH_SET_LINK);
if (err) {
pr_err("REL24 out of range!\n");
return -EINVAL;
@@ -626,7 +609,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
{
- struct ppc_inst op;
+ ppc_inst_t op;
void *ip = (void *)rec->ip;
unsigned long tramp, entry, ptr;
@@ -674,7 +657,7 @@ static int __ftrace_make_call_kernel(struct dyn_ftrace *rec, unsigned long addr)
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
unsigned long ip = rec->ip;
- struct ppc_inst old, new;
+ ppc_inst_t old, new;
/*
* If the calling address is more that 24 bits away,
@@ -713,7 +696,7 @@ static int
__ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
- struct ppc_inst op;
+ ppc_inst_t op;
unsigned long ip = rec->ip;
unsigned long entry, ptr, tramp;
struct module *mod = rec->arch.mod;
@@ -807,7 +790,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
unsigned long ip = rec->ip;
- struct ppc_inst old, new;
+ ppc_inst_t old, new;
/*
* If the calling address is more that 24 bits away,
@@ -847,7 +830,7 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
- struct ppc_inst old, new;
+ ppc_inst_t old, new;
int ret;
old = ppc_inst_read((u32 *)&ftrace_call);
@@ -932,7 +915,7 @@ int ftrace_enable_ftrace_graph_caller(void)
unsigned long ip = (unsigned long)(&ftrace_graph_call);
unsigned long addr = (unsigned long)(&ftrace_graph_caller);
unsigned long stub = (unsigned long)(&ftrace_graph_stub);
- struct ppc_inst old, new;
+ ppc_inst_t old, new;
old = ftrace_call_replace(ip, stub, 0);
new = ftrace_call_replace(ip, addr, 0);
@@ -945,7 +928,7 @@ int ftrace_disable_ftrace_graph_caller(void)
unsigned long ip = (unsigned long)(&ftrace_graph_call);
unsigned long addr = (unsigned long)(&ftrace_graph_caller);
unsigned long stub = (unsigned long)(&ftrace_graph_stub);
- struct ppc_inst old, new;
+ ppc_inst_t old, new;
old = ftrace_call_replace(ip, addr, 0);
new = ftrace_call_replace(ip, stub, 0);
diff --git a/arch/powerpc/kernel/trace/ftrace_32.S b/arch/powerpc/kernel/trace/ftrace_32.S
index e023ae59c429..0a02c0cb12d9 100644
--- a/arch/powerpc/kernel/trace/ftrace_32.S
+++ b/arch/powerpc/kernel/trace/ftrace_32.S
@@ -9,55 +9,135 @@
#include <asm/asm-offsets.h>
#include <asm/ftrace.h>
#include <asm/export.h>
+#include <asm/ptrace.h>
_GLOBAL(mcount)
_GLOBAL(_mcount)
/*
* It is required that _mcount on PPC32 must preserve the
- * link register. But we have r0 to play with. We use r0
+ * link register. But we have r12 to play with. We use r12
* to push the return address back to the caller of mcount
* into the ctr register, restore the link register and
* then jump back using the ctr register.
*/
- mflr r0
- mtctr r0
- lwz r0, 4(r1)
+ mflr r12
+ mtctr r12
mtlr r0
bctr
+EXPORT_SYMBOL(_mcount)
_GLOBAL(ftrace_caller)
MCOUNT_SAVE_FRAME
/* r3 ends up with link register */
subi r3, r3, MCOUNT_INSN_SIZE
+ lis r5,function_trace_op@ha
+ lwz r5,function_trace_op@l(r5)
+ li r6, 0
.globl ftrace_call
ftrace_call:
bl ftrace_stub
nop
+ MCOUNT_RESTORE_FRAME
+ftrace_caller_common:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
b ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
- MCOUNT_RESTORE_FRAME
/* old link register ends up in ctr reg */
bctr
-EXPORT_SYMBOL(_mcount)
_GLOBAL(ftrace_stub)
blr
+_GLOBAL(ftrace_regs_caller)
+ /* Save the original return address in A's stack frame */
+ stw r0,LRSAVE(r1)
+
+ /* Create our stack frame + pt_regs */
+ stwu r1,-INT_FRAME_SIZE(r1)
+
+ /* Save all gprs to pt_regs */
+ stw r0, GPR0(r1)
+ stmw r2, GPR2(r1)
+
+ /* Save previous stack pointer (r1) */
+ addi r8, r1, INT_FRAME_SIZE
+ stw r8, GPR1(r1)
+
+ /* Load special regs for save below */
+ mfmsr r8
+ mfctr r9
+ mfxer r10
+ mfcr r11
+
+ /* Get the _mcount() call site out of LR */
+ mflr r7
+ /* Save it as pt_regs->nip */
+ stw r7, _NIP(r1)
+ /* Save the read LR in pt_regs->link */
+ stw r0, _LINK(r1)
+
+ lis r3,function_trace_op@ha
+ lwz r5,function_trace_op@l(r3)
+
+ /* Calculate ip from nip-4 into r3 for call below */
+ subi r3, r7, MCOUNT_INSN_SIZE
+
+ /* Put the original return address in r4 as parent_ip */
+ mr r4, r0
+
+ /* Save special regs */
+ stw r8, _MSR(r1)
+ stw r9, _CTR(r1)
+ stw r10, _XER(r1)
+ stw r11, _CCR(r1)
+
+ /* Load &pt_regs in r6 for call below */
+ addi r6, r1, STACK_FRAME_OVERHEAD
+
+ /* ftrace_call(r3, r4, r5, r6) */
+.globl ftrace_regs_call
+ftrace_regs_call:
+ bl ftrace_stub
+ nop
+
+ /* Load ctr with the possibly modified NIP */
+ lwz r3, _NIP(r1)
+ mtctr r3
+
+ /* Restore gprs */
+ lmw r2, GPR2(r1)
+
+ /* Restore possibly modified LR */
+ lwz r0, _LINK(r1)
+ mtlr r0
+
+ /* Pop our stack frame */
+ addi r1, r1, INT_FRAME_SIZE
+
+ b ftrace_caller_common
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
+ stwu r1,-48(r1)
+ stw r3, 12(r1)
+ stw r4, 16(r1)
+ stw r5, 20(r1)
+ stw r6, 24(r1)
+ stw r7, 28(r1)
+ stw r8, 32(r1)
+ stw r9, 36(r1)
+ stw r10,40(r1)
+
addi r5, r1, 48
- /* load r4 with local address */
- lwz r4, 44(r1)
+ mfctr r4 /* ftrace_caller has moved local addr here */
+ stw r4, 44(r1)
+ mflr r3 /* ftrace_caller has restored LR from stack */
subi r4, r4, MCOUNT_INSN_SIZE
- /* Grab the LR out of the caller stack frame */
- lwz r3,52(r1)
-
bl prepare_ftrace_return
nop
@@ -66,9 +146,21 @@ _GLOBAL(ftrace_graph_caller)
* Change the LR in the callers stack frame to this.
*/
stw r3,52(r1)
+ mtlr r3
+ lwz r0,44(r1)
+ mtctr r0
+
+ lwz r3, 12(r1)
+ lwz r4, 16(r1)
+ lwz r5, 20(r1)
+ lwz r6, 24(r1)
+ lwz r7, 28(r1)
+ lwz r8, 32(r1)
+ lwz r9, 36(r1)
+ lwz r10,40(r1)
+
+ addi r1, r1, 48
- MCOUNT_RESTORE_FRAME
- /* old link register ends up in ctr reg */
bctr
_GLOBAL(return_to_handler)
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
index f9fd5f743eba..d636fc755f60 100644
--- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
+++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S
@@ -41,15 +41,14 @@ _GLOBAL(ftrace_regs_caller)
/* Save all gprs to pt_regs */
SAVE_GPR(0, r1)
- SAVE_10GPRS(2, r1)
+ SAVE_GPRS(2, 11, r1)
/* Ok to continue? */
lbz r3, PACA_FTRACE_ENABLED(r13)
cmpdi r3, 0
beq ftrace_no_trace
- SAVE_10GPRS(12, r1)
- SAVE_10GPRS(22, r1)
+ SAVE_GPRS(12, 31, r1)
/* Save previous stack pointer (r1) */
addi r8, r1, SWITCH_FRAME_SIZE
@@ -108,10 +107,8 @@ ftrace_regs_call:
#endif
/* Restore gprs */
- REST_GPR(0,r1)
- REST_10GPRS(2,r1)
- REST_10GPRS(12,r1)
- REST_10GPRS(22,r1)
+ REST_GPR(0, r1)
+ REST_GPRS(2, 31, r1)
/* Restore possibly modified LR */
ld r0, _LINK(r1)
@@ -157,7 +154,7 @@ _GLOBAL(ftrace_caller)
stdu r1, -SWITCH_FRAME_SIZE(r1)
/* Save all gprs to pt_regs */
- SAVE_8GPRS(3, r1)
+ SAVE_GPRS(3, 10, r1)
lbz r3, PACA_FTRACE_ENABLED(r13)
cmpdi r3, 0
@@ -194,7 +191,7 @@ ftrace_call:
mtctr r3
/* Restore gprs */
- REST_8GPRS(3,r1)
+ REST_GPRS(3, 10, r1)
/* Restore callee's TOC */
ld r2, 24(r1)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 11741703d26e..a08bb7cefdc5 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -245,7 +245,7 @@ static void oops_end(unsigned long flags, struct pt_regs *regs,
if (panic_on_oops)
panic("Fatal exception");
- do_exit(signr);
+ make_task_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);
@@ -792,9 +792,9 @@ int machine_check_generic(struct pt_regs *regs)
void die_mce(const char *str, struct pt_regs *regs, long err)
{
/*
- * The machine check wants to kill the interrupted context, but
- * do_exit() checks for in_interrupt() and panics in that case, so
- * exit the irq/nmi before calling die.
+ * The machine check wants to kill the interrupted context,
+ * but make_task_dead() checks for in_interrupt() and panics
+ * in that case, so exit the irq/nmi before calling die.
*/
if (in_nmi())
nmi_exit();
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 8513aa49614e..d3942de254c6 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -84,7 +84,7 @@ static int udbg_uart_getc(void)
return udbg_uart_in(UART_RBR);
}
-static void udbg_use_uart(void)
+static void __init udbg_use_uart(void)
{
udbg_putc = udbg_uart_putc;
udbg_flush = udbg_uart_flush;
@@ -92,7 +92,7 @@ static void udbg_use_uart(void)
udbg_getc_poll = udbg_uart_getc_poll;
}
-void udbg_uart_setup(unsigned int speed, unsigned int clock)
+void __init udbg_uart_setup(unsigned int speed, unsigned int clock)
{
unsigned int dll, base_bauds;
@@ -121,7 +121,7 @@ void udbg_uart_setup(unsigned int speed, unsigned int clock)
udbg_uart_out(UART_FCR, 0x7);
}
-unsigned int udbg_probe_uart_speed(unsigned int clock)
+unsigned int __init udbg_probe_uart_speed(unsigned int clock)
{
unsigned int dll, dlm, divisor, prescaler, speed;
u8 old_lcr;
@@ -172,7 +172,7 @@ static void udbg_uart_out_pio(unsigned int reg, u8 data)
outb(data, udbg_uart.pio_base + (reg * udbg_uart_stride));
}
-void udbg_uart_init_pio(unsigned long port, unsigned int stride)
+void __init udbg_uart_init_pio(unsigned long port, unsigned int stride)
{
if (!port)
return;
@@ -194,7 +194,7 @@ static void udbg_uart_out_mmio(unsigned int reg, u8 data)
}
-void udbg_uart_init_mmio(void __iomem *addr, unsigned int stride)
+void __init udbg_uart_init_mmio(void __iomem *addr, unsigned int stride)
{
if (!addr)
return;
diff --git a/arch/powerpc/kernel/vecemu.c b/arch/powerpc/kernel/vecemu.c
index ae632569446f..fd9432875ebc 100644
--- a/arch/powerpc/kernel/vecemu.c
+++ b/arch/powerpc/kernel/vecemu.c
@@ -261,7 +261,7 @@ static unsigned int rfin(unsigned int x)
int emulate_altivec(struct pt_regs *regs)
{
- struct ppc_inst instr;
+ ppc_inst_t instr;
unsigned int i, word;
unsigned int va, vb, vc, vd;
vector128 *vrs;
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index ba03eedfdcd8..5cc24d8cce94 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -47,6 +47,10 @@ EXPORT_SYMBOL(store_vr_state)
*/
_GLOBAL(load_up_altivec)
mfmsr r5 /* grab the current MSR */
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* interrupt doesn't set MSR[RI] and HPT can fault on current access */
+ ori r5,r5,MSR_RI
+#endif
oris r5,r5,MSR_VEC@h
MTMSRD(r5) /* enable use of AltiVec now */
isync
@@ -126,6 +130,12 @@ _GLOBAL(load_up_vsx)
andis. r5,r12,MSR_VEC@h
beql+ load_up_altivec /* skip if already loaded */
+#ifdef CONFIG_PPC_BOOK3S_64
+ /* interrupt doesn't set MSR[RI] and HPT can fault on current access */
+ li r5,MSR_RI
+ mtmsrd r5,1
+#endif
+
ld r4,PACACURRENT(r13)
addi r4,r4,THREAD /* Get THREAD */
li r6,1
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 18e42c74abdd..2bcca818136a 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -322,10 +322,6 @@ SECTIONS
#ifdef CONFIG_PPC32
.data : AT(ADDR(.data) - LOAD_OFFSET) {
DATA_DATA
-#ifdef CONFIG_UBSAN
- *(.data..Lubsan_data*)
- *(.data..Lubsan_type*)
-#endif
*(.data.rel*)
*(SDATA_MAIN)
*(.sdata2)
@@ -336,24 +332,18 @@ SECTIONS
#else
.data : AT(ADDR(.data) - LOAD_OFFSET) {
DATA_DATA
-#ifdef CONFIG_UBSAN
- *(.data..Lubsan_data*)
- *(.data..Lubsan_type*)
-#endif
*(.data.rel*)
*(.toc1)
*(.branch_lt)
}
- . = ALIGN(256);
- .got : AT(ADDR(.got) - LOAD_OFFSET) {
- __toc_start = .;
+ .got : AT(ADDR(.got) - LOAD_OFFSET) ALIGN(256) {
+ *(.got)
#ifndef CONFIG_RELOCATABLE
__prom_init_toc_start = .;
- arch/powerpc/kernel/prom_init.o*(.toc .got)
+ arch/powerpc/kernel/prom_init.o*(.toc)
__prom_init_toc_end = .;
#endif
- *(.got)
*(.toc)
}
#endif
diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
index 3fa6d240bade..bfc27496fe7e 100644
--- a/arch/powerpc/kernel/watchdog.c
+++ b/arch/powerpc/kernel/watchdog.c
@@ -85,10 +85,37 @@ static DEFINE_PER_CPU(u64, wd_timer_tb);
/* SMP checker bits */
static unsigned long __wd_smp_lock;
+static unsigned long __wd_reporting;
+static unsigned long __wd_nmi_output;
static cpumask_t wd_smp_cpus_pending;
static cpumask_t wd_smp_cpus_stuck;
static u64 wd_smp_last_reset_tb;
+/*
+ * Try to take the exclusive watchdog action / NMI IPI / printing lock.
+ * wd_smp_lock must be held. If this fails, we should return and wait
+ * for the watchdog to kick in again (or another CPU to trigger it).
+ *
+ * Importantly, if hardlockup_panic is set, wd_try_report failure should
+ * not delay the panic, because whichever other CPU is reporting will
+ * call panic.
+ */
+static bool wd_try_report(void)
+{
+ if (__wd_reporting)
+ return false;
+ __wd_reporting = 1;
+ return true;
+}
+
+/* End printing after successful wd_try_report. wd_smp_lock not required. */
+static void wd_end_reporting(void)
+{
+ smp_mb(); /* End printing "critical section" */
+ WARN_ON_ONCE(__wd_reporting == 0);
+ WRITE_ONCE(__wd_reporting, 0);
+}
+
static inline void wd_smp_lock(unsigned long *flags)
{
/*
@@ -128,109 +155,182 @@ static void wd_lockup_ipi(struct pt_regs *regs)
else
dump_stack();
+ /*
+ * __wd_nmi_output must be set after we printk from NMI context.
+ *
+ * printk from NMI context defers printing to the console to irq_work.
+ * If that NMI was taken in some code that is hard-locked, then irqs
+ * are disabled so irq_work will never fire. That can result in the
+ * hard lockup messages being delayed (indefinitely, until something
+ * else kicks the console drivers).
+ *
+ * Setting __wd_nmi_output will cause another CPU to notice and kick
+ * the console drivers for us.
+ *
+ * xchg is not needed here (it could be a smp_mb and store), but xchg
+ * gives the memory ordering and atomicity required.
+ */
+ xchg(&__wd_nmi_output, 1);
+
/* Do not panic from here because that can recurse into NMI IPI layer */
}
-static void set_cpumask_stuck(const struct cpumask *cpumask, u64 tb)
+static bool set_cpu_stuck(int cpu)
{
- cpumask_or(&wd_smp_cpus_stuck, &wd_smp_cpus_stuck, cpumask);
- cpumask_andnot(&wd_smp_cpus_pending, &wd_smp_cpus_pending, cpumask);
+ cpumask_set_cpu(cpu, &wd_smp_cpus_stuck);
+ cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+ /*
+ * See wd_smp_clear_cpu_pending()
+ */
+ smp_mb();
if (cpumask_empty(&wd_smp_cpus_pending)) {
- wd_smp_last_reset_tb = tb;
+ wd_smp_last_reset_tb = get_tb();
cpumask_andnot(&wd_smp_cpus_pending,
&wd_cpus_enabled,
&wd_smp_cpus_stuck);
+ return true;
}
-}
-static void set_cpu_stuck(int cpu, u64 tb)
-{
- set_cpumask_stuck(cpumask_of(cpu), tb);
+ return false;
}
-static void watchdog_smp_panic(int cpu, u64 tb)
+static void watchdog_smp_panic(int cpu)
{
+ static cpumask_t wd_smp_cpus_ipi; // protected by reporting
unsigned long flags;
+ u64 tb, last_reset;
int c;
wd_smp_lock(&flags);
/* Double check some things under lock */
- if ((s64)(tb - wd_smp_last_reset_tb) < (s64)wd_smp_panic_timeout_tb)
+ tb = get_tb();
+ last_reset = wd_smp_last_reset_tb;
+ if ((s64)(tb - last_reset) < (s64)wd_smp_panic_timeout_tb)
goto out;
if (cpumask_test_cpu(cpu, &wd_smp_cpus_pending))
goto out;
- if (cpumask_weight(&wd_smp_cpus_pending) == 0)
+ if (!wd_try_report())
+ goto out;
+ for_each_online_cpu(c) {
+ if (!cpumask_test_cpu(c, &wd_smp_cpus_pending))
+ continue;
+ if (c == cpu)
+ continue; // should not happen
+
+ __cpumask_set_cpu(c, &wd_smp_cpus_ipi);
+ if (set_cpu_stuck(c))
+ break;
+ }
+ if (cpumask_empty(&wd_smp_cpus_ipi)) {
+ wd_end_reporting();
goto out;
+ }
+ wd_smp_unlock(&flags);
pr_emerg("CPU %d detected hard LOCKUP on other CPUs %*pbl\n",
- cpu, cpumask_pr_args(&wd_smp_cpus_pending));
+ cpu, cpumask_pr_args(&wd_smp_cpus_ipi));
pr_emerg("CPU %d TB:%lld, last SMP heartbeat TB:%lld (%lldms ago)\n",
- cpu, tb, wd_smp_last_reset_tb,
- tb_to_ns(tb - wd_smp_last_reset_tb) / 1000000);
+ cpu, tb, last_reset, tb_to_ns(tb - last_reset) / 1000000);
if (!sysctl_hardlockup_all_cpu_backtrace) {
/*
* Try to trigger the stuck CPUs, unless we are going to
* get a backtrace on all of them anyway.
*/
- for_each_cpu(c, &wd_smp_cpus_pending) {
- if (c == cpu)
- continue;
+ for_each_cpu(c, &wd_smp_cpus_ipi) {
smp_send_nmi_ipi(c, wd_lockup_ipi, 1000000);
+ __cpumask_clear_cpu(c, &wd_smp_cpus_ipi);
}
- }
-
- /* Take the stuck CPUs out of the watch group */
- set_cpumask_stuck(&wd_smp_cpus_pending, tb);
-
- wd_smp_unlock(&flags);
-
- if (sysctl_hardlockup_all_cpu_backtrace)
+ } else {
trigger_allbutself_cpu_backtrace();
-
- /*
- * Force flush any remote buffers that might be stuck in IRQ context
- * and therefore could not run their irq_work.
- */
- printk_trigger_flush();
+ cpumask_clear(&wd_smp_cpus_ipi);
+ }
if (hardlockup_panic)
nmi_panic(NULL, "Hard LOCKUP");
+ wd_end_reporting();
+
return;
out:
wd_smp_unlock(&flags);
}
-static void wd_smp_clear_cpu_pending(int cpu, u64 tb)
+static void wd_smp_clear_cpu_pending(int cpu)
{
if (!cpumask_test_cpu(cpu, &wd_smp_cpus_pending)) {
if (unlikely(cpumask_test_cpu(cpu, &wd_smp_cpus_stuck))) {
struct pt_regs *regs = get_irq_regs();
unsigned long flags;
- wd_smp_lock(&flags);
-
pr_emerg("CPU %d became unstuck TB:%lld\n",
- cpu, tb);
+ cpu, get_tb());
print_irqtrace_events(current);
if (regs)
show_regs(regs);
else
dump_stack();
+ wd_smp_lock(&flags);
cpumask_clear_cpu(cpu, &wd_smp_cpus_stuck);
wd_smp_unlock(&flags);
+ } else {
+ /*
+ * The last CPU to clear pending should have reset the
+ * watchdog so we generally should not find it empty
+ * here if our CPU was clear. However it could happen
+ * due to a rare race with another CPU taking the
+ * last CPU out of the mask concurrently.
+ *
+ * We can't add a warning for it. But just in case
+ * there is a problem with the watchdog that is causing
+ * the mask to not be reset, try to kick it along here.
+ */
+ if (unlikely(cpumask_empty(&wd_smp_cpus_pending)))
+ goto none_pending;
}
return;
}
+
+ /*
+ * All other updates to wd_smp_cpus_pending are performed under
+ * wd_smp_lock. All of them are atomic except the case where the
+ * mask becomes empty and is reset. This will not happen here because
+ * cpu was tested to be in the bitmap (above), and a CPU only clears
+ * its own bit. _Except_ in the case where another CPU has detected a
+ * hard lockup on our CPU and takes us out of the pending mask. So in
+ * normal operation there will be no race here, no problem.
+ *
+ * In the lockup case, this atomic clear-bit vs a store that refills
+ * other bits in the accessed word will not be a problem. The bit clear
+ * is atomic so it will not cause the store to get lost, and the store
+ * will never set this bit so it will not overwrite the bit clear. The
+ * only way for a stuck CPU to return to the pending bitmap is to
+ * become unstuck itself.
+ */
cpumask_clear_cpu(cpu, &wd_smp_cpus_pending);
+
+ /*
+ * Order the store that clears our pending bit against the loads
+ * that check whether the whole pending mask is empty. This pairs
+ * with the same barrier on another CPU, and prevents two CPUs from
+ * each clearing one of the last two pending bits while neither sees
+ * the other's store when checking for an empty mask, which would
+ * end in a false positive.
+ */
+ smp_mb();
if (cpumask_empty(&wd_smp_cpus_pending)) {
unsigned long flags;
+none_pending:
+ /*
+ * Double check under lock because more than one CPU could see
+ * a clear mask with the lockless check after clearing their
+ * pending bits.
+ */
wd_smp_lock(&flags);
if (cpumask_empty(&wd_smp_cpus_pending)) {
- wd_smp_last_reset_tb = tb;
+ wd_smp_last_reset_tb = get_tb();
cpumask_andnot(&wd_smp_cpus_pending,
&wd_cpus_enabled,
&wd_smp_cpus_stuck);
@@ -245,10 +345,21 @@ static void watchdog_timer_interrupt(int cpu)
per_cpu(wd_timer_tb, cpu) = tb;
- wd_smp_clear_cpu_pending(cpu, tb);
+ wd_smp_clear_cpu_pending(cpu);
if ((s64)(tb - wd_smp_last_reset_tb) >= (s64)wd_smp_panic_timeout_tb)
- watchdog_smp_panic(cpu, tb);
+ watchdog_smp_panic(cpu);
+
+ if (__wd_nmi_output && xchg(&__wd_nmi_output, 0)) {
+ /*
+ * Something has called printk from NMI context. It might be
+ * stuck, so this triggers a flush that will get that
+ * printk output to the console.
+ *
+ * See wd_lockup_ipi.
+ */
+ printk_trigger_flush();
+ }
}
DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
@@ -267,12 +378,27 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
tb = get_tb();
if (tb - per_cpu(wd_timer_tb, cpu) >= wd_panic_timeout_tb) {
+ /*
+ * Taking wd_smp_lock here means it is a soft-NMI lock, which
+ * means we can't take any regular or irqsafe spin locks while
+ * holding this lock. This is why timers can't printk while
+ * holding the lock.
+ */
wd_smp_lock(&flags);
if (cpumask_test_cpu(cpu, &wd_smp_cpus_stuck)) {
wd_smp_unlock(&flags);
return 0;
}
- set_cpu_stuck(cpu, tb);
+ if (!wd_try_report()) {
+ wd_smp_unlock(&flags);
+ /* Couldn't report, try again in 100ms */
+ mtspr(SPRN_DEC, 100 * tb_ticks_per_usec * 1000);
+ return 0;
+ }
+
+ set_cpu_stuck(cpu);
+
+ wd_smp_unlock(&flags);
pr_emerg("CPU %d self-detected hard LOCKUP @ %pS\n",
cpu, (void *)regs->nip);
@@ -283,14 +409,21 @@ DEFINE_INTERRUPT_HANDLER_NMI(soft_nmi_interrupt)
print_irqtrace_events(current);
show_regs(regs);
- wd_smp_unlock(&flags);
+ xchg(&__wd_nmi_output, 1); // see wd_lockup_ipi
if (sysctl_hardlockup_all_cpu_backtrace)
trigger_allbutself_cpu_backtrace();
if (hardlockup_panic)
nmi_panic(regs, "Hard LOCKUP");
+
+ wd_end_reporting();
}
+ /*
+ * We are okay to change DEC in soft_nmi_interrupt because the masked
+ * handler has marked a DEC as pending, so the timer interrupt will be
+ * replayed as soon as local irqs are enabled again.
+ */
if (wd_panic_timeout_tb < 0x7fffffff)
mtspr(SPRN_DEC, wd_panic_timeout_tb);
@@ -318,11 +451,15 @@ void arch_touch_nmi_watchdog(void)
{
unsigned long ticks = tb_ticks_per_usec * wd_timer_period_ms * 1000;
int cpu = smp_processor_id();
- u64 tb = get_tb();
+ u64 tb;
+ if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
+ return;
+
+ tb = get_tb();
if (tb - per_cpu(wd_timer_tb, cpu) >= ticks) {
per_cpu(wd_timer_tb, cpu) = tb;
- wd_smp_clear_cpu_pending(cpu, tb);
+ wd_smp_clear_cpu_pending(cpu);
}
}
EXPORT_SYMBOL(arch_touch_nmi_watchdog);
@@ -380,7 +517,7 @@ static void stop_watchdog(void *arg)
cpumask_clear_cpu(cpu, &wd_cpus_enabled);
wd_smp_unlock(&flags);
- wd_smp_clear_cpu_pending(cpu, get_tb());
+ wd_smp_clear_cpu_pending(cpu);
}
static int stop_watchdog_on_cpu(unsigned int cpu)
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index a2242017e55f..8b68d9f91a03 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -185,7 +185,7 @@ void __init reserve_crashkernel(void)
}
}
-int overlaps_crashkernel(unsigned long start, unsigned long size)
+int __init overlaps_crashkernel(unsigned long start, unsigned long size)
{
return (start + size) > crashk_res.start && start <= crashk_res.end;
}
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index 66678518b938..635b5fc30b53 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -378,7 +378,7 @@ void default_machine_kexec(struct kimage *image)
/* NOTREACHED */
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;
static unsigned long htab_size;
@@ -420,4 +420,4 @@ static int __init export_htab_values(void)
return 0;
}
late_initcall(export_htab_values);
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
diff --git a/arch/powerpc/kexec/ranges.c b/arch/powerpc/kexec/ranges.c
index 6b81c852feab..563e9989a5bf 100644
--- a/arch/powerpc/kexec/ranges.c
+++ b/arch/powerpc/kexec/ranges.c
@@ -296,7 +296,7 @@ int add_initrd_mem_range(struct crash_mem **mem_ranges)
return ret;
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
/**
* add_htab_mem_range - Adds htab range to the given memory ranges list,
* if it exists
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index ff581d70f20c..18e58085447c 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -26,6 +26,7 @@ config KVM
select KVM_VFIO
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
+ select INTERVAL_TREE
config KVM_BOOK3S_HANDLER
bool
@@ -69,6 +70,7 @@ config KVM_BOOK3S_64
select KVM_BOOK3S_64_HANDLER
select KVM
select KVM_BOOK3S_PR_POSSIBLE if !KVM_BOOK3S_HV_POSSIBLE
+ select PPC_64S_HASH_MMU
select SPAPR_TCE_IOMMU if IOMMU_SUPPORT && (PPC_PSERIES || PPC_POWERNV)
help
Support running unmodified book3s_64 and book3s_32 guest kernels
@@ -130,6 +132,21 @@ config KVM_BOOK3S_HV_EXIT_TIMING
If unsure, say N.
+config KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND
+ bool "Nested L0 host workaround for L1 KVM host PMU handling bug" if EXPERT
+ depends on KVM_BOOK3S_HV_POSSIBLE
+ default !EXPERT
+ help
+ Old nested-HV-capable Linux guests have a bug where they don't
+ reflect the PMU in-use status of their L2 guest to the L0 host
+ while the L2 PMU registers are live. This can result in loss
+ of L2 PMU register state, causing perf to not work correctly in
+ L2 guests.
+
+ Selecting this option for the L0 host implements a workaround for
+ those buggy L1s by saving the L2 state, at the cost of performance
+ on every nested-capable guest entry/exit.
+
config KVM_BOOKE_HV
bool
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 583c14ef596e..9bdfc8b50899 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -4,11 +4,8 @@
#
ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
-KVM := ../../../virt/kvm
-common-objs-y = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/binary_stats.o
-common-objs-$(CONFIG_KVM_VFIO) += $(KVM)/vfio.o
-common-objs-$(CONFIG_KVM_MMIO) += $(KVM)/coalesced_mmio.o
+include $(srctree)/virt/kvm/Makefile.kvm
common-objs-y += powerpc.o emulate_loadstore.o
obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
@@ -125,9 +122,8 @@ kvm-book3s_32-objs := \
kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
kvm-objs-$(CONFIG_KVM_MPIC) += mpic.o
-kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
-kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
+kvm-y += $(kvm-objs-m) $(kvm-objs-y)
obj-$(CONFIG_KVM_E500V2) += kvm.o
obj-$(CONFIG_KVM_E500MC) += kvm.o
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index b785f6772391..6d525285dbe8 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -847,21 +847,19 @@ void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
- return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem,
- change);
+ return kvm->arch.kvm_ops->prepare_memory_region(kvm, old, new, change);
}
void kvmppc_core_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
+ kvm->arch.kvm_ops->commit_memory_region(kvm, old, new, change);
}
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 3fbd570f9c1e..0215f32932a9 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -337,7 +337,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea, bool large)
{
- int i;
+ unsigned long i;
struct kvm_vcpu *v;
/* flush this VA on all cpus */
diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S
index 983b8c18bc31..05e003eb5d90 100644
--- a/arch/powerpc/kvm/book3s_64_entry.S
+++ b/arch/powerpc/kvm/book3s_64_entry.S
@@ -374,11 +374,16 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
mtspr SPRN_DAWRX1,r10
END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
- mtspr SPRN_PID,r10
/*
- * Switch to host MMU mode
+ * Switch to host MMU mode (we don't have the real host PID, but we
+ * aren't going back to userspace).
*/
+ hwsync
+ isync
+
+ mtspr SPRN_PID,r10
+
ld r10, HSTATE_KVM_VCPU(r13)
ld r10, VCPU_KVM(r10)
lwz r10, KVM_HOST_LPID(r10)
@@ -389,6 +394,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
ld r10, KVM_HOST_LPCR(r10)
mtspr SPRN_LPCR,r10
+ isync
+
/*
* Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear
* MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index feee40cb2ba1..61290282fd9e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -530,7 +530,7 @@ static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va,
bool large)
{
u64 mask = 0xFFFFFFFFFULL;
- long i;
+ unsigned long i;
struct kvm_vcpu *v;
dprintk("KVM MMU: tlbie(0x%lx)\n", va);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c63e263312a4..213232914367 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -734,11 +734,11 @@ void kvmppc_rmap_reset(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int srcu_idx;
+ int srcu_idx, bkt;
srcu_idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots) {
+ kvm_for_each_memslot(memslot, bkt, slots) {
/* Mutual exclusion with kvm_unmap_hva_range etc. */
spin_lock(&kvm->mmu_lock);
/*
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 16359525a40f..8cebe5542256 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -57,6 +57,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
preempt_disable();
+ asm volatile("hwsync" ::: "memory");
+ isync();
/* switch the lpid first to avoid running host with unallocated pid */
old_lpid = mfspr(SPRN_LPID);
if (old_lpid != lpid)
@@ -75,6 +77,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
ret = __copy_to_user_inatomic((void __user *)to, from, n);
pagefault_enable();
+ asm volatile("hwsync" ::: "memory");
+ isync();
/* switch the pid first to avoid running host with unallocated pid */
if (quadrant == 1 && pid != old_pid)
mtspr(SPRN_PID, old_pid);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 7b74fc0a986b..84c89f08ae9a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -80,6 +80,7 @@
#include <asm/plpar_wrappers.h>
#include "book3s.h"
+#include "book3s_hv.h"
#define CREATE_TRACE_POINTS
#include "trace_hv.h"
@@ -127,11 +128,6 @@ static bool nested = true;
module_param(nested, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");
-static inline bool nesting_enabled(struct kvm *kvm)
-{
- return kvm->arch.nested_enable && kvm_is_radix(kvm);
-}
-
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
/*
@@ -276,22 +272,26 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
* they should never fail.)
*/
-static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
+static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc, u64 tb)
{
unsigned long flags;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
spin_lock_irqsave(&vc->stoltb_lock, flags);
- vc->preempt_tb = mftb();
+ vc->preempt_tb = tb;
spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}
-static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
+static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc, u64 tb)
{
unsigned long flags;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
spin_lock_irqsave(&vc->stoltb_lock, flags);
if (vc->preempt_tb != TB_NIL) {
- vc->stolen_tb += mftb() - vc->preempt_tb;
+ vc->stolen_tb += tb - vc->preempt_tb;
vc->preempt_tb = TB_NIL;
}
spin_unlock_irqrestore(&vc->stoltb_lock, flags);
@@ -301,6 +301,12 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
+ u64 now;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ return;
+
+ now = mftb();
/*
* We can test vc->runner without taking the vcore lock,
@@ -309,12 +315,12 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
* ever sets it to NULL.
*/
if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
- kvmppc_core_end_stolen(vc);
+ kvmppc_core_end_stolen(vc, now);
spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
vcpu->arch.busy_preempt != TB_NIL) {
- vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
+ vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt;
vcpu->arch.busy_preempt = TB_NIL;
}
spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
@@ -324,13 +330,19 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
unsigned long flags;
+ u64 now;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ return;
+
+ now = mftb();
if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
- kvmppc_core_start_stolen(vc);
+ kvmppc_core_start_stolen(vc, now);
spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
- vcpu->arch.busy_preempt = mftb();
+ vcpu->arch.busy_preempt = now;
spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}
@@ -675,6 +687,8 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
u64 p;
unsigned long flags;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
spin_lock_irqsave(&vc->stoltb_lock, flags);
p = vc->stolen_tb;
if (vc->vcore_state != VCORE_INACTIVE &&
@@ -684,35 +698,30 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
return p;
}
-static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
- struct kvmppc_vcore *vc)
+static void __kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
+ unsigned int pcpu, u64 now,
+ unsigned long stolen)
{
struct dtl_entry *dt;
struct lppaca *vpa;
- unsigned long stolen;
- unsigned long core_stolen;
- u64 now;
- unsigned long flags;
dt = vcpu->arch.dtl_ptr;
vpa = vcpu->arch.vpa.pinned_addr;
- now = mftb();
- core_stolen = vcore_stolen_time(vc, now);
- stolen = core_stolen - vcpu->arch.stolen_logged;
- vcpu->arch.stolen_logged = core_stolen;
- spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
- stolen += vcpu->arch.busy_stolen;
- vcpu->arch.busy_stolen = 0;
- spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
+
if (!dt || !vpa)
return;
- memset(dt, 0, sizeof(struct dtl_entry));
+
dt->dispatch_reason = 7;
- dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
- dt->timebase = cpu_to_be64(now + vc->tb_offset);
+ dt->preempt_reason = 0;
+ dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid);
dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
+ dt->ready_to_enqueue_time = 0;
+ dt->waiting_to_ready_time = 0;
+ dt->timebase = cpu_to_be64(now);
+ dt->fault_addr = 0;
dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
+
++dt;
if (dt == vcpu->arch.dtl.pinned_end)
dt = vcpu->arch.dtl.pinned_addr;
@@ -723,6 +732,27 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
vcpu->arch.dtl.dirty = true;
}
+static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
+ struct kvmppc_vcore *vc)
+{
+ unsigned long stolen;
+ unsigned long core_stolen;
+ u64 now;
+ unsigned long flags;
+
+ now = mftb();
+
+ core_stolen = vcore_stolen_time(vc, now);
+ stolen = core_stolen - vcpu->arch.stolen_logged;
+ vcpu->arch.stolen_logged = core_stolen;
+ spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
+ stolen += vcpu->arch.busy_stolen;
+ vcpu->arch.busy_stolen = 0;
+ spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
+
+ __kvmppc_create_dtl_entry(vcpu, vc->pcpu, now + vc->tb_offset, stolen);
+}
+
/* See if there is a doorbell interrupt pending for a vcpu */
static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
{
@@ -731,6 +761,8 @@ static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
if (vcpu->arch.doorbell_request)
return true;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ return false;
/*
* Ensure that the read of vcore->dpdes comes after the read
* of vcpu->doorbell_request. This barrier matches the
@@ -900,13 +932,14 @@ static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
* mode handler is not called but no other threads are in the
* source vcore.
*/
-
- spin_lock(&vcore->lock);
- if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
- vcore->vcore_state != VCORE_INACTIVE &&
- vcore->runner)
- target = vcore->runner;
- spin_unlock(&vcore->lock);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ spin_lock(&vcore->lock);
+ if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
+ vcore->vcore_state != VCORE_INACTIVE &&
+ vcore->runner)
+ target = vcore->runner;
+ spin_unlock(&vcore->lock);
+ }
return kvm_vcpu_yield_to(target);
}
@@ -1421,6 +1454,43 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
+/*
+ * If the lppaca had pmcregs_in_use clear when we exited the guest, then
+ * HFSCR_PM is cleared for the next entry. If the guest then tries to access
+ * the PMU SPRs, we get this facility unavailable interrupt. Putting HFSCR_PM
+ * back in the guest HFSCR will cause the next entry to load the PMU SPRs and
+ * allow the guest access to continue.
+ */
+static int kvmppc_pmu_unavailable(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.hfscr_permitted & HFSCR_PM))
+ return EMULATE_FAIL;
+
+ vcpu->arch.hfscr |= HFSCR_PM;
+
+ return RESUME_GUEST;
+}
+
+static int kvmppc_ebb_unavailable(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB))
+ return EMULATE_FAIL;
+
+ vcpu->arch.hfscr |= HFSCR_EBB;
+
+ return RESUME_GUEST;
+}
+
+static int kvmppc_tm_unavailable(struct kvm_vcpu *vcpu)
+{
+ if (!(vcpu->arch.hfscr_permitted & HFSCR_TM))
+ return EMULATE_FAIL;
+
+ vcpu->arch.hfscr |= HFSCR_TM;
+
+ return RESUME_GUEST;
+}
+
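The three facility-unavailable handlers above differ only in which HFSCR bit they test and set. Purely as an illustration of the shared shape — this helper is hypothetical and not part of the patch — they could be collapsed into:

/* Hypothetical helper, not in this series; fac is an HFSCR facility bit. */
static int kvmppc_hfscr_facility_unavailable(struct kvm_vcpu *vcpu, u64 fac)
{
	if (!(vcpu->arch.hfscr_permitted & fac))
		return EMULATE_FAIL;

	/* Re-enable the facility; the next guest entry reloads its SPRs. */
	vcpu->arch.hfscr |= fac;
	return RESUME_GUEST;
}

with, for example, kvmppc_pmu_unavailable(vcpu) becoming kvmppc_hfscr_facility_unavailable(vcpu, HFSCR_PM).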
static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
@@ -1451,6 +1521,10 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
run->ready_for_interrupt_injection = 1;
switch (vcpu->arch.trap) {
/* We're good on these - the host merely wanted to get our attention */
+ case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER:
+ WARN_ON_ONCE(1); /* Should never happen */
+ vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
+ fallthrough;
case BOOK3S_INTERRUPT_HV_DECREMENTER:
vcpu->stat.dec_exits++;
r = RESUME_GUEST;
@@ -1575,7 +1649,8 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
unsigned long vsid;
long err;
- if (vcpu->arch.fault_dsisr == HDSISR_CANARY) {
+ if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
+ unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) {
r = RESUME_GUEST; /* Just retry if it's the canary */
break;
}
@@ -1702,16 +1777,26 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* to emulate.
* Otherwise, we just generate a program interrupt to the guest.
*/
- case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
+ case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: {
+ u64 cause = vcpu->arch.hfscr >> 56;
+
r = EMULATE_FAIL;
- if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
- cpu_has_feature(CPU_FTR_ARCH_300))
- r = kvmppc_emulate_doorbell_instr(vcpu);
+ if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+ if (cause == FSCR_MSGP_LG)
+ r = kvmppc_emulate_doorbell_instr(vcpu);
+ if (cause == FSCR_PM_LG)
+ r = kvmppc_pmu_unavailable(vcpu);
+ if (cause == FSCR_EBB_LG)
+ r = kvmppc_ebb_unavailable(vcpu);
+ if (cause == FSCR_TM_LG)
+ r = kvmppc_tm_unavailable(vcpu);
+ }
if (r == EMULATE_FAIL) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
}
break;
+ }
case BOOK3S_INTERRUPT_HV_RM_HARD:
r = RESUME_PASSTHROUGH;
@@ -1731,7 +1816,6 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
{
- struct kvm_nested_guest *nested = vcpu->arch.nested;
int r;
int srcu_idx;
@@ -1768,6 +1852,12 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
vcpu->stat.ext_intr_exits++;
r = RESUME_GUEST;
break;
+ /* These need to go to the nested HV */
+ case BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER:
+ vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
+ vcpu->stat.dec_exits++;
+ r = RESUME_HOST;
+ break;
/* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/
case BOOK3S_INTERRUPT_HMI:
case BOOK3S_INTERRUPT_PERFMON:
@@ -1831,7 +1921,7 @@ static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu)
* it into a HEAI.
*/
if (!(vcpu->arch.hfscr_permitted & (1UL << cause)) ||
- (nested->hfscr & (1UL << cause))) {
+ (vcpu->arch.nested_hfscr & (1UL << cause))) {
vcpu->arch.trap = BOOK3S_INTERRUPT_H_EMUL_ASSIST;
/*
@@ -1993,7 +2083,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
*/
if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.vcore != vc)
@@ -2096,8 +2186,10 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
* either vcore->dpdes or doorbell_request.
* On POWER8, doorbell_request is 0.
*/
- *val = get_reg_val(id, vcpu->arch.vcore->dpdes |
- vcpu->arch.doorbell_request);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ *val = get_reg_val(id, vcpu->arch.doorbell_request);
+ else
+ *val = get_reg_val(id, vcpu->arch.vcore->dpdes);
break;
case KVM_REG_PPC_VTB:
*val = get_reg_val(id, vcpu->arch.vcore->vtb);
@@ -2238,8 +2330,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
break;
case KVM_REG_PPC_DEC_EXPIRY:
- *val = get_reg_val(id, vcpu->arch.dec_expires +
- vcpu->arch.vcore->tb_offset);
+ *val = get_reg_val(id, vcpu->arch.dec_expires);
break;
case KVM_REG_PPC_ONLINE:
*val = get_reg_val(id, vcpu->arch.online);
@@ -2335,7 +2426,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
vcpu->arch.pspb = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DPDES:
- vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ vcpu->arch.doorbell_request = set_reg_val(id, *val) & 1;
+ else
+ vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
break;
case KVM_REG_PPC_VTB:
vcpu->arch.vcore->vtb = set_reg_val(id, *val);
@@ -2491,8 +2585,7 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DEC_EXPIRY:
- vcpu->arch.dec_expires = set_reg_val(id, *val) -
- vcpu->arch.vcore->tb_offset;
+ vcpu->arch.dec_expires = set_reg_val(id, *val);
break;
case KVM_REG_PPC_ONLINE:
i = set_reg_val(id, *val);
@@ -2715,6 +2808,11 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
#endif
#endif
vcpu->arch.mmcr[0] = MMCR0_FC;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ vcpu->arch.mmcr[0] |= MMCR0_PMCCEXT;
+ vcpu->arch.mmcra = MMCRA_BHRB_DISABLE;
+ }
+
vcpu->arch.ctrl = CTRL_RUNLATCH;
/* default to host PVR, since we can't spoof it */
kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
@@ -2745,6 +2843,11 @@ static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu)
vcpu->arch.hfscr_permitted = vcpu->arch.hfscr;
+ /*
+ * PM, EBB, TM are demand-faulted so start with them clear.
+ */
+ vcpu->arch.hfscr &= ~(HFSCR_PM | HFSCR_EBB | HFSCR_TM);
+
kvmppc_mmu_book3s_hv_init(vcpu);
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
@@ -2869,13 +2972,13 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
unsigned long dec_nsec, now;
now = get_tb();
- if (now > vcpu->arch.dec_expires) {
+ if (now > kvmppc_dec_expires_host_tb(vcpu)) {
/* decrementer has already gone negative */
kvmppc_core_queue_dec(vcpu);
kvmppc_core_prepare_to_enter(vcpu);
return;
}
- dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now);
+ dec_nsec = tb_to_ns(kvmppc_dec_expires_host_tb(vcpu) - now);
hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
vcpu->arch.timer_running = 1;
}
@@ -2883,14 +2986,14 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
extern int __kvmppc_vcore_entry(void);
static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
- struct kvm_vcpu *vcpu)
+ struct kvm_vcpu *vcpu, u64 tb)
{
u64 now;
if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
return;
spin_lock_irq(&vcpu->arch.tbacct_lock);
- now = mftb();
+ now = tb;
vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
vcpu->arch.stolen_logged;
vcpu->arch.busy_preempt = now;
@@ -2945,30 +3048,59 @@ static void kvmppc_release_hwthread(int cpu)
tpaca->kvm_hstate.kvm_split_mode = NULL;
}
+static DEFINE_PER_CPU(struct kvm *, cpu_in_guest);
+
static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
{
struct kvm_nested_guest *nested = vcpu->arch.nested;
- cpumask_t *cpu_in_guest;
+ cpumask_t *need_tlb_flush;
int i;
+ if (nested)
+ need_tlb_flush = &nested->need_tlb_flush;
+ else
+ need_tlb_flush = &kvm->arch.need_tlb_flush;
+
cpu = cpu_first_tlb_thread_sibling(cpu);
- if (nested) {
- cpumask_set_cpu(cpu, &nested->need_tlb_flush);
- cpu_in_guest = &nested->cpu_in_guest;
- } else {
- cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
- cpu_in_guest = &kvm->arch.cpu_in_guest;
- }
+ for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
+ i += cpu_tlb_thread_sibling_step())
+ cpumask_set_cpu(i, need_tlb_flush);
+
/*
- * Make sure setting of bit in need_tlb_flush precedes
- * testing of cpu_in_guest bits. The matching barrier on
- * the other side is the first smp_mb() in kvmppc_run_core().
+ * Make sure the setting of a bit in need_tlb_flush precedes the
+ * test of cpu_in_guest. The matching barrier on the other side is
+ * the hwsync when switching to guest MMU mode, which happens
+ * between cpu_in_guest being set to the guest kvm and the
+ * need_tlb_flush bit being tested.
*/
smp_mb();
+
for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu);
- i += cpu_tlb_thread_sibling_step())
- if (cpumask_test_cpu(i, cpu_in_guest))
+ i += cpu_tlb_thread_sibling_step()) {
+ struct kvm *running = *per_cpu_ptr(&cpu_in_guest, i);
+
+ if (running == kvm)
smp_call_function_single(i, do_nothing, NULL, 1);
+ }
+}
+
+static void do_migrate_away_vcpu(void *arg)
+{
+ struct kvm_vcpu *vcpu = arg;
+ struct kvm *kvm = vcpu->kvm;
+
+ /*
+ * If the guest has GTSE, it may execute tlbie, so do an eieio;
+ * tlbsync; ptesync sequence on the old CPU before migrating to a
+ * new one, in case we interrupted the guest partway through a
+ * tlbie; eieio; tlbsync; ptesync sequence.
+ *
+ * Otherwise, ptesync is sufficient for ordering tlbiel sequences.
+ */
+ if (kvm->arch.lpcr & LPCR_GTSE)
+ asm volatile("eieio; tlbsync; ptesync");
+ else
+ asm volatile("ptesync");
}
static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
@@ -2994,14 +3126,17 @@ static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
* can move around between pcpus. To cope with this, when
* a vcpu moves from one pcpu to another, we need to tell
* any vcpus running on the same core as this vcpu previously
- * ran to flush the TLB. The TLB is shared between threads,
- * so we use a single bit in .need_tlb_flush for all 4 threads.
+ * ran to flush the TLB.
*/
if (prev_cpu != pcpu) {
- if (prev_cpu >= 0 &&
- cpu_first_tlb_thread_sibling(prev_cpu) !=
- cpu_first_tlb_thread_sibling(pcpu))
- radix_flush_cpu(kvm, prev_cpu, vcpu);
+ if (prev_cpu >= 0) {
+ if (cpu_first_tlb_thread_sibling(prev_cpu) !=
+ cpu_first_tlb_thread_sibling(pcpu))
+ radix_flush_cpu(kvm, prev_cpu, vcpu);
+
+ smp_call_function_single(prev_cpu,
+ do_migrate_away_vcpu, vcpu, 1);
+ }
if (nested)
nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu;
else
@@ -3013,7 +3148,6 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
{
int cpu;
struct paca_struct *tpaca;
- struct kvm *kvm = vc->kvm;
cpu = vc->pcpu;
if (vcpu) {
@@ -3024,7 +3158,6 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
cpu += vcpu->arch.ptid;
vcpu->cpu = vc->pcpu;
vcpu->arch.thread_cpu = cpu;
- cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
}
tpaca = paca_ptrs[cpu];
tpaca->kvm_hstate.kvm_vcpu = vcpu;
@@ -3125,6 +3258,8 @@ static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
{
struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
vc->vcore_state = VCORE_PREEMPT;
vc->pcpu = smp_processor_id();
if (vc->num_threads < threads_per_vcore(vc->kvm)) {
@@ -3134,14 +3269,16 @@ static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
}
/* Start accumulating stolen time */
- kvmppc_core_start_stolen(vc);
+ kvmppc_core_start_stolen(vc, mftb());
}
static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
{
struct preempted_vcore_list *lp;
- kvmppc_core_end_stolen(vc);
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
+ kvmppc_core_end_stolen(vc, mftb());
if (!list_empty(&vc->preempt_list)) {
lp = &per_cpu(preempted_vcores, vc->pcpu);
spin_lock(&lp->lock);
@@ -3268,7 +3405,7 @@ static void prepare_threads(struct kvmppc_vcore *vc)
vcpu->arch.ret = RESUME_GUEST;
else
continue;
- kvmppc_remove_runnable(vc, vcpu);
+ kvmppc_remove_runnable(vc, vcpu, mftb());
wake_up(&vcpu->arch.cpu_run);
}
}
@@ -3287,7 +3424,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
list_del_init(&pvc->preempt_list);
if (pvc->runner == NULL) {
pvc->vcore_state = VCORE_INACTIVE;
- kvmppc_core_end_stolen(pvc);
+ kvmppc_core_end_stolen(pvc, mftb());
}
spin_unlock(&pvc->lock);
continue;
@@ -3296,7 +3433,7 @@ static void collect_piggybacks(struct core_info *cip, int target_threads)
spin_unlock(&pvc->lock);
continue;
}
- kvmppc_core_end_stolen(pvc);
+ kvmppc_core_end_stolen(pvc, mftb());
pvc->vcore_state = VCORE_PIGGYBACK;
if (cip->total_threads >= target_threads)
break;
@@ -3340,7 +3477,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
*/
spin_unlock(&vc->lock);
/* cancel pending dec exception if dec is positive */
- if (now < vcpu->arch.dec_expires &&
+ if (now < kvmppc_dec_expires_host_tb(vcpu) &&
kvmppc_core_pending_dec(vcpu))
kvmppc_core_dequeue_dec(vcpu);
@@ -3363,7 +3500,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
else
++still_running;
} else {
- kvmppc_remove_runnable(vc, vcpu);
+ kvmppc_remove_runnable(vc, vcpu, mftb());
wake_up(&vcpu->arch.cpu_run);
}
}
@@ -3372,7 +3509,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
kvmppc_vcore_preempt(vc);
} else if (vc->runner) {
vc->vcore_state = VCORE_PREEMPT;
- kvmppc_core_start_stolen(vc);
+ kvmppc_core_start_stolen(vc, mftb());
} else {
vc->vcore_state = VCORE_INACTIVE;
}
@@ -3503,7 +3640,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
for_each_runnable_thread(i, vcpu, vc) {
vcpu->arch.ret = -EBUSY;
- kvmppc_remove_runnable(vc, vcpu);
+ kvmppc_remove_runnable(vc, vcpu, mftb());
wake_up(&vcpu->arch.cpu_run);
}
goto out;
@@ -3748,7 +3885,6 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
kvmppc_release_hwthread(pcpu + i);
if (sip && sip->napped[i])
kvmppc_ipi_thread(pcpu + i);
- cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest);
}
spin_unlock(&vc->lock);
@@ -3770,211 +3906,137 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
trace_kvmppc_run_core(vc, 1);
}
-static void load_spr_state(struct kvm_vcpu *vcpu)
-{
- mtspr(SPRN_DSCR, vcpu->arch.dscr);
- mtspr(SPRN_IAMR, vcpu->arch.iamr);
- mtspr(SPRN_PSPB, vcpu->arch.pspb);
- mtspr(SPRN_FSCR, vcpu->arch.fscr);
- mtspr(SPRN_TAR, vcpu->arch.tar);
- mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
- mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
- mtspr(SPRN_BESCR, vcpu->arch.bescr);
- mtspr(SPRN_TIDR, vcpu->arch.tid);
- mtspr(SPRN_AMR, vcpu->arch.amr);
- mtspr(SPRN_UAMOR, vcpu->arch.uamor);
-
- /*
- * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
- * clear (or hstate set appropriately to catch those registers
- * being clobbered if we take a MCE or SRESET), so those are done
- * later.
- */
-
- if (!(vcpu->arch.ctrl & 1))
- mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1);
-}
-
-static void store_spr_state(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
-
- vcpu->arch.iamr = mfspr(SPRN_IAMR);
- vcpu->arch.pspb = mfspr(SPRN_PSPB);
- vcpu->arch.fscr = mfspr(SPRN_FSCR);
- vcpu->arch.tar = mfspr(SPRN_TAR);
- vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
- vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
- vcpu->arch.bescr = mfspr(SPRN_BESCR);
- vcpu->arch.tid = mfspr(SPRN_TIDR);
- vcpu->arch.amr = mfspr(SPRN_AMR);
- vcpu->arch.uamor = mfspr(SPRN_UAMOR);
- vcpu->arch.dscr = mfspr(SPRN_DSCR);
-}
-
-/*
- * Privileged (non-hypervisor) host registers to save.
- */
-struct p9_host_os_sprs {
- unsigned long dscr;
- unsigned long tidr;
- unsigned long iamr;
- unsigned long amr;
- unsigned long fscr;
-};
-
-static void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
-{
- host_os_sprs->dscr = mfspr(SPRN_DSCR);
- host_os_sprs->tidr = mfspr(SPRN_TIDR);
- host_os_sprs->iamr = mfspr(SPRN_IAMR);
- host_os_sprs->amr = mfspr(SPRN_AMR);
- host_os_sprs->fscr = mfspr(SPRN_FSCR);
-}
-
-/* vcpu guest regs must already be saved */
-static void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
- struct p9_host_os_sprs *host_os_sprs)
-{
- mtspr(SPRN_PSPB, 0);
- mtspr(SPRN_UAMOR, 0);
-
- mtspr(SPRN_DSCR, host_os_sprs->dscr);
- mtspr(SPRN_TIDR, host_os_sprs->tidr);
- mtspr(SPRN_IAMR, host_os_sprs->iamr);
-
- if (host_os_sprs->amr != vcpu->arch.amr)
- mtspr(SPRN_AMR, host_os_sprs->amr);
-
- if (host_os_sprs->fscr != vcpu->arch.fscr)
- mtspr(SPRN_FSCR, host_os_sprs->fscr);
-
- /* Save guest CTRL register, set runlatch to 1 */
- if (!(vcpu->arch.ctrl & 1))
- mtspr(SPRN_CTRLT, 1);
-}
-
static inline bool hcall_is_xics(unsigned long req)
{
return req == H_EOI || req == H_CPPR || req == H_IPI ||
req == H_IPOLL || req == H_XIRR || req == H_XIRR_X;
}
-/*
- * Guest entry for POWER9 and later CPUs.
- */
-static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
- unsigned long lpcr)
+static void vcpu_vpa_increment_dispatch(struct kvm_vcpu *vcpu)
+{
+ struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
+ if (lp) {
+ u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
+ lp->yield_count = cpu_to_be32(yield_count);
+ vcpu->arch.vpa.dirty = 1;
+ }
+}
+
+/* call our hypervisor to load up HV regs and go */
+static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ unsigned long host_psscr;
+ unsigned long msr;
+ struct hv_guest_state hvregs;
struct p9_host_os_sprs host_os_sprs;
s64 dec;
- u64 tb;
- int trap, save_pmu;
-
- WARN_ON_ONCE(vcpu->arch.ceded);
+ int trap;
- dec = mfspr(SPRN_DEC);
- tb = mftb();
- if (dec < 0)
- return BOOK3S_INTERRUPT_HV_DECREMENTER;
- local_paca->kvm_hstate.dec_expires = dec + tb;
- if (local_paca->kvm_hstate.dec_expires < time_limit)
- time_limit = local_paca->kvm_hstate.dec_expires;
+ msr = mfmsr();
save_p9_host_os_sprs(&host_os_sprs);
- kvmhv_save_host_pmu(); /* saves it to PACA kvm_hstate */
-
- kvmppc_subcore_enter_guest();
+ /*
+ * We need to save and restore the guest visible part of the
+ * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
+ * doesn't do this for us. Note this is only required on pseries,
+ * since otherwise it is done in kvmhv_vcpu_entry_p9().
+ */
+ host_psscr = mfspr(SPRN_PSSCR_PR);
- vc->entry_exit_map = 1;
- vc->in_guest = 1;
+ kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
+ if (lazy_irq_pending())
+ return 0;
- if (vcpu->arch.vpa.pinned_addr) {
- struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
- u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
- lp->yield_count = cpu_to_be32(yield_count);
- vcpu->arch.vpa.dirty = 1;
- }
+ if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
+ msr = mfmsr(); /* TM restore can update msr */
- if (cpu_has_feature(CPU_FTR_TM) ||
- cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
- kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
+ if (vcpu->arch.psscr != host_psscr)
+ mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
-#ifdef CONFIG_PPC_PSERIES
- if (kvmhv_on_pseries()) {
- barrier();
- if (vcpu->arch.vpa.pinned_addr) {
- struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
- get_lppaca()->pmcregs_in_use = lp->pmcregs_in_use;
- } else {
- get_lppaca()->pmcregs_in_use = 1;
- }
- barrier();
+ kvmhv_save_hv_regs(vcpu, &hvregs);
+ hvregs.lpcr = lpcr;
+ vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
+ hvregs.version = HV_GUEST_STATE_VERSION;
+ if (vcpu->arch.nested) {
+ hvregs.lpid = vcpu->arch.nested->shadow_lpid;
+ hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
+ } else {
+ hvregs.lpid = vcpu->kvm->arch.lpid;
+ hvregs.vcpu_token = vcpu->vcpu_id;
}
-#endif
- kvmhv_load_guest_pmu(vcpu);
-
- msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
- load_fp_state(&vcpu->arch.fp);
-#ifdef CONFIG_ALTIVEC
- load_vr_state(&vcpu->arch.vr);
-#endif
- mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
-
- load_spr_state(vcpu);
+ hvregs.hdec_expiry = time_limit;
/*
- * When setting DEC, we must always deal with irq_work_raise via NMI vs
- * setting DEC. The problem occurs right as we switch into guest mode
- * if a NMI hits and sets pending work and sets DEC, then that will
- * apply to the guest and not bring us back to the host.
+ * When setting DEC, we must always deal with irq_work_raise
+ * via NMI vs setting DEC. The problem occurs right as we
+ * switch into guest mode if an NMI hits and sets pending work
+ * and sets DEC, then that will apply to the guest and not
+ * bring us back to the host.
*
- * irq_work_raise could check a flag (or possibly LPCR[HDICE] for
- * example) and set HDEC to 1? That wouldn't solve the nested hv
- * case which needs to abort the hcall or zero the time limit.
+ * irq_work_raise could check a flag (or possibly LPCR[HDICE]
+ * for example) and set HDEC to 1? That wouldn't solve the
+ * nested hv case which needs to abort the hcall or zero the
+ * time limit.
*
* XXX: Another day's problem.
*/
- mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
+ mtspr(SPRN_DEC, kvmppc_dec_expires_host_tb(vcpu) - *tb);
+
+ mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
+ mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
+ switch_pmu_to_guest(vcpu, &host_os_sprs);
+ trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
+ __pa(&vcpu->arch.regs));
+ kvmhv_restore_hv_return_state(vcpu, &hvregs);
+ switch_pmu_to_host(vcpu, &host_os_sprs);
+ vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
+ vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
+ vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
+ vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
+
+ store_vcpu_state(vcpu);
- if (kvmhv_on_pseries()) {
- /*
- * We need to save and restore the guest visible part of the
- * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor
- * doesn't do this for us. Note only required if pseries since
- * this is done in kvmhv_vcpu_entry_p9() below otherwise.
- */
- unsigned long host_psscr;
- /* call our hypervisor to load up HV regs and go */
- struct hv_guest_state hvregs;
+ dec = mfspr(SPRN_DEC);
+ if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
+ dec = (s32) dec;
+ *tb = mftb();
+ vcpu->arch.dec_expires = dec + (*tb + vc->tb_offset);
- host_psscr = mfspr(SPRN_PSSCR_PR);
- mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
- kvmhv_save_hv_regs(vcpu, &hvregs);
- hvregs.lpcr = lpcr;
- vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
- hvregs.version = HV_GUEST_STATE_VERSION;
- if (vcpu->arch.nested) {
- hvregs.lpid = vcpu->arch.nested->shadow_lpid;
- hvregs.vcpu_token = vcpu->arch.nested_vcpu_id;
- } else {
- hvregs.lpid = vcpu->kvm->arch.lpid;
- hvregs.vcpu_token = vcpu->vcpu_id;
- }
- hvregs.hdec_expiry = time_limit;
- mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
- mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
- trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs),
- __pa(&vcpu->arch.regs));
- kvmhv_restore_hv_return_state(vcpu, &hvregs);
- vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
- vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
- vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
- vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
+ timer_rearm_host_dec(*tb);
+
+ restore_p9_host_os_sprs(vcpu, &host_os_sprs);
+ if (vcpu->arch.psscr != host_psscr)
mtspr(SPRN_PSSCR_PR, host_psscr);
+ return trap;
+}
+
+/*
+ * Guest entry for POWER9 and later CPUs.
+ */
+static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
+ unsigned long lpcr, u64 *tb)
+{
+ u64 next_timer;
+ int trap;
+
+ next_timer = timer_get_next_tb();
+ if (*tb >= next_timer)
+ return BOOK3S_INTERRUPT_HV_DECREMENTER;
+ if (next_timer < time_limit)
+ time_limit = next_timer;
+ else if (*tb >= time_limit) /* nested time limit */
+ return BOOK3S_INTERRUPT_NESTED_HV_DECREMENTER;
+
+ vcpu->arch.ceded = 0;
+
+ vcpu_vpa_increment_dispatch(vcpu);
+
+ if (kvmhv_on_pseries()) {
+ trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb);
+
/* H_CEDE has to be handled now, not later */
if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
@@ -3982,9 +4044,16 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
kvmppc_set_gpr(vcpu, 3, 0);
trap = 0;
}
+
} else {
+ struct kvm *kvm = vcpu->kvm;
+
kvmppc_xive_push_vcpu(vcpu);
- trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr);
+
+ __this_cpu_write(cpu_in_guest, kvm);
+ trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb);
+ __this_cpu_write(cpu_in_guest, NULL);
+
if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
!(vcpu->arch.shregs.msr & MSR_PR)) {
unsigned long req = kvmppc_get_gpr(vcpu, 3);
@@ -4009,65 +4078,11 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
}
kvmppc_xive_pull_vcpu(vcpu);
- if (kvm_is_radix(vcpu->kvm))
+ if (kvm_is_radix(kvm))
vcpu->arch.slb_max = 0;
}
- dec = mfspr(SPRN_DEC);
- if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
- dec = (s32) dec;
- tb = mftb();
- vcpu->arch.dec_expires = dec + tb;
- vcpu->cpu = -1;
- vcpu->arch.thread_cpu = -1;
-
- store_spr_state(vcpu);
-
- restore_p9_host_os_sprs(vcpu, &host_os_sprs);
-
- msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX);
- store_fp_state(&vcpu->arch.fp);
-#ifdef CONFIG_ALTIVEC
- store_vr_state(&vcpu->arch.vr);
-#endif
- vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
-
- if (cpu_has_feature(CPU_FTR_TM) ||
- cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
- kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true);
-
- save_pmu = 1;
- if (vcpu->arch.vpa.pinned_addr) {
- struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
- u32 yield_count = be32_to_cpu(lp->yield_count) + 1;
- lp->yield_count = cpu_to_be32(yield_count);
- vcpu->arch.vpa.dirty = 1;
- save_pmu = lp->pmcregs_in_use;
- }
- /* Must save pmu if this guest is capable of running nested guests */
- save_pmu |= nesting_enabled(vcpu->kvm);
-
- kvmhv_save_guest_pmu(vcpu, save_pmu);
-#ifdef CONFIG_PPC_PSERIES
- if (kvmhv_on_pseries()) {
- barrier();
- get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
- barrier();
- }
-#endif
-
- vc->entry_exit_map = 0x101;
- vc->in_guest = 0;
-
- mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb());
- /* We may have raced with new irq work */
- if (test_irq_work_pending())
- set_dec(1);
- mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
-
- kvmhv_load_host_pmu();
-
- kvmppc_subcore_exit_guest();
+ vcpu_vpa_increment_dispatch(vcpu);
return trap;
}
@@ -4132,6 +4147,13 @@ static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu)
return false;
}
+static bool kvmppc_vcpu_check_block(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
+ return true;
+ return false;
+}
+
/*
* Check to see if any of the runnable vcpus on the vcore have pending
* exceptions or are no longer ceded
@@ -4142,7 +4164,7 @@ static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
int i;
for_each_runnable_thread(i, vcpu, vc) {
- if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
+ if (kvmppc_vcpu_check_block(vcpu))
return 1;
}
@@ -4159,6 +4181,8 @@ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
int do_sleep = 1;
u64 block_ns;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
/* Poll for pending exceptions and ceded state */
cur = start_poll = ktime_get();
if (vc->halt_poll_ns) {
@@ -4355,7 +4379,7 @@ static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
for_each_runnable_thread(i, v, vc) {
kvmppc_core_prepare_to_enter(v);
if (signal_pending(v->arch.run_task)) {
- kvmppc_remove_runnable(vc, v);
+ kvmppc_remove_runnable(vc, v, mftb());
v->stat.signal_exits++;
v->run->exit_reason = KVM_EXIT_INTR;
v->arch.ret = -EINTR;
@@ -4396,7 +4420,7 @@ static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
kvmppc_vcore_end_preempt(vc);
if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
- kvmppc_remove_runnable(vc, vcpu);
+ kvmppc_remove_runnable(vc, vcpu, mftb());
vcpu->stat.signal_exits++;
run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR;
@@ -4417,12 +4441,15 @@ static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu)
int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
unsigned long lpcr)
{
+ struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
struct kvm_run *run = vcpu->run;
int trap, r, pcpu;
int srcu_idx;
struct kvmppc_vcore *vc;
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *nested = vcpu->arch.nested;
+ unsigned long flags;
+ u64 tb;
trace_kvmppc_run_vcpu_enter(vcpu);
@@ -4433,16 +4460,11 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
vc = vcpu->arch.vcore;
vcpu->arch.ceded = 0;
vcpu->arch.run_task = current;
- vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
- vcpu->arch.busy_preempt = TB_NIL;
vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
- vc->runnable_threads[0] = vcpu;
- vc->n_runnable = 1;
- vc->runner = vcpu;
/* See if the MMU is ready to go */
- if (!kvm->arch.mmu_ready) {
+ if (unlikely(!kvm->arch.mmu_ready)) {
r = kvmhv_setup_mmu(vcpu);
if (r) {
run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -4457,29 +4479,21 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
kvmppc_update_vpas(vcpu);
- init_vcore_to_run(vc);
- vc->preempt_tb = TB_NIL;
-
preempt_disable();
pcpu = smp_processor_id();
- vc->pcpu = pcpu;
if (kvm_is_radix(kvm))
kvmppc_prepare_radix_vcpu(vcpu, pcpu);
- local_irq_disable();
- hard_irq_disable();
+ /* flags save not required, but irq_pmu has no disable/enable API */
+ powerpc_local_irq_pmu_save(flags);
+
if (signal_pending(current))
goto sigpend;
- if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready)
+ if (need_resched() || !kvm->arch.mmu_ready)
goto out;
if (!nested) {
kvmppc_core_prepare_to_enter(vcpu);
- if (vcpu->arch.doorbell_request) {
- vc->dpdes = 1;
- smp_wmb();
- vcpu->arch.doorbell_request = 0;
- }
if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
&vcpu->arch.pending_exceptions))
lpcr |= LPCR_MER;
@@ -4490,16 +4504,23 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
goto out;
}
- kvmppc_clear_host_core(pcpu);
+ if (vcpu->arch.timer_running) {
+ hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+ vcpu->arch.timer_running = 0;
+ }
- local_paca->kvm_hstate.napping = 0;
- local_paca->kvm_hstate.kvm_split_mode = NULL;
- kvmppc_start_thread(vcpu, vc);
- kvmppc_create_dtl_entry(vcpu, vc);
- trace_kvm_guest_enter(vcpu);
+ tb = mftb();
- vc->vcore_state = VCORE_RUNNING;
- trace_kvmppc_run_core(vc, 0);
+ vcpu->cpu = pcpu;
+ vcpu->arch.thread_cpu = pcpu;
+ vc->pcpu = pcpu;
+ local_paca->kvm_hstate.kvm_vcpu = vcpu;
+ local_paca->kvm_hstate.ptid = 0;
+ local_paca->kvm_hstate.fake_suspend = 0;
+
+ __kvmppc_create_dtl_entry(vcpu, pcpu, tb + vc->tb_offset, 0);
+
+ trace_kvm_guest_enter(vcpu);
guest_enter_irqoff();
@@ -4510,7 +4531,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
/* Tell lockdep that we're about to enable interrupts */
trace_hardirqs_on();
- trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
+ trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr, &tb);
vcpu->arch.trap = trap;
trace_hardirqs_off();
@@ -4521,8 +4542,6 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
set_irq_happened(trap);
- kvmppc_set_host_core(pcpu);
-
context_tracking_guest_exit();
if (!vtime_accounting_enabled_this_cpu()) {
local_irq_enable();
@@ -4538,9 +4557,10 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
}
vtime_account_guest_exit();
- local_irq_enable();
+ vcpu->cpu = -1;
+ vcpu->arch.thread_cpu = -1;
- cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest);
+ powerpc_local_irq_pmu_restore(flags);
preempt_enable();
@@ -4550,7 +4570,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
* by L2 and the L1 decrementer is provided in hdec_expires
*/
if (kvmppc_core_pending_dec(vcpu) &&
- ((get_tb() < vcpu->arch.dec_expires) ||
+ ((tb < kvmppc_dec_expires_host_tb(vcpu)) ||
(trap == BOOK3S_INTERRUPT_SYSCALL &&
kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED)))
kvmppc_core_dequeue_dec(vcpu);
@@ -4565,28 +4585,31 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
}
vcpu->arch.ret = r;
- if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded &&
- !kvmppc_vcpu_woken(vcpu)) {
+ if (is_kvmppc_resume_guest(r) && !kvmppc_vcpu_check_block(vcpu)) {
kvmppc_set_timer(vcpu);
- while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) {
+
+ prepare_to_rcuwait(wait);
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current)) {
vcpu->stat.signal_exits++;
run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR;
break;
}
- spin_lock(&vc->lock);
- kvmppc_vcore_blocked(vc);
- spin_unlock(&vc->lock);
+
+ if (kvmppc_vcpu_check_block(vcpu))
+ break;
+
+ trace_kvmppc_vcore_blocked(vc, 0);
+ schedule();
+ trace_kvmppc_vcore_blocked(vc, 1);
}
+ finish_rcuwait(wait);
}
vcpu->arch.ceded = 0;
- vc->vcore_state = VCORE_INACTIVE;
- trace_kvmppc_run_core(vc, 1);
-
done:
- kvmppc_remove_runnable(vc, vcpu);
trace_kvmppc_run_vcpu_exit(vcpu);
return vcpu->arch.ret;
@@ -4596,7 +4619,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
run->exit_reason = KVM_EXIT_INTR;
vcpu->arch.ret = -EINTR;
out:
- local_irq_enable();
+ powerpc_local_irq_pmu_restore(flags);
preempt_enable();
goto done;
}
@@ -4606,23 +4629,25 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
struct kvm_run *run = vcpu->run;
int r;
int srcu_idx;
- unsigned long ebb_regs[3] = {}; /* shut up GCC */
- unsigned long user_tar = 0;
- unsigned int user_vrsave;
struct kvm *kvm;
+ unsigned long msr;
if (!vcpu->arch.sane) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
+ /* No need to go into the guest when all we'll do is come back out */
+ if (signal_pending(current)) {
+ run->exit_reason = KVM_EXIT_INTR;
+ return -EINTR;
+ }
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
* Don't allow entry with a suspended transaction, because
* the guest entry/exit code will lose it.
- * If the guest has TM enabled, save away their TM-related SPRs
- * (they will get restored by the TM unavailable interrupt).
*/
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
(current->thread.regs->msr & MSR_TM)) {
if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
@@ -4630,12 +4655,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
run->fail_entry.hardware_entry_failure_reason = 0;
return -EINVAL;
}
- /* Enable TM so we can read the TM SPRs */
- mtmsr(mfmsr() | MSR_TM);
- current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
- current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
- current->thread.tm_texasr = mfspr(SPRN_TEXASR);
- current->thread.regs->msr &= ~MSR_TM;
}
#endif
@@ -4650,29 +4669,30 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
kvmppc_core_prepare_to_enter(vcpu);
- /* No need to go into the guest when all we'll do is come back out */
- if (signal_pending(current)) {
- run->exit_reason = KVM_EXIT_INTR;
- return -EINTR;
- }
-
kvm = vcpu->kvm;
atomic_inc(&kvm->arch.vcpus_running);
/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
smp_mb();
- flush_all_to_thread(current);
+ msr = 0;
+ if (IS_ENABLED(CONFIG_PPC_FPU))
+ msr |= MSR_FP;
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ msr |= MSR_VEC;
+ if (cpu_has_feature(CPU_FTR_VSX))
+ msr |= MSR_VSX;
+ if ((cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+ (vcpu->arch.hfscr & HFSCR_TM))
+ msr |= MSR_TM;
+ msr = msr_check_and_set(msr);
- /* Save userspace EBB and other register values */
- if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
- ebb_regs[0] = mfspr(SPRN_EBBHR);
- ebb_regs[1] = mfspr(SPRN_EBBRR);
- ebb_regs[2] = mfspr(SPRN_BESCR);
- user_tar = mfspr(SPRN_TAR);
- }
- user_vrsave = mfspr(SPRN_VRSAVE);
+ kvmppc_save_user_regs();
- vcpu->arch.waitp = &vcpu->arch.vcore->wait;
+ kvmppc_save_current_sprs();
+
+ if (!cpu_has_feature(CPU_FTR_ARCH_300))
+ vcpu->arch.waitp = &vcpu->arch.vcore->wait;
vcpu->arch.pgdir = kvm->mm->pgd;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -4711,15 +4731,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu)
}
} while (is_kvmppc_resume_guest(r));
- /* Restore userspace EBB and other register values */
- if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
- mtspr(SPRN_EBBHR, ebb_regs[0]);
- mtspr(SPRN_EBBRR, ebb_regs[1]);
- mtspr(SPRN_BESCR, ebb_regs[2]);
- mtspr(SPRN_TAR, user_tar);
- }
- mtspr(SPRN_VRSAVE, user_vrsave);
-
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&kvm->arch.vcpus_running);
@@ -4786,8 +4797,8 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int i, r;
- unsigned long n;
+ int r;
+ unsigned long n, i;
unsigned long *buf, *p;
struct kvm_vcpu *vcpu;
@@ -4854,37 +4865,38 @@ static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot)
}
static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
- unsigned long npages = mem->memory_size >> PAGE_SHIFT;
-
if (change == KVM_MR_CREATE) {
- slot->arch.rmap = vzalloc(array_size(npages,
- sizeof(*slot->arch.rmap)));
- if (!slot->arch.rmap)
+ unsigned long size = array_size(new->npages, sizeof(*new->arch.rmap));
+
+ if ((size >> PAGE_SHIFT) > totalram_pages())
return -ENOMEM;
+
+ new->arch.rmap = vzalloc(size);
+ if (!new->arch.rmap)
+ return -ENOMEM;
+ } else if (change != KVM_MR_DELETE) {
+ new->arch.rmap = old->arch.rmap;
}
return 0;
}
static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- unsigned long npages = mem->memory_size >> PAGE_SHIFT;
-
/*
- * If we are making a new memslot, it might make
+ * If we are creating or modifying a memslot, it might make
* some address that was previously cached as emulated
* MMIO be no longer emulated MMIO, so invalidate
* all the caches of emulated MMIO translations.
*/
- if (npages)
+ if (change != KVM_MR_DELETE)
atomic64_inc(&kvm->arch.mmio_update);
/*
@@ -5072,6 +5084,8 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
*/
int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
{
+ unsigned long lpcr, lpcr_mask;
+
if (nesting_enabled(kvm))
kvmhv_release_all_nested(kvm);
kvmppc_rmap_reset(kvm);
@@ -5081,8 +5095,13 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
kvm->arch.radix = 0;
spin_unlock(&kvm->mmu_lock);
kvmppc_free_radix(kvm);
- kvmppc_update_lpcr(kvm, LPCR_VPM1,
- LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+
+ lpcr = LPCR_VPM1;
+ lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ lpcr_mask |= LPCR_HAIL;
+ kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
return 0;
}
@@ -5092,6 +5111,7 @@ int kvmppc_switch_mmu_to_hpt(struct kvm *kvm)
*/
int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
{
+ unsigned long lpcr, lpcr_mask;
int err;
err = kvmppc_init_vm_radix(kvm);
@@ -5103,8 +5123,17 @@ int kvmppc_switch_mmu_to_radix(struct kvm *kvm)
kvm->arch.radix = 1;
spin_unlock(&kvm->mmu_lock);
kvmppc_free_hpt(&kvm->arch.hpt);
- kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR,
- LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR);
+
+ lpcr = LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+ lpcr_mask = LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ lpcr_mask |= LPCR_HAIL;
+ if (cpu_has_feature(CPU_FTR_HVMODE) &&
+ (kvm->arch.host_lpcr & LPCR_HAIL))
+ lpcr |= LPCR_HAIL;
+ }
+ kvmppc_update_lpcr(kvm, lpcr, lpcr_mask);
+
return 0;
}
@@ -5126,6 +5155,9 @@ void kvmppc_alloc_host_rm_ops(void)
int cpu, core;
int size;
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ return;
+
/* Not the first time here ? */
if (kvmppc_host_rm_ops_hv != NULL)
return;
@@ -5268,6 +5300,10 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
kvm->arch.mmu_ready = 1;
lpcr &= ~LPCR_VPM1;
lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
+ if (cpu_has_feature(CPU_FTR_HVMODE) &&
+ cpu_has_feature(CPU_FTR_ARCH_31) &&
+ (kvm->arch.host_lpcr & LPCR_HAIL))
+ lpcr |= LPCR_HAIL;
ret = kvmppc_init_vm_radix(kvm);
if (ret) {
kvmppc_free_lpid(kvm->arch.lpid);
@@ -5861,7 +5897,7 @@ static int kvmhv_svm_off(struct kvm *kvm)
int mmu_was_ready;
int srcu_idx;
int ret = 0;
- int i;
+ unsigned long i;
if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
return ret;
@@ -5883,11 +5919,12 @@ static int kvmhv_svm_off(struct kvm *kvm)
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
struct kvm_memory_slot *memslot;
struct kvm_memslots *slots = __kvm_memslots(kvm, i);
+ int bkt;
if (!slots)
continue;
- kvm_for_each_memslot(memslot, slots) {
+ kvm_for_each_memslot(memslot, bkt, slots) {
kvmppc_uvmem_drop_pages(memslot, kvm, true);
uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
}
@@ -6063,9 +6100,11 @@ static int kvmppc_book3s_init_hv(void)
if (r)
return r;
- r = kvm_init_subcore_bitmap();
- if (r)
- return r;
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ r = kvm_init_subcore_bitmap();
+ if (r)
+ return r;
+ }
/*
* We need a way of accessing the XICS interrupt controller,
diff --git a/arch/powerpc/kvm/book3s_hv.h b/arch/powerpc/kvm/book3s_hv.h
new file mode 100644
index 000000000000..6b7f07d9026b
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv.h
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Privileged (non-hypervisor) host registers to save.
+ */
+struct p9_host_os_sprs {
+ unsigned long iamr;
+ unsigned long amr;
+
+ unsigned int pmc1;
+ unsigned int pmc2;
+ unsigned int pmc3;
+ unsigned int pmc4;
+ unsigned int pmc5;
+ unsigned int pmc6;
+ unsigned long mmcr0;
+ unsigned long mmcr1;
+ unsigned long mmcr2;
+ unsigned long mmcr3;
+ unsigned long mmcra;
+ unsigned long siar;
+ unsigned long sier1;
+ unsigned long sier2;
+ unsigned long sier3;
+ unsigned long sdar;
+};
+
+static inline bool nesting_enabled(struct kvm *kvm)
+{
+ return kvm->arch.nested_enable && kvm_is_radix(kvm);
+}
+
+bool load_vcpu_state(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs);
+void store_vcpu_state(struct kvm_vcpu *vcpu);
+void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs);
+void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs);
+void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs);
+void switch_pmu_to_host(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs);
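
For orientation, a minimal sketch of how the helpers declared in this new
header are expected to pair up around a P9 guest entry/exit. This is
illustrative only: the real sequence lives in kvmhv_vcpu_entry_p9() and
interleaves MMU, timebase and interrupt handling elided here, and
enter_guest() below is a hypothetical stand-in.

static int p9_entry_sketch(struct kvm_vcpu *vcpu)
{
	struct p9_host_os_sprs host_os_sprs;
	int trap;

	save_p9_host_os_sprs(&host_os_sprs);		/* host IAMR/AMR */
	load_vcpu_state(vcpu, &host_os_sprs);		/* guest SPRs, FP/VEC, TM */

	switch_pmu_to_guest(vcpu, &host_os_sprs);
	trap = enter_guest(vcpu);			/* hypothetical stand-in */
	switch_pmu_to_host(vcpu, &host_os_sprs);

	store_vcpu_state(vcpu);				/* guest SPRs back to vcpu */
	restore_p9_host_os_sprs(vcpu, &host_os_sprs);	/* host SPRs back */

	return trap;
}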
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 70b7a8f97153..7d6d91338c3f 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -649,6 +649,8 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
int ext;
unsigned long lpcr;
+ WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300));
+
/* Insert EXTERNAL bit into LPCR at the MER bit position */
ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
lpcr = mfspr(SPRN_LPCR);
@@ -682,60 +684,23 @@ static void flush_guest_tlb(struct kvm *kvm)
unsigned long rb, set;
rb = PPC_BIT(52); /* IS = 2 */
- if (kvm_is_radix(kvm)) {
- /* R=1 PRS=1 RIC=2 */
+ for (set = 0; set < kvm->arch.tlb_sets; ++set) {
+ /* R=0 PRS=0 RIC=0 */
asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
- : : "r" (rb), "i" (1), "i" (1), "i" (2),
+ : : "r" (rb), "i" (0), "i" (0), "i" (0),
"r" (0) : "memory");
- for (set = 1; set < kvm->arch.tlb_sets; ++set) {
- rb += PPC_BIT(51); /* increment set number */
- /* R=1 PRS=1 RIC=0 */
- asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
- : : "r" (rb), "i" (1), "i" (1), "i" (0),
- "r" (0) : "memory");
- }
- asm volatile("ptesync": : :"memory");
- // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
- asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
- } else {
- for (set = 0; set < kvm->arch.tlb_sets; ++set) {
- /* R=0 PRS=0 RIC=0 */
- asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
- : : "r" (rb), "i" (0), "i" (0), "i" (0),
- "r" (0) : "memory");
- rb += PPC_BIT(51); /* increment set number */
- }
- asm volatile("ptesync": : :"memory");
- // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
+ rb += PPC_BIT(51); /* increment set number */
}
+ asm volatile("ptesync": : :"memory");
}
-void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
- struct kvm_nested_guest *nested)
+void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu)
{
- cpumask_t *need_tlb_flush;
-
- /*
- * On POWER9, individual threads can come in here, but the
- * TLB is shared between the 4 threads in a core, hence
- * invalidating on one thread invalidates for all.
- * Thus we make all 4 threads use the same bit.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- pcpu = cpu_first_tlb_thread_sibling(pcpu);
-
- if (nested)
- need_tlb_flush = &nested->need_tlb_flush;
- else
- need_tlb_flush = &kvm->arch.need_tlb_flush;
-
- if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
+ if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) {
flush_guest_tlb(kvm);
/* Clear the bit after the TLB flush */
- cpumask_clear_cpu(pcpu, need_tlb_flush);
+ cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush);
}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);
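
The RB operand built by the simplified flush_guest_tlb() above packs the IS
field and the congruence-class set number using PPC_BIT()'s MSB-0 bit
numbering. A sketch of that arithmetic, under that assumption (the helper
name is illustrative, not in-tree):

/* RB for a tlbiel of congruence-class set n: IS = 2 at bit 52, with the
 * set number field stepped in units of PPC_BIT(51), as in the loop above. */
static inline unsigned long tlbiel_rb_for_set(unsigned long n)
{
	return PPC_BIT(52) + n * PPC_BIT(51);
}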
diff --git a/arch/powerpc/kvm/book3s_hv_hmi.c b/arch/powerpc/kvm/book3s_hv_hmi.c
index 9af660476314..1ec50c69678b 100644
--- a/arch/powerpc/kvm/book3s_hv_hmi.c
+++ b/arch/powerpc/kvm/book3s_hv_hmi.c
@@ -20,10 +20,15 @@ void wait_for_subcore_guest_exit(void)
/*
* NULL bitmap pointer indicates that KVM module hasn't
- * been loaded yet and hence no guests are running.
+ * been loaded yet and hence no guests are running, or that we are
+ * running on a POWER9 or newer CPU.
+ *
* If no KVM is in use, no need to co-ordinate among threads
* as all of them will always be in host and no one is going
* to modify TB other than the opal hmi handler.
+ *
+ * POWER9 and newer don't need this synchronisation.
+ *
* Hence, just return from here.
*/
if (!local_paca->sibling_subcore_state)
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 4444f83cb133..59d89e4b154a 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -104,7 +104,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtlr r0
blr
-_GLOBAL(kvmhv_save_host_pmu)
+/*
+ * void kvmhv_save_host_pmu(void)
+ */
+kvmhv_save_host_pmu:
BEGIN_FTR_SECTION
/* Work around P8 PMAE bug */
li r3, -1
@@ -138,14 +141,6 @@ BEGIN_FTR_SECTION
std r8, HSTATE_MMCR2(r13)
std r9, HSTATE_SIER(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-BEGIN_FTR_SECTION
- mfspr r5, SPRN_MMCR3
- mfspr r6, SPRN_SIER2
- mfspr r7, SPRN_SIER3
- std r5, HSTATE_MMCR3(r13)
- std r6, HSTATE_SIER2(r13)
- std r7, HSTATE_SIER3(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
mfspr r3, SPRN_PMC1
mfspr r5, SPRN_PMC2
mfspr r6, SPRN_PMC3
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index ed8a2c9f5629..9d373f8963ee 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -358,11 +358,12 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
/* convert TB values/offsets to host (L0) values */
hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
vc->tb_offset += l2_hv.tb_offset;
+ vcpu->arch.dec_expires += l2_hv.tb_offset;
/* set L1 state to L2 state */
vcpu->arch.nested = l2;
vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
- l2->hfscr = l2_hv.hfscr;
+ vcpu->arch.nested_hfscr = l2_hv.hfscr;
vcpu->arch.regs = l2_regs;
/* Guest must always run with ME enabled, HV disabled. */
@@ -374,11 +375,6 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
vcpu->arch.ret = RESUME_GUEST;
vcpu->arch.trap = 0;
do {
- if (mftb() >= hdec_exp) {
- vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
- r = RESUME_HOST;
- break;
- }
r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
} while (is_kvmppc_resume_guest(r));
@@ -399,6 +395,8 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
if (l2_regs.msr & MSR_TS_MASK)
vcpu->arch.shregs.msr |= MSR_TS_S;
vc->tb_offset = saved_l1_hv.tb_offset;
+ /* XXX: is this always the same delta as saved_l1_hv.tb_offset? */
+ vcpu->arch.dec_expires -= l2_hv.tb_offset;
restore_hv_regs(vcpu, &saved_l1_hv);
vcpu->arch.purr += delta_purr;
vcpu->arch.spurr += delta_spurr;
@@ -582,7 +580,7 @@ long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
if (eaddr & (0xFFFUL << 52))
return H_PARAMETER;
- buf = kzalloc(n, GFP_KERNEL);
+ buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN);
if (!buf)
return H_NO_MEM;
@@ -749,7 +747,7 @@ void kvmhv_release_all_nested(struct kvm *kvm)
struct kvm_nested_guest *gp;
struct kvm_nested_guest *freelist = NULL;
struct kvm_memory_slot *memslot;
- int srcu_idx;
+ int srcu_idx, bkt;
spin_lock(&kvm->mmu_lock);
for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
@@ -770,7 +768,7 @@ void kvmhv_release_all_nested(struct kvm *kvm)
}
srcu_idx = srcu_read_lock(&kvm->srcu);
- kvm_for_each_memslot(memslot, kvm_memslots(kvm))
+ kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
kvmhv_free_memslot_nest_rmap(memslot);
srcu_read_unlock(&kvm->srcu, srcu_idx);
}
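
The dec_expires adjustments above are plain timebase arithmetic: the field is
now kept in the guest's timebase, so entering the L2 applies its tb_offset and
returning to L1 removes it again. A hedged sketch of that bookkeeping
(variable names are illustrative):

/* Illustrative arithmetic only, not the in-tree code. */
u64 dec_expires_l1  = vcpu->arch.dec_expires;		  /* L1 timebase units */
u64 dec_expires_l2  = dec_expires_l1 + l2_hv.tb_offset;	  /* applied on entry */
u64 dec_expires_ret = dec_expires_l2 - l2_hv.tb_offset;	  /* removed on exit */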
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 961b3d70483c..a28e5b3daabd 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -4,8 +4,439 @@
#include <asm/asm-prototypes.h>
#include <asm/dbell.h>
#include <asm/kvm_ppc.h>
+#include <asm/pmc.h>
#include <asm/ppc-opcode.h>
+#include "book3s_hv.h"
+
+static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
+{
+ if (!(mmcr0 & MMCR0_FC))
+ goto do_freeze;
+ if (mmcra & MMCRA_SAMPLE_ENABLE)
+ goto do_freeze;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ if (!(mmcr0 & MMCR0_PMCCEXT))
+ goto do_freeze;
+ if (!(mmcra & MMCRA_BHRB_DISABLE))
+ goto do_freeze;
+ }
+ return;
+
+do_freeze:
+ mmcr0 = MMCR0_FC;
+ mmcra = 0;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mmcr0 |= MMCR0_PMCCEXT;
+ mmcra = MMCRA_BHRB_DISABLE;
+ }
+
+ mtspr(SPRN_MMCR0, mmcr0);
+ mtspr(SPRN_MMCRA, mmcra);
+ isync();
+}
+
+void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ struct lppaca *lp;
+ int load_pmu = 1;
+
+ lp = vcpu->arch.vpa.pinned_addr;
+ if (lp)
+ load_pmu = lp->pmcregs_in_use;
+
+ /* Save host */
+ if (ppc_get_pmu_inuse()) {
+ /*
+ * It might be better to put PMU handling (at least for the
+ * host) in the perf subsystem because it knows more about what
+ * is being used.
+ */
+
+ /* POWER9, POWER10 do not implement HPMC or SPMC */
+
+ host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
+ host_os_sprs->mmcra = mfspr(SPRN_MMCRA);
+
+ freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);
+
+ host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
+ host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
+ host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
+ host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
+ host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
+ host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
+ host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
+ host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
+ host_os_sprs->sdar = mfspr(SPRN_SDAR);
+ host_os_sprs->siar = mfspr(SPRN_SIAR);
+ host_os_sprs->sier1 = mfspr(SPRN_SIER);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
+ host_os_sprs->sier2 = mfspr(SPRN_SIER2);
+ host_os_sprs->sier3 = mfspr(SPRN_SIER3);
+ }
+ }
+
+#ifdef CONFIG_PPC_PSERIES
+ /* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
+ if (kvmhv_on_pseries()) {
+ barrier();
+ get_lppaca()->pmcregs_in_use = load_pmu;
+ barrier();
+ }
+#endif
+
+ /*
+ * Load guest. If the VPA said the PMCs are not in use but the guest
+ * tried to access them anyway, HFSCR[PM] will be set by the HFAC
+ * fault so we can make forward progress.
+ */
+ if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
+ mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
+ mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
+ mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
+ mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
+ mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
+ mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
+ mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
+ mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
+ mtspr(SPRN_SDAR, vcpu->arch.sdar);
+ mtspr(SPRN_SIAR, vcpu->arch.siar);
+ mtspr(SPRN_SIER, vcpu->arch.sier[0]);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
+ mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
+ mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
+ }
+
+ /* Set MMCRA then MMCR0 last */
+ mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
+ mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
+ /* No isync necessary because we're starting counters */
+
+ if (!vcpu->arch.nested &&
+ (vcpu->arch.hfscr_permitted & HFSCR_PM))
+ vcpu->arch.hfscr |= HFSCR_PM;
+ }
+}
+EXPORT_SYMBOL_GPL(switch_pmu_to_guest);
+
+void switch_pmu_to_host(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ struct lppaca *lp;
+ int save_pmu = 1;
+
+ lp = vcpu->arch.vpa.pinned_addr;
+ if (lp)
+ save_pmu = lp->pmcregs_in_use;
+ if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
+ /*
+ * Save pmu if this guest is capable of running nested guests.
+ * This is option is for old L1s that do not set their
+ * lppaca->pmcregs_in_use properly when entering their L2.
+ */
+ save_pmu |= nesting_enabled(vcpu->kvm);
+ }
+
+ if (save_pmu) {
+ vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
+ vcpu->arch.mmcra = mfspr(SPRN_MMCRA);
+
+ freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);
+
+ vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
+ vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
+ vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
+ vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
+ vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
+ vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
+ vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
+ vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
+ vcpu->arch.sdar = mfspr(SPRN_SDAR);
+ vcpu->arch.siar = mfspr(SPRN_SIAR);
+ vcpu->arch.sier[0] = mfspr(SPRN_SIER);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
+ vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
+ vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
+ }
+
+ } else if (vcpu->arch.hfscr & HFSCR_PM) {
+ /*
+ * The guest accessed PMC SPRs without specifying they should
+ * be preserved, or it cleared pmcregs_in_use after the last
+ * access. Just ensure they are frozen.
+ */
+ freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));
+
+ /*
+ * Demand-fault PMU register access in the guest.
+ *
+ * This is used to grab the guest's VPA pmcregs_in_use value
+ * and reflect it into the host's VPA in the case of a nested
+ * hypervisor.
+ *
+ * It also avoids having to zero-out SPRs after each guest
+	 * exit to avoid side-channels.
+ *
+ * This is cleared here when we exit the guest, so later HFSCR
+ * interrupt handling can add it back to run the guest with
+ * PM enabled next time.
+ */
+ if (!vcpu->arch.nested)
+ vcpu->arch.hfscr &= ~HFSCR_PM;
+ } /* otherwise the PMU should still be frozen */
+
+#ifdef CONFIG_PPC_PSERIES
+ if (kvmhv_on_pseries()) {
+ barrier();
+ get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
+ barrier();
+ }
+#endif
+
+ if (ppc_get_pmu_inuse()) {
+ mtspr(SPRN_PMC1, host_os_sprs->pmc1);
+ mtspr(SPRN_PMC2, host_os_sprs->pmc2);
+ mtspr(SPRN_PMC3, host_os_sprs->pmc3);
+ mtspr(SPRN_PMC4, host_os_sprs->pmc4);
+ mtspr(SPRN_PMC5, host_os_sprs->pmc5);
+ mtspr(SPRN_PMC6, host_os_sprs->pmc6);
+ mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
+ mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
+ mtspr(SPRN_SDAR, host_os_sprs->sdar);
+ mtspr(SPRN_SIAR, host_os_sprs->siar);
+ mtspr(SPRN_SIER, host_os_sprs->sier1);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
+ mtspr(SPRN_SIER2, host_os_sprs->sier2);
+ mtspr(SPRN_SIER3, host_os_sprs->sier3);
+ }
+
+ /* Set MMCRA then MMCR0 last */
+ mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
+ mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
+ isync();
+ }
+}
+EXPORT_SYMBOL_GPL(switch_pmu_to_host);
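
Both directions of the PMU switch hinge on the guest's VPA flag. A condensed
sketch of that decision, assuming the lppaca layout used above (the helper
itself is illustrative, not in-tree):

static bool guest_pmu_state_needed(struct kvm_vcpu *vcpu)
{
	struct lppaca *lp = vcpu->arch.vpa.pinned_addr;
	int in_use = lp ? lp->pmcregs_in_use : 1;	/* no VPA: assume in use */

	/* Also true if the guest demand-faulted the PMU via HFSCR[PM]. */
	return in_use || (vcpu->arch.hfscr & HFSCR_PM);
}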
+
+static void load_spr_state(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ /* TAR is very fast */
+ mtspr(SPRN_TAR, vcpu->arch.tar);
+
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+ current->thread.vrsave != vcpu->arch.vrsave)
+ mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
+#endif
+
+ if (vcpu->arch.hfscr & HFSCR_EBB) {
+ if (current->thread.ebbhr != vcpu->arch.ebbhr)
+ mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
+ if (current->thread.ebbrr != vcpu->arch.ebbrr)
+ mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
+ if (current->thread.bescr != vcpu->arch.bescr)
+ mtspr(SPRN_BESCR, vcpu->arch.bescr);
+ }
+
+ if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
+ current->thread.tidr != vcpu->arch.tid)
+ mtspr(SPRN_TIDR, vcpu->arch.tid);
+ if (host_os_sprs->iamr != vcpu->arch.iamr)
+ mtspr(SPRN_IAMR, vcpu->arch.iamr);
+ if (host_os_sprs->amr != vcpu->arch.amr)
+ mtspr(SPRN_AMR, vcpu->arch.amr);
+ if (vcpu->arch.uamor != 0)
+ mtspr(SPRN_UAMOR, vcpu->arch.uamor);
+ if (current->thread.fscr != vcpu->arch.fscr)
+ mtspr(SPRN_FSCR, vcpu->arch.fscr);
+ if (current->thread.dscr != vcpu->arch.dscr)
+ mtspr(SPRN_DSCR, vcpu->arch.dscr);
+ if (vcpu->arch.pspb != 0)
+ mtspr(SPRN_PSPB, vcpu->arch.pspb);
+
+ /*
+ * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
+ * clear (or hstate set appropriately to catch those registers
+ * being clobbered if we take a MCE or SRESET), so those are done
+ * later.
+ */
+
+ if (!(vcpu->arch.ctrl & 1))
+ mtspr(SPRN_CTRLT, 0);
+}
+
+static void store_spr_state(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.tar = mfspr(SPRN_TAR);
+
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
+#endif
+
+ if (vcpu->arch.hfscr & HFSCR_EBB) {
+ vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
+ vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
+ vcpu->arch.bescr = mfspr(SPRN_BESCR);
+ }
+
+ if (cpu_has_feature(CPU_FTR_P9_TIDR))
+ vcpu->arch.tid = mfspr(SPRN_TIDR);
+ vcpu->arch.iamr = mfspr(SPRN_IAMR);
+ vcpu->arch.amr = mfspr(SPRN_AMR);
+ vcpu->arch.uamor = mfspr(SPRN_UAMOR);
+ vcpu->arch.fscr = mfspr(SPRN_FSCR);
+ vcpu->arch.dscr = mfspr(SPRN_DSCR);
+ vcpu->arch.pspb = mfspr(SPRN_PSPB);
+
+ vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
+}
+
+/* Returns true if current MSR and/or guest MSR may have changed */
+bool load_vcpu_state(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ bool ret = false;
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
+ unsigned long guest_msr = vcpu->arch.shregs.msr;
+ if (MSR_TM_ACTIVE(guest_msr)) {
+ kvmppc_restore_tm_hv(vcpu, guest_msr, true);
+ ret = true;
+ } else if (vcpu->arch.hfscr & HFSCR_TM) {
+ mtspr(SPRN_TEXASR, vcpu->arch.texasr);
+ mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
+ mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
+ }
+ }
+#endif
+
+ load_spr_state(vcpu, host_os_sprs);
+
+ load_fp_state(&vcpu->arch.fp);
+#ifdef CONFIG_ALTIVEC
+ load_vr_state(&vcpu->arch.vr);
+#endif
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(load_vcpu_state);
+
+void store_vcpu_state(struct kvm_vcpu *vcpu)
+{
+ store_spr_state(vcpu);
+
+ store_fp_state(&vcpu->arch.fp);
+#ifdef CONFIG_ALTIVEC
+ store_vr_state(&vcpu->arch.vr);
+#endif
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
+ unsigned long guest_msr = vcpu->arch.shregs.msr;
+ if (MSR_TM_ACTIVE(guest_msr)) {
+ kvmppc_save_tm_hv(vcpu, guest_msr, true);
+ } else if (vcpu->arch.hfscr & HFSCR_TM) {
+ vcpu->arch.texasr = mfspr(SPRN_TEXASR);
+ vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
+ vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
+
+ if (!vcpu->arch.nested) {
+ vcpu->arch.load_tm++; /* see load_ebb comment */
+ if (!vcpu->arch.load_tm)
+ vcpu->arch.hfscr &= ~HFSCR_TM;
+ }
+ }
+ }
+#endif
+}
+EXPORT_SYMBOL_GPL(store_vcpu_state);
+
+void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
+{
+ host_os_sprs->iamr = mfspr(SPRN_IAMR);
+ host_os_sprs->amr = mfspr(SPRN_AMR);
+}
+EXPORT_SYMBOL_GPL(save_p9_host_os_sprs);
+
+/* vcpu guest regs must already be saved */
+void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ /*
+ * current->thread.xxx registers must all be restored to host
+	 * values before a potential context switch, otherwise the context
+ * switch itself will overwrite current->thread.xxx with the values
+ * from the guest SPRs.
+ */
+
+ mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);
+
+ if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
+ current->thread.tidr != vcpu->arch.tid)
+ mtspr(SPRN_TIDR, current->thread.tidr);
+ if (host_os_sprs->iamr != vcpu->arch.iamr)
+ mtspr(SPRN_IAMR, host_os_sprs->iamr);
+ if (vcpu->arch.uamor != 0)
+ mtspr(SPRN_UAMOR, 0);
+ if (host_os_sprs->amr != vcpu->arch.amr)
+ mtspr(SPRN_AMR, host_os_sprs->amr);
+ if (current->thread.fscr != vcpu->arch.fscr)
+ mtspr(SPRN_FSCR, current->thread.fscr);
+ if (current->thread.dscr != vcpu->arch.dscr)
+ mtspr(SPRN_DSCR, current->thread.dscr);
+ if (vcpu->arch.pspb != 0)
+ mtspr(SPRN_PSPB, 0);
+
+ /* Save guest CTRL register, set runlatch to 1 */
+ if (!(vcpu->arch.ctrl & 1))
+ mtspr(SPRN_CTRLT, 1);
+
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
+ vcpu->arch.vrsave != current->thread.vrsave)
+ mtspr(SPRN_VRSAVE, current->thread.vrsave);
+#endif
+ if (vcpu->arch.hfscr & HFSCR_EBB) {
+ if (vcpu->arch.bescr != current->thread.bescr)
+ mtspr(SPRN_BESCR, current->thread.bescr);
+ if (vcpu->arch.ebbhr != current->thread.ebbhr)
+ mtspr(SPRN_EBBHR, current->thread.ebbhr);
+ if (vcpu->arch.ebbrr != current->thread.ebbrr)
+ mtspr(SPRN_EBBRR, current->thread.ebbrr);
+
+ if (!vcpu->arch.nested) {
+ /*
+			 * This is like load_fp in context switching: turn the
+			 * facility off after the u8 counter wraps, to avoid
+			 * saving and restoring the registers on each partition
+			 * switch.
+ */
+ vcpu->arch.load_ebb++;
+ if (!vcpu->arch.load_ebb)
+ vcpu->arch.hfscr &= ~HFSCR_EBB;
+ }
+ }
+
+ if (vcpu->arch.tar != current->thread.tar)
+ mtspr(SPRN_TAR, current->thread.tar);
+}
+EXPORT_SYMBOL_GPL(restore_p9_host_os_sprs);
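
A recurring micro-optimisation in the functions above is worth noting: mtspr
is comparatively expensive, so a register is only written back when its value
actually differs. The shape of the pattern in isolation (illustrative
fragment, not additional in-tree code):

	if (host_os_sprs->amr != vcpu->arch.amr)
		mtspr(SPRN_AMR, host_os_sprs->amr);	/* skip the write when equal */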
+
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
static void __start_timing(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
{
@@ -56,10 +487,22 @@ static void __accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator
#define accumulate_time(vcpu, next) do {} while (0)
#endif
-static inline void mfslb(unsigned int idx, u64 *slbee, u64 *slbev)
+static inline u64 mfslbv(unsigned int idx)
{
- asm volatile("slbmfev %0,%1" : "=r" (*slbev) : "r" (idx));
- asm volatile("slbmfee %0,%1" : "=r" (*slbee) : "r" (idx));
+ u64 slbev;
+
+ asm volatile("slbmfev %0,%1" : "=r" (slbev) : "r" (idx));
+
+ return slbev;
+}
+
+static inline u64 mfslbe(unsigned int idx)
+{
+ u64 slbee;
+
+ asm volatile("slbmfee %0,%1" : "=r" (slbee) : "r" (idx));
+
+ return slbee;
}
static inline void mtslb(u64 slbee, u64 slbev)
@@ -100,17 +543,19 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
/*
- * All the isync()s are overkill but trivially follow the ISA
- * requirements. Some can likely be replaced with justification
- * comment for why they are not needed.
+ * Prior memory accesses to host PID Q3 must be completed before we
+ * start switching, and stores must be drained to avoid not-my-LPAR
+ * logic (see switch_mmu_to_host).
*/
+ asm volatile("hwsync" ::: "memory");
isync();
mtspr(SPRN_LPID, lpid);
- isync();
mtspr(SPRN_LPCR, lpcr);
- isync();
mtspr(SPRN_PID, vcpu->arch.pid);
- isync();
+ /*
+ * isync not required here because we are HRFID'ing to guest before
+ * any guest context access, which is context synchronising.
+ */
}
static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
@@ -120,25 +565,41 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
lpid = kvm->arch.lpid;
+ /*
+ * See switch_mmu_to_guest_radix. ptesync should not be required here
+ * even if the host is in HPT mode because speculative accesses would
+ * not cause RC updates (we are in real mode).
+ */
+ asm volatile("hwsync" ::: "memory");
+ isync();
mtspr(SPRN_LPID, lpid);
mtspr(SPRN_LPCR, lpcr);
mtspr(SPRN_PID, vcpu->arch.pid);
for (i = 0; i < vcpu->arch.slb_max; i++)
mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
-
- isync();
+ /*
+ * isync not required here, see switch_mmu_to_guest_radix.
+ */
}
static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
{
+ /*
+ * The guest has exited, so guest MMU context is no longer being
+ * non-speculatively accessed, but a hwsync is needed before the
+ * mtLPIDR / mtPIDR switch, in order to ensure all stores are drained,
+ * so the not-my-LPAR tlbie logic does not overlook them.
+ */
+ asm volatile("hwsync" ::: "memory");
isync();
mtspr(SPRN_PID, pid);
- isync();
mtspr(SPRN_LPID, kvm->arch.host_lpid);
- isync();
mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
- isync();
+ /*
+ * isync is not required after the switch, because mtmsrd with L=0
+ * is performed after this switch, which is context synchronising.
+ */
if (!radix_enabled())
slb_restore_bolted_realmode();
@@ -171,8 +632,10 @@ static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
*/
for (i = 0; i < vcpu->arch.slb_nr; i++) {
u64 slbee, slbev;
- mfslb(i, &slbee, &slbev);
+
+ slbee = mfslbe(i);
if (slbee & SLB_ESID_V) {
+ slbev = mfslbv(i);
vcpu->arch.slb[nr].orige = slbee | i;
vcpu->arch.slb[nr].origv = slbev;
nr++;
@@ -183,15 +646,128 @@ static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
}
}
-int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr)
+static void flush_guest_tlb(struct kvm *kvm)
{
+ unsigned long rb, set;
+
+ rb = PPC_BIT(52); /* IS = 2 */
+ if (kvm_is_radix(kvm)) {
+ /* R=1 PRS=1 RIC=2 */
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r" (rb), "i" (1), "i" (1), "i" (2),
+ "r" (0) : "memory");
+ for (set = 1; set < kvm->arch.tlb_sets; ++set) {
+ rb += PPC_BIT(51); /* increment set number */
+ /* R=1 PRS=1 RIC=0 */
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r" (rb), "i" (1), "i" (1), "i" (0),
+ "r" (0) : "memory");
+ }
+ asm volatile("ptesync": : :"memory");
+ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
+ asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
+ } else {
+ for (set = 0; set < kvm->arch.tlb_sets; ++set) {
+ /* R=0 PRS=0 RIC=0 */
+ asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+ : : "r" (rb), "i" (0), "i" (0), "i" (0),
+ "r" (0) : "memory");
+ rb += PPC_BIT(51); /* increment set number */
+ }
+ asm volatile("ptesync": : :"memory");
+ // POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
+ }
+}
+
+static void check_need_tlb_flush(struct kvm *kvm, int pcpu,
+ struct kvm_nested_guest *nested)
+{
+ cpumask_t *need_tlb_flush;
+ bool all_set = true;
+ int i;
+
+ if (nested)
+ need_tlb_flush = &nested->need_tlb_flush;
+ else
+ need_tlb_flush = &kvm->arch.need_tlb_flush;
+
+ if (likely(!cpumask_test_cpu(pcpu, need_tlb_flush)))
+ return;
+
+ /*
+ * Individual threads can come in here, but the TLB is shared between
+ * the 4 threads in a core, hence invalidating on one thread
+ * invalidates for all, so only invalidate the first time (if all bits
+	 * were set). The others must still execute a ptesync.
+ *
+ * If a race occurs and two threads do the TLB flush, that is not a
+ * problem, just sub-optimal.
+ */
+ for (i = cpu_first_tlb_thread_sibling(pcpu);
+ i <= cpu_last_tlb_thread_sibling(pcpu);
+ i += cpu_tlb_thread_sibling_step()) {
+ if (!cpumask_test_cpu(i, need_tlb_flush)) {
+ all_set = false;
+ break;
+ }
+ }
+ if (all_set)
+ flush_guest_tlb(kvm);
+ else
+ asm volatile("ptesync" ::: "memory");
+
+ /* Clear the bit after the TLB flush */
+ cpumask_clear_cpu(pcpu, need_tlb_flush);
+}
+
+unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr)
+{
+ unsigned long msr_needed = 0;
+
+ msr &= ~MSR_EE;
+
+ /* MSR bits may have been cleared by context switch so must recheck */
+ if (IS_ENABLED(CONFIG_PPC_FPU))
+ msr_needed |= MSR_FP;
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ msr_needed |= MSR_VEC;
+ if (cpu_has_feature(CPU_FTR_VSX))
+ msr_needed |= MSR_VSX;
+ if ((cpu_has_feature(CPU_FTR_TM) ||
+ cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
+ (vcpu->arch.hfscr & HFSCR_TM))
+ msr_needed |= MSR_TM;
+
+ /*
+ * This could be combined with MSR[RI] clearing, but that expands
+	 * the unrecoverable window. It would be better to cover the
+	 * unrecoverable window with KVM bad interrupt handling rather than
+	 * use MSR[RI] at all.
+ *
+ * Much more difficult and less worthwhile to combine with IR/DR
+ * disable.
+ */
+ if ((msr & msr_needed) != msr_needed) {
+ msr |= msr_needed;
+ __mtmsrd(msr, 0);
+ } else {
+ __hard_irq_disable();
+ }
+ local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
+
+ return msr;
+}
+EXPORT_SYMBOL_GPL(kvmppc_msr_hard_disable_set_facilities);
+
+int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
+{
+ struct p9_host_os_sprs host_os_sprs;
struct kvm *kvm = vcpu->kvm;
struct kvm_nested_guest *nested = vcpu->arch.nested;
struct kvmppc_vcore *vc = vcpu->arch.vcore;
- s64 hdec;
- u64 tb, purr, spurr;
+ s64 hdec, dec;
+ u64 purr, spurr;
u64 *exsave;
- bool ri_set;
int trap;
unsigned long msr;
unsigned long host_hfscr;
@@ -199,11 +775,13 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
unsigned long host_dawr0;
unsigned long host_dawrx0;
unsigned long host_psscr;
+ unsigned long host_hpsscr;
unsigned long host_pidr;
unsigned long host_dawr1;
unsigned long host_dawrx1;
+ unsigned long dpdes;
- hdec = time_limit - mftb();
+ hdec = time_limit - *tb;
if (hdec < 0)
return BOOK3S_INTERRUPT_HV_DECREMENTER;
@@ -214,51 +792,84 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
vcpu->arch.ceded = 0;
- if (vc->tb_offset) {
- u64 new_tb = mftb() + vc->tb_offset;
- mtspr(SPRN_TBU40, new_tb);
- tb = mftb();
- if ((tb & 0xffffff) < (new_tb & 0xffffff))
- mtspr(SPRN_TBU40, new_tb + 0x1000000);
- vc->tb_offset_applied = vc->tb_offset;
- }
-
- msr = mfmsr();
+ /* Save MSR for restore, with EE clear. */
+ msr = mfmsr() & ~MSR_EE;
host_hfscr = mfspr(SPRN_HFSCR);
host_ciabr = mfspr(SPRN_CIABR);
- host_dawr0 = mfspr(SPRN_DAWR0);
- host_dawrx0 = mfspr(SPRN_DAWRX0);
- host_psscr = mfspr(SPRN_PSSCR);
+ host_psscr = mfspr(SPRN_PSSCR_PR);
+ if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+ host_hpsscr = mfspr(SPRN_PSSCR);
host_pidr = mfspr(SPRN_PID);
- if (cpu_has_feature(CPU_FTR_DAWR1)) {
- host_dawr1 = mfspr(SPRN_DAWR1);
- host_dawrx1 = mfspr(SPRN_DAWRX1);
- }
- if (vc->pcr)
- mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
- mtspr(SPRN_DPDES, vc->dpdes);
- mtspr(SPRN_VTB, vc->vtb);
+ if (dawr_enabled()) {
+ host_dawr0 = mfspr(SPRN_DAWR0);
+ host_dawrx0 = mfspr(SPRN_DAWRX0);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ host_dawr1 = mfspr(SPRN_DAWR1);
+ host_dawrx1 = mfspr(SPRN_DAWRX1);
+ }
+ }
local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
+
+ save_p9_host_os_sprs(&host_os_sprs);
+
+ msr = kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
+ if (lazy_irq_pending()) {
+ trap = 0;
+ goto out;
+ }
+
+ if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
+ msr = mfmsr(); /* MSR may have been updated */
+
+ if (vc->tb_offset) {
+ u64 new_tb = *tb + vc->tb_offset;
+ mtspr(SPRN_TBU40, new_tb);
+ if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+ new_tb += 0x1000000;
+ mtspr(SPRN_TBU40, new_tb);
+ }
+ *tb = new_tb;
+ vc->tb_offset_applied = vc->tb_offset;
+ }
+
+ mtspr(SPRN_VTB, vc->vtb);
mtspr(SPRN_PURR, vcpu->arch.purr);
mtspr(SPRN_SPURR, vcpu->arch.spurr);
+ if (vc->pcr)
+ mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
+ if (vcpu->arch.doorbell_request) {
+ vcpu->arch.doorbell_request = 0;
+ mtspr(SPRN_DPDES, 1);
+ }
+
if (dawr_enabled()) {
- mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
- mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
+ if (vcpu->arch.dawr0 != host_dawr0)
+ mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
+ if (vcpu->arch.dawrx0 != host_dawrx0)
+ mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
if (cpu_has_feature(CPU_FTR_DAWR1)) {
- mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
- mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
+ if (vcpu->arch.dawr1 != host_dawr1)
+ mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
+ if (vcpu->arch.dawrx1 != host_dawrx1)
+ mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
}
}
- mtspr(SPRN_CIABR, vcpu->arch.ciabr);
- mtspr(SPRN_IC, vcpu->arch.ic);
+ if (vcpu->arch.ciabr != host_ciabr)
+ mtspr(SPRN_CIABR, vcpu->arch.ciabr);
- mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
- (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+
+ if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
+ mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+ } else {
+ if (vcpu->arch.psscr != host_psscr)
+ mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
+ }
mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
@@ -276,18 +887,34 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 * HDSI, which should correctly update the HDSISR the second time the
 * HDSI is taken.
*
- * Just do this on all p9 processors for now.
+ * The "radix prefetch bug" test can be used to test for this bug, as
+ * it also exists fo DD2.1 and below.
*/
- mtspr(SPRN_HDSISR, HDSISR_CANARY);
+ if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
+ mtspr(SPRN_HDSISR, HDSISR_CANARY);
mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
- mtspr(SPRN_AMOR, ~0UL);
+ /*
+ * It might be preferable to load_vcpu_state here, in order to get the
+ * GPR/FP register loads executing in parallel with the previous mtSPR
+ * instructions, but for now that can't be done because the TM handling
+ * in load_vcpu_state can change some SPRs and vcpu state (nip, msr).
+ * But TM could be split out if this would be a significant benefit.
+ */
- local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_P9;
+ /*
+ * MSR[RI] does not need to be cleared (and is not, for radix guests
+ * with no prefetch bug), because in_guest is set. If we take a SRESET
+ * or MCE with in_guest set but still in HV mode, then
+ * kvmppc_p9_bad_interrupt handles the interrupt, which effectively
+ * clears MSR[RI] and doesn't return.
+ */
+ WRITE_ONCE(local_paca->kvm_hstate.in_guest, KVM_GUEST_MODE_HV_P9);
+ barrier(); /* Open in_guest critical section */
/*
* Hash host, hash guest, or radix guest with prefetch bug, all have
@@ -299,17 +926,13 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
save_clear_host_mmu(kvm);
- if (kvm_is_radix(kvm)) {
+ if (kvm_is_radix(kvm))
switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
- if (!cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
- __mtmsrd(0, 1); /* clear RI */
-
- } else {
+ else
switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);
- }
/* TLBIEL uses LPID=LPIDR, so run this after setting guest LPID */
- kvmppc_check_need_tlb_flush(kvm, vc->pcpu, nested);
+ check_need_tlb_flush(kvm, vc->pcpu, nested);
/*
* P9 suppresses the HDEC exception when LPCR[HDICE] = 0,
@@ -317,6 +940,8 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
*/
mtspr(SPRN_HDEC, hdec);
+ mtspr(SPRN_DEC, vcpu->arch.dec_expires - *tb);
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_return_to_guest:
#endif
@@ -327,7 +952,9 @@ tm_return_to_guest:
accumulate_time(vcpu, &vcpu->arch.guest_time);
+ switch_pmu_to_guest(vcpu, &host_os_sprs);
kvmppc_p9_enter_guest(vcpu);
+ switch_pmu_to_host(vcpu, &host_os_sprs);
accumulate_time(vcpu, &vcpu->arch.rm_intr);
@@ -340,36 +967,27 @@ tm_return_to_guest:
/* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */
trap = local_paca->kvm_hstate.scratch0 & ~0x2;
- /* HSRR interrupts leave MSR[RI] unchanged, SRR interrupts clear it. */
- ri_set = false;
- if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK)) {
- if (trap != BOOK3S_INTERRUPT_SYSCALL &&
- (vcpu->arch.shregs.msr & MSR_RI))
- ri_set = true;
+ if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK))
exsave = local_paca->exgen;
- } else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET) {
+ else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET)
exsave = local_paca->exnmi;
- } else { /* trap == 0x200 */
+ else /* trap == 0x200 */
exsave = local_paca->exmc;
- }
vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;
/*
- * Only set RI after reading machine check regs (DAR, DSISR, SRR0/1)
- * and hstate scratch (which we need to move into exsave to make
- * re-entrant vs SRESET/MCE)
+ * After reading machine check regs (DAR, DSISR, SRR0/1) and hstate
+ * scratch (which we need to move into exsave to make re-entrant vs
+	 * SRESET/MCE), register state is protected from reentrancy. However,
+	 * the timebase, MMU and other state are still set to the guest, so
+	 * don't enable MSR[RI] here. It gets enabled at the end, after in_guest
+ * is cleared.
+ *
+ * It is possible an NMI could come in here, which is why it is
+ * important to save the above state early so it can be debugged.
*/
- if (ri_set) {
- if (unlikely(!(mfmsr() & MSR_RI))) {
- __mtmsrd(MSR_RI, 1);
- WARN_ON_ONCE(1);
- }
- } else {
- WARN_ON_ONCE(mfmsr() & MSR_RI);
- __mtmsrd(MSR_RI, 1);
- }
vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
@@ -388,7 +1006,7 @@ tm_return_to_guest:
kvmppc_realmode_machine_check(vcpu);
} else if (unlikely(trap == BOOK3S_INTERRUPT_HMI)) {
- kvmppc_realmode_hmi_handler();
+ kvmppc_p9_realmode_hmi_handler(vcpu);
} else if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST) {
vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
@@ -427,13 +1045,6 @@ tm_return_to_guest:
*/
mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr);
-
- /*
- * tm_return_to_guest re-loads SRR0/1, DAR,
- * DSISR after RI is cleared, in case they had
- * been clobbered by a MCE.
- */
- __mtmsrd(0, 1); /* clear RI */
goto tm_return_to_guest;
}
}
@@ -445,81 +1056,109 @@ tm_return_to_guest:
/* Advance host PURR/SPURR by the amount used by guest */
purr = mfspr(SPRN_PURR);
spurr = mfspr(SPRN_SPURR);
- mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr +
- purr - vcpu->arch.purr);
- mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr +
- spurr - vcpu->arch.spurr);
+ local_paca->kvm_hstate.host_purr += purr - vcpu->arch.purr;
+ local_paca->kvm_hstate.host_spurr += spurr - vcpu->arch.spurr;
vcpu->arch.purr = purr;
vcpu->arch.spurr = spurr;
vcpu->arch.ic = mfspr(SPRN_IC);
vcpu->arch.pid = mfspr(SPRN_PID);
- vcpu->arch.psscr = mfspr(SPRN_PSSCR) & PSSCR_GUEST_VIS;
+ vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
- /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
- mtspr(SPRN_PSSCR, host_psscr |
- (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
- mtspr(SPRN_HFSCR, host_hfscr);
- mtspr(SPRN_CIABR, host_ciabr);
- mtspr(SPRN_DAWR0, host_dawr0);
- mtspr(SPRN_DAWRX0, host_dawrx0);
- if (cpu_has_feature(CPU_FTR_DAWR1)) {
- mtspr(SPRN_DAWR1, host_dawr1);
- mtspr(SPRN_DAWRX1, host_dawrx1);
- }
-
- if (kvm_is_radix(kvm)) {
- /*
- * Since this is radix, do a eieio; tlbsync; ptesync sequence
- * in case we interrupted the guest between a tlbie and a
- * ptesync.
- */
- asm volatile("eieio; tlbsync; ptesync");
- }
+ dpdes = mfspr(SPRN_DPDES);
+ if (dpdes)
+ vcpu->arch.doorbell_request = 1;
- /*
- * cp_abort is required if the processor supports local copy-paste
- * to clear the copy buffer that was under control of the guest.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_31))
- asm volatile(PPC_CP_ABORT);
-
- vc->dpdes = mfspr(SPRN_DPDES);
vc->vtb = mfspr(SPRN_VTB);
- mtspr(SPRN_DPDES, 0);
- if (vc->pcr)
- mtspr(SPRN_PCR, PCR_MASK);
+
+ dec = mfspr(SPRN_DEC);
+ if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
+ dec = (s32) dec;
+ *tb = mftb();
+ vcpu->arch.dec_expires = dec + *tb;
if (vc->tb_offset_applied) {
- u64 new_tb = mftb() - vc->tb_offset_applied;
+ u64 new_tb = *tb - vc->tb_offset_applied;
mtspr(SPRN_TBU40, new_tb);
- tb = mftb();
- if ((tb & 0xffffff) < (new_tb & 0xffffff))
- mtspr(SPRN_TBU40, new_tb + 0x1000000);
+ if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+ new_tb += 0x1000000;
+ mtspr(SPRN_TBU40, new_tb);
+ }
+ *tb = new_tb;
vc->tb_offset_applied = 0;
}
- mtspr(SPRN_HDEC, 0x7fffffff);
-
save_clear_guest_mmu(kvm, vcpu);
switch_mmu_to_host(kvm, host_pidr);
- local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
/*
- * If we are in real mode, only switch MMU on after the MMU is
- * switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
+ * Enable MSR here in order to have facilities enabled to save
+	 * guest registers. This also enables the MMU (if we were in real
+	 * mode), so it is done only after the MMU has been switched to the
+	 * host, to avoid the P9_RADIX_PREFETCH_BUG or the hash guest context.
*/
if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
- vcpu->arch.shregs.msr & MSR_TS_MASK)
+ vcpu->arch.shregs.msr & MSR_TS_MASK)
msr |= MSR_TS_S;
-
__mtmsrd(msr, 0);
+ store_vcpu_state(vcpu);
+
+ mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr);
+ mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr);
+
+ if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
+ /* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
+ mtspr(SPRN_PSSCR, host_hpsscr |
+ (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
+ }
+
+ mtspr(SPRN_HFSCR, host_hfscr);
+ if (vcpu->arch.ciabr != host_ciabr)
+ mtspr(SPRN_CIABR, host_ciabr);
+
+ if (dawr_enabled()) {
+ if (vcpu->arch.dawr0 != host_dawr0)
+ mtspr(SPRN_DAWR0, host_dawr0);
+ if (vcpu->arch.dawrx0 != host_dawrx0)
+ mtspr(SPRN_DAWRX0, host_dawrx0);
+ if (cpu_has_feature(CPU_FTR_DAWR1)) {
+ if (vcpu->arch.dawr1 != host_dawr1)
+ mtspr(SPRN_DAWR1, host_dawr1);
+ if (vcpu->arch.dawrx1 != host_dawrx1)
+ mtspr(SPRN_DAWRX1, host_dawrx1);
+ }
+ }
+
+ if (dpdes)
+ mtspr(SPRN_DPDES, 0);
+ if (vc->pcr)
+ mtspr(SPRN_PCR, PCR_MASK);
+
+ /* HDEC must be at least as large as DEC, so decrementer_max fits */
+ mtspr(SPRN_HDEC, decrementer_max);
+
+ timer_rearm_host_dec(*tb);
+
+ restore_p9_host_os_sprs(vcpu, &host_os_sprs);
+
+ barrier(); /* Close in_guest critical section */
+ WRITE_ONCE(local_paca->kvm_hstate.in_guest, KVM_GUEST_MODE_NONE);
+ /* Interrupts are recoverable at this point */
+
+ /*
+ * cp_abort is required if the processor supports local copy-paste
+ * to clear the copy buffer that was under control of the guest.
+ */
+ if (cpu_has_feature(CPU_FTR_ARCH_31))
+ asm volatile(PPC_CP_ABORT);
+
+out:
end_timing(vcpu);
return trap;
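
The TBU40 sequence repeated above (on entry, on exit, and in the HMI handler
below) is easier to read pulled out on its own. An illustrative helper, not
in-tree code:

/* Writing TBU40 sets the upper 40 bits of the timebase while the low 24
 * bits keep running. If the running low bits have already wrapped past
 * those of new_tb, bump the upper bits by one (0x1000000) so the timebase
 * never appears to go backwards. */
static void write_tb_upper40(u64 new_tb)
{
	mtspr(SPRN_TBU40, new_tb);
	if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
		new_tb += 0x1000000;
		mtspr(SPRN_TBU40, new_tb);
	}
}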
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index d4bca93b79f6..ccfd96965630 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -136,6 +136,60 @@ void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu)
vcpu->arch.mce_evt = mce_evt;
}
+
+long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ long ret = 0;
+
+ /*
+	 * Unapply and clear the offset first. That way, if the TB was not
+	 * resynced then it remains at the host value, and if it was resynced
+	 * then it is brought back to the host value. Then the tb offset is
+ * re-applied before continuing with the KVM exit.
+ *
+	 * This way, we don't need to actually know whether or not OPAL resynced
+ * the timebase or do any of the complicated dance that the P7/8
+ * path requires.
+ */
+ if (vc->tb_offset_applied) {
+ u64 new_tb = mftb() - vc->tb_offset_applied;
+ mtspr(SPRN_TBU40, new_tb);
+ if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+ new_tb += 0x1000000;
+ mtspr(SPRN_TBU40, new_tb);
+ }
+ vc->tb_offset_applied = 0;
+ }
+
+ local_paca->hmi_irqs++;
+
+ if (hmi_handle_debugtrig(NULL) >= 0) {
+ ret = 1;
+ goto out;
+ }
+
+ if (ppc_md.hmi_exception_early)
+ ppc_md.hmi_exception_early(NULL);
+
+out:
+ if (vc->tb_offset) {
+ u64 new_tb = mftb() + vc->tb_offset;
+ mtspr(SPRN_TBU40, new_tb);
+ if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
+ new_tb += 0x1000000;
+ mtspr(SPRN_TBU40, new_tb);
+ }
+ vc->tb_offset_applied = vc->tb_offset;
+ }
+
+ return ret;
+}
+
+/*
+ * The following subcore HMI handling applies only to pre-POWER9 CPUs.
+ */
+
/* Check if dynamic split is in force and return subcore size accordingly. */
static inline int kvmppc_cur_subcore_size(void)
{
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 2c1f3c6e72d1..2257fb18cb72 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -55,12 +55,6 @@ static int global_invalidates(struct kvm *kvm)
smp_wmb();
cpumask_setall(&kvm->arch.need_tlb_flush);
cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
- /*
- * On POWER9, threads are independent but the TLB is shared,
- * so use the bit for the first thread to represent the core.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- cpu = cpu_first_tlb_thread_sibling(cpu);
cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
}
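
With the first-tlb-thread-sibling mapping gone, each pcpu owns its own bit in
need_tlb_flush again. The producer side of the handshake, in isolation (the
consumer side is check_need_tlb_flush() in book3s_hv_p9_entry.c; this_pcpu is
an illustrative name):

	smp_wmb();					/* order PTE update vs. mask */
	cpumask_setall(&kvm->arch.need_tlb_flush);	/* every pcpu must flush */
	cpumask_clear_cpu(this_pcpu, &kvm->arch.need_tlb_flush); /* we flush now */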
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 32a4b4d412b9..d185dee26026 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -778,17 +778,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/* Restore AMR and UAMOR, set AMOR to all 1s */
ld r5,VCPU_AMR(r4)
ld r6,VCPU_UAMOR(r4)
- li r7,-1
mtspr SPRN_AMR,r5
mtspr SPRN_UAMOR,r6
- mtspr SPRN_AMOR,r7
- /* Restore state of CTRL run bit; assume 1 on entry */
+ /* Restore state of CTRL run bit; the host currently has it set to 1 */
lwz r5,VCPU_CTRL(r4)
andi. r5,r5,1
bne 4f
- mfspr r6,SPRN_CTRLF
- clrrdi r6,r6,1
+ li r6,0
mtspr SPRN_CTRLT,r6
4:
/* Secondary threads wait for primary to have done partition switch */
@@ -817,10 +814,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
* Set the decrementer to the guest decrementer.
*/
ld r8,VCPU_DEC_EXPIRES(r4)
- /* r8 is a host timebase value here, convert to guest TB */
- ld r5,HSTATE_KVM_VCORE(r13)
- ld r6,VCORE_TB_OFFSET_APPL(r5)
- add r8,r8,r6
mftb r7
subf r3,r7,r8
mtspr SPRN_DEC,r3
@@ -1195,9 +1188,6 @@ guest_bypass:
mftb r6
extsw r5,r5
16: add r5,r5,r6
- /* r5 is a guest timebase value here, convert to host TB */
- ld r4,VCORE_TB_OFFSET_APPL(r3)
- subf r5,r4,r5
std r5,VCPU_DEC_EXPIRES(r9)
/* Increment exit count, poke other threads to exit */
@@ -1211,12 +1201,12 @@ guest_bypass:
stw r0, VCPU_CPU(r9)
stw r0, VCPU_THREAD_CPU(r9)
- /* Save guest CTRL register, set runlatch to 1 */
+ /* Save guest CTRL register, set runlatch to 1 if it was clear */
mfspr r6,SPRN_CTRLF
stw r6,VCPU_CTRL(r9)
andi. r0,r6,1
bne 4f
- ori r6,r6,1
+ li r6,1
mtspr SPRN_CTRLT,r6
4:
/*
@@ -2163,9 +2153,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/* save expiry time of guest decrementer */
add r3, r3, r5
ld r4, HSTATE_KVM_VCPU(r13)
- ld r5, HSTATE_KVM_VCORE(r13)
- ld r6, VCORE_TB_OFFSET_APPL(r5)
- subf r3, r6, r3 /* convert to host TB value */
std r3, VCPU_DEC_EXPIRES(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
@@ -2186,8 +2173,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
* Also clear the runlatch bit before napping.
*/
kvm_do_nap:
- mfspr r0, SPRN_CTRLF
- clrrdi r0, r0, 1
+ li r0,0
mtspr SPRN_CTRLT, r0
li r0,1
@@ -2206,8 +2192,7 @@ kvm_nap_sequence: /* desired LPCR value in r5 */
bl isa206_idle_insn_mayloss
- mfspr r0, SPRN_CTRLF
- ori r0, r0, 1
+ li r0,1
mtspr SPRN_CTRLT, r0
mtspr SPRN_SRR1, r3
@@ -2264,9 +2249,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_TM)
/* Restore guest decrementer */
ld r3, VCPU_DEC_EXPIRES(r4)
- ld r5, HSTATE_KVM_VCORE(r13)
- ld r6, VCORE_TB_OFFSET_APPL(r5)
- add r3, r3, r6 /* convert host TB to guest TB value */
mftb r7
subf r3, r7, r3
mtspr SPRN_DEC, r3
@@ -2711,8 +2693,7 @@ kvmppc_bad_host_intr:
std r0, GPR0(r1)
std r9, GPR1(r1)
std r2, GPR2(r1)
- SAVE_4GPRS(3, r1)
- SAVE_2GPRS(7, r1)
+ SAVE_GPRS(3, 8, r1)
srdi r0, r12, 32
clrldi r12, r12, 32
std r0, _CCR(r1)
@@ -2735,7 +2716,7 @@ kvmppc_bad_host_intr:
ld r9, HSTATE_SCRATCH2(r13)
ld r12, HSTATE_SCRATCH0(r13)
GET_SCRATCH0(r0)
- SAVE_4GPRS(9, r1)
+ SAVE_GPRS(9, 12, r1)
std r0, GPR13(r1)
SAVE_NVGPRS(r1)
ld r5, HSTATE_CFAR(r13)
@@ -2778,10 +2759,11 @@ kvmppc_msr_interrupt:
blr
/*
+ * void kvmhv_load_guest_pmu(struct kvm_vcpu *vcpu)
+ *
* Load up guest PMU state. R3 points to the vcpu struct.
*/
-_GLOBAL(kvmhv_load_guest_pmu)
-EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu)
+kvmhv_load_guest_pmu:
mr r4, r3
mflr r0
li r3, 1
@@ -2816,26 +2798,16 @@ END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
mtspr SPRN_SIAR, r7
mtspr SPRN_SDAR, r8
BEGIN_FTR_SECTION
- ld r5, VCPU_MMCR + 24(r4)
- ld r6, VCPU_SIER + 8(r4)
- ld r7, VCPU_SIER + 16(r4)
- mtspr SPRN_MMCR3, r5
- mtspr SPRN_SIER2, r6
- mtspr SPRN_SIER3, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
-BEGIN_FTR_SECTION
ld r5, VCPU_MMCR + 16(r4)
ld r6, VCPU_SIER(r4)
mtspr SPRN_MMCR2, r5
mtspr SPRN_SIER, r6
-BEGIN_FTR_SECTION_NESTED(96)
lwz r7, VCPU_PMC + 24(r4)
lwz r8, VCPU_PMC + 28(r4)
ld r9, VCPU_MMCRS(r4)
mtspr SPRN_SPMC1, r7
mtspr SPRN_SPMC2, r8
mtspr SPRN_MMCRS, r9
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtspr SPRN_MMCR0, r3
isync
@@ -2843,10 +2815,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
blr
/*
+ * void kvmhv_load_host_pmu(void)
+ *
* Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
*/
-_GLOBAL(kvmhv_load_host_pmu)
-EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu)
+kvmhv_load_host_pmu:
mflr r0
lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
cmpwi r4, 0
@@ -2884,25 +2857,18 @@ BEGIN_FTR_SECTION
mtspr SPRN_MMCR2, r8
mtspr SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-BEGIN_FTR_SECTION
- ld r5, HSTATE_MMCR3(r13)
- ld r6, HSTATE_SIER2(r13)
- ld r7, HSTATE_SIER3(r13)
- mtspr SPRN_MMCR3, r5
- mtspr SPRN_SIER2, r6
- mtspr SPRN_SIER3, r7
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
mtspr SPRN_MMCR0, r3
isync
mtlr r0
23: blr
/*
+ * void kvmhv_save_guest_pmu(struct kvm_vcpu *vcpu, bool pmu_in_use)
+ *
* Save guest PMU state into the vcpu struct.
* r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
*/
-_GLOBAL(kvmhv_save_guest_pmu)
-EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
+kvmhv_save_guest_pmu:
mr r9, r3
mr r8, r4
BEGIN_FTR_SECTION
@@ -2951,14 +2917,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
std r10, VCPU_MMCR + 16(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
-BEGIN_FTR_SECTION
- mfspr r5, SPRN_MMCR3
- mfspr r6, SPRN_SIER2
- mfspr r7, SPRN_SIER3
- std r5, VCPU_MMCR + 24(r9)
- std r6, VCPU_SIER + 8(r9)
- std r7, VCPU_SIER + 16(r9)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
std r7, VCPU_SIAR(r9)
std r8, VCPU_SDAR(r9)
mfspr r3, SPRN_PMC1
@@ -2976,7 +2934,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_31)
BEGIN_FTR_SECTION
mfspr r5, SPRN_SIER
std r5, VCPU_SIER(r9)
-BEGIN_FTR_SECTION_NESTED(96)
mfspr r6, SPRN_SPMC1
mfspr r7, SPRN_SPMC2
mfspr r8, SPRN_MMCRS
@@ -2985,7 +2942,6 @@ BEGIN_FTR_SECTION_NESTED(96)
std r8, VCPU_MMCRS(r9)
lis r4, 0x8000
mtspr SPRN_MMCRS, r4
-END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22: blr
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 28c436df9935..e414ca44839f 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -459,7 +459,7 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot, *m;
int ret = H_SUCCESS;
- int srcu_idx;
+ int srcu_idx, bkt;
kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;
@@ -478,7 +478,7 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
/* register the memslot */
slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots) {
+ kvm_for_each_memslot(memslot, bkt, slots) {
ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
if (ret)
break;
@@ -486,7 +486,7 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
if (ret) {
slots = kvm_memslots(kvm);
- kvm_for_each_memslot(m, slots) {
+ kvm_for_each_memslot(m, bkt, slots) {
if (m == memslot)
break;
__kvmppc_uvmem_memslot_delete(kvm, memslot);
@@ -647,7 +647,7 @@ void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
- int srcu_idx;
+ int srcu_idx, bkt;
struct kvm_memory_slot *memslot;
/*
@@ -662,7 +662,7 @@ unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
srcu_idx = srcu_read_lock(&kvm->srcu);
- kvm_for_each_memslot(memslot, kvm_memslots(kvm))
+ kvm_for_each_memslot(memslot, bkt, kvm_memslots(kvm))
kvmppc_uvmem_drop_pages(memslot, kvm, false);
srcu_read_unlock(&kvm->srcu, srcu_idx);
@@ -821,7 +821,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
- int srcu_idx;
+ int srcu_idx, bkt;
long ret = H_SUCCESS;
if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
@@ -830,7 +830,7 @@ unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
	/* migrate any unmoved normal pfns to device pfns */
srcu_idx = srcu_read_lock(&kvm->srcu);
slots = kvm_memslots(kvm);
- kvm_for_each_memslot(memslot, slots) {
+ kvm_for_each_memslot(memslot, bkt, slots) {
ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
if (ret) {
/*
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 6bc9425acb32..34a801c3604a 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -428,7 +428,7 @@ static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
/************* MMU Notifiers *************/
static bool do_kvm_unmap_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
- long i;
+ unsigned long i;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm)
@@ -492,7 +492,7 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
if (msr & MSR_POW) {
if (!vcpu->arch.pending_exceptions) {
- kvm_vcpu_block(vcpu);
+ kvm_vcpu_halt(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu->stat.generic.halt_wakeup++;
@@ -1899,16 +1899,15 @@ static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
}
static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
return 0;
}
static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index ac14239f3424..1f10e7dfcdd0 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -376,7 +376,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
return kvmppc_h_pr_stuff_tce(vcpu);
case H_CEDE:
kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
- kvm_vcpu_block(vcpu);
+ kvm_vcpu_halt(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
vcpu->stat.generic.halt_wakeup++;
return EMULATE_DONE;
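
The kvm_vcpu_block() to kvm_vcpu_halt() conversions in these files track the
generic KVM rename from the same series; as I understand it, kvm_vcpu_halt()
also performs halt-polling before blocking. The resulting cede idiom,
consolidated from the hunks above:

	kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
	kvm_vcpu_halt(vcpu);				/* poll, then block */
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);	/* still cleared by hand */
	vcpu->stat.generic.halt_wakeup++;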
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index ebd5d920de8c..9cc466006e8b 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -942,8 +942,8 @@ static int xics_debug_show(struct seq_file *m, void *private)
struct kvmppc_xics *xics = m->private;
struct kvm *kvm = xics->kvm;
struct kvm_vcpu *vcpu;
- int icsid, i;
- unsigned long flags;
+ int icsid;
+ unsigned long flags, i;
unsigned long t_rm_kick_vcpu, t_rm_check_resend;
unsigned long t_rm_notify_eoi;
unsigned long t_reject, t_check_resend;
@@ -1340,7 +1340,7 @@ static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
static void kvmppc_xics_release(struct kvm_device *dev)
{
struct kvmppc_xics *xics = dev->private;
- int i;
+ unsigned long i;
struct kvm *kvm = xics->kvm;
struct kvm_vcpu *vcpu;
diff --git a/arch/powerpc/kvm/book3s_xics.h b/arch/powerpc/kvm/book3s_xics.h
index 6231f76bdd66..8e4c79e2fcd8 100644
--- a/arch/powerpc/kvm/book3s_xics.h
+++ b/arch/powerpc/kvm/book3s_xics.h
@@ -116,7 +116,7 @@ static inline struct kvmppc_icp *kvmppc_xics_find_server(struct kvm *kvm,
u32 nr)
{
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.icp && nr == vcpu->arch.icp->server_num)
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index 225008882958..e216c068075d 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -368,7 +368,8 @@ static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
struct kvmppc_xive *xive = kvm->arch.xive;
struct kvm_vcpu *vcpu;
- int i, rc;
+ unsigned long i;
+ int rc;
lockdep_assert_held(&xive->lock);
@@ -439,7 +440,8 @@ static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
struct kvm_vcpu *vcpu;
- int i, rc;
+ unsigned long i;
+ int rc;
/* Locate target server */
vcpu = kvmppc_xive_find_server(kvm, *server);
@@ -1519,7 +1521,8 @@ static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
static void xive_pre_save_scan(struct kvmppc_xive *xive)
{
struct kvm_vcpu *vcpu = NULL;
- int i, j;
+ unsigned long i;
+ int j;
/*
* See comment in xive_get_source() about how this
@@ -1700,7 +1703,7 @@ static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
{
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
@@ -2037,7 +2040,7 @@ static void kvmppc_xive_release(struct kvm_device *dev)
struct kvmppc_xive *xive = dev->private;
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
pr_devel("Releasing xive device\n");
@@ -2291,7 +2294,7 @@ static int xive_debug_show(struct seq_file *m, void *private)
u64 t_vm_h_cppr = 0;
u64 t_vm_h_eoi = 0;
u64 t_vm_h_ipi = 0;
- unsigned int i;
+ unsigned long i;
if (!kvm)
return 0;
diff --git a/arch/powerpc/kvm/book3s_xive.h b/arch/powerpc/kvm/book3s_xive.h
index e6a9651c6f1e..09d0657596c3 100644
--- a/arch/powerpc/kvm/book3s_xive.h
+++ b/arch/powerpc/kvm/book3s_xive.h
@@ -199,7 +199,7 @@ struct kvmppc_xive_vcpu {
static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
{
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
@@ -240,7 +240,7 @@ static inline u32 kvmppc_xive_vp(struct kvmppc_xive *xive, u32 server)
static inline bool kvmppc_xive_vp_in_use(struct kvm *kvm, u32 vp_id)
{
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->arch.xive_vcpu && vp_id == vcpu->arch.xive_vcpu->vp_id)
diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
index 99db9ac49901..561a5bfe0468 100644
--- a/arch/powerpc/kvm/book3s_xive_native.c
+++ b/arch/powerpc/kvm/book3s_xive_native.c
@@ -807,7 +807,7 @@ static int kvmppc_xive_reset(struct kvmppc_xive *xive)
{
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu;
- unsigned int i;
+ unsigned long i;
pr_devel("%s\n", __func__);
@@ -916,7 +916,7 @@ static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
{
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu;
- unsigned int i;
+ unsigned long i;
pr_devel("%s\n", __func__);
@@ -1017,7 +1017,7 @@ static void kvmppc_xive_native_release(struct kvm_device *dev)
struct kvmppc_xive *xive = dev->private;
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
pr_devel("Releasing xive native device\n");
@@ -1214,7 +1214,7 @@ static int xive_native_debug_show(struct seq_file *m, void *private)
struct kvmppc_xive *xive = m->private;
struct kvm *kvm = xive->kvm;
struct kvm_vcpu *vcpu;
- unsigned int i;
+ unsigned long i;
if (!kvm)
return 0;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 8c15c90dd3a9..06c5830a93f9 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -718,7 +718,7 @@ int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
if (vcpu->arch.shared->msr & MSR_WE) {
local_irq_enable();
- kvm_vcpu_block(vcpu);
+ kvm_vcpu_halt(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
hard_irq_disable();
@@ -1821,16 +1821,15 @@ void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
return 0;
}
void kvmppc_core_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
- const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 64eb833e9f02..051102d50c31 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -65,7 +65,7 @@ static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
ulong param = vcpu->arch.regs.gpr[rb];
int prio = dbell2prio(rb);
int pir = param & PPC_DBELL_PIR_MASK;
- int i;
+ unsigned long i;
struct kvm_vcpu *cvcpu;
if (prio < 0)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index a72920f4f221..2ad0ccd202d5 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -236,7 +236,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
break;
case EV_HCALL_TOKEN(EV_IDLE):
r = EV_SUCCESS;
- kvm_vcpu_block(vcpu);
+ kvm_vcpu_halt(vcpu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
break;
default:
@@ -463,9 +463,6 @@ err_out:
void kvm_arch_destroy_vm(struct kvm *kvm)
{
- unsigned int i;
- struct kvm_vcpu *vcpu;
-
#ifdef CONFIG_KVM_XICS
/*
* We call kick_all_cpus_sync() to ensure that all
@@ -476,14 +473,9 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
kick_all_cpus_sync();
#endif
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_vcpu_destroy(vcpu);
+ kvm_destroy_vcpus(kvm);
mutex_lock(&kvm->lock);
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
- kvm->vcpus[i] = NULL;
-
- atomic_set(&kvm->online_vcpus, 0);
kvmppc_core_destroy_vm(kvm);
@@ -706,20 +698,19 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
+ return kvmppc_core_prepare_memory_region(kvm, old, new, change);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- kvmppc_core_commit_memory_region(kvm, mem, old, new, change);
+ kvmppc_core_commit_memory_region(kvm, old, new, change);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
@@ -762,7 +753,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
if (err)
goto out_vcpu_uninit;
- vcpu->arch.waitp = &vcpu->wait;
+ rcuwait_init(&vcpu->arch.wait);
+ vcpu->arch.waitp = &vcpu->arch.wait;
kvmppc_create_vcpu_debugfs(vcpu, vcpu->vcpu_id);
return 0;
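
Two generic-KVM API updates meet in powerpc.c: the memslot hooks drop the struct kvm_userspace_memory_region argument in favour of the old and new memslots themselves, and the hand-rolled vCPU teardown loop collapses into kvm_destroy_vcpus(). A sketch of an arch hook written against the new signature (arch_map_slot() is a hypothetical helper):

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/* Everything about the slot is now read from old/new directly. */
	if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
		return arch_map_slot(kvm, new);	/* hypothetical */

	return 0;
}
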
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 9e5d0f413b71..5d1881d2e39a 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -19,7 +19,12 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
endif
-obj-y += alloc.o code-patching.o feature-fixups.o pmem.o test_code-patching.o
+CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+
+obj-y += alloc.o code-patching.o feature-fixups.o pmem.o
+
+obj-$(CONFIG_CODE_PATCHING_SELFTEST) += test-code-patching.o
ifndef CONFIG_KASAN
obj-y += string.o memcmp_$(BITS).o
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index c5ed98823835..906d43463366 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -3,22 +3,18 @@
* Copyright 2008 Michael Ellerman, IBM Corporation.
*/
-#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
-#include <linux/mm.h>
#include <linux/cpuhotplug.h>
-#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
-#include <asm/setup.h>
#include <asm/inst.h>
-static int __patch_instruction(u32 *exec_addr, struct ppc_inst instr, u32 *patch_addr)
+static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
{
if (!ppc_inst_prefixed(instr)) {
u32 val = ppc_inst_val(instr);
@@ -39,7 +35,7 @@ failed:
return -EFAULT;
}
-int raw_patch_instruction(u32 *addr, struct ppc_inst instr)
+int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
return __patch_instruction(addr, instr, addr);
}
@@ -86,23 +82,16 @@ void __init poking_init(void)
static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
unsigned long pfn;
- int err;
if (is_vmalloc_or_module_addr(addr))
pfn = vmalloc_to_pfn(addr);
else
pfn = __pa_symbol(addr) >> PAGE_SHIFT;
- err = map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
-
- pr_devel("Mapped addr %lx with pfn %lx:%d\n", text_poke_addr, pfn, err);
- if (err)
- return -1;
-
- return 0;
+ return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
}
-static inline int unmap_patch_area(unsigned long addr)
+static void unmap_patch_area(unsigned long addr)
{
pte_t *ptep;
pmd_t *pmdp;
@@ -111,43 +100,56 @@ static inline int unmap_patch_area(unsigned long addr)
pgd_t *pgdp;
pgdp = pgd_offset_k(addr);
- if (unlikely(!pgdp))
- return -EINVAL;
+ if (WARN_ON(pgd_none(*pgdp)))
+ return;
p4dp = p4d_offset(pgdp, addr);
- if (unlikely(!p4dp))
- return -EINVAL;
+ if (WARN_ON(p4d_none(*p4dp)))
+ return;
pudp = pud_offset(p4dp, addr);
- if (unlikely(!pudp))
- return -EINVAL;
+ if (WARN_ON(pud_none(*pudp)))
+ return;
pmdp = pmd_offset(pudp, addr);
- if (unlikely(!pmdp))
- return -EINVAL;
+ if (WARN_ON(pmd_none(*pmdp)))
+ return;
ptep = pte_offset_kernel(pmdp, addr);
- if (unlikely(!ptep))
- return -EINVAL;
-
- pr_devel("clearing mm %p, pte %p, addr %lx\n", &init_mm, ptep, addr);
+ if (WARN_ON(pte_none(*ptep)))
+ return;
/*
 * In hash, pte_clear flushes the TLB itself; in radix, we have to
 * flush it explicitly with flush_tlb_kernel_range() below.
 */
pte_clear(&init_mm, addr, ptep);
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+}
- return 0;
+static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
+{
+ int err;
+ u32 *patch_addr;
+ unsigned long text_poke_addr;
+
+ text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
+ patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
+
+ err = map_patch_area(addr, text_poke_addr);
+ if (err)
+ return err;
+
+ err = __patch_instruction(addr, instr, patch_addr);
+
+ unmap_patch_area(text_poke_addr);
+
+ return err;
}
-static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
+static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
int err;
- u32 *patch_addr = NULL;
unsigned long flags;
- unsigned long text_poke_addr;
- unsigned long kaddr = (unsigned long)addr;
/*
 * During very early boot patch_instruction is called
@@ -158,51 +160,37 @@ static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
return raw_patch_instruction(addr, instr);
local_irq_save(flags);
-
- text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
- if (map_patch_area(addr, text_poke_addr)) {
- err = -1;
- goto out;
- }
-
- patch_addr = (u32 *)(text_poke_addr + (kaddr & ~PAGE_MASK));
-
- __patch_instruction(addr, instr, patch_addr);
-
- err = unmap_patch_area(text_poke_addr);
- if (err)
- pr_warn("failed to unmap %lx\n", text_poke_addr);
-
-out:
+ err = __do_patch_instruction(addr, instr);
local_irq_restore(flags);
return err;
}
#else /* !CONFIG_STRICT_KERNEL_RWX */
-static int do_patch_instruction(u32 *addr, struct ppc_inst instr)
+static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
return raw_patch_instruction(addr, instr);
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
-int patch_instruction(u32 *addr, struct ppc_inst instr)
+int patch_instruction(u32 *addr, ppc_inst_t instr)
{
/* Make sure we aren't patching a freed init section */
- if (init_mem_is_free && init_section_contains(addr, 4)) {
- pr_debug("Skipping init section patching addr: 0x%px\n", addr);
+ if (system_state >= SYSTEM_FREEING_INITMEM && init_section_contains(addr, 4))
return 0;
- }
+
return do_patch_instruction(addr, instr);
}
NOKPROBE_SYMBOL(patch_instruction);
int patch_branch(u32 *addr, unsigned long target, int flags)
{
- struct ppc_inst instr;
+ ppc_inst_t instr;
+
+ if (create_branch(&instr, addr, target, flags))
+ return -ERANGE;
- create_branch(&instr, addr, target, flags);
return patch_instruction(addr, instr);
}
@@ -237,7 +225,7 @@ bool is_offset_in_cond_branch_range(long offset)
* Helper to check if a given instruction is a conditional branch
* Derived from the conditional checks in analyse_instr()
*/
-bool is_conditional_branch(struct ppc_inst instr)
+bool is_conditional_branch(ppc_inst_t instr)
{
unsigned int opcode = ppc_inst_primary_opcode(instr);
@@ -255,7 +243,7 @@ bool is_conditional_branch(struct ppc_inst instr)
}
NOKPROBE_SYMBOL(is_conditional_branch);
-int create_branch(struct ppc_inst *instr, const u32 *addr,
+int create_branch(ppc_inst_t *instr, const u32 *addr,
unsigned long target, int flags)
{
long offset;
@@ -275,7 +263,7 @@ int create_branch(struct ppc_inst *instr, const u32 *addr,
return 0;
}
-int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
+int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
unsigned long target, int flags)
{
long offset;
@@ -294,22 +282,7 @@ int create_cond_branch(struct ppc_inst *instr, const u32 *addr,
return 0;
}
-static unsigned int branch_opcode(struct ppc_inst instr)
-{
- return ppc_inst_primary_opcode(instr) & 0x3F;
-}
-
-static int instr_is_branch_iform(struct ppc_inst instr)
-{
- return branch_opcode(instr) == 18;
-}
-
-static int instr_is_branch_bform(struct ppc_inst instr)
-{
- return branch_opcode(instr) == 16;
-}
-
-int instr_is_relative_branch(struct ppc_inst instr)
+int instr_is_relative_branch(ppc_inst_t instr)
{
if (ppc_inst_val(instr) & BRANCH_ABSOLUTE)
return 0;
@@ -317,7 +290,7 @@ int instr_is_relative_branch(struct ppc_inst instr)
return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}
-int instr_is_relative_link_branch(struct ppc_inst instr)
+int instr_is_relative_link_branch(ppc_inst_t instr)
{
return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}
@@ -364,7 +337,7 @@ unsigned long branch_target(const u32 *instr)
return 0;
}
-int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src)
+int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src)
{
unsigned long target;
target = branch_target(src);
@@ -378,375 +351,3 @@ int translate_branch(struct ppc_inst *instr, const u32 *dest, const u32 *src)
return 1;
}
-
-#ifdef CONFIG_PPC_BOOK3E_64
-void __patch_exception(int exc, unsigned long addr)
-{
- extern unsigned int interrupt_base_book3e;
- unsigned int *ibase = &interrupt_base_book3e;
-
- /* Our exceptions vectors start with a NOP and -then- a branch
- * to deal with single stepping from userspace which stops on
- * the second instruction. Thus we need to patch the second
- * instruction of the exception, not the first one
- */
-
- patch_branch(ibase + (exc / 4) + 1, addr, 0);
-}
-#endif
-
-#ifdef CONFIG_CODE_PATCHING_SELFTEST
-
-static int instr_is_branch_to_addr(const u32 *instr, unsigned long addr)
-{
- if (instr_is_branch_iform(ppc_inst_read(instr)) ||
- instr_is_branch_bform(ppc_inst_read(instr)))
- return branch_target(instr) == addr;
-
- return 0;
-}
-
-static void __init test_trampoline(void)
-{
- asm ("nop;\n");
-}
-
-#define check(x) \
- if (!(x)) printk("code-patching: test failed at line %d\n", __LINE__);
-
-static void __init test_branch_iform(void)
-{
- int err;
- struct ppc_inst instr;
- u32 tmp[2];
- u32 *iptr = tmp;
- unsigned long addr = (unsigned long)tmp;
-
- /* The simplest case, branch to self, no flags */
- check(instr_is_branch_iform(ppc_inst(0x48000000)));
- /* All bits of target set, and flags */
- check(instr_is_branch_iform(ppc_inst(0x4bffffff)));
- /* High bit of opcode set, which is wrong */
- check(!instr_is_branch_iform(ppc_inst(0xcbffffff)));
- /* Middle bits of opcode set, which is wrong */
- check(!instr_is_branch_iform(ppc_inst(0x7bffffff)));
-
- /* Simplest case, branch to self with link */
- check(instr_is_branch_iform(ppc_inst(0x48000001)));
- /* All bits of targets set */
- check(instr_is_branch_iform(ppc_inst(0x4bfffffd)));
- /* Some bits of targets set */
- check(instr_is_branch_iform(ppc_inst(0x4bff00fd)));
- /* Must be a valid branch to start with */
- check(!instr_is_branch_iform(ppc_inst(0x7bfffffd)));
-
- /* Absolute branch to 0x100 */
- patch_instruction(iptr, ppc_inst(0x48000103));
- check(instr_is_branch_to_addr(iptr, 0x100));
- /* Absolute branch to 0x420fc */
- patch_instruction(iptr, ppc_inst(0x480420ff));
- check(instr_is_branch_to_addr(iptr, 0x420fc));
- /* Maximum positive relative branch, + 20MB - 4B */
- patch_instruction(iptr, ppc_inst(0x49fffffc));
- check(instr_is_branch_to_addr(iptr, addr + 0x1FFFFFC));
- /* Smallest negative relative branch, - 4B */
- patch_instruction(iptr, ppc_inst(0x4bfffffc));
- check(instr_is_branch_to_addr(iptr, addr - 4));
- /* Largest negative relative branch, - 32 MB */
- patch_instruction(iptr, ppc_inst(0x4a000000));
- check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
-
- /* Branch to self, with link */
- err = create_branch(&instr, iptr, addr, BRANCH_SET_LINK);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr));
-
- /* Branch to self - 0x100, with link */
- err = create_branch(&instr, iptr, addr - 0x100, BRANCH_SET_LINK);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr - 0x100));
-
- /* Branch to self + 0x100, no link */
- err = create_branch(&instr, iptr, addr + 0x100, 0);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr + 0x100));
-
- /* Maximum relative negative offset, - 32 MB */
- err = create_branch(&instr, iptr, addr - 0x2000000, BRANCH_SET_LINK);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
-
- /* Out of range relative negative offset, - 32 MB + 4*/
- err = create_branch(&instr, iptr, addr - 0x2000004, BRANCH_SET_LINK);
- check(err);
-
- /* Out of range relative positive offset, + 32 MB */
- err = create_branch(&instr, iptr, addr + 0x2000000, BRANCH_SET_LINK);
- check(err);
-
- /* Unaligned target */
- err = create_branch(&instr, iptr, addr + 3, BRANCH_SET_LINK);
- check(err);
-
- /* Check flags are masked correctly */
- err = create_branch(&instr, iptr, addr, 0xFFFFFFFC);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr));
- check(ppc_inst_equal(instr, ppc_inst(0x48000000)));
-}
-
-static void __init test_create_function_call(void)
-{
- u32 *iptr;
- unsigned long dest;
- struct ppc_inst instr;
-
- /* Check we can create a function call */
- iptr = (u32 *)ppc_function_entry(test_trampoline);
- dest = ppc_function_entry(test_create_function_call);
- create_branch(&instr, iptr, dest, BRANCH_SET_LINK);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, dest));
-}
-
-static void __init test_branch_bform(void)
-{
- int err;
- unsigned long addr;
- struct ppc_inst instr;
- u32 tmp[2];
- u32 *iptr = tmp;
- unsigned int flags;
-
- addr = (unsigned long)iptr;
-
- /* The simplest case, branch to self, no flags */
- check(instr_is_branch_bform(ppc_inst(0x40000000)));
- /* All bits of target set, and flags */
- check(instr_is_branch_bform(ppc_inst(0x43ffffff)));
- /* High bit of opcode set, which is wrong */
- check(!instr_is_branch_bform(ppc_inst(0xc3ffffff)));
- /* Middle bits of opcode set, which is wrong */
- check(!instr_is_branch_bform(ppc_inst(0x7bffffff)));
-
- /* Absolute conditional branch to 0x100 */
- patch_instruction(iptr, ppc_inst(0x43ff0103));
- check(instr_is_branch_to_addr(iptr, 0x100));
- /* Absolute conditional branch to 0x20fc */
- patch_instruction(iptr, ppc_inst(0x43ff20ff));
- check(instr_is_branch_to_addr(iptr, 0x20fc));
- /* Maximum positive relative conditional branch, + 32 KB - 4B */
- patch_instruction(iptr, ppc_inst(0x43ff7ffc));
- check(instr_is_branch_to_addr(iptr, addr + 0x7FFC));
- /* Smallest negative relative conditional branch, - 4B */
- patch_instruction(iptr, ppc_inst(0x43fffffc));
- check(instr_is_branch_to_addr(iptr, addr - 4));
- /* Largest negative relative conditional branch, - 32 KB */
- patch_instruction(iptr, ppc_inst(0x43ff8000));
- check(instr_is_branch_to_addr(iptr, addr - 0x8000));
-
- /* All condition code bits set & link */
- flags = 0x3ff000 | BRANCH_SET_LINK;
-
- /* Branch to self */
- err = create_cond_branch(&instr, iptr, addr, flags);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr));
-
- /* Branch to self - 0x100 */
- err = create_cond_branch(&instr, iptr, addr - 0x100, flags);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr - 0x100));
-
- /* Branch to self + 0x100 */
- err = create_cond_branch(&instr, iptr, addr + 0x100, flags);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr + 0x100));
-
- /* Maximum relative negative offset, - 32 KB */
- err = create_cond_branch(&instr, iptr, addr - 0x8000, flags);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr - 0x8000));
-
- /* Out of range relative negative offset, - 32 KB + 4*/
- err = create_cond_branch(&instr, iptr, addr - 0x8004, flags);
- check(err);
-
- /* Out of range relative positive offset, + 32 KB */
- err = create_cond_branch(&instr, iptr, addr + 0x8000, flags);
- check(err);
-
- /* Unaligned target */
- err = create_cond_branch(&instr, iptr, addr + 3, flags);
- check(err);
-
- /* Check flags are masked correctly */
- err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC);
- patch_instruction(iptr, instr);
- check(instr_is_branch_to_addr(iptr, addr));
- check(ppc_inst_equal(instr, ppc_inst(0x43FF0000)));
-}
-
-static void __init test_translate_branch(void)
-{
- unsigned long addr;
- void *p, *q;
- struct ppc_inst instr;
- void *buf;
-
- buf = vmalloc(PAGE_ALIGN(0x2000000 + 1));
- check(buf);
- if (!buf)
- return;
-
- /* Simple case, branch to self moved a little */
- p = buf;
- addr = (unsigned long)p;
- patch_branch(p, addr, 0);
- check(instr_is_branch_to_addr(p, addr));
- q = p + 4;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(q, addr));
-
- /* Maximum negative case, move b . to addr + 32 MB */
- p = buf;
- addr = (unsigned long)p;
- patch_branch(p, addr, 0);
- q = buf + 0x2000000;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
- check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x4a000000)));
-
- /* Maximum positive case, move x to x - 32 MB + 4 */
- p = buf + 0x2000000;
- addr = (unsigned long)p;
- patch_branch(p, addr, 0);
- q = buf + 4;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
- check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x49fffffc)));
-
- /* Jump to x + 16 MB moved to x + 20 MB */
- p = buf;
- addr = 0x1000000 + (unsigned long)buf;
- patch_branch(p, addr, BRANCH_SET_LINK);
- q = buf + 0x1400000;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
-
- /* Jump to x + 16 MB moved to x - 16 MB + 4 */
- p = buf + 0x1000000;
- addr = 0x2000000 + (unsigned long)buf;
- patch_branch(p, addr, 0);
- q = buf + 4;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
-
-
- /* Conditional branch tests */
-
- /* Simple case, branch to self moved a little */
- p = buf;
- addr = (unsigned long)p;
- create_cond_branch(&instr, p, addr, 0);
- patch_instruction(p, instr);
- check(instr_is_branch_to_addr(p, addr));
- q = buf + 4;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(q, addr));
-
- /* Maximum negative case, move b . to addr + 32 KB */
- p = buf;
- addr = (unsigned long)p;
- create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
- patch_instruction(p, instr);
- q = buf + 0x8000;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
- check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff8000)));
-
- /* Maximum positive case, move x to x - 32 KB + 4 */
- p = buf + 0x8000;
- addr = (unsigned long)p;
- create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
- patch_instruction(p, instr);
- q = buf + 4;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
- check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff7ffc)));
-
- /* Jump to x + 12 KB moved to x + 20 KB */
- p = buf;
- addr = 0x3000 + (unsigned long)buf;
- create_cond_branch(&instr, p, addr, BRANCH_SET_LINK);
- patch_instruction(p, instr);
- q = buf + 0x5000;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
-
- /* Jump to x + 8 KB moved to x - 8 KB + 4 */
- p = buf + 0x2000;
- addr = 0x4000 + (unsigned long)buf;
- create_cond_branch(&instr, p, addr, 0);
- patch_instruction(p, instr);
- q = buf + 4;
- translate_branch(&instr, q, p);
- patch_instruction(q, instr);
- check(instr_is_branch_to_addr(p, addr));
- check(instr_is_branch_to_addr(q, addr));
-
- /* Free the buffer we were using */
- vfree(buf);
-}
-
-#ifdef CONFIG_PPC64
-static void __init test_prefixed_patching(void)
-{
- extern unsigned int code_patching_test1[];
- extern unsigned int code_patching_test1_expected[];
- extern unsigned int end_code_patching_test1[];
-
- __patch_instruction(code_patching_test1,
- ppc_inst_prefix(OP_PREFIX << 26, 0x00000000),
- code_patching_test1);
-
- check(!memcmp(code_patching_test1,
- code_patching_test1_expected,
- sizeof(unsigned int) *
- (end_code_patching_test1 - code_patching_test1)));
-}
-#else
-static inline void test_prefixed_patching(void) {}
-#endif
-
-static int __init test_code_patching(void)
-{
- printk(KERN_DEBUG "Running code patching self-tests ...\n");
-
- test_branch_iform();
- test_branch_bform();
- test_create_function_call();
- test_translate_branch();
- test_prefixed_patching();
-
- return 0;
-}
-late_initcall(test_code_patching);
-
-#endif /* CONFIG_CODE_PATCHING_SELFTEST */
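
Beyond the struct ppc_inst -> ppc_inst_t spelling and the relocation of the self-tests to test-code-patching.c (the new file appears below), the behavioural change worth noting is that patch_branch() now returns -ERANGE when create_branch() cannot encode the target, instead of patching the result of an unchecked create_branch(). An illustrative caller (install_branch() is hypothetical):

static int install_branch(u32 *site, void *target)
{
	int err = patch_branch(site, (unsigned long)target, BRANCH_SET_LINK);

	if (err)	/* -ERANGE: offset not encodable as a branch */
		pr_warn("branch to %px out of range\n", target);

	return err;
}
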
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index c3e06922468b..343a78826035 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -47,7 +47,7 @@ static u32 *calc_addr(struct fixup_entry *fcur, long offset)
static int patch_alt_instruction(u32 *src, u32 *dest, u32 *alt_start, u32 *alt_end)
{
int err;
- struct ppc_inst instr;
+ ppc_inst_t instr;
instr = ppc_inst_read(src);
@@ -580,7 +580,7 @@ void do_barrier_nospec_fixups_range(bool enable, void *fixup_start, void *fixup_
printk(KERN_DEBUG "barrier-nospec: patched %d locations\n", i);
}
-static void patch_btb_flush_section(long *curr)
+static void __init patch_btb_flush_section(long *curr)
{
unsigned int *start, *end;
@@ -592,7 +592,7 @@ static void patch_btb_flush_section(long *curr)
}
}
-void do_btb_flush_fixups(void)
+void __init do_btb_flush_fixups(void)
{
long *start, *end;
@@ -621,10 +621,10 @@ void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
}
}
-static void do_final_fixups(void)
+static void __init do_final_fixups(void)
{
#if defined(CONFIG_PPC64) && defined(CONFIG_RELOCATABLE)
- struct ppc_inst inst;
+ ppc_inst_t inst;
u32 *src, *dest, *end;
if (PHYSICAL_START == 0)
@@ -715,12 +715,12 @@ late_initcall(check_features);
/* This must be after the text it fixes up, vmlinux.lds.S enforces that atm */
static struct fixup_entry fixup;
-static long calc_offset(struct fixup_entry *entry, unsigned int *p)
+static long __init calc_offset(struct fixup_entry *entry, unsigned int *p)
{
return (unsigned long)p - (unsigned long)entry;
}
-static void test_basic_patching(void)
+static void __init test_basic_patching(void)
{
extern unsigned int ftr_fixup_test1[];
extern unsigned int end_ftr_fixup_test1[];
@@ -751,7 +751,7 @@ static void test_basic_patching(void)
check(memcmp(ftr_fixup_test1, ftr_fixup_test1_expected, size) == 0);
}
-static void test_alternative_patching(void)
+static void __init test_alternative_patching(void)
{
extern unsigned int ftr_fixup_test2[];
extern unsigned int end_ftr_fixup_test2[];
@@ -784,7 +784,7 @@ static void test_alternative_patching(void)
check(memcmp(ftr_fixup_test2, ftr_fixup_test2_expected, size) == 0);
}
-static void test_alternative_case_too_big(void)
+static void __init test_alternative_case_too_big(void)
{
extern unsigned int ftr_fixup_test3[];
extern unsigned int end_ftr_fixup_test3[];
@@ -810,7 +810,7 @@ static void test_alternative_case_too_big(void)
check(memcmp(ftr_fixup_test3, ftr_fixup_test3_orig, size) == 0);
}
-static void test_alternative_case_too_small(void)
+static void __init test_alternative_case_too_small(void)
{
extern unsigned int ftr_fixup_test4[];
extern unsigned int end_ftr_fixup_test4[];
@@ -856,7 +856,7 @@ static void test_alternative_case_with_branch(void)
check(memcmp(ftr_fixup_test5, ftr_fixup_test5_expected, size) == 0);
}
-static void test_alternative_case_with_external_branch(void)
+static void __init test_alternative_case_with_external_branch(void)
{
extern unsigned int ftr_fixup_test6[];
extern unsigned int end_ftr_fixup_test6[];
@@ -866,7 +866,7 @@ static void test_alternative_case_with_external_branch(void)
check(memcmp(ftr_fixup_test6, ftr_fixup_test6_expected, size) == 0);
}
-static void test_alternative_case_with_branch_to_end(void)
+static void __init test_alternative_case_with_branch_to_end(void)
{
extern unsigned int ftr_fixup_test7[];
extern unsigned int end_ftr_fixup_test7[];
@@ -876,7 +876,7 @@ static void test_alternative_case_with_branch_to_end(void)
check(memcmp(ftr_fixup_test7, ftr_fixup_test7_expected, size) == 0);
}
-static void test_cpu_macros(void)
+static void __init test_cpu_macros(void)
{
extern u8 ftr_fixup_test_FTR_macros[];
extern u8 ftr_fixup_test_FTR_macros_expected[];
@@ -888,7 +888,7 @@ static void test_cpu_macros(void)
ftr_fixup_test_FTR_macros_expected, size) == 0);
}
-static void test_fw_macros(void)
+static void __init test_fw_macros(void)
{
#ifdef CONFIG_PPC64
extern u8 ftr_fixup_test_FW_FTR_macros[];
@@ -902,7 +902,7 @@ static void test_fw_macros(void)
#endif
}
-static void test_lwsync_macros(void)
+static void __init test_lwsync_macros(void)
{
extern u8 lwsync_fixup_test[];
extern u8 end_lwsync_fixup_test[];
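
The feature-fixups changes are almost entirely __init annotations: these routines run once during boot, so marking them lets their text be freed along with the rest of initmem. The usual pairing, as a sketch with hypothetical names:

/* Boot-only code: freed after init, must never be called afterwards. */
static void __init apply_my_fixups(void)
{
	/* one-time patching work */
}

static int __init my_fixups_init(void)
{
	apply_my_fixups();
	return 0;
}
late_initcall(my_fixups_init);
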
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 86f49e3e7cf5..bd3734d5be89 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1354,7 +1354,7 @@ static nokprobe_inline int trap_compare(long v1, long v2)
* otherwise.
*/
int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
- struct ppc_inst instr)
+ ppc_inst_t instr)
{
#ifdef CONFIG_PPC64
unsigned int suffixopcode, prefixtype, prefix_r;
@@ -3264,12 +3264,14 @@ void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op)
case BARRIER_EIEIO:
eieio();
break;
+#ifdef CONFIG_PPC64
case BARRIER_LWSYNC:
asm volatile("lwsync" : : : "memory");
break;
case BARRIER_PTESYNC:
asm volatile("ptesync" : : : "memory");
break;
+#endif
}
break;
@@ -3578,7 +3580,7 @@ NOKPROBE_SYMBOL(emulate_loadstore);
* or -1 if the instruction is one that should not be stepped,
* such as an rfid, or a mtmsrd that would clear MSR_RI.
*/
-int emulate_step(struct pt_regs *regs, struct ppc_inst instr)
+int emulate_step(struct pt_regs *regs, ppc_inst_t instr)
{
struct instruction_op op;
int r, err, type;
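
sstep.c takes the ppc_inst_t spelling and compiles the lwsync/ptesync barrier emulation only under CONFIG_PPC64. A sketch of a caller against the updated emulate_step() signature (try_emulate() is a hypothetical helper):

#include <asm/inst.h>
#include <asm/sstep.h>

static bool try_emulate(struct pt_regs *regs)
{
	ppc_inst_t instr = ppc_inst_read((u32 *)regs->nip);

	/* 1: emulated, regs->nip advanced; 0: not emulated; -1: do not step */
	return emulate_step(regs, instr) > 0;
}
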
diff --git a/arch/powerpc/lib/test-code-patching.c b/arch/powerpc/lib/test-code-patching.c
new file mode 100644
index 000000000000..c44823292f73
--- /dev/null
+++ b/arch/powerpc/lib/test-code-patching.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2008 Michael Ellerman, IBM Corporation.
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+
+#include <asm/code-patching.h>
+
+static int __init instr_is_branch_to_addr(const u32 *instr, unsigned long addr)
+{
+ if (instr_is_branch_iform(ppc_inst_read(instr)) ||
+ instr_is_branch_bform(ppc_inst_read(instr)))
+ return branch_target(instr) == addr;
+
+ return 0;
+}
+
+static void __init test_trampoline(void)
+{
+ asm ("nop;nop;\n");
+}
+
+#define check(x) do { \
+ if (!(x)) \
+ pr_err("code-patching: test failed at line %d\n", __LINE__); \
+} while (0)
+
+static void __init test_branch_iform(void)
+{
+ int err;
+ ppc_inst_t instr;
+ u32 tmp[2];
+ u32 *iptr = tmp;
+ unsigned long addr = (unsigned long)tmp;
+
+ /* The simplest case, branch to self, no flags */
+ check(instr_is_branch_iform(ppc_inst(0x48000000)));
+ /* All bits of target set, and flags */
+ check(instr_is_branch_iform(ppc_inst(0x4bffffff)));
+ /* High bit of opcode set, which is wrong */
+ check(!instr_is_branch_iform(ppc_inst(0xcbffffff)));
+ /* Middle bits of opcode set, which is wrong */
+ check(!instr_is_branch_iform(ppc_inst(0x7bffffff)));
+
+ /* Simplest case, branch to self with link */
+ check(instr_is_branch_iform(ppc_inst(0x48000001)));
+ /* All bits of targets set */
+ check(instr_is_branch_iform(ppc_inst(0x4bfffffd)));
+ /* Some bits of targets set */
+ check(instr_is_branch_iform(ppc_inst(0x4bff00fd)));
+ /* Must be a valid branch to start with */
+ check(!instr_is_branch_iform(ppc_inst(0x7bfffffd)));
+
+ /* Absolute branch to 0x100 */
+ ppc_inst_write(iptr, ppc_inst(0x48000103));
+ check(instr_is_branch_to_addr(iptr, 0x100));
+ /* Absolute branch to 0x420fc */
+ ppc_inst_write(iptr, ppc_inst(0x480420ff));
+ check(instr_is_branch_to_addr(iptr, 0x420fc));
+ /* Maximum positive relative branch, + 20MB - 4B */
+ ppc_inst_write(iptr, ppc_inst(0x49fffffc));
+ check(instr_is_branch_to_addr(iptr, addr + 0x1FFFFFC));
+ /* Smallest negative relative branch, - 4B */
+ ppc_inst_write(iptr, ppc_inst(0x4bfffffc));
+ check(instr_is_branch_to_addr(iptr, addr - 4));
+ /* Largest negative relative branch, - 32 MB */
+ ppc_inst_write(iptr, ppc_inst(0x4a000000));
+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
+
+ /* Branch to self, with link */
+ err = create_branch(&instr, iptr, addr, BRANCH_SET_LINK);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
+
+ /* Branch to self - 0x100, with link */
+ err = create_branch(&instr, iptr, addr - 0x100, BRANCH_SET_LINK);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
+
+ /* Branch to self + 0x100, no link */
+ err = create_branch(&instr, iptr, addr + 0x100, 0);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
+
+ /* Maximum relative negative offset, - 32 MB */
+ err = create_branch(&instr, iptr, addr - 0x2000000, BRANCH_SET_LINK);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x2000000));
+
+ /* Out of range relative negative offset, - 32 MB + 4 */
+ err = create_branch(&instr, iptr, addr - 0x2000004, BRANCH_SET_LINK);
+ check(err);
+
+ /* Out of range relative positive offset, + 32 MB */
+ err = create_branch(&instr, iptr, addr + 0x2000000, BRANCH_SET_LINK);
+ check(err);
+
+ /* Unaligned target */
+ err = create_branch(&instr, iptr, addr + 3, BRANCH_SET_LINK);
+ check(err);
+
+ /* Check flags are masked correctly */
+ err = create_branch(&instr, iptr, addr, 0xFFFFFFFC);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
+ check(ppc_inst_equal(instr, ppc_inst(0x48000000)));
+}
+
+static void __init test_create_function_call(void)
+{
+ u32 *iptr;
+ unsigned long dest;
+ ppc_inst_t instr;
+
+ /* Check we can create a function call */
+ iptr = (u32 *)ppc_function_entry(test_trampoline);
+ dest = ppc_function_entry(test_create_function_call);
+ create_branch(&instr, iptr, dest, BRANCH_SET_LINK);
+ patch_instruction(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, dest));
+}
+
+static void __init test_branch_bform(void)
+{
+ int err;
+ unsigned long addr;
+ ppc_inst_t instr;
+ u32 tmp[2];
+ u32 *iptr = tmp;
+ unsigned int flags;
+
+ addr = (unsigned long)iptr;
+
+ /* The simplest case, branch to self, no flags */
+ check(instr_is_branch_bform(ppc_inst(0x40000000)));
+ /* All bits of target set, and flags */
+ check(instr_is_branch_bform(ppc_inst(0x43ffffff)));
+ /* High bit of opcode set, which is wrong */
+ check(!instr_is_branch_bform(ppc_inst(0xc3ffffff)));
+ /* Middle bits of opcode set, which is wrong */
+ check(!instr_is_branch_bform(ppc_inst(0x7bffffff)));
+
+ /* Absolute conditional branch to 0x100 */
+ ppc_inst_write(iptr, ppc_inst(0x43ff0103));
+ check(instr_is_branch_to_addr(iptr, 0x100));
+ /* Absolute conditional branch to 0x20fc */
+ ppc_inst_write(iptr, ppc_inst(0x43ff20ff));
+ check(instr_is_branch_to_addr(iptr, 0x20fc));
+ /* Maximum positive relative conditional branch, + 32 KB - 4B */
+ ppc_inst_write(iptr, ppc_inst(0x43ff7ffc));
+ check(instr_is_branch_to_addr(iptr, addr + 0x7FFC));
+ /* Smallest negative relative conditional branch, - 4B */
+ ppc_inst_write(iptr, ppc_inst(0x43fffffc));
+ check(instr_is_branch_to_addr(iptr, addr - 4));
+ /* Largest negative relative conditional branch, - 32 KB */
+ ppc_inst_write(iptr, ppc_inst(0x43ff8000));
+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
+
+ /* All condition code bits set & link */
+ flags = 0x3ff000 | BRANCH_SET_LINK;
+
+ /* Branch to self */
+ err = create_cond_branch(&instr, iptr, addr, flags);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
+
+ /* Branch to self - 0x100 */
+ err = create_cond_branch(&instr, iptr, addr - 0x100, flags);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x100));
+
+ /* Branch to self + 0x100 */
+ err = create_cond_branch(&instr, iptr, addr + 0x100, flags);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr + 0x100));
+
+ /* Maximum relative negative offset, - 32 KB */
+ err = create_cond_branch(&instr, iptr, addr - 0x8000, flags);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr - 0x8000));
+
+ /* Out of range relative negative offset, - 32 KB + 4 */
+ err = create_cond_branch(&instr, iptr, addr - 0x8004, flags);
+ check(err);
+
+ /* Out of range relative positive offset, + 32 KB */
+ err = create_cond_branch(&instr, iptr, addr + 0x8000, flags);
+ check(err);
+
+ /* Unaligned target */
+ err = create_cond_branch(&instr, iptr, addr + 3, flags);
+ check(err);
+
+ /* Check flags are masked correctly */
+ err = create_cond_branch(&instr, iptr, addr, 0xFFFFFFFC);
+ ppc_inst_write(iptr, instr);
+ check(instr_is_branch_to_addr(iptr, addr));
+ check(ppc_inst_equal(instr, ppc_inst(0x43FF0000)));
+}
+
+static void __init test_translate_branch(void)
+{
+ unsigned long addr;
+ void *p, *q;
+ ppc_inst_t instr;
+ void *buf;
+
+ buf = vmalloc(PAGE_ALIGN(0x2000000 + 1));
+ check(buf);
+ if (!buf)
+ return;
+
+ /* Simple case, branch to self moved a little */
+ p = buf;
+ addr = (unsigned long)p;
+ create_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ q = p + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Maximum negative case, move b . to addr + 32 MB */
+ p = buf;
+ addr = (unsigned long)p;
+ create_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ q = buf + 0x2000000;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x4a000000)));
+
+ /* Maximum positive case, move x to x - 32 MB + 4 */
+ p = buf + 0x2000000;
+ addr = (unsigned long)p;
+ create_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x49fffffc)));
+
+ /* Jump to x + 16 MB moved to x + 20 MB */
+ p = buf;
+ addr = 0x1000000 + (unsigned long)buf;
+ create_branch(&instr, p, addr, BRANCH_SET_LINK);
+ ppc_inst_write(p, instr);
+ q = buf + 0x1400000;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Jump to x + 16 MB moved to x - 16 MB + 4 */
+ p = buf + 0x1000000;
+ addr = 0x2000000 + (unsigned long)buf;
+ create_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+
+
+ /* Conditional branch tests */
+
+ /* Simple case, branch to self moved a little */
+ p = buf;
+ addr = (unsigned long)p;
+ create_cond_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Maximum negative case, move b . to addr + 32 KB */
+ p = buf;
+ addr = (unsigned long)p;
+ create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
+ ppc_inst_write(p, instr);
+ q = buf + 0x8000;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff8000)));
+
+ /* Maximum positive case, move x to x - 32 KB + 4 */
+ p = buf + 0x8000;
+ addr = (unsigned long)p;
+ create_cond_branch(&instr, p, addr, 0xFFFFFFFC);
+ ppc_inst_write(p, instr);
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+ check(ppc_inst_equal(ppc_inst_read(q), ppc_inst(0x43ff7ffc)));
+
+ /* Jump to x + 12 KB moved to x + 20 KB */
+ p = buf;
+ addr = 0x3000 + (unsigned long)buf;
+ create_cond_branch(&instr, p, addr, BRANCH_SET_LINK);
+ ppc_inst_write(p, instr);
+ q = buf + 0x5000;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Jump to x + 8 KB moved to x - 8 KB + 4 */
+ p = buf + 0x2000;
+ addr = 0x4000 + (unsigned long)buf;
+ create_cond_branch(&instr, p, addr, 0);
+ ppc_inst_write(p, instr);
+ q = buf + 4;
+ translate_branch(&instr, q, p);
+ ppc_inst_write(q, instr);
+ check(instr_is_branch_to_addr(p, addr));
+ check(instr_is_branch_to_addr(q, addr));
+
+ /* Free the buffer we were using */
+ vfree(buf);
+}
+
+static void __init test_prefixed_patching(void)
+{
+ u32 *iptr = (u32 *)ppc_function_entry(test_trampoline);
+ u32 expected[2] = {OP_PREFIX << 26, 0};
+ ppc_inst_t inst = ppc_inst_prefix(OP_PREFIX << 26, 0);
+
+ if (!IS_ENABLED(CONFIG_PPC64))
+ return;
+
+ patch_instruction(iptr, inst);
+
+ check(!memcmp(iptr, expected, sizeof(expected)));
+}
+
+static int __init test_code_patching(void)
+{
+ pr_info("Running code patching self-tests ...\n");
+
+ test_branch_iform();
+ test_branch_bform();
+ test_create_function_call();
+ test_translate_branch();
+ test_prefixed_patching();
+
+ return 0;
+}
+late_initcall(test_code_patching);
diff --git a/arch/powerpc/lib/test_code-patching.S b/arch/powerpc/lib/test_code-patching.S
deleted file mode 100644
index a9be6107844e..000000000000
--- a/arch/powerpc/lib/test_code-patching.S
+++ /dev/null
@@ -1,20 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020 IBM Corporation
- */
-#include <asm/ppc-opcode.h>
-
- .text
-
-#define globl(x) \
- .globl x; \
-x:
-
-globl(code_patching_test1)
- nop
- nop
-globl(end_code_patching_test1)
-
-globl(code_patching_test1_expected)
- .long OP_PREFIX << 26
- .long 0x0000000
diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c
index 8b4f6b3e96c4..4f141daafcff 100644
--- a/arch/powerpc/lib/test_emulate_step.c
+++ b/arch/powerpc/lib/test_emulate_step.c
@@ -792,7 +792,7 @@ static void __init test_lxvpx_stxvpx(void)
#ifdef CONFIG_VSX
static void __init test_plxvp_pstxvp(void)
{
- struct ppc_inst instr;
+ ppc_inst_t instr;
struct pt_regs regs;
union {
vector128 a;
@@ -906,7 +906,7 @@ struct compute_test {
struct {
char *descr;
unsigned long flags;
- struct ppc_inst instr;
+ ppc_inst_t instr;
struct pt_regs regs;
} subtests[MAX_SUBTESTS + 1];
};
@@ -1600,7 +1600,7 @@ static struct compute_test compute_tests[] = {
};
static int __init emulate_compute_instr(struct pt_regs *regs,
- struct ppc_inst instr,
+ ppc_inst_t instr,
bool negative)
{
int analysed;
@@ -1627,7 +1627,7 @@ static int __init emulate_compute_instr(struct pt_regs *regs,
}
static int __init execute_compute_instr(struct pt_regs *regs,
- struct ppc_inst instr)
+ ppc_inst_t instr)
{
extern int exec_instr(struct pt_regs *regs);
@@ -1658,7 +1658,7 @@ static void __init run_tests_compute(void)
struct compute_test *test;
struct pt_regs *regs, exp, got;
unsigned int i, j, k;
- struct ppc_inst instr;
+ ppc_inst_t instr;
bool ignore_gpr, ignore_xer, ignore_ccr, passed, rc, negative;
for (i = 0; i < ARRAY_SIZE(compute_tests); i++) {
diff --git a/arch/powerpc/lib/test_emulate_step_exec_instr.S b/arch/powerpc/lib/test_emulate_step_exec_instr.S
index 9ef941d958d8..5473f9d03df3 100644
--- a/arch/powerpc/lib/test_emulate_step_exec_instr.S
+++ b/arch/powerpc/lib/test_emulate_step_exec_instr.S
@@ -37,7 +37,7 @@ _GLOBAL(exec_instr)
* The stack pointer (GPR1) and the thread pointer (GPR13) are not
* saved as these should not be modified anyway.
*/
- SAVE_2GPRS(2, r1)
+ SAVE_GPRS(2, 3, r1)
SAVE_NVGPRS(r1)
/*
@@ -75,8 +75,7 @@ _GLOBAL(exec_instr)
/* Load GPRs from pt_regs */
REST_GPR(0, r31)
- REST_10GPRS(2, r31)
- REST_GPR(12, r31)
+ REST_GPRS(2, 12, r31)
REST_NVGPRS(r31)
/* Placeholder for the test instruction */
@@ -99,8 +98,7 @@ _GLOBAL(exec_instr)
subi r3, r3, GPR0
SAVE_GPR(0, r3)
SAVE_GPR(2, r3)
- SAVE_8GPRS(4, r3)
- SAVE_GPR(12, r3)
+ SAVE_GPRS(4, 12, r3)
SAVE_NVGPRS(r3)
/* Save resulting LR to pt_regs */
diff --git a/arch/powerpc/mm/book3s32/Makefile b/arch/powerpc/mm/book3s32/Makefile
index 15f4773643d2..50dd8f6bdf46 100644
--- a/arch/powerpc/mm/book3s32/Makefile
+++ b/arch/powerpc/mm/book3s32/Makefile
@@ -9,5 +9,4 @@ endif
obj-y += mmu.o mmu_context.o
obj-$(CONFIG_PPC_BOOK3S_603) += nohash_low.o
obj-$(CONFIG_PPC_BOOK3S_604) += hash_low.o tlb.o
-obj-$(CONFIG_PPC_KUEP) += kuep.o
obj-$(CONFIG_PPC_KUAP) += kuap.o
diff --git a/arch/powerpc/mm/book3s32/kuap.c b/arch/powerpc/mm/book3s32/kuap.c
index 0f920f09af57..28676cabb005 100644
--- a/arch/powerpc/mm/book3s32/kuap.c
+++ b/arch/powerpc/mm/book3s32/kuap.c
@@ -20,8 +20,11 @@ EXPORT_SYMBOL(kuap_unlock_all_ool);
void setup_kuap(bool disabled)
{
- if (!disabled)
+ if (!disabled) {
kuap_lock_all_ool();
+ init_mm.context.sr0 |= SR_KS;
+ current->thread.sr0 |= SR_KS;
+ }
if (smp_processor_id() != boot_cpuid)
return;
diff --git a/arch/powerpc/mm/book3s32/kuep.c b/arch/powerpc/mm/book3s32/kuep.c
deleted file mode 100644
index c20733d6e02c..000000000000
--- a/arch/powerpc/mm/book3s32/kuep.c
+++ /dev/null
@@ -1,20 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-
-#include <asm/kup.h>
-#include <asm/smp.h>
-
-struct static_key_false disable_kuep_key;
-
-void setup_kuep(bool disabled)
-{
- if (!disabled)
- kuep_lock();
-
- if (smp_processor_id() != boot_cpuid)
- return;
-
- if (disabled)
- static_branch_enable(&disable_kuep_key);
- else
- pr_info("Activating Kernel Userspace Execution Prevention\n");
-}
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 27061583a010..203735caf691 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -76,7 +76,7 @@ unsigned long p_block_mapped(phys_addr_t pa)
return 0;
}
-static int find_free_bat(void)
+int __init find_free_bat(void)
{
int b;
int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
@@ -100,7 +100,7 @@ static int find_free_bat(void)
* - block size has to be a power of two. This is calculated by finding the
* highest bit set to 1.
*/
-static unsigned int block_size(unsigned long base, unsigned long top)
+unsigned int bat_block_size(unsigned long base, unsigned long top)
{
unsigned int max_size = SZ_256M;
unsigned int base_shift = (ffs(base) - 1) & 31;
@@ -145,7 +145,7 @@ static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long to
int idx;
while ((idx = find_free_bat()) != -1 && base != top) {
- unsigned int size = block_size(base, top);
+ unsigned int size = bat_block_size(base, top);
if (size < 128 << 10)
break;
@@ -196,18 +196,17 @@ void mmu_mark_initmem_nx(void)
int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
int i;
unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
- unsigned long top = (unsigned long)_etext - PAGE_OFFSET;
+ unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
unsigned long size;
- for (i = 0; i < nb - 1 && base < top && top - base > (128 << 10);) {
- size = block_size(base, top);
+ for (i = 0; i < nb - 1 && base < top;) {
+ size = bat_block_size(base, top);
setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
base += size;
}
if (base < top) {
- size = block_size(base, top);
- size = max(size, 128UL << 10);
+ size = bat_block_size(base, top);
if ((top - base) > size) {
size <<= 1;
if (strict_kernel_rwx_enabled() && base + size > border)
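
block_size() is renamed bat_block_size() and, like find_free_bat(), loses its static qualifier so it can be called from outside this file; mmu_mark_initmem_nx() now rounds _etext up to 128K instead of special-casing undersized tails. For intuition, a standalone model of what the block-size computation must deliver: an assumption-labelled sketch rather than the kernel code, assuming top > base and modelling SZ_256M as the cap.

#include <stdio.h>

/* Largest power-of-two block, capped at 256M, permitted both by the
 * alignment of base and by the span top - base (model, not kernel code). */
static unsigned int bat_block_size_model(unsigned long base, unsigned long top)
{
	unsigned int max_size = 0x10000000;	/* SZ_256M */
	unsigned long len = top - base;
	unsigned int abit = base ? __builtin_ctzl(base) : 28;
	unsigned int align = abit >= 28 ? max_size : 1u << abit;
	unsigned int span = len >= max_size ? max_size :
			    1u << ((8 * sizeof(long) - 1) - __builtin_clzl(len));

	return align < span ? align : span;
}

int main(void)
{
	/* An 8M-aligned base with 24M remaining yields an 8M block. */
	printf("%#x\n", bat_block_size_model(0x00800000, 0x02000000));
	return 0;
}
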
diff --git a/arch/powerpc/mm/book3s32/mmu_context.c b/arch/powerpc/mm/book3s32/mmu_context.c
index e2708e387dc3..269a3eb25a73 100644
--- a/arch/powerpc/mm/book3s32/mmu_context.c
+++ b/arch/powerpc/mm/book3s32/mmu_context.c
@@ -69,6 +69,12 @@ EXPORT_SYMBOL_GPL(__init_new_context);
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
mm->context.id = __init_new_context();
+ mm->context.sr0 = CTX_TO_VSID(mm->context.id, 0);
+
+ if (!kuep_is_disabled())
+ mm->context.sr0 |= SR_NX;
+ if (!kuap_is_disabled())
+ mm->context.sr0 |= SR_KS;
return 0;
}
@@ -108,20 +114,13 @@ void __init mmu_context_init(void)
void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
{
long id = next->context.id;
- unsigned long val;
if (id < 0)
panic("mm_struct %p has no context ID", next);
isync();
- val = CTX_TO_VSID(id, 0);
- if (!kuep_is_disabled())
- val |= SR_NX;
- if (!kuap_is_disabled())
- val |= SR_KS;
-
- update_user_segments(val);
+ update_user_segments(next->context.sr0);
if (IS_ENABLED(CONFIG_BDI_SWITCH))
abatron_pteptrs[1] = next->pgd;
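
The switch_mmu_context() hunk is the payoff for caching sr0 in the context: the CTX_TO_VSID() computation and the KUEP/KUAP key tests move to init_new_context(), leaving the context-switch fast path a single load of the precomputed value. Its post-patch shape, sketched with the CONFIG_BDI_SWITCH bookkeeping elided:

void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	if (next->context.id < 0)
		panic("mm_struct %p has no context ID", next);

	isync();
	update_user_segments(next->context.sr0);	/* precomputed at init */
	/* ... BDI/debugger bookkeeping elided ... */
}
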
diff --git a/arch/powerpc/mm/book3s64/Makefile b/arch/powerpc/mm/book3s64/Makefile
index 1b56d3af47d4..2d50cac499c5 100644
--- a/arch/powerpc/mm/book3s64/Makefile
+++ b/arch/powerpc/mm/book3s64/Makefile
@@ -2,20 +2,23 @@
ccflags-y := $(NO_MINIMAL_TOC)
+obj-y += mmu_context.o pgtable.o trace.o
+ifdef CONFIG_PPC_64S_HASH_MMU
CFLAGS_REMOVE_slb.o = $(CC_FLAGS_FTRACE)
-
-obj-y += hash_pgtable.o hash_utils.o slb.o \
- mmu_context.o pgtable.o hash_tlb.o
-obj-$(CONFIG_PPC_NATIVE) += hash_native.o
-obj-$(CONFIG_PPC_RADIX_MMU) += radix_pgtable.o radix_tlb.o
+obj-y += hash_pgtable.o hash_utils.o hash_tlb.o slb.o
+obj-$(CONFIG_PPC_HASH_MMU_NATIVE) += hash_native.o
obj-$(CONFIG_PPC_4K_PAGES) += hash_4k.o
obj-$(CONFIG_PPC_64K_PAGES) += hash_64k.o
-obj-$(CONFIG_HUGETLB_PAGE) += hash_hugetlbpage.o
+obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o
+obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage_prot.o
+endif
+
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+
+obj-$(CONFIG_PPC_RADIX_MMU) += radix_pgtable.o radix_tlb.o
ifdef CONFIG_HUGETLB_PAGE
obj-$(CONFIG_PPC_RADIX_MMU) += radix_hugetlbpage.o
endif
-obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += hash_hugepage.o
-obj-$(CONFIG_PPC_SUBPAGE_PROT) += subpage_prot.o
obj-$(CONFIG_SPAPR_TCE_IOMMU) += iommu_api.o
obj-$(CONFIG_PPC_PKEY) += pkeys.o
diff --git a/arch/powerpc/mm/book3s64/hash_native.c b/arch/powerpc/mm/book3s64/hash_native.c
index d8279bfe68ea..623a7b7ab38b 100644
--- a/arch/powerpc/mm/book3s64/hash_native.c
+++ b/arch/powerpc/mm/book3s64/hash_native.c
@@ -43,110 +43,6 @@
static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
-static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
-{
- unsigned long rb;
-
- rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
-
- asm volatile("tlbiel %0" : : "r" (rb));
-}
-
-/*
- * tlbiel instruction for hash, set invalidation
- * i.e., r=1 and is=01 or is=10 or is=11
- */
-static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
- unsigned int pid,
- unsigned int ric, unsigned int prs)
-{
- unsigned long rb;
- unsigned long rs;
- unsigned int r = 0; /* hash format */
-
- rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
- rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
-
- asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
- : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r)
- : "memory");
-}
-
-
-static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
-{
- unsigned int set;
-
- asm volatile("ptesync": : :"memory");
-
- for (set = 0; set < num_sets; set++)
- tlbiel_hash_set_isa206(set, is);
-
- ppc_after_tlbiel_barrier();
-}
-
-static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
-{
- unsigned int set;
-
- asm volatile("ptesync": : :"memory");
-
- /*
- * Flush the partition table cache if this is HV mode.
- */
- if (early_cpu_has_feature(CPU_FTR_HVMODE))
- tlbiel_hash_set_isa300(0, is, 0, 2, 0);
-
- /*
- * Now invalidate the process table cache. UPRT=0 HPT modes (what
- * current hardware implements) do not use the process table, but
- * add the flushes anyway.
- *
- * From ISA v3.0B p. 1078:
- * The following forms are invalid.
- * * PRS=1, R=0, and RIC!=2 (The only process-scoped
- * HPT caching is of the Process Table.)
- */
- tlbiel_hash_set_isa300(0, is, 0, 2, 1);
-
- /*
- * Then flush the sets of the TLB proper. Hash mode uses
- * partition scoped TLB translations, which may be flushed
- * in !HV mode.
- */
- for (set = 0; set < num_sets; set++)
- tlbiel_hash_set_isa300(set, is, 0, 0, 0);
-
- ppc_after_tlbiel_barrier();
-
- asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
-}
-
-void hash__tlbiel_all(unsigned int action)
-{
- unsigned int is;
-
- switch (action) {
- case TLB_INVAL_SCOPE_GLOBAL:
- is = 3;
- break;
- case TLB_INVAL_SCOPE_LPID:
- is = 2;
- break;
- default:
- BUG();
- }
-
- if (early_cpu_has_feature(CPU_FTR_ARCH_300))
- tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is);
- else if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
- tlbiel_all_isa206(POWER8_TLB_SETS, is);
- else if (early_cpu_has_feature(CPU_FTR_ARCH_206))
- tlbiel_all_isa206(POWER7_TLB_SETS, is);
- else
- WARN(1, "%s called on pre-POWER7 CPU\n", __func__);
-}
-
static inline unsigned long ___tlbie(unsigned long vpn, int psize,
int apsize, int ssize)
{
@@ -267,7 +163,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
va |= ssize << 8;
sllp = get_sllp_encoding(apsize);
va |= sllp << 5;
- asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,0", %1)
+ asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 0), %1)
: : "r" (va), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
@@ -286,7 +182,7 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
*/
va |= (vpn & 0xfe);
va |= 1; /* L */
- asm volatile(ASM_FTR_IFSET("tlbiel %0", "tlbiel %0,1", %1)
+ asm volatile(ASM_FTR_IFSET("tlbiel %0", PPC_TLBIEL_v205(%0, 1), %1)
: : "r" (va), "i" (CPU_FTR_ARCH_206)
: "memory");
break;
diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c
index ad5eff097d31..7ce8914992e3 100644
--- a/arch/powerpc/mm/book3s64/hash_pgtable.c
+++ b/arch/powerpc/mm/book3s64/hash_pgtable.c
@@ -16,7 +16,6 @@
#include <mm/mmu_decl.h>
-#define CREATE_TRACE_POINTS
#include <trace/events/thp.h>
#if H_PGTABLE_RANGE > (USER_VSID_RANGE * (TASK_SIZE_USER64 / TASK_CONTEXT_SIZE))
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index cfd45245d009..7abf82a698d3 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -99,8 +99,6 @@
*/
static unsigned long _SDR1;
-struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
-EXPORT_SYMBOL_GPL(mmu_psize_defs);
u8 hpte_page_sizes[1 << LP_BITS];
EXPORT_SYMBOL_GPL(hpte_page_sizes);
@@ -114,9 +112,6 @@ EXPORT_SYMBOL_GPL(mmu_linear_psize);
int mmu_virtual_psize = MMU_PAGE_4K;
int mmu_vmalloc_psize = MMU_PAGE_4K;
EXPORT_SYMBOL_GPL(mmu_vmalloc_psize);
-#ifdef CONFIG_SPARSEMEM_VMEMMAP
-int mmu_vmemmap_psize = MMU_PAGE_4K;
-#endif
int mmu_io_psize = MMU_PAGE_4K;
int mmu_kernel_ssize = MMU_SEGSIZE_256M;
EXPORT_SYMBOL_GPL(mmu_kernel_ssize);
@@ -175,6 +170,110 @@ static struct mmu_psize_def mmu_psize_defaults_gp[] = {
},
};
+static inline void tlbiel_hash_set_isa206(unsigned int set, unsigned int is)
+{
+ unsigned long rb;
+
+ rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
+
+ asm volatile("tlbiel %0" : : "r" (rb));
+}
+
+/*
+ * tlbiel instruction for hash, set invalidation
+ * i.e., r=1 and is=01 or is=10 or is=11
+ */
+static __always_inline void tlbiel_hash_set_isa300(unsigned int set, unsigned int is,
+ unsigned int pid,
+ unsigned int ric, unsigned int prs)
+{
+ unsigned long rb;
+ unsigned long rs;
+ unsigned int r = 0; /* hash format */
+
+ rb = (set << PPC_BITLSHIFT(51)) | (is << PPC_BITLSHIFT(53));
+ rs = ((unsigned long)pid << PPC_BITLSHIFT(31));
+
+ asm volatile(PPC_TLBIEL(%0, %1, %2, %3, %4)
+ : : "r"(rb), "r"(rs), "i"(ric), "i"(prs), "i"(r)
+ : "memory");
+}
+
+
+static void tlbiel_all_isa206(unsigned int num_sets, unsigned int is)
+{
+ unsigned int set;
+
+ asm volatile("ptesync": : :"memory");
+
+ for (set = 0; set < num_sets; set++)
+ tlbiel_hash_set_isa206(set, is);
+
+ ppc_after_tlbiel_barrier();
+}
+
+static void tlbiel_all_isa300(unsigned int num_sets, unsigned int is)
+{
+ unsigned int set;
+
+ asm volatile("ptesync": : :"memory");
+
+ /*
+ * Flush the partition table cache if this is HV mode.
+ */
+ if (early_cpu_has_feature(CPU_FTR_HVMODE))
+ tlbiel_hash_set_isa300(0, is, 0, 2, 0);
+
+ /*
+ * Now invalidate the process table cache. UPRT=0 HPT modes (what
+ * current hardware implements) do not use the process table, but
+ * add the flushes anyway.
+ *
+ * From ISA v3.0B p. 1078:
+ * The following forms are invalid.
+ * * PRS=1, R=0, and RIC!=2 (The only process-scoped
+ * HPT caching is of the Process Table.)
+ */
+ tlbiel_hash_set_isa300(0, is, 0, 2, 1);
+
+ /*
+ * Then flush the sets of the TLB proper. Hash mode uses
+ * partition scoped TLB translations, which may be flushed
+ * in !HV mode.
+ */
+ for (set = 0; set < num_sets; set++)
+ tlbiel_hash_set_isa300(set, is, 0, 0, 0);
+
+ ppc_after_tlbiel_barrier();
+
+ asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT "; isync" : : :"memory");
+}
+
+void hash__tlbiel_all(unsigned int action)
+{
+ unsigned int is;
+
+ switch (action) {
+ case TLB_INVAL_SCOPE_GLOBAL:
+ is = 3;
+ break;
+ case TLB_INVAL_SCOPE_LPID:
+ is = 2;
+ break;
+ default:
+ BUG();
+ }
+
+ if (early_cpu_has_feature(CPU_FTR_ARCH_300))
+ tlbiel_all_isa300(POWER9_TLB_SETS_HASH, is);
+ else if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
+ tlbiel_all_isa206(POWER8_TLB_SETS, is);
+ else if (early_cpu_has_feature(CPU_FTR_ARCH_206))
+ tlbiel_all_isa206(POWER7_TLB_SETS, is);
+ else
+ WARN(1, "%s called on pre-POWER7 CPU\n", __func__);
+}
+
/*
* 'R' and 'C' update notes:
* - Under pHyp or KVM, the updatepp path will not set C, thus it *will*
@@ -563,7 +662,7 @@ static int __init htab_dt_scan_hugepage_blocks(unsigned long node,
}
#endif /* CONFIG_HUGETLB_PAGE */
-static void mmu_psize_set_default_penc(void)
+static void __init mmu_psize_set_default_penc(void)
{
int bpsize, apsize;
for (bpsize = 0; bpsize < MMU_PAGE_COUNT; bpsize++)
@@ -573,7 +672,7 @@ static void mmu_psize_set_default_penc(void)
#ifdef CONFIG_PPC_64K_PAGES
-static bool might_have_hea(void)
+static bool __init might_have_hea(void)
{
/*
* The HEA ethernet adapter requires awareness of the
@@ -644,7 +743,7 @@ static void __init htab_scan_page_sizes(void)
* low-order N bits as the encoding for the 2^(12+N) byte page size
* (if it exists).
*/
-static void init_hpte_page_sizes(void)
+static void __init init_hpte_page_sizes(void)
{
long int ap, bp;
long int shift, penc;
@@ -1091,7 +1190,7 @@ void __init hash__early_init_mmu(void)
ps3_early_mm_init();
else if (firmware_has_feature(FW_FEATURE_LPAR))
hpte_init_pseries();
- else if (IS_ENABLED(CONFIG_PPC_NATIVE))
+ else if (IS_ENABLED(CONFIG_PPC_HASH_MMU_NATIVE))
hpte_init_native();
if (!mmu_hash_ops.hpte_insert)
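
The relocated hash__tlbiel_all() path above issues a fixed sequence on ISA v3.0: ptesync, partition-table cache flush (HV only), process-table cache flush, one tlbiel per TLB set, then the barrier and ERAT invalidate. A small host-side driver that prints the (set, ric, prs) arguments in that order makes the sequence easy to eyeball; POWER9_TLB_SETS_HASH is taken as 256 here, an assumed value:

#include <stdio.h>

static void tlbiel(unsigned int set, unsigned int is,
		   unsigned int ric, unsigned int prs)
{
	printf("tlbiel set=%u is=%u ric=%u prs=%u\n", set, is, ric, prs);
}

int main(void)
{
	unsigned int is = 3;		/* TLB_INVAL_SCOPE_GLOBAL */
	unsigned int num_sets = 256;	/* POWER9_TLB_SETS_HASH, assumed */
	unsigned int set;

	/* a ptesync would come first, ordering prior PTE updates */
	tlbiel(0, is, 2, 0);		/* partition-table cache (HV only) */
	tlbiel(0, is, 2, 1);		/* process-table cache */
	for (set = 0; set < num_sets; set++)
		tlbiel(set, is, 0, 0);	/* the TLB sets themselves */
	/* then the tlbiel barrier and the ERAT invalidate */
	return 0;
}
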
diff --git a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c b/arch/powerpc/mm/book3s64/hugetlbpage.c
index a688e1324ae5..ea8f83afb0ae 100644
--- a/arch/powerpc/mm/book3s64/hash_hugetlbpage.c
+++ b/arch/powerpc/mm/book3s64/hugetlbpage.c
@@ -16,6 +16,7 @@
unsigned int hpage_shift;
EXPORT_SYMBOL(hpage_shift);
+#ifdef CONFIG_PPC_64S_HASH_MMU
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, unsigned long flags,
int ssize, unsigned int shift, unsigned int mmu_psize)
@@ -122,6 +123,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
*ptep = __pte(new_pte & ~H_PAGE_BUSY);
return 0;
}
+#endif
pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
@@ -148,7 +150,7 @@ void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr
set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
-void hugetlbpage_init_default(void)
+void __init hugetlbpage_init_default(void)
{
/* Set default large page size. Currently, we pick 16M or 1M
* depending on what is available
diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c
index c10fc8a72fb3..c766e4c26e42 100644
--- a/arch/powerpc/mm/book3s64/mmu_context.c
+++ b/arch/powerpc/mm/book3s64/mmu_context.c
@@ -31,7 +31,8 @@ static int alloc_context_id(int min_id, int max_id)
return ida_alloc_range(&mmu_context_ida, min_id, max_id, GFP_KERNEL);
}
-void hash__reserve_context_id(int id)
+#ifdef CONFIG_PPC_64S_HASH_MMU
+void __init hash__reserve_context_id(int id)
{
int result = ida_alloc_range(&mmu_context_ida, id, id, GFP_KERNEL);
@@ -50,7 +51,9 @@ int hash__alloc_context_id(void)
return alloc_context_id(MIN_USER_CONTEXT, max);
}
EXPORT_SYMBOL_GPL(hash__alloc_context_id);
+#endif
+#ifdef CONFIG_PPC_64S_HASH_MMU
static int realloc_context_ids(mm_context_t *ctx)
{
int i, id;
@@ -150,6 +153,13 @@ void hash__setup_new_exec(void)
slb_setup_new_exec();
}
+#else
+static inline int hash__init_new_context(struct mm_struct *mm)
+{
+ BUILD_BUG();
+ return 0;
+}
+#endif
static int radix__init_new_context(struct mm_struct *mm)
{
@@ -175,7 +185,9 @@ static int radix__init_new_context(struct mm_struct *mm)
*/
asm volatile("ptesync;isync" : : : "memory");
+#ifdef CONFIG_PPC_64S_HASH_MMU
mm->context.hash_context = NULL;
+#endif
return index;
}
@@ -213,14 +225,22 @@ EXPORT_SYMBOL_GPL(__destroy_context);
static void destroy_contexts(mm_context_t *ctx)
{
- int index, context_id;
+ if (radix_enabled()) {
+ ida_free(&mmu_context_ida, ctx->id);
+ } else {
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ int index, context_id;
- for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
- context_id = ctx->extended_id[index];
- if (context_id)
- ida_free(&mmu_context_ida, context_id);
+ for (index = 0; index < ARRAY_SIZE(ctx->extended_id); index++) {
+ context_id = ctx->extended_id[index];
+ if (context_id)
+ ida_free(&mmu_context_ida, context_id);
+ }
+ kfree(ctx->hash_context);
+#else
+ BUILD_BUG(); // radix_enabled() should be constant true
+#endif
}
- kfree(ctx->hash_context);
}
static void pmd_frag_destroy(void *pmd_frag)
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index 9e16c7b1a6c5..79ce3c22a29d 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -22,6 +22,13 @@
#include "internal.h"
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+EXPORT_SYMBOL_GPL(mmu_psize_defs);
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int mmu_vmemmap_psize = MMU_PAGE_4K;
+#endif
+
unsigned long __pmd_frag_nr;
EXPORT_SYMBOL(__pmd_frag_nr);
unsigned long __pmd_frag_size_shift;
@@ -207,17 +214,12 @@ void __init mmu_partition_table_init(void)
unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
unsigned long ptcr;
- BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
/* Initialize the Partition Table with no entries */
partition_tb = memblock_alloc(patb_size, patb_size);
if (!partition_tb)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, patb_size, patb_size);
- /*
- * update partition table control register,
- * 64 K size.
- */
ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
set_ptcr_when_no_uv(ptcr);
powernv_set_nmmu_ptcr(ptcr);
@@ -526,3 +528,23 @@ static int __init pgtable_debugfs_setup(void)
return 0;
}
arch_initcall(pgtable_debugfs_setup);
+
+#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN)
+/*
+ * Override the generic version in mm/memremap.c.
+ *
+ * With hash translation, the direct-map range is mapped with just one
+ * page size selected by htab_init_page_sizes(). Consult
+ * mmu_psize_defs[] to determine the minimum page size alignment.
+ */
+unsigned long memremap_compat_align(void)
+{
+ if (!radix_enabled()) {
+ unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;
+ return max(SUBSECTION_SIZE, 1UL << shift);
+ }
+
+ return SUBSECTION_SIZE;
+}
+EXPORT_SYMBOL_GPL(memremap_compat_align);
+#endif
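
A worked example of the rule being moved above, with stand-in values: SUBSECTION_SIZE is assumed to be 2 MB and the hash linear mapping to use 16 MB pages, both plausible but not guaranteed by this hunk:

#include <stdio.h>

#define SUBSECTION_SIZE (1UL << 21)	/* 2 MB, assumed for this sketch */

static unsigned long compat_align(int radix, unsigned int linear_shift)
{
	if (!radix) {
		/* hash: the direct map uses one page size; align to it */
		unsigned long linear = 1UL << linear_shift;
		return linear > SUBSECTION_SIZE ? linear : SUBSECTION_SIZE;
	}
	return SUBSECTION_SIZE;
}

int main(void)
{
	printf("hash/16M: %lu MB\n", compat_align(0, 24) >> 20);	/* 16 */
	printf("radix:    %lu MB\n", compat_align(1, 24) >> 20);	/*  2 */
	return 0;
}
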
diff --git a/arch/powerpc/mm/book3s64/pkeys.c b/arch/powerpc/mm/book3s64/pkeys.c
index a2d9ad138709..753e62ba67af 100644
--- a/arch/powerpc/mm/book3s64/pkeys.c
+++ b/arch/powerpc/mm/book3s64/pkeys.c
@@ -66,7 +66,7 @@ static int __init dt_scan_storage_keys(unsigned long node,
return 1;
}
-static int scan_pkey_feature(void)
+static int __init scan_pkey_feature(void)
{
int ret;
int pkeys_total = 0;
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index 3a600bd7fbc6..def04631a74d 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -33,7 +33,6 @@
#include <trace/events/thp.h>
-unsigned int mmu_pid_bits;
unsigned int mmu_base_pid;
unsigned long radix_mem_block_size __ro_after_init;
@@ -335,7 +334,7 @@ static void __init radix_init_pgtable(void)
u64 i;
/* We don't support slb for radix */
- mmu_slb_size = 0;
+ slb_set_size(0);
/*
* Create the linear mapping
@@ -357,18 +356,13 @@ static void __init radix_init_pgtable(void)
-1, PAGE_KERNEL));
}
- /* Find out how many PID bits are supported */
if (!cpu_has_feature(CPU_FTR_HVMODE) &&
cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
/*
* Older versions of KVM on these machines prefer that the
* guest only uses the low 19 PID bits.
*/
- if (!mmu_pid_bits)
- mmu_pid_bits = 19;
- } else {
- if (!mmu_pid_bits)
- mmu_pid_bits = 20;
+ mmu_pid_bits = 19;
}
mmu_base_pid = 1;
@@ -449,11 +443,6 @@ static int __init radix_dt_scan_page_sizes(unsigned long node,
if (type == NULL || strcmp(type, "cpu") != 0)
return 0;
- /* Find MMU PID size */
- prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
- if (prop && size == 4)
- mmu_pid_bits = be32_to_cpup(prop);
-
/* Grab page size encodings */
prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
if (!prop)
@@ -510,7 +499,7 @@ static int __init probe_memory_block_size(unsigned long node, const char *uname,
return 1;
}
-static unsigned long radix_memory_block_size(void)
+static unsigned long __init radix_memory_block_size(void)
{
unsigned long mem_block_size = MIN_MEMORY_BLOCK_SIZE;
@@ -528,7 +517,7 @@ static unsigned long radix_memory_block_size(void)
#else /* CONFIG_MEMORY_HOTPLUG */
-static unsigned long radix_memory_block_size(void)
+static unsigned long __init radix_memory_block_size(void)
{
return 1UL * 1024 * 1024 * 1024;
}
@@ -572,22 +561,11 @@ void __init radix__early_init_devtree(void)
return;
}
-static void radix_init_amor(void)
-{
- /*
- * In HV mode, we init AMOR (Authority Mask Override Register) so that
- * the hypervisor and guest can setup IAMR (Instruction Authority Mask
- * Register), enable key 0 and set it to 1.
- *
- * AMOR = 0b1100 .... 0000 (Mask for key 0 is 11)
- */
- mtspr(SPRN_AMOR, (3ul << 62));
-}
-
void __init radix__early_init_mmu(void)
{
unsigned long lpcr;
+#ifdef CONFIG_PPC_64S_HASH_MMU
#ifdef CONFIG_PPC_64K_PAGES
/* PAGE_SIZE mappings */
mmu_virtual_psize = MMU_PAGE_64K;
@@ -605,6 +583,7 @@ void __init radix__early_init_mmu(void)
} else
mmu_vmemmap_psize = mmu_virtual_psize;
#endif
+#endif
/*
* initialize page table size
*/
@@ -644,7 +623,6 @@ void __init radix__early_init_mmu(void)
lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
radix_init_partition_table();
- radix_init_amor();
} else {
radix_init_pseries();
}
@@ -668,8 +646,6 @@ void radix__early_init_mmu_secondary(void)
set_ptcr_when_no_uv(__pa(partition_tb) |
(PATB_SIZE_SHIFT - 12));
-
- radix_init_amor();
}
radix__switch_mmu_context(NULL, &init_mm);
@@ -1100,7 +1076,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
int pud_clear_huge(pud_t *pud)
{
- if (pud_huge(*pud)) {
+ if (pud_is_leaf(*pud)) {
pud_clear(pud);
return 1;
}
@@ -1147,7 +1123,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
int pmd_clear_huge(pmd_t *pmd)
{
- if (pmd_huge(*pmd)) {
+ if (pmd_is_leaf(*pmd)) {
pmd_clear(pmd);
return 1;
}
diff --git a/arch/powerpc/mm/book3s64/slb.c b/arch/powerpc/mm/book3s64/slb.c
index f0037bcc47a0..31f4cef3adac 100644
--- a/arch/powerpc/mm/book3s64/slb.c
+++ b/arch/powerpc/mm/book3s64/slb.c
@@ -868,19 +868,3 @@ DEFINE_INTERRUPT_HANDLER_RAW(do_slb_fault)
return err;
}
}
-
-DEFINE_INTERRUPT_HANDLER(do_bad_slb_fault)
-{
- int err = regs->result;
-
- if (err == -EFAULT) {
- if (user_mode(regs))
- _exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
- else
- bad_page_fault(regs, SIGSEGV);
- } else if (err == -EINVAL) {
- unrecoverable_exception(regs);
- } else {
- BUG();
- }
-}
diff --git a/arch/powerpc/mm/book3s64/trace.c b/arch/powerpc/mm/book3s64/trace.c
new file mode 100644
index 000000000000..b86e7b906257
--- /dev/null
+++ b/arch/powerpc/mm/book3s64/trace.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file is for defining trace points and trace related helpers.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define CREATE_TRACE_POINTS
+#include <trace/events/thp.h>
+#endif
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 8acd00178956..c1cb21a00884 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -82,6 +82,7 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(copro_handle_mm_fault);
+#ifdef CONFIG_PPC_64S_HASH_MMU
int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
{
u64 vsid, vsidkey;
@@ -146,3 +147,4 @@ void copro_flush_all_slbs(struct mm_struct *mm)
cxl_slbia(mm);
}
EXPORT_SYMBOL_GPL(copro_flush_all_slbs);
+#endif
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a8d0ce85d39a..eb8ecd7343a9 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -35,6 +35,7 @@
#include <linux/kfence.h>
#include <linux/pkeys.h>
+#include <asm/asm-prototypes.h>
#include <asm/firmware.h>
#include <asm/interrupt.h>
#include <asm/page.h>
@@ -516,10 +517,8 @@ retry:
* case.
*/
if (unlikely(fault & VM_FAULT_RETRY)) {
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ flags |= FAULT_FLAG_TRIED;
+ goto retry;
}
mmap_read_unlock(current->mm);
@@ -620,4 +619,27 @@ DEFINE_INTERRUPT_HANDLER(do_bad_page_fault_segv)
{
bad_page_fault(regs, SIGSEGV);
}
+
+/*
+ * In radix, segment interrupts indicate the EA is not addressable by the
+ * page table geometry, so they are always sent here.
+ *
+ * In hash, this is called if do_slb_fault returns an error. Typically it is
+ * because the EA was outside the region allowed by software.
+ */
+DEFINE_INTERRUPT_HANDLER(do_bad_segment_interrupt)
+{
+ int err = regs->result;
+
+ if (err == -EFAULT) {
+ if (user_mode(regs))
+ _exception(SIGSEGV, regs, SEGV_BNDERR, regs->dar);
+ else
+ bad_page_fault(regs, SIGSEGV);
+ } else if (err == -EINVAL) {
+ unrecoverable_exception(regs);
+ } else {
+ BUG();
+ }
+}
#endif
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 82d8b368ca6d..ddead41e2194 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -542,20 +542,26 @@ retry:
return page;
}
-#ifdef CONFIG_PPC_MM_SLICES
+#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+static inline int file_to_psize(struct file *file)
+{
+ struct hstate *hstate = hstate_file(file);
+ return shift_to_mmu_psize(huge_page_shift(hstate));
+}
+
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
- struct hstate *hstate = hstate_file(file);
- int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
-
#ifdef CONFIG_PPC_RADIX_MMU
if (radix_enabled())
return radix__hugetlb_get_unmapped_area(file, addr, len,
pgoff, flags);
#endif
- return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
+#ifdef CONFIG_PPC_MM_SLICES
+ return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
+#endif
+ BUG();
}
#endif
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index 3a82f89827a5..119ef491f797 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -20,6 +20,7 @@
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/kup.h>
+#include <asm/smp.h>
phys_addr_t memstart_addr __ro_after_init = (phys_addr_t)~0ull;
EXPORT_SYMBOL_GPL(memstart_addr);
@@ -33,6 +34,9 @@ bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);
static int __init parse_nosmep(char *p)
{
+ if (!IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ return 0;
+
disable_kuep = true;
pr_warn("Disabling Kernel Userspace Execution Prevention\n");
return 0;
@@ -47,6 +51,23 @@ static int __init parse_nosmap(char *p)
}
early_param("nosmap", parse_nosmap);
+void __weak setup_kuep(bool disabled)
+{
+ if (!IS_ENABLED(CONFIG_PPC_KUEP) || disabled)
+ return;
+
+ if (smp_processor_id() != boot_cpuid)
+ return;
+
+ pr_info("Activating Kernel Userspace Execution Prevention\n");
+}
+
+void setup_kup(void)
+{
+ setup_kuap(disable_kuap);
+ setup_kuep(disable_kuep);
+}
+
#define CTOR(shift) static void ctor_##shift(void *addr) \
{ \
memset(addr, 0, sizeof(void *) << (shift)); \
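
The call order inside the new setup_kup() matters: the 40x KUAP implementation (see the nohash/kup.c hunk later in this series) may force KUEP off when KUAP is disabled, so setup_kuap() has to run before setup_kuep() reads disable_kuep. A minimal standalone model of that dependency, with stand-in bodies:

#include <stdbool.h>
#include <stdio.h>

static bool disable_kuap;	/* mirrors the kernel globals above */
static bool disable_kuep;

static void setup_kuap(bool disabled)
{
	bool is_40x = true;	/* pretend CONFIG_40x for this sketch */

	if (disabled && is_40x)
		disable_kuep = true;	/* 40x: no KUAP implies no KUEP */
}

static void setup_kuep(bool disabled)
{
	if (!disabled)
		printf("Activating Kernel Userspace Execution Prevention\n");
}

static void setup_kup(void)
{
	setup_kuap(disable_kuap);	/* first: may set disable_kuep */
	setup_kuep(disable_kuep);
}

int main(void)
{
	disable_kuap = true;
	setup_kup();	/* prints nothing: KUAP off dragged KUEP off too */
	return 0;
}
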
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 386be136026e..35f46bf54281 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -370,6 +370,9 @@ void register_page_bootmem_memmap(unsigned long section_nr,
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#ifdef CONFIG_PPC_BOOK3S_64
+unsigned int mmu_lpid_bits;
+unsigned int mmu_pid_bits;
+
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
static int __init parse_disable_radix(char *p)
@@ -437,11 +440,56 @@ static void __init early_check_vec5(void)
}
}
+static int __init dt_scan_mmu_pid_width(unsigned long node,
+ const char *uname, int depth,
+ void *data)
+{
+ int size = 0;
+ const __be32 *prop;
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+
+ /* We are scanning "cpu" nodes only */
+ if (type == NULL || strcmp(type, "cpu") != 0)
+ return 0;
+
+ /* Find MMU LPID, PID register size */
+ prop = of_get_flat_dt_prop(node, "ibm,mmu-lpid-bits", &size);
+ if (prop && size == 4)
+ mmu_lpid_bits = be32_to_cpup(prop);
+
+ prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
+ if (prop && size == 4)
+ mmu_pid_bits = be32_to_cpup(prop);
+
+ if (!mmu_pid_bits && !mmu_lpid_bits)
+ return 0;
+
+ return 1;
+}
+
void __init mmu_early_init_devtree(void)
{
+ bool hvmode = !!(mfmsr() & MSR_HV);
+
/* Disable radix mode based on kernel command line. */
- if (disable_radix)
- cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+ if (disable_radix) {
+ if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
+ cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
+ else
+ pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
+ }
+
+ of_scan_flat_dt(dt_scan_mmu_pid_width, NULL);
+ if (hvmode && !mmu_lpid_bits) {
+ if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
+ mmu_lpid_bits = 12; /* POWER8-10 */
+ else
+ mmu_lpid_bits = 10; /* POWER7 */
+ }
+ if (!mmu_pid_bits) {
+ if (early_cpu_has_feature(CPU_FTR_ARCH_300))
+ mmu_pid_bits = 20; /* POWER9-10 */
+ }
/*
* Check /chosen/ibm,architecture-vec-5 if running as a guest.
@@ -449,11 +497,12 @@ void __init mmu_early_init_devtree(void)
* even though the ibm,architecture-vec-5 property created by
* skiboot doesn't have the necessary bits set.
*/
- if (!(mfmsr() & MSR_HV))
+ if (!hvmode)
early_check_vec5();
if (early_radix_enabled()) {
radix__early_init_devtree();
+
/*
* We have finalized the translation we are going to use by now.
* Radix mode is not limited by RMA / VRMA addressing.
@@ -463,5 +512,9 @@ void __init mmu_early_init_devtree(void)
memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
} else
hash__early_init_devtree();
+
+ if (!(cur_cpu_spec->mmu_features & MMU_FTR_HPTE_TABLE) &&
+ !(cur_cpu_spec->mmu_features & MMU_FTR_TYPE_RADIX))
+ panic("kernel does not support any MMU type offered by platform");
}
#endif /* CONFIG_PPC_BOOK3S_64 */
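
The fallback logic above reduces to a small decision table: a device-tree value always wins, the LPID default is consulted only when booting in HV mode, and the PID default only exists for ISA v3.0 parts. As a standalone sketch (feature flags are plain integers here, not the kernel's CPU feature machinery):

#include <stdio.h>

static unsigned int pick_lpid_bits(unsigned int dt_val, int arch_207s)
{
	if (dt_val)
		return dt_val;		/* "ibm,mmu-lpid-bits" wins */
	return arch_207s ? 12 : 10;	/* POWER8-10 : POWER7 */
}

static unsigned int pick_pid_bits(unsigned int dt_val, int arch_300)
{
	if (dt_val)
		return dt_val;		/* "ibm,mmu-pid-bits" wins */
	return arch_300 ? 20 : 0;	/* POWER9-10, else left unset */
}

int main(void)
{
	printf("lpid=%u pid=%u\n", pick_lpid_bits(0, 1), pick_pid_bits(0, 1));
	return 0;
}
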
diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c
index 57342154d2b0..4f12504fb405 100644
--- a/arch/powerpc/mm/ioremap.c
+++ b/arch/powerpc/mm/ioremap.c
@@ -98,23 +98,3 @@ void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size,
return NULL;
}
-
-#ifdef CONFIG_ZONE_DEVICE
-/*
- * Override the generic version in mm/memremap.c.
- *
- * With hash translation, the direct-map range is mapped with just one
- * page size selected by htab_init_page_sizes(). Consult
- * mmu_psize_defs[] to determine the minimum page size alignment.
-*/
-unsigned long memremap_compat_align(void)
-{
- unsigned int shift = mmu_psize_defs[mmu_linear_psize].shift;
-
- if (radix_enabled())
- return SUBSECTION_SIZE;
- return max(SUBSECTION_SIZE, 1UL << shift);
-
-}
-EXPORT_SYMBOL_GPL(memremap_compat_align);
-#endif
diff --git a/arch/powerpc/mm/kasan/book3s_32.c b/arch/powerpc/mm/kasan/book3s_32.c
index 202bd260a009..450a67ef0bbe 100644
--- a/arch/powerpc/mm/kasan/book3s_32.c
+++ b/arch/powerpc/mm/kasan/book3s_32.c
@@ -10,47 +10,51 @@ int __init kasan_init_region(void *start, size_t size)
{
unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
- unsigned long k_cur = k_start;
- int k_size = k_end - k_start;
- int k_size_base = 1 << (ffs(k_size) - 1);
+ unsigned long k_nobat = k_start;
+ unsigned long k_cur;
+ phys_addr_t phys;
int ret;
- void *block;
- block = memblock_alloc(k_size, k_size_base);
-
- if (block && k_size_base >= SZ_128K && k_start == ALIGN(k_start, k_size_base)) {
- int k_size_more = 1 << (ffs(k_size - k_size_base) - 1);
-
- setbat(-1, k_start, __pa(block), k_size_base, PAGE_KERNEL);
- if (k_size_more >= SZ_128K)
- setbat(-1, k_start + k_size_base, __pa(block) + k_size_base,
- k_size_more, PAGE_KERNEL);
- if (v_block_mapped(k_start))
- k_cur = k_start + k_size_base;
- if (v_block_mapped(k_start + k_size_base))
- k_cur = k_start + k_size_base + k_size_more;
-
- update_bats();
+ while (k_nobat < k_end) {
+ unsigned int k_size = bat_block_size(k_nobat, k_end);
+ int idx = find_free_bat();
+
+ if (idx == -1)
+ break;
+ if (k_size < SZ_128K)
+ break;
+ phys = memblock_phys_alloc_range(k_size, k_size, 0,
+ MEMBLOCK_ALLOC_ANYWHERE);
+ if (!phys)
+ break;
+
+ setbat(idx, k_nobat, phys, k_size, PAGE_KERNEL);
+ k_nobat += k_size;
}
+ if (k_nobat != k_start)
+ update_bats();
- if (!block)
- block = memblock_alloc(k_size, PAGE_SIZE);
- if (!block)
- return -ENOMEM;
+ if (k_nobat < k_end) {
+ phys = memblock_phys_alloc_range(k_end - k_nobat, PAGE_SIZE, 0,
+ MEMBLOCK_ALLOC_ANYWHERE);
+ if (!phys)
+ return -ENOMEM;
+ }
ret = kasan_init_shadow_page_tables(k_start, k_end);
if (ret)
return ret;
- kasan_update_early_region(k_start, k_cur, __pte(0));
+ kasan_update_early_region(k_start, k_nobat, __pte(0));
- for (; k_cur < k_end; k_cur += PAGE_SIZE) {
+ for (k_cur = k_nobat; k_cur < k_end; k_cur += PAGE_SIZE) {
pmd_t *pmd = pmd_off_k(k_cur);
- void *va = block + k_cur - k_start;
- pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
+ pte_t pte = pfn_pte(PHYS_PFN(phys + k_cur - k_nobat), PAGE_KERNEL);
__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
}
flush_tlb_kernel_range(k_start, k_end);
+ memset(kasan_mem_to_shadow(start), 0, k_end - k_start);
+
return 0;
}
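
bat_block_size() is used but not defined in this hunk. For orientation, a plausible standalone equivalent: the largest power-of-two block, capped at 256 MB, that both fits the remaining range and is aligned with its start address (an assumption consistent with how the loop above consumes it; 32-bit quantities throughout, as on Book3S/32):

#include <stdio.h>

static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static unsigned int bat_block_size(unsigned long base, unsigned long top)
{
	unsigned int max_size = 256 << 20;	/* largest BAT: 256 MB */
	unsigned int base_shift = (__builtin_ffsl(base) - 1) & 31;
	unsigned int block_shift = (fls32(top - base) - 1) & 31;
	unsigned int size;

	size = 1U << (base_shift < block_shift ? base_shift : block_shift);
	return size < max_size ? size : max_size;
}

int main(void)
{
	/* base aligned to 128 MB, 192 MB to map: first block is 128 MB */
	printf("0x%x\n", bat_block_size(0xf8000000UL, 0xf8000000UL + 0x0c000000UL));
	return 0;
}
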
diff --git a/arch/powerpc/mm/maccess.c b/arch/powerpc/mm/maccess.c
index aad7c47e0030..ea821d0ffe16 100644
--- a/arch/powerpc/mm/maccess.c
+++ b/arch/powerpc/mm/maccess.c
@@ -11,20 +11,3 @@ bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
{
return is_kernel_addr((unsigned long)unsafe_src);
}
-
-int copy_inst_from_kernel_nofault(struct ppc_inst *inst, u32 *src)
-{
- unsigned int val, suffix;
- int err;
-
- err = copy_from_kernel_nofault(&val, src, sizeof(val));
- if (err)
- return err;
- if (IS_ENABLED(CONFIG_PPC64) && get_op(val) == OP_PREFIX) {
- err = copy_from_kernel_nofault(&suffix, src + 1, sizeof(suffix));
- *inst = ppc_inst_prefix(val, suffix);
- } else {
- *inst = ppc_inst(val);
- }
- return err;
-}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index bd5d91a31183..8e301cd8925b 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -26,7 +26,6 @@
#include <mm/mmu_decl.h>
unsigned long long memory_limit;
-bool init_mem_is_free;
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
@@ -312,7 +311,6 @@ void free_initmem(void)
{
ppc_md.progress = ppc_printk_progress;
mark_initmem_nx();
- init_mem_is_free = true;
free_initmem_default(POISON_FREE_INITMEM);
}
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index ae683fdc716c..c475cf810aa8 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -80,6 +80,7 @@ static inline unsigned long mmap_base(unsigned long rnd,
return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
}
+#ifdef HAVE_ARCH_UNMAPPED_AREA
#ifdef CONFIG_PPC_RADIX_MMU
/*
* Same function as generic code used only for radix, because we don't need to overload
@@ -181,11 +182,42 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
*/
return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
+#endif
+
+unsigned long arch_get_unmapped_area(struct file *filp,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+#ifdef CONFIG_PPC_MM_SLICES
+ return slice_get_unmapped_area(addr, len, flags,
+ mm_ctx_user_psize(&current->mm->context), 0);
+#else
+ BUG();
+#endif
+}
+
+unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+ const unsigned long addr0,
+ const unsigned long len,
+ const unsigned long pgoff,
+ const unsigned long flags)
+{
+#ifdef CONFIG_PPC_MM_SLICES
+ return slice_get_unmapped_area(addr0, len, flags,
+ mm_ctx_user_psize(&current->mm->context), 1);
+#else
+ BUG();
+#endif
+}
+#endif /* HAVE_ARCH_UNMAPPED_AREA */
static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
unsigned long random_factor,
struct rlimit *rlim_stack)
{
+#ifdef CONFIG_PPC_RADIX_MMU
if (mmap_is_legacy(rlim_stack)) {
mm->mmap_base = TASK_UNMAPPED_BASE;
mm->get_unmapped_area = radix__arch_get_unmapped_area;
@@ -193,13 +225,9 @@ static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
mm->mmap_base = mmap_base(random_factor, rlim_stack);
mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
}
-}
-#else
-/* dummy */
-extern void radix__arch_pick_mmap_layout(struct mm_struct *mm,
- unsigned long random_factor,
- struct rlimit *rlim_stack);
#endif
+}
+
/*
* This function, called very early during the creation of a new
* process VM image, sets up which VM layout function to use:
diff --git a/arch/powerpc/mm/mmu_context.c b/arch/powerpc/mm/mmu_context.c
index 74246536b832..1fb9c99f8679 100644
--- a/arch/powerpc/mm/mmu_context.c
+++ b/arch/powerpc/mm/mmu_context.c
@@ -18,6 +18,12 @@ static inline void switch_mm_pgdir(struct task_struct *tsk,
{
/* 32-bit keeps track of the current PGDIR in the thread struct */
tsk->thread.pgdir = mm->pgd;
+#ifdef CONFIG_PPC_BOOK3S_32
+ tsk->thread.sr0 = mm->context.sr0;
+#endif
+#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
+ tsk->thread.pid = mm->context.id;
+#endif
}
#elif defined(CONFIG_PPC_BOOK3E_64)
static inline void switch_mm_pgdir(struct task_struct *tsk,
@@ -25,6 +31,9 @@ static inline void switch_mm_pgdir(struct task_struct *tsk,
{
/* 64-bit Book3E keeps track of current PGD in the PACA */
get_paca()->pgd = mm->pgd;
+#ifdef CONFIG_PPC_KUAP
+ tsk->thread.pid = mm->context.id;
+#endif
}
#else
static inline void switch_mm_pgdir(struct task_struct *tsk,
@@ -81,7 +90,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
* context
*/
if (cpu_has_feature(CPU_FTR_ALTIVEC))
- asm volatile ("dssall");
+ asm volatile (PPC_DSSALL);
if (!new_on_cpu)
membarrier_arch_switch_mm(prev, next, tsk);
diff --git a/arch/powerpc/mm/nohash/44x.c b/arch/powerpc/mm/nohash/44x.c
index e079f26b267e..1beae802bb1c 100644
--- a/arch/powerpc/mm/nohash/44x.c
+++ b/arch/powerpc/mm/nohash/44x.c
@@ -38,7 +38,7 @@ int icache_44x_need_flush;
unsigned long tlb_47x_boltmap[1024/8];
-static void ppc44x_update_tlb_hwater(void)
+static void __init ppc44x_update_tlb_hwater(void)
{
/* The TLB miss handlers hard-code the watermark in a cmpli
* instruction to improve performance rather than loading it
@@ -122,7 +122,7 @@ static void __init ppc47x_update_boltmap(void)
/*
* "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
*/
-static void ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
+static void __init ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
{
unsigned int rA;
int bolted;
@@ -240,19 +240,3 @@ void __init mmu_init_secondary(int cpu)
}
}
#endif /* CONFIG_SMP */
-
-#ifdef CONFIG_PPC_KUEP
-void setup_kuep(bool disabled)
-{
- if (smp_processor_id() != boot_cpuid)
- return;
-
- if (disabled)
- patch_instruction_site(&patch__tlb_44x_kuep, ppc_inst(PPC_RAW_NOP()));
- else
- pr_info("Activating Kernel Userspace Execution Prevention\n");
-
- if (IS_ENABLED(CONFIG_PPC_47x) && disabled)
- patch_instruction_site(&patch__tlb_47x_kuep, ppc_inst(PPC_RAW_NOP()));
-}
-#endif
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 0df9fe29dd56..27f9186ae374 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -8,11 +8,7 @@
*/
#include <linux/memblock.h>
-#include <linux/mmu_context.h>
#include <linux/hugetlb.h>
-#include <asm/fixmap.h>
-#include <asm/code-patching.h>
-#include <asm/inst.h>
#include <mm/mmu_decl.h>
@@ -212,35 +208,6 @@ void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_32M));
}
-#ifdef CONFIG_PPC_KUEP
-void __init setup_kuep(bool disabled)
-{
- if (disabled)
- return;
-
- pr_info("Activating Kernel Userspace Execution Prevention\n");
-
- mtspr(SPRN_MI_AP, MI_APG_KUEP);
-}
-#endif
-
-#ifdef CONFIG_PPC_KUAP
-struct static_key_false disable_kuap_key;
-EXPORT_SYMBOL(disable_kuap_key);
-
-void __init setup_kuap(bool disabled)
-{
- if (disabled) {
- static_branch_enable(&disable_kuap_key);
- return;
- }
-
- pr_info("Activating Kernel Userspace Access Protection\n");
-
- mtspr(SPRN_MD_AP, MD_APG_KUAP);
-}
-#endif
-
int pud_clear_huge(pud_t *pud)
{
return 0;
diff --git a/arch/powerpc/mm/nohash/Makefile b/arch/powerpc/mm/nohash/Makefile
index b1f630d423d8..b467a25ee155 100644
--- a/arch/powerpc/mm/nohash/Makefile
+++ b/arch/powerpc/mm/nohash/Makefile
@@ -2,7 +2,7 @@
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
-obj-y += mmu_context.o tlb.o tlb_low.o
+obj-y += mmu_context.o tlb.o tlb_low.o kup.o
obj-$(CONFIG_PPC_BOOK3E_64) += tlb_low_64e.o book3e_pgtable.o
obj-$(CONFIG_40x) += 40x.o
obj-$(CONFIG_44x) += 44x.o
diff --git a/arch/powerpc/mm/nohash/book3e_pgtable.c b/arch/powerpc/mm/nohash/book3e_pgtable.c
index 77884e24281d..7d4368d055a6 100644
--- a/arch/powerpc/mm/nohash/book3e_pgtable.c
+++ b/arch/powerpc/mm/nohash/book3e_pgtable.c
@@ -10,6 +10,7 @@
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/dma.h>
+#include <asm/code-patching.h>
#include <mm/mmu_decl.h>
@@ -115,3 +116,17 @@ int __ref map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot)
smp_wmb();
return 0;
}
+
+void __patch_exception(int exc, unsigned long addr)
+{
+ unsigned int *ibase = &interrupt_base_book3e;
+
+ /*
+ * Our exceptions vectors start with a NOP and -then- a branch
+ * to deal with single stepping from userspace which stops on
+ * the second instruction. Thus we need to patch the second
+ * instruction of the exception, not the first one.
+ */
+
+ patch_branch(ibase + (exc / 4) + 1, addr, 0);
+}
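
The index arithmetic in the relocated __patch_exception() is easy to misread: exc is a byte offset into the vector area while ibase is a u32 pointer, so exc / 4 converts bytes to instruction words and + 1 steps over the leading nop described in the comment. A standalone check of that math (the vector array and offset are made up):

#include <stdio.h>

int main(void)
{
	unsigned int vectors[64];	/* stand-in for interrupt_base_book3e */
	int exc = 0x40;			/* hypothetical vector byte offset */
	unsigned int *slot = vectors + (exc / 4) + 1;	/* skip leading nop */

	printf("patching word %ld of the vector area\n",
	       (long)(slot - vectors));	/* prints 17 */
	return 0;
}
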
diff --git a/arch/powerpc/mm/nohash/fsl_book3e.c b/arch/powerpc/mm/nohash/fsl_book3e.c
index b231a54f540c..dfe715e0f70a 100644
--- a/arch/powerpc/mm/nohash/fsl_book3e.c
+++ b/arch/powerpc/mm/nohash/fsl_book3e.c
@@ -60,11 +60,6 @@ struct tlbcamrange {
phys_addr_t phys;
} tlbcam_addrs[NUM_TLBCAMS];
-unsigned long tlbcam_sz(int idx)
-{
- return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
-}
-
#ifdef CONFIG_FSL_BOOKE
/*
* Return PA for this VA if it is mapped by a CAM, or 0
@@ -264,6 +259,11 @@ void __init MMU_init_hw(void)
flush_instruction_cache();
}
+static unsigned long __init tlbcam_sz(int idx)
+{
+ return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1;
+}
+
void __init adjust_total_lowmem(void)
{
unsigned long ram;
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index 6ec978967da0..96c38f971603 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -44,9 +44,7 @@ struct regions __initdata regions;
static __init void kaslr_get_cmdline(void *fdt)
{
- int node = fdt_path_offset(fdt, "/chosen");
-
- early_init_dt_scan_chosen(node, "chosen", 1, boot_command_line);
+ early_init_dt_scan_chosen(boot_command_line);
}
static unsigned long __init rotate_xor(unsigned long hash, const void *area,
diff --git a/arch/powerpc/mm/nohash/kup.c b/arch/powerpc/mm/nohash/kup.c
new file mode 100644
index 000000000000..552becf90e97
--- /dev/null
+++ b/arch/powerpc/mm/nohash/kup.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file contains the routines for initializing kernel userspace protection
+ */
+
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/jump_label.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+#include <asm/kup.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_PPC_KUAP
+struct static_key_false disable_kuap_key;
+EXPORT_SYMBOL(disable_kuap_key);
+
+void setup_kuap(bool disabled)
+{
+ if (disabled) {
+ if (IS_ENABLED(CONFIG_40x))
+ disable_kuep = true;
+ if (smp_processor_id() == boot_cpuid)
+ static_branch_enable(&disable_kuap_key);
+ return;
+ }
+
+ pr_info("Activating Kernel Userspace Access Protection\n");
+
+ __prevent_user_access(KUAP_READ_WRITE);
+}
+#endif
diff --git a/arch/powerpc/mm/nohash/mmu_context.c b/arch/powerpc/mm/nohash/mmu_context.c
index 44b2b5e7cabe..85b048f04c56 100644
--- a/arch/powerpc/mm/nohash/mmu_context.c
+++ b/arch/powerpc/mm/nohash/mmu_context.c
@@ -33,6 +33,7 @@
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/smp.h>
+#include <asm/kup.h>
#include <mm/mmu_decl.h>
@@ -217,7 +218,7 @@ static void set_context(unsigned long id, pgd_t *pgd)
/* sync */
mb();
- } else {
+ } else if (kuap_is_disabled()) {
if (IS_ENABLED(CONFIG_40x))
mb(); /* sync */
@@ -305,6 +306,9 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
if (IS_ENABLED(CONFIG_BDI_SWITCH))
abatron_pteptrs[1] = next->pgd;
set_context(id, next->pgd);
+#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
+ tsk->thread.pid = id;
+#endif
raw_spin_unlock(&context_lock);
}
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index 647bf454a0fa..fd2c77af5c55 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -150,7 +150,6 @@ static inline int mmu_get_tsize(int psize)
*/
#ifdef CONFIG_PPC64
-int mmu_linear_psize; /* Page size used for the linear mapping */
int mmu_pte_psize; /* Page size used for PTE pages */
int mmu_vmemmap_psize; /* Page size used for the virtual mem map */
int book3e_htw_mode; /* HW tablewalk? Value is PPC_HTW_* */
@@ -433,7 +432,7 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
}
}
-static void setup_page_sizes(void)
+static void __init setup_page_sizes(void)
{
unsigned int tlb0cfg;
unsigned int tlb0ps;
@@ -571,7 +570,7 @@ out:
}
}
-static void setup_mmu_htw(void)
+static void __init setup_mmu_htw(void)
{
/*
* If we want to use HW tablewalk, enable it by patching the TLB miss
@@ -657,14 +656,6 @@ static void early_init_this_mmu(void)
static void __init early_init_mmu_global(void)
{
- /* XXX This will have to be decided at runtime, but right
- * now our boot and TLB miss code hard wires it. Ideally
- * we should find out a suitable page size and patch the
- * TLB miss code (either that or use the PACA to store
- * the value we want)
- */
- mmu_linear_psize = MMU_PAGE_1G;
-
/* XXX This should be decided at runtime based on supported
* page sizes in the TLB, but for now let's assume 16M is
* always there and a good fit (which it probably is)
diff --git a/arch/powerpc/mm/nohash/tlb_low_64e.S b/arch/powerpc/mm/nohash/tlb_low_64e.S
index 9235e720e357..8b97c4acfebf 100644
--- a/arch/powerpc/mm/nohash/tlb_low_64e.S
+++ b/arch/powerpc/mm/nohash/tlb_low_64e.S
@@ -128,6 +128,13 @@ END_BTB_FLUSH_SECTION
bne tlb_miss_kernel_bolted
+tlb_miss_user_bolted:
+#ifdef CONFIG_PPC_KUAP
+ mfspr r10,SPRN_MAS1
+ rlwinm. r10,r10,0,0x3fff0000
+ beq- tlb_miss_fault_bolted /* KUAP fault */
+#endif
+
tlb_miss_common_bolted:
/*
* This is the guts of the TLB miss handler for bolted-linear.
@@ -246,7 +253,7 @@ itlb_miss_fault_bolted:
cmpldi cr0,r15,0 /* Check for user region */
oris r11,r11,_PAGE_ACCESSED@h
- beq tlb_miss_common_bolted
+ beq tlb_miss_user_bolted
b itlb_miss_kernel_bolted
#ifdef CONFIG_PPC_FSL_BOOK3E
@@ -676,6 +683,11 @@ finish_normal_tlb_miss:
/* Check if required permissions are met */
andc. r15,r11,r14
bne- normal_tlb_miss_access_fault
+#ifdef CONFIG_PPC_KUAP
+ mfspr r11,SPRN_MAS1
+ rlwinm. r10,r11,0,0x3fff0000
+ beq- normal_tlb_miss_access_fault /* KUAP fault */
+#endif
/* Now we build the MAS:
*
@@ -689,15 +701,17 @@ finish_normal_tlb_miss:
*
* TODO: mix up code below for better scheduling
*/
- clrrdi r11,r16,12 /* Clear low crap in EA */
- rlwimi r11,r14,32-19,27,31 /* Insert WIMGE */
- mtspr SPRN_MAS2,r11
+ clrrdi r10,r16,12 /* Clear low crap in EA */
+ rlwimi r10,r14,32-19,27,31 /* Insert WIMGE */
+ mtspr SPRN_MAS2,r10
/* Check page size, if not standard, update MAS1 */
- rldicl r11,r14,64-8,64-8
- cmpldi cr0,r11,BOOK3E_PAGESZ_4K
+ rldicl r10,r14,64-8,64-8
+ cmpldi cr0,r10,BOOK3E_PAGESZ_4K
beq- 1f
+#ifndef CONFIG_PPC_KUAP
mfspr r11,SPRN_MAS1
+#endif
rlwimi r11,r14,31,21,24
rlwinm r11,r11,0,21,19
mtspr SPRN_MAS1,r11
@@ -786,7 +800,16 @@ virt_page_table_tlb_miss:
mfspr r10,SPRN_MAS1
rlwinm r10,r10,0,16,1 /* Clear TID */
mtspr SPRN_MAS1,r10
+#ifdef CONFIG_PPC_KUAP
+ b 2f
+1:
+ mfspr r10,SPRN_MAS1
+ rlwinm. r10,r10,0,0x3fff0000
+ beq- virt_page_table_tlb_miss_fault /* KUAP fault */
+2:
+#else
1:
+#endif
BEGIN_MMU_FTR_SECTION
/* Search if we already have a TLB entry for that virtual address, and
* if we do, bail out.
@@ -1027,6 +1050,11 @@ virt_page_table_tlb_miss_whacko_fault:
* avoid too much complication, it will save/restore things for us
*/
htw_tlb_miss:
+#ifdef CONFIG_PPC_KUAP
+ mfspr r10,SPRN_MAS1
+ rlwinm. r10,r10,0,0x3fff0000
+ beq- htw_tlb_miss_fault /* KUAP fault */
+#endif
/* Search if we already have a TLB entry for that virtual address, and
* if we do, bail out.
*
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 59d3cfcd7887..9d5f710d2c20 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -134,7 +134,7 @@ static int __init fake_numa_create_new_node(unsigned long end_pfn,
return 0;
}
-static void reset_numa_cpu_lookup_table(void)
+static void __init reset_numa_cpu_lookup_table(void)
{
unsigned int cpu;
@@ -372,7 +372,7 @@ void update_numa_distance(struct device_node *node)
* ibm,numa-lookup-index-table= {N, domainid1, domainid2, ..... domainidN}
* ibm,numa-distance-table = { N, 1, 2, 4, 5, 1, 6, .... N elements}
*/
-static void initialize_form2_numa_distance_lookup_table(void)
+static void __init initialize_form2_numa_distance_lookup_table(void)
{
int i, j;
struct device_node *root;
@@ -581,7 +581,7 @@ static int of_get_assoc_arrays(struct assoc_arrays *aa)
return 0;
}
-static int get_nid_and_numa_distance(struct drmem_lmb *lmb)
+static int __init get_nid_and_numa_distance(struct drmem_lmb *lmb)
{
struct assoc_arrays aa = { .arrays = NULL };
int default_nid = NUMA_NO_NODE;
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index ce9482383144..6ec5a7dd7913 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -81,9 +81,6 @@ static struct page *maybe_pte_to_page(pte_t pte)
static pte_t set_pte_filter_hash(pte_t pte)
{
- if (radix_enabled())
- return pte;
-
pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
if (pte_looks_normal(pte) && !(cpu_has_feature(CPU_FTR_COHERENT_ICACHE) ||
cpu_has_feature(CPU_FTR_NOEXECUTE))) {
@@ -112,6 +109,9 @@ static inline pte_t set_pte_filter(pte_t pte)
{
struct page *pg;
+ if (radix_enabled())
+ return pte;
+
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
return set_pte_filter_hash(pte);
@@ -144,6 +144,9 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
{
struct page *pg;
+ if (IS_ENABLED(CONFIG_PPC_BOOK3S_64))
+ return pte;
+
if (mmu_has_feature(MMU_FTR_HPTE_TABLE))
return pte;
@@ -203,6 +206,15 @@ void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
__set_pte_at(mm, addr, ptep, pte, 0);
}
+void unmap_kernel_page(unsigned long va)
+{
+ pmd_t *pmdp = pmd_off_k(va);
+ pte_t *ptep = pte_offset_kernel(pmdp, va);
+
+ pte_clear(&init_mm, va, ptep);
+ flush_tlb_kernel_range(va, va + PAGE_SIZE);
+}
+
/*
* This is called when relaxing access to a PTE. It's also called in the page
* fault path when we don't hit any of the major fault cases, ie, a minor
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 78c8cf01db5f..175aabf101e8 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -102,7 +102,8 @@ EXPORT_SYMBOL(__pte_frag_size_shift);
struct page *p4d_page(p4d_t p4d)
{
if (p4d_is_leaf(p4d)) {
- VM_WARN_ON(!p4d_huge(p4d));
+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+ VM_WARN_ON(!p4d_huge(p4d));
return pte_page(p4d_pte(p4d));
}
return virt_to_page(p4d_pgtable(p4d));
@@ -112,7 +113,8 @@ struct page *p4d_page(p4d_t p4d)
struct page *pud_page(pud_t pud)
{
if (pud_is_leaf(pud)) {
- VM_WARN_ON(!pud_huge(pud));
+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+ VM_WARN_ON(!pud_huge(pud));
return pte_page(pud_pte(pud));
}
return virt_to_page(pud_pgtable(pud));
@@ -125,7 +127,13 @@ struct page *pud_page(pud_t pud)
struct page *pmd_page(pmd_t pmd)
{
if (pmd_is_leaf(pmd)) {
- VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
+ /*
+ * vmalloc_to_page may be called on any vmap address (not only
+ * vmalloc), and it uses pmd_page() etc. when huge vmap is
+ * enabled, so these checks can't be used.
+ */
+ if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
+ VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
return pte_page(pmd_pte(pmd));
}
return virt_to_page(pmd_page_vaddr(pmd));
diff --git a/arch/powerpc/mm/ptdump/Makefile b/arch/powerpc/mm/ptdump/Makefile
index 4050cbb55acf..b533caaf0910 100644
--- a/arch/powerpc/mm/ptdump/Makefile
+++ b/arch/powerpc/mm/ptdump/Makefile
@@ -10,5 +10,5 @@ obj-$(CONFIG_PPC_BOOK3S_64) += book3s64.o
ifdef CONFIG_PTDUMP_DEBUGFS
obj-$(CONFIG_PPC_BOOK3S_32) += bats.o segment_regs.o
-obj-$(CONFIG_PPC_BOOK3S_64) += hashpagetable.o
+obj-$(CONFIG_PPC_64S_HASH_MMU) += hashpagetable.o
endif
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 32bfb215c485..8c846982766f 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -123,7 +123,7 @@ static struct ptdump_range ptdump_range[] __ro_after_init = {
void pt_dump_size(struct seq_file *m, unsigned long size)
{
- static const char units[] = "KMGTPE";
+ static const char units[] = " KMGTPE";
const char *unit = units;
/* Work out what appropriate unit to use */
@@ -176,7 +176,7 @@ static void dump_addr(struct pg_state *st, unsigned long addr)
pt_dump_seq_printf(st->seq, REG "-" REG " ", st->start_address, addr - 1);
pt_dump_seq_printf(st->seq, " " REG " ", st->start_pa);
- pt_dump_size(st->seq, (addr - st->start_address) >> 10);
+ pt_dump_size(st->seq, addr - st->start_address);
}
static void note_prot_wx(struct pg_state *st, unsigned long addr)
@@ -315,7 +315,7 @@ static int ptdump_show(struct seq_file *m, void *v)
DEFINE_SHOW_ATTRIBUTE(ptdump);
-static void build_pgtable_complete_mask(void)
+static void __init build_pgtable_complete_mask(void)
{
unsigned int i, j;
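
Two coordinated changes above: dump_addr() now passes the size in bytes rather than kilobytes, and the unit string gains a leading space so that index 0 means plain bytes, letting sub-1K ranges print sensibly. A standalone rendition (the scaling loop is inferred from the unit string, not copied from the kernel):

#include <stdio.h>

static void pt_dump_size(unsigned long size)
{
	static const char units[] = " KMGTPE";
	const char *unit = units;

	/* scale down while evenly divisible by 1024 and a bigger unit exists */
	while (!(size & 1023) && unit[1]) {
		size >>= 10;
		unit++;
	}
	printf("%9lu%c\n", size, *unit);
}

int main(void)
{
	pt_dump_size(4096);	/* "        4K" */
	pt_dump_size(512);	/* "      512 " - bytes, the new case */
	return 0;
}
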
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 82b45b1cb973..f42711f865f3 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -639,26 +639,6 @@ return_addr:
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);
-unsigned long arch_get_unmapped_area(struct file *filp,
- unsigned long addr,
- unsigned long len,
- unsigned long pgoff,
- unsigned long flags)
-{
- return slice_get_unmapped_area(addr, len, flags,
- mm_ctx_user_psize(&current->mm->context), 0);
-}
-
-unsigned long arch_get_unmapped_area_topdown(struct file *filp,
- const unsigned long addr0,
- const unsigned long len,
- const unsigned long pgoff,
- const unsigned long flags)
-{
- return slice_get_unmapped_area(addr0, len, flags,
- mm_ctx_user_psize(&current->mm->context), 1);
-}
-
unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
unsigned char *psizes;
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index 7e9b978b768e..b20a2a83a6e7 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -31,7 +31,7 @@
pr_err_ratelimited("Branch offset 0x%lx (@%u) out of range\n", offset, ctx->idx); \
return -ERANGE; \
} \
- EMIT(PPC_INST_BRANCH | (offset & 0x03fffffc)); \
+ EMIT(PPC_RAW_BRANCH(offset)); \
} while (0)
/* blr; (unconditional 'branch' with link) to absolute address */
@@ -125,8 +125,7 @@
#define COND_LE (CR0_GT | COND_CMP_FALSE)
#define SEEN_FUNC 0x20000000 /* might call external helpers */
-#define SEEN_STACK 0x40000000 /* uses BPF stack */
-#define SEEN_TAILCALL 0x80000000 /* uses tail calls */
+#define SEEN_TAILCALL 0x40000000 /* uses tail calls */
#define SEEN_VREG_MASK 0x1ff80000 /* Volatile registers r3-r12 */
#define SEEN_NVREG_MASK 0x0003ffff /* Non volatile registers r14-r31 */
@@ -151,8 +150,15 @@ struct codegen_context {
unsigned int idx;
unsigned int stack_size;
int b2p[ARRAY_SIZE(b2p)];
+ unsigned int exentry_idx;
};
+#ifdef CONFIG_PPC32
+#define BPF_FIXUP_LEN 3 /* Three instructions => 12 bytes */
+#else
+#define BPF_FIXUP_LEN 2 /* Two instructions => 8 bytes */
+#endif
+
static inline void bpf_flush_icache(void *start, void *end)
{
smp_wmb(); /* smp write barrier */
@@ -176,11 +182,14 @@ static inline void bpf_clear_seen_register(struct codegen_context *ctx, int i)
void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 func);
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
- u32 *addrs, bool extra_pass);
+ u32 *addrs, int pass);
void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx);
void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx);
void bpf_jit_realloc_regs(struct codegen_context *ctx);
+int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
+ int insn_idx, int jmp_off, int dst_reg);
+
#endif
#endif
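
The new exentry_idx and BPF_FIXUP_LEN feed an image layout that bpf_jit_comp.c (next hunk) allocates as one block: descriptor, program, fixup stubs, then the exception table. A back-of-the-envelope layout calculation, with FUNCTION_DESCR_SIZE taken as 0 (the ELFv2 case) and sizeof(struct exception_table_entry) as 8 bytes, both assumptions for this sketch:

#include <stdio.h>

int main(void)
{
	unsigned int proglen = 400;	/* hypothetical JITed body length */
	unsigned int num_exentries = 2;
	unsigned int fdescr = 0;	/* FUNCTION_DESCR_SIZE, assumed ELFv2 */
	unsigned int fixup_len = num_exentries * 2 * 4;	/* BPF_FIXUP_LEN=2, ppc64 */
	unsigned int extable_len = num_exentries * 8;	/* entry size, assumed */

	/* [ descriptor ][ program ][ fixup stubs ][ extable ] */
	printf("alloclen       = %u\n", fdescr + proglen + fixup_len + extable_len);
	printf("extable offset = %u\n", fdescr + proglen + fixup_len);
	return 0;
}
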
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 90ce75f0f1e2..56dd1f4e3e44 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -23,15 +23,15 @@ static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
}
-/* Fix the branch target addresses for subprog calls */
-static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
- struct codegen_context *ctx, u32 *addrs)
+/* Fix updated addresses (for subprog calls, ldimm64, et al) during extra pass */
+static int bpf_jit_fixup_addresses(struct bpf_prog *fp, u32 *image,
+ struct codegen_context *ctx, u32 *addrs)
{
const struct bpf_insn *insn = fp->insnsi;
bool func_addr_fixed;
u64 func_addr;
u32 tmp_idx;
- int i, ret;
+ int i, j, ret;
for (i = 0; i < fp->len; i++) {
/*
@@ -66,6 +66,23 @@ static int bpf_jit_fixup_subprog_calls(struct bpf_prog *fp, u32 *image,
* of the JITed sequence remains unchanged.
*/
ctx->idx = tmp_idx;
+ } else if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW)) {
+ tmp_idx = ctx->idx;
+ ctx->idx = addrs[i] / 4;
+#ifdef CONFIG_PPC32
+ PPC_LI32(ctx->b2p[insn[i].dst_reg] - 1, (u32)insn[i + 1].imm);
+ PPC_LI32(ctx->b2p[insn[i].dst_reg], (u32)insn[i].imm);
+ for (j = ctx->idx - addrs[i] / 4; j < 4; j++)
+ EMIT(PPC_RAW_NOP());
+#else
+ func_addr = ((u64)(u32)insn[i].imm) | (((u64)(u32)insn[i + 1].imm) << 32);
+ PPC_LI64(b2p[insn[i].dst_reg], func_addr);
+ /* overwrite rest with nops */
+ for (j = ctx->idx - addrs[i] / 4; j < 5; j++)
+ EMIT(PPC_RAW_NOP());
+#endif
+ ctx->idx = tmp_idx;
+ i++;
}
}
@@ -101,6 +118,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
struct bpf_prog *tmp_fp;
bool bpf_blinded = false;
bool extra_pass = false;
+ u32 extable_len;
+ u32 fixup_len;
if (!fp->jit_requested)
return org_fp;
@@ -131,7 +150,6 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
image = jit_data->image;
bpf_hdr = jit_data->header;
proglen = jit_data->proglen;
- alloclen = proglen + FUNCTION_DESCR_SIZE;
extra_pass = true;
goto skip_init_ctx;
}
@@ -149,7 +167,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
cgctx.stack_size = round_up(fp->aux->stack_depth, 16);
/* Scouting faux-generate pass 0 */
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
+ if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
/* We hit something illegal or unsupported. */
fp = org_fp;
goto out_addrs;
@@ -162,7 +180,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
*/
if (cgctx.seen & SEEN_TAILCALL) {
cgctx.idx = 0;
- if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
+ if (bpf_jit_build_body(fp, 0, &cgctx, addrs, 0)) {
fp = org_fp;
goto out_addrs;
}
@@ -177,8 +195,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
bpf_jit_build_prologue(0, &cgctx);
bpf_jit_build_epilogue(0, &cgctx);
+ fixup_len = fp->aux->num_exentries * BPF_FIXUP_LEN * 4;
+ extable_len = fp->aux->num_exentries * sizeof(struct exception_table_entry);
+
proglen = cgctx.idx * 4;
- alloclen = proglen + FUNCTION_DESCR_SIZE;
+ alloclen = proglen + FUNCTION_DESCR_SIZE + fixup_len + extable_len;
bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4, bpf_jit_fill_ill_insns);
if (!bpf_hdr) {
@@ -186,6 +207,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
goto out_addrs;
}
+ if (extable_len)
+ fp->aux->extable = (void *)image + FUNCTION_DESCR_SIZE + proglen + fixup_len;
+
skip_init_ctx:
code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);
@@ -193,13 +217,13 @@ skip_init_ctx:
/*
* Do not touch the prologue and epilogue as they will remain
* unchanged. Only fix the branch target address for subprog
- * calls in the body.
+ * calls in the body, and ldimm64 instructions.
*
* This does not change the offsets and lengths of the subprog
* call instruction sequences and hence, the size of the JITed
* image as well.
*/
- bpf_jit_fixup_subprog_calls(fp, code_base, &cgctx, addrs);
+ bpf_jit_fixup_addresses(fp, code_base, &cgctx, addrs);
/* There is no need to perform the usual passes. */
goto skip_codegen_passes;
@@ -210,7 +234,7 @@ skip_init_ctx:
/* Now build the prologue, body code & epilogue for real. */
cgctx.idx = 0;
bpf_jit_build_prologue(code_base, &cgctx);
- if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, extra_pass)) {
+ if (bpf_jit_build_body(fp, code_base, &cgctx, addrs, pass)) {
bpf_jit_binary_free(bpf_hdr);
fp = org_fp;
goto out_addrs;
@@ -238,7 +262,7 @@ skip_codegen_passes:
fp->bpf_func = (void *)image;
fp->jited = 1;
- fp->jited_len = alloclen;
+ fp->jited_len = proglen + FUNCTION_DESCR_SIZE;
bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));
if (!fp->is_func || extra_pass) {
@@ -262,3 +286,52 @@ out:
return fp;
}
+
+/*
+ * The caller should check for (BPF_MODE(code) == BPF_PROBE_MEM) before calling
+ * this function, as this only applies to BPF_PROBE_MEM, for now.
+ */
+int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, int pass, struct codegen_context *ctx,
+ int insn_idx, int jmp_off, int dst_reg)
+{
+ off_t offset;
+ unsigned long pc;
+ struct exception_table_entry *ex;
+ u32 *fixup;
+
+ /* Populate extable entries only in the last pass */
+ if (pass != 2)
+ return 0;
+
+ if (!fp->aux->extable ||
+ WARN_ON_ONCE(ctx->exentry_idx >= fp->aux->num_exentries))
+ return -EINVAL;
+
+ pc = (unsigned long)&image[insn_idx];
+
+ fixup = (void *)fp->aux->extable -
+ (fp->aux->num_exentries * BPF_FIXUP_LEN * 4) +
+ (ctx->exentry_idx * BPF_FIXUP_LEN * 4);
+
+ fixup[0] = PPC_RAW_LI(dst_reg, 0);
+ if (IS_ENABLED(CONFIG_PPC32))
+ fixup[1] = PPC_RAW_LI(dst_reg - 1, 0); /* clear higher 32-bit register too */
+
+ fixup[BPF_FIXUP_LEN - 1] =
+ PPC_RAW_BRANCH((long)(pc + jmp_off) - (long)&fixup[BPF_FIXUP_LEN - 1]);
+
+ ex = &fp->aux->extable[ctx->exentry_idx];
+
+ offset = pc - (long)&ex->insn;
+ if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
+ return -ERANGE;
+ ex->insn = offset;
+
+ offset = (long)fixup - (long)&ex->fixup;
+ if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
+ return -ERANGE;
+ ex->fixup = offset;
+
+ ctx->exentry_idx++;
+ return 0;
+}
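
bpf_add_extable_entry() stores self-relative offsets: each extable field holds the distance from its own address to the target, which is why both stores are checked for being negative (the extable sits after the code it covers). A minimal round-trip of that encoding, using plain user-space objects whose layout differs from a real JIT image:

#include <stdio.h>

struct ex_entry {
	int insn;
	int fixup;
};

int main(void)
{
	static unsigned int code[16];	/* pretend JITed program */
	static struct ex_entry ex;	/* pretend extable entry */
	unsigned int *pc = &code[5];	/* the potentially faulting insn */
	unsigned int *back;

	ex.insn = (int)((char *)pc - (char *)&ex.insn);		/* encode */
	back = (unsigned int *)((char *)&ex.insn + ex.insn);	/* decode */

	printf("round-trip ok: %d\n", back == pc);	/* prints 1 */
	return 0;
}
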
diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
index 8a4faa05f9e4..cf8dd8aea386 100644
--- a/arch/powerpc/net/bpf_jit_comp32.c
+++ b/arch/powerpc/net/bpf_jit_comp32.c
@@ -191,6 +191,9 @@ void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, u64 fun
if (image && rel < 0x2000000 && rel >= -0x2000000) {
PPC_BL_ABS(func);
+ EMIT(PPC_RAW_NOP());
+ EMIT(PPC_RAW_NOP());
+ EMIT(PPC_RAW_NOP());
} else {
/* Load function address into r0 */
EMIT(PPC_RAW_LIS(_R0, IMM_H(func)));
@@ -268,7 +271,7 @@ static int bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 o
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
- u32 *addrs, bool extra_pass)
+ u32 *addrs, int pass)
{
const struct bpf_insn *insn = fp->insnsi;
int flen = fp->len;
@@ -284,11 +287,14 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
u32 src_reg = bpf_to_ppc(ctx, insn[i].src_reg);
u32 src_reg_h = src_reg - 1;
u32 tmp_reg = bpf_to_ppc(ctx, TMP_REG);
+ u32 size = BPF_SIZE(code);
s16 off = insn[i].off;
s32 imm = insn[i].imm;
bool func_addr_fixed;
u64 func_addr;
u32 true_cond;
+ u32 tmp_idx;
+ int j;
/*
* addrs[] maps a BPF bytecode address into a real offset from
@@ -812,23 +818,91 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
* BPF_LDX
*/
case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */
- EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
- if (!fp->aux->verifier_zext)
- EMIT(PPC_RAW_LI(dst_reg_h, 0));
- break;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_B:
case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */
- EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
- if (!fp->aux->verifier_zext)
- EMIT(PPC_RAW_LI(dst_reg_h, 0));
- break;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_H:
case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */
- EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
- if (!fp->aux->verifier_zext)
- EMIT(PPC_RAW_LI(dst_reg_h, 0));
- break;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_W:
case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */
- EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
- EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
+ case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ /*
+ * A PTR_TO_BTF_ID pointer accessed with BPF_PROBE_MEM mode can be a valid
+ * kernel pointer or NULL, but never a userspace address, so execute the
+ * BPF_PROBE_MEM load only if addr is a kernel address (see
+ * is_kernel_addr()); otherwise set dst_reg=0 and move on.
+ */
+ if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ PPC_LI32(_R0, TASK_SIZE - off);
+ EMIT(PPC_RAW_CMPLW(src_reg, _R0));
+ PPC_BCC(COND_GT, (ctx->idx + 5) * 4);
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ /*
+ * For the BPF_DW case, "li reg_h,0" would be needed when
+ * !fp->aux->verifier_zext. Emit a NOP otherwise.
+ *
+ * Note that "li reg_h,0" is emitted for the BPF_B/H/W cases,
+ * if necessary. So, jump there instead of emitting an
+ * additional "li reg_h,0" instruction.
+ */
+ if (size == BPF_DW && !fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+ else
+ EMIT(PPC_RAW_NOP());
+ /*
+ * Need to jump two instructions instead of one for BPF_DW case
+ * as there are two load instructions for dst_reg_h & dst_reg
+ * respectively.
+ */
+ if (size == BPF_DW)
+ PPC_JMP((ctx->idx + 3) * 4);
+ else
+ PPC_JMP((ctx->idx + 2) * 4);
+ }
+
+ switch (size) {
+ case BPF_B:
+ EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+ break;
+ case BPF_H:
+ EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
+ break;
+ case BPF_W:
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+ break;
+ case BPF_DW:
+ EMIT(PPC_RAW_LWZ(dst_reg_h, src_reg, off));
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off + 4));
+ break;
+ }
+
+ if (size != BPF_DW && !fp->aux->verifier_zext)
+ EMIT(PPC_RAW_LI(dst_reg_h, 0));
+
+ if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ int insn_idx = ctx->idx - 1;
+ int jmp_off = 4;
+
+ /*
+ * In the BPF_DW case, two lwz instructions are emitted, one
+ * for the higher 32 bits and another for the lower 32 bits. So,
+ * set ex->insn to the first of the two and jump over both
+ * instructions in the fixup.
+ *
+ * Similarly, with !verifier_zext, two instructions are
+ * emitted for the BPF_B/H/W cases. So, set ex->insn to the
+ * instruction that could fault and skip over both
+ * instructions.
+ */
+ if (size == BPF_DW || !fp->aux->verifier_zext) {
+ insn_idx -= 1;
+ jmp_off += 4;
+ }
+
+ ret = bpf_add_extable_entry(fp, image, pass, ctx, insn_idx,
+ jmp_off, dst_reg);
+ if (ret)
+ return ret;
+ }
break;
/*
@@ -836,8 +910,12 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
* 16 byte instruction that uses two 'struct bpf_insn'
*/
case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
+ tmp_idx = ctx->idx;
PPC_LI32(dst_reg_h, (u32)insn[i + 1].imm);
PPC_LI32(dst_reg, (u32)insn[i].imm);
+ /* padding to allow full 4 instructions for later patching */
+ for (j = ctx->idx - tmp_idx; j < 4; j++)
+ EMIT(PPC_RAW_NOP());
/* Adjust for two bpf instructions */
addrs[++i] = ctx->idx * 4;
break;
@@ -862,7 +940,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
case BPF_JMP | BPF_CALL:
ctx->seen |= SEEN_FUNC;
- ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
+ ret = bpf_jit_get_func_addr(fp, &insn[i], false,
&func_addr, &func_addr_fixed);
if (ret < 0)
return ret;
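The BPF_PROBE_MEM handling in this hunk boils down to: compare the access against TASK_SIZE, perform the load if the address is a kernel address, and otherwise zero the destination and jump past the load. A plain-C sketch of that guard, with a made-up TASK_SIZE value standing in for the kernel's (the JIT emits cmplw plus a conditional branch where this uses an if):

```c
/*
 * Hedged sketch of the BPF_PROBE_MEM guard emitted above, as plain C
 * rather than JITed PPC instructions. DEMO_TASK_SIZE is illustrative.
 */
#include <stdint.h>

#define DEMO_TASK_SIZE	0xb0000000UL	/* illustrative user/kernel split */

static uint32_t demo_probe_mem_load32(uintptr_t src, int16_t off)
{
	/* Load only when src + off is a kernel address... */
	if (src > DEMO_TASK_SIZE - off)
		return *(volatile uint32_t *)(src + off);

	/* ...otherwise set dst_reg = 0 and move on. */
	return 0;
}
```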
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 8571aafcc9e1..e1e8c934308a 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -297,7 +297,7 @@ asm (
/* Assemble the body code between the prologue & epilogue */
int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *ctx,
- u32 *addrs, bool extra_pass)
+ u32 *addrs, int pass)
{
enum stf_barrier_type stf_barrier = stf_barrier_type_get();
const struct bpf_insn *insn = fp->insnsi;
@@ -311,6 +311,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
u32 code = insn[i].code;
u32 dst_reg = b2p[insn[i].dst_reg];
u32 src_reg = b2p[insn[i].src_reg];
+ u32 size = BPF_SIZE(code);
s16 off = insn[i].off;
s32 imm = insn[i].imm;
bool func_addr_fixed;
@@ -318,6 +319,7 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
u64 imm64;
u32 true_cond;
u32 tmp_idx;
+ int j;
/*
* addrs[] maps a BPF bytecode address into a real offset from
@@ -632,17 +634,21 @@ bpf_alu32_trunc:
EMIT(PPC_RAW_MR(dst_reg, b2p[TMP_REG_1]));
break;
case 64:
- /*
- * Way easier and faster(?) to store the value
- * into stack and then use ldbrx
- *
- * ctx->seen will be reliable in pass2, but
- * the instructions generated will remain the
- * same across all passes
- */
+ /* Store the value to the stack and then use byte-reversed loads */
PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx)));
- EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
+ if (cpu_has_feature(CPU_FTR_ARCH_206)) {
+ EMIT(PPC_RAW_LDBRX(dst_reg, 0, b2p[TMP_REG_1]));
+ } else {
+ EMIT(PPC_RAW_LWBRX(dst_reg, 0, b2p[TMP_REG_1]));
+ if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
+ EMIT(PPC_RAW_SLDI(dst_reg, dst_reg, 32));
+ EMIT(PPC_RAW_LI(b2p[TMP_REG_2], 4));
+ EMIT(PPC_RAW_LWBRX(b2p[TMP_REG_2], b2p[TMP_REG_2], b2p[TMP_REG_1]));
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ EMIT(PPC_RAW_SLDI(b2p[TMP_REG_2], b2p[TMP_REG_2], 32));
+ EMIT(PPC_RAW_OR(dst_reg, dst_reg, b2p[TMP_REG_2]));
+ }
break;
}
break;
@@ -778,25 +784,66 @@ emit_clear:
*/
/* dst = *(u8 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_B:
- EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
- if (insn_is_zext(&insn[i + 1]))
- addrs[++i] = ctx->idx * 4;
- break;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_B:
/* dst = *(u16 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_H:
- EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
- if (insn_is_zext(&insn[i + 1]))
- addrs[++i] = ctx->idx * 4;
- break;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_H:
/* dst = *(u32 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_W:
- EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
- if (insn_is_zext(&insn[i + 1]))
- addrs[++i] = ctx->idx * 4;
- break;
+ case BPF_LDX | BPF_PROBE_MEM | BPF_W:
/* dst = *(u64 *)(ul) (src + off) */
case BPF_LDX | BPF_MEM | BPF_DW:
- PPC_BPF_LL(dst_reg, src_reg, off);
+ case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
+ /*
+ * As a PTR_TO_BTF_ID that uses BPF_PROBE_MEM mode could be either a valid
+ * kernel pointer or NULL, but not a userspace address, execute the
+ * BPF_PROBE_MEM load only if addr is a kernel address (see is_kernel_addr());
+ * otherwise set dst_reg=0 and move on.
+ */
+ if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ EMIT(PPC_RAW_ADDI(b2p[TMP_REG_1], src_reg, off));
+ if (IS_ENABLED(CONFIG_PPC_BOOK3E_64))
+ PPC_LI64(b2p[TMP_REG_2], 0x8000000000000000ul);
+ else /* BOOK3S_64 */
+ PPC_LI64(b2p[TMP_REG_2], PAGE_OFFSET);
+ EMIT(PPC_RAW_CMPLD(b2p[TMP_REG_1], b2p[TMP_REG_2]));
+ PPC_BCC(COND_GT, (ctx->idx + 4) * 4);
+ EMIT(PPC_RAW_LI(dst_reg, 0));
+ /*
+ * Check if 'off' is word-aligned because PPC_BPF_LL()
+ * (BPF_DW case) generates two instructions if 'off' is not
+ * word-aligned and one instruction otherwise.
+ */
+ if (BPF_SIZE(code) == BPF_DW && (off & 3))
+ PPC_JMP((ctx->idx + 3) * 4);
+ else
+ PPC_JMP((ctx->idx + 2) * 4);
+ }
+
+ switch (size) {
+ case BPF_B:
+ EMIT(PPC_RAW_LBZ(dst_reg, src_reg, off));
+ break;
+ case BPF_H:
+ EMIT(PPC_RAW_LHZ(dst_reg, src_reg, off));
+ break;
+ case BPF_W:
+ EMIT(PPC_RAW_LWZ(dst_reg, src_reg, off));
+ break;
+ case BPF_DW:
+ PPC_BPF_LL(dst_reg, src_reg, off);
+ break;
+ }
+
+ if (size != BPF_DW && insn_is_zext(&insn[i + 1]))
+ addrs[++i] = ctx->idx * 4;
+
+ if (BPF_MODE(code) == BPF_PROBE_MEM) {
+ ret = bpf_add_extable_entry(fp, image, pass, ctx, ctx->idx - 1,
+ 4, dst_reg);
+ if (ret)
+ return ret;
+ }
break;
/*
@@ -806,9 +853,13 @@ emit_clear:
case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
imm64 = ((u64)(u32) insn[i].imm) |
(((u64)(u32) insn[i+1].imm) << 32);
+ tmp_idx = ctx->idx;
+ PPC_LI64(dst_reg, imm64);
+ /* padding to allow full 5 instructions for later patching */
+ for (j = ctx->idx - tmp_idx; j < 5; j++)
+ EMIT(PPC_RAW_NOP());
/* Adjust for two bpf instructions */
addrs[++i] = ctx->idx * 4;
- PPC_LI64(dst_reg, imm64);
break;
/*
@@ -831,7 +882,7 @@ emit_clear:
case BPF_JMP | BPF_CALL:
ctx->seen |= SEEN_FUNC;
- ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass,
+ ret = bpf_jit_get_func_addr(fp, &insn[i], false,
&func_addr, &func_addr_fixed);
if (ret < 0)
return ret;
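The BSWAP-64 fallback added above (for CPUs without ldbrx, i.e. before ISA 2.06) byte-reverses each 32-bit half with lwbrx and recombines them with the halves exchanged. The equivalent transformation in plain C:

```c
/*
 * Hedged sketch of the ldbrx fallback: swap each 32-bit half, then
 * recombine with the halves exchanged; the swapped low half becomes
 * the new high half, and vice versa.
 */
#include <stdint.h>

static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

static uint64_t bswap64_from_halves(uint64_t v)
{
	uint32_t lo = (uint32_t)v, hi = (uint32_t)(v >> 32);

	return ((uint64_t)bswap32(lo) << 32) | bswap32(hi);
}
```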
diff --git a/arch/powerpc/perf/8xx-pmu.c b/arch/powerpc/perf/8xx-pmu.c
index f970d1510d3d..4738c4dbf567 100644
--- a/arch/powerpc/perf/8xx-pmu.c
+++ b/arch/powerpc/perf/8xx-pmu.c
@@ -153,7 +153,7 @@ static void mpc8xx_pmu_read(struct perf_event *event)
static void mpc8xx_pmu_del(struct perf_event *event, int flags)
{
- struct ppc_inst insn = ppc_inst(PPC_RAW_MFSPR(10, SPRN_SPRG_SCRATCH2));
+ ppc_inst_t insn = ppc_inst(PPC_RAW_MFSPR(10, SPRN_SPRG_SCRATCH2));
mpc8xx_pmu_read(event);
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 73e62e9b179b..b5b42cf0a703 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -17,6 +17,7 @@
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>
+#include <asm/hw_irq.h>
#include <asm/interrupt.h>
#ifdef CONFIG_PPC64
@@ -775,6 +776,34 @@ static void pmao_restore_workaround(bool ebb)
mtspr(SPRN_PMC6, pmcs[5]);
}
+/*
+ * If the perf subsystem wants performance monitor interrupts as soon as
+ * possible (e.g., to sample the instruction address and stack chain),
+ * this should return true. The IRQ masking code can then enable MSR[EE]
+ * in some places (e.g., interrupt handlers), which allows PMI interrupts
+ * through to improve the accuracy of profiles, at the cost of some performance.
+ *
+ * The PMU counters can be enabled by other means (e.g., sysfs raw SPR
+ * access), but in that case there is no need for prompt PMI handling.
+ *
+ * This currently returns true if any perf counter is being used. It
+ * could possibly return false if only events are being counted rather than
+ * samples being taken, but for now this is good enough.
+ */
+bool power_pmu_wants_prompt_pmi(void)
+{
+ struct cpu_hw_events *cpuhw;
+
+ /*
+ * This could simply test local_paca->pmcregs_in_use if that were not
+ * under ifdef KVM.
+ */
+ if (!ppmu)
+ return false;
+
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
+ return cpuhw->n_events;
+}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
@@ -857,6 +886,19 @@ static void write_pmc(int idx, unsigned long val)
}
}
+static int any_pmc_overflown(struct cpu_hw_events *cpuhw)
+{
+ int i, idx;
+
+ for (i = 0; i < cpuhw->n_events; i++) {
+ idx = cpuhw->event[i]->hw.idx;
+ if ((idx) && ((int)read_pmc(idx) < 0))
+ return idx;
+ }
+
+ return 0;
+}
+
/* Called from sysrq_handle_showregs() */
void perf_event_print_debug(void)
{
@@ -1281,11 +1323,13 @@ static void power_pmu_disable(struct pmu *pmu)
/*
* Set the 'freeze counters' bit, clear EBE/BHRBA/PMCC/PMAO/FC56
+ * Also clear PMXE to disable PMIs getting triggered in some
+ * corner cases during PMU disable.
*/
val = mmcr0 = mfspr(SPRN_MMCR0);
val |= MMCR0_FC;
val &= ~(MMCR0_EBE | MMCR0_BHRBA | MMCR0_PMCC | MMCR0_PMAO |
- MMCR0_FC56);
+ MMCR0_PMXE | MMCR0_FC56);
/* Set mmcr0 PMCCEXT for p10 */
if (ppmu->flags & PPMU_ARCH_31)
val |= MMCR0_PMCCEXT;
@@ -1299,6 +1343,34 @@ static void power_pmu_disable(struct pmu *pmu)
mb();
isync();
+ /*
+ * Some corner cases could clear the PMU counter overflow
+ * while a masked PMI is pending. One such case is when
+ * a PMI happens during interrupt replay and perf counter
+ * values are cleared by PMU callbacks before replay.
+ *
+ * If any PMC corresponding to the active PMU events has
+ * overflowed, disable the interrupt by clearing the paca
+ * bit for PMI since we are disabling the PMU now.
+ * Otherwise, warn if a PMI is pending but no counter is
+ * found overflown.
+ */
+ if (any_pmc_overflown(cpuhw)) {
+ /*
+ * Since power_pmu_disable runs under local_irq_save, it
+ * could happen that code hits a PMC overflow without PMI
+ * pending in paca. Hence only clear PMI pending if it was
+ * set.
+ *
+ * If a PMI is pending, then MSR[EE] must be disabled (because
+ * the masked PMI handler disables EE). So it is safe to
+ * call clear_pmi_irq_pending().
+ */
+ if (pmi_irq_pending())
+ clear_pmi_irq_pending();
+ } else
+ WARN_ON(pmi_irq_pending());
+
val = mmcra = cpuhw->mmcr.mmcra;
/*
@@ -1390,6 +1462,15 @@ static void power_pmu_enable(struct pmu *pmu)
* (possibly updated for removal of events).
*/
if (!cpuhw->n_added) {
+ /*
+ * If there is any active event with an overflown PMC
+ * value, set back PACA_IRQ_PMI which would have been
+ * cleared in power_pmu_disable().
+ */
+ hard_irq_disable();
+ if (any_pmc_overflown(cpuhw))
+ set_pmi_irq_pending();
+
mtspr(SPRN_MMCRA, cpuhw->mmcr.mmcra & ~MMCRA_SAMPLE_ENABLE);
mtspr(SPRN_MMCR1, cpuhw->mmcr.mmcr1);
if (ppmu->flags & PPMU_ARCH_31)
@@ -2337,6 +2418,14 @@ static void __perf_event_interrupt(struct pt_regs *regs)
break;
}
}
+
+ /*
+ * Clear PACA_IRQ_PMI in case it was set by
+ * set_pmi_irq_pending() when PMU was enabled
+ * after accounting for interrupts.
+ */
+ clear_pmi_irq_pending();
+
if (!active)
/* reset non active counters that have overflowed */
write_pmc(i + 1, 0);
@@ -2356,6 +2445,13 @@ static void __perf_event_interrupt(struct pt_regs *regs)
}
}
}
+
+ /*
+ * During system-wide profiling, or while a specific CPU is monitored for an
+ * event, some corner cases could cause a PMC to overflow in the idle path. This
+ * will trigger a PMI after waking up from idle. Since counter values are _not_
+ * saved/restored in the idle path, this can lead to the "Can't find PMC" message below.
+ */
if (unlikely(!found) && !arch_irq_disabled_regs(regs))
printk_ratelimited(KERN_WARNING "Can't find PMC that caused IRQ\n");
@@ -2392,7 +2488,7 @@ static int power_pmu_prepare_cpu(unsigned int cpu)
return 0;
}
-int register_power_pmu(struct power_pmu *pmu)
+int __init register_power_pmu(struct power_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
@@ -2419,8 +2515,24 @@ int register_power_pmu(struct power_pmu *pmu)
}
#ifdef CONFIG_PPC64
+static bool pmu_override = false;
+static unsigned long pmu_override_val;
+static void do_pmu_override(void *data)
+{
+ ppc_set_pmu_inuse(1);
+ if (pmu_override_val)
+ mtspr(SPRN_MMCR1, pmu_override_val);
+ mtspr(SPRN_MMCR0, mfspr(SPRN_MMCR0) & ~MMCR0_FC);
+}
+
static int __init init_ppc64_pmu(void)
{
+ if (cpu_has_feature(CPU_FTR_HVMODE) && pmu_override) {
+ pr_warn("disabling perf due to pmu_override= command line option.\n");
+ on_each_cpu(do_pmu_override, NULL, 1);
+ return 0;
+ }
+
/* run through all the pmu drivers one at a time */
if (!init_power5_pmu())
return 0;
@@ -2442,4 +2554,23 @@ static int __init init_ppc64_pmu(void)
return init_generic_compat_pmu();
}
early_initcall(init_ppc64_pmu);
+
+static int __init pmu_setup(char *str)
+{
+ unsigned long val;
+
+ if (!early_cpu_has_feature(CPU_FTR_HVMODE))
+ return 0;
+
+ pmu_override = true;
+
+ if (kstrtoul(str, 0, &val))
+ val = 0;
+
+ pmu_override_val = val;
+
+ return 1;
+}
+__setup("pmu_override=", pmu_setup);
+
#endif
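any_pmc_overflown() above relies on these PMCs being 32-bit counters that raise a PMI once bit 31 becomes set, so a pending overflow is simply a negative signed value. A one-line sketch of the test:

```c
/*
 * Hedged sketch: a PMC that has passed 0x80000000 (bit 31 set) has a
 * PMI pending, so the overflow test is a signed comparison of the
 * raw 32-bit counter value, as in any_pmc_overflown().
 */
#include <stdbool.h>
#include <stdint.h>

static bool demo_pmc_overflowed(uint32_t pmc_val)
{
	return (int32_t)pmc_val < 0;	/* bit 31 set => overflow pending */
}
```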
diff --git a/arch/powerpc/perf/generic-compat-pmu.c b/arch/powerpc/perf/generic-compat-pmu.c
index 695975227e60..b6e25f75109d 100644
--- a/arch/powerpc/perf/generic-compat-pmu.c
+++ b/arch/powerpc/perf/generic-compat-pmu.c
@@ -307,7 +307,7 @@ static struct power_pmu generic_compat_pmu = {
.attr_groups = generic_compat_pmu_attr_groups,
};
-int init_generic_compat_pmu(void)
+int __init init_generic_compat_pmu(void)
{
int rc = 0;
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 1816f560a465..1e8aa934e37e 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -756,7 +756,7 @@ static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
}
if (calc_ev_end > ev_end) {
- pr_warn("event %zu exceeds it's own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
+ pr_warn("event %zu exceeds its own length: event=%pK, end=%pK, offset=%zu, calc_ev_end=%pK\n",
event_idx, event, ev_end, offset, calc_ev_end);
return -1;
}
diff --git a/arch/powerpc/perf/internal.h b/arch/powerpc/perf/internal.h
index 80bbf72bfec2..4c18b5504326 100644
--- a/arch/powerpc/perf/internal.h
+++ b/arch/powerpc/perf/internal.h
@@ -2,12 +2,12 @@
//
// Copyright 2019 Madhavan Srinivasan, IBM Corporation.
-extern int init_ppc970_pmu(void);
-extern int init_power5_pmu(void);
-extern int init_power5p_pmu(void);
-extern int init_power6_pmu(void);
-extern int init_power7_pmu(void);
-extern int init_power8_pmu(void);
-extern int init_power9_pmu(void);
-extern int init_power10_pmu(void);
-extern int init_generic_compat_pmu(void);
+int __init init_ppc970_pmu(void);
+int __init init_power5_pmu(void);
+int __init init_power5p_pmu(void);
+int __init init_power6_pmu(void);
+int __init init_power7_pmu(void);
+int __init init_power8_pmu(void);
+int __init init_power9_pmu(void);
+int __init init_power10_pmu(void);
+int __init init_generic_compat_pmu(void);
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 7ea873ab2e6f..4037ea652522 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -220,22 +220,37 @@ static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
/* Nothing to do */
break;
case 1:
- ret = PH(LVL, L1);
+ ret = PH(LVL, L1) | LEVEL(L1) | P(SNOOP, HIT);
break;
case 2:
- ret = PH(LVL, L2);
+ ret = PH(LVL, L2) | LEVEL(L2) | P(SNOOP, HIT);
break;
case 3:
- ret = PH(LVL, L3);
+ ret = PH(LVL, L3) | LEVEL(L3) | P(SNOOP, HIT);
break;
case 4:
- if (sub_idx <= 1)
- ret = PH(LVL, LOC_RAM);
- else if (sub_idx > 1 && sub_idx <= 2)
- ret = PH(LVL, REM_RAM1);
- else
- ret = PH(LVL, REM_RAM2);
- ret |= P(SNOOP, HIT);
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ ret = P(SNOOP, HIT);
+
+ if (sub_idx == 1)
+ ret |= PH(LVL, LOC_RAM) | LEVEL(RAM);
+ else if (sub_idx == 2 || sub_idx == 3)
+ ret |= P(LVL, HIT) | LEVEL(PMEM);
+ else if (sub_idx == 4)
+ ret |= PH(LVL, REM_RAM1) | REM | LEVEL(RAM) | P(HOPS, 2);
+ else if (sub_idx == 5 || sub_idx == 7)
+ ret |= P(LVL, HIT) | LEVEL(PMEM) | REM;
+ else if (sub_idx == 6)
+ ret |= PH(LVL, REM_RAM2) | REM | LEVEL(RAM) | P(HOPS, 3);
+ } else {
+ if (sub_idx <= 1)
+ ret = PH(LVL, LOC_RAM);
+ else if (sub_idx > 1 && sub_idx <= 2)
+ ret = PH(LVL, REM_RAM1);
+ else
+ ret = PH(LVL, REM_RAM2);
+ ret |= P(SNOOP, HIT);
+ }
break;
case 5:
if (cpu_has_feature(CPU_FTR_ARCH_31)) {
@@ -261,11 +276,26 @@ static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
}
break;
case 6:
- ret = PH(LVL, REM_CCE2);
- if ((sub_idx == 0) || (sub_idx == 2))
- ret |= P(SNOOP, HIT);
- else if ((sub_idx == 1) || (sub_idx == 3))
- ret |= P(SNOOP, HITM);
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ if (sub_idx == 0)
+ ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM |
+ P(SNOOP, HIT) | P(HOPS, 2);
+ else if (sub_idx == 1)
+ ret = PH(LVL, REM_CCE1) | LEVEL(ANY_CACHE) | REM |
+ P(SNOOP, HITM) | P(HOPS, 2);
+ else if (sub_idx == 2)
+ ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM |
+ P(SNOOP, HIT) | P(HOPS, 3);
+ else if (sub_idx == 3)
+ ret = PH(LVL, REM_CCE2) | LEVEL(ANY_CACHE) | REM |
+ P(SNOOP, HITM) | P(HOPS, 3);
+ } else {
+ ret = PH(LVL, REM_CCE2);
+ if (sub_idx == 0 || sub_idx == 2)
+ ret |= P(SNOOP, HIT);
+ else if (sub_idx == 1 || sub_idx == 3)
+ ret |= P(SNOOP, HITM);
+ }
break;
case 7:
ret = PM(LVL, L1);
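The P()/PH()/LEVEL()/HOPS values being OR-ed together above each occupy a distinct bit-field of the perf memory data-source word reported to userspace. A generic sketch of that composition, with made-up field positions and values rather than the kernel's PERF_MEM_* constants:

```c
/*
 * Hedged sketch of composing a data-source word from independent
 * bit-fields, in the style of the P()/PH()/LEVEL() helpers above.
 * Field positions and values here are invented for illustration.
 */
#include <stdint.h>

#define DEMO_FIELD(val, shift)	((uint64_t)(val) << (shift))

enum { DEMO_LVL_SHIFT = 5, DEMO_SNOOP_SHIFT = 19, DEMO_HOPS_SHIFT = 43 };
enum { DEMO_LVL_RAM = 0x08, DEMO_SNOOP_HIT = 0x04, DEMO_HOPS_2 = 0x3 };

/* e.g., "remote RAM, snoop hit, two hops away" as one OR-ed word */
static uint64_t demo_remote_ram_hit(void)
{
	return DEMO_FIELD(DEMO_LVL_RAM, DEMO_LVL_SHIFT) |
	       DEMO_FIELD(DEMO_SNOOP_HIT, DEMO_SNOOP_SHIFT) |
	       DEMO_FIELD(DEMO_HOPS_2, DEMO_HOPS_SHIFT);
}
```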
diff --git a/arch/powerpc/perf/power10-pmu.c b/arch/powerpc/perf/power10-pmu.c
index 9dd75f385837..0975ad0b42c4 100644
--- a/arch/powerpc/perf/power10-pmu.c
+++ b/arch/powerpc/perf/power10-pmu.c
@@ -592,7 +592,7 @@ static struct power_pmu power10_pmu = {
.check_attr_config = power10_check_attr_config,
};
-int init_power10_pmu(void)
+int __init init_power10_pmu(void)
{
unsigned int pvr;
int rc;
diff --git a/arch/powerpc/perf/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c
index 18732267993a..753b4740ef64 100644
--- a/arch/powerpc/perf/power5+-pmu.c
+++ b/arch/powerpc/perf/power5+-pmu.c
@@ -677,7 +677,7 @@ static struct power_pmu power5p_pmu = {
.cache_events = &power5p_cache_events,
};
-int init_power5p_pmu(void)
+int __init init_power5p_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
(strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5+")
diff --git a/arch/powerpc/perf/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c
index cb611c1e7abe..1f83c4cba0aa 100644
--- a/arch/powerpc/perf/power5-pmu.c
+++ b/arch/powerpc/perf/power5-pmu.c
@@ -618,7 +618,7 @@ static struct power_pmu power5_pmu = {
.flags = PPMU_HAS_SSLOT,
};
-int init_power5_pmu(void)
+int __init init_power5_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power5"))
diff --git a/arch/powerpc/perf/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c
index 69ef38216418..aec746f86804 100644
--- a/arch/powerpc/perf/power6-pmu.c
+++ b/arch/powerpc/perf/power6-pmu.c
@@ -539,7 +539,7 @@ static struct power_pmu power6_pmu = {
.cache_events = &power6_cache_events,
};
-int init_power6_pmu(void)
+int __init init_power6_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power6"))
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 894c17f9a762..99b5ba314ea7 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -445,7 +445,7 @@ static struct power_pmu power7_pmu = {
.cache_events = &power7_cache_events,
};
-int init_power7_pmu(void)
+int __init init_power7_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power7"))
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 5282e8415ddf..f21194b5604a 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -378,7 +378,7 @@ static struct power_pmu power8_pmu = {
.bhrb_nr = 32,
};
-int init_power8_pmu(void)
+int __init init_power8_pmu(void)
{
int rc;
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index ff3382140d7e..4b7c17e36100 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -452,7 +452,7 @@ static struct power_pmu power9_pmu = {
.check_attr_config = power9_check_attr_config,
};
-int init_power9_pmu(void)
+int __init init_power9_pmu(void)
{
int rc = 0;
unsigned int pvr = mfspr(SPRN_PVR);
diff --git a/arch/powerpc/perf/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c
index 1f8263785286..09802482ba72 100644
--- a/arch/powerpc/perf/ppc970-pmu.c
+++ b/arch/powerpc/perf/ppc970-pmu.c
@@ -489,7 +489,7 @@ static struct power_pmu ppc970_pmu = {
.flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING,
};
-int init_ppc970_pmu(void)
+int __init init_ppc970_pmu(void)
{
if (!cur_cpu_spec->oprofile_cpu_type ||
(strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/970")
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig
index e3e5217c9822..614ea6dc994c 100644
--- a/arch/powerpc/platforms/40x/Kconfig
+++ b/arch/powerpc/platforms/40x/Kconfig
@@ -23,7 +23,6 @@ config KILAUEA
select PPC4xx_PCI_EXPRESS
select FORCE_PCI
select PCI_MSI
- select PPC4xx_MSI
help
This option enables support for the AMCC PPC405EX evaluation board.
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 83975ef50975..25b80cd558f8 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -23,7 +23,6 @@ config BLUESTONE
select APM821xx
select FORCE_PCI
select PCI_MSI
- select PPC4xx_MSI
select PPC4xx_PCI_EXPRESS
select IBM_EMAC_RGMII if IBM_EMAC
help
@@ -73,7 +72,6 @@ config KATMAI
select FORCE_PCI
select PPC4xx_PCI_EXPRESS
select PCI_MSI
- select PPC4xx_MSI
help
This option enables support for the AMCC PPC440SPe evaluation board.
@@ -115,7 +113,6 @@ config CANYONLANDS
select FORCE_PCI
select PPC4xx_PCI_EXPRESS
select PCI_MSI
- select PPC4xx_MSI
select IBM_EMAC_RGMII if IBM_EMAC
select IBM_EMAC_ZMII if IBM_EMAC
help
@@ -141,7 +138,6 @@ config REDWOOD
select FORCE_PCI
select PPC4xx_PCI_EXPRESS
select PCI_MSI
- select PPC4xx_MSI
help
This option enables support for the AMCC PPC460SX Redwood board.
diff --git a/arch/powerpc/platforms/44x/fsp2.c b/arch/powerpc/platforms/44x/fsp2.c
index 823397c802de..af13a59d2f60 100644
--- a/arch/powerpc/platforms/44x/fsp2.c
+++ b/arch/powerpc/platforms/44x/fsp2.c
@@ -197,7 +197,7 @@ static irqreturn_t rst_wrn_handler(int irq, void *data) {
}
}
-static void node_irq_request(const char *compat, irq_handler_t errirq_handler)
+static void __init node_irq_request(const char *compat, irq_handler_t errirq_handler)
{
struct device_node *np;
unsigned int irq;
@@ -222,7 +222,7 @@ static void node_irq_request(const char *compat, irq_handler_t errirq_handler)
}
}
-static void critical_irq_setup(void)
+static void __init critical_irq_setup(void)
{
node_irq_request(FSP2_CMU_ERR, cmu_err_handler);
node_irq_request(FSP2_BUS_ERR, bus_err_handler);
diff --git a/arch/powerpc/platforms/4xx/Makefile b/arch/powerpc/platforms/4xx/Makefile
index d009d2e0b9e8..2071a0abe09b 100644
--- a/arch/powerpc/platforms/4xx/Makefile
+++ b/arch/powerpc/platforms/4xx/Makefile
@@ -3,6 +3,5 @@ obj-y += uic.o machine_check.o
obj-$(CONFIG_4xx_SOC) += soc.o
obj-$(CONFIG_PCI) += pci.o
obj-$(CONFIG_PPC4xx_HSTA_MSI) += hsta_msi.o
-obj-$(CONFIG_PPC4xx_MSI) += msi.o
obj-$(CONFIG_PPC4xx_CPM) += cpm.o
obj-$(CONFIG_PPC4xx_GPIO) += gpio.o
diff --git a/arch/powerpc/platforms/4xx/cpm.c b/arch/powerpc/platforms/4xx/cpm.c
index ae8b812c9202..2571841625a2 100644
--- a/arch/powerpc/platforms/4xx/cpm.c
+++ b/arch/powerpc/platforms/4xx/cpm.c
@@ -163,7 +163,7 @@ static ssize_t cpm_idle_store(struct kobject *kobj,
static struct kobj_attribute cpm_idle_attr =
__ATTR(idle, 0644, cpm_idle_show, cpm_idle_store);
-static void cpm_idle_config_sysfs(void)
+static void __init cpm_idle_config_sysfs(void)
{
struct device *dev;
unsigned long ret;
@@ -231,7 +231,7 @@ static const struct platform_suspend_ops cpm_suspend_ops = {
.enter = cpm_suspend_enter,
};
-static int cpm_get_uint_property(struct device_node *np,
+static int __init cpm_get_uint_property(struct device_node *np,
const char *name)
{
int len;
diff --git a/arch/powerpc/platforms/4xx/hsta_msi.c b/arch/powerpc/platforms/4xx/hsta_msi.c
index c950fed43b32..fee430eadcc6 100644
--- a/arch/powerpc/platforms/4xx/hsta_msi.c
+++ b/arch/powerpc/platforms/4xx/hsta_msi.c
@@ -47,7 +47,7 @@ static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return -EINVAL;
}
- for_each_pci_msi_entry(entry, dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
irq = msi_bitmap_alloc_hwirqs(&ppc4xx_hsta_msi.bmp, 1);
if (irq < 0) {
pr_debug("%s: Failed to allocate msi interrupt\n",
@@ -105,10 +105,7 @@ static void hsta_teardown_msi_irqs(struct pci_dev *dev)
struct msi_desc *entry;
int irq;
- for_each_pci_msi_entry(entry, dev) {
- if (!entry->irq)
- continue;
-
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) {
irq = hsta_find_hwirq_offset(entry->irq);
/* entry->irq should always be in irq_map */
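These conversions replace the open-coded "skip descriptors without an IRQ" test with the filter argument of msi_for_each_desc() (MSI_DESC_ASSOCIATED / MSI_DESC_NOTASSOCIATED). A generic user-space sketch of a filtered iterator of this shape, with illustrative names rather than the kernel API:

```c
/*
 * Hedged sketch: the filter moves the "is this descriptor associated
 * with an IRQ?" test out of the loop body and into the iterator,
 * mirroring what msi_for_each_desc() does for the code above.
 */
#include <stdbool.h>
#include <stddef.h>

struct demo_desc { int irq; struct demo_desc *next; };

enum demo_filter { DEMO_ALL, DEMO_ASSOCIATED, DEMO_NOTASSOCIATED };

static bool demo_match(const struct demo_desc *d, enum demo_filter f)
{
	switch (f) {
	case DEMO_ASSOCIATED:	 return d->irq != 0;
	case DEMO_NOTASSOCIATED: return d->irq == 0;
	default:		 return true;
	}
}

static struct demo_desc *demo_first_match(struct demo_desc *d,
					  enum demo_filter f)
{
	for (; d; d = d->next)
		if (demo_match(d, f))
			return d;
	return NULL;
}

#define demo_for_each_desc(d, head, f)					\
	for ((d) = demo_first_match((head), (f)); (d);			\
	     (d) = demo_first_match((d)->next, (f)))
```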
diff --git a/arch/powerpc/platforms/4xx/msi.c b/arch/powerpc/platforms/4xx/msi.c
deleted file mode 100644
index 1051564b94f2..000000000000
--- a/arch/powerpc/platforms/4xx/msi.c
+++ /dev/null
@@ -1,281 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Adding PCI-E MSI support for PPC4XX SoCs.
- *
- * Copyright (c) 2010, Applied Micro Circuits Corporation
- * Authors: Tirumala R Marri <tmarri@apm.com>
- * Feng Kan <fkan@apm.com>
- */
-
-#include <linux/irq.h>
-#include <linux/pci.h>
-#include <linux/msi.h>
-#include <linux/of_platform.h>
-#include <linux/interrupt.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-#include <asm/prom.h>
-#include <asm/hw_irq.h>
-#include <asm/ppc-pci.h>
-#include <asm/dcr.h>
-#include <asm/dcr-regs.h>
-#include <asm/msi_bitmap.h>
-
-#define PEIH_TERMADH 0x00
-#define PEIH_TERMADL 0x08
-#define PEIH_MSIED 0x10
-#define PEIH_MSIMK 0x18
-#define PEIH_MSIASS 0x20
-#define PEIH_FLUSH0 0x30
-#define PEIH_FLUSH1 0x38
-#define PEIH_CNTRST 0x48
-
-static int msi_irqs;
-
-struct ppc4xx_msi {
- u32 msi_addr_lo;
- u32 msi_addr_hi;
- void __iomem *msi_regs;
- int *msi_virqs;
- struct msi_bitmap bitmap;
- struct device_node *msi_dev;
-};
-
-static struct ppc4xx_msi ppc4xx_msi;
-
-static int ppc4xx_msi_init_allocator(struct platform_device *dev,
- struct ppc4xx_msi *msi_data)
-{
- int err;
-
- err = msi_bitmap_alloc(&msi_data->bitmap, msi_irqs,
- dev->dev.of_node);
- if (err)
- return err;
-
- err = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap);
- if (err < 0) {
- msi_bitmap_free(&msi_data->bitmap);
- return err;
- }
-
- return 0;
-}
-
-static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- int int_no = -ENOMEM;
- unsigned int virq;
- struct msi_msg msg;
- struct msi_desc *entry;
- struct ppc4xx_msi *msi_data = &ppc4xx_msi;
-
- dev_dbg(&dev->dev, "PCIE-MSI:%s called. vec %x type %d\n",
- __func__, nvec, type);
- if (type == PCI_CAP_ID_MSIX)
- pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n");
-
- msi_data->msi_virqs = kmalloc_array(msi_irqs, sizeof(int), GFP_KERNEL);
- if (!msi_data->msi_virqs)
- return -ENOMEM;
-
- for_each_pci_msi_entry(entry, dev) {
- int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
- if (int_no >= 0)
- break;
- if (int_no < 0) {
- pr_debug("%s: fail allocating msi interrupt\n",
- __func__);
- }
- virq = irq_of_parse_and_map(msi_data->msi_dev, int_no);
- if (!virq) {
- dev_err(&dev->dev, "%s: fail mapping irq\n", __func__);
- msi_bitmap_free_hwirqs(&msi_data->bitmap, int_no, 1);
- return -ENOSPC;
- }
- dev_dbg(&dev->dev, "%s: virq = %d\n", __func__, virq);
-
- /* Setup msi address space */
- msg.address_hi = msi_data->msi_addr_hi;
- msg.address_lo = msi_data->msi_addr_lo;
-
- irq_set_msi_desc(virq, entry);
- msg.data = int_no;
- pci_write_msi_msg(virq, &msg);
- }
- return 0;
-}
-
-void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
-{
- struct msi_desc *entry;
- struct ppc4xx_msi *msi_data = &ppc4xx_msi;
- irq_hw_number_t hwirq;
-
- dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
-
- for_each_pci_msi_entry(entry, dev) {
- if (!entry->irq)
- continue;
- hwirq = virq_to_hw(entry->irq);
- irq_set_msi_desc(entry->irq, NULL);
- irq_dispose_mapping(entry->irq);
- msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
- }
-}
-
-static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
- struct resource res, struct ppc4xx_msi *msi)
-{
- const u32 *msi_data;
- const u32 *msi_mask;
- const u32 *sdr_addr;
- dma_addr_t msi_phys;
- void *msi_virt;
- int err;
-
- sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL);
- if (!sdr_addr)
- return -EINVAL;
-
- msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
- if (!msi_data)
- return -EINVAL;
-
- msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
- if (!msi_mask)
- return -EINVAL;
-
- msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
- if (!msi->msi_dev)
- return -ENODEV;
-
- msi->msi_regs = of_iomap(msi->msi_dev, 0);
- if (!msi->msi_regs) {
- dev_err(&dev->dev, "of_iomap failed\n");
- err = -ENOMEM;
- goto node_put;
- }
- dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n",
- (u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));
-
- msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
- if (!msi_virt) {
- err = -ENOMEM;
- goto iounmap;
- }
- msi->msi_addr_hi = upper_32_bits(msi_phys);
- msi->msi_addr_lo = lower_32_bits(msi_phys & 0xffffffff);
- dev_dbg(&dev->dev, "PCIE-MSI: msi address high 0x%x, low 0x%x\n",
- msi->msi_addr_hi, msi->msi_addr_lo);
-
- mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
- mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
-
- /* Progam the Interrupt handler Termination addr registers */
- out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
- out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo);
-
- /* Program MSI Expected data and Mask bits */
- out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
- out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
-
- dma_free_coherent(&dev->dev, 64, msi_virt, msi_phys);
-
- return 0;
-
-iounmap:
- iounmap(msi->msi_regs);
-node_put:
- of_node_put(msi->msi_dev);
- return err;
-}
-
-static int ppc4xx_of_msi_remove(struct platform_device *dev)
-{
- struct ppc4xx_msi *msi = dev->dev.platform_data;
- int i;
- int virq;
-
- for (i = 0; i < msi_irqs; i++) {
- virq = msi->msi_virqs[i];
- if (virq)
- irq_dispose_mapping(virq);
- }
-
- if (msi->bitmap.bitmap)
- msi_bitmap_free(&msi->bitmap);
- iounmap(msi->msi_regs);
- of_node_put(msi->msi_dev);
-
- return 0;
-}
-
-static int ppc4xx_msi_probe(struct platform_device *dev)
-{
- struct ppc4xx_msi *msi;
- struct resource res;
- int err = 0;
- struct pci_controller *phb;
-
- dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");
-
- msi = devm_kzalloc(&dev->dev, sizeof(*msi), GFP_KERNEL);
- if (!msi)
- return -ENOMEM;
- dev->dev.platform_data = msi;
-
- /* Get MSI ranges */
- err = of_address_to_resource(dev->dev.of_node, 0, &res);
- if (err) {
- dev_err(&dev->dev, "%pOF resource error!\n", dev->dev.of_node);
- return err;
- }
-
- msi_irqs = of_irq_count(dev->dev.of_node);
- if (!msi_irqs)
- return -ENODEV;
-
- err = ppc4xx_setup_pcieh_hw(dev, res, msi);
- if (err)
- return err;
-
- err = ppc4xx_msi_init_allocator(dev, msi);
- if (err) {
- dev_err(&dev->dev, "Error allocating MSI bitmap\n");
- goto error_out;
- }
- ppc4xx_msi = *msi;
-
- list_for_each_entry(phb, &hose_list, list_node) {
- phb->controller_ops.setup_msi_irqs = ppc4xx_setup_msi_irqs;
- phb->controller_ops.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
- }
- return 0;
-
-error_out:
- ppc4xx_of_msi_remove(dev);
- return err;
-}
-static const struct of_device_id ppc4xx_msi_ids[] = {
- {
- .compatible = "amcc,ppc4xx-msi",
- },
- {}
-};
-static struct platform_driver ppc4xx_msi_driver = {
- .probe = ppc4xx_msi_probe,
- .remove = ppc4xx_of_msi_remove,
- .driver = {
- .name = "ppc4xx-msi",
- .of_match_table = ppc4xx_msi_ids,
- },
-
-};
-
-static __init int ppc4xx_msi_init(void)
-{
- return platform_driver_register(&ppc4xx_msi_driver);
-}
-
-subsys_initcall(ppc4xx_msi_init);
diff --git a/arch/powerpc/platforms/4xx/pci.c b/arch/powerpc/platforms/4xx/pci.c
index c13d64c3b019..24f41e178cbc 100644
--- a/arch/powerpc/platforms/4xx/pci.c
+++ b/arch/powerpc/platforms/4xx/pci.c
@@ -1273,7 +1273,7 @@ static int __init ppc405ex_pciex_core_init(struct device_node *np)
return 2;
}
-static void ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
+static void __init ppc405ex_pcie_phy_reset(struct ppc4xx_pciex_port *port)
{
/* Assert the PE0_PHY reset */
mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET, 0x01010000);
diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c
index 30342b60aa63..0b03d812baae 100644
--- a/arch/powerpc/platforms/512x/clock-commonclk.c
+++ b/arch/powerpc/platforms/512x/clock-commonclk.c
@@ -97,7 +97,7 @@ static enum soc_type {
MPC512x_SOC_MPC5125,
} soc;
-static void mpc512x_clk_determine_soc(void)
+static void __init mpc512x_clk_determine_soc(void)
{
if (of_machine_is_compatible("fsl,mpc5121")) {
soc = MPC512x_SOC_MPC5121;
@@ -113,98 +113,98 @@ static void mpc512x_clk_determine_soc(void)
}
}
-static bool soc_has_mbx(void)
+static bool __init soc_has_mbx(void)
{
if (soc == MPC512x_SOC_MPC5121)
return true;
return false;
}
-static bool soc_has_axe(void)
+static bool __init soc_has_axe(void)
{
if (soc == MPC512x_SOC_MPC5125)
return false;
return true;
}
-static bool soc_has_viu(void)
+static bool __init soc_has_viu(void)
{
if (soc == MPC512x_SOC_MPC5125)
return false;
return true;
}
-static bool soc_has_spdif(void)
+static bool __init soc_has_spdif(void)
{
if (soc == MPC512x_SOC_MPC5125)
return false;
return true;
}
-static bool soc_has_pata(void)
+static bool __init soc_has_pata(void)
{
if (soc == MPC512x_SOC_MPC5125)
return false;
return true;
}
-static bool soc_has_sata(void)
+static bool __init soc_has_sata(void)
{
if (soc == MPC512x_SOC_MPC5125)
return false;
return true;
}
-static bool soc_has_pci(void)
+static bool __init soc_has_pci(void)
{
if (soc == MPC512x_SOC_MPC5125)
return false;
return true;
}
-static bool soc_has_fec2(void)
+static bool __init soc_has_fec2(void)
{
if (soc == MPC512x_SOC_MPC5125)
return true;
return false;
}
-static int soc_max_pscnum(void)
+static int __init soc_max_pscnum(void)
{
if (soc == MPC512x_SOC_MPC5125)
return 10;
return 12;
}
-static bool soc_has_sdhc2(void)
+static bool __init soc_has_sdhc2(void)
{
if (soc == MPC512x_SOC_MPC5125)
return true;
return false;
}
-static bool soc_has_nfc_5125(void)
+static bool __init soc_has_nfc_5125(void)
{
if (soc == MPC512x_SOC_MPC5125)
return true;
return false;
}
-static bool soc_has_outclk(void)
+static bool __init soc_has_outclk(void)
{
if (soc == MPC512x_SOC_MPC5125)
return true;
return false;
}
-static bool soc_has_cpmf_0_bypass(void)
+static bool __init soc_has_cpmf_0_bypass(void)
{
if (soc == MPC512x_SOC_MPC5125)
return true;
return false;
}
-static bool soc_has_mclk_mux0_canin(void)
+static bool __init soc_has_mclk_mux0_canin(void)
{
if (soc == MPC512x_SOC_MPC5125)
return true;
@@ -294,7 +294,7 @@ static inline int get_bit_field(uint32_t __iomem *reg, uint8_t pos, uint8_t len)
}
/* get the SPMF and translate it into the "sys pll" multiplier */
-static int get_spmf_mult(void)
+static int __init get_spmf_mult(void)
{
static int spmf_to_mult[] = {
68, 1, 12, 16, 20, 24, 28, 32,
@@ -312,7 +312,7 @@ static int get_spmf_mult(void)
* values returned from here are a multiple of the real factor since the
* divide ratio is fractional
*/
-static int get_sys_div_x2(void)
+static int __init get_sys_div_x2(void)
{
static int sysdiv_code_to_x2[] = {
4, 5, 6, 7, 8, 9, 10, 14,
@@ -333,7 +333,7 @@ static int get_sys_div_x2(void)
* values returned from here are a multiple of the real factor since the
* multiplier ratio is fractional
*/
-static int get_cpmf_mult_x2(void)
+static int __init get_cpmf_mult_x2(void)
{
static int cpmf_to_mult_x36[] = {
/* 0b000 is "times 36" */
@@ -379,7 +379,7 @@ static const struct clk_div_table divtab_1234[] = {
{ .div = 0, },
};
-static int get_freq_from_dt(char *propname)
+static int __init get_freq_from_dt(char *propname)
{
struct device_node *np;
const unsigned int *prop;
@@ -396,7 +396,7 @@ static int get_freq_from_dt(char *propname)
return val;
}
-static void mpc512x_clk_preset_data(void)
+static void __init mpc512x_clk_preset_data(void)
{
size_t i;
@@ -418,7 +418,7 @@ static void mpc512x_clk_preset_data(void)
* SYS -> CSB -> IPS) from the REF clock rate and the returned mul/div
* values
*/
-static void mpc512x_clk_setup_ref_clock(struct device_node *np, int bus_freq,
+static void __init mpc512x_clk_setup_ref_clock(struct device_node *np, int bus_freq,
int *sys_mul, int *sys_div,
int *ips_div)
{
@@ -592,7 +592,7 @@ static struct mclk_setup_data mclk_outclk_data[] = {
};
/* setup the MCLK clock subtree of an individual PSC/MSCAN/SPDIF */
-static void mpc512x_clk_setup_mclk(struct mclk_setup_data *entry, size_t idx)
+static void __init mpc512x_clk_setup_mclk(struct mclk_setup_data *entry, size_t idx)
{
size_t clks_idx_pub, clks_idx_int;
u32 __iomem *mccr_reg; /* MCLK control register (mux, en, div) */
@@ -701,7 +701,7 @@ static void mpc512x_clk_setup_mclk(struct mclk_setup_data *entry, size_t idx)
/* }}} MCLK helpers */
-static void mpc512x_clk_setup_clock_tree(struct device_node *np, int busfreq)
+static void __init mpc512x_clk_setup_clock_tree(struct device_node *np, int busfreq)
{
int sys_mul, sys_div, ips_div;
int mul, div;
@@ -937,7 +937,7 @@ static void mpc512x_clk_setup_clock_tree(struct device_node *np, int busfreq)
* registers the set of public clocks (those listed in the dt-bindings/
* header file) for OF lookups, keeps the intermediates private to us
*/
-static void mpc5121_clk_register_of_provider(struct device_node *np)
+static void __init mpc5121_clk_register_of_provider(struct device_node *np)
{
clk_data.clks = clks;
clk_data.clk_num = MPC512x_CLK_LAST_PUBLIC + 1; /* _not_ ARRAY_SIZE() */
@@ -948,7 +948,7 @@ static void mpc5121_clk_register_of_provider(struct device_node *np)
* temporary support for the period of time between introduction of CCF
* support and the adjustment of peripheral drivers to OF based lookups
*/
-static void mpc5121_clk_provide_migration_support(void)
+static void __init mpc5121_clk_provide_migration_support(void)
{
/*
@@ -1009,7 +1009,7 @@ static void mpc5121_clk_provide_migration_support(void)
* case of not yet adjusted device tree data, where clock related specs
* are missing)
*/
-static void mpc5121_clk_provide_backwards_compat(void)
+static void __init mpc5121_clk_provide_backwards_compat(void)
{
enum did_reg_flags {
DID_REG_PSC = BIT(0),
diff --git a/arch/powerpc/platforms/512x/mpc512x.h b/arch/powerpc/platforms/512x/mpc512x.h
index fff225901e2f..2f3c60e373e1 100644
--- a/arch/powerpc/platforms/512x/mpc512x.h
+++ b/arch/powerpc/platforms/512x/mpc512x.h
@@ -12,8 +12,8 @@ extern void __init mpc512x_init_early(void);
extern void __init mpc512x_init(void);
extern void __init mpc512x_setup_arch(void);
extern int __init mpc5121_clk_init(void);
-extern const char *mpc512x_select_psc_compat(void);
-extern const char *mpc512x_select_reset_compat(void);
+const char *__init mpc512x_select_psc_compat(void);
+const char *__init mpc512x_select_reset_compat(void);
extern void __noreturn mpc512x_restart(char *cmd);
#endif /* __MPC512X_H__ */
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index 7a9ae9591d60..e3411663edad 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -352,7 +352,7 @@ static void __init mpc512x_declare_of_platform_devices(void)
#define DEFAULT_FIFO_SIZE 16
-const char *mpc512x_select_psc_compat(void)
+const char *__init mpc512x_select_psc_compat(void)
{
if (of_machine_is_compatible("fsl,mpc5121"))
return "fsl,mpc5121-psc";
@@ -363,7 +363,7 @@ const char *mpc512x_select_psc_compat(void)
return NULL;
}
-const char *mpc512x_select_reset_compat(void)
+const char *__init mpc512x_select_reset_compat(void)
{
if (of_machine_is_compatible("fsl,mpc5121"))
return "fsl,mpc5121-reset";
diff --git a/arch/powerpc/platforms/52xx/Kconfig b/arch/powerpc/platforms/52xx/Kconfig
index 99d60acc20c8..b72ed2950ca8 100644
--- a/arch/powerpc/platforms/52xx/Kconfig
+++ b/arch/powerpc/platforms/52xx/Kconfig
@@ -34,7 +34,7 @@ config PPC_EFIKA
bool "bPlan Efika 5k2. MPC5200B based computer"
depends on PPC_MPC52xx
select PPC_RTAS
- select PPC_NATIVE
+ select PPC_HASH_MMU_NATIVE
config PPC_LITE5200
bool "Freescale Lite5200 Eval Board"
diff --git a/arch/powerpc/platforms/83xx/km83xx.c b/arch/powerpc/platforms/83xx/km83xx.c
index 108e1e4d2683..d9eed0decb28 100644
--- a/arch/powerpc/platforms/83xx/km83xx.c
+++ b/arch/powerpc/platforms/83xx/km83xx.c
@@ -39,7 +39,7 @@
#define SVR_REV(svr) (((svr) >> 0) & 0xFFFF) /* Revision field */
-static void quirk_mpc8360e_qe_enet10(void)
+static void __init quirk_mpc8360e_qe_enet10(void)
{
/*
* handle mpc8360E Erratum QE_ENET10:
diff --git a/arch/powerpc/platforms/83xx/mpc834x_mds.c b/arch/powerpc/platforms/83xx/mpc834x_mds.c
index 6d91bdce0a18..0713deffb40c 100644
--- a/arch/powerpc/platforms/83xx/mpc834x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc834x_mds.c
@@ -35,7 +35,7 @@
#include "mpc83xx.h"
#define BCSR5_INT_USB 0x02
-static int mpc834xemds_usb_cfg(void)
+static int __init mpc834xemds_usb_cfg(void)
{
struct device_node *np;
void __iomem *bcsr_regs = NULL;
diff --git a/arch/powerpc/platforms/83xx/mpc837x_mds.c b/arch/powerpc/platforms/83xx/mpc837x_mds.c
index f28d166ea7db..fc88ab97f6e3 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_mds.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_mds.c
@@ -23,7 +23,7 @@
#define BCSR12_USB_SER_PIN 0x80
#define BCSR12_USB_SER_DEVICE 0x02
-static int mpc837xmds_usb_cfg(void)
+static int __init mpc837xmds_usb_cfg(void)
{
struct device_node *np;
const void *phy_type, *mode;
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index 7fb7684c256b..5d48c6842098 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -18,7 +18,7 @@
#include "mpc83xx.h"
-static void mpc837x_rdb_sd_cfg(void)
+static void __init mpc837x_rdb_sd_cfg(void)
{
void __iomem *im;
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
index a30d30588cf6..aea803ba3a15 100644
--- a/arch/powerpc/platforms/83xx/mpc83xx.h
+++ b/arch/powerpc/platforms/83xx/mpc83xx.h
@@ -68,9 +68,9 @@
extern void __noreturn mpc83xx_restart(char *cmd);
extern long mpc83xx_time_init(void);
-extern int mpc837x_usb_cfg(void);
-extern int mpc834x_usb_cfg(void);
-extern int mpc831x_usb_cfg(void);
+int __init mpc837x_usb_cfg(void);
+int __init mpc834x_usb_cfg(void);
+int __init mpc831x_usb_cfg(void);
extern void mpc83xx_ipic_init_IRQ(void);
#ifdef CONFIG_PCI
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c
index 3d247d726ed5..b0bda20aaccf 100644
--- a/arch/powerpc/platforms/83xx/usb.c
+++ b/arch/powerpc/platforms/83xx/usb.c
@@ -20,7 +20,7 @@
#ifdef CONFIG_PPC_MPC834x
-int mpc834x_usb_cfg(void)
+int __init mpc834x_usb_cfg(void)
{
unsigned long sccr, sicrl, sicrh;
void __iomem *immap;
@@ -96,7 +96,7 @@ int mpc834x_usb_cfg(void)
#endif /* CONFIG_PPC_MPC834x */
#ifdef CONFIG_PPC_MPC831x
-int mpc831x_usb_cfg(void)
+int __init mpc831x_usb_cfg(void)
{
u32 temp;
void __iomem *immap, *usb_regs;
@@ -209,7 +209,7 @@ out:
#endif /* CONFIG_PPC_MPC831x */
#ifdef CONFIG_PPC_MPC837x
-int mpc837x_usb_cfg(void)
+int __init mpc837x_usb_cfg(void)
{
void __iomem *immap;
struct device_node *np = NULL;
diff --git a/arch/powerpc/platforms/85xx/c293pcie.c b/arch/powerpc/platforms/85xx/c293pcie.c
index 8d9a2503dd0f..58a398c89e97 100644
--- a/arch/powerpc/platforms/85xx/c293pcie.c
+++ b/arch/powerpc/platforms/85xx/c293pcie.c
@@ -19,7 +19,7 @@
#include "mpc85xx.h"
-void __init c293_pcie_pic_init(void)
+static void __init c293_pcie_pic_init(void)
{
struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU, 0, 256, " OpenPIC ");
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
index 83a0f7a1f0de..743c65e4d8e4 100644
--- a/arch/powerpc/platforms/85xx/ge_imp3a.c
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -78,7 +78,7 @@ void __init ge_imp3a_pic_init(void)
of_node_put(cascade_node);
}
-static void ge_imp3a_pci_assign_primary(void)
+static void __init ge_imp3a_pci_assign_primary(void)
{
#ifdef CONFIG_PCI
struct device_node *np;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 172d2b7cfeb7..5bd487030256 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -282,7 +282,7 @@ machine_device_initcall(mpc85xx_cds, mpc85xx_cds_8259_attach);
#endif /* CONFIG_PPC_I8259 */
-static void mpc85xx_cds_pci_assign_primary(void)
+static void __init mpc85xx_cds_pci_assign_primary(void)
{
#ifdef CONFIG_PCI
struct device_node *np;
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c b/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
index 4a8af80011a6..f7ac92a8ae97 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_pm_ops.c
@@ -15,6 +15,8 @@
#include <asm/io.h>
#include <asm/fsl_pm.h>
+#include "smp.h"
+
static struct ccsr_guts __iomem *guts;
#ifdef CONFIG_FSL_PMC
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index d7081e9af65c..a1c6a7827c8f 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -366,7 +366,7 @@ struct smp_ops_t smp_85xx_ops = {
#ifdef CONFIG_PPC32
atomic_t kexec_down_cpus = ATOMIC_INIT(0);
-void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
local_irq_disable();
@@ -384,7 +384,7 @@ static void mpc85xx_smp_kexec_down(void *arg)
ppc_md.kexec_cpu_down(0,1);
}
#else
-void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
+static void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
{
int cpu = smp_processor_id();
int sibling = cpu_last_thread_sibling(cpu);
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 199a137c0ddb..3768c86b9629 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -271,7 +271,7 @@ static const struct irq_domain_ops socrates_fpga_pic_host_ops = {
.xlate = socrates_fpga_pic_host_xlate,
};
-void socrates_fpga_pic_init(struct device_node *pic)
+void __init socrates_fpga_pic_init(struct device_node *pic)
{
unsigned long flags;
int i;
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.h b/arch/powerpc/platforms/85xx/socrates_fpga_pic.h
index c592b8bc94dd..c50b23794a06 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.h
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.h
@@ -6,6 +6,6 @@
#ifndef SOCRATES_FPGA_PIC_H
#define SOCRATES_FPGA_PIC_H
-void socrates_fpga_pic_init(struct device_node *pic);
+void __init socrates_fpga_pic_init(struct device_node *pic);
#endif
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index d54e1ae56997..397e158c1edb 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -45,7 +45,7 @@ void __init xes_mpc85xx_pic_init(void)
mpic_init(mpic);
}
-static void xes_mpc85xx_configure_l2(void __iomem *l2_base)
+static void __init xes_mpc85xx_configure_l2(void __iomem *l2_base)
{
volatile uint32_t ctl, tmp;
@@ -72,7 +72,7 @@ static void xes_mpc85xx_configure_l2(void __iomem *l2_base)
asm volatile("msync; isync");
}
-static void xes_mpc85xx_fixups(void)
+static void __init xes_mpc85xx_fixups(void)
{
struct device_node *np;
int err;
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index e02d29a9d12f..d41dad227de8 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -40,9 +40,9 @@ config EPAPR_PARAVIRT
In case of doubt, say Y
-config PPC_NATIVE
+config PPC_HASH_MMU_NATIVE
bool
- depends on PPC_BOOK3S_32 || PPC64
+ depends on PPC_BOOK3S
help
Support for running natively on the hardware, i.e. without
a hypervisor. This option is not user-selectable but should
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index a208997ade88..87bc1929ee5a 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -30,8 +30,6 @@ config PPC_BOOK3S_32
bool "512x/52xx/6xx/7xx/74xx/82xx/83xx/86xx"
imply PPC_FPU
select PPC_HAVE_PMU_SUPPORT
- select PPC_HAVE_KUEP
- select PPC_HAVE_KUAP
select HAVE_ARCH_VMAP_STACK
config PPC_85xx
@@ -42,8 +40,7 @@ config PPC_8xx
bool "Freescale 8xx"
select ARCH_SUPPORTS_HUGETLBFS
select FSL_SOC
- select PPC_HAVE_KUEP
- select PPC_HAVE_KUAP
+ select PPC_KUEP
select HAVE_ARCH_VMAP_STACK
select HUGETLBFS
@@ -53,6 +50,7 @@ config 40x
select PPC_UDBG_16550
select 4xx_SOC
select HAVE_PCI
+ select PPC_KUEP if PPC_KUAP
config 44x
bool "AMCC 44x, 46x or 47x"
@@ -61,7 +59,7 @@ config 44x
select 4xx_SOC
select HAVE_PCI
select PHYS_64BIT
- select PPC_HAVE_KUEP
+ select PPC_KUEP
endchoice
@@ -105,9 +103,7 @@ config PPC_BOOK3S_64
select HAVE_MOVE_PMD
select HAVE_MOVE_PUD
select IRQ_WORK
- select PPC_MM_SLICES
- select PPC_HAVE_KUEP
- select PPC_HAVE_KUAP
+ select PPC_64S_HASH_MMU if !PPC_RADIX_MMU
config PPC_BOOK3E_64
bool "Embedded processors"
@@ -130,11 +126,13 @@ choice
config GENERIC_CPU
bool "Generic (POWER4 and above)"
depends on PPC64 && !CPU_LITTLE_ENDIAN
+ select PPC_64S_HASH_MMU if PPC_BOOK3S_64
config GENERIC_CPU
bool "Generic (POWER8 and above)"
depends on PPC64 && CPU_LITTLE_ENDIAN
select ARCH_HAS_FAST_MULTIPLIER
+ select PPC_64S_HASH_MMU
config GENERIC_CPU
bool "Generic 32 bits powerpc"
@@ -143,24 +141,29 @@ config GENERIC_CPU
config CELL_CPU
bool "Cell Broadband Engine"
depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
+ select PPC_64S_HASH_MMU
config POWER5_CPU
bool "POWER5"
depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
+ select PPC_64S_HASH_MMU
config POWER6_CPU
bool "POWER6"
depends on PPC_BOOK3S_64 && !CPU_LITTLE_ENDIAN
+ select PPC_64S_HASH_MMU
config POWER7_CPU
bool "POWER7"
depends on PPC_BOOK3S_64
select ARCH_HAS_FAST_MULTIPLIER
+ select PPC_64S_HASH_MMU
config POWER8_CPU
bool "POWER8"
depends on PPC_BOOK3S_64
select ARCH_HAS_FAST_MULTIPLIER
+ select PPC_64S_HASH_MMU
config POWER9_CPU
bool "POWER9"
@@ -278,6 +281,11 @@ config BOOKE
depends on E500 || 44x || PPC_BOOK3E
default y
+config BOOKE_OR_40x
+ bool
+ depends on BOOKE || 40x
+ default y
+
config FSL_BOOKE
bool
depends on E500 && PPC32
@@ -290,6 +298,7 @@ config PPC_FSL_BOOK3E
select FSL_EMB_PERFMON
select PPC_SMP_MUXED_IPI
select PPC_DOORBELL
+ select PPC_KUEP
default y if FSL_BOOKE
config PTE_64BIT
@@ -364,6 +373,22 @@ config SPE
If in doubt, say Y here.
+config PPC_64S_HASH_MMU
+ bool "Hash MMU Support"
+ depends on PPC_BOOK3S_64
+ select PPC_MM_SLICES
+ default y
+ help
+ Enable support for the Power ISA Hash style MMU. This is implemented
+ by all IBM Power and other 64-bit Book3S CPUs before ISA v3.0. The
+ OpenPOWER ISA does not mandate the hash MMU and some CPUs do not
+ implement it (e.g., Microwatt).
+
+ Note that POWER9 PowerVM platforms only support the hash
+ MMU. From POWER10, radix is also supported by PowerVM.
+
+ If you're unsure, say Y.
+
config PPC_RADIX_MMU
bool "Radix MMU Support"
depends on PPC_BOOK3S_64
@@ -375,7 +400,8 @@ config PPC_RADIX_MMU
you can probably disable this.
config PPC_RADIX_MMU_DEFAULT
- bool "Default to using the Radix MMU when possible"
+ bool "Default to using the Radix MMU when possible" if PPC_64S_HASH_MMU
+ depends on PPC_BOOK3S_64
depends on PPC_RADIX_MMU
default y
help
@@ -387,24 +413,16 @@ config PPC_RADIX_MMU_DEFAULT
If you're unsure, say Y.
-config PPC_HAVE_KUEP
- bool
-
config PPC_KUEP
- bool "Kernel Userspace Execution Prevention"
- depends on PPC_HAVE_KUEP
- default y
+ bool "Kernel Userspace Execution Prevention" if !40x
+ default y if !40x
help
Enable support for Kernel Userspace Execution Prevention (KUEP)
If you're unsure, say Y.
-config PPC_HAVE_KUAP
- bool
-
config PPC_KUAP
bool "Kernel Userspace Access Protection"
- depends on PPC_HAVE_KUAP
default y
help
Enable support for Kernel Userspace Access Protection (KUAP)
@@ -413,7 +431,7 @@ config PPC_KUAP
config PPC_KUAP_DEBUG
bool "Extra debugging for Kernel Userspace Access Protection"
- depends on PPC_KUAP && (PPC_RADIX_MMU || PPC32)
+ depends on PPC_KUAP
help
Add extra debugging for Kernel Userspace Access Protection (KUAP)
If you're unsure, say N.
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index cb70c5f25bc6..34669b060f36 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
config PPC_CELL
+ select PPC_64S_HASH_MMU if PPC64
bool
config PPC_CELL_COMMON
@@ -8,7 +9,7 @@ config PPC_CELL_COMMON
select PPC_DCR_MMIO
select PPC_INDIRECT_PIO
select PPC_INDIRECT_MMIO
- select PPC_NATIVE
+ select PPC_HASH_MMU_NATIVE
select PPC_RTAS
select IRQ_EDGE_EOI_HANDLER
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 82335e364c44..354a58c1e6f2 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -199,7 +199,6 @@ out_error:
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
struct device_node *dn;
- struct msi_desc *entry;
int len;
const u32 *prop;
@@ -209,10 +208,8 @@ static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
return -ENODEV;
}
- entry = first_pci_msi_entry(dev);
-
for (; dn; dn = of_get_next_parent(dn)) {
- if (entry->msi_attrib.is_64) {
+ if (!dev->no_64bit_msi) {
prop = of_get_property(dn, "msi-address-64", &len);
if (prop)
break;
@@ -265,7 +262,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (rc)
return rc;
- for_each_pci_msi_entry(entry, dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
virq = irq_create_direct_mapping(msic->irq_domain);
if (!virq) {
dev_warn(&dev->dev,
@@ -288,10 +285,7 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");
- for_each_pci_msi_entry(entry, dev) {
- if (!entry->irq)
- continue;
-
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) {
irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
}
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c
index c2a0678d85db..1c4c53bec66c 100644
--- a/arch/powerpc/platforms/cell/cbe_regs.c
+++ b/arch/powerpc/platforms/cell/cbe_regs.c
@@ -165,7 +165,7 @@ u32 cbe_node_to_cpu(int node)
}
EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
-static struct device_node *cbe_get_be_node(int cpu_id)
+static struct device_node *__init cbe_get_be_node(int cpu_id)
{
struct device_node *np;
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index fa08699aedeb..25e726bf0172 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -253,7 +253,7 @@ static irqreturn_t ioc_interrupt(int irq, void *data)
return IRQ_HANDLED;
}
-static int cell_iommu_find_ioc(int nid, unsigned long *base)
+static int __init cell_iommu_find_ioc(int nid, unsigned long *base)
{
struct device_node *np;
struct resource r;
@@ -293,7 +293,7 @@ static int cell_iommu_find_ioc(int nid, unsigned long *base)
return -ENODEV;
}
-static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
+static void __init cell_iommu_setup_stab(struct cbe_iommu *iommu,
unsigned long dbase, unsigned long dsize,
unsigned long fbase, unsigned long fsize)
{
@@ -313,7 +313,7 @@ static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
memset(iommu->stab, 0, stab_size);
}
-static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
+static unsigned long *__init cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
unsigned long base, unsigned long size, unsigned long gap_base,
unsigned long gap_size, unsigned long page_shift)
{
@@ -373,7 +373,7 @@ static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
return ptab;
}
-static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
+static void __init cell_iommu_enable_hardware(struct cbe_iommu *iommu)
{
int ret;
unsigned long reg, xlate_base;
@@ -413,7 +413,7 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}
-static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
+static void __init cell_iommu_setup_hardware(struct cbe_iommu *iommu,
unsigned long base, unsigned long size)
{
cell_iommu_setup_stab(iommu, base, size, 0, 0);
@@ -858,7 +858,7 @@ static bool cell_pci_iommu_bypass_supported(struct pci_dev *pdev, u64 mask)
cell_iommu_get_fixed_address(&pdev->dev) != OF_BAD_ADDR;
}
-static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
+static void __init insert_16M_pte(unsigned long addr, unsigned long *ptab,
unsigned long base_pte)
{
unsigned long segment, offset;
@@ -873,7 +873,7 @@ static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask);
}
-static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
+static void __init cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
struct device_node *np, unsigned long dbase, unsigned long dsize,
unsigned long fbase, unsigned long fsize)
{
@@ -977,6 +977,7 @@ static int __init cell_iommu_fixed_mapping_init(void)
if (hbase < dbase || (hend > (dbase + dsize))) {
pr_debug("iommu: hash window doesn't fit in"
"real DMA window\n");
+ of_node_put(np);
return -1;
}
}
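
The added of_node_put() fixes a device-node refcount leak: the lookup that produced np took a reference, and the early-return path dropped out without releasing it. A minimal sketch of the rule, with a hypothetical node name:

#include <linux/errno.h>
#include <linux/of.h>

static int demo_find_node(void)
{
	struct device_node *np;

	np = of_find_node_by_name(NULL, "demo"); /* takes a reference */
	if (!np)
		return -ENODEV;
	if (!of_device_is_available(np)) {
		of_node_put(np);	/* must also be dropped on error paths */
		return -ENODEV;
	}
	of_node_put(np);
	return 0;
}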
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c
index 5b9a7e9f144b..dff8d5e7ab82 100644
--- a/arch/powerpc/platforms/cell/pervasive.c
+++ b/arch/powerpc/platforms/cell/pervasive.c
@@ -78,6 +78,7 @@ static int cbe_system_reset_exception(struct pt_regs *regs)
switch (regs->msr & SRR1_WAKEMASK) {
case SRR1_WAKEDEC:
set_dec(1);
+ break;
case SRR1_WAKEEE:
/*
* Handle these when interrupts get re-enabled and we take
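
The added break prevents an unintended fall-through: a decrementer wake-up (SRR1_WAKEDEC) previously fell into the SRR1_WAKEEE case and ran its handling as well. A minimal stand-alone sketch of the bug class, with hypothetical helpers:

/* Sketch only: rearm_decrementer() and note_external_irq() stand in
 * for the real handlers. */
static void rearm_decrementer(void) { }
static void note_external_irq(void) { }

enum wake_reason { WAKE_DEC, WAKE_EE };

static void handle_wake(enum wake_reason reason)
{
	switch (reason) {
	case WAKE_DEC:
		rearm_decrementer();
		break;	/* without this break, WAKE_DEC also ran
			 * the WAKE_EE handling below */
	case WAKE_EE:
		note_external_irq();
		break;
	}
}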
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index bc48234443b6..83cea9e7ee72 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -387,7 +387,7 @@ spu_irq_class_2(int irq, void *data)
return stat ? IRQ_HANDLED : IRQ_NONE;
}
-static int spu_request_irqs(struct spu *spu)
+static int __init spu_request_irqs(struct spu *spu)
{
int ret = 0;
@@ -540,7 +540,7 @@ void spu_remove_dev_attr_group(struct attribute_group *attrs)
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group);
-static int spu_create_dev(struct spu *spu)
+static int __init spu_create_dev(struct spu *spu)
{
int ret;
@@ -711,7 +711,7 @@ static void crash_kexec_stop_spus(void)
}
}
-static void crash_register_spus(struct list_head *list)
+static void __init crash_register_spus(struct list_head *list)
{
struct spu *spu;
int ret;
diff --git a/arch/powerpc/platforms/cell/spu_manage.c b/arch/powerpc/platforms/cell/spu_manage.c
index 8e9ef65240c3..ddf8742f09a3 100644
--- a/arch/powerpc/platforms/cell/spu_manage.c
+++ b/arch/powerpc/platforms/cell/spu_manage.c
@@ -186,7 +186,7 @@ err:
return -EINVAL;
}
-static int spu_map_resource(struct spu *spu, int nr,
+static int __init spu_map_resource(struct spu *spu, int nr,
void __iomem** virt, unsigned long *phys)
{
struct device_node *np = spu->devnode;
@@ -361,7 +361,7 @@ static void disable_spu_by_master_run(struct spu_context *ctx)
static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 };
static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };
-static struct spu *spu_lookup_reg(int node, u32 reg)
+static struct spu *__init spu_lookup_reg(int node, u32 reg)
{
struct spu *spu;
const u32 *spu_reg;
@@ -374,7 +374,7 @@ static struct spu *spu_lookup_reg(int node, u32 reg)
return NULL;
}
-static void init_affinity_qs20_harcoded(void)
+static void __init init_affinity_qs20_harcoded(void)
{
int node, i;
struct spu *last_spu, *spu;
@@ -396,7 +396,7 @@ static void init_affinity_qs20_harcoded(void)
}
}
-static int of_has_vicinity(void)
+static int __init of_has_vicinity(void)
{
struct device_node *dn;
@@ -409,7 +409,7 @@ static int of_has_vicinity(void)
return 0;
}
-static struct spu *devnode_spu(int cbe, struct device_node *dn)
+static struct spu *__init devnode_spu(int cbe, struct device_node *dn)
{
struct spu *spu;
@@ -419,7 +419,7 @@ static struct spu *devnode_spu(int cbe, struct device_node *dn)
return NULL;
}
-static struct spu *
+static struct spu * __init
neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
{
struct spu *spu;
@@ -440,7 +440,7 @@ neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
return NULL;
}
-static void init_affinity_node(int cbe)
+static void __init init_affinity_node(int cbe)
{
struct spu *spu, *last_spu;
struct device_node *vic_dn, *last_spu_dn;
@@ -494,7 +494,7 @@ static void init_affinity_node(int cbe)
}
}
-static void init_affinity_fw(void)
+static void __init init_affinity_fw(void)
{
int cbe;
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index cb25acccd746..4c702192412f 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -648,7 +648,7 @@ static void spufs_exit_isolated_loader(void)
get_order(isolated_loader_size));
}
-static void
+static void __init
spufs_init_isolated_loader(void)
{
struct device_node *dn;
diff --git a/arch/powerpc/platforms/chrp/Kconfig b/arch/powerpc/platforms/chrp/Kconfig
index 9b5c5505718a..ff30ed579a39 100644
--- a/arch/powerpc/platforms/chrp/Kconfig
+++ b/arch/powerpc/platforms/chrp/Kconfig
@@ -11,6 +11,6 @@ config PPC_CHRP
select RTAS_ERROR_LOGGING
select PPC_MPC106
select PPC_UDBG_16550
- select PPC_NATIVE
+ select PPC_HASH_MMU_NATIVE
select FORCE_PCI
default y
diff --git a/arch/powerpc/platforms/chrp/pegasos_eth.c b/arch/powerpc/platforms/chrp/pegasos_eth.c
index 485cf5ef73d4..5c4f1a9ca154 100644
--- a/arch/powerpc/platforms/chrp/pegasos_eth.c
+++ b/arch/powerpc/platforms/chrp/pegasos_eth.c
@@ -113,7 +113,7 @@ static struct platform_device *mv643xx_eth_pd_devs[] __initdata = {
static void __iomem *mv643xx_reg_base;
-static int Enable_SRAM(void)
+static int __init Enable_SRAM(void)
{
u32 ALong;
diff --git a/arch/powerpc/platforms/embedded6xx/Kconfig b/arch/powerpc/platforms/embedded6xx/Kconfig
index 4c6d703a4284..c54786f8461e 100644
--- a/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -55,7 +55,7 @@ config MVME5100
select FORCE_PCI
select PPC_INDIRECT_PCI
select PPC_I8259
- select PPC_NATIVE
+ select PPC_HASH_MMU_NATIVE
select PPC_UDBG_16550
help
This option enables support for the Motorola (now Emerson) MVME5100
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index 15396333a90b..380b4285cce4 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -153,7 +153,7 @@ static void __hlwd_quiesce(void __iomem *io_base)
out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff);
}
-static struct irq_domain *hlwd_pic_init(struct device_node *np)
+static struct irq_domain *__init hlwd_pic_init(struct device_node *np)
{
struct irq_domain *irq_domain;
struct resource res;
@@ -197,7 +197,7 @@ unsigned int hlwd_pic_get_irq(void)
*
*/
-void hlwd_pic_probe(void)
+void __init hlwd_pic_probe(void)
{
struct irq_domain *host;
struct device_node *np;
@@ -214,6 +214,7 @@ void hlwd_pic_probe(void)
irq_set_chained_handler(cascade_virq,
hlwd_pic_irq_cascade);
hlwd_irq_host = host;
+ of_node_put(np);
break;
}
}
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.h b/arch/powerpc/platforms/embedded6xx/hlwd-pic.h
index f18eeeef0815..c2fa42e191dc 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.h
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.h
@@ -11,7 +11,7 @@
#define __HLWD_PIC_H
extern unsigned int hlwd_pic_get_irq(void);
-extern void hlwd_pic_probe(void);
+void __init hlwd_pic_probe(void);
extern void hlwd_quiesce(void);
#endif
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 7a85b117f7a4..07e71ba3e846 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -50,7 +50,7 @@ static int holly_exclude_device(struct pci_controller *hose, u_char bus,
return PCIBIOS_SUCCESSFUL;
}
-static void holly_remap_bridge(void)
+static void __init holly_remap_bridge(void)
{
u32 lut_val, lut_addr;
int i;
diff --git a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
index ed45db70a781..5aea46566233 100644
--- a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
+++ b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
@@ -194,7 +194,7 @@ static int ug_udbg_getc_poll(void)
/*
* Retrieves and prepares the virtual address needed to access the hardware.
*/
-static void __iomem *ug_udbg_setup_exi_io_base(struct device_node *np)
+static void __iomem *__init ug_udbg_setup_exi_io_base(struct device_node *np)
{
void __iomem *exi_io_base = NULL;
phys_addr_t paddr;
@@ -212,7 +212,7 @@ static void __iomem *ug_udbg_setup_exi_io_base(struct device_node *np)
/*
* Checks if a USB Gecko adapter is inserted in any memory card slot.
*/
-static void __iomem *ug_udbg_probe(void __iomem *exi_io_base)
+static void __iomem *__init ug_udbg_probe(void __iomem *exi_io_base)
{
int i;
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index a802ef957d63..f60ade584bb2 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -69,7 +69,7 @@ static void __noreturn wii_spin(void)
cpu_relax();
}
-static void __iomem *wii_ioremap_hw_regs(char *name, char *compatible)
+static void __iomem *__init wii_ioremap_hw_regs(char *name, char *compatible)
{
void __iomem *hw_regs = NULL;
struct device_node *np;
diff --git a/arch/powerpc/platforms/maple/Kconfig b/arch/powerpc/platforms/maple/Kconfig
index 86ae210bee9a..4c058cc57c90 100644
--- a/arch/powerpc/platforms/maple/Kconfig
+++ b/arch/powerpc/platforms/maple/Kconfig
@@ -9,7 +9,8 @@ config PPC_MAPLE
select GENERIC_TBSYNC
select PPC_UDBG_16550
select PPC_970_NAP
- select PPC_NATIVE
+ select PPC_64S_HASH_MMU
+ select PPC_HASH_MMU_NATIVE
select PPC_RTAS
select MMIO_NVRAM
select ATA_NONSTANDARD if ATA
diff --git a/arch/powerpc/platforms/microwatt/Kconfig b/arch/powerpc/platforms/microwatt/Kconfig
index 8f6a81978461..5e320f49583a 100644
--- a/arch/powerpc/platforms/microwatt/Kconfig
+++ b/arch/powerpc/platforms/microwatt/Kconfig
@@ -5,7 +5,6 @@ config PPC_MICROWATT
select PPC_XICS
select PPC_ICS_NATIVE
select PPC_ICP_NATIVE
- select PPC_NATIVE
select PPC_UDBG_16550
select ARCH_RANDOM
help
diff --git a/arch/powerpc/platforms/microwatt/rng.c b/arch/powerpc/platforms/microwatt/rng.c
index 3d8ee6eb7dad..7bc4d1cbfaf0 100644
--- a/arch/powerpc/platforms/microwatt/rng.c
+++ b/arch/powerpc/platforms/microwatt/rng.c
@@ -14,7 +14,7 @@
#define DARN_ERR 0xFFFFFFFFFFFFFFFFul
-int microwatt_get_random_darn(unsigned long *v)
+static int microwatt_get_random_darn(unsigned long *v)
{
unsigned long val;
diff --git a/arch/powerpc/platforms/pasemi/Kconfig b/arch/powerpc/platforms/pasemi/Kconfig
index c52731a7773f..85ae18ddd911 100644
--- a/arch/powerpc/platforms/pasemi/Kconfig
+++ b/arch/powerpc/platforms/pasemi/Kconfig
@@ -5,7 +5,8 @@ config PPC_PASEMI
select MPIC
select FORCE_PCI
select PPC_UDBG_16550
- select PPC_NATIVE
+ select PPC_64S_HASH_MMU
+ select PPC_HASH_MMU_NATIVE
select MPIC_BROKEN_REGREAD
help
This option enables support for PA Semi's PWRficient line
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c
index 270fa3c0d372..26427311fc72 100644
--- a/arch/powerpc/platforms/pasemi/dma_lib.c
+++ b/arch/powerpc/platforms/pasemi/dma_lib.c
@@ -375,7 +375,7 @@ int pasemi_dma_alloc_flag(void)
int bit;
retry:
- bit = find_next_bit(flags_free, MAX_FLAGS, 0);
+ bit = find_first_bit(flags_free, MAX_FLAGS);
if (bit >= MAX_FLAGS)
return -ENOSPC;
if (!test_and_clear_bit(bit, flags_free))
@@ -440,7 +440,7 @@ int pasemi_dma_alloc_fun(void)
int bit;
retry:
- bit = find_next_bit(fun_free, MAX_FLAGS, 0);
+ bit = find_first_bit(fun_free, MAX_FLAGS);
if (bit >= MAX_FLAGS)
return -ENOSPC;
if (!test_and_clear_bit(bit, fun_free))
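
Both hunks replace find_next_bit(bitmap, size, 0) with the equivalent find_first_bit(bitmap, size), which states the intent directly and lets the optimised first-bit implementation be used. A minimal sketch of the surrounding allocator pattern, assuming a caller-supplied free-map:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

/* Sketch: claim the lowest set bit of a free-map, retrying if another
 * CPU clears it first. */
static int demo_alloc_bit(unsigned long *free_map, unsigned int nbits)
{
	unsigned int bit;

retry:
	bit = find_first_bit(free_map, nbits);	/* was find_next_bit(.., 0) */
	if (bit >= nbits)
		return -ENOSPC;
	if (!test_and_clear_bit(bit, free_map))
		goto retry;
	return bit;
}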
diff --git a/arch/powerpc/platforms/pasemi/msi.c b/arch/powerpc/platforms/pasemi/msi.c
index d38944a1e258..ea1e41451408 100644
--- a/arch/powerpc/platforms/pasemi/msi.c
+++ b/arch/powerpc/platforms/pasemi/msi.c
@@ -62,17 +62,12 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev)
pr_debug("pasemi_msi_teardown_msi_irqs, pdev %p\n", pdev);
- for_each_pci_msi_entry(entry, pdev) {
- if (!entry->irq)
- continue;
-
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, ALLOC_CHUNK);
}
-
- return;
}
static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
@@ -90,7 +85,7 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
msg.address_hi = 0;
msg.address_lo = PASEMI_MSI_ADDR;
- for_each_pci_msi_entry(entry, pdev) {
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
/* Allocate 16 interrupts for now, since that's the grouping for
* affinity. This can be changed later if it turns out 32 is too
* few MSIs for someone, but restrictions will apply to how the
@@ -135,7 +130,7 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return 0;
}
-int mpic_pasemi_msi_init(struct mpic *mpic)
+int __init mpic_pasemi_msi_init(struct mpic *mpic)
{
int rc;
struct pci_controller *phb;
diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
index 70b56048ed1b..3f277a200fd8 100644
--- a/arch/powerpc/platforms/pasemi/pasemi.h
+++ b/arch/powerpc/platforms/pasemi/pasemi.h
@@ -7,7 +7,7 @@ extern void pas_pci_init(void);
extern void pas_pci_irq_fixup(struct pci_dev *dev);
extern void pas_pci_dma_dev_setup(struct pci_dev *dev);
-extern void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset);
+void __iomem *__init pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset);
extern void __init pasemi_map_registers(void);
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index 8779b107d872..d4b922759d6e 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -287,7 +287,7 @@ void __init pas_pci_init(void)
}
}
-void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
+void __iomem *__init pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 376797eb7894..f974bfe7fde1 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -212,7 +212,7 @@ static void sb600_8259_cascade(struct irq_desc *desc)
chip->irq_eoi(&desc->irq_data);
}
-static void nemo_init_IRQ(struct mpic *mpic)
+static void __init nemo_init_IRQ(struct mpic *mpic)
{
struct device_node *np;
int gpio_virq;
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig
index b97bf12801eb..130707ec9f99 100644
--- a/arch/powerpc/platforms/powermac/Kconfig
+++ b/arch/powerpc/platforms/powermac/Kconfig
@@ -6,7 +6,8 @@ config PPC_PMAC
select FORCE_PCI
select PPC_INDIRECT_PCI if PPC32
select PPC_MPC106 if PPC32
- select PPC_NATIVE
+ select PPC_64S_HASH_MMU if PPC64
+ select PPC_HASH_MMU_NATIVE
select ZONE_DMA if PPC32
default y
diff --git a/arch/powerpc/platforms/powermac/cache.S b/arch/powerpc/platforms/powermac/cache.S
index ced225415486..b8ae56e9f414 100644
--- a/arch/powerpc/platforms/powermac/cache.S
+++ b/arch/powerpc/platforms/powermac/cache.S
@@ -48,7 +48,7 @@ flush_disable_75x:
/* Stop DST streams */
BEGIN_FTR_SECTION
- DSSALL
+ PPC_DSSALL
sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
@@ -197,7 +197,7 @@ flush_disable_745x:
isync
/* Stop prefetch streams */
- DSSALL
+ PPC_DSSALL
sync
/* Disable L2 prefetching */
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c
index 5c77b9a24c0e..e67c624f35a2 100644
--- a/arch/powerpc/platforms/powermac/feature.c
+++ b/arch/powerpc/platforms/powermac/feature.c
@@ -1530,7 +1530,7 @@ static long g5_reset_cpu(struct device_node *node, long param, long value)
* This takes the second CPU off the bus on dual CPU machines
* running UP
*/
-void g5_phy_disable_cpu1(void)
+void __init g5_phy_disable_cpu1(void)
{
if (uninorth_maj == 3)
UN_OUT(U3_API_PHY_CONFIG_1, 0);
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index f77a59b5c2e1..df89d916236d 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -582,6 +582,7 @@ static void __init kw_i2c_add(struct pmac_i2c_host_kw *host,
bus->close = kw_i2c_close;
bus->xfer = kw_i2c_xfer;
mutex_init(&bus->mutex);
+ lockdep_register_key(&bus->lock_key);
lockdep_set_class(&bus->mutex, &bus->lock_key);
if (controller == busnode)
bus->flags = pmac_i2c_multibus;
@@ -810,6 +811,7 @@ static void __init pmu_i2c_probe(void)
bus->hostdata = bus + 1;
bus->xfer = pmu_i2c_xfer;
mutex_init(&bus->mutex);
+ lockdep_register_key(&bus->lock_key);
lockdep_set_class(&bus->mutex, &bus->lock_key);
bus->flags = pmac_i2c_multibus;
list_add(&bus->link, &pmac_i2c_busses);
@@ -933,6 +935,7 @@ static void __init smu_i2c_probe(void)
bus->hostdata = bus + 1;
bus->xfer = smu_i2c_xfer;
mutex_init(&bus->mutex);
+ lockdep_register_key(&bus->lock_key);
lockdep_set_class(&bus->mutex, &bus->lock_key);
bus->flags = 0;
list_add(&bus->link, &pmac_i2c_busses);
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index 853ccc4480e2..de8fcb607290 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -258,7 +258,7 @@ static u32 core99_calc_adler(u8 *buffer)
return (high << 16) | low;
}
-static u32 core99_check(u8* datas)
+static u32 __init core99_check(u8 *datas)
{
struct core99_header* hdr99 = (struct core99_header*)datas;
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c
index f5422506d4b0..9c2947a3edd5 100644
--- a/arch/powerpc/platforms/powermac/pfunc_base.c
+++ b/arch/powerpc/platforms/powermac/pfunc_base.c
@@ -93,7 +93,7 @@ static struct pmf_handlers macio_gpio_handlers = {
.delay = macio_do_delay,
};
-static void macio_gpio_init_one(struct macio_chip *macio)
+static void __init macio_gpio_init_one(struct macio_chip *macio)
{
struct device_node *gparent, *gp;
@@ -265,7 +265,7 @@ static struct pmf_handlers macio_mmio_handlers = {
.delay = macio_do_delay,
};
-static void macio_mmio_init_one(struct macio_chip *macio)
+static void __init macio_mmio_init_one(struct macio_chip *macio)
{
DBG("Installing MMIO functions for macio %pOF\n",
macio->of_node);
@@ -294,7 +294,7 @@ static struct pmf_handlers unin_mmio_handlers = {
.delay = macio_do_delay,
};
-static void uninorth_install_pfunc(void)
+static void __init uninorth_install_pfunc(void)
{
struct device_node *np;
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 4921bccf0376..bb0566633af5 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/adb.h>
+#include <linux/minmax.h>
#include <linux/pmu.h>
#include <asm/sections.h>
@@ -311,11 +312,8 @@ static void __init pmac_pic_probe_oldstyle(void)
/* Check ordering of master & slave */
if (of_device_is_compatible(master, "gatwick")) {
- struct device_node *tmp;
BUG_ON(slave == NULL);
- tmp = master;
- master = slave;
- slave = tmp;
+ swap(master, slave);
}
/* We found a slave */
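
The open-coded pointer exchange through a temporary becomes the swap() macro, which is why <linux/minmax.h> joins the includes above. A minimal sketch:

#include <linux/minmax.h>

/* Sketch: swap() declares its own temporary and type-checks that the
 * two arguments are compatible. */
static void demo_swap(void)
{
	int a = 1, b = 2;

	swap(a, b);	/* now a == 2, b == 1 */
}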
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 13e8a8a9841c..974d4b49867b 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -166,7 +166,7 @@ static void pmac_show_cpuinfo(struct seq_file *m)
}
#ifndef CONFIG_ADB_CUDA
-int find_via_cuda(void)
+int __init find_via_cuda(void)
{
struct device_node *dn = of_find_node_by_name(NULL, "via-cuda");
@@ -180,7 +180,7 @@ int find_via_cuda(void)
#endif
#ifndef CONFIG_ADB_PMU
-int find_via_pmu(void)
+int __init find_via_pmu(void)
{
struct device_node *dn = of_find_node_by_name(NULL, "via-pmu");
@@ -194,7 +194,7 @@ int find_via_pmu(void)
#endif
#ifndef CONFIG_PMAC_SMU
-int smu_init(void)
+int __init smu_init(void)
{
/* should check and warn if SMU is present */
return 0;
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 3256a316e884..da1efdc30d6c 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -186,7 +186,7 @@ static const struct irq_domain_ops psurge_host_ops = {
.map = psurge_host_map,
};
-static int psurge_secondary_ipi_init(void)
+static int __init psurge_secondary_ipi_init(void)
{
int rc = -ENOMEM;
@@ -875,7 +875,7 @@ static int smp_core99_cpu_online(unsigned int cpu)
static void __init smp_core99_bringup_done(void)
{
- extern void g5_phy_disable_cpu1(void);
+ extern void __init g5_phy_disable_cpu1(void);
/* Close i2c bus if it was used for tb sync */
if (pmac_tb_clock_chip_host)
diff --git a/arch/powerpc/platforms/powermac/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c
index f286bdfe8346..965827ac2e9c 100644
--- a/arch/powerpc/platforms/powermac/udbg_scc.c
+++ b/arch/powerpc/platforms/powermac/udbg_scc.c
@@ -62,7 +62,7 @@ static unsigned char scc_inittab[] = {
3, 0xc1, /* rx enable, 8 bits */
};
-void udbg_scc_init(int force_scc)
+void __init udbg_scc_init(int force_scc)
{
const u32 *reg;
unsigned long addr;
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 043eefbbdd28..161dfe024085 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -2,7 +2,7 @@
config PPC_POWERNV
depends on PPC64 && PPC_BOOK3S
bool "IBM PowerNV (Non-Virtualized) platform support"
- select PPC_NATIVE
+ select PPC_HASH_MMU_NATIVE if PPC_64S_HASH_MMU
select PPC_XICS
select PPC_ICP_NATIVE
select PPC_XIVE_NATIVE
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index e3ffdc8e8567..9942289f379b 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -62,7 +62,7 @@ static bool deepest_stop_found;
static unsigned long power7_offline_type;
-static int pnv_save_sprs_for_deep_states(void)
+static int __init pnv_save_sprs_for_deep_states(void)
{
int cpu;
int rc;
@@ -146,9 +146,13 @@ EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
static void pnv_fastsleep_workaround_apply(void *info)
{
+ int cpu = smp_processor_id();
int rc;
int *err = info;
+ if (cpu_first_thread_sibling(cpu) != cpu)
+ return;
+
rc = opal_config_cpu_idle_state(OPAL_CONFIG_IDLE_FASTSLEEP,
OPAL_CONFIG_IDLE_APPLY);
if (rc)
@@ -175,7 +179,6 @@ static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- cpumask_t primary_thread_mask;
int err;
u8 val;
@@ -200,10 +203,7 @@ static ssize_t store_fastsleep_workaround_applyonce(struct device *dev,
power7_fastsleep_workaround_exit = false;
cpus_read_lock();
- primary_thread_mask = cpu_online_cores_map();
- on_each_cpu_mask(&primary_thread_mask,
- pnv_fastsleep_workaround_apply,
- &err, 1);
+ on_each_cpu(pnv_fastsleep_workaround_apply, &err, 1);
cpus_read_unlock();
if (err) {
pr_err("fastsleep_workaround_applyonce change failed while running pnv_fastsleep_workaround_apply");
@@ -306,8 +306,8 @@ struct p7_sprs {
/* per thread SPRs that get lost in shallow states */
u64 amr;
u64 iamr;
- u64 amor;
u64 uamor;
+ /* amor is restored to constant ~0 */
};
static unsigned long power7_idle_insn(unsigned long type)
@@ -378,7 +378,6 @@ static unsigned long power7_idle_insn(unsigned long type)
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
sprs.amr = mfspr(SPRN_AMR);
sprs.iamr = mfspr(SPRN_IAMR);
- sprs.amor = mfspr(SPRN_AMOR);
sprs.uamor = mfspr(SPRN_UAMOR);
}
@@ -397,7 +396,7 @@ static unsigned long power7_idle_insn(unsigned long type)
*/
mtspr(SPRN_AMR, sprs.amr);
mtspr(SPRN_IAMR, sprs.iamr);
- mtspr(SPRN_AMOR, sprs.amor);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_UAMOR, sprs.uamor);
}
}
@@ -492,12 +491,14 @@ subcore_woken:
mtspr(SPRN_SPRG3, local_paca->sprg_vdso);
+#ifdef CONFIG_PPC_64S_HASH_MMU
/*
* The SLB has to be restored here, but it sometimes still
* contains entries, so the __ variant must be used to prevent
* multi hits.
*/
__slb_restore_bolted_realmode();
+#endif
return srr1;
}
@@ -589,7 +590,6 @@ struct p9_sprs {
u64 purr;
u64 spurr;
u64 dscr;
- u64 wort;
u64 ciabr;
u64 mmcra;
@@ -687,7 +687,6 @@ static unsigned long power9_idle_stop(unsigned long psscr)
sprs.amr = mfspr(SPRN_AMR);
sprs.iamr = mfspr(SPRN_IAMR);
- sprs.amor = mfspr(SPRN_AMOR);
sprs.uamor = mfspr(SPRN_UAMOR);
srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */
@@ -708,7 +707,7 @@ static unsigned long power9_idle_stop(unsigned long psscr)
*/
mtspr(SPRN_AMR, sprs.amr);
mtspr(SPRN_IAMR, sprs.iamr);
- mtspr(SPRN_AMOR, sprs.amor);
+ mtspr(SPRN_AMOR, ~0);
mtspr(SPRN_UAMOR, sprs.uamor);
/*
@@ -1124,7 +1123,7 @@ unsigned long pnv_cpu_offline(unsigned int cpu)
* stop instruction
*/
-int validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
+int __init validate_psscr_val_mask(u64 *psscr_val, u64 *psscr_mask, u32 flags)
{
int err = 0;
@@ -1318,7 +1317,7 @@ static void __init pnv_probe_idle_states(void)
* which is the number of cpuidle states discovered through device-tree.
*/
-static int pnv_parse_cpuidle_dt(void)
+static int __init pnv_parse_cpuidle_dt(void)
{
struct device_node *np;
int nr_idle_states, i;
diff --git a/arch/powerpc/platforms/powernv/opal-core.c b/arch/powerpc/platforms/powernv/opal-core.c
index 5b9736bbc2aa..0331f1973f0e 100644
--- a/arch/powerpc/platforms/powernv/opal-core.c
+++ b/arch/powerpc/platforms/powernv/opal-core.c
@@ -89,7 +89,7 @@ static inline int is_opalcore_usable(void)
return (oc_conf && oc_conf->opalcorebuf != NULL) ? 1 : 0;
}
-static Elf64_Word *append_elf64_note(Elf64_Word *buf, char *name,
+static Elf64_Word *__init append_elf64_note(Elf64_Word *buf, char *name,
u32 type, void *data,
size_t data_len)
{
@@ -108,7 +108,7 @@ static Elf64_Word *append_elf64_note(Elf64_Word *buf, char *name,
return buf;
}
-static void fill_prstatus(struct elf_prstatus *prstatus, int pir,
+static void __init fill_prstatus(struct elf_prstatus *prstatus, int pir,
struct pt_regs *regs)
{
memset(prstatus, 0, sizeof(struct elf_prstatus));
@@ -134,7 +134,7 @@ static void fill_prstatus(struct elf_prstatus *prstatus, int pir,
}
}
-static Elf64_Word *auxv_to_elf64_notes(Elf64_Word *buf,
+static Elf64_Word *__init auxv_to_elf64_notes(Elf64_Word *buf,
u64 opal_boot_entry)
{
Elf64_Off *bufp = (Elf64_Off *)oc_conf->auxv_buf;
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 717d1d30ade5..410ed5b9de29 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -208,11 +208,12 @@ static struct attribute *dump_default_attrs[] = {
&ack_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(dump_default);
static struct kobj_type dump_ktype = {
.sysfs_ops = &dump_sysfs_ops,
.release = &dump_release,
- .default_attrs = dump_default_attrs,
+ .default_groups = dump_default_groups,
};
static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type)
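
This hunk and the elog one that follows are part of the tree-wide move from kobj_type::default_attrs to default_groups: ATTRIBUTE_GROUPS(dump_default) expands the existing dump_default_attrs[] array into a dump_default_group and a NULL-terminated dump_default_groups[] the ktype can point at. A minimal sketch with hypothetical names:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static struct attribute *demo_attrs[] = {	/* hypothetical, empty */
	NULL,
};
ATTRIBUTE_GROUPS(demo);		/* generates demo_group and demo_groups[] */

static struct kobj_type demo_ktype = {
	.default_groups = demo_groups,	/* replaces .default_attrs */
};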
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 5821b0fa8614..554fdd7f88b8 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -144,11 +144,12 @@ static struct attribute *elog_default_attrs[] = {
&ack_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(elog_default);
static struct kobj_type elog_ktype = {
.sysfs_ops = &elog_sysfs_ops,
.release = &elog_release,
- .default_attrs = elog_default_attrs,
+ .default_groups = elog_default_groups,
};
/* Maximum size of a single log on FSP is 16KB */
diff --git a/arch/powerpc/platforms/powernv/opal-fadump.c b/arch/powerpc/platforms/powernv/opal-fadump.c
index 9a360ced663b..c8ad057c7221 100644
--- a/arch/powerpc/platforms/powernv/opal-fadump.c
+++ b/arch/powerpc/platforms/powernv/opal-fadump.c
@@ -112,7 +112,7 @@ static void opal_fadump_update_config(struct fw_dump *fadump_conf,
* This function is called in the capture kernel to get configuration details
* from metadata setup by the first kernel.
*/
-static void opal_fadump_get_config(struct fw_dump *fadump_conf,
+static void __init opal_fadump_get_config(struct fw_dump *fadump_conf,
const struct opal_fadump_mem_struct *fdm)
{
unsigned long base, size, last_end, hole_size;
diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
index 05d3832019b9..3fea5da6d1b3 100644
--- a/arch/powerpc/platforms/powernv/opal-imc.c
+++ b/arch/powerpc/platforms/powernv/opal-imc.c
@@ -200,13 +200,13 @@ static void disable_nest_pmu_counters(void)
static void disable_core_pmu_counters(void)
{
- cpumask_t cores_map;
int cpu, rc;
cpus_read_lock();
/* Disable the IMC Core functions */
- cores_map = cpu_online_cores_map();
- for_each_cpu(cpu, &cores_map) {
+ for_each_online_cpu(cpu) {
+ if (cpu_first_thread_sibling(cpu) != cpu)
+ continue;
rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
get_hard_smp_processor_id(cpu));
if (rc)
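
Here, as in the idle.c hunk above, cpu_online_cores_map() is replaced by walking all online CPUs and acting only on the first thread of each core, avoiding a temporary cpumask. A minimal sketch of the idiom, assuming powerpc's cpu_first_thread_sibling():

#include <linux/cpumask.h>
#include <asm/cputhreads.h>

/* Sketch: invoke fn once per online core by skipping every CPU that
 * is not the first SMT thread of its core. */
static void demo_for_each_online_core(void (*fn)(int cpu))
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu_first_thread_sibling(cpu) != cpu)
			continue;
		fn(cpu);
	}
}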
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index 1e5d51db40f8..5390c888db16 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -396,6 +396,7 @@ void __init opal_lpc_init(void)
if (!of_get_property(np, "primary", NULL))
continue;
opal_lpc_chip_id = of_get_ibm_chip_id(np);
+ of_node_put(np);
break;
}
if (opal_lpc_chip_id < 0)
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
index d3b6e135c18b..22d6efe17b0d 100644
--- a/arch/powerpc/platforms/powernv/opal-msglog.c
+++ b/arch/powerpc/platforms/powernv/opal-msglog.c
@@ -105,7 +105,7 @@ static struct bin_attribute opal_msglog_attr = {
.read = opal_msglog_read
};
-struct memcons *memcons_init(struct device_node *node, const char *mc_prop_name)
+struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name)
{
u64 mcaddr;
struct memcons *mc;
@@ -133,7 +133,7 @@ out_err:
return NULL;
}
-u32 memcons_get_size(struct memcons *mc)
+u32 __init memcons_get_size(struct memcons *mc)
{
return be32_to_cpu(mc->ibuf_size) + be32_to_cpu(mc->obuf_size);
}
diff --git a/arch/powerpc/platforms/powernv/opal-power.c b/arch/powerpc/platforms/powernv/opal-power.c
index 2a3717fc24ea..db99ffcb7b82 100644
--- a/arch/powerpc/platforms/powernv/opal-power.c
+++ b/arch/powerpc/platforms/powernv/opal-power.c
@@ -53,7 +53,7 @@ static bool detect_epow(void)
}
/* Check for existing EPOW, DPO events */
-static bool poweroff_pending(void)
+static bool __init poweroff_pending(void)
{
int rc;
__be64 opal_dpo_timeout;
diff --git a/arch/powerpc/platforms/powernv/opal-powercap.c b/arch/powerpc/platforms/powernv/opal-powercap.c
index c16d44f6f1d1..64506b46e77b 100644
--- a/arch/powerpc/platforms/powernv/opal-powercap.c
+++ b/arch/powerpc/platforms/powernv/opal-powercap.c
@@ -129,7 +129,7 @@ out_token:
return ret;
}
-static void powercap_add_attr(int handle, const char *name,
+static void __init powercap_add_attr(int handle, const char *name,
struct powercap_attr *attr)
{
attr->handle = handle;
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index 44d7dacb33a2..a9bcf9217e64 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -18,7 +18,7 @@
#include <asm/firmware.h>
#include <asm/machdep.h>
-static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
+static void __init opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
{
tm->tm_year = ((bcd2bin(y_m_d >> 24) * 100) +
bcd2bin((y_m_d >> 16) & 0xff)) - 1900;
diff --git a/arch/powerpc/platforms/powernv/opal-sensor-groups.c b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
index f8ae1fb0c102..8fba7d25ae56 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor-groups.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor-groups.c
@@ -126,7 +126,7 @@ static void add_attr(int handle, struct sg_attr *attr, int index)
attr->attr.store = ops_info[index].store;
}
-static int add_attr_group(const __be32 *ops, int len, struct sensor_group *sg,
+static int __init add_attr_group(const __be32 *ops, int len, struct sensor_group *sg,
u32 handle)
{
int i, j;
@@ -144,7 +144,7 @@ static int add_attr_group(const __be32 *ops, int len, struct sensor_group *sg,
return sysfs_create_group(sg_kobj, &sg->sg);
}
-static int get_nr_attrs(const __be32 *ops, int len)
+static int __init get_nr_attrs(const __be32 *ops, int len)
{
int i, j;
int nr_attrs = 0;
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index e9d18519e650..55a8fbfdb5b2 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -73,7 +73,7 @@ static struct task_struct *kopald_tsk;
static struct opal_msg *opal_msg;
static u32 opal_msg_size __ro_after_init;
-void opal_configure_cores(void)
+void __init opal_configure_cores(void)
{
u64 reinit_flags = 0;
@@ -779,7 +779,7 @@ out:
return !!recover_addr;
}
-static int opal_sysfs_init(void)
+static int __init opal_sysfs_init(void)
{
opal_kobj = kobject_create_and_add("opal", firmware_kobj);
if (!opal_kobj) {
@@ -937,7 +937,7 @@ static void __init opal_dump_region_init(void)
"rc = %d\n", rc);
}
-static void opal_pdev_init(const char *compatible)
+static void __init opal_pdev_init(const char *compatible)
{
struct device_node *np;
@@ -981,7 +981,7 @@ void opal_wake_poller(void)
wake_up_process(kopald_tsk);
}
-static void opal_init_heartbeat(void)
+static void __init opal_init_heartbeat(void)
{
/* Old firmware, we assume the HVC heartbeat is sufficient */
if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 004cd6a96c8a..b722ac902269 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2154,10 +2154,10 @@ static void pnv_msi_compose_msg(struct irq_data *d, struct msi_msg *msg)
int rc;
rc = __pnv_pci_ioda_msi_setup(phb, pdev, d->hwirq,
- entry->msi_attrib.is_64, msg);
+ entry->pci.msi_attrib.is_64, msg);
if (rc)
dev_err(&pdev->dev, "Failed to setup %s-bit MSI #%ld : %d\n",
- entry->msi_attrib.is_64 ? "64" : "32", d->hwirq, rc);
+ entry->pci.msi_attrib.is_64 ? "64" : "32", d->hwirq, rc);
}
/*
@@ -2265,7 +2265,7 @@ static const struct irq_domain_ops pnv_irq_domain_ops = {
.free = pnv_irq_domain_free,
};
-static int pnv_msi_allocate_domains(struct pci_controller *hose, unsigned int count)
+static int __init pnv_msi_allocate_domains(struct pci_controller *hose, unsigned int count)
{
struct pnv_phb *phb = hose->private_data;
struct irq_domain *parent = irq_get_default_host();
@@ -2298,7 +2298,7 @@ static int pnv_msi_allocate_domains(struct pci_controller *hose, unsigned int co
return 0;
}
-static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
+static void __init pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
unsigned int count;
const __be32 *prop = of_get_property(phb->hose->dn,
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 11df4e16a1cc..e297bf4abfcb 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -39,7 +39,7 @@ bool cpu_core_split_required(void);
struct memcons;
ssize_t memcons_copy(struct memcons *mc, char *to, loff_t pos, size_t count);
-u32 memcons_get_size(struct memcons *mc);
-struct memcons *memcons_init(struct device_node *node, const char *mc_prop_name);
+u32 __init memcons_get_size(struct memcons *mc);
+struct memcons *__init memcons_init(struct device_node *node, const char *mc_prop_name);
#endif /* _POWERNV_H */
diff --git a/arch/powerpc/platforms/powernv/rng.c b/arch/powerpc/platforms/powernv/rng.c
index 72c25295c1c2..b4386714494a 100644
--- a/arch/powerpc/platforms/powernv/rng.c
+++ b/arch/powerpc/platforms/powernv/rng.c
@@ -80,7 +80,7 @@ static int powernv_get_random_darn(unsigned long *v)
return 1;
}
-static int initialise_darn(void)
+static int __init initialise_darn(void)
{
unsigned long val;
int i;
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index ad56a54ac9c5..105d889abd51 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -40,7 +40,7 @@
#include "powernv.h"
-static bool fw_feature_is(const char *state, const char *name,
+static bool __init fw_feature_is(const char *state, const char *name,
struct device_node *fw_features)
{
struct device_node *np;
@@ -55,7 +55,7 @@ static bool fw_feature_is(const char *state, const char *name,
return rc;
}
-static void init_fw_feat_flags(struct device_node *np)
+static void __init init_fw_feat_flags(struct device_node *np)
{
if (fw_feature_is("enabled", "inst-spec-barrier-ori31,31,0", np))
security_ftr_set(SEC_FTR_SPEC_BAR_ORI31);
@@ -98,7 +98,7 @@ static void init_fw_feat_flags(struct device_node *np)
security_ftr_clear(SEC_FTR_BNDS_CHK_SPEC_BAR);
}
-static void pnv_setup_security_mitigations(void)
+static void __init pnv_setup_security_mitigations(void)
{
struct device_node *np, *fw_features;
enum l1d_flush_type type;
@@ -123,10 +123,14 @@ static void pnv_setup_security_mitigations(void)
}
/*
- * If we are non-Power9 bare metal, we don't need to flush on kernel
- * entry or after user access: they fix a P9 specific vulnerability.
+ * The issues addressed by the entry and uaccess flush don't affect P7
+ * or P8, so on bare metal disable them explicitly in case firmware does
+ * not include the features to disable them. POWER9 and newer processors
+ * should have the appropriate firmware flags.
*/
- if (!pvr_version_is(PVR_POWER9)) {
+ if (pvr_version_is(PVR_POWER7) || pvr_version_is(PVR_POWER7p) ||
+ pvr_version_is(PVR_POWER8E) || pvr_version_is(PVR_POWER8NVL) ||
+ pvr_version_is(PVR_POWER8)) {
security_ftr_clear(SEC_FTR_L1D_FLUSH_ENTRY);
security_ftr_clear(SEC_FTR_L1D_FLUSH_UACCESS);
}
@@ -207,6 +211,7 @@ static void __init pnv_init(void)
#endif
add_preferred_console("hvc", 0, NULL);
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (!radix_enabled()) {
size_t size = sizeof(struct slb_entry) * mmu_slb_size;
int i;
@@ -219,6 +224,7 @@ static void __init pnv_init(void)
cpu_to_node(i));
}
}
+#endif
}
static void __init pnv_init_IRQ(void)
diff --git a/arch/powerpc/platforms/ps3/gelic_udbg.c b/arch/powerpc/platforms/ps3/gelic_udbg.c
index cba4f8f5b8d7..6b298010fd84 100644
--- a/arch/powerpc/platforms/ps3/gelic_udbg.c
+++ b/arch/powerpc/platforms/ps3/gelic_udbg.c
@@ -113,7 +113,7 @@ static int unmap_dma_mem(int bus_id, int dev_id, u64 bus_addr, size_t len)
return lv1_free_device_dma_region(bus_id, dev_id, real_bus_addr);
}
-static void gelic_debug_init(void)
+static void __init gelic_debug_init(void)
{
s64 result;
u64 v2;
diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c
index 9c44f335c0b9..5ce924611b94 100644
--- a/arch/powerpc/platforms/ps3/mm.c
+++ b/arch/powerpc/platforms/ps3/mm.c
@@ -41,7 +41,7 @@ enum {
PAGE_SHIFT_16M = 24U,
};
-static unsigned long make_page_sizes(unsigned long a, unsigned long b)
+static unsigned long __init make_page_sizes(unsigned long a, unsigned long b)
{
return (a << 56) | (b << 48);
}
@@ -215,7 +215,7 @@ notrace void ps3_mm_vas_destroy(void)
}
}
-static int ps3_mm_get_repository_highmem(struct mem_region *r)
+static int __init ps3_mm_get_repository_highmem(struct mem_region *r)
{
int result;
diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c
index e8530371aed6..cb844e0add2b 100644
--- a/arch/powerpc/platforms/ps3/os-area.c
+++ b/arch/powerpc/platforms/ps3/os-area.c
@@ -501,7 +501,7 @@ static int db_set_64(struct os_area_db *db, const struct os_area_db_id *id,
return -1;
}
-static int db_get_64(const struct os_area_db *db,
+static int __init db_get_64(const struct os_area_db *db,
const struct os_area_db_id *id, uint64_t *value)
{
struct db_iterator i;
@@ -517,7 +517,7 @@ static int db_get_64(const struct os_area_db *db,
return -1;
}
-static int db_get_rtc_diff(const struct os_area_db *db, int64_t *rtc_diff)
+static int __init db_get_rtc_diff(const struct os_area_db *db, int64_t *rtc_diff)
{
return db_get_64(db, &os_area_db_id_rtc_diff, (uint64_t*)rtc_diff);
}
diff --git a/arch/powerpc/platforms/ps3/platform.h b/arch/powerpc/platforms/ps3/platform.h
index 07bd39ef71ff..6beecdb0d51f 100644
--- a/arch/powerpc/platforms/ps3/platform.h
+++ b/arch/powerpc/platforms/ps3/platform.h
@@ -35,7 +35,7 @@ void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq);
/* smp */
-void smp_init_ps3(void);
+void __init smp_init_ps3(void);
#ifdef CONFIG_SMP
void ps3_smp_cleanup_cpu(int cpu);
#else
@@ -134,9 +134,9 @@ struct ps3_repository_device {
int ps3_repository_find_device(struct ps3_repository_device *repo);
int ps3_repository_find_device_by_id(struct ps3_repository_device *repo,
u64 bus_id, u64 dev_id);
-int ps3_repository_find_devices(enum ps3_bus_type bus_type,
+int __init ps3_repository_find_devices(enum ps3_bus_type bus_type,
int (*callback)(const struct ps3_repository_device *repo));
-int ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
+int __init ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
unsigned int *bus_index);
int ps3_repository_find_interrupt(const struct ps3_repository_device *repo,
enum ps3_interrupt_type intr_type, unsigned int *interrupt_id);
@@ -211,8 +211,8 @@ static inline int ps3_repository_delete_highmem_info(unsigned int region_index)
int ps3_repository_read_num_be(unsigned int *num_be);
int ps3_repository_read_be_node_id(unsigned int be_index, u64 *node_id);
int ps3_repository_read_be_id(u64 node_id, u64 *be_id);
-int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq);
-int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq);
+int __init ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq);
+int __init ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq);
/* repository performance monitor info */
@@ -247,7 +247,7 @@ int ps3_repository_read_spu_resource_id(unsigned int res_index,
/* repository vuart info */
-int ps3_repository_read_vuart_av_port(unsigned int *port);
-int ps3_repository_read_vuart_sysmgr_port(unsigned int *port);
+int __init ps3_repository_read_vuart_av_port(unsigned int *port);
+int __init ps3_repository_read_vuart_sysmgr_port(unsigned int *port);
#endif
diff --git a/arch/powerpc/platforms/ps3/repository.c b/arch/powerpc/platforms/ps3/repository.c
index 21712964e76f..205763061a2d 100644
--- a/arch/powerpc/platforms/ps3/repository.c
+++ b/arch/powerpc/platforms/ps3/repository.c
@@ -413,7 +413,7 @@ found_dev:
return 0;
}
-int ps3_repository_find_devices(enum ps3_bus_type bus_type,
+int __init ps3_repository_find_devices(enum ps3_bus_type bus_type,
int (*callback)(const struct ps3_repository_device *repo))
{
int result = 0;
@@ -455,7 +455,7 @@ int ps3_repository_find_devices(enum ps3_bus_type bus_type,
return result;
}
-int ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
+int __init ps3_repository_find_bus(enum ps3_bus_type bus_type, unsigned int from,
unsigned int *bus_index)
{
unsigned int i;
@@ -908,7 +908,7 @@ int ps3_repository_read_boot_dat_size(unsigned int *size)
return result;
}
-int ps3_repository_read_vuart_av_port(unsigned int *port)
+int __init ps3_repository_read_vuart_av_port(unsigned int *port)
{
int result;
u64 v1 = 0;
@@ -923,7 +923,7 @@ int ps3_repository_read_vuart_av_port(unsigned int *port)
return result;
}
-int ps3_repository_read_vuart_sysmgr_port(unsigned int *port)
+int __init ps3_repository_read_vuart_sysmgr_port(unsigned int *port)
{
int result;
u64 v1 = 0;
@@ -1005,7 +1005,7 @@ int ps3_repository_read_be_id(u64 node_id, u64 *be_id)
be_id, NULL);
}
-int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
+int __init ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
{
return read_node(PS3_LPAR_ID_PME,
make_first_field("be", 0),
@@ -1015,7 +1015,7 @@ int ps3_repository_read_tb_freq(u64 node_id, u64 *tb_freq)
tb_freq, NULL);
}
-int ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq)
+int __init ps3_repository_read_be_tb_freq(unsigned int be_index, u64 *tb_freq)
{
int result;
u64 node_id;
@@ -1178,7 +1178,7 @@ int ps3_repository_delete_highmem_info(unsigned int region_index)
#if defined(DEBUG)
-int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
+int __init ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
{
int result = 0;
unsigned int res_index;
@@ -1231,7 +1231,7 @@ int ps3_repository_dump_resource_info(const struct ps3_repository_device *repo)
return result;
}
-static int dump_stor_dev_info(struct ps3_repository_device *repo)
+static int __init dump_stor_dev_info(struct ps3_repository_device *repo)
{
int result = 0;
unsigned int num_regions, region_index;
@@ -1279,7 +1279,7 @@ out:
return result;
}
-static int dump_device_info(struct ps3_repository_device *repo,
+static int __init dump_device_info(struct ps3_repository_device *repo,
unsigned int num_dev)
{
int result = 0;
@@ -1323,7 +1323,7 @@ static int dump_device_info(struct ps3_repository_device *repo,
return result;
}
-int ps3_repository_dump_bus_info(void)
+int __init ps3_repository_dump_bus_info(void)
{
int result = 0;
struct ps3_repository_device repo;
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index 93b1e73b3529..85295756005a 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -112,7 +112,7 @@ static struct smp_ops_t ps3_smp_ops = {
.kick_cpu = smp_generic_kick_cpu,
};
-void smp_init_ps3(void)
+void __init smp_init_ps3(void)
{
DBG(" -> %s\n", __func__);
smp_ops = &ps3_smp_ops;
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c
index 0c252478e556..4a2520ec6d7f 100644
--- a/arch/powerpc/platforms/ps3/spu.c
+++ b/arch/powerpc/platforms/ps3/spu.c
@@ -137,7 +137,7 @@ u64 ps3_get_spe_id(void *arg)
}
EXPORT_SYMBOL_GPL(ps3_get_spe_id);
-static unsigned long get_vas_id(void)
+static unsigned long __init get_vas_id(void)
{
u64 id;
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 2e57391e0778..f7fd91d153a4 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -17,7 +17,6 @@ config PPC_PSERIES
select PPC_RTAS_DAEMON
select RTAS_ERROR_LOGGING
select PPC_UDBG_16550
- select PPC_NATIVE
select PPC_DOORBELL
select HOTPLUG_CPU
select ARCH_RANDOM
@@ -61,10 +60,6 @@ config PSERIES_ENERGY
Provides: /sys/devices/system/cpu/pseries_(de)activation_hint_list
and /sys/devices/system/cpu/cpuN/pseries_(de)activation_hint
-config SCANLOG
- tristate "Scanlog dump interface"
- depends on RTAS_PROC && PPC_PSERIES
-
config IO_EVENT_IRQ
bool "IO Event Interrupt support"
depends on PPC_PSERIES
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 41d8aee98da4..ee60b59024b4 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -8,7 +8,6 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
firmware.o power.o dlpar.o mobility.o rng.o \
pci.o pci_dlpar.o eeh_pseries.o msi.o
obj-$(CONFIG_SMP) += smp.o
-obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_KEXEC_CORE) += kexec.o
obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
index be661e919c76..623dfe0d8e1c 100644
--- a/arch/powerpc/platforms/pseries/event_sources.c
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -8,7 +8,7 @@
#include "pseries.h"
-void request_event_sources_irqs(struct device_node *np,
+void __init request_event_sources_irqs(struct device_node *np,
irq_handler_t handler,
const char *name)
{
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 5ab44600c8d3..b81fc846d99c 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -864,12 +864,13 @@ static int __init pseries_cpu_hotplug_init(void)
/* Processors can be added/removed only on LPAR */
if (firmware_has_feature(FW_FEATURE_LPAR)) {
for_each_node(node) {
- alloc_bootmem_cpumask_var(&node_recorded_ids_map[node]);
+ if (!alloc_cpumask_var_node(&node_recorded_ids_map[node],
+ GFP_KERNEL, node))
+ return -ENOMEM;
/* Record ids of CPU added at boot time */
- cpumask_or(node_recorded_ids_map[node],
- node_recorded_ids_map[node],
- cpumask_of_node(node));
+ cpumask_copy(node_recorded_ids_map[node],
+ cpumask_of_node(node));
}
of_reconfig_notifier_register(&pseries_smp_nb);
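
alloc_bootmem_cpumask_var() is only valid before the normal allocators are up, so this initcall now uses alloc_cpumask_var_node() with error handling; and since the non-zeroing variant is used, the OR into the fresh mask becomes a plain cpumask_copy(). A minimal sketch:

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/topology.h>

/* Sketch: allocate a node-local cpumask and record the node's CPUs. */
static int __init demo_record_node_cpus(int node, cpumask_var_t *mask)
{
	if (!alloc_cpumask_var_node(mask, GFP_KERNEL, node))
		return -ENOMEM;
	/* With CONFIG_CPUMASK_OFFSTACK the mask starts uninitialised,
	 * so copy into it rather than OR. */
	cpumask_copy(*mask, cpumask_of_node(node));
	return 0;
}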
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 8f998e55735b..4d991cf840d9 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -1654,7 +1654,7 @@ static struct notifier_block iommu_reconfig_nb = {
};
/* These are called very early. */
-void iommu_init_early_pSeries(void)
+void __init iommu_init_early_pSeries(void)
{
if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
return;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 3df6bdfea475..f8899d506ea4 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -58,6 +58,7 @@ EXPORT_SYMBOL(plpar_hcall);
EXPORT_SYMBOL(plpar_hcall9);
EXPORT_SYMBOL(plpar_hcall_norets);
+#ifdef CONFIG_PPC_64S_HASH_MMU
/*
* H_BLOCK_REMOVE supported block size for this page size in segment whose base
* page size is that page size.
@@ -66,6 +67,7 @@ EXPORT_SYMBOL(plpar_hcall_norets);
* page size.
*/
static int hblkrm_size[MMU_PAGE_COUNT][MMU_PAGE_COUNT] __ro_after_init;
+#endif
/*
* Due to the involved complexity, and that the current hypervisor is only
@@ -689,7 +691,7 @@ void vpa_init(int cpu)
return;
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
/*
* PAPR says this feature is SLB-Buffer but firmware never
* reports that. All SPLPAR support SLB shadow buffer.
@@ -702,7 +704,7 @@ void vpa_init(int cpu)
"cpu %d (hw %d) of area %lx failed with %ld\n",
cpu, hwcpu, addr, ret);
}
-#endif /* CONFIG_PPC_BOOK3S_64 */
+#endif /* CONFIG_PPC_64S_HASH_MMU */
/*
* Register dispatch trace log, if one has been allocated.
@@ -712,6 +714,36 @@ void vpa_init(int cpu)
#ifdef CONFIG_PPC_BOOK3S_64
+static int __init pseries_lpar_register_process_table(unsigned long base,
+ unsigned long page_size, unsigned long table_size)
+{
+ long rc;
+ unsigned long flags = 0;
+
+ if (table_size)
+ flags |= PROC_TABLE_NEW;
+ if (radix_enabled()) {
+ flags |= PROC_TABLE_RADIX;
+ if (mmu_has_feature(MMU_FTR_GTSE))
+ flags |= PROC_TABLE_GTSE;
+ } else
+ flags |= PROC_TABLE_HPT_SLB;
+ for (;;) {
+ rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
+ page_size, table_size);
+ if (!H_IS_LONG_BUSY(rc))
+ break;
+ mdelay(get_longbusy_msecs(rc));
+ }
+ if (rc != H_SUCCESS) {
+ pr_err("Failed to register process table (rc=%ld)\n", rc);
+ BUG();
+ }
+ return rc;
+}
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
+
static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long vpn, unsigned long pa,
unsigned long rflags, unsigned long vflags,
@@ -1680,34 +1712,6 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
return 0;
}
-static int pseries_lpar_register_process_table(unsigned long base,
- unsigned long page_size, unsigned long table_size)
-{
- long rc;
- unsigned long flags = 0;
-
- if (table_size)
- flags |= PROC_TABLE_NEW;
- if (radix_enabled()) {
- flags |= PROC_TABLE_RADIX;
- if (mmu_has_feature(MMU_FTR_GTSE))
- flags |= PROC_TABLE_GTSE;
- } else
- flags |= PROC_TABLE_HPT_SLB;
- for (;;) {
- rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
- page_size, table_size);
- if (!H_IS_LONG_BUSY(rc))
- break;
- mdelay(get_longbusy_msecs(rc));
- }
- if (rc != H_SUCCESS) {
- pr_err("Failed to register process table (rc=%ld)\n", rc);
- BUG();
- }
- return rc;
-}
-
void __init hpte_init_pseries(void)
{
mmu_hash_ops.hpte_invalidate = pSeries_lpar_hpte_invalidate;
@@ -1730,9 +1734,10 @@ void __init hpte_init_pseries(void)
if (cpu_has_feature(CPU_FTR_ARCH_300))
pseries_lpar_register_process_table(0, 0, 0);
}
+#endif /* CONFIG_PPC_64S_HASH_MMU */
#ifdef CONFIG_PPC_RADIX_MMU
-void radix_init_pseries(void)
+void __init radix_init_pseries(void)
{
pr_info("Using radix MMU under hypervisor\n");
@@ -1932,7 +1937,8 @@ int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data)
return rc;
}
-static unsigned long vsid_unscramble(unsigned long vsid, int ssize)
+#ifdef CONFIG_PPC_64S_HASH_MMU
+static unsigned long __init vsid_unscramble(unsigned long vsid, int ssize)
{
unsigned long protovsid;
unsigned long va_bits = VA_BITS;
@@ -1992,6 +1998,7 @@ static int __init reserve_vrma_context_id(void)
return 0;
}
machine_device_initcall(pseries, reserve_vrma_context_id);
+#endif
#ifdef CONFIG_DEBUG_FS
/* debugfs file interface for vpa data */
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index f71eac74ea92..c7940fcfc911 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -531,8 +531,9 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
seq_printf(m, "shared_processor_mode=%d\n",
lppaca_shared_proc(get_lppaca()));
-#ifdef CONFIG_PPC_BOOK3S_64
- seq_printf(m, "slb_size=%d\n", mmu_slb_size);
+#ifdef CONFIG_PPC_64S_HASH_MMU
+ if (!radix_enabled())
+ seq_printf(m, "slb_size=%d\n", mmu_slb_size);
#endif
parse_em_data(m);
maxmem_data(m);
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 210a37a065fb..85033f392c78 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -451,11 +451,15 @@ static void prod_others(void)
static u16 clamp_slb_size(void)
{
+#ifdef CONFIG_PPC_64S_HASH_MMU
u16 prev = mmu_slb_size;
slb_set_size(SLB_MIN_SIZE);
return prev;
+#else
+ return 0;
+#endif
}
static int do_suspend(void)
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 8627362f613e..fb2919fd6bc0 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -321,27 +321,6 @@ out:
return request;
}
-static int check_msix_entries(struct pci_dev *pdev)
-{
- struct msi_desc *entry;
- int expected;
-
- /* There's no way for us to express to firmware that we want
- * a discontiguous, or non-zero based, range of MSI-X entries.
- * So we must reject such requests. */
-
- expected = 0;
- for_each_pci_msi_entry(entry, pdev) {
- if (entry->msi_attrib.entry_nr != expected) {
- pr_debug("rtas_msi: bad MSI-X entries.\n");
- return -EINVAL;
- }
- expected++;
- }
-
- return 0;
-}
-
static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
{
u32 addr_hi, addr_lo;
@@ -380,9 +359,6 @@ static int rtas_prepare_msi_irqs(struct pci_dev *pdev, int nvec_in, int type,
if (quota && quota < nvec)
return quota;
- if (type == PCI_CAP_ID_MSIX && check_msix_entries(pdev))
- return -EINVAL;
-
/*
* Firmware currently refuses any non-power-of-two allocation
* so we round up if the quota will allow it.
@@ -448,8 +424,7 @@ static int pseries_msi_ops_prepare(struct irq_domain *domain, struct device *dev
int nvec, msi_alloc_info_t *arg)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct msi_desc *desc = first_pci_msi_entry(pdev);
- int type = desc->msi_attrib.is_msix ? PCI_CAP_ID_MSIX : PCI_CAP_ID_MSI;
+ int type = pdev->msix_enabled ? PCI_CAP_ID_MSIX : PCI_CAP_ID_MSI;
return rtas_prepare_msi_irqs(pdev, nvec, type, arg);
}
@@ -530,9 +505,16 @@ static struct irq_chip pseries_pci_msi_irq_chip = {
.irq_write_msi_msg = pseries_msi_write_msg,
};
+
+/*
+ * Set MSI_FLAG_MSIX_CONTIGUOUS as there is no way to ask the
+ * firmware for a discontiguous or non-zero based range of MSI-X
+ * entries. Core code will reject such setup attempts.
+ */
static struct msi_domain_info pseries_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX),
+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX |
+ MSI_FLAG_MSIX_CONTIGUOUS),
.ops = &pseries_pci_msi_domain_ops,
.chip = &pseries_pci_msi_irq_chip,
};
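
For context, a hedged sketch of the contiguity rule that MSI_FLAG_MSIX_CONTIGUOUS delegates to the MSI core, equivalent to the removed check_msix_entries() but written against the new descriptor API (the helper name is illustrative, not the core's actual code):

#include <linux/msi.h>
#include <linux/pci.h>

/* Illustrative only: MSI-X indices must form the range 0..n-1. */
static bool msix_entries_contiguous(struct pci_dev *pdev)
{
	struct msi_desc *desc;
	int expected = 0;

	msi_for_each_desc(desc, &pdev->dev, MSI_DESC_ALL)
		if (desc->msi_index != expected++)
			return false;
	return true;
}
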
@@ -580,7 +562,7 @@ static int pseries_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
int hwirq;
int i, ret;
- hwirq = rtas_query_irq_number(pci_get_pdn(pdev), desc->msi_attrib.entry_nr);
+ hwirq = rtas_query_irq_number(pci_get_pdn(pdev), desc->msi_index);
if (hwirq < 0) {
dev_err(&pdev->dev, "Failed to query HW IRQ: %d\n", hwirq);
return hwirq;
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 3544778e06d0..56c9ef9052e9 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -11,7 +11,7 @@
struct device_node;
-extern void request_event_sources_irqs(struct device_node *np,
+void __init request_event_sources_irqs(struct device_node *np,
irq_handler_t handler, const char *name);
#include <linux/of.h>
@@ -113,6 +113,11 @@ int dlpar_workqueue_init(void);
extern u32 pseries_security_flavor;
void pseries_setup_security_mitigations(void);
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
void pseries_lpar_read_hblkrm_characteristics(void);
+#else
+static inline void pseries_lpar_read_hblkrm_characteristics(void) { }
+#endif
#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 56092dccfdb8..74c9b1b5bc66 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -526,6 +526,7 @@ static int mce_handle_err_realmode(int disposition, u8 error_type)
disposition = RTAS_DISP_FULLY_RECOVERED;
break;
case MC_ERROR_TYPE_SLB:
+#ifdef CONFIG_PPC_64S_HASH_MMU
/*
* Store the old slb content in paca before flushing.
* Print this when we go to virtual mode.
@@ -538,6 +539,7 @@ static int mce_handle_err_realmode(int disposition, u8 error_type)
slb_save_contents(local_paca->mce_faulty_slbs);
flush_and_reload_slb();
disposition = RTAS_DISP_FULLY_RECOVERED;
+#endif
break;
default:
break;
diff --git a/arch/powerpc/platforms/pseries/rtas-fadump.c b/arch/powerpc/platforms/pseries/rtas-fadump.c
index f8f73b47b107..35f9cb602c30 100644
--- a/arch/powerpc/platforms/pseries/rtas-fadump.c
+++ b/arch/powerpc/platforms/pseries/rtas-fadump.c
@@ -39,7 +39,7 @@ static void rtas_fadump_update_config(struct fw_dump *fadump_conf,
* This function is called in the capture kernel to get configuration details
* setup in the first kernel and passed to the f/w.
*/
-static void rtas_fadump_get_config(struct fw_dump *fadump_conf,
+static void __init rtas_fadump_get_config(struct fw_dump *fadump_conf,
const struct rtas_fadump_mem_struct *fdm)
{
fadump_conf->boot_mem_addr[0] =
@@ -247,7 +247,7 @@ static inline int rtas_fadump_gpr_index(u64 id)
return i;
}
-static void rtas_fadump_set_regval(struct pt_regs *regs, u64 reg_id, u64 reg_val)
+static void __init rtas_fadump_set_regval(struct pt_regs *regs, u64 reg_id, u64 reg_val)
{
int i;
@@ -272,7 +272,7 @@ static void rtas_fadump_set_regval(struct pt_regs *regs, u64 reg_id, u64 reg_val
regs->dsisr = (unsigned long)reg_val;
}
-static struct rtas_fadump_reg_entry*
+static struct rtas_fadump_reg_entry* __init
rtas_fadump_read_regs(struct rtas_fadump_reg_entry *reg_entry,
struct pt_regs *regs)
{
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
deleted file mode 100644
index 2879c4f0ceb7..000000000000
--- a/arch/powerpc/platforms/pseries/scanlog.c
+++ /dev/null
@@ -1,195 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * c 2001 PPC 64 Team, IBM Corp
- *
- * scan-log-data driver for PPC64 Todd Inglett <tinglett@vnet.ibm.com>
- *
- * When ppc64 hardware fails the service processor dumps internal state
- * of the system. After a reboot the operating system can access a dump
- * of this data using this driver. A dump exists if the device-tree
- * /chosen/ibm,scan-log-data property exists.
- *
- * This driver exports /proc/powerpc/scan-log-dump which can be read.
- * The driver supports only sequential reads.
- *
- * The driver looks at a write to the driver for the single word "reset".
- * If given, the driver will reset the scanlog so the platform can free it.
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/errno.h>
-#include <linux/proc_fs.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <asm/rtas.h>
-#include <asm/prom.h>
-
-#define MODULE_VERS "1.0"
-#define MODULE_NAME "scanlog"
-
-/* Status returns from ibm,scan-log-dump */
-#define SCANLOG_COMPLETE 0
-#define SCANLOG_HWERROR -1
-#define SCANLOG_CONTINUE 1
-
-
-static unsigned int ibm_scan_log_dump; /* RTAS token */
-static unsigned int *scanlog_buffer; /* The data buffer */
-
-static ssize_t scanlog_read(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- unsigned int *data = scanlog_buffer;
- int status;
- unsigned long len, off;
- unsigned int wait_time;
-
- if (count > RTAS_DATA_BUF_SIZE)
- count = RTAS_DATA_BUF_SIZE;
-
- if (count < 1024) {
- /* This is the min supported by this RTAS call. Rather
- * than do all the buffering we insist the user code handle
- * larger reads. As long as cp works... :)
- */
- printk(KERN_ERR "scanlog: cannot perform a small read (%ld)\n", count);
- return -EINVAL;
- }
-
- if (!access_ok(buf, count))
- return -EFAULT;
-
- for (;;) {
- wait_time = 500; /* default wait if no data */
- spin_lock(&rtas_data_buf_lock);
- memcpy(rtas_data_buf, data, RTAS_DATA_BUF_SIZE);
- status = rtas_call(ibm_scan_log_dump, 2, 1, NULL,
- (u32) __pa(rtas_data_buf), (u32) count);
- memcpy(data, rtas_data_buf, RTAS_DATA_BUF_SIZE);
- spin_unlock(&rtas_data_buf_lock);
-
- pr_debug("scanlog: status=%d, data[0]=%x, data[1]=%x, " \
- "data[2]=%x\n", status, data[0], data[1], data[2]);
- switch (status) {
- case SCANLOG_COMPLETE:
- pr_debug("scanlog: hit eof\n");
- return 0;
- case SCANLOG_HWERROR:
- pr_debug("scanlog: hardware error reading data\n");
- return -EIO;
- case SCANLOG_CONTINUE:
- /* We may or may not have data yet */
- len = data[1];
- off = data[2];
- if (len > 0) {
- if (copy_to_user(buf, ((char *)data)+off, len))
- return -EFAULT;
- return len;
- }
- /* Break to sleep default time */
- break;
- default:
- /* Assume extended busy */
- wait_time = rtas_busy_delay_time(status);
- if (!wait_time) {
- printk(KERN_ERR "scanlog: unknown error " \
- "from rtas: %d\n", status);
- return -EIO;
- }
- }
- /* Apparently no data yet. Wait and try again. */
- msleep_interruptible(wait_time);
- }
- /*NOTREACHED*/
-}
-
-static ssize_t scanlog_write(struct file * file, const char __user * buf,
- size_t count, loff_t *ppos)
-{
- char stkbuf[20];
- int status;
-
- if (count > 19) count = 19;
- if (copy_from_user (stkbuf, buf, count)) {
- return -EFAULT;
- }
- stkbuf[count] = 0;
-
- if (buf) {
- if (strncmp(stkbuf, "reset", 5) == 0) {
- pr_debug("scanlog: reset scanlog\n");
- status = rtas_call(ibm_scan_log_dump, 2, 1, NULL, 0, 0);
- pr_debug("scanlog: rtas returns %d\n", status);
- }
- }
- return count;
-}
-
-static int scanlog_open(struct inode * inode, struct file * file)
-{
- unsigned int *data = scanlog_buffer;
-
- if (data[0] != 0) {
- /* This imperfect test stops a second copy of the
- * data (or a reset while data is being copied)
- */
- return -EBUSY;
- }
-
- data[0] = 0; /* re-init so we restart the scan */
-
- return 0;
-}
-
-static int scanlog_release(struct inode * inode, struct file * file)
-{
- unsigned int *data = scanlog_buffer;
-
- data[0] = 0;
- return 0;
-}
-
-static const struct proc_ops scanlog_proc_ops = {
- .proc_read = scanlog_read,
- .proc_write = scanlog_write,
- .proc_open = scanlog_open,
- .proc_release = scanlog_release,
- .proc_lseek = noop_llseek,
-};
-
-static int __init scanlog_init(void)
-{
- struct proc_dir_entry *ent;
- int err = -ENOMEM;
-
- ibm_scan_log_dump = rtas_token("ibm,scan-log-dump");
- if (ibm_scan_log_dump == RTAS_UNKNOWN_SERVICE)
- return -ENODEV;
-
- /* Ideally we could allocate a buffer < 4G */
- scanlog_buffer = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
- if (!scanlog_buffer)
- goto err;
-
- ent = proc_create("powerpc/rtas/scan-log-dump", 0400, NULL,
- &scanlog_proc_ops);
- if (!ent)
- goto err;
- return 0;
-err:
- kfree(scanlog_buffer);
- return err;
-}
-
-static void __exit scanlog_cleanup(void)
-{
- remove_proc_entry("powerpc/rtas/scan-log-dump", NULL);
- kfree(scanlog_buffer);
-}
-
-module_init(scanlog_init);
-module_exit(scanlog_cleanup);
-MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8a62af5b9c24..83a04d967a59 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -112,7 +112,7 @@ static void __init fwnmi_init(void)
u8 *mce_data_buf;
unsigned int i;
int nr_cpus = num_possible_cpus();
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
struct slb_entry *slb_ptr;
size_t size;
#endif
@@ -152,7 +152,7 @@ static void __init fwnmi_init(void)
(RTAS_ERROR_LOG_MAX * i);
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (!radix_enabled()) {
/* Allocate per cpu area to save old slb contents during MCE */
size = sizeof(struct slb_entry) * mmu_slb_size * nr_cpus;
@@ -447,7 +447,7 @@ void pseries_big_endian_exceptions(void)
panic("Could not enable big endian exceptions");
}
-void pseries_little_endian_exceptions(void)
+void __init pseries_little_endian_exceptions(void)
{
long rc;
@@ -801,7 +801,9 @@ static void __init pSeries_setup_arch(void)
fwnmi_init();
pseries_setup_security_mitigations();
+#ifdef CONFIG_PPC_64S_HASH_MMU
pseries_lpar_read_hblkrm_characteristics();
+#endif
/* By default, only probe PCI (can be overridden by rtas_pci) */
pci_add_flags(PCI_PROBE_ONLY);
@@ -905,7 +907,7 @@ void pSeries_coalesce_init(void)
* fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions,
* handle that here. (Stolen from parse_system_parameter_string)
*/
-static void pSeries_cmo_feature_init(void)
+static void __init pSeries_cmo_feature_init(void)
{
char *ptr, *key, *value, *end;
int call_status;
diff --git a/arch/powerpc/platforms/pseries/vas.c b/arch/powerpc/platforms/pseries/vas.c
index b043e3936d21..d243ddc58827 100644
--- a/arch/powerpc/platforms/pseries/vas.c
+++ b/arch/powerpc/platforms/pseries/vas.c
@@ -151,8 +151,15 @@ int h_query_vas_capabilities(const u64 hcall, u8 query_type, u64 result)
if (rc == H_SUCCESS)
return 0;
- pr_err("HCALL(%llx) error %ld, query_type %u, result buffer 0x%llx\n",
- hcall, rc, query_type, result);
+ /* H_FUNCTION means HV does not support VAS so don't print an error */
+ if (rc != H_FUNCTION) {
+ pr_err("%s error %ld, query_type %u, result buffer 0x%llx\n",
+ (hcall == H_QUERY_VAS_CAPABILITIES) ?
+ "H_QUERY_VAS_CAPABILITIES" :
+ "H_QUERY_NX_CAPABILITIES",
+ rc, query_type, result);
+ }
+
return -EIO;
}
EXPORT_SYMBOL_GPL(h_query_vas_capabilities);
@@ -482,7 +489,7 @@ EXPORT_SYMBOL_GPL(vas_unregister_api_pseries);
* Get the specific capabilities based on the feature type.
* Right now supports GZIP default and GZIP QoS capabilities.
*/
-static int get_vas_capabilities(u8 feat, enum vas_cop_feat_type type,
+static int __init get_vas_capabilities(u8 feat, enum vas_cop_feat_type type,
struct hv_vas_cop_feat_caps *hv_caps)
{
struct vas_cop_feat_caps *caps;
diff --git a/arch/powerpc/platforms/pseries/vio.c b/arch/powerpc/platforms/pseries/vio.c
index feafcb582e1b..c9f9be4ea26a 100644
--- a/arch/powerpc/platforms/pseries/vio.c
+++ b/arch/powerpc/platforms/pseries/vio.c
@@ -1061,7 +1061,7 @@ static struct attribute *vio_bus_attrs[] = {
};
ATTRIBUTE_GROUPS(vio_bus);
-static void vio_cmo_sysfs_init(void)
+static void __init vio_cmo_sysfs_init(void)
{
vio_bus_type.dev_groups = vio_cmo_dev_groups;
vio_bus_type.bus_groups = vio_bus_groups;
@@ -1073,7 +1073,7 @@ static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
-static void vio_cmo_sysfs_init(void) { }
+static void __init vio_cmo_sysfs_init(void) { }
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
@@ -1479,7 +1479,7 @@ EXPORT_SYMBOL(vio_register_device_node);
* Starting from the root node provide, register the device node for
* each child beneath the root.
*/
-static void vio_bus_scan_register_devices(char *root_name)
+static void __init vio_bus_scan_register_devices(char *root_name)
{
struct device_node *node_root, *node_child;
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 9ebcc1337560..5aa92ff3622d 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -12,17 +12,11 @@ config PPC4xx_HSTA_MSI
depends on PCI_MSI
depends on PCI && 4xx
-config PPC4xx_MSI
- bool
- depends on PCI_MSI
- depends on PCI && 4xx
-
config PPC_MSI_BITMAP
bool
depends on PCI_MSI
default y if MPIC
default y if FSL_PCI
- default y if PPC4xx_MSI
default y if PPC_POWERNV
source "arch/powerpc/sysdev/xics/Kconfig"
diff --git a/arch/powerpc/sysdev/cpm2.c b/arch/powerpc/sysdev/cpm2.c
index 68538b8329f7..3f130312b6e9 100644
--- a/arch/powerpc/sysdev/cpm2.c
+++ b/arch/powerpc/sysdev/cpm2.c
@@ -135,7 +135,7 @@ void __cpm2_setbrg(uint brg, uint rate, uint clk, int div16, int src)
}
EXPORT_SYMBOL(__cpm2_setbrg);
-int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
+int __init cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
{
int ret = 0;
int shift;
@@ -265,7 +265,7 @@ int cpm2_clk_setup(enum cpm_clk_target target, int clock, int mode)
return ret;
}
-int cpm2_smc_clk_setup(enum cpm_clk_target target, int clock)
+int __init cpm2_smc_clk_setup(enum cpm_clk_target target, int clock)
{
int ret = 0;
int shift;
@@ -326,7 +326,7 @@ struct cpm2_ioports {
u32 res[3];
};
-void cpm2_set_pin(int port, int pin, int flags)
+void __init cpm2_set_pin(int port, int pin, int flags)
{
struct cpm2_ioports __iomem *iop =
(struct cpm2_ioports __iomem *)&cpm2_immr->im_ioport;
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 1d33b7a5ea83..be6b99b1b352 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -226,7 +226,7 @@ static void dart_free(struct iommu_table *tbl, long index, long npages)
dart_cache_sync(orig_dp, orig_npages);
}
-static void allocate_dart(void)
+static void __init allocate_dart(void)
{
unsigned long tmp;
diff --git a/arch/powerpc/sysdev/fsl_mpic_err.c b/arch/powerpc/sysdev/fsl_mpic_err.c
index 9a98bb212922..df06bb6b838f 100644
--- a/arch/powerpc/sysdev/fsl_mpic_err.c
+++ b/arch/powerpc/sysdev/fsl_mpic_err.c
@@ -58,7 +58,7 @@ static struct irq_chip fsl_mpic_err_chip = {
.irq_unmask = fsl_mpic_unmask_err,
};
-int mpic_setup_error_int(struct mpic *mpic, int intvec)
+int __init mpic_setup_error_int(struct mpic *mpic, int intvec)
{
int i;
@@ -121,7 +121,7 @@ static irqreturn_t fsl_error_int_handler(int irq, void *data)
return IRQ_HANDLED;
}
-void mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum)
+void __init mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum)
{
unsigned int virq;
int ret;
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index e6b06c3f8197..b3475ae9f236 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -125,17 +125,13 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev)
struct fsl_msi *msi_data;
irq_hw_number_t hwirq;
- for_each_pci_msi_entry(entry, pdev) {
- if (!entry->irq)
- continue;
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
hwirq = virq_to_hw(entry->irq);
msi_data = irq_get_chip_data(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
-
- return;
}
static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
@@ -215,7 +211,7 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
}
}
- for_each_pci_msi_entry(entry, pdev) {
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
/*
* Loop over all the MSI devices until we find one that has an
* available interrupt.
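
The msi_for_each_desc() conversions in this file (and in mpic_u3msi.c below) replace for_each_pci_msi_entry() plus open-coded `if (!entry->irq)` checks with an explicit filter argument. A short sketch of the two filters this series uses (free_one() and map_one() are placeholder helpers):

/* Teardown: only descriptors that already have a Linux IRQ, so
 * entry->irq is guaranteed valid inside the loop. */
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED)
	free_one(entry->irq);

/* Setup: only descriptors still waiting for an IRQ mapping. */
msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED)
	map_one(entry);
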
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index b8f76f3fd994..674f047b7820 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -1106,7 +1106,7 @@ static const struct of_device_id pci_ids[] = {
struct device_node *fsl_pci_primary;
-void fsl_pci_assign_primary(void)
+void __init fsl_pci_assign_primary(void)
{
struct device_node *np;
diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h
index 1d7a41205695..cdbde2e0c96e 100644
--- a/arch/powerpc/sysdev/fsl_pci.h
+++ b/arch/powerpc/sysdev/fsl_pci.h
@@ -120,7 +120,7 @@ u64 fsl_pci_immrbar_base(struct pci_controller *hose);
extern struct device_node *fsl_pci_primary;
#ifdef CONFIG_PCI
-void fsl_pci_assign_primary(void);
+void __init fsl_pci_assign_primary(void);
#else
static inline void fsl_pci_assign_primary(void) {}
#endif
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index dc1a151c63d7..3b1ae98e3ce9 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -208,7 +208,7 @@ static const struct irq_domain_ops i8259_host_ops = {
.xlate = i8259_host_xlate,
};
-struct irq_domain *i8259_get_host(void)
+struct irq_domain *__init i8259_get_host(void)
{
return i8259_host;
}
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 7638a50a7c38..3f10c9fc3b68 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -767,7 +767,7 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
return ipic;
}
-void ipic_set_default_priority(void)
+void __init ipic_set_default_priority(void)
{
ipic_write(primary_ipic->regs, IPIC_SIPRR_A, IPIC_PRIORITY_DEFAULT);
ipic_write(primary_ipic->regs, IPIC_SIPRR_B, IPIC_PRIORITY_DEFAULT);
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 995fb2ada507..d5cb48b61bbd 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -1323,8 +1323,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
psrc = of_get_property(mpic->node, "protected-sources", &psize);
if (psrc) {
/* Allocate a bitmap with one bit per interrupt */
- unsigned int mapsize = BITS_TO_LONGS(intvec_top + 1);
- mpic->protected = kcalloc(mapsize, sizeof(long), GFP_KERNEL);
+ mpic->protected = bitmap_zalloc(intvec_top + 1, GFP_KERNEL);
BUG_ON(mpic->protected == NULL);
for (i = 0; i < psize/sizeof(u32); i++) {
if (psrc[i] > intvec_top)
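
bitmap_zalloc() takes its size in bits and hides the BITS_TO_LONGS() arithmetic the old kcalloc() call open-coded; it pairs with bitmap_free(). A minimal usage sketch (values illustrative):

#include <linux/bitmap.h>
#include <linux/gfp.h>

static int example_protected_map(unsigned int nbits)
{
	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!map)
		return -ENOMEM;
	set_bit(3, map);		/* mark source 3 as protected */
	bitmap_free(map);		/* pairs with bitmap_zalloc() */
	return 0;
}
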
@@ -1840,7 +1839,7 @@ unsigned int mpic_get_mcirq(void)
}
#ifdef CONFIG_SMP
-void mpic_request_ipis(void)
+void __init mpic_request_ipis(void)
{
struct mpic *mpic = mpic_primary;
int i;
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h
index 73a31a429d46..bb460ff57a06 100644
--- a/arch/powerpc/sysdev/mpic.h
+++ b/arch/powerpc/sysdev/mpic.h
@@ -8,8 +8,8 @@
#ifdef CONFIG_PCI_MSI
extern void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq);
-extern int mpic_msi_init_allocator(struct mpic *mpic);
-extern int mpic_u3msi_init(struct mpic *mpic);
+int __init mpic_msi_init_allocator(struct mpic *mpic);
+int __init mpic_u3msi_init(struct mpic *mpic);
#else
static inline void mpic_msi_reserve_hwirq(struct mpic *mpic,
irq_hw_number_t hwirq)
@@ -24,7 +24,7 @@ static inline int mpic_u3msi_init(struct mpic *mpic)
#endif
#if defined(CONFIG_PCI_MSI) && defined(CONFIG_PPC_PASEMI)
-int mpic_pasemi_msi_init(struct mpic *mpic);
+int __init mpic_pasemi_msi_init(struct mpic *mpic);
#else
static inline int mpic_pasemi_msi_init(struct mpic *mpic) { return -1; }
#endif
@@ -37,8 +37,8 @@ extern void mpic_reset_core(int cpu);
#ifdef CONFIG_FSL_SOC
extern int mpic_map_error_int(struct mpic *mpic, unsigned int virq, irq_hw_number_t hw);
-extern void mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum);
-extern int mpic_setup_error_int(struct mpic *mpic, int intvec);
+void __init mpic_err_int_init(struct mpic *mpic, irq_hw_number_t irqnum);
+int __init mpic_setup_error_int(struct mpic *mpic, int intvec);
#else
static inline int mpic_map_error_int(struct mpic *mpic, unsigned int virq, irq_hw_number_t hw)
{
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
index 4695c04320ae..f412d6ad0b66 100644
--- a/arch/powerpc/sysdev/mpic_msi.c
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -24,7 +24,7 @@ void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq)
}
#ifdef CONFIG_MPIC_U3_HT_IRQS
-static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
+static int __init mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
{
irq_hw_number_t hwirq;
const struct irq_domain_ops *ops = mpic->irqhost->ops;
@@ -68,13 +68,13 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
return 0;
}
#else
-static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
+static int __init mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
{
return -1;
}
#endif
-int mpic_msi_init_allocator(struct mpic *mpic)
+int __init mpic_msi_init_allocator(struct mpic *mpic)
{
int rc;
diff --git a/arch/powerpc/sysdev/mpic_timer.c b/arch/powerpc/sysdev/mpic_timer.c
index a42a20280035..444e9ce42d0a 100644
--- a/arch/powerpc/sysdev/mpic_timer.c
+++ b/arch/powerpc/sysdev/mpic_timer.c
@@ -384,7 +384,7 @@ struct mpic_timer *mpic_request_timer(irq_handler_t fn, void *dev,
}
EXPORT_SYMBOL(mpic_request_timer);
-static int timer_group_get_freq(struct device_node *np,
+static int __init timer_group_get_freq(struct device_node *np,
struct timer_group_priv *priv)
{
u32 div;
@@ -411,7 +411,7 @@ static int timer_group_get_freq(struct device_node *np,
return 0;
}
-static int timer_group_get_irq(struct device_node *np,
+static int __init timer_group_get_irq(struct device_node *np,
struct timer_group_priv *priv)
{
const u32 all_timer[] = { 0, TIMERS_PER_GROUP };
@@ -459,7 +459,7 @@ static int timer_group_get_irq(struct device_node *np,
return 0;
}
-static void timer_group_init(struct device_node *np)
+static void __init timer_group_init(struct device_node *np)
{
struct timer_group_priv *priv;
unsigned int i = 0;
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 3861023d378a..3f4841dfefb5 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -104,17 +104,12 @@ static void u3msi_teardown_msi_irqs(struct pci_dev *pdev)
struct msi_desc *entry;
irq_hw_number_t hwirq;
- for_each_pci_msi_entry(entry, pdev) {
- if (!entry->irq)
- continue;
-
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, hwirq, 1);
}
-
- return;
}
static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
@@ -136,7 +131,7 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return -ENXIO;
}
- for_each_pci_msi_entry(entry, pdev) {
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1);
if (hwirq < 0) {
pr_debug("u3msi: failed allocating hwirq\n");
@@ -174,7 +169,7 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
return 0;
}
-int mpic_u3msi_init(struct mpic *mpic)
+int __init mpic_u3msi_init(struct mpic *mpic)
{
int rc;
struct pci_controller *phb;
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c
index 4c4a6efd5e5f..9e13fb35ed5c 100644
--- a/arch/powerpc/sysdev/tsi108_dev.c
+++ b/arch/powerpc/sysdev/tsi108_dev.c
@@ -51,13 +51,12 @@ phys_addr_t get_csrbase(void)
}
return tsi108_csr_base;
}
+EXPORT_SYMBOL(get_csrbase);
u32 get_vir_csrbase(void)
{
return (u32) (ioremap(get_csrbase(), 0x10000));
}
-
-EXPORT_SYMBOL(get_csrbase);
EXPORT_SYMBOL(get_vir_csrbase);
static int __init tsi108_eth_of_init(void)
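
The tsi108 hunk follows the kernel convention (also a checkpatch warning) that EXPORT_SYMBOL() sits directly under the function it exports, e.g.:

#include <linux/export.h>

int example_fn(void)
{
	return 0;
}
EXPORT_SYMBOL(example_fn);	/* immediately after the definition */
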
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 042bb38fa5c2..1070220f15d5 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -257,7 +257,7 @@ static void tsi108_pci_int_unmask(u_int irq)
mb();
}
-static void init_pci_source(void)
+static void __init init_pci_source(void)
{
tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_CFG_CTL,
0x0000ff00);
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c
index d38bbeed219b..5020044400dc 100644
--- a/arch/powerpc/sysdev/udbg_memcons.c
+++ b/arch/powerpc/sysdev/udbg_memcons.c
@@ -92,7 +92,7 @@ int memcons_getc(void)
return c;
}
-void udbg_init_memcons(void)
+void __init udbg_init_memcons(void)
{
udbg_putc = memcons_putc;
udbg_getc = memcons_getc;
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c
index 6765d9e264a3..cf8db19a4f7d 100644
--- a/arch/powerpc/sysdev/xics/icp-hv.c
+++ b/arch/powerpc/sysdev/xics/icp-hv.c
@@ -162,7 +162,7 @@ static const struct icp_ops icp_hv_ops = {
#endif
};
-int icp_hv_init(void)
+int __init icp_hv_init(void)
{
struct device_node *np;
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index 675d708863d5..bda4c32582d9 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -184,7 +184,7 @@ static const struct icp_ops icp_opal_ops = {
#endif
};
-int icp_opal_init(void)
+int __init icp_opal_init(void)
{
struct device_node *np;
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 244a727c6ba4..f3fb2a12124c 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -121,7 +121,7 @@ void xics_mask_unknown_vec(unsigned int vec)
#ifdef CONFIG_SMP
-static void xics_request_ipi(void)
+static void __init xics_request_ipi(void)
{
unsigned int ipi;
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 7b69299c2912..1ca5564bda9d 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -85,6 +85,16 @@ static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);
#define XIVE_INVALID_TARGET (-1)
/*
+ * Global toggle to switch on/off StoreEOI
+ */
+static bool xive_store_eoi = true;
+
+static bool xive_is_store_eoi(struct xive_irq_data *xd)
+{
+ return xd->flags & XIVE_IRQ_FLAG_STORE_EOI && xive_store_eoi;
+}
+
+/*
* Read the next entry in a queue, return its content if it's valid
* or 0 if there is no new entry.
*
@@ -208,7 +218,7 @@ static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
{
u64 val;
- if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
+ if (offset == XIVE_ESB_SET_PQ_10 && xive_is_store_eoi(xd))
offset |= XIVE_ESB_LD_ST_MO;
if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
@@ -227,6 +237,21 @@ static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
out_be64(xd->eoi_mmio + offset, data);
}
+#if defined(CONFIG_XMON) || defined(CONFIG_DEBUG_FS)
+static void xive_irq_data_dump(struct xive_irq_data *xd, char *buffer, size_t size)
+{
+ u64 val = xive_esb_read(xd, XIVE_ESB_GET);
+
+ snprintf(buffer, size, "flags=%c%c%c PQ=%c%c 0x%016llx 0x%016llx",
+ xive_is_store_eoi(xd) ? 'S' : ' ',
+ xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
+ xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
+ val & XIVE_ESB_VAL_P ? 'P' : '-',
+ val & XIVE_ESB_VAL_Q ? 'Q' : '-',
+ xd->trig_page, xd->eoi_page);
+}
+#endif
+
#ifdef CONFIG_XMON
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
{
@@ -252,11 +277,10 @@ notrace void xmon_xive_do_dump(int cpu)
#ifdef CONFIG_SMP
{
- u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
+ char buffer[128];
- xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
- val & XIVE_ESB_VAL_P ? 'P' : '-',
- val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+ xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer));
+ xmon_printf("IPI=0x%08x %s", xc->hw_ipi, buffer);
}
#endif
xive_dump_eq("EQ", &xc->queue[xive_irq_priority]);
@@ -291,15 +315,11 @@ int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d)
d = xive_get_irq_data(hw_irq);
if (d) {
- struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
- u64 val = xive_esb_read(xd, XIVE_ESB_GET);
-
- xmon_printf("flags=%c%c%c PQ=%c%c",
- xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
- xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
- xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
- val & XIVE_ESB_VAL_P ? 'P' : '-',
- val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+ char buffer[128];
+
+ xive_irq_data_dump(irq_data_get_irq_handler_data(d),
+ buffer, sizeof(buffer));
+ xmon_printf("%s", buffer);
}
xmon_printf("\n");
@@ -385,7 +405,7 @@ static void xive_do_source_eoi(struct xive_irq_data *xd)
xd->stale_p = false;
/* If the XIVE supports the new "store EOI" facility, use it */
- if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) {
+ if (xive_is_store_eoi(xd)) {
xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
return;
}
@@ -451,6 +471,8 @@ static void xive_do_source_set_mask(struct xive_irq_data *xd,
{
u64 val;
+ pr_debug("%s: HW 0x%x %smask\n", __func__, xd->hw_irq, mask ? "" : "un");
+
/*
* If the interrupt had P set, it may be in a queue.
*
@@ -612,8 +634,8 @@ static unsigned int xive_irq_startup(struct irq_data *d)
xd->saved_p = false;
xd->stale_p = false;
- pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
- d->irq, hw_irq, d);
+
+ pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d);
/* Pick a target */
target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
@@ -654,8 +676,7 @@ static void xive_irq_shutdown(struct irq_data *d)
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
- pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
- d->irq, hw_irq, d);
+ pr_debug("%s: irq %d [0x%x] data @%p\n", __func__, d->irq, hw_irq, d);
if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
return;
@@ -679,7 +700,7 @@ static void xive_irq_unmask(struct irq_data *d)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
- pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);
+ pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd);
xive_do_source_set_mask(xd, false);
}
@@ -688,7 +709,7 @@ static void xive_irq_mask(struct irq_data *d)
{
struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
- pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);
+ pr_debug("%s: irq %d data @%p\n", __func__, d->irq, xd);
xive_do_source_set_mask(xd, true);
}
@@ -702,7 +723,7 @@ static int xive_irq_set_affinity(struct irq_data *d,
u32 target, old_target;
int rc = 0;
- pr_debug("%s: irq %d/%x\n", __func__, d->irq, hw_irq);
+ pr_debug("%s: irq %d/0x%x\n", __func__, d->irq, hw_irq);
/* Is this valid ? */
if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
@@ -975,7 +996,7 @@ EXPORT_SYMBOL_GPL(is_xive_irq);
void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
- pr_debug("%s for HW %x\n", __func__, xd->hw_irq);
+ pr_debug("%s for HW 0x%x\n", __func__, xd->hw_irq);
if (xd->eoi_mmio) {
iounmap(xd->eoi_mmio);
@@ -1211,8 +1232,8 @@ static int xive_setup_cpu_ipi(unsigned int cpu)
pr_err("Failed to map IPI CPU %d\n", cpu);
return -EIO;
}
- pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
- xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
+ pr_debug("CPU %d HW IPI 0x%x, virq %d, trig_mmio=%p\n", cpu,
+ xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);
/* Unmask it */
xive_do_source_set_mask(&xc->ipi_data, false);
@@ -1390,7 +1411,7 @@ static int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (rc)
return rc;
- pr_debug("%s %d/%lx #%d\n", __func__, virq, hwirq, nr_irqs);
+ pr_debug("%s %d/0x%lx #%d\n", __func__, virq, hwirq, nr_irqs);
for (i = 0; i < nr_irqs; i++) {
/* TODO: call xive_irq_domain_map() */
@@ -1504,7 +1525,7 @@ static void xive_setup_cpu(void)
#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
- pr_devel("SMP setup CPU %d\n", smp_processor_id());
+ pr_debug("SMP setup CPU %d\n", smp_processor_id());
/* This will have already been done on the boot CPU */
if (smp_processor_id() != boot_cpuid)
@@ -1650,10 +1671,10 @@ bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops,
ppc_md.get_irq = xive_get_irq;
__xive_enabled = true;
- pr_devel("Initializing host..\n");
+ pr_debug("Initializing host..\n");
xive_init_host(np);
- pr_devel("Initializing boot CPU..\n");
+ pr_debug("Initializing boot CPU..\n");
/* Allocate per-CPU data and queues */
xive_prepare_cpu(smp_processor_id());
@@ -1691,36 +1712,36 @@ static int __init xive_off(char *arg)
}
__setup("xive=off", xive_off);
-static void xive_debug_show_cpu(struct seq_file *m, int cpu)
+static int __init xive_store_eoi_cmdline(char *arg)
+{
+ if (!arg)
+ return -EINVAL;
+
+ if (strncmp(arg, "off", 3) == 0) {
+ pr_info("StoreEOI disabled on kernel command line\n");
+ xive_store_eoi = false;
+ }
+ return 0;
+}
+__setup("xive.store-eoi=", xive_store_eoi_cmdline);
+
+#ifdef CONFIG_DEBUG_FS
+static void xive_debug_show_ipi(struct seq_file *m, int cpu)
{
struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
- seq_printf(m, "CPU %d:", cpu);
+ seq_printf(m, "CPU %d: ", cpu);
if (xc) {
seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr);
#ifdef CONFIG_SMP
{
- u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
+ char buffer[128];
- seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi,
- val & XIVE_ESB_VAL_P ? 'P' : '-',
- val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+ xive_irq_data_dump(&xc->ipi_data, buffer, sizeof(buffer));
+ seq_printf(m, "IPI=0x%08x %s", xc->hw_ipi, buffer);
}
#endif
- {
- struct xive_q *q = &xc->queue[xive_irq_priority];
- u32 i0, i1, idx;
-
- if (q->qpage) {
- idx = q->idx;
- i0 = be32_to_cpup(q->qpage + idx);
- idx = (idx + 1) & q->msk;
- i1 = be32_to_cpup(q->qpage + idx);
- seq_printf(m, "EQ idx=%d T=%d %08x %08x ...",
- q->idx, q->toggle, i0, i1);
- }
- }
}
seq_puts(m, "\n");
}
@@ -1732,8 +1753,7 @@ static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d)
u32 target;
u8 prio;
u32 lirq;
- struct xive_irq_data *xd;
- u64 val;
+ char buffer[128];
rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq);
if (rc) {
@@ -1744,43 +1764,101 @@ static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d)
seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ",
hw_irq, target, prio, lirq);
- xd = irq_data_get_irq_handler_data(d);
- val = xive_esb_read(xd, XIVE_ESB_GET);
- seq_printf(m, "flags=%c%c%c PQ=%c%c",
- xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ',
- xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ',
- xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ',
- val & XIVE_ESB_VAL_P ? 'P' : '-',
- val & XIVE_ESB_VAL_Q ? 'Q' : '-');
+ xive_irq_data_dump(irq_data_get_irq_handler_data(d), buffer, sizeof(buffer));
+ seq_puts(m, buffer);
seq_puts(m, "\n");
}
-static int xive_core_debug_show(struct seq_file *m, void *private)
+static int xive_irq_debug_show(struct seq_file *m, void *private)
{
unsigned int i;
struct irq_desc *desc;
+
+ for_each_irq_desc(i, desc) {
+ struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);
+
+ if (d)
+ xive_debug_show_irq(m, d);
+ }
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(xive_irq_debug);
+
+static int xive_ipi_debug_show(struct seq_file *m, void *private)
+{
int cpu;
if (xive_ops->debug_show)
xive_ops->debug_show(m, private);
for_each_possible_cpu(cpu)
- xive_debug_show_cpu(m, cpu);
+ xive_debug_show_ipi(m, cpu);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(xive_ipi_debug);
- for_each_irq_desc(i, desc) {
- struct irq_data *d = irq_domain_get_irq_data(xive_irq_domain, i);
+static void xive_eq_debug_show_one(struct seq_file *m, struct xive_q *q, u8 prio)
+{
+ int i;
- if (d)
- xive_debug_show_irq(m, d);
+ seq_printf(m, "EQ%d idx=%d T=%d\n", prio, q->idx, q->toggle);
+ if (q->qpage) {
+ for (i = 0; i < q->msk + 1; i++) {
+ if (!(i % 8))
+ seq_printf(m, "%05d ", i);
+ seq_printf(m, "%08x%s", be32_to_cpup(q->qpage + i),
+ (i + 1) % 8 ? " " : "\n");
+ }
}
+ seq_puts(m, "\n");
+}
+
+static int xive_eq_debug_show(struct seq_file *m, void *private)
+{
+ int cpu = (long)m->private;
+ struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
+
+ if (xc)
+ xive_eq_debug_show_one(m, &xc->queue[xive_irq_priority],
+ xive_irq_priority);
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(xive_core_debug);
+DEFINE_SHOW_ATTRIBUTE(xive_eq_debug);
+
+static void xive_core_debugfs_create(void)
+{
+ struct dentry *xive_dir;
+ struct dentry *xive_eq_dir;
+ long cpu;
+ char name[16];
+
+ xive_dir = debugfs_create_dir("xive", arch_debugfs_dir);
+ if (IS_ERR(xive_dir))
+ return;
+
+ debugfs_create_file("ipis", 0400, xive_dir,
+ NULL, &xive_ipi_debug_fops);
+ debugfs_create_file("interrupts", 0400, xive_dir,
+ NULL, &xive_irq_debug_fops);
+ xive_eq_dir = debugfs_create_dir("eqs", xive_dir);
+ for_each_possible_cpu(cpu) {
+ snprintf(name, sizeof(name), "cpu%ld", cpu);
+ debugfs_create_file(name, 0400, xive_eq_dir, (void *)cpu,
+ &xive_eq_debug_fops);
+ }
+ debugfs_create_bool("store-eoi", 0600, xive_dir, &xive_store_eoi);
+
+ if (xive_ops->debug_create)
+ xive_ops->debug_create(xive_dir);
+}
+#else
+static inline void xive_core_debugfs_create(void) { }
+#endif /* CONFIG_DEBUG_FS */
int xive_core_debug_init(void)
{
- if (xive_enabled())
- debugfs_create_file("xive", 0400, arch_debugfs_dir,
- NULL, &xive_core_debug_fops);
+ if (xive_enabled() && IS_ENABLED(CONFIG_DEBUG_FS))
+ xive_core_debugfs_create();
+
return 0;
}
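
Taken together, the debugfs hooks above build the following hierarchy; a hedged sketch with illustrative values, assuming CONFIG_DEBUG_FS (arch_debugfs_dir resolves to /sys/kernel/debug/powerpc on these machines):

/*
 * /sys/kernel/debug/powerpc/xive/
 *   interrupts   (0400)  one line per configured HW IRQ, formatted
 *                        by xive_irq_data_dump(): flags=SLH with
 *                        S=StoreEOI L=LSI H=H_INT_ESB, the PQ bits,
 *                        and the trigger/EOI page addresses
 *   ipis         (0400)  per-CPU IPI and CPPR state
 *   store-eoi    (0600)  global StoreEOI toggle (bool)
 *   eqs/cpuN     (0400)  per-CPU event queue, printed 8 words per
 *                        row by xive_eq_debug_show_one(), e.g.:
 *                          EQ7 idx=5 T=1
 *                          00000 00000000 80000040 00000000 ...
 */
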
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index 1aec282cd650..f940428ad13f 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -41,7 +41,7 @@ static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;
static bool xive_has_single_esc;
-static bool xive_has_save_restore;
+bool xive_has_save_restore;
int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
@@ -63,6 +63,8 @@ int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
opal_flags = be64_to_cpu(flags);
if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
+ if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI2)
+ data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
if (opal_flags & OPAL_XIVE_IRQ_LSI)
data->flags |= XIVE_IRQ_FLAG_LSI;
data->eoi_page = be64_to_cpu(eoi_page);
@@ -459,6 +461,14 @@ void xive_native_sync_queue(u32 hw_irq)
}
EXPORT_SYMBOL_GPL(xive_native_sync_queue);
+#ifdef CONFIG_DEBUG_FS
+static int xive_native_debug_create(struct dentry *xive_dir)
+{
+ debugfs_create_bool("save-restore", 0600, xive_dir, &xive_has_save_restore);
+ return 0;
+}
+#endif
+
static const struct xive_ops xive_native_ops = {
.populate_irq_data = xive_native_populate_irq_data,
.configure_irq = xive_native_configure_irq,
@@ -476,10 +486,13 @@ static const struct xive_ops xive_native_ops = {
.get_ipi = xive_native_get_ipi,
.put_ipi = xive_native_put_ipi,
#endif /* CONFIG_SMP */
+#ifdef CONFIG_DEBUG_FS
+ .debug_create = xive_native_debug_create,
+#endif /* CONFIG_DEBUG_FS */
.name = "native",
};
-static bool xive_parse_provisioning(struct device_node *np)
+static bool __init xive_parse_provisioning(struct device_node *np)
{
int rc;
@@ -519,7 +532,7 @@ static bool xive_parse_provisioning(struct device_node *np)
return true;
}
-static void xive_native_setup_pools(void)
+static void __init xive_native_setup_pools(void)
{
/* Allocate a pool big enough */
pr_debug("XIVE: Allocating VP block for pool size %u\n", nr_cpu_ids);
diff --git a/arch/powerpc/sysdev/xive/spapr.c b/arch/powerpc/sysdev/xive/spapr.c
index f143b6f111ac..928f95004501 100644
--- a/arch/powerpc/sysdev/xive/spapr.c
+++ b/arch/powerpc/sysdev/xive/spapr.c
@@ -44,7 +44,7 @@ struct xive_irq_bitmap {
static LIST_HEAD(xive_irq_bitmaps);
-static int xive_irq_bitmap_add(int base, int count)
+static int __init xive_irq_bitmap_add(int base, int count)
{
struct xive_irq_bitmap *xibm;
@@ -173,7 +173,7 @@ static long plpar_int_get_source_info(unsigned long flags,
} while (plpar_busy_delay(rc));
if (rc) {
- pr_err("H_INT_GET_SOURCE_INFO lisn=%ld failed %ld\n", lisn, rc);
+ pr_err("H_INT_GET_SOURCE_INFO lisn=0x%lx failed %ld\n", lisn, rc);
return rc;
}
@@ -182,8 +182,8 @@ static long plpar_int_get_source_info(unsigned long flags,
*trig_page = retbuf[2];
*esb_shift = retbuf[3];
- pr_devel("H_INT_GET_SOURCE_INFO flags=%lx eoi=%lx trig=%lx shift=%lx\n",
- retbuf[0], retbuf[1], retbuf[2], retbuf[3]);
+ pr_debug("H_INT_GET_SOURCE_INFO lisn=0x%lx flags=0x%lx eoi=0x%lx trig=0x%lx shift=0x%lx\n",
+ lisn, retbuf[0], retbuf[1], retbuf[2], retbuf[3]);
return 0;
}
@@ -200,8 +200,8 @@ static long plpar_int_set_source_config(unsigned long flags,
long rc;
- pr_devel("H_INT_SET_SOURCE_CONFIG flags=%lx lisn=%lx target=%lx prio=%lx sw_irq=%lx\n",
- flags, lisn, target, prio, sw_irq);
+ pr_debug("H_INT_SET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx target=%ld prio=%ld sw_irq=%ld\n",
+ flags, lisn, target, prio, sw_irq);
do {
@@ -210,7 +210,7 @@ static long plpar_int_set_source_config(unsigned long flags,
} while (plpar_busy_delay(rc));
if (rc) {
- pr_err("H_INT_SET_SOURCE_CONFIG lisn=%ld target=%lx prio=%lx failed %ld\n",
+ pr_err("H_INT_SET_SOURCE_CONFIG lisn=0x%lx target=%ld prio=%ld failed %ld\n",
lisn, target, prio, rc);
return rc;
}
@@ -227,7 +227,7 @@ static long plpar_int_get_source_config(unsigned long flags,
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
- pr_devel("H_INT_GET_SOURCE_CONFIG flags=%lx lisn=%lx\n", flags, lisn);
+ pr_debug("H_INT_GET_SOURCE_CONFIG flags=0x%lx lisn=0x%lx\n", flags, lisn);
do {
rc = plpar_hcall(H_INT_GET_SOURCE_CONFIG, retbuf, flags, lisn,
@@ -235,7 +235,7 @@ static long plpar_int_get_source_config(unsigned long flags,
} while (plpar_busy_delay(rc));
if (rc) {
- pr_err("H_INT_GET_SOURCE_CONFIG lisn=%ld failed %ld\n",
+ pr_err("H_INT_GET_SOURCE_CONFIG lisn=0x%lx failed %ld\n",
lisn, rc);
return rc;
}
@@ -244,8 +244,8 @@ static long plpar_int_get_source_config(unsigned long flags,
*prio = retbuf[1];
*sw_irq = retbuf[2];
- pr_devel("H_INT_GET_SOURCE_CONFIG target=%lx prio=%lx sw_irq=%lx\n",
- retbuf[0], retbuf[1], retbuf[2]);
+ pr_debug("H_INT_GET_SOURCE_CONFIG target=%ld prio=%ld sw_irq=%ld\n",
+ retbuf[0], retbuf[1], retbuf[2]);
return 0;
}
@@ -273,8 +273,8 @@ static long plpar_int_get_queue_info(unsigned long flags,
*esn_page = retbuf[0];
*esn_size = retbuf[1];
- pr_devel("H_INT_GET_QUEUE_INFO page=%lx size=%lx\n",
- retbuf[0], retbuf[1]);
+ pr_debug("H_INT_GET_QUEUE_INFO cpu=%ld prio=%ld page=0x%lx size=0x%lx\n",
+ target, priority, retbuf[0], retbuf[1]);
return 0;
}
@@ -289,8 +289,8 @@ static long plpar_int_set_queue_config(unsigned long flags,
{
long rc;
- pr_devel("H_INT_SET_QUEUE_CONFIG flags=%lx target=%lx priority=%lx qpage=%lx qsize=%lx\n",
- flags, target, priority, qpage, qsize);
+ pr_debug("H_INT_SET_QUEUE_CONFIG flags=0x%lx target=%ld priority=0x%lx qpage=0x%lx qsize=0x%lx\n",
+ flags, target, priority, qpage, qsize);
do {
rc = plpar_hcall_norets(H_INT_SET_QUEUE_CONFIG, flags, target,
@@ -298,7 +298,7 @@ static long plpar_int_set_queue_config(unsigned long flags,
} while (plpar_busy_delay(rc));
if (rc) {
- pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=%lx returned %ld\n",
+ pr_err("H_INT_SET_QUEUE_CONFIG cpu=%ld prio=%ld qpage=0x%lx returned %ld\n",
target, priority, qpage, rc);
return rc;
}
@@ -315,7 +315,7 @@ static long plpar_int_sync(unsigned long flags, unsigned long lisn)
} while (plpar_busy_delay(rc));
if (rc) {
- pr_err("H_INT_SYNC lisn=%ld returned %ld\n", lisn, rc);
+ pr_err("H_INT_SYNC lisn=0x%lx returned %ld\n", lisn, rc);
return rc;
}
@@ -333,8 +333,8 @@ static long plpar_int_esb(unsigned long flags,
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
long rc;
- pr_devel("H_INT_ESB flags=%lx lisn=%lx offset=%lx in=%lx\n",
- flags, lisn, offset, in_data);
+ pr_debug("H_INT_ESB flags=0x%lx lisn=0x%lx offset=0x%lx in=0x%lx\n",
+ flags, lisn, offset, in_data);
do {
rc = plpar_hcall(H_INT_ESB, retbuf, flags, lisn, offset,
@@ -342,7 +342,7 @@ static long plpar_int_esb(unsigned long flags,
} while (plpar_busy_delay(rc));
if (rc) {
- pr_err("H_INT_ESB lisn=%ld offset=%ld returned %ld\n",
+ pr_err("H_INT_ESB lisn=0x%lx offset=0x%lx returned %ld\n",
lisn, offset, rc);
return rc;
}
@@ -653,6 +653,9 @@ static int xive_spapr_debug_show(struct seq_file *m, void *private)
struct xive_irq_bitmap *xibm;
char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
list_for_each_entry(xibm, &xive_irq_bitmaps, list) {
memset(buf, 0, PAGE_SIZE);
bitmap_print_to_pagebuf(true, buf, xibm->bitmap, xibm->count);
@@ -687,7 +690,7 @@ static const struct xive_ops xive_spapr_ops = {
/*
* get max priority from "/ibm,plat-res-int-priorities"
*/
-static bool xive_get_max_prio(u8 *max_prio)
+static bool __init xive_get_max_prio(u8 *max_prio)
{
struct device_node *rootdn;
const __be32 *reg;
@@ -741,7 +744,7 @@ static bool xive_get_max_prio(u8 *max_prio)
return true;
}
-static const u8 *get_vec5_feature(unsigned int index)
+static const u8 *__init get_vec5_feature(unsigned int index)
{
unsigned long root, chosen;
int size;
diff --git a/arch/powerpc/sysdev/xive/xive-internal.h b/arch/powerpc/sysdev/xive/xive-internal.h
index 504e7edce358..fe6d95d54af9 100644
--- a/arch/powerpc/sysdev/xive/xive-internal.h
+++ b/arch/powerpc/sysdev/xive/xive-internal.h
@@ -58,6 +58,7 @@ struct xive_ops {
void (*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
int (*debug_show)(struct seq_file *m, void *private);
+ int (*debug_create)(struct dentry *xive_dir);
const char *name;
};
@@ -72,5 +73,6 @@ static inline u32 xive_alloc_order(u32 queue_shift)
}
extern bool xive_cmdline_disabled;
+extern bool xive_has_save_restore;
#endif /* __XIVE_INTERNAL_H */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 8b28ff9d98d1..fd72753e8ad5 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -125,7 +125,7 @@ static unsigned bpinstr = 0x7fe00008; /* trap */
static int cmds(struct pt_regs *);
static int mread(unsigned long, void *, int);
static int mwrite(unsigned long, void *, int);
-static int mread_instr(unsigned long, struct ppc_inst *);
+static int mread_instr(unsigned long, ppc_inst_t *);
static int handle_fault(struct pt_regs *);
static void byterev(unsigned char *, int);
static void memex(void);
@@ -908,7 +908,7 @@ static struct bpt *new_breakpoint(unsigned long a)
static void insert_bpts(void)
{
int i;
- struct ppc_inst instr, instr2;
+ ppc_inst_t instr, instr2;
struct bpt *bp, *bp2;
bp = bpts;
@@ -988,7 +988,7 @@ static void remove_bpts(void)
{
int i;
struct bpt *bp;
- struct ppc_inst instr;
+ ppc_inst_t instr;
bp = bpts;
for (i = 0; i < NBPTS; ++i, ++bp) {
@@ -1159,7 +1159,7 @@ cmds(struct pt_regs *excp)
case 'P':
show_tasks();
break;
-#ifdef CONFIG_PPC_BOOK3S
+#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_64S_HASH_MMU)
case 'u':
dump_segments();
break;
@@ -1204,7 +1204,7 @@ static int do_step(struct pt_regs *regs)
*/
static int do_step(struct pt_regs *regs)
{
- struct ppc_inst instr;
+ ppc_inst_t instr;
int stepped;
force_enable_xmon();
@@ -1459,7 +1459,7 @@ csum(void)
*/
static long check_bp_loc(unsigned long addr)
{
- struct ppc_inst instr;
+ ppc_inst_t instr;
addr &= ~3;
if (!is_kernel_addr(addr)) {
@@ -2107,8 +2107,14 @@ static void dump_300_sprs(void)
if (!cpu_has_feature(CPU_FTR_ARCH_300))
return;
- printf("pidr = %.16lx tidr = %.16lx\n",
- mfspr(SPRN_PID), mfspr(SPRN_TIDR));
+ if (cpu_has_feature(CPU_FTR_P9_TIDR)) {
+ printf("pidr = %.16lx tidr = %.16lx\n",
+ mfspr(SPRN_PID), mfspr(SPRN_TIDR));
+ } else {
+ printf("pidr = %.16lx\n",
+ mfspr(SPRN_PID));
+ }
+
printf("psscr = %.16lx\n",
hv ? mfspr(SPRN_PSSCR) : mfspr(SPRN_PSSCR_PR));
@@ -2300,7 +2306,7 @@ mwrite(unsigned long adrs, void *buf, int size)
}
static int
-mread_instr(unsigned long adrs, struct ppc_inst *instr)
+mread_instr(unsigned long adrs, ppc_inst_t *instr)
{
volatile int n;
@@ -2608,7 +2614,7 @@ static void dump_tracing(void)
static void dump_one_paca(int cpu)
{
struct paca_struct *p;
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
int i = 0;
#endif
@@ -2650,6 +2656,7 @@ static void dump_one_paca(int cpu)
DUMP(p, cpu_start, "%#-*x");
DUMP(p, kexec_state, "%#-*x");
#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (!early_radix_enabled()) {
for (i = 0; i < SLB_NUM_BOLTED; i++) {
u64 esid, vsid;
@@ -2677,6 +2684,7 @@ static void dump_one_paca(int cpu)
22, "slb_cache", i, p->slb_cache[i]);
}
}
+#endif
DUMP(p, rfi_flush_fallback_area, "%-*px");
#endif
@@ -2809,12 +2817,12 @@ static void dump_all_xives(void)
{
int cpu;
- if (num_possible_cpus() == 0) {
+ if (num_online_cpus() == 0) {
printf("No possible cpus, use 'dx #' to dump individual cpus\n");
return;
}
- for_each_possible_cpu(cpu)
+ for_each_online_cpu(cpu)
dump_one_xive(cpu);
}
@@ -3020,7 +3028,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
{
int nr, dotted;
unsigned long first_adr;
- struct ppc_inst inst, last_inst = ppc_inst(0);
+ ppc_inst_t inst, last_inst = ppc_inst(0);
dotted = 0;
for (first_adr = adr; count > 0; --count, adr += ppc_inst_len(inst)) {
@@ -3740,7 +3748,7 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
printf("%s", after);
}
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
void dump_segments(void)
{
int i;
@@ -4128,7 +4136,7 @@ struct spu_info {
static struct spu_info spu_info[XMON_NUM_SPUS];
-void xmon_register_spus(struct list_head *list)
+void __init xmon_register_spus(struct list_head *list)
{
struct spu *spu;
diff --git a/arch/powerpc/xmon/xmon_bpts.h b/arch/powerpc/xmon/xmon_bpts.h
index 57e6fb03de48..377068f52edb 100644
--- a/arch/powerpc/xmon/xmon_bpts.h
+++ b/arch/powerpc/xmon/xmon_bpts.h
@@ -5,8 +5,8 @@
#define NBPTS 256
#ifndef __ASSEMBLY__
#include <asm/inst.h>
-#define BPT_SIZE (sizeof(struct ppc_inst) * 2)
-#define BPT_WORDS (BPT_SIZE / sizeof(struct ppc_inst))
+#define BPT_SIZE (sizeof(ppc_inst_t) * 2)
+#define BPT_WORDS (BPT_SIZE / sizeof(ppc_inst_t))
extern unsigned int bpt_table[NBPTS * BPT_WORDS];
#endif /* __ASSEMBLY__ */
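
The xmon hunks are part of a tree-wide rename of struct ppc_inst to the ppc_inst_t typedef; the layout is unchanged, so sizeof()-based arithmetic such as BPT_SIZE is unaffected. A minimal sketch:

#include <asm/inst.h>
#include <asm/ppc-opcode.h>

static void __init example(void)
{
	ppc_inst_t insn = ppc_inst(PPC_RAW_NOP());	/* a 32-bit nop */

	(void)insn;
}
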
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 09abf62ae0ad..5adcbd9b5e88 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -14,6 +14,7 @@ config RISCV
def_bool y
select ARCH_CLOCKSOURCE_INIT
select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION
+ select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DEBUG_VM_PGTABLE
select ARCH_HAS_DEBUG_VIRTUAL if MMU
@@ -75,6 +76,7 @@ config RISCV
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if 64BIT && MMU
+ select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
select HAVE_ARCH_THREAD_STRUCT_WHITELIST
select HAVE_ARCH_VMAP_STACK if MMU && 64BIT
select HAVE_ASM_MODVERSIONS
@@ -145,27 +147,16 @@ config MMU
Select if you want MMU-based virtualised addressing space
support by paged memory management. If unsure, say 'Y'.
-config VA_BITS
- int
- default 32 if 32BIT
- default 39 if 64BIT
-
-config PA_BITS
- int
- default 34 if 32BIT
- default 56 if 64BIT
-
config PAGE_OFFSET
hex
- default 0xC0000000 if 32BIT && MAXPHYSMEM_1GB
+ default 0xC0000000 if 32BIT
default 0x80000000 if 64BIT && !MMU
- default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
- default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
+ default 0xffffaf8000000000 if 64BIT
config KASAN_SHADOW_OFFSET
hex
depends on KASAN_GENERIC
- default 0xdfffffc800000000 if 64BIT
+ default 0xdfffffff00000000 if 64BIT
default 0xffffffff if 32BIT
config ARCH_FLATMEM_ENABLE
@@ -211,7 +202,7 @@ config FIX_EARLYCON_MEM
config PGTABLE_LEVELS
int
- default 3 if 64BIT
+ default 4 if 64BIT
default 2
config LOCKDEP_SUPPORT
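
The PAGE_OFFSET and PGTABLE_LEVELS changes come from the RISC-V sv48 enablement: a fourth page-table level extends the kernel address space, and the fixed MAXPHYSMEM choice below is dropped. Each level resolves 9 VA bits on top of the 12-bit page offset, as this sketch (helper name made up) shows:

/* 3 levels -> 39-bit VAs (sv39); 4 levels -> 48-bit VAs (sv48). */
static unsigned int riscv_va_bits(unsigned int pgtable_levels)
{
	return 12 + 9 * pgtable_levels;
}
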
@@ -269,24 +260,6 @@ config MODULE_SECTIONS
bool
select HAVE_MOD_ARCH_SPECIFIC
-choice
- prompt "Maximum Physical Memory"
- default MAXPHYSMEM_1GB if 32BIT
- default MAXPHYSMEM_2GB if 64BIT && CMODEL_MEDLOW
- default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
-
- config MAXPHYSMEM_1GB
- depends on 32BIT
- bool "1GiB"
- config MAXPHYSMEM_2GB
- depends on 64BIT && CMODEL_MEDLOW
- bool "2GiB"
- config MAXPHYSMEM_128GB
- depends on 64BIT && CMODEL_MEDANY
- bool "128GiB"
-endchoice
-
-
config SMP
bool "Symmetric Multi-Processing"
help
@@ -333,6 +306,8 @@ config NUMA
select GENERIC_ARCH_NUMA
select OF_NUMA
select ARCH_SUPPORTS_NUMA_BALANCING
+ select USE_PERCPU_NUMA_NODE_ID
+ select NEED_PER_CPU_EMBED_FIRST_CHUNK
help
Enable NUMA (Non-Uniform Memory Access) support.
@@ -348,14 +323,6 @@ config NODES_SHIFT
Specify the maximum number of NUMA Nodes available on the target
system. Increases memory reserved to accommodate various tables.
-config USE_PERCPU_NUMA_NODE_ID
- def_bool y
- depends on NUMA
-
-config NEED_PER_CPU_EMBED_FIRST_CHUNK
- def_bool y
- depends on NUMA
-
config RISCV_ISA_C
bool "Emit compressed instructions when building Linux"
default y
@@ -396,12 +363,25 @@ source "kernel/Kconfig.hz"
config RISCV_SBI_V01
bool "SBI v0.1 support"
- default y
depends on RISCV_SBI
help
This config allows the kernel to use SBI v0.1 APIs. This will be
deprecated in future once legacy M-mode software is no longer in use.
+config RISCV_BOOT_SPINWAIT
+ bool "Spinwait booting method"
+ depends on SMP
+ default y
+ help
+ This enables support for booting Linux via the spinwait method. In
+ the spinwait method, all cores jump into Linux; one core is chosen
+ via lottery and all the others keep spinning on a per-cpu variable.
+ This method cannot support CPU hotplug or sparse hartid schemes. It
+ should only be enabled for M-mode Linux or for platforms relying on
+ older firmware without the SBI HSM extension. All other platforms
+ should rely on ordered booting via the SBI HSM extension, which is
+ chosen dynamically at runtime if the firmware supports it.
+
config KEXEC
bool "Kexec system call"
select KEXEC_CORE
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 8a107ed18b0d..7d81102cffd4 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -50,6 +50,12 @@ riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
+
+# Newer binutils versions default to ISA spec version 20191213 which moves some
+# instructions from the I extension to the Zicsr and Zifencei extensions.
+toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei)
+riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
+
KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
KBUILD_AFLAGS += -march=$(riscv-march-y)
diff --git a/arch/riscv/boot/dts/canaan/Makefile b/arch/riscv/boot/dts/canaan/Makefile
index 9ee7156c0c31..c61b08ac8554 100644
--- a/arch/riscv/boot/dts/canaan/Makefile
+++ b/arch/riscv/boot/dts/canaan/Makefile
@@ -1,5 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-ifneq ($(CONFIG_SOC_CANAAN_K210_DTB_SOURCE),"")
-dtb-y += $(strip $(shell echo $(CONFIG_SOC_CANAAN_K210_DTB_SOURCE))).dtb
+dtb-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += $(addsuffix .dtb, $(CONFIG_SOC_CANAAN_K210_DTB_SOURCE))
obj-$(CONFIG_SOC_CANAAN_K210_DTB_BUILTIN) += $(addsuffix .o, $(dtb-y))
-endif
diff --git a/arch/riscv/boot/dts/canaan/k210.dtsi b/arch/riscv/boot/dts/canaan/k210.dtsi
index 5e8ca8142482..56f57118c633 100644
--- a/arch/riscv/boot/dts/canaan/k210.dtsi
+++ b/arch/riscv/boot/dts/canaan/k210.dtsi
@@ -103,8 +103,8 @@
clint0: timer@2000000 {
compatible = "canaan,k210-clint", "sifive,clint0";
reg = <0x2000000 0xC000>;
- interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
- &cpu1_intc 3 &cpu1_intc 7>;
+ interrupts-extended = <&cpu0_intc 3>, <&cpu0_intc 7>,
+ <&cpu1_intc 3>, <&cpu1_intc 7>;
};
plic0: interrupt-controller@c000000 {
@@ -113,7 +113,7 @@
compatible = "canaan,k210-plic", "sifive,plic-1.0.0";
reg = <0xC000000 0x4000000>;
interrupt-controller;
- interrupts-extended = <&cpu0_intc 11 &cpu1_intc 11>;
+ interrupts-extended = <&cpu0_intc 11>, <&cpu1_intc 11>;
riscv,ndev = <65>;
};
@@ -130,10 +130,11 @@
compatible = "canaan,k210-gpiohs", "sifive,gpio0";
reg = <0x38001000 0x1000>;
interrupt-controller;
- interrupts = <34 35 36 37 38 39 40 41
- 42 43 44 45 46 47 48 49
- 50 51 52 53 54 55 56 57
- 58 59 60 61 62 63 64 65>;
+ interrupts = <34>, <35>, <36>, <37>, <38>, <39>, <40>,
+ <41>, <42>, <43>, <44>, <45>, <46>, <47>,
+ <48>, <49>, <50>, <51>, <52>, <53>, <54>,
+ <55>, <56>, <57>, <58>, <59>, <60>, <61>,
+ <62>, <63>, <64>, <65>;
gpio-controller;
ngpios = <32>;
};
@@ -141,7 +142,7 @@
dmac0: dma-controller@50000000 {
compatible = "snps,axi-dma-1.01a";
reg = <0x50000000 0x1000>;
- interrupts = <27 28 29 30 31 32>;
+ interrupts = <27>, <28>, <29>, <30>, <31>, <32>;
#dma-cells = <1>;
clocks = <&sysclk K210_CLK_DMA>, <&sysclk K210_CLK_DMA>;
clock-names = "core-clk", "cfgr-clk";
@@ -316,7 +317,7 @@
timer0: timer@502d0000 {
compatible = "snps,dw-apb-timer";
reg = <0x502D0000 0x100>;
- interrupts = <14 15>;
+ interrupts = <14>, <15>;
clocks = <&sysclk K210_CLK_TIMER0>,
<&sysclk K210_CLK_APB0>;
clock-names = "timer", "pclk";
@@ -326,7 +327,7 @@
timer1: timer@502e0000 {
compatible = "snps,dw-apb-timer";
reg = <0x502E0000 0x100>;
- interrupts = <16 17>;
+ interrupts = <16>, <17>;
clocks = <&sysclk K210_CLK_TIMER1>,
<&sysclk K210_CLK_APB0>;
clock-names = "timer", "pclk";
@@ -336,7 +337,7 @@
timer2: timer@502f0000 {
compatible = "snps,dw-apb-timer";
reg = <0x502F0000 0x100>;
- interrupts = <18 19>;
+ interrupts = <18>, <19>;
clocks = <&sysclk K210_CLK_TIMER2>,
<&sysclk K210_CLK_APB0>;
clock-names = "timer", "pclk";
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
index 0bcaf35045e7..984872f3d3a9 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_bit.dts
@@ -199,7 +199,7 @@
};
&spi3 {
- spi-flash@0 {
+ flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
index ac8a03f5867a..7ba99b4da304 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_dock.dts
@@ -201,7 +201,7 @@
};
&spi3 {
- spi-flash@0 {
+ flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
index 623998194bc1..be9b12c9b374 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maix_go.dts
@@ -209,7 +209,7 @@
};
&spi3 {
- spi-flash@0 {
+ flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
diff --git a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
index cf605ba0d67e..031c0c28f819 100644
--- a/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
+++ b/arch/riscv/boot/dts/canaan/sipeed_maixduino.dts
@@ -174,7 +174,7 @@
};
&spi3 {
- spi-flash@0 {
+ flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <50000000>;
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
index fc1e5869df1b..0c748ae1b006 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs-icicle-kit.dts
@@ -35,6 +35,10 @@
};
};
+&refclk {
+ clock-frequency = <600000000>;
+};
+
&serial0 {
status = "okay";
};
diff --git a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
index c9f6d205d2ba..869aaf0d5c06 100644
--- a/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
+++ b/arch/riscv/boot/dts/microchip/microchip-mpfs.dtsi
@@ -9,9 +9,6 @@
model = "Microchip PolarFire SoC";
compatible = "microchip,mpfs";
- chosen {
- };
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -142,6 +139,11 @@
};
};
+ refclk: msspllclk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+
soc {
#address-cells = <2>;
#size-cells = <2>;
@@ -156,62 +158,48 @@
cache-size = <2097152>;
cache-unified;
interrupt-parent = <&plic>;
- interrupts = <1 2 3>;
+ interrupts = <1>, <2>, <3>;
reg = <0x0 0x2010000 0x0 0x1000>;
};
clint@2000000 {
compatible = "sifive,fu540-c000-clint", "sifive,clint0";
reg = <0x0 0x2000000 0x0 0xC000>;
- interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
- &cpu1_intc 3 &cpu1_intc 7
- &cpu2_intc 3 &cpu2_intc 7
- &cpu3_intc 3 &cpu3_intc 7
- &cpu4_intc 3 &cpu4_intc 7>;
+ interrupts-extended = <&cpu0_intc 3>, <&cpu0_intc 7>,
+ <&cpu1_intc 3>, <&cpu1_intc 7>,
+ <&cpu2_intc 3>, <&cpu2_intc 7>,
+ <&cpu3_intc 3>, <&cpu3_intc 7>,
+ <&cpu4_intc 3>, <&cpu4_intc 7>;
};
plic: interrupt-controller@c000000 {
- #interrupt-cells = <1>;
compatible = "sifive,fu540-c000-plic", "sifive,plic-1.0.0";
reg = <0x0 0xc000000 0x0 0x4000000>;
- riscv,ndev = <186>;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
interrupt-controller;
- interrupts-extended = <&cpu0_intc 11
- &cpu1_intc 11 &cpu1_intc 9
- &cpu2_intc 11 &cpu2_intc 9
- &cpu3_intc 11 &cpu3_intc 9
- &cpu4_intc 11 &cpu4_intc 9>;
+ interrupts-extended = <&cpu0_intc 11>,
+ <&cpu1_intc 11>, <&cpu1_intc 9>,
+ <&cpu2_intc 11>, <&cpu2_intc 9>,
+ <&cpu3_intc 11>, <&cpu3_intc 9>,
+ <&cpu4_intc 11>, <&cpu4_intc 9>;
+ riscv,ndev = <186>;
};
dma@3000000 {
compatible = "sifive,fu540-c000-pdma";
reg = <0x0 0x3000000 0x0 0x8000>;
interrupt-parent = <&plic>;
- interrupts = <23 24 25 26 27 28 29 30>;
+ interrupts = <23>, <24>, <25>, <26>, <27>, <28>, <29>,
+ <30>;
#dma-cells = <1>;
};
- refclk: refclk {
- compatible = "fixed-clock";
- #clock-cells = <0>;
- clock-frequency = <600000000>;
- clock-output-names = "msspllclk";
- };
-
clkcfg: clkcfg@20002000 {
compatible = "microchip,mpfs-clkcfg";
reg = <0x0 0x20002000 0x0 0x1000>;
- reg-names = "mss_sysreg";
clocks = <&refclk>;
#clock-cells = <1>;
- clock-output-names = "cpu", "axi", "ahb", "envm", /* 0-3 */
- "mac0", "mac1", "mmc", "timer", /* 4-7 */
- "mmuart0", "mmuart1", "mmuart2", "mmuart3", /* 8-11 */
- "mmuart4", "spi0", "spi1", "i2c0", /* 12-15 */
- "i2c1", "can0", "can1", "usb", /* 16-19 */
- "rsvd", "rtc", "qspi", "gpio0", /* 20-23 */
- "gpio1", "gpio2", "ddrc", "fic0", /* 24-27 */
- "fic1", "fic2", "fic3", "athena", "cfm"; /* 28-32 */
};
serial0: serial@20000000 {
@@ -267,7 +255,7 @@
compatible = "microchip,mpfs-sd4hc", "cdns,sd4hc";
reg = <0x0 0x20008000 0x0 0x1000>;
interrupt-parent = <&plic>;
- interrupts = <88 89>;
+ interrupts = <88>, <89>;
clocks = <&clkcfg 6>;
max-frequency = <200000000>;
status = "disabled";
@@ -277,7 +265,7 @@
compatible = "cdns,macb";
reg = <0x0 0x20110000 0x0 0x2000>;
interrupt-parent = <&plic>;
- interrupts = <64 65 66 67>;
+ interrupts = <64>, <65>, <66>, <67>;
local-mac-address = [00 00 00 00 00 00];
clocks = <&clkcfg 4>, <&clkcfg 2>;
clock-names = "pclk", "hclk";
@@ -290,7 +278,7 @@
compatible = "cdns,macb";
reg = <0x0 0x20112000 0x0 0x2000>;
interrupt-parent = <&plic>;
- interrupts = <70 71 72 73>;
+ interrupts = <70>, <71>, <72>, <73>;
local-mac-address = [00 00 00 00 00 00];
clocks = <&clkcfg 5>, <&clkcfg 2>;
status = "disabled";
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
index 0655b5c4201d..3eef52b1a59b 100644
--- a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
@@ -137,20 +137,21 @@
soc {
#address-cells = <2>;
#size-cells = <2>;
- compatible = "sifive,fu540-c000", "sifive,fu540", "simple-bus";
+ compatible = "simple-bus";
ranges;
plic0: interrupt-controller@c000000 {
- #interrupt-cells = <1>;
compatible = "sifive,fu540-c000-plic", "sifive,plic-1.0.0";
reg = <0x0 0xc000000 0x0 0x4000000>;
- riscv,ndev = <53>;
+ #address-cells = <0>;
+ #interrupt-cells = <1>;
interrupt-controller;
- interrupts-extended = <
- &cpu0_intc 0xffffffff
- &cpu1_intc 0xffffffff &cpu1_intc 9
- &cpu2_intc 0xffffffff &cpu2_intc 9
- &cpu3_intc 0xffffffff &cpu3_intc 9
- &cpu4_intc 0xffffffff &cpu4_intc 9>;
+ interrupts-extended =
+ <&cpu0_intc 0xffffffff>,
+ <&cpu1_intc 0xffffffff>, <&cpu1_intc 9>,
+ <&cpu2_intc 0xffffffff>, <&cpu2_intc 9>,
+ <&cpu3_intc 0xffffffff>, <&cpu3_intc 9>,
+ <&cpu4_intc 0xffffffff>, <&cpu4_intc 9>;
+ riscv,ndev = <53>;
};
prci: clock-controller@10000000 {
compatible = "sifive,fu540-c000-prci";
@@ -170,7 +171,8 @@
compatible = "sifive,fu540-c000-pdma";
reg = <0x0 0x3000000 0x0 0x8000>;
interrupt-parent = <&plic0>;
- interrupts = <23 24 25 26 27 28 29 30>;
+ interrupts = <23>, <24>, <25>, <26>, <27>, <28>, <29>,
+ <30>;
#dma-cells = <1>;
};
uart1: serial@10011000 {
@@ -195,8 +197,8 @@
};
qspi0: spi@10040000 {
compatible = "sifive,fu540-c000-spi", "sifive,spi0";
- reg = <0x0 0x10040000 0x0 0x1000
- 0x0 0x20000000 0x0 0x10000000>;
+ reg = <0x0 0x10040000 0x0 0x1000>,
+ <0x0 0x20000000 0x0 0x10000000>;
interrupt-parent = <&plic0>;
interrupts = <51>;
clocks = <&prci PRCI_CLK_TLCLK>;
@@ -206,8 +208,8 @@
};
qspi1: spi@10041000 {
compatible = "sifive,fu540-c000-spi", "sifive,spi0";
- reg = <0x0 0x10041000 0x0 0x1000
- 0x0 0x30000000 0x0 0x10000000>;
+ reg = <0x0 0x10041000 0x0 0x1000>,
+ <0x0 0x30000000 0x0 0x10000000>;
interrupt-parent = <&plic0>;
interrupts = <52>;
clocks = <&prci PRCI_CLK_TLCLK>;
@@ -229,8 +231,8 @@
compatible = "sifive,fu540-c000-gem";
interrupt-parent = <&plic0>;
interrupts = <53>;
- reg = <0x0 0x10090000 0x0 0x2000
- 0x0 0x100a0000 0x0 0x1000>;
+ reg = <0x0 0x10090000 0x0 0x2000>,
+ <0x0 0x100a0000 0x0 0x1000>;
local-mac-address = [00 00 00 00 00 00];
clock-names = "pclk", "hclk";
clocks = <&prci PRCI_CLK_GEMGXLPLL>,
@@ -243,7 +245,7 @@
compatible = "sifive,fu540-c000-pwm", "sifive,pwm0";
reg = <0x0 0x10020000 0x0 0x1000>;
interrupt-parent = <&plic0>;
- interrupts = <42 43 44 45>;
+ interrupts = <42>, <43>, <44>, <45>;
clocks = <&prci PRCI_CLK_TLCLK>;
#pwm-cells = <3>;
status = "disabled";
@@ -252,7 +254,7 @@
compatible = "sifive,fu540-c000-pwm", "sifive,pwm0";
reg = <0x0 0x10021000 0x0 0x1000>;
interrupt-parent = <&plic0>;
- interrupts = <46 47 48 49>;
+ interrupts = <46>, <47>, <48>, <49>;
clocks = <&prci PRCI_CLK_TLCLK>;
#pwm-cells = <3>;
status = "disabled";
@@ -265,7 +267,7 @@
cache-size = <2097152>;
cache-unified;
interrupt-parent = <&plic0>;
- interrupts = <1 2 3>;
+ interrupts = <1>, <2>, <3>;
reg = <0x0 0x2010000 0x0 0x1000>;
};
gpio: gpio@10060000 {
diff --git a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
index abbb960f90a0..8464b0e3c887 100644
--- a/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
+++ b/arch/riscv/boot/dts/sifive/fu740-c000.dtsi
@@ -147,12 +147,12 @@
reg = <0x0 0xc000000 0x0 0x4000000>;
riscv,ndev = <69>;
interrupt-controller;
- interrupts-extended = <
- &cpu0_intc 0xffffffff
- &cpu1_intc 0xffffffff &cpu1_intc 9
- &cpu2_intc 0xffffffff &cpu2_intc 9
- &cpu3_intc 0xffffffff &cpu3_intc 9
- &cpu4_intc 0xffffffff &cpu4_intc 9>;
+ interrupts-extended =
+ <&cpu0_intc 0xffffffff>,
+ <&cpu1_intc 0xffffffff>, <&cpu1_intc 9>,
+ <&cpu2_intc 0xffffffff>, <&cpu2_intc 9>,
+ <&cpu3_intc 0xffffffff>, <&cpu3_intc 9>,
+ <&cpu4_intc 0xffffffff>, <&cpu4_intc 9>;
};
prci: clock-controller@10000000 {
compatible = "sifive,fu740-c000-prci";
@@ -273,7 +273,7 @@
cache-size = <2097152>;
cache-unified;
interrupt-parent = <&plic0>;
- interrupts = <19 21 22 20>;
+ interrupts = <19>, <21>, <22>, <20>;
reg = <0x0 0x2010000 0x0 0x1000>;
};
gpio: gpio@10060000 {
diff --git a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
index 6bfa1f24d3de..c4ed9efdff03 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unmatched-a00.dts
@@ -39,6 +39,11 @@
clock-frequency = <RTCCLK_FREQ>;
clock-output-names = "rtcclk";
};
+
+ gpio-poweroff {
+ compatible = "gpio-poweroff";
+ gpios = <&gpio 2 GPIO_ACTIVE_LOW>;
+ };
};
&uart0 {
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index ef473e2f503b..f120fcc43d0a 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -2,6 +2,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
@@ -13,10 +14,10 @@ CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-CONFIG_BPF_SYSCALL=y
+# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_SOC_MICROCHIP_POLARFIRE=y
CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
-CONFIG_SOC_MICROCHIP_POLARFIRE=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
CONFIG_VIRTUALIZATION=y
@@ -70,14 +71,14 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_SPI=y
CONFIG_SPI_SIFIVE=y
+# CONFIG_PTP_1588_CLOCK is not set
CONFIG_GPIOLIB=y
CONFIG_GPIO_SIFIVE=y
-# CONFIG_PTP_1588_CLOCK is not set
-CONFIG_POWER_RESET=y
CONFIG_DRM=m
CONFIG_DRM_RADEON=m
CONFIG_DRM_NOUVEAU=m
CONFIG_DRM_VIRTIO_GPU=m
+CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
@@ -88,10 +89,10 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
+CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_CADENCE=y
-CONFIG_MMC=y
CONFIG_MMC_SPI=y
CONFIG_RTC_CLASS=y
CONFIG_VIRTIO_PCI=y
@@ -142,5 +143,3 @@ CONFIG_RCU_EQS_DEBUG=y
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_MEMTEST=y
-# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_EFI=y
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
index b16a2a12c82a..3f42ed87dde8 100644
--- a/arch/riscv/configs/nommu_k210_defconfig
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -29,8 +29,6 @@ CONFIG_EMBEDDED=y
CONFIG_SLOB=y
# CONFIG_MMU is not set
CONFIG_SOC_CANAAN=y
-CONFIG_SOC_CANAAN_K210_DTB_SOURCE="k210_generic"
-CONFIG_MAXPHYSMEM_2GB=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_CMDLINE="earlycon console=ttySIF0"
@@ -75,7 +73,6 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_USER=y
# CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set
-# CONFIG_SURFACE_PLATFORMS is not set
# CONFIG_FILE_LOCKING is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
diff --git a/arch/riscv/configs/nommu_k210_sdcard_defconfig b/arch/riscv/configs/nommu_k210_sdcard_defconfig
index 61f887f65419..2a82a3b2992b 100644
--- a/arch/riscv/configs/nommu_k210_sdcard_defconfig
+++ b/arch/riscv/configs/nommu_k210_sdcard_defconfig
@@ -21,8 +21,6 @@ CONFIG_EMBEDDED=y
CONFIG_SLOB=y
# CONFIG_MMU is not set
CONFIG_SOC_CANAAN=y
-CONFIG_SOC_CANAAN_K210_DTB_SOURCE="k210_generic"
-CONFIG_MAXPHYSMEM_2GB=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
CONFIG_CMDLINE="earlycon console=ttySIF0 rootdelay=2 root=/dev/mmcblk0p1 ro"
@@ -30,7 +28,6 @@ CONFIG_CMDLINE_FORCE=y
# CONFIG_SECCOMP is not set
# CONFIG_STACKPROTECTOR is not set
# CONFIG_GCC_PLUGINS is not set
-# CONFIG_BLK_DEV_BSG is not set
# CONFIG_MQ_IOSCHED_DEADLINE is not set
# CONFIG_MQ_IOSCHED_KYBER is not set
CONFIG_BINFMT_FLAT=y
@@ -72,7 +69,6 @@ CONFIG_LEDS_GPIO=y
CONFIG_LEDS_USER=y
# CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set
-# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_EXT2_FS=y
# CONFIG_FILE_LOCKING is not set
# CONFIG_DNOTIFY is not set
diff --git a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig
index e046a0babde4..e1c9864b6237 100644
--- a/arch/riscv/configs/nommu_virt_defconfig
+++ b/arch/riscv/configs/nommu_virt_defconfig
@@ -24,15 +24,12 @@ CONFIG_EXPERT=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLOB=y
-# CONFIG_SLAB_MERGE_DEFAULT is not set
# CONFIG_MMU is not set
CONFIG_SOC_VIRT=y
-CONFIG_MAXPHYSMEM_2GB=y
CONFIG_SMP=y
CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
CONFIG_CMDLINE_FORCE=y
CONFIG_JUMP_LABEL=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_MSDOS_PARTITION is not set
# CONFIG_EFI_PARTITION is not set
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index 6e9f12ff968a..8b56a7f1eb06 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -2,6 +2,7 @@ CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_CGROUPS=y
@@ -13,7 +14,7 @@ CONFIG_USER_NS=y
CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
-CONFIG_BPF_SYSCALL=y
+# CONFIG_SYSFS_SYSCALL is not set
CONFIG_SOC_SIFIVE=y
CONFIG_SOC_VIRT=y
CONFIG_ARCH_RV32I=y
@@ -69,10 +70,10 @@ CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_SPI=y
CONFIG_SPI_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
-CONFIG_POWER_RESET=y
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
@@ -132,4 +133,3 @@ CONFIG_RCU_EQS_DEBUG=y
# CONFIG_FTRACE is not set
# CONFIG_RUNTIME_TESTING_MENU is not set
CONFIG_MEMTEST=y
-# CONFIG_SYSFS_SYSCALL is not set
diff --git a/arch/riscv/errata/alternative.c b/arch/riscv/errata/alternative.c
index 3b15885db70b..e8b4a0fe488c 100644
--- a/arch/riscv/errata/alternative.c
+++ b/arch/riscv/errata/alternative.c
@@ -22,7 +22,8 @@ static struct cpu_manufacturer_info_t {
} cpu_mfr_info;
static void (*vendor_patch_func)(struct alt_entry *begin, struct alt_entry *end,
- unsigned long archid, unsigned long impid);
+ unsigned long archid,
+ unsigned long impid) __initdata;
static inline void __init riscv_fill_cpu_mfr_info(void)
{
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
index 445ccc97305a..57b86fd9916c 100644
--- a/arch/riscv/include/asm/Kbuild
+++ b/arch/riscv/include/asm/Kbuild
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
generic-y += early_ioremap.h
-generic-y += extable.h
generic-y += flat.h
generic-y += kvm_para.h
generic-y += user.h
diff --git a/arch/riscv/include/asm/asm-extable.h b/arch/riscv/include/asm/asm-extable.h
new file mode 100644
index 000000000000..14be0673f5b5
--- /dev/null
+++ b/arch/riscv/include/asm/asm-extable.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_ASM_EXTABLE_H
+#define __ASM_ASM_EXTABLE_H
+
+#define EX_TYPE_NONE 0
+#define EX_TYPE_FIXUP 1
+#define EX_TYPE_BPF 2
+#define EX_TYPE_UACCESS_ERR_ZERO 3
+
+#ifdef __ASSEMBLY__
+
+#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
+ .pushsection __ex_table, "a"; \
+ .balign 4; \
+ .long ((insn) - .); \
+ .long ((fixup) - .); \
+ .short (type); \
+ .short (data); \
+ .popsection;
+
+ .macro _asm_extable, insn, fixup
+ __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
+ .endm
+
+#else /* __ASSEMBLY__ */
+
+#include <linux/bits.h>
+#include <linux/stringify.h>
+#include <asm/gpr-num.h>
+
+#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
+ ".pushsection __ex_table, \"a\"\n" \
+ ".balign 4\n" \
+ ".long ((" insn ") - .)\n" \
+ ".long ((" fixup ") - .)\n" \
+ ".short (" type ")\n" \
+ ".short (" data ")\n" \
+ ".popsection\n"
+
+#define _ASM_EXTABLE(insn, fixup) \
+ __ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
+
+#define EX_DATA_REG_ERR_SHIFT 0
+#define EX_DATA_REG_ERR GENMASK(4, 0)
+#define EX_DATA_REG_ZERO_SHIFT 5
+#define EX_DATA_REG_ZERO GENMASK(9, 5)
+
+#define EX_DATA_REG(reg, gpr) \
+ "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
+
+#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \
+ __DEFINE_ASM_GPR_NUMS \
+ __ASM_EXTABLE_RAW(#insn, #fixup, \
+ __stringify(EX_TYPE_UACCESS_ERR_ZERO), \
+ "(" \
+ EX_DATA_REG(ERR, err) " | " \
+ EX_DATA_REG(ZERO, zero) \
+ ")")
+
+#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_ASM_EXTABLE_H */
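As a usage sketch (mirroring the __get_user_asm rework later in this series), a fixup entry is attached to a potentially faulting access like this; read_word is a hypothetical helper, not a kernel API:

static inline int read_word(unsigned long __user *addr, unsigned long *val)
{
	int err = 0;
	unsigned long x;

	__asm__ __volatile__ (
		"1:	ld %1, %2\n"		/* the access that may fault */
		"2:\n"				/* continuation point */
		_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1)
		: "+r" (err), "=&r" (x)
		: "m" (*addr));

	*val = x;	/* zeroed by the fixup handler on fault */
	return err;	/* -EFAULT on fault, else 0 */
}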
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 396a3303c537..3540b690944b 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -20,7 +20,6 @@
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffs.h>
diff --git a/arch/riscv/include/asm/cpu_ops.h b/arch/riscv/include/asm/cpu_ops.h
index a8ec3c5c1bd2..134590f1b843 100644
--- a/arch/riscv/include/asm/cpu_ops.h
+++ b/arch/riscv/include/asm/cpu_ops.h
@@ -40,7 +40,5 @@ struct cpu_operations {
extern const struct cpu_operations *cpu_ops[NR_CPUS];
void __init cpu_set_ops(int cpu);
-void cpu_update_secondary_bootdata(unsigned int cpuid,
- struct task_struct *tidle);
#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/riscv/include/asm/cpu_ops_sbi.h b/arch/riscv/include/asm/cpu_ops_sbi.h
new file mode 100644
index 000000000000..56e4b76d09ff
--- /dev/null
+++ b/arch/riscv/include/asm/cpu_ops_sbi.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 by Rivos Inc.
+ */
+#ifndef __ASM_CPU_OPS_SBI_H
+#define __ASM_CPU_OPS_SBI_H
+
+#ifndef __ASSEMBLY__
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/threads.h>
+
+/**
+ * struct sbi_hart_boot_data - Hart specific boot data used during booting
+ * and CPU hotplug.
+ * @task_ptr: A pointer to the hart specific tp
+ * @stack_ptr: A pointer to the hart specific sp
+ */
+struct sbi_hart_boot_data {
+ void *task_ptr;
+ void *stack_ptr;
+};
+#endif
+
+#endif /* ifndef __ASM_CPU_OPS_SBI_H */
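A sketch of how the boot path might fill this structure per hart, so the startup assembly can load its tp and sp before entering C; prepare_hart_boot_data is a hypothetical helper (the real writer lives in cpu_ops_sbi.c):

#include <linux/sched/task_stack.h>

static struct sbi_hart_boot_data boot_data[NR_CPUS];

static void prepare_hart_boot_data(unsigned int hartid,
				   struct task_struct *tidle)
{
	boot_data[hartid].task_ptr = tidle;
	boot_data[hartid].stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
}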
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 5046f431645c..ae711692eec9 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -40,14 +40,13 @@
#ifndef CONFIG_64BIT
#define SATP_PPN _AC(0x003FFFFF, UL)
#define SATP_MODE_32 _AC(0x80000000, UL)
-#define SATP_MODE SATP_MODE_32
#define SATP_ASID_BITS 9
#define SATP_ASID_SHIFT 22
#define SATP_ASID_MASK _AC(0x1FF, UL)
#else
#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
#define SATP_MODE_39 _AC(0x8000000000000000, UL)
-#define SATP_MODE SATP_MODE_39
+#define SATP_MODE_48 _AC(0x9000000000000000, UL)
#define SATP_ASID_BITS 16
#define SATP_ASID_SHIFT 44
#define SATP_ASID_MASK _AC(0xFFFF, UL)
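For illustration, the satp register value is composed from these fields roughly as follows; make_satp is a hypothetical helper, not a kernel API:

static inline unsigned long make_satp(unsigned long root_pfn,
				      unsigned long asid, unsigned long mode)
{
	return mode |					/* SATP_MODE_39/48 */
	       ((asid & SATP_ASID_MASK) << SATP_ASID_SHIFT) |
	       (root_pfn & SATP_PPN);			/* root table PPN */
}

/* e.g. make_satp(pfn, 0, SATP_MODE_48) selects SV48 translation */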
diff --git a/arch/riscv/include/asm/extable.h b/arch/riscv/include/asm/extable.h
new file mode 100644
index 000000000000..512012d193dc
--- /dev/null
+++ b/arch/riscv/include/asm/extable.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_EXTABLE_H
+#define _ASM_RISCV_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative offsets: the first
+ * is the relative offset to an instruction that is allowed to fault,
+ * and the second is the relative offset at which the program should
+ * continue. No registers are modified, so it is entirely up to the
+ * continuation code to figure out what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+ int insn, fixup;
+ short type, data;
+};
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+#define swap_ex_entry_fixup(a, b, tmp, delta) \
+do { \
+ (a)->fixup = (b)->fixup + (delta); \
+ (b)->fixup = (tmp).fixup - (delta); \
+ (a)->type = (b)->type; \
+ (b)->type = (tmp).type; \
+ (a)->data = (b)->data; \
+ (b)->data = (tmp).data; \
+} while (0)
+
+bool fixup_exception(struct pt_regs *regs);
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
+bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs);
+#else
+static inline bool
+ex_handler_bpf(const struct exception_table_entry *ex,
+ struct pt_regs *regs)
+{
+ return false;
+}
+#endif
+
+#endif
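Since ARCH_HAS_RELATIVE_EXTABLE stores self-relative offsets, the absolute addresses are recovered as in this sketch, which mirrors the generic ex_to_insn()/ex_to_fixup() helpers:

static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
{
	/* the stored offset is relative to the field's own address */
	return (unsigned long)&x->insn + x->insn;
}

static inline unsigned long ex_to_fixup(const struct exception_table_entry *x)
{
	return (unsigned long)&x->fixup + x->fixup;
}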
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 54cbf07fb4e9..58a718573ad6 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -24,6 +24,7 @@ enum fixed_addresses {
FIX_HOLE,
FIX_PTE,
FIX_PMD,
+ FIX_PUD,
FIX_TEXT_POKE1,
FIX_TEXT_POKE0,
FIX_EARLYCON_MEM_BASE,
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
index 1b00badb9f87..fc8130f995c1 100644
--- a/arch/riscv/include/asm/futex.h
+++ b/arch/riscv/include/asm/futex.h
@@ -11,6 +11,7 @@
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <asm/asm.h>
+#include <asm/asm-extable.h>
/* We don't even really need the extable code, but for now keep it simple */
#ifndef CONFIG_MMU
@@ -20,23 +21,14 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
{ \
- uintptr_t tmp; \
__enable_user_access(); \
__asm__ __volatile__ ( \
"1: " insn " \n" \
"2: \n" \
- " .section .fixup,\"ax\" \n" \
- " .balign 4 \n" \
- "3: li %[r],%[e] \n" \
- " jump 2b,%[t] \n" \
- " .previous \n" \
- " .section __ex_table,\"a\" \n" \
- " .balign " RISCV_SZPTR " \n" \
- " " RISCV_PTR " 1b, 3b \n" \
- " .previous \n" \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %[r]) \
: [r] "+r" (ret), [ov] "=&r" (oldval), \
- [u] "+m" (*uaddr), [t] "=&r" (tmp) \
- : [op] "Jr" (oparg), [e] "i" (-EFAULT) \
+ [u] "+m" (*uaddr) \
+ : [op] "Jr" (oparg) \
: "memory"); \
__disable_user_access(); \
}
@@ -98,18 +90,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
"2: sc.w.aqrl %[t],%z[nv],%[u] \n"
" bnez %[t],1b \n"
"3: \n"
- " .section .fixup,\"ax\" \n"
- " .balign 4 \n"
- "4: li %[r],%[e] \n"
- " jump 3b,%[t] \n"
- " .previous \n"
- " .section __ex_table,\"a\" \n"
- " .balign " RISCV_SZPTR " \n"
- " " RISCV_PTR " 1b, 4b \n"
- " " RISCV_PTR " 2b, 4b \n"
- " .previous \n"
+ _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \
+ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \
: [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
- : [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "i" (-EFAULT)
+ : [ov] "Jr" (oldval), [nv] "Jr" (newval)
: "memory");
__disable_user_access();
diff --git a/arch/riscv/include/asm/gpr-num.h b/arch/riscv/include/asm/gpr-num.h
new file mode 100644
index 000000000000..dfee2829fc7c
--- /dev/null
+++ b/arch/riscv/include/asm/gpr-num.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_GPR_NUM_H
+#define __ASM_GPR_NUM_H
+
+#ifdef __ASSEMBLY__
+ .equ .L__gpr_num_zero, 0
+ .equ .L__gpr_num_ra, 1
+ .equ .L__gpr_num_sp, 2
+ .equ .L__gpr_num_gp, 3
+ .equ .L__gpr_num_tp, 4
+ .equ .L__gpr_num_t0, 5
+ .equ .L__gpr_num_t1, 6
+ .equ .L__gpr_num_t2, 7
+ .equ .L__gpr_num_s0, 8
+ .equ .L__gpr_num_s1, 9
+ .equ .L__gpr_num_a0, 10
+ .equ .L__gpr_num_a1, 11
+ .equ .L__gpr_num_a2, 12
+ .equ .L__gpr_num_a3, 13
+ .equ .L__gpr_num_a4, 14
+ .equ .L__gpr_num_a5, 15
+ .equ .L__gpr_num_a6, 16
+ .equ .L__gpr_num_a7, 17
+ .equ .L__gpr_num_s2, 18
+ .equ .L__gpr_num_s3, 19
+ .equ .L__gpr_num_s4, 20
+ .equ .L__gpr_num_s5, 21
+ .equ .L__gpr_num_s6, 22
+ .equ .L__gpr_num_s7, 23
+ .equ .L__gpr_num_s8, 24
+ .equ .L__gpr_num_s9, 25
+ .equ .L__gpr_num_s10, 26
+ .equ .L__gpr_num_s11, 27
+ .equ .L__gpr_num_t3, 28
+ .equ .L__gpr_num_t4, 29
+ .equ .L__gpr_num_t5, 30
+ .equ .L__gpr_num_t6, 31
+
+#else /* __ASSEMBLY__ */
+
+#define __DEFINE_ASM_GPR_NUMS \
+" .equ .L__gpr_num_zero, 0\n" \
+" .equ .L__gpr_num_ra, 1\n" \
+" .equ .L__gpr_num_sp, 2\n" \
+" .equ .L__gpr_num_gp, 3\n" \
+" .equ .L__gpr_num_tp, 4\n" \
+" .equ .L__gpr_num_t0, 5\n" \
+" .equ .L__gpr_num_t1, 6\n" \
+" .equ .L__gpr_num_t2, 7\n" \
+" .equ .L__gpr_num_s0, 8\n" \
+" .equ .L__gpr_num_s1, 9\n" \
+" .equ .L__gpr_num_a0, 10\n" \
+" .equ .L__gpr_num_a1, 11\n" \
+" .equ .L__gpr_num_a2, 12\n" \
+" .equ .L__gpr_num_a3, 13\n" \
+" .equ .L__gpr_num_a4, 14\n" \
+" .equ .L__gpr_num_a5, 15\n" \
+" .equ .L__gpr_num_a6, 16\n" \
+" .equ .L__gpr_num_a7, 17\n" \
+" .equ .L__gpr_num_s2, 18\n" \
+" .equ .L__gpr_num_s3, 19\n" \
+" .equ .L__gpr_num_s4, 20\n" \
+" .equ .L__gpr_num_s5, 21\n" \
+" .equ .L__gpr_num_s6, 22\n" \
+" .equ .L__gpr_num_s7, 23\n" \
+" .equ .L__gpr_num_s8, 24\n" \
+" .equ .L__gpr_num_s9, 25\n" \
+" .equ .L__gpr_num_s10, 26\n" \
+" .equ .L__gpr_num_s11, 27\n" \
+" .equ .L__gpr_num_t3, 28\n" \
+" .equ .L__gpr_num_t4, 29\n" \
+" .equ .L__gpr_num_t5, 30\n" \
+" .equ .L__gpr_num_t6, 31\n"
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_GPR_NUM_H */
diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h
index b00f503ec124..0b85e363e778 100644
--- a/arch/riscv/include/asm/kasan.h
+++ b/arch/riscv/include/asm/kasan.h
@@ -27,13 +27,18 @@
*/
#define KASAN_SHADOW_SCALE_SHIFT 3
-#define KASAN_SHADOW_SIZE (UL(1) << ((CONFIG_VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
-#define KASAN_SHADOW_START KERN_VIRT_START
-#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+#define KASAN_SHADOW_SIZE (UL(1) << ((VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
+/*
+ * Depending on the size of the virtual address space, the region may not be
+ * aligned on PGDIR_SIZE, so force its alignment to ease its population.
+ */
+#define KASAN_SHADOW_START ((KASAN_SHADOW_END - KASAN_SHADOW_SIZE) & PGDIR_MASK)
+#define KASAN_SHADOW_END MODULES_LOWEST_VADDR
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
void kasan_init(void);
asmlinkage void kasan_early_init(void);
+void kasan_swapper_init(void);
#endif
#endif
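These constants feed the generic address-to-shadow translation, where each shadow byte covers 1 << KASAN_SHADOW_SCALE_SHIFT (= 8) bytes of memory, roughly as in include/linux/kasan.h:

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}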
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 2639b9ee48f9..99ef6a120617 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -77,13 +77,6 @@ struct kvm_sbi_context {
int return_handled;
};
-#define KVM_MMU_PAGE_CACHE_NR_OBJS 32
-
-struct kvm_mmu_page_cache {
- int nobjs;
- void *objects[KVM_MMU_PAGE_CACHE_NR_OBJS];
-};
-
struct kvm_cpu_trap {
unsigned long sepc;
unsigned long scause;
@@ -193,7 +186,7 @@ struct kvm_vcpu_arch {
struct kvm_sbi_context sbi_context;
/* Cache pages needed to program page tables with spinlock held */
- struct kvm_mmu_page_cache mmu_page_cache;
+ struct kvm_mmu_memory_cache mmu_page_cache;
/* VCPU power-off state */
bool power_off;
@@ -208,7 +201,6 @@ struct kvm_vcpu_arch {
static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
#define KVM_ARCH_WANT_MMU_NOTIFIER
@@ -221,12 +213,12 @@ void __kvm_riscv_hfence_gvma_all(void);
int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
struct kvm_memory_slot *memslot,
gpa_t gpa, unsigned long hva, bool is_write);
-void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu);
int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm);
void kvm_riscv_stage2_free_pgd(struct kvm *kvm);
void kvm_riscv_stage2_update_hgatp(struct kvm_vcpu *vcpu);
void kvm_riscv_stage2_mode_detect(void);
unsigned long kvm_riscv_stage2_mode(void);
+int kvm_riscv_stage2_gpa_bits(void);
void kvm_riscv_stage2_vmid_detect(void);
unsigned long kvm_riscv_stage2_vmid_bits(void);
diff --git a/arch/riscv/include/asm/kvm_types.h b/arch/riscv/include/asm/kvm_types.h
index e476b404eb67..e15765f98d7a 100644
--- a/arch/riscv/include/asm/kvm_types.h
+++ b/arch/riscv/include/asm/kvm_types.h
@@ -2,6 +2,6 @@
#ifndef _ASM_RISCV_KVM_TYPES_H
#define _ASM_RISCV_KVM_TYPES_H
-#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 32
#endif /* _ASM_RISCV_KVM_TYPES_H */
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
new file mode 100644
index 000000000000..76e4e17a3e00
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#ifndef __RISCV_KVM_VCPU_SBI_H__
+#define __RISCV_KVM_VCPU_SBI_H__
+
+#define KVM_SBI_IMPID 3
+
+#define KVM_SBI_VERSION_MAJOR 0
+#define KVM_SBI_VERSION_MINOR 2
+
+struct kvm_vcpu_sbi_extension {
+ unsigned long extid_start;
+ unsigned long extid_end;
+ /*
+ * SBI extension handler. It can be defined for a given extension or a
+ * group of extensions, but it should always return Linux error codes
+ * rather than SBI-specific error codes.
+ */
+ int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long *out_val, struct kvm_cpu_trap *utrap,
+ bool *exit);
+};
+
+void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
+const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
+
+#endif /* __RISCV_KVM_VCPU_SBI_H__ */
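As a sketch, an extension is described by filling one of these structures; the example below is modeled on the series' TIME handler and assumes SBI_EXT_TIME from the same enum sbi_ext_id shown elsewhere in this series:

static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				    unsigned long *out_val,
				    struct kvm_cpu_trap *utrap, bool *exit)
{
	/* ... emulate the TIME extension call for the guest ... */
	return 0;
}

const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
	.extid_start = SBI_EXT_TIME,
	.extid_end = SBI_EXT_TIME,
	.handler = kvm_sbi_ext_time_handler,
};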
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index b3e5ff0125fe..160e3a1e8f8b 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -31,9 +31,20 @@
* When not using MMU this corresponds to the first free page in
* physical memory (aligned on a page boundary).
*/
+#ifdef CONFIG_64BIT
+#ifdef CONFIG_MMU
+#define PAGE_OFFSET kernel_map.page_offset
+#else
#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
-
-#define KERN_VIRT_SIZE (-PAGE_OFFSET)
+#endif
+/*
+ * By default, the CONFIG_PAGE_OFFSET value corresponds to the SV48 address
+ * space, so define the PAGE_OFFSET value for SV39 separately.
+ */
+#define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL)
+#else
+#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+#endif /* CONFIG_64BIT */
#ifndef __ASSEMBLY__
@@ -86,6 +97,7 @@ extern unsigned long riscv_pfn_base;
#endif /* CONFIG_MMU */
struct kernel_mapping {
+ unsigned long page_offset;
unsigned long virt_addr;
uintptr_t phys_addr;
uintptr_t size;
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index 0af6933a7100..11823004b87a 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -11,6 +11,8 @@
#include <asm/tlb.h>
#ifdef CONFIG_MMU
+#define __HAVE_ARCH_PUD_ALLOC_ONE
+#define __HAVE_ARCH_PUD_FREE
#include <asm-generic/pgalloc.h>
static inline void pmd_populate_kernel(struct mm_struct *mm,
@@ -36,6 +38,44 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
}
+
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+ if (pgtable_l4_enabled) {
+ unsigned long pfn = virt_to_pfn(pud);
+
+ set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+ }
+}
+
+static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
+ pud_t *pud)
+{
+ if (pgtable_l4_enabled) {
+ unsigned long pfn = virt_to_pfn(pud);
+
+ set_p4d_safe(p4d,
+ __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+ }
+}
+
+#define pud_alloc_one pud_alloc_one
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ if (pgtable_l4_enabled)
+ return __pud_alloc_one(mm, addr);
+
+ return NULL;
+}
+
+#define pud_free pud_free
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ if (pgtable_l4_enabled)
+ __pud_free(mm, pud);
+}
+
+#define __pud_free_tlb(tlb, pud, addr) pud_free((tlb)->mm, pud)
#endif /* __PAGETABLE_PMD_FOLDED */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 228261aa9628..bbbdd66e5e2f 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -8,16 +8,36 @@
#include <linux/const.h>
-#define PGDIR_SHIFT 30
+extern bool pgtable_l4_enabled;
+
+#define PGDIR_SHIFT_L3 30
+#define PGDIR_SHIFT_L4 39
+#define PGDIR_SIZE_L3 (_AC(1, UL) << PGDIR_SHIFT_L3)
+
+#define PGDIR_SHIFT (pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3)
/* Size of region mapped by a page global directory */
#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+/* pud is folded into pgd in case of 3-level page table */
+#define PUD_SHIFT 30
+#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE - 1))
+
#define PMD_SHIFT 21
/* Size of region mapped by a page middle directory */
#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE - 1))
+/* Page Upper Directory entry */
+typedef struct {
+ unsigned long pud;
+} pud_t;
+
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) })
+#define PTRS_PER_PUD (PAGE_SIZE / sizeof(pud_t))
+
/* Page Middle Directory entry */
typedef struct {
unsigned long pmd;
@@ -59,6 +79,16 @@ static inline void pud_clear(pud_t *pudp)
set_pud(pudp, __pud(0));
}
+static inline pud_t pfn_pud(unsigned long pfn, pgprot_t prot)
+{
+ return __pud((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+static inline unsigned long _pud_pfn(pud_t pud)
+{
+ return pud_val(pud) >> _PAGE_PFN_SHIFT;
+}
+
static inline pmd_t *pud_pgtable(pud_t pud)
{
return (pmd_t *)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
@@ -69,6 +99,17 @@ static inline struct page *pud_page(pud_t pud)
return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
}
+#define mm_pud_folded mm_pud_folded
+static inline bool mm_pud_folded(struct mm_struct *mm)
+{
+ if (pgtable_l4_enabled)
+ return false;
+
+ return true;
+}
+
+#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+
static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
{
return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
@@ -84,4 +125,69 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
#define pmd_ERROR(e) \
pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+#define pud_ERROR(e) \
+ pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+
+static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ *p4dp = p4d;
+ else
+ set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
+}
+
+static inline int p4d_none(p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ return (p4d_val(p4d) == 0);
+
+ return 0;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ return (p4d_val(p4d) & _PAGE_PRESENT);
+
+ return 1;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ return !p4d_present(p4d);
+
+ return 0;
+}
+
+static inline void p4d_clear(p4d_t *p4d)
+{
+ if (pgtable_l4_enabled)
+ set_p4d(p4d, __p4d(0));
+}
+
+static inline pud_t *p4d_pgtable(p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ return (pud_t *)pfn_to_virt(p4d_val(p4d) >> _PAGE_PFN_SHIFT);
+
+ return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) });
+}
+
+static inline struct page *p4d_page(p4d_t p4d)
+{
+ return pfn_to_page(p4d_val(p4d) >> _PAGE_PFN_SHIFT);
+}
+
+#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+
+#define pud_offset pud_offset
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ if (pgtable_l4_enabled)
+ return p4d_pgtable(*p4d) + pud_index(address);
+
+ return (pud_t *)p4d;
+}
+
#endif /* _ASM_RISCV_PGTABLE_64_H */
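A sketch of how a four-level walk uses these helpers; when pgtable_l4_enabled is false, pud_offset() returns the p4d slot itself, so the same code transparently walks a three-level table:

static pte_t *walk_va(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);   /* folded by pgtable-nop4d.h */
	pud_t *pud = pud_offset(p4d, addr);   /* folded if !pgtable_l4_enabled */
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_kernel(pmd, addr);  /* sketch: no NULL/huge checks */
}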
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index 2ee413912926..a6b0c89824c2 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -31,7 +31,7 @@
* _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
* distinguish them from swapped out pages
*/
-#define _PAGE_PROT_NONE _PAGE_READ
+#define _PAGE_PROT_NONE _PAGE_GLOBAL
#define _PAGE_PFN_SHIFT 10
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index bf204e7c1f74..7e949f25c933 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -24,8 +24,19 @@
#define KERNEL_LINK_ADDR PAGE_OFFSET
#endif
+/* Number of entries in the page global directory */
+#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
+/* Number of entries in the page table */
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
+
+/*
+ * Half of the kernel address space (half of the entries of the page global
+ * directory) is for the direct mapping.
+ */
+#define KERN_VIRT_SIZE ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
+
#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
-#define VMALLOC_END (PAGE_OFFSET - 1)
+#define VMALLOC_END PAGE_OFFSET
#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
#define BPF_JIT_REGION_SIZE (SZ_128M)
@@ -39,8 +50,10 @@
/* Modules always live before the kernel */
#ifdef CONFIG_64BIT
-#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
-#define MODULES_END (PFN_ALIGN((unsigned long)&_start))
+/* This is used to define the end of the KASAN shadow region */
+#define MODULES_LOWEST_VADDR (KERNEL_LINK_ADDR - SZ_2G)
+#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
+#define MODULES_END (PFN_ALIGN((unsigned long)&_start))
#endif
/*
@@ -48,10 +61,16 @@
* struct pages to map half the virtual address space. Then
* position vmemmap directly below the VMALLOC region.
*/
+#ifdef CONFIG_64BIT
+#define VA_BITS (pgtable_l4_enabled ? 48 : 39)
+#else
+#define VA_BITS 32
+#endif
+
#define VMEMMAP_SHIFT \
- (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+ (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
-#define VMEMMAP_END (VMALLOC_START - 1)
+#define VMEMMAP_END VMALLOC_START
#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)
/*
@@ -83,8 +102,7 @@
#ifndef __ASSEMBLY__
-/* Page Upper Directory not used in RISC-V */
-#include <asm-generic/pgtable-nopud.h>
+#include <asm-generic/pgtable-nop4d.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>
@@ -107,19 +125,27 @@
#define XIP_FIXUP(addr) (addr)
#endif /* CONFIG_XIP_KERNEL */
-#ifdef CONFIG_MMU
-/* Number of entries in the page global directory */
-#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
-/* Number of entries in the page table */
-#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
+struct pt_alloc_ops {
+ pte_t *(*get_pte_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pte)(uintptr_t va);
+#ifndef __PAGETABLE_PMD_FOLDED
+ pmd_t *(*get_pmd_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pmd)(uintptr_t va);
+ pud_t *(*get_pud_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pud)(uintptr_t va);
+#endif
+};
+
+extern struct pt_alloc_ops pt_ops __initdata;
+#ifdef CONFIG_MMU
/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
/* Page protection bits */
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
-#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
+#define PAGE_NONE __pgprot(_PAGE_PROT_NONE | _PAGE_READ)
#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
@@ -628,11 +654,12 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
*
* Format of swap PTE:
* bit 0: _PAGE_PRESENT (zero)
- * bit 1: _PAGE_PROT_NONE (zero)
- * bits 2 to 6: swap type
- * bits 7 to XLEN-1: swap offset
+ * bits 1 to 3: _PAGE_LEAF (zero)
+ * bit 5: _PAGE_PROT_NONE (zero)
+ * bits 6 to 10: swap type
+ * bits 11 to XLEN-1: swap offset
*/
-#define __SWP_TYPE_SHIFT 2
+#define __SWP_TYPE_SHIFT 6
#define __SWP_TYPE_BITS 5
#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
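A worked example of the layout above (illustrative only): swap type 3 with offset 0x1234 encodes as

unsigned long swp_val = (0x1234UL << __SWP_OFFSET_SHIFT) |	      /* offset, bit 11 up */
			((3UL & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT); /* type, bits 6-10 */
/* bits 0-5 stay clear, so the entry can never look present or leaf */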
@@ -648,12 +675,17 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
+#define __swp_entry_to_pmd(swp) __pmd((swp).val)
+#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
/*
* In the RV64 Linux scheme, we give the user half of the virtual-address space
* and give the kernel the other (upper) half.
*/
#ifdef CONFIG_64BIT
-#define KERN_VIRT_START (-(BIT(CONFIG_VA_BITS)) + TASK_SIZE)
+#define KERN_VIRT_START (-(BIT(VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START FIXADDR_START
#endif
@@ -661,11 +693,22 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
/*
- * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
- * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
+ * Task size is:
+ * - 0x9fc00000 (~2.5GB) for RV32.
+ * - 0x4000000000 ( 256GB) for RV64 using SV39 mmu
+ * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu
+ *
+ * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
+ * Instruction Set Manual Volume II: Privileged Architecture" states that
+ * "load and store effective addresses, which are 64bits, must have bits
+ * 63–48 all equal to bit 47, or else a page-fault exception will occur."
*/
#ifdef CONFIG_64BIT
-#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
+#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
+#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
#else
-#define TASK_SIZE FIXADDR_START
+#define TASK_SIZE FIXADDR_START
+#define TASK_SIZE_MIN TASK_SIZE
#endif
#else /* CONFIG_MMU */
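Worked arithmetic for the definitions above (with 4K pages, PTRS_PER_PGD = 512):

/*
 * SV48: TASK_SIZE     = (1UL << 39) * 512 / 2 = 1UL << 47 = 0x800000000000
 * SV39: TASK_SIZE_MIN = (1UL << 30) * 512 / 2 = 1UL << 38 =   0x4000000000
 */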
@@ -691,6 +734,8 @@ extern uintptr_t _dtb_early_pa;
#define dtb_early_va _dtb_early_va
#define dtb_early_pa _dtb_early_pa
#endif /* CONFIG_XIP_KERNEL */
+extern u64 satp_mode;
+extern bool pgtable_l4_enabled;
void paging_init(void);
void misc_mem_init(void);
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 0d42693cb65e..d1c37479d828 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -8,6 +8,7 @@
#define _ASM_RISCV_SBI_H
#include <linux/types.h>
+#include <linux/cpumask.h>
#ifdef CONFIG_RISCV_SBI
enum sbi_ext_id {
@@ -27,6 +28,15 @@ enum sbi_ext_id {
SBI_EXT_IPI = 0x735049,
SBI_EXT_RFENCE = 0x52464E43,
SBI_EXT_HSM = 0x48534D,
+ SBI_EXT_SRST = 0x53525354,
+
+ /* Experimental extensions must lie within this range */
+ SBI_EXT_EXPERIMENTAL_START = 0x08000000,
+ SBI_EXT_EXPERIMENTAL_END = 0x08FFFFFF,
+
+ /* Vendor extensions must lie within this range */
+ SBI_EXT_VENDOR_START = 0x09000000,
+ SBI_EXT_VENDOR_END = 0x09FFFFFF,
};
enum sbi_ext_base_fid {
@@ -70,6 +80,21 @@ enum sbi_hsm_hart_status {
SBI_HSM_HART_STATUS_STOP_PENDING,
};
+enum sbi_ext_srst_fid {
+ SBI_EXT_SRST_RESET = 0,
+};
+
+enum sbi_srst_reset_type {
+ SBI_SRST_RESET_TYPE_SHUTDOWN = 0,
+ SBI_SRST_RESET_TYPE_COLD_REBOOT,
+ SBI_SRST_RESET_TYPE_WARM_REBOOT,
+};
+
+enum sbi_srst_reset_reason {
+ SBI_SRST_RESET_REASON_NONE = 0,
+ SBI_SRST_RESET_REASON_SYS_FAILURE,
+};
+
#define SBI_SPEC_VERSION_DEFAULT 0x1
#define SBI_SPEC_VERSION_MAJOR_SHIFT 24
#define SBI_SPEC_VERSION_MAJOR_MASK 0x7f
@@ -82,6 +107,7 @@ enum sbi_hsm_hart_status {
#define SBI_ERR_INVALID_PARAM -3
#define SBI_ERR_DENIED -4
#define SBI_ERR_INVALID_ADDRESS -5
+#define SBI_ERR_ALREADY_AVAILABLE -6
extern unsigned long sbi_spec_version;
struct sbiret {
@@ -103,27 +129,27 @@ long sbi_get_mimpid(void);
void sbi_set_timer(uint64_t stime_value);
void sbi_shutdown(void);
void sbi_clear_ipi(void);
-int sbi_send_ipi(const unsigned long *hart_mask);
-int sbi_remote_fence_i(const unsigned long *hart_mask);
-int sbi_remote_sfence_vma(const unsigned long *hart_mask,
+int sbi_send_ipi(const struct cpumask *cpu_mask);
+int sbi_remote_fence_i(const struct cpumask *cpu_mask);
+int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size);
-int sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
+int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size,
unsigned long asid);
-int sbi_remote_hfence_gvma(const unsigned long *hart_mask,
+int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size);
-int sbi_remote_hfence_gvma_vmid(const unsigned long *hart_mask,
+int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size,
unsigned long vmid);
-int sbi_remote_hfence_vvma(const unsigned long *hart_mask,
+int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size);
-int sbi_remote_hfence_vvma_asid(const unsigned long *hart_mask,
+int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size,
unsigned long asid);
@@ -148,9 +174,17 @@ static inline unsigned long sbi_minor_version(void)
return sbi_spec_version & SBI_SPEC_VERSION_MINOR_MASK;
}
+/* Make SBI version */
+static inline unsigned long sbi_mk_version(unsigned long major,
+ unsigned long minor)
+{
+ return ((major & SBI_SPEC_VERSION_MAJOR_MASK) <<
+ SBI_SPEC_VERSION_MAJOR_SHIFT) | minor;
+}
+
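Usage sketch: callers compare the runtime-detected version against a constructed one, e.g.

if (sbi_spec_version >= sbi_mk_version(0, 3))
	/* v0.3+ features such as SRST may be probed */;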
int sbi_err_map_linux_errno(int err);
#else /* CONFIG_RISCV_SBI */
-static inline int sbi_remote_fence_i(const unsigned long *hart_mask) { return -1; }
+static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
static inline void sbi_init(void) {}
#endif /* CONFIG_RISCV_SBI */
#endif /* _ASM_RISCV_SBI_H */
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
index a7d2811f3536..23170c933d73 100644
--- a/arch/riscv/include/asm/smp.h
+++ b/arch/riscv/include/asm/smp.h
@@ -43,7 +43,6 @@ void arch_send_call_function_ipi_mask(struct cpumask *mask);
void arch_send_call_function_single_ipi(int cpu);
int riscv_hartid_to_cpuid(int hartid);
-void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
/* Set custom IPI operations */
void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops);
@@ -63,8 +62,6 @@ asmlinkage void smp_callin(void);
#if defined CONFIG_HOTPLUG_CPU
int __cpu_disable(void);
void __cpu_die(unsigned int cpu);
-void cpu_stop(void);
-#else
#endif /* CONFIG_HOTPLUG_CPU */
#else
@@ -85,13 +82,6 @@ static inline unsigned long cpuid_to_hartid_map(int cpu)
return boot_cpu_hartid;
}
-static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
- struct cpumask *out)
-{
- cpumask_clear(out);
- cpumask_set_cpu(boot_cpu_hartid, out);
-}
-
static inline void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops)
{
}
diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h
index 45a7018a8118..63acaecc3374 100644
--- a/arch/riscv/include/asm/sparsemem.h
+++ b/arch/riscv/include/asm/sparsemem.h
@@ -4,7 +4,11 @@
#define _ASM_RISCV_SPARSEMEM_H
#ifdef CONFIG_SPARSEMEM
-#define MAX_PHYSMEM_BITS CONFIG_PA_BITS
+#ifdef CONFIG_64BIT
+#define MAX_PHYSMEM_BITS 56
+#else
+#define MAX_PHYSMEM_BITS 34
+#endif /* CONFIG_64BIT */
#define SECTION_SIZE_BITS 27
#endif /* CONFIG_SPARSEMEM */
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index f314ff44c48d..c701a5e57a2b 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -8,6 +8,7 @@
#ifndef _ASM_RISCV_UACCESS_H
#define _ASM_RISCV_UACCESS_H
+#include <asm/asm-extable.h>
#include <asm/pgtable.h> /* for TASK_SIZE */
/*
@@ -80,25 +81,14 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
#define __get_user_asm(insn, x, ptr, err) \
do { \
- uintptr_t __tmp; \
__typeof__(x) __x; \
__asm__ __volatile__ ( \
"1:\n" \
- " " insn " %1, %3\n" \
+ " " insn " %1, %2\n" \
"2:\n" \
- " .section .fixup,\"ax\"\n" \
- " .balign 4\n" \
- "3:\n" \
- " li %0, %4\n" \
- " li %1, 0\n" \
- " jump 2b, %2\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .balign " RISCV_SZPTR "\n" \
- " " RISCV_PTR " 1b, 3b\n" \
- " .previous" \
- : "+r" (err), "=&r" (__x), "=r" (__tmp) \
- : "m" (*(ptr)), "i" (-EFAULT)); \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1) \
+ : "+r" (err), "=&r" (__x) \
+ : "m" (*(ptr))); \
(x) = __x; \
} while (0)
@@ -110,30 +100,18 @@ do { \
do { \
u32 __user *__ptr = (u32 __user *)(ptr); \
u32 __lo, __hi; \
- uintptr_t __tmp; \
__asm__ __volatile__ ( \
"1:\n" \
- " lw %1, %4\n" \
+ " lw %1, %3\n" \
"2:\n" \
- " lw %2, %5\n" \
+ " lw %2, %4\n" \
"3:\n" \
- " .section .fixup,\"ax\"\n" \
- " .balign 4\n" \
- "4:\n" \
- " li %0, %6\n" \
- " li %1, 0\n" \
- " li %2, 0\n" \
- " jump 3b, %3\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .balign " RISCV_SZPTR "\n" \
- " " RISCV_PTR " 1b, 4b\n" \
- " " RISCV_PTR " 2b, 4b\n" \
- " .previous" \
- : "+r" (err), "=&r" (__lo), "=r" (__hi), \
- "=r" (__tmp) \
- : "m" (__ptr[__LSW]), "m" (__ptr[__MSW]), \
- "i" (-EFAULT)); \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \
+ : "+r" (err), "=&r" (__lo), "=r" (__hi) \
+ : "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \
+ if (err) \
+ __hi = 0; \
(x) = (__typeof__(x))((__typeof__((x)-(x)))( \
(((u64)__hi << 32) | __lo))); \
} while (0)
@@ -221,24 +199,14 @@ do { \
#define __put_user_asm(insn, x, ptr, err) \
do { \
- uintptr_t __tmp; \
__typeof__(*(ptr)) __x = x; \
__asm__ __volatile__ ( \
"1:\n" \
- " " insn " %z3, %2\n" \
+ " " insn " %z2, %1\n" \
"2:\n" \
- " .section .fixup,\"ax\"\n" \
- " .balign 4\n" \
- "3:\n" \
- " li %0, %4\n" \
- " jump 2b, %1\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .balign " RISCV_SZPTR "\n" \
- " " RISCV_PTR " 1b, 3b\n" \
- " .previous" \
- : "+r" (err), "=r" (__tmp), "=m" (*(ptr)) \
- : "rJ" (__x), "i" (-EFAULT)); \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0) \
+ : "+r" (err), "=m" (*(ptr)) \
+ : "rJ" (__x)); \
} while (0)
#ifdef CONFIG_64BIT
@@ -249,28 +217,18 @@ do { \
do { \
u32 __user *__ptr = (u32 __user *)(ptr); \
u64 __x = (__typeof__((x)-(x)))(x); \
- uintptr_t __tmp; \
__asm__ __volatile__ ( \
"1:\n" \
- " sw %z4, %2\n" \
+ " sw %z3, %1\n" \
"2:\n" \
- " sw %z5, %3\n" \
+ " sw %z4, %2\n" \
"3:\n" \
- " .section .fixup,\"ax\"\n" \
- " .balign 4\n" \
- "4:\n" \
- " li %0, %6\n" \
- " jump 3b, %1\n" \
- " .previous\n" \
- " .section __ex_table,\"a\"\n" \
- " .balign " RISCV_SZPTR "\n" \
- " " RISCV_PTR " 1b, 4b\n" \
- " " RISCV_PTR " 2b, 4b\n" \
- " .previous" \
- : "+r" (err), "=r" (__tmp), \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \
+ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \
+ : "+r" (err), \
"=m" (__ptr[__LSW]), \
"=m" (__ptr[__MSW]) \
- : "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
+ : "rJ" (__x), "rJ" (__x >> 32)); \
} while (0)
#endif /* CONFIG_64BIT */
@@ -388,81 +346,6 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n)
__clear_user(to, n) : n;
}
-/*
- * Atomic compare-and-exchange, but with a fixup for userspace faults. Faults
- * will set "err" to -EFAULT, while successful accesses return the previous
- * value.
- */
-#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \
-({ \
- __typeof__(ptr) __ptr = (ptr); \
- __typeof__(*(ptr)) __old = (old); \
- __typeof__(*(ptr)) __new = (new); \
- __typeof__(*(ptr)) __ret; \
- __typeof__(err) __err = 0; \
- register unsigned int __rc; \
- __enable_user_access(); \
- switch (size) { \
- case 4: \
- __asm__ __volatile__ ( \
- "0:\n" \
- " lr.w" #scb " %[ret], %[ptr]\n" \
- " bne %[ret], %z[old], 1f\n" \
- " sc.w" #lrb " %[rc], %z[new], %[ptr]\n" \
- " bnez %[rc], 0b\n" \
- "1:\n" \
- ".section .fixup,\"ax\"\n" \
- ".balign 4\n" \
- "2:\n" \
- " li %[err], %[efault]\n" \
- " jump 1b, %[rc]\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- ".balign " RISCV_SZPTR "\n" \
- " " RISCV_PTR " 1b, 2b\n" \
- ".previous\n" \
- : [ret] "=&r" (__ret), \
- [rc] "=&r" (__rc), \
- [ptr] "+A" (*__ptr), \
- [err] "=&r" (__err) \
- : [old] "rJ" (__old), \
- [new] "rJ" (__new), \
- [efault] "i" (-EFAULT)); \
- break; \
- case 8: \
- __asm__ __volatile__ ( \
- "0:\n" \
- " lr.d" #scb " %[ret], %[ptr]\n" \
- " bne %[ret], %z[old], 1f\n" \
- " sc.d" #lrb " %[rc], %z[new], %[ptr]\n" \
- " bnez %[rc], 0b\n" \
- "1:\n" \
- ".section .fixup,\"ax\"\n" \
- ".balign 4\n" \
- "2:\n" \
- " li %[err], %[efault]\n" \
- " jump 1b, %[rc]\n" \
- ".previous\n" \
- ".section __ex_table,\"a\"\n" \
- ".balign " RISCV_SZPTR "\n" \
- " " RISCV_PTR " 1b, 2b\n" \
- ".previous\n" \
- : [ret] "=&r" (__ret), \
- [rc] "=&r" (__rc), \
- [ptr] "+A" (*__ptr), \
- [err] "=&r" (__err) \
- : [old] "rJ" (__old), \
- [new] "rJ" (__new), \
- [efault] "i" (-EFAULT)); \
- break; \
- default: \
- BUILD_BUG(); \
- } \
- __disable_user_access(); \
- (err) = __err; \
- __ret; \
-})
-
#define HAVE_GET_KERNEL_NOFAULT
#define __get_kernel_nofault(dst, src, type, err_label) \
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 3397ddac1a30..612556faa527 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -43,7 +43,8 @@ obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_SMP) += smpboot.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP) += cpu_ops.o
-obj-$(CONFIG_SMP) += cpu_ops_spinwait.o
+
+obj-$(CONFIG_RISCV_BOOT_SPINWAIT) += cpu_ops_spinwait.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 253126e4beef..df0519a64eaf 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -12,6 +12,7 @@
#include <asm/kvm_host.h>
#include <asm/thread_info.h>
#include <asm/ptrace.h>
+#include <asm/cpu_ops_sbi.h>
void asm_offsets(void);
@@ -468,4 +469,6 @@ void asm_offsets(void)
DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN));
OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr);
+ OFFSET(SBI_HART_BOOT_TASK_PTR_OFFSET, sbi_hart_boot_data, task_ptr);
+ OFFSET(SBI_HART_BOOT_STACK_PTR_OFFSET, sbi_hart_boot_data, stack_ptr);
}
diff --git a/arch/riscv/kernel/cpu-hotplug.c b/arch/riscv/kernel/cpu-hotplug.c
index df84e0c13db1..f7a832e3a1d1 100644
--- a/arch/riscv/kernel/cpu-hotplug.c
+++ b/arch/riscv/kernel/cpu-hotplug.c
@@ -12,14 +12,9 @@
#include <linux/sched/hotplug.h>
#include <asm/irq.h>
#include <asm/cpu_ops.h>
+#include <asm/numa.h>
#include <asm/sbi.h>
-void cpu_stop(void);
-void arch_cpu_idle_dead(void)
-{
- cpu_stop();
-}
-
bool cpu_has_hotplug(unsigned int cpu)
{
if (cpu_ops[cpu]->cpu_stop)
@@ -46,6 +41,7 @@ int __cpu_disable(void)
return ret;
remove_cpu_topology(cpu);
+ numa_remove_cpu(cpu);
set_cpu_online(cpu, false);
irq_migrate_all_off_this_cpu();
@@ -75,7 +71,7 @@ void __cpu_die(unsigned int cpu)
/*
* Called from the idle thread for the CPU which has been shutdown.
*/
-void cpu_stop(void)
+void arch_cpu_idle_dead(void)
{
idle_task_exit();
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index f13b2c9ea912..ad0a7e9f828b 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -7,6 +7,7 @@
#include <linux/seq_file.h>
#include <linux/of.h>
#include <asm/smp.h>
+#include <asm/pgtable.h>
/*
* Returns the hart ID of the given device tree node, or -ENODEV if the node
@@ -71,18 +72,19 @@ static void print_isa(struct seq_file *f, const char *isa)
seq_puts(f, "\n");
}
-static void print_mmu(struct seq_file *f, const char *mmu_type)
+static void print_mmu(struct seq_file *f)
{
+ char sv_type[16];
+
#if defined(CONFIG_32BIT)
- if (strcmp(mmu_type, "riscv,sv32") != 0)
- return;
+ strncpy(sv_type, "sv32", 5);
#elif defined(CONFIG_64BIT)
- if (strcmp(mmu_type, "riscv,sv39") != 0 &&
- strcmp(mmu_type, "riscv,sv48") != 0)
- return;
+ if (pgtable_l4_enabled)
+ strncpy(sv_type, "sv48", 5);
+ else
+ strncpy(sv_type, "sv39", 5);
#endif
-
- seq_printf(f, "mmu\t\t: %s\n", mmu_type+6);
+ seq_printf(f, "mmu\t\t: %s\n", sv_type);
}
static void *c_start(struct seq_file *m, loff_t *pos)
@@ -107,14 +109,13 @@ static int c_show(struct seq_file *m, void *v)
{
unsigned long cpu_id = (unsigned long)v - 1;
struct device_node *node = of_get_cpu_node(cpu_id, NULL);
- const char *compat, *isa, *mmu;
+ const char *compat, *isa;
seq_printf(m, "processor\t: %lu\n", cpu_id);
seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));
if (!of_property_read_string(node, "riscv,isa", &isa))
print_isa(m, isa);
- if (!of_property_read_string(node, "mmu-type", &mmu))
- print_mmu(m, mmu);
+ print_mmu(m);
if (!of_property_read_string(node, "compatible", &compat)
&& strcmp(compat, "riscv"))
seq_printf(m, "uarch\t\t: %s\n", compat);
diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c
index 1985884fe829..170d07e57721 100644
--- a/arch/riscv/kernel/cpu_ops.c
+++ b/arch/riscv/kernel/cpu_ops.c
@@ -8,37 +8,29 @@
#include <linux/of.h>
#include <linux/string.h>
#include <linux/sched.h>
-#include <linux/sched/task_stack.h>
#include <asm/cpu_ops.h>
#include <asm/sbi.h>
#include <asm/smp.h>
const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
-void *__cpu_up_stack_pointer[NR_CPUS] __section(".data");
-void *__cpu_up_task_pointer[NR_CPUS] __section(".data");
-
extern const struct cpu_operations cpu_ops_sbi;
+#ifdef CONFIG_RISCV_BOOT_SPINWAIT
extern const struct cpu_operations cpu_ops_spinwait;
-
-void cpu_update_secondary_bootdata(unsigned int cpuid,
- struct task_struct *tidle)
-{
- int hartid = cpuid_to_hartid_map(cpuid);
-
- /* Make sure tidle is updated */
- smp_mb();
- WRITE_ONCE(__cpu_up_stack_pointer[hartid],
- task_stack_page(tidle) + THREAD_SIZE);
- WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
-}
+#else
+const struct cpu_operations cpu_ops_spinwait = {
+ .name = "",
+ .cpu_prepare = NULL,
+ .cpu_start = NULL,
+};
+#endif
void __init cpu_set_ops(int cpuid)
{
#if IS_ENABLED(CONFIG_RISCV_SBI)
if (sbi_probe_extension(SBI_EXT_HSM) > 0) {
if (!cpuid)
- pr_info("SBI v0.2 HSM extension detected\n");
+ pr_info("SBI HSM extension detected\n");
cpu_ops[cpuid] = &cpu_ops_sbi;
} else
#endif
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
index 685fae72b7f5..dae29cbfe550 100644
--- a/arch/riscv/kernel/cpu_ops_sbi.c
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -7,13 +7,22 @@
#include <linux/init.h>
#include <linux/mm.h>
+#include <linux/sched/task_stack.h>
#include <asm/cpu_ops.h>
+#include <asm/cpu_ops_sbi.h>
#include <asm/sbi.h>
#include <asm/smp.h>
extern char secondary_start_sbi[];
const struct cpu_operations cpu_ops_sbi;
+/*
+ * Ordered booting via HSM brings up one cpu at a time. However, cpu hotplug
+ * can be invoked from multiple threads in parallel. Define per-cpu data
+ * to handle that.
+ */
+DEFINE_PER_CPU(struct sbi_hart_boot_data, boot_data);
+
static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
unsigned long priv)
{
@@ -55,14 +64,19 @@ static int sbi_hsm_hart_get_status(unsigned long hartid)
static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{
- int rc;
unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
int hartid = cpuid_to_hartid_map(cpuid);
-
- cpu_update_secondary_bootdata(cpuid, tidle);
- rc = sbi_hsm_hart_start(hartid, boot_addr, 0);
-
- return rc;
+ unsigned long hsm_data;
+ struct sbi_hart_boot_data *bdata = &per_cpu(boot_data, cpuid);
+
+ /* Make sure tidle is updated */
+ smp_mb();
+ bdata->task_ptr = tidle;
+ bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
+ /* Make sure boot data is updated */
+ smp_mb();
+ hsm_data = __pa(bdata);
+ return sbi_hsm_hart_start(hartid, boot_addr, hsm_data);
}
static int sbi_cpu_prepare(unsigned int cpuid)
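
The handoff in sbi_cpu_start() is a publish/consume pattern: fill the per-cpu boot data, order the writes, then hand the physical address to firmware, which passes it to the secondary hart in a1. A minimal standalone sketch of the same pattern, with C11 atomics and a pthread standing in for the secondary hart (all names hypothetical):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct boot_data {
	void *task_ptr;
	void *stack_ptr;
};

static struct boot_data bdata;
static atomic_uintptr_t hsm_data;	/* stands in for the HSM a1 argument */

static void *secondary(void *arg)
{
	uintptr_t p;

	/* Spin until the boot hart publishes the boot data. */
	while (!(p = atomic_load_explicit(&hsm_data, memory_order_acquire)))
		;
	struct boot_data *bd = (struct boot_data *)p;
	printf("secondary: tp=%p sp=%p\n", bd->task_ptr, bd->stack_ptr);
	return NULL;
}

int main(void)
{
	pthread_t t;
	static long stack[512];

	pthread_create(&t, NULL, secondary, NULL);
	bdata.task_ptr = &t;		/* hypothetical idle task */
	bdata.stack_ptr = &stack[512];	/* top of a hypothetical stack */
	/* The release store pairs with the acquire load in secondary(). */
	atomic_store_explicit(&hsm_data, (uintptr_t)&bdata,
			      memory_order_release);
	pthread_join(t, NULL);
	return 0;
}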
diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
index b2c957bb68c1..346847f6c41c 100644
--- a/arch/riscv/kernel/cpu_ops_spinwait.c
+++ b/arch/riscv/kernel/cpu_ops_spinwait.c
@@ -6,11 +6,36 @@
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/string.h>
+#include <linux/sched/task_stack.h>
#include <asm/cpu_ops.h>
#include <asm/sbi.h>
#include <asm/smp.h>
const struct cpu_operations cpu_ops_spinwait;
+void *__cpu_spinwait_stack_pointer[NR_CPUS] __section(".data");
+void *__cpu_spinwait_task_pointer[NR_CPUS] __section(".data");
+
+static void cpu_update_secondary_bootdata(unsigned int cpuid,
+ struct task_struct *tidle)
+{
+ int hartid = cpuid_to_hartid_map(cpuid);
+
+ /*
+	 * The hartid must be less than NR_CPUS to avoid out-of-bounds accesses
+	 * to __cpu_spinwait_stack/task_pointer. That is not always possible for
+	 * platforms with a discontiguous hartid numbering scheme, which is why
+	 * spinwait booting is not the recommended approach for platforms
+	 * booting Linux in S-mode and may be disabled in the future.
+ */
+ if (hartid == INVALID_HARTID || hartid >= NR_CPUS)
+ return;
+
+ /* Make sure tidle is updated */
+ smp_mb();
+ WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid],
+ task_stack_page(tidle) + THREAD_SIZE);
+ WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
+}
static int spinwait_cpu_prepare(unsigned int cpuid)
{
@@ -28,7 +53,7 @@ static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle)
* selects the first cpu to boot the kernel and causes the remainder
* of the cpus to spin in a loop waiting for their stack pointer to be
* setup by that main cpu. Writing to bootdata
- * (i.e __cpu_up_stack_pointer) signals to the spinning cpus that they
+	 * (i.e., __cpu_spinwait_stack_pointer) signals to the spinning cpus that they
* can continue the boot process.
*/
cpu_update_secondary_bootdata(cpuid, tidle);
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index f52f01ecbeea..ec07f991866a 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -11,6 +11,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
+#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include "efi-header.S"
@@ -21,14 +22,13 @@
add \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
- la t1, __data_loc
- li t0, XIP_OFFSET_MASK
- and t1, t1, t0
- li t1, XIP_OFFSET
- sub t0, t0, t1
- sub \reg, \reg, t0
+ la t0, __data_loc
+ REG_L t1, _xip_phys_offset
+ sub \reg, \reg, t1
+ add \reg, \reg, t0
.endm
_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
#else
.macro XIP_FIXUP_OFFSET reg
.endm
@@ -105,7 +105,8 @@ relocate:
/* Compute satp for kernel page tables, but don't load it yet */
srl a2, a0, PAGE_SHIFT
- li a1, SATP_MODE
+ la a1, satp_mode
+ REG_L a1, 0(a1)
or a2, a2, a1
/*
@@ -135,7 +136,7 @@ relocate:
/*
* Switch to kernel page tables. A full fence is necessary in order to
* avoid using the trampoline translations, which are only correct for
- * the first superpage. Fetching the fence is guarnteed to work
+ * the first superpage. Fetching the fence is guaranteed to work
* because that first superpage is translated the same way.
*/
csrw CSR_SATP, a2
@@ -167,18 +168,17 @@ secondary_start_sbi:
la a3, .Lsecondary_park
csrw CSR_TVEC, a3
- slli a3, a0, LGREG
- la a4, __cpu_up_stack_pointer
- XIP_FIXUP_OFFSET a4
- la a5, __cpu_up_task_pointer
- XIP_FIXUP_OFFSET a5
- add a4, a3, a4
- add a5, a3, a5
- REG_L sp, (a4)
- REG_L tp, (a5)
-
- .global secondary_start_common
-secondary_start_common:
+ /* a0 contains the hartid & a1 contains boot data */
+ li a2, SBI_HART_BOOT_TASK_PTR_OFFSET
+ XIP_FIXUP_OFFSET a2
+ add a2, a2, a1
+ REG_L tp, (a2)
+ li a3, SBI_HART_BOOT_STACK_PTR_OFFSET
+ XIP_FIXUP_OFFSET a3
+ add a3, a3, a1
+ REG_L sp, (a3)
+
+.Lsecondary_start_common:
#ifdef CONFIG_MMU
/* Enable virtual memory and relocate to virtual address */
@@ -258,13 +258,13 @@ pmp_done:
li t0, SR_FS
csrc CSR_STATUS, t0
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RISCV_BOOT_SPINWAIT
li t0, CONFIG_NR_CPUS
blt a0, t0, .Lgood_cores
tail .Lsecondary_park
.Lgood_cores:
-#endif
+	/* The lottery system is only required for the spinwait booting method */
#ifndef CONFIG_XIP_KERNEL
/* Pick one hart to run the main boot sequence */
la a3, hart_lottery
@@ -283,6 +283,10 @@ pmp_done:
/* first time here if hart_lottery in RAM is not set */
beq t0, t1, .Lsecondary_start
+#endif /* CONFIG_XIP_KERNEL */
+#endif /* CONFIG_RISCV_BOOT_SPINWAIT */
+
+#ifdef CONFIG_XIP_KERNEL
la sp, _end + THREAD_SIZE
XIP_FIXUP_OFFSET sp
mv s0, a0
@@ -339,16 +343,16 @@ clear_bss_done:
call soc_early_init
tail start_kernel
+#ifdef CONFIG_RISCV_BOOT_SPINWAIT
.Lsecondary_start:
-#ifdef CONFIG_SMP
/* Set trap vector to spin forever to help debug */
la a3, .Lsecondary_park
csrw CSR_TVEC, a3
slli a3, a0, LGREG
- la a1, __cpu_up_stack_pointer
+ la a1, __cpu_spinwait_stack_pointer
XIP_FIXUP_OFFSET a1
- la a2, __cpu_up_task_pointer
+ la a2, __cpu_spinwait_task_pointer
XIP_FIXUP_OFFSET a2
add a1, a3, a1
add a2, a3, a2
@@ -365,8 +369,8 @@ clear_bss_done:
beqz tp, .Lwait_for_cpu_up
fence
- tail secondary_start_common
-#endif
+ tail .Lsecondary_start_common
+#endif /* CONFIG_RISCV_BOOT_SPINWAIT */
END(_start_kernel)
@@ -448,7 +452,3 @@ ENTRY(reset_regs)
ret
END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */
-
-__PAGE_ALIGNED_BSS
- /* Empty zero page */
- .balign PAGE_SIZE
diff --git a/arch/riscv/kernel/head.h b/arch/riscv/kernel/head.h
index aabbc3ac3e48..726731ada534 100644
--- a/arch/riscv/kernel/head.h
+++ b/arch/riscv/kernel/head.h
@@ -16,7 +16,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa);
asmlinkage void __init __copy_data(void);
#endif
-extern void *__cpu_up_stack_pointer[];
-extern void *__cpu_up_task_pointer[];
+#ifdef CONFIG_RISCV_BOOT_SPINWAIT
+extern void *__cpu_spinwait_stack_pointer[];
+extern void *__cpu_spinwait_task_pointer[];
+#endif
#endif /* __ASM_HEAD_H */
diff --git a/arch/riscv/kernel/kexec_relocate.S b/arch/riscv/kernel/kexec_relocate.S
index a80b52a74f58..059c5e216ae7 100644
--- a/arch/riscv/kernel/kexec_relocate.S
+++ b/arch/riscv/kernel/kexec_relocate.S
@@ -159,25 +159,15 @@ SYM_CODE_START(riscv_kexec_norelocate)
* s0: (const) Phys address to jump to
* s1: (const) Phys address of the FDT image
* s2: (const) The hartid of the current hart
- * s3: (const) kernel_map.va_pa_offset, used when switching MMU off
*/
mv s0, a1
mv s1, a2
mv s2, a3
- mv s3, a4
/* Disable / cleanup interrupts */
csrw CSR_SIE, zero
csrw CSR_SIP, zero
- /* Switch to physical addressing */
- la s4, 1f
- sub s4, s4, s3
- csrw CSR_STVEC, s4
- csrw CSR_SATP, zero
-
-.align 2
-1:
/* Pass the arguments to the next kernel / Cleanup*/
mv a0, s2
mv a1, s1
@@ -214,7 +204,15 @@ SYM_CODE_START(riscv_kexec_norelocate)
csrw CSR_SCAUSE, zero
csrw CSR_SSCRATCH, zero
- jalr zero, a2, 0
+ /*
+	 * Switch to physical addressing. This will also trigger a jump
+	 * to CSR_STVEC, which in this case is the address of the new
+	 * kernel.
+ */
+ csrw CSR_STVEC, a2
+ csrw CSR_SATP, zero
+
SYM_CODE_END(riscv_kexec_norelocate)
.section ".rodata"
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c
index e6eca271a4d6..cbef0fc73afa 100644
--- a/arch/riscv/kernel/machine_kexec.c
+++ b/arch/riscv/kernel/machine_kexec.c
@@ -169,7 +169,8 @@ machine_kexec(struct kimage *image)
struct kimage_arch *internal = &image->arch;
unsigned long jump_addr = (unsigned long) image->start;
unsigned long first_ind_entry = (unsigned long) &image->head;
- unsigned long this_hart_id = raw_smp_processor_id();
+ unsigned long this_cpu_id = smp_processor_id();
+ unsigned long this_hart_id = cpuid_to_hartid_map(this_cpu_id);
unsigned long fdt_addr = internal->fdt_addr;
void *control_code_buffer = page_address(image->control_code_page);
riscv_kexec_method kexec_method = NULL;
diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
index 0bb1854dce83..1fc075b8f764 100644
--- a/arch/riscv/kernel/perf_callchain.c
+++ b/arch/riscv/kernel/perf_callchain.c
@@ -58,10 +58,6 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
{
unsigned long fp = 0;
- /* RISC-V does not support perf in guest mode. */
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- return;
-
fp = regs->s0;
perf_callchain_store(entry, regs->epc);
@@ -78,11 +74,5 @@ static bool fill_callchain(void *entry, unsigned long pc)
void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
struct pt_regs *regs)
{
- /* RISC-V does not support perf in guest mode. */
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- pr_warn("RISC-V does not support perf in guest mode!");
- return;
- }
-
walk_stackframe(NULL, regs, fill_callchain, entry);
}
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 9c0511119bad..a89243730153 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -42,12 +42,10 @@ static int riscv_gpr_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- int ret;
struct pt_regs *regs;
regs = task_pt_regs(target);
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
- return ret;
+ return user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
}
#ifdef CONFIG_FPU
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 7402a417f38e..775d3322b422 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -5,8 +5,10 @@
* Copyright (c) 2020 Western Digital Corporation or its affiliates.
*/
+#include <linux/bits.h>
#include <linux/init.h>
#include <linux/pm.h>
+#include <linux/reboot.h>
#include <asm/sbi.h>
#include <asm/smp.h>
@@ -15,8 +17,8 @@ unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
EXPORT_SYMBOL(sbi_spec_version);
static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init;
-static int (*__sbi_send_ipi)(const unsigned long *hart_mask) __ro_after_init;
-static int (*__sbi_rfence)(int fid, const unsigned long *hart_mask,
+static int (*__sbi_send_ipi)(const struct cpumask *cpu_mask) __ro_after_init;
+static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
unsigned long start, unsigned long size,
unsigned long arg4, unsigned long arg5) __ro_after_init;
@@ -66,6 +68,30 @@ int sbi_err_map_linux_errno(int err)
EXPORT_SYMBOL(sbi_err_map_linux_errno);
#ifdef CONFIG_RISCV_SBI_V01
+static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask)
+{
+ unsigned long cpuid, hartid;
+ unsigned long hmask = 0;
+
+ /*
+ * There is no maximum hartid concept in RISC-V and NR_CPUS must not be
+ * associated with hartid. As SBI v0.1 is only kept for backward compatibility
+	 * and will be removed in the future, there is no point in supporting
+	 * hartids greater than or equal to BITS_PER_LONG (32 on RV32 and 64 on
+	 * RV64). Ideally, SBI v0.2 should be used on platforms with such hartids.
+ */
+ for_each_cpu(cpuid, cpu_mask) {
+ hartid = cpuid_to_hartid_map(cpuid);
+ if (hartid >= BITS_PER_LONG) {
+ pr_warn("Unable to send any request to hartid > BITS_PER_LONG for SBI v0.1\n");
+ break;
+ }
+ hmask |= BIT(hartid);
+ }
+
+ return hmask;
+}
+
/**
* sbi_console_putchar() - Writes given character to the console device.
* @ch: The data to be written to the console.
@@ -131,33 +157,44 @@ static void __sbi_set_timer_v01(uint64_t stime_value)
#endif
}
-static int __sbi_send_ipi_v01(const unsigned long *hart_mask)
+static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
{
- sbi_ecall(SBI_EXT_0_1_SEND_IPI, 0, (unsigned long)hart_mask,
+ unsigned long hart_mask;
+
+ if (!cpu_mask || cpumask_empty(cpu_mask))
+ cpu_mask = cpu_online_mask;
+ hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);
+
+ sbi_ecall(SBI_EXT_0_1_SEND_IPI, 0, (unsigned long)(&hart_mask),
0, 0, 0, 0, 0);
return 0;
}
-static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
+static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
unsigned long start, unsigned long size,
unsigned long arg4, unsigned long arg5)
{
int result = 0;
+ unsigned long hart_mask;
+
+ if (!cpu_mask || cpumask_empty(cpu_mask))
+ cpu_mask = cpu_online_mask;
+ hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);
/* v0.2 function IDs are equivalent to v0.1 extension IDs */
switch (fid) {
case SBI_EXT_RFENCE_REMOTE_FENCE_I:
sbi_ecall(SBI_EXT_0_1_REMOTE_FENCE_I, 0,
- (unsigned long)hart_mask, 0, 0, 0, 0, 0);
+ (unsigned long)&hart_mask, 0, 0, 0, 0, 0);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA, 0,
- (unsigned long)hart_mask, start, size,
+ (unsigned long)&hart_mask, start, size,
0, 0, 0);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID, 0,
- (unsigned long)hart_mask, start, size,
+ (unsigned long)&hart_mask, start, size,
arg4, 0, 0);
break;
default:
@@ -179,7 +216,7 @@ static void __sbi_set_timer_v01(uint64_t stime_value)
sbi_major_version(), sbi_minor_version());
}
-static int __sbi_send_ipi_v01(const unsigned long *hart_mask)
+static int __sbi_send_ipi_v01(const struct cpumask *cpu_mask)
{
pr_warn("IPI extension is not available in SBI v%lu.%lu\n",
sbi_major_version(), sbi_minor_version());
@@ -187,7 +224,7 @@ static int __sbi_send_ipi_v01(const unsigned long *hart_mask)
return 0;
}
-static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
+static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
unsigned long start, unsigned long size,
unsigned long arg4, unsigned long arg5)
{
@@ -211,37 +248,44 @@ static void __sbi_set_timer_v02(uint64_t stime_value)
#endif
}
-static int __sbi_send_ipi_v02(const unsigned long *hart_mask)
+static int __sbi_send_ipi_v02(const struct cpumask *cpu_mask)
{
- unsigned long hartid, hmask_val, hbase;
- struct cpumask tmask;
+ unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
struct sbiret ret = {0};
int result;
- if (!hart_mask || !(*hart_mask)) {
- riscv_cpuid_to_hartid_mask(cpu_online_mask, &tmask);
- hart_mask = cpumask_bits(&tmask);
- }
-
- hmask_val = 0;
- hbase = 0;
- for_each_set_bit(hartid, hart_mask, NR_CPUS) {
- if (hmask_val && ((hbase + BITS_PER_LONG) <= hartid)) {
- ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
- hmask_val, hbase, 0, 0, 0, 0);
- if (ret.error)
- goto ecall_failed;
- hmask_val = 0;
- hbase = 0;
+ if (!cpu_mask || cpumask_empty(cpu_mask))
+ cpu_mask = cpu_online_mask;
+
+ for_each_cpu(cpuid, cpu_mask) {
+ hartid = cpuid_to_hartid_map(cpuid);
+ if (hmask) {
+ if (hartid + BITS_PER_LONG <= htop ||
+ hbase + BITS_PER_LONG <= hartid) {
+ ret = sbi_ecall(SBI_EXT_IPI,
+ SBI_EXT_IPI_SEND_IPI, hmask,
+ hbase, 0, 0, 0, 0);
+ if (ret.error)
+ goto ecall_failed;
+ hmask = 0;
+ } else if (hartid < hbase) {
+				/* shift the mask to fit the lower hartid */
+ hmask <<= hbase - hartid;
+ hbase = hartid;
+ }
}
- if (!hmask_val)
+ if (!hmask) {
hbase = hartid;
- hmask_val |= 1UL << (hartid - hbase);
+ htop = hartid;
+ } else if (hartid > htop) {
+ htop = hartid;
+ }
+ hmask |= BIT(hartid - hbase);
}
- if (hmask_val) {
+ if (hmask) {
ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
- hmask_val, hbase, 0, 0, 0, 0);
+ hmask, hbase, 0, 0, 0, 0);
if (ret.error)
goto ecall_failed;
}
@@ -251,11 +295,11 @@ static int __sbi_send_ipi_v02(const unsigned long *hart_mask)
ecall_failed:
result = sbi_err_map_linux_errno(ret.error);
pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
- __func__, hbase, hmask_val, result);
+ __func__, hbase, hmask, result);
return result;
}
-static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask_val,
+static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask,
unsigned long hbase, unsigned long start,
unsigned long size, unsigned long arg4,
unsigned long arg5)
@@ -266,31 +310,31 @@ static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask_val,
switch (fid) {
case SBI_EXT_RFENCE_REMOTE_FENCE_I:
- ret = sbi_ecall(ext, fid, hmask_val, hbase, 0, 0, 0, 0);
+ ret = sbi_ecall(ext, fid, hmask, hbase, 0, 0, 0, 0);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
- ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
+ ret = sbi_ecall(ext, fid, hmask, hbase, start,
size, 0, 0);
break;
case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
- ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
+ ret = sbi_ecall(ext, fid, hmask, hbase, start,
size, arg4, 0);
break;
case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
- ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
+ ret = sbi_ecall(ext, fid, hmask, hbase, start,
size, 0, 0);
break;
case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
- ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
+ ret = sbi_ecall(ext, fid, hmask, hbase, start,
size, arg4, 0);
break;
case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
- ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
+ ret = sbi_ecall(ext, fid, hmask, hbase, start,
size, 0, 0);
break;
case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
- ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
+ ret = sbi_ecall(ext, fid, hmask, hbase, start,
size, arg4, 0);
break;
default:
@@ -302,43 +346,49 @@ static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask_val,
if (ret.error) {
result = sbi_err_map_linux_errno(ret.error);
pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
- __func__, hbase, hmask_val, result);
+ __func__, hbase, hmask, result);
}
return result;
}
-static int __sbi_rfence_v02(int fid, const unsigned long *hart_mask,
+static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
unsigned long start, unsigned long size,
unsigned long arg4, unsigned long arg5)
{
- unsigned long hmask_val, hartid, hbase;
- struct cpumask tmask;
+ unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
int result;
- if (!hart_mask || !(*hart_mask)) {
- riscv_cpuid_to_hartid_mask(cpu_online_mask, &tmask);
- hart_mask = cpumask_bits(&tmask);
- }
-
- hmask_val = 0;
- hbase = 0;
- for_each_set_bit(hartid, hart_mask, NR_CPUS) {
- if (hmask_val && ((hbase + BITS_PER_LONG) <= hartid)) {
- result = __sbi_rfence_v02_call(fid, hmask_val, hbase,
- start, size, arg4, arg5);
- if (result)
- return result;
- hmask_val = 0;
- hbase = 0;
+ if (!cpu_mask || cpumask_empty(cpu_mask))
+ cpu_mask = cpu_online_mask;
+
+ for_each_cpu(cpuid, cpu_mask) {
+ hartid = cpuid_to_hartid_map(cpuid);
+ if (hmask) {
+ if (hartid + BITS_PER_LONG <= htop ||
+ hbase + BITS_PER_LONG <= hartid) {
+ result = __sbi_rfence_v02_call(fid, hmask,
+ hbase, start, size, arg4, arg5);
+ if (result)
+ return result;
+ hmask = 0;
+ } else if (hartid < hbase) {
+				/* shift the mask to fit the lower hartid */
+ hmask <<= hbase - hartid;
+ hbase = hartid;
+ }
}
- if (!hmask_val)
+ if (!hmask) {
hbase = hartid;
- hmask_val |= 1UL << (hartid - hbase);
+ htop = hartid;
+ } else if (hartid > htop) {
+ htop = hartid;
+ }
+ hmask |= BIT(hartid - hbase);
}
- if (hmask_val) {
- result = __sbi_rfence_v02_call(fid, hmask_val, hbase,
+ if (hmask) {
+ result = __sbi_rfence_v02_call(fid, hmask, hbase,
start, size, arg4, arg5);
if (result)
return result;
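
The hmask/hbase/htop bookkeeping above batches arbitrary, possibly discontiguous hartids into (hbase, hmask) pairs, each covering at most a BITS_PER_LONG-wide window, and issues one ecall per window. A standalone sketch of the same loop with hypothetical hartids (printf stands in for the ecall):

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static void flush(unsigned long hmask, unsigned long hbase)
{
	printf("ecall: hbase=%lu hmask=0x%lx\n", hbase, hmask);
}

int main(void)
{
	/* Hypothetical cpuid-to-hartid map with discontiguous hartids. */
	unsigned long harts[] = { 64, 2, 1, 130, 67 };
	unsigned long hmask = 0, hbase = 0, htop = 0;

	for (size_t i = 0; i < sizeof(harts) / sizeof(harts[0]); i++) {
		unsigned long hartid = harts[i];

		if (hmask) {
			if (hartid + BITS_PER_LONG <= htop ||
			    hbase + BITS_PER_LONG <= hartid) {
				/* hartid cannot fit in the current window. */
				flush(hmask, hbase);
				hmask = 0;
			} else if (hartid < hbase) {
				/* Shift the mask to fit the lower hartid. */
				hmask <<= hbase - hartid;
				hbase = hartid;
			}
		}
		if (!hmask) {
			hbase = hartid;
			htop = hartid;
		} else if (hartid > htop) {
			htop = hartid;
		}
		hmask |= 1UL << (hartid - hbase);
	}
	if (hmask)
		flush(hmask, hbase);	/* emits the final partial window */
	return 0;
}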
@@ -360,44 +410,44 @@ void sbi_set_timer(uint64_t stime_value)
/**
* sbi_send_ipi() - Send an IPI to any hart.
- * @hart_mask: A cpu mask containing all the target harts.
+ * @cpu_mask: A cpu mask containing all the target harts.
*
* Return: 0 on success, appropriate linux error code otherwise.
*/
-int sbi_send_ipi(const unsigned long *hart_mask)
+int sbi_send_ipi(const struct cpumask *cpu_mask)
{
- return __sbi_send_ipi(hart_mask);
+ return __sbi_send_ipi(cpu_mask);
}
EXPORT_SYMBOL(sbi_send_ipi);
/**
* sbi_remote_fence_i() - Execute FENCE.I instruction on given remote harts.
- * @hart_mask: A cpu mask containing all the target harts.
+ * @cpu_mask: A cpu mask containing all the target harts.
*
* Return: 0 on success, appropriate linux error code otherwise.
*/
-int sbi_remote_fence_i(const unsigned long *hart_mask)
+int sbi_remote_fence_i(const struct cpumask *cpu_mask)
{
return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I,
- hart_mask, 0, 0, 0, 0);
+ cpu_mask, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_fence_i);
/**
* sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote
* harts for the specified virtual address range.
- * @hart_mask: A cpu mask containing all the target harts.
+ * @cpu_mask: A cpu mask containing all the target harts.
* @start: Start of the virtual address
* @size: Total size of the virtual address range.
*
* Return: 0 on success, appropriate linux error code otherwise.
*/
-int sbi_remote_sfence_vma(const unsigned long *hart_mask,
+int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size)
{
return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
- hart_mask, start, size, 0, 0);
+ cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma);
@@ -405,38 +455,38 @@ EXPORT_SYMBOL(sbi_remote_sfence_vma);
* sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
* remote harts for a virtual address range belonging to a specific ASID.
*
- * @hart_mask: A cpu mask containing all the target harts.
+ * @cpu_mask: A cpu mask containing all the target harts.
* @start: Start of the virtual address
* @size: Total size of the virtual address range.
* @asid: The value of address space identifier (ASID).
*
* Return: 0 on success, appropriate linux error code otherwise.
*/
-int sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
+int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size,
unsigned long asid)
{
return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
- hart_mask, start, size, asid, 0);
+ cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);
/**
* sbi_remote_hfence_gvma() - Execute HFENCE.GVMA instructions on given remote
* harts for the specified guest physical address range.
- * @hart_mask: A cpu mask containing all the target harts.
+ * @cpu_mask: A cpu mask containing all the target harts.
* @start: Start of the guest physical address
* @size: Total size of the guest physical address range.
*
* Return: None
*/
-int sbi_remote_hfence_gvma(const unsigned long *hart_mask,
+int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size)
{
return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
- hart_mask, start, size, 0, 0);
+ cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL_GPL(sbi_remote_hfence_gvma);
@@ -444,38 +494,38 @@ EXPORT_SYMBOL_GPL(sbi_remote_hfence_gvma);
* sbi_remote_hfence_gvma_vmid() - Execute HFENCE.GVMA instructions on given
* remote harts for a guest physical address range belonging to a specific VMID.
*
- * @hart_mask: A cpu mask containing all the target harts.
+ * @cpu_mask: A cpu mask containing all the target harts.
* @start: Start of the guest physical address
* @size: Total size of the guest physical address range.
* @vmid: The value of guest ID (VMID).
*
* Return: 0 if success, Error otherwise.
*/
-int sbi_remote_hfence_gvma_vmid(const unsigned long *hart_mask,
+int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size,
unsigned long vmid)
{
return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
- hart_mask, start, size, vmid, 0);
+ cpu_mask, start, size, vmid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_gvma_vmid);
/**
* sbi_remote_hfence_vvma() - Execute HFENCE.VVMA instructions on given remote
* harts for the current guest virtual address range.
- * @hart_mask: A cpu mask containing all the target harts.
+ * @cpu_mask: A cpu mask containing all the target harts.
* @start: Start of the current guest virtual address
* @size: Total size of the current guest virtual address range.
*
* Return: None
*/
-int sbi_remote_hfence_vvma(const unsigned long *hart_mask,
+int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size)
{
return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
- hart_mask, start, size, 0, 0);
+ cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma);
@@ -484,23 +534,49 @@ EXPORT_SYMBOL(sbi_remote_hfence_vvma);
* remote harts for current guest virtual address range belonging to a specific
* ASID.
*
- * @hart_mask: A cpu mask containing all the target harts.
+ * @cpu_mask: A cpu mask containing all the target harts.
* @start: Start of the current guest virtual address
* @size: Total size of the current guest virtual address range.
* @asid: The value of address space identifier (ASID).
*
* Return: None
*/
-int sbi_remote_hfence_vvma_asid(const unsigned long *hart_mask,
+int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
unsigned long start,
unsigned long size,
unsigned long asid)
{
return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
- hart_mask, start, size, asid, 0);
+ cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma_asid);
+static void sbi_srst_reset(unsigned long type, unsigned long reason)
+{
+ sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET, type, reason,
+ 0, 0, 0, 0);
+ pr_warn("%s: type=0x%lx reason=0x%lx failed\n",
+ __func__, type, reason);
+}
+
+static int sbi_srst_reboot(struct notifier_block *this,
+ unsigned long mode, void *cmd)
+{
+ sbi_srst_reset((mode == REBOOT_WARM || mode == REBOOT_SOFT) ?
+ SBI_SRST_RESET_TYPE_WARM_REBOOT :
+ SBI_SRST_RESET_TYPE_COLD_REBOOT,
+ SBI_SRST_RESET_REASON_NONE);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sbi_srst_reboot_nb;
+
+static void sbi_srst_power_off(void)
+{
+ sbi_srst_reset(SBI_SRST_RESET_TYPE_SHUTDOWN,
+ SBI_SRST_RESET_REASON_NONE);
+}
+
/**
* sbi_probe_extension() - Check if an SBI extension ID is supported or not.
* @extid: The extension ID to be probed.
@@ -564,11 +640,7 @@ long sbi_get_mimpid(void)
static void sbi_send_cpumask_ipi(const struct cpumask *target)
{
- struct cpumask hartid_mask;
-
- riscv_cpuid_to_hartid_mask(target, &hartid_mask);
-
- sbi_send_ipi(cpumask_bits(&hartid_mask));
+ sbi_send_ipi(target);
}
static const struct riscv_ipi_ops sbi_ipi_ops = {
@@ -608,6 +680,14 @@ void __init sbi_init(void)
} else {
__sbi_rfence = __sbi_rfence_v01;
}
+ if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
+ (sbi_probe_extension(SBI_EXT_SRST) > 0)) {
+ pr_info("SBI SRST extension detected\n");
+ pm_power_off = sbi_srst_power_off;
+ sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
+ sbi_srst_reboot_nb.priority = 192;
+ register_restart_handler(&sbi_srst_reboot_nb);
+ }
} else {
__sbi_set_timer = __sbi_set_timer_v01;
__sbi_send_ipi = __sbi_send_ipi_v01;
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 2f6da845c9ae..b5d30ea92292 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -59,16 +59,6 @@ int riscv_hartid_to_cpuid(int hartid)
return -ENOENT;
}
-void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
-{
- int cpu;
-
- cpumask_clear(out);
- for_each_cpu(cpu, in)
- cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
-}
-EXPORT_SYMBOL_GPL(riscv_cpuid_to_hartid_mask);
-
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
return phys_id == cpuid_to_hartid_map(cpu);
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index bd82375db51a..622f226454d5 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -96,7 +96,7 @@ void __init setup_smp(void)
if (cpuid >= NR_CPUS) {
pr_warn("Invalid cpuid [%d] for hartid [%d]\n",
cpuid, hart);
- break;
+ continue;
}
cpuid_to_hartid_map(cpuid) = hart;
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
index 201ee206fb57..14d2b53ec322 100644
--- a/arch/riscv/kernel/stacktrace.c
+++ b/arch/riscv/kernel/stacktrace.c
@@ -22,15 +22,16 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
bool (*fn)(void *, unsigned long), void *arg)
{
unsigned long fp, sp, pc;
+ int level = 0;
if (regs) {
fp = frame_pointer(regs);
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
- fp = (unsigned long)__builtin_frame_address(1);
- sp = (unsigned long)__builtin_frame_address(0);
- pc = (unsigned long)__builtin_return_address(0);
+ fp = (unsigned long)__builtin_frame_address(0);
+ sp = sp_in_global;
+ pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
fp = task->thread.s[0];
@@ -42,7 +43,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
unsigned long low, high;
struct stackframe *frame;
- if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
+ if (unlikely(!__kernel_text_address(pc) || (level++ >= 1 && !fn(arg, pc))))
break;
/* Validate frame pointer */
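
The new level counter makes the walk start from walk_stackframe()'s own frame but report only frames from level 1 onward, so the unwinder itself never appears in the trace. A tiny standalone sketch of that skip over hypothetical unwound pcs:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool record_pc(void *arg, unsigned long pc)
{
	printf("pc=%#lx\n", pc);
	return true;	/* keep walking */
}

int main(void)
{
	/* Hypothetical pcs; index 0 plays the walker's own frame. */
	unsigned long pcs[] = { 0x1000, 0x2000, 0x3000 };
	int level = 0;

	for (size_t i = 0; i < sizeof(pcs) / sizeof(pcs[0]); i++) {
		/* Frame 0 is examined but never passed to the callback. */
		if (level++ >= 1 && !record_pc(NULL, pcs[i]))
			break;
	}
	return 0;	/* prints 0x2000 and 0x3000 only */
}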
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 0daaa3e4630d..fe92e119e6a3 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -54,7 +54,7 @@ void die(struct pt_regs *regs, const char *str)
if (panic_on_oops)
panic("Fatal exception");
if (ret != NOTIFY_STOP)
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index f5ed08262139..75e0fa8a700a 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -45,7 +45,6 @@ SECTIONS
ENTRY_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
- *(.fixup)
_etext = .;
}
RO_DATA(L1_CACHE_BYTES)
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 5104f3a871e3..4e6c88aa4d87 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -4,7 +4,7 @@
* Copyright (C) 2017 SiFive
*/
-#define RO_EXCEPTION_TABLE_ALIGN 16
+#define RO_EXCEPTION_TABLE_ALIGN 4
#ifdef CONFIG_XIP_KERNEL
#include "vmlinux-xip.lds.S"
@@ -48,7 +48,6 @@ SECTIONS
ENTRY_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
- *(.fixup)
_etext = .;
}
diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile
index 30cdd1df0098..e5c56182f48f 100644
--- a/arch/riscv/kvm/Makefile
+++ b/arch/riscv/kvm/Makefile
@@ -5,14 +5,10 @@
ccflags-y += -I $(srctree)/$(src)
-KVM := ../../../virt/kvm
+include $(srctree)/virt/kvm/Makefile.kvm
obj-$(CONFIG_KVM) += kvm.o
-kvm-y += $(KVM)/kvm_main.o
-kvm-y += $(KVM)/coalesced_mmio.o
-kvm-y += $(KVM)/binary_stats.o
-kvm-y += $(KVM)/eventfd.o
kvm-y += main.o
kvm-y += vm.o
kvm-y += vmid.o
@@ -23,4 +19,8 @@ kvm-y += vcpu_exit.o
kvm-y += vcpu_fp.o
kvm-y += vcpu_switch.o
kvm-y += vcpu_sbi.o
+kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
+kvm-y += vcpu_sbi_base.o
+kvm-y += vcpu_sbi_replace.o
+kvm-y += vcpu_sbi_hsm.o
kvm-y += vcpu_timer.o
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index 421ecf4e6360..2e5ca43c8c49 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -58,6 +58,14 @@ int kvm_arch_hardware_enable(void)
void kvm_arch_hardware_disable(void)
{
+ /*
+ * After clearing the hideleg CSR, the host kernel will receive
+ * spurious interrupts if hvip CSR has pending interrupts and the
+ * corresponding enable bits in vsie CSR are asserted. To avoid it,
+ * hvip CSR and vsie CSR must be cleared before clearing hideleg CSR.
+ */
+ csr_write(CSR_VSIE, 0);
+ csr_write(CSR_HVIP, 0);
csr_write(CSR_HEDELEG, 0);
csr_write(CSR_HIDELEG, 0);
}
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index fc058ff5f4b6..f80a34fbf102 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -83,43 +83,6 @@ static int stage2_level_to_page_size(u32 level, unsigned long *out_pgsize)
return 0;
}
-static int stage2_cache_topup(struct kvm_mmu_page_cache *pcache,
- int min, int max)
-{
- void *page;
-
- BUG_ON(max > KVM_MMU_PAGE_CACHE_NR_OBJS);
- if (pcache->nobjs >= min)
- return 0;
- while (pcache->nobjs < max) {
- page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (!page)
- return -ENOMEM;
- pcache->objects[pcache->nobjs++] = page;
- }
-
- return 0;
-}
-
-static void stage2_cache_flush(struct kvm_mmu_page_cache *pcache)
-{
- while (pcache && pcache->nobjs)
- free_page((unsigned long)pcache->objects[--pcache->nobjs]);
-}
-
-static void *stage2_cache_alloc(struct kvm_mmu_page_cache *pcache)
-{
- void *p;
-
- if (!pcache)
- return NULL;
-
- BUG_ON(!pcache->nobjs);
- p = pcache->objects[--pcache->nobjs];
-
- return p;
-}
-
static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr,
pte_t **ptepp, u32 *ptep_level)
{
@@ -151,7 +114,6 @@ static bool stage2_get_leaf_entry(struct kvm *kvm, gpa_t addr,
static void stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
{
- struct cpumask hmask;
unsigned long size = PAGE_SIZE;
struct kvm_vmid *vmid = &kvm->arch.vmid;
@@ -164,14 +126,13 @@ static void stage2_remote_tlb_flush(struct kvm *kvm, u32 level, gpa_t addr)
* where the Guest/VM is running.
*/
preempt_disable();
- riscv_cpuid_to_hartid_mask(cpu_online_mask, &hmask);
- sbi_remote_hfence_gvma_vmid(cpumask_bits(&hmask), addr, size,
+ sbi_remote_hfence_gvma_vmid(cpu_online_mask, addr, size,
READ_ONCE(vmid->vmid));
preempt_enable();
}
static int stage2_set_pte(struct kvm *kvm, u32 level,
- struct kvm_mmu_page_cache *pcache,
+ struct kvm_mmu_memory_cache *pcache,
gpa_t addr, const pte_t *new_pte)
{
u32 current_level = stage2_pgd_levels - 1;
@@ -186,7 +147,9 @@ static int stage2_set_pte(struct kvm *kvm, u32 level,
return -EEXIST;
if (!pte_val(*ptep)) {
- next_ptep = stage2_cache_alloc(pcache);
+ if (!pcache)
+ return -ENOMEM;
+ next_ptep = kvm_mmu_memory_cache_alloc(pcache);
if (!next_ptep)
return -ENOMEM;
*ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
@@ -209,7 +172,7 @@ static int stage2_set_pte(struct kvm *kvm, u32 level,
}
static int stage2_map_page(struct kvm *kvm,
- struct kvm_mmu_page_cache *pcache,
+ struct kvm_mmu_memory_cache *pcache,
gpa_t gpa, phys_addr_t hpa,
unsigned long page_size,
bool page_rdonly, bool page_exec)
@@ -384,7 +347,10 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
int ret = 0;
unsigned long pfn;
phys_addr_t addr, end;
- struct kvm_mmu_page_cache pcache = { 0, };
+ struct kvm_mmu_memory_cache pcache;
+
+ memset(&pcache, 0, sizeof(pcache));
+ pcache.gfp_zero = __GFP_ZERO;
end = (gpa + size + PAGE_SIZE - 1) & PAGE_MASK;
pfn = __phys_to_pfn(hpa);
@@ -395,9 +361,7 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
if (!writable)
pte = pte_wrprotect(pte);
- ret = stage2_cache_topup(&pcache,
- stage2_pgd_levels,
- KVM_MMU_PAGE_CACHE_NR_OBJS);
+ ret = kvm_mmu_topup_memory_cache(&pcache, stage2_pgd_levels);
if (ret)
goto out;
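
The conversion above moves stage2 table allocations to the generic kvm_mmu_memory_cache, which keeps the same discipline as the removed stage2_cache_* helpers: top up the cache in a context that may sleep, then allocate from it during the walk, where failure and sleeping are not allowed. A standalone sketch of that discipline (hypothetical sizes and names):

#include <stdio.h>
#include <stdlib.h>

#define CACHE_NR_OBJS 8

struct page_cache {
	void *objs[CACHE_NR_OBJS];
	int nobjs;
};

/* Fill the cache to at least "min" objects; may sleep/fail. */
static int cache_topup(struct page_cache *c, int min)
{
	while (c->nobjs < min) {
		void *p = calloc(1, 4096);	/* stands in for a zeroed page */
		if (!p)
			return -1;
		c->objs[c->nobjs++] = p;
	}
	return 0;
}

/* Take a pre-allocated object; safe where allocation is forbidden. */
static void *cache_alloc(struct page_cache *c)
{
	return c->nobjs ? c->objs[--c->nobjs] : NULL;
}

static void cache_free(struct page_cache *c)
{
	while (c->nobjs)
		free(c->objs[--c->nobjs]);
}

int main(void)
{
	struct page_cache pcache = { .nobjs = 0 };
	int levels = 3;	/* hypothetical stage2_pgd_levels */

	if (cache_topup(&pcache, levels))
		return 1;
	for (int i = 0; i < levels; i++)
		printf("level %d table at %p\n", i, cache_alloc(&pcache));
	cache_free(&pcache);	/* drop whatever was not consumed */
	return 0;
}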
@@ -411,7 +375,7 @@ static int stage2_ioremap(struct kvm *kvm, gpa_t gpa, phys_addr_t hpa,
}
out:
- stage2_cache_flush(&pcache);
+ kvm_mmu_free_memory_cache(&pcache);
return ret;
}
@@ -462,7 +426,6 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
@@ -472,18 +435,18 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
* allocated dirty_bitmap[], dirty pages will be tracked while
* the memory slot is write protected.
*/
- if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
- stage2_wp_memory_region(kvm, mem->slot);
+ if (change != KVM_MR_DELETE && new->flags & KVM_MEM_LOG_DIRTY_PAGES)
+ stage2_wp_memory_region(kvm, new->id);
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- hva_t hva = mem->userspace_addr;
- hva_t reg_end = hva + mem->memory_size;
- bool writable = !(mem->flags & KVM_MEM_READONLY);
+ hva_t hva, reg_end, size;
+ gpa_t base_gpa;
+ bool writable;
int ret = 0;
if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
@@ -494,10 +457,16 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
* Prevent userspace from creating a memory region outside of the GPA
* space addressable by the KVM guest GPA space.
*/
- if ((memslot->base_gfn + memslot->npages) >=
+ if ((new->base_gfn + new->npages) >=
(stage2_gpa_size >> PAGE_SHIFT))
return -EFAULT;
+ hva = new->userspace_addr;
+ size = new->npages << PAGE_SHIFT;
+ reg_end = hva + size;
+ base_gpa = new->base_gfn << PAGE_SHIFT;
+ writable = !(new->flags & KVM_MEM_READONLY);
+
mmap_read_lock(current->mm);
/*
@@ -533,15 +502,14 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
vm_end = min(reg_end, vma->vm_end);
if (vma->vm_flags & VM_PFNMAP) {
- gpa_t gpa = mem->guest_phys_addr +
- (vm_start - mem->userspace_addr);
+ gpa_t gpa = base_gpa + (vm_start - hva);
phys_addr_t pa;
pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+ if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
ret = -EINVAL;
goto out;
}
@@ -559,8 +527,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
spin_lock(&kvm->mmu_lock);
if (ret)
- stage2_unmap_range(kvm, mem->guest_phys_addr,
- mem->memory_size, false);
+ stage2_unmap_range(kvm, base_gpa, size, false);
spin_unlock(&kvm->mmu_lock);
out:
@@ -646,7 +613,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
gfn_t gfn = gpa >> PAGE_SHIFT;
struct vm_area_struct *vma;
struct kvm *kvm = vcpu->kvm;
- struct kvm_mmu_page_cache *pcache = &vcpu->arch.mmu_page_cache;
+ struct kvm_mmu_memory_cache *pcache = &vcpu->arch.mmu_page_cache;
bool logging = (memslot->dirty_bitmap &&
!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
unsigned long vma_pagesize, mmu_seq;
@@ -681,8 +648,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
}
/* We need minimum second+third level pages */
- ret = stage2_cache_topup(pcache, stage2_pgd_levels,
- KVM_MMU_PAGE_CACHE_NR_OBJS);
+ ret = kvm_mmu_topup_memory_cache(pcache, stage2_pgd_levels);
if (ret) {
kvm_err("Failed to topup stage2 cache\n");
return ret;
@@ -731,11 +697,6 @@ out_unlock:
return ret;
}
-void kvm_riscv_stage2_flush_cache(struct kvm_vcpu *vcpu)
-{
- stage2_cache_flush(&vcpu->arch.mmu_page_cache);
-}
-
int kvm_riscv_stage2_alloc_pgd(struct kvm *kvm)
{
struct page *pgd_page;
@@ -806,3 +767,8 @@ unsigned long kvm_riscv_stage2_mode(void)
{
return stage2_mode >> HGATP_MODE_SHIFT;
}
+
+int kvm_riscv_stage2_gpa_bits(void)
+{
+ return stage2_gpa_bits;
+}
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index fb84619df012..624166004e36 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -53,6 +53,17 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
+ bool loaded;
+
+	/*
+	 * Preemption must be disabled here because this races with
+	 * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
+	 * which also call vcpu_load()/vcpu_put().
+	 */
+ get_cpu();
+ loaded = (vcpu->cpu != -1);
+ if (loaded)
+ kvm_arch_vcpu_put(vcpu);
memcpy(csr, reset_csr, sizeof(*csr));
@@ -64,6 +75,11 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
WRITE_ONCE(vcpu->arch.irqs_pending, 0);
WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);
+
+ /* Reset the guest CSRs for hotplug usecase */
+ if (loaded)
+ kvm_arch_vcpu_load(vcpu, smp_processor_id());
+ put_cpu();
}
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
@@ -74,9 +90,11 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *cntx;
+ struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
/* Mark this VCPU never ran */
vcpu->arch.ran_atleast_once = false;
+ vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
/* Setup ISA features available to VCPU */
vcpu->arch.isa = riscv_isa_extension_base(NULL) & KVM_RISCV_ISA_ALLOWED;
@@ -89,6 +107,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
cntx->hstatus |= HSTATUS_SPVP;
cntx->hstatus |= HSTATUS_SPV;
+ /* By default, make CY, TM, and IR counters accessible in VU mode */
+ reset_csr->scounteren = 0x7;
+
/* Setup VCPU timer */
kvm_riscv_vcpu_timer_init(vcpu);
@@ -100,6 +121,13 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
+	/*
+	 * The vcpu with id 0 is the designated boot cpu.
+	 * Keep all vcpus with a non-zero id in the power-off state so that
+	 * they can be brought up using the SBI HSM extension.
+	 */
+ if (vcpu->vcpu_idx != 0)
+ kvm_riscv_vcpu_power_off(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -107,8 +135,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
/* Cleanup VCPU timer */
kvm_riscv_vcpu_timer_deinit(vcpu);
- /* Flush the pages pre-allocated for Stage2 page table mappings */
- kvm_riscv_stage2_flush_cache(vcpu);
+ /* Free unused pages pre-allocated for Stage2 page table mappings */
+ kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
@@ -675,6 +703,20 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
csr_write(CSR_HVIP, csr->hvip);
}
+/*
+ * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
+ * the vCPU is running.
+ *
+ * This must be noinstr as instrumentation may make use of RCU, and this is not
+ * safe during the EQS.
+ */
+static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
+{
+ guest_state_enter_irqoff();
+ __kvm_riscv_switch_to(&vcpu->arch);
+ guest_state_exit_irqoff();
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
int ret;
@@ -766,9 +808,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
continue;
}
- guest_enter_irqoff();
+ guest_timing_enter_irqoff();
- __kvm_riscv_switch_to(&vcpu->arch);
+ kvm_riscv_vcpu_enter_exit(vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
@@ -788,25 +830,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
kvm_riscv_vcpu_sync_interrupts(vcpu);
/*
- * We may have taken a host interrupt in VS/VU-mode (i.e.
- * while executing the guest). This interrupt is still
- * pending, as we haven't serviced it yet!
+ * We must ensure that any pending interrupts are taken before
+ * we exit guest timing so that timer ticks are accounted as
+ * guest time. Transiently unmask interrupts so that any
+ * pending interrupts are taken.
*
- * We're now back in HS-mode with interrupts disabled
- * so enabling the interrupts now will have the effect
- * of taking the interrupt again, in HS-mode this time.
+ * There's no barrier which ensures that pending interrupts are
+ * recognised, so we just hope that the CPU takes any pending
+ * interrupts between the enable and disable.
*/
local_irq_enable();
+ local_irq_disable();
- /*
- * We do local_irq_enable() before calling guest_exit() so
- * that if a timer interrupt hits while running the guest
- * we account that tick as being spent in the guest. We
- * enable preemption after calling guest_exit() so that if
- * we get preempted we make sure ticks after that is not
- * counted as guest time.
- */
- guest_exit();
+ guest_timing_exit_irqoff();
+
+ local_irq_enable();
preempt_enable();
diff --git a/arch/riscv/kvm/vcpu_exit.c b/arch/riscv/kvm/vcpu_exit.c
index 7f2d742ae4c6..571f319e995a 100644
--- a/arch/riscv/kvm/vcpu_exit.c
+++ b/arch/riscv/kvm/vcpu_exit.c
@@ -146,7 +146,7 @@ static int system_opcode_insn(struct kvm_vcpu *vcpu,
vcpu->stat.wfi_exit_stat++;
if (!kvm_arch_vcpu_runnable(vcpu)) {
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->arch.srcu_idx);
- kvm_vcpu_block(vcpu);
+ kvm_vcpu_halt(vcpu);
vcpu->arch.srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
}
diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c
index 1b070152578f..4449a976e5a6 100644
--- a/arch/riscv/kvm/vcpu_fp.c
+++ b/arch/riscv/kvm/vcpu_fp.c
@@ -26,7 +26,7 @@ void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
cntx->sstatus |= SR_FS_OFF;
}
-void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
+static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)
{
cntx->sstatus &= ~SR_FS;
cntx->sstatus |= SR_FS_CLEAN;
diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c
index 3b0e703d22cf..78aa3db76225 100644
--- a/arch/riscv/kvm/vcpu_sbi.c
+++ b/arch/riscv/kvm/vcpu_sbi.c
@@ -9,15 +9,58 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
-#include <asm/csr.h>
#include <asm/sbi.h>
-#include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_sbi.h>
-#define SBI_VERSION_MAJOR 0
-#define SBI_VERSION_MINOR 1
+static int kvm_linux_err_map_sbi(int err)
+{
+ switch (err) {
+ case 0:
+ return SBI_SUCCESS;
+ case -EPERM:
+ return SBI_ERR_DENIED;
+ case -EINVAL:
+ return SBI_ERR_INVALID_PARAM;
+ case -EFAULT:
+ return SBI_ERR_INVALID_ADDRESS;
+ case -EOPNOTSUPP:
+ return SBI_ERR_NOT_SUPPORTED;
+ case -EALREADY:
+ return SBI_ERR_ALREADY_AVAILABLE;
+ default:
+ return SBI_ERR_FAILURE;
+	}
+}
-static void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu,
- struct kvm_run *run)
+#ifdef CONFIG_RISCV_SBI_V01
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
+#else
+static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
+ .extid_start = -1UL,
+ .extid_end = -1UL,
+ .handler = NULL,
+};
+#endif
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
+
+static const struct kvm_vcpu_sbi_extension *sbi_ext[] = {
+ &vcpu_sbi_ext_v01,
+ &vcpu_sbi_ext_base,
+ &vcpu_sbi_ext_time,
+ &vcpu_sbi_ext_ipi,
+ &vcpu_sbi_ext_rfence,
+ &vcpu_sbi_ext_hsm,
+ &vcpu_sbi_ext_experimental,
+ &vcpu_sbi_ext_vendor,
+};
+
+void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
@@ -55,131 +98,73 @@ int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
return 0;
}
-#ifdef CONFIG_RISCV_SBI_V01
-
-static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu,
- struct kvm_run *run, u32 type)
+const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid)
{
- int i;
- struct kvm_vcpu *tmp;
+ int i = 0;
- kvm_for_each_vcpu(i, tmp, vcpu->kvm)
- tmp->arch.power_off = true;
- kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+ for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
+ if (sbi_ext[i]->extid_start <= extid &&
+ sbi_ext[i]->extid_end >= extid)
+ return sbi_ext[i];
+ }
- memset(&run->system_event, 0, sizeof(run->system_event));
- run->system_event.type = type;
- run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+ return NULL;
}
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- ulong hmask;
- int i, ret = 1;
- u64 next_cycle;
- struct kvm_vcpu *rvcpu;
+ int ret = 1;
bool next_sepc = true;
- struct cpumask cm, hm;
- struct kvm *kvm = vcpu->kvm;
- struct kvm_cpu_trap utrap = { 0 };
+ bool userspace_exit = false;
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ const struct kvm_vcpu_sbi_extension *sbi_ext;
+ struct kvm_cpu_trap utrap = { 0 };
+ unsigned long out_val = 0;
+ bool ext_is_v01 = false;
- if (!cp)
- return -EINVAL;
-
- switch (cp->a7) {
- case SBI_EXT_0_1_CONSOLE_GETCHAR:
- case SBI_EXT_0_1_CONSOLE_PUTCHAR:
- /*
- * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
- * handled in kernel so we forward these to user-space
- */
- kvm_riscv_vcpu_sbi_forward(vcpu, run);
- next_sepc = false;
- ret = 0;
- break;
- case SBI_EXT_0_1_SET_TIMER:
-#if __riscv_xlen == 32
- next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
-#else
- next_cycle = (u64)cp->a0;
+ sbi_ext = kvm_vcpu_sbi_find_ext(cp->a7);
+ if (sbi_ext && sbi_ext->handler) {
+#ifdef CONFIG_RISCV_SBI_V01
+ if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
+ cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
+ ext_is_v01 = true;
#endif
- kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
- break;
- case SBI_EXT_0_1_CLEAR_IPI:
- kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
- break;
- case SBI_EXT_0_1_SEND_IPI:
- if (cp->a0)
- hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
- &utrap);
- else
- hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
- if (utrap.scause) {
- utrap.sepc = cp->sepc;
- kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
- next_sepc = false;
- break;
- }
- for_each_set_bit(i, &hmask, BITS_PER_LONG) {
- rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
- kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
- }
- break;
- case SBI_EXT_0_1_SHUTDOWN:
- kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN);
- next_sepc = false;
- ret = 0;
- break;
- case SBI_EXT_0_1_REMOTE_FENCE_I:
- case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
- case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
- if (cp->a0)
- hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
- &utrap);
- else
- hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
- if (utrap.scause) {
- utrap.sepc = cp->sepc;
- kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
- next_sepc = false;
- break;
- }
- cpumask_clear(&cm);
- for_each_set_bit(i, &hmask, BITS_PER_LONG) {
- rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
- if (rvcpu->cpu < 0)
- continue;
- cpumask_set_cpu(rvcpu->cpu, &cm);
- }
- riscv_cpuid_to_hartid_mask(&cm, &hm);
- if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
- sbi_remote_fence_i(cpumask_bits(&hm));
- else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA)
- sbi_remote_hfence_vvma(cpumask_bits(&hm),
- cp->a1, cp->a2);
- else
- sbi_remote_hfence_vvma_asid(cpumask_bits(&hm),
- cp->a1, cp->a2, cp->a3);
- break;
- default:
+ ret = sbi_ext->handler(vcpu, run, &out_val, &utrap, &userspace_exit);
+ } else {
/* Return error for unsupported SBI calls */
cp->a0 = SBI_ERR_NOT_SUPPORTED;
- break;
+ goto ecall_done;
+ }
+
+	/* Handle special error cases, i.e., trap, exit, or userspace forward */
+ if (utrap.scause) {
+ /* No need to increment sepc or exit ioctl loop */
+ ret = 1;
+ utrap.sepc = cp->sepc;
+ kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
+ next_sepc = false;
+ goto ecall_done;
}
+	/* Exit the ioctl loop or propagate the error code to the guest */
+ if (userspace_exit) {
+ next_sepc = false;
+ ret = 0;
+ } else {
+		/*
+		 * The SBI extension handler always returns a Linux error code.
+		 * Convert it to the SBI-specific error code that can be
+		 * propagated to the SBI caller.
+		 */
+ ret = kvm_linux_err_map_sbi(ret);
+ cp->a0 = ret;
+ ret = 1;
+ }
+ecall_done:
if (next_sepc)
cp->sepc += 4;
+ if (!ext_is_v01)
+ cp->a1 = out_val;
return ret;
}
-
-#else
-
-int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- kvm_riscv_vcpu_sbi_forward(vcpu, run);
- return 0;
-}
-
-#endif
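
kvm_vcpu_sbi_find_ext() above dispatches on extension-ID ranges with a linear scan over a small static table, which is cheap for the handful of registered extensions. A standalone sketch of that dispatch shape (hypothetical IDs and handlers; the vendor range 0x09000000-0x09FFFFFF follows the SBI spec):

#include <stddef.h>
#include <stdio.h>

struct sbi_ext {
	unsigned long extid_start;
	unsigned long extid_end;
	int (*handler)(unsigned long fid);
};

static int base_handler(unsigned long fid)
{
	printf("base extension, fid=%lu\n", fid);
	return 0;
}

static int vendor_handler(unsigned long fid)
{
	printf("vendor extension, fid=%lu\n", fid);
	return 0;
}

static const struct sbi_ext exts[] = {
	{ 0x10, 0x10, base_handler },			/* single-ID entry */
	{ 0x09000000, 0x09FFFFFF, vendor_handler },	/* range entry */
};

static const struct sbi_ext *find_ext(unsigned long extid)
{
	for (size_t i = 0; i < sizeof(exts) / sizeof(exts[0]); i++)
		if (exts[i].extid_start <= extid && extid <= exts[i].extid_end)
			return &exts[i];
	return NULL;	/* caller reports SBI_ERR_NOT_SUPPORTED */
}

int main(void)
{
	const struct sbi_ext *e = find_ext(0x10);

	if (e)
		e->handler(3);
	return 0;
}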
diff --git a/arch/riscv/kvm/vcpu_sbi_base.c b/arch/riscv/kvm/vcpu_sbi_base.c
new file mode 100644
index 000000000000..48f431091cdb
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi_base.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/version.h>
+#include <asm/csr.h>
+#include <asm/sbi.h>
+#include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_sbi.h>
+
+static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long *out_val,
+ struct kvm_cpu_trap *trap, bool *exit)
+{
+ int ret = 0;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ struct sbiret ecall_ret;
+
+ switch (cp->a6) {
+ case SBI_EXT_BASE_GET_SPEC_VERSION:
+ *out_val = (KVM_SBI_VERSION_MAJOR <<
+ SBI_SPEC_VERSION_MAJOR_SHIFT) |
+ KVM_SBI_VERSION_MINOR;
+ break;
+ case SBI_EXT_BASE_GET_IMP_ID:
+ *out_val = KVM_SBI_IMPID;
+ break;
+ case SBI_EXT_BASE_GET_IMP_VERSION:
+ *out_val = LINUX_VERSION_CODE;
+ break;
+ case SBI_EXT_BASE_PROBE_EXT:
+ if ((cp->a0 >= SBI_EXT_EXPERIMENTAL_START &&
+ cp->a0 <= SBI_EXT_EXPERIMENTAL_END) ||
+ (cp->a0 >= SBI_EXT_VENDOR_START &&
+ cp->a0 <= SBI_EXT_VENDOR_END)) {
+ /*
+ * For experimental/vendor extensions,
+ * forward the call to userspace.
+ */
+ kvm_riscv_vcpu_sbi_forward(vcpu, run);
+ *exit = true;
+ } else
+ *out_val = kvm_vcpu_sbi_find_ext(cp->a0) ? 1 : 0;
+ break;
+ case SBI_EXT_BASE_GET_MVENDORID:
+ case SBI_EXT_BASE_GET_MARCHID:
+ case SBI_EXT_BASE_GET_MIMPID:
+ ecall_ret = sbi_ecall(SBI_EXT_BASE, cp->a6, 0, 0, 0, 0, 0, 0);
+ if (!ecall_ret.error)
+ *out_val = ecall_ret.value;
+ /* TODO: We are unnecessarily converting the error twice */
+ ret = sbi_err_map_linux_errno(ecall_ret.error);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base = {
+ .extid_start = SBI_EXT_BASE,
+ .extid_end = SBI_EXT_BASE,
+ .handler = kvm_sbi_ext_base_handler,
+};
+
+static int kvm_sbi_ext_forward_handler(struct kvm_vcpu *vcpu,
+ struct kvm_run *run,
+ unsigned long *out_val,
+ struct kvm_cpu_trap *utrap,
+ bool *exit)
+{
+ /*
+ * Both SBI experimental and vendor extensions are
+ * unconditionally forwarded to userspace.
+ */
+ kvm_riscv_vcpu_sbi_forward(vcpu, run);
+ *exit = true;
+ return 0;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental = {
+ .extid_start = SBI_EXT_EXPERIMENTAL_START,
+ .extid_end = SBI_EXT_EXPERIMENTAL_END,
+ .handler = kvm_sbi_ext_forward_handler,
+};
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor = {
+ .extid_start = SBI_EXT_VENDOR_START,
+ .extid_end = SBI_EXT_VENDOR_END,
+ .handler = kvm_sbi_ext_forward_handler,
+};
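From the guest's side, probing for an extension is a single ecall into the BASE extension. A sketch using the kernel's sbi_ecall() helper:

/* Sketch: guest probe via BASE.PROBE_EXT; a nonzero value in
 * sbiret.value means the extension is implemented.
 */
static bool guest_has_sbi_ext(unsigned long extid)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT,
			extid, 0, 0, 0, 0, 0);
	return !ret.error && ret.value;
}

For example, guest_has_sbi_ext(SBI_EXT_TIME) issued against this KVM implementation would land in kvm_sbi_ext_base_handler() above and return 1 via kvm_vcpu_sbi_find_ext().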
diff --git a/arch/riscv/kvm/vcpu_sbi_hsm.c b/arch/riscv/kvm/vcpu_sbi_hsm.c
new file mode 100644
index 000000000000..2e383687fa48
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi_hsm.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <asm/csr.h>
+#include <asm/sbi.h>
+#include <asm/kvm_vcpu_sbi.h>
+
+static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *reset_cntx;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ struct kvm_vcpu *target_vcpu;
+ unsigned long target_vcpuid = cp->a0;
+
+ target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
+ if (!target_vcpu)
+ return -EINVAL;
+ if (!target_vcpu->arch.power_off)
+ return -EALREADY;
+
+ reset_cntx = &target_vcpu->arch.guest_reset_context;
+ /* start address */
+ reset_cntx->sepc = cp->a1;
+ /* target vcpu id to start */
+ reset_cntx->a0 = target_vcpuid;
+ /* private data passed from kernel */
+ reset_cntx->a1 = cp->a2;
+ kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);
+
+ kvm_riscv_vcpu_power_on(target_vcpu);
+
+ return 0;
+}
+
+static int kvm_sbi_hsm_vcpu_stop(struct kvm_vcpu *vcpu)
+{
+ if (vcpu->arch.power_off)
+ return -EINVAL;
+
+ kvm_riscv_vcpu_power_off(vcpu);
+
+ return 0;
+}
+
+static int kvm_sbi_hsm_vcpu_get_status(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ unsigned long target_vcpuid = cp->a0;
+ struct kvm_vcpu *target_vcpu;
+
+ target_vcpu = kvm_get_vcpu_by_id(vcpu->kvm, target_vcpuid);
+ if (!target_vcpu)
+ return -EINVAL;
+ if (!target_vcpu->arch.power_off)
+ return SBI_HSM_HART_STATUS_STARTED;
+ else
+ return SBI_HSM_HART_STATUS_STOPPED;
+}
+
+static int kvm_sbi_ext_hsm_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long *out_val,
+ struct kvm_cpu_trap *utrap,
+ bool *exit)
+{
+ int ret = 0;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long funcid = cp->a6;
+
+ switch (funcid) {
+ case SBI_EXT_HSM_HART_START:
+ mutex_lock(&kvm->lock);
+ ret = kvm_sbi_hsm_vcpu_start(vcpu);
+ mutex_unlock(&kvm->lock);
+ break;
+ case SBI_EXT_HSM_HART_STOP:
+ ret = kvm_sbi_hsm_vcpu_stop(vcpu);
+ break;
+ case SBI_EXT_HSM_HART_STATUS:
+ ret = kvm_sbi_hsm_vcpu_get_status(vcpu);
+ if (ret >= 0) {
+ *out_val = ret;
+ ret = 0;
+ }
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm = {
+ .extid_start = SBI_EXT_HSM,
+ .extid_end = SBI_EXT_HSM,
+ .handler = kvm_sbi_ext_hsm_handler,
+};
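For reference, the guest side of HART_START is a plain ecall carrying the target hartid, a physical start address, and an opaque parameter that reappears in a1 on the started hart, mirroring what kvm_sbi_hsm_vcpu_start() writes into the reset context above. A sketch:

/* Sketch: guest asking SBI to start hart `hartid` at `start_paddr`,
 * with `priv` handed through to the new hart in a1.
 */
static long guest_hart_start(unsigned long hartid,
			     unsigned long start_paddr, unsigned long priv)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START,
			hartid, start_paddr, priv, 0, 0, 0);
	return ret.error;
}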
diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c
new file mode 100644
index 000000000000..1bc0608a5bfd
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi_replace.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <asm/csr.h>
+#include <asm/sbi.h>
+#include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_sbi.h>
+
+static int kvm_sbi_ext_time_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long *out_val,
+ struct kvm_cpu_trap *utrap, bool *exit)
+{
+ int ret = 0;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ u64 next_cycle;
+
+ if (cp->a6 != SBI_EXT_TIME_SET_TIMER)
+ return -EINVAL;
+
+#if __riscv_xlen == 32
+ next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
+#else
+ next_cycle = (u64)cp->a0;
+#endif
+ kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
+
+ return ret;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time = {
+ .extid_start = SBI_EXT_TIME,
+ .extid_end = SBI_EXT_TIME,
+ .handler = kvm_sbi_ext_time_handler,
+};
+
+static int kvm_sbi_ext_ipi_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long *out_val,
+ struct kvm_cpu_trap *utrap, bool *exit)
+{
+ int ret = 0;
+ unsigned long i;
+ struct kvm_vcpu *tmp;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ unsigned long hmask = cp->a0;
+ unsigned long hbase = cp->a1;
+
+ if (cp->a6 != SBI_EXT_IPI_SEND_IPI)
+ return -EINVAL;
+
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ if (hbase != -1UL) {
+ if (tmp->vcpu_id < hbase)
+ continue;
+ if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
+ continue;
+ }
+ ret = kvm_riscv_vcpu_set_interrupt(tmp, IRQ_VS_SOFT);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi = {
+ .extid_start = SBI_EXT_IPI,
+ .extid_end = SBI_EXT_IPI,
+ .handler = kvm_sbi_ext_ipi_handler,
+};
+
+static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long *out_val,
+ struct kvm_cpu_trap *utrap, bool *exit)
+{
+ int ret = 0;
+ unsigned long i;
+ struct cpumask cm;
+ struct kvm_vcpu *tmp;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+ unsigned long hmask = cp->a0;
+ unsigned long hbase = cp->a1;
+ unsigned long funcid = cp->a6;
+
+ cpumask_clear(&cm);
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
+ if (hbase != -1UL) {
+ if (tmp->vcpu_id < hbase)
+ continue;
+ if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
+ continue;
+ }
+ if (tmp->cpu < 0)
+ continue;
+ cpumask_set_cpu(tmp->cpu, &cm);
+ }
+
+ switch (funcid) {
+ case SBI_EXT_RFENCE_REMOTE_FENCE_I:
+ ret = sbi_remote_fence_i(&cm);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
+ ret = sbi_remote_hfence_vvma(&cm, cp->a2, cp->a3);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
+ ret = sbi_remote_hfence_vvma_asid(&cm, cp->a2,
+ cp->a3, cp->a4);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
+ /* TODO: implement for nested hypervisor case */
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence = {
+ .extid_start = SBI_EXT_RFENCE,
+ .extid_end = SBI_EXT_RFENCE,
+ .handler = kvm_sbi_ext_rfence_handler,
+};
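The hbase/hmask pair in the IPI and RFENCE handlers encodes a window of hart IDs: bit n of hmask selects hart hbase + n, and hbase == -1UL means "all harts". For example, to target vCPUs 5 and 7 a guest passes hbase = 5 and hmask = 0b101; a sketch of building those arguments (assuming second > first and the span fits in one long):

/* Sketch: hart-mask arguments for the v0.2 SEND_IPI call handled
 * above (a0 = hmask, a1 = hbase).
 */
static void guest_send_ipi_to(unsigned long first, unsigned long second)
{
	unsigned long hbase = first;                           /* e.g. 5 */
	unsigned long hmask = 1UL | (1UL << (second - first)); /* 0b101 */

	sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
		  hmask, hbase, 0, 0, 0, 0);
}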
diff --git a/arch/riscv/kvm/vcpu_sbi_v01.c b/arch/riscv/kvm/vcpu_sbi_v01.c
new file mode 100644
index 000000000000..07e2de14433a
--- /dev/null
+++ b/arch/riscv/kvm/vcpu_sbi_v01.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <asm/csr.h>
+#include <asm/sbi.h>
+#include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_sbi.h>
+
+static void kvm_sbi_system_shutdown(struct kvm_vcpu *vcpu,
+ struct kvm_run *run, u32 type)
+{
+ unsigned long i;
+ struct kvm_vcpu *tmp;
+
+ kvm_for_each_vcpu(i, tmp, vcpu->kvm)
+ tmp->arch.power_off = true;
+ kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+
+ memset(&run->system_event, 0, sizeof(run->system_event));
+ run->system_event.type = type;
+ run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+}
+
+static int kvm_sbi_ext_v01_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long *out_val,
+ struct kvm_cpu_trap *utrap,
+ bool *exit)
+{
+ ulong hmask;
+ int i, ret = 0;
+ u64 next_cycle;
+ struct kvm_vcpu *rvcpu;
+ struct cpumask cm;
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
+
+ switch (cp->a7) {
+ case SBI_EXT_0_1_CONSOLE_GETCHAR:
+ case SBI_EXT_0_1_CONSOLE_PUTCHAR:
+ /*
+ * The CONSOLE_GETCHAR/CONSOLE_PUTCHAR SBI calls cannot be
+ * handled in the kernel, so we forward them to userspace.
+ */
+ kvm_riscv_vcpu_sbi_forward(vcpu, run);
+ *exit = true;
+ break;
+ case SBI_EXT_0_1_SET_TIMER:
+#if __riscv_xlen == 32
+ next_cycle = ((u64)cp->a1 << 32) | (u64)cp->a0;
+#else
+ next_cycle = (u64)cp->a0;
+#endif
+ ret = kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
+ break;
+ case SBI_EXT_0_1_CLEAR_IPI:
+ ret = kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_SOFT);
+ break;
+ case SBI_EXT_0_1_SEND_IPI:
+ if (cp->a0)
+ hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
+ utrap);
+ else
+ hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
+ if (utrap->scause)
+ break;
+
+ for_each_set_bit(i, &hmask, BITS_PER_LONG) {
+ rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
+ ret = kvm_riscv_vcpu_set_interrupt(rvcpu, IRQ_VS_SOFT);
+ if (ret < 0)
+ break;
+ }
+ break;
+ case SBI_EXT_0_1_SHUTDOWN:
+ kvm_sbi_system_shutdown(vcpu, run, KVM_SYSTEM_EVENT_SHUTDOWN);
+ *exit = true;
+ break;
+ case SBI_EXT_0_1_REMOTE_FENCE_I:
+ case SBI_EXT_0_1_REMOTE_SFENCE_VMA:
+ case SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID:
+ if (cp->a0)
+ hmask = kvm_riscv_vcpu_unpriv_read(vcpu, false, cp->a0,
+ utrap);
+ else
+ hmask = (1UL << atomic_read(&kvm->online_vcpus)) - 1;
+ if (utrap->scause)
+ break;
+
+ cpumask_clear(&cm);
+ for_each_set_bit(i, &hmask, BITS_PER_LONG) {
+ rvcpu = kvm_get_vcpu_by_id(vcpu->kvm, i);
+ if (rvcpu->cpu < 0)
+ continue;
+ cpumask_set_cpu(rvcpu->cpu, &cm);
+ }
+ if (cp->a7 == SBI_EXT_0_1_REMOTE_FENCE_I)
+ ret = sbi_remote_fence_i(&cm);
+ else if (cp->a7 == SBI_EXT_0_1_REMOTE_SFENCE_VMA)
+ ret = sbi_remote_hfence_vvma(&cm, cp->a1, cp->a2);
+ else
+ ret = sbi_remote_hfence_vvma_asid(&cm, cp->a1, cp->a2, cp->a3);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
+ .extid_start = SBI_EXT_0_1_SET_TIMER,
+ .extid_end = SBI_EXT_0_1_SHUTDOWN,
+ .handler = kvm_sbi_ext_v01_handler,
+};
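Unlike the v0.2 calls above, the legacy v0.1 IPI and fence calls pass a guest virtual address of the hart mask in a0 (zero meaning all harts), which is why the handler fetches it with kvm_riscv_vcpu_unpriv_read() and must cope with a guest page fault via utrap. A guest-side sketch of the legacy convention (hand-rolled ecall, since modern kernels no longer emit it):

/* Sketch: legacy SBI v0.1 send_ipi, mask passed by reference. */
static void guest_send_ipi_v01(const unsigned long *hart_mask)
{
	register unsigned long a0 asm("a0") = (unsigned long)hart_mask;
	register unsigned long a7 asm("a7") = SBI_EXT_0_1_SEND_IPI;

	asm volatile("ecall" : "+r" (a0) : "r" (a7) : "memory");
}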
diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c
index fb18af34a4b5..c768f75279ef 100644
--- a/arch/riscv/kvm/vm.c
+++ b/arch/riscv/kvm/vm.c
@@ -46,15 +46,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
void kvm_arch_destroy_vm(struct kvm *kvm)
{
- int i;
-
- for (i = 0; i < KVM_MAX_VCPUS; ++i) {
- if (kvm->vcpus[i]) {
- kvm_vcpu_destroy(kvm->vcpus[i]);
- kvm->vcpus[i] = NULL;
- }
- }
- atomic_set(&kvm->online_vcpus, 0);
+ kvm_destroy_vcpus(kvm);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
@@ -82,6 +74,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
break;
+ case KVM_CAP_VM_GPA_BITS:
+ r = kvm_riscv_stage2_gpa_bits();
+ break;
default:
r = 0;
break;
diff --git a/arch/riscv/kvm/vmid.c b/arch/riscv/kvm/vmid.c
index 2c6253b293bc..2fa4f7b1813d 100644
--- a/arch/riscv/kvm/vmid.c
+++ b/arch/riscv/kvm/vmid.c
@@ -65,9 +65,8 @@ bool kvm_riscv_stage2_vmid_ver_changed(struct kvm_vmid *vmid)
void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu)
{
- int i;
+ unsigned long i;
struct kvm_vcpu *v;
- struct cpumask hmask;
struct kvm_vmid *vmid = &vcpu->kvm->arch.vmid;
if (!kvm_riscv_stage2_vmid_ver_changed(vmid))
@@ -102,8 +101,7 @@ void kvm_riscv_stage2_vmid_update(struct kvm_vcpu *vcpu)
* running, we force VM exits on all host CPUs using IPI and
* flush all Guest TLBs.
*/
- riscv_cpuid_to_hartid_mask(cpu_online_mask, &hmask);
- sbi_remote_hfence_gvma(cpumask_bits(&hmask), 0, 0);
+ sbi_remote_hfence_gvma(cpu_online_mask, 0, 0);
}
vmid->vmid = vmid_next;
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index 63bc691cff91..8c475f4da308 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -1,15 +1,13 @@
#include <linux/linkage.h>
#include <asm-generic/export.h>
#include <asm/asm.h>
+#include <asm/asm-extable.h>
#include <asm/csr.h>
.macro fixup op reg addr lbl
100:
\op \reg, \addr
- .section __ex_table,"a"
- .balign RISCV_SZPTR
- RISCV_PTR 100b, \lbl
- .previous
+ _asm_extable 100b, \lbl
.endm
ENTRY(__asm_copy_to_user)
@@ -173,6 +171,13 @@ ENTRY(__asm_copy_from_user)
csrc CSR_STATUS, t6
li a0, 0
ret
+
+ /* Exception fixup code */
+10:
+ /* Disable access to user memory */
+ csrc CSR_STATUS, t6
+ mv a0, t5
+ ret
ENDPROC(__asm_copy_to_user)
ENDPROC(__asm_copy_from_user)
EXPORT_SYMBOL(__asm_copy_to_user)
@@ -218,19 +223,12 @@ ENTRY(__clear_user)
addi a0, a0, 1
bltu a0, a3, 5b
j 3b
-ENDPROC(__clear_user)
-EXPORT_SYMBOL(__clear_user)
- .section .fixup,"ax"
- .balign 4
- /* Fixup code for __copy_user(10) and __clear_user(11) */
-10:
- /* Disable access to user memory */
- csrs CSR_STATUS, t6
- mv a0, t5
- ret
+ /* Exception fixup code */
11:
+ /* Disable access to user memory */
csrc CSR_STATUS, t6
mv a0, a1
ret
- .previous
+ENDPROC(__clear_user)
+EXPORT_SYMBOL(__clear_user)
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 89f81067e09e..6cb7d96ad9c7 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -67,10 +67,7 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
*/
smp_mb();
} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
- cpumask_t hartid_mask;
-
- riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
- sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+ sbi_remote_fence_i(&others);
} else {
on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
}
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index ea54cc0c9106..7acbfbd14557 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -192,7 +192,7 @@ static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
switch_mm_fast:
csr_write(CSR_SATP, virt_to_pfn(mm->pgd) |
((cntx & asid_mask) << SATP_ASID_SHIFT) |
- SATP_MODE);
+ satp_mode);
if (need_flush_tlb)
local_flush_tlb_all();
@@ -201,7 +201,7 @@ switch_mm_fast:
static void set_mm_noasid(struct mm_struct *mm)
{
/* Switch the page table and blindly nuke entire local TLB */
- csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | SATP_MODE);
+ csr_write(CSR_SATP, virt_to_pfn(mm->pgd) | satp_mode);
local_flush_tlb_all();
}
diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
index ddb7d3b99e89..35484d830fd6 100644
--- a/arch/riscv/mm/extable.c
+++ b/arch/riscv/mm/extable.c
@@ -7,27 +7,65 @@
*/
+#include <linux/bitfield.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <asm/asm-extable.h>
+#include <asm/ptrace.h>
-#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
-int rv_bpf_fixup_exception(const struct exception_table_entry *ex, struct pt_regs *regs);
-#endif
+static inline unsigned long
+get_ex_fixup(const struct exception_table_entry *ex)
+{
+ return ((unsigned long)&ex->fixup + ex->fixup);
+}
+
+static bool ex_handler_fixup(const struct exception_table_entry *ex,
+ struct pt_regs *regs)
+{
+ regs->epc = get_ex_fixup(ex);
+ return true;
+}
+
+static inline void regs_set_gpr(struct pt_regs *regs, unsigned int offset,
+ unsigned long val)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return;
+
+ if (offset)
+ *(unsigned long *)((unsigned long)regs + offset) = val;
+}
+
+static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
+ struct pt_regs *regs)
+{
+ int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+ int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
+
+ regs_set_gpr(regs, reg_err * sizeof(unsigned long), -EFAULT);
+ regs_set_gpr(regs, reg_zero * sizeof(unsigned long), 0);
+
+ regs->epc = get_ex_fixup(ex);
+ return true;
+}
-int fixup_exception(struct pt_regs *regs)
+bool fixup_exception(struct pt_regs *regs)
{
- const struct exception_table_entry *fixup;
+ const struct exception_table_entry *ex;
- fixup = search_exception_tables(regs->epc);
- if (!fixup)
- return 0;
+ ex = search_exception_tables(regs->epc);
+ if (!ex)
+ return false;
-#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
- if (regs->epc >= BPF_JIT_REGION_START && regs->epc < BPF_JIT_REGION_END)
- return rv_bpf_fixup_exception(fixup, regs);
-#endif
+ switch (ex->type) {
+ case EX_TYPE_FIXUP:
+ return ex_handler_fixup(ex, regs);
+ case EX_TYPE_BPF:
+ return ex_handler_bpf(ex, regs);
+ case EX_TYPE_UACCESS_ERR_ZERO:
+ return ex_handler_uaccess_err_zero(ex, regs);
+ }
- regs->epc = fixup->fixup;
- return 1;
+ BUG();
}
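The reworked exception table stores its fields as self-relative 32-bit offsets rather than absolute pointers (half the size on 64-bit), which is exactly what get_ex_fixup() decodes. A standalone sketch of the arithmetic, runnable in user space (field layout assumed purely for illustration):

#include <stdint.h>
#include <stdio.h>

/* Sketch: self-relative extable entry, as decoded by get_ex_fixup(). */
struct ex_entry {
	int32_t insn;   /* offset from &entry.insn to the faulting insn */
	int32_t fixup;  /* offset from &entry.fixup to the fixup code   */
};

static uintptr_t ex_fixup_addr(const struct ex_entry *ex)
{
	return (uintptr_t)&ex->fixup + ex->fixup;
}

int main(void)
{
	struct ex_entry ex = { .insn = 0, .fixup = 0x40 };

	printf("fixup lands at %#lx\n",
	       (unsigned long)ex_fixup_addr(&ex));
	return 0;
}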
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index aa08dd2f8fae..4e9efbe46d5f 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -31,7 +31,7 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
bust_spinlocks(0);
die(regs, "Oops");
- do_exit(SIGKILL);
+ make_task_dead(SIGKILL);
}
static inline void no_context(struct pt_regs *regs, unsigned long addr)
@@ -235,7 +235,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
* only copy the information from the master page table,
* nothing more.
*/
- if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
+ if (unlikely((addr >= VMALLOC_START) && (addr < VMALLOC_END))) {
vmalloc_fault(regs, code, addr);
return;
}
@@ -330,7 +330,7 @@ good_area:
if (fault_signal_pending(fault, regs))
return;
- if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
+ if (unlikely(fault & VM_FAULT_RETRY)) {
flags |= FAULT_FLAG_TRIED;
/*
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 24b2b8044602..c27294128e18 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -37,13 +37,19 @@ EXPORT_SYMBOL(kernel_map);
#define kernel_map (*(struct kernel_mapping *)XIP_FIXUP(&kernel_map))
#endif
+#ifdef CONFIG_64BIT
+u64 satp_mode = !IS_ENABLED(CONFIG_XIP_KERNEL) ? SATP_MODE_48 : SATP_MODE_39;
+#else
+u64 satp_mode = SATP_MODE_32;
+#endif
+EXPORT_SYMBOL(satp_mode);
+
+bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
+EXPORT_SYMBOL(pgtable_l4_enabled);
+
phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);
-#ifdef CONFIG_XIP_KERNEL
-extern char _xiprom[], _exiprom[], __data_loc;
-#endif
-
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
@@ -53,15 +59,6 @@ extern char _start[];
void *_dtb_early_va __initdata;
uintptr_t _dtb_early_pa __initdata;
-struct pt_alloc_ops {
- pte_t *(*get_pte_virt)(phys_addr_t pa);
- phys_addr_t (*alloc_pte)(uintptr_t va);
-#ifndef __PAGETABLE_PMD_FOLDED
- pmd_t *(*get_pmd_virt)(phys_addr_t pa);
- phys_addr_t (*alloc_pmd)(uintptr_t va);
-#endif
-};
-
static phys_addr_t dma32_phys_limit __initdata;
static void __init zone_sizes_init(void)
@@ -102,10 +99,14 @@ static void __init print_vm_layout(void)
(unsigned long)VMALLOC_END);
print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
(unsigned long)high_memory);
-#ifdef CONFIG_64BIT
- print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
- (unsigned long)ADDRESS_SPACE_END);
+ if (IS_ENABLED(CONFIG_64BIT)) {
+#ifdef CONFIG_KASAN
+ print_mlm("kasan", KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
+
+ print_mlm("kernel", (unsigned long)KERNEL_LINK_ADDR,
+ (unsigned long)ADDRESS_SPACE_END);
+ }
}
#else
static void print_vm_layout(void) { }
@@ -130,18 +131,8 @@ void __init mem_init(void)
print_vm_layout();
}
-/*
- * The default maximal physical memory size is -PAGE_OFFSET for 32-bit kernel,
- * whereas for 64-bit kernel, the end of the virtual address space is occupied
- * by the modules/BPF/kernel mappings which reduces the available size of the
- * linear mapping.
- * Limit the memory size via mem.
- */
-#ifdef CONFIG_64BIT
-static phys_addr_t memory_limit = -PAGE_OFFSET - SZ_4G;
-#else
-static phys_addr_t memory_limit = -PAGE_OFFSET;
-#endif
+/* Limit the memory size via mem. */
+static phys_addr_t memory_limit;
static int __init early_mem(char *p)
{
@@ -162,35 +153,31 @@ early_param("mem", early_mem);
static void __init setup_bootmem(void)
{
phys_addr_t vmlinux_end = __pa_symbol(&_end);
- phys_addr_t vmlinux_start = __pa_symbol(&_start);
- phys_addr_t __maybe_unused max_mapped_addr;
- phys_addr_t phys_ram_end;
+ phys_addr_t max_mapped_addr;
+ phys_addr_t phys_ram_end, vmlinux_start;
-#ifdef CONFIG_XIP_KERNEL
- vmlinux_start = __pa_symbol(&_sdata);
-#endif
+ if (IS_ENABLED(CONFIG_XIP_KERNEL))
+ vmlinux_start = __pa_symbol(&_sdata);
+ else
+ vmlinux_start = __pa_symbol(&_start);
memblock_enforce_memory_limit(memory_limit);
/*
- * Reserve from the start of the kernel to the end of the kernel
- */
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
- /*
* Make sure we align the reservation on PMD_SIZE since we will
* map the kernel in the linear mapping as read-only: we do not want
* any allocation to happen between _end and the next pmd aligned page.
*/
- vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
-#endif
+ if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
+ vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;
+ /*
+ * Reserve from the start of the kernel to the end of the kernel
+ */
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
-
phys_ram_end = memblock_end_of_DRAM();
-#ifndef CONFIG_64BIT
-#ifndef CONFIG_XIP_KERNEL
- phys_ram_base = memblock_start_of_DRAM();
-#endif
+ if (!IS_ENABLED(CONFIG_XIP_KERNEL))
+ phys_ram_base = memblock_start_of_DRAM();
/*
 * The memblock allocator is not aware of the fact that the last 4K bytes of
 * the addressable memory cannot be mapped because of IS_ERR_VALUE
@@ -200,10 +187,11 @@ static void __init setup_bootmem(void)
* address space is occupied by the kernel mapping then this check must
* be done as soon as the kernel mapping base address is determined.
*/
- max_mapped_addr = __pa(~(ulong)0);
- if (max_mapped_addr == (phys_ram_end - 1))
- memblock_set_current_limit(max_mapped_addr - 4096);
-#endif
+ if (!IS_ENABLED(CONFIG_64BIT)) {
+ max_mapped_addr = __pa(~(ulong)0);
+ if (max_mapped_addr == (phys_ram_end - 1))
+ memblock_set_current_limit(max_mapped_addr - 4096);
+ }
min_low_pfn = PFN_UP(phys_ram_base);
max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
@@ -229,13 +217,7 @@ static void __init setup_bootmem(void)
}
#ifdef CONFIG_MMU
-static struct pt_alloc_ops _pt_ops __initdata;
-
-#ifdef CONFIG_XIP_KERNEL
-#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&_pt_ops))
-#else
-#define pt_ops _pt_ops
-#endif
+struct pt_alloc_ops pt_ops __initdata;
unsigned long riscv_pfn_base __ro_after_init;
EXPORT_SYMBOL(riscv_pfn_base);
@@ -245,9 +227,12 @@ pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
static pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+static pud_t __maybe_unused early_dtb_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
static pmd_t __maybe_unused early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
#ifdef CONFIG_XIP_KERNEL
+#define pt_ops (*(struct pt_alloc_ops *)XIP_FIXUP(&pt_ops))
+#define riscv_pfn_base (*(unsigned long *)XIP_FIXUP(&riscv_pfn_base))
#define trampoline_pg_dir ((pgd_t *)XIP_FIXUP(trampoline_pg_dir))
#define fixmap_pte ((pte_t *)XIP_FIXUP(fixmap_pte))
#define early_pg_dir ((pgd_t *)XIP_FIXUP(early_pg_dir))
@@ -333,6 +318,16 @@ static pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
#define early_pmd ((pmd_t *)XIP_FIXUP(early_pmd))
#endif /* CONFIG_XIP_KERNEL */
+static pud_t trampoline_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pud_t fixmap_pud[PTRS_PER_PUD] __page_aligned_bss;
+static pud_t early_pud[PTRS_PER_PUD] __initdata __aligned(PAGE_SIZE);
+
+#ifdef CONFIG_XIP_KERNEL
+#define trampoline_pud ((pud_t *)XIP_FIXUP(trampoline_pud))
+#define fixmap_pud ((pud_t *)XIP_FIXUP(fixmap_pud))
+#define early_pud ((pud_t *)XIP_FIXUP(early_pud))
+#endif /* CONFIG_XIP_KERNEL */
+
static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
{
/* Before MMU is enabled */
@@ -352,7 +347,7 @@ static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
static phys_addr_t __init alloc_pmd_early(uintptr_t va)
{
- BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
+ BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT);
return (uintptr_t)early_pmd;
}
@@ -367,7 +362,8 @@ static phys_addr_t __init alloc_pmd_late(uintptr_t va)
unsigned long vaddr;
vaddr = __get_free_page(GFP_KERNEL);
- BUG_ON(!vaddr);
+ BUG_ON(!vaddr || !pgtable_pmd_page_ctor(virt_to_page(vaddr)));
+
return __pa(vaddr);
}
@@ -398,21 +394,97 @@ static void __init create_pmd_mapping(pmd_t *pmdp,
create_pte_mapping(ptep, va, pa, sz, prot);
}
-#define pgd_next_t pmd_t
-#define alloc_pgd_next(__va) pt_ops.alloc_pmd(__va)
-#define get_pgd_next_virt(__pa) pt_ops.get_pmd_virt(__pa)
+static pud_t *__init get_pud_virt_early(phys_addr_t pa)
+{
+ return (pud_t *)((uintptr_t)pa);
+}
+
+static pud_t *__init get_pud_virt_fixmap(phys_addr_t pa)
+{
+ clear_fixmap(FIX_PUD);
+ return (pud_t *)set_fixmap_offset(FIX_PUD, pa);
+}
+
+static pud_t *__init get_pud_virt_late(phys_addr_t pa)
+{
+ return (pud_t *)__va(pa);
+}
+
+static phys_addr_t __init alloc_pud_early(uintptr_t va)
+{
+ /* Only one PUD is available for early mapping */
+ BUG_ON((va - kernel_map.virt_addr) >> PGDIR_SHIFT);
+
+ return (uintptr_t)early_pud;
+}
+
+static phys_addr_t __init alloc_pud_fixmap(uintptr_t va)
+{
+ return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+}
+
+static phys_addr_t alloc_pud_late(uintptr_t va)
+{
+ unsigned long vaddr;
+
+ vaddr = __get_free_page(GFP_KERNEL);
+ BUG_ON(!vaddr);
+ return __pa(vaddr);
+}
+
+static void __init create_pud_mapping(pud_t *pudp,
+ uintptr_t va, phys_addr_t pa,
+ phys_addr_t sz, pgprot_t prot)
+{
+ pmd_t *nextp;
+ phys_addr_t next_phys;
+ uintptr_t pud_index = pud_index(va);
+
+ if (sz == PUD_SIZE) {
+ if (pud_val(pudp[pud_index]) == 0)
+ pudp[pud_index] = pfn_pud(PFN_DOWN(pa), prot);
+ return;
+ }
+
+ if (pud_val(pudp[pud_index]) == 0) {
+ next_phys = pt_ops.alloc_pmd(va);
+ pudp[pud_index] = pfn_pud(PFN_DOWN(next_phys), PAGE_TABLE);
+ nextp = pt_ops.get_pmd_virt(next_phys);
+ memset(nextp, 0, PAGE_SIZE);
+ } else {
+ next_phys = PFN_PHYS(_pud_pfn(pudp[pud_index]));
+ nextp = pt_ops.get_pmd_virt(next_phys);
+ }
+
+ create_pmd_mapping(nextp, va, pa, sz, prot);
+}
+
+#define pgd_next_t pud_t
+#define alloc_pgd_next(__va) (pgtable_l4_enabled ? \
+ pt_ops.alloc_pud(__va) : pt_ops.alloc_pmd(__va))
+#define get_pgd_next_virt(__pa) (pgtable_l4_enabled ? \
+ pt_ops.get_pud_virt(__pa) : (pgd_next_t *)pt_ops.get_pmd_virt(__pa))
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
- create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
-#define fixmap_pgd_next fixmap_pmd
+ (pgtable_l4_enabled ? \
+ create_pud_mapping(__nextp, __va, __pa, __sz, __prot) : \
+ create_pmd_mapping((pmd_t *)__nextp, __va, __pa, __sz, __prot))
+#define fixmap_pgd_next (pgtable_l4_enabled ? \
+ (uintptr_t)fixmap_pud : (uintptr_t)fixmap_pmd)
+#define trampoline_pgd_next (pgtable_l4_enabled ? \
+ (uintptr_t)trampoline_pud : (uintptr_t)trampoline_pmd)
+#define early_dtb_pgd_next (pgtable_l4_enabled ? \
+ (uintptr_t)early_dtb_pud : (uintptr_t)early_dtb_pmd)
#else
#define pgd_next_t pte_t
#define alloc_pgd_next(__va) pt_ops.alloc_pte(__va)
#define get_pgd_next_virt(__pa) pt_ops.get_pte_virt(__pa)
#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
-#define fixmap_pgd_next fixmap_pte
+#define fixmap_pgd_next ((uintptr_t)fixmap_pte)
+#define early_dtb_pgd_next ((uintptr_t)early_dtb_pmd)
+#define create_pud_mapping(__pmdp, __va, __pa, __sz, __prot)
#define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot)
-#endif
+#endif /* __PAGETABLE_PMD_FOLDED */
void __init create_pgd_mapping(pgd_t *pgdp,
uintptr_t va, phys_addr_t pa,
@@ -451,6 +523,9 @@ static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
}
#ifdef CONFIG_XIP_KERNEL
+#define phys_ram_base (*(phys_addr_t *)XIP_FIXUP(&phys_ram_base))
+extern char _xiprom[], _exiprom[], __data_loc;
+
/* called from head.S with MMU off */
asmlinkage void __init __copy_data(void)
{
@@ -499,6 +574,57 @@ static __init pgprot_t pgprot_from_va(uintptr_t va)
}
#endif /* CONFIG_STRICT_KERNEL_RWX */
+#ifdef CONFIG_64BIT
+static void __init disable_pgtable_l4(void)
+{
+ pgtable_l4_enabled = false;
+ kernel_map.page_offset = PAGE_OFFSET_L3;
+ satp_mode = SATP_MODE_39;
+}
+
+/*
+ * There is a simple way to determine if 4-level paging is supported by
+ * the underlying hardware: establish a 1:1 mapping in 4-level page table
+ * mode, then read SATP to see if the configuration was taken into
+ * account, meaning sv48 is supported.
+ */
+static __init void set_satp_mode(void)
+{
+ u64 identity_satp, hw_satp;
+ uintptr_t set_satp_mode_pmd;
+
+ set_satp_mode_pmd = ((unsigned long)set_satp_mode) & PMD_MASK;
+ create_pgd_mapping(early_pg_dir,
+ set_satp_mode_pmd, (uintptr_t)early_pud,
+ PGDIR_SIZE, PAGE_TABLE);
+ create_pud_mapping(early_pud,
+ set_satp_mode_pmd, (uintptr_t)early_pmd,
+ PUD_SIZE, PAGE_TABLE);
+ /* Handle the case where set_satp_mode straddles 2 PMDs */
+ create_pmd_mapping(early_pmd,
+ set_satp_mode_pmd, set_satp_mode_pmd,
+ PMD_SIZE, PAGE_KERNEL_EXEC);
+ create_pmd_mapping(early_pmd,
+ set_satp_mode_pmd + PMD_SIZE,
+ set_satp_mode_pmd + PMD_SIZE,
+ PMD_SIZE, PAGE_KERNEL_EXEC);
+
+ identity_satp = PFN_DOWN((uintptr_t)&early_pg_dir) | satp_mode;
+
+ local_flush_tlb_all();
+ csr_write(CSR_SATP, identity_satp);
+ hw_satp = csr_swap(CSR_SATP, 0ULL);
+ local_flush_tlb_all();
+
+ if (hw_satp != identity_satp)
+ disable_pgtable_l4();
+
+ memset(early_pg_dir, 0, PAGE_SIZE);
+ memset(early_pud, 0, PAGE_SIZE);
+ memset(early_pmd, 0, PAGE_SIZE);
+}
+#endif
+
/*
* setup_vm() is called from head.S with MMU-off.
*
@@ -563,10 +689,15 @@ static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
- IS_ENABLED(CONFIG_64BIT) ? (uintptr_t)early_dtb_pmd : pa,
+ IS_ENABLED(CONFIG_64BIT) ? early_dtb_pgd_next : pa,
PGDIR_SIZE,
IS_ENABLED(CONFIG_64BIT) ? PAGE_TABLE : PAGE_KERNEL);
+ if (pgtable_l4_enabled) {
+ create_pud_mapping(early_dtb_pud, DTB_EARLY_BASE_VA,
+ (uintptr_t)early_dtb_pmd, PUD_SIZE, PAGE_TABLE);
+ }
+
if (IS_ENABLED(CONFIG_64BIT)) {
create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
pa, PMD_SIZE, PAGE_KERNEL);
@@ -588,11 +719,64 @@ static void __init create_fdt_early_page_table(pgd_t *pgdir, uintptr_t dtb_pa)
dtb_early_pa = dtb_pa;
}
+/*
+ * MMU is not enabled, the page tables are allocated directly using
+ * early_pmd/pud/p4d and the address returned is the physical one.
+ */
+void __init pt_ops_set_early(void)
+{
+ pt_ops.alloc_pte = alloc_pte_early;
+ pt_ops.get_pte_virt = get_pte_virt_early;
+#ifndef __PAGETABLE_PMD_FOLDED
+ pt_ops.alloc_pmd = alloc_pmd_early;
+ pt_ops.get_pmd_virt = get_pmd_virt_early;
+ pt_ops.alloc_pud = alloc_pud_early;
+ pt_ops.get_pud_virt = get_pud_virt_early;
+#endif
+}
+
+/*
+ * MMU is enabled, but the page table setup is not complete yet.
+ * The fixmap page table alloc functions must be used as a means to
+ * temporarily map the allocated physical pages, since the linear mapping
+ * does not exist yet.
+ *
+ * Note that this function is called with the MMU disabled, hence
+ * kernel_mapping_pa_to_va, but the function pointers it installs will be
+ * used as described above once the MMU is enabled.
+ */
+void __init pt_ops_set_fixmap(void)
+{
+ pt_ops.alloc_pte = kernel_mapping_pa_to_va((uintptr_t)alloc_pte_fixmap);
+ pt_ops.get_pte_virt = kernel_mapping_pa_to_va((uintptr_t)get_pte_virt_fixmap);
+#ifndef __PAGETABLE_PMD_FOLDED
+ pt_ops.alloc_pmd = kernel_mapping_pa_to_va((uintptr_t)alloc_pmd_fixmap);
+ pt_ops.get_pmd_virt = kernel_mapping_pa_to_va((uintptr_t)get_pmd_virt_fixmap);
+ pt_ops.alloc_pud = kernel_mapping_pa_to_va((uintptr_t)alloc_pud_fixmap);
+ pt_ops.get_pud_virt = kernel_mapping_pa_to_va((uintptr_t)get_pud_virt_fixmap);
+#endif
+}
+
+/*
+ * MMU is enabled and page table setup is complete, so from now on we can
+ * use the generic page allocation functions to set up page tables.
+ */
+void __init pt_ops_set_late(void)
+{
+ pt_ops.alloc_pte = alloc_pte_late;
+ pt_ops.get_pte_virt = get_pte_virt_late;
+#ifndef __PAGETABLE_PMD_FOLDED
+ pt_ops.alloc_pmd = alloc_pmd_late;
+ pt_ops.get_pmd_virt = get_pmd_virt_late;
+ pt_ops.alloc_pud = alloc_pud_late;
+ pt_ops.get_pud_virt = get_pud_virt_late;
+#endif
+}
+
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
pmd_t __maybe_unused fix_bmap_spmd, fix_bmap_epmd;
kernel_map.virt_addr = KERNEL_LINK_ADDR;
+ kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
#ifdef CONFIG_XIP_KERNEL
kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
@@ -607,11 +791,24 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif
+
+#if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
+ set_satp_mode();
+#endif
+
kernel_map.va_pa_offset = PAGE_OFFSET - kernel_map.phys_addr;
kernel_map.va_kernel_pa_offset = kernel_map.virt_addr - kernel_map.phys_addr;
riscv_pfn_base = PFN_DOWN(kernel_map.phys_addr);
+ /*
+ * The default maximal physical memory size is KERN_VIRT_SIZE for 32-bit
+ * kernel, whereas for 64-bit kernel, the end of the virtual address
+ * space is occupied by the modules/BPF/kernel mappings which reduces
+ * the available size of the linear mapping.
+ */
+ memory_limit = KERN_VIRT_SIZE - (IS_ENABLED(CONFIG_64BIT) ? SZ_4G : 0);
+
/* Sanity check alignment and size */
BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
BUG_ON((kernel_map.phys_addr % PMD_SIZE) != 0);
@@ -624,23 +821,25 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
BUG_ON((kernel_map.virt_addr + kernel_map.size) > ADDRESS_SPACE_END - SZ_4K);
#endif
- pt_ops.alloc_pte = alloc_pte_early;
- pt_ops.get_pte_virt = get_pte_virt_early;
-#ifndef __PAGETABLE_PMD_FOLDED
- pt_ops.alloc_pmd = alloc_pmd_early;
- pt_ops.get_pmd_virt = get_pmd_virt_early;
-#endif
+ pt_ops_set_early();
+
/* Setup early PGD for fixmap */
create_pgd_mapping(early_pg_dir, FIXADDR_START,
- (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
+ fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
#ifndef __PAGETABLE_PMD_FOLDED
- /* Setup fixmap PMD */
+ /* Setup fixmap PUD and PMD */
+ if (pgtable_l4_enabled)
+ create_pud_mapping(fixmap_pud, FIXADDR_START,
+ (uintptr_t)fixmap_pmd, PUD_SIZE, PAGE_TABLE);
create_pmd_mapping(fixmap_pmd, FIXADDR_START,
(uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
/* Setup trampoline PGD and PMD */
create_pgd_mapping(trampoline_pg_dir, kernel_map.virt_addr,
- (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
+ trampoline_pgd_next, PGDIR_SIZE, PAGE_TABLE);
+ if (pgtable_l4_enabled)
+ create_pud_mapping(trampoline_pud, kernel_map.virt_addr,
+ (uintptr_t)trampoline_pmd, PUD_SIZE, PAGE_TABLE);
#ifdef CONFIG_XIP_KERNEL
create_pmd_mapping(trampoline_pmd, kernel_map.virt_addr,
kernel_map.xiprom, PMD_SIZE, PAGE_KERNEL_EXEC);
@@ -668,7 +867,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
* Boot-time fixmap can only handle PMD_SIZE mappings. Thus, the
* boot-ioremap range cannot span multiple PMDs.
*/
- BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+ BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#ifndef __PAGETABLE_PMD_FOLDED
@@ -693,6 +892,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
}
#endif
+
+ pt_ops_set_fixmap();
}
static void __init setup_vm_final(void)
@@ -701,16 +902,6 @@ static void __init setup_vm_final(void)
phys_addr_t pa, start, end;
u64 i;
- /**
- * MMU is enabled at this point. But page table setup is not complete yet.
- * fixmap page table alloc functions should be used at this point
- */
- pt_ops.alloc_pte = alloc_pte_fixmap;
- pt_ops.get_pte_virt = get_pte_virt_fixmap;
-#ifndef __PAGETABLE_PMD_FOLDED
- pt_ops.alloc_pmd = alloc_pmd_fixmap;
- pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
-#endif
/* Setup swapper PGD for fixmap */
create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
__pa_symbol(fixmap_pgd_next),
@@ -735,26 +926,24 @@ static void __init setup_vm_final(void)
}
}
-#ifdef CONFIG_64BIT
/* Map the kernel */
- create_kernel_page_table(swapper_pg_dir, false);
+ if (IS_ENABLED(CONFIG_64BIT))
+ create_kernel_page_table(swapper_pg_dir, false);
+
+#ifdef CONFIG_KASAN
+ kasan_swapper_init();
#endif
/* Clear fixmap PTE and PMD mappings */
clear_fixmap(FIX_PTE);
clear_fixmap(FIX_PMD);
+ clear_fixmap(FIX_PUD);
/* Move to swapper page table */
- csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
+ csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | satp_mode);
local_flush_tlb_all();
- /* generic page allocation functions must be used to setup page table */
- pt_ops.alloc_pte = alloc_pte_late;
- pt_ops.get_pte_virt = get_pte_virt_late;
-#ifndef __PAGETABLE_PMD_FOLDED
- pt_ops.alloc_pmd = alloc_pmd_late;
- pt_ops.get_pmd_virt = get_pmd_virt_late;
-#endif
+ pt_ops_set_late();
}
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
@@ -790,12 +979,10 @@ static void __init reserve_crashkernel(void)
* since it doesn't make much sense and we have limited memory
* resources.
*/
-#ifdef CONFIG_CRASH_DUMP
if (is_kdump_kernel()) {
pr_info("crashkernel: ignoring reservation request\n");
return;
}
-#endif
ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
&crash_size, &crash_base);
@@ -812,13 +999,22 @@ static void __init reserve_crashkernel(void)
/*
* Current riscv boot protocol requires 2MB alignment for
* RV64 and 4MB alignment for RV32 (hugepage size)
+ *
+ * Try to allocate from 32-bit addressable physical memory so that
+ * swiotlb can work in the crash kernel.
*/
crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
- search_start, search_end);
+ search_start,
+ min(search_end, (unsigned long) SZ_4G));
if (crash_base == 0) {
- pr_warn("crashkernel: couldn't allocate %lldKB\n",
- crash_size >> 10);
- return;
+ /* Try again without restricting the region to 32-bit addressable memory */
+ crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
+ search_start, search_end);
+ if (crash_base == 0) {
+ pr_warn("crashkernel: couldn't allocate %lldKB\n",
+ crash_size >> 10);
+ return;
+ }
}
pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 54294f83513d..f61f7ca6fe0f 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -11,45 +11,27 @@
#include <asm/fixmap.h>
#include <asm/pgalloc.h>
-extern pgd_t early_pg_dir[PTRS_PER_PGD];
-asmlinkage void __init kasan_early_init(void)
-{
- uintptr_t i;
- pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);
-
- BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
- KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
-
- for (i = 0; i < PTRS_PER_PTE; ++i)
- set_pte(kasan_early_shadow_pte + i,
- mk_pte(virt_to_page(kasan_early_shadow_page),
- PAGE_KERNEL));
-
- for (i = 0; i < PTRS_PER_PMD; ++i)
- set_pmd(kasan_early_shadow_pmd + i,
- pfn_pmd(PFN_DOWN
- (__pa((uintptr_t) kasan_early_shadow_pte)),
- __pgprot(_PAGE_TABLE)));
-
- for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
- i += PGDIR_SIZE, ++pgd)
- set_pgd(pgd,
- pfn_pgd(PFN_DOWN
- (__pa(((uintptr_t) kasan_early_shadow_pmd))),
- __pgprot(_PAGE_TABLE)));
-
- /* init for swapper_pg_dir */
- pgd = pgd_offset_k(KASAN_SHADOW_START);
-
- for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
- i += PGDIR_SIZE, ++pgd)
- set_pgd(pgd,
- pfn_pgd(PFN_DOWN
- (__pa(((uintptr_t) kasan_early_shadow_pmd))),
- __pgprot(_PAGE_TABLE)));
+/*
+ * Kasan shadow region must lie at a fixed address across sv39, sv48 and sv57
+ * which is right before the kernel.
+ *
+ * For sv39, the region is aligned on PGDIR_SIZE so we only need to populate
+ * the page global directory with kasan_early_shadow_pmd.
+ *
+ * For sv48 and sv57, the region is not aligned on PGDIR_SIZE so the mapping
+ * must be divided as follows:
+ * - the first PGD entry, although incomplete, is populated with
+ * kasan_early_shadow_pud/p4d
+ * - the PGD entries in the middle are populated with kasan_early_shadow_pud/p4d
+ * - the last PGD entry is shared with the kernel mapping, so it is populated
+ *   at the lower pud/p4d levels
+ *
+ * In addition, when shallow populating a kasan region (for example vmalloc),
+ * this region may also not be aligned on PGDIR_SIZE, so we must go down to
+ * the pud level too.
+ */
- local_flush_tlb_all();
-}
+extern pgd_t early_pg_dir[PTRS_PER_PGD];
static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
@@ -73,15 +55,19 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}
-static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
+static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned long end)
{
phys_addr_t phys_addr;
pmd_t *pmdp, *base_pmd;
unsigned long next;
- base_pmd = (pmd_t *)pgd_page_vaddr(*pgd);
- if (base_pmd == lm_alias(kasan_early_shadow_pmd))
+ if (pud_none(*pud)) {
base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
+ } else {
+ base_pmd = (pmd_t *)pud_pgtable(*pud);
+ if (base_pmd == lm_alias(kasan_early_shadow_pmd))
+ base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
+ }
pmdp = base_pmd + pmd_index(vaddr);
@@ -105,59 +91,207 @@ static void __init kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned
* it entirely, memblock could allocate a page at a physical address
* where KASAN is not populated yet and then we'd get a page fault.
*/
- set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
+ set_pud(pud, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
+}
+
+static void __init kasan_populate_pud(pgd_t *pgd,
+ unsigned long vaddr, unsigned long end,
+ bool early)
+{
+ phys_addr_t phys_addr;
+ pud_t *pudp, *base_pud;
+ unsigned long next;
+
+ if (early) {
+ /*
+ * We can't use pgd_page_vaddr here as it would return a linear
+ * mapping address but it is not mapped yet, but when populating
+ * early_pg_dir, we need the physical address and when populating
+ * swapper_pg_dir, we need the kernel virtual address so use
+ * pt_ops facility.
+ */
+ base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
+ } else {
+ base_pud = (pud_t *)pgd_page_vaddr(*pgd);
+ if (base_pud == lm_alias(kasan_early_shadow_pud))
+ base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
+ }
+
+ pudp = base_pud + pud_index(vaddr);
+
+ do {
+ next = pud_addr_end(vaddr, end);
+
+ if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
+ if (early) {
+ phys_addr = __pa(((uintptr_t)kasan_early_shadow_pmd));
+ set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
+ continue;
+ } else {
+ phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
+ if (phys_addr) {
+ set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
+ continue;
+ }
+ }
+ }
+
+ kasan_populate_pmd(pudp, vaddr, next);
+ } while (pudp++, vaddr = next, vaddr != end);
+
+ /*
+ * Wait for the whole PGD to be populated before setting the PGD in
+ * the page table, otherwise, if we did set the PGD before populating
+ * it entirely, memblock could allocate a page at a physical address
+ * where KASAN is not populated yet and then we'd get a page fault.
+ */
+ if (!early)
+ set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pud)), PAGE_TABLE));
}
-static void __init kasan_populate_pgd(unsigned long vaddr, unsigned long end)
+#define kasan_early_shadow_pgd_next (pgtable_l4_enabled ? \
+ (uintptr_t)kasan_early_shadow_pud : \
+ (uintptr_t)kasan_early_shadow_pmd)
+#define kasan_populate_pgd_next(pgdp, vaddr, next, early) \
+ (pgtable_l4_enabled ? \
+ kasan_populate_pud(pgdp, vaddr, next, early) : \
+ kasan_populate_pmd((pud_t *)pgdp, vaddr, next))
+
+static void __init kasan_populate_pgd(pgd_t *pgdp,
+ unsigned long vaddr, unsigned long end,
+ bool early)
{
phys_addr_t phys_addr;
- pgd_t *pgdp = pgd_offset_k(vaddr);
unsigned long next;
do {
next = pgd_addr_end(vaddr, end);
- /*
- * pgdp can't be none since kasan_early_init initialized all KASAN
- * shadow region with kasan_early_shadow_pmd: if this is stillthe case,
- * that means we can try to allocate a hugepage as a replacement.
- */
- if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
- IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
- phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
- if (phys_addr) {
- set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
+ if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
+ if (early) {
+ phys_addr = __pa((uintptr_t)kasan_early_shadow_pgd_next);
+ set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
continue;
+ } else if (pgd_page_vaddr(*pgdp) ==
+ (unsigned long)lm_alias(kasan_early_shadow_pgd_next)) {
+ /*
+ * pgdp can't be none since kasan_early_init
+ * initialized all KASAN shadow region with
+ * kasan_early_shadow_pud: if this is still the
+ * case, that means we can try to allocate a
+ * hugepage as a replacement.
+ */
+ phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
+ if (phys_addr) {
+ set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
+ continue;
+ }
}
}
- kasan_populate_pmd(pgdp, vaddr, next);
+ kasan_populate_pgd_next(pgdp, vaddr, next, early);
} while (pgdp++, vaddr = next, vaddr != end);
}
+asmlinkage void __init kasan_early_init(void)
+{
+ uintptr_t i;
+
+ BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
+ KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
+
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_early_shadow_pte + i,
+ mk_pte(virt_to_page(kasan_early_shadow_page),
+ PAGE_KERNEL));
+
+ for (i = 0; i < PTRS_PER_PMD; ++i)
+ set_pmd(kasan_early_shadow_pmd + i,
+ pfn_pmd(PFN_DOWN
+ (__pa((uintptr_t)kasan_early_shadow_pte)),
+ PAGE_TABLE));
+
+ if (pgtable_l4_enabled) {
+ for (i = 0; i < PTRS_PER_PUD; ++i)
+ set_pud(kasan_early_shadow_pud + i,
+ pfn_pud(PFN_DOWN
+ (__pa(((uintptr_t)kasan_early_shadow_pmd))),
+ PAGE_TABLE));
+ }
+
+ kasan_populate_pgd(early_pg_dir + pgd_index(KASAN_SHADOW_START),
+ KASAN_SHADOW_START, KASAN_SHADOW_END, true);
+
+ local_flush_tlb_all();
+}
+
+void __init kasan_swapper_init(void)
+{
+ kasan_populate_pgd(pgd_offset_k(KASAN_SHADOW_START),
+ KASAN_SHADOW_START, KASAN_SHADOW_END, true);
+
+ local_flush_tlb_all();
+}
+
static void __init kasan_populate(void *start, void *end)
{
unsigned long vaddr = (unsigned long)start & PAGE_MASK;
unsigned long vend = PAGE_ALIGN((unsigned long)end);
- kasan_populate_pgd(vaddr, vend);
+ kasan_populate_pgd(pgd_offset_k(vaddr), vaddr, vend, false);
local_flush_tlb_all();
memset(start, KASAN_SHADOW_INIT, end - start);
}
+static void __init kasan_shallow_populate_pud(pgd_t *pgdp,
+ unsigned long vaddr, unsigned long end,
+ bool kasan_populate)
+{
+ unsigned long next;
+ pud_t *pudp, *base_pud;
+ pmd_t *base_pmd;
+ bool is_kasan_pmd;
+
+ base_pud = (pud_t *)pgd_page_vaddr(*pgdp);
+ pudp = base_pud + pud_index(vaddr);
+
+ if (kasan_populate)
+ memcpy(base_pud, (void *)kasan_early_shadow_pgd_next,
+ sizeof(pud_t) * PTRS_PER_PUD);
+
+ do {
+ next = pud_addr_end(vaddr, end);
+ is_kasan_pmd = (pud_pgtable(*pudp) == lm_alias(kasan_early_shadow_pmd));
+
+ if (is_kasan_pmd) {
+ base_pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ set_pud(pudp, pfn_pud(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
+ }
+ } while (pudp++, vaddr = next, vaddr != end);
+}
+
static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
{
unsigned long next;
void *p;
pgd_t *pgd_k = pgd_offset_k(vaddr);
+ bool is_kasan_pgd_next;
do {
next = pgd_addr_end(vaddr, end);
- if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
+ is_kasan_pgd_next = (pgd_page_vaddr(*pgd_k) ==
+ (unsigned long)lm_alias(kasan_early_shadow_pgd_next));
+
+ if (is_kasan_pgd_next) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
+
+ if (IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE)
+ continue;
+
+ kasan_shallow_populate_pud(pgd_k, vaddr, next, is_kasan_pgd_next);
} while (pgd_k++, vaddr = next, vaddr != end);
}
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 64f8201237c2..37ed760d007c 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -32,7 +32,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
unsigned long size, unsigned long stride)
{
struct cpumask *cmask = mm_cpumask(mm);
- struct cpumask hmask;
unsigned int cpuid;
bool broadcast;
@@ -46,9 +45,7 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
unsigned long asid = atomic_long_read(&mm->context.id);
if (broadcast) {
- riscv_cpuid_to_hartid_mask(cmask, &hmask);
- sbi_remote_sfence_vma_asid(cpumask_bits(&hmask),
- start, size, asid);
+ sbi_remote_sfence_vma_asid(cmask, start, size, asid);
} else if (size <= stride) {
local_flush_tlb_page_asid(start, asid);
} else {
@@ -56,9 +53,7 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start,
}
} else {
if (broadcast) {
- riscv_cpuid_to_hartid_mask(cmask, &hmask);
- sbi_remote_sfence_vma(cpumask_bits(&hmask),
- start, size);
+ sbi_remote_sfence_vma(cmask, start, size);
} else if (size <= stride) {
local_flush_tlb_page(start);
} else {
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 603630b6f3c5..0bcda99d1d68 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -458,10 +458,8 @@ static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
-int rv_bpf_fixup_exception(const struct exception_table_entry *ex,
- struct pt_regs *regs);
-int rv_bpf_fixup_exception(const struct exception_table_entry *ex,
- struct pt_regs *regs)
+bool ex_handler_bpf(const struct exception_table_entry *ex,
+ struct pt_regs *regs)
{
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
int regs_offset = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
@@ -469,7 +467,7 @@ int rv_bpf_fixup_exception(const struct exception_table_entry *ex,
*(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0;
regs->epc = (unsigned long)&ex->fixup - offset;
- return 1;
+ return true;
}
/* For accesses to BTF pointers, add an entry to the exception table */
@@ -499,7 +497,7 @@ static int add_exception_handler(const struct bpf_insn *insn,
offset = pc - (long)&ex->insn;
if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
return -ERANGE;
- ex->insn = pc;
+ ex->insn = offset;
/*
* Since the extable follows the program, the fixup offset is always
@@ -515,6 +513,7 @@ static int add_exception_handler(const struct bpf_insn *insn,
ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
+ ex->type = EX_TYPE_BPF;
ctx->nexentries++;
return 0;
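The ex->insn fix here matches the relative extable format introduced above: the JIT now stores the offset from the entry to the faulting instruction rather than the absolute pc (necessarily negative, since the extable is emitted after the program, hence the offset >= 0 check). Decoding is the mirror image of get_ex_fixup():

/* Sketch: recovering the faulting pc from a relative BPF extable entry. */
unsigned long insn_addr = (unsigned long)&ex->insn + ex->insn;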
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f6a9475cbc8c..be9f39fd06df 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -127,7 +127,6 @@ config S390
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_VULNERABILITIES
select GENERIC_ENTRY
- select GENERIC_FIND_FIRST_BIT
select GENERIC_GETTIMEOFDAY
select GENERIC_PTDUMP
select GENERIC_SMP_IDLE_THREAD
@@ -946,6 +945,9 @@ config S390_GUEST
endmenu
+config S390_MODULES_SANITY_TEST_HELPERS
+ def_bool n
+
menu "Selftests"
config S390_UNWIND_SELFTEST
@@ -972,4 +974,16 @@ config S390_KPROBES_SANITY_TEST
Say N if you are unsure.
+config S390_MODULES_SANITY_TEST
+ def_tristate n
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ prompt "Enable s390 specific modules tests"
+ select S390_MODULES_SANITY_TEST_HELPERS
+ help
+ This option enables an s390-specific modules test. This option is
+ not useful for distributions or general kernels, but only for
+ kernel developers working on architecture code.
+
+ Say N if you are unsure.
endmenu
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 3b860061e84d..d04e0e7de0b3 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -58,8 +58,6 @@ OBJCOPYFLAGS_vmlinux.bin := -O binary --remove-section=.comment --remove-section
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
-vmlinux.bin.all-y := $(obj)/vmlinux.bin
-
suffix-$(CONFIG_KERNEL_GZIP) := .gz
suffix-$(CONFIG_KERNEL_BZIP2) := .bz2
suffix-$(CONFIG_KERNEL_LZ4) := .lz4
@@ -68,20 +66,20 @@ suffix-$(CONFIG_KERNEL_LZO) := .lzo
suffix-$(CONFIG_KERNEL_XZ) := .xz
suffix-$(CONFIG_KERNEL_ZSTD) := .zst
-$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
-$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,bzip2)
-$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,lz4)
-$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,lzma)
-$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,lzo)
-$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,xzkern)
-$(obj)/vmlinux.bin.zst: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,zstd22)
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,bzip2_with_size)
+$(obj)/vmlinux.bin.lz4: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lz4_with_size)
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzma_with_size)
+$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzo_with_size)
+$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,xzkern_with_size)
+$(obj)/vmlinux.bin.zst: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,zstd22_with_size)
OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed
$(obj)/piggy.o: $(obj)/vmlinux.bin$(suffix-y) FORCE
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 354e51dcb3e2..498bed9b261b 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -63,6 +63,7 @@ CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m
+CONFIG_S390_MODULES_SANITY_TEST=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y
@@ -96,8 +97,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_CLEANCACHE=y
-CONFIG_FRONTSWAP=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_SYSFS=y
@@ -110,6 +109,7 @@ CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PERCPU_STATS=y
CONFIG_GUP_TEST=y
+CONFIG_ANON_VMA_NAME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -117,7 +117,6 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
-CONFIG_NET_SWITCHDEV=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_INET=y
@@ -186,7 +185,6 @@ CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y
CONFIG_NFT_CT=m
-CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
@@ -392,6 +390,7 @@ CONFIG_OPENVSWITCH=m
CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m
+CONFIG_NET_SWITCHDEV=y
CONFIG_CGROUP_NET_PRIO=y
CONFIG_NET_PKTGEN=m
CONFIG_PCI=y
@@ -401,6 +400,7 @@ CONFIG_PCI_IOV=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_SAFE=y
CONFIG_CONNECTOR=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
@@ -502,6 +502,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_ENGLEDER is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
@@ -512,7 +513,6 @@ CONFIG_NLMON=m
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
-CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -543,6 +543,7 @@ CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VERTEXCOM is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set
@@ -593,6 +594,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
+# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
@@ -757,9 +759,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_STATS=y
-CONFIG_CRYPTO_LIB_BLAKE2S=m
-CONFIG_CRYPTO_LIB_CURVE25519=m
-CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
@@ -775,6 +774,8 @@ CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_CRC32_SELFTEST=y
CONFIG_CRC4=m
CONFIG_CRC7=m
@@ -808,7 +809,6 @@ CONFIG_SLUB_DEBUG_ON=y
CONFIG_SLUB_STATS=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_VM=y
-CONFIG_DEBUG_VM_VMACACHE=y
CONFIG_DEBUG_VM_PGFLAGS=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
@@ -820,12 +820,11 @@ CONFIG_PANIC_ON_OOPS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_WQ_WATCHDOG=y
CONFIG_TEST_LOCKUP=m
-CONFIG_DEBUG_TIMEKEEPING=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
-CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
+CONFIG_DEBUG_IRQFLAGS=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 8dee6c3782f3..61e36b999f67 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -61,6 +61,7 @@ CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
CONFIG_S390_UNWIND_SELFTEST=m
CONFIG_S390_KPROBES_SANITY_TEST=m
+CONFIG_S390_MODULES_SANITY_TEST=m
CONFIG_KPROBES=y
CONFIG_JUMP_LABEL=y
# CONFIG_GCC_PLUGINS is not set
@@ -91,8 +92,6 @@ CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_CLEANCACHE=y
-CONFIG_FRONTSWAP=y
CONFIG_CMA_SYSFS=y
CONFIG_CMA_AREAS=7
CONFIG_MEM_SOFT_DIRTY=y
@@ -102,6 +101,7 @@ CONFIG_ZSMALLOC_STAT=y
CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
CONFIG_IDLE_PAGE_TRACKING=y
CONFIG_PERCPU_STATS=y
+CONFIG_ANON_VMA_NAME=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=m
@@ -109,7 +109,6 @@ CONFIG_UNIX=y
CONFIG_UNIX_DIAG=m
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=m
-CONFIG_NET_SWITCHDEV=y
CONFIG_SMC=m
CONFIG_SMC_DIAG=m
CONFIG_INET=y
@@ -178,7 +177,6 @@ CONFIG_NF_CT_NETLINK_TIMEOUT=m
CONFIG_NF_TABLES=m
CONFIG_NF_TABLES_INET=y
CONFIG_NFT_CT=m
-CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
@@ -383,6 +381,7 @@ CONFIG_OPENVSWITCH=m
CONFIG_VSOCKETS=m
CONFIG_VIRTIO_VSOCKETS=m
CONFIG_NETLINK_DIAG=m
+CONFIG_NET_SWITCHDEV=y
CONFIG_CGROUP_NET_PRIO=y
CONFIG_NET_PKTGEN=m
CONFIG_PCI=y
@@ -392,6 +391,7 @@ CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_S390=y
CONFIG_UEVENT_HELPER=y
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_SAFE=y
CONFIG_CONNECTOR=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
@@ -493,6 +493,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_ENGLEDER is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_GOOGLE is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
@@ -503,7 +504,6 @@ CONFIG_NLMON=m
CONFIG_MLX4_EN=m
CONFIG_MLX5_CORE=m
CONFIG_MLX5_CORE_EN=y
-CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MICROCHIP is not set
# CONFIG_NET_VENDOR_MICROSEMI is not set
@@ -534,6 +534,7 @@ CONFIG_MLX5_ESWITCH=y
# CONFIG_NET_VENDOR_SYNOPSYS is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VERTEXCOM is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_NET_VENDOR_XILINX is not set
@@ -583,6 +584,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_VIRTIO_INPUT=y
CONFIG_VHOST_NET=m
CONFIG_VHOST_VSOCK=m
+# CONFIG_SURFACE_PLATFORMS is not set
CONFIG_S390_CCW_IOMMU=y
CONFIG_S390_AP_IOMMU=y
CONFIG_EXT4_FS=y
@@ -744,9 +746,6 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
CONFIG_CRYPTO_USER_API_RNG=m
CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_STATS=y
-CONFIG_CRYPTO_LIB_BLAKE2S=m
-CONFIG_CRYPTO_LIB_CURVE25519=m
-CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_ZCRYPT=m
CONFIG_PKEY=m
CONFIG_CRYPTO_PAES_S390=m
@@ -763,6 +762,8 @@ CONFIG_CRYPTO_CRC32_S390=y
CONFIG_CRYPTO_DEV_VIRTIO=m
CONFIG_CORDIC=m
CONFIG_PRIME_NUMBERS=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index eed3b9acfa71..c55c668dc3c7 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -1,6 +1,7 @@
# CONFIG_SWAP is not set
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
# CONFIG_CPU_ISOLATION is not set
# CONFIG_UTS_NS is not set
# CONFIG_TIME_NS is not set
@@ -34,6 +35,7 @@ CONFIG_NET=y
# CONFIG_PCPU_DEV_REFCNT is not set
# CONFIG_ETHTOOL_NETLINK is not set
CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_SAFE=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_DCSSBLK is not set
# CONFIG_DASD is not set
@@ -58,6 +60,7 @@ CONFIG_ZFCP=y
# CONFIG_HID is not set
# CONFIG_VIRTIO_MENU is not set
# CONFIG_VHOST_MENU is not set
+# CONFIG_SURFACE_PLATFORMS is not set
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
# CONFIG_INOTIFY_USER is not set
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 33f973ff9744..e8f15dbb89d0 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -20,6 +20,7 @@
static char local_guest[] = " ";
static char all_guests[] = "* ";
+static char *all_groups = all_guests;
static char *guest_query;
struct diag2fc_data {
@@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr)
memcpy(parm_list.userid, query, NAME_LEN);
ASCEBC(parm_list.userid, NAME_LEN);
- parm_list.addr = (unsigned long) addr ;
+ memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
+ ASCEBC(parm_list.aci_grp, NAME_LEN);
+ parm_list.addr = (unsigned long)addr;
parm_list.size = size;
parm_list.fmt = 0x02;
- memset(parm_list.aci_grp, 0x40, NAME_LEN);
rc = -1;
diag_stat_inc(DIAG_STAT_X2FC);
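For reference, the functional change in this hunk: the old memset() filled aci_grp with EBCDIC blanks (0x40), while the new code converts "* " with ASCEBC() so the first byte becomes the EBCDIC '*' wildcard group. A minimal user-space sketch under the assumption that the kernel's conversion table maps ASCII ' ' to 0x40 and '*' to 0x5c; the ascebc() helper below is a toy stand-in for ASCEBC(), not the kernel routine:

#include <assert.h>
#include <string.h>

#define NAME_LEN 8

/* toy stand-in for the kernel's ASCEBC(); it only maps the two
 * characters this example uses */
static void ascebc(unsigned char *buf, int len)
{
	for (int i = 0; i < len; i++)
		buf[i] = (buf[i] == '*') ? 0x5c : 0x40;
}

int main(void)
{
	unsigned char aci_grp[NAME_LEN];

	/* old code: EBCDIC blanks only */
	memset(aci_grp, 0x40, NAME_LEN);
	assert(aci_grp[0] == 0x40);

	/* new code: "* " padded to NAME_LEN, then converted, so the
	 * first byte becomes the EBCDIC '*' wildcard */
	memcpy(aci_grp, "*       ", NAME_LEN);
	ascebc(aci_grp, NAME_LEN);
	assert(aci_grp[0] == 0x5c && aci_grp[1] == 0x40);
	return 0;
}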
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 5a530c552c23..1d40630128a5 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -387,7 +387,6 @@ static inline int fls(unsigned int word)
#endif /* CONFIG_HAVE_MARCH_Z9_109_FEATURES */
#include <asm-generic/bitops/ffz.h>
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/le.h>
diff --git a/arch/s390/include/asm/cpu_mf.h b/arch/s390/include/asm/cpu_mf.h
index 0d90cbeb89b4..e3f12db46cfc 100644
--- a/arch/s390/include/asm/cpu_mf.h
+++ b/arch/s390/include/asm/cpu_mf.h
@@ -109,7 +109,9 @@ struct hws_basic_entry {
unsigned int AS:2; /* 29-30 PSW address-space control */
unsigned int I:1; /* 31 entry valid or invalid */
unsigned int CL:2; /* 32-33 Configuration Level */
- unsigned int:14;
+ unsigned int H:1; /* 34 Host Indicator */
+ unsigned int LS:1; /* 35 Limited Sampling */
+ unsigned int:12;
unsigned int prim_asn:16; /* primary ASN */
unsigned long long ia; /* Instruction Address */
unsigned long long gpp; /* Guest Program Parameter */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index a604d51acfc8..a22c9266ea05 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -1010,6 +1010,4 @@ static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
-void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu);
-
#endif
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index ce550d06abc3..d74e26b48604 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -47,53 +47,87 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n);
int __put_user_bad(void) __attribute__((noreturn));
int __get_user_bad(void) __attribute__((noreturn));
+union oac {
+ unsigned int val;
+ struct {
+ struct {
+ unsigned short key : 4;
+ unsigned short : 4;
+ unsigned short as : 2;
+ unsigned short : 4;
+ unsigned short k : 1;
+ unsigned short a : 1;
+ } oac1;
+ struct {
+ unsigned short key : 4;
+ unsigned short : 4;
+ unsigned short as : 2;
+ unsigned short : 4;
+ unsigned short k : 1;
+ unsigned short a : 1;
+ } oac2;
+ };
+};
+
#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
-#define __put_get_user_asm(to, from, size, insn) \
-({ \
- int __rc; \
- \
- asm volatile( \
- insn " 0,%[spec]\n" \
- "0: mvcos %[_to],%[_from],%[_size]\n" \
- "1: xr %[rc],%[rc]\n" \
- "2:\n" \
- ".pushsection .fixup, \"ax\"\n" \
- "3: lhi %[rc],%[retval]\n" \
- " jg 2b\n" \
- ".popsection\n" \
- EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
- : [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \
- : [_size] "d" (size), [_from] "Q" (*(from)), \
- [retval] "K" (-EFAULT), [spec] "K" (0x81UL) \
- : "cc", "0"); \
- __rc; \
+#define __put_get_user_asm(to, from, size, oac_spec) \
+({ \
+ int __rc; \
+ \
+ asm volatile( \
+ " lr 0,%[spec]\n" \
+ "0: mvcos %[_to],%[_from],%[_size]\n" \
+ "1: xr %[rc],%[rc]\n" \
+ "2:\n" \
+ ".pushsection .fixup, \"ax\"\n" \
+ "3: lhi %[rc],%[retval]\n" \
+ " jg 2b\n" \
+ ".popsection\n" \
+ EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
+ : [rc] "=&d" (__rc), [_to] "+Q" (*(to)) \
+ : [_size] "d" (size), [_from] "Q" (*(from)), \
+ [retval] "K" (-EFAULT), [spec] "d" (oac_spec.val) \
+ : "cc", "0"); \
+ __rc; \
})
+#define __put_user_asm(to, from, size) \
+ __put_get_user_asm(to, from, size, ((union oac) { \
+ .oac1.as = PSW_BITS_AS_SECONDARY, \
+ .oac1.a = 1 \
+ }))
+
+#define __get_user_asm(to, from, size) \
+ __put_get_user_asm(to, from, size, ((union oac) { \
+ .oac2.as = PSW_BITS_AS_SECONDARY, \
+ .oac2.a = 1 \
+ }))
+
static __always_inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
{
int rc;
switch (size) {
case 1:
- rc = __put_get_user_asm((unsigned char __user *)ptr,
- (unsigned char *)x,
- size, "llilh");
+ rc = __put_user_asm((unsigned char __user *)ptr,
+ (unsigned char *)x,
+ size);
break;
case 2:
- rc = __put_get_user_asm((unsigned short __user *)ptr,
- (unsigned short *)x,
- size, "llilh");
+ rc = __put_user_asm((unsigned short __user *)ptr,
+ (unsigned short *)x,
+ size);
break;
case 4:
- rc = __put_get_user_asm((unsigned int __user *)ptr,
- (unsigned int *)x,
- size, "llilh");
+ rc = __put_user_asm((unsigned int __user *)ptr,
+ (unsigned int *)x,
+ size);
break;
case 8:
- rc = __put_get_user_asm((unsigned long __user *)ptr,
- (unsigned long *)x,
- size, "llilh");
+ rc = __put_user_asm((unsigned long __user *)ptr,
+ (unsigned long *)x,
+ size);
break;
default:
__put_user_bad();
@@ -108,24 +142,24 @@ static __always_inline int __get_user_fn(void *x, const void __user *ptr, unsign
switch (size) {
case 1:
- rc = __put_get_user_asm((unsigned char *)x,
- (unsigned char __user *)ptr,
- size, "lghi");
+ rc = __get_user_asm((unsigned char *)x,
+ (unsigned char __user *)ptr,
+ size);
break;
case 2:
- rc = __put_get_user_asm((unsigned short *)x,
- (unsigned short __user *)ptr,
- size, "lghi");
+ rc = __get_user_asm((unsigned short *)x,
+ (unsigned short __user *)ptr,
+ size);
break;
case 4:
- rc = __put_get_user_asm((unsigned int *)x,
- (unsigned int __user *)ptr,
- size, "lghi");
+ rc = __get_user_asm((unsigned int *)x,
+ (unsigned int __user *)ptr,
+ size);
break;
case 8:
- rc = __put_get_user_asm((unsigned long *)x,
- (unsigned long __user *)ptr,
- size, "lghi");
+ rc = __get_user_asm((unsigned long *)x,
+ (unsigned long __user *)ptr,
+ size);
break;
default:
__get_user_bad();
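The union oac above replaces the magic 0x81 immediates: the old "llilh 0,0x81" (put path) placed 0x81 in the high halfword of %r0 and "lghi 0,0x81" (get path) placed it in the low halfword, selecting secondary address space plus access-register control for MVCOS operand 1 or 2 respectively. A minimal sketch of the packing, assuming s390x's big-endian, MSB-first bitfield layout and PSW_BITS_AS_SECONDARY == 2 (both taken from kernel headers, not from this hunk); it prints different values on little-endian hosts:

#include <stdio.h>

#define PSW_BITS_AS_SECONDARY 2	/* assumed value, from asm/ptrace.h */

union oac {
	unsigned int val;
	struct {
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac1;			/* controls MVCOS operand 1 (store) */
		struct {
			unsigned short key : 4;
			unsigned short	   : 4;
			unsigned short as  : 2;
			unsigned short	   : 4;
			unsigned short k   : 1;
			unsigned short a   : 1;
		} oac2;			/* controls MVCOS operand 2 (fetch) */
	};
};

int main(void)
{
	union oac put = { .oac1.as = PSW_BITS_AS_SECONDARY, .oac1.a = 1 };
	union oac get = { .oac2.as = PSW_BITS_AS_SECONDARY, .oac2.a = 1 };

	/* on s390x: put.val == 0x00810000 (what llilh 0,0x81 loaded) and
	 * get.val == 0x00000081 (what lghi 0,0x81 loaded) */
	printf("put spec %#010x, get spec %#010x\n", put.val, get.val);
	return 0;
}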
diff --git a/arch/s390/include/asm/uv.h b/arch/s390/include/asm/uv.h
index 72d3e49c2860..86218382d29c 100644
--- a/arch/s390/include/asm/uv.h
+++ b/arch/s390/include/asm/uv.h
@@ -91,23 +91,23 @@ struct uv_cb_header {
/* Query Ultravisor Information */
struct uv_cb_qui {
- struct uv_cb_header header;
- u64 reserved08;
- u64 inst_calls_list[4];
- u64 reserved30[2];
- u64 uv_base_stor_len;
- u64 reserved48;
- u64 conf_base_phys_stor_len;
- u64 conf_base_virt_stor_len;
- u64 conf_virt_var_stor_len;
- u64 cpu_stor_len;
- u32 reserved70[3];
- u32 max_num_sec_conf;
- u64 max_guest_stor_addr;
- u8 reserved88[158 - 136];
- u16 max_guest_cpu_id;
- u64 uv_feature_indications;
- u8 reserveda0[200 - 168];
+ struct uv_cb_header header; /* 0x0000 */
+ u64 reserved08; /* 0x0008 */
+ u64 inst_calls_list[4]; /* 0x0010 */
+ u64 reserved30[2]; /* 0x0030 */
+ u64 uv_base_stor_len; /* 0x0040 */
+ u64 reserved48; /* 0x0048 */
+ u64 conf_base_phys_stor_len; /* 0x0050 */
+ u64 conf_base_virt_stor_len; /* 0x0058 */
+ u64 conf_virt_var_stor_len; /* 0x0060 */
+ u64 cpu_stor_len; /* 0x0068 */
+ u32 reserved70[3]; /* 0x0070 */
+ u32 max_num_sec_conf; /* 0x007c */
+ u64 max_guest_stor_addr; /* 0x0080 */
+ u8 reserved88[158 - 136]; /* 0x0088 */
+ u16 max_guest_cpu_id; /* 0x009e */
+ u64 uv_feature_indications; /* 0x00a0 */
+ u8 reserveda8[200 - 168]; /* 0x00a8 */
} __packed __aligned(8);
/* Initialize Ultravisor */
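The renamed trailing pad is the substantive fix here: uv_feature_indications occupies 0x00a0-0x00a7, so the pad behind it starts at 0x00a8, not 0x00a0. The new offset comments can be checked mechanically; a host-side sketch, assuming the usual 8-byte uv_cb_header layout (four u16 fields), an assumption imported from asm/uv.h rather than shown in this hunk:

#include <stddef.h>
#include <stdint.h>

struct uv_cb_header {			/* assumed layout, 8 bytes */
	uint16_t len;
	uint16_t cmd;
	uint16_t rc;
	uint16_t rrc;
} __attribute__((packed));

struct uv_cb_qui {
	struct uv_cb_header header;		/* 0x0000 */
	uint64_t reserved08;			/* 0x0008 */
	uint64_t inst_calls_list[4];		/* 0x0010 */
	uint64_t reserved30[2];			/* 0x0030 */
	uint64_t uv_base_stor_len;		/* 0x0040 */
	uint64_t reserved48;			/* 0x0048 */
	uint64_t conf_base_phys_stor_len;	/* 0x0050 */
	uint64_t conf_base_virt_stor_len;	/* 0x0058 */
	uint64_t conf_virt_var_stor_len;	/* 0x0060 */
	uint64_t cpu_stor_len;			/* 0x0068 */
	uint32_t reserved70[3];			/* 0x0070 */
	uint32_t max_num_sec_conf;		/* 0x007c */
	uint64_t max_guest_stor_addr;		/* 0x0080 */
	uint8_t reserved88[158 - 136];		/* 0x0088 */
	uint16_t max_guest_cpu_id;		/* 0x009e */
	uint64_t uv_feature_indications;	/* 0x00a0 */
	uint8_t reserveda8[200 - 168];		/* 0x00a8 */
} __attribute__((packed, aligned(8)));

_Static_assert(offsetof(struct uv_cb_qui, max_guest_cpu_id) == 0x9e, "");
_Static_assert(offsetof(struct uv_cb_qui, uv_feature_indications) == 0xa0, "");
_Static_assert(offsetof(struct uv_cb_qui, reserveda8) == 0xa8, "");
_Static_assert(sizeof(struct uv_cb_qui) == 200, "");

int main(void) { return 0; }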
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index 0681c55e831d..1e3233eb510a 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -224,5 +224,5 @@ void __noreturn die(struct pt_regs *regs, const char *str)
if (panic_on_oops)
panic("Fatal exception: panic_on_oops");
oops_exit();
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index b01ba460b7ca..b032e556eeb7 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -33,18 +33,19 @@
#define DEBUGP(fmt , ...)
#endif
-#define PLT_ENTRY_SIZE 20
+#define PLT_ENTRY_SIZE 22
void *module_alloc(unsigned long size)
{
+ gfp_t gfp_mask = GFP_KERNEL;
void *p;
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
- GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ gfp_mask, PAGE_KERNEL_EXEC, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
__builtin_return_address(0));
- if (p && (kasan_module_alloc(p, size) < 0)) {
+ if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) {
vfree(p);
return NULL;
}
@@ -340,27 +341,26 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
case R_390_PLTOFF32: /* 32 bit offset from GOT to PLT. */
case R_390_PLTOFF64: /* 64 bit offset from GOT to PLT. */
if (info->plt_initialized == 0) {
- unsigned int insn[5];
- unsigned int *ip = me->core_layout.base +
- me->arch.plt_offset +
- info->plt_offset;
-
- insn[0] = 0x0d10e310; /* basr 1,0 */
- insn[1] = 0x100a0004; /* lg 1,10(1) */
+ unsigned char insn[PLT_ENTRY_SIZE];
+ char *plt_base;
+ char *ip;
+
+ plt_base = me->core_layout.base + me->arch.plt_offset;
+ ip = plt_base + info->plt_offset;
+ *(int *)insn = 0x0d10e310; /* basr 1,0 */
+ *(int *)&insn[4] = 0x100c0004; /* lg 1,12(1) */
if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
- unsigned int *ij;
- ij = me->core_layout.base +
- me->arch.plt_offset +
- me->arch.plt_size - PLT_ENTRY_SIZE;
- insn[2] = 0xa7f40000 + /* j __jump_r1 */
- (unsigned int)(u16)
- (((unsigned long) ij - 8 -
- (unsigned long) ip) / 2);
+ char *jump_r1;
+
+ jump_r1 = plt_base + me->arch.plt_size -
+ PLT_ENTRY_SIZE;
+ /* brcl 0xf,__jump_r1 */
+ *(short *)&insn[8] = 0xc0f4;
+ *(int *)&insn[10] = (jump_r1 - (ip + 8)) / 2;
} else {
- insn[2] = 0x07f10000; /* br %r1 */
+ *(int *)&insn[8] = 0x07f10000; /* br %r1 */
}
- insn[3] = (unsigned int) (val >> 32);
- insn[4] = (unsigned int) val;
+ *(long *)&insn[14] = val;
write(ip, insn, sizeof(insn));
info->plt_initialized = 1;
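The new PLT entry is built byte-wise because 22 is not a multiple of 4: the target address now lives as data at offset 14, loaded via lg %r1,12(%r1) (basr leaves %r1 pointing at offset 2, and 2 + 12 = 14). A hedged user-space sketch of the br-%r1 variant, assuming a big-endian host so the word stores lay the opcodes out in instruction order; the pointer casts mirror the kernel code and would want memcpy() on strict-alignment targets:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PLT_ENTRY_SIZE 22

static void build_plt_entry(unsigned char *ip, uint64_t target)
{
	unsigned char insn[PLT_ENTRY_SIZE];

	memset(insn, 0, sizeof(insn));	    /* pad bytes 12-13 stay dead */
	*(uint32_t *)&insn[0] = 0x0d10e310; /* basr %r1,0 + start of lg */
	*(uint32_t *)&insn[4] = 0x100c0004; /* ...lg %r1,12(%r1) */
	*(uint32_t *)&insn[8] = 0x07f10000; /* br %r1 + 2 pad bytes */
	memcpy(&insn[14], &target, 8);	    /* literal: branch target */
	memcpy(ip, insn, sizeof(insn));
}

int main(void)
{
	unsigned char buf[PLT_ENTRY_SIZE];

	build_plt_entry(buf, 0x12345678aabbccddULL);
	for (int i = 0; i < PLT_ENTRY_SIZE; i++)
		printf("%02x%s", buf[i], (i % 8 == 7) ? "\n" : " ");
	printf("\n");
	return 0;
}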
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 1cf1e37553e8..651a51914e34 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -166,7 +166,7 @@ void __s390_handle_mcck(void)
"malfunction (code 0x%016lx).\n", mcck.mcck_code);
printk(KERN_EMERG "mcck: task: %s, pid: %d.\n",
current->comm, current->pid);
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
}
@@ -264,7 +264,14 @@ static int notrace s390_validate_registers(union mci mci, int umode)
/* Validate vector registers */
union ctlreg0 cr0;
- if (!mci.vr) {
+ /*
+ * The vector validity must only be checked if not running a
+ * KVM guest. For KVM guests the machine check is forwarded by
+ * KVM and it is the responsibility of the guest to take
+ * appropriate actions. The host vector or FPU values have been
+ * saved by KVM and will be restored by KVM.
+ */
+ if (!mci.vr && !test_cpu_flag(CIF_MCCK_GUEST)) {
/*
* Vector registers can't be restored. If the kernel
* currently uses vector registers the system is
@@ -307,11 +314,21 @@ static int notrace s390_validate_registers(union mci mci, int umode)
if (cr2.gse) {
if (!mci.gs) {
/*
- * Guarded storage register can't be restored and
- * the current processes uses guarded storage.
- * It has to be terminated.
+ * 2 cases:
+ * - machine check in kernel or userspace
+ * - machine check while running SIE (KVM guest)
+ * For kernel or userspace the userspace values of
+ * guarded storage control can not be recreated, the
+ * process must be terminated.
+ * For SIE the guest values of guarded storage can not
+ * be recreated. This is either due to a bug or due to
+ * GS being disabled in the guest. The guest will be
+ * notified by KVM code and the guest's machine check
+ * handling must take care of this. The host values
+ * are saved by KVM and are not affected.
*/
- kill_task = 1;
+ if (!test_cpu_flag(CIF_MCCK_GUEST))
+ kill_task = 1;
} else {
load_gs_cb((struct gs_cb *)mcesa->guarded_storage_save_area);
}
diff --git a/arch/s390/kernel/perf_cpum_cf_common.c b/arch/s390/kernel/perf_cpum_cf_common.c
index 30f0242de4a5..8ee48672233f 100644
--- a/arch/s390/kernel/perf_cpum_cf_common.c
+++ b/arch/s390/kernel/perf_cpum_cf_common.c
@@ -178,7 +178,7 @@ size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset,
case CPUMF_CTR_SET_CRYPTO:
if (info->csvn >= 1 && info->csvn <= 5)
ctrset_size = 16;
- else if (info->csvn == 6)
+ else if (info->csvn == 6 || info->csvn == 7)
ctrset_size = 20;
break;
case CPUMF_CTR_SET_EXT:
@@ -188,7 +188,7 @@ size_t cpum_cf_ctrset_size(enum cpumf_ctr_set ctrset,
ctrset_size = 48;
else if (info->csvn >= 3 && info->csvn <= 5)
ctrset_size = 128;
- else if (info->csvn == 6)
+ else if (info->csvn == 6 || info->csvn == 7)
ctrset_size = 160;
break;
case CPUMF_CTR_SET_MT_DIAG:
diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c
index 37265f551a11..52c1fe23b823 100644
--- a/arch/s390/kernel/perf_cpum_cf_events.c
+++ b/arch/s390/kernel/perf_cpum_cf_events.c
@@ -344,7 +344,7 @@ static struct attribute *cpumcf_svn_12345_pmu_event_attr[] __initdata = {
NULL,
};
-static struct attribute *cpumcf_svn_6_pmu_event_attr[] __initdata = {
+static struct attribute *cpumcf_svn_67_pmu_event_attr[] __initdata = {
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
@@ -715,8 +715,8 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
case 1 ... 5:
csvn = cpumcf_svn_12345_pmu_event_attr;
break;
- case 6:
- csvn = cpumcf_svn_6_pmu_event_attr;
+ case 6 ... 7:
+ csvn = cpumcf_svn_67_pmu_event_attr;
break;
default:
csvn = none;
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index db62def4ef28..332a49965130 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1179,7 +1179,7 @@ static void hw_collect_samples(struct perf_event *event, unsigned long *sdbt,
sample = (struct hws_basic_entry *) *sdbt;
while ((unsigned long *) sample < (unsigned long *) te) {
/* Check for an empty sample */
- if (!sample->def)
+ if (!sample->def || sample->LS)
break;
/* Update perf event period */
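Together with the new H and LS bits in hws_basic_entry earlier in this series, the loop now stops on limited-sampling entries as well as empty ones. A trimmed-down sketch of the predicate; the field widths of the surrounding struct are omitted, and reading LS as "discard this entry" follows the hunk itself rather than separate documentation:

#include <assert.h>

/* trimmed mock of the basic sampling entry: only the two fields
 * the check reads */
struct hws_basic_entry {
	unsigned int def : 16;	/* entry-definition code, 0 == empty */
	unsigned int LS  : 1;	/* limited-sampling indicator */
};

static int stop_collecting(const struct hws_basic_entry *sample)
{
	/* mirrors: if (!sample->def || sample->LS) break; */
	return !sample->def || sample->LS;
}

int main(void)
{
	struct hws_basic_entry empty   = { .def = 0, .LS = 0 };
	struct hws_basic_entry limited = { .def = 1, .LS = 1 };
	struct hws_basic_entry good    = { .def = 1, .LS = 0 };

	assert(stop_collecting(&empty));
	assert(stop_collecting(&limited));
	assert(!stop_collecting(&good));
	return 0;
}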
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index ed9c5c2eafad..799147658dee 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -452,3 +452,4 @@
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
index 67a8e770e369..2e84d3922f7c 100644
--- a/arch/s390/kvm/Kconfig
+++ b/arch/s390/kvm/Kconfig
@@ -33,6 +33,7 @@ config KVM
select HAVE_KVM_NO_POLL
select SRCU
select KVM_VFIO
+ select INTERVAL_TREE
help
Support hosting paravirtualized guest machines using the SIE
virtualization capability on the mainframe. This should work
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
index b3aaadc60ead..26f4a74e5ce4 100644
--- a/arch/s390/kvm/Makefile
+++ b/arch/s390/kvm/Makefile
@@ -3,13 +3,11 @@
#
# Copyright IBM Corp. 2008
-KVM := ../../../virt/kvm
-common-objs = $(KVM)/kvm_main.o $(KVM)/eventfd.o $(KVM)/async_pf.o \
- $(KVM)/irqchip.o $(KVM)/vfio.o $(KVM)/binary_stats.o
+include $(srctree)/virt/kvm/Makefile.kvm
ccflags-y := -Ivirt/kvm -Iarch/s390/kvm
-kvm-objs := $(common-objs) kvm-s390.o intercept.o interrupt.o priv.o sigp.o
-kvm-objs += diag.o gaccess.o guestdbg.o vsie.o pv.o
+kvm-y += kvm-s390.o intercept.o interrupt.o priv.o sigp.o
+kvm-y += diag.o gaccess.o guestdbg.o vsie.o pv.o
obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 6af59c59cc1b..4460808c3b9a 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -794,46 +794,100 @@ static int low_address_protection_enabled(struct kvm_vcpu *vcpu,
return 1;
}
-static int guest_page_range(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
- unsigned long *pages, unsigned long nr_pages,
- const union asce asce, enum gacc_mode mode)
+/**
+ * guest_range_to_gpas() - Calculate guest physical addresses of page fragments
+ * covering a logical range
+ * @vcpu: virtual cpu
+ * @ga: guest address, start of range
+ * @ar: access register
+ * @gpas: output argument, may be NULL
+ * @len: length of range in bytes
+ * @asce: address-space-control element to use for translation
+ * @mode: access mode
+ *
+ * Translate a logical range to a series of guest absolute addresses,
+ * such that the concatenation of page fragments starting at each gpa makes up
+ * the whole range.
+ * The translation is performed as if done by the cpu for the given @asce, @ar,
+ * @mode and state of the @vcpu.
+ * If the translation causes an exception, its program interruption code is
+ * returned and the &struct kvm_s390_pgm_info pgm member of @vcpu is modified
+ * such that a subsequent call to kvm_s390_inject_prog_vcpu() will inject
+ * a correct exception into the guest.
+ * The resulting gpas are stored into @gpas, unless it is NULL.
+ *
+ * Note: All fragments except the first one start at the beginning of a page.
+ * When deriving the boundaries of a fragment from a gpa, all but the last
+ * fragment end at the end of the page.
+ *
+ * Return:
+ * * 0 - success
+ * * <0 - translation could not be performed, for example if guest
+ * memory could not be accessed
+ * * >0 - an access exception occurred. In this case the returned value
+ * is the program interruption code and the contents of pgm may
+ * be used to inject an exception into the guest.
+ */
+static int guest_range_to_gpas(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
+ unsigned long *gpas, unsigned long len,
+ const union asce asce, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
+ unsigned int offset = offset_in_page(ga);
+ unsigned int fragment_len;
int lap_enabled, rc = 0;
enum prot_type prot;
+ unsigned long gpa;
lap_enabled = low_address_protection_enabled(vcpu, asce);
- while (nr_pages) {
+ while (min(PAGE_SIZE - offset, len) > 0) {
+ fragment_len = min(PAGE_SIZE - offset, len);
ga = kvm_s390_logical_to_effective(vcpu, ga);
if (mode == GACC_STORE && lap_enabled && is_low_address(ga))
return trans_exc(vcpu, PGM_PROTECTION, ga, ar, mode,
PROT_TYPE_LA);
- ga &= PAGE_MASK;
if (psw_bits(*psw).dat) {
- rc = guest_translate(vcpu, ga, pages, asce, mode, &prot);
+ rc = guest_translate(vcpu, ga, &gpa, asce, mode, &prot);
if (rc < 0)
return rc;
} else {
- *pages = kvm_s390_real_to_abs(vcpu, ga);
- if (kvm_is_error_gpa(vcpu->kvm, *pages))
+ gpa = kvm_s390_real_to_abs(vcpu, ga);
+ if (kvm_is_error_gpa(vcpu->kvm, gpa))
rc = PGM_ADDRESSING;
}
if (rc)
return trans_exc(vcpu, rc, ga, ar, mode, prot);
- ga += PAGE_SIZE;
- pages++;
- nr_pages--;
+ if (gpas)
+ *gpas++ = gpa;
+ offset = 0;
+ ga += fragment_len;
+ len -= fragment_len;
}
return 0;
}
+static int access_guest_page(struct kvm *kvm, enum gacc_mode mode, gpa_t gpa,
+ void *data, unsigned int len)
+{
+ const unsigned int offset = offset_in_page(gpa);
+ const gfn_t gfn = gpa_to_gfn(gpa);
+ int rc;
+
+ if (mode == GACC_STORE)
+ rc = kvm_write_guest_page(kvm, gfn, data, offset, len);
+ else
+ rc = kvm_read_guest_page(kvm, gfn, data, offset, len);
+ return rc;
+}
+
int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
unsigned long len, enum gacc_mode mode)
{
psw_t *psw = &vcpu->arch.sie_block->gpsw;
- unsigned long _len, nr_pages, gpa, idx;
- unsigned long pages_array[2];
- unsigned long *pages;
+ unsigned long nr_pages, idx;
+ unsigned long gpa_array[2];
+ unsigned int fragment_len;
+ unsigned long *gpas;
int need_ipte_lock;
union asce asce;
int rc;
@@ -845,49 +899,42 @@ int access_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
if (rc)
return rc;
nr_pages = (((ga & ~PAGE_MASK) + len - 1) >> PAGE_SHIFT) + 1;
- pages = pages_array;
- if (nr_pages > ARRAY_SIZE(pages_array))
- pages = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
- if (!pages)
+ gpas = gpa_array;
+ if (nr_pages > ARRAY_SIZE(gpa_array))
+ gpas = vmalloc(array_size(nr_pages, sizeof(unsigned long)));
+ if (!gpas)
return -ENOMEM;
need_ipte_lock = psw_bits(*psw).dat && !asce.r;
if (need_ipte_lock)
ipte_lock(vcpu);
- rc = guest_page_range(vcpu, ga, ar, pages, nr_pages, asce, mode);
+ rc = guest_range_to_gpas(vcpu, ga, ar, gpas, len, asce, mode);
for (idx = 0; idx < nr_pages && !rc; idx++) {
- gpa = *(pages + idx) + (ga & ~PAGE_MASK);
- _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
- if (mode == GACC_STORE)
- rc = kvm_write_guest(vcpu->kvm, gpa, data, _len);
- else
- rc = kvm_read_guest(vcpu->kvm, gpa, data, _len);
- len -= _len;
- ga += _len;
- data += _len;
+ fragment_len = min(PAGE_SIZE - offset_in_page(gpas[idx]), len);
+ rc = access_guest_page(vcpu->kvm, mode, gpas[idx], data, fragment_len);
+ len -= fragment_len;
+ data += fragment_len;
}
if (need_ipte_lock)
ipte_unlock(vcpu);
- if (nr_pages > ARRAY_SIZE(pages_array))
- vfree(pages);
+ if (nr_pages > ARRAY_SIZE(gpa_array))
+ vfree(gpas);
return rc;
}
int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
void *data, unsigned long len, enum gacc_mode mode)
{
- unsigned long _len, gpa;
+ unsigned int fragment_len;
+ unsigned long gpa;
int rc = 0;
while (len && !rc) {
gpa = kvm_s390_real_to_abs(vcpu, gra);
- _len = min(PAGE_SIZE - (gpa & ~PAGE_MASK), len);
- if (mode)
- rc = write_guest_abs(vcpu, gpa, data, _len);
- else
- rc = read_guest_abs(vcpu, gpa, data, _len);
- len -= _len;
- gra += _len;
- data += _len;
+ fragment_len = min(PAGE_SIZE - offset_in_page(gpa), len);
+ rc = access_guest_page(vcpu->kvm, mode, gpa, data, fragment_len);
+ len -= fragment_len;
+ gra += fragment_len;
+ data += fragment_len;
}
return rc;
}
@@ -909,8 +956,6 @@ int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long *gpa, enum gacc_mode mode)
{
- psw_t *psw = &vcpu->arch.sie_block->gpsw;
- enum prot_type prot;
union asce asce;
int rc;
@@ -918,23 +963,7 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
if (rc)
return rc;
- if (is_low_address(gva) && low_address_protection_enabled(vcpu, asce)) {
- if (mode == GACC_STORE)
- return trans_exc(vcpu, PGM_PROTECTION, gva, 0,
- mode, PROT_TYPE_LA);
- }
-
- if (psw_bits(*psw).dat && !asce.r) { /* Use DAT? */
- rc = guest_translate(vcpu, gva, gpa, asce, mode, &prot);
- if (rc > 0)
- return trans_exc(vcpu, rc, gva, 0, mode, prot);
- } else {
- *gpa = kvm_s390_real_to_abs(vcpu, gva);
- if (kvm_is_error_gpa(vcpu->kvm, *gpa))
- return trans_exc(vcpu, rc, gva, PGM_ADDRESSING, mode, 0);
- }
-
- return rc;
+ return guest_range_to_gpas(vcpu, gva, ar, gpa, 1, asce, mode);
}
/**
@@ -948,17 +977,14 @@ int guest_translate_address(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
unsigned long length, enum gacc_mode mode)
{
- unsigned long gpa;
- unsigned long currlen;
+ union asce asce;
int rc = 0;
+ rc = get_vcpu_asce(vcpu, &asce, gva, ar, mode);
+ if (rc)
+ return rc;
ipte_lock(vcpu);
- while (length > 0 && !rc) {
- currlen = min(length, PAGE_SIZE - (gva % PAGE_SIZE));
- rc = guest_translate_address(vcpu, gva, ar, &gpa, mode);
- gva += currlen;
- length -= currlen;
- }
+ rc = guest_range_to_gpas(vcpu, gva, ar, NULL, length, asce, mode);
ipte_unlock(vcpu);
return rc;
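The kernel-doc above states the fragment rule: the first fragment starts at offset_in_page(ga), every later one starts on a page boundary, and only the last may end mid-page. A small stand-alone walk with the same loop shape, PAGE_SIZE hard-coded to 4 KiB for the example:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void walk_fragments(unsigned long ga, unsigned long len)
{
	unsigned long offset = ga & (PAGE_SIZE - 1);	/* offset_in_page() */
	unsigned long fragment_len;

	while (MIN(PAGE_SIZE - offset, len) > 0) {
		fragment_len = MIN(PAGE_SIZE - offset, len);
		printf("fragment: ga=%#lx len=%lu\n", ga, fragment_len);
		offset = 0;	/* later fragments start on a page boundary */
		ga += fragment_len;
		len -= fragment_len;
	}
}

int main(void)
{
	/* 8208-byte range starting 8 bytes before a page boundary:
	 * four fragments of 8, 4096, 4096 and 8 bytes */
	walk_fragments(0x1ff8, 0x2010);
	return 0;
}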
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index c3bd993fdd0c..db933c252dbc 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1335,7 +1335,8 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
- kvm_vcpu_block(vcpu);
+ kvm_vcpu_halt(vcpu);
+ vcpu->valid_wakeup = false;
__unset_cpu_idle(vcpu);
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -2115,6 +2116,13 @@ int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}
+int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu)
+{
+ struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
+
+ return test_bit(IRQ_PEND_RESTART, &li->pending_irqs);
+}
+
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
@@ -2659,7 +2667,7 @@ static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
int r = 0;
- unsigned int i;
+ unsigned long i;
struct kvm_vcpu *vcpu;
switch (attr->group) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 14a18ba5ff2c..2296b1ff1e02 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -295,7 +295,7 @@ static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
{
struct kvm *kvm;
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
unsigned long long *delta = v;
list_for_each_entry(kvm, &vm_list, vm_list) {
@@ -682,7 +682,7 @@ out:
static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
- unsigned int i;
+ unsigned long i;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -936,7 +936,7 @@ static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
kvm_s390_vcpu_block_all(kvm);
@@ -1021,7 +1021,7 @@ static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
- int cx;
+ unsigned long cx;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(cx, vcpu, kvm)
@@ -1037,13 +1037,13 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
struct kvm_memory_slot *ms;
struct kvm_memslots *slots;
unsigned long ram_pages = 0;
- int slotnr;
+ int bkt;
/* migration mode already enabled */
if (kvm->arch.migration_mode)
return 0;
slots = kvm_memslots(kvm);
- if (!slots || !slots->used_slots)
+ if (!slots || kvm_memslots_empty(slots))
return -EINVAL;
if (!kvm->arch.use_cmma) {
@@ -1051,8 +1051,7 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
return 0;
}
/* mark all the pages in active slots as dirty */
- for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
- ms = slots->memslots + slotnr;
+ kvm_for_each_memslot(ms, bkt, slots) {
if (!ms->dirty_bitmap)
return -EINVAL;
/*
@@ -1943,41 +1942,6 @@ out:
/* for consistency */
#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
-/*
- * Similar to gfn_to_memslot, but returns the index of a memslot also when the
- * address falls in a hole. In that case the index of one of the memslots
- * bordering the hole is returned.
- */
-static int gfn_to_memslot_approx(struct kvm_memslots *slots, gfn_t gfn)
-{
- int start = 0, end = slots->used_slots;
- int slot = atomic_read(&slots->last_used_slot);
- struct kvm_memory_slot *memslots = slots->memslots;
-
- if (gfn >= memslots[slot].base_gfn &&
- gfn < memslots[slot].base_gfn + memslots[slot].npages)
- return slot;
-
- while (start < end) {
- slot = start + (end - start) / 2;
-
- if (gfn >= memslots[slot].base_gfn)
- end = slot;
- else
- start = slot + 1;
- }
-
- if (start >= slots->used_slots)
- return slots->used_slots - 1;
-
- if (gfn >= memslots[start].base_gfn &&
- gfn < memslots[start].base_gfn + memslots[start].npages) {
- atomic_set(&slots->last_used_slot, start);
- }
-
- return start;
-}
-
static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
u8 *res, unsigned long bufsize)
{
@@ -2001,27 +1965,32 @@ static int kvm_s390_peek_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
return 0;
}
+static struct kvm_memory_slot *gfn_to_memslot_approx(struct kvm_memslots *slots,
+ gfn_t gfn)
+{
+ return ____gfn_to_memslot(slots, gfn, true);
+}
+
static unsigned long kvm_s390_next_dirty_cmma(struct kvm_memslots *slots,
unsigned long cur_gfn)
{
- int slotidx = gfn_to_memslot_approx(slots, cur_gfn);
- struct kvm_memory_slot *ms = slots->memslots + slotidx;
+ struct kvm_memory_slot *ms = gfn_to_memslot_approx(slots, cur_gfn);
unsigned long ofs = cur_gfn - ms->base_gfn;
+ struct rb_node *mnode = &ms->gfn_node[slots->node_idx];
if (ms->base_gfn + ms->npages <= cur_gfn) {
- slotidx--;
+ mnode = rb_next(mnode);
/* If we are above the highest slot, wrap around */
- if (slotidx < 0)
- slotidx = slots->used_slots - 1;
+ if (!mnode)
+ mnode = rb_first(&slots->gfn_tree);
- ms = slots->memslots + slotidx;
+ ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
ofs = 0;
}
ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, ofs);
- while ((slotidx > 0) && (ofs >= ms->npages)) {
- slotidx--;
- ms = slots->memslots + slotidx;
- ofs = find_next_bit(kvm_second_dirty_bitmap(ms), ms->npages, 0);
+ while (ofs >= ms->npages && (mnode = rb_next(mnode))) {
+ ms = container_of(mnode, struct kvm_memory_slot, gfn_node[slots->node_idx]);
+ ofs = find_first_bit(kvm_second_dirty_bitmap(ms), ms->npages);
}
return ms->base_gfn + ofs;
}
@@ -2033,7 +2002,7 @@ static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
struct kvm_memslots *slots = kvm_memslots(kvm);
struct kvm_memory_slot *ms;
- if (unlikely(!slots->used_slots))
+ if (unlikely(kvm_memslots_empty(slots)))
return 0;
cur_gfn = kvm_s390_next_dirty_cmma(slots, args->start_gfn);
@@ -2043,7 +2012,7 @@ static int kvm_s390_get_cmma(struct kvm *kvm, struct kvm_s390_cmma_log *args,
if (!ms)
return 0;
next_gfn = kvm_s390_next_dirty_cmma(slots, cur_gfn + 1);
- mem_end = slots->memslots[0].base_gfn + slots->memslots[0].npages;
+ mem_end = kvm_s390_get_gfn_end(slots);
while (args->count < bufsize) {
hva = gfn_to_hva(kvm, cur_gfn);
@@ -2206,7 +2175,7 @@ static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
struct kvm_vcpu *vcpu;
u16 rc, rrc;
int ret = 0;
- int i;
+ unsigned long i;
/*
* We ignore failures and try to destroy as many CPUs as possible.
@@ -2230,7 +2199,8 @@ static int kvm_s390_cpus_from_pv(struct kvm *kvm, u16 *rcp, u16 *rrcp)
static int kvm_s390_cpus_to_pv(struct kvm *kvm, u16 *rc, u16 *rrc)
{
- int i, r = 0;
+ unsigned long i;
+ int r = 0;
u16 dummy;
struct kvm_vcpu *vcpu;
@@ -2821,27 +2791,11 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
free_page((unsigned long)(vcpu->arch.sie_block));
}
-static void kvm_free_vcpus(struct kvm *kvm)
-{
- unsigned int i;
- struct kvm_vcpu *vcpu;
-
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_vcpu_destroy(vcpu);
-
- mutex_lock(&kvm->lock);
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
- kvm->vcpus[i] = NULL;
-
- atomic_set(&kvm->online_vcpus, 0);
- mutex_unlock(&kvm->lock);
-}
-
void kvm_arch_destroy_vm(struct kvm *kvm)
{
u16 rc, rrc;
- kvm_free_vcpus(kvm);
+ kvm_destroy_vcpus(kvm);
sca_dispose(kvm);
kvm_s390_gisa_destroy(kvm);
/*
@@ -2945,7 +2899,7 @@ static int sca_switch_to_extended(struct kvm *kvm)
struct bsca_block *old_sca = kvm->arch.sca;
struct esca_block *new_sca;
struct kvm_vcpu *vcpu;
- unsigned int vcpu_idx;
+ unsigned long vcpu_idx;
u32 scaol, scaoh;
if (kvm->arch.use_esca)
@@ -3427,7 +3381,7 @@ static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
struct kvm *kvm = gmap->private;
struct kvm_vcpu *vcpu;
unsigned long prefix;
- int i;
+ unsigned long i;
if (gmap_is_shadow(gmap))
return;
@@ -3449,7 +3403,7 @@ bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
/* do not poll with more than halt_poll_max_steal percent of steal time */
if (S390_lowcore.avg_steal_timer * 100 / (TICK_USEC << 12) >=
- halt_poll_max_steal) {
+ READ_ONCE(halt_poll_max_steal)) {
vcpu->stat.halt_no_poll_steal++;
return true;
}
@@ -3920,7 +3874,7 @@ void kvm_s390_set_tod_clock(struct kvm *kvm,
{
struct kvm_vcpu *vcpu;
union tod_clock clk;
- int i;
+ unsigned long i;
mutex_lock(&kvm->lock);
preempt_disable();
@@ -4552,7 +4506,7 @@ static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
- unsigned int i;
+ unsigned long i;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -4590,7 +4544,7 @@ int kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
}
for (i = 0; i < online_vcpus; i++) {
- if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
+ if (!is_vcpu_stopped(kvm_get_vcpu(vcpu->kvm, i)))
started_vcpus++;
}
@@ -4645,16 +4599,23 @@ int kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
}
}
- /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
+ /*
+ * Set the VCPU to STOPPED and THEN clear the interrupt flag,
+ * now that the SIGP STOP and SIGP STOP AND STORE STATUS orders
+ * have been fully processed. This will ensure that the VCPU
+ * is kept BUSY if another VCPU is inquiring with SIGP SENSE.
+ */
+ kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
kvm_s390_clear_stop_irq(vcpu);
- kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOPPED);
__disable_ibs_on_vcpu(vcpu);
for (i = 0; i < online_vcpus; i++) {
- if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
+ struct kvm_vcpu *tmp = kvm_get_vcpu(vcpu->kvm, i);
+
+ if (!is_vcpu_stopped(tmp)) {
started_vcpus++;
- started_vcpu = vcpu->kvm->vcpus[i];
+ started_vcpu = tmp;
}
}
@@ -4706,6 +4667,8 @@ static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu,
return -EINVAL;
if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block))
return -E2BIG;
+ if (!kvm_s390_pv_cpu_is_protected(vcpu))
+ return -EINVAL;
switch (mop->op) {
case KVM_S390_MEMOP_SIDA_READ:
@@ -5020,32 +4983,38 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
+ gpa_t size;
+
+ /* When we are protected, we should not change the memory slots */
+ if (kvm_s390_pv_get_handle(kvm))
+ return -EINVAL;
+
+ if (change == KVM_MR_DELETE || change == KVM_MR_FLAGS_ONLY)
+ return 0;
+
/* A few sanity checks. We can have memory slots which have to be
located/ended at a segment boundary (1MB). The memory in userland is
ok to be fragmented into various different vmas. It is okay to mmap()
and munmap() stuff in this slot after doing this call at any time */
- if (mem->userspace_addr & 0xffffful)
+ if (new->userspace_addr & 0xffffful)
return -EINVAL;
- if (mem->memory_size & 0xffffful)
+ size = new->npages * PAGE_SIZE;
+ if (size & 0xffffful)
return -EINVAL;
- if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
+ if ((new->base_gfn * PAGE_SIZE) + size > kvm->arch.mem_limit)
return -EINVAL;
- /* When we are protected, we should not change the memory slots */
- if (kvm_s390_pv_get_handle(kvm))
- return -EINVAL;
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
@@ -5064,8 +5033,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
break;
fallthrough;
case KVM_MR_CREATE:
- rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
- mem->guest_phys_addr, mem->memory_size);
+ rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,
+ new->base_gfn * PAGE_SIZE,
+ new->npages * PAGE_SIZE);
break;
case KVM_MR_FLAGS_ONLY:
break;
@@ -5084,11 +5054,6 @@ static inline unsigned long nonhyp_mask(int i)
return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
}
-void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
-{
- vcpu->valid_wakeup = false;
-}
-
static int __init kvm_s390_init(void)
{
int i;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index c07a050d757d..098831e815e6 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -217,6 +217,20 @@ static inline void kvm_s390_set_user_cpu_state_ctrl(struct kvm *kvm)
kvm->arch.user_cpu_state_ctrl = 1;
}
+/* get the end gfn of the last (highest gfn) memslot */
+static inline unsigned long kvm_s390_get_gfn_end(struct kvm_memslots *slots)
+{
+ struct rb_node *node;
+ struct kvm_memory_slot *ms;
+
+ if (WARN_ON(kvm_memslots_empty(slots)))
+ return 0;
+
+ node = rb_last(&slots->gfn_tree);
+ ms = container_of(node, struct kvm_memory_slot, gfn_node[slots->node_idx]);
+ return ms->base_gfn + ms->npages;
+}
+
/* implemented in pv.c */
int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc);
@@ -357,7 +371,7 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
- int i;
+ unsigned long i;
struct kvm_vcpu *vcpu;
WARN_ON(!mutex_is_locked(&kvm->lock));
@@ -367,7 +381,7 @@ static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
- int i;
+ unsigned long i;
struct kvm_vcpu *vcpu;
kvm_for_each_vcpu(i, vcpu, kvm)
@@ -427,6 +441,7 @@ void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
+int kvm_s390_is_restart_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
void __user *buf, int len);
diff --git a/arch/s390/kvm/pv.c b/arch/s390/kvm/pv.c
index 00d272d134c2..7f7c0d6af2ce 100644
--- a/arch/s390/kvm/pv.c
+++ b/arch/s390/kvm/pv.c
@@ -116,7 +116,6 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
unsigned long base = uv_info.guest_base_stor_len;
unsigned long virt = uv_info.guest_virt_var_stor_len;
unsigned long npages = 0, vlen = 0;
- struct kvm_memory_slot *memslot;
kvm->arch.pv.stor_var = NULL;
kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
@@ -130,8 +129,7 @@ static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
* Slots are sorted by GFN
*/
mutex_lock(&kvm->slots_lock);
- memslot = kvm_memslots(kvm)->memslots;
- npages = memslot->base_gfn + memslot->npages;
+ npages = kvm_s390_get_gfn_end(kvm_memslots(kvm));
mutex_unlock(&kvm->slots_lock);
kvm->arch.pv.guest_len = npages * PAGE_SIZE;
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index cf4de80bd541..8aaee2892ec3 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -276,6 +276,34 @@ static int handle_sigp_dst(struct kvm_vcpu *vcpu, u8 order_code,
if (!dst_vcpu)
return SIGP_CC_NOT_OPERATIONAL;
+ /*
+ * SIGP RESTART, SIGP STOP, and SIGP STOP AND STORE STATUS orders
+ * are processed asynchronously. Until the affected VCPU finishes
+ * its work and calls back into KVM to clear the (RESTART or STOP)
+ * interrupt, we need to return any new non-reset orders "busy".
+ *
+ * This is important because a single VCPU could issue:
+ * 1) SIGP STOP $DESTINATION
+ * 2) SIGP SENSE $DESTINATION
+ *
+ * If the SIGP SENSE would not be rejected as "busy", it could
+ * return an incorrect answer as to whether the VCPU is STOPPED
+ * or OPERATING.
+ */
+ if (order_code != SIGP_INITIAL_CPU_RESET &&
+ order_code != SIGP_CPU_RESET) {
+ /*
+ * Lockless check. Both SIGP STOP and SIGP (RE)START
+ * properly synchronize everything while processing
+ * their orders, while the guest cannot observe a
+ * difference when issuing other orders from two
+ * different VCPUs.
+ */
+ if (kvm_s390_is_stop_irq_pending(dst_vcpu) ||
+ kvm_s390_is_restart_irq_pending(dst_vcpu))
+ return SIGP_CC_BUSY;
+ }
+
switch (order_code) {
case SIGP_SENSE:
vcpu->stat.instruction_sigp_sense++;
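A condensed view of the gate added above, with the SIGP order codes written out as assumed constants (0x0b/0x0c are the architected reset orders; verify against asm/sigp.h before relying on them):

#include <assert.h>
#include <stdbool.h>

#define SIGP_SENSE		0x01	/* assumed values, see asm/sigp.h */
#define SIGP_INITIAL_CPU_RESET	0x0b
#define SIGP_CPU_RESET		0x0c

#define SIGP_CC_BUSY		2

/* returns SIGP_CC_BUSY while a STOP or RESTART is still in flight on
 * the destination VCPU, unless the new order is one of the two resets */
static int sigp_busy_check(int order_code, bool stop_pending,
			   bool restart_pending)
{
	if (order_code == SIGP_INITIAL_CPU_RESET ||
	    order_code == SIGP_CPU_RESET)
		return 0;
	return (stop_pending || restart_pending) ? SIGP_CC_BUSY : 0;
}

int main(void)
{
	/* SIGP SENSE racing a not-yet-finished SIGP STOP is told "busy"
	 * instead of sampling a half-updated CPU state */
	assert(sigp_busy_check(SIGP_SENSE, true, false) == SIGP_CC_BUSY);
	assert(sigp_busy_check(SIGP_INITIAL_CPU_RESET, true, false) == 0);
	return 0;
}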
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index 707cd4622c13..69feb8ed3312 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -17,4 +17,7 @@ KASAN_SANITIZE_uaccess.o := n
obj-$(CONFIG_S390_UNWIND_SELFTEST) += test_unwind.o
CFLAGS_test_unwind.o += -fno-optimize-sibling-calls
+obj-$(CONFIG_S390_MODULES_SANITY_TEST) += test_modules.o
+obj-$(CONFIG_S390_MODULES_SANITY_TEST_HELPERS) += test_modules_helpers.o
+
lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/s390/lib/test_modules.c b/arch/s390/lib/test_modules.c
new file mode 100644
index 000000000000..9894009fc1f2
--- /dev/null
+++ b/arch/s390/lib/test_modules.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+#include <linux/module.h>
+
+#include "test_modules.h"
+
+/*
+ * Test that modules with many relocations are loaded properly.
+ */
+static void test_modules_many_vmlinux_relocs(struct kunit *test)
+{
+ int result = 0;
+
+#define CALL_RETURN(i) result += test_modules_return_ ## i()
+ REPEAT_10000(CALL_RETURN);
+ KUNIT_ASSERT_EQ(test, result, 49995000);
+}
+
+static struct kunit_case modules_testcases[] = {
+ KUNIT_CASE(test_modules_many_vmlinux_relocs),
+ {}
+};
+
+static struct kunit_suite modules_test_suite = {
+ .name = "modules_test_s390",
+ .test_cases = modules_testcases,
+};
+
+kunit_test_suites(&modules_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/arch/s390/lib/test_modules.h b/arch/s390/lib/test_modules.h
new file mode 100644
index 000000000000..6371fcf17684
--- /dev/null
+++ b/arch/s390/lib/test_modules.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#ifndef TEST_MODULES_H
+#define TEST_MODULES_H
+
+#define __REPEAT_10000_3(f, x) \
+ f(x ## 0); \
+ f(x ## 1); \
+ f(x ## 2); \
+ f(x ## 3); \
+ f(x ## 4); \
+ f(x ## 5); \
+ f(x ## 6); \
+ f(x ## 7); \
+ f(x ## 8); \
+ f(x ## 9)
+#define __REPEAT_10000_2(f, x) \
+ __REPEAT_10000_3(f, x ## 0); \
+ __REPEAT_10000_3(f, x ## 1); \
+ __REPEAT_10000_3(f, x ## 2); \
+ __REPEAT_10000_3(f, x ## 3); \
+ __REPEAT_10000_3(f, x ## 4); \
+ __REPEAT_10000_3(f, x ## 5); \
+ __REPEAT_10000_3(f, x ## 6); \
+ __REPEAT_10000_3(f, x ## 7); \
+ __REPEAT_10000_3(f, x ## 8); \
+ __REPEAT_10000_3(f, x ## 9)
+#define __REPEAT_10000_1(f, x) \
+ __REPEAT_10000_2(f, x ## 0); \
+ __REPEAT_10000_2(f, x ## 1); \
+ __REPEAT_10000_2(f, x ## 2); \
+ __REPEAT_10000_2(f, x ## 3); \
+ __REPEAT_10000_2(f, x ## 4); \
+ __REPEAT_10000_2(f, x ## 5); \
+ __REPEAT_10000_2(f, x ## 6); \
+ __REPEAT_10000_2(f, x ## 7); \
+ __REPEAT_10000_2(f, x ## 8); \
+ __REPEAT_10000_2(f, x ## 9)
+#define REPEAT_10000(f) \
+ __REPEAT_10000_1(f, 0); \
+ __REPEAT_10000_1(f, 1); \
+ __REPEAT_10000_1(f, 2); \
+ __REPEAT_10000_1(f, 3); \
+ __REPEAT_10000_1(f, 4); \
+ __REPEAT_10000_1(f, 5); \
+ __REPEAT_10000_1(f, 6); \
+ __REPEAT_10000_1(f, 7); \
+ __REPEAT_10000_1(f, 8); \
+ __REPEAT_10000_1(f, 9)
+
+#define DECLARE_RETURN(i) int test_modules_return_ ## i(void)
+REPEAT_10000(DECLARE_RETURN);
+
+#endif
diff --git a/arch/s390/lib/test_modules_helpers.c b/arch/s390/lib/test_modules_helpers.c
new file mode 100644
index 000000000000..1670349a03eb
--- /dev/null
+++ b/arch/s390/lib/test_modules_helpers.c
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/export.h>
+
+#include "test_modules.h"
+
+#define DEFINE_RETURN(i) \
+ int test_modules_return_ ## i(void) \
+ { \
+ return 1 ## i - 10000; \
+ } \
+ EXPORT_SYMBOL_GPL(test_modules_return_ ## i)
+REPEAT_10000(DEFINE_RETURN);
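The 49995000 asserted in test_modules_many_vmlinux_relocs() above is just the sum of i for i = 0..9999: each generated helper returns its own index, because 1 ## i pastes the four digits onto a leading 1 (giving 10000..19999) and the - 10000 strips it again. The leading 1 also keeps zero-padded indices such as 0042 from being parsed as octal constants. A two-line sanity check of the expected value:

#include <assert.h>

int main(void)
{
	long sum = 0;

	for (int i = 0; i < 10000; i++)
		sum += i;		/* what the 10000 helpers return */
	assert(sum == 49995000);	/* 9999 * 10000 / 2 */
	return 0;
}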
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index a596e69d3c47..8a5d21461889 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -62,10 +62,14 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
unsigned long size)
{
unsigned long tmp1, tmp2;
+ union oac spec = {
+ .oac2.as = PSW_BITS_AS_SECONDARY,
+ .oac2.a = 1,
+ };
tmp1 = -4096UL;
asm volatile(
- " lghi 0,%[spec]\n"
+ " lr 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
"6: jz 4f\n"
"1: algr %0,%3\n"
@@ -84,7 +88,7 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : [spec] "K" (0x81UL)
+ : [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
@@ -135,10 +139,14 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
unsigned long size)
{
unsigned long tmp1, tmp2;
+ union oac spec = {
+ .oac1.as = PSW_BITS_AS_SECONDARY,
+ .oac1.a = 1,
+ };
tmp1 = -4096UL;
asm volatile(
- " llilh 0,%[spec]\n"
+ " lr 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
"6: jz 4f\n"
"1: algr %0,%3\n"
@@ -157,7 +165,7 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : [spec] "K" (0x81UL)
+ : [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
@@ -207,10 +215,14 @@ EXPORT_SYMBOL(raw_copy_to_user);
static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
{
unsigned long tmp1, tmp2;
+ union oac spec = {
+ .oac1.as = PSW_BITS_AS_SECONDARY,
+ .oac1.a = 1,
+ };
tmp1 = -4096UL;
asm volatile(
- " llilh 0,%[spec]\n"
+ " lr 0,%[spec]\n"
"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
" jz 4f\n"
"1: algr %0,%2\n"
@@ -228,7 +240,7 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b)
: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
- : "a" (empty_zero_page), [spec] "K" (0x81UL)
+ : "a" (empty_zero_page), [spec] "d" (spec.val)
: "cc", "memory", "0");
return size;
}
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 6ed2886fc014..ff16ce0d04ee 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -452,21 +452,21 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR))
goto out_up;
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
- (flags & FAULT_FLAG_RETRY_NOWAIT)) {
- /* FAULT_FLAG_RETRY_NOWAIT has been set,
- * mmap_lock has not been released */
- current->thread.gmap_pfault = 1;
- fault = VM_FAULT_PFAULT;
- goto out_up;
- }
- flags &= ~FAULT_FLAG_RETRY_NOWAIT;
- flags |= FAULT_FLAG_TRIED;
- mmap_read_lock(mm);
- goto retry;
+ if (fault & VM_FAULT_RETRY) {
+ if (IS_ENABLED(CONFIG_PGSTE) && gmap &&
+ (flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /*
+ * FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has
+ * not been released
+ */
+ current->thread.gmap_pfault = 1;
+ fault = VM_FAULT_PFAULT;
+ goto out_up;
}
+ flags &= ~FAULT_FLAG_RETRY_NOWAIT;
+ flags |= FAULT_FLAG_TRIED;
+ mmap_read_lock(mm);
+ goto retry;
}
if (IS_ENABLED(CONFIG_PGSTE) && gmap) {
address = __gmap_link(gmap, current->thread.gmap_addr,
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
index aefd306ea71e..2b6062c486f5 100644
--- a/arch/s390/pci/pci_irq.c
+++ b/arch/s390/pci/pci_irq.c
@@ -303,7 +303,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
/* Request MSI interrupts */
hwirq = bit;
- for_each_pci_msi_entry(msi, pdev) {
+ msi_for_each_desc(msi, &pdev->dev, MSI_DESC_NOTASSOCIATED) {
rc = -EIO;
if (hwirq - bit >= msi_vecs)
break;
@@ -362,9 +362,7 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
return;
/* Release MSI interrupts */
- for_each_pci_msi_entry(msi, pdev) {
- if (!msi->irq)
- continue;
+ msi_for_each_desc(msi, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_set_msi_desc(msi->irq, NULL);
irq_free_desc(msi->irq);
msi->msg.address_lo = 0;
@@ -387,13 +385,13 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->msi_nr_irqs);
}
-void arch_restore_msi_irqs(struct pci_dev *pdev)
+bool arch_restore_msi_irqs(struct pci_dev *pdev)
{
struct zpci_dev *zdev = to_zpci(pdev);
if (!zdev->irqs_registered)
zpci_set_irq(zdev);
- default_restore_msi_irqs(pdev);
+ return true;
}
static struct airq_struct zpci_airq = {
diff --git a/arch/sh/boot/Makefile b/arch/sh/boot/Makefile
index 5c123f5b2797..1f5d2df3c7e0 100644
--- a/arch/sh/boot/Makefile
+++ b/arch/sh/boot/Makefile
@@ -19,12 +19,12 @@ CONFIG_ZERO_PAGE_OFFSET ?= 0x00001000
CONFIG_ENTRY_OFFSET ?= 0x00001000
CONFIG_PHYSICAL_START ?= $(CONFIG_MEMORY_START)
-suffix-y := bin
-suffix-$(CONFIG_KERNEL_GZIP) := gz
-suffix-$(CONFIG_KERNEL_BZIP2) := bz2
-suffix-$(CONFIG_KERNEL_LZMA) := lzma
-suffix-$(CONFIG_KERNEL_XZ) := xz
-suffix-$(CONFIG_KERNEL_LZO) := lzo
+suffix_y := bin
+suffix_$(CONFIG_KERNEL_GZIP) := gz
+suffix_$(CONFIG_KERNEL_BZIP2) := bz2
+suffix_$(CONFIG_KERNEL_LZMA) := lzma
+suffix_$(CONFIG_KERNEL_XZ) := xz
+suffix_$(CONFIG_KERNEL_LZO) := lzo
targets := zImage vmlinux.srec romImage uImage uImage.srec uImage.gz \
uImage.bz2 uImage.lzma uImage.xz uImage.lzo uImage.bin \
@@ -106,10 +106,10 @@ OBJCOPYFLAGS_uImage.srec := -I binary -O srec
$(obj)/uImage.srec: $(obj)/uImage FORCE
$(call if_changed,objcopy)
-$(obj)/uImage: $(obj)/uImage.$(suffix-y)
+$(obj)/uImage: $(obj)/uImage.$(suffix_y)
@ln -sf $(notdir $<) $@
@echo ' Image $@ is ready'
export CONFIG_PAGE_OFFSET CONFIG_MEMORY_START CONFIG_BOOT_LINK_OFFSET \
CONFIG_PHYSICAL_START CONFIG_ZERO_PAGE_OFFSET CONFIG_ENTRY_OFFSET \
- KERNEL_MEMORY suffix-y
+ KERNEL_MEMORY suffix_y
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index cf3174df7859..591125c42d49 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -47,22 +47,20 @@ $(obj)/vmlinux: $(addprefix $(obj)/, $(OBJECTS)) FORCE
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
-vmlinux.bin.all-y := $(obj)/vmlinux.bin
-
-$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
$(call if_changed,gzip)
-$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,bzip2)
-$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,lzma)
-$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,xzkern)
-$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,lzo)
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,bzip2_with_size)
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzma_with_size)
+$(obj)/vmlinux.bin.xz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,xzkern_with_size)
+$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,lzo_with_size)
OBJCOPYFLAGS += -R .empty_zero_page
LDFLAGS_piggy.o := -r --format binary --oformat $(ld-bfd) -T
-$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE
+$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
$(call if_changed,ld)
diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile
index c17d65b82abe..4a6dec9714a9 100644
--- a/arch/sh/boot/dts/Makefile
+++ b/arch/sh/boot/dts/Makefile
@@ -1,4 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
-obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
-endif
+obj-$(CONFIG_USE_BUILTIN_DTB) += $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_SOURCE))
diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h
index 3b6c7b5b7ec9..10ceb0d6b5a9 100644
--- a/arch/sh/include/asm/bitops.h
+++ b/arch/sh/include/asm/bitops.h
@@ -68,6 +68,5 @@ static inline unsigned long __ffs(unsigned long word)
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/le.h>
-#include <asm-generic/bitops/find.h>
#endif /* __ASM_SH_BITOPS_H */
diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
index d432164b23b7..a76b94e41e91 100644
--- a/arch/sh/kernel/cpu/sh4/sq.c
+++ b/arch/sh/kernel/cpu/sh4/sq.c
@@ -324,6 +324,7 @@ static struct attribute *sq_sysfs_attrs[] = {
&mapping_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(sq_sysfs);
static const struct sysfs_ops sq_sysfs_ops = {
.show = sq_sysfs_show,
@@ -332,7 +333,7 @@ static const struct sysfs_ops sq_sysfs_ops = {
static struct kobj_type ktype_percpu_entry = {
.sysfs_ops = &sq_sysfs_ops,
- .default_attrs = sq_sysfs_attrs,
+ .default_groups = sq_sysfs_groups,
};
static int sq_dev_add(struct device *dev, struct subsys_interface *sif)
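For context on the sq.c hunks above: kobj_type dropped its default_attrs field in favor of default_groups, and the ATTRIBUTE_GROUPS() helper generates the glue. A rough expansion, hedged against the 5.17 sysfs.h macro:

	/* ATTRIBUTE_GROUPS(sq_sysfs) expands approximately to: */
	static const struct attribute_group sq_sysfs_group = {
		.attrs = sq_sysfs_attrs,
	};
	static const struct attribute_group *sq_sysfs_groups[] = {
		&sq_sysfs_group,
		NULL,
	};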
diff --git a/arch/sh/kernel/syscalls/syscall.tbl b/arch/sh/kernel/syscalls/syscall.tbl
index d9539d28bdaa..2de85c977f54 100644
--- a/arch/sh/kernel/syscalls/syscall.tbl
+++ b/arch/sh/kernel/syscalls/syscall.tbl
@@ -452,3 +452,4 @@
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index cbe3201d4f21..01884054aeb2 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -57,7 +57,7 @@ void __noreturn die(const char *str, struct pt_regs *regs, long err)
if (panic_on_oops)
panic("Fatal exception");
- do_exit(SIGSEGV);
+ make_task_dead(SIGSEGV);
}
void die_if_kernel(const char *str, struct pt_regs *regs, long err)
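The do_exit() to make_task_dead() substitution above (repeated in the sparc trap hunks further down) comes from the exit-path rework in this cycle: oops and die() handlers should not unwind through the normal task-exit path. Assumed prototype, per that series:

	/* Hedged sketch: terminate a task from a broken context (oops,
	 * die()); unlike do_exit(), it does not pretend the task is
	 * performing a normal exit.
	 */
	void __noreturn make_task_dead(int signr);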
diff --git a/arch/sh/mm/alignment.c b/arch/sh/mm/alignment.c
index fb517b82a87b..3a76a766f423 100644
--- a/arch/sh/mm/alignment.c
+++ b/arch/sh/mm/alignment.c
@@ -140,7 +140,7 @@ static int alignment_proc_open(struct inode *inode, struct file *file)
static ssize_t alignment_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
- int *data = PDE_DATA(file_inode(file));
+ int *data = pde_data(file_inode(file));
char mode;
if (count > 0) {
@@ -161,7 +161,7 @@ static const struct proc_ops alignment_proc_ops = {
};
/*
- * This needs to be done after sysctl_init, otherwise sys/ will be
+ * This needs to be done after sysctl_init_bases(), otherwise sys/ will be
* overwritten. Actually, this shouldn't be in sys/ at all since
* it isn't a sysctl, and it doesn't contain sysctl information.
* We now locate it in /proc/cpu/alignment instead.
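pde_data() above is the lower-cased successor to the old PDE_DATA() macro; it returns the private cookie registered when the proc entry was created. A minimal, hypothetical handler in the same style (names invented for illustration):

	static ssize_t example_proc_write(struct file *file,
					  const char __user *buf,
					  size_t count, loff_t *pos)
	{
		/* cookie passed to proc_create_data() at registration */
		int *mode = pde_data(file_inode(file));
		char c;

		if (count > 0) {
			if (get_user(c, buf))
				return -EFAULT;
			*mode = (c == '1');
		}
		return count;
	}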
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 1e1aa75df3ca..e175667b1363 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -485,17 +485,15 @@ good_area:
if (mm_fault_error(regs, error_code, address, fault))
return;
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
-
- /*
- * No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
- goto retry;
- }
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
+
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+ goto retry;
}
mmap_read_unlock(mm);
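The same simplification recurs in the sparc and um fault hunks below; the hedged reading of the series it belongs to is that handle_mm_fault() only reports VM_FAULT_RETRY to callers that passed FAULT_FLAG_ALLOW_RETRY, so re-testing the flag is redundant. The resulting pattern, sketched:

	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault & VM_FAULT_RETRY) {	/* implies FAULT_FLAG_ALLOW_RETRY */
		flags |= FAULT_FLAG_TRIED;
		goto retry;		/* mmap lock already dropped by core */
	}
	mmap_read_unlock(mm);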
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 66fc08646be5..1cab1b284f1a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -97,6 +97,9 @@ config SPARC64
select PCI_DOMAINS if PCI
select ARCH_HAS_GIGANTIC_PAGE
select HAVE_SOFTIRQ_ON_OWN_STACK
+ select HAVE_SETUP_PER_CPU_AREA
+ select NEED_PER_CPU_EMBED_FIRST_CHUNK
+ select NEED_PER_CPU_PAGE_FIRST_CHUNK
config ARCH_PROC_KCORE_TEXT
def_bool y
@@ -123,15 +126,6 @@ config AUDIT_ARCH
bool
default y
-config HAVE_SETUP_PER_CPU_AREA
- def_bool y if SPARC64
-
-config NEED_PER_CPU_EMBED_FIRST_CHUNK
- def_bool y if SPARC64
-
-config NEED_PER_CPU_PAGE_FIRST_CHUNK
- def_bool y if SPARC64
-
config MMU
bool
default y
diff --git a/arch/sparc/include/asm/bitops_32.h b/arch/sparc/include/asm/bitops_32.h
index 0ceff3b915a8..889afa9f990f 100644
--- a/arch/sparc/include/asm/bitops_32.h
+++ b/arch/sparc/include/asm/bitops_32.h
@@ -100,7 +100,6 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic.h>
diff --git a/arch/sparc/include/asm/bitops_64.h b/arch/sparc/include/asm/bitops_64.h
index ca7ea5913494..005a8ae858f1 100644
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -52,8 +52,6 @@ unsigned int __arch_hweight8(unsigned int w);
#include <asm-generic/bitops/lock.h>
#endif /* __KERNEL__ */
-#include <asm-generic/bitops/find.h>
-
#ifdef __KERNEL__
#include <asm-generic/bitops/le.h>
diff --git a/arch/sparc/kernel/led.c b/arch/sparc/kernel/led.c
index 3a66e62eb2a0..ab657b359789 100644
--- a/arch/sparc/kernel/led.c
+++ b/arch/sparc/kernel/led.c
@@ -114,18 +114,16 @@ static const struct proc_ops led_proc_ops = {
};
#endif
-static struct proc_dir_entry *led;
-
#define LED_VERSION "0.1"
static int __init led_init(void)
{
timer_setup(&led_blink_timer, led_blink, 0);
- led = proc_create("led", 0, NULL, &led_proc_ops);
- if (!led)
+#ifdef CONFIG_PROC_FS
+ if (!proc_create("led", 0, NULL, &led_proc_ops))
return -ENOMEM;
-
+#endif
printk(KERN_INFO
"led: version %s, Lars Kotthoff <metalhead@metalhead.ws>\n",
LED_VERSION);
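The #ifdef above exists because, with CONFIG_PROC_FS=n, the proc_create() stub evaluates to NULL, so an unconditional NULL check would fail led_init() on proc-less kernels even though nothing went wrong. Roughly, from the !CONFIG_PROC_FS side of proc_fs.h (paraphrased sketch, not the exact upstream text):

	#define proc_create(name, mode, parent, proc_ops) ({ NULL; })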
diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c
index fb5899cbfa51..9ed11985768e 100644
--- a/arch/sparc/kernel/pci_msi.c
+++ b/arch/sparc/kernel/pci_msi.c
@@ -146,13 +146,13 @@ static int sparc64_setup_msi_irq(unsigned int *irq_p,
msiqid = pick_msiq(pbm);
err = ops->msi_setup(pbm, msiqid, msi,
- (entry->msi_attrib.is_64 ? 1 : 0));
+ (entry->pci.msi_attrib.is_64 ? 1 : 0));
if (err)
goto out_msi_free;
pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p;
- if (entry->msi_attrib.is_64) {
+ if (entry->pci.msi_attrib.is_64) {
msg.address_hi = pbm->msi64_start >> 32;
msg.address_lo = pbm->msi64_start & 0xffffffff;
} else {
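The entry->msi_attrib to entry->pci.msi_attrib rename tracks the msi_desc restructuring in this cycle, which grouped the PCI-specific fields into an embedded structure. Hedged sketch of the new layout (field list abridged):

	struct msi_desc {
		unsigned int		irq;
		/* ...generic, bus-independent fields... */
		struct pci_msi_desc	pci;	/* msi_attrib et al. now live here */
	};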
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b98a7bbe6728..a1f78e9ddaf3 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -1526,50 +1526,6 @@ void smp_send_stop(void)
smp_call_function(stop_this_cpu, NULL, 0);
}
-/**
- * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
- * @cpu: cpu to allocate for
- * @size: size allocation in bytes
- * @align: alignment
- *
- * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
- * does the right thing for NUMA regardless of the current
- * configuration.
- *
- * RETURNS:
- * Pointer to the allocated area on success, NULL on failure.
- */
-static void * __init pcpu_alloc_bootmem(unsigned int cpu, size_t size,
- size_t align)
-{
- const unsigned long goal = __pa(MAX_DMA_ADDRESS);
-#ifdef CONFIG_NUMA
- int node = cpu_to_node(cpu);
- void *ptr;
-
- if (!node_online(node) || !NODE_DATA(node)) {
- ptr = memblock_alloc_from(size, align, goal);
- pr_info("cpu %d has no node %d or node-local memory\n",
- cpu, node);
- pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
- cpu, size, __pa(ptr));
- } else {
- ptr = memblock_alloc_try_nid(size, align, goal,
- MEMBLOCK_ALLOC_ACCESSIBLE, node);
- pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
- "%016lx\n", cpu, size, node, __pa(ptr));
- }
- return ptr;
-#else
- return memblock_alloc_from(size, align, goal);
-#endif
-}
-
-static void __init pcpu_free_bootmem(void *ptr, size_t size)
-{
- memblock_free(ptr, size);
-}
-
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
if (cpu_to_node(from) == cpu_to_node(to))
@@ -1578,57 +1534,9 @@ static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
return REMOTE_DISTANCE;
}
-static void __init pcpu_populate_pte(unsigned long addr)
+static int __init pcpu_cpu_to_node(int cpu)
{
- pgd_t *pgd = pgd_offset_k(addr);
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
-
- if (pgd_none(*pgd)) {
- pud_t *new;
-
- new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
- if (!new)
- goto err_alloc;
- pgd_populate(&init_mm, pgd, new);
- }
-
- p4d = p4d_offset(pgd, addr);
- if (p4d_none(*p4d)) {
- pud_t *new;
-
- new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
- if (!new)
- goto err_alloc;
- p4d_populate(&init_mm, p4d, new);
- }
-
- pud = pud_offset(p4d, addr);
- if (pud_none(*pud)) {
- pmd_t *new;
-
- new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
- if (!new)
- goto err_alloc;
- pud_populate(&init_mm, pud, new);
- }
-
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- pte_t *new;
-
- new = memblock_alloc_from(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
- if (!new)
- goto err_alloc;
- pmd_populate_kernel(&init_mm, pmd, new);
- }
-
- return;
-
-err_alloc:
- panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ return cpu_to_node(cpu);
}
void __init setup_per_cpu_areas(void)
@@ -1641,8 +1549,7 @@ void __init setup_per_cpu_areas(void)
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
PERCPU_DYNAMIC_RESERVE, 4 << 20,
pcpu_cpu_distance,
- pcpu_alloc_bootmem,
- pcpu_free_bootmem);
+ pcpu_cpu_to_node);
if (rc)
pr_warn("PERCPU: %s allocator failed (%d), "
"falling back to page size\n",
@@ -1650,9 +1557,7 @@ void __init setup_per_cpu_areas(void)
}
if (rc < 0)
rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
- pcpu_alloc_bootmem,
- pcpu_free_bootmem,
- pcpu_populate_pte);
+ pcpu_cpu_to_node);
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
diff --git a/arch/sparc/kernel/syscalls/syscall.tbl b/arch/sparc/kernel/syscalls/syscall.tbl
index 46adabcb1720..4398cc6fb68d 100644
--- a/arch/sparc/kernel/syscalls/syscall.tbl
+++ b/arch/sparc/kernel/syscalls/syscall.tbl
@@ -495,3 +495,4 @@
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
index 5630e5a395e0..179aabfa712e 100644
--- a/arch/sparc/kernel/traps_32.c
+++ b/arch/sparc/kernel/traps_32.c
@@ -86,9 +86,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
}
printk("Instruction DUMP:");
instruction_dump ((unsigned long *) regs->pc);
- if(regs->psr & PSR_PS)
- do_exit(SIGKILL);
- do_exit(SIGSEGV);
+ make_task_dead((regs->psr & PSR_PS) ? SIGKILL : SIGSEGV);
}
void do_hw_interrupt(struct pt_regs *regs, unsigned long type)
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 6863025ed56d..21077821f427 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2559,9 +2559,7 @@ void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
}
if (panic_on_oops)
panic("Fatal exception");
- if (regs->tstate & TSTATE_PRIV)
- do_exit(SIGKILL);
- do_exit(SIGSEGV);
+ make_task_dead((regs->tstate & TSTATE_PRIV) ? SIGKILL : SIGSEGV);
}
EXPORT_SYMBOL(die_if_kernel);
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 90dc4ae315c8..ad569d9bd124 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -200,17 +200,15 @@ good_area:
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- /* No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
- goto retry;
- }
+ goto retry;
}
mmap_read_unlock(mm);
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 9a9652a15fed..253e07043298 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -437,17 +437,15 @@ good_area:
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- /* No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
- goto retry;
- }
+ goto retry;
}
mmap_read_unlock(mm);
diff --git a/arch/um/drivers/virt-pci.c b/arch/um/drivers/virt-pci.c
index 0ab58016db22..5c092a9153ea 100644
--- a/arch/um/drivers/virt-pci.c
+++ b/arch/um/drivers/virt-pci.c
@@ -616,7 +616,7 @@ static void um_pci_virtio_remove(struct virtio_device *vdev)
int i;
/* Stop all virtqueues */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
device_set_wakeup_enable(&vdev->dev, false);
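virtio_reset_device() above is a new named wrapper introduced this cycle so drivers stop reaching into the config ops directly; per its upstream introduction it is essentially:

	void virtio_reset_device(struct virtio_device *dev)
	{
		dev->config->reset(dev);
	}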
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 561a2b03c3cf..d1d5d0be0308 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -87,12 +87,10 @@ good_area:
}
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ goto retry;
}
pmd = pmd_off(mm, address);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8910b09b5601..9f5bd41bf660 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -78,7 +78,7 @@ config X86
select ARCH_HAS_FILTER_PGPROT
select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_GCOV_PROFILE_ALL
- select ARCH_HAS_KCOV if X86_64 && STACK_VALIDATION
+ select ARCH_HAS_KCOV if X86_64
select ARCH_HAS_MEM_ENCRYPT
select ARCH_HAS_MEMBARRIER_SYNC_CORE
select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
@@ -104,6 +104,7 @@ config X86
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ select ARCH_SUPPORTS_PAGE_TABLE_CHECK if X86_64
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
select ARCH_SUPPORTS_LTO_CLANG
@@ -136,7 +137,6 @@ config X86
select GENERIC_CPU_VULNERABILITIES
select GENERIC_EARLY_IOREMAP
select GENERIC_ENTRY
- select GENERIC_FIND_FIRST_BIT
select GENERIC_IOMAP
select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
select GENERIC_IRQ_MATRIX_ALLOCATOR if X86_LOCAL_APIC
@@ -186,6 +186,7 @@ config X86
select HAVE_CONTEXT_TRACKING_OFFSTACK if HAVE_CONTEXT_TRACKING
select HAVE_C_RECORDMCOUNT
select HAVE_OBJTOOL_MCOUNT if STACK_VALIDATION
+ select HAVE_BUILDTIME_MCOUNT_SORT
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
@@ -239,6 +240,7 @@ config X86
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE if X86_64 && (UNWINDER_FRAME_POINTER || UNWINDER_ORC) && STACK_VALIDATION
select HAVE_FUNCTION_ARG_ACCESS_API
+ select HAVE_SETUP_PER_CPU_AREA
select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_STACKPROTECTOR if CC_HAS_SANE_STACKPROTECTOR
select HAVE_STACK_VALIDATION if X86_64
@@ -252,6 +254,8 @@ config X86
select HAVE_GENERIC_VDSO
select HOTPLUG_SMT if SMP
select IRQ_FORCED_THREADING
+ select NEED_PER_CPU_EMBED_FIRST_CHUNK
+ select NEED_PER_CPU_PAGE_FIRST_CHUNK
select NEED_SG_DMA_LENGTH
select PCI_DOMAINS if PCI
select PCI_LOCKLESS_CONFIG if PCI
@@ -332,15 +336,6 @@ config ARCH_HAS_CPU_RELAX
config ARCH_HAS_FILTER_PGPROT
def_bool y
-config HAVE_SETUP_PER_CPU_AREA
- def_bool y
-
-config NEED_PER_CPU_EMBED_FIRST_CHUNK
- def_bool y
-
-config NEED_PER_CPU_PAGE_FIRST_CHUNK
- def_bool y
-
config ARCH_HIBERNATION_POSSIBLE
def_bool y
@@ -473,6 +468,18 @@ config RETPOLINE
branches. Requires a compiler with -mindirect-branch=thunk-extern
support for full protection. The kernel may run slower.
+config CC_HAS_SLS
+ def_bool $(cc-option,-mharden-sls=all)
+
+config SLS
+ bool "Mitigate Straight-Line-Speculation"
+ depends on CC_HAS_SLS && X86_64
+ default n
+ help
+ Compile the kernel with straight-line-speculation options to guard
against straight-line speculation. The kernel image might be slightly
+ larger.
+
config X86_CPU_RESCTRL
bool "x86 CPU resource control support"
depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD)
@@ -1562,6 +1569,7 @@ config NUMA
depends on SMP
depends on X86_64 || (X86_32 && HIGHMEM64G && X86_BIGSMP)
default y if X86_BIGSMP
+ select USE_PERCPU_NUMA_NODE_ID
help
Enable NUMA (Non-Uniform Memory Access) support.
@@ -1952,7 +1960,7 @@ config EFI
config EFI_STUB
bool "EFI stub support"
- depends on EFI && !X86_USE_3DNOW
+ depends on EFI
depends on $(cc-option,-mabi=ms) || X86_32
select RELOCATABLE
help
@@ -2437,10 +2445,6 @@ config ARCH_HAS_ADD_PAGES
config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
def_bool y
-config USE_PERCPU_NUMA_NODE_ID
- def_bool y
- depends on NUMA
-
menu "Power management and ACPI options"
config ARCH_HIBERNATION_HEADER
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index eefc434351db..542377cd419d 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -342,10 +342,6 @@ config X86_USE_PPRO_CHECKSUM
def_bool y
depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
-config X86_USE_3DNOW
- def_bool y
- depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
-
#
# P6_NOPs are a relatively minor optimization that require a family >=
# 6 processor, except that it is broken on certain VIA chips.
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 42243869216d..e84cdd409b64 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -12,6 +12,18 @@ else
KBUILD_DEFCONFIG := $(ARCH)_defconfig
endif
+ifdef CONFIG_CC_IS_GCC
+RETPOLINE_CFLAGS := $(call cc-option,-mindirect-branch=thunk-extern -mindirect-branch-register)
+RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix)
+RETPOLINE_VDSO_CFLAGS := $(call cc-option,-mindirect-branch=thunk-inline -mindirect-branch-register)
+endif
+ifdef CONFIG_CC_IS_CLANG
+RETPOLINE_CFLAGS := -mretpoline-external-thunk
+RETPOLINE_VDSO_CFLAGS := -mretpoline
+endif
+export RETPOLINE_CFLAGS
+export RETPOLINE_VDSO_CFLAGS
+
# For gcc stack alignment is specified with -mpreferred-stack-boundary,
# clang has the option -mstack-alignment for that purpose.
ifneq ($(call cc-option, -mpreferred-stack-boundary=4),)
@@ -179,6 +191,10 @@ ifdef CONFIG_RETPOLINE
endif
endif
+ifdef CONFIG_SLS
+ KBUILD_CFLAGS += -mharden-sls=all
+endif
+
KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE)
ifdef CONFIG_LTO_CLANG
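The CONFIG_SLS plumbing above is what the long run of `ret` to `RET` hunks in the crypto assembly below depends on: RET is a macro that, under CONFIG_SLS, plants an int3 speculation trap behind every return so a straight-line-speculating CPU cannot run past it. Hedged sketch of the 5.17-era definition in arch/x86/include/asm/linkage.h:

	#ifdef CONFIG_SLS
	#define RET	ret; int3
	#else
	#define RET	ret
	#endif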
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index e11813646051..6115274fe10f 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -126,17 +126,17 @@ vmlinux.bin.all-$(CONFIG_X86_NEED_RELOCS) += $(obj)/vmlinux.relocs
$(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE
$(call if_changed,gzip)
$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,bzip2)
+ $(call if_changed,bzip2_with_size)
$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,lzma)
+ $(call if_changed,lzma_with_size)
$(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,xzkern)
+ $(call if_changed,xzkern_with_size)
$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,lzo)
+ $(call if_changed,lzo_with_size)
$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,lz4)
+ $(call if_changed,lz4_with_size)
$(obj)/vmlinux.bin.zst: $(vmlinux.bin.all-y) FORCE
- $(call if_changed,zstd22)
+ $(call if_changed,zstd22_with_size)
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
index d05f781d54f2..67e7edcdfea8 100644
--- a/arch/x86/boot/compressed/efi_thunk_64.S
+++ b/arch/x86/boot/compressed/efi_thunk_64.S
@@ -101,7 +101,7 @@ SYM_FUNC_START(__efi64_thunk)
pop %rbx
pop %rbp
- ret
+ RET
SYM_FUNC_END(__efi64_thunk)
.code32
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 572c535cf45b..fd9441f40457 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -813,7 +813,7 @@ SYM_FUNC_START(efi32_pe_entry)
2: popl %edi // restore callee-save registers
popl %ebx
leave
- ret
+ RET
SYM_FUNC_END(efi32_pe_entry)
.section ".rodata"
@@ -868,7 +868,7 @@ SYM_FUNC_START(startup32_set_idt_entry)
pop %ecx
pop %ebx
- ret
+ RET
SYM_FUNC_END(startup32_set_idt_entry)
#endif
@@ -884,7 +884,7 @@ SYM_FUNC_START(startup32_load_idt)
movl %eax, rva(boot32_idt_desc+2)(%ebp)
lidt rva(boot32_idt_desc)(%ebp)
#endif
- ret
+ RET
SYM_FUNC_END(startup32_load_idt)
/*
@@ -954,7 +954,7 @@ SYM_FUNC_START(startup32_check_sev_cbit)
popl %ebx
popl %eax
#endif
- ret
+ RET
SYM_FUNC_END(startup32_check_sev_cbit)
/*
diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S
index c1e81a848b2a..a63424d13627 100644
--- a/arch/x86/boot/compressed/mem_encrypt.S
+++ b/arch/x86/boot/compressed/mem_encrypt.S
@@ -58,7 +58,7 @@ SYM_FUNC_START(get_sev_encryption_bit)
#endif /* CONFIG_AMD_MEM_ENCRYPT */
- ret
+ RET
SYM_FUNC_END(get_sev_encryption_bit)
/**
@@ -92,7 +92,7 @@ SYM_CODE_START_LOCAL(sev_es_req_cpuid)
/* All good - return success */
xorl %eax, %eax
1:
- ret
+ RET
2:
movl $-1, %eax
jmp 1b
@@ -221,7 +221,7 @@ SYM_FUNC_START(set_sev_encryption_mask)
#endif
xor %rax, %rax
- ret
+ RET
SYM_FUNC_END(set_sev_encryption_mask)
.data
diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S
index 51d46d93efbc..b48ddebb4748 100644
--- a/arch/x86/crypto/aegis128-aesni-asm.S
+++ b/arch/x86/crypto/aegis128-aesni-asm.S
@@ -122,7 +122,7 @@ SYM_FUNC_START_LOCAL(__load_partial)
pxor T0, MSG
.Lld_partial_8:
- ret
+ RET
SYM_FUNC_END(__load_partial)
/*
@@ -180,7 +180,7 @@ SYM_FUNC_START_LOCAL(__store_partial)
mov %r10b, (%r9)
.Lst_partial_1:
- ret
+ RET
SYM_FUNC_END(__store_partial)
/*
@@ -225,7 +225,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_init)
movdqu STATE4, 0x40(STATEP)
FRAME_END
- ret
+ RET
SYM_FUNC_END(crypto_aegis128_aesni_init)
/*
@@ -337,7 +337,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE3, 0x30(STATEP)
movdqu STATE4, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Lad_out_1:
movdqu STATE4, 0x00(STATEP)
@@ -346,7 +346,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Lad_out_2:
movdqu STATE3, 0x00(STATEP)
@@ -355,7 +355,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE1, 0x30(STATEP)
movdqu STATE2, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Lad_out_3:
movdqu STATE2, 0x00(STATEP)
@@ -364,7 +364,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE0, 0x30(STATEP)
movdqu STATE1, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Lad_out_4:
movdqu STATE1, 0x00(STATEP)
@@ -373,11 +373,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_ad)
movdqu STATE4, 0x30(STATEP)
movdqu STATE0, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Lad_out:
FRAME_END
- ret
+ RET
SYM_FUNC_END(crypto_aegis128_aesni_ad)
.macro encrypt_block a s0 s1 s2 s3 s4 i
@@ -452,7 +452,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Lenc_out_1:
movdqu STATE3, 0x00(STATEP)
@@ -461,7 +461,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE1, 0x30(STATEP)
movdqu STATE2, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Lenc_out_2:
movdqu STATE2, 0x00(STATEP)
@@ -470,7 +470,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE0, 0x30(STATEP)
movdqu STATE1, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Lenc_out_3:
movdqu STATE1, 0x00(STATEP)
@@ -479,7 +479,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE4, 0x30(STATEP)
movdqu STATE0, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Lenc_out_4:
movdqu STATE0, 0x00(STATEP)
@@ -488,11 +488,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc)
movdqu STATE3, 0x30(STATEP)
movdqu STATE4, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Lenc_out:
FRAME_END
- ret
+ RET
SYM_FUNC_END(crypto_aegis128_aesni_enc)
/*
@@ -532,7 +532,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_enc_tail)
movdqu STATE3, 0x40(STATEP)
FRAME_END
- ret
+ RET
SYM_FUNC_END(crypto_aegis128_aesni_enc_tail)
.macro decrypt_block a s0 s1 s2 s3 s4 i
@@ -606,7 +606,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE2, 0x30(STATEP)
movdqu STATE3, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Ldec_out_1:
movdqu STATE3, 0x00(STATEP)
@@ -615,7 +615,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE1, 0x30(STATEP)
movdqu STATE2, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Ldec_out_2:
movdqu STATE2, 0x00(STATEP)
@@ -624,7 +624,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE0, 0x30(STATEP)
movdqu STATE1, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Ldec_out_3:
movdqu STATE1, 0x00(STATEP)
@@ -633,7 +633,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE4, 0x30(STATEP)
movdqu STATE0, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Ldec_out_4:
movdqu STATE0, 0x00(STATEP)
@@ -642,11 +642,11 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec)
movdqu STATE3, 0x30(STATEP)
movdqu STATE4, 0x40(STATEP)
FRAME_END
- ret
+ RET
.Ldec_out:
FRAME_END
- ret
+ RET
SYM_FUNC_END(crypto_aegis128_aesni_dec)
/*
@@ -696,7 +696,7 @@ SYM_FUNC_START(crypto_aegis128_aesni_dec_tail)
movdqu STATE3, 0x40(STATEP)
FRAME_END
- ret
+ RET
SYM_FUNC_END(crypto_aegis128_aesni_dec_tail)
/*
@@ -743,5 +743,5 @@ SYM_FUNC_START(crypto_aegis128_aesni_final)
movdqu MSG, (%rsi)
FRAME_END
- ret
+ RET
SYM_FUNC_END(crypto_aegis128_aesni_final)
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
index 3f0fc7dd87d7..c799838242a6 100644
--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -525,7 +525,7 @@ ddq_add_8:
/* return updated IV */
vpshufb xbyteswap, xcounter, xcounter
vmovdqu xcounter, (p_iv)
- ret
+ RET
.endm
/*
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 4e3972570916..363699dd7220 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -1594,7 +1594,7 @@ SYM_FUNC_START(aesni_gcm_dec)
GCM_ENC_DEC dec
GCM_COMPLETE arg10, arg11
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_dec)
@@ -1683,7 +1683,7 @@ SYM_FUNC_START(aesni_gcm_enc)
GCM_COMPLETE arg10, arg11
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_enc)
/*****************************************************************************
@@ -1701,7 +1701,7 @@ SYM_FUNC_START(aesni_gcm_init)
FUNC_SAVE
GCM_INIT %arg3, %arg4,%arg5, %arg6
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_init)
/*****************************************************************************
@@ -1716,7 +1716,7 @@ SYM_FUNC_START(aesni_gcm_enc_update)
FUNC_SAVE
GCM_ENC_DEC enc
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_enc_update)
/*****************************************************************************
@@ -1731,7 +1731,7 @@ SYM_FUNC_START(aesni_gcm_dec_update)
FUNC_SAVE
GCM_ENC_DEC dec
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_dec_update)
/*****************************************************************************
@@ -1746,7 +1746,7 @@ SYM_FUNC_START(aesni_gcm_finalize)
FUNC_SAVE
GCM_COMPLETE %arg3 %arg4
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_finalize)
#endif
@@ -1762,7 +1762,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_256a)
pxor %xmm1, %xmm0
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
- ret
+ RET
SYM_FUNC_END(_key_expansion_256a)
SYM_FUNC_END_ALIAS(_key_expansion_128)
@@ -1787,7 +1787,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_192a)
shufps $0b01001110, %xmm2, %xmm1
movaps %xmm1, 0x10(TKEYP)
add $0x20, TKEYP
- ret
+ RET
SYM_FUNC_END(_key_expansion_192a)
SYM_FUNC_START_LOCAL(_key_expansion_192b)
@@ -1806,7 +1806,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_192b)
movaps %xmm0, (TKEYP)
add $0x10, TKEYP
- ret
+ RET
SYM_FUNC_END(_key_expansion_192b)
SYM_FUNC_START_LOCAL(_key_expansion_256b)
@@ -1818,7 +1818,7 @@ SYM_FUNC_START_LOCAL(_key_expansion_256b)
pxor %xmm1, %xmm2
movaps %xmm2, (TKEYP)
add $0x10, TKEYP
- ret
+ RET
SYM_FUNC_END(_key_expansion_256b)
/*
@@ -1933,7 +1933,7 @@ SYM_FUNC_START(aesni_set_key)
popl KEYP
#endif
FRAME_END
- ret
+ RET
SYM_FUNC_END(aesni_set_key)
/*
@@ -1957,7 +1957,7 @@ SYM_FUNC_START(aesni_enc)
popl KEYP
#endif
FRAME_END
- ret
+ RET
SYM_FUNC_END(aesni_enc)
/*
@@ -2014,7 +2014,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc1)
aesenc KEY, STATE
movaps 0x70(TKEYP), KEY
aesenclast KEY, STATE
- ret
+ RET
SYM_FUNC_END(_aesni_enc1)
/*
@@ -2122,7 +2122,7 @@ SYM_FUNC_START_LOCAL(_aesni_enc4)
aesenclast KEY, STATE2
aesenclast KEY, STATE3
aesenclast KEY, STATE4
- ret
+ RET
SYM_FUNC_END(_aesni_enc4)
/*
@@ -2147,7 +2147,7 @@ SYM_FUNC_START(aesni_dec)
popl KEYP
#endif
FRAME_END
- ret
+ RET
SYM_FUNC_END(aesni_dec)
/*
@@ -2204,7 +2204,7 @@ SYM_FUNC_START_LOCAL(_aesni_dec1)
aesdec KEY, STATE
movaps 0x70(TKEYP), KEY
aesdeclast KEY, STATE
- ret
+ RET
SYM_FUNC_END(_aesni_dec1)
/*
@@ -2312,7 +2312,7 @@ SYM_FUNC_START_LOCAL(_aesni_dec4)
aesdeclast KEY, STATE2
aesdeclast KEY, STATE3
aesdeclast KEY, STATE4
- ret
+ RET
SYM_FUNC_END(_aesni_dec4)
/*
@@ -2372,7 +2372,7 @@ SYM_FUNC_START(aesni_ecb_enc)
popl LEN
#endif
FRAME_END
- ret
+ RET
SYM_FUNC_END(aesni_ecb_enc)
/*
@@ -2433,7 +2433,7 @@ SYM_FUNC_START(aesni_ecb_dec)
popl LEN
#endif
FRAME_END
- ret
+ RET
SYM_FUNC_END(aesni_ecb_dec)
/*
@@ -2477,7 +2477,7 @@ SYM_FUNC_START(aesni_cbc_enc)
popl IVP
#endif
FRAME_END
- ret
+ RET
SYM_FUNC_END(aesni_cbc_enc)
/*
@@ -2570,7 +2570,7 @@ SYM_FUNC_START(aesni_cbc_dec)
popl IVP
#endif
FRAME_END
- ret
+ RET
SYM_FUNC_END(aesni_cbc_dec)
/*
@@ -2627,7 +2627,7 @@ SYM_FUNC_START(aesni_cts_cbc_enc)
popl IVP
#endif
FRAME_END
- ret
+ RET
SYM_FUNC_END(aesni_cts_cbc_enc)
/*
@@ -2688,7 +2688,7 @@ SYM_FUNC_START(aesni_cts_cbc_dec)
popl IVP
#endif
FRAME_END
- ret
+ RET
SYM_FUNC_END(aesni_cts_cbc_dec)
.pushsection .rodata
@@ -2725,7 +2725,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc_init)
mov $1, TCTR_LOW
movq TCTR_LOW, INC
movq CTR, TCTR_LOW
- ret
+ RET
SYM_FUNC_END(_aesni_inc_init)
/*
@@ -2753,7 +2753,7 @@ SYM_FUNC_START_LOCAL(_aesni_inc)
.Linc_low:
movaps CTR, IV
pshufb BSWAP_MASK, IV
- ret
+ RET
SYM_FUNC_END(_aesni_inc)
/*
@@ -2816,7 +2816,7 @@ SYM_FUNC_START(aesni_ctr_enc)
movups IV, (IVP)
.Lctr_enc_just_ret:
FRAME_END
- ret
+ RET
SYM_FUNC_END(aesni_ctr_enc)
#endif
@@ -2932,7 +2932,7 @@ SYM_FUNC_START(aesni_xts_encrypt)
popl IVP
#endif
FRAME_END
- ret
+ RET
.Lxts_enc_1x:
add $64, LEN
@@ -3092,7 +3092,7 @@ SYM_FUNC_START(aesni_xts_decrypt)
popl IVP
#endif
FRAME_END
- ret
+ RET
.Lxts_dec_1x:
add $64, LEN
diff --git a/arch/x86/crypto/aesni-intel_avx-x86_64.S b/arch/x86/crypto/aesni-intel_avx-x86_64.S
index 98e3552b6e03..0852ab573fd3 100644
--- a/arch/x86/crypto/aesni-intel_avx-x86_64.S
+++ b/arch/x86/crypto/aesni-intel_avx-x86_64.S
@@ -1767,7 +1767,7 @@ SYM_FUNC_START(aesni_gcm_init_avx_gen2)
FUNC_SAVE
INIT GHASH_MUL_AVX, PRECOMPUTE_AVX
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_init_avx_gen2)
###############################################################################
@@ -1788,15 +1788,15 @@ SYM_FUNC_START(aesni_gcm_enc_update_avx_gen2)
# must be 192
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 11
FUNC_RESTORE
- ret
+ RET
key_128_enc_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 9
FUNC_RESTORE
- ret
+ RET
key_256_enc_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, ENC, 13
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_enc_update_avx_gen2)
###############################################################################
@@ -1817,15 +1817,15 @@ SYM_FUNC_START(aesni_gcm_dec_update_avx_gen2)
# must be 192
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 11
FUNC_RESTORE
- ret
+ RET
key_128_dec_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 9
FUNC_RESTORE
- ret
+ RET
key_256_dec_update:
GCM_ENC_DEC INITIAL_BLOCKS_AVX, GHASH_8_ENCRYPT_8_PARALLEL_AVX, GHASH_LAST_8_AVX, GHASH_MUL_AVX, DEC, 13
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_dec_update_avx_gen2)
###############################################################################
@@ -1846,15 +1846,15 @@ SYM_FUNC_START(aesni_gcm_finalize_avx_gen2)
# must be 192
GCM_COMPLETE GHASH_MUL_AVX, 11, arg3, arg4
FUNC_RESTORE
- ret
+ RET
key_128_finalize:
GCM_COMPLETE GHASH_MUL_AVX, 9, arg3, arg4
FUNC_RESTORE
- ret
+ RET
key_256_finalize:
GCM_COMPLETE GHASH_MUL_AVX, 13, arg3, arg4
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_finalize_avx_gen2)
###############################################################################
@@ -2735,7 +2735,7 @@ SYM_FUNC_START(aesni_gcm_init_avx_gen4)
FUNC_SAVE
INIT GHASH_MUL_AVX2, PRECOMPUTE_AVX2
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_init_avx_gen4)
###############################################################################
@@ -2756,15 +2756,15 @@ SYM_FUNC_START(aesni_gcm_enc_update_avx_gen4)
# must be 192
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 11
FUNC_RESTORE
- ret
+ RET
key_128_enc_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 9
FUNC_RESTORE
- ret
+ RET
key_256_enc_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, ENC, 13
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_enc_update_avx_gen4)
###############################################################################
@@ -2785,15 +2785,15 @@ SYM_FUNC_START(aesni_gcm_dec_update_avx_gen4)
# must be 192
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 11
FUNC_RESTORE
- ret
+ RET
key_128_dec_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 9
FUNC_RESTORE
- ret
+ RET
key_256_dec_update4:
GCM_ENC_DEC INITIAL_BLOCKS_AVX2, GHASH_8_ENCRYPT_8_PARALLEL_AVX2, GHASH_LAST_8_AVX2, GHASH_MUL_AVX2, DEC, 13
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_dec_update_avx_gen4)
###############################################################################
@@ -2814,13 +2814,13 @@ SYM_FUNC_START(aesni_gcm_finalize_avx_gen4)
# must be 192
GCM_COMPLETE GHASH_MUL_AVX2, 11, arg3, arg4
FUNC_RESTORE
- ret
+ RET
key_128_finalize4:
GCM_COMPLETE GHASH_MUL_AVX2, 9, arg3, arg4
FUNC_RESTORE
- ret
+ RET
key_256_finalize4:
GCM_COMPLETE GHASH_MUL_AVX2, 13, arg3, arg4
FUNC_RESTORE
- ret
+ RET
SYM_FUNC_END(aesni_gcm_finalize_avx_gen4)
diff --git a/arch/x86/crypto/blake2s-core.S b/arch/x86/crypto/blake2s-core.S
index 2ca79974f819..b50b35ff1fdb 100644
--- a/arch/x86/crypto/blake2s-core.S
+++ b/arch/x86/crypto/blake2s-core.S
@@ -171,7 +171,7 @@ SYM_FUNC_START(blake2s_compress_ssse3)
movdqu %xmm1,0x10(%rdi)
movdqu %xmm14,0x20(%rdi)
.Lendofloop:
- ret
+ RET
SYM_FUNC_END(blake2s_compress_ssse3)
#ifdef CONFIG_AS_AVX512
@@ -251,6 +251,6 @@ SYM_FUNC_START(blake2s_compress_avx512)
vmovdqu %xmm1,0x10(%rdi)
vmovdqu %xmm4,0x20(%rdi)
vzeroupper
- retq
+ RET
SYM_FUNC_END(blake2s_compress_avx512)
#endif /* CONFIG_AS_AVX512 */
diff --git a/arch/x86/crypto/blake2s-shash.c b/arch/x86/crypto/blake2s-shash.c
index f9e2fecdb761..59ae28abe35c 100644
--- a/arch/x86/crypto/blake2s-shash.c
+++ b/arch/x86/crypto/blake2s-shash.c
@@ -18,12 +18,12 @@
static int crypto_blake2s_update_x86(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2s_update(desc, in, inlen, blake2s_compress);
+ return crypto_blake2s_update(desc, in, inlen, false);
}
static int crypto_blake2s_final_x86(struct shash_desc *desc, u8 *out)
{
- return crypto_blake2s_final(desc, out, blake2s_compress);
+ return crypto_blake2s_final(desc, out, false);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \
diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
index 4222ac6d6584..802d71582689 100644
--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
@@ -135,10 +135,10 @@ SYM_FUNC_START(__blowfish_enc_blk)
jnz .L__enc_xor;
write_block();
- ret;
+ RET;
.L__enc_xor:
xor_block();
- ret;
+ RET;
SYM_FUNC_END(__blowfish_enc_blk)
SYM_FUNC_START(blowfish_dec_blk)
@@ -170,7 +170,7 @@ SYM_FUNC_START(blowfish_dec_blk)
movq %r11, %r12;
- ret;
+ RET;
SYM_FUNC_END(blowfish_dec_blk)
/**********************************************************************
@@ -322,14 +322,14 @@ SYM_FUNC_START(__blowfish_enc_blk_4way)
popq %rbx;
popq %r12;
- ret;
+ RET;
.L__enc_xor4:
xor_block4();
popq %rbx;
popq %r12;
- ret;
+ RET;
SYM_FUNC_END(__blowfish_enc_blk_4way)
SYM_FUNC_START(blowfish_dec_blk_4way)
@@ -364,5 +364,5 @@ SYM_FUNC_START(blowfish_dec_blk_4way)
popq %rbx;
popq %r12;
- ret;
+ RET;
SYM_FUNC_END(blowfish_dec_blk_4way)
diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
index e2a0e0f4bf9d..2e1658ddbe1a 100644
--- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
@@ -192,7 +192,7 @@ SYM_FUNC_START_LOCAL(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_c
roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
%xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
%rcx, (%r9));
- ret;
+ RET;
SYM_FUNC_END(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
.align 8
@@ -200,7 +200,7 @@ SYM_FUNC_START_LOCAL(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_a
roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
%xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
%rax, (%r9));
- ret;
+ RET;
SYM_FUNC_END(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
/*
@@ -778,7 +778,7 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk16)
%xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
FRAME_END
- ret;
+ RET;
.align 8
.Lenc_max32:
@@ -865,7 +865,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk16)
%xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
FRAME_END
- ret;
+ RET;
.align 8
.Ldec_max32:
@@ -906,7 +906,7 @@ SYM_FUNC_START(camellia_ecb_enc_16way)
%xmm8, %rsi);
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(camellia_ecb_enc_16way)
SYM_FUNC_START(camellia_ecb_dec_16way)
@@ -936,7 +936,7 @@ SYM_FUNC_START(camellia_ecb_dec_16way)
%xmm8, %rsi);
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(camellia_ecb_dec_16way)
SYM_FUNC_START(camellia_cbc_dec_16way)
@@ -987,5 +987,5 @@ SYM_FUNC_START(camellia_cbc_dec_16way)
%xmm8, %rsi);
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(camellia_cbc_dec_16way)
diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
index 706f70829a07..0e4e9abbf4de 100644
--- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
@@ -226,7 +226,7 @@ SYM_FUNC_START_LOCAL(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_c
roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
%ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
%rcx, (%r9));
- ret;
+ RET;
SYM_FUNC_END(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
.align 8
@@ -234,7 +234,7 @@ SYM_FUNC_START_LOCAL(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_a
roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
%ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
%rax, (%r9));
- ret;
+ RET;
SYM_FUNC_END(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
/*
@@ -814,7 +814,7 @@ SYM_FUNC_START_LOCAL(__camellia_enc_blk32)
%ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
FRAME_END
- ret;
+ RET;
.align 8
.Lenc_max32:
@@ -901,7 +901,7 @@ SYM_FUNC_START_LOCAL(__camellia_dec_blk32)
%ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
FRAME_END
- ret;
+ RET;
.align 8
.Ldec_max32:
@@ -946,7 +946,7 @@ SYM_FUNC_START(camellia_ecb_enc_32way)
vzeroupper;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(camellia_ecb_enc_32way)
SYM_FUNC_START(camellia_ecb_dec_32way)
@@ -980,7 +980,7 @@ SYM_FUNC_START(camellia_ecb_dec_32way)
vzeroupper;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(camellia_ecb_dec_32way)
SYM_FUNC_START(camellia_cbc_dec_32way)
@@ -1047,5 +1047,5 @@ SYM_FUNC_START(camellia_cbc_dec_32way)
addq $(16 * 32), %rsp;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(camellia_cbc_dec_32way)
diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
index 1372e6408850..347c059f5940 100644
--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
@@ -213,13 +213,13 @@ SYM_FUNC_START(__camellia_enc_blk)
enc_outunpack(mov, RT1);
movq RR12, %r12;
- ret;
+ RET;
.L__enc_xor:
enc_outunpack(xor, RT1);
movq RR12, %r12;
- ret;
+ RET;
SYM_FUNC_END(__camellia_enc_blk)
SYM_FUNC_START(camellia_dec_blk)
@@ -257,7 +257,7 @@ SYM_FUNC_START(camellia_dec_blk)
dec_outunpack();
movq RR12, %r12;
- ret;
+ RET;
SYM_FUNC_END(camellia_dec_blk)
/**********************************************************************
@@ -448,14 +448,14 @@ SYM_FUNC_START(__camellia_enc_blk_2way)
movq RR12, %r12;
popq %rbx;
- ret;
+ RET;
.L__enc2_xor:
enc_outunpack2(xor, RT2);
movq RR12, %r12;
popq %rbx;
- ret;
+ RET;
SYM_FUNC_END(__camellia_enc_blk_2way)
SYM_FUNC_START(camellia_dec_blk_2way)
@@ -495,5 +495,5 @@ SYM_FUNC_START(camellia_dec_blk_2way)
movq RR12, %r12;
movq RXOR, %rbx;
- ret;
+ RET;
SYM_FUNC_END(camellia_dec_blk_2way)
diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
index 8a6181b08b59..b258af420c92 100644
--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
@@ -279,7 +279,7 @@ SYM_FUNC_START_LOCAL(__cast5_enc_blk16)
outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
- ret;
+ RET;
SYM_FUNC_END(__cast5_enc_blk16)
.align 16
@@ -352,7 +352,7 @@ SYM_FUNC_START_LOCAL(__cast5_dec_blk16)
outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
- ret;
+ RET;
.L__skip_dec:
vpsrldq $4, RKR, RKR;
@@ -393,7 +393,7 @@ SYM_FUNC_START(cast5_ecb_enc_16way)
popq %r15;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(cast5_ecb_enc_16way)
SYM_FUNC_START(cast5_ecb_dec_16way)
@@ -431,7 +431,7 @@ SYM_FUNC_START(cast5_ecb_dec_16way)
popq %r15;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(cast5_ecb_dec_16way)
SYM_FUNC_START(cast5_cbc_dec_16way)
@@ -483,7 +483,7 @@ SYM_FUNC_START(cast5_cbc_dec_16way)
popq %r15;
popq %r12;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(cast5_cbc_dec_16way)
SYM_FUNC_START(cast5_ctr_16way)
@@ -559,5 +559,5 @@ SYM_FUNC_START(cast5_ctr_16way)
popq %r15;
popq %r12;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(cast5_ctr_16way)
diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
index fbddcecc3e3f..82b716fd5dba 100644
--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
@@ -289,7 +289,7 @@ SYM_FUNC_START_LOCAL(__cast6_enc_blk8)
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
- ret;
+ RET;
SYM_FUNC_END(__cast6_enc_blk8)
.align 8
@@ -336,7 +336,7 @@ SYM_FUNC_START_LOCAL(__cast6_dec_blk8)
outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
- ret;
+ RET;
SYM_FUNC_END(__cast6_dec_blk8)
SYM_FUNC_START(cast6_ecb_enc_8way)
@@ -359,7 +359,7 @@ SYM_FUNC_START(cast6_ecb_enc_8way)
popq %r15;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(cast6_ecb_enc_8way)
SYM_FUNC_START(cast6_ecb_dec_8way)
@@ -382,7 +382,7 @@ SYM_FUNC_START(cast6_ecb_dec_8way)
popq %r15;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(cast6_ecb_dec_8way)
SYM_FUNC_START(cast6_cbc_dec_8way)
@@ -408,5 +408,5 @@ SYM_FUNC_START(cast6_cbc_dec_8way)
popq %r15;
popq %r12;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(cast6_cbc_dec_8way)
diff --git a/arch/x86/crypto/chacha-avx2-x86_64.S b/arch/x86/crypto/chacha-avx2-x86_64.S
index ee9a40ab4109..f3d8fc018249 100644
--- a/arch/x86/crypto/chacha-avx2-x86_64.S
+++ b/arch/x86/crypto/chacha-avx2-x86_64.S
@@ -193,7 +193,7 @@ SYM_FUNC_START(chacha_2block_xor_avx2)
.Ldone2:
vzeroupper
- ret
+ RET
.Lxorpart2:
# xor remaining bytes from partial register into output
@@ -498,7 +498,7 @@ SYM_FUNC_START(chacha_4block_xor_avx2)
.Ldone4:
vzeroupper
- ret
+ RET
.Lxorpart4:
# xor remaining bytes from partial register into output
@@ -992,7 +992,7 @@ SYM_FUNC_START(chacha_8block_xor_avx2)
.Ldone8:
vzeroupper
lea -8(%r10),%rsp
- ret
+ RET
.Lxorpart8:
# xor remaining bytes from partial register into output
diff --git a/arch/x86/crypto/chacha-avx512vl-x86_64.S b/arch/x86/crypto/chacha-avx512vl-x86_64.S
index bb193fde123a..946f74dd6fba 100644
--- a/arch/x86/crypto/chacha-avx512vl-x86_64.S
+++ b/arch/x86/crypto/chacha-avx512vl-x86_64.S
@@ -166,7 +166,7 @@ SYM_FUNC_START(chacha_2block_xor_avx512vl)
.Ldone2:
vzeroupper
- ret
+ RET
.Lxorpart2:
# xor remaining bytes from partial register into output
@@ -432,7 +432,7 @@ SYM_FUNC_START(chacha_4block_xor_avx512vl)
.Ldone4:
vzeroupper
- ret
+ RET
.Lxorpart4:
# xor remaining bytes from partial register into output
@@ -812,7 +812,7 @@ SYM_FUNC_START(chacha_8block_xor_avx512vl)
.Ldone8:
vzeroupper
- ret
+ RET
.Lxorpart8:
# xor remaining bytes from partial register into output
diff --git a/arch/x86/crypto/chacha-ssse3-x86_64.S b/arch/x86/crypto/chacha-ssse3-x86_64.S
index ca1788bfee16..7111949cd5b9 100644
--- a/arch/x86/crypto/chacha-ssse3-x86_64.S
+++ b/arch/x86/crypto/chacha-ssse3-x86_64.S
@@ -108,7 +108,7 @@ SYM_FUNC_START_LOCAL(chacha_permute)
sub $2,%r8d
jnz .Ldoubleround
- ret
+ RET
SYM_FUNC_END(chacha_permute)
SYM_FUNC_START(chacha_block_xor_ssse3)
@@ -166,7 +166,7 @@ SYM_FUNC_START(chacha_block_xor_ssse3)
.Ldone:
FRAME_END
- ret
+ RET
.Lxorpart:
# xor remaining bytes from partial register into output
@@ -217,7 +217,7 @@ SYM_FUNC_START(hchacha_block_ssse3)
movdqu %xmm3,0x10(%rsi)
FRAME_END
- ret
+ RET
SYM_FUNC_END(hchacha_block_ssse3)
SYM_FUNC_START(chacha_4block_xor_ssse3)
@@ -762,7 +762,7 @@ SYM_FUNC_START(chacha_4block_xor_ssse3)
.Ldone4:
lea -8(%r10),%rsp
- ret
+ RET
.Lxorpart4:
# xor remaining bytes from partial register into output
diff --git a/arch/x86/crypto/crc32-pclmul_asm.S b/arch/x86/crypto/crc32-pclmul_asm.S
index 6e7d4c4d3208..c392a6edbfff 100644
--- a/arch/x86/crypto/crc32-pclmul_asm.S
+++ b/arch/x86/crypto/crc32-pclmul_asm.S
@@ -236,5 +236,5 @@ fold_64:
pxor %xmm2, %xmm1
pextrd $0x01, %xmm1, %eax
- ret
+ RET
SYM_FUNC_END(crc32_pclmul_le_16)
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
index ac1f303eed0f..80c0d22fc42c 100644
--- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
+++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
@@ -306,7 +306,7 @@ do_return:
popq %rsi
popq %rdi
popq %rbx
- ret
+ RET
SYM_FUNC_END(crc_pcl)
.section .rodata, "a", @progbits
diff --git a/arch/x86/crypto/crct10dif-pcl-asm_64.S b/arch/x86/crypto/crct10dif-pcl-asm_64.S
index b2533d63030e..721474abfb71 100644
--- a/arch/x86/crypto/crct10dif-pcl-asm_64.S
+++ b/arch/x86/crypto/crct10dif-pcl-asm_64.S
@@ -257,7 +257,7 @@ SYM_FUNC_START(crc_t10dif_pcl)
# Final CRC value (x^16 * M(x)) mod G(x) is in low 16 bits of xmm0.
pextrw $0, %xmm0, %eax
- ret
+ RET
.align 16
.Lless_than_256_bytes:
diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S
index fac0fdc3f25d..f4c760f4cade 100644
--- a/arch/x86/crypto/des3_ede-asm_64.S
+++ b/arch/x86/crypto/des3_ede-asm_64.S
@@ -243,7 +243,7 @@ SYM_FUNC_START(des3_ede_x86_64_crypt_blk)
popq %r12;
popq %rbx;
- ret;
+ RET;
SYM_FUNC_END(des3_ede_x86_64_crypt_blk)
/***********************************************************************
@@ -528,7 +528,7 @@ SYM_FUNC_START(des3_ede_x86_64_crypt_blk_3way)
popq %r12;
popq %rbx;
- ret;
+ RET;
SYM_FUNC_END(des3_ede_x86_64_crypt_blk_3way)
.section .rodata, "a", @progbits
diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
index 99ac25e18e09..2bf871899920 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
+++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
@@ -85,7 +85,7 @@ SYM_FUNC_START_LOCAL(__clmul_gf128mul_ble)
psrlq $1, T2
pxor T2, T1
pxor T1, DATA
- ret
+ RET
SYM_FUNC_END(__clmul_gf128mul_ble)
/* void clmul_ghash_mul(char *dst, const u128 *shash) */
@@ -99,7 +99,7 @@ SYM_FUNC_START(clmul_ghash_mul)
pshufb BSWAP, DATA
movups DATA, (%rdi)
FRAME_END
- ret
+ RET
SYM_FUNC_END(clmul_ghash_mul)
/*
@@ -128,5 +128,5 @@ SYM_FUNC_START(clmul_ghash_update)
movups DATA, (%rdi)
.Lupdate_just_ret:
FRAME_END
- ret
+ RET
SYM_FUNC_END(clmul_ghash_update)
diff --git a/arch/x86/crypto/nh-avx2-x86_64.S b/arch/x86/crypto/nh-avx2-x86_64.S
index b22c7b936272..6a0b15e7196a 100644
--- a/arch/x86/crypto/nh-avx2-x86_64.S
+++ b/arch/x86/crypto/nh-avx2-x86_64.S
@@ -153,5 +153,5 @@ SYM_FUNC_START(nh_avx2)
vpaddq T1, T0, T0
vpaddq T4, T0, T0
vmovdqu T0, (HASH)
- ret
+ RET
SYM_FUNC_END(nh_avx2)
diff --git a/arch/x86/crypto/nh-sse2-x86_64.S b/arch/x86/crypto/nh-sse2-x86_64.S
index d7ae22dd6683..34c567bbcb4f 100644
--- a/arch/x86/crypto/nh-sse2-x86_64.S
+++ b/arch/x86/crypto/nh-sse2-x86_64.S
@@ -119,5 +119,5 @@ SYM_FUNC_START(nh_sse2)
paddq PASS2_SUMS, T1
movdqu T0, 0x00(HASH)
movdqu T1, 0x10(HASH)
- ret
+ RET
SYM_FUNC_END(nh_sse2)
diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
index b7ee24df7fba..82f2313f512b 100644
--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
@@ -601,7 +601,7 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk8_avx)
write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
- ret;
+ RET;
SYM_FUNC_END(__serpent_enc_blk8_avx)
.align 8
@@ -655,7 +655,7 @@ SYM_FUNC_START_LOCAL(__serpent_dec_blk8_avx)
write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
- ret;
+ RET;
SYM_FUNC_END(__serpent_dec_blk8_avx)
SYM_FUNC_START(serpent_ecb_enc_8way_avx)
@@ -673,7 +673,7 @@ SYM_FUNC_START(serpent_ecb_enc_8way_avx)
store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(serpent_ecb_enc_8way_avx)
SYM_FUNC_START(serpent_ecb_dec_8way_avx)
@@ -691,7 +691,7 @@ SYM_FUNC_START(serpent_ecb_dec_8way_avx)
store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(serpent_ecb_dec_8way_avx)
SYM_FUNC_START(serpent_cbc_dec_8way_avx)
@@ -709,5 +709,5 @@ SYM_FUNC_START(serpent_cbc_dec_8way_avx)
store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(serpent_cbc_dec_8way_avx)
diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
index 9161b6e441f3..8ea34c9b9316 100644
--- a/arch/x86/crypto/serpent-avx2-asm_64.S
+++ b/arch/x86/crypto/serpent-avx2-asm_64.S
@@ -601,7 +601,7 @@ SYM_FUNC_START_LOCAL(__serpent_enc_blk16)
write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
- ret;
+ RET;
SYM_FUNC_END(__serpent_enc_blk16)
.align 8
@@ -655,7 +655,7 @@ SYM_FUNC_START_LOCAL(__serpent_dec_blk16)
write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
- ret;
+ RET;
SYM_FUNC_END(__serpent_dec_blk16)
SYM_FUNC_START(serpent_ecb_enc_16way)
@@ -677,7 +677,7 @@ SYM_FUNC_START(serpent_ecb_enc_16way)
vzeroupper;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(serpent_ecb_enc_16way)
SYM_FUNC_START(serpent_ecb_dec_16way)
@@ -699,7 +699,7 @@ SYM_FUNC_START(serpent_ecb_dec_16way)
vzeroupper;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(serpent_ecb_dec_16way)
SYM_FUNC_START(serpent_cbc_dec_16way)
@@ -722,5 +722,5 @@ SYM_FUNC_START(serpent_cbc_dec_16way)
vzeroupper;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(serpent_cbc_dec_16way)
diff --git a/arch/x86/crypto/serpent-sse2-i586-asm_32.S b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
index 6379b99cb722..8ccb03ad7cef 100644
--- a/arch/x86/crypto/serpent-sse2-i586-asm_32.S
+++ b/arch/x86/crypto/serpent-sse2-i586-asm_32.S
@@ -553,12 +553,12 @@ SYM_FUNC_START(__serpent_enc_blk_4way)
write_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
- ret;
+ RET;
.L__enc_xor4:
xor_blocks(%eax, RA, RB, RC, RD, RT0, RT1, RE);
- ret;
+ RET;
SYM_FUNC_END(__serpent_enc_blk_4way)
SYM_FUNC_START(serpent_dec_blk_4way)
@@ -612,5 +612,5 @@ SYM_FUNC_START(serpent_dec_blk_4way)
movl arg_dst(%esp), %eax;
write_blocks(%eax, RC, RD, RB, RE, RT0, RT1, RA);
- ret;
+ RET;
SYM_FUNC_END(serpent_dec_blk_4way)
diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
index efb6dc17dc90..e0998a011d1d 100644
--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
@@ -675,13 +675,13 @@ SYM_FUNC_START(__serpent_enc_blk_8way)
write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
- ret;
+ RET;
.L__enc_xor8:
xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
- ret;
+ RET;
SYM_FUNC_END(__serpent_enc_blk_8way)
SYM_FUNC_START(serpent_dec_blk_8way)
@@ -735,5 +735,5 @@ SYM_FUNC_START(serpent_dec_blk_8way)
write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
- ret;
+ RET;
SYM_FUNC_END(serpent_dec_blk_8way)
diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
index 5eed620f4676..a96b2fd26dab 100644
--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
@@ -674,7 +674,7 @@ _loop3:
pop %r12
pop %rbx
- ret
+ RET
SYM_FUNC_END(\name)
.endm
diff --git a/arch/x86/crypto/sha1_ni_asm.S b/arch/x86/crypto/sha1_ni_asm.S
index 5d8415f482bd..2f94ec0e763b 100644
--- a/arch/x86/crypto/sha1_ni_asm.S
+++ b/arch/x86/crypto/sha1_ni_asm.S
@@ -290,7 +290,7 @@ SYM_FUNC_START(sha1_ni_transform)
mov %rbp, %rsp
pop %rbp
- ret
+ RET
SYM_FUNC_END(sha1_ni_transform)
.section .rodata.cst16.PSHUFFLE_BYTE_FLIP_MASK, "aM", @progbits, 16
diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
index d25668d2a1e9..263f916362e0 100644
--- a/arch/x86/crypto/sha1_ssse3_asm.S
+++ b/arch/x86/crypto/sha1_ssse3_asm.S
@@ -99,7 +99,7 @@
pop %rbp
pop %r12
pop %rbx
- ret
+ RET
SYM_FUNC_END(\name)
.endm
diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
index 4739cd31b9db..3baa1ec39097 100644
--- a/arch/x86/crypto/sha256-avx-asm.S
+++ b/arch/x86/crypto/sha256-avx-asm.S
@@ -458,7 +458,7 @@ done_hash:
popq %r13
popq %r12
popq %rbx
- ret
+ RET
SYM_FUNC_END(sha256_transform_avx)
.section .rodata.cst256.K256, "aM", @progbits, 256
diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
index 4087f7432a7e..9bcdbc47b8b4 100644
--- a/arch/x86/crypto/sha256-avx2-asm.S
+++ b/arch/x86/crypto/sha256-avx2-asm.S
@@ -710,7 +710,7 @@ done_hash:
popq %r13
popq %r12
popq %rbx
- ret
+ RET
SYM_FUNC_END(sha256_transform_rorx)
.section .rodata.cst512.K256, "aM", @progbits, 512
diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
index ddfa863b4ee3..c4a5db612c32 100644
--- a/arch/x86/crypto/sha256-ssse3-asm.S
+++ b/arch/x86/crypto/sha256-ssse3-asm.S
@@ -472,7 +472,7 @@ done_hash:
popq %r12
popq %rbx
- ret
+ RET
SYM_FUNC_END(sha256_transform_ssse3)
.section .rodata.cst256.K256, "aM", @progbits, 256
diff --git a/arch/x86/crypto/sha256_ni_asm.S b/arch/x86/crypto/sha256_ni_asm.S
index 7abade04a3a3..94d50dd27cb5 100644
--- a/arch/x86/crypto/sha256_ni_asm.S
+++ b/arch/x86/crypto/sha256_ni_asm.S
@@ -326,7 +326,7 @@ SYM_FUNC_START(sha256_ni_transform)
.Ldone_hash:
- ret
+ RET
SYM_FUNC_END(sha256_ni_transform)
.section .rodata.cst256.K256, "aM", @progbits, 256
diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
index 3d8f0fd4eea8..1fefe6dd3a9e 100644
--- a/arch/x86/crypto/sha512-avx-asm.S
+++ b/arch/x86/crypto/sha512-avx-asm.S
@@ -361,7 +361,7 @@ updateblock:
pop %rbx
nowork:
- ret
+ RET
SYM_FUNC_END(sha512_transform_avx)
########################################################################
diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
index 072cb0f0deae..5cdaab7d6901 100644
--- a/arch/x86/crypto/sha512-avx2-asm.S
+++ b/arch/x86/crypto/sha512-avx2-asm.S
@@ -679,7 +679,7 @@ done_hash:
pop %r12
pop %rbx
- ret
+ RET
SYM_FUNC_END(sha512_transform_rorx)
########################################################################
diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
index bd51c9070bed..b84c22e06c5f 100644
--- a/arch/x86/crypto/sha512-ssse3-asm.S
+++ b/arch/x86/crypto/sha512-ssse3-asm.S
@@ -363,7 +363,7 @@ updateblock:
pop %rbx
nowork:
- ret
+ RET
SYM_FUNC_END(sha512_transform_ssse3)
########################################################################
diff --git a/arch/x86/crypto/sm4-aesni-avx-asm_64.S b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
index 1cc72b4804fa..4767ab61ff48 100644
--- a/arch/x86/crypto/sm4-aesni-avx-asm_64.S
+++ b/arch/x86/crypto/sm4-aesni-avx-asm_64.S
@@ -246,7 +246,7 @@ SYM_FUNC_START(sm4_aesni_avx_crypt4)
.Lblk4_store_output_done:
vzeroall;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(sm4_aesni_avx_crypt4)
.align 8
@@ -356,7 +356,7 @@ SYM_FUNC_START_LOCAL(__sm4_crypt_blk8)
vpshufb RTMP2, RB3, RB3;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(__sm4_crypt_blk8)
/*
@@ -412,7 +412,7 @@ SYM_FUNC_START(sm4_aesni_avx_crypt8)
.Lblk8_store_output_done:
vzeroall;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(sm4_aesni_avx_crypt8)
/*
@@ -487,7 +487,7 @@ SYM_FUNC_START(sm4_aesni_avx_ctr_enc_blk8)
vzeroall;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(sm4_aesni_avx_ctr_enc_blk8)
/*
@@ -537,7 +537,7 @@ SYM_FUNC_START(sm4_aesni_avx_cbc_dec_blk8)
vzeroall;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(sm4_aesni_avx_cbc_dec_blk8)
/*
@@ -590,5 +590,5 @@ SYM_FUNC_START(sm4_aesni_avx_cfb_dec_blk8)
vzeroall;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(sm4_aesni_avx_cfb_dec_blk8)
diff --git a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
index 9c5d3f3ad45a..4732fe8bb65b 100644
--- a/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
+++ b/arch/x86/crypto/sm4-aesni-avx2-asm_64.S
@@ -268,7 +268,7 @@ SYM_FUNC_START_LOCAL(__sm4_crypt_blk16)
vpshufb RTMP2, RB3, RB3;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(__sm4_crypt_blk16)
#define inc_le128(x, minus_one, tmp) \
@@ -387,7 +387,7 @@ SYM_FUNC_START(sm4_aesni_avx2_ctr_enc_blk16)
vzeroall;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(sm4_aesni_avx2_ctr_enc_blk16)
/*
@@ -441,7 +441,7 @@ SYM_FUNC_START(sm4_aesni_avx2_cbc_dec_blk16)
vzeroall;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(sm4_aesni_avx2_cbc_dec_blk16)
/*
@@ -497,5 +497,5 @@ SYM_FUNC_START(sm4_aesni_avx2_cfb_dec_blk16)
vzeroall;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(sm4_aesni_avx2_cfb_dec_blk16)
diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
index 37e63b3c664e..31f9b2ec3857 100644
--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
@@ -267,7 +267,7 @@ SYM_FUNC_START_LOCAL(__twofish_enc_blk8)
outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
- ret;
+ RET;
SYM_FUNC_END(__twofish_enc_blk8)
.align 8
@@ -307,7 +307,7 @@ SYM_FUNC_START_LOCAL(__twofish_dec_blk8)
outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
- ret;
+ RET;
SYM_FUNC_END(__twofish_dec_blk8)
SYM_FUNC_START(twofish_ecb_enc_8way)
@@ -327,7 +327,7 @@ SYM_FUNC_START(twofish_ecb_enc_8way)
store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(twofish_ecb_enc_8way)
SYM_FUNC_START(twofish_ecb_dec_8way)
@@ -347,7 +347,7 @@ SYM_FUNC_START(twofish_ecb_dec_8way)
store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(twofish_ecb_dec_8way)
SYM_FUNC_START(twofish_cbc_dec_8way)
@@ -372,5 +372,5 @@ SYM_FUNC_START(twofish_cbc_dec_8way)
popq %r12;
FRAME_END
- ret;
+ RET;
SYM_FUNC_END(twofish_cbc_dec_8way)
diff --git a/arch/x86/crypto/twofish-i586-asm_32.S b/arch/x86/crypto/twofish-i586-asm_32.S
index a6f09e4f2e46..3abcad661884 100644
--- a/arch/x86/crypto/twofish-i586-asm_32.S
+++ b/arch/x86/crypto/twofish-i586-asm_32.S
@@ -260,7 +260,7 @@ SYM_FUNC_START(twofish_enc_blk)
pop %ebx
pop %ebp
mov $1, %eax
- ret
+ RET
SYM_FUNC_END(twofish_enc_blk)
SYM_FUNC_START(twofish_dec_blk)
@@ -317,5 +317,5 @@ SYM_FUNC_START(twofish_dec_blk)
pop %ebx
pop %ebp
mov $1, %eax
- ret
+ RET
SYM_FUNC_END(twofish_dec_blk)
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
index bca4cea757ce..d2288bf38a8a 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
@@ -258,7 +258,7 @@ SYM_FUNC_START(__twofish_enc_blk_3way)
popq %rbx;
popq %r12;
popq %r13;
- ret;
+ RET;
.L__enc_xor3:
outunpack_enc3(xor);
@@ -266,7 +266,7 @@ SYM_FUNC_START(__twofish_enc_blk_3way)
popq %rbx;
popq %r12;
popq %r13;
- ret;
+ RET;
SYM_FUNC_END(__twofish_enc_blk_3way)
SYM_FUNC_START(twofish_dec_blk_3way)
@@ -301,5 +301,5 @@ SYM_FUNC_START(twofish_dec_blk_3way)
popq %rbx;
popq %r12;
popq %r13;
- ret;
+ RET;
SYM_FUNC_END(twofish_dec_blk_3way)
diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
index d2e56232494a..775af290cd19 100644
--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
@@ -252,7 +252,7 @@ SYM_FUNC_START(twofish_enc_blk)
popq R1
movl $1,%eax
- ret
+ RET
SYM_FUNC_END(twofish_enc_blk)
SYM_FUNC_START(twofish_dec_blk)
@@ -304,5 +304,5 @@ SYM_FUNC_START(twofish_dec_blk)
popq R1
movl $1,%eax
- ret
+ RET
SYM_FUNC_END(twofish_dec_blk)
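
The mechanical ret -> RET conversions throughout the crypto and entry assembly above exist so every return flows through one macro the build can decorate. A minimal sketch of the definition, assuming the CONFIG_SLS (straight-line speculation) option this series introduces; the exact header location is an assumption:

    /* sketch of the RET macro the asm files now use (assumed to live in
     * arch/x86/include/asm/linkage.h, gated by CONFIG_SLS) */
    #ifdef CONFIG_SLS
    #define RET	ret; int3	/* int3 stops straight-line speculation past the ret */
    #else
    #define RET	ret
    #endif
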
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index ccb9d32768f3..887420844066 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -268,19 +268,16 @@
1: popl %ds
2: popl %es
3: popl %fs
- addl $(4 + \pop), %esp /* pop the unused "gs" slot */
+4: addl $(4 + \pop), %esp /* pop the unused "gs" slot */
IRET_FRAME
-.pushsection .fixup, "ax"
-4: movl $0, (%esp)
- jmp 1b
-5: movl $0, (%esp)
- jmp 2b
-6: movl $0, (%esp)
- jmp 3b
-.popsection
- _ASM_EXTABLE(1b, 4b)
- _ASM_EXTABLE(2b, 5b)
- _ASM_EXTABLE(3b, 6b)
+
+ /*
+	 * There is no _ASM_EXTABLE_TYPE_REG() for ASM; however, since this is
+	 * ASM the registers are known and we can trivially hard-code them.
+ */
+ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_POP_ZERO|EX_REG_DS)
+ _ASM_EXTABLE_TYPE(2b, 3b, EX_TYPE_POP_ZERO|EX_REG_ES)
+ _ASM_EXTABLE_TYPE(3b, 4b, EX_TYPE_POP_ZERO|EX_REG_FS)
.endm
.macro RESTORE_ALL_NMI cr3_reg:req pop=0
@@ -740,7 +737,7 @@ SYM_FUNC_START(schedule_tail_wrapper)
popl %eax
FRAME_END
- ret
+ RET
SYM_FUNC_END(schedule_tail_wrapper)
.popsection
@@ -925,10 +922,8 @@ SYM_FUNC_START(entry_SYSENTER_32)
sti
sysexit
-.pushsection .fixup, "ax"
-2: movl $0, PT_FS(%esp)
- jmp 1b
-.popsection
+2: movl $0, PT_FS(%esp)
+ jmp 1b
_ASM_EXTABLE(1b, 2b)
.Lsysenter_fix_flags:
@@ -996,8 +991,7 @@ restore_all_switch_stack:
*/
iret
-.section .fixup, "ax"
-SYM_CODE_START(asm_iret_error)
+.Lasm_iret_error:
pushl $0 # no error code
pushl $iret_error
@@ -1014,9 +1008,8 @@ SYM_CODE_START(asm_iret_error)
#endif
jmp handle_exception
-SYM_CODE_END(asm_iret_error)
-.previous
- _ASM_EXTABLE(.Lirq_return, asm_iret_error)
+
+ _ASM_EXTABLE(.Lirq_return, .Lasm_iret_error)
SYM_FUNC_END(entry_INT80_32)
.macro FIXUP_ESPFIX_STACK
@@ -1248,14 +1241,14 @@ SYM_CODE_START(asm_exc_nmi)
SYM_CODE_END(asm_exc_nmi)
.pushsection .text, "ax"
-SYM_CODE_START(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_and_make_dead)
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
movl PER_CPU_VAR(cpu_current_top_of_stack), %esi
leal -TOP_OF_KERNEL_STACK_PADDING-PTREGS_SIZE(%esi), %esp
- call do_exit
+ call make_task_dead
1: jmp 1b
-SYM_CODE_END(rewind_stack_do_exit)
+SYM_CODE_END(rewind_stack_and_make_dead)
.popsection
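
With the .fixup section gone, the segment-register pops above recover through typed extable entries handled in C. A hedged sketch of the dispatch, mirroring the EX_TYPE_POP_REG/EX_TYPE_IMM_REG relationship defined later in this diff; the helper names are assumptions:

    /* sketch: on a faulting 'popl %seg', consume the stack slot and make
     * the pop read as zero, then resume after the instruction */
    static bool ex_handler_sketch(struct pt_regs *regs, int type, int reg, long imm)
    {
    	switch (type) {
    	case EX_TYPE_POP_REG:
    		regs->sp += sizeof(long);	/* skip the slot the faulted pop targeted */
    		fallthrough;
    	case EX_TYPE_IMM_REG:
    		*pt_regs_nr(regs, reg) = imm;	/* EX_TYPE_POP_ZERO encodes imm == 0 */
    		return true;
    	}
    	return false;
    }
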
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 97b1f84bb53f..466df3e50276 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -738,14 +738,10 @@ SYM_FUNC_START(asm_load_gs_index)
2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
swapgs
FRAME_END
- ret
-SYM_FUNC_END(asm_load_gs_index)
-EXPORT_SYMBOL(asm_load_gs_index)
+ RET
- _ASM_EXTABLE(.Lgs_change, .Lbad_gs)
- .section .fixup, "ax"
/* running with kernelgs */
-SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
+.Lbad_gs:
swapgs /* switch back to user gs */
.macro ZAP_GS
/* This can't be a string because the preprocessor needs to see it. */
@@ -756,8 +752,11 @@ SYM_CODE_START_LOCAL_NOALIGN(.Lbad_gs)
xorl %eax, %eax
movl %eax, %gs
jmp 2b
-SYM_CODE_END(.Lbad_gs)
- .previous
+
+ _ASM_EXTABLE(.Lgs_change, .Lbad_gs)
+
+SYM_FUNC_END(asm_load_gs_index)
+EXPORT_SYMBOL(asm_load_gs_index)
#ifdef CONFIG_XEN_PV
/*
@@ -889,7 +888,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
* is needed here.
*/
SAVE_AND_SET_GSBASE scratch_reg=%rax save_reg=%rbx
- ret
+ RET
.Lparanoid_entry_checkgs:
/* EBX = 1 -> kernel GSBASE active, no restore required */
@@ -910,7 +909,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
.Lparanoid_kernel_gsbase:
FENCE_SWAPGS_KERNEL_ENTRY
- ret
+ RET
SYM_CODE_END(paranoid_entry)
/*
@@ -989,7 +988,7 @@ SYM_CODE_START_LOCAL(error_entry)
movq %rax, %rsp /* switch stack */
ENCODE_FRAME_POINTER
pushq %r12
- ret
+ RET
/*
* There are two places in the kernel that can potentially fault with
@@ -1020,7 +1019,7 @@ SYM_CODE_START_LOCAL(error_entry)
*/
.Lerror_entry_done_lfence:
FENCE_SWAPGS_KERNEL_ENTRY
- ret
+ RET
.Lbstep_iret:
/* Fix truncated RIP */
@@ -1428,7 +1427,7 @@ SYM_CODE_END(ignore_sysret)
#endif
.pushsection .text, "ax"
-SYM_CODE_START(rewind_stack_do_exit)
+SYM_CODE_START(rewind_stack_and_make_dead)
UNWIND_HINT_FUNC
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
@@ -1437,6 +1436,6 @@ SYM_CODE_START(rewind_stack_do_exit)
leaq -PTREGS_SIZE(%rax), %rsp
UNWIND_HINT_REGS
- call do_exit
-SYM_CODE_END(rewind_stack_do_exit)
+ call make_task_dead
+SYM_CODE_END(rewind_stack_and_make_dead)
.popsection
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
index 7e25543693de..320480a8db4f 100644
--- a/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/arch/x86/entry/syscalls/syscall_32.tbl
@@ -454,3 +454,4 @@
447 i386 memfd_secret sys_memfd_secret
448 i386 process_mrelease sys_process_mrelease
449 i386 futex_waitv sys_futex_waitv
+450 i386 set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
index fe8f8dd157b4..c84d12608cd2 100644
--- a/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/arch/x86/entry/syscalls/syscall_64.tbl
@@ -371,6 +371,7 @@
447 common memfd_secret sys_memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
#
# Due to a historical design error, certain syscalls are numbered differently
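
Both tables gain syscall 450. Until libc grows a wrapper it can be exercised through syscall(2); a minimal userspace sketch, where the numeric fallback matches the table entries above:

    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_set_mempolicy_home_node
    #define __NR_set_mempolicy_home_node 450	/* from the x86 tables above */
    #endif

    /* Set the preferred "home" NUMA node for a memory range governed by
     * an MPOL_BIND/MPOL_PREFERRED_MANY policy; flags must currently be 0. */
    static long set_mempolicy_home_node(void *start, unsigned long len,
    				    unsigned long home_node, unsigned long flags)
    {
    	return syscall(__NR_set_mempolicy_home_node,
    		       (unsigned long)start, len, home_node, flags);
    }
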
diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
index f1f96d4d8cd6..7591bab060f7 100644
--- a/arch/x86/entry/thunk_32.S
+++ b/arch/x86/entry/thunk_32.S
@@ -24,7 +24,7 @@ SYM_CODE_START_NOALIGN(\name)
popl %edx
popl %ecx
popl %eax
- ret
+ RET
_ASM_NOKPROBE(\name)
SYM_CODE_END(\name)
.endm
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
index 496b11ec469d..505b488fcc65 100644
--- a/arch/x86/entry/thunk_64.S
+++ b/arch/x86/entry/thunk_64.S
@@ -50,7 +50,7 @@ SYM_CODE_START_LOCAL_NOALIGN(__thunk_restore)
popq %rsi
popq %rdi
popq %rbp
- ret
+ RET
_ASM_NOKPROBE(__thunk_restore)
SYM_CODE_END(__thunk_restore)
#endif
diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S
index dc8da7695859..bafa73f09e92 100644
--- a/arch/x86/entry/vdso/vdso-layout.lds.S
+++ b/arch/x86/entry/vdso/vdso-layout.lds.S
@@ -77,7 +77,6 @@ SECTIONS
.text : {
*(.text*)
- *(.fixup)
} :text =0x90909090,
diff --git a/arch/x86/entry/vdso/vdso32/system_call.S b/arch/x86/entry/vdso/vdso32/system_call.S
index 6ddd7a937b3e..d33c6513fd2c 100644
--- a/arch/x86/entry/vdso/vdso32/system_call.S
+++ b/arch/x86/entry/vdso/vdso32/system_call.S
@@ -78,7 +78,7 @@ SYM_INNER_LABEL(int80_landing_pad, SYM_L_GLOBAL)
popl %ecx
CFI_RESTORE ecx
CFI_ADJUST_CFA_OFFSET -4
- ret
+ RET
CFI_ENDPROC
.size __kernel_vsyscall,.-__kernel_vsyscall
diff --git a/arch/x86/entry/vdso/vsgx.S b/arch/x86/entry/vdso/vsgx.S
index 99dafac992e2..d77d278ee9dd 100644
--- a/arch/x86/entry/vdso/vsgx.S
+++ b/arch/x86/entry/vdso/vsgx.S
@@ -81,7 +81,7 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave)
pop %rbx
leave
.cfi_def_cfa %rsp, 8
- ret
+ RET
/* The out-of-line code runs with the pre-leave stack frame. */
.cfi_def_cfa %rbp, 16
diff --git a/arch/x86/entry/vsyscall/vsyscall_emu_64.S b/arch/x86/entry/vsyscall/vsyscall_emu_64.S
index 2e203f3a25a7..15e35159ebb6 100644
--- a/arch/x86/entry/vsyscall/vsyscall_emu_64.S
+++ b/arch/x86/entry/vsyscall/vsyscall_emu_64.S
@@ -19,17 +19,17 @@ __vsyscall_page:
mov $__NR_gettimeofday, %rax
syscall
- ret
+ RET
.balign 1024, 0xcc
mov $__NR_time, %rax
syscall
- ret
+ RET
.balign 1024, 0xcc
mov $__NR_getcpu, %rax
syscall
- ret
+ RET
.balign 4096, 0xcc
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 68dea7ce6a22..e686c5e0537b 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2771,7 +2771,7 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
struct unwind_state state;
unsigned long addr;
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ if (perf_guest_state()) {
/* TODO: We don't support guest os callchain now */
return;
}
@@ -2874,7 +2874,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
struct stack_frame frame;
const struct stack_frame __user *fp;
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
+ if (perf_guest_state()) {
/* TODO: We don't support guest os callchain now */
return;
}
@@ -2951,18 +2951,19 @@ static unsigned long code_segment_base(struct pt_regs *regs)
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
- return perf_guest_cbs->get_guest_ip();
+ if (perf_guest_state())
+ return perf_guest_get_ip();
return regs->ip + code_segment_base(regs);
}
unsigned long perf_misc_flags(struct pt_regs *regs)
{
+ unsigned int guest_state = perf_guest_state();
int misc = 0;
- if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
- if (perf_guest_cbs->is_user_mode())
+ if (guest_state) {
+ if (guest_state & PERF_GUEST_USER)
misc |= PERF_RECORD_MISC_GUEST_USER;
else
misc |= PERF_RECORD_MISC_GUEST_KERNEL;
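
The perf_guest_cbs pointer chases are replaced by helpers: perf_guest_state() returns a state bitmap (0 when no guest is running) instead of requiring two callback dereferences. A hedged sketch of the consumer pattern, with PERF_GUEST_USER taken from the hunk above and the function itself illustrative:

    /* sketch: classify a guest sample from the new state bitmap */
    static int guest_misc_flags(void)
    {
    	unsigned int guest_state = perf_guest_state();

    	if (!guest_state)		/* 0: not in a guest, handled elsewhere */
    		return 0;

    	return (guest_state & PERF_GUEST_USER) ? PERF_RECORD_MISC_GUEST_USER
    					       : PERF_RECORD_MISC_GUEST_KERNEL;
    }
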
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index ec6444f2c9dc..a3c7ca876aeb 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2901,10 +2901,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
*/
if (__test_and_clear_bit(GLOBAL_STATUS_TRACE_TOPAPMI_BIT, (unsigned long *)&status)) {
handled++;
- if (unlikely(perf_guest_cbs && perf_guest_cbs->is_in_guest() &&
- perf_guest_cbs->handle_intel_pt_intr))
- perf_guest_cbs->handle_intel_pt_intr();
- else
+ if (!perf_guest_handle_intel_pt_intr())
intel_pt_interrupt();
}
@@ -4706,6 +4703,19 @@ static __initconst const struct x86_pmu intel_pmu = {
.lbr_read = intel_pmu_lbr_read_64,
.lbr_save = intel_pmu_lbr_save,
.lbr_restore = intel_pmu_lbr_restore,
+
+ /*
+ * SMM has access to all 4 rings and while traditionally SMM code only
+ * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM.
+ *
+ * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction
+ * between SMM or not, this results in what should be pure userspace
+ * counters including SMM data.
+ *
+ * This is a clear privilege issue, therefore globally disable
+ * counting SMM by default.
+ */
+ .attr_freeze_on_smi = 1,
};
static __init void intel_clovertown_quirk(void)
@@ -6239,6 +6249,19 @@ __init int intel_pmu_init(void)
pmu->num_counters = x86_pmu.num_counters;
pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
}
+
+ /*
+	 * Quirk: For some Alder Lake machines, when all E-cores are disabled in
+	 * the BIOS, CPUID leaf 0xA enumerates all counters of the P-cores.
+	 * However, X86_FEATURE_HYBRID_CPU is still set, so the code above would
+	 * mistakenly add extra counters for the P-cores. Correct the number of
+	 * counters here.
+ */
+ if ((pmu->num_counters > 8) || (pmu->num_counters_fixed > 4)) {
+ pmu->num_counters = x86_pmu.num_counters;
+ pmu->num_counters_fixed = x86_pmu.num_counters_fixed;
+ }
+
pmu->max_pebs_events = min_t(unsigned, MAX_PEBS_EVENTS, pmu->num_counters);
pmu->unconstrained = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << pmu->num_counters) - 1,
@@ -6343,6 +6366,8 @@ __init int intel_pmu_init(void)
}
if (x86_pmu.lbr_nr) {
+ intel_pmu_lbr_init();
+
pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
/* only support branch_stack snapshot for perfmon >= v2 */
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 8043213b75a5..669c2be14784 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -8,14 +8,6 @@
#include "../perf_event.h"
-static const enum {
- LBR_EIP_FLAGS = 1,
- LBR_TSX = 2,
-} lbr_desc[LBR_FORMAT_MAX_KNOWN + 1] = {
- [LBR_FORMAT_EIP_FLAGS] = LBR_EIP_FLAGS,
- [LBR_FORMAT_EIP_FLAGS2] = LBR_EIP_FLAGS | LBR_TSX,
-};
-
/*
* Intel LBR_SELECT bits
* Intel Vol3a, April 2011, Section 16.7 Table 16-10
@@ -243,7 +235,7 @@ void intel_pmu_lbr_reset_64(void)
for (i = 0; i < x86_pmu.lbr_nr; i++) {
wrmsrl(x86_pmu.lbr_from + i, 0);
wrmsrl(x86_pmu.lbr_to + i, 0);
- if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+ if (x86_pmu.lbr_has_info)
wrmsrl(x86_pmu.lbr_info + i, 0);
}
}
@@ -305,11 +297,10 @@ enum {
*/
static inline bool lbr_from_signext_quirk_needed(void)
{
- int lbr_format = x86_pmu.intel_cap.lbr_format;
bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
boot_cpu_has(X86_FEATURE_RTM);
- return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
+ return !tsx_support && x86_pmu.lbr_has_tsx;
}
static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
@@ -427,12 +418,12 @@ rdlbr_all(struct lbr_entry *lbr, unsigned int idx, bool need_info)
void intel_pmu_lbr_restore(void *ctx)
{
- bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct x86_perf_task_context *task_ctx = ctx;
- int i;
- unsigned lbr_idx, mask;
+ bool need_info = x86_pmu.lbr_has_info;
u64 tos = task_ctx->tos;
+ unsigned lbr_idx, mask;
+ int i;
mask = x86_pmu.lbr_nr - 1;
for (i = 0; i < task_ctx->valid_lbrs; i++) {
@@ -444,7 +435,7 @@ void intel_pmu_lbr_restore(void *ctx)
lbr_idx = (tos - i) & mask;
wrlbr_from(lbr_idx, 0);
wrlbr_to(lbr_idx, 0);
- if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+ if (need_info)
wrlbr_info(lbr_idx, 0);
}
@@ -519,9 +510,9 @@ static void __intel_pmu_lbr_restore(void *ctx)
void intel_pmu_lbr_save(void *ctx)
{
- bool need_info = x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO;
struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct x86_perf_task_context *task_ctx = ctx;
+ bool need_info = x86_pmu.lbr_has_info;
unsigned lbr_idx, mask;
u64 tos;
int i;
@@ -816,7 +807,6 @@ void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
bool need_info = false, call_stack = false;
unsigned long mask = x86_pmu.lbr_nr - 1;
- int lbr_format = x86_pmu.intel_cap.lbr_format;
u64 tos = intel_pmu_lbr_tos();
int i;
int out = 0;
@@ -831,9 +821,7 @@ void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
for (i = 0; i < num; i++) {
unsigned long lbr_idx = (tos - i) & mask;
u64 from, to, mis = 0, pred = 0, in_tx = 0, abort = 0;
- int skip = 0;
u16 cycles = 0;
- int lbr_flags = lbr_desc[lbr_format];
from = rdlbr_from(lbr_idx, NULL);
to = rdlbr_to(lbr_idx, NULL);
@@ -845,37 +833,39 @@ void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
if (call_stack && !from)
break;
- if (lbr_format == LBR_FORMAT_INFO && need_info) {
- u64 info;
-
- info = rdlbr_info(lbr_idx, NULL);
- mis = !!(info & LBR_INFO_MISPRED);
- pred = !mis;
- in_tx = !!(info & LBR_INFO_IN_TX);
- abort = !!(info & LBR_INFO_ABORT);
- cycles = (info & LBR_INFO_CYCLES);
- }
-
- if (lbr_format == LBR_FORMAT_TIME) {
- mis = !!(from & LBR_FROM_FLAG_MISPRED);
- pred = !mis;
- skip = 1;
- cycles = ((to >> 48) & LBR_INFO_CYCLES);
-
- to = (u64)((((s64)to) << 16) >> 16);
- }
-
- if (lbr_flags & LBR_EIP_FLAGS) {
- mis = !!(from & LBR_FROM_FLAG_MISPRED);
- pred = !mis;
- skip = 1;
- }
- if (lbr_flags & LBR_TSX) {
- in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
- abort = !!(from & LBR_FROM_FLAG_ABORT);
- skip = 3;
+ if (x86_pmu.lbr_has_info) {
+ if (need_info) {
+ u64 info;
+
+ info = rdlbr_info(lbr_idx, NULL);
+ mis = !!(info & LBR_INFO_MISPRED);
+ pred = !mis;
+ cycles = (info & LBR_INFO_CYCLES);
+ if (x86_pmu.lbr_has_tsx) {
+ in_tx = !!(info & LBR_INFO_IN_TX);
+ abort = !!(info & LBR_INFO_ABORT);
+ }
+ }
+ } else {
+ int skip = 0;
+
+ if (x86_pmu.lbr_from_flags) {
+ mis = !!(from & LBR_FROM_FLAG_MISPRED);
+ pred = !mis;
+ skip = 1;
+ }
+ if (x86_pmu.lbr_has_tsx) {
+ in_tx = !!(from & LBR_FROM_FLAG_IN_TX);
+ abort = !!(from & LBR_FROM_FLAG_ABORT);
+ skip = 3;
+ }
+ from = (u64)((((s64)from) << skip) >> skip);
+
+ if (x86_pmu.lbr_to_cycles) {
+ cycles = ((to >> 48) & LBR_INFO_CYCLES);
+ to = (u64)((((s64)to) << 16) >> 16);
+ }
}
- from = (u64)((((s64)from) << skip) >> skip);
/*
* Some CPUs report duplicated abort records,
@@ -903,37 +893,40 @@ void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
cpuc->lbr_stack.hw_idx = tos;
}
+static DEFINE_STATIC_KEY_FALSE(x86_lbr_mispred);
+static DEFINE_STATIC_KEY_FALSE(x86_lbr_cycles);
+static DEFINE_STATIC_KEY_FALSE(x86_lbr_type);
+
static __always_inline int get_lbr_br_type(u64 info)
{
- if (!static_cpu_has(X86_FEATURE_ARCH_LBR) || !x86_pmu.lbr_br_type)
- return 0;
+ int type = 0;
- return (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;
+ if (static_branch_likely(&x86_lbr_type))
+ type = (info & LBR_INFO_BR_TYPE) >> LBR_INFO_BR_TYPE_OFFSET;
+
+ return type;
}
static __always_inline bool get_lbr_mispred(u64 info)
{
- if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
- return 0;
+ bool mispred = 0;
- return !!(info & LBR_INFO_MISPRED);
-}
+ if (static_branch_likely(&x86_lbr_mispred))
+ mispred = !!(info & LBR_INFO_MISPRED);
-static __always_inline bool get_lbr_predicted(u64 info)
-{
- if (static_cpu_has(X86_FEATURE_ARCH_LBR) && !x86_pmu.lbr_mispred)
- return 0;
-
- return !(info & LBR_INFO_MISPRED);
+ return mispred;
}
static __always_inline u16 get_lbr_cycles(u64 info)
{
+ u16 cycles = info & LBR_INFO_CYCLES;
+
if (static_cpu_has(X86_FEATURE_ARCH_LBR) &&
- !(x86_pmu.lbr_timed_lbr && info & LBR_INFO_CYC_CNT_VALID))
- return 0;
+ (!static_branch_likely(&x86_lbr_cycles) ||
+ !(info & LBR_INFO_CYC_CNT_VALID)))
+ cycles = 0;
- return info & LBR_INFO_CYCLES;
+ return cycles;
}
static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
@@ -961,7 +954,7 @@ static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc,
e->from = from;
e->to = to;
e->mispred = get_lbr_mispred(info);
- e->predicted = get_lbr_predicted(info);
+ e->predicted = !e->mispred;
e->in_tx = !!(info & LBR_INFO_IN_TX);
e->abort = !!(info & LBR_INFO_ABORT);
e->cycles = get_lbr_cycles(info);
@@ -1120,7 +1113,7 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
(br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
- (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO))
+ x86_pmu.lbr_has_info)
reg->config |= LBR_NO_INFO;
return 0;
@@ -1706,6 +1699,38 @@ void intel_pmu_lbr_init_knl(void)
x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
}
+void intel_pmu_lbr_init(void)
+{
+ switch (x86_pmu.intel_cap.lbr_format) {
+ case LBR_FORMAT_EIP_FLAGS2:
+ x86_pmu.lbr_has_tsx = 1;
+ fallthrough;
+ case LBR_FORMAT_EIP_FLAGS:
+ x86_pmu.lbr_from_flags = 1;
+ break;
+
+ case LBR_FORMAT_INFO:
+ x86_pmu.lbr_has_tsx = 1;
+ fallthrough;
+ case LBR_FORMAT_INFO2:
+ x86_pmu.lbr_has_info = 1;
+ break;
+
+ case LBR_FORMAT_TIME:
+ x86_pmu.lbr_from_flags = 1;
+ x86_pmu.lbr_to_cycles = 1;
+ break;
+ }
+
+ if (x86_pmu.lbr_has_info) {
+ /*
+	 * Only used in combination with baseline PEBS.
+ */
+ static_branch_enable(&x86_lbr_mispred);
+ static_branch_enable(&x86_lbr_cycles);
+ }
+}
+
/*
* LBR state size is variable based on the max number of registers.
* This calculates the expected state size, which should match
@@ -1726,6 +1751,9 @@ static bool is_arch_lbr_xsave_available(void)
* Check the LBR state with the corresponding software structure.
* Disable LBR XSAVES support if the size doesn't match.
*/
+ if (xfeature_size(XFEATURE_LBR) == 0)
+ return false;
+
if (WARN_ON(xfeature_size(XFEATURE_LBR) != get_lbr_state_size()))
return false;
@@ -1765,6 +1793,12 @@ void __init intel_pmu_arch_lbr_init(void)
x86_pmu.lbr_br_type = ecx.split.lbr_br_type;
x86_pmu.lbr_nr = lbr_nr;
+ if (x86_pmu.lbr_mispred)
+ static_branch_enable(&x86_lbr_mispred);
+ if (x86_pmu.lbr_timed_lbr)
+ static_branch_enable(&x86_lbr_cycles);
+ if (x86_pmu.lbr_br_type)
+ static_branch_enable(&x86_lbr_type);
arch_lbr_xsave = is_arch_lbr_xsave_available();
if (arch_lbr_xsave) {
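
The per-format lbr_desc[] lookup table is gone: intel_pmu_lbr_init() derives capability bits once, and core.c calls it whenever x86_pmu.lbr_nr is set (see the hunk above). The resulting mapping, written out as a hedged C comment with a sample predicate (the function is illustrative, not from the patch):

    /* mapping derived from the intel_pmu_lbr_init() switch above:
     *   LBR_FORMAT_EIP_FLAGS   -> lbr_from_flags
     *   LBR_FORMAT_EIP_FLAGS2  -> lbr_from_flags | lbr_has_tsx
     *   LBR_FORMAT_TIME        -> lbr_from_flags | lbr_to_cycles
     *   LBR_FORMAT_INFO        -> lbr_has_info   | lbr_has_tsx
     *   LBR_FORMAT_INFO2       -> lbr_has_info
     */
    static bool lbr_needs_info_msr(void)
    {
    	return x86_pmu.lbr_has_info;	/* gates rdlbr_info()/wrlbr_info() use */
    }
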
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 7f406c14715f..2d33bba9a144 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -897,8 +897,9 @@ static void pt_handle_status(struct pt *pt)
* means we are already losing data; need to let the decoder
* know.
*/
- if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
- buf->output_off == pt_buffer_region_size(buf)) {
+ if (!buf->single &&
+ (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) ||
+ buf->output_off == pt_buffer_region_size(buf))) {
perf_aux_output_flag(&pt->handle,
PERF_AUX_FLAG_TRUNCATED);
advance++;
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index f1ba6ab2e97e..e497da9bf427 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -1762,7 +1762,7 @@ static const struct intel_uncore_init_fun rkl_uncore_init __initconst = {
static const struct intel_uncore_init_fun adl_uncore_init __initconst = {
.cpu_init = adl_uncore_cpu_init,
- .mmio_init = tgl_uncore_mmio_init,
+ .mmio_init = adl_uncore_mmio_init,
};
static const struct intel_uncore_init_fun icx_uncore_init __initconst = {
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index b9687980aab6..2adeaf4de4df 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -584,10 +584,11 @@ void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
-void adl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
+void adl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
+void adl_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);
/* uncore_snbep.c */
diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c
index 3049c646fa20..6ddadb482f68 100644
--- a/arch/x86/events/intel/uncore_discovery.c
+++ b/arch/x86/events/intel/uncore_discovery.c
@@ -494,8 +494,8 @@ void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box)
writel(0, box->io_addr);
}
-static void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
- struct perf_event *event)
+void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h
index 6d735611c281..cfaf558bdb6b 100644
--- a/arch/x86/events/intel/uncore_discovery.h
+++ b/arch/x86/events/intel/uncore_discovery.h
@@ -139,6 +139,8 @@ void intel_generic_uncore_mmio_disable_box(struct intel_uncore_box *box);
void intel_generic_uncore_mmio_enable_box(struct intel_uncore_box *box);
void intel_generic_uncore_mmio_disable_event(struct intel_uncore_box *box,
struct perf_event *event);
+void intel_generic_uncore_mmio_enable_event(struct intel_uncore_box *box,
+ struct perf_event *event);
void intel_generic_uncore_pci_init_box(struct intel_uncore_box *box);
void intel_generic_uncore_pci_disable_box(struct intel_uncore_box *box);
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c
index 0f63706cdadf..f698a55bde81 100644
--- a/arch/x86/events/intel/uncore_snb.c
+++ b/arch/x86/events/intel/uncore_snb.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Nehalem/SandyBridge/Haswell/Broadwell/Skylake uncore support */
#include "uncore.h"
+#include "uncore_discovery.h"
/* Uncore IMC PCI IDs */
#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
@@ -64,6 +65,20 @@
#define PCI_DEVICE_ID_INTEL_RKL_2_IMC 0x4c53
#define PCI_DEVICE_ID_INTEL_ADL_1_IMC 0x4660
#define PCI_DEVICE_ID_INTEL_ADL_2_IMC 0x4641
+#define PCI_DEVICE_ID_INTEL_ADL_3_IMC 0x4601
+#define PCI_DEVICE_ID_INTEL_ADL_4_IMC 0x4602
+#define PCI_DEVICE_ID_INTEL_ADL_5_IMC 0x4609
+#define PCI_DEVICE_ID_INTEL_ADL_6_IMC 0x460a
+#define PCI_DEVICE_ID_INTEL_ADL_7_IMC 0x4621
+#define PCI_DEVICE_ID_INTEL_ADL_8_IMC 0x4623
+#define PCI_DEVICE_ID_INTEL_ADL_9_IMC 0x4629
+#define PCI_DEVICE_ID_INTEL_ADL_10_IMC 0x4637
+#define PCI_DEVICE_ID_INTEL_ADL_11_IMC 0x463b
+#define PCI_DEVICE_ID_INTEL_ADL_12_IMC 0x4648
+#define PCI_DEVICE_ID_INTEL_ADL_13_IMC 0x4649
+#define PCI_DEVICE_ID_INTEL_ADL_14_IMC 0x4650
+#define PCI_DEVICE_ID_INTEL_ADL_15_IMC 0x4668
+#define PCI_DEVICE_ID_INTEL_ADL_16_IMC 0x4670
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@@ -155,6 +170,7 @@
DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
+DEFINE_UNCORE_FORMAT_ATTR(chmask, chmask, "config:8-11");
DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");
DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23");
DEFINE_UNCORE_FORMAT_ATTR(cmask5, cmask, "config:24-28");
@@ -1334,6 +1350,62 @@ static const struct pci_device_id tgl_uncore_pci_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_2_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_3_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_4_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_5_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_6_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_7_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_8_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_9_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_10_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_11_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_12_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_13_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_14_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_15_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ADL_16_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
{ /* end: all zeroes */ }
};
@@ -1390,7 +1462,8 @@ static struct pci_dev *tgl_uncore_get_mc_dev(void)
#define TGL_UNCORE_MMIO_IMC_MEM_OFFSET 0x10000
#define TGL_UNCORE_PCI_IMC_MAP_SIZE 0xe000
-static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
+static void __uncore_imc_init_box(struct intel_uncore_box *box,
+ unsigned int base_offset)
{
struct pci_dev *pdev = tgl_uncore_get_mc_dev();
struct intel_uncore_pmu *pmu = box->pmu;
@@ -1417,11 +1490,17 @@ static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
addr |= ((resource_size_t)mch_bar << 32);
#endif
+ addr += base_offset;
box->io_addr = ioremap(addr, type->mmio_map_size);
if (!box->io_addr)
pr_warn("perf uncore: Failed to ioremap for %s.\n", type->name);
}
+static void tgl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
+{
+ __uncore_imc_init_box(box, 0);
+}
+
static struct intel_uncore_ops tgl_uncore_imc_freerunning_ops = {
.init_box = tgl_uncore_imc_freerunning_init_box,
.exit_box = uncore_mmio_exit_box,
@@ -1469,3 +1548,136 @@ void tgl_uncore_mmio_init(void)
}
/* end of Tiger Lake MMIO uncore support */
+
+/* Alder Lake MMIO uncore support */
+#define ADL_UNCORE_IMC_BASE 0xd900
+#define ADL_UNCORE_IMC_MAP_SIZE 0x200
+#define ADL_UNCORE_IMC_CTR 0xe8
+#define ADL_UNCORE_IMC_CTRL 0xd0
+#define ADL_UNCORE_IMC_GLOBAL_CTL 0xc0
+#define ADL_UNCORE_IMC_BOX_CTL 0xc4
+#define ADL_UNCORE_IMC_FREERUNNING_BASE 0xd800
+#define ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE 0x100
+
+#define ADL_UNCORE_IMC_CTL_FRZ (1 << 0)
+#define ADL_UNCORE_IMC_CTL_RST_CTRL (1 << 1)
+#define ADL_UNCORE_IMC_CTL_RST_CTRS (1 << 2)
+#define ADL_UNCORE_IMC_CTL_INT (ADL_UNCORE_IMC_CTL_RST_CTRL | \
+ ADL_UNCORE_IMC_CTL_RST_CTRS)
+
+static void adl_uncore_imc_init_box(struct intel_uncore_box *box)
+{
+ __uncore_imc_init_box(box, ADL_UNCORE_IMC_BASE);
+
+ /* The global control in MC1 can control both MCs. */
+ if (box->io_addr && (box->pmu->pmu_idx == 1))
+ writel(ADL_UNCORE_IMC_CTL_INT, box->io_addr + ADL_UNCORE_IMC_GLOBAL_CTL);
+}
+
+static void adl_uncore_mmio_disable_box(struct intel_uncore_box *box)
+{
+ if (!box->io_addr)
+ return;
+
+ writel(ADL_UNCORE_IMC_CTL_FRZ, box->io_addr + uncore_mmio_box_ctl(box));
+}
+
+static void adl_uncore_mmio_enable_box(struct intel_uncore_box *box)
+{
+ if (!box->io_addr)
+ return;
+
+ writel(0, box->io_addr + uncore_mmio_box_ctl(box));
+}
+
+static struct intel_uncore_ops adl_uncore_mmio_ops = {
+ .init_box = adl_uncore_imc_init_box,
+ .exit_box = uncore_mmio_exit_box,
+ .disable_box = adl_uncore_mmio_disable_box,
+ .enable_box = adl_uncore_mmio_enable_box,
+ .disable_event = intel_generic_uncore_mmio_disable_event,
+ .enable_event = intel_generic_uncore_mmio_enable_event,
+ .read_counter = uncore_mmio_read_counter,
+};
+
+#define ADL_UNC_CTL_CHMASK_MASK 0x00000f00
+#define ADL_UNC_IMC_EVENT_MASK (SNB_UNC_CTL_EV_SEL_MASK | \
+ ADL_UNC_CTL_CHMASK_MASK | \
+ SNB_UNC_CTL_EDGE_DET)
+
+static struct attribute *adl_uncore_imc_formats_attr[] = {
+ &format_attr_event.attr,
+ &format_attr_chmask.attr,
+ &format_attr_edge.attr,
+ NULL,
+};
+
+static const struct attribute_group adl_uncore_imc_format_group = {
+ .name = "format",
+ .attrs = adl_uncore_imc_formats_attr,
+};
+
+static struct intel_uncore_type adl_uncore_imc = {
+ .name = "imc",
+ .num_counters = 5,
+ .num_boxes = 2,
+ .perf_ctr_bits = 64,
+ .perf_ctr = ADL_UNCORE_IMC_CTR,
+ .event_ctl = ADL_UNCORE_IMC_CTRL,
+ .event_mask = ADL_UNC_IMC_EVENT_MASK,
+ .box_ctl = ADL_UNCORE_IMC_BOX_CTL,
+ .mmio_offset = 0,
+ .mmio_map_size = ADL_UNCORE_IMC_MAP_SIZE,
+ .ops = &adl_uncore_mmio_ops,
+ .format_group = &adl_uncore_imc_format_group,
+};
+
+enum perf_adl_uncore_imc_freerunning_types {
+ ADL_MMIO_UNCORE_IMC_DATA_TOTAL,
+ ADL_MMIO_UNCORE_IMC_DATA_READ,
+ ADL_MMIO_UNCORE_IMC_DATA_WRITE,
+ ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX
+};
+
+static struct freerunning_counters adl_uncore_imc_freerunning[] = {
+ [ADL_MMIO_UNCORE_IMC_DATA_TOTAL] = { 0x40, 0x0, 0x0, 1, 64 },
+ [ADL_MMIO_UNCORE_IMC_DATA_READ] = { 0x58, 0x0, 0x0, 1, 64 },
+ [ADL_MMIO_UNCORE_IMC_DATA_WRITE] = { 0xA0, 0x0, 0x0, 1, 64 },
+};
+
+static void adl_uncore_imc_freerunning_init_box(struct intel_uncore_box *box)
+{
+ __uncore_imc_init_box(box, ADL_UNCORE_IMC_FREERUNNING_BASE);
+}
+
+static struct intel_uncore_ops adl_uncore_imc_freerunning_ops = {
+ .init_box = adl_uncore_imc_freerunning_init_box,
+ .exit_box = uncore_mmio_exit_box,
+ .read_counter = uncore_mmio_read_counter,
+ .hw_config = uncore_freerunning_hw_config,
+};
+
+static struct intel_uncore_type adl_uncore_imc_free_running = {
+ .name = "imc_free_running",
+ .num_counters = 3,
+ .num_boxes = 2,
+ .num_freerunning_types = ADL_MMIO_UNCORE_IMC_FREERUNNING_TYPE_MAX,
+ .mmio_map_size = ADL_UNCORE_IMC_FREERUNNING_MAP_SIZE,
+ .freerunning = adl_uncore_imc_freerunning,
+ .ops = &adl_uncore_imc_freerunning_ops,
+ .event_descs = tgl_uncore_imc_events,
+ .format_group = &tgl_uncore_imc_format_group,
+};
+
+static struct intel_uncore_type *adl_mmio_uncores[] = {
+ &adl_uncore_imc,
+ &adl_uncore_imc_free_running,
+ NULL
+};
+
+void adl_uncore_mmio_init(void)
+{
+ uncore_mmio_uncores = adl_mmio_uncores;
+}
+
+/* end of Alder Lake MMIO uncore support */
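
For reference, the freerunning descriptors above fill the generic {counter_base, counter_offset, box_offset, num_counters, bits} layout. A hedged sketch of how a counter's MMIO address falls out of that table (simplified from the generic uncore helpers; the function is illustrative):

    /* sketch: MMIO address of freerunning counter @idx of @type, assuming
     * io_addr already includes ADL_UNCORE_IMC_FREERUNNING_BASE per
     * adl_uncore_imc_freerunning_init_box() above */
    static void __iomem *adl_imc_fr_counter(struct intel_uncore_box *box,
    					unsigned int type, unsigned int idx)
    {
    	struct freerunning_counters *fr = &adl_uncore_imc_freerunning[type];

    	return box->io_addr + fr->counter_base + fr->counter_offset * idx;
    }
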
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index 3660f698fb2a..ed869443efb2 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -5482,7 +5482,7 @@ static struct intel_uncore_type icx_uncore_imc = {
.fixed_ctr_bits = 48,
.fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR,
.fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL,
- .event_descs = hswep_uncore_imc_events,
+ .event_descs = snr_uncore_imc_events,
.perf_ctr = SNR_IMC_MMIO_PMON_CTR0,
.event_ctl = SNR_IMC_MMIO_PMON_CTL0,
.event_mask = SNBEP_PMON_RAW_EVENT_MASK,
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 9d376e528dfc..150261d929b9 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -215,7 +215,8 @@ enum {
LBR_FORMAT_EIP_FLAGS2 = 0x04,
LBR_FORMAT_INFO = 0x05,
LBR_FORMAT_TIME = 0x06,
- LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_TIME,
+ LBR_FORMAT_INFO2 = 0x07,
+ LBR_FORMAT_MAX_KNOWN = LBR_FORMAT_INFO2,
};
enum {
@@ -840,6 +841,11 @@ struct x86_pmu {
bool lbr_double_abort; /* duplicated lbr aborts */
bool lbr_pt_coexist; /* (LBR|BTS) may coexist with PT */
+ unsigned int lbr_has_info:1;
+ unsigned int lbr_has_tsx:1;
+ unsigned int lbr_from_flags:1;
+ unsigned int lbr_to_cycles:1;
+
/*
* Intel Architectural LBR CPUID Enumeration
*/
@@ -1392,6 +1398,8 @@ void intel_pmu_lbr_init_skl(void);
void intel_pmu_lbr_init_knl(void);
+void intel_pmu_lbr_init(void);
+
void intel_pmu_arch_lbr_init(void);
void intel_pmu_pebs_data_source_nhm(void);
diff --git a/arch/x86/events/rapl.c b/arch/x86/events/rapl.c
index 85feafacc445..77e3a47af5ad 100644
--- a/arch/x86/events/rapl.c
+++ b/arch/x86/events/rapl.c
@@ -536,11 +536,14 @@ static struct perf_msr intel_rapl_spr_msrs[] = {
* - perf_msr_probe(PERF_RAPL_MAX)
* - want to use same event codes across both architectures
*/
-static struct perf_msr amd_rapl_msrs[PERF_RAPL_MAX] = {
- [PERF_RAPL_PKG] = { MSR_AMD_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr },
+static struct perf_msr amd_rapl_msrs[] = {
+ [PERF_RAPL_PP0] = { 0, &rapl_events_cores_group, 0, false, 0 },
+ [PERF_RAPL_PKG] = { MSR_AMD_PKG_ENERGY_STATUS, &rapl_events_pkg_group, test_msr, false, RAPL_MSR_MASK },
+ [PERF_RAPL_RAM] = { 0, &rapl_events_ram_group, 0, false, 0 },
+ [PERF_RAPL_PP1] = { 0, &rapl_events_gpu_group, 0, false, 0 },
+ [PERF_RAPL_PSYS] = { 0, &rapl_events_psys_group, 0, false, 0 },
};
-
static int rapl_cpu_offline(unsigned int cpu)
{
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
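
The AMD table is rewritten with full initializers so that perf_msr_probe() sees explicit no_check and mask values for every slot. For context, a sketch of the structure those initializers fill, with field names taken from arch/x86/events/probe.h (treat the exact layout as an assumption):

    /* sketch of struct perf_msr as initialized above */
    struct perf_msr {
    	u64			 msr;	/* 0 for events AMD does not expose */
    	struct attribute_group	*grp;
    	bool			(*test)(int idx, void *data);
    	bool			 no_check;
    	u64			 mask;	/* RAPL_MSR_MASK trims reserved bits */
    };
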
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 96eb7db31c8e..8b392b6b7b93 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -28,6 +28,7 @@
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>
#include <linux/highmem.h>
+#include <linux/swiotlb.h>
int hyperv_init_cpuhp;
u64 hv_current_partition_id = ~0ull;
@@ -36,7 +37,7 @@ EXPORT_SYMBOL_GPL(hv_current_partition_id);
void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);
-union hv_ghcb __percpu **hv_ghcb_pg;
+union hv_ghcb * __percpu *hv_ghcb_pg;
/* Storage to save the hypercall page temporarily for hibernation */
static void *hv_hypercall_pg_saved;
@@ -498,6 +499,17 @@ void __init hyperv_init(void)
/* Query the VMs extended capability once, so that it can be cached. */
hv_query_ext_cap(0);
+
+#ifdef CONFIG_SWIOTLB
+ /*
+	 * The swiotlb bounce buffer needs to be mapped into the extra
+	 * address space. The mapping function doesn't work that early in
+	 * boot, so call swiotlb_update_mem_attributes() here instead.
+ */
+ if (hv_is_isolation_supported())
+ swiotlb_update_mem_attributes();
+#endif
+
return;
clean_guest_os_id:
diff --git a/arch/x86/hyperv/irqdomain.c b/arch/x86/hyperv/irqdomain.c
index 514fc64e23d5..7e0f6bedc248 100644
--- a/arch/x86/hyperv/irqdomain.c
+++ b/arch/x86/hyperv/irqdomain.c
@@ -253,64 +253,43 @@ static int hv_unmap_msi_interrupt(struct pci_dev *dev, struct hv_interrupt_entry
return hv_unmap_interrupt(hv_build_pci_dev_id(dev).as_uint64, old_entry);
}
-static void hv_teardown_msi_irq_common(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
+static void hv_teardown_msi_irq(struct pci_dev *dev, struct irq_data *irqd)
{
- u64 status;
struct hv_interrupt_entry old_entry;
- struct irq_desc *desc;
- struct irq_data *data;
struct msi_msg msg;
+ u64 status;
- desc = irq_to_desc(irq);
- if (!desc) {
- pr_debug("%s: no irq desc\n", __func__);
- return;
- }
-
- data = &desc->irq_data;
- if (!data) {
- pr_debug("%s: no irq data\n", __func__);
- return;
- }
-
- if (!data->chip_data) {
+ if (!irqd->chip_data) {
		pr_debug("%s: no chip data!\n", __func__);
return;
}
- old_entry = *(struct hv_interrupt_entry *)data->chip_data;
+ old_entry = *(struct hv_interrupt_entry *)irqd->chip_data;
entry_to_msi_msg(&old_entry, &msg);
- kfree(data->chip_data);
- data->chip_data = NULL;
+ kfree(irqd->chip_data);
+ irqd->chip_data = NULL;
status = hv_unmap_msi_interrupt(dev, &old_entry);
- if (status != HV_STATUS_SUCCESS) {
+ if (status != HV_STATUS_SUCCESS)
pr_err("%s: hypercall failed, status %lld\n", __func__, status);
- return;
- }
}
-static void hv_msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
+static void hv_msi_free_irq(struct irq_domain *domain,
+ struct msi_domain_info *info, unsigned int virq)
{
- int i;
- struct msi_desc *entry;
- struct pci_dev *pdev;
+ struct irq_data *irqd = irq_get_irq_data(virq);
+ struct msi_desc *desc;
- if (WARN_ON_ONCE(!dev_is_pci(dev)))
+ if (!irqd)
return;
- pdev = to_pci_dev(dev);
+ desc = irq_data_get_msi_desc(irqd);
+ if (!desc || !desc->irq || WARN_ON_ONCE(!dev_is_pci(desc->dev)))
+ return;
- for_each_pci_msi_entry(entry, pdev) {
- if (entry->irq) {
- for (i = 0; i < entry->nvec_used; i++) {
- hv_teardown_msi_irq_common(pdev, entry, entry->irq + i);
- irq_domain_free_irqs(entry->irq + i, 1);
- }
- }
- }
+ hv_teardown_msi_irq(to_pci_dev(desc->dev), irqd);
}
/*
@@ -329,7 +308,7 @@ static struct irq_chip hv_pci_msi_controller = {
};
static struct msi_domain_ops pci_msi_domain_ops = {
- .domain_free_irqs = hv_msi_domain_free_irqs,
+ .msi_free = hv_msi_free_irq,
.msi_prepare = pci_msi_prepare,
};
diff --git a/arch/x86/hyperv/ivm.c b/arch/x86/hyperv/ivm.c
index 69c7a57f3307..2b994117581e 100644
--- a/arch/x86/hyperv/ivm.c
+++ b/arch/x86/hyperv/ivm.c
@@ -287,3 +287,31 @@ int hv_set_mem_host_visibility(unsigned long kbuffer, int pagecount, bool visibl
kfree(pfn_array);
return ret;
}
+
+/*
+ * hv_map_memory - map memory to extra space in the AMD SEV-SNP Isolation VM.
+ */
+void *hv_map_memory(void *addr, unsigned long size)
+{
+ unsigned long *pfns = kcalloc(size / PAGE_SIZE,
+ sizeof(unsigned long), GFP_KERNEL);
+ void *vaddr;
+ int i;
+
+ if (!pfns)
+ return NULL;
+
+ for (i = 0; i < size / PAGE_SIZE; i++)
+ pfns[i] = vmalloc_to_pfn(addr + i * PAGE_SIZE) +
+ (ms_hyperv.shared_gpa_boundary >> PAGE_SHIFT);
+
+ vaddr = vmap_pfn(pfns, size / PAGE_SIZE, PAGE_KERNEL_IO);
+ kfree(pfns);
+
+ return vaddr;
+}
+
+void hv_unmap_memory(void *addr)
+{
+ vunmap(addr);
+}
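
A hedged usage sketch for the new pair of helpers; the caller is an assumption, and the size must be page aligned since hv_map_memory() walks the buffer in PAGE_SIZE steps:

    /* sketch: alias a vmalloc'ed buffer above the shared GPA boundary */
    static int hv_example_share_buffer(void *vaddr, unsigned long nr_pages)
    {
    	void *shared = hv_map_memory(vaddr, nr_pages * PAGE_SIZE);

    	if (!shared)
    		return -ENOMEM;

    	/* ... hand the aliased mapping to the device/hypervisor ... */

    	hv_unmap_memory(shared);
    	return 0;
    }
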
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index bd13736d0c05..0ad2378fe6ad 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -68,15 +68,6 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
local_irq_save(flags);
- /*
- * Only check the mask _after_ interrupt has been disabled to avoid the
- * mask changing under our feet.
- */
- if (cpumask_empty(cpus)) {
- local_irq_restore(flags);
- return;
- }
-
flush_pcpu = (struct hv_tlb_flush **)
this_cpu_ptr(hyperv_pcpu_input_arg);
@@ -115,7 +106,9 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
* must. We will also check all VP numbers when walking the
* supplied CPU set to remain correct in all cases.
*/
- if (hv_cpu_number_to_vp_number(cpumask_last(cpus)) >= 64)
+ cpu = cpumask_last(cpus);
+
+ if (cpu < nr_cpumask_bits && hv_cpu_number_to_vp_number(cpu) >= 64)
goto do_ex_hypercall;
for_each_cpu(cpu, cpus) {
@@ -131,6 +124,12 @@ static void hyperv_flush_tlb_multi(const struct cpumask *cpus,
__set_bit(vcpu, (unsigned long *)
&flush->processor_mask);
}
+
+ /* nothing to flush if 'processor_mask' ends up being empty */
+ if (!flush->processor_mask) {
+ local_irq_restore(flags);
+ return;
+ }
}
/*
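
The rework above handles a cpumask that may be concurrently emptied: cpumask_last() on an empty mask returns a value >= nr_cpumask_bits, so the VP-number lookup is guarded, and the hypercall is skipped if the accumulated processor_mask ends up empty. A minimal sketch of the safe-probe pattern, assuming the standard cpumask API:

    /* sketch: highest VP number in @cpus, or -1 if the mask went empty under us */
    static int hv_last_vp_number(const struct cpumask *cpus)
    {
    	unsigned int cpu = cpumask_last(cpus);

    	if (cpu >= nr_cpumask_bits)	/* cpumask_last() on an empty mask */
    		return -1;

    	return hv_cpu_number_to_vp_number(cpu);
    }
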
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 3a168483bc8e..c878fed3056f 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -152,6 +152,33 @@
#else /* ! __ASSEMBLY__ */
+# define DEFINE_EXTABLE_TYPE_REG \
+ ".macro extable_type_reg type:req reg:req\n" \
+ ".set found, 0\n" \
+ ".set regnr, 0\n" \
+ ".irp rs,rax,rcx,rdx,rbx,rsp,rbp,rsi,rdi,r8,r9,r10,r11,r12,r13,r14,r15\n" \
+ ".ifc \\reg, %%\\rs\n" \
+ ".set found, found+1\n" \
+ ".long \\type + (regnr << 8)\n" \
+ ".endif\n" \
+ ".set regnr, regnr+1\n" \
+ ".endr\n" \
+ ".set regnr, 0\n" \
+ ".irp rs,eax,ecx,edx,ebx,esp,ebp,esi,edi,r8d,r9d,r10d,r11d,r12d,r13d,r14d,r15d\n" \
+ ".ifc \\reg, %%\\rs\n" \
+ ".set found, found+1\n" \
+ ".long \\type + (regnr << 8)\n" \
+ ".endif\n" \
+ ".set regnr, regnr+1\n" \
+ ".endr\n" \
+ ".if (found != 1)\n" \
+ ".error \"extable_type_reg: bad register argument\"\n" \
+ ".endif\n" \
+ ".endm\n"
+
+# define UNDEFINE_EXTABLE_TYPE_REG \
+ ".purgem extable_type_reg\n"
+
# define _ASM_EXTABLE_TYPE(from, to, type) \
" .pushsection \"__ex_table\",\"a\"\n" \
" .balign 4\n" \
@@ -160,6 +187,16 @@
" .long " __stringify(type) " \n" \
" .popsection\n"
+# define _ASM_EXTABLE_TYPE_REG(from, to, type, reg) \
+ " .pushsection \"__ex_table\",\"a\"\n" \
+ " .balign 4\n" \
+ " .long (" #from ") - .\n" \
+ " .long (" #to ") - .\n" \
+ DEFINE_EXTABLE_TYPE_REG \
+ "extable_type_reg reg=" __stringify(reg) ", type=" __stringify(type) " \n"\
+ UNDEFINE_EXTABLE_TYPE_REG \
+ " .popsection\n"
+
/* For C file, we already have NOKPROBE_SYMBOL macro */
/*
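
The .irp loops above turn a %reg operand into a small integer and fuse it into the emitted extable data word. Spelled out as a hedged C equivalent of what the generated .long contains (the helper is illustrative):

    /* sketch: the word emitted by 'extable_type_reg type=T reg=%rcx' is
     * T + (regnr << 8), where regnr indexes rax..r15 (so %rcx -> 1);
     * the shift matches EX_DATA_REG_SHIFT introduced later in this diff */
    static int extable_data_word(int type, int regnr)
    {
    	return type | (regnr << 8);
    }
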
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 0367efdc5b7a..a288ecd230ab 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -380,8 +380,6 @@ static __always_inline int fls64(__u64 x)
#include <asm-generic/bitops/fls64.h>
#endif
-#include <asm-generic/bitops/find.h>
-
#include <asm-generic/bitops/sched.h>
#include <asm/arch_hweight.h>
diff --git a/arch/x86/include/asm/bug.h b/arch/x86/include/asm/bug.h
index 84b87538a15d..bab883c0b6fe 100644
--- a/arch/x86/include/asm/bug.h
+++ b/arch/x86/include/asm/bug.h
@@ -22,7 +22,7 @@
#ifdef CONFIG_DEBUG_BUGVERBOSE
-#define _BUG_FLAGS(ins, flags) \
+#define _BUG_FLAGS(ins, flags, extra) \
do { \
asm_inline volatile("1:\t" ins "\n" \
".pushsection __bug_table,\"aw\"\n" \
@@ -31,7 +31,8 @@ do { \
"\t.word %c1" "\t# bug_entry::line\n" \
"\t.word %c2" "\t# bug_entry::flags\n" \
"\t.org 2b+%c3\n" \
- ".popsection" \
+ ".popsection\n" \
+ extra \
: : "i" (__FILE__), "i" (__LINE__), \
"i" (flags), \
"i" (sizeof(struct bug_entry))); \
@@ -39,14 +40,15 @@ do { \
#else /* !CONFIG_DEBUG_BUGVERBOSE */
-#define _BUG_FLAGS(ins, flags) \
+#define _BUG_FLAGS(ins, flags, extra) \
do { \
asm_inline volatile("1:\t" ins "\n" \
".pushsection __bug_table,\"aw\"\n" \
"2:\t" __BUG_REL(1b) "\t# bug_entry::bug_addr\n" \
"\t.word %c0" "\t# bug_entry::flags\n" \
"\t.org 2b+%c1\n" \
- ".popsection" \
+ ".popsection\n" \
+ extra \
: : "i" (flags), \
"i" (sizeof(struct bug_entry))); \
} while (0)
@@ -55,7 +57,7 @@ do { \
#else
-#define _BUG_FLAGS(ins, flags) asm volatile(ins)
+#define _BUG_FLAGS(ins, flags, extra) asm volatile(ins)
#endif /* CONFIG_GENERIC_BUG */
@@ -63,8 +65,8 @@ do { \
#define BUG() \
do { \
instrumentation_begin(); \
- _BUG_FLAGS(ASM_UD2, 0); \
- unreachable(); \
+ _BUG_FLAGS(ASM_UD2, 0, ""); \
+ __builtin_unreachable(); \
} while (0)
/*
@@ -75,9 +77,9 @@ do { \
*/
#define __WARN_FLAGS(flags) \
do { \
+ __auto_type f = BUGFLAG_WARNING|(flags); \
instrumentation_begin(); \
- _BUG_FLAGS(ASM_UD2, BUGFLAG_WARNING|(flags)); \
- annotate_reachable(); \
+ _BUG_FLAGS(ASM_UD2, f, ASM_REACHABLE); \
instrumentation_end(); \
} while (0)
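
BUG() now ends in __builtin_unreachable() rather than unreachable(), with the reachability annotation for WARN folded into the asm statement via the new 'extra' parameter. For readers unfamiliar with the builtin, a standalone illustration in plain userspace C, unrelated to the kernel macro plumbing:

    #include <stdlib.h>

    static int classify(int x)
    {
    	if (x > 0)
    		return 1;
    	if (x < 0)
    		return -1;
    	if (x == 0)
    		return 0;
    	__builtin_unreachable();	/* optimizer may assume this is never reached */
    }

    int main(void)
    {
    	return classify(42) == 1 ? EXIT_SUCCESS : EXIT_FAILURE;
    }
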
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 18de5f76f198..6db4e2932b3d 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -299,7 +299,9 @@
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+#define X86_FEATURE_AMX_BF16 (18*32+22) /* AMX bf16 Support */
#define X86_FEATURE_AMX_TILE (18*32+24) /* AMX tile Support */
+#define X86_FEATURE_AMX_INT8 (18*32+25) /* AMX int8 Support */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
diff --git a/arch/x86/include/asm/extable.h b/arch/x86/include/asm/extable.h
index 93f400eb728f..155c991ba95e 100644
--- a/arch/x86/include/asm/extable.h
+++ b/arch/x86/include/asm/extable.h
@@ -21,7 +21,7 @@
*/
struct exception_table_entry {
- int insn, fixup, type;
+ int insn, fixup, data;
};
struct pt_regs;
@@ -31,8 +31,8 @@ struct pt_regs;
do { \
(a)->fixup = (b)->fixup + (delta); \
(b)->fixup = (tmp).fixup - (delta); \
- (a)->type = (b)->type; \
- (b)->type = (tmp).type; \
+ (a)->data = (b)->data; \
+ (b)->data = (tmp).data; \
} while (0)
extern int fixup_exception(struct pt_regs *regs, int trapnr,
diff --git a/arch/x86/include/asm/extable_fixup_types.h b/arch/x86/include/asm/extable_fixup_types.h
index 409524d5d2eb..503622627400 100644
--- a/arch/x86/include/asm/extable_fixup_types.h
+++ b/arch/x86/include/asm/extable_fixup_types.h
@@ -2,6 +2,36 @@
#ifndef _ASM_X86_EXTABLE_FIXUP_TYPES_H
#define _ASM_X86_EXTABLE_FIXUP_TYPES_H
+/*
+ * Our IMM is signed, as such it must live at the top end of the word. Also,
+ * since C99 hex constants are of ambiguous type, force cast the mask to 'int'
+ * so that FIELD_GET() will DTRT and sign extend the value when it extracts it.
+ */
+#define EX_DATA_TYPE_MASK ((int)0x000000FF)
+#define EX_DATA_REG_MASK ((int)0x00000F00)
+#define EX_DATA_FLAG_MASK ((int)0x0000F000)
+#define EX_DATA_IMM_MASK ((int)0xFFFF0000)
+
+#define EX_DATA_REG_SHIFT 8
+#define EX_DATA_FLAG_SHIFT 12
+#define EX_DATA_IMM_SHIFT 16
+
+#define EX_DATA_REG(reg) ((reg) << EX_DATA_REG_SHIFT)
+#define EX_DATA_FLAG(flag) ((flag) << EX_DATA_FLAG_SHIFT)
+#define EX_DATA_IMM(imm) ((imm) << EX_DATA_IMM_SHIFT)
+
+/* segment regs */
+#define EX_REG_DS EX_DATA_REG(8)
+#define EX_REG_ES EX_DATA_REG(9)
+#define EX_REG_FS EX_DATA_REG(10)
+#define EX_REG_GS EX_DATA_REG(11)
+
+/* flags */
+#define EX_FLAG_CLEAR_AX EX_DATA_FLAG(1)
+#define EX_FLAG_CLEAR_DX EX_DATA_FLAG(2)
+#define EX_FLAG_CLEAR_AX_DX EX_DATA_FLAG(3)
+
+/* types */
#define EX_TYPE_NONE 0
#define EX_TYPE_DEFAULT 1
#define EX_TYPE_FAULT 2
@@ -9,14 +39,29 @@
#define EX_TYPE_COPY 4
#define EX_TYPE_CLEAR_FS 5
#define EX_TYPE_FPU_RESTORE 6
-#define EX_TYPE_WRMSR 7
-#define EX_TYPE_RDMSR 8
-#define EX_TYPE_BPF 9
+#define EX_TYPE_BPF 7
+#define EX_TYPE_WRMSR 8
+#define EX_TYPE_RDMSR 9
+#define EX_TYPE_WRMSR_SAFE 10 /* reg := -EIO */
+#define EX_TYPE_RDMSR_SAFE 11 /* reg := -EIO */
+#define EX_TYPE_WRMSR_IN_MCE 12
+#define EX_TYPE_RDMSR_IN_MCE 13
+#define EX_TYPE_DEFAULT_MCE_SAFE 14
+#define EX_TYPE_FAULT_MCE_SAFE 15
+
+#define EX_TYPE_POP_REG 16 /* sp += sizeof(long) */
+#define EX_TYPE_POP_ZERO (EX_TYPE_POP_REG | EX_DATA_IMM(0))
+
+#define EX_TYPE_IMM_REG 17 /* reg := (long)imm */
+#define EX_TYPE_EFAULT_REG (EX_TYPE_IMM_REG | EX_DATA_IMM(-EFAULT))
+#define EX_TYPE_ZERO_REG (EX_TYPE_IMM_REG | EX_DATA_IMM(0))
+#define EX_TYPE_ONE_REG (EX_TYPE_IMM_REG | EX_DATA_IMM(1))
-#define EX_TYPE_WRMSR_IN_MCE 10
-#define EX_TYPE_RDMSR_IN_MCE 11
+#define EX_TYPE_FAULT_SGX 18
-#define EX_TYPE_DEFAULT_MCE_SAFE 12
-#define EX_TYPE_FAULT_MCE_SAFE 13
+#define EX_TYPE_UCOPY_LEN 19 /* cx := reg + imm*cx */
+#define EX_TYPE_UCOPY_LEN1 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(1))
+#define EX_TYPE_UCOPY_LEN4 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(4))
+#define EX_TYPE_UCOPY_LEN8 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(8))
#endif
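
Decoding the packed data word then reduces to masked extractions. A hedged sketch using the masks above; FIELD_GET is the kernel's generic bitfield helper, and the function itself is illustrative:

    #include <linux/bitfield.h>

    /* sketch: unpack one exception_table_entry::data word */
    static void ex_decode(int data, int *type, int *reg, int *flags, int *imm)
    {
    	*type  = FIELD_GET(EX_DATA_TYPE_MASK, data);
    	*reg   = FIELD_GET(EX_DATA_REG_MASK,  data);
    	*flags = FIELD_GET(EX_DATA_FLAG_MASK, data);
    	*imm   = FIELD_GET(EX_DATA_IMM_MASK,  data);	/* sign-extended via the (int) cast */
    }
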
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
index c2767a6a387e..c83b3020350a 100644
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
@@ -132,10 +132,21 @@ static inline void fpstate_free(struct fpu *fpu) { }
/* fpstate-related functions which are exported to KVM */
extern void fpstate_clear_xstate_component(struct fpstate *fps, unsigned int xfeature);
+extern u64 xstate_get_guest_group_perm(void);
+
/* KVM specific functions */
extern bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu);
extern void fpu_free_guest_fpstate(struct fpu_guest *gfpu);
extern int fpu_swap_kvm_fpstate(struct fpu_guest *gfpu, bool enter_guest);
+extern int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures);
+
+#ifdef CONFIG_X86_64
+extern void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd);
+extern void fpu_sync_guest_vmexit_xfd_state(void);
+#else
+static inline void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd) { }
+static inline void fpu_sync_guest_vmexit_xfd_state(void) { }
+#endif
extern void fpu_copy_guest_fpstate_to_uabi(struct fpu_guest *gfpu, void *buf, unsigned int size, u32 pkru);
extern int fpu_copy_uabi_to_guest_fpstate(struct fpu_guest *gfpu, const void *buf, u64 xcr0, u32 *vpkru);
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 3c06c82ab355..eb7cd1139d97 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -387,6 +387,8 @@ struct fpstate {
/* @regs is dynamically sized! Don't add anything after @regs! */
} __aligned(64);
+#define FPU_GUEST_PERM_LOCKED BIT_ULL(63)
+
struct fpu_state_perm {
/*
* @__state_perm:
@@ -477,6 +479,13 @@ struct fpu {
struct fpu_state_perm perm;
/*
+ * @guest_perm:
+ *
+ * Permission related information for guest pseudo FPUs
+ */
+ struct fpu_state_perm guest_perm;
+
+ /*
* @__fpstate:
*
* Initial in-memory storage for FPU registers which are saved in
@@ -496,6 +505,29 @@ struct fpu {
*/
struct fpu_guest {
/*
+ * @xfeatures: xfeature bitmap of features which are
+ * currently enabled for the guest vCPU.
+ */
+ u64 xfeatures;
+
+ /*
+ * @perm: xfeature bitmap of features which are
+ * permitted to be enabled for the guest
+ * vCPU.
+ */
+ u64 perm;
+
+ /*
+ * @xfd_err: Save the guest value.
+ */
+ u64 xfd_err;
+
+ /*
+ * @uabi_size: Size required for save/restore
+ */
+ unsigned int uabi_size;
+
+ /*
* @fpstate: Pointer to the allocated guest fpstate
*/
struct fpstate *fpstate;
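
The new fpu_guest fields back the guest dynamic-feature (XFD) flow. A hedged sketch of how a hypervisor might use them; only the helper names come from the hunks above, while the call site and the prctl step are assumptions:

    /* sketch: enable AMX for a vCPU once userspace has requested guest
     * permission (e.g. via arch_prctl(ARCH_REQ_XCOMP_GUEST_PERM, ...)) */
    static int vcpu_enable_amx(struct fpu_guest *gfpu)
    {
    	u64 xfeatures = XFEATURE_MASK_XTILE;

    	if ((xstate_get_guest_group_perm() & xfeatures) != xfeatures)
    		return -EPERM;		/* permission was never granted */

    	return fpu_enable_guest_xfd_features(gfpu, xfeatures);
    }
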
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index f9c00110a69a..99d345b686fa 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -17,13 +17,9 @@ do { \
int oldval = 0, ret; \
asm volatile("1:\t" insn "\n" \
"2:\n" \
- "\t.section .fixup,\"ax\"\n" \
- "3:\tmov\t%3, %1\n" \
- "\tjmp\t2b\n" \
- "\t.previous\n" \
- _ASM_EXTABLE_UA(1b, 3b) \
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %1) \
: "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
- : "i" (-EFAULT), "0" (oparg), "1" (0)); \
+ : "0" (oparg), "1" (0)); \
if (ret) \
goto label; \
*oval = oldval; \
@@ -39,15 +35,11 @@ do { \
"3:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
"\tjnz\t2b\n" \
"4:\n" \
- "\t.section .fixup,\"ax\"\n" \
- "5:\tmov\t%5, %1\n" \
- "\tjmp\t4b\n" \
- "\t.previous\n" \
- _ASM_EXTABLE_UA(1b, 5b) \
- _ASM_EXTABLE_UA(3b, 5b) \
+ _ASM_EXTABLE_TYPE_REG(1b, 4b, EX_TYPE_EFAULT_REG, %1) \
+ _ASM_EXTABLE_TYPE_REG(3b, 4b, EX_TYPE_EFAULT_REG, %1) \
: "=&a" (oldval), "=&r" (ret), \
"+m" (*uaddr), "=&r" (tem) \
- : "r" (oparg), "i" (-EFAULT), "1" (0)); \
+ : "r" (oparg), "1" (0)); \
if (ret) \
goto label; \
*oval = oldval; \
@@ -95,15 +87,11 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!user_access_begin(uaddr, sizeof(u32)))
return -EFAULT;
asm volatile("\n"
- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
+ "1:\t" LOCK_PREFIX "cmpxchgl %3, %2\n"
"2:\n"
- "\t.section .fixup, \"ax\"\n"
- "3:\tmov %3, %0\n"
- "\tjmp 2b\n"
- "\t.previous\n"
- _ASM_EXTABLE_UA(1b, 3b)
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %0) \
: "+r" (ret), "=a" (oldval), "+m" (*uaddr)
- : "i" (-EFAULT), "r" (newval), "1" (oldval)
+ : "r" (newval), "1" (oldval)
: "memory"
);
user_access_end();
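
The futex hunks above replace open-coded .fixup stubs with in-place exception-table annotations. A minimal sketch of the pattern, assuming the 5.17-era macros from <asm/asm.h> and <asm/extable_fixup_types.h> (access checks and STAC/CLAC elided for brevity; uaddr stands for a user address):

	static int demo_read_user_int(const unsigned int *uaddr, unsigned int *val)
	{
		int err = 0;
		unsigned int v;

		/*
		 * On a fault at 1:, the exception handler writes -EFAULT
		 * into the register bound to %[err] and resumes at 2:,
		 * so no separate fixup code is emitted at all.
		 */
		asm volatile("1:	movl %[umem], %[out]\n"
			     "2:\n"
			     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %[err])
			     : [err] "+r" (err), [out] "=r" (v)
			     : [umem] "m" (*uaddr));

		*val = v;
		return err;
	}
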
diff --git a/arch/x86/include/asm/hyperv-tlfs.h b/arch/x86/include/asm/hyperv-tlfs.h
index 381e88122a5f..0a9407dc0859 100644
--- a/arch/x86/include/asm/hyperv-tlfs.h
+++ b/arch/x86/include/asm/hyperv-tlfs.h
@@ -602,6 +602,39 @@ enum hv_interrupt_type {
HV_X64_INTERRUPT_TYPE_MAXIMUM = 0x000A,
};
+union hv_msi_address_register {
+ u32 as_uint32;
+ struct {
+ u32 reserved1:2;
+ u32 destination_mode:1;
+ u32 redirection_hint:1;
+ u32 reserved2:8;
+ u32 destination_id:8;
+ u32 msi_base:12;
+ };
+} __packed;
+
+union hv_msi_data_register {
+ u32 as_uint32;
+ struct {
+ u32 vector:8;
+ u32 delivery_mode:3;
+ u32 reserved1:3;
+ u32 level_assert:1;
+ u32 trigger_mode:1;
+ u32 reserved2:16;
+ };
+} __packed;
+
+/* HvRetargetDeviceInterrupt hypercall */
+union hv_msi_entry {
+ u64 as_uint64;
+ struct {
+ union hv_msi_address_register address;
+ union hv_msi_data_register data;
+ } __packed;
+};
+
#include <asm-generic/hyperv-tlfs.h>
#endif
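
The new MSI unions are building blocks for the HvRetargetDeviceInterrupt hypercall argument. A sketch of how they are populated from a composed MSI message, mirroring the hv_set_msi_entry_from_desc() helper that this same series removes from mshyperv.h (see the mshyperv.h hunk below); struct msi_msg comes from <linux/msi.h>, and the function name here is hypothetical:

	static void demo_fill_msi_entry(union hv_msi_entry *entry,
					const struct msi_msg *msg)
	{
		entry->as_uint64 = 0;
		entry->address.as_uint32 = msg->address_lo;
		entry->data.as_uint32 = msg->data;
	}
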
diff --git a/arch/x86/include/asm/insn-eval.h b/arch/x86/include/asm/insn-eval.h
index 43785ee363f1..f07faa61c7f3 100644
--- a/arch/x86/include/asm/insn-eval.h
+++ b/arch/x86/include/asm/insn-eval.h
@@ -15,6 +15,8 @@
#define INSN_CODE_SEG_OPND_SZ(params) (params & 0xf)
#define INSN_CODE_SEG_PARAMS(oper_sz, addr_sz) (oper_sz | (addr_sz << 4))
+int pt_regs_offset(struct pt_regs *regs, int regno);
+
bool insn_has_rep_prefix(struct insn *insn);
void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs);
int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs);
diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 9e50da3ed01a..d39e0de06be2 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -35,6 +35,7 @@ KVM_X86_OP(get_cpl)
KVM_X86_OP(set_segment)
KVM_X86_OP_NULL(get_cs_db_l_bits)
KVM_X86_OP(set_cr0)
+KVM_X86_OP_NULL(post_set_cr3)
KVM_X86_OP(is_valid_cr4)
KVM_X86_OP(set_cr4)
KVM_X86_OP(set_efer)
@@ -54,6 +55,7 @@ KVM_X86_OP_NULL(tlb_remote_flush)
KVM_X86_OP_NULL(tlb_remote_flush_with_range)
KVM_X86_OP(tlb_flush_gva)
KVM_X86_OP(tlb_flush_guest)
+KVM_X86_OP(vcpu_pre_run)
KVM_X86_OP(run)
KVM_X86_OP_NULL(handle_exit)
KVM_X86_OP_NULL(skip_emulated_instruction)
@@ -80,7 +82,7 @@ KVM_X86_OP_NULL(guest_apic_has_interrupt)
KVM_X86_OP(load_eoi_exitmap)
KVM_X86_OP(set_virtual_apic_mode)
KVM_X86_OP_NULL(set_apic_access_page_addr)
-KVM_X86_OP(deliver_posted_interrupt)
+KVM_X86_OP(deliver_interrupt)
KVM_X86_OP_NULL(sync_pir_to_irr)
KVM_X86_OP(set_tss_addr)
KVM_X86_OP(set_identity_map_addr)
@@ -97,8 +99,6 @@ KVM_X86_OP(handle_exit_irqoff)
KVM_X86_OP_NULL(request_immediate_exit)
KVM_X86_OP(sched_in)
KVM_X86_OP_NULL(update_cpu_dirty_logging)
-KVM_X86_OP_NULL(pre_block)
-KVM_X86_OP_NULL(post_block)
KVM_X86_OP_NULL(vcpu_blocking)
KVM_X86_OP_NULL(vcpu_unblocking)
KVM_X86_OP_NULL(update_pi_irte)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 555f4de47ef2..6dcccb304775 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -135,7 +135,7 @@
#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1))
#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE)
-#define KVM_PERMILLE_MMU_PAGES 20
+#define KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO 50
#define KVM_MIN_ALLOC_MMU_PAGES 64UL
#define KVM_MMU_HASH_SHIFT 12
#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
@@ -291,25 +291,31 @@ struct kvm_kernel_irq_routing_entry;
* the number of unique SPs that can theoretically be created is 2^n, where n
* is the number of bits that are used to compute the role.
*
- * But, even though there are 18 bits in the mask below, not all combinations
- * of modes and flags are possible. The maximum number of possible upper-level
- * shadow pages for a single gfn is in the neighborhood of 2^13.
+ * But, even though there are 19 bits in the mask below, not all combinations
+ * of modes and flags are possible:
*
- * - invalid shadow pages are not accounted.
- * - level is effectively limited to four combinations, not 16 as the number
- * bits would imply, as 4k SPs are not tracked (allowed to go unsync).
- * - level is effectively unused for non-PAE paging because there is exactly
- * one upper level (see 4k SP exception above).
- * - quadrant is used only for non-PAE paging and is exclusive with
- * gpte_is_8_bytes.
- * - execonly and ad_disabled are used only for nested EPT, which makes it
- * exclusive with quadrant.
+ * - invalid shadow pages are not accounted, so the bits are effectively 18
+ *
+ * - quadrant will only be used if has_4_byte_gpte=1 (non-PAE paging);
+ * execonly and ad_disabled are only used for nested EPT which has
+ * has_4_byte_gpte=0. Therefore, 2 bits are always unused.
+ *
+ * - the 4 bits of level are effectively limited to the values 2/3/4/5,
+ * as 4k SPs are not tracked (allowed to go unsync). In addition non-PAE
+ * paging has exactly one upper level, making level completely redundant
+ * when has_4_byte_gpte=1.
+ *
+ * - on top of this, smep_andnot_wp and smap_andnot_wp are only set if
+ * cr0_wp=0, therefore these three bits only give rise to 5 possibilities.
+ *
+ * Therefore, the maximum number of possible upper-level shadow pages for a
+ * single gfn is a bit less than 2^13.
*/
union kvm_mmu_page_role {
u32 word;
struct {
unsigned level:4;
- unsigned gpte_is_8_bytes:1;
+ unsigned has_4_byte_gpte:1;
unsigned quadrant:2;
unsigned direct:1;
unsigned access:3;
@@ -420,10 +426,9 @@ struct kvm_mmu {
int (*page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void (*inject_page_fault)(struct kvm_vcpu *vcpu,
struct x86_exception *fault);
- gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t gva_or_gpa,
- u32 access, struct x86_exception *exception);
- gpa_t (*translate_gpa)(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
- struct x86_exception *exception);
+ gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ gpa_t gva_or_gpa, u32 access,
+ struct x86_exception *exception);
int (*sync_page)(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp);
void (*invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa);
@@ -490,6 +495,7 @@ struct kvm_pmc {
*/
u64 current_config;
bool is_paused;
+ bool intr;
};
struct kvm_pmu {
@@ -604,6 +610,7 @@ struct kvm_vcpu_xen {
u64 last_steal;
u64 runstate_entry_time;
u64 runstate_times[4];
+ unsigned long evtchn_pending_sel;
};
struct kvm_vcpu_arch {
@@ -640,6 +647,7 @@ struct kvm_vcpu_arch {
u64 smi_count;
bool tpr_access_reporting;
bool xsaves_enabled;
+ bool xfd_no_write_intercept;
u64 ia32_xss;
u64 microcode_version;
u64 arch_capabilities;
@@ -774,6 +782,7 @@ struct kvm_vcpu_arch {
unsigned nmi_pending; /* NMI queued after currently running handler */
bool nmi_injected; /* Trying to inject an NMI this entry */
bool smi_pending; /* SMI queued after currently running handler */
+ u8 handling_intr_from_guest;
struct kvm_mtrr mtrr_state;
u64 pat;
@@ -1014,7 +1023,7 @@ struct msr_bitmap_range {
struct kvm_xen {
bool long_mode;
u8 upcall_vector;
- gfn_t shinfo_gfn;
+ struct gfn_to_pfn_cache shinfo_cache;
};
enum kvm_irqchip_mode {
@@ -1337,6 +1346,7 @@ struct kvm_x86_ops {
struct kvm_segment *var, int seg);
void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
+ void (*post_set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
	bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
@@ -1371,6 +1381,7 @@ struct kvm_x86_ops {
*/
void (*tlb_flush_guest)(struct kvm_vcpu *vcpu);
+ int (*vcpu_pre_run)(struct kvm_vcpu *vcpu);
enum exit_fastpath_completion (*run)(struct kvm_vcpu *vcpu);
int (*handle_exit)(struct kvm_vcpu *vcpu,
enum exit_fastpath_completion exit_fastpath);
@@ -1399,7 +1410,8 @@ struct kvm_x86_ops {
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
- int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+ void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector);
int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
@@ -1444,18 +1456,6 @@ struct kvm_x86_ops {
const struct kvm_pmu_ops *pmu_ops;
const struct kvm_x86_nested_ops *nested_ops;
- /*
- * Architecture specific hooks for vCPU blocking due to
- * HLT instruction.
- * Returns for .pre_block():
- * - 0 means continue to block the vCPU.
- * - 1 means we cannot block the vCPU since some event
- * happens during this period, such as, 'ON' bit in
- * posted-interrupts descriptor is set.
- */
- int (*pre_block)(struct kvm_vcpu *vcpu);
- void (*post_block)(struct kvm_vcpu *vcpu);
-
void (*vcpu_blocking)(struct kvm_vcpu *vcpu);
void (*vcpu_unblocking)(struct kvm_vcpu *vcpu);
@@ -1484,7 +1484,8 @@ struct kvm_x86_ops {
int (*get_msr_feature)(struct kvm_msr_entry *entry);
- bool (*can_emulate_instruction)(struct kvm_vcpu *vcpu, void *insn, int insn_len);
+ bool (*can_emulate_instruction)(struct kvm_vcpu *vcpu, int emul_type,
+ void *insn, int insn_len);
bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
@@ -1497,6 +1498,7 @@ struct kvm_x86_ops {
};
struct kvm_x86_nested_ops {
+ void (*leave_nested)(struct kvm_vcpu *vcpu);
int (*check_events)(struct kvm_vcpu *vcpu);
bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
void (*triple_fault)(struct kvm_vcpu *vcpu);
@@ -1519,6 +1521,7 @@ struct kvm_x86_init_ops {
int (*disabled_by_bios)(void);
int (*check_processor_compatibility)(void);
int (*hardware_setup)(void);
+ unsigned int (*handle_intel_pt_intr)(void);
struct kvm_x86_ops *runtime_ops;
};
@@ -1568,6 +1571,9 @@ static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
return -ENOTSUPP;
}
+#define kvm_arch_pmi_in_guest(vcpu) \
+ ((vcpu) && (vcpu)->arch.handling_intr_from_guest)
+
int kvm_mmu_module_init(void);
void kvm_mmu_module_exit(void);
@@ -1587,10 +1593,9 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
const struct kvm_memory_slot *memslot);
void kvm_mmu_zap_all(struct kvm *kvm);
void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm, u64 gen);
-unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned long kvm_nr_mmu_pages);
-int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);
+int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
@@ -1640,7 +1645,8 @@ extern u64 kvm_mce_cap_supported;
*
* EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
* decode the instruction length. For use *only* by
- * kvm_x86_ops.skip_emulated_instruction() implementations.
+ * kvm_x86_ops.skip_emulated_instruction() implementations if
+ * EMULTYPE_COMPLETE_USER_EXIT is not set.
*
* EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
* retry native execution under certain conditions,
@@ -1660,6 +1666,10 @@ extern u64 kvm_mce_cap_supported;
*
* EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
* case the CR2/GPA value pass on the stack is valid.
+ *
+ * EMULTYPE_COMPLETE_USER_EXIT - Set when the emulator should update interruptibility
+ * state and inject single-step #DBs after skipping
+ * an instruction (after completing userspace I/O).
*/
#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
@@ -1668,6 +1678,7 @@ extern u64 kvm_mce_cap_supported;
#define EMULTYPE_TRAP_UD_FORCED (1 << 4)
#define EMULTYPE_VMWARE_GP (1 << 5)
#define EMULTYPE_PF (1 << 6)
+#define EMULTYPE_COMPLETE_USER_EXIT (1 << 7)
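
As a hypothetical illustration of the flag documentation above (not a call site taken from this series): a userspace-I/O completion handler could combine EMULTYPE_SKIP with EMULTYPE_COMPLETE_USER_EXIT so the emulator both skips the just-completed instruction and updates interruptibility state / injects single-step #DBs:

	static int demo_complete_userspace_io(struct kvm_vcpu *vcpu)
	{
		return kvm_emulate_instruction(vcpu, EMULTYPE_SKIP |
						     EMULTYPE_COMPLETE_USER_EXIT);
	}
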
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
@@ -1692,7 +1703,7 @@ int kvm_emulate_monitor(struct kvm_vcpu *vcpu);
int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in);
int kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
int kvm_emulate_halt(struct kvm_vcpu *vcpu);
-int kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu);
int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu);
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu);
@@ -1758,12 +1769,9 @@ void kvm_inject_nmi(struct kvm_vcpu *vcpu);
void kvm_update_dr7(struct kvm_vcpu *vcpu);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
-void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
ulong roots_to_free);
void kvm_mmu_free_guest_mode_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu);
-gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
- struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
@@ -1856,7 +1864,6 @@ int kvm_cpu_has_extint(struct kvm_vcpu *v);
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event);
-void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
unsigned long ipi_bitmap_high, u32 min,
@@ -1897,8 +1904,6 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);
void __kvm_request_immediate_exit(struct kvm_vcpu *vcpu);
-int kvm_is_in_guest(void);
-
void __user *__x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa,
u32 size);
bool kvm_vcpu_is_reset_bsp(struct kvm_vcpu *vcpu);
@@ -1927,8 +1932,6 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
static_call_cond(kvm_x86_vcpu_unblocking)(vcpu);
}
-static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
-
static inline int kvm_cpu_get_apicid(int mps_cpu)
{
#ifdef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
index 9d4a3b1b25b9..eb186bc57f6a 100644
--- a/arch/x86/include/asm/kvm_page_track.h
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -63,9 +63,9 @@ void kvm_slot_page_track_add_page(struct kvm *kvm,
void kvm_slot_page_track_remove_page(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
enum kvm_page_track_mode mode);
-bool kvm_slot_page_track_is_active(struct kvm_vcpu *vcpu,
- struct kvm_memory_slot *slot, gfn_t gfn,
- enum kvm_page_track_mode mode);
+bool kvm_slot_page_track_is_active(struct kvm *kvm,
+ const struct kvm_memory_slot *slot,
+ gfn_t gfn, enum kvm_page_track_mode mode);
void
kvm_page_track_register_notifier(struct kvm *kvm,
diff --git a/arch/x86/include/asm/linkage.h b/arch/x86/include/asm/linkage.h
index 365111789cc6..030907922bd0 100644
--- a/arch/x86/include/asm/linkage.h
+++ b/arch/x86/include/asm/linkage.h
@@ -18,6 +18,20 @@
#define __ALIGN_STR __stringify(__ALIGN)
#endif
+#ifdef CONFIG_SLS
+#define RET ret; int3
+#else
+#define RET ret
+#endif
+
+#else /* __ASSEMBLY__ */
+
+#ifdef CONFIG_SLS
+#define ASM_RET "ret; int3\n\t"
+#else
+#define ASM_RET "ret\n\t"
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_LINKAGE_H */
diff --git a/arch/x86/include/asm/mmx.h b/arch/x86/include/asm/mmx.h
index f572d0f944bb..e69de29bb2d1 100644
--- a/arch/x86/include/asm/mmx.h
+++ b/arch/x86/include/asm/mmx.h
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_MMX_H
-#define _ASM_X86_MMX_H
-
-/*
- * MMX 3Dnow! helper operations
- */
-
-#include <linux/types.h>
-
-extern void *_mmx_memcpy(void *to, const void *from, size_t size);
-extern void mmx_clear_page(void *page);
-extern void mmx_copy_page(void *to, void *from);
-
-#endif /* _ASM_X86_MMX_H */
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index da3972fe5a7a..a82f603d4312 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -30,7 +30,7 @@ extern void *hv_hypercall_pg;
extern u64 hv_current_partition_id;
-extern union hv_ghcb __percpu **hv_ghcb_pg;
+extern union hv_ghcb * __percpu *hv_ghcb_pg;
int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
@@ -169,13 +169,6 @@ bool hv_vcpu_is_preempted(int vcpu);
static inline void hv_apic_init(void) {}
#endif
-static inline void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
- struct msi_desc *msi_desc)
-{
- msi_entry->address.as_uint32 = msi_desc->msg.address_lo;
- msi_entry->data.as_uint32 = msi_desc->msg.data;
-}
-
struct irq_domain *hv_create_pci_msi_domain(void);
int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 3faf0f97edb1..a4a39c3e0f19 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -476,6 +476,7 @@
#define MSR_AMD64_ICIBSEXTDCTL 0xc001103c
#define MSR_AMD64_IBSOPDATA4 0xc001103d
#define MSR_AMD64_IBS_REG_COUNT_MAX 8 /* includes MSR_AMD64_IBSBRTARGET */
+#define MSR_AMD64_SVM_AVIC_DOORBELL 0xc001011b
#define MSR_AMD64_VM_PAGE_FLUSH 0xc001011e
#define MSR_AMD64_SEV_ES_GHCB 0xc0010130
#define MSR_AMD64_SEV 0xc0010131
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 6b52182e178a..d42e6c6b47b1 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -137,17 +137,11 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
{
DECLARE_ARGS(val, low, high);
- asm volatile("2: rdmsr ; xor %[err],%[err]\n"
- "1:\n\t"
- ".section .fixup,\"ax\"\n\t"
- "3: mov %[fault],%[err]\n\t"
- "xorl %%eax, %%eax\n\t"
- "xorl %%edx, %%edx\n\t"
- "jmp 1b\n\t"
- ".previous\n\t"
- _ASM_EXTABLE(2b, 3b)
+ asm volatile("1: rdmsr ; xor %[err],%[err]\n"
+ "2:\n\t"
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
: [err] "=r" (*err), EAX_EDX_RET(val, low, high)
- : "c" (msr), [fault] "i" (-EIO));
+ : "c" (msr));
if (tracepoint_enabled(read_msr))
do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
return EAX_EDX_VAL(val, low, high);
@@ -169,15 +163,11 @@ native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
int err;
- asm volatile("2: wrmsr ; xor %[err],%[err]\n"
- "1:\n\t"
- ".section .fixup,\"ax\"\n\t"
- "3: mov %[fault],%[err] ; jmp 1b\n\t"
- ".previous\n\t"
- _ASM_EXTABLE(2b, 3b)
+ asm volatile("1: wrmsr ; xor %[err],%[err]\n"
+ "2:\n\t"
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
: [err] "=a" (err)
- : "c" (msr), "0" (low), "d" (high),
- [fault] "i" (-EIO)
+ : "c" (msr), "0" (low), "d" (high)
: "memory");
if (tracepoint_enabled(write_msr))
do_trace_write_msr(msr, ((u64)high << 32 | low), err);
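
Caller-visible behaviour of the safe MSR accessors is unchanged by this conversion: a faulting read still yields -EIO with the output zeroed; only the fixup now lives in the exception-table entry type. A sketched typical use (function name hypothetical):

	static void demo_probe_feat_ctl(void)
	{
		u64 val;

		if (rdmsrl_safe(MSR_IA32_FEAT_CTL, &val))
			pr_warn("FEAT_CTL read faulted\n");
	}
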
diff --git a/arch/x86/include/asm/page_32.h b/arch/x86/include/asm/page_32.h
index b13f8488ac85..df42f8aa99e4 100644
--- a/arch/x86/include/asm/page_32.h
+++ b/arch/x86/include/asm/page_32.h
@@ -19,19 +19,6 @@ extern unsigned long __phys_addr(unsigned long);
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */
-#ifdef CONFIG_X86_USE_3DNOW
-#include <asm/mmx.h>
-
-static inline void clear_page(void *page)
-{
- mmx_clear_page(page);
-}
-
-static inline void copy_page(void *to, void *from)
-{
- mmx_copy_page(to, from);
-}
-#else /* !CONFIG_X86_USE_3DNOW */
#include <linux/string.h>
static inline void clear_page(void *page)
@@ -43,7 +30,6 @@ static inline void copy_page(void *to, void *from)
{
memcpy(to, from, PAGE_SIZE);
}
-#endif /* CONFIG_X86_USE_3DNOW */
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PAGE_32_H */
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 27d276232c80..0d76502cc6f5 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -671,7 +671,7 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
"call " #func ";" \
PV_RESTORE_ALL_CALLER_REGS \
FRAME_END \
- "ret;" \
+ ASM_RET \
".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
".popsection")
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index a34430b7af4a..8a9432fb3802 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -27,6 +27,7 @@
#include <asm/pkru.h>
#include <asm/fpu/api.h>
#include <asm-generic/pgtable_uffd.h>
+#include <linux/page_table_check.h>
extern pgd_t early_top_pgt[PTRS_PER_PGD];
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
@@ -753,7 +754,7 @@ static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
return true;
if ((pte_flags(a) & _PAGE_PROTNONE) &&
- mm_tlb_flush_pending(mm))
+ atomic_read(&mm->tlb_flush_pending))
return true;
return false;
@@ -1007,18 +1008,21 @@ static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
+ page_table_check_pte_set(mm, addr, ptep, pte);
set_pte(ptep, pte);
}
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
+ page_table_check_pmd_set(mm, addr, pmdp, pmd);
set_pmd(pmdp, pmd);
}
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
+ page_table_check_pud_set(mm, addr, pudp, pud);
native_set_pud(pudp, pud);
}
@@ -1049,6 +1053,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_t pte = native_ptep_get_and_clear(ptep);
+ page_table_check_pte_clear(mm, addr, pte);
return pte;
}
@@ -1064,12 +1069,23 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
* care about updates and native needs no locking
*/
pte = native_local_ptep_get_and_clear(ptep);
+ page_table_check_pte_clear(mm, addr, pte);
} else {
pte = ptep_get_and_clear(mm, addr, ptep);
}
return pte;
}
+#define __HAVE_ARCH_PTEP_CLEAR
+static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ if (IS_ENABLED(CONFIG_PAGE_TABLE_CHECK))
+ ptep_get_and_clear(mm, addr, ptep);
+ else
+ pte_clear(mm, addr, ptep);
+}
+
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
@@ -1110,14 +1126,22 @@ static inline int pmd_write(pmd_t pmd)
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
- return native_pmdp_get_and_clear(pmdp);
+ pmd_t pmd = native_pmdp_get_and_clear(pmdp);
+
+ page_table_check_pmd_clear(mm, addr, pmd);
+
+ return pmd;
}
#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
unsigned long addr, pud_t *pudp)
{
- return native_pudp_get_and_clear(pudp);
+ pud_t pud = native_pudp_get_and_clear(pudp);
+
+ page_table_check_pud_clear(mm, addr, pud);
+
+ return pud;
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -1138,6 +1162,7 @@ static inline int pud_write(pud_t pud)
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
+ page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
if (IS_ENABLED(CONFIG_SMP)) {
return xchg(pmdp, pmd);
} else {
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index 159622ee0674..1474cf96251d 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -48,7 +48,7 @@ asm (".pushsection .text;"
"jne .slowpath;"
"pop %rdx;"
FRAME_END
- "ret;"
+ ASM_RET
".slowpath: "
"push %rsi;"
"movzbl %al,%esi;"
@@ -56,7 +56,7 @@ asm (".pushsection .text;"
"pop %rsi;"
"pop %rdx;"
FRAME_END
- "ret;"
+ ASM_RET
".size " PV_UNLOCK ", .-" PV_UNLOCK ";"
".popsection");
diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h
index b2d504f11937..aff774775c67 100644
--- a/arch/x86/include/asm/required-features.h
+++ b/arch/x86/include/asm/required-features.h
@@ -35,11 +35,7 @@
# define NEED_CMOV 0
#endif
-#ifdef CONFIG_X86_USE_3DNOW
-# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
-#else
# define NEED_3DNOW 0
-#endif
#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31))
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index 8dd8e8ec9fa5..b228c9d44ee7 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -307,14 +307,7 @@ do { \
\
asm volatile(" \n" \
"1: movl %k0,%%" #seg " \n" \
- \
- ".section .fixup,\"ax\" \n" \
- "2: xorl %k0,%k0 \n" \
- " jmp 1b \n" \
- ".previous \n" \
- \
- _ASM_EXTABLE(1b, 2b) \
- \
+ _ASM_EXTABLE_TYPE_REG(1b, 1b, EX_TYPE_ZERO_REG, %k0)\
: "+r" (__val) : : "memory"); \
} while (0)
diff --git a/arch/x86/include/asm/sgx.h b/arch/x86/include/asm/sgx.h
index 05f3e21f01a7..3f9334ef67cd 100644
--- a/arch/x86/include/asm/sgx.h
+++ b/arch/x86/include/asm/sgx.h
@@ -46,6 +46,24 @@ enum sgx_encls_function {
};
/**
+ * SGX_ENCLS_FAULT_FLAG - flag signifying an ENCLS return code is a trapnr
+ *
+ * ENCLS has its own (positive value) error codes and also generates
+ * ENCLS specific #GP and #PF faults. And the ENCLS values get munged
+ * with system error codes as everything percolates back up the stack.
+ * Unfortunately (for us), we need to precisely identify each unique
+ * error code, e.g. the action taken if EWB fails varies based on the
+ * type of fault and on the exact SGX error code, i.e. we can't simply
+ * convert all faults to -EFAULT.
+ *
+ * To make all three error types coexist, we set bit 30 to identify an
+ * ENCLS fault. Bit 31 (technically bits N:31) is used to differentiate
+ * between positive (faults and SGX error codes) and negative (system
+ * error codes) values.
+ */
+#define SGX_ENCLS_FAULT_FLAG 0x40000000
+
+/**
* enum sgx_return_code - The return code type for ENCLS, ENCLU and ENCLV
* %SGX_NOT_TRACKED: Previous ETRACK's shootdown sequence has not
* been completed yet.
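
A sketch of how a caller splits the three value classes the SGX_ENCLS_FAULT_FLAG comment describes, mirroring the ENCLS_TRAPNR() macro updated in encls.h below (function name hypothetical):

	static int demo_encls_status(int ret)
	{
		if (ret & SGX_ENCLS_FAULT_FLAG)
			return ret & ~SGX_ENCLS_FAULT_FLAG; /* encoded trapnr */
		/* Otherwise: 0, a positive SGX error code, or a negative errno. */
		return ret;
	}
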
diff --git a/arch/x86/include/asm/static_call.h b/arch/x86/include/asm/static_call.h
index 39ebe0511869..ed4f8bb6c2d9 100644
--- a/arch/x86/include/asm/static_call.h
+++ b/arch/x86/include/asm/static_call.h
@@ -36,7 +36,7 @@
__ARCH_DEFINE_STATIC_CALL_TRAMP(name, ".byte 0xe9; .long " #func " - (. + 4)")
#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
- __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; nop; nop; nop; nop")
+ __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "ret; int3; nop; nop; nop")
#define ARCH_ADD_TRAMP_KEY(name) \
diff --git a/arch/x86/include/asm/string_32.h b/arch/x86/include/asm/string_32.h
index f74362b05619..32c0d981a82a 100644
--- a/arch/x86/include/asm/string_32.h
+++ b/arch/x86/include/asm/string_32.h
@@ -146,42 +146,9 @@ static __always_inline void *__constant_memcpy(void *to, const void *from,
extern void *memcpy(void *, const void *, size_t);
#ifndef CONFIG_FORTIFY_SOURCE
-#ifdef CONFIG_X86_USE_3DNOW
-
-#include <asm/mmx.h>
-
-/*
- * This CPU favours 3DNow strongly (eg AMD Athlon)
- */
-
-static inline void *__constant_memcpy3d(void *to, const void *from, size_t len)
-{
- if (len < 512)
- return __constant_memcpy(to, from, len);
- return _mmx_memcpy(to, from, len);
-}
-
-static inline void *__memcpy3d(void *to, const void *from, size_t len)
-{
- if (len < 512)
- return __memcpy(to, from, len);
- return _mmx_memcpy(to, from, len);
-}
-
-#define memcpy(t, f, n) \
- (__builtin_constant_p((n)) \
- ? __constant_memcpy3d((t), (f), (n)) \
- : __memcpy3d((t), (f), (n)))
-
-#else
-
-/*
- * No 3D Now!
- */
#define memcpy(t, f, n) __builtin_memcpy(t, f, n)
-#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
#define __HAVE_ARCH_MEMMOVE
diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index b00dbc5fac2b..bb2fb78523ce 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -220,6 +220,42 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
#define SVM_NESTED_CTL_SEV_ES_ENABLE BIT(2)
+
+/* AVIC */
+#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
+#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
+#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
+
+#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
+#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
+#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
+#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
+#define AVIC_PHYSICAL_ID_TABLE_SIZE_MASK (0xFF)
+
+#define AVIC_DOORBELL_PHYSICAL_ID_MASK (0xFF)
+
+#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
+#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
+#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
+
+enum avic_ipi_failure_cause {
+ AVIC_IPI_FAILURE_INVALID_INT_TYPE,
+ AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
+ AVIC_IPI_FAILURE_INVALID_TARGET,
+ AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
+};
+
+
+/*
+ * 0xff is broadcast, so the max index allowed for physical APIC ID
+ * table is 0xfe. APIC IDs above 0xff are reserved.
+ */
+#define AVIC_MAX_PHYSICAL_ID_COUNT 0xff
+
+#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
+#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
+
+
struct vmcb_seg {
u16 selector;
u16 attrib;
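
For illustration, decoding a physical APIC ID table entry with the new masks above. This is a sketch of the entry layout only, not a full AVIC flow:

	static bool demo_avic_entry_decode(u64 entry, u32 *host_id, u64 *backing)
	{
		*host_id = entry & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
		*backing = entry & AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK;

		/* An entry targets a runnable vCPU only if both bits are set. */
		return (entry & AVIC_PHYSICAL_ID_ENTRY_VALID_MASK) &&
		       (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
	}
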
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8ab9e79abb2b..ac96f9b2d64b 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -352,24 +352,22 @@ do { \
"1: movl %[lowbits],%%eax\n" \
"2: movl %[highbits],%%edx\n" \
"3:\n" \
- ".section .fixup,\"ax\"\n" \
- "4: mov %[efault],%[errout]\n" \
- " xorl %%eax,%%eax\n" \
- " xorl %%edx,%%edx\n" \
- " jmp 3b\n" \
- ".previous\n" \
- _ASM_EXTABLE_UA(1b, 4b) \
- _ASM_EXTABLE_UA(2b, 4b) \
+ _ASM_EXTABLE_TYPE_REG(1b, 3b, EX_TYPE_EFAULT_REG | \
+ EX_FLAG_CLEAR_AX_DX, \
+ %[errout]) \
+ _ASM_EXTABLE_TYPE_REG(2b, 3b, EX_TYPE_EFAULT_REG | \
+ EX_FLAG_CLEAR_AX_DX, \
+ %[errout]) \
: [errout] "=r" (retval), \
[output] "=&A"(x) \
: [lowbits] "m" (__m(__ptr)), \
[highbits] "m" __m(((u32 __user *)(__ptr)) + 1), \
- [efault] "i" (-EFAULT), "0" (retval)); \
+ "0" (retval)); \
})
#else
#define __get_user_asm_u64(x, ptr, retval) \
- __get_user_asm(x, ptr, retval, "q", "=r")
+ __get_user_asm(x, ptr, retval, "q")
#endif
#define __get_user_size(x, ptr, size, retval) \
@@ -380,14 +378,14 @@ do { \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
- __get_user_asm(x_u8__, ptr, retval, "b", "=q"); \
+ __get_user_asm(x_u8__, ptr, retval, "b"); \
(x) = x_u8__; \
break; \
case 2: \
- __get_user_asm(x, ptr, retval, "w", "=r"); \
+ __get_user_asm(x, ptr, retval, "w"); \
break; \
case 4: \
- __get_user_asm(x, ptr, retval, "l", "=r"); \
+ __get_user_asm(x, ptr, retval, "l"); \
break; \
case 8: \
__get_user_asm_u64(x, ptr, retval); \
@@ -397,20 +395,17 @@ do { \
} \
} while (0)
-#define __get_user_asm(x, addr, err, itype, ltype) \
+#define __get_user_asm(x, addr, err, itype) \
asm volatile("\n" \
"1: mov"itype" %[umem],%[output]\n" \
"2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: mov %[efault],%[errout]\n" \
- " xorl %k[output],%k[output]\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE_UA(1b, 3b) \
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG | \
+ EX_FLAG_CLEAR_AX, \
+ %[errout]) \
: [errout] "=r" (err), \
- [output] ltype(x) \
+ [output] "=a" (x) \
: [umem] "m" (__m(addr)), \
- [efault] "i" (-EFAULT), "0" (err))
+ "0" (err))
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
index 06006b0351f3..8338b0432b50 100644
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -77,30 +77,58 @@ static inline unsigned long find_zero(unsigned long mask)
* and the next page not being mapped, take the exception and
* return zeroes in the non-existing part.
*/
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
static inline unsigned long load_unaligned_zeropad(const void *addr)
{
- unsigned long ret, dummy;
+ unsigned long offset, data;
+ unsigned long ret;
+
+ asm_volatile_goto(
+ "1: mov %[mem], %[ret]\n"
+
+ _ASM_EXTABLE(1b, %l[do_exception])
+
+ : [ret] "=r" (ret)
+ : [mem] "m" (*(unsigned long *)addr)
+ : : do_exception);
+
+ return ret;
+
+do_exception:
+ offset = (unsigned long)addr & (sizeof(long) - 1);
+ addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1));
+ data = *(unsigned long *)addr;
+ ret = data >> offset * 8;
+
+ return ret;
+}
- asm(
- "1:\tmov %2,%0\n"
+#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long offset, data;
+ unsigned long ret, err = 0;
+
+ asm( "1: mov %[mem], %[ret]\n"
"2:\n"
- ".section .fixup,\"ax\"\n"
- "3:\t"
- "lea %2,%1\n\t"
- "and %3,%1\n\t"
- "mov (%1),%0\n\t"
- "leal %2,%%ecx\n\t"
- "andl %4,%%ecx\n\t"
- "shll $3,%%ecx\n\t"
- "shr %%cl,%0\n\t"
- "jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
- :"=&r" (ret),"=&c" (dummy)
- :"m" (*(unsigned long *)addr),
- "i" (-sizeof(unsigned long)),
- "i" (sizeof(unsigned long)-1));
+
+ _ASM_EXTABLE_FAULT(1b, 2b)
+
+ : [ret] "=&r" (ret), "+a" (err)
+ : [mem] "m" (*(unsigned long *)addr));
+
+ if (unlikely(err)) {
+ offset = (unsigned long)addr & (sizeof(long) - 1);
+ addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1));
+ data = *(unsigned long *)addr;
+ ret = data >> offset * 8;
+ }
+
return ret;
}
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
#endif /* _ASM_WORD_AT_A_TIME_H */
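
A worked example of the exception path shared by both variants above: for a load at p = base + 5 within an 8-byte little-endian word, offset is 5, the load is retried from the aligned base, and the result is shifted right by 40 bits so bytes 5..7 land in the low positions while the inaccessible tail reads as zeroes:

	/* offset = 5, so ret = data >> 40; the high 5 bytes of ret are 0. */
	offset = (unsigned long)addr & (sizeof(long) - 1);
	data   = *(unsigned long *)((unsigned long)addr & ~(sizeof(long) - 1));
	ret    = data >> (offset * 8);
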
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 5c69f7eb5d47..22b7412c08f6 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -289,12 +289,6 @@ struct x86_platform_ops {
struct x86_hyper_runtime hyper;
};
-struct pci_dev;
-
-struct x86_msi_ops {
- void (*restore_msi_irqs)(struct pci_dev *dev);
-};
-
struct x86_apic_ops {
unsigned int (*io_apic_read) (unsigned int apic, unsigned int reg);
void (*restore)(void);
diff --git a/arch/x86/include/asm/xen/cpuid.h b/arch/x86/include/asm/xen/cpuid.h
index a9630104f1c4..78e667a31d6c 100644
--- a/arch/x86/include/asm/xen/cpuid.h
+++ b/arch/x86/include/asm/xen/cpuid.h
@@ -100,6 +100,13 @@
/* Memory mapped from other domains has valid IOMMU entries */
#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
#define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */
+#define XEN_HVM_CPUID_DOMID_PRESENT (1u << 4) /* domid is present in ECX */
+/*
+ * Bits 55:49 from the IO-APIC RTE and bits 11:5 from the MSI address can be
+ * used to store high bits for the Destination ID. This expands the Destination
+ * ID field from 8 to 15 bits, allowing APIC IDs up to 32768 to be targeted.
+ */
+#define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5)
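
A sketch of the bit layout described above, assuming the standard MSI address format (destination ID in bits 19:12) plus the extended bits 11:5; together the two fields encode a 15-bit destination ID (function name hypothetical):

	static u32 demo_msi_addr_ext_dest(u32 addr, u32 dest_id)
	{
		addr &= ~((0xffu << 12) | (0x7fu << 5));
		addr |= (dest_id & 0xff) << 12;		/* regular 8-bit field */
		addr |= ((dest_id >> 8) & 0x7f) << 5;	/* extended bits 14:8  */
		return addr;
	}
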
/*
* Leaf 6 (0x40000x05)
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 5adab895127e..16f548a661cf 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -43,18 +43,12 @@ static inline uint32_t xen_cpuid_base(void)
return hypervisor_cpuid_base("XenVMMXenVMM", 2);
}
-#ifdef CONFIG_XEN
-extern bool __init xen_hvm_need_lapic(void);
+struct pci_dev;
-static inline bool __init xen_x2apic_para_available(void)
-{
- return xen_hvm_need_lapic();
-}
+#ifdef CONFIG_XEN_PV_DOM0
+bool xen_initdom_restore_msi(struct pci_dev *dev);
#else
-static inline bool __init xen_x2apic_para_available(void)
-{
- return (xen_cpuid_base() != 0);
-}
+static inline bool xen_initdom_restore_msi(struct pci_dev *dev) { return true; }
#endif
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 1a162e559753..e989bc2269f5 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -96,11 +96,7 @@ static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
asm volatile("1: mov %[val], %[ptr]\n"
"2:\n"
- ".section .fixup, \"ax\"\n"
- "3: sub $1, %[ret]\n"
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %[ret])
: [ret] "+r" (ret), [ptr] "=m" (*addr)
: [val] "r" (val));
@@ -110,16 +106,12 @@ static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val)
static inline int xen_safe_read_ulong(const unsigned long *addr,
unsigned long *val)
{
- int ret = 0;
unsigned long rval = ~0ul;
+ int ret = 0;
asm volatile("1: mov %[ptr], %[rval]\n"
"2:\n"
- ".section .fixup, \"ax\"\n"
- "3: sub $1, %[ret]\n"
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %[ret])
: [ret] "+r" (ret), [rval] "+r" (rval)
: [ptr] "m" (*addr));
*val = rval;
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 5a776a08f78c..bf6e96011dfe 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -373,9 +373,23 @@ struct kvm_debugregs {
__u64 reserved[9];
};
-/* for KVM_CAP_XSAVE */
+/* for KVM_CAP_XSAVE and KVM_CAP_XSAVE2 */
struct kvm_xsave {
+ /*
+ * KVM_GET_XSAVE2 and KVM_SET_XSAVE write and read as many bytes
+ * as are returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
+ * respectively, when invoked on the vm file descriptor.
+ *
+ * The size value returned by KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2)
+ * will always be at least 4096. Currently, it is only greater
+ * than 4096 if a dynamic feature has been enabled with
+ * ``arch_prctl()``, but this may change in the future.
+ *
+ * The offsets of the state save areas in struct kvm_xsave follow
+ * the contents of CPUID leaf 0xD on the host.
+ */
__u32 region[1024];
+ __u32 extra[0];
};
#define KVM_MAX_XCRS 16
@@ -438,6 +452,9 @@ struct kvm_sync_regs {
#define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
+/* attributes for system fd (group 0) */
+#define KVM_X86_XCOMP_GUEST_SUPP 0
+
struct kvm_vmx_nested_state_data {
__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
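
A userspace sketch of the sizing contract documented in struct kvm_xsave above: query the size with KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) on the VM fd, then fetch the state with KVM_GET_XSAVE2 on the vCPU fd (error handling abbreviated; function name hypothetical):

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static struct kvm_xsave *demo_get_xsave2(int vm_fd, int vcpu_fd)
	{
		int size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
		struct kvm_xsave *xsave;

		if (size < (int)sizeof(*xsave))
			size = sizeof(*xsave);	/* at least 4096 per the comment */

		xsave = calloc(1, size);
		if (xsave && ioctl(vcpu_fd, KVM_GET_XSAVE2, xsave) < 0) {
			free(xsave);
			return NULL;
		}
		return xsave;
	}
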
diff --git a/arch/x86/include/uapi/asm/prctl.h b/arch/x86/include/uapi/asm/prctl.h
index 754a07856817..500b96e71f18 100644
--- a/arch/x86/include/uapi/asm/prctl.h
+++ b/arch/x86/include/uapi/asm/prctl.h
@@ -2,20 +2,22 @@
#ifndef _ASM_X86_PRCTL_H
#define _ASM_X86_PRCTL_H
-#define ARCH_SET_GS 0x1001
-#define ARCH_SET_FS 0x1002
-#define ARCH_GET_FS 0x1003
-#define ARCH_GET_GS 0x1004
+#define ARCH_SET_GS 0x1001
+#define ARCH_SET_FS 0x1002
+#define ARCH_GET_FS 0x1003
+#define ARCH_GET_GS 0x1004
-#define ARCH_GET_CPUID 0x1011
-#define ARCH_SET_CPUID 0x1012
+#define ARCH_GET_CPUID 0x1011
+#define ARCH_SET_CPUID 0x1012
-#define ARCH_GET_XCOMP_SUPP 0x1021
-#define ARCH_GET_XCOMP_PERM 0x1022
-#define ARCH_REQ_XCOMP_PERM 0x1023
+#define ARCH_GET_XCOMP_SUPP 0x1021
+#define ARCH_GET_XCOMP_PERM 0x1022
+#define ARCH_REQ_XCOMP_PERM 0x1023
+#define ARCH_GET_XCOMP_GUEST_PERM 0x1024
+#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025
-#define ARCH_MAP_VDSO_X32 0x2001
-#define ARCH_MAP_VDSO_32 0x2002
-#define ARCH_MAP_VDSO_64 0x2003
+#define ARCH_MAP_VDSO_X32 0x2001
+#define ARCH_MAP_VDSO_32 0x2002
+#define ARCH_MAP_VDSO_64 0x2003
#endif /* _ASM_X86_PRCTL_H */
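
A userspace sketch of the new guest permission request: like ARCH_REQ_XCOMP_PERM, ARCH_REQ_XCOMP_GUEST_PERM takes an xstate component number. XFEATURE_XTILEDATA (18, AMX tile data) is used here as the canonical dynamic feature; the wrapper function is hypothetical:

	#include <sys/syscall.h>
	#include <unistd.h>
	#include <asm/prctl.h>

	#define XFEATURE_XTILEDATA 18

	static long demo_request_guest_amx(void)
	{
		/* Must happen before the dynamic state is exposed to a guest. */
		return syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
			       XFEATURE_XTILEDATA);
	}
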
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index daf88f8143c5..cf69081073b5 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -60,7 +60,7 @@ save_registers:
popl saved_context_eflags
movl $ret_point, saved_eip
- ret
+ RET
restore_registers:
@@ -70,7 +70,7 @@ restore_registers:
movl saved_context_edi, %edi
pushl saved_context_eflags
popfl
- ret
+ RET
SYM_CODE_START(do_suspend_lowlevel)
call save_processor_state
@@ -86,7 +86,7 @@ SYM_CODE_START(do_suspend_lowlevel)
ret_point:
call restore_registers
call restore_processor_state
- ret
+ RET
SYM_CODE_END(do_suspend_lowlevel)
.data
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 23fb4d51a5da..5007c3ffe96f 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -714,7 +714,7 @@ asm (
" .type int3_magic, @function\n"
"int3_magic:\n"
" movl $1, (%" _ASM_ARG1 ")\n"
-" ret\n"
+ ASM_RET
" .size int3_magic, .-int3_magic\n"
" .popsection\n"
);
@@ -1113,10 +1113,13 @@ void text_poke_sync(void)
}
struct text_poke_loc {
- s32 rel_addr; /* addr := _stext + rel_addr */
- s32 rel32;
+ /* addr := _stext + rel_addr */
+ s32 rel_addr;
+ s32 disp;
+ u8 len;
u8 opcode;
const u8 text[POKE_MAX_OPCODE_SIZE];
+ /* see text_poke_bp_batch() */
u8 old;
};
@@ -1131,7 +1134,8 @@ static struct bp_patching_desc *bp_desc;
static __always_inline
struct bp_patching_desc *try_get_desc(struct bp_patching_desc **descp)
{
- struct bp_patching_desc *desc = __READ_ONCE(*descp); /* rcu_dereference */
+ /* rcu_dereference */
+ struct bp_patching_desc *desc = __READ_ONCE(*descp);
if (!desc || !arch_atomic_inc_not_zero(&desc->refs))
return NULL;
@@ -1165,7 +1169,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
{
struct bp_patching_desc *desc;
struct text_poke_loc *tp;
- int len, ret = 0;
+ int ret = 0;
void *ip;
if (user_mode(regs))
@@ -1205,8 +1209,7 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
goto out_put;
}
- len = text_opcode_size(tp->opcode);
- ip += len;
+ ip += tp->len;
switch (tp->opcode) {
case INT3_INSN_OPCODE:
@@ -1221,12 +1224,12 @@ noinstr int poke_int3_handler(struct pt_regs *regs)
break;
case CALL_INSN_OPCODE:
- int3_emulate_call(regs, (long)ip + tp->rel32);
+ int3_emulate_call(regs, (long)ip + tp->disp);
break;
case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE:
- int3_emulate_jmp(regs, (long)ip + tp->rel32);
+ int3_emulate_jmp(regs, (long)ip + tp->disp);
break;
default:
@@ -1301,7 +1304,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
*/
for (do_sync = 0, i = 0; i < nr_entries; i++) {
u8 old[POKE_MAX_OPCODE_SIZE] = { tp[i].old, };
- int len = text_opcode_size(tp[i].opcode);
+ int len = tp[i].len;
if (len - INT3_INSN_SIZE > 0) {
memcpy(old + INT3_INSN_SIZE,
@@ -1378,21 +1381,37 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
const void *opcode, size_t len, const void *emulate)
{
struct insn insn;
- int ret;
+ int ret, i;
memcpy((void *)tp->text, opcode, len);
if (!emulate)
emulate = opcode;
ret = insn_decode_kernel(&insn, emulate);
-
BUG_ON(ret < 0);
- BUG_ON(len != insn.length);
tp->rel_addr = addr - (void *)_stext;
+ tp->len = len;
tp->opcode = insn.opcode.bytes[0];
switch (tp->opcode) {
+ case RET_INSN_OPCODE:
+ case JMP32_INSN_OPCODE:
+ case JMP8_INSN_OPCODE:
+ /*
+ * Control flow instructions without implied execution of the
+ * next instruction can be padded with INT3.
+ */
+ for (i = insn.length; i < len; i++)
+ BUG_ON(tp->text[i] != INT3_INSN_OPCODE);
+ break;
+
+ default:
+ BUG_ON(len != insn.length);
+ };
+
+
+ switch (tp->opcode) {
case INT3_INSN_OPCODE:
case RET_INSN_OPCODE:
break;
@@ -1400,7 +1419,7 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
case CALL_INSN_OPCODE:
case JMP32_INSN_OPCODE:
case JMP8_INSN_OPCODE:
- tp->rel32 = insn.immediate.value;
+ tp->disp = insn.immediate.value;
break;
default: /* assume NOP */
@@ -1408,13 +1427,13 @@ static void text_poke_loc_init(struct text_poke_loc *tp, void *addr,
case 2: /* NOP2 -- emulate as JMP8+0 */
BUG_ON(memcmp(emulate, x86_nops[len], len));
tp->opcode = JMP8_INSN_OPCODE;
- tp->rel32 = 0;
+ tp->disp = 0;
break;
case 5: /* NOP5 -- emulate as JMP32+0 */
BUG_ON(memcmp(emulate, x86_nops[len], len));
tp->opcode = JMP32_INSN_OPCODE;
- tp->rel32 = 0;
+ tp->disp = 0;
break;
default: /* unknown instruction */
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
index dbacb9ec8843..7517eb05bdc1 100644
--- a/arch/x86/kernel/apic/msi.c
+++ b/arch/x86/kernel/apic/msi.c
@@ -19,6 +19,7 @@
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>
+#include <asm/xen/hypervisor.h>
struct irq_domain *x86_pci_msi_default_domain __ro_after_init;
@@ -159,11 +160,8 @@ static struct irq_chip pci_msi_controller = {
int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
msi_alloc_info_t *arg)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct msi_desc *desc = first_pci_msi_entry(pdev);
-
init_irq_alloc_info(arg, NULL);
- if (desc->msi_attrib.is_msix) {
+ if (to_pci_dev(dev)->msix_enabled) {
arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSIX;
} else {
arg->type = X86_IRQ_ALLOC_TYPE_PCI_MSI;
@@ -345,3 +343,8 @@ void dmar_free_hwirq(int irq)
irq_domain_free_irqs(irq, 1);
}
#endif
+
+bool arch_restore_msi_irqs(struct pci_dev *dev)
+{
+ return xen_initdom_restore_msi(dev);
+}
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index c132daabe615..3e6f6b448f6a 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -760,9 +760,9 @@ void __init lapic_update_legacy_vectors(void)
void __init lapic_assign_system_vectors(void)
{
- unsigned int i, vector = 0;
+ unsigned int i, vector;
- for_each_set_bit_from(vector, system_vectors, NR_VECTORS)
+ for_each_set_bit(vector, system_vectors, NR_VECTORS)
irq_matrix_assign_system(vector_matrix, vector, false);
if (nr_legacy_irqs() > 1)
diff --git a/arch/x86/kernel/cc_platform.c b/arch/x86/kernel/cc_platform.c
index 8a25b1c0d480..6a6ffcd978f6 100644
--- a/arch/x86/kernel/cc_platform.c
+++ b/arch/x86/kernel/cc_platform.c
@@ -11,6 +11,7 @@
#include <linux/cc_platform.h>
#include <linux/mem_encrypt.h>
+#include <asm/mshyperv.h>
#include <asm/processor.h>
static bool __maybe_unused intel_cc_platform_has(enum cc_attr attr)
@@ -66,12 +67,19 @@ static bool amd_cc_platform_has(enum cc_attr attr)
#endif
}
+static bool hyperv_cc_platform_has(enum cc_attr attr)
+{
+ return attr == CC_ATTR_GUEST_MEM_ENCRYPT;
+}
bool cc_platform_has(enum cc_attr attr)
{
if (sme_me_mask)
return amd_cc_platform_has(attr);
+ if (hv_is_isolation_supported())
+ return hyperv_cc_platform_has(attr);
+
return false;
}
EXPORT_SYMBOL_GPL(cc_platform_has);
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index a1e2f41796dc..9f4b508886dd 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -423,7 +423,7 @@ static void threshold_restart_bank(void *_tr)
u32 hi, lo;
/* sysfs write might race against an offline operation */
- if (this_cpu_read(threshold_banks))
+ if (!this_cpu_read(threshold_banks) && !tr->set_lvt_off)
return;
rdmsr(tr->b->address, lo, hi);
diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
index bb9a46a804bf..baafbb37be67 100644
--- a/arch/x86/kernel/cpu/mce/intel.c
+++ b/arch/x86/kernel/cpu/mce/intel.c
@@ -486,6 +486,7 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
case INTEL_FAM6_BROADWELL_X:
case INTEL_FAM6_SKYLAKE_X:
case INTEL_FAM6_ICELAKE_X:
+ case INTEL_FAM6_ICELAKE_D:
case INTEL_FAM6_SAPPHIRERAPIDS_X:
case INTEL_FAM6_XEON_PHI_KNL:
case INTEL_FAM6_XEON_PHI_KNM:
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 2a0f83678911..5a99f993e639 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -18,6 +18,7 @@
#include <linux/kexec.h>
#include <linux/i8253.h>
#include <linux/random.h>
+#include <linux/swiotlb.h>
#include <asm/processor.h>
#include <asm/hypervisor.h>
#include <asm/hyperv-tlfs.h>
@@ -329,8 +330,20 @@ static void __init ms_hyperv_init_platform(void)
pr_info("Hyper-V: Isolation Config: Group A 0x%x, Group B 0x%x\n",
ms_hyperv.isolation_config_a, ms_hyperv.isolation_config_b);
- if (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP)
+ if (hv_get_isolation_type() == HV_ISOLATION_TYPE_SNP) {
static_branch_enable(&isolation_type_snp);
+#ifdef CONFIG_SWIOTLB
+ swiotlb_unencrypted_base = ms_hyperv.shared_gpa_boundary;
+#endif
+ }
+
+#ifdef CONFIG_SWIOTLB
+ /*
+	 * Enable swiotlb force mode in an Isolation VM so that DMA
+	 * transactions go through the swiotlb bounce buffer.
+ */
+ swiotlb_force = SWIOTLB_FORCE;
+#endif
}
if (hv_max_functions_eax >= HYPERV_CPUID_NESTED_FEATURES) {
diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index 001808e3901c..48afe96ae0f0 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -410,6 +410,8 @@ void sgx_encl_release(struct kref *ref)
}
kfree(entry);
+ /* Invoke scheduler to prevent soft lockups. */
+ cond_resched();
}
xa_destroy(&encl->page_array);
diff --git a/arch/x86/kernel/cpu/sgx/encls.h b/arch/x86/kernel/cpu/sgx/encls.h
index 9b204843b78d..fa04a73daf9c 100644
--- a/arch/x86/kernel/cpu/sgx/encls.h
+++ b/arch/x86/kernel/cpu/sgx/encls.h
@@ -11,26 +11,8 @@
#include <asm/traps.h>
#include "sgx.h"
-/**
- * ENCLS_FAULT_FLAG - flag signifying an ENCLS return code is a trapnr
- *
- * ENCLS has its own (positive value) error codes and also generates
- * ENCLS specific #GP and #PF faults. And the ENCLS values get munged
- * with system error codes as everything percolates back up the stack.
- * Unfortunately (for us), we need to precisely identify each unique
- * error code, e.g. the action taken if EWB fails varies based on the
- * type of fault and on the exact SGX error code, i.e. we can't simply
- * convert all faults to -EFAULT.
- *
- * To make all three error types coexist, we set bit 30 to identify an
- * ENCLS fault. Bit 31 (technically bits N:31) is used to differentiate
- * between positive (faults and SGX error codes) and negative (system
- * error codes) values.
- */
-#define ENCLS_FAULT_FLAG 0x40000000
-
/* Retrieve the encoded trapnr from the specified return code. */
-#define ENCLS_TRAPNR(r) ((r) & ~ENCLS_FAULT_FLAG)
+#define ENCLS_TRAPNR(r) ((r) & ~SGX_ENCLS_FAULT_FLAG)
/* Issue a WARN() about an ENCLS function. */
#define ENCLS_WARN(r, name) { \
@@ -50,7 +32,7 @@
*/
static inline bool encls_faulted(int ret)
{
- return ret & ENCLS_FAULT_FLAG;
+ return ret & SGX_ENCLS_FAULT_FLAG;
}
/**
@@ -88,11 +70,7 @@ static inline bool encls_failed(int ret)
asm volatile( \
"1: .byte 0x0f, 0x01, 0xcf;\n\t" \
"2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: orl $"__stringify(ENCLS_FAULT_FLAG)",%%eax\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE_FAULT(1b, 3b) \
+ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX) \
: "=a"(ret) \
: "a"(rax), inputs \
: "memory", "cc"); \
@@ -127,7 +105,7 @@ static inline bool encls_failed(int ret)
*
* Return:
* 0 on success,
- * trapnr with ENCLS_FAULT_FLAG set on fault
+ * trapnr with SGX_ENCLS_FAULT_FLAG set on fault
*/
#define __encls_N(rax, rbx_out, inputs...) \
({ \
@@ -136,11 +114,7 @@ static inline bool encls_failed(int ret)
"1: .byte 0x0f, 0x01, 0xcf;\n\t" \
" xor %%eax,%%eax;\n" \
"2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: orl $"__stringify(ENCLS_FAULT_FLAG)",%%eax\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE_FAULT(1b, 3b) \
+ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX) \
: "=a"(ret), "=b"(rbx_out) \
: "a"(rax), inputs \
: "memory"); \
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 4b41efc9e367..8e4bc6453d26 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -344,10 +344,8 @@ static void sgx_reclaim_pages(void)
{
struct sgx_epc_page *chunk[SGX_NR_TO_SCAN];
struct sgx_backing backing[SGX_NR_TO_SCAN];
- struct sgx_epc_section *section;
struct sgx_encl_page *encl_page;
struct sgx_epc_page *epc_page;
- struct sgx_numa_node *node;
pgoff_t page_index;
int cnt = 0;
int ret;
@@ -418,13 +416,7 @@ skip:
kref_put(&encl_page->encl->refcount, sgx_encl_release);
epc_page->flags &= ~SGX_EPC_PAGE_RECLAIMER_TRACKED;
- section = &sgx_epc_sections[epc_page->section];
- node = section->node;
-
- spin_lock(&node->lock);
- list_add_tail(&epc_page->list, &node->free_page_list);
- spin_unlock(&node->lock);
- atomic_long_inc(&sgx_nr_free_pages);
+ sgx_free_epc_page(epc_page);
}
}
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index ea4fe192189d..53de044e5654 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -351,7 +351,7 @@ unsigned long oops_begin(void)
}
NOKPROBE_SYMBOL(oops_begin);
-void __noreturn rewind_stack_do_exit(int signr);
+void __noreturn rewind_stack_and_make_dead(int signr);
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
@@ -386,7 +386,7 @@ void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
* reuse the task stack and that existing poisons are invalid.
*/
kasan_unpoison_task_stack(current);
- rewind_stack_do_exit(signr);
+ rewind_stack_and_make_dead(signr);
}
NOKPROBE_SYMBOL(oops_end);
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index fd2d3ab38ebb..dc7da08bc700 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -515,6 +515,7 @@ static const struct intel_early_ops gen11_early_ops __initconst = {
.stolen_size = gen9_stolen_size,
};
+/* Intel integrated GPUs for which we need to reserve "stolen memory" */
static const struct pci_device_id intel_early_ids[] __initconst = {
INTEL_I830_IDS(&i830_early_ops),
INTEL_I845G_IDS(&i845_early_ops),
@@ -592,6 +593,13 @@ static void __init intel_graphics_quirks(int num, int slot, int func)
u16 device;
int i;
+ /*
+ * Reserve "stolen memory" for an integrated GPU. If we've already
+ * found one, there's nothing to do for other (discrete) GPUs.
+ */
+ if (resource_size(&intel_graphics_stolen_res))
+ return;
+
device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
for (i = 0; i < ARRAY_SIZE(intel_early_ids); i++) {
@@ -704,7 +712,7 @@ static struct chipset early_qrk[] __initdata = {
{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
- QFLAG_APPLY_ONCE, intel_graphics_quirks },
+ 0, intel_graphics_quirks },
/*
* HPET on the current version of the Baytrail platform has accuracy
* problems: it will halt in deep idle state - so we disable it.
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index dd3777ac0443..8dea01ffc5c1 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -206,7 +206,27 @@ void fpu_reset_from_exception_fixup(void)
}
#if IS_ENABLED(CONFIG_KVM)
-static void __fpstate_reset(struct fpstate *fpstate);
+static void __fpstate_reset(struct fpstate *fpstate, u64 xfd);
+
+static void fpu_init_guest_permissions(struct fpu_guest *gfpu)
+{
+ struct fpu_state_perm *fpuperm;
+ u64 perm;
+
+ if (!IS_ENABLED(CONFIG_X86_64))
+ return;
+
+ spin_lock_irq(&current->sighand->siglock);
+ fpuperm = &current->group_leader->thread.fpu.guest_perm;
+ perm = fpuperm->__state_perm;
+
+ /* First fpstate allocation locks down permissions. */
+ WRITE_ONCE(fpuperm->__state_perm, perm | FPU_GUEST_PERM_LOCKED);
+
+ spin_unlock_irq(&current->sighand->siglock);
+
+ gfpu->perm = perm & ~FPU_GUEST_PERM_LOCKED;
+}
bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
{
@@ -218,12 +238,18 @@ bool fpu_alloc_guest_fpstate(struct fpu_guest *gfpu)
if (!fpstate)
return false;
- __fpstate_reset(fpstate);
+	/* Leave xfd at 0 (the reset value defined by the spec) */
+ __fpstate_reset(fpstate, 0);
fpstate_init_user(fpstate);
fpstate->is_valloc = true;
fpstate->is_guest = true;
- gfpu->fpstate = fpstate;
+ gfpu->fpstate = fpstate;
+ gfpu->xfeatures = fpu_user_cfg.default_features;
+ gfpu->perm = fpu_user_cfg.default_features;
+ gfpu->uabi_size = fpu_user_cfg.default_size;
+ fpu_init_guest_permissions(gfpu);
+
return true;
}
EXPORT_SYMBOL_GPL(fpu_alloc_guest_fpstate);
@@ -243,6 +269,64 @@ void fpu_free_guest_fpstate(struct fpu_guest *gfpu)
}
EXPORT_SYMBOL_GPL(fpu_free_guest_fpstate);
+/*
+ * fpu_enable_guest_xfd_features - Check xfeatures against guest perm and enable
+ * @guest_fpu: Pointer to the guest FPU container
+ * @xfeatures: Features requested by guest CPUID
+ *
+ * Enable all dynamic xfeatures according to guest perm and requested CPUID.
+ *
+ * Return: 0 on success, error code otherwise
+ */
+int fpu_enable_guest_xfd_features(struct fpu_guest *guest_fpu, u64 xfeatures)
+{
+ lockdep_assert_preemption_enabled();
+
+ /* Nothing to do if all requested features are already enabled. */
+ xfeatures &= ~guest_fpu->xfeatures;
+ if (!xfeatures)
+ return 0;
+
+ return __xfd_enable_feature(xfeatures, guest_fpu);
+}
+EXPORT_SYMBOL_GPL(fpu_enable_guest_xfd_features);
+
+#ifdef CONFIG_X86_64
+void fpu_update_guest_xfd(struct fpu_guest *guest_fpu, u64 xfd)
+{
+ fpregs_lock();
+ guest_fpu->fpstate->xfd = xfd;
+ if (guest_fpu->fpstate->in_use)
+ xfd_update_state(guest_fpu->fpstate);
+ fpregs_unlock();
+}
+EXPORT_SYMBOL_GPL(fpu_update_guest_xfd);
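
To show the intended consumer, a hypothetical WRMSR-emulation path; the
validity check is an assumption standing in for whatever mask the hypervisor
derives from guest CPUID:

	/* Sketch: forward an intercepted guest write of MSR_IA32_XFD. */
	static int example_set_xfd(struct kvm_vcpu *vcpu, u64 data,
				   u64 guest_xfd_mask)
	{
		if (data & ~guest_xfd_mask)	/* assumed validity check */
			return 1;		/* inject #GP */

		fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data);
		return 0;
	}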
+
+/**
+ * fpu_sync_guest_vmexit_xfd_state - Synchronize XFD MSR and software state
+ *
+ * Must be invoked from KVM after a VMEXIT before enabling interrupts when
+ * XFD write emulation is disabled. This is required because the guest can
+ * freely modify XFD and the state at VMEXIT is not guaranteed to be the
+ * same as the state on VMENTER. So the software state has to be updated before
+ * any operation which depends on it can take place.
+ *
+ * Note: It can be invoked unconditionally even when write emulation is
+ * enabled, at the cost of a pointless MSR read in that case.
+ */
+void fpu_sync_guest_vmexit_xfd_state(void)
+{
+ struct fpstate *fps = current->thread.fpu.fpstate;
+
+ lockdep_assert_irqs_disabled();
+ if (fpu_state_size_dynamic()) {
+ rdmsrl(MSR_IA32_XFD, fps->xfd);
+ __this_cpu_write(xfd_state, fps->xfd);
+ }
+}
+EXPORT_SYMBOL_GPL(fpu_sync_guest_vmexit_xfd_state);
+#endif /* CONFIG_X86_64 */
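
And the counterpart on the exit path, as the kernel-doc above prescribes — a
sketch only, with the surrounding VMEXIT handler assumed:

	/* Sketch: VMEXIT tail with XFD write interception disabled. */
	static void example_handle_exit_irqoff(struct kvm_vcpu *vcpu)
	{
		/*
		 * Interrupts are still disabled here; the guest may have
		 * changed XFD, so resync the software state before anything
		 * on the way out depends on it.
		 */
		fpu_sync_guest_vmexit_xfd_state();
	}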
+
int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu, bool enter_guest)
{
struct fpstate *guest_fps = guest_fpu->fpstate;
@@ -437,26 +521,28 @@ void fpstate_init_user(struct fpstate *fpstate)
fpstate_init_fstate(fpstate);
}
-static void __fpstate_reset(struct fpstate *fpstate)
+static void __fpstate_reset(struct fpstate *fpstate, u64 xfd)
{
/* Initialize sizes and feature masks */
fpstate->size = fpu_kernel_cfg.default_size;
fpstate->user_size = fpu_user_cfg.default_size;
fpstate->xfeatures = fpu_kernel_cfg.default_features;
fpstate->user_xfeatures = fpu_user_cfg.default_features;
- fpstate->xfd = init_fpstate.xfd;
+ fpstate->xfd = xfd;
}
void fpstate_reset(struct fpu *fpu)
{
/* Set the fpstate pointer to the default fpstate */
fpu->fpstate = &fpu->__fpstate;
- __fpstate_reset(fpu->fpstate);
+ __fpstate_reset(fpu->fpstate, init_fpstate.xfd);
/* Initialize the permission related info in fpu */
fpu->perm.__state_perm = fpu_kernel_cfg.default_features;
fpu->perm.__state_size = fpu_kernel_cfg.default_size;
fpu->perm.__user_state_size = fpu_user_cfg.default_size;
+ /* Same defaults for guests */
+ fpu->guest_perm = fpu->perm;
}
static inline void fpu_inherit_perms(struct fpu *dst_fpu)
@@ -467,6 +553,7 @@ static inline void fpu_inherit_perms(struct fpu *dst_fpu)
spin_lock_irq(&current->sighand->siglock);
/* Fork also inherits the permissions of the parent */
dst_fpu->perm = src_fpu->perm;
+ dst_fpu->guest_perm = src_fpu->guest_perm;
spin_unlock_irq(&current->sighand->siglock);
}
}
diff --git a/arch/x86/kernel/fpu/legacy.h b/arch/x86/kernel/fpu/legacy.h
index 17c26b164c63..098f367bb8a7 100644
--- a/arch/x86/kernel/fpu/legacy.h
+++ b/arch/x86/kernel/fpu/legacy.h
@@ -35,11 +35,7 @@ static inline void ldmxcsr(u32 mxcsr)
int err; \
asm volatile("1:" #insn "\n\t" \
"2:\n" \
- ".section .fixup,\"ax\"\n" \
- "3: movl $-1,%[err]\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE(1b, 3b) \
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %[err]) \
: [err] "=r" (err), output \
: "0"(0), input); \
err; \
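
The hunk above is part of the tree-wide removal of inline .fixup sections:
instead of a local stub that loads an error code and jumps back, the
exception-table entry itself names a fixup type and a target register. A
minimal standalone illustration of the new shape (the instruction and operands
are placeholders):

	/* Sketch: on a fault at 1:, execution resumes at 2: and the
	 * extable handler writes -EFAULT into the register backing
	 * %[err]; no .fixup stub is emitted. */
	static inline int example_fxsave(struct fxregs_state *fx)
	{
		int err;

		asm volatile("1: fxsave %[fx]\n"
			     "2:\n"
			     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %[err])
			     : [err] "=r" (err), [fx] "=m" (*fx)
			     : "0" (0));
		return err;	/* 0 on success, -EFAULT on fault */
	}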
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
index 437d7c930c0b..75ffaef8c299 100644
--- a/arch/x86/kernel/fpu/regset.c
+++ b/arch/x86/kernel/fpu/regset.c
@@ -91,11 +91,9 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
struct fpu *fpu = &target->thread.fpu;
- struct user32_fxsr_struct newstate;
+ struct fxregs_state newstate;
int ret;
- BUILD_BUG_ON(sizeof(newstate) != sizeof(struct fxregs_state));
-
if (!cpu_feature_enabled(X86_FEATURE_FXSR))
return -ENODEV;
@@ -116,9 +114,10 @@ int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
/* Copy the state */
memcpy(&fpu->fpstate->regs.fxsave, &newstate, sizeof(newstate));
- /* Clear xmm8..15 */
+ /* Clear xmm8..15 for 32-bit callers */
BUILD_BUG_ON(sizeof(fpu->__fpstate.regs.fxsave.xmm_space) != 16 * 16);
- memset(&fpu->fpstate->regs.fxsave.xmm_space[8], 0, 8 * 16);
+ if (in_ia32_syscall())
+ memset(&fpu->fpstate->regs.fxsave.xmm_space[8*4], 0, 8 * 16);
/* Mark FP and SSE as in use when XSAVE is enabled */
if (use_xsave())
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index d28829403ed0..02b3ddaf4f75 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -1500,35 +1500,13 @@ void fpstate_free(struct fpu *fpu)
}
/**
- * fpu_install_fpstate - Update the active fpstate in the FPU
- *
- * @fpu: A struct fpu * pointer
- * @newfps: A struct fpstate * pointer
- *
- * Returns: A null pointer if the last active fpstate is the embedded
- * one or the new fpstate is already installed;
- * otherwise, a pointer to the old fpstate which has to
- * be freed by the caller.
- */
-static struct fpstate *fpu_install_fpstate(struct fpu *fpu,
- struct fpstate *newfps)
-{
- struct fpstate *oldfps = fpu->fpstate;
-
- if (fpu->fpstate == newfps)
- return NULL;
-
- fpu->fpstate = newfps;
- return oldfps != &fpu->__fpstate ? oldfps : NULL;
-}
-
-/**
* fpstate_realloc - Reallocate struct fpstate for the requested new features
*
* @xfeatures: A bitmap of xstate features which extend the enabled features
* of that task
* @ksize: The required size for the kernel buffer
* @usize: The required size for user space buffers
+ * @guest_fpu: Pointer to a guest FPU container. NULL for host allocations
*
* Note vs. vmalloc(): If the task with a vzalloc()-allocated buffer
* terminates quickly, vfree()-induced IPIs may be a concern, but tasks
@@ -1537,13 +1515,13 @@ static struct fpstate *fpu_install_fpstate(struct fpu *fpu,
* Returns: 0 on success, -ENOMEM on allocation error.
*/
static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
- unsigned int usize)
+ unsigned int usize, struct fpu_guest *guest_fpu)
{
struct fpu *fpu = &current->thread.fpu;
struct fpstate *curfps, *newfps = NULL;
unsigned int fpsize;
+ bool in_use;
- curfps = fpu->fpstate;
fpsize = ksize + ALIGN(offsetof(struct fpstate, regs), 64);
newfps = vzalloc(fpsize);
@@ -1553,28 +1531,56 @@ static int fpstate_realloc(u64 xfeatures, unsigned int ksize,
newfps->user_size = usize;
newfps->is_valloc = true;
+ /*
+	 * When a guest FPU is supplied, use @guest_fpu->fpstate as the
+	 * reference, independently of whether it is currently in use.
+ */
+ curfps = guest_fpu ? guest_fpu->fpstate : fpu->fpstate;
+
+ /* Determine whether @curfps is the active fpstate */
+ in_use = fpu->fpstate == curfps;
+
+ if (guest_fpu) {
+ newfps->is_guest = true;
+ newfps->is_confidential = curfps->is_confidential;
+ newfps->in_use = curfps->in_use;
+ guest_fpu->xfeatures |= xfeatures;
+ guest_fpu->uabi_size = usize;
+ }
+
fpregs_lock();
/*
- * Ensure that the current state is in the registers before
- * swapping fpstate as that might invalidate it due to layout
- * changes.
+ * If @curfps is in use, ensure that the current state is in the
+ * registers before swapping fpstate as that might invalidate it
+ * due to layout changes.
*/
- if (test_thread_flag(TIF_NEED_FPU_LOAD))
+ if (in_use && test_thread_flag(TIF_NEED_FPU_LOAD))
fpregs_restore_userregs();
newfps->xfeatures = curfps->xfeatures | xfeatures;
newfps->user_xfeatures = curfps->user_xfeatures | xfeatures;
newfps->xfd = curfps->xfd & ~xfeatures;
- curfps = fpu_install_fpstate(fpu, newfps);
-
/* Do the final updates within the locked region */
xstate_init_xcomp_bv(&newfps->regs.xsave, newfps->xfeatures);
- xfd_update_state(newfps);
+ if (guest_fpu) {
+ guest_fpu->fpstate = newfps;
+ /* If curfps is active, update the FPU fpstate pointer */
+ if (in_use)
+ fpu->fpstate = newfps;
+ } else {
+ fpu->fpstate = newfps;
+ }
+
+ if (in_use)
+ xfd_update_state(fpu->fpstate);
fpregs_unlock();
- vfree(curfps);
+ /* Only free valloc'ed state */
+ if (curfps && curfps->is_valloc)
+ vfree(curfps);
+
return 0;
}
@@ -1595,7 +1601,7 @@ static int validate_sigaltstack(unsigned int usize)
return 0;
}
-static int __xstate_request_perm(u64 permitted, u64 requested)
+static int __xstate_request_perm(u64 permitted, u64 requested, bool guest)
{
/*
* This deliberately does not exclude !XSAVES as we still might
@@ -1605,9 +1611,10 @@ static int __xstate_request_perm(u64 permitted, u64 requested)
*/
bool compacted = cpu_feature_enabled(X86_FEATURE_XSAVES);
struct fpu *fpu = &current->group_leader->thread.fpu;
+ struct fpu_state_perm *perm;
unsigned int ksize, usize;
u64 mask;
- int ret;
+ int ret = 0;
/* Check whether fully enabled */
if ((permitted & requested) == requested)
@@ -1621,15 +1628,18 @@ static int __xstate_request_perm(u64 permitted, u64 requested)
mask &= XFEATURE_MASK_USER_SUPPORTED;
usize = xstate_calculate_size(mask, false);
- ret = validate_sigaltstack(usize);
- if (ret)
- return ret;
+ if (!guest) {
+ ret = validate_sigaltstack(usize);
+ if (ret)
+ return ret;
+ }
+ perm = guest ? &fpu->guest_perm : &fpu->perm;
/* Pairs with the READ_ONCE() in xstate_get_group_perm() */
- WRITE_ONCE(fpu->perm.__state_perm, requested);
+ WRITE_ONCE(perm->__state_perm, requested);
/* Protected by sighand lock */
- fpu->perm.__state_size = ksize;
- fpu->perm.__user_state_size = usize;
+ perm->__state_size = ksize;
+ perm->__user_state_size = usize;
return ret;
}
@@ -1640,7 +1650,7 @@ static const u64 xstate_prctl_req[XFEATURE_MAX] = {
[XFEATURE_XTILE_DATA] = XFEATURE_MASK_XTILE_DATA,
};
-static int xstate_request_perm(unsigned long idx)
+static int xstate_request_perm(unsigned long idx, bool guest)
{
u64 permitted, requested;
int ret;
@@ -1661,26 +1671,33 @@ static int xstate_request_perm(unsigned long idx)
return -EOPNOTSUPP;
/* Lockless quick check */
- permitted = xstate_get_host_group_perm();
+ permitted = xstate_get_group_perm(guest);
if ((permitted & requested) == requested)
return 0;
/* Protect against concurrent modifications */
spin_lock_irq(&current->sighand->siglock);
- permitted = xstate_get_host_group_perm();
- ret = __xstate_request_perm(permitted, requested);
+ permitted = xstate_get_group_perm(guest);
+
+ /* First vCPU allocation locks the permissions. */
+ if (guest && (permitted & FPU_GUEST_PERM_LOCKED))
+ ret = -EBUSY;
+ else
+ ret = __xstate_request_perm(permitted, requested, guest);
spin_unlock_irq(&current->sighand->siglock);
return ret;
}
-int xfd_enable_feature(u64 xfd_err)
+int __xfd_enable_feature(u64 xfd_err, struct fpu_guest *guest_fpu)
{
u64 xfd_event = xfd_err & XFEATURE_MASK_USER_DYNAMIC;
+ struct fpu_state_perm *perm;
unsigned int ksize, usize;
struct fpu *fpu;
if (!xfd_event) {
- pr_err_once("XFD: Invalid xfd error: %016llx\n", xfd_err);
+ if (!guest_fpu)
+ pr_err_once("XFD: Invalid xfd error: %016llx\n", xfd_err);
return 0;
}
@@ -1688,14 +1705,16 @@ int xfd_enable_feature(u64 xfd_err)
spin_lock_irq(&current->sighand->siglock);
/* If not permitted let it die */
- if ((xstate_get_host_group_perm() & xfd_event) != xfd_event) {
+ if ((xstate_get_group_perm(!!guest_fpu) & xfd_event) != xfd_event) {
spin_unlock_irq(&current->sighand->siglock);
return -EPERM;
}
fpu = &current->group_leader->thread.fpu;
- ksize = fpu->perm.__state_size;
- usize = fpu->perm.__user_state_size;
+ perm = guest_fpu ? &fpu->guest_perm : &fpu->perm;
+ ksize = perm->__state_size;
+ usize = perm->__user_state_size;
+
/*
* The feature is permitted. State size is sufficient. Dropping
* the lock is safe here even if more features are added from
@@ -1708,17 +1727,29 @@ int xfd_enable_feature(u64 xfd_err)
* Try to allocate a new fpstate. If that fails there is no way
* out.
*/
- if (fpstate_realloc(xfd_event, ksize, usize))
+ if (fpstate_realloc(xfd_event, ksize, usize, guest_fpu))
return -EFAULT;
return 0;
}
+
+int xfd_enable_feature(u64 xfd_err)
+{
+ return __xfd_enable_feature(xfd_err, NULL);
+}
+
#else /* CONFIG_X86_64 */
-static inline int xstate_request_perm(unsigned long idx)
+static inline int xstate_request_perm(unsigned long idx, bool guest)
{
return -EPERM;
}
#endif /* !CONFIG_X86_64 */
+u64 xstate_get_guest_group_perm(void)
+{
+ return xstate_get_group_perm(true);
+}
+EXPORT_SYMBOL_GPL(xstate_get_guest_group_perm);
+
/**
* fpu_xstate_prctl - xstate permission operations
* @tsk: Redundant pointer to current
@@ -1742,6 +1773,7 @@ long fpu_xstate_prctl(struct task_struct *tsk, int option, unsigned long arg2)
u64 __user *uptr = (u64 __user *)arg2;
u64 permitted, supported;
unsigned long idx = arg2;
+ bool guest = false;
if (tsk != current)
return -EPERM;
@@ -1760,11 +1792,20 @@ long fpu_xstate_prctl(struct task_struct *tsk, int option, unsigned long arg2)
permitted &= XFEATURE_MASK_USER_SUPPORTED;
return put_user(permitted, uptr);
+ case ARCH_GET_XCOMP_GUEST_PERM:
+ permitted = xstate_get_guest_group_perm();
+ permitted &= XFEATURE_MASK_USER_SUPPORTED;
+ return put_user(permitted, uptr);
+
+ case ARCH_REQ_XCOMP_GUEST_PERM:
+ guest = true;
+ fallthrough;
+
case ARCH_REQ_XCOMP_PERM:
if (!IS_ENABLED(CONFIG_X86_64))
return -EOPNOTSUPP;
- return xstate_request_perm(idx);
+ return xstate_request_perm(idx, guest);
default:
return -EINVAL;
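
From userspace, the new prctl pair mirrors the existing host one. A hedged
usage sketch — the constants follow asm/prctl.h as extended by this series,
and XFEATURE_XTILE_DATA is bit 18:

	/* Sketch: request AMX tile-data permission for this process's guests. */
	#include <sys/syscall.h>
	#include <unistd.h>

	#define ARCH_GET_XCOMP_GUEST_PERM	0x1024
	#define ARCH_REQ_XCOMP_GUEST_PERM	0x1025
	#define XFEATURE_XTILE_DATA		18

	static int request_guest_amx(void)
	{
		unsigned long long permitted = 0;

		if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
			    XFEATURE_XTILE_DATA))
			return -1;
		/* Read back: the XTILE_DATA bit should now be set. */
		return syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM,
			       &permitted);
	}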
diff --git a/arch/x86/kernel/fpu/xstate.h b/arch/x86/kernel/fpu/xstate.h
index 86ea7c0fa2f6..d22ace092ca2 100644
--- a/arch/x86/kernel/fpu/xstate.h
+++ b/arch/x86/kernel/fpu/xstate.h
@@ -20,10 +20,19 @@ static inline void xstate_init_xcomp_bv(struct xregs_state *xsave, u64 mask)
xsave->header.xcomp_bv = mask | XCOMP_BV_COMPACTED_FORMAT;
}
-static inline u64 xstate_get_host_group_perm(void)
+static inline u64 xstate_get_group_perm(bool guest)
{
+ struct fpu *fpu = &current->group_leader->thread.fpu;
+ struct fpu_state_perm *perm;
+
/* Pairs with WRITE_ONCE() in xstate_request_perm() */
- return READ_ONCE(current->group_leader->thread.fpu.perm.__state_perm);
+ perm = guest ? &fpu->guest_perm : &fpu->perm;
+ return READ_ONCE(perm->__state_perm);
+}
+
+static inline u64 xstate_get_host_group_perm(void)
+{
+ return xstate_get_group_perm(false);
}
enum xstate_copy_mode {
@@ -108,11 +117,7 @@ static inline u64 xfeatures_mask_independent(void)
"\n" \
"xor %[err], %[err]\n" \
"3:\n" \
- ".pushsection .fixup,\"ax\"\n" \
- "4: movl $-2, %[err]\n" \
- "jmp 3b\n" \
- ".popsection\n" \
- _ASM_EXTABLE(661b, 4b) \
+ _ASM_EXTABLE_TYPE_REG(661b, 3b, EX_TYPE_EFAULT_REG, %[err]) \
: [err] "=r" (err) \
: "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
: "memory")
@@ -149,8 +154,14 @@ static inline void xfd_update_state(struct fpstate *fpstate)
}
}
}
+
+extern int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu);
#else
static inline void xfd_update_state(struct fpstate *fpstate) { }
+
+static inline int __xfd_enable_feature(u64 which, struct fpu_guest *guest_fpu) {
+ return -EPERM;
+}
#endif
/*
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index c39f906cdc4e..7cc540e6de0c 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -303,7 +303,7 @@ union ftrace_op_code_union {
} __attribute__((packed));
};
-#define RET_SIZE 1
+#define RET_SIZE 1 + IS_ENABLED(CONFIG_SLS)
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
diff --git a/arch/x86/kernel/ftrace_32.S b/arch/x86/kernel/ftrace_32.S
index e405fe1a8bf4..a0ed0e4a2c0c 100644
--- a/arch/x86/kernel/ftrace_32.S
+++ b/arch/x86/kernel/ftrace_32.S
@@ -19,7 +19,7 @@
#endif
SYM_FUNC_START(__fentry__)
- ret
+ RET
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)
@@ -84,7 +84,7 @@ ftrace_graph_call:
/* This is weak to keep gas from relaxing the jumps */
SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
- ret
+ RET
SYM_CODE_END(ftrace_caller)
SYM_CODE_START(ftrace_regs_caller)
@@ -177,7 +177,7 @@ SYM_CODE_START(ftrace_graph_caller)
popl %edx
popl %ecx
popl %eax
- ret
+ RET
SYM_CODE_END(ftrace_graph_caller)
.globl return_to_handler
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 7a879901f103..11ac028e30e4 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -132,7 +132,7 @@
#ifdef CONFIG_DYNAMIC_FTRACE
SYM_FUNC_START(__fentry__)
- retq
+ RET
SYM_FUNC_END(__fentry__)
EXPORT_SYMBOL(__fentry__)
@@ -176,11 +176,11 @@ SYM_FUNC_END(ftrace_caller);
SYM_FUNC_START(ftrace_epilogue)
/*
* This is weak to keep gas from relaxing the jumps.
- * It is also used to copy the retq for trampolines.
+ * It is also used to copy the RET for trampolines.
*/
SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
UNWIND_HINT_FUNC
- retq
+ RET
SYM_FUNC_END(ftrace_epilogue)
SYM_FUNC_START(ftrace_regs_caller)
@@ -284,7 +284,7 @@ SYM_FUNC_START(__fentry__)
jnz trace
SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
- retq
+ RET
trace:
/* save_mcount_regs fills in first two parameters */
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index d8c64dab0efe..eb8656bac99b 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -340,7 +340,7 @@ SYM_FUNC_END(startup_32_smp)
__INIT
setup_once:
andl $0,setup_once_ref /* Once is enough, thanks */
- ret
+ RET
SYM_FUNC_START(early_idt_handler_array)
# 36(%esp) %eflags
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 882213df3713..71f336425e58 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -1435,8 +1435,12 @@ irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id)
hpet_rtc_timer_reinit();
memset(&curr_time, 0, sizeof(struct rtc_time));
- if (hpet_rtc_flags & (RTC_UIE | RTC_AIE))
- mc146818_get_time(&curr_time);
+ if (hpet_rtc_flags & (RTC_UIE | RTC_AIE)) {
+ if (unlikely(mc146818_get_time(&curr_time) < 0)) {
+ pr_err_ratelimited("unable to read current time from RTC\n");
+ return IRQ_HANDLED;
+ }
+ }
if (hpet_rtc_flags & RTC_UIE &&
curr_time.tm_sec != hpet_prev_update_sec) {
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
index 760e1f293093..aaf9e776f323 100644
--- a/arch/x86/kernel/irqflags.S
+++ b/arch/x86/kernel/irqflags.S
@@ -11,7 +11,7 @@
SYM_FUNC_START(native_save_fl)
pushf
pop %_ASM_AX
- ret
+ RET
SYM_FUNC_END(native_save_fl)
.popsection
EXPORT_SYMBOL(native_save_fl)
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index fce99e249d61..6290712cb36d 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1051,7 +1051,7 @@ asm(
" addl $4, %esp\n"
" popfl\n"
#endif
- " ret\n"
+ ASM_RET
".size __kretprobe_trampoline, .-__kretprobe_trampoline\n"
);
NOKPROBE_SYMBOL(__kretprobe_trampoline);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 59abbdad7729..a438217cbfac 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -313,7 +313,7 @@ static void kvm_register_steal_time(void)
return;
wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
- pr_info("stealtime: cpu %d, msr %llx\n", cpu,
+ pr_debug("stealtime: cpu %d, msr %llx\n", cpu,
(unsigned long long) slow_virt_to_phys(st));
}
@@ -350,7 +350,7 @@ static void kvm_guest_cpu_init(void)
wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
__this_cpu_write(apf_reason.enabled, 1);
- pr_info("setup async PF for cpu %d\n", smp_processor_id());
+ pr_debug("setup async PF for cpu %d\n", smp_processor_id());
}
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
@@ -376,7 +376,7 @@ static void kvm_pv_disable_apf(void)
wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
__this_cpu_write(apf_reason.enabled, 0);
- pr_info("disable async PF for cpu %d\n", smp_processor_id());
+ pr_debug("disable async PF for cpu %d\n", smp_processor_id());
}
static void kvm_disable_steal_time(void)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 462dd8e9b03d..a35cbf9107af 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -174,7 +174,7 @@ static void kvm_register_clock(char *txt)
pa = slow_virt_to_phys(&src->pvti) | 0x01ULL;
wrmsrl(msr_kvm_system_time, pa);
- pr_info("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt);
+ pr_debug("kvm-clock: cpu %d, msr %llx, %s", smp_processor_id(), pa, txt);
}
static void kvm_save_sched_clock_state(void)
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index 169fb6f4cd2e..95fa745e310a 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -67,6 +67,7 @@ static unsigned long int get_module_load_offset(void)
void *module_alloc(unsigned long size)
{
+ gfp_t gfp_mask = GFP_KERNEL;
void *p;
if (PAGE_ALIGN(size) > MODULES_LEN)
@@ -74,10 +75,10 @@ void *module_alloc(unsigned long size)
p = __vmalloc_node_range(size, MODULE_ALIGN,
MODULES_VADDR + get_module_load_offset(),
- MODULES_END, GFP_KERNEL,
- PAGE_KERNEL, 0, NUMA_NO_NODE,
+ MODULES_END, gfp_mask,
+ PAGE_KERNEL, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
__builtin_return_address(0));
- if (p && (kasan_module_alloc(p, size) < 0)) {
+ if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) {
vfree(p);
return NULL;
}
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 7f7636aac620..4420499f7bb4 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -41,7 +41,7 @@ extern void _paravirt_nop(void);
asm (".pushsection .entry.text, \"ax\"\n"
".global _paravirt_nop\n"
"_paravirt_nop:\n\t"
- "ret\n\t"
+ ASM_RET
".size _paravirt_nop, . - _paravirt_nop\n\t"
".type _paravirt_nop, @function\n\t"
".popsection");
@@ -51,7 +51,7 @@ asm (".pushsection .entry.text, \"ax\"\n"
".global paravirt_ret0\n"
"paravirt_ret0:\n\t"
"xor %" _ASM_AX ", %" _ASM_AX ";\n\t"
- "ret\n\t"
+ ASM_RET
".size paravirt_ret0, . - paravirt_ret0\n\t"
".type paravirt_ret0, @function\n\t"
".popsection");
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 5d481038fe0b..81d8ef036637 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -993,6 +993,8 @@ long do_arch_prctl_common(struct task_struct *task, int option,
case ARCH_GET_XCOMP_SUPP:
case ARCH_GET_XCOMP_PERM:
case ARCH_REQ_XCOMP_PERM:
+ case ARCH_GET_XCOMP_GUEST_PERM:
+ case ARCH_REQ_XCOMP_GUEST_PERM:
return fpu_xstate_prctl(task, option, arg2);
}
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 6d2244c94799..8d2f2f995539 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1224,7 +1224,7 @@ static struct user_regset x86_64_regsets[] __ro_after_init = {
},
[REGSET_FP] = {
.core_note_type = NT_PRFPREG,
- .n = sizeof(struct user_i387_struct) / sizeof(long),
+ .n = sizeof(struct fxregs_state) / sizeof(long),
.size = sizeof(long), .align = sizeof(long),
.active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
},
@@ -1271,7 +1271,7 @@ static struct user_regset x86_32_regsets[] __ro_after_init = {
},
[REGSET_XFP] = {
.core_note_type = NT_PRXFPREG,
- .n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
+ .n = sizeof(struct fxregs_state) / sizeof(u32),
.size = sizeof(u32), .align = sizeof(u32),
.active = regset_xregset_fpregs_active, .regset_get = xfpregs_get, .set = xfpregs_set
},
diff --git a/arch/x86/kernel/relocate_kernel_32.S b/arch/x86/kernel/relocate_kernel_32.S
index f469153eca8a..fcc8a7699103 100644
--- a/arch/x86/kernel/relocate_kernel_32.S
+++ b/arch/x86/kernel/relocate_kernel_32.S
@@ -91,7 +91,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
movl %edi, %eax
addl $(identity_mapped - relocate_kernel), %eax
pushl %eax
- ret
+ RET
SYM_CODE_END(relocate_kernel)
SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
@@ -159,7 +159,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
xorl %edx, %edx
xorl %esi, %esi
xorl %ebp, %ebp
- ret
+ RET
1:
popl %edx
movl CP_PA_SWAP_PAGE(%edi), %esp
@@ -190,7 +190,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
movl %edi, %eax
addl $(virtual_mapped - relocate_kernel), %eax
pushl %eax
- ret
+ RET
SYM_CODE_END(identity_mapped)
SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@@ -208,7 +208,7 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
popl %edi
popl %esi
popl %ebx
- ret
+ RET
SYM_CODE_END(virtual_mapped)
/* Do the copies */
@@ -271,7 +271,7 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
popl %edi
popl %ebx
popl %ebp
- ret
+ RET
SYM_CODE_END(swap_pages)
.globl kexec_control_code_size
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
index c8fe74a28143..399f075ccdc4 100644
--- a/arch/x86/kernel/relocate_kernel_64.S
+++ b/arch/x86/kernel/relocate_kernel_64.S
@@ -104,7 +104,7 @@ SYM_CODE_START_NOALIGN(relocate_kernel)
/* jump to identity mapped page */
addq $(identity_mapped - relocate_kernel), %r8
pushq %r8
- ret
+ RET
SYM_CODE_END(relocate_kernel)
SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
@@ -191,7 +191,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
xorl %r14d, %r14d
xorl %r15d, %r15d
- ret
+ RET
1:
popq %rdx
@@ -210,7 +210,7 @@ SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
call swap_pages
movq $virtual_mapped, %rax
pushq %rax
- ret
+ RET
SYM_CODE_END(identity_mapped)
SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
@@ -231,7 +231,7 @@ SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
popq %r12
popq %rbp
popq %rbx
- ret
+ RET
SYM_CODE_END(virtual_mapped)
/* Do the copies */
@@ -288,7 +288,7 @@ SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
lea PAGE_SIZE(%rax), %rsi
jmp 0b
3:
- ret
+ RET
SYM_CODE_END(swap_pages)
.globl kexec_control_code_size
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 7b65275544b2..49325caa7307 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -84,60 +84,6 @@ static bool __init pcpu_need_numa(void)
}
#endif
-/**
- * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
- * @cpu: cpu to allocate for
- * @size: size allocation in bytes
- * @align: alignment
- *
- * Allocate @size bytes aligned at @align for cpu @cpu. This wrapper
- * does the right thing for NUMA regardless of the current
- * configuration.
- *
- * RETURNS:
- * Pointer to the allocated area on success, NULL on failure.
- */
-static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
- unsigned long align)
-{
- const unsigned long goal = __pa(MAX_DMA_ADDRESS);
-#ifdef CONFIG_NUMA
- int node = early_cpu_to_node(cpu);
- void *ptr;
-
- if (!node_online(node) || !NODE_DATA(node)) {
- ptr = memblock_alloc_from(size, align, goal);
- pr_info("cpu %d has no node %d or node-local memory\n",
- cpu, node);
- pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
- cpu, size, __pa(ptr));
- } else {
- ptr = memblock_alloc_try_nid(size, align, goal,
- MEMBLOCK_ALLOC_ACCESSIBLE,
- node);
-
- pr_debug("per cpu data for cpu%d %lu bytes on node%d at %016lx\n",
- cpu, size, node, __pa(ptr));
- }
- return ptr;
-#else
- return memblock_alloc_from(size, align, goal);
-#endif
-}
-
-/*
- * Helpers for first chunk memory allocation
- */
-static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align)
-{
- return pcpu_alloc_bootmem(cpu, size, align);
-}
-
-static void __init pcpu_fc_free(void *ptr, size_t size)
-{
- memblock_free(ptr, size);
-}
-
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
#ifdef CONFIG_NUMA
@@ -150,7 +96,12 @@ static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
#endif
}
-static void __init pcpup_populate_pte(unsigned long addr)
+static int __init pcpu_cpu_to_node(int cpu)
+{
+ return early_cpu_to_node(cpu);
+}
+
+void __init pcpu_populate_pte(unsigned long addr)
{
populate_extra_pte(addr);
}
@@ -205,15 +156,14 @@ void __init setup_per_cpu_areas(void)
rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
dyn_size, atom_size,
pcpu_cpu_distance,
- pcpu_fc_alloc, pcpu_fc_free);
+ pcpu_cpu_to_node);
if (rc < 0)
pr_warn("%s allocator failed (%d), falling back to page size\n",
pcpu_fc_names[pcpu_chosen_fc], rc);
}
if (rc < 0)
rc = pcpu_page_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
- pcpu_fc_alloc, pcpu_fc_free,
- pcpup_populate_pte);
+ pcpu_cpu_to_node);
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
diff --git a/arch/x86/kernel/sev_verify_cbit.S b/arch/x86/kernel/sev_verify_cbit.S
index ee04941a6546..3355e27c69eb 100644
--- a/arch/x86/kernel/sev_verify_cbit.S
+++ b/arch/x86/kernel/sev_verify_cbit.S
@@ -85,5 +85,5 @@ SYM_FUNC_START(sev_verify_cbit)
#endif
/* Return page-table pointer */
movq %rdi, %rax
- ret
+ RET
SYM_FUNC_END(sev_verify_cbit)
diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c
index 9c407a33a774..531fb4cbb63f 100644
--- a/arch/x86/kernel/static_call.c
+++ b/arch/x86/kernel/static_call.c
@@ -17,6 +17,8 @@ enum insn_type {
*/
static const u8 xor5rax[] = { 0x66, 0x66, 0x48, 0x31, 0xc0 };
+static const u8 retinsn[] = { RET_INSN_OPCODE, 0xcc, 0xcc, 0xcc, 0xcc };
+
static void __ref __static_call_transform(void *insn, enum insn_type type, void *func)
{
const void *emulate = NULL;
@@ -42,8 +44,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type, void
break;
case RET:
- code = text_gen_insn(RET_INSN_OPCODE, insn, func);
- size = RET_INSN_SIZE;
+ code = &retinsn;
break;
}
diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
index 641f0fe1e5b4..1258a5872d12 100644
--- a/arch/x86/kernel/verify_cpu.S
+++ b/arch/x86/kernel/verify_cpu.S
@@ -132,9 +132,9 @@ SYM_FUNC_START_LOCAL(verify_cpu)
.Lverify_cpu_no_longmode:
popf # Restore caller passed flags
movl $1,%eax
- ret
+ RET
.Lverify_cpu_sse_ok:
popf # Restore caller passed flags
xorl %eax, %eax
- ret
+ RET
SYM_FUNC_END(verify_cpu)
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 3d6dc12d198f..27f830345b6f 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -137,7 +137,6 @@ SECTIONS
ALIGN_ENTRY_TEXT_END
SOFTIRQENTRY_TEXT
STATIC_CALL_TEXT
- *(.fixup)
*(.gnu.warning)
#ifdef CONFIG_RETPOLINE
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 8b395821cb8d..7d20c1d34a3c 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -145,18 +145,6 @@ struct x86_platform_ops x86_platform __ro_after_init = {
EXPORT_SYMBOL_GPL(x86_platform);
-#if defined(CONFIG_PCI_MSI)
-struct x86_msi_ops x86_msi __ro_after_init = {
- .restore_msi_irqs = default_restore_msi_irqs,
-};
-
-/* MSI arch specific hooks */
-void arch_restore_msi_irqs(struct pci_dev *dev)
-{
- x86_msi.restore_msi_irqs(dev);
-}
-#endif
-
struct x86_apic_ops x86_apic_ops __ro_after_init = {
.io_apic_read = native_io_apic_read,
.restore = native_restore_boot_irq_mode,
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 619186138176..2b1548da00eb 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -26,7 +26,9 @@ config KVM
select PREEMPT_NOTIFIERS
select MMU_NOTIFIER
select HAVE_KVM_IRQCHIP
+ select HAVE_KVM_PFNCACHE
select HAVE_KVM_IRQFD
+ select HAVE_KVM_DIRTY_RING
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
select HAVE_KVM_IRQ_ROUTING
@@ -36,6 +38,7 @@ config KVM
select KVM_MMIO
select SCHED_INFO
select PERF_EVENTS
+ select GUEST_PERF_EVENTS
select HAVE_KVM_MSI
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_NO_POLL
@@ -43,6 +46,7 @@ config KVM
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select KVM_VFIO
select SRCU
+ select INTERVAL_TREE
select HAVE_KVM_PM_NOTIFIER if PM
help
Support hosting fully virtualized guest machines using hardware
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 75dfd27b6e8a..30f244b64523 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -7,12 +7,7 @@ ifeq ($(CONFIG_FRAME_POINTER),y)
OBJECT_FILES_NON_STANDARD_vmenter.o := y
endif
-KVM := ../../../virt/kvm
-
-kvm-y += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
- $(KVM)/eventfd.o $(KVM)/irqchip.o $(KVM)/vfio.o \
- $(KVM)/dirty_ring.o $(KVM)/binary_stats.o
-kvm-$(CONFIG_KVM_ASYNC_PF) += $(KVM)/async_pf.o
+include $(srctree)/virt/kvm/Makefile.kvm
kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 07e9215e911d..494d4d351859 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -32,7 +32,7 @@
u32 kvm_cpu_caps[NR_KVM_CPU_CAPS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);
-static u32 xstate_required_size(u64 xstate_bv, bool compacted)
+u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
int feature_bit = 0;
u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
@@ -42,7 +42,11 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
if (xstate_bv & 0x1) {
u32 eax, ebx, ecx, edx, offset;
cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
- offset = compacted ? ret : ebx;
+ /* ECX[1]: 64B alignment in compacted form */
+ if (compacted)
+ offset = (ecx & 0x2) ? ALIGN(ret, 64) : ret;
+ else
+ offset = ebx;
ret = max(ret, offset + eax);
}
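
A worked illustration of the size loop above, with made-up CPUID values,
showing where the new ECX[1] alignment check matters:

	/* Sketch of the arithmetic, not real CPUID data: */
	u32 ret = 576;			/* XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET */
	/* feature A: eax (size) = 200, ECX[1] clear */
	ret = max(ret, ret + 200);		/* 776 */
	/* feature B: eax = 8192, ECX[1] set: align its offset first */
	ret = max(ret, ALIGN(ret, 64) + 8192);	/* ALIGN(776, 64) = 832 -> 9024 */
	/* Without the ECX[1] check this would be 8968, i.e. undersized
	 * relative to what hardware actually writes in compacted form. */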
@@ -80,9 +84,12 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
return NULL;
}
-static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent)
+static int kvm_check_cpuid(struct kvm_vcpu *vcpu,
+ struct kvm_cpuid_entry2 *entries,
+ int nent)
{
struct kvm_cpuid_entry2 *best;
+ u64 xfeatures;
/*
* The existing code assumes virtual address is 48-bit or 57-bit in the
@@ -96,6 +103,42 @@ static int kvm_check_cpuid(struct kvm_cpuid_entry2 *entries, int nent)
return -EINVAL;
}
+ /*
+ * Exposing dynamic xfeatures to the guest requires additional
+ * enabling in the FPU, e.g. to expand the guest XSAVE state size.
+ */
+ best = cpuid_entry2_find(entries, nent, 0xd, 0);
+ if (!best)
+ return 0;
+
+ xfeatures = best->eax | ((u64)best->edx << 32);
+ xfeatures &= XFEATURE_MASK_USER_DYNAMIC;
+ if (!xfeatures)
+ return 0;
+
+ return fpu_enable_guest_xfd_features(&vcpu->arch.guest_fpu, xfeatures);
+}
+
+/* Check whether the supplied CPUID data is equal to what is already set for the vCPU. */
+static int kvm_cpuid_check_equal(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
+ int nent)
+{
+ struct kvm_cpuid_entry2 *orig;
+ int i;
+
+ if (nent != vcpu->arch.cpuid_nent)
+ return -EINVAL;
+
+ for (i = 0; i < nent; i++) {
+ orig = &vcpu->arch.cpuid_entries[i];
+ if (e2[i].function != orig->function ||
+ e2[i].index != orig->index ||
+ e2[i].flags != orig->flags ||
+ e2[i].eax != orig->eax || e2[i].ebx != orig->ebx ||
+ e2[i].ecx != orig->ecx || e2[i].edx != orig->edx)
+ return -EINVAL;
+ }
+
return 0;
}
@@ -125,14 +168,21 @@ static void kvm_update_kvm_cpuid_base(struct kvm_vcpu *vcpu)
}
}
-static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
+static struct kvm_cpuid_entry2 *__kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu,
+ struct kvm_cpuid_entry2 *entries, int nent)
{
u32 base = vcpu->arch.kvm_cpuid_base;
if (!base)
return NULL;
- return kvm_find_cpuid_entry(vcpu, base | KVM_CPUID_FEATURES, 0);
+ return cpuid_entry2_find(entries, nent, base | KVM_CPUID_FEATURES, 0);
+}
+
+static struct kvm_cpuid_entry2 *kvm_find_kvm_cpuid_features(struct kvm_vcpu *vcpu)
+{
+ return __kvm_find_kvm_cpuid_features(vcpu, vcpu->arch.cpuid_entries,
+ vcpu->arch.cpuid_nent);
}
void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
@@ -147,11 +197,28 @@ void kvm_update_pv_runtime(struct kvm_vcpu *vcpu)
vcpu->arch.pv_cpuid.features = best->eax;
}
-void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
+/*
+ * Calculate guest's supported XCR0 taking into account guest CPUID data and
+ * supported_xcr0 (comprised of host configuration and KVM_SUPPORTED_XCR0).
+ */
+static u64 cpuid_get_supported_xcr0(struct kvm_cpuid_entry2 *entries, int nent)
{
struct kvm_cpuid_entry2 *best;
- best = kvm_find_cpuid_entry(vcpu, 1, 0);
+ best = cpuid_entry2_find(entries, nent, 0xd, 0);
+ if (!best)
+ return 0;
+
+ return (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
+}
+
+static void __kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *entries,
+ int nent)
+{
+ struct kvm_cpuid_entry2 *best;
+ u64 guest_supported_xcr0 = cpuid_get_supported_xcr0(entries, nent);
+
+ best = cpuid_entry2_find(entries, nent, 1, 0);
if (best) {
/* Update OSXSAVE bit */
if (boot_cpu_has(X86_FEATURE_XSAVE))
@@ -162,32 +229,52 @@ void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
}
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ best = cpuid_entry2_find(entries, nent, 7, 0);
if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
cpuid_entry_change(best, X86_FEATURE_OSPKE,
kvm_read_cr4_bits(vcpu, X86_CR4_PKE));
- best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
+ best = cpuid_entry2_find(entries, nent, 0xD, 0);
if (best)
best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
- best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
+ best = cpuid_entry2_find(entries, nent, 0xD, 1);
if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- best = kvm_find_kvm_cpuid_features(vcpu);
+ best = __kvm_find_kvm_cpuid_features(vcpu, entries, nent);
if (kvm_hlt_in_guest(vcpu->kvm) && best &&
(best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);
if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
- best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
+ best = cpuid_entry2_find(entries, nent, 0x1, 0);
if (best)
cpuid_entry_change(best, X86_FEATURE_MWAIT,
vcpu->arch.ia32_misc_enable_msr &
MSR_IA32_MISC_ENABLE_MWAIT);
}
+
+ /*
+ * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
+ * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
+	 * requested XCR0 value. The enclave's XFRM must be a subset of XCR0
+ * at the time of EENTER, thus adjust the allowed XFRM by the guest's
+ * supported XCR0. Similar to XCR0 handling, FP and SSE are forced to
+ * '1' even on CPUs that don't support XSAVE.
+ */
+ best = cpuid_entry2_find(entries, nent, 0x12, 0x1);
+ if (best) {
+ best->ecx &= guest_supported_xcr0 & 0xffffffff;
+ best->edx &= guest_supported_xcr0 >> 32;
+ best->ecx |= XFEATURE_MASK_FPSSE;
+ }
+}
+
+void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
+{
+ __kvm_update_cpuid_runtime(vcpu, vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
}
EXPORT_SYMBOL_GPL(kvm_update_cpuid_runtime);
@@ -206,27 +293,8 @@ static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
kvm_apic_set_version(vcpu);
}
- best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
- if (!best)
- vcpu->arch.guest_supported_xcr0 = 0;
- else
- vcpu->arch.guest_supported_xcr0 =
- (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
-
- /*
- * Bits 127:0 of the allowed SECS.ATTRIBUTES (CPUID.0x12.0x1) enumerate
- * the supported XSAVE Feature Request Mask (XFRM), i.e. the enclave's
- * requested XCR0 value. The enclave's XFRM must be a subset of XCRO
- * at the time of EENTER, thus adjust the allowed XFRM by the guest's
- * supported XCR0. Similar to XCR0 handling, FP and SSE are forced to
- * '1' even on CPUs that don't support XSAVE.
- */
- best = kvm_find_cpuid_entry(vcpu, 0x12, 0x1);
- if (best) {
- best->ecx &= vcpu->arch.guest_supported_xcr0 & 0xffffffff;
- best->edx &= vcpu->arch.guest_supported_xcr0 >> 32;
- best->ecx |= XFEATURE_MASK_FPSSE;
- }
+ vcpu->arch.guest_supported_xcr0 =
+ cpuid_get_supported_xcr0(vcpu->arch.cpuid_entries, vcpu->arch.cpuid_nent);
kvm_update_pv_runtime(vcpu);
@@ -276,21 +344,42 @@ u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu)
static int kvm_set_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid_entry2 *e2,
int nent)
{
- int r;
+ int r;
+
+ __kvm_update_cpuid_runtime(vcpu, e2, nent);
+
+ /*
+ * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
+	 * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc. aren't
+ * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
+ * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
+ * the core vCPU model on the fly. It would've been better to forbid any
+ * KVM_SET_CPUID{,2} calls after KVM_RUN altogether but unfortunately
+ * some VMMs (e.g. QEMU) reuse vCPU fds for CPU hotplug/unplug and do
+ * KVM_SET_CPUID{,2} again. To support this legacy behavior, check
+ * whether the supplied CPUID data is equal to what's already set.
+ */
+ if (vcpu->arch.last_vmentry_cpu != -1) {
+ r = kvm_cpuid_check_equal(vcpu, e2, nent);
+ if (r)
+ return r;
+
+ kvfree(e2);
+ return 0;
+ }
- r = kvm_check_cpuid(e2, nent);
- if (r)
- return r;
+ r = kvm_check_cpuid(vcpu, e2, nent);
+ if (r)
+ return r;
- kvfree(vcpu->arch.cpuid_entries);
- vcpu->arch.cpuid_entries = e2;
- vcpu->arch.cpuid_nent = nent;
+ kvfree(vcpu->arch.cpuid_entries);
+ vcpu->arch.cpuid_entries = e2;
+ vcpu->arch.cpuid_nent = nent;
- kvm_update_kvm_cpuid_base(vcpu);
- kvm_update_cpuid_runtime(vcpu);
- kvm_vcpu_after_set_cpuid(vcpu);
+ kvm_update_kvm_cpuid_base(vcpu);
+ kvm_vcpu_after_set_cpuid(vcpu);
- return 0;
+ return 0;
}
/* when an old userspace process fills a new kernel module */
@@ -422,9 +511,11 @@ void kvm_set_cpu_caps(void)
#ifdef CONFIG_X86_64
unsigned int f_gbpages = F(GBPAGES);
unsigned int f_lm = F(LM);
+ unsigned int f_xfd = F(XFD);
#else
unsigned int f_gbpages = 0;
unsigned int f_lm = 0;
+ unsigned int f_xfd = 0;
#endif
memset(kvm_cpu_caps, 0, sizeof(kvm_cpu_caps));
@@ -463,12 +554,13 @@ void kvm_set_cpu_caps(void)
);
kvm_cpu_cap_mask(CPUID_7_0_EBX,
- F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
- F(BMI2) | F(ERMS) | F(INVPCID) | F(RTM) | 0 /*MPX*/ | F(RDSEED) |
- F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
- F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
- F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /*INTEL_PT*/
- );
+ F(FSGSBASE) | F(SGX) | F(BMI1) | F(HLE) | F(AVX2) |
+ F(FDP_EXCPTN_ONLY) | F(SMEP) | F(BMI2) | F(ERMS) | F(INVPCID) |
+ F(RTM) | F(ZERO_FCS_FDS) | 0 /*MPX*/ | F(AVX512F) |
+ F(AVX512DQ) | F(RDSEED) | F(ADX) | F(SMAP) | F(AVX512IFMA) |
+ F(CLFLUSHOPT) | F(CLWB) | 0 /*INTEL_PT*/ | F(AVX512PF) |
+ F(AVX512ER) | F(AVX512CD) | F(SHA_NI) | F(AVX512BW) |
+ F(AVX512VL));
kvm_cpu_cap_mask(CPUID_7_ECX,
F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
@@ -492,7 +584,8 @@ void kvm_set_cpu_caps(void)
F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
- F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16)
+ F(SERIALIZE) | F(TSXLDTRK) | F(AVX512_FP16) |
+ F(AMX_TILE) | F(AMX_INT8) | F(AMX_BF16)
);
/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
@@ -511,7 +604,7 @@ void kvm_set_cpu_caps(void)
);
kvm_cpu_cap_mask(CPUID_D_1_EAX,
- F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES)
+ F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES) | f_xfd
);
kvm_cpu_cap_init_scattered(CPUID_12_EAX,
@@ -523,7 +616,7 @@ void kvm_set_cpu_caps(void)
F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
- F(TOPOEXT) | F(PERFCTR_CORE)
+ F(TOPOEXT) | 0 /* PERFCTR_CORE */
);
kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
@@ -637,6 +730,8 @@ static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
case 0x14:
case 0x17:
case 0x18:
+ case 0x1d:
+ case 0x1e:
case 0x1f:
case 0x8000001d:
entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
@@ -770,10 +865,10 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
perf_get_x86_pmu_capability(&cap);
/*
- * Only support guest architectural pmu on a host
- * with architectural pmu.
+		 * The guest architectural PMU is only supported if the
+		 * architectural PMU exists on the host and the module
+		 * parameters allow it.
*/
- if (!cap.version)
+ if (!cap.version || !enable_pmu)
memset(&cap, 0, sizeof(cap));
eax.split.version_id = min(cap.version, 2);
@@ -811,12 +906,15 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
goto out;
}
break;
- case 0xd:
- entry->eax &= supported_xcr0;
- entry->ebx = xstate_required_size(supported_xcr0, false);
+ case 0xd: {
+ u64 permitted_xcr0 = supported_xcr0 & xstate_get_guest_group_perm();
+ u64 permitted_xss = supported_xss;
+
+ entry->eax &= permitted_xcr0;
+ entry->ebx = xstate_required_size(permitted_xcr0, false);
entry->ecx = entry->ebx;
- entry->edx &= supported_xcr0 >> 32;
- if (!supported_xcr0)
+ entry->edx &= permitted_xcr0 >> 32;
+ if (!permitted_xcr0)
break;
entry = do_host_cpuid(array, function, 1);
@@ -825,20 +923,20 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
cpuid_entry_override(entry, CPUID_D_1_EAX);
if (entry->eax & (F(XSAVES)|F(XSAVEC)))
- entry->ebx = xstate_required_size(supported_xcr0 | supported_xss,
+ entry->ebx = xstate_required_size(permitted_xcr0 | permitted_xss,
true);
else {
- WARN_ON_ONCE(supported_xss != 0);
+ WARN_ON_ONCE(permitted_xss != 0);
entry->ebx = 0;
}
- entry->ecx &= supported_xss;
- entry->edx &= supported_xss >> 32;
+ entry->ecx &= permitted_xss;
+ entry->edx &= permitted_xss >> 32;
for (i = 2; i < 64; ++i) {
bool s_state;
- if (supported_xcr0 & BIT_ULL(i))
+ if (permitted_xcr0 & BIT_ULL(i))
s_state = false;
- else if (supported_xss & BIT_ULL(i))
+ else if (permitted_xss & BIT_ULL(i))
s_state = true;
else
continue;
@@ -852,16 +950,20 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
* invalid sub-leafs. Only valid sub-leafs should
* reach this point, and they should have a non-zero
* save state size. Furthermore, check whether the
- * processor agrees with supported_xcr0/supported_xss
+ * processor agrees with permitted_xcr0/permitted_xss
* on whether this is an XCR0- or IA32_XSS-managed area.
*/
if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
--array->nent;
continue;
}
+
+ if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
+ entry->ecx &= ~BIT_ULL(2);
entry->edx = 0;
}
break;
+ }
case 0x12:
/* Intel SGX */
if (!kvm_cpu_cap_has(X86_FEATURE_SGX)) {
@@ -906,6 +1008,24 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
goto out;
}
break;
+ /* Intel AMX TILE */
+ case 0x1d:
+ if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
+ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+ break;
+ }
+
+ for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
+ if (!do_host_cpuid(array, function, i))
+ goto out;
+ }
+ break;
+ case 0x1e: /* TMUL information */
+ if (!kvm_cpu_cap_has(X86_FEATURE_AMX_TILE)) {
+ entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+ break;
+ }
+ break;
case KVM_CPUID_SIGNATURE: {
const u32 *sigptr = (const u32 *)KVM_SIGNATURE;
entry->eax = KVM_CPUID_FEATURES;
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index c99edfff7f82..8a770b481d9d 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -30,6 +30,8 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
u32 *ecx, u32 *edx, bool exact_only);
+u32 xstate_required_size(u64 xstate_bv, bool compacted);
+
int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
u64 kvm_vcpu_reserved_gpa_bits_raw(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c
index f33c804a922a..9240b3b7f8dd 100644
--- a/arch/x86/kvm/debugfs.c
+++ b/arch/x86/kvm/debugfs.c
@@ -110,9 +110,10 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v)
write_lock(&kvm->mmu_lock);
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
+ int bkt;
+
slots = __kvm_memslots(kvm, i);
- for (j = 0; j < slots->used_slots; j++) {
- slot = &slots->memslots[j];
+ kvm_for_each_memslot(slot, bkt, slots)
for (k = 0; k < KVM_NR_PAGE_SIZES; k++) {
rmap = slot->arch.rmap[k];
lpage_size = kvm_mmu_slot_lpages(slot, k + 1);
@@ -124,7 +125,6 @@ static int kvm_mmu_rmaps_stat_show(struct seq_file *m, void *v)
cur[index]++;
}
}
- }
}
write_unlock(&kvm->mmu_lock);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 28b1a4e57827..5719d8cfdbd9 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -175,6 +175,7 @@
#define No16 ((u64)1 << 53) /* No 16 bit operand */
#define IncSP ((u64)1 << 54) /* SP is incremented before ModRM calc */
#define TwoMemOp ((u64)1 << 55) /* Instruction has two memory operand */
+#define IsBranch ((u64)1 << 56) /* Instruction is considered a branch. */
#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
@@ -191,8 +192,9 @@
#define FASTOP_SIZE 8
struct opcode {
- u64 flags : 56;
- u64 intercept : 8;
+ u64 flags;
+ u8 intercept;
+ u8 pad[7];
union {
int (*execute)(struct x86_emulate_ctxt *ctxt);
const struct opcode *group;
@@ -315,7 +317,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
__FOP_FUNC(#name)
#define __FOP_RET(name) \
- "ret \n\t" \
+ "11: " ASM_RET \
".size " name ", .-" name "\n\t"
#define FOP_RET(name) \
@@ -344,7 +346,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
__FOP_RET(#op "_" #dst)
#define FOP1EEX(op, dst) \
- FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
+ FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)
#define FASTOP1(op) \
FOP_START(op) \
@@ -434,10 +436,6 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
#op " %al \n\t" \
__FOP_RET(#op)
-asm(".pushsection .fixup, \"ax\"\n"
- "kvm_fastop_exception: xor %esi, %esi; ret\n"
- ".popsection");
-
FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
@@ -473,12 +471,8 @@ FOP_END;
\
asm volatile("1:" insn "\n" \
"2:\n" \
- ".pushsection .fixup, \"ax\"\n" \
- "3: movl $1, %[_fault]\n" \
- " jmp 2b\n" \
- ".popsection\n" \
- _ASM_EXTABLE(1b, 3b) \
- : [_fault] "+qm"(_fault) inoutclob ); \
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %[_fault]) \
+ : [_fault] "+r"(_fault) inoutclob ); \
\
_fault ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE; \
})
@@ -4364,10 +4358,10 @@ static const struct opcode group4[] = {
static const struct opcode group5[] = {
F(DstMem | SrcNone | Lock, em_inc),
F(DstMem | SrcNone | Lock, em_dec),
- I(SrcMem | NearBranch, em_call_near_abs),
- I(SrcMemFAddr | ImplicitOps, em_call_far),
- I(SrcMem | NearBranch, em_jmp_abs),
- I(SrcMemFAddr | ImplicitOps, em_jmp_far),
+ I(SrcMem | NearBranch | IsBranch, em_call_near_abs),
+ I(SrcMemFAddr | ImplicitOps | IsBranch, em_call_far),
+ I(SrcMem | NearBranch | IsBranch, em_jmp_abs),
+ I(SrcMemFAddr | ImplicitOps | IsBranch, em_jmp_far),
I(SrcMem | Stack | TwoMemOp, em_push), D(Undefined),
};
@@ -4577,7 +4571,7 @@ static const struct opcode opcode_table[256] = {
I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
/* 0x70 - 0x7F */
- X16(D(SrcImmByte | NearBranch)),
+ X16(D(SrcImmByte | NearBranch | IsBranch)),
/* 0x80 - 0x87 */
G(ByteOp | DstMem | SrcImm, group1),
G(DstMem | SrcImm, group1),
@@ -4596,7 +4590,7 @@ static const struct opcode opcode_table[256] = {
DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
/* 0x98 - 0x9F */
D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
- I(SrcImmFAddr | No64, em_call_far), N,
+ I(SrcImmFAddr | No64 | IsBranch, em_call_far), N,
II(ImplicitOps | Stack, em_pushf, pushf),
II(ImplicitOps | Stack, em_popf, popf),
I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
@@ -4616,17 +4610,19 @@ static const struct opcode opcode_table[256] = {
X8(I(DstReg | SrcImm64 | Mov, em_mov)),
/* 0xC0 - 0xC7 */
G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
- I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
- I(ImplicitOps | NearBranch, em_ret),
+ I(ImplicitOps | NearBranch | SrcImmU16 | IsBranch, em_ret_near_imm),
+ I(ImplicitOps | NearBranch | IsBranch, em_ret),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
G(ByteOp, group11), G(0, group11),
/* 0xC8 - 0xCF */
- I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
- I(ImplicitOps | SrcImmU16, em_ret_far_imm),
- I(ImplicitOps, em_ret_far),
- D(ImplicitOps), DI(SrcImmByte, intn),
- D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
+ I(Stack | SrcImmU16 | Src2ImmByte | IsBranch, em_enter),
+ I(Stack | IsBranch, em_leave),
+ I(ImplicitOps | SrcImmU16 | IsBranch, em_ret_far_imm),
+ I(ImplicitOps | IsBranch, em_ret_far),
+ D(ImplicitOps | IsBranch), DI(SrcImmByte | IsBranch, intn),
+ D(ImplicitOps | No64 | IsBranch),
+ II(ImplicitOps | IsBranch, em_iret, iret),
/* 0xD0 - 0xD7 */
G(Src2One | ByteOp, group2), G(Src2One, group2),
G(Src2CL | ByteOp, group2), G(Src2CL, group2),
@@ -4637,14 +4633,15 @@ static const struct opcode opcode_table[256] = {
/* 0xD8 - 0xDF */
N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
/* 0xE0 - 0xE7 */
- X3(I(SrcImmByte | NearBranch, em_loop)),
- I(SrcImmByte | NearBranch, em_jcxz),
+ X3(I(SrcImmByte | NearBranch | IsBranch, em_loop)),
+ I(SrcImmByte | NearBranch | IsBranch, em_jcxz),
I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
/* 0xE8 - 0xEF */
- I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
- I(SrcImmFAddr | No64, em_jmp_far),
- D(SrcImmByte | ImplicitOps | NearBranch),
+ I(SrcImm | NearBranch | IsBranch, em_call),
+ D(SrcImm | ImplicitOps | NearBranch | IsBranch),
+ I(SrcImmFAddr | No64 | IsBranch, em_jmp_far),
+ D(SrcImmByte | ImplicitOps | NearBranch | IsBranch),
I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
/* 0xF0 - 0xF7 */
@@ -4660,7 +4657,7 @@ static const struct opcode opcode_table[256] = {
static const struct opcode twobyte_table[256] = {
/* 0x00 - 0x0F */
G(0, group6), GD(0, &group7), N, N,
- N, I(ImplicitOps | EmulateOnUD, em_syscall),
+ N, I(ImplicitOps | EmulateOnUD | IsBranch, em_syscall),
II(ImplicitOps | Priv, em_clts, clts), N,
DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
@@ -4691,8 +4688,8 @@ static const struct opcode twobyte_table[256] = {
IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
II(ImplicitOps | Priv, em_rdmsr, rdmsr),
IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
- I(ImplicitOps | EmulateOnUD, em_sysenter),
- I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
+ I(ImplicitOps | EmulateOnUD | IsBranch, em_sysenter),
+ I(ImplicitOps | Priv | EmulateOnUD | IsBranch, em_sysexit),
N, N,
N, N, N, N, N, N, N, N,
/* 0x40 - 0x4F */
@@ -4710,7 +4707,7 @@ static const struct opcode twobyte_table[256] = {
N, N, N, N,
N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
/* 0x80 - 0x8F */
- X16(D(SrcImm | NearBranch)),
+ X16(D(SrcImm | NearBranch | IsBranch)),
/* 0x90 - 0x9F */
X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
/* 0xA0 - 0xA7 */
@@ -5224,6 +5221,8 @@ done_prefixes:
ctxt->d |= opcode.flags;
}
+ ctxt->is_branch = opcode.flags & IsBranch;
+
/* Unrecognised? */
if (ctxt->d == 0)
return EMULATION_FAILED;
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 8d8c1cc7cb53..6e38a7d22e97 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -164,7 +164,7 @@ static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
if (vpidx >= KVM_MAX_VCPUS)
return NULL;
@@ -1716,7 +1716,8 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask(
{
struct kvm_hv *hv = to_kvm_hv(kvm);
struct kvm_vcpu *vcpu;
- int i, bank, sbank = 0;
+ int bank, sbank = 0;
+ unsigned long i;
memset(vp_bitmap, 0,
KVM_HV_MAX_SPARSE_VCPU_SET_BITS * sizeof(*vp_bitmap));
@@ -1863,7 +1864,7 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
.vector = vector
};
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
@@ -2519,6 +2520,8 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
case HYPERV_CPUID_NESTED_FEATURES:
ent->eax = evmcs_ver;
+ if (evmcs_ver)
+ ent->eax |= HV_X64_NESTED_MSR_BITMAP;
break;
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 5a69cce4d72d..0b65a764ed3a 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -242,7 +242,7 @@ static void pit_do_work(struct kthread_work *work)
struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
struct kvm *kvm = pit->kvm;
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
struct kvm_kpit_state *ps = &pit->pit_state;
if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0))
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 0b80263d46d8..814064d06016 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -50,7 +50,7 @@ static void pic_unlock(struct kvm_pic *s)
{
bool wakeup = s->wakeup_needed;
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
s->wakeup_needed = false;
@@ -270,7 +270,8 @@ int kvm_pic_read_irq(struct kvm *kvm)
static void kvm_pic_reset(struct kvm_kpic_state *s)
{
- int irq, i;
+ int irq;
+ unsigned long i;
struct kvm_vcpu *vcpu;
u8 edge_irr = s->irr & ~s->elcr;
bool found = false;
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 816a82515dcd..decfa36b7891 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -149,7 +149,7 @@ void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
if (RTC_GSI >= IOAPIC_NUM_PINS)
return;
@@ -184,7 +184,7 @@ static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
static void ioapic_lazy_update_eoi(struct kvm_ioapic *ioapic, int irq)
{
- int i;
+ unsigned long i;
struct kvm_vcpu *vcpu;
union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index d5b72a08e566..6e0dab04320e 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -24,6 +24,7 @@
#include "hyperv.h"
#include "x86.h"
+#include "xen.h"
static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
@@ -45,9 +46,9 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
struct kvm_lapic_irq *irq, struct dest_map *dest_map)
{
- int i, r = -1;
+ int r = -1;
struct kvm_vcpu *vcpu, *lowest = NULL;
- unsigned long dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
+ unsigned long i, dest_vcpu_bitmap[BITS_TO_LONGS(KVM_MAX_VCPUS)];
unsigned int dest_vcpus = 0;
if (kvm_irq_delivery_to_apic_fast(kvm, src, irq, &r, dest_map))
@@ -175,6 +176,13 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
return r;
break;
+#ifdef CONFIG_KVM_XEN
+ case KVM_IRQ_ROUTING_XEN_EVTCHN:
+ if (!level)
+ return -1;
+
+ return kvm_xen_set_evtchn_fast(e, kvm);
+#endif
default:
break;
}
@@ -310,6 +318,10 @@ int kvm_set_routing_entry(struct kvm *kvm,
e->hv_sint.vcpu = ue->u.hv_sint.vcpu;
e->hv_sint.sint = ue->u.hv_sint.sint;
break;
+#ifdef CONFIG_KVM_XEN
+ case KVM_IRQ_ROUTING_XEN_EVTCHN:
+ return kvm_xen_setup_evtchn(kvm, e, ue);
+#endif
default:
return -EINVAL;
}
@@ -320,7 +332,8 @@ int kvm_set_routing_entry(struct kvm *kvm,
bool kvm_intr_is_single_vcpu(struct kvm *kvm, struct kvm_lapic_irq *irq,
struct kvm_vcpu **dest_vcpu)
{
- int i, r = 0;
+ int r = 0;
+ unsigned long i;
struct kvm_vcpu *vcpu;
if (kvm_intr_is_single_vcpu_fast(kvm, irq, dest_vcpu))
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 90e1ffdc05b7..3febc342360c 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -9,6 +9,12 @@
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
| X86_CR4_OSXMMEXCPT | X86_CR4_PGE | X86_CR4_TSD | X86_CR4_FSGSBASE)
+#define X86_CR0_PDPTR_BITS (X86_CR0_CD | X86_CR0_NW | X86_CR0_PG)
+#define X86_CR4_TLBFLUSH_BITS (X86_CR4_PGE | X86_CR4_PCIDE | X86_CR4_PAE | X86_CR4_SMEP)
+#define X86_CR4_PDPTR_BITS (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP)
+
+static_assert(!(KVM_POSSIBLE_CR0_GUEST_BITS & X86_CR0_PDPTR_BITS));
+
#define BUILD_KVM_GPR_ACCESSORS(lname, uname) \
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{ \
@@ -37,6 +43,13 @@ BUILD_KVM_GPR_ACCESSORS(r14, R14)
BUILD_KVM_GPR_ACCESSORS(r15, R15)
#endif
+/*
+ * avail dirty
+ * 0 0 register in VMCS/VMCB
+ * 0 1 *INVALID*
+ * 1 0 register in vcpu->arch
+ * 1 1 register in vcpu->arch, needs to be stored back
+ */
static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{
@@ -55,13 +68,6 @@ static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}
-static inline void kvm_register_clear_available(struct kvm_vcpu *vcpu,
- enum kvm_reg reg)
-{
- __clear_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
- __clear_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
-}
-
static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
enum kvm_reg reg)
{
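The new table documents the caching protocol: read accessors must consult the avail bit before trusting vcpu->arch.regs, and writers set dirty so the value is flushed back to the VMCS/VMCB. A condensed sketch of a conforming read path (kvm_register_read_raw() in the real code is very close to this; treat the exact shape as illustrative):

	static inline unsigned long sketch_register_read(struct kvm_vcpu *vcpu,
							 enum kvm_reg reg)
	{
		if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
			return 0;

		/* avail == 0: the value still lives in the VMCS/VMCB, refill. */
		if (!kvm_register_is_available(vcpu, reg))
			static_call(kvm_x86_cache_reg)(vcpu, reg);

		/* avail == 1: the cached copy is authoritative. */
		return vcpu->arch.regs[reg];
	}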
diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
index 68b420289d7e..39eded2426ff 100644
--- a/arch/x86/kvm/kvm_emulate.h
+++ b/arch/x86/kvm/kvm_emulate.h
@@ -369,6 +369,7 @@ struct x86_emulate_ctxt {
struct fetch_cache fetch;
struct read_cache io_read;
struct read_cache mem_read;
+ bool is_branch;
};
/* Repeat String Operation Prefix */
diff --git a/arch/x86/kvm/kvm_onhyperv.c b/arch/x86/kvm/kvm_onhyperv.c
index c7db2df50a7a..b469f45e3fe4 100644
--- a/arch/x86/kvm/kvm_onhyperv.c
+++ b/arch/x86/kvm/kvm_onhyperv.c
@@ -33,7 +33,8 @@ int hv_remote_flush_tlb_with_range(struct kvm *kvm,
{
struct kvm_arch *kvm_arch = &kvm->arch;
struct kvm_vcpu *vcpu;
- int ret = 0, i, nr_unique_valid_roots;
+ int ret = 0, nr_unique_valid_roots;
+ unsigned long i;
hpa_t root;
spin_lock(&kvm_arch->hv_root_tdp_lock);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index f206fc35deff..9322e6340a74 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -185,7 +185,7 @@ void kvm_recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
u32 max_id = 255; /* enough space for any xAPIC ID */
/* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map. */
@@ -673,35 +673,34 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
-static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
-{
- u8 val;
- if (pv_eoi_get_user(vcpu, &val) < 0) {
- printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
- (unsigned long long)vcpu->arch.pv_eoi.msr_val);
- return false;
- }
- return val & KVM_PV_EOI_ENABLED;
-}
-
static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
- if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0) {
- printk(KERN_WARNING "Can't set EOI MSR value: 0x%llx\n",
- (unsigned long long)vcpu->arch.pv_eoi.msr_val);
+ if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
return;
- }
+
__set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}
-static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
+static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
{
- if (pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0) {
- printk(KERN_WARNING "Can't clear EOI MSR value: 0x%llx\n",
- (unsigned long long)vcpu->arch.pv_eoi.msr_val);
- return;
- }
+ u8 val;
+
+ if (pv_eoi_get_user(vcpu, &val) < 0)
+ return false;
+
+ val &= KVM_PV_EOI_ENABLED;
+
+ if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
+ return false;
+
+ /*
+ * Clear pending bit in any case: it will be set again on vmentry.
+ * While this might not be ideal from a performance point of view,
+ * this makes sure pv eoi is only enabled when we know it's safe.
+ */
__clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
+
+ return val;
}
static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
@@ -1097,11 +1096,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
apic->regs + APIC_TMR);
}
- if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) {
- kvm_lapic_set_irr(vector, apic);
- kvm_make_request(KVM_REQ_EVENT, vcpu);
- kvm_vcpu_kick(vcpu);
- }
+ static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
+ trig_mode, vector);
break;
case APIC_DM_REMRD:
@@ -1172,8 +1168,8 @@ void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
struct kvm_lapic *src = NULL;
struct kvm_apic_map *map;
struct kvm_vcpu *vcpu;
- unsigned long bitmap;
- int i, vcpu_idx;
+ unsigned long bitmap, i;
+ int vcpu_idx;
bool ret;
rcu_read_lock();
@@ -1931,7 +1927,7 @@ void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
/* If the preempt notifier has already run, it also called apic_timer_expired */
if (!apic->lapic_timer.hv_timer_in_use)
goto out;
- WARN_ON(rcuwait_active(&vcpu->wait));
+ WARN_ON(kvm_vcpu_is_blocking(vcpu));
apic_timer_expired(apic, false);
cancel_hv_timer(apic);
@@ -1948,7 +1944,6 @@ void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
{
restart_apic_timer(vcpu->arch.apic);
}
-EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_hv_timer);
void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
{
@@ -1960,7 +1955,6 @@ void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
start_sw_timer(apic);
preempt_enable();
}
-EXPORT_SYMBOL_GPL(kvm_lapic_switch_to_sw_timer);
void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
{
@@ -2312,7 +2306,12 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
apic->irr_pending = true;
apic->isr_count = 1;
} else {
- apic->irr_pending = (apic_search_irr(apic) != -1);
+ /*
+ * Don't clear irr_pending, searching the IRR can race with
+ * updates from the CPU as APICv is still active from hardware's
+ * perspective. The flag will be cleared as appropriate when
+ * KVM injects the interrupt.
+ */
apic->isr_count = count_vectors(apic->regs + APIC_ISR);
}
}
@@ -2629,7 +2628,7 @@ int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
kvm_apic_set_version(vcpu);
apic_update_ppr(apic);
- hrtimer_cancel(&apic->lapic_timer.timer);
+ cancel_apic_timer(apic);
apic->lapic_timer.expired_tscdeadline = 0;
apic_update_lvtt(apic);
apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
@@ -2677,7 +2676,6 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
struct kvm_lapic *apic)
{
- bool pending;
int vector;
/*
* PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
@@ -2691,14 +2689,8 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
* -> host enabled PV EOI, guest executed EOI.
*/
BUG_ON(!pv_eoi_enabled(vcpu));
- pending = pv_eoi_get_pending(vcpu);
- /*
- * Clear pending bit in any case: it will be set again on vmentry.
- * While this might not be ideal from performance point of view,
- * this makes sure pv eoi is only enabled when we know it's safe.
- */
- pv_eoi_clr_pending(vcpu);
- if (pending)
+
+ if (pv_eoi_test_and_clr_pending(vcpu))
return;
vector = apic_set_eoi(apic);
trace_kvm_pv_eoi(apic, vector);
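Folding the old get/clear pair into pv_eoi_test_and_clr_pending() closes the read-then-write window and drops the unratelimited printk()s on guest-memory access failures. The caller's contract, restated: a true return means the guest-visible PV EOI byte was still set, i.e. the guest has not EOI'd, so KVM must leave the interrupt in service; false means the guest already cleared the byte (its paravirtual EOI), so KVM retires the interrupt itself:

	/* Illustrative restatement of apic_sync_pv_eoi_from_guest()'s tail. */
	if (pv_eoi_test_and_clr_pending(vcpu))
		return;				/* byte still set: no EOI from the guest yet */

	vector = apic_set_eoi(apic);		/* byte cleared: fold the PV EOI in */
	trace_kvm_pv_eoi(apic, vector);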
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 9ae6168d381e..e9fbb2c8bbe2 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -71,7 +71,8 @@ void kvm_init_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
- bool accessed_dirty, gpa_t new_eptp);
+ int huge_page_level, bool accessed_dirty,
+ gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
u64 fault_address, char *insn, int insn_len);
@@ -351,4 +352,17 @@ static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
atomic64_add(count, &kvm->stat.pages[level - 1]);
}
+
+gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
+ struct x86_exception *exception);
+
+static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *mmu,
+ gpa_t gpa, u32 access,
+ struct x86_exception *exception)
+{
+ if (mmu != &vcpu->arch.nested_mmu)
+ return gpa;
+ return translate_nested_gpa(vcpu, gpa, access, exception);
+}
#endif
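kvm_translate_gpa() replaces the per-MMU ->translate_gpa callback with a direct test: only walks done through vcpu->arch.nested_mmu need the extra L2 GPA to L1 GPA step; every other MMU already yields L1-physical addresses, so the identity translation is inlined. A call site, as seen in the paging_tmpl.h hunks below:

	/* GPA of the next guest page-table level, from L1's point of view. */
	real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(table_gfn),
				     nested_access, &walker->fault);
	if (real_gpa == UNMAPPED_GVA)
		return 0;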
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index fcdf3f8bb59a..593093b52395 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -335,12 +335,6 @@ static bool check_mmio_spte(struct kvm_vcpu *vcpu, u64 spte)
return likely(kvm_gen == spte_gen);
}
-static gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
- struct x86_exception *exception)
-{
- return gpa;
-}
-
static int is_cpuid_PSE36(void)
{
return 1;
@@ -1454,7 +1448,7 @@ static bool kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
{
u64 *sptep;
struct rmap_iterator iter;
- int need_flush = 0;
+ bool need_flush = false;
u64 new_spte;
kvm_pfn_t new_pfn;
@@ -1466,7 +1460,7 @@ restart:
rmap_printk("spte %p %llx gfn %llx (%d)\n",
sptep, *sptep, gfn, level);
- need_flush = 1;
+ need_flush = true;
if (pte_write(pte)) {
pte_list_remove(kvm, rmap_head, sptep);
@@ -1482,7 +1476,7 @@ restart:
if (need_flush && kvm_available_flush_tlb_with_range()) {
kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);
- return 0;
+ return false;
}
return need_flush;
@@ -1623,8 +1617,8 @@ static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
for_each_rmap_spte(rmap_head, &iter, sptep)
if (is_accessed_spte(*sptep))
- return 1;
- return 0;
+ return true;
+ return false;
}
#define RMAP_RECYCLE_THRESHOLD 1000
@@ -2086,10 +2080,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
role = vcpu->arch.mmu->mmu_role.base;
role.level = level;
role.direct = direct;
- if (role.direct)
- role.gpte_is_8_bytes = true;
role.access = access;
- if (!direct_mmu && vcpu->arch.mmu->root_level <= PT32_ROOT_LEVEL) {
+ if (role.has_4_byte_gpte) {
quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
role.quadrant = quadrant;
@@ -2565,10 +2557,10 @@ static int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
return r;
}
-static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static void kvm_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
{
trace_kvm_mmu_unsync_page(sp);
- ++vcpu->kvm->stat.mmu_unsync;
+ ++kvm->stat.mmu_unsync;
sp->unsync = 1;
kvm_mmu_mark_parents_unsync(sp);
@@ -2580,7 +2572,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
* were marked unsync (or if there is no shadow page), -EPERM if the SPTE must
* be write-protected.
*/
-int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
+int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
gfn_t gfn, bool can_unsync, bool prefetch)
{
struct kvm_mmu_page *sp;
@@ -2591,7 +2583,7 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
* track machinery is used to write-protect upper-level shadow pages,
* i.e. this guards the role.level == 4K assertion below!
*/
- if (kvm_slot_page_track_is_active(vcpu, slot, gfn, KVM_PAGE_TRACK_WRITE))
+ if (kvm_slot_page_track_is_active(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE))
return -EPERM;
/*
@@ -2600,7 +2592,7 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
* that case, KVM must complete emulation of the guest TLB flush before
* allowing shadow pages to become unsync (writable by the guest).
*/
- for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
+ for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
if (!can_unsync)
return -EPERM;
@@ -2619,7 +2611,7 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
*/
if (!locked) {
locked = true;
- spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
+ spin_lock(&kvm->arch.mmu_unsync_pages_lock);
/*
* Recheck after taking the spinlock, a different vCPU
@@ -2634,10 +2626,10 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
}
WARN_ON(sp->role.level != PG_LEVEL_4K);
- kvm_unsync_page(vcpu, sp);
+ kvm_unsync_page(kvm, sp);
}
if (locked)
- spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
+ spin_unlock(&kvm->arch.mmu_unsync_pages_lock);
/*
* We need to ensure that the marking of unsync pages is visible
@@ -3409,7 +3401,7 @@ static int mmu_first_shadow_root_alloc(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
- int r = 0, i;
+ int r = 0, i, bkt;
/*
* Check if this is the first shadow root being allocated before
@@ -3434,7 +3426,7 @@ static int mmu_first_shadow_root_alloc(struct kvm *kvm)
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
slots = __kvm_memslots(kvm, i);
- kvm_for_each_memslot(slot, slots) {
+ kvm_for_each_memslot(slot, bkt, slots) {
/*
* Both of these functions are no-ops if the target is
* already allocated, so unconditionally calling both
@@ -3734,21 +3726,13 @@ void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu)
kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, roots_to_free);
}
-static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gpa_t vaddr,
- u32 access, struct x86_exception *exception)
+static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ gpa_t vaddr, u32 access,
+ struct x86_exception *exception)
{
if (exception)
exception->error_code = 0;
- return vaddr;
-}
-
-static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gpa_t vaddr,
- u32 access,
- struct x86_exception *exception)
-{
- if (exception)
- exception->error_code = 0;
- return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access, exception);
+ return kvm_translate_gpa(vcpu, mmu, vaddr, access, exception);
}
static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
@@ -3888,7 +3872,7 @@ static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
* guest is writing the page which is write tracked which can
* not be fixed by page fault handler.
*/
- if (kvm_slot_page_track_is_active(vcpu, fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE))
+ if (kvm_slot_page_track_is_active(vcpu->kvm, fault->slot, fault->gfn, KVM_PAGE_TRACK_WRITE))
return true;
return false;
@@ -4388,22 +4372,28 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
static void
__reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
- u64 pa_bits_rsvd, bool execonly)
+ u64 pa_bits_rsvd, bool execonly, int huge_page_level)
{
u64 high_bits_rsvd = pa_bits_rsvd & rsvd_bits(0, 51);
+ u64 large_1g_rsvd = 0, large_2m_rsvd = 0;
u64 bad_mt_xwr;
+ if (huge_page_level < PG_LEVEL_1G)
+ large_1g_rsvd = rsvd_bits(7, 7);
+ if (huge_page_level < PG_LEVEL_2M)
+ large_2m_rsvd = rsvd_bits(7, 7);
+
rsvd_check->rsvd_bits_mask[0][4] = high_bits_rsvd | rsvd_bits(3, 7);
rsvd_check->rsvd_bits_mask[0][3] = high_bits_rsvd | rsvd_bits(3, 7);
- rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6);
- rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);
+ rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | large_1g_rsvd;
+ rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6) | large_2m_rsvd;
rsvd_check->rsvd_bits_mask[0][0] = high_bits_rsvd;
/* large page */
rsvd_check->rsvd_bits_mask[1][4] = rsvd_check->rsvd_bits_mask[0][4];
rsvd_check->rsvd_bits_mask[1][3] = rsvd_check->rsvd_bits_mask[0][3];
- rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29);
- rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20);
+ rsvd_check->rsvd_bits_mask[1][2] = high_bits_rsvd | rsvd_bits(12, 29) | large_1g_rsvd;
+ rsvd_check->rsvd_bits_mask[1][1] = high_bits_rsvd | rsvd_bits(12, 20) | large_2m_rsvd;
rsvd_check->rsvd_bits_mask[1][0] = rsvd_check->rsvd_bits_mask[0][0];
bad_mt_xwr = 0xFFull << (2 * 8); /* bits 3..5 must not be 2 */
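A worked instance of the new huge_page_level parameter: with huge_page_level == PG_LEVEL_2M (1GiB EPT mappings unsupported or disallowed), large_1g_rsvd = rsvd_bits(7, 7) = BIT_ULL(7) and large_2m_rsvd = 0, so the level-3 and level-2 masks above evaluate to:

	/* PDPTE (level 3): bit 7 (page size) is reserved, no 1GiB pages. */
	rsvd_check->rsvd_bits_mask[0][2] = high_bits_rsvd | rsvd_bits(3, 6) | BIT_ULL(7);
	/* PDE (level 2): bit 7 stays legal, 2MiB pages still allowed. */
	rsvd_check->rsvd_bits_mask[0][1] = high_bits_rsvd | rsvd_bits(3, 6);

A nested guest that sets bit 7 in a PDPTE is then caught by the walker's reserved-bit check instead of silently being granted a 1GiB mapping.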
@@ -4419,10 +4409,11 @@ __reset_rsvds_bits_mask_ept(struct rsvd_bits_validate *rsvd_check,
}
static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
- struct kvm_mmu *context, bool execonly)
+ struct kvm_mmu *context, bool execonly, int huge_page_level)
{
__reset_rsvds_bits_mask_ept(&context->guest_rsvd_check,
- vcpu->arch.reserved_gpa_bits, execonly);
+ vcpu->arch.reserved_gpa_bits, execonly,
+ huge_page_level);
}
static inline u64 reserved_hpa_bits(void)
@@ -4498,7 +4489,8 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
false, true);
else
__reset_rsvds_bits_mask_ept(shadow_zero_check,
- reserved_hpa_bits(), false);
+ reserved_hpa_bits(), false,
+ max_huge_page_level);
if (!shadow_me_mask)
return;
@@ -4518,7 +4510,8 @@ reset_ept_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
struct kvm_mmu *context, bool execonly)
{
__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
- reserved_hpa_bits(), execonly);
+ reserved_hpa_bits(), execonly,
+ max_huge_page_level);
}
#define BYTE_MASK(access) \
@@ -4767,7 +4760,7 @@ kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
role.base.ad_disabled = (shadow_accessed_mask == 0);
role.base.level = kvm_mmu_get_tdp_level(vcpu);
role.base.direct = true;
- role.base.gpte_is_8_bytes = true;
+ role.base.has_4_byte_gpte = false;
return role;
}
@@ -4812,7 +4805,7 @@ kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
- role.base.gpte_is_8_bytes = ____is_cr0_pg(regs) && ____is_cr4_pae(regs);
+ role.base.has_4_byte_gpte = ____is_cr0_pg(regs) && !____is_cr4_pae(regs);
return role;
}
@@ -4911,7 +4904,7 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
role.base.smm = vcpu->arch.root_mmu.mmu_role.base.smm;
role.base.level = level;
- role.base.gpte_is_8_bytes = true;
+ role.base.has_4_byte_gpte = false;
role.base.direct = false;
role.base.ad_disabled = !accessed_dirty;
role.base.guest_mode = true;
@@ -4926,7 +4919,8 @@ kvm_calc_shadow_ept_root_page_role(struct kvm_vcpu *vcpu, bool accessed_dirty,
}
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
- bool accessed_dirty, gpa_t new_eptp)
+ int huge_page_level, bool accessed_dirty,
+ gpa_t new_eptp)
{
struct kvm_mmu *context = &vcpu->arch.guest_mmu;
u8 level = vmx_eptp_page_walk_level(new_eptp);
@@ -4953,7 +4947,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
update_permission_bitmask(context, true);
context->pkru_mask = 0;
- reset_rsvds_bits_mask_ept(vcpu, context, execonly);
+ reset_rsvds_bits_mask_ept(vcpu, context, execonly, huge_page_level);
reset_ept_shadow_zero_bits_mask(vcpu, context, execonly);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
@@ -5017,13 +5011,13 @@ static void init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
* the gva_to_gpa functions between mmu and nested_mmu are swapped.
*/
if (!is_paging(vcpu))
- g_context->gva_to_gpa = nonpaging_gva_to_gpa_nested;
+ g_context->gva_to_gpa = nonpaging_gva_to_gpa;
else if (is_long_mode(vcpu))
- g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
+ g_context->gva_to_gpa = paging64_gva_to_gpa;
else if (is_pae(vcpu))
- g_context->gva_to_gpa = paging64_gva_to_gpa_nested;
+ g_context->gva_to_gpa = paging64_gva_to_gpa;
else
- g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
+ g_context->gva_to_gpa = paging32_gva_to_gpa;
reset_guest_paging_metadata(vcpu, g_context);
}
@@ -5188,7 +5182,7 @@ static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa,
gpa, bytes, sp->role.word);
offset = offset_in_page(gpa);
- pte_size = sp->role.gpte_is_8_bytes ? 8 : 4;
+ pte_size = sp->role.has_4_byte_gpte ? 4 : 8;
/*
* Sometimes, the OS only writes the last byte to update status
@@ -5212,7 +5206,7 @@ static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte)
page_offset = offset_in_page(gpa);
level = sp->role.level;
*nspte = 1;
- if (!sp->role.gpte_is_8_bytes) {
+ if (sp->role.has_4_byte_gpte) {
page_offset <<= 1; /* 32->64 */
/*
* A 32-bit pde maps 4MB while the shadow pdes map
@@ -5524,10 +5518,13 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
mmu->root_hpa = INVALID_PAGE;
mmu->root_pgd = 0;
- mmu->translate_gpa = translate_gpa;
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
mmu->prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
+ /* vcpu->arch.guest_mmu isn't used when !tdp_enabled. */
+ if (!tdp_enabled && mmu == &vcpu->arch.guest_mmu)
+ return 0;
+
/*
* When using PAE paging, the four PDPTEs are treated as 'root' pages,
* while the PDP table is a per-vCPU construct that's allocated at MMU
@@ -5537,7 +5534,7 @@ static int __kvm_mmu_create(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
* generally doesn't use PAE paging and can skip allocating the PDP
* table. The main exception, handled here, is SVM's 32-bit NPT. The
* other exception is for shadowing L1's 32-bit or PAE NPT on 64-bit
- * KVM; that horror is handled on-demand by mmu_alloc_shadow_roots().
+ * KVM; that horror is handled on-demand by mmu_alloc_special_roots().
*/
if (tdp_enabled && kvm_mmu_get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
return 0;
@@ -5582,8 +5579,6 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
vcpu->arch.mmu = &vcpu->arch.root_mmu;
vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
- vcpu->arch.nested_mmu.translate_gpa = translate_nested_gpa;
-
ret = __kvm_mmu_create(vcpu, &vcpu->arch.guest_mmu);
if (ret)
return ret;
@@ -5742,6 +5737,7 @@ static bool __kvm_zap_rmaps(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
{
const struct kvm_memory_slot *memslot;
struct kvm_memslots *slots;
+ struct kvm_memslot_iter iter;
bool flush = false;
gfn_t start, end;
int i;
@@ -5751,13 +5747,16 @@ static bool __kvm_zap_rmaps(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
slots = __kvm_memslots(kvm, i);
- kvm_for_each_memslot(memslot, slots) {
+
+ kvm_for_each_memslot_in_gfn_range(&iter, slots, gfn_start, gfn_end) {
+ memslot = iter.slot;
start = max(gfn_start, memslot->base_gfn);
end = min(gfn_end, memslot->base_gfn + memslot->npages);
- if (start >= end)
+ if (WARN_ON_ONCE(start >= end))
continue;
flush = slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
+
PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
start, end - 1, true, flush);
}
@@ -5775,6 +5774,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
bool flush;
int i;
+ if (WARN_ON_ONCE(gfn_end <= gfn_start))
+ return;
+
write_lock(&kvm->mmu_lock);
kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
@@ -5824,15 +5826,27 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
}
/*
- * We can flush all the TLBs out of the mmu lock without TLB
- * corruption since we just change the spte from writable to
- * readonly so that we only need to care the case of changing
- * spte from present to present (changing the spte from present
- * to nonpresent will flush all the TLBs immediately), in other
- * words, the only case we care is mmu_spte_update() where we
- * have checked Host-writable | MMU-writable instead of
- * PT_WRITABLE_MASK, that means it does not depend on PT_WRITABLE_MASK
- * anymore.
+ * Flush TLBs if any SPTEs had to be write-protected to ensure that
+ * guest writes are reflected in the dirty bitmap before the memslot
+ * update completes, i.e. before enabling dirty logging is visible to
+ * userspace.
+ *
+ * Perform the TLB flush outside the mmu_lock to reduce the amount of
+ * time the lock is held. However, this does mean that another CPU can
+ * now grab mmu_lock and encounter a write-protected SPTE while CPUs
+ * still have a writable mapping for the associated GFN in their TLB.
+ *
+ * This is safe but requires KVM to be careful when making decisions
+ * based on the write-protection status of an SPTE. Specifically, KVM
+ * also write-protects SPTEs to monitor changes to guest page tables
+ * during shadow paging, and must guarantee no CPUs can write to those
+ * pages before the lock is dropped. As mentioned in the previous
+ * paragraph, a write-protected SPTE is no guarantee that a CPU cannot
+ * perform writes. So to determine if a TLB flush is truly required, KVM
+ * will clear a separate software-only bit (MMU-writable) and skip the
+ * flush if-and-only-if this bit was already clear.
+ *
+ * See DEFAULT_SPTE_MMU_WRITEABLE for more details.
*/
if (flush)
kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
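The rewritten comment is the contract for every "protect under mmu_lock, flush after unlock" sequence in this file. Its skeleton, sketched from kvm_mmu_slot_remove_write_access() itself (the function's opening lines fall outside this hunk, so the shape below is indicative):

	write_lock(&kvm->mmu_lock);
	/* Write-protect the slot's SPTEs; returns whether anything changed. */
	flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
				  start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
	write_unlock(&kvm->mmu_lock);

	/*
	 * Stale writable TLB entries may exist at this point; that is
	 * tolerated because correctness decisions key off MMU-writable,
	 * not the hardware W bit alone.
	 */
	if (flush)
		kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);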
@@ -6164,30 +6178,6 @@ out:
return ret;
}
-/*
- * Calculate mmu pages needed for kvm.
- */
-unsigned long kvm_mmu_calculate_default_mmu_pages(struct kvm *kvm)
-{
- unsigned long nr_mmu_pages;
- unsigned long nr_pages = 0;
- struct kvm_memslots *slots;
- struct kvm_memory_slot *memslot;
- int i;
-
- for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
- slots = __kvm_memslots(kvm, i);
-
- kvm_for_each_memslot(memslot, slots)
- nr_pages += memslot->npages;
- }
-
- nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
- nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
-
- return nr_mmu_pages;
-}
-
void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
kvm_mmu_unload(vcpu);
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 52c6527b1a06..da6166b5c377 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -104,7 +104,7 @@ static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
return kvm_mmu_role_as_id(sp->role);
}
-static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
+static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{
/*
* When using the EPT page-modification log, the GPAs in the CPU dirty
@@ -112,13 +112,12 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
* on write protection to record dirty pages, which bypasses PML, since
* writes now result in a vmexit. Note, the check on CPU dirty logging
* being enabled is mandatory as the bits used to denote WP-only SPTEs
- * are reserved for NPT w/ PAE (32-bit KVM).
+ * are reserved for PAE paging (32-bit KVM).
*/
- return vcpu->arch.mmu == &vcpu->arch.guest_mmu &&
- kvm_x86_ops.cpu_dirty_log_size;
+ return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
}
-int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
+int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
gfn_t gfn, bool can_unsync, bool prefetch);
void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index b8151bbca36a..de5e8e4e1aa7 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -35,7 +35,7 @@
" %snxe %sad root %u %s%c", \
__entry->mmu_valid_gen, \
__entry->gfn, role.level, \
- role.gpte_is_8_bytes ? 8 : 4, \
+ role.has_4_byte_gpte ? 4 : 8, \
role.quadrant, \
role.direct ? " direct" : "", \
access_str[role.access], \
diff --git a/arch/x86/kvm/mmu/page_track.c b/arch/x86/kvm/mmu/page_track.c
index cc4eb5b7fb76..68eb1fb548b6 100644
--- a/arch/x86/kvm/mmu/page_track.c
+++ b/arch/x86/kvm/mmu/page_track.c
@@ -173,9 +173,9 @@ EXPORT_SYMBOL_GPL(kvm_slot_page_track_remove_page);
/*
* check if the corresponding access on the specified guest page is tracked.
*/
-bool kvm_slot_page_track_is_active(struct kvm_vcpu *vcpu,
- struct kvm_memory_slot *slot, gfn_t gfn,
- enum kvm_page_track_mode mode)
+bool kvm_slot_page_track_is_active(struct kvm *kvm,
+ const struct kvm_memory_slot *slot,
+ gfn_t gfn, enum kvm_page_track_mode mode)
{
int index;
@@ -186,7 +186,7 @@ bool kvm_slot_page_track_is_active(struct kvm_vcpu *vcpu,
return false;
if (mode == KVM_PAGE_TRACK_WRITE &&
- !kvm_page_track_write_tracking_enabled(vcpu->kvm))
+ !kvm_page_track_write_tracking_enabled(kvm))
return false;
index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 708a5d297fe1..5b5bdac97c7b 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -403,9 +403,8 @@ retry_walk:
walker->table_gfn[walker->level - 1] = table_gfn;
walker->pte_gpa[walker->level - 1] = pte_gpa;
- real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(table_gfn),
- nested_access,
- &walker->fault);
+ real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(table_gfn),
+ nested_access, &walker->fault);
/*
* FIXME: This can happen if emulation (e.g. of an INS/OUTS
@@ -467,7 +466,7 @@ retry_walk:
if (PTTYPE == 32 && walker->level > PG_LEVEL_4K && is_cpuid_PSE36())
gfn += pse36_gfn_delta(pte);
- real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);
+ real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);
if (real_gpa == UNMAPPED_GVA)
return 0;
@@ -547,16 +546,6 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
access);
}
-#if PTTYPE != PTTYPE_EPT
-static int FNAME(walk_addr_nested)(struct guest_walker *walker,
- struct kvm_vcpu *vcpu, gva_t addr,
- u32 access)
-{
- return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
- addr, access);
-}
-#endif
-
static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
u64 *spte, pt_element_t gpte, bool no_dirty_log)
@@ -1000,50 +989,29 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
}
/* Note, @addr is a GPA when gva_to_gpa() translates an L2 GPA to an L1 GPA. */
-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access,
+static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+ gpa_t addr, u32 access,
struct x86_exception *exception)
{
struct guest_walker walker;
gpa_t gpa = UNMAPPED_GVA;
int r;
- r = FNAME(walk_addr)(&walker, vcpu, addr, access);
-
- if (r) {
- gpa = gfn_to_gpa(walker.gfn);
- gpa |= addr & ~PAGE_MASK;
- } else if (exception)
- *exception = walker.fault;
-
- return gpa;
-}
-
-#if PTTYPE != PTTYPE_EPT
-/* Note, gva_to_gpa_nested() is only used to translate L2 GVAs. */
-static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr,
- u32 access,
- struct x86_exception *exception)
-{
- struct guest_walker walker;
- gpa_t gpa = UNMAPPED_GVA;
- int r;
-
#ifndef CONFIG_X86_64
/* A 64-bit GVA should be impossible on 32-bit KVM. */
- WARN_ON_ONCE(vaddr >> 32);
+ WARN_ON_ONCE((addr >> 32) && mmu == vcpu->arch.walk_mmu);
#endif
- r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access);
+ r = FNAME(walk_addr_generic)(&walker, vcpu, mmu, addr, access);
if (r) {
gpa = gfn_to_gpa(walker.gfn);
- gpa |= vaddr & ~PAGE_MASK;
+ gpa |= addr & ~PAGE_MASK;
} else if (exception)
*exception = walker.fault;
return gpa;
}
-#endif
/*
* Using the cached information from sp->gfns is safe because:
diff --git a/arch/x86/kvm/mmu/spte.c b/arch/x86/kvm/mmu/spte.c
index fad546df0bba..73cfe62fdad1 100644
--- a/arch/x86/kvm/mmu/spte.c
+++ b/arch/x86/kvm/mmu/spte.c
@@ -91,7 +91,7 @@ static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
}
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
- struct kvm_memory_slot *slot,
+ const struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
u64 old_spte, bool prefetch, bool can_unsync,
bool host_writable, u64 *new_spte)
@@ -102,7 +102,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
if (sp->role.ad_disabled)
spte |= SPTE_TDP_AD_DISABLED_MASK;
- else if (kvm_vcpu_ad_need_write_protect(vcpu))
+ else if (kvm_mmu_page_ad_need_write_protect(sp))
spte |= SPTE_TDP_AD_WRPROT_ONLY_MASK;
/*
@@ -162,7 +162,7 @@ bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
* e.g. it's write-tracked (upper-level SPs) or has one or more
* shadow pages and unsync'ing pages is not allowed.
*/
- if (mmu_try_to_unsync_pages(vcpu, slot, gfn, can_unsync, prefetch)) {
+ if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) {
pgprintk("%s: found shadow page for %llx, marking ro\n",
__func__, gfn);
wrprot = true;
@@ -216,6 +216,7 @@ u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
new_spte &= ~PT_WRITABLE_MASK;
new_spte &= ~shadow_host_writable_mask;
+ new_spte &= ~shadow_mmu_writable_mask;
new_spte = mark_spte_for_access_track(new_spte);
diff --git a/arch/x86/kvm/mmu/spte.h b/arch/x86/kvm/mmu/spte.h
index cc432f9a966b..be6a007a4af3 100644
--- a/arch/x86/kvm/mmu/spte.h
+++ b/arch/x86/kvm/mmu/spte.h
@@ -60,10 +60,6 @@ static_assert(SPTE_TDP_AD_ENABLED_MASK == 0);
(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
-/* Bits 9 and 10 are ignored by all non-EPT PTEs. */
-#define DEFAULT_SPTE_HOST_WRITEABLE BIT_ULL(9)
-#define DEFAULT_SPTE_MMU_WRITEABLE BIT_ULL(10)
-
/*
* The mask/shift to use for saving the original R/X bits when marking the PTE
* as not-present for access tracking purposes. We do not save the W bit as the
@@ -79,6 +75,35 @@ static_assert(SPTE_TDP_AD_ENABLED_MASK == 0);
static_assert(!(SPTE_TDP_AD_MASK & SHADOW_ACC_TRACK_SAVED_MASK));
/*
+ * *_SPTE_HOST_WRITEABLE (aka Host-writable) indicates whether the host permits
+ * writes to the guest page mapped by the SPTE. This bit is cleared on SPTEs
+ * that map guest pages in read-only memslots and read-only VMAs.
+ *
+ * Invariants:
+ * - If Host-writable is clear, PT_WRITABLE_MASK must be clear.
+ *
+ *
+ * *_SPTE_MMU_WRITEABLE (aka MMU-writable) indicates whether the shadow MMU
+ * allows writes to the guest page mapped by the SPTE. This bit is cleared when
+ * the guest page mapped by the SPTE contains a page table that is being
+ * monitored for shadow paging. In this case the SPTE can only be made writable
+ * by unsyncing the shadow page under the mmu_lock.
+ *
+ * Invariants:
+ * - If MMU-writable is clear, PT_WRITABLE_MASK must be clear.
+ * - If MMU-writable is set, Host-writable must be set.
+ *
+ * If MMU-writable is set, PT_WRITABLE_MASK is normally set but can be cleared
+ * to track writes for dirty logging. For such SPTEs, KVM will locklessly set
+ * PT_WRITABLE_MASK upon the next write from the guest and record the write in
+ * the dirty log (see fast_page_fault()).
+ */
+
+/* Bits 9 and 10 are ignored by all non-EPT PTEs. */
+#define DEFAULT_SPTE_HOST_WRITEABLE BIT_ULL(9)
+#define DEFAULT_SPTE_MMU_WRITEABLE BIT_ULL(10)
+
+/*
* Low ignored bits are at a premium for EPT, use high ignored bits, taking care
* to not overlap the A/D type mask or the saved access bits of access-tracked
* SPTEs when A/D bits are disabled.
@@ -316,8 +341,13 @@ static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
static inline bool spte_can_locklessly_be_made_writable(u64 spte)
{
- return (spte & shadow_host_writable_mask) &&
- (spte & shadow_mmu_writable_mask);
+ if (spte & shadow_mmu_writable_mask) {
+ WARN_ON_ONCE(!(spte & shadow_host_writable_mask));
+ return true;
+ }
+
+ WARN_ON_ONCE(spte & PT_WRITABLE_MASK);
+ return false;
}
static inline u64 get_mmio_spte_generation(u64 spte)
@@ -330,7 +360,7 @@ static inline u64 get_mmio_spte_generation(u64 spte)
}
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
- struct kvm_memory_slot *slot,
+ const struct kvm_memory_slot *slot,
unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
u64 old_spte, bool prefetch, bool can_unsync,
bool host_writable, u64 *new_spte);
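The two invariants in the new comment chain into W => MMU-writable => Host-writable, which is exactly what the reworked spte_can_locklessly_be_made_writable() now asserts with its WARNs. As a standalone checker (a sketch, not kernel code):

	static void sketch_assert_spte_invariants(u64 spte)
	{
		/* PT_WRITABLE_MASK set requires MMU-writable set. */
		WARN_ON_ONCE((spte & PT_WRITABLE_MASK) &&
			     !(spte & shadow_mmu_writable_mask));

		/* MMU-writable set requires Host-writable set. */
		WARN_ON_ONCE((spte & shadow_mmu_writable_mask) &&
			     !(spte & shadow_host_writable_mask));
	}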
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 1beb4ca90560..bc9e3553fba2 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -165,7 +165,7 @@ static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
role = vcpu->arch.mmu->mmu_role.base;
role.level = level;
role.direct = true;
- role.gpte_is_8_bytes = true;
+ role.has_4_byte_gpte = false;
role.access = ACC_ALL;
role.ad_disabled = !shadow_accessed_mask;
@@ -1442,12 +1442,12 @@ static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
!is_last_spte(iter.old_spte, iter.level))
continue;
- if (!is_writable_pte(iter.old_spte))
- break;
-
new_spte = iter.old_spte &
~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
+ if (new_spte == iter.old_spte)
+ break;
+
tdp_mmu_set_spte(kvm, &iter, new_spte);
spte_set = true;
}
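The write_protect_gfn() change is subtle: the old early-out stopped as soon as the hardware W bit was clear, which skipped SPTEs whose W bit had been dropped for dirty logging but whose MMU-writable bit was still set; such SPTEs could later be made writable again locklessly, defeating the write protection. The new predicate only stops when clearing both bits is a true no-op (restated for emphasis; same logic as the hunk above):

	new_spte = iter.old_spte &
		   ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);

	/* A no-op only when *both* bits were already clear. */
	if (new_spte == iter.old_spte)
		break;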
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 09873f6488f7..b1a02993782b 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -13,6 +13,8 @@
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
+#include <linux/bsearch.h>
+#include <linux/sort.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
@@ -55,47 +57,45 @@ static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
kvm_pmu_deliver_pmi(vcpu);
}
-static void kvm_perf_overflow(struct perf_event *perf_event,
- struct perf_sample_data *data,
- struct pt_regs *regs)
+static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi)
{
- struct kvm_pmc *pmc = perf_event->overflow_handler_context;
struct kvm_pmu *pmu = pmc_to_pmu(pmc);
- if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
- __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
- kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
- }
+ /* Ignore counters that have been reprogrammed already. */
+ if (test_and_set_bit(pmc->idx, pmu->reprogram_pmi))
+ return;
+
+ __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
+ kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+
+ if (!pmc->intr)
+ return;
+
+ /*
+ * Inject PMI. If the vCPU was in guest mode during the NMI, the
+ * PMI can be injected on guest mode re-entry. Otherwise we can't
+ * be sure the vCPU wasn't executing a hlt instruction at the
+ * time of the vmexit, and it may not re-enter guest mode until
+ * woken up. So we should wake it, but that is impossible from
+ * NMI context. Do it from irq work instead.
+ */
+ if (in_pmi && !kvm_handling_nmi_from_guest(pmc->vcpu))
+ irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
+ else
+ kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
}
-static void kvm_perf_overflow_intr(struct perf_event *perf_event,
- struct perf_sample_data *data,
- struct pt_regs *regs)
+static void kvm_perf_overflow(struct perf_event *perf_event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
{
struct kvm_pmc *pmc = perf_event->overflow_handler_context;
- struct kvm_pmu *pmu = pmc_to_pmu(pmc);
- if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
- __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
- kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
-
- /*
- * Inject PMI. If vcpu was in a guest mode during NMI PMI
- * can be ejected on a guest mode re-entry. Otherwise we can't
- * be sure that vcpu wasn't executing hlt instruction at the
- * time of vmexit and is not going to re-enter guest mode until
- * woken up. So we should wake it, but this is impossible from
- * NMI context. Do it from irq work instead.
- */
- if (!kvm_is_in_guest())
- irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
- else
- kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
- }
+ __kvm_perf_overflow(pmc, true);
}
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
- unsigned config, bool exclude_user,
+ u64 config, bool exclude_user,
bool exclude_kernel, bool intr,
bool in_tx, bool in_tx_cp)
{
@@ -111,6 +111,9 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
.config = config,
};
+ if (type == PERF_TYPE_HARDWARE && config >= PERF_COUNT_HW_MAX)
+ return;
+
attr.sample_period = get_sample_period(pmc, pmc->counter);
if (in_tx)
@@ -126,7 +129,6 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
}
event = perf_event_create_kernel_counter(&attr, -1, current,
- intr ? kvm_perf_overflow_intr :
kvm_perf_overflow, pmc);
if (IS_ERR(event)) {
pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
@@ -138,6 +140,7 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
pmc_to_pmu(pmc)->event_count++;
clear_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
pmc->is_paused = false;
+ pmc->intr = intr;
}
static void pmc_pause_counter(struct kvm_pmc *pmc)
@@ -171,13 +174,17 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
return true;
}
+static int cmp_u64(const void *a, const void *b)
+{
+ return *(__u64 *)a - *(__u64 *)b;
+}
+
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
- unsigned config, type = PERF_TYPE_RAW;
- u8 event_select, unit_mask;
+ u64 config;
+ u32 type = PERF_TYPE_RAW;
struct kvm *kvm = pmc->vcpu->kvm;
struct kvm_pmu_event_filter *filter;
- int i;
bool allow_event = true;
if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
@@ -192,37 +199,29 @@ void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
if (filter) {
- for (i = 0; i < filter->nevents; i++)
- if (filter->events[i] ==
- (eventsel & AMD64_RAW_EVENT_MASK_NB))
- break;
- if (filter->action == KVM_PMU_EVENT_ALLOW &&
- i == filter->nevents)
- allow_event = false;
- if (filter->action == KVM_PMU_EVENT_DENY &&
- i < filter->nevents)
- allow_event = false;
+ __u64 key = eventsel & AMD64_RAW_EVENT_MASK_NB;
+
+ if (bsearch(&key, filter->events, filter->nevents,
+ sizeof(__u64), cmp_u64))
+ allow_event = filter->action == KVM_PMU_EVENT_ALLOW;
+ else
+ allow_event = filter->action == KVM_PMU_EVENT_DENY;
}
if (!allow_event)
return;
- event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
- unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
-
if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
ARCH_PERFMON_EVENTSEL_INV |
ARCH_PERFMON_EVENTSEL_CMASK |
HSW_IN_TX |
HSW_IN_TX_CHECKPOINTED))) {
- config = kvm_x86_ops.pmu_ops->find_arch_event(pmc_to_pmu(pmc),
- event_select,
- unit_mask);
+ config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
if (config != PERF_COUNT_HW_MAX)
type = PERF_TYPE_HARDWARE;
}
if (type == PERF_TYPE_RAW)
- config = eventsel & X86_RAW_EVENT_MASK;
+ config = eventsel & AMD64_RAW_EVENT_MASK;
if (pmc->current_config == eventsel && pmc_resume_counter(pmc))
return;
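One caution on cmp_u64() above: sort() and bsearch() expect an int whose sign reflects the ordering, and subtracting two u64 values then truncating to int can misreport it whenever the difference does not fit in 32 bits; for example 0x100000000 and 0 compare as equal. An overflow-safe variant, for illustration:

	static int cmp_u64_safe(const void *pa, const void *pb)
	{
		u64 a = *(const u64 *)pa;
		u64 b = *(const u64 *)pb;

		return (a > b) - (a < b);	/* -1, 0, or 1; never overflows */
	}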
@@ -268,7 +267,7 @@ void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
pmc->current_config = (u64)ctrl;
pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
- kvm_x86_ops.pmu_ops->find_fixed_event(idx),
+ kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc),
!(en_field & 0x2), /* exclude user */
!(en_field & 0x1), /* exclude kernel */
pmi, false, false);
@@ -490,6 +489,66 @@ void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
kvm_pmu_reset(vcpu);
}
+static void kvm_pmu_incr_counter(struct kvm_pmc *pmc)
+{
+ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+ u64 prev_count;
+
+ prev_count = pmc->counter;
+ pmc->counter = (pmc->counter + 1) & pmc_bitmask(pmc);
+
+ reprogram_counter(pmu, pmc->idx);
+ if (pmc->counter < prev_count)
+ __kvm_perf_overflow(pmc, false);
+}
+
+static inline bool eventsel_match_perf_hw_id(struct kvm_pmc *pmc,
+ unsigned int perf_hw_id)
+{
+ u64 old_eventsel = pmc->eventsel;
+ unsigned int config;
+
+ pmc->eventsel &= (ARCH_PERFMON_EVENTSEL_EVENT | ARCH_PERFMON_EVENTSEL_UMASK);
+ config = kvm_x86_ops.pmu_ops->pmc_perf_hw_id(pmc);
+ pmc->eventsel = old_eventsel;
+ return config == perf_hw_id;
+}
+
+static inline bool cpl_is_matched(struct kvm_pmc *pmc)
+{
+ bool select_os, select_user;
+ u64 config = pmc->current_config;
+
+ if (pmc_is_gp(pmc)) {
+ select_os = config & ARCH_PERFMON_EVENTSEL_OS;
+ select_user = config & ARCH_PERFMON_EVENTSEL_USR;
+ } else {
+ select_os = config & 0x1;
+ select_user = config & 0x2;
+ }
+
+ return (static_call(kvm_x86_get_cpl)(pmc->vcpu) == 0) ? select_os : select_user;
+}
+
+void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
+{
+ struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+ struct kvm_pmc *pmc;
+ int i;
+
+ for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
+ pmc = kvm_x86_ops.pmu_ops->pmc_idx_to_pmc(pmu, i);
+
+ if (!pmc || !pmc_is_enabled(pmc) || !pmc_speculative_in_use(pmc))
+ continue;
+
+ /* Ignore checks for edge detect, pin control, invert and CMASK bits */
+ if (eventsel_match_perf_hw_id(pmc, perf_hw_id) && cpl_is_matched(pmc))
+ kvm_pmu_incr_counter(pmc);
+ }
+}
+EXPORT_SYMBOL_GPL(kvm_pmu_trigger_event);
+
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
struct kvm_pmu_event_filter tmp, *filter;
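cpl_is_matched() above relies on the two counter types encoding their ring filters differently; spelled out (the fixed-counter bit meanings follow the architectural fixed-counter control layout):

	if (pmc_is_gp(pmc)) {
		/* GP counter: EVENTSEL.OS (bit 17) and EVENTSEL.USR (bit 16). */
		select_os   = config & ARCH_PERFMON_EVENTSEL_OS;
		select_user = config & ARCH_PERFMON_EVENTSEL_USR;
	} else {
		/* Fixed counter enable field: bit 0 = ring 0, bit 1 = ring 3. */
		select_os   = config & 0x1;
		select_user = config & 0x2;
	}

An emulated instruction is therefore only folded into a counter whose ring filter covers the guest's current CPL.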
@@ -521,6 +580,11 @@ int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
/* Ensure nevents can't be changed between the user copies. */
*filter = tmp;
+ /*
+ * Sort the in-kernel list so that we can search it with bsearch.
+ */
+ sort(&filter->events, filter->nevents, sizeof(__u64), cmp_u64, NULL);
+
mutex_lock(&kvm->lock);
filter = rcu_replace_pointer(kvm->arch.pmu_event_filter, filter,
mutex_is_locked(&kvm->lock));
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index 59d6b76203d5..7a7b8d5b775e 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -24,9 +24,7 @@ struct kvm_event_hw_type_mapping {
};
struct kvm_pmu_ops {
- unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
- u8 unit_mask);
- unsigned (*find_fixed_event)(int idx);
+ unsigned int (*pmc_perf_hw_id)(struct kvm_pmc *pmc);
bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
@@ -159,6 +157,7 @@ void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
+void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);
bool is_vmware_backdoor_pmc(u32 pmc_idx);
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 8f9af7b7dbbe..fb3e20791338 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -27,20 +27,6 @@
#include "irq.h"
#include "svm.h"
-#define SVM_AVIC_DOORBELL 0xc001011b
-
-#define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF)
-
-/*
- * 0xff is broadcast, so the max index allowed for physical APIC ID
- * table is 0xfe. APIC IDs above 0xff are reserved.
- */
-#define AVIC_MAX_PHYSICAL_ID_COUNT 255
-
-#define AVIC_UNACCEL_ACCESS_WRITE_MASK 1
-#define AVIC_UNACCEL_ACCESS_OFFSET_MASK 0xFF0
-#define AVIC_UNACCEL_ACCESS_VECTOR_MASK 0xFFFFFFFF
-
/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS 8
#define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1)
@@ -73,12 +59,6 @@ struct amd_svm_iommu_ir {
void *data; /* Storing pointer to struct amd_ir_data */
};
-enum avic_ipi_failure_cause {
- AVIC_IPI_FAILURE_INVALID_INT_TYPE,
- AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
- AVIC_IPI_FAILURE_INVALID_TARGET,
- AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
-};
/* Note:
* This function is called from IOMMU driver to notify
@@ -289,20 +269,44 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
return 0;
}
+void avic_ring_doorbell(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Note, the vCPU could get migrated to a different pCPU at any point,
+ * which could result in signalling the wrong/previous pCPU. But if
+ * that happens the vCPU is guaranteed to do a VMRUN (after being
+ * migrated) and thus will process pending interrupts, i.e. a doorbell
+ * is not needed (and the spurious one is harmless).
+ */
+ int cpu = READ_ONCE(vcpu->cpu);
+
+ if (cpu != get_cpu())
+ wrmsrl(MSR_AMD64_SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpu));
+ put_cpu();
+}
+
static void avic_kick_target_vcpus(struct kvm *kvm, struct kvm_lapic *source,
u32 icrl, u32 icrh)
{
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
+ /*
+ * Wake any target vCPUs that are blocking, i.e. waiting for a wake
+ * event. There's no need to signal doorbells, as hardware has handled
+ * vCPUs that were in guest at the time of the IPI, and vCPUs that have
+ * since entered the guest will have processed pending IRQs at VMRUN.
+ */
kvm_for_each_vcpu(i, vcpu, kvm) {
- bool m = kvm_apic_match_dest(vcpu, source,
- icrl & APIC_SHORT_MASK,
- GET_APIC_DEST_FIELD(icrh),
- icrl & APIC_DEST_MASK);
-
- if (m && !avic_vcpu_is_running(vcpu))
- kvm_vcpu_wake_up(vcpu);
+ if (kvm_apic_match_dest(vcpu, source, icrl & APIC_SHORT_MASK,
+ GET_APIC_DEST_FIELD(icrh),
+ icrl & APIC_DEST_MASK)) {
+ vcpu->arch.apic->irr_pending = true;
+ svm_complete_interrupt_delivery(vcpu,
+ icrl & APIC_MODE_MASK,
+ icrl & APIC_INT_LEVELTRIG,
+ icrl & APIC_VECTOR_MASK);
+ }
}
}
@@ -342,8 +346,6 @@ int avic_incomplete_ipi_interception(struct kvm_vcpu *vcpu)
avic_kick_target_vcpus(vcpu->kvm, apic, icrl, icrh);
break;
case AVIC_IPI_FAILURE_INVALID_TARGET:
- WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
- index, vcpu->vcpu_id, icrh, icrl);
break;
case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
WARN_ONCE(1, "Invalid backing page\n");
@@ -666,26 +668,6 @@ void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
return;
}
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
-{
- if (!vcpu->arch.apicv_active)
- return -1;
-
- kvm_lapic_set_irr(vec, vcpu->arch.apic);
- smp_mb__after_atomic();
-
- if (avic_vcpu_is_running(vcpu)) {
- int cpuid = vcpu->cpu;
-
- if (cpuid != get_cpu())
- wrmsrl(SVM_AVIC_DOORBELL, kvm_cpu_get_apicid(cpuid));
- put_cpu();
- } else
- kvm_vcpu_wake_up(vcpu);
-
- return 0;
-}
-
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
{
return false;
@@ -949,6 +931,8 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
int h_physical_id = kvm_cpu_get_apicid(cpu);
struct vcpu_svm *svm = to_svm(vcpu);
+ lockdep_assert_preemption_disabled();
+
/*
* Since the host physical APIC id is 8 bits,
* we can support host APIC ID upto 255.
@@ -956,19 +940,25 @@ void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
return;
+ /*
+ * No need to update anything if the vCPU is blocking, i.e. if the vCPU
+ * is being scheduled in after being preempted. The CPU entries in the
+ * Physical APIC table and IRTE are consumed iff IsRun{ning} is '1'.
+ * If the vCPU was migrated, its new CPU value will be stuffed when the
+ * vCPU unblocks.
+ */
+ if (kvm_vcpu_is_blocking(vcpu))
+ return;
+
entry = READ_ONCE(*(svm->avic_physical_id_cache));
WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
-
- entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
- if (svm->avic_is_running)
- entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
+ entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
- avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
- svm->avic_is_running);
+ avic_update_iommu_vcpu_affinity(vcpu, h_physical_id, true);
}
void avic_vcpu_put(struct kvm_vcpu *vcpu)
@@ -976,42 +966,56 @@ void avic_vcpu_put(struct kvm_vcpu *vcpu)
u64 entry;
struct vcpu_svm *svm = to_svm(vcpu);
+ lockdep_assert_preemption_disabled();
+
entry = READ_ONCE(*(svm->avic_physical_id_cache));
- if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
- avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
+
+ /* Nothing to do if IsRunning == '0' due to vCPU blocking. */
+ if (!(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK))
+ return;
+
+ avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
}
-/*
- * This function is called during VCPU halt/unhalt.
- */
-static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
+void avic_vcpu_blocking(struct kvm_vcpu *vcpu)
{
- struct vcpu_svm *svm = to_svm(vcpu);
- int cpu = get_cpu();
+ if (!kvm_vcpu_apicv_active(vcpu))
+ return;
- WARN_ON(cpu != vcpu->cpu);
- svm->avic_is_running = is_run;
+ preempt_disable();
- if (kvm_vcpu_apicv_active(vcpu)) {
- if (is_run)
- avic_vcpu_load(vcpu, cpu);
- else
- avic_vcpu_put(vcpu);
- }
- put_cpu();
-}
+ /*
+ * Unload the AVIC when the vCPU is about to block, _before_
+ * the vCPU actually blocks.
+ *
+ * Any IRQs that arrive before IsRunning=0 will not cause an
+ * incomplete IPI vmexit on the source, therefore vIRR will also
+ * be checked by kvm_vcpu_check_block() before blocking. The
+ * memory barrier implicit in set_current_state orders writing
+ * IsRunning=0 before reading the vIRR. The processor needs a
+ * matching memory barrier on interrupt delivery between writing
+ * IRR and reading IsRunning; the lack of this barrier might be
+ * the cause of errata #1235.
+ */
+ avic_vcpu_put(vcpu);
-void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
-{
- avic_set_running(vcpu, false);
+ preempt_enable();
}
-void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
+void avic_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
- if (kvm_check_request(KVM_REQ_APICV_UPDATE, vcpu))
- kvm_vcpu_update_apicv(vcpu);
- avic_set_running(vcpu, true);
+ int cpu;
+
+ if (!kvm_vcpu_apicv_active(vcpu))
+ return;
+
+ cpu = get_cpu();
+ WARN_ON(cpu != vcpu->cpu);
+
+ avic_vcpu_load(vcpu, cpu);
+
+ put_cpu();
}
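avic_ring_doorbell() centralizes the "nudge the physical CPU only if the target vCPU is in guest mode elsewhere" step, and the blocking/unblocking hooks bracket it with the IsRunning bookkeeping discussed in the comments above. The resulting fast delivery path looks roughly like this sketch, which mirrors the logic of the removed svm_deliver_avic_intr() (the predicate name is illustrative):

	kvm_lapic_set_irr(vec, vcpu->arch.apic);	/* publish the vector */
	smp_mb__after_atomic();				/* order IRR write vs. IsRunning read */

	if (target_is_running)				/* illustrative predicate */
		avic_ring_doorbell(vcpu);		/* hardware injects while in guest */
	else
		kvm_vcpu_wake_up(vcpu);			/* blocked: wake, deliver at next entry */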
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index f8b7bc04b3e7..39d280e7e80e 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -58,8 +58,9 @@ static void svm_inject_page_fault_nested(struct kvm_vcpu *vcpu, struct x86_excep
struct vcpu_svm *svm = to_svm(vcpu);
WARN_ON(!is_guest_mode(vcpu));
- if (vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
- !svm->nested.nested_run_pending) {
+ if (vmcb12_is_intercept(&svm->nested.ctl,
+ INTERCEPT_EXCEPTION_OFFSET + PF_VECTOR) &&
+ !svm->nested.nested_run_pending) {
svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + PF_VECTOR;
svm->vmcb->control.exit_code_hi = 0;
svm->vmcb->control.exit_info_1 = fault->error_code;
@@ -121,7 +122,8 @@ static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
void recalc_intercepts(struct vcpu_svm *svm)
{
- struct vmcb_control_area *c, *h, *g;
+ struct vmcb_control_area *c, *h;
+ struct vmcb_ctrl_area_cached *g;
unsigned int i;
vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
@@ -163,37 +165,6 @@ void recalc_intercepts(struct vcpu_svm *svm)
vmcb_set_intercept(c, INTERCEPT_VMSAVE);
}
-static void copy_vmcb_control_area(struct vmcb_control_area *dst,
- struct vmcb_control_area *from)
-{
- unsigned int i;
-
- for (i = 0; i < MAX_INTERCEPT; i++)
- dst->intercepts[i] = from->intercepts[i];
-
- dst->iopm_base_pa = from->iopm_base_pa;
- dst->msrpm_base_pa = from->msrpm_base_pa;
- dst->tsc_offset = from->tsc_offset;
- /* asid not copied, it is handled manually for svm->vmcb. */
- dst->tlb_ctl = from->tlb_ctl;
- dst->int_ctl = from->int_ctl;
- dst->int_vector = from->int_vector;
- dst->int_state = from->int_state;
- dst->exit_code = from->exit_code;
- dst->exit_code_hi = from->exit_code_hi;
- dst->exit_info_1 = from->exit_info_1;
- dst->exit_info_2 = from->exit_info_2;
- dst->exit_int_info = from->exit_int_info;
- dst->exit_int_info_err = from->exit_int_info_err;
- dst->nested_ctl = from->nested_ctl;
- dst->event_inj = from->event_inj;
- dst->event_inj_err = from->event_inj_err;
- dst->nested_cr3 = from->nested_cr3;
- dst->virt_ext = from->virt_ext;
- dst->pause_filter_count = from->pause_filter_count;
- dst->pause_filter_thresh = from->pause_filter_thresh;
-}
-
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
/*
@@ -203,7 +174,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
*/
int i;
- if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
+ if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
return true;
for (i = 0; i < MSRPM_OFFSETS; i++) {
@@ -250,10 +221,10 @@ static bool nested_svm_check_tlb_ctl(struct kvm_vcpu *vcpu, u8 tlb_ctl)
}
}
-static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
- struct vmcb_control_area *control)
+static bool __nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
+ struct vmcb_ctrl_area_cached *control)
{
- if (CC(!vmcb_is_intercept(control, INTERCEPT_VMRUN)))
+ if (CC(!vmcb12_is_intercept(control, INTERCEPT_VMRUN)))
return false;
if (CC(control->asid == 0))
@@ -275,9 +246,20 @@ static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
return true;
}
-static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
- struct vmcb_save_area *save)
+/* Common checks that apply to both L1 and L2 state. */
+static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu,
+ struct vmcb_save_area_cached *save)
{
+ if (CC(!(save->efer & EFER_SVME)))
+ return false;
+
+ if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
+ CC(save->cr0 & ~0xffffffffULL))
+ return false;
+
+ if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
+ return false;
+
/*
* These checks are also performed by KVM_SET_SREGS,
* except that EFER.LMA is not checked by SVM against
@@ -293,48 +275,90 @@ static bool nested_vmcb_check_cr3_cr4(struct kvm_vcpu *vcpu,
if (CC(!kvm_is_valid_cr4(vcpu, save->cr4)))
return false;
+ if (CC(!kvm_valid_efer(vcpu, save->efer)))
+ return false;
+
return true;
}
-/* Common checks that apply to both L1 and L2 state. */
-static bool nested_vmcb_valid_sregs(struct kvm_vcpu *vcpu,
- struct vmcb_save_area *save)
+static bool nested_vmcb_check_save(struct kvm_vcpu *vcpu)
{
- /*
- * FIXME: these should be done after copying the fields,
- * to avoid TOC/TOU races. For these save area checks
- * the possible damage is limited since kvm_set_cr0 and
- * kvm_set_cr4 handle failure; EFER_SVME is an exception
- * so it is force-set later in nested_prepare_vmcb_save.
- */
- if (CC(!(save->efer & EFER_SVME)))
- return false;
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb_save_area_cached *save = &svm->nested.save;
- if (CC((save->cr0 & X86_CR0_CD) == 0 && (save->cr0 & X86_CR0_NW)) ||
- CC(save->cr0 & ~0xffffffffULL))
- return false;
+ return __nested_vmcb_check_save(vcpu, save);
+}
- if (CC(!kvm_dr6_valid(save->dr6)) || CC(!kvm_dr7_valid(save->dr7)))
- return false;
+static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+ struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl;
- if (!nested_vmcb_check_cr3_cr4(vcpu, save))
- return false;
+ return __nested_vmcb_check_controls(vcpu, ctl);
+}
- if (CC(!kvm_valid_efer(vcpu, save->efer)))
- return false;
+static
+void __nested_copy_vmcb_control_to_cache(struct vmcb_ctrl_area_cached *to,
+ struct vmcb_control_area *from)
+{
+ unsigned int i;
- return true;
+ for (i = 0; i < MAX_INTERCEPT; i++)
+ to->intercepts[i] = from->intercepts[i];
+
+ to->iopm_base_pa = from->iopm_base_pa;
+ to->msrpm_base_pa = from->msrpm_base_pa;
+ to->tsc_offset = from->tsc_offset;
+ to->tlb_ctl = from->tlb_ctl;
+ to->int_ctl = from->int_ctl;
+ to->int_vector = from->int_vector;
+ to->int_state = from->int_state;
+ to->exit_code = from->exit_code;
+ to->exit_code_hi = from->exit_code_hi;
+ to->exit_info_1 = from->exit_info_1;
+ to->exit_info_2 = from->exit_info_2;
+ to->exit_int_info = from->exit_int_info;
+ to->exit_int_info_err = from->exit_int_info_err;
+ to->nested_ctl = from->nested_ctl;
+ to->event_inj = from->event_inj;
+ to->event_inj_err = from->event_inj_err;
+ to->nested_cr3 = from->nested_cr3;
+ to->virt_ext = from->virt_ext;
+ to->pause_filter_count = from->pause_filter_count;
+ to->pause_filter_thresh = from->pause_filter_thresh;
+
+ /* Copy asid here because nested_vmcb_check_controls will check it. */
+ to->asid = from->asid;
+ to->msrpm_base_pa &= ~0x0fffULL;
+ to->iopm_base_pa &= ~0x0fffULL;
}
-void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
- struct vmcb_control_area *control)
+void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
+ struct vmcb_control_area *control)
{
- copy_vmcb_control_area(&svm->nested.ctl, control);
+ __nested_copy_vmcb_control_to_cache(&svm->nested.ctl, control);
+}
- /* Copy it here because nested_svm_check_controls will check it. */
- svm->nested.ctl.asid = control->asid;
- svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL;
- svm->nested.ctl.iopm_base_pa &= ~0x0fffULL;
+static void __nested_copy_vmcb_save_to_cache(struct vmcb_save_area_cached *to,
+ struct vmcb_save_area *from)
+{
+ /*
+	 * Copy only the fields that are validated; the private copy is what
+	 * closes the TOCTOU (time-of-check/time-of-use) window.
+ */
+ to->efer = from->efer;
+ to->cr0 = from->cr0;
+ to->cr3 = from->cr3;
+ to->cr4 = from->cr4;
+
+ to->dr6 = from->dr6;
+ to->dr7 = from->dr7;
+}
+
+void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
+ struct vmcb_save_area *save)
+{
+ __nested_copy_vmcb_save_to_cache(&svm->nested.save, save);
}
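
The new cache exists to close the TOCTOU window called out in the old
FIXME: vmcb12 lives in guest memory, so the guest can flip fields between
the check and the use. Reduced to its core, with an illustrative struct
rather than the real vmcb_save_area:

	struct save_snapshot { u64 efer, cr0, cr3, cr4, dr6, dr7; };

	static bool vmrun_check(struct save_snapshot *snap,
				const struct save_snapshot *guest_vmcb12)
	{
		*snap = *guest_vmcb12;		/* 1. copy while the guest may race */
		if (!(snap->efer & EFER_SVME))	/* 2. validate the private copy */
			return false;
		/* 3. from here on, consume only *snap, never guest memory */
		return true;
	}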
/*
@@ -437,14 +461,13 @@ static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
return -EINVAL;
if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) &&
- CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3)))
+ CC(!load_pdptrs(vcpu, cr3)))
return -EINVAL;
if (!nested_npt)
kvm_mmu_new_pgd(vcpu, cr3);
vcpu->arch.cr3 = cr3;
- kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
kvm_init_mmu(vcpu);
@@ -490,15 +513,10 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED);
- /*
- * Force-set EFER_SVME even though it is checked earlier on the
- * VMCB12, because the guest can flip the bit between the check
- * and now. Clearing EFER_SVME would call svm_free_nested.
- */
- svm_set_efer(&svm->vcpu, vmcb12->save.efer | EFER_SVME);
+ svm_set_efer(&svm->vcpu, svm->nested.save.efer);
- svm_set_cr0(&svm->vcpu, vmcb12->save.cr0);
- svm_set_cr4(&svm->vcpu, vmcb12->save.cr4);
+ svm_set_cr0(&svm->vcpu, svm->nested.save.cr0);
+ svm_set_cr4(&svm->vcpu, svm->nested.save.cr4);
svm->vcpu.arch.cr2 = vmcb12->save.cr2;
@@ -513,8 +531,8 @@ static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12
/* These bits will be set properly on the first execution when new_vmc12 is true */
if (unlikely(new_vmcb12 || vmcb_is_dirty(vmcb12, VMCB_DR))) {
- svm->vmcb->save.dr7 = vmcb12->save.dr7 | DR7_FIXED_1;
- svm->vcpu.arch.dr6 = vmcb12->save.dr6 | DR6_ACTIVE_LOW;
+ svm->vmcb->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1;
+ svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW;
vmcb_mark_dirty(svm->vmcb, VMCB_DR);
}
}
@@ -628,7 +646,7 @@ int enter_svm_guest_mode(struct kvm_vcpu *vcpu, u64 vmcb12_gpa,
nested_vmcb02_prepare_control(svm);
nested_vmcb02_prepare_save(svm, vmcb12);
- ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3,
+ ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3,
nested_npt_enabled(svm), from_vmrun);
if (ret)
return ret;
@@ -678,10 +696,11 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
if (WARN_ON_ONCE(!svm->nested.initialized))
return -EINVAL;
- nested_load_control_from_vmcb12(svm, &vmcb12->control);
+ nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
+ nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
- if (!nested_vmcb_valid_sregs(vcpu, &vmcb12->save) ||
- !nested_vmcb_check_controls(vcpu, &svm->nested.ctl)) {
+ if (!nested_vmcb_check_save(vcpu) ||
+ !nested_vmcb_check_controls(vcpu)) {
vmcb12->control.exit_code = SVM_EXIT_ERR;
vmcb12->control.exit_code_hi = 0;
vmcb12->control.exit_info_1 = 0;
@@ -964,9 +983,9 @@ void svm_free_nested(struct vcpu_svm *svm)
/*
* Forcibly leave nested mode in order to be able to reset the VCPU later on.
*/
-void svm_leave_nested(struct vcpu_svm *svm)
+void svm_leave_nested(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = &svm->vcpu;
+ struct vcpu_svm *svm = to_svm(vcpu);
if (is_guest_mode(vcpu)) {
svm->nested.nested_run_pending = 0;
@@ -988,7 +1007,7 @@ static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
u32 offset, msr, value;
int write, mask;
- if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
+ if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT)))
return NESTED_EXIT_HOST;
msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
@@ -1015,7 +1034,7 @@ static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
u8 start_bit;
u64 gpa;
- if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
+ if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT)))
return NESTED_EXIT_HOST;
port = svm->vmcb->control.exit_info_1 >> 16;
@@ -1046,12 +1065,12 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
vmexit = nested_svm_intercept_ioio(svm);
break;
case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
- if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
+ if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
vmexit = NESTED_EXIT_DONE;
break;
}
case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
- if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
+ if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
vmexit = NESTED_EXIT_DONE;
break;
}
@@ -1069,7 +1088,7 @@ static int nested_svm_intercept(struct vcpu_svm *svm)
break;
}
default: {
- if (vmcb_is_intercept(&svm->nested.ctl, exit_code))
+ if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
vmexit = NESTED_EXIT_DONE;
}
}
@@ -1147,7 +1166,7 @@ static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm)
static inline bool nested_exit_on_init(struct vcpu_svm *svm)
{
- return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
+ return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT);
}
static int svm_check_nested_events(struct kvm_vcpu *vcpu)
@@ -1251,11 +1270,47 @@ void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu)
svm_write_tsc_multiplier(vcpu, vcpu->arch.tsc_scaling_ratio);
}
+/* Inverse operation of nested_copy_vmcb_control_to_cache(). asid is copied too. */
+static void nested_copy_vmcb_cache_to_control(struct vmcb_control_area *dst,
+ struct vmcb_ctrl_area_cached *from)
+{
+ unsigned int i;
+
+ memset(dst, 0, sizeof(struct vmcb_control_area));
+
+ for (i = 0; i < MAX_INTERCEPT; i++)
+ dst->intercepts[i] = from->intercepts[i];
+
+ dst->iopm_base_pa = from->iopm_base_pa;
+ dst->msrpm_base_pa = from->msrpm_base_pa;
+ dst->tsc_offset = from->tsc_offset;
+ dst->asid = from->asid;
+ dst->tlb_ctl = from->tlb_ctl;
+ dst->int_ctl = from->int_ctl;
+ dst->int_vector = from->int_vector;
+ dst->int_state = from->int_state;
+ dst->exit_code = from->exit_code;
+ dst->exit_code_hi = from->exit_code_hi;
+ dst->exit_info_1 = from->exit_info_1;
+ dst->exit_info_2 = from->exit_info_2;
+ dst->exit_int_info = from->exit_int_info;
+ dst->exit_int_info_err = from->exit_int_info_err;
+ dst->nested_ctl = from->nested_ctl;
+ dst->event_inj = from->event_inj;
+ dst->event_inj_err = from->event_inj_err;
+ dst->nested_cr3 = from->nested_cr3;
+ dst->virt_ext = from->virt_ext;
+ dst->pause_filter_count = from->pause_filter_count;
+ dst->pause_filter_thresh = from->pause_filter_thresh;
+}
+
static int svm_get_nested_state(struct kvm_vcpu *vcpu,
struct kvm_nested_state __user *user_kvm_nested_state,
u32 user_data_size)
{
struct vcpu_svm *svm;
+ struct vmcb_control_area *ctl;
+ unsigned long r;
struct kvm_nested_state kvm_state = {
.flags = 0,
.format = KVM_STATE_NESTED_FORMAT_SVM,
@@ -1297,9 +1352,18 @@ static int svm_get_nested_state(struct kvm_vcpu *vcpu,
*/
if (clear_user(user_vmcb, KVM_STATE_NESTED_SVM_VMCB_SIZE))
return -EFAULT;
- if (copy_to_user(&user_vmcb->control, &svm->nested.ctl,
- sizeof(user_vmcb->control)))
+
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return -ENOMEM;
+
+ nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl);
+ r = copy_to_user(&user_vmcb->control, ctl,
+ sizeof(user_vmcb->control));
+ kfree(ctl);
+ if (r)
return -EFAULT;
+
if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save,
sizeof(user_vmcb->save)))
return -EFAULT;
@@ -1316,6 +1380,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
&user_kvm_nested_state->data.svm[0];
struct vmcb_control_area *ctl;
struct vmcb_save_area *save;
+ struct vmcb_save_area_cached save_cached;
+ struct vmcb_ctrl_area_cached ctl_cached;
unsigned long cr0;
int ret;
@@ -1345,7 +1411,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
return -EINVAL;
if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE)) {
- svm_leave_nested(svm);
+ svm_leave_nested(vcpu);
svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
return 0;
}
@@ -1368,7 +1434,8 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
goto out_free;
ret = -EINVAL;
- if (!nested_vmcb_check_controls(vcpu, ctl))
+ __nested_copy_vmcb_control_to_cache(&ctl_cached, ctl);
+ if (!__nested_vmcb_check_controls(vcpu, &ctl_cached))
goto out_free;
/*
@@ -1383,22 +1450,11 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
* Validate host state saved from before VMRUN (see
* nested_svm_check_permissions).
*/
+ __nested_copy_vmcb_save_to_cache(&save_cached, save);
if (!(save->cr0 & X86_CR0_PG) ||
!(save->cr0 & X86_CR0_PE) ||
(save->rflags & X86_EFLAGS_VM) ||
- !nested_vmcb_valid_sregs(vcpu, save))
- goto out_free;
-
- /*
- * While the nested guest CR3 is already checked and set by
- * KVM_SET_SREGS, it was set when nested state was yet loaded,
- * thus MMU might not be initialized correctly.
- * Set it again to fix this.
- */
-
- ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
- nested_npt_enabled(svm), false);
- if (WARN_ON_ONCE(ret))
+ !__nested_vmcb_check_save(vcpu, &save_cached))
goto out_free;
@@ -1410,7 +1466,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
*/
if (is_guest_mode(vcpu))
- svm_leave_nested(svm);
+ svm_leave_nested(vcpu);
else
svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save;
@@ -1422,10 +1478,24 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa;
svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save);
- nested_load_control_from_vmcb12(svm, ctl);
+ nested_copy_vmcb_control_to_cache(svm, ctl);
svm_switch_vmcb(svm, &svm->nested.vmcb02);
nested_vmcb02_prepare_control(svm);
+
+ /*
+	 * While the nested guest CR3 is already checked and set by
+	 * KVM_SET_SREGS, it was set before the nested state was loaded,
+	 * so the MMU might not have been initialized correctly.
+ * Set it again to fix this.
+ */
+
+ ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3,
+ nested_npt_enabled(svm), false);
+ if (WARN_ON_ONCE(ret))
+ goto out_free;
+
kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
ret = 0;
out_free:
@@ -1449,7 +1519,7 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
* the guest CR3 might be restored prior to setting the nested
* state which can lead to a load of wrong PDPTRs.
*/
- if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
+ if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
return false;
if (!nested_svm_vmrun_msrpm(svm)) {
@@ -1464,6 +1534,7 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
}
struct kvm_x86_nested_ops svm_nested_ops = {
+ .leave_nested = svm_leave_nested,
.check_events = svm_check_nested_events,
.triple_fault = nested_svm_triple_fault,
.get_nested_state_pages = svm_get_nested_state_pages,
diff --git a/arch/x86/kvm/svm/pmu.c b/arch/x86/kvm/svm/pmu.c
index b4095dfeeee6..5aa45f13b16d 100644
--- a/arch/x86/kvm/svm/pmu.c
+++ b/arch/x86/kvm/svm/pmu.c
@@ -16,6 +16,7 @@
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
+#include "svm.h"
enum pmu_type {
PMU_TYPE_COUNTER = 0,
@@ -100,6 +101,9 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
{
struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
+ if (!enable_pmu)
+ return NULL;
+
switch (msr) {
case MSR_F15H_PERF_CTL0:
case MSR_F15H_PERF_CTL1:
@@ -134,12 +138,16 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
return &pmu->gp_counters[msr_to_index(msr)];
}
-static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
- u8 event_select,
- u8 unit_mask)
+static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
+ u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+ u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
int i;
+ /* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
+ if (WARN_ON(pmc_is_fixed(pmc)))
+ return PERF_COUNT_HW_MAX;
+
for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
if (amd_event_mapping[i].eventsel == event_select
&& amd_event_mapping[i].unit_mask == unit_mask)
@@ -151,12 +159,6 @@ static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
return amd_event_mapping[i].event_type;
}
-/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
-static unsigned amd_find_fixed_event(int idx)
-{
- return PERF_COUNT_HW_MAX;
-}
-
/* check if a PMC is enabled by comparing it against global_ctrl bits. Because
* AMD CPU doesn't have global_ctrl MSR, all PMCs are enabled (return TRUE).
*/
@@ -319,8 +321,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
}
struct kvm_pmu_ops amd_pmu_ops = {
- .find_arch_event = amd_find_arch_event,
- .find_fixed_event = amd_find_fixed_event,
+ .pmc_perf_hw_id = amd_pmc_perf_hw_id,
.pmc_is_enabled = amd_pmc_is_enabled,
.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index be2883141220..17b53457d866 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -636,7 +636,8 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
struct kvm_vcpu *vcpu;
- int i, ret;
+ unsigned long i;
+ int ret;
if (!sev_es_guest(kvm))
return -ENOTTY;
@@ -1593,7 +1594,7 @@ static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
static int sev_lock_vcpus_for_migration(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
- int i, j;
+ unsigned long i, j;
kvm_for_each_vcpu(i, vcpu, kvm) {
if (mutex_lock_killable(&vcpu->mutex))
@@ -1615,7 +1616,7 @@ out_unlock:
static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm) {
mutex_unlock(&vcpu->mutex);
@@ -1642,7 +1643,7 @@ static void sev_migrate_from(struct kvm_sev_info *dst,
static int sev_es_migrate_from(struct kvm *dst, struct kvm *src)
{
- int i;
+ unsigned long i;
struct kvm_vcpu *dst_vcpu, *src_vcpu;
struct vcpu_svm *dst_svm, *src_svm;
@@ -2099,8 +2100,13 @@ void __init sev_hardware_setup(void)
if (!sev_enabled || !npt_enabled)
goto out;
- /* Does the CPU support SEV? */
- if (!boot_cpu_has(X86_FEATURE_SEV))
+ /*
+ * SEV must obviously be supported in hardware. Sanity check that the
+ * CPU supports decode assists, which is mandatory for SEV guests to
+ * support instruction emulation.
+ */
+ if (!boot_cpu_has(X86_FEATURE_SEV) ||
+ WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_DECODEASSISTS)))
goto out;
/* Retrieve SEV CPUID information */
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 5151efa424ac..821edf664e7a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -265,7 +265,7 @@ u32 svm_msrpm_offset(u32 msr)
#define MAX_INST_SIZE 15
-static int get_max_npt_level(void)
+static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
return pgtable_l5_enabled() ? PT64_ROOT_5LEVEL : PT64_ROOT_4LEVEL;
@@ -290,7 +290,7 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
if ((old_efer & EFER_SVME) != (efer & EFER_SVME)) {
if (!(efer & EFER_SVME)) {
- svm_leave_nested(svm);
+ svm_leave_nested(vcpu);
svm_set_gif(svm, true);
/* #GP intercept is still needed for vmware backdoor */
if (!enable_vmware_backdoor)
@@ -312,7 +312,11 @@ int svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
return ret;
}
- if (svm_gp_erratum_intercept)
+ /*
+ * Never intercept #GP for SEV guests, KVM can't
+ * decrypt guest memory to workaround the erratum.
+ */
+ if (svm_gp_erratum_intercept && !sev_guest(vcpu->kvm))
set_exception_intercept(svm, GP_VECTOR);
}
}
@@ -585,12 +589,10 @@ static int svm_cpu_init(int cpu)
if (!sd)
return ret;
sd->cpu = cpu;
- sd->save_area = alloc_page(GFP_KERNEL);
+ sd->save_area = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!sd->save_area)
goto free_cpu_data;
- clear_page(page_address(sd->save_area));
-
ret = sev_cpu_init(sd);
if (ret)
goto free_save_area;
@@ -871,47 +873,6 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
}
}
-/*
- * The default MMIO mask is a single bit (excluding the present bit),
- * which could conflict with the memory encryption bit. Check for
- * memory encryption support and override the default MMIO mask if
- * memory encryption is enabled.
- */
-static __init void svm_adjust_mmio_mask(void)
-{
- unsigned int enc_bit, mask_bit;
- u64 msr, mask;
-
- /* If there is no memory encryption support, use existing mask */
- if (cpuid_eax(0x80000000) < 0x8000001f)
- return;
-
- /* If memory encryption is not enabled, use existing mask */
- rdmsrl(MSR_AMD64_SYSCFG, msr);
- if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
- return;
-
- enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
- mask_bit = boot_cpu_data.x86_phys_bits;
-
- /* Increment the mask bit if it is the same as the encryption bit */
- if (enc_bit == mask_bit)
- mask_bit++;
-
- /*
- * If the mask bit location is below 52, then some bits above the
- * physical addressing limit will always be reserved, so use the
- * rsvd_bits() function to generate the mask. This mask, along with
- * the present bit, will be used to generate a page fault with
- * PFER.RSV = 1.
- *
- * If the mask bit location is 52 (or above), then clear the mask.
- */
- mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
-
- kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
-}
-
static void svm_hardware_teardown(void)
{
int cpu;
@@ -926,191 +887,6 @@ static void svm_hardware_teardown(void)
iopm_base = 0;
}
-static __init void svm_set_cpu_caps(void)
-{
- kvm_set_cpu_caps();
-
- supported_xss = 0;
-
- /* CPUID 0x80000001 and 0x8000000A (SVM features) */
- if (nested) {
- kvm_cpu_cap_set(X86_FEATURE_SVM);
-
- if (nrips)
- kvm_cpu_cap_set(X86_FEATURE_NRIPS);
-
- if (npt_enabled)
- kvm_cpu_cap_set(X86_FEATURE_NPT);
-
- if (tsc_scaling)
- kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);
-
- /* Nested VM can receive #VMEXIT instead of triggering #GP */
- kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
- }
-
- /* CPUID 0x80000008 */
- if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
- boot_cpu_has(X86_FEATURE_AMD_SSBD))
- kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
-
- /* CPUID 0x8000001F (SME/SEV features) */
- sev_set_cpu_caps();
-}
-
-static __init int svm_hardware_setup(void)
-{
- int cpu;
- struct page *iopm_pages;
- void *iopm_va;
- int r;
- unsigned int order = get_order(IOPM_SIZE);
-
- /*
- * NX is required for shadow paging and for NPT if the NX huge pages
- * mitigation is enabled.
- */
- if (!boot_cpu_has(X86_FEATURE_NX)) {
- pr_err_ratelimited("NX (Execute Disable) not supported\n");
- return -EOPNOTSUPP;
- }
- kvm_enable_efer_bits(EFER_NX);
-
- iopm_pages = alloc_pages(GFP_KERNEL, order);
-
- if (!iopm_pages)
- return -ENOMEM;
-
- iopm_va = page_address(iopm_pages);
- memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
- iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
-
- init_msrpm_offsets();
-
- supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
-
- if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
- kvm_enable_efer_bits(EFER_FFXSR);
-
- if (tsc_scaling) {
- if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
- tsc_scaling = false;
- } else {
- pr_info("TSC scaling supported\n");
- kvm_has_tsc_control = true;
- kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
- kvm_tsc_scaling_ratio_frac_bits = 32;
- }
- }
-
- tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
-
- /* Check for pause filtering support */
- if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
- pause_filter_count = 0;
- pause_filter_thresh = 0;
- } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
- pause_filter_thresh = 0;
- }
-
- if (nested) {
- printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
- kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
- }
-
- /*
- * KVM's MMU doesn't support using 2-level paging for itself, and thus
- * NPT isn't supported if the host is using 2-level paging since host
- * CR4 is unchanged on VMRUN.
- */
- if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
- npt_enabled = false;
-
- if (!boot_cpu_has(X86_FEATURE_NPT))
- npt_enabled = false;
-
- /* Force VM NPT level equal to the host's max NPT level */
- kvm_configure_mmu(npt_enabled, get_max_npt_level(),
- get_max_npt_level(), PG_LEVEL_1G);
- pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
-
- /* Note, SEV setup consumes npt_enabled. */
- sev_hardware_setup();
-
- svm_hv_hardware_setup();
-
- svm_adjust_mmio_mask();
-
- for_each_possible_cpu(cpu) {
- r = svm_cpu_init(cpu);
- if (r)
- goto err;
- }
-
- if (nrips) {
- if (!boot_cpu_has(X86_FEATURE_NRIPS))
- nrips = false;
- }
-
- enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC);
-
- if (enable_apicv) {
- pr_info("AVIC enabled\n");
-
- amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
- }
-
- if (vls) {
- if (!npt_enabled ||
- !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
- !IS_ENABLED(CONFIG_X86_64)) {
- vls = false;
- } else {
- pr_info("Virtual VMLOAD VMSAVE supported\n");
- }
- }
-
- if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
- svm_gp_erratum_intercept = false;
-
- if (vgif) {
- if (!boot_cpu_has(X86_FEATURE_VGIF))
- vgif = false;
- else
- pr_info("Virtual GIF supported\n");
- }
-
- if (lbrv) {
- if (!boot_cpu_has(X86_FEATURE_LBRV))
- lbrv = false;
- else
- pr_info("LBR virtualization supported\n");
- }
-
- svm_set_cpu_caps();
-
- /*
- * It seems that on AMD processors PTE's accessed bit is
- * being set by the CPU hardware before the NPF vmexit.
- * This is not expected behaviour and our tests fail because
- * of it.
- * A workaround here is to disable support for
- * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
- * In this case userspace can know if there is support using
- * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle
- * it
- * If future AMD CPU models change the behaviour described above,
- * this variable can be changed accordingly
- */
- allow_smaller_maxphyaddr = !npt_enabled;
-
- return 0;
-
-err:
- svm_hardware_teardown();
- return r;
-}
-
static void init_seg(struct vmcb_seg *seg)
{
seg->selector = 0;
@@ -1238,9 +1014,10 @@ static void init_vmcb(struct kvm_vcpu *vcpu)
* Guest access to VMware backdoor ports could legitimately
* trigger #GP because of TSS I/O permission bitmap.
* We intercept those #GP and allow access to them anyway
- * as VMware does.
+ * as VMware does. Don't intercept #GP for SEV guests as KVM can't
+ * decrypt guest memory to decode the faulting instruction.
*/
- if (enable_vmware_backdoor)
+ if (enable_vmware_backdoor && !sev_guest(vcpu->kvm))
set_exception_intercept(svm, GP_VECTOR);
svm_set_intercept(svm, INTERCEPT_INTR);
@@ -1435,12 +1212,6 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
if (err)
goto error_free_vmsa_page;
- /* We initialize this flag to true to make sure that the is_running
- * bit would be set the first time the vcpu is loaded.
- */
- if (irqchip_in_kernel(vcpu->kvm) && kvm_apicv_activated(vcpu->kvm))
- svm->avic_is_running = true;
-
svm->msrpm = svm_vcpu_alloc_msrpm();
if (!svm->msrpm) {
err = -ENOMEM;
@@ -1596,10 +1367,16 @@ static bool svm_get_if_flag(struct kvm_vcpu *vcpu)
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
+ kvm_register_mark_available(vcpu, reg);
+
switch (reg) {
case VCPU_EXREG_PDPTR:
- BUG_ON(!npt_enabled);
- load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
+ /*
+	 * When !npt_enabled, mmu->pdptrs[] is already available since,
+	 * per the SDM, it is updated on every MOV to a control register.
+ */
+ if (npt_enabled)
+ load_pdptrs(vcpu, kvm_read_cr3(vcpu));
break;
default:
KVM_BUG_ON(1, vcpu->kvm);
@@ -1786,10 +1563,29 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
vmcb_mark_dirty(svm->vmcb, VMCB_DT);
}
+static void svm_post_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+{
+ struct vcpu_svm *svm = to_svm(vcpu);
+
+ /*
+ * For guests that don't set guest_state_protected, the cr3 update is
+ * handled via kvm_mmu_load() while entering the guest. For guests
+ * that do (SEV-ES/SEV-SNP), the cr3 update needs to be written to
+ * VMCB save area now, since the save area will become the initial
+ * contents of the VMSA, and future VMCB save area updates won't be
+ * seen.
+ */
+ if (sev_es_guest(vcpu->kvm)) {
+ svm->vmcb->save.cr3 = cr3;
+ vmcb_mark_dirty(svm->vmcb, VMCB_CR);
+ }
+}
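
For context, the hook is invoked from the common CR3 write path; roughly
(a simplified sketch of the x86.c side, not the exact code):

	int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
	{
		/* ... validity checks and MMU handling elided ... */
		vcpu->arch.cr3 = cr3;
		kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
		/* dispatches to svm_post_set_cr3() on SVM */
		static_call_cond(kvm_x86_post_set_cr3)(vcpu, cr3);
		return 0;
	}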
+
void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 hcr0 = cr0;
+ bool old_paging = is_paging(vcpu);
#ifdef CONFIG_X86_64
if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
@@ -1806,8 +1602,11 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
#endif
vcpu->arch.cr0 = cr0;
- if (!npt_enabled)
+ if (!npt_enabled) {
hcr0 |= X86_CR0_PG | X86_CR0_WP;
+ if (old_paging != is_paging(vcpu))
+ svm_set_cr4(vcpu, kvm_read_cr4(vcpu));
+ }
/*
* re-enable caching here because the QEMU bios
@@ -1851,8 +1650,12 @@ void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
svm_flush_tlb(vcpu);
vcpu->arch.cr4 = cr4;
- if (!npt_enabled)
+ if (!npt_enabled) {
cr4 |= X86_CR4_PAE;
+
+ if (!is_paging(vcpu))
+ cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE);
+ }
cr4 |= host_cr4_mce;
to_svm(vcpu)->vmcb->save.cr4 = cr4;
vmcb_mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
@@ -2301,10 +2104,6 @@ static int gp_interception(struct kvm_vcpu *vcpu)
if (error_code)
goto reinject;
- /* All SVM instructions expect page aligned RAX */
- if (svm->vmcb->save.rax & ~PAGE_MASK)
- goto reinject;
-
/* Decode the instruction for usage later */
if (x86_decode_emulated_instruction(vcpu, 0, NULL, 0) != EMULATION_OK)
goto reinject;
@@ -2322,8 +2121,13 @@ static int gp_interception(struct kvm_vcpu *vcpu)
if (!is_guest_mode(vcpu))
return kvm_emulate_instruction(vcpu,
EMULTYPE_VMWARE_GP | EMULTYPE_NO_DECODE);
- } else
+ } else {
+ /* All SVM instructions expect page aligned RAX */
+ if (svm->vmcb->save.rax & ~PAGE_MASK)
+ goto reinject;
+
return emulate_svm_instr(vcpu, opcode);
+ }
reinject:
kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
@@ -2517,7 +2321,7 @@ static bool check_selective_cr0_intercepted(struct kvm_vcpu *vcpu,
bool ret = false;
if (!is_guest_mode(vcpu) ||
- (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
+ (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0))))
return false;
cr0 &= ~SVM_CR0_SELECTIVE_MASK;
@@ -3495,6 +3299,55 @@ static void svm_set_irq(struct kvm_vcpu *vcpu)
SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+ int trig_mode, int vector)
+{
+ /*
+ * vcpu->arch.apicv_active must be read after vcpu->mode.
+ * Pairs with smp_store_release in vcpu_enter_guest.
+ */
+ bool in_guest_mode = (smp_load_acquire(&vcpu->mode) == IN_GUEST_MODE);
+
+ if (!READ_ONCE(vcpu->arch.apicv_active)) {
+ /* Process the interrupt via inject_pending_event */
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ kvm_vcpu_kick(vcpu);
+ return;
+ }
+
+ trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, trig_mode, vector);
+ if (in_guest_mode) {
+ /*
+ * Signal the doorbell to tell hardware to inject the IRQ. If
+ * the vCPU exits the guest before the doorbell chimes, hardware
+ * will automatically process AVIC interrupts at the next VMRUN.
+ */
+ avic_ring_doorbell(vcpu);
+ } else {
+ /*
+ * Wake the vCPU if it was blocking. KVM will then detect the
+ * pending IRQ when checking if the vCPU has a wake event.
+ */
+ kvm_vcpu_wake_up(vcpu);
+ }
+}
+
+static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector)
+{
+ kvm_lapic_set_irr(vector, apic);
+
+ /*
+	 * Pairs with the smp_mb_*() after setting vcpu->mode to IN_GUEST_MODE
+	 * in vcpu_enter_guest() to ensure the write to the vIRR is ordered before
+	 * the read of vcpu->mode. This guarantees that either VMRUN will see
+ * and process the new vIRR entry, or that svm_complete_interrupt_delivery
+ * will signal the doorbell if the CPU has already entered the guest.
+ */
+ smp_mb__after_atomic();
+ svm_complete_interrupt_delivery(apic->vcpu, delivery_mode, trig_mode, vector);
+}
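
The two barriers pair up so that at least one side always observes the
other. Schematically (a condensed illustration, not code from the patch):

	/*
	 *   sender (svm_deliver_interrupt)   vCPU (vcpu_enter_guest)
	 *   ------------------------------   ------------------------------
	 *   kvm_lapic_set_irr(vector);       smp_store_release(&vcpu->mode,
	 *   smp_mb__after_atomic();                          IN_GUEST_MODE);
	 *   if (vcpu->mode == IN_GUEST_MODE) VMRUN, hardware scans the vIRR
	 *           avic_ring_doorbell();
	 *   else
	 *           kvm_vcpu_wake_up();
	 */

Either the sender observes IN_GUEST_MODE and rings the doorbell, or the
vCPU's VMRUN is ordered after the vIRR write and processes the vector.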
+
static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);
@@ -3542,11 +3395,13 @@ static int svm_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (svm->nested.nested_run_pending)
return -EBUSY;
+ if (svm_nmi_blocked(vcpu))
+ return 0;
+
/* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
return -EBUSY;
-
- return !svm_nmi_blocked(vcpu);
+ return 1;
}
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
@@ -3598,9 +3453,13 @@ bool svm_interrupt_blocked(struct kvm_vcpu *vcpu)
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
struct vcpu_svm *svm = to_svm(vcpu);
+
if (svm->nested.nested_run_pending)
return -EBUSY;
+ if (svm_interrupt_blocked(vcpu))
+ return 0;
+
/*
* An IRQ must not be injected into L2 if it's supposed to VM-Exit,
* e.g. if the IRQ arrived asynchronously after checking nested events.
@@ -3608,7 +3467,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm))
return -EBUSY;
- return !svm_interrupt_blocked(vcpu);
+ return 1;
}
static void svm_enable_irq_window(struct kvm_vcpu *vcpu)
@@ -3800,6 +3659,11 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
svm_complete_interrupts(vcpu);
}
+static int svm_vcpu_pre_run(struct kvm_vcpu *vcpu)
+{
+ return 1;
+}
+
static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
{
if (to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
@@ -3814,7 +3678,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
struct vcpu_svm *svm = to_svm(vcpu);
unsigned long vmcb_pa = svm->current_vmcb->pa;
- kvm_guest_enter_irqoff();
+ guest_state_enter_irqoff();
if (sev_es_guest(vcpu->kvm)) {
__svm_sev_es_vcpu_run(vmcb_pa);
@@ -3834,7 +3698,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
vmload(__sme_page_pa(sd->save_area));
}
- kvm_guest_exit_irqoff();
+ guest_state_exit_irqoff();
}
static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
@@ -3931,9 +3795,10 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
}
+ vcpu->arch.regs_dirty = 0;
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
- kvm_before_interrupt(vcpu);
+ kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
kvm_load_host_xsave_state(vcpu);
stgi();
@@ -3965,8 +3830,7 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.apf.host_apf_flags =
kvm_read_and_reset_apf_flags();
- if (npt_enabled)
- kvm_register_clear_available(vcpu, VCPU_EXREG_PDPTR);
+ vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;
/*
* We need to handle MC intercepts here before the vcpu has a chance to
@@ -3996,9 +3860,6 @@ static void svm_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
hv_track_root_tdp(vcpu, root_hpa);
- /* Loading L2's CR3 is handled by enter_svm_guest_mode. */
- if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
- return;
cr3 = vcpu->arch.cr3;
} else if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
cr3 = __sme_set(root_hpa) | kvm_get_active_pcid(vcpu);
@@ -4217,7 +4078,7 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
info->intercept == x86_intercept_clts)
break;
- if (!(vmcb_is_intercept(&svm->nested.ctl,
+ if (!(vmcb12_is_intercept(&svm->nested.ctl,
INTERCEPT_SELECTIVE_CR0)))
break;
@@ -4337,11 +4198,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
if (svm->nested.nested_run_pending)
return -EBUSY;
+ if (svm_smi_blocked(vcpu))
+ return 0;
+
/* An SMI must not be injected into L2 if it's supposed to VM-Exit. */
if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm))
return -EBUSY;
- return !svm_smi_blocked(vcpu);
+ return 1;
}
static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
@@ -4435,10 +4299,18 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
* Enter the nested guest now
*/
+ vmcb_mark_all_dirty(svm->vmcb01.ptr);
+
vmcb12 = map.hva;
- nested_load_control_from_vmcb12(svm, &vmcb12->control);
+ nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
+ nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
+ if (ret)
+ goto unmap_save;
+
+ svm->nested.nested_run_pending = 1;
+
unmap_save:
kvm_vcpu_unmap(vcpu, &map_save, true);
unmap_map:
@@ -4459,79 +4331,140 @@ static void svm_enable_smi_window(struct kvm_vcpu *vcpu)
}
}
-static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len)
+static bool svm_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+ void *insn, int insn_len)
{
bool smep, smap, is_user;
unsigned long cr4;
+ u64 error_code;
+
+ /* Emulation is always possible when KVM has access to all guest state. */
+ if (!sev_guest(vcpu->kvm))
+ return true;
+
+ /* #UD and #GP should never be intercepted for SEV guests. */
+ WARN_ON_ONCE(emul_type & (EMULTYPE_TRAP_UD |
+ EMULTYPE_TRAP_UD_FORCED |
+ EMULTYPE_VMWARE_GP));
/*
- * When the guest is an SEV-ES guest, emulation is not possible.
+ * Emulation is impossible for SEV-ES guests as KVM doesn't have access
+ * to guest register state.
*/
if (sev_es_guest(vcpu->kvm))
return false;
/*
+ * Emulation is possible if the instruction is already decoded, e.g.
+ * when completing I/O after returning from userspace.
+ */
+ if (emul_type & EMULTYPE_NO_DECODE)
+ return true;
+
+ /*
+ * Emulation is possible for SEV guests if and only if a prefilled
+ * buffer containing the bytes of the intercepted instruction is
+ * available. SEV guest memory is encrypted with a guest specific key
+	 * and cannot be decrypted by KVM, i.e. KVM would read ciphertext and
+ * decode garbage.
+ *
+ * Inject #UD if KVM reached this point without an instruction buffer.
+ * In practice, this path should never be hit by a well-behaved guest,
+ * e.g. KVM doesn't intercept #UD or #GP for SEV guests, but this path
+ * is still theoretically reachable, e.g. via unaccelerated fault-like
+ * AVIC access, and needs to be handled by KVM to avoid putting the
+ * guest into an infinite loop. Injecting #UD is somewhat arbitrary,
+ * but its the least awful option given lack of insight into the guest.
+ */
+ if (unlikely(!insn)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return false;
+ }
+
+ /*
+ * Emulate for SEV guests if the insn buffer is not empty. The buffer
+ * will be empty if the DecodeAssist microcode cannot fetch bytes for
+ * the faulting instruction because the code fetch itself faulted, e.g.
+	 * the guest attempted to fetch from emulated MMIO, or because a guest
+	 * page table used to translate CS:RIP resides in emulated MMIO.
+ */
+ if (likely(insn_len))
+ return true;
+
+ /*
* Detect and workaround Errata 1096 Fam_17h_00_0Fh.
*
* Errata:
- * When CPU raise #NPF on guest data access and vCPU CR4.SMAP=1, it is
- * possible that CPU microcode implementing DecodeAssist will fail
- * to read bytes of instruction which caused #NPF. In this case,
- * GuestIntrBytes field of the VMCB on a VMEXIT will incorrectly
- * return 0 instead of the correct guest instruction bytes.
+	 * When the CPU raises #NPF on a guest data access and vCPU CR4.SMAP=1, it is
+ * possible that CPU microcode implementing DecodeAssist will fail to
+ * read guest memory at CS:RIP and vmcb.GuestIntrBytes will incorrectly
+ * be '0'. This happens because microcode reads CS:RIP using a _data_
+	 * load uop with CPL=0 privileges. If the load hits a SMAP #PF, ucode
+ * gives up and does not fill the instruction bytes buffer.
*
- * This happens because CPU microcode reading instruction bytes
- * uses a special opcode which attempts to read data using CPL=0
- * privileges. The microcode reads CS:RIP and if it hits a SMAP
- * fault, it gives up and returns no instruction bytes.
+ * As above, KVM reaches this point iff the VM is an SEV guest, the CPU
+ * supports DecodeAssist, a #NPF was raised, KVM's page fault handler
+ * triggered emulation (e.g. for MMIO), and the CPU returned 0 in the
+ * GuestIntrBytes field of the VMCB.
*
- * Detection:
- * We reach here in case CPU supports DecodeAssist, raised #NPF and
- * returned 0 in GuestIntrBytes field of the VMCB.
- * First, errata can only be triggered in case vCPU CR4.SMAP=1.
- * Second, if vCPU CR4.SMEP=1, errata could only be triggered
- * in case vCPU CPL==3 (Because otherwise guest would have triggered
- * a SMEP fault instead of #NPF).
- * Otherwise, vCPU CR4.SMEP=0, errata could be triggered by any vCPU CPL.
- * As most guests enable SMAP if they have also enabled SMEP, use above
- * logic in order to attempt minimize false-positive of detecting errata
- * while still preserving all cases semantic correctness.
+ * This does _not_ mean that the erratum has been encountered, as the
+ * DecodeAssist will also fail if the load for CS:RIP hits a legitimate
+	 * #PF, e.g. if the guest attempts to execute from emulated MMIO and
+	 * encounters a reserved/not-present #PF.
*
- * Workaround:
- * To determine what instruction the guest was executing, the hypervisor
- * will have to decode the instruction at the instruction pointer.
+ * To hit the erratum, the following conditions must be true:
+ * 1. CR4.SMAP=1 (obviously).
+ * 2. CR4.SMEP=0 || CPL=3. If SMEP=1 and CPL<3, the erratum cannot
+ * have been hit as the guest would have encountered a SMEP
+ * violation #PF, not a #NPF.
+ * 3. The #NPF is not due to a code fetch, in which case failure to
+	 * retrieve the instruction bytes is legitimate (see above).
*
- * In non SEV guest, hypervisor will be able to read the guest
- * memory to decode the instruction pointer when insn_len is zero
- * so we return true to indicate that decoding is possible.
- *
- * But in the SEV guest, the guest memory is encrypted with the
- * guest specific key and hypervisor will not be able to decode the
- * instruction pointer so we will not able to workaround it. Lets
- * print the error and request to kill the guest.
+ * In addition, don't apply the erratum workaround if the #NPF occurred
+ * while translating guest page tables (see below).
*/
- if (likely(!insn || insn_len))
- return true;
-
- /*
- * If RIP is invalid, go ahead with emulation which will cause an
- * internal error exit.
- */
- if (!kvm_vcpu_gfn_to_memslot(vcpu, kvm_rip_read(vcpu) >> PAGE_SHIFT))
- return true;
+ error_code = to_svm(vcpu)->vmcb->control.exit_info_1;
+ if (error_code & (PFERR_GUEST_PAGE_MASK | PFERR_FETCH_MASK))
+ goto resume_guest;
cr4 = kvm_read_cr4(vcpu);
smep = cr4 & X86_CR4_SMEP;
smap = cr4 & X86_CR4_SMAP;
is_user = svm_get_cpl(vcpu) == 3;
if (smap && (!smep || is_user)) {
- if (!sev_guest(vcpu->kvm))
- return true;
-
pr_err_ratelimited("KVM: SEV Guest triggered AMD Erratum 1096\n");
- kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
+
+ /*
+ * If the fault occurred in userspace, arbitrarily inject #GP
+ * to avoid killing the guest and to hopefully avoid confusing
+ * the guest kernel too much, e.g. injecting #PF would not be
+ * coherent with respect to the guest's page tables. Request
+ * triple fault if the fault occurred in the kernel as there's
+ * no fault that KVM can inject without confusing the guest.
+ * In practice, the triple fault is moot as no sane SEV kernel
+ * will execute from user memory while also running with SMAP=1.
+ */
+ if (is_user)
+ kvm_inject_gp(vcpu, 0);
+ else
+ kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
}
+resume_guest:
+ /*
+ * If the erratum was not hit, simply resume the guest and let it fault
+ * again. While awful, e.g. the vCPU may get stuck in an infinite loop
+ * if the fault is at CPL=0, it's the lesser of all evils. Exiting to
+ * userspace will kill the guest, and letting the emulator read garbage
+ * will yield random behavior and potentially corrupt the guest.
+ *
+ * Simply resuming the guest is technically not a violation of the SEV
+ * architecture. AMD's APM states that all code fetches and page table
+	 * accesses for SEV guests are encrypted, regardless of the C-Bit. The
+ * APM also states that encrypted accesses to MMIO are "ignored", but
+ * doesn't explicitly define "ignored", i.e. doing nothing and letting
+ * the guest spin is technically "ignoring" the access.
+ */
return false;
}
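
Condensing the control flow above into a decision table (an illustrative
summary, not text from the patch):

	/*
	 * For SEV (non-ES) guests, after the EMULTYPE_NO_DECODE check:
	 *
	 * insn buffer non-empty             -> emulate
	 * insn == NULL                      -> inject #UD, bail
	 * empty, FETCH or GUEST_PAGE fault  -> resume guest
	 * empty, SMAP=0                     -> resume guest
	 * empty, SMAP=1, SMEP=1, CPL < 3    -> resume guest
	 * empty, SMAP=1, CPL == 3           -> erratum: inject #GP
	 * empty, SMAP=1, SMEP=0, CPL < 3    -> erratum: triple fault
	 */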
@@ -4598,8 +4531,8 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.prepare_guest_switch = svm_prepare_guest_switch,
.vcpu_load = svm_vcpu_load,
.vcpu_put = svm_vcpu_put,
- .vcpu_blocking = svm_vcpu_blocking,
- .vcpu_unblocking = svm_vcpu_unblocking,
+ .vcpu_blocking = avic_vcpu_blocking,
+ .vcpu_unblocking = avic_vcpu_unblocking,
.update_exception_bitmap = svm_update_exception_bitmap,
.get_msr_feature = svm_get_msr_feature,
@@ -4611,6 +4544,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.get_cpl = svm_get_cpl,
.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
.set_cr0 = svm_set_cr0,
+ .post_set_cr3 = svm_post_set_cr3,
.is_valid_cr4 = svm_is_valid_cr4,
.set_cr4 = svm_set_cr4,
.set_efer = svm_set_efer,
@@ -4630,6 +4564,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.tlb_flush_gva = svm_flush_tlb_gva,
.tlb_flush_guest = svm_flush_tlb,
+ .vcpu_pre_run = svm_vcpu_pre_run,
.run = svm_vcpu_run,
.handle_exit = handle_exit,
.skip_emulated_instruction = skip_emulated_instruction,
@@ -4683,7 +4618,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.pmu_ops = &amd_pmu_ops,
.nested_ops = &svm_nested_ops,
- .deliver_posted_interrupt = svm_deliver_avic_intr,
+ .deliver_interrupt = svm_deliver_interrupt,
.dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
.update_pi_irte = svm_update_pi_irte,
.setup_mce = svm_setup_mce,
@@ -4710,6 +4645,244 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
};
+/*
+ * The default MMIO mask is a single bit (excluding the present bit),
+ * which could conflict with the memory encryption bit. Check for
+ * memory encryption support and override the default MMIO mask if
+ * memory encryption is enabled.
+ */
+static __init void svm_adjust_mmio_mask(void)
+{
+ unsigned int enc_bit, mask_bit;
+ u64 msr, mask;
+
+ /* If there is no memory encryption support, use existing mask */
+ if (cpuid_eax(0x80000000) < 0x8000001f)
+ return;
+
+ /* If memory encryption is not enabled, use existing mask */
+ rdmsrl(MSR_AMD64_SYSCFG, msr);
+ if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
+ return;
+
+ enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
+ mask_bit = boot_cpu_data.x86_phys_bits;
+
+ /* Increment the mask bit if it is the same as the encryption bit */
+ if (enc_bit == mask_bit)
+ mask_bit++;
+
+ /*
+ * If the mask bit location is below 52, then some bits above the
+ * physical addressing limit will always be reserved, so use the
+ * rsvd_bits() function to generate the mask. This mask, along with
+ * the present bit, will be used to generate a page fault with
+ * PFER.RSV = 1.
+ *
+ * If the mask bit location is 52 (or above), then clear the mask.
+ */
+ mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
+
+ kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
+}
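
A worked example of the mask arithmetic (the numbers are illustrative,
not from the patch): with x86_phys_bits = 48 and the C-bit at position 47,
enc_bit (47) != mask_bit (48), so there is no increment, and 48 < 52 gives:

	mask = rsvd_bits(48, 51) | PT_PRESENT_MASK;	/* 0x000F000000000001ULL */

Any SPTE with bits 51:48 and bit 0 set then faults with PFERR.RSV = 1,
which KVM recognizes as an MMIO access.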
+
+static __init void svm_set_cpu_caps(void)
+{
+ kvm_set_cpu_caps();
+
+ supported_xss = 0;
+
+ /* CPUID 0x80000001 and 0x8000000A (SVM features) */
+ if (nested) {
+ kvm_cpu_cap_set(X86_FEATURE_SVM);
+ kvm_cpu_cap_set(X86_FEATURE_VMCBCLEAN);
+
+ if (nrips)
+ kvm_cpu_cap_set(X86_FEATURE_NRIPS);
+
+ if (npt_enabled)
+ kvm_cpu_cap_set(X86_FEATURE_NPT);
+
+ if (tsc_scaling)
+ kvm_cpu_cap_set(X86_FEATURE_TSCRATEMSR);
+
+ /* Nested VM can receive #VMEXIT instead of triggering #GP */
+ kvm_cpu_cap_set(X86_FEATURE_SVME_ADDR_CHK);
+ }
+
+ /* CPUID 0x80000008 */
+ if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) ||
+ boot_cpu_has(X86_FEATURE_AMD_SSBD))
+ kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);
+
+ /* AMD PMU PERFCTR_CORE CPUID */
+ if (enable_pmu && boot_cpu_has(X86_FEATURE_PERFCTR_CORE))
+ kvm_cpu_cap_set(X86_FEATURE_PERFCTR_CORE);
+
+ /* CPUID 0x8000001F (SME/SEV features) */
+ sev_set_cpu_caps();
+}
+
+static __init int svm_hardware_setup(void)
+{
+ int cpu;
+ struct page *iopm_pages;
+ void *iopm_va;
+ int r;
+ unsigned int order = get_order(IOPM_SIZE);
+
+ /*
+ * NX is required for shadow paging and for NPT if the NX huge pages
+ * mitigation is enabled.
+ */
+ if (!boot_cpu_has(X86_FEATURE_NX)) {
+ pr_err_ratelimited("NX (Execute Disable) not supported\n");
+ return -EOPNOTSUPP;
+ }
+ kvm_enable_efer_bits(EFER_NX);
+
+ iopm_pages = alloc_pages(GFP_KERNEL, order);
+
+ if (!iopm_pages)
+ return -ENOMEM;
+
+ iopm_va = page_address(iopm_pages);
+ memset(iopm_va, 0xff, PAGE_SIZE * (1 << order));
+ iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
+
+ init_msrpm_offsets();
+
+ supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
+
+ if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
+ kvm_enable_efer_bits(EFER_FFXSR);
+
+ if (tsc_scaling) {
+ if (!boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
+ tsc_scaling = false;
+ } else {
+ pr_info("TSC scaling supported\n");
+ kvm_has_tsc_control = true;
+ kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
+ kvm_tsc_scaling_ratio_frac_bits = 32;
+ }
+ }
+
+ tsc_aux_uret_slot = kvm_add_user_return_msr(MSR_TSC_AUX);
+
+ /* Check for pause filtering support */
+ if (!boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
+ pause_filter_count = 0;
+ pause_filter_thresh = 0;
+ } else if (!boot_cpu_has(X86_FEATURE_PFTHRESHOLD)) {
+ pause_filter_thresh = 0;
+ }
+
+ if (nested) {
+ printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
+ kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
+ }
+
+ /*
+ * KVM's MMU doesn't support using 2-level paging for itself, and thus
+ * NPT isn't supported if the host is using 2-level paging since host
+ * CR4 is unchanged on VMRUN.
+ */
+ if (!IS_ENABLED(CONFIG_X86_64) && !IS_ENABLED(CONFIG_X86_PAE))
+ npt_enabled = false;
+
+ if (!boot_cpu_has(X86_FEATURE_NPT))
+ npt_enabled = false;
+
+ /* Force VM NPT level equal to the host's paging level */
+ kvm_configure_mmu(npt_enabled, get_npt_level(),
+ get_npt_level(), PG_LEVEL_1G);
+ pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
+
+ /* Note, SEV setup consumes npt_enabled. */
+ sev_hardware_setup();
+
+ svm_hv_hardware_setup();
+
+ svm_adjust_mmio_mask();
+
+ for_each_possible_cpu(cpu) {
+ r = svm_cpu_init(cpu);
+ if (r)
+ goto err;
+ }
+
+ if (nrips) {
+ if (!boot_cpu_has(X86_FEATURE_NRIPS))
+ nrips = false;
+ }
+
+ enable_apicv = avic = avic && npt_enabled && boot_cpu_has(X86_FEATURE_AVIC);
+
+ if (enable_apicv) {
+ pr_info("AVIC enabled\n");
+
+ amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
+ } else {
+ svm_x86_ops.vcpu_blocking = NULL;
+ svm_x86_ops.vcpu_unblocking = NULL;
+ }
+
+ if (vls) {
+ if (!npt_enabled ||
+ !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
+ !IS_ENABLED(CONFIG_X86_64)) {
+ vls = false;
+ } else {
+ pr_info("Virtual VMLOAD VMSAVE supported\n");
+ }
+ }
+
+ if (boot_cpu_has(X86_FEATURE_SVME_ADDR_CHK))
+ svm_gp_erratum_intercept = false;
+
+ if (vgif) {
+ if (!boot_cpu_has(X86_FEATURE_VGIF))
+ vgif = false;
+ else
+ pr_info("Virtual GIF supported\n");
+ }
+
+ if (lbrv) {
+ if (!boot_cpu_has(X86_FEATURE_LBRV))
+ lbrv = false;
+ else
+ pr_info("LBR virtualization supported\n");
+ }
+
+ if (!enable_pmu)
+ pr_info("PMU virtualization is disabled\n");
+
+ svm_set_cpu_caps();
+
+ /*
+ * It seems that on AMD processors PTE's accessed bit is
+ * being set by the CPU hardware before the NPF vmexit.
+ * This is not expected behaviour and our tests fail because
+ * of it.
+ * A workaround here is to disable support for
+ * GUEST_MAXPHYADDR < HOST_MAXPHYADDR if NPT is enabled.
+ * In this case userspace can know if there is support using
+ * KVM_CAP_SMALLER_MAXPHYADDR extension and decide how to handle
+ * it
+ * If future AMD CPU models change the behaviour described above,
+ * this variable can be changed accordingly
+ */
+ allow_smaller_maxphyaddr = !npt_enabled;
+
+ return 0;
+
+err:
+ svm_hardware_teardown();
+ return r;
+}
+
static struct kvm_x86_init_ops svm_init_ops __initdata = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 1c7306c370fa..fa98d6844728 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -105,6 +105,40 @@ struct kvm_vmcb_info {
uint64_t asid_generation;
};
+struct vmcb_save_area_cached {
+ u64 efer;
+ u64 cr4;
+ u64 cr3;
+ u64 cr0;
+ u64 dr7;
+ u64 dr6;
+};
+
+struct vmcb_ctrl_area_cached {
+ u32 intercepts[MAX_INTERCEPT];
+ u16 pause_filter_thresh;
+ u16 pause_filter_count;
+ u64 iopm_base_pa;
+ u64 msrpm_base_pa;
+ u64 tsc_offset;
+ u32 asid;
+ u8 tlb_ctl;
+ u32 int_ctl;
+ u32 int_vector;
+ u32 int_state;
+ u32 exit_code;
+ u32 exit_code_hi;
+ u64 exit_info_1;
+ u64 exit_info_2;
+ u32 exit_int_info;
+ u32 exit_int_info_err;
+ u64 nested_ctl;
+ u32 event_inj;
+ u32 event_inj_err;
+ u64 nested_cr3;
+ u64 virt_ext;
+};
+
struct svm_nested_state {
struct kvm_vmcb_info vmcb02;
u64 hsave_msr;
@@ -120,7 +154,13 @@ struct svm_nested_state {
bool nested_run_pending;
/* cache for control fields of the guest */
- struct vmcb_control_area ctl;
+ struct vmcb_ctrl_area_cached ctl;
+
+ /*
+ * Note: this struct is not kept up-to-date while L2 runs; it is only
+ * valid within nested_svm_vmrun.
+ */
+ struct vmcb_save_area_cached save;
bool initialized;
};
@@ -185,7 +225,6 @@ struct vcpu_svm {
u32 dfr_reg;
struct page *avic_backing_page;
u64 *avic_physical_id_cache;
- bool avic_is_running;
/*
* Per-vcpu list of struct amd_svm_iommu_ir:
@@ -265,11 +304,6 @@ static inline void vmcb_mark_all_clean(struct vmcb *vmcb)
& ~VMCB_ALWAYS_DIRTY_MASK;
}
-static inline bool vmcb_is_clean(struct vmcb *vmcb, int bit)
-{
- return (vmcb->control.clean & (1 << bit));
-}
-
static inline void vmcb_mark_dirty(struct vmcb *vmcb, int bit)
{
vmcb->control.clean &= ~(1 << bit);
@@ -285,6 +319,16 @@ static __always_inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
return container_of(vcpu, struct vcpu_svm, vcpu);
}
+/*
+ * Only the PDPTRs are loaded on demand into the shadow MMU. All other
+ * fields are synchronized in handle_exit, because accessing the VMCB is cheap.
+ *
+ * CR3 might be out of date in the VMCB but it is not marked dirty; instead,
+ * KVM_REQ_LOAD_MMU_PGD is always requested when the cached vcpu->arch.cr3
+ * is changed. svm_load_mmu_pgd() then syncs the new CR3 value into the VMCB.
+ */
+#define SVM_REGS_LAZY_LOAD_SET (1 << VCPU_EXREG_PDPTR)
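
The lazy-load set pairs with svm_cache_reg() above; a condensed sketch of
the round trip (illustrative, not verbatim kernel code):

	/* After every VM-Exit (see svm_vcpu_run): PDPTRs become stale. */
	vcpu->arch.regs_avail &= ~SVM_REGS_LAZY_LOAD_SET;

	/* The first consumer then triggers an on-demand reload: */
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		svm_cache_reg(vcpu, VCPU_EXREG_PDPTR);	/* -> load_pdptrs() */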
+
static inline void vmcb_set_intercept(struct vmcb_control_area *control, u32 bit)
{
WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
@@ -303,6 +347,12 @@ static inline bool vmcb_is_intercept(struct vmcb_control_area *control, u32 bit)
return test_bit(bit, (unsigned long *)&control->intercepts);
}
+static inline bool vmcb12_is_intercept(struct vmcb_ctrl_area_cached *control, u32 bit)
+{
+ WARN_ON_ONCE(bit >= 32 * MAX_INTERCEPT);
+ return test_bit(bit, (unsigned long *)&control->intercepts);
+}
+
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
struct vmcb *vmcb = svm->vmcb01.ptr;
@@ -439,6 +489,8 @@ void svm_set_gif(struct vcpu_svm *svm, bool value);
int svm_invoke_exit_handler(struct kvm_vcpu *vcpu, u64 exit_code);
void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
int read, int write);
+void svm_complete_interrupt_delivery(struct kvm_vcpu *vcpu, int delivery_mode,
+ int trig_mode, int vec);
/* nested.c */
@@ -455,22 +507,22 @@ static inline bool nested_svm_virtualize_tpr(struct kvm_vcpu *vcpu)
static inline bool nested_exit_on_smi(struct vcpu_svm *svm)
{
- return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
+ return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SMI);
}
static inline bool nested_exit_on_intr(struct vcpu_svm *svm)
{
- return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
+ return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INTR);
}
static inline bool nested_exit_on_nmi(struct vcpu_svm *svm)
{
- return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
+ return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_NMI);
}
int enter_svm_guest_mode(struct kvm_vcpu *vcpu,
u64 vmcb_gpa, struct vmcb *vmcb12, bool from_vmrun);
-void svm_leave_nested(struct vcpu_svm *svm);
+void svm_leave_nested(struct kvm_vcpu *vcpu);
void svm_free_nested(struct vcpu_svm *svm);
int svm_allocate_nested(struct vcpu_svm *svm);
int nested_svm_vmrun(struct kvm_vcpu *vcpu);
@@ -494,8 +546,10 @@ int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
int nested_svm_exit_special(struct vcpu_svm *svm);
void nested_svm_update_tsc_ratio_msr(struct kvm_vcpu *vcpu);
void svm_write_tsc_multiplier(struct kvm_vcpu *vcpu, u64 multiplier);
-void nested_load_control_from_vmcb12(struct vcpu_svm *svm,
- struct vmcb_control_area *control);
+void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm,
+ struct vmcb_control_area *control);
+void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm,
+ struct vmcb_save_area *save);
void nested_sync_control_from_vmcb02(struct vcpu_svm *svm);
void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm);
void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb);
@@ -504,28 +558,6 @@ extern struct kvm_x86_nested_ops svm_nested_ops;
/* avic.c */
-#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK (0xFF)
-#define AVIC_LOGICAL_ID_ENTRY_VALID_BIT 31
-#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK (1 << 31)
-
-#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK (0xFFULL)
-#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK (0xFFFFFFFFFFULL << 12)
-#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK (1ULL << 62)
-#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK (1ULL << 63)
-
-#define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL
-
-static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
-{
- struct vcpu_svm *svm = to_svm(vcpu);
- u64 *entry = svm->avic_physical_id_cache;
-
- if (!entry)
- return false;
-
- return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
-}
-
int avic_ga_log_notifier(u32 ga_tag);
void avic_vm_destroy(struct kvm *kvm);
int avic_vm_init(struct kvm *kvm);
@@ -542,12 +574,12 @@ bool svm_check_apicv_inhibit_reasons(ulong bit);
void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr);
void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr);
-int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec);
bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu);
int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
-void svm_vcpu_blocking(struct kvm_vcpu *vcpu);
-void svm_vcpu_unblocking(struct kvm_vcpu *vcpu);
+void avic_vcpu_blocking(struct kvm_vcpu *vcpu);
+void avic_vcpu_unblocking(struct kvm_vcpu *vcpu);
+void avic_ring_doorbell(struct kvm_vcpu *vcpu);
/* sev.c */
diff --git a/arch/x86/kvm/svm/svm_onhyperv.h b/arch/x86/kvm/svm/svm_onhyperv.h
index c53b8bf8d013..489ca56212c6 100644
--- a/arch/x86/kvm/svm/svm_onhyperv.h
+++ b/arch/x86/kvm/svm/svm_onhyperv.h
@@ -46,6 +46,9 @@ static inline void svm_hv_init_vmcb(struct vmcb *vmcb)
if (npt_enabled &&
ms_hyperv.nested_features & HV_X64_NESTED_ENLIGHTENED_TLB)
hve->hv_enlightenments_control.enlightened_npt_tlb = 1;
+
+ if (ms_hyperv.nested_features & HV_X64_NESTED_MSR_BITMAP)
+ hve->hv_enlightenments_control.msr_bitmap = 1;
}
static inline void svm_hv_hardware_setup(void)
@@ -83,14 +86,7 @@ static inline void svm_hv_vmcb_dirty_nested_enlightenments(
struct hv_enlightenments *hve =
(struct hv_enlightenments *)vmcb->control.reserved_sw;
- /*
- * vmcb can be NULL if called during early vcpu init.
- * And its okay not to mark vmcb dirty during vcpu init
- * as we mark it dirty unconditionally towards end of vcpu
- * init phase.
- */
- if (vmcb_is_clean(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS) &&
- hve->hv_enlightenments_control.msr_bitmap)
+ if (hve->hv_enlightenments_control.msr_bitmap)
vmcb_mark_dirty(vmcb, VMCB_HV_NESTED_ENLIGHTENMENTS);
}
diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S
index 4fa17df123cd..dfaeb47fcf2a 100644
--- a/arch/x86/kvm/svm/vmenter.S
+++ b/arch/x86/kvm/svm/vmenter.S
@@ -148,7 +148,7 @@ SYM_FUNC_START(__svm_vcpu_run)
pop %edi
#endif
pop %_ASM_BP
- ret
+ RET
3: cmpb $0, kvm_rebooting
jne 2b
@@ -202,7 +202,7 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
pop %edi
#endif
pop %_ASM_BP
- ret
+ RET
3: cmpb $0, kvm_rebooting
jne 2b
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
index 953b0fcb21ee..92e6f6702f00 100644
--- a/arch/x86/kvm/trace.h
+++ b/arch/x86/kvm/trace.h
@@ -1356,6 +1356,30 @@ TRACE_EVENT(kvm_apicv_update_request,
__entry->bit)
);
+TRACE_EVENT(kvm_apicv_accept_irq,
+ TP_PROTO(__u32 apicid, __u16 dm, __u16 tm, __u8 vec),
+ TP_ARGS(apicid, dm, tm, vec),
+
+ TP_STRUCT__entry(
+ __field( __u32, apicid )
+ __field( __u16, dm )
+ __field( __u16, tm )
+ __field( __u8, vec )
+ ),
+
+ TP_fast_assign(
+ __entry->apicid = apicid;
+ __entry->dm = dm;
+ __entry->tm = tm;
+ __entry->vec = vec;
+ ),
+
+ TP_printk("apicid %x vec %u (%s|%s)",
+ __entry->apicid, __entry->vec,
+ __print_symbolic((__entry->dm >> 8 & 0x7), kvm_deliver_mode),
+ __entry->tm ? "level" : "edge")
+);
+
/*
* Tracepoint for AMD AVIC
*/
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 4705ad55abb5..3f430e218375 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -5,6 +5,7 @@
#include <asm/vmx.h>
#include "lapic.h"
+#include "x86.h"
extern bool __read_mostly enable_vpid;
extern bool __read_mostly flexpriority_enabled;
@@ -53,7 +54,6 @@ struct nested_vmx_msrs {
struct vmcs_config {
int size;
- int order;
u32 basic_cap;
u32 revision_id;
u32 pin_based_exec_ctrl;
@@ -312,6 +312,15 @@ static inline bool cpu_has_vmx_ept_1g_page(void)
return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
}
+static inline int ept_caps_to_lpage_level(u32 ept_caps)
+{
+ if (ept_caps & VMX_EPT_1GB_PAGE_BIT)
+ return PG_LEVEL_1G;
+ if (ept_caps & VMX_EPT_2MB_PAGE_BIT)
+ return PG_LEVEL_2M;
+ return PG_LEVEL_4K;
+}
+
static inline bool cpu_has_vmx_ept_ad_bits(void)
{
return vmx_capability.ept & VMX_EPT_AD_BIT;
@@ -380,6 +389,9 @@ static inline u64 vmx_get_perf_capabilities(void)
{
u64 perf_cap = 0;
+ if (!enable_pmu)
+ return perf_cap;
+
if (boot_cpu_has(X86_FEATURE_PDCM))
rdmsrl(MSR_IA32_PERF_CAPABILITIES, perf_cap);
diff --git a/arch/x86/kvm/vmx/evmcs.c b/arch/x86/kvm/vmx/evmcs.c
index ba6f99f584ac..87e3dc10edf4 100644
--- a/arch/x86/kvm/vmx/evmcs.c
+++ b/arch/x86/kvm/vmx/evmcs.c
@@ -12,8 +12,6 @@
DEFINE_STATIC_KEY_FALSE(enable_evmcs);
-#if IS_ENABLED(CONFIG_HYPERV)
-
#define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x)
#define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \
{EVMCS1_OFFSET(name), clean_field}
@@ -296,6 +294,7 @@ const struct evmcs_field vmcs_field_to_evmcs_1[] = {
};
const unsigned int nr_evmcs_1_fields = ARRAY_SIZE(vmcs_field_to_evmcs_1);
+#if IS_ENABLED(CONFIG_HYPERV)
__init void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
{
vmcs_conf->pin_based_exec_ctrl &= ~EVMCS1_UNSUPPORTED_PINCTRL;
@@ -362,6 +361,7 @@ void nested_evmcs_filter_control_msr(u32 msr_index, u64 *pdata)
case MSR_IA32_VMX_PROCBASED_CTLS2:
ctl_high &= ~EVMCS1_UNSUPPORTED_2NDEXEC;
break;
+ case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
case MSR_IA32_VMX_PINBASED_CTLS:
ctl_high &= ~EVMCS1_UNSUPPORTED_PINCTRL;
break;
diff --git a/arch/x86/kvm/vmx/evmcs.h b/arch/x86/kvm/vmx/evmcs.h
index 16731d2cf231..8d70f9aea94b 100644
--- a/arch/x86/kvm/vmx/evmcs.h
+++ b/arch/x86/kvm/vmx/evmcs.h
@@ -59,12 +59,12 @@ DECLARE_STATIC_KEY_FALSE(enable_evmcs);
SECONDARY_EXEC_SHADOW_VMCS | \
SECONDARY_EXEC_TSC_SCALING | \
SECONDARY_EXEC_PAUSE_LOOP_EXITING)
-#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
+#define EVMCS1_UNSUPPORTED_VMEXIT_CTRL \
+ (VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \
+ VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
#define EVMCS1_UNSUPPORTED_VMENTRY_CTRL (VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
#define EVMCS1_UNSUPPORTED_VMFUNC (VMX_VMFUNC_EPTP_SWITCHING)
-#if IS_ENABLED(CONFIG_HYPERV)
-
struct evmcs_field {
u16 offset;
u16 clean_field;
@@ -73,26 +73,56 @@ struct evmcs_field {
extern const struct evmcs_field vmcs_field_to_evmcs_1[];
extern const unsigned int nr_evmcs_1_fields;
-static __always_inline int get_evmcs_offset(unsigned long field,
- u16 *clean_field)
+static __always_inline int evmcs_field_offset(unsigned long field,
+ u16 *clean_field)
{
unsigned int index = ROL16(field, 6);
const struct evmcs_field *evmcs_field;
- if (unlikely(index >= nr_evmcs_1_fields)) {
- WARN_ONCE(1, "KVM: accessing unsupported EVMCS field %lx\n",
- field);
+ if (unlikely(index >= nr_evmcs_1_fields))
return -ENOENT;
- }
evmcs_field = &vmcs_field_to_evmcs_1[index];
+ /*
+ * Use offset=0 to detect holes in eVMCS. This offset belongs to
+ * 'revision_id' but this field has no encoding and is supposed to
+ * be accessed directly.
+ */
+ if (unlikely(!evmcs_field->offset))
+ return -ENOENT;
+
if (clean_field)
*clean_field = evmcs_field->clean_field;
return evmcs_field->offset;
}
+static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs,
+ unsigned long field, u16 offset)
+{
+ /*
+	 * vmcs12_read_any() doesn't care whether the supplied structure
+	 * is 'struct vmcs12' or 'struct hv_enlightened_vmcs' as it takes
+	 * the exact offset of the required field; use it here for
+	 * convenience.
+ */
+ return vmcs12_read_any((void *)evmcs, field, offset);
+}
+
+#if IS_ENABLED(CONFIG_HYPERV)
+
+static __always_inline int get_evmcs_offset(unsigned long field,
+ u16 *clean_field)
+{
+ int offset = evmcs_field_offset(field, clean_field);
+
+ WARN_ONCE(offset < 0, "KVM: accessing unsupported EVMCS field %lx\n",
+ field);
+
+ return offset;
+}
+
static __always_inline void evmcs_write64(unsigned long field, u64 value)
{
u16 clean_field;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 9c941535f78c..ba34e94049c7 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -7,6 +7,7 @@
#include <asm/mmu_context.h>
#include "cpuid.h"
+#include "evmcs.h"
#include "hyperv.h"
#include "mmu.h"
#include "nested.h"
@@ -245,7 +246,8 @@ static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
src = &prev->host_state;
dest = &vmx->loaded_vmcs->host_state;
- vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
+ vmx_set_vmcs_host_state(dest, src->cr3, src->fs_sel, src->gs_sel,
+ src->fs_base, src->gs_base);
dest->ldt_sel = src->ldt_sel;
#ifdef CONFIG_X86_64
dest->ds_sel = src->ds_sel;
@@ -269,7 +271,13 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
vmx_sync_vmcs_host_state(vmx, prev);
put_cpu();
- vmx_register_cache_reset(vcpu);
+ vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET;
+
+ /*
+ * All lazily updated registers will be reloaded from VMCS12 on both
+ * vmentry and vmexit.
+ */
+ vcpu->arch.regs_dirty = 0;
}
/*
@@ -391,9 +399,11 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
{
- kvm_init_shadow_ept_mmu(vcpu,
- to_vmx(vcpu)->nested.msrs.ept_caps &
- VMX_EPT_EXECUTE_ONLY_BIT,
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT;
+ int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps);
+
+ kvm_init_shadow_ept_mmu(vcpu, execonly, ept_lpage_level,
nested_ept_ad_enabled(vcpu),
nested_ept_get_eptp(vcpu));
}
@@ -591,6 +601,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
int msr;
unsigned long *msr_bitmap_l1;
unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
+ struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
struct kvm_host_map *map = &vmx->nested.msr_bitmap_map;
/* Nothing to do if the MSR bitmap is not in use. */
@@ -598,6 +609,19 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
return false;
+ /*
+ * MSR bitmap update can be skipped when:
+ * - MSR bitmap for L1 hasn't changed.
+ * - Nested hypervisor (L1) is attempting to launch the same L2 as
+ * before.
+ * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature
+ * and tells KVM (L0) there were no changes in MSR bitmap for L2.
+ */
+ if (!vmx->nested.force_msr_bitmap_recalc && evmcs &&
+ evmcs->hv_enlightenments_control.msr_bitmap &&
+ evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP)
+ return true;
+
if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
return false;
@@ -664,6 +688,8 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false);
+ vmx->nested.force_msr_bitmap_recalc = false;
+
return true;
}
@@ -1095,7 +1121,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
* must not be dereferenced.
*/
if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
- CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
+ CC(!load_pdptrs(vcpu, cr3))) {
*entry_failure_code = ENTRY_FAIL_PDPTE;
return -EINVAL;
}
@@ -1104,7 +1130,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
kvm_mmu_new_pgd(vcpu, cr3);
vcpu->arch.cr3 = cr3;
- kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
/* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
kvm_init_mmu(vcpu);
@@ -2021,10 +2047,13 @@ static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
* Clean fields data can't be used on VMLAUNCH and when we switch
* between different L2 guests as KVM keeps a single VMCS12 per L1.
*/
- if (from_launch || evmcs_gpa_changed)
+ if (from_launch || evmcs_gpa_changed) {
vmx->nested.hv_evmcs->hv_clean_fields &=
~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
+ vmx->nested.force_msr_bitmap_recalc = true;
+ }
+
return EVMPTRLD_SUCCEEDED;
}
@@ -3027,7 +3056,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long cr3, cr4;
+ unsigned long cr4;
bool vm_fail;
if (!nested_early_check)
@@ -3050,12 +3079,6 @@ static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
*/
vmcs_writel(GUEST_RFLAGS, 0);
- cr3 = __get_current_cr3_fast();
- if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
- vmcs_writel(HOST_CR3, cr3);
- vmx->loaded_vmcs->host_state.cr3 = cr3;
- }
-
cr4 = cr4_read_shadow();
if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
vmcs_writel(HOST_CR4, cr4);
@@ -3145,7 +3168,7 @@ static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
* the guest CR3 might be restored prior to setting the nested
* state which can lead to a load of wrong PDPTRs.
*/
- if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, vcpu->arch.cr3)))
+ if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
return false;
}
@@ -3504,10 +3527,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
if (evmptrld_status == EVMPTRLD_ERROR) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
- } else if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) {
- return nested_vmx_failInvalid(vcpu);
}
+ kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
+
+ if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
+ return nested_vmx_failInvalid(vcpu);
+
if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
vmx->nested.current_vmptr == INVALID_GPA))
return nested_vmx_failInvalid(vcpu);
@@ -3603,7 +3629,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
!(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) &&
(vmcs12->guest_rflags & X86_EFLAGS_IF))) {
vmx->nested.nested_run_pending = 0;
- return kvm_vcpu_halt(vcpu);
+ return kvm_emulate_halt_noskip(vcpu);
}
break;
case GUEST_ACTIVITY_WAIT_SIPI:
@@ -4826,18 +4852,20 @@ static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
/*
- * We should allocate a shadow vmcs for vmcs01 only when L1
- * executes VMXON and free it when L1 executes VMXOFF.
- * As it is invalid to execute VMXON twice, we shouldn't reach
- * here when vmcs01 already have an allocated shadow vmcs.
+ * KVM allocates a shadow VMCS only when L1 executes VMXON and frees it
+ * when L1 executes VMXOFF or the vCPU is forced out of nested
+ * operation. VMXON faults if the CPU is already post-VMXON, so it
+ * should be impossible to already have an allocated shadow VMCS. KVM
+ * doesn't support virtualization of VMCS shadowing, so vmcs01 should
+ * always be the loaded VMCS.
*/
- WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
+ if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs))
+ return loaded_vmcs->shadow_vmcs;
+
+ loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
+ if (loaded_vmcs->shadow_vmcs)
+ vmcs_clear(loaded_vmcs->shadow_vmcs);
- if (!loaded_vmcs->shadow_vmcs) {
- loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
- if (loaded_vmcs->shadow_vmcs)
- vmcs_clear(loaded_vmcs->shadow_vmcs);
- }
return loaded_vmcs->shadow_vmcs;
}
@@ -5074,27 +5102,49 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
if (!nested_vmx_check_permission(vcpu))
return 1;
- /*
- * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
- * any VMREAD sets the ALU flags for VMfailInvalid.
- */
- if (vmx->nested.current_vmptr == INVALID_GPA ||
- (is_guest_mode(vcpu) &&
- get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
- return nested_vmx_failInvalid(vcpu);
-
/* Decode instruction info and find the field to read */
field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
- offset = vmcs_field_to_offset(field);
- if (offset < 0)
- return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+ if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
+ /*
+ * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
+ * any VMREAD sets the ALU flags for VMfailInvalid.
+ */
+ if (vmx->nested.current_vmptr == INVALID_GPA ||
+ (is_guest_mode(vcpu) &&
+ get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
+ return nested_vmx_failInvalid(vcpu);
- if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
- copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
+ offset = get_vmcs12_field_offset(field);
+ if (offset < 0)
+ return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+
+ if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
+ copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
+
+ /* Read the field, zero-extended to a u64 value */
+ value = vmcs12_read_any(vmcs12, field, offset);
+ } else {
+ /*
+ * Hyper-V TLFS (as of 6.0b) explicitly states, that while an
+		 * The Hyper-V TLFS (as of 6.0b) explicitly states that while an
+		 * enlightened VMCS is active, VMREAD/VMWRITE instructions are
+		 * unsupported.  Unfortunately, certain versions of Windows 11
+		 * don't comply with this requirement, which is not enforced by
+		 * genuine Hyper-V.  As a workaround, allow VMREAD from an
+		 * enlightened VMCS, as misbehaving guests would otherwise panic
+		 * on the VM-Fail.  Note, an enlightened VMCS is incompatible
+		 * with shadow VMCS, so all VMREADs from L2 should go to L1.
+ if (WARN_ON_ONCE(is_guest_mode(vcpu)))
+ return nested_vmx_failInvalid(vcpu);
- /* Read the field, zero-extended to a u64 value */
- value = vmcs12_read_any(vmcs12, field, offset);
+ offset = evmcs_field_offset(field, NULL);
+ if (offset < 0)
+ return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
+
+ /* Read the field, zero-extended to a u64 value */
+ value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset);
+ }
/*
* Now copy part of this value to register or memory, as requested.
@@ -5189,7 +5239,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
- offset = vmcs_field_to_offset(field);
+ offset = get_vmcs12_field_offset(field);
if (offset < 0)
return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
@@ -5258,6 +5308,7 @@ static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
vmx->nested.need_vmcs12_to_shadow_sync = true;
}
vmx->nested.dirty_vmcs12 = true;
+ vmx->nested.force_msr_bitmap_recalc = true;
}
/* Emulate the VMPTRLD instruction */
@@ -6393,6 +6444,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
goto error_guest_mode;
vmx->nested.dirty_vmcs12 = true;
+ vmx->nested.force_msr_bitmap_recalc = true;
ret = nested_vmx_enter_non_root_mode(vcpu, false);
if (ret)
goto error_guest_mode;
@@ -6435,7 +6487,7 @@ static u64 nested_vmx_calc_vmcs_enum_msr(void)
max_idx = 0;
for (i = 0; i < nr_vmcs12_fields; i++) {
/* The vmcs12 table is very, very sparsely populated. */
- if (!vmcs_field_to_offset_table[i])
+ if (!vmcs12_field_offsets[i])
continue;
idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
@@ -6744,6 +6796,7 @@ __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
}
struct kvm_x86_nested_ops vmx_nested_ops = {
+ .leave_nested = vmx_leave_nested,
.check_events = vmx_check_nested_events,
.hv_timer_pending = nested_vmx_preemption_timer_pending,
.triple_fault = nested_vmx_triple_fault,
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index 1b7456b2177b..466d18fc0c5d 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -21,7 +21,6 @@
#define MSR_PMC_FULL_WIDTH_BIT (MSR_IA32_PMC0 - MSR_IA32_PERFCTR0)
static struct kvm_event_hw_type_mapping intel_arch_events[] = {
- /* Index must match CPUID 0x0A.EBX bit vector */
[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
@@ -29,6 +28,7 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+ /* The above index must match CPUID 0x0A.EBX bit vector */
[7] = { 0x00, 0x03, PERF_COUNT_HW_REF_CPU_CYCLES },
};
@@ -68,34 +68,29 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
reprogram_counter(pmu, bit);
}
-static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
- u8 event_select,
- u8 unit_mask)
+static unsigned int intel_pmc_perf_hw_id(struct kvm_pmc *pmc)
{
+ struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+ u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+ u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
int i;
- for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
- if (intel_arch_events[i].eventsel == event_select
- && intel_arch_events[i].unit_mask == unit_mask
- && (pmu->available_event_types & (1 << i)))
- break;
-
- if (i == ARRAY_SIZE(intel_arch_events))
- return PERF_COUNT_HW_MAX;
+ for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++) {
+ if (intel_arch_events[i].eventsel != event_select ||
+ intel_arch_events[i].unit_mask != unit_mask)
+ continue;
- return intel_arch_events[i].event_type;
-}
+		/* disable events reported as not present by CPUID */
+ if ((i < 7) && !(pmu->available_event_types & (1 << i)))
+ return PERF_COUNT_HW_MAX + 1;
-static unsigned intel_find_fixed_event(int idx)
-{
- u32 event;
- size_t size = ARRAY_SIZE(fixed_pmc_events);
+ break;
+ }
- if (idx >= size)
+ if (i == ARRAY_SIZE(intel_arch_events))
return PERF_COUNT_HW_MAX;
- event = fixed_pmc_events[array_index_nospec(idx, size)];
- return intel_arch_events[event].event_type;
+ return intel_arch_events[i].event_type;
}
/* check if a PMC is enabled by comparing it with globl_ctrl bits. */
@@ -459,6 +454,21 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
}
+static void setup_fixed_pmc_eventsel(struct kvm_pmu *pmu)
+{
+ size_t size = ARRAY_SIZE(fixed_pmc_events);
+ struct kvm_pmc *pmc;
+ u32 event;
+ int i;
+
+ for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+ pmc = &pmu->fixed_counters[i];
+ event = fixed_pmc_events[array_index_nospec(i, size)];
+ pmc->eventsel = (intel_arch_events[event].unit_mask << 8) |
+ intel_arch_events[event].eventsel;
+ }
+}
+
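To make the encoding concrete, assume fixed counter 0 maps to the architectural instructions-retired event, i.e. fixed_pmc_events[0] == 1 (an assumption; the table's contents are not shown in this hunk). The loop above then computes:

	/* Hypothetical walk-through for fixed counter 0: */
	event = fixed_pmc_events[0];		/* -> 1 (assumed) */
	/* intel_arch_events[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS } */
	pmc->eventsel = (0x00 << 8) | 0xc0;	/* == 0xc0 */

Pre-computing eventsel this way lets intel_pmc_perf_hw_id() match fixed counters against the same event table used for general-purpose counters.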
static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
{
struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -477,7 +487,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->reserved_bits = 0xffffffff00200000ull;
entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
- if (!entry)
+ if (!entry || !enable_pmu)
return;
eax.full = entry->eax;
edx.full = entry->edx;
@@ -500,12 +510,14 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->nr_arch_fixed_counters = 0;
} else {
pmu->nr_arch_fixed_counters =
- min_t(int, edx.split.num_counters_fixed,
- x86_pmu.num_counters_fixed);
+ min3(ARRAY_SIZE(fixed_pmc_events),
+ (size_t) edx.split.num_counters_fixed,
+ (size_t) x86_pmu.num_counters_fixed);
edx.split.bit_width_fixed = min_t(int,
edx.split.bit_width_fixed, x86_pmu.bit_width_fixed);
pmu->counter_bitmask[KVM_PMC_FIXED] =
((u64)1 << edx.split.bit_width_fixed) - 1;
+ setup_fixed_pmc_eventsel(pmu);
}
pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
@@ -703,8 +715,7 @@ static void intel_pmu_cleanup(struct kvm_vcpu *vcpu)
}
struct kvm_pmu_ops intel_pmu_ops = {
- .find_arch_event = intel_find_arch_event,
- .find_fixed_event = intel_find_fixed_event,
+ .pmc_perf_hw_id = intel_pmc_perf_hw_id,
.pmc_is_enabled = intel_pmc_is_enabled,
.pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
.rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
diff --git a/arch/x86/kvm/vmx/posted_intr.c b/arch/x86/kvm/vmx/posted_intr.c
index 1c94783b5a54..aa1fe9085d77 100644
--- a/arch/x86/kvm/vmx/posted_intr.c
+++ b/arch/x86/kvm/vmx/posted_intr.c
@@ -11,58 +11,109 @@
#include "vmx.h"
/*
- * We maintain a per-CPU linked-list of vCPU, so in wakeup_handler() we
- * can find which vCPU should be waken up.
+ * Maintain a per-CPU list of vCPUs that need to be awakened by wakeup_handler()
+ * when a WAKEUP_VECTOR interrupt is posted.  vCPUs are added to the list when
+ * the vCPU is scheduled out and is blocking (e.g. in HLT) with IRQs enabled.
+ * The vCPU's posted interrupt descriptor is updated at the same time to set
+ * its notification vector to WAKEUP_VECTOR, so that posted interrupts from
+ * devices wake the target vCPUs.  vCPUs are removed from the list and the
+ * notification vector is reset when the vCPU is scheduled in.
*/
-static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
-static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
+static DEFINE_PER_CPU(struct list_head, wakeup_vcpus_on_cpu);
+/*
+ * Protect the per-CPU list with a per-CPU spinlock to handle task migration.
+ * When a blocking vCPU is awakened _and_ migrated to a different pCPU, the
+ * ->sched_in() path will need to take the vCPU off the list of the _previous_
+ * CPU. IRQs must be disabled when taking this lock, otherwise deadlock will
+ * occur if a wakeup IRQ arrives and attempts to acquire the lock.
+ */
+static DEFINE_PER_CPU(raw_spinlock_t, wakeup_vcpus_on_cpu_lock);
static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
return &(to_vmx(vcpu)->pi_desc);
}
+static int pi_try_set_control(struct pi_desc *pi_desc, u64 old, u64 new)
+{
+ /*
+ * PID.ON can be set at any time by a different vCPU or by hardware,
+ * e.g. a device. PID.control must be written atomically, and the
+	 * update must be retried with a fresh snapshot if an ON change
+	 * causes the cmpxchg to fail.
+ */
+ if (cmpxchg64(&pi_desc->control, old, new) != old)
+ return -EBUSY;
+
+ return 0;
+}
+
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
struct pi_desc old, new;
+ unsigned long flags;
unsigned int dest;
/*
- * In case of hot-plug or hot-unplug, we may have to undo
- * vmx_vcpu_pi_put even if there is no assigned device. And we
- * always keep PI.NDST up to date for simplicity: it makes the
- * code easier, and CPU migration is not a fast path.
+ * To simplify hot-plug and dynamic toggling of APICv, keep PI.NDST and
+ * PI.SN up-to-date even if there is no assigned device or if APICv is
+	 * deactivated due to a dynamic inhibit bit, e.g. for Hyper-V's SynIC.
*/
- if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu)
+ if (!enable_apicv || !lapic_in_kernel(vcpu))
return;
/*
- * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change
- * PI.NDST: pi_post_block is the one expected to change PID.NDST and the
- * wakeup handler expects the vCPU to be on the blocked_vcpu_list that
- * matches PI.NDST. Otherwise, a vcpu may not be able to be woken up
- * correctly.
+ * If the vCPU wasn't on the wakeup list and wasn't migrated, then the
+ * full update can be skipped as neither the vector nor the destination
+ * needs to be changed.
*/
- if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || vcpu->cpu == cpu) {
- pi_clear_sn(pi_desc);
- goto after_clear_sn;
+ if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR && vcpu->cpu == cpu) {
+ /*
+ * Clear SN if it was set due to being preempted. Again, do
+ * this even if there is no assigned device for simplicity.
+ */
+ if (pi_test_and_clear_sn(pi_desc))
+ goto after_clear_sn;
+ return;
}
- /* The full case. */
- do {
- old.control = new.control = pi_desc->control;
+ local_irq_save(flags);
- dest = cpu_physical_id(cpu);
+ /*
+ * If the vCPU was waiting for wakeup, remove the vCPU from the wakeup
+ * list of the _previous_ pCPU, which will not be the same as the
+ * current pCPU if the task was migrated.
+ */
+ if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR) {
+ raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
+ list_del(&vmx->pi_wakeup_list);
+ raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
+ }
- if (x2apic_mode)
- new.ndst = dest;
- else
- new.ndst = (dest << 8) & 0xFF00;
+ dest = cpu_physical_id(cpu);
+ if (!x2apic_mode)
+ dest = (dest << 8) & 0xFF00;
+
+ do {
+ old.control = new.control = READ_ONCE(pi_desc->control);
+ /*
+ * Clear SN (as above) and refresh the destination APIC ID to
+ * handle task migration (@cpu != vcpu->cpu).
+ */
+ new.ndst = dest;
new.sn = 0;
- } while (cmpxchg64(&pi_desc->control, old.control,
- new.control) != old.control);
+
+ /*
+ * Restore the notification vector; in the blocking case, the
+ * descriptor was modified on "put" to use the wakeup vector.
+ */
+ new.nv = POSTED_INTR_VECTOR;
+ } while (pi_try_set_control(pi_desc, old.control, new.control));
+
+ local_irq_restore(flags);
after_clear_sn:
@@ -85,126 +136,64 @@ static bool vmx_can_use_vtd_pi(struct kvm *kvm)
irq_remapping_cap(IRQ_POSTING_CAP);
}
-void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
-{
- struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
-
- if (!vmx_can_use_vtd_pi(vcpu->kvm))
- return;
-
- /* Set SN when the vCPU is preempted */
- if (vcpu->preempted)
- pi_set_sn(pi_desc);
-}
-
-static void __pi_post_block(struct kvm_vcpu *vcpu)
-{
- struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
- struct pi_desc old, new;
- unsigned int dest;
-
- do {
- old.control = new.control = pi_desc->control;
- WARN(old.nv != POSTED_INTR_WAKEUP_VECTOR,
- "Wakeup handler not enabled while the VCPU is blocked\n");
-
- dest = cpu_physical_id(vcpu->cpu);
-
- if (x2apic_mode)
- new.ndst = dest;
- else
- new.ndst = (dest << 8) & 0xFF00;
-
- /* set 'NV' to 'notification vector' */
- new.nv = POSTED_INTR_VECTOR;
- } while (cmpxchg64(&pi_desc->control, old.control,
- new.control) != old.control);
-
- if (!WARN_ON_ONCE(vcpu->pre_pcpu == -1)) {
- spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
- list_del(&vcpu->blocked_vcpu_list);
- spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
- vcpu->pre_pcpu = -1;
- }
-}
-
/*
- * This routine does the following things for vCPU which is going
- * to be blocked if VT-d PI is enabled.
- * - Store the vCPU to the wakeup list, so when interrupts happen
- * we can find the right vCPU to wake up.
- * - Change the Posted-interrupt descriptor as below:
- * 'NDST' <-- vcpu->pre_pcpu
- * 'NV' <-- POSTED_INTR_WAKEUP_VECTOR
- * - If 'ON' is set during this process, which means at least one
- * interrupt is posted for this vCPU, we cannot block it, in
- * this case, return 1, otherwise, return 0.
- *
+ * Put the vCPU on this pCPU's list of vCPUs that need to be awakened and set
+ * WAKEUP as the notification vector in the PI descriptor.
*/
-int pi_pre_block(struct kvm_vcpu *vcpu)
+static void pi_enable_wakeup_handler(struct kvm_vcpu *vcpu)
{
- unsigned int dest;
- struct pi_desc old, new;
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+ struct pi_desc old, new;
+ unsigned long flags;
- if (!vmx_can_use_vtd_pi(vcpu->kvm))
- return 0;
-
- WARN_ON(irqs_disabled());
- local_irq_disable();
- if (!WARN_ON_ONCE(vcpu->pre_pcpu != -1)) {
- vcpu->pre_pcpu = vcpu->cpu;
- spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
- list_add_tail(&vcpu->blocked_vcpu_list,
- &per_cpu(blocked_vcpu_on_cpu,
- vcpu->pre_pcpu));
- spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, vcpu->pre_pcpu));
- }
-
- do {
- old.control = new.control = pi_desc->control;
+ local_irq_save(flags);
- WARN((pi_desc->sn == 1),
- "Warning: SN field of posted-interrupts "
- "is set before blocking\n");
+ raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
+ list_add_tail(&vmx->pi_wakeup_list,
+ &per_cpu(wakeup_vcpus_on_cpu, vcpu->cpu));
+ raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, vcpu->cpu));
- /*
- * Since vCPU can be preempted during this process,
- * vcpu->cpu could be different with pre_pcpu, we
- * need to set pre_pcpu as the destination of wakeup
- * notification event, then we can find the right vCPU
- * to wakeup in wakeup handler if interrupts happen
- * when the vCPU is in blocked state.
- */
- dest = cpu_physical_id(vcpu->pre_pcpu);
+ WARN(pi_desc->sn, "PI descriptor SN field set before blocking");
- if (x2apic_mode)
- new.ndst = dest;
- else
- new.ndst = (dest << 8) & 0xFF00;
+ do {
+ old.control = new.control = READ_ONCE(pi_desc->control);
/* set 'NV' to 'wakeup vector' */
new.nv = POSTED_INTR_WAKEUP_VECTOR;
- } while (cmpxchg64(&pi_desc->control, old.control,
- new.control) != old.control);
+ } while (pi_try_set_control(pi_desc, old.control, new.control));
- /* We should not block the vCPU if an interrupt is posted for it. */
- if (pi_test_on(pi_desc) == 1)
- __pi_post_block(vcpu);
+ /*
+ * Send a wakeup IPI to this CPU if an interrupt may have been posted
+ * before the notification vector was updated, in which case the IRQ
+ * will arrive on the non-wakeup vector. An IPI is needed as calling
+ * try_to_wake_up() from ->sched_out() isn't allowed (IRQs are not
+ * enabled until it is safe to call try_to_wake_up() on the task being
+ * scheduled out).
+ */
+ if (pi_test_on(&new))
+ apic->send_IPI_self(POSTED_INTR_WAKEUP_VECTOR);
- local_irq_enable();
- return (vcpu->pre_pcpu == -1);
+ local_irq_restore(flags);
}
-void pi_post_block(struct kvm_vcpu *vcpu)
+void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
{
- if (vcpu->pre_pcpu == -1)
+ struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+
+ if (!vmx_can_use_vtd_pi(vcpu->kvm))
return;
- WARN_ON(irqs_disabled());
- local_irq_disable();
- __pi_post_block(vcpu);
- local_irq_enable();
+ if (kvm_vcpu_is_blocking(vcpu) && !vmx_interrupt_blocked(vcpu))
+ pi_enable_wakeup_handler(vcpu);
+
+ /*
+ * Set SN when the vCPU is preempted. Note, the vCPU can both be seen
+ * as blocking and preempted, e.g. if it's preempted between setting
+ * its wait state and manually scheduling out.
+ */
+ if (vcpu->preempted)
+ pi_set_sn(pi_desc);
}
/*
@@ -212,24 +201,23 @@ void pi_post_block(struct kvm_vcpu *vcpu)
*/
void pi_wakeup_handler(void)
{
- struct kvm_vcpu *vcpu;
int cpu = smp_processor_id();
+ struct vcpu_vmx *vmx;
- spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
- list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
- blocked_vcpu_list) {
- struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
+ raw_spin_lock(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu));
+ list_for_each_entry(vmx, &per_cpu(wakeup_vcpus_on_cpu, cpu),
+ pi_wakeup_list) {
- if (pi_test_on(pi_desc) == 1)
- kvm_vcpu_kick(vcpu);
+ if (pi_test_on(&vmx->pi_desc))
+ kvm_vcpu_wake_up(&vmx->vcpu);
}
- spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+ raw_spin_unlock(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu));
}
void __init pi_init_cpu(int cpu)
{
- INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
- spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
+ INIT_LIST_HEAD(&per_cpu(wakeup_vcpus_on_cpu, cpu));
+ raw_spin_lock_init(&per_cpu(wakeup_vcpus_on_cpu_lock, cpu));
}
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
@@ -245,7 +233,7 @@ bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu)
* Bail out of the block loop if the VM has an assigned
* device, but the blocking vCPU didn't reconfigure the
* PI.NV to the wakeup vector, i.e. the assigned device
- * came along after the initial check in pi_pre_block().
+ * came along after the initial check in vmx_vcpu_pi_put().
*/
void vmx_pi_start_assignment(struct kvm *kvm)
{
diff --git a/arch/x86/kvm/vmx/posted_intr.h b/arch/x86/kvm/vmx/posted_intr.h
index 7f7b2326caf5..eb14e76b84ef 100644
--- a/arch/x86/kvm/vmx/posted_intr.h
+++ b/arch/x86/kvm/vmx/posted_intr.h
@@ -40,7 +40,13 @@ static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
(unsigned long *)&pi_desc->control);
}
-static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
+static inline bool pi_test_and_clear_sn(struct pi_desc *pi_desc)
+{
+ return test_and_clear_bit(POSTED_INTR_SN,
+ (unsigned long *)&pi_desc->control);
+}
+
+static inline bool pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}
@@ -74,13 +80,13 @@ static inline void pi_clear_sn(struct pi_desc *pi_desc)
(unsigned long *)&pi_desc->control);
}
-static inline int pi_test_on(struct pi_desc *pi_desc)
+static inline bool pi_test_on(struct pi_desc *pi_desc)
{
return test_bit(POSTED_INTR_ON,
(unsigned long *)&pi_desc->control);
}
-static inline int pi_test_sn(struct pi_desc *pi_desc)
+static inline bool pi_test_sn(struct pi_desc *pi_desc)
{
return test_bit(POSTED_INTR_SN,
(unsigned long *)&pi_desc->control);
@@ -88,8 +94,6 @@ static inline int pi_test_sn(struct pi_desc *pi_desc)
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu);
-int pi_pre_block(struct kvm_vcpu *vcpu);
-void pi_post_block(struct kvm_vcpu *vcpu);
void pi_wakeup_handler(void);
void __init pi_init_cpu(int cpu);
bool pi_has_pending_interrupt(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h
index 6e5de2e2b0da..e325c290a816 100644
--- a/arch/x86/kvm/vmx/vmcs.h
+++ b/arch/x86/kvm/vmx/vmcs.h
@@ -129,6 +129,11 @@ static inline bool is_machine_check(u32 intr_info)
return is_exception_n(intr_info, MC_VECTOR);
}
+static inline bool is_nm_fault(u32 intr_info)
+{
+ return is_exception_n(intr_info, NM_VECTOR);
+}
+
/* Undocumented: icebp/int1 */
static inline bool is_icebp(u32 intr_info)
{
diff --git a/arch/x86/kvm/vmx/vmcs12.c b/arch/x86/kvm/vmx/vmcs12.c
index cab6ba7a5005..2251b60920f8 100644
--- a/arch/x86/kvm/vmx/vmcs12.c
+++ b/arch/x86/kvm/vmx/vmcs12.c
@@ -8,7 +8,7 @@
FIELD(number, name), \
[ROL16(number##_HIGH, 6)] = VMCS12_OFFSET(name) + sizeof(u32)
-const unsigned short vmcs_field_to_offset_table[] = {
+const unsigned short vmcs12_field_offsets[] = {
FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
FIELD(POSTED_INTR_NV, posted_intr_nv),
FIELD(GUEST_ES_SELECTOR, guest_es_selector),
@@ -151,4 +151,4 @@ const unsigned short vmcs_field_to_offset_table[] = {
FIELD(HOST_RSP, host_rsp),
FIELD(HOST_RIP, host_rip),
};
-const unsigned int nr_vmcs12_fields = ARRAY_SIZE(vmcs_field_to_offset_table);
+const unsigned int nr_vmcs12_fields = ARRAY_SIZE(vmcs12_field_offsets);
diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h
index 2a45f026ee11..746129ddd5ae 100644
--- a/arch/x86/kvm/vmx/vmcs12.h
+++ b/arch/x86/kvm/vmx/vmcs12.h
@@ -361,10 +361,10 @@ static inline void vmx_check_vmcs12_offsets(void)
CHECK_OFFSET(guest_pml_index, 996);
}
-extern const unsigned short vmcs_field_to_offset_table[];
+extern const unsigned short vmcs12_field_offsets[];
extern const unsigned int nr_vmcs12_fields;
-static inline short vmcs_field_to_offset(unsigned long field)
+static inline short get_vmcs12_field_offset(unsigned long field)
{
unsigned short offset;
unsigned int index;
@@ -377,7 +377,7 @@ static inline short vmcs_field_to_offset(unsigned long field)
return -ENOENT;
index = array_index_nospec(index, nr_vmcs12_fields);
- offset = vmcs_field_to_offset_table[index];
+ offset = vmcs12_field_offsets[index];
if (offset == 0)
return -ENOENT;
return offset;
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index 3a6461694fc2..435c187927c4 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -49,14 +49,14 @@ SYM_FUNC_START_LOCAL(vmx_vmenter)
je 2f
1: vmresume
- ret
+ RET
2: vmlaunch
- ret
+ RET
3: cmpb $0, kvm_rebooting
je 4f
- ret
+ RET
4: ud2
_ASM_EXTABLE(1b, 3b)
@@ -89,7 +89,7 @@ SYM_FUNC_START(vmx_vmexit)
pop %_ASM_AX
.Lvmexit_skip_rsb:
#endif
- ret
+ RET
SYM_FUNC_END(vmx_vmexit)
/**
@@ -228,7 +228,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
pop %edi
#endif
pop %_ASM_BP
- ret
+ RET
/* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
2: mov $1, %eax
@@ -293,7 +293,7 @@ SYM_FUNC_START(vmread_error_trampoline)
pop %_ASM_AX
pop %_ASM_BP
- ret
+ RET
SYM_FUNC_END(vmread_error_trampoline)
SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
@@ -326,5 +326,5 @@ SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
*/
mov %_ASM_BP, %_ASM_SP
pop %_ASM_BP
- ret
+ RET
SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0dbf94eb954f..efda5e4d6247 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -36,6 +36,7 @@
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/fpu/api.h>
+#include <asm/fpu/xstate.h>
#include <asm/idtentry.h>
#include <asm/io.h>
#include <asm/irq_remapping.h>
@@ -161,6 +162,8 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
MSR_FS_BASE,
MSR_GS_BASE,
MSR_KERNEL_GS_BASE,
+ MSR_IA32_XFD,
+ MSR_IA32_XFD_ERR,
#endif
MSR_IA32_SYSENTER_CS,
MSR_IA32_SYSENTER_ESP,
@@ -602,15 +605,13 @@ static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
unsigned int slot = msr - vmx->guest_uret_msrs;
int ret = 0;
- u64 old_msr_data = msr->data;
- msr->data = data;
if (msr->load_into_hardware) {
preempt_disable();
- ret = kvm_set_user_return_msr(slot, msr->data, msr->mask);
+ ret = kvm_set_user_return_msr(slot, data, msr->mask);
preempt_enable();
- if (ret)
- msr->data = old_msr_data;
}
+ if (!ret)
+ msr->data = data;
return ret;
}
@@ -763,6 +764,14 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu)
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, match);
}
+ /*
+	 * Disabling xfd interception indicates that dynamic xfeatures
+	 * might be used in the guest.  Always trap #NM in this case so
+	 * that the guest's xfd_err is saved promptly.
+ */
+ if (vcpu->arch.xfd_no_write_intercept)
+ eb |= (1u << NM_VECTOR);
+
vmcs_write32(EXCEPTION_BITMAP, eb);
}
@@ -1071,9 +1080,14 @@ static void pt_guest_exit(struct vcpu_vmx *vmx)
wrmsrl(MSR_IA32_RTIT_CTL, vmx->pt_desc.host.ctl);
}
-void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
- unsigned long fs_base, unsigned long gs_base)
+void vmx_set_vmcs_host_state(struct vmcs_host_state *host, unsigned long cr3,
+ u16 fs_sel, u16 gs_sel,
+ unsigned long fs_base, unsigned long gs_base)
{
+ if (unlikely(cr3 != host->cr3)) {
+ vmcs_writel(HOST_CR3, cr3);
+ host->cr3 = cr3;
+ }
if (unlikely(fs_sel != host->fs_sel)) {
if (!(fs_sel & 7))
vmcs_write16(HOST_FS_SELECTOR, fs_sel);
@@ -1168,7 +1182,9 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
gs_base = segment_base(gs_sel);
#endif
- vmx_set_host_fs_gs(host_state, fs_sel, gs_sel, fs_base, gs_base);
+ vmx_set_vmcs_host_state(host_state, __get_current_cr3_fast(),
+ fs_sel, gs_sel, fs_base, gs_base);
+
vmx->guest_state_loaded = true;
}
@@ -1271,7 +1287,6 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
if (!already_loaded) {
void *gdt = get_current_gdt_ro();
- unsigned long sysenter_esp;
/*
* Flush all EPTP/VPID contexts, the new pCPU may have stale
@@ -1287,8 +1302,11 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
(unsigned long)&get_cpu_entry_area(cpu)->tss.x86_tss);
vmcs_writel(HOST_GDTR_BASE, (unsigned long)gdt); /* 22.2.4 */
- rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
- vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
+ if (IS_ENABLED(CONFIG_IA32_EMULATION) || IS_ENABLED(CONFIG_X86_32)) {
+ /* 22.2.3 */
+ vmcs_writel(HOST_IA32_SYSENTER_ESP,
+ (unsigned long)(cpu_entry_stack(cpu) + 1));
+ }
vmx->loaded_vmcs->cpu = cpu;
}
@@ -1469,11 +1487,12 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
return 0;
}
-static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, void *insn, int insn_len)
+static bool vmx_can_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+ void *insn, int insn_len)
{
/*
* Emulation of instructions in SGX enclaves is impossible as RIP does
- * not point tthe failing instruction, and even if it did, the code
+ * not point at the failing instruction, and even if it did, the code
* stream is inaccessible. Inject #UD instead of exiting to userspace
* so that guest userspace can't DoS the guest simply by triggering
* emulation (enclaves are CPL3 only).
@@ -1753,7 +1772,7 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
}
/*
- * Reads an msr value (of 'msr_index') into 'pdata'.
+ * Reads an msr value (of 'msr_info->index') into 'msr_info->data'.
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
@@ -1960,6 +1979,24 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_KERNEL_GS_BASE:
vmx_write_guest_kernel_gs_base(vmx, data);
break;
+ case MSR_IA32_XFD:
+ ret = kvm_set_msr_common(vcpu, msr_info);
+ /*
+		 * Always intercepting WRMSR could incur non-negligible
+		 * overhead given that xfd may be changed frequently on
+		 * guest context switches.  Disable write interception
+		 * upon the first write with a non-zero value (indicating
+		 * potential use of dynamic xfeatures), and also update
+		 * the exception bitmap to trap #NM for proper
+		 * virtualization of guest xfd_err.
+ */
+ if (!ret && data) {
+ vmx_disable_intercept_for_msr(vcpu, MSR_IA32_XFD,
+ MSR_TYPE_RW);
+ vcpu->arch.xfd_no_write_intercept = true;
+ vmx_update_exception_bitmap(vcpu);
+ }
+ break;
#endif
case MSR_IA32_SYSENTER_CS:
if (is_guest_mode(vcpu))
@@ -2100,9 +2137,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
}
ret = kvm_set_msr_common(vcpu, msr_info);
break;
- case MSR_IA32_TSC_ADJUST:
- ret = kvm_set_msr_common(vcpu, msr_info);
- break;
case MSR_IA32_MCG_EXT_CTL:
if ((!msr_info->host_initiated &&
!(to_vmx(vcpu)->msr_ia32_feature_control &
@@ -2570,7 +2604,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf,
return -EIO;
vmcs_conf->size = vmx_msr_high & 0x1fff;
- vmcs_conf->order = get_order(vmcs_conf->size);
vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
vmcs_conf->revision_id = vmx_msr_low;
@@ -2595,7 +2628,7 @@ struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
struct page *pages;
struct vmcs *vmcs;
- pages = __alloc_pages_node(node, flags, vmcs_config.order);
+ pages = __alloc_pages_node(node, flags, 0);
if (!pages)
return NULL;
vmcs = page_address(pages);
@@ -2614,7 +2647,7 @@ struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags)
void free_vmcs(struct vmcs *vmcs)
{
- free_pages((unsigned long)vmcs, vmcs_config.order);
+ free_page((unsigned long)vmcs);
}
/*
@@ -2985,7 +3018,7 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
- kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+ kvm_register_mark_available(vcpu, VCPU_EXREG_PDPTR);
}
#define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \
@@ -3067,6 +3100,13 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
/* Note, vmx_set_cr4() consumes the new vcpu->arch.cr0. */
if ((old_cr0_pg ^ cr0) & X86_CR0_PG)
vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
+
+ /*
+ * When !CR0_PG -> CR0_PG, vcpu->arch.cr3 becomes active, but
+ * GUEST_CR3 is still vmx->ept_identity_map_addr if EPT + !URG.
+ */
+ if (!(old_cr0_pg & X86_CR0_PG) && (cr0 & X86_CR0_PG))
+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
}
/* depends on vcpu->arch.cr0 to be set to a new value */
@@ -3110,9 +3150,9 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
if (!enable_unrestricted_guest && !is_paging(vcpu))
guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
- else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
+ else if (kvm_register_is_dirty(vcpu, VCPU_EXREG_CR3))
guest_cr3 = vcpu->arch.cr3;
- else /* vmcs01.GUEST_CR3 is already up-to-date. */
+ else /* vmcs.GUEST_CR3 is already up-to-date. */
update_guest_cr3 = false;
vmx_ept_load_pdptrs(vcpu);
} else {
@@ -3123,6 +3163,7 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
vmcs_writel(GUEST_CR3, guest_cr3);
}
+
static bool vmx_is_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
/*
@@ -3687,6 +3728,19 @@ void free_vpid(int vpid)
spin_unlock(&vmx_vpid_lock);
}
+static void vmx_msr_bitmap_l01_changed(struct vcpu_vmx *vmx)
+{
+ /*
+	 * When KVM is a nested hypervisor on top of Hyper-V and uses the
+	 * 'Enlightened MSR Bitmap' feature, L0 needs to know that the MSR
+	 * bitmap has changed.
+ */
+ if (static_branch_unlikely(&enable_evmcs))
+ evmcs_touch_msr_bitmap();
+
+ vmx->nested.force_msr_bitmap_recalc = true;
+}
+
void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -3695,8 +3749,7 @@ void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
if (!cpu_has_vmx_msr_bitmap())
return;
- if (static_branch_unlikely(&enable_evmcs))
- evmcs_touch_msr_bitmap();
+ vmx_msr_bitmap_l01_changed(vmx);
/*
* Mark the desired intercept state in shadow bitmap, this is needed
@@ -3740,8 +3793,7 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type)
if (!cpu_has_vmx_msr_bitmap())
return;
- if (static_branch_unlikely(&enable_evmcs))
- evmcs_touch_msr_bitmap();
+ vmx_msr_bitmap_l01_changed(vmx);
/*
* Mark the desired intercept state in shadow bitmap, this is needed
@@ -3879,12 +3931,10 @@ static void vmx_msr_filter_changed(struct kvm_vcpu *vcpu)
pt_update_intercept_for_msr(vcpu);
}
-static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
- bool nested)
+static inline void kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
+ int pi_vec)
{
#ifdef CONFIG_SMP
- int pi_vec = nested ? POSTED_INTR_NESTED_VECTOR : POSTED_INTR_VECTOR;
-
if (vcpu->mode == IN_GUEST_MODE) {
/*
* The vector of interrupt to be delivered to vcpu had
@@ -3912,10 +3962,15 @@ static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu,
*/
apic->send_IPI_mask(get_cpu_mask(vcpu->cpu), pi_vec);
- return true;
+ return;
}
#endif
- return false;
+ /*
+ * The vCPU isn't in the guest; wake the vCPU in case it is blocking,
+ * otherwise do nothing as KVM will grab the highest priority pending
+ * IRQ via ->sync_pir_to_irr() in vcpu_enter_guest().
+ */
+ kvm_vcpu_wake_up(vcpu);
}
static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
@@ -3931,9 +3986,21 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
*/
vmx->nested.pi_pending = true;
kvm_make_request(KVM_REQ_EVENT, vcpu);
+
+ /*
+ * This pairs with the smp_mb_*() after setting vcpu->mode in
+ * vcpu_enter_guest() to guarantee the vCPU sees the event
+ * request if triggering a posted interrupt "fails" because
+ * vcpu->mode != IN_GUEST_MODE. The extra barrier is needed as
+	 * the smp_wmb() in kvm_make_request() only ensures everything
+	 * done before making the request is visible when the request
+	 * is visible; it doesn't ensure ordering between the store to
+	 * vcpu->requests and the load from vcpu->mode.
+ */
+ smp_mb__after_atomic();
+
/* the PIR and ON have been set by L1. */
- if (!kvm_vcpu_trigger_posted_interrupt(vcpu, true))
- kvm_vcpu_kick(vcpu);
+ kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_NESTED_VECTOR);
return 0;
}
return -1;
@@ -3964,12 +4031,31 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
if (pi_test_and_set_on(&vmx->pi_desc))
return 0;
- if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
- kvm_vcpu_kick(vcpu);
-
+ /*
+ * The implied barrier in pi_test_and_set_on() pairs with the smp_mb_*()
+ * after setting vcpu->mode in vcpu_enter_guest(), thus the vCPU is
+ * guaranteed to see PID.ON=1 and sync the PIR to IRR if triggering a
+ * posted interrupt "fails" because vcpu->mode != IN_GUEST_MODE.
+ */
+ kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
return 0;
}
+static void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode,
+ int trig_mode, int vector)
+{
+ struct kvm_vcpu *vcpu = apic->vcpu;
+
+ if (vmx_deliver_posted_interrupt(vcpu, vector)) {
+ kvm_lapic_set_irr(vector, apic);
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ kvm_vcpu_kick(vcpu);
+ } else {
+ trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode,
+ trig_mode, vector);
+ }
+}
+
/*
* Set up the vmcs's constant host-state fields, i.e., host-state fields that
* will not change in the lifetime of the guest.
@@ -4021,6 +4107,16 @@ void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
+
+ /*
+	 * SYSENTER is used for 32-bit system calls on either 32-bit or
+	 * 64-bit kernels.  HOST_IA32_SYSENTER_ESP is always zero if
+	 * neither is allowed; otherwise vmx_vcpu_load_vmcs() loads it
+	 * with the per-CPU entry stack (and may have already done so!).
+ */
+ if (!IS_ENABLED(CONFIG_IA32_EMULATION) && !IS_ENABLED(CONFIG_X86_32))
+ vmcs_writel(HOST_IA32_SYSENTER_ESP, 0);
+
rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
@@ -4039,8 +4135,10 @@ void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
vcpu->arch.cr4_guest_owned_bits = KVM_POSSIBLE_CR4_GUEST_BITS &
~vcpu->arch.cr4_guest_rsvd_bits;
- if (!enable_ept)
- vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PGE;
+ if (!enable_ept) {
+ vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_TLBFLUSH_BITS;
+ vcpu->arch.cr4_guest_owned_bits &= ~X86_CR4_PDPTR_BITS;
+ }
if (is_guest_mode(&vmx->vcpu))
vcpu->arch.cr4_guest_owned_bits &=
~get_vmcs12(vcpu)->cr4_guest_host_mask;
@@ -4692,7 +4790,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
if (kvm_emulate_instruction(vcpu, 0)) {
if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
- return kvm_vcpu_halt(vcpu);
+ return kvm_emulate_halt_noskip(vcpu);
}
return 1;
}
@@ -4748,6 +4846,17 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
if (is_machine_check(intr_info) || is_nmi(intr_info))
return 1; /* handled by handle_exception_nmi_irqoff() */
+ /*
+ * Queue the exception here instead of in handle_nm_fault_irqoff().
+ * This ensures the nested_vmx check is not skipped so vmexit can
+ * be reflected to L1 (when it intercepts #NM) before reaching this
+ * point.
+ */
+ if (is_nm_fault(intr_info)) {
+ kvm_queue_exception(vcpu, NM_VECTOR);
+ return 1;
+ }
+
if (is_invalid_opcode(intr_info))
return handle_ud(vcpu);
@@ -4811,8 +4920,33 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
dr6 = vmx_get_exit_qual(vcpu);
if (!(vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
+ /*
+ * If the #DB was due to ICEBP, a.k.a. INT1, skip the
+ * instruction. ICEBP generates a trap-like #DB, but
+ * despite its interception control being tied to #DB,
+ * is an instruction intercept, i.e. the VM-Exit occurs
+ * on the ICEBP itself. Note, skipping ICEBP also
+ * clears STI and MOVSS blocking.
+ *
+ * For all other #DBs, set vmcs.PENDING_DBG_EXCEPTIONS.BS
+ * if single-step is enabled in RFLAGS and STI or MOVSS
+ * blocking is active, as the CPU doesn't set the bit
+ * on VM-Exit due to #DB interception. VM-Entry has a
+ * consistency check that a single-step #DB is pending
+ * in this scenario as the previous instruction cannot
+ * have toggled RFLAGS.TF 0=>1 (because STI and POP/MOV
+ * don't modify RFLAGS), therefore the one instruction
+ * delay when activating single-step breakpoints must
+ * have already expired. Note, the CPU sets/clears BS
+ * as appropriate for all other VM-Exits types.
+ */
if (is_icebp(intr_info))
WARN_ON(!skip_emulated_instruction(vcpu));
+ else if ((vmx_get_rflags(vcpu) & X86_EFLAGS_TF) &&
+ (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+ (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS)))
+ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
+ vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS) | DR6_BS);
kvm_queue_exception_p(vcpu, DB_VECTOR, dr6);
return 1;
@@ -5307,7 +5441,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
{
gpa_t gpa;
- if (!vmx_can_emulate_instruction(vcpu, NULL, 0))
+ if (!vmx_can_emulate_instruction(vcpu, EMULTYPE_PF, NULL, 0))
return 1;
/*
@@ -5336,6 +5470,14 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu)
return 1;
}
+static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
+{
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ return vmx->emulation_required && !vmx->rmode.vm86_active &&
+ vcpu->arch.exception.pending;
+}
+
static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -5355,15 +5497,14 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
if (!kvm_emulate_instruction(vcpu, 0))
return 0;
- if (vmx->emulation_required && !vmx->rmode.vm86_active &&
- vcpu->arch.exception.pending) {
+ if (vmx_emulation_required_with_pending_exception(vcpu)) {
kvm_prepare_emulation_failure_exit(vcpu);
return 0;
}
if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
- return kvm_vcpu_halt(vcpu);
+ return kvm_emulate_halt_noskip(vcpu);
}
/*
@@ -5378,6 +5519,16 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
return 1;
}
+static int vmx_vcpu_pre_run(struct kvm_vcpu *vcpu)
+{
+ if (vmx_emulation_required_with_pending_exception(vcpu)) {
+ kvm_prepare_emulation_failure_exit(vcpu);
+ return 0;
+ }
+
+ return 1;
+}
+
static void grow_ple_window(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6344,11 +6495,33 @@ void vmx_do_interrupt_nmi_irqoff(unsigned long entry);
static void handle_interrupt_nmi_irqoff(struct kvm_vcpu *vcpu,
unsigned long entry)
{
- kvm_before_interrupt(vcpu);
+ bool is_nmi = entry == (unsigned long)asm_exc_nmi_noist;
+
+ kvm_before_interrupt(vcpu, is_nmi ? KVM_HANDLING_NMI : KVM_HANDLING_IRQ);
vmx_do_interrupt_nmi_irqoff(entry);
kvm_after_interrupt(vcpu);
}
+static void handle_nm_fault_irqoff(struct kvm_vcpu *vcpu)
+{
+ /*
+ * Save xfd_err to guest_fpu before interrupts are enabled, so the
+ * MSR value is not clobbered by host activity before the guest
+ * has a chance to consume it.
+ *
+ * Do not blindly read xfd_err here, since this exception might
+ * be caused by L1 interception on a platform which doesn't
+ * support xfd at all.
+ *
+ * Read it conditionally, based on guest_fpu::xfd: xfd_err matters
+ * only when xfd contains a non-zero value.
+ *
+ * Queuing the exception is done in vmx_handle_exit. See the comment there.
+ */
+ if (vcpu->arch.guest_fpu.fpstate->xfd)
+ rdmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
+}
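
This snapshot pairs with vcpu_enter_guest(): the guest value is written to the hardware MSR before entry and zeroed again after handle_exit_irqoff() runs. As a standalone model of why the read must happen while interrupts are still off (illustrative names only, not kernel API):

/* Model: msr_xfd_err stands in for MSR_IA32_XFD_ERR; host_nm_activity()
 * for any host work that may rewrite it once interrupts are re-enabled. */
static unsigned long long msr_xfd_err = 0x40000; /* set by the guest #NM */
static unsigned long long guest_xfd_err;

static void host_nm_activity(void) { msr_xfd_err = 0; }

static void vmexit_irqoff_path(void)
{
	guest_xfd_err = msr_xfd_err; /* must precede local_irq_enable() */
	host_nm_activity();          /* anything after the snapshot is safe */
}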
+
static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
{
const unsigned long nmi_entry = (unsigned long)asm_exc_nmi_noist;
@@ -6357,6 +6530,9 @@ static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
/* if exit due to PF check for async PF */
if (is_page_fault(intr_info))
vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
+ /* if exit due to NM, handle before interrupts are enabled */
+ else if (is_nm_fault(intr_info))
+ handle_nm_fault_irqoff(&vmx->vcpu);
/* Handle machine checks before interrupts are enabled */
else if (is_machine_check(intr_info))
kvm_machine_check();
@@ -6593,7 +6769,7 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
struct vcpu_vmx *vmx)
{
- kvm_guest_enter_irqoff();
+ guest_state_enter_irqoff();
/* L1D Flush includes CPU buffer clear to mitigate MDS */
if (static_branch_unlikely(&vmx_l1d_should_flush))
@@ -6609,13 +6785,13 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
vcpu->arch.cr2 = native_read_cr2();
- kvm_guest_exit_irqoff();
+ guest_state_exit_irqoff();
}
static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- unsigned long cr3, cr4;
+ unsigned long cr4;
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!enable_vnmi &&
@@ -6656,12 +6832,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
if (kvm_register_is_dirty(vcpu, VCPU_REGS_RIP))
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
-
- cr3 = __get_current_cr3_fast();
- if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
- vmcs_writel(HOST_CR3, cr3);
- vmx->loaded_vmcs->host_state.cr3 = cr3;
- }
+ vcpu->arch.regs_dirty = 0;
cr4 = cr4_read_shadow();
if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
@@ -6750,7 +6921,7 @@ static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
loadsegment(es, __USER_DS);
#endif
- vmx_register_cache_reset(vcpu);
+ vcpu->arch.regs_avail &= ~VMX_REGS_LAZY_LOAD_SET;
pt_guest_exit(vmx);
@@ -6818,6 +6989,8 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
BUILD_BUG_ON(offsetof(struct vcpu_vmx, vcpu) != 0);
vmx = to_vmx(vcpu);
+ INIT_LIST_HEAD(&vmx->pi_wakeup_list);
+
err = -ENOMEM;
vmx->vpid = allocate_vpid();
@@ -6969,7 +7142,6 @@ static int __init vmx_check_processor_compat(void)
static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
u8 cache;
- u64 ipat = 0;
/* We wanted to honor guest CD/MTRR/PAT, but doing so could result in
* memory aliases with conflicting memory types and sometimes MCEs.
@@ -6989,30 +7161,22 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
* EPT memory type is used to emulate guest CD/MTRR.
*/
- if (is_mmio) {
- cache = MTRR_TYPE_UNCACHABLE;
- goto exit;
- }
+ if (is_mmio)
+ return MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
- if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
- ipat = VMX_EPT_IPAT_BIT;
- cache = MTRR_TYPE_WRBACK;
- goto exit;
- }
+ if (!kvm_arch_has_noncoherent_dma(vcpu->kvm))
+ return (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
- ipat = VMX_EPT_IPAT_BIT;
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
cache = MTRR_TYPE_WRBACK;
else
cache = MTRR_TYPE_UNCACHABLE;
- goto exit;
- }
- cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
+ return (cache << VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IPAT_BIT;
+ }
-exit:
- return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
+ return kvm_mtrr_get_guest_memory_type(vcpu, gfn) << VMX_EPT_MT_EPTE_SHIFT;
}
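
For reference, the EPTE memtype lives in bits 5:3 and "ignore PAT" is bit 6, so the three return paths encode compactly. A minimal standalone sketch, assuming the kernel's values for these constants (shift 3, IPAT at bit 6, UC = 0, WB = 6):

#include <stdio.h>

#define VMX_EPT_MT_EPTE_SHIFT 3           /* assumed kernel value */
#define VMX_EPT_IPAT_BIT      (1ull << 6) /* assumed kernel value */
#define MTRR_TYPE_UNCACHABLE  0
#define MTRR_TYPE_WRBACK      6

int main(void)
{
	/* MMIO: UC, guest PAT honored (no IPAT bit) -> 0x0 */
	printf("mmio:    %#llx\n",
	       (unsigned long long)MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT);
	/* Coherent DMA only: WB and ignore guest PAT -> 0x30 | 0x40 = 0x70 */
	printf("wb+ipat: %#llx\n",
	       ((unsigned long long)MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT) |
	       VMX_EPT_IPAT_BIT);
	return 0;
}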
static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx, u32 new_ctl)
@@ -7204,6 +7368,11 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
}
}
+ if (kvm_cpu_cap_has(X86_FEATURE_XFD))
+ vmx_set_intercept_for_msr(vcpu, MSR_IA32_XFD_ERR, MSR_TYPE_R,
+ !guest_cpuid_has(vcpu, X86_FEATURE_XFD));
+
set_cr4_guest_host_mask(vmx);
vmx_write_encls_bitmap(vcpu, NULL);
@@ -7443,25 +7612,6 @@ void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_ENABLE_PML);
}
-static int vmx_pre_block(struct kvm_vcpu *vcpu)
-{
- if (pi_pre_block(vcpu))
- return 1;
-
- if (kvm_lapic_hv_timer_in_use(vcpu))
- kvm_lapic_switch_to_sw_timer(vcpu);
-
- return 0;
-}
-
-static void vmx_post_block(struct kvm_vcpu *vcpu)
-{
- if (kvm_x86_ops.set_hv_timer)
- kvm_lapic_switch_to_hv_timer(vcpu);
-
- pi_post_block(vcpu);
-}
-
static void vmx_setup_mce(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.mcg_cap & MCG_LMCE_P)
@@ -7509,6 +7659,7 @@ static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
if (ret)
return ret;
+ vmx->nested.nested_run_pending = 1;
vmx->nested.smm.guest_mode = false;
}
return 0;
@@ -7604,6 +7755,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.tlb_flush_gva = vmx_flush_tlb_gva,
.tlb_flush_guest = vmx_flush_tlb_guest,
+ .vcpu_pre_run = vmx_vcpu_pre_run,
.run = vmx_vcpu_run,
.handle_exit = vmx_handle_exit,
.skip_emulated_instruction = vmx_skip_emulated_instruction,
@@ -7632,7 +7784,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.hwapic_isr_update = vmx_hwapic_isr_update,
.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
.sync_pir_to_irr = vmx_sync_pir_to_irr,
- .deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+ .deliver_interrupt = vmx_deliver_interrupt,
.dy_apicv_has_pending_interrupt = pi_has_pending_interrupt,
.set_tss_addr = vmx_set_tss_addr,
@@ -7662,9 +7814,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.cpu_dirty_log_size = PML_ENTITY_NUM,
.update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
- .pre_block = vmx_pre_block,
- .post_block = vmx_post_block,
-
.pmu_ops = &intel_pmu_ops,
.nested_ops = &vmx_nested_ops,
@@ -7693,6 +7842,20 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
};
+static unsigned int vmx_handle_intel_pt_intr(void)
+{
+ struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
+
+ /* '0' on failure so that the !PT case can use a RET0 static call. */
+ if (!kvm_arch_pmi_in_guest(vcpu))
+ return 0;
+
+ kvm_make_request(KVM_REQ_PMI, vcpu);
+ __set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
+ (unsigned long *)&vcpu->arch.pmu.global_status);
+ return 1;
+}
+
static __init void vmx_setup_user_return_msrs(void)
{
@@ -7719,11 +7882,13 @@ static __init void vmx_setup_user_return_msrs(void)
kvm_add_user_return_msr(vmx_uret_msrs_list[i]);
}
+static struct kvm_x86_init_ops vmx_init_ops __initdata;
+
static __init int hardware_setup(void)
{
unsigned long host_bndcfgs;
struct desc_ptr dt;
- int r, ept_lpage_level;
+ int r;
store_idt(&dt);
host_idt_base = dt.address;
@@ -7820,16 +7985,8 @@ static __init int hardware_setup(void)
kvm_mmu_set_ept_masks(enable_ept_ad_bits,
cpu_has_vmx_ept_execute_only());
- if (!enable_ept)
- ept_lpage_level = 0;
- else if (cpu_has_vmx_ept_1g_page())
- ept_lpage_level = PG_LEVEL_1G;
- else if (cpu_has_vmx_ept_2m_page())
- ept_lpage_level = PG_LEVEL_2M;
- else
- ept_lpage_level = PG_LEVEL_4K;
kvm_configure_mmu(enable_ept, 0, vmx_get_max_tdp_level(),
- ept_lpage_level);
+ ept_caps_to_lpage_level(vmx_capability.ept));
/*
* Only enable PML when hardware supports PML feature, and both EPT
@@ -7877,6 +8034,10 @@ static __init int hardware_setup(void)
return -EINVAL;
if (!enable_ept || !cpu_has_vmx_intel_pt())
pt_mode = PT_MODE_SYSTEM;
+ if (pt_mode == PT_MODE_HOST_GUEST)
+ vmx_init_ops.handle_intel_pt_intr = vmx_handle_intel_pt_intr;
+ else
+ vmx_init_ops.handle_intel_pt_intr = NULL;
setup_default_sgx_lepubkeyhash();
@@ -7905,6 +8066,7 @@ static struct kvm_x86_init_ops vmx_init_ops __initdata = {
.disabled_by_bios = vmx_disabled_by_bios,
.check_processor_compatibility = vmx_check_processor_compat,
.hardware_setup = hardware_setup,
+ .handle_intel_pt_intr = NULL,
.runtime_ops = &vmx_x86_ops,
};
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 4df2ac24ffc1..7f2c82e7f38f 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -159,6 +159,15 @@ struct nested_vmx {
bool dirty_vmcs12;
/*
+ * Indicates whether the MSR bitmap for L2 needs to be rebuilt due to
+ * changes in the MSR bitmap for L1, or to switching to a different L2.
+ * Note, this flag can only be used reliably in conjunction with a
+ * paravirt L1 which informs L0 whether any changes to the MSR bitmap
+ * for L2 were made on its side.
+ */
+ bool force_msr_bitmap_recalc;
+
+ /*
* Indicates lazily loaded guest state has not yet been decached from
* vmcs02.
*/
@@ -308,6 +317,9 @@ struct vcpu_vmx {
/* Posted interrupt descriptor */
struct pi_desc pi_desc;
+ /* Used if this vCPU is waiting for PI notification wakeup. */
+ struct list_head pi_wakeup_list;
+
/* Support for a guest hypervisor (nested VMX) */
struct nested_vmx nested;
@@ -340,7 +352,7 @@ struct vcpu_vmx {
struct lbr_desc lbr_desc;
/* Save desired MSR intercept (read: pass-through) state */
-#define MAX_POSSIBLE_PASSTHROUGH_MSRS 13
+#define MAX_POSSIBLE_PASSTHROUGH_MSRS 15
struct {
DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
@@ -362,8 +374,9 @@ int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
-void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
- unsigned long fs_base, unsigned long gs_base);
+void vmx_set_vmcs_host_state(struct vmcs_host_state *host, unsigned long cr3,
+ u16 fs_sel, u16 gs_sel,
+ unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
bool vmx_emulation_required(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
@@ -473,19 +486,21 @@ BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
-static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
-{
- vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
- | (1 << VCPU_EXREG_RFLAGS)
- | (1 << VCPU_EXREG_PDPTR)
- | (1 << VCPU_EXREG_SEGMENTS)
- | (1 << VCPU_EXREG_CR0)
- | (1 << VCPU_EXREG_CR3)
- | (1 << VCPU_EXREG_CR4)
- | (1 << VCPU_EXREG_EXIT_INFO_1)
- | (1 << VCPU_EXREG_EXIT_INFO_2));
- vcpu->arch.regs_dirty = 0;
-}
+/*
+ * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the
+ * cache on demand. Other registers not listed here are synced to
+ * the cache immediately after VM-Exit.
+ */
+#define VMX_REGS_LAZY_LOAD_SET ((1 << VCPU_REGS_RIP) | \
+ (1 << VCPU_REGS_RSP) | \
+ (1 << VCPU_EXREG_RFLAGS) | \
+ (1 << VCPU_EXREG_PDPTR) | \
+ (1 << VCPU_EXREG_SEGMENTS) | \
+ (1 << VCPU_EXREG_CR0) | \
+ (1 << VCPU_EXREG_CR3) | \
+ (1 << VCPU_EXREG_CR4) | \
+ (1 << VCPU_EXREG_EXIT_INFO_1) | \
+ (1 << VCPU_EXREG_EXIT_INFO_2))
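
The macro replaces the old helper: on VM-Exit everything in the set is marked stale, and each register is re-read from the VMCS only on first use. A minimal userspace model of that avail-bitmap pattern (illustrative names, not the kernel's):

#include <stdio.h>

enum { REG_RIP, REG_RSP, NR_REGS };

static unsigned long vmcs_shadow[NR_REGS] = { 0xfff0, 0x8000 };
static unsigned long cache[NR_REGS];
static unsigned int regs_avail;           /* bit set => cache[] is valid */

static unsigned long read_reg(int reg)
{
	if (!(regs_avail & (1u << reg))) {
		cache[reg] = vmcs_shadow[reg]; /* stands in for vmcs_readl() */
		regs_avail |= 1u << reg;
	}
	return cache[reg];
}

int main(void)
{
	regs_avail &= ~((1u << REG_RIP) | (1u << REG_RSP)); /* "VM-Exit" */
	printf("rip=%#lx rsp=%#lx\n", read_reg(REG_RIP), read_reg(REG_RSP));
	return 0;
}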
static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
diff --git a/arch/x86/kvm/vmx/vmx_ops.h b/arch/x86/kvm/vmx/vmx_ops.h
index 9e9ef47e988c..5e7f41225780 100644
--- a/arch/x86/kvm/vmx/vmx_ops.h
+++ b/arch/x86/kvm/vmx/vmx_ops.h
@@ -71,6 +71,31 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
unsigned long value;
+#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
+
+ asm_volatile_goto("1: vmread %[field], %[output]\n\t"
+ "jna %l[do_fail]\n\t"
+
+ _ASM_EXTABLE(1b, %l[do_exception])
+
+ : [output] "=r" (value)
+ : [field] "r" (field)
+ : "cc"
+ : do_fail, do_exception);
+
+ return value;
+
+do_fail:
+ WARN_ONCE(1, "kvm: vmread failed: field=%lx\n", field);
+ pr_warn_ratelimited("kvm: vmread failed: field=%lx\n", field);
+ return 0;
+
+do_exception:
+ kvm_spurious_fault();
+ return 0;
+
+#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
+
asm volatile("1: vmread %2, %1\n\t"
".byte 0x3e\n\t" /* branch taken hint */
"ja 3f\n\t"
@@ -80,9 +105,11 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
* @field, and bounce through the trampoline to preserve
* volatile registers.
*/
- "push $0\n\t"
+ "xorl %k1, %k1\n\t"
+ "2:\n\t"
+ "push %1\n\t"
"push %2\n\t"
- "2:call vmread_error_trampoline\n\t"
+ "call vmread_error_trampoline\n\t"
/*
* Unwind the stack. Note, the trampoline zeros out the
@@ -93,14 +120,12 @@ static __always_inline unsigned long __vmcs_readl(unsigned long field)
"3:\n\t"
/* VMREAD faulted. As above, except push '1' for @fault. */
- ".pushsection .fixup, \"ax\"\n\t"
- "4: push $1\n\t"
- "push %2\n\t"
- "jmp 2b\n\t"
- ".popsection\n\t"
- _ASM_EXTABLE(1b, 4b)
- : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
+ _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_ONE_REG, %1)
+
+ : ASM_CALL_CONSTRAINT, "=&r"(value) : "r"(field) : "cc");
return value;
+
+#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
}
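
CONFIG_CC_HAS_ASM_GOTO_OUTPUT keys off compiler support for output operands in asm goto (GCC 11+ and recent Clang); outputs are only valid on the fall-through path, which is why both error labels above return a constant. A minimal standalone illustration of the construct, unrelated to VMREAD itself:

#include <stdio.h>

/* Compile with a compiler that accepts output operands in asm goto. */
static int checked_add(int a, int b, int *sum)
{
	asm goto("addl %1, %0\n\t"
		 "jo %l[overflow]"
		 : "+r"(a) : "r"(b) : "cc" : overflow);
	*sum = a;          /* output valid only on the fall-through path */
	return 0;
overflow:
	return -1;
}

int main(void)
{
	int s;
	printf("%d\n", checked_add(1, 2, &s) ? -1 : s);  /* prints 3 */
	return 0;
}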
static __always_inline u16 vmcs_read16(unsigned long field)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e50e97ac4408..641044db415d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -90,6 +90,8 @@
u64 __read_mostly kvm_mce_cap_supported = MCG_CTL_P | MCG_SER_P;
EXPORT_SYMBOL_GPL(kvm_mce_cap_supported);
+#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
+
#define emul_to_vcpu(ctxt) \
((struct kvm_vcpu *)(ctxt)->vcpu)
@@ -118,6 +120,7 @@ static void enter_smm(struct kvm_vcpu *vcpu);
static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
static void store_regs(struct kvm_vcpu *vcpu);
static int sync_regs(struct kvm_vcpu *vcpu);
+static int kvm_vcpu_do_singlestep(struct kvm_vcpu *vcpu);
static int __set_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
static void __get_sregs2(struct kvm_vcpu *vcpu, struct kvm_sregs2 *sregs2);
@@ -186,6 +189,11 @@ module_param(force_emulation_prefix, bool, S_IRUGO);
int __read_mostly pi_inject_timer = -1;
module_param(pi_inject_timer, bint, S_IRUGO | S_IWUSR);
+/* Enable/disable PMU virtualization */
+bool __read_mostly enable_pmu = true;
+EXPORT_SYMBOL_GPL(enable_pmu);
+module_param(enable_pmu, bool, 0444);
+
/*
* Restoring the host value for MSRs that are only consumed when running in
* usermode, e.g. SYSCALL MSRs and TSC_AUX, can be deferred until the CPU
@@ -210,7 +218,7 @@ static struct kvm_user_return_msrs __percpu *user_return_msrs;
#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
- | XFEATURE_MASK_PKRU)
+ | XFEATURE_MASK_PKRU | XFEATURE_MASK_XTILE)
u64 __read_mostly host_efer;
EXPORT_SYMBOL_GPL(host_efer);
@@ -710,6 +718,17 @@ int kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
+static int complete_emulated_insn_gp(struct kvm_vcpu *vcpu, int err)
+{
+ if (err) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
+ return kvm_emulate_instruction(vcpu, EMULTYPE_NO_DECODE | EMULTYPE_SKIP |
+ EMULTYPE_COMPLETE_USER_EXIT);
+}
+
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
++vcpu->stat.pf_guest;
@@ -798,8 +817,9 @@ static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
/*
* Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise.
*/
-int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
+int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
{
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
gpa_t real_gpa;
int i;
@@ -810,8 +830,8 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
* If the MMU is nested, CR3 holds an L2 GPA and needs to be translated
* to an L1 GPA.
*/
- real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(pdpt_gfn),
- PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
+ real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(pdpt_gfn),
+ PFERR_USER_MASK | PFERR_WRITE_MASK, NULL);
if (real_gpa == UNMAPPED_GVA)
return 0;
@@ -828,8 +848,16 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
}
}
+ /*
+ * Marking VCPU_EXREG_PDPTR dirty doesn't work for !tdp_enabled.
+ * Shadow page roots need to be reconstructed instead.
+ */
+ if (!tdp_enabled && memcmp(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs)))
+ kvm_mmu_free_roots(vcpu, mmu, KVM_MMU_ROOT_CURRENT);
+
memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
+ kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
vcpu->arch.pdptrs_from_userspace = false;
return 1;
@@ -856,7 +884,6 @@ EXPORT_SYMBOL_GPL(kvm_post_set_cr0);
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
unsigned long old_cr0 = kvm_read_cr0(vcpu);
- unsigned long pdptr_bits = X86_CR0_CD | X86_CR0_NW | X86_CR0_PG;
cr0 |= X86_CR0_ET;
@@ -886,8 +913,8 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
}
#endif
if (!(vcpu->arch.efer & EFER_LME) && (cr0 & X86_CR0_PG) &&
- is_pae(vcpu) && ((cr0 ^ old_cr0) & pdptr_bits) &&
- !load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu)))
+ is_pae(vcpu) && ((cr0 ^ old_cr0) & X86_CR0_PDPTR_BITS) &&
+ !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
return 1;
if (!(cr0 & X86_CR0_PG) &&
@@ -990,6 +1017,11 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
return 1;
}
+
+ if ((xcr0 & XFEATURE_MASK_XTILE) &&
+ ((xcr0 & XFEATURE_MASK_XTILE) != XFEATURE_MASK_XTILE))
+ return 1;
+
vcpu->arch.xcr0 = xcr0;
if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
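
The added XTILE check enforces that the two AMX components are enabled together. With the architectural bit positions (XTILECFG is XCR0 bit 17, XTILEDATA is bit 18), a quick standalone version of the all-or-nothing rule:

#include <stdio.h>

#define XFEATURE_MASK_XTILE 0x60000ull  /* bits 17 (XTILECFG), 18 (XTILEDATA) */

static int xtile_valid(unsigned long long xcr0)
{
	unsigned long long tile = xcr0 & XFEATURE_MASK_XTILE;

	return !tile || tile == XFEATURE_MASK_XTILE;  /* none or both */
}

int main(void)
{
	printf("%d %d %d\n",
	       xtile_valid(0x7),                        /* 1: no tile bits */
	       xtile_valid(0x7 | (1ull << 18)),         /* 0: XTILEDATA only */
	       xtile_valid(0x7 | XFEATURE_MASK_XTILE)); /* 1: both */
	return 0;
}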
@@ -1051,8 +1083,6 @@ EXPORT_SYMBOL_GPL(kvm_post_set_cr4);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
- unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
- X86_CR4_SMEP;
if (!kvm_is_valid_cr4(vcpu, cr4))
return 1;
@@ -1063,9 +1093,8 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
if ((cr4 ^ old_cr4) & X86_CR4_LA57)
return 1;
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
- && ((cr4 ^ old_cr4) & pdptr_bits)
- && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
- kvm_read_cr3(vcpu)))
+ && ((cr4 ^ old_cr4) & X86_CR4_PDPTR_BITS)
+ && !load_pdptrs(vcpu, kvm_read_cr3(vcpu)))
return 1;
if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
@@ -1154,14 +1183,15 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
if (kvm_vcpu_is_illegal_gpa(vcpu, cr3))
return 1;
- if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
+ if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3))
return 1;
if (cr3 != kvm_read_cr3(vcpu))
kvm_mmu_new_pgd(vcpu, cr3);
vcpu->arch.cr3 = cr3;
- kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
+ /* Do not call post_set_cr3; we do not get here for confidential guests. */
handle_tlb_flush:
/*
@@ -1359,6 +1389,7 @@ static const u32 msrs_to_save_all[] = {
MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
MSR_F15H_PERF_CTR3, MSR_F15H_PERF_CTR4, MSR_F15H_PERF_CTR5,
+ MSR_IA32_XFD, MSR_IA32_XFD_ERR,
};
static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
@@ -1815,22 +1846,36 @@ int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
}
EXPORT_SYMBOL_GPL(kvm_set_msr);
-static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
+static void complete_userspace_rdmsr(struct kvm_vcpu *vcpu)
{
- int err = vcpu->run->msr.error;
- if (!err) {
+ if (!vcpu->run->msr.error) {
kvm_rax_write(vcpu, (u32)vcpu->run->msr.data);
kvm_rdx_write(vcpu, vcpu->run->msr.data >> 32);
}
+}
+
+static int complete_emulated_msr_access(struct kvm_vcpu *vcpu)
+{
+ return complete_emulated_insn_gp(vcpu, vcpu->run->msr.error);
+}
- return static_call(kvm_x86_complete_emulated_msr)(vcpu, err);
+static int complete_emulated_rdmsr(struct kvm_vcpu *vcpu)
+{
+ complete_userspace_rdmsr(vcpu);
+ return complete_emulated_msr_access(vcpu);
}
-static int complete_emulated_wrmsr(struct kvm_vcpu *vcpu)
+static int complete_fast_msr_access(struct kvm_vcpu *vcpu)
{
return static_call(kvm_x86_complete_emulated_msr)(vcpu, vcpu->run->msr.error);
}
+static int complete_fast_rdmsr(struct kvm_vcpu *vcpu)
+{
+ complete_userspace_rdmsr(vcpu);
+ return complete_fast_msr_access(vcpu);
+}
+
static u64 kvm_msr_reason(int r)
{
switch (r) {
@@ -1865,18 +1910,6 @@ static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
return 1;
}
-static int kvm_get_msr_user_space(struct kvm_vcpu *vcpu, u32 index, int r)
-{
- return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_RDMSR, 0,
- complete_emulated_rdmsr, r);
-}
-
-static int kvm_set_msr_user_space(struct kvm_vcpu *vcpu, u32 index, u64 data, int r)
-{
- return kvm_msr_user_space(vcpu, index, KVM_EXIT_X86_WRMSR, data,
- complete_emulated_wrmsr, r);
-}
-
int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
{
u32 ecx = kvm_rcx_read(vcpu);
@@ -1885,18 +1918,16 @@ int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
r = kvm_get_msr(vcpu, ecx, &data);
- /* MSR read failed? See if we should ask user space */
- if (r && kvm_get_msr_user_space(vcpu, ecx, r)) {
- /* Bounce to user space */
- return 0;
- }
-
if (!r) {
trace_kvm_msr_read(ecx, data);
kvm_rax_write(vcpu, data & -1u);
kvm_rdx_write(vcpu, (data >> 32) & -1u);
} else {
+ /* MSR read failed? See if we should ask user space */
+ if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_RDMSR, 0,
+ complete_fast_rdmsr, r))
+ return 0;
trace_kvm_msr_read_ex(ecx);
}
@@ -1912,19 +1943,18 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
r = kvm_set_msr(vcpu, ecx, data);
- /* MSR write failed? See if we should ask user space */
- if (r && kvm_set_msr_user_space(vcpu, ecx, data, r))
- /* Bounce to user space */
- return 0;
-
- /* Signal all other negative errors to userspace */
- if (r < 0)
- return r;
-
- if (!r)
+ if (!r) {
trace_kvm_msr_write(ecx, data);
- else
+ } else {
+ /* MSR write failed? See if we should ask user space */
+ if (kvm_msr_user_space(vcpu, ecx, KVM_EXIT_X86_WRMSR, data,
+ complete_fast_msr_access, r))
+ return 0;
+ /* Signal all other negative errors to userspace */
+ if (r < 0)
+ return r;
trace_kvm_msr_write_ex(ecx, data);
+ }
return static_call(kvm_x86_complete_emulated_msr)(vcpu, r);
}
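
For VMMs that enabled KVM_CAP_X86_USER_SPACE_MSR, the deflected access surfaces as KVM_EXIT_X86_RDMSR/WRMSR, and the completion callbacks above resume the vCPU once userspace fills in the reply. A sketch of the userspace side (the MSR index is hypothetical; error handling elided):

#include <linux/kvm.h>

/* run points at the vCPU's mmap'ed struct kvm_run. */
static void handle_msr_exit(struct kvm_run *run)
{
	switch (run->exit_reason) {
	case KVM_EXIT_X86_RDMSR:
		if (run->msr.index == 0x4b564dff) {  /* hypothetical index */
			run->msr.data = 0;
			run->msr.error = 0;          /* success */
		} else {
			run->msr.error = 1;          /* KVM injects #GP */
		}
		break;
	case KVM_EXIT_X86_WRMSR:
		run->msr.error = 0;                  /* swallow the write */
		break;
	}
	/* The next KVM_RUN resumes via the complete_* callbacks above. */
}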
@@ -2119,7 +2149,7 @@ static s64 get_kvmclock_base_ns(void)
}
#endif
-void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
+static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs)
{
int version;
int r;
@@ -2817,7 +2847,7 @@ static void kvm_end_pvclock_update(struct kvm *kvm)
{
struct kvm_arch *ka = &kvm->arch;
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
write_seqcount_end(&ka->pvclock_sc);
raw_spin_unlock_irq(&ka->tsc_write_lock);
@@ -3066,7 +3096,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
static void kvmclock_update_fn(struct work_struct *work)
{
- int i;
+ unsigned long i;
struct delayed_work *dwork = to_delayed_work(work);
struct kvm_arch *ka = container_of(dwork, struct kvm_arch,
kvmclock_update_work);
@@ -3507,6 +3537,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (data & ~supported_xss)
return 1;
vcpu->arch.ia32_xss = data;
+ kvm_update_cpuid_runtime(vcpu);
break;
case MSR_SMI_COUNT:
if (!msr_info->host_initiated)
@@ -3669,6 +3700,30 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
vcpu->arch.msr_misc_features_enables = data;
break;
+#ifdef CONFIG_X86_64
+ case MSR_IA32_XFD:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
+ return 1;
+
+ if (data & ~(XFEATURE_MASK_USER_DYNAMIC &
+ vcpu->arch.guest_supported_xcr0))
+ return 1;
+
+ fpu_update_guest_xfd(&vcpu->arch.guest_fpu, data);
+ break;
+ case MSR_IA32_XFD_ERR:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
+ return 1;
+
+ if (data & ~(XFEATURE_MASK_USER_DYNAMIC &
+ vcpu->arch.guest_supported_xcr0))
+ return 1;
+
+ vcpu->arch.guest_fpu.xfd_err = data;
+ break;
+#endif
default:
if (kvm_pmu_is_valid_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr_info);
@@ -3989,6 +4044,22 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
case MSR_K7_HWCR:
msr_info->data = vcpu->arch.msr_hwcr;
break;
+#ifdef CONFIG_X86_64
+ case MSR_IA32_XFD:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
+ return 1;
+
+ msr_info->data = vcpu->arch.guest_fpu.fpstate->xfd;
+ break;
+ case MSR_IA32_XFD_ERR:
+ if (!msr_info->host_initiated &&
+ !guest_cpuid_has(vcpu, X86_FEATURE_XFD))
+ return 1;
+
+ msr_info->data = vcpu->arch.guest_fpu.xfd_err;
+ break;
+#endif
default:
if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
return kvm_pmu_get_msr(vcpu, msr_info);
@@ -4161,6 +4232,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_SREGS2:
case KVM_CAP_EXIT_ON_EMULATION_FAILURE:
case KVM_CAP_VCPU_ATTRIBUTES:
+ case KVM_CAP_SYS_ATTRIBUTES:
r = 1;
break;
case KVM_CAP_EXIT_HYPERCALL:
@@ -4172,7 +4244,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_XEN_HVM:
r = KVM_XEN_HVM_CONFIG_HYPERCALL_MSR |
KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL |
- KVM_XEN_HVM_CONFIG_SHARED_INFO;
+ KVM_XEN_HVM_CONFIG_SHARED_INFO |
+ KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL;
if (sched_info_on())
r |= KVM_XEN_HVM_CONFIG_RUNSTATE;
break;
@@ -4250,11 +4323,61 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
else
r = 0;
break;
+ case KVM_CAP_XSAVE2: {
+ u64 guest_perm = xstate_get_guest_group_perm();
+
+ r = xstate_required_size(supported_xcr0 & guest_perm, false);
+ if (r < sizeof(struct kvm_xsave))
+ r = sizeof(struct kvm_xsave);
+ break;
+ }
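
The reported size depends on the permissions the VMM obtained via arch_prctl(2) before creating vCPUs. A sketch of the expected flow; the ARCH_REQ_XCOMP_GUEST_PERM and KVM_CAP_XSAVE2 values are taken from the AMX enabling work and guarded as assumptions for older headers:

#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/kvm.h>

#ifndef ARCH_REQ_XCOMP_GUEST_PERM
#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025   /* assumed value */
#endif
#ifndef KVM_CAP_XSAVE2
#define KVM_CAP_XSAVE2 208                 /* assumed value */
#endif
#define XFEATURE_XTILEDATA 18

static int enable_guest_amx(int vm_fd)
{
	/* Must precede vCPU creation so the permitted set is inherited. */
	if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM,
		    XFEATURE_XTILEDATA))
		return -1;
	/* Returns the buffer size (>= sizeof(struct kvm_xsave)) needed
	 * for KVM_GET_XSAVE2. */
	return ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
}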
default:
break;
}
return r;
+}
+static inline void __user *kvm_get_attr_addr(struct kvm_device_attr *attr)
+{
+ void __user *uaddr = (void __user *)(unsigned long)attr->addr;
+
+ if ((u64)(unsigned long)uaddr != attr->addr)
+ return ERR_PTR_USR(-EFAULT);
+ return uaddr;
+}
+
+static int kvm_x86_dev_get_attr(struct kvm_device_attr *attr)
+{
+ u64 __user *uaddr = kvm_get_attr_addr(attr);
+
+ if (attr->group)
+ return -ENXIO;
+
+ if (IS_ERR(uaddr))
+ return PTR_ERR(uaddr);
+
+ switch (attr->attr) {
+ case KVM_X86_XCOMP_GUEST_SUPP:
+ if (put_user(supported_xcr0, uaddr))
+ return -EFAULT;
+ return 0;
+ default:
+ return -ENXIO;
+ }
+}
+
+static int kvm_x86_dev_has_attr(struct kvm_device_attr *attr)
+{
+ if (attr->group)
+ return -ENXIO;
+
+ switch (attr->attr) {
+ case KVM_X86_XCOMP_GUEST_SUPP:
+ return 0;
+ default:
+ return -ENXIO;
+ }
}
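
These system-scoped attributes are reached through KVM_GET_DEVICE_ATTR on the /dev/kvm fd. A minimal userspace query of KVM_X86_XCOMP_GUEST_SUPP (the attribute number is guarded in case the uapi header predates it):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#ifndef KVM_X86_XCOMP_GUEST_SUPP
#define KVM_X86_XCOMP_GUEST_SUPP 0   /* assumed attribute number */
#endif

/* kvm_fd is an open /dev/kvm; on success *supp holds the XCR0 mask. */
static int get_guest_xcomp_supp(int kvm_fd, uint64_t *supp)
{
	struct kvm_device_attr attr = {
		.group = 0,
		.attr  = KVM_X86_XCOMP_GUEST_SUPP,
		.addr  = (uint64_t)(unsigned long)supp,
	};

	return ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr);
}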
long kvm_arch_dev_ioctl(struct file *filp,
@@ -4345,6 +4468,22 @@ long kvm_arch_dev_ioctl(struct file *filp,
case KVM_GET_SUPPORTED_HV_CPUID:
r = kvm_ioctl_get_supported_hv_cpuid(NULL, argp);
break;
+ case KVM_GET_DEVICE_ATTR: {
+ struct kvm_device_attr attr;
+ r = -EFAULT;
+ if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
+ break;
+ r = kvm_x86_dev_get_attr(&attr);
+ break;
+ }
+ case KVM_HAS_DEVICE_ATTR: {
+ struct kvm_device_attr attr;
+ r = -EFAULT;
+ if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
+ break;
+ r = kvm_x86_dev_has_attr(&attr);
+ break;
+ }
default:
r = -EINVAL;
break;
@@ -4783,8 +4922,10 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
vcpu->arch.apic->sipi_vector = events->sipi_vector;
if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
- if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm)
+ if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
+ kvm_x86_ops.nested_ops->leave_nested(vcpu);
kvm_smm_changed(vcpu, events->smi.smm);
+ }
vcpu->arch.smi_pending = events->smi.pending;
@@ -4853,6 +4994,16 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
vcpu->arch.pkru);
}
+static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
+ u8 *state, unsigned int size)
+{
+ if (fpstate_is_confidential(&vcpu->arch.guest_fpu))
+ return;
+
+ fpu_copy_guest_fpstate_to_uabi(&vcpu->arch.guest_fpu,
+ state, size, vcpu->arch.pkru);
+}
+
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
@@ -4935,11 +5086,11 @@ static int kvm_arch_tsc_has_attr(struct kvm_vcpu *vcpu,
static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
- u64 __user *uaddr = (u64 __user *)(unsigned long)attr->addr;
+ u64 __user *uaddr = kvm_get_attr_addr(attr);
int r;
- if ((u64)(unsigned long)uaddr != attr->addr)
- return -EFAULT;
+ if (IS_ERR(uaddr))
+ return PTR_ERR(uaddr);
switch (attr->attr) {
case KVM_VCPU_TSC_OFFSET:
@@ -4958,12 +5109,12 @@ static int kvm_arch_tsc_get_attr(struct kvm_vcpu *vcpu,
static int kvm_arch_tsc_set_attr(struct kvm_vcpu *vcpu,
struct kvm_device_attr *attr)
{
- u64 __user *uaddr = (u64 __user *)(unsigned long)attr->addr;
+ u64 __user *uaddr = kvm_get_attr_addr(attr);
struct kvm *kvm = vcpu->kvm;
int r;
- if ((u64)(unsigned long)uaddr != attr->addr)
- return -EFAULT;
+ if (IS_ERR(uaddr))
+ return PTR_ERR(uaddr);
switch (attr->attr) {
case KVM_VCPU_TSC_OFFSET: {
@@ -5148,17 +5299,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
struct kvm_cpuid __user *cpuid_arg = argp;
struct kvm_cpuid cpuid;
- /*
- * KVM does not correctly handle changing guest CPUID after KVM_RUN, as
- * MAXPHYADDR, GBPAGES support, AMD reserved bit behavior, etc.. aren't
- * tracked in kvm_mmu_page_role. As a result, KVM may miss guest page
- * faults due to reusing SPs/SPTEs. In practice no sane VMM mucks with
- * the core vCPU model on the fly, so fail.
- */
- r = -EINVAL;
- if (vcpu->arch.last_vmentry_cpu != -1)
- goto out;
-
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
goto out;
@@ -5169,14 +5309,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
struct kvm_cpuid2 __user *cpuid_arg = argp;
struct kvm_cpuid2 cpuid;
- /*
- * KVM_SET_CPUID{,2} after KVM_RUN is forbidden, see the comment in
- * KVM_SET_CPUID case above.
- */
- r = -EINVAL;
- if (vcpu->arch.last_vmentry_cpu != -1)
- goto out;
-
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof(cpuid)))
goto out;
@@ -5306,6 +5438,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
case KVM_GET_XSAVE: {
+ r = -EINVAL;
+ if (vcpu->arch.guest_fpu.uabi_size > sizeof(struct kvm_xsave))
+ break;
+
u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL_ACCOUNT);
r = -ENOMEM;
if (!u.xsave)
@@ -5320,7 +5456,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
break;
}
case KVM_SET_XSAVE: {
- u.xsave = memdup_user(argp, sizeof(*u.xsave));
+ int size = vcpu->arch.guest_fpu.uabi_size;
+
+ u.xsave = memdup_user(argp, size);
if (IS_ERR(u.xsave)) {
r = PTR_ERR(u.xsave);
goto out_nofree;
@@ -5329,6 +5467,25 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
break;
}
+
+ case KVM_GET_XSAVE2: {
+ int size = vcpu->arch.guest_fpu.uabi_size;
+
+ u.xsave = kzalloc(size, GFP_KERNEL_ACCOUNT);
+ r = -ENOMEM;
+ if (!u.xsave)
+ break;
+
+ kvm_vcpu_ioctl_x86_get_xsave2(vcpu, u.buffer, size);
+
+ r = -EFAULT;
+ if (copy_to_user(argp, u.xsave, size))
+ break;
+
+ r = 0;
+ break;
+ }
+
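
Unlike KVM_GET_XSAVE's fixed 4 KiB kvm_xsave, the KVM_GET_XSAVE2 buffer must be sized from KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2). A sketch of the userspace pairing, assuming a uapi header that defines both:

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_xsave *vcpu_get_xsave2(int vm_fd, int vcpu_fd)
{
	int size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
	struct kvm_xsave *xs;

	if (size < (int)sizeof(*xs))
		size = sizeof(*xs);
	xs = calloc(1, size);
	if (xs && ioctl(vcpu_fd, KVM_GET_XSAVE2, xs)) {
		free(xs);
		return NULL;
	}
	return xs;
}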
case KVM_GET_XCRS: {
u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL_ACCOUNT);
r = -ENOMEM;
@@ -5693,7 +5850,7 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
* VM-Exit.
*/
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vcpu_kick(vcpu);
@@ -5962,7 +6119,8 @@ static int kvm_vm_ioctl_set_msr_filter(struct kvm *kvm, void __user *argp)
static int kvm_arch_suspend_notifier(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
- int i, ret = 0;
+ unsigned long i;
+ int ret = 0;
mutex_lock(&kvm->lock);
kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -6422,6 +6580,11 @@ static void kvm_init_msr_list(void)
min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
continue;
break;
+ case MSR_IA32_XFD:
+ case MSR_IA32_XFD_ERR:
+ if (!kvm_cpu_cap_has(X86_FEATURE_XFD))
+ continue;
+ break;
default:
break;
}
@@ -6505,13 +6668,14 @@ void kvm_get_segment(struct kvm_vcpu *vcpu,
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
struct x86_exception *exception)
{
+ struct kvm_mmu *mmu = vcpu->arch.mmu;
gpa_t t_gpa;
BUG_ON(!mmu_is_nested(vcpu));
/* NPT walks are always user-walks */
access |= PFERR_USER_MASK;
- t_gpa = vcpu->arch.mmu->gva_to_gpa(vcpu, gpa, access, exception);
+ t_gpa = mmu->gva_to_gpa(vcpu, mmu, gpa, access, exception);
return t_gpa;
}
@@ -6519,25 +6683,31 @@ gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
- return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
+ return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
}
EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_read);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_FETCH_MASK;
- return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
+ return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
}
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_WRITE_MASK;
- return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
+ return mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
}
EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write);
@@ -6545,19 +6715,21 @@ EXPORT_SYMBOL_GPL(kvm_mmu_gva_to_gpa_write);
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
- return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
+
+ return mmu->gva_to_gpa(vcpu, mmu, gva, 0, exception);
}
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
struct kvm_vcpu *vcpu, u32 access,
struct x86_exception *exception)
{
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
- gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
- exception);
+ gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
@@ -6585,13 +6757,14 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
u32 access = (static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0;
unsigned offset;
int ret;
/* Inline kvm_read_guest_virt_helper for speed. */
- gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
- exception);
+ gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access|PFERR_FETCH_MASK,
+ exception);
if (unlikely(gpa == UNMAPPED_GVA))
return X86EMUL_PROPAGATE_FAULT;
@@ -6650,13 +6823,12 @@ static int kvm_write_guest_virt_helper(gva_t addr, void *val, unsigned int bytes
struct kvm_vcpu *vcpu, u32 access,
struct x86_exception *exception)
{
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
- gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
- access,
- exception);
+ gpa_t gpa = mmu->gva_to_gpa(vcpu, mmu, addr, access, exception);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
@@ -6702,6 +6874,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
}
EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
+static int kvm_can_emulate_insn(struct kvm_vcpu *vcpu, int emul_type,
+ void *insn, int insn_len)
+{
+ return static_call(kvm_x86_can_emulate_instruction)(vcpu, emul_type,
+ insn, insn_len);
+}
+
int handle_ud(struct kvm_vcpu *vcpu)
{
static const char kvm_emulate_prefix[] = { __KVM_EMULATE_PREFIX };
@@ -6709,7 +6888,7 @@ int handle_ud(struct kvm_vcpu *vcpu)
char sig[5]; /* ud2; .ascii "kvm" */
struct x86_exception e;
- if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, NULL, 0)))
+ if (unlikely(!kvm_can_emulate_insn(vcpu, emul_type, NULL, 0)))
return 1;
if (force_emulation_prefix &&
@@ -6743,6 +6922,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
gpa_t *gpa, struct x86_exception *exception,
bool write)
{
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
u32 access = ((static_call(kvm_x86_get_cpl)(vcpu) == 3) ? PFERR_USER_MASK : 0)
| (write ? PFERR_WRITE_MASK : 0);
@@ -6760,7 +6940,7 @@ static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
return 1;
}
- *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
+ *gpa = mmu->gva_to_gpa(vcpu, mmu, gva, access, exception);
if (*gpa == UNMAPPED_GVA)
return -1;
@@ -7394,7 +7574,8 @@ static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
r = kvm_get_msr(vcpu, msr_index, pdata);
- if (r && kvm_get_msr_user_space(vcpu, msr_index, r)) {
+ if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_RDMSR, 0,
+ complete_emulated_rdmsr, r)) {
/* Bounce to user space */
return X86EMUL_IO_NEEDED;
}
@@ -7410,7 +7591,8 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
r = kvm_set_msr(vcpu, msr_index, data);
- if (r && kvm_set_msr_user_space(vcpu, msr_index, data, r)) {
+ if (r && kvm_msr_user_space(vcpu, msr_index, KVM_EXIT_X86_WRMSR, data,
+ complete_emulated_msr_access, r)) {
/* Bounce to user space */
return X86EMUL_IO_NEEDED;
}
@@ -7961,6 +8143,8 @@ int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu)
if (unlikely(!r))
return 0;
+ kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
+
/*
* rflags is the old, "raw" value of the flags. The new value has
* not been saved yet.
@@ -8080,7 +8264,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
bool writeback = true;
bool write_fault_to_spt;
- if (unlikely(!static_call(kvm_x86_can_emulate_instruction)(vcpu, insn, insn_len)))
+ if (unlikely(!kvm_can_emulate_insn(vcpu, emulation_type, insn, insn_len)))
return 1;
vcpu->arch.l1tf_flush_l1d = true;
@@ -8128,12 +8312,23 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
}
/*
- * Note, EMULTYPE_SKIP is intended for use *only* by vendor callbacks
- * for kvm_skip_emulated_instruction(). The caller is responsible for
- * updating interruptibility state and injecting single-step #DBs.
+ * EMULTYPE_SKIP without EMULTYPE_COMPLETE_USER_EXIT is intended for
+ * use *only* by vendor callbacks for kvm_skip_emulated_instruction().
+ * The caller is responsible for updating interruptibility state and
+ * injecting single-step #DBs.
*/
if (emulation_type & EMULTYPE_SKIP) {
- kvm_rip_write(vcpu, ctxt->_eip);
+ if (ctxt->mode != X86EMUL_MODE_PROT64)
+ ctxt->eip = (u32)ctxt->_eip;
+ else
+ ctxt->eip = ctxt->_eip;
+
+ if (emulation_type & EMULTYPE_COMPLETE_USER_EXIT) {
+ r = 1;
+ goto writeback;
+ }
+
+ kvm_rip_write(vcpu, ctxt->eip);
if (ctxt->eflags & X86_EFLAGS_RF)
kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
return 1;
@@ -8197,17 +8392,24 @@ restart:
writeback = false;
r = 0;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
+ } else if (vcpu->arch.complete_userspace_io) {
+ writeback = false;
+ r = 0;
} else if (r == EMULATION_RESTART)
goto restart;
else
r = 1;
+writeback:
if (writeback) {
unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
if (!ctxt->have_exception ||
exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+ kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_INSTRUCTIONS);
+ if (ctxt->is_branch)
+ kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
kvm_rip_write(vcpu, ctxt->eip);
if (r && (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
r = kvm_vcpu_do_singlestep(vcpu);
@@ -8394,7 +8596,8 @@ static void __kvmclock_cpufreq_notifier(struct cpufreq_freqs *freq, int cpu)
{
struct kvm *kvm;
struct kvm_vcpu *vcpu;
- int i, send_ipi = 0;
+ int send_ipi = 0;
+ unsigned long i;
/*
* We allow guests to temporarily run on slowing clocks,
@@ -8519,57 +8722,12 @@ static void kvm_timer_init(void)
kvmclock_cpu_online, kvmclock_cpu_down_prep);
}
-DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
-EXPORT_PER_CPU_SYMBOL_GPL(current_vcpu);
-
-int kvm_is_in_guest(void)
-{
- return __this_cpu_read(current_vcpu) != NULL;
-}
-
-static int kvm_is_user_mode(void)
-{
- int user_mode = 3;
-
- if (__this_cpu_read(current_vcpu))
- user_mode = static_call(kvm_x86_get_cpl)(__this_cpu_read(current_vcpu));
-
- return user_mode != 0;
-}
-
-static unsigned long kvm_get_guest_ip(void)
-{
- unsigned long ip = 0;
-
- if (__this_cpu_read(current_vcpu))
- ip = kvm_rip_read(__this_cpu_read(current_vcpu));
-
- return ip;
-}
-
-static void kvm_handle_intel_pt_intr(void)
-{
- struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
-
- kvm_make_request(KVM_REQ_PMI, vcpu);
- __set_bit(MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI_BIT,
- (unsigned long *)&vcpu->arch.pmu.global_status);
-}
-
-static struct perf_guest_info_callbacks kvm_guest_cbs = {
- .is_in_guest = kvm_is_in_guest,
- .is_user_mode = kvm_is_user_mode,
- .get_guest_ip = kvm_get_guest_ip,
- .handle_intel_pt_intr = kvm_handle_intel_pt_intr,
-};
-
#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
struct kvm *kvm;
-
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
mutex_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
@@ -8676,8 +8834,6 @@ int kvm_arch_init(void *opaque)
kvm_timer_init();
- perf_register_guest_info_callbacks(&kvm_guest_cbs);
-
if (boot_cpu_has(X86_FEATURE_XSAVE)) {
host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
@@ -8709,7 +8865,6 @@ void kvm_arch_exit(void)
clear_hv_tscchange_cb();
#endif
kvm_lapic_exit();
- perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
@@ -8730,8 +8885,15 @@ void kvm_arch_exit(void)
#endif
}
-static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason)
+static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
{
+ /*
+ * The vCPU has halted, e.g. executed HLT. Update the run state if the
+ * local APIC is in-kernel; the run loop will detect the non-runnable
+ * state and halt the vCPU. Exit to userspace if the local APIC is
+ * managed by userspace, in which case userspace is responsible for
+ * handling wake events.
+ */
++vcpu->stat.halt_exits;
if (lapic_in_kernel(vcpu)) {
vcpu->arch.mp_state = state;
@@ -8742,11 +8904,11 @@ static int __kvm_vcpu_halt(struct kvm_vcpu *vcpu, int state, int reason)
}
}
-int kvm_vcpu_halt(struct kvm_vcpu *vcpu)
+int kvm_emulate_halt_noskip(struct kvm_vcpu *vcpu)
{
- return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
+ return __kvm_emulate_halt(vcpu, KVM_MP_STATE_HALTED, KVM_EXIT_HLT);
}
-EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
+EXPORT_SYMBOL_GPL(kvm_emulate_halt_noskip);
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
@@ -8755,7 +8917,7 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
* TODO: we might be squashing a GUESTDBG_SINGLESTEP-triggered
* KVM_EXIT_DEBUG here.
*/
- return kvm_vcpu_halt(vcpu) && ret;
+ return kvm_emulate_halt_noskip(vcpu) && ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
@@ -8763,7 +8925,8 @@ int kvm_emulate_ap_reset_hold(struct kvm_vcpu *vcpu)
{
int ret = kvm_skip_emulated_instruction(vcpu);
- return __kvm_vcpu_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD, KVM_EXIT_AP_RESET_HOLD) && ret;
+ return __kvm_emulate_halt(vcpu, KVM_MP_STATE_AP_RESET_HOLD,
+ KVM_EXIT_AP_RESET_HOLD) && ret;
}
EXPORT_SYMBOL_GPL(kvm_emulate_ap_reset_hold);
@@ -9614,7 +9777,7 @@ void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
}
-void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
+static void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
{
if (!lapic_in_kernel(vcpu))
return;
@@ -9820,7 +9983,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* result in virtual interrupt delivery.
*/
local_irq_disable();
- vcpu->mode = IN_GUEST_MODE;
+
+ /* Store vcpu->apicv_active before vcpu->mode. */
+ smp_store_release(&vcpu->mode, IN_GUEST_MODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -9839,10 +10004,11 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
smp_mb__after_srcu_read_unlock();
/*
- * This handles the case where a posted interrupt was
- * notified with kvm_vcpu_kick. Assigned devices can
- * use the POSTED_INTR_VECTOR even if APICv is disabled,
- * so do it even if APICv is disabled on this vCPU.
+ * Process pending posted interrupts to handle the case where the
+ * notification IRQ arrived in the host, or was never sent (because the
+ * target vCPU wasn't running). Do this regardless of the vCPU's APICv
+ * status, KVM doesn't update assigned devices when APICv is inhibited,
+ * i.e. they can post interrupts even if APICv is temporarily disabled.
*/
if (kvm_lapic_enabled(vcpu))
static_call_cond(kvm_x86_sync_pir_to_irr)(vcpu);
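
The smp_store_release() above guarantees that the earlier write of apicv_active is visible before any observer sees IN_GUEST_MODE; the reader side pairs it with an acquire load. A generic C11 model of the pairing (not the kernel's primitives):

#include <stdatomic.h>

static _Atomic int mode;     /* stands in for vcpu->mode */
static int apicv_active;     /* plain data published before mode */

static void writer(void)
{
	apicv_active = 1;
	/* Release: apicv_active is visible to anyone who sees mode == 1. */
	atomic_store_explicit(&mode, 1, memory_order_release);
}

static int reader(void)
{
	/* Acquire pairs with the release store above. */
	if (atomic_load_explicit(&mode, memory_order_acquire))
		return apicv_active;   /* guaranteed to read 1 */
	return -1;                     /* mode not yet published */
}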
@@ -9866,6 +10032,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (test_thread_flag(TIF_NEED_FPU_LOAD))
switch_fpu_return();
+ if (vcpu->arch.guest_fpu.xfd_err)
+ wrmsrl(MSR_IA32_XFD_ERR, vcpu->arch.guest_fpu.xfd_err);
+
if (unlikely(vcpu->arch.switch_db_regs)) {
set_debugreg(0, 7);
set_debugreg(vcpu->arch.eff_db[0], 0);
@@ -9876,6 +10045,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
set_debugreg(0, 7);
}
+ guest_timing_enter_irqoff();
+
for (;;) {
/*
* Assert that vCPU vs. VM APICv state is consistent. An APICv
@@ -9927,8 +10098,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
+ /*
+ * Sync xfd before calling handle_exit_irqoff(), which may
+ * rely on guest_fpu::xfd being up-to-date (e.g.
+ * in #NM irqoff handler).
+ */
+ if (vcpu->arch.xfd_no_write_intercept)
+ fpu_sync_guest_vmexit_xfd_state();
+
static_call(kvm_x86_handle_exit_irqoff)(vcpu);
+ if (vcpu->arch.guest_fpu.xfd_err)
+ wrmsrl(MSR_IA32_XFD_ERR, 0);
+
/*
* Consume any pending interrupts, including the possible source of
* VM-Exit on SVM and any ticks that occur between VM-Exit and now.
@@ -9936,7 +10118,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* interrupts on processors that implement an interrupt shadow, the
* stat.exits increment will do nicely.
*/
- kvm_before_interrupt(vcpu);
+ kvm_before_interrupt(vcpu, KVM_HANDLING_IRQ);
local_irq_enable();
++vcpu->stat.exits;
local_irq_disable();
@@ -9949,7 +10131,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* of accounting via context tracking, but the loss of accuracy is
* acceptable for all known use cases.
*/
- vtime_account_guest_exit();
+ guest_timing_exit_irqoff();
if (lapic_in_kernel(vcpu)) {
s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta;
@@ -9993,14 +10175,29 @@ out:
static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
- if (!kvm_arch_vcpu_runnable(vcpu) &&
- (!kvm_x86_ops.pre_block || static_call(kvm_x86_pre_block)(vcpu) == 0)) {
+ bool hv_timer;
+
+ if (!kvm_arch_vcpu_runnable(vcpu)) {
+ /*
+ * Switch to the software timer before halt-polling/blocking as
+ * the guest's timer may be a break event for the vCPU, and the
+ * hypervisor timer runs only when the CPU is in guest mode.
+ * Switch before halt-polling so that KVM recognizes an expired
+ * timer before blocking.
+ */
+ hv_timer = kvm_lapic_hv_timer_in_use(vcpu);
+ if (hv_timer)
+ kvm_lapic_switch_to_sw_timer(vcpu);
+
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- kvm_vcpu_block(vcpu);
+ if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
+ kvm_vcpu_halt(vcpu);
+ else
+ kvm_vcpu_block(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- if (kvm_x86_ops.post_block)
- static_call(kvm_x86_post_block)(vcpu);
+ if (hv_timer)
+ kvm_lapic_switch_to_hv_timer(vcpu);
if (!kvm_check_request(KVM_REQ_UNHALT, vcpu))
return 1;
@@ -10193,6 +10390,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
r = -EINTR;
goto out;
}
+ /*
+ * It should be impossible for the hypervisor timer to be in
+ * use before KVM has ever run the vCPU.
+ */
+ WARN_ON_ONCE(kvm_lapic_hv_timer_in_use(vcpu));
kvm_vcpu_block(vcpu);
if (kvm_apic_accept_events(vcpu) < 0) {
r = 0;
@@ -10237,10 +10439,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
} else
WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
- if (kvm_run->immediate_exit)
+ if (kvm_run->immediate_exit) {
r = -EINTR;
- else
- r = vcpu_run(vcpu);
+ goto out;
+ }
+
+ r = static_call(kvm_x86_vcpu_pre_run)(vcpu);
+ if (r <= 0)
+ goto out;
+
+ r = vcpu_run(vcpu);
out:
kvm_put_guest_fpu(vcpu);
@@ -10556,7 +10764,8 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
vcpu->arch.cr2 = sregs->cr2;
*mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
vcpu->arch.cr3 = sregs->cr3;
- kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
+ kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
+ static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3);
kvm_set_cr8(vcpu, sregs->cr8);
@@ -10573,7 +10782,7 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs,
if (update_pdptrs) {
idx = srcu_read_lock(&vcpu->kvm->srcu);
if (is_pae_paging(vcpu)) {
- load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
+ load_pdptrs(vcpu, kvm_read_cr3(vcpu));
*mmu_reset_needed = 1;
}
srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -10671,7 +10880,7 @@ static void kvm_arch_vcpu_guestdbg_update_apicv_inhibit(struct kvm *kvm)
{
bool inhibit = false;
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
down_write(&kvm->arch.apicv_update_lock);
@@ -11075,7 +11284,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vcpu->arch.msr_misc_features_enables = 0;
- vcpu->arch.xcr0 = XFEATURE_MASK_FP;
+ __kvm_set_xcr(vcpu, 0, XFEATURE_MASK_FP);
+ __kvm_set_msr(vcpu, MSR_IA32_XSS, 0, true);
}
/* All GPRs except RDX (handled below) are zeroed on RESET/INIT. */
@@ -11092,8 +11302,6 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
cpuid_0x1 = kvm_find_cpuid_entry(vcpu, 1, 0);
kvm_rdx_write(vcpu, cpuid_0x1 ? cpuid_0x1->eax : 0x600);
- vcpu->arch.ia32_xss = 0;
-
static_call(kvm_x86_vcpu_reset)(vcpu, init_event);
kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
@@ -11159,7 +11367,7 @@ int kvm_arch_hardware_enable(void)
{
struct kvm *kvm;
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
int ret;
u64 local_tsc;
u64 max_tsc = 0;
@@ -11269,6 +11477,8 @@ int kvm_arch_hardware_setup(void *opaque)
memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
kvm_ops_static_call_update();
+ kvm_register_perf_callbacks(ops->handle_intel_pt_intr);
+
if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
supported_xss = 0;
@@ -11296,6 +11506,8 @@ int kvm_arch_hardware_setup(void *opaque)
void kvm_arch_hardware_unsetup(void)
{
+ kvm_unregister_perf_callbacks();
+
static_call(kvm_x86_hardware_unsetup)();
}
@@ -11412,7 +11624,7 @@ static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
static void kvm_free_vcpus(struct kvm *kvm)
{
- unsigned int i;
+ unsigned long i;
struct kvm_vcpu *vcpu;
/*
@@ -11422,15 +11634,8 @@ static void kvm_free_vcpus(struct kvm *kvm)
kvm_clear_async_pf_completion_queue(vcpu);
kvm_unload_vcpu_mmu(vcpu);
}
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_vcpu_destroy(vcpu);
- mutex_lock(&kvm->lock);
- for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
- kvm->vcpus[i] = NULL;
-
- atomic_set(&kvm->online_vcpus, 0);
- mutex_unlock(&kvm->lock);
+ kvm_destroy_vcpus(kvm);
}
void kvm_arch_sync_events(struct kvm *kvm)
@@ -11440,8 +11645,6 @@ void kvm_arch_sync_events(struct kvm *kvm)
kvm_free_pit(kvm);
}
-#define ERR_PTR_USR(e) ((void __user *)ERR_PTR(e))
-
/**
* __x86_set_memory_region: Setup KVM internal memory slot
*
@@ -11598,9 +11801,9 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages)
}
static int kvm_alloc_memslot_metadata(struct kvm *kvm,
- struct kvm_memory_slot *slot,
- unsigned long npages)
+ struct kvm_memory_slot *slot)
{
+ unsigned long npages = slot->npages;
int i, r;
/*
@@ -11665,7 +11868,7 @@ out_free:
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
struct kvm_vcpu *vcpu;
- int i;
+ unsigned long i;
/*
* memslots->generation has been incremented.
@@ -11679,13 +11882,18 @@ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
- enum kvm_mr_change change)
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
{
if (change == KVM_MR_CREATE || change == KVM_MR_MOVE)
- return kvm_alloc_memslot_metadata(kvm, memslot,
- mem->memory_size >> PAGE_SHIFT);
+ return kvm_alloc_memslot_metadata(kvm, new);
+
+ if (change == KVM_MR_FLAGS_ONLY)
+ memcpy(&new->arch, &old->arch, sizeof(old->arch));
+ else if (WARN_ON_ONCE(change != KVM_MR_DELETE))
+ return -EIO;
+
return 0;
}
@@ -11709,13 +11917,15 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- bool log_dirty_pages = new->flags & KVM_MEM_LOG_DIRTY_PAGES;
+ u32 old_flags = old ? old->flags : 0;
+ u32 new_flags = new ? new->flags : 0;
+ bool log_dirty_pages = new_flags & KVM_MEM_LOG_DIRTY_PAGES;
/*
* Update CPU dirty logging if dirty logging is being toggled. This
* applies to all operations.
*/
- if ((old->flags ^ new->flags) & KVM_MEM_LOG_DIRTY_PAGES)
+ if ((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)
kvm_mmu_update_cpu_dirty_logging(kvm, log_dirty_pages);
/*
@@ -11733,7 +11943,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
* MOVE/DELETE: The old mappings will already have been cleaned up by
* kvm_arch_flush_shadow_memslot().
*/
- if ((change != KVM_MR_FLAGS_ONLY) || (new->flags & KVM_MEM_READONLY))
+ if ((change != KVM_MR_FLAGS_ONLY) || (new_flags & KVM_MEM_READONLY))
return;
/*
@@ -11741,7 +11951,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
* other flag is LOG_DIRTY_PAGES, i.e. something is wrong if dirty
* logging isn't being toggled on or off.
*/
- if (WARN_ON_ONCE(!((old->flags ^ new->flags) & KVM_MEM_LOG_DIRTY_PAGES)))
+ if (WARN_ON_ONCE(!((old_flags ^ new_flags) & KVM_MEM_LOG_DIRTY_PAGES)))
return;
if (!log_dirty_pages) {
@@ -11777,14 +11987,18 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change)
{
- if (!kvm->arch.n_requested_mmu_pages)
- kvm_mmu_change_mmu_pages(kvm,
- kvm_mmu_calculate_default_mmu_pages(kvm));
+ if (!kvm->arch.n_requested_mmu_pages &&
+ (change == KVM_MR_CREATE || change == KVM_MR_DELETE)) {
+ unsigned long nr_mmu_pages;
+
+ nr_mmu_pages = kvm->nr_memslot_pages / KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO;
+ nr_mmu_pages = max(nr_mmu_pages, KVM_MIN_ALLOC_MMU_PAGES);
+ kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
+ }
kvm_mmu_slot_apply_flags(kvm, old, new, change);
@@ -11885,6 +12099,11 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
return vcpu->arch.preempted_in_kernel;
}
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+ return kvm_rip_read(vcpu);
+}
+
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -12294,12 +12513,13 @@ EXPORT_SYMBOL_GPL(kvm_spec_ctrl_test_value);
void kvm_fixup_and_inject_pf_error(struct kvm_vcpu *vcpu, gva_t gva, u16 error_code)
{
+ struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
struct x86_exception fault;
u32 access = error_code &
(PFERR_WRITE_MASK | PFERR_FETCH_MASK | PFERR_USER_MASK);
if (!(error_code & PFERR_PRESENT_MASK) ||
- vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, &fault) != UNMAPPED_GVA) {
+ mmu->gva_to_gpa(vcpu, mmu, gva, access, &fault) != UNMAPPED_GVA) {
/*
* If vcpu->arch.walk_mmu->gva_to_gpa succeeded, the page
* tables probably do not match the TLB. Just proceed
@@ -12636,6 +12856,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_unaccelerated_access);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_incomplete_ipi);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_avic_ga_log);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_update_request);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_apicv_accept_irq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_enter);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_enter);
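
The commit_memory_region hunk above stops recalculating the MMU page budget
from a full memslot walk and instead derives it from the VM-wide
kvm->nr_memslot_pages count, and only on CREATE or DELETE, since FLAGS_ONLY
and MOVE cannot change the page count. A minimal user-space sketch of the
arithmetic, assuming the ratio of 50 and the 64-page floor that
KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO and KVM_MIN_ALLOC_MMU_PAGES had at the
time:

#include <stdio.h>

#define PAGES_TO_MMU_PAGES_RATIO 50	/* KVM_MEMSLOT_PAGES_TO_MMU_PAGES_RATIO */
#define MIN_ALLOC_MMU_PAGES	 64UL	/* KVM_MIN_ALLOC_MMU_PAGES */

static unsigned long mmu_page_limit(unsigned long nr_memslot_pages)
{
	/* one MMU page budgeted per 50 guest pages across all memslots */
	unsigned long nr = nr_memslot_pages / PAGES_TO_MMU_PAGES_RATIO;

	return nr > MIN_ALLOC_MMU_PAGES ? nr : MIN_ALLOC_MMU_PAGES;
}

int main(void)
{
	printf("%lu\n", mmu_page_limit(1048576));	/* 4GiB guest: 20971 */
	printf("%lu\n", mmu_page_limit(2048));		/* 8MiB guest: floor, 64 */
	return 0;
}
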
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 4abcd8d9836d..767ec7f99516 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -10,51 +10,6 @@
void kvm_spurious_fault(void);
-static __always_inline void kvm_guest_enter_irqoff(void)
-{
- /*
- * VMENTER enables interrupts (host state), but the kernel state is
- * interrupts disabled when this is invoked. Also tell RCU about
- * it. This is the same logic as for exit_to_user_mode().
- *
- * This ensures that e.g. latency analysis on the host observes
- * guest mode as interrupt enabled.
- *
- * guest_enter_irqoff() informs context tracking about the
- * transition to guest mode and if enabled adjusts RCU state
- * accordingly.
- */
- instrumentation_begin();
- trace_hardirqs_on_prepare();
- lockdep_hardirqs_on_prepare(CALLER_ADDR0);
- instrumentation_end();
-
- guest_enter_irqoff();
- lockdep_hardirqs_on(CALLER_ADDR0);
-}
-
-static __always_inline void kvm_guest_exit_irqoff(void)
-{
- /*
- * VMEXIT disables interrupts (host state), but tracing and lockdep
- * have them in state 'on' as recorded before entering guest mode.
- * Same as enter_from_user_mode().
- *
- * context_tracking_guest_exit() restores host context and reinstates
- * RCU if enabled and required.
- *
- * This needs to be done immediately after VM-Exit, before any code
- * that might contain tracepoints or call out to the greater world,
- * e.g. before x86_spec_ctrl_restore_host().
- */
- lockdep_hardirqs_off(CALLER_ADDR0);
- context_tracking_guest_exit();
-
- instrumentation_begin();
- trace_hardirqs_off_finish();
- instrumentation_end();
-}
-
#define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check) \
({ \
bool failed = (consistency_check); \
@@ -301,7 +256,6 @@ static inline bool kvm_vcpu_latch_init(struct kvm_vcpu *vcpu)
return is_smm(vcpu) || static_call(kvm_x86_apic_init_signal_blocked)(vcpu);
}
-void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock, int sec_hi_ofs);
void kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
u64 get_kvmclock_ns(struct kvm *kvm);
@@ -337,6 +291,7 @@ extern u64 host_xcr0;
extern u64 supported_xcr0;
extern u64 host_xss;
extern u64 supported_xss;
+extern bool enable_pmu;
static inline bool kvm_mpx_supported(void)
{
@@ -392,18 +347,27 @@ static inline bool kvm_cstate_in_guest(struct kvm *kvm)
return kvm->arch.cstate_in_guest;
}
-DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);
+enum kvm_intr_type {
+ /* Values are arbitrary, but must be non-zero. */
+ KVM_HANDLING_IRQ = 1,
+ KVM_HANDLING_NMI,
+};
-static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
+static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu,
+ enum kvm_intr_type intr)
{
- __this_cpu_write(current_vcpu, vcpu);
+ WRITE_ONCE(vcpu->arch.handling_intr_from_guest, (u8)intr);
}
static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
- __this_cpu_write(current_vcpu, NULL);
+ WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}
+static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.handling_intr_from_guest == KVM_HANDLING_NMI;
+}
static inline bool kvm_pat_valid(u64 data)
{
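
Where the old code published a per-CPU current_vcpu pointer around every
interrupt window, the vCPU itself now records which kind of interrupt it is
fielding, so for instance a perf NMI handler can tell a guest-induced NMI
apart from an ordinary IRQ. A minimal model with a hypothetical consumer
(illustration only, not kernel code):

enum intr_type { HANDLING_IRQ = 1, HANDLING_NMI };	/* mirrors kvm_intr_type */

struct vcpu_stub {
	unsigned char handling_intr_from_guest;
};

/* pairs with kvm_before_interrupt(vcpu, KVM_HANDLING_NMI) in the hunk above */
static int nmi_is_from_guest(const struct vcpu_stub *v)
{
	return v->handling_intr_from_guest == HANDLING_NMI;
}
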
diff --git a/arch/x86/kvm/xen.c b/arch/x86/kvm/xen.c
index dff2bdf9507a..74be1fda58e3 100644
--- a/arch/x86/kvm/xen.c
+++ b/arch/x86/kvm/xen.c
@@ -16,6 +16,7 @@
#include <trace/events/kvm.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
+#include <xen/interface/event_channel.h>
#include "trace.h"
@@ -23,38 +24,77 @@ DEFINE_STATIC_KEY_DEFERRED_FALSE(kvm_xen_enabled, HZ);
static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
{
+ struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
+ struct pvclock_wall_clock *wc;
gpa_t gpa = gfn_to_gpa(gfn);
- int wc_ofs, sec_hi_ofs;
+ u32 *wc_sec_hi;
+ u32 wc_version;
+ u64 wall_nsec;
int ret = 0;
int idx = srcu_read_lock(&kvm->srcu);
- if (kvm_is_error_hva(gfn_to_hva(kvm, gfn))) {
- ret = -EFAULT;
+ if (gfn == GPA_INVALID) {
+ kvm_gfn_to_pfn_cache_destroy(kvm, gpc);
goto out;
}
- kvm->arch.xen.shinfo_gfn = gfn;
+
+ do {
+ ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, false, true,
+ gpa, PAGE_SIZE, false);
+ if (ret)
+ goto out;
+
+ /*
+ * This code mirrors kvm_write_wall_clock() except that it writes
+ * directly through the pfn cache and doesn't mark the page dirty.
+ */
+ wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);
+
+ /* It could be invalid again already, so we need to check */
+ read_lock_irq(&gpc->lock);
+
+ if (gpc->valid)
+ break;
+
+ read_unlock_irq(&gpc->lock);
+ } while (1);
/* Paranoia checks on the 32-bit struct layout */
BUILD_BUG_ON(offsetof(struct compat_shared_info, wc) != 0x900);
BUILD_BUG_ON(offsetof(struct compat_shared_info, arch.wc_sec_hi) != 0x924);
BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
- /* 32-bit location by default */
- wc_ofs = offsetof(struct compat_shared_info, wc);
- sec_hi_ofs = offsetof(struct compat_shared_info, arch.wc_sec_hi);
-
#ifdef CONFIG_X86_64
/* Paranoia checks on the 64-bit struct layout */
BUILD_BUG_ON(offsetof(struct shared_info, wc) != 0xc00);
BUILD_BUG_ON(offsetof(struct shared_info, wc_sec_hi) != 0xc0c);
- if (kvm->arch.xen.long_mode) {
- wc_ofs = offsetof(struct shared_info, wc);
- sec_hi_ofs = offsetof(struct shared_info, wc_sec_hi);
- }
+ if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
+ struct shared_info *shinfo = gpc->khva;
+
+ wc_sec_hi = &shinfo->wc_sec_hi;
+ wc = &shinfo->wc;
+ } else
#endif
+ {
+ struct compat_shared_info *shinfo = gpc->khva;
+
+ wc_sec_hi = &shinfo->arch.wc_sec_hi;
+ wc = &shinfo->wc;
+ }
+
+ /* Increment and ensure an odd value */
+ wc_version = wc->version = (wc->version + 1) | 1;
+ smp_wmb();
+
+ wc->nsec = do_div(wall_nsec, 1000000000);
+ wc->sec = (u32)wall_nsec;
+ *wc_sec_hi = wall_nsec >> 32;
+ smp_wmb();
+
+ wc->version = wc_version + 1;
+ read_unlock_irq(&gpc->lock);
- kvm_write_wall_clock(kvm, gpa + wc_ofs, sec_hi_ofs - wc_ofs);
kvm_make_all_cpus_request(kvm, KVM_REQ_MASTERCLOCK_UPDATE);
out:
@@ -93,32 +133,57 @@ static void kvm_xen_update_runstate(struct kvm_vcpu *v, int state)
void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
{
struct kvm_vcpu_xen *vx = &v->arch.xen;
+ struct gfn_to_hva_cache *ghc = &vx->runstate_cache;
+ struct kvm_memslots *slots = kvm_memslots(v->kvm);
+ bool atomic = (state == RUNSTATE_runnable);
uint64_t state_entry_time;
- unsigned int offset;
+ int __user *user_state;
+ uint64_t __user *user_times;
kvm_xen_update_runstate(v, state);
if (!vx->runstate_set)
return;
- BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
+ if (unlikely(slots->generation != ghc->generation || kvm_is_error_hva(ghc->hva)) &&
+ kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len))
+ return;
+
+ /* We made sure it fits in a single page */
+ BUG_ON(!ghc->memslot);
+
+ if (atomic)
+ pagefault_disable();
- offset = offsetof(struct compat_vcpu_runstate_info, state_entry_time);
-#ifdef CONFIG_X86_64
/*
- * The only difference is alignment of uint64_t in 32-bit.
- * So the first field 'state' is accessed directly using
- * offsetof() (where its offset happens to be zero), while the
- * remaining fields which are all uint64_t, start at 'offset'
- * which we tweak here by adding 4.
+ * The only difference between 32-bit and 64-bit versions of the
+	 * runstate struct is the alignment of uint64_t in 32-bit, which
+ * means that the 64-bit version has an additional 4 bytes of
+ * padding after the first field 'state'.
+ *
+ * So we use 'int __user *user_state' to point to the state field,
+ * and 'uint64_t __user *user_times' for runstate_entry_time. So
+ * the actual array of time[] in each state starts at user_times[1].
*/
+ BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state) != 0);
+ BUILD_BUG_ON(offsetof(struct compat_vcpu_runstate_info, state) != 0);
+ user_state = (int __user *)ghc->hva;
+
+ BUILD_BUG_ON(sizeof(struct compat_vcpu_runstate_info) != 0x2c);
+
+ user_times = (uint64_t __user *)(ghc->hva +
+ offsetof(struct compat_vcpu_runstate_info,
+ state_entry_time));
+#ifdef CONFIG_X86_64
BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, state_entry_time) !=
offsetof(struct compat_vcpu_runstate_info, state_entry_time) + 4);
BUILD_BUG_ON(offsetof(struct vcpu_runstate_info, time) !=
offsetof(struct compat_vcpu_runstate_info, time) + 4);
if (v->kvm->arch.xen.long_mode)
- offset = offsetof(struct vcpu_runstate_info, state_entry_time);
+ user_times = (uint64_t __user *)(ghc->hva +
+ offsetof(struct vcpu_runstate_info,
+ state_entry_time));
#endif
/*
* First write the updated state_entry_time at the appropriate
@@ -132,10 +197,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state_entry_time) !=
sizeof(state_entry_time));
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &state_entry_time, offset,
- sizeof(state_entry_time)))
- return;
+ if (__put_user(state_entry_time, user_times))
+ goto out;
smp_wmb();
/*
@@ -149,11 +212,8 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
BUILD_BUG_ON(sizeof_field(struct compat_vcpu_runstate_info, state) !=
sizeof(vx->current_runstate));
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &vx->current_runstate,
- offsetof(struct vcpu_runstate_info, state),
- sizeof(vx->current_runstate)))
- return;
+ if (__put_user(vx->current_runstate, user_state))
+ goto out;
/*
* Write the actual runstate times immediately after the
@@ -168,28 +228,29 @@ void kvm_xen_update_runstate_guest(struct kvm_vcpu *v, int state)
BUILD_BUG_ON(sizeof_field(struct vcpu_runstate_info, time) !=
sizeof(vx->runstate_times));
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &vx->runstate_times[0],
- offset + sizeof(u64),
- sizeof(vx->runstate_times)))
- return;
-
+ if (__copy_to_user(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times)))
+ goto out;
smp_wmb();
/*
* Finally, clear the XEN_RUNSTATE_UPDATE bit in the guest's
* runstate_entry_time field.
*/
-
state_entry_time &= ~XEN_RUNSTATE_UPDATE;
- if (kvm_write_guest_offset_cached(v->kvm, &v->arch.xen.runstate_cache,
- &state_entry_time, offset,
- sizeof(state_entry_time)))
- return;
+ __put_user(state_entry_time, user_times);
+ smp_wmb();
+
+ out:
+ mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+
+ if (atomic)
+ pagefault_enable();
}
int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
{
+ unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
+ bool atomic = in_atomic() || !task_is_running(current);
int err;
u8 rc = 0;
@@ -199,6 +260,9 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
*/
struct gfn_to_hva_cache *ghc = &v->arch.xen.vcpu_info_cache;
struct kvm_memslots *slots = kvm_memslots(v->kvm);
+ bool ghc_valid = slots->generation == ghc->generation &&
+ !kvm_is_error_hva(ghc->hva) && ghc->memslot;
+
unsigned int offset = offsetof(struct vcpu_info, evtchn_upcall_pending);
/* No need for compat handling here */
@@ -214,8 +278,7 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
* cache in kvm_read_guest_offset_cached(), but just uses
* __get_user() instead. And falls back to the slow path.
*/
- if (likely(slots->generation == ghc->generation &&
- !kvm_is_error_hva(ghc->hva) && ghc->memslot)) {
+ if (!evtchn_pending_sel && ghc_valid) {
/* Fast path */
pagefault_disable();
err = __get_user(rc, (u8 __user *)ghc->hva + offset);
@@ -234,11 +297,76 @@ int __kvm_xen_has_interrupt(struct kvm_vcpu *v)
* and we'll end up getting called again from a context where we *can*
* fault in the page and wait for it.
*/
- if (in_atomic() || !task_is_running(current))
+ if (atomic)
return 1;
- kvm_read_guest_offset_cached(v->kvm, ghc, &rc, offset,
- sizeof(rc));
+ if (!ghc_valid) {
+ err = kvm_gfn_to_hva_cache_init(v->kvm, ghc, ghc->gpa, ghc->len);
+ if (err || !ghc->memslot) {
+ /*
+ * If this failed, userspace has screwed up the
+ * vcpu_info mapping. No interrupts for you.
+ */
+ return 0;
+ }
+ }
+
+ /*
+ * Now we have a valid (protected by srcu) userspace HVA in
+ * ghc->hva which points to the struct vcpu_info. If there
+ * are any bits in the in-kernel evtchn_pending_sel then
+ * we need to write those to the guest vcpu_info and set
+ * its evtchn_upcall_pending flag. If there aren't any bits
+ * to add, we only want to *check* evtchn_upcall_pending.
+ */
+ if (evtchn_pending_sel) {
+ bool long_mode = v->kvm->arch.xen.long_mode;
+
+ if (!user_access_begin((void __user *)ghc->hva, sizeof(struct vcpu_info)))
+ return 0;
+
+ if (IS_ENABLED(CONFIG_64BIT) && long_mode) {
+ struct vcpu_info __user *vi = (void __user *)ghc->hva;
+
+ /* Attempt to set the evtchn_pending_sel bits in the
+ * guest, and if that succeeds then clear the same
+ * bits in the in-kernel version. */
+ asm volatile("1:\t" LOCK_PREFIX "orq %0, %1\n"
+ "\tnotq %0\n"
+ "\t" LOCK_PREFIX "andq %0, %2\n"
+ "2:\n"
+ _ASM_EXTABLE_UA(1b, 2b)
+ : "=r" (evtchn_pending_sel),
+ "+m" (vi->evtchn_pending_sel),
+ "+m" (v->arch.xen.evtchn_pending_sel)
+ : "0" (evtchn_pending_sel));
+ } else {
+ struct compat_vcpu_info __user *vi = (void __user *)ghc->hva;
+ u32 evtchn_pending_sel32 = evtchn_pending_sel;
+
+ /* Attempt to set the evtchn_pending_sel bits in the
+ * guest, and if that succeeds then clear the same
+ * bits in the in-kernel version. */
+ asm volatile("1:\t" LOCK_PREFIX "orl %0, %1\n"
+ "\tnotl %0\n"
+ "\t" LOCK_PREFIX "andl %0, %2\n"
+ "2:\n"
+ _ASM_EXTABLE_UA(1b, 2b)
+ : "=r" (evtchn_pending_sel32),
+ "+m" (vi->evtchn_pending_sel),
+ "+m" (v->arch.xen.evtchn_pending_sel)
+ : "0" (evtchn_pending_sel32));
+ }
+ rc = 1;
+ unsafe_put_user(rc, (u8 __user *)ghc->hva + offset, err);
+
+ err:
+ user_access_end();
+
+ mark_page_dirty_in_slot(v->kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
+ } else {
+ __get_user(rc, (u8 __user *)ghc->hva + offset);
+ }
return rc;
}
@@ -260,15 +388,9 @@ int kvm_xen_hvm_set_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
break;
case KVM_XEN_ATTR_TYPE_SHARED_INFO:
- if (data->u.shared_info.gfn == GPA_INVALID) {
- kvm->arch.xen.shinfo_gfn = GPA_INVALID;
- r = 0;
- break;
- }
r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
break;
-
case KVM_XEN_ATTR_TYPE_UPCALL_VECTOR:
if (data->u.vector && data->u.vector < 0x10)
r = -EINVAL;
@@ -299,7 +421,10 @@ int kvm_xen_hvm_get_attr(struct kvm *kvm, struct kvm_xen_hvm_attr *data)
break;
case KVM_XEN_ATTR_TYPE_SHARED_INFO:
- data->u.shared_info.gfn = kvm->arch.xen.shinfo_gfn;
+ if (kvm->arch.xen.shinfo_cache.active)
+ data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
+ else
+ data->u.shared_info.gfn = GPA_INVALID;
r = 0;
break;
@@ -337,6 +462,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
break;
}
+ /* It must fit within a single page */
+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_info) > PAGE_SIZE) {
+ r = -EINVAL;
+ break;
+ }
+
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.xen.vcpu_info_cache,
data->u.gpa,
@@ -354,6 +485,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
break;
}
+ /* It must fit within a single page */
+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct pvclock_vcpu_time_info) > PAGE_SIZE) {
+ r = -EINVAL;
+ break;
+ }
+
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.xen.vcpu_time_info_cache,
data->u.gpa,
@@ -375,6 +512,12 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
break;
}
+ /* It must fit within a single page */
+ if ((data->u.gpa & ~PAGE_MASK) + sizeof(struct vcpu_runstate_info) > PAGE_SIZE) {
+ r = -EINVAL;
+ break;
+ }
+
r = kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.xen.runstate_cache,
data->u.gpa,
@@ -661,11 +804,12 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc)
void kvm_xen_init_vm(struct kvm *kvm)
{
- kvm->arch.xen.shinfo_gfn = GPA_INVALID;
}
void kvm_xen_destroy_vm(struct kvm *kvm)
{
+ kvm_gfn_to_pfn_cache_destroy(kvm, &kvm->arch.xen.shinfo_cache);
+
if (kvm->arch.xen_hvm_config.msr)
static_branch_slow_dec_deferred(&kvm_xen_enabled);
}
@@ -737,3 +881,179 @@ int kvm_xen_hypercall(struct kvm_vcpu *vcpu)
return 0;
}
+
+static inline int max_evtchn_port(struct kvm *kvm)
+{
+ if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
+ return EVTCHN_2L_NR_CHANNELS;
+ else
+ return COMPAT_EVTCHN_2L_NR_CHANNELS;
+}
+
+/*
+ * This follows the kvm_set_irq() API, so it returns:
+ * < 0 Interrupt was ignored (masked or not delivered for other reasons)
+ * = 0 Interrupt was coalesced (previous irq is still pending)
+ * > 0 Number of CPUs interrupt was delivered to
+ */
+int kvm_xen_set_evtchn_fast(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm)
+{
+ struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
+ struct kvm_vcpu *vcpu;
+ unsigned long *pending_bits, *mask_bits;
+ unsigned long flags;
+ int port_word_bit;
+ bool kick_vcpu = false;
+ int idx;
+ int rc;
+
+ vcpu = kvm_get_vcpu_by_id(kvm, e->xen_evtchn.vcpu);
+ if (!vcpu)
+ return -1;
+
+ if (!vcpu->arch.xen.vcpu_info_set)
+ return -1;
+
+ if (e->xen_evtchn.port >= max_evtchn_port(kvm))
+ return -1;
+
+ rc = -EWOULDBLOCK;
+ read_lock_irqsave(&gpc->lock, flags);
+
+ idx = srcu_read_lock(&kvm->srcu);
+ if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
+ goto out_rcu;
+
+ if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
+ struct shared_info *shinfo = gpc->khva;
+ pending_bits = (unsigned long *)&shinfo->evtchn_pending;
+ mask_bits = (unsigned long *)&shinfo->evtchn_mask;
+ port_word_bit = e->xen_evtchn.port / 64;
+ } else {
+ struct compat_shared_info *shinfo = gpc->khva;
+ pending_bits = (unsigned long *)&shinfo->evtchn_pending;
+ mask_bits = (unsigned long *)&shinfo->evtchn_mask;
+ port_word_bit = e->xen_evtchn.port / 32;
+ }
+
+ /*
+ * If this port wasn't already set, and if it isn't masked, then
+ * we try to set the corresponding bit in the in-kernel shadow of
+ * evtchn_pending_sel for the target vCPU. And if *that* wasn't
+ * already set, then we kick the vCPU in question to write to the
+ * *real* evtchn_pending_sel in its own guest vcpu_info struct.
+ */
+ if (test_and_set_bit(e->xen_evtchn.port, pending_bits)) {
+ rc = 0; /* It was already raised */
+ } else if (test_bit(e->xen_evtchn.port, mask_bits)) {
+ rc = -1; /* Masked */
+ } else {
+ rc = 1; /* Delivered. But was the vCPU waking already? */
+ if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
+ kick_vcpu = true;
+ }
+
+ out_rcu:
+ srcu_read_unlock(&kvm->srcu, idx);
+ read_unlock_irqrestore(&gpc->lock, flags);
+
+ if (kick_vcpu) {
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ kvm_vcpu_kick(vcpu);
+ }
+
+ return rc;
+}
+
+/* This is the version called from kvm_set_irq() as the .set function */
+static int evtchn_set_fn(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
+ int irq_source_id, int level, bool line_status)
+{
+ bool mm_borrowed = false;
+ int rc;
+
+ if (!level)
+ return -1;
+
+ rc = kvm_xen_set_evtchn_fast(e, kvm);
+ if (rc != -EWOULDBLOCK)
+ return rc;
+
+ if (current->mm != kvm->mm) {
+ /*
+ * If not on a thread which already belongs to this KVM,
+ * we'd better be in the irqfd workqueue.
+ */
+ if (WARN_ON_ONCE(current->mm))
+ return -EINVAL;
+
+ kthread_use_mm(kvm->mm);
+ mm_borrowed = true;
+ }
+
+ /*
+ * For the irqfd workqueue, using the main kvm->lock mutex is
+ * fine since this function is invoked from kvm_set_irq() with
+	 * no other lock held, no srcu. If in future it is called
+ * directly from a vCPU thread (e.g. on hypercall for an IPI)
+ * then it may need to switch to using a leaf-node mutex for
+ * serializing the shared_info mapping.
+ */
+ mutex_lock(&kvm->lock);
+
+ /*
+ * It is theoretically possible for the page to be unmapped
+ * and the MMU notifier to invalidate the shared_info before
+ * we even get to use it. In that case, this looks like an
+ * infinite loop. It was tempting to do it via the userspace
+ * HVA instead... but that just *hides* the fact that it's
+ * an infinite loop, because if a fault occurs and it waits
+ * for the page to come back, it can *still* immediately
+ * fault and have to wait again, repeatedly.
+ *
+ * Conversely, the page could also have been reinstated by
+ * another thread before we even obtain the mutex above, so
+ * check again *first* before remapping it.
+ */
+ do {
+ struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
+ int idx;
+
+ rc = kvm_xen_set_evtchn_fast(e, kvm);
+ if (rc != -EWOULDBLOCK)
+ break;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa,
+ PAGE_SIZE, false);
+ srcu_read_unlock(&kvm->srcu, idx);
+ } while(!rc);
+
+ mutex_unlock(&kvm->lock);
+
+ if (mm_borrowed)
+ kthread_unuse_mm(kvm->mm);
+
+ return rc;
+}
+
+int kvm_xen_setup_evtchn(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
+ const struct kvm_irq_routing_entry *ue)
+
+{
+ if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
+ return -EINVAL;
+
+ /* We only support 2 level event channels for now */
+ if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
+ return -EINVAL;
+
+ e->xen_evtchn.port = ue->u.xen_evtchn.port;
+ e->xen_evtchn.vcpu = ue->u.xen_evtchn.vcpu;
+ e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
+ e->set = evtchn_set_fn;
+
+ return 0;
+}
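
The open-coded wall-clock write earlier in this file follows the standard
pvclock version protocol: make wc->version odd, write the fields, then make
it even again, with smp_wmb() ordering each step. A guest reader pairs with
it roughly as below (a sketch with stand-in types and a stand-in barrier,
not actual Xen guest code):

#define smp_rmb() __atomic_thread_fence(__ATOMIC_ACQUIRE)

struct wc_stub { unsigned int version, sec, nsec; };

static void read_wall_clock(const volatile struct wc_stub *w,
			    unsigned int *sec, unsigned int *nsec)
{
	unsigned int ver;

	do {
		while ((ver = w->version) & 1)
			;			/* writer mid-update: wait */
		smp_rmb();
		*sec  = w->sec;
		*nsec = w->nsec;
		smp_rmb();
	} while (w->version != ver);		/* torn read: retry */
}
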
diff --git a/arch/x86/kvm/xen.h b/arch/x86/kvm/xen.h
index cc0cf5f37450..adbcc9ed59db 100644
--- a/arch/x86/kvm/xen.h
+++ b/arch/x86/kvm/xen.h
@@ -24,6 +24,12 @@ int kvm_xen_hvm_config(struct kvm *kvm, struct kvm_xen_hvm_config *xhc);
void kvm_xen_init_vm(struct kvm *kvm);
void kvm_xen_destroy_vm(struct kvm *kvm);
+int kvm_xen_set_evtchn_fast(struct kvm_kernel_irq_routing_entry *e,
+ struct kvm *kvm);
+int kvm_xen_setup_evtchn(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *e,
+ const struct kvm_irq_routing_entry *ue);
+
static inline bool kvm_xen_msr_enabled(struct kvm *kvm)
{
return static_branch_unlikely(&kvm_xen_enabled.key) &&
@@ -134,6 +140,9 @@ struct compat_shared_info {
struct compat_arch_shared_info arch;
};
+#define COMPAT_EVTCHN_2L_NR_CHANNELS (8 * \
+ sizeof_field(struct compat_shared_info, \
+ evtchn_pending))
struct compat_vcpu_runstate_info {
int state;
uint64_t state_entry_time;
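
The new COMPAT_EVTCHN_2L_NR_CHANNELS limit falls straight out of the ABI
layout: in the 2-level event-channel ABI each port is one bit of
shared_info->evtchn_pending. A worked check using stub layouts assumed to
match the 32-bit and 64-bit structs:

#include <stdio.h>

struct compat_shinfo_stub { unsigned int  evtchn_pending[32]; };	/* 32 * 4B */
struct shinfo_stub	  { unsigned long evtchn_pending[64]; };	/* 64 * 8B */

int main(void)
{
	/* 8 bits per byte, one port per bit */
	printf("compat: %zu ports\n",
	       8 * sizeof(((struct compat_shinfo_stub *)0)->evtchn_pending));
	printf("64-bit: %zu ports\n",
	       8 * sizeof(((struct shinfo_stub *)0)->evtchn_pending));
	return 0;	/* prints 1024 and 4096 on LP64 */
}
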
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index c6506c6a7092..f76747862bd2 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -63,7 +63,6 @@ ifeq ($(CONFIG_X86_32),y)
ifneq ($(CONFIG_X86_CMPXCHG64),y)
lib-y += cmpxchg8b_emu.o atomic64_386_32.o
endif
- lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
else
obj-y += iomap_copy_64.o
lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
index 16bc9130e7a5..e768815e58ae 100644
--- a/arch/x86/lib/atomic64_386_32.S
+++ b/arch/x86/lib/atomic64_386_32.S
@@ -9,81 +9,83 @@
#include <asm/alternative.h>
/* if you want SMP support, implement these with real spinlocks */
-.macro LOCK reg
+.macro IRQ_SAVE reg
pushfl
cli
.endm
-.macro UNLOCK reg
+.macro IRQ_RESTORE reg
popfl
.endm
-#define BEGIN(op) \
+#define BEGIN_IRQ_SAVE(op) \
.macro endp; \
SYM_FUNC_END(atomic64_##op##_386); \
.purgem endp; \
.endm; \
SYM_FUNC_START(atomic64_##op##_386); \
- LOCK v;
+ IRQ_SAVE v;
#define ENDP endp
-#define RET \
- UNLOCK v; \
- ret
-
-#define RET_ENDP \
- RET; \
- ENDP
+#define RET_IRQ_RESTORE \
+ IRQ_RESTORE v; \
+ RET
#define v %ecx
-BEGIN(read)
+BEGIN_IRQ_SAVE(read)
movl (v), %eax
movl 4(v), %edx
-RET_ENDP
+ RET_IRQ_RESTORE
+ENDP
#undef v
#define v %esi
-BEGIN(set)
+BEGIN_IRQ_SAVE(set)
movl %ebx, (v)
movl %ecx, 4(v)
-RET_ENDP
+ RET_IRQ_RESTORE
+ENDP
#undef v
#define v %esi
-BEGIN(xchg)
+BEGIN_IRQ_SAVE(xchg)
movl (v), %eax
movl 4(v), %edx
movl %ebx, (v)
movl %ecx, 4(v)
-RET_ENDP
+ RET_IRQ_RESTORE
+ENDP
#undef v
#define v %ecx
-BEGIN(add)
+BEGIN_IRQ_SAVE(add)
addl %eax, (v)
adcl %edx, 4(v)
-RET_ENDP
+ RET_IRQ_RESTORE
+ENDP
#undef v
#define v %ecx
-BEGIN(add_return)
+BEGIN_IRQ_SAVE(add_return)
addl (v), %eax
adcl 4(v), %edx
movl %eax, (v)
movl %edx, 4(v)
-RET_ENDP
+ RET_IRQ_RESTORE
+ENDP
#undef v
#define v %ecx
-BEGIN(sub)
+BEGIN_IRQ_SAVE(sub)
subl %eax, (v)
sbbl %edx, 4(v)
-RET_ENDP
+ RET_IRQ_RESTORE
+ENDP
#undef v
#define v %ecx
-BEGIN(sub_return)
+BEGIN_IRQ_SAVE(sub_return)
negl %edx
negl %eax
sbbl $0, %edx
@@ -91,47 +93,52 @@ BEGIN(sub_return)
adcl 4(v), %edx
movl %eax, (v)
movl %edx, 4(v)
-RET_ENDP
+ RET_IRQ_RESTORE
+ENDP
#undef v
#define v %esi
-BEGIN(inc)
+BEGIN_IRQ_SAVE(inc)
addl $1, (v)
adcl $0, 4(v)
-RET_ENDP
+ RET_IRQ_RESTORE
+ENDP
#undef v
#define v %esi
-BEGIN(inc_return)
+BEGIN_IRQ_SAVE(inc_return)
movl (v), %eax
movl 4(v), %edx
addl $1, %eax
adcl $0, %edx
movl %eax, (v)
movl %edx, 4(v)
-RET_ENDP
+ RET_IRQ_RESTORE
+ENDP
#undef v
#define v %esi
-BEGIN(dec)
+BEGIN_IRQ_SAVE(dec)
subl $1, (v)
sbbl $0, 4(v)
-RET_ENDP
+ RET_IRQ_RESTORE
+ENDP
#undef v
#define v %esi
-BEGIN(dec_return)
+BEGIN_IRQ_SAVE(dec_return)
movl (v), %eax
movl 4(v), %edx
subl $1, %eax
sbbl $0, %edx
movl %eax, (v)
movl %edx, 4(v)
-RET_ENDP
+ RET_IRQ_RESTORE
+ENDP
#undef v
#define v %esi
-BEGIN(add_unless)
+BEGIN_IRQ_SAVE(add_unless)
addl %eax, %ecx
adcl %edx, %edi
addl (v), %eax
@@ -143,7 +150,7 @@ BEGIN(add_unless)
movl %edx, 4(v)
movl $1, %eax
2:
- RET
+ RET_IRQ_RESTORE
3:
cmpl %edx, %edi
jne 1b
@@ -153,7 +160,7 @@ ENDP
#undef v
#define v %esi
-BEGIN(inc_not_zero)
+BEGIN_IRQ_SAVE(inc_not_zero)
movl (v), %eax
movl 4(v), %edx
testl %eax, %eax
@@ -165,7 +172,7 @@ BEGIN(inc_not_zero)
movl %edx, 4(v)
movl $1, %eax
2:
- RET
+ RET_IRQ_RESTORE
3:
testl %edx, %edx
jne 1b
@@ -174,7 +181,7 @@ ENDP
#undef v
#define v %esi
-BEGIN(dec_if_positive)
+BEGIN_IRQ_SAVE(dec_if_positive)
movl (v), %eax
movl 4(v), %edx
subl $1, %eax
@@ -183,5 +190,6 @@ BEGIN(dec_if_positive)
movl %eax, (v)
movl %edx, 4(v)
1:
-RET_ENDP
+ RET_IRQ_RESTORE
+ENDP
#undef v
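
The rename from LOCK/UNLOCK to IRQ_SAVE/IRQ_RESTORE says what these macros
actually do: on a uniprocessor 386 there is no cmpxchg8b, so a 64-bit
"atomic" is just a plain read-modify-write with local interrupts disabled
around it (pushfl; cli ... popfl). The same idea in C, as a sketch using the
generic kernel helpers:

#include <linux/irqflags.h>

static long long atomic64_add_return_up(long long i, long long *v)
{
	unsigned long flags;
	long long ret;

	local_irq_save(flags);		/* pushfl; cli */
	ret = (*v += i);		/* nothing can preempt this on UP */
	local_irq_restore(flags);	/* popfl */

	return ret;
}
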
diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
index ce6935690766..90afb488b396 100644
--- a/arch/x86/lib/atomic64_cx8_32.S
+++ b/arch/x86/lib/atomic64_cx8_32.S
@@ -18,7 +18,7 @@
SYM_FUNC_START(atomic64_read_cx8)
read64 %ecx
- ret
+ RET
SYM_FUNC_END(atomic64_read_cx8)
SYM_FUNC_START(atomic64_set_cx8)
@@ -28,7 +28,7 @@ SYM_FUNC_START(atomic64_set_cx8)
cmpxchg8b (%esi)
jne 1b
- ret
+ RET
SYM_FUNC_END(atomic64_set_cx8)
SYM_FUNC_START(atomic64_xchg_cx8)
@@ -37,7 +37,7 @@ SYM_FUNC_START(atomic64_xchg_cx8)
cmpxchg8b (%esi)
jne 1b
- ret
+ RET
SYM_FUNC_END(atomic64_xchg_cx8)
.macro addsub_return func ins insc
@@ -68,7 +68,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8)
popl %esi
popl %ebx
popl %ebp
- ret
+ RET
SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm
@@ -93,7 +93,7 @@ SYM_FUNC_START(atomic64_\func\()_return_cx8)
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
- ret
+ RET
SYM_FUNC_END(atomic64_\func\()_return_cx8)
.endm
@@ -118,7 +118,7 @@ SYM_FUNC_START(atomic64_dec_if_positive_cx8)
movl %ebx, %eax
movl %ecx, %edx
popl %ebx
- ret
+ RET
SYM_FUNC_END(atomic64_dec_if_positive_cx8)
SYM_FUNC_START(atomic64_add_unless_cx8)
@@ -149,7 +149,7 @@ SYM_FUNC_START(atomic64_add_unless_cx8)
addl $8, %esp
popl %ebx
popl %ebp
- ret
+ RET
4:
cmpl %edx, 4(%esp)
jne 2b
@@ -176,5 +176,5 @@ SYM_FUNC_START(atomic64_inc_not_zero_cx8)
movl $1, %eax
3:
popl %ebx
- ret
+ RET
SYM_FUNC_END(atomic64_inc_not_zero_cx8)
diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
index 4304320e51f4..23318c338db0 100644
--- a/arch/x86/lib/checksum_32.S
+++ b/arch/x86/lib/checksum_32.S
@@ -127,7 +127,7 @@ SYM_FUNC_START(csum_partial)
8:
popl %ebx
popl %esi
- ret
+ RET
SYM_FUNC_END(csum_partial)
#else
@@ -245,7 +245,7 @@ SYM_FUNC_START(csum_partial)
90:
popl %ebx
popl %esi
- ret
+ RET
SYM_FUNC_END(csum_partial)
#endif
@@ -260,9 +260,9 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
* Copy from ds while checksumming, otherwise like csum_partial
*/
-#define EXC(y...) \
- 9999: y; \
- _ASM_EXTABLE_UA(9999b, 6001f)
+#define EXC(y...) \
+ 9999: y; \
+ _ASM_EXTABLE_TYPE(9999b, 7f, EX_TYPE_UACCESS | EX_FLAG_CLEAR_AX)
#ifndef CONFIG_X86_USE_PPRO_CHECKSUM
@@ -358,20 +358,11 @@ EXC( movb %cl, (%edi) )
adcl $0, %eax
7:
-# Exception handler:
-.section .fixup, "ax"
-
-6001:
- xorl %eax, %eax
- jmp 7b
-
-.previous
-
popl %ebx
popl %esi
popl %edi
popl %ecx # equivalent to addl $4,%esp
- ret
+ RET
SYM_FUNC_END(csum_partial_copy_generic)
#else
@@ -439,15 +430,11 @@ EXC( movb %dl, (%edi) )
6: addl %edx, %eax
adcl $0, %eax
7:
-.section .fixup, "ax"
-6001: xorl %eax, %eax
- jmp 7b
-.previous
popl %esi
popl %edi
popl %ebx
- ret
+ RET
SYM_FUNC_END(csum_partial_copy_generic)
#undef ROUND
diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
index c4c7dd115953..fe59b8ac4fcc 100644
--- a/arch/x86/lib/clear_page_64.S
+++ b/arch/x86/lib/clear_page_64.S
@@ -17,7 +17,7 @@ SYM_FUNC_START(clear_page_rep)
movl $4096/8,%ecx
xorl %eax,%eax
rep stosq
- ret
+ RET
SYM_FUNC_END(clear_page_rep)
EXPORT_SYMBOL_GPL(clear_page_rep)
@@ -39,7 +39,7 @@ SYM_FUNC_START(clear_page_orig)
leaq 64(%rdi),%rdi
jnz .Lloop
nop
- ret
+ RET
SYM_FUNC_END(clear_page_orig)
EXPORT_SYMBOL_GPL(clear_page_orig)
@@ -47,6 +47,6 @@ SYM_FUNC_START(clear_page_erms)
movl $4096,%ecx
xorl %eax,%eax
rep stosb
- ret
+ RET
SYM_FUNC_END(clear_page_erms)
EXPORT_SYMBOL_GPL(clear_page_erms)
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
index 3542502faa3b..33c70c0160ea 100644
--- a/arch/x86/lib/cmpxchg16b_emu.S
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -37,11 +37,11 @@ SYM_FUNC_START(this_cpu_cmpxchg16b_emu)
popfq
mov $1, %al
- ret
+ RET
.Lnot_same:
popfq
xor %al,%al
- ret
+ RET
SYM_FUNC_END(this_cpu_cmpxchg16b_emu)
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index ca01ed6029f4..6a912d58fecc 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -32,7 +32,7 @@ SYM_FUNC_START(cmpxchg8b_emu)
movl %ecx, 4(%esi)
popfl
- ret
+ RET
.Lnot_same:
movl (%esi), %eax
@@ -40,7 +40,7 @@ SYM_FUNC_START(cmpxchg8b_emu)
movl 4(%esi), %edx
popfl
- ret
+ RET
SYM_FUNC_END(cmpxchg8b_emu)
EXPORT_SYMBOL(cmpxchg8b_emu)
diff --git a/arch/x86/lib/copy_mc_64.S b/arch/x86/lib/copy_mc_64.S
index 7334055157ba..c859a8a09860 100644
--- a/arch/x86/lib/copy_mc_64.S
+++ b/arch/x86/lib/copy_mc_64.S
@@ -77,10 +77,8 @@ SYM_FUNC_START(copy_mc_fragile)
.L_done_memcpy_trap:
xorl %eax, %eax
.L_done:
- ret
-SYM_FUNC_END(copy_mc_fragile)
+ RET
- .section .fixup, "ax"
/*
* Return number of bytes not copied for any failure. Note that
* there is no "tail" handling since the source buffer is 8-byte
@@ -105,14 +103,14 @@ SYM_FUNC_END(copy_mc_fragile)
movl %ecx, %edx
jmp copy_mc_fragile_handle_tail
- .previous
-
_ASM_EXTABLE_TYPE(.L_read_leading_bytes, .E_leading_bytes, EX_TYPE_DEFAULT_MCE_SAFE)
_ASM_EXTABLE_TYPE(.L_read_words, .E_read_words, EX_TYPE_DEFAULT_MCE_SAFE)
_ASM_EXTABLE_TYPE(.L_read_trailing_bytes, .E_trailing_bytes, EX_TYPE_DEFAULT_MCE_SAFE)
_ASM_EXTABLE(.L_write_leading_bytes, .E_leading_bytes)
_ASM_EXTABLE(.L_write_words, .E_write_words)
_ASM_EXTABLE(.L_write_trailing_bytes, .E_trailing_bytes)
+
+SYM_FUNC_END(copy_mc_fragile)
#endif /* CONFIG_X86_MCE */
/*
@@ -132,10 +130,8 @@ SYM_FUNC_START(copy_mc_enhanced_fast_string)
rep movsb
/* Copy successful. Return zero */
xorl %eax, %eax
- ret
-SYM_FUNC_END(copy_mc_enhanced_fast_string)
+ RET
- .section .fixup, "ax"
.E_copy:
/*
* On fault %rcx is updated such that the copy instruction could
@@ -145,9 +141,9 @@ SYM_FUNC_END(copy_mc_enhanced_fast_string)
* user-copy routines.
*/
movq %rcx, %rax
- ret
-
- .previous
+ RET
_ASM_EXTABLE_TYPE(.L_copy, .E_copy, EX_TYPE_DEFAULT_MCE_SAFE)
+
+SYM_FUNC_END(copy_mc_enhanced_fast_string)
#endif /* !CONFIG_UML */
diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
index db4b4f9197c7..30ea644bf446 100644
--- a/arch/x86/lib/copy_page_64.S
+++ b/arch/x86/lib/copy_page_64.S
@@ -17,7 +17,7 @@ SYM_FUNC_START(copy_page)
ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
movl $4096/8, %ecx
rep movsq
- ret
+ RET
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)
@@ -85,5 +85,5 @@ SYM_FUNC_START_LOCAL(copy_page_regs)
movq (%rsp), %rbx
movq 1*8(%rsp), %r12
addq $2*8, %rsp
- ret
+ RET
SYM_FUNC_END(copy_page_regs)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index a2cbeae4b180..8ca5ecf16dc4 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -32,14 +32,10 @@
decl %ecx
jnz 100b
102:
- .section .fixup,"ax"
-103: addl %ecx,%edx /* ecx is zerorest also */
- jmp .Lcopy_user_handle_tail
- .previous
- _ASM_EXTABLE_CPY(100b, 103b)
- _ASM_EXTABLE_CPY(101b, 103b)
- .endm
+ _ASM_EXTABLE_CPY(100b, .Lcopy_user_handle_align)
+ _ASM_EXTABLE_CPY(101b, .Lcopy_user_handle_align)
+.endm
/*
* copy_user_generic_unrolled - memory copy with exception handling.
@@ -105,9 +101,8 @@ SYM_FUNC_START(copy_user_generic_unrolled)
jnz 21b
23: xor %eax,%eax
ASM_CLAC
- ret
+ RET
- .section .fixup,"ax"
30: shll $6,%ecx
addl %ecx,%edx
jmp 60f
@@ -115,7 +110,6 @@ SYM_FUNC_START(copy_user_generic_unrolled)
jmp 60f
50: movl %ecx,%edx
60: jmp .Lcopy_user_handle_tail /* ecx is zerorest also */
- .previous
_ASM_EXTABLE_CPY(1b, 30b)
_ASM_EXTABLE_CPY(2b, 30b)
@@ -166,20 +160,16 @@ SYM_FUNC_START(copy_user_generic_string)
movl %edx,%ecx
shrl $3,%ecx
andl $7,%edx
-1: rep
- movsq
+1: rep movsq
2: movl %edx,%ecx
-3: rep
- movsb
+3: rep movsb
xorl %eax,%eax
ASM_CLAC
- ret
+ RET
- .section .fixup,"ax"
11: leal (%rdx,%rcx,8),%ecx
12: movl %ecx,%edx /* ecx is zerorest also */
jmp .Lcopy_user_handle_tail
- .previous
_ASM_EXTABLE_CPY(1b, 11b)
_ASM_EXTABLE_CPY(3b, 12b)
@@ -203,16 +193,13 @@ SYM_FUNC_START(copy_user_enhanced_fast_string)
/* CPUs without FSRM should avoid rep movsb for short copies */
ALTERNATIVE "cmpl $64, %edx; jb .L_copy_short_string", "", X86_FEATURE_FSRM
movl %edx,%ecx
-1: rep
- movsb
+1: rep movsb
xorl %eax,%eax
ASM_CLAC
- ret
+ RET
- .section .fixup,"ax"
12: movl %ecx,%edx /* ecx is zerorest also */
jmp .Lcopy_user_handle_tail
- .previous
_ASM_EXTABLE_CPY(1b, 12b)
SYM_FUNC_END(copy_user_enhanced_fast_string)
@@ -241,7 +228,7 @@ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
1: rep movsb
2: mov %ecx,%eax
ASM_CLAC
- ret
+ RET
3:
movl %edx,%eax
@@ -249,6 +236,11 @@ SYM_CODE_START_LOCAL(.Lcopy_user_handle_tail)
RET
_ASM_EXTABLE_CPY(1b, 2b)
+
+.Lcopy_user_handle_align:
+ addl %ecx,%edx /* ecx is zerorest also */
+ jmp .Lcopy_user_handle_tail
+
SYM_CODE_END(.Lcopy_user_handle_tail)
/*
@@ -357,9 +349,8 @@ SYM_FUNC_START(__copy_user_nocache)
xorl %eax,%eax
ASM_CLAC
sfence
- ret
+ RET
- .section .fixup,"ax"
.L_fixup_4x8b_copy:
shll $6,%ecx
addl %ecx,%edx
@@ -375,7 +366,6 @@ SYM_FUNC_START(__copy_user_nocache)
.L_fixup_handle_tail:
sfence
jmp .Lcopy_user_handle_tail
- .previous
_ASM_EXTABLE_CPY(1b, .L_fixup_4x8b_copy)
_ASM_EXTABLE_CPY(2b, .L_fixup_4x8b_copy)
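
These hunks are part of the series-wide removal of the .fixup section: the
recovery code now lives inside the function that can fault, so objtool and
the unwinder can see it, and the exception table entry points straight at
it. Conceptually, each _ASM_EXTABLE_*() invocation emits an entry like the
struct below (a rough model, not the kernel's exact layout, which stores
relative offsets):

struct extable_entry_model {
	long insn;	/* address of the instruction that may fault */
	long fixup;	/* in-function landing point, e.g. .Lcopy_user_handle_align */
	int  type;	/* EX_TYPE_* action, plus flags such as EX_FLAG_CLEAR_AX */
};

Simple recovery actions (like clearing %eax in the checksum_32.S hunk
earlier) can be encoded in the type word and performed by the fault handler
itself, which is why several of these patches delete out-of-line stubs
entirely.
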
diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
index 1fbd8ee9642d..d9e16a2cf285 100644
--- a/arch/x86/lib/csum-copy_64.S
+++ b/arch/x86/lib/csum-copy_64.S
@@ -201,7 +201,7 @@ SYM_FUNC_START(csum_partial_copy_generic)
movq 3*8(%rsp), %r13
movq 4*8(%rsp), %r15
addq $5*8, %rsp
- ret
+ RET
.Lshort:
movl %ecx, %r10d
jmp .L1
diff --git a/arch/x86/lib/csum-partial_64.c b/arch/x86/lib/csum-partial_64.c
index e7925d668b68..1f8a8f895173 100644
--- a/arch/x86/lib/csum-partial_64.c
+++ b/arch/x86/lib/csum-partial_64.c
@@ -9,6 +9,7 @@
#include <linux/compiler.h>
#include <linux/export.h>
#include <asm/checksum.h>
+#include <asm/word-at-a-time.h>
static inline unsigned short from32to16(unsigned a)
{
@@ -21,120 +22,119 @@ static inline unsigned short from32to16(unsigned a)
}
/*
- * Do a 64-bit checksum on an arbitrary memory area.
+ * Do a checksum on an arbitrary memory area.
* Returns a 32bit checksum.
*
* This isn't as time critical as it used to be because many NICs
* do hardware checksumming these days.
- *
- * Things tried and found to not make it faster:
- * Manual Prefetching
- * Unrolling to an 128 bytes inner loop.
- * Using interleaving with more registers to break the carry chains.
+ *
+ * Still, with CHECKSUM_COMPLETE this is called to compute
+ * checksums on IPv6 headers (40 bytes) and other small parts.
+ * It's best to have buff aligned on a 64-bit boundary.
*/
-static unsigned do_csum(const unsigned char *buff, unsigned len)
+__wsum csum_partial(const void *buff, int len, __wsum sum)
{
- unsigned odd, count;
- unsigned long result = 0;
+ u64 temp64 = (__force u64)sum;
+ unsigned odd, result;
- if (unlikely(len == 0))
- return result;
odd = 1 & (unsigned long) buff;
if (unlikely(odd)) {
- result = *buff << 8;
+ if (unlikely(len == 0))
+ return sum;
+ temp64 = ror32((__force u32)sum, 8);
+ temp64 += (*(unsigned char *)buff << 8);
len--;
buff++;
}
- count = len >> 1; /* nr of 16-bit words.. */
- if (count) {
- if (2 & (unsigned long) buff) {
- result += *(unsigned short *)buff;
- count--;
- len -= 2;
- buff += 2;
- }
- count >>= 1; /* nr of 32-bit words.. */
- if (count) {
- unsigned long zero;
- unsigned count64;
- if (4 & (unsigned long) buff) {
- result += *(unsigned int *) buff;
- count--;
- len -= 4;
- buff += 4;
- }
- count >>= 1; /* nr of 64-bit words.. */
- /* main loop using 64byte blocks */
- zero = 0;
- count64 = count >> 3;
- while (count64) {
- asm("addq 0*8(%[src]),%[res]\n\t"
- "adcq 1*8(%[src]),%[res]\n\t"
- "adcq 2*8(%[src]),%[res]\n\t"
- "adcq 3*8(%[src]),%[res]\n\t"
- "adcq 4*8(%[src]),%[res]\n\t"
- "adcq 5*8(%[src]),%[res]\n\t"
- "adcq 6*8(%[src]),%[res]\n\t"
- "adcq 7*8(%[src]),%[res]\n\t"
- "adcq %[zero],%[res]"
- : [res] "=r" (result)
- : [src] "r" (buff), [zero] "r" (zero),
- "[res]" (result));
- buff += 64;
- count64--;
- }
+ while (unlikely(len >= 64)) {
+ asm("addq 0*8(%[src]),%[res]\n\t"
+ "adcq 1*8(%[src]),%[res]\n\t"
+ "adcq 2*8(%[src]),%[res]\n\t"
+ "adcq 3*8(%[src]),%[res]\n\t"
+ "adcq 4*8(%[src]),%[res]\n\t"
+ "adcq 5*8(%[src]),%[res]\n\t"
+ "adcq 6*8(%[src]),%[res]\n\t"
+ "adcq 7*8(%[src]),%[res]\n\t"
+ "adcq $0,%[res]"
+ : [res] "+r" (temp64)
+ : [src] "r" (buff)
+ : "memory");
+ buff += 64;
+ len -= 64;
+ }
+
+ if (len & 32) {
+ asm("addq 0*8(%[src]),%[res]\n\t"
+ "adcq 1*8(%[src]),%[res]\n\t"
+ "adcq 2*8(%[src]),%[res]\n\t"
+ "adcq 3*8(%[src]),%[res]\n\t"
+ "adcq $0,%[res]"
+ : [res] "+r" (temp64)
+ : [src] "r" (buff)
+ : "memory");
+ buff += 32;
+ }
+ if (len & 16) {
+ asm("addq 0*8(%[src]),%[res]\n\t"
+ "adcq 1*8(%[src]),%[res]\n\t"
+ "adcq $0,%[res]"
+ : [res] "+r" (temp64)
+ : [src] "r" (buff)
+ : "memory");
+ buff += 16;
+ }
+ if (len & 8) {
+ asm("addq 0*8(%[src]),%[res]\n\t"
+ "adcq $0,%[res]"
+ : [res] "+r" (temp64)
+ : [src] "r" (buff)
+ : "memory");
+ buff += 8;
+ }
+ if (len & 7) {
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+ unsigned int shift = (8 - (len & 7)) * 8;
+ unsigned long trail;
- /* last up to 7 8byte blocks */
- count %= 8;
- while (count) {
- asm("addq %1,%0\n\t"
- "adcq %2,%0\n"
- : "=r" (result)
- : "m" (*(unsigned long *)buff),
- "r" (zero), "0" (result));
- --count;
- buff += 8;
- }
- result = add32_with_carry(result>>32,
- result&0xffffffff);
+ trail = (load_unaligned_zeropad(buff) << shift) >> shift;
- if (len & 4) {
- result += *(unsigned int *) buff;
- buff += 4;
- }
+ asm("addq %[trail],%[res]\n\t"
+ "adcq $0,%[res]"
+ : [res] "+r" (temp64)
+ : [trail] "r" (trail));
+#else
+ if (len & 4) {
+ asm("addq %[val],%[res]\n\t"
+ "adcq $0,%[res]"
+ : [res] "+r" (temp64)
+ : [val] "r" ((u64)*(u32 *)buff)
+ : "memory");
+ buff += 4;
}
if (len & 2) {
- result += *(unsigned short *) buff;
+ asm("addq %[val],%[res]\n\t"
+ "adcq $0,%[res]"
+ : [res] "+r" (temp64)
+ : [val] "r" ((u64)*(u16 *)buff)
+ : "memory");
buff += 2;
}
+ if (len & 1) {
+ asm("addq %[val],%[res]\n\t"
+ "adcq $0,%[res]"
+ : [res] "+r" (temp64)
+ : [val] "r" ((u64)*(u8 *)buff)
+ : "memory");
+ }
+#endif
}
- if (len & 1)
- result += *buff;
- result = add32_with_carry(result>>32, result & 0xffffffff);
- if (unlikely(odd)) {
+ result = add32_with_carry(temp64 >> 32, temp64 & 0xffffffff);
+ if (unlikely(odd)) {
result = from32to16(result);
result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);
}
- return result;
-}
-
-/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 64-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum)
-{
- return (__force __wsum)add32_with_carry(do_csum(buff, len),
- (__force u32)sum);
+ return (__force __wsum)result;
}
EXPORT_SYMBOL(csum_partial);
@@ -147,4 +147,3 @@ __sum16 ip_compute_csum(const void *buff, int len)
return csum_fold(csum_partial(buff,len,0));
}
EXPORT_SYMBOL(ip_compute_csum);
-
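
The rewritten csum_partial() accumulates with 64-bit add-with-carry chains,
handles the (len & 7) tail by over-reading one word via
load_unaligned_zeropad() and shifting the excess bytes away, and folds the
result at the end. Plain-C equivalents of the two folds done by
add32_with_carry() and from32to16(), for reference (a sketch, not the
kernel's implementation):

#include <stdint.h>

static uint32_t fold64(uint64_t sum)		/* ~ add32_with_carry() */
{
	sum = (sum & 0xffffffff) + (sum >> 32);
	sum = (sum & 0xffffffff) + (sum >> 32);	/* absorb the carry-out */
	return (uint32_t)sum;
}

static uint16_t fold32(uint32_t sum)		/* ~ from32to16() */
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}
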
diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c
index be5b5fb1598b..520897061ee0 100644
--- a/arch/x86/lib/error-inject.c
+++ b/arch/x86/lib/error-inject.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
+#include <linux/linkage.h>
#include <linux/error-injection.h>
#include <linux/kprobes.h>
@@ -10,7 +11,7 @@ asm(
".type just_return_func, @function\n"
".globl just_return_func\n"
"just_return_func:\n"
- " ret\n"
+ ASM_RET
".size just_return_func, .-just_return_func\n"
);
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index fa1bc2104b32..b70d98d79a9d 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -57,7 +57,7 @@ SYM_FUNC_START(__get_user_1)
1: movzbl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
- ret
+ RET
SYM_FUNC_END(__get_user_1)
EXPORT_SYMBOL(__get_user_1)
@@ -71,7 +71,7 @@ SYM_FUNC_START(__get_user_2)
2: movzwl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
- ret
+ RET
SYM_FUNC_END(__get_user_2)
EXPORT_SYMBOL(__get_user_2)
@@ -85,7 +85,7 @@ SYM_FUNC_START(__get_user_4)
3: movl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
- ret
+ RET
SYM_FUNC_END(__get_user_4)
EXPORT_SYMBOL(__get_user_4)
@@ -100,7 +100,7 @@ SYM_FUNC_START(__get_user_8)
4: movq (%_ASM_AX),%rdx
xor %eax,%eax
ASM_CLAC
- ret
+ RET
#else
LOAD_TASK_SIZE_MINUS_N(7)
cmp %_ASM_DX,%_ASM_AX
@@ -112,7 +112,7 @@ SYM_FUNC_START(__get_user_8)
5: movl 4(%_ASM_AX),%ecx
xor %eax,%eax
ASM_CLAC
- ret
+ RET
#endif
SYM_FUNC_END(__get_user_8)
EXPORT_SYMBOL(__get_user_8)
@@ -124,7 +124,7 @@ SYM_FUNC_START(__get_user_nocheck_1)
6: movzbl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
- ret
+ RET
SYM_FUNC_END(__get_user_nocheck_1)
EXPORT_SYMBOL(__get_user_nocheck_1)
@@ -134,7 +134,7 @@ SYM_FUNC_START(__get_user_nocheck_2)
7: movzwl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
- ret
+ RET
SYM_FUNC_END(__get_user_nocheck_2)
EXPORT_SYMBOL(__get_user_nocheck_2)
@@ -144,7 +144,7 @@ SYM_FUNC_START(__get_user_nocheck_4)
8: movl (%_ASM_AX),%edx
xor %eax,%eax
ASM_CLAC
- ret
+ RET
SYM_FUNC_END(__get_user_nocheck_4)
EXPORT_SYMBOL(__get_user_nocheck_4)
@@ -159,7 +159,7 @@ SYM_FUNC_START(__get_user_nocheck_8)
#endif
xor %eax,%eax
ASM_CLAC
- ret
+ RET
SYM_FUNC_END(__get_user_nocheck_8)
EXPORT_SYMBOL(__get_user_nocheck_8)
@@ -169,7 +169,7 @@ SYM_CODE_START_LOCAL(.Lbad_get_user_clac)
bad_get_user:
xor %edx,%edx
mov $(-EFAULT),%_ASM_AX
- ret
+ RET
SYM_CODE_END(.Lbad_get_user_clac)
#ifdef CONFIG_X86_32
@@ -179,7 +179,7 @@ bad_get_user_8:
xor %edx,%edx
xor %ecx,%ecx
mov $(-EFAULT),%_ASM_AX
- ret
+ RET
SYM_CODE_END(.Lbad_get_user_8_clac)
#endif
diff --git a/arch/x86/lib/hweight.S b/arch/x86/lib/hweight.S
index dbf8cc97b7f5..12c16c6aa44a 100644
--- a/arch/x86/lib/hweight.S
+++ b/arch/x86/lib/hweight.S
@@ -32,7 +32,7 @@ SYM_FUNC_START(__sw_hweight32)
imull $0x01010101, %eax, %eax # w_tmp *= 0x01010101
shrl $24, %eax # w = w_tmp >> 24
__ASM_SIZE(pop,) %__ASM_REG(dx)
- ret
+ RET
SYM_FUNC_END(__sw_hweight32)
EXPORT_SYMBOL(__sw_hweight32)
@@ -65,7 +65,7 @@ SYM_FUNC_START(__sw_hweight64)
popq %rdx
popq %rdi
- ret
+ RET
#else /* CONFIG_X86_32 */
/* We're getting an u64 arg in (%eax,%edx): unsigned long hweight64(__u64 w) */
pushl %ecx
@@ -77,7 +77,7 @@ SYM_FUNC_START(__sw_hweight64)
addl %ecx, %eax # result
popl %ecx
- ret
+ RET
#endif
SYM_FUNC_END(__sw_hweight64)
EXPORT_SYMBOL(__sw_hweight64)
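
Only the returns change here, but for reference the function is the classic
SWAR population count; the imull $0x01010101 / shrl $24 pair visible above
is its final byte-summing step. A C rendering of the algorithm:

#include <stdint.h>

static unsigned int sw_hweight32(uint32_t w)
{
	w -= (w >> 1) & 0x55555555;			 /* per-2-bit sums */
	w  = (w & 0x33333333) + ((w >> 2) & 0x33333333); /* per-4-bit sums */
	w  = (w + (w >> 4)) & 0x0f0f0f0f;		 /* per-byte sums */
	return (w * 0x01010101) >> 24;			 /* add the four bytes */
}
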
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 53e57ef5925c..b781d324211b 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -410,32 +410,44 @@ static short get_segment_selector(struct pt_regs *regs, int seg_reg_idx)
#endif /* CONFIG_X86_64 */
}
-static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
- enum reg_type type)
+static const int pt_regoff[] = {
+ offsetof(struct pt_regs, ax),
+ offsetof(struct pt_regs, cx),
+ offsetof(struct pt_regs, dx),
+ offsetof(struct pt_regs, bx),
+ offsetof(struct pt_regs, sp),
+ offsetof(struct pt_regs, bp),
+ offsetof(struct pt_regs, si),
+ offsetof(struct pt_regs, di),
+#ifdef CONFIG_X86_64
+ offsetof(struct pt_regs, r8),
+ offsetof(struct pt_regs, r9),
+ offsetof(struct pt_regs, r10),
+ offsetof(struct pt_regs, r11),
+ offsetof(struct pt_regs, r12),
+ offsetof(struct pt_regs, r13),
+ offsetof(struct pt_regs, r14),
+ offsetof(struct pt_regs, r15),
+#else
+ offsetof(struct pt_regs, ds),
+ offsetof(struct pt_regs, es),
+ offsetof(struct pt_regs, fs),
+ offsetof(struct pt_regs, gs),
+#endif
+};
+
+int pt_regs_offset(struct pt_regs *regs, int regno)
+{
+ if ((unsigned)regno < ARRAY_SIZE(pt_regoff))
+ return pt_regoff[regno];
+ return -EDOM;
+}
+
+static int get_regno(struct insn *insn, enum reg_type type)
{
+ int nr_registers = ARRAY_SIZE(pt_regoff);
int regno = 0;
- static const int regoff[] = {
- offsetof(struct pt_regs, ax),
- offsetof(struct pt_regs, cx),
- offsetof(struct pt_regs, dx),
- offsetof(struct pt_regs, bx),
- offsetof(struct pt_regs, sp),
- offsetof(struct pt_regs, bp),
- offsetof(struct pt_regs, si),
- offsetof(struct pt_regs, di),
-#ifdef CONFIG_X86_64
- offsetof(struct pt_regs, r8),
- offsetof(struct pt_regs, r9),
- offsetof(struct pt_regs, r10),
- offsetof(struct pt_regs, r11),
- offsetof(struct pt_regs, r12),
- offsetof(struct pt_regs, r13),
- offsetof(struct pt_regs, r14),
- offsetof(struct pt_regs, r15),
-#endif
- };
- int nr_registers = ARRAY_SIZE(regoff);
/*
* Don't possibly decode a 32-bit instructions as
* reading a 64-bit-only register.
@@ -503,7 +515,18 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
WARN_ONCE(1, "decoded an instruction with an invalid register");
return -EINVAL;
}
- return regoff[regno];
+ return regno;
+}
+
+static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
+ enum reg_type type)
+{
+ int regno = get_regno(insn, type);
+
+ if (regno < 0)
+ return regno;
+
+ return pt_regs_offset(regs, regno);
}
/**
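
Exporting the register-offset table as pt_regs_offset() lets callers map a
decoded register number to a byte offset within struct pt_regs. A
hypothetical caller (illustration only) would fetch a register value like
this:

/* regno as returned by get_regno(); regs from the trap frame */
static unsigned long regs_get(struct pt_regs *regs, int regno)
{
	int off = pt_regs_offset(regs, regno);

	if (off < 0)		/* -EDOM: regno out of range */
		return 0;

	return *(unsigned long *)((char *)regs + off);
}
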
diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
index cb5a1964506b..a1f9416bf67a 100644
--- a/arch/x86/lib/iomap_copy_64.S
+++ b/arch/x86/lib/iomap_copy_64.S
@@ -11,5 +11,5 @@
SYM_FUNC_START(__iowrite32_copy)
movl %edx,%ecx
rep movsd
- ret
+ RET
SYM_FUNC_END(__iowrite32_copy)
diff --git a/arch/x86/lib/memcpy_32.c b/arch/x86/lib/memcpy_32.c
index e565d1c9019e..3a6e6cfe8c35 100644
--- a/arch/x86/lib/memcpy_32.c
+++ b/arch/x86/lib/memcpy_32.c
@@ -7,11 +7,7 @@
__visible void *memcpy(void *to, const void *from, size_t n)
{
-#if defined(CONFIG_X86_USE_3DNOW) && !defined(CONFIG_FORTIFY_SOURCE)
- return __memcpy3d(to, from, n);
-#else
return __memcpy(to, from, n);
-#endif
}
EXPORT_SYMBOL(memcpy);
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 1cc9da6e29c7..59cf2343f3d9 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -39,7 +39,7 @@ SYM_FUNC_START_WEAK(memcpy)
rep movsq
movl %edx, %ecx
rep movsb
- ret
+ RET
SYM_FUNC_END(memcpy)
SYM_FUNC_END_ALIAS(__memcpy)
EXPORT_SYMBOL(memcpy)
@@ -53,7 +53,7 @@ SYM_FUNC_START_LOCAL(memcpy_erms)
movq %rdi, %rax
movq %rdx, %rcx
rep movsb
- ret
+ RET
SYM_FUNC_END(memcpy_erms)
SYM_FUNC_START_LOCAL(memcpy_orig)
@@ -137,7 +137,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
movq %r9, 1*8(%rdi)
movq %r10, -2*8(%rdi, %rdx)
movq %r11, -1*8(%rdi, %rdx)
- retq
+ RET
.p2align 4
.Lless_16bytes:
cmpl $8, %edx
@@ -149,7 +149,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
movq -1*8(%rsi, %rdx), %r9
movq %r8, 0*8(%rdi)
movq %r9, -1*8(%rdi, %rdx)
- retq
+ RET
.p2align 4
.Lless_8bytes:
cmpl $4, %edx
@@ -162,7 +162,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
movl -4(%rsi, %rdx), %r8d
movl %ecx, (%rdi)
movl %r8d, -4(%rdi, %rdx)
- retq
+ RET
.p2align 4
.Lless_3bytes:
subl $1, %edx
@@ -180,7 +180,7 @@ SYM_FUNC_START_LOCAL(memcpy_orig)
movb %cl, (%rdi)
.Lend:
- retq
+ RET
SYM_FUNC_END(memcpy_orig)
.popsection
diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
index 64801010d312..50ea390df712 100644
--- a/arch/x86/lib/memmove_64.S
+++ b/arch/x86/lib/memmove_64.S
@@ -40,7 +40,7 @@ SYM_FUNC_START(__memmove)
/* FSRM implies ERMS => no length checks, do the copy directly */
.Lmemmove_begin_forward:
ALTERNATIVE "cmp $0x20, %rdx; jb 1f", "", X86_FEATURE_FSRM
- ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
+ ALTERNATIVE "", __stringify(movq %rdx, %rcx; rep movsb; RET), X86_FEATURE_ERMS
/*
* movsq instruction have many startup latency
@@ -205,7 +205,7 @@ SYM_FUNC_START(__memmove)
movb (%rsi), %r11b
movb %r11b, (%rdi)
13:
- retq
+ RET
SYM_FUNC_END(__memmove)
SYM_FUNC_END_ALIAS(memmove)
EXPORT_SYMBOL(__memmove)
diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
index 9827ae267f96..d624f2bc42f1 100644
--- a/arch/x86/lib/memset_64.S
+++ b/arch/x86/lib/memset_64.S
@@ -40,7 +40,7 @@ SYM_FUNC_START(__memset)
movl %edx,%ecx
rep stosb
movq %r9,%rax
- ret
+ RET
SYM_FUNC_END(__memset)
SYM_FUNC_END_ALIAS(memset)
EXPORT_SYMBOL(memset)
@@ -63,7 +63,7 @@ SYM_FUNC_START_LOCAL(memset_erms)
movq %rdx,%rcx
rep stosb
movq %r9,%rax
- ret
+ RET
SYM_FUNC_END(memset_erms)
SYM_FUNC_START_LOCAL(memset_orig)
@@ -125,7 +125,7 @@ SYM_FUNC_START_LOCAL(memset_orig)
.Lende:
movq %r10,%rax
- ret
+ RET
.Lbad_alignment:
cmpq $7,%rdx
diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
index cc5f4ea943d3..e69de29bb2d1 100644
--- a/arch/x86/lib/mmx_32.c
+++ b/arch/x86/lib/mmx_32.c
@@ -1,388 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * MMX 3DNow! library helper functions
- *
- * To do:
- * We can use MMX just for prefetch in IRQ's. This may be a win.
- * (reported so on K6-III)
- * We should use a better code neutral filler for the short jump
- * leal ebx. [ebx] is apparently best for K6-2, but Cyrix ??
- * We also want to clobber the filler register so we don't get any
- * register forwarding stalls on the filler.
- *
- * Add *user handling. Checksums are not a win with MMX on any CPU
- * tested so far for any MMX solution figured.
- *
- * 22/09/2000 - Arjan van de Ven
- * Improved for non-engineering-sample Athlons
- *
- */
-#include <linux/hardirq.h>
-#include <linux/string.h>
-#include <linux/export.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-
-#include <asm/fpu/api.h>
-#include <asm/asm.h>
-
-/*
- * Use KFPU_387. MMX instructions are not affected by MXCSR,
- * but both AMD and Intel documentation states that even integer MMX
- * operations will result in #MF if an exception is pending in FCW.
- *
- * EMMS is not needed afterwards because, after calling kernel_fpu_end(),
- * any subsequent user of the 387 stack will reinitialize it using
- * KFPU_387.
- */
-
-void *_mmx_memcpy(void *to, const void *from, size_t len)
-{
- void *p;
- int i;
-
- if (unlikely(in_interrupt()))
- return __memcpy(to, from, len);
-
- p = to;
- i = len >> 6; /* len/64 */
-
- kernel_fpu_begin_mask(KFPU_387);
-
- __asm__ __volatile__ (
- "1: prefetch (%0)\n" /* This set is 28 bytes */
- " prefetch 64(%0)\n"
- " prefetch 128(%0)\n"
- " prefetch 192(%0)\n"
- " prefetch 256(%0)\n"
- "2: \n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
- : : "r" (from));
-
- for ( ; i > 5; i--) {
- __asm__ __volatile__ (
- "1: prefetch 320(%0)\n"
- "2: movq (%0), %%mm0\n"
- " movq 8(%0), %%mm1\n"
- " movq 16(%0), %%mm2\n"
- " movq 24(%0), %%mm3\n"
- " movq %%mm0, (%1)\n"
- " movq %%mm1, 8(%1)\n"
- " movq %%mm2, 16(%1)\n"
- " movq %%mm3, 24(%1)\n"
- " movq 32(%0), %%mm0\n"
- " movq 40(%0), %%mm1\n"
- " movq 48(%0), %%mm2\n"
- " movq 56(%0), %%mm3\n"
- " movq %%mm0, 32(%1)\n"
- " movq %%mm1, 40(%1)\n"
- " movq %%mm2, 48(%1)\n"
- " movq %%mm3, 56(%1)\n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
- : : "r" (from), "r" (to) : "memory");
-
- from += 64;
- to += 64;
- }
-
- for ( ; i > 0; i--) {
- __asm__ __volatile__ (
- " movq (%0), %%mm0\n"
- " movq 8(%0), %%mm1\n"
- " movq 16(%0), %%mm2\n"
- " movq 24(%0), %%mm3\n"
- " movq %%mm0, (%1)\n"
- " movq %%mm1, 8(%1)\n"
- " movq %%mm2, 16(%1)\n"
- " movq %%mm3, 24(%1)\n"
- " movq 32(%0), %%mm0\n"
- " movq 40(%0), %%mm1\n"
- " movq 48(%0), %%mm2\n"
- " movq 56(%0), %%mm3\n"
- " movq %%mm0, 32(%1)\n"
- " movq %%mm1, 40(%1)\n"
- " movq %%mm2, 48(%1)\n"
- " movq %%mm3, 56(%1)\n"
- : : "r" (from), "r" (to) : "memory");
-
- from += 64;
- to += 64;
- }
- /*
- * Now do the tail of the block:
- */
- __memcpy(to, from, len & 63);
- kernel_fpu_end();
-
- return p;
-}
-EXPORT_SYMBOL(_mmx_memcpy);
-
-#ifdef CONFIG_MK7
-
-/*
- * The K7 has streaming cache bypass load/store. The Cyrix III, K6 and
- * other MMX using processors do not.
- */
-
-static void fast_clear_page(void *page)
-{
- int i;
-
- kernel_fpu_begin_mask(KFPU_387);
-
- __asm__ __volatile__ (
- " pxor %%mm0, %%mm0\n" : :
- );
-
- for (i = 0; i < 4096/64; i++) {
- __asm__ __volatile__ (
- " movntq %%mm0, (%0)\n"
- " movntq %%mm0, 8(%0)\n"
- " movntq %%mm0, 16(%0)\n"
- " movntq %%mm0, 24(%0)\n"
- " movntq %%mm0, 32(%0)\n"
- " movntq %%mm0, 40(%0)\n"
- " movntq %%mm0, 48(%0)\n"
- " movntq %%mm0, 56(%0)\n"
- : : "r" (page) : "memory");
- page += 64;
- }
-
- /*
- * Since movntq is weakly-ordered, a "sfence" is needed to become
- * ordered again:
- */
- __asm__ __volatile__("sfence\n"::);
-
- kernel_fpu_end();
-}
-
-static void fast_copy_page(void *to, void *from)
-{
- int i;
-
- kernel_fpu_begin_mask(KFPU_387);
-
- /*
- * maybe the prefetch stuff can go before the expensive fnsave...
- * but that is for later. -AV
- */
- __asm__ __volatile__(
- "1: prefetch (%0)\n"
- " prefetch 64(%0)\n"
- " prefetch 128(%0)\n"
- " prefetch 192(%0)\n"
- " prefetch 256(%0)\n"
- "2: \n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b) : : "r" (from));
-
- for (i = 0; i < (4096-320)/64; i++) {
- __asm__ __volatile__ (
- "1: prefetch 320(%0)\n"
- "2: movq (%0), %%mm0\n"
- " movntq %%mm0, (%1)\n"
- " movq 8(%0), %%mm1\n"
- " movntq %%mm1, 8(%1)\n"
- " movq 16(%0), %%mm2\n"
- " movntq %%mm2, 16(%1)\n"
- " movq 24(%0), %%mm3\n"
- " movntq %%mm3, 24(%1)\n"
- " movq 32(%0), %%mm4\n"
- " movntq %%mm4, 32(%1)\n"
- " movq 40(%0), %%mm5\n"
- " movntq %%mm5, 40(%1)\n"
- " movq 48(%0), %%mm6\n"
- " movntq %%mm6, 48(%1)\n"
- " movq 56(%0), %%mm7\n"
- " movntq %%mm7, 56(%1)\n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
-
- from += 64;
- to += 64;
- }
-
- for (i = (4096-320)/64; i < 4096/64; i++) {
- __asm__ __volatile__ (
- "2: movq (%0), %%mm0\n"
- " movntq %%mm0, (%1)\n"
- " movq 8(%0), %%mm1\n"
- " movntq %%mm1, 8(%1)\n"
- " movq 16(%0), %%mm2\n"
- " movntq %%mm2, 16(%1)\n"
- " movq 24(%0), %%mm3\n"
- " movntq %%mm3, 24(%1)\n"
- " movq 32(%0), %%mm4\n"
- " movntq %%mm4, 32(%1)\n"
- " movq 40(%0), %%mm5\n"
- " movntq %%mm5, 40(%1)\n"
- " movq 48(%0), %%mm6\n"
- " movntq %%mm6, 48(%1)\n"
- " movq 56(%0), %%mm7\n"
- " movntq %%mm7, 56(%1)\n"
- : : "r" (from), "r" (to) : "memory");
- from += 64;
- to += 64;
- }
- /*
- * Since movntq is weakly-ordered, a "sfence" is needed to become
- * ordered again:
- */
- __asm__ __volatile__("sfence \n"::);
- kernel_fpu_end();
-}
-
-#else /* CONFIG_MK7 */
-
-/*
- * Generic MMX implementation without K7 specific streaming
- */
-static void fast_clear_page(void *page)
-{
- int i;
-
- kernel_fpu_begin_mask(KFPU_387);
-
- __asm__ __volatile__ (
- " pxor %%mm0, %%mm0\n" : :
- );
-
- for (i = 0; i < 4096/128; i++) {
- __asm__ __volatile__ (
- " movq %%mm0, (%0)\n"
- " movq %%mm0, 8(%0)\n"
- " movq %%mm0, 16(%0)\n"
- " movq %%mm0, 24(%0)\n"
- " movq %%mm0, 32(%0)\n"
- " movq %%mm0, 40(%0)\n"
- " movq %%mm0, 48(%0)\n"
- " movq %%mm0, 56(%0)\n"
- " movq %%mm0, 64(%0)\n"
- " movq %%mm0, 72(%0)\n"
- " movq %%mm0, 80(%0)\n"
- " movq %%mm0, 88(%0)\n"
- " movq %%mm0, 96(%0)\n"
- " movq %%mm0, 104(%0)\n"
- " movq %%mm0, 112(%0)\n"
- " movq %%mm0, 120(%0)\n"
- : : "r" (page) : "memory");
- page += 128;
- }
-
- kernel_fpu_end();
-}
-
-static void fast_copy_page(void *to, void *from)
-{
- int i;
-
- kernel_fpu_begin_mask(KFPU_387);
-
- __asm__ __volatile__ (
- "1: prefetch (%0)\n"
- " prefetch 64(%0)\n"
- " prefetch 128(%0)\n"
- " prefetch 192(%0)\n"
- " prefetch 256(%0)\n"
- "2: \n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b) : : "r" (from));
-
- for (i = 0; i < 4096/64; i++) {
- __asm__ __volatile__ (
- "1: prefetch 320(%0)\n"
- "2: movq (%0), %%mm0\n"
- " movq 8(%0), %%mm1\n"
- " movq 16(%0), %%mm2\n"
- " movq 24(%0), %%mm3\n"
- " movq %%mm0, (%1)\n"
- " movq %%mm1, 8(%1)\n"
- " movq %%mm2, 16(%1)\n"
- " movq %%mm3, 24(%1)\n"
- " movq 32(%0), %%mm0\n"
- " movq 40(%0), %%mm1\n"
- " movq 48(%0), %%mm2\n"
- " movq 56(%0), %%mm3\n"
- " movq %%mm0, 32(%1)\n"
- " movq %%mm1, 40(%1)\n"
- " movq %%mm2, 48(%1)\n"
- " movq %%mm3, 56(%1)\n"
- ".section .fixup, \"ax\"\n"
- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE(1b, 3b)
- : : "r" (from), "r" (to) : "memory");
-
- from += 64;
- to += 64;
- }
- kernel_fpu_end();
-}
-
-#endif /* !CONFIG_MK7 */
-
-/*
- * Favour MMX for page clear and copy:
- */
-static void slow_zero_page(void *page)
-{
- int d0, d1;
-
- __asm__ __volatile__(
- "cld\n\t"
- "rep ; stosl"
-
- : "=&c" (d0), "=&D" (d1)
- :"a" (0), "1" (page), "0" (1024)
- :"memory");
-}
-
-void mmx_clear_page(void *page)
-{
- if (unlikely(in_interrupt()))
- slow_zero_page(page);
- else
- fast_clear_page(page);
-}
-EXPORT_SYMBOL(mmx_clear_page);
-
-static void slow_copy_page(void *to, void *from)
-{
- int d0, d1, d2;
-
- __asm__ __volatile__(
- "cld\n\t"
- "rep ; movsl"
- : "=&c" (d0), "=&D" (d1), "=&S" (d2)
- : "0" (1024), "1" ((long) to), "2" ((long) from)
- : "memory");
-}
-
-void mmx_copy_page(void *to, void *from)
-{
- if (unlikely(in_interrupt()))
- slow_copy_page(to, from);
- else
- fast_copy_page(to, from);
-}
-EXPORT_SYMBOL(mmx_copy_page);
diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
index a2b9caa5274c..ebd259f31496 100644
--- a/arch/x86/lib/msr-reg.S
+++ b/arch/x86/lib/msr-reg.S
@@ -35,7 +35,7 @@ SYM_FUNC_START(\op\()_safe_regs)
movl %edi, 28(%r10)
popq %r12
popq %rbx
- ret
+ RET
3:
movl $-EIO, %r11d
jmp 2b
@@ -77,7 +77,7 @@ SYM_FUNC_START(\op\()_safe_regs)
popl %esi
popl %ebp
popl %ebx
- ret
+ RET
3:
movl $-EIO, 4(%esp)
jmp 2b
diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
index 0ea344c5ea43..ecb2049c1273 100644
--- a/arch/x86/lib/putuser.S
+++ b/arch/x86/lib/putuser.S
@@ -52,7 +52,7 @@ SYM_INNER_LABEL(__put_user_nocheck_1, SYM_L_GLOBAL)
1: movb %al,(%_ASM_CX)
xor %ecx,%ecx
ASM_CLAC
- ret
+ RET
SYM_FUNC_END(__put_user_1)
EXPORT_SYMBOL(__put_user_1)
EXPORT_SYMBOL(__put_user_nocheck_1)
@@ -66,7 +66,7 @@ SYM_INNER_LABEL(__put_user_nocheck_2, SYM_L_GLOBAL)
2: movw %ax,(%_ASM_CX)
xor %ecx,%ecx
ASM_CLAC
- ret
+ RET
SYM_FUNC_END(__put_user_2)
EXPORT_SYMBOL(__put_user_2)
EXPORT_SYMBOL(__put_user_nocheck_2)
@@ -80,7 +80,7 @@ SYM_INNER_LABEL(__put_user_nocheck_4, SYM_L_GLOBAL)
3: movl %eax,(%_ASM_CX)
xor %ecx,%ecx
ASM_CLAC
- ret
+ RET
SYM_FUNC_END(__put_user_4)
EXPORT_SYMBOL(__put_user_4)
EXPORT_SYMBOL(__put_user_nocheck_4)
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index cf0b39f97adc..89b3fb244e15 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -23,7 +23,7 @@
.Ldo_rop_\@:
mov %\reg, (%_ASM_SP)
UNWIND_HINT_FUNC
- ret
+ RET
.endm
.macro THUNK reg
@@ -34,7 +34,7 @@ SYM_INNER_LABEL(__x86_indirect_thunk_\reg, SYM_L_GLOBAL)
ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), \
__stringify(RETPOLINE \reg), X86_FEATURE_RETPOLINE, \
- __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg), X86_FEATURE_RETPOLINE_AMD
+ __stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *%\reg; int3), X86_FEATURE_RETPOLINE_AMD
.endm
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 7d290777246d..422257c350c6 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -8,7 +8,6 @@
*/
#include <linux/export.h>
#include <linux/uaccess.h>
-#include <asm/mmx.h>
#include <asm/asm.h>
#ifdef CONFIG_X86_INTEL_USERCOPY
@@ -43,11 +42,7 @@ do { \
" movl %2,%0\n" \
"1: rep; stosb\n" \
"2: " ASM_CLAC "\n" \
- ".section .fixup,\"ax\"\n" \
- "3: lea 0(%2,%0,4),%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE_UA(0b, 3b) \
+ _ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN4, %2) \
_ASM_EXTABLE_UA(1b, 2b) \
: "=&c"(size), "=&D" (__d0) \
: "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
@@ -149,10 +144,6 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
"36: movl %%eax, %0\n"
"37: rep; movsb\n"
"100:\n"
- ".section .fixup,\"ax\"\n"
- "101: lea 0(%%eax,%0,4),%0\n"
- " jmp 100b\n"
- ".previous\n"
_ASM_EXTABLE_UA(1b, 100b)
_ASM_EXTABLE_UA(2b, 100b)
_ASM_EXTABLE_UA(3b, 100b)
@@ -190,7 +181,7 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
_ASM_EXTABLE_UA(35b, 100b)
_ASM_EXTABLE_UA(36b, 100b)
_ASM_EXTABLE_UA(37b, 100b)
- _ASM_EXTABLE_UA(99b, 101b)
+ _ASM_EXTABLE_TYPE_REG(99b, 100b, EX_TYPE_UCOPY_LEN4, %%eax)
: "=&c"(size), "=&D" (d0), "=&S" (d1)
: "1"(to), "2"(from), "0"(size)
: "eax", "edx", "memory");
@@ -255,30 +246,26 @@ static unsigned long __copy_user_intel_nocache(void *to,
" movl %%eax,%0\n"
"7: rep; movsb\n"
"8:\n"
- ".section .fixup,\"ax\"\n"
- "9: lea 0(%%eax,%0,4),%0\n"
- "16: jmp 8b\n"
- ".previous\n"
- _ASM_EXTABLE_UA(0b, 16b)
- _ASM_EXTABLE_UA(1b, 16b)
- _ASM_EXTABLE_UA(2b, 16b)
- _ASM_EXTABLE_UA(21b, 16b)
- _ASM_EXTABLE_UA(3b, 16b)
- _ASM_EXTABLE_UA(31b, 16b)
- _ASM_EXTABLE_UA(4b, 16b)
- _ASM_EXTABLE_UA(41b, 16b)
- _ASM_EXTABLE_UA(10b, 16b)
- _ASM_EXTABLE_UA(51b, 16b)
- _ASM_EXTABLE_UA(11b, 16b)
- _ASM_EXTABLE_UA(61b, 16b)
- _ASM_EXTABLE_UA(12b, 16b)
- _ASM_EXTABLE_UA(71b, 16b)
- _ASM_EXTABLE_UA(13b, 16b)
- _ASM_EXTABLE_UA(81b, 16b)
- _ASM_EXTABLE_UA(14b, 16b)
- _ASM_EXTABLE_UA(91b, 16b)
- _ASM_EXTABLE_UA(6b, 9b)
- _ASM_EXTABLE_UA(7b, 16b)
+ _ASM_EXTABLE_UA(0b, 8b)
+ _ASM_EXTABLE_UA(1b, 8b)
+ _ASM_EXTABLE_UA(2b, 8b)
+ _ASM_EXTABLE_UA(21b, 8b)
+ _ASM_EXTABLE_UA(3b, 8b)
+ _ASM_EXTABLE_UA(31b, 8b)
+ _ASM_EXTABLE_UA(4b, 8b)
+ _ASM_EXTABLE_UA(41b, 8b)
+ _ASM_EXTABLE_UA(10b, 8b)
+ _ASM_EXTABLE_UA(51b, 8b)
+ _ASM_EXTABLE_UA(11b, 8b)
+ _ASM_EXTABLE_UA(61b, 8b)
+ _ASM_EXTABLE_UA(12b, 8b)
+ _ASM_EXTABLE_UA(71b, 8b)
+ _ASM_EXTABLE_UA(13b, 8b)
+ _ASM_EXTABLE_UA(81b, 8b)
+ _ASM_EXTABLE_UA(14b, 8b)
+ _ASM_EXTABLE_UA(91b, 8b)
+ _ASM_EXTABLE_TYPE_REG(6b, 8b, EX_TYPE_UCOPY_LEN4, %%eax)
+ _ASM_EXTABLE_UA(7b, 8b)
: "=&c"(size), "=&D" (d0), "=&S" (d1)
: "1"(to), "2"(from), "0"(size)
: "eax", "edx", "memory");
@@ -315,14 +302,8 @@ do { \
" movl %3,%0\n" \
"1: rep; movsb\n" \
"2:\n" \
- ".section .fixup,\"ax\"\n" \
- "5: addl %3,%0\n" \
- " jmp 2b\n" \
- "3: lea 0(%3,%0,4),%0\n" \
- " jmp 2b\n" \
- ".previous\n" \
- _ASM_EXTABLE_UA(4b, 5b) \
- _ASM_EXTABLE_UA(0b, 3b) \
+ _ASM_EXTABLE_TYPE_REG(4b, 2b, EX_TYPE_UCOPY_LEN1, %3) \
+ _ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN4, %3) \
_ASM_EXTABLE_UA(1b, 2b) \
: "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
: "3"(size), "0"(size), "1"(to), "2"(from) \
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 508c81e97ab1..0402a749f3a0 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -35,12 +35,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
" incq %[dst]\n"
" decl %%ecx ; jnz 1b\n"
"2:\n"
- ".section .fixup,\"ax\"\n"
- "3: lea 0(%[size1],%[size8],8),%[size8]\n"
- " jmp 2b\n"
- ".previous\n"
- _ASM_EXTABLE_UA(0b, 3b)
+
+ _ASM_EXTABLE_TYPE_REG(0b, 2b, EX_TYPE_UCOPY_LEN8, %[size1])
_ASM_EXTABLE_UA(1b, 2b)
+
: [size8] "=&c"(size), [dst] "=&D" (__d0)
: [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
clac();
diff --git a/arch/x86/math-emu/div_Xsig.S b/arch/x86/math-emu/div_Xsig.S
index 951da2ad54bb..8c270ab415be 100644
--- a/arch/x86/math-emu/div_Xsig.S
+++ b/arch/x86/math-emu/div_Xsig.S
@@ -341,7 +341,7 @@ L_exit:
popl %esi
leave
- ret
+ RET
#ifdef PARANOID
diff --git a/arch/x86/math-emu/div_small.S b/arch/x86/math-emu/div_small.S
index d047d1816abe..637439bfefa4 100644
--- a/arch/x86/math-emu/div_small.S
+++ b/arch/x86/math-emu/div_small.S
@@ -44,5 +44,5 @@ SYM_FUNC_START(FPU_div_small)
popl %esi
leave
- ret
+ RET
SYM_FUNC_END(FPU_div_small)
diff --git a/arch/x86/math-emu/mul_Xsig.S b/arch/x86/math-emu/mul_Xsig.S
index 4afc7b1fa6e9..54a031b66142 100644
--- a/arch/x86/math-emu/mul_Xsig.S
+++ b/arch/x86/math-emu/mul_Xsig.S
@@ -62,7 +62,7 @@ SYM_FUNC_START(mul32_Xsig)
popl %esi
leave
- ret
+ RET
SYM_FUNC_END(mul32_Xsig)
@@ -115,7 +115,7 @@ SYM_FUNC_START(mul64_Xsig)
popl %esi
leave
- ret
+ RET
SYM_FUNC_END(mul64_Xsig)
@@ -175,5 +175,5 @@ SYM_FUNC_START(mul_Xsig_Xsig)
popl %esi
leave
- ret
+ RET
SYM_FUNC_END(mul_Xsig_Xsig)
diff --git a/arch/x86/math-emu/polynom_Xsig.S b/arch/x86/math-emu/polynom_Xsig.S
index 702315eecb86..35fd723fc0df 100644
--- a/arch/x86/math-emu/polynom_Xsig.S
+++ b/arch/x86/math-emu/polynom_Xsig.S
@@ -133,5 +133,5 @@ L_accum_done:
popl %edi
popl %esi
leave
- ret
+ RET
SYM_FUNC_END(polynomial_Xsig)
diff --git a/arch/x86/math-emu/reg_norm.S b/arch/x86/math-emu/reg_norm.S
index cad1d60b1e84..594936eeed67 100644
--- a/arch/x86/math-emu/reg_norm.S
+++ b/arch/x86/math-emu/reg_norm.S
@@ -72,7 +72,7 @@ L_exit_valid:
L_exit:
popl %ebx
leave
- ret
+ RET
L_zero:
@@ -138,7 +138,7 @@ L_exit_nuo_valid:
popl %ebx
leave
- ret
+ RET
L_exit_nuo_zero:
movl TAG_Zero,%eax
@@ -146,5 +146,5 @@ L_exit_nuo_zero:
popl %ebx
leave
- ret
+ RET
SYM_FUNC_END(FPU_normalize_nuo)
diff --git a/arch/x86/math-emu/reg_round.S b/arch/x86/math-emu/reg_round.S
index 4a9fc3cc5a4d..0bb2a092161a 100644
--- a/arch/x86/math-emu/reg_round.S
+++ b/arch/x86/math-emu/reg_round.S
@@ -437,7 +437,7 @@ fpu_Arith_exit:
popl %edi
popl %esi
leave
- ret
+ RET
/*
diff --git a/arch/x86/math-emu/reg_u_add.S b/arch/x86/math-emu/reg_u_add.S
index 9c9e2c810afe..07247287a3af 100644
--- a/arch/x86/math-emu/reg_u_add.S
+++ b/arch/x86/math-emu/reg_u_add.S
@@ -164,6 +164,6 @@ L_exit:
popl %edi
popl %esi
leave
- ret
+ RET
#endif /* PARANOID */
SYM_FUNC_END(FPU_u_add)
diff --git a/arch/x86/math-emu/reg_u_div.S b/arch/x86/math-emu/reg_u_div.S
index e2fb5c2644c5..b5a41e2fc484 100644
--- a/arch/x86/math-emu/reg_u_div.S
+++ b/arch/x86/math-emu/reg_u_div.S
@@ -468,7 +468,7 @@ L_exit:
popl %esi
leave
- ret
+ RET
#endif /* PARANOID */
SYM_FUNC_END(FPU_u_div)
diff --git a/arch/x86/math-emu/reg_u_mul.S b/arch/x86/math-emu/reg_u_mul.S
index 0c779c87ac5b..e2588b24b8c2 100644
--- a/arch/x86/math-emu/reg_u_mul.S
+++ b/arch/x86/math-emu/reg_u_mul.S
@@ -144,7 +144,7 @@ L_exit:
popl %edi
popl %esi
leave
- ret
+ RET
#endif /* PARANOID */
SYM_FUNC_END(FPU_u_mul)
diff --git a/arch/x86/math-emu/reg_u_sub.S b/arch/x86/math-emu/reg_u_sub.S
index e9bb7c248649..4c900c29e4ff 100644
--- a/arch/x86/math-emu/reg_u_sub.S
+++ b/arch/x86/math-emu/reg_u_sub.S
@@ -270,5 +270,5 @@ L_exit:
popl %edi
popl %esi
leave
- ret
+ RET
SYM_FUNC_END(FPU_u_sub)
diff --git a/arch/x86/math-emu/round_Xsig.S b/arch/x86/math-emu/round_Xsig.S
index d9d7de8dbd7b..126c40473bad 100644
--- a/arch/x86/math-emu/round_Xsig.S
+++ b/arch/x86/math-emu/round_Xsig.S
@@ -78,7 +78,7 @@ L_exit:
popl %esi
popl %ebx
leave
- ret
+ RET
SYM_FUNC_END(round_Xsig)
@@ -138,5 +138,5 @@ L_n_exit:
popl %esi
popl %ebx
leave
- ret
+ RET
SYM_FUNC_END(norm_Xsig)
diff --git a/arch/x86/math-emu/shr_Xsig.S b/arch/x86/math-emu/shr_Xsig.S
index 726af985f758..f726bf6f6396 100644
--- a/arch/x86/math-emu/shr_Xsig.S
+++ b/arch/x86/math-emu/shr_Xsig.S
@@ -45,7 +45,7 @@ SYM_FUNC_START(shr_Xsig)
popl %ebx
popl %esi
leave
- ret
+ RET
L_more_than_31:
cmpl $64,%ecx
@@ -61,7 +61,7 @@ L_more_than_31:
movl $0,8(%esi)
popl %esi
leave
- ret
+ RET
L_more_than_63:
cmpl $96,%ecx
@@ -76,7 +76,7 @@ L_more_than_63:
movl %edx,8(%esi)
popl %esi
leave
- ret
+ RET
L_more_than_95:
xorl %eax,%eax
@@ -85,5 +85,5 @@ L_more_than_95:
movl %eax,8(%esi)
popl %esi
leave
- ret
+ RET
SYM_FUNC_END(shr_Xsig)
diff --git a/arch/x86/math-emu/wm_shrx.S b/arch/x86/math-emu/wm_shrx.S
index 4fc89174caf0..f608a28a4c43 100644
--- a/arch/x86/math-emu/wm_shrx.S
+++ b/arch/x86/math-emu/wm_shrx.S
@@ -55,7 +55,7 @@ SYM_FUNC_START(FPU_shrx)
popl %ebx
popl %esi
leave
- ret
+ RET
L_more_than_31:
cmpl $64,%ecx
@@ -70,7 +70,7 @@ L_more_than_31:
movl $0,4(%esi)
popl %esi
leave
- ret
+ RET
L_more_than_63:
cmpl $96,%ecx
@@ -84,7 +84,7 @@ L_more_than_63:
movl %edx,4(%esi)
popl %esi
leave
- ret
+ RET
L_more_than_95:
xorl %eax,%eax
@@ -92,7 +92,7 @@ L_more_than_95:
movl %eax,4(%esi)
popl %esi
leave
- ret
+ RET
SYM_FUNC_END(FPU_shrx)
@@ -146,7 +146,7 @@ SYM_FUNC_START(FPU_shrxs)
popl %ebx
popl %esi
leave
- ret
+ RET
/* Shift by [0..31] bits */
Ls_less_than_32:
@@ -163,7 +163,7 @@ Ls_less_than_32:
popl %ebx
popl %esi
leave
- ret
+ RET
/* Shift by [64..95] bits */
Ls_more_than_63:
@@ -189,7 +189,7 @@ Ls_more_than_63:
popl %ebx
popl %esi
leave
- ret
+ RET
Ls_more_than_95:
/* Shift by [96..inf) bits */
@@ -203,5 +203,5 @@ Ls_more_than_95:
popl %ebx
popl %esi
leave
- ret
+ RET
SYM_FUNC_END(FPU_shrxs)
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 5cd2a88930a9..dba2197c05c3 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -2,12 +2,26 @@
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
+#include <linux/bitfield.h>
#include <xen/xen.h>
#include <asm/fpu/api.h>
#include <asm/sev.h>
#include <asm/traps.h>
#include <asm/kdebug.h>
+#include <asm/insn-eval.h>
+#include <asm/sgx.h>
+
+static inline unsigned long *pt_regs_nr(struct pt_regs *regs, int nr)
+{
+ int reg_offset = pt_regs_offset(regs, nr);
+ static unsigned long __dummy;
+
+ if (WARN_ON_ONCE(reg_offset < 0))
+ return &__dummy;
+
+ return (unsigned long *)((unsigned long)regs + reg_offset);
+}
static inline unsigned long
ex_fixup_addr(const struct exception_table_entry *x)
@@ -15,10 +29,15 @@ ex_fixup_addr(const struct exception_table_entry *x)
return (unsigned long)&x->fixup + x->fixup;
}
-static bool ex_handler_default(const struct exception_table_entry *fixup,
+static bool ex_handler_default(const struct exception_table_entry *e,
struct pt_regs *regs)
{
- regs->ip = ex_fixup_addr(fixup);
+ if (e->data & EX_FLAG_CLEAR_AX)
+ regs->ax = 0;
+ if (e->data & EX_FLAG_CLEAR_DX)
+ regs->dx = 0;
+
+ regs->ip = ex_fixup_addr(e);
return true;
}
@@ -29,6 +48,13 @@ static bool ex_handler_fault(const struct exception_table_entry *fixup,
return ex_handler_default(fixup, regs);
}
+static bool ex_handler_sgx(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr)
+{
+ regs->ax = trapnr | SGX_ENCLS_FAULT_FLAG;
+ return ex_handler_default(fixup, regs);
+}
+
/*
* Handler for when we fail to restore a task's FPU state. We should never get
* here because the FPU state of a task using the FPU (task->thread.fpu.state)
@@ -65,28 +91,29 @@ static bool ex_handler_copy(const struct exception_table_entry *fixup,
return ex_handler_fault(fixup, regs, trapnr);
}
-static bool ex_handler_rdmsr_unsafe(const struct exception_table_entry *fixup,
- struct pt_regs *regs)
+static bool ex_handler_msr(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, bool wrmsr, bool safe, int reg)
{
- if (pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
+ if (!safe && wrmsr &&
+ pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
+ (unsigned int)regs->cx, (unsigned int)regs->dx,
+ (unsigned int)regs->ax, regs->ip, (void *)regs->ip))
+ show_stack_regs(regs);
+
+ if (!safe && !wrmsr &&
+ pr_warn_once("unchecked MSR access error: RDMSR from 0x%x at rIP: 0x%lx (%pS)\n",
(unsigned int)regs->cx, regs->ip, (void *)regs->ip))
show_stack_regs(regs);
- /* Pretend that the read succeeded and returned 0. */
- regs->ax = 0;
- regs->dx = 0;
- return ex_handler_default(fixup, regs);
-}
+ if (!wrmsr) {
+ /* Pretend that the read succeeded and returned 0. */
+ regs->ax = 0;
+ regs->dx = 0;
+ }
-static bool ex_handler_wrmsr_unsafe(const struct exception_table_entry *fixup,
- struct pt_regs *regs)
-{
- if (pr_warn_once("unchecked MSR access error: WRMSR to 0x%x (tried to write 0x%08x%08x) at rIP: 0x%lx (%pS)\n",
- (unsigned int)regs->cx, (unsigned int)regs->dx,
- (unsigned int)regs->ax, regs->ip, (void *)regs->ip))
- show_stack_regs(regs);
+ if (safe)
+ *pt_regs_nr(regs, reg) = -EIO;
- /* Pretend that the write succeeded. */
return ex_handler_default(fixup, regs);
}
@@ -99,17 +126,32 @@ static bool ex_handler_clear_fs(const struct exception_table_entry *fixup,
return ex_handler_default(fixup, regs);
}
+static bool ex_handler_imm_reg(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int reg, int imm)
+{
+ *pt_regs_nr(regs, reg) = (long)imm;
+ return ex_handler_default(fixup, regs);
+}
+
+static bool ex_handler_ucopy_len(const struct exception_table_entry *fixup,
+ struct pt_regs *regs, int trapnr, int reg, int imm)
+{
+ regs->cx = imm * regs->cx + *pt_regs_nr(regs, reg);
+ return ex_handler_uaccess(fixup, regs, trapnr);
+}
+
int ex_get_fixup_type(unsigned long ip)
{
const struct exception_table_entry *e = search_exception_tables(ip);
- return e ? e->type : EX_TYPE_NONE;
+ return e ? FIELD_GET(EX_DATA_TYPE_MASK, e->data) : EX_TYPE_NONE;
}
int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
unsigned long fault_addr)
{
const struct exception_table_entry *e;
+ int type, reg, imm;
#ifdef CONFIG_PNPBIOS
if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
@@ -129,7 +171,11 @@ int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
if (!e)
return 0;
- switch (e->type) {
+ type = FIELD_GET(EX_DATA_TYPE_MASK, e->data);
+ reg = FIELD_GET(EX_DATA_REG_MASK, e->data);
+ imm = FIELD_GET(EX_DATA_IMM_MASK, e->data);
+
+ switch (type) {
case EX_TYPE_DEFAULT:
case EX_TYPE_DEFAULT_MCE_SAFE:
return ex_handler_default(e, regs);
@@ -144,18 +190,31 @@ int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
return ex_handler_clear_fs(e, regs);
case EX_TYPE_FPU_RESTORE:
return ex_handler_fprestore(e, regs);
- case EX_TYPE_RDMSR:
- return ex_handler_rdmsr_unsafe(e, regs);
- case EX_TYPE_WRMSR:
- return ex_handler_wrmsr_unsafe(e, regs);
case EX_TYPE_BPF:
return ex_handler_bpf(e, regs);
- case EX_TYPE_RDMSR_IN_MCE:
- ex_handler_msr_mce(regs, false);
- break;
+ case EX_TYPE_WRMSR:
+ return ex_handler_msr(e, regs, true, false, reg);
+ case EX_TYPE_RDMSR:
+ return ex_handler_msr(e, regs, false, false, reg);
+ case EX_TYPE_WRMSR_SAFE:
+ return ex_handler_msr(e, regs, true, true, reg);
+ case EX_TYPE_RDMSR_SAFE:
+ return ex_handler_msr(e, regs, false, true, reg);
case EX_TYPE_WRMSR_IN_MCE:
ex_handler_msr_mce(regs, true);
break;
+ case EX_TYPE_RDMSR_IN_MCE:
+ ex_handler_msr_mce(regs, false);
+ break;
+ case EX_TYPE_POP_REG:
+ regs->sp += sizeof(long);
+ fallthrough;
+ case EX_TYPE_IMM_REG:
+ return ex_handler_imm_reg(e, regs, reg, imm);
+ case EX_TYPE_FAULT_SGX:
+ return ex_handler_sgx(e, regs, trapnr);
+ case EX_TYPE_UCOPY_LEN:
+ return ex_handler_ucopy_len(e, regs, trapnr, reg, imm);
}
BUG();
}
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 4bfed53e210e..d0074c6ed31a 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1413,8 +1413,7 @@ good_area:
* and if there is a fatal signal pending there is no guarantee
* that we made any progress. Handle this case first.
*/
- if (unlikely((fault & VM_FAULT_RETRY) &&
- (flags & FAULT_FLAG_ALLOW_RETRY))) {
+ if (unlikely(fault & VM_FAULT_RETRY)) {
flags |= FAULT_FLAG_TRIED;
goto retry;
}
diff --git a/arch/x86/mm/mem_encrypt_boot.S b/arch/x86/mm/mem_encrypt_boot.S
index 17d292b7072f..3d1dba05fce4 100644
--- a/arch/x86/mm/mem_encrypt_boot.S
+++ b/arch/x86/mm/mem_encrypt_boot.S
@@ -65,7 +65,7 @@ SYM_FUNC_START(sme_encrypt_execute)
movq %rbp, %rsp /* Restore original stack pointer */
pop %rbp
- ret
+ RET
SYM_FUNC_END(sme_encrypt_execute)
SYM_FUNC_START(__enc_copy)
@@ -151,6 +151,6 @@ SYM_FUNC_START(__enc_copy)
pop %r12
pop %r15
- ret
+ RET
.L__enc_copy_end:
SYM_FUNC_END(__enc_copy)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index ce1f86f245c9..2b1e266ff95c 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1326,7 +1326,7 @@ st: if (is_imm8(insn->off))
}
ex->insn = delta;
- ex->type = EX_TYPE_BPF;
+ ex->data = EX_TYPE_BPF;
if (dst_reg > BPF_REG_9) {
pr_err("verifier error\n");
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 948656069cdd..052f1d78a562 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -20,7 +20,7 @@ struct pci_root_info {
};
static bool pci_use_crs = true;
-static bool pci_ignore_seg = false;
+static bool pci_ignore_seg;
static int __init set_use_crs(const struct dmi_system_id *id)
{
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 2edd86649468..615a76d70019 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -353,8 +353,8 @@ static void pci_fixup_video(struct pci_dev *pdev)
}
}
}
-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
+DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_video);
static const struct dmi_system_id msi_k8t_dmi_table[] = {
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 12da00558631..9bb1e2941179 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -184,7 +184,7 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (ret)
goto error;
i = 0;
- for_each_pci_msi_entry(msidesc, dev) {
+ msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i],
(type == PCI_CAP_ID_MSI) ? nvec : 1,
(type == PCI_CAP_ID_MSIX) ?
@@ -235,7 +235,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
if (type == PCI_CAP_ID_MSI && nvec > 1)
return 1;
- for_each_pci_msi_entry(msidesc, dev) {
+ msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
pirq = xen_allocate_pirq_msi(dev, msidesc);
if (pirq < 0) {
irq = -ENODEV;
@@ -270,7 +270,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
int ret = 0;
struct msi_desc *msidesc;
- for_each_pci_msi_entry(msidesc, dev) {
+ msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
struct physdev_map_pirq map_irq;
domid_t domid;
@@ -306,7 +306,7 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
return -EINVAL;
map_irq.table_base = pci_resource_start(dev, bir);
- map_irq.entry_nr = msidesc->msi_attrib.entry_nr;
+ map_irq.entry_nr = msidesc->msi_index;
}
ret = -EINVAL;
@@ -351,10 +351,13 @@ out:
return ret;
}
-static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
+bool xen_initdom_restore_msi(struct pci_dev *dev)
{
int ret = 0;
+ if (!xen_initial_domain())
+ return true;
+
if (pci_seg_supported) {
struct physdev_pci_device restore_ext;
@@ -375,10 +378,10 @@ static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)
ret = HYPERVISOR_physdev_op(PHYSDEVOP_restore_msi, &restore);
WARN(ret && ret != -ENOSYS, "restore_msi -> %d\n", ret);
}
+ return false;
}
#else /* CONFIG_XEN_PV_DOM0 */
#define xen_initdom_setup_msi_irqs NULL
-#define xen_initdom_restore_msi_irqs NULL
#endif /* !CONFIG_XEN_PV_DOM0 */
static void xen_teardown_msi_irqs(struct pci_dev *dev)
@@ -386,19 +389,15 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev)
struct msi_desc *msidesc;
int i;
- for_each_pci_msi_entry(msidesc, dev) {
- if (msidesc->irq) {
- for (i = 0; i < msidesc->nvec_used; i++)
- xen_destroy_irq(msidesc->irq + i);
- }
+ msi_for_each_desc(msidesc, &dev->dev, MSI_DESC_ASSOCIATED) {
+ for (i = 0; i < msidesc->nvec_used; i++)
+ xen_destroy_irq(msidesc->irq + i);
}
}
static void xen_pv_teardown_msi_irqs(struct pci_dev *dev)
{
- struct msi_desc *msidesc = first_pci_msi_entry(dev);
-
- if (msidesc->msi_attrib.is_msix)
+ if (dev->msix_enabled)
xen_pci_frontend_disable_msix(dev);
else
xen_pci_frontend_disable_msi(dev);
@@ -414,10 +413,7 @@ static int xen_msi_domain_alloc_irqs(struct irq_domain *domain,
if (WARN_ON_ONCE(!dev_is_pci(dev)))
return -EINVAL;
- if (first_msi_entry(dev)->msi_attrib.is_msix)
- type = PCI_CAP_ID_MSIX;
- else
- type = PCI_CAP_ID_MSI;
+ type = to_pci_dev(dev)->msix_enabled ? PCI_CAP_ID_MSIX : PCI_CAP_ID_MSI;
return xen_msi_ops.setup_msi_irqs(to_pci_dev(dev), nvec, type);
}
@@ -466,12 +462,10 @@ static __init struct irq_domain *xen_create_pci_msi_domain(void)
static __init void xen_setup_pci_msi(void)
{
if (xen_pv_domain()) {
- if (xen_initial_domain()) {
+ if (xen_initial_domain())
xen_msi_ops.setup_msi_irqs = xen_initdom_setup_msi_irqs;
- x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
- } else {
+ else
xen_msi_ops.setup_msi_irqs = xen_setup_msi_irqs;
- }
xen_msi_ops.teardown_msi_irqs = xen_pv_teardown_msi_irqs;
pci_msi_ignore_mask = 1;
} else if (xen_hvm_domain()) {
diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
index 09ec84f6ef51..f3cfdb1c9a35 100644
--- a/arch/x86/platform/efi/efi_stub_32.S
+++ b/arch/x86/platform/efi/efi_stub_32.S
@@ -56,5 +56,5 @@ SYM_FUNC_START(efi_call_svam)
movl 16(%esp), %ebx
leave
- ret
+ RET
SYM_FUNC_END(efi_call_svam)
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
index 90380a17ab23..2206b8bc47b8 100644
--- a/arch/x86/platform/efi/efi_stub_64.S
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -23,5 +23,5 @@ SYM_FUNC_START(__efi_call)
mov %rsi, %rcx
CALL_NOSPEC rdi
leave
- ret
+ RET
SYM_FUNC_END(__efi_call)
diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S
index 5b7c6e09954e..25799d768624 100644
--- a/arch/x86/platform/efi/efi_thunk_64.S
+++ b/arch/x86/platform/efi/efi_thunk_64.S
@@ -73,7 +73,7 @@ SYM_CODE_START(__efi64_thunk)
1: movq 0x20(%rsp), %rsp
pop %rbx
pop %rbp
- retq
+ RET
.code32
2: pushl $__KERNEL_CS
diff --git a/arch/x86/platform/olpc/xo1-wakeup.S b/arch/x86/platform/olpc/xo1-wakeup.S
index 75f4faff8468..3a5abffe5660 100644
--- a/arch/x86/platform/olpc/xo1-wakeup.S
+++ b/arch/x86/platform/olpc/xo1-wakeup.S
@@ -77,7 +77,7 @@ save_registers:
pushfl
popl saved_context_eflags
- ret
+ RET
restore_registers:
movl saved_context_ebp, %ebp
@@ -88,7 +88,7 @@ restore_registers:
pushl saved_context_eflags
popfl
- ret
+ RET
SYM_CODE_START(do_olpc_suspend_lowlevel)
call save_processor_state
@@ -109,7 +109,7 @@ ret_point:
call restore_registers
call restore_processor_state
- ret
+ RET
SYM_CODE_END(do_olpc_suspend_lowlevel)
.data
diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S
index 8786653ad3c0..5606a15cf9a1 100644
--- a/arch/x86/power/hibernate_asm_32.S
+++ b/arch/x86/power/hibernate_asm_32.S
@@ -32,7 +32,7 @@ SYM_FUNC_START(swsusp_arch_suspend)
FRAME_BEGIN
call swsusp_save
FRAME_END
- ret
+ RET
SYM_FUNC_END(swsusp_arch_suspend)
SYM_CODE_START(restore_image)
@@ -108,5 +108,5 @@ SYM_FUNC_START(restore_registers)
/* tell the hibernation core that we've just restored the memory */
movl %eax, in_suspend
- ret
+ RET
SYM_FUNC_END(restore_registers)
diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S
index d9bed596d849..0a0539e1cc81 100644
--- a/arch/x86/power/hibernate_asm_64.S
+++ b/arch/x86/power/hibernate_asm_64.S
@@ -66,7 +66,7 @@ SYM_FUNC_START(restore_registers)
/* tell the hibernation core that we've just restored the memory */
movq %rax, in_suspend(%rip)
- ret
+ RET
SYM_FUNC_END(restore_registers)
SYM_FUNC_START(swsusp_arch_suspend)
@@ -96,7 +96,7 @@ SYM_FUNC_START(swsusp_arch_suspend)
FRAME_BEGIN
call swsusp_save
FRAME_END
- ret
+ RET
SYM_FUNC_END(swsusp_arch_suspend)
SYM_FUNC_START(restore_image)
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index 95d26a69088b..40d6a06e41c8 100644
--- a/arch/x86/um/Kconfig
+++ b/arch/x86/um/Kconfig
@@ -8,7 +8,6 @@ endmenu
config UML_X86
def_bool y
- select GENERIC_FIND_FIRST_BIT
config 64BIT
bool "64-bit kernel" if "$(SUBARCH)" = "x86"
diff --git a/arch/x86/um/checksum_32.S b/arch/x86/um/checksum_32.S
index 13f118dec74f..aed782ab7721 100644
--- a/arch/x86/um/checksum_32.S
+++ b/arch/x86/um/checksum_32.S
@@ -110,7 +110,7 @@ csum_partial:
7:
popl %ebx
popl %esi
- ret
+ RET
#else
@@ -208,7 +208,7 @@ csum_partial:
80:
popl %ebx
popl %esi
- ret
+ RET
#endif
EXPORT_SYMBOL(csum_partial)
diff --git a/arch/x86/um/setjmp_32.S b/arch/x86/um/setjmp_32.S
index 62eaf8c80e04..2d991ddbcca5 100644
--- a/arch/x86/um/setjmp_32.S
+++ b/arch/x86/um/setjmp_32.S
@@ -34,7 +34,7 @@ kernel_setjmp:
movl %esi,12(%edx)
movl %edi,16(%edx)
movl %ecx,20(%edx) # Return address
- ret
+ RET
.size kernel_setjmp,.-kernel_setjmp
diff --git a/arch/x86/um/setjmp_64.S b/arch/x86/um/setjmp_64.S
index 1b5d40d4ff46..b46acb6a8ebd 100644
--- a/arch/x86/um/setjmp_64.S
+++ b/arch/x86/um/setjmp_64.S
@@ -33,7 +33,7 @@ kernel_setjmp:
movq %r14,40(%rdi)
movq %r15,48(%rdi)
movq %rsi,56(%rdi) # Return address
- ret
+ RET
.size kernel_setjmp,.-kernel_setjmp
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig
index 6bcd3d8ca6ac..85246dd9faa1 100644
--- a/arch/x86/xen/Kconfig
+++ b/arch/x86/xen/Kconfig
@@ -23,6 +23,7 @@ config XEN_PV
select PARAVIRT_XXL
select XEN_HAVE_PVMMU
select XEN_HAVE_VPMU
+ select GUEST_PERF_EVENTS
help
Support running as a Xen PV guest.
diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c
index 42300941ec29..517a9d8d8f94 100644
--- a/arch/x86/xen/enlighten_hvm.c
+++ b/arch/x86/xen/enlighten_hvm.c
@@ -9,6 +9,7 @@
#include <xen/events.h>
#include <xen/interface/memory.h>
+#include <asm/apic.h>
#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/io_apic.h>
@@ -184,8 +185,7 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
xen_teardown_timer(cpu);
-
- return 0;
+ return 0;
}
static bool no_vector_callback __initdata;
@@ -242,15 +242,14 @@ static __init int xen_parse_no_vector_callback(char *arg)
}
early_param("xen_no_vector_callback", xen_parse_no_vector_callback);
-bool __init xen_hvm_need_lapic(void)
+static __init bool xen_x2apic_available(void)
{
- if (xen_pv_domain())
- return false;
- if (!xen_hvm_domain())
- return false;
- if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
- return false;
- return true;
+ return x2apic_supported();
+}
+
+static bool __init msi_ext_dest_id(void)
+{
+ return cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_EXT_DEST_ID;
}
static __init void xen_hvm_guest_late_init(void)
@@ -312,9 +311,10 @@ struct hypervisor_x86 x86_hyper_xen_hvm __initdata = {
.detect = xen_platform_hvm,
.type = X86_HYPER_XEN_HVM,
.init.init_platform = xen_hvm_guest_init,
- .init.x2apic_available = xen_x2apic_para_available,
+ .init.x2apic_available = xen_x2apic_available,
.init.init_mem_mapping = xen_hvm_init_mem_mapping,
.init.guest_late_init = xen_hvm_guest_late_init,
+ .init.msi_ext_dest_id = msi_ext_dest_id,
.runtime.pin_vcpu = xen_pin_vcpu,
.ignore_nopv = true,
};
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 5004feb16783..d47c3d176ae4 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1341,10 +1341,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
xen_acpi_sleep_register();
- /* Avoid searching for BIOS MP tables */
- x86_init.mpparse.find_smp_config = x86_init_noop;
- x86_init.mpparse.get_smp_config = x86_init_uint_noop;
-
xen_boot_params_init_edd();
#ifdef CONFIG_ACPI
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index e13b0b49fcdf..89dd6b1708b0 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -413,34 +413,29 @@ int pmu_apic_update(uint32_t val)
}
/* perf callbacks */
-static int xen_is_in_guest(void)
+static unsigned int xen_guest_state(void)
{
const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+ unsigned int state = 0;
if (!xenpmu_data) {
pr_warn_once("%s: pmudata not initialized\n", __func__);
- return 0;
+ return state;
}
if (!xen_initial_domain() || (xenpmu_data->domain_id >= DOMID_SELF))
- return 0;
+ return state;
- return 1;
-}
-
-static int xen_is_user_mode(void)
-{
- const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+ state |= PERF_GUEST_ACTIVE;
- if (!xenpmu_data) {
- pr_warn_once("%s: pmudata not initialized\n", __func__);
- return 0;
+ if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_PV) {
+ if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_USER)
+ state |= PERF_GUEST_USER;
+ } else if (xenpmu_data->pmu.r.regs.cpl & 3) {
+ state |= PERF_GUEST_USER;
}
- if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_PV)
- return (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_USER);
- else
- return !!(xenpmu_data->pmu.r.regs.cpl & 3);
+ return state;
}
static unsigned long xen_get_guest_ip(void)
@@ -456,9 +451,8 @@ static unsigned long xen_get_guest_ip(void)
}
static struct perf_guest_info_callbacks xen_guest_cbs = {
- .is_in_guest = xen_is_in_guest,
- .is_user_mode = xen_is_user_mode,
- .get_guest_ip = xen_get_guest_ip,
+ .state = xen_guest_state,
+ .get_ip = xen_get_guest_ip,
};
/* Convert registers from Xen's format to Linux' */
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 6a8f3b53ab83..4a6019238ee7 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -148,28 +148,12 @@ int xen_smp_intr_init_pv(unsigned int cpu)
return rc;
}
-static void __init xen_fill_possible_map(void)
-{
- int i, rc;
-
- if (xen_initial_domain())
- return;
-
- for (i = 0; i < nr_cpu_ids; i++) {
- rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
- if (rc >= 0) {
- num_processors++;
- set_cpu_possible(i, true);
- }
- }
-}
-
-static void __init xen_filter_cpu_maps(void)
+static void __init _get_smp_config(unsigned int early)
{
int i, rc;
unsigned int subtract = 0;
- if (!xen_initial_domain())
+ if (early)
return;
num_processors = 0;
@@ -210,7 +194,6 @@ static void __init xen_pv_smp_prepare_boot_cpu(void)
* sure the old memory can be recycled. */
make_lowmem_page_readwrite(xen_initial_gdt);
- xen_filter_cpu_maps();
xen_setup_vcpu_info_placement();
/*
@@ -476,5 +459,8 @@ static const struct smp_ops xen_smp_ops __initconst = {
void __init xen_smp_init(void)
{
smp_ops = xen_smp_ops;
- xen_fill_possible_map();
+
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = _get_smp_config;
}
diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c
index e336f223f7f4..14ea32e734d5 100644
--- a/arch/x86/xen/vga.c
+++ b/arch/x86/xen/vga.c
@@ -57,16 +57,20 @@ void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size)
screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size;
screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos;
+ if (size >= offsetof(struct dom0_vga_console_info,
+ u.vesa_lfb.ext_lfb_base)
+ + sizeof(info->u.vesa_lfb.ext_lfb_base)
+ && info->u.vesa_lfb.ext_lfb_base) {
+ screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base;
+ screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
+ }
+
if (info->video_type == XEN_VGATYPE_EFI_LFB) {
screen_info->orig_video_isVGA = VIDEO_TYPE_EFI;
break;
}
if (size >= offsetof(struct dom0_vga_console_info,
- u.vesa_lfb.gbl_caps)
- + sizeof(info->u.vesa_lfb.gbl_caps))
- screen_info->capabilities = info->u.vesa_lfb.gbl_caps;
- if (size >= offsetof(struct dom0_vga_console_info,
u.vesa_lfb.mode_attrs)
+ sizeof(info->u.vesa_lfb.mode_attrs))
screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs;
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index 444d824775f6..e730e6200e64 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -29,7 +29,7 @@
*/
SYM_FUNC_START(xen_irq_disable_direct)
movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
- ret
+ RET
SYM_FUNC_END(xen_irq_disable_direct)
/*
@@ -58,7 +58,7 @@ SYM_FUNC_START(check_events)
pop %rcx
pop %rax
FRAME_END
- ret
+ RET
SYM_FUNC_END(check_events)
/*
@@ -84,7 +84,7 @@ SYM_FUNC_START(xen_irq_enable_direct)
call check_events
1:
FRAME_END
- ret
+ RET
SYM_FUNC_END(xen_irq_enable_direct)
/*
@@ -100,7 +100,7 @@ SYM_FUNC_START(xen_save_fl_direct)
testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
setz %ah
addb %ah, %ah
- ret
+ RET
SYM_FUNC_END(xen_save_fl_direct)
SYM_FUNC_START(xen_read_cr2)
@@ -108,14 +108,14 @@ SYM_FUNC_START(xen_read_cr2)
_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
FRAME_END
- ret
+ RET
SYM_FUNC_END(xen_read_cr2);
SYM_FUNC_START(xen_read_cr2_direct)
FRAME_BEGIN
_ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX
FRAME_END
- ret
+ RET
SYM_FUNC_END(xen_read_cr2_direct);
.popsection
diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
index 6a64496edefb..11d286529fe5 100644
--- a/arch/x86/xen/xen-head.S
+++ b/arch/x86/xen/xen-head.S
@@ -26,7 +26,7 @@ SYM_CODE_START(hypercall_page)
.rept (PAGE_SIZE / 32)
UNWIND_HINT_FUNC
.skip 31, 0x90
- ret
+ RET
.endr
#define HYPERCALL(n) \
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 9778216d6e09..ee2769519eaf 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -12,7 +12,7 @@
# Core configuration.
# (Use VAR=<xtensa_config> to use another default compiler.)
-variant-y := $(patsubst "%",%,$(CONFIG_XTENSA_VARIANT_NAME))
+variant-y := $(CONFIG_XTENSA_VARIANT_NAME)
VARIANT = $(variant-y)
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile
index 0b8d00cdae7c..720628c0d8b9 100644
--- a/arch/xtensa/boot/dts/Makefile
+++ b/arch/xtensa/boot/dts/Makefile
@@ -7,10 +7,7 @@
#
#
-BUILTIN_DTB_SOURCE := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o
-ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"")
-obj-$(CONFIG_OF) += $(BUILTIN_DTB_SOURCE)
-endif
+obj-$(CONFIG_OF) += $(addsuffix .dtb.o, $(CONFIG_BUILTIN_DTB_SOURCE))
# for CONFIG_OF_ALL_DTBS test
dtstree := $(srctree)/$(src)
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 3f71d364ba90..cd225896c40f 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -205,7 +205,6 @@ BIT_OPS(change, "xor", )
#undef BIT_OP
#undef TEST_AND_BIT_OP
-#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/le.h>
#include <asm-generic/bitops/ext2-atomic-setbit.h>
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 99ab3c1a3387..a1029a5b6a1d 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1433,7 +1433,7 @@ ENTRY(fast_syscall_spill_registers)
rsync
movi abi_arg0, SIGSEGV
- abi_call do_exit
+ abi_call make_task_dead
/* shouldn't return, so panic */
diff --git a/arch/xtensa/kernel/syscalls/syscall.tbl b/arch/xtensa/kernel/syscalls/syscall.tbl
index 3e3e1a506bed..52c94ab5c205 100644
--- a/arch/xtensa/kernel/syscalls/syscall.tbl
+++ b/arch/xtensa/kernel/syscalls/syscall.tbl
@@ -420,3 +420,4 @@
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 4b4dbeb2d612..9345007d474d 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -552,5 +552,5 @@ void __noreturn die(const char * str, struct pt_regs * regs, long err)
if (panic_on_oops)
panic("Fatal exception");
- do_exit(err);
+ make_task_dead(err);
}
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index fd6a70635962..06d0973a0d74 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -127,17 +127,16 @@ good_area:
goto do_sigbus;
BUG();
}
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
- if (fault & VM_FAULT_RETRY) {
- flags |= FAULT_FLAG_TRIED;
- /* No need to mmap_read_unlock(mm) as we would
- * have already released it in __lock_page_or_retry
- * in mm/filemap.c.
- */
+ if (fault & VM_FAULT_RETRY) {
+ flags |= FAULT_FLAG_TRIED;
- goto retry;
- }
+ /* No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+
+ goto retry;
}
mmap_read_unlock(mm);
diff --git a/arch/xtensa/platforms/iss/simdisk.c b/arch/xtensa/platforms/iss/simdisk.c
index 07b642c1916a..8eb6ad1a3a1d 100644
--- a/arch/xtensa/platforms/iss/simdisk.c
+++ b/arch/xtensa/platforms/iss/simdisk.c
@@ -208,7 +208,7 @@ static int simdisk_detach(struct simdisk *dev)
static ssize_t proc_read_simdisk(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
- struct simdisk *dev = PDE_DATA(file_inode(file));
+ struct simdisk *dev = pde_data(file_inode(file));
const char *s = dev->filename;
if (s) {
ssize_t n = simple_read_from_buffer(buf, size, ppos,
@@ -225,7 +225,7 @@ static ssize_t proc_write_simdisk(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
char *tmp = memdup_user_nul(buf, count);
- struct simdisk *dev = PDE_DATA(file_inode(file));
+ struct simdisk *dev = pde_data(file_inode(file));
int err;
if (IS_ERR(tmp))
diff --git a/block/Kconfig b/block/Kconfig
index c6ce41a5e5b2..d5d4197b7ed2 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -35,6 +35,9 @@ config BLK_CGROUP_RWSTAT
config BLK_DEV_BSG_COMMON
tristate
+config BLK_ICQ
+ bool
+
config BLK_DEV_BSGLIB
bool "Block layer SG support v4 helper lib"
select BLK_DEV_BSG_COMMON
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 885fee86dfca..615516146086 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -18,6 +18,7 @@ config MQ_IOSCHED_KYBER
config IOSCHED_BFQ
tristate "BFQ I/O scheduler"
+ select BLK_ICQ
help
BFQ I/O scheduler for BLK-MQ. BFQ distributes the bandwidth of
the device among all processes according to their weights,
diff --git a/block/Makefile b/block/Makefile
index 44df57e562bf..f38eaa612929 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -5,7 +5,7 @@
obj-y := bdev.o fops.o bio.o elevator.o blk-core.o blk-sysfs.o \
blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
- blk-exec.o blk-merge.o blk-timeout.o \
+ blk-merge.o blk-timeout.o \
blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
genhd.o ioprio.o badblocks.o partitions/ blk-rq-qos.o \
diff --git a/block/bdev.c b/block/bdev.c
index b1d087e5e205..102837a37051 100644
--- a/block/bdev.c
+++ b/block/bdev.c
@@ -24,7 +24,6 @@
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
-#include <linux/cleancache.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include "../fs/internal.h"
@@ -88,10 +87,6 @@ void invalidate_bdev(struct block_device *bdev)
lru_add_drain_all(); /* make sure all lru add caches are flushed */
invalidate_mapping_pages(mapping, 0, -1);
}
- /* 99% of the time, we don't need to flush the cleancache on the bdev.
- * But, for the strange corners, lets be cautious
- */
- cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);
@@ -665,7 +660,7 @@ static void blkdev_flush_mapping(struct block_device *bdev)
static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
{
struct gendisk *disk = bdev->bd_disk;
- int ret = 0;
+ int ret;
if (disk->fops->open) {
ret = disk->fops->open(bdev, mode);
@@ -750,14 +745,6 @@ struct block_device *blkdev_get_no_open(dev_t dev)
if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
bdev = NULL;
iput(inode);
-
- if (!bdev)
- return NULL;
- if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN)) {
- put_device(&bdev->bd_device);
- return NULL;
- }
-
return bdev;
}
@@ -837,7 +824,7 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
* used in blkdev_get/put().
*/
if ((mode & FMODE_WRITE) && !bdev->bd_write_holder &&
- (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
+ (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
bdev->bd_write_holder = true;
unblock_events = false;
}
@@ -963,15 +950,15 @@ void blkdev_put(struct block_device *bdev, fmode_t mode)
EXPORT_SYMBOL(blkdev_put);
/**
- * lookup_bdev - lookup a struct block_device by name
- * @pathname: special file representing the block device
- * @dev: return value of the block device's dev_t
+ * lookup_bdev() - Look up a struct block_device by name.
+ * @pathname: Name of the block device in the filesystem.
+ * @dev: Pointer to the block device's dev_t, if found.
*
* Lookup the block device's dev_t at @pathname in the current
- * namespace if possible and return it by @dev.
+ * namespace if possible and return it in @dev.
*
- * RETURNS:
- * 0 if succeeded, errno otherwise.
+ * Context: May sleep.
+ * Return: 0 if succeeded, negative errno otherwise.
*/
int lookup_bdev(const char *pathname, dev_t *dev)
{
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index fec18118dc30..36a66e97e3c2 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -433,26 +433,21 @@ static struct bfq_io_cq *icq_to_bic(struct io_cq *icq)
/**
* bfq_bic_lookup - search into @ioc a bic associated to @bfqd.
- * @bfqd: the lookup key.
- * @ioc: the io_context of the process doing I/O.
* @q: the request queue.
*/
-static struct bfq_io_cq *bfq_bic_lookup(struct bfq_data *bfqd,
- struct io_context *ioc,
- struct request_queue *q)
+static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q)
{
- if (ioc) {
- unsigned long flags;
- struct bfq_io_cq *icq;
+ struct bfq_io_cq *icq;
+ unsigned long flags;
- spin_lock_irqsave(&q->queue_lock, flags);
- icq = icq_to_bic(ioc_lookup_icq(ioc, q));
- spin_unlock_irqrestore(&q->queue_lock, flags);
+ if (!current->io_context)
+ return NULL;
- return icq;
- }
+ spin_lock_irqsave(&q->queue_lock, flags);
+ icq = icq_to_bic(ioc_lookup_icq(q));
+ spin_unlock_irqrestore(&q->queue_lock, flags);
- return NULL;
+ return icq;
}
/*
@@ -565,26 +560,134 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
}
}
+#define BFQ_LIMIT_INLINE_DEPTH 16
+
+#ifdef CONFIG_BFQ_GROUP_IOSCHED
+static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
+{
+ struct bfq_data *bfqd = bfqq->bfqd;
+ struct bfq_entity *entity = &bfqq->entity;
+ struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH];
+ struct bfq_entity **entities = inline_entities;
+ int depth, level;
+ int class_idx = bfqq->ioprio_class - 1;
+ struct bfq_sched_data *sched_data;
+ unsigned long wsum;
+ bool ret = false;
+
+ if (!entity->on_st_or_in_serv)
+ return false;
+
+ /* +1 for bfqq entity, root cgroup not included */
+ depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
+ if (depth > BFQ_LIMIT_INLINE_DEPTH) {
+ entities = kmalloc_array(depth, sizeof(*entities), GFP_NOIO);
+ if (!entities)
+ return false;
+ }
+
+ spin_lock_irq(&bfqd->lock);
+ sched_data = entity->sched_data;
+ /* Gather our ancestors as we need to traverse them in reverse order */
+ level = 0;
+ for_each_entity(entity) {
+ /*
+ * If at some level the entity is not even active, allow request
+ * queueing so that BFQ knows there's work to do and activates
+ * entities.
+ */
+ if (!entity->on_st_or_in_serv)
+ goto out;
+ /* Uh, more parents than cgroup subsystem thinks? */
+ if (WARN_ON_ONCE(level >= depth))
+ break;
+ entities[level++] = entity;
+ }
+ WARN_ON_ONCE(level != depth);
+ for (level--; level >= 0; level--) {
+ entity = entities[level];
+ if (level > 0) {
+ wsum = bfq_entity_service_tree(entity)->wsum;
+ } else {
+ int i;
+ /*
+ * For bfqq itself we take into account service trees
+ * of all higher priority classes and multiply their
+ * weights so that a low prio queue from a higher class
+ * gets more requests than a high prio queue from a lower
+ * class.
+ */
+ wsum = 0;
+ for (i = 0; i <= class_idx; i++) {
+ wsum = wsum * IOPRIO_BE_NR +
+ sched_data->service_tree[i].wsum;
+ }
+ }
+ limit = DIV_ROUND_CLOSEST(limit * entity->weight, wsum);
+ if (entity->allocated >= limit) {
+ bfq_log_bfqq(bfqq->bfqd, bfqq,
+ "too many requests: allocated %d limit %d level %d",
+ entity->allocated, limit, level);
+ ret = true;
+ break;
+ }
+ }
+out:
+ spin_unlock_irq(&bfqd->lock);
+ if (entities != inline_entities)
+ kfree(entities);
+ return ret;
+}
+#else
+static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit)
+{
+ return false;
+}
+#endif
+
/*
* Async I/O can easily starve sync I/O (both sync reads and sync
* writes), by consuming all tags. Similarly, storms of sync writes,
* such as those that sync(2) may trigger, can starve sync reads.
* Limit depths of async I/O and sync writes so as to counter both
* problems.
+ *
+ * Also, if a bfq queue or its parent cgroup consumes more tags than would be
+ * appropriate for their weight, we trim the available tag depth to 1. This
+ * avoids a situation where one cgroup can starve another cgroup from tags and
+ * thus block service differentiation among cgroups. Note that because the
+ * queue / cgroup already has many requests allocated and queued, this does not
+ * significantly affect service guarantees coming from the BFQ scheduling
+ * algorithm.
*/
static void bfq_limit_depth(unsigned int op, struct blk_mq_alloc_data *data)
{
struct bfq_data *bfqd = data->q->elevator->elevator_data;
+ struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
+ struct bfq_queue *bfqq = bic ? bic_to_bfqq(bic, op_is_sync(op)) : NULL;
+ int depth;
+ unsigned limit = data->q->nr_requests;
+
+ /* Sync reads have full depth available */
+ if (op_is_sync(op) && !op_is_write(op)) {
+ depth = 0;
+ } else {
+ depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
+ limit = (limit * depth) >> bfqd->full_depth_shift;
+ }
- if (op_is_sync(op) && !op_is_write(op))
- return;
-
- data->shallow_depth =
- bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(op)];
+ /*
+ * Does the queue (or any parent entity) exceed the number of requests that
+ * should be available to it? Heavily limit depth so that it cannot
+ * consume more available requests and thus starve other entities.
+ */
+ if (bfqq && bfqq_request_over_limit(bfqq, limit))
+ depth = 1;
bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
- __func__, bfqd->wr_busy_queues, op_is_sync(op),
- data->shallow_depth);
+ __func__, bfqd->wr_busy_queues, op_is_sync(op), depth);
+ if (depth)
+ data->shallow_depth = depth;
}
static struct bfq_queue *
@@ -1113,7 +1216,8 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
static int bfqq_process_refs(struct bfq_queue *bfqq)
{
- return bfqq->ref - bfqq->allocated - bfqq->entity.on_st_or_in_serv -
+ return bfqq->ref - bfqq->entity.allocated -
+ bfqq->entity.on_st_or_in_serv -
(bfqq->weight_counter != NULL) - bfqq->stable_ref;
}
@@ -1982,20 +2086,19 @@ static void bfq_update_io_intensity(struct bfq_queue *bfqq, u64 now_ns)
* aspect, see the comments on the choice of the queue for injection
* in bfq_select_queue().
*
- * Turning back to the detection of a waker queue, a queue Q is deemed
- * as a waker queue for bfqq if, for three consecutive times, bfqq
- * happens to become non empty right after a request of Q has been
- * completed. In this respect, even if bfqq is empty, we do not check
- * for a waker if it still has some in-flight I/O. In fact, in this
- * case bfqq is actually still being served by the drive, and may
- * receive new I/O on the completion of some of the in-flight
- * requests. In particular, on the first time, Q is tentatively set as
- * a candidate waker queue, while on the third consecutive time that Q
- * is detected, the field waker_bfqq is set to Q, to confirm that Q is
- * a waker queue for bfqq. These detection steps are performed only if
- * bfqq has a long think time, so as to make it more likely that
- * bfqq's I/O is actually being blocked by a synchronization. This
- * last filter, plus the above three-times requirement, make false
+ * Turning back to the detection of a waker queue, a queue Q is deemed as a
+ * waker queue for bfqq if, for three consecutive times, bfqq happens to become
+ * non-empty right after a request of Q has been completed within a given
+ * timeout. In this respect, even if bfqq is empty, we do not check for a waker
+ * if it still has some in-flight I/O. In fact, in this case bfqq is actually
+ * still being served by the drive, and may receive new I/O on the completion
+ * of some of the in-flight requests. In particular, on the first time, Q is
+ * tentatively set as a candidate waker queue, while on the third consecutive
+ * time that Q is detected, the field waker_bfqq is set to Q, to confirm that Q
+ * is a waker queue for bfqq. These detection steps are performed only if bfqq
+ * has a long think time, so as to make it more likely that bfqq's I/O is
+ * actually being blocked by a synchronization. This last filter, plus the
+ * above three-times requirement and time limit for detection, make false
* positives less likely.
*
* NOTE
@@ -2019,6 +2122,8 @@ static void bfq_update_io_intensity(struct bfq_queue *bfqq, u64 now_ns)
static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq,
u64 now_ns)
{
+ char waker_name[MAX_BFQQ_NAME_LENGTH];
+
if (!bfqd->last_completed_rq_bfqq ||
bfqd->last_completed_rq_bfqq == bfqq ||
bfq_bfqq_has_short_ttime(bfqq) ||
@@ -2027,8 +2132,16 @@ static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqd->last_completed_rq_bfqq == bfqq->waker_bfqq)
return;
+ /*
+ * We also reset the waker detection logic if too much time has passed
+ * since the first detection. If wakeups are rare, pointless idling
+ * doesn't hurt throughput that much. The condition below makes sure
+ * we do not uselessly idle blocking waker in more than 1/64 cases.
+ */
if (bfqd->last_completed_rq_bfqq !=
- bfqq->tentative_waker_bfqq) {
+ bfqq->tentative_waker_bfqq ||
+ now_ns > bfqq->waker_detection_started +
+ 128 * (u64)bfqd->bfq_slice_idle) {
/*
* First synchronization detected with a
* candidate waker queue, or with a different
@@ -2037,12 +2150,19 @@ static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqq->tentative_waker_bfqq =
bfqd->last_completed_rq_bfqq;
bfqq->num_waker_detections = 1;
+ bfqq->waker_detection_started = now_ns;
+ bfq_bfqq_name(bfqq->tentative_waker_bfqq, waker_name,
+ MAX_BFQQ_NAME_LENGTH);
+ bfq_log_bfqq(bfqd, bfqq, "set tenative waker %s", waker_name);
} else /* Same tentative waker queue detected again */
bfqq->num_waker_detections++;
if (bfqq->num_waker_detections == 3) {
bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq;
bfqq->tentative_waker_bfqq = NULL;
+ bfq_bfqq_name(bfqq->waker_bfqq, waker_name,
+ MAX_BFQQ_NAME_LENGTH);
+ bfq_log_bfqq(bfqd, bfqq, "set waker %s", waker_name);
/*
* If the waker queue disappears, then
@@ -2332,7 +2452,7 @@ static bool bfq_bio_merge(struct request_queue *q, struct bio *bio,
* returned by bfq_bic_lookup does not go away before
* bfqd->lock is taken.
*/
- struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
+ struct bfq_io_cq *bic = bfq_bic_lookup(q);
bool ret;
spin_lock_irq(&bfqd->lock);
@@ -5878,6 +5998,22 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
}
}
+static void bfqq_request_allocated(struct bfq_queue *bfqq)
+{
+ struct bfq_entity *entity = &bfqq->entity;
+
+ for_each_entity(entity)
+ entity->allocated++;
+}
+
+static void bfqq_request_freed(struct bfq_queue *bfqq)
+{
+ struct bfq_entity *entity = &bfqq->entity;
+
+ for_each_entity(entity)
+ entity->allocated--;
+}
+
/* returns true if it causes the idle timer to be disabled */
static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
{
@@ -5891,8 +6027,8 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
* Release the request's reference to the old bfqq
* and make sure one is taken to the shared queue.
*/
- new_bfqq->allocated++;
- bfqq->allocated--;
+ bfqq_request_allocated(new_bfqq);
+ bfqq_request_freed(bfqq);
new_bfqq->ref++;
/*
* If the bic associated with the process
@@ -5991,48 +6127,7 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
spin_lock_irq(&bfqd->lock);
bfqq = bfq_init_rq(rq);
-
- /*
- * Reqs with at_head or passthrough flags set are to be put
- * directly into dispatch list. Additional case for putting rq
- * directly into the dispatch queue: the only active
- * bfq_queues are bfqq and either its waker bfq_queue or one
- * of its woken bfq_queues. The rationale behind this
- * additional condition is as follows:
- * - consider a bfq_queue, say Q1, detected as a waker of
- * another bfq_queue, say Q2
- * - by definition of a waker, Q1 blocks the I/O of Q2, i.e.,
- * some I/O of Q1 needs to be completed for new I/O of Q2
- * to arrive. A notable example of waker is journald
- * - so, Q1 and Q2 are in any respect the queues of two
- * cooperating processes (or of two cooperating sets of
- * processes): the goal of Q1's I/O is doing what needs to
- * be done so that new Q2's I/O can finally be
- * issued. Therefore, if the service of Q1's I/O is delayed,
- * then Q2's I/O is delayed too. Conversely, if Q2's I/O is
- * delayed, the goal of Q1's I/O is hindered.
- * - as a consequence, if some I/O of Q1/Q2 arrives while
- * Q2/Q1 is the only queue in service, there is absolutely
- * no point in delaying the service of such an I/O. The
- * only possible result is a throughput loss
- * - so, when the above condition holds, the best option is to
- * have the new I/O dispatched as soon as possible
- * - the most effective and efficient way to attain the above
- * goal is to put the new I/O directly in the dispatch
- * list
- * - as an additional restriction, Q1 and Q2 must be the only
- * busy queues for this commit to put the I/O of Q2/Q1 in
- * the dispatch list. This is necessary, because, if also
- * other queues are waiting for service, then putting new
- * I/O directly in the dispatch list may evidently cause a
- * violation of service guarantees for the other queues
- */
- if (!bfqq ||
- (bfqq != bfqd->in_service_queue &&
- bfqd->in_service_queue != NULL &&
- bfq_tot_busy_queues(bfqd) == 1 + bfq_bfqq_busy(bfqq) &&
- (bfqq->waker_bfqq == bfqd->in_service_queue ||
- bfqd->in_service_queue->waker_bfqq == bfqq)) || at_head) {
+ if (!bfqq || at_head) {
if (at_head)
list_add(&rq->queuelist, &bfqd->dispatch);
else
@@ -6059,7 +6154,6 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
* merge).
*/
cmd_flags = rq->cmd_flags;
-
spin_unlock_irq(&bfqd->lock);
bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
@@ -6251,8 +6345,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
static void bfq_finish_requeue_request_body(struct bfq_queue *bfqq)
{
- bfqq->allocated--;
-
+ bfqq_request_freed(bfqq);
bfq_put_queue(bfqq);
}
@@ -6476,6 +6569,16 @@ static void bfq_finish_requeue_request(struct request *rq)
rq->elv.priv[1] = NULL;
}
+static void bfq_finish_request(struct request *rq)
+{
+ bfq_finish_requeue_request(rq);
+
+ if (rq->elv.icq) {
+ put_io_context(rq->elv.icq->ioc);
+ rq->elv.icq = NULL;
+ }
+}
+
/*
* Removes the association between the current task and bfqq, assuming
* that bic points to the bfq iocontext of the task.
@@ -6573,6 +6676,8 @@ static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd,
*/
static void bfq_prepare_request(struct request *rq)
{
+ rq->elv.icq = ioc_find_get_icq(rq->q);
+
/*
* Regardless of whether we have an icq attached, we have to
* clear the scheduler pointers, as they might point to
@@ -6672,7 +6777,7 @@ static struct bfq_queue *bfq_init_rq(struct request *rq)
}
}
- bfqq->allocated++;
+ bfqq_request_allocated(bfqq);
bfqq->ref++;
bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
rq, bfqq, bfqq->ref);
@@ -6835,11 +6940,11 @@ void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg)
* See the comments on bfq_limit_depth for the purpose of
* the depths set in the function. Return minimum shallow depth we'll use.
*/
-static unsigned int bfq_update_depths(struct bfq_data *bfqd,
- struct sbitmap_queue *bt)
+static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt)
{
- unsigned int i, j, min_shallow = UINT_MAX;
+ unsigned int depth = 1U << bt->sb.shift;
+ bfqd->full_depth_shift = bt->sb.shift;
/*
* In-word depths if no bfq_queue is being weight-raised:
* leaving 25% of tags only for sync reads.
@@ -6851,13 +6956,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
* limit 'something'.
*/
/* no more than 50% of tags for async I/O */
- bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
+ bfqd->word_depths[0][0] = max(depth >> 1, 1U);
/*
* no more than 75% of tags for sync writes (25% extra tags
* w.r.t. async I/O, to prevent async I/O from starving sync
* writes)
*/
- bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
+ bfqd->word_depths[0][1] = max((depth * 3) >> 2, 1U);
/*
* In-word depths in case some bfq_queue is being weight-
@@ -6867,25 +6972,18 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
* shortage.
*/
/* no more than ~18% of tags for async I/O */
- bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
+ bfqd->word_depths[1][0] = max((depth * 3) >> 4, 1U);
/* no more than ~37% of tags for sync writes (~20% extra tags) */
- bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
-
- for (i = 0; i < 2; i++)
- for (j = 0; j < 2; j++)
- min_shallow = min(min_shallow, bfqd->word_depths[i][j]);
-
- return min_shallow;
+ bfqd->word_depths[1][1] = max((depth * 6) >> 4, 1U);
}
static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx)
{
struct bfq_data *bfqd = hctx->queue->elevator->elevator_data;
struct blk_mq_tags *tags = hctx->sched_tags;
- unsigned int min_shallow;
- min_shallow = bfq_update_depths(bfqd, &tags->bitmap_tags);
- sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, min_shallow);
+ bfq_update_depths(bfqd, &tags->bitmap_tags);
+ sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
}
static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index)
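For concreteness, the depth fractions above are easy to tabulate stand-alone;
with a 64-tag word (shift 6) the table comes out as 32, 48, 12 and 24:

  #include <stdio.h>

  static unsigned int umax(unsigned int a, unsigned int b)
  {
      return a > b ? a : b;
  }

  int main(void)
  {
      unsigned int shift = 6;              /* e.g. a 64-tag sbitmap word */
      unsigned int depth = 1U << shift;
      unsigned int d[2][2];

      d[0][0] = umax(depth >> 1, 1);       /* 50%: async I/O */
      d[0][1] = umax((depth * 3) >> 2, 1); /* 75%: sync writes */
      d[1][0] = umax((depth * 3) >> 4, 1); /* ~18%: async, weight-raised */
      d[1][1] = umax((depth * 6) >> 4, 1); /* ~37%: sync writes, weight-raised */

      printf("%u %u %u %u\n", d[0][0], d[0][1], d[1][0], d[1][1]); /* 32 48 12 24 */
      return 0;
  }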
@@ -6920,6 +7018,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
spin_unlock_irq(&bfqd->lock);
#endif
+ wbt_enable_default(bfqd->queue);
+
kfree(bfqd);
}
@@ -7300,7 +7400,7 @@ static struct elevator_type iosched_bfq_mq = {
.limit_depth = bfq_limit_depth,
.prepare_request = bfq_prepare_request,
.requeue_request = bfq_finish_requeue_request,
- .finish_request = bfq_finish_requeue_request,
+ .finish_request = bfq_finish_request,
.exit_icq = bfq_exit_icq,
.insert_requests = bfq_insert_requests,
.dispatch_request = bfq_dispatch_request,
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index a73488eec8a4..07288b9da389 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -25,7 +25,7 @@
#define BFQ_DEFAULT_GRP_IOPRIO 0
#define BFQ_DEFAULT_GRP_CLASS IOPRIO_CLASS_BE
-#define MAX_PID_STR_LENGTH 12
+#define MAX_BFQQ_NAME_LENGTH 16
/*
* Soft real-time applications are extremely more latency sensitive
@@ -170,6 +170,9 @@ struct bfq_entity {
/* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */
int budget;
+ /* Number of requests allocated in the subtree of this entity */
+ int allocated;
+
/* device weight, if non-zero, it overrides the default weight of
* bfq_group_data */
int dev_weight;
@@ -266,8 +269,6 @@ struct bfq_queue {
struct request *next_rq;
/* number of sync and async requests queued */
int queued[2];
- /* number of requests currently allocated */
- int allocated;
/* number of pending metadata requests */
int meta_pending;
/* fifo list of requests in sort_list */
@@ -387,6 +388,8 @@ struct bfq_queue {
struct bfq_queue *tentative_waker_bfqq;
/* number of times the same tentative waker has been detected */
unsigned int num_waker_detections;
+ /* time when we started considering this waker */
+ u64 waker_detection_started;
/* node for woken_list, see below */
struct hlist_node woken_list_node;
@@ -768,6 +771,7 @@ struct bfq_data {
* function)
*/
unsigned int word_depths[2][2];
+ unsigned int full_depth_shift;
};
enum bfqq_state_flags {
@@ -1079,26 +1083,27 @@ void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq);
/* --------------- end of interface of B-WF2Q+ ---------------- */
/* Logging facilities. */
-static inline void bfq_pid_to_str(int pid, char *str, int len)
+static inline void bfq_bfqq_name(struct bfq_queue *bfqq, char *str, int len)
{
- if (pid != -1)
- snprintf(str, len, "%d", pid);
+ char type = bfq_bfqq_sync(bfqq) ? 'S' : 'A';
+
+ if (bfqq->pid != -1)
+ snprintf(str, len, "bfq%d%c", bfqq->pid, type);
else
- snprintf(str, len, "SHARED-");
+ snprintf(str, len, "bfqSHARED-%c", type);
}
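The strings the new helper emits, shown as a user-space sketch (the
"bfq<pid><S|A>" and "bfqSHARED-<S|A>" formats are taken from the hunk above;
the rest is scaffolding):

  #include <stdio.h>

  #define MAX_BFQQ_NAME_LENGTH 16

  static void bfqq_name(int pid, int sync, char *str, int len)
  {
      char type = sync ? 'S' : 'A';

      if (pid != -1)
          snprintf(str, len, "bfq%d%c", pid, type);
      else
          snprintf(str, len, "bfqSHARED-%c", type);
  }

  int main(void)
  {
      char name[MAX_BFQQ_NAME_LENGTH];

      bfqq_name(1234, 1, name, sizeof(name));
      puts(name); /* bfq1234S */
      bfqq_name(-1, 0, name, sizeof(name));
      puts(name); /* bfqSHARED-A */
      return 0;
  }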
#ifdef CONFIG_BFQ_GROUP_IOSCHED
struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
- char pid_str[MAX_PID_STR_LENGTH]; \
+ char pid_str[MAX_BFQQ_NAME_LENGTH]; \
if (likely(!blk_trace_note_message_enabled((bfqd)->queue))) \
break; \
- bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
+ bfq_bfqq_name((bfqq), pid_str, MAX_BFQQ_NAME_LENGTH); \
blk_add_cgroup_trace_msg((bfqd)->queue, \
bfqg_to_blkg(bfqq_group(bfqq))->blkcg, \
- "bfq%s%c " fmt, pid_str, \
- bfq_bfqq_sync((bfqq)) ? 'S' : 'A', ##args); \
+ "%s " fmt, pid_str, ##args); \
} while (0)
#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \
@@ -1109,13 +1114,11 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
#else /* CONFIG_BFQ_GROUP_IOSCHED */
#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) do { \
- char pid_str[MAX_PID_STR_LENGTH]; \
+ char pid_str[MAX_BFQQ_NAME_LENGTH]; \
if (likely(!blk_trace_note_message_enabled((bfqd)->queue))) \
break; \
- bfq_pid_to_str((bfqq)->pid, pid_str, MAX_PID_STR_LENGTH); \
- blk_add_trace_msg((bfqd)->queue, "bfq%s%c " fmt, pid_str, \
- bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \
- ##args); \
+ bfq_bfqq_name((bfqq), pid_str, MAX_BFQQ_NAME_LENGTH); \
+ blk_add_trace_msg((bfqd)->queue, "%s " fmt, pid_str, ##args); \
} while (0)
#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0)
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index d25114715459..0827b19820c5 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -373,7 +373,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
- bip->bip_iter.bi_sector += bytes_done >> 9;
+ bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}
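The one-line fix above matters because the integrity iterator counts
protection intervals, not 512-byte data sectors; advancing it by data sectors
walks off the metadata. A sketch of the conversion (the 4 KiB interval size is
an assumption):

  #include <stdio.h>

  /* mirrors bio_integrity_intervals(): sectors >> (interval_exp - 9) */
  static unsigned int integrity_intervals(unsigned int interval_exp,
                                          unsigned int sectors)
  {
      return sectors >> (interval_exp - 9);
  }

  int main(void)
  {
      /* 4 KiB protection interval: 8 data sectors advance by 1 interval */
      printf("%u\n", integrity_intervals(12, 8)); /* 1 */
      return 0;
  }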
diff --git a/block/bio.c b/block/bio.c
index 15ab0d6d1c06..4312a8085396 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -26,7 +26,7 @@
#include "blk-rq-qos.h"
struct bio_alloc_cache {
- struct bio_list free_list;
+ struct bio *free_list;
unsigned int nr;
};
@@ -569,7 +569,8 @@ static void bio_truncate(struct bio *bio, unsigned new_size)
offset = new_size - done;
else
offset = 0;
- zero_user(bv.bv_page, offset, bv.bv_len - offset);
+ zero_user(bv.bv_page, bv.bv_offset + offset,
+ bv.bv_len - offset);
truncated = true;
}
done += bv.bv_len;
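The truncation fix above is about where zeroing starts: the valid bytes of a
bio_vec begin at bv_offset inside the page, so zeroing from the page start
clobbers the wrong bytes and leaves the real tail intact. A plain-memory
sketch:

  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
      char page[4096];
      unsigned int bv_offset = 512, bv_len = 1024, offset = 100;

      memset(page, 'x', sizeof(page));
      /* old (wrong): memset(page + offset, 0, bv_len - offset); */
      memset(page + bv_offset + offset, 0, bv_len - offset); /* fixed */
      printf("%d %d\n", page[bv_offset + offset - 1],
             page[bv_offset + offset]); /* 120 0: data kept, tail zeroed */
      return 0;
  }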
@@ -630,7 +631,8 @@ static void bio_alloc_cache_prune(struct bio_alloc_cache *cache,
unsigned int i = 0;
struct bio *bio;
- while ((bio = bio_list_pop(&cache->free_list)) != NULL) {
+ while ((bio = cache->free_list) != NULL) {
+ cache->free_list = bio->bi_next;
cache->nr--;
bio_free(bio);
if (++i == nr)
@@ -689,7 +691,8 @@ void bio_put(struct bio *bio)
bio_uninit(bio);
cache = per_cpu_ptr(bio->bi_pool->cache, get_cpu());
- bio_list_add_head(&cache->free_list, bio);
+ bio->bi_next = cache->free_list;
+ cache->free_list = bio;
if (++cache->nr > ALLOC_CACHE_MAX + ALLOC_CACHE_SLACK)
bio_alloc_cache_prune(cache, ALLOC_CACHE_SLACK);
put_cpu();
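The cache rework above swaps struct bio_list (head plus tail pointer) for a
bare LIFO chained through bi_next, so a free-list push or pop touches a single
pointer. A generic sketch of the pattern:

  #include <stdio.h>

  struct node {
      struct node *next;
      int id;
  };

  struct cache {
      struct node *free_list;
      unsigned int nr;
  };

  static void cache_push(struct cache *c, struct node *n)
  {
      n->next = c->free_list;
      c->free_list = n;
      c->nr++;
  }

  static struct node *cache_pop(struct cache *c)
  {
      struct node *n = c->free_list;

      if (n) {
          c->free_list = n->next;
          c->nr--;
      }
      return n;
  }

  int main(void)
  {
      struct cache c = { NULL, 0 };
      struct node a = { NULL, 1 }, b = { NULL, 2 };
      struct node *first, *second;

      cache_push(&c, &a);
      cache_push(&c, &b);
      first = cache_pop(&c);
      second = cache_pop(&c);
      printf("%d %d\n", first->id, second->id); /* 2 1: LIFO order */
      return 0;
  }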
@@ -1033,6 +1036,28 @@ int bio_add_page(struct bio *bio, struct page *page,
}
EXPORT_SYMBOL(bio_add_page);
+/**
+ * bio_add_folio - Attempt to add part of a folio to a bio.
+ * @bio: BIO to add to.
+ * @folio: Folio to add.
+ * @len: How many bytes from the folio to add.
+ * @off: First byte in this folio to add.
+ *
+ * Filesystems that use folios can call this function instead of calling
+ * bio_add_page() for each page in the folio. If @off is bigger than
+ * PAGE_SIZE, this function can create a bio_vec that starts in a page
+ * after the bv_page. BIOs do not support folios that are 4GiB or larger.
+ *
+ * Return: Whether the addition was successful.
+ */
+bool bio_add_folio(struct bio *bio, struct folio *folio, size_t len,
+ size_t off)
+{
+ if (len > UINT_MAX || off > UINT_MAX)
+ return false;
+ return bio_add_page(bio, &folio->page, len, off) > 0;
+}
+
void __bio_release_pages(struct bio *bio, bool mark_dirty)
{
struct bvec_iter_all iter_all;
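How a caller might use the new helper; a kernel-context sketch only, not
buildable stand-alone, and the writeback framing plus the helper name are
assumptions rather than API:

  #include <linux/bio.h>
  #include <linux/mm.h>

  /* hypothetical helper: try to queue a whole folio for I/O */
  static void add_folio_or_submit(struct bio *bio, struct folio *folio)
  {
      if (!bio_add_folio(bio, folio, folio_size(folio), 0)) {
          /* bio is full: send it on and start a fresh one (not shown) */
          submit_bio(bio);
      }
  }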
@@ -1704,8 +1729,9 @@ struct bio *bio_alloc_kiocb(struct kiocb *kiocb, unsigned short nr_vecs,
return bio_alloc_bioset(GFP_KERNEL, nr_vecs, bs);
cache = per_cpu_ptr(bs->cache, get_cpu());
- bio = bio_list_pop(&cache->free_list);
- if (bio) {
+ if (cache->free_list) {
+ bio = cache->free_list;
+ cache->free_list = bio->bi_next;
cache->nr--;
put_cpu();
bio_init(bio, nr_vecs ? bio->bi_inline_vecs : NULL, nr_vecs);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 663aabfeba18..650f7e27989f 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -30,6 +30,7 @@
#include <linux/blk-cgroup.h>
#include <linux/tracehook.h>
#include <linux/psi.h>
+#include <linux/part_stat.h>
#include "blk.h"
#include "blk-ioprio.h"
#include "blk-throttle.h"
diff --git a/block/blk-core.c b/block/blk-core.c
index 1378d084c770..1039515c99d6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
#include <linux/blk-pm.h>
#include <linux/blk-integrity.h>
#include <linux/highmem.h>
@@ -40,6 +39,7 @@
#include <linux/debugfs.h>
#include <linux/bpf.h>
#include <linux/psi.h>
+#include <linux/part_stat.h>
#include <linux/sched/sysctl.h>
#include <linux/blk-crypto.h>
@@ -47,7 +47,6 @@
#include <trace/events/block.h>
#include "blk.h"
-#include "blk-mq.h"
#include "blk-mq-sched.h"
#include "blk-pm.h"
#include "blk-throttle.h"
@@ -67,6 +66,7 @@ DEFINE_IDA(blk_queue_ida);
* For queue allocation
*/
struct kmem_cache *blk_requestq_cachep;
+struct kmem_cache *blk_requestq_srcu_cachep;
/*
* Controlling structure to kblockd
@@ -109,23 +109,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_queue_flag_test_and_set);
-void blk_rq_init(struct request_queue *q, struct request *rq)
-{
- memset(rq, 0, sizeof(*rq));
-
- INIT_LIST_HEAD(&rq->queuelist);
- rq->q = q;
- rq->__sector = (sector_t) -1;
- INIT_HLIST_NODE(&rq->hash);
- RB_CLEAR_NODE(&rq->rb_node);
- rq->tag = BLK_MQ_NO_TAG;
- rq->internal_tag = BLK_MQ_NO_TAG;
- rq->start_time_ns = ktime_get_ns();
- rq->part = NULL;
- blk_crypto_rq_set_defaults(rq);
-}
-EXPORT_SYMBOL(blk_rq_init);
-
#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const blk_op_name[] = {
REQ_OP_NAME(READ),
@@ -216,38 +199,15 @@ int blk_status_to_errno(blk_status_t status)
}
EXPORT_SYMBOL_GPL(blk_status_to_errno);
-void blk_print_req_error(struct request *req, blk_status_t status)
+const char *blk_status_to_str(blk_status_t status)
{
int idx = (__force int)status;
if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
- return;
-
- printk_ratelimited(KERN_ERR
- "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
- "phys_seg %u prio class %u\n",
- blk_errors[idx].name,
- req->rq_disk ? req->rq_disk->disk_name : "?",
- blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
- req->cmd_flags & ~REQ_OP_MASK,
- req->nr_phys_segments,
- IOPRIO_PRIO_CLASS(req->ioprio));
+ return "<null>";
+ return blk_errors[idx].name;
}
-void blk_dump_rq_flags(struct request *rq, char *msg)
-{
- printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
- rq->rq_disk ? rq->rq_disk->disk_name : "?",
- (unsigned long long) rq->cmd_flags);
-
- printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
- (unsigned long long)blk_rq_pos(rq),
- blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
- printk(KERN_INFO " bio %p, biotail %p, len %u\n",
- rq->bio, rq->biotail, blk_rq_bytes(rq));
-}
-EXPORT_SYMBOL(blk_dump_rq_flags);
-
/**
* blk_sync_queue - cancel any pending callbacks on a queue
* @q: the queue
@@ -324,13 +284,6 @@ void blk_queue_start_drain(struct request_queue *q)
wake_up_all(&q->mq_freeze_wq);
}
-void blk_set_queue_dying(struct request_queue *q)
-{
- blk_queue_flag_set(QUEUE_FLAG_DYING, q);
- blk_queue_start_drain(q);
-}
-EXPORT_SYMBOL_GPL(blk_set_queue_dying);
-
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@@ -348,7 +301,8 @@ void blk_cleanup_queue(struct request_queue *q)
WARN_ON_ONCE(blk_queue_registered(q));
/* mark @q DYING, no new request or merges will be allowed afterwards */
- blk_set_queue_dying(q);
+ blk_queue_flag_set(QUEUE_FLAG_DYING, q);
+ blk_queue_start_drain(q);
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
@@ -478,21 +432,27 @@ static void blk_timeout_work(struct work_struct *work)
{
}
-struct request_queue *blk_alloc_queue(int node_id)
+struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
{
struct request_queue *q;
int ret;
- q = kmem_cache_alloc_node(blk_requestq_cachep,
- GFP_KERNEL | __GFP_ZERO, node_id);
+ q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
+ GFP_KERNEL | __GFP_ZERO, node_id);
if (!q)
return NULL;
+ if (alloc_srcu) {
+ blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
+ if (init_srcu_struct(q->srcu) != 0)
+ goto fail_q;
+ }
+
q->last_merge = NULL;
q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
if (q->id < 0)
- goto fail_q;
+ goto fail_srcu;
ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, 0);
if (ret)
@@ -549,8 +509,11 @@ fail_split:
bioset_exit(&q->bio_split);
fail_id:
ida_simple_remove(&blk_queue_ida, q->id);
+fail_srcu:
+ if (alloc_srcu)
+ cleanup_srcu_struct(q->srcu);
fail_q:
- kmem_cache_free(blk_requestq_cachep, q);
+ kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
return NULL;
}
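The new request_queue_srcu cache sizes the object as sizeof(struct
request_queue) + sizeof(struct srcu_struct), so only queues flagged
QUEUE_FLAG_HAS_SRCU pay for the SRCU state; the BUILD_BUG_ON added to
blk_dev_init() later in this patch pins the tail's placement. A user-space
model of the tail-allocation trick:

  #include <stdio.h>
  #include <stdlib.h>

  struct srcu { int state; };

  struct queue {
      int id;
      struct srcu srcu[]; /* tail storage, present only if allocated for */
  };

  static struct queue *alloc_queue(int with_srcu)
  {
      size_t sz = sizeof(struct queue) +
                  (with_srcu ? sizeof(struct srcu) : 0);
      return calloc(1, sz);
  }

  int main(void)
  {
      struct queue *q = alloc_queue(1);

      if (!q)
          return 1;
      q->srcu[0].state = 7; /* valid only because the tail was allocated */
      printf("%d\n", q->srcu[0].state);
      free(q);
      return 0;
  }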
@@ -594,7 +557,7 @@ static int __init setup_fail_make_request(char *str)
}
__setup("fail_make_request=", setup_fail_make_request);
-static bool should_fail_request(struct block_device *part, unsigned int bytes)
+bool should_fail_request(struct block_device *part, unsigned int bytes)
{
return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
}
@@ -608,15 +571,6 @@ static int __init fail_make_request_debugfs(void)
}
late_initcall(fail_make_request_debugfs);
-
-#else /* CONFIG_FAIL_MAKE_REQUEST */
-
-static inline bool should_fail_request(struct block_device *part,
- unsigned int bytes)
-{
- return false;
-}
-
#endif /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool bio_check_ro(struct bio *bio)
@@ -802,15 +756,6 @@ noinline_for_stack bool submit_bio_checks(struct bio *bio)
break;
}
- /*
- * Various block parts want %current->io_context, so allocate it up
- * front rather than dealing with lots of pain to allocate it only
- * where needed. This may fail and the block layer knows how to live
- * with it.
- */
- if (unlikely(!current->io_context))
- create_task_io_context(current, GFP_ATOMIC, q->node);
-
if (blk_throtl_bio(bio))
return false;
@@ -836,17 +781,21 @@ end_io:
static void __submit_bio_fops(struct gendisk *disk, struct bio *bio)
{
- if (unlikely(bio_queue_enter(bio) != 0))
- return;
- if (submit_bio_checks(bio) && blk_crypto_bio_prep(&bio))
- disk->fops->submit_bio(bio);
- blk_queue_exit(disk->queue);
+ if (blk_crypto_bio_prep(&bio)) {
+ if (likely(bio_queue_enter(bio) == 0)) {
+ disk->fops->submit_bio(bio);
+ blk_queue_exit(disk->queue);
+ }
+ }
}
static void __submit_bio(struct bio *bio)
{
struct gendisk *disk = bio->bi_bdev->bd_disk;
+ if (unlikely(!submit_bio_checks(bio)))
+ return;
+
if (!disk->fops->submit_bio)
blk_mq_submit_bio(bio);
else
@@ -1090,135 +1039,7 @@ int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
}
EXPORT_SYMBOL_GPL(iocb_bio_iopoll);
-/**
- * blk_cloned_rq_check_limits - Helper function to check a cloned request
- * for the new queue limits
- * @q: the queue
- * @rq: the request being checked
- *
- * Description:
- * @rq may have been made based on weaker limitations of upper-level queues
- * in request stacking drivers, and it may violate the limitation of @q.
- * Since the block layer and the underlying device driver trust @rq
- * after it is inserted to @q, it should be checked against @q before
- * the insertion using this generic function.
- *
- * Request stacking drivers like request-based dm may change the queue
- * limits when retrying requests on other queues. Those requests need
- * to be checked against the new queue limits again during dispatch.
- */
-static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
- struct request *rq)
-{
- unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
-
- if (blk_rq_sectors(rq) > max_sectors) {
- /*
- * SCSI device does not have a good way to return if
- * Write Same/Zero is actually supported. If a device rejects
- * a non-read/write command (discard, write same,etc.) the
- * low-level device driver will set the relevant queue limit to
- * 0 to prevent blk-lib from issuing more of the offending
- * operations. Commands queued prior to the queue limit being
- * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
- * errors being propagated to upper layers.
- */
- if (max_sectors == 0)
- return BLK_STS_NOTSUPP;
-
- printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
- __func__, blk_rq_sectors(rq), max_sectors);
- return BLK_STS_IOERR;
- }
-
- /*
- * The queue settings related to segment counting may differ from the
- * original queue.
- */
- rq->nr_phys_segments = blk_recalc_rq_segments(rq);
- if (rq->nr_phys_segments > queue_max_segments(q)) {
- printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
- __func__, rq->nr_phys_segments, queue_max_segments(q));
- return BLK_STS_IOERR;
- }
-
- return BLK_STS_OK;
-}
-
-/**
- * blk_insert_cloned_request - Helper for stacking drivers to submit a request
- * @q: the queue to submit the request
- * @rq: the request being queued
- */
-blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
-{
- blk_status_t ret;
-
- ret = blk_cloned_rq_check_limits(q, rq);
- if (ret != BLK_STS_OK)
- return ret;
-
- if (rq->rq_disk &&
- should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
- return BLK_STS_IOERR;
-
- if (blk_crypto_insert_cloned_request(rq))
- return BLK_STS_IOERR;
-
- blk_account_io_start(rq);
-
- /*
- * Since we have a scheduler attached on the top device,
- * bypass a potential scheduler on the bottom device for
- * insert.
- */
- return blk_mq_request_issue_directly(rq, true);
-}
-EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
-
-/**
- * blk_rq_err_bytes - determine number of bytes till the next failure boundary
- * @rq: request to examine
- *
- * Description:
- * A request could be merge of IOs which require different failure
- * handling. This function determines the number of bytes which
- * can be failed from the beginning of the request without
- * crossing into area which need to be retried further.
- *
- * Return:
- * The number of bytes to fail.
- */
-unsigned int blk_rq_err_bytes(const struct request *rq)
-{
- unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
- unsigned int bytes = 0;
- struct bio *bio;
-
- if (!(rq->rq_flags & RQF_MIXED_MERGE))
- return blk_rq_bytes(rq);
-
- /*
- * Currently the only 'mixing' which can happen is between
- * different fastfail types. We can safely fail portions
- * which have all the failfast bits that the first one has -
- * the ones which are at least as eager to fail as the first
- * one.
- */
- for (bio = rq->bio; bio; bio = bio->bi_next) {
- if ((bio->bi_opf & ff) != ff)
- break;
- bytes += bio->bi_iter.bi_size;
- }
-
- /* this could lead to infinite loop */
- BUG_ON(blk_rq_bytes(rq) && !bytes);
- return bytes;
-}
-EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
-
-static void update_io_ticks(struct block_device *part, unsigned long now,
- bool end)
+void update_io_ticks(struct block_device *part, unsigned long now, bool end)
{
unsigned long stamp;
again:
@@ -1233,47 +1054,35 @@ again:
}
}
-void __blk_account_io_done(struct request *req, u64 now)
-{
- const int sgrp = op_stat_group(req_op(req));
-
- part_stat_lock();
- update_io_ticks(req->part, jiffies, true);
- part_stat_inc(req->part, ios[sgrp]);
- part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
- part_stat_unlock();
-}
-
-void __blk_account_io_start(struct request *rq)
-{
- /* passthrough requests can hold bios that do not have ->bi_bdev set */
- if (rq->bio && rq->bio->bi_bdev)
- rq->part = rq->bio->bi_bdev;
- else
- rq->part = rq->rq_disk->part0;
-
- part_stat_lock();
- update_io_ticks(rq->part, jiffies, false);
- part_stat_unlock();
-}
-
static unsigned long __part_start_io_acct(struct block_device *part,
- unsigned int sectors, unsigned int op)
+ unsigned int sectors, unsigned int op,
+ unsigned long start_time)
{
const int sgrp = op_stat_group(op);
- unsigned long now = READ_ONCE(jiffies);
part_stat_lock();
- update_io_ticks(part, now, false);
+ update_io_ticks(part, start_time, false);
part_stat_inc(part, ios[sgrp]);
part_stat_add(part, sectors[sgrp], sectors);
part_stat_local_inc(part, in_flight[op_is_write(op)]);
part_stat_unlock();
- return now;
+ return start_time;
}
/**
+ * bio_start_io_acct_time - start I/O accounting for bio based drivers
+ * @bio: bio to start account for
+ * @start_time: start time that should be passed back to bio_end_io_acct().
+ */
+void bio_start_io_acct_time(struct bio *bio, unsigned long start_time)
+{
+ __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
+ bio_op(bio), start_time);
+}
+EXPORT_SYMBOL_GPL(bio_start_io_acct_time);
+
+/**
* bio_start_io_acct - start I/O accounting for bio based drivers
* @bio: bio to start account for
*
@@ -1281,14 +1090,15 @@ static unsigned long __part_start_io_acct(struct block_device *part,
*/
unsigned long bio_start_io_acct(struct bio *bio)
{
- return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio), bio_op(bio));
+ return __part_start_io_acct(bio->bi_bdev, bio_sectors(bio),
+ bio_op(bio), jiffies);
}
EXPORT_SYMBOL_GPL(bio_start_io_acct);
unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
unsigned int op)
{
- return __part_start_io_acct(disk->part0, sectors, op);
+ return __part_start_io_acct(disk->part0, sectors, op, jiffies);
}
EXPORT_SYMBOL(disk_start_io_acct);
@@ -1320,46 +1130,6 @@ void disk_end_io_acct(struct gendisk *disk, unsigned int op,
}
EXPORT_SYMBOL(disk_end_io_acct);
-/*
- * Steal bios from a request and add them to a bio list.
- * The request must not have been partially completed before.
- */
-void blk_steal_bios(struct bio_list *list, struct request *rq)
-{
- if (rq->bio) {
- if (list->tail)
- list->tail->bi_next = rq->bio;
- else
- list->head = rq->bio;
- list->tail = rq->biotail;
-
- rq->bio = NULL;
- rq->biotail = NULL;
- }
-
- rq->__data_len = 0;
-}
-EXPORT_SYMBOL_GPL(blk_steal_bios);
-
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-/**
- * rq_flush_dcache_pages - Helper function to flush all pages in a request
- * @rq: the request to be flushed
- *
- * Description:
- * Flush all pages in @rq.
- */
-void rq_flush_dcache_pages(struct request *rq)
-{
- struct req_iterator iter;
- struct bio_vec bvec;
-
- rq_for_each_segment(bvec, rq, iter)
- flush_dcache_page(bvec.bv_page);
-}
-EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
-#endif
-
/**
* blk_lld_busy - Check if underlying low-level drivers of a device are busy
* @q : the queue of the device being checked
@@ -1388,93 +1158,6 @@ int blk_lld_busy(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_lld_busy);
-/**
- * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
- * @rq: the clone request to be cleaned up
- *
- * Description:
- * Free all bios in @rq for a cloned request.
- */
-void blk_rq_unprep_clone(struct request *rq)
-{
- struct bio *bio;
-
- while ((bio = rq->bio) != NULL) {
- rq->bio = bio->bi_next;
-
- bio_put(bio);
- }
-}
-EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
-
-/**
- * blk_rq_prep_clone - Helper function to setup clone request
- * @rq: the request to be setup
- * @rq_src: original request to be cloned
- * @bs: bio_set that bios for clone are allocated from
- * @gfp_mask: memory allocation mask for bio
- * @bio_ctr: setup function to be called for each clone bio.
- * Returns %0 for success, non %0 for failure.
- * @data: private data to be passed to @bio_ctr
- *
- * Description:
- * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
- * Also, pages which the original bios are pointing to are not copied
- * and the cloned bios just point same pages.
- * So cloned bios must be completed before original bios, which means
- * the caller must complete @rq before @rq_src.
- */
-int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
- struct bio_set *bs, gfp_t gfp_mask,
- int (*bio_ctr)(struct bio *, struct bio *, void *),
- void *data)
-{
- struct bio *bio, *bio_src;
-
- if (!bs)
- bs = &fs_bio_set;
-
- __rq_for_each_bio(bio_src, rq_src) {
- bio = bio_clone_fast(bio_src, gfp_mask, bs);
- if (!bio)
- goto free_and_out;
-
- if (bio_ctr && bio_ctr(bio, bio_src, data))
- goto free_and_out;
-
- if (rq->bio) {
- rq->biotail->bi_next = bio;
- rq->biotail = bio;
- } else {
- rq->bio = rq->biotail = bio;
- }
- bio = NULL;
- }
-
- /* Copy attributes of the original request to the clone request. */
- rq->__sector = blk_rq_pos(rq_src);
- rq->__data_len = blk_rq_bytes(rq_src);
- if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
- rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
- rq->special_vec = rq_src->special_vec;
- }
- rq->nr_phys_segments = rq_src->nr_phys_segments;
- rq->ioprio = rq_src->ioprio;
-
- if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
- goto free_and_out;
-
- return 0;
-
-free_and_out:
- if (bio)
- bio_put(bio);
- blk_rq_unprep_clone(rq);
-
- return -ENOMEM;
-}
-EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
-
int kblockd_schedule_work(struct work_struct *work)
{
return queue_work(kblockd_workqueue, work);
@@ -1639,6 +1322,9 @@ int __init blk_dev_init(void)
sizeof_field(struct request, cmd_flags));
BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
sizeof_field(struct bio, bi_opf));
+ BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
+ __alignof__(struct request_queue)) !=
+ sizeof(struct request_queue));
/* used for unplugging and affects IO latency/throughput - HIGHPRI */
kblockd_workqueue = alloc_workqueue("kblockd",
@@ -1649,6 +1335,10 @@ int __init blk_dev_init(void)
blk_requestq_cachep = kmem_cache_create("request_queue",
sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
+ blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
+ sizeof(struct request_queue) +
+ sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);
+
blk_debugfs_root = debugfs_create_dir("block", NULL);
return 0;
diff --git a/block/blk-crypto-profile.c b/block/blk-crypto-profile.c
index 605ba0626a5c..96c511967386 100644
--- a/block/blk-crypto-profile.c
+++ b/block/blk-crypto-profile.c
@@ -463,11 +463,6 @@ bool blk_crypto_register(struct blk_crypto_profile *profile,
}
EXPORT_SYMBOL_GPL(blk_crypto_register);
-void blk_crypto_unregister(struct request_queue *q)
-{
- q->crypto_profile = NULL;
-}
-
/**
* blk_crypto_intersect_capabilities() - restrict supported crypto capabilities
* by child device
diff --git a/block/blk-exec.c b/block/blk-exec.c
deleted file mode 100644
index 1b8b47f6e79b..000000000000
--- a/block/blk-exec.c
+++ /dev/null
@@ -1,116 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Functions related to setting various queue properties from drivers
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <linux/blk-mq.h>
-#include <linux/sched/sysctl.h>
-
-#include "blk.h"
-#include "blk-mq-sched.h"
-
-/**
- * blk_end_sync_rq - executes a completion event on a request
- * @rq: request to complete
- * @error: end I/O status of the request
- */
-static void blk_end_sync_rq(struct request *rq, blk_status_t error)
-{
- struct completion *waiting = rq->end_io_data;
-
- rq->end_io_data = (void *)(uintptr_t)error;
-
- /*
- * complete last, if this is a stack request the process (and thus
- * the rq pointer) could be invalid right after this complete()
- */
- complete(waiting);
-}
-
-/**
- * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
- * @bd_disk: matching gendisk
- * @rq: request to insert
- * @at_head: insert request at head or tail of queue
- * @done: I/O completion handler
- *
- * Description:
- * Insert a fully prepared request at the back of the I/O scheduler queue
- * for execution. Don't wait for completion.
- *
- * Note:
- * This function will invoke @done directly if the queue is dead.
- */
-void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq,
- int at_head, rq_end_io_fn *done)
-{
- WARN_ON(irqs_disabled());
- WARN_ON(!blk_rq_is_passthrough(rq));
-
- rq->rq_disk = bd_disk;
- rq->end_io = done;
-
- blk_account_io_start(rq);
-
- /*
- * don't check dying flag for MQ because the request won't
- * be reused after dying flag is set
- */
- blk_mq_sched_insert_request(rq, at_head, true, false);
-}
-EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
-
-static bool blk_rq_is_poll(struct request *rq)
-{
- if (!rq->mq_hctx)
- return false;
- if (rq->mq_hctx->type != HCTX_TYPE_POLL)
- return false;
- if (WARN_ON_ONCE(!rq->bio))
- return false;
- return true;
-}
-
-static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
-{
- do {
- bio_poll(rq->bio, NULL, 0);
- cond_resched();
- } while (!completion_done(wait));
-}
-
-/**
- * blk_execute_rq - insert a request into queue for execution
- * @bd_disk: matching gendisk
- * @rq: request to insert
- * @at_head: insert request at head or tail of queue
- *
- * Description:
- * Insert a fully prepared request at the back of the I/O scheduler queue
- * for execution and wait for completion.
- * Return: The blk_status_t result provided to blk_mq_end_request().
- */
-blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq, int at_head)
-{
- DECLARE_COMPLETION_ONSTACK(wait);
- unsigned long hang_check;
-
- rq->end_io_data = &wait;
- blk_execute_rq_nowait(bd_disk, rq, at_head, blk_end_sync_rq);
-
- /* Prevent hang_check timer from firing at us during very long I/O */
- hang_check = sysctl_hung_task_timeout_secs;
-
- if (blk_rq_is_poll(rq))
- blk_rq_poll_completion(rq, &wait);
- else if (hang_check)
- while (!wait_for_completion_io_timeout(&wait, hang_check * (HZ/2)));
- else
- wait_for_completion_io(&wait);
-
- return (blk_status_t)(uintptr_t)rq->end_io_data;
-}
-EXPORT_SYMBOL(blk_execute_rq);
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 1fce6d16e6d3..e4df894189ce 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -69,6 +69,7 @@
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>
+#include <linux/part_stat.h>
#include "blk.h"
#include "blk-mq.h"
@@ -95,6 +96,12 @@ enum {
static void blk_kick_flush(struct request_queue *q,
struct blk_flush_queue *fq, unsigned int flags);
+static inline struct blk_flush_queue *
+blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
+{
+ return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
+}
+
static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
{
unsigned int policy = 0;
@@ -138,7 +145,7 @@ static void blk_flush_queue_rq(struct request *rq, bool add_front)
static void blk_account_io_flush(struct request *rq)
{
- struct block_device *part = rq->rq_disk->part0;
+ struct block_device *part = rq->q->disk->part0;
part_stat_lock();
part_stat_inc(part, ios[STAT_FLUSH]);
@@ -222,7 +229,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
/* release the tag's ownership to the req cloned from */
spin_lock_irqsave(&fq->mq_flush_lock, flags);
- if (!refcount_dec_and_test(&flush_rq->ref)) {
+ if (!req_ref_put_and_test(flush_rq)) {
fq->rq_status = error;
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
return;
@@ -235,8 +242,10 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
* avoiding use-after-free.
*/
WRITE_ONCE(flush_rq->state, MQ_RQ_IDLE);
- if (fq->rq_status != BLK_STS_OK)
+ if (fq->rq_status != BLK_STS_OK) {
error = fq->rq_status;
+ fq->rq_status = BLK_STS_OK;
+ }
if (!q->elevator) {
flush_rq->tag = BLK_MQ_NO_TAG;
@@ -332,7 +341,6 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
flush_rq->rq_flags |= RQF_FLUSH_SEQ;
- flush_rq->rq_disk = first_rq->rq_disk;
flush_rq->end_io = flush_end_io;
/*
* Order WRITE ->end_io and WRITE rq->ref, and its pair is the one
@@ -341,7 +349,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
* and READ flush_rq->end_io
*/
smp_wmb();
- refcount_set(&flush_rq->ref, 1);
+ req_ref_set(flush_rq, 1);
blk_flush_queue_rq(flush_rq, false);
}
diff --git a/block/blk-ia-ranges.c b/block/blk-ia-ranges.c
index b925f3db3ab7..18c68d8b9138 100644
--- a/block/blk-ia-ranges.c
+++ b/block/blk-ia-ranges.c
@@ -144,7 +144,7 @@ int disk_register_independent_access_ranges(struct gendisk *disk,
&q->kobj, "%s", "independent_access_ranges");
if (ret) {
q->ia_ranges = NULL;
- kfree(iars);
+ kobject_put(&iars->kobj);
return ret;
}
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index d670d54e5f7a..69eed260a823 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -411,7 +411,7 @@ void blk_integrity_register(struct gendisk *disk, struct blk_integrity *template
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
if (disk->queue->crypto_profile) {
pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together. Disabling hardware inline encryption.\n");
- blk_crypto_unregister(disk->queue);
+ disk->queue->crypto_profile = NULL;
}
#endif
}
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 57299f860d41..11f49f78db32 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -8,22 +8,25 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
+#include <linux/security.h>
#include <linux/sched/task.h>
#include "blk.h"
+#include "blk-mq-sched.h"
/*
* For io context allocations
*/
static struct kmem_cache *iocontext_cachep;
+#ifdef CONFIG_BLK_ICQ
/**
* get_io_context - increment reference count to io_context
* @ioc: io_context to get
*
* Increment reference count to @ioc.
*/
-void get_io_context(struct io_context *ioc)
+static void get_io_context(struct io_context *ioc)
{
BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
atomic_long_inc(&ioc->refcount);
@@ -53,6 +56,16 @@ static void ioc_exit_icq(struct io_cq *icq)
icq->flags |= ICQ_EXITED;
}
+static void ioc_exit_icqs(struct io_context *ioc)
+{
+ struct io_cq *icq;
+
+ spin_lock_irq(&ioc->lock);
+ hlist_for_each_entry(icq, &ioc->icq_list, ioc_node)
+ ioc_exit_icq(icq);
+ spin_unlock_irq(&ioc->lock);
+}
+
/*
* Release an icq. Called with ioc locked for blk-mq, and with both ioc
* and queue locked for legacy.
@@ -132,68 +145,74 @@ static void ioc_release_fn(struct work_struct *work)
kmem_cache_free(iocontext_cachep, ioc);
}
-/**
- * put_io_context - put a reference of io_context
- * @ioc: io_context to put
- *
- * Decrement reference count of @ioc and release it if the count reaches
- * zero.
+/*
+ * Releasing icqs requires reverse order double locking and we may already be
+ * holding a queue_lock. Do it asynchronously from a workqueue.
*/
-void put_io_context(struct io_context *ioc)
+static bool ioc_delay_free(struct io_context *ioc)
{
unsigned long flags;
- bool free_ioc = false;
- if (ioc == NULL)
- return;
-
- BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
-
- /*
- * Releasing ioc requires reverse order double locking and we may
- * already be holding a queue_lock. Do it asynchronously from wq.
- */
- if (atomic_long_dec_and_test(&ioc->refcount)) {
- spin_lock_irqsave(&ioc->lock, flags);
- if (!hlist_empty(&ioc->icq_list))
- queue_work(system_power_efficient_wq,
- &ioc->release_work);
- else
- free_ioc = true;
+ spin_lock_irqsave(&ioc->lock, flags);
+ if (!hlist_empty(&ioc->icq_list)) {
+ queue_work(system_power_efficient_wq, &ioc->release_work);
spin_unlock_irqrestore(&ioc->lock, flags);
+ return true;
}
-
- if (free_ioc)
- kmem_cache_free(iocontext_cachep, ioc);
+ spin_unlock_irqrestore(&ioc->lock, flags);
+ return false;
}
/**
- * put_io_context_active - put active reference on ioc
- * @ioc: ioc of interest
+ * ioc_clear_queue - break any ioc association with the specified queue
+ * @q: request_queue being cleared
*
- * Undo get_io_context_active(). If active reference reaches zero after
- * put, @ioc can never issue further IOs and ioscheds are notified.
+ * Walk @q->icq_list and exit all io_cq's.
*/
-void put_io_context_active(struct io_context *ioc)
+void ioc_clear_queue(struct request_queue *q)
{
- struct io_cq *icq;
+ LIST_HEAD(icq_list);
- if (!atomic_dec_and_test(&ioc->active_ref)) {
- put_io_context(ioc);
- return;
- }
+ spin_lock_irq(&q->queue_lock);
+ list_splice_init(&q->icq_list, &icq_list);
+ spin_unlock_irq(&q->queue_lock);
- spin_lock_irq(&ioc->lock);
- hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
- if (icq->flags & ICQ_EXITED)
- continue;
+ rcu_read_lock();
+ while (!list_empty(&icq_list)) {
+ struct io_cq *icq =
+ list_entry(icq_list.next, struct io_cq, q_node);
- ioc_exit_icq(icq);
+ spin_lock_irq(&icq->ioc->lock);
+ if (!(icq->flags & ICQ_DESTROYED))
+ ioc_destroy_icq(icq);
+ spin_unlock_irq(&icq->ioc->lock);
}
- spin_unlock_irq(&ioc->lock);
+ rcu_read_unlock();
+}
+#else /* CONFIG_BLK_ICQ */
+static inline void ioc_exit_icqs(struct io_context *ioc)
+{
+}
+static inline bool ioc_delay_free(struct io_context *ioc)
+{
+ return false;
+}
+#endif /* CONFIG_BLK_ICQ */
- put_io_context(ioc);
+/**
+ * put_io_context - put a reference of io_context
+ * @ioc: io_context to put
+ *
+ * Decrement reference count of @ioc and release it if the count reaches
+ * zero.
+ */
+void put_io_context(struct io_context *ioc)
+{
+ BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
+ if (atomic_long_dec_and_test(&ioc->refcount) && !ioc_delay_free(ioc))
+ kmem_cache_free(iocontext_cachep, ioc);
}
+EXPORT_SYMBOL_GPL(put_io_context);
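The reshuffled put path above reads as: drop a reference, and if it was the
last one, free inline unless icqs still hang off the context, in which case
the release is punted to a workqueue. A user-space model with the workqueue
replaced by a flag (names are illustrative):

  #include <stdio.h>
  #include <stdlib.h>

  struct ioc {
      long refcount;
      int has_icqs;
      int deferred;
  };

  static int delay_free(struct ioc *ioc)
  {
      if (ioc->has_icqs) {
          ioc->deferred = 1; /* stands in for queue_work() */
          return 1;
      }
      return 0;
  }

  static void put_ioc(struct ioc *ioc)
  {
      if (--ioc->refcount == 0 && !delay_free(ioc))
          free(ioc);
  }

  int main(void)
  {
      struct ioc *ioc = calloc(1, sizeof(*ioc));

      if (!ioc)
          return 1;
      ioc->refcount = 1;
      ioc->has_icqs = 1;
      put_ioc(ioc);
      printf("deferred=%d\n", ioc->deferred); /* 1: a worker frees it later */
      free(ioc);
      return 0;
  }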
/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
@@ -205,132 +224,109 @@ void exit_io_context(struct task_struct *task)
task->io_context = NULL;
task_unlock(task);
- atomic_dec(&ioc->nr_tasks);
- put_io_context_active(ioc);
-}
-
-static void __ioc_clear_queue(struct list_head *icq_list)
-{
- unsigned long flags;
-
- rcu_read_lock();
- while (!list_empty(icq_list)) {
- struct io_cq *icq = list_entry(icq_list->next,
- struct io_cq, q_node);
- struct io_context *ioc = icq->ioc;
-
- spin_lock_irqsave(&ioc->lock, flags);
- if (icq->flags & ICQ_DESTROYED) {
- spin_unlock_irqrestore(&ioc->lock, flags);
- continue;
- }
- ioc_destroy_icq(icq);
- spin_unlock_irqrestore(&ioc->lock, flags);
+ if (atomic_dec_and_test(&ioc->active_ref)) {
+ ioc_exit_icqs(ioc);
+ put_io_context(ioc);
}
- rcu_read_unlock();
}
-/**
- * ioc_clear_queue - break any ioc association with the specified queue
- * @q: request_queue being cleared
- *
- * Walk @q->icq_list and exit all io_cq's.
- */
-void ioc_clear_queue(struct request_queue *q)
-{
- LIST_HEAD(icq_list);
-
- spin_lock_irq(&q->queue_lock);
- list_splice_init(&q->icq_list, &icq_list);
- spin_unlock_irq(&q->queue_lock);
-
- __ioc_clear_queue(&icq_list);
-}
-
-int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
+static struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
struct io_context *ioc;
- int ret;
ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
node);
if (unlikely(!ioc))
- return -ENOMEM;
+ return NULL;
- /* initialize */
atomic_long_set(&ioc->refcount, 1);
- atomic_set(&ioc->nr_tasks, 1);
atomic_set(&ioc->active_ref, 1);
+#ifdef CONFIG_BLK_ICQ
spin_lock_init(&ioc->lock);
INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC);
INIT_HLIST_HEAD(&ioc->icq_list);
INIT_WORK(&ioc->release_work, ioc_release_fn);
+#endif
+ return ioc;
+}
- /*
- * Try to install. ioc shouldn't be installed if someone else
- * already did or @task, which isn't %current, is exiting. Note
- * that we need to allow ioc creation on exiting %current as exit
- * path may issue IOs from e.g. exit_files(). The exit path is
- * responsible for not issuing IO after exit_io_context().
- */
- task_lock(task);
- if (!task->io_context &&
- (task == current || !(task->flags & PF_EXITING)))
- task->io_context = ioc;
- else
- kmem_cache_free(iocontext_cachep, ioc);
+int set_task_ioprio(struct task_struct *task, int ioprio)
+{
+ int err;
+ const struct cred *cred = current_cred(), *tcred;
- ret = task->io_context ? 0 : -EBUSY;
+ rcu_read_lock();
+ tcred = __task_cred(task);
+ if (!uid_eq(tcred->uid, cred->euid) &&
+ !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
+ rcu_read_unlock();
+ return -EPERM;
+ }
+ rcu_read_unlock();
- task_unlock(task);
+ err = security_task_setioprio(task, ioprio);
+ if (err)
+ return err;
- return ret;
-}
+ task_lock(task);
+ if (unlikely(!task->io_context)) {
+ struct io_context *ioc;
-/**
- * get_task_io_context - get io_context of a task
- * @task: task of interest
- * @gfp_flags: allocation flags, used if allocation is necessary
- * @node: allocation node, used if allocation is necessary
- *
- * Return io_context of @task. If it doesn't exist, it is created with
- * @gfp_flags and @node. The returned io_context has its reference count
- * incremented.
- *
- * This function always goes through task_lock() and it's better to use
- * %current->io_context + get_io_context() for %current.
- */
-struct io_context *get_task_io_context(struct task_struct *task,
- gfp_t gfp_flags, int node)
-{
- struct io_context *ioc;
+ task_unlock(task);
- might_sleep_if(gfpflags_allow_blocking(gfp_flags));
+ ioc = alloc_io_context(GFP_ATOMIC, NUMA_NO_NODE);
+ if (!ioc)
+ return -ENOMEM;
- do {
task_lock(task);
- ioc = task->io_context;
- if (likely(ioc)) {
- get_io_context(ioc);
- task_unlock(task);
- return ioc;
+ if (task->flags & PF_EXITING) {
+ err = -ESRCH;
+ kmem_cache_free(iocontext_cachep, ioc);
+ goto out;
}
- task_unlock(task);
- } while (!create_task_io_context(task, gfp_flags, node));
+ if (task->io_context)
+ kmem_cache_free(iocontext_cachep, ioc);
+ else
+ task->io_context = ioc;
+ }
+ task->io_context->ioprio = ioprio;
+out:
+ task_unlock(task);
+ return err;
+}
+EXPORT_SYMBOL_GPL(set_task_ioprio);
- return NULL;
+int __copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+ struct io_context *ioc = current->io_context;
+
+ /*
+ * Share io context with parent, if CLONE_IO is set
+ */
+ if (clone_flags & CLONE_IO) {
+ atomic_inc(&ioc->active_ref);
+ tsk->io_context = ioc;
+ } else if (ioprio_valid(ioc->ioprio)) {
+ tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE);
+ if (!tsk->io_context)
+ return -ENOMEM;
+ tsk->io_context->ioprio = ioc->ioprio;
+ }
+
+ return 0;
}
+#ifdef CONFIG_BLK_ICQ
/**
* ioc_lookup_icq - lookup io_cq from ioc
- * @ioc: the associated io_context
* @q: the associated request_queue
*
* Look up io_cq associated with @ioc - @q pair from @ioc. Must be called
* with @q->queue_lock held.
*/
-struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
+struct io_cq *ioc_lookup_icq(struct request_queue *q)
{
+ struct io_context *ioc = current->io_context;
struct io_cq *icq;
lockdep_assert_held(&q->queue_lock);
@@ -359,9 +355,7 @@ EXPORT_SYMBOL(ioc_lookup_icq);
/**
* ioc_create_icq - create and link io_cq
- * @ioc: io_context of interest
* @q: request_queue of interest
- * @gfp_mask: allocation mask
*
* Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, they
* will be created using @gfp_mask.
@@ -369,19 +363,19 @@ EXPORT_SYMBOL(ioc_lookup_icq);
* The caller is responsible for ensuring @ioc won't go away and @q is
* alive and will stay alive until this function returns.
*/
-struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
- gfp_t gfp_mask)
+static struct io_cq *ioc_create_icq(struct request_queue *q)
{
+ struct io_context *ioc = current->io_context;
struct elevator_type *et = q->elevator->type;
struct io_cq *icq;
/* allocate stuff */
- icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
+ icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO,
q->node);
if (!icq)
return NULL;
- if (radix_tree_maybe_preload(gfp_mask) < 0) {
+ if (radix_tree_maybe_preload(GFP_ATOMIC) < 0) {
kmem_cache_free(et->icq_cache, icq);
return NULL;
}
@@ -402,7 +396,7 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
et->ops.init_icq(icq);
} else {
kmem_cache_free(et->icq_cache, icq);
- icq = ioc_lookup_icq(ioc, q);
+ icq = ioc_lookup_icq(q);
if (!icq)
printk(KERN_ERR "cfq: icq link failed!\n");
}
@@ -413,6 +407,46 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
return icq;
}
+struct io_cq *ioc_find_get_icq(struct request_queue *q)
+{
+ struct io_context *ioc = current->io_context;
+ struct io_cq *icq = NULL;
+
+ if (unlikely(!ioc)) {
+ ioc = alloc_io_context(GFP_ATOMIC, q->node);
+ if (!ioc)
+ return NULL;
+
+ task_lock(current);
+ if (current->io_context) {
+ kmem_cache_free(iocontext_cachep, ioc);
+ ioc = current->io_context;
+ } else {
+ current->io_context = ioc;
+ }
+
+ get_io_context(ioc);
+ task_unlock(current);
+ } else {
+ get_io_context(ioc);
+
+ spin_lock_irq(&q->queue_lock);
+ icq = ioc_lookup_icq(q);
+ spin_unlock_irq(&q->queue_lock);
+ }
+
+ if (!icq) {
+ icq = ioc_create_icq(q);
+ if (!icq) {
+ put_io_context(ioc);
+ return NULL;
+ }
+ }
+ return icq;
+}
+EXPORT_SYMBOL_GPL(ioc_find_get_icq);
+#endif /* CONFIG_BLK_ICQ */
+
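ioc_find_get_icq() above is the familiar lookup-under-lock, create-on-miss,
tolerate-losing-the-race shape. A single-threaded user-space model of that
shape (the race branch can only be illustrated here, never taken):

  #include <stdio.h>
  #include <stdlib.h>

  struct ctx { int refs; };

  static struct ctx *current_ctx; /* stands in for current->io_context */

  static struct ctx *find_get_ctx(void)
  {
      struct ctx *c = current_ctx;

      if (!c) {
          c = calloc(1, sizeof(*c));
          if (!c)
              return NULL;
          if (current_ctx) {
              /* lost the install race: drop ours, use the winner */
              free(c);
              c = current_ctx;
          } else {
              current_ctx = c;
          }
      }
      c->refs++; /* the caller now owns a reference */
      return c;
  }

  int main(void)
  {
      struct ctx *a = find_get_ctx();
      struct ctx *b = find_get_ctx();

      printf("refs=%d same=%d\n", a->refs, a == b); /* refs=2 same=1 */
      free(a);
      return 0;
  }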
static int __init blk_ioc_init(void)
{
iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-ioprio.c b/block/blk-ioprio.c
index 332a07761bf8..2e7f10e1c03f 100644
--- a/block/blk-ioprio.c
+++ b/block/blk-ioprio.c
@@ -62,6 +62,7 @@ struct ioprio_blkg {
struct ioprio_blkcg {
struct blkcg_policy_data cpd;
enum prio_policy prio_policy;
+ bool prio_set;
};
static inline struct ioprio_blkg *pd_to_ioprio(struct blkg_policy_data *pd)
@@ -112,7 +113,7 @@ static ssize_t ioprio_set_prio_policy(struct kernfs_open_file *of, char *buf,
if (ret < 0)
return ret;
blkcg->prio_policy = ret;
-
+ blkcg->prio_set = true;
return nbytes;
}
@@ -190,6 +191,10 @@ static void blkcg_ioprio_track(struct rq_qos *rqos, struct request *rq,
struct bio *bio)
{
struct ioprio_blkcg *blkcg = ioprio_blkcg_from_bio(bio);
+ u16 prio;
+
+ if (!blkcg->prio_set)
+ return;
/*
* Except for IOPRIO_CLASS_NONE, higher I/O priority numbers
@@ -199,8 +204,10 @@ static void blkcg_ioprio_track(struct rq_qos *rqos, struct request *rq,
* bio I/O priority is not modified. If the bio I/O priority equals
* IOPRIO_CLASS_NONE, the cgroup I/O priority is assigned to the bio.
*/
- bio->bi_ioprio = max_t(u16, bio->bi_ioprio,
- IOPRIO_PRIO_VALUE(blkcg->prio_policy, 0));
+ prio = max_t(u16, bio->bi_ioprio,
+ IOPRIO_PRIO_VALUE(blkcg->prio_policy, 0));
+ if (prio > bio->bi_ioprio)
+ bio->bi_ioprio = prio;
}
static void blkcg_ioprio_exit(struct rq_qos *rqos)
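A subtlety behind the rework above: numerically larger ioprio values mean
lower priority (RT=1, BE=2, IDLE=3 in the class bits), so taking the max of
the bio's and the cgroup's value can only restrict, never promote. Sketch:

  #include <stdio.h>

  #define IOPRIO_CLASS_SHIFT 13
  #define IOPRIO_PRIO_VALUE(class, data) (((class) << IOPRIO_CLASS_SHIFT) | (data))

  int main(void)
  {
      unsigned short bio_prio = IOPRIO_PRIO_VALUE(2, 0); /* BE */
      unsigned short cg_prio  = IOPRIO_PRIO_VALUE(3, 0); /* IDLE policy */
      unsigned short prio = bio_prio > cg_prio ? bio_prio : cg_prio;

      if (prio > bio_prio)
          bio_prio = prio; /* demoted to IDLE */
      printf("class=%u\n", bio_prio >> IOPRIO_CLASS_SHIFT); /* 3 */
      return 0;
  }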
diff --git a/block/blk-map.c b/block/blk-map.c
index 4526adde0156..c7f71d83eff1 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -446,7 +446,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
if (bytes > len)
bytes = len;
- page = alloc_page(GFP_NOIO | gfp_mask);
+ page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
if (!page)
goto cleanup;
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 893c1a60b701..4de34a332c9f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -8,10 +8,12 @@
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>
+#include <linux/part_stat.h>
#include <trace/events/block.h>
#include "blk.h"
+#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-throttle.h"
@@ -775,8 +777,7 @@ static struct request *attempt_merge(struct request_queue *q,
if (req_op(req) != req_op(next))
return NULL;
- if (rq_data_dir(req) != rq_data_dir(next)
- || req->rq_disk != next->rq_disk)
+ if (rq_data_dir(req) != rq_data_dir(next))
return NULL;
if (req_op(req) == REQ_OP_WRITE_SAME &&
@@ -903,10 +904,6 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (bio_data_dir(bio) != rq_data_dir(rq))
return false;
- /* must be same device */
- if (rq->rq_disk != bio->bi_bdev->bd_disk)
- return false;
-
/* only merge integrity protected bio into ditto rq */
if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
return false;
@@ -1067,7 +1064,6 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
* @q: request_queue new bio is being queued at
* @bio: new bio being queued
* @nr_segs: number of segments in @bio
- * @same_queue_rq: output value, will be true if there's an existing request
* from the passed in @q already in the plug list
*
* Determine whether @bio being queued on @q can be merged with the previous
@@ -1084,7 +1080,7 @@ static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
* Caller must ensure !blk_queue_nomerges(q) beforehand.
*/
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
- unsigned int nr_segs, bool *same_queue_rq)
+ unsigned int nr_segs)
{
struct blk_plug *plug;
struct request *rq;
@@ -1096,12 +1092,6 @@ bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
/* check the previously added entry for a quick merge attempt */
rq = rq_list_peek(&plug->mq_list);
if (rq->q == q) {
- /*
- * Only blk-mq multiple hardware queues case checks the rq in
- * the same queue, there should be only one such rq in a queue
- */
- *same_queue_rq = true;
-
if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
BIO_MERGE_OK)
return true;
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 4f2cf8399f3d..3a790eb4995c 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -11,6 +11,7 @@
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
+#include "blk-mq-sched.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"
@@ -29,6 +30,9 @@ static int queue_poll_stat_show(void *data, struct seq_file *m)
struct request_queue *q = data;
int bucket;
+ if (!q->poll_stat)
+ return 0;
+
for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
print_stat(m, &q->poll_stat[2 * bucket]);
@@ -122,7 +126,6 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(FUA),
QUEUE_FLAG_NAME(DAX),
QUEUE_FLAG_NAME(STATS),
- QUEUE_FLAG_NAME(POLL_STATS),
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(QUIESCED),
QUEUE_FLAG_NAME(PCI_P2PDMA),
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index ba21449439cc..55488ba97823 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -18,32 +18,6 @@
#include "blk-mq-tag.h"
#include "blk-wbt.h"
-void blk_mq_sched_assign_ioc(struct request *rq)
-{
- struct request_queue *q = rq->q;
- struct io_context *ioc;
- struct io_cq *icq;
-
- /*
- * May not have an IO context if it's a passthrough request
- */
- ioc = current->io_context;
- if (!ioc)
- return;
-
- spin_lock_irq(&q->queue_lock);
- icq = ioc_lookup_icq(ioc, q);
- spin_unlock_irq(&q->queue_lock);
-
- if (!icq) {
- icq = ioc_create_icq(ioc, q, GFP_ATOMIC);
- if (!icq)
- return;
- }
- get_io_context(icq->ioc);
- rq->elv.icq = icq;
-}
-
/*
* Mark a hardware queue as needing a restart. For shared queues, maintain
* a count of how many hardware queues are marked for restart.
@@ -501,7 +475,8 @@ void blk_mq_sched_insert_requests(struct blk_mq_hw_ctx *hctx,
* us one extra enqueue & dequeue to sw queue.
*/
if (!hctx->dispatch_busy && !run_queue_async) {
- blk_mq_try_issue_list_directly(hctx, list);
+ blk_mq_run_dispatch_ops(hctx->queue,
+ blk_mq_try_issue_list_directly(hctx, list));
if (list_empty(list))
goto out;
}
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index 25d1034952b6..025013972453 100644
--- a/block/blk-mq-sched.h
+++ b/block/blk-mq-sched.h
@@ -8,8 +8,6 @@
#define MAX_SCHED_RQ (16 * BLKDEV_DEFAULT_RQ)
-void blk_mq_sched_assign_ioc(struct request *rq);
-
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
unsigned int nr_segs, struct request **merged_request);
bool blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 253c857cba47..674786574075 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -36,8 +36,6 @@ static void blk_mq_hw_sysfs_release(struct kobject *kobj)
struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
kobj);
- if (hctx->flags & BLK_MQ_F_BLOCKING)
- cleanup_srcu_struct(hctx->srcu);
blk_free_flush_queue(hctx->fq);
sbitmap_free(&hctx->ctx_map);
free_cpumask_var(hctx->cpumask);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 995336abee33..845f74e8dd7b 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -17,6 +17,21 @@
#include "blk-mq-tag.h"
/*
+ * Recalculate the wakeup batch when the tag set is shared by multiple hctxs.
+ */
+static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
+ unsigned int users)
+{
+ if (!users)
+ return;
+
+ sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
+ users);
+ sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
+ users);
+}
+
+/*
* If a previously inactive queue goes active, bump the active user count.
* We need to do this before try to allocate driver tag, then even if fail
* to get tag when first time, the other shared-tag users could reserve
@@ -24,18 +39,26 @@
*/
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
+ unsigned int users;
+
if (blk_mq_is_shared_tags(hctx->flags)) {
struct request_queue *q = hctx->queue;
- if (!test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) &&
- !test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
- atomic_inc(&hctx->tags->active_queues);
+ if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
+ test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags)) {
+ return true;
+ }
} else {
- if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
- !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
- atomic_inc(&hctx->tags->active_queues);
+ if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
+ test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state)) {
+ return true;
+ }
}
+ users = atomic_inc_return(&hctx->tags->active_queues);
+
+ blk_mq_update_wake_batch(hctx->tags, users);
+
return true;
}
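The idea behind blk_mq_update_wake_batch() is that a queue's fair share of the tag space, not the whole depth, should decide how many tag frees accumulate before sleepers are woken; otherwise a queue limited to a small share could sleep forever waiting for a batch that can never complete. A rough sketch of the per-user batch, assuming ceiling division and small illustrative clamp bounds (the exact formula lives in lib/sbitmap.c):

static unsigned int example_wake_batch(unsigned int depth, unsigned int users)
{
        unsigned int share = (depth + users - 1) / users; /* ceiling division */

        /* keep the batch small but non-zero; bounds here are illustrative */
        return clamp_t(unsigned int, share, 1, 8);
}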
@@ -56,6 +79,7 @@ void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_tags *tags = hctx->tags;
+ unsigned int users;
if (blk_mq_is_shared_tags(hctx->flags)) {
struct request_queue *q = hctx->queue;
@@ -68,7 +92,9 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
return;
}
- atomic_dec(&tags->active_queues);
+ users = atomic_dec_return(&tags->active_queues);
+
+ blk_mq_update_wake_batch(tags, users);
blk_mq_tag_wakeup_all(tags, false);
}
@@ -215,7 +241,8 @@ void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
struct bt_iter_data {
struct blk_mq_hw_ctx *hctx;
- busy_iter_fn *fn;
+ struct request_queue *q;
+ busy_tag_iter_fn *fn;
void *data;
bool reserved;
};
@@ -228,7 +255,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
spin_lock_irqsave(&tags->lock, flags);
rq = tags->rqs[bitnr];
- if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
+ if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
rq = NULL;
spin_unlock_irqrestore(&tags->lock, flags);
return rq;
@@ -238,11 +265,18 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
struct bt_iter_data *iter_data = data;
struct blk_mq_hw_ctx *hctx = iter_data->hctx;
- struct blk_mq_tags *tags = hctx->tags;
+ struct request_queue *q = iter_data->q;
+ struct blk_mq_tag_set *set = q->tag_set;
bool reserved = iter_data->reserved;
+ struct blk_mq_tags *tags;
struct request *rq;
bool ret = true;
+ if (blk_mq_is_shared_tags(set->flags))
+ tags = set->shared_tags;
+ else
+ tags = hctx->tags;
+
if (!reserved)
bitnr += tags->nr_reserved_tags;
/*
@@ -253,8 +287,8 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
if (!rq)
return true;
- if (rq->q == hctx->queue && rq->mq_hctx == hctx)
- ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
+ if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
+ ret = iter_data->fn(rq, iter_data->data, reserved);
blk_mq_put_rq_ref(rq);
return ret;
}
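The matching rule bt_iter now applies can be read as a single predicate, extracted below for clarity as an illustrative helper that is not part of the patch. With shared tags the caller passes hctx == NULL and requests are matched on the queue alone:

static bool rq_matches(struct request *rq, struct request_queue *q,
                       struct blk_mq_hw_ctx *hctx)
{
        return rq->q == q && (!hctx || rq->mq_hctx == hctx);
}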
@@ -262,6 +296,7 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
/**
* bt_for_each - iterate over the requests associated with a hardware queue
* @hctx: Hardware queue to examine.
+ * @q: Request queue to examine.
* @bt: sbitmap to examine. This is either the breserved_tags member
* or the bitmap_tags member of struct blk_mq_tags.
* @fn: Pointer to the function that will be called for each request
@@ -273,14 +308,16 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
* @reserved: Indicates whether @bt is the breserved_tags member or the
* bitmap_tags member of struct blk_mq_tags.
*/
-static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
- busy_iter_fn *fn, void *data, bool reserved)
+static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
+ struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
+ void *data, bool reserved)
{
struct bt_iter_data iter_data = {
.hctx = hctx,
.fn = fn,
.data = data,
.reserved = reserved,
+ .q = q,
};
sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
@@ -457,12 +494,9 @@ EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);
* called for all requests on all queues that share that tag set and not only
* for requests associated with @q.
*/
-void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
void *priv)
{
- struct blk_mq_hw_ctx *hctx;
- int i;
-
/*
* __blk_mq_update_nr_hw_queues() updates nr_hw_queues and queue_hw_ctx
* while the queue is frozen. So we can use q_usage_counter to avoid
@@ -471,19 +505,34 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
if (!percpu_ref_tryget(&q->q_usage_counter))
return;
- queue_for_each_hw_ctx(q, hctx, i) {
- struct blk_mq_tags *tags = hctx->tags;
-
- /*
- * If no software queues are currently mapped to this
- * hardware queue, there's nothing to check
- */
- if (!blk_mq_hw_queue_mapped(hctx))
- continue;
+ if (blk_mq_is_shared_tags(q->tag_set->flags)) {
+ struct blk_mq_tags *tags = q->tag_set->shared_tags;
+ struct sbitmap_queue *bresv = &tags->breserved_tags;
+ struct sbitmap_queue *btags = &tags->bitmap_tags;
if (tags->nr_reserved_tags)
- bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
- bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
+ bt_for_each(NULL, q, bresv, fn, priv, true);
+ bt_for_each(NULL, q, btags, fn, priv, false);
+ } else {
+ struct blk_mq_hw_ctx *hctx;
+ int i;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ struct blk_mq_tags *tags = hctx->tags;
+ struct sbitmap_queue *bresv = &tags->breserved_tags;
+ struct sbitmap_queue *btags = &tags->bitmap_tags;
+
+ /*
+ * If no software queues are currently mapped to this
+ * hardware queue, there's nothing to check
+ */
+ if (!blk_mq_hw_queue_mapped(hctx))
+ continue;
+
+ if (tags->nr_reserved_tags)
+ bt_for_each(hctx, q, bresv, fn, priv, true);
+ bt_for_each(hctx, q, btags, fn, priv, false);
+ }
}
blk_queue_exit(q);
}
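Because blk_mq_queue_tag_busy_iter() now takes a busy_tag_iter_fn, per-queue callbacks no longer receive an hctx. A usage sketch with hypothetical helpers, mirroring the blk_mq_check_inflight() conversion elsewhere in this patch:

static bool count_started(struct request *rq, void *priv, bool reserved)
{
        unsigned int *count = priv;

        if (blk_mq_request_started(rq))
                (*count)++;
        return true;    /* keep iterating */
}

static unsigned int queue_started_count(struct request_queue *q)
{
        unsigned int count = 0;

        blk_mq_queue_tag_busy_iter(q, count_started, &count);
        return count;
}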
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index df787b5a23bd..5668e28be0b7 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -28,7 +28,7 @@ extern void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set,
extern void blk_mq_tag_update_sched_shared_tags(struct request_queue *q);
extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
-void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
+void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
void *priv);
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
void *priv);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8874a63ae952..d69ca91fbc8b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -28,6 +28,7 @@
#include <linux/crash_dump.h>
#include <linux/prefetch.h>
#include <linux/blk-crypto.h>
+#include <linux/part_stat.h>
#include <trace/events/block.h>
@@ -126,8 +127,7 @@ struct mq_inflight {
unsigned int inflight[2];
};
-static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
- struct request *rq, void *priv,
+static bool blk_mq_check_inflight(struct request *rq, void *priv,
bool reserved)
{
struct mq_inflight *mi = priv;
@@ -259,17 +259,9 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
*/
void blk_mq_wait_quiesce_done(struct request_queue *q)
{
- struct blk_mq_hw_ctx *hctx;
- unsigned int i;
- bool rcu = false;
-
- queue_for_each_hw_ctx(q, hctx, i) {
- if (hctx->flags & BLK_MQ_F_BLOCKING)
- synchronize_srcu(hctx->srcu);
- else
- rcu = true;
- }
- if (rcu)
+ if (blk_queue_has_srcu(q))
+ synchronize_srcu(q->srcu);
+ else
synchronize_rcu();
}
EXPORT_SYMBOL_GPL(blk_mq_wait_quiesce_done);
@@ -327,6 +319,23 @@ void blk_mq_wake_waiters(struct request_queue *q)
blk_mq_tag_wakeup_all(hctx->tags, true);
}
+void blk_rq_init(struct request_queue *q, struct request *rq)
+{
+ memset(rq, 0, sizeof(*rq));
+
+ INIT_LIST_HEAD(&rq->queuelist);
+ rq->q = q;
+ rq->__sector = (sector_t) -1;
+ INIT_HLIST_NODE(&rq->hash);
+ RB_CLEAR_NODE(&rq->rb_node);
+ rq->tag = BLK_MQ_NO_TAG;
+ rq->internal_tag = BLK_MQ_NO_TAG;
+ rq->start_time_ns = ktime_get_ns();
+ rq->part = NULL;
+ blk_crypto_rq_set_defaults(rq);
+}
+EXPORT_SYMBOL(blk_rq_init);
+
static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
struct blk_mq_tags *tags, unsigned int tag, u64 alloc_time_ns)
{
@@ -359,7 +368,6 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
rq->start_time_ns = ktime_get_ns();
else
rq->start_time_ns = 0;
- rq->rq_disk = NULL;
rq->part = NULL;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
rq->alloc_time_ns = alloc_time_ns;
@@ -377,20 +385,16 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
INIT_LIST_HEAD(&rq->queuelist);
/* tag was already set */
WRITE_ONCE(rq->deadline, 0);
- refcount_set(&rq->ref, 1);
+ req_ref_set(rq, 1);
if (rq->rq_flags & RQF_ELV) {
struct elevator_queue *e = data->q->elevator;
- rq->elv.icq = NULL;
INIT_HLIST_NODE(&rq->hash);
RB_CLEAR_NODE(&rq->rb_node);
if (!op_is_flush(data->cmd_flags) &&
e->type->ops.prepare_request) {
- if (e->type->icq_cache)
- blk_mq_sched_assign_ioc(rq);
-
e->type->ops.prepare_request(rq);
rq->rq_flags |= RQF_ELVPRIV;
}
@@ -616,16 +620,9 @@ void blk_mq_free_request(struct request *rq)
struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
- if (rq->rq_flags & RQF_ELVPRIV) {
- struct elevator_queue *e = q->elevator;
-
- if (e->type->ops.finish_request)
- e->type->ops.finish_request(rq);
- if (rq->elv.icq) {
- put_io_context(rq->elv.icq->ioc);
- rq->elv.icq = NULL;
- }
- }
+ if ((rq->rq_flags & RQF_ELVPRIV) &&
+ q->elevator->type->ops.finish_request)
+ q->elevator->type->ops.finish_request(rq);
if (rq->rq_flags & RQF_MQ_INFLIGHT)
__blk_mq_dec_active_requests(hctx);
@@ -636,7 +633,7 @@ void blk_mq_free_request(struct request *rq)
rq_qos_done(q, rq);
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
- if (refcount_dec_and_test(&rq->ref))
+ if (req_ref_put_and_test(rq))
__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);
@@ -649,6 +646,20 @@ void blk_mq_free_plug_rqs(struct blk_plug *plug)
blk_mq_free_request(rq);
}
+void blk_dump_rq_flags(struct request *rq, char *msg)
+{
+ printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
+ rq->q->disk ? rq->q->disk->disk_name : "?",
+ (unsigned long long) rq->cmd_flags);
+
+ printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
+ (unsigned long long)blk_rq_pos(rq),
+ blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
+ printk(KERN_INFO " bio %p, biotail %p, len %u\n",
+ rq->bio, rq->biotail, blk_rq_bytes(rq));
+}
+EXPORT_SYMBOL(blk_dump_rq_flags);
+
static void req_bio_endio(struct request *rq, struct bio *bio,
unsigned int nbytes, blk_status_t error)
{
@@ -685,6 +696,64 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
}
}
+static void blk_print_req_error(struct request *req, blk_status_t status)
+{
+ printk_ratelimited(KERN_ERR
+ "%s error, dev %s, sector %llu op 0x%x:(%s) flags 0x%x "
+ "phys_seg %u prio class %u\n",
+ blk_status_to_str(status),
+ req->q->disk ? req->q->disk->disk_name : "?",
+ blk_rq_pos(req), req_op(req), blk_op_str(req_op(req)),
+ req->cmd_flags & ~REQ_OP_MASK,
+ req->nr_phys_segments,
+ IOPRIO_PRIO_CLASS(req->ioprio));
+}
+
+/*
+ * Fully end IO on a request. Does not support partial completions, or
+ * errors.
+ */
+static void blk_complete_request(struct request *req)
+{
+ const bool is_flush = (req->rq_flags & RQF_FLUSH_SEQ) != 0;
+ int total_bytes = blk_rq_bytes(req);
+ struct bio *bio = req->bio;
+
+ trace_block_rq_complete(req, BLK_STS_OK, total_bytes);
+
+ if (!bio)
+ return;
+
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ if (blk_integrity_rq(req) && req_op(req) == REQ_OP_READ)
+ req->q->integrity.profile->complete_fn(req, total_bytes);
+#endif
+
+ blk_account_io_completion(req, total_bytes);
+
+ do {
+ struct bio *next = bio->bi_next;
+
+ /* Completion has already been traced */
+ bio_clear_flag(bio, BIO_TRACE_COMPLETION);
+
+ if (req_op(req) == REQ_OP_ZONE_APPEND)
+ bio->bi_iter.bi_sector = req->__sector;
+
+ if (!is_flush)
+ bio_endio(bio);
+ bio = next;
+ } while (bio);
+
+ /*
+ * Reset counters so that the request stacking driver
+ * can find how many bytes remain in the request
+ * later.
+ */
+ req->bio = NULL;
+ req->__data_len = 0;
+}
+
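blk_complete_request() is the fast, no-partial-completion counterpart of blk_update_request() and is consumed by blk_mq_end_request_batch() further down. A sketch of the driver side, loosely modeled on how polling drivers batch completions; the mydrv_* names are hypothetical:

static void mydrv_complete_batch(struct io_comp_batch *iob)
{
        /* driver-side per-request teardown would go here, then: */
        blk_mq_end_request_batch(iob);
}

static void mydrv_process_cqe(struct request *req, struct io_comp_batch *iob)
{
        if (!iob || !blk_mq_add_to_batch(req, iob, 0, mydrv_complete_batch))
                blk_mq_complete_request(req);
}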
/**
* blk_update_request - Complete multiple bytes without completing the request
* @req: the request being processed
@@ -791,6 +860,48 @@ bool blk_update_request(struct request *req, blk_status_t error,
}
EXPORT_SYMBOL_GPL(blk_update_request);
+static void __blk_account_io_done(struct request *req, u64 now)
+{
+ const int sgrp = op_stat_group(req_op(req));
+
+ part_stat_lock();
+ update_io_ticks(req->part, jiffies, true);
+ part_stat_inc(req->part, ios[sgrp]);
+ part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
+ part_stat_unlock();
+}
+
+static inline void blk_account_io_done(struct request *req, u64 now)
+{
+ /*
+ * Account IO completion. flush_rq isn't accounted as a
+ * normal IO on queueing nor completion. Accounting the
+ * containing request is enough.
+ */
+ if (blk_do_io_stat(req) && req->part &&
+ !(req->rq_flags & RQF_FLUSH_SEQ))
+ __blk_account_io_done(req, now);
+}
+
+static void __blk_account_io_start(struct request *rq)
+{
+ /* passthrough requests can hold bios that do not have ->bi_bdev set */
+ if (rq->bio && rq->bio->bi_bdev)
+ rq->part = rq->bio->bi_bdev;
+ else if (rq->q->disk)
+ rq->part = rq->q->disk->part0;
+
+ part_stat_lock();
+ update_io_ticks(rq->part, jiffies, false);
+ part_stat_unlock();
+}
+
+static inline void blk_account_io_start(struct request *req)
+{
+ if (blk_do_io_stat(req))
+ __blk_account_io_start(req);
+}
+
static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
{
if (rq->rq_flags & RQF_STATS) {
@@ -856,14 +967,14 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
prefetch(rq->bio);
prefetch(rq->rq_next);
- blk_update_request(rq, BLK_STS_OK, blk_rq_bytes(rq));
+ blk_complete_request(rq);
if (iob->need_ts)
__blk_mq_end_request_acct(rq, now);
rq_qos_done(rq->q, rq);
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
- if (!refcount_dec_and_test(&rq->ref))
+ if (!req_ref_put_and_test(rq))
continue;
blk_crypto_free_request(rq);
@@ -996,26 +1107,6 @@ void blk_mq_complete_request(struct request *rq)
}
EXPORT_SYMBOL(blk_mq_complete_request);
-static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
- __releases(hctx->srcu)
-{
- if (!(hctx->flags & BLK_MQ_F_BLOCKING))
- rcu_read_unlock();
- else
- srcu_read_unlock(hctx->srcu, srcu_idx);
-}
-
-static void hctx_lock(struct blk_mq_hw_ctx *hctx, int *srcu_idx)
- __acquires(hctx->srcu)
-{
- if (!(hctx->flags & BLK_MQ_F_BLOCKING)) {
- /* shut up gcc false positive */
- *srcu_idx = 0;
- rcu_read_lock();
- } else
- *srcu_idx = srcu_read_lock(hctx->srcu);
-}
-
/**
* blk_mq_start_request - Start processing a request
* @rq: Pointer to request to be started
@@ -1058,6 +1149,107 @@ void blk_mq_start_request(struct request *rq)
}
EXPORT_SYMBOL(blk_mq_start_request);
+/**
+ * blk_end_sync_rq - executes a completion event on a request
+ * @rq: request to complete
+ * @error: end I/O status of the request
+ */
+static void blk_end_sync_rq(struct request *rq, blk_status_t error)
+{
+ struct completion *waiting = rq->end_io_data;
+
+ rq->end_io_data = (void *)(uintptr_t)error;
+
+ /*
+ * complete last: if this is a stacked request, the process (and
+ * thus the rq pointer) could become invalid right after this complete()
+ */
+ complete(waiting);
+}
+
+/**
+ * blk_execute_rq_nowait - insert a request to I/O scheduler for execution
+ * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
+ * @done: I/O completion handler
+ *
+ * Description:
+ * Insert a fully prepared request at the back of the I/O scheduler queue
+ * for execution. Don't wait for completion.
+ *
+ * Note:
+ * This function will invoke @done directly if the queue is dead.
+ */
+void blk_execute_rq_nowait(struct request *rq, bool at_head, rq_end_io_fn *done)
+{
+ WARN_ON(irqs_disabled());
+ WARN_ON(!blk_rq_is_passthrough(rq));
+
+ rq->end_io = done;
+
+ blk_account_io_start(rq);
+
+ /*
+ * don't check the dying flag for MQ because the request won't
+ * be reused after the dying flag is set
+ */
+ blk_mq_sched_insert_request(rq, at_head, true, false);
+}
+EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
+
+static bool blk_rq_is_poll(struct request *rq)
+{
+ if (!rq->mq_hctx)
+ return false;
+ if (rq->mq_hctx->type != HCTX_TYPE_POLL)
+ return false;
+ if (WARN_ON_ONCE(!rq->bio))
+ return false;
+ return true;
+}
+
+static void blk_rq_poll_completion(struct request *rq, struct completion *wait)
+{
+ do {
+ bio_poll(rq->bio, NULL, 0);
+ cond_resched();
+ } while (!completion_done(wait));
+}
+
+/**
+ * blk_execute_rq - insert a request into queue for execution
+ * @rq: request to insert
+ * @at_head: insert request at head or tail of queue
+ *
+ * Description:
+ * Insert a fully prepared request at the back of the I/O scheduler queue
+ * for execution and wait for completion.
+ * Return: The blk_status_t result provided to blk_mq_end_request().
+ */
+blk_status_t blk_execute_rq(struct request *rq, bool at_head)
+{
+ DECLARE_COMPLETION_ONSTACK(wait);
+ unsigned long hang_check;
+
+ rq->end_io_data = &wait;
+ blk_execute_rq_nowait(rq, at_head, blk_end_sync_rq);
+
+ /* Prevent hang_check timer from firing at us during very long I/O */
+ hang_check = sysctl_hung_task_timeout_secs;
+
+ if (blk_rq_is_poll(rq))
+ blk_rq_poll_completion(rq, &wait);
+ else if (hang_check)
+ while (!wait_for_completion_io_timeout(&wait,
+ hang_check * (HZ/2)))
+ ;
+ else
+ wait_for_completion_io(&wait);
+
+ return (blk_status_t)(uintptr_t)rq->end_io_data;
+}
+EXPORT_SYMBOL(blk_execute_rq);
+
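With the gendisk argument dropped from the execute helpers (see the bsg-lib caller below), a synchronous passthrough submission reduces to the sketch that follows; queue setup and payload handling are assumed, and error handling is trimmed:

static blk_status_t send_passthrough(struct request_queue *q)
{
        struct request *rq;
        blk_status_t status;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return BLK_STS_RESOURCE;

        /* ... driver-specific setup of the passthrough payload ... */

        status = blk_execute_rq(rq, false);     /* false: insert at tail */
        blk_mq_free_request(rq);
        return status;
}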
static void __blk_mq_requeue_request(struct request *rq)
{
struct request_queue *q = rq->q;
@@ -1160,14 +1352,15 @@ void blk_mq_delay_kick_requeue_list(struct request_queue *q,
}
EXPORT_SYMBOL(blk_mq_delay_kick_requeue_list);
-static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq,
- void *priv, bool reserved)
+static bool blk_mq_rq_inflight(struct request *rq, void *priv,
+ bool reserved)
{
/*
- * If we find a request that isn't idle and the queue matches,
- * we know the queue is busy. Return false to stop the iteration.
+ * If we find a request that isn't idle we know the queue is busy;
+ * the queue match is already handled by the iterator.
+ * Return false to stop the iteration.
*/
- if (blk_mq_request_started(rq) && rq->q == hctx->queue) {
+ if (blk_mq_request_started(rq)) {
bool *busy = priv;
*busy = true;
@@ -1225,12 +1418,11 @@ void blk_mq_put_rq_ref(struct request *rq)
{
if (is_flush_rq(rq))
rq->end_io(rq, 0);
- else if (refcount_dec_and_test(&rq->ref))
+ else if (req_ref_put_and_test(rq))
__blk_mq_free_request(rq);
}
-static bool blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
- struct request *rq, void *priv, bool reserved)
+static bool blk_mq_check_expired(struct request *rq, void *priv, bool reserved)
{
unsigned long *next = priv;
@@ -1771,19 +1963,14 @@ out:
*/
static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
{
- int srcu_idx;
-
/*
* We can't run the queue inline with ints disabled. Ensure that
* we catch bad users of this early.
*/
WARN_ON_ONCE(in_interrupt());
- might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
-
- hctx_lock(hctx, &srcu_idx);
- blk_mq_sched_dispatch_requests(hctx);
- hctx_unlock(hctx, srcu_idx);
+ blk_mq_run_dispatch_ops(hctx->queue,
+ blk_mq_sched_dispatch_requests(hctx));
}
static inline int blk_mq_first_mapped_cpu(struct blk_mq_hw_ctx *hctx)
@@ -1895,7 +2082,6 @@ EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
*/
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
{
- int srcu_idx;
bool need_run;
/*
@@ -1906,10 +2092,9 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
* And queue will be rerun in blk_mq_unquiesce_queue() if it is
* quiesced.
*/
- hctx_lock(hctx, &srcu_idx);
- need_run = !blk_queue_quiesced(hctx->queue) &&
- blk_mq_hctx_has_pending(hctx);
- hctx_unlock(hctx, srcu_idx);
+ __blk_mq_run_dispatch_ops(hctx->queue, false,
+ need_run = !blk_queue_quiesced(hctx->queue) &&
+ blk_mq_hctx_has_pending(hctx));
if (need_run)
__blk_mq_delay_run_hw_queue(hctx, async, 0);
@@ -2202,98 +2387,6 @@ static void blk_mq_commit_rqs(struct blk_mq_hw_ctx *hctx, int *queued,
*queued = 0;
}
-static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
-{
- struct blk_mq_hw_ctx *hctx = NULL;
- struct request *rq;
- int queued = 0;
- int errors = 0;
-
- while ((rq = rq_list_pop(&plug->mq_list))) {
- bool last = rq_list_empty(plug->mq_list);
- blk_status_t ret;
-
- if (hctx != rq->mq_hctx) {
- if (hctx)
- blk_mq_commit_rqs(hctx, &queued, from_schedule);
- hctx = rq->mq_hctx;
- }
-
- ret = blk_mq_request_issue_directly(rq, last);
- switch (ret) {
- case BLK_STS_OK:
- queued++;
- break;
- case BLK_STS_RESOURCE:
- case BLK_STS_DEV_RESOURCE:
- blk_mq_request_bypass_insert(rq, false, last);
- blk_mq_commit_rqs(hctx, &queued, from_schedule);
- return;
- default:
- blk_mq_end_request(rq, ret);
- errors++;
- break;
- }
- }
-
- /*
- * If we didn't flush the entire list, we could have told the driver
- * there was more coming, but that turned out to be a lie.
- */
- if (errors)
- blk_mq_commit_rqs(hctx, &queued, from_schedule);
-}
-
-void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
-{
- struct blk_mq_hw_ctx *this_hctx;
- struct blk_mq_ctx *this_ctx;
- unsigned int depth;
- LIST_HEAD(list);
-
- if (rq_list_empty(plug->mq_list))
- return;
- plug->rq_count = 0;
-
- if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
- blk_mq_plug_issue_direct(plug, false);
- if (rq_list_empty(plug->mq_list))
- return;
- }
-
- this_hctx = NULL;
- this_ctx = NULL;
- depth = 0;
- do {
- struct request *rq;
-
- rq = rq_list_pop(&plug->mq_list);
-
- if (!this_hctx) {
- this_hctx = rq->mq_hctx;
- this_ctx = rq->mq_ctx;
- } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
- trace_block_unplug(this_hctx->queue, depth,
- !from_schedule);
- blk_mq_sched_insert_requests(this_hctx, this_ctx,
- &list, from_schedule);
- depth = 0;
- this_hctx = rq->mq_hctx;
- this_ctx = rq->mq_ctx;
-
- }
-
- list_add(&rq->queuelist, &list);
- depth++;
- } while (!rq_list_empty(plug->mq_list));
-
- if (!list_empty(&list)) {
- trace_block_unplug(this_hctx->queue, depth, !from_schedule);
- blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
- from_schedule);
- }
-}
-
static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
unsigned int nr_segs)
{
@@ -2404,33 +2497,141 @@ insert:
static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
- blk_status_t ret;
- int srcu_idx;
-
- might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
+ blk_status_t ret =
+ __blk_mq_try_issue_directly(hctx, rq, false, true);
- hctx_lock(hctx, &srcu_idx);
-
- ret = __blk_mq_try_issue_directly(hctx, rq, false, true);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
blk_mq_request_bypass_insert(rq, false, true);
else if (ret != BLK_STS_OK)
blk_mq_end_request(rq, ret);
+}
- hctx_unlock(hctx, srcu_idx);
+static blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
+{
+ return __blk_mq_try_issue_directly(rq->mq_hctx, rq, true, last);
}
-blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last)
+static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
{
- blk_status_t ret;
- int srcu_idx;
- struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+ struct blk_mq_hw_ctx *hctx = NULL;
+ struct request *rq;
+ int queued = 0;
+ int errors = 0;
- hctx_lock(hctx, &srcu_idx);
- ret = __blk_mq_try_issue_directly(hctx, rq, true, last);
- hctx_unlock(hctx, srcu_idx);
+ while ((rq = rq_list_pop(&plug->mq_list))) {
+ bool last = rq_list_empty(plug->mq_list);
+ blk_status_t ret;
- return ret;
+ if (hctx != rq->mq_hctx) {
+ if (hctx)
+ blk_mq_commit_rqs(hctx, &queued, from_schedule);
+ hctx = rq->mq_hctx;
+ }
+
+ ret = blk_mq_request_issue_directly(rq, last);
+ switch (ret) {
+ case BLK_STS_OK:
+ queued++;
+ break;
+ case BLK_STS_RESOURCE:
+ case BLK_STS_DEV_RESOURCE:
+ blk_mq_request_bypass_insert(rq, false, last);
+ blk_mq_commit_rqs(hctx, &queued, from_schedule);
+ return;
+ default:
+ blk_mq_end_request(rq, ret);
+ errors++;
+ break;
+ }
+ }
+
+ /*
+ * If we didn't flush the entire list, we could have told the driver
+ * there was more coming, but that turned out to be a lie.
+ */
+ if (errors)
+ blk_mq_commit_rqs(hctx, &queued, from_schedule);
+}
+
+static void __blk_mq_flush_plug_list(struct request_queue *q,
+ struct blk_plug *plug)
+{
+ if (blk_queue_quiesced(q))
+ return;
+ q->mq_ops->queue_rqs(&plug->mq_list);
+}
+
+void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+{
+ struct blk_mq_hw_ctx *this_hctx;
+ struct blk_mq_ctx *this_ctx;
+ struct request *rq;
+ unsigned int depth;
+ LIST_HEAD(list);
+
+ if (rq_list_empty(plug->mq_list))
+ return;
+ plug->rq_count = 0;
+
+ if (!plug->multiple_queues && !plug->has_elevator && !from_schedule) {
+ struct request_queue *q;
+
+ rq = rq_list_peek(&plug->mq_list);
+ q = rq->q;
+
+ /*
+ * Peek first request and see if we have a ->queue_rqs() hook.
+ * If we do, we can dispatch the whole plug list in one go. We
+ * already know at this point that all requests belong to the
+ * same queue; the caller must ensure that's the case.
+ *
+ * Since we pass off the full list to the driver at this point,
+ * we do not increment the active request count for the queue.
+ * Bypass shared tags for now because of that.
+ */
+ if (q->mq_ops->queue_rqs &&
+ !(rq->mq_hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)) {
+ blk_mq_run_dispatch_ops(q,
+ __blk_mq_flush_plug_list(q, plug));
+ if (rq_list_empty(plug->mq_list))
+ return;
+ }
+
+ blk_mq_run_dispatch_ops(q,
+ blk_mq_plug_issue_direct(plug, false));
+ if (rq_list_empty(plug->mq_list))
+ return;
+ }
+
+ this_hctx = NULL;
+ this_ctx = NULL;
+ depth = 0;
+ do {
+ rq = rq_list_pop(&plug->mq_list);
+
+ if (!this_hctx) {
+ this_hctx = rq->mq_hctx;
+ this_ctx = rq->mq_ctx;
+ } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
+ trace_block_unplug(this_hctx->queue, depth,
+ !from_schedule);
+ blk_mq_sched_insert_requests(this_hctx, this_ctx,
+ &list, from_schedule);
+ depth = 0;
+ this_hctx = rq->mq_hctx;
+ this_ctx = rq->mq_ctx;
+
+ }
+
+ list_add(&rq->queuelist, &list);
+ depth++;
+ } while (!rq_list_empty(plug->mq_list));
+
+ if (!list_empty(&list)) {
+ trace_block_unplug(this_hctx->queue, depth, !from_schedule);
+ blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
+ from_schedule);
+ }
}
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
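The ->queue_rqs() hook added above hands the entire plug list to the driver in one call, skipping per-request dispatch. A sketch of the driver side under that contract (hypothetical driver; a real implementation would post all commands first and notify the hardware once):

static void mydrv_queue_rqs(struct request **rqlist)
{
        struct request *req;

        while ((req = rq_list_pop(rqlist))) {
                blk_mq_start_request(req);
                /* post the command to the hardware queue */
        }
        /* ring the doorbell once for the whole batch */
}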
@@ -2469,21 +2670,6 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
hctx->queue->mq_ops->commit_rqs(hctx);
}
-static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
-{
- if (!plug->multiple_queues) {
- struct request *nxt = rq_list_peek(&plug->mq_list);
-
- if (nxt && nxt->q != rq->q)
- plug->multiple_queues = true;
- }
- if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
- plug->has_elevator = true;
- rq->rq_next = NULL;
- rq_list_add(&plug->mq_list, rq);
- plug->rq_count++;
-}
-
/*
* Allow 2x BLK_MAX_REQUEST_COUNT requests on plug queue for multiple
* queues. This is important for md arrays to benefit from merging
@@ -2496,12 +2682,33 @@ static inline unsigned short blk_plug_max_rq_count(struct blk_plug *plug)
return BLK_MAX_REQUEST_COUNT;
}
+static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
+{
+ struct request *last = rq_list_peek(&plug->mq_list);
+
+ if (!plug->rq_count) {
+ trace_block_plug(rq->q);
+ } else if (plug->rq_count >= blk_plug_max_rq_count(plug) ||
+ (!blk_queue_nomerges(rq->q) &&
+ blk_rq_bytes(last) >= BLK_PLUG_FLUSH_SIZE)) {
+ blk_mq_flush_plug_list(plug, false);
+ trace_block_plug(rq->q);
+ }
+
+ if (!plug->multiple_queues && last && last->q != rq->q)
+ plug->multiple_queues = true;
+ if (!plug->has_elevator && (rq->rq_flags & RQF_ELV))
+ plug->has_elevator = true;
+ rq->rq_next = NULL;
+ rq_list_add(&plug->mq_list, rq);
+ plug->rq_count++;
+}
+
static bool blk_mq_attempt_bio_merge(struct request_queue *q,
- struct bio *bio, unsigned int nr_segs,
- bool *same_queue_rq)
+ struct bio *bio, unsigned int nr_segs)
{
if (!blk_queue_nomerges(q) && bio_mergeable(bio)) {
- if (blk_attempt_plug_merge(q, bio, nr_segs, same_queue_rq))
+ if (blk_attempt_plug_merge(q, bio, nr_segs))
return true;
if (blk_mq_sched_bio_merge(q, bio, nr_segs))
return true;
@@ -2511,9 +2718,7 @@ static bool blk_mq_attempt_bio_merge(struct request_queue *q,
static struct request *blk_mq_get_new_requests(struct request_queue *q,
struct blk_plug *plug,
- struct bio *bio,
- unsigned int nsegs,
- bool *same_queue_rq)
+ struct bio *bio)
{
struct blk_mq_alloc_data data = {
.q = q,
@@ -2522,11 +2727,9 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
};
struct request *rq;
- if (blk_mq_attempt_bio_merge(q, bio, nsegs, same_queue_rq))
+ if (unlikely(bio_queue_enter(bio)))
return NULL;
- rq_qos_throttle(q, bio);
-
if (plug) {
data.nr_tags = plug->nr_ios;
plug->nr_ios = 1;
@@ -2536,64 +2739,33 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
rq = __blk_mq_alloc_requests(&data);
if (rq)
return rq;
-
rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
-
+ blk_queue_exit(q);
return NULL;
}
-static inline bool blk_mq_can_use_cached_rq(struct request *rq, struct bio *bio)
-{
- if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
- return false;
-
- if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
- return false;
-
- return true;
-}
-
-static inline struct request *blk_mq_get_request(struct request_queue *q,
- struct blk_plug *plug,
- struct bio *bio,
- unsigned int nsegs,
- bool *same_queue_rq)
+static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
+ struct blk_plug *plug, struct bio *bio)
{
struct request *rq;
- bool checked = false;
- if (plug) {
- rq = rq_list_peek(&plug->cached_rq);
- if (rq && rq->q == q) {
- if (unlikely(!submit_bio_checks(bio)))
- return NULL;
- if (blk_mq_attempt_bio_merge(q, bio, nsegs,
- same_queue_rq))
- return NULL;
- checked = true;
- if (!blk_mq_can_use_cached_rq(rq, bio))
- goto fallback;
- rq->cmd_flags = bio->bi_opf;
- plug->cached_rq = rq_list_next(rq);
- INIT_LIST_HEAD(&rq->queuelist);
- rq_qos_throttle(q, bio);
- return rq;
- }
- }
+ if (!plug)
+ return NULL;
+ rq = rq_list_peek(&plug->cached_rq);
+ if (!rq || rq->q != q)
+ return NULL;
-fallback:
- if (unlikely(bio_queue_enter(bio)))
+ if (blk_mq_get_hctx_type(bio->bi_opf) != rq->mq_hctx->type)
return NULL;
- if (unlikely(!checked && !submit_bio_checks(bio)))
- goto out_put;
- rq = blk_mq_get_new_requests(q, plug, bio, nsegs, same_queue_rq);
- if (rq)
- return rq;
-out_put:
- blk_queue_exit(q);
- return NULL;
+ if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
+ return NULL;
+
+ rq->cmd_flags = bio->bi_opf;
+ plug->cached_rq = rq_list_next(rq);
+ INIT_LIST_HEAD(&rq->queuelist);
+ return rq;
}
/**
@@ -2612,10 +2784,9 @@ out_put:
void blk_mq_submit_bio(struct bio *bio)
{
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+ struct blk_plug *plug = blk_mq_plug(q, bio);
const int is_sync = op_is_sync(bio->bi_opf);
struct request *rq;
- struct blk_plug *plug;
- bool same_queue_rq = false;
unsigned int nr_segs = 1;
blk_status_t ret;
@@ -2629,11 +2800,18 @@ void blk_mq_submit_bio(struct bio *bio)
if (!bio_integrity_prep(bio))
return;
- plug = blk_mq_plug(q, bio);
- rq = blk_mq_get_request(q, plug, bio, nr_segs, &same_queue_rq);
- if (unlikely(!rq))
+ if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
return;
+ rq_qos_throttle(q, bio);
+
+ rq = blk_mq_get_cached_request(q, plug, bio);
+ if (!rq) {
+ rq = blk_mq_get_new_requests(q, plug, bio);
+ if (unlikely(!rq))
+ return;
+ }
+
trace_block_getrq(bio);
rq_qos_track(q, rq, bio);
@@ -2653,69 +2831,215 @@ void blk_mq_submit_bio(struct bio *bio)
return;
}
- if (plug && (q->nr_hw_queues == 1 ||
- blk_mq_is_shared_tags(rq->mq_hctx->flags) ||
- q->mq_ops->commit_rqs || !blk_queue_nonrot(q))) {
- /*
- * Use plugging if we have a ->commit_rqs() hook as well, as
- * we know the driver uses bd->last in a smart fashion.
- *
- * Use normal plugging if this disk is slow HDD, as sequential
- * IO may benefit a lot from plug merging.
- */
- unsigned int request_count = plug->rq_count;
- struct request *last = NULL;
-
- if (!request_count) {
- trace_block_plug(q);
- } else if (!blk_queue_nomerges(q)) {
- last = rq_list_peek(&plug->mq_list);
- if (blk_rq_bytes(last) < BLK_PLUG_FLUSH_SIZE)
- last = NULL;
- }
-
- if (request_count >= blk_plug_max_rq_count(plug) || last) {
- blk_mq_flush_plug_list(plug, false);
- trace_block_plug(q);
- }
-
+ if (plug)
blk_add_rq_to_plug(plug, rq);
- } else if (rq->rq_flags & RQF_ELV) {
- /* Insert the request at the IO scheduler queue */
+ else if ((rq->rq_flags & RQF_ELV) ||
+ (rq->mq_hctx->dispatch_busy &&
+ (q->nr_hw_queues == 1 || !is_sync)))
blk_mq_sched_insert_request(rq, false, true, true);
- } else if (plug && !blk_queue_nomerges(q)) {
- struct request *next_rq = NULL;
+ else
+ blk_mq_run_dispatch_ops(rq->q,
+ blk_mq_try_issue_directly(rq->mq_hctx, rq));
+}
+/**
+ * blk_cloned_rq_check_limits - Helper function to check a cloned request
+ * for the new queue limits
+ * @q: the queue
+ * @rq: the request being checked
+ *
+ * Description:
+ * @rq may have been made based on weaker limitations of upper-level queues
+ * in request stacking drivers, and it may violate the limitation of @q.
+ * Since the block layer and the underlying device driver trust @rq
+ * after it is inserted to @q, it should be checked against @q before
+ * the insertion using this generic function.
+ *
+ * Request stacking drivers like request-based dm may change the queue
+ * limits when retrying requests on other queues. Those requests need
+ * to be checked against the new queue limits again during dispatch.
+ */
+static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
+ struct request *rq)
+{
+ unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
+
+ if (blk_rq_sectors(rq) > max_sectors) {
/*
- * We do limited plugging. If the bio can be merged, do that.
- * Otherwise the existing request in the plug list will be
- * issued. So the plug list will have one request at most
- * The plug list might get flushed before this. If that happens,
- * the plug list is empty, and same_queue_rq is invalid.
+ * A SCSI device does not have a good way to report whether
+ * Write Same/Zero is actually supported. If a device rejects
+ * a non-read/write command (discard, write same, etc.) the
+ * low-level device driver will set the relevant queue limit to
+ * 0 to prevent blk-lib from issuing more of the offending
+ * operations. Commands queued prior to the queue limit being
+ * reset need to be completed with BLK_STS_NOTSUPP to avoid I/O
+ * errors being propagated to upper layers.
*/
- if (same_queue_rq) {
- next_rq = rq_list_pop(&plug->mq_list);
- plug->rq_count--;
- }
- blk_add_rq_to_plug(plug, rq);
- trace_block_plug(q);
+ if (max_sectors == 0)
+ return BLK_STS_NOTSUPP;
+
+ printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
+ __func__, blk_rq_sectors(rq), max_sectors);
+ return BLK_STS_IOERR;
+ }
+
+ /*
+ * The queue settings related to segment counting may differ from the
+ * original queue.
+ */
+ rq->nr_phys_segments = blk_recalc_rq_segments(rq);
+ if (rq->nr_phys_segments > queue_max_segments(q)) {
+ printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
+ __func__, rq->nr_phys_segments, queue_max_segments(q));
+ return BLK_STS_IOERR;
+ }
+
+ return BLK_STS_OK;
+}
+
+/**
+ * blk_insert_cloned_request - Helper for stacking drivers to submit a request
+ * @q: the queue to submit the request
+ * @rq: the request being queued
+ */
+blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+{
+ blk_status_t ret;
+
+ ret = blk_cloned_rq_check_limits(q, rq);
+ if (ret != BLK_STS_OK)
+ return ret;
+
+ if (rq->q->disk &&
+ should_fail_request(rq->q->disk->part0, blk_rq_bytes(rq)))
+ return BLK_STS_IOERR;
+
+ if (blk_crypto_insert_cloned_request(rq))
+ return BLK_STS_IOERR;
- if (next_rq) {
- trace_block_unplug(q, 1, true);
- blk_mq_try_issue_directly(next_rq->mq_hctx, next_rq);
+ blk_account_io_start(rq);
+
+ /*
+ * Since we have a scheduler attached on the top device,
+ * bypass a potential scheduler on the bottom device for
+ * insert.
+ */
+ blk_mq_run_dispatch_ops(rq->q,
+ ret = blk_mq_request_issue_directly(rq, true));
+ if (ret)
+ blk_account_io_done(rq, ktime_get_ns());
+ return ret;
+}
+EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
+
+/**
+ * blk_rq_unprep_clone - Helper function to free all bios in a cloned request
+ * @rq: the clone request to be cleaned up
+ *
+ * Description:
+ * Free all bios in @rq for a cloned request.
+ */
+void blk_rq_unprep_clone(struct request *rq)
+{
+ struct bio *bio;
+
+ while ((bio = rq->bio) != NULL) {
+ rq->bio = bio->bi_next;
+
+ bio_put(bio);
+ }
+}
+EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
+
+/**
+ * blk_rq_prep_clone - Helper function to setup clone request
+ * @rq: the request to be setup
+ * @rq_src: original request to be cloned
+ * @bs: bio_set that bios for clone are allocated from
+ * @gfp_mask: memory allocation mask for bio
+ * @bio_ctr: setup function to be called for each clone bio.
+ * Returns %0 for success, non %0 for failure.
+ * @data: private data to be passed to @bio_ctr
+ *
+ * Description:
+ * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
+ * Note that the pages the original bios point to are not copied;
+ * the cloned bios simply point to the same pages. The cloned bios
+ * must therefore be completed before the original bios, which means
+ * the caller must complete @rq before @rq_src.
+ */
+int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *),
+ void *data)
+{
+ struct bio *bio, *bio_src;
+
+ if (!bs)
+ bs = &fs_bio_set;
+
+ __rq_for_each_bio(bio_src, rq_src) {
+ bio = bio_clone_fast(bio_src, gfp_mask, bs);
+ if (!bio)
+ goto free_and_out;
+ bio->bi_bdev = rq->q->disk->part0;
+
+ if (bio_ctr && bio_ctr(bio, bio_src, data))
+ goto free_and_out;
+
+ if (rq->bio) {
+ rq->biotail->bi_next = bio;
+ rq->biotail = bio;
+ } else {
+ rq->bio = rq->biotail = bio;
}
- } else if ((q->nr_hw_queues > 1 && is_sync) ||
- !rq->mq_hctx->dispatch_busy) {
- /*
- * There is no scheduler and we can try to send directly
- * to the hardware.
- */
- blk_mq_try_issue_directly(rq->mq_hctx, rq);
- } else {
- /* Default case. */
- blk_mq_sched_insert_request(rq, false, true, true);
+ bio = NULL;
+ }
+
+ /* Copy attributes of the original request to the clone request. */
+ rq->__sector = blk_rq_pos(rq_src);
+ rq->__data_len = blk_rq_bytes(rq_src);
+ if (rq_src->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ rq->rq_flags |= RQF_SPECIAL_PAYLOAD;
+ rq->special_vec = rq_src->special_vec;
}
+ rq->nr_phys_segments = rq_src->nr_phys_segments;
+ rq->ioprio = rq_src->ioprio;
+
+ if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0)
+ goto free_and_out;
+
+ return 0;
+
+free_and_out:
+ if (bio)
+ bio_put(bio);
+ blk_rq_unprep_clone(rq);
+
+ return -ENOMEM;
}
+EXPORT_SYMBOL_GPL(blk_rq_prep_clone);
+
+/*
+ * Steal bios from a request and add them to a bio list.
+ * The request must not have been partially completed before.
+ */
+void blk_steal_bios(struct bio_list *list, struct request *rq)
+{
+ if (rq->bio) {
+ if (list->tail)
+ list->tail->bi_next = rq->bio;
+ else
+ list->head = rq->bio;
+ list->tail = rq->biotail;
+
+ rq->bio = NULL;
+ rq->biotail = NULL;
+ }
+
+ rq->__data_len = 0;
+}
+EXPORT_SYMBOL_GPL(blk_steal_bios);
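Taken together, blk_rq_prep_clone(), blk_insert_cloned_request() and blk_steal_bios() form the request-stacking toolkit that moved here. A usage sketch modeled on request-based dm; the function name is hypothetical and @clone is assumed to be a request already allocated on the lower queue:

static blk_status_t stack_and_dispatch(struct request *rq_src,
                                       struct request *clone,
                                       struct request_queue *lower_q)
{
        /* a NULL bio_set means the clone bios come from fs_bio_set */
        if (blk_rq_prep_clone(clone, rq_src, NULL, GFP_ATOMIC, NULL, NULL))
                return BLK_STS_RESOURCE;

        return blk_insert_cloned_request(lower_q, clone);
}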
static size_t order_to_size(unsigned int order)
{
@@ -2743,7 +3067,7 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
unsigned long rq_addr = (unsigned long)rq;
if (rq_addr >= start && rq_addr < end) {
- WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
+ WARN_ON_ONCE(req_ref_read(rq) != 0);
cmpxchg(&drv_tags->rqs[i], rq, NULL);
}
}
@@ -2967,7 +3291,7 @@ static bool blk_mq_hctx_has_requests(struct blk_mq_hw_ctx *hctx)
static inline bool blk_mq_last_cpu_in_hctx(unsigned int cpu,
struct blk_mq_hw_ctx *hctx)
{
- if (cpumask_next_and(-1, hctx->cpumask, cpu_online_mask) != cpu)
+ if (cpumask_first_and(hctx->cpumask, cpu_online_mask) != cpu)
return false;
if (cpumask_next_and(cpu, hctx->cpumask, cpu_online_mask) < nr_cpu_ids)
return false;
@@ -3077,7 +3401,7 @@ static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
if (!tags)
return;
- WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
+ WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
for (i = 0; i < queue_depth; i++)
cmpxchg(&tags->rqs[i], flush_rq, NULL);
@@ -3131,20 +3455,6 @@ static void blk_mq_exit_hw_queues(struct request_queue *q,
}
}
-static int blk_mq_hw_ctx_size(struct blk_mq_tag_set *tag_set)
-{
- int hw_ctx_size = sizeof(struct blk_mq_hw_ctx);
-
- BUILD_BUG_ON(ALIGN(offsetof(struct blk_mq_hw_ctx, srcu),
- __alignof__(struct blk_mq_hw_ctx)) !=
- sizeof(struct blk_mq_hw_ctx));
-
- if (tag_set->flags & BLK_MQ_F_BLOCKING)
- hw_ctx_size += sizeof(struct srcu_struct);
-
- return hw_ctx_size;
-}
-
static int blk_mq_init_hctx(struct request_queue *q,
struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx, unsigned hctx_idx)
@@ -3182,7 +3492,7 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
struct blk_mq_hw_ctx *hctx;
gfp_t gfp = GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY;
- hctx = kzalloc_node(blk_mq_hw_ctx_size(set), gfp, node);
+ hctx = kzalloc_node(sizeof(struct blk_mq_hw_ctx), gfp, node);
if (!hctx)
goto fail_alloc_hctx;
@@ -3224,8 +3534,6 @@ blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set,
if (!hctx->fq)
goto free_bitmap;
- if (hctx->flags & BLK_MQ_F_BLOCKING)
- init_srcu_struct(hctx->srcu);
blk_mq_hctx_kobj_init(hctx);
return hctx;
@@ -3561,7 +3869,7 @@ static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
struct request_queue *q;
int ret;
- q = blk_alloc_queue(set->numa_node);
+ q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING);
if (!q)
return ERR_PTR(-ENOMEM);
q->queuedata = queuedata;
@@ -3710,6 +4018,9 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q)
{
+ WARN_ON_ONCE(blk_queue_has_srcu(q) !=
+ !!(set->flags & BLK_MQ_F_BLOCKING));
+
/* mark the queue as mq asap */
q->mq_ops = set->ops;
@@ -4246,11 +4557,10 @@ EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues);
/* Enable polling stats and return whether they were already enabled. */
static bool blk_poll_stats_enable(struct request_queue *q)
{
- if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
- blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q))
+ if (q->poll_stat)
return true;
- blk_stat_add_callback(q, q->poll_cb);
- return false;
+
+ return blk_stats_alloc_enable(q);
}
static void blk_mq_poll_stats_start(struct request_queue *q)
@@ -4259,8 +4569,7 @@ static void blk_mq_poll_stats_start(struct request_queue *q)
* We don't arm the callback if polling stats are not enabled or the
* callback is already active.
*/
- if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) ||
- blk_stat_is_active(q->poll_cb))
+ if (!q->poll_stat || blk_stat_is_active(q->poll_cb))
return;
blk_stat_activate_msecs(q->poll_cb, 100);
diff --git a/block/blk-mq.h b/block/blk-mq.h
index afcf9931a489..948791ea2a3e 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -65,9 +65,6 @@ void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list);
-
-/* Used by blk_insert_cloned_request() to issue request directly */
-blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
struct list_head *list);
@@ -377,5 +374,24 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
return __blk_mq_active_requests(hctx) < depth;
}
+/* run the code block in @dispatch_ops with rcu/srcu read lock held */
+#define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops) \
+do { \
+ if (!blk_queue_has_srcu(q)) { \
+ rcu_read_lock(); \
+ (dispatch_ops); \
+ rcu_read_unlock(); \
+ } else { \
+ int srcu_idx; \
+ \
+ might_sleep_if(check_sleep); \
+ srcu_idx = srcu_read_lock((q)->srcu); \
+ (dispatch_ops); \
+ srcu_read_unlock((q)->srcu, srcu_idx); \
+ } \
+} while (0)
+
+#define blk_mq_run_dispatch_ops(q, dispatch_ops) \
+ __blk_mq_run_dispatch_ops(q, true, dispatch_ops) \
#endif
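The dispatch_ops argument is an arbitrary statement, so converted callers can fold an assignment into the protected region. A minimal usage sketch from inside blk-mq.c, where blk_mq_request_issue_directly() is now static; RCU protects normal queues and SRCU protects queues created with BLK_MQ_F_BLOCKING:

static blk_status_t issue_under_dispatch_lock(struct request *rq)
{
        blk_status_t ret;

        blk_mq_run_dispatch_ops(rq->q,
                ret = blk_mq_request_issue_directly(rq, true));
        return ret;
}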
diff --git a/block/blk-pm.c b/block/blk-pm.c
index 17bd020268d4..2dad62cc1572 100644
--- a/block/blk-pm.c
+++ b/block/blk-pm.c
@@ -163,27 +163,19 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
/**
* blk_post_runtime_resume - Post runtime resume processing
* @q: the queue of the device
- * @err: return value of the device's runtime_resume function
*
* Description:
- * Update the queue's runtime status according to the return value of the
- * device's runtime_resume function. If the resume was successful, call
- * blk_set_runtime_active() to do the real work of restarting the queue.
+ * For historical reasons, this routine merely calls blk_set_runtime_active()
+ * to do the real work of restarting the queue. It does this regardless of
+ * whether the device's runtime-resume succeeded; even if it failed the
+ * driver or error handler will need to communicate with the device.
*
* This function should be called near the end of the device's
* runtime_resume callback.
*/
-void blk_post_runtime_resume(struct request_queue *q, int err)
+void blk_post_runtime_resume(struct request_queue *q)
{
- if (!q->dev)
- return;
- if (!err) {
- blk_set_runtime_active(q);
- } else {
- spin_lock_irq(&q->queue_lock);
- q->rpm_status = RPM_SUSPENDED;
- spin_unlock_irq(&q->queue_lock);
- }
+ blk_set_runtime_active(q);
}
EXPORT_SYMBOL(blk_post_runtime_resume);
@@ -201,7 +193,7 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
* runtime PM status and re-enable peeking requests from the queue. It
* should be called before first request is added to the queue.
*
- * This function is also called by blk_post_runtime_resume() for successful
+ * This function is also called by blk_post_runtime_resume() for
* runtime resumes. It does everything necessary to restart the queue.
*/
void blk_set_runtime_active(struct request_queue *q)
diff --git a/block/blk-stat.c b/block/blk-stat.c
index ae3dd1fb8e61..2ea01b5c1aca 100644
--- a/block/blk-stat.c
+++ b/block/blk-stat.c
@@ -15,7 +15,7 @@
struct blk_queue_stats {
struct list_head callbacks;
spinlock_t lock;
- bool enable_accounting;
+ int accounting;
};
void blk_rq_stat_init(struct blk_rq_stat *stat)
@@ -161,7 +161,7 @@ void blk_stat_remove_callback(struct request_queue *q,
spin_lock_irqsave(&q->stats->lock, flags);
list_del_rcu(&cb->list);
- if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
+ if (list_empty(&q->stats->callbacks) && !q->stats->accounting)
blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
spin_unlock_irqrestore(&q->stats->lock, flags);
@@ -184,13 +184,24 @@ void blk_stat_free_callback(struct blk_stat_callback *cb)
call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}
+void blk_stat_disable_accounting(struct request_queue *q)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->stats->lock, flags);
+ if (!--q->stats->accounting)
+ blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
+ spin_unlock_irqrestore(&q->stats->lock, flags);
+}
+EXPORT_SYMBOL_GPL(blk_stat_disable_accounting);
+
void blk_stat_enable_accounting(struct request_queue *q)
{
unsigned long flags;
spin_lock_irqsave(&q->stats->lock, flags);
- q->stats->enable_accounting = true;
- blk_queue_flag_set(QUEUE_FLAG_STATS, q);
+ if (!q->stats->accounting++)
+ blk_queue_flag_set(QUEUE_FLAG_STATS, q);
spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);
@@ -205,7 +216,7 @@ struct blk_queue_stats *blk_alloc_queue_stats(void)
INIT_LIST_HEAD(&stats->callbacks);
spin_lock_init(&stats->lock);
- stats->enable_accounting = false;
+ stats->accounting = 0;
return stats;
}
@@ -219,3 +230,21 @@ void blk_free_queue_stats(struct blk_queue_stats *stats)
kfree(stats);
}
+
+bool blk_stats_alloc_enable(struct request_queue *q)
+{
+ struct blk_rq_stat *poll_stat;
+
+ poll_stat = kcalloc(BLK_MQ_POLL_STATS_BKTS, sizeof(*poll_stat),
+ GFP_ATOMIC);
+ if (!poll_stat)
+ return false;
+
+ if (cmpxchg(&q->poll_stat, NULL, poll_stat) != NULL) {
+ kfree(poll_stat);
+ return true;
+ }
+
+ blk_stat_add_callback(q, q->poll_cb);
+ return false;
+}
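Turning the enable flag into a counter lets enable/disable pairs from independent users nest correctly, which the old boolean could not express. An illustrative sequence with hypothetical call sites:

blk_stat_enable_accounting(q);  /* user A: accounting 0 -> 1, QUEUE_FLAG_STATS set */
blk_stat_enable_accounting(q);  /* user B: accounting 1 -> 2 */
blk_stat_disable_accounting(q); /* user B done: 2 -> 1, flag stays set */
blk_stat_disable_accounting(q); /* user A done: 1 -> 0, flag cleared */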
diff --git a/block/blk-stat.h b/block/blk-stat.h
index 17b47a86eefb..17e1eb4ec7e2 100644
--- a/block/blk-stat.h
+++ b/block/blk-stat.h
@@ -64,11 +64,13 @@ struct blk_stat_callback {
struct blk_queue_stats *blk_alloc_queue_stats(void);
void blk_free_queue_stats(struct blk_queue_stats *);
+bool blk_stats_alloc_enable(struct request_queue *q);
void blk_stat_add(struct request *rq, u64 now);
/* record time/size info in request but not add a callback */
void blk_stat_enable_accounting(struct request_queue *q);
+void blk_stat_disable_accounting(struct request_queue *q);
/**
* blk_stat_alloc_callback() - Allocate a block statistics callback.
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index cd75b0f73dc6..9f32882ceb2f 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -16,6 +16,7 @@
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
+#include "blk-mq-sched.h"
#include "blk-wbt.h"
#include "blk-throttle.h"
@@ -734,7 +735,8 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
{
struct request_queue *q = container_of(rcu_head, struct request_queue,
rcu_head);
- kmem_cache_free(blk_requestq_cachep, q);
+
+ kmem_cache_free(blk_get_queue_kmem_cache(blk_queue_has_srcu(q)), q);
}
/* Unconfigure the I/O scheduler and dissociate from the cgroup controller. */
@@ -747,7 +749,7 @@ static void blk_exit_queue(struct request_queue *q)
*/
if (q->elevator) {
ioc_clear_queue(q);
- __elevator_exit(q, q->elevator);
+ elevator_exit(q);
}
/*
@@ -785,14 +787,15 @@ static void blk_release_queue(struct kobject *kobj)
might_sleep();
- if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
+ if (q->poll_stat)
blk_stat_remove_callback(q, q->poll_cb);
blk_stat_free_callback(q->poll_cb);
- blk_free_queue_stats(q->stats);
-
blk_exit_queue(q);
+ blk_free_queue_stats(q->stats);
+ kfree(q->poll_stat);
+
blk_queue_free_zone_bitmaps(q);
if (queue_is_mq(q))
@@ -808,6 +811,9 @@ static void blk_release_queue(struct kobject *kobj)
bioset_exit(&q->bio_split);
+ if (blk_queue_has_srcu(q))
+ cleanup_srcu_struct(q->srcu);
+
ida_simple_remove(&blk_queue_ida, q->id);
call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
@@ -884,7 +890,6 @@ int blk_register_queue(struct gendisk *disk)
kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
mutex_unlock(&q->sysfs_lock);
- ret = 0;
unlock:
mutex_unlock(&q->sysfs_dir_lock);
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 39bb6e68a9a2..7c462c006b26 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -13,6 +13,7 @@
#include <linux/blk-cgroup.h>
#include "blk.h"
#include "blk-cgroup-rwstat.h"
+#include "blk-stat.h"
#include "blk-throttle.h"
/* Max dispatch from a group in 1 round */
diff --git a/block/blk.h b/block/blk.h
index ccde6e6f1736..8bd43b3ad33d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -2,15 +2,10 @@
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H
-#include <linux/idr.h>
-#include <linux/blk-mq.h>
-#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
#include <linux/memblock.h> /* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"
-#include "blk-mq.h"
-#include "blk-mq-sched.h"
struct elevator_type;
@@ -32,15 +27,10 @@ struct blk_flush_queue {
};
extern struct kmem_cache *blk_requestq_cachep;
+extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;
-static inline struct blk_flush_queue *
-blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
-{
- return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
-}
-
static inline void __blk_get_queue(struct request_queue *q)
{
kobject_get(&q->kobj);
@@ -250,16 +240,13 @@ static inline void blk_integrity_del(struct gendisk *disk)
unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
-void blk_print_req_error(struct request *req, blk_status_t status);
+const char *blk_status_to_str(blk_status_t status);
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
- unsigned int nr_segs, bool *same_queue_rq);
+ unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
struct bio *bio, unsigned int nr_segs);
-void __blk_account_io_start(struct request *req);
-void __blk_account_io_done(struct request *req, u64 now);
-
/*
* Plug flush limits
*/
@@ -275,19 +262,10 @@ void blk_insert_flush(struct request *rq);
int elevator_switch_mq(struct request_queue *q,
struct elevator_type *new_e);
-void __elevator_exit(struct request_queue *, struct elevator_queue *);
+void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);
-static inline void elevator_exit(struct request_queue *q,
- struct elevator_queue *e)
-{
- lockdep_assert_held(&q->sysfs_lock);
-
- blk_mq_sched_free_rqs(q);
- __elevator_exit(q, e);
-}
-
ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
@@ -347,26 +325,10 @@ int blk_dev_init(void);
*/
static inline bool blk_do_io_stat(struct request *rq)
{
- return (rq->rq_flags & RQF_IO_STAT) && rq->rq_disk;
-}
-
-static inline void blk_account_io_done(struct request *req, u64 now)
-{
- /*
- * Account IO completion. flush_rq isn't accounted as a
- * normal IO on queueing nor completion. Accounting the
- * containing request is enough.
- */
- if (blk_do_io_stat(req) && req->part &&
- !(req->rq_flags & RQF_FLUSH_SEQ))
- __blk_account_io_done(req, now);
+ return (rq->rq_flags & RQF_IO_STAT) && rq->q->disk;
}
-static inline void blk_account_io_start(struct request *req)
-{
- if (blk_do_io_stat(req))
- __blk_account_io_start(req);
-}
+void update_io_ticks(struct block_device *part, unsigned long now, bool end);
static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
@@ -402,13 +364,15 @@ static inline unsigned int bio_aligned_discard_max_sectors(
/*
* Internal io_context interface
*/
-void get_io_context(struct io_context *ioc);
-struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
-struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
- gfp_t gfp_mask);
+struct io_cq *ioc_find_get_icq(struct request_queue *q);
+struct io_cq *ioc_lookup_icq(struct request_queue *q);
+#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
-
-int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);
+#else
+static inline void ioc_clear_queue(struct request_queue *q)
+{
+}
+#endif /* CONFIG_BLK_ICQ */
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
@@ -467,7 +431,15 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
struct page *page, unsigned int len, unsigned int offset,
unsigned int max_sectors, bool *same_page);
-struct request_queue *blk_alloc_queue(int node_id);
+static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
+{
+ if (srcu)
+ return blk_requestq_srcu_cachep;
+ return blk_requestq_cachep;
+}
+struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);
+
+int disk_scan_partitions(struct gendisk *disk, fmode_t mode);
int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
@@ -493,4 +465,45 @@ int disk_register_independent_access_ranges(struct gendisk *disk,
struct blk_independent_access_ranges *new_iars);
void disk_unregister_independent_access_ranges(struct gendisk *disk);
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+bool should_fail_request(struct block_device *part, unsigned int bytes);
+#else /* CONFIG_FAIL_MAKE_REQUEST */
+static inline bool should_fail_request(struct block_device *part,
+ unsigned int bytes)
+{
+ return false;
+}
+#endif /* CONFIG_FAIL_MAKE_REQUEST */
+
+/*
+ * Optimized request reference counting. Ideally we'd make timeouts be more
+ * clever, as that's the only reason we need references at all... But until
+ * this happens, this is faster than using refcount_t. Also see:
+ *
+ * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
+ */
+#define req_ref_zero_or_close_to_overflow(req) \
+ ((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
+
+static inline bool req_ref_inc_not_zero(struct request *req)
+{
+ return atomic_inc_not_zero(&req->ref);
+}
+
+static inline bool req_ref_put_and_test(struct request *req)
+{
+ WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+ return atomic_dec_and_test(&req->ref);
+}
+
+static inline void req_ref_set(struct request *req, int value)
+{
+ atomic_set(&req->ref, value);
+}
+
+static inline int req_ref_read(struct request *req)
+{
+ return atomic_read(&req->ref);
+}
+
#endif /* BLK_INTERNAL_H */
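
The req_ref helpers above swap refcount_t for a bare atomic_t with a manual saturation warning. A minimal user-space analogue of the get/put discipline, assuming C11 atomics and a stand-in struct request (the kernel helpers above are the real API; this only illustrates the pairing):

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct request {			/* stand-in for the kernel struct */
		atomic_uint ref;
	};

	/* Take a reference only if the request is still live (ref > 0). */
	static int req_ref_inc_not_zero(struct request *req)
	{
		unsigned int old = atomic_load(&req->ref);

		while (old) {
			if (atomic_compare_exchange_weak(&req->ref, &old, old + 1))
				return 1;
		}
		return 0;
	}

	/* Drop a reference; returns nonzero when the last one is gone. */
	static int req_ref_put_and_test(struct request *req)
	{
		return atomic_fetch_sub(&req->ref, 1) == 1;
	}

	int main(void)
	{
		struct request rq = { .ref = 1 };	/* owner holds one ref */

		assert(req_ref_inc_not_zero(&rq));	/* e.g. the timeout path */
		assert(!req_ref_put_and_test(&rq));	/* timeout drops its ref */
		if (req_ref_put_and_test(&rq))		/* owner drops the last */
			puts("last reference dropped, free the request");
		return 0;
	}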
diff --git a/block/bsg-lib.c b/block/bsg-lib.c
index 10aa378702fa..acfe1357bf6c 100644
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -92,7 +92,7 @@ static int bsg_transport_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
goto out_unmap_bidi_rq;
bio = rq->bio;
- blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
+ blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
/*
* The assignments below don't make much sense, but are kept for
diff --git a/block/elevator.c b/block/elevator.c
index 19a78d5516ba..482df2a350fc 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -188,8 +188,10 @@ static void elevator_release(struct kobject *kobj)
kfree(e);
}
-void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
+void elevator_exit(struct request_queue *q)
{
+ struct elevator_queue *e = q->elevator;
+
mutex_lock(&e->sysfs_lock);
blk_mq_exit_sched(q, e);
mutex_unlock(&e->sysfs_lock);
@@ -523,8 +525,6 @@ void elv_unregister_queue(struct request_queue *q)
kobject_del(&e->kobj);
e->registered = 0;
- /* Re-enable throttling in case elevator disabled it */
- wbt_enable_default(q);
}
}
@@ -595,7 +595,8 @@ int elevator_switch_mq(struct request_queue *q,
elv_unregister_queue(q);
ioc_clear_queue(q);
- elevator_exit(q, q->elevator);
+ blk_mq_sched_free_rqs(q);
+ elevator_exit(q);
}
ret = blk_mq_init_sched(q, new_e);
@@ -605,7 +606,8 @@ int elevator_switch_mq(struct request_queue *q,
if (new_e) {
ret = elv_register_queue(q, true);
if (ret) {
- elevator_exit(q, q->elevator);
+ blk_mq_sched_free_rqs(q);
+ elevator_exit(q);
goto out;
}
}
diff --git a/block/fops.c b/block/fops.c
index 0da147edbd18..4f59e0f5bf30 100644
--- a/block/fops.c
+++ b/block/fops.c
@@ -568,20 +568,52 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
loff_t size = bdev_nr_bytes(bdev);
loff_t pos = iocb->ki_pos;
size_t shorted = 0;
- ssize_t ret;
+ ssize_t ret = 0;
+ size_t count;
if (unlikely(pos + iov_iter_count(to) > size)) {
if (pos >= size)
return 0;
size -= pos;
- if (iov_iter_count(to) > size) {
- shorted = iov_iter_count(to) - size;
- iov_iter_truncate(to, size);
+ shorted = iov_iter_count(to) - size;
+ iov_iter_truncate(to, size);
+ }
+
+ count = iov_iter_count(to);
+ if (!count)
+ goto reexpand; /* skip atime */
+
+ if (iocb->ki_flags & IOCB_DIRECT) {
+ struct address_space *mapping = iocb->ki_filp->f_mapping;
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (filemap_range_needs_writeback(mapping, pos,
+ pos + count - 1)) {
+ ret = -EAGAIN;
+ goto reexpand;
+ }
+ } else {
+ ret = filemap_write_and_wait_range(mapping, pos,
+ pos + count - 1);
+ if (ret < 0)
+ goto reexpand;
+ }
+
+ file_accessed(iocb->ki_filp);
+
+ ret = blkdev_direct_IO(iocb, to);
+ if (ret >= 0) {
+ iocb->ki_pos += ret;
+ count -= ret;
}
+ iov_iter_revert(to, count - iov_iter_count(to));
+ if (ret < 0 || !count)
+ goto reexpand;
}
- ret = generic_file_read_iter(iocb, to);
+ ret = filemap_read(iocb, to, ret);
+reexpand:
if (unlikely(shorted))
iov_iter_reexpand(to, iov_iter_count(to) + shorted);
return ret;
diff --git a/block/genhd.c b/block/genhd.c
index 30362aeacac4..9eca1f7d35c9 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -25,8 +25,10 @@
#include <linux/log2.h>
#include <linux/pm_runtime.h>
#include <linux/badblocks.h>
+#include <linux/part_stat.h>
#include "blk.h"
+#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
static struct kobject *block_depr;
@@ -372,17 +374,21 @@ void disk_uevent(struct gendisk *disk, enum kobject_action action)
}
EXPORT_SYMBOL_GPL(disk_uevent);
-static void disk_scan_partitions(struct gendisk *disk)
+int disk_scan_partitions(struct gendisk *disk, fmode_t mode)
{
struct block_device *bdev;
- if (!get_capacity(disk) || !disk_part_scan_enabled(disk))
- return;
+ if (disk->flags & (GENHD_FL_NO_PART | GENHD_FL_HIDDEN))
+ return -EINVAL;
+ if (disk->open_partitions)
+ return -EBUSY;
set_bit(GD_NEED_PART_SCAN, &disk->state);
- bdev = blkdev_get_by_dev(disk_devt(disk), FMODE_READ, NULL);
- if (!IS_ERR(bdev))
- blkdev_put(bdev, FMODE_READ);
+ bdev = blkdev_get_by_dev(disk_devt(disk), mode, NULL);
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
+ blkdev_put(bdev, mode);
+ return 0;
}
/**
@@ -425,6 +431,8 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
DISK_MAX_PARTS);
disk->minors = DISK_MAX_PARTS;
}
+ if (disk->first_minor + disk->minors > MINORMASK + 1)
+ return -EINVAL;
} else {
if (WARN_ON(disk->minors))
return -EINVAL;
@@ -434,13 +442,8 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
return ret;
disk->major = BLOCK_EXT_MAJOR;
disk->first_minor = ret;
- disk->flags |= GENHD_FL_EXT_DEVT;
}
- ret = disk_alloc_events(disk);
- if (ret)
- goto out_free_ext_minor;
-
/* delay uevents, until we scanned partition table */
dev_set_uevent_suppress(ddev, 1);
@@ -451,7 +454,12 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
ddev->devt = MKDEV(disk->major, disk->first_minor);
ret = device_add(ddev);
if (ret)
- goto out_disk_release_events;
+ goto out_free_ext_minor;
+
+ ret = disk_alloc_events(disk);
+ if (ret)
+ goto out_device_del;
+
if (!sysfs_deprecated) {
ret = sysfs_create_link(block_depr, &ddev->kobj,
kobject_name(&ddev->kobj));
@@ -490,14 +498,7 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
if (ret)
goto out_put_slave_dir;
- if (disk->flags & GENHD_FL_HIDDEN) {
- /*
- * Don't let hidden disks show up in /proc/partitions,
- * and don't bother scanning for partitions either.
- */
- disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
- disk->flags |= GENHD_FL_NO_PART_SCAN;
- } else {
+ if (!(disk->flags & GENHD_FL_HIDDEN)) {
ret = bdi_register(disk->bdi, "%u:%u",
disk->major, disk->first_minor);
if (ret)
@@ -509,7 +510,8 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
goto out_unregister_bdi;
bdev_add(disk->part0, ddev->devt);
- disk_scan_partitions(disk);
+ if (get_capacity(disk))
+ disk_scan_partitions(disk, FMODE_READ);
/*
* Announce the disk and partitions after all partitions are
@@ -539,8 +541,6 @@ out_del_block_link:
sysfs_remove_link(block_depr, dev_name(ddev));
out_device_del:
device_del(ddev);
-out_disk_release_events:
- disk_release_events(disk);
out_free_ext_minor:
if (disk->major == BLOCK_EXT_MAJOR)
blk_free_ext_minor(disk->first_minor);
@@ -549,6 +549,20 @@ out_free_ext_minor:
EXPORT_SYMBOL(device_add_disk);
/**
+ * blk_mark_disk_dead - mark a disk as dead
+ * @disk: disk to mark as dead
+ *
+ * Mark the disk as dead (e.g. surprise removed) and don't accept any new I/O
+ * to this disk.
+ */
+void blk_mark_disk_dead(struct gendisk *disk)
+{
+ set_bit(GD_DEAD, &disk->state);
+ blk_queue_start_drain(disk->queue);
+}
+EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
+
+/**
* del_gendisk - remove the gendisk
* @disk: the struct gendisk to remove
*
@@ -720,8 +734,7 @@ void __init printk_all_partitions(void)
* Don't show empty devices or things that have been
* suppressed
*/
- if (get_capacity(disk) == 0 ||
- (disk->flags & GENHD_FL_SUPPRESS_PARTITION_INFO))
+ if (get_capacity(disk) == 0 || (disk->flags & GENHD_FL_HIDDEN))
continue;
/*
@@ -814,11 +827,7 @@ static int show_partition(struct seq_file *seqf, void *v)
struct block_device *part;
unsigned long idx;
- /* Don't show non-partitionable removeable devices or empty devices */
- if (!get_capacity(sgp) || (!disk_max_parts(sgp) &&
- (sgp->flags & GENHD_FL_REMOVABLE)))
- return 0;
- if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
+ if (!get_capacity(sgp) || (sgp->flags & GENHD_FL_HIDDEN))
return 0;
rcu_read_lock();
@@ -874,7 +883,8 @@ static ssize_t disk_ext_range_show(struct device *dev,
{
struct gendisk *disk = dev_to_disk(dev);
- return sprintf(buf, "%d\n", disk_max_parts(disk));
+ return sprintf(buf, "%d\n",
+ (disk->flags & GENHD_FL_NO_PART) ? 1 : DISK_MAX_PARTS);
}
static ssize_t disk_removable_show(struct device *dev,
@@ -1343,7 +1353,7 @@ struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
struct request_queue *q;
struct gendisk *disk;
- q = blk_alloc_queue(node);
+ q = blk_alloc_queue(node, false);
if (!q)
return NULL;
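
blk_mark_disk_dead(), added in the genhd.c hunk above, lets a driver fail all new I/O the moment a device disappears. A hedged kernel-style sketch of a caller; my_dev and my_surprise_remove are hypothetical names, not part of this series:

	#include <linux/blkdev.h>
	#include <linux/genhd.h>

	struct my_dev {				/* hypothetical driver state */
		struct gendisk *disk;
	};

	static void my_surprise_remove(struct my_dev *dev)
	{
		blk_mark_disk_dead(dev->disk);	/* reject new I/O right away */
		del_gendisk(dev->disk);		/* then the usual teardown */
	}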
diff --git a/block/ioctl.c b/block/ioctl.c
index 0a1d10ac2e1a..4a86340133e4 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -82,31 +82,6 @@ static int compat_blkpg_ioctl(struct block_device *bdev,
}
#endif
-static int blkdev_reread_part(struct block_device *bdev, fmode_t mode)
-{
- struct block_device *tmp;
-
- if (!disk_part_scan_enabled(bdev->bd_disk) || bdev_is_partition(bdev))
- return -EINVAL;
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
- if (bdev->bd_disk->open_partitions)
- return -EBUSY;
-
- /*
- * Reopen the device to revalidate the driver state and force a
- * partition rescan.
- */
- mode &= ~FMODE_EXCL;
- set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
-
- tmp = blkdev_get_by_dev(bdev->bd_dev, mode, NULL);
- if (IS_ERR(tmp))
- return PTR_ERR(tmp);
- blkdev_put(tmp, mode);
- return 0;
-}
-
static int blk_ioctl_discard(struct block_device *bdev, fmode_t mode,
unsigned long arg, unsigned long flags)
{
@@ -522,7 +497,11 @@ static int blkdev_common_ioctl(struct block_device *bdev, fmode_t mode,
bdev->bd_disk->bdi->ra_pages = (arg * 512) / PAGE_SIZE;
return 0;
case BLKRRPART:
- return blkdev_reread_part(bdev, mode);
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (bdev_is_partition(bdev))
+ return -EINVAL;
+ return disk_scan_partitions(bdev->bd_disk, mode & ~FMODE_EXCL);
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACETEARDOWN:
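
With blkdev_reread_part() folded into disk_scan_partitions(), BLKRRPART keeps its contract but the checks now live in one place. A hypothetical user-space sketch (the device path is an assumption) that requests a rescan and runs into the same -EACCES/-EINVAL/-EBUSY cases the code above returns:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/fs.h>		/* BLKRRPART */

	int main(void)
	{
		int fd = open("/dev/sda", O_RDONLY);	/* whole disk, not a partition */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, BLKRRPART) < 0)
			perror("BLKRRPART");	/* EACCES, EINVAL, EBUSY, ... */
		close(fd);
		return 0;
	}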
diff --git a/block/ioprio.c b/block/ioprio.c
index 6f01d35a5145..2fe068fcaad5 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -22,46 +22,14 @@
*/
#include <linux/gfp.h>
#include <linux/kernel.h>
-#include <linux/export.h>
#include <linux/ioprio.h>
#include <linux/cred.h>
#include <linux/blkdev.h>
#include <linux/capability.h>
-#include <linux/sched/user.h>
-#include <linux/sched/task.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>
-int set_task_ioprio(struct task_struct *task, int ioprio)
-{
- int err;
- struct io_context *ioc;
- const struct cred *cred = current_cred(), *tcred;
-
- rcu_read_lock();
- tcred = __task_cred(task);
- if (!uid_eq(tcred->uid, cred->euid) &&
- !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) {
- rcu_read_unlock();
- return -EPERM;
- }
- rcu_read_unlock();
-
- err = security_task_setioprio(task, ioprio);
- if (err)
- return err;
-
- ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
- if (ioc) {
- ioc->ioprio = ioprio;
- put_io_context(ioc);
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(set_task_ioprio);
-
int ioprio_check_cap(int ioprio)
{
int class = IOPRIO_PRIO_CLASS(ioprio);
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index fdd74a4df56f..70ff2a599ef6 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -433,6 +433,7 @@ static void kyber_exit_sched(struct elevator_queue *e)
int i;
del_timer_sync(&kqd->timer);
+ blk_stat_disable_accounting(kqd->q);
for (i = 0; i < KYBER_NUM_DOMAINS; i++)
sbitmap_queue_free(&kqd->domain_tokens[i]);
diff --git a/block/mq-deadline.c b/block/mq-deadline.c
index 85d919bf60c7..3ed5eaf3446a 100644
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -865,7 +865,7 @@ SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
SHOW_JIFFIES(deadline_prio_aging_expire_show, dd->prio_aging_expire);
SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
SHOW_INT(deadline_front_merges_show, dd->front_merges);
-SHOW_INT(deadline_async_depth_show, dd->front_merges);
+SHOW_INT(deadline_async_depth_show, dd->async_depth);
SHOW_INT(deadline_fifo_batch_show, dd->fifo_batch);
#undef SHOW_INT
#undef SHOW_JIFFIES
@@ -895,7 +895,7 @@ STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MA
STORE_JIFFIES(deadline_prio_aging_expire_store, &dd->prio_aging_expire, 0, INT_MAX);
STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
-STORE_INT(deadline_async_depth_store, &dd->front_merges, 1, INT_MAX);
+STORE_INT(deadline_async_depth_store, &dd->async_depth, 1, INT_MAX);
STORE_INT(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX);
#undef STORE_FUNCTION
#undef STORE_INT
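
The mq-deadline hunks above are a plain copy-paste fix: the async_depth attribute was showing and storing dd->front_merges. After the change the sysfs file reflects the real field; a small user-space check, with the sysfs path as an illustrative assumption:

	#include <stdio.h>

	int main(void)
	{
		/* Illustrative path; pick a disk whose scheduler is mq-deadline. */
		const char *path = "/sys/block/sda/queue/iosched/async_depth";
		char buf[32];
		FILE *f = fopen(path, "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("async_depth = %s", buf);	/* the real field now */
		fclose(f);
		return 0;
	}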
diff --git a/block/partitions/core.c b/block/partitions/core.c
index 334b72ef1d73..c2a1635922b1 100644
--- a/block/partitions/core.c
+++ b/block/partitions/core.c
@@ -98,13 +98,12 @@ static void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
{
struct parsed_partitions *state;
- int nr;
+ int nr = DISK_MAX_PARTS;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
- nr = disk_max_parts(hd);
state->parts = vzalloc(array_size(nr, sizeof(state->parts[0])));
if (!state->parts) {
kfree(state);
@@ -326,7 +325,7 @@ static struct block_device *add_partition(struct gendisk *disk, int partno,
lockdep_assert_held(&disk->open_mutex);
- if (partno >= disk_max_parts(disk))
+ if (partno >= DISK_MAX_PARTS)
return ERR_PTR(-EINVAL);
/*
@@ -527,18 +526,15 @@ out_unlock:
static bool disk_unlock_native_capacity(struct gendisk *disk)
{
- const struct block_device_operations *bdops = disk->fops;
-
- if (bdops->unlock_native_capacity &&
- !(disk->flags & GENHD_FL_NATIVE_CAPACITY)) {
- printk(KERN_CONT "enabling native capacity\n");
- bdops->unlock_native_capacity(disk);
- disk->flags |= GENHD_FL_NATIVE_CAPACITY;
- return true;
- } else {
+ if (!disk->fops->unlock_native_capacity ||
+ test_and_set_bit(GD_NATIVE_CAPACITY, &disk->state)) {
printk(KERN_CONT "truncated\n");
return false;
}
+
+ printk(KERN_CONT "enabling native capacity\n");
+ disk->fops->unlock_native_capacity(disk);
+ return true;
}
void blk_drop_partitions(struct gendisk *disk)
@@ -607,7 +603,7 @@ static int blk_add_partitions(struct gendisk *disk)
struct parsed_partitions *state;
int ret = -EAGAIN, p;
- if (!disk_part_scan_enabled(disk))
+ if (disk->flags & GENHD_FL_NO_PART)
return 0;
state = check_partition(disk);
@@ -690,7 +686,7 @@ rescan:
* userspace for this particular setup.
*/
if (invalidate) {
- if (disk_part_scan_enabled(disk) ||
+ if (!(disk->flags & GENHD_FL_NO_PART) ||
!(disk->flags & GENHD_FL_REMOVABLE))
set_capacity(disk, 0);
}
diff --git a/certs/.gitignore b/certs/.gitignore
index 8c3763f80be3..9e42fe3e02f5 100644
--- a/certs/.gitignore
+++ b/certs/.gitignore
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
+/extract-cert
/x509_certificate_list
/x509_revocation_list
diff --git a/certs/Kconfig b/certs/Kconfig
index ae7f2e876a31..73d1350c223a 100644
--- a/certs/Kconfig
+++ b/certs/Kconfig
@@ -17,21 +17,19 @@ config MODULE_SIG_KEY
choice
prompt "Type of module signing key to be generated"
- default MODULE_SIG_KEY_TYPE_RSA
+ depends on MODULE_SIG || (IMA_APPRAISE_MODSIG && MODULES)
help
	  The type of module signing key to generate. This option
does not apply if a #PKCS11 URI is used.
config MODULE_SIG_KEY_TYPE_RSA
bool "RSA"
- depends on MODULE_SIG || (IMA_APPRAISE_MODSIG && MODULES)
help
Use an RSA key for module signing.
config MODULE_SIG_KEY_TYPE_ECDSA
bool "ECDSA"
select CRYPTO_ECDSA
- depends on MODULE_SIG || (IMA_APPRAISE_MODSIG && MODULES)
help
Use an elliptic curve key (NIST P384) for module signing. Consider
using a strong hash like sha256 or sha384 for hashing modules.
diff --git a/certs/Makefile b/certs/Makefile
index 279433783b10..3ea7fe60823f 100644
--- a/certs/Makefile
+++ b/certs/Makefile
@@ -6,31 +6,21 @@
obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o common.o
obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o common.o
obj-$(CONFIG_SYSTEM_REVOCATION_LIST) += revocation_certificates.o
-ifneq ($(CONFIG_SYSTEM_BLACKLIST_HASH_LIST),"")
+ifneq ($(CONFIG_SYSTEM_BLACKLIST_HASH_LIST),)
obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_hashes.o
else
obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_nohashes.o
endif
-ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
+quiet_cmd_extract_certs = CERT $@
+ cmd_extract_certs = $(obj)/extract-cert $(2) $@
-$(eval $(call config_filename,SYSTEM_TRUSTED_KEYS))
-
-# GCC doesn't include .incbin files in -MD generated dependencies (PR#66871)
$(obj)/system_certificates.o: $(obj)/x509_certificate_list
-# Cope with signing_key.x509 existing in $(srctree) not $(objtree)
-AFLAGS_system_certificates.o := -I$(srctree)
-
-quiet_cmd_extract_certs = EXTRACT_CERTS $(patsubst "%",%,$(2))
- cmd_extract_certs = scripts/extract-cert $(2) $@
+$(obj)/x509_certificate_list: $(CONFIG_SYSTEM_TRUSTED_KEYS) $(obj)/extract-cert FORCE
+ $(call if_changed,extract_certs,$(if $(CONFIG_SYSTEM_TRUSTED_KEYS),$<,""))
targets += x509_certificate_list
-$(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) FORCE
- $(call if_changed,extract_certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS))
-endif # CONFIG_SYSTEM_TRUSTED_KEYRING
-
-clean-files := x509_certificate_list .x509.list x509_revocation_list
ifeq ($(CONFIG_MODULE_SIG),y)
SIGN_KEY = y
@@ -50,103 +40,54 @@ ifdef SIGN_KEY
# fail and that the kernel may be used afterwards.
#
###############################################################################
-ifndef CONFIG_MODULE_SIG_HASH
-$(error Could not determine digest type to use from kernel config)
-endif
-
-redirect_openssl = 2>&1
-quiet_redirect_openssl = 2>&1
-silent_redirect_openssl = 2>/dev/null
-openssl_available = $(shell openssl help 2>/dev/null && echo yes)
# We do it this way rather than having a boolean option for enabling an
# external private key, because 'make randconfig' might enable such a
# boolean option and we unfortunately can't make it depend on !RANDCONFIG.
-ifeq ($(CONFIG_MODULE_SIG_KEY),"certs/signing_key.pem")
+ifeq ($(CONFIG_MODULE_SIG_KEY),certs/signing_key.pem)
-ifeq ($(openssl_available),yes)
-X509TEXT=$(shell openssl x509 -in "certs/signing_key.pem" -text 2>/dev/null)
-endif
+keytype-$(CONFIG_MODULE_SIG_KEY_TYPE_ECDSA) := -newkey ec -pkeyopt ec_paramgen_curve:secp384r1
-# Support user changing key type
-ifdef CONFIG_MODULE_SIG_KEY_TYPE_ECDSA
-keytype_openssl = -newkey ec -pkeyopt ec_paramgen_curve:secp384r1
-ifeq ($(openssl_available),yes)
-$(if $(findstring id-ecPublicKey,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
-endif
-endif # CONFIG_MODULE_SIG_KEY_TYPE_ECDSA
+quiet_cmd_gen_key = GENKEY $@
+ cmd_gen_key = openssl req -new -nodes -utf8 -$(CONFIG_MODULE_SIG_HASH) -days 36500 \
+ -batch -x509 -config $< \
+ -outform PEM -out $@ -keyout $@ $(keytype-y) 2>&1
-ifdef CONFIG_MODULE_SIG_KEY_TYPE_RSA
-ifeq ($(openssl_available),yes)
-$(if $(findstring rsaEncryption,$(X509TEXT)),,$(shell rm -f "certs/signing_key.pem"))
-endif
-endif # CONFIG_MODULE_SIG_KEY_TYPE_RSA
-
-$(obj)/signing_key.pem: $(obj)/x509.genkey
- @$(kecho) "###"
- @$(kecho) "### Now generating an X.509 key pair to be used for signing modules."
- @$(kecho) "###"
- @$(kecho) "### If this takes a long time, you might wish to run rngd in the"
- @$(kecho) "### background to keep the supply of entropy topped up. It"
- @$(kecho) "### needs to be run as root, and uses a hardware random"
- @$(kecho) "### number generator if one is available."
- @$(kecho) "###"
- $(Q)openssl req -new -nodes -utf8 -$(CONFIG_MODULE_SIG_HASH) -days 36500 \
- -batch -x509 -config $(obj)/x509.genkey \
- -outform PEM -out $(obj)/signing_key.pem \
- -keyout $(obj)/signing_key.pem \
- $(keytype_openssl) \
- $($(quiet)redirect_openssl)
- @$(kecho) "###"
- @$(kecho) "### Key pair generated."
- @$(kecho) "###"
+$(obj)/signing_key.pem: $(obj)/x509.genkey FORCE
+ $(call if_changed,gen_key)
+
+targets += signing_key.pem
+quiet_cmd_copy_x509_config = COPY $@
+ cmd_copy_x509_config = cat $(srctree)/$(src)/default_x509.genkey > $@
+
+# You can provide your own config file. If not present, copy the default one.
$(obj)/x509.genkey:
- @$(kecho) Generating X.509 key generation config
- @echo >$@ "[ req ]"
- @echo >>$@ "default_bits = 4096"
- @echo >>$@ "distinguished_name = req_distinguished_name"
- @echo >>$@ "prompt = no"
- @echo >>$@ "string_mask = utf8only"
- @echo >>$@ "x509_extensions = myexts"
- @echo >>$@
- @echo >>$@ "[ req_distinguished_name ]"
- @echo >>$@ "#O = Unspecified company"
- @echo >>$@ "CN = Build time autogenerated kernel key"
- @echo >>$@ "#emailAddress = unspecified.user@unspecified.company"
- @echo >>$@
- @echo >>$@ "[ myexts ]"
- @echo >>$@ "basicConstraints=critical,CA:FALSE"
- @echo >>$@ "keyUsage=digitalSignature"
- @echo >>$@ "subjectKeyIdentifier=hash"
- @echo >>$@ "authorityKeyIdentifier=keyid"
-endif # CONFIG_MODULE_SIG_KEY
+ $(call cmd,copy_x509_config)
-$(eval $(call config_filename,MODULE_SIG_KEY))
+endif # CONFIG_MODULE_SIG_KEY
# If CONFIG_MODULE_SIG_KEY isn't a PKCS#11 URI, depend on it
-ifeq ($(patsubst pkcs11:%,%,$(firstword $(MODULE_SIG_KEY_FILENAME))),$(firstword $(MODULE_SIG_KEY_FILENAME)))
-X509_DEP := $(MODULE_SIG_KEY_SRCPREFIX)$(MODULE_SIG_KEY_FILENAME)
+ifneq ($(filter-out pkcs11:%, $(CONFIG_MODULE_SIG_KEY)),)
+X509_DEP := $(CONFIG_MODULE_SIG_KEY)
endif
-# GCC PR#66871 again.
$(obj)/system_certificates.o: $(obj)/signing_key.x509
-targets += signing_key.x509
-$(obj)/signing_key.x509: scripts/extract-cert $(X509_DEP) FORCE
- $(call if_changed,extract_certs,$(MODULE_SIG_KEY_SRCPREFIX)$(CONFIG_MODULE_SIG_KEY))
+$(obj)/signing_key.x509: $(X509_DEP) $(obj)/extract-cert FORCE
+ $(call if_changed,extract_certs,$(if $(CONFIG_MODULE_SIG_KEY),$(if $(X509_DEP),$<,$(CONFIG_MODULE_SIG_KEY)),""))
endif # CONFIG_MODULE_SIG
-ifeq ($(CONFIG_SYSTEM_REVOCATION_LIST),y)
-
-$(eval $(call config_filename,SYSTEM_REVOCATION_KEYS))
+targets += signing_key.x509
$(obj)/revocation_certificates.o: $(obj)/x509_revocation_list
-quiet_cmd_extract_certs = EXTRACT_CERTS $(patsubst "%",%,$(2))
- cmd_extract_certs = scripts/extract-cert $(2) $@
+$(obj)/x509_revocation_list: $(CONFIG_SYSTEM_REVOCATION_KEYS) $(obj)/extract-cert FORCE
+ $(call if_changed,extract_certs,$(if $(CONFIG_SYSTEM_REVOCATION_KEYS),$<,""))
targets += x509_revocation_list
-$(obj)/x509_revocation_list: scripts/extract-cert $(SYSTEM_REVOCATION_KEYS_SRCPREFIX)$(SYSTEM_REVOCATION_KEYS_FILENAME) FORCE
- $(call if_changed,extract_certs,$(SYSTEM_REVOCATION_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_REVOCATION_KEYS))
-endif
+
+hostprogs := extract-cert
+
+HOSTCFLAGS_extract-cert.o = $(shell pkg-config --cflags libcrypto 2> /dev/null)
+HOSTLDLIBS_extract-cert = $(shell pkg-config --libs libcrypto 2> /dev/null || echo -lcrypto)
diff --git a/certs/default_x509.genkey b/certs/default_x509.genkey
new file mode 100644
index 000000000000..d4c6628cb8e5
--- /dev/null
+++ b/certs/default_x509.genkey
@@ -0,0 +1,17 @@
+[ req ]
+default_bits = 4096
+distinguished_name = req_distinguished_name
+prompt = no
+string_mask = utf8only
+x509_extensions = myexts
+
+[ req_distinguished_name ]
+#O = Unspecified company
+CN = Build time autogenerated kernel key
+#emailAddress = unspecified.user@unspecified.company
+
+[ myexts ]
+basicConstraints=critical,CA:FALSE
+keyUsage=digitalSignature
+subjectKeyIdentifier=hash
+authorityKeyIdentifier=keyid
diff --git a/scripts/extract-cert.c b/certs/extract-cert.c
index 3bc48c726c41..f7ef7862f207 100644
--- a/scripts/extract-cert.c
+++ b/certs/extract-cert.c
@@ -29,7 +29,7 @@ static __attribute__((noreturn))
void format(void)
{
fprintf(stderr,
- "Usage: scripts/extract-cert <source> <dest>\n");
+ "Usage: extract-cert <source> <dest>\n");
exit(2);
}
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 94bfa32cc6a1..442765219c37 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1928,5 +1928,3 @@ source "crypto/asymmetric_keys/Kconfig"
source "certs/Kconfig"
endif # if CRYPTO
-
-source "lib/crypto/Kconfig"
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index e1ea18536a5f..c8289b7a85ba 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -25,12 +25,9 @@ struct alg_type_list {
struct list_head list;
};
-static atomic_long_t alg_memory_allocated;
-
static struct proto alg_proto = {
.name = "ALG",
.owner = THIS_MODULE,
- .memory_allocated = &alg_memory_allocated,
.obj_size = sizeof(struct alg_sock),
};
diff --git a/crypto/algapi.c b/crypto/algapi.c
index a366cb3e8aa1..76fdaa16bd4a 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -1324,3 +1324,4 @@ module_exit(crypto_algapi_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cryptographic algorithms API");
+MODULE_SOFTDEP("pre: cryptomgr");
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 1814d2c5188a..eb5fe84efb83 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -67,7 +67,7 @@ out:
complete_all(&param->larval->completion);
crypto_alg_put(&param->larval->alg);
kfree(param);
- module_put_and_exit(0);
+ module_put_and_kthread_exit(0);
}
static int cryptomgr_schedule_probe(struct crypto_larval *larval)
@@ -190,7 +190,7 @@ skiptest:
crypto_alg_tested(param->driver, err);
kfree(param);
- module_put_and_exit(0);
+ module_put_and_kthread_exit(0);
}
static int cryptomgr_schedule_test(struct crypto_alg *alg)
diff --git a/crypto/api.c b/crypto/api.c
index cf0869dd130b..7ddfe946dd56 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -643,4 +643,3 @@ EXPORT_SYMBOL_GPL(crypto_req_done);
MODULE_DESCRIPTION("Cryptographic core API");
MODULE_LICENSE("GPL");
-MODULE_SOFTDEP("pre: cryptomgr");
diff --git a/crypto/blake2s_generic.c b/crypto/blake2s_generic.c
index 72fe480f9bd6..5f96a21f8788 100644
--- a/crypto/blake2s_generic.c
+++ b/crypto/blake2s_generic.c
@@ -15,12 +15,12 @@
static int crypto_blake2s_update_generic(struct shash_desc *desc,
const u8 *in, unsigned int inlen)
{
- return crypto_blake2s_update(desc, in, inlen, blake2s_compress_generic);
+ return crypto_blake2s_update(desc, in, inlen, true);
}
static int crypto_blake2s_final_generic(struct shash_desc *desc, u8 *out)
{
- return crypto_blake2s_final(desc, out, blake2s_compress_generic);
+ return crypto_blake2s_final(desc, out, true);
}
#define BLAKE2S_ALG(name, driver_name, digest_size) \
diff --git a/drivers/accessibility/speakup/speakup_acntpc.c b/drivers/accessibility/speakup/speakup_acntpc.c
index c1ec087dca13..023172ca22ef 100644
--- a/drivers/accessibility/speakup/speakup_acntpc.c
+++ b/drivers/accessibility/speakup/speakup_acntpc.c
@@ -247,7 +247,7 @@ static void synth_flush(struct spk_synth *synth)
static int synth_probe(struct spk_synth *synth)
{
unsigned int port_val = 0;
- int i = 0;
+ int i;
pr_info("Probing for %s.\n", synth->long_name);
if (port_forced) {
diff --git a/drivers/accessibility/speakup/speakup_dectlk.c b/drivers/accessibility/speakup/speakup_dectlk.c
index 580ec796816b..78ca4987e619 100644
--- a/drivers/accessibility/speakup/speakup_dectlk.c
+++ b/drivers/accessibility/speakup/speakup_dectlk.c
@@ -44,6 +44,7 @@ static struct var_t vars[] = {
{ CAPS_START, .u.s = {"[:dv ap 160] " } },
{ CAPS_STOP, .u.s = {"[:dv ap 100 ] " } },
{ RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } },
+ { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } },
{ INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
{ VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } },
{ PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } },
diff --git a/drivers/accessibility/speakup/speakup_dtlk.c b/drivers/accessibility/speakup/speakup_dtlk.c
index 92838d3ae9eb..a9dd5c45d237 100644
--- a/drivers/accessibility/speakup/speakup_dtlk.c
+++ b/drivers/accessibility/speakup/speakup_dtlk.c
@@ -316,7 +316,7 @@ static struct synth_settings *synth_interrogate(struct spk_synth *synth)
static int synth_probe(struct spk_synth *synth)
{
unsigned int port_val = 0;
- int i = 0;
+ int i;
struct synth_settings *sp;
pr_info("Probing for DoubleTalk.\n");
diff --git a/drivers/accessibility/speakup/speakup_keypc.c b/drivers/accessibility/speakup/speakup_keypc.c
index 311f4aa0be22..1618be87bff1 100644
--- a/drivers/accessibility/speakup/speakup_keypc.c
+++ b/drivers/accessibility/speakup/speakup_keypc.c
@@ -254,7 +254,7 @@ static void synth_flush(struct spk_synth *synth)
static int synth_probe(struct spk_synth *synth)
{
unsigned int port_val = 0;
- int i = 0;
+ int i;
pr_info("Probing for %s.\n", synth->long_name);
if (port_forced) {
diff --git a/drivers/accessibility/speakup/spk_ttyio.c b/drivers/accessibility/speakup/spk_ttyio.c
index 0d1f397cd896..08cf8a17754b 100644
--- a/drivers/accessibility/speakup/spk_ttyio.c
+++ b/drivers/accessibility/speakup/spk_ttyio.c
@@ -88,7 +88,7 @@ static int spk_ttyio_receive_buf2(struct tty_struct *tty,
}
if (!ldisc_data->buf_free)
- /* ttyio_in will tty_schedule_flip */
+ /* ttyio_in will tty_flip_buffer_push */
return 0;
/* Make sure the consumer has read buf before we have seen
@@ -312,7 +312,7 @@ static unsigned char ttyio_in(struct spk_synth *in_synth, int timeout)
mb();
ldisc_data->buf_free = true;
/* Let TTY push more characters */
- tty_schedule_flip(tty->port);
+ tty_flip_buffer_push(tty->port);
return rv;
}
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 60b5424bd318..273741dedfd2 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -11,6 +11,7 @@ menuconfig ACPI
depends on ARCH_SUPPORTS_ACPI
select PNP
select NLS
+ select CRC32
default y if X86
help
Advanced Configuration and Power Interface (ACPI) support for
@@ -59,6 +60,9 @@ config ACPI_SYSTEM_POWER_STATES_SUPPORT
config ACPI_CCA_REQUIRED
bool
+config ACPI_TABLE_LIB
+ bool
+
config ACPI_DEBUGGER
bool "AML debugger interface"
select ACPI_DEBUG
@@ -517,6 +521,28 @@ config ACPI_CONFIGFS
userspace. The configurable ACPI groups will be visible under
/config/acpi, assuming configfs is mounted under /config.
+config ACPI_PFRUT
+ tristate "ACPI Platform Firmware Runtime Update and Telemetry"
+ depends on 64BIT
+ help
+ This mechanism allows certain pieces of the platform firmware
+ to be updated on the fly while the system is running (runtime)
+	  without the need to restart it. This is key when the system
+	  must be available 100% of the time, cannot afford the downtime
+	  of a restart, or is doing work so important that it cannot be
+	  interrupted and waiting for it to complete is impractical.
+
+ The existing firmware code can be modified (driver update) or
+ extended by adding new code to the firmware (code injection).
+
+	  In addition, the telemetry driver allows user space to fetch telemetry
+ data from the firmware with the help of the Platform Firmware Runtime
+ Telemetry interface.
+
+ To compile the drivers as modules, choose M here:
+ the modules will be called pfr_update and pfr_telemetry.
+
if ARM64
source "drivers/acpi/arm64/Kconfig"
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 08c2d985c57c..bb757148e7ba 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -9,7 +9,7 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
# ACPI Boot-Time Table Parsing
#
ifeq ($(CONFIG_ACPI_CUSTOM_DSDT),y)
-tables.o: $(src)/../../include/$(subst $\",,$(CONFIG_ACPI_CUSTOM_DSDT_FILE)) ;
+tables.o: $(src)/../../include/$(CONFIG_ACPI_CUSTOM_DSDT_FILE) ;
endif
@@ -103,6 +103,7 @@ obj-$(CONFIG_ACPI_CPPC_LIB) += cppc_acpi.o
obj-$(CONFIG_ACPI_SPCR_TABLE) += spcr.o
obj-$(CONFIG_ACPI_DEBUGGER_USER) += acpi_dbg.o
obj-$(CONFIG_ACPI_PPTT) += pptt.o
+obj-$(CONFIG_ACPI_PFRUT) += pfr_update.o pfr_telemetry.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o
diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c
index 6e02448d15d9..e7934ba79b02 100644
--- a/drivers/acpi/acpi_apd.c
+++ b/drivers/acpi/acpi_apd.c
@@ -87,14 +87,23 @@ static int fch_misc_setup(struct apd_private_data *pdata)
if (ret < 0)
return -ENOENT;
- if (!acpi_dev_get_property(adev, "is-rv", ACPI_TYPE_INTEGER, &obj))
- clk_data->is_rv = obj->integer.value;
+ if (!acpi_dev_get_property(adev, "clk-name", ACPI_TYPE_STRING, &obj)) {
+ clk_data->name = devm_kzalloc(&adev->dev, obj->string.length,
+ GFP_KERNEL);
+
+ strcpy(clk_data->name, obj->string.pointer);
+ } else {
+ /* Set default name to mclk if entry missing in firmware */
+ clk_data->name = "mclk";
+ }
list_for_each_entry(rentry, &resource_list, node) {
clk_data->base = devm_ioremap(&adev->dev, rentry->res->start,
resource_size(rentry->res));
break;
}
+ if (!clk_data->base)
+ return -ENOMEM;
acpi_dev_free_resource_list(&resource_list);
diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c
index 41e3ebd204ff..a12b55d81209 100644
--- a/drivers/acpi/acpi_pcc.c
+++ b/drivers/acpi/acpi_pcc.c
@@ -31,7 +31,7 @@ struct pcc_data {
struct acpi_pcc_info ctx;
};
-struct acpi_pcc_info pcc_ctx;
+static struct acpi_pcc_info pcc_ctx;
static void pcc_rx_callback(struct mbox_client *cl, void *m)
{
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 3b23fb775ac4..f2f8f05662de 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1361,9 +1361,17 @@ static void __init arm_smmu_v3_pmcg_init_resources(struct resource *res,
res[0].start = pmcg->page0_base_address;
res[0].end = pmcg->page0_base_address + SZ_4K - 1;
res[0].flags = IORESOURCE_MEM;
- res[1].start = pmcg->page1_base_address;
- res[1].end = pmcg->page1_base_address + SZ_4K - 1;
- res[1].flags = IORESOURCE_MEM;
+ /*
+ * The initial version in DEN0049C lacked a way to describe register
+ * page 1, which makes it broken for most PMCG implementations; in
+ * that case, just let the driver fail gracefully if it expects to
+ * find a second memory resource.
+ */
+ if (node->revision > 0) {
+ res[1].start = pmcg->page1_base_address;
+ res[1].end = pmcg->page1_base_address + SZ_4K - 1;
+ res[1].flags = IORESOURCE_MEM;
+ }
if (pmcg->overflow_gsiv)
acpi_iort_register_irq(pmcg->overflow_gsiv, "overflow",
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 75a61626eddd..07f604832fd6 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -1043,6 +1043,7 @@ struct bus_type acpi_bus_type = {
.remove = acpi_device_remove,
.uevent = acpi_device_uevent,
};
+EXPORT_SYMBOL_GPL(acpi_bus_type);
/* --------------------------------------------------------------------------
Initialization/Cleanup
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a9d2de403c0c..866560cbb082 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -915,30 +915,31 @@ int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
- int ret_val = 0;
void __iomem *vaddr = NULL;
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
struct cpc_reg *reg = &reg_res->cpc_entry.reg;
if (reg_res->type == ACPI_TYPE_INTEGER) {
*val = reg_res->cpc_entry.int_value;
- return ret_val;
+ return 0;
}
*val = 0;
if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
u32 width = 8 << (reg->access_width - 1);
+ u32 val_u32;
acpi_status status;
status = acpi_os_read_port((acpi_io_address)reg->address,
- (u32 *)val, width);
+ &val_u32, width);
if (ACPI_FAILURE(status)) {
pr_debug("Error: Failed to read SystemIO port %llx\n",
reg->address);
return -EFAULT;
}
+ *val = val_u32;
return 0;
} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
@@ -966,10 +967,10 @@ static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
default:
pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
reg->bit_width, pcc_ss_id);
- ret_val = -EFAULT;
+ return -EFAULT;
}
- return ret_val;
+ return 0;
}
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
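
The cpc_read() fix above matters because acpi_os_read_port() fills a u32: writing through a casted u64 pointer updates only half of the destination, and the wrong half on big-endian. A user-space demonstration of the two patterns, with read_port_u32() as a made-up stand-in for the port accessor:

	#include <stdint.h>
	#include <stdio.h>

	static void read_port_u32(uint32_t *out)	/* stand-in for acpi_os_read_port() */
	{
		*out = 0x12345678u;
	}

	int main(void)
	{
		uint64_t val = 0xdeadbeefcafef00dull;
		uint32_t tmp;

		read_port_u32((uint32_t *)&val);	/* old pattern: half of val survives */
		printf("cast:   %#llx\n", (unsigned long long)val);

		read_port_u32(&tmp);			/* new pattern: bounce via u32 */
		val = tmp;
		printf("bounce: %#llx\n", (unsigned long long)val);
		return 0;
	}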
diff --git a/drivers/acpi/dptf/dptf_pch_fivr.c b/drivers/acpi/dptf/dptf_pch_fivr.c
index e7ab0fc90db9..c0da24c9f8c3 100644
--- a/drivers/acpi/dptf/dptf_pch_fivr.c
+++ b/drivers/acpi/dptf/dptf_pch_fivr.c
@@ -151,6 +151,7 @@ static int pch_fivr_remove(struct platform_device *pdev)
static const struct acpi_device_id pch_fivr_device_ids[] = {
{"INTC1045", 0},
{"INTC1049", 0},
+ {"INTC10A3", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index a24d5d7aa117..dc1f52a5b3f4 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -231,6 +231,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INTC1050", 0},
{"INTC1060", 0},
{"INTC1061", 0},
+ {"INTC10A4", 0},
+ {"INTC10A5", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c
index da5d5f0be2f2..42a556346548 100644
--- a/drivers/acpi/dptf/int340x_thermal.c
+++ b/drivers/acpi/dptf/int340x_thermal.c
@@ -37,6 +37,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
{"INTC1050"},
{"INTC1060"},
{"INTC1061"},
+ {"INTC10A0"},
+ {"INTC10A1"},
+ {"INTC10A2"},
+ {"INTC10A3"},
+ {"INTC10A4"},
+ {"INTC10A5"},
{""},
};
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 0077d2c85df8..46710380a402 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2066,6 +2066,16 @@ bool acpi_ec_dispatch_gpe(void)
return true;
/*
+ * Cancel the SCI wakeup and process all pending events in case there
+ * are any wakeup ones in there.
+ *
+ * Note that if any non-EC GPEs are active at this point, the SCI will
+ * retrigger after the rearming in acpi_s2idle_wake(), so no events
+ * should be missed by canceling the wakeup here.
+ */
+ pm_system_cancel_wakeup();
+
+ /*
* Dispatch the EC GPE in-band, but do not report wakeup in any case
* to allow the caller to process events properly after that.
*/
diff --git a/drivers/acpi/fan.h b/drivers/acpi/fan.h
index dc9a6efa514b..dd9bb8ca2244 100644
--- a/drivers/acpi/fan.h
+++ b/drivers/acpi/fan.h
@@ -10,4 +10,5 @@
{"INT3404", }, /* Fan */ \
{"INTC1044", }, /* Fan for Tiger Lake generation */ \
{"INTC1048", }, /* Fan for Alder Lake generation */ \
+ {"INTC10A2", }, /* Fan for Raptor Lake generation */ \
{"PNP0C0B", } /* Generic ACPI fan */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 1db3a2f81763..457e11d851b8 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -14,7 +14,7 @@
int early_acpi_osi_init(void);
int acpi_osi_init(void);
acpi_status acpi_os_initialize1(void);
-int acpi_scan_init(void);
+void acpi_scan_init(void);
#ifdef CONFIG_PCI
void acpi_pci_root_init(void);
void acpi_pci_link_init(void);
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 7dd80acf92c7..e5d7f2bda13f 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -678,10 +678,12 @@ static const char *spa_type_name(u16 type)
int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
+ guid_t guid;
int i;
+ import_guid(&guid, spa->range_guid);
for (i = 0; i < NFIT_UUID_MAX; i++)
- if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
+ if (guid_equal(to_nfit_uuid(i), &guid))
return i;
return -1;
}
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 6c884f3e8332..3b818ab186be 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -297,6 +297,47 @@ out_err_bad_srat:
out_err:
return -EINVAL;
}
+
+static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
+ void *arg, const unsigned long table_end)
+{
+ struct acpi_cedt_cfmws *cfmws;
+ int *fake_pxm = arg;
+ u64 start, end;
+ int node;
+
+ cfmws = (struct acpi_cedt_cfmws *)header;
+ start = cfmws->base_hpa;
+ end = cfmws->base_hpa + cfmws->window_size;
+
+ /* Skip if the SRAT already described the NUMA details for this HPA */
+ node = phys_to_target_node(start);
+ if (node != NUMA_NO_NODE)
+ return 0;
+
+ node = acpi_map_pxm_to_node(*fake_pxm);
+
+ if (node == NUMA_NO_NODE) {
+ pr_err("ACPI NUMA: Too many proximity domains while processing CFMWS.\n");
+ return -EINVAL;
+ }
+
+ if (numa_add_memblk(node, start, end) < 0) {
+ /* CXL driver must handle the NUMA_NO_NODE case */
+ pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
+ node, start, end);
+ }
+
+ /* Set the next available fake_pxm value */
+ (*fake_pxm)++;
+ return 0;
+}
+#else
+static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
+ void *arg, const unsigned long table_end)
+{
+ return 0;
+}
#endif /* defined(CONFIG_X86) || defined (CONFIG_ARM64) */
static int __init acpi_parse_slit(struct acpi_table_header *table)
@@ -441,7 +482,7 @@ acpi_table_parse_srat(enum acpi_srat_type id,
int __init acpi_numa_init(void)
{
- int cnt = 0;
+ int i, fake_pxm, cnt = 0;
if (acpi_disabled)
return -EINVAL;
@@ -477,6 +518,22 @@ int __init acpi_numa_init(void)
/* SLIT: System Locality Information Table */
acpi_table_parse(ACPI_SIG_SLIT, acpi_parse_slit);
+ /*
+ * CXL Fixed Memory Window Structures (CFMWS) must be parsed
+ * after the SRAT. Create NUMA Nodes for CXL memory ranges that
+ * are defined in the CFMWS and not already defined in the SRAT.
+ * Initialize a fake_pxm as the first available PXM to emulate.
+ */
+
+ /* fake_pxm is the next unused PXM value after SRAT parsing */
+ for (i = 0, fake_pxm = -1; i < MAX_NUMNODES - 1; i++) {
+ if (node_to_pxm_map[i] > fake_pxm)
+ fake_pxm = node_to_pxm_map[i];
+ }
+ fake_pxm++;
+ acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, acpi_parse_cfmws,
+ &fake_pxm);
+
if (cnt < 0)
return cnt;
else if (!parsed_numa_memblks)
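
The fake-PXM setup above hands CFMWS-only memory windows proximity domains the SRAT never used. A toy model of the scan, with made-up map contents (the real node_to_pxm_map is filled during SRAT parsing):

	#include <stdio.h>

	#define MAX_NUMNODES 8

	int main(void)
	{
		/* Illustrative contents: SRAT used PXMs 0, 1 and 4. */
		int node_to_pxm_map[MAX_NUMNODES] = { 0, 1, 4, -1, -1, -1, -1, -1 };
		int i, fake_pxm = -1;

		for (i = 0; i < MAX_NUMNODES - 1; i++)
			if (node_to_pxm_map[i] > fake_pxm)
				fake_pxm = node_to_pxm_map[i];
		fake_pxm++;	/* first PXM the SRAT never claimed: 5 here */
		printf("fake_pxm = %d\n", fake_pxm);
		return 0;
	}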
diff --git a/drivers/acpi/pfr_telemetry.c b/drivers/acpi/pfr_telemetry.c
new file mode 100644
index 000000000000..9abf350bd7a5
--- /dev/null
+++ b/drivers/acpi/pfr_telemetry.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI Platform Firmware Runtime Telemetry driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Chen Yu <yu.c.chen@intel.com>
+ *
+ * This driver allows user space to fetch telemetry data from the
+ * firmware with the help of the Platform Firmware Runtime Telemetry
+ * interface.
+ */
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <linux/uuid.h>
+
+#include <uapi/linux/pfrut.h>
+
+#define PFRT_LOG_EXEC_IDX 0
+#define PFRT_LOG_HISTORY_IDX 1
+
+#define PFRT_LOG_ERR 0
+#define PFRT_LOG_WARN 1
+#define PFRT_LOG_INFO 2
+#define PFRT_LOG_VERB 4
+
+#define PFRT_FUNC_SET_LEV 1
+#define PFRT_FUNC_GET_LEV 2
+#define PFRT_FUNC_GET_DATA 3
+
+#define PFRT_REVID_1 1
+#define PFRT_REVID_2 2
+#define PFRT_DEFAULT_REV_ID PFRT_REVID_1
+
+enum log_index {
+ LOG_STATUS_IDX = 0,
+ LOG_EXT_STATUS_IDX = 1,
+ LOG_MAX_SZ_IDX = 2,
+ LOG_CHUNK1_LO_IDX = 3,
+ LOG_CHUNK1_HI_IDX = 4,
+ LOG_CHUNK1_SZ_IDX = 5,
+ LOG_CHUNK2_LO_IDX = 6,
+ LOG_CHUNK2_HI_IDX = 7,
+ LOG_CHUNK2_SZ_IDX = 8,
+ LOG_ROLLOVER_CNT_IDX = 9,
+ LOG_RESET_CNT_IDX = 10,
+ LOG_NR_IDX
+};
+
+struct pfrt_log_device {
+ int index;
+ struct pfrt_log_info info;
+ struct device *parent_dev;
+ struct miscdevice miscdev;
+};
+
+/* pfrt_guid is the parameter for _DSM method */
+static const guid_t pfrt_log_guid =
+ GUID_INIT(0x75191659, 0x8178, 0x4D9D, 0xB8, 0x8F, 0xAC, 0x5E,
+ 0x5E, 0x93, 0xE8, 0xBF);
+
+static DEFINE_IDA(pfrt_log_ida);
+
+static inline struct pfrt_log_device *to_pfrt_log_dev(struct file *file)
+{
+ return container_of(file->private_data, struct pfrt_log_device, miscdev);
+}
+
+static int get_pfrt_log_data_info(struct pfrt_log_data_info *data_info,
+ struct pfrt_log_device *pfrt_log_dev)
+{
+ acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev);
+ union acpi_object *out_obj, in_obj, in_buf;
+ int ret = -EBUSY;
+
+ memset(data_info, 0, sizeof(*data_info));
+ memset(&in_obj, 0, sizeof(in_obj));
+ memset(&in_buf, 0, sizeof(in_buf));
+ in_obj.type = ACPI_TYPE_PACKAGE;
+ in_obj.package.count = 1;
+ in_obj.package.elements = &in_buf;
+ in_buf.type = ACPI_TYPE_INTEGER;
+ in_buf.integer.value = pfrt_log_dev->info.log_type;
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid,
+ pfrt_log_dev->info.log_revid, PFRT_FUNC_GET_DATA,
+ &in_obj, ACPI_TYPE_PACKAGE);
+ if (!out_obj)
+ return -EINVAL;
+
+ if (out_obj->package.count < LOG_NR_IDX ||
+ out_obj->package.elements[LOG_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_MAX_SZ_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_CHUNK1_LO_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_CHUNK1_HI_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_CHUNK1_SZ_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_CHUNK2_LO_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_CHUNK2_HI_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_CHUNK2_SZ_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_ROLLOVER_CNT_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[LOG_RESET_CNT_IDX].type != ACPI_TYPE_INTEGER)
+ goto free_acpi_buffer;
+
+ data_info->status = out_obj->package.elements[LOG_STATUS_IDX].integer.value;
+ data_info->ext_status =
+ out_obj->package.elements[LOG_EXT_STATUS_IDX].integer.value;
+ if (data_info->status != DSM_SUCCEED) {
+ dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", data_info->status);
+ dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n",
+ data_info->ext_status);
+ goto free_acpi_buffer;
+ }
+
+ data_info->max_data_size =
+ out_obj->package.elements[LOG_MAX_SZ_IDX].integer.value;
+ data_info->chunk1_addr_lo =
+ out_obj->package.elements[LOG_CHUNK1_LO_IDX].integer.value;
+ data_info->chunk1_addr_hi =
+ out_obj->package.elements[LOG_CHUNK1_HI_IDX].integer.value;
+ data_info->chunk1_size =
+ out_obj->package.elements[LOG_CHUNK1_SZ_IDX].integer.value;
+ data_info->chunk2_addr_lo =
+ out_obj->package.elements[LOG_CHUNK2_LO_IDX].integer.value;
+ data_info->chunk2_addr_hi =
+ out_obj->package.elements[LOG_CHUNK2_HI_IDX].integer.value;
+ data_info->chunk2_size =
+ out_obj->package.elements[LOG_CHUNK2_SZ_IDX].integer.value;
+ data_info->rollover_cnt =
+ out_obj->package.elements[LOG_ROLLOVER_CNT_IDX].integer.value;
+ data_info->reset_cnt =
+ out_obj->package.elements[LOG_RESET_CNT_IDX].integer.value;
+
+ ret = 0;
+
+free_acpi_buffer:
+ kfree(out_obj);
+
+ return ret;
+}
+
+static int set_pfrt_log_level(int level, struct pfrt_log_device *pfrt_log_dev)
+{
+ acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev);
+ union acpi_object *out_obj, *obj, in_obj, in_buf;
+ enum pfru_dsm_status status, ext_status;
+ int ret = 0;
+
+ memset(&in_obj, 0, sizeof(in_obj));
+ memset(&in_buf, 0, sizeof(in_buf));
+ in_obj.type = ACPI_TYPE_PACKAGE;
+ in_obj.package.count = 1;
+ in_obj.package.elements = &in_buf;
+ in_buf.type = ACPI_TYPE_INTEGER;
+ in_buf.integer.value = level;
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid,
+ pfrt_log_dev->info.log_revid, PFRT_FUNC_SET_LEV,
+ &in_obj, ACPI_TYPE_PACKAGE);
+ if (!out_obj)
+ return -EINVAL;
+
+ obj = &out_obj->package.elements[0];
+ status = obj->integer.value;
+ if (status != DSM_SUCCEED) {
+ obj = &out_obj->package.elements[1];
+ ext_status = obj->integer.value;
+ dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", status);
+ dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n", ext_status);
+ ret = -EBUSY;
+ }
+
+ kfree(out_obj);
+
+ return ret;
+}
+
+static int get_pfrt_log_level(struct pfrt_log_device *pfrt_log_dev)
+{
+ acpi_handle handle = ACPI_HANDLE(pfrt_log_dev->parent_dev);
+ union acpi_object *out_obj, *obj;
+ enum pfru_dsm_status status, ext_status;
+ int ret = -EBUSY;
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &pfrt_log_guid,
+ pfrt_log_dev->info.log_revid, PFRT_FUNC_GET_LEV,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!out_obj)
+ return -EINVAL;
+
+ obj = &out_obj->package.elements[0];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ goto free_acpi_buffer;
+
+ status = obj->integer.value;
+ if (status != DSM_SUCCEED) {
+ obj = &out_obj->package.elements[1];
+ ext_status = obj->integer.value;
+ dev_dbg(pfrt_log_dev->parent_dev, "Error Status:%d\n", status);
+ dev_dbg(pfrt_log_dev->parent_dev, "Error Extend Status:%d\n", ext_status);
+ goto free_acpi_buffer;
+ }
+
+ obj = &out_obj->package.elements[2];
+ if (obj->type != ACPI_TYPE_INTEGER)
+ goto free_acpi_buffer;
+
+ ret = obj->integer.value;
+
+free_acpi_buffer:
+ kfree(out_obj);
+
+ return ret;
+}
+
+static int valid_log_level(u32 level)
+{
+ return level == PFRT_LOG_ERR || level == PFRT_LOG_WARN ||
+ level == PFRT_LOG_INFO || level == PFRT_LOG_VERB;
+}
+
+static int valid_log_type(u32 type)
+{
+ return type == PFRT_LOG_EXEC_IDX || type == PFRT_LOG_HISTORY_IDX;
+}
+
+static inline int valid_log_revid(u32 id)
+{
+ return id == PFRT_REVID_1 || id == PFRT_REVID_2;
+}
+
+static long pfrt_log_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct pfrt_log_device *pfrt_log_dev = to_pfrt_log_dev(file);
+ struct pfrt_log_data_info data_info;
+ struct pfrt_log_info info;
+ void __user *p;
+ int ret = 0;
+
+ p = (void __user *)arg;
+
+ switch (cmd) {
+ case PFRT_LOG_IOC_SET_INFO:
+ if (copy_from_user(&info, p, sizeof(info)))
+ return -EFAULT;
+
+ if (valid_log_revid(info.log_revid))
+ pfrt_log_dev->info.log_revid = info.log_revid;
+
+ if (valid_log_level(info.log_level)) {
+ ret = set_pfrt_log_level(info.log_level, pfrt_log_dev);
+ if (ret < 0)
+ return ret;
+
+ pfrt_log_dev->info.log_level = info.log_level;
+ }
+
+ if (valid_log_type(info.log_type))
+ pfrt_log_dev->info.log_type = info.log_type;
+
+ return 0;
+
+ case PFRT_LOG_IOC_GET_INFO:
+		ret = get_pfrt_log_level(pfrt_log_dev);
+		if (ret < 0)
+			return ret;
+
+		info.log_level = ret;
+
+ info.log_type = pfrt_log_dev->info.log_type;
+ info.log_revid = pfrt_log_dev->info.log_revid;
+ if (copy_to_user(p, &info, sizeof(info)))
+ return -EFAULT;
+
+ return 0;
+
+ case PFRT_LOG_IOC_GET_DATA_INFO:
+ ret = get_pfrt_log_data_info(&data_info, pfrt_log_dev);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(p, &data_info, sizeof(struct pfrt_log_data_info)))
+ return -EFAULT;
+
+ return 0;
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int
+pfrt_log_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct pfrt_log_device *pfrt_log_dev;
+ struct pfrt_log_data_info info;
+ unsigned long psize, vsize;
+ phys_addr_t base_addr;
+ int ret;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EROFS;
+
+ /* changing from read to write with mprotect is not allowed */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ pfrt_log_dev = to_pfrt_log_dev(file);
+
+ ret = get_pfrt_log_data_info(&info, pfrt_log_dev);
+ if (ret)
+ return ret;
+
+ base_addr = (phys_addr_t)((info.chunk2_addr_hi << 32) | info.chunk2_addr_lo);
+ /* pfrt update has not been launched yet */
+ if (!base_addr)
+ return -ENODEV;
+
+ psize = info.max_data_size;
+ /* base address and total buffer size must be page aligned */
+ if (!PAGE_ALIGNED(base_addr) || !PAGE_ALIGNED(psize))
+ return -ENODEV;
+
+ vsize = vma->vm_end - vma->vm_start;
+ if (vsize > psize)
+ return -EINVAL;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ if (io_remap_pfn_range(vma, vma->vm_start, PFN_DOWN(base_addr),
+ vsize, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+static const struct file_operations acpi_pfrt_log_fops = {
+ .owner = THIS_MODULE,
+ .mmap = pfrt_log_mmap,
+ .unlocked_ioctl = pfrt_log_ioctl,
+ .llseek = noop_llseek,
+};
+
+static int acpi_pfrt_log_remove(struct platform_device *pdev)
+{
+ struct pfrt_log_device *pfrt_log_dev = platform_get_drvdata(pdev);
+
+ misc_deregister(&pfrt_log_dev->miscdev);
+
+ return 0;
+}
+
+static void pfrt_log_put_idx(void *data)
+{
+ struct pfrt_log_device *pfrt_log_dev = data;
+
+ ida_free(&pfrt_log_ida, pfrt_log_dev->index);
+}
+
+static int acpi_pfrt_log_probe(struct platform_device *pdev)
+{
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ struct pfrt_log_device *pfrt_log_dev;
+ int ret;
+
+ if (!acpi_has_method(handle, "_DSM")) {
+ dev_dbg(&pdev->dev, "Missing _DSM\n");
+ return -ENODEV;
+ }
+
+ pfrt_log_dev = devm_kzalloc(&pdev->dev, sizeof(*pfrt_log_dev), GFP_KERNEL);
+ if (!pfrt_log_dev)
+ return -ENOMEM;
+
+ ret = ida_alloc(&pfrt_log_ida, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ pfrt_log_dev->index = ret;
+ ret = devm_add_action_or_reset(&pdev->dev, pfrt_log_put_idx, pfrt_log_dev);
+ if (ret)
+ return ret;
+
+ pfrt_log_dev->info.log_revid = PFRT_DEFAULT_REV_ID;
+ pfrt_log_dev->parent_dev = &pdev->dev;
+
+ pfrt_log_dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ pfrt_log_dev->miscdev.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "pfrt%d",
+ pfrt_log_dev->index);
+ if (!pfrt_log_dev->miscdev.name)
+ return -ENOMEM;
+
+ pfrt_log_dev->miscdev.nodename = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "acpi_pfr_telemetry%d",
+ pfrt_log_dev->index);
+ if (!pfrt_log_dev->miscdev.nodename)
+ return -ENOMEM;
+
+ pfrt_log_dev->miscdev.fops = &acpi_pfrt_log_fops;
+ pfrt_log_dev->miscdev.parent = &pdev->dev;
+
+ ret = misc_register(&pfrt_log_dev->miscdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pfrt_log_dev);
+
+ return 0;
+}
+
+static const struct acpi_device_id acpi_pfrt_log_ids[] = {
+ {"INTC1081"},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, acpi_pfrt_log_ids);
+
+static struct platform_driver acpi_pfrt_log_driver = {
+ .driver = {
+ .name = "pfr_telemetry",
+ .acpi_match_table = acpi_pfrt_log_ids,
+ },
+ .probe = acpi_pfrt_log_probe,
+ .remove = acpi_pfrt_log_remove,
+};
+module_platform_driver(acpi_pfrt_log_driver);
+
+MODULE_DESCRIPTION("Platform Firmware Runtime Update Telemetry driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/acpi/pfr_update.c b/drivers/acpi/pfr_update.c
new file mode 100644
index 000000000000..6bb0b778b5da
--- /dev/null
+++ b/drivers/acpi/pfr_update.c
@@ -0,0 +1,575 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ACPI Platform Firmware Runtime Update Device driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ * Author: Chen Yu <yu.c.chen@intel.com>
+ *
+ * pfr_update driver is used for Platform Firmware Runtime
+ * Update, which includes the code injection and driver update.
+ */
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/idr.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <linux/uuid.h>
+
+#include <uapi/linux/pfrut.h>
+
+#define PFRU_FUNC_STANDARD_QUERY 0
+#define PFRU_FUNC_QUERY_UPDATE_CAP 1
+#define PFRU_FUNC_QUERY_BUF 2
+#define PFRU_FUNC_START 3
+
+#define PFRU_CODE_INJECT_TYPE 1
+#define PFRU_DRIVER_UPDATE_TYPE 2
+
+#define PFRU_REVID_1 1
+#define PFRU_REVID_2 2
+#define PFRU_DEFAULT_REV_ID PFRU_REVID_1
+
+enum cap_index {
+ CAP_STATUS_IDX = 0,
+ CAP_UPDATE_IDX = 1,
+ CAP_CODE_TYPE_IDX = 2,
+ CAP_FW_VER_IDX = 3,
+ CAP_CODE_RT_VER_IDX = 4,
+ CAP_DRV_TYPE_IDX = 5,
+ CAP_DRV_RT_VER_IDX = 6,
+ CAP_DRV_SVN_IDX = 7,
+ CAP_PLAT_ID_IDX = 8,
+ CAP_OEM_ID_IDX = 9,
+ CAP_OEM_INFO_IDX = 10,
+ CAP_NR_IDX
+};
+
+enum buf_index {
+ BUF_STATUS_IDX = 0,
+ BUF_EXT_STATUS_IDX = 1,
+ BUF_ADDR_LOW_IDX = 2,
+ BUF_ADDR_HI_IDX = 3,
+ BUF_SIZE_IDX = 4,
+ BUF_NR_IDX
+};
+
+enum update_index {
+ UPDATE_STATUS_IDX = 0,
+ UPDATE_EXT_STATUS_IDX = 1,
+ UPDATE_AUTH_TIME_LOW_IDX = 2,
+ UPDATE_AUTH_TIME_HI_IDX = 3,
+ UPDATE_EXEC_TIME_LOW_IDX = 4,
+ UPDATE_EXEC_TIME_HI_IDX = 5,
+ UPDATE_NR_IDX
+};
+
+enum pfru_start_action {
+ START_STAGE = 0,
+ START_ACTIVATE = 1,
+ START_STAGE_ACTIVATE = 2,
+};
+
+struct pfru_device {
+ u32 rev_id, index;
+ struct device *parent_dev;
+ struct miscdevice miscdev;
+};
+
+static DEFINE_IDA(pfru_ida);
+
+/*
+ * Manual reference:
+ * https://uefi.org/sites/default/files/resources/Intel_MM_OS_Interface_Spec_Rev100.pdf
+ *
+ * pfru_guid is the UUID argument passed to the _DSM method
+ */
+static const guid_t pfru_guid =
+ GUID_INIT(0xECF9533B, 0x4A3C, 0x4E89, 0x93, 0x9E, 0xC7, 0x71,
+ 0x12, 0x60, 0x1C, 0x6D);
+
+/* pfru_code_inj_guid is the UUID to identify code injection EFI capsule file */
+static const guid_t pfru_code_inj_guid =
+ GUID_INIT(0xB2F84B79, 0x7B6E, 0x4E45, 0x88, 0x5F, 0x3F, 0xB9,
+ 0xBB, 0x18, 0x54, 0x02);
+
+/* pfru_drv_update_guid is the UUID to identify driver update EFI capsule file */
+static const guid_t pfru_drv_update_guid =
+ GUID_INIT(0x4569DD8C, 0x75F1, 0x429A, 0xA3, 0xD6, 0x24, 0xDE,
+ 0x80, 0x97, 0xA0, 0xDF);
+
+static inline int pfru_valid_revid(u32 id)
+{
+ return id == PFRU_REVID_1 || id == PFRU_REVID_2;
+}
+
+static inline struct pfru_device *to_pfru_dev(struct file *file)
+{
+ return container_of(file->private_data, struct pfru_device, miscdev);
+}
+
+static int query_capability(struct pfru_update_cap_info *cap_hdr,
+ struct pfru_device *pfru_dev)
+{
+ acpi_handle handle = ACPI_HANDLE(pfru_dev->parent_dev);
+ union acpi_object *out_obj;
+ int ret = -EINVAL;
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
+ pfru_dev->rev_id,
+ PFRU_FUNC_QUERY_UPDATE_CAP,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!out_obj)
+ return ret;
+
+ if (out_obj->package.count < CAP_NR_IDX ||
+ out_obj->package.elements[CAP_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[CAP_UPDATE_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[CAP_CODE_TYPE_IDX].type != ACPI_TYPE_BUFFER ||
+ out_obj->package.elements[CAP_FW_VER_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[CAP_CODE_RT_VER_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[CAP_DRV_TYPE_IDX].type != ACPI_TYPE_BUFFER ||
+ out_obj->package.elements[CAP_DRV_RT_VER_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[CAP_DRV_SVN_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[CAP_PLAT_ID_IDX].type != ACPI_TYPE_BUFFER ||
+ out_obj->package.elements[CAP_OEM_ID_IDX].type != ACPI_TYPE_BUFFER ||
+ out_obj->package.elements[CAP_OEM_INFO_IDX].type != ACPI_TYPE_BUFFER)
+ goto free_acpi_buffer;
+
+ cap_hdr->status = out_obj->package.elements[CAP_STATUS_IDX].integer.value;
+ if (cap_hdr->status != DSM_SUCCEED) {
+ ret = -EBUSY;
+ dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", cap_hdr->status);
+ goto free_acpi_buffer;
+ }
+
+ cap_hdr->update_cap = out_obj->package.elements[CAP_UPDATE_IDX].integer.value;
+ memcpy(&cap_hdr->code_type,
+ out_obj->package.elements[CAP_CODE_TYPE_IDX].buffer.pointer,
+ out_obj->package.elements[CAP_CODE_TYPE_IDX].buffer.length);
+ cap_hdr->fw_version =
+ out_obj->package.elements[CAP_FW_VER_IDX].integer.value;
+ cap_hdr->code_rt_version =
+ out_obj->package.elements[CAP_CODE_RT_VER_IDX].integer.value;
+ memcpy(&cap_hdr->drv_type,
+ out_obj->package.elements[CAP_DRV_TYPE_IDX].buffer.pointer,
+ out_obj->package.elements[CAP_DRV_TYPE_IDX].buffer.length);
+ cap_hdr->drv_rt_version =
+ out_obj->package.elements[CAP_DRV_RT_VER_IDX].integer.value;
+ cap_hdr->drv_svn =
+ out_obj->package.elements[CAP_DRV_SVN_IDX].integer.value;
+ memcpy(&cap_hdr->platform_id,
+ out_obj->package.elements[CAP_PLAT_ID_IDX].buffer.pointer,
+ out_obj->package.elements[CAP_PLAT_ID_IDX].buffer.length);
+ memcpy(&cap_hdr->oem_id,
+ out_obj->package.elements[CAP_OEM_ID_IDX].buffer.pointer,
+ out_obj->package.elements[CAP_OEM_ID_IDX].buffer.length);
+ cap_hdr->oem_info_len =
+ out_obj->package.elements[CAP_OEM_INFO_IDX].buffer.length;
+
+ ret = 0;
+
+free_acpi_buffer:
+ kfree(out_obj);
+
+ return ret;
+}
+
+static int query_buffer(struct pfru_com_buf_info *info,
+ struct pfru_device *pfru_dev)
+{
+ acpi_handle handle = ACPI_HANDLE(pfru_dev->parent_dev);
+ union acpi_object *out_obj;
+ int ret = -EINVAL;
+
+ out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
+ pfru_dev->rev_id, PFRU_FUNC_QUERY_BUF,
+ NULL, ACPI_TYPE_PACKAGE);
+ if (!out_obj)
+ return ret;
+
+ if (out_obj->package.count < BUF_NR_IDX ||
+ out_obj->package.elements[BUF_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[BUF_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[BUF_ADDR_LOW_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[BUF_ADDR_HI_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[BUF_SIZE_IDX].type != ACPI_TYPE_INTEGER)
+ goto free_acpi_buffer;
+
+ info->status = out_obj->package.elements[BUF_STATUS_IDX].integer.value;
+ info->ext_status =
+ out_obj->package.elements[BUF_EXT_STATUS_IDX].integer.value;
+ if (info->status != DSM_SUCCEED) {
+ ret = -EBUSY;
+ dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", info->status);
+ dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n", info->ext_status);
+
+ goto free_acpi_buffer;
+ }
+
+ info->addr_lo =
+ out_obj->package.elements[BUF_ADDR_LOW_IDX].integer.value;
+ info->addr_hi =
+ out_obj->package.elements[BUF_ADDR_HI_IDX].integer.value;
+ info->buf_size = out_obj->package.elements[BUF_SIZE_IDX].integer.value;
+
+ ret = 0;
+
+free_acpi_buffer:
+ kfree(out_obj);
+
+ return ret;
+}
+
+static int get_image_type(const struct efi_manage_capsule_image_header *img_hdr,
+ struct pfru_device *pfru_dev)
+{
+ const efi_guid_t *image_type_id = &img_hdr->image_type_id;
+
+ /* check whether this is a code injection or driver update */
+ if (guid_equal(image_type_id, &pfru_code_inj_guid))
+ return PFRU_CODE_INJECT_TYPE;
+
+ if (guid_equal(image_type_id, &pfru_drv_update_guid))
+ return PFRU_DRIVER_UPDATE_TYPE;
+
+ return -EINVAL;
+}
+
+static int adjust_efi_size(const struct efi_manage_capsule_image_header *img_hdr,
+ int size)
+{
+ /*
+ * The hw_ins field (u64) was introduced in version 2 of the
+ * image header and capsule_support (u64) in version 3, so the
+ * size needs to be adjusted accordingly: version 1 subtracts
+ * the size of hw_ins + capsule_support, and version 2 subtracts
+ * the size of capsule_support.
+ */
+ size += sizeof(struct efi_manage_capsule_image_header);
+ switch (img_hdr->ver) {
+ case 1:
+ return size - 2 * sizeof(u64);
+
+ case 2:
+ return size - sizeof(u64);
+
+ default:
+ /* only support version 1 and 2 */
+ return -EINVAL;
+ }
+}
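
A worked view of the adjustment, assuming (as the subtraction implies) that the in-kernel struct declares the newest layout with both u64 fields present:

	version 1: size + sizeof(header) - 2 * sizeof(u64)	/* -16 bytes */
	version 2: size + sizeof(header) - 1 * sizeof(u64)	/*  -8 bytes */
	other:     -EINVAL				/* unsupported version */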
+
+static bool applicable_image(const void *data, struct pfru_update_cap_info *cap,
+ struct pfru_device *pfru_dev)
+{
+ struct pfru_payload_hdr *payload_hdr;
+ const efi_capsule_header_t *cap_hdr = data;
+ const struct efi_manage_capsule_header *m_hdr;
+ const struct efi_manage_capsule_image_header *m_img_hdr;
+ const struct efi_image_auth *auth;
+ int type, size;
+
+ /*
+ * If the code in the capsule is older than the current
+ * firmware code, the update will be rejected by the firmware,
+ * so check the capsule's version upfront without engaging the
+ * Management Mode update mechanism, which may be costly.
+ */
+ size = cap_hdr->headersize;
+ m_hdr = data + size;
+ /*
+ * Advance past the fixed part of the header plus the variable
+ * offset_list array, which has (emb_drv_cnt + payload_cnt)
+ * u64 entries.
+ */
+ size += offsetof(struct efi_manage_capsule_header, offset_list) +
+ (m_hdr->emb_drv_cnt + m_hdr->payload_cnt) * sizeof(u64);
+ m_img_hdr = data + size;
+
+ type = get_image_type(m_img_hdr, pfru_dev);
+ if (type < 0)
+ return false;
+
+ size = adjust_efi_size(m_img_hdr, size);
+ if (size < 0)
+ return false;
+
+ auth = data + size;
+ size += sizeof(u64) + auth->auth_info.hdr.len;
+ payload_hdr = (struct pfru_payload_hdr *)(data + size);
+
+ /* finally compare the version */
+ if (type == PFRU_CODE_INJECT_TYPE)
+ return payload_hdr->rt_ver >= cap->code_rt_version;
+
+ return payload_hdr->rt_ver >= cap->drv_rt_version;
+}
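
applicable_image() walks the capsule purely by offsets that the headers themselves advertise; reconstructed from the code above, the traversal order is:

	data			efi_capsule_header_t (headersize bytes)
	data + headersize	efi_manage_capsule_header
				+ offset_list[emb_drv_cnt + payload_cnt] (u64 each)
	...			efi_manage_capsule_image_header
				(length per version, see adjust_efi_size())
	...			u64 + efi_image_auth (auth_info.hdr.len bytes)
	...			pfru_payload_hdr (rt_ver compared last)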
+
+static void print_update_debug_info(struct pfru_updated_result *result,
+ struct pfru_device *pfru_dev)
+{
+ dev_dbg(pfru_dev->parent_dev, "Update result:\n");
+ dev_dbg(pfru_dev->parent_dev, "Authentication Time Low:%lld\n",
+ result->low_auth_time);
+ dev_dbg(pfru_dev->parent_dev, "Authentication Time High:%lld\n",
+ result->high_auth_time);
+ dev_dbg(pfru_dev->parent_dev, "Execution Time Low:%lld\n",
+ result->low_exec_time);
+ dev_dbg(pfru_dev->parent_dev, "Execution Time High:%lld\n",
+ result->high_exec_time);
+}
+
+static int start_update(int action, struct pfru_device *pfru_dev)
+{
+ union acpi_object *out_obj, in_obj, in_buf;
+ struct pfru_updated_result update_result;
+ acpi_handle handle;
+ int ret = -EINVAL;
+
+ memset(&in_obj, 0, sizeof(in_obj));
+ memset(&in_buf, 0, sizeof(in_buf));
+ in_obj.type = ACPI_TYPE_PACKAGE;
+ in_obj.package.count = 1;
+ in_obj.package.elements = &in_buf;
+ in_buf.type = ACPI_TYPE_INTEGER;
+ in_buf.integer.value = action;
+
+ handle = ACPI_HANDLE(pfru_dev->parent_dev);
+ out_obj = acpi_evaluate_dsm_typed(handle, &pfru_guid,
+ pfru_dev->rev_id, PFRU_FUNC_START,
+ &in_obj, ACPI_TYPE_PACKAGE);
+ if (!out_obj)
+ return ret;
+
+ if (out_obj->package.count < UPDATE_NR_IDX ||
+ out_obj->package.elements[UPDATE_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[UPDATE_EXT_STATUS_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].type != ACPI_TYPE_INTEGER ||
+ out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].type != ACPI_TYPE_INTEGER)
+ goto free_acpi_buffer;
+
+ update_result.status =
+ out_obj->package.elements[UPDATE_STATUS_IDX].integer.value;
+ update_result.ext_status =
+ out_obj->package.elements[UPDATE_EXT_STATUS_IDX].integer.value;
+
+ if (update_result.status != DSM_SUCCEED) {
+ ret = -EBUSY;
+ dev_dbg(pfru_dev->parent_dev, "Error Status:%d\n", update_result.status);
+ dev_dbg(pfru_dev->parent_dev, "Error Extended Status:%d\n",
+ update_result.ext_status);
+
+ goto free_acpi_buffer;
+ }
+
+ update_result.low_auth_time =
+ out_obj->package.elements[UPDATE_AUTH_TIME_LOW_IDX].integer.value;
+ update_result.high_auth_time =
+ out_obj->package.elements[UPDATE_AUTH_TIME_HI_IDX].integer.value;
+ update_result.low_exec_time =
+ out_obj->package.elements[UPDATE_EXEC_TIME_LOW_IDX].integer.value;
+ update_result.high_exec_time =
+ out_obj->package.elements[UPDATE_EXEC_TIME_HI_IDX].integer.value;
+
+ print_update_debug_info(&update_result, pfru_dev);
+ ret = 0;
+
+free_acpi_buffer:
+ kfree(out_obj);
+
+ return ret;
+}
+
+static long pfru_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct pfru_update_cap_info cap_hdr;
+ struct pfru_device *pfru_dev = to_pfru_dev(file);
+ void __user *p = (void __user *)arg;
+ u32 rev;
+ int ret;
+
+ switch (cmd) {
+ case PFRU_IOC_QUERY_CAP:
+ ret = query_capability(&cap_hdr, pfru_dev);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(p, &cap_hdr, sizeof(cap_hdr)))
+ return -EFAULT;
+
+ return 0;
+
+ case PFRU_IOC_SET_REV:
+ if (copy_from_user(&rev, p, sizeof(rev)))
+ return -EFAULT;
+
+ if (!pfru_valid_revid(rev))
+ return -EINVAL;
+
+ pfru_dev->rev_id = rev;
+
+ return 0;
+
+ case PFRU_IOC_STAGE:
+ return start_update(START_STAGE, pfru_dev);
+
+ case PFRU_IOC_ACTIVATE:
+ return start_update(START_ACTIVATE, pfru_dev);
+
+ case PFRU_IOC_STAGE_ACTIVATE:
+ return start_update(START_STAGE_ACTIVATE, pfru_dev);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static ssize_t pfru_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct pfru_device *pfru_dev = to_pfru_dev(file);
+ struct pfru_update_cap_info cap;
+ struct pfru_com_buf_info buf_info;
+ phys_addr_t phy_addr;
+ struct iov_iter iter;
+ struct iovec iov;
+ char *buf_ptr;
+ int ret;
+
+ ret = query_buffer(&buf_info, pfru_dev);
+ if (ret)
+ return ret;
+
+ if (len > buf_info.buf_size)
+ return -EINVAL;
+
+ iov.iov_base = (void __user *)buf;
+ iov.iov_len = len;
+ iov_iter_init(&iter, WRITE, &iov, 1, len);
+
+ /* map the communication buffer */
+ phy_addr = (phys_addr_t)((buf_info.addr_hi << 32) | buf_info.addr_lo);
+ buf_ptr = memremap(phy_addr, buf_info.buf_size, MEMREMAP_WB);
+ if (!buf_ptr)
+ return -ENOMEM;
+
+ if (!copy_from_iter_full(buf_ptr, len, &iter)) {
+ ret = -EINVAL;
+ goto unmap;
+ }
+
+ /* check if the capsule header has a valid version number */
+ ret = query_capability(&cap, pfru_dev);
+ if (ret)
+ goto unmap;
+
+ if (!applicable_image(buf_ptr, &cap, pfru_dev))
+ ret = -EINVAL;
+
+unmap:
+ memunmap(buf_ptr);
+
+ return ret ?: len;
+}
+
+static const struct file_operations acpi_pfru_fops = {
+ .owner = THIS_MODULE,
+ .write = pfru_write,
+ .unlocked_ioctl = pfru_ioctl,
+ .llseek = noop_llseek,
+};
+
+static int acpi_pfru_remove(struct platform_device *pdev)
+{
+ struct pfru_device *pfru_dev = platform_get_drvdata(pdev);
+
+ misc_deregister(&pfru_dev->miscdev);
+
+ return 0;
+}
+
+static void pfru_put_idx(void *data)
+{
+ struct pfru_device *pfru_dev = data;
+
+ ida_free(&pfru_ida, pfru_dev->index);
+}
+
+static int acpi_pfru_probe(struct platform_device *pdev)
+{
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ struct pfru_device *pfru_dev;
+ int ret;
+
+ if (!acpi_has_method(handle, "_DSM")) {
+ dev_dbg(&pdev->dev, "Missing _DSM\n");
+ return -ENODEV;
+ }
+
+ pfru_dev = devm_kzalloc(&pdev->dev, sizeof(*pfru_dev), GFP_KERNEL);
+ if (!pfru_dev)
+ return -ENOMEM;
+
+ ret = ida_alloc(&pfru_ida, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ pfru_dev->index = ret;
+ ret = devm_add_action_or_reset(&pdev->dev, pfru_put_idx, pfru_dev);
+ if (ret)
+ return ret;
+
+ pfru_dev->rev_id = PFRU_DEFAULT_REV_ID;
+ pfru_dev->parent_dev = &pdev->dev;
+
+ pfru_dev->miscdev.minor = MISC_DYNAMIC_MINOR;
+ pfru_dev->miscdev.name = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "pfru%d", pfru_dev->index);
+ if (!pfru_dev->miscdev.name)
+ return -ENOMEM;
+
+ pfru_dev->miscdev.nodename = devm_kasprintf(&pdev->dev, GFP_KERNEL,
+ "acpi_pfr_update%d", pfru_dev->index);
+ if (!pfru_dev->miscdev.nodename)
+ return -ENOMEM;
+
+ pfru_dev->miscdev.fops = &acpi_pfru_fops;
+ pfru_dev->miscdev.parent = &pdev->dev;
+
+ ret = misc_register(&pfru_dev->miscdev);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, pfru_dev);
+
+ return 0;
+}
+
+static const struct acpi_device_id acpi_pfru_ids[] = {
+ {"INTC1080"},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, acpi_pfru_ids);
+
+static struct platform_driver acpi_pfru_driver = {
+ .driver = {
+ .name = "pfr_update",
+ .acpi_match_table = acpi_pfru_ids,
+ },
+ .probe = acpi_pfru_probe,
+ .remove = acpi_pfru_remove,
+};
+module_platform_driver(acpi_pfru_driver);
+
+MODULE_DESCRIPTION("Platform Firmware Runtime Update device driver");
+MODULE_LICENSE("GPL v2");
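
End to end, the driver exposes /dev/acpi_pfr_update<N>: write() stages a capsule into the platform's communication buffer, and the ioctl()s drive the query/stage/activate flow. A minimal userspace sketch under those assumptions (single-shot read, illustrative capsule path, error handling trimmed):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/pfrut.h>

	int main(void)
	{
		struct pfru_update_cap_info cap;
		static char image[1 << 20];
		ssize_t len;
		int fd, capsule;

		fd = open("/dev/acpi_pfr_update0", O_RDWR);
		if (fd < 0)
			return 1;

		/* Optional: inspect what the platform can accept. */
		if (ioctl(fd, PFRU_IOC_QUERY_CAP, &cap) < 0)
			return 1;

		capsule = open("pfru_capsule.cap", O_RDONLY); /* illustrative path */
		if (capsule < 0)
			return 1;

		len = read(capsule, image, sizeof(image));
		if (len <= 0 || write(fd, image, len) != len)
			return 1;

		/* Stage and activate the image in one call. */
		return ioctl(fd, PFRU_IOC_STAGE_ACTIVATE) ? 1 : 0;
	}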
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 0cca7991f186..4322f2da6d10 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -127,7 +127,7 @@ static int
acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
{
return single_open(file, acpi_system_wakeup_device_seq_show,
- PDE_DATA(inode));
+ pde_data(inode));
}
static const struct proc_ops acpi_system_wakeup_device_proc_ops = {
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 86560a28751b..f8e9fa82cb9b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -96,6 +96,11 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
(void *)1},
+ /* T40 cannot handle C3 idle state */
+ { set_max_cstate, "IBM ThinkPad T40", {
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "23737CU")},
+ (void *)2},
{},
};
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 1185ecea59d1..1331756d4cfc 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -19,6 +19,7 @@
#include <linux/dma-map-ops.h>
#include <linux/platform_data/x86/apple.h>
#include <linux/pgtable.h>
+#include <linux/crc32.h>
#include "internal.h"
@@ -667,6 +668,19 @@ static int acpi_tie_acpi_dev(struct acpi_device *adev)
return 0;
}
+static void acpi_store_pld_crc(struct acpi_device *adev)
+{
+ struct acpi_pld_info *pld;
+ acpi_status status;
+
+ status = acpi_get_physical_device_location(adev->handle, &pld);
+ if (ACPI_FAILURE(status))
+ return;
+
+ adev->pld_crc = crc32(~0, pld, sizeof(*pld));
+ ACPI_FREE(pld);
+}
+
static int __acpi_device_add(struct acpi_device *device,
void (*release)(struct device *))
{
@@ -725,6 +739,8 @@ static int __acpi_device_add(struct acpi_device *device,
if (device->wakeup.flags.valid)
list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
+ acpi_store_pld_crc(device);
+
mutex_unlock(&acpi_device_lock);
if (device->parent)
@@ -2486,42 +2502,33 @@ int acpi_bus_register_early_device(int type)
}
EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
-static int acpi_bus_scan_fixed(void)
+static void acpi_bus_scan_fixed(void)
{
- int result = 0;
-
- /*
- * Enumerate all fixed-feature devices.
- */
if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) {
- struct acpi_device *device = NULL;
-
- result = acpi_add_single_object(&device, NULL,
- ACPI_BUS_TYPE_POWER_BUTTON, false);
- if (result)
- return result;
-
- device->flags.match_driver = true;
- result = device_attach(&device->dev);
- if (result < 0)
- return result;
-
- device_init_wakeup(&device->dev, true);
+ struct acpi_device *adev = NULL;
+
+ acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_POWER_BUTTON,
+ false);
+ if (adev) {
+ adev->flags.match_driver = true;
+ if (device_attach(&adev->dev) >= 0)
+ device_init_wakeup(&adev->dev, true);
+ else
+ dev_dbg(&adev->dev, "No driver\n");
+ }
}
if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) {
- struct acpi_device *device = NULL;
-
- result = acpi_add_single_object(&device, NULL,
- ACPI_BUS_TYPE_SLEEP_BUTTON, false);
- if (result)
- return result;
-
- device->flags.match_driver = true;
- result = device_attach(&device->dev);
+ struct acpi_device *adev = NULL;
+
+ acpi_add_single_object(&adev, NULL, ACPI_BUS_TYPE_SLEEP_BUTTON,
+ false);
+ if (adev) {
+ adev->flags.match_driver = true;
+ if (device_attach(&adev->dev) < 0)
+ dev_dbg(&adev->dev, "No driver\n");
+ }
}
-
- return result < 0 ? result : 0;
}
static void __init acpi_get_spcr_uart_addr(void)
@@ -2542,9 +2549,8 @@ static void __init acpi_get_spcr_uart_addr(void)
static bool acpi_scan_initialized;
-int __init acpi_scan_init(void)
+void __init acpi_scan_init(void)
{
- int result;
acpi_status status;
struct acpi_table_stao *stao_ptr;
@@ -2594,33 +2600,23 @@ int __init acpi_scan_init(void)
/*
* Enumerate devices in the ACPI namespace.
*/
- result = acpi_bus_scan(ACPI_ROOT_OBJECT);
- if (result)
- goto out;
+ if (acpi_bus_scan(ACPI_ROOT_OBJECT))
+ goto unlock;
acpi_root = acpi_fetch_acpi_dev(ACPI_ROOT_OBJECT);
if (!acpi_root)
- goto out;
+ goto unlock;
/* Fixed feature devices do not exist on HW-reduced platform */
- if (!acpi_gbl_reduced_hardware) {
- result = acpi_bus_scan_fixed();
- if (result) {
- acpi_detach_data(acpi_root->handle,
- acpi_scan_drop_device);
- acpi_device_del(acpi_root);
- acpi_bus_put_acpi_device(acpi_root);
- goto out;
- }
- }
+ if (!acpi_gbl_reduced_hardware)
+ acpi_bus_scan_fixed();
acpi_turn_off_unused_power_resources();
acpi_scan_initialized = true;
- out:
+unlock:
mutex_unlock(&acpi_scan_lock);
- return result;
}
static struct acpi_probe_entry *ape;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index a60ff5dfed3a..d4fbea91ab6b 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -736,21 +736,15 @@ bool acpi_s2idle_wake(void)
return true;
}
- /* Check non-EC GPE wakeups and dispatch the EC GPE. */
+ /*
+ * Check non-EC GPE wakeups and if there are none, cancel the
+ * SCI-related wakeup and dispatch the EC GPE.
+ */
if (acpi_ec_dispatch_gpe()) {
pm_pr_dbg("ACPI non-EC GPE wakeup\n");
return true;
}
- /*
- * Cancel the SCI wakeup and process all pending events in case
- * there are any wakeup ones in there.
- *
- * Note that if any non-EC GPEs are active at this point, the
- * SCI will retrigger after the rearming below, so no events
- * should be missed by canceling the wakeup here.
- */
- pm_system_cancel_wakeup();
acpi_os_wait_events_complete();
/*
@@ -764,6 +758,7 @@ bool acpi_s2idle_wake(void)
return true;
}
+ pm_wakeup_clear(acpi_sci_irq);
rearm_wake_irq(acpi_sci_irq);
}
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 25c2d0be953e..d589543875b8 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -107,8 +107,13 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
pr_info("SPCR table version %d\n", table->header.revision);
if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
- switch (ACPI_ACCESS_BIT_WIDTH((
- table->serial_port.access_width))) {
+ u32 bit_width = table->serial_port.access_width;
+
+ if (bit_width > ACPI_ACCESS_BIT_MAX) {
+ pr_err("Unacceptably wide SPCR Access Width. Defaulting to byte size\n");
+ bit_width = ACPI_ACCESS_BIT_DEFAULT;
+ }
+ switch (ACPI_ACCESS_BIT_WIDTH(bit_width)) {
default:
pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
fallthrough;
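
The clamp matters because, in the usual ACPICA encoding (an assumption here, not spelled out in this hunk), ACPI_ACCESS_BIT_WIDTH(w) expands to 1 << (w + 2), so a firmware-supplied access_width of, say, 7 would decode to a nonsensical 512-bit access before the switch ever sees it; rewriting anything above ACPI_ACCESS_BIT_MAX to ACPI_ACCESS_BIT_DEFAULT keeps the decode within the 8/16/32/64-bit range.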
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 682a3ea9cb40..34600b5b9d8e 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -35,12 +35,13 @@ static char *mps_inti_flags_trigger[] = { "dfl", "edge", "res", "level" };
static struct acpi_table_desc initial_tables[ACPI_MAX_TABLES] __initdata;
-static int acpi_apic_instance __initdata;
+static int acpi_apic_instance __initdata_or_acpilib;
enum acpi_subtable_type {
ACPI_SUBTABLE_COMMON,
ACPI_SUBTABLE_HMAT,
ACPI_SUBTABLE_PRMT,
+ ACPI_SUBTABLE_CEDT,
};
struct acpi_subtable_entry {
@@ -52,7 +53,7 @@ struct acpi_subtable_entry {
* Disable table checksum verification for the early stage due to the size
* limitation of the current x86 early mapping implementation.
*/
-static bool acpi_verify_table_checksum __initdata = false;
+static bool acpi_verify_table_checksum __initdata_or_acpilib = false;
void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
{
@@ -216,7 +217,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
}
}
-static unsigned long __init
+static unsigned long __init_or_acpilib
acpi_get_entry_type(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
@@ -226,11 +227,13 @@ acpi_get_entry_type(struct acpi_subtable_entry *entry)
return entry->hdr->hmat.type;
case ACPI_SUBTABLE_PRMT:
return 0;
+ case ACPI_SUBTABLE_CEDT:
+ return entry->hdr->cedt.type;
}
return 0;
}
-static unsigned long __init
+static unsigned long __init_or_acpilib
acpi_get_entry_length(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
@@ -240,11 +243,13 @@ acpi_get_entry_length(struct acpi_subtable_entry *entry)
return entry->hdr->hmat.length;
case ACPI_SUBTABLE_PRMT:
return entry->hdr->prmt.length;
+ case ACPI_SUBTABLE_CEDT:
+ return entry->hdr->cedt.length;
}
return 0;
}
-static unsigned long __init
+static unsigned long __init_or_acpilib
acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
@@ -254,20 +259,40 @@ acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
return sizeof(entry->hdr->hmat);
case ACPI_SUBTABLE_PRMT:
return sizeof(entry->hdr->prmt);
+ case ACPI_SUBTABLE_CEDT:
+ return sizeof(entry->hdr->cedt);
}
return 0;
}
-static enum acpi_subtable_type __init
+static enum acpi_subtable_type __init_or_acpilib
acpi_get_subtable_type(char *id)
{
if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
return ACPI_SUBTABLE_HMAT;
if (strncmp(id, ACPI_SIG_PRMT, 4) == 0)
return ACPI_SUBTABLE_PRMT;
+ if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
+ return ACPI_SUBTABLE_CEDT;
return ACPI_SUBTABLE_COMMON;
}
+static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
+{
+ return proc->handler || proc->handler_arg;
+}
+
+static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
+ union acpi_subtable_headers *hdr,
+ unsigned long end)
+{
+ if (proc->handler)
+ return proc->handler(hdr, end);
+ if (proc->handler_arg)
+ return proc->handler_arg(hdr, proc->arg, end);
+ return -EINVAL;
+}
+
/**
* acpi_parse_entries_array - for each proc_num find a suitable subtable
*
@@ -291,10 +316,10 @@ acpi_get_subtable_type(char *id)
* On success returns sum of all matching entries for all proc handlers.
* Otherwise, -ENODEV or -EINVAL is returned.
*/
-static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
- struct acpi_table_header *table_header,
- struct acpi_subtable_proc *proc, int proc_num,
- unsigned int max_entries)
+static int __init_or_acpilib acpi_parse_entries_array(
+ char *id, unsigned long table_size,
+ struct acpi_table_header *table_header, struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries)
{
struct acpi_subtable_entry entry;
unsigned long table_end, subtable_len, entry_len;
@@ -318,8 +343,9 @@ static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
for (i = 0; i < proc_num; i++) {
if (acpi_get_entry_type(&entry) != proc[i].id)
continue;
- if (!proc[i].handler ||
- (!errs && proc[i].handler(entry.hdr, table_end))) {
+ if (!has_handler(&proc[i]) ||
+ (!errs &&
+ call_handler(&proc[i], entry.hdr, table_end))) {
errs++;
continue;
}
@@ -352,10 +378,9 @@ static int __init acpi_parse_entries_array(char *id, unsigned long table_size,
return errs ? -EINVAL : count;
}
-int __init acpi_table_parse_entries_array(char *id,
- unsigned long table_size,
- struct acpi_subtable_proc *proc, int proc_num,
- unsigned int max_entries)
+int __init_or_acpilib acpi_table_parse_entries_array(
+ char *id, unsigned long table_size, struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries)
{
struct acpi_table_header *table_header = NULL;
int count;
@@ -375,7 +400,7 @@ int __init acpi_table_parse_entries_array(char *id,
acpi_get_table(id, instance, &table_header);
if (!table_header) {
- pr_warn("%4.4s not present\n", id);
+ pr_debug("%4.4s not present\n", id);
return -ENODEV;
}
@@ -386,21 +411,41 @@ int __init acpi_table_parse_entries_array(char *id,
return count;
}
-int __init acpi_table_parse_entries(char *id,
- unsigned long table_size,
- int entry_id,
- acpi_tbl_entry_handler handler,
- unsigned int max_entries)
+static int __init_or_acpilib __acpi_table_parse_entries(
+ char *id, unsigned long table_size, int entry_id,
+ acpi_tbl_entry_handler handler, acpi_tbl_entry_handler_arg handler_arg,
+ void *arg, unsigned int max_entries)
{
struct acpi_subtable_proc proc = {
.id = entry_id,
.handler = handler,
+ .handler_arg = handler_arg,
+ .arg = arg,
};
return acpi_table_parse_entries_array(id, table_size, &proc, 1,
max_entries);
}
+int __init_or_acpilib
+acpi_table_parse_cedt(enum acpi_cedt_type id,
+ acpi_tbl_entry_handler_arg handler_arg, void *arg)
+{
+ return __acpi_table_parse_entries(ACPI_SIG_CEDT,
+ sizeof(struct acpi_table_cedt), id,
+ NULL, handler_arg, arg, 0);
+}
+EXPORT_SYMBOL_ACPI_LIB(acpi_table_parse_cedt);
+
+int __init acpi_table_parse_entries(char *id, unsigned long table_size,
+ int entry_id,
+ acpi_tbl_entry_handler handler,
+ unsigned int max_entries)
+{
+ return __acpi_table_parse_entries(id, table_size, entry_id, handler,
+ NULL, NULL, max_entries);
+}
+
int __init acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler, unsigned int max_entries)
{
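
The handler_arg variant threads a caller-supplied pointer through subtable parsing instead of forcing consumers onto file-scope state. A sketch of how a CEDT consumer might count host bridge entries with the new entry point (the counting helper is illustrative; ACPI_CEDT_TYPE_CHBS is assumed to be the CXL Host Bridge Structure type from the ACPICA headers):

	static int __init count_chbs(union acpi_subtable_headers *header,
				     void *arg, const unsigned long end)
	{
		int *count = arg;

		(*count)++;
		return 0;
	}

	static int __init cedt_count_host_bridges(void)
	{
		int count = 0;

		acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, count_chbs, &count);
		return count;
	}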
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
index abc06e7f89d8..ed889f827f53 100644
--- a/drivers/acpi/x86/s2idle.c
+++ b/drivers/acpi/x86/s2idle.c
@@ -424,15 +424,11 @@ static int lps0_device_attach(struct acpi_device *adev,
mem_sleep_current = PM_SUSPEND_TO_IDLE;
/*
- * Some Intel based LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U don't
- * use intel-hid or intel-vbtn but require the EC GPE to be enabled while
- * suspended for certain wakeup devices to work, so mark it as wakeup-capable.
- *
- * Only enable on !AMD as enabling this universally causes problems for a number
- * of AMD based systems.
+ * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
+ * EC GPE to be enabled while suspended for certain wakeup devices to
+ * work, so mark it as wakeup-capable.
*/
- if (!acpi_s2idle_vendor_amd())
- acpi_ec_mark_gpe_for_wake();
+ acpi_ec_mark_gpe_for_wake();
return 0;
}
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index c75fb600740c..8351c5638880 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -69,7 +69,7 @@
#include <uapi/linux/android/binder.h>
-#include <asm/cacheflush.h>
+#include <linux/cacheflush.h>
#include "binder_internal.h"
#include "binder_trace.h"
@@ -1608,15 +1608,21 @@ static void binder_cleanup_transaction(struct binder_transaction *t,
/**
* binder_get_object() - gets object and checks for valid metadata
* @proc: binder_proc owning the buffer
+ * @u: sender's user pointer to base of buffer
* @buffer: binder_buffer that we're parsing.
* @offset: offset in the @buffer at which to validate an object.
* @object: struct binder_object to read into
*
- * Return: If there's a valid metadata object at @offset in @buffer, the
+ * Copy the binder object at the given offset into @object. If @u is
+ * provided then the copy is from the sender's buffer. If not, then
+ * it is copied from the target's @buffer.
+ *
+ * Return: If there's a valid metadata object at @offset, the
* size of that object. Otherwise, it returns zero. The object
* is read into the struct binder_object pointed to by @object.
*/
static size_t binder_get_object(struct binder_proc *proc,
+ const void __user *u,
struct binder_buffer *buffer,
unsigned long offset,
struct binder_object *object)
@@ -1626,10 +1632,16 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
- if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
- binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
- offset, read_size))
+ if (offset > buffer->data_size || read_size < sizeof(*hdr))
return 0;
+ if (u) {
+ if (copy_from_user(object, u + offset, read_size))
+ return 0;
+ } else {
+ if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
+ offset, read_size))
+ return 0;
+ }
/* Ok, now see if we read a complete object. */
hdr = &object->hdr;
@@ -1702,7 +1714,7 @@ static struct binder_buffer_object *binder_validate_ptr(
b, buffer_offset,
sizeof(object_offset)))
return NULL;
- object_size = binder_get_object(proc, b, object_offset, object);
+ object_size = binder_get_object(proc, NULL, b, object_offset, object);
if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
return NULL;
if (object_offsetp)
@@ -1767,7 +1779,8 @@ static bool binder_validate_fixup(struct binder_proc *proc,
unsigned long buffer_offset;
struct binder_object last_object;
struct binder_buffer_object *last_bbo;
- size_t object_size = binder_get_object(proc, b, last_obj_offset,
+ size_t object_size = binder_get_object(proc, NULL, b,
+ last_obj_offset,
&last_object);
if (object_size != sizeof(*last_bbo))
return false;
@@ -1882,7 +1895,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
buffer, buffer_offset,
sizeof(object_offset)))
- object_size = binder_get_object(proc, buffer,
+ object_size = binder_get_object(proc, NULL, buffer,
object_offset, &object);
if (object_size == 0) {
pr_err("transaction release %d bad object at offset %lld, size %zd\n",
@@ -1933,7 +1946,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_FD: {
/*
* No need to close the file here since user-space
- * closes it for for successfully delivered
+ * closes it for successfully delivered
* transactions. For transactions that weren't
* delivered, the new fd was never allocated so
* there is no need to close and the fput on the
@@ -2220,16 +2233,258 @@ err_fd_not_accepted:
return ret;
}
-static int binder_translate_fd_array(struct binder_fd_array_object *fda,
+/**
+ * struct binder_ptr_fixup - data to be fixed-up in target buffer
+ * @offset: offset in target buffer to fixup
+ * @skip_size: bytes to skip in copy (fixup will be written later)
+ * @fixup_data: data to write at fixup offset
+ * @node: list node
+ *
+ * This is used for the pointer fixup list (pf) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_ptr_fixup {
+ binder_size_t offset;
+ size_t skip_size;
+ binder_uintptr_t fixup_data;
+ struct list_head node;
+};
+
+/**
+ * struct binder_sg_copy - scatter-gather data to be copied
+ * @offset: offset in target buffer
+ * @sender_uaddr: user address in source buffer
+ * @length: bytes to copy
+ * @node: list node
+ *
+ * This is used for the sg copy list (sgc) which is created and consumed
+ * during binder_transaction() and is only accessed locally. No
+ * locking is necessary.
+ *
+ * The list is ordered by @offset.
+ */
+struct binder_sg_copy {
+ binder_size_t offset;
+ const void __user *sender_uaddr;
+ size_t length;
+ struct list_head node;
+};
+
+/**
+ * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
+ * @alloc: binder_alloc associated with @buffer
+ * @buffer: binder buffer in target process
+ * @sgc_head: list_head of scatter-gather copy list
+ * @pf_head: list_head of pointer fixup list
+ *
+ * Processes all elements of @sgc_head, applying fixups from @pf_head
+ * and copying the scatter-gather data from the source process' user
+ * buffer to the target's buffer. It is expected that the list creation
+ * and processing all occurs during binder_transaction() so these lists
+ * are only accessed in local context.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ struct list_head *sgc_head,
+ struct list_head *pf_head)
+{
+ int ret = 0;
+ struct binder_sg_copy *sgc, *tmpsgc;
+ struct binder_ptr_fixup *pf =
+ list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
+ node);
+
+ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+ size_t bytes_copied = 0;
+
+ while (bytes_copied < sgc->length) {
+ size_t copy_size;
+ size_t bytes_left = sgc->length - bytes_copied;
+ size_t offset = sgc->offset + bytes_copied;
+
+ /*
+ * We copy up to the fixup (pointed to by pf)
+ */
+ copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
+ : bytes_left;
+ if (!ret && copy_size)
+ ret = binder_alloc_copy_user_to_buffer(
+ alloc, buffer,
+ offset,
+ sgc->sender_uaddr + bytes_copied,
+ copy_size);
+ bytes_copied += copy_size;
+ if (copy_size != bytes_left) {
+ BUG_ON(!pf);
+ /* we stopped at a fixup offset */
+ if (pf->skip_size) {
+ /*
+ * we are just skipping. This is for
+ * BINDER_TYPE_FDA where the translated
+ * fds will be fixed up when we get
+ * to target context.
+ */
+ bytes_copied += pf->skip_size;
+ } else {
+ /* apply the fixup indicated by pf */
+ if (!ret)
+ ret = binder_alloc_copy_to_buffer(
+ alloc, buffer,
+ pf->offset,
+ &pf->fixup_data,
+ sizeof(pf->fixup_data));
+ bytes_copied += sizeof(pf->fixup_data);
+ }
+ list_del(&pf->node);
+ kfree(pf);
+ pf = list_first_entry_or_null(pf_head,
+ struct binder_ptr_fixup, node);
+ }
+ }
+ list_del(&sgc->node);
+ kfree(sgc);
+ }
+ BUG_ON(!list_empty(pf_head));
+ BUG_ON(!list_empty(sgc_head));
+
+ return ret > 0 ? -EINVAL : ret;
+}
+
+/**
+ * binder_cleanup_deferred_txn_lists() - free specified lists
+ * @sgc_head: list_head of scatter-gather copy list
+ * @pf_head: list_head of pointer fixup list
+ *
+ * Called to clean up @sgc_head and @pf_head if there is an
+ * error.
+ */
+static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
+ struct list_head *pf_head)
+{
+ struct binder_sg_copy *sgc, *tmpsgc;
+ struct binder_ptr_fixup *pf, *tmppf;
+
+ list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
+ list_del(&sgc->node);
+ kfree(sgc);
+ }
+ list_for_each_entry_safe(pf, tmppf, pf_head, node) {
+ list_del(&pf->node);
+ kfree(pf);
+ }
+}
+
+/**
+ * binder_defer_copy() - queue a scatter-gather buffer for copy
+ * @sgc_head: list_head of scatter-gather copy list
+ * @offset: binder buffer offset in target process
+ * @sender_uaddr: user address in source process
+ * @length: bytes to copy
+ *
+ * Specify a scatter-gather block to be copied. The actual copy must
+ * be deferred until all the needed fixups are identified and queued.
+ * Then the copy and fixups are done together so un-translated values
+ * from the source are never visible in the target buffer.
+ *
+ * We are guaranteed that repeated calls to this function will have
+ * monotonically increasing @offset values so the list will naturally
+ * be ordered.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
+ const void __user *sender_uaddr, size_t length)
+{
+ struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
+
+ if (!bc)
+ return -ENOMEM;
+
+ bc->offset = offset;
+ bc->sender_uaddr = sender_uaddr;
+ bc->length = length;
+ INIT_LIST_HEAD(&bc->node);
+
+ /*
+ * We are guaranteed that the deferred copies are in-order
+ * so just add to the tail.
+ */
+ list_add_tail(&bc->node, sgc_head);
+
+ return 0;
+}
+
+/**
+ * binder_add_fixup() - queue a fixup to be applied to sg copy
+ * @pf_head: list_head of binder ptr fixup list
+ * @offset: binder buffer offset in target process
+ * @fixup: bytes to be copied for fixup
+ * @skip_size: bytes to skip when copying (fixup will be applied later)
+ *
+ * Add the specified fixup to a list ordered by @offset. When copying
+ * the scatter-gather buffers, the fixup will be copied instead of
+ * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
+ * will be applied later (in target process context), so we just skip
+ * the bytes specified by @skip_size. If @skip_size is 0, we copy the
+ * value in @fixup.
+ *
+ * This function is called *mostly* in @offset order, but there are
+ * exceptions. Since out-of-order inserts are relatively uncommon,
+ * we insert the new element by searching backward from the tail of
+ * the list.
+ *
+ * Return: 0=success, else -errno
+ */
+static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
+ binder_uintptr_t fixup, size_t skip_size)
+{
+ struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
+ struct binder_ptr_fixup *tmppf;
+
+ if (!pf)
+ return -ENOMEM;
+
+ pf->offset = offset;
+ pf->fixup_data = fixup;
+ pf->skip_size = skip_size;
+ INIT_LIST_HEAD(&pf->node);
+
+ /*
+ * Fixups are *mostly* added in-order, but there are some
+ * exceptions. Look backwards through the list for the
+ * insertion point.
+ */
+ list_for_each_entry_reverse(tmppf, pf_head, node) {
+ if (tmppf->offset < pf->offset) {
+ list_add(&pf->node, &tmppf->node);
+ return 0;
+ }
+ }
+ /*
+ * if we get here, then the new offset is the lowest so
+ * insert at the head
+ */
+ list_add(&pf->node, pf_head);
+ return 0;
+}
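
To make the interplay concrete: with a single 64-byte scatter-gather block queued at buffer offset 0 and one pointer fixup (skip_size == 0) queued at offset 16, binder_do_deferred_txn_copies() ends up issuing, in order (a sketch of the resulting copy schedule, not additional driver code):

	binder_alloc_copy_user_to_buffer(..., offset 0,  16 bytes)  /* up to pf->offset */
	binder_alloc_copy_to_buffer(..., offset 16, 8 bytes)        /* the fixed-up pointer */
	binder_alloc_copy_user_to_buffer(..., offset 24, 40 bytes)  /* rest of the block */

so untranslated source data never lands in the target buffer at a fixup offset.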
+
+static int binder_translate_fd_array(struct list_head *pf_head,
+ struct binder_fd_array_object *fda,
+ const void __user *sender_ubuffer,
struct binder_buffer_object *parent,
+ struct binder_buffer_object *sender_uparent,
struct binder_transaction *t,
struct binder_thread *thread,
struct binder_transaction *in_reply_to)
{
binder_size_t fdi, fd_buf_size;
binder_size_t fda_offset;
+ const void __user *sender_ufda_base;
struct binder_proc *proc = thread->proc;
- struct binder_proc *target_proc = t->to_proc;
+ int ret;
fd_buf_size = sizeof(u32) * fda->num_fds;
if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
@@ -2253,29 +2508,36 @@ static int binder_translate_fd_array(struct binder_fd_array_object *fda,
*/
fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
fda->parent_offset;
- if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
+ sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
+ fda->parent_offset;
+
+ if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
+ !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
binder_user_error("%d:%d parent offset not aligned correctly.\n",
proc->pid, thread->pid);
return -EINVAL;
}
+ ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
+ if (ret)
+ return ret;
+
for (fdi = 0; fdi < fda->num_fds; fdi++) {
u32 fd;
- int ret;
binder_size_t offset = fda_offset + fdi * sizeof(fd);
+ binder_size_t sender_uoffset = fdi * sizeof(fd);
- ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
- &fd, t->buffer,
- offset, sizeof(fd));
+ ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
if (!ret)
ret = binder_translate_fd(fd, offset, t, thread,
in_reply_to);
- if (ret < 0)
- return ret;
+ if (ret)
+ return ret > 0 ? -EINVAL : ret;
}
return 0;
}
-static int binder_fixup_parent(struct binder_transaction *t,
+static int binder_fixup_parent(struct list_head *pf_head,
+ struct binder_transaction *t,
struct binder_thread *thread,
struct binder_buffer_object *bp,
binder_size_t off_start_offset,
@@ -2321,14 +2583,7 @@ static int binder_fixup_parent(struct binder_transaction *t,
}
buffer_offset = bp->parent_offset +
(uintptr_t)parent->buffer - (uintptr_t)b->user_data;
- if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
- &bp->buffer, sizeof(bp->buffer))) {
- binder_user_error("%d:%d got transaction with invalid parent offset\n",
- proc->pid, thread->pid);
- return -EINVAL;
- }
-
- return 0;
+ return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
}
/**
@@ -2455,6 +2710,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t off_start_offset, off_end_offset;
binder_size_t off_min;
binder_size_t sg_buf_offset, sg_buf_end_offset;
+ binder_size_t user_offset = 0;
struct binder_proc *target_proc = NULL;
struct binder_thread *target_thread = NULL;
struct binder_node *target_node = NULL;
@@ -2469,6 +2725,12 @@ static void binder_transaction(struct binder_proc *proc,
int t_debug_id = atomic_inc_return(&binder_last_id);
char *secctx = NULL;
u32 secctx_sz = 0;
+ struct list_head sgc_head;
+ struct list_head pf_head;
+ const void __user *user_buffer = (const void __user *)
+ (uintptr_t)tr->data.ptr.buffer;
+ INIT_LIST_HEAD(&sgc_head);
+ INIT_LIST_HEAD(&pf_head);
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@@ -2782,19 +3044,6 @@ static void binder_transaction(struct binder_proc *proc,
if (binder_alloc_copy_user_to_buffer(
&target_proc->alloc,
- t->buffer, 0,
- (const void __user *)
- (uintptr_t)tr->data.ptr.buffer,
- tr->data_size)) {
- binder_user_error("%d:%d got transaction with invalid data ptr\n",
- proc->pid, thread->pid);
- return_error = BR_FAILED_REPLY;
- return_error_param = -EFAULT;
- return_error_line = __LINE__;
- goto err_copy_data_failed;
- }
- if (binder_alloc_copy_user_to_buffer(
- &target_proc->alloc,
t->buffer,
ALIGN(tr->data_size, sizeof(void *)),
(const void __user *)
@@ -2837,6 +3086,7 @@ static void binder_transaction(struct binder_proc *proc,
size_t object_size;
struct binder_object object;
binder_size_t object_offset;
+ binder_size_t copy_size;
if (binder_alloc_copy_from_buffer(&target_proc->alloc,
&object_offset,
@@ -2848,8 +3098,27 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
- object_size = binder_get_object(target_proc, t->buffer,
- object_offset, &object);
+
+ /*
+ * Copy the source user buffer up to the next object
+ * that will be processed.
+ */
+ copy_size = object_offset - user_offset;
+ if (copy_size && (user_offset > object_offset ||
+ binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer, user_offset,
+ user_buffer + user_offset,
+ copy_size))) {
+ binder_user_error("%d:%d got transaction with invalid data ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+ object_size = binder_get_object(target_proc, user_buffer,
+ t->buffer, object_offset, &object);
if (object_size == 0 || object_offset < off_min) {
binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
proc->pid, thread->pid,
@@ -2861,6 +3130,11 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
+ /*
+ * Set offset to the next buffer fragment to be
+ * copied
+ */
+ user_offset = object_offset + object_size;
hdr = &object.hdr;
off_min = object_offset + object_size;
@@ -2923,6 +3197,8 @@ static void binder_transaction(struct binder_proc *proc,
case BINDER_TYPE_FDA: {
struct binder_object ptr_object;
binder_size_t parent_offset;
+ struct binder_object user_object;
+ size_t user_parent_size;
struct binder_fd_array_object *fda =
to_binder_fd_array_object(hdr);
size_t num_valid = (buffer_offset - off_start_offset) /
@@ -2954,11 +3230,35 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_parent;
}
- ret = binder_translate_fd_array(fda, parent, t, thread,
- in_reply_to);
- if (ret < 0) {
+ /*
+ * We need to read the user version of the parent
+ * object to get the original user offset
+ */
+ user_parent_size =
+ binder_get_object(proc, user_buffer, t->buffer,
+ parent_offset, &user_object);
+ if (user_parent_size != sizeof(user_object.bbo)) {
+ binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
+ proc->pid, thread->pid,
+ user_parent_size,
+ sizeof(user_object.bbo));
return_error = BR_FAILED_REPLY;
- return_error_param = ret;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_bad_parent;
+ }
+ ret = binder_translate_fd_array(&pf_head, fda,
+ user_buffer, parent,
+ &user_object.bbo, t,
+ thread, in_reply_to);
+ if (!ret)
+ ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
+ t->buffer,
+ object_offset,
+ fda, sizeof(*fda));
+ if (ret) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret > 0 ? -EINVAL : ret;
return_error_line = __LINE__;
goto err_translate_failed;
}
@@ -2980,19 +3280,14 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_bad_offset;
}
- if (binder_alloc_copy_user_to_buffer(
- &target_proc->alloc,
- t->buffer,
- sg_buf_offset,
- (const void __user *)
- (uintptr_t)bp->buffer,
- bp->length)) {
- binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
- proc->pid, thread->pid);
- return_error_param = -EFAULT;
+ ret = binder_defer_copy(&sgc_head, sg_buf_offset,
+ (const void __user *)(uintptr_t)bp->buffer,
+ bp->length);
+ if (ret) {
return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
return_error_line = __LINE__;
- goto err_copy_data_failed;
+ goto err_translate_failed;
}
/* Fixup buffer pointer to target proc address space */
bp->buffer = (uintptr_t)
@@ -3001,7 +3296,8 @@ static void binder_transaction(struct binder_proc *proc,
num_valid = (buffer_offset - off_start_offset) /
sizeof(binder_size_t);
- ret = binder_fixup_parent(t, thread, bp,
+ ret = binder_fixup_parent(&pf_head, t,
+ thread, bp,
off_start_offset,
num_valid,
last_fixup_obj_off,
@@ -3028,6 +3324,30 @@ static void binder_transaction(struct binder_proc *proc,
goto err_bad_object_type;
}
}
+ /* Done processing objects, copy the rest of the buffer */
+ if (binder_alloc_copy_user_to_buffer(
+ &target_proc->alloc,
+ t->buffer, user_offset,
+ user_buffer + user_offset,
+ tr->data_size - user_offset)) {
+ binder_user_error("%d:%d got transaction with invalid data ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EFAULT;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
+
+ ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
+ &sgc_head, &pf_head);
+ if (ret) {
+ binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_copy_data_failed;
+ }
if (t->buffer->oneway_spam_suspect)
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
else
@@ -3101,6 +3421,7 @@ err_bad_object_type:
err_bad_offset:
err_bad_parent:
err_copy_data_failed:
+ binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
binder_free_txn_fixups(t);
trace_binder_transaction_failed_buffer_release(t->buffer);
binder_transaction_buffer_release(target_proc, NULL, t->buffer,
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index a7da8ea7b3ed..cb54631fd950 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -146,7 +146,7 @@ config SATA_AHCI_PLATFORM
config AHCI_BRCM
tristate "Broadcom AHCI SATA support"
depends on ARCH_BRCMSTB || BMIPS_GENERIC || ARCH_BCM_NSP || \
- ARCH_BCM_63XX
+ ARCH_BCM_63XX || COMPILE_TEST
select SATA_HOST
help
This option enables support for the AHCI SATA3 controller found on
@@ -156,7 +156,7 @@ config AHCI_BRCM
config AHCI_DA850
tristate "DaVinci DA850 AHCI SATA support"
- depends on ARCH_DAVINCI_DA850
+ depends on ARCH_DAVINCI_DA850 || COMPILE_TEST
select SATA_HOST
help
This option enables support for the DaVinci DA850 SoC's
@@ -166,7 +166,7 @@ config AHCI_DA850
config AHCI_DM816
tristate "DaVinci DM816 AHCI SATA support"
- depends on ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
select SATA_HOST
help
This option enables support for the DaVinci DM816 SoC's
@@ -206,7 +206,7 @@ config AHCI_CEVA
config AHCI_MTK
tristate "MediaTek AHCI SATA support"
- depends on ARCH_MEDIATEK
+ depends on ARCH_MEDIATEK || COMPILE_TEST
select MFD_SYSCON
select SATA_HOST
help
@@ -217,7 +217,7 @@ config AHCI_MTK
config AHCI_MVEBU
tristate "Marvell EBU AHCI SATA support"
- depends on ARCH_MVEBU
+ depends on ARCH_MVEBU || COMPILE_TEST
select SATA_HOST
help
This option enables support for the Marvell EBU SoC's
@@ -236,7 +236,7 @@ config AHCI_OCTEON
config AHCI_SUNXI
tristate "Allwinner sunxi AHCI SATA support"
- depends on ARCH_SUNXI
+ depends on ARCH_SUNXI || COMPILE_TEST
select SATA_HOST
help
This option enables support for the Allwinner sunxi SoC's
@@ -246,7 +246,7 @@ config AHCI_SUNXI
config AHCI_TEGRA
tristate "NVIDIA Tegra AHCI SATA support"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
select SATA_HOST
help
This option enables support for the NVIDIA Tegra SoC's
@@ -256,7 +256,7 @@ config AHCI_TEGRA
config AHCI_XGENE
tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
- depends on PHY_XGENE
+ depends on PHY_XGENE || COMPILE_TEST
select SATA_HOST
help
This option enables support for APM X-Gene SoC SATA host controller.
@@ -273,7 +273,7 @@ config AHCI_QORIQ
config SATA_FSL
tristate "Freescale 3.0Gbps SATA support"
- depends on FSL_SOC
+ depends on FSL_SOC || COMPILE_TEST
select SATA_HOST
help
This option enables support for Freescale 3.0Gbps SATA controller.
@@ -294,7 +294,7 @@ config SATA_GEMINI
config SATA_AHCI_SEATTLE
tristate "AMD Seattle 6.0Gbps AHCI SATA host controller support"
- depends on ARCH_SEATTLE
+ depends on ARCH_SEATTLE || COMPILE_TEST
select SATA_HOST
help
This option enables support for AMD Seattle SATA host controller.
@@ -432,18 +432,6 @@ config SATA_DWC_OLD_DMA
This option enables support for old device trees without the
"dmas" property.
-config SATA_DWC_DEBUG
- bool "Debugging driver version"
- depends on SATA_DWC
- help
- This option enables debugging output in the driver.
-
-config SATA_DWC_VDEBUG
- bool "Verbose debug output"
- depends on SATA_DWC_DEBUG
- help
- This option enables the taskfile dumping and NCQ debugging.
-
config SATA_HIGHBANK
tristate "Calxeda Highbank SATA support"
depends on ARCH_HIGHBANK || COMPILE_TEST
@@ -611,7 +599,7 @@ config PATA_ATP867X
config PATA_BK3710
tristate "Palmchip BK3710 PATA support"
- depends on ARCH_DAVINCI
+ depends on ARCH_DAVINCI || COMPILE_TEST
select PATA_TIMINGS
help
This option enables support for the integrated IDE controller on
@@ -649,7 +637,7 @@ config PATA_CS5530
config PATA_CS5535
tristate "CS5535 PATA support (Experimental)"
- depends on PCI && X86_32
+ depends on PCI && (X86_32 || (X86_64 && COMPILE_TEST))
help
This option enables support for the NatSemi/AMD CS5535
companion chip used with the Geode processor family.
@@ -697,7 +685,7 @@ config PATA_EP93XX
config PATA_FTIDE010
tristate "Faraday Technology FTIDE010 PATA support"
depends on OF
- depends on ARM
+ depends on ARM || COMPILE_TEST
depends on SATA_GEMINI
help
This option enables support for the Faraday FTIDE010
@@ -760,7 +748,7 @@ config PATA_ICSIDE
config PATA_IMX
tristate "PATA support for Freescale iMX"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
select PATA_TIMINGS
help
This option enables support for the PATA host available on Freescale
@@ -981,7 +969,7 @@ config PATA_VIA
config PATA_PXA
tristate "PXA DMA-capable PATA support"
- depends on ARCH_PXA
+ depends on ARCH_PXA || COMPILE_TEST
help
This option enables support for hard drives attached to the PXA CPU's bus.
@@ -1157,7 +1145,7 @@ config PATA_RZ1000
config PATA_SAMSUNG_CF
tristate "Samsung SoC PATA support"
- depends on SAMSUNG_DEV_IDE
+ depends on SAMSUNG_DEV_IDE || COMPILE_TEST
select PATA_TIMINGS
help
This option enables basic support for Samsung's S3C/S5P board
diff --git a/drivers/ata/acard-ahci.c b/drivers/ata/acard-ahci.c
index 2a04e8abd397..536d4cb8f08b 100644
--- a/drivers/ata/acard-ahci.c
+++ b/drivers/ata/acard-ahci.c
@@ -185,8 +185,6 @@ static unsigned int acard_ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
struct acard_sg *acard_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
unsigned int si, last_si = 0;
- VPRINTK("ENTER\n");
-
/*
* Next, the S/G list.
*/
@@ -362,8 +360,6 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
struct ata_host *host;
int n_ports, i, rc;
- VPRINTK("ENTER\n");
-
WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
ata_print_version_once(&pdev->dev, DRV_VERSION);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 1e1167e725a4..ab5811ef5a53 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -51,6 +51,7 @@ enum board_ids {
board_ahci,
board_ahci_ign_iferr,
board_ahci_mobile,
+ board_ahci_no_debounce_delay,
board_ahci_nomsi,
board_ahci_noncq,
board_ahci_nosntf,
@@ -141,6 +142,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_no_debounce_delay] = {
+ .flags = AHCI_FLAG_COMMON,
+ .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_nomsi] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
.flags = AHCI_FLAG_COMMON,
@@ -437,6 +445,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
board_ahci_al },
/* AMD */
{ PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
+ { PCI_VDEVICE(AMD, 0x7801), board_ahci_no_debounce_delay }, /* AMD Hudson-2 (AHCI mode) */
{ PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */
{ PCI_VDEVICE(AMD, 0x7901), board_ahci_mobile }, /* AMD Green Sardine */
/* AMD is using RAID class only for ahci controllers */
@@ -684,7 +693,7 @@ static void ahci_pci_init_controller(struct ata_host *host)
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
- VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
+ dev_dbg(&pdev->dev, "PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
}
@@ -700,8 +709,6 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
bool online;
int rc;
- DPRINTK("ENTER\n");
-
hpriv->stop_engine(ap);
rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
@@ -709,8 +716,6 @@ static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
hpriv->start_engine(ap);
- DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
-
/* vt8251 doesn't clear BSY on signature FIS reception,
* request follow-up softreset.
*/
@@ -790,8 +795,6 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
bool online;
int rc, i;
- DPRINTK("ENTER\n");
-
hpriv->stop_engine(ap);
for (i = 0; i < 2; i++) {
@@ -829,7 +832,6 @@ static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
if (online)
*class = ahci_dev_classify(ap);
- DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
return rc;
}
@@ -1476,7 +1478,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
u32 irq_stat, irq_masked;
unsigned int handled = 1;
- VPRINTK("ENTER\n");
hpriv = host->private_data;
mmio = hpriv->mmio;
irq_stat = readl(mmio + HOST_IRQ_STAT);
@@ -1493,7 +1494,6 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
irq_stat = readl(mmio + HOST_IRQ_STAT);
spin_unlock(&host->lock);
} while (irq_stat);
- VPRINTK("EXIT\n");
return IRQ_RETVAL(handled);
}
@@ -1657,7 +1657,7 @@ static ssize_t remapped_nvme_show(struct device *dev,
struct ata_host *host = dev_get_drvdata(dev);
struct ahci_host_priv *hpriv = host->private_data;
- return sprintf(buf, "%u\n", hpriv->remapped_nvme);
+ return sysfs_emit(buf, "%u\n", hpriv->remapped_nvme);
}
static DEVICE_ATTR_RO(remapped_nvme);
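
A note on the sprintf() to sysfs_emit() conversions in this patch: sysfs show() callbacks receive a single page as their buffer, and sysfs_emit() bounds the write to that page where sprintf() does not. Below is a hedged userspace analogue of that bound, not the kernel implementation; PAGE_SIZE and the _demo names are illustrative assumptions.

#include <stdarg.h>
#include <stdio.h>
#include <assert.h>

#define PAGE_SIZE 4096  /* sysfs buffers are one page */

/* Illustrative analogue of sysfs_emit(): never writes past one page. */
static int emit_demo(char *buf, const char *fmt, ...)
{
    va_list args;
    int len;

    va_start(args, fmt);
    len = vsnprintf(buf, PAGE_SIZE, fmt, args);
    va_end(args);

    /* vsnprintf() returns the would-be length; clamp to what fits. */
    return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
}

int main(void)
{
    char page[PAGE_SIZE];
    int n = emit_demo(page, "%u\n", 1u);

    assert(n == 2);
    printf("%s", page);  /* prints "1" and a newline */
    return 0;
}
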
@@ -1673,8 +1673,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int n_ports, i, rc;
int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
- VPRINTK("ENTER\n");
-
WARN_ON((int)ATA_MAX_QUEUE > AHCI_MAX_CMDS);
ata_print_version_once(&pdev->dev, DRV_VERSION);
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index 6e9c5ade4c2e..64dd8aa397d5 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -246,7 +246,7 @@ static void brcm_sata_init(struct brcm_ahci_priv *priv)
}
static unsigned int brcm_ahci_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id)
+ struct ata_taskfile *tf, __le16 *id)
{
struct ata_port *ap = dev->link->ap;
struct ata_host *host = ap->host;
@@ -333,7 +333,7 @@ static struct ata_port_operations ahci_brcm_platform_ops = {
static const struct ata_port_info ahci_brcm_port_info = {
.flags = AHCI_FLAG_COMMON | ATA_FLAG_NO_DIPM,
- .link_flags = ATA_LFLAG_NO_DB_DELAY,
+ .link_flags = ATA_LFLAG_NO_DEBOUNCE_DELAY,
.pio_mask = ATA_PIO4,
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_brcm_platform_ops,
diff --git a/drivers/ata/ahci_ceva.c b/drivers/ata/ahci_ceva.c
index e9c7c07fd84c..acf59f51b356 100644
--- a/drivers/ata/ahci_ceva.c
+++ b/drivers/ata/ahci_ceva.c
@@ -92,9 +92,8 @@ struct ceva_ahci_priv {
};
static unsigned int ceva_ahci_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id)
+ struct ata_taskfile *tf, __le16 *id)
{
- __le16 *__id = (__le16 *)id;
u32 err_mask;
err_mask = ata_do_dev_read_id(dev, tf, id);
@@ -104,7 +103,7 @@ static unsigned int ceva_ahci_read_id(struct ata_device *dev,
* Since CEVA controller does not support device sleep feature, we
* need to clear DEVSLP (bit 8) in word78 of the IDENTIFY DEVICE data.
*/
- __id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
+ id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
return 0;
}
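
The u16 to __le16 retyping of the IDENTIFY buffer lets sparse verify that byte-order conversions happen exactly once. A minimal userspace sketch of the idea follows, assuming a two-byte struct le16 and _demo helpers in place of the kernel's __le16 type and cpu_to_le16()/le16_to_cpu().

#include <stdint.h>
#include <stdio.h>

#define ATA_ID_FEATURE_SUPP 78

/* IDENTIFY data arrives little-endian; model each word as two bytes. */
struct le16 { uint8_t b[2]; };

static uint16_t le16_to_cpu_demo(struct le16 v)
{
    return (uint16_t)v.b[0] | ((uint16_t)v.b[1] << 8);
}

static struct le16 cpu_to_le16_demo(uint16_t v)
{
    struct le16 le = { { v & 0xff, v >> 8 } };
    return le;
}

int main(void)
{
    struct le16 id[256] = { 0 };

    /* pretend the device advertised DEVSLP (bit 8 of word 78) */
    id[ATA_ID_FEATURE_SUPP] = cpu_to_le16_demo(1 << 8);

    /* the ahci_ceva fix: clear the bit while staying in the le16 domain */
    uint16_t w = le16_to_cpu_demo(id[ATA_ID_FEATURE_SUPP]) & (uint16_t)~(1 << 8);
    id[ATA_ID_FEATURE_SUPP] = cpu_to_le16_demo(w);

    printf("word78=0x%04x\n", le16_to_cpu_demo(id[ATA_ID_FEATURE_SUPP]));
    return 0;
}

With the buffer typed this way, masking word 78 without a conversion would no longer type-check under sparse, which is the bug class the ahci_ceva change removes.
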
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 5b46fc9aeb4a..bf5b388bd4e0 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -103,8 +103,6 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
int rc;
bool ls1021a_workaround = (qoriq_priv->type == AHCI_LS1021A);
- DPRINTK("ENTER\n");
-
hpriv->stop_engine(ap);
/*
@@ -146,8 +144,6 @@ static int ahci_qoriq_hardreset(struct ata_link *link, unsigned int *class,
if (online)
*class = ahci_dev_classify(ap);
-
- DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
return rc;
}
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index dffc432b9d54..8e206379d699 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -193,7 +193,7 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
struct xgene_ahci_context *ctx = hpriv->plat_data;
int rc = 0;
u32 port_fbs;
- void *port_mmio = ahci_port_base(ap);
+ void __iomem *port_mmio = ahci_port_base(ap);
/*
* Write the pmp value to PxFBS.DEV
@@ -237,7 +237,7 @@ static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx)
* does not support DEVSLP.
*/
static unsigned int xgene_ahci_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id)
+ struct ata_taskfile *tf, __le16 *id)
{
u32 err_mask;
@@ -454,7 +454,7 @@ static int xgene_ahci_pmp_softreset(struct ata_link *link, unsigned int *class,
int pmp = sata_srst_pmp(link);
struct ata_port *ap = link->ap;
u32 rc;
- void *port_mmio = ahci_port_base(ap);
+ void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;
/*
@@ -499,7 +499,7 @@ static int xgene_ahci_softreset(struct ata_link *link, unsigned int *class,
struct ata_port *ap = link->ap;
struct ahci_host_priv *hpriv = ap->host->private_data;
struct xgene_ahci_context *ctx = hpriv->plat_data;
- void *port_mmio = ahci_port_base(ap);
+ void __iomem *port_mmio = ahci_port_base(ap);
u32 port_fbs;
u32 port_fbs_save;
u32 retry = 1;
@@ -588,8 +588,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
void __iomem *mmio;
u32 irq_stat, irq_masked;
- VPRINTK("ENTER\n");
-
hpriv = host->private_data;
mmio = hpriv->mmio;
@@ -612,8 +610,6 @@ static irqreturn_t xgene_ahci_irq_intr(int irq, void *dev_instance)
spin_unlock(&host->lock);
- VPRINTK("EXIT\n");
-
return IRQ_RETVAL(rc);
}
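
The void * to void __iomem * fixes above matter because __iomem is a sparse annotation that forbids direct dereference of MMIO pointers. A rough sketch of the convention, with the attribute stubbed out for ordinary compilers and readl_demo()/writel_demo() standing in for the kernel accessors:

#include <stdint.h>
#include <stdio.h>

#ifdef __CHECKER__
#define __iomem __attribute__((noderef, address_space(2)))
#else
#define __iomem
#endif

/* Accessors are the only sanctioned way to touch __iomem pointers. */
static uint32_t readl_demo(const volatile void __iomem *addr)
{
    return *(const volatile uint32_t *)addr;
}

static void writel_demo(uint32_t val, volatile void __iomem *addr)
{
    *(volatile uint32_t *)addr = val;
}

int main(void)
{
    uint32_t fake_reg = 0x1234;       /* stands in for a device register */
    void __iomem *mmio = (void __iomem *)&fake_reg;

    writel_demo(readl_demo(mmio) | 1, mmio);
    printf("reg=0x%x\n", fake_reg);
    return 0;
}
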
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 0b2fcf0d1d6c..27b0d903f91f 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -77,6 +77,7 @@
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>
+#include <trace/events/libata.h>
#define DRV_NAME "ata_piix"
#define DRV_VERSION "2.13"
@@ -816,10 +817,15 @@ static int piix_sidpr_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
static bool piix_irq_check(struct ata_port *ap)
{
+ unsigned char host_stat;
+
if (unlikely(!ap->ioaddr.bmdma_addr))
return false;
- return ap->ops->bmdma_status(ap) & ATA_DMA_INTR;
+ host_stat = ap->ops->bmdma_status(ap);
+ trace_ata_bmdma_status(ap, host_stat);
+
+ return host_stat & ATA_DMA_INTR;
}
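
piix_irq_check() now reads the BMDMA status into a local so that the tracepoint and the return value observe the same byte. A hedged userspace sketch of that read-once pattern, with a plain function pointer standing in for the tracepoint (the _demo names are assumptions for illustration):

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define ATA_DMA_INTR 0x04

/* Hypothetical observer hook standing in for trace_ata_bmdma_status(). */
static void (*status_observer)(uint8_t status);

static uint8_t read_bmdma_status_demo(void)
{
    return ATA_DMA_INTR;    /* pretend the controller raised an IRQ */
}

static bool irq_check_demo(void)
{
    /* read once into a local so the trace and the test see one value */
    uint8_t host_stat = read_bmdma_status_demo();

    if (status_observer)
        status_observer(host_stat);

    return host_stat & ATA_DMA_INTR;
}

static void print_status(uint8_t status)
{
    printf("bmdma status 0x%02x\n", status);
}

int main(void)
{
    status_observer = print_status;
    printf("irq pending: %d\n", irq_check_demo());
    return 0;
}
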
#ifdef CONFIG_PM_SLEEP
@@ -1345,7 +1351,6 @@ static void piix_init_pcs(struct ata_host *host,
new_pcs = pcs | map_db->port_enable;
if (new_pcs != pcs) {
- DPRINTK("updating PCS from 0x%x to 0x%x\n", pcs, new_pcs);
pci_write_config_word(pdev, ICH5_PCS, new_pcs);
msleep(150);
}
@@ -1769,14 +1774,12 @@ static int __init piix_init(void)
{
int rc;
- DPRINTK("pci_register_driver\n");
rc = pci_register_driver(&piix_pci_driver);
if (rc)
return rc;
in_module_init = 0;
- DPRINTK("done\n");
return 0;
}
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index f76b8418e6fb..0ed484e04fd6 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1234,12 +1234,12 @@ static void ahci_port_init(struct device *dev, struct ata_port *ap,
/* clear SError */
tmp = readl(port_mmio + PORT_SCR_ERR);
- VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
+ dev_dbg(dev, "PORT_SCR_ERR 0x%x\n", tmp);
writel(tmp, port_mmio + PORT_SCR_ERR);
/* clear port IRQ */
tmp = readl(port_mmio + PORT_IRQ_STAT);
- VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
+ dev_dbg(dev, "PORT_IRQ_STAT 0x%x\n", tmp);
if (tmp)
writel(tmp, port_mmio + PORT_IRQ_STAT);
@@ -1270,10 +1270,10 @@ void ahci_init_controller(struct ata_host *host)
}
tmp = readl(mmio + HOST_CTL);
- VPRINTK("HOST_CTL 0x%x\n", tmp);
+ dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
tmp = readl(mmio + HOST_CTL);
- VPRINTK("HOST_CTL 0x%x\n", tmp);
+ dev_dbg(host->dev, "HOST_CTL 0x%x\n", tmp);
}
EXPORT_SYMBOL_GPL(ahci_init_controller);
@@ -1300,7 +1300,7 @@ unsigned int ahci_dev_classify(struct ata_port *ap)
tf.lbal = (tmp >> 8) & 0xff;
tf.nsect = (tmp) & 0xff;
- return ata_dev_classify(&tf);
+ return ata_port_classify(ap, &tf);
}
EXPORT_SYMBOL_GPL(ahci_dev_classify);
@@ -1415,8 +1415,6 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
bool fbs_disabled = false;
int rc;
- DPRINTK("ENTER\n");
-
/* prepare for SRST (AHCI-1.1 10.4.1) */
rc = ahci_kick_engine(ap);
if (rc && rc != -EOPNOTSUPP)
@@ -1476,7 +1474,6 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
if (fbs_disabled)
ahci_enable_fbs(ap);
- DPRINTK("EXIT, class=%u\n", *class);
return 0;
fail:
@@ -1498,8 +1495,6 @@ static int ahci_softreset(struct ata_link *link, unsigned int *class,
{
int pmp = sata_srst_pmp(link);
- DPRINTK("ENTER\n");
-
return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
}
EXPORT_SYMBOL_GPL(ahci_do_softreset);
@@ -1529,8 +1524,6 @@ static int ahci_pmp_retry_softreset(struct ata_link *link, unsigned int *class,
int rc;
u32 irq_sts;
- DPRINTK("ENTER\n");
-
rc = ahci_do_softreset(link, class, pmp, deadline,
ahci_bad_pmp_check_ready);
@@ -1564,8 +1557,6 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
struct ata_taskfile tf;
int rc;
- DPRINTK("ENTER\n");
-
hpriv->stop_engine(ap);
/* clear D2H reception area to properly wait for D2H FIS */
@@ -1581,7 +1572,6 @@ int ahci_do_hardreset(struct ata_link *link, unsigned int *class,
if (*online)
*class = ahci_dev_classify(ap);
- DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
return rc;
}
EXPORT_SYMBOL_GPL(ahci_do_hardreset);
@@ -1620,8 +1610,6 @@ static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
unsigned int si;
- VPRINTK("ENTER\n");
-
/*
* Next, the S/G list.
*/
@@ -1695,7 +1683,6 @@ static void ahci_fbs_dec_intr(struct ata_port *ap)
u32 fbs = readl(port_mmio + PORT_FBS);
int retries = 3;
- DPRINTK("ENTER\n");
BUG_ON(!pp->fbs_enabled);
/* time to wait for DEC is not specified by AHCI spec,
@@ -1924,8 +1911,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
void __iomem *port_mmio = ahci_port_base(ap);
u32 status;
- VPRINTK("ENTER\n");
-
status = readl(port_mmio + PORT_IRQ_STAT);
writel(status, port_mmio + PORT_IRQ_STAT);
@@ -1933,8 +1918,6 @@ static irqreturn_t ahci_multi_irqs_intr_hard(int irq, void *dev_instance)
ahci_handle_port_interrupt(ap, port_mmio, status);
spin_unlock(ap->lock);
- VPRINTK("EXIT\n");
-
return IRQ_HANDLED;
}
@@ -1951,9 +1934,7 @@ u32 ahci_handle_port_intr(struct ata_host *host, u32 irq_masked)
ap = host->ports[i];
if (ap) {
ahci_port_intr(ap);
- VPRINTK("port %u\n", i);
} else {
- VPRINTK("port %u (no irq)\n", i);
if (ata_ratelimit())
dev_warn(host->dev,
"interrupt on disabled port %u\n", i);
@@ -1974,8 +1955,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
void __iomem *mmio;
u32 irq_stat, irq_masked;
- VPRINTK("ENTER\n");
-
hpriv = host->private_data;
mmio = hpriv->mmio;
@@ -2003,8 +1982,6 @@ static irqreturn_t ahci_single_level_irq_intr(int irq, void *dev_instance)
spin_unlock(&host->lock);
- VPRINTK("EXIT\n");
-
return IRQ_RETVAL(rc);
}
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 0910441321f7..18296443ccba 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -579,11 +579,8 @@ int ahci_platform_init_host(struct platform_device *pdev,
int i, irq, n_ports, rc;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- if (irq != -EPROBE_DEFER)
- dev_err(dev, "no irq\n");
+ if (irq < 0)
return irq;
- }
if (!irq)
return -EINVAL;
@@ -642,13 +639,8 @@ int ahci_platform_init_host(struct platform_device *pdev,
if (hpriv->cap & HOST_CAP_64) {
rc = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (rc) {
- rc = dma_coerce_mask_and_coherent(dev,
- DMA_BIT_MASK(32));
- if (rc) {
- dev_err(dev, "Failed to enable 64-bit DMA.\n");
- return rc;
- }
- dev_warn(dev, "Enable 32-bit DMA instead of 64-bit.\n");
+ dev_err(dev, "Failed to enable 64-bit DMA.\n");
+ return rc;
}
}
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 7a7d6642edcc..8cfa8c96bb13 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -402,7 +402,6 @@ EXPORT_SYMBOL_GPL(ata_acpi_stm);
*/
static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
{
- struct ata_port *ap = dev->link->ap;
acpi_status status;
struct acpi_buffer output;
union acpi_object *out_obj;
@@ -418,10 +417,6 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
output.length = ACPI_ALLOCATE_BUFFER;
output.pointer = NULL; /* ACPI-CA sets this; save/free it later */
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev, "%s: ENTER: port#: %d\n",
- __func__, ap->port_no);
-
/* _GTF has no input parameters */
status = acpi_evaluate_object(ata_dev_acpi_handle(dev), "_GTF", NULL,
&output);
@@ -437,11 +432,9 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
}
if (!output.length || !output.pointer) {
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev, "%s: Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n",
- __func__,
- (unsigned long long)output.length,
- output.pointer);
+ ata_dev_dbg(dev, "Run _GTF: length or ptr is NULL (0x%llx, 0x%p)\n",
+ (unsigned long long)output.length,
+ output.pointer);
rc = -EINVAL;
goto out_free;
}
@@ -464,9 +457,8 @@ static int ata_dev_get_GTF(struct ata_device *dev, struct ata_acpi_gtf **gtf)
rc = out_obj->buffer.length / REGS_PER_GTF;
if (gtf) {
*gtf = (void *)out_obj->buffer.pointer;
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev, "%s: returning gtf=%p, gtf_count=%d\n",
- __func__, *gtf, rc);
+ ata_dev_dbg(dev, "returning gtf=%p, gtf_count=%d\n",
+ *gtf, rc);
}
return rc;
@@ -650,9 +642,7 @@ static int ata_acpi_run_tf(struct ata_device *dev,
struct ata_taskfile *pptf = NULL;
struct ata_taskfile tf, ptf, rtf;
unsigned int err_mask;
- const char *level;
const char *descr;
- char msg[60];
int rc;
if ((gtf->tf[0] == 0) && (gtf->tf[1] == 0) && (gtf->tf[2] == 0)
@@ -666,6 +656,8 @@ static int ata_acpi_run_tf(struct ata_device *dev,
pptf = &ptf;
}
+ descr = ata_get_cmd_name(tf.command);
+
if (!ata_acpi_filter_tf(dev, &tf, pptf)) {
rtf = tf;
err_mask = ata_exec_internal(dev, &rtf, NULL,
@@ -673,40 +665,42 @@ static int ata_acpi_run_tf(struct ata_device *dev,
switch (err_mask) {
case 0:
- level = KERN_DEBUG;
- snprintf(msg, sizeof(msg), "succeeded");
+ ata_dev_dbg(dev,
+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
+ "(%s) succeeded\n",
+ tf.command, tf.feature, tf.nsect, tf.lbal,
+ tf.lbam, tf.lbah, tf.device, descr);
rc = 1;
break;
case AC_ERR_DEV:
- level = KERN_INFO;
- snprintf(msg, sizeof(msg),
- "rejected by device (Stat=0x%02x Err=0x%02x)",
- rtf.command, rtf.feature);
+ ata_dev_info(dev,
+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
+ "(%s) rejected by device (Stat=0x%02x Err=0x%02x)",
+ tf.command, tf.feature, tf.nsect, tf.lbal,
+ tf.lbam, tf.lbah, tf.device, descr,
+ rtf.command, rtf.feature);
rc = 0;
break;
default:
- level = KERN_ERR;
- snprintf(msg, sizeof(msg),
- "failed (Emask=0x%x Stat=0x%02x Err=0x%02x)",
- err_mask, rtf.command, rtf.feature);
+ ata_dev_err(dev,
+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
+ "(%s) failed (Emask=0x%x Stat=0x%02x Err=0x%02x)",
+ tf.command, tf.feature, tf.nsect, tf.lbal,
+ tf.lbam, tf.lbah, tf.device, descr,
+ err_mask, rtf.command, rtf.feature);
rc = -EIO;
break;
}
} else {
- level = KERN_INFO;
- snprintf(msg, sizeof(msg), "filtered out");
+ ata_dev_info(dev,
+ "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x"
+ "(%s) filtered out\n",
+ tf.command, tf.feature, tf.nsect, tf.lbal,
+ tf.lbam, tf.lbah, tf.device, descr);
rc = 0;
}
- descr = ata_get_cmd_descript(tf.command);
-
- ata_dev_printk(dev, level,
- "ACPI cmd %02x/%02x:%02x:%02x:%02x:%02x:%02x (%s) %s\n",
- tf.command, tf.feature, tf.nsect, tf.lbal,
- tf.lbam, tf.lbah, tf.device,
- (descr ? descr : "unknown"), msg);
-
return rc;
}
@@ -776,9 +770,8 @@ static int ata_acpi_push_id(struct ata_device *dev)
struct acpi_object_list input;
union acpi_object in_params[1];
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n",
- __func__, dev->devno, ap->port_no);
+ ata_dev_dbg(dev, "%s: ix = %d, port#: %d\n",
+ __func__, dev->devno, ap->port_no);
/* Give the drive Identify data to the drive via the _SDD method */
/* _SDD: set up input parameters */
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index aba0c67d1bd6..0c854aebfe0b 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -764,9 +764,6 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
head = track % dev->heads;
sect = (u32)block % dev->sectors + 1;
- DPRINTK("block %u track %u cyl %u head %u sect %u\n",
- (u32)block, track, cyl, head, sect);
-
/* Check whether the converted CHS can fit.
Cylinder: 0-65535
Head: 0-15
@@ -1010,32 +1007,21 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
* SEMB signature. This is worked around in
* ata_dev_read_id().
*/
- if ((tf->lbam == 0) && (tf->lbah == 0)) {
- DPRINTK("found ATA device by sig\n");
+ if (tf->lbam == 0 && tf->lbah == 0)
return ATA_DEV_ATA;
- }
- if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
- DPRINTK("found ATAPI device by sig\n");
+ if (tf->lbam == 0x14 && tf->lbah == 0xeb)
return ATA_DEV_ATAPI;
- }
- if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
- DPRINTK("found PMP device by sig\n");
+ if (tf->lbam == 0x69 && tf->lbah == 0x96)
return ATA_DEV_PMP;
- }
- if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
- DPRINTK("found SEMB device by sig (could be ATA device)\n");
+ if (tf->lbam == 0x3c && tf->lbah == 0xc3)
return ATA_DEV_SEMB;
- }
- if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
- DPRINTK("found ZAC device by sig\n");
+ if (tf->lbam == 0xcd && tf->lbah == 0xab)
return ATA_DEV_ZAC;
- }
- DPRINTK("unknown device\n");
return ATA_DEV_UNKNOWN;
}
EXPORT_SYMBOL_GPL(ata_dev_classify);
@@ -1355,6 +1341,7 @@ static int ata_hpa_resize(struct ata_device *dev)
/**
* ata_dump_id - IDENTIFY DEVICE info debugging output
+ * @dev: device from which the information is fetched
* @id: IDENTIFY DEVICE page to dump
*
* Dump selected 16-bit words from the given IDENTIFY DEVICE
@@ -1364,32 +1351,14 @@ static int ata_hpa_resize(struct ata_device *dev)
* caller.
*/
-static inline void ata_dump_id(const u16 *id)
-{
- DPRINTK("49==0x%04x "
- "53==0x%04x "
- "63==0x%04x "
- "64==0x%04x "
- "75==0x%04x \n",
- id[49],
- id[53],
- id[63],
- id[64],
- id[75]);
- DPRINTK("80==0x%04x "
- "81==0x%04x "
- "82==0x%04x "
- "83==0x%04x "
- "84==0x%04x \n",
- id[80],
- id[81],
- id[82],
- id[83],
- id[84]);
- DPRINTK("88==0x%04x "
- "93==0x%04x\n",
- id[88],
- id[93]);
+static inline void ata_dump_id(struct ata_device *dev, const u16 *id)
+{
+ ata_dev_dbg(dev,
+ "49==0x%04x 53==0x%04x 63==0x%04x 64==0x%04x 75==0x%04x\n"
+ "80==0x%04x 81==0x%04x 82==0x%04x 83==0x%04x 84==0x%04x\n"
+ "88==0x%04x 93==0x%04x\n",
+ id[49], id[53], id[63], id[64], id[75], id[80],
+ id[81], id[82], id[83], id[84], id[88], id[93]);
}
/**
@@ -1602,9 +1571,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
else
ata_qc_complete(qc);
- if (ata_msg_warn(ap))
- ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
- command);
+ ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
+ command);
}
spin_unlock_irqrestore(ap->lock, flags);
@@ -1754,7 +1722,7 @@ static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
* this function is wrapped or replaced by the driver
*/
unsigned int ata_do_dev_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id)
+ struct ata_taskfile *tf, __le16 *id)
{
return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
id, sizeof(id[0]) * ATA_ID_WORDS, 0);
@@ -1794,9 +1762,6 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
int may_fallback = 1, tried_spinup = 0;
int rc;
- if (ata_msg_ctl(ap))
- ata_dev_dbg(dev, "%s: ENTER\n", __func__);
-
retry:
ata_tf_init(dev, &tf);
@@ -1830,9 +1795,9 @@ retry:
tf.flags |= ATA_TFLAG_POLLING;
if (ap->ops->read_id)
- err_mask = ap->ops->read_id(dev, &tf, id);
+ err_mask = ap->ops->read_id(dev, &tf, (__le16 *)id);
else
- err_mask = ata_do_dev_read_id(dev, &tf, id);
+ err_mask = ata_do_dev_read_id(dev, &tf, (__le16 *)id);
if (err_mask) {
if (err_mask & AC_ERR_NODEV_HINT) {
@@ -1879,10 +1844,10 @@ retry:
}
if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
- ata_dev_dbg(dev, "dumping IDENTIFY data, "
+ ata_dev_info(dev, "dumping IDENTIFY data, "
"class=%d may_fallback=%d tried_spinup=%d\n",
class, may_fallback, tried_spinup);
- print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET,
16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
}
@@ -1966,9 +1931,8 @@ retry:
return 0;
err_out:
- if (ata_msg_warn(ap))
- ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
- reason, err_mask);
+ ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
+ reason, err_mask);
return rc;
}
@@ -1996,7 +1960,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
unsigned int err_mask;
bool dma = false;
- DPRINTK("read log page - log 0x%x, page 0x%x\n", log, page);
+ ata_dev_dbg(dev, "read log page - log 0x%x, page 0x%x\n", log, page);
/*
* Return error without actually issuing the command on controllers
@@ -2043,6 +2007,9 @@ static bool ata_log_supported(struct ata_device *dev, u8 log)
{
struct ata_port *ap = dev->link->ap;
+ if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR)
+ return false;
+
if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1))
return false;
return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false;
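
ata_log_supported() reads a little-endian 16-bit page count out of a byte buffer, which is why it goes through get_unaligned_le16() rather than a pointer cast. A small userspace sketch of the same access, assuming the _demo helper in place of the kernel one:

#include <stdint.h>
#include <stdio.h>

/* Portable stand-in for the kernel's get_unaligned_le16(). */
static uint16_t get_unaligned_le16_demo(const void *p)
{
    const uint8_t *b = p;
    return (uint16_t)b[0] | ((uint16_t)b[1] << 8);
}

int main(void)
{
    uint8_t sector_buf[512] = { 0 };
    uint8_t log = 0x47;    /* concurrent positioning ranges */

    /* pretend the log directory says log 0x47 has one page */
    sector_buf[log * 2] = 1;

    printf("log 0x%02x supported: %s\n", log,
           get_unaligned_le16_demo(&sector_buf[log * 2]) ? "yes" : "no");
    return 0;
}
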
@@ -2390,7 +2357,6 @@ static void ata_dev_config_trusted(struct ata_device *dev)
static int ata_dev_config_lba(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
const u16 *id = dev->id;
const char *lba_desc;
char ncq_desc[24];
@@ -2412,7 +2378,7 @@ static int ata_dev_config_lba(struct ata_device *dev)
ret = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
/* print device info to dmesg */
- if (ata_msg_drv(ap) && ata_dev_print_info(dev))
+ if (ata_dev_print_info(dev))
ata_dev_info(dev,
"%llu sectors, multi %u: %s %s\n",
(unsigned long long)dev->n_sectors,
@@ -2423,7 +2389,6 @@ static int ata_dev_config_lba(struct ata_device *dev)
static void ata_dev_config_chs(struct ata_device *dev)
{
- struct ata_port *ap = dev->link->ap;
const u16 *id = dev->id;
if (ata_id_current_chs_valid(id)) {
@@ -2439,7 +2404,7 @@ static void ata_dev_config_chs(struct ata_device *dev)
}
/* print device info to dmesg */
- if (ata_msg_drv(ap) && ata_dev_print_info(dev))
+ if (ata_dev_print_info(dev))
ata_dev_info(dev,
"%llu sectors, multi %u, CHS %u/%u/%u\n",
(unsigned long long)dev->n_sectors,
@@ -2483,23 +2448,21 @@ static void ata_dev_config_cpr(struct ata_device *dev)
struct ata_cpr_log *cpr_log = NULL;
u8 *desc, *buf = NULL;
- if (!ata_identify_page_supported(dev,
- ATA_LOG_CONCURRENT_POSITIONING_RANGES))
+ if (ata_id_major_version(dev->id) < 11 ||
+ !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES))
goto out;
/*
- * Read IDENTIFY DEVICE data log, page 0x47
- * (concurrent positioning ranges). We can have at most 255 32B range
- * descriptors plus a 64B header.
+ * Read the concurrent positioning ranges log (0x47). We can have at
+ * most 255 32B range descriptors plus a 64B header.
*/
buf_len = (64 + 255 * 32 + 511) & ~511;
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
goto out;
- err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
- ATA_LOG_CONCURRENT_POSITIONING_RANGES,
- buf, buf_len >> 9);
+ err_mask = ata_read_log_page(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES,
+ 0, buf, buf_len >> 9);
if (err_mask)
goto out;
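
The buf_len expression above rounds the worst-case log payload (a 64-byte header plus 255 descriptors of 32 bytes) up to a whole number of 512-byte sectors. A standalone sketch of the idiom, under the assumption that the alignment is a power of two:

#include <stdio.h>

/* Round x up to a multiple of the power-of-two a: (x + a - 1) & ~(a - 1). */
static unsigned int round_up_pow2(unsigned int x, unsigned int a)
{
    return (x + a - 1) & ~(a - 1);
}

int main(void)
{
    /* 64-byte header plus at most 255 descriptors of 32 bytes each */
    unsigned int payload = 64 + 255 * 32;    /* 8224 bytes */
    unsigned int buf_len = round_up_pow2(payload, 512);

    printf("payload=%u buf_len=%u sectors=%u\n",
           payload, buf_len, buf_len >> 9);    /* 8224 8704 17 */
    return 0;
}
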
@@ -2566,14 +2529,11 @@ int ata_dev_configure(struct ata_device *dev)
char modelbuf[ATA_ID_PROD_LEN+1];
int rc;
- if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
- ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__);
+ if (!ata_dev_enabled(dev)) {
+ ata_dev_dbg(dev, "no device\n");
return 0;
}
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev, "%s: ENTER\n", __func__);
-
/* set horkage */
dev->horkage |= ata_dev_blacklisted(dev);
ata_force_horkage(dev);
@@ -2621,13 +2581,12 @@ int ata_dev_configure(struct ata_device *dev)
return rc;
/* print device capabilities */
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev,
- "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
- "85:%04x 86:%04x 87:%04x 88:%04x\n",
- __func__,
- id[49], id[82], id[83], id[84],
- id[85], id[86], id[87], id[88]);
+ ata_dev_dbg(dev,
+ "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
+ "85:%04x 86:%04x 87:%04x 88:%04x\n",
+ __func__,
+ id[49], id[82], id[83], id[84],
+ id[85], id[86], id[87], id[88]);
/* initialize to-be-configured parameters */
dev->flags &= ~ATA_DFLAG_CFG_MASK;
@@ -2646,8 +2605,7 @@ int ata_dev_configure(struct ata_device *dev)
/* find max transfer mode; for printk only */
xfer_mask = ata_id_xfermask(id);
- if (ata_msg_probe(ap))
- ata_dump_id(id);
+ ata_dump_id(dev, id);
/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
@@ -2685,7 +2643,7 @@ int ata_dev_configure(struct ata_device *dev)
}
/* print device info to dmesg */
- if (ata_msg_drv(ap) && print_info)
+ if (print_info)
ata_dev_info(dev, "%s: %s, %s, max %s\n",
revbuf, modelbuf, fwrevbuf,
ata_mode_string(xfer_mask));
@@ -2705,7 +2663,7 @@ int ata_dev_configure(struct ata_device *dev)
ata_dev_config_cpr(dev);
dev->cdb_len = 32;
- if (ata_msg_drv(ap) && print_info)
+ if (print_info)
ata_dev_print_features(dev);
}
@@ -2718,8 +2676,7 @@ int ata_dev_configure(struct ata_device *dev)
rc = atapi_cdb_len(id);
if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
- if (ata_msg_warn(ap))
- ata_dev_warn(dev, "unsupported CDB len\n");
+ ata_dev_warn(dev, "unsupported CDB len %d\n", rc);
rc = -EINVAL;
goto err_out_nosup;
}
@@ -2763,7 +2720,7 @@ int ata_dev_configure(struct ata_device *dev)
}
/* print device info to dmesg */
- if (ata_msg_drv(ap) && print_info)
+ if (print_info)
ata_dev_info(dev,
"ATAPI: %s, %s, max %s%s%s%s\n",
modelbuf, fwrevbuf,
@@ -2780,7 +2737,7 @@ int ata_dev_configure(struct ata_device *dev)
/* Limit PATA drive on SATA cable bridge transfers to udma5,
200 sectors */
if (ata_dev_knobble(dev)) {
- if (ata_msg_drv(ap) && print_info)
+ if (print_info)
ata_dev_info(dev, "applying bridge limits\n");
dev->udma_mask &= ATA_UDMA5;
dev->max_sectors = ATA_MAX_SECTORS;
@@ -2829,8 +2786,6 @@ int ata_dev_configure(struct ata_device *dev)
return 0;
err_out_nosup:
- if (ata_msg_probe(ap))
- ata_dev_dbg(dev, "%s: EXIT, err\n", __func__);
return rc;
}
@@ -3373,8 +3328,8 @@ static int ata_dev_set_mode(struct ata_device *dev)
dev_err_whine = " (device error ignored)";
}
- DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
- dev->xfer_shift, (int)dev->xfer_mode);
+ ata_dev_dbg(dev, "xfer_shift=%u, xfer_mode=0x%x\n",
+ dev->xfer_shift, (int)dev->xfer_mode);
if (!(ehc->i.flags & ATA_EHI_QUIET) ||
ehc->i.flags & ATA_EHI_DID_HARDRESET)
@@ -3688,16 +3643,12 @@ void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
u32 serror;
- DPRINTK("ENTER\n");
-
/* reset complete, clear SError */
if (!sata_scr_read(link, SCR_ERROR, &serror))
sata_scr_write(link, SCR_ERROR, serror);
/* print link status */
sata_print_link_status(link);
-
- DPRINTK("EXIT\n");
}
EXPORT_SYMBOL_GPL(ata_std_postreset);
@@ -4078,6 +4029,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* devices that don't properly handle TRIM commands */
{ "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
+ { "M88V29*", NULL, ATA_HORKAGE_NOTRIM, },
/*
* As defined, the DRAT (Deterministic Read After Trim) and RZAT
@@ -4123,6 +4075,13 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
{ "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM },
+ /*
+ * This SATA DOM device goes on a walkabout when the ATA_LOG_DIRECTORY
+ * log page is accessed. Ensure we never ask for this log page with
+ * these devices.
+ */
+ { "SATADOM-ML 3ME", NULL, ATA_HORKAGE_NO_LOG_DIR },
+
/* End Marker */
{ }
};
@@ -4324,7 +4283,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
unsigned int err_mask;
/* set up set-features taskfile */
- DPRINTK("set features - xfer mode\n");
+ ata_dev_dbg(dev, "set features - xfer mode\n");
/* Some controllers and ATAPI devices show flaky interrupt
* behavior after setting xfer mode. Use polling instead.
@@ -4346,7 +4305,6 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
/* On some disks, this command causes spin-up, so we need longer timeout */
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
- DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
}
@@ -4372,7 +4330,7 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
unsigned long timeout = 0;
/* set up set-features taskfile */
- DPRINTK("set features - SATA features\n");
+ ata_dev_dbg(dev, "set features - SATA features\n");
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_SET_FEATURES;
@@ -4386,7 +4344,6 @@ unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout);
- DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
}
EXPORT_SYMBOL_GPL(ata_dev_set_feature);
@@ -4414,7 +4371,7 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
return AC_ERR_INVALID;
/* set up init dev params taskfile */
- DPRINTK("init dev params \n");
+ ata_dev_dbg(dev, "init dev params \n");
ata_tf_init(dev, &tf);
tf.command = ATA_CMD_INIT_DEV_PARAMS;
@@ -4430,7 +4387,6 @@ static unsigned int ata_dev_init_params(struct ata_device *dev,
if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
err_mask = 0;
- DPRINTK("EXIT, err_mask=%x\n", err_mask);
return err_mask;
}
@@ -4542,8 +4498,6 @@ static void ata_sg_clean(struct ata_queued_cmd *qc)
WARN_ON_ONCE(sg == NULL);
- VPRINTK("unmapping %u sg elements\n", qc->n_elem);
-
if (qc->n_elem)
dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir);
@@ -4569,13 +4523,10 @@ static int ata_sg_setup(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
unsigned int n_elem;
- VPRINTK("ENTER, ata%u\n", ap->print_id);
-
n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
if (n_elem < 1)
return -1;
- VPRINTK("%d sg elements mapped\n", n_elem);
qc->orig_n_elem = qc->n_elem;
qc->n_elem = n_elem;
qc->flags |= ATA_QCFLAG_DMAMAP;
@@ -4930,6 +4881,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
return;
}
+ trace_ata_qc_prep(qc);
qc->err_mask |= ap->ops->qc_prep(qc);
if (unlikely(qc->err_mask))
goto err;
@@ -5375,8 +5327,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
{
struct ata_port *ap;
- DPRINTK("ENTER\n");
-
ap = kzalloc(sizeof(*ap), GFP_KERNEL);
if (!ap)
return NULL;
@@ -5388,15 +5338,6 @@ struct ata_port *ata_port_alloc(struct ata_host *host)
ap->host = host;
ap->dev = host->dev;
-#if defined(ATA_VERBOSE_DEBUG)
- /* turn on all debugging levels */
- ap->msg_enable = 0x00FF;
-#elif defined(ATA_DEBUG)
- ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
-#else
- ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
-#endif
-
mutex_init(&ap->scsi_scan_mutex);
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
@@ -5493,8 +5434,6 @@ struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
int i;
void *dr;
- DPRINTK("ENTER\n");
-
/* alloc a container for our list of ATA ports (buses) */
sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
host = kzalloc(sz, GFP_KERNEL);
@@ -5784,9 +5723,7 @@ int ata_port_probe(struct ata_port *ap)
__ata_port_probe(ap);
ata_port_wait_eh(ap);
} else {
- DPRINTK("ata%u: bus probe begin\n", ap->print_id);
rc = ata_bus_probe(ap);
- DPRINTK("ata%u: bus probe end\n", ap->print_id);
}
return rc;
}
@@ -6553,69 +6490,14 @@ const struct ata_port_info ata_dummy_port_info = {
};
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
-/*
- * Utility print functions
- */
-void ata_port_printk(const struct ata_port *ap, const char *level,
- const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- printk("%sata%u: %pV", level, ap->print_id, &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(ata_port_printk);
-
-void ata_link_printk(const struct ata_link *link, const char *level,
- const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- if (sata_pmp_attached(link->ap) || link->ap->slave_link)
- printk("%sata%u.%02u: %pV",
- level, link->ap->print_id, link->pmp, &vaf);
- else
- printk("%sata%u: %pV",
- level, link->ap->print_id, &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(ata_link_printk);
-
-void ata_dev_printk(const struct ata_device *dev, const char *level,
- const char *fmt, ...)
-{
- struct va_format vaf;
- va_list args;
-
- va_start(args, fmt);
-
- vaf.fmt = fmt;
- vaf.va = &args;
-
- printk("%sata%u.%02u: %pV",
- level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
- &vaf);
-
- va_end(args);
-}
-EXPORT_SYMBOL(ata_dev_printk);
-
void ata_print_version(const struct device *dev, const char *version)
{
dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(ata_tf_load);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ata_exec_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_setup);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ata_bmdma_status);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 1d4a6f1e88cd..7951fd946bf9 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -533,8 +533,6 @@ void ata_scsi_error(struct Scsi_Host *host)
unsigned long flags;
LIST_HEAD(eh_work_q);
- DPRINTK("ENTER\n");
-
spin_lock_irqsave(host->host_lock, flags);
list_splice_init(&host->eh_cmd_q, &eh_work_q);
spin_unlock_irqrestore(host->host_lock, flags);
@@ -548,7 +546,6 @@ void ata_scsi_error(struct Scsi_Host *host)
/* finish or retry handled scmd's and clean up */
WARN_ON(!list_empty(&eh_work_q));
- DPRINTK("EXIT\n");
}
/**
@@ -940,7 +937,7 @@ void ata_std_sched_eh(struct ata_port *ap)
ata_eh_set_pending(ap, 1);
scsi_schedule_eh(ap->scsi_host);
- DPRINTK("port EH scheduled\n");
+ trace_ata_std_sched_eh(ap);
}
EXPORT_SYMBOL_GPL(ata_std_sched_eh);
@@ -1070,7 +1067,7 @@ static void __ata_port_freeze(struct ata_port *ap)
ap->pflags |= ATA_PFLAG_FROZEN;
- DPRINTK("ata%u port frozen\n", ap->print_id);
+ trace_ata_port_freeze(ap);
}
/**
@@ -1147,7 +1144,7 @@ void ata_eh_thaw_port(struct ata_port *ap)
spin_unlock_irqrestore(ap->lock, flags);
- DPRINTK("ata%u port thawed\n", ap->print_id);
+ trace_ata_port_thaw(ap);
}
static void ata_eh_scsidone(struct scsi_cmnd *scmd)
@@ -1217,8 +1214,7 @@ void ata_dev_disable(struct ata_device *dev)
if (!ata_dev_enabled(dev))
return;
- if (ata_msg_drv(dev->link->ap))
- ata_dev_warn(dev, "disabled\n");
+ ata_dev_warn(dev, "disable device\n");
ata_acpi_on_disable(dev);
ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
dev->class++;
@@ -1287,6 +1283,8 @@ void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
struct ata_eh_context *ehc = &link->eh_context;
unsigned long flags;
+ trace_ata_eh_about_to_do(link, dev ? dev->devno : 0, action);
+
spin_lock_irqsave(ap->lock, flags);
ata_eh_clear_action(link, dev, ehi, action);
@@ -1317,6 +1315,8 @@ void ata_eh_done(struct ata_link *link, struct ata_device *dev,
{
struct ata_eh_context *ehc = &link->eh_context;
+ trace_ata_eh_done(link, dev ? dev->devno : 0, action);
+
ata_eh_clear_action(link, dev, &ehc->i, action);
}
@@ -1421,8 +1421,6 @@ static void ata_eh_request_sense(struct ata_queued_cmd *qc,
return;
}
- DPRINTK("ATA request sense\n");
-
ata_tf_init(dev, &tf);
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
tf.flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
@@ -1463,8 +1461,6 @@ unsigned int atapi_eh_request_sense(struct ata_device *dev,
struct ata_port *ap = dev->link->ap;
struct ata_taskfile tf;
- DPRINTK("ATAPI request sense\n");
-
memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* initialize sense_buf with the error register,
@@ -1928,8 +1924,6 @@ static void ata_eh_link_autopsy(struct ata_link *link)
u32 serror;
int rc;
- DPRINTK("ENTER\n");
-
if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
return;
@@ -2036,7 +2030,6 @@ static void ata_eh_link_autopsy(struct ata_link *link)
ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
}
- DPRINTK("EXIT\n");
}
/**
@@ -2086,16 +2079,15 @@ void ata_eh_autopsy(struct ata_port *ap)
}
/**
- * ata_get_cmd_descript - get description for ATA command
- * @command: ATA command code to get description for
+ * ata_get_cmd_name - get name for ATA command
+ * @command: ATA command code to get name for
*
- * Return a textual description of the given command, or NULL if the
- * command is not known.
+ * Return a textual name of the given command or "unknown"
*
* LOCKING:
* None
*/
-const char *ata_get_cmd_descript(u8 command)
+const char *ata_get_cmd_name(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
static const struct
@@ -2203,9 +2195,9 @@ const char *ata_get_cmd_descript(u8 command)
return cmd_descr[i].text;
#endif
- return NULL;
+ return "unknown";
}
-EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
+EXPORT_SYMBOL_GPL(ata_get_cmd_name);
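
Returning the "unknown" sentinel instead of NULL is what lets the callers below drop their NULL checks. A minimal sketch of the pattern with a hypothetical two-entry table; the real table is much larger and compiled in only with CONFIG_ATA_VERBOSE_ERROR.

#include <stdio.h>
#include <stdint.h>

struct cmd_name { uint8_t command; const char *text; };

static const struct cmd_name cmd_names[] = {
    { 0xec, "IDENTIFY DEVICE" },
    { 0x25, "READ DMA EXT" },
    { 0, NULL }
};

/* Returning a sentinel instead of NULL spares every caller a check. */
static const char *get_cmd_name_demo(uint8_t command)
{
    int i;

    for (i = 0; cmd_names[i].text; i++)
        if (cmd_names[i].command == command)
            return cmd_names[i].text;
    return "unknown";
}

int main(void)
{
    printf("%s\n", get_cmd_name_demo(0xec));    /* IDENTIFY DEVICE */
    printf("%s\n", get_cmd_name_demo(0x42));    /* unknown */
    return 0;
}
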
/**
* ata_eh_link_report - report error handling to user
@@ -2354,12 +2346,9 @@ static void ata_eh_link_report(struct ata_link *link)
}
__scsi_format_command(cdb_buf, sizeof(cdb_buf),
cdb, cdb_len);
- } else {
- const char *descr = ata_get_cmd_descript(cmd->command);
- if (descr)
- ata_dev_err(qc->dev, "failed command: %s\n",
- descr);
- }
+ } else
+ ata_dev_err(qc->dev, "failed command: %s\n",
+ ata_get_cmd_name(cmd->command));
ata_dev_err(qc->dev,
"cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
@@ -2596,12 +2585,19 @@ int ata_eh_reset(struct ata_link *link, int classify,
/* mark that this EH session started with reset */
ehc->last_reset = jiffies;
- if (reset == hardreset)
+ if (reset == hardreset) {
ehc->i.flags |= ATA_EHI_DID_HARDRESET;
- else
+ trace_ata_link_hardreset_begin(link, classes, deadline);
+ } else {
ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
+ trace_ata_link_softreset_begin(link, classes, deadline);
+ }
rc = ata_do_reset(link, reset, classes, deadline, true);
+ if (reset == hardreset)
+ trace_ata_link_hardreset_end(link, classes, rc);
+ else
+ trace_ata_link_softreset_end(link, classes, rc);
if (rc && rc != -EAGAIN) {
failed_link = link;
goto fail;
@@ -2615,8 +2611,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
ata_link_info(slave, "hard resetting link\n");
ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
+ trace_ata_slave_hardreset_begin(slave, classes,
+ deadline);
tmp = ata_do_reset(slave, reset, classes, deadline,
false);
+ trace_ata_slave_hardreset_end(slave, classes, tmp);
switch (tmp) {
case -EAGAIN:
rc = -EAGAIN;
@@ -2644,7 +2643,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
}
ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
+ trace_ata_link_softreset_begin(link, classes, deadline);
rc = ata_do_reset(link, reset, classes, deadline, true);
+ trace_ata_link_softreset_end(link, classes, rc);
if (rc) {
failed_link = link;
goto fail;
@@ -2698,8 +2699,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
*/
if (postreset) {
postreset(link, classes);
- if (slave)
+ trace_ata_link_postreset(link, classes, rc);
+ if (slave) {
postreset(slave, classes);
+ trace_ata_slave_postreset(slave, classes, rc);
+ }
}
/*
@@ -2921,8 +2925,6 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
unsigned long flags;
int rc = 0;
- DPRINTK("ENTER\n");
-
/* For PATA drive side cable detection to work, IDENTIFY must
* be done backwards such that PDIAG- is released by the slave
* device before the master device is identified.
@@ -3036,7 +3038,6 @@ static int ata_eh_revalidate_and_attach(struct ata_link *link,
err:
*r_failed_dev = dev;
- DPRINTK("EXIT rc=%d\n", rc);
return rc;
}
@@ -3551,8 +3552,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
int rc, nr_fails;
unsigned long flags, deadline;
- DPRINTK("ENTER\n");
-
/* prep for recovery */
ata_for_each_link(link, ap, EDGE) {
struct ata_eh_context *ehc = &link->eh_context;
@@ -3760,7 +3759,6 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
if (rc && r_failed_link)
*r_failed_link = link;
- DPRINTK("EXIT, rc=%d\n", rc);
return rc;
}
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index ba7be3f38617..e2e9cbd405fa 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -652,8 +652,6 @@ static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
u32 *gscr = (void *)ap->sector_buf;
int rc;
- DPRINTK("ENTER\n");
-
ata_eh_about_to_do(link, NULL, ATA_EH_REVALIDATE);
if (!ata_dev_enabled(dev)) {
@@ -686,12 +684,10 @@ static int sata_pmp_revalidate(struct ata_device *dev, unsigned int new_class)
ata_eh_done(link, NULL, ATA_EH_REVALIDATE);
- DPRINTK("EXIT, rc=0\n");
return 0;
fail:
ata_dev_err(dev, "PMP revalidation failed (errno=%d)\n", rc);
- DPRINTK("EXIT, rc=%d\n", rc);
return rc;
}
@@ -759,8 +755,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
int detach = 0, rc = 0;
int reval_failed = 0;
- DPRINTK("ENTER\n");
-
if (dev->flags & ATA_DFLAG_DETACH) {
detach = 1;
rc = -ENODEV;
@@ -828,7 +822,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
/* okay, PMP resurrected */
ehc->i.flags = 0;
- DPRINTK("EXIT, rc=0\n");
return 0;
fail:
@@ -838,7 +831,6 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
else
ata_dev_disable(dev);
- DPRINTK("EXIT, rc=%d\n", rc);
return rc;
}
diff --git a/drivers/ata/libata-sata.c b/drivers/ata/libata-sata.c
index b9c77885b872..071158c0c44c 100644
--- a/drivers/ata/libata-sata.c
+++ b/drivers/ata/libata-sata.c
@@ -317,7 +317,7 @@ int sata_link_resume(struct ata_link *link, const unsigned long *params,
* immediately after resuming. Delay 200ms before
* debouncing.
*/
- if (!(link->flags & ATA_LFLAG_NO_DB_DELAY))
+ if (!(link->flags & ATA_LFLAG_NO_DEBOUNCE_DELAY))
ata_msleep(link->ap, 200);
/* is SControl restored correctly? */
@@ -533,8 +533,6 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
u32 scontrol;
int rc;
- DPRINTK("ENTER\n");
-
if (online)
*online = false;
@@ -610,7 +608,6 @@ int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
*online = false;
ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
}
- DPRINTK("EXIT, rc=%d\n", rc);
return rc;
}
EXPORT_SYMBOL_GPL(sata_link_hardreset);
@@ -876,7 +873,7 @@ static ssize_t ata_ncq_prio_enable_show(struct device *device,
ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLE;
spin_unlock_irq(ap->lock);
- return rc ? rc : snprintf(buf, 20, "%u\n", ncq_prio_enable);
+ return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_enable);
}
static ssize_t ata_ncq_prio_enable_store(struct device *device,
@@ -972,7 +969,7 @@ ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct ata_port *ap = ata_shost_to_port(shost);
- return snprintf(buf, 23, "%d\n", ap->em_message_type);
+ return sysfs_emit(buf, "%d\n", ap->em_message_type);
}
DEVICE_ATTR(em_message_type, S_IRUGO,
ata_scsi_em_message_type_show, NULL);
@@ -1261,8 +1258,6 @@ int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap)
{
int rc = 0;
- ata_scsi_dump_cdb(ap, cmd);
-
if (likely(ata_dev_enabled(ap->link.device)))
rc = __ata_scsi_queuecmd(cmd, ap->link.device);
else {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 313e9475507b..ed8be585a98f 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -121,7 +121,7 @@ static ssize_t ata_scsi_park_show(struct device *device,
unlock:
spin_unlock_irq(ap->lock);
- return rc ? rc : snprintf(buf, 20, "%u\n", msecs);
+ return rc ? rc : sysfs_emit(buf, "%u\n", msecs);
}
static ssize_t ata_scsi_park_store(struct device *device,
@@ -668,7 +668,7 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
/**
* ata_dump_status - user friendly display of error info
- * @id: id of the port in question
+ * @ap: the port in question
* @tf: ptr to filled out taskfile
*
* Decode and dump the ATA error/status registers for the user so
@@ -678,37 +678,32 @@ static void ata_qc_set_pc_nbytes(struct ata_queued_cmd *qc)
* LOCKING:
* inherited from caller
*/
-static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
+static void ata_dump_status(struct ata_port *ap, struct ata_taskfile *tf)
{
u8 stat = tf->command, err = tf->feature;
- pr_warn("ata%u: status=0x%02x { ", id, stat);
if (stat & ATA_BUSY) {
- pr_cont("Busy }\n"); /* Data is not valid in this case */
+ ata_port_warn(ap, "status=0x%02x {Busy} ", stat);
} else {
- if (stat & ATA_DRDY) pr_cont("DriveReady ");
- if (stat & ATA_DF) pr_cont("DeviceFault ");
- if (stat & ATA_DSC) pr_cont("SeekComplete ");
- if (stat & ATA_DRQ) pr_cont("DataRequest ");
- if (stat & ATA_CORR) pr_cont("CorrectedError ");
- if (stat & ATA_SENSE) pr_cont("Sense ");
- if (stat & ATA_ERR) pr_cont("Error ");
- pr_cont("}\n");
-
- if (err) {
- pr_warn("ata%u: error=0x%02x { ", id, err);
- if (err & ATA_ABORTED) pr_cont("DriveStatusError ");
- if (err & ATA_ICRC) {
- if (err & ATA_ABORTED)
- pr_cont("BadCRC ");
- else pr_cont("Sector ");
- }
- if (err & ATA_UNC) pr_cont("UncorrectableError ");
- if (err & ATA_IDNF) pr_cont("SectorIdNotFound ");
- if (err & ATA_TRK0NF) pr_cont("TrackZeroNotFound ");
- if (err & ATA_AMNF) pr_cont("AddrMarkNotFound ");
- pr_cont("}\n");
- }
+ ata_port_warn(ap, "status=0x%02x { %s%s%s%s%s%s%s} ", stat,
+ stat & ATA_DRDY ? "DriveReady " : "",
+ stat & ATA_DF ? "DeviceFault " : "",
+ stat & ATA_DSC ? "SeekComplete " : "",
+ stat & ATA_DRQ ? "DataRequest " : "",
+ stat & ATA_CORR ? "CorrectedError " : "",
+ stat & ATA_SENSE ? "Sense " : "",
+ stat & ATA_ERR ? "Error " : "");
+ if (err)
+ ata_port_warn(ap, "error=0x%02x {%s%s%s%s%s%s", err,
+ err & ATA_ABORTED ?
+ "DriveStatusError " : "",
+ err & ATA_ICRC ?
+ (err & ATA_ABORTED ?
+ "BadCRC " : "Sector ") : "",
+ err & ATA_UNC ? "UncorrectableError " : "",
+ err & ATA_IDNF ? "SectorIdNotFound " : "",
+ err & ATA_TRK0NF ? "TrackZeroNotFound " : "",
+ err & ATA_AMNF ? "AddrMarkNotFound " : "");
}
}
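
The rewritten ata_dump_status() replaces a chain of pr_cont() calls, which can interleave with output from other CPUs, with one ata_port_warn() per line built from conditional string fragments. A trimmed userspace sketch of the technique, using only a few of the status bits:

#include <stdio.h>
#include <stdint.h>

#define ATA_BUSY 0x80
#define ATA_DRDY 0x40
#define ATA_DRQ  0x08
#define ATA_ERR  0x01

/* One formatted line built from conditional fragments, no pr_cont(). */
static void dump_status_demo(uint8_t stat)
{
    if (stat & ATA_BUSY) {
        printf("status=0x%02x { Busy }\n", stat);
        return;
    }
    printf("status=0x%02x { %s%s%s}\n", stat,
           stat & ATA_DRDY ? "DriveReady " : "",
           stat & ATA_DRQ ? "DataRequest " : "",
           stat & ATA_ERR ? "Error " : "");
}

int main(void)
{
    dump_status_demo(ATA_DRDY | ATA_ERR);    /* { DriveReady Error } */
    dump_status_demo(ATA_BUSY);
    return 0;
}
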
@@ -1299,8 +1294,6 @@ static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
u64 lba = 0;
u32 len;
- VPRINTK("six-byte command\n");
-
lba |= ((u64)(cdb[1] & 0x1f)) << 16;
lba |= ((u64)cdb[2]) << 8;
lba |= ((u64)cdb[3]);
@@ -1326,8 +1319,6 @@ static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
u64 lba = 0;
u32 len = 0;
- VPRINTK("ten-byte command\n");
-
lba |= ((u64)cdb[2]) << 24;
lba |= ((u64)cdb[3]) << 16;
lba |= ((u64)cdb[4]) << 8;
@@ -1355,8 +1346,6 @@ static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
u64 lba = 0;
u32 len = 0;
- VPRINTK("sixteen-byte command\n");
-
lba |= ((u64)cdb[2]) << 56;
lba |= ((u64)cdb[3]) << 48;
lba |= ((u64)cdb[4]) << 40;
@@ -1469,9 +1458,6 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
head = track % dev->heads;
sect = (u32)block % dev->sectors + 1;
- DPRINTK("block %u track %u cyl %u head %u sect %u\n",
- (u32)block, track, cyl, head, sect);
-
/* Check whether the converted CHS can fit.
Cylinder: 0-65535
Head: 0-15
@@ -1594,7 +1580,6 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
goto invalid_fld;
break;
default:
- DPRINTK("no-byte command\n");
fp = 0;
goto invalid_fld;
}
@@ -1672,7 +1657,7 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
cmd->result = SAM_STAT_GOOD;
if (need_sense && !ap->ops->error_handler)
- ata_dump_status(ap->print_id, &qc->result_tf);
+ ata_dump_status(ap, &qc->result_tf);
ata_qc_done(qc);
}
@@ -1710,8 +1695,6 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
struct ata_queued_cmd *qc;
int rc;
- VPRINTK("ENTER\n");
-
qc = ata_scsi_qc_new(dev, cmd);
if (!qc)
goto err_mem;
@@ -1742,13 +1725,11 @@ static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
/* select device, send command to hardware */
ata_qc_issue(qc);
- VPRINTK("EXIT\n");
return 0;
early_finish:
ata_qc_free(qc);
scsi_done(cmd);
- DPRINTK("EXIT - early finish (good or error)\n");
return 0;
err_did:
@@ -1756,12 +1737,10 @@ err_did:
cmd->result = (DID_ERROR << 16);
scsi_done(cmd);
err_mem:
- DPRINTK("EXIT - internal\n");
return 0;
defer:
ata_qc_free(qc);
- DPRINTK("EXIT - defer\n");
if (rc == ATA_DEFER_LINK)
return SCSI_MLQUEUE_DEVICE_BUSY;
else
@@ -1858,8 +1837,6 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
2
};
- VPRINTK("ENTER\n");
-
/* set scsi removable (RMB) bit per ata bit, or if the
* AHCI port says it's external (Hotplug-capable, eSATA).
*/
@@ -2294,8 +2271,6 @@ static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
u8 dpofua, bp = 0xff;
u16 fp;
- VPRINTK("ENTER\n");
-
six_byte = (scsicmd[0] == MODE_SENSE);
ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */
/*
@@ -2413,8 +2388,6 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
log2_per_phys = ata_id_log2_per_physical_sector(dev->id);
lowest_aligned = ata_id_logical_sector_offset(dev->id, log2_per_phys);
- VPRINTK("ENTER\n");
-
if (args->cmd->cmnd[0] == READ_CAPACITY) {
if (last_lba >= 0xffffffffULL)
last_lba = 0xffffffff;
@@ -2481,7 +2454,6 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
*/
static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
{
- VPRINTK("ENTER\n");
rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */
return 0;
@@ -2512,8 +2484,6 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct scsi_cmnd *cmd = qc->scsicmd;
- DPRINTK("ATAPI request sense\n");
-
memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
#ifdef CONFIG_ATA_SFF
@@ -2552,8 +2522,6 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
qc->complete_fn = atapi_sense_complete;
ata_qc_issue(qc);
-
- DPRINTK("EXIT\n");
}
/*
@@ -2581,8 +2549,6 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
struct scsi_cmnd *cmd = qc->scsicmd;
unsigned int err_mask = qc->err_mask;
- VPRINTK("ENTER, err_mask 0x%X\n", err_mask);
-
/* handle completion from new EH */
if (unlikely(qc->ap->ops->error_handler &&
(err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) {
@@ -2663,7 +2629,6 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
if (scmd->sc_data_direction == DMA_TO_DEVICE) {
qc->tf.flags |= ATA_TFLAG_WRITE;
- DPRINTK("direction: write\n");
}
qc->tf.command = ATA_CMD_PACKET;
@@ -3591,10 +3556,7 @@ static int ata_mselect_caching(struct ata_queued_cmd *qc,
*/
if (len != CACHE_MPAGE_LEN - 2) {
- if (len < CACHE_MPAGE_LEN - 2)
- *fp = len;
- else
- *fp = CACHE_MPAGE_LEN - 2;
+ *fp = min(len, CACHE_MPAGE_LEN - 2);
return -EINVAL;
}
@@ -3647,10 +3609,7 @@ static int ata_mselect_control(struct ata_queued_cmd *qc,
*/
if (len != CONTROL_MPAGE_LEN - 2) {
- if (len < CONTROL_MPAGE_LEN - 2)
- *fp = len;
- else
- *fp = CONTROL_MPAGE_LEN - 2;
+ *fp = min(len, CONTROL_MPAGE_LEN - 2);
return -EINVAL;
}
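
Both hunks above collapse an if/else clamp into min(). A standalone sketch of the equivalent logic follows; note the kernel's min() additionally type-checks its arguments, which the plain ternary macro here does not.

#include <stdio.h>

#define CACHE_MPAGE_LEN 20

#define min_demo(a, b) ((a) < (b) ? (a) : (b))

/* Clamp the reported fail offset to the last valid byte of the page. */
static int check_len_demo(unsigned int len, unsigned int *fp)
{
    if (len != CACHE_MPAGE_LEN - 2) {
        *fp = min_demo(len, CACHE_MPAGE_LEN - 2);
        return -1;
    }
    return 0;
}

int main(void)
{
    unsigned int fp = 0;

    check_len_demo(5, &fp);
    printf("fp=%u\n", fp);    /* 5: short page, offset is the length */
    check_len_demo(99, &fp);
    printf("fp=%u\n", fp);    /* 18: long page, clamped to page end */
    return 0;
}
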
@@ -3698,8 +3657,6 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
u8 buffer[64];
const u8 *p = buffer;
- VPRINTK("ENTER\n");
-
six_byte = (cdb[0] == MODE_SELECT);
if (six_byte) {
if (scmd->cmd_len < 5) {
@@ -3997,70 +3954,45 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
return NULL;
}
-/**
- * ata_scsi_dump_cdb - dump SCSI command contents to dmesg
- * @ap: ATA port to which the command was being sent
- * @cmd: SCSI command to dump
- *
- * Prints the contents of a SCSI command via printk().
- */
-
-void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd)
-{
-#ifdef ATA_VERBOSE_DEBUG
- struct scsi_device *scsidev = cmd->device;
-
- VPRINTK("CDB (%u:%d,%d,%lld) %9ph\n",
- ap->print_id,
- scsidev->channel, scsidev->id, scsidev->lun,
- cmd->cmnd);
-#endif
-}
-
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
{
u8 scsi_op = scmd->cmnd[0];
ata_xlat_func_t xlat_func;
- int rc = 0;
+
+ if (unlikely(!scmd->cmd_len))
+ goto bad_cdb_len;
if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
- if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
+ if (unlikely(scmd->cmd_len > dev->cdb_len))
goto bad_cdb_len;
xlat_func = ata_get_xlat_func(dev, scsi_op);
- } else {
- if (unlikely(!scmd->cmd_len))
- goto bad_cdb_len;
+ } else if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
+ /* relay SCSI command to ATAPI device */
+ int len = COMMAND_SIZE(scsi_op);
- xlat_func = NULL;
- if (likely((scsi_op != ATA_16) || !atapi_passthru16)) {
- /* relay SCSI command to ATAPI device */
- int len = COMMAND_SIZE(scsi_op);
- if (unlikely(len > scmd->cmd_len ||
- len > dev->cdb_len ||
- scmd->cmd_len > ATAPI_CDB_LEN))
- goto bad_cdb_len;
+ if (unlikely(len > scmd->cmd_len ||
+ len > dev->cdb_len ||
+ scmd->cmd_len > ATAPI_CDB_LEN))
+ goto bad_cdb_len;
- xlat_func = atapi_xlat;
- } else {
- /* ATA_16 passthru, treat as an ATA command */
- if (unlikely(scmd->cmd_len > 16))
- goto bad_cdb_len;
+ xlat_func = atapi_xlat;
+ } else {
+ /* ATA_16 passthru, treat as an ATA command */
+ if (unlikely(scmd->cmd_len > 16))
+ goto bad_cdb_len;
- xlat_func = ata_get_xlat_func(dev, scsi_op);
- }
+ xlat_func = ata_get_xlat_func(dev, scsi_op);
}
if (xlat_func)
- rc = ata_scsi_translate(dev, scmd, xlat_func);
- else
- ata_scsi_simulate(dev, scmd);
+ return ata_scsi_translate(dev, scmd, xlat_func);
- return rc;
+ ata_scsi_simulate(dev, scmd);
+
+ return 0;
bad_cdb_len:
- DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n",
- scmd->cmd_len, scsi_op, dev->cdb_len);
scmd->result = DID_ERROR << 16;
scsi_done(scmd);
return 0;
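
Hoisting the !scmd->cmd_len test to the top of __ata_scsi_queuecmd() lets every later branch check only its own length rule. A compact sketch of that shape, with simplified limits standing in for the SCSI and ATAPI bounds:

#include <stdio.h>

#define ATAPI_CDB_LEN 16

/* Hoisting the zero-length check makes each branch state one rule. */
static int queuecmd_demo(unsigned int cmd_len, unsigned int dev_cdb_len)
{
    if (!cmd_len)
        goto bad_cdb_len;
    if (cmd_len > dev_cdb_len || cmd_len > ATAPI_CDB_LEN)
        goto bad_cdb_len;

    printf("dispatch CDB of %u bytes\n", cmd_len);
    return 0;

bad_cdb_len:
    printf("reject CDB of %u bytes\n", cmd_len);
    return 0;    /* completed with an error result, not re-queued */
}

int main(void)
{
    queuecmd_demo(12, 16);
    queuecmd_demo(0, 16);
    return 0;
}
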
@@ -4097,8 +4029,6 @@ int ata_scsi_queuecmd(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
spin_lock_irqsave(ap->lock, irq_flags);
- ata_scsi_dump_cdb(ap, cmd);
-
dev = ata_scsi_find_dev(ap, scsidev);
if (likely(dev))
rc = __ata_scsi_queuecmd(cmd, dev);
@@ -4531,12 +4461,9 @@ void ata_scsi_hotplug(struct work_struct *work)
container_of(work, struct ata_port, hotplug_task.work);
int i;
- if (ap->pflags & ATA_PFLAG_UNLOADING) {
- DPRINTK("ENTER/EXIT - unloading\n");
+ if (ap->pflags & ATA_PFLAG_UNLOADING)
return;
- }
- DPRINTK("ENTER\n");
mutex_lock(&ap->scsi_scan_mutex);
/* Unplug detached devices. We cannot use link iterator here
@@ -4552,7 +4479,6 @@ void ata_scsi_hotplug(struct work_struct *work)
ata_scsi_scan_host(ap, 0);
mutex_unlock(&ap->scsi_scan_mutex);
- DPRINTK("EXIT\n");
}
/**
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index b71ea4a680b0..75217828dfe3 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -18,7 +18,7 @@
#include <linux/module.h>
#include <linux/libata.h>
#include <linux/highmem.h>
-
+#include <trace/events/libata.h>
#include "libata.h"
static struct workqueue_struct *ata_sff_wq;
@@ -330,10 +330,6 @@ EXPORT_SYMBOL_GPL(ata_sff_dev_select);
static void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep)
{
- if (ata_msg_probe(ap))
- ata_port_info(ap, "ata_dev_select: ENTER, device %u, wait %u\n",
- device, wait);
-
if (wait)
ata_wait_idle(ap);
@@ -409,12 +405,6 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
- VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
- tf->hob_feature,
- tf->hob_nsect,
- tf->hob_lbal,
- tf->hob_lbam,
- tf->hob_lbah);
}
if (is_addr) {
@@ -423,18 +413,10 @@ void ata_sff_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
iowrite8(tf->lbal, ioaddr->lbal_addr);
iowrite8(tf->lbam, ioaddr->lbam_addr);
iowrite8(tf->lbah, ioaddr->lbah_addr);
- VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
- tf->feature,
- tf->nsect,
- tf->lbal,
- tf->lbam,
- tf->lbah);
}
- if (tf->flags & ATA_TFLAG_DEVICE) {
+ if (tf->flags & ATA_TFLAG_DEVICE)
iowrite8(tf->device, ioaddr->device_addr);
- VPRINTK("device 0x%X\n", tf->device);
- }
ata_wait_idle(ap);
}
@@ -494,8 +476,6 @@ EXPORT_SYMBOL_GPL(ata_sff_tf_read);
*/
void ata_sff_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
- DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
-
iowrite8(tf->command, ap->ioaddr.command_addr);
ata_sff_pause(ap);
}
@@ -505,6 +485,7 @@ EXPORT_SYMBOL_GPL(ata_sff_exec_command);
* ata_tf_to_host - issue ATA taskfile to host controller
* @ap: port to which command is being issued
* @tf: ATA taskfile register set
+ * @tag: tag of the associated command
*
* Issues ATA taskfile register set to ATA host controller,
* with proper synchronization with interrupt handler and
@@ -514,9 +495,12 @@ EXPORT_SYMBOL_GPL(ata_sff_exec_command);
* spin_lock_irqsave(host lock)
*/
static inline void ata_tf_to_host(struct ata_port *ap,
- const struct ata_taskfile *tf)
+ const struct ata_taskfile *tf,
+ unsigned int tag)
{
+ trace_ata_tf_load(ap, tf);
ap->ops->sff_tf_load(ap, tf);
+ trace_ata_exec_command(ap, tf, tag);
ap->ops->sff_exec_command(ap, tf);
}
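
ata_tf_to_host() now takes the command tag purely so the tracepoints can report it; each hardware op is preceded by its trace hook. A userspace sketch of the wrapping, with printf-based _demo hooks standing in for the real tracepoints:

#include <stdio.h>

struct taskfile_demo { unsigned char command; };

/* Stand-ins for trace_ata_tf_load() / trace_ata_exec_command(). */
static void trace_tf_load_demo(const struct taskfile_demo *tf)
{
    printf("trace: tf_load cmd=0x%02x\n", tf->command);
}

static void trace_exec_command_demo(const struct taskfile_demo *tf,
                                    unsigned int tag)
{
    printf("trace: exec_command cmd=0x%02x tag=%u\n", tf->command, tag);
}

static void sff_tf_load_demo(const struct taskfile_demo *tf) { (void)tf; }
static void sff_exec_command_demo(const struct taskfile_demo *tf) { (void)tf; }

/* Each hardware op is preceded by its tracepoint, as in ata_tf_to_host(). */
static void tf_to_host_demo(const struct taskfile_demo *tf, unsigned int tag)
{
    trace_tf_load_demo(tf);
    sff_tf_load_demo(tf);
    trace_exec_command_demo(tf, tag);
    sff_exec_command_demo(tf);
}

int main(void)
{
    struct taskfile_demo tf = { .command = 0xec };

    tf_to_host_demo(&tf, 0);
    return 0;
}
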
@@ -680,7 +664,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
page = nth_page(page, (offset >> PAGE_SHIFT));
offset %= PAGE_SIZE;
- DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+ trace_ata_sff_pio_transfer_data(qc, offset, qc->sect_size);
/*
* Split the transfer when it splits a page boundary. Note that the
@@ -750,7 +734,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
/* send SCSI cdb */
- DPRINTK("send cdb\n");
+ trace_atapi_send_cdb(qc, 0, qc->dev->cdb_len);
WARN_ON_ONCE(qc->dev->cdb_len < 12);
ap->ops->sff_data_xfer(qc, qc->cdb, qc->dev->cdb_len, 1);
@@ -768,6 +752,7 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
case ATAPI_PROT_DMA:
ap->hsm_task_state = HSM_ST_LAST;
/* initiate bmdma */
+ trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
ap->ops->bmdma_start(qc);
break;
#endif /* CONFIG_ATA_BMDMA */
@@ -820,7 +805,7 @@ next_sg:
/* don't cross page boundaries */
count = min(count, (unsigned int)PAGE_SIZE - offset);
- DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
+ trace_atapi_pio_transfer_data(qc, offset, count);
/* do the actual data transfer */
buf = kmap_atomic(page);
@@ -888,8 +873,6 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
if (unlikely(!bytes))
goto atapi_check;
- VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
-
if (unlikely(__atapi_pio_bytes(qc, bytes)))
goto err_out;
ata_sff_sync(ap); /* flush */
@@ -1002,8 +985,7 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
WARN_ON_ONCE(in_wq != ata_hsm_ok_in_wq(ap, qc));
fsm_start:
- DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
- ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
+ trace_ata_sff_hsm_state(qc, status);
switch (ap->hsm_task_state) {
case HSM_ST_FIRST:
@@ -1204,8 +1186,7 @@ fsm_start:
}
/* no more data to transfer */
- DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
- ap->print_id, qc->dev->devno, status);
+ trace_ata_sff_hsm_command_complete(qc, status);
WARN_ON_ONCE(qc->err_mask & (AC_ERR_DEV | AC_ERR_HSM));
@@ -1262,7 +1243,7 @@ EXPORT_SYMBOL_GPL(ata_sff_queue_pio_task);
void ata_sff_flush_pio_task(struct ata_port *ap)
{
- DPRINTK("ENTER\n");
+ trace_ata_sff_flush_pio_task(ap);
cancel_delayed_work_sync(&ap->sff_pio_task);
@@ -1279,9 +1260,6 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
spin_unlock_irq(ap->lock);
ap->sff_pio_task_link = NULL;
-
- if (ata_msg_ctl(ap))
- ata_port_dbg(ap, "%s: EXIT\n", __func__);
}
static void ata_sff_pio_task(struct work_struct *work)
@@ -1376,7 +1354,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_qc_set_polling(qc);
- ata_tf_to_host(ap, &qc->tf);
+ ata_tf_to_host(ap, &qc->tf, qc->tag);
ap->hsm_task_state = HSM_ST_LAST;
if (qc->tf.flags & ATA_TFLAG_POLLING)
@@ -1388,7 +1366,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_qc_set_polling(qc);
- ata_tf_to_host(ap, &qc->tf);
+ ata_tf_to_host(ap, &qc->tf, qc->tag);
if (qc->tf.flags & ATA_TFLAG_WRITE) {
/* PIO data out protocol */
@@ -1418,7 +1396,7 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
if (qc->tf.flags & ATA_TFLAG_POLLING)
ata_qc_set_polling(qc);
- ata_tf_to_host(ap, &qc->tf);
+ ata_tf_to_host(ap, &qc->tf, qc->tag);
ap->hsm_task_state = HSM_ST_FIRST;
@@ -1478,8 +1456,7 @@ static unsigned int __ata_sff_port_intr(struct ata_port *ap,
{
u8 status;
- VPRINTK("ata%u: protocol %d task_state %d\n",
- ap->print_id, qc->tf.protocol, ap->hsm_task_state);
+ trace_ata_sff_port_intr(qc, hsmv_on_idle);
/* Check whether we are expecting interrupt in this state */
switch (ap->hsm_task_state) {
@@ -1853,7 +1830,7 @@ unsigned int ata_sff_dev_classify(struct ata_device *dev, int present,
return ATA_DEV_NONE;
/* determine if device is ATA or ATAPI */
- class = ata_dev_classify(&tf);
+ class = ata_port_classify(ap, &tf);
if (class == ATA_DEV_UNKNOWN) {
/* If the device failed diagnostic, it's likely to
@@ -1956,8 +1933,6 @@ static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
-
if (ap->ioaddr.ctl_addr) {
/* software reset. causes dev0 to be selected */
iowrite8(ap->ctl, ioaddr->ctl_addr);
@@ -1995,8 +1970,6 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
int rc;
u8 err;
- DPRINTK("ENTER\n");
-
/* determine if device 0/1 are present */
if (ata_devchk(ap, 0))
devmask |= (1 << 0);
@@ -2007,7 +1980,6 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
ap->ops->sff_dev_select(ap, 0);
/* issue bus reset */
- DPRINTK("about to softreset, devmask=%x\n", devmask);
rc = ata_bus_softreset(ap, devmask, deadline);
/* if link is occupied, -ENODEV too is an error */
if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
@@ -2022,7 +1994,6 @@ int ata_sff_softreset(struct ata_link *link, unsigned int *classes,
classes[1] = ata_sff_dev_classify(&link->device[1],
devmask & (1 << 1), &err);
- DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
return 0;
}
EXPORT_SYMBOL_GPL(ata_sff_softreset);
@@ -2055,7 +2026,6 @@ int sata_sff_hardreset(struct ata_link *link, unsigned int *class,
if (online)
*class = ata_sff_dev_classify(link->device, 1, NULL);
- DPRINTK("EXIT, class=%u\n", *class);
return rc;
}
EXPORT_SYMBOL_GPL(sata_sff_hardreset);
@@ -2085,10 +2055,8 @@ void ata_sff_postreset(struct ata_link *link, unsigned int *classes)
ap->ops->sff_dev_select(ap, 0);
/* bail out if no device is present */
- if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
- DPRINTK("EXIT, no device\n");
+ if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE)
return;
- }
/* set up device control */
if (ap->ops->sff_set_devctl || ap->ioaddr.ctl_addr) {
@@ -2123,7 +2091,6 @@ void ata_sff_drain_fifo(struct ata_queued_cmd *qc)
&& count < 65536; count += 2)
ioread16(ap->ioaddr.data_addr);
- /* Can become DEBUG later */
if (count)
ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
@@ -2467,8 +2434,6 @@ static int ata_pci_init_one(struct pci_dev *pdev,
struct ata_host *host = NULL;
int rc;
- DPRINTK("ENTER\n");
-
pi = ata_sff_find_valid_pi(ppi);
if (!pi) {
dev_err(&pdev->dev, "no valid port_info specified\n");
@@ -2614,7 +2579,6 @@ static void ata_bmdma_fill_sg(struct ata_queued_cmd *qc)
prd[pi].addr = cpu_to_le32(addr);
prd[pi].flags_len = cpu_to_le32(len & 0xffff);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
pi++;
sg_len -= len;
@@ -2674,7 +2638,6 @@ static void ata_bmdma_fill_sg_dumb(struct ata_queued_cmd *qc)
prd[++pi].addr = cpu_to_le32(addr + 0x8000);
}
prd[pi].flags_len = cpu_to_le32(blen);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
pi++;
sg_len -= len;
@@ -2756,8 +2719,11 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
case ATA_PROT_DMA:
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+ trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
ap->ops->bmdma_setup(qc); /* set up bmdma */
+ trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
ap->ops->bmdma_start(qc); /* initiate bmdma */
ap->hsm_task_state = HSM_ST_LAST;
break;
@@ -2765,7 +2731,9 @@ unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc)
case ATAPI_PROT_DMA:
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+ trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
ap->ops->bmdma_setup(qc); /* set up bmdma */
ap->hsm_task_state = HSM_ST_FIRST;
@@ -2806,13 +2774,14 @@ unsigned int ata_bmdma_port_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
if (ap->hsm_task_state == HSM_ST_LAST && ata_is_dma(qc->tf.protocol)) {
/* check status of DMA engine */
host_stat = ap->ops->bmdma_status(ap);
- VPRINTK("ata%u: host_stat 0x%X\n", ap->print_id, host_stat);
+ trace_ata_bmdma_status(ap, host_stat);
/* if it's not our irq... */
if (!(host_stat & ATA_DMA_INTR))
return ata_sff_idle_irq(ap);
/* before we do anything else, clear DMA-Start bit */
+ trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
ap->ops->bmdma_stop(qc);
bmdma_stopped = true;
@@ -2881,6 +2850,7 @@ void ata_bmdma_error_handler(struct ata_port *ap)
u8 host_stat;
host_stat = ap->ops->bmdma_status(ap);
+ trace_ata_bmdma_status(ap, host_stat);
/* BMDMA controllers indicate host bus error by
* setting DMA_ERR bit and timing out. As it wasn't
@@ -2892,6 +2862,7 @@ void ata_bmdma_error_handler(struct ata_port *ap)
thaw = true;
}
+ trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
ap->ops->bmdma_stop(qc);
/* if we're gonna thaw, make sure IRQ is clear */
@@ -2925,6 +2896,7 @@ void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
if (ata_is_dma(qc->tf.protocol)) {
spin_lock_irqsave(ap->lock, flags);
+ trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
ap->ops->bmdma_stop(qc);
spin_unlock_irqrestore(ap->lock, flags);
}
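The same bracketing discipline repeats in every DMA-capable driver touched below: each indirect bmdma op is immediately preceded by its matching tracepoint, so a trace shows the full setup/start/stop lifecycle of a command. A condensed sketch of the pattern (hypothetical driver, real tracepoint names from this series):

	static unsigned int sketch_qc_issue(struct ata_queued_cmd *qc)
	{
		struct ata_port *ap = qc->ap;

		trace_ata_tf_load(ap, &qc->tf);
		ap->ops->sff_tf_load(ap, &qc->tf);	/* load tf registers */
		trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_setup(qc);		/* program the PRD table */
		trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
		ap->ops->bmdma_start(qc);		/* kick off the transfer */
		ap->hsm_task_state = HSM_ST_LAST;
		return 0;
	}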
diff --git a/drivers/ata/libata-trace.c b/drivers/ata/libata-trace.c
index 08e001303a82..e0e4d0d5a100 100644
--- a/drivers/ata/libata-trace.c
+++ b/drivers/ata/libata-trace.c
@@ -39,6 +39,24 @@ libata_trace_parse_status(struct trace_seq *p, unsigned char status)
}
const char *
+libata_trace_parse_host_stat(struct trace_seq *p, unsigned char host_stat)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "{ ");
+ if (host_stat & ATA_DMA_INTR)
+ trace_seq_printf(p, "INTR ");
+ if (host_stat & ATA_DMA_ERR)
+ trace_seq_printf(p, "ERR ");
+ if (host_stat & ATA_DMA_ACTIVE)
+ trace_seq_printf(p, "ACTIVE ");
+ trace_seq_putc(p, '}');
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *
libata_trace_parse_eh_action(struct trace_seq *p, unsigned int eh_action)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -138,6 +156,35 @@ libata_trace_parse_qc_flags(struct trace_seq *p, unsigned int qc_flags)
}
const char *
+libata_trace_parse_tf_flags(struct trace_seq *p, unsigned int tf_flags)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "%x", tf_flags);
+ if (tf_flags) {
+ trace_seq_printf(p, "{ ");
+ if (tf_flags & ATA_TFLAG_LBA48)
+ trace_seq_printf(p, "LBA48 ");
+ if (tf_flags & ATA_TFLAG_ISADDR)
+ trace_seq_printf(p, "ISADDR ");
+ if (tf_flags & ATA_TFLAG_DEVICE)
+ trace_seq_printf(p, "DEV ");
+ if (tf_flags & ATA_TFLAG_WRITE)
+ trace_seq_printf(p, "WRITE ");
+ if (tf_flags & ATA_TFLAG_LBA)
+ trace_seq_printf(p, "LBA ");
+ if (tf_flags & ATA_TFLAG_FUA)
+ trace_seq_printf(p, "FUA ");
+ if (tf_flags & ATA_TFLAG_POLLING)
+ trace_seq_printf(p, "POLL ");
+ trace_seq_putc(p, '}');
+ }
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+const char *
libata_trace_parse_subcmd(struct trace_seq *p, unsigned char cmd,
unsigned char feature, unsigned char hob_nsect)
{
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 34bb4608bdc6..ca129854a88c 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -163,7 +163,7 @@ static struct {
{ AC_ERR_INVALID, "InvalidArg" },
{ AC_ERR_OTHER, "Unknown" },
{ AC_ERR_NODEV_HINT, "NoDeviceHint" },
- { AC_ERR_NCQ, "NCQError" }
+ { AC_ERR_NCQ, "NCQError" }
};
ata_bitfield_name_match(err, ata_err_names)
@@ -321,13 +321,43 @@ int ata_tport_add(struct device *parent,
return error;
}
+/**
+ * ata_port_classify - determine device type based on ATA-spec signature
+ * @ap: ATA port device on which the classification should be run
+ * @tf: ATA taskfile register set for device to be identified
+ *
+ * A wrapper around ata_dev_classify() to provide additional logging
+ *
+ * RETURNS:
+ * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
+ * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
+ */
+unsigned int ata_port_classify(struct ata_port *ap,
+ const struct ata_taskfile *tf)
+{
+ int i;
+ unsigned int class = ata_dev_classify(tf);
+
+ /* Start with index '1' to skip the 'unknown' entry */
+ for (i = 1; i < ARRAY_SIZE(ata_class_names); i++) {
+ if (ata_class_names[i].value == class) {
+ ata_port_dbg(ap, "found %s device by sig\n",
+ ata_class_names[i].name);
+ return class;
+ }
+ }
+
+ ata_port_info(ap, "found unknown device (class %u)\n", class);
+ return class;
+}
+EXPORT_SYMBOL_GPL(ata_port_classify);
/*
* ATA link attributes
*/
static int noop(int x) { return x; }
-#define ata_link_show_linkspeed(field, format) \
+#define ata_link_show_linkspeed(field, format) \
static ssize_t \
show_ata_link_##field(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -416,7 +446,7 @@ int ata_tlink_add(struct ata_link *link)
dev->release = ata_tlink_release;
if (ata_is_host_link(link))
dev_set_name(dev, "link%d", ap->print_id);
- else
+ else
dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp);
transport_setup_device(dev);
@@ -472,7 +502,7 @@ ata_dev_attr(xfer, dma_mode);
ata_dev_attr(xfer, xfer_mode);
-#define ata_dev_show_simple(field, format_string, cast) \
+#define ata_dev_show_simple(field, format_string, cast) \
static ssize_t \
show_ata_dev_##field(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -482,9 +512,9 @@ show_ata_dev_##field(struct device *dev, \
return scnprintf(buf, 20, format_string, cast ata_dev->field); \
}
-#define ata_dev_simple_attr(field, format_string, type) \
+#define ata_dev_simple_attr(field, format_string, type) \
ata_dev_show_simple(field, format_string, (type)) \
-static DEVICE_ATTR(field, S_IRUGO, \
+ static DEVICE_ATTR(field, S_IRUGO, \
show_ata_dev_##field, NULL)
ata_dev_simple_attr(spdn_cnt, "%d\n", int);
@@ -502,7 +532,7 @@ static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg)
seconds = div_u64_rem(ent->timestamp, HZ, &rem);
arg->written += sprintf(arg->buf + arg->written,
- "[%5llu.%09lu]", seconds,
+ "[%5llu.%09lu]", seconds,
rem * NSEC_PER_SEC / HZ);
arg->written += get_ata_err_names(ent->err_mask,
arg->buf + arg->written);
@@ -667,7 +697,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
dev->release = ata_tdev_release;
if (ata_is_host_link(link))
dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
- else
+ else
dev_set_name(dev, "dev%d.%d.0", ap->print_id, link->pmp);
transport_setup_device(dev);
@@ -689,7 +719,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
*/
#define SETUP_TEMPLATE(attrb, field, perm, test) \
- i->private_##attrb[count] = dev_attr_##field; \
+ i->private_##attrb[count] = dev_attr_##field; \
i->private_##attrb[count].attr.mode = perm; \
i->attrb[count] = &i->private_##attrb[count]; \
if (test) \
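With ata_port_classify() exported, reset paths keep their signature-based classification while gaining the per-port log line; a minimal sketch of a caller (hypothetical function, mirroring the libata-sff.c change above):

	static unsigned int sketch_classify(struct ata_port *ap)
	{
		struct ata_taskfile tf;

		/* read back the device signature left by the reset */
		ap->ops->sff_tf_read(ap, &tf);
		return ata_port_classify(ap, &tf);	/* was ata_dev_classify(&tf) */
	}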
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 68cdd81d747c..51e01acdd241 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -148,7 +148,6 @@ extern int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
unsigned int id, u64 lun);
void ata_scsi_sdev_config(struct scsi_device *sdev);
int ata_scsi_dev_config(struct scsi_device *sdev, struct ata_device *dev);
-void ata_scsi_dump_cdb(struct ata_port *ap, struct scsi_cmnd *cmd);
int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev);
/* libata-eh.c */
@@ -166,7 +165,7 @@ extern void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
extern void ata_eh_done(struct ata_link *link, struct ata_device *dev,
unsigned int action);
extern void ata_eh_autopsy(struct ata_port *ap);
-const char *ata_get_cmd_descript(u8 command);
+const char *ata_get_cmd_name(u8 command);
extern void ata_eh_report(struct ata_port *ap);
extern int ata_eh_reset(struct ata_link *link, int classify,
ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
@@ -179,7 +178,7 @@ extern int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
extern void ata_eh_finish(struct ata_port *ap);
extern int ata_ering_map(struct ata_ering *ering,
int (*map_fn)(struct ata_ering_entry *, void *),
- void *arg);
+ void *arg);
extern unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key);
extern unsigned int atapi_eh_request_sense(struct ata_device *dev,
u8 *sense_buf, u8 dfl_sense_key);
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index b7ff63ed3bbb..1b90cda27246 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -37,7 +37,7 @@
#define DRV_NAME "pata_ali"
#define DRV_VERSION "0.7.8"
-static int ali_atapi_dma = 0;
+static int ali_atapi_dma;
module_param_named(atapi_dma, ali_atapi_dma, int, 0644);
MODULE_PARM_DESC(atapi_dma, "Enable ATAPI DMA (0=disable, 1=enable)");
@@ -123,7 +123,7 @@ static unsigned long ali_20_filter(struct ata_device *adev, unsigned long mask)
mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
ata_id_c_string(adev->id, model_num, ATA_ID_PROD, sizeof(model_num));
if (strstr(model_num, "WDC"))
- return mask &= ~ATA_MASK_UDMA;
+ mask &= ~ATA_MASK_UDMA;
return mask;
}
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 63f39440a9b4..24c3d5e1fca3 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -39,6 +39,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <trace/events/libata.h>
#define DRIVER_NAME "arasan_cf"
#define TIMEOUT msecs_to_jiffies(3000)
@@ -703,9 +704,11 @@ static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
case ATA_PROT_DMA:
WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);
+ trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf);
acdev->dma_status = 0;
acdev->qc = qc;
+ trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
arasan_cf_dma_start(acdev);
ap->hsm_task_state = HSM_ST_LAST;
break;
diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
index 2bc5fc81efe3..779d660415c8 100644
--- a/drivers/ata/pata_atp867x.c
+++ b/drivers/ata/pata_atp867x.c
@@ -155,7 +155,7 @@ static int atp867x_get_active_clocks_shifted(struct ata_port *ap,
case 1 ... 6:
break;
default:
- printk(KERN_WARNING "ATP867X: active %dclk is invalid. "
+ ata_port_warn(ap, "ATP867X: active %dclk is invalid. "
"Using 12clk.\n", clk);
fallthrough;
case 9 ... 12:
@@ -171,7 +171,8 @@ active_clock_shift_done:
return clocks << ATP867X_IO_PIOSPD_ACTIVE_SHIFT;
}
-static int atp867x_get_recover_clocks_shifted(unsigned int clk)
+static int atp867x_get_recover_clocks_shifted(struct ata_port *ap,
+ unsigned int clk)
{
unsigned char clocks = clk;
@@ -188,7 +189,7 @@ static int atp867x_get_recover_clocks_shifted(unsigned int clk)
case 15:
break;
default:
- printk(KERN_WARNING "ATP867X: recover %dclk is invalid. "
+ ata_port_warn(ap, "ATP867X: recover %dclk is invalid. "
"Using default 12clk.\n", clk);
fallthrough;
case 12: /* default 12 clk */
@@ -225,7 +226,7 @@ static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev)
iowrite8(b, dp->dma_mode);
b = atp867x_get_active_clocks_shifted(ap, t.active) |
- atp867x_get_recover_clocks_shifted(t.recover);
+ atp867x_get_recover_clocks_shifted(ap, t.recover);
if (adev->devno & 1)
iowrite8(b, dp->slave_piospd);
@@ -233,7 +234,7 @@ static void atp867x_set_piomode(struct ata_port *ap, struct ata_device *adev)
iowrite8(b, dp->mstr_piospd);
b = atp867x_get_active_clocks_shifted(ap, t.act8b) |
- atp867x_get_recover_clocks_shifted(t.rec8b);
+ atp867x_get_recover_clocks_shifted(ap, t.rec8b);
iowrite8(b, dp->eightb_piospd);
}
@@ -270,7 +271,6 @@ static struct ata_port_operations atp867x_ops = {
};
-#ifdef ATP867X_DEBUG
static void atp867x_check_res(struct pci_dev *pdev)
{
int i;
@@ -280,7 +280,7 @@ static void atp867x_check_res(struct pci_dev *pdev)
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
start = pci_resource_start(pdev, i);
len = pci_resource_len(pdev, i);
- printk(KERN_DEBUG "ATP867X: resource start:len=%lx:%lx\n",
+ dev_dbg(&pdev->dev, "ATP867X: resource start:len=%lx:%lx\n",
start, len);
}
}
@@ -290,49 +290,48 @@ static void atp867x_check_ports(struct ata_port *ap, int port)
struct ata_ioports *ioaddr = &ap->ioaddr;
struct atp867x_priv *dp = ap->private_data;
- printk(KERN_DEBUG "ATP867X: port[%d] addresses\n"
- " cmd_addr =0x%llx, 0x%llx\n"
- " ctl_addr =0x%llx, 0x%llx\n"
- " bmdma_addr =0x%llx, 0x%llx\n"
- " data_addr =0x%llx\n"
- " error_addr =0x%llx\n"
- " feature_addr =0x%llx\n"
- " nsect_addr =0x%llx\n"
- " lbal_addr =0x%llx\n"
- " lbam_addr =0x%llx\n"
- " lbah_addr =0x%llx\n"
- " device_addr =0x%llx\n"
- " status_addr =0x%llx\n"
- " command_addr =0x%llx\n"
- " dp->dma_mode =0x%llx\n"
- " dp->mstr_piospd =0x%llx\n"
- " dp->slave_piospd =0x%llx\n"
- " dp->eightb_piospd =0x%llx\n"
+ ata_port_dbg(ap, "ATP867X: port[%d] addresses\n"
+ " cmd_addr =0x%lx, 0x%lx\n"
+ " ctl_addr =0x%lx, 0x%lx\n"
+ " bmdma_addr =0x%lx, 0x%lx\n"
+ " data_addr =0x%lx\n"
+ " error_addr =0x%lx\n"
+ " feature_addr =0x%lx\n"
+ " nsect_addr =0x%lx\n"
+ " lbal_addr =0x%lx\n"
+ " lbam_addr =0x%lx\n"
+ " lbah_addr =0x%lx\n"
+ " device_addr =0x%lx\n"
+ " status_addr =0x%lx\n"
+ " command_addr =0x%lx\n"
+ " dp->dma_mode =0x%lx\n"
+ " dp->mstr_piospd =0x%lx\n"
+ " dp->slave_piospd =0x%lx\n"
+ " dp->eightb_piospd =0x%lx\n"
" dp->pci66mhz =0x%lx\n",
port,
- (unsigned long long)ioaddr->cmd_addr,
- (unsigned long long)ATP867X_IO_PORTBASE(ap, port),
- (unsigned long long)ioaddr->ctl_addr,
- (unsigned long long)ATP867X_IO_ALTSTATUS(ap, port),
- (unsigned long long)ioaddr->bmdma_addr,
- (unsigned long long)ATP867X_IO_DMABASE(ap, port),
- (unsigned long long)ioaddr->data_addr,
- (unsigned long long)ioaddr->error_addr,
- (unsigned long long)ioaddr->feature_addr,
- (unsigned long long)ioaddr->nsect_addr,
- (unsigned long long)ioaddr->lbal_addr,
- (unsigned long long)ioaddr->lbam_addr,
- (unsigned long long)ioaddr->lbah_addr,
- (unsigned long long)ioaddr->device_addr,
- (unsigned long long)ioaddr->status_addr,
- (unsigned long long)ioaddr->command_addr,
- (unsigned long long)dp->dma_mode,
- (unsigned long long)dp->mstr_piospd,
- (unsigned long long)dp->slave_piospd,
- (unsigned long long)dp->eightb_piospd,
+ (unsigned long)ioaddr->cmd_addr,
+ (unsigned long)ATP867X_IO_PORTBASE(ap, port),
+ (unsigned long)ioaddr->ctl_addr,
+ (unsigned long)ATP867X_IO_ALTSTATUS(ap, port),
+ (unsigned long)ioaddr->bmdma_addr,
+ (unsigned long)ATP867X_IO_DMABASE(ap, port),
+ (unsigned long)ioaddr->data_addr,
+ (unsigned long)ioaddr->error_addr,
+ (unsigned long)ioaddr->feature_addr,
+ (unsigned long)ioaddr->nsect_addr,
+ (unsigned long)ioaddr->lbal_addr,
+ (unsigned long)ioaddr->lbam_addr,
+ (unsigned long)ioaddr->lbah_addr,
+ (unsigned long)ioaddr->device_addr,
+ (unsigned long)ioaddr->status_addr,
+ (unsigned long)ioaddr->command_addr,
+ (unsigned long)dp->dma_mode,
+ (unsigned long)dp->mstr_piospd,
+ (unsigned long)dp->slave_piospd,
+ (unsigned long)dp->eightb_piospd,
(unsigned long)dp->pci66mhz);
}
-#endif
static int atp867x_set_priv(struct ata_port *ap)
{
@@ -370,8 +369,7 @@ static void atp867x_fixup(struct ata_host *host)
if (v < 0x80) {
v = 0x80;
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, v);
- printk(KERN_DEBUG "ATP867X: set latency timer of device %s"
- " to %d\n", pci_name(pdev), v);
+ dev_dbg(&pdev->dev, "ATP867X: set latency timer to %d\n", v);
}
/*
@@ -419,13 +417,11 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
return rc;
host->iomap = pcim_iomap_table(pdev);
-#ifdef ATP867X_DEBUG
atp867x_check_res(pdev);
for (i = 0; i < PCI_STD_NUM_BARS; i++)
- printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i,
- (unsigned long long)(host->iomap[i]));
-#endif
+ dev_dbg(gdev, "ATP867X: iomap[%d]=0x%p\n", i,
+ host->iomap[i]);
/*
* request, iomap BARs and init port addresses accordingly
@@ -444,9 +440,8 @@ static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
if (rc)
return rc;
-#ifdef ATP867X_DEBUG
atp867x_check_ports(ap, i);
-#endif
+
ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
(unsigned long)ioaddr->cmd_addr,
(unsigned long)ioaddr->ctl_addr);
@@ -486,7 +481,7 @@ static int atp867x_init_one(struct pci_dev *pdev,
if (rc)
return rc;
- printk(KERN_INFO "ATP867X: ATP867 ATA UDMA133 controller (rev %02X)",
+ dev_info(&pdev->dev, "ATP867X: ATP867 ATA UDMA133 controller (rev %02X)",
pdev->device);
host = ata_host_alloc_pinfo(&pdev->dev, ppi, ATP867X_NUM_PORTS);
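The printk() conversions in this and the following drivers pick the most specific logging helper available, so every message is automatically prefixed with the object it concerns. Roughly (prefixes shown are the usual dev_printk form, for illustration):

	dev_info(&pdev->dev, "...\n");	/* "pata_xyz 0000:00:1f.1: ..." */
	ata_port_warn(ap, "...\n");	/* "ataN: ..." */
	ata_dev_dbg(adev, "...\n");	/* "ataN.DD: ...", dynamic-debug gated */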
diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
index d0bcabb58b44..1a3372a72213 100644
--- a/drivers/ata/pata_cmd640.c
+++ b/drivers/ata/pata_cmd640.c
@@ -61,7 +61,7 @@ static void cmd640_set_piomode(struct ata_port *ap, struct ata_device *adev)
struct ata_device *pair = ata_dev_pair(adev);
if (ata_timing_compute(adev, adev->pio_mode, &t, T, 0) < 0) {
- printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
+ ata_dev_err(adev, DRV_NAME ": mode computation failed.\n");
return;
}
diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
index 1d74d89b5bed..5baa4a7819c1 100644
--- a/drivers/ata/pata_cmd64x.c
+++ b/drivers/ata/pata_cmd64x.c
@@ -116,7 +116,7 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
/* ata_timing_compute is smart and will produce timings for MWDMA
that don't violate the drives PIO capabilities. */
if (ata_timing_compute(adev, mode, &t, T, 0) < 0) {
- printk(KERN_ERR DRV_NAME ": mode computation failed.\n");
+ ata_dev_err(adev, DRV_NAME ": mode computation failed.\n");
return;
}
if (ap->port_no) {
@@ -130,7 +130,7 @@ static void cmd64x_set_timing(struct ata_port *ap, struct ata_device *adev, u8 m
}
}
- printk(KERN_DEBUG DRV_NAME ": active %d recovery %d setup %d.\n",
+ ata_dev_dbg(adev, DRV_NAME ": active %d recovery %d setup %d.\n",
t.active, t.recover, t.setup);
if (t.recover > 16) {
t.active += t.recover - 16;
diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
index 247c14702624..24ce8665b1f9 100644
--- a/drivers/ata/pata_cs5520.c
+++ b/drivers/ata/pata_cs5520.c
@@ -153,12 +153,12 @@ static int cs5520_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Perform set up for DMA */
if (pci_enable_device_io(pdev)) {
- printk(KERN_ERR DRV_NAME ": unable to configure BAR2.\n");
+ dev_err(&pdev->dev, "unable to configure BAR2.\n");
return -ENODEV;
}
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
- printk(KERN_ERR DRV_NAME ": unable to configure DMA mask.\n");
+ dev_err(&pdev->dev, "unable to configure DMA mask.\n");
return -ENODEV;
}
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 760ac6e65216..ab47aeb5587f 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -263,12 +263,12 @@ static int cs5536_init_one(struct pci_dev *dev, const struct pci_device_id *id)
ppi[1] = &ata_dummy_port_info;
if (use_msr)
- printk(KERN_ERR DRV_NAME ": Using MSR regs instead of PCI\n");
+ dev_err(&dev->dev, DRV_NAME ": Using MSR regs instead of PCI\n");
cs5536_read(dev, CFG, &cfg);
if ((cfg & IDE_CFG_CHANEN) == 0) {
- printk(KERN_ERR DRV_NAME ": disabled by BIOS\n");
+ dev_err(&dev->dev, DRV_NAME ": disabled by BIOS\n");
return -ENODEV;
}
diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
index 5b3a7a8ebef6..3be5d52a777b 100644
--- a/drivers/ata/pata_cypress.c
+++ b/drivers/ata/pata_cypress.c
@@ -62,7 +62,7 @@ static void cy82c693_set_piomode(struct ata_port *ap, struct ata_device *adev)
u32 addr;
if (ata_timing_compute(adev, adev->pio_mode, &t, T, 1) < 0) {
- printk(KERN_ERR DRV_NAME ": mome computation failed.\n");
+ ata_dev_err(adev, DRV_NAME ": mode computation failed.\n");
return;
}
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index 46208ececbb6..b78f71c70f27 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -855,7 +855,6 @@ static void ep93xx_pata_drain_fifo(struct ata_queued_cmd *qc)
&& count < 65536; count += 2)
ep93xx_pata_read_reg(drv_data, IDECTRL_ADDR_DATA);
- /* Can become DEBUG later */
if (count)
ata_port_dbg(ap, "drained %d bytes to clear DRQ.\n", count);
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
index 06b7c4a9ec95..778c893f276b 100644
--- a/drivers/ata/pata_hpt366.c
+++ b/drivers/ata/pata_hpt366.c
@@ -14,9 +14,6 @@
* TODO
* Look into engine reset on timeout errors. Should not be required.
*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -183,7 +180,7 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
i = match_string(list, -1, model_num);
if (i >= 0) {
- pr_warn("%s is not supported for %s\n", modestr, list[i]);
+ ata_dev_warn(dev, "%s is not supported for %s\n", modestr, list[i]);
return 1;
}
return 0;
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index f242157bc81b..7abc7e04f656 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -14,9 +14,6 @@
* TODO
* Look into engine reset on timeout errors. Should not be required.
*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -231,7 +228,8 @@ static int hpt_dma_blacklisted(const struct ata_device *dev, char *modestr,
i = match_string(list, -1, model_num);
if (i >= 0) {
- pr_warn("%s is not supported for %s\n", modestr, list[i]);
+ ata_dev_warn(dev, "%s is not supported for %s\n",
+ modestr, list[i]);
return 1;
}
return 0;
@@ -864,7 +862,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
chip_table = &hpt372;
break;
default:
- pr_err("Unknown HPT366 subtype, please report (%d)\n",
+ dev_err(&dev->dev,
+ "Unknown HPT366 subtype, please report (%d)\n",
rev);
return -ENODEV;
}
@@ -905,7 +904,8 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
*ppi = &info_hpt374_fn1;
break;
default:
- pr_err("PCI table is bogus, please report (%d)\n", dev->device);
+ dev_err(&dev->dev, "PCI table is bogus, please report (%d)\n",
+ dev->device);
return -ENODEV;
}
/* Ok so this is a chip we support */
@@ -953,7 +953,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
u8 sr;
u32 total = 0;
- pr_warn("BIOS has not set timing clocks\n");
+ dev_warn(&dev->dev, "BIOS has not set timing clocks\n");
/* This is the process the HPT371 BIOS is reported to use */
for (i = 0; i < 128; i++) {
@@ -1009,7 +1009,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
(f_high << 16) | f_low | 0x100);
}
if (adjust == 8) {
- pr_err("DPLL did not stabilize!\n");
+ dev_err(&dev->dev, "DPLL did not stabilize!\n");
return -ENODEV;
}
if (dpll == 3)
@@ -1017,7 +1017,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
else
private_data = (void *)hpt37x_timings_50;
- pr_info("bus clock %dMHz, using %dMHz DPLL\n",
+ dev_info(&dev->dev, "bus clock %dMHz, using %dMHz DPLL\n",
MHz[clock_slot], MHz[dpll]);
} else {
private_data = (void *)chip_table->clocks[clock_slot];
@@ -1032,7 +1032,7 @@ static int hpt37x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (clock_slot < 2 && ppi[0] == &info_hpt370a)
ppi[0] = &info_hpt370a_33;
- pr_info("%s using %dMHz bus clock\n",
+ dev_info(&dev->dev, "%s using %dMHz bus clock\n",
chip_table->name, MHz[clock_slot]);
}
diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
index 48eef338e050..1d9d4eec5b8a 100644
--- a/drivers/ata/pata_hpt3x2n.c
+++ b/drivers/ata/pata_hpt3x2n.c
@@ -15,9 +15,6 @@
* TODO
* Work out best PLL policy
*/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -420,7 +417,7 @@ static int hpt3x2n_pci_clock(struct pci_dev *pdev)
u16 sr;
u32 total = 0;
- pr_warn("BIOS clock data not set\n");
+ dev_warn(&pdev->dev, "BIOS clock data not set\n");
/* This is the process the HPT371 BIOS is reported to use */
for (i = 0; i < 128; i++) {
@@ -530,7 +527,8 @@ hpt372n:
ppi[0] = &info_hpt372n;
break;
default:
- pr_err("PCI table is bogus, please report (%d)\n", dev->device);
+ dev_err(&dev->dev, "PCI table is bogus, please report (%d)\n",
+ dev->device);
return -ENODEV;
}
@@ -579,11 +577,11 @@ hpt372n:
pci_write_config_dword(dev, 0x5C, (f_high << 16) | f_low);
}
if (adjust == 8) {
- pr_err("DPLL did not stabilize!\n");
+ dev_err(&dev->dev, "DPLL did not stabilize!\n");
return -ENODEV;
}
- pr_info("bus clock %dMHz, using 66MHz DPLL\n", pci_mhz);
+ dev_info(&dev->dev, "bus clock %dMHz, using 66MHz DPLL\n", pci_mhz);
/*
* Set our private data up. We only need a few flags
diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
index 0e2265978a34..8a5b4e0079ab 100644
--- a/drivers/ata/pata_it821x.c
+++ b/drivers/ata/pata_it821x.c
@@ -431,7 +431,8 @@ static unsigned int it821x_smart_qc_issue(struct ata_queued_cmd *qc)
case ATA_CMD_SET_FEATURES:
return ata_bmdma_qc_issue(qc);
}
- printk(KERN_DEBUG "it821x: can't process command 0x%02X\n", qc->tf.command);
+ ata_dev_dbg(qc->dev, "it821x: can't process command 0x%02X\n",
+ qc->tf.command);
return AC_ERR_DEV;
}
@@ -507,12 +508,14 @@ static void it821x_dev_config(struct ata_device *adev)
if (strstr(model_num, "Integrated Technology Express")) {
/* RAID mode */
- ata_dev_info(adev, "%sRAID%d volume",
- adev->id[147] ? "Bootable " : "",
- adev->id[129]);
- if (adev->id[129] != 1)
- pr_cont("(%dK stripe)", adev->id[146]);
- pr_cont("\n");
+ if (adev->id[129] == 1)
+ ata_dev_info(adev, "%sRAID%d volume\n",
+ adev->id[147] ? "Bootable " : "",
+ adev->id[129]);
+ else
+ ata_dev_info(adev, "%sRAID%d volume (%dK stripe)\n",
+ adev->id[147] ? "Bootable " : "",
+ adev->id[129], adev->id[146]);
}
/* This is a controller firmware triggered funny, don't
report the drive faulty! */
@@ -534,7 +537,7 @@ static void it821x_dev_config(struct ata_device *adev)
*/
static unsigned int it821x_read_id(struct ata_device *adev,
- struct ata_taskfile *tf, u16 *id)
+ struct ata_taskfile *tf, __le16 *id)
{
unsigned int err_mask;
unsigned char model_num[ATA_ID_PROD_LEN + 1];
@@ -542,21 +545,20 @@ static unsigned int it821x_read_id(struct ata_device *adev,
err_mask = ata_do_dev_read_id(adev, tf, id);
if (err_mask)
return err_mask;
- ata_id_c_string(id, model_num, ATA_ID_PROD, sizeof(model_num));
+ ata_id_c_string((u16 *)id, model_num, ATA_ID_PROD, sizeof(model_num));
- id[83] &= ~(1 << 12); /* Cache flush is firmware handled */
- id[83] &= ~(1 << 13); /* Ditto for LBA48 flushes */
- id[84] &= ~(1 << 6); /* No FUA */
- id[85] &= ~(1 << 10); /* No HPA */
- id[76] = 0; /* No NCQ/AN etc */
+ id[83] &= cpu_to_le16(~(1 << 12)); /* Cache flush is firmware handled */
+ id[84] &= cpu_to_le16(~(1 << 6)); /* No FUA */
+ id[85] &= cpu_to_le16(~(1 << 10)); /* No HPA */
+ id[76] = 0; /* No NCQ/AN etc */
if (strstr(model_num, "Integrated Technology Express")) {
/* Set feature bits the firmware neglects */
- id[49] |= 0x0300; /* LBA, DMA */
- id[83] &= 0x7FFF;
- id[83] |= 0x4400; /* Word 83 is valid and LBA48 */
- id[86] |= 0x0400; /* LBA48 on */
- id[ATA_ID_MAJOR_VER] |= 0x1F;
+ id[49] |= cpu_to_le16(0x0300); /* LBA, DMA */
+ id[83] &= cpu_to_le16(0x7FFF);
+ id[83] |= cpu_to_le16(0x4400); /* Word 83 is valid and LBA48 */
+ id[86] |= cpu_to_le16(0x0400); /* LBA48 on */
+ id[ATA_ID_MAJOR_VER] |= cpu_to_le16(0x1F);
/* Clear the serial number because it's different each boot
which breaks validation on resume */
memset(&id[ATA_ID_SERNO], 0x20, ATA_ID_SERNO_LEN);
@@ -593,6 +595,7 @@ static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
/**
* it821x_display_disk - display disk setup
+ * @ap: ATA port
* @n: Device number
* @buf: Buffer block from firmware
*
@@ -600,7 +603,7 @@ static int it821x_check_atapi_dma(struct ata_queued_cmd *qc)
* by the firmware.
*/
-static void it821x_display_disk(int n, u8 *buf)
+static void it821x_display_disk(struct ata_port *ap, int n, u8 *buf)
{
unsigned char id[41];
int mode = 0;
@@ -633,13 +636,13 @@ static void it821x_display_disk(int n, u8 *buf)
else
strcpy(mbuf, "PIO");
if (buf[52] == 4)
- printk(KERN_INFO "%d: %-6s %-8s %s %s\n",
+ ata_port_info(ap, "%d: %-6s %-8s %s %s\n",
n, mbuf, types[buf[52]], id, cbl);
else
- printk(KERN_INFO "%d: %-6s %-8s Volume: %1d %s %s\n",
+ ata_port_info(ap, "%d: %-6s %-8s Volume: %1d %s %s\n",
n, mbuf, types[buf[52]], buf[53], id, cbl);
if (buf[125] < 100)
- printk(KERN_INFO "%d: Rebuilding: %d%%\n", n, buf[125]);
+ ata_port_info(ap, "%d: Rebuilding: %d%%\n", n, buf[125]);
}
/**
@@ -676,7 +679,7 @@ static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len)
status = ioread8(ap->ioaddr.status_addr);
if (status & ATA_ERR) {
kfree(buf);
- printk(KERN_ERR "it821x_firmware_command: rejected\n");
+ ata_port_err(ap, "%s: rejected\n", __func__);
return NULL;
}
if (status & ATA_DRQ) {
@@ -686,7 +689,7 @@ static u8 *it821x_firmware_command(struct ata_port *ap, u8 cmd, int len)
usleep_range(500, 1000);
}
kfree(buf);
- printk(KERN_ERR "it821x_firmware_command: timeout\n");
+ ata_port_err(ap, "%s: timeout\n", __func__);
return NULL;
}
@@ -709,13 +712,13 @@ static void it821x_probe_firmware(struct ata_port *ap)
buf = it821x_firmware_command(ap, 0xFA, 512);
if (buf != NULL) {
- printk(KERN_INFO "pata_it821x: Firmware %02X/%02X/%02X%02X\n",
+ ata_port_info(ap, "pata_it821x: Firmware %02X/%02X/%02X%02X\n",
buf[505],
buf[506],
buf[507],
buf[508]);
for (i = 0; i < 4; i++)
- it821x_display_disk(i, buf + 128 * i);
+ it821x_display_disk(ap, i, buf + 128 * i);
kfree(buf);
}
}
@@ -771,7 +774,8 @@ static int it821x_port_start(struct ata_port *ap)
itdev->timing10 = 1;
/* Need to disable ATAPI DMA for this case */
if (!itdev->smart)
- printk(KERN_WARNING DRV_NAME": Revision 0x10, workarounds activated.\n");
+ dev_warn(&pdev->dev,
+ "Revision 0x10, workarounds activated.\n");
}
return 0;
@@ -919,14 +923,14 @@ static int it821x_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
} else {
/* Force the card into bypass mode if so requested */
if (it8212_noraid) {
- printk(KERN_INFO DRV_NAME ": forcing bypass mode.\n");
+ dev_info(&pdev->dev, "forcing bypass mode.\n");
it821x_disable_raid(pdev);
}
pci_read_config_byte(pdev, 0x50, &conf);
conf &= 1;
- printk(KERN_INFO DRV_NAME": controller in %s mode.\n",
- mode[conf]);
+ dev_info(&pdev->dev, "controller in %s mode.\n", mode[conf]);
+
if (conf == 0)
ppi[0] = &info_passthru;
else
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 99c63087c8ae..17b557c91e1c 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -114,7 +114,7 @@ static void ixp4xx_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
struct ixp4xx_pata *ixpp = ap->host->private_data;
- ata_dev_printk(adev, KERN_INFO, "configured for PIO%d 8bit\n",
+ ata_dev_info(adev, "configured for PIO%d 8bit\n",
adev->pio_mode - XFER_PIO_0);
ixp4xx_set_8bit_timing(ixpp, adev->pio_mode);
}
@@ -132,8 +132,8 @@ static unsigned int ixp4xx_mmio_data_xfer(struct ata_queued_cmd *qc,
struct ixp4xx_pata *ixpp = ap->host->private_data;
unsigned long flags;
- ata_dev_printk(adev, KERN_DEBUG, "%s %d bytes\n", (rw == READ) ? "READ" : "WRITE",
- buflen);
+ ata_dev_dbg(adev, "%s %d bytes\n", (rw == READ) ? "READ" : "WRITE",
+ buflen);
spin_lock_irqsave(ap->lock, flags);
/* set the expansion bus in 16bit mode and restore
diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
index 361597d14c56..0c5a51970fbf 100644
--- a/drivers/ata/pata_marvell.c
+++ b/drivers/ata/pata_marvell.c
@@ -32,7 +32,6 @@
static int marvell_pata_active(struct pci_dev *pdev)
{
- int i;
u32 devices;
void __iomem *barp;
@@ -44,11 +43,6 @@ static int marvell_pata_active(struct pci_dev *pdev)
if (barp == NULL)
return -ENOMEM;
- printk("BAR5:");
- for(i = 0; i <= 0x0F; i++)
- printk("%02X:%02X ", i, ioread8(barp + i));
- printk("\n");
-
devices = ioread32(barp + 0x0C);
pci_iounmap(pdev, barp);
@@ -149,7 +143,8 @@ static int marvell_init_one (struct pci_dev *pdev, const struct pci_device_id *i
#if IS_ENABLED(CONFIG_SATA_AHCI)
if (!marvell_pata_active(pdev)) {
- printk(KERN_INFO DRV_NAME ": PATA port not active, deferring to AHCI driver.\n");
+ dev_info(&pdev->dev,
+ "PATA port not active, deferring to AHCI driver.\n");
return -ENODEV;
}
#endif
diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
index a7ecc1a204b5..06929e77c491 100644
--- a/drivers/ata/pata_netcell.c
+++ b/drivers/ata/pata_netcell.c
@@ -21,12 +21,13 @@
/* No PIO or DMA methods needed for this device */
static unsigned int netcell_read_id(struct ata_device *adev,
- struct ata_taskfile *tf, u16 *id)
+ struct ata_taskfile *tf, __le16 *id)
{
unsigned int err_mask = ata_do_dev_read_id(adev, tf, id);
+
/* Firmware forgets to mark words 85-87 valid */
if (err_mask == 0)
- id[ATA_ID_CSF_DEFAULT] |= 0x4000;
+ id[ATA_ID_CSF_DEFAULT] |= cpu_to_le16(0x4000);
return err_mask;
}
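The read_id hook now receives the raw IDENTIFY buffer as __le16 words, so any fixup must go through cpu_to_le16()/le16_to_cpu() to stay correct on big-endian machines and visible to sparse. A minimal sketch — the word being patched is hypothetical:

	static unsigned int sketch_read_id(struct ata_device *adev,
					   struct ata_taskfile *tf, __le16 *id)
	{
		unsigned int err_mask = ata_do_dev_read_id(adev, tf, id);

		if (!err_mask)
			id[83] |= cpu_to_le16(1 << 10);	/* hypothetical fixup */
		return err_mask;
	}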
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index b5a3f710d76d..05c2ab375756 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -19,7 +19,7 @@
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <scsi/scsi_host.h>
-
+#include <trace/events/libata.h>
#include <asm/byteorder.h>
#include <asm/octeon/octeon.h>
@@ -73,16 +73,12 @@ MODULE_PARM_DESC(enable_dma,
*/
static unsigned int ns_to_tim_reg(unsigned int tim_mult, unsigned int nsecs)
{
- unsigned int val;
-
/*
* Compute # of eclock periods to get desired duration in
* nanoseconds.
*/
- val = DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
+ return DIV_ROUND_UP(nsecs * (octeon_get_io_clock_rate() / 1000000),
1000 * tim_mult);
-
- return val;
}
static void octeon_cf_set_boot_reg_cfg(int cs, unsigned int multiplier)
@@ -273,9 +269,9 @@ static void octeon_cf_set_dmamode(struct ata_port *ap, struct ata_device *dev)
dma_tim.s.we_n = ns_to_tim_reg(tim_mult, oe_n);
dma_tim.s.we_a = ns_to_tim_reg(tim_mult, oe_a);
- pr_debug("ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
+ ata_dev_dbg(dev, "ns to ticks (mult %d) of %d is: %d\n", tim_mult, 60,
ns_to_tim_reg(tim_mult, 60));
- pr_debug("oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
+ ata_dev_dbg(dev, "oe_n: %d, oe_a: %d, dmack_s: %d, dmack_h: %d, dmarq: %d, pause: %d\n",
dma_tim.s.oe_n, dma_tim.s.oe_a, dma_tim.s.dmack_s,
dma_tim.s.dmack_h, dma_tim.s.dmarq, dma_tim.s.pause);
@@ -440,7 +436,6 @@ static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
int rc;
u8 err;
- DPRINTK("about to softreset\n");
__raw_writew(ap->ctl, base + 0xe);
udelay(20);
__raw_writew(ap->ctl | ATA_SRST, base + 0xe);
@@ -455,7 +450,6 @@ static int octeon_cf_softreset16(struct ata_link *link, unsigned int *classes,
/* determine by signature whether we have ATA or ATAPI devices */
classes[0] = ata_sff_dev_classify(&link->device[0], 1, &err);
- DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
return 0;
}
@@ -479,23 +473,11 @@ static void octeon_cf_tf_load16(struct ata_port *ap,
__raw_writew(tf->hob_feature << 8, base + 0xc);
__raw_writew(tf->hob_nsect | tf->hob_lbal << 8, base + 2);
__raw_writew(tf->hob_lbam | tf->hob_lbah << 8, base + 4);
- VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
- tf->hob_feature,
- tf->hob_nsect,
- tf->hob_lbal,
- tf->hob_lbam,
- tf->hob_lbah);
}
if (is_addr) {
__raw_writew(tf->feature << 8, base + 0xc);
__raw_writew(tf->nsect | tf->lbal << 8, base + 2);
__raw_writew(tf->lbam | tf->lbah << 8, base + 4);
- VPRINTK("feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
- tf->feature,
- tf->nsect,
- tf->lbal,
- tf->lbam,
- tf->lbah);
}
ata_wait_idle(ap);
}
@@ -516,20 +498,14 @@ static void octeon_cf_exec_command16(struct ata_port *ap,
{
/* The base of the registers is at ioaddr.data_addr. */
void __iomem *base = ap->ioaddr.data_addr;
- u16 blob;
+ u16 blob = 0;
- if (tf->flags & ATA_TFLAG_DEVICE) {
- VPRINTK("device 0x%X\n", tf->device);
+ if (tf->flags & ATA_TFLAG_DEVICE)
blob = tf->device;
- } else {
- blob = 0;
- }
- DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
blob |= (tf->command << 8);
__raw_writew(blob, base + 6);
-
ata_wait_idle(ap);
}
@@ -543,12 +519,10 @@ static void octeon_cf_dma_setup(struct ata_queued_cmd *qc)
struct octeon_cf_port *cf_port;
cf_port = ap->private_data;
- DPRINTK("ENTER\n");
/* issue r/w command */
qc->cursg = qc->sg;
cf_port->dma_finished = 0;
ap->ops->sff_exec_command(ap, &qc->tf);
- DPRINTK("EXIT\n");
}
/**
@@ -563,8 +537,6 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
union cvmx_mio_boot_dma_intx mio_boot_dma_int;
struct scatterlist *sg;
- VPRINTK("%d scatterlists\n", qc->n_elem);
-
/* Get the scatter list entry we need to DMA into */
sg = qc->cursg;
BUG_ON(!sg);
@@ -605,10 +577,6 @@ static void octeon_cf_dma_start(struct ata_queued_cmd *qc)
mio_boot_dma_cfg.s.adr = sg_dma_address(sg);
- VPRINTK("%s %d bytes address=%p\n",
- (mio_boot_dma_cfg.s.rw) ? "write" : "read", sg->length,
- (void *)(unsigned long)mio_boot_dma_cfg.s.adr);
-
cvmx_write_csr(cf_port->dma_base + DMA_CFG, mio_boot_dma_cfg.u64);
}
@@ -627,9 +595,7 @@ static unsigned int octeon_cf_dma_finished(struct ata_port *ap,
union cvmx_mio_boot_dma_intx dma_int;
u8 status;
- VPRINTK("ata%u: protocol %d task_state %d\n",
- ap->print_id, qc->tf.protocol, ap->hsm_task_state);
-
+ trace_ata_bmdma_stop(ap, &qc->tf, qc->tag);
if (ap->hsm_task_state != HSM_ST_LAST)
return 0;
@@ -678,7 +644,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
spin_lock_irqsave(&host->lock, flags);
- DPRINTK("ENTER\n");
for (i = 0; i < host->n_ports; i++) {
u8 status;
struct ata_port *ap;
@@ -701,6 +666,7 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
if (!sg_is_last(qc->cursg)) {
qc->cursg = sg_next(qc->cursg);
handled = 1;
+ trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
octeon_cf_dma_start(qc);
continue;
} else {
@@ -732,7 +698,6 @@ static irqreturn_t octeon_cf_interrupt(int irq, void *dev_instance)
}
}
spin_unlock_irqrestore(&host->lock, flags);
- DPRINTK("EXIT\n");
return IRQ_RETVAL(handled);
}
@@ -800,8 +765,11 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
case ATA_PROT_DMA:
WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
+ trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ trace_ata_bmdma_setup(ap, &qc->tf, qc->tag);
octeon_cf_dma_setup(qc); /* set up dma */
+ trace_ata_bmdma_start(ap, &qc->tf, qc->tag);
octeon_cf_dma_start(qc); /* initiate dma */
ap->hsm_task_state = HSM_ST_LAST;
break;
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 35aa158fc976..c3a40b717dcd 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -25,11 +25,12 @@ static int pata_of_platform_probe(struct platform_device *ofdev)
struct device_node *dn = ofdev->dev.of_node;
struct resource io_res;
struct resource ctl_res;
- struct resource *irq_res;
+ struct resource irq_res;
unsigned int reg_shift = 0;
int pio_mode = 0;
int pio_mask;
bool use16bit;
+ int irq;
ret = of_address_to_resource(dn, 0, &io_res);
if (ret) {
@@ -45,7 +46,15 @@ static int pata_of_platform_probe(struct platform_device *ofdev)
return -EINVAL;
}
- irq_res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
+ memset(&irq_res, 0, sizeof(irq_res));
+
+ irq = platform_get_irq_optional(ofdev, 0);
+ if (irq < 0 && irq != -ENXIO)
+ return irq;
+ if (irq > 0) {
+ irq_res.start = irq;
+ irq_res.end = irq;
+ }
of_property_read_u32(dn, "reg-shift", &reg_shift);
@@ -63,7 +72,7 @@ static int pata_of_platform_probe(struct platform_device *ofdev)
pio_mask = 1 << pio_mode;
pio_mask |= (1 << pio_mode) - 1;
- return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res,
+ return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq > 0 ? &irq_res : NULL,
reg_shift, pio_mask, &pata_platform_sht,
use16bit);
}
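This IRQ lookup works for any platform driver whose interrupt is genuinely optional: platform_get_irq_optional() separates "not wired up" (-ENXIO, fall back to polling) from real errors such as -EPROBE_DEFER, which must still fail the probe. A minimal sketch (hypothetical probe):

	static int sketch_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq_optional(pdev, 0);

		if (irq < 0 && irq != -ENXIO)
			return irq;	/* real error, e.g. -EPROBE_DEFER */
		if (irq <= 0)
			irq = 0;	/* no interrupt: run in polling mode */
		/* ... register the host with or without the irq ... */
		return 0;
	}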
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
index effc1a09444d..4fbb3eed8b0b 100644
--- a/drivers/ata/pata_pdc2027x.c
+++ b/drivers/ata/pata_pdc2027x.c
@@ -30,13 +30,6 @@
#define DRV_NAME "pata_pdc2027x"
#define DRV_VERSION "1.0"
-#undef PDC_DEBUG
-
-#ifdef PDC_DEBUG
-#define PDPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#else
-#define PDPRINTK(fmt, args...)
-#endif
enum {
PDC_MMIO_BAR = 5,
@@ -214,11 +207,11 @@ static int pdc2027x_cable_detect(struct ata_port *ap)
if (cgcr & (1 << 26))
goto cbl40;
- PDPRINTK("No cable or 80-conductor cable on port %d\n", ap->port_no);
+ ata_port_dbg(ap, "No cable or 80-conductor cable\n");
return ATA_CBL_PATA80;
cbl40:
- printk(KERN_INFO DRV_NAME ": 40-conductor cable detected on port %d\n", ap->port_no);
+ ata_port_info(ap, DRV_NAME ": 40-conductor cable detected\n");
return ATA_CBL_PATA40;
}
@@ -292,17 +285,17 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
unsigned int pio = adev->pio_mode - XFER_PIO_0;
u32 ctcr0, ctcr1;
- PDPRINTK("adev->pio_mode[%X]\n", adev->pio_mode);
+ ata_port_dbg(ap, "adev->pio_mode[%X]\n", adev->pio_mode);
/* Sanity check */
if (pio > 4) {
- printk(KERN_ERR DRV_NAME ": Unknown pio mode [%d] ignored\n", pio);
+ ata_port_err(ap, "Unknown pio mode [%d] ignored\n", pio);
return;
}
/* Set the PIO timing registers using value table for 133MHz */
- PDPRINTK("Set pio regs... \n");
+ ata_port_dbg(ap, "Set pio regs... \n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0xffff0000;
@@ -315,9 +308,7 @@ static void pdc2027x_set_piomode(struct ata_port *ap, struct ata_device *adev)
ctcr1 |= (pdc2027x_pio_timing_tbl[pio].value2 << 24);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- PDPRINTK("Set pio regs done\n");
-
- PDPRINTK("Set to pio mode[%u] \n", pio);
+ ata_port_dbg(ap, "Set to pio mode[%u] \n", pio);
}
/**
@@ -350,7 +341,7 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
iowrite32(ctcr1 & ~(1 << 7), dev_mmio(ap, adev, PDC_CTCR1));
}
- PDPRINTK("Set udma regs... \n");
+ ata_port_dbg(ap, "Set udma regs... \n");
ctcr1 = ioread32(dev_mmio(ap, adev, PDC_CTCR1));
ctcr1 &= 0xff000000;
@@ -359,16 +350,14 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
(pdc2027x_udma_timing_tbl[udma_mode].value2 << 16);
iowrite32(ctcr1, dev_mmio(ap, adev, PDC_CTCR1));
- PDPRINTK("Set udma regs done\n");
-
- PDPRINTK("Set to udma mode[%u] \n", udma_mode);
+ ata_port_dbg(ap, "Set to udma mode[%u] \n", udma_mode);
} else if ((dma_mode >= XFER_MW_DMA_0) &&
(dma_mode <= XFER_MW_DMA_2)) {
/* Set the MDMA timing registers with value table for 133MHz */
unsigned int mdma_mode = dma_mode & 0x07;
- PDPRINTK("Set mdma regs... \n");
+ ata_port_dbg(ap, "Set mdma regs... \n");
ctcr0 = ioread32(dev_mmio(ap, adev, PDC_CTCR0));
ctcr0 &= 0x0000ffff;
@@ -376,11 +365,10 @@ static void pdc2027x_set_dmamode(struct ata_port *ap, struct ata_device *adev)
(pdc2027x_mdma_timing_tbl[mdma_mode].value1 << 24);
iowrite32(ctcr0, dev_mmio(ap, adev, PDC_CTCR0));
- PDPRINTK("Set mdma regs done\n");
- PDPRINTK("Set to mdma mode[%u] \n", mdma_mode);
+ ata_port_dbg(ap, "Set to mdma mode[%u] \n", mdma_mode);
} else {
- printk(KERN_ERR DRV_NAME ": Unknown dma mode [%u] ignored\n", dma_mode);
+ ata_port_err(ap, "Unknown dma mode [%u] ignored\n", dma_mode);
}
}
@@ -414,7 +402,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed
ctcr1 |= (1 << 25);
iowrite32(ctcr1, dev_mmio(ap, dev, PDC_CTCR1));
- PDPRINTK("Turn on prefetch\n");
+ ata_dev_dbg(dev, "Turn on prefetch\n");
} else {
pdc2027x_set_dmamode(ap, dev);
}
@@ -485,8 +473,8 @@ retry:
counter = (bccrh << 15) | bccrl;
- PDPRINTK("bccrh [%X] bccrl [%X]\n", bccrh, bccrl);
- PDPRINTK("bccrhv[%X] bccrlv[%X]\n", bccrhv, bccrlv);
+ dev_dbg(host->dev, "bccrh [%X] bccrl [%X]\n", bccrh, bccrl);
+ dev_dbg(host->dev, "bccrhv[%X] bccrlv[%X]\n", bccrhv, bccrlv);
/*
* The 30-bit decreasing counter are read by 2 pieces.
@@ -495,7 +483,7 @@ retry:
*/
if (retry && !(bccrh == bccrhv && bccrl >= bccrlv)) {
retry--;
- PDPRINTK("rereading counter\n");
+ dev_dbg(host->dev, "rereading counter\n");
goto retry;
}
@@ -520,20 +508,19 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
/* Sanity check */
if (unlikely(pll_clock_khz < 5000L || pll_clock_khz > 70000L)) {
- printk(KERN_ERR DRV_NAME ": Invalid PLL input clock %ldkHz, give up!\n", pll_clock_khz);
+ dev_err(host->dev, "Invalid PLL input clock %ldkHz, give up!\n",
+ pll_clock_khz);
return;
}
-#ifdef PDC_DEBUG
- PDPRINTK("pout_required is %ld\n", pout_required);
+ dev_dbg(host->dev, "pout_required is %ld\n", pout_required);
/* Show the current clock value of PLL control register
* (maybe already configured by the firmware)
*/
pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
- PDPRINTK("pll_ctl[%X]\n", pll_ctl);
-#endif
+ dev_dbg(host->dev, "pll_ctl[%X]\n", pll_ctl);
/*
* Calculate the ratio of F, R and OD
@@ -552,7 +539,7 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
R = 0x00;
} else {
/* Invalid ratio */
- printk(KERN_ERR DRV_NAME ": Invalid ratio %ld, give up!\n", ratio);
+ dev_err(host->dev, "Invalid ratio %ld, give up!\n", ratio);
return;
}
@@ -560,15 +547,15 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
if (unlikely(F < 0 || F > 127)) {
/* Invalid F */
- printk(KERN_ERR DRV_NAME ": F[%d] invalid!\n", F);
+ dev_err(host->dev, "F[%d] invalid!\n", F);
return;
}
- PDPRINTK("F[%d] R[%d] ratio*1000[%ld]\n", F, R, ratio);
+ dev_dbg(host->dev, "F[%d] R[%d] ratio*1000[%ld]\n", F, R, ratio);
pll_ctl = (R << 8) | F;
- PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl);
+ dev_dbg(host->dev, "Writing pll_ctl[%X]\n", pll_ctl);
iowrite16(pll_ctl, mmio_base + PDC_PLL_CTL);
ioread16(mmio_base + PDC_PLL_CTL); /* flush */
@@ -576,15 +563,13 @@ static void pdc_adjust_pll(struct ata_host *host, long pll_clock, unsigned int b
/* Wait the PLL circuit to be stable */
msleep(30);
-#ifdef PDC_DEBUG
/*
* Show the current clock value of PLL control register
* (maybe configured by the firmware)
*/
pll_ctl = ioread16(mmio_base + PDC_PLL_CTL);
- PDPRINTK("pll_ctl[%X]\n", pll_ctl);
-#endif
+ dev_dbg(host->dev, "pll_ctl[%X]\n", pll_ctl);
return;
}
@@ -605,7 +590,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
/* Start the test mode */
scr = ioread32(mmio_base + PDC_SYS_CTL);
- PDPRINTK("scr[%X]\n", scr);
+ dev_dbg(host->dev, "scr[%X]\n", scr);
iowrite32(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL);
ioread32(mmio_base + PDC_SYS_CTL); /* flush */
@@ -622,7 +607,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
/* Stop the test mode */
scr = ioread32(mmio_base + PDC_SYS_CTL);
- PDPRINTK("scr[%X]\n", scr);
+ dev_dbg(host->dev, "scr[%X]\n", scr);
iowrite32(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL);
ioread32(mmio_base + PDC_SYS_CTL); /* flush */
@@ -632,8 +617,8 @@ static long pdc_detect_pll_input_clock(struct ata_host *host)
pll_clock = ((start_count - end_count) & 0x3fffffff) / 100 *
(100000000 / usec_elapsed);
- PDPRINTK("start[%ld] end[%ld] \n", start_count, end_count);
- PDPRINTK("PLL input clock[%ld]Hz\n", pll_clock);
+ dev_dbg(host->dev, "start[%ld] end[%ld] PLL input clock[%ld]HZ\n",
+ start_count, end_count, pll_clock);
return pll_clock;
}
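Dropping the compile-time PDC_DEBUG switch in favour of dev_dbg() makes the same dumps selectable at runtime on a CONFIG_DYNAMIC_DEBUG kernel, with no rebuild: booting with dyndbg="file pata_pdc2027x.c +p" (standard dynamic-debug syntax) turns them all on, and individual call sites can be toggled through debugfs.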
diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
index 0c5cbcd28d0d..b99849095853 100644
--- a/drivers/ata/pata_pdc202xx_old.c
+++ b/drivers/ata/pata_pdc202xx_old.c
@@ -38,8 +38,6 @@ static int pdc2026x_cable_detect(struct ata_port *ap)
static void pdc202xx_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf)
{
- DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
-
iowrite8(tf->command, ap->ioaddr.command_addr);
ndelay(400);
}
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 028329428b75..87c7c90676ca 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -128,6 +128,8 @@ int __pata_platform_probe(struct device *dev, struct resource *io_res,
ap = host->ports[0];
ap->ops = devm_kzalloc(dev, sizeof(*ap->ops), GFP_KERNEL);
+ if (!ap->ops)
+ return -ENOMEM;
ap->ops->inherits = &ata_sff_port_ops;
ap->ops->cable_detect = ata_cable_unknown;
ap->ops->set_mode = pata_platform_set_mode;
diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
index 3722a67083fd..fb00c3e5fd19 100644
--- a/drivers/ata/pata_rz1000.c
+++ b/drivers/ata/pata_rz1000.c
@@ -69,7 +69,7 @@ static int rz1000_fifo_disable(struct pci_dev *pdev)
reg &= 0xDFFF;
if (pci_write_config_word(pdev, 0x40, reg) != 0)
return -1;
- printk(KERN_INFO DRV_NAME ": disabled chipset readahead.\n");
+ dev_info(&pdev->dev, "disabled chipset readahead.\n");
return 0;
}
@@ -97,7 +97,7 @@ static int rz1000_init_one (struct pci_dev *pdev, const struct pci_device_id *en
if (rz1000_fifo_disable(pdev) == 0)
return ata_pci_sff_init_one(pdev, ppi, &rz1000_sht, NULL, 0);
- printk(KERN_ERR DRV_NAME ": failed to disable read-ahead on chipset..\n");
+ dev_err(&pdev->dev, "failed to disable read-ahead on chipset.\n");
/* Not safe to use so skip */
return -ENODEV;
}
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
index b602e303fb54..e410fe44177f 100644
--- a/drivers/ata/pata_serverworks.c
+++ b/drivers/ata/pata_serverworks.c
@@ -286,13 +286,13 @@ static int serverworks_fixup_osb4(struct pci_dev *pdev)
pci_read_config_dword(isa_dev, 0x64, &reg);
reg &= ~0x00002000; /* disable 600ns interrupt mask */
if (!(reg & 0x00004000))
- printk(KERN_DEBUG DRV_NAME ": UDMA not BIOS enabled.\n");
+ dev_info(&pdev->dev, "UDMA not BIOS enabled.\n");
reg |= 0x00004000; /* enable UDMA/33 support */
pci_write_config_dword(isa_dev, 0x64, reg);
pci_dev_put(isa_dev);
return 0;
}
- printk(KERN_WARNING DRV_NAME ": Unable to find bridge.\n");
+ dev_warn(&pdev->dev, "Unable to find bridge.\n");
return -ENODEV;
}
diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
index 43215a664b96..0da58ce20d82 100644
--- a/drivers/ata/pata_sil680.c
+++ b/drivers/ata/pata_sil680.c
@@ -212,7 +212,6 @@ static void sil680_set_dmamode(struct ata_port *ap, struct ata_device *adev)
static void sil680_sff_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf)
{
- DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
iowrite8(tf->command, ap->ioaddr.command_addr);
ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
@@ -309,17 +308,17 @@ static u8 sil680_init_chip(struct pci_dev *pdev, int *try_mmio)
switch (tmpbyte & 0x30) {
case 0x00:
- printk(KERN_INFO "sil680: 100MHz clock.\n");
+ dev_info(&pdev->dev, "sil680: 100MHz clock.\n");
break;
case 0x10:
- printk(KERN_INFO "sil680: 133MHz clock.\n");
+ dev_info(&pdev->dev, "sil680: 133MHz clock.\n");
break;
case 0x20:
- printk(KERN_INFO "sil680: Using PCI clock.\n");
+ dev_info(&pdev->dev, "sil680: Using PCI clock.\n");
break;
/* This last case is _NOT_ ok */
case 0x30:
- printk(KERN_ERR "sil680: Clock disabled ?\n");
+ dev_err(&pdev->dev, "sil680: Clock disabled ?\n");
}
return tmpbyte & 0x30;
}
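
One side effect worth noting: dev_info(&pdev->dev, ...) already prefixes the driver name, so the literal "sil680: " kept inside these format strings now appears twice in the log. A follow-up cleanup could drop it; a sketch, not part of this patch:

#include <linux/pci.h>

static void my_report_clock(struct pci_dev *pdev)
{
	/* "sil680" and the PCI address come from dev_info() itself */
	dev_info(&pdev->dev, "100MHz clock.\n");
}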
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 475032048984..439ca882f73c 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -414,12 +414,6 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
- VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
- tf->hob_feature,
- tf->hob_nsect,
- tf->hob_lbal,
- tf->hob_lbam,
- tf->hob_lbah);
}
if (is_addr) {
@@ -428,12 +422,6 @@ static void via_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
iowrite8(tf->lbal, ioaddr->lbal_addr);
iowrite8(tf->lbam, ioaddr->lbam_addr);
iowrite8(tf->lbah, ioaddr->lbah_addr);
- VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
- tf->feature,
- tf->nsect,
- tf->lbal,
- tf->lbam,
- tf->lbah);
}
ata_wait_idle(ap);
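
The removed VPRINTK blocks dumped every taskfile field by hand. The series' replacement is the ata_tf_load tracepoint, which records the same fields generically; a sketch of the call pattern used later in this diff (hypothetical wrapper name):

#include <linux/libata.h>
#include <trace/events/libata.h>

static void my_load_tf(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	trace_ata_tf_load(ap, &qc->tf);	/* captures all taskfile fields */
	ap->ops->sff_tf_load(ap, &qc->tf);
}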
diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
index 5db55e1e2a61..35b823ac20c9 100644
--- a/drivers/ata/pdc_adma.c
+++ b/drivers/ata/pdc_adma.c
@@ -284,9 +284,6 @@ static int adma_fill_sg(struct ata_queued_cmd *qc)
*(__le32 *)(buf + i) =
(pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
i += 4;
-
- VPRINTK("PRD[%u] = (0x%lX, 0x%X)\n", i/4,
- (unsigned long)addr, len);
}
if (likely(last_buf))
@@ -302,8 +299,6 @@ static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
u32 pkt_dma = (u32)pp->pkt_dma;
int i = 0;
- VPRINTK("ENTER\n");
-
adma_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
return AC_ERR_OK;
@@ -355,22 +350,6 @@ static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc)
i = adma_fill_sg(qc);
wmb(); /* flush PRDs and pkt to memory */
-#if 0
- /* dump out CPB + PRDs for debug */
- {
- int j, len = 0;
- static char obuf[2048];
- for (j = 0; j < i; ++j) {
- len += sprintf(obuf+len, "%02x ", buf[j]);
- if ((j & 7) == 7) {
- printk("%s\n", obuf);
- len = 0;
- }
- }
- if (len)
- printk("%s\n", obuf);
- }
-#endif
return AC_ERR_OK;
}
@@ -379,8 +358,6 @@ static inline void adma_packet_start(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
void __iomem *chan = ADMA_PORT_REGS(ap);
- VPRINTK("ENTER, ap %p\n", ap);
-
/* fire up the ADMA engine */
writew(aPIOMD4 | aGO, chan + ADMA_CONTROL);
}
@@ -475,8 +452,6 @@ static inline unsigned int adma_intr_mmio(struct ata_host *host)
u8 status = ata_sff_check_status(ap);
if ((status & ATA_BUSY))
continue;
- DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
- ap->print_id, qc->tf.protocol, status);
/* complete taskfile transaction */
pp->state = adma_state_idle;
@@ -504,14 +479,10 @@ static irqreturn_t adma_intr(int irq, void *dev_instance)
struct ata_host *host = dev_instance;
unsigned int handled = 0;
- VPRINTK("ENTER\n");
-
spin_lock(&host->lock);
handled = adma_intr_pkt(host) | adma_intr_mmio(host);
spin_unlock(&host->lock);
- VPRINTK("EXIT\n");
-
return IRQ_RETVAL(handled);
}
@@ -547,8 +518,8 @@ static int adma_port_start(struct ata_port *ap)
return -ENOMEM;
/* paranoia? */
if ((pp->pkt_dma & 7) != 0) {
- printk(KERN_ERR "bad alignment for pp->pkt_dma: %08x\n",
- (u32)pp->pkt_dma);
+ ata_port_err(ap, "bad alignment for pp->pkt_dma: %08x\n",
+ (u32)pp->pkt_dma);
return -ENOMEM;
}
ap->private_data = pp;
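
Besides dropping the ENTER/EXIT noise, pdc_adma switches its one real error message to ata_port_err(), which tags output with the port number ("ataN: ..."). The helper family, sketched with hypothetical names:

#include <linux/errno.h>
#include <linux/libata.h>

static int my_check_alignment(struct ata_port *ap, dma_addr_t pkt_dma)
{
	if (pkt_dma & 7) {	/* the packet must be 8-byte aligned */
		ata_port_err(ap, "bad alignment for pkt_dma: %08x\n",
			     (u32)pkt_dma);
		return -ENOMEM;
	}
	return 0;
}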
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 338c2e50f759..bec33d781ae0 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -14,15 +14,6 @@
* COPYRIGHT (C) 2005 SYNOPSYS, INC. ALL RIGHTS RESERVED
*/
-#ifdef CONFIG_SATA_DWC_DEBUG
-#define DEBUG
-#endif
-
-#ifdef CONFIG_SATA_DWC_VDEBUG
-#define VERBOSE_DEBUG
-#define DEBUG_NCQ
-#endif
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -34,6 +25,7 @@
#include <linux/phy/phy.h>
#include <linux/libata.h>
#include <linux/slab.h>
+#include <trace/events/libata.h>
#include "libata.h"
@@ -182,10 +174,8 @@ enum {
* Prototypes
*/
static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag);
-static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
- u32 check_status);
-static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status);
-static void sata_dwc_port_stop(struct ata_port *ap);
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc);
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap);
static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag);
#ifdef CONFIG_SATA_DWC_OLD_DMA
@@ -215,9 +205,10 @@ static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
{
struct sata_dwc_device *hsdev = hsdevp->hsdev;
struct dw_dma_slave *dws = &sata_dwc_dma_dws;
+ struct device *dev = hsdev->dev;
dma_cap_mask_t mask;
- dws->dma_dev = hsdev->dev;
+ dws->dma_dev = dev;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -225,8 +216,7 @@ static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
/* Acquire DMA channel */
hsdevp->chan = dma_request_channel(mask, sata_dwc_dma_filter, hsdevp);
if (!hsdevp->chan) {
- dev_err(hsdev->dev, "%s: dma channel unavailable\n",
- __func__);
+ dev_err(dev, "%s: dma channel unavailable\n", __func__);
return -EAGAIN;
}
@@ -236,26 +226,25 @@ static int sata_dwc_dma_get_channel_old(struct sata_dwc_device_port *hsdevp)
static int sata_dwc_dma_init_old(struct platform_device *pdev,
struct sata_dwc_device *hsdev)
{
- struct device_node *np = pdev->dev.of_node;
- struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
- hsdev->dma = devm_kzalloc(&pdev->dev, sizeof(*hsdev->dma), GFP_KERNEL);
+ hsdev->dma = devm_kzalloc(dev, sizeof(*hsdev->dma), GFP_KERNEL);
if (!hsdev->dma)
return -ENOMEM;
- hsdev->dma->dev = &pdev->dev;
+ hsdev->dma->dev = dev;
hsdev->dma->id = pdev->id;
/* Get SATA DMA interrupt number */
hsdev->dma->irq = irq_of_parse_and_map(np, 1);
if (hsdev->dma->irq == NO_IRQ) {
- dev_err(&pdev->dev, "no SATA DMA irq\n");
+ dev_err(dev, "no SATA DMA irq\n");
return -ENODEV;
}
/* Get physical SATA DMA register base address */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- hsdev->dma->regs = devm_ioremap_resource(&pdev->dev, res);
+ hsdev->dma->regs = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(hsdev->dma->regs))
return PTR_ERR(hsdev->dma->regs);
@@ -297,35 +286,6 @@ static const char *get_prot_descript(u8 protocol)
}
}
-static const char *get_dma_dir_descript(int dma_dir)
-{
- switch ((enum dma_data_direction)dma_dir) {
- case DMA_BIDIRECTIONAL:
- return "bidirectional";
- case DMA_TO_DEVICE:
- return "to device";
- case DMA_FROM_DEVICE:
- return "from device";
- default:
- return "none";
- }
-}
-
-static void sata_dwc_tf_dump(struct ata_port *ap, struct ata_taskfile *tf)
-{
- dev_vdbg(ap->dev,
- "taskfile cmd: 0x%02x protocol: %s flags: 0x%lx device: %x\n",
- tf->command, get_prot_descript(tf->protocol), tf->flags,
- tf->device);
- dev_vdbg(ap->dev,
- "feature: 0x%02x nsect: 0x%x lbal: 0x%x lbam: 0x%x lbah: 0x%x\n",
- tf->feature, tf->nsect, tf->lbal, tf->lbam, tf->lbah);
- dev_vdbg(ap->dev,
- "hob_feature: 0x%02x hob_nsect: 0x%x hob_lbal: 0x%x hob_lbam: 0x%x hob_lbah: 0x%x\n",
- tf->hob_feature, tf->hob_nsect, tf->hob_lbal, tf->hob_lbam,
- tf->hob_lbah);
-}
-
static void dma_dwc_xfer_done(void *hsdev_instance)
{
unsigned long flags;
@@ -355,7 +315,7 @@ static void dma_dwc_xfer_done(void *hsdev_instance)
}
if ((hsdevp->dma_interrupt_count % 2) == 0)
- sata_dwc_dma_xfer_complete(ap, 1);
+ sata_dwc_dma_xfer_complete(ap);
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -553,6 +513,7 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
* active tag. It is the tag that matches the command about to
* be completed.
*/
+ trace_ata_bmdma_start(ap, &qc->tf, tag);
qc->ap->link.active_tag = tag;
sata_dwc_bmdma_start_by_tag(qc, tag);
@@ -586,7 +547,7 @@ static irqreturn_t sata_dwc_isr(int irq, void *dev_instance)
if (status & ATA_ERR) {
dev_dbg(ap->dev, "interrupt ATA_ERR (0x%x)\n", status);
- sata_dwc_qc_complete(ap, qc, 1);
+ sata_dwc_qc_complete(ap, qc);
handled = 1;
goto DONE;
}
@@ -611,13 +572,13 @@ DRVSTILLBUSY:
}
if ((hsdevp->dma_interrupt_count % 2) == 0)
- sata_dwc_dma_xfer_complete(ap, 1);
+ sata_dwc_dma_xfer_complete(ap);
} else if (ata_is_pio(qc->tf.protocol)) {
ata_sff_hsm_move(ap, qc, status, 0);
handled = 1;
goto DONE;
} else {
- if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+ if (unlikely(sata_dwc_qc_complete(ap, qc)))
goto DRVSTILLBUSY;
}
@@ -677,7 +638,7 @@ DRVSTILLBUSY:
if (status & ATA_ERR) {
dev_dbg(ap->dev, "%s ATA_ERR (0x%x)\n", __func__,
status);
- sata_dwc_qc_complete(ap, qc, 1);
+ sata_dwc_qc_complete(ap, qc);
handled = 1;
goto DONE;
}
@@ -692,9 +653,9 @@ DRVSTILLBUSY:
dev_warn(ap->dev, "%s: DMA not pending?\n",
__func__);
if ((hsdevp->dma_interrupt_count % 2) == 0)
- sata_dwc_dma_xfer_complete(ap, 1);
+ sata_dwc_dma_xfer_complete(ap);
} else {
- if (unlikely(sata_dwc_qc_complete(ap, qc, 1)))
+ if (unlikely(sata_dwc_qc_complete(ap, qc)))
goto STILLBUSY;
}
continue;
@@ -749,7 +710,7 @@ static void sata_dwc_clear_dmacr(struct sata_dwc_device_port *hsdevp, u8 tag)
}
}
-static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
+static void sata_dwc_dma_xfer_complete(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
@@ -763,17 +724,6 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
return;
}
-#ifdef DEBUG_NCQ
- if (tag > 0) {
- dev_info(ap->dev,
- "%s tag=%u cmd=0x%02x dma dir=%s proto=%s dmacr=0x%08x\n",
- __func__, qc->hw_tag, qc->tf.command,
- get_dma_dir_descript(qc->dma_dir),
- get_prot_descript(qc->tf.protocol),
- sata_dwc_readl(&hsdev->sata_dwc_regs->dmacr));
- }
-#endif
-
if (ata_is_dma(qc->tf.protocol)) {
if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_NONE) {
dev_err(ap->dev,
@@ -783,15 +733,14 @@ static void sata_dwc_dma_xfer_complete(struct ata_port *ap, u32 check_status)
}
hsdevp->dma_pending[tag] = SATA_DWC_DMA_PENDING_NONE;
- sata_dwc_qc_complete(ap, qc, check_status);
+ sata_dwc_qc_complete(ap, qc);
ap->link.active_tag = ATA_TAG_POISON;
} else {
- sata_dwc_qc_complete(ap, qc, check_status);
+ sata_dwc_qc_complete(ap, qc);
}
}
-static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
- u32 check_status)
+static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc)
{
u8 status = 0;
u32 mask = 0x0;
@@ -799,7 +748,6 @@ static int sata_dwc_qc_complete(struct ata_port *ap, struct ata_queued_cmd *qc,
struct sata_dwc_device *hsdev = HSDEV_FROM_AP(ap);
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
hsdev->sactive_queued = 0;
- dev_dbg(ap->dev, "%s checkstatus? %x\n", __func__, check_status);
if (hsdevp->dma_pending[tag] == SATA_DWC_DMA_PENDING_TX)
dev_err(ap->dev, "TX DMA PENDING\n");
@@ -980,9 +928,6 @@ static void sata_dwc_exec_command_by_tag(struct ata_port *ap,
{
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
- dev_dbg(ap->dev, "%s cmd(0x%02x): %s tag=%d\n", __func__, tf->command,
- ata_get_cmd_descript(tf->command), tag);
-
hsdevp->cmd_issued[tag] = cmd_issued;
/*
@@ -1005,12 +950,9 @@ static void sata_dwc_bmdma_setup(struct ata_queued_cmd *qc)
{
u8 tag = qc->hw_tag;
- if (ata_is_ncq(qc->tf.protocol)) {
- dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
- __func__, qc->ap->link.sactive, tag);
- } else {
+ if (!ata_is_ncq(qc->tf.protocol))
tag = 0;
- }
+
sata_dwc_bmdma_setup_by_tag(qc, tag);
}
@@ -1037,12 +979,6 @@ static void sata_dwc_bmdma_start_by_tag(struct ata_queued_cmd *qc, u8 tag)
start_dma = 0;
}
- dev_dbg(ap->dev,
- "%s qc=%p tag: %x cmd: 0x%02x dma_dir: %s start_dma? %x\n",
- __func__, qc, tag, qc->tf.command,
- get_dma_dir_descript(qc->dma_dir), start_dma);
- sata_dwc_tf_dump(ap, &qc->tf);
-
if (start_dma) {
sata_dwc_scr_read(&ap->link, SCR_ERROR, &reg);
if (reg & SATA_DWC_SERROR_ERR_BITS) {
@@ -1067,13 +1003,9 @@ static void sata_dwc_bmdma_start(struct ata_queued_cmd *qc)
{
u8 tag = qc->hw_tag;
- if (ata_is_ncq(qc->tf.protocol)) {
- dev_dbg(qc->ap->dev, "%s: ap->link.sactive=0x%08x tag=%d\n",
- __func__, qc->ap->link.sactive, tag);
- } else {
+ if (!ata_is_ncq(qc->tf.protocol))
tag = 0;
- }
- dev_dbg(qc->ap->dev, "%s\n", __func__);
+
sata_dwc_bmdma_start_by_tag(qc, tag);
}
@@ -1084,16 +1016,6 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct sata_dwc_device_port *hsdevp = HSDEVP_FROM_AP(ap);
-#ifdef DEBUG_NCQ
- if (qc->hw_tag > 0 || ap->link.sactive > 1)
- dev_info(ap->dev,
- "%s ap id=%d cmd(0x%02x)=%s qc tag=%d prot=%s ap active_tag=0x%08x ap sactive=0x%08x\n",
- __func__, ap->print_id, qc->tf.command,
- ata_get_cmd_descript(qc->tf.command),
- qc->hw_tag, get_prot_descript(qc->tf.protocol),
- ap->link.active_tag, ap->link.sactive);
-#endif
-
if (!ata_is_ncq(qc->tf.protocol))
tag = 0;
@@ -1110,11 +1032,9 @@ static unsigned int sata_dwc_qc_issue(struct ata_queued_cmd *qc)
sactive |= (0x00000001 << tag);
sata_dwc_scr_write(&ap->link, SCR_ACTIVE, sactive);
- dev_dbg(qc->ap->dev,
- "%s: tag=%d ap->link.sactive = 0x%08x sactive=0x%08x\n",
- __func__, tag, qc->ap->link.sactive, sactive);
-
+ trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf);
+ trace_ata_exec_command(ap, &qc->tf, tag);
sata_dwc_exec_command_by_tag(ap, &qc->tf, tag,
SATA_DWC_CMD_ISSUED_PEND);
} else {
@@ -1207,6 +1127,8 @@ static const struct ata_port_info sata_dwc_port_info[] = {
static int sata_dwc_probe(struct platform_device *ofdev)
{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = dev->of_node;
struct sata_dwc_device *hsdev;
u32 idr, versionr;
char *ver = (char *)&versionr;
@@ -1216,23 +1138,21 @@ static int sata_dwc_probe(struct platform_device *ofdev)
struct ata_host *host;
struct ata_port_info pi = sata_dwc_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
- struct device_node *np = ofdev->dev.of_node;
struct resource *res;
/* Allocate DWC SATA device */
- host = ata_host_alloc_pinfo(&ofdev->dev, ppi, SATA_DWC_MAX_PORTS);
- hsdev = devm_kzalloc(&ofdev->dev, sizeof(*hsdev), GFP_KERNEL);
+ host = ata_host_alloc_pinfo(dev, ppi, SATA_DWC_MAX_PORTS);
+ hsdev = devm_kzalloc(dev, sizeof(*hsdev), GFP_KERNEL);
if (!host || !hsdev)
return -ENOMEM;
host->private_data = hsdev;
/* Ioremap SATA registers */
- res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&ofdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
- dev_dbg(&ofdev->dev, "ioremap done for SATA register address\n");
+ dev_dbg(dev, "ioremap done for SATA register address\n");
/* Synopsys DWC SATA specific Registers */
hsdev->sata_dwc_regs = base + SATA_DWC_REG_OFFSET;
@@ -1246,11 +1166,10 @@ static int sata_dwc_probe(struct platform_device *ofdev)
/* Read the ID and Version Registers */
idr = sata_dwc_readl(&hsdev->sata_dwc_regs->idr);
versionr = sata_dwc_readl(&hsdev->sata_dwc_regs->versionr);
- dev_notice(&ofdev->dev, "id %d, controller version %c.%c%c\n",
- idr, ver[0], ver[1], ver[2]);
+ dev_notice(dev, "id %d, controller version %c.%c%c\n", idr, ver[0], ver[1], ver[2]);
/* Save dev for later use in dev_xxx() routines */
- hsdev->dev = &ofdev->dev;
+ hsdev->dev = dev;
/* Enable SATA Interrupts */
sata_dwc_enable_interrupts(hsdev);
@@ -1258,7 +1177,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
/* Get SATA interrupt number */
irq = irq_of_parse_and_map(np, 0);
if (irq == NO_IRQ) {
- dev_err(&ofdev->dev, "no SATA DMA irq\n");
+ dev_err(dev, "no SATA DMA irq\n");
return -ENODEV;
}
@@ -1270,7 +1189,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
}
#endif
- hsdev->phy = devm_phy_optional_get(hsdev->dev, "sata-phy");
+ hsdev->phy = devm_phy_optional_get(dev, "sata-phy");
if (IS_ERR(hsdev->phy))
return PTR_ERR(hsdev->phy);
@@ -1285,7 +1204,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
*/
err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
if (err)
- dev_err(&ofdev->dev, "failed to activate host");
+ dev_err(dev, "failed to activate host");
return 0;
@@ -1309,7 +1228,7 @@ static int sata_dwc_remove(struct platform_device *ofdev)
sata_dwc_dma_exit_old(hsdev);
#endif
- dev_dbg(&ofdev->dev, "done\n");
+ dev_dbg(dev, "done\n");
return 0;
}
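
sata_dwc combines the logging conversion with two mechanical cleanups: caching &pdev->dev in a struct device *dev local, and folding the platform_get_resource() + devm_ioremap_resource() pair into devm_platform_ioremap_resource(). Roughly (hypothetical names):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int my_map_regs(struct platform_device *pdev, void __iomem **regs)
{
	struct device *dev = &pdev->dev;	/* shortens every dev_*() call */

	*regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(*regs))
		return PTR_ERR(*regs);

	dev_dbg(dev, "registers mapped\n");
	return 0;
}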
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 3b31a4f596d8..556034a15430 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -221,10 +221,10 @@ enum {
* 4 Dwords per command slot, command header size == 64 Dwords.
*/
struct cmdhdr_tbl_entry {
- u32 cda;
- u32 prde_fis_len;
- u32 ttl;
- u32 desc_info;
+ __le32 cda;
+ __le32 prde_fis_len;
+ __le32 ttl;
+ __le32 desc_info;
};
/*
@@ -246,8 +246,10 @@ enum {
struct command_desc {
u8 cfis[8 * 4];
u8 sfis[8 * 4];
- u8 acmd[4 * 4];
- u8 fill[4 * 4];
+ struct_group(cdb,
+ u8 acmd[4 * 4];
+ u8 fill[4 * 4];
+ );
u32 prdt[SATA_FSL_MAX_PRD_DIRECT * 4];
u32 prdt_indirect[(SATA_FSL_MAX_PRD - SATA_FSL_MAX_PRD_DIRECT) * 4];
};
@@ -257,9 +259,9 @@ struct command_desc {
*/
struct prde {
- u32 dba;
+ __le32 dba;
u8 fill[2 * 4];
- u32 ddc_and_ext;
+ __le32 ddc_and_ext;
};
/*
@@ -311,16 +313,16 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
intr_coalescing_ticks = ticks;
spin_unlock_irqrestore(&host->lock, flags);
- DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
- intr_coalescing_count, intr_coalescing_ticks);
- DPRINTK("ICC register status: (hcr base: %p) = 0x%x\n",
- hcr_base, ioread32(hcr_base + ICC));
+ dev_dbg(host->dev, "interrupt coalescing, count = 0x%x, ticks = %x\n",
+ intr_coalescing_count, intr_coalescing_ticks);
+ dev_dbg(host->dev, "ICC register status: (hcr base: 0x%p) = 0x%x\n",
+ hcr_base, ioread32(hcr_base + ICC));
}
static ssize_t fsl_sata_intr_coalescing_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d %d\n",
+ return sysfs_emit(buf, "%u %u\n",
intr_coalescing_count, intr_coalescing_ticks);
}
@@ -330,10 +332,8 @@ static ssize_t fsl_sata_intr_coalescing_store(struct device *dev,
{
unsigned int coalescing_count, coalescing_ticks;
- if (sscanf(buf, "%d%d",
- &coalescing_count,
- &coalescing_ticks) != 2) {
- printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
+ if (sscanf(buf, "%u%u", &coalescing_count, &coalescing_ticks) != 2) {
+ dev_err(dev, "fsl-sata: wrong parameter format.\n");
return -EINVAL;
}
@@ -355,9 +355,9 @@ static ssize_t fsl_sata_rx_watermark_show(struct device *dev,
spin_lock_irqsave(&host->lock, flags);
rx_watermark = ioread32(csr_base + TRANSCFG);
rx_watermark &= 0x1f;
-
spin_unlock_irqrestore(&host->lock, flags);
- return sprintf(buf, "%d\n", rx_watermark);
+
+ return sysfs_emit(buf, "%u\n", rx_watermark);
}
static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
@@ -371,8 +371,8 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
void __iomem *csr_base = host_priv->csr_base;
u32 temp;
- if (sscanf(buf, "%d", &rx_watermark) != 1) {
- printk(KERN_ERR "fsl-sata: wrong parameter format.\n");
+ if (kstrtouint(buf, 10, &rx_watermark) < 0) {
+ dev_err(dev, "fsl-sata: wrong parameter format.\n");
return -EINVAL;
}
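
The same file's sysfs attributes get modernized alongside the logging: sysfs_emit() replaces sprintf() on the show side (it bounds-checks against PAGE_SIZE), and kstrtouint() replaces an unchecked sscanf() on the store side. A minimal sketch of the pair (hypothetical attribute; a store conventionally returns count):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static unsigned int my_watermark;	/* hypothetical backing variable */

static ssize_t my_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	return sysfs_emit(buf, "%u\n", my_watermark);
}

static ssize_t my_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	if (kstrtouint(buf, 10, &my_watermark) < 0) {
		dev_err(dev, "wrong parameter format.\n");
		return -EINVAL;
	}
	return count;
}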
@@ -380,30 +380,32 @@ static ssize_t fsl_sata_rx_watermark_store(struct device *dev,
temp = ioread32(csr_base + TRANSCFG);
temp &= 0xffffffe0;
iowrite32(temp | rx_watermark, csr_base + TRANSCFG);
-
spin_unlock_irqrestore(&host->lock, flags);
+
return strlen(buf);
}
-static inline unsigned int sata_fsl_tag(unsigned int tag,
+static inline unsigned int sata_fsl_tag(struct ata_port *ap,
+ unsigned int tag,
void __iomem *hcr_base)
{
/* We let libATA core do actual (queue) tag allocation */
if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
- DPRINTK("tag %d invalid : out of range\n", tag);
+ ata_port_dbg(ap, "tag %d invalid : out of range\n", tag);
return 0;
}
if (unlikely((ioread32(hcr_base + CQ)) & (1 << tag))) {
- DPRINTK("tag %d invalid : in use!!\n", tag);
+ ata_port_dbg(ap, "tag %d invalid : in use!!\n", tag);
return 0;
}
return tag;
}
-static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp,
+static void sata_fsl_setup_cmd_hdr_entry(struct ata_port *ap,
+ struct sata_fsl_port_priv *pp,
unsigned int tag, u32 desc_info,
u32 data_xfer_len, u8 num_prde,
u8 fis_len)
@@ -421,11 +423,11 @@ static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp,
pp->cmdslot[tag].ttl = cpu_to_le32(data_xfer_len & ~0x03);
pp->cmdslot[tag].desc_info = cpu_to_le32(desc_info | (tag & 0x1F));
- VPRINTK("cda=0x%x, prde_fis_len=0x%x, ttl=0x%x, di=0x%x\n",
- pp->cmdslot[tag].cda,
- pp->cmdslot[tag].prde_fis_len,
- pp->cmdslot[tag].ttl, pp->cmdslot[tag].desc_info);
-
+ ata_port_dbg(ap, "cda=0x%x, prde_fis_len=0x%x, ttl=0x%x, di=0x%x\n",
+ le32_to_cpu(pp->cmdslot[tag].cda),
+ le32_to_cpu(pp->cmdslot[tag].prde_fis_len),
+ le32_to_cpu(pp->cmdslot[tag].ttl),
+ le32_to_cpu(pp->cmdslot[tag].desc_info));
}
static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
@@ -447,8 +449,6 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
dma_addr_t indirect_ext_segment_paddr;
unsigned int si;
- VPRINTK("SATA FSL : cd = 0x%p, prd = 0x%p\n", cmd_desc, prd);
-
indirect_ext_segment_paddr = cmd_desc_paddr +
SATA_FSL_CMD_DESC_OFFSET_TO_PRDT + SATA_FSL_MAX_PRD_DIRECT * 16;
@@ -456,9 +456,6 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
dma_addr_t sg_addr = sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
- VPRINTK("SATA FSL : fill_sg, sg_addr = 0x%llx, sg_len = %d\n",
- (unsigned long long)sg_addr, sg_len);
-
/* warn if each s/g element is not dword aligned */
if (unlikely(sg_addr & 0x03))
ata_port_err(qc->ap, "s/g addr unaligned : 0x%llx\n",
@@ -469,7 +466,6 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
if (num_prde == (SATA_FSL_MAX_PRD_DIRECT - 1) &&
sg_next(sg) != NULL) {
- VPRINTK("setting indirect prde\n");
prd_ptr_to_indirect_ext = prd;
prd->dba = cpu_to_le32(indirect_ext_segment_paddr);
indirect_ext_segment_sz = 0;
@@ -481,9 +477,6 @@ static unsigned int sata_fsl_fill_sg(struct ata_queued_cmd *qc, void *cmd_desc,
prd->dba = cpu_to_le32(sg_addr);
prd->ddc_and_ext = cpu_to_le32(data_snoop | (sg_len & ~0x03));
- VPRINTK("sg_fill, ttl=%d, dba=0x%x, ddc=0x%x\n",
- ttl_dwords, prd->dba, prd->ddc_and_ext);
-
++num_prde;
++prd;
if (prd_ptr_to_indirect_ext)
@@ -508,7 +501,7 @@ static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
struct sata_fsl_port_priv *pp = ap->private_data;
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
- unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base);
+ unsigned int tag = sata_fsl_tag(ap, qc->hw_tag, hcr_base);
struct command_desc *cd;
u32 desc_info = CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE;
u32 num_prde = 0;
@@ -520,19 +513,11 @@ static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *) &cd->cfis);
- VPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x\n",
- cd->cfis[0], cd->cfis[1], cd->cfis[2]);
-
- if (qc->tf.protocol == ATA_PROT_NCQ) {
- VPRINTK("FPDMA xfer,Sctor cnt[0:7],[8:15] = %d,%d\n",
- cd->cfis[3], cd->cfis[11]);
- }
-
/* setup "ACMD - atapi command" in cmd. desc. if this is ATAPI cmd */
if (ata_is_atapi(qc->tf.protocol)) {
desc_info |= ATAPI_CMD;
- memset((void *)&cd->acmd, 0, 32);
- memcpy((void *)&cd->acmd, qc->cdb, qc->dev->cdb_len);
+ memset(&cd->cdb, 0, sizeof(cd->cdb));
+ memcpy(&cd->cdb, qc->cdb, qc->dev->cdb_len);
}
if (qc->flags & ATA_QCFLAG_DMAMAP)
@@ -543,10 +528,10 @@ static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc)
if (qc->tf.protocol == ATA_PROT_NCQ)
desc_info |= FPDMA_QUEUED_CMD;
- sata_fsl_setup_cmd_hdr_entry(pp, tag, desc_info, ttl_dwords,
+ sata_fsl_setup_cmd_hdr_entry(ap, pp, tag, desc_info, ttl_dwords,
num_prde, 5);
- VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n",
+ ata_port_dbg(ap, "SATA FSL : di = 0x%x, ttl = %d, num_prde = %d\n",
desc_info, ttl_dwords, num_prde);
return AC_ERR_OK;
@@ -557,9 +542,9 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct sata_fsl_host_priv *host_priv = ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
- unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base);
+ unsigned int tag = sata_fsl_tag(ap, qc->hw_tag, hcr_base);
- VPRINTK("xx_qc_issue called,CQ=0x%x,CA=0x%x,CE=0x%x,CC=0x%x\n",
+ ata_port_dbg(ap, "CQ=0x%x,CA=0x%x,CE=0x%x,CC=0x%x\n",
ioread32(CQ + hcr_base),
ioread32(CA + hcr_base),
ioread32(CE + hcr_base), ioread32(CC + hcr_base));
@@ -569,10 +554,10 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
/* Simply queue command to the controller/device */
iowrite32(1 << tag, CQ + hcr_base);
- VPRINTK("xx_qc_issue called, tag=%d, CQ=0x%x, CA=0x%x\n",
+ ata_port_dbg(ap, "tag=%d, CQ=0x%x, CA=0x%x\n",
tag, ioread32(CQ + hcr_base), ioread32(CA + hcr_base));
- VPRINTK("CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n",
+ ata_port_dbg(ap, "CE=0x%x, DE=0x%x, CC=0x%x, CmdStat = 0x%x\n",
ioread32(CE + hcr_base),
ioread32(DE + hcr_base),
ioread32(CC + hcr_base),
@@ -586,7 +571,7 @@ static bool sata_fsl_qc_fill_rtf(struct ata_queued_cmd *qc)
struct sata_fsl_port_priv *pp = qc->ap->private_data;
struct sata_fsl_host_priv *host_priv = qc->ap->host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
- unsigned int tag = sata_fsl_tag(qc->hw_tag, hcr_base);
+ unsigned int tag = sata_fsl_tag(qc->ap, qc->hw_tag, hcr_base);
struct command_desc *cd;
cd = pp->cmdentry + tag;
@@ -613,7 +598,7 @@ static int sata_fsl_scr_write(struct ata_link *link,
return -EINVAL;
}
- VPRINTK("xx_scr_write, reg_in = %d\n", sc_reg);
+ ata_link_dbg(link, "reg_in = %d\n", sc_reg);
iowrite32(val, ssr_base + (sc_reg * 4));
return 0;
@@ -637,7 +622,7 @@ static int sata_fsl_scr_read(struct ata_link *link,
return -EINVAL;
}
- VPRINTK("xx_scr_read, reg_in = %d\n", sc_reg);
+ ata_link_dbg(link, "reg_in = %d\n", sc_reg);
*val = ioread32(ssr_base + (sc_reg * 4));
return 0;
@@ -649,18 +634,18 @@ static void sata_fsl_freeze(struct ata_port *ap)
void __iomem *hcr_base = host_priv->hcr_base;
u32 temp;
- VPRINTK("xx_freeze, CQ=0x%x, CA=0x%x, CE=0x%x, DE=0x%x\n",
+ ata_port_dbg(ap, "CQ=0x%x, CA=0x%x, CE=0x%x, DE=0x%x\n",
ioread32(CQ + hcr_base),
ioread32(CA + hcr_base),
ioread32(CE + hcr_base), ioread32(DE + hcr_base));
- VPRINTK("CmdStat = 0x%x\n",
+ ata_port_dbg(ap, "CmdStat = 0x%x\n",
ioread32(host_priv->csr_base + COMMANDSTAT));
/* disable interrupts on the controller/port */
temp = ioread32(hcr_base + HCONTROL);
iowrite32((temp & ~0x3F), hcr_base + HCONTROL);
- VPRINTK("in xx_freeze : HControl = 0x%x, HStatus = 0x%x\n",
+ ata_port_dbg(ap, "HControl = 0x%x, HStatus = 0x%x\n",
ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
}
@@ -673,7 +658,7 @@ static void sata_fsl_thaw(struct ata_port *ap)
/* ack. any pending IRQs for this controller/port */
temp = ioread32(hcr_base + HSTATUS);
- VPRINTK("xx_thaw, pending IRQs = 0x%x\n", (temp & 0x3F));
+ ata_port_dbg(ap, "pending IRQs = 0x%x\n", (temp & 0x3F));
if (temp & 0x3F)
iowrite32((temp & 0x3F), hcr_base + HSTATUS);
@@ -682,7 +667,7 @@ static void sata_fsl_thaw(struct ata_port *ap)
temp = ioread32(hcr_base + HCONTROL);
iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL);
- VPRINTK("xx_thaw : HControl = 0x%x, HStatus = 0x%x\n",
+ ata_port_dbg(ap, "HControl = 0x%x, HStatus = 0x%x\n",
ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS));
}
@@ -744,8 +729,9 @@ static int sata_fsl_port_start(struct ata_port *ap)
ap->private_data = pp;
- VPRINTK("CHBA = 0x%x, cmdentry_phys = 0x%x\n",
- pp->cmdslot_paddr, pp->cmdentry_paddr);
+ ata_port_dbg(ap, "CHBA = 0x%lx, cmdentry_phys = 0x%lx\n",
+ (unsigned long)pp->cmdslot_paddr,
+ (unsigned long)pp->cmdentry_paddr);
/* Now, update the CHBA register in host controller cmd register set */
iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
@@ -761,9 +747,9 @@ static int sata_fsl_port_start(struct ata_port *ap)
temp = ioread32(hcr_base + HCONTROL);
iowrite32((temp | HCONTROL_ONLINE_PHY_RST), hcr_base + HCONTROL);
- VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
- VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
- VPRINTK("CHBA = 0x%x\n", ioread32(hcr_base + CHBA));
+ ata_port_dbg(ap, "HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+ ata_port_dbg(ap, "HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+ ata_port_dbg(ap, "CHBA = 0x%x\n", ioread32(hcr_base + CHBA));
return 0;
}
@@ -803,16 +789,15 @@ static unsigned int sata_fsl_dev_classify(struct ata_port *ap)
temp = ioread32(hcr_base + SIGNATURE);
- VPRINTK("raw sig = 0x%x\n", temp);
- VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
- VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+ ata_port_dbg(ap, "HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
+ ata_port_dbg(ap, "HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
tf.lbah = (temp >> 24) & 0xff;
tf.lbam = (temp >> 16) & 0xff;
tf.lbal = (temp >> 8) & 0xff;
tf.nsect = temp & 0xff;
- return ata_dev_classify(&tf);
+ return ata_port_classify(ap, &tf);
}
static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class,
@@ -825,8 +810,6 @@ static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class,
int i = 0;
unsigned long start_jiffies;
- DPRINTK("in xx_hardreset\n");
-
try_offline_again:
/*
* Force host controller to go off-line, aborting current operations
@@ -852,9 +835,10 @@ try_offline_again:
goto try_offline_again;
}
- DPRINTK("hardreset, controller off-lined\n");
- VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
- VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+ ata_port_dbg(ap, "hardreset, controller off-lined\n"
+ "HStatus = 0x%x HControl = 0x%x\n",
+ ioread32(hcr_base + HSTATUS),
+ ioread32(hcr_base + HCONTROL));
/*
* PHY reset should remain asserted for at least 1ms
@@ -882,9 +866,10 @@ try_offline_again:
goto err;
}
- DPRINTK("hardreset, controller off-lined & on-lined\n");
- VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
- VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+ ata_port_dbg(ap, "controller off-lined & on-lined\n"
+ "HStatus = 0x%x HControl = 0x%x\n",
+ ioread32(hcr_base + HSTATUS),
+ ioread32(hcr_base + HCONTROL));
/*
* First, wait for the PHYRDY change to occur before waiting for
@@ -941,10 +926,7 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
u8 *cfis;
u32 Serror;
- DPRINTK("in xx_softreset\n");
-
if (ata_link_offline(link)) {
- DPRINTK("PHY reports no device\n");
*class = ATA_DEV_NONE;
return 0;
}
@@ -957,19 +939,17 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
* reached here, we can send a command to the target device
*/
- DPRINTK("Sending SRST/device reset\n");
-
ata_tf_init(link->device, &tf);
cfis = (u8 *) &pp->cmdentry->cfis;
/* device reset/SRST is a control register update FIS, uses tag0 */
- sata_fsl_setup_cmd_hdr_entry(pp, 0,
+ sata_fsl_setup_cmd_hdr_entry(ap, pp, 0,
SRST_CMD | CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE, 0, 0, 5);
tf.ctl |= ATA_SRST; /* setup SRST bit in taskfile control reg */
ata_tf_to_fis(&tf, pmp, 0, cfis);
- DPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n",
+ ata_port_dbg(ap, "Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n",
cfis[0], cfis[1], cfis[2], cfis[3]);
/*
@@ -977,7 +957,7 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
* other commands are active on the controller/device
*/
- DPRINTK("@Softreset, CQ = 0x%x, CA = 0x%x, CC = 0x%x\n",
+ ata_port_dbg(ap, "CQ = 0x%x, CA = 0x%x, CC = 0x%x\n",
ioread32(CQ + hcr_base),
ioread32(CA + hcr_base), ioread32(CC + hcr_base));
@@ -990,15 +970,16 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
if (temp & 0x1) {
ata_port_warn(ap, "ATA_SRST issue failed\n");
- DPRINTK("Softreset@5000,CQ=0x%x,CA=0x%x,CC=0x%x\n",
+ ata_port_dbg(ap, "Softreset@5000,CQ=0x%x,CA=0x%x,CC=0x%x\n",
ioread32(CQ + hcr_base),
ioread32(CA + hcr_base), ioread32(CC + hcr_base));
sata_fsl_scr_read(&ap->link, SCR_ERROR, &Serror);
- DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
- DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
- DPRINTK("Serror = 0x%x\n", Serror);
+ ata_port_dbg(ap, "HStatus = 0x%x HControl = 0x%x Serror = 0x%x\n",
+ ioread32(hcr_base + HSTATUS),
+ ioread32(hcr_base + HCONTROL),
+ Serror);
goto err;
}
@@ -1012,8 +993,9 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
* using ATA signature D2H register FIS to the host controller.
*/
- sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE,
- 0, 0, 5);
+ sata_fsl_setup_cmd_hdr_entry(ap, pp, 0,
+ CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE,
+ 0, 0, 5);
tf.ctl &= ~ATA_SRST; /* 2nd H2D Ctl. register FIS */
ata_tf_to_fis(&tf, pmp, 0, cfis);
@@ -1030,8 +1012,6 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
*/
iowrite32(0x01, CC + hcr_base); /* We know it will be cmd#0 always */
- DPRINTK("SATA FSL : Now checking device signature\n");
-
*class = ATA_DEV_NONE;
/* Verify if SStatus indicates device presence */
@@ -1045,9 +1025,8 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
*class = sata_fsl_dev_classify(ap);
- DPRINTK("class = %d\n", *class);
- VPRINTK("ccreg = 0x%x\n", ioread32(hcr_base + CC));
- VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE));
+ ata_port_dbg(ap, "ccreg = 0x%x\n", ioread32(hcr_base + CC));
+ ata_port_dbg(ap, "cereg = 0x%x\n", ioread32(hcr_base + CE));
}
return 0;
@@ -1058,10 +1037,7 @@ err:
static void sata_fsl_error_handler(struct ata_port *ap)
{
-
- DPRINTK("in xx_error_handler\n");
sata_pmp_error_handler(ap);
-
}
static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc)
@@ -1102,7 +1078,7 @@ static void sata_fsl_error_intr(struct ata_port *ap)
if (unlikely(SError & 0xFFFF0000))
sata_fsl_scr_write(&ap->link, SCR_ERROR, SError);
- DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n",
+ ata_port_dbg(ap, "hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n",
hstatus, cereg, ioread32(hcr_base + DE), SError);
/* handle fatal errors */
@@ -1119,7 +1095,7 @@ static void sata_fsl_error_intr(struct ata_port *ap)
/* Handle PHYRDY change notification */
if (hstatus & INT_ON_PHYRDY_CHG) {
- DPRINTK("SATA FSL: PHYRDY change indication\n");
+ ata_port_dbg(ap, "PHYRDY change indication\n");
/* Setup a soft-reset EH action */
ata_ehi_hotplugged(ehi);
@@ -1140,7 +1116,7 @@ static void sata_fsl_error_intr(struct ata_port *ap)
*/
abort = 1;
- DPRINTK("single device error, CE=0x%x, DE=0x%x\n",
+ ata_port_dbg(ap, "single device error, CE=0x%x, DE=0x%x\n",
ioread32(hcr_base + CE), ioread32(hcr_base + DE));
/* find out the offending link and qc */
@@ -1245,18 +1221,18 @@ static void sata_fsl_host_intr(struct ata_port *ap)
}
if (unlikely(SError & 0xFFFF0000)) {
- DPRINTK("serror @host_intr : 0x%x\n", SError);
+ ata_port_dbg(ap, "serror @host_intr : 0x%x\n", SError);
sata_fsl_error_intr(ap);
}
if (unlikely(hstatus & status_mask)) {
- DPRINTK("error interrupt!!\n");
+ ata_port_dbg(ap, "error interrupt!!\n");
sata_fsl_error_intr(ap);
return;
}
- VPRINTK("Status of all queues :\n");
- VPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%llx\n",
+ ata_port_dbg(ap, "Status of all queues :\n");
+ ata_port_dbg(ap, "done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%llx\n",
done_mask,
ioread32(hcr_base + CA),
ioread32(hcr_base + CE),
@@ -1268,15 +1244,13 @@ static void sata_fsl_host_intr(struct ata_port *ap)
/* clear CC bit, this will also complete the interrupt */
iowrite32(done_mask, hcr_base + CC);
- DPRINTK("Status of all queues :\n");
- DPRINTK("done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x\n",
+ ata_port_dbg(ap, "Status of all queues: done_mask/CC = 0x%x, CA = 0x%x, CE=0x%x\n",
done_mask, ioread32(hcr_base + CA),
ioread32(hcr_base + CE));
for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) {
if (done_mask & (1 << i))
- DPRINTK
- ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n",
+ ata_port_dbg(ap, "completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n",
i, ioread32(hcr_base + CC),
ioread32(hcr_base + CA));
}
@@ -1287,7 +1261,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
iowrite32(1, hcr_base + CC);
qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL);
- DPRINTK("completing non-ncq cmd, CC=0x%x\n",
+ ata_port_dbg(ap, "completing non-ncq cmd, CC=0x%x\n",
ioread32(hcr_base + CC));
if (qc) {
@@ -1295,7 +1269,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
}
} else {
/* Spurious Interrupt!! */
- DPRINTK("spurious interrupt!!, CC = 0x%x\n",
+ ata_port_dbg(ap, "spurious interrupt!!, CC = 0x%x\n",
ioread32(hcr_base + CC));
iowrite32(done_mask, hcr_base + CC);
return;
@@ -1315,8 +1289,6 @@ static irqreturn_t sata_fsl_interrupt(int irq, void *dev_instance)
interrupt_enables = ioread32(hcr_base + HSTATUS);
interrupt_enables &= 0x3F;
- DPRINTK("interrupt status 0x%x\n", interrupt_enables);
-
if (!interrupt_enables)
return IRQ_NONE;
@@ -1369,7 +1341,7 @@ static int sata_fsl_init_controller(struct ata_host *host)
iowrite32((temp & ~0x3F), hcr_base + HCONTROL);
/* Disable interrupt coalescing control(icc), for the moment */
- DPRINTK("icc = 0x%x\n", ioread32(hcr_base + ICC));
+ dev_dbg(host->dev, "icc = 0x%x\n", ioread32(hcr_base + ICC));
iowrite32(0x01000000, hcr_base + ICC);
/* clear error registers, SError is cleared by libATA */
@@ -1388,8 +1360,8 @@ static int sata_fsl_init_controller(struct ata_host *host)
* callback, that should also initiate the OOB, COMINIT sequence
*/
- DPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
- DPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
+ dev_dbg(host->dev, "HStatus = 0x%x HControl = 0x%x\n",
+ ioread32(hcr_base + HSTATUS), ioread32(hcr_base + HCONTROL));
return 0;
}
@@ -1406,8 +1378,7 @@ static void sata_fsl_host_stop(struct ata_host *host)
* scsi mid-layer and libata interface structures
*/
static struct scsi_host_template sata_fsl_sht = {
- ATA_NCQ_SHT("sata_fsl"),
- .can_queue = SATA_FSL_QUEUE_DEPTH,
+ ATA_NCQ_SHT_QD("sata_fsl", SATA_FSL_QUEUE_DEPTH),
.sg_tablesize = SATA_FSL_MAX_PRD_USABLE,
.dma_boundary = ATA_DMA_BOUNDARY,
};
@@ -1478,9 +1449,8 @@ static int sata_fsl_probe(struct platform_device *ofdev)
iowrite32(temp | TRANSCFG_RX_WATER_MARK, csr_base + TRANSCFG);
}
- DPRINTK("@reset i/o = 0x%x\n", ioread32(csr_base + TRANSCFG));
- DPRINTK("sizeof(cmd_desc) = %d\n", sizeof(struct command_desc));
- DPRINTK("sizeof(#define cmd_desc) = %d\n", SATA_FSL_CMD_DESC_SIZE);
+ dev_dbg(&ofdev->dev, "@reset i/o = 0x%x\n",
+ ioread32(csr_base + TRANSCFG));
host_priv = kzalloc(sizeof(struct sata_fsl_host_priv), GFP_KERNEL);
if (!host_priv)
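
sata_fsl also picks up annotations that are easy to miss among the log changes: the hardware descriptor fields become __le32 so sparse can flag missing byte swaps, every access goes through cpu_to_le32()/le32_to_cpu(), and struct_group() bounds the acmd/fill pair so the CDB memset()/memcpy() can be checked by FORTIFY_SOURCE. The endianness half, sketched:

#include <linux/types.h>
#include <asm/byteorder.h>

struct my_desc {
	__le32 dba;		/* little-endian as the hardware sees it */
	__le32 ddc;
};

static void my_fill_desc(struct my_desc *d, u32 addr, u32 len)
{
	/* CPU-endian values must be swapped on the way in ... */
	d->dba = cpu_to_le32(addr);
	d->ddc = cpu_to_le32(len & ~0x03);
}

/* ... and on the way out, as the dbg call in the hunk above shows */
static u32 my_read_dba(const struct my_desc *d)
{
	return le32_to_cpu(d->dba);
}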
diff --git a/drivers/ata/sata_gemini.c b/drivers/ata/sata_gemini.c
index f793564f3d78..440a63de20d0 100644
--- a/drivers/ata/sata_gemini.c
+++ b/drivers/ata/sata_gemini.c
@@ -253,12 +253,12 @@ static int gemini_sata_bridge_init(struct sata_gemini *sg)
ret = clk_prepare_enable(sg->sata0_pclk);
if (ret) {
- pr_err("failed to enable SATA0 PCLK\n");
+ dev_err(dev, "failed to enable SATA0 PCLK\n");
return ret;
}
ret = clk_prepare_enable(sg->sata1_pclk);
if (ret) {
- pr_err("failed to enable SATA1 PCLK\n");
+ dev_err(dev, "failed to enable SATA1 PCLK\n");
clk_disable_unprepare(sg->sata0_pclk);
return ret;
}
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index e517bd8822a5..781901151d82 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -488,8 +488,6 @@ static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc)
bool is_data = ata_is_data(qc->tf.protocol);
unsigned int cdb_len = 0;
- VPRINTK("ENTER\n");
-
if (is_atapi)
cdb_len = qc->dev->cdb_len;
@@ -657,7 +655,7 @@ static int inic_hardreset(struct ata_link *link, unsigned int *class,
}
inic_tf_read(ap, &tf);
- *class = ata_dev_classify(&tf);
+ *class = ata_port_classify(ap, &tf);
}
return 0;
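
ata_port_classify() is a helper introduced by this series: it wraps the old ata_dev_classify() and logs the detected class against the port, which is why the hardreset paths above now pass ap. Usage, sketched:

#include <linux/libata.h>

static unsigned int my_classify(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* same taskfile-signature decode, plus a port-tagged debug line */
	return ata_port_classify(ap, tf);
}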
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index cae4c1eab102..53446b997740 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -579,7 +579,7 @@ struct mv_hw_ops {
void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
- int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
+ int (*reset_hc)(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc);
void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
@@ -606,7 +606,7 @@ static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
-static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
@@ -616,14 +616,14 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
-static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
void __iomem *mmio);
-static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+static int mv_soc_reset_hc(struct ata_host *host,
void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
void __iomem *mmio);
@@ -1248,81 +1248,74 @@ static int mv_stop_edma(struct ata_port *ap)
return err;
}
-#ifdef ATA_DEBUG
-static void mv_dump_mem(void __iomem *start, unsigned bytes)
+static void mv_dump_mem(struct device *dev, void __iomem *start, unsigned bytes)
{
- int b, w;
+ int b, w, o;
+ unsigned char linebuf[38];
+
for (b = 0; b < bytes; ) {
- DPRINTK("%p: ", start + b);
- for (w = 0; b < bytes && w < 4; w++) {
- printk("%08x ", readl(start + b));
+ for (w = 0, o = 0; b < bytes && w < 4; w++) {
+ o += snprintf(linebuf + o, sizeof(linebuf) - o,
+ "%08x ", readl(start + b));
b += sizeof(u32);
}
- printk("\n");
+ dev_dbg(dev, "%s: %p: %s\n",
+ __func__, start + b, linebuf);
}
}
-#endif
-#if defined(ATA_DEBUG) || defined(CONFIG_PCI)
+
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
-#ifdef ATA_DEBUG
- int b, w;
- u32 dw;
+ int b, w, o;
+ u32 dw = 0;
+ unsigned char linebuf[38];
+
for (b = 0; b < bytes; ) {
- DPRINTK("%02x: ", b);
- for (w = 0; b < bytes && w < 4; w++) {
+ for (w = 0, o = 0; b < bytes && w < 4; w++) {
(void) pci_read_config_dword(pdev, b, &dw);
- printk("%08x ", dw);
+ o += snprintf(linebuf + o, sizeof(linebuf) - o,
+ "%08x ", dw);
b += sizeof(u32);
}
- printk("\n");
+ dev_dbg(&pdev->dev, "%s: %02x: %s\n",
+ __func__, b, linebuf);
}
-#endif
}
-#endif
-static void mv_dump_all_regs(void __iomem *mmio_base, int port,
+
+static void mv_dump_all_regs(void __iomem *mmio_base,
struct pci_dev *pdev)
{
-#ifdef ATA_DEBUG
- void __iomem *hc_base = mv_hc_base(mmio_base,
- port >> MV_PORT_HC_SHIFT);
+ void __iomem *hc_base;
void __iomem *port_base;
int start_port, num_ports, p, start_hc, num_hcs, hc;
- if (0 > port) {
- start_hc = start_port = 0;
- num_ports = 8; /* shld be benign for 4 port devs */
- num_hcs = 2;
- } else {
- start_hc = port >> MV_PORT_HC_SHIFT;
- start_port = port;
- num_ports = num_hcs = 1;
- }
- DPRINTK("All registers for port(s) %u-%u:\n", start_port,
- num_ports > 1 ? num_ports - 1 : start_port);
+ start_hc = start_port = 0;
+ num_ports = 8; /* should be benign for 4 port devs */
+ num_hcs = 2;
+ dev_dbg(&pdev->dev,
+ "%s: All registers for port(s) %u-%u:\n", __func__,
+ start_port, num_ports > 1 ? num_ports - 1 : start_port);
- if (NULL != pdev) {
- DPRINTK("PCI config space regs:\n");
- mv_dump_pci_cfg(pdev, 0x68);
- }
- DPRINTK("PCI regs:\n");
- mv_dump_mem(mmio_base+0xc00, 0x3c);
- mv_dump_mem(mmio_base+0xd00, 0x34);
- mv_dump_mem(mmio_base+0xf00, 0x4);
- mv_dump_mem(mmio_base+0x1d00, 0x6c);
+ dev_dbg(&pdev->dev, "%s: PCI config space regs:\n", __func__);
+ mv_dump_pci_cfg(pdev, 0x68);
+
+ dev_dbg(&pdev->dev, "%s: PCI regs:\n", __func__);
+ mv_dump_mem(&pdev->dev, mmio_base+0xc00, 0x3c);
+ mv_dump_mem(&pdev->dev, mmio_base+0xd00, 0x34);
+ mv_dump_mem(&pdev->dev, mmio_base+0xf00, 0x4);
+ mv_dump_mem(&pdev->dev, mmio_base+0x1d00, 0x6c);
for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
hc_base = mv_hc_base(mmio_base, hc);
- DPRINTK("HC regs (HC %i):\n", hc);
- mv_dump_mem(hc_base, 0x1c);
+ dev_dbg(&pdev->dev, "%s: HC regs (HC %i):\n", __func__, hc);
+ mv_dump_mem(&pdev->dev, hc_base, 0x1c);
}
for (p = start_port; p < start_port + num_ports; p++) {
port_base = mv_port_base(mmio_base, p);
- DPRINTK("EDMA regs (port %i):\n", p);
- mv_dump_mem(port_base, 0x54);
- DPRINTK("SATA regs (port %i):\n", p);
- mv_dump_mem(port_base+0x300, 0x60);
+ dev_dbg(&pdev->dev, "%s: EDMA regs (port %i):\n", __func__, p);
+ mv_dump_mem(&pdev->dev, port_base, 0x54);
+ dev_dbg(&pdev->dev, "%s: SATA regs (port %i):\n", __func__, p);
+ mv_dump_mem(&pdev->dev, port_base+0x300, 0x60);
}
-#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
@@ -2962,8 +2955,8 @@ static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
dev_err(host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n", err_cause);
- DPRINTK("All regs @ PCI error\n");
- mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
+ dev_dbg(host->dev, "%s: All regs @ PCI error\n", __func__);
+ mv_dump_all_regs(mmio, to_pci_dev(host->dev));
writelfl(0, mmio + hpriv->irq_cause_offset);
@@ -3201,9 +3194,10 @@ static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
}
#undef ZERO
-static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+static int mv5_reset_hc(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc)
{
+ struct mv_host_priv *hpriv = host->private_data;
unsigned int hc, port;
for (hc = 0; hc < n_hc; hc++) {
@@ -3262,7 +3256,7 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
* LOCKING:
* Inherited from caller.
*/
-static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
+static int mv6_reset_hc(struct ata_host *host, void __iomem *mmio,
unsigned int n_hc)
{
void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
@@ -3282,7 +3276,7 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
break;
}
if (!(PCI_MASTER_EMPTY & t)) {
- printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
+ dev_err(host->dev, "PCI master won't flush\n");
rc = 1;
goto done;
}
@@ -3296,7 +3290,7 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
} while (!(GLOB_SFT_RST & t) && (i-- > 0));
if (!(GLOB_SFT_RST & t)) {
- printk(KERN_ERR DRV_NAME ": can't set global reset\n");
+ dev_err(host->dev, "can't set global reset\n");
rc = 1;
goto done;
}
@@ -3310,7 +3304,7 @@ static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
} while ((GLOB_SFT_RST & t) && (i-- > 0));
if (GLOB_SFT_RST & t) {
- printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
+ dev_err(host->dev, "can't clear global reset\n");
rc = 1;
}
done:
@@ -3479,9 +3473,10 @@ static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
#undef ZERO
-static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+static int mv_soc_reset_hc(struct ata_host *host,
void __iomem *mmio, unsigned int n_hc)
{
+ struct mv_host_priv *hpriv = host->private_data;
unsigned int port;
for (port = 0; port < hpriv->n_ports; port++)
@@ -3723,11 +3718,6 @@ static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
/* unmask all non-transient EDMA error interrupts */
writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
-
- VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
- readl(port_mmio + EDMA_CFG),
- readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
- readl(port_mmio + EDMA_ERR_IRQ_MASK));
}
static unsigned int mv_in_pcix_mode(struct ata_host *host)
@@ -3859,11 +3849,11 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
*
* Warn the user, lest they think we're just buggy.
*/
- printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
+ dev_warn(&pdev->dev, "Highpoint RocketRAID"
" BIOS CORRUPTS DATA on all attached drives,"
" regardless of if/how they are configured."
" BEWARE!\n");
- printk(KERN_WARNING DRV_NAME ": For data safety, do not"
+ dev_warn(&pdev->dev, "For data safety, do not"
" use sectors 8-9 on \"Legacy\" drives,"
" and avoid the final two gigabytes on"
" all RocketRAID BIOS initialized drives.\n");
@@ -3954,7 +3944,7 @@ static int mv_init_host(struct ata_host *host)
if (hpriv->ops->read_preamp)
hpriv->ops->read_preamp(hpriv, port, mmio);
- rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
+ rc = hpriv->ops->reset_hc(host, mmio, n_hc);
if (rc)
goto done;
@@ -3972,7 +3962,7 @@ static int mv_init_host(struct ata_host *host)
for (hc = 0; hc < n_hc; hc++) {
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
- VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
+ dev_dbg(host->dev, "HC%i: HC config=0x%08x HC IRQ cause "
"(before clear)=0x%08x\n", hc,
readl(hc_mmio + HC_CFG),
readl(hc_mmio + HC_IRQ_CAUSE));
@@ -4270,7 +4260,7 @@ static int mv_platform_resume(struct platform_device *pdev)
/* initialize adapter */
ret = mv_init_host(host);
if (ret) {
- printk(KERN_ERR DRV_NAME ": Error during HW init\n");
+ dev_err(&pdev->dev, "Error during HW init\n");
return ret;
}
ata_host_resume(host);
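
sata_mv keeps its register dumps but rebuilds them from printk continuation lines into snprintf()-assembled lines emitted through dev_dbg(). A shorter alternative, sketched under the assumption that a small bounce buffer is acceptable (names and sizes here are hypothetical, not from the patch):

#include <linux/io.h>
#include <linux/minmax.h>
#include <linux/printk.h>

static void my_dump_regs(void __iomem *start, unsigned int bytes)
{
	u32 buf[16];
	unsigned int n = min_t(unsigned int, bytes, sizeof(buf));

	memcpy_fromio(buf, start, n);	/* never hex-dump __iomem directly */
	print_hex_dump_debug("regs: ", DUMP_PREFIX_OFFSET, 16, 4,
			     buf, n, false);
}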
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 16272c111208..7f14d0d31057 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -31,6 +31,7 @@
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
+#include <trace/events/libata.h>
#define DRV_NAME "sata_nv"
#define DRV_VERSION "3.5"
@@ -808,7 +809,7 @@ static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
struct nv_adma_port_priv *pp = ap->private_data;
u8 flags = pp->cpb[cpb_num].resp_flags;
- VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
+ ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags);
if (unlikely((force_err ||
flags & (NV_CPB_RESP_ATA_ERR |
@@ -1100,8 +1101,6 @@ static int nv_adma_port_start(struct ata_port *ap)
struct pci_dev *pdev = to_pci_dev(dev);
u16 tmp;
- VPRINTK("ENTER\n");
-
/*
* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
* pad buffers.
@@ -1190,7 +1189,6 @@ static void nv_adma_port_stop(struct ata_port *ap)
struct nv_adma_port_priv *pp = ap->private_data;
void __iomem *mmio = pp->ctl_block;
- VPRINTK("ENTER\n");
writew(0, mmio + NV_ADMA_CTL);
}
@@ -1252,8 +1250,6 @@ static void nv_adma_setup_port(struct ata_port *ap)
void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
struct ata_ioports *ioport = &ap->ioaddr;
- VPRINTK("ENTER\n");
-
mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
ioport->cmd_addr = mmio;
@@ -1277,8 +1273,6 @@ static int nv_adma_host_init(struct ata_host *host)
unsigned int i;
u32 tmp32;
- VPRINTK("ENTER\n");
-
/* enable ADMA on the ports */
pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
@@ -1320,8 +1314,6 @@ static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
struct scatterlist *sg;
unsigned int si;
- VPRINTK("ENTER\n");
-
for_each_sg(qc->sg, sg, qc->n_elem, si) {
aprd = (si < 5) ? &cpb->aprd[si] :
&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
@@ -1378,8 +1370,6 @@ static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
if (qc->tf.protocol == ATA_PROT_NCQ)
ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
- VPRINTK("qc->flags = 0x%lx\n", qc->flags);
-
nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
if (qc->flags & ATA_QCFLAG_DMAMAP) {
@@ -1404,8 +1394,6 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
void __iomem *mmio = pp->ctl_block;
int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
- VPRINTK("ENTER\n");
-
/* We can't handle result taskfile with NCQ commands, since
retrieving the taskfile switches us out of ADMA mode and would abort
existing commands. */
@@ -1417,7 +1405,6 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
if (nv_adma_use_reg_mode(qc)) {
/* use ATA register mode */
- VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
(qc->flags & ATA_QCFLAG_DMAMAP));
nv_adma_register_mode(qc->ap);
@@ -1438,8 +1425,6 @@ static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
- DPRINTK("Issued tag %u\n", qc->hw_tag);
-
return 0;
}
@@ -1871,12 +1856,12 @@ static void nv_swncq_host_init(struct ata_host *host)
/* enable swncq */
tmp = readl(mmio + NV_CTL_MCP55);
- VPRINTK("HOST_CTL:0x%X\n", tmp);
+ dev_dbg(&pdev->dev, "HOST_CTL:0x%X\n", tmp);
writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
/* enable irq intr */
tmp = readl(mmio + NV_INT_ENABLE_MCP55);
- VPRINTK("HOST_ENABLE:0x%X\n", tmp);
+ dev_dbg(&pdev->dev, "HOST_ENABLE:0x%X\n", tmp);
writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
/* clear port irq */
@@ -2017,19 +2002,17 @@ static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
if (qc == NULL)
return 0;
- DPRINTK("Enter\n");
-
writel((1 << qc->hw_tag), pp->sactive_block);
pp->last_issue_tag = qc->hw_tag;
pp->dhfis_bits &= ~(1 << qc->hw_tag);
pp->dmafis_bits &= ~(1 << qc->hw_tag);
pp->qc_active |= (0x1 << qc->hw_tag);
+ trace_ata_tf_load(ap, &qc->tf);
ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
+ trace_ata_exec_command(ap, &qc->tf, qc->hw_tag);
ap->ops->sff_exec_command(ap, &qc->tf);
- DPRINTK("Issued tag %u\n", qc->hw_tag);
-
return 0;
}
@@ -2041,8 +2024,6 @@ static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
if (qc->tf.protocol != ATA_PROT_NCQ)
return ata_bmdma_qc_issue(qc);
- DPRINTK("Enter\n");
-
if (!pp->qc_active)
nv_swncq_issue_atacmd(ap, qc);
else
@@ -2087,6 +2068,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
u8 lack_dhfis = 0;
host_stat = ap->ops->bmdma_status(ap);
+ trace_ata_bmdma_status(ap, host_stat);
if (unlikely(host_stat & ATA_DMA_ERR)) {
/* error when transferring data to/from memory */
ata_ehi_clear_desc(ehi);
@@ -2109,7 +2091,7 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
if (!ap->qc_active) {
- DPRINTK("over\n");
+ ata_port_dbg(ap, "over\n");
nv_swncq_pp_reinit(ap);
return 0;
}
@@ -2124,12 +2106,12 @@ static int nv_swncq_sdbfis(struct ata_port *ap)
*/
lack_dhfis = 1;
- DPRINTK("id 0x%x QC: qc_active 0x%llx,"
- "SWNCQ:qc_active 0x%X defer_bits %X "
- "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
- ap->print_id, ap->qc_active, pp->qc_active,
- pp->defer_queue.defer_bits, pp->dhfis_bits,
- pp->dmafis_bits, pp->last_issue_tag);
+ ata_port_dbg(ap, "QC: qc_active 0x%llx,"
+ "SWNCQ:qc_active 0x%X defer_bits %X "
+ "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
+ ap->qc_active, pp->qc_active,
+ pp->defer_queue.defer_bits, pp->dhfis_bits,
+ pp->dmafis_bits, pp->last_issue_tag);
nv_swncq_fis_reinit(ap);
@@ -2169,7 +2151,7 @@ static void nv_swncq_dmafis(struct ata_port *ap)
__ata_bmdma_stop(ap);
tag = nv_swncq_tag(ap);
- DPRINTK("dma setup tag 0x%x\n", tag);
+ ata_port_dbg(ap, "dma setup tag 0x%x\n", tag);
qc = ata_qc_from_tag(ap, tag);
if (unlikely(!qc))
@@ -2237,9 +2219,9 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
if (fis & NV_SWNCQ_IRQ_SDBFIS) {
pp->ncq_flags |= ncq_saw_sdb;
- DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
+ ata_port_dbg(ap, "SWNCQ: qc_active 0x%X "
"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
- ap->print_id, pp->qc_active, pp->dhfis_bits,
+ pp->qc_active, pp->dhfis_bits,
pp->dmafis_bits, readl(pp->sactive_block));
if (nv_swncq_sdbfis(ap) < 0)
goto irq_error;
@@ -2265,7 +2247,7 @@ static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
goto irq_exit;
if (pp->defer_queue.defer_bits) {
- DPRINTK("send next command\n");
+ ata_port_dbg(ap, "send next command\n");
qc = nv_swncq_qc_from_dq(ap);
nv_swncq_issue_atacmd(ap, qc);
}
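
The conversion pattern repeated across these sata_* drivers is mechanical: compile-time VPRINTK()/DPRINTK() calls (gated by ATA_VERBOSE_DEBUG/ATA_DEBUG) become dev_dbg()/ata_port_dbg(), selectable at runtime through dynamic debug, while hot-path taskfile and BMDMA logging moves to the new trace_ata_* tracepoints. A minimal sketch of the replacement (example_port_dbg is a hypothetical name, not from the patch):

static void example_port_dbg(struct ata_port *ap, u32 status)
{
	/* old: DPRINTK("ata%u: status 0x%X\n", ap->print_id, status); */
	ata_port_dbg(ap, "status 0x%X\n", status);	/* port prefix is added for us */
}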
diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
index 7815da8ef9e5..b8465fef2ed2 100644
--- a/drivers/ata/sata_promise.c
+++ b/drivers/ata/sata_promise.c
@@ -596,7 +596,8 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
prd[idx].addr = cpu_to_le32(addr);
prd[idx].flags_len = cpu_to_le32(len & 0xffff);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+ ata_port_dbg(ap, "PRD[%u] = (0x%X, 0x%X)\n",
+ idx, addr, len);
idx++;
sg_len -= len;
@@ -609,17 +610,16 @@ static void pdc_fill_sg(struct ata_queued_cmd *qc)
if (len > SG_COUNT_ASIC_BUG) {
u32 addr;
- VPRINTK("Splitting last PRD.\n");
-
addr = le32_to_cpu(prd[idx - 1].addr);
prd[idx - 1].flags_len = cpu_to_le32(len - SG_COUNT_ASIC_BUG);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx - 1, addr, SG_COUNT_ASIC_BUG);
+ ata_port_dbg(ap, "PRD[%u] = (0x%X, 0x%X)\n",
+ idx - 1, addr, SG_COUNT_ASIC_BUG);
addr = addr + len - SG_COUNT_ASIC_BUG;
len = SG_COUNT_ASIC_BUG;
prd[idx].addr = cpu_to_le32(addr);
prd[idx].flags_len = cpu_to_le32(len);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
+ ata_port_dbg(ap, "PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
idx++;
}
@@ -632,8 +632,6 @@ static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc)
struct pdc_port_priv *pp = qc->ap->private_data;
unsigned int i;
- VPRINTK("ENTER\n");
-
switch (qc->tf.protocol) {
case ATA_PROT_DMA:
pdc_fill_sg(qc);
@@ -922,12 +920,8 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
u32 hotplug_status;
int is_sataii_tx4;
- VPRINTK("ENTER\n");
-
- if (!host || !host->iomap[PDC_MMIO_BAR]) {
- VPRINTK("QUICK EXIT\n");
+ if (!host || !host->iomap[PDC_MMIO_BAR])
return IRQ_NONE;
- }
host_mmio = host->iomap[PDC_MMIO_BAR];
@@ -946,23 +940,18 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
/* reading should also clear interrupts */
mask = readl(host_mmio + PDC_INT_SEQMASK);
- if (mask == 0xffffffff && hotplug_status == 0) {
- VPRINTK("QUICK EXIT 2\n");
+ if (mask == 0xffffffff && hotplug_status == 0)
goto done_irq;
- }
mask &= 0xffff; /* only 16 SEQIDs possible */
- if (mask == 0 && hotplug_status == 0) {
- VPRINTK("QUICK EXIT 3\n");
+ if (mask == 0 && hotplug_status == 0)
goto done_irq;
- }
writel(mask, host_mmio + PDC_INT_SEQMASK);
is_sataii_tx4 = pdc_is_sataii_tx4(host->ports[0]->flags);
for (i = 0; i < host->n_ports; i++) {
- VPRINTK("port %u\n", i);
ap = host->ports[i];
/* check for a plug or unplug event */
@@ -989,8 +978,6 @@ static irqreturn_t pdc_interrupt(int irq, void *dev_instance)
}
}
- VPRINTK("EXIT\n");
-
done_irq:
spin_unlock(&host->lock);
return IRQ_RETVAL(handled);
@@ -1005,8 +992,6 @@ static void pdc_packet_start(struct ata_queued_cmd *qc)
unsigned int port_no = ap->port_no;
u8 seq = (u8) (port_no + 1);
- VPRINTK("ENTER, ap %p\n", ap);
-
writel(0x00000001, host_mmio + (seq * 4));
readl(host_mmio + (seq * 4)); /* flush */
diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
index ef00ab644afb..8ca0810aad26 100644
--- a/drivers/ata/sata_qstor.c
+++ b/drivers/ata/sata_qstor.c
@@ -252,9 +252,6 @@ static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
len = sg_dma_len(sg);
*(__le32 *)prd = cpu_to_le32(len);
prd += sizeof(u64);
-
- VPRINTK("PRD[%u] = (0x%llX, 0x%X)\n", si,
- (unsigned long long)addr, len);
}
return si;
@@ -268,8 +265,6 @@ static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc)
u64 addr;
unsigned int nelem;
- VPRINTK("ENTER\n");
-
qs_enter_reg_mode(qc->ap);
if (qc->tf.protocol != ATA_PROT_DMA)
return AC_ERR_OK;
@@ -304,8 +299,6 @@ static inline void qs_packet_start(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);
- VPRINTK("ENTER, ap %p\n", ap);
-
writeb(QS_CTR0_CLER, chan + QS_CCT_CTR0);
wmb(); /* flush PRDs and pkt to memory */
writel(QS_CCF_RUN_PKT, chan + QS_CCT_CFF);
@@ -374,8 +367,8 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host)
struct qs_port_priv *pp = ap->private_data;
struct ata_queued_cmd *qc;
- DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
- sff1, sff0, port_no, sHST, sDST);
+ dev_dbg(host->dev, "SFF=%08x%08x: sHST=%d sDST=%02x\n",
+ sff1, sff0, sHST, sDST);
handled = 1;
if (!pp || pp->state != qs_state_pkt)
continue;
@@ -435,14 +428,10 @@ static irqreturn_t qs_intr(int irq, void *dev_instance)
unsigned int handled = 0;
unsigned long flags;
- VPRINTK("ENTER\n");
-
spin_lock_irqsave(&host->lock, flags);
handled = qs_intr_pkt(host) | qs_intr_mmio(host);
spin_unlock_irqrestore(&host->lock, flags);
- VPRINTK("EXIT\n");
-
return IRQ_RETVAL(handled);
}
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 44b0ed8f6bb8..3d96b6faa3f0 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -323,8 +323,6 @@ static int sata_rcar_bus_softreset(struct ata_port *ap, unsigned long deadline)
{
struct ata_ioports *ioaddr = &ap->ioaddr;
- DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
-
/* software reset. causes dev0 to be selected */
iowrite32(ap->ctl, ioaddr->ctl_addr);
udelay(20);
@@ -350,7 +348,6 @@ static int sata_rcar_softreset(struct ata_link *link, unsigned int *classes,
devmask |= 1 << 0;
/* issue bus reset */
- DPRINTK("about to softreset, devmask=%x\n", devmask);
rc = sata_rcar_bus_softreset(ap, deadline);
/* if link is occupied, -ENODEV too is an error */
if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
@@ -361,7 +358,6 @@ static int sata_rcar_softreset(struct ata_link *link, unsigned int *classes,
/* determine by signature whether we have ATA or ATAPI devices */
classes[0] = ata_sff_dev_classify(&link->device[0], devmask, &err);
- DPRINTK("classes[0]=%u\n", classes[0]);
return 0;
}
@@ -383,12 +379,6 @@ static void sata_rcar_tf_load(struct ata_port *ap,
iowrite32(tf->hob_lbal, ioaddr->lbal_addr);
iowrite32(tf->hob_lbam, ioaddr->lbam_addr);
iowrite32(tf->hob_lbah, ioaddr->lbah_addr);
- VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
- tf->hob_feature,
- tf->hob_nsect,
- tf->hob_lbal,
- tf->hob_lbam,
- tf->hob_lbah);
}
if (is_addr) {
@@ -397,18 +387,10 @@ static void sata_rcar_tf_load(struct ata_port *ap,
iowrite32(tf->lbal, ioaddr->lbal_addr);
iowrite32(tf->lbam, ioaddr->lbam_addr);
iowrite32(tf->lbah, ioaddr->lbah_addr);
- VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
- tf->feature,
- tf->nsect,
- tf->lbal,
- tf->lbam,
- tf->lbah);
}
- if (tf->flags & ATA_TFLAG_DEVICE) {
+ if (tf->flags & ATA_TFLAG_DEVICE)
iowrite32(tf->device, ioaddr->device_addr);
- VPRINTK("device 0x%X\n", tf->device);
- }
ata_wait_idle(ap);
}
@@ -440,8 +422,6 @@ static void sata_rcar_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
static void sata_rcar_exec_command(struct ata_port *ap,
const struct ata_taskfile *tf)
{
- DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
-
iowrite32(tf->command, ap->ioaddr.command_addr);
ata_sff_pause(ap);
}
@@ -499,7 +479,6 @@ static void sata_rcar_drain_fifo(struct ata_queued_cmd *qc)
count < 65536; count += 2)
ioread32(ap->ioaddr.data_addr);
- /* Can become DEBUG later */
if (count)
ata_port_dbg(ap, "drained %d bytes to clear DRQ\n", count);
}
@@ -543,7 +522,6 @@ static void sata_rcar_bmdma_fill_sg(struct ata_queued_cmd *qc)
prd[si].addr = cpu_to_le32(addr);
prd[si].flags_len = cpu_to_le32(sg_len);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);
}
/* end-of-table flag */
@@ -685,7 +663,7 @@ static void sata_rcar_serr_interrupt(struct ata_port *ap)
if (!serror)
return;
- DPRINTK("SError @host_intr: 0x%x\n", serror);
+ ata_port_dbg(ap, "SError @host_intr: 0x%x\n", serror);
/* first, analyze and record host port events */
ata_ehi_clear_desc(ehi);
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index 75321f1ceba5..3b989a52879d 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -307,7 +307,6 @@ static void sil_fill_sg(struct ata_queued_cmd *qc)
prd->addr = cpu_to_le32(addr);
prd->flags_len = cpu_to_le32(sg_len);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);
last_prd = prd;
prd++;
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index f99ec6f7d7c0..2fef6ce93f07 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -656,8 +656,6 @@ static int sil24_softreset(struct ata_link *link, unsigned int *class,
const char *reason;
int rc;
- DPRINTK("ENTER\n");
-
/* put the port into known state */
if (sil24_init_port(ap)) {
reason = "port not ready";
@@ -680,9 +678,8 @@ static int sil24_softreset(struct ata_link *link, unsigned int *class,
}
sil24_read_tf(ap, 0, &tf);
- *class = ata_dev_classify(&tf);
+ *class = ata_port_classify(ap, &tf);
- DPRINTK("EXIT, class=%u\n", *class);
return 0;
err:
diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
index 4c01190a5e37..6ceec59cb291 100644
--- a/drivers/ata/sata_sx4.c
+++ b/drivers/ata/sata_sx4.c
@@ -78,6 +78,9 @@
#define DRV_NAME "sata_sx4"
#define DRV_VERSION "0.12"
+static int dimm_test;
+module_param(dimm_test, int, 0644);
+MODULE_PARM_DESC(dimm_test, "Enable DIMM test during startup (1 = enabled)");
enum {
PDC_MMIO_BAR = 3,
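
The dimm_test parameter above makes the DIMM read/write self-test, previously compiled in only under ATA_VERBOSE_DEBUG, a load-time decision: presumably enabled as modprobe sata_sx4 dimm_test=1, or toggled afterwards through /sys/module/sata_sx4/parameters/dimm_test given the 0644 permissions.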
@@ -211,10 +214,8 @@ static unsigned int pdc20621_i2c_read(struct ata_host *host,
u32 device, u32 subaddr, u32 *pdata);
static int pdc20621_prog_dimm0(struct ata_host *host);
static unsigned int pdc20621_prog_dimm_global(struct ata_host *host);
-#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host,
void *psource, u32 offset, u32 size);
-#endif
static void pdc20621_put_to_dimm(struct ata_host *host,
void *psource, u32 offset, u32 size);
static void pdc20621_irq_clear(struct ata_port *ap);
@@ -308,15 +309,9 @@ static inline void pdc20621_ata_sg(u8 *buf, unsigned int portno,
/* output ATA packet S/G table */
addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA +
(PDC_DIMM_DATA_STEP * portno);
- VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr);
+
buf32[dw] = cpu_to_le32(addr);
buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
-
- VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n",
- PDC_20621_DIMM_BASE +
- (PDC_DIMM_WINDOW_STEP * portno) +
- PDC_DIMM_APKT_PRD,
- buf32[dw], buf32[dw + 1]);
}
static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
@@ -332,12 +327,6 @@ static inline void pdc20621_host_sg(u8 *buf, unsigned int portno,
buf32[dw] = cpu_to_le32(addr);
buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT);
-
- VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n",
- PDC_20621_DIMM_BASE +
- (PDC_DIMM_WINDOW_STEP * portno) +
- PDC_DIMM_HPKT_PRD,
- buf32[dw], buf32[dw + 1]);
}
static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
@@ -351,7 +340,6 @@ static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf,
unsigned int dimm_sg = PDC_20621_DIMM_BASE +
(PDC_DIMM_WINDOW_STEP * portno) +
PDC_DIMM_APKT_PRD;
- VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
i = PDC_DIMM_ATA_PKT;
@@ -406,8 +394,6 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
unsigned int dimm_sg = PDC_20621_DIMM_BASE +
(PDC_DIMM_WINDOW_STEP * portno) +
PDC_DIMM_HPKT_PRD;
- VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg);
- VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg);
dw = PDC_DIMM_HOST_PKT >> 2;
@@ -424,14 +410,6 @@ static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf,
buf32[dw + 1] = cpu_to_le32(host_sg);
buf32[dw + 2] = cpu_to_le32(dimm_sg);
buf32[dw + 3] = 0;
-
- VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n",
- PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) +
- PDC_DIMM_HOST_PKT,
- buf32[dw + 0],
- buf32[dw + 1],
- buf32[dw + 2],
- buf32[dw + 3]);
}
static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
@@ -447,8 +425,6 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
- VPRINTK("ata%u: ENTER\n", ap->print_id);
-
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
@@ -492,7 +468,8 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
readl(dimm_mmio); /* MMIO PCI posting flush */
- VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len);
+ ata_port_dbg(ap, "ata pkt buf ofs %u, prd size %u, mmio copied\n",
+ i, sgt_len);
}
static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
@@ -504,8 +481,6 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
unsigned int portno = ap->port_no;
unsigned int i;
- VPRINTK("ata%u: ENTER\n", ap->print_id);
-
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
@@ -527,7 +502,7 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
readl(dimm_mmio); /* MMIO PCI posting flush */
- VPRINTK("ata pkt buf ofs %u, mmio copied\n", i);
+ ata_port_dbg(ap, "ata pkt buf ofs %u, mmio copied\n", i);
}
static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc)
@@ -601,7 +576,6 @@ static void pdc20621_pop_hdma(struct ata_queued_cmd *qc)
pp->hdma_cons++;
}
-#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
@@ -611,14 +585,10 @@ static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
dimm_mmio += PDC_DIMM_HOST_PKT;
- printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio));
- printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4));
- printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8));
- printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12));
+ ata_port_dbg(ap, "HDMA 0x%08X 0x%08X 0x%08X 0x%08X\n",
+ readl(dimm_mmio), readl(dimm_mmio + 4),
+ readl(dimm_mmio + 8), readl(dimm_mmio + 12));
}
-#else
-static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { }
-#endif /* ATA_VERBOSE_DEBUG */
static void pdc20621_packet_start(struct ata_queued_cmd *qc)
{
@@ -633,8 +603,6 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
- VPRINTK("ata%u: ENTER\n", ap->print_id);
-
wmb(); /* flush PRD, pkt writes */
port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no);
@@ -645,7 +613,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
pdc20621_dump_hdma(qc);
pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT);
- VPRINTK("queued ofs 0x%x (%u), seq %u\n",
+ ata_port_dbg(ap, "queued ofs 0x%x (%u), seq %u\n",
port_ofs + PDC_DIMM_HOST_PKT,
port_ofs + PDC_DIMM_HOST_PKT,
seq);
@@ -656,7 +624,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
writel(port_ofs + PDC_DIMM_ATA_PKT,
ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
- VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
+ ata_port_dbg(ap, "submitted ofs 0x%x (%u), seq %u\n",
port_ofs + PDC_DIMM_ATA_PKT,
port_ofs + PDC_DIMM_ATA_PKT,
seq);
@@ -696,14 +664,12 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
u8 status;
unsigned int handled = 0;
- VPRINTK("ENTER\n");
-
if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */
(!(qc->tf.flags & ATA_TFLAG_WRITE))) {
/* step two - DMA from DIMM to host */
if (doing_hdma) {
- VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->print_id,
+ ata_port_dbg(ap, "read hdma, 0x%x 0x%x\n",
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* get drive status; clear intr; complete txn */
qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
@@ -714,7 +680,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
/* step one - exec ATA command */
else {
u8 seq = (u8) (port_no + 1 + 4);
- VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->print_id,
+ ata_port_dbg(ap, "read ata, 0x%x 0x%x\n",
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* submit hdma pkt */
@@ -729,7 +695,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
/* step one - DMA from host to DIMM */
if (doing_hdma) {
u8 seq = (u8) (port_no + 1);
- VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->print_id,
+ ata_port_dbg(ap, "write hdma, 0x%x 0x%x\n",
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* submit ata pkt */
@@ -742,7 +708,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
/* step two - execute ATA command */
else {
- VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->print_id,
+ ata_port_dbg(ap, "write ata, 0x%x 0x%x\n",
readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT));
/* get drive status; clear intr; complete txn */
qc->err_mask |= ac_err_mask(ata_wait_idle(ap));
@@ -755,7 +721,7 @@ static inline unsigned int pdc20621_host_intr(struct ata_port *ap,
} else if (qc->tf.protocol == ATA_PROT_NODATA) {
status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
- DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status);
+ ata_port_dbg(ap, "BUS_NODATA (drv_stat 0x%X)\n", status);
qc->err_mask |= ac_err_mask(status);
ata_qc_complete(qc);
handled = 1;
@@ -781,29 +747,21 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
unsigned int handled = 0;
void __iomem *mmio_base;
- VPRINTK("ENTER\n");
-
- if (!host || !host->iomap[PDC_MMIO_BAR]) {
- VPRINTK("QUICK EXIT\n");
+ if (!host || !host->iomap[PDC_MMIO_BAR])
return IRQ_NONE;
- }
mmio_base = host->iomap[PDC_MMIO_BAR];
/* reading should also clear interrupts */
mmio_base += PDC_CHIP0_OFS;
mask = readl(mmio_base + PDC_20621_SEQMASK);
- VPRINTK("mask == 0x%x\n", mask);
- if (mask == 0xffffffff) {
- VPRINTK("QUICK EXIT 2\n");
+ if (mask == 0xffffffff)
return IRQ_NONE;
- }
+
mask &= 0xffff; /* only 16 tags possible */
- if (!mask) {
- VPRINTK("QUICK EXIT 3\n");
+ if (!mask)
return IRQ_NONE;
- }
spin_lock(&host->lock);
@@ -816,7 +774,8 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
else
ap = host->ports[port_no];
tmp = mask & (1 << i);
- VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
+ if (ap)
+ ata_port_dbg(ap, "seq %u, tmp %x\n", i, tmp);
if (tmp && ap) {
struct ata_queued_cmd *qc;
@@ -829,10 +788,6 @@ static irqreturn_t pdc20621_interrupt(int irq, void *dev_instance)
spin_unlock(&host->lock);
- VPRINTK("mask == 0x%x\n", mask);
-
- VPRINTK("EXIT\n");
-
return IRQ_RETVAL(handled);
}
@@ -979,7 +934,6 @@ static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
}
-#ifdef ATA_VERBOSE_DEBUG
static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
u32 offset, u32 size)
{
@@ -1029,7 +983,6 @@ static void pdc20621_get_from_dimm(struct ata_host *host, void *psource,
memcpy_fromio(psource, dimm_mmio, size / 4);
}
}
-#endif
static void pdc20621_put_to_dimm(struct ata_host *host, void *psource,
@@ -1226,15 +1179,16 @@ static unsigned int pdc20621_prog_dimm_global(struct ata_host *host)
/* Turn on for ECC */
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0)) {
- pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
- PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
+ dev_err(host->dev,
+ "Failed in i2c read: device=%#x, subaddr=%#x\n",
+ PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
return 1;
}
if (spd0 == 0x02) {
data |= (0x01 << 16);
writel(data, mmio + PDC_SDRAM_CONTROL);
readl(mmio + PDC_SDRAM_CONTROL);
- printk(KERN_ERR "Local DIMM ECC Enabled\n");
+ dev_err(host->dev, "Local DIMM ECC Enabled\n");
}
/* DIMM Initialization Select/Enable (bit 18/19) */
@@ -1274,7 +1228,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
/* Initialize Time Period Register */
writel(0xffffffff, mmio + PDC_TIME_PERIOD);
time_period = readl(mmio + PDC_TIME_PERIOD);
- VPRINTK("Time Period Register (0x40): 0x%x\n", time_period);
+ dev_dbg(host->dev, "Time Period Register (0x40): 0x%x\n", time_period);
/* Enable timer */
writel(PDC_TIMER_DEFAULT, mmio + PDC_TIME_CONTROL);
@@ -1289,7 +1243,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
*/
tcount = readl(mmio + PDC_TIME_COUNTER);
- VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount);
+ dev_dbg(host->dev, "Time Counter Register (0x44): 0x%x\n", tcount);
/*
If SX4 is on PCI-X bus, after 3 seconds, the timer counter
@@ -1297,17 +1251,19 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
*/
if (tcount >= PCI_X_TCOUNT) {
ticks = (time_period - tcount);
- VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks);
+ dev_dbg(host->dev, "Num counters 0x%x (%d)\n", ticks, ticks);
clock = (ticks / 300000);
- VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock);
+ dev_dbg(host->dev, "10 * Internal clk = 0x%x (%d)\n",
+ clock, clock);
clock = (clock * 33);
- VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock);
+ dev_dbg(host->dev, "10 * Internal clk * 33 = 0x%x (%d)\n",
+ clock, clock);
/* PLL F Param (bit 22:16) */
fparam = (1400000 / clock) - 2;
- VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam);
+ dev_dbg(host->dev, "PLL F Param: 0x%x (%d)\n", fparam, fparam);
/* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */
pci_status = (0x8a001824 | (fparam << 16));
@@ -1315,7 +1271,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
pci_status = PCI_PLL_INIT;
/* Initialize PLL. */
- VPRINTK("pci_status: 0x%x\n", pci_status);
+ dev_dbg(host->dev, "pci_status: 0x%x\n", pci_status);
writel(pci_status, mmio + PDC_CTL_STATUS);
readl(mmio + PDC_CTL_STATUS);
@@ -1324,23 +1280,23 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
and program the DIMM Module Controller.
*/
if (!(speed = pdc20621_detect_dimm(host))) {
- printk(KERN_ERR "Detect Local DIMM Fail\n");
+ dev_err(host->dev, "Detect Local DIMM Fail\n");
return 1; /* DIMM error */
}
- VPRINTK("Local DIMM Speed = %d\n", speed);
+ dev_dbg(host->dev, "Local DIMM Speed = %d\n", speed);
/* Programming DIMM0 Module Control Register (index_CID0:80h) */
size = pdc20621_prog_dimm0(host);
- VPRINTK("Local DIMM Size = %dMB\n", size);
+ dev_dbg(host->dev, "Local DIMM Size = %dMB\n", size);
/* Programming DIMM Module Global Control Register (index_CID0:88h) */
if (pdc20621_prog_dimm_global(host)) {
- printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n");
+ dev_err(host->dev,
+ "Programming DIMM Module Global Control Register Fail\n");
return 1;
}
-#ifdef ATA_VERBOSE_DEBUG
- {
+ if (dimm_test) {
u8 test_parttern1[40] =
{0x55,0xAA,'P','r','o','m','i','s','e',' ',
'N','o','t',' ','Y','e','t',' ',
@@ -1354,31 +1310,33 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
pdc20621_put_to_dimm(host, test_parttern1, 0x10040, 40);
pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
- printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
+ dev_info(host->dev, "DIMM test pattern 1: %x, %x, %s\n", test_parttern2[0],
test_parttern2[1], &(test_parttern2[2]));
pdc20621_get_from_dimm(host, test_parttern2, 0x10040,
40);
- printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ dev_info(host->dev, "DIMM test pattern 2: %x, %x, %s\n",
+ test_parttern2[0],
+ test_parttern2[1], &(test_parttern2[2]));
pdc20621_put_to_dimm(host, test_parttern1, 0x40, 40);
pdc20621_get_from_dimm(host, test_parttern2, 0x40, 40);
- printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0],
- test_parttern2[1], &(test_parttern2[2]));
+ dev_info(host->dev, "DIMM test pattern 3: %x, %x, %s\n",
+ test_parttern2[0],
+ test_parttern2[1], &(test_parttern2[2]));
}
-#endif
 /* ECC initialization. */
if (!pdc20621_i2c_read(host, PDC_DIMM0_SPD_DEV_ADDRESS,
PDC_DIMM_SPD_TYPE, &spd0)) {
- pr_err("Failed in i2c read: device=%#x, subaddr=%#x\n",
+ dev_err(host->dev,
+ "Failed in i2c read: device=%#x, subaddr=%#x\n",
PDC_DIMM0_SPD_DEV_ADDRESS, PDC_DIMM_SPD_TYPE);
return 1;
}
if (spd0 == 0x02) {
void *buf;
- VPRINTK("Start ECC initialization\n");
+ dev_dbg(host->dev, "Start ECC initialization\n");
addr = 0;
length = size * 1024 * 1024;
buf = kzalloc(ECC_ERASE_BUF_SZ, GFP_KERNEL);
@@ -1390,7 +1348,7 @@ static unsigned int pdc20621_dimm_init(struct ata_host *host)
addr += ECC_ERASE_BUF_SZ;
}
kfree(buf);
- VPRINTK("Finish ECC initialization\n");
+ dev_dbg(host->dev, "Finish ECC initialization\n");
}
return 0;
}
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index bc8e8d9f176b..3e726ee91fdc 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -178,7 +178,6 @@ static void ia_hack_tcq(IADEV *dev) {
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
u_short desc_num, i;
- struct sk_buff *skb;
struct ia_vcc *iavcc_r = NULL;
unsigned long delta;
static unsigned long timer = 0;
@@ -202,8 +201,7 @@ static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
else
dev->ffL.tcq_rd -= 2;
*(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
- if (!(skb = dev->desc_tbl[i].txskb) ||
- !(iavcc_r = dev->desc_tbl[i].iavcc))
+ if (!dev->desc_tbl[i].txskb || !(iavcc_r = dev->desc_tbl[i].iavcc))
printk("Fatal err, desc table vcc or skb is NULL\n");
else
iavcc_r->vc_desc_cnt--;
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index ffcbe2bc460e..6f04b831a5c0 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -62,6 +62,17 @@ config DEVTMPFS_MOUNT
rescue mode with init=/bin/sh, even when the /dev directory
on the rootfs is completely empty.
+config DEVTMPFS_SAFE
+ bool "Use nosuid,noexec mount options on devtmpfs"
+ depends on DEVTMPFS
+ help
+ This instructs the kernel to include the MS_NOEXEC and MS_NOSUID mount
+ flags when mounting devtmpfs.
+
+ Notice: If enabled, things like /dev/mem cannot be mmapped
+ with the PROT_EXEC flag. This can break, for example, non-KMS
+ video drivers.
+
config STANDALONE
bool "Select only drivers that don't need compile-time external firmware"
default y
diff --git a/drivers/base/arch_numa.c b/drivers/base/arch_numa.c
index bc1876915457..eaa31e567d1e 100644
--- a/drivers/base/arch_numa.c
+++ b/drivers/base/arch_numa.c
@@ -14,7 +14,6 @@
#include <linux/of.h>
#include <asm/sections.h>
-#include <asm/pgalloc.h>
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
@@ -155,66 +154,6 @@ static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
return node_distance(early_cpu_to_node(from), early_cpu_to_node(to));
}
-static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size,
- size_t align)
-{
- int nid = early_cpu_to_node(cpu);
-
- return memblock_alloc_try_nid(size, align,
- __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);
-}
-
-static void __init pcpu_fc_free(void *ptr, size_t size)
-{
- memblock_free(ptr, size);
-}
-
-#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
-static void __init pcpu_populate_pte(unsigned long addr)
-{
- pgd_t *pgd = pgd_offset_k(addr);
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
-
- p4d = p4d_offset(pgd, addr);
- if (p4d_none(*p4d)) {
- pud_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!new)
- goto err_alloc;
- p4d_populate(&init_mm, p4d, new);
- }
-
- pud = pud_offset(p4d, addr);
- if (pud_none(*pud)) {
- pmd_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!new)
- goto err_alloc;
- pud_populate(&init_mm, pud, new);
- }
-
- pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- pte_t *new;
-
- new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
- if (!new)
- goto err_alloc;
- pmd_populate_kernel(&init_mm, pmd, new);
- }
-
- return;
-
-err_alloc:
- panic("%s: Failed to allocate %lu bytes align=%lx from=%lx\n",
- __func__, PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
-}
-#endif
-
void __init setup_per_cpu_areas(void)
{
unsigned long delta;
@@ -229,7 +168,7 @@ void __init setup_per_cpu_areas(void)
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
pcpu_cpu_distance,
- pcpu_fc_alloc, pcpu_fc_free);
+ early_cpu_to_node);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
if (rc < 0)
pr_warn("PERCPU: %s allocator failed (%d), falling back to page size\n",
@@ -239,10 +178,7 @@ void __init setup_per_cpu_areas(void)
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
if (rc < 0)
- rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
- pcpu_fc_alloc,
- pcpu_fc_free,
- pcpu_populate_pte);
+ rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, early_cpu_to_node);
#endif
if (rc < 0)
panic("Failed to initialize percpu areas (err=%d).", rc);
diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c
index 9230c9472bb0..8c5e65930617 100644
--- a/drivers/base/auxiliary.c
+++ b/drivers/base/auxiliary.c
@@ -17,6 +17,147 @@
#include <linux/auxiliary_bus.h>
#include "base.h"
+/**
+ * DOC: PURPOSE
+ *
+ * In some subsystems, the functionality of the core device (PCI/ACPI/other) is
+ * too complex for a single device to be managed by a monolithic driver (e.g.
+ * Sound Open Firmware), multiple devices might implement a common intersection
+ * of functionality (e.g. NICs + RDMA), or a driver may want to export an
+ * interface for another subsystem to drive (e.g. SIOV Physical Function export
+ * Virtual Function management). A split of the functionality into child-
+ * devices representing sub-domains of functionality makes it possible to
+ * compartmentalize, layer, and distribute domain-specific concerns via a Linux
+ * device-driver model.
+ *
+ * An example for this kind of requirement is the audio subsystem where a
+ * single IP is handling multiple entities such as HDMI, Soundwire, local
+ * devices such as mics/speakers etc. The split for the core's functionality
+ * can be arbitrary or be defined by the DSP firmware topology and include
+ * hooks for test/debug. This allows for the audio core device to be minimal
+ * and focused on hardware-specific control and communication.
+ *
+ * Each auxiliary_device represents a part of its parent functionality. The
+ * generic behavior can be extended and specialized as needed by encapsulating
+ * an auxiliary_device within other domain-specific structures and the use of
+ * .ops callbacks. Devices on the auxiliary bus do not share any structures and
+ * the use of a communication channel with the parent is domain-specific.
+ *
+ * Note that ops are intended as a way to augment instance behavior within a
+ * class of auxiliary devices; they are not the mechanism for exporting common
+ * infrastructure from the parent. Consider EXPORT_SYMBOL_NS() to convey
+ * infrastructure from the parent module to the auxiliary module(s).
+ */
+
+/**
+ * DOC: USAGE
+ *
+ * The auxiliary bus is to be used when a driver and one or more kernel
+ * modules, which share a common header file with the driver, need a mechanism to
+ * connect and provide access to a shared object allocated by the
+ * auxiliary_device's registering driver. The registering driver for the
+ * auxiliary_device(s) and the kernel module(s) registering auxiliary_drivers
+ * can be from the same subsystem, or from multiple subsystems.
+ *
+ * The emphasis here is on a common generic interface that keeps subsystem
+ * customization out of the bus infrastructure.
+ *
+ * One example is a PCI network device that is RDMA-capable and exports a child
+ * device to be driven by an auxiliary_driver in the RDMA subsystem. The PCI
+ * driver allocates and registers an auxiliary_device for each physical
+ * function on the NIC. The RDMA driver registers an auxiliary_driver that
+ * claims each of these auxiliary_devices. This conveys data/ops published by
+ * the parent PCI device/driver to the RDMA auxiliary_driver.
+ *
+ * Another use case is for the PCI device to be split out into multiple sub
+ * functions. For each sub function an auxiliary_device is created. A PCI sub
+ * function driver binds to such devices and creates one or more class devices
+ * of its own. A PCI sub function auxiliary device is likely to be contained in a
+ * struct with additional attributes such as user defined sub function number
+ * and optional attributes such as resources and a link to the parent device.
+ * These attributes could be used by systemd/udev and hence should be
+ * initialized before a driver binds to an auxiliary_device.
+ *
+ * A key requirement for utilizing the auxiliary bus is that there is no
+ * dependency on a physical bus, device, register accesses or regmap support.
+ * These individual devices split from the core cannot live on the platform bus
+ * as they are not physical devices that are controlled by DT/ACPI. The same
+ * argument applies for not using MFD in this scenario as MFD relies on
+ * individual function devices being physical devices.
+ */
+
+/**
+ * DOC: EXAMPLE
+ *
+ * Auxiliary devices are created and registered by a subsystem-level core
+ * device that needs to break up its functionality into smaller fragments. One
+ * way to extend the scope of an auxiliary_device is to encapsulate it within a
+ * domain-specific structure defined by the parent device. This structure
+ * contains the auxiliary_device and any associated shared data/callbacks
+ * needed to establish the connection with the parent.
+ *
+ * An example is:
+ *
+ * .. code-block:: c
+ *
+ * struct foo {
+ * struct auxiliary_device auxdev;
+ * void (*connect)(struct auxiliary_device *auxdev);
+ * void (*disconnect)(struct auxiliary_device *auxdev);
+ * void *data;
+ * };
+ *
+ * The parent device then registers the auxiliary_device by calling
+ * auxiliary_device_init(), and then auxiliary_device_add(), with the pointer
+ * to the auxdev member of the above structure. The parent provides a name for
+ * the auxiliary_device that, combined with the parent's KBUILD_MODNAME,
+ * creates a match_name that is used for matching and binding with a driver.
+ *
+ * Whenever an auxiliary_driver is registered, based on the match_name, the
+ * auxiliary_driver's probe() is invoked for the matching devices. The
+ * auxiliary_driver can also be encapsulated inside custom drivers that make
+ * the core device's functionality extensible by adding additional
+ * domain-specific ops as follows:
+ *
+ * .. code-block:: c
+ *
+ * struct my_ops {
+ * void (*send)(struct auxiliary_device *auxdev);
+ * void (*receive)(struct auxiliary_device *auxdev);
+ * };
+ *
+ *
+ * struct my_driver {
+ * struct auxiliary_driver auxiliary_drv;
+ * const struct my_ops ops;
+ * };
+ *
+ * An example of this type of usage is:
+ *
+ * .. code-block:: c
+ *
+ * const struct auxiliary_device_id my_auxiliary_id_table[] = {
+ * { .name = "foo_mod.foo_dev" },
+ * { },
+ * };
+ *
+ * const struct my_ops my_custom_ops = {
+ * .send = my_tx,
+ * .receive = my_rx,
+ * };
+ *
+ * const struct my_driver my_drv = {
+ * .auxiliary_drv = {
+ * .name = "myauxiliarydrv",
+ * .id_table = my_auxiliary_id_table,
+ * .probe = my_probe,
+ * .remove = my_remove,
+ * .shutdown = my_shutdown,
+ * },
+ * .ops = my_custom_ops,
+ * };
+ */
+
static const struct auxiliary_device_id *auxiliary_match_id(const struct auxiliary_device_id *id,
const struct auxiliary_device *auxdev)
{
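
To complement the DOC text above, a minimal parent-side sketch of the three-step registration described below; all foo_* names are hypothetical and not part of this patch:

#include <linux/auxiliary_bus.h>
#include <linux/slab.h>

static void foo_adev_release(struct device *dev)
{
	/* the release callback owns freeing, per the driver model rules */
	kfree(container_of(dev, struct auxiliary_device, dev));
}

static int foo_register_child(struct device *parent)
{
	struct auxiliary_device *adev;
	int ret;

	adev = kzalloc(sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	/* step 1: name, id, parent and release must be set up front */
	adev->name = "foo_dev";	/* match_name becomes "<KBUILD_MODNAME>.foo_dev" */
	adev->id = 0;
	adev->dev.parent = parent;
	adev->dev.release = foo_adev_release;

	/* step 2: initialize (takes the device reference) */
	ret = auxiliary_device_init(adev);
	if (ret) {
		kfree(adev);	/* init fails before device_initialize() runs */
		return ret;
	}

	/* step 3: add to the bus; on failure drop the ref, never kfree() */
	ret = auxiliary_device_add(adev);
	if (ret) {
		auxiliary_device_uninit(adev);
		return ret;
	}
	return 0;
}

On the driver side, the my_drv structure from the EXAMPLE section would be registered with auxiliary_driver_register(&my_drv.auxiliary_drv) (or module_auxiliary_driver()), with MODULE_DEVICE_TABLE(auxiliary, my_auxiliary_id_table) providing module autoloading.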
@@ -117,7 +258,7 @@ static struct bus_type auxiliary_bus_type = {
* auxiliary_device_init - check auxiliary_device and initialize
* @auxdev: auxiliary device struct
*
- * This is the first step in the two-step process to register an
+ * This is the second step in the three-step process to register an
* auxiliary_device.
*
* When this function returns an error code, then the device_initialize will
@@ -155,7 +296,7 @@ EXPORT_SYMBOL_GPL(auxiliary_device_init);
* @auxdev: auxiliary bus device to add to the bus
* @modname: name of the parent device's driver module
*
- * This is the second step in the two-step process to register an
+ * This is the third step in the three-step process to register an
* auxiliary_device.
*
* This function must be called after a successful call to
@@ -202,6 +343,8 @@ EXPORT_SYMBOL_GPL(__auxiliary_device_add);
* This function returns a reference to a device that is 'found'
* for later use, as determined by the @match callback.
*
+ * The reference returned should be released with put_device().
+ *
* The callback should return 0 if the device doesn't match and non-zero
* if it does. If the callback returns non-zero, this function will
* return to the caller and not iterate over any more devices.
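
A sketch of the lookup/release pairing this comment calls for; the foo_* names are hypothetical:

#include <linux/auxiliary_bus.h>
#include <linux/device.h>
#include <linux/string.h>

static int foo_match_name(struct device *dev, const void *data)
{
	/* non-zero stops the iteration, as described above */
	return !strcmp(dev_name(dev), data);
}

static int foo_use_device(const char *match_name)
{
	struct auxiliary_device *adev;

	adev = auxiliary_find_device(NULL, match_name, foo_match_name);
	if (!adev)
		return -ENODEV;

	/* ... use adev while the reference is held ... */

	put_device(&adev->dev);	/* release the reference the lookup took */
	return 0;
}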
@@ -225,6 +368,11 @@ EXPORT_SYMBOL_GPL(auxiliary_find_device);
* @auxdrv: auxiliary_driver structure
* @owner: owning module/driver
* @modname: KBUILD_MODNAME for parent driver
+ *
+ * The expectation is that users will call the "auxiliary_driver_register"
+ * macro so that the caller's KBUILD_MODNAME is automatically inserted for the
+ * modname parameter. Only if a user requires a custom name would this version
+ * be called directly.
*/
int __auxiliary_driver_register(struct auxiliary_driver *auxdrv,
struct module *owner, const char *modname)
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index bdc98c5713d5..97936ec49bde 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -163,9 +163,9 @@ static struct kobj_type bus_ktype = {
.release = bus_release,
};
-static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
+static int bus_uevent_filter(struct kobject *kobj)
{
- struct kobj_type *ktype = get_ktype(kobj);
+ const struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &bus_ktype)
return 1;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 63e769057487..7bb957b11861 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2260,9 +2260,9 @@ static struct kobj_type device_ktype = {
};
-static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
+static int dev_uevent_filter(struct kobject *kobj)
{
- struct kobj_type *ktype = get_ktype(kobj);
+ const struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &device_ktype) {
struct device *dev = kobj_to_dev(kobj);
@@ -2274,7 +2274,7 @@ static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
return 0;
}
-static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
+static const char *dev_uevent_name(struct kobject *kobj)
{
struct device *dev = kobj_to_dev(kobj);
@@ -2285,8 +2285,7 @@ static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
return NULL;
}
-static int dev_uevent(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env)
+static int dev_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
{
struct device *dev = kobj_to_dev(kobj);
int retval = 0;
@@ -2381,7 +2380,7 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
/* respect filter */
if (kset->uevent_ops && kset->uevent_ops->filter)
- if (!kset->uevent_ops->filter(kset, &dev->kobj))
+ if (!kset->uevent_ops->filter(&dev->kobj))
goto out;
env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
@@ -2389,7 +2388,7 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
return -ENOMEM;
/* let the kset specific function add its keys */
- retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
+ retval = kset->uevent_ops->uevent(&dev->kobj, env);
if (retval)
goto out;
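
This signature change ripples through every kset_uevent_ops implementation; a sketch of the post-change callback shapes, with hypothetical my_* names:

#include <linux/kobject.h>

static int my_filter(struct kobject *kobj)
{
	return 1;	/* emit uevents for every kobject in this kset */
}

static const char *my_name(struct kobject *kobj)
{
	return "mysubsys";	/* subsystem name reported in the uevent */
}

static int my_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
{
	return add_uevent_var(env, "MYKEY=%s", kobject_name(kobj));
}

static const struct kset_uevent_ops my_uevent_ops = {
	.filter	= my_filter,
	.name	= my_name,
	.uevent	= my_uevent,
};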
@@ -2873,10 +2872,6 @@ void device_initialize(struct device *dev)
INIT_LIST_HEAD(&dev->devres_head);
device_pm_init(dev);
set_dev_node(dev, NUMA_NO_NODE);
-#ifdef CONFIG_GENERIC_MSI_IRQ
- raw_spin_lock_init(&dev->msi_lock);
- INIT_LIST_HEAD(&dev->msi_list);
-#endif
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
INIT_LIST_HEAD(&dev->links.defer_sync);
@@ -3028,6 +3023,23 @@ static inline struct kobject *get_glue_dir(struct device *dev)
return dev->kobj.parent;
}
+/**
+ * kobject_has_children - Returns whether a kobject has children.
+ * @kobj: the object to test
+ *
+ * This will return whether a kobject has other kobjects as children.
+ *
+ * It does NOT account for the presence of attribute files, only sub
+ * directories. It also assumes there is no concurrent addition or
+ * removal of such children, and thus relies on external locking.
+ */
+static inline bool kobject_has_children(struct kobject *kobj)
+{
+ WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
+
+ return kobj->sd && kobj->sd->dir.subdirs;
+}
+
/*
* make sure cleaning up dir as the last step, we need to make
* sure .release handler of kobject is run with holding the
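
A minimal sketch of how such a helper can be used inside core.c, assuming the caller provides the external locking the comment requires (maybe_remove_glue_dir is hypothetical):

static void maybe_remove_glue_dir(struct kobject *glue_dir)
{
	/* only tear the directory down once no child kobjects remain */
	if (!kobject_has_children(glue_dir))
		kobject_del(glue_dir);
}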
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 68ea1f949daa..9eaaff2f556c 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -577,14 +577,14 @@ re_probe:
if (dev->bus->dma_configure) {
ret = dev->bus->dma_configure(dev);
if (ret)
- goto probe_failed;
+ goto pinctrl_bind_failed;
}
ret = driver_sysfs_add(dev);
if (ret) {
pr_err("%s: driver_sysfs_add(%s) failed\n",
__func__, dev_name(dev));
- goto probe_failed;
+ goto sysfs_failed;
}
if (dev->pm_domain && dev->pm_domain->activate) {
@@ -657,6 +657,8 @@ dev_groups_failed:
else if (drv->remove)
drv->remove(dev);
probe_failed:
+ driver_sysfs_remove(dev);
+sysfs_failed:
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
BUS_NOTIFY_DRIVER_NOT_BOUND, dev);
@@ -666,7 +668,6 @@ pinctrl_bind_failed:
arch_teardown_dma_ops(dev);
kfree(dev->dma_range_map);
dev->dma_range_map = NULL;
- driver_sysfs_remove(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 8be352ab4ddb..f41063ac1aee 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -29,6 +29,12 @@
#include <uapi/linux/mount.h>
#include "base.h"
+#ifdef CONFIG_DEVTMPFS_SAFE
+#define DEVTMPFS_MFLAGS (MS_SILENT | MS_NOEXEC | MS_NOSUID)
+#else
+#define DEVTMPFS_MFLAGS (MS_SILENT)
+#endif
+
static struct task_struct *thread;
static int __initdata mount_dev = IS_ENABLED(CONFIG_DEVTMPFS_MOUNT);
@@ -59,8 +65,15 @@ static struct dentry *public_dev_mount(struct file_system_type *fs_type, int fla
const char *dev_name, void *data)
{
struct super_block *s = mnt->mnt_sb;
+ int err;
+
atomic_inc(&s->s_active);
down_write(&s->s_umount);
+ err = reconfigure_single(s, flags, data);
+ if (err < 0) {
+ deactivate_locked_super(s);
+ return ERR_PTR(err);
+ }
return dget(s->s_root);
}
@@ -363,7 +376,7 @@ int __init devtmpfs_mount(void)
if (!thread)
return 0;
- err = init_mount("devtmpfs", "dev", "devtmpfs", MS_SILENT, NULL);
+ err = init_mount("devtmpfs", "dev", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
if (err)
printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
else
@@ -412,7 +425,7 @@ static noinline int __init devtmpfs_setup(void *p)
err = ksys_unshare(CLONE_NEWNS);
if (err)
goto out;
- err = init_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, NULL);
+ err = init_mount("devtmpfs", "/", "devtmpfs", DEVTMPFS_MFLAGS, NULL);
if (err)
goto out;
init_chdir("/.."); /* will traverse into overmounted root */
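
The DEVTMPFS_MFLAGS choice is observable from userspace; a small glibc check (ST_NOEXEC is a GNU extension, hence _GNU_SOURCE):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/statvfs.h>

int main(void)
{
	struct statvfs sv;

	if (statvfs("/dev", &sv) != 0)
		return 1;
	/* with CONFIG_DEVTMPFS_SAFE both flags should read as 1 */
	printf("/dev: nosuid=%d noexec=%d\n",
	       !!(sv.f_flag & ST_NOSUID),
	       !!(sv.f_flag & ST_NOEXEC));
	return 0;
}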
diff --git a/drivers/base/firmware_loader/builtin/Makefile b/drivers/base/firmware_loader/builtin/Makefile
index eb4be452062a..6c067dedc01e 100644
--- a/drivers/base/firmware_loader/builtin/Makefile
+++ b/drivers/base/firmware_loader/builtin/Makefile
@@ -3,10 +3,10 @@ obj-y += main.o
# Create $(fwdir) from $(CONFIG_EXTRA_FIRMWARE_DIR) -- if it doesn't have a
# leading /, it's relative to $(srctree).
-fwdir := $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE_DIR))
+fwdir := $(CONFIG_EXTRA_FIRMWARE_DIR)
fwdir := $(addprefix $(srctree)/,$(filter-out /%,$(fwdir)))$(filter /%,$(fwdir))
-firmware := $(addsuffix .gen.o, $(subst $(quote),,$(CONFIG_EXTRA_FIRMWARE)))
+firmware := $(addsuffix .gen.o, $(CONFIG_EXTRA_FIRMWARE))
obj-y += $(firmware)
FWNAME = $(patsubst $(obj)/%.gen.S,%,$@)
diff --git a/drivers/base/firmware_loader/fallback.c b/drivers/base/firmware_loader/fallback.c
index d7d63c1aa993..4afb0e9312c0 100644
--- a/drivers/base/firmware_loader/fallback.c
+++ b/drivers/base/firmware_loader/fallback.c
@@ -199,11 +199,16 @@ static struct class firmware_class = {
int register_sysfs_loader(void)
{
- return class_register(&firmware_class);
+ int ret = class_register(&firmware_class);
+
+ if (ret != 0)
+ return ret;
+ return register_firmware_config_sysctl();
}
void unregister_sysfs_loader(void)
{
+ unregister_firmware_config_sysctl();
class_unregister(&firmware_class);
}
diff --git a/drivers/base/firmware_loader/fallback.h b/drivers/base/firmware_loader/fallback.h
index 3af7205b302f..9f3055d3b4ca 100644
--- a/drivers/base/firmware_loader/fallback.h
+++ b/drivers/base/firmware_loader/fallback.h
@@ -42,6 +42,17 @@ void fw_fallback_set_default_timeout(void);
int register_sysfs_loader(void);
void unregister_sysfs_loader(void);
+#ifdef CONFIG_SYSCTL
+extern int register_firmware_config_sysctl(void);
+extern void unregister_firmware_config_sysctl(void);
+#else
+static inline int register_firmware_config_sysctl(void)
+{
+ return 0;
+}
+static inline void unregister_firmware_config_sysctl(void) { }
+#endif /* CONFIG_SYSCTL */
+
#else /* CONFIG_FW_LOADER_USER_HELPER */
static inline int firmware_fallback_sysfs(struct firmware *fw, const char *name,
struct device *device,
diff --git a/drivers/base/firmware_loader/fallback_table.c b/drivers/base/firmware_loader/fallback_table.c
index 46a731dede6f..e5ac098d0742 100644
--- a/drivers/base/firmware_loader/fallback_table.c
+++ b/drivers/base/firmware_loader/fallback_table.c
@@ -4,6 +4,7 @@
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/export.h>
#include <linux/security.h>
#include <linux/highmem.h>
#include <linux/umh.h>
@@ -24,7 +25,7 @@ struct firmware_fallback_config fw_fallback_config = {
EXPORT_SYMBOL_NS_GPL(fw_fallback_config, FIRMWARE_LOADER_PRIVATE);
#ifdef CONFIG_SYSCTL
-struct ctl_table firmware_config_table[] = {
+static struct ctl_table firmware_config_table[] = {
{
.procname = "force_sysfs_fallback",
.data = &fw_fallback_config.force_sysfs_fallback,
@@ -45,4 +46,24 @@ struct ctl_table firmware_config_table[] = {
},
{ }
};
-#endif
+
+static struct ctl_table_header *firmware_config_sysct_table_header;
+int register_firmware_config_sysctl(void)
+{
+ firmware_config_sysct_table_header =
+ register_sysctl("kernel/firmware_config",
+ firmware_config_table);
+ if (!firmware_config_sysct_table_header)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(register_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+
+void unregister_firmware_config_sysctl(void)
+{
+ unregister_sysctl_table(firmware_config_sysct_table_header);
+ firmware_config_sysct_table_header = NULL;
+}
+EXPORT_SYMBOL_NS_GPL(unregister_firmware_config_sysctl, FIRMWARE_LOADER_PRIVATE);
+
+#endif /* CONFIG_SYSCTL */
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 3d6c8f9caf43..296ea673d661 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -23,7 +23,6 @@
struct platform_msi_priv_data {
struct device *dev;
void *host_data;
- const struct attribute_group **msi_irq_groups;
msi_alloc_info_t arg;
irq_write_msi_msg_t write_msg;
int devid;
@@ -39,11 +38,9 @@ static DEFINE_IDA(platform_msi_devid_ida);
*/
static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
{
- u32 devid;
+ u32 devid = desc->dev->msi.data->platform_data->devid;
- devid = desc->platform.msi_priv_data->devid;
-
- return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
+ return (devid << (32 - DEV_ID_SHIFT)) | desc->msi_index;
}
static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
@@ -86,11 +83,8 @@ static void platform_msi_update_dom_ops(struct msi_domain_info *info)
static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
struct msi_desc *desc = irq_data_get_msi_desc(data);
- struct platform_msi_priv_data *priv_data;
-
- priv_data = desc->platform.msi_priv_data;
- priv_data->write_msg(desc, msg);
+ desc->dev->msi.data->platform_data->write_msg(desc, msg);
}
static void platform_msi_update_chip_ops(struct msi_domain_info *info)
@@ -113,62 +107,6 @@ static void platform_msi_update_chip_ops(struct msi_domain_info *info)
info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
}
-static void platform_msi_free_descs(struct device *dev, int base, int nvec)
-{
- struct msi_desc *desc, *tmp;
-
- list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
- if (desc->platform.msi_index >= base &&
- desc->platform.msi_index < (base + nvec)) {
- list_del(&desc->list);
- free_msi_entry(desc);
- }
- }
-}
-
-static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq,
- int nvec,
- struct platform_msi_priv_data *data)
-
-{
- struct msi_desc *desc;
- int i, base = 0;
-
- if (!list_empty(dev_to_msi_list(dev))) {
- desc = list_last_entry(dev_to_msi_list(dev),
- struct msi_desc, list);
- base = desc->platform.msi_index + 1;
- }
-
- for (i = 0; i < nvec; i++) {
- desc = alloc_msi_entry(dev, 1, NULL);
- if (!desc)
- break;
-
- desc->platform.msi_priv_data = data;
- desc->platform.msi_index = base + i;
- desc->irq = virq ? virq + i : 0;
-
- list_add_tail(&desc->list, dev_to_msi_list(dev));
- }
-
- if (i != nvec) {
- /* Clean up the mess */
- platform_msi_free_descs(dev, base, nvec);
-
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int platform_msi_alloc_descs(struct device *dev, int nvec,
- struct platform_msi_priv_data *data)
-
-{
- return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data);
-}
-
/**
* platform_msi_create_irq_domain - Create a platform MSI interrupt domain
* @fwnode: Optional fwnode of the interrupt controller
@@ -191,6 +129,8 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
platform_msi_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
platform_msi_update_chip_ops(info);
+ info->flags |= MSI_FLAG_DEV_SYSFS | MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
+ MSI_FLAG_FREE_MSI_DESCS;
domain = msi_create_irq_domain(fwnode, info, parent);
if (domain)
@@ -199,49 +139,57 @@ struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
return domain;
}
-static struct platform_msi_priv_data *
-platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
- irq_write_msi_msg_t write_msi_msg)
+static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
+ irq_write_msi_msg_t write_msi_msg)
{
struct platform_msi_priv_data *datap;
+ int err;
+
/*
* Limit the number of interrupts to 2048 per device. Should we
* need to bump this up, DEV_ID_SHIFT should be adjusted
* accordingly (which would impact the max number of MSI
* capable devices).
*/
- if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
- return ERR_PTR(-EINVAL);
+ if (!dev->msi.domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
+ return -EINVAL;
- if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
+ if (dev->msi.domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
dev_err(dev, "Incompatible msi_domain, giving up\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- /* Already had a helping of MSI? Greed... */
- if (!list_empty(dev_to_msi_list(dev)))
- return ERR_PTR(-EBUSY);
+ err = msi_setup_device_data(dev);
+ if (err)
+ return err;
+
+ /* Already initialized? */
+ if (dev->msi.data->platform_data)
+ return -EBUSY;
datap = kzalloc(sizeof(*datap), GFP_KERNEL);
if (!datap)
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
datap->devid = ida_simple_get(&platform_msi_devid_ida,
0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
if (datap->devid < 0) {
- int err = datap->devid;
+ err = datap->devid;
kfree(datap);
- return ERR_PTR(err);
+ return err;
}
datap->write_msg = write_msi_msg;
datap->dev = dev;
-
- return datap;
+ dev->msi.data->platform_data = datap;
+ return 0;
}
-static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
+static void platform_msi_free_priv_data(struct device *dev)
{
+ struct platform_msi_priv_data *data = dev->msi.data->platform_data;
+
+ dev->msi.data->platform_data = NULL;
ida_simple_remove(&platform_msi_devid_ida, data->devid);
kfree(data);
}
@@ -258,35 +206,15 @@ static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
irq_write_msi_msg_t write_msi_msg)
{
- struct platform_msi_priv_data *priv_data;
int err;
- priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
- if (IS_ERR(priv_data))
- return PTR_ERR(priv_data);
-
- err = platform_msi_alloc_descs(dev, nvec, priv_data);
+ err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
if (err)
- goto out_free_priv_data;
+ return err;
- err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
+ err = msi_domain_alloc_irqs(dev->msi.domain, dev, nvec);
if (err)
- goto out_free_desc;
-
- priv_data->msi_irq_groups = msi_populate_sysfs(dev);
- if (IS_ERR(priv_data->msi_irq_groups)) {
- err = PTR_ERR(priv_data->msi_irq_groups);
- goto out_free_irqs;
- }
-
- return 0;
-
-out_free_irqs:
- msi_domain_free_irqs(dev->msi_domain, dev);
-out_free_desc:
- platform_msi_free_descs(dev, 0, nvec);
-out_free_priv_data:
- platform_msi_free_priv_data(priv_data);
+ platform_msi_free_priv_data(dev);
return err;
}
@@ -298,16 +226,8 @@ EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
*/
void platform_msi_domain_free_irqs(struct device *dev)
{
- if (!list_empty(dev_to_msi_list(dev))) {
- struct msi_desc *desc;
-
- desc = first_msi_entry(dev);
- msi_destroy_sysfs(dev, desc->platform.msi_priv_data->msi_irq_groups);
- platform_msi_free_priv_data(desc->platform.msi_priv_data);
- }
-
- msi_domain_free_irqs(dev->msi_domain, dev);
- platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
+ msi_domain_free_irqs(dev->msi.domain, dev);
+ platform_msi_free_priv_data(dev);
}
EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
@@ -316,17 +236,20 @@ EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);
* a platform-msi domain
* @domain: The platform-msi domain
*
- * Returns the private data provided when calling
- * platform_msi_create_device_domain.
+ * Return: The private data provided when calling
+ * platform_msi_create_device_domain().
*/
void *platform_msi_get_host_data(struct irq_domain *domain)
{
struct platform_msi_priv_data *data = domain->host_data;
+
return data->host_data;
}
+static struct lock_class_key platform_device_msi_lock_class;
+
/**
- * __platform_msi_create_device_domain - Create a platform-msi domain
+ * __platform_msi_create_device_domain - Create a platform-msi device domain
*
* @dev: The device generating the MSIs
* @nvec: The number of MSIs that need to be allocated
@@ -335,7 +258,11 @@ void *platform_msi_get_host_data(struct irq_domain *domain)
* @ops: The hierarchy domain operations to use
* @host_data: Private data associated to this domain
*
- * Returns an irqdomain for @nvec interrupts
+ * Return: An irqdomain for @nvec interrupts on success, NULL in case of error.
+ *
+ * This is for interrupt domains which stack on a platform-msi domain
+ * created by platform_msi_create_irq_domain(). @dev->msi.domain points to
+ * that platform-msi domain which is the parent for the new domain.
*/
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
@@ -349,12 +276,20 @@ __platform_msi_create_device_domain(struct device *dev,
struct irq_domain *domain;
int err;
- data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
- if (IS_ERR(data))
+ err = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
+ if (err)
return NULL;
+ /*
+ * Use a separate lock class for the MSI descriptor mutex on
+ * platform MSI device domains because the descriptor mutex nests
+ * into the domain mutex. See alloc/free below.
+ */
+ lockdep_set_class(&dev->msi.data->mutex, &platform_device_msi_lock_class);
+
+ data = dev->msi.data->platform_data;
data->host_data = host_data;
- domain = irq_domain_create_hierarchy(dev->msi_domain, 0,
+ domain = irq_domain_create_hierarchy(dev->msi.domain, 0,
is_tree ? 0 : nvec,
dev->fwnode, ops, data);
if (!domain)
@@ -370,61 +305,46 @@ __platform_msi_create_device_domain(struct device *dev,
free_domain:
irq_domain_remove(domain);
free_priv:
- platform_msi_free_priv_data(data);
+ platform_msi_free_priv_data(dev);
return NULL;
}
/**
- * platform_msi_domain_free - Free interrupts associated with a platform-msi
- * domain
+ * platform_msi_device_domain_free - Free interrupts associated with a platform-msi
+ * device domain
*
- * @domain: The platform-msi domain
+ * @domain: The platform-msi device domain
* @virq: The base irq from which to perform the free operation
- * @nvec: How many interrupts to free from @virq
+ * @nr_irqs: How many interrupts to free from @virq
*/
-void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nvec)
+void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
struct platform_msi_priv_data *data = domain->host_data;
- struct msi_desc *desc, *tmp;
- for_each_msi_entry_safe(desc, tmp, data->dev) {
- if (WARN_ON(!desc->irq || desc->nvec_used != 1))
- return;
- if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
- continue;
-
- irq_domain_free_irqs_common(domain, desc->irq, 1);
- list_del(&desc->list);
- free_msi_entry(desc);
- }
+
+ msi_lock_descs(data->dev);
+ irq_domain_free_irqs_common(domain, virq, nr_irqs);
+ msi_free_msi_descs_range(data->dev, MSI_DESC_ALL, virq, virq + nr_irqs - 1);
+ msi_unlock_descs(data->dev);
}
/**
- * platform_msi_domain_alloc - Allocate interrupts associated with
- * a platform-msi domain
+ * platform_msi_device_domain_alloc - Allocate interrupts associated with
+ * a platform-msi device domain
*
- * @domain: The platform-msi domain
+ * @domain: The platform-msi device domain
* @virq: The base irq from which to perform the allocate operation
- * @nr_irqs: How many interrupts to free from @virq
+ * @nr_irqs: How many interrupts to allocate from @virq
*
* Return 0 on success, or an error code on failure. Must be called
* with irq_domain_mutex held (which can only be done as part of a
* top-level interrupt allocation).
*/
-int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
+int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
{
struct platform_msi_priv_data *data = domain->host_data;
- int err;
-
- err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data);
- if (err)
- return err;
-
- err = msi_domain_populate_irqs(domain->parent, data->dev,
- virq, nr_irqs, &data->arg);
- if (err)
- platform_msi_domain_free(domain, virq, nr_irqs);
+ struct device *dev = data->dev;
- return err;
+ return msi_domain_populate_irqs(domain->parent, dev, virq, nr_irqs, &data->arg);
}
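
For context, a minimal sketch of a consumer of this device-domain API, loosely modelled on existing users such as the mbigen irqchip driver (all "demo" names are hypothetical; platform_msi_create_device_domain() is the non-tree wrapper around __platform_msi_create_device_domain()):

	static void demo_write_msg(struct msi_desc *desc, struct msi_msg *msg)
	{
		/* program the device doorbell from msg->address_hi/lo and msg->data */
	}

	static int demo_domain_alloc(struct irq_domain *domain, unsigned int virq,
				     unsigned int nr_irqs, void *args)
	{
		int err;

		/* allocates descriptors and populates the parent platform-msi domain */
		err = platform_msi_device_domain_alloc(domain, virq, nr_irqs);
		if (err)
			return err;

		/* per-interrupt setup (irq_chip, handler, chip_data) would go here */
		return 0;
	}

	static const struct irq_domain_ops demo_domain_ops = {
		.alloc	= demo_domain_alloc,
		.free	= platform_msi_device_domain_free,
	};

	/* 64 MSIs; @priv is later retrievable via platform_msi_get_host_data() */
	domain = platform_msi_create_device_domain(dev, 64, demo_write_msg,
						   &demo_domain_ops, priv);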
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 598acf93a360..6cb04ac48bf0 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -258,8 +258,9 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
int ret;
ret = platform_get_irq_optional(dev, num);
- if (ret < 0 && ret != -EPROBE_DEFER)
- dev_err(&dev->dev, "IRQ index %u not found\n", num);
+ if (ret < 0)
+ return dev_err_probe(&dev->dev, ret,
+ "IRQ index %u not found\n", num);
return ret;
}
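
dev_err_probe() both logs the message and returns its error argument, which is what permits the tail return above; for -EPROBE_DEFER it demotes the message to debug level and records the deferral reason so it can be inspected later (e.g. in the devices_deferred debugfs file).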
@@ -762,6 +763,10 @@ EXPORT_SYMBOL_GPL(platform_device_del);
/**
* platform_device_register - add a platform-level device
* @pdev: platform device we're adding
+ *
+ * NOTE: _Never_ directly free @pdev after calling this function, even if it
+ * returned an error! Always use platform_device_put() to give up the
+ * reference initialised in this function instead.
*/
int platform_device_register(struct platform_device *pdev)
{
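
A sketch of the error handling this note mandates (device name hypothetical):

	static struct platform_device demo_pdev = {
		.name	= "demo",
		.id	= PLATFORM_DEVID_NONE,
	};

	ret = platform_device_register(&demo_pdev);
	if (ret) {
		/* drop the reference device_initialize() took; never kfree() here */
		platform_device_put(&demo_pdev);
		return ret;
	}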
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 94665037f4a3..72b7a92337b1 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -120,7 +120,11 @@ static unsigned int read_magic_time(void)
struct rtc_time time;
unsigned int val;
- mc146818_get_time(&time);
+ if (mc146818_get_time(&time) < 0) {
+ pr_err("Unable to read current time from RTC\n");
+ return 0;
+ }
+
pr_info("RTC time: %ptRt, date: %ptRd\n", &time, &time);
val = time.tm_year; /* 100 years */
if (val > 100)
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 99bda0da23a8..8666590201c9 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -34,7 +34,8 @@ suspend_state_t pm_suspend_target_state;
bool events_check_enabled __read_mostly;
/* First wakeup IRQ seen by the kernel in the last cycle. */
-unsigned int pm_wakeup_irq __read_mostly;
+static unsigned int wakeup_irq[2] __read_mostly;
+static DEFINE_RAW_SPINLOCK(wakeup_irq_lock);
/* If greater than 0 and the system is suspending, terminate the suspend. */
static atomic_t pm_abort_suspend __read_mostly;
@@ -942,19 +943,45 @@ void pm_system_cancel_wakeup(void)
atomic_dec_if_positive(&pm_abort_suspend);
}
-void pm_wakeup_clear(bool reset)
+void pm_wakeup_clear(unsigned int irq_number)
{
- pm_wakeup_irq = 0;
- if (reset)
+ raw_spin_lock_irq(&wakeup_irq_lock);
+
+ if (irq_number && wakeup_irq[0] == irq_number)
+ wakeup_irq[0] = wakeup_irq[1];
+ else
+ wakeup_irq[0] = 0;
+
+ wakeup_irq[1] = 0;
+
+ raw_spin_unlock_irq(&wakeup_irq_lock);
+
+ if (!irq_number)
atomic_set(&pm_abort_suspend, 0);
}
void pm_system_irq_wakeup(unsigned int irq_number)
{
- if (pm_wakeup_irq == 0) {
- pm_wakeup_irq = irq_number;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&wakeup_irq_lock, flags);
+
+ if (wakeup_irq[0] == 0)
+ wakeup_irq[0] = irq_number;
+ else if (wakeup_irq[1] == 0)
+ wakeup_irq[1] = irq_number;
+ else
+ irq_number = 0;
+
+ raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
+
+ if (irq_number)
pm_system_wakeup();
- }
+}
+
+unsigned int pm_wakeup_irq(void)
+{
+ return wakeup_irq[0];
}
/**
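
The reporting side lives in the IRQ core; a rough sketch of the pattern (cf. irq_pm_check_wakeup() in kernel/irq/pm.c):

	if (irqd_has_set(&desc->irq_data, IRQD_WAKEUP_ARMED)) {
		/* records at most two distinct wakeup IRQs, then aborts suspend */
		pm_system_irq_wakeup(irq_desc_get_irq(desc));
	}

Consumers that used to read the old pm_wakeup_irq variable directly now call the pm_wakeup_irq() accessor instead.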
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 5379eae478b1..e6497f6877ee 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -478,8 +478,17 @@ int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode,
unsigned int nargs, unsigned int index,
struct fwnode_reference_args *args)
{
- return fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
- nargs, index, args);
+ int ret;
+
+ ret = fwnode_call_int_op(fwnode, get_reference_args, prop, nargs_prop,
+ nargs, index, args);
+
+ if (ret < 0 && !IS_ERR_OR_NULL(fwnode) &&
+ !IS_ERR_OR_NULL(fwnode->secondary))
+ ret = fwnode_call_int_op(fwnode->secondary, get_reference_args,
+ prop, nargs_prop, nargs, index, args);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(fwnode_property_get_reference_args);
@@ -911,6 +920,22 @@ int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index)
EXPORT_SYMBOL(fwnode_irq_get);
/**
+ * fwnode_iomap - Maps the memory mapped IO for a given fwnode
+ * @fwnode: Pointer to the firmware node
+ * @index: Index of the IO range
+ *
+ * Return: Pointer to the mapped memory or %NULL on failure.
+ */
+void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index)
+{
+ if (IS_ENABLED(CONFIG_OF_ADDRESS) && is_of_node(fwnode))
+ return of_iomap(to_of_node(fwnode), index);
+
+ return NULL;
+}
+EXPORT_SYMBOL(fwnode_iomap);
+
+/**
* fwnode_graph_get_next_endpoint - Get next endpoint firmware node
* @fwnode: Pointer to the parent firmware node
* @prev: Previous endpoint node or %NULL to get the first
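
A minimal usage sketch for the new fwnode_iomap() helper above (register offset hypothetical; only OF-backed fwnodes map anything at this point, everything else yields NULL):

	void __iomem *base;

	base = fwnode_iomap(dev_fwnode(dev), 0);
	if (!base)
		return -ENOMEM;
	writel(1, base + DEMO_CTRL_OFFSET);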
diff --git a/drivers/base/test/test_async_driver_probe.c b/drivers/base/test/test_async_driver_probe.c
index 3bb7beb127a9..4d1976ca5072 100644
--- a/drivers/base/test/test_async_driver_probe.c
+++ b/drivers/base/test/test_async_driver_probe.c
@@ -104,7 +104,7 @@ static int __init test_async_probe_init(void)
struct platform_device **pdev = NULL;
int async_id = 0, sync_id = 0;
unsigned long long duration;
- ktime_t calltime, delta;
+ ktime_t calltime;
int err, nid, cpu;
pr_info("registering first set of asynchronous devices...\n");
@@ -133,8 +133,7 @@ static int __init test_async_probe_init(void)
goto err_unregister_async_devs;
}
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
pr_info("registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
pr_err("test failed: probe took too long\n");
@@ -161,8 +160,7 @@ static int __init test_async_probe_init(void)
async_id++;
}
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
dev_info(&(*pdev)->dev,
"registration took %lld msecs\n", duration);
if (duration > TEST_PROBE_THRESHOLD) {
@@ -197,8 +195,7 @@ static int __init test_async_probe_init(void)
goto err_unregister_sync_devs;
}
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
pr_info("registration took %lld msecs\n", duration);
if (duration < TEST_PROBE_THRESHOLD) {
dev_err(&(*pdev)->dev,
@@ -223,8 +220,7 @@ static int __init test_async_probe_init(void)
sync_id++;
- delta = ktime_sub(ktime_get(), calltime);
- duration = (unsigned long long) ktime_to_ms(delta);
+ duration = (unsigned long long)ktime_ms_delta(ktime_get(), calltime);
dev_info(&(*pdev)->dev,
"registration took %lld msecs\n", duration);
if (duration < TEST_PROBE_THRESHOLD) {
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 8f2b641d0b8c..fc24e89f9592 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -45,11 +45,15 @@ static ssize_t name##_list_read(struct file *file, struct kobject *kobj, \
define_id_show_func(physical_package_id);
static DEVICE_ATTR_RO(physical_package_id);
+#ifdef TOPOLOGY_DIE_SYSFS
define_id_show_func(die_id);
static DEVICE_ATTR_RO(die_id);
+#endif
+#ifdef TOPOLOGY_CLUSTER_SYSFS
define_id_show_func(cluster_id);
static DEVICE_ATTR_RO(cluster_id);
+#endif
define_id_show_func(core_id);
static DEVICE_ATTR_RO(core_id);
@@ -66,19 +70,23 @@ define_siblings_read_func(core_siblings, core_cpumask);
static BIN_ATTR_RO(core_siblings, 0);
static BIN_ATTR_RO(core_siblings_list, 0);
+#ifdef TOPOLOGY_CLUSTER_SYSFS
define_siblings_read_func(cluster_cpus, cluster_cpumask);
static BIN_ATTR_RO(cluster_cpus, 0);
static BIN_ATTR_RO(cluster_cpus_list, 0);
+#endif
+#ifdef TOPOLOGY_DIE_SYSFS
define_siblings_read_func(die_cpus, die_cpumask);
static BIN_ATTR_RO(die_cpus, 0);
static BIN_ATTR_RO(die_cpus_list, 0);
+#endif
define_siblings_read_func(package_cpus, core_cpumask);
static BIN_ATTR_RO(package_cpus, 0);
static BIN_ATTR_RO(package_cpus_list, 0);
-#ifdef CONFIG_SCHED_BOOK
+#ifdef TOPOLOGY_BOOK_SYSFS
define_id_show_func(book_id);
static DEVICE_ATTR_RO(book_id);
define_siblings_read_func(book_siblings, book_cpumask);
@@ -86,7 +94,7 @@ static BIN_ATTR_RO(book_siblings, 0);
static BIN_ATTR_RO(book_siblings_list, 0);
#endif
-#ifdef CONFIG_SCHED_DRAWER
+#ifdef TOPOLOGY_DRAWER_SYSFS
define_id_show_func(drawer_id);
static DEVICE_ATTR_RO(drawer_id);
define_siblings_read_func(drawer_siblings, drawer_cpumask);
@@ -101,17 +109,21 @@ static struct bin_attribute *bin_attrs[] = {
&bin_attr_thread_siblings_list,
&bin_attr_core_siblings,
&bin_attr_core_siblings_list,
+#ifdef TOPOLOGY_CLUSTER_SYSFS
&bin_attr_cluster_cpus,
&bin_attr_cluster_cpus_list,
+#endif
+#ifdef TOPOLOGY_DIE_SYSFS
&bin_attr_die_cpus,
&bin_attr_die_cpus_list,
+#endif
&bin_attr_package_cpus,
&bin_attr_package_cpus_list,
-#ifdef CONFIG_SCHED_BOOK
+#ifdef TOPOLOGY_BOOK_SYSFS
&bin_attr_book_siblings,
&bin_attr_book_siblings_list,
#endif
-#ifdef CONFIG_SCHED_DRAWER
+#ifdef TOPOLOGY_DRAWER_SYSFS
&bin_attr_drawer_siblings,
&bin_attr_drawer_siblings_list,
#endif
@@ -120,13 +132,17 @@ static struct bin_attribute *bin_attrs[] = {
static struct attribute *default_attrs[] = {
&dev_attr_physical_package_id.attr,
+#ifdef TOPOLOGY_DIE_SYSFS
&dev_attr_die_id.attr,
+#endif
+#ifdef TOPOLOGY_CLUSTER_SYSFS
&dev_attr_cluster_id.attr,
+#endif
&dev_attr_core_id.attr,
-#ifdef CONFIG_SCHED_BOOK
+#ifdef TOPOLOGY_BOOK_SYSFS
&dev_attr_book_id.attr,
#endif
-#ifdef CONFIG_SCHED_DRAWER
+#ifdef TOPOLOGY_DRAWER_SYSFS
&dev_attr_drawer_id.attr,
#endif
NULL
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 2a51dfb09c8f..519b6d38d4df 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -392,17 +392,6 @@ config BLK_DEV_RBD
If unsure, say N.
-config BLK_DEV_RSXX
- tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
- depends on PCI
- select CRC32
- help
- Device driver for IBM's high speed PCIe SSD
- storage device: Flash Adapter 900GB Full Height.
-
- To compile this driver as a module, choose M here: the
- module will be called rsxx.
-
source "drivers/block/rnbd/Kconfig"
endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 11a74f17c9ad..934a9c7c3a7c 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -34,7 +34,6 @@ obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
-obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
obj-$(CONFIG_ZRAM) += zram/
obj-$(CONFIG_BLK_DEV_RNBD) += rnbd/
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index bf5c124c5452..5a566f2fd533 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1505,7 +1505,7 @@ static blk_status_t amiflop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
- struct amiga_floppy_struct *floppy = rq->rq_disk->private_data;
+ struct amiga_floppy_struct *floppy = rq->q->disk->private_data;
blk_status_t err;
if (!spin_trylock_irq(&amiflop_lock))
@@ -1790,6 +1790,7 @@ static int fd_alloc_disk(int drive, int system)
disk->first_minor = drive + system;
disk->minors = 1;
disk->fops = &floppy_fops;
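+	/* floppies carry no partition table: GENHD_FL_NO_PART (the successor
+	 * to GENHD_FL_NO_PART_SCAN) tells the block core that this disk does
+	 * not support partitions at all */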
+ disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
if (system)
sprintf(disk->disk_name, "fd%d_msdos", drive);
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 588889bea7c3..6af111f568e4 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -122,7 +122,7 @@ newtag(struct aoedev *d)
register ulong n;
n = jiffies & 0xffff;
- return n |= (++d->lasttag & 0x7fff) << 16;
+ return n | (++d->lasttag & 0x7fff) << 16;
}
static u32
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index bf769e6e32fe..5d819a466e2f 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1502,7 +1502,7 @@ static void setup_req_params( int drive )
static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
- struct atari_floppy_struct *floppy = bd->rq->rq_disk->private_data;
+ struct atari_floppy_struct *floppy = bd->rq->q->disk->private_data;
int drive = floppy - unit;
int type = floppy->type;
@@ -1538,7 +1538,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!UDT) {
Probing = 1;
UDT = atari_disk_type + StartDiskType[DriveType];
- set_capacity(bd->rq->rq_disk, UDT->blocks);
+ set_capacity(bd->rq->q->disk, UDT->blocks);
UD.autoprobe = 1;
}
}
@@ -1558,7 +1558,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
}
type = minor2disktype[type].index;
UDT = &atari_disk_type[type];
- set_capacity(bd->rq->rq_disk, UDT->blocks);
+ set_capacity(bd->rq->q->disk, UDT->blocks);
UD.autoprobe = 0;
}
@@ -2000,6 +2000,7 @@ static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
disk->minors = 1;
sprintf(disk->disk_name, "fd%d", drive);
disk->fops = &floppy_fops;
+ disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->private_data = &unit[drive];
set_capacity(disk, MAX_DISK_SIZE * 2);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index a896ee175d86..6e3f2f0d2352 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -362,7 +362,6 @@ __setup("ramdisk_size=", ramdisk_size);
* (should share code eventually).
*/
static LIST_HEAD(brd_devices);
-static DEFINE_MUTEX(brd_devices_mutex);
static struct dentry *brd_debugfs_dir;
static int brd_alloc(int i)
@@ -372,21 +371,14 @@ static int brd_alloc(int i)
char buf[DISK_NAME_LEN];
int err = -ENOMEM;
- mutex_lock(&brd_devices_mutex);
- list_for_each_entry(brd, &brd_devices, brd_list) {
- if (brd->brd_number == i) {
- mutex_unlock(&brd_devices_mutex);
+ list_for_each_entry(brd, &brd_devices, brd_list)
+ if (brd->brd_number == i)
return -EEXIST;
- }
- }
brd = kzalloc(sizeof(*brd), GFP_KERNEL);
- if (!brd) {
- mutex_unlock(&brd_devices_mutex);
+ if (!brd)
return -ENOMEM;
- }
brd->brd_number = i;
list_add_tail(&brd->brd_list, &brd_devices);
- mutex_unlock(&brd_devices_mutex);
spin_lock_init(&brd->brd_lock);
INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
@@ -405,7 +397,6 @@ static int brd_alloc(int i)
disk->minors = max_part;
disk->fops = &brd_fops;
disk->private_data = brd;
- disk->flags = GENHD_FL_EXT_DEVT;
strlcpy(disk->disk_name, buf, DISK_NAME_LEN);
set_capacity(disk, rd_size * 2);
@@ -430,9 +421,7 @@ static int brd_alloc(int i)
out_cleanup_disk:
blk_cleanup_disk(disk);
out_free_dev:
- mutex_lock(&brd_devices_mutex);
list_del(&brd->brd_list);
- mutex_unlock(&brd_devices_mutex);
kfree(brd);
return err;
}
@@ -442,15 +431,19 @@ static void brd_probe(dev_t dev)
brd_alloc(MINOR(dev) / max_part);
}
-static void brd_del_one(struct brd_device *brd)
+static void brd_cleanup(void)
{
- del_gendisk(brd->brd_disk);
- blk_cleanup_disk(brd->brd_disk);
- brd_free_pages(brd);
- mutex_lock(&brd_devices_mutex);
- list_del(&brd->brd_list);
- mutex_unlock(&brd_devices_mutex);
- kfree(brd);
+ struct brd_device *brd, *next;
+
+ debugfs_remove_recursive(brd_debugfs_dir);
+
+ list_for_each_entry_safe(brd, next, &brd_devices, brd_list) {
+ del_gendisk(brd->brd_disk);
+ blk_cleanup_disk(brd->brd_disk);
+ brd_free_pages(brd);
+ list_del(&brd->brd_list);
+ kfree(brd);
+ }
}
static inline void brd_check_and_reset_par(void)
@@ -474,9 +467,18 @@ static inline void brd_check_and_reset_par(void)
static int __init brd_init(void)
{
- struct brd_device *brd, *next;
int err, i;
+ brd_check_and_reset_par();
+
+ brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
+
+ for (i = 0; i < rd_nr; i++) {
+ err = brd_alloc(i);
+ if (err)
+ goto out_free;
+ }
+
/*
* brd module now has a feature to instantiate underlying device
* structure on-demand, provided that there is an access dev node.
@@ -492,28 +494,16 @@ static int __init brd_init(void)
* dynamically.
*/
- if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe))
- return -EIO;
-
- brd_check_and_reset_par();
-
- brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
-
- for (i = 0; i < rd_nr; i++) {
- err = brd_alloc(i);
- if (err)
- goto out_free;
+ if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe)) {
+ err = -EIO;
+ goto out_free;
}
pr_info("brd: module loaded\n");
return 0;
out_free:
- unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
- debugfs_remove_recursive(brd_debugfs_dir);
-
- list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
- brd_del_one(brd);
+ brd_cleanup();
pr_info("brd: module NOT loaded !!!\n");
return err;
@@ -521,13 +511,9 @@ out_free:
static void __exit brd_exit(void)
{
- struct brd_device *brd, *next;
unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
- debugfs_remove_recursive(brd_debugfs_dir);
-
- list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
- brd_del_one(brd);
+ brd_cleanup();
pr_info("brd: module unloaded\n");
}
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 53ba2dddba6e..6f450816c4fa 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -729,7 +729,8 @@ int drbd_send_sync_param(struct drbd_peer_device *peer_device)
cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
/* initialize verify_alg and csums_alg */
- memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
+ BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX);
+ memset(&p->algs, 0, sizeof(p->algs));
if (get_ldev(peer_device->device)) {
dc = rcu_dereference(peer_device->device->ldev->disk_conf);
@@ -2734,6 +2735,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
disk->first_minor = minor;
disk->minors = 1;
disk->fops = &drbd_ops;
+ disk->flags |= GENHD_FL_NO_PART;
sprintf(disk->disk_name, "drbd%d", minor);
disk->private_data = device;
diff --git a/drivers/block/drbd/drbd_protocol.h b/drivers/block/drbd/drbd_protocol.h
index dea59c92ecc1..a882b65ab5d2 100644
--- a/drivers/block/drbd/drbd_protocol.h
+++ b/drivers/block/drbd/drbd_protocol.h
@@ -283,8 +283,10 @@ struct p_rs_param_89 {
struct p_rs_param_95 {
u32 resync_rate;
- char verify_alg[SHARED_SECRET_MAX];
- char csums_alg[SHARED_SECRET_MAX];
+ struct_group(algs,
+ char verify_alg[SHARED_SECRET_MAX];
+ char csums_alg[SHARED_SECRET_MAX];
+ );
u32 c_plan_ahead;
u32 c_delay_target;
u32 c_fill_target;
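
struct_group() (from <linux/stddef.h>) wraps the named members in an anonymous union so they remain addressable individually while also being addressable, and sized, as one unit. A reduced sketch of the idea (member sizes hypothetical):

	struct demo {
		u32 rate;
		struct_group(algs,
			char verify_alg[64];
			char csums_alg[64];
		);
	};

	struct demo d;

	/* one call clears both arrays; sizeof() covers the whole group, so
	 * FORTIFY_SOURCE no longer sees a memset() overflowing verify_alg */
	memset(&d.algs, 0, sizeof(d.algs));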
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 1f740e42e457..6df2539e215b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3921,7 +3921,8 @@ static int receive_SyncParam(struct drbd_connection *connection, struct packet_i
/* initialize verify_alg and csums_alg */
p = pi->data;
- memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
+ BUILD_BUG_ON(sizeof(p->algs) != 2 * SHARED_SECRET_MAX);
+ memset(&p->algs, 0, sizeof(p->algs));
err = drbd_recv_all(peer_device->connection, p, header_size);
if (err)
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index c4267da716fe..e611411a934c 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -1015,7 +1015,7 @@ static DECLARE_DELAYED_WORK(fd_timer, fd_timer_workfn);
static void cancel_activity(void)
{
do_floppy = NULL;
- cancel_delayed_work_sync(&fd_timer);
+ cancel_delayed_work(&fd_timer);
cancel_work_sync(&floppy_work);
}
@@ -2259,7 +2259,7 @@ static int do_format(int drive, struct format_descr *tmp_format_req)
static void floppy_end_request(struct request *req, blk_status_t error)
{
unsigned int nr_sectors = current_count_sectors;
- unsigned int drive = (unsigned long)req->rq_disk->private_data;
+ unsigned int drive = (unsigned long)req->q->disk->private_data;
/* current_count_sectors can be zero if transfer failed */
if (error)
@@ -2550,7 +2550,7 @@ static int make_raw_rw_request(void)
if (WARN(max_buffer_sectors == 0, "VFS: Block I/O scheduled on unopened device\n"))
return 0;
- set_fdc((long)current_req->rq_disk->private_data);
+ set_fdc((long)current_req->q->disk->private_data);
raw_cmd = &default_raw_cmd;
raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
@@ -2792,7 +2792,7 @@ do_request:
return;
}
}
- drive = (long)current_req->rq_disk->private_data;
+ drive = (long)current_req->q->disk->private_data;
set_fdc(drive);
reschedule_timeout(current_drive, "redo fd request");
@@ -3081,6 +3081,8 @@ static void raw_cmd_free(struct floppy_raw_cmd **ptr)
}
}
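+/* Reject user-supplied lengths that could never be satisfied anyway: the
+ * page allocator cannot hand out a contiguous buffer at or above this size. */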
+#define MAX_LEN (1UL << MAX_ORDER << PAGE_SHIFT)
+
static int raw_cmd_copyin(int cmd, void __user *param,
struct floppy_raw_cmd **rcmd)
{
@@ -3108,7 +3110,7 @@ loop:
ptr->resultcode = 0;
if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
- if (ptr->length <= 0)
+ if (ptr->length <= 0 || ptr->length >= MAX_LEN)
return -EINVAL;
ptr->kernel_data = (char *)fd_dma_mem_alloc(ptr->length);
fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
@@ -4503,6 +4505,7 @@ static int floppy_alloc_disk(unsigned int drive, unsigned int type)
disk->first_minor = TOMINOR(drive) | (type << 2);
disk->minors = 1;
disk->fops = &floppy_fops;
+ disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
if (type)
sprintf(disk->disk_name, "fd%d_type%d", drive, type);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c3a36cfaa855..19fe19eaa50e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -79,6 +79,7 @@
#include <linux/ioprio.h>
#include <linux/blk-cgroup.h>
#include <linux/sched/mm.h>
+#include <linux/statfs.h>
#include "loop.h"
@@ -774,8 +775,13 @@ static void loop_config_discard(struct loop_device *lo)
granularity = 0;
} else {
+ struct kstatfs sbuf;
+
max_discard_sectors = UINT_MAX >> 9;
- granularity = inode->i_sb->s_blocksize;
+ if (!vfs_statfs(&file->f_path, &sbuf))
+ granularity = sbuf.f_bsize;
+ else
+ max_discard_sectors = 0;
}
if (max_discard_sectors) {
@@ -820,7 +826,7 @@ static inline int queue_on_root_worker(struct cgroup_subsys_state *css)
static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
{
- struct rb_node **node = &(lo->worker_tree.rb_node), *parent = NULL;
+ struct rb_node **node, *parent = NULL;
struct loop_worker *cur_worker, *worker = NULL;
struct work_struct *work;
struct list_head *cmd_list;
@@ -1061,7 +1067,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
lo->lo_flags |= LO_FLAGS_PARTSCAN;
partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
if (partscan)
- lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
+ lo->lo_disk->flags &= ~GENHD_FL_NO_PART;
loop_global_unlock(lo, is_loop);
if (partscan)
@@ -1082,13 +1088,10 @@ out_putf:
return error;
}
-static int __loop_clr_fd(struct loop_device *lo, bool release)
+static void __loop_clr_fd(struct loop_device *lo, bool release)
{
- struct file *filp = NULL;
+ struct file *filp;
gfp_t gfp = lo->old_gfp_mask;
- int err = 0;
- bool partscan = false;
- int lo_number;
struct loop_worker *pos, *worker;
/*
@@ -1103,17 +1106,14 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
* became visible.
*/
+ /*
+	 * Since this function is called upon "ioctl(LOOP_CLR_FD)" or upon
+	 * "close() after ioctl(LOOP_CLR_FD)" (never both), it is a sign of
+	 * something going wrong if lo->lo_state has changed while waiting
+	 * for lo->lo_mutex.
+ */
mutex_lock(&lo->lo_mutex);
- if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
- err = -ENXIO;
- goto out_unlock;
- }
-
- filp = lo->lo_backing_file;
- if (filp == NULL) {
- err = -EINVAL;
- goto out_unlock;
- }
+ BUG_ON(lo->lo_state != Lo_rundown);
+ mutex_unlock(&lo->lo_mutex);
if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags))
blk_queue_write_cache(lo->lo_queue, false, false);
@@ -1134,6 +1134,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
del_timer_sync(&lo->timer);
spin_lock_irq(&lo->lo_lock);
+ filp = lo->lo_backing_file;
lo->lo_backing_file = NULL;
spin_unlock_irq(&lo->lo_lock);
@@ -1153,12 +1154,11 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
module_put(THIS_MODULE);
blk_mq_unfreeze_queue(lo->lo_queue);
- partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
- lo_number = lo->lo_number;
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
-out_unlock:
- mutex_unlock(&lo->lo_mutex);
- if (partscan) {
+
+ if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
+ int err;
+
/*
* open_mutex has been held already in release path, so don't
* acquire it if this function is called in such case.
@@ -1174,24 +1174,20 @@ out_unlock:
mutex_unlock(&lo->lo_disk->open_mutex);
if (err)
pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
- __func__, lo_number, err);
+ __func__, lo->lo_number, err);
/* Device is gone, no point in returning error */
- err = 0;
}
/*
* lo->lo_state is set to Lo_unbound here after above partscan has
- * finished.
- *
- * There cannot be anybody else entering __loop_clr_fd() as
- * lo->lo_backing_file is already cleared and Lo_rundown state
- * protects us from all the other places trying to change the 'lo'
- * device.
+ * finished. There cannot be anybody else entering __loop_clr_fd() as
+ * Lo_rundown state protects us from all the other places trying to
+ * change the 'lo' device.
*/
- mutex_lock(&lo->lo_mutex);
lo->lo_flags = 0;
if (!part_shift)
- lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
+ lo->lo_disk->flags |= GENHD_FL_NO_PART;
+ mutex_lock(&lo->lo_mutex);
lo->lo_state = Lo_unbound;
mutex_unlock(&lo->lo_mutex);
@@ -1200,9 +1196,7 @@ out_unlock:
* lo_mutex triggers a circular lock dependency possibility warning as
* fput can take open_mutex which is usually taken before lo_mutex.
*/
- if (filp)
- fput(filp);
- return err;
+ fput(filp);
}
static int loop_clr_fd(struct loop_device *lo)
@@ -1234,7 +1228,8 @@ static int loop_clr_fd(struct loop_device *lo)
lo->lo_state = Lo_rundown;
mutex_unlock(&lo->lo_mutex);
- return __loop_clr_fd(lo, false);
+ __loop_clr_fd(lo, false);
+ return 0;
}
static int
@@ -1301,7 +1296,7 @@ out_unfreeze:
if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
!(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
- lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
+ lo->lo_disk->flags &= ~GENHD_FL_NO_PART;
partscan = true;
}
out_unlock:
@@ -2032,8 +2027,7 @@ static int loop_add(int i)
* userspace tools. Parameters like this in general should be avoided.
*/
if (!part_shift)
- disk->flags |= GENHD_FL_NO_PART_SCAN;
- disk->flags |= GENHD_FL_EXT_DEVT;
+ disk->flags |= GENHD_FL_NO_PART;
atomic_set(&lo->lo_refcnt, 0);
mutex_init(&lo->lo_mutex);
lo->lo_number = i;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index c91b9010c1a6..2b588b62cbbb 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -136,16 +136,15 @@ struct mtip_compat_ide_task_request_s {
* return value
* true if device removed, else false
*/
-static bool mtip_check_surprise_removal(struct pci_dev *pdev)
+static bool mtip_check_surprise_removal(struct driver_data *dd)
{
u16 vendor_id = 0;
- struct driver_data *dd = pci_get_drvdata(pdev);
if (dd->sr)
return true;
/* Read the vendorID from the configuration space */
- pci_read_config_word(pdev, 0x00, &vendor_id);
+ pci_read_config_word(dd->pdev, 0x00, &vendor_id);
if (vendor_id == 0xFFFF) {
dd->sr = true;
if (dd->queue)
@@ -447,7 +446,7 @@ static int mtip_device_reset(struct driver_data *dd)
{
int rv = 0;
- if (mtip_check_surprise_removal(dd->pdev))
+ if (mtip_check_surprise_removal(dd))
return 0;
if (mtip_hba_reset(dd) < 0)
@@ -727,7 +726,7 @@ static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
dev_warn(&dd->pdev->dev,
"Port stat errors %x unhandled\n",
(port_stat & ~PORT_IRQ_HANDLED));
- if (mtip_check_surprise_removal(dd->pdev))
+ if (mtip_check_surprise_removal(dd))
return;
}
if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR))) {
@@ -752,7 +751,7 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
/* Acknowledge the interrupt status on the port.*/
port_stat = readl(port->mmio + PORT_IRQ_STAT);
if (unlikely(port_stat == 0xFFFFFFFF)) {
- mtip_check_surprise_removal(dd->pdev);
+ mtip_check_surprise_removal(dd);
return IRQ_HANDLED;
}
writel(port_stat, port->mmio + PORT_IRQ_STAT);
@@ -796,7 +795,7 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
}
if (unlikely(port_stat & PORT_IRQ_ERR)) {
- if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
+ if (unlikely(mtip_check_surprise_removal(dd))) {
/* don't proceed further */
return IRQ_HANDLED;
}
@@ -915,7 +914,7 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
msleep(100);
- if (mtip_check_surprise_removal(port->dd->pdev))
+ if (mtip_check_surprise_removal(port->dd))
goto err_fault;
active = mtip_commands_active(port);
@@ -980,7 +979,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
return -EFAULT;
}
- if (mtip_check_surprise_removal(dd->pdev))
+ if (mtip_check_surprise_removal(dd))
return -EFAULT;
rq = blk_mq_alloc_request(dd->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_RESERVED);
@@ -1015,14 +1014,14 @@ static int mtip_exec_internal_command(struct mtip_port *port,
rq->timeout = timeout;
/* insert request and run queue */
- blk_execute_rq(NULL, rq, true);
+ blk_execute_rq(rq, true);
if (int_cmd->status) {
dev_err(&dd->pdev->dev, "Internal command [%02X] failed %d\n",
fis->command, int_cmd->status);
rv = -EIO;
- if (mtip_check_surprise_removal(dd->pdev) ||
+ if (mtip_check_surprise_removal(dd) ||
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag)) {
dev_err(&dd->pdev->dev,
@@ -2513,7 +2512,7 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
&dd->dd_flag)))
return -EFAULT;
- if (mtip_check_surprise_removal(dd->pdev))
+ if (mtip_check_surprise_removal(dd))
return -EFAULT;
if (mtip_get_identify(dd->port, NULL) < 0)
@@ -2891,7 +2890,7 @@ static int mtip_hw_init(struct driver_data *dd)
time_before(jiffies, timeout)) {
mdelay(100);
}
- if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
+ if (unlikely(mtip_check_surprise_removal(dd))) {
timetaken = jiffies - timetaken;
dev_warn(&dd->pdev->dev,
"Surprise removal detected at %u ms\n",
@@ -4098,7 +4097,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
list_add(&dd->remove_list, &removing_list);
spin_unlock_irqrestore(&dev_lock, flags);
- mtip_check_surprise_removal(pdev);
+ mtip_check_surprise_removal(dd);
synchronize_irq(dd->pdev->irq);
/* Spin until workers are done */
@@ -4113,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
"Completion workers still active!\n");
}
- blk_set_queue_dying(dd->queue);
+ blk_mark_disk_dead(dd->disk);
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
/* Clean up the block layer. */
@@ -4145,36 +4144,17 @@ static void mtip_pci_remove(struct pci_dev *pdev)
* 0 Success
* <0 Error
*/
-static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
+static int __maybe_unused mtip_pci_suspend(struct device *dev)
{
int rv = 0;
- struct driver_data *dd = pci_get_drvdata(pdev);
-
- if (!dd) {
- dev_err(&pdev->dev,
- "Driver private datastructure is NULL\n");
- return -EFAULT;
- }
+ struct driver_data *dd = dev_get_drvdata(dev);
set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
/* Disable ports & interrupts then send standby immediate */
rv = mtip_block_suspend(dd);
- if (rv < 0) {
- dev_err(&pdev->dev,
- "Failed to suspend controller\n");
- return rv;
- }
-
- /*
- * Save the pci config space to pdev structure &
- * disable the device
- */
- pci_save_state(pdev);
- pci_disable_device(pdev);
-
- /* Move to Low power state*/
- pci_set_power_state(pdev, PCI_D3hot);
+ if (rv < 0)
+ dev_err(dev, "Failed to suspend controller\n");
return rv;
}
@@ -4186,32 +4166,10 @@ static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
* 0 Success
* <0 Error
*/
-static int mtip_pci_resume(struct pci_dev *pdev)
+static int __maybe_unused mtip_pci_resume(struct device *dev)
{
int rv = 0;
- struct driver_data *dd;
-
- dd = pci_get_drvdata(pdev);
- if (!dd) {
- dev_err(&pdev->dev,
- "Driver private datastructure is NULL\n");
- return -EFAULT;
- }
-
- /* Move the device to active State */
- pci_set_power_state(pdev, PCI_D0);
-
- /* Restore PCI configuration space */
- pci_restore_state(pdev);
-
- /* Enable the PCI device*/
- rv = pcim_enable_device(pdev);
- if (rv < 0) {
- dev_err(&pdev->dev,
- "Failed to enable card during resume\n");
- goto err;
- }
- pci_set_master(pdev);
+ struct driver_data *dd = dev_get_drvdata(dev);
/*
* Calls hbaReset, initPort, & startPort function
@@ -4219,9 +4177,8 @@ static int mtip_pci_resume(struct pci_dev *pdev)
*/
rv = mtip_block_resume(dd);
if (rv < 0)
- dev_err(&pdev->dev, "Unable to resume\n");
+ dev_err(dev, "Unable to resume\n");
-err:
clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
return rv;
@@ -4252,14 +4209,15 @@ static const struct pci_device_id mtip_pci_tbl[] = {
{ 0 }
};
+static SIMPLE_DEV_PM_OPS(mtip_pci_pm_ops, mtip_pci_suspend, mtip_pci_resume);
+
/* Structure that describes the PCI driver functions. */
static struct pci_driver mtip_pci_driver = {
.name = MTIP_DRV_NAME,
.id_table = mtip_pci_tbl,
.probe = mtip_pci_probe,
.remove = mtip_pci_remove,
- .suspend = mtip_pci_suspend,
- .resume = mtip_pci_resume,
+ .driver.pm = &mtip_pci_pm_ops,
.shutdown = mtip_pci_shutdown,
};
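
The legacy PCI .suspend/.resume callbacks had to save/restore config space and change the power state by hand; with dev_pm_ops the PCI core does that around the driver callbacks, which is why all the pci_save_state()/pci_set_power_state() plumbing above could be deleted. A generic sketch of the pattern (names hypothetical):

	static int __maybe_unused demo_suspend(struct device *dev)
	{
		struct demo_data *dd = dev_get_drvdata(dev);

		return demo_quiesce(dd);	/* device-specific work only */
	}

	static int __maybe_unused demo_resume(struct device *dev)
	{
		return demo_reinit(dev_get_drvdata(dev));
	}

	static SIMPLE_DEV_PM_OPS(demo_pm_ops, demo_suspend, demo_resume);

	static struct pci_driver demo_driver = {
		.driver.pm = &demo_pm_ops,	/* no legacy .suspend/.resume */
	};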
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index 78282f01f581..4db9a8c244af 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -136,7 +136,7 @@ static int __init n64cart_probe(struct platform_device *pdev)
goto out;
disk->first_minor = 0;
- disk->flags = GENHD_FL_NO_PART_SCAN;
+ disk->flags = GENHD_FL_NO_PART;
disk->fops = &n64cart_fops;
disk->private_data = &pdev->dev;
strcpy(disk->disk_name, "n64cart");
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 323af5c9c802..13004beb48ca 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -340,9 +340,9 @@ static int nullb_update_nr_hw_queues(struct nullb_device *dev,
return 0;
/*
- * Make sure at least one queue exists for each of submit and poll.
+ * Make sure at least one submit queue exists.
*/
- if (!submit_queues || !poll_queues)
+ if (!submit_queues)
return -EINVAL;
/*
@@ -1574,7 +1574,9 @@ static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
cmd = blk_mq_rq_to_pdu(req);
cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
blk_rq_sectors(req));
- end_cmd(cmd);
+ if (!blk_mq_add_to_batch(req, iob, (__force int) cmd->error,
+ blk_mq_end_request_batch))
+ end_cmd(cmd);
nr++;
}
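
blk_mq_add_to_batch() returns false when the request cannot join the completion batch (e.g. no batch was supplied or the command finished with an error), so the individual end_cmd() completion is kept as the fallback.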
@@ -1850,7 +1852,6 @@ static int null_gendisk_register(struct nullb *nullb)
set_capacity(disk, size);
- disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->major = null_major;
disk->first_minor = nullb->index;
disk->minors = 1;
@@ -1891,7 +1892,7 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
if (g_shared_tag_bitmap)
set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
set->driver_data = nullb;
- if (g_poll_queues)
+ if (poll_queues)
set->nr_maps = 3;
else
set->nr_maps = 1;
@@ -1918,8 +1919,6 @@ static int null_validate_conf(struct nullb_device *dev)
if (dev->poll_queues > g_poll_queues)
dev->poll_queues = g_poll_queues;
- else if (dev->poll_queues == 0)
- dev->poll_queues = 1;
dev->prev_poll_queues = dev->poll_queues;
dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
diff --git a/drivers/block/null_blk/trace.h b/drivers/block/null_blk/trace.h
index ce3b430e88c5..86d6c12c603c 100644
--- a/drivers/block/null_blk/trace.h
+++ b/drivers/block/null_blk/trace.h
@@ -44,7 +44,7 @@ TRACE_EVENT(nullb_zone_op,
__entry->op = req_op(cmd->rq);
__entry->zone_no = zone_no;
__entry->zone_cond = zone_cond;
- __assign_disk_name(__entry->disk, cmd->rq->rq_disk);
+ __assign_disk_name(__entry->disk, cmd->rq->q->disk);
),
TP_printk("%s req=%-15s zone_no=%u zone_cond=%-10s",
__print_disk_name(__entry->disk),
diff --git a/drivers/block/paride/bpck.c b/drivers/block/paride/bpck.c
index f5f63ca2889d..d880a9465e9b 100644
--- a/drivers/block/paride/bpck.c
+++ b/drivers/block/paride/bpck.c
@@ -28,6 +28,7 @@
#undef r2
#undef w2
+#undef PC
#define PC pi->private
#define r2() (PC=(in_p(2) & 0xff))
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index f6b1d63e96e1..f462ad67931a 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -690,7 +690,7 @@ static void pcd_request(void)
if (!pcd_req && !set_next_request())
return;
- cd = pcd_req->rq_disk->private_data;
+ cd = pcd_req->q->disk->private_data;
if (cd != pcd_current)
pcd_bufblk = -1;
pcd_current = cd;
@@ -928,8 +928,9 @@ static int pcd_init_unit(struct pcd_unit *cd, bool autoprobe, int port,
disk->minors = 1;
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
- disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
+ disk->event_flags = DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE;
if (!pi_init(cd->pi, autoprobe, port, mode, unit, protocol, delay,
pcd_buffer, PI_PCD, verbose, cd->name)) {
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index fba865058a17..3637c38c72f9 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -430,7 +430,7 @@ static void run_fsm(void)
int stop = 0;
if (!phase) {
- pd_current = pd_req->rq_disk->private_data;
+ pd_current = pd_req->q->disk->private_data;
pi_current = pd_current->pi;
phase = do_pd_io_start;
}
@@ -492,7 +492,7 @@ static enum action do_pd_io_start(void)
case REQ_OP_WRITE:
pd_block = blk_rq_pos(pd_req);
pd_count = blk_rq_cur_sectors(pd_req);
- if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
+ if (pd_block + pd_count > get_capacity(pd_req->q->disk))
return Fail;
pd_run = blk_rq_sectors(pd_req);
pd_buf = bio_data(pd_req->bio);
@@ -781,7 +781,7 @@ static int pd_special_command(struct pd_unit *disk,
req = blk_mq_rq_to_pdu(rq);
req->func = func;
- blk_execute_rq(disk->gd, rq, 0);
+ blk_execute_rq(rq, false);
blk_mq_free_request(rq);
return 0;
}
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index bf8d0ef41a0a..292e9a4ce1b9 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -746,12 +746,12 @@ repeat:
if (!pf_req && !set_next_request())
return;
- pf_current = pf_req->rq_disk->private_data;
+ pf_current = pf_req->q->disk->private_data;
pf_block = blk_rq_pos(pf_req);
pf_run = blk_rq_sectors(pf_req);
pf_count = blk_rq_cur_sectors(pf_req);
- if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
+ if (pf_block + pf_count > get_capacity(pf_req->q->disk)) {
pf_end_request(BLK_STS_IOERR);
goto repeat;
}
@@ -942,6 +942,7 @@ static int __init pf_init_unit(struct pf_unit *pf, bool autoprobe, int port,
disk->minors = 1;
strcpy(disk->disk_name, pf->name);
disk->fops = &pf_fops;
+ disk->flags |= GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->private_data = pf;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index b53f648302c1..2b6b70a39e76 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -113,57 +113,10 @@ static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
}
-/*
- * create and register a pktcdvd kernel object.
- */
-static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd,
- const char* name,
- struct kobject* parent,
- struct kobj_type* ktype)
-{
- struct pktcdvd_kobj *p;
- int error;
-
- p = kzalloc(sizeof(*p), GFP_KERNEL);
- if (!p)
- return NULL;
- p->pd = pd;
- error = kobject_init_and_add(&p->kobj, ktype, parent, "%s", name);
- if (error) {
- kobject_put(&p->kobj);
- return NULL;
- }
- kobject_uevent(&p->kobj, KOBJ_ADD);
- return p;
-}
-/*
- * remove a pktcdvd kernel object.
- */
-static void pkt_kobj_remove(struct pktcdvd_kobj *p)
-{
- if (p)
- kobject_put(&p->kobj);
-}
-/*
- * default release function for pktcdvd kernel objects.
- */
-static void pkt_kobj_release(struct kobject *kobj)
-{
- kfree(to_pktcdvdkobj(kobj));
-}
-
-
/**********************************************************
- *
* sysfs interface for pktcdvd
* by (C) 2006 Thomas Maier <balagi@justmail.de>
- *
- **********************************************************/
-
-#define DEF_ATTR(_obj,_name,_mode) \
- static struct attribute _obj = { .name = _name, .mode = _mode }
-
-/**********************************************************
+
/sys/class/pktcdvd/pktcdvd[0-7]/
stat/reset
stat/packets_started
@@ -176,75 +129,94 @@ static void pkt_kobj_release(struct kobject *kobj)
write_queue/congestion_on
**********************************************************/
-DEF_ATTR(kobj_pkt_attr_st1, "reset", 0200);
-DEF_ATTR(kobj_pkt_attr_st2, "packets_started", 0444);
-DEF_ATTR(kobj_pkt_attr_st3, "packets_finished", 0444);
-DEF_ATTR(kobj_pkt_attr_st4, "kb_written", 0444);
-DEF_ATTR(kobj_pkt_attr_st5, "kb_read", 0444);
-DEF_ATTR(kobj_pkt_attr_st6, "kb_read_gather", 0444);
-
-static struct attribute *kobj_pkt_attrs_stat[] = {
- &kobj_pkt_attr_st1,
- &kobj_pkt_attr_st2,
- &kobj_pkt_attr_st3,
- &kobj_pkt_attr_st4,
- &kobj_pkt_attr_st5,
- &kobj_pkt_attr_st6,
- NULL
-};
+static ssize_t packets_started_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
-DEF_ATTR(kobj_pkt_attr_wq1, "size", 0444);
-DEF_ATTR(kobj_pkt_attr_wq2, "congestion_off", 0644);
-DEF_ATTR(kobj_pkt_attr_wq3, "congestion_on", 0644);
+ return sysfs_emit(buf, "%lu\n", pd->stats.pkt_started);
+}
+static DEVICE_ATTR_RO(packets_started);
-static struct attribute *kobj_pkt_attrs_wqueue[] = {
- &kobj_pkt_attr_wq1,
- &kobj_pkt_attr_wq2,
- &kobj_pkt_attr_wq3,
- NULL
-};
+static ssize_t packets_finished_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
-static ssize_t kobj_pkt_show(struct kobject *kobj,
- struct attribute *attr, char *data)
+ return sysfs_emit(buf, "%lu\n", pd->stats.pkt_ended);
+}
+static DEVICE_ATTR_RO(packets_finished);
+
+static ssize_t kb_written_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
- int n = 0;
- int v;
- if (strcmp(attr->name, "packets_started") == 0) {
- n = sprintf(data, "%lu\n", pd->stats.pkt_started);
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
- } else if (strcmp(attr->name, "packets_finished") == 0) {
- n = sprintf(data, "%lu\n", pd->stats.pkt_ended);
+ return sysfs_emit(buf, "%lu\n", pd->stats.secs_w >> 1);
+}
+static DEVICE_ATTR_RO(kb_written);
- } else if (strcmp(attr->name, "kb_written") == 0) {
- n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1);
+static ssize_t kb_read_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
- } else if (strcmp(attr->name, "kb_read") == 0) {
- n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1);
+ return sysfs_emit(buf, "%lu\n", pd->stats.secs_r >> 1);
+}
+static DEVICE_ATTR_RO(kb_read);
- } else if (strcmp(attr->name, "kb_read_gather") == 0) {
- n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1);
+static ssize_t kb_read_gather_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
- } else if (strcmp(attr->name, "size") == 0) {
- spin_lock(&pd->lock);
- v = pd->bio_queue_size;
- spin_unlock(&pd->lock);
- n = sprintf(data, "%d\n", v);
+ return sysfs_emit(buf, "%lu\n", pd->stats.secs_rg >> 1);
+}
+static DEVICE_ATTR_RO(kb_read_gather);
- } else if (strcmp(attr->name, "congestion_off") == 0) {
- spin_lock(&pd->lock);
- v = pd->write_congestion_off;
- spin_unlock(&pd->lock);
- n = sprintf(data, "%d\n", v);
+static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
- } else if (strcmp(attr->name, "congestion_on") == 0) {
- spin_lock(&pd->lock);
- v = pd->write_congestion_on;
- spin_unlock(&pd->lock);
- n = sprintf(data, "%d\n", v);
+ if (len > 0) {
+ pd->stats.pkt_started = 0;
+ pd->stats.pkt_ended = 0;
+ pd->stats.secs_w = 0;
+ pd->stats.secs_rg = 0;
+ pd->stats.secs_r = 0;
}
+ return len;
+}
+static DEVICE_ATTR_WO(reset);
+
+static struct attribute *pkt_stat_attrs[] = {
+ &dev_attr_packets_finished.attr,
+ &dev_attr_packets_started.attr,
+ &dev_attr_kb_read.attr,
+ &dev_attr_kb_written.attr,
+ &dev_attr_kb_read_gather.attr,
+ &dev_attr_reset.attr,
+ NULL,
+};
+
+static const struct attribute_group pkt_stat_group = {
+ .name = "stat",
+ .attrs = pkt_stat_attrs,
+};
+
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int n;
+
+ spin_lock(&pd->lock);
+ n = sysfs_emit(buf, "%d\n", pd->bio_queue_size);
+ spin_unlock(&pd->lock);
return n;
}
+static DEVICE_ATTR_RO(size);
static void init_write_congestion_marks(int* lo, int* hi)
{
@@ -263,30 +235,56 @@ static void init_write_congestion_marks(int* lo, int* hi)
}
}
-static ssize_t kobj_pkt_store(struct kobject *kobj,
- struct attribute *attr,
- const char *data, size_t len)
+static ssize_t congestion_off_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd;
- int val;
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int n;
- if (strcmp(attr->name, "reset") == 0 && len > 0) {
- pd->stats.pkt_started = 0;
- pd->stats.pkt_ended = 0;
- pd->stats.secs_w = 0;
- pd->stats.secs_rg = 0;
- pd->stats.secs_r = 0;
+ spin_lock(&pd->lock);
+ n = sysfs_emit(buf, "%d\n", pd->write_congestion_off);
+ spin_unlock(&pd->lock);
+ return n;
+}
+
+static ssize_t congestion_off_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int val;
- } else if (strcmp(attr->name, "congestion_off") == 0
- && sscanf(data, "%d", &val) == 1) {
+ if (sscanf(buf, "%d", &val) == 1) {
spin_lock(&pd->lock);
pd->write_congestion_off = val;
init_write_congestion_marks(&pd->write_congestion_off,
&pd->write_congestion_on);
spin_unlock(&pd->lock);
+ }
+ return len;
+}
+static DEVICE_ATTR_RW(congestion_off);
+
+static ssize_t congestion_on_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int n;
+
+ spin_lock(&pd->lock);
+ n = sysfs_emit(buf, "%d\n", pd->write_congestion_on);
+ spin_unlock(&pd->lock);
+ return n;
+}
+
+static ssize_t congestion_on_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct pktcdvd_device *pd = dev_get_drvdata(dev);
+ int val;
- } else if (strcmp(attr->name, "congestion_on") == 0
- && sscanf(data, "%d", &val) == 1) {
+ if (sscanf(buf, "%d", &val) == 1) {
spin_lock(&pd->lock);
pd->write_congestion_on = val;
init_write_congestion_marks(&pd->write_congestion_off,
@@ -295,44 +293,39 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
}
return len;
}
+static DEVICE_ATTR_RW(congestion_on);
-static const struct sysfs_ops kobj_pkt_ops = {
- .show = kobj_pkt_show,
- .store = kobj_pkt_store
+static struct attribute *pkt_wq_attrs[] = {
+ &dev_attr_congestion_on.attr,
+ &dev_attr_congestion_off.attr,
+ &dev_attr_size.attr,
+ NULL,
};
-static struct kobj_type kobj_pkt_type_stat = {
- .release = pkt_kobj_release,
- .sysfs_ops = &kobj_pkt_ops,
- .default_attrs = kobj_pkt_attrs_stat
+
+static const struct attribute_group pkt_wq_group = {
+ .name = "write_queue",
+ .attrs = pkt_wq_attrs,
};
-static struct kobj_type kobj_pkt_type_wqueue = {
- .release = pkt_kobj_release,
- .sysfs_ops = &kobj_pkt_ops,
- .default_attrs = kobj_pkt_attrs_wqueue
+
+static const struct attribute_group *pkt_groups[] = {
+ &pkt_stat_group,
+ &pkt_wq_group,
+ NULL,
};
static void pkt_sysfs_dev_new(struct pktcdvd_device *pd)
{
if (class_pktcdvd) {
- pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL,
- "%s", pd->name);
+ pd->dev = device_create_with_groups(class_pktcdvd, NULL,
+ MKDEV(0, 0), pd, pkt_groups,
+ "%s", pd->name);
if (IS_ERR(pd->dev))
pd->dev = NULL;
}
- if (pd->dev) {
- pd->kobj_stat = pkt_kobj_create(pd, "stat",
- &pd->dev->kobj,
- &kobj_pkt_type_stat);
- pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue",
- &pd->dev->kobj,
- &kobj_pkt_type_wqueue);
- }
}
static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd)
{
- pkt_kobj_remove(pd->kobj_stat);
- pkt_kobj_remove(pd->kobj_wqueue);
if (class_pktcdvd)
device_unregister(pd->dev);
}
@@ -722,7 +715,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
if (cgc->quiet)
rq->rq_flags |= RQF_QUIET;
- blk_execute_rq(pd->bdev->bd_disk, rq, 0);
+ blk_execute_rq(rq, false);
if (scsi_req(rq)->result)
ret = -EIO;
out:
@@ -1107,7 +1100,6 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
sector_t zone = 0; /* Suppress gcc warning */
struct pkt_rb_node *node, *first_node;
struct rb_node *n;
- int wakeup;
atomic_set(&pd->scan_queue, 0);
@@ -1179,12 +1171,14 @@ try_next_bio:
spin_unlock(&pkt->lock);
}
/* check write congestion marks, and if bio_queue_size is
- below, wake up any waiters */
- wakeup = (pd->write_congestion_on > 0
- && pd->bio_queue_size <= pd->write_congestion_off);
+ * below, wake up any waiters
+ */
+ if (pd->congested &&
+ pd->bio_queue_size <= pd->write_congestion_off) {
+ pd->congested = false;
+ wake_up_var(&pd->congested);
+ }
spin_unlock(&pd->lock);
- if (wakeup)
- clear_bdi_congested(pd->disk->bdi, BLK_RW_ASYNC);
pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
pkt_set_state(pkt, PACKET_WAITING_STATE);
@@ -2356,7 +2350,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
}
spin_unlock(&pd->cdrw.active_list_lock);
- /*
+ /*
* Test if there is enough room left in the bio work queue
* (queue size >= congestion on mark).
* If not, wait till the work queue size is below the congestion off mark.
@@ -2364,12 +2358,20 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
spin_lock(&pd->lock);
if (pd->write_congestion_on > 0
&& pd->bio_queue_size >= pd->write_congestion_on) {
- set_bdi_congested(bio->bi_bdev->bd_disk->bdi, BLK_RW_ASYNC);
- do {
+ struct wait_bit_queue_entry wqe;
+
+ init_wait_var_entry(&wqe, &pd->congested, 0);
+ for (;;) {
+ prepare_to_wait_event(__var_waitqueue(&pd->congested),
+ &wqe.wq_entry,
+ TASK_UNINTERRUPTIBLE);
+ if (pd->bio_queue_size <= pd->write_congestion_off)
+ break;
+ pd->congested = true;
spin_unlock(&pd->lock);
- congestion_wait(BLK_RW_ASYNC, HZ);
+ schedule();
spin_lock(&pd->lock);
- } while(pd->bio_queue_size > pd->write_congestion_off);
+ }
}
spin_unlock(&pd->lock);
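
The wait loop above is open-coded rather than using wait_var_event() because pd->lock has to be dropped across schedule(); with no lock in the picture, the same producer/consumer idea reduces to (sketch):

	/* writer, once the queue drains below the low-water mark: */
	pd->congested = false;
	wake_up_var(&pd->congested);

	/* waiter: */
	wait_var_event(&pd->congested, !READ_ONCE(pd->congested));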
@@ -2719,7 +2721,7 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
disk->first_minor = idx;
disk->minors = 1;
disk->fops = &pktcdvd_ops;
- disk->flags = GENHD_FL_REMOVABLE;
+ disk->flags = GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
strcpy(disk->disk_name, pd->name);
disk->private_data = pd;
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index c1876646a4cb..4f90819e245e 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -742,6 +742,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
priv->gendisk = gendisk;
gendisk->major = ps3vram_major;
gendisk->minors = 1;
+ gendisk->flags |= GENHD_FL_NO_PART;
gendisk->fops = &ps3vram_fops;
gendisk->private_data = dev;
strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 953fa134cd3d..b844432bad20 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4924,12 +4924,10 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->dev_id);
disk->major = rbd_dev->major;
disk->first_minor = rbd_dev->minor;
- if (single_major) {
+ if (single_major)
disk->minors = (1 << RBD_SINGLE_MAJOR_PART_SHIFT);
- disk->flags |= GENHD_FL_EXT_DEVT;
- } else {
+ else
disk->minors = RBD_MINORS_PER_MAJOR;
- }
disk->fops = &rbd_bd_ops;
disk->private_data = rbd_dev;
@@ -6191,7 +6189,7 @@ static inline size_t next_token(const char **buf)
* These are the characters that produce nonzero for
* isspace() in the "C" and "POSIX" locales.
*/
- const char *spaces = " \f\n\r\t\v";
+ static const char spaces[] = " \f\n\r\t\v";
*buf += strspn(*buf, spaces); /* Find start of token */
@@ -6497,7 +6495,8 @@ static int rbd_add_parse_args(const char *buf,
pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
pctx.opts->trim = RBD_TRIM_DEFAULT;
- ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL);
+ ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL,
+ ',');
if (ret)
goto out_err;
@@ -7186,7 +7185,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
* IO to complete/fail.
*/
blk_mq_freeze_queue(rbd_dev->disk->queue);
- blk_set_queue_dying(rbd_dev->disk->queue);
+ blk_mark_disk_dead(rbd_dev->disk);
}
del_gendisk(rbd_dev->disk);
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
index 44e45af00e83..2be5d87a3ca6 100644
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -452,6 +452,7 @@ static struct attribute *rnbd_dev_attrs[] = {
&rnbd_clt_nr_poll_queues.attr,
NULL,
};
+ATTRIBUTE_GROUPS(rnbd_dev);
void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
{
@@ -474,7 +475,7 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
static struct kobj_type rnbd_dev_ktype = {
.sysfs_ops = &kobj_sysfs_ops,
- .default_attrs = rnbd_dev_attrs,
+ .default_groups = rnbd_dev_groups,
};
static int rnbd_clt_add_dev_kobj(struct rnbd_clt_dev *dev)
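
ATTRIBUTE_GROUPS(rnbd_dev) generates the rnbd_dev_groups array that the ktype's ->default_groups member now points at; it expands to roughly:

	static const struct attribute_group rnbd_dev_group = {
		.attrs = rnbd_dev_attrs,
	};
	static const struct attribute_group *rnbd_dev_groups[] = {
		&rnbd_dev_group,
		NULL,
	};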
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index 2df0657cdf00..c08971de369f 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -196,7 +196,7 @@ rnbd_get_cpu_qlist(struct rnbd_clt_session *sess, int cpu)
return per_cpu_ptr(sess->cpu_queues, bit);
} else if (cpu != 0) {
/* Search from 0 to cpu */
- bit = find_next_bit(sess->cpu_queues_bm, cpu, 0);
+ bit = find_first_bit(sess->cpu_queues_bm, cpu);
if (bit < cpu)
return per_cpu_ptr(sess->cpu_queues, bit);
}
@@ -393,7 +393,7 @@ static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
static void rnbd_softirq_done_fn(struct request *rq)
{
- struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
+ struct rnbd_clt_dev *dev = rq->q->disk->private_data;
struct rnbd_clt_session *sess = dev->sess;
struct rnbd_iu *iu;
@@ -433,7 +433,7 @@ static void msg_conf(void *priv, int errno)
schedule_work(&iu->work);
}
-static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
+static int send_usr_msg(struct rtrs_clt_sess *rtrs, int dir,
struct rnbd_iu *iu, struct kvec *vec,
size_t len, struct scatterlist *sg, unsigned int sg_len,
void (*conf)(struct work_struct *work),
@@ -1010,7 +1010,7 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
struct request *rq,
struct rnbd_iu *iu)
{
- struct rtrs_clt *rtrs = dev->sess->rtrs;
+ struct rtrs_clt_sess *rtrs = dev->sess->rtrs;
struct rtrs_permit *permit = iu->permit;
struct rnbd_msg_io msg;
struct rtrs_clt_req_ops req_ops;
@@ -1133,7 +1133,7 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct request *rq = bd->rq;
- struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
+ struct rnbd_clt_dev *dev = rq->q->disk->private_data;
struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
int err;
blk_status_t ret = BLK_STS_IOERR;
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
index 9ef8c4f306f2..0c2cae7f39b9 100644
--- a/drivers/block/rnbd/rnbd-clt.h
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -75,7 +75,7 @@ struct rnbd_cpu_qlist {
struct rnbd_clt_session {
struct list_head list;
- struct rtrs_clt *rtrs;
+ struct rtrs_clt_sess *rtrs;
wait_queue_head_t rtrs_waitq;
bool rtrs_ready;
struct rnbd_cpu_qlist __percpu
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index aafecfe97055..1ee808fc600c 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -263,15 +263,15 @@ out:
kfree(srv_sess);
}
-static int create_sess(struct rtrs_srv *rtrs)
+static int create_sess(struct rtrs_srv_sess *rtrs)
{
struct rnbd_srv_session *srv_sess;
- char sessname[NAME_MAX];
+ char pathname[NAME_MAX];
int err;
- err = rtrs_srv_get_sess_name(rtrs, sessname, sizeof(sessname));
+ err = rtrs_srv_get_path_name(rtrs, pathname, sizeof(pathname));
if (err) {
- pr_err("rtrs_srv_get_sess_name(%s): %d\n", sessname, err);
+ pr_err("rtrs_srv_get_path_name(%s): %d\n", pathname, err);
return err;
}
@@ -284,8 +284,8 @@ static int create_sess(struct rtrs_srv *rtrs)
offsetof(struct rnbd_dev_blk_io, bio),
BIOSET_NEED_BVECS);
if (err) {
- pr_err("Allocating srv_session for session %s failed\n",
- sessname);
+ pr_err("Allocating srv_session for path %s failed\n",
+ pathname);
kfree(srv_sess);
return err;
}
@@ -298,14 +298,14 @@ static int create_sess(struct rtrs_srv *rtrs)
mutex_unlock(&sess_lock);
srv_sess->rtrs = rtrs;
- strscpy(srv_sess->sessname, sessname, sizeof(srv_sess->sessname));
+ strscpy(srv_sess->sessname, pathname, sizeof(srv_sess->sessname));
rtrs_srv_set_sess_priv(rtrs, srv_sess);
return 0;
}
-static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
+static int rnbd_srv_link_ev(struct rtrs_srv_sess *rtrs,
enum rtrs_srv_link_ev ev, void *priv)
{
struct rnbd_srv_session *srv_sess = priv;
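[Editor's note: the copy in create_sess() relies on strscpy(), which always NUL-terminates and reports truncation, unlike the older strlcpy(). A minimal sketch of checking its result — struct my_sess is a stand-in:

static int copy_path_name(struct my_sess *sess, const char *pathname)
{
	ssize_t n;

	/* Returns the number of characters copied (excluding the NUL)
	 * or -E2BIG if pathname had to be truncated. */
	n = strscpy(sess->name, pathname, sizeof(sess->name));
	if (n < 0)
		return -ENAMETOOLONG;
	return 0;
}
]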
diff --git a/drivers/block/rnbd/rnbd-srv.h b/drivers/block/rnbd/rnbd-srv.h
index 98ddc31eb408..e5604bce123a 100644
--- a/drivers/block/rnbd/rnbd-srv.h
+++ b/drivers/block/rnbd/rnbd-srv.h
@@ -20,7 +20,7 @@
struct rnbd_srv_session {
/* Entry inside global sess_list */
struct list_head list;
- struct rtrs_srv *rtrs;
+ struct rtrs_srv_sess *rtrs;
char sessname[NAME_MAX];
int queue_depth;
struct bio_set sess_bio_set;
diff --git a/drivers/block/rsxx/Makefile b/drivers/block/rsxx/Makefile
deleted file mode 100644
index 7ef158099d33..000000000000
--- a/drivers/block/rsxx/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
-rsxx-objs := config.o core.o cregs.o dev.o dma.o
diff --git a/drivers/block/rsxx/config.c b/drivers/block/rsxx/config.c
deleted file mode 100644
index 11ed1d9646b9..000000000000
--- a/drivers/block/rsxx/config.c
+++ /dev/null
@@ -1,197 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
-* Filename: config.c
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#include <linux/types.h>
-#include <linux/crc32.h>
-#include <linux/swab.h>
-
-#include "rsxx_priv.h"
-#include "rsxx_cfg.h"
-
-static void initialize_config(struct rsxx_card_cfg *cfg)
-{
- cfg->hdr.version = RSXX_CFG_VERSION;
-
- cfg->data.block_size = RSXX_HW_BLK_SIZE;
- cfg->data.stripe_size = RSXX_HW_BLK_SIZE;
- cfg->data.vendor_id = RSXX_VENDOR_ID_IBM;
- cfg->data.cache_order = (-1);
- cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED;
- cfg->data.intr_coal.count = 0;
- cfg->data.intr_coal.latency = 0;
-}
-
-static u32 config_data_crc32(struct rsxx_card_cfg *cfg)
-{
- /*
- * Return the complement of the CRC to ensure compatibility
- * (i.e. this is how early rsxx drivers did it.)
- */
-
- return ~crc32(~0, &cfg->data, sizeof(cfg->data));
-}
-
-
-/*----------------- Config Byte Swap Functions -------------------*/
-static void config_hdr_be_to_cpu(struct card_cfg_hdr *hdr)
-{
- hdr->version = be32_to_cpu((__force __be32) hdr->version);
- hdr->crc = be32_to_cpu((__force __be32) hdr->crc);
-}
-
-static void config_hdr_cpu_to_be(struct card_cfg_hdr *hdr)
-{
- hdr->version = (__force u32) cpu_to_be32(hdr->version);
- hdr->crc = (__force u32) cpu_to_be32(hdr->crc);
-}
-
-static void config_data_swab(struct rsxx_card_cfg *cfg)
-{
- u32 *data = (u32 *) &cfg->data;
- int i;
-
- for (i = 0; i < (sizeof(cfg->data) / 4); i++)
- data[i] = swab32(data[i]);
-}
-
-static void config_data_le_to_cpu(struct rsxx_card_cfg *cfg)
-{
- u32 *data = (u32 *) &cfg->data;
- int i;
-
- for (i = 0; i < (sizeof(cfg->data) / 4); i++)
- data[i] = le32_to_cpu((__force __le32) data[i]);
-}
-
-static void config_data_cpu_to_le(struct rsxx_card_cfg *cfg)
-{
- u32 *data = (u32 *) &cfg->data;
- int i;
-
- for (i = 0; i < (sizeof(cfg->data) / 4); i++)
- data[i] = (__force u32) cpu_to_le32(data[i]);
-}
-
-
-/*----------------- Config Operations ------------------*/
-static int rsxx_save_config(struct rsxx_cardinfo *card)
-{
- struct rsxx_card_cfg cfg;
- int st;
-
- memcpy(&cfg, &card->config, sizeof(cfg));
-
- if (unlikely(cfg.hdr.version != RSXX_CFG_VERSION)) {
- dev_err(CARD_TO_DEV(card),
- "Cannot save config with invalid version %d\n",
- cfg.hdr.version);
- return -EINVAL;
- }
-
- /* Convert data to little endian for the CRC calculation. */
- config_data_cpu_to_le(&cfg);
-
- cfg.hdr.crc = config_data_crc32(&cfg);
-
- /*
- * Swap the data from little endian to big endian so it can be
- * stored.
- */
- config_data_swab(&cfg);
- config_hdr_cpu_to_be(&cfg.hdr);
-
- st = rsxx_creg_write(card, CREG_ADD_CONFIG, sizeof(cfg), &cfg, 1);
- if (st)
- return st;
-
- return 0;
-}
-
-int rsxx_load_config(struct rsxx_cardinfo *card)
-{
- int st;
- u32 crc;
-
- st = rsxx_creg_read(card, CREG_ADD_CONFIG, sizeof(card->config),
- &card->config, 1);
- if (st) {
- dev_err(CARD_TO_DEV(card),
- "Failed reading card config.\n");
- return st;
- }
-
- config_hdr_be_to_cpu(&card->config.hdr);
-
- if (card->config.hdr.version == RSXX_CFG_VERSION) {
- /*
- * We calculate the CRC with the data in little endian, because
- * early drivers did not take big endian CPUs into account.
- * The data is always stored in big endian, so we need to byte
- * swap it before calculating the CRC.
- */
-
- config_data_swab(&card->config);
-
- /* Check the CRC */
- crc = config_data_crc32(&card->config);
- if (crc != card->config.hdr.crc) {
- dev_err(CARD_TO_DEV(card),
- "Config corruption detected!\n");
- dev_info(CARD_TO_DEV(card),
- "CRC (sb x%08x is x%08x)\n",
- card->config.hdr.crc, crc);
- return -EIO;
- }
-
- /* Convert the data to CPU byteorder */
- config_data_le_to_cpu(&card->config);
-
- } else if (card->config.hdr.version != 0) {
- dev_err(CARD_TO_DEV(card),
- "Invalid config version %d.\n",
- card->config.hdr.version);
- /*
- * Config version changes require special handling from the
- * user
- */
- return -EINVAL;
- } else {
- dev_info(CARD_TO_DEV(card),
- "Initializing card configuration.\n");
- initialize_config(&card->config);
- st = rsxx_save_config(card);
- if (st)
- return st;
- }
-
- card->config_valid = 1;
-
- dev_dbg(CARD_TO_DEV(card), "version: x%08x\n",
- card->config.hdr.version);
- dev_dbg(CARD_TO_DEV(card), "crc: x%08x\n",
- card->config.hdr.crc);
- dev_dbg(CARD_TO_DEV(card), "block_size: x%08x\n",
- card->config.data.block_size);
- dev_dbg(CARD_TO_DEV(card), "stripe_size: x%08x\n",
- card->config.data.stripe_size);
- dev_dbg(CARD_TO_DEV(card), "vendor_id: x%08x\n",
- card->config.data.vendor_id);
- dev_dbg(CARD_TO_DEV(card), "cache_order: x%08x\n",
- card->config.data.cache_order);
- dev_dbg(CARD_TO_DEV(card), "mode: x%08x\n",
- card->config.data.intr_coal.mode);
- dev_dbg(CARD_TO_DEV(card), "count: x%08x\n",
- card->config.data.intr_coal.count);
- dev_dbg(CARD_TO_DEV(card), "latency: x%08x\n",
- card->config.data.intr_coal.latency);
-
- return 0;
-}
-
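[Editor's note: config_data_crc32() in the removed config.c seeds the kernel's crc32() with ~0 and complements the result, which together yield the standard CRC-32 — the kernel primitive performs no implicit pre- or post-inversion. A self-contained user-space model:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise reflected CRC-32 (polynomial 0xEDB88320) with an explicit
 * seed and no final inversion, mirroring the kernel's crc32(). */
static uint32_t crc32_raw(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	int i;

	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	const char *s = "123456789";

	/* ~crc32_raw(~0, ...) is standard CRC-32: 0xCBF43926 here. */
	printf("0x%08X\n", ~crc32_raw(~0u, s, strlen(s)));
	return 0;
}
]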
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
deleted file mode 100644
index 8d9d69f5dfbc..000000000000
--- a/drivers/block/rsxx/core.c
+++ /dev/null
@@ -1,1126 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
-* Filename: core.c
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/reboot.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#include <linux/genhd.h>
-#include <linux/idr.h>
-
-#include "rsxx_priv.h"
-#include "rsxx_cfg.h"
-
-#define NO_LEGACY 0
-#define SYNC_START_TIMEOUT (10 * 60) /* 10 minutes */
-
-MODULE_DESCRIPTION("IBM Flash Adapter 900GB Full Height Device Driver");
-MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(DRIVER_VERSION);
-
-static unsigned int force_legacy = NO_LEGACY;
-module_param(force_legacy, uint, 0444);
-MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");
-
-static unsigned int sync_start = 1;
-module_param(sync_start, uint, 0444);
-MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
- "until the card startup has completed.");
-
-static DEFINE_IDA(rsxx_disk_ida);
-
-/* --------------------Debugfs Setup ------------------- */
-
-static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p)
-{
- struct rsxx_cardinfo *card = m->private;
-
- seq_printf(m, "HWID 0x%08x\n",
- ioread32(card->regmap + HWID));
- seq_printf(m, "SCRATCH 0x%08x\n",
- ioread32(card->regmap + SCRATCH));
- seq_printf(m, "IER 0x%08x\n",
- ioread32(card->regmap + IER));
- seq_printf(m, "IPR 0x%08x\n",
- ioread32(card->regmap + IPR));
- seq_printf(m, "CREG_CMD 0x%08x\n",
- ioread32(card->regmap + CREG_CMD));
- seq_printf(m, "CREG_ADD 0x%08x\n",
- ioread32(card->regmap + CREG_ADD));
- seq_printf(m, "CREG_CNT 0x%08x\n",
- ioread32(card->regmap + CREG_CNT));
- seq_printf(m, "CREG_STAT 0x%08x\n",
- ioread32(card->regmap + CREG_STAT));
- seq_printf(m, "CREG_DATA0 0x%08x\n",
- ioread32(card->regmap + CREG_DATA0));
- seq_printf(m, "CREG_DATA1 0x%08x\n",
- ioread32(card->regmap + CREG_DATA1));
- seq_printf(m, "CREG_DATA2 0x%08x\n",
- ioread32(card->regmap + CREG_DATA2));
- seq_printf(m, "CREG_DATA3 0x%08x\n",
- ioread32(card->regmap + CREG_DATA3));
- seq_printf(m, "CREG_DATA4 0x%08x\n",
- ioread32(card->regmap + CREG_DATA4));
- seq_printf(m, "CREG_DATA5 0x%08x\n",
- ioread32(card->regmap + CREG_DATA5));
- seq_printf(m, "CREG_DATA6 0x%08x\n",
- ioread32(card->regmap + CREG_DATA6));
- seq_printf(m, "CREG_DATA7 0x%08x\n",
- ioread32(card->regmap + CREG_DATA7));
- seq_printf(m, "INTR_COAL 0x%08x\n",
- ioread32(card->regmap + INTR_COAL));
- seq_printf(m, "HW_ERROR 0x%08x\n",
- ioread32(card->regmap + HW_ERROR));
- seq_printf(m, "DEBUG0 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG0));
- seq_printf(m, "DEBUG1 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG1));
- seq_printf(m, "DEBUG2 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG2));
- seq_printf(m, "DEBUG3 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG3));
- seq_printf(m, "DEBUG4 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG4));
- seq_printf(m, "DEBUG5 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG5));
- seq_printf(m, "DEBUG6 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG6));
- seq_printf(m, "DEBUG7 0x%08x\n",
- ioread32(card->regmap + PCI_DEBUG7));
- seq_printf(m, "RECONFIG 0x%08x\n",
- ioread32(card->regmap + PCI_RECONFIG));
-
- return 0;
-}
-
-static int rsxx_attr_stats_show(struct seq_file *m, void *p)
-{
- struct rsxx_cardinfo *card = m->private;
- int i;
-
- for (i = 0; i < card->n_targets; i++) {
- seq_printf(m, "Ctrl %d CRC Errors = %d\n",
- i, card->ctrl[i].stats.crc_errors);
- seq_printf(m, "Ctrl %d Hard Errors = %d\n",
- i, card->ctrl[i].stats.hard_errors);
- seq_printf(m, "Ctrl %d Soft Errors = %d\n",
- i, card->ctrl[i].stats.soft_errors);
- seq_printf(m, "Ctrl %d Writes Issued = %d\n",
- i, card->ctrl[i].stats.writes_issued);
- seq_printf(m, "Ctrl %d Writes Failed = %d\n",
- i, card->ctrl[i].stats.writes_failed);
- seq_printf(m, "Ctrl %d Reads Issued = %d\n",
- i, card->ctrl[i].stats.reads_issued);
- seq_printf(m, "Ctrl %d Reads Failed = %d\n",
- i, card->ctrl[i].stats.reads_failed);
- seq_printf(m, "Ctrl %d Reads Retried = %d\n",
- i, card->ctrl[i].stats.reads_retried);
- seq_printf(m, "Ctrl %d Discards Issued = %d\n",
- i, card->ctrl[i].stats.discards_issued);
- seq_printf(m, "Ctrl %d Discards Failed = %d\n",
- i, card->ctrl[i].stats.discards_failed);
- seq_printf(m, "Ctrl %d DMA SW Errors = %d\n",
- i, card->ctrl[i].stats.dma_sw_err);
- seq_printf(m, "Ctrl %d DMA HW Faults = %d\n",
- i, card->ctrl[i].stats.dma_hw_fault);
- seq_printf(m, "Ctrl %d DMAs Cancelled = %d\n",
- i, card->ctrl[i].stats.dma_cancelled);
- seq_printf(m, "Ctrl %d SW Queue Depth = %d\n",
- i, card->ctrl[i].stats.sw_q_depth);
- seq_printf(m, "Ctrl %d HW Queue Depth = %d\n",
- i, atomic_read(&card->ctrl[i].stats.hw_q_depth));
- }
-
- return 0;
-}
-
-static int rsxx_attr_stats_open(struct inode *inode, struct file *file)
-{
- return single_open(file, rsxx_attr_stats_show, inode->i_private);
-}
-
-static int rsxx_attr_pci_regs_open(struct inode *inode, struct file *file)
-{
- return single_open(file, rsxx_attr_pci_regs_show, inode->i_private);
-}
-
-static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- struct rsxx_cardinfo *card = file_inode(fp)->i_private;
- char *buf;
- int st;
-
- buf = kzalloc(cnt, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- st = rsxx_creg_read(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
- if (!st) {
- if (copy_to_user(ubuf, buf, cnt))
- st = -EFAULT;
- }
- kfree(buf);
- if (st)
- return st;
- *ppos += cnt;
- return cnt;
-}
-
-static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- struct rsxx_cardinfo *card = file_inode(fp)->i_private;
- char *buf;
- ssize_t st;
-
- buf = memdup_user(ubuf, cnt);
- if (IS_ERR(buf))
- return PTR_ERR(buf);
-
- st = rsxx_creg_write(card, CREG_ADD_CRAM + (u32)*ppos, cnt, buf, 1);
- kfree(buf);
- if (st)
- return st;
- *ppos += cnt;
- return cnt;
-}
-
-static const struct file_operations debugfs_cram_fops = {
- .owner = THIS_MODULE,
- .read = rsxx_cram_read,
- .write = rsxx_cram_write,
-};
-
-static const struct file_operations debugfs_stats_fops = {
- .owner = THIS_MODULE,
- .open = rsxx_attr_stats_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static const struct file_operations debugfs_pci_regs_fops = {
- .owner = THIS_MODULE,
- .open = rsxx_attr_pci_regs_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card)
-{
- struct dentry *debugfs_stats;
- struct dentry *debugfs_pci_regs;
- struct dentry *debugfs_cram;
-
- card->debugfs_dir = debugfs_create_dir(card->gendisk->disk_name, NULL);
- if (IS_ERR_OR_NULL(card->debugfs_dir))
- goto failed_debugfs_dir;
-
- debugfs_stats = debugfs_create_file("stats", 0444,
- card->debugfs_dir, card,
- &debugfs_stats_fops);
- if (IS_ERR_OR_NULL(debugfs_stats))
- goto failed_debugfs_stats;
-
- debugfs_pci_regs = debugfs_create_file("pci_regs", 0444,
- card->debugfs_dir, card,
- &debugfs_pci_regs_fops);
- if (IS_ERR_OR_NULL(debugfs_pci_regs))
- goto failed_debugfs_pci_regs;
-
- debugfs_cram = debugfs_create_file("cram", 0644,
- card->debugfs_dir, card,
- &debugfs_cram_fops);
- if (IS_ERR_OR_NULL(debugfs_cram))
- goto failed_debugfs_cram;
-
- return;
-failed_debugfs_cram:
- debugfs_remove(debugfs_pci_regs);
-failed_debugfs_pci_regs:
- debugfs_remove(debugfs_stats);
-failed_debugfs_stats:
- debugfs_remove(card->debugfs_dir);
-failed_debugfs_dir:
- card->debugfs_dir = NULL;
-}
-
-/*----------------- Interrupt Control & Handling -------------------*/
-
-static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
-{
- card->isr_mask = 0;
- card->ier_mask = 0;
-}
-
-static void __enable_intr(unsigned int *mask, unsigned int intr)
-{
- *mask |= intr;
-}
-
-static void __disable_intr(unsigned int *mask, unsigned int intr)
-{
- *mask &= ~intr;
-}
-
-/*
- * NOTE: Disabling the IER will disable the hardware interrupt.
- * Disabling the ISR will disable the software handling of the ISR bit.
- *
- * Enable/Disable interrupt functions assume the card->irq_lock
- * is held by the caller.
- */
-void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
-{
- if (unlikely(card->halt) ||
- unlikely(card->eeh_state))
- return;
-
- __enable_intr(&card->ier_mask, intr);
- iowrite32(card->ier_mask, card->regmap + IER);
-}
-
-void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
-{
- if (unlikely(card->eeh_state))
- return;
-
- __disable_intr(&card->ier_mask, intr);
- iowrite32(card->ier_mask, card->regmap + IER);
-}
-
-void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
- unsigned int intr)
-{
- if (unlikely(card->halt) ||
- unlikely(card->eeh_state))
- return;
-
- __enable_intr(&card->isr_mask, intr);
- __enable_intr(&card->ier_mask, intr);
- iowrite32(card->ier_mask, card->regmap + IER);
-}
-void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
- unsigned int intr)
-{
- if (unlikely(card->eeh_state))
- return;
-
- __disable_intr(&card->isr_mask, intr);
- __disable_intr(&card->ier_mask, intr);
- iowrite32(card->ier_mask, card->regmap + IER);
-}
-
-static irqreturn_t rsxx_isr(int irq, void *pdata)
-{
- struct rsxx_cardinfo *card = pdata;
- unsigned int isr;
- int handled = 0;
- int reread_isr;
- int i;
-
- spin_lock(&card->irq_lock);
-
- do {
- reread_isr = 0;
-
- if (unlikely(card->eeh_state))
- break;
-
- isr = ioread32(card->regmap + ISR);
- if (isr == 0xffffffff) {
- /*
- * A few systems seem to have an intermittent issue
- * where PCI reads return all Fs, but retrying the read
- * a little later will return as expected.
- */
- dev_info(CARD_TO_DEV(card),
- "ISR = 0xFFFFFFFF, retrying later\n");
- break;
- }
-
- isr &= card->isr_mask;
- if (!isr)
- break;
-
- for (i = 0; i < card->n_targets; i++) {
- if (isr & CR_INTR_DMA(i)) {
- if (card->ier_mask & CR_INTR_DMA(i)) {
- rsxx_disable_ier(card, CR_INTR_DMA(i));
- reread_isr = 1;
- }
- queue_work(card->ctrl[i].done_wq,
- &card->ctrl[i].dma_done_work);
- handled++;
- }
- }
-
- if (isr & CR_INTR_CREG) {
- queue_work(card->creg_ctrl.creg_wq,
- &card->creg_ctrl.done_work);
- handled++;
- }
-
- if (isr & CR_INTR_EVENT) {
- queue_work(card->event_wq, &card->event_work);
- rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
- handled++;
- }
- } while (reread_isr);
-
- spin_unlock(&card->irq_lock);
-
- return handled ? IRQ_HANDLED : IRQ_NONE;
-}
-
-/*----------------- Card Event Handler -------------------*/
-static const char *rsxx_card_state_to_str(unsigned int state)
-{
- static const char * const state_strings[] = {
- "Unknown", "Shutdown", "Starting", "Formatting",
- "Uninitialized", "Good", "Shutting Down",
- "Fault", "Read Only Fault", "dStroying"
- };
-
- return state_strings[ffs(state)];
-}
-
-static void card_state_change(struct rsxx_cardinfo *card,
- unsigned int new_state)
-{
- int st;
-
- dev_info(CARD_TO_DEV(card),
- "card state change detected.(%s -> %s)\n",
- rsxx_card_state_to_str(card->state),
- rsxx_card_state_to_str(new_state));
-
- card->state = new_state;
-
- /* Don't attach DMA interfaces if the card has an invalid config */
- if (!card->config_valid)
- return;
-
- switch (new_state) {
- case CARD_STATE_RD_ONLY_FAULT:
- dev_crit(CARD_TO_DEV(card),
- "Hardware has entered read-only mode!\n");
- /*
- * Fall through so the DMA devices can be attached and
- * the user can attempt to pull off their data.
- */
- fallthrough;
- case CARD_STATE_GOOD:
- st = rsxx_get_card_size8(card, &card->size8);
- if (st)
- dev_err(CARD_TO_DEV(card),
- "Failed attaching DMA devices\n");
-
- if (card->config_valid)
- set_capacity(card->gendisk, card->size8 >> 9);
- break;
-
- case CARD_STATE_FAULT:
- dev_crit(CARD_TO_DEV(card),
- "Hardware Fault reported!\n");
- fallthrough;
-
- /* Everything else, detach DMA interface if it's attached. */
- case CARD_STATE_SHUTDOWN:
- case CARD_STATE_STARTING:
- case CARD_STATE_FORMATTING:
- case CARD_STATE_UNINITIALIZED:
- case CARD_STATE_SHUTTING_DOWN:
- /*
- * dStroy is a term coined by marketing to represent the low level
- * secure erase.
- */
- case CARD_STATE_DSTROYING:
- set_capacity(card->gendisk, 0);
- break;
- }
-}
-
-static void card_event_handler(struct work_struct *work)
-{
- struct rsxx_cardinfo *card;
- unsigned int state;
- unsigned long flags;
- int st;
-
- card = container_of(work, struct rsxx_cardinfo, event_work);
-
- if (unlikely(card->halt))
- return;
-
- /*
- * Enable the interrupt now to avoid any weird race conditions where a
- * state change might occur while rsxx_get_card_state() is
- * processing a returned creg cmd.
- */
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
- spin_unlock_irqrestore(&card->irq_lock, flags);
-
- st = rsxx_get_card_state(card, &state);
- if (st) {
- dev_info(CARD_TO_DEV(card),
- "Failed reading state after event.\n");
- return;
- }
-
- if (card->state != state)
- card_state_change(card, state);
-
- if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
- rsxx_read_hw_log(card);
-}
-
-/*----------------- Card Operations -------------------*/
-static int card_shutdown(struct rsxx_cardinfo *card)
-{
- unsigned int state;
- signed long start;
- const int timeout = msecs_to_jiffies(120000);
- int st;
-
- /* We can't issue a shutdown if the card is in a transition state */
- start = jiffies;
- do {
- st = rsxx_get_card_state(card, &state);
- if (st)
- return st;
- } while (state == CARD_STATE_STARTING &&
- (jiffies - start < timeout));
-
- if (state == CARD_STATE_STARTING)
- return -ETIMEDOUT;
-
- /* Only issue a shutdown if we need to */
- if ((state != CARD_STATE_SHUTTING_DOWN) &&
- (state != CARD_STATE_SHUTDOWN)) {
- st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
- if (st)
- return st;
- }
-
- start = jiffies;
- do {
- st = rsxx_get_card_state(card, &state);
- if (st)
- return st;
- } while (state != CARD_STATE_SHUTDOWN &&
- (jiffies - start < timeout));
-
- if (state != CARD_STATE_SHUTDOWN)
- return -ETIMEDOUT;
-
- return 0;
-}
-
-static int rsxx_eeh_frozen(struct pci_dev *dev)
-{
- struct rsxx_cardinfo *card = pci_get_drvdata(dev);
- int i;
- int st;
-
- dev_warn(&dev->dev, "IBM Flash Adapter PCI: preparing for slot reset.\n");
-
- card->eeh_state = 1;
- rsxx_mask_interrupts(card);
-
- /*
- * We need to guarantee that the write for eeh_state and masking
- * interrupts does not become reordered. This will prevent a possible
- * race condition with the EEH code.
- */
- wmb();
-
- pci_disable_device(dev);
-
- st = rsxx_eeh_save_issued_dmas(card);
- if (st)
- return st;
-
- rsxx_eeh_save_issued_creg(card);
-
- for (i = 0; i < card->n_targets; i++) {
- if (card->ctrl[i].status.buf)
- dma_free_coherent(&card->dev->dev,
- STATUS_BUFFER_SIZE8,
- card->ctrl[i].status.buf,
- card->ctrl[i].status.dma_addr);
- if (card->ctrl[i].cmd.buf)
- dma_free_coherent(&card->dev->dev,
- COMMAND_BUFFER_SIZE8,
- card->ctrl[i].cmd.buf,
- card->ctrl[i].cmd.dma_addr);
- }
-
- return 0;
-}
-
-static void rsxx_eeh_failure(struct pci_dev *dev)
-{
- struct rsxx_cardinfo *card = pci_get_drvdata(dev);
- int i;
- int cnt = 0;
-
- dev_err(&dev->dev, "IBM Flash Adapter PCI: disabling failed card.\n");
-
- card->eeh_state = 1;
- card->halt = 1;
-
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_bh(&card->ctrl[i].queue_lock);
- cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
- &card->ctrl[i].queue,
- COMPLETE_DMA);
- spin_unlock_bh(&card->ctrl[i].queue_lock);
-
- cnt += rsxx_dma_cancel(&card->ctrl[i]);
-
- if (cnt)
- dev_info(CARD_TO_DEV(card),
- "Freed %d queued DMAs on channel %d\n",
- cnt, card->ctrl[i].id);
- }
-}
-
-static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
-{
- unsigned int status;
- int iter = 0;
-
- /* We need to wait for the hardware to reset */
- while (iter++ < 10) {
- status = ioread32(card->regmap + PCI_RECONFIG);
-
- if (status & RSXX_FLUSH_BUSY) {
- ssleep(1);
- continue;
- }
-
- if (status & RSXX_FLUSH_TIMEOUT)
- dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
- return 0;
- }
-
- /* Hardware failed resetting itself. */
- return -1;
-}
-
-static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
- pci_channel_state_t error)
-{
- int st;
-
- if (dev->revision < RSXX_EEH_SUPPORT)
- return PCI_ERS_RESULT_NONE;
-
- if (error == pci_channel_io_perm_failure) {
- rsxx_eeh_failure(dev);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- st = rsxx_eeh_frozen(dev);
- if (st) {
- dev_err(&dev->dev, "Slot reset setup failed\n");
- rsxx_eeh_failure(dev);
- return PCI_ERS_RESULT_DISCONNECT;
- }
-
- return PCI_ERS_RESULT_NEED_RESET;
-}
-
-static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
-{
- struct rsxx_cardinfo *card = pci_get_drvdata(dev);
- unsigned long flags;
- int i;
- int st;
-
- dev_warn(&dev->dev,
- "IBM Flash Adapter PCI: recovering from slot reset.\n");
-
- st = pci_enable_device(dev);
- if (st)
- goto failed_hw_setup;
-
- pci_set_master(dev);
-
- st = rsxx_eeh_fifo_flush_poll(card);
- if (st)
- goto failed_hw_setup;
-
- rsxx_dma_queue_reset(card);
-
- for (i = 0; i < card->n_targets; i++) {
- st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
- if (st)
- goto failed_hw_buffers_init;
- }
-
- if (card->config_valid)
- rsxx_dma_configure(card);
-
- /* Clears the ISR register from spurious interrupts */
- st = ioread32(card->regmap + ISR);
-
- card->eeh_state = 0;
-
- spin_lock_irqsave(&card->irq_lock, flags);
- if (card->n_targets & RSXX_MAX_TARGETS)
- rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
- else
- rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
- spin_unlock_irqrestore(&card->irq_lock, flags);
-
- rsxx_kick_creg_queue(card);
-
- for (i = 0; i < card->n_targets; i++) {
- spin_lock(&card->ctrl[i].queue_lock);
- if (list_empty(&card->ctrl[i].queue)) {
- spin_unlock(&card->ctrl[i].queue_lock);
- continue;
- }
- spin_unlock(&card->ctrl[i].queue_lock);
-
- queue_work(card->ctrl[i].issue_wq,
- &card->ctrl[i].issue_dma_work);
- }
-
- dev_info(&dev->dev, "IBM Flash Adapter PCI: recovery complete.\n");
-
- return PCI_ERS_RESULT_RECOVERED;
-
-failed_hw_buffers_init:
- for (i = 0; i < card->n_targets; i++) {
- if (card->ctrl[i].status.buf)
- dma_free_coherent(&card->dev->dev,
- STATUS_BUFFER_SIZE8,
- card->ctrl[i].status.buf,
- card->ctrl[i].status.dma_addr);
- if (card->ctrl[i].cmd.buf)
- dma_free_coherent(&card->dev->dev,
- COMMAND_BUFFER_SIZE8,
- card->ctrl[i].cmd.buf,
- card->ctrl[i].cmd.dma_addr);
- }
-failed_hw_setup:
- rsxx_eeh_failure(dev);
- return PCI_ERS_RESULT_DISCONNECT;
-
-}
-
-/*----------------- Driver Initialization & Setup -------------------*/
-/* Returns: 0 if the driver is compatible with the device
- -1 if the driver is NOT compatible with the device */
-static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
-{
- unsigned char pci_rev;
-
- pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
-
- if (pci_rev > RS70_PCI_REV_SUPPORTED)
- return -1;
- return 0;
-}
-
-static int rsxx_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- struct rsxx_cardinfo *card;
- int st;
- unsigned int sync_timeout;
-
- dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
-
- card = kzalloc(sizeof(*card), GFP_KERNEL);
- if (!card)
- return -ENOMEM;
-
- card->dev = dev;
- pci_set_drvdata(dev, card);
-
- st = ida_alloc(&rsxx_disk_ida, GFP_KERNEL);
- if (st < 0)
- goto failed_ida_get;
- card->disk_id = st;
-
- st = pci_enable_device(dev);
- if (st)
- goto failed_enable;
-
- pci_set_master(dev);
-
- st = dma_set_mask(&dev->dev, DMA_BIT_MASK(64));
- if (st) {
- dev_err(CARD_TO_DEV(card),
- "No usable DMA configuration,aborting\n");
- goto failed_dma_mask;
- }
-
- st = pci_request_regions(dev, DRIVER_NAME);
- if (st) {
- dev_err(CARD_TO_DEV(card),
- "Failed to request memory region\n");
- goto failed_request_regions;
- }
-
- if (pci_resource_len(dev, 0) == 0) {
- dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
- st = -ENOMEM;
- goto failed_iomap;
- }
-
- card->regmap = pci_iomap(dev, 0, 0);
- if (!card->regmap) {
- dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
- st = -ENOMEM;
- goto failed_iomap;
- }
-
- spin_lock_init(&card->irq_lock);
- card->halt = 0;
- card->eeh_state = 0;
-
- spin_lock_irq(&card->irq_lock);
- rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
- spin_unlock_irq(&card->irq_lock);
-
- if (!force_legacy) {
- st = pci_enable_msi(dev);
- if (st)
- dev_warn(CARD_TO_DEV(card),
- "Failed to enable MSI\n");
- }
-
- st = request_irq(dev->irq, rsxx_isr, IRQF_SHARED,
- DRIVER_NAME, card);
- if (st) {
- dev_err(CARD_TO_DEV(card),
- "Failed requesting IRQ%d\n", dev->irq);
- goto failed_irq;
- }
-
- /************* Setup Processor Command Interface *************/
- st = rsxx_creg_setup(card);
- if (st) {
- dev_err(CARD_TO_DEV(card), "Failed to setup creg interface.\n");
- goto failed_creg_setup;
- }
-
- spin_lock_irq(&card->irq_lock);
- rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
- spin_unlock_irq(&card->irq_lock);
-
- st = rsxx_compatibility_check(card);
- if (st) {
- dev_warn(CARD_TO_DEV(card),
- "Incompatible driver detected. Please update the driver.\n");
- st = -EINVAL;
- goto failed_compatiblity_check;
- }
-
- /************* Load Card Config *************/
- st = rsxx_load_config(card);
- if (st)
- dev_err(CARD_TO_DEV(card),
- "Failed loading card config\n");
-
- /************* Setup DMA Engine *************/
- st = rsxx_get_num_targets(card, &card->n_targets);
- if (st)
- dev_info(CARD_TO_DEV(card),
- "Failed reading the number of DMA targets\n");
-
- card->ctrl = kcalloc(card->n_targets, sizeof(*card->ctrl),
- GFP_KERNEL);
- if (!card->ctrl) {
- st = -ENOMEM;
- goto failed_dma_setup;
- }
-
- st = rsxx_dma_setup(card);
- if (st) {
- dev_info(CARD_TO_DEV(card),
- "Failed to setup DMA engine\n");
- goto failed_dma_setup;
- }
-
- /************* Setup Card Event Handler *************/
- card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
- if (!card->event_wq) {
- dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
- st = -ENOMEM;
- goto failed_event_handler;
- }
-
- INIT_WORK(&card->event_work, card_event_handler);
-
- st = rsxx_setup_dev(card);
- if (st)
- goto failed_create_dev;
-
- rsxx_get_card_state(card, &card->state);
-
- dev_info(CARD_TO_DEV(card),
- "card state: %s\n",
- rsxx_card_state_to_str(card->state));
-
- /*
- * Now that the DMA engine and devices have been set up, we can
- * enable the event interrupt (it kicks off actions in those
- * layers, so we couldn't enable it right away).
- */
- spin_lock_irq(&card->irq_lock);
- rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
- spin_unlock_irq(&card->irq_lock);
-
- if (card->state == CARD_STATE_SHUTDOWN) {
- st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
- if (st)
- dev_crit(CARD_TO_DEV(card),
- "Failed issuing card startup\n");
- if (sync_start) {
- sync_timeout = SYNC_START_TIMEOUT;
-
- dev_info(CARD_TO_DEV(card),
- "Waiting for card to startup\n");
-
- do {
- ssleep(1);
- sync_timeout--;
-
- rsxx_get_card_state(card, &card->state);
- } while (sync_timeout &&
- (card->state == CARD_STATE_STARTING));
-
- if (card->state == CARD_STATE_STARTING) {
- dev_warn(CARD_TO_DEV(card),
- "Card startup timed out\n");
- card->size8 = 0;
- } else {
- dev_info(CARD_TO_DEV(card),
- "card state: %s\n",
- rsxx_card_state_to_str(card->state));
- st = rsxx_get_card_size8(card, &card->size8);
- if (st)
- card->size8 = 0;
- }
- }
- } else if (card->state == CARD_STATE_GOOD ||
- card->state == CARD_STATE_RD_ONLY_FAULT) {
- st = rsxx_get_card_size8(card, &card->size8);
- if (st)
- card->size8 = 0;
- }
-
- st = rsxx_attach_dev(card);
- if (st)
- goto failed_create_dev;
-
- /************* Setup Debugfs *************/
- rsxx_debugfs_dev_new(card);
-
- return 0;
-
-failed_create_dev:
- destroy_workqueue(card->event_wq);
- card->event_wq = NULL;
-failed_event_handler:
- rsxx_dma_destroy(card);
-failed_dma_setup:
-failed_compatiblity_check:
- destroy_workqueue(card->creg_ctrl.creg_wq);
- card->creg_ctrl.creg_wq = NULL;
-failed_creg_setup:
- spin_lock_irq(&card->irq_lock);
- rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
- spin_unlock_irq(&card->irq_lock);
- free_irq(dev->irq, card);
- if (!force_legacy)
- pci_disable_msi(dev);
-failed_irq:
- pci_iounmap(dev, card->regmap);
-failed_iomap:
- pci_release_regions(dev);
-failed_request_regions:
-failed_dma_mask:
- pci_disable_device(dev);
-failed_enable:
- ida_free(&rsxx_disk_ida, card->disk_id);
-failed_ida_get:
- kfree(card);
-
- return st;
-}
-
-static void rsxx_pci_remove(struct pci_dev *dev)
-{
- struct rsxx_cardinfo *card = pci_get_drvdata(dev);
- unsigned long flags;
- int st;
- int i;
-
- if (!card)
- return;
-
- dev_info(CARD_TO_DEV(card),
- "Removing PCI-Flash SSD.\n");
-
- rsxx_detach_dev(card);
-
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
- spin_unlock_irqrestore(&card->irq_lock, flags);
- }
-
- st = card_shutdown(card);
- if (st)
- dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");
-
- /* Sync outstanding event handlers. */
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
- spin_unlock_irqrestore(&card->irq_lock, flags);
-
- cancel_work_sync(&card->event_work);
-
- destroy_workqueue(card->event_wq);
- rsxx_destroy_dev(card);
- rsxx_dma_destroy(card);
- destroy_workqueue(card->creg_ctrl.creg_wq);
-
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
- spin_unlock_irqrestore(&card->irq_lock, flags);
-
- /* Prevent work_structs from re-queuing themselves. */
- card->halt = 1;
-
- debugfs_remove_recursive(card->debugfs_dir);
-
- free_irq(dev->irq, card);
-
- if (!force_legacy)
- pci_disable_msi(dev);
-
- rsxx_creg_destroy(card);
-
- pci_iounmap(dev, card->regmap);
-
- pci_disable_device(dev);
- pci_release_regions(dev);
-
- ida_free(&rsxx_disk_ida, card->disk_id);
- kfree(card);
-}
-
-static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
-{
- /* We don't support suspend at this time. */
- return -ENOSYS;
-}
-
-static void rsxx_pci_shutdown(struct pci_dev *dev)
-{
- struct rsxx_cardinfo *card = pci_get_drvdata(dev);
- unsigned long flags;
- int i;
-
- if (!card)
- return;
-
- dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");
-
- rsxx_detach_dev(card);
-
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
- spin_unlock_irqrestore(&card->irq_lock, flags);
- }
-
- card_shutdown(card);
-}
-
-static const struct pci_error_handlers rsxx_err_handler = {
- .error_detected = rsxx_error_detected,
- .slot_reset = rsxx_slot_reset,
-};
-
-static const struct pci_device_id rsxx_pci_ids[] = {
- {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
- {PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
- {0,},
-};
-
-MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);
-
-static struct pci_driver rsxx_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = rsxx_pci_ids,
- .probe = rsxx_pci_probe,
- .remove = rsxx_pci_remove,
- .suspend = rsxx_pci_suspend,
- .shutdown = rsxx_pci_shutdown,
- .err_handler = &rsxx_err_handler,
-};
-
-static int __init rsxx_core_init(void)
-{
- int st;
-
- st = rsxx_dev_init();
- if (st)
- return st;
-
- st = rsxx_dma_init();
- if (st)
- goto dma_init_failed;
-
- st = rsxx_creg_init();
- if (st)
- goto creg_init_failed;
-
- return pci_register_driver(&rsxx_pci_driver);
-
-creg_init_failed:
- rsxx_dma_cleanup();
-dma_init_failed:
- rsxx_dev_cleanup();
-
- return st;
-}
-
-static void __exit rsxx_core_cleanup(void)
-{
- pci_unregister_driver(&rsxx_pci_driver);
- rsxx_creg_cleanup();
- rsxx_dma_cleanup();
- rsxx_dev_cleanup();
-}
-
-module_init(rsxx_core_init);
-module_exit(rsxx_core_cleanup);
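[Editor's note: one defensive pattern from the removed core.c is worth calling out: rsxx_isr() treats an all-ones ISR read as a transient bus problem rather than valid status, because reads from a PCI device that has dropped off the bus (or sits behind an EEH-frozen slot) complete with 0xFFFFFFFF. A hedged sketch of that check — the register layout and struct my_card are stand-ins, ioread32() is the real accessor:

static irqreturn_t my_isr(int irq, void *data)
{
	struct my_card *card = data;
	u32 isr = ioread32(card->regmap + MY_ISR);

	if (isr == 0xffffffff) {
		/* Aborted PCI reads return all Fs; the device may be
		 * gone or mid-reset, so try again on the next interrupt. */
		return IRQ_NONE;
	}
	/* ... dispatch the asserted isr bits ... */
	return IRQ_HANDLED;
}
]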
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
deleted file mode 100644
index 60ecd3f7cbd2..000000000000
--- a/drivers/block/rsxx/cregs.c
+++ /dev/null
@@ -1,789 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
-* Filename: cregs.c
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#include <linux/completion.h>
-#include <linux/slab.h>
-
-#include "rsxx_priv.h"
-
-#define CREG_TIMEOUT_MSEC 10000
-
-typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
- struct creg_cmd *cmd,
- int st);
-
-struct creg_cmd {
- struct list_head list;
- creg_cmd_cb cb;
- void *cb_private;
- unsigned int op;
- unsigned int addr;
- int cnt8;
- void *buf;
- unsigned int stream;
- unsigned int status;
-};
-
-static struct kmem_cache *creg_cmd_pool;
-
-
-/*------------ Private Functions --------------*/
-
-#if defined(__LITTLE_ENDIAN)
-#define LITTLE_ENDIAN 1
-#elif defined(__BIG_ENDIAN)
-#define LITTLE_ENDIAN 0
-#else
-#error Unknown endianness!!! Aborting...
-#endif
-
-static int copy_to_creg_data(struct rsxx_cardinfo *card,
- int cnt8,
- void *buf,
- unsigned int stream)
-{
- int i = 0;
- u32 *data = buf;
-
- if (unlikely(card->eeh_state))
- return -EIO;
-
- for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
- /*
- * Firmware implementation makes it necessary to byte swap on
- * little endian processors.
- */
- if (LITTLE_ENDIAN && stream)
- iowrite32be(data[i], card->regmap + CREG_DATA(i));
- else
- iowrite32(data[i], card->regmap + CREG_DATA(i));
- }
-
- return 0;
-}
-
-
-static int copy_from_creg_data(struct rsxx_cardinfo *card,
- int cnt8,
- void *buf,
- unsigned int stream)
-{
- int i = 0;
- u32 *data = buf;
-
- if (unlikely(card->eeh_state))
- return -EIO;
-
- for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
- /*
- * Firmware implementation makes it necessary to byte swap on
- * little endian processors.
- */
- if (LITTLE_ENDIAN && stream)
- data[i] = ioread32be(card->regmap + CREG_DATA(i));
- else
- data[i] = ioread32(card->regmap + CREG_DATA(i));
- }
-
- return 0;
-}
-
-static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
-{
- int st;
-
- if (unlikely(card->eeh_state))
- return;
-
- iowrite32(cmd->addr, card->regmap + CREG_ADD);
- iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
-
- if (cmd->op == CREG_OP_WRITE) {
- if (cmd->buf) {
- st = copy_to_creg_data(card, cmd->cnt8,
- cmd->buf, cmd->stream);
- if (st)
- return;
- }
- }
-
- if (unlikely(card->eeh_state))
- return;
-
- /* Setting the valid bit will kick off the command. */
- iowrite32(cmd->op, card->regmap + CREG_CMD);
-}
-
-static void creg_kick_queue(struct rsxx_cardinfo *card)
-{
- if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
- return;
-
- card->creg_ctrl.active = 1;
- card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
- struct creg_cmd, list);
- list_del(&card->creg_ctrl.active_cmd->list);
- card->creg_ctrl.q_depth--;
-
- /*
- * We have to set the timer before we push the new command. Otherwise,
- * we could create a race condition that would occur if the timer
- * was not canceled, and expired after the new command was pushed,
- * but before the command was issued to hardware.
- */
- mod_timer(&card->creg_ctrl.cmd_timer,
- jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));
-
- creg_issue_cmd(card, card->creg_ctrl.active_cmd);
-}
-
-static int creg_queue_cmd(struct rsxx_cardinfo *card,
- unsigned int op,
- unsigned int addr,
- unsigned int cnt8,
- void *buf,
- int stream,
- creg_cmd_cb callback,
- void *cb_private)
-{
- struct creg_cmd *cmd;
-
- /* Don't queue stuff up if we're halted. */
- if (unlikely(card->halt))
- return -EINVAL;
-
- if (card->creg_ctrl.reset)
- return -EAGAIN;
-
- if (cnt8 > MAX_CREG_DATA8)
- return -EINVAL;
-
- cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&cmd->list);
-
- cmd->op = op;
- cmd->addr = addr;
- cmd->cnt8 = cnt8;
- cmd->buf = buf;
- cmd->stream = stream;
- cmd->cb = callback;
- cmd->cb_private = cb_private;
- cmd->status = 0;
-
- spin_lock_bh(&card->creg_ctrl.lock);
- list_add_tail(&cmd->list, &card->creg_ctrl.queue);
- card->creg_ctrl.q_depth++;
- creg_kick_queue(card);
- spin_unlock_bh(&card->creg_ctrl.lock);
-
- return 0;
-}
-
-static void creg_cmd_timed_out(struct timer_list *t)
-{
- struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
- struct creg_cmd *cmd;
-
- spin_lock(&card->creg_ctrl.lock);
- cmd = card->creg_ctrl.active_cmd;
- card->creg_ctrl.active_cmd = NULL;
- spin_unlock(&card->creg_ctrl.lock);
-
- if (cmd == NULL) {
- card->creg_ctrl.creg_stats.creg_timeout++;
- dev_warn(CARD_TO_DEV(card),
- "No active command associated with timeout!\n");
- return;
- }
-
- if (cmd->cb)
- cmd->cb(card, cmd, -ETIMEDOUT);
-
- kmem_cache_free(creg_cmd_pool, cmd);
-
-
- spin_lock(&card->creg_ctrl.lock);
- card->creg_ctrl.active = 0;
- creg_kick_queue(card);
- spin_unlock(&card->creg_ctrl.lock);
-}
-
-
-static void creg_cmd_done(struct work_struct *work)
-{
- struct rsxx_cardinfo *card;
- struct creg_cmd *cmd;
- int st = 0;
-
- card = container_of(work, struct rsxx_cardinfo,
- creg_ctrl.done_work);
-
- /*
- * If the timer has already fired we could not cancel it, so we
- * race the timeout handler to pop the active command.
- */
- if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
- card->creg_ctrl.creg_stats.failed_cancel_timer++;
-
- spin_lock_bh(&card->creg_ctrl.lock);
- cmd = card->creg_ctrl.active_cmd;
- card->creg_ctrl.active_cmd = NULL;
- spin_unlock_bh(&card->creg_ctrl.lock);
-
- if (cmd == NULL) {
- dev_err(CARD_TO_DEV(card),
- "Spurious creg interrupt!\n");
- return;
- }
-
- card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
- cmd->status = card->creg_ctrl.creg_stats.stat;
- if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
- dev_err(CARD_TO_DEV(card),
- "Invalid status on creg command\n");
- /*
- * At this point we're probably reading garbage from HW. Don't
- * do anything else that could mess up the system and let
- * the sync function return an error.
- */
- st = -EIO;
- goto creg_done;
- } else if (cmd->status & CREG_STAT_ERROR) {
- st = -EIO;
- }
-
- if (cmd->op == CREG_OP_READ) {
- unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
-
- /* Paranoid Sanity Checks */
- if (!cmd->buf) {
- dev_err(CARD_TO_DEV(card),
- "Buffer not given for read.\n");
- st = -EIO;
- goto creg_done;
- }
- if (cnt8 != cmd->cnt8) {
- dev_err(CARD_TO_DEV(card),
- "count mismatch\n");
- st = -EIO;
- goto creg_done;
- }
-
- st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
- }
-
-creg_done:
- if (cmd->cb)
- cmd->cb(card, cmd, st);
-
- kmem_cache_free(creg_cmd_pool, cmd);
-
- spin_lock_bh(&card->creg_ctrl.lock);
- card->creg_ctrl.active = 0;
- creg_kick_queue(card);
- spin_unlock_bh(&card->creg_ctrl.lock);
-}
-
-static void creg_reset(struct rsxx_cardinfo *card)
-{
- struct creg_cmd *cmd = NULL;
- struct creg_cmd *tmp;
- unsigned long flags;
-
- /*
- * mutex_trylock is used here because if reset_lock is taken then a
- * reset is already happening. So, we can just go ahead and return.
- */
- if (!mutex_trylock(&card->creg_ctrl.reset_lock))
- return;
-
- card->creg_ctrl.reset = 1;
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
- spin_unlock_irqrestore(&card->irq_lock, flags);
-
- dev_warn(CARD_TO_DEV(card),
- "Resetting creg interface for recovery\n");
-
- /* Cancel outstanding commands */
- spin_lock_bh(&card->creg_ctrl.lock);
- list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
- list_del(&cmd->list);
- card->creg_ctrl.q_depth--;
- if (cmd->cb)
- cmd->cb(card, cmd, -ECANCELED);
- kmem_cache_free(creg_cmd_pool, cmd);
- }
-
- cmd = card->creg_ctrl.active_cmd;
- card->creg_ctrl.active_cmd = NULL;
- if (cmd) {
- if (timer_pending(&card->creg_ctrl.cmd_timer))
- del_timer_sync(&card->creg_ctrl.cmd_timer);
-
- if (cmd->cb)
- cmd->cb(card, cmd, -ECANCELED);
- kmem_cache_free(creg_cmd_pool, cmd);
-
- card->creg_ctrl.active = 0;
- }
- spin_unlock_bh(&card->creg_ctrl.lock);
-
- card->creg_ctrl.reset = 0;
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
- spin_unlock_irqrestore(&card->irq_lock, flags);
-
- mutex_unlock(&card->creg_ctrl.reset_lock);
-}
-
-/* Used for synchronous accesses */
-struct creg_completion {
- struct completion *cmd_done;
- int st;
- u32 creg_status;
-};
-
-static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
- struct creg_cmd *cmd,
- int st)
-{
- struct creg_completion *cmd_completion;
-
- cmd_completion = cmd->cb_private;
- BUG_ON(!cmd_completion);
-
- cmd_completion->st = st;
- cmd_completion->creg_status = cmd->status;
- complete(cmd_completion->cmd_done);
-}
-
-static int __issue_creg_rw(struct rsxx_cardinfo *card,
- unsigned int op,
- unsigned int addr,
- unsigned int cnt8,
- void *buf,
- int stream,
- unsigned int *hw_stat)
-{
- DECLARE_COMPLETION_ONSTACK(cmd_done);
- struct creg_completion completion;
- unsigned long timeout;
- int st;
-
- completion.cmd_done = &cmd_done;
- completion.st = 0;
- completion.creg_status = 0;
-
- st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
- &completion);
- if (st)
- return st;
-
- /*
- * This timeout is necessary for unresponsive hardware. The additional
- * 20 seconds is used to guarantee that each creg request has time to
- * complete.
- */
- timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
- card->creg_ctrl.q_depth + 20000);
-
- /*
- * The creg interface is guaranteed to complete. It has a timeout
- * mechanism that will kick in if hardware does not respond.
- */
- st = wait_for_completion_timeout(completion.cmd_done, timeout);
- if (st == 0) {
- /*
- * This is really bad, because the kernel timer did not
- * expire and notify us of a timeout!
- */
- dev_crit(CARD_TO_DEV(card),
- "cregs timer failed\n");
- creg_reset(card);
- return -EIO;
- }
-
- *hw_stat = completion.creg_status;
-
- if (completion.st) {
- /*
- * This read is needed to verify that there has not been any
- * extreme errors that might have occurred, i.e. EEH. The
- * function iowrite32 will not detect EEH errors, so it is
- * necessary that we recover if such an error is the reason
- * for the timeout. This is a dummy read.
- */
- ioread32(card->regmap + SCRATCH);
-
- dev_warn(CARD_TO_DEV(card),
- "creg command failed(%d x%08x)\n",
- completion.st, addr);
- return completion.st;
- }
-
- return 0;
-}
-
-static int issue_creg_rw(struct rsxx_cardinfo *card,
- u32 addr,
- unsigned int size8,
- void *data,
- int stream,
- int read)
-{
- unsigned int hw_stat;
- unsigned int xfer;
- unsigned int op;
- int st;
-
- op = read ? CREG_OP_READ : CREG_OP_WRITE;
-
- do {
- xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);
-
- st = __issue_creg_rw(card, op, addr, xfer,
- data, stream, &hw_stat);
- if (st)
- return st;
-
- data = (char *)data + xfer;
- addr += xfer;
- size8 -= xfer;
- } while (size8);
-
- return 0;
-}
-
-/* ---------------------------- Public API ---------------------------------- */
-int rsxx_creg_write(struct rsxx_cardinfo *card,
- u32 addr,
- unsigned int size8,
- void *data,
- int byte_stream)
-{
- return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
-}
-
-int rsxx_creg_read(struct rsxx_cardinfo *card,
- u32 addr,
- unsigned int size8,
- void *data,
- int byte_stream)
-{
- return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
-}
-
-int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
-{
- return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
- sizeof(*state), state, 0);
-}
-
-int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
-{
- unsigned int size;
- int st;
-
- st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
- sizeof(size), &size, 0);
- if (st)
- return st;
-
- *size8 = (u64)size * RSXX_HW_BLK_SIZE;
- return 0;
-}
-
-int rsxx_get_num_targets(struct rsxx_cardinfo *card,
- unsigned int *n_targets)
-{
- return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
- sizeof(*n_targets), n_targets, 0);
-}
-
-int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
- u32 *capabilities)
-{
- return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
- sizeof(*capabilities), capabilities, 0);
-}
-
-int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
-{
- return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
- sizeof(cmd), &cmd, 0);
-}
-
-
-/*----------------- HW Log Functions -------------------*/
-static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
-{
- static char level;
-
- /*
- * New messages start with "<#>", where # is the log level. Messages
- * that extend past the log buffer will use the previous level
- */
- if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
- level = str[1];
- str += 3; /* Skip past the log level. */
- len -= 3;
- }
-
- switch (level) {
- case '0':
- dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '1':
- dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '2':
- dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '3':
- dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '4':
- dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '5':
- dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '6':
- dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- case '7':
- dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- default:
- dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
- break;
- }
-}
-
-/*
- * The substrncpy function copies at most count bytes of the src string
- * (including the terminating '\0' character) into the dest pointer.
- * Returns the number of bytes copied to dest.
- */
-static int substrncpy(char *dest, const char *src, int count)
-{
- int max_cnt = count;
-
- while (count) {
- count--;
- *dest = *src;
- if (*dest == '\0')
- break;
- src++;
- dest++;
- }
- return max_cnt - count;
-}
-
-
-static void read_hw_log_done(struct rsxx_cardinfo *card,
- struct creg_cmd *cmd,
- int st)
-{
- char *buf;
- char *log_str;
- int cnt;
- int len;
- int off;
-
- buf = cmd->buf;
- off = 0;
-
- /* Failed getting the log message */
- if (st)
- return;
-
- while (off < cmd->cnt8) {
- log_str = &card->log.buf[card->log.buf_len];
- cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
- len = substrncpy(log_str, &buf[off], cnt);
-
- off += len;
- card->log.buf_len += len;
-
- /*
- * Flush the log if we've hit the end of a message or if we've
- * run out of buffer space.
- */
- if ((log_str[len - 1] == '\0') ||
- (card->log.buf_len == LOG_BUF_SIZE8)) {
- if (card->log.buf_len != 1) /* Don't log blank lines. */
- hw_log_msg(card, card->log.buf,
- card->log.buf_len);
- card->log.buf_len = 0;
- }
-
- }
-
- if (cmd->status & CREG_STAT_LOG_PENDING)
- rsxx_read_hw_log(card);
-}
-
-int rsxx_read_hw_log(struct rsxx_cardinfo *card)
-{
- int st;
-
- st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
- sizeof(card->log.tmp), card->log.tmp,
- 1, read_hw_log_done, NULL);
- if (st)
- dev_err(CARD_TO_DEV(card),
- "Failed getting log text\n");
-
- return st;
-}
-
-/*-------------- IOCTL REG Access ------------------*/
-static int issue_reg_cmd(struct rsxx_cardinfo *card,
- struct rsxx_reg_access *cmd,
- int read)
-{
- unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;
-
- return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
- cmd->stream, &cmd->stat);
-}
-
-int rsxx_reg_access(struct rsxx_cardinfo *card,
- struct rsxx_reg_access __user *ucmd,
- int read)
-{
- struct rsxx_reg_access cmd;
- int st;
-
- st = copy_from_user(&cmd, ucmd, sizeof(cmd));
- if (st)
- return -EFAULT;
-
- if (cmd.cnt > RSXX_MAX_REG_CNT)
- return -EFAULT;
-
- st = issue_reg_cmd(card, &cmd, read);
- if (st)
- return st;
-
- st = put_user(cmd.stat, &ucmd->stat);
- if (st)
- return -EFAULT;
-
- if (read) {
- st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
- if (st)
- return -EFAULT;
- }
-
- return 0;
-}
-
-void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
-{
- struct creg_cmd *cmd = NULL;
-
- cmd = card->creg_ctrl.active_cmd;
- card->creg_ctrl.active_cmd = NULL;
-
- if (cmd) {
- del_timer_sync(&card->creg_ctrl.cmd_timer);
-
- spin_lock_bh(&card->creg_ctrl.lock);
- list_add(&cmd->list, &card->creg_ctrl.queue);
- card->creg_ctrl.q_depth++;
- card->creg_ctrl.active = 0;
- spin_unlock_bh(&card->creg_ctrl.lock);
- }
-}
-
-void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
-{
- spin_lock_bh(&card->creg_ctrl.lock);
- if (!list_empty(&card->creg_ctrl.queue))
- creg_kick_queue(card);
- spin_unlock_bh(&card->creg_ctrl.lock);
-}
-
-/*------------ Initialization & Setup --------------*/
-int rsxx_creg_setup(struct rsxx_cardinfo *card)
-{
- card->creg_ctrl.active_cmd = NULL;
-
- card->creg_ctrl.creg_wq =
- create_singlethread_workqueue(DRIVER_NAME"_creg");
- if (!card->creg_ctrl.creg_wq)
- return -ENOMEM;
-
- INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
- mutex_init(&card->creg_ctrl.reset_lock);
- INIT_LIST_HEAD(&card->creg_ctrl.queue);
- spin_lock_init(&card->creg_ctrl.lock);
- timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);
-
- return 0;
-}
-
-void rsxx_creg_destroy(struct rsxx_cardinfo *card)
-{
- struct creg_cmd *cmd;
- struct creg_cmd *tmp;
- int cnt = 0;
-
- /* Cancel outstanding commands */
- spin_lock_bh(&card->creg_ctrl.lock);
- list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
- list_del(&cmd->list);
- if (cmd->cb)
- cmd->cb(card, cmd, -ECANCELED);
- kmem_cache_free(creg_cmd_pool, cmd);
- cnt++;
- }
-
- if (cnt)
- dev_info(CARD_TO_DEV(card),
- "Canceled %d queue creg commands\n", cnt);
-
- cmd = card->creg_ctrl.active_cmd;
- card->creg_ctrl.active_cmd = NULL;
- if (cmd) {
- if (timer_pending(&card->creg_ctrl.cmd_timer))
- del_timer_sync(&card->creg_ctrl.cmd_timer);
-
- if (cmd->cb)
- cmd->cb(card, cmd, -ECANCELED);
- dev_info(CARD_TO_DEV(card),
- "Canceled active creg command\n");
- kmem_cache_free(creg_cmd_pool, cmd);
- }
- spin_unlock_bh(&card->creg_ctrl.lock);
-
- cancel_work_sync(&card->creg_ctrl.done_work);
-}
-
-
-int rsxx_creg_init(void)
-{
- creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
- if (!creg_cmd_pool)
- return -ENOMEM;
-
- return 0;
-}
-
-void rsxx_creg_cleanup(void)
-{
- kmem_cache_destroy(creg_cmd_pool);
-}
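[Editor's note: hw_log_msg() in the removed cregs.c demultiplexes firmware log text by a printk-style "<n>" prefix and keeps the last level in a static so continuation chunks inherit it. A user-space model of that prefix handling — the sample strings are illustrative:

#include <stdio.h>

static void log_chunk(const char *str, int len)
{
	static char level = '6';	/* default to info */

	/* New messages start with "<#>"; chunks without a prefix
	 * reuse the previous level, as hw_log_msg() does. */
	if (len > 3 && str[0] == '<' && str[2] == '>') {
		level = str[1];
		str += 3;
		len -= 3;
	}
	printf("[level %c] %.*s\n", level, len, str);
}

int main(void)
{
	log_chunk("<4>flash controller timeout", 27);
	log_chunk("...continued text", 17);	/* still level 4 */
	return 0;
}
]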
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
deleted file mode 100644
index dd33f1bdf3b8..000000000000
--- a/drivers/block/rsxx/dev.c
+++ /dev/null
@@ -1,306 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
-* Filename: dev.c
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#include <linux/kernel.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#include <linux/hdreg.h>
-#include <linux/genhd.h>
-#include <linux/blkdev.h>
-#include <linux/bio.h>
-
-#include <linux/fs.h>
-
-#include "rsxx_priv.h"
-
-static unsigned int blkdev_minors = 64;
-module_param(blkdev_minors, uint, 0444);
-MODULE_PARM_DESC(blkdev_minors, "Number of minors (partitions)");
-
-/*
- * For now I'm making this tweakable in case any applications hit this limit.
- * If you see a "bio too big" error in the log you will need to raise this
- * value.
- */
-static unsigned int blkdev_max_hw_sectors = 1024;
-module_param(blkdev_max_hw_sectors, uint, 0444);
-MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");
-
-static unsigned int enable_blkdev = 1;
-module_param(enable_blkdev , uint, 0444);
-MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");
-
-
-struct rsxx_bio_meta {
- struct bio *bio;
- atomic_t pending_dmas;
- atomic_t error;
- unsigned long start_time;
-};
-
-static struct kmem_cache *bio_meta_pool;
-
-static void rsxx_submit_bio(struct bio *bio);
-
-/*----------------- Block Device Operations -----------------*/
-static int rsxx_blkdev_ioctl(struct block_device *bdev,
- fmode_t mode,
- unsigned int cmd,
- unsigned long arg)
-{
- struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
-
- switch (cmd) {
- case RSXX_GETREG:
- return rsxx_reg_access(card, (void __user *)arg, 1);
- case RSXX_SETREG:
- return rsxx_reg_access(card, (void __user *)arg, 0);
- }
-
- return -ENOTTY;
-}
-
-static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
- u64 blocks = card->size8 >> 9;
-
- /*
- * get geometry: Fake it. I haven't found any drivers that set
- * geo->start, so we won't either.
- */
- if (card->size8) {
- geo->heads = 64;
- geo->sectors = 16;
- do_div(blocks, (geo->heads * geo->sectors));
- geo->cylinders = blocks;
- } else {
- geo->heads = 0;
- geo->sectors = 0;
- geo->cylinders = 0;
- }
- return 0;
-}
-
-static const struct block_device_operations rsxx_fops = {
- .owner = THIS_MODULE,
- .submit_bio = rsxx_submit_bio,
- .getgeo = rsxx_getgeo,
- .ioctl = rsxx_blkdev_ioctl,
-};
-
-static void bio_dma_done_cb(struct rsxx_cardinfo *card,
- void *cb_data,
- unsigned int error)
-{
- struct rsxx_bio_meta *meta = cb_data;
-
- if (error)
- atomic_set(&meta->error, 1);
-
- if (atomic_dec_and_test(&meta->pending_dmas)) {
- if (!card->eeh_state && card->gendisk)
- bio_end_io_acct(meta->bio, meta->start_time);
-
- if (atomic_read(&meta->error))
- bio_io_error(meta->bio);
- else
- bio_endio(meta->bio);
- kmem_cache_free(bio_meta_pool, meta);
- }
-}
-
-static void rsxx_submit_bio(struct bio *bio)
-{
- struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
- struct rsxx_bio_meta *bio_meta;
- blk_status_t st = BLK_STS_IOERR;
-
- blk_queue_split(&bio);
-
- might_sleep();
-
- if (!card)
- goto req_err;
-
- if (bio_end_sector(bio) > get_capacity(card->gendisk))
- goto req_err;
-
- if (unlikely(card->halt))
- goto req_err;
-
- if (unlikely(card->dma_fault))
- goto req_err;
-
- if (bio->bi_iter.bi_size == 0) {
- dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
- goto req_err;
- }
-
- bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
- if (!bio_meta) {
- st = BLK_STS_RESOURCE;
- goto req_err;
- }
-
- bio_meta->bio = bio;
- atomic_set(&bio_meta->error, 0);
- atomic_set(&bio_meta->pending_dmas, 0);
-
-	if (likely(!card->halt))
- bio_meta->start_time = bio_start_io_acct(bio);
-
- dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
- bio_data_dir(bio) ? 'W' : 'R', bio_meta,
- (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
-
- st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
- bio_dma_done_cb, bio_meta);
- if (st)
- goto queue_err;
-
- return;
-
-queue_err:
- kmem_cache_free(bio_meta_pool, bio_meta);
-req_err:
- if (st)
- bio->bi_status = st;
- bio_endio(bio);
-}
-
-/*----------------- Device Setup -------------------*/
-static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
-{
- unsigned char pci_rev;
-
- pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
-
- return (pci_rev >= RSXX_DISCARD_SUPPORT);
-}
-
-int rsxx_attach_dev(struct rsxx_cardinfo *card)
-{
- int err = 0;
-
- mutex_lock(&card->dev_lock);
-
- /* The block device requires the stripe size from the config. */
- if (enable_blkdev) {
- if (card->config_valid)
- set_capacity(card->gendisk, card->size8 >> 9);
- else
- set_capacity(card->gendisk, 0);
- err = device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
- if (err == 0)
- card->bdev_attached = 1;
- }
-
- mutex_unlock(&card->dev_lock);
-
- if (err)
- blk_cleanup_disk(card->gendisk);
-
- return err;
-}
-
-void rsxx_detach_dev(struct rsxx_cardinfo *card)
-{
- mutex_lock(&card->dev_lock);
-
- if (card->bdev_attached) {
- del_gendisk(card->gendisk);
- card->bdev_attached = 0;
- }
-
- mutex_unlock(&card->dev_lock);
-}
-
-int rsxx_setup_dev(struct rsxx_cardinfo *card)
-{
- unsigned short blk_size;
-
- mutex_init(&card->dev_lock);
-
- if (!enable_blkdev)
- return 0;
-
- card->major = register_blkdev(0, DRIVER_NAME);
- if (card->major < 0) {
- dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
- return -ENOMEM;
- }
-
- card->gendisk = blk_alloc_disk(blkdev_minors);
- if (!card->gendisk) {
- dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
- unregister_blkdev(card->major, DRIVER_NAME);
- return -ENOMEM;
- }
-
- if (card->config_valid) {
- blk_size = card->config.data.block_size;
- blk_queue_dma_alignment(card->gendisk->queue, blk_size - 1);
- blk_queue_logical_block_size(card->gendisk->queue, blk_size);
- }
-
- blk_queue_max_hw_sectors(card->gendisk->queue, blkdev_max_hw_sectors);
- blk_queue_physical_block_size(card->gendisk->queue, RSXX_HW_BLK_SIZE);
-
- blk_queue_flag_set(QUEUE_FLAG_NONROT, card->gendisk->queue);
- blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->gendisk->queue);
- if (rsxx_discard_supported(card)) {
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->gendisk->queue);
- blk_queue_max_discard_sectors(card->gendisk->queue,
- RSXX_HW_BLK_SIZE >> 9);
- card->gendisk->queue->limits.discard_granularity =
- RSXX_HW_BLK_SIZE;
- card->gendisk->queue->limits.discard_alignment =
- RSXX_HW_BLK_SIZE;
- }
-
- snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
- "rsxx%d", card->disk_id);
- card->gendisk->major = card->major;
- card->gendisk->minors = blkdev_minors;
- card->gendisk->fops = &rsxx_fops;
- card->gendisk->private_data = card;
-
- return 0;
-}
-
-void rsxx_destroy_dev(struct rsxx_cardinfo *card)
-{
- if (!enable_blkdev)
- return;
-
- blk_cleanup_disk(card->gendisk);
- card->gendisk = NULL;
- unregister_blkdev(card->major, DRIVER_NAME);
-}
-
-int rsxx_dev_init(void)
-{
- bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);
- if (!bio_meta_pool)
- return -ENOMEM;
-
- return 0;
-}
-
-void rsxx_dev_cleanup(void)
-{
- kmem_cache_destroy(bio_meta_pool);
-}
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
deleted file mode 100644
index ed182f3dd054..000000000000
--- a/drivers/block/rsxx/dma.c
+++ /dev/null
@@ -1,1085 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
-* Filename: dma.c
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#include <linux/slab.h>
-#include "rsxx_priv.h"
-
-struct rsxx_dma {
- struct list_head list;
- u8 cmd;
- unsigned int laddr; /* Logical address */
- struct {
- u32 off;
- u32 cnt;
- } sub_page;
- dma_addr_t dma_addr;
- struct page *page;
- unsigned int pg_off; /* Page Offset */
- rsxx_dma_cb cb;
- void *cb_data;
-};
-
-/* This timeout is used to detect a stalled DMA channel */
-#define DMA_ACTIVITY_TIMEOUT msecs_to_jiffies(10000)
-
-struct hw_status {
- u8 status;
- u8 tag;
- __le16 count;
- __le32 _rsvd2;
- __le64 _rsvd3;
-} __packed;
-
-enum rsxx_dma_status {
- DMA_SW_ERR = 0x1,
- DMA_HW_FAULT = 0x2,
- DMA_CANCELLED = 0x4,
-};
-
-struct hw_cmd {
- u8 command;
- u8 tag;
- u8 _rsvd;
- u8 sub_page; /* Bit[0:2]: 512byte offset */
- /* Bit[4:6]: 512byte count */
- __le32 device_addr;
- __le64 host_addr;
-} __packed;
-
-enum rsxx_hw_cmd {
- HW_CMD_BLK_DISCARD = 0x70,
- HW_CMD_BLK_WRITE = 0x80,
- HW_CMD_BLK_READ = 0xC0,
- HW_CMD_BLK_RECON_READ = 0xE0,
-};
-
-enum rsxx_hw_status {
- HW_STATUS_CRC = 0x01,
- HW_STATUS_HARD_ERR = 0x02,
- HW_STATUS_SOFT_ERR = 0x04,
- HW_STATUS_FAULT = 0x08,
-};
-
-static struct kmem_cache *rsxx_dma_pool;
-
-struct dma_tracker {
- int next_tag;
- struct rsxx_dma *dma;
-};
-
-struct dma_tracker_list {
- spinlock_t lock;
- int head;
- struct dma_tracker list[];
-};
-
-
-/*----------------- Misc Utility Functions -------------------*/
-static unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
-{
- unsigned long long tgt_addr8;
-
- tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
- card->_stripe.upper_mask) |
- ((addr8) & card->_stripe.lower_mask);
- do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
- return tgt_addr8;
-}
-
-static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
-{
- unsigned int tgt;
-
- tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;
-
- return tgt;
-}
-
-void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
-{
- /* Reset all DMA Command/Status Queues */
- iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
-}
-
-static unsigned int get_dma_size(struct rsxx_dma *dma)
-{
- if (dma->sub_page.cnt)
- return dma->sub_page.cnt << 9;
- else
- return RSXX_HW_BLK_SIZE;
-}
-
-
-/*----------------- DMA Tracker -------------------*/
-static void set_tracker_dma(struct dma_tracker_list *trackers,
- int tag,
- struct rsxx_dma *dma)
-{
- trackers->list[tag].dma = dma;
-}
-
-static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
- int tag)
-{
- return trackers->list[tag].dma;
-}
-
-static int pop_tracker(struct dma_tracker_list *trackers)
-{
- int tag;
-
- spin_lock(&trackers->lock);
- tag = trackers->head;
- if (tag != -1) {
- trackers->head = trackers->list[tag].next_tag;
- trackers->list[tag].next_tag = -1;
- }
- spin_unlock(&trackers->lock);
-
- return tag;
-}
-
-static void push_tracker(struct dma_tracker_list *trackers, int tag)
-{
- spin_lock(&trackers->lock);
- trackers->list[tag].next_tag = trackers->head;
- trackers->head = tag;
- trackers->list[tag].dma = NULL;
- spin_unlock(&trackers->lock);
-}
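
pop_tracker() and push_tracker() above implement a LIFO free list of DMA tags threaded through the next_tag fields, with -1 as the list terminator. A stand-alone sketch of the same structure (user-space C, locking omitted; names are placeholders):

#include <stdio.h>

#define NTAGS 8

static int next_tag[NTAGS];
static int head;

static void tracker_init(void)
{
	int i;

	for (i = 0; i < NTAGS - 1; i++)
		next_tag[i] = i + 1;
	next_tag[NTAGS - 1] = -1;
	head = 0;
}

static int pop_tag(void)
{
	int tag = head;

	if (tag != -1)
		head = next_tag[tag];
	return tag;	/* -1 means "no free tag" */
}

static void push_tag(int tag)
{
	next_tag[tag] = head;
	head = tag;
}

int main(void)
{
	tracker_init();
	printf("%d %d\n", pop_tag(), pop_tag());	/* 0 1 */
	push_tag(0);
	printf("%d\n", pop_tag());			/* 0 again (LIFO) */
	return 0;
}
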
-
-
-/*----------------- Interrupt Coalescing -------------*/
-/*
- * Interrupt Coalescing Register Format:
- * Interrupt Timer (64ns units) [15:0]
- * Interrupt Count [24:16]
- * Reserved [31:25]
-*/
-#define INTR_COAL_LATENCY_MASK (0x0000ffff)
-
-#define INTR_COAL_COUNT_SHIFT 16
-#define INTR_COAL_COUNT_BITS 9
-#define INTR_COAL_COUNT_MASK (((1 << INTR_COAL_COUNT_BITS) - 1) << \
- INTR_COAL_COUNT_SHIFT)
-#define INTR_COAL_LATENCY_UNITS_NS 64
-
-
-static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
-{
- u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;
-
- if (mode == RSXX_INTR_COAL_DISABLED)
- return 0;
-
- return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
- (latency_units & INTR_COAL_LATENCY_MASK);
-}
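
Plugging hypothetical numbers into the packing above: count = 32 and latency = 4096 ns give latency_units = 4096 / 64 = 64, so the register value is (32 << 16) | 64 = 0x00200040 — count in bits [24:16], timer units in bits [15:0], exactly as the register-format comment describes.
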
-
-static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
-{
- int i;
- u32 q_depth = 0;
- u32 intr_coal;
-
- if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
- unlikely(card->eeh_state))
- return;
-
- for (i = 0; i < card->n_targets; i++)
- q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);
-
- intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
- q_depth / 2,
- card->config.data.intr_coal.latency);
- iowrite32(intr_coal, card->regmap + INTR_COAL);
-}
-
-/*----------------- RSXX DMA Handling -------------------*/
-static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
-{
- if (dma->cmd != HW_CMD_BLK_DISCARD) {
- if (!dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
- dma_unmap_page(&ctrl->card->dev->dev, dma->dma_addr,
- get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- DMA_TO_DEVICE :
- DMA_FROM_DEVICE);
- }
- }
-
- kmem_cache_free(rsxx_dma_pool, dma);
-}
-
-static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
- struct rsxx_dma *dma,
- unsigned int status)
-{
- if (status & DMA_SW_ERR)
- ctrl->stats.dma_sw_err++;
- if (status & DMA_HW_FAULT)
- ctrl->stats.dma_hw_fault++;
- if (status & DMA_CANCELLED)
- ctrl->stats.dma_cancelled++;
-
- if (dma->cb)
- dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
-
- rsxx_free_dma(ctrl, dma);
-}
-
-int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
- struct list_head *q, unsigned int done)
-{
- struct rsxx_dma *dma;
- struct rsxx_dma *tmp;
- int cnt = 0;
-
- list_for_each_entry_safe(dma, tmp, q, list) {
- list_del(&dma->list);
- if (done & COMPLETE_DMA)
- rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
- else
- rsxx_free_dma(ctrl, dma);
- cnt++;
- }
-
- return cnt;
-}
-
-static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
- struct rsxx_dma *dma)
-{
- /*
- * Requeued DMAs go to the front of the queue so they are issued
- * first.
- */
- spin_lock_bh(&ctrl->queue_lock);
- ctrl->stats.sw_q_depth++;
- list_add(&dma->list, &ctrl->queue);
- spin_unlock_bh(&ctrl->queue_lock);
-}
-
-static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
- struct rsxx_dma *dma,
- u8 hw_st)
-{
- unsigned int status = 0;
- int requeue_cmd = 0;
-
- dev_dbg(CARD_TO_DEV(ctrl->card),
- "Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
- dma->cmd, dma->laddr, hw_st);
-
- if (hw_st & HW_STATUS_CRC)
- ctrl->stats.crc_errors++;
- if (hw_st & HW_STATUS_HARD_ERR)
- ctrl->stats.hard_errors++;
- if (hw_st & HW_STATUS_SOFT_ERR)
- ctrl->stats.soft_errors++;
-
- switch (dma->cmd) {
- case HW_CMD_BLK_READ:
- if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
- if (ctrl->card->scrub_hard) {
- dma->cmd = HW_CMD_BLK_RECON_READ;
- requeue_cmd = 1;
- ctrl->stats.reads_retried++;
- } else {
- status |= DMA_HW_FAULT;
- ctrl->stats.reads_failed++;
- }
- } else if (hw_st & HW_STATUS_FAULT) {
- status |= DMA_HW_FAULT;
- ctrl->stats.reads_failed++;
- }
-
- break;
- case HW_CMD_BLK_RECON_READ:
- if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
- /* Data could not be reconstructed. */
- status |= DMA_HW_FAULT;
- ctrl->stats.reads_failed++;
- }
-
- break;
- case HW_CMD_BLK_WRITE:
- status |= DMA_HW_FAULT;
- ctrl->stats.writes_failed++;
-
- break;
- case HW_CMD_BLK_DISCARD:
- status |= DMA_HW_FAULT;
- ctrl->stats.discards_failed++;
-
- break;
- default:
- dev_err(CARD_TO_DEV(ctrl->card),
-			"Unknown command in DMA! (cmd: x%02x "
-			"laddr x%08x st: x%02x)\n",
- dma->cmd, dma->laddr, hw_st);
- status |= DMA_SW_ERR;
-
- break;
- }
-
- if (requeue_cmd)
- rsxx_requeue_dma(ctrl, dma);
- else
- rsxx_complete_dma(ctrl, dma, status);
-}
-
-static void dma_engine_stalled(struct timer_list *t)
-{
- struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
- int cnt;
-
- if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
- unlikely(ctrl->card->eeh_state))
- return;
-
- if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
- /*
- * The dma engine was stalled because the SW_CMD_IDX write
- * was lost. Issue it again to recover.
- */
- dev_warn(CARD_TO_DEV(ctrl->card),
- "SW_CMD_IDX write was lost, re-writing...\n");
- iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
- mod_timer(&ctrl->activity_timer,
- jiffies + DMA_ACTIVITY_TIMEOUT);
- } else {
- dev_warn(CARD_TO_DEV(ctrl->card),
- "DMA channel %d has stalled, faulting interface.\n",
- ctrl->id);
- ctrl->card->dma_fault = 1;
-
- /* Clean up the DMA queue */
- spin_lock(&ctrl->queue_lock);
- cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
- spin_unlock(&ctrl->queue_lock);
-
- cnt += rsxx_dma_cancel(ctrl);
-
- if (cnt)
- dev_info(CARD_TO_DEV(ctrl->card),
- "Freed %d queued DMAs on channel %d\n",
- cnt, ctrl->id);
- }
-}
-
-static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
-{
- struct rsxx_dma *dma;
- int tag;
- int cmds_pending = 0;
- struct hw_cmd *hw_cmd_buf;
- int dir;
-
- hw_cmd_buf = ctrl->cmd.buf;
-
- if (unlikely(ctrl->card->halt) ||
- unlikely(ctrl->card->eeh_state))
- return;
-
- while (1) {
- spin_lock_bh(&ctrl->queue_lock);
- if (list_empty(&ctrl->queue)) {
- spin_unlock_bh(&ctrl->queue_lock);
- break;
- }
- spin_unlock_bh(&ctrl->queue_lock);
-
- tag = pop_tracker(ctrl->trackers);
- if (tag == -1)
- break;
-
- spin_lock_bh(&ctrl->queue_lock);
- dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
- list_del(&dma->list);
- ctrl->stats.sw_q_depth--;
- spin_unlock_bh(&ctrl->queue_lock);
-
- /*
- * This will catch any DMAs that slipped in right before the
-	 * fault, but were queued after all the other DMAs were
- * cancelled.
- */
- if (unlikely(ctrl->card->dma_fault)) {
- push_tracker(ctrl->trackers, tag);
- rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
- continue;
- }
-
- if (dma->cmd != HW_CMD_BLK_DISCARD) {
- if (dma->cmd == HW_CMD_BLK_WRITE)
- dir = DMA_TO_DEVICE;
- else
- dir = DMA_FROM_DEVICE;
-
- /*
- * The function dma_map_page is placed here because we
- * can only, by design, issue up to 255 commands to the
- * hardware at one time per DMA channel. So the maximum
- * amount of mapped memory would be 255 * 4 channels *
- * 4096 Bytes which is less than 2GB, the limit of a x8
- * Non-HWWD PCIe slot. This way the dma_map_page
- * function should never fail because of a lack of
- * mappable memory.
- */
- dma->dma_addr = dma_map_page(&ctrl->card->dev->dev, dma->page,
- dma->pg_off, dma->sub_page.cnt << 9, dir);
- if (dma_mapping_error(&ctrl->card->dev->dev, dma->dma_addr)) {
- push_tracker(ctrl->trackers, tag);
- rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
- continue;
- }
- }
-
- set_tracker_dma(ctrl->trackers, tag, dma);
- hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
- hw_cmd_buf[ctrl->cmd.idx].tag = tag;
- hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0;
- hw_cmd_buf[ctrl->cmd.idx].sub_page =
- ((dma->sub_page.cnt & 0x7) << 4) |
- (dma->sub_page.off & 0x7);
-
- hw_cmd_buf[ctrl->cmd.idx].device_addr =
- cpu_to_le32(dma->laddr);
-
- hw_cmd_buf[ctrl->cmd.idx].host_addr =
- cpu_to_le64(dma->dma_addr);
-
- dev_dbg(CARD_TO_DEV(ctrl->card),
- "Issue DMA%d(laddr %d tag %d) to idx %d\n",
- ctrl->id, dma->laddr, tag, ctrl->cmd.idx);
-
- ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
- cmds_pending++;
-
- if (dma->cmd == HW_CMD_BLK_WRITE)
- ctrl->stats.writes_issued++;
- else if (dma->cmd == HW_CMD_BLK_DISCARD)
- ctrl->stats.discards_issued++;
- else
- ctrl->stats.reads_issued++;
- }
-
- /* Let HW know we've queued commands. */
- if (cmds_pending) {
- atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
- mod_timer(&ctrl->activity_timer,
- jiffies + DMA_ACTIVITY_TIMEOUT);
-
- if (unlikely(ctrl->card->eeh_state)) {
- del_timer_sync(&ctrl->activity_timer);
- return;
- }
-
- iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
- }
-}
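
The bound in the dma_map_page comment works out to 255 commands × 4 channels × 4096 bytes ≈ 4 MiB mapped at any one time, far below the quoted 2 GB ceiling, which is why a mapping failure here is treated as exceptional and completed as DMA_CANCELLED rather than retried.
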
-
-static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
-{
- struct rsxx_dma *dma;
- unsigned long flags;
- u16 count;
- u8 status;
- u8 tag;
- struct hw_status *hw_st_buf;
-
- hw_st_buf = ctrl->status.buf;
-
- if (unlikely(ctrl->card->halt) ||
- unlikely(ctrl->card->dma_fault) ||
- unlikely(ctrl->card->eeh_state))
- return;
-
- count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
-
- while (count == ctrl->e_cnt) {
- /*
- * The read memory-barrier is necessary to keep aggressive
- * processors/optimizers (such as the PPC Apple G5) from
- * reordering the following status-buffer tag & status read
- * *before* the count read on subsequent iterations of the
- * loop!
- */
- rmb();
-
- status = hw_st_buf[ctrl->status.idx].status;
- tag = hw_st_buf[ctrl->status.idx].tag;
-
- dma = get_tracker_dma(ctrl->trackers, tag);
- if (dma == NULL) {
- spin_lock_irqsave(&ctrl->card->irq_lock, flags);
- rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
- spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
-
- dev_err(CARD_TO_DEV(ctrl->card),
- "No tracker for tag %d "
- "(idx %d id %d)\n",
- tag, ctrl->status.idx, ctrl->id);
- return;
- }
-
- dev_dbg(CARD_TO_DEV(ctrl->card),
- "Completing DMA%d"
- "(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
- ctrl->id, dma->laddr, tag, status, count,
- ctrl->status.idx);
-
- atomic_dec(&ctrl->stats.hw_q_depth);
-
- mod_timer(&ctrl->activity_timer,
- jiffies + DMA_ACTIVITY_TIMEOUT);
-
- if (status)
- rsxx_handle_dma_error(ctrl, dma, status);
- else
- rsxx_complete_dma(ctrl, dma, 0);
-
- push_tracker(ctrl->trackers, tag);
-
- ctrl->status.idx = (ctrl->status.idx + 1) &
- RSXX_CS_IDX_MASK;
- ctrl->e_cnt++;
-
- count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
- }
-
- dma_intr_coal_auto_tune(ctrl->card);
-
- if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
- del_timer_sync(&ctrl->activity_timer);
-
- spin_lock_irqsave(&ctrl->card->irq_lock, flags);
- rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
- spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
-
- spin_lock_bh(&ctrl->queue_lock);
- if (ctrl->stats.sw_q_depth)
- queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
- spin_unlock_bh(&ctrl->queue_lock);
-}
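
The completion path above is a single-consumer status ring: hardware appends {status, tag, count} records, and the driver consumes entries for as long as an entry's count field matches its own running counter (e_cnt), with rmb() keeping the tag/status reads from being hoisted above the count check. A bare-bones sketch of that handshake (hypothetical entry layout and handler; kernel barrier and endian helpers assumed):

struct ring_entry {
	u8 status;
	u8 tag;
	__le16 count;	/* producer's running counter */
};

static void ring_consume(struct ring_entry *ring, u32 *idx,
			 u16 *e_cnt, u32 mask,
			 void (*handle)(u8 status, u8 tag))
{
	while (le16_to_cpu(ring[*idx].count) == *e_cnt) {
		rmb();	/* order payload reads after the count read */
		handle(ring[*idx].status, ring[*idx].tag);
		*idx = (*idx + 1) & mask;
		(*e_cnt)++;
	}
}
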
-
-static void rsxx_schedule_issue(struct work_struct *work)
-{
- struct rsxx_dma_ctrl *ctrl;
-
- ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
-
- mutex_lock(&ctrl->work_lock);
- rsxx_issue_dmas(ctrl);
- mutex_unlock(&ctrl->work_lock);
-}
-
-static void rsxx_schedule_done(struct work_struct *work)
-{
- struct rsxx_dma_ctrl *ctrl;
-
- ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
-
- mutex_lock(&ctrl->work_lock);
- rsxx_dma_done(ctrl);
- mutex_unlock(&ctrl->work_lock);
-}
-
-static blk_status_t rsxx_queue_discard(struct rsxx_cardinfo *card,
- struct list_head *q,
- unsigned int laddr,
- rsxx_dma_cb cb,
- void *cb_data)
-{
- struct rsxx_dma *dma;
-
- dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
- if (!dma)
- return BLK_STS_RESOURCE;
-
- dma->cmd = HW_CMD_BLK_DISCARD;
- dma->laddr = laddr;
- dma->dma_addr = 0;
- dma->sub_page.off = 0;
- dma->sub_page.cnt = 0;
- dma->page = NULL;
- dma->pg_off = 0;
- dma->cb = cb;
- dma->cb_data = cb_data;
-
- dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);
-
- list_add_tail(&dma->list, q);
-
- return 0;
-}
-
-static blk_status_t rsxx_queue_dma(struct rsxx_cardinfo *card,
- struct list_head *q,
- int dir,
- unsigned int dma_off,
- unsigned int dma_len,
- unsigned int laddr,
- struct page *page,
- unsigned int pg_off,
- rsxx_dma_cb cb,
- void *cb_data)
-{
- struct rsxx_dma *dma;
-
- dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
- if (!dma)
- return BLK_STS_RESOURCE;
-
- dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
- dma->laddr = laddr;
- dma->sub_page.off = (dma_off >> 9);
- dma->sub_page.cnt = (dma_len >> 9);
- dma->page = page;
- dma->pg_off = pg_off;
- dma->cb = cb;
- dma->cb_data = cb_data;
-
- dev_dbg(CARD_TO_DEV(card),
- "Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
- dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
- dma->sub_page.cnt, dma->page, dma->pg_off);
-
- /* Queue the DMA */
- list_add_tail(&dma->list, q);
-
- return 0;
-}
-
-blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
- struct bio *bio,
- atomic_t *n_dmas,
- rsxx_dma_cb cb,
- void *cb_data)
-{
- struct list_head dma_list[RSXX_MAX_TARGETS];
- struct bio_vec bvec;
- struct bvec_iter iter;
- unsigned long long addr8;
- unsigned int laddr;
- unsigned int bv_len;
- unsigned int bv_off;
- unsigned int dma_off;
- unsigned int dma_len;
- int dma_cnt[RSXX_MAX_TARGETS];
- int tgt;
- blk_status_t st;
- int i;
-
- addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
- atomic_set(n_dmas, 0);
-
- for (i = 0; i < card->n_targets; i++) {
- INIT_LIST_HEAD(&dma_list[i]);
- dma_cnt[i] = 0;
- }
-
- if (bio_op(bio) == REQ_OP_DISCARD) {
- bv_len = bio->bi_iter.bi_size;
-
- while (bv_len > 0) {
- tgt = rsxx_get_dma_tgt(card, addr8);
- laddr = rsxx_addr8_to_laddr(addr8, card);
-
- st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
- cb, cb_data);
- if (st)
- goto bvec_err;
-
- dma_cnt[tgt]++;
- atomic_inc(n_dmas);
- addr8 += RSXX_HW_BLK_SIZE;
- bv_len -= RSXX_HW_BLK_SIZE;
- }
- } else {
- bio_for_each_segment(bvec, bio, iter) {
- bv_len = bvec.bv_len;
- bv_off = bvec.bv_offset;
-
- while (bv_len > 0) {
- tgt = rsxx_get_dma_tgt(card, addr8);
- laddr = rsxx_addr8_to_laddr(addr8, card);
- dma_off = addr8 & RSXX_HW_BLK_MASK;
- dma_len = min(bv_len,
- RSXX_HW_BLK_SIZE - dma_off);
-
- st = rsxx_queue_dma(card, &dma_list[tgt],
- bio_data_dir(bio),
- dma_off, dma_len,
- laddr, bvec.bv_page,
- bv_off, cb, cb_data);
- if (st)
- goto bvec_err;
-
- dma_cnt[tgt]++;
- atomic_inc(n_dmas);
- addr8 += dma_len;
- bv_off += dma_len;
- bv_len -= dma_len;
- }
- }
- }
-
- for (i = 0; i < card->n_targets; i++) {
- if (!list_empty(&dma_list[i])) {
- spin_lock_bh(&card->ctrl[i].queue_lock);
- card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
- list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
- spin_unlock_bh(&card->ctrl[i].queue_lock);
-
- queue_work(card->ctrl[i].issue_wq,
- &card->ctrl[i].issue_dma_work);
- }
- }
-
- return 0;
-
-bvec_err:
- for (i = 0; i < card->n_targets; i++)
- rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
- FREE_DMA);
- return st;
-}
-
-
-/*----------------- DMA Engine Initialization & Setup -------------------*/
-int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
-{
- ctrl->status.buf = dma_alloc_coherent(&dev->dev, STATUS_BUFFER_SIZE8,
- &ctrl->status.dma_addr, GFP_KERNEL);
- ctrl->cmd.buf = dma_alloc_coherent(&dev->dev, COMMAND_BUFFER_SIZE8,
- &ctrl->cmd.dma_addr, GFP_KERNEL);
- if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
- return -ENOMEM;
-
- memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
- iowrite32(lower_32_bits(ctrl->status.dma_addr),
- ctrl->regmap + SB_ADD_LO);
- iowrite32(upper_32_bits(ctrl->status.dma_addr),
- ctrl->regmap + SB_ADD_HI);
-
- memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
- iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
- iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
-
- ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
- if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
- dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
- ctrl->status.idx);
- return -EINVAL;
- }
- iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
- iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
-
- ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
- if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
- dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
-			ctrl->cmd.idx);
- return -EINVAL;
- }
- iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
- iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
-
- return 0;
-}
-
-static int rsxx_dma_ctrl_init(struct pci_dev *dev,
- struct rsxx_dma_ctrl *ctrl)
-{
- int i;
- int st;
-
- memset(&ctrl->stats, 0, sizeof(ctrl->stats));
-
- ctrl->trackers = vmalloc(struct_size(ctrl->trackers, list,
- RSXX_MAX_OUTSTANDING_CMDS));
- if (!ctrl->trackers)
- return -ENOMEM;
-
- ctrl->trackers->head = 0;
- for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
- ctrl->trackers->list[i].next_tag = i + 1;
- ctrl->trackers->list[i].dma = NULL;
- }
- ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
- spin_lock_init(&ctrl->trackers->lock);
-
- spin_lock_init(&ctrl->queue_lock);
- mutex_init(&ctrl->work_lock);
- INIT_LIST_HEAD(&ctrl->queue);
-
- timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0);
-
- ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
- if (!ctrl->issue_wq)
- return -ENOMEM;
-
- ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
- if (!ctrl->done_wq)
- return -ENOMEM;
-
- INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
- INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);
-
- st = rsxx_hw_buffers_init(dev, ctrl);
- if (st)
- return st;
-
- return 0;
-}
-
-static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
- unsigned int stripe_size8)
-{
- if (!is_power_of_2(stripe_size8)) {
- dev_err(CARD_TO_DEV(card),
- "stripe_size is NOT a power of 2!\n");
- return -EINVAL;
- }
-
- card->_stripe.lower_mask = stripe_size8 - 1;
-
- card->_stripe.upper_mask = ~(card->_stripe.lower_mask);
- card->_stripe.upper_shift = ffs(card->n_targets) - 1;
-
- card->_stripe.target_mask = card->n_targets - 1;
- card->_stripe.target_shift = ffs(stripe_size8) - 1;
-
- dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n",
- card->_stripe.lower_mask);
- dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n",
- card->_stripe.upper_shift);
- dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n",
- card->_stripe.upper_mask);
- dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n",
- card->_stripe.target_mask);
- dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
- card->_stripe.target_shift);
-
- return 0;
-}
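
With hypothetical parameters stripe_size8 = 4096 and n_targets = 4, the fields above come out to lower_mask = 0xfff, upper_mask = ~0xfff, upper_shift = 2, target_mask = 0x3 and target_shift = 12. Byte address 0x12345 then maps to target (0x12345 >> 12) & 0x3 = 2, and its per-target byte address is ((0x12345 >> 2) & ~0xfff) | (0x12345 & 0xfff) = 0x4345, i.e. logical block 0x4345 / 4096 = 4 — block 18 of the card lands as block 4 on channel 2, consistent with round-robin 4 KiB striping.
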
-
-int rsxx_dma_configure(struct rsxx_cardinfo *card)
-{
- u32 intr_coal;
-
- intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
- card->config.data.intr_coal.count,
- card->config.data.intr_coal.latency);
- iowrite32(intr_coal, card->regmap + INTR_COAL);
-
- return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
-}
-
-int rsxx_dma_setup(struct rsxx_cardinfo *card)
-{
- unsigned long flags;
- int st;
- int i;
-
- dev_info(CARD_TO_DEV(card),
- "Initializing %d DMA targets\n",
- card->n_targets);
-
- /* Regmap is divided up into 4K chunks. One for each DMA channel */
- for (i = 0; i < card->n_targets; i++)
- card->ctrl[i].regmap = card->regmap + (i * 4096);
-
- card->dma_fault = 0;
-
- /* Reset the DMA queues */
- rsxx_dma_queue_reset(card);
-
- /************* Setup DMA Control *************/
- for (i = 0; i < card->n_targets; i++) {
- st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
- if (st)
- goto failed_dma_setup;
-
- card->ctrl[i].card = card;
- card->ctrl[i].id = i;
- }
-
- card->scrub_hard = 1;
-
- if (card->config_valid)
- rsxx_dma_configure(card);
-
- /* Enable the interrupts after all setup has completed. */
- for (i = 0; i < card->n_targets; i++) {
- spin_lock_irqsave(&card->irq_lock, flags);
- rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
- spin_unlock_irqrestore(&card->irq_lock, flags);
- }
-
- return 0;
-
-failed_dma_setup:
- for (i = 0; i < card->n_targets; i++) {
- struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];
-
- if (ctrl->issue_wq) {
- destroy_workqueue(ctrl->issue_wq);
- ctrl->issue_wq = NULL;
- }
-
- if (ctrl->done_wq) {
- destroy_workqueue(ctrl->done_wq);
- ctrl->done_wq = NULL;
- }
-
- vfree(ctrl->trackers);
-
- if (ctrl->status.buf)
- dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
- ctrl->status.buf,
- ctrl->status.dma_addr);
- if (ctrl->cmd.buf)
- dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
- ctrl->cmd.buf, ctrl->cmd.dma_addr);
- }
-
- return st;
-}
-
-int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
-{
- struct rsxx_dma *dma;
- int i;
- int cnt = 0;
-
- /* Clean up issued DMAs */
- for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
- dma = get_tracker_dma(ctrl->trackers, i);
- if (dma) {
- atomic_dec(&ctrl->stats.hw_q_depth);
- rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
- push_tracker(ctrl->trackers, i);
- cnt++;
- }
- }
-
- return cnt;
-}
-
-void rsxx_dma_destroy(struct rsxx_cardinfo *card)
-{
- struct rsxx_dma_ctrl *ctrl;
- int i;
-
- for (i = 0; i < card->n_targets; i++) {
- ctrl = &card->ctrl[i];
-
- if (ctrl->issue_wq) {
- destroy_workqueue(ctrl->issue_wq);
- ctrl->issue_wq = NULL;
- }
-
- if (ctrl->done_wq) {
- destroy_workqueue(ctrl->done_wq);
- ctrl->done_wq = NULL;
- }
-
- if (timer_pending(&ctrl->activity_timer))
- del_timer_sync(&ctrl->activity_timer);
-
- /* Clean up the DMA queue */
- spin_lock_bh(&ctrl->queue_lock);
- rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
- spin_unlock_bh(&ctrl->queue_lock);
-
- rsxx_dma_cancel(ctrl);
-
- vfree(ctrl->trackers);
-
- dma_free_coherent(&card->dev->dev, STATUS_BUFFER_SIZE8,
- ctrl->status.buf, ctrl->status.dma_addr);
- dma_free_coherent(&card->dev->dev, COMMAND_BUFFER_SIZE8,
- ctrl->cmd.buf, ctrl->cmd.dma_addr);
- }
-}
-
-int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
-{
- int i;
- int j;
- int cnt;
- struct rsxx_dma *dma;
- struct list_head *issued_dmas;
-
- issued_dmas = kcalloc(card->n_targets, sizeof(*issued_dmas),
- GFP_KERNEL);
- if (!issued_dmas)
- return -ENOMEM;
-
- for (i = 0; i < card->n_targets; i++) {
- INIT_LIST_HEAD(&issued_dmas[i]);
- cnt = 0;
- for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
- dma = get_tracker_dma(card->ctrl[i].trackers, j);
- if (dma == NULL)
- continue;
-
- if (dma->cmd == HW_CMD_BLK_WRITE)
- card->ctrl[i].stats.writes_issued--;
- else if (dma->cmd == HW_CMD_BLK_DISCARD)
- card->ctrl[i].stats.discards_issued--;
- else
- card->ctrl[i].stats.reads_issued--;
-
- if (dma->cmd != HW_CMD_BLK_DISCARD) {
- dma_unmap_page(&card->dev->dev, dma->dma_addr,
- get_dma_size(dma),
- dma->cmd == HW_CMD_BLK_WRITE ?
- DMA_TO_DEVICE :
- DMA_FROM_DEVICE);
- }
-
- list_add_tail(&dma->list, &issued_dmas[i]);
- push_tracker(card->ctrl[i].trackers, j);
- cnt++;
- }
-
- spin_lock_bh(&card->ctrl[i].queue_lock);
- list_splice(&issued_dmas[i], &card->ctrl[i].queue);
-
- atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
- card->ctrl[i].stats.sw_q_depth += cnt;
- card->ctrl[i].e_cnt = 0;
- spin_unlock_bh(&card->ctrl[i].queue_lock);
- }
-
- kfree(issued_dmas);
-
- return 0;
-}
-
-int rsxx_dma_init(void)
-{
- rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
- if (!rsxx_dma_pool)
- return -ENOMEM;
-
- return 0;
-}
-
-void rsxx_dma_cleanup(void)
-{
- kmem_cache_destroy(rsxx_dma_pool);
-}
diff --git a/drivers/block/rsxx/rsxx.h b/drivers/block/rsxx/rsxx.h
deleted file mode 100644
index 4f84905a6fd2..000000000000
--- a/drivers/block/rsxx/rsxx.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
-* Filename: rsxx.h
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#ifndef __RSXX_H__
-#define __RSXX_H__
-
-/*----------------- IOCTL Definitions -------------------*/
-
-#define RSXX_MAX_DATA 8
-
-struct rsxx_reg_access {
- __u32 addr;
- __u32 cnt;
- __u32 stat;
- __u32 stream;
- __u32 data[RSXX_MAX_DATA];
-};
-
-#define RSXX_MAX_REG_CNT (RSXX_MAX_DATA * (sizeof(__u32)))
-
-#define RSXX_IOC_MAGIC 'r'
-
-#define RSXX_GETREG _IOWR(RSXX_IOC_MAGIC, 0x20, struct rsxx_reg_access)
-#define RSXX_SETREG _IOWR(RSXX_IOC_MAGIC, 0x21, struct rsxx_reg_access)
-
-#endif /* __RSXX_H__ */
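
These two ioctls were the card's debug register window; from user space they would be driven roughly like this (a sketch: the /dev/rsxx0 node name and the creg address are assumptions, and error handling is elided):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "rsxx.h"		/* struct rsxx_reg_access, RSXX_GETREG */

int main(void)
{
	struct rsxx_reg_access reg = {
		.addr   = 0x80001004,	/* assumed: the card-state creg */
		.cnt    = 4,
		.stream = 0,
	};
	int fd = open("/dev/rsxx0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, RSXX_GETREG, &reg) == 0)
		printf("stat 0x%x data[0] 0x%x\n", reg.stat, reg.data[0]);
	close(fd);
	return 0;
}
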
diff --git a/drivers/block/rsxx/rsxx_cfg.h b/drivers/block/rsxx/rsxx_cfg.h
deleted file mode 100644
index 2b79015f5849..000000000000
--- a/drivers/block/rsxx/rsxx_cfg.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
-* Filename: rsxx_cfg.h
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#ifndef __RSXX_CFG_H__
-#define __RSXX_CFG_H__
-
-/* NOTE: Config values will be saved in network byte order (i.e. Big endian) */
-#include <linux/types.h>
-
-/*
- * The card config version must match the driver's expected version. If it does
- * not, the DMA interfaces will not be attached and the user will need to
- * initialize/upgrade the card configuration using the card config utility.
- */
-#define RSXX_CFG_VERSION 4
-
-struct card_cfg_hdr {
- __u32 version;
- __u32 crc;
-};
-
-struct card_cfg_data {
- __u32 block_size;
- __u32 stripe_size;
- __u32 vendor_id;
- __u32 cache_order;
- struct {
- __u32 mode; /* Disabled, manual, auto-tune... */
- __u32 count; /* Number of intr to coalesce */
- __u32 latency;/* Max wait time (in ns) */
- } intr_coal;
-};
-
-struct rsxx_card_cfg {
- struct card_cfg_hdr hdr;
- struct card_cfg_data data;
-};
-
-/* Vendor ID Values */
-#define RSXX_VENDOR_ID_IBM 0
-#define RSXX_VENDOR_ID_DSI 1
-#define RSXX_VENDOR_COUNT 2
-
-/* Interrupt Coalescing Values */
-#define RSXX_INTR_COAL_DISABLED 0
-#define RSXX_INTR_COAL_EXPLICIT 1
-#define RSXX_INTR_COAL_AUTO_TUNE 2
-
-
-#endif /* __RSXX_CFG_H__ */
-
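
Because the note above says config values live on the card in network byte order, a loader has to byte-swap every field after reading the config block back. A sketch of the CPU-order conversion (this is not the driver's actual config.c, which also validates the header):

#include <asm/byteorder.h>

static void card_cfg_to_cpu(struct rsxx_card_cfg *cfg)
{
	cfg->hdr.version      = be32_to_cpu(cfg->hdr.version);
	cfg->hdr.crc          = be32_to_cpu(cfg->hdr.crc);
	cfg->data.block_size  = be32_to_cpu(cfg->data.block_size);
	cfg->data.stripe_size = be32_to_cpu(cfg->data.stripe_size);
	cfg->data.vendor_id   = be32_to_cpu(cfg->data.vendor_id);
	cfg->data.cache_order = be32_to_cpu(cfg->data.cache_order);
	cfg->data.intr_coal.mode    = be32_to_cpu(cfg->data.intr_coal.mode);
	cfg->data.intr_coal.count   = be32_to_cpu(cfg->data.intr_coal.count);
	cfg->data.intr_coal.latency = be32_to_cpu(cfg->data.intr_coal.latency);
}
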
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
deleted file mode 100644
index 26c320c0d924..000000000000
--- a/drivers/block/rsxx/rsxx_priv.h
+++ /dev/null
@@ -1,418 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
-* Filename: rsxx_priv.h
-*
-* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
-* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
-*
-* (C) Copyright 2013 IBM Corporation
-*/
-
-#ifndef __RSXX_PRIV_H__
-#define __RSXX_PRIV_H__
-
-#include <linux/semaphore.h>
-
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/sysfs.h>
-#include <linux/workqueue.h>
-#include <linux/bio.h>
-#include <linux/vmalloc.h>
-#include <linux/timer.h>
-#include <linux/ioctl.h>
-#include <linux/delay.h>
-
-#include "rsxx.h"
-#include "rsxx_cfg.h"
-
-struct proc_cmd;
-
-#define PCI_DEVICE_ID_FS70_FLASH 0x04A9
-#define PCI_DEVICE_ID_FS80_FLASH 0x04AA
-
-#define RS70_PCI_REV_SUPPORTED 4
-
-#define DRIVER_NAME "rsxx"
-#define DRIVER_VERSION "4.0.3.2516"
-
-/* Block size is 4096 */
-#define RSXX_HW_BLK_SHIFT 12
-#define RSXX_HW_BLK_SIZE (1 << RSXX_HW_BLK_SHIFT)
-#define RSXX_HW_BLK_MASK (RSXX_HW_BLK_SIZE - 1)
-
-#define MAX_CREG_DATA8 32
-#define LOG_BUF_SIZE8 128
-
-#define RSXX_MAX_OUTSTANDING_CMDS 255
-#define RSXX_CS_IDX_MASK 0xff
-
-#define STATUS_BUFFER_SIZE8 4096
-#define COMMAND_BUFFER_SIZE8 4096
-
-#define RSXX_MAX_TARGETS 8
-
-struct dma_tracker_list;
-
-/* DMA Command/Status Buffer structure */
-struct rsxx_cs_buffer {
- dma_addr_t dma_addr;
- void *buf;
- u32 idx;
-};
-
-struct rsxx_dma_stats {
- u32 crc_errors;
- u32 hard_errors;
- u32 soft_errors;
- u32 writes_issued;
- u32 writes_failed;
- u32 reads_issued;
- u32 reads_failed;
- u32 reads_retried;
- u32 discards_issued;
- u32 discards_failed;
- u32 done_rescheduled;
- u32 issue_rescheduled;
- u32 dma_sw_err;
- u32 dma_hw_fault;
- u32 dma_cancelled;
- u32 sw_q_depth; /* Number of DMAs on the SW queue. */
- atomic_t hw_q_depth; /* Number of DMAs queued to HW. */
-};
-
-struct rsxx_dma_ctrl {
- struct rsxx_cardinfo *card;
- int id;
- void __iomem *regmap;
- struct rsxx_cs_buffer status;
- struct rsxx_cs_buffer cmd;
- u16 e_cnt;
- spinlock_t queue_lock;
- struct list_head queue;
- struct workqueue_struct *issue_wq;
- struct work_struct issue_dma_work;
- struct workqueue_struct *done_wq;
- struct work_struct dma_done_work;
- struct timer_list activity_timer;
- struct dma_tracker_list *trackers;
- struct rsxx_dma_stats stats;
- struct mutex work_lock;
-};
-
-struct rsxx_cardinfo {
- struct pci_dev *dev;
- unsigned int halt;
- unsigned int eeh_state;
-
- void __iomem *regmap;
- spinlock_t irq_lock;
- unsigned int isr_mask;
- unsigned int ier_mask;
-
- struct rsxx_card_cfg config;
- int config_valid;
-
- /* Embedded CPU Communication */
- struct {
- spinlock_t lock;
- bool active;
- struct creg_cmd *active_cmd;
- struct workqueue_struct *creg_wq;
- struct work_struct done_work;
- struct list_head queue;
- unsigned int q_depth;
- /* Cache the creg status to prevent ioreads */
- struct {
- u32 stat;
- u32 failed_cancel_timer;
- u32 creg_timeout;
- } creg_stats;
- struct timer_list cmd_timer;
- struct mutex reset_lock;
- int reset;
- } creg_ctrl;
-
- struct {
- char tmp[MAX_CREG_DATA8];
- char buf[LOG_BUF_SIZE8]; /* terminated */
- int buf_len;
- } log;
-
- struct workqueue_struct *event_wq;
- struct work_struct event_work;
- unsigned int state;
- u64 size8;
-
- /* Lock the device attach/detach function */
- struct mutex dev_lock;
-
- /* Block Device Variables */
- bool bdev_attached;
- int disk_id;
- int major;
- struct gendisk *gendisk;
- struct {
- /* Used to convert a byte address to a device address. */
- u64 lower_mask;
- u64 upper_shift;
- u64 upper_mask;
- u64 target_mask;
- u64 target_shift;
- } _stripe;
- unsigned int dma_fault;
-
- int scrub_hard;
-
- int n_targets;
- struct rsxx_dma_ctrl *ctrl;
-
- struct dentry *debugfs_dir;
-};
-
-enum rsxx_pci_regmap {
- HWID = 0x00, /* Hardware Identification Register */
- SCRATCH = 0x04, /* Scratch/Debug Register */
- RESET = 0x08, /* Reset Register */
- ISR = 0x10, /* Interrupt Status Register */
- IER = 0x14, /* Interrupt Enable Register */
- IPR = 0x18, /* Interrupt Poll Register */
- CB_ADD_LO = 0x20, /* Command Host Buffer Address [31:0] */
- CB_ADD_HI = 0x24, /* Command Host Buffer Address [63:32]*/
- HW_CMD_IDX = 0x28, /* Hardware Processed Command Index */
- SW_CMD_IDX = 0x2C, /* Software Processed Command Index */
- SB_ADD_LO = 0x30, /* Status Host Buffer Address [31:0] */
- SB_ADD_HI = 0x34, /* Status Host Buffer Address [63:32] */
- HW_STATUS_CNT = 0x38, /* Hardware Status Counter */
- SW_STATUS_CNT = 0x3C, /* Deprecated */
- CREG_CMD = 0x40, /* CPU Command Register */
- CREG_ADD = 0x44, /* CPU Address Register */
- CREG_CNT = 0x48, /* CPU Count Register */
- CREG_STAT = 0x4C, /* CPU Status Register */
- CREG_DATA0 = 0x50, /* CPU Data Registers */
- CREG_DATA1 = 0x54,
- CREG_DATA2 = 0x58,
- CREG_DATA3 = 0x5C,
- CREG_DATA4 = 0x60,
- CREG_DATA5 = 0x64,
- CREG_DATA6 = 0x68,
- CREG_DATA7 = 0x6c,
- INTR_COAL = 0x70, /* Interrupt Coalescing Register */
- HW_ERROR = 0x74, /* Card Error Register */
- PCI_DEBUG0 = 0x78, /* PCI Debug Registers */
- PCI_DEBUG1 = 0x7C,
- PCI_DEBUG2 = 0x80,
- PCI_DEBUG3 = 0x84,
- PCI_DEBUG4 = 0x88,
- PCI_DEBUG5 = 0x8C,
- PCI_DEBUG6 = 0x90,
- PCI_DEBUG7 = 0x94,
- PCI_POWER_THROTTLE = 0x98,
- PERF_CTRL = 0x9c,
- PERF_TIMER_LO = 0xa0,
- PERF_TIMER_HI = 0xa4,
- PERF_RD512_LO = 0xa8,
- PERF_RD512_HI = 0xac,
- PERF_WR512_LO = 0xb0,
- PERF_WR512_HI = 0xb4,
- PCI_RECONFIG = 0xb8,
-};
-
-enum rsxx_intr {
- CR_INTR_DMA0 = 0x00000001,
- CR_INTR_CREG = 0x00000002,
- CR_INTR_DMA1 = 0x00000004,
- CR_INTR_EVENT = 0x00000008,
- CR_INTR_DMA2 = 0x00000010,
- CR_INTR_DMA3 = 0x00000020,
- CR_INTR_DMA4 = 0x00000040,
- CR_INTR_DMA5 = 0x00000080,
- CR_INTR_DMA6 = 0x00000100,
- CR_INTR_DMA7 = 0x00000200,
- CR_INTR_ALL_C = 0x0000003f,
- CR_INTR_ALL_G = 0x000003ff,
- CR_INTR_DMA_ALL = 0x000003f5,
- CR_INTR_ALL = 0xffffffff,
-};
-
-static inline int CR_INTR_DMA(int N)
-{
- static const unsigned int _CR_INTR_DMA[] = {
- CR_INTR_DMA0, CR_INTR_DMA1, CR_INTR_DMA2, CR_INTR_DMA3,
- CR_INTR_DMA4, CR_INTR_DMA5, CR_INTR_DMA6, CR_INTR_DMA7
- };
- return _CR_INTR_DMA[N];
-}
-enum rsxx_pci_reset {
- DMA_QUEUE_RESET = 0x00000001,
-};
-
-enum rsxx_hw_fifo_flush {
- RSXX_FLUSH_BUSY = 0x00000002,
- RSXX_FLUSH_TIMEOUT = 0x00000004,
-};
-
-enum rsxx_pci_revision {
- RSXX_DISCARD_SUPPORT = 2,
- RSXX_EEH_SUPPORT = 3,
-};
-
-enum rsxx_creg_cmd {
- CREG_CMD_TAG_MASK = 0x0000FF00,
- CREG_OP_WRITE = 0x000000C0,
- CREG_OP_READ = 0x000000E0,
-};
-
-enum rsxx_creg_addr {
- CREG_ADD_CARD_CMD = 0x80001000,
- CREG_ADD_CARD_STATE = 0x80001004,
- CREG_ADD_CARD_SIZE = 0x8000100c,
- CREG_ADD_CAPABILITIES = 0x80001050,
- CREG_ADD_LOG = 0x80002000,
- CREG_ADD_NUM_TARGETS = 0x80003000,
- CREG_ADD_CRAM = 0xA0000000,
- CREG_ADD_CONFIG = 0xB0000000,
-};
-
-enum rsxx_creg_card_cmd {
- CARD_CMD_STARTUP = 1,
- CARD_CMD_SHUTDOWN = 2,
- CARD_CMD_LOW_LEVEL_FORMAT = 3,
- CARD_CMD_FPGA_RECONFIG_BR = 4,
- CARD_CMD_FPGA_RECONFIG_MAIN = 5,
- CARD_CMD_BACKUP = 6,
- CARD_CMD_RESET = 7,
- CARD_CMD_deprecated = 8,
- CARD_CMD_UNINITIALIZE = 9,
- CARD_CMD_DSTROY_EMERGENCY = 10,
- CARD_CMD_DSTROY_NORMAL = 11,
- CARD_CMD_DSTROY_EXTENDED = 12,
- CARD_CMD_DSTROY_ABORT = 13,
-};
-
-enum rsxx_card_state {
- CARD_STATE_SHUTDOWN = 0x00000001,
- CARD_STATE_STARTING = 0x00000002,
- CARD_STATE_FORMATTING = 0x00000004,
- CARD_STATE_UNINITIALIZED = 0x00000008,
- CARD_STATE_GOOD = 0x00000010,
- CARD_STATE_SHUTTING_DOWN = 0x00000020,
- CARD_STATE_FAULT = 0x00000040,
- CARD_STATE_RD_ONLY_FAULT = 0x00000080,
- CARD_STATE_DSTROYING = 0x00000100,
-};
-
-enum rsxx_led {
- LED_DEFAULT = 0x0,
- LED_IDENTIFY = 0x1,
- LED_SOAK = 0x2,
-};
-
-enum rsxx_creg_flash_lock {
- CREG_FLASH_LOCK = 1,
- CREG_FLASH_UNLOCK = 2,
-};
-
-enum rsxx_card_capabilities {
- CARD_CAP_SUBPAGE_WRITES = 0x00000080,
-};
-
-enum rsxx_creg_stat {
- CREG_STAT_STATUS_MASK = 0x00000003,
- CREG_STAT_SUCCESS = 0x1,
- CREG_STAT_ERROR = 0x2,
- CREG_STAT_CHAR_PENDING = 0x00000004, /* Character I/O pending bit */
- CREG_STAT_LOG_PENDING = 0x00000008, /* HW log message pending bit */
- CREG_STAT_TAG_MASK = 0x0000ff00,
-};
-
-enum rsxx_dma_finish {
- FREE_DMA = 0x0,
- COMPLETE_DMA = 0x1,
-};
-
-static inline unsigned int CREG_DATA(int N)
-{
- return CREG_DATA0 + (N << 2);
-}
-
-/*----------------- Convenient Log Wrappers -------------------*/
-#define CARD_TO_DEV(__CARD) (&(__CARD)->dev->dev)
-
-/***** config.c *****/
-int rsxx_load_config(struct rsxx_cardinfo *card);
-
-/***** core.c *****/
-void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr);
-void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr);
-void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
- unsigned int intr);
-void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
- unsigned int intr);
-
-/***** dev.c *****/
-int rsxx_attach_dev(struct rsxx_cardinfo *card);
-void rsxx_detach_dev(struct rsxx_cardinfo *card);
-int rsxx_setup_dev(struct rsxx_cardinfo *card);
-void rsxx_destroy_dev(struct rsxx_cardinfo *card);
-int rsxx_dev_init(void);
-void rsxx_dev_cleanup(void);
-
-/***** dma.c ****/
-typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
- void *cb_data,
- unsigned int status);
-int rsxx_dma_setup(struct rsxx_cardinfo *card);
-void rsxx_dma_destroy(struct rsxx_cardinfo *card);
-int rsxx_dma_init(void);
-int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
- struct list_head *q,
- unsigned int done);
-int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
-void rsxx_dma_cleanup(void);
-void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
-int rsxx_dma_configure(struct rsxx_cardinfo *card);
-blk_status_t rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
- struct bio *bio,
- atomic_t *n_dmas,
- rsxx_dma_cb cb,
- void *cb_data);
-int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
-int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
-int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
-
-/***** cregs.c *****/
-int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
- unsigned int size8,
- void *data,
- int byte_stream);
-int rsxx_creg_read(struct rsxx_cardinfo *card,
- u32 addr,
- unsigned int size8,
- void *data,
- int byte_stream);
-int rsxx_read_hw_log(struct rsxx_cardinfo *card);
-int rsxx_get_card_state(struct rsxx_cardinfo *card,
- unsigned int *state);
-int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8);
-int rsxx_get_num_targets(struct rsxx_cardinfo *card,
- unsigned int *n_targets);
-int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
- u32 *capabilities);
-int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd);
-int rsxx_creg_setup(struct rsxx_cardinfo *card);
-void rsxx_creg_destroy(struct rsxx_cardinfo *card);
-int rsxx_creg_init(void);
-void rsxx_creg_cleanup(void);
-int rsxx_reg_access(struct rsxx_cardinfo *card,
- struct rsxx_reg_access __user *ucmd,
- int read);
-void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card);
-void rsxx_kick_creg_queue(struct rsxx_cardinfo *card);
-
-#endif /* __RSXX_PRIV_H__ */
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 6f45a53f7cbf..146d85d80e0e 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -143,8 +143,8 @@ static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
unsigned command, unsigned long argument)
{
+ struct vdc_port *port = bdev->bd_disk->private_data;
int i;
- struct gendisk *disk;
switch (command) {
case CDROMMULTISESSION:
@@ -155,12 +155,15 @@ static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
return 0;
case CDROM_GET_CAPABILITY:
- disk = bdev->bd_disk;
-
- if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
+ if (!vdc_version_supported(port, 1, 1))
+ return -EINVAL;
+ switch (port->vdisk_mtype) {
+ case VD_MEDIA_TYPE_CD:
+ case VD_MEDIA_TYPE_DVD:
return 0;
- return -EINVAL;
-
+ default:
+ return -EINVAL;
+ }
default:
pr_debug(PFX "ioctl %08x not supported\n", command);
return -EINVAL;
@@ -459,7 +462,7 @@ static int __vdc_tx_trigger(struct vdc_port *port)
static int __send_request(struct request *req)
{
- struct vdc_port *port = req->rq_disk->private_data;
+ struct vdc_port *port = req->q->disk->private_data;
struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
struct scatterlist sg[MAX_RING_COOKIES];
struct vdc_req_entry *rqe;
@@ -854,14 +857,12 @@ static int probe_disk(struct vdc_port *port)
switch (port->vdisk_mtype) {
case VD_MEDIA_TYPE_CD:
pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
- g->flags |= GENHD_FL_CD;
g->flags |= GENHD_FL_REMOVABLE;
set_disk_ro(g, 1);
break;
case VD_MEDIA_TYPE_DVD:
pr_info(PFX "Virtual DVD %s\n", port->disk_name);
- g->flags |= GENHD_FL_CD;
g->flags |= GENHD_FL_REMOVABLE;
set_disk_ro(g, 1);
break;
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 821594cd1315..fef65a18d56f 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -840,6 +840,7 @@ static int swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].disk->minors = 1;
sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
swd->unit[drive].disk->fops = &floppy_fops;
+ swd->unit[drive].disk->flags |= GENHD_FL_NO_PART;
swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
swd->unit[drive].disk->private_data = &swd->unit[drive];
set_capacity(swd->unit[drive].disk, 2880);
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 4b91c9aa5892..6c39f2c9f806 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1227,7 +1227,7 @@ static int swim3_attach(struct macio_dev *mdev,
disk->fops = &floppy_fops;
disk->private_data = fs;
disk->events = DISK_EVENT_MEDIA_CHANGE;
- disk->flags |= GENHD_FL_REMOVABLE;
+ disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
rc = add_disk(disk);
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index d1676fe0da1a..b361583944b9 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -540,7 +540,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
spin_unlock_irq(&host->lock);
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(NULL, rq, true, NULL);
+ blk_execute_rq_nowait(rq, true, NULL);
return 0;
@@ -579,7 +579,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
crq->msg_bucket = (u32) rc;
DPRINTK("blk_execute_rq_nowait, tag == %u\n", rq->tag);
- blk_execute_rq_nowait(NULL, rq, true, NULL);
+ blk_execute_rq_nowait(rq, true, NULL);
return 0;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6ae38776e30e..c443cd64fc9b 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -384,7 +384,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
if (err)
goto out;
- blk_execute_rq(vblk->disk, req, false);
+ blk_execute_rq(req, false);
err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
blk_mq_free_request(req);
@@ -843,7 +843,6 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->disk->minors = 1 << PART_BITS;
vblk->disk->private_data = vblk;
vblk->disk->fops = &virtblk_fops;
- vblk->disk->flags |= GENHD_FL_EXT_DEVT;
vblk->index = index;
/* configure queue flush support */
@@ -977,7 +976,7 @@ static void virtblk_remove(struct virtio_device *vdev)
mutex_lock(&vblk->vdev_mutex);
/* Stop all the virtqueues. */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
vblk->vdev = NULL;
@@ -996,7 +995,7 @@ static int virtblk_freeze(struct virtio_device *vdev)
struct virtio_blk *vblk = vdev->priv;
/* Ensure we don't receive any more interrupts */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
/* Make sure no work handler is accessing the device. */
flush_work(&vblk->config_work);
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 914587aabca0..62125fd4af4a 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -510,7 +510,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
}
vbd->size = vbd_sz(vbd);
- if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
+ if (cdrom || disk_to_cdi(vbd->bdev->bd_disk))
vbd->type |= VDISK_CDROM;
if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
vbd->type |= VDISK_REMOVABLE;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 286cf1afad78..ca71a0585333 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -198,6 +198,7 @@ struct blkfront_info
struct gendisk *gd;
u16 sector_size;
unsigned int physical_sector_size;
+ unsigned long vdisk_info;
int vdevice;
blkif_vdev_t handle;
enum blkif_state connected;
@@ -505,6 +506,7 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
unsigned command, unsigned long argument)
{
+ struct blkfront_info *info = bdev->bd_disk->private_data;
int i;
switch (command) {
@@ -514,9 +516,9 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
return -EFAULT;
return 0;
case CDROM_GET_CAPABILITY:
- if (bdev->bd_disk->flags & GENHD_FL_CD)
- return 0;
- return -EINVAL;
+ if (!(info->vdisk_info & VDISK_CDROM))
+ return -EINVAL;
+ return 0;
default:
return -EINVAL;
}
@@ -1057,9 +1059,8 @@ static char *encode_disk_name(char *ptr, unsigned int n)
}
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
- struct blkfront_info *info,
- u16 vdisk_info, u16 sector_size,
- unsigned int physical_sector_size)
+ struct blkfront_info *info, u16 sector_size,
+ unsigned int physical_sector_size)
{
struct gendisk *gd;
int nr_minors = 1;
@@ -1157,15 +1158,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
xlvbd_flush(info);
- if (vdisk_info & VDISK_READONLY)
+ if (info->vdisk_info & VDISK_READONLY)
set_disk_ro(gd, 1);
-
- if (vdisk_info & VDISK_REMOVABLE)
+ if (info->vdisk_info & VDISK_REMOVABLE)
gd->flags |= GENHD_FL_REMOVABLE;
- if (vdisk_info & VDISK_CDROM)
- gd->flags |= GENHD_FL_CD;
-
return 0;
out_free_tag_set:
@@ -2129,7 +2126,7 @@ static void blkfront_closing(struct blkfront_info *info)
/* No more blkif_request(). */
blk_mq_stop_hw_queues(info->rq);
- blk_set_queue_dying(info->rq);
+ blk_mark_disk_dead(info->gd);
set_capacity(info->gd, 0);
for_each_rinfo(info, rinfo, i) {
@@ -2313,7 +2310,6 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned long long sectors;
unsigned long sector_size;
unsigned int physical_sector_size;
- unsigned int binfo;
int err, i;
struct blkfront_ring_info *rinfo;
@@ -2351,7 +2347,7 @@ static void blkfront_connect(struct blkfront_info *info)
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"sectors", "%llu", &sectors,
- "info", "%u", &binfo,
+ "info", "%u", &info->vdisk_info,
"sector-size", "%lu", &sector_size,
NULL);
if (err) {
@@ -2380,7 +2376,7 @@ static void blkfront_connect(struct blkfront_info *info)
}
}
- err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
+ err = xlvbd_alloc_gendisk(sectors, info, sector_size,
physical_sector_size);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
index ccc52c935faf..7a6ed83481b8 100644
--- a/drivers/block/z2ram.c
+++ b/drivers/block/z2ram.c
@@ -327,6 +327,7 @@ static int z2ram_register_disk(int minor)
disk->major = Z2RAM_MAJOR;
disk->first_minor = minor;
disk->minors = 1;
+ disk->flags |= GENHD_FL_NO_PART;
disk->fops = &z2_fops;
if (minor)
sprintf(disk->disk_name, "z2ram%d", minor);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 25071126995b..cb253d80d72b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1903,14 +1903,7 @@ static struct attribute *zram_disk_attrs[] = {
NULL,
};
-static const struct attribute_group zram_disk_attr_group = {
- .attrs = zram_disk_attrs,
-};
-
-static const struct attribute_group *zram_disk_attr_groups[] = {
- &zram_disk_attr_group,
- NULL,
-};
+ATTRIBUTE_GROUPS(zram_disk);
/*
* Allocate and initialize new zram device. the function returns
@@ -1947,6 +1940,7 @@ static int zram_add(void)
zram->disk->major = zram_major;
zram->disk->first_minor = device_id;
zram->disk->minors = 1;
+ zram->disk->flags |= GENHD_FL_NO_PART;
zram->disk->fops = &zram_devops;
zram->disk->private_data = zram;
snprintf(zram->disk->disk_name, 16, "zram%d", device_id);
@@ -1982,7 +1976,7 @@ static int zram_add(void)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
- ret = device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
+ ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
goto out_cleanup_disk;
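
For reference, ATTRIBUTE_GROUPS(zram_disk) is the sysfs helper macro (include/linux/sysfs.h) that generates roughly what the two deleted definitions spelled out by hand, just with the shorter zram_disk_group/zram_disk_groups names that the device_add_disk() call above now uses:

static const struct attribute_group zram_disk_group = {
	.attrs = zram_disk_attrs,
};

static const struct attribute_group *zram_disk_groups[] = {
	&zram_disk_group,
	NULL,
};
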
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index ecdf8e034351..f537673ede17 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -739,14 +739,13 @@ static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags)
* Arguments:
*
* tty pointer to tty instance data
- * file pointer to open file object for device
* cmd IOCTL command code
* arg argument for IOCTL call (cmd dependent)
*
* Return Value: Command dependent
*/
-static int hci_uart_tty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int hci_uart_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct hci_uart *hu = tty->disc_data;
int err = 0;
diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c
index 076e4942a3f0..67c21263f9e0 100644
--- a/drivers/bluetooth/virtio_bt.c
+++ b/drivers/bluetooth/virtio_bt.c
@@ -367,7 +367,7 @@ static void virtbt_remove(struct virtio_device *vdev)
struct hci_dev *hdev = vbt->hdev;
hci_unregister_dev(hdev);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
hci_free_dev(hdev);
vbt->hdev = NULL;
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index 315e830b6ecd..5e70f9775a0e 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -400,7 +400,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev);
struct fsl_mc_io *mc_io = mc_dev->mc_io;
- struct msi_desc *msi_desc = mc_dev->irqs[0]->msi_desc;
+ int irq = mc_dev->irqs[0]->virq;
dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n",
irq_num, smp_processor_id());
@@ -409,7 +409,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
return IRQ_HANDLED;
mutex_lock(&mc_bus->scan_mutex);
- if (!msi_desc || msi_desc->irq != (u32)irq_num)
+ if (irq != (u32)irq_num)
goto out;
status = 0;
@@ -521,7 +521,7 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
* function that programs the MSI physically in the device
*/
error = devm_request_threaded_irq(&mc_dev->dev,
- irq->msi_desc->irq,
+ irq->virq,
dprc_irq0_handler,
dprc_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
@@ -771,7 +771,7 @@ static void dprc_teardown_irq(struct fsl_mc_device *mc_dev)
(void)disable_dprc_irq(mc_dev);
- devm_free_irq(&mc_dev->dev, irq->msi_desc->irq, &mc_dev->dev);
+ devm_free_irq(&mc_dev->dev, irq->virq, &mc_dev->dev);
fsl_mc_free_irqs(mc_dev);
}
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index 6c513556911e..dced427ca8ba 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -350,7 +350,6 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
unsigned int irq_count)
{
unsigned int i;
- struct msi_desc *msi_desc;
struct fsl_mc_device_irq *irq_resources;
struct fsl_mc_device_irq *mc_dev_irq;
int error;
@@ -388,16 +387,12 @@ int fsl_mc_populate_irq_pool(struct fsl_mc_device *mc_bus_dev,
mc_dev_irq->resource.type = res_pool->type;
mc_dev_irq->resource.data = mc_dev_irq;
mc_dev_irq->resource.parent_pool = res_pool;
+ mc_dev_irq->virq = msi_get_virq(&mc_bus_dev->dev, i);
+ mc_dev_irq->resource.id = mc_dev_irq->virq;
INIT_LIST_HEAD(&mc_dev_irq->resource.node);
list_add_tail(&mc_dev_irq->resource.node, &res_pool->free_list);
}
- for_each_msi_entry(msi_desc, &mc_bus_dev->dev) {
- mc_dev_irq = &irq_resources[msi_desc->fsl_mc.msi_index];
- mc_dev_irq->msi_desc = msi_desc;
- mc_dev_irq->resource.id = msi_desc->irq;
- }
-
res_pool->max_count = irq_count;
res_pool->free_count = irq_count;
mc_bus->irq_resources = irq_resources;
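
[Note: caching the Linux interrupt number in mc_dev_irq->virq at pool-population time removes every later dereference of struct msi_desc. A sketch of the lookup, assuming the msi_get_virq() helper introduced alongside this series:

	/* Sketch: map MSI index i on the bus device to its Linux IRQ.
	 * msi_get_virq() returns 0 if no interrupt exists at that index. */
	unsigned int virq = msi_get_virq(&mc_bus_dev->dev, i);
	if (!virq)
		return -ENOENT;	/* hypothetical error handling */
]
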
diff --git a/drivers/bus/fsl-mc/fsl-mc-msi.c b/drivers/bus/fsl-mc/fsl-mc-msi.c
index cf974870ba55..5e0e4393ce4d 100644
--- a/drivers/bus/fsl-mc/fsl-mc-msi.c
+++ b/drivers/bus/fsl-mc/fsl-mc-msi.c
@@ -29,7 +29,7 @@ static irq_hw_number_t fsl_mc_domain_calc_hwirq(struct fsl_mc_device *dev,
* Make the base hwirq value for ICID*10000 so it is readable
* as a decimal value in /proc/interrupts.
*/
- return (irq_hw_number_t)(desc->fsl_mc.msi_index + (dev->icid * 10000));
+ return (irq_hw_number_t)(desc->msi_index + (dev->icid * 10000));
}
static void fsl_mc_msi_set_desc(msi_alloc_info_t *arg,
@@ -58,11 +58,11 @@ static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
}
static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
- struct fsl_mc_device_irq *mc_dev_irq)
+ struct fsl_mc_device_irq *mc_dev_irq,
+ struct msi_desc *msi_desc)
{
int error;
struct fsl_mc_device *owner_mc_dev = mc_dev_irq->mc_dev;
- struct msi_desc *msi_desc = mc_dev_irq->msi_desc;
struct dprc_irq_cfg irq_cfg;
/*
@@ -122,14 +122,14 @@ static void fsl_mc_msi_write_msg(struct irq_data *irq_data,
struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_desc->dev);
struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev);
struct fsl_mc_device_irq *mc_dev_irq =
- &mc_bus->irq_resources[msi_desc->fsl_mc.msi_index];
+ &mc_bus->irq_resources[msi_desc->msi_index];
msi_desc->msg = *msg;
/*
* Program the MSI (paddr, value) pair in the device:
*/
- __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq);
+ __fsl_mc_msi_write_msg(mc_bus_dev, mc_dev_irq, msi_desc);
}
static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
@@ -170,6 +170,7 @@ struct irq_domain *fsl_mc_msi_create_irq_domain(struct fwnode_handle *fwnode,
fsl_mc_msi_update_dom_ops(info);
if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
fsl_mc_msi_update_chip_ops(info);
+ info->flags |= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS;
domain = msi_create_irq_domain(fwnode, info, parent);
if (domain)
@@ -210,61 +211,21 @@ struct irq_domain *fsl_mc_find_msi_domain(struct device *dev)
return msi_domain;
}
-static void fsl_mc_msi_free_descs(struct device *dev)
-{
- struct msi_desc *desc, *tmp;
-
- list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
- list_del(&desc->list);
- free_msi_entry(desc);
- }
-}
-
-static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
-
-{
- unsigned int i;
- int error;
- struct msi_desc *msi_desc;
-
- for (i = 0; i < irq_count; i++) {
- msi_desc = alloc_msi_entry(dev, 1, NULL);
- if (!msi_desc) {
- dev_err(dev, "Failed to allocate msi entry\n");
- error = -ENOMEM;
- goto cleanup_msi_descs;
- }
-
- msi_desc->fsl_mc.msi_index = i;
- INIT_LIST_HEAD(&msi_desc->list);
- list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
- }
-
- return 0;
-
-cleanup_msi_descs:
- fsl_mc_msi_free_descs(dev);
- return error;
-}
-
-int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
- unsigned int irq_count)
+int fsl_mc_msi_domain_alloc_irqs(struct device *dev, unsigned int irq_count)
{
struct irq_domain *msi_domain;
int error;
- if (!list_empty(dev_to_msi_list(dev)))
+ msi_domain = dev_get_msi_domain(dev);
+ if (!msi_domain)
return -EINVAL;
- error = fsl_mc_msi_alloc_descs(dev, irq_count);
- if (error < 0)
+ error = msi_setup_device_data(dev);
+ if (error)
return error;
- msi_domain = dev_get_msi_domain(dev);
- if (!msi_domain) {
- error = -EINVAL;
- goto cleanup_msi_descs;
- }
+ if (msi_first_desc(dev, MSI_DESC_ALL))
+ return -EINVAL;
/*
* NOTE: Calling this function will trigger the invocation of the
@@ -272,15 +233,8 @@ int fsl_mc_msi_domain_alloc_irqs(struct device *dev,
*/
error = msi_domain_alloc_irqs(msi_domain, dev, irq_count);
- if (error) {
+ if (error)
dev_err(dev, "Failed to allocate IRQs\n");
- goto cleanup_msi_descs;
- }
-
- return 0;
-
-cleanup_msi_descs:
- fsl_mc_msi_free_descs(dev);
return error;
}
@@ -293,9 +247,4 @@ void fsl_mc_msi_domain_free_irqs(struct device *dev)
return;
msi_domain_free_irqs(msi_domain, dev);
-
- if (list_empty(dev_to_msi_list(dev)))
- return;
-
- fsl_mc_msi_free_descs(dev);
}
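
[Note: with MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS and MSI_FLAG_FREE_MSI_DESCS set on the domain, the MSI core allocates and frees the descriptors itself, so the driver-side allocation path collapses to roughly the sequence below (a condensed sketch, not the verbatim function):

	struct irq_domain *msi_domain = dev_get_msi_domain(dev);
	int ret;

	if (!msi_domain)
		return -EINVAL;
	ret = msi_setup_device_data(dev);	/* per-device MSI bookkeeping */
	if (ret)
		return ret;
	if (msi_first_desc(dev, MSI_DESC_ALL))	/* reject double allocation */
		return -EINVAL;
	return msi_domain_alloc_irqs(msi_domain, dev, irq_count);
]
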
diff --git a/drivers/bus/mhi/core/boot.c b/drivers/bus/mhi/core/boot.c
index 0a972620a403..74295d3cc662 100644
--- a/drivers/bus/mhi/core/boot.c
+++ b/drivers/bus/mhi/core/boot.c
@@ -417,7 +417,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
}
/* wait for ready on pass through or any other execution environment */
- if (mhi_cntrl->ee != MHI_EE_EDL && mhi_cntrl->ee != MHI_EE_PBL)
+ if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee))
goto fw_load_ready_state;
fw_name = (mhi_cntrl->ee == MHI_EE_EDL) ?
diff --git a/drivers/bus/mhi/core/init.c b/drivers/bus/mhi/core/init.c
index 5aaca6d0f52b..046f407dc5d6 100644
--- a/drivers/bus/mhi/core/init.c
+++ b/drivers/bus/mhi/core/init.c
@@ -79,7 +79,8 @@ static const char * const mhi_pm_state_str[] = {
const char *to_mhi_pm_state_str(enum mhi_pm_state state)
{
- int index = find_last_bit((unsigned long *)&state, 32);
+ unsigned long pm_state = state;
+ int index = find_last_bit(&pm_state, 32);
if (index >= ARRAY_SIZE(mhi_pm_state_str))
return "Invalid State";
@@ -788,6 +789,7 @@ static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
mhi_chan->offload_ch = ch_cfg->offload_channel;
mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
mhi_chan->pre_alloc = ch_cfg->auto_queue;
+ mhi_chan->wake_capable = ch_cfg->wake_capable;
/*
* If MHI host allocates buffers, then the channel direction
diff --git a/drivers/bus/mhi/core/internal.h b/drivers/bus/mhi/core/internal.h
index 3a732afaf73e..e2e10474a9d9 100644
--- a/drivers/bus/mhi/core/internal.h
+++ b/drivers/bus/mhi/core/internal.h
@@ -390,7 +390,8 @@ extern const char * const mhi_ee_str[MHI_EE_MAX];
#define MHI_IN_PBL(ee) (ee == MHI_EE_PBL || ee == MHI_EE_PTHRU || \
ee == MHI_EE_EDL)
-
+#define MHI_POWER_UP_CAPABLE(ee) (MHI_IN_PBL(ee) || ee == MHI_EE_AMSS)
+#define MHI_FW_LOAD_CAPABLE(ee) (ee == MHI_EE_PBL || ee == MHI_EE_EDL)
#define MHI_IN_MISSION_MODE(ee) (ee == MHI_EE_AMSS || ee == MHI_EE_WFW || \
ee == MHI_EE_FP)
@@ -681,8 +682,12 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl);
void mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
struct image_info *img_info);
void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl);
+
+/* Automatically allocate and queue inbound buffers */
+#define MHI_CH_INBOUND_ALLOC_BUFS BIT(0)
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan);
+ struct mhi_chan *mhi_chan, unsigned int flags);
+
int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
diff --git a/drivers/bus/mhi/core/main.c b/drivers/bus/mhi/core/main.c
index b15c5bc37dd4..ffde617f93a3 100644
--- a/drivers/bus/mhi/core/main.c
+++ b/drivers/bus/mhi/core/main.c
@@ -1065,7 +1065,7 @@ void mhi_ctrl_ev_task(unsigned long data)
return;
}
- /* Process ctrl events events */
+ /* Process ctrl events */
ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
/*
@@ -1430,7 +1430,7 @@ exit_unprepare_channel:
}
int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
- struct mhi_chan *mhi_chan)
+ struct mhi_chan *mhi_chan, unsigned int flags)
{
int ret = 0;
struct device *dev = &mhi_chan->mhi_dev->dev;
@@ -1455,6 +1455,9 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
if (ret)
goto error_pm_state;
+ if (mhi_chan->dir == DMA_FROM_DEVICE)
+ mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS);
+
/* Pre-allocate buffer for xfer ring */
if (mhi_chan->pre_alloc) {
int nr_el = get_nr_avail_ring_elements(mhi_cntrl,
@@ -1464,6 +1467,7 @@ int mhi_prepare_channel(struct mhi_controller *mhi_cntrl,
while (nr_el--) {
void *buf;
struct mhi_buf_info info = { };
+
buf = kmalloc(len, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
@@ -1609,8 +1613,7 @@ void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan)
read_unlock_bh(&mhi_cntrl->pm_lock);
}
-/* Move channel to start state */
-int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags)
{
int ret, dir;
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
@@ -1621,7 +1624,7 @@ int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
if (!mhi_chan)
continue;
- ret = mhi_prepare_channel(mhi_cntrl, mhi_chan);
+ ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags);
if (ret)
goto error_open_chan;
}
@@ -1639,8 +1642,19 @@ error_open_chan:
return ret;
}
+
+int mhi_prepare_for_transfer(struct mhi_device *mhi_dev)
+{
+ return __mhi_prepare_for_transfer(mhi_dev, 0);
+}
EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer);
+int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev)
+{
+ return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS);
+}
+EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue);
+
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
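
[Note: mhi_prepare_for_transfer_autoqueue() lets a client opt in to core-managed inbound buffers instead of queueing its own. A sketch of client usage; my_client_probe and its driver are hypothetical:

	static int my_client_probe(struct mhi_device *mhi_dev,
				   const struct mhi_device_id *id)
	{
		/* Core pre-allocates and re-queues DL buffers for this device */
		return mhi_prepare_for_transfer_autoqueue(mhi_dev);
	}
]
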
diff --git a/drivers/bus/mhi/core/pm.c b/drivers/bus/mhi/core/pm.c
index 547e6e769546..4aae0baea008 100644
--- a/drivers/bus/mhi/core/pm.c
+++ b/drivers/bus/mhi/core/pm.c
@@ -42,7 +42,7 @@
* L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
* LD_ERR_FATAL_DETECT -> DISABLE
*/
-static struct mhi_pm_transitions const dev_state_transitions[] = {
+static const struct mhi_pm_transitions dev_state_transitions[] = {
/* L0 States */
{
MHI_PM_DISABLE,
@@ -1053,7 +1053,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
enum mhi_ee_type current_ee;
enum dev_st_transition next_state;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
- u32 val;
+ u32 interval_us = 25000; /* poll register field every 25 milliseconds */
int ret;
dev_info(dev, "Requested to power ON\n");
@@ -1070,10 +1070,6 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
mutex_lock(&mhi_cntrl->pm_mutex);
mhi_cntrl->pm_state = MHI_PM_DISABLE;
- ret = mhi_init_irq_setup(mhi_cntrl);
- if (ret)
- goto error_setup_irq;
-
/* Setup BHI INTVEC */
write_lock_irq(&mhi_cntrl->pm_lock);
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
@@ -1083,11 +1079,11 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
write_unlock_irq(&mhi_cntrl->pm_lock);
/* Confirm that the device is in valid exec env */
- if (!MHI_IN_PBL(current_ee) && current_ee != MHI_EE_AMSS) {
+ if (!MHI_POWER_UP_CAPABLE(current_ee)) {
dev_err(dev, "%s is not a valid EE for power on\n",
TO_MHI_EXEC_STR(current_ee));
ret = -EIO;
- goto error_async_power_up;
+ goto error_exit;
}
state = mhi_get_mhi_state(mhi_cntrl);
@@ -1096,20 +1092,12 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
if (state == MHI_STATE_SYS_ERR) {
mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET);
- ret = wait_event_timeout(mhi_cntrl->state_event,
- MHI_PM_IN_FATAL_STATE(mhi_cntrl->pm_state) ||
- mhi_read_reg_field(mhi_cntrl,
- mhi_cntrl->regs,
- MHICTRL,
- MHICTRL_RESET_MASK,
- MHICTRL_RESET_SHIFT,
- &val) ||
- !val,
- msecs_to_jiffies(mhi_cntrl->timeout_ms));
- if (!ret) {
- ret = -EIO;
+ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
+ MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT, 0,
+ interval_us);
+ if (ret) {
dev_info(dev, "Failed to reset MHI due to syserr state\n");
- goto error_async_power_up;
+ goto error_exit;
}
/*
@@ -1119,6 +1107,10 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
mhi_write_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_INTVEC, 0);
}
+ ret = mhi_init_irq_setup(mhi_cntrl);
+ if (ret)
+ goto error_exit;
+
/* Transition to next state */
next_state = MHI_IN_PBL(current_ee) ?
DEV_ST_TRANSITION_PBL : DEV_ST_TRANSITION_READY;
@@ -1131,10 +1123,7 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl)
return 0;
-error_async_power_up:
- mhi_deinit_free_irq(mhi_cntrl);
-
-error_setup_irq:
+error_exit:
mhi_cntrl->pm_state = MHI_PM_DISABLE;
mutex_unlock(&mhi_cntrl->pm_mutex);
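
[Note: mhi_poll_reg_field() replaces the open-coded wait_event_timeout() loop: it samples a register field at the given interval until it matches the expected value, returning 0 on success (the overall timeout presumably comes from the controller's timeout_ms; that detail is an assumption here). The SYSERR-path call is effectively:

	/* Sketch: wait for MHICTRL.RESET to clear, sampling every 25 ms */
	ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL,
				 MHICTRL_RESET_MASK, MHICTRL_RESET_SHIFT,
				 0 /* expected value */, interval_us);

Moving mhi_init_irq_setup() after this reset handling also means IRQs are only requested once the device is known to be in a sane state.]
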
diff --git a/drivers/bus/mhi/pci_generic.c b/drivers/bus/mhi/pci_generic.c
index 4c577a731709..b79895810c52 100644
--- a/drivers/bus/mhi/pci_generic.c
+++ b/drivers/bus/mhi/pci_generic.c
@@ -366,6 +366,7 @@ static const struct mhi_pci_dev_info mhi_foxconn_sdx55_info = {
.config = &modem_foxconn_sdx55_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .mru_default = 32768,
.sideband_wake = false,
};
@@ -401,9 +402,53 @@ static const struct mhi_pci_dev_info mhi_mv31_info = {
.config = &modem_mv31_config,
.bar_num = MHI_PCI_DEFAULT_BAR_NUM,
.dma_data_width = 32,
+ .mru_default = 32768,
+};
+
+static const struct mhi_channel_config mhi_sierra_em919x_channels[] = {
+ MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0),
+ MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 256, 0),
+ MHI_CHANNEL_CONFIG_UL(4, "DIAG", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(5, "DIAG", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(12, "MBIM", 128, 0),
+ MHI_CHANNEL_CONFIG_DL(13, "MBIM", 128, 0),
+ MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 512, 1),
+ MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 512, 2),
+};
+
+static struct mhi_event_config modem_sierra_em919x_mhi_events[] = {
+ /* first ring is control+data and DIAG ring */
+ MHI_EVENT_CONFIG_CTRL(0, 2048),
+ /* Hardware channels request dedicated hardware event rings */
+ MHI_EVENT_CONFIG_HW_DATA(1, 2048, 100),
+ MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
+};
+
+static const struct mhi_controller_config modem_sierra_em919x_config = {
+ .max_channels = 128,
+ .timeout_ms = 24000,
+ .num_channels = ARRAY_SIZE(mhi_sierra_em919x_channels),
+ .ch_cfg = mhi_sierra_em919x_channels,
+ .num_events = ARRAY_SIZE(modem_sierra_em919x_mhi_events),
+ .event_cfg = modem_sierra_em919x_mhi_events,
+};
+
+static const struct mhi_pci_dev_info mhi_sierra_em919x_info = {
+ .name = "sierra-em919x",
+ .config = &modem_sierra_em919x_config,
+ .bar_num = MHI_PCI_DEFAULT_BAR_NUM,
+ .dma_data_width = 32,
+ .sideband_wake = false,
};
static const struct pci_device_id mhi_pci_id_table[] = {
+ /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200),
+ .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306),
.driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info },
{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304),
@@ -423,6 +468,9 @@ static const struct pci_device_id mhi_pci_id_table[] = {
/* DW5930e (sdx55), Non-eSIM, It's also T99W175 */
{ PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0b1),
.driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
+ /* T99W175 (sdx55), Based on Qualcomm new baseline */
+ { PCI_DEVICE(PCI_VENDOR_ID_FOXCONN, 0xe0bf),
+ .driver_data = (kernel_ulong_t) &mhi_foxconn_sdx55_info },
/* MV31-W (Cinterion) */
{ PCI_DEVICE(0x1269, 0x00b3),
.driver_data = (kernel_ulong_t) &mhi_mv31_info },
@@ -529,18 +577,12 @@ static int mhi_pci_claim(struct mhi_controller *mhi_cntrl,
mhi_cntrl->regs = pcim_iomap_table(pdev)[bar_num];
mhi_cntrl->reg_len = pci_resource_len(pdev, bar_num);
- err = pci_set_dma_mask(pdev, dma_mask);
+ err = dma_set_mask_and_coherent(&pdev->dev, dma_mask);
if (err) {
dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
return err;
}
- err = pci_set_consistent_dma_mask(pdev, dma_mask);
- if (err) {
- dev_err(&pdev->dev, "set consistent dma mask failed\n");
- return err;
- }
-
pci_set_master(pdev);
return 0;
@@ -1018,7 +1060,7 @@ static int __maybe_unused mhi_pci_freeze(struct device *dev)
* context.
*/
if (test_and_clear_bit(MHI_PCI_DEV_STARTED, &mhi_pdev->status)) {
- mhi_power_down(mhi_cntrl, false);
+ mhi_power_down(mhi_cntrl, true);
mhi_unprepare_after_power_down(mhi_cntrl);
}
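
[Note: dma_set_mask_and_coherent() sets the streaming and coherent DMA masks in one call, replacing the deprecated pci_set_dma_mask()/pci_set_consistent_dma_mask() pair. The equivalent of the converted code, assuming a 32-bit mask:

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		dev_err(&pdev->dev, "Cannot set proper DMA mask\n");
]
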
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index ea0424922de7..db612045616f 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -914,6 +914,7 @@ int mvebu_mbus_add_window_remap_by_id(unsigned int target,
return mvebu_mbus_alloc_window(s, base, size, remap, target, attribute);
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_add_window_remap_by_id);
int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
phys_addr_t base, size_t size)
@@ -921,6 +922,7 @@ int mvebu_mbus_add_window_by_id(unsigned int target, unsigned int attribute,
return mvebu_mbus_add_window_remap_by_id(target, attribute, base,
size, MVEBU_MBUS_NO_REMAP);
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_add_window_by_id);
int mvebu_mbus_del_window(phys_addr_t base, size_t size)
{
@@ -933,6 +935,7 @@ int mvebu_mbus_del_window(phys_addr_t base, size_t size)
mvebu_mbus_disable_window(&mbus_state, win);
return 0;
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_del_window);
void mvebu_mbus_get_pcie_mem_aperture(struct resource *res)
{
@@ -940,6 +943,7 @@ void mvebu_mbus_get_pcie_mem_aperture(struct resource *res)
return;
*res = mbus_state.pcie_mem_aperture;
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_pcie_mem_aperture);
void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
{
@@ -947,6 +951,7 @@ void mvebu_mbus_get_pcie_io_aperture(struct resource *res)
return;
*res = mbus_state.pcie_io_aperture;
}
+EXPORT_SYMBOL_GPL(mvebu_mbus_get_pcie_io_aperture);
int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr)
{
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index 9877e413fce3..1b57d4666e43 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -3691,27 +3691,6 @@ static struct ctl_table cdrom_table[] = {
},
{ }
};
-
-static struct ctl_table cdrom_cdrom_table[] = {
- {
- .procname = "cdrom",
- .maxlen = 0,
- .mode = 0555,
- .child = cdrom_table,
- },
- { }
-};
-
-/* Make sure that /proc/sys/dev is there */
-static struct ctl_table cdrom_root_table[] = {
- {
- .procname = "dev",
- .maxlen = 0,
- .mode = 0555,
- .child = cdrom_cdrom_table,
- },
- { }
-};
static struct ctl_table_header *cdrom_sysctl_header;
static void cdrom_sysctl_register(void)
@@ -3721,7 +3700,7 @@ static void cdrom_sysctl_register(void)
if (!atomic_add_unless(&initialized, 1, 1))
return;
- cdrom_sysctl_header = register_sysctl_table(cdrom_root_table);
+ cdrom_sysctl_header = register_sysctl("dev/cdrom", cdrom_table);
/* set the defaults */
cdrom_sysctl_settings.autoclose = autoclose;
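
[Note: register_sysctl() takes a slash-separated path and creates the intermediate /proc/sys directories itself, which is what makes the hand-rolled "dev" and "cdrom" parent tables above removable. The usage pattern:

	/* Registers cdrom_table under /proc/sys/dev/cdrom/;
	 * returns NULL on failure. */
	cdrom_sysctl_header = register_sysctl("dev/cdrom", cdrom_table);
]
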
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index d50cc1fd34d5..faead41709bc 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -719,6 +719,7 @@ static void probe_gdrom_setupdisk(void)
gd.disk->major = gdrom_major;
gd.disk->first_minor = 1;
gd.disk->minors = 1;
+ gd.disk->flags |= GENHD_FL_NO_PART;
strcpy(gd.disk->disk_name, GDROM_DEV_NAME);
}
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index b40edae32817..dc78a4fb879e 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -588,20 +588,11 @@ static void agp_amd64_remove(struct pci_dev *pdev)
agp_bridges_found--;
}
-#ifdef CONFIG_PM
+#define agp_amd64_suspend NULL
-static int agp_amd64_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused agp_amd64_resume(struct device *dev)
{
- pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-static int agp_amd64_resume(struct pci_dev *pdev)
-{
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
+ struct pci_dev *pdev = to_pci_dev(dev);
if (pdev->vendor == PCI_VENDOR_ID_NVIDIA)
nforce3_agp_init(pdev);
@@ -609,8 +600,6 @@ static int agp_amd64_resume(struct pci_dev *pdev)
return amd_8151_configure();
}
-#endif /* CONFIG_PM */
-
static const struct pci_device_id agp_amd64_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
@@ -738,15 +727,14 @@ static const struct pci_device_id agp_amd64_pci_promisc_table[] = {
{ }
};
+static SIMPLE_DEV_PM_OPS(agp_amd64_pm_ops, agp_amd64_suspend, agp_amd64_resume);
+
static struct pci_driver agp_amd64_pci_driver = {
.name = "agpgart-amd64",
.id_table = agp_amd64_pci_table,
.probe = agp_amd64_probe,
.remove = agp_amd64_remove,
-#ifdef CONFIG_PM
- .suspend = agp_amd64_suspend,
- .resume = agp_amd64_resume,
-#endif
+ .driver.pm = &agp_amd64_pm_ops,
};
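
[Note: SIMPLE_DEV_PM_OPS() wires the two callbacks into the system-sleep slots of a struct dev_pm_ops; under generic PM the PCI core saves and restores config space, so the old hand-written suspend hook reduces to NULL. Roughly what the macro provides (simplified; see <linux/pm.h> for the real definition):

	static const struct dev_pm_ops agp_amd64_pm_ops = {
		.suspend  = NULL,		/* PCI core does save/power-state */
		.resume   = agp_amd64_resume,
		.freeze   = NULL,
		.thaw     = agp_amd64_resume,
		.poweroff = NULL,
		.restore  = agp_amd64_resume,
	};

The same conversion is applied to the SiS and VIA AGP drivers below.]
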
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 14909fc5d767..f8a02f4bef1b 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -217,26 +217,14 @@ static void agp_sis_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-#ifdef CONFIG_PM
+#define agp_sis_suspend NULL
-static int agp_sis_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused agp_sis_resume(
+ __attribute__((unused)) struct device *dev)
{
- pci_save_state(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
-
- return 0;
-}
-
-static int agp_sis_resume(struct pci_dev *pdev)
-{
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
return sis_driver.configure();
}
-#endif /* CONFIG_PM */
-
static const struct pci_device_id agp_sis_pci_table[] = {
{
.class = (PCI_CLASS_BRIDGE_HOST << 8),
@@ -419,15 +407,14 @@ static const struct pci_device_id agp_sis_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
+static SIMPLE_DEV_PM_OPS(agp_sis_pm_ops, agp_sis_suspend, agp_sis_resume);
+
static struct pci_driver agp_sis_pci_driver = {
.name = "agpgart-sis",
.id_table = agp_sis_pci_table,
.probe = agp_sis_probe,
.remove = agp_sis_remove,
-#ifdef CONFIG_PM
- .suspend = agp_sis_suspend,
- .resume = agp_sis_resume,
-#endif
+ .driver.pm = &agp_sis_pm_ops,
};
static int __init agp_sis_init(void)
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 87a92a044570..a460ae352772 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -492,22 +492,11 @@ static void agp_via_remove(struct pci_dev *pdev)
agp_put_bridge(bridge);
}
-#ifdef CONFIG_PM
+#define agp_via_suspend NULL
-static int agp_via_suspend(struct pci_dev *pdev, pm_message_t state)
+static int __maybe_unused agp_via_resume(struct device *dev)
{
- pci_save_state (pdev);
- pci_set_power_state (pdev, PCI_D3hot);
-
- return 0;
-}
-
-static int agp_via_resume(struct pci_dev *pdev)
-{
- struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
-
- pci_set_power_state (pdev, PCI_D0);
- pci_restore_state(pdev);
+ struct agp_bridge_data *bridge = dev_get_drvdata(dev);
if (bridge->driver == &via_agp3_driver)
return via_configure_agp3();
@@ -517,8 +506,6 @@ static int agp_via_resume(struct pci_dev *pdev)
return 0;
}
-#endif /* CONFIG_PM */
-
/* must be the same order as name table above */
static const struct pci_device_id agp_via_pci_table[] = {
#define ID(x) \
@@ -567,16 +554,14 @@ static const struct pci_device_id agp_via_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_via_pci_table);
+static SIMPLE_DEV_PM_OPS(agp_via_pm_ops, agp_via_suspend, agp_via_resume);
static struct pci_driver agp_via_pci_driver = {
.name = "agpgart-via",
.id_table = agp_via_pci_table,
.probe = agp_via_probe,
.remove = agp_via_remove,
-#ifdef CONFIG_PM
- .suspend = agp_via_suspend,
- .resume = agp_via_resume,
-#endif
+ .driver.pm = &agp_via_pm_ops,
};
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index deb85a334c93..36203d3fa6ea 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -89,8 +89,8 @@ static struct applicom_board {
spinlock_t mutex;
} apbs[MAX_BOARD];
-static unsigned int irq = 0; /* interrupt number IRQ */
-static unsigned long mem = 0; /* physical segment of board */
+static unsigned int irq; /* interrupt number IRQ */
+static unsigned long mem; /* physical segment of board */
module_param_hw(irq, uint, irq, 0);
MODULE_PARM_DESC(irq, "IRQ of the Applicom board");
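
[Note: dropping the explicit "= 0" follows kernel style: objects with static storage duration are zero-initialized by the C language anyway, so the initializer is redundant (and checkpatch flags it):

	static unsigned int irq;	/* implicitly 0: static storage */
	static unsigned long mem;	/* implicitly 0 */
]
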
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 4e5431f01450..563dfae3b8da 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -746,26 +746,6 @@ static struct ctl_table hpet_table[] = {
{}
};
-static struct ctl_table hpet_root[] = {
- {
- .procname = "hpet",
- .maxlen = 0,
- .mode = 0555,
- .child = hpet_table,
- },
- {}
-};
-
-static struct ctl_table dev_root[] = {
- {
- .procname = "dev",
- .maxlen = 0,
- .mode = 0555,
- .child = hpet_root,
- },
- {}
-};
-
static struct ctl_table_header *sysctl_header;
/*
@@ -1061,7 +1041,7 @@ static int __init hpet_init(void)
if (result < 0)
return -ENODEV;
- sysctl_header = register_sysctl_table(dev_root);
+ sysctl_header = register_sysctl("dev/hpet", hpet_table);
result = acpi_bus_register_driver(&hpet_acpi_driver);
if (result < 0) {
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 001b819f5298..9704963f9d50 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -226,19 +226,6 @@ config HW_RANDOM_VIRTIO
To compile this driver as a module, choose M here: the
module will be called virtio-rng. If unsure, say N.
-config HW_RANDOM_TX4939
- tristate "TX4939 Random Number Generator support"
- depends on SOC_TX4939
- default HW_RANDOM
- help
- This driver provides kernel-side support for the Random Number
- Generator hardware found on TX4939 SoC.
-
- To compile this driver as a module, choose M here: the
- module will be called tx4939-rng.
-
- If unsure, say Y.
-
config HW_RANDOM_MXC_RNGA
tristate "Freescale i.MX RNGA Random Number Generator"
depends on SOC_IMX31
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index a2f1ce0790d1..584d47ba32f7 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
obj-$(CONFIG_HW_RANDOM_OMAP3_ROM) += omap3-rom-rng.o
obj-$(CONFIG_HW_RANDOM_PASEMI) += pasemi-rng.o
obj-$(CONFIG_HW_RANDOM_VIRTIO) += virtio-rng.o
-obj-$(CONFIG_HW_RANDOM_TX4939) += tx4939-rng.o
obj-$(CONFIG_HW_RANDOM_MXC_RNGA) += mxc-rnga.o
obj-$(CONFIG_HW_RANDOM_IMX_RNGC) += imx-rngc.o
obj-$(CONFIG_HW_RANDOM_INGENIC_RNG) += ingenic-rng.o
diff --git a/drivers/char/hw_random/tx4939-rng.c b/drivers/char/hw_random/tx4939-rng.c
deleted file mode 100644
index c8bd34e740fd..000000000000
--- a/drivers/char/hw_random/tx4939-rng.c
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * RNG driver for TX4939 Random Number Generators (RNG)
- *
- * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/hw_random.h>
-#include <linux/gfp.h>
-
-#define TX4939_RNG_RCSR 0x00000000
-#define TX4939_RNG_ROR(n) (0x00000018 + (n) * 8)
-
-#define TX4939_RNG_RCSR_INTE 0x00000008
-#define TX4939_RNG_RCSR_RST 0x00000004
-#define TX4939_RNG_RCSR_FIN 0x00000002
-#define TX4939_RNG_RCSR_ST 0x00000001
-
-struct tx4939_rng {
- struct hwrng rng;
- void __iomem *base;
- u64 databuf[3];
- unsigned int data_avail;
-};
-
-static void rng_io_start(void)
-{
-#ifndef CONFIG_64BIT
- /*
- * readq is reading a 64-bit register using a 64-bit load. On
- * a 32-bit kernel however interrupts or any other processor
- * exception would clobber the upper 32-bit of the processor
- * register so interrupts need to be disabled.
- */
- local_irq_disable();
-#endif
-}
-
-static void rng_io_end(void)
-{
-#ifndef CONFIG_64BIT
- local_irq_enable();
-#endif
-}
-
-static u64 read_rng(void __iomem *base, unsigned int offset)
-{
- return ____raw_readq(base + offset);
-}
-
-static void write_rng(u64 val, void __iomem *base, unsigned int offset)
-{
- return ____raw_writeq(val, base + offset);
-}
-
-static int tx4939_rng_data_present(struct hwrng *rng, int wait)
-{
- struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
- int i;
-
- if (rngdev->data_avail)
- return rngdev->data_avail;
- for (i = 0; i < 20; i++) {
- rng_io_start();
- if (!(read_rng(rngdev->base, TX4939_RNG_RCSR)
- & TX4939_RNG_RCSR_ST)) {
- rngdev->databuf[0] =
- read_rng(rngdev->base, TX4939_RNG_ROR(0));
- rngdev->databuf[1] =
- read_rng(rngdev->base, TX4939_RNG_ROR(1));
- rngdev->databuf[2] =
- read_rng(rngdev->base, TX4939_RNG_ROR(2));
- rngdev->data_avail =
- sizeof(rngdev->databuf) / sizeof(u32);
- /* Start RNG */
- write_rng(TX4939_RNG_RCSR_ST,
- rngdev->base, TX4939_RNG_RCSR);
- wait = 0;
- }
- rng_io_end();
- if (!wait)
- break;
- /* 90 bus clock cycles by default for generation */
- ndelay(90 * 5);
- }
- return rngdev->data_avail;
-}
-
-static int tx4939_rng_data_read(struct hwrng *rng, u32 *buffer)
-{
- struct tx4939_rng *rngdev = container_of(rng, struct tx4939_rng, rng);
-
- rngdev->data_avail--;
- *buffer = *((u32 *)&rngdev->databuf + rngdev->data_avail);
- return sizeof(u32);
-}
-
-static int __init tx4939_rng_probe(struct platform_device *dev)
-{
- struct tx4939_rng *rngdev;
- int i;
-
- rngdev = devm_kzalloc(&dev->dev, sizeof(*rngdev), GFP_KERNEL);
- if (!rngdev)
- return -ENOMEM;
- rngdev->base = devm_platform_ioremap_resource(dev, 0);
- if (IS_ERR(rngdev->base))
- return PTR_ERR(rngdev->base);
-
- rngdev->rng.name = dev_name(&dev->dev);
- rngdev->rng.data_present = tx4939_rng_data_present;
- rngdev->rng.data_read = tx4939_rng_data_read;
-
- rng_io_start();
- /* Reset RNG */
- write_rng(TX4939_RNG_RCSR_RST, rngdev->base, TX4939_RNG_RCSR);
- write_rng(0, rngdev->base, TX4939_RNG_RCSR);
- /* Start RNG */
- write_rng(TX4939_RNG_RCSR_ST, rngdev->base, TX4939_RNG_RCSR);
- rng_io_end();
- /*
- * Drop first two results. From the datasheet:
- * The quality of the random numbers generated immediately
- * after reset can be insufficient. Therefore, do not use
- * random numbers obtained from the first and second
- * generations; use the ones from the third or subsequent
- * generation.
- */
- for (i = 0; i < 2; i++) {
- rngdev->data_avail = 0;
- if (!tx4939_rng_data_present(&rngdev->rng, 1))
- return -EIO;
- }
-
- platform_set_drvdata(dev, rngdev);
- return devm_hwrng_register(&dev->dev, &rngdev->rng);
-}
-
-static struct platform_driver tx4939_rng_driver = {
- .driver = {
- .name = "tx4939-rng",
- },
-};
-
-module_platform_driver_probe(tx4939_rng_driver, tx4939_rng_probe);
-
-MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver for TX4939");
-MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index 0a7dde135db1..e856df7e285c 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -179,9 +179,9 @@ static void remove_common(struct virtio_device *vdev)
vi->data_avail = 0;
vi->data_idx = 0;
complete(&vi->have_data);
- vdev->config->reset(vdev);
if (vi->hwrng_register_done)
hwrng_unregister(&vi->hwrng);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
ida_simple_remove(&rng_index_ida, vi->index);
kfree(vi);
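
[Note: virtio_reset_device() is the new wrapper for resetting a virtio device; its body is presumably just the config-op call it replaces, i.e. something like:

	/* Sketch of the wrapper (assumed shape, see drivers/virtio/virtio.c): */
	void virtio_reset_device(struct virtio_device *dev)
	{
		dev->config->reset(dev);
	}

Note the reordering here as well: the hwrng is unregistered before the reset, so no reader can touch the device while it is torn down.]
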
diff --git a/drivers/char/mwave/3780i.h b/drivers/char/mwave/3780i.h
index 9ccb6b270b07..95164246afd1 100644
--- a/drivers/char/mwave/3780i.h
+++ b/drivers/char/mwave/3780i.h
@@ -68,7 +68,7 @@ typedef struct {
unsigned char ClockControl:1; /* RW: Clock control: 0=normal, 1=stop 3780i clocks */
unsigned char SoftReset:1; /* RW: Soft reset 0=normal, 1=soft reset active */
unsigned char ConfigMode:1; /* RW: Configuration mode, 0=normal, 1=config mode */
- unsigned char Reserved:5; /* 0: Reserved */
+ unsigned short Reserved:13; /* 0: Reserved */
} DSP_ISA_SLAVE_CONTROL;
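
[Note: the widened bitfield makes the arithmetic come out right for a 16-bit register: the three mode bits visible above take 1 bit each, so Reserved must cover the remaining 13 bits (3 + 13 = 16). The old unsigned char layout declared only 3 + 5 = 8 bits, half the register.]
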
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 227fb7802738..3404a91edf29 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -101,7 +101,7 @@
* ===============================
*
* There are four exported interfaces; two for use within the kernel,
- * and two or use from userspace.
+ * and two for use from userspace.
*
* Exported interfaces ---- userspace output
* -----------------------------------------
@@ -124,7 +124,7 @@
*
* The primary kernel interface is
*
- * void get_random_bytes(void *buf, int nbytes);
+ * void get_random_bytes(void *buf, int nbytes);
*
* This interface will return the requested number of random bytes,
* and place it in the requested buffer. This is equivalent to a
@@ -132,10 +132,10 @@
*
* For less critical applications, there are the functions:
*
- * u32 get_random_u32()
- * u64 get_random_u64()
- * unsigned int get_random_int()
- * unsigned long get_random_long()
+ * u32 get_random_u32()
+ * u64 get_random_u64()
+ * unsigned int get_random_int()
+ * unsigned long get_random_long()
*
* These are produced by a cryptographic RNG seeded from get_random_bytes,
* and so do not deplete the entropy pool as much. These are recommended
@@ -197,10 +197,10 @@
* from the devices are:
*
* void add_device_randomness(const void *buf, unsigned int size);
- * void add_input_randomness(unsigned int type, unsigned int code,
+ * void add_input_randomness(unsigned int type, unsigned int code,
* unsigned int value);
* void add_interrupt_randomness(int irq);
- * void add_disk_randomness(struct gendisk *disk);
+ * void add_disk_randomness(struct gendisk *disk);
* void add_hwgenerator_randomness(const char *buffer, size_t count,
* size_t entropy);
* void add_bootloader_randomness(const void *buf, unsigned int size);
@@ -296,8 +296,8 @@
* /dev/random and /dev/urandom created already, they can be created
* by using the commands:
*
- * mknod /dev/random c 1 8
- * mknod /dev/urandom c 1 9
+ * mknod /dev/random c 1 8
+ * mknod /dev/urandom c 1 9
*
* Acknowledgements:
* =================
@@ -337,7 +337,6 @@
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
-#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
@@ -360,30 +359,11 @@
/* #define ADD_INTERRUPT_BENCH */
/*
- * Configuration information
- */
-#define INPUT_POOL_SHIFT 12
-#define INPUT_POOL_WORDS (1 << (INPUT_POOL_SHIFT-5))
-#define OUTPUT_POOL_SHIFT 10
-#define OUTPUT_POOL_WORDS (1 << (OUTPUT_POOL_SHIFT-5))
-#define EXTRACT_SIZE (BLAKE2S_HASH_SIZE / 2)
-
-/*
- * To allow fractional bits to be tracked, the entropy_count field is
- * denominated in units of 1/8th bits.
- *
- * 2*(ENTROPY_SHIFT + poolbitshift) must <= 31, or the multiply in
- * credit_entropy_bits() needs to be 64 bits wide.
- */
-#define ENTROPY_SHIFT 3
-#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)
-
-/*
* If the entropy count falls under this number of bits, then we
* should wake up processes which are selecting or polling on write
* access to /dev/random.
*/
-static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
+static int random_write_wakeup_bits = 28 * (1 << 5);
/*
* Originally, we used a primitive polynomial of degree .poolwords
@@ -430,14 +410,27 @@ static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;
* polynomial which improves the resulting TGFSR polynomial to be
* irreducible, which we have made here.
*/
-static const struct poolinfo {
- int poolbitshift, poolwords, poolbytes, poolfracbits;
-#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
- int tap1, tap2, tap3, tap4, tap5;
-} poolinfo_table[] = {
- /* was: x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 */
+enum poolinfo {
+ POOL_WORDS = 128,
+ POOL_WORDMASK = POOL_WORDS - 1,
+ POOL_BYTES = POOL_WORDS * sizeof(u32),
+ POOL_BITS = POOL_BYTES * 8,
+ POOL_BITSHIFT = ilog2(POOL_BITS),
+
+ /* To allow fractional bits to be tracked, the entropy_count field is
+ * denominated in units of 1/8th bits. */
+ POOL_ENTROPY_SHIFT = 3,
+#define POOL_ENTROPY_BITS() (input_pool.entropy_count >> POOL_ENTROPY_SHIFT)
+ POOL_FRACBITS = POOL_BITS << POOL_ENTROPY_SHIFT,
+
/* x^128 + x^104 + x^76 + x^51 +x^25 + x + 1 */
- { S(128), 104, 76, 51, 25, 1 },
+ POOL_TAP1 = 104,
+ POOL_TAP2 = 76,
+ POOL_TAP3 = 51,
+ POOL_TAP4 = 25,
+ POOL_TAP5 = 1,
+
+ EXTRACT_SIZE = BLAKE2S_HASH_SIZE / 2
};
/*
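
[Note: with POOL_WORDS = 128, the derived constants evaluate to POOL_BYTES = 128 * 4 = 512, POOL_BITS = 512 * 8 = 4096, POOL_BITSHIFT = ilog2(4096) = 12, and POOL_FRACBITS = 4096 << 3 = 32768 eighth-bits. The literal 28 * (1 << 5) in random_write_wakeup_bits above likewise preserves the old 28 * OUTPUT_POOL_WORDS = 28 * 32 = 896 threshold while the OUTPUT_POOL_* macros go away.]
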
@@ -450,9 +443,9 @@ static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);
struct crng_state {
- __u32 state[16];
- unsigned long init_time;
- spinlock_t lock;
+ u32 state[16];
+ unsigned long init_time;
+ spinlock_t lock;
};
static struct crng_state primary_crng = {
@@ -476,10 +469,10 @@ static bool crng_need_final_init = false;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
-#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
-static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
+#define CRNG_INIT_CNT_THRESH (2 * CHACHA_KEY_SIZE)
+static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA_BLOCK_SIZE], int used);
+ u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);
@@ -500,38 +493,23 @@ MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
*
**********************************************************************/
-struct entropy_store;
-struct entropy_store {
- /* read-only data: */
- const struct poolinfo *poolinfo;
- __u32 *pool;
- const char *name;
+static u32 input_pool_data[POOL_WORDS] __latent_entropy;
- /* read-write data: */
+static struct {
spinlock_t lock;
- unsigned short add_ptr;
- unsigned short input_rotate;
+ u16 add_ptr;
+ u16 input_rotate;
int entropy_count;
- unsigned int last_data_init:1;
- __u8 last_data[EXTRACT_SIZE];
+} input_pool = {
+ .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int rsvd);
-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int fips);
-
-static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
-static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static ssize_t extract_entropy(void *buf, size_t nbytes, int min);
+static ssize_t _extract_entropy(void *buf, size_t nbytes);
-static struct entropy_store input_pool = {
- .poolinfo = &poolinfo_table[0],
- .name = "input",
- .lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
- .pool = input_pool_data
-};
+static void crng_reseed(struct crng_state *crng, bool use_input_pool);
-static __u32 const twist_table[8] = {
+static const u32 twist_table[8] = {
0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
@@ -545,39 +523,31 @@ static __u32 const twist_table[8] = {
* it's cheap to do so and helps slightly in the expected case where
* the entropy is concentrated in the low-order bits.
*/
-static void _mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
+static void _mix_pool_bytes(const void *in, int nbytes)
{
- unsigned long i, tap1, tap2, tap3, tap4, tap5;
+ unsigned long i;
int input_rotate;
- int wordmask = r->poolinfo->poolwords - 1;
- const unsigned char *bytes = in;
- __u32 w;
+ const u8 *bytes = in;
+ u32 w;
- tap1 = r->poolinfo->tap1;
- tap2 = r->poolinfo->tap2;
- tap3 = r->poolinfo->tap3;
- tap4 = r->poolinfo->tap4;
- tap5 = r->poolinfo->tap5;
-
- input_rotate = r->input_rotate;
- i = r->add_ptr;
+ input_rotate = input_pool.input_rotate;
+ i = input_pool.add_ptr;
/* mix one byte at a time to simplify size handling and churn faster */
while (nbytes--) {
w = rol32(*bytes++, input_rotate);
- i = (i - 1) & wordmask;
+ i = (i - 1) & POOL_WORDMASK;
/* XOR in the various taps */
- w ^= r->pool[i];
- w ^= r->pool[(i + tap1) & wordmask];
- w ^= r->pool[(i + tap2) & wordmask];
- w ^= r->pool[(i + tap3) & wordmask];
- w ^= r->pool[(i + tap4) & wordmask];
- w ^= r->pool[(i + tap5) & wordmask];
+ w ^= input_pool_data[i];
+ w ^= input_pool_data[(i + POOL_TAP1) & POOL_WORDMASK];
+ w ^= input_pool_data[(i + POOL_TAP2) & POOL_WORDMASK];
+ w ^= input_pool_data[(i + POOL_TAP3) & POOL_WORDMASK];
+ w ^= input_pool_data[(i + POOL_TAP4) & POOL_WORDMASK];
+ w ^= input_pool_data[(i + POOL_TAP5) & POOL_WORDMASK];
/* Mix the result back in with a twist */
- r->pool[i] = (w >> 3) ^ twist_table[w & 7];
+ input_pool_data[i] = (w >> 3) ^ twist_table[w & 7];
/*
* Normally, we add 7 bits of rotation to the pool.
@@ -588,33 +558,31 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
}
- r->input_rotate = input_rotate;
- r->add_ptr = i;
+ input_pool.input_rotate = input_rotate;
+ input_pool.add_ptr = i;
}
-static void __mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
+static void __mix_pool_bytes(const void *in, int nbytes)
{
- trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
- _mix_pool_bytes(r, in, nbytes);
+ trace_mix_pool_bytes_nolock(nbytes, _RET_IP_);
+ _mix_pool_bytes(in, nbytes);
}
-static void mix_pool_bytes(struct entropy_store *r, const void *in,
- int nbytes)
+static void mix_pool_bytes(const void *in, int nbytes)
{
unsigned long flags;
- trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
- spin_lock_irqsave(&r->lock, flags);
- _mix_pool_bytes(r, in, nbytes);
- spin_unlock_irqrestore(&r->lock, flags);
+ trace_mix_pool_bytes(nbytes, _RET_IP_);
+ spin_lock_irqsave(&input_pool.lock, flags);
+ _mix_pool_bytes(in, nbytes);
+ spin_unlock_irqrestore(&input_pool.lock, flags);
}
struct fast_pool {
- __u32 pool[4];
- unsigned long last;
- unsigned short reg_idx;
- unsigned char count;
+ u32 pool[4];
+ unsigned long last;
+ u16 reg_idx;
+ u8 count;
};
/*
@@ -624,8 +592,8 @@ struct fast_pool {
*/
static void fast_mix(struct fast_pool *f)
{
- __u32 a = f->pool[0], b = f->pool[1];
- __u32 c = f->pool[2], d = f->pool[3];
+ u32 a = f->pool[0], b = f->pool[1];
+ u32 c = f->pool[2], d = f->pool[3];
a += b; c += d;
b = rol32(b, 6); d = rol32(d, 27);
@@ -669,17 +637,19 @@ static void process_random_ready_list(void)
* Use credit_entropy_bits_safe() if the value comes from userspace
* or otherwise should be checked for extreme values.
*/
-static void credit_entropy_bits(struct entropy_store *r, int nbits)
+static void credit_entropy_bits(int nbits)
{
- int entropy_count, orig;
- const int pool_size = r->poolinfo->poolfracbits;
- int nfrac = nbits << ENTROPY_SHIFT;
+ int entropy_count, entropy_bits, orig;
+ int nfrac = nbits << POOL_ENTROPY_SHIFT;
+
+ /* Ensure that the multiplication can avoid being 64 bits wide. */
+ BUILD_BUG_ON(2 * (POOL_ENTROPY_SHIFT + POOL_BITSHIFT) > 31);
if (!nbits)
return;
retry:
- entropy_count = orig = READ_ONCE(r->entropy_count);
+ entropy_count = orig = READ_ONCE(input_pool.entropy_count);
if (nfrac < 0) {
/* Debit */
entropy_count += nfrac;
@@ -706,50 +676,43 @@ retry:
* turns no matter how large nbits is.
*/
int pnfrac = nfrac;
- const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
+ const int s = POOL_BITSHIFT + POOL_ENTROPY_SHIFT + 2;
/* The +2 corresponds to the /4 in the denominator */
do {
- unsigned int anfrac = min(pnfrac, pool_size/2);
+ unsigned int anfrac = min(pnfrac, POOL_FRACBITS / 2);
unsigned int add =
- ((pool_size - entropy_count)*anfrac*3) >> s;
+ ((POOL_FRACBITS - entropy_count) * anfrac * 3) >> s;
entropy_count += add;
pnfrac -= anfrac;
- } while (unlikely(entropy_count < pool_size-2 && pnfrac));
+ } while (unlikely(entropy_count < POOL_FRACBITS - 2 && pnfrac));
}
if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy/overflow: pool %s count %d\n",
- r->name, entropy_count);
+ pr_warn("negative entropy/overflow: count %d\n", entropy_count);
entropy_count = 0;
- } else if (entropy_count > pool_size)
- entropy_count = pool_size;
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ } else if (entropy_count > POOL_FRACBITS)
+ entropy_count = POOL_FRACBITS;
+ if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig)
goto retry;
- trace_credit_entropy_bits(r->name, nbits,
- entropy_count >> ENTROPY_SHIFT, _RET_IP_);
-
- if (r == &input_pool) {
- int entropy_bits = entropy_count >> ENTROPY_SHIFT;
+ trace_credit_entropy_bits(nbits, entropy_count >> POOL_ENTROPY_SHIFT, _RET_IP_);
- if (crng_init < 2 && entropy_bits >= 128)
- crng_reseed(&primary_crng, r);
- }
+ entropy_bits = entropy_count >> POOL_ENTROPY_SHIFT;
+ if (crng_init < 2 && entropy_bits >= 128)
+ crng_reseed(&primary_crng, true);
}
-static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
+static int credit_entropy_bits_safe(int nbits)
{
- const int nbits_max = r->poolinfo->poolwords * 32;
-
if (nbits < 0)
return -EINVAL;
/* Cap the value to avoid overflows */
- nbits = min(nbits, nbits_max);
+ nbits = min(nbits, POOL_BITS);
- credit_entropy_bits(r, nbits);
+ credit_entropy_bits(nbits);
return 0;
}
@@ -759,7 +722,7 @@ static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
*
*********************************************************************/
-#define CRNG_RESEED_INTERVAL (300*HZ)
+#define CRNG_RESEED_INTERVAL (300 * HZ)
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
@@ -783,9 +746,9 @@ early_param("random.trust_cpu", parse_trust_cpu);
static bool crng_init_try_arch(struct crng_state *crng)
{
- int i;
- bool arch_init = true;
- unsigned long rv;
+ int i;
+ bool arch_init = true;
+ unsigned long rv;
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
@@ -799,11 +762,11 @@ static bool crng_init_try_arch(struct crng_state *crng)
return arch_init;
}
-static bool __init crng_init_try_arch_early(struct crng_state *crng)
+static bool __init crng_init_try_arch_early(void)
{
- int i;
- bool arch_init = true;
- unsigned long rv;
+ int i;
+ bool arch_init = true;
+ unsigned long rv;
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long_early(&rv) &&
@@ -811,7 +774,7 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
rv = random_get_entropy();
arch_init = false;
}
- crng->state[i] ^= rv;
+ primary_crng.state[i] ^= rv;
}
return arch_init;
@@ -820,27 +783,25 @@ static bool __init crng_init_try_arch_early(struct crng_state *crng)
static void crng_initialize_secondary(struct crng_state *crng)
{
chacha_init_consts(crng->state);
- _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+ _get_random_bytes(&crng->state[4], sizeof(u32) * 12);
crng_init_try_arch(crng);
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
-static void __init crng_initialize_primary(struct crng_state *crng)
+static void __init crng_initialize_primary(void)
{
- _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
- if (crng_init_try_arch_early(crng) && trust_cpu && crng_init < 2) {
+ _extract_entropy(&primary_crng.state[4], sizeof(u32) * 12);
+ if (crng_init_try_arch_early() && trust_cpu && crng_init < 2) {
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
pr_notice("crng init done (trusting CPU's manufacturer)\n");
}
- crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+ primary_crng.init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}
-static void crng_finalize_init(struct crng_state *crng)
+static void crng_finalize_init(void)
{
- if (crng != &primary_crng || crng_init >= 2)
- return;
if (!system_wq) {
/* We can't call numa_crng_init until we have workqueues,
* so mark this for processing later. */
@@ -851,6 +812,7 @@ static void crng_finalize_init(struct crng_state *crng)
invalidate_batched_entropy();
numa_crng_init();
crng_init = 2;
+ crng_need_final_init = false;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
@@ -873,7 +835,7 @@ static void do_numa_crng_init(struct work_struct *work)
struct crng_state *crng;
struct crng_state **pool;
- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+ pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL | __GFP_NOFAIL);
for_each_online_node(i) {
crng = kmalloc_node(sizeof(struct crng_state),
GFP_KERNEL | __GFP_NOFAIL, i);
@@ -917,10 +879,10 @@ static struct crng_state *select_crng(void)
* path. So we can't afford to dilly-dally. Returns the number of
* bytes processed from cp.
*/
-static size_t crng_fast_load(const char *cp, size_t len)
+static size_t crng_fast_load(const u8 *cp, size_t len)
{
unsigned long flags;
- char *p;
+ u8 *p;
size_t ret = 0;
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
@@ -929,7 +891,7 @@ static size_t crng_fast_load(const char *cp, size_t len)
spin_unlock_irqrestore(&primary_crng.lock, flags);
return 0;
}
- p = (unsigned char *) &primary_crng.state[4];
+ p = (u8 *)&primary_crng.state[4];
while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
cp++; crng_init_cnt++; len--; ret++;
@@ -957,14 +919,14 @@ static size_t crng_fast_load(const char *cp, size_t len)
* like a fixed DMI table (for example), which might very well be
* unique to the machine, but is otherwise unvarying.
*/
-static int crng_slow_load(const char *cp, size_t len)
+static int crng_slow_load(const u8 *cp, size_t len)
{
- unsigned long flags;
- static unsigned char lfsr = 1;
- unsigned char tmp;
- unsigned i, max = CHACHA_KEY_SIZE;
- const char * src_buf = cp;
- char * dest_buf = (char *) &primary_crng.state[4];
+ unsigned long flags;
+ static u8 lfsr = 1;
+ u8 tmp;
+ unsigned int i, max = CHACHA_KEY_SIZE;
+ const u8 *src_buf = cp;
+ u8 *dest_buf = (u8 *)&primary_crng.state[4];
if (!spin_trylock_irqsave(&primary_crng.lock, flags))
return 0;
@@ -975,7 +937,7 @@ static int crng_slow_load(const char *cp, size_t len)
if (len > max)
max = len;
- for (i = 0; i < max ; i++) {
+ for (i = 0; i < max; i++) {
tmp = lfsr;
lfsr >>= 1;
if (tmp & 1)
@@ -988,17 +950,17 @@ static int crng_slow_load(const char *cp, size_t len)
return 1;
}
-static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
+static void crng_reseed(struct crng_state *crng, bool use_input_pool)
{
- unsigned long flags;
- int i, num;
+ unsigned long flags;
+ int i, num;
union {
- __u8 block[CHACHA_BLOCK_SIZE];
- __u32 key[8];
+ u8 block[CHACHA_BLOCK_SIZE];
+ u32 key[8];
} buf;
- if (r) {
- num = extract_entropy(r, &buf, 32, 16, 0);
+ if (use_input_pool) {
+ num = extract_entropy(&buf, 32, 16);
if (num == 0)
return;
} else {
@@ -1008,20 +970,20 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
spin_lock_irqsave(&crng->lock, flags);
for (i = 0; i < 8; i++) {
- unsigned long rv;
+ unsigned long rv;
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv))
rv = random_get_entropy();
- crng->state[i+4] ^= buf.key[i] ^ rv;
+ crng->state[i + 4] ^= buf.key[i] ^ rv;
}
memzero_explicit(&buf, sizeof(buf));
WRITE_ONCE(crng->init_time, jiffies);
spin_unlock_irqrestore(&crng->lock, flags);
- crng_finalize_init(crng);
+ if (crng == &primary_crng && crng_init < 2)
+ crng_finalize_init();
}
-static void _extract_crng(struct crng_state *crng,
- __u8 out[CHACHA_BLOCK_SIZE])
+static void _extract_crng(struct crng_state *crng, u8 out[CHACHA_BLOCK_SIZE])
{
unsigned long flags, init_time;
@@ -1029,8 +991,7 @@ static void _extract_crng(struct crng_state *crng,
init_time = READ_ONCE(crng->init_time);
if (time_after(READ_ONCE(crng_global_init_time), init_time) ||
time_after(jiffies, init_time + CRNG_RESEED_INTERVAL))
- crng_reseed(crng, crng == &primary_crng ?
- &input_pool : NULL);
+ crng_reseed(crng, crng == &primary_crng);
}
spin_lock_irqsave(&crng->lock, flags);
chacha20_block(&crng->state[0], out);
@@ -1039,7 +1000,7 @@ static void _extract_crng(struct crng_state *crng,
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
+static void extract_crng(u8 out[CHACHA_BLOCK_SIZE])
{
_extract_crng(select_crng(), out);
}
@@ -1049,26 +1010,26 @@ static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
* enough) to mutate the CRNG key to provide backtracking protection.
*/
static void _crng_backtrack_protect(struct crng_state *crng,
- __u8 tmp[CHACHA_BLOCK_SIZE], int used)
+ u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
- unsigned long flags;
- __u32 *s, *d;
- int i;
+ unsigned long flags;
+ u32 *s, *d;
+ int i;
- used = round_up(used, sizeof(__u32));
+ used = round_up(used, sizeof(u32));
if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
extract_crng(tmp);
used = 0;
}
spin_lock_irqsave(&crng->lock, flags);
- s = (__u32 *) &tmp[used];
+ s = (u32 *)&tmp[used];
d = &crng->state[4];
- for (i=0; i < 8; i++)
+ for (i = 0; i < 8; i++)
*d++ ^= *s++;
spin_unlock_irqrestore(&crng->lock, flags);
}
-static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
+static void crng_backtrack_protect(u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
_crng_backtrack_protect(select_crng(), tmp, used);
}
@@ -1076,7 +1037,7 @@ static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
+ u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
int large_request = (nbytes > 256);
while (nbytes) {
@@ -1108,7 +1069,6 @@ static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
return ret;
}
-
/*********************************************************************
*
* Entropy input management
@@ -1141,8 +1101,8 @@ void add_device_randomness(const void *buf, unsigned int size)
trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
- _mix_pool_bytes(&input_pool, buf, size);
- _mix_pool_bytes(&input_pool, &time, sizeof(time));
+ _mix_pool_bytes(buf, size);
+ _mix_pool_bytes(&time, sizeof(time));
spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
@@ -1161,19 +1121,17 @@ static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;
*/
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
- struct entropy_store *r;
struct {
long jiffies;
- unsigned cycles;
- unsigned num;
+ unsigned int cycles;
+ unsigned int num;
} sample;
long delta, delta2, delta3;
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
- r = &input_pool;
- mix_pool_bytes(r, &sample, sizeof(sample));
+ mix_pool_bytes(&sample, sizeof(sample));
/*
* Calculate number of bits of randomness we probably added.
@@ -1205,11 +1163,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
* Round down by 1 bit on general principles,
* and limit entropy estimate to 12 bits.
*/
- credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
+ credit_entropy_bits(min_t(int, fls(delta >> 1), 11));
}
void add_input_randomness(unsigned int type, unsigned int code,
- unsigned int value)
+ unsigned int value)
{
static unsigned char last_value;
@@ -1220,7 +1178,7 @@ void add_input_randomness(unsigned int type, unsigned int code,
last_value = value;
add_timer_randomness(&input_timer_state,
(type << 4) ^ code ^ (code >> 4) ^ value);
- trace_add_input_randomness(ENTROPY_BITS(&input_pool));
+ trace_add_input_randomness(POOL_ENTROPY_BITS());
}
EXPORT_SYMBOL_GPL(add_input_randomness);
@@ -1229,33 +1187,33 @@ static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;
-#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
-#define FIXED_1_2 (1 << (AVG_SHIFT-1))
+#define AVG_SHIFT 8 /* Exponential average factor k=1/256 */
+#define FIXED_1_2 (1 << (AVG_SHIFT - 1))
static void add_interrupt_bench(cycles_t start)
{
- long delta = random_get_entropy() - start;
+ long delta = random_get_entropy() - start;
- /* Use a weighted moving average */
- delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
- avg_cycles += delta;
- /* And average deviation */
- delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
- avg_deviation += delta;
+ /* Use a weighted moving average */
+ delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
+ avg_cycles += delta;
+ /* And average deviation */
+ delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
+ avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif
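
The add_interrupt_bench() hunk above keeps a fixed-point exponentially weighted moving average: the accumulator stores the mean scaled by 2^AVG_SHIFT, and FIXED_1_2 rounds the shift. A minimal standalone sketch of the same update rule (illustrative names, plain userspace C):

    #include <stdio.h>
    #include <stdlib.h>

    #define AVG_SHIFT 8                       /* k = 1/256 */
    #define FIXED_1_2 (1 << (AVG_SHIFT - 1))  /* +0.5 to round the shift */

    static long avg;    /* mean scaled by 2^AVG_SHIFT */

    static void ewma_update(long sample)
    {
        /* mean_new = mean + (sample - mean) / 256, kept in fixed point */
        avg += sample - ((avg + FIXED_1_2) >> AVG_SHIFT);
    }

    int main(void)
    {
        for (int i = 0; i < 10000; i++)
            ewma_update(1000 + rand() % 16);  /* samples near 1000 */
        printf("mean ~= %ld\n", (avg + FIXED_1_2) >> AVG_SHIFT);
        return 0;
    }
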
-static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
+static u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
- __u32 *ptr = (__u32 *) regs;
+ u32 *ptr = (u32 *)regs;
unsigned int idx;
if (regs == NULL)
return 0;
idx = READ_ONCE(f->reg_idx);
- if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+ if (idx >= sizeof(struct pt_regs) / sizeof(u32))
idx = 0;
ptr += idx++;
WRITE_ONCE(f->reg_idx, idx);
@@ -1264,13 +1222,12 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
void add_interrupt_randomness(int irq)
{
- struct entropy_store *r;
- struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
- struct pt_regs *regs = get_irq_regs();
- unsigned long now = jiffies;
- cycles_t cycles = random_get_entropy();
- __u32 c_high, j_high;
- __u64 ip;
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+ cycles_t cycles = random_get_entropy();
+ u32 c_high, j_high;
+ u64 ip;
if (cycles == 0)
cycles = get_reg(fast_pool, regs);
@@ -1280,38 +1237,35 @@ void add_interrupt_randomness(int irq)
fast_pool->pool[1] ^= now ^ c_high;
ip = regs ? instruction_pointer(regs) : _RET_IP_;
fast_pool->pool[2] ^= ip;
- fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
- get_reg(fast_pool, regs);
+ fast_pool->pool[3] ^=
+ (sizeof(ip) > 4) ? ip >> 32 : get_reg(fast_pool, regs);
fast_mix(fast_pool);
add_interrupt_bench(cycles);
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&
- crng_fast_load((char *) fast_pool->pool,
- sizeof(fast_pool->pool)) > 0) {
+ crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
fast_pool->count = 0;
fast_pool->last = now;
}
return;
}
- if ((fast_pool->count < 64) &&
- !time_after(now, fast_pool->last + HZ))
+ if ((fast_pool->count < 64) && !time_after(now, fast_pool->last + HZ))
return;
- r = &input_pool;
- if (!spin_trylock(&r->lock))
+ if (!spin_trylock(&input_pool.lock))
return;
fast_pool->last = now;
- __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
- spin_unlock(&r->lock);
+ __mix_pool_bytes(&fast_pool->pool, sizeof(fast_pool->pool));
+ spin_unlock(&input_pool.lock);
fast_pool->count = 0;
/* award one bit for the contents of the fast pool */
- credit_entropy_bits(r, 1);
+ credit_entropy_bits(1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);
@@ -1322,7 +1276,7 @@ void add_disk_randomness(struct gendisk *disk)
return;
/* first major is 1, so we get >= 0x200 here */
add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
- trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
+ trace_add_disk_randomness(disk_devt(disk), POOL_ENTROPY_BITS());
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif
@@ -1337,43 +1291,36 @@ EXPORT_SYMBOL_GPL(add_disk_randomness);
* This function decides how many bytes to actually take from the
* given pool, and also debits the entropy count accordingly.
*/
-static size_t account(struct entropy_store *r, size_t nbytes, int min,
- int reserved)
+static size_t account(size_t nbytes, int min)
{
- int entropy_count, orig, have_bytes;
+ int entropy_count, orig;
size_t ibytes, nfrac;
- BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
+ BUG_ON(input_pool.entropy_count > POOL_FRACBITS);
/* Can we pull enough? */
retry:
- entropy_count = orig = READ_ONCE(r->entropy_count);
- ibytes = nbytes;
- /* never pull more than available */
- have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
-
- if ((have_bytes -= reserved) < 0)
- have_bytes = 0;
- ibytes = min_t(size_t, ibytes, have_bytes);
- if (ibytes < min)
- ibytes = 0;
-
+ entropy_count = orig = READ_ONCE(input_pool.entropy_count);
if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy count: pool %s count %d\n",
- r->name, entropy_count);
+ pr_warn("negative entropy count: count %d\n", entropy_count);
entropy_count = 0;
}
- nfrac = ibytes << (ENTROPY_SHIFT + 3);
- if ((size_t) entropy_count > nfrac)
+
+ /* never pull more than available */
+ ibytes = min_t(size_t, nbytes, entropy_count >> (POOL_ENTROPY_SHIFT + 3));
+ if (ibytes < min)
+ ibytes = 0;
+ nfrac = ibytes << (POOL_ENTROPY_SHIFT + 3);
+ if ((size_t)entropy_count > nfrac)
entropy_count -= nfrac;
else
entropy_count = 0;
- if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig)
goto retry;
- trace_debit_entropy(r->name, 8 * ibytes);
- if (ibytes && ENTROPY_BITS(r) < random_write_wakeup_bits) {
+ trace_debit_entropy(8 * ibytes);
+ if (ibytes && POOL_ENTROPY_BITS() < random_write_wakeup_bits) {
wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
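
account() does all of its bookkeeping in fractional entropy units: the counter stores 1/2^POOL_ENTROPY_SHIFT-bit fractions, so a right shift by POOL_ENTROPY_SHIFT yields bits and by POOL_ENTROPY_SHIFT + 3 yields bytes. A small check of the unit conversions, assuming the shift is 3 (the value the old ENTROPY_SHIFT had):

    #include <assert.h>

    #define POOL_ENTROPY_SHIFT 3    /* counter holds 1/8th-bit fractions */

    static unsigned int frac_to_bytes(unsigned int frac)
    {
        return frac >> (POOL_ENTROPY_SHIFT + 3);  /* /8 to bits, /8 to bytes */
    }

    static unsigned int bytes_to_frac(unsigned int bytes)
    {
        return bytes << (POOL_ENTROPY_SHIFT + 3);
    }

    int main(void)
    {
        /* 4096 bits of credit = 32768 fractions = 512 extractable bytes */
        assert(frac_to_bytes(4096 << POOL_ENTROPY_SHIFT) == 512);
        assert(bytes_to_frac(512) == 4096 << POOL_ENTROPY_SHIFT);
        return 0;
    }
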
@@ -1386,7 +1333,7 @@ retry:
*
* Note: we assume that .poolwords is a multiple of 16 words.
*/
-static void extract_buf(struct entropy_store *r, __u8 *out)
+static void extract_buf(u8 *out)
{
struct blake2s_state state __aligned(__alignof__(unsigned long));
u8 hash[BLAKE2S_HASH_SIZE];
@@ -1408,9 +1355,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
}
/* Generate a hash across the pool */
- spin_lock_irqsave(&r->lock, flags);
- blake2s_update(&state, (const u8 *)r->pool,
- r->poolinfo->poolwords * sizeof(*r->pool));
+ spin_lock_irqsave(&input_pool.lock, flags);
+ blake2s_update(&state, (const u8 *)input_pool_data, POOL_BYTES);
blake2s_final(&state, hash); /* final zeros out state */
/*
@@ -1422,8 +1368,8 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
* brute-forcing the feedback as hard as brute-forcing the
* hash.
*/
- __mix_pool_bytes(r, hash, sizeof(hash));
- spin_unlock_irqrestore(&r->lock, flags);
+ __mix_pool_bytes(hash, sizeof(hash));
+ spin_unlock_irqrestore(&input_pool.lock, flags);
/* Note that EXTRACT_SIZE is half of hash size here, because above
* we've dumped the full length back into mixer. By reducing the
@@ -1433,23 +1379,13 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
memzero_explicit(hash, sizeof(hash));
}
-static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int fips)
+static ssize_t _extract_entropy(void *buf, size_t nbytes)
{
ssize_t ret = 0, i;
- __u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
+ u8 tmp[EXTRACT_SIZE];
while (nbytes) {
- extract_buf(r, tmp);
-
- if (fips) {
- spin_lock_irqsave(&r->lock, flags);
- if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
- panic("Hardware RNG duplicated output!\n");
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- spin_unlock_irqrestore(&r->lock, flags);
- }
+ extract_buf(tmp);
i = min_t(int, nbytes, EXTRACT_SIZE);
memcpy(buf, tmp, i);
nbytes -= i;
@@ -1468,42 +1404,19 @@ static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
* returns it in a buffer.
*
* The min parameter specifies the minimum amount we can pull before
- * failing to avoid races that defeat catastrophic reseeding while the
- * reserved parameter indicates how much entropy we must leave in the
- * pool after each pull to avoid starving other readers.
+ * failing to avoid races that defeat catastrophic reseeding.
*/
-static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int reserved)
+static ssize_t extract_entropy(void *buf, size_t nbytes, int min)
{
- __u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
-
- /* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
- if (fips_enabled) {
- spin_lock_irqsave(&r->lock, flags);
- if (!r->last_data_init) {
- r->last_data_init = 1;
- spin_unlock_irqrestore(&r->lock, flags);
- trace_extract_entropy(r->name, EXTRACT_SIZE,
- ENTROPY_BITS(r), _RET_IP_);
- extract_buf(r, tmp);
- spin_lock_irqsave(&r->lock, flags);
- memcpy(r->last_data, tmp, EXTRACT_SIZE);
- }
- spin_unlock_irqrestore(&r->lock, flags);
- }
-
- trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
- nbytes = account(r, nbytes, min, reserved);
-
- return _extract_entropy(r, buf, nbytes, fips_enabled);
+ trace_extract_entropy(nbytes, POOL_ENTROPY_BITS(), _RET_IP_);
+ nbytes = account(nbytes, min);
+ return _extract_entropy(buf, nbytes);
}
#define warn_unseeded_randomness(previous) \
- _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
+ _warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
-static void _warn_unseeded_randomness(const char *func_name, void *caller,
- void **previous)
+static void _warn_unseeded_randomness(const char *func_name, void *caller, void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
const bool print_once = false;
@@ -1511,8 +1424,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
static bool print_once __read_mostly;
#endif
- if (print_once ||
- crng_ready() ||
+ if (print_once || crng_ready() ||
(previous && (caller == READ_ONCE(*previous))))
return;
WRITE_ONCE(*previous, caller);
@@ -1520,9 +1432,8 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
print_once = true;
#endif
if (__ratelimit(&unseeded_warning))
- printk_deferred(KERN_NOTICE "random: %s called from %pS "
- "with crng_init=%d\n", func_name, caller,
- crng_init);
+ printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
+ func_name, caller, crng_init);
}
/*
@@ -1537,7 +1448,7 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
*/
static void _get_random_bytes(void *buf, int nbytes)
{
- __u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
+ u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
trace_get_random_bytes(nbytes, _RET_IP_);
@@ -1565,7 +1476,6 @@ void get_random_bytes(void *buf, int nbytes)
}
EXPORT_SYMBOL(get_random_bytes);
-
/*
* Each time the timer fires, we expect that we got an unpredictable
* jump in the cycle counter. Even if the timer is running on another
@@ -1581,7 +1491,7 @@ EXPORT_SYMBOL(get_random_bytes);
*/
static void entropy_timer(struct timer_list *t)
{
- credit_entropy_bits(&input_pool, 1);
+ credit_entropy_bits(1);
}
/*
@@ -1604,15 +1514,15 @@ static void try_to_generate_entropy(void)
timer_setup_on_stack(&stack.timer, entropy_timer, 0);
while (!crng_ready()) {
if (!timer_pending(&stack.timer))
- mod_timer(&stack.timer, jiffies+1);
- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+ mod_timer(&stack.timer, jiffies + 1);
+ mix_pool_bytes(&stack.now, sizeof(stack.now));
schedule();
stack.now = random_get_entropy();
}
del_timer_sync(&stack.timer);
destroy_timer_on_stack(&stack.timer);
- mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
+ mix_pool_bytes(&stack.now, sizeof(stack.now));
}
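
try_to_generate_entropy() banks on the scheduler and a one-jiffy timer producing unpredictable jumps in the cycle counter. A rough userspace analogue of the idea, with clock_gettime() standing in for random_get_entropy(); this only illustrates the jitter source and is not a CSPRNG:

    #include <sched.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    static uint64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    int main(void)
    {
        uint64_t mix = 0;

        /* Yield repeatedly and fold the low timer bits, which jitter
         * with scheduling and cache state, into an accumulator. */
        for (int i = 0; i < 1024; i++) {
            sched_yield();
            mix = mix * 6364136223846793005ull + (now_ns() & 0xff);
        }
        printf("mixed sample: %016llx\n", (unsigned long long)mix);
        return 0;
    }
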
/*
@@ -1731,7 +1641,7 @@ EXPORT_SYMBOL(del_random_ready_callback);
int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
int left = nbytes;
- char *p = buf;
+ u8 *p = buf;
trace_get_random_bytes_arch(left, _RET_IP_);
while (left) {
@@ -1753,26 +1663,24 @@ EXPORT_SYMBOL(get_random_bytes_arch);
/*
* init_std_data - initialize pool with system data
*
- * @r: pool to initialize
- *
* This function clears the pool's entropy count and mixes some system
* data into the pool to prepare it for use. The pool is not cleared
* as that can only decrease the entropy in the pool.
*/
-static void __init init_std_data(struct entropy_store *r)
+static void __init init_std_data(void)
{
int i;
ktime_t now = ktime_get_real();
unsigned long rv;
- mix_pool_bytes(r, &now, sizeof(now));
- for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
+ mix_pool_bytes(&now, sizeof(now));
+ for (i = POOL_BYTES; i > 0; i -= sizeof(rv)) {
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv))
rv = random_get_entropy();
- mix_pool_bytes(r, &rv, sizeof(rv));
+ mix_pool_bytes(&rv, sizeof(rv));
}
- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+ mix_pool_bytes(utsname(), sizeof(*(utsname())));
}
/*
@@ -1787,10 +1695,10 @@ static void __init init_std_data(struct entropy_store *r)
*/
int __init rand_initialize(void)
{
- init_std_data(&input_pool);
+ init_std_data();
if (crng_need_final_init)
- crng_finalize_init(&primary_crng);
- crng_initialize_primary(&primary_crng);
+ crng_finalize_init();
+ crng_initialize_primary();
crng_global_init_time = jiffies;
if (ratelimit_disable) {
urandom_warning.interval = 0;
@@ -1816,20 +1724,19 @@ void rand_initialize_disk(struct gendisk *disk)
}
#endif
-static ssize_t
-urandom_read_nowarn(struct file *file, char __user *buf, size_t nbytes,
- loff_t *ppos)
+static ssize_t urandom_read_nowarn(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
int ret;
- nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
+ nbytes = min_t(size_t, nbytes, INT_MAX >> (POOL_ENTROPY_SHIFT + 3));
ret = extract_crng_user(buf, nbytes);
- trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
+ trace_urandom_read(8 * nbytes, 0, POOL_ENTROPY_BITS());
return ret;
}
-static ssize_t
-urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t urandom_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
{
static int maxwarn = 10;
@@ -1843,8 +1750,8 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return urandom_read_nowarn(file, buf, nbytes, ppos);
}
-static ssize_t
-random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+static ssize_t random_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
{
int ret;
@@ -1854,8 +1761,7 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return urandom_read_nowarn(file, buf, nbytes, ppos);
}
-static __poll_t
-random_poll(struct file *file, poll_table * wait)
+static __poll_t random_poll(struct file *file, poll_table *wait)
{
__poll_t mask;
@@ -1864,16 +1770,15 @@ random_poll(struct file *file, poll_table * wait)
mask = 0;
if (crng_ready())
mask |= EPOLLIN | EPOLLRDNORM;
- if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
+ if (POOL_ENTROPY_BITS() < random_write_wakeup_bits)
mask |= EPOLLOUT | EPOLLWRNORM;
return mask;
}
-static int
-write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+static int write_pool(const char __user *buffer, size_t count)
{
size_t bytes;
- __u32 t, buf[16];
+ u32 t, buf[16];
const char __user *p = buffer;
while (count > 0) {
@@ -1883,7 +1788,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
if (copy_from_user(&buf, p, bytes))
return -EFAULT;
- for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
+ for (b = bytes; b > 0; b -= sizeof(u32), i++) {
if (!arch_get_random_int(&t))
break;
buf[i] ^= t;
@@ -1892,7 +1797,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
count -= bytes;
p += bytes;
- mix_pool_bytes(r, buf, bytes);
+ mix_pool_bytes(buf, bytes);
cond_resched();
}
@@ -1904,7 +1809,7 @@ static ssize_t random_write(struct file *file, const char __user *buffer,
{
size_t ret;
- ret = write_pool(&input_pool, buffer, count);
+ ret = write_pool(buffer, count);
if (ret)
return ret;
@@ -1920,7 +1825,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
switch (cmd) {
case RNDGETENTCNT:
/* inherently racy, no point locking */
- ent_count = ENTROPY_BITS(&input_pool);
+ ent_count = POOL_ENTROPY_BITS();
if (put_user(ent_count, p))
return -EFAULT;
return 0;
@@ -1929,7 +1834,7 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EPERM;
if (get_user(ent_count, p))
return -EFAULT;
- return credit_entropy_bits_safe(&input_pool, ent_count);
+ return credit_entropy_bits_safe(ent_count);
case RNDADDENTROPY:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1939,11 +1844,10 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
return -EINVAL;
if (get_user(size, p++))
return -EFAULT;
- retval = write_pool(&input_pool, (const char __user *)p,
- size);
+ retval = write_pool((const char __user *)p, size);
if (retval < 0)
return retval;
- return credit_entropy_bits_safe(&input_pool, ent_count);
+ return credit_entropy_bits_safe(ent_count);
case RNDZAPENTCNT:
case RNDCLEARPOOL:
/*
@@ -1952,14 +1856,17 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
*/
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- input_pool.entropy_count = 0;
+ if (xchg(&input_pool.entropy_count, 0) && random_write_wakeup_bits) {
+ wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
return 0;
case RNDRESEEDCRNG:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
if (crng_init < 2)
return -ENODATA;
- crng_reseed(&primary_crng, &input_pool);
+ crng_reseed(&primary_crng, true);
WRITE_ONCE(crng_global_init_time, jiffies - 1);
return 0;
default:
@@ -1973,9 +1880,9 @@ static int random_fasync(int fd, struct file *filp, int on)
}
const struct file_operations random_fops = {
- .read = random_read,
+ .read = random_read,
.write = random_write,
- .poll = random_poll,
+ .poll = random_poll,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.fasync = random_fasync,
@@ -1983,7 +1890,7 @@ const struct file_operations random_fops = {
};
const struct file_operations urandom_fops = {
- .read = urandom_read,
+ .read = urandom_read,
.write = random_write,
.unlocked_ioctl = random_ioctl,
.compat_ioctl = compat_ptr_ioctl,
@@ -1991,19 +1898,19 @@ const struct file_operations urandom_fops = {
.llseek = noop_llseek,
};
-SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
- unsigned int, flags)
+SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int,
+ flags)
{
int ret;
- if (flags & ~(GRND_NONBLOCK|GRND_RANDOM|GRND_INSECURE))
+ if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
return -EINVAL;
/*
* Requesting insecure and blocking randomness at the same time makes
* no sense.
*/
- if ((flags & (GRND_INSECURE|GRND_RANDOM)) == (GRND_INSECURE|GRND_RANDOM))
+ if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
return -EINVAL;
if (count > INT_MAX)
@@ -2030,7 +1937,7 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
#include <linux/sysctl.h>
static int min_write_thresh;
-static int max_write_thresh = INPUT_POOL_WORDS * 32;
+static int max_write_thresh = POOL_BITS;
static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];
@@ -2043,8 +1950,8 @@ static char sysctl_bootid[16];
* returned as an ASCII string in the standard UUID format; if via the
* sysctl system call, as 16 bytes of binary data.
*/
-static int proc_do_uuid(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_do_uuid(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
unsigned char buf[64], tmp_uuid[16], *uuid;
@@ -2073,13 +1980,13 @@ static int proc_do_uuid(struct ctl_table *table, int write,
/*
* Return entropy available scaled to integral bits
*/
-static int proc_do_entropy(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_do_entropy(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
struct ctl_table fake_table;
int entropy_count;
- entropy_count = *(int *)table->data >> ENTROPY_SHIFT;
+ entropy_count = *(int *)table->data >> POOL_ENTROPY_SHIFT;
fake_table.data = &entropy_count;
fake_table.maxlen = sizeof(entropy_count);
@@ -2087,9 +1994,8 @@ static int proc_do_entropy(struct ctl_table *table, int write,
return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
}
-static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
-extern struct ctl_table random_table[];
-struct ctl_table random_table[] = {
+static int sysctl_poolsize = POOL_BITS;
+static struct ctl_table random_table[] = {
{
.procname = "poolsize",
.data = &sysctl_poolsize,
@@ -2151,7 +2057,18 @@ struct ctl_table random_table[] = {
#endif
{ }
};
-#endif /* CONFIG_SYSCTL */
+
+/*
+ * rand_initialize() is called before sysctl_init(),
+ * so we cannot call register_sysctl_init() in rand_initialize()
+ */
+static int __init random_sysctls_init(void)
+{
+ register_sysctl_init("kernel/random", random_table);
+ return 0;
+}
+device_initcall(random_sysctls_init);
+#endif /* CONFIG_SYSCTL */
struct batched_entropy {
union {
@@ -2171,7 +2088,7 @@ struct batched_entropy {
* point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
};
u64 get_random_u64(void)
@@ -2196,7 +2113,7 @@ u64 get_random_u64(void)
EXPORT_SYMBOL(get_random_u64);
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
- .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
+ .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
};
u32 get_random_u32(void)
{
@@ -2228,7 +2145,7 @@ static void invalidate_batched_entropy(void)
int cpu;
unsigned long flags;
- for_each_possible_cpu (cpu) {
+ for_each_possible_cpu(cpu) {
struct batched_entropy *batched_entropy;
batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
@@ -2257,8 +2174,7 @@ static void invalidate_batched_entropy(void)
* Return: A page aligned address within [start, start + range). On error,
* @start is returned.
*/
-unsigned long
-randomize_page(unsigned long start, unsigned long range)
+unsigned long randomize_page(unsigned long start, unsigned long range)
{
if (!PAGE_ALIGNED(start)) {
range -= PAGE_ALIGN(start) - start;
@@ -2283,26 +2199,26 @@ randomize_page(unsigned long start, unsigned long range)
void add_hwgenerator_randomness(const char *buffer, size_t count,
size_t entropy)
{
- struct entropy_store *poolp = &input_pool;
-
if (unlikely(crng_init == 0)) {
size_t ret = crng_fast_load(buffer, count);
- mix_pool_bytes(poolp, buffer, ret);
+ mix_pool_bytes(buffer, ret);
count -= ret;
buffer += ret;
if (!count || crng_init == 0)
return;
}
- /* Suspend writing if we're above the trickle threshold.
+ /* Throttle writing if we're above the trickle threshold.
* We'll be woken up again once below random_write_wakeup_thresh,
- * or when the calling thread is about to terminate.
+ * when the calling thread is about to terminate, or once
+	 * CRNG_RESEED_INTERVAL has elapsed.
*/
- wait_event_interruptible(random_write_wait,
+ wait_event_interruptible_timeout(random_write_wait,
!system_wq || kthread_should_stop() ||
- ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
- mix_pool_bytes(poolp, buffer, count);
- credit_entropy_bits(poolp, entropy);
+ POOL_ENTROPY_BITS() <= random_write_wakeup_bits,
+ CRNG_RESEED_INTERVAL);
+ mix_pool_bytes(buffer, count);
+ credit_entropy_bits(entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
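
The switch to wait_event_interruptible_timeout() bounds how long a hardware-RNG feeder can sleep: it still wakes when the pool drains or the thread is stopping, but now also after CRNG_RESEED_INTERVAL, so seed material keeps flowing even when nobody is reading. A sketch of the same bounded-wait shape with POSIX primitives (hypothetical names, not the kernel API):

    #include <pthread.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    static int pool_bits;                   /* current entropy estimate */
    static const int wakeup_bits = 1024;

    /* Sleep until the pool drains below the threshold or the reseed
     * interval expires, whichever comes first. */
    static void throttle_feeder(int reseed_interval_sec)
    {
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += reseed_interval_sec;

        pthread_mutex_lock(&lock);
        while (pool_bits > wakeup_bits) {
            if (pthread_cond_timedwait(&cond, &lock, &deadline) != 0)
                break;      /* ETIMEDOUT: feed anyway to force a reseed */
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        throttle_feeder(1);     /* returns at once: the pool starts empty */
        return 0;
    }
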
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 660c5c388c29..2359889a35a0 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1958,7 +1958,7 @@ static void virtcons_remove(struct virtio_device *vdev)
spin_unlock_irq(&pdrvdata_lock);
/* Disable interrupts for vqs */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
/* Finish up work that's lined up */
if (use_multiport(portdev))
cancel_work_sync(&portdev->control_work);
@@ -2148,7 +2148,7 @@ static int virtcons_freeze(struct virtio_device *vdev)
portdev = vdev->priv;
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
if (use_multiport(portdev))
virtqueue_disable_cb(portdev->c_ivq);
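
The virtio_console hunks replace open-coded vdev->config->reset(vdev) calls with the virtio_reset_device() helper, giving the core one choke point for reset ordering and tracing. The wrapper is, to a first approximation, just this thin; a sketch of the shape, not a claim about the exact upstream body:

    struct virtio_device;

    struct virtio_config_ops {
        void (*reset)(struct virtio_device *vdev);
    };

    struct virtio_device {
        const struct virtio_config_ops *config;
    };

    static inline void reset_device_sketch(struct virtio_device *vdev)
    {
        vdev->config->reset(vdev);  /* delegate to the transport's reset */
    }
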
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index c91931c94888..ad4256d54361 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -169,6 +169,14 @@ config COMMON_CLK_CDCE706
help
This driver supports TI CDCE706 programmable 3-PLL clock synthesizer.
+config COMMON_CLK_TPS68470
+ tristate "Clock Driver for TI TPS68470 PMIC"
+ depends on I2C
+ depends on INTEL_SKL_INT3472 || COMPILE_TEST
+ select REGMAP_I2C
+ help
+ This driver supports the clocks provided by the TPS68470 PMIC.
+
config COMMON_CLK_CDCE925
tristate "Clock driver for TI CDCE913/925/937/949 devices"
depends on I2C
@@ -221,6 +229,13 @@ config COMMON_CLK_GEMINI
This driver supports the SoC clocks on the Cortina Systems Gemini
platform, also known as SL3516 or CS3516.
+config COMMON_CLK_LAN966X
+ bool "Generic Clock Controller driver for LAN966X SoC"
+ help
+	  This driver provides support for the Generic Clock Controller (GCK)
+	  on the LAN966X SoC. The GCK generates and supplies clocks to various
+	  peripherals within the SoC.
+
config COMMON_CLK_ASPEED
bool "Clock driver for Aspeed BMC SoCs"
depends on ARCH_ASPEED || COMPILE_TEST
@@ -339,16 +354,6 @@ config COMMON_CLK_STM32MP157
help
Support for stm32mp157 SoC family clocks
-config COMMON_CLK_STM32MP157_SCMI
- bool "stm32mp157 Clock driver with Trusted Firmware"
- depends on COMMON_CLK_STM32MP157
- select COMMON_CLK_SCMI
- select ARM_SCMI_PROTOCOL
- default y
- help
- Support for stm32mp157 SoC family clocks with Trusted Firmware using
- SCMI protocol.
-
config COMMON_CLK_STM32F
def_bool COMMON_CLK && (MACH_STM32F429 || MACH_STM32F469 || MACH_STM32F746)
help
@@ -418,6 +423,7 @@ source "drivers/clk/sunxi-ng/Kconfig"
source "drivers/clk/tegra/Kconfig"
source "drivers/clk/ti/Kconfig"
source "drivers/clk/uniphier/Kconfig"
+source "drivers/clk/visconti/Kconfig"
source "drivers/clk/x86/Kconfig"
source "drivers/clk/xilinx/Kconfig"
source "drivers/clk/zynqmp/Kconfig"
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index a9bb2478fbdd..16e588630472 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o
obj-$(CONFIG_CLK_HSDK) += clk-hsdk-pll.o
obj-$(CONFIG_COMMON_CLK_K210) += clk-k210.o
obj-$(CONFIG_LMK04832) += clk-lmk04832.o
+obj-$(CONFIG_COMMON_CLK_LAN966X) += clk-lan966x.o
obj-$(CONFIG_COMMON_CLK_LOCHNAGAR) += clk-lochnagar.o
obj-$(CONFIG_COMMON_CLK_MAX77686) += clk-max77686.o
obj-$(CONFIG_COMMON_CLK_MAX9485) += clk-max9485.o
@@ -63,6 +64,7 @@ obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
obj-$(CONFIG_COMMON_CLK_STM32F) += clk-stm32f4.o
obj-$(CONFIG_COMMON_CLK_STM32H7) += clk-stm32h7.o
obj-$(CONFIG_COMMON_CLK_STM32MP157) += clk-stm32mp1.o
+obj-$(CONFIG_COMMON_CLK_TPS68470) += clk-tps68470.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
obj-$(CONFIG_COMMON_CLK_VC5) += clk-versaclock5.o
@@ -111,12 +113,13 @@ obj-y += sprd/
obj-$(CONFIG_ARCH_STI) += st/
obj-$(CONFIG_SOC_STARFIVE) += starfive/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
-obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/
+obj-y += sunxi-ng/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_CLK_UNIPHIER) += uniphier/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-y += versatile/
+obj-$(CONFIG_COMMON_CLK_VISCONTI) += visconti/
ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_X86) += x86/
endif
diff --git a/drivers/clk/clk-bm1880.c b/drivers/clk/clk-bm1880.c
index e6d6599d310a..fad78a22218e 100644
--- a/drivers/clk/clk-bm1880.c
+++ b/drivers/clk/clk-bm1880.c
@@ -522,14 +522,6 @@ static struct clk_hw *bm1880_clk_register_pll(struct bm1880_pll_hw_clock *pll_cl
return hw;
}
-static void bm1880_clk_unregister_pll(struct clk_hw *hw)
-{
- struct bm1880_pll_hw_clock *pll_hw = to_bm1880_pll_clk(hw);
-
- clk_hw_unregister(hw);
- kfree(pll_hw);
-}
-
static int bm1880_clk_register_plls(struct bm1880_pll_hw_clock *clks,
int num_clks,
struct bm1880_clock_data *data)
@@ -555,7 +547,7 @@ static int bm1880_clk_register_plls(struct bm1880_pll_hw_clock *clks,
err_clk:
while (i--)
- bm1880_clk_unregister_pll(data->hw_data.hws[clks[i].pll.id]);
+ clk_hw_unregister(data->hw_data.hws[clks[i].pll.id]);
return PTR_ERR(hw);
}
@@ -695,14 +687,6 @@ static struct clk_hw *bm1880_clk_register_div(struct bm1880_div_hw_clock *div_cl
return hw;
}
-static void bm1880_clk_unregister_div(struct clk_hw *hw)
-{
- struct bm1880_div_hw_clock *div_hw = to_bm1880_div_clk(hw);
-
- clk_hw_unregister(hw);
- kfree(div_hw);
-}
-
static int bm1880_clk_register_divs(struct bm1880_div_hw_clock *clks,
int num_clks,
struct bm1880_clock_data *data)
@@ -729,7 +713,7 @@ static int bm1880_clk_register_divs(struct bm1880_div_hw_clock *clks,
err_clk:
while (i--)
- bm1880_clk_unregister_div(data->hw_data.hws[clks[i].div.id]);
+ clk_hw_unregister(data->hw_data.hws[clks[i].div.id]);
return PTR_ERR(hw);
}
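
The deleted bm1880_clk_unregister_pll()/_div() helpers called kfree() on pointers recovered via container_of(), but the bm1880 hw-clock descriptors live in static tables, so those kfree() calls freed memory that was never allocated; plain clk_hw_unregister() is the whole job. A reduced illustration of the anti-pattern (hypothetical names):

    #include <stddef.h>
    #include <stdlib.h>

    struct hw { int id; };
    struct hw_clock { struct hw hw; };   /* hw embedded in a bigger struct */

    #define to_hw_clock(h) \
        ((struct hw_clock *)((char *)(h) - offsetof(struct hw_clock, hw)))

    static struct hw_clock table[2];     /* static storage, not from malloc() */

    static void broken_unregister(struct hw *h)
    {
        free(to_hw_clock(h));   /* BUG: frees memory malloc() never returned */
    }

    int main(void)
    {
        (void)table;
        (void)broken_unregister; /* calling it on &table[0].hw would be UB */
        return 0;
    }
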
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 070dc47e95a1..64283807600b 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -7,6 +7,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -222,3 +223,37 @@ void clk_hw_unregister_gate(struct clk_hw *hw)
kfree(gate);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_gate);
+
+static void devm_clk_hw_release_gate(struct device *dev, void *res)
+{
+ clk_hw_unregister_gate(*(struct clk_hw **)res);
+}
+
+struct clk_hw *__devm_clk_hw_register_gate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock)
+{
+ struct clk_hw **ptr, *hw;
+
+ ptr = devres_alloc(devm_clk_hw_release_gate, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ hw = __clk_hw_register_gate(dev, np, name, parent_name, parent_hw,
+ parent_data, flags, reg, bit_idx,
+ clk_gate_flags, lock);
+
+ if (!IS_ERR(hw)) {
+ *ptr = hw;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return hw;
+}
+EXPORT_SYMBOL_GPL(__devm_clk_hw_register_gate);
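
__devm_clk_hw_register_gate() follows the standard devres recipe: devres_alloc() a release record, attempt the real registration, then devres_add() the record so the release callback fires at driver detach, or devres_free() it if registration failed. The same recipe, sketched for an arbitrary resource; thing_acquire()/thing_release() are hypothetical stand-ins while the devres calls are the kernel's:

    static void devm_thing_release(struct device *dev, void *res)
    {
        thing_release(*(struct thing **)res);
    }

    struct thing *devm_thing_acquire(struct device *dev)
    {
        struct thing **ptr, *t;

        ptr = devres_alloc(devm_thing_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
            return ERR_PTR(-ENOMEM);

        t = thing_acquire(dev);
        if (!IS_ERR(t)) {
            *ptr = t;
            devres_add(dev, ptr);   /* release runs automatically on unbind */
        } else {
            devres_free(ptr);       /* nothing to manage, drop the record */
        }
        return t;
    }
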
diff --git a/drivers/clk/clk-gemini.c b/drivers/clk/clk-gemini.c
index b51069e794ff..a23fa6d47ef1 100644
--- a/drivers/clk/clk-gemini.c
+++ b/drivers/clk/clk-gemini.c
@@ -50,7 +50,7 @@ static DEFINE_SPINLOCK(gemini_clk_lock);
#define PCI_DLL_TAP_SEL_MASK 0x1f
/**
- * struct gemini_data_data - Gemini gated clocks
+ * struct gemini_gate_data - Gemini gated clocks
* @bit_idx: the bit used to gate this clock in the clock register
* @name: the clock name
* @parent_name: the name of the parent clock
diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c
new file mode 100644
index 000000000000..d1535ac13e89
--- /dev/null
+++ b/drivers/clk/clk-lan966x.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Microchip LAN966x SoC Clock driver.
+ *
+ * Copyright (C) 2021 Microchip Technology, Inc. and its subsidiaries
+ *
+ * Author: Kavyasree Kotagiri <kavyasree.kotagiri@microchip.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/microchip,lan966x.h>
+
+#define GCK_ENA BIT(0)
+#define GCK_SRC_SEL GENMASK(9, 8)
+#define GCK_PRESCALER GENMASK(23, 16)
+
+#define DIV_MAX 255
+
+static const char *clk_names[N_CLOCKS] = {
+ "qspi0", "qspi1", "qspi2", "sdmmc0",
+ "pi", "mcan0", "mcan1", "flexcom0",
+ "flexcom1", "flexcom2", "flexcom3",
+ "flexcom4", "timer1", "usb_refclk",
+};
+
+struct lan966x_gck {
+ struct clk_hw hw;
+ void __iomem *reg;
+};
+#define to_lan966x_gck(hw) container_of(hw, struct lan966x_gck, hw)
+
+static const struct clk_parent_data lan966x_gck_pdata[] = {
+ { .fw_name = "cpu", },
+ { .fw_name = "ddr", },
+ { .fw_name = "sys", },
+};
+
+static struct clk_init_data init = {
+ .parent_data = lan966x_gck_pdata,
+ .num_parents = ARRAY_SIZE(lan966x_gck_pdata),
+};
+
+struct clk_gate_soc_desc {
+ const char *name;
+ int bit_idx;
+};
+
+static const struct clk_gate_soc_desc clk_gate_desc[] = {
+ { "uhphs", 11 },
+ { "udphs", 10 },
+ { "mcramc", 9 },
+ { "hmatrix", 8 },
+ { }
+};
+
+static DEFINE_SPINLOCK(clk_gate_lock);
+static void __iomem *base;
+
+static int lan966x_gck_enable(struct clk_hw *hw)
+{
+ struct lan966x_gck *gck = to_lan966x_gck(hw);
+ u32 val = readl(gck->reg);
+
+ val |= GCK_ENA;
+ writel(val, gck->reg);
+
+ return 0;
+}
+
+static void lan966x_gck_disable(struct clk_hw *hw)
+{
+ struct lan966x_gck *gck = to_lan966x_gck(hw);
+ u32 val = readl(gck->reg);
+
+ val &= ~GCK_ENA;
+ writel(val, gck->reg);
+}
+
+static int lan966x_gck_set_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct lan966x_gck *gck = to_lan966x_gck(hw);
+ u32 div, val = readl(gck->reg);
+
+ if (rate == 0 || parent_rate == 0)
+ return -EINVAL;
+
+	/* Set prescaler */
+ div = parent_rate / rate;
+ val &= ~GCK_PRESCALER;
+ val |= FIELD_PREP(GCK_PRESCALER, (div - 1));
+ writel(val, gck->reg);
+
+ return 0;
+}
+
+static long lan966x_gck_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned int div;
+
+ if (rate == 0 || *parent_rate == 0)
+ return -EINVAL;
+
+ if (rate >= *parent_rate)
+ return *parent_rate;
+
+ div = DIV_ROUND_CLOSEST(*parent_rate, rate);
+
+ return *parent_rate / div;
+}
+
+static unsigned long lan966x_gck_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct lan966x_gck *gck = to_lan966x_gck(hw);
+ u32 div, val = readl(gck->reg);
+
+ div = FIELD_GET(GCK_PRESCALER, val);
+
+ return parent_rate / (div + 1);
+}
+
+static int lan966x_gck_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_hw *parent;
+ int i;
+
+ for (i = 0; i < clk_hw_get_num_parents(hw); ++i) {
+ parent = clk_hw_get_parent_by_index(hw, i);
+ if (!parent)
+ continue;
+
+ /* Allowed prescaler divider range is 0-255 */
+ if (clk_hw_get_rate(parent) / req->rate <= DIV_MAX) {
+ req->best_parent_hw = parent;
+ req->best_parent_rate = clk_hw_get_rate(parent);
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static u8 lan966x_gck_get_parent(struct clk_hw *hw)
+{
+ struct lan966x_gck *gck = to_lan966x_gck(hw);
+ u32 val = readl(gck->reg);
+
+ return FIELD_GET(GCK_SRC_SEL, val);
+}
+
+static int lan966x_gck_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct lan966x_gck *gck = to_lan966x_gck(hw);
+ u32 val = readl(gck->reg);
+
+ val &= ~GCK_SRC_SEL;
+ val |= FIELD_PREP(GCK_SRC_SEL, index);
+ writel(val, gck->reg);
+
+ return 0;
+}
+
+static const struct clk_ops lan966x_gck_ops = {
+ .enable = lan966x_gck_enable,
+ .disable = lan966x_gck_disable,
+ .set_rate = lan966x_gck_set_rate,
+ .round_rate = lan966x_gck_round_rate,
+ .recalc_rate = lan966x_gck_recalc_rate,
+ .determine_rate = lan966x_gck_determine_rate,
+ .set_parent = lan966x_gck_set_parent,
+ .get_parent = lan966x_gck_get_parent,
+};
+
+static struct clk_hw *lan966x_gck_clk_register(struct device *dev, int i)
+{
+ struct lan966x_gck *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ priv->reg = base + (i * 4);
+ priv->hw.init = &init;
+ ret = devm_clk_hw_register(dev, &priv->hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return &priv->hw;
+};
+
+static int lan966x_gate_clk_register(struct device *dev,
+ struct clk_hw_onecell_data *hw_data,
+ void __iomem *gate_base)
+{
+ int i;
+
+ for (i = GCK_GATE_UHPHS; i < N_CLOCKS; ++i) {
+ int idx = i - GCK_GATE_UHPHS;
+
+ hw_data->hws[i] =
+ devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name,
+ "lan966x", 0, base,
+ clk_gate_desc[idx].bit_idx,
+ 0, &clk_gate_lock);
+
+ if (IS_ERR(hw_data->hws[i]))
+ return dev_err_probe(dev, PTR_ERR(hw_data->hws[i]),
+ "failed to register %s clock\n",
+ clk_gate_desc[idx].name);
+ }
+
+ return 0;
+}
+
+static int lan966x_clk_probe(struct platform_device *pdev)
+{
+ struct clk_hw_onecell_data *hw_data;
+ struct device *dev = &pdev->dev;
+ void __iomem *gate_base;
+ struct resource *res;
+ int i, ret;
+
+ hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, N_CLOCKS),
+ GFP_KERNEL);
+ if (!hw_data)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ init.ops = &lan966x_gck_ops;
+
+ hw_data->num = GCK_GATE_UHPHS;
+
+ for (i = 0; i < GCK_GATE_UHPHS; i++) {
+ init.name = clk_names[i];
+ hw_data->hws[i] = lan966x_gck_clk_register(dev, i);
+ if (IS_ERR(hw_data->hws[i])) {
+ dev_err(dev, "failed to register %s clock\n",
+ init.name);
+ return PTR_ERR(hw_data->hws[i]);
+ }
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res) {
+ gate_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gate_base))
+ return PTR_ERR(gate_base);
+
+ hw_data->num = N_CLOCKS;
+
+ ret = lan966x_gate_clk_register(dev, hw_data, gate_base);
+ if (ret)
+ return ret;
+ }
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, hw_data);
+}
+
+static const struct of_device_id lan966x_clk_dt_ids[] = {
+ { .compatible = "microchip,lan966x-gck", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lan966x_clk_dt_ids);
+
+static struct platform_driver lan966x_clk_driver = {
+ .probe = lan966x_clk_probe,
+ .driver = {
+ .name = "lan966x-clk",
+ .of_match_table = lan966x_clk_dt_ids,
+ },
+};
+builtin_platform_driver(lan966x_clk_driver);
+
+MODULE_AUTHOR("Kavyasree Kotagiri <kavyasree.kotagiri@microchip.com>");
+MODULE_DESCRIPTION("LAN966X clock driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c
index 57ae183982d8..f7b41366666e 100644
--- a/drivers/clk/clk-si5341.c
+++ b/drivers/clk/clk-si5341.c
@@ -1740,7 +1740,7 @@ static int si5341_probe(struct i2c_client *client,
clk_prepare(data->clk[i].hw.clk);
}
- err = of_clk_add_hw_provider(client->dev.of_node, of_clk_si5341_get,
+ err = devm_of_clk_add_hw_provider(&client->dev, of_clk_si5341_get,
data);
if (err) {
dev_err(&client->dev, "unable to add clk provider\n");
diff --git a/drivers/clk/clk-stm32f4.c b/drivers/clk/clk-stm32f4.c
index af46176ad053..473dfe632cc5 100644
--- a/drivers/clk/clk-stm32f4.c
+++ b/drivers/clk/clk-stm32f4.c
@@ -129,7 +129,6 @@ static const struct stm32f4_gate_data stm32f429_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
- { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
};
static const struct stm32f4_gate_data stm32f469_gates[] __initconst = {
@@ -211,7 +210,6 @@ static const struct stm32f4_gate_data stm32f469_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 20, "spi5", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
- { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
};
static const struct stm32f4_gate_data stm32f746_gates[] __initconst = {
@@ -286,7 +284,6 @@ static const struct stm32f4_gate_data stm32f746_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" },
- { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
};
static const struct stm32f4_gate_data stm32f769_gates[] __initconst = {
@@ -364,7 +361,6 @@ static const struct stm32f4_gate_data stm32f769_gates[] __initconst = {
{ STM32F4_RCC_APB2ENR, 21, "spi6", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 22, "sai1", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 23, "sai2", "apb2_div" },
- { STM32F4_RCC_APB2ENR, 26, "ltdc", "apb2_div" },
{ STM32F4_RCC_APB2ENR, 30, "mdio", "apb2_div" },
};
diff --git a/drivers/clk/clk-stm32mp1.c b/drivers/clk/clk-stm32mp1.c
index 4bd1fe7d8af4..863274aa50e3 100644
--- a/drivers/clk/clk-stm32mp1.c
+++ b/drivers/clk/clk-stm32mp1.c
@@ -2253,8 +2253,6 @@ static int stm32_rcc_reset_init(struct device *dev, void __iomem *base,
const struct stm32_rcc_match_data *data = match->data;
struct stm32_reset_data *reset_data = NULL;
- data = match->data;
-
reset_data = kzalloc(sizeof(*reset_data), GFP_KERNEL);
if (!reset_data)
return -ENOMEM;
diff --git a/drivers/clk/clk-tps68470.c b/drivers/clk/clk-tps68470.c
new file mode 100644
index 000000000000..e5fbefd6ac2d
--- /dev/null
+++ b/drivers/clk/clk-tps68470.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Clock driver for TPS68470 PMIC
+ *
+ * Copyright (c) 2021 Red Hat Inc.
+ * Copyright (C) 2018 Intel Corporation
+ *
+ * Authors:
+ * Hans de Goede <hdegoede@redhat.com>
+ * Zaikuo Wang <zaikuo.wang@intel.com>
+ * Tianshu Qiu <tian.shu.qiu@intel.com>
+ * Jian Xu Zheng <jian.xu.zheng@intel.com>
+ * Yuning Pu <yuning.pu@intel.com>
+ * Antti Laakso <antti.laakso@intel.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/kernel.h>
+#include <linux/mfd/tps68470.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/tps68470.h>
+#include <linux/regmap.h>
+
+#define TPS68470_CLK_NAME "tps68470-clk"
+
+#define to_tps68470_clkdata(clkd) \
+ container_of(clkd, struct tps68470_clkdata, clkout_hw)
+
+static struct tps68470_clkout_freqs {
+ unsigned long freq;
+ unsigned int xtaldiv;
+ unsigned int plldiv;
+ unsigned int postdiv;
+ unsigned int buckdiv;
+ unsigned int boostdiv;
+} clk_freqs[] = {
+/*
+ * The PLL is used to multiply the crystal oscillator
+ * frequency range of 3 MHz to 27 MHz by a programmable
+ * factor of F = (M/N)*(1/P) such that the output
+ * available at the HCLK_A or HCLK_B pins is in the range
+ * of 4 MHz to 64 MHz in increments of 0.1 MHz.
+ *
+ * hclk_# = osc_in * (((plldiv*2)+320) / (xtaldiv+30)) * (1 / 2^postdiv)
+ *
+ * PLL_REF_CLK should be as close as possible to 100 kHz
+ * PLL_REF_CLK = input clk / (XTALDIV[7:0] + 30)
+ *
+ * PLL_VCO_CLK = PLL_REF_CLK * (plldiv*2 + 320)
+ *
+ * BOOST should be as close as possible to 2 MHz
+ * BOOST = PLL_VCO_CLK / (BOOSTDIV[4:0] + 16)
+ *
+ * BUCK should be as close as possible to 5.2 MHz
+ * BUCK = PLL_VCO_CLK / (BUCKDIV[3:0] + 5)
+ *
+ * osc_in   xtaldiv  plldiv  postdiv  hclk_#
+ * 20 MHz   170      32      1        19.2 MHz
+ * 20 MHz   170      40      1        20 MHz
+ * 20 MHz   170      80      1        24 MHz
+ */
+ { 19200000, 170, 32, 1, 2, 3 },
+ { 20000000, 170, 40, 1, 3, 4 },
+ { 24000000, 170, 80, 1, 4, 8 },
+};
+
+struct tps68470_clkdata {
+ struct clk_hw clkout_hw;
+ struct regmap *regmap;
+ unsigned long rate;
+};
+
+static int tps68470_clk_is_prepared(struct clk_hw *hw)
+{
+ struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+ int val;
+
+ if (regmap_read(clkdata->regmap, TPS68470_REG_PLLCTL, &val))
+ return 0;
+
+ return val & TPS68470_PLL_EN_MASK;
+}
+
+static int tps68470_clk_prepare(struct clk_hw *hw)
+{
+ struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+
+ regmap_write(clkdata->regmap, TPS68470_REG_CLKCFG1,
+ (TPS68470_PLL_OUTPUT_ENABLE << TPS68470_OUTPUT_A_SHIFT) |
+ (TPS68470_PLL_OUTPUT_ENABLE << TPS68470_OUTPUT_B_SHIFT));
+
+ regmap_update_bits(clkdata->regmap, TPS68470_REG_PLLCTL,
+ TPS68470_PLL_EN_MASK, TPS68470_PLL_EN_MASK);
+
+ /*
+	 * The PLLCTL reg lock bit is set by the PMIC after approx. 4 ms and
+ * does not indicate a true lock, so just wait 4 ms.
+ */
+ usleep_range(4000, 5000);
+
+ return 0;
+}
+
+static void tps68470_clk_unprepare(struct clk_hw *hw)
+{
+ struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+
+ /* Disable clock first ... */
+ regmap_update_bits(clkdata->regmap, TPS68470_REG_PLLCTL, TPS68470_PLL_EN_MASK, 0);
+
+ /* ... and then tri-state the clock outputs. */
+ regmap_write(clkdata->regmap, TPS68470_REG_CLKCFG1, 0);
+}
+
+static unsigned long tps68470_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+
+ return clkdata->rate;
+}
+
+/*
+ * This returns the index of the clk_freqs[] cfg with the closest rate for
+ * use in tps68470_clk_round_rate(). tps68470_clk_set_rate() checks that
+ * the rate of the returned cfg is an exact match.
+ */
+static unsigned int tps68470_clk_cfg_lookup(unsigned long rate)
+{
+ long diff, best_diff = LONG_MAX;
+ unsigned int i, best_idx = 0;
+
+ for (i = 0; i < ARRAY_SIZE(clk_freqs); i++) {
+ diff = clk_freqs[i].freq - rate;
+ if (diff == 0)
+ return i;
+
+ diff = abs(diff);
+ if (diff < best_diff) {
+ best_diff = diff;
+ best_idx = i;
+ }
+ }
+
+ return best_idx;
+}
+
+static long tps68470_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ unsigned int idx = tps68470_clk_cfg_lookup(rate);
+
+ return clk_freqs[idx].freq;
+}
+
+static int tps68470_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct tps68470_clkdata *clkdata = to_tps68470_clkdata(hw);
+ unsigned int idx = tps68470_clk_cfg_lookup(rate);
+
+ if (rate != clk_freqs[idx].freq)
+ return -EINVAL;
+
+ regmap_write(clkdata->regmap, TPS68470_REG_BOOSTDIV, clk_freqs[idx].boostdiv);
+ regmap_write(clkdata->regmap, TPS68470_REG_BUCKDIV, clk_freqs[idx].buckdiv);
+ regmap_write(clkdata->regmap, TPS68470_REG_PLLSWR, TPS68470_PLLSWR_DEFAULT);
+ regmap_write(clkdata->regmap, TPS68470_REG_XTALDIV, clk_freqs[idx].xtaldiv);
+ regmap_write(clkdata->regmap, TPS68470_REG_PLLDIV, clk_freqs[idx].plldiv);
+ regmap_write(clkdata->regmap, TPS68470_REG_POSTDIV, clk_freqs[idx].postdiv);
+ regmap_write(clkdata->regmap, TPS68470_REG_POSTDIV2, clk_freqs[idx].postdiv);
+ regmap_write(clkdata->regmap, TPS68470_REG_CLKCFG2, TPS68470_CLKCFG2_DRV_STR_2MA);
+
+ regmap_write(clkdata->regmap, TPS68470_REG_PLLCTL,
+ TPS68470_OSC_EXT_CAP_DEFAULT << TPS68470_OSC_EXT_CAP_SHIFT |
+ TPS68470_CLK_SRC_XTAL << TPS68470_CLK_SRC_SHIFT);
+
+ clkdata->rate = rate;
+
+ return 0;
+}
+
+static const struct clk_ops tps68470_clk_ops = {
+ .is_prepared = tps68470_clk_is_prepared,
+ .prepare = tps68470_clk_prepare,
+ .unprepare = tps68470_clk_unprepare,
+ .recalc_rate = tps68470_clk_recalc_rate,
+ .round_rate = tps68470_clk_round_rate,
+ .set_rate = tps68470_clk_set_rate,
+};
+
+static int tps68470_clk_probe(struct platform_device *pdev)
+{
+ struct tps68470_clk_platform_data *pdata = pdev->dev.platform_data;
+ struct clk_init_data tps68470_clk_initdata = {
+ .name = TPS68470_CLK_NAME,
+ .ops = &tps68470_clk_ops,
+ /* Changing the dividers when the PLL is on is not allowed */
+ .flags = CLK_SET_RATE_GATE,
+ };
+ struct tps68470_clkdata *tps68470_clkdata;
+ int ret;
+
+ tps68470_clkdata = devm_kzalloc(&pdev->dev, sizeof(*tps68470_clkdata),
+ GFP_KERNEL);
+ if (!tps68470_clkdata)
+ return -ENOMEM;
+
+ tps68470_clkdata->regmap = dev_get_drvdata(pdev->dev.parent);
+ tps68470_clkdata->clkout_hw.init = &tps68470_clk_initdata;
+
+ /* Set initial rate */
+ tps68470_clk_set_rate(&tps68470_clkdata->clkout_hw, clk_freqs[0].freq, 0);
+
+ ret = devm_clk_hw_register(&pdev->dev, &tps68470_clkdata->clkout_hw);
+ if (ret)
+ return ret;
+
+ ret = devm_clk_hw_register_clkdev(&pdev->dev, &tps68470_clkdata->clkout_hw,
+ TPS68470_CLK_NAME, NULL);
+ if (ret)
+ return ret;
+
+ if (pdata) {
+ ret = devm_clk_hw_register_clkdev(&pdev->dev,
+ &tps68470_clkdata->clkout_hw,
+ pdata->consumer_con_id,
+ pdata->consumer_dev_name);
+ }
+
+ return ret;
+}
+
+static struct platform_driver tps68470_clk_driver = {
+ .driver = {
+ .name = TPS68470_CLK_NAME,
+ },
+ .probe = tps68470_clk_probe,
+};
+
+/*
+ * The ACPI tps68470 probe ordering depends on the clk/gpio/regulator drivers
+ * registering before the drivers for the camera sensors that use them bind.
+ * subsys_initcall() ensures this when the drivers are builtin.
+ */
+static int __init tps68470_clk_init(void)
+{
+ return platform_driver_register(&tps68470_clk_driver);
+}
+subsys_initcall(tps68470_clk_init);
+
+static void __exit tps68470_clk_exit(void)
+{
+ platform_driver_unregister(&tps68470_clk_driver);
+}
+module_exit(tps68470_clk_exit);
+
+MODULE_ALIAS("platform:tps68470-clk");
+MODULE_DESCRIPTION("clock driver for TPS68470 pmic");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 566ee2c78709..8de6a22498e7 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -424,19 +424,20 @@ static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
if (entry->hw) {
parent = entry->hw->core;
- /*
- * We have a direct reference but it isn't registered yet?
- * Orphan it and let clk_reparent() update the orphan status
- * when the parent is registered.
- */
- if (!parent)
- parent = ERR_PTR(-EPROBE_DEFER);
} else {
parent = clk_core_get(core, index);
if (PTR_ERR(parent) == -ENOENT && entry->name)
parent = clk_core_lookup(entry->name);
}
+ /*
+ * We have a direct reference but it isn't registered yet?
+ * Orphan it and let clk_reparent() update the orphan status
+ * when the parent is registered.
+ */
+ if (!parent)
+ parent = ERR_PTR(-EPROBE_DEFER);
+
/* Only cache it if it's not an error */
if (!IS_ERR(parent))
entry->core = parent;
@@ -2965,7 +2966,9 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
{
struct clk_core *child;
+ clk_pm_runtime_get(c);
clk_summary_show_one(s, c, level);
+ clk_pm_runtime_put(c);
hlist_for_each_entry(child, &c->children, child_node)
clk_summary_show_subtree(s, child, level + 1);
@@ -3217,6 +3220,42 @@ static int current_parent_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(current_parent);
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
+static ssize_t current_parent_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct clk_core *core = s->private;
+ struct clk_core *parent;
+ u8 idx;
+ int err;
+
+ err = kstrtou8_from_user(ubuf, count, 0, &idx);
+ if (err < 0)
+ return err;
+
+ parent = clk_core_get_parent_by_index(core, idx);
+ if (!parent)
+ return -ENOENT;
+
+ clk_prepare_lock();
+ err = clk_core_set_parent_nolock(core, parent);
+ clk_prepare_unlock();
+ if (err)
+ return err;
+
+ return count;
+}
+
+static const struct file_operations current_parent_rw_fops = {
+ .open = current_parent_open,
+ .write = current_parent_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif
+
static int clk_duty_cycle_show(struct seq_file *s, void *data)
{
struct clk_core *core = s->private;
@@ -3282,8 +3321,12 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
debugfs_create_file("clk_prepare_enable", 0644, root, core,
&clk_prepare_enable_fops);
-#endif
+ if (core->num_parents > 1)
+ debugfs_create_file("clk_parent", 0644, root, core,
+ &current_parent_rw_fops);
+ else
+#endif
if (core->num_parents > 0)
debugfs_create_file("clk_parent", 0444, root, core,
&current_parent_fops);
@@ -3343,6 +3386,24 @@ static int __init clk_debug_init(void)
{
struct clk_core *core;
+#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
+ pr_warn("\n");
+ pr_warn("********************************************************************\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("** **\n");
+ pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
+ pr_warn("** **\n");
+ pr_warn("** This means that this kernel is built to expose clk operations **\n");
+ pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n");
+ pr_warn("** to userspace, which may compromise security on your system. **\n");
+ pr_warn("** **\n");
+ pr_warn("** If you see this message and you are not debugging the **\n");
+ pr_warn("** kernel, report this immediately to your vendor! **\n");
+ pr_warn("** **\n");
+ pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
+ pr_warn("********************************************************************\n");
+#endif
+
rootdir = debugfs_create_dir("clk", NULL);
debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
@@ -3413,9 +3474,6 @@ static int __clk_core_init(struct clk_core *core)
unsigned long rate;
int phase;
- if (!core)
- return -EINVAL;
-
clk_prepare_lock();
/*
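
current_parent_write() is the usual debugfs write pattern: parse a small integer directly from the user buffer with kstrtou8_from_user(), act under the subsystem lock, and return the consumed byte count on success. The same pattern for a hypothetical attribute; mydev and mydev_set_level() are stand-ins, while the parsing call is real kernel API:

    static ssize_t level_write(struct file *file, const char __user *ubuf,
                               size_t count, loff_t *ppos)
    {
        struct seq_file *s = file->private_data;
        struct mydev *dev = s->private;         /* hypothetical private data */
        u8 level;
        int err;

        err = kstrtou8_from_user(ubuf, count, 0, &level);
        if (err < 0)
            return err;

        mutex_lock(&dev->lock);
        err = mydev_set_level(dev, level);      /* hypothetical setter */
        mutex_unlock(&dev->lock);

        return err ? err : count;   /* everything consumed on success */
    }
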
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index c55577604e16..021355a24708 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -277,9 +277,9 @@ static const char * const imx8mn_pdm_sels[] = {"osc_24m", "sys_pll2_100m", "audi
static const char * const imx8mn_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", };
-static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m",
- "sys_pll1_200m", "audio_pll2_out", "vpu_pll",
- "sys_pll1_80m", };
+static const char * const imx8mn_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "dummy",
+ "sys_pll1_200m", "audio_pll2_out", "sys_pll2_500m",
+ "dummy", "sys_pll1_80m", };
static const char * const imx8mn_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m",
"sys_pll2_166m", "sys_pll3_out", "audio_pll1_out",
"video_pll1_out", "osc_32k", };
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 12837304545d..c990ad37882b 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -700,7 +700,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev)
hws[IMX8MP_CLK_HDMI_ROOT] = imx_clk_hw_gate4("hdmi_root_clk", "hdmi_axi", ccm_base + 0x45f0, 0);
hws[IMX8MP_CLK_TSENSOR_ROOT] = imx_clk_hw_gate4("tsensor_root_clk", "ipg_root", ccm_base + 0x4620, 0);
hws[IMX8MP_CLK_VPU_ROOT] = imx_clk_hw_gate4("vpu_root_clk", "vpu_bus", ccm_base + 0x4630, 0);
- hws[IMX8MP_CLK_AUDIO_ROOT] = imx_clk_hw_gate4("audio_root_clk", "ipg_root", ccm_base + 0x4650, 0);
+ hws[IMX8MP_CLK_AUDIO_ROOT] = imx_clk_hw_gate4("audio_root_clk", "audio_ahb", ccm_base + 0x4650, 0);
hws[IMX8MP_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core",
hws[IMX8MP_CLK_A53_CORE]->clk,
diff --git a/drivers/clk/imx/clk-imx8ulp.c b/drivers/clk/imx/clk-imx8ulp.c
index 6699437e17b8..8eb1af2d6429 100644
--- a/drivers/clk/imx/clk-imx8ulp.c
+++ b/drivers/clk/imx/clk-imx8ulp.c
@@ -559,6 +559,7 @@ static struct platform_driver imx8ulp_clk_driver = {
.probe = imx8ulp_clk_probe,
.driver = {
.name = KBUILD_MODNAME,
+ .suppress_bind_attrs = true,
.of_match_table = imx8ulp_clk_dt_ids,
},
};
diff --git a/drivers/clk/imx/clk-pllv1.c b/drivers/clk/imx/clk-pllv1.c
index 36ffb0525735..93ee81b28fc7 100644
--- a/drivers/clk/imx/clk-pllv1.c
+++ b/drivers/clk/imx/clk-pllv1.c
@@ -8,20 +8,19 @@
#include "clk.h"
+#define MFN_BITS (10)
+#define MFN_SIGN (BIT(MFN_BITS - 1))
+#define MFN_MASK (MFN_SIGN - 1)
+
/**
- * pll v1
+ * struct clk_pllv1 - IMX PLLv1 clock descriptor
*
- * @clk_hw clock source
- * @parent the parent clock name
- * @base base address of pll registers
+ * @hw: clock source
+ * @base: base address of pll registers
+ * @type: type of IMX_PLLV1
*
* PLL clock version 1, found on i.MX1/21/25/27/31/35
*/
-
-#define MFN_BITS (10)
-#define MFN_SIGN (BIT(MFN_BITS - 1))
-#define MFN_MASK (MFN_SIGN - 1)
-
struct clk_pllv1 {
struct clk_hw hw;
void __iomem *base;
diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c
index 20ee9611ba6e..eea32f87c60a 100644
--- a/drivers/clk/imx/clk-pllv3.c
+++ b/drivers/clk/imx/clk-pllv3.c
@@ -247,7 +247,7 @@ static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
div = rate / parent_rate;
temp64 = (u64) (rate - div * parent_rate);
temp64 *= mfd;
- do_div(temp64, parent_rate);
+ temp64 = div64_ul(temp64, parent_rate);
mfn = temp64;
temp64 = (u64)parent_rate;
@@ -277,7 +277,7 @@ static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
div = rate / parent_rate;
temp64 = (u64) (rate - div * parent_rate);
temp64 *= mfd;
- do_div(temp64, parent_rate);
+ temp64 = div64_ul(temp64, parent_rate);
mfn = temp64;
val = readl_relaxed(pll->base);
@@ -334,7 +334,7 @@ static struct clk_pllv3_vf610_mf clk_pllv3_vf610_rate_to_mf(
/* rate = parent_rate * (mfi + mfn/mfd) */
temp64 = rate - parent_rate * mf.mfi;
temp64 *= mf.mfd;
- do_div(temp64, parent_rate);
+ temp64 = div64_ul(temp64, parent_rate);
mf.mfn = temp64;
}
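
The do_div() to div64_ul() conversions matter because do_div() performs a 64-by-32 division and silently truncates its divisor to 32 bits, while div64_ul() keeps the full unsigned long; parent_rate can exceed 32 bits on 64-bit kernels. A userspace sketch of the failure mode, assuming an LP64 target and stand-in helpers for the kernel macros:

    #include <stdint.h>
    #include <stdio.h>

    /* stand-ins: do_div() truncates the divisor, div64_ul() does not */
    static uint64_t fake_do_div(uint64_t n, uint32_t base)        { return n / base; }
    static uint64_t fake_div64_ul(uint64_t n, unsigned long base) { return n / base; }

    int main(void)
    {
        uint64_t n = 10ull << 32;               /* 42949672960 */
        unsigned long big = (5ul << 32) | 5;    /* divisor wider than 32 bits */

        /* correct: 42949672960 / 21474836485 == 1 */
        printf("div64_ul: %llu\n", (unsigned long long)fake_div64_ul(n, big));
        /* wrong: divisor truncated to 5, yielding 8589934592 */
        printf("do_div:   %llu\n", (unsigned long long)fake_do_div(n, (uint32_t)big));
        return 0;
    }
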
diff --git a/drivers/clk/ingenic/jz4760-cgu.c b/drivers/clk/ingenic/jz4760-cgu.c
index 080d492ac95c..8fdd383560fb 100644
--- a/drivers/clk/ingenic/jz4760-cgu.c
+++ b/drivers/clk/ingenic/jz4760-cgu.c
@@ -313,6 +313,16 @@ static const struct ingenic_cgu_clk_info jz4760_cgu_clocks[] = {
.parents = { JZ4760_CLK_H2CLK, },
.gate = { CGU_REG_CLKGR0, 21 },
},
+ [JZ4760_CLK_MDMA] = {
+ "mdma", CGU_CLK_GATE,
+ .parents = { JZ4760_CLK_HCLK, },
+ .gate = { CGU_REG_CLKGR0, 25 },
+ },
+ [JZ4760_CLK_BDMA] = {
+ "bdma", CGU_CLK_GATE,
+ .parents = { JZ4760_CLK_HCLK, },
+ .gate = { CGU_REG_CLKGR1, 0 },
+ },
[JZ4760_CLK_I2C0] = {
"i2c0", CGU_CLK_GATE,
.parents = { JZ4760_CLK_EXT, },
diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c
index 8c6c1208f462..7ef91257630e 100644
--- a/drivers/clk/ingenic/jz4770-cgu.c
+++ b/drivers/clk/ingenic/jz4770-cgu.c
@@ -329,6 +329,11 @@ static const struct ingenic_cgu_clk_info jz4770_cgu_clocks[] = {
.parents = { JZ4770_CLK_H2CLK, },
.gate = { CGU_REG_CLKGR0, 21 },
},
+ [JZ4770_CLK_BDMA] = {
+ "bdma", CGU_CLK_GATE,
+ .parents = { JZ4770_CLK_H2CLK, },
+ .gate = { CGU_REG_CLKGR1, 0 },
+ },
[JZ4770_CLK_I2C0] = {
"i2c0", CGU_CLK_GATE,
.parents = { JZ4770_CLK_EXT, },
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 3ce6fb04d8ff..01ef02c54725 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -344,6 +344,23 @@ config COMMON_CLK_MT7629_HIFSYS
This driver supports MediaTek MT7629 HIFSYS clocks providing
to PCI-E and USB.
+config COMMON_CLK_MT7986
+ bool "Clock driver for MediaTek MT7986"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT7986 basic clocks and clocks
+ required for various peripherals found on the MediaTek MT7986 SoC.
+
+config COMMON_CLK_MT7986_ETHSYS
+ bool "Clock driver for MediaTek MT7986 ETHSYS"
+ depends on COMMON_CLK_MT7986
+ default COMMON_CLK_MT7986
+ help
+ This driver adds support for the Ethernet and SGMII clocks
+ required on the MediaTek MT7986 SoC.
+
config COMMON_CLK_MT8135
bool "Clock driver for MediaTek MT8135"
depends on (ARCH_MEDIATEK && ARM) || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index dc96038a0155..7b0c2646ce4a 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -46,6 +46,10 @@ obj-$(CONFIG_COMMON_CLK_MT7622_AUDSYS) += clk-mt7622-aud.o
obj-$(CONFIG_COMMON_CLK_MT7629) += clk-mt7629.o
obj-$(CONFIG_COMMON_CLK_MT7629_ETHSYS) += clk-mt7629-eth.o
obj-$(CONFIG_COMMON_CLK_MT7629_HIFSYS) += clk-mt7629-hif.o
+obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986-apmixed.o
+obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986-topckgen.o
+obj-$(CONFIG_COMMON_CLK_MT7986) += clk-mt7986-infracfg.o
+obj-$(CONFIG_COMMON_CLK_MT7986_ETHSYS) += clk-mt7986-eth.o
obj-$(CONFIG_COMMON_CLK_MT8135) += clk-mt8135.o
obj-$(CONFIG_COMMON_CLK_MT8167) += clk-mt8167.o
obj-$(CONFIG_COMMON_CLK_MT8167_AUDSYS) += clk-mt8167-aud.o
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index b02d2f74dd0d..5d88b428565b 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -16,28 +16,24 @@
#include "clk-mtk.h"
#include "clk-gate.h"
-static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
+static u32 mtk_get_clockgating(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
u32 val;
regmap_read(cg->regmap, cg->sta_ofs, &val);
- val &= BIT(cg->bit);
+ return val & BIT(cg->bit);
+}
- return val == 0;
+static int mtk_cg_bit_is_cleared(struct clk_hw *hw)
+{
+ return mtk_get_clockgating(hw) == 0;
}
static int mtk_cg_bit_is_set(struct clk_hw *hw)
{
- struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- u32 val;
-
- regmap_read(cg->regmap, cg->sta_ofs, &val);
-
- val &= BIT(cg->bit);
-
- return val != 0;
+ return mtk_get_clockgating(hw) != 0;
}
static void mtk_cg_set_bit(struct clk_hw *hw)
@@ -57,17 +53,15 @@ static void mtk_cg_clr_bit(struct clk_hw *hw)
static void mtk_cg_set_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- u32 cgbit = BIT(cg->bit);
- regmap_update_bits(cg->regmap, cg->sta_ofs, cgbit, cgbit);
+ regmap_set_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
}
static void mtk_cg_clr_bit_no_setclr(struct clk_hw *hw)
{
struct mtk_clk_gate *cg = to_mtk_clk_gate(hw);
- u32 cgbit = BIT(cg->bit);
- regmap_update_bits(cg->regmap, cg->sta_ofs, cgbit, 0);
+ regmap_clear_bits(cg->regmap, cg->sta_ofs, BIT(cg->bit));
}
static int mtk_cg_enable(struct clk_hw *hw)
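
For context on the clk-gate.c cleanup above: regmap_set_bits() and regmap_clear_bits() are thin wrappers around regmap_update_bits(), so the rewritten helpers are behaviour-preserving. A small sketch of the equivalence (the function name is illustrative only):

#include <linux/bits.h>
#include <linux/regmap.h>

static void example_equivalence(struct regmap *map, unsigned int reg, u32 mask)
{
	/* regmap_set_bits(map, reg, mask) is shorthand for: */
	regmap_update_bits(map, reg, mask, mask);

	/* regmap_clear_bits(map, reg, mask) is shorthand for: */
	regmap_update_bits(map, reg, mask, 0);
}
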
diff --git a/drivers/clk/mediatek/clk-mt7986-apmixed.c b/drivers/clk/mediatek/clk-mt7986-apmixed.c
new file mode 100644
index 000000000000..98ec3887585f
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7986-apmixed.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Wenzhen Yu <wenzhen.yu@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mt7986-clk.h>
+#include <linux/clk.h>
+
+#define MT7986_PLL_FMAX (2500UL * MHZ)
+#define CON0_MT7986_RST_BAR BIT(27)
+
+#define PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, \
+ _div_table, _parent_name) \
+ { \
+ .id = _id, .name = _name, .reg = _reg, .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, .flags = _flags, \
+ .rst_bar_mask = CON0_MT7986_RST_BAR, .fmax = MT7986_PLL_FMAX, \
+ .pcwbits = _pcwbits, .pd_reg = _pd_reg, .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, .div_table = _div_table, \
+ .parent_name = _parent_name, \
+ }
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, _pd_reg, \
+ _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift) \
+ PLL_xtal(_id, _name, _reg, _pwr_reg, _en_mask, _flags, _pcwbits, \
+ _pd_reg, _pd_shift, _tuner_reg, _pcw_reg, _pcw_shift, NULL, \
+ "clkxtal")
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_ARMPLL, "armpll", 0x0200, 0x020C, 0x00000001, 0, 32,
+ 0x0200, 4, 0, 0x0204, 0),
+ PLL(CLK_APMIXED_NET2PLL, "net2pll", 0x0210, 0x021C, 0x00000001, 0, 32,
+ 0x0210, 4, 0, 0x0214, 0),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x0220, 0x022C, 0x00000001, 0, 32,
+ 0x0220, 4, 0, 0x0224, 0),
+ PLL(CLK_APMIXED_SGMPLL, "sgmpll", 0x0230, 0x023c, 0x00000001, 0, 32,
+ 0x0230, 4, 0, 0x0234, 0),
+ PLL(CLK_APMIXED_WEDMCUPLL, "wedmcupll", 0x0240, 0x024c, 0x00000001, 0,
+ 32, 0x0240, 4, 0, 0x0244, 0),
+ PLL(CLK_APMIXED_NET1PLL, "net1pll", 0x0250, 0x025c, 0x00000001, 0, 32,
+ 0x0250, 4, 0, 0x0254, 0),
+ PLL(CLK_APMIXED_MPLL, "mpll", 0x0260, 0x0270, 0x00000001, 0, 32, 0x0260,
+ 4, 0, 0x0264, 0),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0278, 0x0288, 0x00000001, 0, 32,
+ 0x0278, 4, 0, 0x027c, 0),
+};
+
+static const struct of_device_id of_match_clk_mt7986_apmixed[] = {
+ { .compatible = "mediatek,mt7986-apmixedsys", },
+ {}
+};
+
+static int clk_mt7986_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(plls));
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+
+ clk_prepare_enable(clk_data->clks[CLK_APMIXED_ARMPLL]);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r) {
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+ goto free_apmixed_data;
+ }
+ return r;
+
+free_apmixed_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static struct platform_driver clk_mt7986_apmixed_drv = {
+ .probe = clk_mt7986_apmixed_probe,
+ .driver = {
+ .name = "clk-mt7986-apmixed",
+ .of_match_table = of_match_clk_mt7986_apmixed,
+ },
+};
+builtin_platform_driver(clk_mt7986_apmixed_drv);
diff --git a/drivers/clk/mediatek/clk-mt7986-eth.c b/drivers/clk/mediatek/clk-mt7986-eth.c
new file mode 100644
index 000000000000..495d023ccad7
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7986-eth.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Wenzhen Yu <wenzhen.yu@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include "clk-mtk.h"
+#include "clk-gate.h"
+
+#include <dt-bindings/clock/mt7986-clk.h>
+
+static const struct mtk_gate_regs sgmii0_cg_regs = {
+ .set_ofs = 0xe4,
+ .clr_ofs = 0xe4,
+ .sta_ofs = 0xe4,
+};
+
+#define GATE_SGMII0(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &sgmii0_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate sgmii0_clks[] __initconst = {
+ GATE_SGMII0(CLK_SGMII0_TX250M_EN, "sgmii0_tx250m_en", "top_xtal", 2),
+ GATE_SGMII0(CLK_SGMII0_RX250M_EN, "sgmii0_rx250m_en", "top_xtal", 3),
+ GATE_SGMII0(CLK_SGMII0_CDR_REF, "sgmii0_cdr_ref", "top_xtal", 4),
+ GATE_SGMII0(CLK_SGMII0_CDR_FB, "sgmii0_cdr_fb", "top_xtal", 5),
+};
+
+static const struct mtk_gate_regs sgmii1_cg_regs = {
+ .set_ofs = 0xe4,
+ .clr_ofs = 0xe4,
+ .sta_ofs = 0xe4,
+};
+
+#define GATE_SGMII1(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &sgmii1_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate sgmii1_clks[] __initconst = {
+ GATE_SGMII1(CLK_SGMII1_TX250M_EN, "sgmii1_tx250m_en", "top_xtal", 2),
+ GATE_SGMII1(CLK_SGMII1_RX250M_EN, "sgmii1_rx250m_en", "top_xtal", 3),
+ GATE_SGMII1(CLK_SGMII1_CDR_REF, "sgmii1_cdr_ref", "top_xtal", 4),
+ GATE_SGMII1(CLK_SGMII1_CDR_FB, "sgmii1_cdr_fb", "top_xtal", 5),
+};
+
+static const struct mtk_gate_regs eth_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x30,
+ .sta_ofs = 0x30,
+};
+
+#define GATE_ETH(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &eth_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_no_setclr_inv, \
+ }
+
+static const struct mtk_gate eth_clks[] __initconst = {
+ GATE_ETH(CLK_ETH_FE_EN, "eth_fe_en", "netsys_2x_sel", 6),
+ GATE_ETH(CLK_ETH_GP2_EN, "eth_gp2_en", "sgm_325m_sel", 7),
+ GATE_ETH(CLK_ETH_GP1_EN, "eth_gp1_en", "sgm_325m_sel", 8),
+ GATE_ETH(CLK_ETH_WOCPU1_EN, "eth_wocpu1_en", "netsys_mcu_sel", 14),
+ GATE_ETH(CLK_ETH_WOCPU0_EN, "eth_wocpu0_en", "netsys_mcu_sel", 15),
+};
+
+static void __init mtk_sgmiisys_0_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(sgmii0_clks));
+
+ mtk_clk_register_gates(node, sgmii0_clks, ARRAY_SIZE(sgmii0_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_sgmiisys_0, "mediatek,mt7986-sgmiisys_0",
+ mtk_sgmiisys_0_init);
+
+static void __init mtk_sgmiisys_1_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(sgmii1_clks));
+
+ mtk_clk_register_gates(node, sgmii1_clks, ARRAY_SIZE(sgmii1_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_sgmiisys_1, "mediatek,mt7986-sgmiisys_1",
+ mtk_sgmiisys_1_init);
+
+static void __init mtk_ethsys_init(struct device_node *node)
+{
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(ARRAY_SIZE(eth_clks));
+
+ mtk_clk_register_gates(node, eth_clks, ARRAY_SIZE(eth_clks), clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r)
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+}
+CLK_OF_DECLARE(mtk_ethsys, "mediatek,mt7986-ethsys_ck", mtk_ethsys_init);
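
Note that the SGMII and ETH gate tables above point set_ofs, clr_ofs and sta_ofs at the same offset: these blocks have no dedicated set/clear registers, so the driver picks the no_setclr ops, which read-modify-write the single register (the _inv variant treats a set bit as "clock running"). A simplified sketch of that enable path, assuming the regmap helpers shown earlier:

#include <linux/bits.h>
#include <linux/regmap.h>

/* Illustrative enable for a no_setclr, inverted-polarity gate */
static int example_no_setclr_inv_enable(struct regmap *map,
					unsigned int sta_ofs, u8 shift)
{
	/* no separate set register: modify the status register in place */
	return regmap_set_bits(map, sta_ofs, BIT(shift));
}
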
diff --git a/drivers/clk/mediatek/clk-mt7986-infracfg.c b/drivers/clk/mediatek/clk-mt7986-infracfg.c
new file mode 100644
index 000000000000..f209c559fbc3
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7986-infracfg.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Wenzhen Yu <wenzhen.yu@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mt7986-clk.h>
+#include <linux/clk.h>
+
+static DEFINE_SPINLOCK(mt7986_clk_lock);
+
+static const struct mtk_fixed_factor infra_divs[] = {
+ FACTOR(CLK_INFRA_SYSAXI_D2, "infra_sysaxi_d2", "sysaxi_sel", 1, 2),
+};
+
+static const char *const infra_uart_parent[] __initconst = { "csw_f26m_sel",
+ "uart_sel" };
+
+static const char *const infra_spi_parents[] __initconst = { "i2c_sel",
+ "spi_sel" };
+
+static const char *const infra_pwm_bsel_parents[] __initconst = {
+ "top_rtc_32p7k", "csw_f26m_sel", "infra_sysaxi_d2", "pwm_sel"
+};
+
+static const char *const infra_pcie_parents[] __initconst = {
+ "top_rtc_32p7k", "csw_f26m_sel", "top_xtal", "pextp_tl_ck_sel"
+};
+
+static const struct mtk_mux infra_muxes[] = {
+ /* MODULE_CLK_SEL_0 */
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_UART0_SEL, "infra_uart0_sel",
+ infra_uart_parent, 0x0018, 0x0010, 0x0014, 0, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_UART1_SEL, "infra_uart1_sel",
+ infra_uart_parent, 0x0018, 0x0010, 0x0014, 1, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_UART2_SEL, "infra_uart2_sel",
+ infra_uart_parent, 0x0018, 0x0010, 0x0014, 2, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_SPI0_SEL, "infra_spi0_sel",
+ infra_spi_parents, 0x0018, 0x0010, 0x0014, 4, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_SPI1_SEL, "infra_spi1_sel",
+ infra_spi_parents, 0x0018, 0x0010, 0x0014, 5, 1,
+ -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM1_SEL, "infra_pwm1_sel",
+ infra_pwm_bsel_parents, 0x0018, 0x0010, 0x0014, 9,
+ 2, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM2_SEL, "infra_pwm2_sel",
+ infra_pwm_bsel_parents, 0x0018, 0x0010, 0x0014, 11,
+ 2, -1, -1, -1),
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PWM_BSEL, "infra_pwm_bsel",
+ infra_pwm_bsel_parents, 0x0018, 0x0010, 0x0014, 13,
+ 2, -1, -1, -1),
+ /* MODULE_CLK_SEL_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_INFRA_PCIE_SEL, "infra_pcie_sel",
+ infra_pcie_parents, 0x0028, 0x0020, 0x0024, 0, 2,
+ -1, -1, -1),
+};
+
+static const struct mtk_gate_regs infra0_cg_regs = {
+ .set_ofs = 0x40,
+ .clr_ofs = 0x44,
+ .sta_ofs = 0x48,
+};
+
+static const struct mtk_gate_regs infra1_cg_regs = {
+ .set_ofs = 0x50,
+ .clr_ofs = 0x54,
+ .sta_ofs = 0x58,
+};
+
+static const struct mtk_gate_regs infra2_cg_regs = {
+ .set_ofs = 0x60,
+ .clr_ofs = 0x64,
+ .sta_ofs = 0x68,
+};
+
+#define GATE_INFRA0(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &infra0_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_INFRA1(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &infra1_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+#define GATE_INFRA2(_id, _name, _parent, _shift) \
+ { \
+ .id = _id, .name = _name, .parent_name = _parent, \
+ .regs = &infra2_cg_regs, .shift = _shift, \
+ .ops = &mtk_clk_gate_ops_setclr, \
+ }
+
+static const struct mtk_gate infra_clks[] = {
+ /* INFRA0 */
+ GATE_INFRA0(CLK_INFRA_GPT_STA, "infra_gpt_sta", "infra_sysaxi_d2", 0),
+ GATE_INFRA0(CLK_INFRA_PWM_HCK, "infra_pwm_hck", "infra_sysaxi_d2", 1),
+ GATE_INFRA0(CLK_INFRA_PWM_STA, "infra_pwm_sta", "infra_pwm_bsel", 2),
+ GATE_INFRA0(CLK_INFRA_PWM1_CK, "infra_pwm1", "infra_pwm1_sel", 3),
+ GATE_INFRA0(CLK_INFRA_PWM2_CK, "infra_pwm2", "infra_pwm2_sel", 4),
+ GATE_INFRA0(CLK_INFRA_CQ_DMA_CK, "infra_cq_dma", "sysaxi_sel", 6),
+ GATE_INFRA0(CLK_INFRA_EIP97_CK, "infra_eip97", "eip_b_sel", 7),
+ GATE_INFRA0(CLK_INFRA_AUD_BUS_CK, "infra_aud_bus", "sysaxi_sel", 8),
+ GATE_INFRA0(CLK_INFRA_AUD_26M_CK, "infra_aud_26m", "csw_f26m_sel", 9),
+ GATE_INFRA0(CLK_INFRA_AUD_L_CK, "infra_aud_l", "aud_l_sel", 10),
+ GATE_INFRA0(CLK_INFRA_AUD_AUD_CK, "infra_aud_aud", "a1sys_sel", 11),
+ GATE_INFRA0(CLK_INFRA_AUD_EG2_CK, "infra_aud_eg2", "a_tuner_sel", 13),
+ GATE_INFRA0(CLK_INFRA_DRAMC_26M_CK, "infra_dramc_26m", "csw_f26m_sel",
+ 14),
+ GATE_INFRA0(CLK_INFRA_DBG_CK, "infra_dbg", "infra_sysaxi_d2", 15),
+ GATE_INFRA0(CLK_INFRA_AP_DMA_CK, "infra_ap_dma", "infra_sysaxi_d2", 16),
+ GATE_INFRA0(CLK_INFRA_SEJ_CK, "infra_sej", "infra_sysaxi_d2", 24),
+ GATE_INFRA0(CLK_INFRA_SEJ_13M_CK, "infra_sej_13m", "csw_f26m_sel", 25),
+ GATE_INFRA0(CLK_INFRA_TRNG_CK, "infra_trng", "sysaxi_sel", 26),
+ /* INFRA1 */
+ GATE_INFRA1(CLK_INFRA_THERM_CK, "infra_therm", "csw_f26m_sel", 0),
+ GATE_INFRA1(CLK_INFRA_I2C0_CK, "infra_i2c0", "i2c_sel", 1),
+ GATE_INFRA1(CLK_INFRA_UART0_CK, "infra_uart0", "infra_uart0_sel", 2),
+ GATE_INFRA1(CLK_INFRA_UART1_CK, "infra_uart1", "infra_uart1_sel", 3),
+ GATE_INFRA1(CLK_INFRA_UART2_CK, "infra_uart2", "infra_uart2_sel", 4),
+ GATE_INFRA1(CLK_INFRA_NFI1_CK, "infra_nfi1", "nfi1x_sel", 8),
+ GATE_INFRA1(CLK_INFRA_SPINFI1_CK, "infra_spinfi1", "spinfi_sel", 9),
+ GATE_INFRA1(CLK_INFRA_NFI_HCK_CK, "infra_nfi_hck", "infra_sysaxi_d2",
+ 10),
+ GATE_INFRA1(CLK_INFRA_SPI0_CK, "infra_spi0", "infra_spi0_sel", 11),
+ GATE_INFRA1(CLK_INFRA_SPI1_CK, "infra_spi1", "infra_spi1_sel", 12),
+ GATE_INFRA1(CLK_INFRA_SPI0_HCK_CK, "infra_spi0_hck", "infra_sysaxi_d2",
+ 13),
+ GATE_INFRA1(CLK_INFRA_SPI1_HCK_CK, "infra_spi1_hck", "infra_sysaxi_d2",
+ 14),
+ GATE_INFRA1(CLK_INFRA_FRTC_CK, "infra_frtc", "top_rtc_32k", 15),
+ GATE_INFRA1(CLK_INFRA_MSDC_CK, "infra_msdc", "emmc_416m_sel", 16),
+ GATE_INFRA1(CLK_INFRA_MSDC_HCK_CK, "infra_msdc_hck", "emmc_250m_sel",
+ 17),
+ GATE_INFRA1(CLK_INFRA_MSDC_133M_CK, "infra_msdc_133m", "sysaxi_sel",
+ 18),
+ GATE_INFRA1(CLK_INFRA_MSDC_66M_CK, "infra_msdc_66m", "infra_sysaxi_d2",
+ 19),
+ GATE_INFRA1(CLK_INFRA_ADC_26M_CK, "infra_adc_26m", "csw_f26m_sel", 20),
+ GATE_INFRA1(CLK_INFRA_ADC_FRC_CK, "infra_adc_frc", "csw_f26m_sel", 21),
+ GATE_INFRA1(CLK_INFRA_FBIST2FPC_CK, "infra_fbist2fpc", "nfi1x_sel", 23),
+ /* INFRA2 */
+ GATE_INFRA2(CLK_INFRA_IUSB_133_CK, "infra_iusb_133", "sysaxi_sel", 0),
+ GATE_INFRA2(CLK_INFRA_IUSB_66M_CK, "infra_iusb_66m", "infra_sysaxi_d2",
+ 1),
+ GATE_INFRA2(CLK_INFRA_IUSB_SYS_CK, "infra_iusb_sys", "u2u3_sys_sel", 2),
+ GATE_INFRA2(CLK_INFRA_IUSB_CK, "infra_iusb", "u2u3_sel", 3),
+ GATE_INFRA2(CLK_INFRA_IPCIE_CK, "infra_ipcie", "pextp_tl_ck_sel", 12),
+ GATE_INFRA2(CLK_INFRA_IPCIE_PIPE_CK, "infra_ipcie_pipe", "top_xtal",
+ 13),
+ GATE_INFRA2(CLK_INFRA_IPCIER_CK, "infra_ipcier", "csw_f26m_sel", 14),
+ GATE_INFRA2(CLK_INFRA_IPCIEB_CK, "infra_ipcieb", "sysaxi_sel", 15),
+};
+
+static int clk_mt7986_infracfg_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+ void __iomem *base;
+ int nr = ARRAY_SIZE(infra_divs) + ARRAY_SIZE(infra_muxes) +
+ ARRAY_SIZE(infra_clks);
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ clk_data = mtk_alloc_clk_data(nr);
+
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_factors(infra_divs, ARRAY_SIZE(infra_divs), clk_data);
+ mtk_clk_register_muxes(infra_muxes, ARRAY_SIZE(infra_muxes), node,
+ &mt7986_clk_lock, clk_data);
+ mtk_clk_register_gates(node, infra_clks, ARRAY_SIZE(infra_clks),
+ clk_data);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r) {
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+ goto free_infracfg_data;
+ }
+ return r;
+
+free_infracfg_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt7986_infracfg[] = {
+ { .compatible = "mediatek,mt7986-infracfg", },
+ {}
+};
+
+static struct platform_driver clk_mt7986_infracfg_drv = {
+ .probe = clk_mt7986_infracfg_probe,
+ .driver = {
+ .name = "clk-mt7986-infracfg",
+ .of_match_table = of_match_clk_mt7986_infracfg,
+ },
+};
+builtin_platform_driver(clk_mt7986_infracfg_drv);
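
As an aside on the registration split in this series: ethsys and the two sgmiisys blocks earlier use CLK_OF_DECLARE() so their clocks exist before the device model is up, while apmixed, infracfg and topckgen register ordinary platform drivers via builtin_platform_driver(). The latter is merely shorthand for a device_initcall that registers the driver; an approximate expansion:

#include <linux/init.h>
#include <linux/platform_device.h>

/* Roughly what builtin_platform_driver(clk_mt7986_infracfg_drv) emits */
static int __init clk_mt7986_infracfg_drv_init(void)
{
	return platform_driver_register(&clk_mt7986_infracfg_drv);
}
device_initcall(clk_mt7986_infracfg_drv_init);
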
diff --git a/drivers/clk/mediatek/clk-mt7986-topckgen.c b/drivers/clk/mediatek/clk-mt7986-topckgen.c
new file mode 100644
index 000000000000..8f6f79b6e31e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt7986-topckgen.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ * Author: Wenzhen Yu <wenzhen.yu@mediatek.com>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include "clk-mtk.h"
+#include "clk-gate.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mt7986-clk.h>
+#include <linux/clk.h>
+
+static DEFINE_SPINLOCK(mt7986_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_XTAL, "top_xtal", "clkxtal", 40000000),
+ FIXED_CLK(CLK_TOP_JTAG, "top_jtag", "clkxtal", 50000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ /* XTAL */
+ FACTOR(CLK_TOP_XTAL_D2, "top_xtal_d2", "top_xtal", 1, 2),
+ FACTOR(CLK_TOP_RTC_32K, "top_rtc_32k", "top_xtal", 1, 1250),
+ FACTOR(CLK_TOP_RTC_32P7K, "top_rtc_32p7k", "top_xtal", 1, 1220),
+ /* MPLL */
+ FACTOR(CLK_TOP_MPLL_D2, "top_mpll_d2", "mpll", 1, 2),
+ FACTOR(CLK_TOP_MPLL_D4, "top_mpll_d4", "mpll", 1, 4),
+ FACTOR(CLK_TOP_MPLL_D8, "top_mpll_d8", "mpll", 1, 8),
+ FACTOR(CLK_TOP_MPLL_D8_D2, "top_mpll_d8_d2", "mpll", 1, 16),
+ FACTOR(CLK_TOP_MPLL_D3_D2, "top_mpll_d3_d2", "mpll", 1, 6),
+ /* MMPLL */
+ FACTOR(CLK_TOP_MMPLL_D2, "top_mmpll_d2", "mmpll", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D4, "top_mmpll_d4", "mmpll", 1, 4),
+ FACTOR(CLK_TOP_MMPLL_D8, "top_mmpll_d8", "mmpll", 1, 8),
+ FACTOR(CLK_TOP_MMPLL_D8_D2, "top_mmpll_d8_d2", "mmpll", 1, 16),
+ FACTOR(CLK_TOP_MMPLL_D3_D8, "top_mmpll_d3_d8", "mmpll", 1, 24),
+ FACTOR(CLK_TOP_MMPLL_U2PHY, "top_mmpll_u2phy", "mmpll", 1, 30),
+ /* APLL2 */
+ FACTOR(CLK_TOP_APLL2_D4, "top_apll2_d4", "apll2", 1, 4),
+ /* NET1PLL */
+ FACTOR(CLK_TOP_NET1PLL_D4, "top_net1pll_d4", "net1pll", 1, 4),
+ FACTOR(CLK_TOP_NET1PLL_D5, "top_net1pll_d5", "net1pll", 1, 5),
+ FACTOR(CLK_TOP_NET1PLL_D5_D2, "top_net1pll_d5_d2", "net1pll", 1, 10),
+ FACTOR(CLK_TOP_NET1PLL_D5_D4, "top_net1pll_d5_d4", "net1pll", 1, 20),
+ FACTOR(CLK_TOP_NET1PLL_D8_D2, "top_net1pll_d8_d2", "net1pll", 1, 16),
+ FACTOR(CLK_TOP_NET1PLL_D8_D4, "top_net1pll_d8_d4", "net1pll", 1, 32),
+ /* NET2PLL */
+ FACTOR(CLK_TOP_NET2PLL_D4, "top_net2pll_d4", "net2pll", 1, 4),
+ FACTOR(CLK_TOP_NET2PLL_D4_D2, "top_net2pll_d4_d2", "net2pll", 1, 8),
+ FACTOR(CLK_TOP_NET2PLL_D3_D2, "top_net2pll_d3_d2", "net2pll", 1, 2),
+ /* WEDMCUPLL */
+ FACTOR(CLK_TOP_WEDMCUPLL_D5_D2, "top_wedmcupll_d5_d2", "wedmcupll", 1,
+ 10),
+};
+
+static const char *const nfi1x_parents[] __initconst = { "top_xtal",
+ "top_mmpll_d8",
+ "top_net1pll_d8_d2",
+ "top_net2pll_d3_d2",
+ "top_mpll_d4",
+ "top_mmpll_d8_d2",
+ "top_wedmcupll_d5_d2",
+ "top_mpll_d8" };
+
+static const char *const spinfi_parents[] __initconst = {
+ "top_xtal_d2", "top_xtal", "top_net1pll_d5_d4",
+ "top_mpll_d4", "top_mmpll_d8_d2", "top_wedmcupll_d5_d2",
+ "top_mmpll_d3_d8", "top_mpll_d8"
+};
+
+static const char *const spi_parents[] __initconst = {
+ "top_xtal", "top_mpll_d2", "top_mmpll_d8",
+ "top_net1pll_d8_d2", "top_net2pll_d3_d2", "top_net1pll_d5_d4",
+ "top_mpll_d4", "top_wedmcupll_d5_d2"
+};
+
+static const char *const uart_parents[] __initconst = { "top_xtal",
+ "top_mpll_d8",
+ "top_mpll_d8_d2" };
+
+static const char *const pwm_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d8_d2", "top_net1pll_d5_d4", "top_mpll_d4"
+};
+
+static const char *const i2c_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d5_d4", "top_mpll_d4", "top_net1pll_d8_d4"
+};
+
+static const char *const pextp_tl_ck_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d5_d4", "top_net2pll_d4_d2", "top_rtc_32k"
+};
+
+static const char *const emmc_250m_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d5_d2"
+};
+
+static const char *const emmc_416m_parents[] __initconst = { "top_xtal",
+ "mpll" };
+
+static const char *const f_26m_adc_parents[] __initconst = { "top_xtal",
+ "top_mpll_d8_d2" };
+
+static const char *const dramc_md32_parents[] __initconst = { "top_xtal",
+ "top_mpll_d2" };
+
+static const char *const sysaxi_parents[] __initconst = { "top_xtal",
+ "top_net1pll_d8_d2",
+ "top_net2pll_d4" };
+
+static const char *const sysapb_parents[] __initconst = { "top_xtal",
+ "top_mpll_d3_d2",
+ "top_net2pll_d4_d2" };
+
+static const char *const arm_db_main_parents[] __initconst = {
+ "top_xtal", "top_net2pll_d3_d2"
+};
+
+static const char *const arm_db_jtsel_parents[] __initconst = { "top_jtag",
+ "top_xtal" };
+
+static const char *const netsys_parents[] __initconst = { "top_xtal",
+ "top_mmpll_d4" };
+
+static const char *const netsys_500m_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d5"
+};
+
+static const char *const netsys_mcu_parents[] __initconst = {
+ "top_xtal", "wedmcupll", "top_mmpll_d2", "top_net1pll_d4",
+ "top_net1pll_d5"
+};
+
+static const char *const netsys_2x_parents[] __initconst = {
+ "top_xtal", "net2pll", "wedmcupll", "top_mmpll_d2"
+};
+
+static const char *const sgm_325m_parents[] __initconst = { "top_xtal",
+ "sgmpll" };
+
+static const char *const sgm_reg_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d8_d4"
+};
+
+static const char *const a1sys_parents[] __initconst = { "top_xtal",
+ "top_apll2_d4" };
+
+static const char *const conn_mcusys_parents[] __initconst = { "top_xtal",
+ "top_mmpll_d2" };
+
+static const char *const eip_b_parents[] __initconst = { "top_xtal",
+ "net2pll" };
+
+static const char *const aud_l_parents[] __initconst = { "top_xtal", "apll2",
+ "top_mpll_d8_d2" };
+
+static const char *const a_tuner_parents[] __initconst = { "top_xtal",
+ "top_apll2_d4",
+ "top_mpll_d8_d2" };
+
+static const char *const u2u3_sys_parents[] __initconst = {
+ "top_xtal", "top_net1pll_d5_d4"
+};
+
+static const char *const da_u2_refsel_parents[] __initconst = {
+ "top_xtal", "top_mmpll_u2phy"
+};
+
+static const struct mtk_mux top_muxes[] = {
+ /* CLK_CFG_0 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NFI1X_SEL, "nfi1x_sel", nfi1x_parents,
+ 0x000, 0x004, 0x008, 0, 3, 7, 0x1C0, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPINFI_SEL, "spinfi_sel", spinfi_parents,
+ 0x000, 0x004, 0x008, 8, 3, 15, 0x1C0, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI_SEL, "spi_sel", spi_parents, 0x000,
+ 0x004, 0x008, 16, 3, 23, 0x1C0, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPIM_MST_SEL, "spim_mst_sel", spi_parents,
+ 0x000, 0x004, 0x008, 24, 3, 31, 0x1C0, 3),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART_SEL, "uart_sel", uart_parents, 0x010,
+ 0x014, 0x018, 0, 2, 7, 0x1C0, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM_SEL, "pwm_sel", pwm_parents, 0x010,
+ 0x014, 0x018, 8, 2, 15, 0x1C0, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C_SEL, "i2c_sel", i2c_parents, 0x010,
+ 0x014, 0x018, 16, 2, 23, 0x1C0, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PEXTP_TL_SEL, "pextp_tl_ck_sel",
+ pextp_tl_ck_parents, 0x010, 0x014, 0x018, 24, 2,
+ 31, 0x1C0, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EMMC_250M_SEL, "emmc_250m_sel",
+ emmc_250m_parents, 0x020, 0x024, 0x028, 0, 1, 7,
+ 0x1C0, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EMMC_416M_SEL, "emmc_416m_sel",
+ emmc_416m_parents, 0x020, 0x024, 0x028, 8, 1, 15,
+ 0x1C0, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_F_26M_ADC_SEL, "f_26m_adc_sel",
+ f_26m_adc_parents, 0x020, 0x024, 0x028, 16, 1, 23,
+ 0x1C0, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DRAMC_SEL, "dramc_sel", f_26m_adc_parents,
+ 0x020, 0x024, 0x028, 24, 1, 31, 0x1C0, 11),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DRAMC_MD32_SEL, "dramc_md32_sel",
+ dramc_md32_parents, 0x030, 0x034, 0x038, 0, 1, 7,
+ 0x1C0, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SYSAXI_SEL, "sysaxi_sel", sysaxi_parents,
+ 0x030, 0x034, 0x038, 8, 2, 15, 0x1C0, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SYSAPB_SEL, "sysapb_sel", sysapb_parents,
+ 0x030, 0x034, 0x038, 16, 2, 23, 0x1C0, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ARM_DB_MAIN_SEL, "arm_db_main_sel",
+ arm_db_main_parents, 0x030, 0x034, 0x038, 24, 1,
+ 31, 0x1C0, 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ARM_DB_JTSEL, "arm_db_jtsel",
+ arm_db_jtsel_parents, 0x040, 0x044, 0x048, 0, 1, 7,
+ 0x1C0, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_SEL, "netsys_sel", netsys_parents,
+ 0x040, 0x044, 0x048, 8, 1, 15, 0x1C0, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_500M_SEL, "netsys_500m_sel",
+ netsys_500m_parents, 0x040, 0x044, 0x048, 16, 1,
+ 23, 0x1C0, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_MCU_SEL, "netsys_mcu_sel",
+ netsys_mcu_parents, 0x040, 0x044, 0x048, 24, 3, 31,
+ 0x1C0, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NETSYS_2X_SEL, "netsys_2x_sel",
+ netsys_2x_parents, 0x050, 0x054, 0x058, 0, 2, 7,
+ 0x1C0, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_325M_SEL, "sgm_325m_sel",
+ sgm_325m_parents, 0x050, 0x054, 0x058, 8, 1, 15,
+ 0x1C0, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SGM_REG_SEL, "sgm_reg_sel",
+ sgm_reg_parents, 0x050, 0x054, 0x058, 16, 1, 23,
+ 0x1C0, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A1SYS_SEL, "a1sys_sel", a1sys_parents,
+ 0x050, 0x054, 0x058, 24, 1, 31, 0x1C0, 23),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CONN_MCUSYS_SEL, "conn_mcusys_sel",
+ conn_mcusys_parents, 0x060, 0x064, 0x068, 0, 1, 7,
+ 0x1C0, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EIP_B_SEL, "eip_b_sel", eip_b_parents,
+ 0x060, 0x064, 0x068, 8, 1, 15, 0x1C0, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PCIE_PHY_SEL, "pcie_phy_sel",
+ f_26m_adc_parents, 0x060, 0x064, 0x068, 16, 1, 23,
+ 0x1C0, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB3_PHY_SEL, "usb3_phy_sel",
+ f_26m_adc_parents, 0x060, 0x064, 0x068, 24, 1, 31,
+ 0x1C0, 27),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_F26M_SEL, "csw_f26m_sel",
+ f_26m_adc_parents, 0x070, 0x074, 0x078, 0, 1, 7,
+ 0x1C0, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_L_SEL, "aud_l_sel", aud_l_parents,
+ 0x070, 0x074, 0x078, 8, 2, 15, 0x1C0, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A_TUNER_SEL, "a_tuner_sel",
+ a_tuner_parents, 0x070, 0x074, 0x078, 16, 2, 23,
+ 0x1C0, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_U2U3_SEL, "u2u3_sel", f_26m_adc_parents,
+ 0x070, 0x074, 0x078, 24, 1, 31, 0x1C4, 0),
+ /* CLK_CFG_8 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_U2U3_SYS_SEL, "u2u3_sys_sel",
+ u2u3_sys_parents, 0x080, 0x084, 0x088, 0, 1, 7,
+ 0x1C4, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_U2U3_XHCI_SEL, "u2u3_xhci_sel",
+ u2u3_sys_parents, 0x080, 0x084, 0x088, 8, 1, 15,
+ 0x1C4, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DA_U2_REFSEL, "da_u2_refsel",
+ da_u2_refsel_parents, 0x080, 0x084, 0x088, 16, 1,
+ 23, 0x1C4, 3),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DA_U2_CK_1P_SEL, "da_u2_ck_1p_sel",
+ da_u2_refsel_parents, 0x080, 0x084, 0x088, 24, 1,
+ 31, 0x1C4, 4),
+ /* CLK_CFG_9 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AP2CNN_HOST_SEL, "ap2cnn_host_sel",
+ sgm_reg_parents, 0x090, 0x094, 0x098, 0, 1, 7,
+ 0x1C4, 5),
+};
+
+static int clk_mt7986_topckgen_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+ void __iomem *base;
+ int nr = ARRAY_SIZE(top_fixed_clks) + ARRAY_SIZE(top_divs) +
+ ARRAY_SIZE(top_muxes);
+
+ base = of_iomap(node, 0);
+ if (!base) {
+ pr_err("%s(): ioremap failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ clk_data = mtk_alloc_clk_data(nr);
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ clk_data);
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), clk_data);
+ mtk_clk_register_muxes(top_muxes, ARRAY_SIZE(top_muxes), node,
+ &mt7986_clk_lock, clk_data);
+
+ clk_prepare_enable(clk_data->clks[CLK_TOP_SYSAXI_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_SYSAPB_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_DRAMC_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_DRAMC_MD32_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_F26M_SEL]);
+ clk_prepare_enable(clk_data->clks[CLK_TOP_SGM_REG_SEL]);
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+
+ if (r) {
+ pr_err("%s(): could not register clock provider: %d\n",
+ __func__, r);
+ goto free_topckgen_data;
+ }
+ return r;
+
+free_topckgen_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt7986_topckgen[] = {
+ { .compatible = "mediatek,mt7986-topckgen", },
+ {}
+};
+
+static struct platform_driver clk_mt7986_topckgen_drv = {
+ .probe = clk_mt7986_topckgen_probe,
+ .driver = {
+ .name = "clk-mt7986-topckgen",
+ .of_match_table = of_match_clk_mt7986_topckgen,
+ },
+};
+builtin_platform_driver(clk_mt7986_topckgen_drv);
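
The topckgen probe above keeps the bus, DRAM and 26 MHz muxes running by calling clk_prepare_enable() on them by hand. The clk framework's declarative alternative, which the gcc-msm8976 driver later in this log uses for gpll0_vote, is to mark such a clock CLK_IS_CRITICAL so the core enables it at registration and never lets it gate; a sketch with illustrative fields:

#include <linux/clk-provider.h>

static const struct clk_init_data example_critical_mux = {
	.name = "sysaxi_sel",		/* illustrative */
	.ops = &clk_mux_ops,		/* any suitable mux ops */
	/* enabled by the core and protected from being disabled */
	.flags = CLK_IS_CRITICAL,
};
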
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index d6eed760327d..608e0e8ca49a 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -720,6 +720,35 @@ static struct clk_regmap gxbb_mpll0_div = {
.width = 14,
},
.sdm_en = {
+ .reg_off = HHI_MPLL_CNTL,
+ .shift = 25,
+ .width = 1,
+ },
+ .n2 = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 16,
+ .width = 9,
+ },
+ .lock = &meson_clk_lock,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mpll0_div",
+ .ops = &meson_clk_mpll_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &gxbb_mpll_prediv.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_regmap gxl_mpll0_div = {
+ .data = &(struct meson_clk_mpll_data){
+ .sdm = {
+ .reg_off = HHI_MPLL_CNTL7,
+ .shift = 0,
+ .width = 14,
+ },
+ .sdm_en = {
.reg_off = HHI_MPLL_CNTL7,
.shift = 15,
.width = 1,
@@ -749,7 +778,16 @@ static struct clk_regmap gxbb_mpll0 = {
.hw.init = &(struct clk_init_data){
.name = "mpll0",
.ops = &clk_regmap_gate_ops,
- .parent_hws = (const struct clk_hw *[]) { &gxbb_mpll0_div.hw },
+ .parent_data = &(const struct clk_parent_data) {
+ /*
+ * Note:
+ * GXL and GXBB have different SDM_EN registers. We
+ * fall back to the global name matching mechanism so
+ * that mpll0 picks up the appropriate mpll0_div.
+ */
+ .name = "mpll0_div",
+ .index = -1,
+ },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
},
@@ -3044,7 +3082,7 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = {
[CLKID_VAPB_1] = &gxbb_vapb_1.hw,
[CLKID_VAPB_SEL] = &gxbb_vapb_sel.hw,
[CLKID_VAPB] = &gxbb_vapb.hw,
- [CLKID_MPLL0_DIV] = &gxbb_mpll0_div.hw,
+ [CLKID_MPLL0_DIV] = &gxl_mpll0_div.hw,
[CLKID_MPLL1_DIV] = &gxbb_mpll1_div.hw,
[CLKID_MPLL2_DIV] = &gxbb_mpll2_div.hw,
[CLKID_MPLL_PREDIV] = &gxbb_mpll_prediv.hw,
@@ -3439,7 +3477,7 @@ static struct clk_regmap *const gxl_clk_regmaps[] = {
&gxbb_mpll0,
&gxbb_mpll1,
&gxbb_mpll2,
- &gxbb_mpll0_div,
+ &gxl_mpll0_div,
&gxbb_mpll1_div,
&gxbb_mpll2_div,
&gxbb_cts_amclk_div,
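
The mpll0 parent rework above relies on the clk core's parent lookup order: .fw_name is matched against the DT clock-names first, then .index against the clocks phandles, and only then is .name matched against the global list of registered clock names. Filling in only .name with .index = -1 therefore forces the global-name path, so mpll0 binds to whichever mpll0_div variant (GXBB or GXL) was registered for the running SoC. A sketch of such a parent entry:

#include <linux/clk-provider.h>

static const struct clk_parent_data example_parent = {
	/* no .fw_name: the DT clock-names lookup is skipped */
	.name = "mpll0_div",	/* matched against global clock names */
	.index = -1,		/* the DT clocks-index lookup is skipped too */
};
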
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 74efc82127e1..42c874194d1a 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -265,6 +265,14 @@ config MSM_MMCC_8974
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+config MSM_GCC_8976
+ tristate "MSM8956/76 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on MSM8956/76 devices.
+ Say Y if you want to use peripheral devices such as UART, SPI,
+ I2C, USB, SD/eMMC, SATA, PCIe, etc.
+
config MSM_MMCC_8994
tristate "MSM8994 Multimedia Clock Controller"
select MSM_GCC_8994
@@ -564,6 +572,14 @@ config SM_CAMCC_8250
Support for the camera clock controller on SM8250 devices.
Say Y if you want to support camera devices and camera functionality.
+config SDX_GCC_65
+ tristate "SDX65 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SDX65 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe, etc.
+
config SM_DISPCC_8250
tristate "SM8150 and SM8250 Display Clock Controller"
depends on SM_GCC_8150 || SM_GCC_8250
@@ -618,6 +634,14 @@ config SM_GCC_8350
Say Y if you want to use peripheral devices such as UART,
SPI, I2C, USB, SD/UFS, PCIe etc.
+config SM_GCC_8450
+ tristate "SM8450 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SM8450 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe, etc.
+
config SM_GPUCC_8150
tristate "SM8150 Graphics Clock Controller"
select SM_GCC_8150
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 1718c34d3551..0d98ca9be67f 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_MSM_GCC_8939) += gcc-msm8939.o
obj-$(CONFIG_MSM_GCC_8953) += gcc-msm8953.o
obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
+obj-$(CONFIG_MSM_GCC_8976) += gcc-msm8976.o
obj-$(CONFIG_MSM_GCC_8994) += gcc-msm8994.o
obj-$(CONFIG_MSM_GCC_8996) += gcc-msm8996.o
obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
@@ -83,6 +84,7 @@ obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SM_CAMCC_8250) += camcc-sm8250.o
+obj-$(CONFIG_SDX_GCC_65) += gcc-sdx65.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_GCC_6115) += gcc-sm6115.o
obj-$(CONFIG_SM_GCC_6125) += gcc-sm6125.o
@@ -90,6 +92,7 @@ obj-$(CONFIG_SM_GCC_6350) += gcc-sm6350.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
obj-$(CONFIG_SM_GCC_8350) += gcc-sm8350.o
+obj-$(CONFIG_SM_GCC_8450) += gcc-sm8450.o
obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o
obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o
obj-$(CONFIG_SM_VIDEOCC_8150) += videocc-sm8150.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 8f65b9bdafce..4406cf609aae 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2015, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/kernel.h>
@@ -139,6 +140,20 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x28,
[PLL_OFF_STATUS] = 0x38,
},
+ [CLK_ALPHA_PLL_TYPE_LUCID_EVO] = {
+ [PLL_OFF_OPMODE] = 0x04,
+ [PLL_OFF_STATUS] = 0x0c,
+ [PLL_OFF_L_VAL] = 0x10,
+ [PLL_OFF_ALPHA_VAL] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1c,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_CONFIG_CTL_U] = 0x24,
+ [PLL_OFF_CONFIG_CTL_U1] = 0x28,
+ [PLL_OFF_TEST_CTL] = 0x2c,
+ [PLL_OFF_TEST_CTL_U] = 0x30,
+ [PLL_OFF_TEST_CTL_U1] = 0x34,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -175,6 +190,10 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
#define LUCID_5LPE_PLL_LATCH_INPUT BIT(14)
#define LUCID_5LPE_ENABLE_VOTE_RUN BIT(21)
+/* LUCID EVO PLL specific settings and offsets */
+#define LUCID_EVO_ENABLE_VOTE_RUN BIT(25)
+#define LUCID_EVO_PLL_L_VAL_MASK GENMASK(15, 0)
+
/* ZONDA PLL specific */
#define ZONDA_PLL_OUT_MASK 0xf
#define ZONDA_STAY_IN_CFA BIT(16)
@@ -204,7 +223,7 @@ static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
if (ret)
return ret;
- for (count = 100; count > 0; count--) {
+ for (count = 200; count > 0; count--) {
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
if (ret)
return ret;
@@ -1750,24 +1769,32 @@ static int alpha_pll_lucid_5lpe_set_rate(struct clk_hw *hw, unsigned long rate,
LUCID_5LPE_ALPHA_PLL_ACK_LATCH);
}
-static int clk_lucid_5lpe_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+static int __clk_lucid_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate,
+ unsigned long enable_vote_run)
{
struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
- int i, val = 0, div, ret;
+ struct regmap *regmap = pll->clkr.regmap;
+ int i, val, div, ret;
u32 mask;
/*
* If the PLL is in FSM mode, then treat set_rate callback as a
* no-operation.
*/
- ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
+ ret = regmap_read(regmap, PLL_USER_CTL(pll), &val);
if (ret)
return ret;
- if (val & LUCID_5LPE_ENABLE_VOTE_RUN)
+ if (val & enable_vote_run)
return 0;
+ if (!pll->post_div_table) {
+ pr_err("Missing the post_div_table for the %s PLL\n",
+ clk_hw_get_name(&pll->clkr.hw));
+ return -EINVAL;
+ }
+
div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
for (i = 0; i < pll->num_post_div; i++) {
if (pll->post_div_table[i].div == div) {
@@ -1781,6 +1808,12 @@ static int clk_lucid_5lpe_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long
mask, val << pll->post_div_shift);
}
+static int clk_lucid_5lpe_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_lucid_pll_postdiv_set_rate(hw, rate, parent_rate, LUCID_5LPE_ENABLE_VOTE_RUN);
+}
+
const struct clk_ops clk_alpha_pll_lucid_5lpe_ops = {
.prepare = alpha_pll_lucid_5lpe_prepare,
.enable = alpha_pll_lucid_5lpe_enable,
@@ -1960,3 +1993,124 @@ const struct clk_ops clk_alpha_pll_zonda_ops = {
.set_rate = clk_zonda_pll_set_rate,
};
EXPORT_SYMBOL(clk_alpha_pll_zonda_ops);
+
+static int alpha_pll_lucid_evo_enable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_read(regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return ret;
+
+ /* If in FSM mode, just vote for it */
+ if (val & LUCID_EVO_ENABLE_VOTE_RUN) {
+ ret = clk_enable_regmap(hw);
+ if (ret)
+ return ret;
+ return wait_for_pll_enable_lock(pll);
+ }
+
+ /* Check if PLL is already enabled */
+ ret = trion_pll_is_enabled(pll, regmap);
+ if (ret < 0) {
+ return ret;
+ } else if (ret) {
+ pr_warn("%s PLL is already enabled\n", clk_hw_get_name(&pll->clkr.hw));
+ return 0;
+ }
+
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N);
+ if (ret)
+ return ret;
+
+ /* Set operation mode to RUN */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN);
+
+ ret = wait_for_pll_enable_lock(pll);
+ if (ret)
+ return ret;
+
+ /* Enable the PLL outputs */
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), PLL_OUT_MASK, PLL_OUT_MASK);
+ if (ret)
+ return ret;
+
+ /* Enable the global PLL outputs */
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, PLL_OUTCTRL);
+ if (ret)
+ return ret;
+
+ /* Ensure that the write above goes through before returning. */
+ mb();
+ return ret;
+}
+
+static void alpha_pll_lucid_evo_disable(struct clk_hw *hw)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ u32 val;
+ int ret;
+
+ ret = regmap_read(regmap, PLL_USER_CTL(pll), &val);
+ if (ret)
+ return;
+
+ /* If in FSM mode, just unvote it */
+ if (val & LUCID_EVO_ENABLE_VOTE_RUN) {
+ clk_disable_regmap(hw);
+ return;
+ }
+
+ /* Disable the global PLL output */
+ ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0);
+ if (ret)
+ return;
+
+ /* Disable the PLL outputs */
+ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), PLL_OUT_MASK, 0);
+ if (ret)
+ return;
+
+ /* Place the PLL mode in STANDBY */
+ regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY);
+}
+
+static unsigned long alpha_pll_lucid_evo_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ struct regmap *regmap = pll->clkr.regmap;
+ u32 l, frac;
+
+ regmap_read(regmap, PLL_L_VAL(pll), &l);
+ l &= LUCID_EVO_PLL_L_VAL_MASK;
+ regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac);
+
+ return alpha_pll_calc_rate(parent_rate, l, frac, pll_alpha_width(pll));
+}
+
+static int clk_lucid_evo_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return __clk_lucid_pll_postdiv_set_rate(hw, rate, parent_rate, LUCID_EVO_ENABLE_VOTE_RUN);
+}
+
+const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops = {
+ .enable = alpha_pll_lucid_evo_enable,
+ .disable = alpha_pll_lucid_evo_disable,
+ .is_enabled = clk_trion_pll_is_enabled,
+ .recalc_rate = alpha_pll_lucid_evo_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_evo_ops);
+
+const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops = {
+ .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_postdiv_fabia_round_rate,
+ .set_rate = clk_lucid_evo_pll_postdiv_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_evo_ops);
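
Both the 5LPE ops touched above and the new Lucid EVO ops follow the same FSM-mode convention: when the vote-run bit is set in USER_CTL, the PLL is sequenced by hardware, so software only casts a vote through the regmap enable bit rather than driving MODE/OPMODE itself. (The same file also doubles wait_for_pll()'s poll count from 100 to 200 iterations, enlarging the lock-wait budget.) A condensed sketch of that shared shape, assuming the driver-internal definitions above, with the manual bring-up elided:

static int example_fsm_aware_enable(struct clk_hw *hw, u32 vote_run_bit)
{
	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
	u32 val;
	int ret;

	ret = regmap_read(pll->clkr.regmap, PLL_USER_CTL(pll), &val);
	if (ret)
		return ret;

	if (val & vote_run_bit) {
		/* FSM mode: vote, then wait for the lock */
		ret = clk_enable_regmap(hw);
		return ret ? ret : wait_for_pll_enable_lock(pll);
	}

	return -EOPNOTSUPP;	/* manual bring-up elided in this sketch */
}
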
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 55e4fa47912f..6e9907deaf30 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -17,6 +17,7 @@ enum {
CLK_ALPHA_PLL_TYPE_LUCID = CLK_ALPHA_PLL_TYPE_TRION,
CLK_ALPHA_PLL_TYPE_AGERA,
CLK_ALPHA_PLL_TYPE_ZONDA,
+ CLK_ALPHA_PLL_TYPE_LUCID_EVO,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -151,6 +152,8 @@ extern const struct clk_ops clk_alpha_pll_postdiv_lucid_5lpe_ops;
extern const struct clk_ops clk_alpha_pll_zonda_ops;
#define clk_alpha_pll_postdiv_zonda_ops clk_alpha_pll_postdiv_fabia_ops
+extern const struct clk_ops clk_alpha_pll_fixed_lucid_evo_ops;
+extern const struct clk_ops clk_alpha_pll_postdiv_lucid_evo_ops;
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index 441d7a20e6f3..74e57c84f60a 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -515,6 +515,32 @@ static const struct clk_rpmh_desc clk_rpmh_sm8350 = {
/* Resource name must match resource id present in cmd-db */
DEFINE_CLK_RPMH_ARC(sc7280, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 4);
+DEFINE_CLK_RPMH_VRM(sm8450, ln_bb_clk1, ln_bb_clk1_ao, "lnbclka1", 4);
+DEFINE_CLK_RPMH_VRM(sm8450, ln_bb_clk2, ln_bb_clk2_ao, "lnbclka2", 4);
+
+static struct clk_hw *sm8450_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sc7280_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sc7280_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK1] = &sm8450_ln_bb_clk1.hw,
+ [RPMH_LN_BB_CLK1_A] = &sm8450_ln_bb_clk1_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sm8450_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sm8450_ln_bb_clk2_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+ [RPMH_RF_CLK4] = &sm8350_rf_clk4.hw,
+ [RPMH_RF_CLK4_A] = &sm8350_rf_clk4_ao.hw,
+ [RPMH_IPA_CLK] = &sdm845_ipa.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sm8450 = {
+ .clks = sm8450_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sm8450_rpmh_clocks),
+};
+
static struct clk_hw *sc7280_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &sc7280_bi_tcxo.hw,
[RPMH_CXO_CLK_A] = &sc7280_bi_tcxo_ao.hw,
@@ -556,6 +582,30 @@ static const struct clk_rpmh_desc clk_rpmh_sm6350 = {
.num_clks = ARRAY_SIZE(sm6350_rpmh_clocks),
};
+DEFINE_CLK_RPMH_VRM(sdx65, ln_bb_clk1, ln_bb_clk1_ao, "lnbclka1", 4);
+
+static struct clk_hw *sdx65_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sc7280_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sc7280_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK1] = &sdx65_ln_bb_clk1.hw,
+ [RPMH_LN_BB_CLK1_A] = &sdx65_ln_bb_clk1_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdm845_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdm845_rf_clk2_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+ [RPMH_RF_CLK4] = &sm8350_rf_clk4.hw,
+ [RPMH_RF_CLK4_A] = &sm8350_rf_clk4_ao.hw,
+ [RPMH_IPA_CLK] = &sdm845_ipa.hw,
+ [RPMH_QPIC_CLK] = &sdx55_qpic_clk.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sdx65 = {
+ .clks = sdx65_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sdx65_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -643,10 +693,12 @@ static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sc8180x-rpmh-clk", .data = &clk_rpmh_sc8180x},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
{ .compatible = "qcom,sdx55-rpmh-clk", .data = &clk_rpmh_sdx55},
+ { .compatible = "qcom,sdx65-rpmh-clk", .data = &clk_rpmh_sdx65},
{ .compatible = "qcom,sm6350-rpmh-clk", .data = &clk_rpmh_sm6350},
{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
{ .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250},
{ .compatible = "qcom,sm8350-rpmh-clk", .data = &clk_rpmh_sm8350},
+ { .compatible = "qcom,sm8450-rpmh-clk", .data = &clk_rpmh_sm8450},
{ .compatible = "qcom,sc7280-rpmh-clk", .data = &clk_rpmh_sc7280},
{ }
};
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 5776d85a1e5c..ea28e45ca371 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -17,7 +17,6 @@
#include <linux/soc/qcom/smd-rpm.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
-#include <dt-bindings/mfd/qcom-rpm.h>
#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773
#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
@@ -151,12 +150,6 @@ struct clk_smd_rpm_req {
__le32 value;
};
-struct rpm_cc {
- struct qcom_rpm *rpm;
- struct clk_smd_rpm **clks;
- size_t num_clks;
-};
-
struct rpm_smd_clk_desc {
struct clk_smd_rpm **clks;
size_t num_clks;
@@ -196,10 +189,6 @@ static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
};
- /* Buffered clock needs a binary value */
- if (r->rpm_res_type == QCOM_SMD_RPM_CLK_BUF_A)
- req.value = cpu_to_le32(!!req.value);
-
return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
r->rpm_res_type, r->rpm_clk_id, &req,
sizeof(req));
@@ -214,10 +203,6 @@ static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r,
.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
};
- /* Buffered clock needs a binary value */
- if (r->rpm_res_type == QCOM_SMD_RPM_CLK_BUF_A)
- req.value = cpu_to_le32(!!req.value);
-
return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
r->rpm_res_type, r->rpm_clk_id, &req,
sizeof(req));
@@ -1159,20 +1144,19 @@ MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table);
static struct clk_hw *qcom_smdrpm_clk_hw_get(struct of_phandle_args *clkspec,
void *data)
{
- struct rpm_cc *rcc = data;
+ const struct rpm_smd_clk_desc *desc = data;
unsigned int idx = clkspec->args[0];
- if (idx >= rcc->num_clks) {
+ if (idx >= desc->num_clks) {
pr_err("%s: invalid index %u\n", __func__, idx);
return ERR_PTR(-EINVAL);
}
- return rcc->clks[idx] ? &rcc->clks[idx]->hw : ERR_PTR(-ENOENT);
+ return desc->clks[idx] ? &desc->clks[idx]->hw : ERR_PTR(-ENOENT);
}
static int rpm_smd_clk_probe(struct platform_device *pdev)
{
- struct rpm_cc *rcc;
int ret;
size_t num_clks, i;
struct qcom_smd_rpm *rpm;
@@ -1192,13 +1176,6 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
rpm_smd_clks = desc->clks;
num_clks = desc->num_clks;
- rcc = devm_kzalloc(&pdev->dev, sizeof(*rcc), GFP_KERNEL);
- if (!rcc)
- return -ENOMEM;
-
- rcc->clks = rpm_smd_clks;
- rcc->num_clks = num_clks;
-
for (i = 0; i < num_clks; i++) {
if (!rpm_smd_clks[i])
continue;
@@ -1224,7 +1201,7 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
}
ret = devm_of_clk_add_hw_provider(&pdev->dev, qcom_smdrpm_clk_hw_get,
- rcc);
+ (void *)desc);
if (ret)
goto err;
diff --git a/drivers/clk/qcom/gcc-msm8976.c b/drivers/clk/qcom/gcc-msm8976.c
new file mode 100644
index 000000000000..a8b15814933e
--- /dev/null
+++ b/drivers/clk/qcom/gcc-msm8976.c
@@ -0,0 +1,4155 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm Global Clock Controller driver for MSM8956/76
+ *
+ * Copyright (c) 2016-2021, AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@somainline.org>
+ *
+ * Driver cleanup and modernization
+ * Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
+ * Marijn Suijten <marijn.suijten@somainline.org>
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-msm8976.h>
+
+#include "clk-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_AUX,
+ P_GPLL0_OUT,
+ P_GPLL0_OUT_M,
+ P_GPLL0_OUT_MDP,
+ P_GPLL2_AUX,
+ P_GPLL2_OUT,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL4_AUX,
+ P_GPLL4_OUT,
+ P_GPLL4_GFX3D,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL6_AUX,
+ P_GPLL6_OUT,
+ P_GPLL6_GFX3D,
+ P_DSI0PLL,
+ P_DSI1PLL,
+ P_DSI0PLL_BYTE,
+ P_DSI1PLL_BYTE,
+ P_XO_A,
+ P_XO,
+};
+
+static struct clk_pll gpll0 = {
+ .l_reg = 0x21004,
+ .m_reg = 0x21008,
+ .n_reg = 0x2100c,
+ .config_reg = 0x21014,
+ .mode_reg = 0x21000,
+ .status_reg = 0x2101c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll0_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_vote",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ /* This clock is required for other ones to function. */
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll2 = {
+ .l_reg = 0x4a004,
+ .m_reg = 0x4a008,
+ .n_reg = 0x4a00c,
+ .config_reg = 0x4a014,
+ .mode_reg = 0x4a000,
+ .status_reg = 0x4a01c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll2",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll2_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll2_vote",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static const struct pll_freq_tbl gpll3_freq_tbl[] = {
+ { 1100000000, 57, 7, 24, 0 },
+ { }
+};
+
+static struct clk_pll gpll3 = {
+ .l_reg = 0x22004,
+ .m_reg = 0x22008,
+ .n_reg = 0x2200c,
+ .config_reg = 0x22010,
+ .mode_reg = 0x22000,
+ .status_reg = 0x22024,
+ .status_bit = 17,
+ .freq_tbl = gpll3_freq_tbl,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gpll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll3_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3_vote",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+/* GPLL3 at 1100MHz, main output enabled. */
+static const struct pll_config gpll3_config = {
+ .l = 57,
+ .m = 7,
+ .n = 24,
+ .vco_val = 0x0,
+ .vco_mask = 0x3 << 20,
+ .pre_div_val = 0x0,
+ .pre_div_mask = 0x7 << 12,
+ .post_div_val = 0x0,
+ .post_div_mask = 0x3 << 8,
+ .mn_ena_mask = BIT(24),
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+};
+
+static struct clk_pll gpll4 = {
+ .l_reg = 0x24004,
+ .m_reg = 0x24008,
+ .n_reg = 0x2400c,
+ .config_reg = 0x24018,
+ .mode_reg = 0x24000,
+ .status_reg = 0x24024,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll4_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4_vote",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static struct clk_pll gpll6 = {
+ .mode_reg = 0x37000,
+ .l_reg = 0x37004,
+ .m_reg = 0x37008,
+ .n_reg = 0x3700c,
+ .config_reg = 0x37014,
+ .status_reg = 0x3701c,
+ .status_bit = 17,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_ops,
+ },
+};
+
+static struct clk_regmap gpll6_vote = {
+ .enable_reg = 0x45000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6_vote",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_pll_vote_ops,
+ },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll4_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_v1_1[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_OUT, 4 },
+};
+
+static const struct clk_parent_data gcc_parent_data_v1_1[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_AUX, 3 },
+ { P_GPLL4_OUT, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
+ { .hw = &gpll4_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL2_AUX, 3 },
+ { P_GPLL6_AUX, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll2_vote.hw },
+ { .hw = &gpll6_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct parent_map gcc_parent_map_4_fs[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT, 2 },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_GPLL4_OUT, 2 },
+ { P_GPLL6_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll4_vote.hw },
+ { .hw = &gpll6_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll4_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_7_mdp[] = {
+ { P_XO, 0 },
+ { P_GPLL6_OUT, 3 },
+ { P_GPLL0_OUT_MDP, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7_mdp[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll6_vote.hw },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL6_OUT, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll6_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4_8[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_8_a[] = {
+ { P_XO_A, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8_a[] = {
+ { .fw_name = "xo_a" },
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_8_gp[] = {
+ { P_GPLL0_OUT_MAIN, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8_gp[] = {
+ { .hw = &gpll0_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_XO, 0 },
+ { P_GPLL6_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll6_vote.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_XO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .fw_name = "xo" },
+};
+
+static const struct parent_map gcc_parent_map_sdcc_ice[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_M, 3 },
+};
+
+static const struct parent_map gcc_parent_map_cci[] = {
+ { P_XO, 0 },
+ { P_GPLL0_AUX, 2 },
+};
+
+static const struct parent_map gcc_parent_map_cpp[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_AUX, 3 },
+};
+
+static const struct parent_map gcc_parent_map_mdss_pix0[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_mdss_pix0[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pll" },
+};
+
+static const struct parent_map gcc_parent_map_mdss_pix1[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL, 3 },
+ { P_DSI1PLL, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_mdss_pix1[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pll" },
+ { .fw_name = "dsi1pll" },
+};
+
+static const struct parent_map gcc_parent_map_mdss_byte0[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_mdss_byte0[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pllbyte" },
+};
+
+static const struct parent_map gcc_parent_map_mdss_byte1[] = {
+ { P_XO, 0 },
+ { P_DSI0PLL_BYTE, 3 },
+ { P_DSI1PLL_BYTE, 1 },
+};
+
+static const struct clk_parent_data gcc_parent_data_mdss_byte1[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0pllbyte" },
+ { .fw_name = "dsi1pllbyte" },
+};
+
+static const struct parent_map gcc_parent_map_gfx3d[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_GFX3D, 5 },
+ { P_GPLL6_GFX3D, 3 },
+};
+
+static const struct clk_parent_data gcc_parent_data_gfx3d[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_vote.hw },
+ { .hw = &gpll4_vote.hw },
+ { .hw = &gpll6_vote.hw },
+};
+
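+/*
+ * In the frequency tables below, F(rate, src, pre_div, m, n) yields
+ * rate = src_rate / pre_div * m / n, where m = n = 0 bypasses the M/N
+ * counter; e.g. 540 MHz = 1080 MHz (GPLL6) / 2.
+ */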
+static const struct freq_tbl ftbl_aps_0_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(300000000, P_GPLL4_OUT, 4, 0, 0),
+ F(540000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 aps_0_clk_src = {
+ .cmd_rcgr = 0x78008,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_aps_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "aps_0_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_aps_1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(300000000, P_GPLL4_OUT, 4, 0, 0),
+ F(540000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 aps_1_clk_src = {
+ .cmd_rcgr = 0x79008,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_aps_1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "aps_1_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_apss_ahb_clk_src[] = {
+ F(19200000, P_XO_A, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(88890000, P_GPLL0_OUT_MAIN, 9, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 apss_ahb_clk_src = {
+ .cmd_rcgr = 0x46000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8_a,
+ .freq_tbl = ftbl_apss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "apss_ahb_clk_src",
+ .parent_data = gcc_parent_data_8_a,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8_a),
+ .ops = &clk_rcg2_ops,
+ /*
+ * This clock allows the CPUs to communicate with
+ * the rest of the SoC. Without it, the brain will
+ * operate without the rest of the body.
+ */
+ .flags = CLK_IS_CRITICAL,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp_i2c_apps_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x200c,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_blsp_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x3000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x3014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x4000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x4024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x5000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x5024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
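+/*
+ * UART rates are derived fractionally from GPLL0 through the M/N
+ * counter, e.g. 3.6864 MHz = 800 MHz * 72 / 15625.
+ */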
+static const struct freq_tbl ftbl_blsp_uart_apps_clk_src[] = {
+ F(3686400, P_GPLL0_OUT_MAIN, 1, 72, 15625),
+ F(7372800, P_GPLL0_OUT_MAIN, 1, 144, 15625),
+ F(14745600, P_GPLL0_OUT_MAIN, 1, 288, 15625),
+ F(16000000, P_GPLL0_OUT_MAIN, 10, 1, 5),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0_OUT_MAIN, 1, 3, 100),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 1, 25),
+ F(40000000, P_GPLL0_OUT_MAIN, 1, 1, 20),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 500),
+ F(48000000, P_GPLL0_OUT_MAIN, 1, 3, 50),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 8, 125),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 100),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1152, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 1, 3, 40),
+ F(64000000, P_GPLL0_OUT_MAIN, 1, 2, 25),
+ { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x2044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x3034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0xc00c,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0xc024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup1_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0xd000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0xd014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup2_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0xf000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0xf024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup3_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x18000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x18024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_qup4_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+ .cmd_rcgr = 0xc044,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart1_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+ .cmd_rcgr = 0xd034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_blsp_uart_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "blsp2_uart2_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cci_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(37500000, P_GPLL0_AUX, 1, 3, 64),
+ { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+ .cmd_rcgr = 0x51000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_cci,
+ .freq_tbl = ftbl_cci_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cci_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cpp_clk_src[] = {
+ F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(240000000, P_GPLL4_AUX, 5, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(480000000, P_GPLL4_AUX, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+ .cmd_rcgr = 0x58018,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_cpp,
+ .freq_tbl = ftbl_cpp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cpp_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi0_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+ .cmd_rcgr = 0x4e020,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_csi0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi1_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+ .cmd_rcgr = 0x4f020,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_csi1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi2_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+ .cmd_rcgr = 0x3c020,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_csi2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi2_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camss_gp0_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+ .cmd_rcgr = 0x54000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8_gp,
+ .freq_tbl = ftbl_camss_gp0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp0_clk_src",
+ .parent_data = gcc_parent_data_8_gp,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8_gp),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camss_gp1_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+ .cmd_rcgr = 0x55000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8_gp,
+ .freq_tbl = ftbl_camss_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_gp1_clk_src",
+ .parent_data = gcc_parent_data_8_gp,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8_gp),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_jpeg0_clk_src[] = {
+ F(133330000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+ .cmd_rcgr = 0x57000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_jpeg0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "jpeg0_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mclk_clk_src[] = {
+ F(8000000, P_GPLL0_OUT_MAIN, 1, 1, 100),
+ F(24000000, P_GPLL6_OUT, 1, 1, 45),
+ F(66670000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+ .cmd_rcgr = 0x52000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk0_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+ .cmd_rcgr = 0x53000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk1_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+ .cmd_rcgr = 0x5c000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_mclk_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mclk2_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi0phytimer_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x4e000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi0phytimer_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_csi1phytimer_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266670000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x4f000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_csi1phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "csi1phytimer_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_camss_top_ahb_clk_src[] = {
+ F(40000000, P_GPLL0_OUT_MAIN, 10, 1, 2),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 camss_top_ahb_clk_src = {
+ .cmd_rcgr = 0x5a000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_camss_top_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "camss_top_ahb_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vfe0_clk_src[] = {
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL4_OUT, 4, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(466000000, P_GPLL2_AUX, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+ .cmd_rcgr = 0x58000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_vfe0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe0_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vfe1_clk_src[] = {
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL4_OUT, 4, 0, 0),
+ F(320000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ F(466000000, P_GPLL2_AUX, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+ .cmd_rcgr = 0x58054,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_vfe1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vfe1_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_crypto_clk_src[] = {
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 crypto_clk_src = {
+ .cmd_rcgr = 0x16004,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_crypto_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "crypto_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gp1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+ .cmd_rcgr = 0x8004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8_gp,
+ .freq_tbl = ftbl_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp1_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_vote.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gp2_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+ .cmd_rcgr = 0x9004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8_gp,
+ .freq_tbl = ftbl_gp2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp2_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_vote.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gp3_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+ .cmd_rcgr = 0xa004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8_gp,
+ .freq_tbl = ftbl_gp3_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gp3_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_vote.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
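+/*
+ * The MDSS byte and pixel RCGs carry no frequency table: their rates are
+ * dictated by the external DSI PHY PLLs, so clk_byte2_ops/clk_pixel_ops
+ * plus CLK_SET_RATE_PARENT push rate requests up to the PLL and only
+ * program the local divider.
+ */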
+static struct clk_rcg2 byte0_clk_src = {
+ .cmd_rcgr = 0x4d044,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_mdss_byte0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte0_clk_src",
+ .parent_data = gcc_parent_data_mdss_byte0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_mdss_byte0),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+ .cmd_rcgr = 0x4d0b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_mdss_byte1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "byte1_clk_src",
+ .parent_data = gcc_parent_data_mdss_byte1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_mdss_byte1),
+ .ops = &clk_byte2_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_esc0_1_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+ .cmd_rcgr = 0x4d05c,
+ .hid_width = 5,
+ .freq_tbl = ftbl_esc0_1_clk_src,
+ .parent_map = gcc_parent_map_mdss_byte0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc0_clk_src",
+ .parent_data = gcc_parent_data_mdss_byte0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_mdss_byte0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+ .cmd_rcgr = 0x4d0a8,
+ .hid_width = 5,
+ .freq_tbl = ftbl_esc0_1_clk_src,
+ .parent_map = gcc_parent_map_mdss_byte1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "esc1_clk_src",
+ .parent_data = gcc_parent_data_mdss_byte1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_mdss_byte1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_mdp_clk_src[] = {
+ F(50000000, P_GPLL0_OUT_MDP, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MDP, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MDP, 8, 0, 0),
+ F(145454545, P_GPLL0_OUT_MDP, 5.5, 0, 0),
+ F(160000000, P_GPLL0_OUT_MDP, 5, 0, 0),
+ F(177777778, P_GPLL0_OUT_MDP, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MDP, 4, 0, 0),
+ F(270000000, P_GPLL6_OUT, 4, 0, 0),
+ F(320000000, P_GPLL0_OUT_MDP, 2.5, 0, 0),
+ F(360000000, P_GPLL6_OUT, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+ .cmd_rcgr = 0x4d014,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7_mdp,
+ .freq_tbl = ftbl_mdp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "mdp_clk_src",
+ .parent_data = gcc_parent_data_7_mdp,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7_mdp),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+ .cmd_rcgr = 0x4d000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_mdss_pix0,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk0_clk_src",
+ .parent_data = gcc_parent_data_mdss_pix0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_mdss_pix0),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+ .cmd_rcgr = 0x4d0b8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_mdss_pix1,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pclk1_clk_src",
+ .parent_data = gcc_parent_data_mdss_pix1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_mdss_pix1),
+ .ops = &clk_pixel_ops,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static const struct freq_tbl ftbl_vsync_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+ .cmd_rcgr = 0x4d02c,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_vsync_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vsync_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gfx3d_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(160000000, P_GPLL0_OUT_MAIN, 5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(228571429, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(240000000, P_GPLL6_GFX3D, 4.5, 0, 0),
+ F(266666667, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(300000000, P_GPLL4_GFX3D, 4, 0, 0),
+ F(360000000, P_GPLL6_GFX3D, 3, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(432000000, P_GPLL6_GFX3D, 2.5, 0, 0),
+ F(480000000, P_GPLL4_GFX3D, 2.5, 0, 0),
+ F(540000000, P_GPLL6_GFX3D, 2, 0, 0),
+ F(600000000, P_GPLL4_GFX3D, 2, 0, 0),
+ { }
+};
+
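+/*
+ * Kept in a named struct rather than an anonymous compound literal,
+ * presumably so the probe code can also reference or adjust it.
+ */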
+static const struct clk_init_data gfx3d_clk_params = {
+ .name = "gfx3d_clk_src",
+ .parent_data = gcc_parent_data_gfx3d,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_gfx3d),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_gfx3d,
+ .freq_tbl = ftbl_gfx3d_clk_src,
+ .clkr.hw.init = &gfx3d_clk_params,
+};
+
+static const struct freq_tbl ftbl_pdm2_clk_src[] = {
+ F(64000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+ .cmd_rcgr = 0x44010,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "pdm2_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_rbcpr_gfx_clk_src[] = {
+ F(19200000, P_XO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 rbcpr_gfx_clk_src = {
+ .cmd_rcgr = 0x3a00c,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_rbcpr_gfx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "rbcpr_gfx_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_MAIN, 10, 1, 4),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(342850000, P_GPLL4_OUT, 3.5, 0, 0),
+ F(400000000, P_GPLL4_OUT, 3, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_sdcc1_8976_v1_1_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_MAIN, 10, 1, 4),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+	F(186400000, P_GPLL2_OUT, 5, 0, 0),
+	F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(372800000, P_GPLL2_OUT, 2.5, 0, 0),
+ { }
+};
+
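+/*
+ * Alternative init data for MSM8976 v1.1, where SDCC1 sources its high
+ * rates from GPLL2 (see ftbl_sdcc1_8976_v1_1_apps_clk_src above);
+ * presumably installed at probe time when v1.1 silicon is detected.
+ */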
+static const struct clk_init_data sdcc1_apps_clk_src_8976v1_1_init = {
+ .name = "sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_v1_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_v1_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x42004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc1_ice_core_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_M, 8, 0, 0),
+ F(200000000, P_GPLL0_OUT_M, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x5d000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_sdcc_ice,
+ .freq_tbl = ftbl_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_sdcc2_4_apps_clk_src[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_MAIN, 10, 1, 4),
+ F(25000000, P_GPLL0_OUT_MAIN, 16, 1, 2),
+ F(40000000, P_GPLL0_OUT_MAIN, 10, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 16, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(177777778, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x43004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_sdcc2_4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 sdcc3_apps_clk_src = {
+ .cmd_rcgr = 0x39004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_sdcc2_4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "sdcc3_apps_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb_fs_ic_clk_src[] = {
+ F(60000000, P_GPLL6_OUT_MAIN, 6, 1, 3),
+ { }
+};
+
+static struct clk_rcg2 usb_fs_ic_clk_src = {
+ .cmd_rcgr = 0x3f034,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_usb_fs_ic_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_fs_ic_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb_fs_system_clk_src[] = {
+ F(64000000, P_GPLL0_OUT, 12.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_fs_system_clk_src = {
+ .cmd_rcgr = 0x3f010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4_fs,
+ .freq_tbl = ftbl_usb_fs_system_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_fs_system_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_usb_hs_system_clk_src[] = {
+ F(57140000, P_GPLL0_OUT_MAIN, 14, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(177780000, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+ .cmd_rcgr = 0x41010,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_usb_hs_system_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "usb_hs_system_clk_src",
+ .parent_data = gcc_parent_data_4_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_vcodec0_clk_src[] = {
+ F(72727200, P_GPLL0_OUT_MAIN, 11, 0, 0),
+ F(80000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(228570000, P_GPLL0_OUT_MAIN, 3.5, 0, 0),
+ F(310667000, P_GPLL2_AUX, 3, 0, 0),
+ F(360000000, P_GPLL6_AUX, 3, 0, 0),
+ F(400000000, P_GPLL0_OUT_MAIN, 2, 0, 0),
+ F(466000000, P_GPLL2_AUX, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+ .cmd_rcgr = 0x4c000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_vcodec0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "vcodec0_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
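+/*
+ * The clk_branch clocks below are plain gates on the RCG outputs: bit 0
+ * of enable_reg gates the clock, halt_reg is polled to confirm that the
+ * branch really toggled, and CLK_SET_RATE_PARENT forwards rate requests
+ * to the parent RCG. halt_check defaults to BRANCH_HALT, so branches
+ * that omit it behave like those that set it explicitly.
+ */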
+static struct clk_branch gcc_aps_0_clk = {
+ .halt_reg = 0x78004,
+ .clkr = {
+ .enable_reg = 0x78004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_aps_0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &aps_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aps_1_clk = {
+ .halt_reg = 0x79004,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_aps_1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &aps_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x3010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x4020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x5020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x302c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x302c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+ .halt_reg = 0xc008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+ .halt_reg = 0xc004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+ .halt_reg = 0xd010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+ .halt_reg = 0xd00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+ .halt_reg = 0xf020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+ .halt_reg = 0xf01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+ .halt_reg = 0x18020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x18020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+ .halt_reg = 0x1801c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1801c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+ .halt_reg = 0xc03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+ .halt_reg = 0xd02c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xd02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_blsp2_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_ahb_clk = {
+ .halt_reg = 0x5101c,
+ .clkr = {
+ .enable_reg = 0x5101c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cci_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_clk = {
+ .halt_reg = 0x51018,
+ .clkr = {
+ .enable_reg = 0x51018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cci_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &cci_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cpp_ahb_clk = {
+ .halt_reg = 0x58040,
+ .clkr = {
+ .enable_reg = 0x58040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cpp_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
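+/*
+ * No parent is declared: this branch only gates an AXI bus clock whose
+ * rate is managed elsewhere.
+ */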
+static struct clk_branch gcc_camss_cpp_axi_clk = {
+ .halt_reg = 0x58064,
+ .clkr = {
+ .enable_reg = 0x58064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cpp_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cpp_clk = {
+ .halt_reg = 0x5803c,
+ .clkr = {
+ .enable_reg = 0x5803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_cpp_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &cpp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0_ahb_clk = {
+ .halt_reg = 0x4e040,
+ .clkr = {
+ .enable_reg = 0x4e040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0_clk = {
+ .halt_reg = 0x4e03c,
+ .clkr = {
+ .enable_reg = 0x4e03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phy_clk = {
+ .halt_reg = 0x4e048,
+ .clkr = {
+ .enable_reg = 0x4e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0phy_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0pix_clk = {
+ .halt_reg = 0x4e058,
+ .clkr = {
+ .enable_reg = 0x4e058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0pix_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0rdi_clk = {
+ .halt_reg = 0x4e050,
+ .clkr = {
+ .enable_reg = 0x4e050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0rdi_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1_ahb_clk = {
+ .halt_reg = 0x4f040,
+ .clkr = {
+ .enable_reg = 0x4f040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1_clk = {
+ .halt_reg = 0x4f03c,
+ .clkr = {
+ .enable_reg = 0x4f03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phy_clk = {
+ .halt_reg = 0x4f048,
+ .clkr = {
+ .enable_reg = 0x4f048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1phy_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1pix_clk = {
+ .halt_reg = 0x4f058,
+ .clkr = {
+ .enable_reg = 0x4f058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1pix_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1rdi_clk = {
+ .halt_reg = 0x4f050,
+ .clkr = {
+ .enable_reg = 0x4f050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1rdi_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2_ahb_clk = {
+ .halt_reg = 0x3c040,
+ .clkr = {
+ .enable_reg = 0x3c040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2_clk = {
+ .halt_reg = 0x3c03c,
+ .clkr = {
+ .enable_reg = 0x3c03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2phy_clk = {
+ .halt_reg = 0x3c048,
+ .clkr = {
+ .enable_reg = 0x3c048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2phy_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2pix_clk = {
+ .halt_reg = 0x3c058,
+ .clkr = {
+ .enable_reg = 0x3c058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2pix_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2rdi_clk = {
+ .halt_reg = 0x3c050,
+ .clkr = {
+ .enable_reg = 0x3c050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi2rdi_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi_vfe0_clk = {
+ .halt_reg = 0x58050,
+ .clkr = {
+ .enable_reg = 0x58050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi_vfe0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi_vfe1_clk = {
+ .halt_reg = 0x58074,
+ .clkr = {
+ .enable_reg = 0x58074,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi_vfe1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vfe1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_gp0_clk = {
+ .halt_reg = 0x54018,
+ .clkr = {
+ .enable_reg = 0x54018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_gp0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_gp0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_gp1_clk = {
+ .halt_reg = 0x55018,
+ .clkr = {
+ .enable_reg = 0x55018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_gp1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ispif_ahb_clk = {
+ .halt_reg = 0x50004,
+ .clkr = {
+ .enable_reg = 0x50004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_ispif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg0_clk = {
+ .halt_reg = 0x57020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x57020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_jpeg0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &jpeg0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg_ahb_clk = {
+ .halt_reg = 0x57024,
+ .clkr = {
+ .enable_reg = 0x57024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_jpeg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_jpeg_axi_clk = {
+ .halt_reg = 0x57028,
+ .clkr = {
+ .enable_reg = 0x57028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_jpeg_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x52018,
+ .clkr = {
+ .enable_reg = 0x52018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x53018,
+ .clkr = {
+ .enable_reg = 0x53018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk2_clk = {
+ .halt_reg = 0x5c018,
+ .clkr = {
+ .enable_reg = 0x5c018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_mclk2_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_micro_ahb_clk = {
+ .halt_reg = 0x5600c,
+ .clkr = {
+ .enable_reg = 0x5600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_micro_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x4e01c,
+ .clkr = {
+ .enable_reg = 0x4e01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+ .halt_reg = 0x4f01c,
+ .clkr = {
+ .enable_reg = 0x4f01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ahb_clk = {
+ .halt_reg = 0x56004,
+ .clkr = {
+ .enable_reg = 0x56004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x5a014,
+ .clkr = {
+ .enable_reg = 0x5a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe0_clk = {
+ .halt_reg = 0x58038,
+ .clkr = {
+ .enable_reg = 0x58038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vfe0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe_ahb_clk = {
+ .halt_reg = 0x58044,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe_axi_clk = {
+ .halt_reg = 0x58048,
+ .clkr = {
+ .enable_reg = 0x58048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe1_ahb_clk = {
+ .halt_reg = 0x58060,
+ .clkr = {
+ .enable_reg = 0x58060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &camss_top_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe1_axi_clk = {
+ .halt_reg = 0x58068,
+ .clkr = {
+ .enable_reg = 0x58068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_vfe1_clk = {
+ .halt_reg = 0x5805c,
+ .clkr = {
+ .enable_reg = 0x5805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_camss_vfe1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vfe1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_dcc_clk = {
+ .halt_reg = 0x77004,
+ .clkr = {
+ .enable_reg = 0x77004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_dcc_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_gmem_clk = {
+ .halt_reg = 0x59024,
+ .clkr = {
+ .enable_reg = 0x59024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_gmem_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x8000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x9000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0xa000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_ahb_clk = {
+ .halt_reg = 0x4d07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_axi_clk = {
+ .halt_reg = 0x4d080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_byte0_clk = {
+ .halt_reg = 0x4d094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_byte0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &byte0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_byte1_clk = {
+ .halt_reg = 0x4d0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_byte1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &byte1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_esc0_clk = {
+ .halt_reg = 0x4d098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_esc0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &esc0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_esc1_clk = {
+ .halt_reg = 0x4d09c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d09c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_esc1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &esc1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_mdp_clk = {
+ .halt_reg = 0x4d088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_mdp_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &mdp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_pclk0_clk = {
+ .halt_reg = 0x4d084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_pclk0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_pclk1_clk = {
+ .halt_reg = 0x4d0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_pclk1_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdss_vsync_clk = {
+ .halt_reg = 0x4d090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mdss_vsync_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vsync_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_cfg_ahb_clk = {
+ .halt_reg = 0x49000,
+ .clkr = {
+ .enable_reg = 0x49000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
+ .halt_reg = 0x49004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_mss_q6_bimc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gfx_clk = {
+ .halt_reg = 0x59048,
+ .clkr = {
+ .enable_reg = 0x59048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_bimc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_ahb_clk = {
+ .halt_reg = 0x59028,
+ .clkr = {
+ .enable_reg = 0x59028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_aon_clk = {
+ .halt_reg = 0x59044,
+ .clkr = {
+ .enable_reg = 0x59044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_aon_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_gfx3d_clk = {
+ .halt_reg = 0x59020,
+ .clkr = {
+ .enable_reg = 0x59020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_gfx3d_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gfx3d_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_oxili_timer_clk = {
+ .halt_reg = 0x59040,
+ .clkr = {
+ .enable_reg = 0x59040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_oxili_timer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x44004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x44004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rbcpr_gfx_ahb_clk = {
+ .halt_reg = 0x3a008,
+ .clkr = {
+ .enable_reg = 0x3a008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_rbcpr_gfx_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rbcpr_gfx_clk = {
+ .halt_reg = 0x3a004,
+ .clkr = {
+ .enable_reg = 0x3a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_rbcpr_gfx_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &rbcpr_gfx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x4201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x42018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x5d014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5d014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc1_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x4301c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4301c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x43018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x43018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc3_ahb_clk = {
+ .halt_reg = 0x3901c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x3901c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc3_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc3_apps_clk = {
+ .halt_reg = 0x39018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_sdcc3_apps_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc3_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb2a_phy_sleep_clk = {
+ .halt_reg = 0x4102c,
+ .clkr = {
+ .enable_reg = 0x4102c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb2a_phy_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_phy_cfg_ahb_clk = {
+ .halt_reg = 0x41030,
+ .clkr = {
+ .enable_reg = 0x41030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_hs_phy_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_fs_ahb_clk = {
+ .halt_reg = 0x3f008,
+ .clkr = {
+ .enable_reg = 0x3f008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_fs_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_fs_ic_clk = {
+ .halt_reg = 0x3f030,
+ .clkr = {
+ .enable_reg = 0x3f030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_fs_ic_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb_fs_ic_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_fs_system_clk = {
+ .halt_reg = 0x3f004,
+ .clkr = {
+ .enable_reg = 0x3f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_fs_system_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb_fs_system_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_ahb_clk = {
+ .halt_reg = 0x41008,
+ .clkr = {
+ .enable_reg = 0x41008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_hs_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+ .halt_reg = 0x41004,
+ .clkr = {
+ .enable_reg = 0x41004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb_hs_system_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb_hs_system_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_ahb_clk = {
+ .halt_reg = 0x4c020,
+ .clkr = {
+ .enable_reg = 0x4c020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_axi_clk = {
+ .halt_reg = 0x4c024,
+ .clkr = {
+ .enable_reg = 0x4c024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_core0_vcodec0_clk = {
+ .halt_reg = 0x4c02c,
+ .clkr = {
+ .enable_reg = 0x4c02c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_core0_vcodec0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_core1_vcodec0_clk = {
+ .halt_reg = 0x4c034,
+ .clkr = {
+ .enable_reg = 0x4c034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_core1_vcodec0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus0_vcodec0_clk = {
+ .halt_reg = 0x4c01c,
+ .clkr = {
+ .enable_reg = 0x4c01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "gcc_venus0_vcodec0_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &vcodec0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Vote clocks */
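+/*
+ * These branches are enabled through the shared voting registers at 0x45004
+ * and 0x4500c, hence BRANCH_HALT_VOTED: the halt bit is only polled when
+ * enabling, as other voters may keep a clock running after this vote is
+ * dropped.
+ */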
+static struct clk_branch gcc_apss_ahb_clk = {
+ .halt_reg = 0x4601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apss_axi_clk = {
+ .halt_reg = 0x46020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x1008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp2_ahb_clk = {
+ .halt_reg = 0xb008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_ahb_clk = {
+ .halt_reg = 0x16024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_axi_clk = {
+ .halt_reg = 0x16020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_crypto_clk = {
+ .halt_reg = 0x1601c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x45004,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_crypto_clk",
+ .parent_hws = (const struct clk_hw *[]) {
+ &crypto_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpp_tbu_clk = {
+ .halt_reg = 0x12040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpp_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_1_tbu_clk = {
+ .halt_reg = 0x12098,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_1_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tbu_clk = {
+ .halt_reg = 0x12010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gfx_tcu_clk = {
+ .halt_reg = 0x12020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gfx_tcu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_apss_tcu_clk = {
+ .halt_reg = 0x12018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_apss_tcu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gtcu_ahb_clk = {
+ .halt_reg = 0x12044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gtcu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_jpeg_tbu_clk = {
+ .halt_reg = 0x12034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_jpeg_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdp_rt_tbu_clk = {
+ .halt_reg = 0x1204c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdp_rt_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_mdp_tbu_clk = {
+ .halt_reg = 0x1201c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_mdp_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_smmu_cfg_clk = {
+ .halt_reg = 0x12038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_smmu_cfg_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_1_tbu_clk = {
+ .halt_reg = 0x1209c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_1_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_tbu_clk = {
+ .halt_reg = 0x12014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vfe1_tbu_clk = {
+ .halt_reg = 0x12090,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vfe1_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vfe_tbu_clk = {
+ .halt_reg = 0x1203c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4500c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vfe_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
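+/*
+ * GDSCs (globally distributed switch controllers) model the SoC power
+ * domains; each cxcs[] array lists the branch CBCR offsets whose retention
+ * bits the GDSC core toggles around power-state transitions.
+ */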
+static struct gdsc venus_gdsc = {
+ .gdscr = 0x4c018,
+ .cxcs = (unsigned int []){ 0x4c024, 0x4c01c },
+ .cxc_count = 2,
+ .pd = {
+ .name = "venus_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core0_gdsc = {
+ .gdscr = 0x4c028,
+ .cxcs = (unsigned int []){ 0x4c02c },
+ .cxc_count = 1,
+ .pd = {
+ .name = "venus_core0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc venus_core1_gdsc = {
+ .gdscr = 0x4c030,
+ .pd = {
+ .name = "venus_core1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc mdss_gdsc = {
+ .gdscr = 0x4d078,
+ .cxcs = (unsigned int []){ 0x4d080, 0x4d088 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "mdss_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc jpeg_gdsc = {
+ .gdscr = 0x5701c,
+ .cxcs = (unsigned int []){ 0x57020, 0x57028 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "jpeg_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe0_gdsc = {
+ .gdscr = 0x58034,
+ .cxcs = (unsigned int []){ 0x58038, 0x58048, 0x5600c, 0x58050 },
+ .cxc_count = 4,
+ .pd = {
+ .name = "vfe0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc vfe1_gdsc = {
+ .gdscr = 0x5806c,
+ .cxcs = (unsigned int []){ 0x5805c, 0x58068, 0x5600c, 0x58074 },
+ .cxc_count = 4,
+ .pd = {
+ .name = "vfe1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc cpp_gdsc = {
+ .gdscr = 0x58078,
+ .cxcs = (unsigned int []){ 0x5803c, 0x58064 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "cpp_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc oxili_cx_gdsc = {
+ .gdscr = 0x5904c,
+ .cxcs = (unsigned int []){ 0x59020 },
+ .cxc_count = 1,
+ .pd = {
+ .name = "oxili_cx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc oxili_gx_gdsc = {
+ .gdscr = 0x5901c,
+ .clamp_io_ctrl = 0x5b00c,
+ .cxcs = (unsigned int []){ 0x59000, 0x59024 },
+ .cxc_count = 2,
+ .pd = {
+ .name = "oxili_gx_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .supply = "vdd_gfx",
+ .flags = CLAMP_IO,
+};
+
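+/* Indices come from <dt-bindings/clock/qcom,gcc-msm8976.h> */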
+static struct clk_regmap *gcc_msm8976_clocks[] = {
+ [GPLL0] = &gpll0.clkr,
+ [GPLL2] = &gpll2.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL0_CLK_SRC] = &gpll0_vote,
+ [GPLL2_CLK_SRC] = &gpll2_vote,
+ [GPLL3_CLK_SRC] = &gpll3_vote,
+ [GPLL4_CLK_SRC] = &gpll4_vote,
+ [GPLL6_CLK_SRC] = &gpll6_vote,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+ [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+ [GCC_CAMSS_CCI_AHB_CLK] = &gcc_camss_cci_ahb_clk.clkr,
+ [GCC_CAMSS_CCI_CLK] = &gcc_camss_cci_clk.clkr,
+ [GCC_CAMSS_CPP_AHB_CLK] = &gcc_camss_cpp_ahb_clk.clkr,
+ [GCC_CAMSS_CPP_AXI_CLK] = &gcc_camss_cpp_axi_clk.clkr,
+ [GCC_CAMSS_CPP_CLK] = &gcc_camss_cpp_clk.clkr,
+ [GCC_CAMSS_CSI0_AHB_CLK] = &gcc_camss_csi0_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0_CLK] = &gcc_camss_csi0_clk.clkr,
+ [GCC_CAMSS_CSI0PHY_CLK] = &gcc_camss_csi0phy_clk.clkr,
+ [GCC_CAMSS_CSI0PIX_CLK] = &gcc_camss_csi0pix_clk.clkr,
+ [GCC_CAMSS_CSI0RDI_CLK] = &gcc_camss_csi0rdi_clk.clkr,
+ [GCC_CAMSS_CSI1_AHB_CLK] = &gcc_camss_csi1_ahb_clk.clkr,
+ [GCC_CAMSS_CSI1_CLK] = &gcc_camss_csi1_clk.clkr,
+ [GCC_CAMSS_CSI1PHY_CLK] = &gcc_camss_csi1phy_clk.clkr,
+ [GCC_CAMSS_CSI1PIX_CLK] = &gcc_camss_csi1pix_clk.clkr,
+ [GCC_CAMSS_CSI1RDI_CLK] = &gcc_camss_csi1rdi_clk.clkr,
+ [GCC_CAMSS_CSI2_AHB_CLK] = &gcc_camss_csi2_ahb_clk.clkr,
+ [GCC_CAMSS_CSI2_CLK] = &gcc_camss_csi2_clk.clkr,
+ [GCC_CAMSS_CSI2PHY_CLK] = &gcc_camss_csi2phy_clk.clkr,
+ [GCC_CAMSS_CSI2PIX_CLK] = &gcc_camss_csi2pix_clk.clkr,
+ [GCC_CAMSS_CSI2RDI_CLK] = &gcc_camss_csi2rdi_clk.clkr,
+ [GCC_CAMSS_CSI_VFE0_CLK] = &gcc_camss_csi_vfe0_clk.clkr,
+ [GCC_CAMSS_CSI_VFE1_CLK] = &gcc_camss_csi_vfe1_clk.clkr,
+ [GCC_CAMSS_GP0_CLK] = &gcc_camss_gp0_clk.clkr,
+ [GCC_CAMSS_GP1_CLK] = &gcc_camss_gp1_clk.clkr,
+ [GCC_CAMSS_ISPIF_AHB_CLK] = &gcc_camss_ispif_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG0_CLK] = &gcc_camss_jpeg0_clk.clkr,
+ [GCC_CAMSS_JPEG_AHB_CLK] = &gcc_camss_jpeg_ahb_clk.clkr,
+ [GCC_CAMSS_JPEG_AXI_CLK] = &gcc_camss_jpeg_axi_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MICRO_AHB_CLK] = &gcc_camss_micro_ahb_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_AHB_CLK] = &gcc_camss_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_VFE0_CLK] = &gcc_camss_vfe0_clk.clkr,
+ [GCC_CAMSS_VFE_AHB_CLK] = &gcc_camss_vfe_ahb_clk.clkr,
+ [GCC_CAMSS_VFE_AXI_CLK] = &gcc_camss_vfe_axi_clk.clkr,
+ [GCC_CAMSS_VFE1_AHB_CLK] = &gcc_camss_vfe1_ahb_clk.clkr,
+ [GCC_CAMSS_VFE1_AXI_CLK] = &gcc_camss_vfe1_axi_clk.clkr,
+ [GCC_CAMSS_VFE1_CLK] = &gcc_camss_vfe1_clk.clkr,
+ [GCC_DCC_CLK] = &gcc_dcc_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_MDSS_AHB_CLK] = &gcc_mdss_ahb_clk.clkr,
+ [GCC_MDSS_AXI_CLK] = &gcc_mdss_axi_clk.clkr,
+ [GCC_MDSS_ESC0_CLK] = &gcc_mdss_esc0_clk.clkr,
+ [GCC_MDSS_ESC1_CLK] = &gcc_mdss_esc1_clk.clkr,
+ [GCC_MDSS_MDP_CLK] = &gcc_mdss_mdp_clk.clkr,
+ [GCC_MDSS_VSYNC_CLK] = &gcc_mdss_vsync_clk.clkr,
+ [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr,
+ [GCC_MSS_Q6_BIMC_AXI_CLK] = &gcc_mss_q6_bimc_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_RBCPR_GFX_AHB_CLK] = &gcc_rbcpr_gfx_ahb_clk.clkr,
+ [GCC_RBCPR_GFX_CLK] = &gcc_rbcpr_gfx_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC3_AHB_CLK] = &gcc_sdcc3_ahb_clk.clkr,
+ [GCC_SDCC3_APPS_CLK] = &gcc_sdcc3_apps_clk.clkr,
+ [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+ [GCC_USB_HS_PHY_CFG_AHB_CLK] = &gcc_usb_hs_phy_cfg_ahb_clk.clkr,
+ [GCC_USB_FS_AHB_CLK] = &gcc_usb_fs_ahb_clk.clkr,
+ [GCC_USB_FS_IC_CLK] = &gcc_usb_fs_ic_clk.clkr,
+ [GCC_USB_FS_SYSTEM_CLK] = &gcc_usb_fs_system_clk.clkr,
+ [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+ [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+ [GCC_VENUS0_AHB_CLK] = &gcc_venus0_ahb_clk.clkr,
+ [GCC_VENUS0_AXI_CLK] = &gcc_venus0_axi_clk.clkr,
+ [GCC_VENUS0_CORE0_VCODEC0_CLK] = &gcc_venus0_core0_vcodec0_clk.clkr,
+ [GCC_VENUS0_CORE1_VCODEC0_CLK] = &gcc_venus0_core1_vcodec0_clk.clkr,
+ [GCC_VENUS0_VCODEC0_CLK] = &gcc_venus0_vcodec0_clk.clkr,
+ [GCC_APSS_AHB_CLK] = &gcc_apss_ahb_clk.clkr,
+ [GCC_APSS_AXI_CLK] = &gcc_apss_axi_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CRYPTO_AHB_CLK] = &gcc_crypto_ahb_clk.clkr,
+ [GCC_CRYPTO_AXI_CLK] = &gcc_crypto_axi_clk.clkr,
+ [GCC_CRYPTO_CLK] = &gcc_crypto_clk.clkr,
+ [GCC_CPP_TBU_CLK] = &gcc_cpp_tbu_clk.clkr,
+ [GCC_APSS_TCU_CLK] = &gcc_apss_tcu_clk.clkr,
+ [GCC_JPEG_TBU_CLK] = &gcc_jpeg_tbu_clk.clkr,
+ [GCC_MDP_RT_TBU_CLK] = &gcc_mdp_rt_tbu_clk.clkr,
+ [GCC_MDP_TBU_CLK] = &gcc_mdp_tbu_clk.clkr,
+ [GCC_SMMU_CFG_CLK] = &gcc_smmu_cfg_clk.clkr,
+ [GCC_VENUS_1_TBU_CLK] = &gcc_venus_1_tbu_clk.clkr,
+ [GCC_VENUS_TBU_CLK] = &gcc_venus_tbu_clk.clkr,
+ [GCC_VFE1_TBU_CLK] = &gcc_vfe1_tbu_clk.clkr,
+ [GCC_VFE_TBU_CLK] = &gcc_vfe_tbu_clk.clkr,
+ [GCC_APS_0_CLK] = &gcc_aps_0_clk.clkr,
+ [GCC_APS_1_CLK] = &gcc_aps_1_clk.clkr,
+ [APS_0_CLK_SRC] = &aps_0_clk_src.clkr,
+ [APS_1_CLK_SRC] = &aps_1_clk_src.clkr,
+ [APSS_AHB_CLK_SRC] = &apss_ahb_clk_src.clkr,
+ [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+ [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+ [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+ [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+ [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+ [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+ [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+ [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+ [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+ [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+ [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+ [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+ [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+ [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+ [CCI_CLK_SRC] = &cci_clk_src.clkr,
+ [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+ [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+ [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+ [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+ [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+ [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+ [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+ [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+ [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+ [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+ [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+ [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+ [CAMSS_TOP_AHB_CLK_SRC] = &camss_top_ahb_clk_src.clkr,
+ [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+ [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+ [CRYPTO_CLK_SRC] = &crypto_clk_src.clkr,
+ [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+ [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+ [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+ [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+ [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+ [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+ [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+ [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+ [RBCPR_GFX_CLK_SRC] = &rbcpr_gfx_clk_src.clkr,
+ [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+ [SDCC1_ICE_CORE_CLK_SRC] = &sdcc1_ice_core_clk_src.clkr,
+ [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+ [SDCC3_APPS_CLK_SRC] = &sdcc3_apps_clk_src.clkr,
+ [USB_FS_IC_CLK_SRC] = &usb_fs_ic_clk_src.clkr,
+ [USB_FS_SYSTEM_CLK_SRC] = &usb_fs_system_clk_src.clkr,
+ [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+ [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+ [GCC_MDSS_BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+ [GCC_MDSS_BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+ [GCC_MDSS_BYTE0_CLK] = &gcc_mdss_byte0_clk.clkr,
+ [GCC_MDSS_BYTE1_CLK] = &gcc_mdss_byte1_clk.clkr,
+ [GCC_MDSS_PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+ [GCC_MDSS_PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+ [GCC_MDSS_PCLK0_CLK] = &gcc_mdss_pclk0_clk.clkr,
+ [GCC_MDSS_PCLK1_CLK] = &gcc_mdss_pclk1_clk.clkr,
+ [GCC_GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+ [GCC_GFX3D_OXILI_CLK] = &gcc_oxili_gfx3d_clk.clkr,
+ [GCC_GFX3D_BIMC_CLK] = &gcc_bimc_gfx_clk.clkr,
+ [GCC_GFX3D_OXILI_AHB_CLK] = &gcc_oxili_ahb_clk.clkr,
+ [GCC_GFX3D_OXILI_AON_CLK] = &gcc_oxili_aon_clk.clkr,
+ [GCC_GFX3D_OXILI_GMEM_CLK] = &gcc_oxili_gmem_clk.clkr,
+ [GCC_GFX3D_OXILI_TIMER_CLK] = &gcc_oxili_timer_clk.clkr,
+ [GCC_GFX3D_TBU0_CLK] = &gcc_gfx_tbu_clk.clkr,
+ [GCC_GFX3D_TBU1_CLK] = &gcc_gfx_1_tbu_clk.clkr,
+ [GCC_GFX3D_TCU_CLK] = &gcc_gfx_tcu_clk.clkr,
+ [GCC_GFX3D_GTCU_AHB_CLK] = &gcc_gtcu_ahb_clk.clkr,
+};
+
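+/* Offsets of the Block Control Reset (BCR) registers */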
+static const struct qcom_reset_map gcc_msm8976_resets[] = {
+ [RST_CAMSS_MICRO_BCR] = { 0x56008 },
+ [RST_USB_HS_BCR] = { 0x41000 },
+ [RST_QUSB2_PHY_BCR] = { 0x4103c },
+ [RST_USB2_HS_PHY_ONLY_BCR] = { 0x41034 },
+ [RST_USB_HS_PHY_CFG_AHB_BCR] = { 0x41038 },
+ [RST_USB_FS_BCR] = { 0x3f000 },
+ [RST_CAMSS_CSI1PIX_BCR] = { 0x4f054 },
+ [RST_CAMSS_CSI_VFE1_BCR] = { 0x58070 },
+ [RST_CAMSS_VFE1_BCR] = { 0x5807c },
+ [RST_CAMSS_CPP_BCR] = { 0x58080 },
+};
+
+static struct gdsc *gcc_msm8976_gdscs[] = {
+ [VENUS_GDSC] = &venus_gdsc,
+ [VENUS_CORE0_GDSC] = &venus_core0_gdsc,
+ [VENUS_CORE1_GDSC] = &venus_core1_gdsc,
+ [MDSS_GDSC] = &mdss_gdsc,
+ [JPEG_GDSC] = &jpeg_gdsc,
+ [VFE0_GDSC] = &vfe0_gdsc,
+ [VFE1_GDSC] = &vfe1_gdsc,
+ [CPP_GDSC] = &cpp_gdsc,
+ [OXILI_GX_GDSC] = &oxili_gx_gdsc,
+ [OXILI_CX_GDSC] = &oxili_cx_gdsc,
+};
+
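+/* 32-bit registers at stride 4; fast_io selects spinlock-based regmap locking */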
+static const struct regmap_config gcc_msm8976_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7fffc,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_msm8976_desc = {
+ .config = &gcc_msm8976_regmap_config,
+ .clks = gcc_msm8976_clocks,
+ .num_clks = ARRAY_SIZE(gcc_msm8976_clocks),
+ .resets = gcc_msm8976_resets,
+ .num_resets = ARRAY_SIZE(gcc_msm8976_resets),
+ .gdscs = gcc_msm8976_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_msm8976_gdscs),
+};
+
+static const struct of_device_id gcc_msm8976_match_table[] = {
+ { .compatible = "qcom,gcc-msm8976" }, /* Also valid for 8x56 */
+ { .compatible = "qcom,gcc-msm8976-v1.1" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_msm8976_match_table);
+
+static int gcc_msm8976_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
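+ /* MSM8976 v1.1 uses a different parent map and frequency table for SDCC1 */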
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,gcc-msm8976-v1.1")) {
+ sdcc1_apps_clk_src.parent_map = gcc_parent_map_v1_1;
+ sdcc1_apps_clk_src.freq_tbl = ftbl_sdcc1_8976_v1_1_apps_clk_src;
+ sdcc1_apps_clk_src.clkr.hw.init = &sdcc1_apps_clk_src_8976v1_1_init;
+ }
+
+ regmap = qcom_cc_map(pdev, &gcc_msm8976_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /* Set Sleep and Wakeup cycles to 0 for GMEM clock */
+ ret = regmap_update_bits(regmap, gcc_oxili_gmem_clk.clkr.enable_reg, 0xff0, 0);
+ if (ret)
+ return ret;
+
+ clk_pll_configure_sr_hpm_lp(&gpll3, regmap, &gpll3_config, true);
+
+ /* Enable AUX2 clock for APSS */
+ ret = regmap_update_bits(regmap, 0x60000, BIT(2), BIT(2));
+ if (ret)
+ return ret;
+
+ /* Set Sleep cycles to 0 for OXILI clock */
+ ret = regmap_update_bits(regmap, gcc_oxili_gfx3d_clk.clkr.enable_reg, 0xf0, 0);
+ if (ret)
+ return ret;
+
+ return qcom_cc_really_probe(pdev, &gcc_msm8976_desc, regmap);
+}
+
+static struct platform_driver gcc_msm8976_driver = {
+ .probe = gcc_msm8976_probe,
+ .driver = {
+ .name = "qcom,gcc-msm8976",
+ .of_match_table = gcc_msm8976_match_table,
+ },
+};
+
+static int __init gcc_msm8976_init(void)
+{
+ return platform_driver_register(&gcc_msm8976_driver);
+}
+core_initcall(gcc_msm8976_init);
+
+static void __exit gcc_msm8976_exit(void)
+{
+ platform_driver_unregister(&gcc_msm8976_driver);
+}
+module_exit(gcc_msm8976_exit);
+
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@somainline.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 702a9bdc0559..71aa630fa4bd 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*/
+#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
index 8fb6bd69f240..423627d49719 100644
--- a/drivers/clk/qcom/gcc-sc7280.c
+++ b/drivers/clk/qcom/gcc-sc7280.c
@@ -2917,7 +2917,7 @@ static struct clk_branch gcc_cfg_noc_lpass_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_cfg_noc_lpass_clk",
- .ops = &clk_branch2_ops,
+ .ops = &clk_branch2_aon_ops,
},
},
};
diff --git a/drivers/clk/qcom/gcc-sdx65.c b/drivers/clk/qcom/gcc-sdx65.c
new file mode 100644
index 000000000000..748ac15b5ed8
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sdx65.c
@@ -0,0 +1,1611 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sdx65.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_PCIE_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
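+/* Lucid EVO PLL; the "fixed" ops expose enable/disable but not rate changes */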
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x6d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
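+/*
+ * The parent_map tables translate the P_* identifiers above into the
+ * hardware mux select values programmed into each RCG CFG register.
+ */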
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
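+/* Same sources, but using the active-only XO handle (for the CPUSS AHB RCG) */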
+static const struct clk_parent_data gcc_parent_data_0_ao[] = {
+ { .fw_name = "bi_tcxo_ao" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 2 },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_PCIE_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "pcie_pipe_clk"},
+ { .fw_name = "bi_tcxo"},
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .fw_name = "usb3_phy_wrapper_gcc_usb30_pipe_clk"},
+ { .fw_name = "bi_tcxo"},
+};
+
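+/*
+ * PHY-sourced muxes: the pipe muxes can fall back to bi_tcxo while the
+ * PHY-supplied pipe clock is down, keeping downstream logic clocked across
+ * PHY power transitions.
+ */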
+static struct clk_regmap_mux gcc_pcie_aux_clk_src = {
+ .reg = 0x43060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_aux_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_pipe_clk_src = {
+ .reg = 0x43044,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_5,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_pipe_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_phy_pipe_clk_src = {
+ .reg = 0x1706c,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_6,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = 2,
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
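+/*
+ * F(rate, source, pre_div, m, n): half-integer pre-dividers (e.g. 12.5) are
+ * valid since F() encodes the divider as (2 * div - 1); m/n program the MND
+ * counter, with 0/0 meaning bypass.
+ */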
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_i2c_apps_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1c024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_spi_apps_clk_src[] = {
+ F(960000, P_BI_TCXO, 10, 1, 2),
+ F(4800000, P_BI_TCXO, 4, 0, 0),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(15000000, P_GPLL0_OUT_EVEN, 5, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_GPLL0_OUT_MAIN, 12.5, 1, 2),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1c00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x1e024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1e00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x20024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2000c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x22024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x2200c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_uart1_apps_clk_src[] = {
+ F(3686400, P_GPLL0_OUT_EVEN, 1, 192, 15625),
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(16000000, P_GPLL0_OUT_EVEN, 1, 4, 75),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(19354839, P_GPLL0_OUT_MAIN, 15.5, 1, 2),
+ F(20000000, P_GPLL0_OUT_MAIN, 15, 1, 2),
+ F(20689655, P_GPLL0_OUT_MAIN, 14.5, 1, 2),
+ F(21428571, P_GPLL0_OUT_MAIN, 14, 1, 2),
+ F(22222222, P_GPLL0_OUT_MAIN, 13.5, 1, 2),
+ F(23076923, P_GPLL0_OUT_MAIN, 13, 1, 2),
+ F(24000000, P_GPLL0_OUT_MAIN, 5, 1, 5),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(26086957, P_GPLL0_OUT_MAIN, 11.5, 1, 2),
+ F(27272727, P_GPLL0_OUT_MAIN, 11, 1, 2),
+ F(28571429, P_GPLL0_OUT_MAIN, 10.5, 1, 2),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 4, 75),
+ F(40000000, P_GPLL0_OUT_MAIN, 15, 0, 0),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 375),
+ F(48000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 32, 375),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 75),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1536, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(63157895, P_GPLL0_OUT_MAIN, 9.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x1d00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x1f00c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x2100c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x2300c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x3000c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parent_data_0_ao,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x37004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x38004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x39004,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_aux_phy_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_aux_phy_clk_src = {
+ .cmd_rcgr = 0x43048,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_aux_phy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_aux_phy_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_rchng_phy_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_rchng_phy_clk_src = {
+ .cmd_rcgr = 0x43064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_rchng_phy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_rchng_phy_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x24010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x1a010,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_master_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_master_clk_src = {
+ .cmd_rcgr = 0x17030,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x17048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_aux_phy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb3_phy_aux_clk_src[] = {
+ F(1000000, P_BI_TCXO, 1, 5, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0x17070,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_usb3_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_cpuss_ahb_postdiv_clk_src = {
+ .reg = 0x30024,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_cpuss_ahb_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_cpuss_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_mock_utmi_postdiv_clk_src = {
+ .reg = 0x17060,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
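+
+/*
+ * The two post-dividers above sit behind their RCGs and use
+ * clk_regmap_div_ro_ops: the divider field is only read back from
+ * hardware, never programmed, and rate requests propagate to the
+ * parent RCG via CLK_SET_RATE_PARENT instead.
+ */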
+
+static struct clk_branch gcc_ahb_pcie_link_clk = {
+ .halt_reg = 0x2e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb_pcie_link_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x1b004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
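+
+/*
+ * Note on BRANCH_HALT_VOTED: branches such as gcc_blsp1_ahb_clk are
+ * enabled through a shared voting register (0x6d008 here) in which
+ * each bit is one master's vote; the clock runs while any vote is
+ * set, so the halt bit is not polled on disable since another voter
+ * may legitimately be keeping the clock on.
+ */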
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x1c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup1_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x1c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup1_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup2_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup2_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x20008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup3_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup3_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x22008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x22008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup4_i2c_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x22004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x22004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_qup4_spi_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_sleep_clk = {
+ .halt_reg = 0x1b00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x1d004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_uart1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x1f004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1f004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_uart2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x21004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x21004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_uart3_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x23004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_blsp1_uart4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x27004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
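+
+/*
+ * Branches that set hwcg_reg/hwcg_bit (e.g. gcc_boot_rom_ahb_clk
+ * above) support hardware clock gating: while the hwcg bit is set the
+ * hardware gates the clock autonomously and the driver skips the
+ * usual halt-bit poll, since the status bit no longer tracks the
+ * software enable.
+ */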
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x37000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x37000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x38000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x39000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x39000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_en = {
+ .halt_reg = 0x88004,
+	/*
+	 * The clock controller does not handle the status bit for
+	 * clocks with GDSCs (power domains) in HW-controlled mode,
+	 * so skip the status-bit check for those clocks by setting
+	 * the BRANCH_HALT_DELAY flag.
+	 */
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x88004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_aux_clk = {
+ .halt_reg = 0x43034,
+	/*
+	 * The clock controller does not handle the status bit for
+	 * clocks with GDSCs (power domains) in HW-controlled mode,
+	 * so skip the status-bit check for those clocks by setting
+	 * the BRANCH_HALT_DELAY flag.
+	 */
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x43034,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_cfg_ahb_clk = {
+ .halt_reg = 0x4302c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4302c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_mstr_axi_clk = {
+ .halt_reg = 0x43024,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x43024,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_pipe_clk = {
+ .halt_reg = 0x4303c,
+	/*
+	 * The clock controller does not handle the status bit for
+	 * clocks with GDSCs (power domains) in HW-controlled mode,
+	 * so skip the status-bit check for those clocks by setting
+	 * the BRANCH_HALT_DELAY flag.
+	 */
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x4303c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rchng_phy_clk = {
+ .halt_reg = 0x43030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x43030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_rchng_phy_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_rchng_phy_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_sleep_clk = {
+ .halt_reg = 0x43038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x43038,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_sleep_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_aux_phy_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_slv_axi_clk = {
+ .halt_reg = 0x4301c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4301c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_slv_q2a_axi_clk = {
+ .halt_reg = 0x43018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x43018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x2400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x24004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x24004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x24004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x24008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_rx1_usb2_clkref_en = {
+ .halt_reg = 0x88008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x88008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_rx1_usb2_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x1a00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x1a004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc1_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0x17018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0x1702c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+				.hw = &gcc_usb30_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mstr_axi_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0x17028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_slv_ahb_clk = {
+ .halt_reg = 0x17024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0x17064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc usb30_gdsc = {
+ .gdscr = 0x17004,
+ .pd = {
+ .name = "usb30_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_gdsc = {
+ .gdscr = 0x43004,
+ .pd = {
+ .name = "pcie_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
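+
+/*
+ * The GDSCs (globally distributed switch controllers) above are the
+ * USB3 and PCIe power domains, exposed to consumers through genpd;
+ * PWRSTS_OFF_ON means each domain supports only fully off and fully
+ * on, with no intermediate retention state.
+ */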
+
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0x17068,
+	/*
+	 * The clock controller does not handle the status bit for
+	 * clocks with GDSCs (power domains) in HW-controlled mode,
+	 * so skip the status-bit check for those clocks by setting
+	 * the BRANCH_HALT_DELAY flag.
+	 */
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x17068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_en = {
+ .halt_reg = 0x88000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x88000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0x19008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x19008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x19008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_xo_div4_clk = {
+ .halt_reg = 0x2e010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_xo_div4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_xo_pcie_link_clk = {
+ .halt_reg = 0x2e008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x2e008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x2e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_xo_pcie_link_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *gcc_sdx65_clocks[] = {
+ [GCC_AHB_PCIE_LINK_CLK] = &gcc_ahb_pcie_link_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC] = &gcc_blsp1_qup1_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC] = &gcc_blsp1_qup1_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC] = &gcc_blsp1_qup2_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC] = &gcc_blsp1_qup2_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC] = &gcc_blsp1_qup3_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC] = &gcc_blsp1_qup3_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC] = &gcc_blsp1_qup4_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC] = &gcc_blsp1_qup4_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_SLEEP_CLK] = &gcc_blsp1_sleep_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK_SRC] = &gcc_blsp1_uart1_apps_clk_src.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK_SRC] = &gcc_blsp1_uart2_apps_clk_src.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK_SRC] = &gcc_blsp1_uart3_apps_clk_src.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK_SRC] = &gcc_blsp1_uart4_apps_clk_src.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_AHB_POSTDIV_CLK_SRC] = &gcc_cpuss_ahb_postdiv_clk_src.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_PCIE_0_CLKREF_EN] = &gcc_pcie_0_clkref_en.clkr,
+ [GCC_PCIE_AUX_CLK] = &gcc_pcie_aux_clk.clkr,
+ [GCC_PCIE_AUX_CLK_SRC] = &gcc_pcie_aux_clk_src.clkr,
+ [GCC_PCIE_AUX_PHY_CLK_SRC] = &gcc_pcie_aux_phy_clk_src.clkr,
+ [GCC_PCIE_CFG_AHB_CLK] = &gcc_pcie_cfg_ahb_clk.clkr,
+ [GCC_PCIE_MSTR_AXI_CLK] = &gcc_pcie_mstr_axi_clk.clkr,
+ [GCC_PCIE_PIPE_CLK] = &gcc_pcie_pipe_clk.clkr,
+ [GCC_PCIE_PIPE_CLK_SRC] = &gcc_pcie_pipe_clk_src.clkr,
+ [GCC_PCIE_RCHNG_PHY_CLK] = &gcc_pcie_rchng_phy_clk.clkr,
+ [GCC_PCIE_RCHNG_PHY_CLK_SRC] = &gcc_pcie_rchng_phy_clk_src.clkr,
+ [GCC_PCIE_SLEEP_CLK] = &gcc_pcie_sleep_clk.clkr,
+ [GCC_PCIE_SLV_AXI_CLK] = &gcc_pcie_slv_axi_clk.clkr,
+ [GCC_PCIE_SLV_Q2A_AXI_CLK] = &gcc_pcie_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_RX1_USB2_CLKREF_EN] = &gcc_rx1_usb2_clkref_en.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MASTER_CLK_SRC] = &gcc_usb30_master_clk_src.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_MSTR_AXI_CLK] = &gcc_usb30_mstr_axi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB30_SLV_AHB_CLK] = &gcc_usb30_slv_ahb_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK_SRC] = &gcc_usb3_phy_aux_clk_src.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
+ [GCC_USB3_PHY_PIPE_CLK_SRC] = &gcc_usb3_phy_pipe_clk_src.clkr,
+ [GCC_USB3_PRIM_CLKREF_EN] = &gcc_usb3_prim_clkref_en.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_XO_DIV4_CLK] = &gcc_xo_div4_clk.clkr,
+ [GCC_XO_PCIE_LINK_CLK] = &gcc_xo_pcie_link_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+};
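+
+/*
+ * The GCC_* array indices are the consumer-visible clock IDs from the
+ * qcom,gcc-sdx65.h dt-bindings header, so device trees reference
+ * these clocks by the same constants.
+ */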
+
+static const struct qcom_reset_map gcc_sdx65_resets[] = {
+ [GCC_BLSP1_QUP1_BCR] = { 0x1c000 },
+ [GCC_BLSP1_QUP2_BCR] = { 0x1e000 },
+ [GCC_BLSP1_QUP3_BCR] = { 0x20000 },
+ [GCC_BLSP1_QUP4_BCR] = { 0x22000 },
+ [GCC_BLSP1_UART1_BCR] = { 0x1d000 },
+ [GCC_BLSP1_UART2_BCR] = { 0x1f000 },
+ [GCC_BLSP1_UART3_BCR] = { 0x21000 },
+ [GCC_BLSP1_UART4_BCR] = { 0x23000 },
+ [GCC_PCIE_BCR] = { 0x43000 },
+ [GCC_PCIE_LINK_DOWN_BCR] = { 0x77000 },
+ [GCC_PCIE_NOCSR_COM_PHY_BCR] = { 0x78008 },
+ [GCC_PCIE_PHY_BCR] = { 0x44000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x78000 },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x78004 },
+ [GCC_PCIE_PHY_NOCSR_COM_PHY_BCR] = { 0x7800c },
+ [GCC_PDM_BCR] = { 0x24000 },
+ [GCC_QUSB2PHY_BCR] = { 0x19000 },
+ [GCC_SDCC1_BCR] = { 0x1a000 },
+ [GCC_TCSR_PCIE_BCR] = { 0x57000 },
+ [GCC_USB30_BCR] = { 0x17000 },
+ [GCC_USB3_PHY_BCR] = { 0x18000 },
+ [GCC_USB3PHY_PHY_BCR] = { 0x18004 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x19004 },
+};
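+
+/*
+ * Each reset entry is the offset of a block's BCR (Block Control
+ * Register); the qcom reset controller asserts/deasserts a reset by
+ * toggling bit 0 of that register unless a different bit is given.
+ */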
+
+static struct gdsc *gcc_sdx65_gdscs[] = {
+ [USB30_GDSC] = &usb30_gdsc,
+ [PCIE_GDSC] = &pcie_gdsc,
+};
+
+static const struct regmap_config gcc_sdx65_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f101c,
+ .fast_io = true,
+};
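+
+/*
+ * fast_io makes the regmap use a spinlock rather than a mutex, so
+ * these MMIO clock registers can also be accessed from atomic
+ * context.
+ */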
+
+static const struct qcom_cc_desc gcc_sdx65_desc = {
+ .config = &gcc_sdx65_regmap_config,
+ .clks = gcc_sdx65_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sdx65_clocks),
+ .resets = gcc_sdx65_resets,
+ .num_resets = ARRAY_SIZE(gcc_sdx65_resets),
+ .gdscs = gcc_sdx65_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sdx65_gdscs),
+};
+
+static const struct of_device_id gcc_sdx65_match_table[] = {
+ { .compatible = "qcom,gcc-sdx65" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sdx65_match_table);
+
+static int gcc_sdx65_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_sdx65_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ /*
+ * Keep the clocks always-ON as they are critical to the functioning
+ * of the system:
+ * GCC_SYS_NOC_CPUSS_AHB_CLK, GCC_CPUSS_AHB_CLK, GCC_CPUSS_GNOC_CLK
+ */
+ regmap_update_bits(regmap, 0x6d008, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x6d008, BIT(21), BIT(21));
+ regmap_update_bits(regmap, 0x6d008, BIT(22), BIT(22));
+
+ return qcom_cc_really_probe(pdev, &gcc_sdx65_desc, regmap);
+}
+
+static struct platform_driver gcc_sdx65_driver = {
+ .probe = gcc_sdx65_probe,
+ .driver = {
+ .name = "gcc-sdx65",
+ .of_match_table = gcc_sdx65_match_table,
+ },
+};
+
+static int __init gcc_sdx65_init(void)
+{
+ return platform_driver_register(&gcc_sdx65_driver);
+}
+subsys_initcall(gcc_sdx65_init);
+
+static void __exit gcc_sdx65_exit(void)
+{
+ platform_driver_unregister(&gcc_sdx65_driver);
+}
+module_exit(gcc_sdx65_exit);
+
+MODULE_DESCRIPTION("QTI GCC SDX65 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-sm6350.c b/drivers/clk/qcom/gcc-sm6350.c
index 3236706771b1..a4f7fba70393 100644
--- a/drivers/clk/qcom/gcc-sm6350.c
+++ b/drivers/clk/qcom/gcc-sm6350.c
@@ -4,6 +4,7 @@
* Copyright (c) 2021, Konrad Dybcio <konrad.dybcio@somainline.org>
*/
+#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gcc-sm8350.c b/drivers/clk/qcom/gcc-sm8350.c
index 6d0a9e2d5104..c3731f96c8e6 100644
--- a/drivers/clk/qcom/gcc-sm8350.c
+++ b/drivers/clk/qcom/gcc-sm8350.c
@@ -4,6 +4,7 @@
* Copyright (c) 2020-2021, Linaro Limited
*/
+#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
diff --git a/drivers/clk/qcom/gcc-sm8450.c b/drivers/clk/qcom/gcc-sm8450.c
new file mode 100644
index 000000000000..593a195467ff
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sm8450.c
@@ -0,0 +1,3304 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sm8450.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_GCC_GPLL0_OUT_EVEN,
+ P_GCC_GPLL0_OUT_MAIN,
+ P_GCC_GPLL4_OUT_MAIN,
+ P_GCC_GPLL9_OUT_MAIN,
+ P_PCIE_0_PIPE_CLK,
+ P_PCIE_1_PHY_AUX_CLK,
+ P_PCIE_1_PIPE_CLK,
+ P_SLEEP_CLK,
+ P_UFS_PHY_RX_SYMBOL_0_CLK,
+ P_UFS_PHY_RX_SYMBOL_1_CLK,
+ P_UFS_PHY_TX_SYMBOL_0_CLK,
+ P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK,
+};
+
+static struct clk_alpha_pll gcc_gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gcc_gpll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
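+
+/*
+ * Post-divider table entries are { register value, divisor }: a field
+ * value of 0x1 selects divide-by-2 here, giving gcc_gpll0_out_even =
+ * gcc_gpll0 / 2.
+ */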
+
+static struct clk_alpha_pll_postdiv gcc_gpll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 10,
+ .post_div_table = post_div_table_gcc_gpll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_gcc_gpll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_evo_ops,
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gcc_gpll9 = {
+ .offset = 0x9000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_EVO],
+ .clkr = {
+ .enable_reg = 0x62018,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_evo_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
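+
+/*
+ * Each gcc_parent_map_N gives the hardware mux select value for a
+ * logical parent, and the matching gcc_parent_data_N array must list
+ * the parents in the same order: entry i of parent_data is the parent
+ * selected by the value in entry i of parent_map. .fw_name entries
+ * are resolved against clock-names in the device tree.
+ */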
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_PCIE_0_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .fw_name = "pcie_0_pipe_clk", },
+ { .fw_name = "bi_tcxo", },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_PCIE_1_PHY_AUX_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "pcie_1_phy_aux_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_PCIE_1_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_6[] = {
+ { .fw_name = "pcie_1_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GCC_GPLL0_OUT_MAIN, 1 },
+ { P_GCC_GPLL9_OUT_MAIN, 2 },
+ { P_GCC_GPLL4_OUT_MAIN, 5 },
+ { P_GCC_GPLL0_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data gcc_parent_data_7[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gcc_gpll0.clkr.hw },
+ { .hw = &gcc_gpll9.clkr.hw },
+ { .hw = &gcc_gpll4.clkr.hw },
+ { .hw = &gcc_gpll0_out_even.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_UFS_PHY_RX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_8[] = {
+ { .fw_name = "ufs_phy_rx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_UFS_PHY_RX_SYMBOL_1_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_9[] = {
+ { .fw_name = "ufs_phy_rx_symbol_1_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_UFS_PHY_TX_SYMBOL_0_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_10[] = {
+ { .fw_name = "ufs_phy_tx_symbol_0_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_USB3_PHY_WRAPPER_GCC_USB30_PIPE_CLK, 0 },
+ { P_BI_TCXO, 2 },
+};
+
+static const struct clk_parent_data gcc_parent_data_11[] = {
+ { .fw_name = "usb3_phy_wrapper_gcc_usb30_pipe_clk" },
+ { .fw_name = "bi_tcxo" },
+};
+
+static struct clk_regmap_mux gcc_pcie_0_pipe_clk_src = {
+ .reg = 0x7b060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_4,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk_src",
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
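+
+/*
+ * This and the following clk_regmap_mux sources (PCIe pipe/phy aux,
+ * UFS symbol and USB3 pipe clocks) select between a clock recovered
+ * by the respective PHY and bi_tcxo; parking on TCXO keeps downstream
+ * logic clocked while the PHY, and hence its pipe clock, is down.
+ */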
+
+static struct clk_regmap_mux gcc_pcie_1_phy_aux_clk_src = {
+ .reg = 0x9d080,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_5,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_pcie_1_pipe_clk_src = {
+ .reg = 0x9d064,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_6,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk_src",
+ .parent_data = gcc_parent_data_6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_6),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_0_clk_src = {
+ .reg = 0x87060,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_8,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_8,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_8),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_rx_symbol_1_clk_src = {
+ .reg = 0x870d0,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_9,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk_src",
+ .parent_data = gcc_parent_data_9,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_9),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_ufs_phy_tx_symbol_0_clk_src = {
+ .reg = 0x87050,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_10,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk_src",
+ .parent_data = gcc_parent_data_10,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_10),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static struct clk_regmap_mux gcc_usb3_prim_phy_pipe_clk_src = {
+ .reg = 0x49068,
+ .shift = 0,
+ .width = 2,
+ .parent_map = gcc_parent_map_11,
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk_src",
+ .parent_data = gcc_parent_data_11,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_11),
+ .ops = &clk_regmap_mux_closest_ops,
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x74004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x75004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x76004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_aux_clk_src = {
+ .cmd_rcgr = 0x7b064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_phy_rchng_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_0_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x7b048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_aux_clk_src = {
+ .cmd_rcgr = 0x9d068,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_1_phy_rchng_clk_src = {
+ .cmd_rcgr = 0x9d04c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_phy_rchng_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_phy_rchng_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(60000000, P_GCC_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x43010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ { }
+};
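+
+/*
+ * Worked example for the MND entries above: with GPLL0 at 600 MHz,
+ * GCC_GPLL0_OUT_EVEN is 300 MHz, so F(7372800, ..., 1, 384, 15625)
+ * yields 300 MHz * 384 / 15625 = 7.3728 MHz, the usual UART baud-rate
+ * reference.
+ */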
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x27014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
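+
+/*
+ * Unlike the anonymous init data used elsewhere in this file, the QUP
+ * serial-engine RCGs keep their clk_init_data in separately named
+ * structs; this is the usual qcom pattern for clocks that also need
+ * to be referenced by name, e.g. for DFS (dynamic frequency
+ * switching) registration.
+ */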
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x27148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x2727c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x273b0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x274e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s5_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(37500000, P_GCC_GPLL0_OUT_EVEN, 8, 0, 0),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(50000000, P_GCC_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x27618,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s5_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = {
+ .cmd_rcgr = 0x2774c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s7_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = {
+ .cmd_rcgr = 0x27880,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap1_s0_clk_src[] = {
+ F(7372800, P_GCC_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(14745600, P_GCC_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GCC_GPLL0_OUT_EVEN, 1, 1536, 15625),
+ F(32000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 75),
+ F(48000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 25),
+ F(64000000, P_GCC_GPLL0_OUT_EVEN, 1, 16, 75),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(80000000, P_GCC_GPLL0_OUT_EVEN, 1, 4, 15),
+ F(96000000, P_GCC_GPLL0_OUT_EVEN, 1, 8, 25),
+ F(100000000, P_GCC_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(102400000, P_GCC_GPLL0_OUT_EVEN, 1, 128, 375),
+ F(112000000, P_GCC_GPLL0_OUT_EVEN, 1, 28, 75),
+ F(117964800, P_GCC_GPLL0_OUT_EVEN, 1, 6144, 15625),
+ F(120000000, P_GCC_GPLL0_OUT_MAIN, 5, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = {
+ .cmd_rcgr = 0x28014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = {
+ .cmd_rcgr = 0x28148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = {
+ .cmd_rcgr = 0x2827c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = {
+ .cmd_rcgr = 0x283b0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = {
+ .cmd_rcgr = 0x284e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = {
+ .cmd_rcgr = 0x28618,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap1_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap1_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap1_s6_clk_src = {
+ .cmd_rcgr = 0x2874c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap1_s6_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s0_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = {
+ .cmd_rcgr = 0x2e014,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s1_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = {
+ .cmd_rcgr = 0x2e148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap1_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s2_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = {
+ .cmd_rcgr = 0x2e27c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s3_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = {
+ .cmd_rcgr = 0x2e3b0,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s4_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = {
+ .cmd_rcgr = 0x2e4e4,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s5_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = {
+ .cmd_rcgr = 0x2e618,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap2_s6_clk_src_init = {
+ .name = "gcc_qupv3_wrap2_s6_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap2_s6_clk_src = {
+ .cmd_rcgr = 0x2e74c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap2_s6_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ F(202000000, P_GCC_GPLL9_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x24014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parent_data_7,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_GCC_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc4_apps_clk_src = {
+ .cmd_rcgr = 0x26014,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GCC_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
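+/*
+ * Inference from the divider rows above and below: GPLL0 appears to run at
+ * 600 MHz on its main output and 300 MHz on the even output, e.g.
+ * 150 MHz = 600 MHz / 4 and 75 MHz = 300 MHz / 4.
+ */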
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x8702c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(75000000, P_GCC_GPLL0_OUT_EVEN, 4, 0, 0),
+ F(150000000, P_GCC_GPLL0_OUT_MAIN, 4, 0, 0),
+ F(300000000, P_GCC_GPLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x87074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x870a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x8708c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GCC_GPLL0_OUT_EVEN, 4.5, 0, 0),
+ F(133333333, P_GCC_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ F(200000000, P_GCC_GPLL0_OUT_MAIN, 3, 0, 0),
+ F(240000000, P_GCC_GPLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
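+/*
+ * The fractional pre-dividers above (4.5, 2.5) are fine at compile time:
+ * since F() stores 2 * div - 1, they reduce to the integer register values
+ * 8 and 4, so no floating point survives into the table.
+ */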
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x49028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x49040,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x4906c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x49058,
+ .shift = 0,
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
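+/*
+ * clk_regmap_div_ro_ops makes this post-divider read-only: the framework
+ * recalculates the mock UTMI rate from whatever value is already in the
+ * 4-bit field at 0x49058 and never reprograms it.
+ */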
+static struct clk_branch gcc_aggre_noc_pcie_0_axi_clk = {
+ .halt_reg = 0x7b08c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x7b08c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
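+/*
+ * From here down the entries are simple gate ("branch") clocks. Per the
+ * common qcom clk-branch code: halt_check selects how halt_reg is polled
+ * after an enable/disable - BRANCH_HALT polls the status bit,
+ * BRANCH_HALT_VOTED skips the check on disable since other voters may keep
+ * the clock running, BRANCH_HALT_DELAY just waits, and BRANCH_HALT_SKIP
+ * checks nothing. Branches whose enable_reg is one of the shared 0x620xx
+ * registers are enabled by voting a single bit; hwcg_reg/hwcg_bit control
+ * hardware clock gating.
+ */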
+static struct clk_branch gcc_aggre_noc_pcie_1_axi_clk = {
+ .halt_reg = 0x9d098,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9d098,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_noc_pcie_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_clk = {
+ .halt_reg = 0x870d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x870d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x870d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_aggre_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x870d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x870d4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x870d4,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_ufs_phy_axi_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
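+/*
+ * The *_hw_ctl_clk entries share their sibling's halt and enable registers
+ * but use BIT(1) instead of BIT(0); bit 1 is presumably the hardware-control
+ * handover that lets the UFS controller gate the clock autonomously while
+ * software keeps its vote asserted.
+ */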
+static struct clk_branch gcc_aggre_usb3_prim_axi_clk = {
+ .halt_reg = 0x49088,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x49088,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x49088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_aggre_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x48004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x48004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_hf_axi_clk = {
+ .halt_reg = 0x36010,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x36010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x36010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_sf_axi_clk = {
+ .halt_reg = 0x36018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x36018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x36018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_pcie_anoc_ahb_clk = {
+ .halt_reg = 0x20030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x20030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_pcie_anoc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x49084,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x49084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x49084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ddrss_gpu_axi_clk = {
+ .halt_reg = 0x81154,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x81154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x81154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_gpu_axi_clk",
+ .ops = &clk_branch2_aon_ops,
+ },
+ },
+};
+
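+/*
+ * clk_branch2_aon_ops in the common code provides no disable hook, so once
+ * this DDRSS GPU AXI branch is enabled Linux never turns it off - likely
+ * because the DDR-subsystem path must stay clocked.
+ */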
+static struct clk_branch gcc_ddrss_pcie_sf_tbu_clk = {
+ .halt_reg = 0x9d094,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x9d094,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ddrss_pcie_sf_tbu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x3700c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x3700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_sf_axi_clk = {
+ .halt_reg = 0x37014,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x37014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x37014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_sf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eusb3_0_clkref_en = {
+ .halt_reg = 0x9c00c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9c00c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eusb3_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x74000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x74000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x75000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x75000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x76000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x76000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gp3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_gpll0_out_even.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x81010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x81010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x81010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x81018,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x81018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+ .halt_reg = 0x7b034,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+ .halt_reg = 0x7b030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7b030,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_en = {
+ .halt_reg = 0x9c004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9c004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+ .halt_reg = 0x7b028,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_phy_rchng_clk = {
+ .halt_reg = 0x7b044,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_phy_rchng_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+ .halt_reg = 0x7b03c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_0_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+ .halt_reg = 0x7b020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7b020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = {
+ .halt_reg = 0x7b01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+ .halt_reg = 0x9d030,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(29),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+ .halt_reg = 0x9d02c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d02c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_clkref_en = {
+ .halt_reg = 0x9c008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9c008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+ .halt_reg = 0x9d024,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_aux_clk = {
+ .halt_reg = 0x9d038,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_phy_rchng_clk = {
+ .halt_reg = 0x9d048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_phy_rchng_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_phy_rchng_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+ .halt_reg = 0x9d040,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pcie_1_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+ .halt_reg = 0x9d01c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x9d01c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = {
+ .halt_reg = 0x9d018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62000,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_1_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x4300c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4300c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_pdm2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x43004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x43004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x43004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x43008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x43008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x36008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x36008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x3600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x37008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x37008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x37008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_ahb_clk = {
+ .halt_reg = 0x81008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x81008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x81008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_gpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_pcie_ahb_clk = {
+ .halt_reg = 0x7b018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x7b018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7b018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_pcie_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cv_cpu_ahb_clk = {
+ .halt_reg = 0x42014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x42014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_cv_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_cvp_ahb_clk = {
+ .halt_reg = 0x42008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x42008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_cvp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_v_cpu_ahb_clk = {
+ .halt_reg = 0x42010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x42010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_v_cpu_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x4200c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x4200c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x4200c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x3300c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x33000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x2700c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x27140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x27274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x273a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x274dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x27610,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s6_clk = {
+ .halt_reg = 0x27744,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s7_clk = {
+ .halt_reg = 0x27878,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(17),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s7_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap0_s7_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
+ .halt_reg = 0x3314c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_core_clk = {
+ .halt_reg = 0x33140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(19),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
+ .halt_reg = 0x2800c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s1_clk = {
+ .halt_reg = 0x28140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(23),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s2_clk = {
+ .halt_reg = 0x28274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(24),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s3_clk = {
+ .halt_reg = 0x283a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s4_clk = {
+ .halt_reg = 0x284dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s5_clk = {
+ .halt_reg = 0x28610,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap1_s6_clk = {
+ .halt_reg = 0x28744,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap1_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap1_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = {
+ .halt_reg = 0x3328c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_core_clk = {
+ .halt_reg = 0x33280,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s0_clk = {
+ .halt_reg = 0x2e00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s1_clk = {
+ .halt_reg = 0x2e140,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s2_clk = {
+ .halt_reg = 0x2e274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s3_clk = {
+ .halt_reg = 0x2e3a8,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s4_clk = {
+ .halt_reg = 0x2e4dc,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s5_clk = {
+ .halt_reg = 0x2e610,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap2_s6_clk = {
+ .halt_reg = 0x2e744,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap2_s6_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_qupv3_wrap2_s6_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x27004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
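+/*
+ * The _m_/_s_ infixes in these wrapper AHB clocks most likely denote the
+ * master and slave AHB interfaces of each QUP wrapper; note each pair is
+ * gated by adjacent vote bits in the same shared enable register.
+ */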
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x27008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x27008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = {
+ .halt_reg = 0x28004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = {
+ .halt_reg = 0x28008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x28008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62008,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_1_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = {
+ .halt_reg = 0x2e004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2e004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = {
+ .halt_reg = 0x2e008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2e008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x62010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_2_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x2400c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2400c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x24004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc2_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_at_clk = {
+ .halt_reg = 0x24010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x24010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x24010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+ .halt_reg = 0x2600c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+ .halt_reg = 0x26004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x26004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_apps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_sdcc4_apps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc4_at_clk = {
+ .halt_reg = 0x26010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x26010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x26010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc4_at_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_0_clkref_en = {
+ .halt_reg = 0x9c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x87020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x87020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x87020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x87018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x87018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x87018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_hw_ctl_clk = {
+ .halt_reg = 0x87018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x87018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x87018,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x8706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8706c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_hw_ctl_clk = {
+ .halt_reg = 0x8706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x8706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x8706c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x870a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x870a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x870a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_hw_ctl_clk = {
+ .halt_reg = 0x870a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x870a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x870a4,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x87028,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x87028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = {
+ .halt_reg = 0x870c0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x870c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x87024,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x87024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x87064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x87064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x87064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_hw_ctl_clk = {
+ .halt_reg = 0x87064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x87064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x87064,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_hw_ctl_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x49018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_master_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x49024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x49020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_0_clkref_en = {
+ .halt_reg = 0x9c010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9c010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_0_clkref_en",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_aux_clk = {
+ .halt_reg = 0x4905c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4905c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x49060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x49060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x49064,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x49064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x49064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gcc_usb3_prim_phy_pipe_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x42018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x42018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi1_clk = {
+ .halt_reg = 0x42020,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x42020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x42020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc pcie_0_gdsc = {
+ .gdscr = 0x7b004,
+ .pd = {
+ .name = "pcie_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_1_gdsc = {
+ .gdscr = 0x9d004,
+ .pd = {
+ .name = "pcie_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ufs_phy_gdsc = {
+ .gdscr = 0x87004,
+ .pd = {
+ .name = "ufs_phy_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc usb30_prim_gdsc = {
+ .gdscr = 0x49004,
+ .pd = {
+ .name = "usb30_prim_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
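+/*
+ * The GDSCs above are the switchable power domains handled by the qcom gdsc
+ * code and exposed through the generic power-domain framework; PWRSTS_OFF_ON
+ * means each supports only the fully-off and fully-on states.
+ */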
+static struct clk_regmap *gcc_sm8450_clocks[] = {
+ [GCC_AGGRE_NOC_PCIE_0_AXI_CLK] = &gcc_aggre_noc_pcie_0_axi_clk.clkr,
+ [GCC_AGGRE_NOC_PCIE_1_AXI_CLK] = &gcc_aggre_noc_pcie_1_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr,
+ [GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_aggre_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr,
+ [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
+ [GCC_CFG_NOC_PCIE_ANOC_AHB_CLK] = &gcc_cfg_noc_pcie_anoc_ahb_clk.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
+ [GCC_DDRSS_PCIE_SF_TBU_CLK] = &gcc_ddrss_pcie_sf_tbu_clk.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr,
+ [GCC_EUSB3_0_CLKREF_EN] = &gcc_eusb3_0_clkref_en.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPLL0] = &gcc_gpll0.clkr,
+ [GCC_GPLL0_OUT_EVEN] = &gcc_gpll0_out_even.clkr,
+ [GCC_GPLL4] = &gcc_gpll4.clkr,
+ [GCC_GPLL9] = &gcc_gpll9.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+ [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr,
+ [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+ [GCC_PCIE_0_CLKREF_EN] = &gcc_pcie_0_clkref_en.clkr,
+ [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK] = &gcc_pcie_0_phy_rchng_clk.clkr,
+ [GCC_PCIE_0_PHY_RCHNG_CLK_SRC] = &gcc_pcie_0_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+ [GCC_PCIE_0_PIPE_CLK_SRC] = &gcc_pcie_0_pipe_clk_src.clkr,
+ [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+ [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+ [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr,
+ [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+ [GCC_PCIE_1_CLKREF_EN] = &gcc_pcie_1_clkref_en.clkr,
+ [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+ [GCC_PCIE_1_PHY_AUX_CLK] = &gcc_pcie_1_phy_aux_clk.clkr,
+ [GCC_PCIE_1_PHY_AUX_CLK_SRC] = &gcc_pcie_1_phy_aux_clk_src.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK] = &gcc_pcie_1_phy_rchng_clk.clkr,
+ [GCC_PCIE_1_PHY_RCHNG_CLK_SRC] = &gcc_pcie_1_phy_rchng_clk_src.clkr,
+ [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+ [GCC_PCIE_1_PIPE_CLK_SRC] = &gcc_pcie_1_pipe_clk_src.clkr,
+ [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+ [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_GPU_AHB_CLK] = &gcc_qmip_gpu_ahb_clk.clkr,
+ [GCC_QMIP_PCIE_AHB_CLK] = &gcc_qmip_pcie_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CV_CPU_AHB_CLK] = &gcc_qmip_video_cv_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_V_CPU_AHB_CLK] = &gcc_qmip_video_v_cpu_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr,
+ [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
+ [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
+ [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
+ [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr,
+ [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr,
+ [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr,
+ [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr,
+ [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK] = &gcc_qupv3_wrap1_s6_clk.clkr,
+ [GCC_QUPV3_WRAP1_S6_CLK_SRC] = &gcc_qupv3_wrap1_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr,
+ [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr,
+ [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr,
+ [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr,
+ [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr,
+ [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr,
+ [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK] = &gcc_qupv3_wrap2_s6_clk.clkr,
+ [GCC_QUPV3_WRAP2_S6_CLK_SRC] = &gcc_qupv3_wrap2_s6_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SDCC2_AT_CLK] = &gcc_sdcc2_at_clk.clkr,
+ [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+ [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+ [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
+ [GCC_SDCC4_AT_CLK] = &gcc_sdcc4_at_clk.clkr,
+ [GCC_UFS_0_CLKREF_EN] = &gcc_ufs_0_clkref_en.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_AXI_HW_CTL_CLK] = &gcc_ufs_phy_axi_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_phy_ice_core_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_phy_aux_hw_ctl_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_rx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC] = &gcc_ufs_phy_rx_symbol_1_clk_src.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC] = &gcc_ufs_phy_tx_symbol_0_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_phy_unipro_core_hw_ctl_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_0_CLKREF_EN] = &gcc_usb3_0_clkref_en.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK_SRC] = &gcc_usb3_prim_phy_pipe_clk_src.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr,
+};
+
+static const struct qcom_reset_map gcc_sm8450_resets[] = {
+ [GCC_CAMERA_BCR] = { 0x36000 },
+ [GCC_DISPLAY_BCR] = { 0x37000 },
+ [GCC_GPU_BCR] = { 0x81000 },
+ [GCC_PCIE_0_BCR] = { 0x7b000 },
+ [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x7c014 },
+ [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x7c020 },
+ [GCC_PCIE_0_PHY_BCR] = { 0x7c01c },
+ [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x7c028 },
+ [GCC_PCIE_1_BCR] = { 0x9d000 },
+ [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x9e014 },
+ [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x9e020 },
+ [GCC_PCIE_1_PHY_BCR] = { 0x9e01c },
+ [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x9e000 },
+ [GCC_PCIE_PHY_BCR] = { 0x7f000 },
+ [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x7f00c },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x7f010 },
+ [GCC_PDM_BCR] = { 0x43000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x27000 },
+ [GCC_QUPV3_WRAPPER_1_BCR] = { 0x28000 },
+ [GCC_QUPV3_WRAPPER_2_BCR] = { 0x2e000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x22000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x22004 },
+ [GCC_SDCC2_BCR] = { 0x24000 },
+ [GCC_SDCC4_BCR] = { 0x26000 },
+ [GCC_UFS_PHY_BCR] = { 0x87000 },
+ [GCC_USB30_PRIM_BCR] = { 0x49000 },
+ [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x60008 },
+ [GCC_USB3_DP_PHY_SEC_BCR] = { 0x60014 },
+ [GCC_USB3_PHY_PRIM_BCR] = { 0x60000 },
+ [GCC_USB3_PHY_SEC_BCR] = { 0x6000c },
+ [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x60004 },
+ [GCC_USB3PHY_PHY_SEC_BCR] = { 0x60010 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x7a000 },
+ [GCC_VIDEO_AXI0_CLK_ARES] = { 0x42018, 2 },
+ [GCC_VIDEO_AXI1_CLK_ARES] = { 0x42020, 2 },
+ [GCC_VIDEO_BCR] = { 0x42000 },
+};
+
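+/*
+ * The QUP serial-engine RCGs listed here support hardware Dynamic
+ * Frequency Switching (DFS); qcom_cc_register_rcg_dfs(), called from
+ * the probe routine below, detects whether DFS is enabled in hardware
+ * and switches these RCGs to the DFS-aware clock ops accordingly.
+ */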
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap1_s6_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap2_s6_clk_src),
+};
+
+static struct gdsc *gcc_sm8450_gdscs[] = {
+ [PCIE_0_GDSC] = &pcie_0_gdsc,
+ [PCIE_1_GDSC] = &pcie_1_gdsc,
+ [UFS_PHY_GDSC] = &ufs_phy_gdsc,
+ [USB30_PRIM_GDSC] = &usb30_prim_gdsc,
+};
+
+static const struct regmap_config gcc_sm8450_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1f1030,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_sm8450_desc = {
+ .config = &gcc_sm8450_regmap_config,
+ .clks = gcc_sm8450_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sm8450_clocks),
+ .resets = gcc_sm8450_resets,
+ .num_resets = ARRAY_SIZE(gcc_sm8450_resets),
+ .gdscs = gcc_sm8450_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sm8450_gdscs),
+};
+
+static const struct of_device_id gcc_sm8450_match_table[] = {
+ { .compatible = "qcom,gcc-sm8450" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sm8450_match_table);
+
+static int gcc_sm8450_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_sm8450_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ /* FORCE_MEM_CORE_ON (CBCR bit 14) for the UFS PHY ICE core clock */
+ regmap_update_bits(regmap, gcc_ufs_phy_ice_core_clk.halt_reg, BIT(14), BIT(14));
+
+ /*
+ * Keep the following critical clocks always on:
+ * gcc_camera_ahb_clk, gcc_camera_xo_clk, gcc_disp_ahb_clk,
+ * gcc_disp_xo_clk, gcc_gpu_cfg_ahb_clk, gcc_video_ahb_clk,
+ * gcc_video_xo_clk
+ */
+ regmap_update_bits(regmap, 0x36004, BIT(0), BIT(0)); /* gcc_camera_ahb_clk */
+ regmap_update_bits(regmap, 0x36020, BIT(0), BIT(0)); /* gcc_camera_xo_clk */
+ regmap_update_bits(regmap, 0x37004, BIT(0), BIT(0)); /* gcc_disp_ahb_clk */
+ regmap_update_bits(regmap, 0x3701c, BIT(0), BIT(0)); /* gcc_disp_xo_clk */
+ regmap_update_bits(regmap, 0x81004, BIT(0), BIT(0)); /* gcc_gpu_cfg_ahb_clk */
+ regmap_update_bits(regmap, 0x42004, BIT(0), BIT(0)); /* gcc_video_ahb_clk */
+ regmap_update_bits(regmap, 0x42028, BIT(0), BIT(0)); /* gcc_video_xo_clk */
+
+ return qcom_cc_really_probe(pdev, &gcc_sm8450_desc, regmap);
+}
+
+static struct platform_driver gcc_sm8450_driver = {
+ .probe = gcc_sm8450_probe,
+ .driver = {
+ .name = "gcc-sm8450",
+ .of_match_table = gcc_sm8450_match_table,
+ },
+};
+
+static int __init gcc_sm8450_init(void)
+{
+ return platform_driver_register(&gcc_sm8450_driver);
+}
+subsys_initcall(gcc_sm8450_init);
+
+static void __exit gcc_sm8450_exit(void)
+{
+ platform_driver_unregister(&gcc_sm8450_driver);
+}
+module_exit(gcc_sm8450_exit);
+
+MODULE_DESCRIPTION("QTI GCC SM8450 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/lpasscc-sc7280.c b/drivers/clk/qcom/lpasscc-sc7280.c
index 89f1ad6631da..b39ee1c9647b 100644
--- a/drivers/clk/qcom/lpasscc-sc7280.c
+++ b/drivers/clk/qcom/lpasscc-sc7280.c
@@ -3,6 +3,7 @@
* Copyright (c) 2021, The Linux Foundation. All rights reserved.
*/
+#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/clk/qcom/lpasscc-sdm845.c b/drivers/clk/qcom/lpasscc-sdm845.c
index 56d3e9928892..7040da952728 100644
--- a/drivers/clk/qcom/lpasscc-sdm845.c
+++ b/drivers/clk/qcom/lpasscc-sdm845.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
+#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of_address.h>
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index fbfcf0006739..e9f971359155 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -3,6 +3,7 @@
* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
*/
+#include <linux/clk-provider.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/module.h>
diff --git a/drivers/clk/qcom/q6sstop-qcs404.c b/drivers/clk/qcom/q6sstop-qcs404.c
index 507386bee07d..780074e05841 100644
--- a/drivers/clk/qcom/q6sstop-qcs404.c
+++ b/drivers/clk/qcom/q6sstop-qcs404.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
diff --git a/drivers/clk/qcom/turingcc-qcs404.c b/drivers/clk/qcom/turingcc-qcs404.c
index 4543bda793f4..43184459228f 100644
--- a/drivers/clk/qcom/turingcc-qcs404.c
+++ b/drivers/clk/qcom/turingcc-qcs404.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitops.h>
+#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/module.h>
diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig
index 6d0280751bb1..be6e6ae7448c 100644
--- a/drivers/clk/renesas/Kconfig
+++ b/drivers/clk/renesas/Kconfig
@@ -31,6 +31,7 @@ config CLK_RENESAS
select CLK_R8A77990 if ARCH_R8A77990
select CLK_R8A77995 if ARCH_R8A77995
select CLK_R8A779A0 if ARCH_R8A779A0
+ select CLK_R8A779F0 if ARCH_R8A779F0
select CLK_R9A06G032 if ARCH_R9A06G032
select CLK_R9A07G044 if ARCH_R9A07G044
select CLK_SH73A0 if ARCH_SH73A0
@@ -149,8 +150,11 @@ config CLK_R8A77995
config CLK_R8A779A0
bool "R-Car V3U clock support" if COMPILE_TEST
- select CLK_RCAR_CPG_LIB
- select CLK_RENESAS_CPG_MSSR
+ select CLK_RCAR_GEN4_CPG
+
+config CLK_R8A779F0
+ bool "R-Car S4-8 clock support" if COMPILE_TEST
+ select CLK_RCAR_GEN4_CPG
config CLK_R9A06G032
bool "RZ/N1D clock support" if COMPILE_TEST
@@ -178,6 +182,11 @@ config CLK_RCAR_GEN3_CPG
select CLK_RCAR_CPG_LIB
select CLK_RENESAS_CPG_MSSR
+config CLK_RCAR_GEN4_CPG
+ bool "R-Car Gen4 clock support" if COMPILE_TEST
+ select CLK_RCAR_CPG_LIB
+ select CLK_RENESAS_CPG_MSSR
+
config CLK_RCAR_USB2_CLOCK_SEL
bool "Renesas R-Car USB2 clock selector support"
depends on ARCH_RENESAS || COMPILE_TEST
diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile
index 7d018700d08b..8b34db1a328c 100644
--- a/drivers/clk/renesas/Makefile
+++ b/drivers/clk/renesas/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_CLK_R8A77980) += r8a77980-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77990) += r8a77990-cpg-mssr.o
obj-$(CONFIG_CLK_R8A77995) += r8a77995-cpg-mssr.o
obj-$(CONFIG_CLK_R8A779A0) += r8a779a0-cpg-mssr.o
+obj-$(CONFIG_CLK_R8A779F0) += r8a779f0-cpg-mssr.o
obj-$(CONFIG_CLK_R9A06G032) += r9a06g032-clocks.o
obj-$(CONFIG_CLK_R9A07G044) += r9a07g044-cpg.o
obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
@@ -36,6 +37,7 @@ obj-$(CONFIG_CLK_SH73A0) += clk-sh73a0.o
obj-$(CONFIG_CLK_RCAR_CPG_LIB) += rcar-cpg-lib.o
obj-$(CONFIG_CLK_RCAR_GEN2_CPG) += rcar-gen2-cpg.o
obj-$(CONFIG_CLK_RCAR_GEN3_CPG) += rcar-gen3-cpg.o
+obj-$(CONFIG_CLK_RCAR_GEN4_CPG) += rcar-gen4-cpg.o
obj-$(CONFIG_CLK_RCAR_USB2_CLOCK_SEL) += rcar-usb2-clock-sel.o
obj-$(CONFIG_CLK_RZG2L) += rzg2l-cpg.o
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
index 39b185d8e957..95dd56b64d64 100644
--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -100,10 +100,14 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A774A1_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A774A1_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A774A1_CLK_SD0, CLK_SDSRC, 0x074),
- DEF_GEN3_SD("sd1", R8A774A1_CLK_SD1, CLK_SDSRC, 0x078),
- DEF_GEN3_SD("sd2", R8A774A1_CLK_SD2, CLK_SDSRC, 0x268),
- DEF_GEN3_SD("sd3", R8A774A1_CLK_SD3, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SDH("sd0h", R8A774A1_CLK_SD0H, CLK_SDSRC, 0x074),
+ DEF_GEN3_SDH("sd1h", R8A774A1_CLK_SD1H, CLK_SDSRC, 0x078),
+ DEF_GEN3_SDH("sd2h", R8A774A1_CLK_SD2H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd3h", R8A774A1_CLK_SD3H, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SD("sd0", R8A774A1_CLK_SD0, R8A774A1_CLK_SD0H, 0x074),
+ DEF_GEN3_SD("sd1", R8A774A1_CLK_SD1, R8A774A1_CLK_SD1H, 0x078),
+ DEF_GEN3_SD("sd2", R8A774A1_CLK_SD2, R8A774A1_CLK_SD2H, 0x268),
+ DEF_GEN3_SD("sd3", R8A774A1_CLK_SD3, R8A774A1_CLK_SD3H, 0x26c),
DEF_FIXED("cl", R8A774A1_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A774A1_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a774b1-cpg-mssr.c b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
index af602d83c8ce..56061b9b8437 100644
--- a/drivers/clk/renesas/r8a774b1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
@@ -97,10 +97,14 @@ static const struct cpg_core_clk r8a774b1_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A774B1_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A774B1_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A774B1_CLK_SD0, CLK_SDSRC, 0x074),
- DEF_GEN3_SD("sd1", R8A774B1_CLK_SD1, CLK_SDSRC, 0x078),
- DEF_GEN3_SD("sd2", R8A774B1_CLK_SD2, CLK_SDSRC, 0x268),
- DEF_GEN3_SD("sd3", R8A774B1_CLK_SD3, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SDH("sd0h", R8A774B1_CLK_SD0H, CLK_SDSRC, 0x074),
+ DEF_GEN3_SDH("sd1h", R8A774B1_CLK_SD1H, CLK_SDSRC, 0x078),
+ DEF_GEN3_SDH("sd2h", R8A774B1_CLK_SD2H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd3h", R8A774B1_CLK_SD3H, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SD("sd0", R8A774B1_CLK_SD0, R8A774B1_CLK_SD0H, 0x074),
+ DEF_GEN3_SD("sd1", R8A774B1_CLK_SD1, R8A774B1_CLK_SD1H, 0x078),
+ DEF_GEN3_SD("sd2", R8A774B1_CLK_SD2, R8A774B1_CLK_SD2H, 0x268),
+ DEF_GEN3_SD("sd3", R8A774B1_CLK_SD3, R8A774B1_CLK_SD3H, 0x26c),
DEF_FIXED("cl", R8A774B1_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A774B1_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
index 5b938eb2df25..b5eb5dc45d62 100644
--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
@@ -108,9 +108,12 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A774C0_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A774C0_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A774C0_CLK_SD0, CLK_SDSRC, 0x0074),
- DEF_GEN3_SD("sd1", R8A774C0_CLK_SD1, CLK_SDSRC, 0x0078),
- DEF_GEN3_SD("sd3", R8A774C0_CLK_SD3, CLK_SDSRC, 0x026c),
+ DEF_GEN3_SDH("sd0h", R8A774C0_CLK_SD0H, CLK_SDSRC, 0x0074),
+ DEF_GEN3_SDH("sd1h", R8A774C0_CLK_SD1H, CLK_SDSRC, 0x0078),
+ DEF_GEN3_SDH("sd3h", R8A774C0_CLK_SD3H, CLK_SDSRC, 0x026c),
+ DEF_GEN3_SD("sd0", R8A774C0_CLK_SD0, R8A774C0_CLK_SD0H, 0x0074),
+ DEF_GEN3_SD("sd1", R8A774C0_CLK_SD1, R8A774C0_CLK_SD1H, 0x0078),
+ DEF_GEN3_SD("sd3", R8A774C0_CLK_SD3, R8A774C0_CLK_SD3H, 0x026c),
DEF_FIXED("cl", R8A774C0_CLK_CL, CLK_PLL1, 48, 1),
DEF_FIXED("cp", R8A774C0_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a774e1-cpg-mssr.c b/drivers/clk/renesas/r8a774e1-cpg-mssr.c
index 40c71466df37..2950f0db90ae 100644
--- a/drivers/clk/renesas/r8a774e1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774e1-cpg-mssr.c
@@ -100,10 +100,14 @@ static const struct cpg_core_clk r8a774e1_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A774E1_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A774E1_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A774E1_CLK_SD0, CLK_SDSRC, 0x074),
- DEF_GEN3_SD("sd1", R8A774E1_CLK_SD1, CLK_SDSRC, 0x078),
- DEF_GEN3_SD("sd2", R8A774E1_CLK_SD2, CLK_SDSRC, 0x268),
- DEF_GEN3_SD("sd3", R8A774E1_CLK_SD3, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SDH("sd0h", R8A774E1_CLK_SD0H, CLK_SDSRC, 0x074),
+ DEF_GEN3_SDH("sd1h", R8A774E1_CLK_SD1H, CLK_SDSRC, 0x078),
+ DEF_GEN3_SDH("sd2h", R8A774E1_CLK_SD2H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd3h", R8A774E1_CLK_SD3H, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SD("sd0", R8A774E1_CLK_SD0, R8A774E1_CLK_SD0H, 0x074),
+ DEF_GEN3_SD("sd1", R8A774E1_CLK_SD1, R8A774E1_CLK_SD1H, 0x078),
+ DEF_GEN3_SD("sd2", R8A774E1_CLK_SD2, R8A774E1_CLK_SD2H, 0x268),
+ DEF_GEN3_SD("sd3", R8A774E1_CLK_SD3, R8A774E1_CLK_SD3H, 0x26c),
DEF_FIXED("cl", R8A774E1_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cr", R8A774E1_CLK_CR, CLK_PLL1_DIV4, 2, 1),
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index d6b1d0148bfd..991a44315d71 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -104,10 +104,14 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = {
DEF_FIXED("s3d2", R8A7795_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A7795_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, CLK_SDSRC, 0x074),
- DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, CLK_SDSRC, 0x078),
- DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, CLK_SDSRC, 0x268),
- DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SDH("sd0h", R8A7795_CLK_SD0H, CLK_SDSRC, 0x074),
+ DEF_GEN3_SDH("sd1h", R8A7795_CLK_SD1H, CLK_SDSRC, 0x078),
+ DEF_GEN3_SDH("sd2h", R8A7795_CLK_SD2H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd3h", R8A7795_CLK_SD3H, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SD("sd0", R8A7795_CLK_SD0, R8A7795_CLK_SD0H, 0x074),
+ DEF_GEN3_SD("sd1", R8A7795_CLK_SD1, R8A7795_CLK_SD1H, 0x078),
+ DEF_GEN3_SD("sd2", R8A7795_CLK_SD2, R8A7795_CLK_SD2H, 0x268),
+ DEF_GEN3_SD("sd3", R8A7795_CLK_SD3, R8A7795_CLK_SD3H, 0x26c),
DEF_FIXED("cl", R8A7795_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cr", R8A7795_CLK_CR, CLK_PLL1_DIV4, 2, 1),
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 9c22977e42c2..7950313611ef 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -106,10 +106,14 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A7796_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A7796_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A7796_CLK_SD0, CLK_SDSRC, 0x074),
- DEF_GEN3_SD("sd1", R8A7796_CLK_SD1, CLK_SDSRC, 0x078),
- DEF_GEN3_SD("sd2", R8A7796_CLK_SD2, CLK_SDSRC, 0x268),
- DEF_GEN3_SD("sd3", R8A7796_CLK_SD3, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SDH("sd0h", R8A7796_CLK_SD0H, CLK_SDSRC, 0x074),
+ DEF_GEN3_SDH("sd1h", R8A7796_CLK_SD1H, CLK_SDSRC, 0x078),
+ DEF_GEN3_SDH("sd2h", R8A7796_CLK_SD2H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd3h", R8A7796_CLK_SD3H, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SD("sd0", R8A7796_CLK_SD0, R8A7796_CLK_SD0H, 0x074),
+ DEF_GEN3_SD("sd1", R8A7796_CLK_SD1, R8A7796_CLK_SD1H, 0x078),
+ DEF_GEN3_SD("sd2", R8A7796_CLK_SD2, R8A7796_CLK_SD2H, 0x268),
+ DEF_GEN3_SD("sd3", R8A7796_CLK_SD3, R8A7796_CLK_SD3H, 0x26c),
DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cr", R8A7796_CLK_CR, CLK_PLL1_DIV4, 2, 1),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index 7eee45a31b2a..d687c29efa3c 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -101,10 +101,14 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A77965_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A77965_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A77965_CLK_SD0, CLK_SDSRC, 0x074),
- DEF_GEN3_SD("sd1", R8A77965_CLK_SD1, CLK_SDSRC, 0x078),
- DEF_GEN3_SD("sd2", R8A77965_CLK_SD2, CLK_SDSRC, 0x268),
- DEF_GEN3_SD("sd3", R8A77965_CLK_SD3, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SDH("sd0h", R8A77965_CLK_SD0H, CLK_SDSRC, 0x074),
+ DEF_GEN3_SDH("sd1h", R8A77965_CLK_SD1H, CLK_SDSRC, 0x078),
+ DEF_GEN3_SDH("sd2h", R8A77965_CLK_SD2H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd3h", R8A77965_CLK_SD3H, CLK_SDSRC, 0x26c),
+ DEF_GEN3_SD("sd0", R8A77965_CLK_SD0, R8A77965_CLK_SD0H, 0x074),
+ DEF_GEN3_SD("sd1", R8A77965_CLK_SD1, R8A77965_CLK_SD1H, 0x078),
+ DEF_GEN3_SD("sd2", R8A77965_CLK_SD2, R8A77965_CLK_SD2H, 0x268),
+ DEF_GEN3_SD("sd3", R8A77965_CLK_SD3, R8A77965_CLK_SD3H, 0x26c),
DEF_FIXED("cl", R8A77965_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cr", R8A77965_CLK_CR, CLK_PLL1_DIV4, 2, 1),
diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c
index 9fe372286c1e..f3cd64de4dc6 100644
--- a/drivers/clk/renesas/r8a77980-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c
@@ -96,7 +96,8 @@ static const struct cpg_core_clk r8a77980_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A77980_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A77980_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A77980_CLK_SD0, CLK_SDSRC, 0x0074),
+ DEF_GEN3_SDH("sd0h", R8A77980_CLK_SD0H, CLK_SDSRC, 0x0074),
+ DEF_GEN3_SD("sd0", R8A77980_CLK_SD0, R8A77980_CLK_SD0H, 0x0074),
DEF_FIXED("cl", R8A77980_CLK_CL, CLK_PLL1_DIV2, 48, 1),
DEF_FIXED("cp", R8A77980_CLK_CP, CLK_EXTAL, 2, 1),
diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c
index a582f2ec3294..faf60f7adc8d 100644
--- a/drivers/clk/renesas/r8a77990-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c
@@ -100,9 +100,12 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = {
DEF_FIXED("s3d2", R8A77990_CLK_S3D2, CLK_S3, 2, 1),
DEF_FIXED("s3d4", R8A77990_CLK_S3D4, CLK_S3, 4, 1),
- DEF_GEN3_SD("sd0", R8A77990_CLK_SD0, CLK_SDSRC, 0x0074),
- DEF_GEN3_SD("sd1", R8A77990_CLK_SD1, CLK_SDSRC, 0x0078),
- DEF_GEN3_SD("sd3", R8A77990_CLK_SD3, CLK_SDSRC, 0x026c),
+ DEF_GEN3_SDH("sd0h", R8A77990_CLK_SD0H, CLK_SDSRC, 0x0074),
+ DEF_GEN3_SDH("sd1h", R8A77990_CLK_SD1H, CLK_SDSRC, 0x0078),
+ DEF_GEN3_SDH("sd3h", R8A77990_CLK_SD3H, CLK_SDSRC, 0x026c),
+ DEF_GEN3_SD("sd0", R8A77990_CLK_SD0, R8A77990_CLK_SD0H, 0x0074),
+ DEF_GEN3_SD("sd1", R8A77990_CLK_SD1, R8A77990_CLK_SD1H, 0x0078),
+ DEF_GEN3_SD("sd3", R8A77990_CLK_SD3, R8A77990_CLK_SD3H, 0x026c),
DEF_FIXED("cl", R8A77990_CLK_CL, CLK_PLL1, 48, 1),
DEF_FIXED("cr", R8A77990_CLK_CR, CLK_PLL1D2, 2, 1),
diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c
index 81c0bc1e78af..7713cfd99c1d 100644
--- a/drivers/clk/renesas/r8a77995-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c
@@ -103,7 +103,8 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = {
DEF_GEN3_PE("s3d2c", R8A77995_CLK_S3D2C, CLK_S3, 2, CLK_PE, 2),
DEF_GEN3_PE("s3d4c", R8A77995_CLK_S3D4C, CLK_S3, 4, CLK_PE, 4),
- DEF_GEN3_SD("sd0", R8A77995_CLK_SD0, CLK_SDSRC, 0x268),
+ DEF_GEN3_SDH("sd0h", R8A77995_CLK_SD0H, CLK_SDSRC, 0x268),
+ DEF_GEN3_SD("sd0", R8A77995_CLK_SD0, R8A77995_CLK_SD0H, 0x268),
DEF_DIV6P1("canfd", R8A77995_CLK_CANFD, CLK_PLL0D3, 0x244),
DEF_DIV6P1("mso", R8A77995_CLK_MSO, CLK_PLL1D2, 0x014),
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index fbd7454f2beb..1c09d4ebe90f 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -10,46 +10,19 @@
* Copyright (C) 2015 Renesas Electronics Corp.
*/
-#include <linux/bug.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
-#include <linux/io.h>
#include <linux/kernel.h>
-#include <linux/pm.h>
-#include <linux/slab.h>
#include <linux/soc/renesas/rcar-rst.h>
#include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
-#include "rcar-cpg-lib.h"
#include "renesas-cpg-mssr.h"
-
-enum rcar_r8a779a0_clk_types {
- CLK_TYPE_R8A779A0_MAIN = CLK_TYPE_CUSTOM,
- CLK_TYPE_R8A779A0_PLL1,
- CLK_TYPE_R8A779A0_PLL2X_3X, /* PLL[23][01] */
- CLK_TYPE_R8A779A0_PLL5,
- CLK_TYPE_R8A779A0_Z,
- CLK_TYPE_R8A779A0_SD,
- CLK_TYPE_R8A779A0_MDSEL, /* Select parent/divider using mode pin */
- CLK_TYPE_R8A779A0_OSC, /* OSC EXTAL predivider and fixed divider */
- CLK_TYPE_R8A779A0_RPCSRC,
- CLK_TYPE_R8A779A0_RPC,
- CLK_TYPE_R8A779A0_RPCD2,
-};
-
-struct rcar_r8a779a0_cpg_pll_config {
- u8 extal_div;
- u8 pll1_mult;
- u8 pll1_div;
- u8 pll5_mult;
- u8 pll5_div;
- u8 osc_prediv;
-};
+#include "rcar-gen4-cpg.h"
enum clk_ids {
/* Core Clock Outputs exported to DT */
@@ -85,33 +58,18 @@ enum clk_ids {
};
#define DEF_PLL(_name, _id, _offset) \
- DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_PLL2X_3X, CLK_MAIN, \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_PLL2X_3X, CLK_MAIN, \
.offset = _offset)
-#define DEF_Z(_name, _id, _parent, _div, _offset) \
- DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_Z, _parent, .div = _div, \
- .offset = _offset)
-
-#define DEF_SD(_name, _id, _parent, _offset) \
- DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_SD, _parent, .offset = _offset)
-
-#define DEF_MDSEL(_name, _id, _md, _parent0, _div0, _parent1, _div1) \
- DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_MDSEL, \
- (_parent0) << 16 | (_parent1), \
- .div = (_div0) << 16 | (_div1), .offset = _md)
-
-#define DEF_OSC(_name, _id, _parent, _div) \
- DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_OSC, _parent, .div = _div)
-
static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
DEF_INPUT("extalr", CLK_EXTALR),
/* Internal Core Clocks */
- DEF_BASE(".main", CLK_MAIN, CLK_TYPE_R8A779A0_MAIN, CLK_EXTAL),
- DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_R8A779A0_PLL1, CLK_MAIN),
- DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_R8A779A0_PLL5, CLK_MAIN),
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
+ DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
DEF_PLL(".pll20", CLK_PLL20, 0x0834),
DEF_PLL(".pll21", CLK_PLL21, 0x0838),
DEF_PLL(".pll30", CLK_PLL30, 0x083c),
@@ -128,14 +86,14 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL5_DIV4, 1, 1),
DEF_RATE(".oco", CLK_OCO, 32768),
- DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_R8A779A0_RPCSRC, CLK_PLL5),
- DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_R8A779A0_RPC, CLK_RPCSRC),
- DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_R8A779A0_RPCD2,
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5),
+ DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2,
R8A779A0_CLK_RPC),
/* Core Clock Outputs */
- DEF_Z("z0", R8A779A0_CLK_Z0, CLK_PLL20, 2, 0),
- DEF_Z("z1", R8A779A0_CLK_Z1, CLK_PLL21, 2, 8),
+ DEF_GEN4_Z("z0", R8A779A0_CLK_Z0, CLK_TYPE_GEN4_Z, CLK_PLL20, 2, 0),
+ DEF_GEN4_Z("z1", R8A779A0_CLK_Z1, CLK_TYPE_GEN4_Z, CLK_PLL21, 2, 8),
DEF_FIXED("zx", R8A779A0_CLK_ZX, CLK_PLL20_DIV2, 2, 1),
DEF_FIXED("s1d1", R8A779A0_CLK_S1D1, CLK_S1, 1, 1),
DEF_FIXED("s1d2", R8A779A0_CLK_S1D2, CLK_S1, 2, 1),
@@ -159,15 +117,16 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED("cp", R8A779A0_CLK_CP, CLK_EXTAL, 2, 1),
DEF_FIXED("cl16mck", R8A779A0_CLK_CL16MCK, CLK_PLL1_DIV2, 64, 1),
- DEF_SD("sd0", R8A779A0_CLK_SD0, CLK_SDSRC, 0x870),
+ DEF_GEN4_SDH("sdh0", R8A779A0_CLK_SD0H, CLK_SDSRC, 0x870),
+ DEF_GEN4_SD("sd0", R8A779A0_CLK_SD0, R8A779A0_CLK_SD0H, 0x870),
DEF_DIV6P1("mso", R8A779A0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, 0x880),
DEF_DIV6P1("dsi", R8A779A0_CLK_DSI, CLK_PLL5_DIV4, 0x884),
- DEF_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8),
- DEF_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
+ DEF_GEN4_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8),
+ DEF_GEN4_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
};
static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
@@ -271,256 +230,6 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("vspx3", 1031, R8A779A0_CLK_S1D1),
};
-static const struct rcar_r8a779a0_cpg_pll_config *cpg_pll_config __initdata;
-static unsigned int cpg_clk_extalr __initdata;
-static u32 cpg_mode __initdata;
-
-/*
- * Z0 Clock & Z1 Clock
- */
-#define CPG_FRQCRB 0x00000804
-#define CPG_FRQCRB_KICK BIT(31)
-#define CPG_FRQCRC 0x00000808
-
-struct cpg_z_clk {
- struct clk_hw hw;
- void __iomem *reg;
- void __iomem *kick_reg;
- unsigned long max_rate; /* Maximum rate for normal mode */
- unsigned int fixed_div;
- u32 mask;
-};
-
-#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
-
-static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- u32 val;
-
- val = readl(zclk->reg) & zclk->mask;
- mult = 32 - (val >> __ffs(zclk->mask));
-
- return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
- 32 * zclk->fixed_div);
-}
-
-static int cpg_z_clk_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int min_mult, max_mult, mult;
- unsigned long rate, prate;
-
- rate = min(req->rate, req->max_rate);
- if (rate <= zclk->max_rate) {
- /* Set parent rate to initial value for normal modes */
- prate = zclk->max_rate;
- } else {
- /* Set increased parent rate for boost modes */
- prate = rate;
- }
- req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
- prate * zclk->fixed_div);
-
- prate = req->best_parent_rate / zclk->fixed_div;
- min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
- max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
- if (max_mult < min_mult)
- return -EINVAL;
-
- mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL, prate);
- mult = clamp(mult, min_mult, max_mult);
-
- req->rate = DIV_ROUND_CLOSEST_ULL((u64)prate * mult, 32);
- return 0;
-}
-
-static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct cpg_z_clk *zclk = to_z_clk(hw);
- unsigned int mult;
- unsigned int i;
-
- mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
- parent_rate);
- mult = clamp(mult, 1U, 32U);
-
- if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
- return -EBUSY;
-
- cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
-
- /*
- * Set KICK bit in FRQCRB to update hardware setting and wait for
- * clock change completion.
- */
- cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);
-
- /*
- * Note: There is no HW information about the worst case latency.
- *
- * Using experimental measurements, it seems that no more than
- * ~10 iterations are needed, independently of the CPU rate.
- * Since this value might be dependent on external xtal rate, pll1
- * rate or even the other emulation clocks rate, use 1000 as a
- * "super" safe value.
- */
- for (i = 1000; i; i--) {
- if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
- return 0;
-
- cpu_relax();
- }
-
- return -ETIMEDOUT;
-}
-
-static const struct clk_ops cpg_z_clk_ops = {
- .recalc_rate = cpg_z_clk_recalc_rate,
- .determine_rate = cpg_z_clk_determine_rate,
- .set_rate = cpg_z_clk_set_rate,
-};
-
-static struct clk * __init cpg_z_clk_register(const char *name,
- const char *parent_name,
- void __iomem *reg,
- unsigned int div,
- unsigned int offset)
-{
- struct clk_init_data init = {};
- struct cpg_z_clk *zclk;
- struct clk *clk;
-
- zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
- if (!zclk)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &cpg_z_clk_ops;
- init.flags = CLK_SET_RATE_PARENT;
- init.parent_names = &parent_name;
- init.num_parents = 1;
-
- zclk->reg = reg + CPG_FRQCRC;
- zclk->kick_reg = reg + CPG_FRQCRB;
- zclk->hw.init = &init;
- zclk->mask = GENMASK(offset + 4, offset);
- zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */
-
- clk = clk_register(NULL, &zclk->hw);
- if (IS_ERR(clk)) {
- kfree(zclk);
- return clk;
- }
-
- zclk->max_rate = clk_hw_get_rate(clk_hw_get_parent(&zclk->hw)) /
- zclk->fixed_div;
- return clk;
-}
-
-/*
- * RPC Clocks
- */
-#define CPG_RPCCKCR 0x874
-
-static const struct clk_div_table cpg_rpcsrc_div_table[] = {
- { 0, 4 }, { 1, 6 }, { 2, 5 }, { 3, 6 }, { 0, 0 },
-};
-
-static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
- const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
- struct clk **clks, void __iomem *base,
- struct raw_notifier_head *notifiers)
-{
- const struct clk *parent;
- unsigned int mult = 1;
- unsigned int div = 1;
- u32 value;
-
- parent = clks[core->parent & 0xffff]; /* some types use high bits */
- if (IS_ERR(parent))
- return ERR_CAST(parent);
-
- switch (core->type) {
- case CLK_TYPE_R8A779A0_MAIN:
- div = cpg_pll_config->extal_div;
- break;
-
- case CLK_TYPE_R8A779A0_PLL1:
- mult = cpg_pll_config->pll1_mult;
- div = cpg_pll_config->pll1_div;
- break;
-
- case CLK_TYPE_R8A779A0_PLL2X_3X:
- value = readl(base + core->offset);
- mult = (((value >> 24) & 0x7f) + 1) * 2;
- break;
-
- case CLK_TYPE_R8A779A0_PLL5:
- mult = cpg_pll_config->pll5_mult;
- div = cpg_pll_config->pll5_div;
- break;
-
- case CLK_TYPE_R8A779A0_Z:
- return cpg_z_clk_register(core->name, __clk_get_name(parent),
- base, core->div, core->offset);
-
- case CLK_TYPE_R8A779A0_SD:
- return cpg_sd_clk_register(core->name, base, core->offset,
- __clk_get_name(parent), notifiers,
- false);
- break;
-
- case CLK_TYPE_R8A779A0_MDSEL:
- /*
- * Clock selectable between two parents and two fixed dividers
- * using a mode pin
- */
- if (cpg_mode & BIT(core->offset)) {
- div = core->div & 0xffff;
- } else {
- parent = clks[core->parent >> 16];
- if (IS_ERR(parent))
- return ERR_CAST(parent);
- div = core->div >> 16;
- }
- mult = 1;
- break;
-
- case CLK_TYPE_R8A779A0_OSC:
- /*
- * Clock combining OSC EXTAL predivider and a fixed divider
- */
- div = cpg_pll_config->osc_prediv * core->div;
- break;
-
- case CLK_TYPE_R8A779A0_RPCSRC:
- return clk_register_divider_table(NULL, core->name,
- __clk_get_name(parent), 0,
- base + CPG_RPCCKCR, 3, 2, 0,
- cpg_rpcsrc_div_table,
- &cpg_lock);
-
- case CLK_TYPE_R8A779A0_RPC:
- return cpg_rpc_clk_register(core->name, base + CPG_RPCCKCR,
- __clk_get_name(parent), notifiers);
-
- case CLK_TYPE_R8A779A0_RPCD2:
- return cpg_rpcd2_clk_register(core->name, base + CPG_RPCCKCR,
- __clk_get_name(parent));
-
- default:
- return ERR_PTR(-EINVAL);
- }
-
- return clk_register_fixed_factor(NULL, core->name,
- __clk_get_name(parent), 0, mult, div);
-}
-
static const unsigned int r8a779a0_crit_mod_clks[] __initconst = {
MOD_CLK_ID(907), /* RWDT */
};
@@ -539,17 +248,19 @@ static const unsigned int r8a779a0_crit_mod_clks[] __initconst = {
*/
#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
(((md) & BIT(13)) >> 13))
-
-static const struct rcar_r8a779a0_cpg_pll_config cpg_pll_configs[4] = {
- /* EXTAL div PLL1 mult/div PLL5 mult/div OSC prediv */
- { 1, 128, 1, 192, 1, 16, },
- { 1, 106, 1, 160, 1, 19, },
- { 0, 0, 0, 0, 0, 0, },
- { 2, 128, 1, 192, 1, 32, },
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
+ /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
+ { 1, 128, 1, 0, 0, 0, 0, 192, 1, 0, 0, 16, },
+ { 1, 106, 1, 0, 0, 0, 0, 160, 1, 0, 0, 19, },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 2, 128, 1, 0, 0, 0, 0, 192, 1, 0, 0, 32, },
};
+
static int __init r8a779a0_cpg_mssr_init(struct device *dev)
{
+ const struct rcar_gen4_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
int error;
error = rcar_rst_read_mode_pins(&cpg_mode);
@@ -557,10 +268,8 @@ static int __init r8a779a0_cpg_mssr_init(struct device *dev)
return error;
cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
- cpg_clk_extalr = CLK_EXTALR;
- spin_lock_init(&cpg_lock);
- return 0;
+ return rcar_gen4_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
}
const struct cpg_mssr_info r8a779a0_cpg_mssr_info __initconst = {
@@ -581,7 +290,7 @@ const struct cpg_mssr_info r8a779a0_cpg_mssr_info __initconst = {
/* Callbacks */
.init = r8a779a0_cpg_mssr_init,
- .cpg_clk_register = rcar_r8a779a0_cpg_clk_register,
+ .cpg_clk_register = rcar_gen4_cpg_clk_register,
- .reg_layout = CLK_REG_LAYOUT_RCAR_V3U,
+ .reg_layout = CLK_REG_LAYOUT_RCAR_GEN4,
};
diff --git a/drivers/clk/renesas/r8a779f0-cpg-mssr.c b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
new file mode 100644
index 000000000000..e6ec02c2c2a8
--- /dev/null
+++ b/drivers/clk/renesas/r8a779f0-cpg-mssr.c
@@ -0,0 +1,183 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * r8a779f0 Clock Pulse Generator / Module Standby and Software Reset
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ * Based on r8a779a0-cpg-mssr.c
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include <dt-bindings/clock/r8a779f0-cpg-mssr.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen4-cpg.h"
+
+enum clk_ids {
+ /* Core Clock Outputs exported to DT */
+ LAST_DT_CORE_CLK = R8A779F0_CLK_R,
+
+ /* External Input Clocks */
+ CLK_EXTAL,
+ CLK_EXTALR,
+
+ /* Internal Core Clocks */
+ CLK_MAIN,
+ CLK_PLL1,
+ CLK_PLL2,
+ CLK_PLL3,
+ CLK_PLL5,
+ CLK_PLL6,
+ CLK_PLL1_DIV2,
+ CLK_PLL2_DIV2,
+ CLK_PLL3_DIV2,
+ CLK_PLL5_DIV2,
+ CLK_PLL5_DIV4,
+ CLK_PLL6_DIV2,
+ CLK_S0,
+ CLK_SDSRC,
+ CLK_RPCSRC,
+ CLK_OCO,
+
+ /* Module Clocks */
+ MOD_CLK_BASE
+};
+
+static const struct cpg_core_clk r8a779f0_core_clks[] __initconst = {
+ /* External Clock Inputs */
+ DEF_INPUT("extal", CLK_EXTAL),
+ DEF_INPUT("extalr", CLK_EXTALR),
+
+ /* Internal Core Clocks */
+ DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN4_MAIN, CLK_EXTAL),
+ DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN4_PLL1, CLK_MAIN),
+ DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN4_PLL2, CLK_MAIN),
+ DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN4_PLL3, CLK_MAIN),
+ DEF_BASE(".pll5", CLK_PLL5, CLK_TYPE_GEN4_PLL5, CLK_MAIN),
+ DEF_BASE(".pll6", CLK_PLL6, CLK_TYPE_GEN4_PLL6, CLK_MAIN),
+
+ DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1),
+ DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 2, 1),
+ DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 2, 1),
+ DEF_FIXED(".pll5_div2", CLK_PLL5_DIV2, CLK_PLL5, 2, 1),
+ DEF_FIXED(".pll5_div4", CLK_PLL5_DIV4, CLK_PLL5_DIV2, 2, 1),
+ DEF_FIXED(".pll6_div2", CLK_PLL6_DIV2, CLK_PLL6, 2, 1),
+ DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".sdsrc", CLK_SDSRC, CLK_TYPE_GEN4_SDSRC, CLK_PLL5),
+ DEF_RATE(".oco", CLK_OCO, 32768),
+
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN4_RPCSRC, CLK_PLL5),
+ DEF_BASE(".rpc", R8A779F0_CLK_RPC, CLK_TYPE_GEN4_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A779F0_CLK_RPCD2, CLK_TYPE_GEN4_RPCD2, R8A779F0_CLK_RPC),
+
+ /* Core Clock Outputs */
+ DEF_FIXED("s0d2", R8A779F0_CLK_S0D2, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3", R8A779F0_CLK_S0D3, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4", R8A779F0_CLK_S0D4, CLK_S0, 4, 1),
+ DEF_FIXED("cl16m", R8A779F0_CLK_CL16M, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_mm", R8A779F0_CLK_S0D2_MM, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3_mm", R8A779F0_CLK_S0D3_MM, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4_mm", R8A779F0_CLK_S0D4_MM, CLK_S0, 4, 1),
+ DEF_FIXED("cl16m_mm", R8A779F0_CLK_CL16M_MM, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_rt", R8A779F0_CLK_S0D2_RT, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3_rt", R8A779F0_CLK_S0D3_RT, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4_rt", R8A779F0_CLK_S0D4_RT, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6_rt", R8A779F0_CLK_S0D6_RT, CLK_S0, 6, 1),
+ DEF_FIXED("cl16m_rt", R8A779F0_CLK_CL16M_RT, CLK_S0, 48, 1),
+ DEF_FIXED("s0d3_per", R8A779F0_CLK_S0D3_PER, CLK_S0, 3, 1),
+ DEF_FIXED("s0d6_per", R8A779F0_CLK_S0D6_PER, CLK_S0, 6, 1),
+ DEF_FIXED("s0d12_per", R8A779F0_CLK_S0D12_PER, CLK_S0, 12, 1),
+ DEF_FIXED("s0d24_per", R8A779F0_CLK_S0D24_PER, CLK_S0, 24, 1),
+ DEF_FIXED("cl16m_per", R8A779F0_CLK_CL16M_PER, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_hsc", R8A779F0_CLK_S0D2_HSC, CLK_S0, 2, 1),
+ DEF_FIXED("s0d3_hsc", R8A779F0_CLK_S0D3_HSC, CLK_S0, 3, 1),
+ DEF_FIXED("s0d4_hsc", R8A779F0_CLK_S0D4_HSC, CLK_S0, 4, 1),
+ DEF_FIXED("s0d6_hsc", R8A779F0_CLK_S0D6_HSC, CLK_S0, 6, 1),
+ DEF_FIXED("s0d12_hsc", R8A779F0_CLK_S0D12_HSC, CLK_S0, 12, 1),
+ DEF_FIXED("cl16m_hsc", R8A779F0_CLK_CL16M_HSC, CLK_S0, 48, 1),
+ DEF_FIXED("s0d2_cc", R8A779F0_CLK_S0D2_CC, CLK_S0, 2, 1),
+ DEF_FIXED("rsw2", R8A779F0_CLK_RSW2, CLK_PLL5, 2, 1),
+ DEF_FIXED("cbfusa", R8A779F0_CLK_CBFUSA, CLK_EXTAL, 2, 1),
+ DEF_FIXED("cpex", R8A779F0_CLK_CPEX, CLK_EXTAL, 2, 1),
+
+ DEF_GEN4_SD("sd0", R8A779F0_CLK_SD0, CLK_SDSRC, 0x870),
+ DEF_DIV6P1("mso", R8A779F0_CLK_MSO, CLK_PLL5_DIV4, 0x87c),
+
+ DEF_GEN4_OSC("osc", R8A779F0_CLK_OSC, CLK_EXTAL, 8),
+ DEF_GEN4_MDSEL("r", R8A779F0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
+};
+
+static const struct mssr_mod_clk r8a779f0_mod_clks[] __initconst = {
+ DEF_MOD("scif0", 702, R8A779F0_CLK_S0D12_PER),
+ DEF_MOD("scif1", 703, R8A779F0_CLK_S0D12_PER),
+ DEF_MOD("scif3", 704, R8A779F0_CLK_S0D12_PER),
+ DEF_MOD("scif4", 705, R8A779F0_CLK_S0D12_PER),
+};
+
+/*
+ * CPG Clock Data
+ */
+/*
+ * MD EXTAL PLL1 PLL2 PLL3 PLL5 PLL6 OSC
+ * 14 13 (MHz)
+ * ----------------------------------------------------------------
+ * 0 0 16 / 1 x200 x150 x200 x200 x134 /15
+ * 0 1 20 / 1 x160 x120 x160 x160 x106 /19
+ * 1 0 Prohibited setting
+ * 1 1 40 / 2 x160 x120 x160 x160 x106 /38
+ */
+#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 13) | \
+ (((md) & BIT(13)) >> 13))
+
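+/*
+ * Note: CPG_PLL_CONFIG_INDEX() packs mode pin MD14 into bit 1 and MD13
+ * into bit 0, yielding an index 0..3 into cpg_pll_configs[] below; the
+ * all-zero row at index 2 corresponds to the prohibited MD14/MD13 = 1/0
+ * setting, which r8a779f0_cpg_mssr_init() rejects via its extal_div check.
+ */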
+static const struct rcar_gen4_cpg_pll_config cpg_pll_configs[4] = {
+ /* EXTAL div PLL1 mult/div PLL2 mult/div PLL3 mult/div PLL5 mult/div PLL6 mult/div OSC prediv */
+ { 1, 200, 1, 150, 1, 200, 1, 200, 1, 134, 1, 15, },
+ { 1, 160, 1, 120, 1, 160, 1, 160, 1, 106, 1, 19, },
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
+ { 2, 160, 1, 120, 1, 160, 1, 160, 1, 106, 1, 38, },
+};
+
+static int __init r8a779f0_cpg_mssr_init(struct device *dev)
+{
+ const struct rcar_gen4_cpg_pll_config *cpg_pll_config;
+ u32 cpg_mode;
+ int error;
+
+ error = rcar_rst_read_mode_pins(&cpg_mode);
+ if (error)
+ return error;
+
+ cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
+ if (!cpg_pll_config->extal_div) {
+ dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode);
+ return -EINVAL;
+ }
+
+ return rcar_gen4_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
+}
+
+const struct cpg_mssr_info r8a779f0_cpg_mssr_info __initconst = {
+ /* Core Clocks */
+ .core_clks = r8a779f0_core_clks,
+ .num_core_clks = ARRAY_SIZE(r8a779f0_core_clks),
+ .last_dt_core_clk = LAST_DT_CORE_CLK,
+ .num_total_core_clks = MOD_CLK_BASE,
+
+ /* Module Clocks */
+ .mod_clks = r8a779f0_mod_clks,
+ .num_mod_clks = ARRAY_SIZE(r8a779f0_mod_clks),
+ .num_hw_mod_clks = 28 * 32,
+
+ /* Callbacks */
+ .init = r8a779f0_cpg_mssr_init,
+ .cpg_clk_register = rcar_gen4_cpg_clk_register,
+
+ .reg_layout = CLK_REG_LAYOUT_RCAR_GEN4,
+};
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index 47c16265fca9..79042bf46fe8 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -26,15 +26,15 @@ enum clk_ids {
CLK_PLL1,
CLK_PLL2,
CLK_PLL2_DIV2,
- CLK_PLL2_DIV16,
- CLK_PLL2_DIV20,
+ CLK_PLL2_DIV2_8,
+ CLK_PLL2_DIV2_10,
CLK_PLL3,
CLK_PLL3_400,
CLK_PLL3_533,
CLK_PLL3_DIV2,
+ CLK_PLL3_DIV2_2,
CLK_PLL3_DIV2_4,
CLK_PLL3_DIV2_4_2,
- CLK_PLL3_DIV4,
CLK_SEL_PLL3_3,
CLK_DIV_PLL3_C,
CLK_PLL4,
@@ -50,12 +50,21 @@ enum clk_ids {
CLK_PLL2_SDHI_266,
CLK_SD0_DIV4,
CLK_SD1_DIV4,
+ CLK_SEL_GPU2,
/* Module Clocks */
MOD_CLK_BASE,
};
/* Divider tables */
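+/* Each entry is a { register value, divisor } pair; { 0, 0 } terminates a table. */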
+static const struct clk_div_table dtable_1_8[] = {
+ {0, 1},
+ {1, 2},
+ {2, 4},
+ {3, 8},
+ {0, 0},
+};
+
static const struct clk_div_table dtable_1_32[] = {
{0, 1},
{1, 2},
@@ -69,6 +78,7 @@ static const struct clk_div_table dtable_1_32[] = {
static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" };
+static const char * const sel_gpu2[] = { ".pll6", ".pll3_div2_2" };
static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
/* External Clock Inputs */
@@ -94,13 +104,13 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
DEF_FIXED(".clk_400", CLK_PLL2_SDHI_400, CLK_PLL2_800, 1, 2),
DEF_FIXED(".clk_266", CLK_PLL2_SDHI_266, CLK_PLL2_SDHI_533, 1, 2),
- DEF_FIXED(".pll2_div16", CLK_PLL2_DIV16, CLK_PLL2, 1, 16),
- DEF_FIXED(".pll2_div20", CLK_PLL2_DIV20, CLK_PLL2, 1, 20),
+ DEF_FIXED(".pll2_div2_8", CLK_PLL2_DIV2_8, CLK_PLL2_DIV2, 1, 8),
+ DEF_FIXED(".pll2_div2_10", CLK_PLL2_DIV2_10, CLK_PLL2_DIV2, 1, 10),
DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2),
+ DEF_FIXED(".pll3_div2_2", CLK_PLL3_DIV2_2, CLK_PLL3_DIV2, 1, 2),
DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4),
DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2),
- DEF_FIXED(".pll3_div4", CLK_PLL3_DIV4, CLK_PLL3, 1, 4),
DEF_MUX(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3,
sel_pll3_3, ARRAY_SIZE(sel_pll3_3), 0, CLK_MUX_READ_ONLY),
DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3,
@@ -108,13 +118,16 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_FOUT3, 1, 2),
DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2),
+ DEF_MUX(".sel_gpu2", CLK_SEL_GPU2, SEL_GPU2,
+ sel_gpu2, ARRAY_SIZE(sel_gpu2), 0, CLK_MUX_READ_ONLY),
/* Core output clk */
- DEF_FIXED("I", R9A07G044_CLK_I, CLK_PLL1, 1, 1),
- DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV16, DIVPL2A,
+ DEF_DIV("I", R9A07G044_CLK_I, CLK_PLL1, DIVPL1A, dtable_1_8,
+ CLK_DIVIDER_HIWORD_MASK),
+ DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV2_8, DIVPL2A,
dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
DEF_FIXED("P0_DIV2", R9A07G044_CLK_P0_DIV2, R9A07G044_CLK_P0, 1, 2),
- DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV20, 1, 1),
+ DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV2_10, 1, 1),
DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4,
DIVPL3B, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
DEF_FIXED("P1_DIV2", CLK_P1_DIV2, R9A07G044_CLK_P1, 1, 2),
@@ -132,6 +145,8 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
sel_shdi, ARRAY_SIZE(sel_shdi)),
DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4),
DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4),
+ DEF_DIV("G", R9A07G044_CLK_G, CLK_SEL_GPU2, DIVGPU, dtable_1_8,
+ CLK_DIVIDER_HIWORD_MASK),
};
static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
@@ -145,6 +160,24 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x52c, 0),
DEF_MOD("dmac_pclk", R9A07G044_DMAC_PCLK, CLK_P1_DIV2,
0x52c, 1),
+ DEF_MOD("ostm0_pclk", R9A07G044_OSTM0_PCLK, R9A07G044_CLK_P0,
+ 0x534, 0),
+ DEF_MOD("ostm1_clk", R9A07G044_OSTM1_PCLK, R9A07G044_CLK_P0,
+ 0x534, 1),
+ DEF_MOD("ostm2_pclk", R9A07G044_OSTM2_PCLK, R9A07G044_CLK_P0,
+ 0x534, 2),
+ DEF_MOD("wdt0_pclk", R9A07G044_WDT0_PCLK, R9A07G044_CLK_P0,
+ 0x548, 0),
+ DEF_MOD("wdt0_clk", R9A07G044_WDT0_CLK, R9A07G044_OSCCLK,
+ 0x548, 1),
+ DEF_MOD("wdt1_pclk", R9A07G044_WDT1_PCLK, R9A07G044_CLK_P0,
+ 0x548, 2),
+ DEF_MOD("wdt1_clk", R9A07G044_WDT1_CLK, R9A07G044_OSCCLK,
+ 0x548, 3),
+ DEF_MOD("wdt2_pclk", R9A07G044_WDT2_PCLK, R9A07G044_CLK_P0,
+ 0x548, 4),
+ DEF_MOD("wdt2_clk", R9A07G044_WDT2_CLK, R9A07G044_OSCCLK,
+ 0x548, 5),
DEF_MOD("spi_clk2", R9A07G044_SPI_CLK2, R9A07G044_CLK_SPI1,
0x550, 0),
DEF_MOD("spi_clk", R9A07G044_SPI_CLK, R9A07G044_CLK_SPI0,
@@ -165,6 +198,12 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x554, 6),
DEF_MOD("sdhi1_aclk", R9A07G044_SDHI1_ACLK, R9A07G044_CLK_P1,
0x554, 7),
+ DEF_MOD("gpu_clk", R9A07G044_GPU_CLK, R9A07G044_CLK_G,
+ 0x558, 0),
+ DEF_MOD("gpu_axi_clk", R9A07G044_GPU_AXI_CLK, R9A07G044_CLK_P1,
+ 0x558, 1),
+ DEF_MOD("gpu_ace_clk", R9A07G044_GPU_ACE_CLK, R9A07G044_CLK_P1,
+ 0x558, 2),
DEF_MOD("ssi0_pclk", R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0,
0x570, 0),
DEF_MOD("ssi0_sfr", R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0,
@@ -217,6 +256,14 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x584, 4),
DEF_MOD("sci0", R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0,
0x588, 0),
+ DEF_MOD("sci1", R9A07G044_SCI1_CLKP, R9A07G044_CLK_P0,
+ 0x588, 1),
+ DEF_MOD("rspi0", R9A07G044_RSPI0_CLKB, R9A07G044_CLK_P0,
+ 0x590, 0),
+ DEF_MOD("rspi1", R9A07G044_RSPI1_CLKB, R9A07G044_CLK_P0,
+ 0x590, 1),
+ DEF_MOD("rspi2", R9A07G044_RSPI2_CLKB, R9A07G044_CLK_P0,
+ 0x590, 2),
DEF_MOD("canfd", R9A07G044_CANFD_PCLK, R9A07G044_CLK_P0,
0x594, 0),
DEF_MOD("gpio", R9A07G044_GPIO_HCLK, R9A07G044_OSCCLK,
@@ -225,6 +272,8 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x5a8, 0),
DEF_MOD("adc_pclk", R9A07G044_ADC_PCLK, R9A07G044_CLK_P0,
0x5a8, 1),
+ DEF_MOD("tsu_pclk", R9A07G044_TSU_PCLK, R9A07G044_CLK_TSU,
+ 0x5ac, 0),
};
static struct rzg2l_reset r9a07g044_resets[] = {
@@ -233,9 +282,18 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0),
DEF_RST(R9A07G044_DMAC_ARESETN, 0x82c, 0),
DEF_RST(R9A07G044_DMAC_RST_ASYNC, 0x82c, 1),
+ DEF_RST(R9A07G044_OSTM0_PRESETZ, 0x834, 0),
+ DEF_RST(R9A07G044_OSTM1_PRESETZ, 0x834, 1),
+ DEF_RST(R9A07G044_OSTM2_PRESETZ, 0x834, 2),
+ DEF_RST(R9A07G044_WDT0_PRESETN, 0x848, 0),
+ DEF_RST(R9A07G044_WDT1_PRESETN, 0x848, 1),
+ DEF_RST(R9A07G044_WDT2_PRESETN, 0x848, 2),
DEF_RST(R9A07G044_SPI_RST, 0x850, 0),
DEF_RST(R9A07G044_SDHI0_IXRST, 0x854, 0),
DEF_RST(R9A07G044_SDHI1_IXRST, 0x854, 1),
+ DEF_RST(R9A07G044_GPU_RESETN, 0x858, 0),
+ DEF_RST(R9A07G044_GPU_AXI_RESETN, 0x858, 1),
+ DEF_RST(R9A07G044_GPU_ACE_RESETN, 0x858, 2),
DEF_RST(R9A07G044_SSI0_RST_M2_REG, 0x870, 0),
DEF_RST(R9A07G044_SSI1_RST_M2_REG, 0x870, 1),
DEF_RST(R9A07G044_SSI2_RST_M2_REG, 0x870, 2),
@@ -256,6 +314,10 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_SCIF3_RST_SYSTEM_N, 0x884, 3),
DEF_RST(R9A07G044_SCIF4_RST_SYSTEM_N, 0x884, 4),
DEF_RST(R9A07G044_SCI0_RST, 0x888, 0),
+ DEF_RST(R9A07G044_SCI1_RST, 0x888, 1),
+ DEF_RST(R9A07G044_RSPI0_RST, 0x890, 0),
+ DEF_RST(R9A07G044_RSPI1_RST, 0x890, 1),
+ DEF_RST(R9A07G044_RSPI2_RST, 0x890, 2),
DEF_RST(R9A07G044_CANFD_RSTP_N, 0x894, 0),
DEF_RST(R9A07G044_CANFD_RSTC_N, 0x894, 1),
DEF_RST(R9A07G044_GPIO_RSTN, 0x898, 0),
@@ -263,6 +325,7 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_GPIO_SPARE_RESETN, 0x898, 2),
DEF_RST(R9A07G044_ADC_PRESETN, 0x8a8, 0),
DEF_RST(R9A07G044_ADC_ADRST_N, 0x8a8, 1),
+ DEF_RST(R9A07G044_TSU_PRESETN, 0x8ac, 0),
};
static const unsigned int r9a07g044_crit_mod_clks[] __initconst = {
diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
index e93f0011eb07..e2e0447de190 100644
--- a/drivers/clk/renesas/rcar-cpg-lib.c
+++ b/drivers/clk/renesas/rcar-cpg-lib.c
@@ -65,206 +65,49 @@ void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
/*
* SDn Clock
*/
-#define CPG_SD_STP_HCK BIT(9)
-#define CPG_SD_STP_CK BIT(8)
-
-#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK)
-#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0)
-
-#define CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) \
-{ \
- .val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
- ((sd_srcfc) << 2) | \
- ((sd_fc) << 0), \
- .div = (sd_div), \
-}
-struct sd_div_table {
- u32 val;
- unsigned int div;
-};
+#define SDnSRCFC_SHIFT 2
+#define STPnHCK BIT(9 - SDnSRCFC_SHIFT)
-struct sd_clock {
- struct clk_hw hw;
- const struct sd_div_table *div_table;
- struct cpg_simple_notifier csn;
- unsigned int div_num;
- unsigned int cur_div_idx;
-};
-
-/* SDn divider
- * sd_srcfc sd_fc div
- * stp_hck (div) (div) = sd_srcfc x sd_fc
- *---------------------------------------------------------
- * 0 0 (1) 1 (4) 4 : SDR104 / HS200 / HS400 (8 TAP)
- * 0 1 (2) 1 (4) 8 : SDR50
- * 1 2 (4) 1 (4) 16 : HS / SDR25
- * 1 3 (8) 1 (4) 32 : NS / SDR12
- * 1 4 (16) 1 (4) 64
- * 0 0 (1) 0 (2) 2
- * 0 1 (2) 0 (2) 4 : SDR104 / HS200 / HS400 (4 TAP)
- * 1 2 (4) 0 (2) 8
- * 1 3 (8) 0 (2) 16
- * 1 4 (16) 0 (2) 32
- *
- * NOTE: There is a quirk option to ignore the first row of the dividers
- * table when searching for suitable settings. This is because HS400 on
- * early ES versions of H3 and M3-W requires a specific setting to work.
- */
-static const struct sd_div_table cpg_sd_div_table[] = {
-/* CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) */
- CPG_SD_DIV_TABLE_DATA(0, 0, 1, 4),
- CPG_SD_DIV_TABLE_DATA(0, 1, 1, 8),
- CPG_SD_DIV_TABLE_DATA(1, 2, 1, 16),
- CPG_SD_DIV_TABLE_DATA(1, 3, 1, 32),
- CPG_SD_DIV_TABLE_DATA(1, 4, 1, 64),
- CPG_SD_DIV_TABLE_DATA(0, 0, 0, 2),
- CPG_SD_DIV_TABLE_DATA(0, 1, 0, 4),
- CPG_SD_DIV_TABLE_DATA(1, 2, 0, 8),
- CPG_SD_DIV_TABLE_DATA(1, 3, 0, 16),
- CPG_SD_DIV_TABLE_DATA(1, 4, 0, 32),
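+/*
+ * STPnHCK is bit 9 of SDnCKCR; expressing it relative to SDnSRCFC_SHIFT
+ * places it inside the 8-bit divider field registered in
+ * cpg_sdh_clk_register() below, so the generic divider code manages the
+ * HCK stop bit together with the SDnSRCFC division ratio.
+ */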
+static const struct clk_div_table cpg_sdh_div_table[] = {
+ { 0, 1 }, { 1, 2 }, { STPnHCK | 2, 4 }, { STPnHCK | 3, 8 },
+ { STPnHCK | 4, 16 }, { 0, 0 },
};
-#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
-
-static int cpg_sd_clock_enable(struct clk_hw *hw)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK,
- clock->div_table[clock->cur_div_idx].val &
- CPG_SD_STP_MASK);
-
- return 0;
-}
-
-static void cpg_sd_clock_disable(struct clk_hw *hw)
-{
- struct sd_clock *clock = to_sd_clock(hw);
-
- cpg_reg_modify(clock->csn.reg, 0, CPG_SD_STP_MASK);
-}
-
-static int cpg_sd_clock_is_enabled(struct clk_hw *hw)
+struct clk * __init cpg_sdh_clk_register(const char *name,
+ void __iomem *sdnckcr, const char *parent_name,
+ struct raw_notifier_head *notifiers)
{
- struct sd_clock *clock = to_sd_clock(hw);
-
- return !(readl(clock->csn.reg) & CPG_SD_STP_MASK);
-}
+ struct cpg_simple_notifier *csn;
+ struct clk *clk;
-static unsigned long cpg_sd_clock_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct sd_clock *clock = to_sd_clock(hw);
+ csn = kzalloc(sizeof(*csn), GFP_KERNEL);
+ if (!csn)
+ return ERR_PTR(-ENOMEM);
- return DIV_ROUND_CLOSEST(parent_rate,
- clock->div_table[clock->cur_div_idx].div);
-}
+ csn->reg = sdnckcr;
-static int cpg_sd_clock_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
-{
- unsigned long best_rate = ULONG_MAX, diff_min = ULONG_MAX;
- struct sd_clock *clock = to_sd_clock(hw);
- unsigned long calc_rate, diff;
- unsigned int i;
-
- for (i = 0; i < clock->div_num; i++) {
- calc_rate = DIV_ROUND_CLOSEST(req->best_parent_rate,
- clock->div_table[i].div);
- if (calc_rate < req->min_rate || calc_rate > req->max_rate)
- continue;
-
- diff = calc_rate > req->rate ? calc_rate - req->rate
- : req->rate - calc_rate;
- if (diff < diff_min) {
- best_rate = calc_rate;
- diff_min = diff;
- }
+ clk = clk_register_divider_table(NULL, name, parent_name, 0, sdnckcr,
+ SDnSRCFC_SHIFT, 8, 0, cpg_sdh_div_table,
+ &cpg_lock);
+ if (IS_ERR(clk)) {
+ kfree(csn);
+ return clk;
}
- if (best_rate == ULONG_MAX)
- return -EINVAL;
-
- req->rate = best_rate;
- return 0;
-}
-
-static int cpg_sd_clock_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
-{
- struct sd_clock *clock = to_sd_clock(hw);
- unsigned int i;
-
- for (i = 0; i < clock->div_num; i++)
- if (rate == DIV_ROUND_CLOSEST(parent_rate,
- clock->div_table[i].div))
- break;
-
- if (i >= clock->div_num)
- return -EINVAL;
-
- clock->cur_div_idx = i;
-
- cpg_reg_modify(clock->csn.reg, CPG_SD_STP_MASK | CPG_SD_FC_MASK,
- clock->div_table[i].val &
- (CPG_SD_STP_MASK | CPG_SD_FC_MASK));
-
- return 0;
+ cpg_simple_notifier_register(notifiers, csn);
+ return clk;
}
-static const struct clk_ops cpg_sd_clock_ops = {
- .enable = cpg_sd_clock_enable,
- .disable = cpg_sd_clock_disable,
- .is_enabled = cpg_sd_clock_is_enabled,
- .recalc_rate = cpg_sd_clock_recalc_rate,
- .determine_rate = cpg_sd_clock_determine_rate,
- .set_rate = cpg_sd_clock_set_rate,
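+/* SDnFC (SDnCKCR bits 1:0): 0 selects /2, 1 selects /4 */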
+static const struct clk_div_table cpg_sd_div_table[] = {
+ { 0, 2 }, { 1, 4 }, { 0, 0 },
};
struct clk * __init cpg_sd_clk_register(const char *name,
- void __iomem *base, unsigned int offset, const char *parent_name,
- struct raw_notifier_head *notifiers, bool skip_first)
+ void __iomem *sdnckcr, const char *parent_name)
{
- struct clk_init_data init = {};
- struct sd_clock *clock;
- struct clk *clk;
- u32 val;
-
- clock = kzalloc(sizeof(*clock), GFP_KERNEL);
- if (!clock)
- return ERR_PTR(-ENOMEM);
-
- init.name = name;
- init.ops = &cpg_sd_clock_ops;
- init.flags = CLK_SET_RATE_PARENT;
- init.parent_names = &parent_name;
- init.num_parents = 1;
-
- clock->csn.reg = base + offset;
- clock->hw.init = &init;
- clock->div_table = cpg_sd_div_table;
- clock->div_num = ARRAY_SIZE(cpg_sd_div_table);
-
- if (skip_first) {
- clock->div_table++;
- clock->div_num--;
- }
-
- val = readl(clock->csn.reg) & ~CPG_SD_FC_MASK;
- val |= CPG_SD_STP_MASK | (clock->div_table[0].val & CPG_SD_FC_MASK);
- writel(val, clock->csn.reg);
-
- clk = clk_register(NULL, &clock->hw);
- if (IS_ERR(clk))
- goto free_clock;
-
- cpg_simple_notifier_register(notifiers, &clock->csn);
- return clk;
-
-free_clock:
- kfree(clock);
- return clk;
+ return clk_register_divider_table(NULL, name, parent_name, 0, sdnckcr,
+ 0, 2, 0, cpg_sd_div_table, &cpg_lock);
}
struct rpc_clock {
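
The rework above deletes the hand-rolled SD clock ops and re-registers SDnH/SDn as generic table dividers. As a rough sketch of the mechanism (plain C, not kernel code; the table values mirror cpg_sd_div_table above, and table_div_rate() is an invented helper), a clk_div_table simply maps a register field value to a divisor:

#include <stdint.h>

struct div_entry { uint32_t val; uint32_t div; };

static const struct div_entry sd_table[] = {
	{ 0, 2 }, { 1, 4 },	/* field value -> divisor, as above */
	{ 0, 0 },		/* sentinel: div == 0 ends the table */
};

/* Derive a rate from the SDnCKCR field, the way the generic divider
 * created by clk_register_divider_table() would. */
static unsigned long table_div_rate(unsigned long parent_rate,
				    uint32_t reg, unsigned int shift,
				    unsigned int width)
{
	uint32_t field = (reg >> shift) & ((1u << width) - 1);
	const struct div_entry *e;

	for (e = sd_table; e->div; e++)
		if (e->val == field)
			return parent_rate / e->div;

	return 0;	/* field value not present in the table */
}

Splitting the old combined clock into two chained dividers is what makes the custom enable/disable/set_rate state machine unnecessary.
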
diff --git a/drivers/clk/renesas/rcar-cpg-lib.h b/drivers/clk/renesas/rcar-cpg-lib.h
index 35c0217c2f8b..94627df1c94c 100644
--- a/drivers/clk/renesas/rcar-cpg-lib.h
+++ b/drivers/clk/renesas/rcar-cpg-lib.h
@@ -26,9 +26,12 @@ void cpg_simple_notifier_register(struct raw_notifier_head *notifiers,
void cpg_reg_modify(void __iomem *reg, u32 clear, u32 set);
+struct clk * __init cpg_sdh_clk_register(const char *name,
+ void __iomem *sdnckcr, const char *parent_name,
+ struct raw_notifier_head *notifiers);
+
struct clk * __init cpg_sd_clk_register(const char *name,
- void __iomem *base, unsigned int offset, const char *parent_name,
- struct raw_notifier_head *notifiers, bool skip_first);
+ void __iomem *sdnckcr, const char *parent_name);
struct clk * __init cpg_rpc_clk_register(const char *name,
void __iomem *rpcckcr, const char *parent_name,
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 741f6e74bbcf..e668f23c75e7 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -312,29 +312,20 @@ static u32 cpg_quirks __initdata;
#define PLL_ERRATA BIT(0) /* Missing PLL0/2/4 post-divider */
#define RCKCR_CKSEL BIT(1) /* Manual RCLK parent selection */
-#define SD_SKIP_FIRST BIT(2) /* Skip first clock in SD table */
static const struct soc_device_attribute cpg_quirks_match[] __initconst = {
{
.soc_id = "r8a7795", .revision = "ES1.0",
- .data = (void *)(PLL_ERRATA | RCKCR_CKSEL | SD_SKIP_FIRST),
+ .data = (void *)(PLL_ERRATA | RCKCR_CKSEL),
},
{
.soc_id = "r8a7795", .revision = "ES1.*",
- .data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
- },
- {
- .soc_id = "r8a7795", .revision = "ES2.0",
- .data = (void *)SD_SKIP_FIRST,
+ .data = (void *)(RCKCR_CKSEL),
},
{
.soc_id = "r8a7796", .revision = "ES1.0",
- .data = (void *)(RCKCR_CKSEL | SD_SKIP_FIRST),
- },
- {
- .soc_id = "r8a7796", .revision = "ES1.1",
- .data = (void *)SD_SKIP_FIRST,
+ .data = (void *)(RCKCR_CKSEL),
},
{ /* sentinel */ }
};
@@ -401,10 +392,13 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
mult *= 2;
break;
+ case CLK_TYPE_GEN3_SDH:
+ return cpg_sdh_clk_register(core->name, base + core->offset,
+ __clk_get_name(parent), notifiers);
+
case CLK_TYPE_GEN3_SD:
- return cpg_sd_clk_register(core->name, base, core->offset,
- __clk_get_name(parent), notifiers,
- cpg_quirks & SD_SKIP_FIRST);
+ return cpg_sd_clk_register(core->name, base + core->offset,
+ __clk_get_name(parent));
case CLK_TYPE_GEN3_R:
if (cpg_quirks & RCKCR_CKSEL) {
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index 3d949c4a3244..2bc0afadf604 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -17,6 +17,7 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_PLL2,
CLK_TYPE_GEN3_PLL3,
CLK_TYPE_GEN3_PLL4,
+ CLK_TYPE_GEN3_SDH,
CLK_TYPE_GEN3_SD,
CLK_TYPE_GEN3_R,
CLK_TYPE_GEN3_MDSEL, /* Select parent/divider using mode pin */
@@ -32,6 +33,9 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_SOC_BASE,
};
+#define DEF_GEN3_SDH(_name, _id, _parent, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN3_SDH, _parent, .offset = _offset)
+
#define DEF_GEN3_SD(_name, _id, _parent, _offset) \
DEF_BASE(_name, _id, CLK_TYPE_GEN3_SD, _parent, .offset = _offset)
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.c b/drivers/clk/renesas/rcar-gen4-cpg.c
new file mode 100644
index 000000000000..54ebf4b3c128
--- /dev/null
+++ b/drivers/clk/renesas/rcar-gen4-cpg.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * R-Car Gen4 Clock Pulse Generator
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ * Based on rcar-gen3-cpg.c
+ *
+ * Copyright (C) 2015-2018 Glider bvba
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include "renesas-cpg-mssr.h"
+#include "rcar-gen4-cpg.h"
+#include "rcar-cpg-lib.h"
+
+static const struct rcar_gen4_cpg_pll_config *cpg_pll_config __initdata;
+static unsigned int cpg_clk_extalr __initdata;
+static u32 cpg_mode __initdata;
+
+/*
+ * Z0 Clock & Z1 Clock
+ */
+#define CPG_FRQCRB 0x00000804
+#define CPG_FRQCRB_KICK BIT(31)
+#define CPG_FRQCRC 0x00000808
+
+struct cpg_z_clk {
+ struct clk_hw hw;
+ void __iomem *reg;
+ void __iomem *kick_reg;
+ unsigned long max_rate; /* Maximum rate for normal mode */
+ unsigned int fixed_div;
+ u32 mask;
+};
+
+#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
+
+static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ u32 val;
+
+ val = readl(zclk->reg) & zclk->mask;
+ mult = 32 - (val >> __ffs(zclk->mask));
+
+ return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
+ 32 * zclk->fixed_div);
+}
+
+static int cpg_z_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int min_mult, max_mult, mult;
+ unsigned long rate, prate;
+
+ rate = min(req->rate, req->max_rate);
+ if (rate <= zclk->max_rate) {
+ /* Set parent rate to initial value for normal modes */
+ prate = zclk->max_rate;
+ } else {
+ /* Set increased parent rate for boost modes */
+ prate = rate;
+ }
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ prate * zclk->fixed_div);
+
+ prate = req->best_parent_rate / zclk->fixed_div;
+ min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
+ max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
+ if (max_mult < min_mult)
+ return -EINVAL;
+
+ mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL, prate);
+ mult = clamp(mult, min_mult, max_mult);
+
+ req->rate = DIV_ROUND_CLOSEST_ULL((u64)prate * mult, 32);
+ return 0;
+}
+
+static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ unsigned int i;
+
+ mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
+ parent_rate);
+ mult = clamp(mult, 1U, 32U);
+
+ if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ return -EBUSY;
+
+ cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+
+ /*
+ * Set KICK bit in FRQCRB to update hardware setting and wait for
+ * clock change completion.
+ */
+ cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);
+
+ /*
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Using experimental measurements, it seems that no more than
+ * ~10 iterations are needed, independently of the CPU rate.
+ * Since this value might be dependent on external xtal rate, pll1
+ * rate or even the other emulation clocks rate, use 1000 as a
+ * "super" safe value.
+ */
+ for (i = 1000; i; i--) {
+ if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+static const struct clk_ops cpg_z_clk_ops = {
+ .recalc_rate = cpg_z_clk_recalc_rate,
+ .determine_rate = cpg_z_clk_determine_rate,
+ .set_rate = cpg_z_clk_set_rate,
+};
+
+static struct clk * __init cpg_z_clk_register(const char *name,
+ const char *parent_name,
+ void __iomem *reg,
+ unsigned int div,
+ unsigned int offset)
+{
+ struct clk_init_data init = {};
+ struct cpg_z_clk *zclk;
+ struct clk *clk;
+
+ zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
+ if (!zclk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &cpg_z_clk_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ zclk->reg = reg + CPG_FRQCRC;
+ zclk->kick_reg = reg + CPG_FRQCRB;
+ zclk->hw.init = &init;
+ zclk->mask = GENMASK(offset + 4, offset);
+ zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */
+
+ clk = clk_register(NULL, &zclk->hw);
+ if (IS_ERR(clk)) {
+ kfree(zclk);
+ return clk;
+ }
+
+ zclk->max_rate = clk_hw_get_rate(clk_hw_get_parent(&zclk->hw)) /
+ zclk->fixed_div;
+ return clk;
+}
+
+/*
+ * RPC Clocks
+ */
+static const struct clk_div_table cpg_rpcsrc_div_table[] = {
+ { 0, 4 }, { 1, 6 }, { 2, 5 }, { 3, 6 }, { 0, 0 },
+};
+
+struct clk * __init rcar_gen4_cpg_clk_register(struct device *dev,
+ const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers)
+{
+ const struct clk *parent;
+ unsigned int mult = 1;
+ unsigned int div = 1;
+ u32 value;
+
+ parent = clks[core->parent & 0xffff]; /* some types use high bits */
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+
+ switch (core->type) {
+ case CLK_TYPE_GEN4_MAIN:
+ div = cpg_pll_config->extal_div;
+ break;
+
+ case CLK_TYPE_GEN4_PLL1:
+ mult = cpg_pll_config->pll1_mult;
+ div = cpg_pll_config->pll1_div;
+ break;
+
+ case CLK_TYPE_GEN4_PLL2:
+ mult = cpg_pll_config->pll2_mult;
+ div = cpg_pll_config->pll2_div;
+ break;
+
+ case CLK_TYPE_GEN4_PLL3:
+ mult = cpg_pll_config->pll3_mult;
+ div = cpg_pll_config->pll3_div;
+ break;
+
+ case CLK_TYPE_GEN4_PLL5:
+ mult = cpg_pll_config->pll5_mult;
+ div = cpg_pll_config->pll5_div;
+ break;
+
+ case CLK_TYPE_GEN4_PLL6:
+ mult = cpg_pll_config->pll6_mult;
+ div = cpg_pll_config->pll6_div;
+ break;
+
+ case CLK_TYPE_GEN4_PLL2X_3X:
+ value = readl(base + core->offset);
+ mult = (((value >> 24) & 0x7f) + 1) * 2;
+ break;
+
+ case CLK_TYPE_GEN4_Z:
+ return cpg_z_clk_register(core->name, __clk_get_name(parent),
+ base, core->div, core->offset);
+
+ case CLK_TYPE_GEN4_SDSRC:
+ div = ((readl(base + SD0CKCR1) >> 29) & 0x03) + 4;
+ break;
+
+ case CLK_TYPE_GEN4_SDH:
+ return cpg_sdh_clk_register(core->name, base + core->offset,
+ __clk_get_name(parent), notifiers);
+
+ case CLK_TYPE_GEN4_SD:
+ return cpg_sd_clk_register(core->name, base + core->offset,
+ __clk_get_name(parent));
+
+ case CLK_TYPE_GEN4_MDSEL:
+ /*
+ * Clock selectable between two parents and two fixed dividers
+ * using a mode pin
+ */
+ if (cpg_mode & BIT(core->offset)) {
+ div = core->div & 0xffff;
+ } else {
+ parent = clks[core->parent >> 16];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+ div = core->div >> 16;
+ }
+ mult = 1;
+ break;
+
+ case CLK_TYPE_GEN4_OSC:
+ /*
+ * Clock combining OSC EXTAL predivider and a fixed divider
+ */
+ div = cpg_pll_config->osc_prediv * core->div;
+ break;
+
+ case CLK_TYPE_GEN4_RPCSRC:
+ return clk_register_divider_table(NULL, core->name,
+ __clk_get_name(parent), 0,
+ base + CPG_RPCCKCR, 3, 2, 0,
+ cpg_rpcsrc_div_table,
+ &cpg_lock);
+
+ case CLK_TYPE_GEN4_RPC:
+ return cpg_rpc_clk_register(core->name, base + CPG_RPCCKCR,
+ __clk_get_name(parent), notifiers);
+
+ case CLK_TYPE_GEN4_RPCD2:
+ return cpg_rpcd2_clk_register(core->name, base + CPG_RPCCKCR,
+ __clk_get_name(parent));
+
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ return clk_register_fixed_factor(NULL, core->name,
+ __clk_get_name(parent), 0, mult, div);
+}
+
+int __init rcar_gen4_cpg_init(const struct rcar_gen4_cpg_pll_config *config,
+ unsigned int clk_extalr, u32 mode)
+{
+ cpg_pll_config = config;
+ cpg_clk_extalr = clk_extalr;
+ cpg_mode = mode;
+
+ spin_lock_init(&cpg_lock);
+
+ return 0;
+}
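
The Z-clock ops above scale the parent rate by mult/32, where the FRQCRC field stores 32 - mult, then apply the fixed post-divider; set_rate writes the field, pulses FRQCRB.KICK, and polls until the hardware clears it. A standalone sketch of the rate arithmetic (plain C; the 1.7 GHz parent and field value are invented inputs, z_clk_rate() is an invented helper):

#include <stdint.h>
#include <stdio.h>

/* Mirror of cpg_z_clk_recalc_rate(): rate = parent * (32 - field) / 32
 * / fixed_div, rounded to nearest as DIV_ROUND_CLOSEST_ULL does. */
static unsigned long z_clk_rate(unsigned long parent_rate,
				uint32_t frqcrc_field, unsigned int fixed_div)
{
	unsigned int mult = 32 - frqcrc_field;
	uint64_t den = 32ULL * fixed_div;

	return (unsigned long)(((uint64_t)parent_rate * mult + den / 2) / den);
}

int main(void)
{
	/* e.g. field 8 at a 1.7 GHz parent, fixed_div 2 -> 637.5 MHz */
	printf("%lu\n", z_clk_rate(1700000000UL, 8, 2));
	return 0;
}
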
diff --git a/drivers/clk/renesas/rcar-gen4-cpg.h b/drivers/clk/renesas/rcar-gen4-cpg.h
new file mode 100644
index 000000000000..afc8c024d538
--- /dev/null
+++ b/drivers/clk/renesas/rcar-gen4-cpg.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * R-Car Gen4 Clock Pulse Generator
+ *
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ */
+
+#ifndef __CLK_RENESAS_RCAR_GEN4_CPG_H__
+#define __CLK_RENESAS_RCAR_GEN4_CPG_H__
+
+enum rcar_gen4_clk_types {
+ CLK_TYPE_GEN4_MAIN = CLK_TYPE_CUSTOM,
+ CLK_TYPE_GEN4_PLL1,
+ CLK_TYPE_GEN4_PLL2,
+ CLK_TYPE_GEN4_PLL2X_3X, /* r8a779a0 only */
+ CLK_TYPE_GEN4_PLL3,
+ CLK_TYPE_GEN4_PLL5,
+ CLK_TYPE_GEN4_PLL6,
+ CLK_TYPE_GEN4_SDSRC,
+ CLK_TYPE_GEN4_SDH,
+ CLK_TYPE_GEN4_SD,
+ CLK_TYPE_GEN4_MDSEL, /* Select parent/divider using mode pin */
+ CLK_TYPE_GEN4_Z,
+ CLK_TYPE_GEN4_OSC, /* OSC EXTAL predivider and fixed divider */
+ CLK_TYPE_GEN4_RPCSRC,
+ CLK_TYPE_GEN4_RPC,
+ CLK_TYPE_GEN4_RPCD2,
+
+ /* SoC specific definitions start here */
+ CLK_TYPE_GEN4_SOC_BASE,
+};
+
+#define DEF_GEN4_SDH(_name, _id, _parent, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_SDH, _parent, .offset = _offset)
+
+#define DEF_GEN4_SD(_name, _id, _parent, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_SD, _parent, .offset = _offset)
+
+#define DEF_GEN4_MDSEL(_name, _id, _md, _parent0, _div0, _parent1, _div1) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_MDSEL, \
+ (_parent0) << 16 | (_parent1), \
+ .div = (_div0) << 16 | (_div1), .offset = _md)
+
+#define DEF_GEN4_OSC(_name, _id, _parent, _div) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN4_OSC, _parent, .div = _div)
+
+#define DEF_GEN4_Z(_name, _id, _type, _parent, _div, _offset) \
+ DEF_BASE(_name, _id, _type, _parent, .div = _div, .offset = _offset)
+
+struct rcar_gen4_cpg_pll_config {
+ u8 extal_div;
+ u8 pll1_mult;
+ u8 pll1_div;
+ u8 pll2_mult;
+ u8 pll2_div;
+ u8 pll3_mult;
+ u8 pll3_div;
+ u8 pll5_mult;
+ u8 pll5_div;
+ u8 pll6_mult;
+ u8 pll6_div;
+ u8 osc_prediv;
+};
+
+#define CPG_RPCCKCR 0x874
+#define SD0CKCR1 0x8a4
+
+struct clk *rcar_gen4_cpg_clk_register(struct device *dev,
+ const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
+ struct clk **clks, void __iomem *base,
+ struct raw_notifier_head *notifiers);
+int rcar_gen4_cpg_init(const struct rcar_gen4_cpg_pll_config *config,
+ unsigned int clk_extalr, u32 mode);
+
+#endif
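
DEF_GEN4_MDSEL() above packs two (parent, divider) candidates into the 32-bit .parent and .div fields, and the mode pin named by .offset selects a half at runtime, as the CLK_TYPE_GEN4_MDSEL case shows. A sketch of the convention (plain C; struct and helper names are invented):

#include <stdint.h>

struct mdsel_conf { uint32_t parent; uint32_t div; };

/* Pair 0 in the high halves, pair 1 in the low halves,
 * matching DEF_GEN4_MDSEL() above. */
#define MDSEL_PACK(p0, d0, p1, d1) \
	{ .parent = ((uint32_t)(p0) << 16) | (uint32_t)(p1), \
	  .div    = ((uint32_t)(d0) << 16) | (uint32_t)(d1) }

static void mdsel_select(const struct mdsel_conf *c, int mode_bit_set,
			 unsigned int *parent, unsigned int *div)
{
	if (mode_bit_set) {	/* mode pin set: use pair 1 (low halves) */
		*parent = c->parent & 0xffff;
		*div = c->div & 0xffff;
	} else {		/* mode pin clear: use pair 0 (high halves) */
		*parent = c->parent >> 16;
		*div = c->div >> 16;
	}
}
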
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 21f762aa2131..5d2c3edbaa14 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -57,9 +57,11 @@ static const u16 mstpsr[] = {
0x9A0, 0x9A4, 0x9A8, 0x9AC,
};
-static const u16 mstpsr_for_v3u[] = {
+static const u16 mstpsr_for_gen4[] = {
0x2E00, 0x2E04, 0x2E08, 0x2E0C, 0x2E10, 0x2E14, 0x2E18, 0x2E1C,
- 0x2E20, 0x2E24, 0x2E28, 0x2E2C, 0x2E30, 0x2E34, 0x2E38,
+ 0x2E20, 0x2E24, 0x2E28, 0x2E2C, 0x2E30, 0x2E34, 0x2E38, 0x2E3C,
+ 0x2E40, 0x2E44, 0x2E48, 0x2E4C, 0x2E50, 0x2E54, 0x2E58, 0x2E5C,
+ 0x2E60, 0x2E64, 0x2E68, 0x2E6C,
};
/*
@@ -71,9 +73,11 @@ static const u16 smstpcr[] = {
0x990, 0x994, 0x998, 0x99C,
};
-static const u16 mstpcr_for_v3u[] = {
+static const u16 mstpcr_for_gen4[] = {
0x2D00, 0x2D04, 0x2D08, 0x2D0C, 0x2D10, 0x2D14, 0x2D18, 0x2D1C,
- 0x2D20, 0x2D24, 0x2D28, 0x2D2C, 0x2D30, 0x2D34, 0x2D38,
+ 0x2D20, 0x2D24, 0x2D28, 0x2D2C, 0x2D30, 0x2D34, 0x2D38, 0x2D3C,
+ 0x2D40, 0x2D44, 0x2D48, 0x2D4C, 0x2D50, 0x2D54, 0x2D58, 0x2D5C,
+ 0x2D60, 0x2D64, 0x2D68, 0x2D6C,
};
/*
@@ -95,9 +99,11 @@ static const u16 srcr[] = {
0x920, 0x924, 0x928, 0x92C,
};
-static const u16 srcr_for_v3u[] = {
+static const u16 srcr_for_gen4[] = {
0x2C00, 0x2C04, 0x2C08, 0x2C0C, 0x2C10, 0x2C14, 0x2C18, 0x2C1C,
- 0x2C20, 0x2C24, 0x2C28, 0x2C2C, 0x2C30, 0x2C34, 0x2C38,
+ 0x2C20, 0x2C24, 0x2C28, 0x2C2C, 0x2C30, 0x2C34, 0x2C38, 0x2C3C,
+ 0x2C40, 0x2C44, 0x2C48, 0x2C4C, 0x2C50, 0x2C54, 0x2C58, 0x2C5C,
+ 0x2C60, 0x2C64, 0x2C68, 0x2C6C,
};
/*
@@ -109,9 +115,11 @@ static const u16 srstclr[] = {
0x960, 0x964, 0x968, 0x96C,
};
-static const u16 srstclr_for_v3u[] = {
+static const u16 srstclr_for_gen4[] = {
0x2C80, 0x2C84, 0x2C88, 0x2C8C, 0x2C90, 0x2C94, 0x2C98, 0x2C9C,
- 0x2CA0, 0x2CA4, 0x2CA8, 0x2CAC, 0x2CB0, 0x2CB4, 0x2CB8,
+ 0x2CA0, 0x2CA4, 0x2CA8, 0x2CAC, 0x2CB0, 0x2CB4, 0x2CB8, 0x2CBC,
+ 0x2CC0, 0x2CC4, 0x2CC8, 0x2CCC, 0x2CD0, 0x2CD4, 0x2CD8, 0x2CDC,
+ 0x2CE0, 0x2CE4, 0x2CE8, 0x2CEC,
};
/**
@@ -158,7 +166,7 @@ struct cpg_mssr_priv {
struct {
u32 mask;
u32 val;
- } smstpcr_saved[ARRAY_SIZE(mstpsr_for_v3u)];
+ } smstpcr_saved[ARRAY_SIZE(mstpsr_for_gen4)];
struct clk *clks[];
};
@@ -552,6 +560,11 @@ void cpg_mssr_detach_dev(struct generic_pm_domain *unused, struct device *dev)
pm_clk_destroy(dev);
}
+static void cpg_mssr_genpd_remove(void *data)
+{
+ pm_genpd_remove(data);
+}
+
static int __init cpg_mssr_add_clk_domain(struct device *dev,
const unsigned int *core_pm_clks,
unsigned int num_core_pm_clks)
@@ -560,6 +573,7 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
struct generic_pm_domain *genpd;
struct cpg_mssr_clk_domain *pd;
size_t pm_size = num_core_pm_clks * sizeof(core_pm_clks[0]);
+ int ret;
pd = devm_kzalloc(dev, sizeof(*pd) + pm_size, GFP_KERNEL);
if (!pd)
@@ -574,11 +588,17 @@ static int __init cpg_mssr_add_clk_domain(struct device *dev,
GENPD_FLAG_ACTIVE_WAKEUP;
genpd->attach_dev = cpg_mssr_attach_dev;
genpd->detach_dev = cpg_mssr_detach_dev;
- pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
+ ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, cpg_mssr_genpd_remove, genpd);
+ if (ret)
+ return ret;
+
cpg_mssr_clk_domain = pd;
- of_genpd_add_provider_simple(np, genpd);
- return 0;
+ return of_genpd_add_provider_simple(np, genpd);
}
#ifdef CONFIG_RESET_CONTROLLER
@@ -828,6 +848,12 @@ static const struct of_device_id cpg_mssr_match[] = {
.data = &r8a779a0_cpg_mssr_info,
},
#endif
+#ifdef CONFIG_CLK_R8A779F0
+ {
+ .compatible = "renesas,r8a779f0-cpg-mssr",
+ .data = &r8a779f0_cpg_mssr_info,
+ },
+#endif
{ /* sentinel */ }
};
@@ -970,11 +996,11 @@ static int __init cpg_mssr_common_init(struct device *dev,
priv->reset_clear_regs = srstclr;
} else if (priv->reg_layout == CLK_REG_LAYOUT_RZ_A) {
priv->control_regs = stbcr;
- } else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_V3U) {
- priv->status_regs = mstpsr_for_v3u;
- priv->control_regs = mstpcr_for_v3u;
- priv->reset_regs = srcr_for_v3u;
- priv->reset_clear_regs = srstclr_for_v3u;
+ } else if (priv->reg_layout == CLK_REG_LAYOUT_RCAR_GEN4) {
+ priv->status_regs = mstpsr_for_gen4;
+ priv->control_regs = mstpcr_for_gen4;
+ priv->reset_regs = srcr_for_gen4;
+ priv->reset_clear_regs = srstclr_for_gen4;
} else {
error = -EINVAL;
goto out_err;
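
The clock-domain hunk above (and the matching one in rzg2l-cpg.c below) introduces one error-handling shape: check pm_genpd_init(), then attach removal to device-managed cleanup so every later failure path unwinds the domain automatically. Condensed into a sketch (kernel C; add_always_on_domain() is an invented wrapper around the calls the diff actually makes):

#include <linux/device.h>
#include <linux/pm_domain.h>

static void genpd_remove_action(void *data)
{
	pm_genpd_remove(data);		/* undo pm_genpd_init() */
}

static int add_always_on_domain(struct device *dev,
				struct generic_pm_domain *genpd)
{
	int ret;

	ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	/* Runs genpd_remove_action() on detach, or right away on failure */
	ret = devm_add_action_or_reset(dev, genpd_remove_action, genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(dev->of_node, genpd);
}
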
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h
index 6b2a0ade482e..16810dd4e6ac 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.h
+++ b/drivers/clk/renesas/renesas-cpg-mssr.h
@@ -88,7 +88,7 @@ struct device_node;
enum clk_reg_layout {
CLK_REG_LAYOUT_RCAR_GEN2_AND_GEN3 = 0,
CLK_REG_LAYOUT_RZ_A,
- CLK_REG_LAYOUT_RCAR_V3U,
+ CLK_REG_LAYOUT_RCAR_GEN4,
};
/**
@@ -178,6 +178,7 @@ extern const struct cpg_mssr_info r8a77980_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77990_cpg_mssr_info;
extern const struct cpg_mssr_info r8a77995_cpg_mssr_info;
extern const struct cpg_mssr_info r8a779a0_cpg_mssr_info;
+extern const struct cpg_mssr_info r8a779f0_cpg_mssr_info;
void __init cpg_mssr_early_init(struct device_node *np,
const struct cpg_mssr_info *info);
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 4021f6cabda4..edd0abe34a37 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -74,6 +74,7 @@ struct sd_hw_data {
* @clks: Array containing all Core and Module Clocks
* @num_core_clks: Number of Core Clocks in clks[]
* @num_mod_clks: Number of Module Clocks in clks[]
+ * @num_resets: Number of Module Resets in info->resets[]
* @last_dt_core_clk: ID of the last Core Clock exported to DT
* @notifiers: Notifier chain to save/restore clock state for system resume
* @info: Pointer to platform data
@@ -850,10 +851,16 @@ static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device
pm_clk_destroy(dev);
}
+static void rzg2l_cpg_genpd_remove(void *data)
+{
+ pm_genpd_remove(data);
+}
+
static int __init rzg2l_cpg_add_clk_domain(struct device *dev)
{
struct device_node *np = dev->of_node;
struct generic_pm_domain *genpd;
+ int ret;
genpd = devm_kzalloc(dev, sizeof(*genpd), GFP_KERNEL);
if (!genpd)
@@ -864,10 +871,15 @@ static int __init rzg2l_cpg_add_clk_domain(struct device *dev)
GENPD_FLAG_ACTIVE_WAKEUP;
genpd->attach_dev = rzg2l_cpg_attach_dev;
genpd->detach_dev = rzg2l_cpg_detach_dev;
- pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
+ ret = pm_genpd_init(genpd, &pm_domain_always_on_gov, false);
+ if (ret)
+ return ret;
- of_genpd_add_provider_simple(np, genpd);
- return 0;
+ ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, genpd);
+ if (ret)
+ return ret;
+
+ return of_genpd_add_provider_simple(np, genpd);
}
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index 7fb6b4030f72..5729d102034b 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -9,11 +9,14 @@
#ifndef __RENESAS_RZG2L_CPG_H__
#define __RENESAS_RZG2L_CPG_H__
+#define CPG_PL1_DDIV (0x200)
#define CPG_PL2_DDIV (0x204)
#define CPG_PL3A_DDIV (0x208)
+#define CPG_PL6_DDIV (0x210)
#define CPG_PL2SDHI_DSEL (0x218)
#define CPG_CLKSTATUS (0x280)
#define CPG_PL3_SSEL (0x408)
+#define CPG_PL6_SSEL (0x414)
#define CPG_PL6_ETH_SSEL (0x418)
#define CPG_CLKSTATUS_SELSDHI0_STS BIT(28)
@@ -29,16 +32,19 @@
#define DDIV_PACK(offset, bitpos, size) \
(((offset) << 20) | ((bitpos) << 12) | ((size) << 8))
+#define DIVPL1A DDIV_PACK(CPG_PL1_DDIV, 0, 2)
#define DIVPL2A DDIV_PACK(CPG_PL2_DDIV, 0, 3)
#define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3)
#define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3)
#define DIVPL3C DDIV_PACK(CPG_PL3A_DDIV, 8, 3)
+#define DIVGPU DDIV_PACK(CPG_PL6_DDIV, 0, 2)
#define SEL_PLL_PACK(offset, bitpos, size) \
(((offset) << 20) | ((bitpos) << 12) | ((size) << 8))
#define SEL_PLL3_3 SEL_PLL_PACK(CPG_PL3_SSEL, 8, 1)
#define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1)
+#define SEL_GPU2 SEL_PLL_PACK(CPG_PL6_SSEL, 12, 1)
#define SEL_SDHI0 DDIV_PACK(CPG_PL2SDHI_DSEL, 0, 2)
#define SEL_SDHI1 DDIV_PACK(CPG_PL2SDHI_DSEL, 4, 2)
@@ -168,6 +174,9 @@ struct rzg2l_reset {
* @num_mod_clks: Number of entries in mod_clks[]
* @num_hw_mod_clks: Number of Module Clocks supported by the hardware
*
+ * @resets: Array of Module Reset definitions
+ * @num_resets: Number of entries in resets[]
+ *
* @crit_mod_clks: Array with Module Clock IDs of critical clocks that
* should not be disabled without a knowledgeable driver
* @num_crit_mod_clks: Number of entries in crit_mod_clks[]
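
The new DIVPL1A, DIVGPU and SEL_GPU2 entries above are single-u32 descriptors: DDIV_PACK()/SEL_PLL_PACK() fold a register offset, bit position and field width into one word. A sketch of the packing plus a matching unpack (plain C; ddiv_unpack() is an invented helper that just reverses the macro's shifts):

#include <stdint.h>

#define DDIV_PACK(offset, bitpos, size) \
	(((uint32_t)(offset) << 20) | ((uint32_t)(bitpos) << 12) | \
	 ((uint32_t)(size) << 8))

static void ddiv_unpack(uint32_t conf, unsigned int *offset,
			unsigned int *bitpos, unsigned int *size)
{
	*offset = conf >> 20;		/* register offset, e.g. 0x200 */
	*bitpos = (conf >> 12) & 0xff;	/* field LSB within the register */
	*size   = (conf >> 8) & 0xf;	/* field width in bits */
}
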
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index c46cf11e4d0b..0df74916a895 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -16,7 +16,9 @@ obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5-subcmu.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
obj-$(CONFIG_EXYNOS_CLKOUT) += clk-exynos-clkout.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos-arm64.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7885.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 7f20d9aedaa9..3e62ade120c5 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -400,7 +400,7 @@ static int exynos5433_cpuclk_notifier_cb(struct notifier_block *nb,
}
/* helper function to register a CPU clock */
-int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
+static int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
unsigned int lookup_id, const char *name,
const struct clk_hw *parent, const struct clk_hw *alt_parent,
unsigned long offset, const struct exynos_cpuclk_cfg_data *cfg,
diff --git a/drivers/clk/samsung/clk-cpu.h b/drivers/clk/samsung/clk-cpu.h
index af74686db9ef..fc9f67a3b22e 100644
--- a/drivers/clk/samsung/clk-cpu.h
+++ b/drivers/clk/samsung/clk-cpu.h
@@ -62,11 +62,4 @@ struct exynos_cpuclk {
#define CLK_CPU_HAS_E5433_REGS_LAYOUT (1 << 2)
};
-int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
- unsigned int lookup_id, const char *name,
- const struct clk_hw *parent, const struct clk_hw *alt_parent,
- unsigned long offset,
- const struct exynos_cpuclk_cfg_data *cfg,
- unsigned long num_cfgs, unsigned long flags);
-
#endif /* __SAMSUNG_CLK_CPU_H */
diff --git a/drivers/clk/samsung/clk-exynos-arm64.c b/drivers/clk/samsung/clk-exynos-arm64.c
new file mode 100644
index 000000000000..b921b9a1134a
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos-arm64.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Linaro Ltd.
+ * Copyright (C) 2021 Dávid Virág <virag.david003@gmail.com>
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ * Author: Dávid Virág <virag.david003@gmail.com>
+ *
+ * This file contains shared functions used by some arm64 Exynos SoCs,
+ * such as Exynos7885 or Exynos850, to register and init CMUs.
+ */
+#include <linux/clk.h>
+#include <linux/of_address.h>
+
+#include "clk-exynos-arm64.h"
+
+/* Gate register bits */
+#define GATE_MANUAL BIT(20)
+#define GATE_ENABLE_HWACG BIT(28)
+
+/* Gate register offsets range */
+#define GATE_OFF_START 0x2000
+#define GATE_OFF_END 0x2fff
+
+/**
+ * exynos_arm64_init_clocks - Set clocks' initial configuration
+ * @np: CMU device tree node with "reg" property (CMU addr)
+ * @reg_offs: Register offsets array for clocks to init
+ * @reg_offs_len: Number of register offsets in reg_offs array
+ *
+ * Set manual control mode for all gate clocks.
+ */
+static void __init exynos_arm64_init_clocks(struct device_node *np,
+ const unsigned long *reg_offs, size_t reg_offs_len)
+{
+ void __iomem *reg_base;
+ size_t i;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+
+ for (i = 0; i < reg_offs_len; ++i) {
+ void __iomem *reg = reg_base + reg_offs[i];
+ u32 val;
+
+ /* Modify only gate clock registers */
+ if (reg_offs[i] < GATE_OFF_START || reg_offs[i] > GATE_OFF_END)
+ continue;
+
+ val = readl(reg);
+ val |= GATE_MANUAL;
+ val &= ~GATE_ENABLE_HWACG;
+ writel(val, reg);
+ }
+
+ iounmap(reg_base);
+}
+
+/**
+ * exynos_arm64_register_cmu - Register specified Exynos CMU domain
+ * @dev: Device object; may be NULL if this function is not being
+ * called from platform driver probe function
+ * @np: CMU device tree node
+ * @cmu: CMU data
+ *
+ * Register the specified CMU domain, which involves the following steps:
+ *
+ * 1. Enable parent clock of @cmu CMU
+ * 2. Set initial registers configuration for @cmu CMU clocks
+ * 3. Register @cmu CMU clocks using Samsung clock framework API
+ */
+void __init exynos_arm64_register_cmu(struct device *dev,
+ struct device_node *np, const struct samsung_cmu_info *cmu)
+{
+ /* Keep CMU parent clock running (needed for CMU register access) */
+ if (cmu->clk_name) {
+ struct clk *parent_clk;
+
+ if (dev)
+ parent_clk = clk_get(dev, cmu->clk_name);
+ else
+ parent_clk = of_clk_get_by_name(np, cmu->clk_name);
+
+ if (IS_ERR(parent_clk)) {
+ pr_err("%s: could not find bus clock %s; err = %ld\n",
+ __func__, cmu->clk_name, PTR_ERR(parent_clk));
+ } else {
+ clk_prepare_enable(parent_clk);
+ }
+ }
+
+ exynos_arm64_init_clocks(np, cmu->clk_regs, cmu->nr_clk_regs);
+ samsung_cmu_register_one(np, cmu);
+}
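
exynos_arm64_init_clocks() only rewrites registers inside the 0x2000-0x2fff gate window, putting every gate under manual (software) control and dropping hardware auto-gating. The per-register update reduces to the following (plain C sketch, bit names as defined above):

#include <stdint.h>

#define GATE_MANUAL		(1u << 20)
#define GATE_ENABLE_HWACG	(1u << 28)

static uint32_t gate_to_manual(uint32_t val)
{
	val |= GATE_MANUAL;		/* gate obeys the software enable bit */
	val &= ~GATE_ENABLE_HWACG;	/* drop hardware auto clock gating */
	return val;
}

Doing this before samsung_cmu_register_one() presumably ensures the framework's enable/disable writes take effect rather than being overridden by HWACG.
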
diff --git a/drivers/clk/samsung/clk-exynos-arm64.h b/drivers/clk/samsung/clk-exynos-arm64.h
new file mode 100644
index 000000000000..0dd174693935
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos-arm64.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Linaro Ltd.
+ * Copyright (C) 2021 Dávid Virág <virag.david003@gmail.com>
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ * Author: Dávid Virág <virag.david003@gmail.com>
+ *
+ * This file contains shared functions used by some arm64 Exynos SoCs,
+ * such as Exynos7885 or Exynos850, to register and init CMUs.
+ */
+
+#ifndef __CLK_EXYNOS_ARM64_H
+#define __CLK_EXYNOS_ARM64_H
+
+#include "clk.h"
+
+void exynos_arm64_register_cmu(struct device *dev,
+ struct device_node *np, const struct samsung_cmu_info *cmu);
+
+#endif /* __CLK_EXYNOS_ARM64_H */
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 17df7f9755aa..6cc65ccf867c 100644
--- a/drivers/clk/samsung/clk-exynos3250.c
+++ b/drivers/clk/samsung/clk-exynos3250.c
@@ -748,6 +748,31 @@ static const struct samsung_pll_clock exynos3250_plls[] __initconst = {
UPLL_LOCK, UPLL_CON0, exynos3250_pll_rates),
};
+#define E3250_CPU_DIV0(apll, pclk_dbg, atb, corem) \
+ (((apll) << 24) | ((pclk_dbg) << 20) | ((atb) << 16) | \
+ ((corem) << 4))
+#define E3250_CPU_DIV1(hpm, copy) \
+ (((hpm) << 4) | ((copy) << 0))
+
+static const struct exynos_cpuclk_cfg_data e3250_armclk_d[] __initconst = {
+ { 1000000, E3250_CPU_DIV0(1, 7, 4, 1), E3250_CPU_DIV1(7, 7), },
+ { 900000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 800000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 700000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 600000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 500000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 400000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 300000, E3250_CPU_DIV0(1, 5, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 200000, E3250_CPU_DIV0(1, 3, 3, 1), E3250_CPU_DIV1(7, 7), },
+ { 100000, E3250_CPU_DIV0(1, 1, 1, 1), E3250_CPU_DIV1(7, 7), },
+ { 0 },
+};
+
+static const struct samsung_cpu_clock exynos3250_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MPLL_USER_C,
+ CLK_CPU_HAS_DIV1, 0x14200, e3250_armclk_d),
+};
+
static void __init exynos3_core_down_clock(void __iomem *reg_base)
{
unsigned int tmp;
@@ -780,46 +805,21 @@ static const struct samsung_cmu_info cmu_info __initconst = {
.nr_gate_clks = ARRAY_SIZE(gate_clks),
.fixed_factor_clks = fixed_factor_clks,
.nr_fixed_factor_clks = ARRAY_SIZE(fixed_factor_clks),
+ .cpu_clks = exynos3250_cpu_clks,
+ .nr_cpu_clks = ARRAY_SIZE(exynos3250_cpu_clks),
.nr_clk_ids = CLK_NR_CLKS,
.clk_regs = exynos3250_cmu_clk_regs,
.nr_clk_regs = ARRAY_SIZE(exynos3250_cmu_clk_regs),
};
-#define E3250_CPU_DIV0(apll, pclk_dbg, atb, corem) \
- (((apll) << 24) | ((pclk_dbg) << 20) | ((atb) << 16) | \
- ((corem) << 4))
-#define E3250_CPU_DIV1(hpm, copy) \
- (((hpm) << 4) | ((copy) << 0))
-
-static const struct exynos_cpuclk_cfg_data e3250_armclk_d[] __initconst = {
- { 1000000, E3250_CPU_DIV0(1, 7, 4, 1), E3250_CPU_DIV1(7, 7), },
- { 900000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 800000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 700000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 600000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 500000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 400000, E3250_CPU_DIV0(1, 7, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 300000, E3250_CPU_DIV0(1, 5, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 200000, E3250_CPU_DIV0(1, 3, 3, 1), E3250_CPU_DIV1(7, 7), },
- { 100000, E3250_CPU_DIV0(1, 1, 1, 1), E3250_CPU_DIV1(7, 7), },
- { 0 },
-};
-
static void __init exynos3250_cmu_init(struct device_node *np)
{
struct samsung_clk_provider *ctx;
- struct clk_hw **hws;
ctx = samsung_cmu_register_one(np, &cmu_info);
if (!ctx)
return;
- hws = ctx->clk_data.hws;
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- hws[CLK_MOUT_APLL], hws[CLK_MOUT_MPLL_USER_C],
- 0x14200, e3250_armclk_d, ARRAY_SIZE(e3250_armclk_d),
- CLK_CPU_HAS_DIV1);
-
exynos3_core_down_clock(ctx->reg_base);
}
CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
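
The exynos3250 conversion above (repeated for exynos4, exynos5250 and exynos5420 below) turns an open-coded exynos_register_cpu_clock() call into a CPU_CLK() table entry handled by the common CMU registration path. Roughly, each entry carries the same data the removed call passed as arguments, with parents referenced by clock ID instead of clk_hw pointer (sketch; the field names are illustrative, not the real samsung_cpu_clock layout):

/* Illustrative shape of one CPU-clock table entry */
struct cpu_clk_entry {
	unsigned int id;		/* e.g. CLK_ARM_CLK */
	const char *name;		/* e.g. "armclk" */
	unsigned int parent_id;		/* e.g. CLK_MOUT_APLL */
	unsigned int alt_parent_id;	/* e.g. CLK_MOUT_MPLL_USER_C */
	unsigned long flags;		/* e.g. CLK_CPU_HAS_DIV1 */
	unsigned long offset;		/* e.g. 0x14200 */
	const void *cfg;		/* rate/divider configuration rows */
};

Referencing parents by ID is what lets the tables be __initconst and resolved only after the muxes exist, and why exynos_register_cpu_clock() can become static in clk-cpu.c.
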
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index bf13e29a655c..22009cb53428 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -437,7 +437,7 @@ static const struct samsung_mux_clock exynos4_mux_clks[] __initconst = {
/* list of mux clocks supported in exynos4210 soc */
static const struct samsung_mux_clock exynos4210_mux_early[] __initconst = {
- MUX(0, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
+ MUX(CLK_MOUT_VPLLSRC, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP1, 0, 1),
};
static const struct samsung_mux_clock exynos4210_mux_clks[] __initconst = {
@@ -603,7 +603,7 @@ static const struct samsung_div_clock exynos4_div_clks[] __initconst = {
DIV(0, "div_periph", "div_core2", DIV_CPU0, 12, 3),
DIV(0, "div_atb", "mout_core", DIV_CPU0, 16, 3),
DIV(0, "div_pclk_dbg", "div_atb", DIV_CPU0, 20, 3),
- DIV(0, "div_core2", "div_core", DIV_CPU0, 28, 3),
+ DIV(CLK_DIV_CORE2, "div_core2", "div_core", DIV_CPU0, 28, 3),
DIV(0, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
DIV(0, "div_hpm", "div_copy", DIV_CPU1, 4, 3),
DIV(0, "div_clkout_cpu", "mout_clkout_cpu", CLKOUT_CMU_CPU, 8, 6),
@@ -1228,6 +1228,16 @@ static const struct exynos_cpuclk_cfg_data e4412_armclk_d[] __initconst = {
{ 0 },
};
+static const struct samsung_cpu_clock exynos4210_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_SCLK_MPLL,
+ CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1, 0x14200, e4210_armclk_d),
+};
+
+static const struct samsung_cpu_clock exynos4412_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MPLL_USER_C,
+ CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1, 0x14200, e4412_armclk_d),
+};
+
/* register exynos4 clocks */
static void __init exynos4_clk_init(struct device_node *np,
enum exynos4_soc soc)
@@ -1254,21 +1264,21 @@ static void __init exynos4_clk_init(struct device_node *np,
samsung_clk_register_mux(ctx, exynos4210_mux_early,
ARRAY_SIZE(exynos4210_mux_early));
- if (_get_rate("fin_pll") == 24000000) {
+ if (clk_hw_get_rate(hws[CLK_FIN_PLL]) == 24000000) {
exynos4210_plls[apll].rate_table =
exynos4210_apll_rates;
exynos4210_plls[epll].rate_table =
exynos4210_epll_rates;
}
- if (_get_rate("mout_vpllsrc") == 24000000)
+ if (clk_hw_get_rate(hws[CLK_MOUT_VPLLSRC]) == 24000000)
exynos4210_plls[vpll].rate_table =
exynos4210_vpll_rates;
samsung_clk_register_pll(ctx, exynos4210_plls,
ARRAY_SIZE(exynos4210_plls), reg_base);
} else {
- if (_get_rate("fin_pll") == 24000000) {
+ if (clk_hw_get_rate(hws[CLK_FIN_PLL]) == 24000000) {
exynos4x12_plls[apll].rate_table =
exynos4x12_apll_rates;
exynos4x12_plls[epll].rate_table =
@@ -1304,10 +1314,8 @@ static void __init exynos4_clk_init(struct device_node *np,
samsung_clk_register_fixed_factor(ctx,
exynos4210_fixed_factor_clks,
ARRAY_SIZE(exynos4210_fixed_factor_clks));
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- hws[CLK_MOUT_APLL], hws[CLK_SCLK_MPLL], 0x14200,
- e4210_armclk_d, ARRAY_SIZE(e4210_armclk_d),
- CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+ samsung_clk_register_cpu(ctx, exynos4210_cpu_clks,
+ ARRAY_SIZE(exynos4210_cpu_clks));
} else {
samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
ARRAY_SIZE(exynos4x12_mux_clks));
@@ -1318,11 +1326,8 @@ static void __init exynos4_clk_init(struct device_node *np,
samsung_clk_register_fixed_factor(ctx,
exynos4x12_fixed_factor_clks,
ARRAY_SIZE(exynos4x12_fixed_factor_clks));
-
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- hws[CLK_MOUT_APLL], hws[CLK_MOUT_MPLL_USER_C], 0x14200,
- e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d),
- CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+ samsung_clk_register_cpu(ctx, exynos4412_cpu_clks,
+ ARRAY_SIZE(exynos4412_cpu_clks));
}
if (soc == EXYNOS4X12)
@@ -1344,9 +1349,11 @@ static void __init exynos4_clk_init(struct device_node *np,
pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n"
"\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n",
exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12",
- _get_rate("sclk_apll"), _get_rate("sclk_mpll"),
- _get_rate("sclk_epll"), _get_rate("sclk_vpll"),
- _get_rate("div_core2"));
+ clk_hw_get_rate(hws[CLK_SCLK_APLL]),
+ clk_hw_get_rate(hws[CLK_SCLK_MPLL]),
+ clk_hw_get_rate(hws[CLK_SCLK_EPLL]),
+ clk_hw_get_rate(hws[CLK_SCLK_VPLL]),
+ clk_hw_get_rate(hws[CLK_DIV_CORE2]));
}
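
A companion cleanup in this file: _get_rate("name") string lookups become clk_hw_get_rate(hws[ID]) on indexed handles, which is why mout_vpllsrc and div_core2 gain the CLK_MOUT_VPLLSRC and CLK_DIV_CORE2 IDs above. In miniature (kernel C sketch; apll_rate() is an invented wrapper):

#include <linux/clk-provider.h>
#include <dt-bindings/clock/exynos4.h>	/* CLK_SCLK_APLL */

static unsigned long apll_rate(struct clk_hw **hws)
{
	return clk_hw_get_rate(hws[CLK_SCLK_APLL]);	/* no string lookup */
}
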
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 06588fab408a..113df773ee44 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -239,7 +239,7 @@ static const struct samsung_fixed_factor_clock exynos5250_fixed_factor_clks[] __
};
static const struct samsung_mux_clock exynos5250_pll_pmux_clks[] __initconst = {
- MUX(0, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
+ MUX(CLK_MOUT_VPLLSRC, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
};
static const struct samsung_mux_clock exynos5250_mux_clks[] __initconst = {
@@ -351,7 +351,7 @@ static const struct samsung_div_clock exynos5250_div_clks[] __initconst = {
*/
DIV(0, "div_arm", "mout_cpu", DIV_CPU0, 0, 3),
DIV(0, "div_apll", "mout_apll", DIV_CPU0, 24, 3),
- DIV(0, "div_arm2", "div_arm", DIV_CPU0, 28, 3),
+ DIV(CLK_DIV_ARM2, "div_arm2", "div_arm", DIV_CPU0, 28, 3),
/*
* CMU_TOP
@@ -772,6 +772,11 @@ static const struct exynos_cpuclk_cfg_data exynos5250_armclk_d[] __initconst = {
{ 0 },
};
+static const struct samsung_cpu_clock exynos5250_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MPLL, CLK_CPU_HAS_DIV1, 0x200,
+ exynos5250_armclk_d),
+};
+
static const struct of_device_id ext_clk_match[] __initconst = {
{ .compatible = "samsung,clock-xxti", .data = (void *)0, },
{ },
@@ -801,12 +806,12 @@ static void __init exynos5250_clk_init(struct device_node *np)
samsung_clk_register_mux(ctx, exynos5250_pll_pmux_clks,
ARRAY_SIZE(exynos5250_pll_pmux_clks));
- if (_get_rate("fin_pll") == 24 * MHZ) {
+ if (clk_hw_get_rate(hws[CLK_FIN_PLL]) == 24 * MHZ) {
exynos5250_plls[epll].rate_table = epll_24mhz_tbl;
exynos5250_plls[apll].rate_table = apll_24mhz_tbl;
}
- if (_get_rate("mout_vpllsrc") == 24 * MHZ)
+ if (clk_hw_get_rate(hws[CLK_MOUT_VPLLSRC]) == 24 * MHZ)
exynos5250_plls[vpll].rate_table = vpll_24mhz_tbl;
samsung_clk_register_pll(ctx, exynos5250_plls,
@@ -822,10 +827,8 @@ static void __init exynos5250_clk_init(struct device_node *np)
ARRAY_SIZE(exynos5250_div_clks));
samsung_clk_register_gate(ctx, exynos5250_gate_clks,
ARRAY_SIZE(exynos5250_gate_clks));
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- hws[CLK_MOUT_APLL], hws[CLK_MOUT_MPLL], 0x200,
- exynos5250_armclk_d, ARRAY_SIZE(exynos5250_armclk_d),
- CLK_CPU_HAS_DIV1);
+ samsung_clk_register_cpu(ctx, exynos5250_cpu_clks,
+ ARRAY_SIZE(exynos5250_cpu_clks));
/*
* Enable arm clock down (in idle) and set arm divider
@@ -855,6 +858,6 @@ static void __init exynos5250_clk_init(struct device_node *np)
samsung_clk_of_add_provider(np, ctx);
pr_info("Exynos5250: clock setup completed, armclk=%ld\n",
- _get_rate("div_arm2"));
+ clk_hw_get_rate(hws[CLK_DIV_ARM2]));
}
CLK_OF_DECLARE_DRIVER(exynos5250_clk, "samsung,exynos5250-clock", exynos5250_clk_init);
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 3ccd4eabd2a6..caad74dee297 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -1551,6 +1551,20 @@ static const struct exynos_cpuclk_cfg_data exynos5420_kfcclk_d[] __initconst = {
{ 0 },
};
+static const struct samsung_cpu_clock exynos5420_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MSPLL_CPU, 0, 0x200,
+ exynos5420_eglclk_d),
+ CPU_CLK(CLK_KFC_CLK, "kfcclk", CLK_MOUT_KPLL, CLK_MOUT_MSPLL_KFC, 0, 0x28200,
+ exynos5420_kfcclk_d),
+};
+
+static const struct samsung_cpu_clock exynos5800_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_ARM_CLK, "armclk", CLK_MOUT_APLL, CLK_MOUT_MSPLL_CPU, 0, 0x200,
+ exynos5800_eglclk_d),
+ CPU_CLK(CLK_KFC_CLK, "kfcclk", CLK_MOUT_KPLL, CLK_MOUT_MSPLL_KFC, 0, 0x28200,
+ exynos5420_kfcclk_d),
+};
+
static const struct of_device_id ext_clk_match[] __initconst = {
{ .compatible = "samsung,exynos5420-oscclk", .data = (void *)0, },
{ },
@@ -1580,7 +1594,7 @@ static void __init exynos5x_clk_init(struct device_node *np,
ARRAY_SIZE(exynos5x_fixed_rate_ext_clks),
ext_clk_match);
- if (_get_rate("fin_pll") == 24 * MHZ) {
+ if (clk_hw_get_rate(hws[CLK_FIN_PLL]) == 24 * MHZ) {
exynos5x_plls[apll].rate_table = exynos5420_pll2550x_24mhz_tbl;
exynos5x_plls[epll].rate_table = exynos5420_epll_24mhz_tbl;
exynos5x_plls[kpll].rate_table = exynos5420_pll2550x_24mhz_tbl;
@@ -1625,17 +1639,12 @@ static void __init exynos5x_clk_init(struct device_node *np,
}
if (soc == EXYNOS5420) {
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- hws[CLK_MOUT_APLL], hws[CLK_MOUT_MSPLL_CPU], 0x200,
- exynos5420_eglclk_d, ARRAY_SIZE(exynos5420_eglclk_d), 0);
+ samsung_clk_register_cpu(ctx, exynos5420_cpu_clks,
+ ARRAY_SIZE(exynos5420_cpu_clks));
} else {
- exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
- hws[CLK_MOUT_APLL], hws[CLK_MOUT_MSPLL_CPU], 0x200,
- exynos5800_eglclk_d, ARRAY_SIZE(exynos5800_eglclk_d), 0);
+ samsung_clk_register_cpu(ctx, exynos5800_cpu_clks,
+ ARRAY_SIZE(exynos5800_cpu_clks));
}
- exynos_register_cpu_clock(ctx, CLK_KFC_CLK, "kfcclk",
- hws[CLK_MOUT_KPLL], hws[CLK_MOUT_MSPLL_KFC], 0x28200,
- exynos5420_kfcclk_d, ARRAY_SIZE(exynos5420_kfcclk_d), 0);
samsung_clk_extended_sleep_init(reg_base,
exynos5x_clk_regs, ARRAY_SIZE(exynos5x_clk_regs),
diff --git a/drivers/clk/samsung/clk-exynos7885.c b/drivers/clk/samsung/clk-exynos7885.c
new file mode 100644
index 000000000000..a7b106302706
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos7885.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Dávid Virág <virag.david003@gmail.com>
+ * Author: Dávid Virág <virag.david003@gmail.com>
+ *
+ * Common Clock Framework support for Exynos7885 SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/exynos7885.h>
+
+#include "clk.h"
+#include "clk-exynos-arm64.h"
+
+/* ---- CMU_TOP ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_TOP (0x12060000) */
+#define PLL_LOCKTIME_PLL_SHARED0 0x0000
+#define PLL_LOCKTIME_PLL_SHARED1 0x0004
+#define PLL_CON0_PLL_SHARED0 0x0100
+#define PLL_CON0_PLL_SHARED1 0x0120
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1014
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_CCI 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_G3D 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_BUS 0x1058
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_SPI0 0x105c
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_SPI1 0x1060
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_UART0 0x1064
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_UART1 0x1068
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_UART2 0x106c
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_USI0 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_USI1 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_USI2 0x1078
+#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x181c
+#define CLK_CON_DIV_CLKCMU_CORE_CCI 0x1820
+#define CLK_CON_DIV_CLKCMU_CORE_G3D 0x1824
+#define CLK_CON_DIV_CLKCMU_PERI_BUS 0x1874
+#define CLK_CON_DIV_CLKCMU_PERI_SPI0 0x1878
+#define CLK_CON_DIV_CLKCMU_PERI_SPI1 0x187c
+#define CLK_CON_DIV_CLKCMU_PERI_UART0 0x1880
+#define CLK_CON_DIV_CLKCMU_PERI_UART1 0x1884
+#define CLK_CON_DIV_CLKCMU_PERI_UART2 0x1888
+#define CLK_CON_DIV_CLKCMU_PERI_USI0 0x188c
+#define CLK_CON_DIV_CLKCMU_PERI_USI1 0x1890
+#define CLK_CON_DIV_CLKCMU_PERI_USI2 0x1894
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x189c
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x18a0
+#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x18a4
+#define CLK_CON_DIV_PLL_SHARED0_DIV5 0x18a8
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x18ac
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x18b0
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18b4
+#define CLK_CON_GAT_GATE_CLKCMUC_PERI_UART1 0x2004
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x201c
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_CCI 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_G3D 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_BUS 0x207c
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_SPI0 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_SPI1 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_UART0 0x2088
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_UART2 0x208c
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_USI0 0x2090
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_USI1 0x2094
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_USI2 0x2098
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_CCI,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_G3D,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_SPI0,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_SPI1,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART0,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART1,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART2,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_USI0,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_USI1,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_USI2,
+ CLK_CON_DIV_CLKCMU_CORE_BUS,
+ CLK_CON_DIV_CLKCMU_CORE_CCI,
+ CLK_CON_DIV_CLKCMU_CORE_G3D,
+ CLK_CON_DIV_CLKCMU_PERI_BUS,
+ CLK_CON_DIV_CLKCMU_PERI_SPI0,
+ CLK_CON_DIV_CLKCMU_PERI_SPI1,
+ CLK_CON_DIV_CLKCMU_PERI_UART0,
+ CLK_CON_DIV_CLKCMU_PERI_UART1,
+ CLK_CON_DIV_CLKCMU_PERI_UART2,
+ CLK_CON_DIV_CLKCMU_PERI_USI0,
+ CLK_CON_DIV_CLKCMU_PERI_USI1,
+ CLK_CON_DIV_CLKCMU_PERI_USI2,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_PLL_SHARED0_DIV5,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_GAT_GATE_CLKCMUC_PERI_UART1,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_CCI,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_G3D,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_SPI0,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_SPI1,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_UART0,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_UART2,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_USI0,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_USI1,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_USI2,
+};
+
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ PLL(pll_1417x, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON0_PLL_SHARED0,
+ NULL),
+ PLL(pll_1417x, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON0_PLL_SHARED1,
+ NULL),
+};
+
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_CORE */
+PNAME(mout_core_bus_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared0_div3" };
+PNAME(mout_core_cci_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared0_div3" };
+PNAME(mout_core_g3d_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared0_div3" };
+
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_PERI */
+PNAME(mout_peri_bus_p) = { "dout_shared0_div4", "dout_shared1_div4" };
+PNAME(mout_peri_spi0_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_spi1_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_uart0_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_uart1_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_uart2_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_usi0_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_usi1_p) = { "oscclk", "dout_shared0_div4" };
+PNAME(mout_peri_usi2_p) = { "oscclk", "dout_shared0_div4" };
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ /* CORE */
+ MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 2),
+ MUX(CLK_MOUT_CORE_CCI, "mout_core_cci", mout_core_cci_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_CCI, 0, 2),
+ MUX(CLK_MOUT_CORE_G3D, "mout_core_g3d", mout_core_g3d_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_G3D, 0, 2),
+
+ /* PERI */
+ MUX(CLK_MOUT_PERI_BUS, "mout_peri_bus", mout_peri_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_BUS, 0, 1),
+ MUX(CLK_MOUT_PERI_SPI0, "mout_peri_spi0", mout_peri_spi0_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_SPI0, 0, 1),
+ MUX(CLK_MOUT_PERI_SPI1, "mout_peri_spi1", mout_peri_spi1_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_SPI1, 0, 1),
+ MUX(CLK_MOUT_PERI_UART0, "mout_peri_uart0", mout_peri_uart0_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART0, 0, 1),
+ MUX(CLK_MOUT_PERI_UART1, "mout_peri_uart1", mout_peri_uart1_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART1, 0, 1),
+ MUX(CLK_MOUT_PERI_UART2, "mout_peri_uart2", mout_peri_uart2_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART2, 0, 1),
+ MUX(CLK_MOUT_PERI_USI0, "mout_peri_usi0", mout_peri_usi0_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_USI0, 0, 1),
+ MUX(CLK_MOUT_PERI_USI1, "mout_peri_usi1", mout_peri_usi1_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_USI1, 0, 1),
+ MUX(CLK_MOUT_PERI_USI2, "mout_peri_usi2", mout_peri_usi2_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_USI2, 0, 1),
+};
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ /* TOP */
+ DIV(CLK_DOUT_SHARED0_DIV2, "dout_shared0_div2", "fout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED0_DIV3, "dout_shared0_div3", "fout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4", "fout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
+ DIV(CLK_DOUT_SHARED0_DIV5, "dout_shared0_div5", "fout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV5, 0, 3),
+ DIV(CLK_DOUT_SHARED1_DIV2, "dout_shared1_div2", "fout_shared1_pll",
+ CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV3, "dout_shared1_div3", "fout_shared1_pll",
+ CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4", "fout_shared1_pll",
+ CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+
+ /* CORE */
+ DIV(CLK_DOUT_CORE_BUS, "dout_core_bus", "gout_core_bus",
+ CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 3),
+ DIV(CLK_DOUT_CORE_CCI, "dout_core_cci", "gout_core_cci",
+ CLK_CON_DIV_CLKCMU_CORE_CCI, 0, 3),
+ DIV(CLK_DOUT_CORE_G3D, "dout_core_g3d", "gout_core_g3d",
+ CLK_CON_DIV_CLKCMU_CORE_G3D, 0, 3),
+
+ /* PERI */
+ DIV(CLK_DOUT_PERI_BUS, "dout_peri_bus", "gout_peri_bus",
+ CLK_CON_DIV_CLKCMU_PERI_BUS, 0, 4),
+ DIV(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "gout_peri_spi0",
+ CLK_CON_DIV_CLKCMU_PERI_SPI0, 0, 6),
+ DIV(CLK_DOUT_PERI_SPI1, "dout_peri_spi1", "gout_peri_spi1",
+ CLK_CON_DIV_CLKCMU_PERI_SPI1, 0, 6),
+ DIV(CLK_DOUT_PERI_UART0, "dout_peri_uart0", "gout_peri_uart0",
+ CLK_CON_DIV_CLKCMU_PERI_UART0, 0, 4),
+ DIV(CLK_DOUT_PERI_UART1, "dout_peri_uart1", "gout_peri_uart1",
+ CLK_CON_DIV_CLKCMU_PERI_UART1, 0, 4),
+ DIV(CLK_DOUT_PERI_UART2, "dout_peri_uart2", "gout_peri_uart2",
+ CLK_CON_DIV_CLKCMU_PERI_UART2, 0, 4),
+ DIV(CLK_DOUT_PERI_USI0, "dout_peri_usi0", "gout_peri_usi0",
+ CLK_CON_DIV_CLKCMU_PERI_USI0, 0, 4),
+ DIV(CLK_DOUT_PERI_USI1, "dout_peri_usi1", "gout_peri_usi1",
+ CLK_CON_DIV_CLKCMU_PERI_USI1, 0, 4),
+ DIV(CLK_DOUT_PERI_USI2, "dout_peri_usi2", "gout_peri_usi2",
+ CLK_CON_DIV_CLKCMU_PERI_USI2, 0, 4),
+};
+
+static const struct samsung_gate_clock top_gate_clks[] __initconst = {
+ /* CORE */
+ GATE(CLK_GOUT_CORE_BUS, "gout_core_bus", "mout_core_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CORE_CCI, "gout_core_cci", "mout_core_cci",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_CCI, 21, 0, 0),
+ GATE(CLK_GOUT_CORE_G3D, "gout_core_g3d", "mout_core_g3d",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_G3D, 21, 0, 0),
+
+ /* PERI */
+ GATE(CLK_GOUT_PERI_BUS, "gout_peri_bus", "mout_peri_bus",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_SPI0, "gout_peri_spi0", "mout_peri_spi0",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_SPI0, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_SPI1, "gout_peri_spi1", "mout_peri_spi1",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_SPI1, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_UART0, "gout_peri_uart0", "mout_peri_uart0",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_UART0, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_UART1, "gout_peri_uart1", "mout_peri_uart1",
+ CLK_CON_GAT_GATE_CLKCMUC_PERI_UART1, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_UART2, "gout_peri_uart2", "mout_peri_uart2",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_UART2, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_USI0, "gout_peri_usi0", "mout_peri_usi0",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_USI0, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_USI1, "gout_peri_usi1", "mout_peri_usi1",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_USI1, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_USI2, "gout_peri_usi2", "mout_peri_usi2",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_USI2, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .gate_clks = top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+ .nr_clk_ids = TOP_NR_CLK,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynos7885_cmu_top_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
+}
+
+/* Register CMU_TOP early, as it's a dependency for other early domains */
+CLK_OF_DECLARE(exynos7885_cmu_top, "samsung,exynos7885-cmu-top",
+ exynos7885_cmu_top_init);
+
+/* ---- CMU_PERI ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_PERI (0x10010000) */
+#define PLL_CON0_MUX_CLKCMU_PERI_BUS_USER 0x0100
+#define PLL_CON0_MUX_CLKCMU_PERI_SPI0_USER 0x0120
+#define PLL_CON0_MUX_CLKCMU_PERI_SPI1_USER 0x0140
+#define PLL_CON0_MUX_CLKCMU_PERI_UART0_USER 0x0160
+#define PLL_CON0_MUX_CLKCMU_PERI_UART1_USER 0x0180
+#define PLL_CON0_MUX_CLKCMU_PERI_UART2_USER 0x01a0
+#define PLL_CON0_MUX_CLKCMU_PERI_USI0_USER 0x01c0
+#define PLL_CON0_MUX_CLKCMU_PERI_USI1_USER 0x01e0
+#define PLL_CON0_MUX_CLKCMU_PERI_USI2_USER 0x0200
+#define CLK_CON_GAT_GOUT_PERI_GPIO_TOP_PCLK 0x2024
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK 0x202c
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK 0x2030
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_3_PCLK 0x2034
+#define CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK 0x2038
+#define CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK 0x2040
+#define CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK 0x2044
+#define CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK 0x2048
+#define CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK 0x204c
+#define CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK 0x2050
+#define CLK_CON_GAT_GOUT_PERI_I2C_7_PCLK 0x2054
+#define CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK 0x2058
+#define CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK 0x205c
+#define CLK_CON_GAT_GOUT_PERI_SPI_0_EXT_CLK 0x2060
+#define CLK_CON_GAT_GOUT_PERI_SPI_1_PCLK 0x2064
+#define CLK_CON_GAT_GOUT_PERI_SPI_1_EXT_CLK 0x2068
+#define CLK_CON_GAT_GOUT_PERI_UART_0_EXT_UCLK 0x206c
+#define CLK_CON_GAT_GOUT_PERI_UART_0_PCLK 0x2070
+#define CLK_CON_GAT_GOUT_PERI_UART_1_EXT_UCLK 0x2074
+#define CLK_CON_GAT_GOUT_PERI_UART_1_PCLK 0x2078
+#define CLK_CON_GAT_GOUT_PERI_UART_2_EXT_UCLK 0x207c
+#define CLK_CON_GAT_GOUT_PERI_UART_2_PCLK 0x2080
+#define CLK_CON_GAT_GOUT_PERI_USI0_PCLK 0x2084
+#define CLK_CON_GAT_GOUT_PERI_USI0_SCLK 0x2088
+#define CLK_CON_GAT_GOUT_PERI_USI1_PCLK 0x208c
+#define CLK_CON_GAT_GOUT_PERI_USI1_SCLK 0x2090
+#define CLK_CON_GAT_GOUT_PERI_USI2_PCLK 0x2094
+#define CLK_CON_GAT_GOUT_PERI_USI2_SCLK 0x2098
+#define CLK_CON_GAT_GOUT_PERI_MCT_PCLK 0x20a0
+#define CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK 0x20b0
+#define CLK_CON_GAT_GOUT_PERI_WDT_CLUSTER0_PCLK 0x20b4
+#define CLK_CON_GAT_GOUT_PERI_WDT_CLUSTER1_PCLK 0x20b8
+
+static const unsigned long peri_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERI_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_SPI0_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_SPI1_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_UART0_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_UART1_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_UART2_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_USI0_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_USI1_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_USI2_USER,
+ CLK_CON_GAT_GOUT_PERI_GPIO_TOP_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_3_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_7_PCLK,
+ CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK,
+ CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_SPI_0_EXT_CLK,
+ CLK_CON_GAT_GOUT_PERI_SPI_1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_SPI_1_EXT_CLK,
+ CLK_CON_GAT_GOUT_PERI_UART_0_EXT_UCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_1_EXT_UCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_2_EXT_UCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_2_PCLK,
+ CLK_CON_GAT_GOUT_PERI_USI0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_USI0_SCLK,
+ CLK_CON_GAT_GOUT_PERI_USI1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_USI1_SCLK,
+ CLK_CON_GAT_GOUT_PERI_USI2_PCLK,
+ CLK_CON_GAT_GOUT_PERI_USI2_SCLK,
+ CLK_CON_GAT_GOUT_PERI_MCT_PCLK,
+ CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK,
+ CLK_CON_GAT_GOUT_PERI_WDT_CLUSTER0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_WDT_CLUSTER1_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_PERI */
+PNAME(mout_peri_bus_user_p) = { "oscclk", "dout_peri_bus" };
+PNAME(mout_peri_spi0_user_p) = { "oscclk", "dout_peri_spi0" };
+PNAME(mout_peri_spi1_user_p) = { "oscclk", "dout_peri_spi1" };
+PNAME(mout_peri_uart0_user_p) = { "oscclk", "dout_peri_uart0" };
+PNAME(mout_peri_uart1_user_p) = { "oscclk", "dout_peri_uart1" };
+PNAME(mout_peri_uart2_user_p) = { "oscclk", "dout_peri_uart2" };
+PNAME(mout_peri_usi0_user_p) = { "oscclk", "dout_peri_usi0" };
+PNAME(mout_peri_usi1_user_p) = { "oscclk", "dout_peri_usi1" };
+PNAME(mout_peri_usi2_user_p) = { "oscclk", "dout_peri_usi2" };
+
+static const struct samsung_mux_clock peri_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERI_BUS_USER, "mout_peri_bus_user", mout_peri_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_PERI_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_SPI0_USER, "mout_peri_spi0_user", mout_peri_spi0_user_p,
+ PLL_CON0_MUX_CLKCMU_PERI_SPI0_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_SPI1_USER, "mout_peri_spi1_user", mout_peri_spi1_user_p,
+ PLL_CON0_MUX_CLKCMU_PERI_SPI1_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_UART0_USER, "mout_peri_uart0_user",
+ mout_peri_uart0_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART0_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_UART1_USER, "mout_peri_uart1_user",
+ mout_peri_uart1_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART1_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_UART2_USER, "mout_peri_uart2_user",
+ mout_peri_uart2_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART2_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_USI0_USER, "mout_peri_usi0_user",
+ mout_peri_usi0_user_p, PLL_CON0_MUX_CLKCMU_PERI_USI0_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_USI1_USER, "mout_peri_usi1_user",
+ mout_peri_usi1_user_p, PLL_CON0_MUX_CLKCMU_PERI_USI1_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_USI2_USER, "mout_peri_usi2_user",
+ mout_peri_usi2_user_p, PLL_CON0_MUX_CLKCMU_PERI_USI2_USER, 4, 1),
+};
+
+static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
+	/* TODO: Should be enabled in the GPIO driver (or made CLK_IS_CRITICAL) */
+ GATE(CLK_GOUT_GPIO_TOP_PCLK, "gout_gpio_top_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_GPIO_TOP_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_HSI2C0_PCLK, "gout_hsi2c0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C1_PCLK, "gout_hsi2c1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C2_PCLK, "gout_hsi2c2_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C3_PCLK, "gout_hsi2c3_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_3_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C0_PCLK, "gout_i2c0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C1_PCLK, "gout_i2c1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C2_PCLK, "gout_i2c2_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C3_PCLK, "gout_i2c3_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C4_PCLK, "gout_i2c4_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C5_PCLK, "gout_i2c5_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C6_PCLK, "gout_i2c6_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C7_PCLK, "gout_i2c7_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_7_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PWM_MOTOR_PCLK, "gout_pwm_motor_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPI0_PCLK, "gout_spi0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPI0_EXT_CLK, "gout_spi0_ipclk", "mout_peri_spi0_user",
+ CLK_CON_GAT_GOUT_PERI_SPI_0_EXT_CLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPI1_PCLK, "gout_spi1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_SPI_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPI1_EXT_CLK, "gout_spi1_ipclk", "mout_peri_spi1_user",
+ CLK_CON_GAT_GOUT_PERI_SPI_1_EXT_CLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART0_EXT_UCLK, "gout_uart0_ext_uclk", "mout_peri_uart0_user",
+ CLK_CON_GAT_GOUT_PERI_UART_0_EXT_UCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART0_PCLK, "gout_uart0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_UART_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART1_EXT_UCLK, "gout_uart1_ext_uclk", "mout_peri_uart1_user",
+ CLK_CON_GAT_GOUT_PERI_UART_1_EXT_UCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART1_PCLK, "gout_uart1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_UART_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART2_EXT_UCLK, "gout_uart2_ext_uclk", "mout_peri_uart2_user",
+ CLK_CON_GAT_GOUT_PERI_UART_2_EXT_UCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART2_PCLK, "gout_uart2_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_UART_2_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USI0_PCLK, "gout_usi0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_USI0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USI0_SCLK, "gout_usi0_sclk", "mout_peri_usi0_user",
+ CLK_CON_GAT_GOUT_PERI_USI0_SCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USI1_PCLK, "gout_usi1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_USI1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USI1_SCLK, "gout_usi1_sclk", "mout_peri_usi1_user",
+ CLK_CON_GAT_GOUT_PERI_USI1_SCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USI2_PCLK, "gout_usi2_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_USI2_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USI2_SCLK, "gout_usi2_sclk", "mout_peri_usi2_user",
+ CLK_CON_GAT_GOUT_PERI_USI2_SCLK, 21, 0, 0),
+ GATE(CLK_GOUT_MCT_PCLK, "gout_mct_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_MCT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SYSREG_PERI_PCLK, "gout_sysreg_peri_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_WDT0_PCLK, "gout_wdt0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_WDT_CLUSTER0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_WDT1_PCLK, "gout_wdt1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_WDT_CLUSTER1_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peri_cmu_info __initconst = {
+ .mux_clks = peri_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peri_mux_clks),
+ .gate_clks = peri_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peri_gate_clks),
+ .nr_clk_ids = PERI_NR_CLK,
+ .clk_regs = peri_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peri_clk_regs),
+ .clk_name = "dout_peri_bus",
+};
+
+static void __init exynos7885_cmu_peri_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &peri_cmu_info);
+}
+
+/* Register CMU_PERI early, as it's needed for the MCT timer */
+CLK_OF_DECLARE(exynos7885_cmu_peri, "samsung,exynos7885-cmu-peri",
+ exynos7885_cmu_peri_init);
+
+/* ---- CMU_CORE ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_CORE (0x12000000) */
+#define PLL_CON0_MUX_CLKCMU_CORE_BUS_USER 0x0100
+#define PLL_CON0_MUX_CLKCMU_CORE_CCI_USER 0x0120
+#define PLL_CON0_MUX_CLKCMU_CORE_G3D_USER 0x0140
+#define CLK_CON_MUX_MUX_CLK_CORE_GIC 0x1000
+#define CLK_CON_DIV_DIV_CLK_CORE_BUSP 0x1800
+#define CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK 0x2054
+#define CLK_CON_GAT_GOUT_CORE_GIC400_CLK 0x2058
+
+static const unsigned long core_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_CORE_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_CORE_CCI_USER,
+ PLL_CON0_MUX_CLKCMU_CORE_G3D_USER,
+ CLK_CON_MUX_MUX_CLK_CORE_GIC,
+ CLK_CON_DIV_DIV_CLK_CORE_BUSP,
+ CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK,
+ CLK_CON_GAT_GOUT_CORE_GIC400_CLK,
+};
+
+/* List of parent clocks for Muxes in CMU_CORE */
+PNAME(mout_core_bus_user_p) = { "oscclk", "dout_core_bus" };
+PNAME(mout_core_cci_user_p) = { "oscclk", "dout_core_cci" };
+PNAME(mout_core_g3d_user_p) = { "oscclk", "dout_core_g3d" };
+PNAME(mout_core_gic_p) = { "dout_core_busp", "oscclk" };
+
+static const struct samsung_mux_clock core_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CORE_BUS_USER, "mout_core_bus_user", mout_core_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_CORE_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_CCI_USER, "mout_core_cci_user", mout_core_cci_user_p,
+ PLL_CON0_MUX_CLKCMU_CORE_CCI_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_G3D_USER, "mout_core_g3d_user", mout_core_g3d_user_p,
+ PLL_CON0_MUX_CLKCMU_CORE_G3D_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_GIC, "mout_core_gic", mout_core_gic_p,
+ CLK_CON_MUX_MUX_CLK_CORE_GIC, 0, 1),
+};
+
+static const struct samsung_div_clock core_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CORE_BUSP, "dout_core_busp", "mout_core_bus_user",
+ CLK_CON_DIV_DIV_CLK_CORE_BUSP, 0, 2),
+};
+
+static const struct samsung_gate_clock core_gate_clks[] __initconst = {
+	/* CCI (interconnect) clock must always be running */
+ GATE(CLK_GOUT_CCI_ACLK, "gout_cci_aclk", "mout_core_cci_user",
+ CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK, 21, CLK_IS_CRITICAL, 0),
+	/* GIC (interrupt controller) clock must always be running */
+ GATE(CLK_GOUT_GIC400_CLK, "gout_gic400_clk", "mout_core_gic",
+ CLK_CON_GAT_GOUT_CORE_GIC400_CLK, 21, CLK_IS_CRITICAL, 0),
+};
+
+static const struct samsung_cmu_info core_cmu_info __initconst = {
+ .mux_clks = core_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(core_mux_clks),
+ .div_clks = core_div_clks,
+ .nr_div_clks = ARRAY_SIZE(core_div_clks),
+ .gate_clks = core_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(core_gate_clks),
+ .nr_clk_ids = CORE_NR_CLK,
+ .clk_regs = core_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(core_clk_regs),
+ .clk_name = "dout_core_bus",
+};
+
+/* ---- platform_driver ----------------------------------------------------- */
+
+static int __init exynos7885_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+
+ info = of_device_get_match_data(dev);
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
+
+ return 0;
+}
+
+static const struct of_device_id exynos7885_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos7885-cmu-core",
+ .data = &core_cmu_info,
+ }, {
+ },
+};
+
+static struct platform_driver exynos7885_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos7885-cmu",
+ .of_match_table = exynos7885_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos7885_cmu_probe,
+};
+
+static int __init exynos7885_cmu_init(void)
+{
+ return platform_driver_register(&exynos7885_cmu_driver);
+}
+core_initcall(exynos7885_cmu_init);
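The exynos7885 driver above ends with the project's usual split registration: CMUs needed before the driver model is up (here CMU_PERI, for the MCT timer) are registered through CLK_OF_DECLARE, while the remaining CMUs bind as a platform driver. A minimal sketch of the two paths, with hypothetical "foo"/"vendor" names standing in for a new CMU:

/* Sketch only; "foo"/"vendor" names are placeholders, not part of the patch */
static void __init foo_cmu_early_init(struct device_node *np)
{
	/* Early path: no struct device exists yet, so pass NULL */
	exynos_arm64_register_cmu(NULL, np, &foo_early_cmu_info);
}
CLK_OF_DECLARE(foo_cmu_early, "vendor,foo-cmu-early", foo_cmu_early_init);

static int __init foo_cmu_probe(struct platform_device *pdev)
{
	/* Driver path: match data selects the CMU description */
	const struct samsung_cmu_info *info =
		of_device_get_match_data(&pdev->dev);

	exynos_arm64_register_cmu(&pdev->dev, pdev->dev.of_node, info);
	return 0;
}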
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
index 2294989e244c..cd9725f1dbf7 100644
--- a/drivers/clk/samsung/clk-exynos850.c
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -9,56 +9,13 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/exynos850.h>
#include "clk.h"
-
-/* Gate register bits */
-#define GATE_MANUAL BIT(20)
-#define GATE_ENABLE_HWACG BIT(28)
-
-/* Gate register offsets range */
-#define GATE_OFF_START 0x2000
-#define GATE_OFF_END 0x2fff
-
-/**
- * exynos850_init_clocks - Set clocks initial configuration
- * @np: CMU device tree node with "reg" property (CMU addr)
- * @reg_offs: Register offsets array for clocks to init
- * @reg_offs_len: Number of register offsets in reg_offs array
- *
- * Set manual control mode for all gate clocks.
- */
-static void __init exynos850_init_clocks(struct device_node *np,
- const unsigned long *reg_offs, size_t reg_offs_len)
-{
- void __iomem *reg_base;
- size_t i;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base)
- panic("%s: failed to map registers\n", __func__);
-
- for (i = 0; i < reg_offs_len; ++i) {
- void __iomem *reg = reg_base + reg_offs[i];
- u32 val;
-
- /* Modify only gate clock registers */
- if (reg_offs[i] < GATE_OFF_START || reg_offs[i] > GATE_OFF_END)
- continue;
-
- val = readl(reg);
- val |= GATE_MANUAL;
- val &= ~GATE_ENABLE_HWACG;
- writel(val, reg);
- }
-
- iounmap(reg_base);
-}
+#include "clk-exynos-arm64.h"
/* ---- CMU_TOP ------------------------------------------------------------- */
@@ -72,6 +29,7 @@ static void __init exynos850_init_clocks(struct device_node *np,
#define PLL_CON3_PLL_SHARED0 0x014c
#define PLL_CON0_PLL_SHARED1 0x0180
#define PLL_CON3_PLL_SHARED1 0x018c
+#define CLK_CON_MUX_MUX_CLKCMU_APM_BUS 0x1000
#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1014
#define CLK_CON_MUX_MUX_CLKCMU_CORE_CCI 0x1018
#define CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD 0x101c
@@ -83,6 +41,7 @@ static void __init exynos850_init_clocks(struct device_node *np,
#define CLK_CON_MUX_MUX_CLKCMU_PERI_BUS 0x1070
#define CLK_CON_MUX_MUX_CLKCMU_PERI_IP 0x1074
#define CLK_CON_MUX_MUX_CLKCMU_PERI_UART 0x1078
+#define CLK_CON_DIV_CLKCMU_APM_BUS 0x180c
#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1820
#define CLK_CON_DIV_CLKCMU_CORE_CCI 0x1824
#define CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD 0x1828
@@ -100,6 +59,7 @@ static void __init exynos850_init_clocks(struct device_node *np,
#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x1898
#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x189c
#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18a0
+#define CLK_CON_GAT_GATE_CLKCMU_APM_BUS 0x2008
#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x201c
#define CLK_CON_GAT_GATE_CLKCMU_CORE_CCI 0x2020
#define CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD 0x2024
@@ -122,6 +82,7 @@ static const unsigned long top_clk_regs[] __initconst = {
PLL_CON3_PLL_SHARED0,
PLL_CON0_PLL_SHARED1,
PLL_CON3_PLL_SHARED1,
+ CLK_CON_MUX_MUX_CLKCMU_APM_BUS,
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
CLK_CON_MUX_MUX_CLKCMU_CORE_CCI,
CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD,
@@ -133,6 +94,7 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_MUX_MUX_CLKCMU_PERI_BUS,
CLK_CON_MUX_MUX_CLKCMU_PERI_IP,
CLK_CON_MUX_MUX_CLKCMU_PERI_UART,
+ CLK_CON_DIV_CLKCMU_APM_BUS,
CLK_CON_DIV_CLKCMU_CORE_BUS,
CLK_CON_DIV_CLKCMU_CORE_CCI,
CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD,
@@ -150,6 +112,7 @@ static const unsigned long top_clk_regs[] __initconst = {
CLK_CON_DIV_PLL_SHARED1_DIV2,
CLK_CON_DIV_PLL_SHARED1_DIV3,
CLK_CON_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_GAT_GATE_CLKCMU_APM_BUS,
CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
CLK_CON_GAT_GATE_CLKCMU_CORE_CCI,
CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD,
@@ -183,6 +146,8 @@ static const struct samsung_pll_clock top_pll_clks[] __initconst = {
PNAME(mout_shared0_pll_p) = { "oscclk", "fout_shared0_pll" };
PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" };
PNAME(mout_mmc_pll_p) = { "oscclk", "fout_mmc_pll" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_APM */
+PNAME(mout_clkcmu_apm_bus_p) = { "dout_shared0_div4", "pll_shared1_div4" };
/* List of parent clocks for Muxes in CMU_TOP: for CMU_CORE */
PNAME(mout_core_bus_p) = { "dout_shared1_div2", "dout_shared0_div3",
"dout_shared1_div3", "dout_shared0_div4" };
@@ -222,6 +187,10 @@ static const struct samsung_mux_clock top_mux_clks[] __initconst = {
MUX(CLK_MOUT_MMC_PLL, "mout_mmc_pll", mout_mmc_pll_p,
PLL_CON0_PLL_MMC, 4, 1),
+ /* APM */
+ MUX(CLK_MOUT_CLKCMU_APM_BUS, "mout_clkcmu_apm_bus",
+ mout_clkcmu_apm_bus_p, CLK_CON_MUX_MUX_CLKCMU_APM_BUS, 0, 1),
+
/* CORE */
MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p,
CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 2),
@@ -268,6 +237,10 @@ static const struct samsung_div_clock top_div_clks[] __initconst = {
DIV(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4", "dout_shared1_div2",
CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+ /* APM */
+ DIV(CLK_DOUT_CLKCMU_APM_BUS, "dout_clkcmu_apm_bus",
+ "gout_clkcmu_apm_bus", CLK_CON_DIV_CLKCMU_APM_BUS, 0, 3),
+
/* CORE */
DIV(CLK_DOUT_CORE_BUS, "dout_core_bus", "gout_core_bus",
CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
@@ -310,6 +283,10 @@ static const struct samsung_gate_clock top_gate_clks[] __initconst = {
GATE(CLK_GOUT_CORE_SSS, "gout_core_sss", "mout_core_sss",
CLK_CON_GAT_GATE_CLKCMU_CORE_SSS, 21, 0, 0),
+ /* APM */
+ GATE(CLK_GOUT_CLKCMU_APM_BUS, "gout_clkcmu_apm_bus",
+ "mout_clkcmu_apm_bus", CLK_CON_GAT_GATE_CLKCMU_APM_BUS, 21, 0, 0),
+
/* DPU */
GATE(CLK_GOUT_DPU, "gout_dpu", "mout_dpu",
CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0),
@@ -347,13 +324,248 @@ static const struct samsung_cmu_info top_cmu_info __initconst = {
static void __init exynos850_cmu_top_init(struct device_node *np)
{
- exynos850_init_clocks(np, top_clk_regs, ARRAY_SIZE(top_clk_regs));
- samsung_cmu_register_one(np, &top_cmu_info);
+ exynos_arm64_register_cmu(NULL, np, &top_cmu_info);
}
+/* Register CMU_TOP early, as it's a dependency for other early domains */
CLK_OF_DECLARE(exynos850_cmu_top, "samsung,exynos850-cmu-top",
exynos850_cmu_top_init);
+/* ---- CMU_APM ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_APM (0x11800000) */
+#define PLL_CON0_MUX_CLKCMU_APM_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLK_RCO_APM_I3C_USER 0x0610
+#define PLL_CON0_MUX_CLK_RCO_APM_USER 0x0620
+#define PLL_CON0_MUX_DLL_USER 0x0630
+#define CLK_CON_MUX_MUX_CLKCMU_CHUB_BUS 0x1000
+#define CLK_CON_MUX_MUX_CLK_APM_BUS 0x1004
+#define CLK_CON_MUX_MUX_CLK_APM_I3C 0x1008
+#define CLK_CON_DIV_CLKCMU_CHUB_BUS 0x1800
+#define CLK_CON_DIV_DIV_CLK_APM_BUS 0x1804
+#define CLK_CON_DIV_DIV_CLK_APM_I3C 0x1808
+#define CLK_CON_GAT_CLKCMU_CMGP_BUS 0x2000
+#define CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS 0x2014
+#define CLK_CON_GAT_GOUT_APM_APBIF_GPIO_ALIVE_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_APM_APBIF_PMU_ALIVE_PCLK 0x2020
+#define CLK_CON_GAT_GOUT_APM_APBIF_RTC_PCLK 0x2024
+#define CLK_CON_GAT_GOUT_APM_APBIF_TOP_RTC_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_PCLK 0x2034
+#define CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_SCLK 0x2038
+#define CLK_CON_GAT_GOUT_APM_SPEEDY_APM_PCLK 0x20bc
+#define CLK_CON_GAT_GOUT_APM_SYSREG_APM_PCLK 0x20c0
+
+static const unsigned long apm_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_APM_BUS_USER,
+ PLL_CON0_MUX_CLK_RCO_APM_I3C_USER,
+ PLL_CON0_MUX_CLK_RCO_APM_USER,
+ PLL_CON0_MUX_DLL_USER,
+ CLK_CON_MUX_MUX_CLKCMU_CHUB_BUS,
+ CLK_CON_MUX_MUX_CLK_APM_BUS,
+ CLK_CON_MUX_MUX_CLK_APM_I3C,
+ CLK_CON_DIV_CLKCMU_CHUB_BUS,
+ CLK_CON_DIV_DIV_CLK_APM_BUS,
+ CLK_CON_DIV_DIV_CLK_APM_I3C,
+ CLK_CON_GAT_CLKCMU_CMGP_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS,
+ CLK_CON_GAT_GOUT_APM_APBIF_GPIO_ALIVE_PCLK,
+ CLK_CON_GAT_GOUT_APM_APBIF_PMU_ALIVE_PCLK,
+ CLK_CON_GAT_GOUT_APM_APBIF_RTC_PCLK,
+ CLK_CON_GAT_GOUT_APM_APBIF_TOP_RTC_PCLK,
+ CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_PCLK,
+ CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_SCLK,
+ CLK_CON_GAT_GOUT_APM_SPEEDY_APM_PCLK,
+ CLK_CON_GAT_GOUT_APM_SYSREG_APM_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_APM */
+PNAME(mout_apm_bus_user_p) = { "oscclk_rco_apm", "dout_clkcmu_apm_bus" };
+PNAME(mout_rco_apm_i3c_user_p) = { "oscclk_rco_apm", "clk_rco_i3c_pmic" };
+PNAME(mout_rco_apm_user_p) = { "oscclk_rco_apm", "clk_rco_apm__alv" };
+PNAME(mout_dll_user_p) = { "oscclk_rco_apm", "clk_dll_dco" };
+PNAME(mout_clkcmu_chub_bus_p) = { "mout_apm_bus_user", "mout_dll_user" };
+PNAME(mout_apm_bus_p) = { "mout_rco_apm_user", "mout_apm_bus_user",
+ "mout_dll_user", "oscclk_rco_apm" };
+PNAME(mout_apm_i3c_p) = { "dout_apm_i3c", "mout_rco_apm_i3c_user" };
+
+static const struct samsung_fixed_rate_clock apm_fixed_clks[] __initconst = {
+ FRATE(CLK_RCO_I3C_PMIC, "clk_rco_i3c_pmic", NULL, 0, 491520000),
+ FRATE(OSCCLK_RCO_APM, "oscclk_rco_apm", NULL, 0, 24576000),
+ FRATE(CLK_RCO_APM__ALV, "clk_rco_apm__alv", NULL, 0, 49152000),
+ FRATE(CLK_DLL_DCO, "clk_dll_dco", NULL, 0, 360000000),
+};
+
+static const struct samsung_mux_clock apm_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_APM_BUS_USER, "mout_apm_bus_user", mout_apm_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_APM_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_RCO_APM_I3C_USER, "mout_rco_apm_i3c_user",
+ mout_rco_apm_i3c_user_p, PLL_CON0_MUX_CLK_RCO_APM_I3C_USER, 4, 1),
+ MUX(CLK_MOUT_RCO_APM_USER, "mout_rco_apm_user", mout_rco_apm_user_p,
+ PLL_CON0_MUX_CLK_RCO_APM_USER, 4, 1),
+ MUX(CLK_MOUT_DLL_USER, "mout_dll_user", mout_dll_user_p,
+ PLL_CON0_MUX_DLL_USER, 4, 1),
+ MUX(CLK_MOUT_CLKCMU_CHUB_BUS, "mout_clkcmu_chub_bus",
+ mout_clkcmu_chub_bus_p, CLK_CON_MUX_MUX_CLKCMU_CHUB_BUS, 0, 1),
+ MUX(CLK_MOUT_APM_BUS, "mout_apm_bus", mout_apm_bus_p,
+ CLK_CON_MUX_MUX_CLK_APM_BUS, 0, 2),
+ MUX(CLK_MOUT_APM_I3C, "mout_apm_i3c", mout_apm_i3c_p,
+ CLK_CON_MUX_MUX_CLK_APM_I3C, 0, 1),
+};
+
+static const struct samsung_div_clock apm_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CLKCMU_CHUB_BUS, "dout_clkcmu_chub_bus",
+ "gout_clkcmu_chub_bus",
+ CLK_CON_DIV_CLKCMU_CHUB_BUS, 0, 3),
+ DIV(CLK_DOUT_APM_BUS, "dout_apm_bus", "mout_apm_bus",
+ CLK_CON_DIV_DIV_CLK_APM_BUS, 0, 3),
+ DIV(CLK_DOUT_APM_I3C, "dout_apm_i3c", "mout_apm_bus",
+ CLK_CON_DIV_DIV_CLK_APM_I3C, 0, 3),
+};
+
+static const struct samsung_gate_clock apm_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CLKCMU_CMGP_BUS, "gout_clkcmu_cmgp_bus", "dout_apm_bus",
+ CLK_CON_GAT_CLKCMU_CMGP_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CLKCMU_CHUB_BUS, "gout_clkcmu_chub_bus",
+ "mout_clkcmu_chub_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CHUB_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_RTC_PCLK, "gout_rtc_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_APBIF_RTC_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_TOP_RTC_PCLK, "gout_top_rtc_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_APBIF_TOP_RTC_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I3C_PCLK, "gout_i3c_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I3C_SCLK, "gout_i3c_sclk", "mout_apm_i3c",
+ CLK_CON_GAT_GOUT_APM_I3C_APM_PMIC_I_SCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPEEDY_PCLK, "gout_speedy_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_SPEEDY_APM_PCLK, 21, 0, 0),
+	/* TODO: Should be enabled in the GPIO driver (or made CLK_IS_CRITICAL) */
+ GATE(CLK_GOUT_GPIO_ALIVE_PCLK, "gout_gpio_alive_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_APBIF_GPIO_ALIVE_PCLK, 21, CLK_IGNORE_UNUSED,
+ 0),
+ GATE(CLK_GOUT_PMU_ALIVE_PCLK, "gout_pmu_alive_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_APBIF_PMU_ALIVE_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SYSREG_APM_PCLK, "gout_sysreg_apm_pclk", "dout_apm_bus",
+ CLK_CON_GAT_GOUT_APM_SYSREG_APM_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info apm_cmu_info __initconst = {
+ .mux_clks = apm_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(apm_mux_clks),
+ .div_clks = apm_div_clks,
+ .nr_div_clks = ARRAY_SIZE(apm_div_clks),
+ .gate_clks = apm_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(apm_gate_clks),
+ .fixed_clks = apm_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(apm_fixed_clks),
+ .nr_clk_ids = APM_NR_CLK,
+ .clk_regs = apm_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(apm_clk_regs),
+ .clk_name = "dout_clkcmu_apm_bus",
+};
+
+/* ---- CMU_CMGP ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_CMGP (0x11c00000) */
+#define CLK_CON_MUX_CLK_CMGP_ADC 0x1000
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0 0x1004
+#define CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1 0x1008
+#define CLK_CON_DIV_DIV_CLK_CMGP_ADC 0x1800
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0 0x1804
+#define CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1 0x1808
+#define CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S0 0x200c
+#define CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S1 0x2010
+#define CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_CMGP_SYSREG_CMGP_PCLK 0x2040
+#define CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK 0x2044
+#define CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK 0x2048
+#define CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK 0x204c
+#define CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK 0x2050
+
+static const unsigned long cmgp_clk_regs[] __initconst = {
+ CLK_CON_MUX_CLK_CMGP_ADC,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1,
+ CLK_CON_DIV_DIV_CLK_CMGP_ADC,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0,
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1,
+ CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S0,
+ CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S1,
+ CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK,
+ CLK_CON_GAT_GOUT_CMGP_SYSREG_CMGP_PCLK,
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK,
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK,
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK,
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_CMGP */
+PNAME(mout_cmgp_usi0_p) = { "clk_rco_cmgp", "gout_clkcmu_cmgp_bus" };
+PNAME(mout_cmgp_usi1_p) = { "clk_rco_cmgp", "gout_clkcmu_cmgp_bus" };
+PNAME(mout_cmgp_adc_p) = { "oscclk", "dout_cmgp_adc" };
+
+static const struct samsung_fixed_rate_clock cmgp_fixed_clks[] __initconst = {
+ FRATE(CLK_RCO_CMGP, "clk_rco_cmgp", NULL, 0, 49152000),
+};
+
+static const struct samsung_mux_clock cmgp_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CMGP_ADC, "mout_cmgp_adc", mout_cmgp_adc_p,
+ CLK_CON_MUX_CLK_CMGP_ADC, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI0, "mout_cmgp_usi0", mout_cmgp_usi0_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP0, 0, 1),
+ MUX(CLK_MOUT_CMGP_USI1, "mout_cmgp_usi1", mout_cmgp_usi1_p,
+ CLK_CON_MUX_MUX_CLK_CMGP_USI_CMGP1, 0, 1),
+};
+
+static const struct samsung_div_clock cmgp_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CMGP_ADC, "dout_cmgp_adc", "gout_clkcmu_cmgp_bus",
+ CLK_CON_DIV_DIV_CLK_CMGP_ADC, 0, 4),
+ DIV(CLK_DOUT_CMGP_USI0, "dout_cmgp_usi0", "mout_cmgp_usi0",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP0, 0, 5),
+ DIV(CLK_DOUT_CMGP_USI1, "dout_cmgp_usi1", "mout_cmgp_usi1",
+ CLK_CON_DIV_DIV_CLK_CMGP_USI_CMGP1, 0, 5),
+};
+
+static const struct samsung_gate_clock cmgp_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CMGP_ADC_S0_PCLK, "gout_adc_s0_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S0, 21, 0, 0),
+ GATE(CLK_GOUT_CMGP_ADC_S1_PCLK, "gout_adc_s1_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_ADC_PCLK_S1, 21, 0, 0),
+	/* TODO: Should be enabled in the GPIO driver (or made CLK_IS_CRITICAL) */
+ GATE(CLK_GOUT_CMGP_GPIO_PCLK, "gout_gpio_cmgp_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_GPIO_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_CMGP_USI0_IPCLK, "gout_cmgp_usi0_ipclk", "dout_cmgp_usi0",
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_CMGP_USI0_PCLK, "gout_cmgp_usi0_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_CMGP_USI1_IPCLK, "gout_cmgp_usi1_ipclk", "dout_cmgp_usi1",
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_CMGP_USI1_PCLK, "gout_cmgp_usi1_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_USI_CMGP1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SYSREG_CMGP_PCLK, "gout_sysreg_cmgp_pclk",
+ "gout_clkcmu_cmgp_bus",
+ CLK_CON_GAT_GOUT_CMGP_SYSREG_CMGP_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info cmgp_cmu_info __initconst = {
+ .mux_clks = cmgp_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(cmgp_mux_clks),
+ .div_clks = cmgp_div_clks,
+ .nr_div_clks = ARRAY_SIZE(cmgp_div_clks),
+ .gate_clks = cmgp_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(cmgp_gate_clks),
+ .fixed_clks = cmgp_fixed_clks,
+ .nr_fixed_clks = ARRAY_SIZE(cmgp_fixed_clks),
+ .nr_clk_ids = CMGP_NR_CLK,
+ .clk_regs = cmgp_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(cmgp_clk_regs),
+ .clk_name = "gout_clkcmu_cmgp_bus",
+};
+
/* ---- CMU_HSI ------------------------------------------------------------- */
/* Register Offset definitions for CMU_HSI (0x13400000) */
@@ -413,8 +625,9 @@ static const struct samsung_gate_clock hsi_gate_clks[] __initconst = {
CLK_CON_GAT_HSI_USB20DRD_TOP_I_REF_CLK_50, 21, 0, 0),
GATE(CLK_GOUT_USB_PHY_REF_CLK, "gout_usb_phy_ref", "oscclk",
CLK_CON_GAT_HSI_USB20DRD_TOP_I_PHY_REFCLK_26, 21, 0, 0),
+	/* TODO: Should be enabled in the GPIO driver (or made CLK_IS_CRITICAL) */
GATE(CLK_GOUT_GPIO_HSI_PCLK, "gout_gpio_hsi_pclk", "mout_hsi_bus_user",
- CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_MMC_CARD_ACLK, "gout_mmc_card_aclk", "mout_hsi_bus_user",
CLK_CON_GAT_GOUT_HSI_MMC_CARD_I_ACLK, 21, 0, 0),
GATE(CLK_GOUT_MMC_CARD_SDCLKIN, "gout_mmc_card_sdclkin",
@@ -597,9 +810,10 @@ static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
CLK_CON_GAT_GOUT_PERI_WDT_0_PCLK, 21, 0, 0),
GATE(CLK_GOUT_WDT1_PCLK, "gout_wdt1_pclk", "mout_peri_bus_user",
CLK_CON_GAT_GOUT_PERI_WDT_1_PCLK, 21, 0, 0),
+	/* TODO: Should be enabled in the GPIO driver (or made CLK_IS_CRITICAL) */
GATE(CLK_GOUT_GPIO_PERI_PCLK, "gout_gpio_peri_pclk",
"mout_peri_bus_user",
- CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK, 21, CLK_IGNORE_UNUSED, 0),
};
static const struct samsung_cmu_info peri_cmu_info __initconst = {
@@ -615,6 +829,15 @@ static const struct samsung_cmu_info peri_cmu_info __initconst = {
.clk_name = "dout_peri_bus",
};
+static void __init exynos850_cmu_peri_init(struct device_node *np)
+{
+ exynos_arm64_register_cmu(NULL, np, &peri_cmu_info);
+}
+
+/* Register CMU_PERI early, as it's needed for the MCT timer */
+CLK_OF_DECLARE(exynos850_cmu_peri, "samsung,exynos850-cmu-peri",
+ exynos850_cmu_peri_init);
+
/* ---- CMU_CORE ------------------------------------------------------------ */
/* Register Offset definitions for CMU_CORE (0x12000000) */
@@ -626,10 +849,12 @@ static const struct samsung_cmu_info peri_cmu_info __initconst = {
#define CLK_CON_DIV_DIV_CLK_CORE_BUSP 0x1800
#define CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK 0x2038
#define CLK_CON_GAT_GOUT_CORE_GIC_CLK 0x2040
+#define CLK_CON_GAT_GOUT_CORE_GPIO_CORE_PCLK 0x2044
#define CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK 0x20e8
#define CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN 0x20ec
#define CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK 0x2128
#define CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK 0x212c
+#define CLK_CON_GAT_GOUT_CORE_SYSREG_CORE_PCLK 0x2130
static const unsigned long core_clk_regs[] __initconst = {
PLL_CON0_MUX_CLKCMU_CORE_BUS_USER,
@@ -640,10 +865,12 @@ static const unsigned long core_clk_regs[] __initconst = {
CLK_CON_DIV_DIV_CLK_CORE_BUSP,
CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK,
CLK_CON_GAT_GOUT_CORE_GIC_CLK,
+ CLK_CON_GAT_GOUT_CORE_GPIO_CORE_PCLK,
CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK,
CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN,
CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK,
CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK,
+ CLK_CON_GAT_GOUT_CORE_SYSREG_CORE_PCLK,
};
/* List of parent clocks for Muxes in CMU_CORE */
@@ -673,10 +900,12 @@ static const struct samsung_div_clock core_div_clks[] __initconst = {
};
static const struct samsung_gate_clock core_gate_clks[] __initconst = {
+	/* CCI (interconnect) clock must always be running */
GATE(CLK_GOUT_CCI_ACLK, "gout_cci_aclk", "mout_core_cci_user",
- CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK, 21, CLK_IS_CRITICAL, 0),
+	/* GIC (interrupt controller) clock must always be running */
GATE(CLK_GOUT_GIC_CLK, "gout_gic_clk", "mout_core_gic",
- CLK_CON_GAT_GOUT_CORE_GIC_CLK, 21, 0, 0),
+ CLK_CON_GAT_GOUT_CORE_GIC_CLK, 21, CLK_IS_CRITICAL, 0),
GATE(CLK_GOUT_MMC_EMBD_ACLK, "gout_mmc_embd_aclk", "dout_core_busp",
CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK, 21, 0, 0),
GATE(CLK_GOUT_MMC_EMBD_SDCLKIN, "gout_mmc_embd_sdclkin",
@@ -686,6 +915,12 @@ static const struct samsung_gate_clock core_gate_clks[] __initconst = {
CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK, 21, 0, 0),
GATE(CLK_GOUT_SSS_PCLK, "gout_sss_pclk", "dout_core_busp",
CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK, 21, 0, 0),
+	/* TODO: Should be enabled in the GPIO driver (or made CLK_IS_CRITICAL) */
+ GATE(CLK_GOUT_GPIO_CORE_PCLK, "gout_gpio_core_pclk", "dout_core_busp",
+ CLK_CON_GAT_GOUT_CORE_GPIO_CORE_PCLK, 21, CLK_IGNORE_UNUSED, 0),
+ GATE(CLK_GOUT_SYSREG_CORE_PCLK, "gout_sysreg_core_pclk",
+ "dout_core_busp",
+ CLK_CON_GAT_GOUT_CORE_SYSREG_CORE_PCLK, 21, 0, 0),
};
static const struct samsung_cmu_info core_cmu_info __initconst = {
@@ -742,8 +977,10 @@ static const struct samsung_div_clock dpu_div_clks[] __initconst = {
};
static const struct samsung_gate_clock dpu_gate_clks[] __initconst = {
+	/* TODO: Should be enabled in the DSIM driver */
GATE(CLK_GOUT_DPU_CMU_DPU_PCLK, "gout_dpu_cmu_dpu_pclk",
- "dout_dpu_busp", CLK_CON_GAT_CLK_DPU_CMU_DPU_PCLK, 21, 0, 0),
+ "dout_dpu_busp",
+ CLK_CON_GAT_CLK_DPU_CMU_DPU_PCLK, 21, CLK_IGNORE_UNUSED, 0),
GATE(CLK_GOUT_DPU_DECON0_ACLK, "gout_dpu_decon0_aclk", "mout_dpu_user",
CLK_CON_GAT_GOUT_DPU_ACLK_DECON0, 21, 0, 0),
GATE(CLK_GOUT_DPU_DMA_ACLK, "gout_dpu_dma_aclk", "mout_dpu_user",
@@ -779,37 +1016,24 @@ static int __init exynos850_cmu_probe(struct platform_device *pdev)
{
const struct samsung_cmu_info *info;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
info = of_device_get_match_data(dev);
- exynos850_init_clocks(np, info->clk_regs, info->nr_clk_regs);
- samsung_cmu_register_one(np, info);
-
- /* Keep bus clock running, so it's possible to access CMU registers */
- if (info->clk_name) {
- struct clk *bus_clk;
-
- bus_clk = clk_get(dev, info->clk_name);
- if (IS_ERR(bus_clk)) {
- pr_err("%s: could not find bus clock %s; err = %ld\n",
- __func__, info->clk_name, PTR_ERR(bus_clk));
- } else {
- clk_prepare_enable(bus_clk);
- }
- }
+ exynos_arm64_register_cmu(dev, dev->of_node, info);
return 0;
}
-/* CMUs which belong to Power Domains and need runtime PM to be implemented */
static const struct of_device_id exynos850_cmu_of_match[] = {
{
+ .compatible = "samsung,exynos850-cmu-apm",
+ .data = &apm_cmu_info,
+ }, {
+ .compatible = "samsung,exynos850-cmu-cmgp",
+ .data = &cmgp_cmu_info,
+ }, {
.compatible = "samsung,exynos850-cmu-hsi",
.data = &hsi_cmu_info,
}, {
- .compatible = "samsung,exynos850-cmu-peri",
- .data = &peri_cmu_info,
- }, {
.compatible = "samsung,exynos850-cmu-core",
.data = &core_cmu_info,
}, {
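The exynos850 hunks above drop both the local exynos850_init_clocks() helper and the open-coded bus-clock keep-alive in the probe path. Judging from the removed code, the shared exynos_arm64_register_cmu() from clk-exynos-arm64.h presumably folds the same steps into one call; a sketch of the assumed behaviour (the internal helper names are placeholders, not the actual source):

void exynos_arm64_register_cmu(struct device *dev, struct device_node *np,
			       const struct samsung_cmu_info *cmu)
{
	/* 1. Force manual gate control, as exynos850_init_clocks() did */
	init_gate_clocks(np, cmu->clk_regs, cmu->nr_clk_regs);

	/* 2. Keep the CMU bus clock running so its registers stay accessible */
	if (cmu->clk_name)
		enable_bus_clk(dev, np, cmu->clk_name);

	/* 3. Register the clocks themselves */
	samsung_cmu_register_one(np, cmu);
}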
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 83d1b03647db..70cdc87f714e 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -1476,6 +1476,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
else
init.ops = &samsung_pll35xx_clk_ops;
break;
+ case pll_1417x:
case pll_0822x:
pll->enable_offs = PLL0822X_ENABLE_SHIFT;
pll->lock_offs = PLL0822X_LOCK_STAT_SHIFT;
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index a739f2b7ae80..c83a20195f6d 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -32,6 +32,7 @@ enum samsung_pll_type {
pll_2550xx,
pll_2650x,
pll_2650xx,
+ pll_1417x,
pll_1450x,
pll_1451x,
pll_1452x,
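The new pll_1417x type falls through to the pll_0822x branch in clk-pll.c, so it inherits that PLL's enable/lock bit positions and, presumably, its rate formula: Fout = (MDIV * Fin) / (PDIV * 2^SDIV). A readback sketch using the pll0822x field layout (the shifts and masks are assumptions to verify against the SoC manual):

/* pll0822x-style rate readback from the CON register holding M/P/S */
static unsigned long pll0822x_rate(unsigned long fin, u32 con)
{
	u32 mdiv = (con >> 16) & 0x3ff;	/* main divider */
	u32 pdiv = (con >> 8) & 0x3f;	/* pre-divider */
	u32 sdiv = (con >> 0) & 0x7;	/* post scaler, divides by 2^S */

	return div_u64((u64)fin * mdiv, pdiv << sdiv);
}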
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index 5831d0606077..3d152a46169b 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -323,6 +323,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
void __iomem *base)
{
struct samsung_clk_provider *ctx;
+ struct clk_hw **hws;
reg_base = base;
if (np) {
@@ -332,13 +333,14 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
}
ctx = samsung_clk_init(np, reg_base, NR_CLKS);
+ hws = ctx->clk_data.hws;
/* Register external clocks only in non-dt cases */
if (!np)
s3c2410_common_clk_register_fixed_ext(ctx, xti_f);
if (current_soc == S3C2410) {
- if (_get_rate("xti") == 12 * MHZ) {
+ if (clk_hw_get_rate(hws[XTI]) == 12 * MHZ) {
s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl;
s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl;
}
@@ -348,7 +350,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
ARRAY_SIZE(s3c2410_plls), reg_base);
} else { /* S3C2440, S3C2442 */
- if (_get_rate("xti") == 12 * MHZ) {
+ if (clk_hw_get_rate(hws[XTI]) == 12 * MHZ) {
/*
* plls follow different calculation schemes, with the
* upll following the same scheme as the s3c2410 plls
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index 56f95b63f71f..d6b432a26d63 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -394,6 +394,7 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
void __iomem *base)
{
struct samsung_clk_provider *ctx;
+ struct clk_hw **hws;
reg_base = base;
is_s3c6400 = s3c6400;
@@ -405,6 +406,7 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
}
ctx = samsung_clk_init(np, reg_base, NR_CLKS);
+ hws = ctx->clk_data.hws;
/* Register external clocks. */
if (!np)
@@ -459,8 +461,10 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
pr_info("%s clocks: apll = %lu, mpll = %lu\n"
"\tepll = %lu, arm_clk = %lu\n",
is_s3c6400 ? "S3C6400" : "S3C6410",
- _get_rate("fout_apll"), _get_rate("fout_mpll"),
- _get_rate("fout_epll"), _get_rate("armclk"));
+ clk_hw_get_rate(hws[MOUT_APLL]),
+ clk_hw_get_rate(hws[MOUT_MPLL]),
+ clk_hw_get_rate(hws[MOUT_EPLL]),
+ clk_hw_get_rate(hws[ARMCLK]));
}
static void __init s3c6400_clk_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk-s5pv210.c b/drivers/clk/samsung/clk-s5pv210.c
index e7b68ffe36de..4425186bdcab 100644
--- a/drivers/clk/samsung/clk-s5pv210.c
+++ b/drivers/clk/samsung/clk-s5pv210.c
@@ -741,8 +741,10 @@ static void __init __s5pv210_clk_init(struct device_node *np,
bool is_s5p6442)
{
struct samsung_clk_provider *ctx;
+ struct clk_hw **hws;
ctx = samsung_clk_init(np, reg_base, NR_CLKS);
+ hws = ctx->clk_data.hws;
samsung_clk_register_mux(ctx, early_mux_clks,
ARRAY_SIZE(early_mux_clks));
@@ -789,8 +791,10 @@ static void __init __s5pv210_clk_init(struct device_node *np,
pr_info("%s clocks: mout_apll = %ld, mout_mpll = %ld\n"
"\tmout_epll = %ld, mout_vpll = %ld\n",
is_s5p6442 ? "S5P6442" : "S5PV210",
- _get_rate("mout_apll"), _get_rate("mout_mpll"),
- _get_rate("mout_epll"), _get_rate("mout_vpll"));
+ clk_hw_get_rate(hws[MOUT_APLL]),
+ clk_hw_get_rate(hws[MOUT_MPLL]),
+ clk_hw_get_rate(hws[MOUT_EPLL]),
+ clk_hw_get_rate(hws[MOUT_VPLL]));
}
static void __init s5pv210_clk_dt_init(struct device_node *np)
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 336243c6f120..bca4731b14ea 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -268,20 +268,6 @@ void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk);
}
-/* utility function to get the rate of a specified clock */
-unsigned long _get_rate(const char *clk_name)
-{
- struct clk *clk;
-
- clk = __clk_lookup(clk_name);
- if (!clk) {
- pr_err("%s: could not find clock %s\n", __func__, clk_name);
- return 0;
- }
-
- return clk_get_rate(clk);
-}
-
#ifdef CONFIG_PM_SLEEP
static int samsung_clk_suspend(void)
{
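With _get_rate() removed, callers index the clk_hw table that samsung_clk_init() already fills, instead of going through a global name lookup via __clk_lookup(). Minimal usage, matching the s3c2410/s3c64xx/s5pv210 hunks above (XTI comes from the SoC's clock binding header; the called helper is hypothetical):

	struct clk_hw **hws = ctx->clk_data.hws;

	/* Direct indexed access: no string lookup, no struct clk allocation */
	if (clk_hw_get_rate(hws[XTI]) == 12 * MHZ)
		select_12mhz_pll_tables();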
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 26499e97275b..b46e83a2581f 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -337,54 +337,52 @@ struct samsung_cmu_info {
const char *clk_name;
};
-extern struct samsung_clk_provider *__init samsung_clk_init(
+struct samsung_clk_provider *samsung_clk_init(
struct device_node *np, void __iomem *base,
unsigned long nr_clks);
-extern void __init samsung_clk_of_add_provider(struct device_node *np,
+void samsung_clk_of_add_provider(struct device_node *np,
struct samsung_clk_provider *ctx);
-extern void __init samsung_clk_of_register_fixed_ext(
+void samsung_clk_of_register_fixed_ext(
struct samsung_clk_provider *ctx,
struct samsung_fixed_rate_clock *fixed_rate_clk,
unsigned int nr_fixed_rate_clk,
const struct of_device_id *clk_matches);
-extern void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
+void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
struct clk_hw *clk_hw, unsigned int id);
-extern void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx,
+void samsung_clk_register_alias(struct samsung_clk_provider *ctx,
const struct samsung_clock_alias *list,
unsigned int nr_clk);
-extern void __init samsung_clk_register_fixed_rate(
+void samsung_clk_register_fixed_rate(
struct samsung_clk_provider *ctx,
const struct samsung_fixed_rate_clock *clk_list,
unsigned int nr_clk);
-extern void __init samsung_clk_register_fixed_factor(
+void samsung_clk_register_fixed_factor(
struct samsung_clk_provider *ctx,
const struct samsung_fixed_factor_clock *list,
unsigned int nr_clk);
-extern void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx,
+void samsung_clk_register_mux(struct samsung_clk_provider *ctx,
const struct samsung_mux_clock *clk_list,
unsigned int nr_clk);
-extern void __init samsung_clk_register_div(struct samsung_clk_provider *ctx,
+void samsung_clk_register_div(struct samsung_clk_provider *ctx,
const struct samsung_div_clock *clk_list,
unsigned int nr_clk);
-extern void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
+void samsung_clk_register_gate(struct samsung_clk_provider *ctx,
const struct samsung_gate_clock *clk_list,
unsigned int nr_clk);
-extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
+void samsung_clk_register_pll(struct samsung_clk_provider *ctx,
const struct samsung_pll_clock *pll_list,
unsigned int nr_clk, void __iomem *base);
-extern void samsung_clk_register_cpu(struct samsung_clk_provider *ctx,
+void samsung_clk_register_cpu(struct samsung_clk_provider *ctx,
const struct samsung_cpu_clock *list, unsigned int nr_clk);
-extern struct samsung_clk_provider __init *samsung_cmu_register_one(
+struct samsung_clk_provider *samsung_cmu_register_one(
struct device_node *,
const struct samsung_cmu_info *);
-extern unsigned long _get_rate(const char *clk_name);
-
#ifdef CONFIG_PM_SLEEP
-extern void samsung_clk_extended_sleep_init(void __iomem *reg_base,
+void samsung_clk_extended_sleep_init(void __iomem *reg_base,
const unsigned long *rdump,
unsigned long nr_rdump,
const struct samsung_clk_reg_dump *rsuspend,
@@ -399,13 +397,13 @@ static inline void samsung_clk_extended_sleep_init(void __iomem *reg_base,
#define samsung_clk_sleep_init(reg_base, rdump, nr_rdump) \
samsung_clk_extended_sleep_init(reg_base, rdump, nr_rdump, NULL, 0)
-extern void samsung_clk_save(void __iomem *base,
+void samsung_clk_save(void __iomem *base,
struct samsung_clk_reg_dump *rd,
unsigned int num_regs);
-extern void samsung_clk_restore(void __iomem *base,
+void samsung_clk_restore(void __iomem *base,
const struct samsung_clk_reg_dump *rd,
unsigned int num_regs);
-extern struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump(
+struct samsung_clk_reg_dump *samsung_clk_alloc_reg_dump(
const unsigned long *rdump,
unsigned long nr_rdump);
diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c
index bf8cd928c228..74d21bd82710 100644
--- a/drivers/clk/socfpga/clk-agilex.c
+++ b/drivers/clk/socfpga/clk-agilex.c
@@ -500,12 +500,10 @@ static int n5x_clkmgr_init(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct stratix10_clock_data *clk_data;
- struct resource *res;
void __iomem *base;
int i, num_clks;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
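devm_platform_ioremap_resource() is a one-call replacement for the two-step pattern removed in this hunk; it is roughly equivalent to the following sketch (not the kernel's exact implementation):

static void __iomem *ioremap_index0(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	return devm_ioremap_resource(&pdev->dev, res);
}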
diff --git a/drivers/clk/socfpga/clk-gate.c b/drivers/clk/socfpga/clk-gate.c
index 1ec9678d8cd3..53d6e3ec4309 100644
--- a/drivers/clk/socfpga/clk-gate.c
+++ b/drivers/clk/socfpga/clk-gate.c
@@ -34,7 +34,7 @@ static u8 socfpga_clk_get_parent(struct clk_hw *hwclk)
if (streq(name, SOCFPGA_L4_MP_CLK)) {
l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
- return l4_src &= 0x1;
+ return l4_src & 0x1;
}
if (streq(name, SOCFPGA_L4_SP_CLK)) {
l4_src = readl(clk_mgr_base_addr + CLKMGR_L4SRC);
@@ -43,7 +43,7 @@ static u8 socfpga_clk_get_parent(struct clk_hw *hwclk)
perpll_src = readl(clk_mgr_base_addr + CLKMGR_PERPLL_SRC);
if (streq(name, SOCFPGA_MMC_CLK))
- return perpll_src &= 0x3;
+ return perpll_src & 0x3;
if (streq(name, SOCFPGA_NAND_CLK) ||
streq(name, SOCFPGA_NAND_X_CLK))
return (perpll_src >> 2) & 3;
diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
index 70076a80149d..e444e4a0ee53 100644
--- a/drivers/clk/socfpga/clk-pll-s10.c
+++ b/drivers/clk/socfpga/clk-pll-s10.c
@@ -113,7 +113,7 @@ static unsigned long clk_boot_clk_recalc_rate(struct clk_hw *hwclk,
SWCTRLBTCLKSEL_MASK) >>
SWCTRLBTCLKSEL_SHIFT);
div += 1;
- return parent_rate /= div;
+ return parent_rate / div;
}
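Both socfpga fixes above swap a compound assignment in a return statement for the plain operator. The generated code is identical, since only a local about to go out of scope was being mutated; the change is purely about saying what is meant:

static u8 parent_masked_a(u32 src) { return src &= 0x3; }	/* misleading */
static u8 parent_masked_b(u32 src) { return src & 0x3; }	/* equivalent, clearer */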
diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c
index b532d51faaee..4e508a844b3d 100644
--- a/drivers/clk/socfpga/clk-s10.c
+++ b/drivers/clk/socfpga/clk-s10.c
@@ -388,12 +388,10 @@ static int s10_clkmgr_init(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct stratix10_clock_data *clk_data;
- struct resource *res;
void __iomem *base;
int i, num_clks;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
pr_err("%s: failed to map clock registers\n", __func__);
return PTR_ERR(base);
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index 164285d6be97..582a22c04919 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -988,9 +988,18 @@ static void __init st_of_quadfs_setup(struct device_node *np,
void __iomem *reg;
spinlock_t *lock;
+	/*
+	 * First check for the reg property within the node to keep backward
+	 * compatibility; then, if reg doesn't exist, look at the parent node.
+	 */
reg = of_iomap(np, 0);
- if (!reg)
- return;
+ if (!reg) {
+ reg = of_iomap(of_get_parent(np), 0);
+ if (!reg) {
+ pr_err("%s: Failed to get base address\n", __func__);
+ return;
+ }
+ }
clk_parent_name = of_clk_get_parent_name(np, 0);
if (!clk_parent_name)
diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c
index ce583ded968a..ee39af7a0b72 100644
--- a/drivers/clk/st/clkgen-mux.c
+++ b/drivers/clk/st/clkgen-mux.c
@@ -57,10 +57,17 @@ static void __init st_of_clkgen_mux_setup(struct device_node *np,
const char **parents;
int num_parents = 0;
+	/*
+	 * First check for the reg property within the node to keep backward
+	 * compatibility; then, if reg doesn't exist, look at the parent node.
+	 */
reg = of_iomap(np, 0);
if (!reg) {
- pr_err("%s: Failed to get base address\n", __func__);
- return;
+ reg = of_iomap(of_get_parent(np), 0);
+ if (!reg) {
+ pr_err("%s: Failed to get base address\n", __func__);
+ return;
+ }
}
parents = clkgen_mux_get_parents(np, &num_parents);
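In both st clkgen fallbacks, of_get_parent() returns the parent node with its refcount raised. A slightly fuller version of the fallback would drop that reference once the mapping attempt is done; a sketch under that assumption:

	reg = of_iomap(np, 0);
	if (!reg) {
		struct device_node *parent = of_get_parent(np);

		reg = of_iomap(parent, 0);
		of_node_put(parent);	/* balance of_get_parent() */
		if (!reg) {
			pr_err("%s: Failed to get base address\n", __func__);
			return;
		}
	}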
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index e76e1676f0f0..68a94e5af8ed 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
config SUNXI_CCU
- bool "Clock support for Allwinner SoCs"
+ tristate "Clock support for Allwinner SoCs"
depends on ARCH_SUNXI || COMPILE_TEST
select RESET_CONTROLLER
default ARCH_SUNXI
@@ -8,42 +8,52 @@ config SUNXI_CCU
if SUNXI_CCU
config SUNIV_F1C100S_CCU
- bool "Support for the Allwinner newer F1C100s CCU"
+ tristate "Support for the Allwinner newer F1C100s CCU"
default MACH_SUNIV
depends on MACH_SUNIV || COMPILE_TEST
+config SUN20I_D1_CCU
+ tristate "Support for the Allwinner D1 CCU"
+ default RISCV && ARCH_SUNXI
+ depends on (RISCV && ARCH_SUNXI) || COMPILE_TEST
+
+config SUN20I_D1_R_CCU
+ tristate "Support for the Allwinner D1 PRCM CCU"
+ default RISCV && ARCH_SUNXI
+ depends on (RISCV && ARCH_SUNXI) || COMPILE_TEST
+
config SUN50I_A64_CCU
- bool "Support for the Allwinner A64 CCU"
+ tristate "Support for the Allwinner A64 CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN50I_A100_CCU
- bool "Support for the Allwinner A100 CCU"
+ tristate "Support for the Allwinner A100 CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN50I_A100_R_CCU
- bool "Support for the Allwinner A100 PRCM CCU"
+ tristate "Support for the Allwinner A100 PRCM CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN50I_H6_CCU
- bool "Support for the Allwinner H6 CCU"
+ tristate "Support for the Allwinner H6 CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN50I_H616_CCU
- bool "Support for the Allwinner H616 CCU"
+ tristate "Support for the Allwinner H616 CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN50I_H6_R_CCU
- bool "Support for the Allwinner H6 and H616 PRCM CCU"
+ tristate "Support for the Allwinner H6 and H616 PRCM CCU"
default ARM64 && ARCH_SUNXI
depends on (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN4I_A10_CCU
- bool "Support for the Allwinner A10/A20 CCU"
+ tristate "Support for the Allwinner A10/A20 CCU"
default MACH_SUN4I
default MACH_SUN7I
depends on MACH_SUN4I || MACH_SUN7I || COMPILE_TEST
@@ -52,53 +62,54 @@ config SUN5I_CCU
bool "Support for the Allwinner sun5i family CCM"
default MACH_SUN5I
depends on MACH_SUN5I || COMPILE_TEST
+ depends on SUNXI_CCU=y
config SUN6I_A31_CCU
- bool "Support for the Allwinner A31/A31s CCU"
+ tristate "Support for the Allwinner A31/A31s CCU"
default MACH_SUN6I
depends on MACH_SUN6I || COMPILE_TEST
config SUN8I_A23_CCU
- bool "Support for the Allwinner A23 CCU"
+ tristate "Support for the Allwinner A23 CCU"
default MACH_SUN8I
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A33_CCU
- bool "Support for the Allwinner A33 CCU"
+ tristate "Support for the Allwinner A33 CCU"
default MACH_SUN8I
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_A83T_CCU
- bool "Support for the Allwinner A83T CCU"
+ tristate "Support for the Allwinner A83T CCU"
default MACH_SUN8I
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_H3_CCU
- bool "Support for the Allwinner H3 CCU"
+ tristate "Support for the Allwinner H3 CCU"
default MACH_SUN8I || (ARM64 && ARCH_SUNXI)
depends on MACH_SUN8I || (ARM64 && ARCH_SUNXI) || COMPILE_TEST
config SUN8I_V3S_CCU
- bool "Support for the Allwinner V3s CCU"
+ tristate "Support for the Allwinner V3s CCU"
default MACH_SUN8I
depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_DE2_CCU
- bool "Support for the Allwinner SoCs DE2 CCU"
+ tristate "Support for the Allwinner SoCs DE2 CCU"
default MACH_SUN8I || (ARM64 && ARCH_SUNXI)
config SUN8I_R40_CCU
- bool "Support for the Allwinner R40 CCU"
+ tristate "Support for the Allwinner R40 CCU"
default MACH_SUN8I
depends on MACH_SUN8I || COMPILE_TEST
config SUN9I_A80_CCU
- bool "Support for the Allwinner A80 CCU"
+ tristate "Support for the Allwinner A80 CCU"
default MACH_SUN9I
depends on MACH_SUN9I || COMPILE_TEST
config SUN8I_R_CCU
- bool "Support for Allwinner SoCs' PRCM CCUs"
+ tristate "Support for Allwinner SoCs' PRCM CCUs"
default MACH_SUN8I || (ARCH_SUNXI && ARM64)
endif
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index 96c324306d97..ec931cb7aa14 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -1,44 +1,73 @@
# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_SUNXI_CCU) += sunxi-ccu.o
+
# Common objects
-obj-y += ccu_common.o
-obj-y += ccu_mmc_timing.o
-obj-y += ccu_reset.o
+sunxi-ccu-y += ccu_common.o
+sunxi-ccu-y += ccu_mmc_timing.o
+sunxi-ccu-y += ccu_reset.o
# Base clock types
-obj-y += ccu_div.o
-obj-y += ccu_frac.o
-obj-y += ccu_gate.o
-obj-y += ccu_mux.o
-obj-y += ccu_mult.o
-obj-y += ccu_phase.o
-obj-y += ccu_sdm.o
+sunxi-ccu-y += ccu_div.o
+sunxi-ccu-y += ccu_frac.o
+sunxi-ccu-y += ccu_gate.o
+sunxi-ccu-y += ccu_mux.o
+sunxi-ccu-y += ccu_mult.o
+sunxi-ccu-y += ccu_phase.o
+sunxi-ccu-y += ccu_sdm.o
# Multi-factor clocks
-obj-y += ccu_nk.o
-obj-y += ccu_nkm.o
-obj-y += ccu_nkmp.o
-obj-y += ccu_nm.o
-obj-y += ccu_mp.o
+sunxi-ccu-y += ccu_nk.o
+sunxi-ccu-y += ccu_nkm.o
+sunxi-ccu-y += ccu_nkmp.o
+sunxi-ccu-y += ccu_nm.o
+sunxi-ccu-y += ccu_mp.o
# SoC support
-obj-$(CONFIG_SUNIV_F1C100S_CCU) += ccu-suniv-f1c100s.o
-obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
-obj-$(CONFIG_SUN50I_A100_CCU) += ccu-sun50i-a100.o
-obj-$(CONFIG_SUN50I_A100_R_CCU) += ccu-sun50i-a100-r.o
-obj-$(CONFIG_SUN50I_H6_CCU) += ccu-sun50i-h6.o
-obj-$(CONFIG_SUN50I_H616_CCU) += ccu-sun50i-h616.o
-obj-$(CONFIG_SUN50I_H6_R_CCU) += ccu-sun50i-h6-r.o
-obj-$(CONFIG_SUN4I_A10_CCU) += ccu-sun4i-a10.o
-obj-$(CONFIG_SUN5I_CCU) += ccu-sun5i.o
-obj-$(CONFIG_SUN6I_A31_CCU) += ccu-sun6i-a31.o
-obj-$(CONFIG_SUN8I_A23_CCU) += ccu-sun8i-a23.o
-obj-$(CONFIG_SUN8I_A33_CCU) += ccu-sun8i-a33.o
-obj-$(CONFIG_SUN8I_A83T_CCU) += ccu-sun8i-a83t.o
-obj-$(CONFIG_SUN8I_H3_CCU) += ccu-sun8i-h3.o
-obj-$(CONFIG_SUN8I_V3S_CCU) += ccu-sun8i-v3s.o
-obj-$(CONFIG_SUN8I_DE2_CCU) += ccu-sun8i-de2.o
-obj-$(CONFIG_SUN8I_R_CCU) += ccu-sun8i-r.o
-obj-$(CONFIG_SUN8I_R40_CCU) += ccu-sun8i-r40.o
-obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o
-obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o
-obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o
+obj-$(CONFIG_SUNIV_F1C100S_CCU) += suniv-f1c100s-ccu.o
+obj-$(CONFIG_SUN20I_D1_CCU) += sun20i-d1-ccu.o
+obj-$(CONFIG_SUN20I_D1_R_CCU) += sun20i-d1-r-ccu.o
+obj-$(CONFIG_SUN50I_A64_CCU) += sun50i-a64-ccu.o
+obj-$(CONFIG_SUN50I_A100_CCU) += sun50i-a100-ccu.o
+obj-$(CONFIG_SUN50I_A100_R_CCU) += sun50i-a100-r-ccu.o
+obj-$(CONFIG_SUN50I_H6_CCU) += sun50i-h6-ccu.o
+obj-$(CONFIG_SUN50I_H6_R_CCU) += sun50i-h6-r-ccu.o
+obj-$(CONFIG_SUN50I_H616_CCU) += sun50i-h616-ccu.o
+obj-$(CONFIG_SUN4I_A10_CCU) += sun4i-a10-ccu.o
+obj-$(CONFIG_SUN5I_CCU) += sun5i-ccu.o
+obj-$(CONFIG_SUN6I_A31_CCU) += sun6i-a31-ccu.o
+obj-$(CONFIG_SUN8I_A23_CCU) += sun8i-a23-ccu.o
+obj-$(CONFIG_SUN8I_A33_CCU) += sun8i-a33-ccu.o
+obj-$(CONFIG_SUN8I_A83T_CCU) += sun8i-a83t-ccu.o
+obj-$(CONFIG_SUN8I_H3_CCU) += sun8i-h3-ccu.o
+obj-$(CONFIG_SUN8I_R40_CCU) += sun8i-r40-ccu.o
+obj-$(CONFIG_SUN8I_V3S_CCU) += sun8i-v3s-ccu.o
+obj-$(CONFIG_SUN8I_DE2_CCU) += sun8i-de2-ccu.o
+obj-$(CONFIG_SUN8I_R_CCU) += sun8i-r-ccu.o
+obj-$(CONFIG_SUN9I_A80_CCU) += sun9i-a80-ccu.o
+obj-$(CONFIG_SUN9I_A80_CCU) += sun9i-a80-de-ccu.o
+obj-$(CONFIG_SUN9I_A80_CCU) += sun9i-a80-usb-ccu.o
+
+suniv-f1c100s-ccu-y += ccu-suniv-f1c100s.o
+sun20i-d1-ccu-y += ccu-sun20i-d1.o
+sun20i-d1-r-ccu-y += ccu-sun20i-d1-r.o
+sun50i-a64-ccu-y += ccu-sun50i-a64.o
+sun50i-a100-ccu-y += ccu-sun50i-a100.o
+sun50i-a100-r-ccu-y += ccu-sun50i-a100-r.o
+sun50i-h6-ccu-y += ccu-sun50i-h6.o
+sun50i-h6-r-ccu-y += ccu-sun50i-h6-r.o
+sun50i-h616-ccu-y += ccu-sun50i-h616.o
+sun4i-a10-ccu-y += ccu-sun4i-a10.o
+sun5i-ccu-y += ccu-sun5i.o
+sun6i-a31-ccu-y += ccu-sun6i-a31.o
+sun8i-a23-ccu-y += ccu-sun8i-a23.o
+sun8i-a33-ccu-y += ccu-sun8i-a33.o
+sun8i-a83t-ccu-y += ccu-sun8i-a83t.o
+sun8i-h3-ccu-y += ccu-sun8i-h3.o
+sun8i-r40-ccu-y += ccu-sun8i-r40.o
+sun8i-v3s-ccu-y += ccu-sun8i-v3s.o
+sun8i-de2-ccu-y += ccu-sun8i-de2.o
+sun8i-r-ccu-y += ccu-sun8i-r.o
+sun9i-a80-ccu-y += ccu-sun9i-a80.o
+sun9i-a80-de-ccu-y += ccu-sun9i-a80-de.o
+sun9i-a80-usb-ccu-y += ccu-sun9i-a80-usb.o
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c
new file mode 100644
index 000000000000..9d3ffd3fb2c1
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+
+#include "ccu-sun20i-d1-r.h"
+
+static const struct clk_parent_data r_ahb_apb0_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .fw_name = "pll-periph" },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX(r_ahb_clk, "r-ahb",
+ r_ahb_apb0_parents, 0x000,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ 0);
+static const struct clk_hw *r_ahb_hw = &r_ahb_clk.common.hw;
+
+static SUNXI_CCU_MP_DATA_WITH_MUX(r_apb0_clk, "r-apb0",
+ r_ahb_apb0_parents, 0x00c,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ 0);
+static const struct clk_hw *r_apb0_hw = &r_apb0_clk.common.hw;
+
+static SUNXI_CCU_GATE_HWS(bus_r_timer_clk, "bus-r-timer", &r_apb0_hw,
+ 0x11c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_r_twd_clk, "bus-r-twd", &r_apb0_hw,
+ 0x12c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_r_ppu_clk, "bus-r-ppu", &r_apb0_hw,
+ 0x1ac, BIT(0), 0);
+
+static const struct clk_parent_data r_ir_rx_parents[] = {
+ { .fw_name = "losc" },
+ { .fw_name = "hosc" },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(r_ir_rx_clk, "r-ir-rx",
+ r_ir_rx_parents, 0x1c0,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_r_ir_rx_clk, "bus-r-ir-rx", &r_apb0_hw,
+ 0x1cc, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_r_rtc_clk, "bus-r-rtc", &r_ahb_hw,
+ 0x20c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_r_cpucfg_clk, "bus-r-cpucfg", &r_apb0_hw,
+ 0x22c, BIT(0), 0);
+
+static struct ccu_common *sun20i_d1_r_ccu_clks[] = {
+ &r_ahb_clk.common,
+ &r_apb0_clk.common,
+ &bus_r_timer_clk.common,
+ &bus_r_twd_clk.common,
+ &bus_r_ppu_clk.common,
+ &r_ir_rx_clk.common,
+ &bus_r_ir_rx_clk.common,
+ &bus_r_rtc_clk.common,
+ &bus_r_cpucfg_clk.common,
+};
+
+static struct clk_hw_onecell_data sun20i_d1_r_hw_clks = {
+ .num = CLK_NUMBER,
+ .hws = {
+ [CLK_R_AHB] = &r_ahb_clk.common.hw,
+ [CLK_R_APB0] = &r_apb0_clk.common.hw,
+ [CLK_BUS_R_TIMER] = &bus_r_timer_clk.common.hw,
+ [CLK_BUS_R_TWD] = &bus_r_twd_clk.common.hw,
+ [CLK_BUS_R_PPU] = &bus_r_ppu_clk.common.hw,
+ [CLK_R_IR_RX] = &r_ir_rx_clk.common.hw,
+ [CLK_BUS_R_IR_RX] = &bus_r_ir_rx_clk.common.hw,
+ [CLK_BUS_R_RTC] = &bus_r_rtc_clk.common.hw,
+ [CLK_BUS_R_CPUCFG] = &bus_r_cpucfg_clk.common.hw,
+ },
+};
+
+static struct ccu_reset_map sun20i_d1_r_ccu_resets[] = {
+ [RST_BUS_R_TIMER] = { 0x11c, BIT(16) },
+ [RST_BUS_R_TWD] = { 0x12c, BIT(16) },
+ [RST_BUS_R_PPU] = { 0x1ac, BIT(16) },
+ [RST_BUS_R_IR_RX] = { 0x1cc, BIT(16) },
+ [RST_BUS_R_RTC] = { 0x20c, BIT(16) },
+ [RST_BUS_R_CPUCFG] = { 0x22c, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun20i_d1_r_ccu_desc = {
+ .ccu_clks = sun20i_d1_r_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun20i_d1_r_ccu_clks),
+
+ .hw_clks = &sun20i_d1_r_hw_clks,
+
+ .resets = sun20i_d1_r_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun20i_d1_r_ccu_resets),
+};
+
+static int sun20i_d1_r_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun20i_d1_r_ccu_desc);
+}
+
+static const struct of_device_id sun20i_d1_r_ccu_ids[] = {
+ { .compatible = "allwinner,sun20i-d1-r-ccu" },
+ { }
+};
+
+static struct platform_driver sun20i_d1_r_ccu_driver = {
+ .probe = sun20i_d1_r_ccu_probe,
+ .driver = {
+ .name = "sun20i-d1-r-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun20i_d1_r_ccu_ids,
+ },
+};
+module_platform_driver(sun20i_d1_r_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.h b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.h
new file mode 100644
index 000000000000..afd4342209ee
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1-r.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 frank@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _CCU_SUN20I_D1_R_H
+#define _CCU_SUN20I_D1_R_H
+
+#include <dt-bindings/clock/sun20i-d1-r-ccu.h>
+#include <dt-bindings/reset/sun20i-d1-r-ccu.h>
+
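+/* This index is not part of the public DT binding, so define it locally. */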
+#define CLK_R_APB0 1
+
+#define CLK_NUMBER (CLK_BUS_R_CPUCFG + 1)
+
+#endif /* _CCU_SUN20I_D1_R_H */
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.c b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
new file mode 100644
index 000000000000..51058ba4db4d
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.c
@@ -0,0 +1,1390 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "../clk.h"
+
+#include "ccu_common.h"
+#include "ccu_reset.h"
+
+#include "ccu_div.h"
+#include "ccu_gate.h"
+#include "ccu_mp.h"
+#include "ccu_mult.h"
+#include "ccu_nk.h"
+#include "ccu_nkm.h"
+#include "ccu_nkmp.h"
+#include "ccu_nm.h"
+
+#include "ccu-sun20i-d1.h"
+
+static const struct clk_parent_data osc24M[] = {
+ { .fw_name = "hosc" }
+};
+
+/*
+ * For the CPU PLL, the output divider is described as "only for testing"
+ * in the user manual. So it is not modelled, and is instead forced to 0.
+ */
+#define SUN20I_D1_PLL_CPUX_REG 0x000
+static struct ccu_mult pll_cpux_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .mult = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .common = {
+ .reg = 0x000,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-cpux", osc24M,
+ &ccu_mult_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
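+
+/*
+ * The "only for testing" output divider (register bits [1:0], factor M) is
+ * forced to 0 by sun20i_d1_ccu_probe() below.
+ */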
+
+/* Some PLLs are input * N / div1 / P. Model them as NKMP with no K */
+#define SUN20I_D1_PLL_DDR0_REG 0x010
+static struct ccu_nkmp pll_ddr0_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x010,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-ddr0", osc24M,
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+#define SUN20I_D1_PLL_PERIPH0_REG 0x020
+static struct ccu_nm pll_periph0_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x020,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-periph0-4x", osc24M,
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const struct clk_hw *pll_periph0_4x_hws[] = {
+ &pll_periph0_4x_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_periph0_2x_clk, "pll-periph0-2x",
+ pll_periph0_4x_hws, 0x020, 16, 3, 0);
+static SUNXI_CCU_M_HWS(pll_periph0_800M_clk, "pll-periph0-800M",
+ pll_periph0_4x_hws, 0x020, 20, 3, 0);
+
+static const struct clk_hw *pll_periph0_2x_hws[] = {
+ &pll_periph0_2x_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_periph0_clk, "pll-periph0",
+ pll_periph0_2x_hws, 2, 1, 0);
+
+static const struct clk_hw *pll_periph0_hws[] = { &pll_periph0_clk.hw };
+static CLK_FIXED_FACTOR_HWS(pll_periph0_div3_clk, "pll-periph0-div3",
+ pll_periph0_2x_hws, 6, 1, 0);
+
+/*
+ * For Video PLLs, the output divider is described as "only for testing"
+ * in the user manual. So it is not modelled, and is instead forced to 0.
+ */
+#define SUN20I_D1_PLL_VIDEO0_REG 0x040
+static struct ccu_nm pll_video0_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x040,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video0-4x", osc24M,
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const struct clk_hw *pll_video0_4x_hws[] = {
+ &pll_video0_4x_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_video0_2x_clk, "pll-video0-2x",
+ pll_video0_4x_hws, 2, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_video0_clk, "pll-video0",
+ pll_video0_4x_hws, 4, 1, CLK_SET_RATE_PARENT);
+
+#define SUN20I_D1_PLL_VIDEO1_REG 0x048
+static struct ccu_nm pll_video1_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .common = {
+ .reg = 0x048,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-video1-4x", osc24M,
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const struct clk_hw *pll_video1_4x_hws[] = {
+ &pll_video1_4x_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_video1_2x_clk, "pll-video1-2x",
+ pll_video1_4x_hws, 2, 1, CLK_SET_RATE_PARENT);
+static CLK_FIXED_FACTOR_HWS(pll_video1_clk, "pll-video1",
+ pll_video1_4x_hws, 4, 1, CLK_SET_RATE_PARENT);
+
+#define SUN20I_D1_PLL_VE_REG 0x058
+static struct ccu_nkmp pll_ve_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1), /* input divider */
+ .p = _SUNXI_CCU_DIV(0, 1), /* output divider */
+ .common = {
+ .reg = 0x058,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-ve", osc24M,
+ &ccu_nkmp_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+/*
+ * PLL_AUDIO0 has m0, m1 dividers in addition to the usual N, M factors.
+ * Since we only need one frequency from this PLL (22.5792 x 4 == 90.3168 MHz),
+ * ignore them for now. Enforce the default for them, which is m1 = 0, m0 = 0.
+ * The M factor must be an even number to produce a 50% duty cycle output.
+ */
+#define SUN20I_D1_PLL_AUDIO0_REG 0x078
+static struct ccu_sdm_setting pll_audio0_sdm_table[] = {
+ { .rate = 90316800, .pattern = 0xc001288d, .m = 6, .n = 22 },
+};
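+/*
+ * Illustrative arithmetic for the entry above, assuming a 24 MHz hosc:
+ * the sigma-delta pattern raises the effective multiplier to
+ * 90316800 Hz * 6 / 24000000 Hz = 22.5792, i.e. N = 22 plus a fractional
+ * part, which produces the 90.3168 MHz (22.5792 MHz * 4) rate.
+ */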
+
+static struct ccu_nm pll_audio0_4x_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(16, 6),
+ .sdm = _SUNXI_CCU_SDM(pll_audio0_sdm_table, BIT(24),
+ 0x178, BIT(31)),
+ .common = {
+ .reg = 0x078,
+ .features = CCU_FEATURE_SIGMA_DELTA_MOD,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-audio0-4x", osc24M,
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const struct clk_hw *pll_audio0_4x_hws[] = {
+ &pll_audio0_4x_clk.common.hw
+};
+static CLK_FIXED_FACTOR_HWS(pll_audio0_2x_clk, "pll-audio0-2x",
+ pll_audio0_4x_hws, 2, 1, 0);
+static CLK_FIXED_FACTOR_HWS(pll_audio0_clk, "pll-audio0",
+ pll_audio0_4x_hws, 4, 1, 0);
+
+/*
+ * PLL_AUDIO1 doesn't need Fractional-N. The output is usually 614.4 MHz for
+ * audio. The ADC or DAC should divide the PLL output further to 24.576 MHz.
+ */
+#define SUN20I_D1_PLL_AUDIO1_REG 0x080
+static struct ccu_nm pll_audio1_clk = {
+ .enable = BIT(27),
+ .lock = BIT(28),
+ .n = _SUNXI_CCU_MULT_MIN(8, 8, 12),
+ .m = _SUNXI_CCU_DIV(1, 1),
+ .common = {
+ .reg = 0x080,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("pll-audio1", osc24M,
+ &ccu_nm_ops,
+ CLK_SET_RATE_UNGATE),
+ },
+};
+
+static const struct clk_hw *pll_audio1_hws[] = {
+ &pll_audio1_clk.common.hw
+};
+static SUNXI_CCU_M_HWS(pll_audio1_div2_clk, "pll-audio1-div2",
+ pll_audio1_hws, 0x080, 16, 3, 0);
+static SUNXI_CCU_M_HWS(pll_audio1_div5_clk, "pll-audio1-div5",
+ pll_audio1_hws, 0x080, 20, 3, 0);
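+/*
+ * For example (factors assumed, not taken from the manual): N = 128 gives
+ * a 24 MHz * 128 = 3072 MHz VCO, so pll-audio1-div5 runs at 614.4 MHz, and
+ * a further /25 in the codec yields 614.4 MHz / 25 = 24.576 MHz.
+ */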
+
+/*
+ * The CPUX gate is not modelled - it is in a separate register (0x504)
+ * and has a special key field. The clock does not need to be ungated anyway.
+ */
+static const struct clk_parent_data cpux_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_cpux_clk.common.hw },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_periph0_800M_clk.common.hw },
+};
+static SUNXI_CCU_MUX_DATA(cpux_clk, "cpux", cpux_parents,
+ 0x500, 24, 3, CLK_SET_RATE_PARENT);
+
+static const struct clk_hw *cpux_hws[] = { &cpux_clk.common.hw };
+static SUNXI_CCU_M_HWS(cpux_axi_clk, "cpux-axi",
+ cpux_hws, 0x500, 0, 2, 0);
+static SUNXI_CCU_M_HWS(cpux_apb_clk, "cpux-apb",
+ cpux_hws, 0x500, 8, 2, 0);
+
+static const struct clk_parent_data psi_ahb_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_periph0_clk.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX(psi_ahb_clk, "psi-ahb", psi_ahb_parents, 0x510,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const struct clk_parent_data apb0_apb1_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .hw = &psi_ahb_clk.common.hw },
+ { .hw = &pll_periph0_clk.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX(apb0_clk, "apb0", apb0_apb1_parents, 0x520,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX(apb1_clk, "apb1", apb0_apb1_parents, 0x524,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+ 0);
+
+static const struct clk_hw *psi_ahb_hws[] = { &psi_ahb_clk.common.hw };
+static const struct clk_hw *apb0_hws[] = { &apb0_clk.common.hw };
+static const struct clk_hw *apb1_hws[] = { &apb1_clk.common.hw };
+
+static const struct clk_hw *de_di_g2d_parents[] = {
+ &pll_periph0_2x_clk.common.hw,
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_4x_clk.common.hw,
+ &pll_audio1_div2_clk.common.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(de_clk, "de", de_di_g2d_parents, 0x600,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_de_clk, "bus-de", psi_ahb_hws,
+ 0x60c, BIT(0), 0);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(di_clk, "di", de_di_g2d_parents, 0x620,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_di_clk, "bus-di", psi_ahb_hws,
+ 0x62c, BIT(0), 0);
+
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(g2d_clk, "g2d", de_di_g2d_parents, 0x630,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_g2d_clk, "bus-g2d", psi_ahb_hws,
+ 0x63c, BIT(0), 0);
+
+static const struct clk_parent_data ce_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_periph0_clk.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(ce_clk, "ce", ce_parents, 0x680,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ce_clk, "bus-ce", psi_ahb_hws,
+ 0x68c, BIT(0), 0);
+
+static const struct clk_hw *ve_parents[] = {
+ &pll_ve_clk.common.hw,
+ &pll_periph0_2x_clk.common.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(ve_clk, "ve", ve_parents, 0x690,
+ 0, 5, /* M */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_ve_clk, "bus-ve", psi_ahb_hws,
+ 0x69c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dma_clk, "bus-dma", psi_ahb_hws,
+ 0x70c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_msgbox0_clk, "bus-msgbox0", psi_ahb_hws,
+ 0x71c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_msgbox1_clk, "bus-msgbox1", psi_ahb_hws,
+ 0x71c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_msgbox2_clk, "bus-msgbox2", psi_ahb_hws,
+ 0x71c, BIT(2), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_spinlock_clk, "bus-spinlock", psi_ahb_hws,
+ 0x72c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_hstimer_clk, "bus-hstimer", psi_ahb_hws,
+ 0x73c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(avs_clk, "avs", osc24M,
+ 0x740, BIT(31), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dbg_clk, "bus-dbg", psi_ahb_hws,
+ 0x78c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_pwm_clk, "bus-pwm", apb0_hws,
+ 0x7ac, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_iommu_clk, "bus-iommu", apb0_hws,
+ 0x7bc, BIT(0), 0);
+
+static const struct clk_hw *dram_parents[] = {
+ &pll_ddr0_clk.common.hw,
+ &pll_audio1_div2_clk.common.hw,
+ &pll_periph0_2x_clk.common.hw,
+ &pll_periph0_800M_clk.common.hw,
+};
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(dram_clk, "dram", dram_parents, 0x800,
+ 0, 2, /* M */
+ 8, 2, /* P */
+ 24, 2, /* mux */
+			      BIT(31),		/* gate */
+			      CLK_IS_CRITICAL);
+
+static CLK_FIXED_FACTOR_HW(mbus_clk, "mbus",
+ &dram_clk.common.hw, 4, 1, 0);
+
+static const struct clk_hw *mbus_hws[] = { &mbus_clk.hw };
+
+static SUNXI_CCU_GATE_HWS(mbus_dma_clk, "mbus-dma", mbus_hws,
+ 0x804, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(mbus_ve_clk, "mbus-ve", mbus_hws,
+ 0x804, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(mbus_ce_clk, "mbus-ce", mbus_hws,
+ 0x804, BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(mbus_tvin_clk, "mbus-tvin", mbus_hws,
+ 0x804, BIT(7), 0);
+static SUNXI_CCU_GATE_HWS(mbus_csi_clk, "mbus-csi", mbus_hws,
+ 0x804, BIT(8), 0);
+static SUNXI_CCU_GATE_HWS(mbus_g2d_clk, "mbus-g2d", mbus_hws,
+ 0x804, BIT(10), 0);
+static SUNXI_CCU_GATE_HWS(mbus_riscv_clk, "mbus-riscv", mbus_hws,
+ 0x804, BIT(11), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dram_clk, "bus-dram", psi_ahb_hws,
+ 0x80c, BIT(0), CLK_IS_CRITICAL);
+
+static const struct clk_parent_data mmc0_mmc1_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc0_clk, "mmc0", mmc0_mmc1_parents, 0x830,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc1_clk, "mmc1", mmc0_mmc1_parents, 0x834,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data mmc2_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_periph0_800M_clk.common.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(mmc2_clk, "mmc2", mmc2_parents, 0x838,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_mmc0_clk, "bus-mmc0", psi_ahb_hws,
+ 0x84c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_mmc1_clk, "bus-mmc1", psi_ahb_hws,
+ 0x84c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_mmc2_clk, "bus-mmc2", psi_ahb_hws,
+ 0x84c, BIT(2), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_uart0_clk, "bus-uart0", apb1_hws,
+ 0x90c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart1_clk, "bus-uart1", apb1_hws,
+ 0x90c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart2_clk, "bus-uart2", apb1_hws,
+ 0x90c, BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart3_clk, "bus-uart3", apb1_hws,
+ 0x90c, BIT(3), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart4_clk, "bus-uart4", apb1_hws,
+ 0x90c, BIT(4), 0);
+static SUNXI_CCU_GATE_HWS(bus_uart5_clk, "bus-uart5", apb1_hws,
+ 0x90c, BIT(5), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_i2c0_clk, "bus-i2c0", apb1_hws,
+ 0x91c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c1_clk, "bus-i2c1", apb1_hws,
+ 0x91c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c2_clk, "bus-i2c2", apb1_hws,
+ 0x91c, BIT(2), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2c3_clk, "bus-i2c3", apb1_hws,
+ 0x91c, BIT(3), 0);
+
+static const struct clk_parent_data spi_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+ { .hw = &pll_audio1_div5_clk.common.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(spi0_clk, "spi0", spi_parents, 0x940,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(spi1_clk, "spi1", spi_parents, 0x944,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_spi0_clk, "bus-spi0", psi_ahb_hws,
+ 0x96c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_spi1_clk, "bus-spi1", psi_ahb_hws,
+ 0x96c, BIT(1), 0);
+
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(emac_25M_clk, "emac-25M", pll_periph0_hws,
+ 0x970, BIT(31) | BIT(30), 24, 0);
+
+static SUNXI_CCU_GATE_HWS(bus_emac_clk, "bus-emac", psi_ahb_hws,
+ 0x97c, BIT(0), 0);
+
+static const struct clk_parent_data ir_tx_ledc_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_clk.hw },
+};
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(ir_tx_clk, "ir-tx", ir_tx_ledc_parents, 0x9c0,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ir_tx_clk, "bus-ir-tx", apb0_hws,
+ 0x9cc, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_gpadc_clk, "bus-gpadc", apb0_hws,
+ 0x9ec, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ths_clk, "bus-ths", apb0_hws,
+ 0x9fc, BIT(0), 0);
+
+static const struct clk_hw *i2s_spdif_tx_parents[] = {
+ &pll_audio0_clk.hw,
+ &pll_audio0_4x_clk.common.hw,
+ &pll_audio1_div2_clk.common.hw,
+ &pll_audio1_div5_clk.common.hw,
+};
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(i2s0_clk, "i2s0", i2s_spdif_tx_parents, 0xa10,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(i2s1_clk, "i2s1", i2s_spdif_tx_parents, 0xa14,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(i2s2_clk, "i2s2", i2s_spdif_tx_parents, 0xa18,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_hw *i2s2_asrc_parents[] = {
+ &pll_audio0_4x_clk.common.hw,
+ &pll_periph0_clk.hw,
+ &pll_audio1_div2_clk.common.hw,
+ &pll_audio1_div5_clk.common.hw,
+};
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(i2s2_asrc_clk, "i2s2-asrc", i2s2_asrc_parents, 0xa1c,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_i2s0_clk, "bus-i2s0", apb0_hws,
+ 0xa20, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2s1_clk, "bus-i2s1", apb0_hws,
+ 0xa20, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_i2s2_clk, "bus-i2s2", apb0_hws,
+ 0xa20, BIT(2), 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(spdif_tx_clk, "spdif-tx", i2s_spdif_tx_parents, 0xa24,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_hw *spdif_rx_parents[] = {
+ &pll_periph0_clk.hw,
+ &pll_audio1_div2_clk.common.hw,
+ &pll_audio1_div5_clk.common.hw,
+};
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(spdif_rx_clk, "spdif-rx", spdif_rx_parents, 0xa28,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_spdif_clk, "bus-spdif", apb0_hws,
+ 0xa2c, BIT(0), 0);
+
+static const struct clk_hw *dmic_codec_parents[] = {
+ &pll_audio0_clk.hw,
+ &pll_audio1_div2_clk.common.hw,
+ &pll_audio1_div5_clk.common.hw,
+};
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(dmic_clk, "dmic", dmic_codec_parents, 0xa40,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dmic_clk, "bus-dmic", apb0_hws,
+ 0xa4c, BIT(0), 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(audio_dac_clk, "audio-dac", dmic_codec_parents, 0xa50,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(audio_adc_clk, "audio-adc", dmic_codec_parents, 0xa54,
+ 0, 5, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_audio_clk, "bus-audio", apb0_hws,
+ 0xa5c, BIT(0), 0);
+
+/*
+ * The first parent is a 48 MHz input clock divided by 4. That 48 MHz clock
+ * is generated by a 2x multiplier from osc24M, synchronized to pll-periph0,
+ * and is also used by the OHCI module.
+ */
+static const struct clk_parent_data usb_ohci_parents[] = {
+ { .hw = &pll_periph0_clk.hw },
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+};
+static const struct ccu_mux_fixed_prediv usb_ohci_predivs[] = {
+ { .index = 0, .div = 50 },
+ { .index = 1, .div = 2 },
+};
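+/*
+ * Worked example, assuming pll-periph0 runs at 600 MHz (consistent with
+ * the /24 = 25 MHz EMAC prediv below): index 0 gives 600 MHz / 50 = 12 MHz,
+ * matching the 48 MHz / 4 described above, and index 1 gives
+ * 24 MHz / 2 = 12 MHz.
+ */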
+
+static struct ccu_mux usb_ohci0_clk = {
+ .enable = BIT(31),
+ .mux = {
+ .shift = 24,
+ .width = 2,
+ .fixed_predivs = usb_ohci_predivs,
+ .n_predivs = ARRAY_SIZE(usb_ohci_predivs),
+ },
+ .common = {
+ .reg = 0xa70,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("usb-ohci0",
+ usb_ohci_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static struct ccu_mux usb_ohci1_clk = {
+ .enable = BIT(31),
+ .mux = {
+ .shift = 24,
+ .width = 2,
+ .fixed_predivs = usb_ohci_predivs,
+ .n_predivs = ARRAY_SIZE(usb_ohci_predivs),
+ },
+ .common = {
+ .reg = 0xa74,
+ .features = CCU_FEATURE_FIXED_PREDIV,
+ .hw.init = CLK_HW_INIT_PARENTS_DATA("usb-ohci1",
+ usb_ohci_parents,
+ &ccu_mux_ops,
+ 0),
+ },
+};
+
+static SUNXI_CCU_GATE_HWS(bus_ohci0_clk, "bus-ohci0", psi_ahb_hws,
+ 0xa8c, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_ohci1_clk, "bus-ohci1", psi_ahb_hws,
+ 0xa8c, BIT(1), 0);
+static SUNXI_CCU_GATE_HWS(bus_ehci0_clk, "bus-ehci0", psi_ahb_hws,
+ 0xa8c, BIT(4), 0);
+static SUNXI_CCU_GATE_HWS(bus_ehci1_clk, "bus-ehci1", psi_ahb_hws,
+ 0xa8c, BIT(5), 0);
+static SUNXI_CCU_GATE_HWS(bus_otg_clk, "bus-otg", psi_ahb_hws,
+ 0xa8c, BIT(8), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_lradc_clk, "bus-lradc", apb0_hws,
+ 0xa9c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dpss_top_clk, "bus-dpss-top", psi_ahb_hws,
+ 0xabc, BIT(0), 0);
+
+static SUNXI_CCU_GATE_DATA(hdmi_24M_clk, "hdmi-24M", osc24M,
+ 0xb04, BIT(31), 0);
+
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(hdmi_cec_32k_clk, "hdmi-cec-32k",
+ pll_periph0_2x_hws,
+ 0xb10, BIT(30), 36621, 0);
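+/*
+ * Rate check, assuming pll-periph0-2x at 1.2 GHz: the 36621 prediv gives
+ * 1.2 GHz / 36621 ~= 32768 Hz, i.e. the 32.768 kHz CEC clock.
+ */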
+
+static const struct clk_parent_data hdmi_cec_parents[] = {
+ { .fw_name = "losc" },
+ { .hw = &hdmi_cec_32k_clk.common.hw },
+};
+static SUNXI_CCU_MUX_DATA_WITH_GATE(hdmi_cec_clk, "hdmi-cec", hdmi_cec_parents, 0xb10,
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_hdmi_clk, "bus-hdmi", psi_ahb_hws,
+ 0xb1c, BIT(0), 0);
+
+static const struct clk_parent_data mipi_dsi_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_video0_2x_clk.hw },
+ { .hw = &pll_video1_2x_clk.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(mipi_dsi_clk, "mipi-dsi", mipi_dsi_parents, 0xb24,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_mipi_dsi_clk, "bus-mipi-dsi", psi_ahb_hws,
+ 0xb4c, BIT(0), 0);
+
+static const struct clk_hw *tcon_tve_parents[] = {
+ &pll_video0_clk.hw,
+ &pll_video0_4x_clk.common.hw,
+ &pll_video1_clk.hw,
+ &pll_video1_4x_clk.common.hw,
+ &pll_periph0_2x_clk.common.hw,
+ &pll_audio1_div2_clk.common.hw,
+};
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(tcon_lcd0_clk, "tcon-lcd0", tcon_tve_parents, 0xb60,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_tcon_lcd0_clk, "bus-tcon-lcd0", psi_ahb_hws,
+ 0xb7c, BIT(0), 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(tcon_tv_clk, "tcon-tv", tcon_tve_parents, 0xb80,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ CLK_SET_RATE_PARENT);
+
+static SUNXI_CCU_GATE_HWS(bus_tcon_tv_clk, "bus-tcon-tv", psi_ahb_hws,
+ 0xb9c, BIT(0), 0);
+
+static SUNXI_CCU_MP_HW_WITH_MUX_GATE(tve_clk, "tve", tcon_tve_parents, 0xbb0,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_tve_top_clk, "bus-tve-top", psi_ahb_hws,
+ 0xbbc, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_tve_clk, "bus-tve", psi_ahb_hws,
+ 0xbbc, BIT(1), 0);
+
+static const struct clk_parent_data tvd_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_video0_clk.hw },
+ { .hw = &pll_video1_clk.hw },
+ { .hw = &pll_periph0_clk.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(tvd_clk, "tvd", tvd_parents, 0xbc0,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_tvd_top_clk, "bus-tvd-top", psi_ahb_hws,
+ 0xbdc, BIT(0), 0);
+static SUNXI_CCU_GATE_HWS(bus_tvd_clk, "bus-tvd", psi_ahb_hws,
+ 0xbdc, BIT(1), 0);
+
+static SUNXI_CCU_MP_DATA_WITH_MUX_GATE(ledc_clk, "ledc", ir_tx_ledc_parents, 0xbf0,
+ 0, 4, /* M */
+ 8, 2, /* P */
+ 24, 1, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_ledc_clk, "bus-ledc", psi_ahb_hws,
+ 0xbfc, BIT(0), 0);
+
+static const struct clk_hw *csi_top_parents[] = {
+ &pll_periph0_2x_clk.common.hw,
+ &pll_video0_2x_clk.hw,
+ &pll_video1_2x_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(csi_top_clk, "csi-top", csi_top_parents, 0xc04,
+ 0, 4, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_parent_data csi_mclk_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_video0_clk.hw },
+ { .hw = &pll_video1_clk.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+ { .hw = &pll_audio1_div5_clk.common.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(csi_mclk_clk, "csi-mclk", csi_mclk_parents, 0xc08,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_csi_clk, "bus-csi", psi_ahb_hws,
+ 0xc1c, BIT(0), 0);
+
+static const struct clk_parent_data tpadc_parents[] = {
+ { .fw_name = "hosc" },
+ { .hw = &pll_audio0_clk.hw },
+};
+static SUNXI_CCU_MUX_DATA_WITH_GATE(tpadc_clk, "tpadc", tpadc_parents, 0xc50,
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_tpadc_clk, "bus-tpadc", apb0_hws,
+ 0xc5c, BIT(0), 0);
+
+static SUNXI_CCU_GATE_HWS(bus_tzma_clk, "bus-tzma", apb0_hws,
+ 0xc6c, BIT(0), 0);
+
+static const struct clk_parent_data dsp_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_periph0_2x_clk.common.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX_GATE(dsp_clk, "dsp", dsp_parents, 0xc70,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ BIT(31), /* gate */
+ 0);
+
+static SUNXI_CCU_GATE_HWS(bus_dsp_cfg_clk, "bus-dsp-cfg", psi_ahb_hws,
+ 0xc7c, BIT(1), 0);
+
+/*
+ * The RISC-V gate is not modelled - it is in a separate register (0xd04)
+ * and has a special key field. The clock is critical anyway.
+ */
+static const struct clk_parent_data riscv_parents[] = {
+ { .fw_name = "hosc" },
+ { .fw_name = "losc" },
+ { .fw_name = "iosc" },
+ { .hw = &pll_periph0_800M_clk.common.hw },
+ { .hw = &pll_periph0_clk.hw },
+ { .hw = &pll_cpux_clk.common.hw },
+ { .hw = &pll_audio1_div2_clk.common.hw },
+};
+static SUNXI_CCU_M_DATA_WITH_MUX(riscv_clk, "riscv", riscv_parents, 0xd00,
+ 0, 5, /* M */
+ 24, 3, /* mux */
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+
+/* The riscv-axi clk must be divided by at least 2. */
+static struct clk_div_table riscv_axi_table[] = {
+ { .val = 1, .div = 2 },
+ { .val = 2, .div = 3 },
+ { .val = 3, .div = 4 },
+ { /* Sentinel */ }
+};
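+/*
+ * In the table above the divider is the register value plus one; the
+ * divide-by-1 encoding (val = 0) is intentionally omitted to satisfy that
+ * constraint.
+ */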
+static SUNXI_CCU_DIV_TABLE_HW(riscv_axi_clk, "riscv-axi", &riscv_clk.common.hw,
+ 0xd00, 8, 2, riscv_axi_table, 0);
+
+static SUNXI_CCU_GATE_HWS(bus_riscv_cfg_clk, "bus-riscv-cfg", psi_ahb_hws,
+ 0xd0c, BIT(0), CLK_IS_CRITICAL);
+
+static SUNXI_CCU_GATE_DATA(fanout_24M_clk, "fanout-24M", osc24M,
+ 0xf30, BIT(0), 0);
+static SUNXI_CCU_GATE_DATA_WITH_PREDIV(fanout_12M_clk, "fanout-12M", osc24M,
+ 0xf30, BIT(1), 2, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(fanout_16M_clk, "fanout-16M", pll_periph0_2x_hws,
+ 0xf30, BIT(2), 75, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(fanout_25M_clk, "fanout-25M", pll_periph0_hws,
+ 0xf30, BIT(3), 24, 0);
+static SUNXI_CCU_GATE_HWS_WITH_PREDIV(fanout_32k_clk, "fanout-32k", pll_periph0_2x_hws,
+ 0xf30, BIT(4), 36621, 0);
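+/*
+ * Fanout rate check, assuming a 600 MHz pll-periph0: 1200 MHz / 75 = 16 MHz,
+ * 600 MHz / 24 = 25 MHz, and 1200 MHz / 36621 ~= 32768 Hz.
+ */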
+
+/* This clock has a second divider that is not modelled and is forced to 0. */
+#define SUN20I_D1_FANOUT_27M_REG 0xf34
+static const struct clk_hw *fanout_27M_parents[] = {
+ &pll_video0_clk.hw,
+ &pll_video1_clk.hw,
+};
+static SUNXI_CCU_M_HW_WITH_MUX_GATE(fanout_27M_clk, "fanout-27M", fanout_27M_parents, 0xf34,
+ 0, 5, /* M */
+ 24, 2, /* mux */
+ BIT(31), /* gate */
+ 0);
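+/*
+ * For example (rate assumed, not from the manual): with pll-video0 at
+ * 297 MHz, M = 11 gives the 27 MHz output.
+ */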
+
+static SUNXI_CCU_M_HWS_WITH_GATE(fanout_pclk_clk, "fanout-pclk", apb0_hws, 0xf38,
+ 0, 5, /* M */
+ BIT(31), /* gate */
+ 0);
+
+static const struct clk_hw *fanout_parents[] = {
+ &fanout_32k_clk.common.hw,
+ &fanout_12M_clk.common.hw,
+ &fanout_16M_clk.common.hw,
+ &fanout_24M_clk.common.hw,
+ &fanout_25M_clk.common.hw,
+ &fanout_27M_clk.common.hw,
+ &fanout_pclk_clk.common.hw,
+};
+static SUNXI_CCU_MUX_HW_WITH_GATE(fanout0_clk, "fanout0", fanout_parents, 0xf3c,
+ 0, 3, /* mux */
+ BIT(21), /* gate */
+ 0);
+static SUNXI_CCU_MUX_HW_WITH_GATE(fanout1_clk, "fanout1", fanout_parents, 0xf3c,
+ 3, 3, /* mux */
+ BIT(22), /* gate */
+ 0);
+static SUNXI_CCU_MUX_HW_WITH_GATE(fanout2_clk, "fanout2", fanout_parents, 0xf3c,
+ 6, 3, /* mux */
+ BIT(23), /* gate */
+ 0);
+
+static struct ccu_common *sun20i_d1_ccu_clks[] = {
+ &pll_cpux_clk.common,
+ &pll_ddr0_clk.common,
+ &pll_periph0_4x_clk.common,
+ &pll_periph0_2x_clk.common,
+ &pll_periph0_800M_clk.common,
+ &pll_video0_4x_clk.common,
+ &pll_video1_4x_clk.common,
+ &pll_ve_clk.common,
+ &pll_audio0_4x_clk.common,
+ &pll_audio1_clk.common,
+ &pll_audio1_div2_clk.common,
+ &pll_audio1_div5_clk.common,
+ &cpux_clk.common,
+ &cpux_axi_clk.common,
+ &cpux_apb_clk.common,
+ &psi_ahb_clk.common,
+ &apb0_clk.common,
+ &apb1_clk.common,
+ &de_clk.common,
+ &bus_de_clk.common,
+ &di_clk.common,
+ &bus_di_clk.common,
+ &g2d_clk.common,
+ &bus_g2d_clk.common,
+ &ce_clk.common,
+ &bus_ce_clk.common,
+ &ve_clk.common,
+ &bus_ve_clk.common,
+ &bus_dma_clk.common,
+ &bus_msgbox0_clk.common,
+ &bus_msgbox1_clk.common,
+ &bus_msgbox2_clk.common,
+ &bus_spinlock_clk.common,
+ &bus_hstimer_clk.common,
+ &avs_clk.common,
+ &bus_dbg_clk.common,
+ &bus_pwm_clk.common,
+ &bus_iommu_clk.common,
+ &dram_clk.common,
+ &mbus_dma_clk.common,
+ &mbus_ve_clk.common,
+ &mbus_ce_clk.common,
+ &mbus_tvin_clk.common,
+ &mbus_csi_clk.common,
+ &mbus_g2d_clk.common,
+ &mbus_riscv_clk.common,
+ &bus_dram_clk.common,
+ &mmc0_clk.common,
+ &mmc1_clk.common,
+ &mmc2_clk.common,
+ &bus_mmc0_clk.common,
+ &bus_mmc1_clk.common,
+ &bus_mmc2_clk.common,
+ &bus_uart0_clk.common,
+ &bus_uart1_clk.common,
+ &bus_uart2_clk.common,
+ &bus_uart3_clk.common,
+ &bus_uart4_clk.common,
+ &bus_uart5_clk.common,
+ &bus_i2c0_clk.common,
+ &bus_i2c1_clk.common,
+ &bus_i2c2_clk.common,
+ &bus_i2c3_clk.common,
+ &spi0_clk.common,
+ &spi1_clk.common,
+ &bus_spi0_clk.common,
+ &bus_spi1_clk.common,
+ &emac_25M_clk.common,
+ &bus_emac_clk.common,
+ &ir_tx_clk.common,
+ &bus_ir_tx_clk.common,
+ &bus_gpadc_clk.common,
+ &bus_ths_clk.common,
+ &i2s0_clk.common,
+ &i2s1_clk.common,
+ &i2s2_clk.common,
+ &i2s2_asrc_clk.common,
+ &bus_i2s0_clk.common,
+ &bus_i2s1_clk.common,
+ &bus_i2s2_clk.common,
+ &spdif_tx_clk.common,
+ &spdif_rx_clk.common,
+ &bus_spdif_clk.common,
+ &dmic_clk.common,
+ &bus_dmic_clk.common,
+ &audio_dac_clk.common,
+ &audio_adc_clk.common,
+ &bus_audio_clk.common,
+ &usb_ohci0_clk.common,
+ &usb_ohci1_clk.common,
+ &bus_ohci0_clk.common,
+ &bus_ohci1_clk.common,
+ &bus_ehci0_clk.common,
+ &bus_ehci1_clk.common,
+ &bus_otg_clk.common,
+ &bus_lradc_clk.common,
+ &bus_dpss_top_clk.common,
+ &hdmi_24M_clk.common,
+ &hdmi_cec_32k_clk.common,
+ &hdmi_cec_clk.common,
+ &bus_hdmi_clk.common,
+ &mipi_dsi_clk.common,
+ &bus_mipi_dsi_clk.common,
+ &tcon_lcd0_clk.common,
+ &bus_tcon_lcd0_clk.common,
+ &tcon_tv_clk.common,
+ &bus_tcon_tv_clk.common,
+ &tve_clk.common,
+ &bus_tve_top_clk.common,
+ &bus_tve_clk.common,
+ &tvd_clk.common,
+ &bus_tvd_top_clk.common,
+ &bus_tvd_clk.common,
+ &ledc_clk.common,
+ &bus_ledc_clk.common,
+ &csi_top_clk.common,
+ &csi_mclk_clk.common,
+ &bus_csi_clk.common,
+ &tpadc_clk.common,
+ &bus_tpadc_clk.common,
+ &bus_tzma_clk.common,
+ &dsp_clk.common,
+ &bus_dsp_cfg_clk.common,
+ &riscv_clk.common,
+ &riscv_axi_clk.common,
+ &bus_riscv_cfg_clk.common,
+ &fanout_24M_clk.common,
+ &fanout_12M_clk.common,
+ &fanout_16M_clk.common,
+ &fanout_25M_clk.common,
+ &fanout_32k_clk.common,
+ &fanout_27M_clk.common,
+ &fanout_pclk_clk.common,
+ &fanout0_clk.common,
+ &fanout1_clk.common,
+ &fanout2_clk.common,
+};
+
+static struct clk_hw_onecell_data sun20i_d1_hw_clks = {
+ .num = CLK_NUMBER,
+ .hws = {
+ [CLK_PLL_CPUX] = &pll_cpux_clk.common.hw,
+ [CLK_PLL_DDR0] = &pll_ddr0_clk.common.hw,
+ [CLK_PLL_PERIPH0_4X] = &pll_periph0_4x_clk.common.hw,
+ [CLK_PLL_PERIPH0_2X] = &pll_periph0_2x_clk.common.hw,
+ [CLK_PLL_PERIPH0_800M] = &pll_periph0_800M_clk.common.hw,
+ [CLK_PLL_PERIPH0] = &pll_periph0_clk.hw,
+ [CLK_PLL_PERIPH0_DIV3] = &pll_periph0_div3_clk.hw,
+ [CLK_PLL_VIDEO0_4X] = &pll_video0_4x_clk.common.hw,
+ [CLK_PLL_VIDEO0_2X] = &pll_video0_2x_clk.hw,
+ [CLK_PLL_VIDEO0] = &pll_video0_clk.hw,
+ [CLK_PLL_VIDEO1_4X] = &pll_video1_4x_clk.common.hw,
+ [CLK_PLL_VIDEO1_2X] = &pll_video1_2x_clk.hw,
+ [CLK_PLL_VIDEO1] = &pll_video1_clk.hw,
+ [CLK_PLL_VE] = &pll_ve_clk.common.hw,
+ [CLK_PLL_AUDIO0_4X] = &pll_audio0_4x_clk.common.hw,
+ [CLK_PLL_AUDIO0_2X] = &pll_audio0_2x_clk.hw,
+ [CLK_PLL_AUDIO0] = &pll_audio0_clk.hw,
+ [CLK_PLL_AUDIO1] = &pll_audio1_clk.common.hw,
+ [CLK_PLL_AUDIO1_DIV2] = &pll_audio1_div2_clk.common.hw,
+ [CLK_PLL_AUDIO1_DIV5] = &pll_audio1_div5_clk.common.hw,
+ [CLK_CPUX] = &cpux_clk.common.hw,
+ [CLK_CPUX_AXI] = &cpux_axi_clk.common.hw,
+ [CLK_CPUX_APB] = &cpux_apb_clk.common.hw,
+ [CLK_PSI_AHB] = &psi_ahb_clk.common.hw,
+ [CLK_APB0] = &apb0_clk.common.hw,
+ [CLK_APB1] = &apb1_clk.common.hw,
+ [CLK_MBUS] = &mbus_clk.hw,
+ [CLK_DE] = &de_clk.common.hw,
+ [CLK_BUS_DE] = &bus_de_clk.common.hw,
+ [CLK_DI] = &di_clk.common.hw,
+ [CLK_BUS_DI] = &bus_di_clk.common.hw,
+ [CLK_G2D] = &g2d_clk.common.hw,
+ [CLK_BUS_G2D] = &bus_g2d_clk.common.hw,
+ [CLK_CE] = &ce_clk.common.hw,
+ [CLK_BUS_CE] = &bus_ce_clk.common.hw,
+ [CLK_VE] = &ve_clk.common.hw,
+ [CLK_BUS_VE] = &bus_ve_clk.common.hw,
+ [CLK_BUS_DMA] = &bus_dma_clk.common.hw,
+ [CLK_BUS_MSGBOX0] = &bus_msgbox0_clk.common.hw,
+ [CLK_BUS_MSGBOX1] = &bus_msgbox1_clk.common.hw,
+ [CLK_BUS_MSGBOX2] = &bus_msgbox2_clk.common.hw,
+ [CLK_BUS_SPINLOCK] = &bus_spinlock_clk.common.hw,
+ [CLK_BUS_HSTIMER] = &bus_hstimer_clk.common.hw,
+ [CLK_AVS] = &avs_clk.common.hw,
+ [CLK_BUS_DBG] = &bus_dbg_clk.common.hw,
+ [CLK_BUS_PWM] = &bus_pwm_clk.common.hw,
+ [CLK_BUS_IOMMU] = &bus_iommu_clk.common.hw,
+ [CLK_DRAM] = &dram_clk.common.hw,
+ [CLK_MBUS_DMA] = &mbus_dma_clk.common.hw,
+ [CLK_MBUS_VE] = &mbus_ve_clk.common.hw,
+ [CLK_MBUS_CE] = &mbus_ce_clk.common.hw,
+ [CLK_MBUS_TVIN] = &mbus_tvin_clk.common.hw,
+ [CLK_MBUS_CSI] = &mbus_csi_clk.common.hw,
+ [CLK_MBUS_G2D] = &mbus_g2d_clk.common.hw,
+ [CLK_MBUS_RISCV] = &mbus_riscv_clk.common.hw,
+ [CLK_BUS_DRAM] = &bus_dram_clk.common.hw,
+ [CLK_MMC0] = &mmc0_clk.common.hw,
+ [CLK_MMC1] = &mmc1_clk.common.hw,
+ [CLK_MMC2] = &mmc2_clk.common.hw,
+ [CLK_BUS_MMC0] = &bus_mmc0_clk.common.hw,
+ [CLK_BUS_MMC1] = &bus_mmc1_clk.common.hw,
+ [CLK_BUS_MMC2] = &bus_mmc2_clk.common.hw,
+ [CLK_BUS_UART0] = &bus_uart0_clk.common.hw,
+ [CLK_BUS_UART1] = &bus_uart1_clk.common.hw,
+ [CLK_BUS_UART2] = &bus_uart2_clk.common.hw,
+ [CLK_BUS_UART3] = &bus_uart3_clk.common.hw,
+ [CLK_BUS_UART4] = &bus_uart4_clk.common.hw,
+ [CLK_BUS_UART5] = &bus_uart5_clk.common.hw,
+ [CLK_BUS_I2C0] = &bus_i2c0_clk.common.hw,
+ [CLK_BUS_I2C1] = &bus_i2c1_clk.common.hw,
+ [CLK_BUS_I2C2] = &bus_i2c2_clk.common.hw,
+ [CLK_BUS_I2C3] = &bus_i2c3_clk.common.hw,
+ [CLK_SPI0] = &spi0_clk.common.hw,
+ [CLK_SPI1] = &spi1_clk.common.hw,
+ [CLK_BUS_SPI0] = &bus_spi0_clk.common.hw,
+ [CLK_BUS_SPI1] = &bus_spi1_clk.common.hw,
+ [CLK_EMAC_25M] = &emac_25M_clk.common.hw,
+ [CLK_BUS_EMAC] = &bus_emac_clk.common.hw,
+ [CLK_IR_TX] = &ir_tx_clk.common.hw,
+ [CLK_BUS_IR_TX] = &bus_ir_tx_clk.common.hw,
+ [CLK_BUS_GPADC] = &bus_gpadc_clk.common.hw,
+ [CLK_BUS_THS] = &bus_ths_clk.common.hw,
+ [CLK_I2S0] = &i2s0_clk.common.hw,
+ [CLK_I2S1] = &i2s1_clk.common.hw,
+ [CLK_I2S2] = &i2s2_clk.common.hw,
+ [CLK_I2S2_ASRC] = &i2s2_asrc_clk.common.hw,
+ [CLK_BUS_I2S0] = &bus_i2s0_clk.common.hw,
+ [CLK_BUS_I2S1] = &bus_i2s1_clk.common.hw,
+ [CLK_BUS_I2S2] = &bus_i2s2_clk.common.hw,
+ [CLK_SPDIF_TX] = &spdif_tx_clk.common.hw,
+ [CLK_SPDIF_RX] = &spdif_rx_clk.common.hw,
+ [CLK_BUS_SPDIF] = &bus_spdif_clk.common.hw,
+ [CLK_DMIC] = &dmic_clk.common.hw,
+ [CLK_BUS_DMIC] = &bus_dmic_clk.common.hw,
+ [CLK_AUDIO_DAC] = &audio_dac_clk.common.hw,
+ [CLK_AUDIO_ADC] = &audio_adc_clk.common.hw,
+ [CLK_BUS_AUDIO] = &bus_audio_clk.common.hw,
+ [CLK_USB_OHCI0] = &usb_ohci0_clk.common.hw,
+ [CLK_USB_OHCI1] = &usb_ohci1_clk.common.hw,
+ [CLK_BUS_OHCI0] = &bus_ohci0_clk.common.hw,
+ [CLK_BUS_OHCI1] = &bus_ohci1_clk.common.hw,
+ [CLK_BUS_EHCI0] = &bus_ehci0_clk.common.hw,
+ [CLK_BUS_EHCI1] = &bus_ehci1_clk.common.hw,
+ [CLK_BUS_OTG] = &bus_otg_clk.common.hw,
+ [CLK_BUS_LRADC] = &bus_lradc_clk.common.hw,
+ [CLK_BUS_DPSS_TOP] = &bus_dpss_top_clk.common.hw,
+ [CLK_HDMI_24M] = &hdmi_24M_clk.common.hw,
+ [CLK_HDMI_CEC_32K] = &hdmi_cec_32k_clk.common.hw,
+ [CLK_HDMI_CEC] = &hdmi_cec_clk.common.hw,
+ [CLK_BUS_HDMI] = &bus_hdmi_clk.common.hw,
+ [CLK_MIPI_DSI] = &mipi_dsi_clk.common.hw,
+ [CLK_BUS_MIPI_DSI] = &bus_mipi_dsi_clk.common.hw,
+ [CLK_TCON_LCD0] = &tcon_lcd0_clk.common.hw,
+ [CLK_BUS_TCON_LCD0] = &bus_tcon_lcd0_clk.common.hw,
+ [CLK_TCON_TV] = &tcon_tv_clk.common.hw,
+ [CLK_BUS_TCON_TV] = &bus_tcon_tv_clk.common.hw,
+ [CLK_TVE] = &tve_clk.common.hw,
+ [CLK_BUS_TVE_TOP] = &bus_tve_top_clk.common.hw,
+ [CLK_BUS_TVE] = &bus_tve_clk.common.hw,
+ [CLK_TVD] = &tvd_clk.common.hw,
+ [CLK_BUS_TVD_TOP] = &bus_tvd_top_clk.common.hw,
+ [CLK_BUS_TVD] = &bus_tvd_clk.common.hw,
+ [CLK_LEDC] = &ledc_clk.common.hw,
+ [CLK_BUS_LEDC] = &bus_ledc_clk.common.hw,
+ [CLK_CSI_TOP] = &csi_top_clk.common.hw,
+ [CLK_CSI_MCLK] = &csi_mclk_clk.common.hw,
+ [CLK_BUS_CSI] = &bus_csi_clk.common.hw,
+ [CLK_TPADC] = &tpadc_clk.common.hw,
+ [CLK_BUS_TPADC] = &bus_tpadc_clk.common.hw,
+ [CLK_BUS_TZMA] = &bus_tzma_clk.common.hw,
+ [CLK_DSP] = &dsp_clk.common.hw,
+ [CLK_BUS_DSP_CFG] = &bus_dsp_cfg_clk.common.hw,
+ [CLK_RISCV] = &riscv_clk.common.hw,
+ [CLK_RISCV_AXI] = &riscv_axi_clk.common.hw,
+ [CLK_BUS_RISCV_CFG] = &bus_riscv_cfg_clk.common.hw,
+ [CLK_FANOUT_24M] = &fanout_24M_clk.common.hw,
+ [CLK_FANOUT_12M] = &fanout_12M_clk.common.hw,
+ [CLK_FANOUT_16M] = &fanout_16M_clk.common.hw,
+ [CLK_FANOUT_25M] = &fanout_25M_clk.common.hw,
+ [CLK_FANOUT_32K] = &fanout_32k_clk.common.hw,
+ [CLK_FANOUT_27M] = &fanout_27M_clk.common.hw,
+ [CLK_FANOUT_PCLK] = &fanout_pclk_clk.common.hw,
+ [CLK_FANOUT0] = &fanout0_clk.common.hw,
+ [CLK_FANOUT1] = &fanout1_clk.common.hw,
+ [CLK_FANOUT2] = &fanout2_clk.common.hw,
+ },
+};
+
+static struct ccu_reset_map sun20i_d1_ccu_resets[] = {
+ [RST_MBUS] = { 0x540, BIT(30) },
+ [RST_BUS_DE] = { 0x60c, BIT(16) },
+ [RST_BUS_DI] = { 0x62c, BIT(16) },
+ [RST_BUS_G2D] = { 0x63c, BIT(16) },
+ [RST_BUS_CE] = { 0x68c, BIT(16) },
+ [RST_BUS_VE] = { 0x69c, BIT(16) },
+ [RST_BUS_DMA] = { 0x70c, BIT(16) },
+ [RST_BUS_MSGBOX0] = { 0x71c, BIT(16) },
+ [RST_BUS_MSGBOX1] = { 0x71c, BIT(17) },
+ [RST_BUS_MSGBOX2] = { 0x71c, BIT(18) },
+ [RST_BUS_SPINLOCK] = { 0x72c, BIT(16) },
+ [RST_BUS_HSTIMER] = { 0x73c, BIT(16) },
+ [RST_BUS_DBG] = { 0x78c, BIT(16) },
+ [RST_BUS_PWM] = { 0x7ac, BIT(16) },
+ [RST_BUS_DRAM] = { 0x80c, BIT(16) },
+ [RST_BUS_MMC0] = { 0x84c, BIT(16) },
+ [RST_BUS_MMC1] = { 0x84c, BIT(17) },
+ [RST_BUS_MMC2] = { 0x84c, BIT(18) },
+ [RST_BUS_UART0] = { 0x90c, BIT(16) },
+ [RST_BUS_UART1] = { 0x90c, BIT(17) },
+ [RST_BUS_UART2] = { 0x90c, BIT(18) },
+ [RST_BUS_UART3] = { 0x90c, BIT(19) },
+ [RST_BUS_UART4] = { 0x90c, BIT(20) },
+ [RST_BUS_UART5] = { 0x90c, BIT(21) },
+ [RST_BUS_I2C0] = { 0x91c, BIT(16) },
+ [RST_BUS_I2C1] = { 0x91c, BIT(17) },
+ [RST_BUS_I2C2] = { 0x91c, BIT(18) },
+ [RST_BUS_I2C3] = { 0x91c, BIT(19) },
+ [RST_BUS_SPI0] = { 0x96c, BIT(16) },
+ [RST_BUS_SPI1] = { 0x96c, BIT(17) },
+ [RST_BUS_EMAC] = { 0x97c, BIT(16) },
+ [RST_BUS_IR_TX] = { 0x9cc, BIT(16) },
+ [RST_BUS_GPADC] = { 0x9ec, BIT(16) },
+ [RST_BUS_THS] = { 0x9fc, BIT(16) },
+ [RST_BUS_I2S0] = { 0xa20, BIT(16) },
+ [RST_BUS_I2S1] = { 0xa20, BIT(17) },
+ [RST_BUS_I2S2] = { 0xa20, BIT(18) },
+ [RST_BUS_SPDIF] = { 0xa2c, BIT(16) },
+ [RST_BUS_DMIC] = { 0xa4c, BIT(16) },
+ [RST_BUS_AUDIO] = { 0xa5c, BIT(16) },
+ [RST_USB_PHY0] = { 0xa70, BIT(30) },
+ [RST_USB_PHY1] = { 0xa74, BIT(30) },
+ [RST_BUS_OHCI0] = { 0xa8c, BIT(16) },
+ [RST_BUS_OHCI1] = { 0xa8c, BIT(17) },
+ [RST_BUS_EHCI0] = { 0xa8c, BIT(20) },
+ [RST_BUS_EHCI1] = { 0xa8c, BIT(21) },
+ [RST_BUS_OTG] = { 0xa8c, BIT(24) },
+ [RST_BUS_LRADC] = { 0xa9c, BIT(16) },
+ [RST_BUS_DPSS_TOP] = { 0xabc, BIT(16) },
+ [RST_BUS_HDMI_MAIN] = { 0xb1c, BIT(16) },
+ [RST_BUS_HDMI_SUB] = { 0xb1c, BIT(17) },
+ [RST_BUS_MIPI_DSI] = { 0xb4c, BIT(16) },
+ [RST_BUS_TCON_LCD0] = { 0xb7c, BIT(16) },
+ [RST_BUS_TCON_TV] = { 0xb9c, BIT(16) },
+ [RST_BUS_LVDS0] = { 0xbac, BIT(16) },
+ [RST_BUS_TVE_TOP] = { 0xbbc, BIT(16) },
+ [RST_BUS_TVE] = { 0xbbc, BIT(17) },
+ [RST_BUS_TVD_TOP] = { 0xbdc, BIT(16) },
+ [RST_BUS_TVD] = { 0xbdc, BIT(17) },
+ [RST_BUS_LEDC] = { 0xbfc, BIT(16) },
+ [RST_BUS_CSI] = { 0xc1c, BIT(16) },
+ [RST_BUS_TPADC] = { 0xc5c, BIT(16) },
+ [RST_DSP] = { 0xc7c, BIT(16) },
+ [RST_BUS_DSP_CFG] = { 0xc7c, BIT(17) },
+ [RST_BUS_DSP_DBG] = { 0xc7c, BIT(18) },
+ [RST_BUS_RISCV_CFG] = { 0xd0c, BIT(16) },
+};
+
+static const struct sunxi_ccu_desc sun20i_d1_ccu_desc = {
+ .ccu_clks = sun20i_d1_ccu_clks,
+ .num_ccu_clks = ARRAY_SIZE(sun20i_d1_ccu_clks),
+
+ .hw_clks = &sun20i_d1_hw_clks,
+
+ .resets = sun20i_d1_ccu_resets,
+ .num_resets = ARRAY_SIZE(sun20i_d1_ccu_resets),
+};
+
+static const u32 pll_regs[] = {
+ SUN20I_D1_PLL_CPUX_REG,
+ SUN20I_D1_PLL_DDR0_REG,
+ SUN20I_D1_PLL_PERIPH0_REG,
+ SUN20I_D1_PLL_VIDEO0_REG,
+ SUN20I_D1_PLL_VIDEO1_REG,
+ SUN20I_D1_PLL_VE_REG,
+ SUN20I_D1_PLL_AUDIO0_REG,
+ SUN20I_D1_PLL_AUDIO1_REG,
+};
+
+static const u32 pll_video_regs[] = {
+ SUN20I_D1_PLL_VIDEO0_REG,
+ SUN20I_D1_PLL_VIDEO1_REG,
+};
+
+static struct ccu_mux_nb sun20i_d1_riscv_nb = {
+ .common = &riscv_clk.common,
+ .cm = &riscv_clk.mux,
+ .delay_us = 1,
+ .bypass_index = 4, /* index of pll-periph0 */
+};
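+/*
+ * The mux notifier switches the riscv mux to bypass_index (pll-periph0)
+ * before a PLL CPUX rate change, waits delay_us for the mux to settle, and
+ * switches it back once the new rate is stable.
+ */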
+
+static int sun20i_d1_ccu_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+ u32 val;
+ int i, ret;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+	/* Set the PLL enable, LDO enable, and lock enable bits on all PLLs. */
+ for (i = 0; i < ARRAY_SIZE(pll_regs); i++) {
+ val = readl(reg + pll_regs[i]);
+ val |= BIT(31) | BIT(30) | BIT(29);
+ writel(val, reg + pll_regs[i]);
+ }
+
+ /* Force PLL_CPUX factor M to 0. */
+ val = readl(reg + SUN20I_D1_PLL_CPUX_REG);
+ val &= ~GENMASK(1, 0);
+ writel(val, reg + SUN20I_D1_PLL_CPUX_REG);
+
+ /*
+ * Force the output divider of video PLLs to 0.
+ *
+	 * See the comment before the pll-video0 definition for the reason.
+ */
+ for (i = 0; i < ARRAY_SIZE(pll_video_regs); i++) {
+ val = readl(reg + pll_video_regs[i]);
+ val &= ~BIT(0);
+ writel(val, reg + pll_video_regs[i]);
+ }
+
+ /* Enforce m1 = 0, m0 = 0 for PLL_AUDIO0 */
+ val = readl(reg + SUN20I_D1_PLL_AUDIO0_REG);
+	val &= ~(BIT(1) | BIT(0));
+ writel(val, reg + SUN20I_D1_PLL_AUDIO0_REG);
+
+ /* Force fanout-27M factor N to 0. */
+ val = readl(reg + SUN20I_D1_FANOUT_27M_REG);
+ val &= ~GENMASK(9, 8);
+ writel(val, reg + SUN20I_D1_FANOUT_27M_REG);
+
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun20i_d1_ccu_desc);
+ if (ret)
+ return ret;
+
+	/* Reparent the RISC-V CPU during PLL CPUX rate changes */
+ ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
+ &sun20i_d1_riscv_nb);
+
+ return 0;
+}
+
+static const struct of_device_id sun20i_d1_ccu_ids[] = {
+ { .compatible = "allwinner,sun20i-d1-ccu" },
+ { }
+};
+
+static struct platform_driver sun20i_d1_ccu_driver = {
+ .probe = sun20i_d1_ccu_probe,
+ .driver = {
+ .name = "sun20i-d1-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun20i_d1_ccu_ids,
+ },
+};
+module_platform_driver(sun20i_d1_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun20i-d1.h b/drivers/clk/sunxi-ng/ccu-sun20i-d1.h
new file mode 100644
index 000000000000..e303176f0d4e
--- /dev/null
+++ b/drivers/clk/sunxi-ng/ccu-sun20i-d1.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 frank@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _CCU_SUN20I_D1_H_
+#define _CCU_SUN20I_D1_H_
+
+#include <dt-bindings/clock/sun20i-d1-ccu.h>
+#include <dt-bindings/reset/sun20i-d1-ccu.h>
+
+#define CLK_NUMBER (CLK_FANOUT2 + 1)
+
+#endif /* _CCU_SUN20I_D1_H_ */
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index bd9a8782fec3..c19828f1aa0f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -7,7 +7,9 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -1425,18 +1427,19 @@ static const struct sunxi_ccu_desc sun7i_a20_ccu_desc = {
.num_resets = ARRAY_SIZE(sunxi_a10_a20_ccu_resets),
};
-static void __init sun4i_ccu_init(struct device_node *node,
- const struct sunxi_ccu_desc *desc)
+static int sun4i_a10_ccu_probe(struct platform_device *pdev)
{
+ const struct sunxi_ccu_desc *desc;
void __iomem *reg;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%s: Could not map the clock registers\n",
- of_node_full_name(node));
- return;
- }
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
val = readl(reg + SUN4I_PLL_AUDIO_REG);
@@ -1464,19 +1467,30 @@ static void __init sun4i_ccu_init(struct device_node *node,
val &= ~GENMASK(7, 6);
writel(val | (2 << 6), reg + SUN4I_AHB_REG);
- of_sunxi_ccu_probe(node, reg, desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, desc);
}
-static void __init sun4i_a10_ccu_setup(struct device_node *node)
-{
- sun4i_ccu_init(node, &sun4i_a10_ccu_desc);
-}
-CLK_OF_DECLARE(sun4i_a10_ccu, "allwinner,sun4i-a10-ccu",
- sun4i_a10_ccu_setup);
+static const struct of_device_id sun4i_a10_ccu_ids[] = {
+ {
+ .compatible = "allwinner,sun4i-a10-ccu",
+ .data = &sun4i_a10_ccu_desc,
+ },
+ {
+ .compatible = "allwinner,sun7i-a20-ccu",
+ .data = &sun7i_a20_ccu_desc,
+ },
+ { }
+};
-static void __init sun7i_a20_ccu_setup(struct device_node *node)
-{
- sun4i_ccu_init(node, &sun7i_a20_ccu_desc);
-}
-CLK_OF_DECLARE(sun7i_a20_ccu, "allwinner,sun7i-a20-ccu",
- sun7i_a20_ccu_setup);
+static struct platform_driver sun4i_a10_ccu_driver = {
+ .probe = sun4i_a10_ccu_probe,
+ .driver = {
+ .name = "sun4i-a10-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun4i_a10_ccu_ids,
+ },
+};
+module_platform_driver(sun4i_a10_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
index 804729e0a208..fddd6c877cec 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
@@ -5,7 +5,6 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -213,3 +212,6 @@ static struct platform_driver sun50i_a100_r_ccu_driver = {
},
};
module_platform_driver(sun50i_a100_r_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
index 1d475d5a3d91..5f93b5526e13 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
@@ -6,7 +6,6 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -1275,3 +1274,6 @@ static struct platform_driver sun50i_a100_ccu_driver = {
},
};
module_platform_driver(sun50i_a100_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index a8c5a92b7d0c..41519185600a 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -5,7 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -980,4 +980,7 @@ static struct platform_driver sun50i_a64_ccu_driver = {
.of_match_table = sun50i_a64_ccu_ids,
},
};
-builtin_platform_driver(sun50i_a64_ccu_driver);
+module_platform_driver(sun50i_a64_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index f30d7eb5424d..712e103382d8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -4,7 +4,8 @@
*/
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -221,30 +222,43 @@ static const struct sunxi_ccu_desc sun50i_h616_r_ccu_desc = {
.num_resets = ARRAY_SIZE(sun50i_h616_r_ccu_resets),
};
-static void __init sunxi_r_ccu_init(struct device_node *node,
- const struct sunxi_ccu_desc *desc)
+static int sun50i_h6_r_ccu_probe(struct platform_device *pdev)
{
+ const struct sunxi_ccu_desc *desc;
void __iomem *reg;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
- of_sunxi_ccu_probe(node, reg, desc);
-}
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
-static void __init sun50i_h6_r_ccu_setup(struct device_node *node)
-{
- sunxi_r_ccu_init(node, &sun50i_h6_r_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, desc);
}
-CLK_OF_DECLARE(sun50i_h6_r_ccu, "allwinner,sun50i-h6-r-ccu",
- sun50i_h6_r_ccu_setup);
-static void __init sun50i_h616_r_ccu_setup(struct device_node *node)
-{
- sunxi_r_ccu_init(node, &sun50i_h616_r_ccu_desc);
-}
-CLK_OF_DECLARE(sun50i_h616_r_ccu, "allwinner,sun50i-h616-r-ccu",
- sun50i_h616_r_ccu_setup);
+static const struct of_device_id sun50i_h6_r_ccu_ids[] = {
+ {
+ .compatible = "allwinner,sun50i-h6-r-ccu",
+ .data = &sun50i_h6_r_ccu_desc,
+ },
+ {
+ .compatible = "allwinner,sun50i-h616-r-ccu",
+ .data = &sun50i_h616_r_ccu_desc,
+ },
+ { }
+};
+
+static struct platform_driver sun50i_h6_r_ccu_driver = {
+ .probe = sun50i_h6_r_ccu_probe,
+ .driver = {
+ .name = "sun50i-h6-r-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun50i_h6_r_ccu_ids,
+ },
+};
+module_platform_driver(sun50i_h6_r_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index e5672c10d065..1a5e418923f6 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -5,7 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -1254,4 +1254,7 @@ static struct platform_driver sun50i_h6_ccu_driver = {
.of_match_table = sun50i_h6_ccu_ids,
},
};
-builtin_platform_driver(sun50i_h6_ccu_driver);
+module_platform_driver(sun50i_h6_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index 22eb18079a15..49a2474cf314 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -7,7 +7,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -1082,17 +1082,15 @@ static const u32 usb2_clk_regs[] = {
SUN50I_H616_USB3_CLK_REG,
};
-static void __init sun50i_h616_ccu_setup(struct device_node *node)
+static int sun50i_h616_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
u32 val;
int i;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map clock registers\n", node);
- return;
- }
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Enable the lock bits and the output enable bits on all PLLs */
for (i = 0; i < ARRAY_SIZE(pll_regs); i++) {
@@ -1141,8 +1139,23 @@ static void __init sun50i_h616_ccu_setup(struct device_node *node)
val |= BIT(24);
writel(val, reg + SUN50I_H616_HDMI_CEC_CLK_REG);
- of_sunxi_ccu_probe(node, reg, &sun50i_h616_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h616_ccu_desc);
}
-CLK_OF_DECLARE(sun50i_h616_ccu, "allwinner,sun50i-h616-ccu",
- sun50i_h616_ccu_setup);
+static const struct of_device_id sun50i_h616_ccu_ids[] = {
+ { .compatible = "allwinner,sun50i-h616-ccu" },
+ { }
+};
+
+static struct platform_driver sun50i_h616_ccu_driver = {
+ .probe = sun50i_h616_ccu_probe,
+ .driver = {
+ .name = "sun50i-h616-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun50i_h616_ccu_ids,
+ },
+};
+module_platform_driver(sun50i_h616_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 3df5c0b41580..0762deffb33c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -9,7 +9,8 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -1226,16 +1227,15 @@ static struct ccu_mux_nb sun6i_a31_cpu_nb = {
.bypass_index = 1, /* index of 24 MHz oscillator */
};
-static void __init sun6i_a31_ccu_setup(struct device_node *node)
+static int sun6i_a31_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
+ int ret;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN6I_A31_PLL_AUDIO_REG);
@@ -1257,10 +1257,30 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
val |= 0x3 << 12;
writel(val, reg + SUN6I_A31_AHB1_REG);
- of_sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun6i_a31_ccu_desc);
+ if (ret)
+ return ret;
ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
&sun6i_a31_cpu_nb);
+
+ return 0;
}
-CLK_OF_DECLARE(sun6i_a31_ccu, "allwinner,sun6i-a31-ccu",
- sun6i_a31_ccu_setup);
+
+static const struct of_device_id sun6i_a31_ccu_ids[] = {
+ { .compatible = "allwinner,sun6i-a31-ccu" },
+ { }
+};
+
+static struct platform_driver sun6i_a31_ccu_driver = {
+ .probe = sun6i_a31_ccu_probe,
+ .driver = {
+ .name = "sun6i-a31-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun6i_a31_ccu_ids,
+ },
+};
+module_platform_driver(sun6i_a31_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 577bb235d658..e80cc3864e44 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -5,7 +5,8 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -724,16 +725,14 @@ static const struct sunxi_ccu_desc sun8i_a23_ccu_desc = {
.num_resets = ARRAY_SIZE(sun8i_a23_ccu_resets),
};
-static void __init sun8i_a23_ccu_setup(struct device_node *node)
+static int sun8i_a23_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_A23_PLL_AUDIO_REG);
@@ -745,7 +744,23 @@ static void __init sun8i_a23_ccu_setup(struct device_node *node)
val &= ~BIT(16);
writel(val, reg + SUN8I_A23_PLL_MIPI_REG);
- of_sunxi_ccu_probe(node, reg, &sun8i_a23_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun8i_a23_ccu_desc);
}
-CLK_OF_DECLARE(sun8i_a23_ccu, "allwinner,sun8i-a23-ccu",
- sun8i_a23_ccu_setup);
+
+static const struct of_device_id sun8i_a23_ccu_ids[] = {
+ { .compatible = "allwinner,sun8i-a23-ccu" },
+ { }
+};
+
+static struct platform_driver sun8i_a23_ccu_driver = {
+ .probe = sun8i_a23_ccu_probe,
+ .driver = {
+ .name = "sun8i-a23-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun8i_a23_ccu_ids,
+ },
+};
+module_platform_driver(sun8i_a23_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 8f65cd03f5ac..d12878a1ba9e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -5,7 +5,8 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -784,16 +785,15 @@ static struct ccu_mux_nb sun8i_a33_cpu_nb = {
.bypass_index = 1, /* index of 24 MHz oscillator */
};
-static void __init sun8i_a33_ccu_setup(struct device_node *node)
+static int sun8i_a33_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
+ int ret;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_A33_PLL_AUDIO_REG);
@@ -805,7 +805,9 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
val &= ~BIT(16);
writel(val, reg + SUN8I_A33_PLL_MIPI_REG);
- of_sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun8i_a33_ccu_desc);
+ if (ret)
+ return ret;
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&sun8i_a33_pll_cpu_nb);
@@ -813,6 +815,24 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
/* Reparent CPU during PLL CPU rate changes */
ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
&sun8i_a33_cpu_nb);
+
+ return 0;
}
-CLK_OF_DECLARE(sun8i_a33_ccu, "allwinner,sun8i-a33-ccu",
- sun8i_a33_ccu_setup);
+
+static const struct of_device_id sun8i_a33_ccu_ids[] = {
+ { .compatible = "allwinner,sun8i-a33-ccu" },
+ { }
+};
+
+static struct platform_driver sun8i_a33_ccu_driver = {
+ .probe = sun8i_a33_ccu_probe,
+ .driver = {
+ .name = "sun8i-a33-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun8i_a33_ccu_ids,
+ },
+};
+module_platform_driver(sun8i_a33_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index 3c310aea8cfa..76cbd9e9e89f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -5,7 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -920,4 +920,7 @@ static struct platform_driver sun8i_a83t_ccu_driver = {
.of_match_table = sun8i_a83t_ccu_ids,
},
};
-builtin_platform_driver(sun8i_a83t_ccu_driver);
+module_platform_driver(sun8i_a83t_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 573b5051d305..e7e3ddf4a227 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -5,8 +5,8 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -394,4 +394,7 @@ static struct platform_driver sunxi_de2_clk_driver = {
.of_match_table = sunxi_de2_clk_ids,
},
};
-builtin_platform_driver(sunxi_de2_clk_driver);
+module_platform_driver(sunxi_de2_clk_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index d2fc2903787d..e058cf691aea 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -5,7 +5,9 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -1137,24 +1139,29 @@ static struct ccu_mux_nb sun8i_h3_cpu_nb = {
.bypass_index = 1, /* index of 24 MHz oscillator */
};
-static void __init sunxi_h3_h5_ccu_init(struct device_node *node,
- const struct sunxi_ccu_desc *desc)
+static int sun8i_h3_ccu_probe(struct platform_device *pdev)
{
+ const struct sunxi_ccu_desc *desc;
void __iomem *reg;
+ int ret;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_H3_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
writel(val | (0 << 16), reg + SUN8I_H3_PLL_AUDIO_REG);
- of_sunxi_ccu_probe(node, reg, desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, desc);
+ if (ret)
+ return ret;
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&sun8i_h3_pll_cpu_nb);
@@ -1162,18 +1169,31 @@ static void __init sunxi_h3_h5_ccu_init(struct device_node *node,
/* Reparent CPU during PLL CPU rate changes */
ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
&sun8i_h3_cpu_nb);
-}
-static void __init sun8i_h3_ccu_setup(struct device_node *node)
-{
- sunxi_h3_h5_ccu_init(node, &sun8i_h3_ccu_desc);
+ return 0;
}
-CLK_OF_DECLARE(sun8i_h3_ccu, "allwinner,sun8i-h3-ccu",
- sun8i_h3_ccu_setup);
-static void __init sun50i_h5_ccu_setup(struct device_node *node)
-{
- sunxi_h3_h5_ccu_init(node, &sun50i_h5_ccu_desc);
-}
-CLK_OF_DECLARE(sun50i_h5_ccu, "allwinner,sun50i-h5-ccu",
- sun50i_h5_ccu_setup);
+static const struct of_device_id sun8i_h3_ccu_ids[] = {
+ {
+ .compatible = "allwinner,sun8i-h3-ccu",
+ .data = &sun8i_h3_ccu_desc,
+ },
+ {
+ .compatible = "allwinner,sun50i-h5-ccu",
+ .data = &sun50i_h5_ccu_desc,
+ },
+ { }
+};
+
+static struct platform_driver sun8i_h3_ccu_driver = {
+ .probe = sun8i_h3_ccu_probe,
+ .driver = {
+ .name = "sun8i-h3-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun8i_h3_ccu_ids,
+ },
+};
+module_platform_driver(sun8i_h3_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index 9e754d1f754a..5b7fab832a52 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -4,7 +4,8 @@
*/
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -254,37 +255,47 @@ static const struct sunxi_ccu_desc sun50i_a64_r_ccu_desc = {
.num_resets = ARRAY_SIZE(sun50i_a64_r_ccu_resets),
};
-static void __init sunxi_r_ccu_init(struct device_node *node,
- const struct sunxi_ccu_desc *desc)
+static int sun8i_r_ccu_probe(struct platform_device *pdev)
{
+ const struct sunxi_ccu_desc *desc;
void __iomem *reg;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
- of_sunxi_ccu_probe(node, reg, desc);
-}
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
-static void __init sun8i_a83t_r_ccu_setup(struct device_node *node)
-{
- sunxi_r_ccu_init(node, &sun8i_a83t_r_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, desc);
}
-CLK_OF_DECLARE(sun8i_a83t_r_ccu, "allwinner,sun8i-a83t-r-ccu",
- sun8i_a83t_r_ccu_setup);
-static void __init sun8i_h3_r_ccu_setup(struct device_node *node)
-{
- sunxi_r_ccu_init(node, &sun8i_h3_r_ccu_desc);
-}
-CLK_OF_DECLARE(sun8i_h3_r_ccu, "allwinner,sun8i-h3-r-ccu",
- sun8i_h3_r_ccu_setup);
+static const struct of_device_id sun8i_r_ccu_ids[] = {
+ {
+ .compatible = "allwinner,sun8i-a83t-r-ccu",
+ .data = &sun8i_a83t_r_ccu_desc,
+ },
+ {
+ .compatible = "allwinner,sun8i-h3-r-ccu",
+ .data = &sun8i_h3_r_ccu_desc,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-r-ccu",
+ .data = &sun50i_a64_r_ccu_desc,
+ },
+ { }
+};
-static void __init sun50i_a64_r_ccu_setup(struct device_node *node)
-{
- sunxi_r_ccu_init(node, &sun50i_a64_r_ccu_desc);
-}
-CLK_OF_DECLARE(sun50i_a64_r_ccu, "allwinner,sun50i-a64-r-ccu",
- sun50i_a64_r_ccu_setup);
+static struct platform_driver sun8i_r_ccu_driver = {
+ .probe = sun8i_r_ccu_probe,
+ .driver = {
+ .name = "sun8i-r-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun8i_r_ccu_ids,
+ },
+};
+module_platform_driver(sun8i_r_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 8bb18d9add05..31eca0d3bc1e 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -5,6 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -1371,4 +1372,7 @@ static struct platform_driver sun8i_r40_ccu_driver = {
.of_match_table = sun8i_r40_ccu_ids,
},
};
-builtin_platform_driver(sun8i_r40_ccu_driver);
+module_platform_driver(sun8i_r40_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index ce150f83ab54..87f87d6ea3ad 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -8,7 +8,9 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -805,38 +807,49 @@ static const struct sunxi_ccu_desc sun8i_v3_ccu_desc = {
.num_resets = ARRAY_SIZE(sun8i_v3_ccu_resets),
};
-static void __init sun8i_v3_v3s_ccu_init(struct device_node *node,
- const struct sunxi_ccu_desc *ccu_desc)
+static int sun8i_v3s_ccu_probe(struct platform_device *pdev)
{
+ const struct sunxi_ccu_desc *desc;
void __iomem *reg;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ desc = of_device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 1 */
val = readl(reg + SUN8I_V3S_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
writel(val, reg + SUN8I_V3S_PLL_AUDIO_REG);
- of_sunxi_ccu_probe(node, reg, ccu_desc);
-}
-
-static void __init sun8i_v3s_ccu_setup(struct device_node *node)
-{
- sun8i_v3_v3s_ccu_init(node, &sun8i_v3s_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, desc);
}
-static void __init sun8i_v3_ccu_setup(struct device_node *node)
-{
- sun8i_v3_v3s_ccu_init(node, &sun8i_v3_ccu_desc);
-}
+static const struct of_device_id sun8i_v3s_ccu_ids[] = {
+ {
+ .compatible = "allwinner,sun8i-v3-ccu",
+ .data = &sun8i_v3_ccu_desc,
+ },
+ {
+ .compatible = "allwinner,sun8i-v3s-ccu",
+ .data = &sun8i_v3s_ccu_desc,
+ },
+ { }
+};
-CLK_OF_DECLARE(sun8i_v3s_ccu, "allwinner,sun8i-v3s-ccu",
- sun8i_v3s_ccu_setup);
+static struct platform_driver sun8i_v3s_ccu_driver = {
+ .probe = sun8i_v3s_ccu_probe,
+ .driver = {
+ .name = "sun8i-v3s-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = sun8i_v3s_ccu_ids,
+ },
+};
+module_platform_driver(sun8i_v3s_ccu_driver);
-CLK_OF_DECLARE(sun8i_v3_ccu, "allwinner,sun8i-v3-ccu",
- sun8i_v3_ccu_setup);
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
index 3cde2610f467..f2fe0e1cc3c0 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
@@ -5,7 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -270,4 +270,7 @@ static struct platform_driver sun9i_a80_de_clk_driver = {
.of_match_table = sun9i_a80_de_clk_ids,
},
};
-builtin_platform_driver(sun9i_a80_de_clk_driver);
+module_platform_driver(sun9i_a80_de_clk_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
index 0740e8978ae8..575ae4ccc65f 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
@@ -5,7 +5,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -138,4 +138,7 @@ static struct platform_driver sun9i_a80_usb_clk_driver = {
.of_match_table = sun9i_a80_usb_clk_ids,
},
};
-builtin_platform_driver(sun9i_a80_usb_clk_driver);
+module_platform_driver(sun9i_a80_usb_clk_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index d416af29e0d3..730fd8e28014 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -5,7 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include "ccu_common.h"
@@ -1245,4 +1245,7 @@ static struct platform_driver sun9i_a80_ccu_driver = {
.of_match_table = sun9i_a80_ccu_ids,
},
};
-builtin_platform_driver(sun9i_a80_ccu_driver);
+module_platform_driver(sun9i_a80_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
index 61ad7ee91c11..ed097c4f780f 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
@@ -6,7 +6,8 @@
#include <linux/clk-provider.h>
#include <linux/io.h>
-#include <linux/of_address.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
#include "ccu_common.h"
#include "ccu_reset.h"
@@ -522,23 +523,24 @@ static struct ccu_mux_nb suniv_cpu_nb = {
.bypass_index = 1, /* index of 24 MHz oscillator */
};
-static void __init suniv_f1c100s_ccu_setup(struct device_node *node)
+static int suniv_f1c100s_ccu_probe(struct platform_device *pdev)
{
void __iomem *reg;
+ int ret;
u32 val;
- reg = of_io_request_and_map(node, 0, of_node_full_name(node));
- if (IS_ERR(reg)) {
- pr_err("%pOF: Could not map the clock registers\n", node);
- return;
- }
+ reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
/* Force the PLL-Audio-1x divider to 4 */
val = readl(reg + SUNIV_PLL_AUDIO_REG);
val &= ~GENMASK(19, 16);
writel(val | (3 << 16), reg + SUNIV_PLL_AUDIO_REG);
- of_sunxi_ccu_probe(node, reg, &suniv_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &suniv_ccu_desc);
+ if (ret)
+ return ret;
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&suniv_pll_cpu_nb);
@@ -546,6 +548,24 @@ static void __init suniv_f1c100s_ccu_setup(struct device_node *node)
/* Reparent CPU during PLL CPU rate changes */
ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
&suniv_cpu_nb);
+
+ return 0;
}
-CLK_OF_DECLARE(suniv_f1c100s_ccu, "allwinner,suniv-f1c100s-ccu",
- suniv_f1c100s_ccu_setup);
+
+static const struct of_device_id suniv_f1c100s_ccu_ids[] = {
+ { .compatible = "allwinner,suniv-f1c100s-ccu" },
+ { }
+};
+
+static struct platform_driver suniv_f1c100s_ccu_driver = {
+ .probe = suniv_f1c100s_ccu_probe,
+ .driver = {
+ .name = "suniv-f1c100s-ccu",
+ .suppress_bind_attrs = true,
+ .of_match_table = suniv_f1c100s_ccu_ids,
+ },
+};
+module_platform_driver(suniv_f1c100s_ccu_driver);
+
+MODULE_IMPORT_NS(SUNXI_CCU);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 31af8b6b5286..8d28a7a079d0 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -9,6 +9,7 @@
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/iopoll.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include "ccu_common.h"
@@ -36,6 +37,7 @@ void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
WARN_ON(readl_relaxed_poll_timeout(addr, reg, reg & lock, 100, 70000));
}
+EXPORT_SYMBOL_NS_GPL(ccu_helper_wait_for_lock, SUNXI_CCU);
/*
* This clock notifier is called when the frequency of a PLL clock is
@@ -83,6 +85,7 @@ int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
return clk_notifier_register(pll_nb->common->hw.clk,
&pll_nb->clk_nb);
}
+EXPORT_SYMBOL_NS_GPL(ccu_pll_notifier_register, SUNXI_CCU);
static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
struct device_node *node, void __iomem *reg,
@@ -194,6 +197,7 @@ int devm_sunxi_ccu_probe(struct device *dev, void __iomem *reg,
return 0;
}
+EXPORT_SYMBOL_NS_GPL(devm_sunxi_ccu_probe, SUNXI_CCU);
void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
const struct sunxi_ccu_desc *desc)
@@ -211,3 +215,5 @@ void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
kfree(ccu);
}
}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/sunxi-ng/ccu_div.c b/drivers/clk/sunxi-ng/ccu_div.c
index 4c297089483c..cb10a3ea23f9 100644
--- a/drivers/clk/sunxi-ng/ccu_div.c
+++ b/drivers/clk/sunxi-ng/ccu_div.c
@@ -141,3 +141,4 @@ const struct clk_ops ccu_div_ops = {
.recalc_rate = ccu_div_recalc_rate,
.set_rate = ccu_div_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_div_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_div.h b/drivers/clk/sunxi-ng/ccu_div.h
index 6682fde6043c..948e2b0c0c3b 100644
--- a/drivers/clk/sunxi-ng/ccu_div.h
+++ b/drivers/clk/sunxi-ng/ccu_div.h
@@ -108,6 +108,22 @@ struct ccu_div {
_shift, _width, _table, 0, \
_flags)
+#define SUNXI_CCU_DIV_TABLE_HW(_struct, _name, _parent, _reg, \
+ _shift, _width, \
+ _table, _flags) \
+ struct ccu_div _struct = { \
+ .div = _SUNXI_CCU_DIV_TABLE(_shift, _width, \
+ _table), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_HW(_name, \
+ _parent, \
+ &ccu_div_ops, \
+ _flags), \
+ } \
+ }
+
#define SUNXI_CCU_M_WITH_MUX_TABLE_GATE(_struct, _name, \
_parents, _table, \
_reg, \
@@ -166,6 +182,68 @@ struct ccu_div {
SUNXI_CCU_M_WITH_GATE(_struct, _name, _parent, _reg, \
_mshift, _mwidth, 0, _flags)
+#define SUNXI_CCU_M_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_div _struct = { \
+ .enable = _gate, \
+ .div = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_div_ops, \
+ _flags), \
+ }, \
+ }
+
+#define SUNXI_CCU_M_DATA_WITH_MUX(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ _flags) \
+ SUNXI_CCU_M_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _muxshift, _muxwidth, \
+ 0, _flags)
+
+#define SUNXI_CCU_M_HW_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_div _struct = { \
+ .enable = _gate, \
+ .div = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_HW(_name, \
+ _parents, \
+ &ccu_div_ops, \
+ _flags), \
+ }, \
+ }
+
+#define SUNXI_CCU_M_HWS_WITH_GATE(_struct, _name, _parent, _reg, \
+ _mshift, _mwidth, _gate, \
+ _flags) \
+ struct ccu_div _struct = { \
+ .enable = _gate, \
+ .div = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_HWS(_name, \
+ _parent, \
+ &ccu_div_ops, \
+ _flags), \
+ }, \
+ }
+
+#define SUNXI_CCU_M_HWS(_struct, _name, _parent, _reg, _mshift, \
+ _mwidth, _flags) \
+ SUNXI_CCU_M_HWS_WITH_GATE(_struct, _name, _parent, _reg, \
+ _mshift, _mwidth, 0, _flags)
+
static inline struct ccu_div *hw_to_ccu_div(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
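
The new _DATA and _HW macro variants are the key to the modularization: they describe parents through clk_parent_data or clk_hw references (CLK_HW_INIT_PARENTS_DATA/_HW) instead of global string names, so the clocks can be registered against a real device. A hypothetical divider declared with the new mux+gate variant would look like:

static const struct clk_parent_data demo_parents[] = {	/* hypothetical */
	{ .fw_name = "hosc" },
	{ .fw_name = "pll-periph" },
};

static SUNXI_CCU_M_DATA_WITH_MUX_GATE(demo_clk, "demo", demo_parents,
				      0x060,
				      0, 4,		/* M divider field */
				      24, 2,		/* mux field */
				      BIT(31),		/* gate bit */
				      0);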
diff --git a/drivers/clk/sunxi-ng/ccu_frac.c b/drivers/clk/sunxi-ng/ccu_frac.c
index 44fcded8b354..b31f3ad946d6 100644
--- a/drivers/clk/sunxi-ng/ccu_frac.c
+++ b/drivers/clk/sunxi-ng/ccu_frac.c
@@ -18,6 +18,7 @@ bool ccu_frac_helper_is_enabled(struct ccu_common *common,
return !(readl(common->base + common->reg) & cf->enable);
}
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_is_enabled, SUNXI_CCU);
void ccu_frac_helper_enable(struct ccu_common *common,
struct ccu_frac_internal *cf)
@@ -33,6 +34,7 @@ void ccu_frac_helper_enable(struct ccu_common *common,
writel(reg & ~cf->enable, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_enable, SUNXI_CCU);
void ccu_frac_helper_disable(struct ccu_common *common,
struct ccu_frac_internal *cf)
@@ -48,6 +50,7 @@ void ccu_frac_helper_disable(struct ccu_common *common,
writel(reg | cf->enable, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_disable, SUNXI_CCU);
bool ccu_frac_helper_has_rate(struct ccu_common *common,
struct ccu_frac_internal *cf,
@@ -58,6 +61,7 @@ bool ccu_frac_helper_has_rate(struct ccu_common *common,
return (cf->rates[0] == rate) || (cf->rates[1] == rate);
}
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_has_rate, SUNXI_CCU);
unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
struct ccu_frac_internal *cf)
@@ -79,6 +83,7 @@ unsigned long ccu_frac_helper_read_rate(struct ccu_common *common,
return (reg & cf->select) ? cf->rates[1] : cf->rates[0];
}
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_read_rate, SUNXI_CCU);
int ccu_frac_helper_set_rate(struct ccu_common *common,
struct ccu_frac_internal *cf,
@@ -107,3 +112,4 @@ int ccu_frac_helper_set_rate(struct ccu_common *common,
return 0;
}
+EXPORT_SYMBOL_NS_GPL(ccu_frac_helper_set_rate, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_gate.c b/drivers/clk/sunxi-ng/ccu_gate.c
index 3d5ca092b08f..a2115a21807d 100644
--- a/drivers/clk/sunxi-ng/ccu_gate.c
+++ b/drivers/clk/sunxi-ng/ccu_gate.c
@@ -24,6 +24,7 @@ void ccu_gate_helper_disable(struct ccu_common *common, u32 gate)
spin_unlock_irqrestore(common->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_disable, SUNXI_CCU);
static void ccu_gate_disable(struct clk_hw *hw)
{
@@ -49,6 +50,7 @@ int ccu_gate_helper_enable(struct ccu_common *common, u32 gate)
return 0;
}
+EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_enable, SUNXI_CCU);
static int ccu_gate_enable(struct clk_hw *hw)
{
@@ -64,6 +66,7 @@ int ccu_gate_helper_is_enabled(struct ccu_common *common, u32 gate)
return readl(common->base + common->reg) & gate;
}
+EXPORT_SYMBOL_NS_GPL(ccu_gate_helper_is_enabled, SUNXI_CCU);
static int ccu_gate_is_enabled(struct clk_hw *hw)
{
@@ -124,3 +127,4 @@ const struct clk_ops ccu_gate_ops = {
.set_rate = ccu_gate_set_rate,
.recalc_rate = ccu_gate_recalc_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_gate_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_gate.h b/drivers/clk/sunxi-ng/ccu_gate.h
index c386689a952b..dc05ce06737a 100644
--- a/drivers/clk/sunxi-ng/ccu_gate.h
+++ b/drivers/clk/sunxi-ng/ccu_gate.h
@@ -53,7 +53,7 @@ struct ccu_gate {
}
/*
- * The following two macros allow the re-use of the data structure
+ * The following macros allow the re-use of the data structure
* holding the parent info.
*/
#define SUNXI_CCU_GATE_HWS(_struct, _name, _parent, _reg, _gate, _flags) \
@@ -68,6 +68,21 @@ struct ccu_gate {
} \
}
+#define SUNXI_CCU_GATE_HWS_WITH_PREDIV(_struct, _name, _parent, _reg, \
+ _gate, _prediv, _flags) \
+ struct ccu_gate _struct = { \
+ .enable = _gate, \
+ .common = { \
+ .reg = _reg, \
+ .prediv = _prediv, \
+ .features = CCU_FEATURE_ALL_PREDIV, \
+ .hw.init = CLK_HW_INIT_HWS(_name, \
+ _parent, \
+ &ccu_gate_ops, \
+ _flags), \
+ } \
+ }
+
#define SUNXI_CCU_GATE_DATA(_struct, _name, _data, _reg, _gate, _flags) \
struct ccu_gate _struct = { \
.enable = _gate, \
@@ -81,6 +96,21 @@ struct ccu_gate {
} \
}
+#define SUNXI_CCU_GATE_DATA_WITH_PREDIV(_struct, _name, _parent, _reg, \
+ _gate, _prediv, _flags) \
+ struct ccu_gate _struct = { \
+ .enable = _gate, \
+ .common = { \
+ .reg = _reg, \
+ .prediv = _prediv, \
+ .features = CCU_FEATURE_ALL_PREDIV, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parent, \
+ &ccu_gate_ops, \
+ _flags), \
+ } \
+ }
+
static inline struct ccu_gate *hw_to_ccu_gate(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
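
The _WITH_PREDIV variants additionally set CCU_FEATURE_ALL_PREDIV, so the common code divides the parent rate by a fixed pre-divider before the gate exposes it. A hypothetical gate whose parent rate is halved:

/* some_pll is assumed to be a previously defined ccu clock */
static const struct clk_hw *demo_parent[] = { &some_pll.common.hw };

static SUNXI_CCU_GATE_HWS_WITH_PREDIV(demo_gate_clk, "demo-gate",
				      demo_parent, 0x060,
				      BIT(5),		/* gate bit */
				      2,		/* fixed prediv */
				      0);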
diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c
index 9d3a76604d94..57cf2d615148 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.c
+++ b/drivers/clk/sunxi-ng/ccu_mp.c
@@ -245,6 +245,7 @@ const struct clk_ops ccu_mp_ops = {
.recalc_rate = ccu_mp_recalc_rate,
.set_rate = ccu_mp_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_mp_ops, SUNXI_CCU);
/*
* Support for MMC timing mode switching
@@ -325,3 +326,4 @@ const struct clk_ops ccu_mp_mmc_ops = {
.recalc_rate = ccu_mp_mmc_recalc_rate,
.set_rate = ccu_mp_mmc_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_mp_mmc_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_mp.h b/drivers/clk/sunxi-ng/ccu_mp.h
index b392e0d575b5..6e50f3728fb5 100644
--- a/drivers/clk/sunxi-ng/ccu_mp.h
+++ b/drivers/clk/sunxi-ng/ccu_mp.h
@@ -82,6 +82,55 @@ struct ccu_mp {
_muxshift, _muxwidth, \
0, _flags)
+#define SUNXI_CCU_MP_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_mp _struct = { \
+ .enable = _gate, \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_mp_ops, \
+ _flags), \
+ } \
+ }
+
+#define SUNXI_CCU_MP_DATA_WITH_MUX(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _flags) \
+ SUNXI_CCU_MP_DATA_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ 0, _flags)
+
+#define SUNXI_CCU_MP_HW_WITH_MUX_GATE(_struct, _name, _parents, _reg, \
+ _mshift, _mwidth, \
+ _pshift, _pwidth, \
+ _muxshift, _muxwidth, \
+ _gate, _flags) \
+ struct ccu_mp _struct = { \
+ .enable = _gate, \
+ .m = _SUNXI_CCU_DIV(_mshift, _mwidth), \
+ .p = _SUNXI_CCU_DIV(_pshift, _pwidth), \
+ .mux = _SUNXI_CCU_MUX(_muxshift, _muxwidth), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_HW(_name, \
+ _parents, \
+ &ccu_mp_ops, \
+ _flags), \
+ } \
+ }
+
static inline struct ccu_mp *hw_to_ccu_mp(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
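
For reference, an MP clock combines a linear M divider (register value plus one) with a power-of-two P divider, so the output is the parent rate shifted right by P and divided by M + 1. A hedged sketch of the rate rule these macros encode:

/* sketch of the ccu_mp rate rule, not kernel code */
static unsigned long demo_mp_rate(unsigned long parent_rate,
				  u32 m_field, u32 p_field)
{
	return (parent_rate >> p_field) / (m_field + 1);
}

/* e.g. parent = 600 MHz, M field = 2, P field = 1 -> 600 / 3 / 2 = 100 MHz */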
diff --git a/drivers/clk/sunxi-ng/ccu_mult.c b/drivers/clk/sunxi-ng/ccu_mult.c
index 7c8cf2e04e94..7bee217ef111 100644
--- a/drivers/clk/sunxi-ng/ccu_mult.c
+++ b/drivers/clk/sunxi-ng/ccu_mult.c
@@ -170,3 +170,4 @@ const struct clk_ops ccu_mult_ops = {
.recalc_rate = ccu_mult_recalc_rate,
.set_rate = ccu_mult_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_mult_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_mux.c b/drivers/clk/sunxi-ng/ccu_mux.c
index 7d75da9a1f2e..2306a1cd83e4 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.c
+++ b/drivers/clk/sunxi-ng/ccu_mux.c
@@ -64,6 +64,7 @@ unsigned long ccu_mux_helper_apply_prediv(struct ccu_common *common,
{
return parent_rate / ccu_mux_get_prediv(common, cm, parent_index);
}
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_apply_prediv, SUNXI_CCU);
static unsigned long ccu_mux_helper_unapply_prediv(struct ccu_common *common,
struct ccu_mux_internal *cm,
@@ -152,6 +153,7 @@ out:
req->rate = best_rate;
return 0;
}
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_determine_rate, SUNXI_CCU);
u8 ccu_mux_helper_get_parent(struct ccu_common *common,
struct ccu_mux_internal *cm)
@@ -174,6 +176,7 @@ u8 ccu_mux_helper_get_parent(struct ccu_common *common,
return parent;
}
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_get_parent, SUNXI_CCU);
int ccu_mux_helper_set_parent(struct ccu_common *common,
struct ccu_mux_internal *cm,
@@ -195,6 +198,7 @@ int ccu_mux_helper_set_parent(struct ccu_common *common,
return 0;
}
+EXPORT_SYMBOL_NS_GPL(ccu_mux_helper_set_parent, SUNXI_CCU);
static void ccu_mux_disable(struct clk_hw *hw)
{
@@ -251,6 +255,7 @@ const struct clk_ops ccu_mux_ops = {
.determine_rate = __clk_mux_determine_rate,
.recalc_rate = ccu_mux_recalc_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_mux_ops, SUNXI_CCU);
/*
* This clock notifier is called when the frequency of the parent
@@ -285,3 +290,4 @@ int ccu_mux_notifier_register(struct clk *clk, struct ccu_mux_nb *mux_nb)
return clk_notifier_register(clk, &mux_nb->clk_nb);
}
+EXPORT_SYMBOL_NS_GPL(ccu_mux_notifier_register, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_mux.h b/drivers/clk/sunxi-ng/ccu_mux.h
index e31efc509b3d..2c1811a445b0 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.h
+++ b/drivers/clk/sunxi-ng/ccu_mux.h
@@ -72,6 +72,39 @@ struct ccu_mux {
SUNXI_CCU_MUX_TABLE_WITH_GATE(_struct, _name, _parents, NULL, \
_reg, _shift, _width, 0, _flags)
+#define SUNXI_CCU_MUX_DATA_WITH_GATE(_struct, _name, _parents, _reg, \
+ _shift, _width, _gate, _flags) \
+ struct ccu_mux _struct = { \
+ .enable = _gate, \
+ .mux = _SUNXI_CCU_MUX(_shift, _width), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_DATA(_name, \
+ _parents, \
+ &ccu_mux_ops, \
+ _flags), \
+ } \
+ }
+
+#define SUNXI_CCU_MUX_DATA(_struct, _name, _parents, _reg, \
+ _shift, _width, _flags) \
+ SUNXI_CCU_MUX_DATA_WITH_GATE(_struct, _name, _parents, _reg, \
+ _shift, _width, 0, _flags)
+
+#define SUNXI_CCU_MUX_HW_WITH_GATE(_struct, _name, _parents, _reg, \
+ _shift, _width, _gate, _flags) \
+ struct ccu_mux _struct = { \
+ .enable = _gate, \
+ .mux = _SUNXI_CCU_MUX(_shift, _width), \
+ .common = { \
+ .reg = _reg, \
+ .hw.init = CLK_HW_INIT_PARENTS_HW(_name, \
+ _parents, \
+ &ccu_mux_ops, \
+ _flags), \
+ } \
+ }
+
static inline struct ccu_mux *hw_to_ccu_mux(struct clk_hw *hw)
{
struct ccu_common *common = hw_to_ccu_common(hw);
diff --git a/drivers/clk/sunxi-ng/ccu_nk.c b/drivers/clk/sunxi-ng/ccu_nk.c
index aee68b00f3b2..c4fb82af97e8 100644
--- a/drivers/clk/sunxi-ng/ccu_nk.c
+++ b/drivers/clk/sunxi-ng/ccu_nk.c
@@ -157,3 +157,4 @@ const struct clk_ops ccu_nk_ops = {
.round_rate = ccu_nk_round_rate,
.set_rate = ccu_nk_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_nk_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_nkm.c b/drivers/clk/sunxi-ng/ccu_nkm.c
index b9cfee0276ea..67da2c189b53 100644
--- a/drivers/clk/sunxi-ng/ccu_nkm.c
+++ b/drivers/clk/sunxi-ng/ccu_nkm.c
@@ -206,3 +206,4 @@ const struct clk_ops ccu_nkm_ops = {
.recalc_rate = ccu_nkm_recalc_rate,
.set_rate = ccu_nkm_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_nkm_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_nkmp.c b/drivers/clk/sunxi-ng/ccu_nkmp.c
index bda87b38c45c..39413cb0985c 100644
--- a/drivers/clk/sunxi-ng/ccu_nkmp.c
+++ b/drivers/clk/sunxi-ng/ccu_nkmp.c
@@ -230,3 +230,4 @@ const struct clk_ops ccu_nkmp_ops = {
.round_rate = ccu_nkmp_round_rate,
.set_rate = ccu_nkmp_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_nkmp_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_nm.c b/drivers/clk/sunxi-ng/ccu_nm.c
index e6bcc0a7170c..9ca9257f4426 100644
--- a/drivers/clk/sunxi-ng/ccu_nm.c
+++ b/drivers/clk/sunxi-ng/ccu_nm.c
@@ -238,3 +238,4 @@ const struct clk_ops ccu_nm_ops = {
.round_rate = ccu_nm_round_rate,
.set_rate = ccu_nm_set_rate,
};
+EXPORT_SYMBOL_NS_GPL(ccu_nm_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_phase.c b/drivers/clk/sunxi-ng/ccu_phase.c
index 92ab8bd66427..e4cae2afe9db 100644
--- a/drivers/clk/sunxi-ng/ccu_phase.c
+++ b/drivers/clk/sunxi-ng/ccu_phase.c
@@ -121,3 +121,4 @@ const struct clk_ops ccu_phase_ops = {
.get_phase = ccu_phase_get_phase,
.set_phase = ccu_phase_set_phase,
};
+EXPORT_SYMBOL_NS_GPL(ccu_phase_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_reset.c b/drivers/clk/sunxi-ng/ccu_reset.c
index 483100e45df3..6577aa18cb01 100644
--- a/drivers/clk/sunxi-ng/ccu_reset.c
+++ b/drivers/clk/sunxi-ng/ccu_reset.c
@@ -75,3 +75,4 @@ const struct reset_control_ops ccu_reset_ops = {
.reset = ccu_reset_reset,
.status = ccu_reset_status,
};
+EXPORT_SYMBOL_NS_GPL(ccu_reset_ops, SUNXI_CCU);
diff --git a/drivers/clk/sunxi-ng/ccu_sdm.c b/drivers/clk/sunxi-ng/ccu_sdm.c
index 79581a1c649a..41937ed0766d 100644
--- a/drivers/clk/sunxi-ng/ccu_sdm.c
+++ b/drivers/clk/sunxi-ng/ccu_sdm.c
@@ -20,6 +20,7 @@ bool ccu_sdm_helper_is_enabled(struct ccu_common *common,
return !!(readl(common->base + sdm->tuning_reg) & sdm->tuning_enable);
}
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_is_enabled, SUNXI_CCU);
void ccu_sdm_helper_enable(struct ccu_common *common,
struct ccu_sdm_internal *sdm,
@@ -49,6 +50,7 @@ void ccu_sdm_helper_enable(struct ccu_common *common,
writel(reg | sdm->enable, common->base + common->reg);
spin_unlock_irqrestore(common->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_enable, SUNXI_CCU);
void ccu_sdm_helper_disable(struct ccu_common *common,
struct ccu_sdm_internal *sdm)
@@ -69,6 +71,7 @@ void ccu_sdm_helper_disable(struct ccu_common *common,
writel(reg & ~sdm->tuning_enable, common->base + sdm->tuning_reg);
spin_unlock_irqrestore(common->lock, flags);
}
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_disable, SUNXI_CCU);
/*
* Sigma delta modulation provides a way to do fractional-N frequency
@@ -102,6 +105,7 @@ bool ccu_sdm_helper_has_rate(struct ccu_common *common,
return false;
}
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_has_rate, SUNXI_CCU);
unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
struct ccu_sdm_internal *sdm,
@@ -132,6 +136,7 @@ unsigned long ccu_sdm_helper_read_rate(struct ccu_common *common,
/* We can't calculate the effective clock rate, so just fail. */
return 0;
}
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_read_rate, SUNXI_CCU);
int ccu_sdm_helper_get_factors(struct ccu_common *common,
struct ccu_sdm_internal *sdm,
@@ -153,3 +158,4 @@ int ccu_sdm_helper_get_factors(struct ccu_common *common,
/* nothing found */
return -EINVAL;
}
+EXPORT_SYMBOL_NS_GPL(ccu_sdm_helper_get_factors, SUNXI_CCU);
diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile
index 7b1816856eb5..a0715cdfc1a4 100644
--- a/drivers/clk/tegra/Makefile
+++ b/drivers/clk/tegra/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-y += clk.o
obj-y += clk-audio-sync.o
+obj-y += clk-device.o
obj-y += clk-dfll.o
obj-y += clk-divider.o
obj-y += clk-periph.o
diff --git a/drivers/clk/tegra/clk-device.c b/drivers/clk/tegra/clk-device.c
new file mode 100644
index 000000000000..c58beaf8afbc
--- /dev/null
+++ b/drivers/clk/tegra/clk-device.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_opp.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+
+#include <soc/tegra/common.h>
+
+#include "clk.h"
+
+/*
+ * This driver manages the performance state of the core power domain for
+ * the independent PLLs and system clocks. A virtual clock device is
+ * created for each such clock, see tegra_clk_dev_register().
+ */
+
+struct tegra_clk_device {
+ struct notifier_block clk_nb;
+ struct device *dev;
+ struct clk_hw *hw;
+ struct mutex lock;
+};
+
+static int tegra_clock_set_pd_state(struct tegra_clk_device *clk_dev,
+ unsigned long rate)
+{
+ struct device *dev = clk_dev->dev;
+ struct dev_pm_opp *opp;
+ unsigned int pstate;
+
+ opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+ if (opp == ERR_PTR(-ERANGE)) {
+ /*
+		 * Some clocks may be unused by a particular board and may be
+		 * left with an uninitialized clock rate that is overly high.
+		 * In this case the clock is expected to be disabled, but we
+		 * still need to set up the performance state of the power
+		 * domain rather than fail clk initialization. A typical
+		 * example is a PCIe clock on Android tablets.
+ */
+ dev_dbg(dev, "failed to find ceil OPP for %luHz\n", rate);
+ opp = dev_pm_opp_find_freq_floor(dev, &rate);
+ }
+
+ if (IS_ERR(opp)) {
+ dev_err(dev, "failed to find OPP for %luHz: %pe\n", rate, opp);
+ return PTR_ERR(opp);
+ }
+
+ pstate = dev_pm_opp_get_required_pstate(opp, 0);
+ dev_pm_opp_put(opp);
+
+ return dev_pm_genpd_set_performance_state(dev, pstate);
+}
+
+static int tegra_clock_change_notify(struct notifier_block *nb,
+ unsigned long msg, void *data)
+{
+ struct clk_notifier_data *cnd = data;
+ struct tegra_clk_device *clk_dev;
+ int err = 0;
+
+ clk_dev = container_of(nb, struct tegra_clk_device, clk_nb);
+
+ mutex_lock(&clk_dev->lock);
+ switch (msg) {
+ case PRE_RATE_CHANGE:
+ if (cnd->new_rate > cnd->old_rate)
+ err = tegra_clock_set_pd_state(clk_dev, cnd->new_rate);
+ break;
+
+ case ABORT_RATE_CHANGE:
+ err = tegra_clock_set_pd_state(clk_dev, cnd->old_rate);
+ break;
+
+ case POST_RATE_CHANGE:
+ if (cnd->new_rate < cnd->old_rate)
+ err = tegra_clock_set_pd_state(clk_dev, cnd->new_rate);
+ break;
+
+ default:
+ break;
+ }
+ mutex_unlock(&clk_dev->lock);
+
+ return notifier_from_errno(err);
+}
+
+static int tegra_clock_sync_pd_state(struct tegra_clk_device *clk_dev)
+{
+ unsigned long rate;
+ int ret;
+
+ mutex_lock(&clk_dev->lock);
+
+ rate = clk_hw_get_rate(clk_dev->hw);
+ ret = tegra_clock_set_pd_state(clk_dev, rate);
+
+ mutex_unlock(&clk_dev->lock);
+
+ return ret;
+}
+
+static int tegra_clock_probe(struct platform_device *pdev)
+{
+ struct tegra_core_opp_params opp_params = {};
+ struct tegra_clk_device *clk_dev;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int err;
+
+ if (!dev->pm_domain)
+ return -EINVAL;
+
+ clk_dev = devm_kzalloc(dev, sizeof(*clk_dev), GFP_KERNEL);
+ if (!clk_dev)
+ return -ENOMEM;
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ clk_dev->dev = dev;
+ clk_dev->hw = __clk_get_hw(clk);
+ clk_dev->clk_nb.notifier_call = tegra_clock_change_notify;
+ mutex_init(&clk_dev->lock);
+
+ platform_set_drvdata(pdev, clk_dev);
+
+ /*
+ * Runtime PM was already enabled for this device by the parent clk
+ * driver and power domain state should be synced under clk_dev lock,
+ * hence we don't use the common OPP helper that initializes OPP
+ * state. For some clocks common OPP helper may fail to find ceil
+	 * state. For some clocks the common OPP helper may fail to find a
+	 * ceiling rate; that case is handled by this driver.
+ err = devm_tegra_core_dev_init_opp_table(dev, &opp_params);
+ if (err)
+ return err;
+
+ err = clk_notifier_register(clk, &clk_dev->clk_nb);
+ if (err) {
+ dev_err(dev, "failed to register clk notifier: %d\n", err);
+ return err;
+ }
+
+ /*
+ * The driver is attaching to a potentially active/resumed clock, hence
+	 * we need to sync the power domain performance state in accordance
+	 * with the clock rate if the clock is resumed.
+ */
+ err = tegra_clock_sync_pd_state(clk_dev);
+ if (err)
+ goto unreg_clk;
+
+ return 0;
+
+unreg_clk:
+ clk_notifier_unregister(clk, &clk_dev->clk_nb);
+
+ return err;
+}
+
+/*
+ * The Tegra GENPD driver enables clocks during the NOIRQ phase. That can't
+ * be done for clocks served by this driver, because runtime PM is not
+ * available in the NOIRQ phase. We keep these clocks resumed during suspend
+ * to mitigate this; in practice it makes no difference for power management,
+ * since the voltage is kept at a nominal level during suspend anyway.
+ */
+static const struct dev_pm_ops tegra_clock_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_resume_and_get, pm_runtime_put)
+};
+
+static const struct of_device_id tegra_clock_match[] = {
+ { .compatible = "nvidia,tegra20-sclk" },
+ { .compatible = "nvidia,tegra30-sclk" },
+ { .compatible = "nvidia,tegra30-pllc" },
+ { .compatible = "nvidia,tegra30-plle" },
+ { .compatible = "nvidia,tegra30-pllm" },
+ { }
+};
+
+static struct platform_driver tegra_clock_driver = {
+ .driver = {
+ .name = "tegra-clock",
+ .of_match_table = tegra_clock_match,
+ .pm = &tegra_clock_pm,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra_clock_probe,
+};
+builtin_platform_driver(tegra_clock_driver);
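
Note the ordering the notifier enforces: the domain performance state is raised on PRE_RATE_CHANGE when the rate goes up and lowered on POST_RATE_CHANGE when it goes down, so the supply is always sufficient for whichever rate is currently active, while ABORT_RATE_CHANGE rolls back to the old rate's state. A minimal consumer-side sketch of the same clk notifier API:

static int demo_clk_notify(struct notifier_block *nb,
			   unsigned long msg, void *data)
{
	struct clk_notifier_data *cnd = data;

	/* msg is PRE_RATE_CHANGE, POST_RATE_CHANGE or ABORT_RATE_CHANGE;
	 * cnd->old_rate and cnd->new_rate describe the transition */
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_clk_notify,
};

/* registration: clk_notifier_register(clk, &demo_nb); */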
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index eaa079c177c3..100b5d9b7e26 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -1914,7 +1914,7 @@ static struct clk *_tegra_clk_register_pll(struct tegra_clk_pll *pll,
/* Data in .init is copied by clk_register(), so stack variable OK */
pll->hw.init = &init;
- return clk_register(NULL, &pll->hw);
+ return tegra_clk_dev_register(&pll->hw);
}
struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
diff --git a/drivers/clk/tegra/clk-super.c b/drivers/clk/tegra/clk-super.c
index 6099c6e9acd4..a98a420398fa 100644
--- a/drivers/clk/tegra/clk-super.c
+++ b/drivers/clk/tegra/clk-super.c
@@ -226,7 +226,7 @@ struct clk *tegra_clk_register_super_mux(const char *name,
/* Data in .init is copied by clk_register(), so stack variable OK */
super->hw.init = &init;
- clk = clk_register(NULL, &super->hw);
+ clk = tegra_clk_dev_register(&super->hw);
if (IS_ERR(clk))
kfree(super);
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index bc9e47a4cb60..ef718c4b3826 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -1158,7 +1158,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA114_CLK_XUSB_HS_SRC, TEGRA114_CLK_XUSB_SS_DIV2, 61200000, 0 },
{ TEGRA114_CLK_XUSB_FALCON_SRC, TEGRA114_CLK_PLL_P, 204000000, 0 },
{ TEGRA114_CLK_XUSB_HOST_SRC, TEGRA114_CLK_PLL_P, 102000000, 0 },
- { TEGRA114_CLK_VDE, TEGRA114_CLK_CLK_MAX, 600000000, 0 },
+ { TEGRA114_CLK_VDE, TEGRA114_CLK_PLL_P, 408000000, 0 },
{ TEGRA114_CLK_SPDIF_IN_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA114_CLK_I2S0_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA114_CLK_I2S1_SYNC, TEGRA114_CLK_CLK_MAX, 24000000, 0 },
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 3664593a5ba4..be3c33441cfc 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -6,8 +6,11 @@
#include <linux/io.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include <dt-bindings/clock/tegra20-car.h>
@@ -414,7 +417,7 @@ static struct tegra_clk_pll_params pll_e_params = {
.fixed_rate = 100000000,
};
-static struct tegra_devclk devclks[] __initdata = {
+static struct tegra_devclk devclks[] = {
{ .con_id = "pll_c", .dt_id = TEGRA20_CLK_PLL_C },
{ .con_id = "pll_c_out1", .dt_id = TEGRA20_CLK_PLL_C_OUT1 },
{ .con_id = "pll_p", .dt_id = TEGRA20_CLK_PLL_P },
@@ -710,13 +713,6 @@ static void tegra20_super_clk_init(void)
NULL);
clks[TEGRA20_CLK_CCLK] = clk;
- /* SCLK */
- clk = tegra_clk_register_super_mux("sclk", sclk_parents,
- ARRAY_SIZE(sclk_parents),
- CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
- clk_base + SCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
- clks[TEGRA20_CLK_SCLK] = clk;
-
/* twd */
clk = clk_register_fixed_factor(NULL, "twd", "cclk", 0, 1, 4);
clks[TEGRA20_CLK_TWD] = clk;
@@ -1014,7 +1010,7 @@ static struct tegra_cpu_car_ops tegra20_cpu_car_ops = {
#endif
};
-static struct tegra_clk_init_table init_table[] __initdata = {
+static struct tegra_clk_init_table init_table[] = {
{ TEGRA20_CLK_PLL_P, TEGRA20_CLK_CLK_MAX, 216000000, 1 },
{ TEGRA20_CLK_PLL_P_OUT1, TEGRA20_CLK_CLK_MAX, 28800000, 1 },
{ TEGRA20_CLK_PLL_P_OUT2, TEGRA20_CLK_CLK_MAX, 48000000, 1 },
@@ -1052,11 +1048,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA20_CLK_CLK_MAX, TEGRA20_CLK_CLK_MAX, 0, 0 },
};
-static void __init tegra20_clock_apply_init_table(void)
-{
- tegra_init_from_table(init_table, clks, TEGRA20_CLK_CLK_MAX);
-}
-
/*
* Some clocks may be used by different drivers depending on the board
* configuration. List those here to register them twice in the clock lookup
@@ -1076,6 +1067,8 @@ static const struct of_device_id pmc_match[] __initconst = {
{ },
};
+static bool tegra20_car_initialized;
+
static struct clk *tegra20_clk_src_onecell_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -1083,6 +1076,16 @@ static struct clk *tegra20_clk_src_onecell_get(struct of_phandle_args *clkspec,
struct clk_hw *hw;
struct clk *clk;
+ /*
+	 * Timer clocks are needed early; the rest of the clocks shouldn't be
+	 * available to drivers until the clock tree is fully initialized.
+ */
+ if (clkspec->args[0] != TEGRA20_CLK_RTC &&
+ clkspec->args[0] != TEGRA20_CLK_TWD &&
+ clkspec->args[0] != TEGRA20_CLK_TIMER &&
+ !tegra20_car_initialized)
+ return ERR_PTR(-EPROBE_DEFER);
+
clk = of_clk_src_onecell_get(clkspec, data);
if (IS_ERR(clk))
return clk;
@@ -1149,10 +1152,48 @@ static void __init tegra20_clock_init(struct device_node *np)
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA20_CLK_CLK_MAX);
tegra_add_of_provider(np, tegra20_clk_src_onecell_get);
- tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
-
- tegra_clk_apply_init_table = tegra20_clock_apply_init_table;
tegra_cpu_car_ops = &tegra20_cpu_car_ops;
}
-CLK_OF_DECLARE(tegra20, "nvidia,tegra20-car", tegra20_clock_init);
+CLK_OF_DECLARE_DRIVER(tegra20, "nvidia,tegra20-car", tegra20_clock_init);
+
+/*
+ * Clocks that use runtime PM can't be created at tegra20_clock_init time
+ * because the driver core isn't initialized yet, and thus platform devices
+ * can't be created for the clocks. Hence the registration of the clocks is
+ * split into two phases: the first registers the essential clocks that
+ * don't require RPM and are actually used during early boot; the second
+ * registers the clocks that use RPM, once the device drivers' core API
+ * is ready.
+ */
+static int tegra20_car_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ ARRAY_SIZE(sclk_parents),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ clk_base + SCLK_BURST_POLICY, 0, 4, 0, 0, NULL);
+ clks[TEGRA20_CLK_SCLK] = clk;
+
+ tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
+ tegra_init_from_table(init_table, clks, TEGRA20_CLK_CLK_MAX);
+ tegra20_car_initialized = true;
+
+ return 0;
+}
+
+static const struct of_device_id tegra20_car_match[] = {
+ { .compatible = "nvidia,tegra20-car" },
+ { }
+};
+
+static struct platform_driver tegra20_car_driver = {
+ .driver = {
+ .name = "tegra20-car",
+ .of_match_table = tegra20_car_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra20_car_probe,
+};
+builtin_platform_driver(tegra20_car_driver);
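
Until tegra20_car_probe() runs and sets tegra20_car_initialized, any lookup of a non-timer clock returns -EPROBE_DEFER, and consumers that propagate the error are simply re-probed by the driver core once the CAR platform driver binds. A typical consumer, sketched:

static int demo_consumer_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		/* -EPROBE_DEFER propagates; the driver core retries
		 * this probe after the clock provider appears */
		return PTR_ERR(clk);

	return clk_prepare_enable(clk);
}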
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 64121bc66d85..04b496123820 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -7,8 +7,11 @@
#include <linux/delay.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/clk/tegra.h>
#include <soc/tegra/pmc.h>
@@ -532,7 +535,7 @@ static unsigned long tegra30_input_freq[] = {
[12] = 26000000,
};
-static struct tegra_devclk devclks[] __initdata = {
+static struct tegra_devclk devclks[] = {
{ .con_id = "pll_c", .dt_id = TEGRA30_CLK_PLL_C },
{ .con_id = "pll_c_out1", .dt_id = TEGRA30_CLK_PLL_C_OUT1 },
{ .con_id = "pll_p", .dt_id = TEGRA30_CLK_PLL_P },
@@ -812,11 +815,6 @@ static void __init tegra30_pll_init(void)
{
struct clk *clk;
- /* PLLC */
- clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, pmc_base, 0,
- &pll_c_params, NULL);
- clks[TEGRA30_CLK_PLL_C] = clk;
-
/* PLLC_OUT1 */
clk = tegra_clk_register_divider("pll_c_out1_div", "pll_c",
clk_base + PLLC_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
@@ -826,11 +824,6 @@ static void __init tegra30_pll_init(void)
0, NULL);
clks[TEGRA30_CLK_PLL_C_OUT1] = clk;
- /* PLLM */
- clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, pmc_base,
- CLK_SET_RATE_GATE, &pll_m_params, NULL);
- clks[TEGRA30_CLK_PLL_M] = clk;
-
/* PLLM_OUT1 */
clk = tegra_clk_register_divider("pll_m_out1_div", "pll_m",
clk_base + PLLM_OUT, 0, TEGRA_DIVIDER_ROUND_UP,
@@ -880,9 +873,6 @@ static void __init tegra30_pll_init(void)
ARRAY_SIZE(pll_e_parents),
CLK_SET_RATE_NO_REPARENT,
clk_base + PLLE_AUX, 2, 1, 0, NULL);
- clk = tegra_clk_register_plle("pll_e", "pll_e_mux", clk_base, pmc_base,
- CLK_GET_RATE_NOCACHE, &pll_e_params, NULL);
- clks[TEGRA30_CLK_PLL_E] = clk;
}
static const char *cclk_g_parents[] = { "clk_m", "pll_c", "clk_32k", "pll_m",
@@ -971,14 +961,6 @@ static void __init tegra30_super_clk_init(void)
NULL);
clks[TEGRA30_CLK_CCLK_LP] = clk;
- /* SCLK */
- clk = tegra_clk_register_super_mux("sclk", sclk_parents,
- ARRAY_SIZE(sclk_parents),
- CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
- clk_base + SCLK_BURST_POLICY,
- 0, 4, 0, 0, NULL);
- clks[TEGRA30_CLK_SCLK] = clk;
-
/* twd */
clk = clk_register_fixed_factor(NULL, "twd", "cclk_g",
CLK_SET_RATE_PARENT, 1, 2);
@@ -1214,7 +1196,7 @@ static struct tegra_cpu_car_ops tegra30_cpu_car_ops = {
#endif
};
-static struct tegra_clk_init_table init_table[] __initdata = {
+static struct tegra_clk_init_table init_table[] = {
{ TEGRA30_CLK_UARTA, TEGRA30_CLK_PLL_P, 408000000, 0 },
{ TEGRA30_CLK_UARTB, TEGRA30_CLK_PLL_P, 408000000, 0 },
{ TEGRA30_CLK_UARTC, TEGRA30_CLK_PLL_P, 408000000, 0 },
@@ -1259,11 +1241,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
-static void __init tegra30_clock_apply_init_table(void)
-{
- tegra_init_from_table(init_table, clks, TEGRA30_CLK_CLK_MAX);
-}
-
/*
* Some clocks may be used by different drivers depending on the board
* configuration. List those here to register them twice in the clock lookup
@@ -1294,12 +1271,24 @@ static struct tegra_audio_clk_info tegra30_audio_plls[] = {
{ "pll_a", &pll_a_params, tegra_clk_pll_a, "pll_p_out1" },
};
+static bool tegra30_car_initialized;
+
static struct clk *tegra30_clk_src_onecell_get(struct of_phandle_args *clkspec,
void *data)
{
struct clk_hw *hw;
struct clk *clk;
+ /*
+	 * Timer clocks are needed early; the rest of the clocks shouldn't be
+	 * available to drivers until the clock tree is fully initialized.
+ */
+ if (clkspec->args[0] != TEGRA30_CLK_RTC &&
+ clkspec->args[0] != TEGRA30_CLK_TWD &&
+ clkspec->args[0] != TEGRA30_CLK_TIMER &&
+ !tegra30_car_initialized)
+ return ERR_PTR(-EPROBE_DEFER);
+
clk = of_clk_src_onecell_get(clkspec, data);
if (IS_ERR(clk))
return clk;
@@ -1357,10 +1346,75 @@ static void __init tegra30_clock_init(struct device_node *np)
tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX);
tegra_add_of_provider(np, tegra30_clk_src_onecell_get);
+
+ tegra_cpu_car_ops = &tegra30_cpu_car_ops;
+}
+CLK_OF_DECLARE_DRIVER(tegra30, "nvidia,tegra30-car", tegra30_clock_init);
+
+/*
+ * Clocks that use runtime PM can't be created at tegra30_clock_init time
+ * because the driver core isn't initialized yet, and thus platform devices
+ * can't be created for the clocks. Hence the registration of the clocks is
+ * split into two phases: the first registers the essential clocks that
+ * don't require RPM and are actually used during early boot; the second
+ * registers the clocks that use RPM, once the device drivers' core API
+ * is ready.
+ */
+static int tegra30_car_probe(struct platform_device *pdev)
+{
+ struct clk *clk;
+
+ /* PLLC */
+ clk = tegra_clk_register_pll("pll_c", "pll_ref", clk_base, pmc_base, 0,
+ &pll_c_params, NULL);
+ clks[TEGRA30_CLK_PLL_C] = clk;
+
+ /* PLLE */
+ clk = tegra_clk_register_plle("pll_e", "pll_e_mux", clk_base, pmc_base,
+ CLK_GET_RATE_NOCACHE, &pll_e_params, NULL);
+ clks[TEGRA30_CLK_PLL_E] = clk;
+
+ /* PLLM */
+ clk = tegra_clk_register_pll("pll_m", "pll_ref", clk_base, pmc_base,
+ CLK_SET_RATE_GATE, &pll_m_params, NULL);
+ clks[TEGRA30_CLK_PLL_M] = clk;
+
+ /* SCLK */
+ clk = tegra_clk_register_super_mux("sclk", sclk_parents,
+ ARRAY_SIZE(sclk_parents),
+ CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ clk_base + SCLK_BURST_POLICY,
+ 0, 4, 0, 0, NULL);
+ clks[TEGRA30_CLK_SCLK] = clk;
+
tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
+ tegra_init_from_table(init_table, clks, TEGRA30_CLK_CLK_MAX);
+ tegra30_car_initialized = true;
- tegra_clk_apply_init_table = tegra30_clock_apply_init_table;
+ return 0;
+}
- tegra_cpu_car_ops = &tegra30_cpu_car_ops;
+static const struct of_device_id tegra30_car_match[] = {
+ { .compatible = "nvidia,tegra30-car" },
+ { }
+};
+
+static struct platform_driver tegra30_car_driver = {
+ .driver = {
+ .name = "tegra30-car",
+ .of_match_table = tegra30_car_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = tegra30_car_probe,
+};
+
+/*
+ * The clock driver must be registered before the memory controller driver,
+ * which doesn't support deferred probing as of today and is registered at
+ * arch init level.
+ */
+static int tegra30_car_init(void)
+{
+ return platform_driver_register(&tegra30_car_driver);
}
-CLK_OF_DECLARE(tegra30, "nvidia,tegra30-car", tegra30_clock_init);
+postcore_initcall(tegra30_car_init);
diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
index f6cdce441cf7..26bda45813c0 100644
--- a/drivers/clk/tegra/clk.c
+++ b/drivers/clk/tegra/clk.c
@@ -9,14 +9,19 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/clk/tegra.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/reset-controller.h>
+#include <linux/string.h>
#include <soc/tegra/fuse.h>
#include "clk.h"
/* Global data of Tegra CPU CAR ops */
+static struct device_node *tegra_car_np;
static struct tegra_cpu_car_ops dummy_car_ops;
struct tegra_cpu_car_ops *tegra_cpu_car_ops = &dummy_car_ops;
@@ -261,8 +266,8 @@ void __init tegra_init_dup_clks(struct tegra_clk_duplicate *dup_list,
}
}
-void __init tegra_init_from_table(struct tegra_clk_init_table *tbl,
- struct clk *clks[], int clk_max)
+void tegra_init_from_table(struct tegra_clk_init_table *tbl,
+ struct clk *clks[], int clk_max)
{
struct clk *clk;
@@ -320,6 +325,8 @@ void __init tegra_add_of_provider(struct device_node *np,
{
int i;
+ tegra_car_np = np;
+
for (i = 0; i < clk_num; i++) {
if (IS_ERR(clks[i])) {
pr_err
@@ -348,7 +355,7 @@ void __init tegra_init_special_resets(unsigned int num,
special_reset_deassert = deassert;
}
-void __init tegra_register_devclks(struct tegra_devclk *dev_clks, int num)
+void tegra_register_devclks(struct tegra_devclk *dev_clks, int num)
{
int i;
@@ -372,6 +379,68 @@ struct clk ** __init tegra_lookup_dt_id(int clk_id,
return NULL;
}
+static struct device_node *tegra_clk_get_of_node(struct clk_hw *hw)
+{
+ struct device_node *np;
+ char *node_name;
+
+ node_name = kstrdup(hw->init->name, GFP_KERNEL);
+ if (!node_name)
+ return NULL;
+
+ strreplace(node_name, '_', '-');
+
+ for_each_child_of_node(tegra_car_np, np) {
+ if (!strcmp(np->name, node_name))
+ break;
+ }
+
+ kfree(node_name);
+
+ return np;
+}
+
+struct clk *tegra_clk_dev_register(struct clk_hw *hw)
+{
+ struct platform_device *pdev, *parent;
+ const char *dev_name = NULL;
+ struct device *dev = NULL;
+ struct device_node *np;
+
+ np = tegra_clk_get_of_node(hw);
+
+ if (!of_device_is_available(np))
+ goto put_node;
+
+ dev_name = kasprintf(GFP_KERNEL, "tegra_clk_%s", hw->init->name);
+ if (!dev_name)
+ goto put_node;
+
+ parent = of_find_device_by_node(tegra_car_np);
+ if (parent) {
+ pdev = of_platform_device_create(np, dev_name, &parent->dev);
+ put_device(&parent->dev);
+
+ if (!pdev) {
+ pr_err("%s: failed to create device for %pOF\n",
+ __func__, np);
+ goto free_name;
+ }
+
+ dev = &pdev->dev;
+ pm_runtime_enable(dev);
+ } else {
+ WARN(1, "failed to find device for %pOF\n", tegra_car_np);
+ }
+
+free_name:
+ kfree(dev_name);
+put_node:
+ of_node_put(np);
+
+ return clk_register(dev, hw);
+}
+
tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
static int __init tegra_clocks_apply_init_table(void)
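
tegra_clk_dev_register() above finds the CAR child node for a clock by
rewriting the clk_hw name: kstrdup() the name, then strreplace() every '_'
with '-'. A worked illustration of that mapping, with a hypothetical clock
name:

    #include <linux/slab.h>
    #include <linux/string.h>

    static void example_clk_node_name(void)
    {
            /* clk_hw "pll_p_out1" -> CAR child node "pll-p-out1" */
            char *node_name = kstrdup("pll_p_out1", GFP_KERNEL);

            if (!node_name)
                    return;

            strreplace(node_name, '_', '-');        /* "pll-p-out1" */
            /* ... compare against each child node name, then ... */
            kfree(node_name);
    }
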
diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h
index 0c3ba0ccce1a..5d80d8b79b8e 100644
--- a/drivers/clk/tegra/clk.h
+++ b/drivers/clk/tegra/clk.h
@@ -927,4 +927,6 @@ struct clk *tegra20_clk_register_emc(void __iomem *ioaddr, bool low_jitter);
struct clk *tegra210_clk_register_emc(struct device_node *np,
void __iomem *regs);
+struct clk *tegra_clk_dev_register(struct clk_hw *hw);
+
#endif /* TEGRA_CLK_H */
diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
index b341cd990be7..962502ca7ff0 100644
--- a/drivers/clk/ti/adpll.c
+++ b/drivers/clk/ti/adpll.c
@@ -807,7 +807,7 @@ static int ti_adpll_init_registers(struct ti_adpll_data *d)
static int ti_adpll_init_inputs(struct ti_adpll_data *d)
{
- const char *error = "need at least %i inputs";
+ static const char error[] = "need at least %i inputs";
struct clk *clock;
int nr_inputs;
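
The adpll change swaps a pointer-to-string-literal for a static array, which
drops the extra pointer object (and its load-time relocation) from the image.
The difference in a nutshell (a standalone sketch):

    static const char *msg_ptr  = "need at least %i inputs"; /* pointer + bytes */
    static const char msg_arr[] = "need at least %i inputs"; /* just the bytes  */

    /* sizeof(msg_ptr) is the pointer size; sizeof(msg_arr) is strlen + 1 */
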
diff --git a/drivers/clk/visconti/Kconfig b/drivers/clk/visconti/Kconfig
new file mode 100644
index 000000000000..1661097b0d92
--- /dev/null
+++ b/drivers/clk/visconti/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config COMMON_CLK_VISCONTI
+ bool "Support for Toshiba Visconti5 ARM SoC clock controllers"
+ depends on ARCH_VISCONTI || COMPILE_TEST
+ default ARCH_VISCONTI
+ help
+ Support for the Toshiba Visconti5 ARM SoC clock controller.
+ Say Y if you want to include clock support.
diff --git a/drivers/clk/visconti/Makefile b/drivers/clk/visconti/Makefile
new file mode 100644
index 000000000000..c1254fd52b31
--- /dev/null
+++ b/drivers/clk/visconti/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Makefile for Toshiba Visconti clock
+
+obj-y += clkc.o pll.o reset.o
+obj-y += pll-tmpv770x.o clkc-tmpv770x.o
diff --git a/drivers/clk/visconti/clkc-tmpv770x.c b/drivers/clk/visconti/clkc-tmpv770x.c
new file mode 100644
index 000000000000..c2b2f41a85a4
--- /dev/null
+++ b/drivers/clk/visconti/clkc-tmpv770x.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toshiba Visconti clock controller
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/toshiba,tmpv770x.h>
+#include <dt-bindings/reset/toshiba,tmpv770x.h>
+
+#include "clkc.h"
+#include "reset.h"
+
+static DEFINE_SPINLOCK(tmpv770x_clk_lock);
+static DEFINE_SPINLOCK(tmpv770x_rst_lock);
+
+static const struct clk_parent_data clks_parent_data[] = {
+ { .fw_name = "pipll1", .name = "pipll1", },
+};
+
+static const struct clk_parent_data pietherplls_parent_data[] = {
+ { .fw_name = "pietherpll", .name = "pietherpll", },
+};
+
+static const struct visconti_fixed_clk fixed_clk_tables[] = {
+ /* PLL1 */
+ /* PICMPT0/1, PITSC, PIUWDT, PISWDT, PISBUS, PIPMU, PIGPMU, PITMU */
+ /* PIEMM, PIMISC, PIGCOMM, PIDCOMM, PIMBUS, PIGPIO, PIPGM */
+ { TMPV770X_CLK_PIPLL1_DIV4, "pipll1_div4", "pipll1", 0, 1, 4, },
+ /* PISBUS */
+ { TMPV770X_CLK_PIPLL1_DIV2, "pipll1_div2", "pipll1", 0, 1, 2, },
+ /* PICOBUS_CLK */
+ { TMPV770X_CLK_PIPLL1_DIV1, "pipll1_div1", "pipll1", 0, 1, 1, },
+ /* PIDNNPLL */
+ /* CONN_CLK, PIMBUS, PICRC0/1 */
+ { TMPV770X_CLK_PIDNNPLL_DIV1, "pidnnpll_div1", "pidnnpll", 0, 1, 1, },
+ { TMPV770X_CLK_PIREFCLK, "pirefclk", "osc2-clk", 0, 1, 1, },
+ { TMPV770X_CLK_WDTCLK, "wdtclk", "osc2-clk", 0, 1, 1, },
+};
+
+static const struct visconti_clk_gate_table pietherpll_clk_gate_tables[] = {
+ /* pietherpll */
+ { TMPV770X_CLK_PIETHER_2P5M, "piether_2p5m",
+ pietherplls_parent_data, ARRAY_SIZE(pietherplls_parent_data),
+ CLK_SET_RATE_PARENT, 0x34, 0x134, 4, 200,
+ TMPV770X_RESET_PIETHER_2P5M, },
+ { TMPV770X_CLK_PIETHER_25M, "piether_25m",
+ pietherplls_parent_data, ARRAY_SIZE(pietherplls_parent_data),
+ CLK_SET_RATE_PARENT, 0x34, 0x134, 5, 20,
+ TMPV770X_RESET_PIETHER_25M, },
+ { TMPV770X_CLK_PIETHER_50M, "piether_50m",
+ pietherplls_parent_data, ARRAY_SIZE(pietherplls_parent_data),
+ CLK_SET_RATE_PARENT, 0x34, 0x134, 6, 10,
+ TMPV770X_RESET_PIETHER_50M, },
+ { TMPV770X_CLK_PIETHER_125M, "piether_125m",
+ pietherplls_parent_data, ARRAY_SIZE(pietherplls_parent_data),
+ CLK_SET_RATE_PARENT, 0x34, 0x134, 7, 4,
+ TMPV770X_RESET_PIETHER_125M, },
+};
+
+static const struct visconti_clk_gate_table clk_gate_tables[] = {
+ { TMPV770X_CLK_HOX, "hox",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x4c, 0x14c, 0, 1,
+ TMPV770X_RESET_HOX, },
+ { TMPV770X_CLK_PCIE_MSTR, "pcie_mstr",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x38, 0x138, 0, 1,
+ TMPV770X_RESET_PCIE_MSTR, },
+ { TMPV770X_CLK_PCIE_AUX, "pcie_aux",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x38, 0x138, 1, 24,
+ TMPV770X_RESET_PCIE_AUX, },
+ { TMPV770X_CLK_PIINTC, "piintc",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ CLK_IGNORE_UNUSED, 0x8, 0x108, 0, 2, //FIX!!
+ TMPV770X_RESET_PIINTC, },
+ { TMPV770X_CLK_PIETHER_BUS, "piether_bus",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x34, 0x134, 0, 2,
+ TMPV770X_RESET_PIETHER_BUS, }, /* BUS_CLK */
+ { TMPV770X_CLK_PISPI0, "pispi0",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 0, 2,
+ TMPV770X_RESET_PISPI0, },
+ { TMPV770X_CLK_PISPI1, "pispi1",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 1, 2,
+ TMPV770X_RESET_PISPI1, },
+ { TMPV770X_CLK_PISPI2, "pispi2",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 2, 2,
+ TMPV770X_RESET_PISPI2, },
+ { TMPV770X_CLK_PISPI3, "pispi3",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 3, 2,
+ TMPV770X_RESET_PISPI3, },
+ { TMPV770X_CLK_PISPI4, "pispi4",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 4, 2,
+ TMPV770X_RESET_PISPI4, },
+ { TMPV770X_CLK_PISPI5, "pispi5",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 5, 2,
+ TMPV770X_RESET_PISPI5, },
+ { TMPV770X_CLK_PISPI6, "pispi6",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x28, 0x128, 6, 2,
+ TMPV770X_RESET_PISPI6, },
+ { TMPV770X_CLK_PIUART0, "piuart0",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ //CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2c, 0x12c, 0, 4,
+ 0, 0x2c, 0x12c, 0, 4,
+ TMPV770X_RESET_PIUART0, },
+ { TMPV770X_CLK_PIUART1, "piuart1",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ //CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED, 0x2c, 0x12c, 1, 4,
+ 0, 0x2c, 0x12c, 1, 4,
+ TMPV770X_RESET_PIUART1, },
+ { TMPV770X_CLK_PIUART2, "piuart2",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x2c, 0x12c, 2, 4,
+ TMPV770X_RESET_PIUART2, },
+ { TMPV770X_CLK_PIUART3, "piuart3",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x2c, 0x12c, 3, 4,
+ TMPV770X_RESET_PIUART3, },
+ { TMPV770X_CLK_PII2C0, "pii2c0",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 0, 4,
+ TMPV770X_RESET_PII2C0, },
+ { TMPV770X_CLK_PII2C1, "pii2c1",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 1, 4,
+ TMPV770X_RESET_PII2C1, },
+ { TMPV770X_CLK_PII2C2, "pii2c2",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 2, 4,
+ TMPV770X_RESET_PII2C2, },
+ { TMPV770X_CLK_PII2C3, "pii2c3",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 3, 4,
+ TMPV770X_RESET_PII2C3, },
+ { TMPV770X_CLK_PII2C4, "pii2c4",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 4, 4,
+ TMPV770X_RESET_PII2C4, },
+ { TMPV770X_CLK_PII2C5, "pii2c5",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 5, 4,
+ TMPV770X_RESET_PII2C5, },
+ { TMPV770X_CLK_PII2C6, "pii2c6",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 6, 4,
+ TMPV770X_RESET_PII2C6, },
+ { TMPV770X_CLK_PII2C7, "pii2c7",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 7, 4,
+ TMPV770X_RESET_PII2C7, },
+ { TMPV770X_CLK_PII2C8, "pii2c8",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x30, 0x130, 8, 4,
+ TMPV770X_RESET_PII2C8, },
+ /* PIPCMIF */
+ { TMPV770X_CLK_PIPCMIF, "pipcmif",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x64, 0x164, 0, 4,
+ TMPV770X_RESET_PIPCMIF, },
+ /* PISYSTEM */
+ { TMPV770X_CLK_WRCK, "wrck",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x68, 0x168, 9, 32,
+ -1, }, /* No reset */
+ { TMPV770X_CLK_PICKMON, "pickmon",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x10, 0x110, 8, 4,
+ TMPV770X_RESET_PICKMON, },
+ { TMPV770X_CLK_SBUSCLK, "sbusclk",
+ clks_parent_data, ARRAY_SIZE(clks_parent_data),
+ 0, 0x14, 0x114, 0, 4,
+ TMPV770X_RESET_SBUSCLK, },
+};
+
+static const struct visconti_reset_data clk_reset_data[] = {
+ [TMPV770X_RESET_PIETHER_2P5M] = { 0x434, 0x534, 4, },
+ [TMPV770X_RESET_PIETHER_25M] = { 0x434, 0x534, 5, },
+ [TMPV770X_RESET_PIETHER_50M] = { 0x434, 0x534, 6, },
+ [TMPV770X_RESET_PIETHER_125M] = { 0x434, 0x534, 7, },
+ [TMPV770X_RESET_HOX] = { 0x44c, 0x54c, 0, },
+ [TMPV770X_RESET_PCIE_MSTR] = { 0x438, 0x538, 0, },
+ [TMPV770X_RESET_PCIE_AUX] = { 0x438, 0x538, 1, },
+ [TMPV770X_RESET_PIINTC] = { 0x408, 0x508, 0, },
+ [TMPV770X_RESET_PIETHER_BUS] = { 0x434, 0x534, 0, },
+ [TMPV770X_RESET_PISPI0] = { 0x428, 0x528, 0, },
+ [TMPV770X_RESET_PISPI1] = { 0x428, 0x528, 1, },
+ [TMPV770X_RESET_PISPI2] = { 0x428, 0x528, 2, },
+ [TMPV770X_RESET_PISPI3] = { 0x428, 0x528, 3, },
+ [TMPV770X_RESET_PISPI4] = { 0x428, 0x528, 4, },
+ [TMPV770X_RESET_PISPI5] = { 0x428, 0x528, 5, },
+ [TMPV770X_RESET_PISPI6] = { 0x428, 0x528, 6, },
+ [TMPV770X_RESET_PIUART0] = { 0x42c, 0x52c, 0, },
+ [TMPV770X_RESET_PIUART1] = { 0x42c, 0x52c, 1, },
+ [TMPV770X_RESET_PIUART2] = { 0x42c, 0x52c, 2, },
+ [TMPV770X_RESET_PIUART3] = { 0x42c, 0x52c, 3, },
+ [TMPV770X_RESET_PII2C0] = { 0x430, 0x530, 0, },
+ [TMPV770X_RESET_PII2C1] = { 0x430, 0x530, 1, },
+ [TMPV770X_RESET_PII2C2] = { 0x430, 0x530, 2, },
+ [TMPV770X_RESET_PII2C3] = { 0x430, 0x530, 3, },
+ [TMPV770X_RESET_PII2C4] = { 0x430, 0x530, 4, },
+ [TMPV770X_RESET_PII2C5] = { 0x430, 0x530, 5, },
+ [TMPV770X_RESET_PII2C6] = { 0x430, 0x530, 6, },
+ [TMPV770X_RESET_PII2C7] = { 0x430, 0x530, 7, },
+ [TMPV770X_RESET_PII2C8] = { 0x430, 0x530, 8, },
+ [TMPV770X_RESET_PIPCMIF] = { 0x464, 0x564, 0, },
+ [TMPV770X_RESET_PICKMON] = { 0x410, 0x510, 8, },
+ [TMPV770X_RESET_SBUSCLK] = { 0x414, 0x514, 0, },
+};
+
+static int visconti_clk_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct visconti_clk_provider *ctx;
+ struct device *dev = &pdev->dev;
+ struct regmap *regmap;
+ int ret, i;
+
+ regmap = syscon_node_to_regmap(np);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ctx = visconti_init_clk(dev, regmap, TMPV770X_NR_CLK);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ ret = visconti_register_reset_controller(dev, regmap, clk_reset_data,
+ TMPV770X_NR_RESET,
+ &visconti_reset_ops,
+ &tmpv770x_rst_lock);
+ if (ret) {
+ dev_err(dev, "Failed to register reset controller: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < (ARRAY_SIZE(fixed_clk_tables)); i++)
+ ctx->clk_data.hws[fixed_clk_tables[i].id] =
+ clk_hw_register_fixed_factor(NULL,
+ fixed_clk_tables[i].name,
+ fixed_clk_tables[i].parent,
+ fixed_clk_tables[i].flag,
+ fixed_clk_tables[i].mult,
+ fixed_clk_tables[i].div);
+
+ ret = visconti_clk_register_gates(ctx, clk_gate_tables,
+ ARRAY_SIZE(clk_gate_tables), clk_reset_data,
+ &tmpv770x_clk_lock);
+ if (ret) {
+ dev_err(dev, "Failed to register main clock gate: %d\n", ret);
+ return ret;
+ }
+
+ ret = visconti_clk_register_gates(ctx, pietherpll_clk_gate_tables,
+ ARRAY_SIZE(pietherpll_clk_gate_tables),
+ clk_reset_data, &tmpv770x_clk_lock);
+ if (ret) {
+ dev_err(dev, "Failed to register pietherpll clock gate: %d\n", ret);
+ return ret;
+ }
+
+ return of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &ctx->clk_data);
+}
+
+static const struct of_device_id visconti_clk_ids[] = {
+ { .compatible = "toshiba,tmpv7708-pismu", },
+ { }
+};
+
+static struct platform_driver visconti_clk_driver = {
+ .probe = visconti_clk_probe,
+ .driver = {
+ .name = "visconti-clk",
+ .of_match_table = visconti_clk_ids,
+ },
+};
+
+builtin_platform_driver(visconti_clk_driver);
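
The fixed-factor rows above only make sense against the PLL rates registered
by pll-tmpv770x.c further down, which pins pipll1 at a fixed 600 MHz; a quick
rate check of the table:

    /*
     * With pipll1 fixed at 600 MHz (see pll-tmpv770x.c below):
     *   pipll1_div1 = 600 MHz * 1/1 = 600 MHz
     *   pipll1_div2 = 600 MHz * 1/2 = 300 MHz
     *   pipll1_div4 = 600 MHz * 1/4 = 150 MHz
     */
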
diff --git a/drivers/clk/visconti/clkc.c b/drivers/clk/visconti/clkc.c
new file mode 100644
index 000000000000..56a8a4ffebca
--- /dev/null
+++ b/drivers/clk/visconti/clkc.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toshiba Visconti clock controller
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "clkc.h"
+
+static inline struct visconti_clk_gate *to_visconti_clk_gate(struct clk_hw *hw)
+{
+ return container_of(hw, struct visconti_clk_gate, hw);
+}
+
+static int visconti_gate_clk_is_enabled(struct clk_hw *hw)
+{
+ struct visconti_clk_gate *gate = to_visconti_clk_gate(hw);
+ u32 clk = BIT(gate->ck_idx);
+ u32 val;
+
+ regmap_read(gate->regmap, gate->ckon_offset, &val);
+ return (val & clk) ? 1 : 0;
+}
+
+static void visconti_gate_clk_disable(struct clk_hw *hw)
+{
+ struct visconti_clk_gate *gate = to_visconti_clk_gate(hw);
+ u32 clk = BIT(gate->ck_idx);
+ unsigned long flags;
+
+ spin_lock_irqsave(gate->lock, flags);
+
+ if (!visconti_gate_clk_is_enabled(hw)) {
+ spin_unlock_irqrestore(gate->lock, flags);
+ return;
+ }
+
+ regmap_update_bits(gate->regmap, gate->ckoff_offset, clk, clk);
+ spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int visconti_gate_clk_enable(struct clk_hw *hw)
+{
+ struct visconti_clk_gate *gate = to_visconti_clk_gate(hw);
+ u32 clk = BIT(gate->ck_idx);
+ unsigned long flags;
+
+ spin_lock_irqsave(gate->lock, flags);
+ regmap_update_bits(gate->regmap, gate->ckon_offset, clk, clk);
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return 0;
+}
+
+static const struct clk_ops visconti_clk_gate_ops = {
+ .enable = visconti_gate_clk_enable,
+ .disable = visconti_gate_clk_disable,
+ .is_enabled = visconti_gate_clk_is_enabled,
+};
+
+static struct clk_hw *visconti_clk_register_gate(struct device *dev,
+ const char *name,
+ const char *parent_name,
+ struct regmap *regmap,
+ const struct visconti_clk_gate_table *clks,
+ u32 rson_offset,
+ u32 rsoff_offset,
+ u8 rs_idx,
+ spinlock_t *lock)
+{
+ struct visconti_clk_gate *gate;
+ struct clk_parent_data *pdata;
+ struct clk_init_data init;
+ struct clk_hw *hw;
+ int ret;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->name = pdata->fw_name = parent_name;
+
+ gate = devm_kzalloc(dev, sizeof(*gate), GFP_KERNEL);
+ if (!gate)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &visconti_clk_gate_ops;
+ init.flags = clks->flags;
+ init.parent_data = pdata;
+ init.num_parents = 1;
+
+ gate->regmap = regmap;
+ gate->ckon_offset = clks->ckon_offset;
+ gate->ckoff_offset = clks->ckoff_offset;
+ gate->ck_idx = clks->ck_idx;
+ gate->rson_offset = rson_offset;
+ gate->rsoff_offset = rsoff_offset;
+ gate->rs_idx = rs_idx;
+ gate->lock = lock;
+ gate->hw.init = &init;
+
+ hw = &gate->hw;
+ ret = devm_clk_hw_register(dev, hw);
+ if (ret)
+ hw = ERR_PTR(ret);
+
+ return hw;
+}
+
+int visconti_clk_register_gates(struct visconti_clk_provider *ctx,
+ const struct visconti_clk_gate_table *clks,
+ int num_gate,
+ const struct visconti_reset_data *reset,
+ spinlock_t *lock)
+{
+ struct device *dev = ctx->dev;
+ int i;
+
+ for (i = 0; i < num_gate; i++) {
+ const char *parent_div_name = clks[i].parent_data[0].name;
+ struct clk_parent_data *pdata;
+ u32 rson_offset, rsoff_offset;
+ struct clk_hw *gate_clk;
+ struct clk_hw *div_clk;
+ char *dev_name;
+ u8 rs_idx;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ dev_name = devm_kasprintf(dev, GFP_KERNEL, "%s_div", clks[i].name);
+ if (!dev_name)
+ return -ENOMEM;
+
+ if (clks[i].rs_id >= 0) {
+ rson_offset = reset[clks[i].rs_id].rson_offset;
+ rsoff_offset = reset[clks[i].rs_id].rsoff_offset;
+ rs_idx = reset[clks[i].rs_id].rs_idx;
+ } else {
+ rson_offset = rsoff_offset = rs_idx = -1;
+ }
+
+ div_clk = devm_clk_hw_register_fixed_factor(dev,
+ dev_name,
+ parent_div_name,
+ 0, 1,
+ clks[i].div);
+ if (IS_ERR(div_clk))
+ return PTR_ERR(div_clk);
+
+ gate_clk = visconti_clk_register_gate(dev,
+ clks[i].name,
+ dev_name,
+ ctx->regmap,
+ &clks[i],
+ rson_offset,
+ rsoff_offset,
+ rs_idx,
+ lock);
+ if (IS_ERR(gate_clk)) {
+ dev_err(dev, "%s: failed to register clock %s\n",
+ __func__, clks[i].name);
+ return PTR_ERR(gate_clk);
+ }
+
+ ctx->clk_data.hws[clks[i].id] = gate_clk;
+ }
+
+ return 0;
+}
+
+struct visconti_clk_provider *visconti_init_clk(struct device *dev,
+ struct regmap *regmap,
+ unsigned long nr_clks)
+{
+ struct visconti_clk_provider *ctx;
+ int i;
+
+ ctx = devm_kzalloc(dev, struct_size(ctx, clk_data.hws, nr_clks), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_clks; ++i)
+ ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
+ ctx->clk_data.num = nr_clks;
+
+ ctx->dev = dev;
+ ctx->regmap = regmap;
+
+ return ctx;
+}
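
The gates use paired write-one-to-set registers instead of a single
read-modify-write enable bit: setting a bit in the CKON register switches the
clock on, and setting the same bit in the separate CKOFF register switches it
off, which is why enable and disable never touch the same offset. Sketched
against the piuart0 row from clkc-tmpv770x.c (CKON 0x2c, CKOFF 0x12c, bit 0),
assuming only the regmap API:

    #include <linux/bits.h>
    #include <linux/regmap.h>

    static void example_uart0_gate(struct regmap *regmap)
    {
            regmap_update_bits(regmap, 0x2c, BIT(0), BIT(0));   /* enable  */
            regmap_update_bits(regmap, 0x12c, BIT(0), BIT(0));  /* disable */
    }
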
diff --git a/drivers/clk/visconti/clkc.h b/drivers/clk/visconti/clkc.h
new file mode 100644
index 000000000000..09ed82ff64e4
--- /dev/null
+++ b/drivers/clk/visconti/clkc.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Toshiba Visconti clock controller
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#ifndef _VISCONTI_CLKC_H_
+#define _VISCONTI_CLKC_H_
+
+#include <linux/mfd/syscon.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/spinlock.h>
+
+#include "reset.h"
+
+struct visconti_clk_provider {
+ struct device *dev;
+ struct regmap *regmap;
+ struct clk_hw_onecell_data clk_data;
+};
+
+struct visconti_clk_gate_table {
+ unsigned int id;
+ const char *name;
+ const struct clk_parent_data *parent_data;
+ u8 num_parents;
+ u8 flags;
+ u32 ckon_offset;
+ u32 ckoff_offset;
+ u8 ck_idx;
+ unsigned int div;
+ u8 rs_id;
+};
+
+struct visconti_fixed_clk {
+ unsigned int id;
+ const char *name;
+ const char *parent;
+ unsigned long flag;
+ unsigned int mult;
+ unsigned int div;
+};
+
+struct visconti_clk_gate {
+ struct clk_hw hw;
+ struct regmap *regmap;
+ u32 ckon_offset;
+ u32 ckoff_offset;
+ u8 ck_idx;
+ u8 flags;
+ u32 rson_offset;
+ u32 rsoff_offset;
+ u8 rs_idx;
+ spinlock_t *lock;
+};
+
+struct visconti_clk_provider *visconti_init_clk(struct device *dev,
+ struct regmap *regmap,
+ unsigned long nr_clks);
+int visconti_clk_register_gates(struct visconti_clk_provider *data,
+ const struct visconti_clk_gate_table *clks,
+ int num_gate,
+ const struct visconti_reset_data *reset,
+ spinlock_t *lock);
+#endif /* _VISCONTI_CLKC_H_ */
diff --git a/drivers/clk/visconti/pll-tmpv770x.c b/drivers/clk/visconti/pll-tmpv770x.c
new file mode 100644
index 000000000000..8360ccf88867
--- /dev/null
+++ b/drivers/clk/visconti/pll-tmpv770x.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toshiba Visconti PLL controller
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/clock/toshiba,tmpv770x.h>
+
+#include "pll.h"
+
+static DEFINE_SPINLOCK(tmpv770x_pll_lock);
+
+static const struct visconti_pll_rate_table pipll0_rates[] __initconst = {
+ VISCONTI_PLL_RATE(840000000, 0x1, 0x0, 0x1, 0x54, 0x000000, 0x2, 0x1),
+ VISCONTI_PLL_RATE(780000000, 0x1, 0x0, 0x1, 0x4e, 0x000000, 0x2, 0x1),
+ VISCONTI_PLL_RATE(600000000, 0x1, 0x0, 0x1, 0x3c, 0x000000, 0x2, 0x1),
+ { /* sentinel */ },
+};
+
+static const struct visconti_pll_rate_table piddrcpll_rates[] __initconst = {
+ VISCONTI_PLL_RATE(780000000, 0x1, 0x0, 0x1, 0x4e, 0x000000, 0x2, 0x1),
+ VISCONTI_PLL_RATE(760000000, 0x1, 0x0, 0x1, 0x4c, 0x000000, 0x2, 0x1),
+ { /* sentinel */ },
+};
+
+static const struct visconti_pll_rate_table pivoifpll_rates[] __initconst = {
+ VISCONTI_PLL_RATE(165000000, 0x1, 0x0, 0x1, 0x42, 0x000000, 0x4, 0x2),
+ VISCONTI_PLL_RATE(148500000, 0x1, 0x1, 0x1, 0x3b, 0x666666, 0x4, 0x2),
+ VISCONTI_PLL_RATE(96000000, 0x1, 0x0, 0x1, 0x30, 0x000000, 0x5, 0x2),
+ VISCONTI_PLL_RATE(74250000, 0x1, 0x1, 0x1, 0x3b, 0x666666, 0x4, 0x4),
+ VISCONTI_PLL_RATE(54000000, 0x1, 0x0, 0x1, 0x36, 0x000000, 0x5, 0x4),
+ VISCONTI_PLL_RATE(48000000, 0x1, 0x0, 0x1, 0x30, 0x000000, 0x5, 0x4),
+ VISCONTI_PLL_RATE(35750000, 0x1, 0x1, 0x1, 0x32, 0x0ccccc, 0x7, 0x4),
+ { /* sentinel */ },
+};
+
+static const struct visconti_pll_rate_table piimgerpll_rates[] __initconst = {
+ VISCONTI_PLL_RATE(165000000, 0x1, 0x0, 0x1, 0x42, 0x000000, 0x4, 0x2),
+ VISCONTI_PLL_RATE(96000000, 0x1, 0x0, 0x1, 0x30, 0x000000, 0x5, 0x2),
+ VISCONTI_PLL_RATE(54000000, 0x1, 0x0, 0x1, 0x36, 0x000000, 0x5, 0x4),
+ VISCONTI_PLL_RATE(48000000, 0x1, 0x0, 0x1, 0x30, 0x000000, 0x5, 0x4),
+ { /* sentinel */ },
+};
+
+static const struct visconti_pll_info pll_info[] __initconst = {
+ { TMPV770X_PLL_PIPLL0, "pipll0", "osc2-clk", 0x0, pipll0_rates },
+ { TMPV770X_PLL_PIDDRCPLL, "piddrcpll", "osc2-clk", 0x500, piddrcpll_rates },
+ { TMPV770X_PLL_PIVOIFPLL, "pivoifpll", "osc2-clk", 0x600, pivoifpll_rates },
+ { TMPV770X_PLL_PIIMGERPLL, "piimgerpll", "osc2-clk", 0x700, piimgerpll_rates },
+};
+
+static void __init tmpv770x_setup_plls(struct device_node *np)
+{
+ struct visconti_pll_provider *ctx;
+ void __iomem *reg_base;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ return;
+
+ ctx = visconti_init_pll(np, reg_base, TMPV770X_NR_PLL);
+ if (IS_ERR(ctx)) {
+ iounmap(reg_base);
+ return;
+ }
+
+ ctx->clk_data.hws[TMPV770X_PLL_PIPLL1] =
+ clk_hw_register_fixed_rate(NULL, "pipll1", NULL, 0, 600000000);
+ ctx->clk_data.hws[TMPV770X_PLL_PIDNNPLL] =
+ clk_hw_register_fixed_rate(NULL, "pidnnpll", NULL, 0, 500000000);
+ ctx->clk_data.hws[TMPV770X_PLL_PIETHERPLL] =
+ clk_hw_register_fixed_rate(NULL, "pietherpll", NULL, 0, 500000000);
+
+ visconti_register_plls(ctx, pll_info, ARRAY_SIZE(pll_info), &tmpv770x_pll_lock);
+}
+
+CLK_OF_DECLARE(tmpv770x_plls, "toshiba,tmpv7708-pipllct", tmpv770x_setup_plls);
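
The VISCONTI_PLL_RATE rows are consistent with a fractional PLL fed by a
20 MHz osc2-clk (an inferred figure, not stated in this patch), following
Fout = Fref / refdiv * (intin + fracin / 2^24) / (postdiv1 * postdiv2).
Checking two rows:

    /*
     * pipll0 @ 840 MHz:   20 MHz / 1 * (0x54 + 0)   / (2 * 1) = 840 MHz
     * pivoifpll @ 148.5M: 20 MHz / 1 * (0x3b + 0.4) / (4 * 2) = 148.5 MHz
     *                     (0x3b = 59, 0x666666 / 2^24 ~= 0.4)
     */
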
diff --git a/drivers/clk/visconti/pll.c b/drivers/clk/visconti/pll.c
new file mode 100644
index 000000000000..a484cb945d67
--- /dev/null
+++ b/drivers/clk/visconti/pll.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toshiba Visconti PLL driver
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include "pll.h"
+
+struct visconti_pll {
+ struct clk_hw hw;
+ void __iomem *pll_base;
+ spinlock_t *lock;
+ unsigned long flags;
+ const struct visconti_pll_rate_table *rate_table;
+ size_t rate_count;
+ struct visconti_pll_provider *ctx;
+};
+
+#define PLL_CONF_REG 0x0000
+#define PLL_CTRL_REG 0x0004
+#define PLL_FRACMODE_REG 0x0010
+#define PLL_INTIN_REG 0x0014
+#define PLL_FRACIN_REG 0x0018
+#define PLL_REFDIV_REG 0x001c
+#define PLL_POSTDIV_REG 0x0020
+
+#define PLL_CONFIG_SEL BIT(0)
+#define PLL_PLLEN BIT(4)
+#define PLL_BYPASS BIT(16)
+#define PLL_INTIN_MASK GENMASK(11, 0)
+#define PLL_FRACIN_MASK GENMASK(23, 0)
+#define PLL_REFDIV_MASK GENMASK(5, 0)
+#define PLL_POSTDIV_MASK GENMASK(2, 0)
+
+#define PLL0_FRACMODE_DACEN BIT(4)
+#define PLL0_FRACMODE_DSMEN BIT(0)
+
+#define PLL_CREATE_FRACMODE(table) (table->dacen << 4 | table->dsmen)
+#define PLL_CREATE_POSTDIV(table) (table->postdiv2 << 4 | table->postdiv1)
+
+static inline struct visconti_pll *to_visconti_pll(struct clk_hw *hw)
+{
+ return container_of(hw, struct visconti_pll, hw);
+}
+
+static void visconti_pll_get_params(struct visconti_pll *pll,
+ struct visconti_pll_rate_table *rate_table)
+{
+ u32 postdiv, val;
+
+ val = readl(pll->pll_base + PLL_FRACMODE_REG);
+
+ rate_table->dacen = FIELD_GET(PLL0_FRACMODE_DACEN, val);
+ rate_table->dsmen = FIELD_GET(PLL0_FRACMODE_DSMEN, val);
+
+ rate_table->fracin = readl(pll->pll_base + PLL_FRACIN_REG) & PLL_FRACIN_MASK;
+ rate_table->intin = readl(pll->pll_base + PLL_INTIN_REG) & PLL_INTIN_MASK;
+ rate_table->refdiv = readl(pll->pll_base + PLL_REFDIV_REG) & PLL_REFDIV_MASK;
+
+ postdiv = readl(pll->pll_base + PLL_POSTDIV_REG);
+ rate_table->postdiv1 = postdiv & PLL_POSTDIV_MASK;
+ rate_table->postdiv2 = (postdiv >> 4) & PLL_POSTDIV_MASK;
+}
+
+static const struct visconti_pll_rate_table *visconti_get_pll_settings(struct visconti_pll *pll,
+ unsigned long rate)
+{
+ const struct visconti_pll_rate_table *rate_table = pll->rate_table;
+ int i;
+
+ for (i = 0; i < pll->rate_count; i++)
+ if (rate == rate_table[i].rate)
+ return &rate_table[i];
+
+ return NULL;
+}
+
+static unsigned long visconti_get_pll_rate_from_data(struct visconti_pll *pll,
+ const struct visconti_pll_rate_table *rate)
+{
+ const struct visconti_pll_rate_table *rate_table = pll->rate_table;
+ int i;
+
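+ /* One memcmp covers every field after .rate (the struct tail) */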
+ for (i = 0; i < pll->rate_count; i++)
+ if (memcmp(&rate_table[i].dacen, &rate->dacen,
+ sizeof(*rate) - sizeof(unsigned long)) == 0)
+ return rate_table[i].rate;
+
+ /* set default */
+ return rate_table[0].rate;
+}
+
+static long visconti_pll_round_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long *prate)
+{
+ struct visconti_pll *pll = to_visconti_pll(hw);
+ const struct visconti_pll_rate_table *rate_table = pll->rate_table;
+ int i;
+
+ /* Assuming rate_table is sorted in descending order */
+ for (i = 0; i < pll->rate_count; i++)
+ if (rate >= rate_table[i].rate)
+ return rate_table[i].rate;
+
+ /* return minimum supported value */
+ return rate_table[i - 1].rate;
+}
+
+static unsigned long visconti_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct visconti_pll *pll = to_visconti_pll(hw);
+ struct visconti_pll_rate_table rate_table;
+
+ memset(&rate_table, 0, sizeof(rate_table));
+ visconti_pll_get_params(pll, &rate_table);
+
+ return visconti_get_pll_rate_from_data(pll, &rate_table);
+}
+
+static int visconti_pll_set_params(struct visconti_pll *pll,
+ const struct visconti_pll_rate_table *rate_table)
+{
+ writel(PLL_CREATE_FRACMODE(rate_table), pll->pll_base + PLL_FRACMODE_REG);
+ writel(PLL_CREATE_POSTDIV(rate_table), pll->pll_base + PLL_POSTDIV_REG);
+ writel(rate_table->intin, pll->pll_base + PLL_INTIN_REG);
+ writel(rate_table->fracin, pll->pll_base + PLL_FRACIN_REG);
+ writel(rate_table->refdiv, pll->pll_base + PLL_REFDIV_REG);
+
+ return 0;
+}
+
+static int visconti_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct visconti_pll *pll = to_visconti_pll(hw);
+ const struct visconti_pll_rate_table *rate_table;
+
+ rate_table = visconti_get_pll_settings(pll, rate);
+ if (!rate_table)
+ return -EINVAL;
+
+ return visconti_pll_set_params(pll, rate_table);
+}
+
+static int visconti_pll_is_enabled(struct clk_hw *hw)
+{
+ struct visconti_pll *pll = to_visconti_pll(hw);
+ u32 reg;
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+
+ return (reg & PLL_PLLEN);
+}
+
+static int visconti_pll_enable(struct clk_hw *hw)
+{
+ struct visconti_pll *pll = to_visconti_pll(hw);
+ const struct visconti_pll_rate_table *rate_table = pll->rate_table;
+ unsigned long flags;
+ u32 reg;
+
+ if (visconti_pll_is_enabled(hw))
+ return 0;
+
+ spin_lock_irqsave(pll->lock, flags);
+
+ writel(PLL_CONFIG_SEL, pll->pll_base + PLL_CONF_REG);
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+ reg |= PLL_BYPASS;
+ writel(reg, pll->pll_base + PLL_CTRL_REG);
+
+ visconti_pll_set_params(pll, &rate_table[0]);
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+ reg &= ~PLL_PLLEN;
+ writel(reg, pll->pll_base + PLL_CTRL_REG);
+
+ udelay(1);
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+ reg |= PLL_PLLEN;
+ writel(reg, pll->pll_base + PLL_CTRL_REG);
+
+ udelay(40);
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+ reg &= ~PLL_BYPASS;
+ writel(reg, pll->pll_base + PLL_CTRL_REG);
+
+ spin_unlock_irqrestore(pll->lock, flags);
+
+ return 0;
+}
+
+static void visconti_pll_disable(struct clk_hw *hw)
+{
+ struct visconti_pll *pll = to_visconti_pll(hw);
+ unsigned long flags;
+ u32 reg;
+
+ if (!visconti_pll_is_enabled(hw))
+ return;
+
+ spin_lock_irqsave(pll->lock, flags);
+
+ writel(PLL_CONFIG_SEL, pll->pll_base + PLL_CONF_REG);
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+ reg |= PLL_BYPASS;
+ writel(reg, pll->pll_base + PLL_CTRL_REG);
+
+ reg = readl(pll->pll_base + PLL_CTRL_REG);
+ reg &= ~PLL_PLLEN;
+ writel(reg, pll->pll_base + PLL_CTRL_REG);
+
+ spin_unlock_irqrestore(pll->lock, flags);
+}
+
+static const struct clk_ops visconti_pll_ops = {
+ .enable = visconti_pll_enable,
+ .disable = visconti_pll_disable,
+ .is_enabled = visconti_pll_is_enabled,
+ .round_rate = visconti_pll_round_rate,
+ .recalc_rate = visconti_pll_recalc_rate,
+ .set_rate = visconti_pll_set_rate,
+};
+
+static struct clk_hw *visconti_register_pll(struct visconti_pll_provider *ctx,
+ const char *name,
+ const char *parent_name,
+ int offset,
+ const struct visconti_pll_rate_table *rate_table,
+ spinlock_t *lock)
+{
+ struct clk_init_data init;
+ struct visconti_pll *pll;
+ struct clk_hw *pll_hw_clk;
+ size_t len;
+ int ret;
+
+ pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.flags = CLK_IGNORE_UNUSED;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ for (len = 0; rate_table[len].rate != 0; )
+ len++;
+ pll->rate_count = len;
+ pll->rate_table = kmemdup(rate_table,
+ pll->rate_count * sizeof(struct visconti_pll_rate_table),
+ GFP_KERNEL);
+ WARN(!pll->rate_table, "%s: could not allocate rate table for %s\n", __func__, name);
+
+ init.ops = &visconti_pll_ops;
+ pll->hw.init = &init;
+ pll->pll_base = ctx->reg_base + offset;
+ pll->lock = lock;
+ pll->ctx = ctx;
+
+ pll_hw_clk = &pll->hw;
+ ret = clk_hw_register(NULL, &pll->hw);
+ if (ret) {
+ pr_err("failed to register pll clock %s : %d\n", name, ret);
+ kfree(pll);
+ pll_hw_clk = ERR_PTR(ret);
+ }
+
+ return pll_hw_clk;
+}
+
+static void visconti_pll_add_lookup(struct visconti_pll_provider *ctx,
+ struct clk_hw *hw_clk,
+ unsigned int id)
+{
+ if (id)
+ ctx->clk_data.hws[id] = hw_clk;
+}
+
+void __init visconti_register_plls(struct visconti_pll_provider *ctx,
+ const struct visconti_pll_info *list,
+ unsigned int nr_plls,
+ spinlock_t *lock)
+{
+ int idx;
+
+ for (idx = 0; idx < nr_plls; idx++, list++) {
+ struct clk_hw *clk;
+
+ clk = visconti_register_pll(ctx,
+ list->name,
+ list->parent,
+ list->base_reg,
+ list->rate_table,
+ lock);
+ if (IS_ERR(clk)) {
+ pr_err("failed to register clock %s\n", list->name);
+ continue;
+ }
+
+ visconti_pll_add_lookup(ctx, clk, list->id);
+ }
+}
+
+struct visconti_pll_provider * __init visconti_init_pll(struct device_node *np,
+ void __iomem *base,
+ unsigned long nr_plls)
+{
+ struct visconti_pll_provider *ctx;
+ int i;
+
+ ctx = kzalloc(struct_size(ctx, clk_data.hws, nr_plls), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < nr_plls; ++i)
+ ctx->clk_data.hws[i] = ERR_PTR(-ENOENT);
+
+ ctx->node = np;
+ ctx->reg_base = base;
+ ctx->clk_data.num = nr_plls;
+
+ return ctx;
+}
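
visconti_pll_round_rate() walks the descending table and returns the first
entry not above the request, falling back to the smallest supported rate.
For the pipll0 table {840, 780, 600} MHz, hypothetical requests resolve as:

    /*
     * request 800 MHz -> 780 MHz (first table rate <= request)
     * request 900 MHz -> 840 MHz
     * request 500 MHz -> 600 MHz (below the table: minimum returned)
     */
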
diff --git a/drivers/clk/visconti/pll.h b/drivers/clk/visconti/pll.h
new file mode 100644
index 000000000000..16dae35ab370
--- /dev/null
+++ b/drivers/clk/visconti/pll.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#ifndef _VISCONTI_PLL_H_
+#define _VISCONTI_PLL_H_
+
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/spinlock.h>
+
+struct visconti_pll_provider {
+ void __iomem *reg_base;
+ struct regmap *regmap;
+ struct clk_hw_onecell_data clk_data;
+ struct device_node *node;
+};
+
+#define VISCONTI_PLL_RATE(_rate, _dacen, _dsmen, \
+ _refdiv, _intin, _fracin, _postdiv1, _postdiv2) \
+{ \
+ .rate = _rate, \
+ .dacen = _dacen, \
+ .dsmen = _dsmen, \
+ .refdiv = _refdiv, \
+ .intin = _intin, \
+ .fracin = _fracin, \
+ .postdiv1 = _postdiv1, \
+ .postdiv2 = _postdiv2 \
+}
+
+struct visconti_pll_rate_table {
+ unsigned long rate;
+ unsigned int dacen;
+ unsigned int dsmen;
+ unsigned int refdiv;
+ unsigned long intin;
+ unsigned long fracin;
+ unsigned int postdiv1;
+ unsigned int postdiv2;
+};
+
+struct visconti_pll_info {
+ unsigned int id;
+ const char *name;
+ const char *parent;
+ unsigned long base_reg;
+ const struct visconti_pll_rate_table *rate_table;
+};
+
+struct visconti_pll_provider * __init visconti_init_pll(struct device_node *np,
+ void __iomem *base,
+ unsigned long nr_plls);
+void visconti_register_plls(struct visconti_pll_provider *ctx,
+ const struct visconti_pll_info *list,
+ unsigned int nr_plls, spinlock_t *lock);
+
+#endif /* _VISCONTI_PLL_H_ */
diff --git a/drivers/clk/visconti/reset.c b/drivers/clk/visconti/reset.c
new file mode 100644
index 000000000000..e3c3d7804612
--- /dev/null
+++ b/drivers/clk/visconti/reset.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Toshiba Visconti ARM SoC reset controller
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ * Copyright (c) 2021 Toshiba Electronic Devices & Storage Corporation
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "reset.h"
+
+static inline struct visconti_reset *to_visconti_reset(struct reset_controller_dev *rcdev)
+{
+ return container_of(rcdev, struct visconti_reset, rcdev);
+}
+
+static int visconti_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct visconti_reset *reset = to_visconti_reset(rcdev);
+ const struct visconti_reset_data *data = &reset->resets[id];
+ u32 rst = BIT(data->rs_idx);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(reset->lock, flags);
+ ret = regmap_update_bits(reset->regmap, data->rson_offset, rst, rst);
+ spin_unlock_irqrestore(reset->lock, flags);
+
+ return ret;
+}
+
+static int visconti_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct visconti_reset *reset = to_visconti_reset(rcdev);
+ const struct visconti_reset_data *data = &reset->resets[id];
+ u32 rst = BIT(data->rs_idx);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(reset->lock, flags);
+ ret = regmap_update_bits(reset->regmap, data->rsoff_offset, rst, rst);
+ spin_unlock_irqrestore(reset->lock, flags);
+
+ return ret;
+}
+
+static int visconti_reset_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ visconti_reset_assert(rcdev, id);
+ udelay(1);
+ visconti_reset_deassert(rcdev, id);
+
+ return 0;
+}
+
+static int visconti_reset_status(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct visconti_reset *reset = to_visconti_reset(rcdev);
+ const struct visconti_reset_data *data = &reset->resets[id];
+ unsigned long flags;
+ u32 reg;
+ int ret;
+
+ spin_lock_irqsave(reset->lock, flags);
+ ret = regmap_read(reset->regmap, data->rson_offset, &reg);
+ spin_unlock_irqrestore(reset->lock, flags);
+ if (ret)
+ return ret;
+
+ return !(reg & BIT(data->rs_idx));
+}
+
+const struct reset_control_ops visconti_reset_ops = {
+ .assert = visconti_reset_assert,
+ .deassert = visconti_reset_deassert,
+ .reset = visconti_reset_reset,
+ .status = visconti_reset_status,
+};
+
+int visconti_register_reset_controller(struct device *dev,
+ struct regmap *regmap,
+ const struct visconti_reset_data *resets,
+ unsigned int num_resets,
+ const struct reset_control_ops *reset_ops,
+ spinlock_t *lock)
+{
+ struct visconti_reset *reset;
+
+ reset = devm_kzalloc(dev, sizeof(*reset), GFP_KERNEL);
+ if (!reset)
+ return -ENOMEM;
+
+ reset->regmap = regmap;
+ reset->resets = resets;
+ reset->rcdev.ops = reset_ops;
+ reset->rcdev.nr_resets = num_resets;
+ reset->rcdev.of_node = dev->of_node;
+ reset->lock = lock;
+
+ return devm_reset_controller_register(dev, &reset->rcdev);
+}
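
Resets follow the same write-one-to-set pairing as the clock gates: RSON
asserts a line and the separate RSOFF register deasserts it. A hypothetical
consumer, assuming a DT node that references this controller:

    #include <linux/device.h>
    #include <linux/reset.h>

    static int example_block_reset(struct device *dev)
    {
            struct reset_control *rc;

            rc = devm_reset_control_get_exclusive(dev, NULL);
            if (IS_ERR(rc))
                    return PTR_ERR(rc);

            /* assert -> ~1us delay -> deassert (visconti_reset_reset) */
            return reset_control_reset(rc);
    }
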
diff --git a/drivers/clk/visconti/reset.h b/drivers/clk/visconti/reset.h
new file mode 100644
index 000000000000..229dffcbdc98
--- /dev/null
+++ b/drivers/clk/visconti/reset.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Toshiba ARM SoC reset controller driver
+ *
+ * Copyright (c) 2021 TOSHIBA CORPORATION
+ *
+ * Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp>
+ */
+
+#ifndef _VISCONTI_RESET_H_
+#define _VISCONTI_RESET_H_
+
+#include <linux/reset-controller.h>
+
+struct visconti_reset_data {
+ u32 rson_offset;
+ u32 rsoff_offset;
+ u8 rs_idx;
+};
+
+struct visconti_reset {
+ struct reset_controller_dev rcdev;
+ struct regmap *regmap;
+ const struct visconti_reset_data *resets;
+ spinlock_t *lock;
+};
+
+extern const struct reset_control_ops visconti_reset_ops;
+
+int visconti_register_reset_controller(struct device *dev,
+ struct regmap *regmap,
+ const struct visconti_reset_data *resets,
+ unsigned int num_resets,
+ const struct reset_control_ops *reset_ops,
+ spinlock_t *lock);
+#endif /* _VISCONTI_RESET_H_ */
diff --git a/drivers/clk/x86/clk-fch.c b/drivers/clk/x86/clk-fch.c
index 8f7c5142b0f0..fdc060e75839 100644
--- a/drivers/clk/x86/clk-fch.c
+++ b/drivers/clk/x86/clk-fch.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: MIT
/*
- * clock framework for AMD Stoney based clocks
+ * clock framework for AMD FCH controller block
*
* Copyright 2018 Advanced Micro Devices, Inc.
*/
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#include <linux/pci.h>
#include <linux/platform_data/clk-fch.h>
#include <linux/platform_device.h>
@@ -26,22 +27,37 @@
#define ST_CLK_GATE 3
#define ST_MAX_CLKS 4
-#define RV_CLK_48M 0
-#define RV_CLK_GATE 1
-#define RV_MAX_CLKS 2
+#define CLK_48M_FIXED 0
+#define CLK_GATE_FIXED 1
+#define CLK_MAX_FIXED 2
+
+/* List of supported CPU IDs for the clk mux with 25 MHz clk support */
+#define AMD_CPU_ID_ST 0x1576
static const char * const clk_oscout1_parents[] = { "clk48MHz", "clk25MHz" };
static struct clk_hw *hws[ST_MAX_CLKS];
+static const struct pci_device_id fch_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_ST) },
+ { }
+};
+
static int fch_clk_probe(struct platform_device *pdev)
{
struct fch_clk_data *fch_data;
+ struct pci_dev *rdev;
fch_data = dev_get_platdata(&pdev->dev);
if (!fch_data || !fch_data->base)
return -EINVAL;
- if (!fch_data->is_rv) {
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (!rdev) {
+ dev_err(&pdev->dev, "FCH device not found\n");
+ return -ENODEV;
+ }
+
+ if (pci_match_id(fch_pci_ids, rdev)) {
hws[ST_CLK_48M] = clk_hw_register_fixed_rate(NULL, "clk48MHz",
NULL, 0, 48000000);
hws[ST_CLK_25M] = clk_hw_register_fixed_rate(NULL, "clk25MHz",
@@ -59,34 +75,38 @@ static int fch_clk_probe(struct platform_device *pdev)
OSCCLKENB, CLK_GATE_SET_TO_DISABLE, NULL);
devm_clk_hw_register_clkdev(&pdev->dev, hws[ST_CLK_GATE],
- "oscout1", NULL);
+ fch_data->name, NULL);
} else {
- hws[RV_CLK_48M] = clk_hw_register_fixed_rate(NULL, "clk48MHz",
+ hws[CLK_48M_FIXED] = clk_hw_register_fixed_rate(NULL, "clk48MHz",
NULL, 0, 48000000);
- hws[RV_CLK_GATE] = clk_hw_register_gate(NULL, "oscout1",
+ hws[CLK_GATE_FIXED] = clk_hw_register_gate(NULL, "oscout1",
"clk48MHz", 0, fch_data->base + MISCCLKCNTL1,
- OSCCLKENB, CLK_GATE_SET_TO_DISABLE, NULL);
+ OSCCLKENB, 0, NULL);
- devm_clk_hw_register_clkdev(&pdev->dev, hws[RV_CLK_GATE],
- "oscout1", NULL);
+ devm_clk_hw_register_clkdev(&pdev->dev, hws[CLK_GATE_FIXED],
+ fch_data->name, NULL);
}
+ pci_dev_put(rdev);
return 0;
}
static int fch_clk_remove(struct platform_device *pdev)
{
int i, clks;
- struct fch_clk_data *fch_data;
+ struct pci_dev *rdev;
- fch_data = dev_get_platdata(&pdev->dev);
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (!rdev)
+ return -ENODEV;
- clks = fch_data->is_rv ? RV_MAX_CLKS : ST_MAX_CLKS;
+ clks = pci_match_id(fch_pci_ids, rdev) ? CLK_MAX_FIXED : ST_MAX_CLKS;
for (i = 0; i < clks; i++)
clk_hw_unregister(hws[i]);
+ pci_dev_put(rdev);
return 0;
}
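
The Stoney/Raven split now keys off the root-complex PCI ID instead of a
platform-data flag: pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0)) grabs
device 00:00.0 and pci_match_id() checks it against the Stoney table. The
same test in isolation (a sketch, reusing fch_pci_ids from above):

    #include <linux/pci.h>

    static bool example_is_stoney(void)
    {
            struct pci_dev *rdev;
            bool st;

            rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
            if (!rdev)
                    return false;

            st = !!pci_match_id(fch_pci_ids, rdev);
            pci_dev_put(rdev);

            return st;
    }
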
diff --git a/drivers/clk/zynq/pll.c b/drivers/clk/zynq/pll.c
index 54f4184de89a..e5f8fb704df2 100644
--- a/drivers/clk/zynq/pll.c
+++ b/drivers/clk/zynq/pll.c
@@ -12,7 +12,7 @@
#include <linux/io.h>
/**
- * struct zynq_pll
+ * struct zynq_pll - pll clock
* @hw: Handle between common and hardware-specific interfaces
* @pll_ctrl: PLL control register
* @pll_status: PLL status register
@@ -46,7 +46,7 @@ struct zynq_pll {
* @hw: Handle between common and hardware-specific interfaces
* @rate: Desired clock frequency
* @prate: Clock frequency of parent clock
- * Returns frequency closest to @rate the hardware can generate.
+ * Return: frequency closest to @rate the hardware can generate.
*/
static long zynq_pll_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *prate)
@@ -66,7 +66,7 @@ static long zynq_pll_round_rate(struct clk_hw *hw, unsigned long rate,
* zynq_pll_recalc_rate() - Recalculate clock frequency
* @hw: Handle between common and hardware-specific interfaces
* @parent_rate: Clock frequency of parent clock
- * Returns current clock frequency.
+ * Return: current clock frequency.
*/
static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
unsigned long parent_rate)
@@ -87,7 +87,7 @@ static unsigned long zynq_pll_recalc_rate(struct clk_hw *hw,
/**
* zynq_pll_is_enabled - Check if a clock is enabled
* @hw: Handle between common and hardware-specific interfaces
- * Returns 1 if the clock is enabled, 0 otherwise.
+ * Return: 1 if the clock is enabled, 0 otherwise.
*
* Not sure this is a good idea, but since disabled means bypassed for
* this clock implementation we say we are always enabled.
@@ -110,7 +110,7 @@ static int zynq_pll_is_enabled(struct clk_hw *hw)
/**
* zynq_pll_enable - Enable clock
* @hw: Handle between common and hardware-specific interfaces
- * Returns 0 on success
+ * Return: 0 on success
*/
static int zynq_pll_enable(struct clk_hw *hw)
{
@@ -179,7 +179,7 @@ static const struct clk_ops zynq_pll_ops = {
* @pll_status: Pointer to PLL status register
* @lock_index: Bit index to this PLL's lock status bit in @pll_status
* @lock: Register lock
- * Returns handle to the registered clock.
+ * Return: handle to the registered clock.
*/
struct clk *clk_register_zynq_pll(const char *name, const char *parent,
void __iomem *pll_ctrl, void __iomem *pll_status, u8 lock_index,
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index f65e31bab9ae..cfb8ea0df3b1 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -510,7 +510,8 @@ config SH_TIMER_MTU2
This hardware comes with 16-bit timer registers.
config RENESAS_OSTM
- bool "Renesas OSTM timer driver" if COMPILE_TEST
+ bool "Renesas OSTM timer driver"
+ depends on ARCH_RENESAS || COMPILE_TEST
select CLKSRC_MMIO
select TIMER_OF
help
@@ -671,6 +672,15 @@ config MILBEAUT_TIMER
help
Enables the support for Milbeaut timer driver.
+config MSC313E_TIMER
+ bool "MSC313E timer driver" if COMPILE_TEST
+ select TIMER_OF
+ select CLKSRC_MMIO
+ help
+ Enables support for the MStar MSC313E timer driver.
+ This provides access to multiple interrupt-generating,
+ programmable 32-bit free-running incrementing counters.
+
config INGENIC_TIMER
bool "Clocksource/timer using the TCU in Ingenic JZ SoCs"
default MACH_INGENIC
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index c17ee32a7151..fa5f624eadb6 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -88,3 +88,4 @@ obj-$(CONFIG_CSKY_MP_TIMER) += timer-mp-csky.o
obj-$(CONFIG_GX6605S_TIMER) += timer-gx6605s.o
obj-$(CONFIG_HYPERV_TIMER) += hyperv_timer.o
obj-$(CONFIG_MICROCHIP_PIT64B) += timer-microchip-pit64b.o
+obj-$(CONFIG_MSC313E_TIMER) += timer-msc313e.o
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 5e3e96d3d1b9..6db3d5511b0f 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -467,7 +467,7 @@ static int exynos4_mct_starting_cpu(unsigned int cpu)
evt->tick_resume = set_state_shutdown;
evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
CLOCK_EVT_FEAT_PERCPU;
- evt->rating = MCT_CLKEVENTS_RATING,
+ evt->rating = MCT_CLKEVENTS_RATING;
exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
@@ -504,11 +504,14 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
return 0;
}
-static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base)
+static int __init exynos4_timer_resources(struct device_node *np)
{
- int err, cpu;
struct clk *mct_clk, *tick_clk;
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: unable to ioremap mct address space\n", __func__);
+
tick_clk = of_clk_get_by_name(np, "fin_pll");
if (IS_ERR(tick_clk))
panic("%s: unable to determine tick clock rate\n", __func__);
@@ -519,9 +522,27 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem *
panic("%s: unable to retrieve mct clock instance\n", __func__);
clk_prepare_enable(mct_clk);
- reg_base = base;
- if (!reg_base)
- panic("%s: unable to ioremap mct address space\n", __func__);
+ return 0;
+}
+
+static int __init exynos4_timer_interrupts(struct device_node *np,
+ unsigned int int_type)
+{
+ int nr_irqs, i, err, cpu;
+
+ mct_int_type = int_type;
+
+ /* This driver uses only one global timer interrupt */
+ mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);
+
+ /*
+ * Find out the number of local irqs specified. The local
+ * timer irqs follow the four global timer irqs in the
+ * interrupts property.
+ */
+ nr_irqs = of_irq_count(np);
+ for (i = MCT_L0_IRQ; i < nr_irqs; i++)
+ mct_irqs[i] = irq_of_parse_and_map(np, i);
if (mct_int_type == MCT_INT_PPI) {
@@ -581,24 +602,13 @@ out_irq:
static int __init mct_init_dt(struct device_node *np, unsigned int int_type)
{
- u32 nr_irqs, i;
int ret;
- mct_int_type = int_type;
-
- /* This driver uses only one global timer interrupt */
- mct_irqs[MCT_G0_IRQ] = irq_of_parse_and_map(np, MCT_G0_IRQ);
-
- /*
- * Find out the number of local irqs specified. The local
- * timer irqs are specified after the four global timer
- * irqs are specified.
- */
- nr_irqs = of_irq_count(np);
- for (i = MCT_L0_IRQ; i < nr_irqs; i++)
- mct_irqs[i] = irq_of_parse_and_map(np, i);
+ ret = exynos4_timer_resources(np);
+ if (ret)
+ return ret;
- ret = exynos4_timer_resources(np, of_iomap(np, 0));
+ ret = exynos4_timer_interrupts(np, int_type);
if (ret)
return ret;
diff --git a/drivers/clocksource/renesas-ostm.c b/drivers/clocksource/renesas-ostm.c
index 3d06ba66008c..21d1392637b8 100644
--- a/drivers/clocksource/renesas-ostm.c
+++ b/drivers/clocksource/renesas-ostm.c
@@ -9,6 +9,8 @@
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
@@ -159,6 +161,7 @@ static int __init ostm_init_clkevt(struct timer_of *to)
static int __init ostm_init(struct device_node *np)
{
+ struct reset_control *rstc;
struct timer_of *to;
int ret;
@@ -166,6 +169,14 @@ static int __init ostm_init(struct device_node *np)
if (!to)
return -ENOMEM;
+ rstc = of_reset_control_get_optional_exclusive(np, NULL);
+ if (IS_ERR(rstc)) {
+ ret = PTR_ERR(rstc);
+ goto err_free;
+ }
+
+ reset_control_deassert(rstc);
+
to->flags = TIMER_OF_BASE | TIMER_OF_CLOCK;
if (system_clock) {
/*
@@ -178,7 +189,7 @@ static int __init ostm_init(struct device_node *np)
ret = timer_of_init(np, to);
if (ret)
- goto err_free;
+ goto err_reset;
/*
* First probed device will be used as system clocksource. Any
@@ -203,9 +214,35 @@ static int __init ostm_init(struct device_node *np)
err_cleanup:
timer_of_cleanup(to);
+err_reset:
+ reset_control_assert(rstc);
+ reset_control_put(rstc);
err_free:
kfree(to);
return ret;
}
TIMER_OF_DECLARE(ostm, "renesas,ostm", ostm_init);
+
+#ifdef CONFIG_ARCH_R9A07G044
+static int __init ostm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ return ostm_init(dev->of_node);
+}
+
+static const struct of_device_id ostm_of_table[] = {
+ { .compatible = "renesas,ostm", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver ostm_device_driver = {
+ .driver = {
+ .name = "renesas_ostm",
+ .of_match_table = of_match_ptr(ostm_of_table),
+ .suppress_bind_attrs = true,
+ },
+};
+builtin_platform_driver_probe(ostm_device_driver, ostm_probe);
+#endif
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
index 18b90fc56bfc..55a8e198d2a1 100644
--- a/drivers/clocksource/timer-imx-sysctr.c
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -20,8 +20,8 @@
#define SYS_CTR_CLK_DIV 0x3
-static void __iomem *sys_ctr_base;
-static u32 cmpcr;
+static void __iomem *sys_ctr_base __ro_after_init;
+static u32 cmpcr __ro_after_init;
static void sysctr_timer_enable(bool enable)
{
@@ -119,7 +119,7 @@ static struct timer_of to_sysctr = {
static void __init sysctr_clockevent_init(void)
{
- to_sysctr.clkevt.cpumask = cpumask_of(0);
+ to_sysctr.clkevt.cpumask = cpu_possible_mask;
clockevents_config_and_register(&to_sysctr.clkevt,
timer_of_rate(&to_sysctr),
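
__ro_after_init keeps a variable writable through the __init phase and moves
it into the read-only mapping once boot completes. The same pattern in
isolation (a minimal sketch):

    /* Written once during boot, read-only afterwards */
    static void __iomem *example_base __ro_after_init;

    static int __init example_setup(void __iomem *base)
    {
            example_base = base;    /* last legal write: init time */
            return 0;
    }
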
diff --git a/drivers/clocksource/timer-msc313e.c b/drivers/clocksource/timer-msc313e.c
new file mode 100644
index 000000000000..54c54ca7c786
--- /dev/null
+++ b/drivers/clocksource/timer-msc313e.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MStar timer driver
+ *
+ * Copyright (C) 2021 Daniel Palmer
+ * Copyright (C) 2021 Romain Perier
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#ifdef CONFIG_ARM
+#include <linux/delay.h>
+#endif
+
+#include "timer-of.h"
+
+#define TIMER_NAME "msc313e_timer"
+
+#define MSC313E_REG_CTRL 0x00
+#define MSC313E_REG_CTRL_TIMER_EN BIT(0)
+#define MSC313E_REG_CTRL_TIMER_TRIG BIT(1)
+#define MSC313E_REG_CTRL_TIMER_INT_EN BIT(8)
+#define MSC313E_REG_TIMER_MAX_LOW 0x08
+#define MSC313E_REG_TIMER_MAX_HIGH 0x0c
+#define MSC313E_REG_COUNTER_LOW 0x10
+#define MSC313E_REG_COUNTER_HIGH 0x14
+#define MSC313E_REG_TIMER_DIVIDE 0x18
+
+#define MSC313E_CLK_DIVIDER 9
+#define TIMER_SYNC_TICKS 3
+
+#ifdef CONFIG_ARM
+struct msc313e_delay {
+ void __iomem *base;
+ struct delay_timer delay;
+};
+static struct msc313e_delay msc313e_delay;
+#endif
+
+static void __iomem *msc313e_clksrc;
+
+static void msc313e_timer_stop(void __iomem *base)
+{
+ writew(0, base + MSC313E_REG_CTRL);
+}
+
+static void msc313e_timer_start(void __iomem *base, bool periodic)
+{
+ u16 reg;
+
+ reg = readw(base + MSC313E_REG_CTRL);
+ if (periodic)
+ reg |= MSC313E_REG_CTRL_TIMER_EN;
+ else
+ reg |= MSC313E_REG_CTRL_TIMER_TRIG;
+ writew(reg | MSC313E_REG_CTRL_TIMER_INT_EN, base + MSC313E_REG_CTRL);
+}
+
+static void msc313e_timer_setup(void __iomem *base, unsigned long delay)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ writew(delay >> 16, base + MSC313E_REG_TIMER_MAX_HIGH);
+ writew(delay & 0xffff, base + MSC313E_REG_TIMER_MAX_LOW);
+ local_irq_restore(flags);
+}
+
+static unsigned long msc313e_timer_current_value(void __iomem *base)
+{
+ unsigned long flags;
+ u16 l, h;
+
+ local_irq_save(flags);
+ l = readw(base + MSC313E_REG_COUNTER_LOW);
+ h = readw(base + MSC313E_REG_COUNTER_HIGH);
+ local_irq_restore(flags);
+
+ return (((u32)h) << 16 | l);
+}
+
+static int msc313e_timer_clkevt_shutdown(struct clock_event_device *evt)
+{
+ struct timer_of *timer = to_timer_of(evt);
+
+ msc313e_timer_stop(timer_of_base(timer));
+
+ return 0;
+}
+
+static int msc313e_timer_clkevt_set_oneshot(struct clock_event_device *evt)
+{
+ struct timer_of *timer = to_timer_of(evt);
+
+ msc313e_timer_stop(timer_of_base(timer));
+ msc313e_timer_start(timer_of_base(timer), false);
+
+ return 0;
+}
+
+static int msc313e_timer_clkevt_set_periodic(struct clock_event_device *evt)
+{
+ struct timer_of *timer = to_timer_of(evt);
+
+ msc313e_timer_stop(timer_of_base(timer));
+ msc313e_timer_setup(timer_of_base(timer), timer_of_period(timer));
+ msc313e_timer_start(timer_of_base(timer), true);
+
+ return 0;
+}
+
+static int msc313e_timer_clkevt_next_event(unsigned long evt, struct clock_event_device *clkevt)
+{
+ struct timer_of *timer = to_timer_of(clkevt);
+
+ msc313e_timer_stop(timer_of_base(timer));
+ msc313e_timer_setup(timer_of_base(timer), evt);
+ msc313e_timer_start(timer_of_base(timer), false);
+
+ return 0;
+}
+
+static irqreturn_t msc313e_timer_clkevt_irq(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static u64 msc313e_timer_clksrc_read(struct clocksource *cs)
+{
+ return msc313e_timer_current_value(msc313e_clksrc) & cs->mask;
+}
+
+#ifdef CONFIG_ARM
+static unsigned long msc313e_read_delay_timer_read(void)
+{
+ return msc313e_timer_current_value(msc313e_delay.base);
+}
+#endif
+
+static u64 msc313e_timer_sched_clock_read(void)
+{
+ return msc313e_timer_current_value(msc313e_clksrc);
+}
+
+static struct clock_event_device msc313e_clkevt = {
+ .name = TIMER_NAME,
+ .rating = 300,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_state_shutdown = msc313e_timer_clkevt_shutdown,
+ .set_state_periodic = msc313e_timer_clkevt_set_periodic,
+ .set_state_oneshot = msc313e_timer_clkevt_set_oneshot,
+ .tick_resume = msc313e_timer_clkevt_shutdown,
+ .set_next_event = msc313e_timer_clkevt_next_event,
+};
+
+static int __init msc313e_clkevt_init(struct device_node *np)
+{
+ int ret;
+ struct timer_of *to;
+
+ to = kzalloc(sizeof(*to), GFP_KERNEL);
+ if (!to)
+ return -ENOMEM;
+
+ to->flags = TIMER_OF_IRQ | TIMER_OF_CLOCK | TIMER_OF_BASE;
+ to->of_irq.handler = msc313e_timer_clkevt_irq;
+ ret = timer_of_init(np, to);
+ if (ret)
+ return ret;
+
+ if (of_device_is_compatible(np, "sstar,ssd20xd-timer")) {
+ to->of_clk.rate = clk_get_rate(to->of_clk.clk) / MSC313E_CLK_DIVIDER;
+ to->of_clk.period = DIV_ROUND_UP(to->of_clk.rate, HZ);
+ writew(MSC313E_CLK_DIVIDER - 1, timer_of_base(to) + MSC313E_REG_TIMER_DIVIDE);
+ }
+
+ msc313e_clkevt.cpumask = cpu_possible_mask;
+ msc313e_clkevt.irq = to->of_irq.irq;
+ to->clkevt = msc313e_clkevt;
+
+ clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
+ TIMER_SYNC_TICKS, 0xffffffff);
+ return 0;
+}
+
+static int __init msc313e_clksrc_init(struct device_node *np)
+{
+ struct timer_of to = { 0 };
+ int ret;
+ u16 reg;
+
+ to.flags = TIMER_OF_BASE | TIMER_OF_CLOCK;
+ ret = timer_of_init(np, &to);
+ if (ret)
+ return ret;
+
+ msc313e_clksrc = timer_of_base(&to);
+ reg = readw(msc313e_clksrc + MSC313E_REG_CTRL);
+ reg |= MSC313E_REG_CTRL_TIMER_EN;
+ writew(reg, msc313e_clksrc + MSC313E_REG_CTRL);
+
+#ifdef CONFIG_ARM
+ msc313e_delay.base = timer_of_base(&to);
+ msc313e_delay.delay.read_current_timer = msc313e_read_delay_timer_read;
+ msc313e_delay.delay.freq = timer_of_rate(&to);
+
+ register_current_timer_delay(&msc313e_delay.delay);
+#endif
+
+ sched_clock_register(msc313e_timer_sched_clock_read, 32, timer_of_rate(&to));
+ return clocksource_mmio_init(timer_of_base(&to), TIMER_NAME, timer_of_rate(&to), 300, 32,
+ msc313e_timer_clksrc_read);
+}
+
+static int __init msc313e_timer_init(struct device_node *np)
+{
+ int ret = 0;
+ static int num_called;
+
+ switch (num_called) {
+ case 0:
+ ret = msc313e_clksrc_init(np);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ ret = msc313e_clkevt_init(np);
+ if (ret)
+ return ret;
+ break;
+ }
+
+ num_called++;
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(msc313, "mstar,msc313e-timer", msc313e_timer_init);
+TIMER_OF_DECLARE(ssd20xd, "sstar,ssd20xd-timer", msc313e_timer_init);
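
msc313e_timer_init() hands the first matching DT node to the clocksource path
and every later node to the clockevent path, so one timer instance feeds
sched_clock while another drives tick events. On ssd20xd the clockevent also
programs the input divider; with a hypothetical 432 MHz input (the real rate
comes from the DT clock):

    /*
     * MSC313E_CLK_DIVIDER = 9, and the register takes (divider - 1):
     *   writew(8, base + MSC313E_REG_TIMER_DIVIDE);
     *   rate = 432 MHz / 9 = 48 MHz
     */
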
diff --git a/drivers/clocksource/timer-pistachio.c b/drivers/clocksource/timer-pistachio.c
index 6f37181a8c63..69c069e6f0a2 100644
--- a/drivers/clocksource/timer-pistachio.c
+++ b/drivers/clocksource/timer-pistachio.c
@@ -71,7 +71,8 @@ static u64 notrace
pistachio_clocksource_read_cycles(struct clocksource *cs)
{
struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs);
- u32 counter, overflow;
+ __maybe_unused u32 overflow;
+ u32 counter;
unsigned long flags;
/*
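[Editorial note: the timer-pistachio change above silences gcc's -Wunused-but-set-variable by tagging the variable __maybe_unused instead of deleting it, because the register read itself is still wanted; the comment opening above hints at the required read sequence. A hypothetical stand-alone illustration of the attribute, with made-up register offsets:]

#include <linux/compiler.h>
#include <linux/io.h>
#include <linux/types.h>

static u32 example_read_count(void __iomem *base)
{
	/* The STATUS read has a side effect (it latches COUNT), so it
	 * must stay even though its value is never consumed; the
	 * attribute tells the compiler this is intentional. */
	__maybe_unused u32 status;

	status = readl(base + 0x04);
	return readl(base + 0x00);
}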
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index b6f97960d8ee..5c40ca1d4740 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -241,7 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void)
bool quirk_unreliable_oscillator = false;
/* Quirk unreliable 32 kHz oscillator with incomplete dts */
- if (of_machine_is_compatible("ti,omap3-beagle") ||
+ if (of_machine_is_compatible("ti,omap3-beagle-ab4") ||
of_machine_is_compatible("timll,omap3-devkit8000")) {
quirk_unreliable_oscillator = true;
counter_32k = -ENODEV;
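[Editorial note: the timer-ti-dm-systimer change above narrows the 32 kHz oscillator quirk from all BeagleBoards to only the AB4 revision by matching a more specific top-level compatible. A short sketch of that idiom, assuming the same compatible strings as the hunk:]

#include <linux/of.h>

/* Apply the quirk only on boards whose root node carries one of the
 * affected compatibles, i.e. only the known-bad hardware revisions. */
static bool example_needs_32k_quirk(void)
{
	return of_machine_is_compatible("ti,omap3-beagle-ab4") ||
	       of_machine_is_compatible("timll,omap3-devkit8000");
}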
diff --git a/drivers/comedi/comedi_buf.c b/drivers/comedi/comedi_buf.c
index 06bfc859ab31..393966c09740 100644
--- a/drivers/comedi/comedi_buf.c
+++ b/drivers/comedi/comedi_buf.c
@@ -9,8 +9,7 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
-
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "comedi_internal.h"
#ifdef PAGE_KERNEL_NOCACHE
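[Editorial note: the long run of comedi hunks that follows makes one mechanical substitution per file: the COMEDI core headers move out of drivers/comedi/ into include/linux/comedi/, so drivers stop relying on quote-includes resolved relative to the including file. The change in isolation:]

/* Before: found relative to the source file, so drivers nested one
 * level deeper needed fragile paths such as "../comedidev.h". */
/* #include "comedidev.h" */

/* After: found through the kernel's global include path from anywhere
 * in the tree. */
#include <linux/comedi/comedidev.h>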
diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c
index 763cea8418f8..55a0cae04b8d 100644
--- a/drivers/comedi/comedi_fops.c
+++ b/drivers/comedi/comedi_fops.c
@@ -23,7 +23,7 @@
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/fs.h>
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
#include <linux/cdev.h>
#include <linux/io.h>
diff --git a/drivers/comedi/comedi_pci.c b/drivers/comedi/comedi_pci.c
index 54739af7eb71..cc2581902195 100644
--- a/drivers/comedi/comedi_pci.c
+++ b/drivers/comedi/comedi_pci.c
@@ -9,8 +9,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/**
* comedi_to_pci_dev() - Return PCI device attached to COMEDI device
diff --git a/drivers/comedi/comedi_pcmcia.c b/drivers/comedi/comedi_pcmcia.c
index bb273bb202e6..c53aad0fc2ce 100644
--- a/drivers/comedi/comedi_pcmcia.c
+++ b/drivers/comedi/comedi_pcmcia.c
@@ -9,8 +9,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
-
-#include "comedi_pcmcia.h"
+#include <linux/comedi/comedi_pcmcia.h>
/**
* comedi_to_pcmcia_dev() - Return PCMCIA device attached to COMEDI device
diff --git a/drivers/comedi/comedi_usb.c b/drivers/comedi/comedi_usb.c
index eea8ebf32ed0..d11ea148ebf8 100644
--- a/drivers/comedi/comedi_usb.c
+++ b/drivers/comedi/comedi_usb.c
@@ -8,8 +8,7 @@
*/
#include <linux/module.h>
-
-#include "comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
/**
* comedi_to_usb_interface() - Return USB interface attached to COMEDI device
diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c
index 750a6ff3c03c..8eb1f699a857 100644
--- a/drivers/comedi/drivers.c
+++ b/drivers/comedi/drivers.c
@@ -17,8 +17,7 @@
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
-
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "comedi_internal.h"
struct comedi_driver *comedi_drivers;
diff --git a/drivers/comedi/drivers/8255.c b/drivers/comedi/drivers/8255.c
index e23335c75867..ced8ea09d4fa 100644
--- a/drivers/comedi/drivers/8255.c
+++ b/drivers/comedi/drivers/8255.c
@@ -40,9 +40,8 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
static int dev_8255_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
diff --git a/drivers/comedi/drivers/8255_pci.c b/drivers/comedi/drivers/8255_pci.c
index 5a810f0e532a..0fec048e3a53 100644
--- a/drivers/comedi/drivers/8255_pci.c
+++ b/drivers/comedi/drivers/8255_pci.c
@@ -53,10 +53,8 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
enum pci_8255_boardid {
BOARD_ADLINK_PCI7224,
diff --git a/drivers/comedi/drivers/addi_apci_1032.c b/drivers/comedi/drivers/addi_apci_1032.c
index 81a246fbcc01..8eec6d9402de 100644
--- a/drivers/comedi/drivers/addi_apci_1032.c
+++ b/drivers/comedi/drivers/addi_apci_1032.c
@@ -63,8 +63,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/comedi/comedi_pci.h>
-#include "../comedi_pci.h"
#include "amcc_s5933.h"
/*
diff --git a/drivers/comedi/drivers/addi_apci_1500.c b/drivers/comedi/drivers/addi_apci_1500.c
index b04c15dcfb57..c94c78588889 100644
--- a/drivers/comedi/drivers/addi_apci_1500.c
+++ b/drivers/comedi/drivers/addi_apci_1500.c
@@ -14,8 +14,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/comedi/comedi_pci.h>
-#include "../comedi_pci.h"
#include "amcc_s5933.h"
#include "z8536.h"
diff --git a/drivers/comedi/drivers/addi_apci_1516.c b/drivers/comedi/drivers/addi_apci_1516.c
index 274ec9fb030c..3c48b72dad9d 100644
--- a/drivers/comedi/drivers/addi_apci_1516.c
+++ b/drivers/comedi/drivers/addi_apci_1516.c
@@ -14,8 +14,8 @@
*/
#include <linux/module.h>
+#include <linux/comedi/comedi_pci.h>
-#include "../comedi_pci.h"
#include "addi_watchdog.h"
/*
diff --git a/drivers/comedi/drivers/addi_apci_1564.c b/drivers/comedi/drivers/addi_apci_1564.c
index 06fc7ed96200..0cd40948bee7 100644
--- a/drivers/comedi/drivers/addi_apci_1564.c
+++ b/drivers/comedi/drivers/addi_apci_1564.c
@@ -68,8 +68,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/comedi/comedi_pci.h>
-#include "../comedi_pci.h"
#include "addi_tcw.h"
#include "addi_watchdog.h"
diff --git a/drivers/comedi/drivers/addi_apci_16xx.c b/drivers/comedi/drivers/addi_apci_16xx.c
index c306aa41df97..ec2c321d2431 100644
--- a/drivers/comedi/drivers/addi_apci_16xx.c
+++ b/drivers/comedi/drivers/addi_apci_16xx.c
@@ -14,8 +14,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/addi_apci_2032.c b/drivers/comedi/drivers/addi_apci_2032.c
index e9a2b37a4ae0..e048dfc3ec77 100644
--- a/drivers/comedi/drivers/addi_apci_2032.c
+++ b/drivers/comedi/drivers/addi_apci_2032.c
@@ -16,8 +16,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
+#include <linux/comedi/comedi_pci.h>
-#include "../comedi_pci.h"
#include "addi_watchdog.h"
/*
diff --git a/drivers/comedi/drivers/addi_apci_2200.c b/drivers/comedi/drivers/addi_apci_2200.c
index 4c5aee784bd9..00378c9dddc8 100644
--- a/drivers/comedi/drivers/addi_apci_2200.c
+++ b/drivers/comedi/drivers/addi_apci_2200.c
@@ -14,8 +14,8 @@
*/
#include <linux/module.h>
+#include <linux/comedi/comedi_pci.h>
-#include "../comedi_pci.h"
#include "addi_watchdog.h"
/*
diff --git a/drivers/comedi/drivers/addi_apci_3120.c b/drivers/comedi/drivers/addi_apci_3120.c
index 1ed3b33d1a30..28a242e69721 100644
--- a/drivers/comedi/drivers/addi_apci_3120.c
+++ b/drivers/comedi/drivers/addi_apci_3120.c
@@ -14,8 +14,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/comedi/comedi_pci.h>
-#include "../comedi_pci.h"
#include "amcc_s5933.h"
/*
diff --git a/drivers/comedi/drivers/addi_apci_3501.c b/drivers/comedi/drivers/addi_apci_3501.c
index f0c9642f3f1a..ecb5552f1785 100644
--- a/drivers/comedi/drivers/addi_apci_3501.c
+++ b/drivers/comedi/drivers/addi_apci_3501.c
@@ -41,8 +41,8 @@
*/
#include <linux/module.h>
+#include <linux/comedi/comedi_pci.h>
-#include "../comedi_pci.h"
#include "amcc_s5933.h"
/*
diff --git a/drivers/comedi/drivers/addi_apci_3xxx.c b/drivers/comedi/drivers/addi_apci_3xxx.c
index a90d59377e18..bc72273e6a29 100644
--- a/drivers/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/comedi/drivers/addi_apci_3xxx.c
@@ -15,8 +15,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#define CONV_UNIT_NS BIT(0)
#define CONV_UNIT_US BIT(1)
diff --git a/drivers/comedi/drivers/addi_watchdog.c b/drivers/comedi/drivers/addi_watchdog.c
index 69b323fb869f..ed87ab432020 100644
--- a/drivers/comedi/drivers/addi_watchdog.c
+++ b/drivers/comedi/drivers/addi_watchdog.c
@@ -10,7 +10,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "addi_tcw.h"
#include "addi_watchdog.h"
diff --git a/drivers/comedi/drivers/adl_pci6208.c b/drivers/comedi/drivers/adl_pci6208.c
index 9ae4cc523dd4..b27354a51f5c 100644
--- a/drivers/comedi/drivers/adl_pci6208.c
+++ b/drivers/comedi/drivers/adl_pci6208.c
@@ -24,8 +24,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI-6208/6216-GL register map
diff --git a/drivers/comedi/drivers/adl_pci7x3x.c b/drivers/comedi/drivers/adl_pci7x3x.c
index 8fc45638ff59..e9f22de9b6f1 100644
--- a/drivers/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/comedi/drivers/adl_pci7x3x.c
@@ -46,8 +46,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "plx9052.h"
diff --git a/drivers/comedi/drivers/adl_pci8164.c b/drivers/comedi/drivers/adl_pci8164.c
index d5e1bda81557..0c513a67a264 100644
--- a/drivers/comedi/drivers/adl_pci8164.c
+++ b/drivers/comedi/drivers/adl_pci8164.c
@@ -19,8 +19,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#define PCI8164_AXIS(x) ((x) * 0x08)
#define PCI8164_CMD_MSTS_REG 0x00
diff --git a/drivers/comedi/drivers/adl_pci9111.c b/drivers/comedi/drivers/adl_pci9111.c
index a062c5ab20e9..c50f94272a74 100644
--- a/drivers/comedi/drivers/adl_pci9111.c
+++ b/drivers/comedi/drivers/adl_pci9111.c
@@ -42,11 +42,10 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8254.h>
#include "plx9052.h"
-#include "comedi_8254.h"
#define PCI9111_FIFO_HALF_SIZE 512
diff --git a/drivers/comedi/drivers/adl_pci9118.c b/drivers/comedi/drivers/adl_pci9118.c
index cda3a4267dca..9a816c718303 100644
--- a/drivers/comedi/drivers/adl_pci9118.c
+++ b/drivers/comedi/drivers/adl_pci9118.c
@@ -78,11 +78,10 @@
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8254.h>
#include "amcc_s5933.h"
-#include "comedi_8254.h"
/*
* PCI BAR2 Register map (dev->iobase)
diff --git a/drivers/comedi/drivers/adq12b.c b/drivers/comedi/drivers/adq12b.c
index d719f76709ef..19d765182006 100644
--- a/drivers/comedi/drivers/adq12b.c
+++ b/drivers/comedi/drivers/adq12b.c
@@ -48,8 +48,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/* address scheme (page 2.17 of the manual) */
#define ADQ12B_CTREG 0x00
diff --git a/drivers/comedi/drivers/adv_pci1710.c b/drivers/comedi/drivers/adv_pci1710.c
index 090607760be6..4f2639968260 100644
--- a/drivers/comedi/drivers/adv_pci1710.c
+++ b/drivers/comedi/drivers/adv_pci1710.c
@@ -30,10 +30,9 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8254.h>
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
#include "amcc_s5933.h"
/*
diff --git a/drivers/comedi/drivers/adv_pci1720.c b/drivers/comedi/drivers/adv_pci1720.c
index 2fcd7e8e7d85..2619591ba301 100644
--- a/drivers/comedi/drivers/adv_pci1720.c
+++ b/drivers/comedi/drivers/adv_pci1720.c
@@ -42,8 +42,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI BAR2 Register map (dev->iobase)
diff --git a/drivers/comedi/drivers/adv_pci1723.c b/drivers/comedi/drivers/adv_pci1723.c
index 23660a9fdb9c..e2aedb152068 100644
--- a/drivers/comedi/drivers/adv_pci1723.c
+++ b/drivers/comedi/drivers/adv_pci1723.c
@@ -32,8 +32,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI Bar 2 I/O Register map (dev->iobase)
diff --git a/drivers/comedi/drivers/adv_pci1724.c b/drivers/comedi/drivers/adv_pci1724.c
index e8ab573c839f..bb43b7deeb56 100644
--- a/drivers/comedi/drivers/adv_pci1724.c
+++ b/drivers/comedi/drivers/adv_pci1724.c
@@ -38,8 +38,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI bar 2 Register I/O map (dev->iobase)
diff --git a/drivers/comedi/drivers/adv_pci1760.c b/drivers/comedi/drivers/adv_pci1760.c
index 6de8ab97d346..fcfc2e299110 100644
--- a/drivers/comedi/drivers/adv_pci1760.c
+++ b/drivers/comedi/drivers/adv_pci1760.c
@@ -22,8 +22,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI-1760 Register Map
diff --git a/drivers/comedi/drivers/adv_pci_dio.c b/drivers/comedi/drivers/adv_pci_dio.c
index 54c7419c8ca6..efa3e46b554b 100644
--- a/drivers/comedi/drivers/adv_pci_dio.c
+++ b/drivers/comedi/drivers/adv_pci_dio.c
@@ -23,11 +23,9 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
/*
* Register offset definitions
diff --git a/drivers/comedi/drivers/aio_aio12_8.c b/drivers/comedi/drivers/aio_aio12_8.c
index 4829115921a3..30b8a32204d8 100644
--- a/drivers/comedi/drivers/aio_aio12_8.c
+++ b/drivers/comedi/drivers/aio_aio12_8.c
@@ -22,10 +22,9 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/aio_iiro_16.c b/drivers/comedi/drivers/aio_iiro_16.c
index fe3876235075..b00fab0b89d4 100644
--- a/drivers/comedi/drivers/aio_iiro_16.c
+++ b/drivers/comedi/drivers/aio_iiro_16.c
@@ -30,8 +30,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#define AIO_IIRO_16_RELAY_0_7 0x00
#define AIO_IIRO_16_INPUT_0_7 0x01
diff --git a/drivers/comedi/drivers/amplc_dio200.c b/drivers/comedi/drivers/amplc_dio200.c
index fa19c9e7c56b..4544bcdd8a70 100644
--- a/drivers/comedi/drivers/amplc_dio200.c
+++ b/drivers/comedi/drivers/amplc_dio200.c
@@ -185,7 +185,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "amplc_dio200.h"
diff --git a/drivers/comedi/drivers/amplc_dio200_common.c b/drivers/comedi/drivers/amplc_dio200_common.c
index a3454130d5f8..ff651f2eb86c 100644
--- a/drivers/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/comedi/drivers/amplc_dio200_common.c
@@ -12,12 +12,11 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h> /* only for register defines */
+#include <linux/comedi/comedi_8254.h>
#include "amplc_dio200.h"
-#include "comedi_8254.h"
-#include "8255.h" /* only for register defines */
/* 200 series registers */
#define DIO200_IO_SIZE 0x20
diff --git a/drivers/comedi/drivers/amplc_dio200_pci.c b/drivers/comedi/drivers/amplc_dio200_pci.c
index 1bd7a42c8464..527994d82a1f 100644
--- a/drivers/comedi/drivers/amplc_dio200_pci.c
+++ b/drivers/comedi/drivers/amplc_dio200_pci.c
@@ -214,8 +214,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "amplc_dio200.h"
diff --git a/drivers/comedi/drivers/amplc_pc236.c b/drivers/comedi/drivers/amplc_pc236.c
index c377af1d5246..b21e0c906aab 100644
--- a/drivers/comedi/drivers/amplc_pc236.c
+++ b/drivers/comedi/drivers/amplc_pc236.c
@@ -32,8 +32,7 @@
*/
#include <linux/module.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "amplc_pc236.h"
diff --git a/drivers/comedi/drivers/amplc_pc236_common.c b/drivers/comedi/drivers/amplc_pc236_common.c
index 981d281e87a1..9f4f89b1ef23 100644
--- a/drivers/comedi/drivers/amplc_pc236_common.c
+++ b/drivers/comedi/drivers/amplc_pc236_common.c
@@ -11,11 +11,10 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
#include "amplc_pc236.h"
-#include "8255.h"
static void pc236_intr_update(struct comedi_device *dev, bool enable)
{
diff --git a/drivers/comedi/drivers/amplc_pc263.c b/drivers/comedi/drivers/amplc_pc263.c
index 68da6098ee84..d7f088a8a5e3 100644
--- a/drivers/comedi/drivers/amplc_pc263.c
+++ b/drivers/comedi/drivers/amplc_pc263.c
@@ -25,7 +25,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/* PC263 registers */
#define PC263_DO_0_7_REG 0x00
diff --git a/drivers/comedi/drivers/amplc_pci224.c b/drivers/comedi/drivers/amplc_pci224.c
index bcf6d61af863..5a04e55daeea 100644
--- a/drivers/comedi/drivers/amplc_pci224.c
+++ b/drivers/comedi/drivers/amplc_pci224.c
@@ -96,10 +96,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8254.h>
/*
* PCI224/234 i/o space 1 (PCIBAR2) registers.
diff --git a/drivers/comedi/drivers/amplc_pci230.c b/drivers/comedi/drivers/amplc_pci230.c
index 8911dc2bd2c6..92ba8b8c0172 100644
--- a/drivers/comedi/drivers/amplc_pci230.c
+++ b/drivers/comedi/drivers/amplc_pci230.c
@@ -174,11 +174,9 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
-#include "8255.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
/*
* PCI230 PCI configuration register information
diff --git a/drivers/comedi/drivers/amplc_pci236.c b/drivers/comedi/drivers/amplc_pci236.c
index e7f6fa4d101a..482eb261c333 100644
--- a/drivers/comedi/drivers/amplc_pci236.c
+++ b/drivers/comedi/drivers/amplc_pci236.c
@@ -34,8 +34,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "amplc_pc236.h"
#include "plx9052.h"
diff --git a/drivers/comedi/drivers/amplc_pci263.c b/drivers/comedi/drivers/amplc_pci263.c
index 9217973f1141..1609665c4b18 100644
--- a/drivers/comedi/drivers/amplc_pci263.c
+++ b/drivers/comedi/drivers/amplc_pci263.c
@@ -24,8 +24,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/* PCI263 registers */
#define PCI263_DO_0_7_REG 0x00
diff --git a/drivers/comedi/drivers/c6xdigio.c b/drivers/comedi/drivers/c6xdigio.c
index 786fd15698df..14b90d1c64dc 100644
--- a/drivers/comedi/drivers/c6xdigio.c
+++ b/drivers/comedi/drivers/c6xdigio.c
@@ -30,8 +30,7 @@
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/pnp.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/cb_das16_cs.c b/drivers/comedi/drivers/cb_das16_cs.c
index a5d171e71c33..8e0d2fa5f95d 100644
--- a/drivers/comedi/drivers/cb_das16_cs.c
+++ b/drivers/comedi/drivers/cb_das16_cs.c
@@ -27,10 +27,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-
-#include "../comedi_pcmcia.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedi_pcmcia.h>
+#include <linux/comedi/comedi_8254.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/cb_pcidas.c b/drivers/comedi/drivers/cb_pcidas.c
index 2f20bd56ec6c..0c7576b967fc 100644
--- a/drivers/comedi/drivers/cb_pcidas.c
+++ b/drivers/comedi/drivers/cb_pcidas.c
@@ -54,11 +54,10 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
-#include "8255.h"
#include "amcc_s5933.h"
#define AI_BUFFER_SIZE 1024 /* max ai fifo size */
diff --git a/drivers/comedi/drivers/cb_pcidas64.c b/drivers/comedi/drivers/cb_pcidas64.c
index 41a8fea7f48a..ca6038a25f26 100644
--- a/drivers/comedi/drivers/cb_pcidas64.c
+++ b/drivers/comedi/drivers/cb_pcidas64.c
@@ -73,10 +73,9 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
-#include "../comedi_pci.h"
-
-#include "8255.h"
#include "plx9080.h"
#define TIMER_BASE 25 /* 40MHz master clock */
diff --git a/drivers/comedi/drivers/cb_pcidda.c b/drivers/comedi/drivers/cb_pcidda.c
index 78cf1603638c..c52204a6bda4 100644
--- a/drivers/comedi/drivers/cb_pcidda.c
+++ b/drivers/comedi/drivers/cb_pcidda.c
@@ -27,10 +27,8 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
#define EEPROM_SIZE 128 /* number of entries in eeprom */
/* maximum number of ao channels for supported boards */
diff --git a/drivers/comedi/drivers/cb_pcimdas.c b/drivers/comedi/drivers/cb_pcimdas.c
index 2292f69da4f4..8bdb00774f11 100644
--- a/drivers/comedi/drivers/cb_pcimdas.c
+++ b/drivers/comedi/drivers/cb_pcimdas.c
@@ -34,12 +34,11 @@
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
#include "plx9052.h"
-#include "8255.h"
/*
* PCI Bar 1 Register map
diff --git a/drivers/comedi/drivers/cb_pcimdda.c b/drivers/comedi/drivers/cb_pcimdda.c
index 21fc7b3c5f60..bf8093a10315 100644
--- a/drivers/comedi/drivers/cb_pcimdda.c
+++ b/drivers/comedi/drivers/cb_pcimdda.c
@@ -67,10 +67,8 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
-
-#include "8255.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
/* device ids of the cards we support -- currently only 1 card supported */
#define PCI_ID_PCIM_DDA06_16 0x0053
diff --git a/drivers/comedi/drivers/comedi_8254.c b/drivers/comedi/drivers/comedi_8254.c
index 4bf5daa9e885..b4185c1b2695 100644
--- a/drivers/comedi/drivers/comedi_8254.c
+++ b/drivers/comedi/drivers/comedi_8254.c
@@ -116,10 +116,8 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
static unsigned int __i8254_read(struct comedi_8254 *i8254, unsigned int reg)
{
diff --git a/drivers/comedi/drivers/comedi_8255.c b/drivers/comedi/drivers/comedi_8255.c
index b7ca465933ee..5562b9cd0a17 100644
--- a/drivers/comedi/drivers/comedi_8255.c
+++ b/drivers/comedi/drivers/comedi_8255.c
@@ -29,9 +29,8 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
struct subdev_8255_private {
unsigned long regbase;
diff --git a/drivers/comedi/drivers/comedi_bond.c b/drivers/comedi/drivers/comedi_bond.c
index 4392b5927a99..78c39fa84177 100644
--- a/drivers/comedi/drivers/comedi_bond.c
+++ b/drivers/comedi/drivers/comedi_bond.c
@@ -40,9 +40,9 @@
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
-#include "../comedi.h"
-#include "../comedilib.h"
-#include "../comedidev.h"
+#include <linux/comedi.h>
+#include <linux/comedi/comedilib.h>
+#include <linux/comedi/comedidev.h>
struct bonded_device {
struct comedi_device *dev;
diff --git a/drivers/comedi/drivers/comedi_isadma.c b/drivers/comedi/drivers/comedi_isadma.c
index 479b58e209ba..700982464c53 100644
--- a/drivers/comedi/drivers/comedi_isadma.c
+++ b/drivers/comedi/drivers/comedi_isadma.c
@@ -9,10 +9,8 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_isadma.h>
/**
* comedi_isadma_program - program and enable an ISA DMA transfer
diff --git a/drivers/comedi/drivers/comedi_parport.c b/drivers/comedi/drivers/comedi_parport.c
index 5338b5eea440..098738a688fe 100644
--- a/drivers/comedi/drivers/comedi_parport.c
+++ b/drivers/comedi/drivers/comedi_parport.c
@@ -57,8 +57,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c
index cbc225eb1991..0b5c0af1cebf 100644
--- a/drivers/comedi/drivers/comedi_test.c
+++ b/drivers/comedi/drivers/comedi_test.c
@@ -45,10 +45,8 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
+#include <linux/comedi/comedidev.h>
#include <asm/div64.h>
-
#include <linux/timer.h>
#include <linux/ktime.h>
#include <linux/jiffies.h>
diff --git a/drivers/comedi/drivers/contec_pci_dio.c b/drivers/comedi/drivers/contec_pci_dio.c
index b8fdd9c1f166..41d42ff14144 100644
--- a/drivers/comedi/drivers/contec_pci_dio.c
+++ b/drivers/comedi/drivers/contec_pci_dio.c
@@ -18,8 +18,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/dac02.c b/drivers/comedi/drivers/dac02.c
index 5ef8114c2c85..4b011d66d7b0 100644
--- a/drivers/comedi/drivers/dac02.c
+++ b/drivers/comedi/drivers/dac02.c
@@ -25,8 +25,7 @@
*/
#include <linux/module.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* The output range is selected by jumpering pins on the I/O connector.
diff --git a/drivers/comedi/drivers/daqboard2000.c b/drivers/comedi/drivers/daqboard2000.c
index f64e747078bd..c0a4e1b06fb3 100644
--- a/drivers/comedi/drivers/daqboard2000.c
+++ b/drivers/comedi/drivers/daqboard2000.c
@@ -96,10 +96,9 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8255.h>
-#include "../comedi_pci.h"
-
-#include "8255.h"
#include "plx9080.h"
#define DB2K_FIRMWARE "daqboard2000_firmware.bin"
diff --git a/drivers/comedi/drivers/das08.c b/drivers/comedi/drivers/das08.c
index b50743c5b822..f8ab3af2e391 100644
--- a/drivers/comedi/drivers/das08.c
+++ b/drivers/comedi/drivers/das08.c
@@ -10,11 +10,10 @@
*/
#include <linux/module.h>
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
-#include "../comedidev.h"
-
-#include "8255.h"
-#include "comedi_8254.h"
#include "das08.h"
/*
diff --git a/drivers/comedi/drivers/das08_cs.c b/drivers/comedi/drivers/das08_cs.c
index 223479f9ea3c..6075efcf10d6 100644
--- a/drivers/comedi/drivers/das08_cs.c
+++ b/drivers/comedi/drivers/das08_cs.c
@@ -30,8 +30,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pcmcia.h"
+#include <linux/comedi/comedi_pcmcia.h>
#include "das08.h"
diff --git a/drivers/comedi/drivers/das08_isa.c b/drivers/comedi/drivers/das08_isa.c
index 8c4cfa821423..3d43b77cc9f4 100644
--- a/drivers/comedi/drivers/das08_isa.c
+++ b/drivers/comedi/drivers/das08_isa.c
@@ -29,7 +29,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "das08.h"
diff --git a/drivers/comedi/drivers/das08_pci.c b/drivers/comedi/drivers/das08_pci.c
index 1cd903336a4c..982f3ab0ccbd 100644
--- a/drivers/comedi/drivers/das08_pci.c
+++ b/drivers/comedi/drivers/das08_pci.c
@@ -23,8 +23,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "das08.h"
diff --git a/drivers/comedi/drivers/das16.c b/drivers/comedi/drivers/das16.c
index 4ac2622b0fac..937a69ce0977 100644
--- a/drivers/comedi/drivers/das16.c
+++ b/drivers/comedi/drivers/das16.c
@@ -63,12 +63,10 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
+#include <linux/comedi/comedi_isadma.h>
#define DAS16_DMA_SIZE 0xff00 /* size in bytes of allocated dma buffer */
diff --git a/drivers/comedi/drivers/das16m1.c b/drivers/comedi/drivers/das16m1.c
index 75f3dbbe97ac..275effb77746 100644
--- a/drivers/comedi/drivers/das16m1.c
+++ b/drivers/comedi/drivers/das16m1.c
@@ -42,10 +42,9 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-#include "8255.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
/*
* Register map (dev->iobase)
diff --git a/drivers/comedi/drivers/das1800.c b/drivers/comedi/drivers/das1800.c
index f50891a6ee7d..f09608c0f4ff 100644
--- a/drivers/comedi/drivers/das1800.c
+++ b/drivers/comedi/drivers/das1800.c
@@ -73,11 +73,9 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
+#include <linux/comedi/comedi_isadma.h>
/* misc. defines */
#define DAS1800_SIZE 16 /* uses 16 io addresses */
diff --git a/drivers/comedi/drivers/das6402.c b/drivers/comedi/drivers/das6402.c
index 96f4107b8054..1af394591e74 100644
--- a/drivers/comedi/drivers/das6402.c
+++ b/drivers/comedi/drivers/das6402.c
@@ -24,10 +24,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/das800.c b/drivers/comedi/drivers/das800.c
index bc08324f422f..4ca33f46eaa7 100644
--- a/drivers/comedi/drivers/das800.c
+++ b/drivers/comedi/drivers/das800.c
@@ -46,10 +46,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
#define N_CHAN_AI 8 /* number of analog input channels */
diff --git a/drivers/comedi/drivers/dmm32at.c b/drivers/comedi/drivers/dmm32at.c
index 56682f01242f..fe023c722aa3 100644
--- a/drivers/comedi/drivers/dmm32at.c
+++ b/drivers/comedi/drivers/dmm32at.c
@@ -29,9 +29,8 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
/* Board register addresses */
#define DMM32AT_AI_START_CONV_REG 0x00
diff --git a/drivers/comedi/drivers/dt2801.c b/drivers/comedi/drivers/dt2801.c
index 0d571d817b4e..230d25010f58 100644
--- a/drivers/comedi/drivers/dt2801.c
+++ b/drivers/comedi/drivers/dt2801.c
@@ -31,7 +31,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#include <linux/delay.h>
#define DT2801_TIMEOUT 1000
diff --git a/drivers/comedi/drivers/dt2811.c b/drivers/comedi/drivers/dt2811.c
index 0eb5e6ba6916..dbb9f38da289 100644
--- a/drivers/comedi/drivers/dt2811.c
+++ b/drivers/comedi/drivers/dt2811.c
@@ -40,8 +40,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/dt2814.c b/drivers/comedi/drivers/dt2814.c
index ed44ce0d151b..c98a5a4a7aec 100644
--- a/drivers/comedi/drivers/dt2814.c
+++ b/drivers/comedi/drivers/dt2814.c
@@ -27,8 +27,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
-
+#include <linux/comedi/comedidev.h>
#include <linux/delay.h>
#define DT2814_CSR 0
diff --git a/drivers/comedi/drivers/dt2815.c b/drivers/comedi/drivers/dt2815.c
index 5906f32aa01f..03ba2fd18a21 100644
--- a/drivers/comedi/drivers/dt2815.c
+++ b/drivers/comedi/drivers/dt2815.c
@@ -43,8 +43,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
+#include <linux/comedi/comedidev.h>
#include <linux/delay.h>
#define DT2815_DATA 0
diff --git a/drivers/comedi/drivers/dt2817.c b/drivers/comedi/drivers/dt2817.c
index 7c1463e835d3..6738045c7531 100644
--- a/drivers/comedi/drivers/dt2817.c
+++ b/drivers/comedi/drivers/dt2817.c
@@ -25,7 +25,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#define DT2817_CR 0
#define DT2817_DATA 1
diff --git a/drivers/comedi/drivers/dt282x.c b/drivers/comedi/drivers/dt282x.c
index 2656b4b0e3d0..4ae80e6c7266 100644
--- a/drivers/comedi/drivers/dt282x.c
+++ b/drivers/comedi/drivers/dt282x.c
@@ -51,10 +51,8 @@
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_isadma.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/dt3000.c b/drivers/comedi/drivers/dt3000.c
index ec27aa4730d4..fc6e9c30e522 100644
--- a/drivers/comedi/drivers/dt3000.c
+++ b/drivers/comedi/drivers/dt3000.c
@@ -43,8 +43,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI BAR0 - dual-ported RAM location definitions (dev->mmio)
diff --git a/drivers/comedi/drivers/dt9812.c b/drivers/comedi/drivers/dt9812.c
index 704b04d2980d..b37b9d8eca0d 100644
--- a/drivers/comedi/drivers/dt9812.c
+++ b/drivers/comedi/drivers/dt9812.c
@@ -34,8 +34,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-
-#include "../comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
#define DT9812_DIAGS_BOARD_INFO_ADDR 0xFBFF
#define DT9812_MAX_WRITE_CMD_PIPE_SIZE 32
diff --git a/drivers/comedi/drivers/dyna_pci10xx.c b/drivers/comedi/drivers/dyna_pci10xx.c
index c224422bb126..407a038fb3e0 100644
--- a/drivers/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/comedi/drivers/dyna_pci10xx.c
@@ -26,8 +26,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mutex.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#define READ_TIMEOUT 50
diff --git a/drivers/comedi/drivers/fl512.c b/drivers/comedi/drivers/fl512.c
index b715f30659fa..139e801fc358 100644
--- a/drivers/comedi/drivers/fl512.c
+++ b/drivers/comedi/drivers/fl512.c
@@ -21,8 +21,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
+#include <linux/comedi/comedidev.h>
#include <linux/delay.h>
/*
diff --git a/drivers/comedi/drivers/gsc_hpdi.c b/drivers/comedi/drivers/gsc_hpdi.c
index e35e4a743714..c09d135df38d 100644
--- a/drivers/comedi/drivers/gsc_hpdi.c
+++ b/drivers/comedi/drivers/gsc_hpdi.c
@@ -34,8 +34,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "plx9080.h"
diff --git a/drivers/comedi/drivers/icp_multi.c b/drivers/comedi/drivers/icp_multi.c
index 16d2b78de83c..ac4b11dbd741 100644
--- a/drivers/comedi/drivers/icp_multi.c
+++ b/drivers/comedi/drivers/icp_multi.c
@@ -36,8 +36,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#define ICP_MULTI_ADC_CSR 0x00 /* R/W: ADC command/status register */
#define ICP_MULTI_ADC_CSR_ST BIT(0) /* Start ADC */
diff --git a/drivers/comedi/drivers/ii_pci20kc.c b/drivers/comedi/drivers/ii_pci20kc.c
index 399255dbe388..4a19bf8462be 100644
--- a/drivers/comedi/drivers/ii_pci20kc.c
+++ b/drivers/comedi/drivers/ii_pci20kc.c
@@ -30,7 +30,7 @@
#include <linux/module.h>
#include <linux/io.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/jr3_pci.c b/drivers/comedi/drivers/jr3_pci.c
index f963080dd61f..951c23fa0369 100644
--- a/drivers/comedi/drivers/jr3_pci.c
+++ b/drivers/comedi/drivers/jr3_pci.c
@@ -35,8 +35,7 @@
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/timer.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "jr3_pci.h"
diff --git a/drivers/comedi/drivers/ke_counter.c b/drivers/comedi/drivers/ke_counter.c
index bef1b20c1c8d..b825cf60e1e0 100644
--- a/drivers/comedi/drivers/ke_counter.c
+++ b/drivers/comedi/drivers/ke_counter.c
@@ -19,8 +19,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI BAR 0 Register I/O map
diff --git a/drivers/comedi/drivers/me4000.c b/drivers/comedi/drivers/me4000.c
index 0d3d4cafce2e..9aea02b86ed9 100644
--- a/drivers/comedi/drivers/me4000.c
+++ b/drivers/comedi/drivers/me4000.c
@@ -32,10 +32,9 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8254.h>
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
#include "plx9052.h"
#define ME4000_FIRMWARE "me4000_firmware.bin"
diff --git a/drivers/comedi/drivers/me_daq.c b/drivers/comedi/drivers/me_daq.c
index ef18e387471b..076b15097afd 100644
--- a/drivers/comedi/drivers/me_daq.c
+++ b/drivers/comedi/drivers/me_daq.c
@@ -23,8 +23,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "plx9052.h"
diff --git a/drivers/comedi/drivers/mf6x4.c b/drivers/comedi/drivers/mf6x4.c
index 9da8dd748078..14f1d5e9cd59 100644
--- a/drivers/comedi/drivers/mf6x4.c
+++ b/drivers/comedi/drivers/mf6x4.c
@@ -18,8 +18,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/* Registers present in BAR0 memory region */
#define MF624_GPIOC_REG 0x54
diff --git a/drivers/comedi/drivers/mite.c b/drivers/comedi/drivers/mite.c
index 70960e3ba878..88f3cd6f54f1 100644
--- a/drivers/comedi/drivers/mite.c
+++ b/drivers/comedi/drivers/mite.c
@@ -38,8 +38,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/log2.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "mite.h"
diff --git a/drivers/comedi/drivers/mpc624.c b/drivers/comedi/drivers/mpc624.c
index 646f4c086204..9e51ff528ed1 100644
--- a/drivers/comedi/drivers/mpc624.c
+++ b/drivers/comedi/drivers/mpc624.c
@@ -44,8 +44,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
+#include <linux/comedi/comedidev.h>
#include <linux/delay.h>
/* Offsets of different ports */
diff --git a/drivers/comedi/drivers/multiq3.c b/drivers/comedi/drivers/multiq3.c
index c1897aee9a9a..07ff5383da99 100644
--- a/drivers/comedi/drivers/multiq3.c
+++ b/drivers/comedi/drivers/multiq3.c
@@ -26,8 +26,7 @@
*/
#include <linux/module.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/ni_6527.c b/drivers/comedi/drivers/ni_6527.c
index f1a45cf7342a..ac5820085231 100644
--- a/drivers/comedi/drivers/ni_6527.c
+++ b/drivers/comedi/drivers/ni_6527.c
@@ -20,8 +20,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI BAR1 - Register memory map
diff --git a/drivers/comedi/drivers/ni_65xx.c b/drivers/comedi/drivers/ni_65xx.c
index 7cd8497420f2..58334de3b253 100644
--- a/drivers/comedi/drivers/ni_65xx.c
+++ b/drivers/comedi/drivers/ni_65xx.c
@@ -49,8 +49,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
/*
* PCI BAR1 Register Map
diff --git a/drivers/comedi/drivers/ni_660x.c b/drivers/comedi/drivers/ni_660x.c
index e60d0125bcb2..0679bc39e0bc 100644
--- a/drivers/comedi/drivers/ni_660x.c
+++ b/drivers/comedi/drivers/ni_660x.c
@@ -26,8 +26,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "mite.h"
#include "ni_tio.h"
diff --git a/drivers/comedi/drivers/ni_670x.c b/drivers/comedi/drivers/ni_670x.c
index c197e47486be..c875d251c230 100644
--- a/drivers/comedi/drivers/ni_670x.c
+++ b/drivers/comedi/drivers/ni_670x.c
@@ -24,8 +24,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#define AO_VALUE_OFFSET 0x00
#define AO_CHAN_OFFSET 0x0c
diff --git a/drivers/comedi/drivers/ni_at_a2150.c b/drivers/comedi/drivers/ni_at_a2150.c
index 10ad7b88713e..df8d219e6723 100644
--- a/drivers/comedi/drivers/ni_at_a2150.c
+++ b/drivers/comedi/drivers/ni_at_a2150.c
@@ -39,11 +39,9 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
+#include <linux/comedi/comedi_isadma.h>
#define A2150_DMA_BUFFER_SIZE 0xff00 /* size in bytes of dma buffer */
diff --git a/drivers/comedi/drivers/ni_at_ao.c b/drivers/comedi/drivers/ni_at_ao.c
index 2a0fb4d460db..9f3147b72aa8 100644
--- a/drivers/comedi/drivers/ni_at_ao.c
+++ b/drivers/comedi/drivers/ni_at_ao.c
@@ -25,10 +25,8 @@
*/
#include <linux/module.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/ni_atmio.c b/drivers/comedi/drivers/ni_atmio.c
index 56c78da475e7..8876a1d24c56 100644
--- a/drivers/comedi/drivers/ni_atmio.c
+++ b/drivers/comedi/drivers/ni_atmio.c
@@ -73,12 +73,11 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
-
+#include <linux/comedi/comedidev.h>
#include <linux/isapnp.h>
+#include <linux/comedi/comedi_8255.h>
#include "ni_stc.h"
-#include "8255.h"
/* AT specific setup */
static const struct ni_board_struct ni_boards[] = {
diff --git a/drivers/comedi/drivers/ni_atmio16d.c b/drivers/comedi/drivers/ni_atmio16d.c
index dffce1aa3e69..9fa902529a8e 100644
--- a/drivers/comedi/drivers/ni_atmio16d.c
+++ b/drivers/comedi/drivers/ni_atmio16d.c
@@ -39,9 +39,8 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
-
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
/* Configuration and Status Registers */
#define COM_REG_1 0x00 /* wo 16 */
diff --git a/drivers/comedi/drivers/ni_daq_700.c b/drivers/comedi/drivers/ni_daq_700.c
index d40fc89f9cef..0ef20e9a8bc4 100644
--- a/drivers/comedi/drivers/ni_daq_700.c
+++ b/drivers/comedi/drivers/ni_daq_700.c
@@ -41,8 +41,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pcmcia.h"
+#include <linux/comedi/comedi_pcmcia.h>
/* daqcard700 registers */
#define DIO_W 0x04 /* WO 8bit */
diff --git a/drivers/comedi/drivers/ni_daq_dio24.c b/drivers/comedi/drivers/ni_daq_dio24.c
index 44fb65afc218..487733111023 100644
--- a/drivers/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/comedi/drivers/ni_daq_dio24.c
@@ -23,9 +23,8 @@
*/
#include <linux/module.h>
-#include "../comedi_pcmcia.h"
-
-#include "8255.h"
+#include <linux/comedi/comedi_pcmcia.h>
+#include <linux/comedi/comedi_8255.h>
static int dio24_auto_attach(struct comedi_device *dev,
unsigned long context)
diff --git a/drivers/comedi/drivers/ni_labpc.c b/drivers/comedi/drivers/ni_labpc.c
index 1f4a07bd1d26..b25a8e117072 100644
--- a/drivers/comedi/drivers/ni_labpc.c
+++ b/drivers/comedi/drivers/ni_labpc.c
@@ -48,8 +48,7 @@
*/
#include <linux/module.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "ni_labpc.h"
#include "ni_labpc_isadma.h"
diff --git a/drivers/comedi/drivers/ni_labpc_common.c b/drivers/comedi/drivers/ni_labpc_common.c
index dd97946eacaf..763249653228 100644
--- a/drivers/comedi/drivers/ni_labpc_common.c
+++ b/drivers/comedi/drivers/ni_labpc_common.c
@@ -12,11 +12,10 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
+#include <linux/comedi/comedi_8254.h>
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
-#include "8255.h"
#include "ni_labpc.h"
#include "ni_labpc_regs.h"
#include "ni_labpc_isadma.h"
diff --git a/drivers/comedi/drivers/ni_labpc_cs.c b/drivers/comedi/drivers/ni_labpc_cs.c
index 4f7e2fe21254..62fecb50ec6e 100644
--- a/drivers/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/comedi/drivers/ni_labpc_cs.c
@@ -38,8 +38,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pcmcia.h"
+#include <linux/comedi/comedi_pcmcia.h>
#include "ni_labpc.h"
diff --git a/drivers/comedi/drivers/ni_labpc_isadma.c b/drivers/comedi/drivers/ni_labpc_isadma.c
index a551aca6e615..0652ca8345b6 100644
--- a/drivers/comedi/drivers/ni_labpc_isadma.c
+++ b/drivers/comedi/drivers/ni_labpc_isadma.c
@@ -10,10 +10,9 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_isadma.h>
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
#include "ni_labpc.h"
#include "ni_labpc_regs.h"
#include "ni_labpc_isadma.h"
diff --git a/drivers/comedi/drivers/ni_labpc_pci.c b/drivers/comedi/drivers/ni_labpc_pci.c
index ec180b0fedf7..e2a44bbd9fa6 100644
--- a/drivers/comedi/drivers/ni_labpc_pci.c
+++ b/drivers/comedi/drivers/ni_labpc_pci.c
@@ -22,8 +22,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "ni_labpc.h"
diff --git a/drivers/comedi/drivers/ni_mio_common.c b/drivers/comedi/drivers/ni_mio_common.c
index 4f80a4991f95..d39998565808 100644
--- a/drivers/comedi/drivers/ni_mio_common.c
+++ b/drivers/comedi/drivers/ni_mio_common.c
@@ -43,7 +43,7 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/delay.h>
-#include "8255.h"
+#include <linux/comedi/comedi_8255.h>
#include "mite.h"
/* A timeout count */
diff --git a/drivers/comedi/drivers/ni_mio_cs.c b/drivers/comedi/drivers/ni_mio_cs.c
index 4f37b4e58f09..796f0b743772 100644
--- a/drivers/comedi/drivers/ni_mio_cs.c
+++ b/drivers/comedi/drivers/ni_mio_cs.c
@@ -28,10 +28,10 @@
#include <linux/module.h>
#include <linux/delay.h>
+#include <linux/comedi/comedi_pcmcia.h>
+#include <linux/comedi/comedi_8255.h>
-#include "../comedi_pcmcia.h"
#include "ni_stc.h"
-#include "8255.h"
/*
* AT specific setup
diff --git a/drivers/comedi/drivers/ni_pcidio.c b/drivers/comedi/drivers/ni_pcidio.c
index 623f8d08d13a..2d58e83420e8 100644
--- a/drivers/comedi/drivers/ni_pcidio.c
+++ b/drivers/comedi/drivers/ni_pcidio.c
@@ -42,8 +42,7 @@
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "mite.h"
diff --git a/drivers/comedi/drivers/ni_pcimio.c b/drivers/comedi/drivers/ni_pcimio.c
index 6c813a490ba5..0b055321023d 100644
--- a/drivers/comedi/drivers/ni_pcimio.c
+++ b/drivers/comedi/drivers/ni_pcimio.c
@@ -94,9 +94,7 @@
#include <linux/module.h>
#include <linux/delay.h>
-
-#include "../comedi_pci.h"
-
+#include <linux/comedi/comedi_pci.h>
#include <asm/byteorder.h>
#include "ni_stc.h"
diff --git a/drivers/comedi/drivers/ni_routes.c b/drivers/comedi/drivers/ni_routes.c
index f0f8cd424b30..f24eeb464eba 100644
--- a/drivers/comedi/drivers/ni_routes.c
+++ b/drivers/comedi/drivers/ni_routes.c
@@ -21,8 +21,7 @@
#include <linux/slab.h>
#include <linux/bsearch.h>
#include <linux/sort.h>
-
-#include "../comedi.h"
+#include <linux/comedi.h>
#include "ni_routes.h"
#include "ni_routing/ni_route_values.h"
diff --git a/drivers/comedi/drivers/ni_routes.h b/drivers/comedi/drivers/ni_routes.h
index 036982315584..cff8a463a03f 100644
--- a/drivers/comedi/drivers/ni_routes.h
+++ b/drivers/comedi/drivers/ni_routes.h
@@ -27,7 +27,7 @@
#include <linux/bitops.h>
#endif
-#include "../comedi.h"
+#include <linux/comedi.h>
/**
* struct ni_route_set - Set of destinations with a common source.
diff --git a/drivers/comedi/drivers/ni_routing/ni_route_values.h b/drivers/comedi/drivers/ni_routing/ni_route_values.h
index 6e358efa6f7f..80880083ea41 100644
--- a/drivers/comedi/drivers/ni_routing/ni_route_values.h
+++ b/drivers/comedi/drivers/ni_routing/ni_route_values.h
@@ -20,7 +20,7 @@
#ifndef _COMEDI_DRIVERS_NI_ROUTINT_NI_ROUTE_VALUES_H
#define _COMEDI_DRIVERS_NI_ROUTINT_NI_ROUTE_VALUES_H
-#include "../../comedi.h"
+#include <linux/comedi.h>
#include <linux/types.h>
/*
diff --git a/drivers/comedi/drivers/ni_routing/tools/.gitignore b/drivers/comedi/drivers/ni_routing/tools/.gitignore
index e3ebffcd900e..c12f825db266 100644
--- a/drivers/comedi/drivers/ni_routing/tools/.gitignore
+++ b/drivers/comedi/drivers/ni_routing/tools/.gitignore
@@ -5,4 +5,5 @@ ni_values.py
convert_c_to_py
c/
csv/
+linux/
all_cfiles.c
diff --git a/drivers/comedi/drivers/ni_routing/tools/Makefile b/drivers/comedi/drivers/ni_routing/tools/Makefile
index 6e92a06a44cb..31212101b3bc 100644
--- a/drivers/comedi/drivers/ni_routing/tools/Makefile
+++ b/drivers/comedi/drivers/ni_routing/tools/Makefile
@@ -3,7 +3,7 @@
# ni_route_values.h
# ni_device_routes.h
# in order to do this, we are also generating a python representation (using
-# ctypesgen) of ../../comedi.h.
+# ctypesgen) of ../../../../../include/uapi/linux/comedi.h.
# This allows us to sort NI signal/terminal names numerically to use a binary
# search through the device_routes tables to find valid routes.
@@ -30,13 +30,21 @@ ALL:
everything : csv-files c-files csv-blank
-CPPFLAGS=-D"BIT(x)=(1UL<<(x))" -D__user=
+CPPFLAGS = -D__user=
+INC_UAPI = ../../../../../include/uapi
-comedi_h.py : ../../../comedi.h
+comedi_h.py: $(INC_UAPI)/linux/comedi.h
ctypesgen $< --include "sys/ioctl.h" --cpp 'gcc -E $(CPPFLAGS)' -o $@
-convert_c_to_py: all_cfiles.c
- gcc -g convert_c_to_py.c -o convert_c_to_py -std=c99
+convert_c_to_py: all_cfiles.c linux/comedi.h
+ gcc -g -I. convert_c_to_py.c -o convert_c_to_py -std=c99
+
+# Create a local 'linux/comedi.h' for use when compiling 'convert_c_to_py.c'
+# with the '-I.' option. (Cannot specify '-I../../../../../include/uapi'
+# because that interferes with inclusion of other system headers.)
+linux/comedi.h: $(INC_UAPI)/linux/comedi.h
+ mkdir -p linux
+ ln -snf ../$< $@
ni_values.py: convert_c_to_py
./convert_c_to_py
@@ -44,7 +52,7 @@ ni_values.py: convert_c_to_py
csv-files : ni_values.py comedi_h.py
./convert_py_to_csv.py
-csv-blank :
+csv-blank : comedi_h.py
./make_blank_csv.py
@echo New blank csv signal table in csv/blank_route_table.csv
@@ -62,17 +70,16 @@ clean-partial :
$(RM) -rf comedi_h.py ni_values.py convert_c_to_py all_cfiles.c *.pyc \
__pycache__/
-clean : partial_clean
- $(RM) -rf c/ csv/
+clean : clean-partial
+ $(RM) -rf c/ csv/ linux/
# Note: One could also use ctypeslib in order to generate these files. The
# caveat is that ctypeslib does not do a great job at handling macro functions.
# The make rules are as follows:
-# comedi.h.xml : ../../comedi.h
+# comedi.h.xml : $(INC_UAPI)/linux/comedi.h
# # note that we have to use PWD here to avoid h2xml finding a system
# # installed version of the comedilib/comedi.h file
-# h2xml ${PWD}/../../comedi.h -c -D__user="" -D"BIT(x)=(1<<(x))" \
-# -o comedi.h.xml
+# h2xml ${PWD}/$(INC_UAPI)/linux/comedi.h -c -D__user="" -o comedi.h.xml
#
# comedi_h.py : comedi.h.xml
# xml2py ./comedi.h.xml -o comedi_h.py
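[Editorial note: the Makefile change above deliberately avoids adding -I$(INC_UAPI) to the host compile, since the Makefile's own comment says that interferes with other system headers; instead it symlinks just linux/comedi.h into the tools directory. A hypothetical host-side program showing what "gcc -I." then makes possible; COMEDI_NAMELEN is an existing constant from the UAPI header:]

/* Resolves to ./linux/comedi.h, the symlink created by the rule above. */
#include <linux/comedi.h>

int main(void)
{
	/* Touch a UAPI constant to prove the header was found without
	 * putting all of include/uapi on the search path. */
	return COMEDI_NAMELEN > 0 ? 0 : 1;
}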
diff --git a/drivers/comedi/drivers/ni_tio.h b/drivers/comedi/drivers/ni_tio.h
index e7b05718df9b..9ae2221c3c18 100644
--- a/drivers/comedi/drivers/ni_tio.h
+++ b/drivers/comedi/drivers/ni_tio.h
@@ -8,7 +8,7 @@
#ifndef _COMEDI_NI_TIO_H
#define _COMEDI_NI_TIO_H
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
enum ni_gpct_register {
NITIO_G0_AUTO_INC,
diff --git a/drivers/comedi/drivers/ni_usb6501.c b/drivers/comedi/drivers/ni_usb6501.c
index c42987b74b1d..0dd9edf7bced 100644
--- a/drivers/comedi/drivers/ni_usb6501.c
+++ b/drivers/comedi/drivers/ni_usb6501.c
@@ -87,8 +87,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-
-#include "../comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
#define NI6501_TIMEOUT 1000
diff --git a/drivers/comedi/drivers/pcl711.c b/drivers/comedi/drivers/pcl711.c
index bd6f42fe9e3c..05172c553c8a 100644
--- a/drivers/comedi/drivers/pcl711.c
+++ b/drivers/comedi/drivers/pcl711.c
@@ -29,10 +29,8 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
/*
* I/O port register map
diff --git a/drivers/comedi/drivers/pcl724.c b/drivers/comedi/drivers/pcl724.c
index 1a5799278a7a..948a0576c9ef 100644
--- a/drivers/comedi/drivers/pcl724.c
+++ b/drivers/comedi/drivers/pcl724.c
@@ -25,9 +25,8 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
struct pcl724_board {
const char *name;
diff --git a/drivers/comedi/drivers/pcl726.c b/drivers/comedi/drivers/pcl726.c
index 88f25d7e76f7..0430630e6ebb 100644
--- a/drivers/comedi/drivers/pcl726.c
+++ b/drivers/comedi/drivers/pcl726.c
@@ -50,8 +50,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#define PCL726_AO_MSB_REG(x) (0x00 + ((x) * 2))
#define PCL726_AO_LSB_REG(x) (0x01 + ((x) * 2))
diff --git a/drivers/comedi/drivers/pcl730.c b/drivers/comedi/drivers/pcl730.c
index 32a29129e6e8..d2733cd5383d 100644
--- a/drivers/comedi/drivers/pcl730.c
+++ b/drivers/comedi/drivers/pcl730.c
@@ -25,7 +25,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/pcl812.c b/drivers/comedi/drivers/pcl812.c
index b87ab3840eee..70dbc129fcf5 100644
--- a/drivers/comedi/drivers/pcl812.c
+++ b/drivers/comedi/drivers/pcl812.c
@@ -114,11 +114,9 @@
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/io.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
+#include <linux/comedi/comedi_isadma.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/pcl816.c b/drivers/comedi/drivers/pcl816.c
index c368a337a0ae..a5e5320be648 100644
--- a/drivers/comedi/drivers/pcl816.c
+++ b/drivers/comedi/drivers/pcl816.c
@@ -35,11 +35,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
+#include <linux/comedi/comedi_isadma.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/pcl818.c b/drivers/comedi/drivers/pcl818.c
index f4b4a686c710..29e503de8267 100644
--- a/drivers/comedi/drivers/pcl818.c
+++ b/drivers/comedi/drivers/pcl818.c
@@ -97,11 +97,9 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
-
-#include "comedi_isadma.h"
-#include "comedi_8254.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8254.h>
+#include <linux/comedi/comedi_isadma.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/pcm3724.c b/drivers/comedi/drivers/pcm3724.c
index 0cb1ad060402..e4103f9eeced 100644
--- a/drivers/comedi/drivers/pcm3724.c
+++ b/drivers/comedi/drivers/pcm3724.c
@@ -24,9 +24,8 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
-
-#include "8255.h"
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedi_8255.h>
/*
* Register I/O Map
diff --git a/drivers/comedi/drivers/pcmad.c b/drivers/comedi/drivers/pcmad.c
index eec89a0afb2f..976eda43881b 100644
--- a/drivers/comedi/drivers/pcmad.c
+++ b/drivers/comedi/drivers/pcmad.c
@@ -29,7 +29,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
#define PCMAD_STATUS 0
#define PCMAD_LSB 1
diff --git a/drivers/comedi/drivers/pcmda12.c b/drivers/comedi/drivers/pcmda12.c
index 14ab1f0d1e9f..611f13bedca0 100644
--- a/drivers/comedi/drivers/pcmda12.c
+++ b/drivers/comedi/drivers/pcmda12.c
@@ -40,7 +40,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/* AI range is not configurable, it's set by jumpers on the board */
static const struct comedi_lrange pcmda12_ranges = {
diff --git a/drivers/comedi/drivers/pcmmio.c b/drivers/comedi/drivers/pcmmio.c
index 24a9568d3378..c2402239d551 100644
--- a/drivers/comedi/drivers/pcmmio.c
+++ b/drivers/comedi/drivers/pcmmio.c
@@ -66,8 +66,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/pcmuio.c b/drivers/comedi/drivers/pcmuio.c
index b299d648a0eb..33b24dbbb919 100644
--- a/drivers/comedi/drivers/pcmuio.c
+++ b/drivers/comedi/drivers/pcmuio.c
@@ -65,8 +65,7 @@
#include <linux/module.h>
#include <linux/interrupt.h>
-
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/quatech_daqp_cs.c b/drivers/comedi/drivers/quatech_daqp_cs.c
index fe4408ebf6b3..2a76c75c513b 100644
--- a/drivers/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/comedi/drivers/quatech_daqp_cs.c
@@ -41,8 +41,7 @@
*/
#include <linux/module.h>
-
-#include "../comedi_pcmcia.h"
+#include <linux/comedi/comedi_pcmcia.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/rtd520.c b/drivers/comedi/drivers/rtd520.c
index 2d99a648b054..7e0ec1a2a2ca 100644
--- a/drivers/comedi/drivers/rtd520.c
+++ b/drivers/comedi/drivers/rtd520.c
@@ -85,10 +85,9 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include "../comedi_pci.h"
-
-#include "comedi_8254.h"
+#include <linux/comedi/comedi_pci.h>
+#include <linux/comedi/comedi_8254.h>
#include "plx9080.h"
/*
diff --git a/drivers/comedi/drivers/rti800.c b/drivers/comedi/drivers/rti800.c
index 327fd93b8b12..1b02e47bdb4c 100644
--- a/drivers/comedi/drivers/rti800.c
+++ b/drivers/comedi/drivers/rti800.c
@@ -42,7 +42,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register map
diff --git a/drivers/comedi/drivers/rti802.c b/drivers/comedi/drivers/rti802.c
index 195e2b1ac4c1..d66762a22258 100644
--- a/drivers/comedi/drivers/rti802.c
+++ b/drivers/comedi/drivers/rti802.c
@@ -22,7 +22,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/s526.c b/drivers/comedi/drivers/s526.c
index 085cf5b449e5..9245c679a3c4 100644
--- a/drivers/comedi/drivers/s526.c
+++ b/drivers/comedi/drivers/s526.c
@@ -27,7 +27,7 @@
*/
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* Register I/O map
diff --git a/drivers/comedi/drivers/s626.c b/drivers/comedi/drivers/s626.c
index e7aba937d896..0e5f9a9a7fd3 100644
--- a/drivers/comedi/drivers/s626.c
+++ b/drivers/comedi/drivers/s626.c
@@ -55,8 +55,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/types.h>
-
-#include "../comedi_pci.h"
+#include <linux/comedi/comedi_pci.h>
#include "s626.h"
diff --git a/drivers/comedi/drivers/ssv_dnp.c b/drivers/comedi/drivers/ssv_dnp.c
index 016d315aa584..813bd0853b0b 100644
--- a/drivers/comedi/drivers/ssv_dnp.c
+++ b/drivers/comedi/drivers/ssv_dnp.c
@@ -19,7 +19,7 @@
/* include files ----------------------------------------------------------- */
#include <linux/module.h>
-#include "../comedidev.h"
+#include <linux/comedi/comedidev.h>
/* Some global definitions: the registers of the DNP ----------------------- */
/* */
diff --git a/drivers/comedi/drivers/usbdux.c b/drivers/comedi/drivers/usbdux.c
index 0350f303d557..92d514b3c1c3 100644
--- a/drivers/comedi/drivers/usbdux.c
+++ b/drivers/comedi/drivers/usbdux.c
@@ -73,8 +73,7 @@
#include <linux/input.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
-
-#include "../comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
/* constants for firmware upload and download */
#define USBDUX_FIRMWARE "usbdux_firmware.bin"
diff --git a/drivers/comedi/drivers/usbduxfast.c b/drivers/comedi/drivers/usbduxfast.c
index 4af012968cb6..39faae0ecb19 100644
--- a/drivers/comedi/drivers/usbduxfast.c
+++ b/drivers/comedi/drivers/usbduxfast.c
@@ -40,7 +40,7 @@
#include <linux/input.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
-#include "../comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
/*
* timeout for the USB-transfer
diff --git a/drivers/comedi/drivers/usbduxsigma.c b/drivers/comedi/drivers/usbduxsigma.c
index 54d7605e909f..2aaeaf44fbe5 100644
--- a/drivers/comedi/drivers/usbduxsigma.c
+++ b/drivers/comedi/drivers/usbduxsigma.c
@@ -40,8 +40,7 @@
#include <linux/fcntl.h>
#include <linux/compiler.h>
#include <asm/unaligned.h>
-
-#include "../comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
/* timeout for the USB-transfer in ms */
#define BULK_TIMEOUT 1000
diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c
index 4b00a9ea611a..46023adc5395 100644
--- a/drivers/comedi/drivers/vmk80xx.c
+++ b/drivers/comedi/drivers/vmk80xx.c
@@ -35,8 +35,7 @@
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
-
-#include "../comedi_usb.h"
+#include <linux/comedi/comedi_usb.h>
enum {
DEVICE_VMK8055,
diff --git a/drivers/comedi/kcomedilib/kcomedilib_main.c b/drivers/comedi/kcomedilib/kcomedilib_main.c
index df9bba1b69ed..43fbe1a63b14 100644
--- a/drivers/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/comedi/kcomedilib/kcomedilib_main.c
@@ -16,9 +16,9 @@
#include <linux/mm.h>
#include <linux/io.h>
-#include "../comedi.h"
-#include "../comedilib.h"
-#include "../comedidev.h"
+#include <linux/comedi.h>
+#include <linux/comedi/comedidev.h>
+#include <linux/comedi/comedilib.h>
MODULE_AUTHOR("David Schleef <ds@schleef.org>");
MODULE_DESCRIPTION("Comedi kernel library");
diff --git a/drivers/comedi/proc.c b/drivers/comedi/proc.c
index 8bc8e42beb90..2e4496633d3d 100644
--- a/drivers/comedi/proc.c
+++ b/drivers/comedi/proc.c
@@ -13,7 +13,7 @@
* was cool.
*/
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "comedi_internal.h"
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
diff --git a/drivers/comedi/range.c b/drivers/comedi/range.c
index a4e6fe0fb729..8f43cf88d784 100644
--- a/drivers/comedi/range.c
+++ b/drivers/comedi/range.c
@@ -8,7 +8,7 @@
*/
#include <linux/uaccess.h>
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
#include "comedi_internal.h"
const struct comedi_lrange range_bipolar10 = { 1, {BIP_RANGE(10)} };
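The hunks above all apply the same conversion: comedi's private headers have moved under include/linux/comedi/, so the relative includes become ordinary angle-bracket includes. A minimal sketch of the resulting include block in a converted driver:

#include <linux/module.h>
#include <linux/comedi/comedidev.h>	/* was "../comedidev.h" */
#include <linux/comedi/comedi_8254.h>	/* was "comedi_8254.h" */
#include <linux/comedi/comedi_isadma.h>	/* was "comedi_isadma.h" */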
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index 646ad385e490..ccac1c453080 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -358,7 +358,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
* other namespaces.
*/
if ((current_user_ns() != &init_user_ns) ||
- (task_active_pid_ns(current) != &init_pid_ns))
+ !task_is_in_init_pid_ns(current))
return;
/* Can only change if privileged. */
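Judging from the check it replaces, the new task_is_in_init_pid_ns() helper is presumably just an inline wrapper over the same comparison; a sketch of the assumed definition:

static inline bool task_is_in_init_pid_ns(struct task_struct *tsk)
{
	return task_active_pid_ns(tsk) == &init_pid_ns;
}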
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index 1cbd60aaed69..a17e51d65aca 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/isa.h>
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
@@ -44,7 +45,6 @@ MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers");
* @ab_enable: array of A and B inputs enable configurations
* @preset_enable: array of set_to_preset_on_index attribute configurations
* @irq_trigger: array of current IRQ trigger function configurations
- * @next_irq_trigger: array of next IRQ trigger function configurations
* @synchronous_mode: array of index function synchronous mode configurations
* @index_polarity: array of index function polarity configurations
* @cable_fault_enable: differential encoder cable status enable configurations
@@ -52,7 +52,6 @@ MODULE_PARM_DESC(irq, "ACCES 104-QUAD-8 interrupt line numbers");
*/
struct quad8 {
spinlock_t lock;
- struct counter_device counter;
unsigned int fck_prescaler[QUAD8_NUM_COUNTERS];
unsigned int preset[QUAD8_NUM_COUNTERS];
unsigned int count_mode[QUAD8_NUM_COUNTERS];
@@ -61,7 +60,6 @@ struct quad8 {
unsigned int ab_enable[QUAD8_NUM_COUNTERS];
unsigned int preset_enable[QUAD8_NUM_COUNTERS];
unsigned int irq_trigger[QUAD8_NUM_COUNTERS];
- unsigned int next_irq_trigger[QUAD8_NUM_COUNTERS];
unsigned int synchronous_mode[QUAD8_NUM_COUNTERS];
unsigned int index_polarity[QUAD8_NUM_COUNTERS];
unsigned int cable_fault_enable;
@@ -113,7 +111,7 @@ static int quad8_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *level)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
unsigned int state;
/* Only Index signal levels can be read */
@@ -131,7 +129,7 @@ static int quad8_signal_read(struct counter_device *counter,
static int quad8_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int base_offset = priv->base + 2 * count->id;
unsigned int flags;
unsigned int borrow;
@@ -163,7 +161,7 @@ static int quad8_count_read(struct counter_device *counter,
static int quad8_count_write(struct counter_device *counter,
struct counter_count *count, u64 val)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int base_offset = priv->base + 2 * count->id;
unsigned long irqflags;
int i;
@@ -213,7 +211,7 @@ static int quad8_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int id = count->id;
unsigned long irqflags;
@@ -243,7 +241,7 @@ static int quad8_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int id = count->id;
unsigned int *const quadrature_mode = priv->quadrature_mode + id;
unsigned int *const scale = priv->quadrature_scale + id;
@@ -305,7 +303,7 @@ static int quad8_direction_read(struct counter_device *counter,
struct counter_count *count,
enum counter_count_direction *direction)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
unsigned int ud_flag;
const unsigned int flag_addr = priv->base + 2 * count->id + 1;
@@ -335,7 +333,7 @@ static int quad8_action_read(struct counter_device *counter,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
int err;
enum counter_function function;
const size_t signal_a_id = count->synapses[0].signal->id;
@@ -390,7 +388,6 @@ static int quad8_action_read(struct counter_device *counter,
}
enum {
- QUAD8_EVENT_NONE = -1,
QUAD8_EVENT_CARRY = 0,
QUAD8_EVENT_COMPARE = 1,
QUAD8_EVENT_CARRY_BORROW = 2,
@@ -399,37 +396,52 @@ enum {
static int quad8_events_configure(struct counter_device *counter)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
unsigned long irq_enabled = 0;
unsigned long irqflags;
- size_t channel;
+ struct counter_event_node *event_node;
+ unsigned int next_irq_trigger;
unsigned long ior_cfg;
unsigned long base_offset;
spin_lock_irqsave(&priv->lock, irqflags);
- /* Enable interrupts for the requested channels, disable for the rest */
- for (channel = 0; channel < QUAD8_NUM_COUNTERS; channel++) {
- if (priv->next_irq_trigger[channel] == QUAD8_EVENT_NONE)
+ list_for_each_entry(event_node, &counter->events_list, l) {
+ switch (event_node->event) {
+ case COUNTER_EVENT_OVERFLOW:
+ next_irq_trigger = QUAD8_EVENT_CARRY;
+ break;
+ case COUNTER_EVENT_THRESHOLD:
+ next_irq_trigger = QUAD8_EVENT_COMPARE;
+ break;
+ case COUNTER_EVENT_OVERFLOW_UNDERFLOW:
+ next_irq_trigger = QUAD8_EVENT_CARRY_BORROW;
+ break;
+ case COUNTER_EVENT_INDEX:
+ next_irq_trigger = QUAD8_EVENT_INDEX;
+ break;
+ default:
+ /* should never reach this path */
+ spin_unlock_irqrestore(&priv->lock, irqflags);
+ return -EINVAL;
+ }
+
+ /* Skip configuration if it is the same as previously set */
+ if (priv->irq_trigger[event_node->channel] == next_irq_trigger)
continue;
- if (priv->irq_trigger[channel] != priv->next_irq_trigger[channel]) {
- /* Save new IRQ function configuration */
- priv->irq_trigger[channel] = priv->next_irq_trigger[channel];
+ /* Save new IRQ function configuration */
+ priv->irq_trigger[event_node->channel] = next_irq_trigger;
- /* Load configuration to I/O Control Register */
- ior_cfg = priv->ab_enable[channel] |
- priv->preset_enable[channel] << 1 |
- priv->irq_trigger[channel] << 3;
- base_offset = priv->base + 2 * channel + 1;
- outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
- }
-
- /* Reset next IRQ trigger function configuration */
- priv->next_irq_trigger[channel] = QUAD8_EVENT_NONE;
+ /* Load configuration to I/O Control Register */
+ ior_cfg = priv->ab_enable[event_node->channel] |
+ priv->preset_enable[event_node->channel] << 1 |
+ priv->irq_trigger[event_node->channel] << 3;
+ base_offset = priv->base + 2 * event_node->channel + 1;
+ outb(QUAD8_CTR_IOR | ior_cfg, base_offset);
/* Enable IRQ line */
- irq_enabled |= BIT(channel);
+ irq_enabled |= BIT(event_node->channel);
}
outb(irq_enabled, priv->base + QUAD8_REG_INDEX_INTERRUPT);
@@ -442,35 +454,20 @@ static int quad8_events_configure(struct counter_device *counter)
static int quad8_watch_validate(struct counter_device *counter,
const struct counter_watch *watch)
{
- struct quad8 *const priv = counter->priv;
+ struct counter_event_node *event_node;
if (watch->channel > QUAD8_NUM_COUNTERS - 1)
return -EINVAL;
switch (watch->event) {
case COUNTER_EVENT_OVERFLOW:
- if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE)
- priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_CARRY;
- else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_CARRY)
- return -EINVAL;
- return 0;
case COUNTER_EVENT_THRESHOLD:
- if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE)
- priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_COMPARE;
- else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_COMPARE)
- return -EINVAL;
- return 0;
case COUNTER_EVENT_OVERFLOW_UNDERFLOW:
- if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE)
- priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_CARRY_BORROW;
- else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_CARRY_BORROW)
- return -EINVAL;
- return 0;
case COUNTER_EVENT_INDEX:
- if (priv->next_irq_trigger[watch->channel] == QUAD8_EVENT_NONE)
- priv->next_irq_trigger[watch->channel] = QUAD8_EVENT_INDEX;
- else if (priv->next_irq_trigger[watch->channel] != QUAD8_EVENT_INDEX)
- return -EINVAL;
+ list_for_each_entry(event_node, &counter->next_events_list, l)
+ if (watch->channel == event_node->channel &&
+ watch->event != event_node->event)
+ return -EINVAL;
return 0;
default:
return -EINVAL;
@@ -497,7 +494,7 @@ static int quad8_index_polarity_get(struct counter_device *counter,
struct counter_signal *signal,
u32 *index_polarity)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
*index_polarity = priv->index_polarity[channel_id];
@@ -509,7 +506,7 @@ static int quad8_index_polarity_set(struct counter_device *counter,
struct counter_signal *signal,
u32 index_polarity)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
const int base_offset = priv->base + 2 * channel_id + 1;
unsigned long irqflags;
@@ -538,7 +535,7 @@ static int quad8_synchronous_mode_get(struct counter_device *counter,
struct counter_signal *signal,
u32 *synchronous_mode)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
*synchronous_mode = priv->synchronous_mode[channel_id];
@@ -550,7 +547,7 @@ static int quad8_synchronous_mode_set(struct counter_device *counter,
struct counter_signal *signal,
u32 synchronous_mode)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id - 16;
const int base_offset = priv->base + 2 * channel_id + 1;
unsigned long irqflags;
@@ -589,7 +586,7 @@ static int quad8_count_mode_read(struct counter_device *counter,
struct counter_count *count,
enum counter_count_mode *cnt_mode)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
/* Map 104-QUAD-8 count mode to Generic Counter count mode */
switch (priv->count_mode[count->id]) {
@@ -614,7 +611,7 @@ static int quad8_count_mode_write(struct counter_device *counter,
struct counter_count *count,
enum counter_count_mode cnt_mode)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
unsigned int count_mode;
unsigned int mode_cfg;
const int base_offset = priv->base + 2 * count->id + 1;
@@ -661,7 +658,7 @@ static int quad8_count_mode_write(struct counter_device *counter,
static int quad8_count_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
*enable = priv->ab_enable[count->id];
@@ -671,7 +668,7 @@ static int quad8_count_enable_read(struct counter_device *counter,
static int quad8_count_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int base_offset = priv->base + 2 * count->id;
unsigned long irqflags;
unsigned int ior_cfg;
@@ -699,7 +696,7 @@ static const char *const quad8_noise_error_states[] = {
static int quad8_error_noise_get(struct counter_device *counter,
struct counter_count *count, u32 *noise_error)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
const int base_offset = priv->base + 2 * count->id + 1;
*noise_error = !!(inb(base_offset) & QUAD8_FLAG_E);
@@ -710,7 +707,7 @@ static int quad8_error_noise_get(struct counter_device *counter,
static int quad8_count_preset_read(struct counter_device *counter,
struct counter_count *count, u64 *preset)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
*preset = priv->preset[count->id];
@@ -736,7 +733,7 @@ static void quad8_preset_register_set(struct quad8 *const priv, const int id,
static int quad8_count_preset_write(struct counter_device *counter,
struct counter_count *count, u64 preset)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
/* Only 24-bit values are supported */
@@ -755,7 +752,7 @@ static int quad8_count_preset_write(struct counter_device *counter,
static int quad8_count_ceiling_read(struct counter_device *counter,
struct counter_count *count, u64 *ceiling)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
spin_lock_irqsave(&priv->lock, irqflags);
@@ -780,7 +777,7 @@ static int quad8_count_ceiling_read(struct counter_device *counter,
static int quad8_count_ceiling_write(struct counter_device *counter,
struct counter_count *count, u64 ceiling)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
unsigned long irqflags;
/* Only 24-bit values are supported */
@@ -807,7 +804,7 @@ static int quad8_count_preset_enable_read(struct counter_device *counter,
struct counter_count *count,
u8 *preset_enable)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
*preset_enable = !priv->preset_enable[count->id];
@@ -818,7 +815,7 @@ static int quad8_count_preset_enable_write(struct counter_device *counter,
struct counter_count *count,
u8 preset_enable)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const int base_offset = priv->base + 2 * count->id + 1;
unsigned long irqflags;
unsigned int ior_cfg;
@@ -845,7 +842,7 @@ static int quad8_signal_cable_fault_read(struct counter_device *counter,
struct counter_signal *signal,
u8 *cable_fault)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
unsigned long irqflags;
bool disabled;
@@ -875,7 +872,7 @@ static int quad8_signal_cable_fault_enable_read(struct counter_device *counter,
struct counter_signal *signal,
u8 *enable)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
*enable = !!(priv->cable_fault_enable & BIT(channel_id));
@@ -887,7 +884,7 @@ static int quad8_signal_cable_fault_enable_write(struct counter_device *counter,
struct counter_signal *signal,
u8 enable)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
unsigned long irqflags;
unsigned int cable_fault_enable;
@@ -913,7 +910,7 @@ static int quad8_signal_fck_prescaler_read(struct counter_device *counter,
struct counter_signal *signal,
u8 *prescaler)
{
- const struct quad8 *const priv = counter->priv;
+ const struct quad8 *const priv = counter_priv(counter);
*prescaler = priv->fck_prescaler[signal->id / 2];
@@ -924,7 +921,7 @@ static int quad8_signal_fck_prescaler_write(struct counter_device *counter,
struct counter_signal *signal,
u8 prescaler)
{
- struct quad8 *const priv = counter->priv;
+ struct quad8 *const priv = counter_priv(counter);
const size_t channel_id = signal->id / 2;
const int base_offset = priv->base + 2 * channel_id;
unsigned long irqflags;
@@ -1085,7 +1082,8 @@ static struct counter_count quad8_counts[] = {
static irqreturn_t quad8_irq_handler(int irq, void *private)
{
- struct quad8 *const priv = private;
+ struct counter_device *counter = private;
+ struct quad8 *const priv = counter_priv(counter);
const unsigned long base = priv->base;
unsigned long irq_status;
unsigned long channel;
@@ -1116,7 +1114,7 @@ static irqreturn_t quad8_irq_handler(int irq, void *private)
continue;
}
- counter_push_event(&priv->counter, event, channel);
+ counter_push_event(counter, event, channel);
}
/* Clear pending interrupts on device */
@@ -1127,6 +1125,7 @@ static irqreturn_t quad8_irq_handler(int irq, void *private)
static int quad8_probe(struct device *dev, unsigned int id)
{
+ struct counter_device *counter;
struct quad8 *priv;
int i, j;
unsigned int base_offset;
@@ -1138,19 +1137,19 @@ static int quad8_probe(struct device *dev, unsigned int id)
return -EBUSY;
}
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
/* Initialize Counter device and driver data */
- priv->counter.name = dev_name(dev);
- priv->counter.parent = dev;
- priv->counter.ops = &quad8_ops;
- priv->counter.counts = quad8_counts;
- priv->counter.num_counts = ARRAY_SIZE(quad8_counts);
- priv->counter.signals = quad8_signals;
- priv->counter.num_signals = ARRAY_SIZE(quad8_signals);
- priv->counter.priv = priv;
+ counter->name = dev_name(dev);
+ counter->parent = dev;
+ counter->ops = &quad8_ops;
+ counter->counts = quad8_counts;
+ counter->num_counts = ARRAY_SIZE(quad8_counts);
+ counter->signals = quad8_signals;
+ counter->num_signals = ARRAY_SIZE(quad8_signals);
priv->base = base[id];
spin_lock_init(&priv->lock);
@@ -1183,20 +1182,22 @@ static int quad8_probe(struct device *dev, unsigned int id)
outb(QUAD8_CTR_IOR, base_offset + 1);
/* Disable index function; negative index polarity */
outb(QUAD8_CTR_IDR, base_offset + 1);
- /* Initialize next IRQ trigger function configuration */
- priv->next_irq_trigger[i] = QUAD8_EVENT_NONE;
}
/* Disable Differential Encoder Cable Status for all channels */
outb(0xFF, base[id] + QUAD8_DIFF_ENCODER_CABLE_STATUS);
/* Enable all counters and enable interrupt function */
outb(QUAD8_CHAN_OP_ENABLE_INTERRUPT_FUNC, base[id] + QUAD8_REG_CHAN_OP);
- err = devm_request_irq(dev, irq[id], quad8_irq_handler, IRQF_SHARED,
- priv->counter.name, priv);
+ err = devm_request_irq(&counter->dev, irq[id], quad8_irq_handler,
+ IRQF_SHARED, counter->name, counter);
if (err)
return err;
- return devm_counter_register(dev, &priv->counter);
+ err = devm_counter_add(dev, counter);
+ if (err < 0)
+ return dev_err_probe(dev, err, "Failed to add counter\n");
+
+ return 0;
}
static struct isa_driver quad8_driver = {
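Two related moves recur in this conversion: struct quad8 no longer embeds its counter_device, and the IRQ cookie handed to devm_request_irq() is now the counter itself, with the driver data recovered through counter_priv(). A compact sketch of that wiring, assuming a hypothetical "foo" driver that is not part of the patch:

#include <linux/counter.h>
#include <linux/interrupt.h>

struct foo_priv {
	unsigned long base;	/* placeholder driver state */
};

static irqreturn_t foo_irq_handler(int irq, void *private)
{
	/* devm_request_irq() was given the counter, not the priv pointer */
	struct counter_device *counter = private;
	struct foo_priv *const priv = counter_priv(counter);

	(void)priv->base;	/* a real handler would poll device registers here */
	counter_push_event(counter, COUNTER_EVENT_OVERFLOW, 0);

	return IRQ_HANDLED;
}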
diff --git a/drivers/counter/counter-core.c b/drivers/counter/counter-core.c
index 5acc54539623..869894b74741 100644
--- a/drivers/counter/counter-core.c
+++ b/drivers/counter/counter-core.c
@@ -15,6 +15,7 @@
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
#include <linux/types.h>
#include <linux/wait.h>
@@ -24,12 +25,25 @@
/* Provides a unique ID for each counter device */
static DEFINE_IDA(counter_ida);
+struct counter_device_allochelper {
+ struct counter_device counter;
+
+ /*
+ * This is cache line aligned to ensure the private data behaves as if it
+ * had been kmalloced separately.
+ */
+ unsigned long privdata[] ____cacheline_aligned;
+};
+
static void counter_device_release(struct device *dev)
{
- struct counter_device *const counter = dev_get_drvdata(dev);
+ struct counter_device *const counter =
+ container_of(dev, struct counter_device, dev);
counter_chrdev_remove(counter);
ida_free(&counter_ida, dev->id);
+
+ kfree(container_of(counter, struct counter_device_allochelper, counter));
}
static struct device_type counter_device_type = {
@@ -45,62 +59,105 @@ static struct bus_type counter_bus_type = {
static dev_t counter_devt;
/**
- * counter_register - register Counter to the system
- * @counter: pointer to Counter to register
+ * counter_priv - access counter device private data
+ * @counter: counter device
*
- * This function registers a Counter to the system. A sysfs "counter" directory
- * will be created and populated with sysfs attributes correlating with the
- * Counter Signals, Synapses, and Counts respectively.
+ * Get the counter device private data
+ */
+void *counter_priv(const struct counter_device *const counter)
+{
+ struct counter_device_allochelper *ch =
+ container_of(counter, struct counter_device_allochelper, counter);
+
+ return &ch->privdata;
+}
+EXPORT_SYMBOL_GPL(counter_priv);
+
+/**
+ * counter_alloc - allocate a counter_device
+ * @sizeof_priv: size of the driver private data
+ *
+ * This is part one of counter registration. The structure is allocated
+ * dynamically to ensure the right lifetime for the embedded struct device.
*
- * RETURNS:
- * 0 on success, negative error number on failure.
+ * If this succeeds, call counter_put() to get rid of the counter_device again.
*/
-int counter_register(struct counter_device *const counter)
+struct counter_device *counter_alloc(size_t sizeof_priv)
{
- struct device *const dev = &counter->dev;
- int id;
+ struct counter_device_allochelper *ch;
+ struct counter_device *counter;
+ struct device *dev;
int err;
+ ch = kzalloc(sizeof(*ch) + sizeof_priv, GFP_KERNEL);
+ if (!ch)
+ return NULL;
+
+ counter = &ch->counter;
+ dev = &counter->dev;
+
/* Acquire unique ID */
- id = ida_alloc(&counter_ida, GFP_KERNEL);
- if (id < 0)
- return id;
+ err = ida_alloc(&counter_ida, GFP_KERNEL);
+ if (err < 0)
+ goto err_ida_alloc;
+ dev->id = err;
mutex_init(&counter->ops_exist_lock);
-
- /* Configure device structure for Counter */
- dev->id = id;
dev->type = &counter_device_type;
dev->bus = &counter_bus_type;
- dev->devt = MKDEV(MAJOR(counter_devt), id);
+ dev->devt = MKDEV(MAJOR(counter_devt), dev->id);
+
+ err = counter_chrdev_add(counter);
+ if (err < 0)
+ goto err_chrdev_add;
+
+ device_initialize(dev);
+
+ return counter;
+
+err_chrdev_add:
+ ida_free(&counter_ida, dev->id);
+err_ida_alloc:
+ kfree(ch);
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(counter_alloc);
+
+void counter_put(struct counter_device *counter)
+{
+ put_device(&counter->dev);
+}
+EXPORT_SYMBOL_GPL(counter_put);
+
+/**
+ * counter_add - complete registration of a counter
+ * @counter: the counter to add
+ *
+ * This is part two of counter registration.
+ *
+ * If this succeeds, call counter_unregister() to get rid of the counter_device again.
+ */
+int counter_add(struct counter_device *counter)
+{
+ int err;
+ struct device *dev = &counter->dev;
+
if (counter->parent) {
dev->parent = counter->parent;
dev->of_node = counter->parent->of_node;
}
- device_initialize(dev);
- dev_set_drvdata(dev, counter);
err = counter_sysfs_add(counter);
if (err < 0)
- goto err_free_id;
-
- err = counter_chrdev_add(counter);
- if (err < 0)
- goto err_free_id;
-
- err = cdev_device_add(&counter->chrdev, dev);
- if (err < 0)
- goto err_remove_chrdev;
-
- return 0;
+ return err;
-err_remove_chrdev:
- counter_chrdev_remove(counter);
-err_free_id:
- put_device(dev);
- return err;
+ /* implies device_add(dev) */
+ return cdev_device_add(&counter->chrdev, dev);
}
-EXPORT_SYMBOL_GPL(counter_register);
+EXPORT_SYMBOL_GPL(counter_add);
/**
* counter_unregister - unregister Counter from the system
@@ -121,8 +178,6 @@ void counter_unregister(struct counter_device *const counter)
wake_up(&counter->events_wait);
mutex_unlock(&counter->ops_exist_lock);
-
- put_device(&counter->dev);
}
EXPORT_SYMBOL_GPL(counter_unregister);
@@ -131,30 +186,56 @@ static void devm_counter_release(void *counter)
counter_unregister(counter);
}
+static void devm_counter_put(void *counter)
+{
+ counter_put(counter);
+}
+
/**
- * devm_counter_register - Resource-managed counter_register
- * @dev: device to allocate counter_device for
- * @counter: pointer to Counter to register
+ * devm_counter_alloc - allocate a counter_device
+ * @dev: the device to register the release callback for
+ * @sizeof_priv: size of the driver private data
*
- * Managed counter_register. The Counter registered with this function is
- * automatically unregistered on driver detach. This function calls
- * counter_register internally. Refer to that function for more information.
+ * This is the device managed version of counter_alloc(). It registers a
+ * cleanup callback that takes care of calling counter_put().
+ */
+struct counter_device *devm_counter_alloc(struct device *dev, size_t sizeof_priv)
+{
+ struct counter_device *counter;
+ int err;
+
+ counter = counter_alloc(sizeof_priv);
+ if (!counter)
+ return NULL;
+
+ err = devm_add_action_or_reset(dev, devm_counter_put, counter);
+ if (err < 0)
+ return NULL;
+
+ return counter;
+}
+EXPORT_SYMBOL_GPL(devm_counter_alloc);
+
+/**
+ * devm_counter_add - complete registration of a counter
+ * @dev: the device to register the release callback for
+ * @counter: the counter to add
*
- * RETURNS:
- * 0 on success, negative error number on failure.
+ * This is the device managed version of counter_add(). It registers a
+ * cleanup callback that takes care of calling counter_unregister().
*/
-int devm_counter_register(struct device *dev,
- struct counter_device *const counter)
+int devm_counter_add(struct device *dev,
+ struct counter_device *const counter)
{
int err;
- err = counter_register(counter);
+ err = counter_add(counter);
if (err < 0)
return err;
return devm_add_action_or_reset(dev, devm_counter_release, counter);
}
-EXPORT_SYMBOL_GPL(devm_counter_register);
+EXPORT_SYMBOL_GPL(devm_counter_add);
#define COUNTER_DEV_MAX 256
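Taken together, the kernel-doc above describes a two-phase, dynamically allocated lifetime. A sketch of the unmanaged flow, under the assumption that ops, counts, and signals are filled in as before (the "foo" names are placeholders):

#include <linux/counter.h>
#include <linux/device.h>

struct foo_priv {
	int dummy;	/* stands in for real driver state */
};

static int foo_register(struct device *parent)
{
	struct counter_device *counter;
	int err;

	counter = counter_alloc(sizeof(struct foo_priv));	/* part one */
	if (!counter)
		return -ENOMEM;

	counter->name = dev_name(parent);
	counter->parent = parent;
	/* ...ops, counts, and signals would be set here... */

	err = counter_add(counter);				/* part two */
	if (err < 0)
		counter_put(counter);	/* allocation succeeded, add failed */

	return err;
}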
diff --git a/drivers/counter/ftm-quaddec.c b/drivers/counter/ftm-quaddec.c
index 5ef0478709cd..2a58582a9df4 100644
--- a/drivers/counter/ftm-quaddec.c
+++ b/drivers/counter/ftm-quaddec.c
@@ -26,7 +26,6 @@
})
struct ftm_quaddec {
- struct counter_device counter;
struct platform_device *pdev;
void __iomem *ftm_base;
bool big_endian;
@@ -118,7 +117,7 @@ static void ftm_quaddec_disable(void *ftm)
static int ftm_quaddec_get_prescaler(struct counter_device *counter,
struct counter_count *count, u32 *cnt_mode)
{
- struct ftm_quaddec *ftm = counter->priv;
+ struct ftm_quaddec *ftm = counter_priv(counter);
uint32_t scflags;
ftm_read(ftm, FTM_SC, &scflags);
@@ -131,7 +130,7 @@ static int ftm_quaddec_get_prescaler(struct counter_device *counter,
static int ftm_quaddec_set_prescaler(struct counter_device *counter,
struct counter_count *count, u32 cnt_mode)
{
- struct ftm_quaddec *ftm = counter->priv;
+ struct ftm_quaddec *ftm = counter_priv(counter);
mutex_lock(&ftm->ftm_quaddec_mutex);
@@ -162,7 +161,7 @@ static int ftm_quaddec_count_read(struct counter_device *counter,
struct counter_count *count,
u64 *val)
{
- struct ftm_quaddec *const ftm = counter->priv;
+ struct ftm_quaddec *const ftm = counter_priv(counter);
uint32_t cntval;
ftm_read(ftm, FTM_CNT, &cntval);
@@ -176,7 +175,7 @@ static int ftm_quaddec_count_write(struct counter_device *counter,
struct counter_count *count,
const u64 val)
{
- struct ftm_quaddec *const ftm = counter->priv;
+ struct ftm_quaddec *const ftm = counter_priv(counter);
if (val != 0) {
dev_warn(&ftm->pdev->dev, "Can only accept '0' as new counter value\n");
@@ -259,17 +258,17 @@ static struct counter_count ftm_quaddec_counts = {
static int ftm_quaddec_probe(struct platform_device *pdev)
{
+ struct counter_device *counter;
struct ftm_quaddec *ftm;
struct device_node *node = pdev->dev.of_node;
struct resource *io;
int ret;
- ftm = devm_kzalloc(&pdev->dev, sizeof(*ftm), GFP_KERNEL);
- if (!ftm)
+ counter = devm_counter_alloc(&pdev->dev, sizeof(*ftm));
+ if (!counter)
return -ENOMEM;
-
- platform_set_drvdata(pdev, ftm);
+ ftm = counter_priv(counter);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io) {
@@ -285,14 +284,13 @@ static int ftm_quaddec_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to map memory region\n");
return -EINVAL;
}
- ftm->counter.name = dev_name(&pdev->dev);
- ftm->counter.parent = &pdev->dev;
- ftm->counter.ops = &ftm_quaddec_cnt_ops;
- ftm->counter.counts = &ftm_quaddec_counts;
- ftm->counter.num_counts = 1;
- ftm->counter.signals = ftm_quaddec_signals;
- ftm->counter.num_signals = ARRAY_SIZE(ftm_quaddec_signals);
- ftm->counter.priv = ftm;
+ counter->name = dev_name(&pdev->dev);
+ counter->parent = &pdev->dev;
+ counter->ops = &ftm_quaddec_cnt_ops;
+ counter->counts = &ftm_quaddec_counts;
+ counter->num_counts = 1;
+ counter->signals = ftm_quaddec_signals;
+ counter->num_signals = ARRAY_SIZE(ftm_quaddec_signals);
mutex_init(&ftm->ftm_quaddec_mutex);
@@ -302,9 +300,9 @@ static int ftm_quaddec_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = devm_counter_register(&pdev->dev, &ftm->counter);
+ ret = devm_counter_add(&pdev->dev, counter);
if (ret)
- return ret;
+ return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
return 0;
}
diff --git a/drivers/counter/intel-qep.c b/drivers/counter/intel-qep.c
index 0924d16de6e2..47a6a9dfc9e8 100644
--- a/drivers/counter/intel-qep.c
+++ b/drivers/counter/intel-qep.c
@@ -63,7 +63,6 @@
#define INTEL_QEP_CLK_PERIOD_NS 10
struct intel_qep {
- struct counter_device counter;
struct mutex lock;
struct device *dev;
void __iomem *regs;
@@ -109,7 +108,7 @@ static void intel_qep_init(struct intel_qep *qep)
static int intel_qep_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct intel_qep *const qep = counter->priv;
+ struct intel_qep *const qep = counter_priv(counter);
pm_runtime_get_sync(qep->dev);
*val = intel_qep_readl(qep, INTEL_QEPCOUNT);
@@ -176,7 +175,7 @@ static struct counter_synapse intel_qep_count_synapses[] = {
static int intel_qep_ceiling_read(struct counter_device *counter,
struct counter_count *count, u64 *ceiling)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
pm_runtime_get_sync(qep->dev);
*ceiling = intel_qep_readl(qep, INTEL_QEPMAX);
@@ -188,7 +187,7 @@ static int intel_qep_ceiling_read(struct counter_device *counter,
static int intel_qep_ceiling_write(struct counter_device *counter,
struct counter_count *count, u64 max)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
int ret = 0;
/* Intel QEP ceiling configuration only supports 32-bit values */
@@ -213,7 +212,7 @@ out:
static int intel_qep_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
*enable = qep->enabled;
@@ -223,7 +222,7 @@ static int intel_qep_enable_read(struct counter_device *counter,
static int intel_qep_enable_write(struct counter_device *counter,
struct counter_count *count, u8 val)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
u32 reg;
bool changed;
@@ -256,7 +255,7 @@ static int intel_qep_spike_filter_ns_read(struct counter_device *counter,
struct counter_count *count,
u64 *length)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
u32 reg;
pm_runtime_get_sync(qep->dev);
@@ -277,7 +276,7 @@ static int intel_qep_spike_filter_ns_write(struct counter_device *counter,
struct counter_count *count,
u64 length)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
u32 reg;
bool enable;
int ret = 0;
@@ -326,7 +325,7 @@ static int intel_qep_preset_enable_read(struct counter_device *counter,
struct counter_count *count,
u8 *preset_enable)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
u32 reg;
pm_runtime_get_sync(qep->dev);
@@ -341,7 +340,7 @@ static int intel_qep_preset_enable_read(struct counter_device *counter,
static int intel_qep_preset_enable_write(struct counter_device *counter,
struct counter_count *count, u8 val)
{
- struct intel_qep *qep = counter->priv;
+ struct intel_qep *qep = counter_priv(counter);
u32 reg;
int ret = 0;
@@ -392,14 +391,16 @@ static struct counter_count intel_qep_counter_count[] = {
static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
+ struct counter_device *counter;
struct intel_qep *qep;
struct device *dev = &pci->dev;
void __iomem *regs;
int ret;
- qep = devm_kzalloc(dev, sizeof(*qep), GFP_KERNEL);
- if (!qep)
+ counter = devm_counter_alloc(dev, sizeof(*qep));
+ if (!counter)
return -ENOMEM;
+ qep = counter_priv(counter);
ret = pcim_enable_device(pci);
if (ret)
@@ -422,20 +423,23 @@ static int intel_qep_probe(struct pci_dev *pci, const struct pci_device_id *id)
intel_qep_init(qep);
pci_set_drvdata(pci, qep);
- qep->counter.name = pci_name(pci);
- qep->counter.parent = dev;
- qep->counter.ops = &intel_qep_counter_ops;
- qep->counter.counts = intel_qep_counter_count;
- qep->counter.num_counts = ARRAY_SIZE(intel_qep_counter_count);
- qep->counter.signals = intel_qep_signals;
- qep->counter.num_signals = ARRAY_SIZE(intel_qep_signals);
- qep->counter.priv = qep;
+ counter->name = pci_name(pci);
+ counter->parent = dev;
+ counter->ops = &intel_qep_counter_ops;
+ counter->counts = intel_qep_counter_count;
+ counter->num_counts = ARRAY_SIZE(intel_qep_counter_count);
+ counter->signals = intel_qep_signals;
+ counter->num_signals = ARRAY_SIZE(intel_qep_signals);
qep->enabled = false;
pm_runtime_put(dev);
pm_runtime_allow(dev);
- return devm_counter_register(&pci->dev, &qep->counter);
+ ret = devm_counter_add(&pci->dev, counter);
+ if (ret < 0)
+ return dev_err_probe(&pci->dev, ret, "Failed to add counter\n");
+
+ return 0;
}
static void intel_qep_remove(struct pci_dev *pci)
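A side pattern visible throughout the intel-qep callbacks: every register access is bracketed by runtime-PM get/put calls so the device is awake for the MMIO. The shape, lifted from the hunks above (intel_qep_readl() and INTEL_QEPCOUNT are the driver's own helpers):

static int intel_qep_count_read(struct counter_device *counter,
				struct counter_count *count, u64 *val)
{
	struct intel_qep *const qep = counter_priv(counter);

	pm_runtime_get_sync(qep->dev);		/* wake the device */
	*val = intel_qep_readl(qep, INTEL_QEPCOUNT);
	pm_runtime_put(qep->dev);		/* drop the reference */

	return 0;
}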
diff --git a/drivers/counter/interrupt-cnt.c b/drivers/counter/interrupt-cnt.c
index 8514a87fcbee..9e99702470c2 100644
--- a/drivers/counter/interrupt-cnt.c
+++ b/drivers/counter/interrupt-cnt.c
@@ -16,7 +16,6 @@
struct interrupt_cnt_priv {
atomic_t count;
- struct counter_device counter;
struct gpio_desc *gpio;
int irq;
bool enabled;
@@ -37,7 +36,7 @@ static irqreturn_t interrupt_cnt_isr(int irq, void *dev_id)
static int interrupt_cnt_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
- struct interrupt_cnt_priv *priv = counter->priv;
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
*enable = priv->enabled;
@@ -47,7 +46,7 @@ static int interrupt_cnt_enable_read(struct counter_device *counter,
static int interrupt_cnt_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
- struct interrupt_cnt_priv *priv = counter->priv;
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
if (priv->enabled == enable)
return 0;
@@ -85,7 +84,7 @@ static int interrupt_cnt_action_read(struct counter_device *counter,
static int interrupt_cnt_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct interrupt_cnt_priv *priv = counter->priv;
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
*val = atomic_read(&priv->count);
@@ -95,7 +94,7 @@ static int interrupt_cnt_read(struct counter_device *counter,
static int interrupt_cnt_write(struct counter_device *counter,
struct counter_count *count, const u64 val)
{
- struct interrupt_cnt_priv *priv = counter->priv;
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
if (val != (typeof(priv->count.counter))val)
return -ERANGE;
@@ -122,7 +121,7 @@ static int interrupt_cnt_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *level)
{
- struct interrupt_cnt_priv *priv = counter->priv;
+ struct interrupt_cnt_priv *priv = counter_priv(counter);
int ret;
if (!priv->gpio)
@@ -148,12 +147,14 @@ static const struct counter_ops interrupt_cnt_ops = {
static int interrupt_cnt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct counter_device *counter;
struct interrupt_cnt_priv *priv;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
priv->irq = platform_get_irq_optional(pdev, 0);
if (priv->irq == -ENXIO)
@@ -184,8 +185,8 @@ static int interrupt_cnt_probe(struct platform_device *pdev)
if (!priv->signals.name)
return -ENOMEM;
- priv->counter.signals = &priv->signals;
- priv->counter.num_signals = 1;
+ counter->signals = &priv->signals;
+ counter->num_signals = 1;
priv->synapses.actions_list = interrupt_cnt_synapse_actions;
priv->synapses.num_actions = ARRAY_SIZE(interrupt_cnt_synapse_actions);
@@ -199,12 +200,11 @@ static int interrupt_cnt_probe(struct platform_device *pdev)
priv->cnts.ext = interrupt_cnt_ext;
priv->cnts.num_ext = ARRAY_SIZE(interrupt_cnt_ext);
- priv->counter.priv = priv;
- priv->counter.name = dev_name(dev);
- priv->counter.parent = dev;
- priv->counter.ops = &interrupt_cnt_ops;
- priv->counter.counts = &priv->cnts;
- priv->counter.num_counts = 1;
+ counter->name = dev_name(dev);
+ counter->parent = dev;
+ counter->ops = &interrupt_cnt_ops;
+ counter->counts = &priv->cnts;
+ counter->num_counts = 1;
irq_set_status_flags(priv->irq, IRQ_NOAUTOEN);
ret = devm_request_irq(dev, priv->irq, interrupt_cnt_isr,
@@ -213,7 +213,11 @@ static int interrupt_cnt_probe(struct platform_device *pdev)
if (ret)
return ret;
- return devm_counter_register(dev, &priv->counter);
+ ret = devm_counter_add(dev, counter);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to add counter\n");
+
+ return 0;
}
static const struct of_device_id interrupt_cnt_of_match[] = {
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 0ab1b2716784..00844445143b 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -24,7 +24,6 @@
struct mchp_tc_data {
const struct atmel_tcb_config *tc_cfg;
- struct counter_device counter;
struct regmap *regmap;
int qdec_mode;
int num_channels;
@@ -72,7 +71,7 @@ static int mchp_tc_count_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
if (priv->qdec_mode)
*function = COUNTER_FUNCTION_QUADRATURE_X4;
@@ -86,7 +85,7 @@ static int mchp_tc_count_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
u32 bmr, cmr;
regmap_read(priv->regmap, ATMEL_TC_BMR, &bmr);
@@ -148,7 +147,7 @@ static int mchp_tc_count_signal_read(struct counter_device *counter,
struct counter_signal *signal,
enum counter_signal_level *lvl)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
bool sigstatus;
u32 sr;
@@ -169,7 +168,7 @@ static int mchp_tc_count_action_read(struct counter_device *counter,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
u32 cmr;
regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), &cmr);
@@ -197,7 +196,7 @@ static int mchp_tc_count_action_write(struct counter_device *counter,
struct counter_synapse *synapse,
enum counter_synapse_action action)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
u32 edge = ATMEL_TC_ETRGEDG_NONE;
/* QDEC mode is rising edge only */
@@ -230,7 +229,7 @@ static int mchp_tc_count_action_write(struct counter_device *counter,
static int mchp_tc_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct mchp_tc_data *const priv = counter->priv;
+ struct mchp_tc_data *const priv = counter_priv(counter);
u32 cnt;
regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CV), &cnt);
@@ -296,6 +295,7 @@ static int mchp_tc_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const struct atmel_tcb_config *tcb_config;
const struct of_device_id *match;
+ struct counter_device *counter;
struct mchp_tc_data *priv;
char clk_name[7];
struct regmap *regmap;
@@ -303,11 +303,10 @@ static int mchp_tc_probe(struct platform_device *pdev)
int channel;
int ret, i;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
-
- platform_set_drvdata(pdev, priv);
+ priv = counter_priv(counter);
match = of_match_node(atmel_tc_of_match, np->parent);
tcb_config = match->data;
@@ -362,16 +361,19 @@ static int mchp_tc_probe(struct platform_device *pdev)
priv->tc_cfg = tcb_config;
priv->regmap = regmap;
- priv->counter.name = dev_name(&pdev->dev);
- priv->counter.parent = &pdev->dev;
- priv->counter.ops = &mchp_tc_ops;
- priv->counter.num_counts = ARRAY_SIZE(mchp_tc_counts);
- priv->counter.counts = mchp_tc_counts;
- priv->counter.num_signals = ARRAY_SIZE(mchp_tc_count_signals);
- priv->counter.signals = mchp_tc_count_signals;
- priv->counter.priv = priv;
-
- return devm_counter_register(&pdev->dev, &priv->counter);
+ counter->name = dev_name(&pdev->dev);
+ counter->parent = &pdev->dev;
+ counter->ops = &mchp_tc_ops;
+ counter->num_counts = ARRAY_SIZE(mchp_tc_counts);
+ counter->counts = mchp_tc_counts;
+ counter->num_signals = ARRAY_SIZE(mchp_tc_count_signals);
+ counter->signals = mchp_tc_count_signals;
+
+ ret = devm_counter_add(&pdev->dev, counter);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
+
+ return 0;
}
static const struct of_device_id mchp_tc_dt_ids[] = {
diff --git a/drivers/counter/stm32-lptimer-cnt.c b/drivers/counter/stm32-lptimer-cnt.c
index 5168833b1fdf..68031d93ce89 100644
--- a/drivers/counter/stm32-lptimer-cnt.c
+++ b/drivers/counter/stm32-lptimer-cnt.c
@@ -20,7 +20,6 @@
#include <linux/types.h>
struct stm32_lptim_cnt {
- struct counter_device counter;
struct device *dev;
struct regmap *regmap;
struct clk *clk;
@@ -141,7 +140,7 @@ static const enum counter_synapse_action stm32_lptim_cnt_synapse_actions[] = {
static int stm32_lptim_cnt_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
u32 cnt;
int ret;
@@ -158,7 +157,7 @@ static int stm32_lptim_cnt_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
if (!priv->quadrature_mode) {
*function = COUNTER_FUNCTION_INCREASE;
@@ -177,7 +176,7 @@ static int stm32_lptim_cnt_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
if (stm32_lptim_is_enabled(priv))
return -EBUSY;
@@ -200,7 +199,7 @@ static int stm32_lptim_cnt_enable_read(struct counter_device *counter,
struct counter_count *count,
u8 *enable)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
int ret;
ret = stm32_lptim_is_enabled(priv);
@@ -216,7 +215,7 @@ static int stm32_lptim_cnt_enable_write(struct counter_device *counter,
struct counter_count *count,
u8 enable)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
int ret;
/* Check nobody uses the timer, or already disabled/enabled */
@@ -241,7 +240,7 @@ static int stm32_lptim_cnt_ceiling_read(struct counter_device *counter,
struct counter_count *count,
u64 *ceiling)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
*ceiling = priv->ceiling;
@@ -252,7 +251,7 @@ static int stm32_lptim_cnt_ceiling_write(struct counter_device *counter,
struct counter_count *count,
u64 ceiling)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
if (stm32_lptim_is_enabled(priv))
return -EBUSY;
@@ -277,7 +276,7 @@ static int stm32_lptim_cnt_action_read(struct counter_device *counter,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
enum counter_function function;
int err;
@@ -321,7 +320,7 @@ static int stm32_lptim_cnt_action_write(struct counter_device *counter,
struct counter_synapse *synapse,
enum counter_synapse_action action)
{
- struct stm32_lptim_cnt *const priv = counter->priv;
+ struct stm32_lptim_cnt *const priv = counter_priv(counter);
enum counter_function function;
int err;
@@ -411,14 +410,17 @@ static struct counter_count stm32_lptim_in1_counts = {
static int stm32_lptim_cnt_probe(struct platform_device *pdev)
{
struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent);
+ struct counter_device *counter;
struct stm32_lptim_cnt *priv;
+ int ret;
if (IS_ERR_OR_NULL(ddata))
return -EINVAL;
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
priv->dev = &pdev->dev;
priv->regmap = ddata->regmap;
@@ -426,23 +428,26 @@ static int stm32_lptim_cnt_probe(struct platform_device *pdev)
priv->ceiling = STM32_LPTIM_MAX_ARR;
/* Initialize Counter device */
- priv->counter.name = dev_name(&pdev->dev);
- priv->counter.parent = &pdev->dev;
- priv->counter.ops = &stm32_lptim_cnt_ops;
+ counter->name = dev_name(&pdev->dev);
+ counter->parent = &pdev->dev;
+ counter->ops = &stm32_lptim_cnt_ops;
if (ddata->has_encoder) {
- priv->counter.counts = &stm32_lptim_enc_counts;
- priv->counter.num_signals = ARRAY_SIZE(stm32_lptim_cnt_signals);
+ counter->counts = &stm32_lptim_enc_counts;
+ counter->num_signals = ARRAY_SIZE(stm32_lptim_cnt_signals);
} else {
- priv->counter.counts = &stm32_lptim_in1_counts;
- priv->counter.num_signals = 1;
+ counter->counts = &stm32_lptim_in1_counts;
+ counter->num_signals = 1;
}
- priv->counter.num_counts = 1;
- priv->counter.signals = stm32_lptim_cnt_signals;
- priv->counter.priv = priv;
+ counter->num_counts = 1;
+ counter->signals = stm32_lptim_cnt_signals;
platform_set_drvdata(pdev, priv);
- return devm_counter_register(&pdev->dev, &priv->counter);
+ ret = devm_counter_add(&pdev->dev, counter);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/counter/stm32-timer-cnt.c b/drivers/counter/stm32-timer-cnt.c
index 0546e932db0c..5779ae7c73cf 100644
--- a/drivers/counter/stm32-timer-cnt.c
+++ b/drivers/counter/stm32-timer-cnt.c
@@ -29,7 +29,6 @@ struct stm32_timer_regs {
};
struct stm32_timer_cnt {
- struct counter_device counter;
struct regmap *regmap;
struct clk *clk;
u32 max_arr;
@@ -47,7 +46,7 @@ static const enum counter_function stm32_count_functions[] = {
static int stm32_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cnt;
regmap_read(priv->regmap, TIM_CNT, &cnt);
@@ -59,7 +58,7 @@ static int stm32_count_read(struct counter_device *counter,
static int stm32_count_write(struct counter_device *counter,
struct counter_count *count, const u64 val)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 ceiling;
regmap_read(priv->regmap, TIM_ARR, &ceiling);
@@ -73,7 +72,7 @@ static int stm32_count_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 smcr;
regmap_read(priv->regmap, TIM_SMCR, &smcr);
@@ -100,7 +99,7 @@ static int stm32_count_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1, sms;
switch (function) {
@@ -140,7 +139,7 @@ static int stm32_count_direction_read(struct counter_device *counter,
struct counter_count *count,
enum counter_count_direction *direction)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
regmap_read(priv->regmap, TIM_CR1, &cr1);
@@ -153,7 +152,7 @@ static int stm32_count_direction_read(struct counter_device *counter,
static int stm32_count_ceiling_read(struct counter_device *counter,
struct counter_count *count, u64 *ceiling)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 arr;
regmap_read(priv->regmap, TIM_ARR, &arr);
@@ -166,7 +165,7 @@ static int stm32_count_ceiling_read(struct counter_device *counter,
static int stm32_count_ceiling_write(struct counter_device *counter,
struct counter_count *count, u64 ceiling)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
if (ceiling > priv->max_arr)
return -ERANGE;
@@ -181,7 +180,7 @@ static int stm32_count_ceiling_write(struct counter_device *counter,
static int stm32_count_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
regmap_read(priv->regmap, TIM_CR1, &cr1);
@@ -194,7 +193,7 @@ static int stm32_count_enable_read(struct counter_device *counter,
static int stm32_count_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
- struct stm32_timer_cnt *const priv = counter->priv;
+ struct stm32_timer_cnt *const priv = counter_priv(counter);
u32 cr1;
if (enable) {
@@ -317,31 +316,38 @@ static int stm32_timer_cnt_probe(struct platform_device *pdev)
struct stm32_timers *ddata = dev_get_drvdata(pdev->dev.parent);
struct device *dev = &pdev->dev;
struct stm32_timer_cnt *priv;
+ struct counter_device *counter;
+ int ret;
if (IS_ERR_OR_NULL(ddata))
return -EINVAL;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
+
priv->regmap = ddata->regmap;
priv->clk = ddata->clk;
priv->max_arr = ddata->max_arr;
- priv->counter.name = dev_name(dev);
- priv->counter.parent = dev;
- priv->counter.ops = &stm32_timer_cnt_ops;
- priv->counter.counts = &stm32_counts;
- priv->counter.num_counts = 1;
- priv->counter.signals = stm32_signals;
- priv->counter.num_signals = ARRAY_SIZE(stm32_signals);
- priv->counter.priv = priv;
+ counter->name = dev_name(dev);
+ counter->parent = dev;
+ counter->ops = &stm32_timer_cnt_ops;
+ counter->counts = &stm32_counts;
+ counter->num_counts = 1;
+ counter->signals = stm32_signals;
+ counter->num_signals = ARRAY_SIZE(stm32_signals);
platform_set_drvdata(pdev, priv);
/* Register Counter device */
- return devm_counter_register(dev, &priv->counter);
+ ret = devm_counter_add(dev, counter);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to add counter\n");
+
+ return 0;
}
static int __maybe_unused stm32_timer_cnt_suspend(struct device *dev)
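The conversion above follows the new counter registration pattern: devm_counter_alloc() reserves the driver's private state behind the struct counter_device, counter_priv() recovers it from any callback, and devm_counter_add() publishes the device. A minimal sketch of that shape, with a hypothetical "foo" driver standing in for the real ones (the counter_* calls are the actual API from this series; every foo_* name is invented for illustration):

#include <linux/counter.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

struct foo_priv {
	struct regmap *regmap;	/* hypothetical hardware handle */
};

static int foo_probe(struct platform_device *pdev)
{
	struct counter_device *counter;
	struct foo_priv *priv;

	/* one devres allocation covers the counter and the private data */
	counter = devm_counter_alloc(&pdev->dev, sizeof(*priv));
	if (!counter)
		return -ENOMEM;
	priv = counter_priv(counter);	/* same pointer every callback sees */

	counter->name = dev_name(&pdev->dev);
	counter->parent = &pdev->dev;
	/* counter->ops, ->counts, ->signals filled in as in the drivers here */

	return devm_counter_add(&pdev->dev, counter);
}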
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index 09817c953f9a..0489d26eb47c 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -87,10 +87,15 @@ struct ti_eqep_cnt {
struct regmap *regmap16;
};
+static struct ti_eqep_cnt *ti_eqep_count_from_counter(struct counter_device *counter)
+{
+ return counter_priv(counter);
+}
+
static int ti_eqep_count_read(struct counter_device *counter,
struct counter_count *count, u64 *val)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 cnt;
regmap_read(priv->regmap32, QPOSCNT, &cnt);
@@ -102,7 +107,7 @@ static int ti_eqep_count_read(struct counter_device *counter,
static int ti_eqep_count_write(struct counter_device *counter,
struct counter_count *count, u64 val)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 max;
regmap_read(priv->regmap32, QPOSMAX, &max);
@@ -116,7 +121,7 @@ static int ti_eqep_function_read(struct counter_device *counter,
struct counter_count *count,
enum counter_function *function)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 qdecctl;
regmap_read(priv->regmap16, QDECCTL, &qdecctl);
@@ -143,7 +148,7 @@ static int ti_eqep_function_write(struct counter_device *counter,
struct counter_count *count,
enum counter_function function)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
enum ti_eqep_count_func qsrc;
switch (function) {
@@ -173,7 +178,7 @@ static int ti_eqep_action_read(struct counter_device *counter,
struct counter_synapse *synapse,
enum counter_synapse_action *action)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
enum counter_function function;
u32 qdecctl;
int err;
@@ -245,7 +250,7 @@ static int ti_eqep_position_ceiling_read(struct counter_device *counter,
struct counter_count *count,
u64 *ceiling)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 qposmax;
regmap_read(priv->regmap32, QPOSMAX, &qposmax);
@@ -259,7 +264,7 @@ static int ti_eqep_position_ceiling_write(struct counter_device *counter,
struct counter_count *count,
u64 ceiling)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
if (ceiling != (u32)ceiling)
return -ERANGE;
@@ -272,7 +277,7 @@ static int ti_eqep_position_ceiling_write(struct counter_device *counter,
static int ti_eqep_position_enable_read(struct counter_device *counter,
struct counter_count *count, u8 *enable)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
u32 qepctl;
regmap_read(priv->regmap16, QEPCTL, &qepctl);
@@ -285,7 +290,7 @@ static int ti_eqep_position_enable_read(struct counter_device *counter,
static int ti_eqep_position_enable_write(struct counter_device *counter,
struct counter_count *count, u8 enable)
{
- struct ti_eqep_cnt *priv = counter->priv;
+ struct ti_eqep_cnt *priv = ti_eqep_count_from_counter(counter);
regmap_write_bits(priv->regmap16, QEPCTL, QEPCTL_PHEN, enable ? -1 : 0);
@@ -368,13 +373,15 @@ static const struct regmap_config ti_eqep_regmap16_config = {
static int ti_eqep_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct counter_device *counter;
struct ti_eqep_cnt *priv;
void __iomem *base;
int err;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ counter = devm_counter_alloc(dev, sizeof(*priv));
+ if (!counter)
return -ENOMEM;
+ priv = counter_priv(counter);
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
@@ -390,16 +397,15 @@ static int ti_eqep_probe(struct platform_device *pdev)
if (IS_ERR(priv->regmap16))
return PTR_ERR(priv->regmap16);
- priv->counter.name = dev_name(dev);
- priv->counter.parent = dev;
- priv->counter.ops = &ti_eqep_counter_ops;
- priv->counter.counts = ti_eqep_counts;
- priv->counter.num_counts = ARRAY_SIZE(ti_eqep_counts);
- priv->counter.signals = ti_eqep_signals;
- priv->counter.num_signals = ARRAY_SIZE(ti_eqep_signals);
- priv->counter.priv = priv;
+ counter->name = dev_name(dev);
+ counter->parent = dev;
+ counter->ops = &ti_eqep_counter_ops;
+ counter->counts = ti_eqep_counts;
+ counter->num_counts = ARRAY_SIZE(ti_eqep_counts);
+ counter->signals = ti_eqep_signals;
+ counter->num_signals = ARRAY_SIZE(ti_eqep_signals);
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, counter);
/*
* Need to make sure power is turned on. On AM33xx, this comes from the
@@ -409,7 +415,7 @@ static int ti_eqep_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- err = counter_register(&priv->counter);
+ err = counter_add(counter);
if (err < 0) {
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
@@ -421,10 +427,10 @@ static int ti_eqep_probe(struct platform_device *pdev)
static int ti_eqep_remove(struct platform_device *pdev)
{
- struct ti_eqep_cnt *priv = platform_get_drvdata(pdev);
+ struct counter_device *counter = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
- counter_unregister(&priv->counter);
+ counter_unregister(counter);
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
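Because ti-eqep manages runtime PM explicitly it keeps the non-devm counter_add()/counter_unregister() pair; the part that changes shape is the drvdata handoff, since the private struct no longer embeds the counter. A sketch of the remove-side pairing under the same assumptions as above (hypothetical foo names, real counter API):

static int foo_remove(struct platform_device *pdev)
{
	/* probe() stored the counter_device, not the private struct */
	struct counter_device *counter = platform_get_drvdata(pdev);

	counter_unregister(counter);	/* matches a non-devm counter_add() */
	return 0;
}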
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
index 4c8ebdf671ca..1b4d425bbf0e 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c
@@ -1753,7 +1753,6 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
char engs_info[2 * OTX2_CPT_NAME_LENGTH];
struct otx2_cpt_eng_grp_info *grp;
struct otx2_cpt_engs_rsvd *engs;
- u32 mask[4];
int i, j;
pr_debug("Engine groups global info");
@@ -1785,6 +1784,8 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf)
for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) {
engs = &grp->engs[j];
if (engs->type) {
+ u32 mask[5] = { };
+
get_engs_info(grp, engs_info,
2 * OTX2_CPT_NAME_LENGTH, j);
pr_debug("Slot%d: %s", j, engs_info);
diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c
index e2375d992308..8e977b7627cb 100644
--- a/drivers/crypto/virtio/virtio_crypto_core.c
+++ b/drivers/crypto/virtio/virtio_crypto_core.c
@@ -404,7 +404,7 @@ static int virtcrypto_probe(struct virtio_device *vdev)
free_engines:
virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
- vcrypto->vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
virtcrypto_del_vqs(vcrypto);
free_dev:
virtcrypto_devmgr_rm_dev(vcrypto);
@@ -436,7 +436,7 @@ static void virtcrypto_remove(struct virtio_device *vdev)
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
virtcrypto_clear_crypto_engines(vcrypto);
virtcrypto_del_vqs(vcrypto);
@@ -456,7 +456,7 @@ static int virtcrypto_freeze(struct virtio_device *vdev)
{
struct virtio_crypto *vcrypto = vdev->priv;
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
virtcrypto_free_unused_reqs(vcrypto);
if (virtcrypto_dev_started(vcrypto))
virtcrypto_dev_stop(vcrypto);
@@ -492,7 +492,7 @@ static int virtcrypto_restore(struct virtio_device *vdev)
free_engines:
virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
- vcrypto->vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
virtcrypto_del_vqs(vcrypto);
return err;
}
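virtio_reset_device() is a thin core wrapper that replaces the open-coded vdev->config->reset(vdev) indirection, giving the virtio core one place to hang future sanity checks or tracing. A sketch of a teardown path in the new style (hypothetical driver; virtio_reset_device() and the del_vqs config op are the real interfaces):

#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void foo_remove(struct virtio_device *vdev)
{
	/* quiesce the device before tearing down its virtqueues */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);
}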
diff --git a/drivers/cxl/Kconfig b/drivers/cxl/Kconfig
index e6de221cc568..67c91378f2dd 100644
--- a/drivers/cxl/Kconfig
+++ b/drivers/cxl/Kconfig
@@ -51,6 +51,7 @@ config CXL_ACPI
tristate "CXL ACPI: Platform Support"
depends on ACPI
default CXL_BUS
+ select ACPI_TABLE_LIB
help
Enable support for host managed device memory (HDM) resources
published by a platform's ACPI CXL memory layout description. See
diff --git a/drivers/cxl/acpi.c b/drivers/cxl/acpi.c
index dadc7f64b9ff..3163167ecc3a 100644
--- a/drivers/cxl/acpi.c
+++ b/drivers/cxl/acpi.c
@@ -8,8 +8,6 @@
#include <linux/pci.h>
#include "cxl.h"
-static struct acpi_table_header *acpi_cedt;
-
/* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
#define CFMWS_INTERLEAVE_WAYS(x) (1 << (x)->interleave_ways)
#define CFMWS_INTERLEAVE_GRANULARITY(x) ((x)->granularity + 8)
@@ -74,134 +72,64 @@ static int cxl_acpi_cfmws_verify(struct device *dev,
return 0;
}
-static void cxl_add_cfmws_decoders(struct device *dev,
- struct cxl_port *root_port)
+struct cxl_cfmws_context {
+ struct device *dev;
+ struct cxl_port *root_port;
+};
+
+static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
{
int target_map[CXL_DECODER_MAX_INTERLEAVE];
+ struct cxl_cfmws_context *ctx = arg;
+ struct cxl_port *root_port = ctx->root_port;
+ struct device *dev = ctx->dev;
struct acpi_cedt_cfmws *cfmws;
struct cxl_decoder *cxld;
- acpi_size len, cur = 0;
- void *cedt_subtable;
- int rc;
-
- len = acpi_cedt->length - sizeof(*acpi_cedt);
- cedt_subtable = acpi_cedt + 1;
-
- while (cur < len) {
- struct acpi_cedt_header *c = cedt_subtable + cur;
- int i;
-
- if (c->type != ACPI_CEDT_TYPE_CFMWS) {
- cur += c->length;
- continue;
- }
+ int rc, i;
- cfmws = cedt_subtable + cur;
+ cfmws = (struct acpi_cedt_cfmws *) header;
- if (cfmws->header.length < sizeof(*cfmws)) {
- dev_warn_once(dev,
- "CFMWS entry skipped:invalid length:%u\n",
- cfmws->header.length);
- cur += c->length;
- continue;
- }
-
- rc = cxl_acpi_cfmws_verify(dev, cfmws);
- if (rc) {
- dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
- cfmws->base_hpa, cfmws->base_hpa +
- cfmws->window_size - 1);
- cur += c->length;
- continue;
- }
-
- for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
- target_map[i] = cfmws->interleave_targets[i];
-
- cxld = cxl_decoder_alloc(root_port,
- CFMWS_INTERLEAVE_WAYS(cfmws));
- if (IS_ERR(cxld))
- goto next;
-
- cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
- cxld->target_type = CXL_DECODER_EXPANDER;
- cxld->range = (struct range) {
- .start = cfmws->base_hpa,
- .end = cfmws->base_hpa + cfmws->window_size - 1,
- };
- cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
- cxld->interleave_granularity =
- CFMWS_INTERLEAVE_GRANULARITY(cfmws);
-
- rc = cxl_decoder_add(cxld, target_map);
- if (rc)
- put_device(&cxld->dev);
- else
- rc = cxl_decoder_autoremove(dev, cxld);
- if (rc) {
- dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
- cfmws->base_hpa, cfmws->base_hpa +
- cfmws->window_size - 1);
- goto next;
- }
- dev_dbg(dev, "add: %s range %#llx-%#llx\n",
- dev_name(&cxld->dev), cfmws->base_hpa,
+ rc = cxl_acpi_cfmws_verify(dev, cfmws);
+ if (rc) {
+ dev_err(dev, "CFMWS range %#llx-%#llx not registered\n",
+ cfmws->base_hpa,
cfmws->base_hpa + cfmws->window_size - 1);
-next:
- cur += c->length;
+ return 0;
}
-}
-
-static struct acpi_cedt_chbs *cxl_acpi_match_chbs(struct device *dev, u32 uid)
-{
- struct acpi_cedt_chbs *chbs, *chbs_match = NULL;
- acpi_size len, cur = 0;
- void *cedt_subtable;
- len = acpi_cedt->length - sizeof(*acpi_cedt);
- cedt_subtable = acpi_cedt + 1;
+ for (i = 0; i < CFMWS_INTERLEAVE_WAYS(cfmws); i++)
+ target_map[i] = cfmws->interleave_targets[i];
- while (cur < len) {
- struct acpi_cedt_header *c = cedt_subtable + cur;
-
- if (c->type != ACPI_CEDT_TYPE_CHBS) {
- cur += c->length;
- continue;
- }
-
- chbs = cedt_subtable + cur;
-
- if (chbs->header.length < sizeof(*chbs)) {
- dev_warn_once(dev,
- "CHBS entry skipped: invalid length:%u\n",
- chbs->header.length);
- cur += c->length;
- continue;
- }
-
- if (chbs->uid != uid) {
- cur += c->length;
- continue;
- }
+ cxld = cxl_decoder_alloc(root_port, CFMWS_INTERLEAVE_WAYS(cfmws));
+ if (IS_ERR(cxld))
+ return 0;
- if (chbs_match) {
- dev_warn_once(dev,
- "CHBS entry skipped: duplicate UID:%u\n",
- uid);
- cur += c->length;
- continue;
- }
+ cxld->flags = cfmws_to_decoder_flags(cfmws->restrictions);
+ cxld->target_type = CXL_DECODER_EXPANDER;
+ cxld->range = (struct range){
+ .start = cfmws->base_hpa,
+ .end = cfmws->base_hpa + cfmws->window_size - 1,
+ };
+ cxld->interleave_ways = CFMWS_INTERLEAVE_WAYS(cfmws);
+ cxld->interleave_granularity = CFMWS_INTERLEAVE_GRANULARITY(cfmws);
- chbs_match = chbs;
- cur += c->length;
+ rc = cxl_decoder_add(cxld, target_map);
+ if (rc)
+ put_device(&cxld->dev);
+ else
+ rc = cxl_decoder_autoremove(dev, cxld);
+ if (rc) {
+ dev_err(dev, "Failed to add decoder for %#llx-%#llx\n",
+ cfmws->base_hpa,
+ cfmws->base_hpa + cfmws->window_size - 1);
+ return 0;
}
+ dev_dbg(dev, "add: %s node: %d range %#llx-%#llx\n",
+ dev_name(&cxld->dev), phys_to_target_node(cxld->range.start),
+ cfmws->base_hpa, cfmws->base_hpa + cfmws->window_size - 1);
- return chbs_match ? chbs_match : ERR_PTR(-ENODEV);
-}
-
-static resource_size_t get_chbcr(struct acpi_cedt_chbs *chbs)
-{
- return IS_ERR(chbs) ? CXL_RESOURCE_NONE : chbs->base;
+ return 0;
}
__mock int match_add_root_ports(struct pci_dev *pdev, void *data)
@@ -355,12 +283,36 @@ static int add_host_bridge_uport(struct device *match, void *arg)
return rc;
}
+struct cxl_chbs_context {
+ struct device *dev;
+ unsigned long long uid;
+ resource_size_t chbcr;
+};
+
+static int cxl_get_chbcr(union acpi_subtable_headers *header, void *arg,
+ const unsigned long end)
+{
+ struct cxl_chbs_context *ctx = arg;
+ struct acpi_cedt_chbs *chbs;
+
+ if (ctx->chbcr)
+ return 0;
+
+ chbs = (struct acpi_cedt_chbs *) header;
+
+ if (ctx->uid != chbs->uid)
+ return 0;
+ ctx->chbcr = chbs->base;
+
+ return 0;
+}
+
static int add_host_bridge_dport(struct device *match, void *arg)
{
int rc;
acpi_status status;
unsigned long long uid;
- struct acpi_cedt_chbs *chbs;
+ struct cxl_chbs_context ctx;
struct cxl_port *root_port = arg;
struct device *host = root_port->dev.parent;
struct acpi_device *bridge = to_cxl_host_bridge(host, match);
@@ -376,14 +328,19 @@ static int add_host_bridge_dport(struct device *match, void *arg)
return -ENODEV;
}
- chbs = cxl_acpi_match_chbs(host, uid);
- if (IS_ERR(chbs)) {
+ ctx = (struct cxl_chbs_context) {
+ .dev = host,
+ .uid = uid,
+ };
+ acpi_table_parse_cedt(ACPI_CEDT_TYPE_CHBS, cxl_get_chbcr, &ctx);
+
+ if (ctx.chbcr == 0) {
dev_warn(host, "No CHBS found for Host Bridge: %s\n",
dev_name(match));
return 0;
}
- rc = cxl_add_dport(root_port, match, uid, get_chbcr(chbs));
+ rc = cxl_add_dport(root_port, match, uid, ctx.chbcr);
if (rc) {
dev_err(host, "failed to add downstream port: %s\n",
dev_name(match));
@@ -417,40 +374,29 @@ static int add_root_nvdimm_bridge(struct device *match, void *data)
return 1;
}
-static u32 cedt_instance(struct platform_device *pdev)
-{
- const bool *native_acpi0017 = acpi_device_get_match_data(&pdev->dev);
-
- if (native_acpi0017 && *native_acpi0017)
- return 0;
-
- /* for cxl_test request a non-canonical instance */
- return U32_MAX;
-}
-
static int cxl_acpi_probe(struct platform_device *pdev)
{
int rc;
- acpi_status status;
struct cxl_port *root_port;
struct device *host = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(host);
+ struct cxl_cfmws_context ctx;
root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
if (IS_ERR(root_port))
return PTR_ERR(root_port);
dev_dbg(host, "add: %s\n", dev_name(&root_port->dev));
- status = acpi_get_table(ACPI_SIG_CEDT, cedt_instance(pdev), &acpi_cedt);
- if (ACPI_FAILURE(status))
- return -ENXIO;
-
rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
add_host_bridge_dport);
- if (rc)
- goto out;
+ if (rc < 0)
+ return rc;
- cxl_add_cfmws_decoders(host, root_port);
+ ctx = (struct cxl_cfmws_context) {
+ .dev = host,
+ .root_port = root_port,
+ };
+ acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, cxl_parse_cfmws, &ctx);
/*
* Root level scanned with host-bridge as dports, now scan host-bridges
@@ -458,24 +404,20 @@ static int cxl_acpi_probe(struct platform_device *pdev)
*/
rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
add_host_bridge_uport);
- if (rc)
- goto out;
+ if (rc < 0)
+ return rc;
if (IS_ENABLED(CONFIG_CXL_PMEM))
rc = device_for_each_child(&root_port->dev, root_port,
add_root_nvdimm_bridge);
-
-out:
- acpi_put_table(acpi_cedt);
if (rc < 0)
return rc;
+
return 0;
}
-static bool native_acpi0017 = true;
-
static const struct acpi_device_id cxl_acpi_ids[] = {
- { "ACPI0017", (unsigned long) &native_acpi0017 },
+ { "ACPI0017" },
{ },
};
MODULE_DEVICE_TABLE(acpi, cxl_acpi_ids);
@@ -491,3 +433,4 @@ static struct platform_driver cxl_acpi_driver = {
module_platform_driver(cxl_acpi_driver);
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(CXL);
+MODULE_IMPORT_NS(ACPI);
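acpi_table_parse_cedt() (exported in the ACPI symbol namespace, hence the new MODULE_IMPORT_NS(ACPI)) acquires the CEDT, walks the requested subtable type, and invokes the callback once per entry; that is what lets this driver drop its open-coded length/bounds loops and the global acpi_cedt pointer. A minimal sketch of the callback pattern, assuming the helper signatures used above (the counting context is invented for illustration):

#include <linux/acpi.h>

struct cfmws_count_ctx {
	int nr;	/* hypothetical: tally CFMWS entries */
};

static int count_cfmws(union acpi_subtable_headers *header, void *arg,
		       const unsigned long end)
{
	struct cfmws_count_ctx *ctx = arg;

	ctx->nr++;
	return 0;	/* a non-zero return stops the walk */
}

static int count_windows(void)
{
	struct cfmws_count_ctx ctx = { 0 };

	/* table get/put is handled inside the helper */
	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, count_cfmws, &ctx);
	return ctx.nr;
}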
diff --git a/drivers/cxl/core/Makefile b/drivers/cxl/core/Makefile
index 07eb8e1fb8a6..40ab50318daf 100644
--- a/drivers/cxl/core/Makefile
+++ b/drivers/cxl/core/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_CXL_BUS) += cxl_core.o
-ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CXL -I$(srctree)/drivers/cxl
+ccflags-y += -I$(srctree)/drivers/cxl
cxl_core-y := bus.o
cxl_core-y += pmem.o
cxl_core-y += regs.o
diff --git a/drivers/cxl/core/bus.c b/drivers/cxl/core/bus.c
index ebd061d03950..3f9b98ecd18b 100644
--- a/drivers/cxl/core/bus.c
+++ b/drivers/cxl/core/bus.c
@@ -200,7 +200,7 @@ bool is_root_decoder(struct device *dev)
{
return dev->type == &cxl_decoder_root_type;
}
-EXPORT_SYMBOL_GPL(is_root_decoder);
+EXPORT_SYMBOL_NS_GPL(is_root_decoder, CXL);
struct cxl_decoder *to_cxl_decoder(struct device *dev)
{
@@ -209,7 +209,7 @@ struct cxl_decoder *to_cxl_decoder(struct device *dev)
return NULL;
return container_of(dev, struct cxl_decoder, dev);
}
-EXPORT_SYMBOL_GPL(to_cxl_decoder);
+EXPORT_SYMBOL_NS_GPL(to_cxl_decoder, CXL);
static void cxl_dport_release(struct cxl_dport *dport)
{
@@ -376,7 +376,7 @@ err:
put_device(dev);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(devm_cxl_add_port);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
@@ -451,7 +451,7 @@ err:
cxl_dport_release(dport);
return rc;
}
-EXPORT_SYMBOL_GPL(cxl_add_dport);
+EXPORT_SYMBOL_NS_GPL(cxl_add_dport, CXL);
static int decoder_populate_targets(struct cxl_decoder *cxld,
struct cxl_port *port, int *target_map)
@@ -485,9 +485,7 @@ out_unlock:
struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets)
{
- struct cxl_decoder *cxld, cxld_const_init = {
- .nr_targets = nr_targets,
- };
+ struct cxl_decoder *cxld;
struct device *dev;
int rc = 0;
@@ -497,13 +495,13 @@ struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets)
cxld = kzalloc(struct_size(cxld, target, nr_targets), GFP_KERNEL);
if (!cxld)
return ERR_PTR(-ENOMEM);
- memcpy(cxld, &cxld_const_init, sizeof(cxld_const_init));
rc = ida_alloc(&port->decoder_ida, GFP_KERNEL);
if (rc < 0)
goto err;
cxld->id = rc;
+ cxld->nr_targets = nr_targets;
dev = &cxld->dev;
device_initialize(dev);
device_set_pm_not_required(dev);
@@ -521,7 +519,7 @@ err:
kfree(cxld);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(cxl_decoder_alloc);
+EXPORT_SYMBOL_NS_GPL(cxl_decoder_alloc, CXL);
int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
{
@@ -550,7 +548,7 @@ int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map)
return device_add(dev);
}
-EXPORT_SYMBOL_GPL(cxl_decoder_add);
+EXPORT_SYMBOL_NS_GPL(cxl_decoder_add, CXL);
static void cxld_unregister(void *dev)
{
@@ -561,7 +559,7 @@ int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld)
{
return devm_add_action_or_reset(host, cxld_unregister, &cxld->dev);
}
-EXPORT_SYMBOL_GPL(cxl_decoder_autoremove);
+EXPORT_SYMBOL_NS_GPL(cxl_decoder_autoremove, CXL);
/**
* __cxl_driver_register - register a driver for the cxl bus
@@ -594,13 +592,13 @@ int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
return driver_register(&cxl_drv->drv);
}
-EXPORT_SYMBOL_GPL(__cxl_driver_register);
+EXPORT_SYMBOL_NS_GPL(__cxl_driver_register, CXL);
void cxl_driver_unregister(struct cxl_driver *cxl_drv)
{
driver_unregister(&cxl_drv->drv);
}
-EXPORT_SYMBOL_GPL(cxl_driver_unregister);
+EXPORT_SYMBOL_NS_GPL(cxl_driver_unregister, CXL);
static int cxl_device_id(struct device *dev)
{
@@ -642,7 +640,7 @@ struct bus_type cxl_bus_type = {
.probe = cxl_bus_probe,
.remove = cxl_bus_remove,
};
-EXPORT_SYMBOL_GPL(cxl_bus_type);
+EXPORT_SYMBOL_NS_GPL(cxl_bus_type, CXL);
static __init int cxl_core_init(void)
{
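The export changes in this file are mechanical but not cosmetic: EXPORT_SYMBOL_NS_GPL() places each symbol in the CXL symbol namespace at the export site, instead of relying on the DEFAULT_SYMBOL_NAMESPACE define just removed from the core Makefile, and modpost warns about any consumer that uses a namespaced symbol without importing the namespace. A minimal sketch of the producer/consumer contract (symbol and namespace names hypothetical):

/* producer module */
#include <linux/module.h>

int foo_frob(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(foo_frob, FOO);

/* a consumer module states the dependency once */
MODULE_IMPORT_NS(FOO);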
diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c
index 576796a5d9f3..be61a0d8016b 100644
--- a/drivers/cxl/core/mbox.c
+++ b/drivers/cxl/core/mbox.c
@@ -128,8 +128,8 @@ static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
}
/**
- * cxl_mem_mbox_send_cmd() - Send a mailbox command to a memory device.
- * @cxlm: The CXL memory device to communicate with.
+ * cxl_mbox_send_cmd() - Send a mailbox command to a device.
+ * @cxlds: The device data for the operation
* @opcode: Opcode for the mailbox command.
* @in: The input payload for the mailbox command.
* @in_size: The length of the input payload
@@ -148,11 +148,9 @@ static struct cxl_mem_command *cxl_mem_find_command(u16 opcode)
* Mailbox commands may execute successfully yet the device itself reported an
* error. While this distinction can be useful for commands from userspace, the
* kernel will only be able to use results when both are successful.
- *
- * See __cxl_mem_mbox_send_cmd()
*/
-int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
- size_t in_size, void *out, size_t out_size)
+int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
+ size_t in_size, void *out, size_t out_size)
{
const struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
struct cxl_mbox_cmd mbox_cmd = {
@@ -164,10 +162,10 @@ int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
};
int rc;
- if (out_size > cxlm->payload_size)
+ if (out_size > cxlds->payload_size)
return -E2BIG;
- rc = cxlm->mbox_send(cxlm, &mbox_cmd);
+ rc = cxlds->mbox_send(cxlds, &mbox_cmd);
if (rc)
return rc;
@@ -184,7 +182,7 @@ int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
return 0;
}
-EXPORT_SYMBOL_GPL(cxl_mem_mbox_send_cmd);
+EXPORT_SYMBOL_NS_GPL(cxl_mbox_send_cmd, CXL);
static bool cxl_mem_raw_command_allowed(u16 opcode)
{
@@ -211,7 +209,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
/**
* cxl_validate_cmd_from_user() - Check fields for CXL_MEM_SEND_COMMAND.
- * @cxlm: &struct cxl_mem device whose mailbox will be used.
+ * @cxlds: The device data for the operation
* @send_cmd: &struct cxl_send_command copied in from userspace.
* @out_cmd: Sanitized and populated &struct cxl_mem_command.
*
@@ -228,7 +226,7 @@ static bool cxl_mem_raw_command_allowed(u16 opcode)
*
* See handle_mailbox_cmd_from_user()
*/
-static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
+static int cxl_validate_cmd_from_user(struct cxl_dev_state *cxlds,
const struct cxl_send_command *send_cmd,
struct cxl_mem_command *out_cmd)
{
@@ -243,7 +241,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
* supports, but output can be arbitrarily large (simply write out as
* much data as the hardware provides).
*/
- if (send_cmd->in.size > cxlm->payload_size)
+ if (send_cmd->in.size > cxlds->payload_size)
return -EINVAL;
/*
@@ -269,7 +267,7 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
* gets passed along without further checking, so it must be
* validated here.
*/
- if (send_cmd->out.size > cxlm->payload_size)
+ if (send_cmd->out.size > cxlds->payload_size)
return -EINVAL;
if (!cxl_mem_raw_command_allowed(send_cmd->raw.opcode))
@@ -294,11 +292,11 @@ static int cxl_validate_cmd_from_user(struct cxl_mem *cxlm,
info = &c->info;
/* Check that the command is enabled for hardware */
- if (!test_bit(info->id, cxlm->enabled_cmds))
+ if (!test_bit(info->id, cxlds->enabled_cmds))
return -ENOTTY;
/* Check that the command is not claimed for exclusive kernel use */
- if (test_bit(info->id, cxlm->exclusive_cmds))
+ if (test_bit(info->id, cxlds->exclusive_cmds))
return -EBUSY;
/* Check the input buffer is the expected size */
@@ -356,7 +354,7 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
/**
* handle_mailbox_cmd_from_user() - Dispatch a mailbox command for userspace.
- * @cxlm: The CXL memory device to communicate with.
+ * @cxlds: The device data for the operation
* @cmd: The validated command.
* @in_payload: Pointer to userspace's input payload.
* @out_payload: Pointer to userspace's output payload.
@@ -379,12 +377,12 @@ int cxl_query_cmd(struct cxl_memdev *cxlmd,
*
* See cxl_send_cmd().
*/
-static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
+static int handle_mailbox_cmd_from_user(struct cxl_dev_state *cxlds,
const struct cxl_mem_command *cmd,
u64 in_payload, u64 out_payload,
s32 *size_out, u32 *retval)
{
- struct device *dev = cxlm->dev;
+ struct device *dev = cxlds->dev;
struct cxl_mbox_cmd mbox_cmd = {
.opcode = cmd->opcode,
.size_in = cmd->info.size_in,
@@ -417,7 +415,7 @@ static int handle_mailbox_cmd_from_user(struct cxl_mem *cxlm,
dev_WARN_ONCE(dev, cmd->info.id == CXL_MEM_COMMAND_ID_RAW,
"raw command path used\n");
- rc = cxlm->mbox_send(cxlm, &mbox_cmd);
+ rc = cxlds->mbox_send(cxlds, &mbox_cmd);
if (rc)
goto out;
@@ -447,7 +445,7 @@ out:
int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
{
- struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct device *dev = &cxlmd->dev;
struct cxl_send_command send;
struct cxl_mem_command c;
@@ -458,15 +456,15 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
if (copy_from_user(&send, s, sizeof(send)))
return -EFAULT;
- rc = cxl_validate_cmd_from_user(cxlmd->cxlm, &send, &c);
+ rc = cxl_validate_cmd_from_user(cxlmd->cxlds, &send, &c);
if (rc)
return rc;
/* Prepare to handle a full payload for variable sized output */
if (c.info.size_out < 0)
- c.info.size_out = cxlm->payload_size;
+ c.info.size_out = cxlds->payload_size;
- rc = handle_mailbox_cmd_from_user(cxlm, &c, send.in.payload,
+ rc = handle_mailbox_cmd_from_user(cxlds, &c, send.in.payload,
send.out.payload, &send.out.size,
&send.retval);
if (rc)
@@ -478,13 +476,13 @@ int cxl_send_cmd(struct cxl_memdev *cxlmd, struct cxl_send_command __user *s)
return 0;
}
-static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
+static int cxl_xfer_log(struct cxl_dev_state *cxlds, uuid_t *uuid, u32 size, u8 *out)
{
u32 remaining = size;
u32 offset = 0;
while (remaining) {
- u32 xfer_size = min_t(u32, remaining, cxlm->payload_size);
+ u32 xfer_size = min_t(u32, remaining, cxlds->payload_size);
struct cxl_mbox_get_log log = {
.uuid = *uuid,
.offset = cpu_to_le32(offset),
@@ -492,8 +490,8 @@ static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
};
int rc;
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LOG, &log,
- sizeof(log), out, xfer_size);
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LOG, &log, sizeof(log),
+ out, xfer_size);
if (rc < 0)
return rc;
@@ -507,14 +505,14 @@ static int cxl_xfer_log(struct cxl_mem *cxlm, uuid_t *uuid, u32 size, u8 *out)
/**
* cxl_walk_cel() - Walk through the Command Effects Log.
- * @cxlm: Device.
+ * @cxlds: The device data for the operation
* @size: Length of the Command Effects Log.
* @cel: CEL
*
* Iterate over each entry in the CEL and determine if the driver supports the
* command. If so, the command is enabled for the device and can be used later.
*/
-static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
+static void cxl_walk_cel(struct cxl_dev_state *cxlds, size_t size, u8 *cel)
{
struct cxl_cel_entry *cel_entry;
const int cel_entries = size / sizeof(*cel_entry);
@@ -527,26 +525,26 @@ static void cxl_walk_cel(struct cxl_mem *cxlm, size_t size, u8 *cel)
struct cxl_mem_command *cmd = cxl_mem_find_command(opcode);
if (!cmd) {
- dev_dbg(cxlm->dev,
+ dev_dbg(cxlds->dev,
"Opcode 0x%04x unsupported by driver", opcode);
continue;
}
- set_bit(cmd->info.id, cxlm->enabled_cmds);
+ set_bit(cmd->info.id, cxlds->enabled_cmds);
}
}
-static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_mem *cxlm)
+static struct cxl_mbox_get_supported_logs *cxl_get_gsl(struct cxl_dev_state *cxlds)
{
struct cxl_mbox_get_supported_logs *ret;
int rc;
- ret = kvmalloc(cxlm->payload_size, GFP_KERNEL);
+ ret = kvmalloc(cxlds->payload_size, GFP_KERNEL);
if (!ret)
return ERR_PTR(-ENOMEM);
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL,
- 0, ret, cxlm->payload_size);
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_SUPPORTED_LOGS, NULL, 0, ret,
+ cxlds->payload_size);
if (rc < 0) {
kvfree(ret);
return ERR_PTR(rc);
@@ -567,23 +565,23 @@ static const uuid_t log_uuid[] = {
};
/**
- * cxl_mem_enumerate_cmds() - Enumerate commands for a device.
- * @cxlm: The device.
+ * cxl_enumerate_cmds() - Enumerate commands for a device.
+ * @cxlds: The device data for the operation
*
* Returns 0 if enumerate completed successfully.
*
* CXL devices have optional support for certain commands. This function will
* determine the set of supported commands for the hardware and update the
- * enabled_cmds bitmap in the @cxlm.
+ * enabled_cmds bitmap in the @cxlds.
*/
-int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
+int cxl_enumerate_cmds(struct cxl_dev_state *cxlds)
{
struct cxl_mbox_get_supported_logs *gsl;
- struct device *dev = cxlm->dev;
+ struct device *dev = cxlds->dev;
struct cxl_mem_command *cmd;
int i, rc;
- gsl = cxl_get_gsl(cxlm);
+ gsl = cxl_get_gsl(cxlds);
if (IS_ERR(gsl))
return PTR_ERR(gsl);
@@ -604,19 +602,19 @@ int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm)
goto out;
}
- rc = cxl_xfer_log(cxlm, &uuid, size, log);
+ rc = cxl_xfer_log(cxlds, &uuid, size, log);
if (rc) {
kvfree(log);
goto out;
}
- cxl_walk_cel(cxlm, size, log);
+ cxl_walk_cel(cxlds, size, log);
kvfree(log);
/* In case CEL was bogus, enable some default commands. */
cxl_for_each_cmd(cmd)
if (cmd->flags & CXL_CMD_FLAG_FORCE_ENABLE)
- set_bit(cmd->info.id, cxlm->enabled_cmds);
+ set_bit(cmd->info.id, cxlds->enabled_cmds);
/* Found the required CEL */
rc = 0;
@@ -626,11 +624,11 @@ out:
kvfree(gsl);
return rc;
}
-EXPORT_SYMBOL_GPL(cxl_mem_enumerate_cmds);
+EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
/**
* cxl_mem_get_partition_info - Get partition info
- * @cxlm: cxl_mem instance to update partition info
+ * @cxlds: The device data for the operation
*
* Retrieve the current partition info for the device specified. The active
* values are the current capacity in bytes. If not 0, the 'next' values are
@@ -640,7 +638,7 @@ EXPORT_SYMBOL_GPL(cxl_mem_enumerate_cmds);
*
* See CXL @8.2.9.5.2.1 Get Partition Info
*/
-static int cxl_mem_get_partition_info(struct cxl_mem *cxlm)
+static int cxl_mem_get_partition_info(struct cxl_dev_state *cxlds)
{
struct cxl_mbox_get_partition_info {
__le64 active_volatile_cap;
@@ -650,124 +648,124 @@ static int cxl_mem_get_partition_info(struct cxl_mem *cxlm)
} __packed pi;
int rc;
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_PARTITION_INFO,
- NULL, 0, &pi, sizeof(pi));
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_PARTITION_INFO, NULL, 0,
+ &pi, sizeof(pi));
if (rc)
return rc;
- cxlm->active_volatile_bytes =
+ cxlds->active_volatile_bytes =
le64_to_cpu(pi.active_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
- cxlm->active_persistent_bytes =
+ cxlds->active_persistent_bytes =
le64_to_cpu(pi.active_persistent_cap) * CXL_CAPACITY_MULTIPLIER;
- cxlm->next_volatile_bytes =
+ cxlds->next_volatile_bytes =
le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
- cxlm->next_persistent_bytes =
+ cxlds->next_persistent_bytes =
le64_to_cpu(pi.next_volatile_cap) * CXL_CAPACITY_MULTIPLIER;
return 0;
}
/**
- * cxl_mem_identify() - Send the IDENTIFY command to the device.
- * @cxlm: The device to identify.
+ * cxl_dev_state_identify() - Send the IDENTIFY command to the device.
+ * @cxlds: The device data for the operation
*
* Return: 0 if identify was executed successfully.
*
* This will dispatch the identify command to the device and on success populate
* structures to be exported to sysfs.
*/
-int cxl_mem_identify(struct cxl_mem *cxlm)
+int cxl_dev_state_identify(struct cxl_dev_state *cxlds)
{
/* See CXL 2.0 Table 175 Identify Memory Device Output Payload */
struct cxl_mbox_identify id;
int rc;
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
- sizeof(id));
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0, &id,
+ sizeof(id));
if (rc < 0)
return rc;
- cxlm->total_bytes =
+ cxlds->total_bytes =
le64_to_cpu(id.total_capacity) * CXL_CAPACITY_MULTIPLIER;
- cxlm->volatile_only_bytes =
+ cxlds->volatile_only_bytes =
le64_to_cpu(id.volatile_capacity) * CXL_CAPACITY_MULTIPLIER;
- cxlm->persistent_only_bytes =
+ cxlds->persistent_only_bytes =
le64_to_cpu(id.persistent_capacity) * CXL_CAPACITY_MULTIPLIER;
- cxlm->partition_align_bytes =
+ cxlds->partition_align_bytes =
le64_to_cpu(id.partition_align) * CXL_CAPACITY_MULTIPLIER;
- dev_dbg(cxlm->dev,
+ dev_dbg(cxlds->dev,
"Identify Memory Device\n"
" total_bytes = %#llx\n"
" volatile_only_bytes = %#llx\n"
" persistent_only_bytes = %#llx\n"
" partition_align_bytes = %#llx\n",
- cxlm->total_bytes, cxlm->volatile_only_bytes,
- cxlm->persistent_only_bytes, cxlm->partition_align_bytes);
+ cxlds->total_bytes, cxlds->volatile_only_bytes,
+ cxlds->persistent_only_bytes, cxlds->partition_align_bytes);
- cxlm->lsa_size = le32_to_cpu(id.lsa_size);
- memcpy(cxlm->firmware_version, id.fw_revision, sizeof(id.fw_revision));
+ cxlds->lsa_size = le32_to_cpu(id.lsa_size);
+ memcpy(cxlds->firmware_version, id.fw_revision, sizeof(id.fw_revision));
return 0;
}
-EXPORT_SYMBOL_GPL(cxl_mem_identify);
+EXPORT_SYMBOL_NS_GPL(cxl_dev_state_identify, CXL);
-int cxl_mem_create_range_info(struct cxl_mem *cxlm)
+int cxl_mem_create_range_info(struct cxl_dev_state *cxlds)
{
int rc;
- if (cxlm->partition_align_bytes == 0) {
- cxlm->ram_range.start = 0;
- cxlm->ram_range.end = cxlm->volatile_only_bytes - 1;
- cxlm->pmem_range.start = cxlm->volatile_only_bytes;
- cxlm->pmem_range.end = cxlm->volatile_only_bytes +
- cxlm->persistent_only_bytes - 1;
+ if (cxlds->partition_align_bytes == 0) {
+ cxlds->ram_range.start = 0;
+ cxlds->ram_range.end = cxlds->volatile_only_bytes - 1;
+ cxlds->pmem_range.start = cxlds->volatile_only_bytes;
+ cxlds->pmem_range.end = cxlds->volatile_only_bytes +
+ cxlds->persistent_only_bytes - 1;
return 0;
}
- rc = cxl_mem_get_partition_info(cxlm);
+ rc = cxl_mem_get_partition_info(cxlds);
if (rc) {
- dev_err(cxlm->dev, "Failed to query partition information\n");
+ dev_err(cxlds->dev, "Failed to query partition information\n");
return rc;
}
- dev_dbg(cxlm->dev,
+ dev_dbg(cxlds->dev,
"Get Partition Info\n"
" active_volatile_bytes = %#llx\n"
" active_persistent_bytes = %#llx\n"
" next_volatile_bytes = %#llx\n"
" next_persistent_bytes = %#llx\n",
- cxlm->active_volatile_bytes, cxlm->active_persistent_bytes,
- cxlm->next_volatile_bytes, cxlm->next_persistent_bytes);
+ cxlds->active_volatile_bytes, cxlds->active_persistent_bytes,
+ cxlds->next_volatile_bytes, cxlds->next_persistent_bytes);
- cxlm->ram_range.start = 0;
- cxlm->ram_range.end = cxlm->active_volatile_bytes - 1;
+ cxlds->ram_range.start = 0;
+ cxlds->ram_range.end = cxlds->active_volatile_bytes - 1;
- cxlm->pmem_range.start = cxlm->active_volatile_bytes;
- cxlm->pmem_range.end =
- cxlm->active_volatile_bytes + cxlm->active_persistent_bytes - 1;
+ cxlds->pmem_range.start = cxlds->active_volatile_bytes;
+ cxlds->pmem_range.end =
+ cxlds->active_volatile_bytes + cxlds->active_persistent_bytes - 1;
return 0;
}
-EXPORT_SYMBOL_GPL(cxl_mem_create_range_info);
+EXPORT_SYMBOL_NS_GPL(cxl_mem_create_range_info, CXL);
-struct cxl_mem *cxl_mem_create(struct device *dev)
+struct cxl_dev_state *cxl_dev_state_create(struct device *dev)
{
- struct cxl_mem *cxlm;
+ struct cxl_dev_state *cxlds;
- cxlm = devm_kzalloc(dev, sizeof(*cxlm), GFP_KERNEL);
- if (!cxlm) {
+ cxlds = devm_kzalloc(dev, sizeof(*cxlds), GFP_KERNEL);
+ if (!cxlds) {
dev_err(dev, "No memory available\n");
return ERR_PTR(-ENOMEM);
}
- mutex_init(&cxlm->mbox_mutex);
- cxlm->dev = dev;
+ mutex_init(&cxlds->mbox_mutex);
+ cxlds->dev = dev;
- return cxlm;
+ return cxlds;
}
-EXPORT_SYMBOL_GPL(cxl_mem_create);
+EXPORT_SYMBOL_NS_GPL(cxl_dev_state_create, CXL);
static struct dentry *cxl_debugfs;
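For callers, the renamed cxl_mbox_send_cmd() keeps its old shape: fixed-size input and output buffers, with the struct cxl_mbox_cmd bookkeeping hidden inside. The identify path above is the canonical example; reduced to its core it looks like the following sketch (opcode and payload struct taken from this file, wrapper function hypothetical, cxlmem.h assumed included):

static int foo_identify(struct cxl_dev_state *cxlds)
{
	struct cxl_mbox_identify id;
	int rc;

	/* no input payload; output is bounded by cxlds->payload_size */
	rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_IDENTIFY, NULL, 0,
			       &id, sizeof(id));
	if (rc < 0)
		return rc;	/* transport and device errors already folded in */

	return 0;
}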
diff --git a/drivers/cxl/core/memdev.c b/drivers/cxl/core/memdev.c
index bf1b04d00ff4..61029cb7ac62 100644
--- a/drivers/cxl/core/memdev.c
+++ b/drivers/cxl/core/memdev.c
@@ -37,9 +37,9 @@ static ssize_t firmware_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
- return sysfs_emit(buf, "%.16s\n", cxlm->firmware_version);
+ return sysfs_emit(buf, "%.16s\n", cxlds->firmware_version);
}
static DEVICE_ATTR_RO(firmware_version);
@@ -47,9 +47,9 @@ static ssize_t payload_max_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
- return sysfs_emit(buf, "%zu\n", cxlm->payload_size);
+ return sysfs_emit(buf, "%zu\n", cxlds->payload_size);
}
static DEVICE_ATTR_RO(payload_max);
@@ -57,9 +57,9 @@ static ssize_t label_storage_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
- return sysfs_emit(buf, "%zu\n", cxlm->lsa_size);
+ return sysfs_emit(buf, "%zu\n", cxlds->lsa_size);
}
static DEVICE_ATTR_RO(label_storage_size);
@@ -67,8 +67,8 @@ static ssize_t ram_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
- unsigned long long len = range_len(&cxlm->ram_range);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ unsigned long long len = range_len(&cxlds->ram_range);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -80,8 +80,8 @@ static ssize_t pmem_size_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
- struct cxl_mem *cxlm = cxlmd->cxlm;
- unsigned long long len = range_len(&cxlm->pmem_range);
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
+ unsigned long long len = range_len(&cxlds->pmem_range);
return sysfs_emit(buf, "%#llx\n", len);
}
@@ -136,42 +136,42 @@ static const struct device_type cxl_memdev_type = {
/**
* set_exclusive_cxl_commands() - atomically disable user cxl commands
- * @cxlm: cxl_mem instance to modify
+ * @cxlds: The device state to operate on
* @cmds: bitmap of commands to mark exclusive
*
* Grab the cxl_memdev_rwsem in write mode to flush in-flight
* invocations of the ioctl path and then disable future execution of
* commands with the command ids set in @cmds.
*/
-void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
+void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
{
down_write(&cxl_memdev_rwsem);
- bitmap_or(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
+ bitmap_or(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
CXL_MEM_COMMAND_ID_MAX);
up_write(&cxl_memdev_rwsem);
}
-EXPORT_SYMBOL_GPL(set_exclusive_cxl_commands);
+EXPORT_SYMBOL_NS_GPL(set_exclusive_cxl_commands, CXL);
/**
* clear_exclusive_cxl_commands() - atomically enable user cxl commands
- * @cxlm: cxl_mem instance to modify
+ * @cxlds: The device state to modify
* @cmds: bitmap of commands to mark available for userspace
*/
-void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds)
+void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds)
{
down_write(&cxl_memdev_rwsem);
- bitmap_andnot(cxlm->exclusive_cmds, cxlm->exclusive_cmds, cmds,
+ bitmap_andnot(cxlds->exclusive_cmds, cxlds->exclusive_cmds, cmds,
CXL_MEM_COMMAND_ID_MAX);
up_write(&cxl_memdev_rwsem);
}
-EXPORT_SYMBOL_GPL(clear_exclusive_cxl_commands);
+EXPORT_SYMBOL_NS_GPL(clear_exclusive_cxl_commands, CXL);
static void cxl_memdev_shutdown(struct device *dev)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
down_write(&cxl_memdev_rwsem);
- cxlmd->cxlm = NULL;
+ cxlmd->cxlds = NULL;
up_write(&cxl_memdev_rwsem);
}
@@ -185,7 +185,7 @@ static void cxl_memdev_unregister(void *_cxlmd)
put_device(dev);
}
-static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
+static struct cxl_memdev *cxl_memdev_alloc(struct cxl_dev_state *cxlds,
const struct file_operations *fops)
{
struct cxl_memdev *cxlmd;
@@ -204,7 +204,7 @@ static struct cxl_memdev *cxl_memdev_alloc(struct cxl_mem *cxlm,
dev = &cxlmd->dev;
device_initialize(dev);
- dev->parent = cxlm->dev;
+ dev->parent = cxlds->dev;
dev->bus = &cxl_bus_type;
dev->devt = MKDEV(cxl_mem_major, cxlmd->id);
dev->type = &cxl_memdev_type;
@@ -239,7 +239,7 @@ static long cxl_memdev_ioctl(struct file *file, unsigned int cmd,
int rc = -ENXIO;
down_read(&cxl_memdev_rwsem);
- if (cxlmd->cxlm)
+ if (cxlmd->cxlds)
rc = __cxl_memdev_ioctl(cxlmd, cmd, arg);
up_read(&cxl_memdev_rwsem);
@@ -276,15 +276,14 @@ static const struct file_operations cxl_memdev_fops = {
.llseek = noop_llseek,
};
-struct cxl_memdev *
-devm_cxl_add_memdev(struct cxl_mem *cxlm)
+struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds)
{
struct cxl_memdev *cxlmd;
struct device *dev;
struct cdev *cdev;
int rc;
- cxlmd = cxl_memdev_alloc(cxlm, &cxl_memdev_fops);
+ cxlmd = cxl_memdev_alloc(cxlds, &cxl_memdev_fops);
if (IS_ERR(cxlmd))
return cxlmd;
@@ -297,14 +296,14 @@ devm_cxl_add_memdev(struct cxl_mem *cxlm)
* Activate ioctl operations, no cxl_memdev_rwsem manipulation
* needed as this is ordered with cdev_add() publishing the device.
*/
- cxlmd->cxlm = cxlm;
+ cxlmd->cxlds = cxlds;
cdev = &cxlmd->cdev;
rc = cdev_device_add(cdev, dev);
if (rc)
goto err;
- rc = devm_add_action_or_reset(cxlm->dev, cxl_memdev_unregister, cxlmd);
+ rc = devm_add_action_or_reset(cxlds->dev, cxl_memdev_unregister, cxlmd);
if (rc)
return ERR_PTR(rc);
return cxlmd;
@@ -318,7 +317,7 @@ err:
put_device(dev);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(devm_cxl_add_memdev);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_memdev, CXL);
__init int cxl_memdev_init(void)
{
diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c
index 5032f4c1c69d..b5fca97b0a07 100644
--- a/drivers/cxl/core/pmem.c
+++ b/drivers/cxl/core/pmem.c
@@ -49,12 +49,18 @@ struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev)
return NULL;
return container_of(dev, struct cxl_nvdimm_bridge, dev);
}
-EXPORT_SYMBOL_GPL(to_cxl_nvdimm_bridge);
+EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm_bridge, CXL);
-__mock int match_nvdimm_bridge(struct device *dev, const void *data)
+bool is_cxl_nvdimm_bridge(struct device *dev)
{
return dev->type == &cxl_nvdimm_bridge_type;
}
+EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm_bridge, CXL);
+
+__mock int match_nvdimm_bridge(struct device *dev, const void *data)
+{
+ return is_cxl_nvdimm_bridge(dev);
+}
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
{
@@ -65,7 +71,7 @@ struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd)
return NULL;
return to_cxl_nvdimm_bridge(dev);
}
-EXPORT_SYMBOL_GPL(cxl_find_nvdimm_bridge);
+EXPORT_SYMBOL_NS_GPL(cxl_find_nvdimm_bridge, CXL);
static struct cxl_nvdimm_bridge *
cxl_nvdimm_bridge_alloc(struct cxl_port *port)
@@ -167,7 +173,7 @@ err:
put_device(dev);
return ERR_PTR(rc);
}
-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm_bridge);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm_bridge, CXL);
static void cxl_nvdimm_release(struct device *dev)
{
@@ -191,7 +197,7 @@ bool is_cxl_nvdimm(struct device *dev)
{
return dev->type == &cxl_nvdimm_type;
}
-EXPORT_SYMBOL_GPL(is_cxl_nvdimm);
+EXPORT_SYMBOL_NS_GPL(is_cxl_nvdimm, CXL);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
{
@@ -200,7 +206,7 @@ struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev)
return NULL;
return container_of(dev, struct cxl_nvdimm, dev);
}
-EXPORT_SYMBOL_GPL(to_cxl_nvdimm);
+EXPORT_SYMBOL_NS_GPL(to_cxl_nvdimm, CXL);
static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd)
{
@@ -262,4 +268,4 @@ err:
put_device(dev);
return rc;
}
-EXPORT_SYMBOL_GPL(devm_cxl_add_nvdimm);
+EXPORT_SYMBOL_NS_GPL(devm_cxl_add_nvdimm, CXL);
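The set/clear_exclusive_cxl_commands() pair renamed in memdev.c above exists for kernel-side consumers such as the nvdimm code registered here: while a command id is marked exclusive, the ioctl path fails it with -EBUSY. A sketch of the claim/release bracket (the SET_LSA id is assumed from the userspace command enum; error handling elided):

static void foo_claim_label_commands(struct cxl_dev_state *cxlds)
{
	DECLARE_BITMAP(cmds, CXL_MEM_COMMAND_ID_MAX) = { 0 };

	set_bit(CXL_MEM_COMMAND_ID_SET_LSA, cmds);	/* id assumed from uapi enum */
	set_exclusive_cxl_commands(cxlds, cmds);

	/* ... label area is now driven only from the kernel ... */

	clear_exclusive_cxl_commands(cxlds, cmds);
}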
diff --git a/drivers/cxl/core/regs.c b/drivers/cxl/core/regs.c
index 41de4a136ecd..e37e23bf4355 100644
--- a/drivers/cxl/core/regs.c
+++ b/drivers/cxl/core/regs.c
@@ -90,7 +90,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
}
}
}
-EXPORT_SYMBOL_GPL(cxl_probe_component_regs);
+EXPORT_SYMBOL_NS_GPL(cxl_probe_component_regs, CXL);
/**
* cxl_probe_device_regs() - Detect CXL Device register blocks
@@ -156,7 +156,7 @@ void cxl_probe_device_regs(struct device *dev, void __iomem *base,
}
}
}
-EXPORT_SYMBOL_GPL(cxl_probe_device_regs);
+EXPORT_SYMBOL_NS_GPL(cxl_probe_device_regs, CXL);
static void __iomem *devm_cxl_iomap_block(struct device *dev,
resource_size_t addr,
@@ -199,7 +199,7 @@ int cxl_map_component_regs(struct pci_dev *pdev,
return 0;
}
-EXPORT_SYMBOL_GPL(cxl_map_component_regs);
+EXPORT_SYMBOL_NS_GPL(cxl_map_component_regs, CXL);
int cxl_map_device_regs(struct pci_dev *pdev,
struct cxl_device_regs *regs,
@@ -246,4 +246,4 @@ int cxl_map_device_regs(struct pci_dev *pdev,
return 0;
}
-EXPORT_SYMBOL_GPL(cxl_map_device_regs);
+EXPORT_SYMBOL_NS_GPL(cxl_map_device_regs, CXL);
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 3af704e9b448..a5a0be3f088b 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -191,11 +191,18 @@ struct cxl_decoder {
int interleave_granularity;
enum cxl_decoder_type target_type;
unsigned long flags;
- const int nr_targets;
+ int nr_targets;
struct cxl_dport *target[];
};
+/**
+ * enum cxl_nvdimm_brige_state - state machine for managing bus rescans
+ * @CXL_NVB_NEW: Set at bridge create and after cxl_pmem_wq is destroyed
+ * @CXL_NVB_DEAD: Set at bridge unregistration to preclude async probing
+ * @CXL_NVB_ONLINE: Target state after successful ->probe()
+ * @CXL_NVB_OFFLINE: Target state after ->remove() or failed ->probe()
+ */
enum cxl_nvdimm_brige_state {
CXL_NVB_NEW,
CXL_NVB_DEAD,
@@ -308,6 +315,7 @@ struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
struct cxl_port *port);
struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
bool is_cxl_nvdimm(struct device *dev);
+bool is_cxl_nvdimm_bridge(struct device *dev);
int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd);
diff --git a/drivers/cxl/cxlmem.h b/drivers/cxl/cxlmem.h
index c4f450ad434d..8d96d009ad90 100644
--- a/drivers/cxl/cxlmem.h
+++ b/drivers/cxl/cxlmem.h
@@ -33,13 +33,13 @@
* struct cxl_memdev - CXL bus object representing a Type-3 Memory Device
* @dev: driver core device object
* @cdev: char dev core object for ioctl operations
- * @cxlm: pointer to the parent device driver data
+ * @cxlds: The device state backing this device
* @id: id number of this memdev instance.
*/
struct cxl_memdev {
struct device dev;
struct cdev cdev;
- struct cxl_mem *cxlm;
+ struct cxl_dev_state *cxlds;
int id;
};
@@ -48,7 +48,7 @@ static inline struct cxl_memdev *to_cxl_memdev(struct device *dev)
return container_of(dev, struct cxl_memdev, dev);
}
-struct cxl_memdev *devm_cxl_add_memdev(struct cxl_mem *cxlm);
+struct cxl_memdev *devm_cxl_add_memdev(struct cxl_dev_state *cxlds);
/**
* struct cxl_mbox_cmd - A command to be submitted to hardware.
@@ -90,9 +90,13 @@ struct cxl_mbox_cmd {
#define CXL_CAPACITY_MULTIPLIER SZ_256M
/**
- * struct cxl_mem - A CXL memory device
- * @dev: The device associated with this CXL device.
- * @cxlmd: Logical memory device chardev / interface
+ * struct cxl_dev_state - The driver device state
+ *
+ * cxl_dev_state represents the CXL driver/device state. It provides an
+ * interface to mailbox commands as well as some cached data about the device.
+ * Currently only memory devices are represented.
+ *
+ * @dev: The device associated with this CXL state
* @regs: Parsed register blocks
* @payload_size: Size of space for payload
* (CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register)
@@ -117,9 +121,8 @@ struct cxl_mbox_cmd {
* See section 8.2.9.5.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
*/
-struct cxl_mem {
+struct cxl_dev_state {
struct device *dev;
- struct cxl_memdev *cxlmd;
struct cxl_regs regs;
@@ -142,7 +145,7 @@ struct cxl_mem {
u64 next_volatile_bytes;
u64 next_persistent_bytes;
- int (*mbox_send)(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd);
+ int (*mbox_send)(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd);
};
enum cxl_opcode {
@@ -253,12 +256,12 @@ struct cxl_mem_command {
#define CXL_CMD_FLAG_FORCE_ENABLE BIT(0)
};
-int cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm, u16 opcode, void *in,
- size_t in_size, void *out, size_t out_size);
-int cxl_mem_identify(struct cxl_mem *cxlm);
-int cxl_mem_enumerate_cmds(struct cxl_mem *cxlm);
-int cxl_mem_create_range_info(struct cxl_mem *cxlm);
-struct cxl_mem *cxl_mem_create(struct device *dev);
-void set_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
-void clear_exclusive_cxl_commands(struct cxl_mem *cxlm, unsigned long *cmds);
+int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, u16 opcode, void *in,
+ size_t in_size, void *out, size_t out_size);
+int cxl_dev_state_identify(struct cxl_dev_state *cxlds);
+int cxl_enumerate_cmds(struct cxl_dev_state *cxlds);
+int cxl_mem_create_range_info(struct cxl_dev_state *cxlds);
+struct cxl_dev_state *cxl_dev_state_create(struct device *dev);
+void set_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
+void clear_exclusive_cxl_commands(struct cxl_dev_state *cxlds, unsigned long *cmds);
#endif /* __CXL_MEM_H__ */
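The ->mbox_send() member is the seam between the transport-agnostic core and a transport driver: pci.c below supplies cxl_pci_mbox_send(), and any other provider (another transport, or a test mock) only has to fill in the same hook plus payload_size. A sketch of such a provider (foo_* names hypothetical; cxl_dev_state_create() and cxl_enumerate_cmds() are the real entry points from this header):

static int foo_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
	/*
	 * Deliver cmd->opcode / cmd->payload_in to the device, then fill
	 * cmd->payload_out, cmd->size_out and cmd->return_code.
	 */
	return 0;
}

static int foo_probe(struct device *dev)
{
	struct cxl_dev_state *cxlds;

	cxlds = cxl_dev_state_create(dev);
	if (IS_ERR(cxlds))
		return PTR_ERR(cxlds);

	cxlds->mbox_send = foo_mbox_send;
	cxlds->payload_size = SZ_4K;	/* transport-dependent */

	return cxl_enumerate_cmds(cxlds);
}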
diff --git a/drivers/cxl/pci.c b/drivers/cxl/pci.c
index c734e21fb4e0..8dc91fd3396a 100644
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -28,39 +28,39 @@
* - Registers a CXL mailbox with cxl_core.
*/
-#define cxl_doorbell_busy(cxlm) \
- (readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) & \
+#define cxl_doorbell_busy(cxlds) \
+ (readl((cxlds)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) & \
CXLDEV_MBOX_CTRL_DOORBELL)
/* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)
-static int cxl_pci_mbox_wait_for_doorbell(struct cxl_mem *cxlm)
+static int cxl_pci_mbox_wait_for_doorbell(struct cxl_dev_state *cxlds)
{
const unsigned long start = jiffies;
unsigned long end = start;
- while (cxl_doorbell_busy(cxlm)) {
+ while (cxl_doorbell_busy(cxlds)) {
end = jiffies;
if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
/* Check again in case preempted before timeout test */
- if (!cxl_doorbell_busy(cxlm))
+ if (!cxl_doorbell_busy(cxlds))
break;
return -ETIMEDOUT;
}
cpu_relax();
}
- dev_dbg(cxlm->dev, "Doorbell wait took %dms",
+ dev_dbg(cxlds->dev, "Doorbell wait took %dms",
jiffies_to_msecs(end) - jiffies_to_msecs(start));
return 0;
}
-static void cxl_pci_mbox_timeout(struct cxl_mem *cxlm,
+static void cxl_pci_mbox_timeout(struct cxl_dev_state *cxlds,
struct cxl_mbox_cmd *mbox_cmd)
{
- struct device *dev = cxlm->dev;
+ struct device *dev = cxlds->dev;
dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n",
mbox_cmd->opcode, mbox_cmd->size_in);
@@ -68,7 +68,7 @@ static void cxl_pci_mbox_timeout(struct cxl_mem *cxlm,
/**
* __cxl_pci_mbox_send_cmd() - Execute a mailbox command
- * @cxlm: The CXL memory device to communicate with.
+ * @cxlds: The device state to communicate with.
* @mbox_cmd: Command to send to the memory device.
*
* Context: Any context. Expects mbox_mutex to be held.
@@ -88,16 +88,16 @@ static void cxl_pci_mbox_timeout(struct cxl_mem *cxlm,
* not need to coordinate with each other. The driver only uses the primary
* mailbox.
*/
-static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
+static int __cxl_pci_mbox_send_cmd(struct cxl_dev_state *cxlds,
struct cxl_mbox_cmd *mbox_cmd)
{
- void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
- struct device *dev = cxlm->dev;
+ void __iomem *payload = cxlds->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
+ struct device *dev = cxlds->dev;
u64 cmd_reg, status_reg;
size_t out_len;
int rc;
- lockdep_assert_held(&cxlm->mbox_mutex);
+ lockdep_assert_held(&cxlds->mbox_mutex);
/*
* Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
@@ -117,7 +117,7 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
*/
/* #1 */
- if (cxl_doorbell_busy(cxlm)) {
+ if (cxl_doorbell_busy(cxlds)) {
dev_err_ratelimited(dev, "Mailbox re-busy after acquiring\n");
return -EBUSY;
}
@@ -134,22 +134,22 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
}
/* #2, #3 */
- writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
+ writeq(cmd_reg, cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
/* #4 */
dev_dbg(dev, "Sending command\n");
writel(CXLDEV_MBOX_CTRL_DOORBELL,
- cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
+ cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
/* #5 */
- rc = cxl_pci_mbox_wait_for_doorbell(cxlm);
+ rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
if (rc == -ETIMEDOUT) {
- cxl_pci_mbox_timeout(cxlm, mbox_cmd);
+ cxl_pci_mbox_timeout(cxlds, mbox_cmd);
return rc;
}
/* #6 */
- status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
+ status_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
mbox_cmd->return_code =
FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);
@@ -159,7 +159,7 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
}
/* #7 */
- cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
+ cmd_reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);
/* #8 */
@@ -171,7 +171,7 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
* have requested less data than the hardware supplied even
* within spec.
*/
- size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len);
+ size_t n = min3(mbox_cmd->size_out, cxlds->payload_size, out_len);
memcpy_fromio(mbox_cmd->payload_out, payload, n);
mbox_cmd->size_out = n;
@@ -184,18 +184,18 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
/**
* cxl_pci_mbox_get() - Acquire exclusive access to the mailbox.
- * @cxlm: The memory device to gain access to.
+ * @cxlds: The device state to gain access to.
*
* Context: Any context. Takes the mbox_mutex.
* Return: 0 if exclusive access was acquired.
*/
-static int cxl_pci_mbox_get(struct cxl_mem *cxlm)
+static int cxl_pci_mbox_get(struct cxl_dev_state *cxlds)
{
- struct device *dev = cxlm->dev;
+ struct device *dev = cxlds->dev;
u64 md_status;
int rc;
- mutex_lock_io(&cxlm->mbox_mutex);
+ mutex_lock_io(&cxlds->mbox_mutex);
/*
* XXX: There is some amount of ambiguity in the 2.0 version of the spec
@@ -214,13 +214,13 @@ static int cxl_pci_mbox_get(struct cxl_mem *cxlm)
* Mailbox Interface Ready bit. Therefore, waiting for the doorbell
* to be ready is sufficient.
*/
- rc = cxl_pci_mbox_wait_for_doorbell(cxlm);
+ rc = cxl_pci_mbox_wait_for_doorbell(cxlds);
if (rc) {
dev_warn(dev, "Mailbox interface not ready\n");
goto out;
}
- md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET);
+ md_status = readq(cxlds->regs.memdev + CXLMDEV_STATUS_OFFSET);
if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
rc = -EBUSY;
@@ -249,41 +249,41 @@ static int cxl_pci_mbox_get(struct cxl_mem *cxlm)
return 0;
out:
- mutex_unlock(&cxlm->mbox_mutex);
+ mutex_unlock(&cxlds->mbox_mutex);
return rc;
}
/**
* cxl_pci_mbox_put() - Release exclusive access to the mailbox.
- * @cxlm: The CXL memory device to communicate with.
+ * @cxlds: The device state to communicate with.
*
* Context: Any context. Expects mbox_mutex to be held.
*/
-static void cxl_pci_mbox_put(struct cxl_mem *cxlm)
+static void cxl_pci_mbox_put(struct cxl_dev_state *cxlds)
{
- mutex_unlock(&cxlm->mbox_mutex);
+ mutex_unlock(&cxlds->mbox_mutex);
}
-static int cxl_pci_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
+static int cxl_pci_mbox_send(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd)
{
int rc;
- rc = cxl_pci_mbox_get(cxlm);
+ rc = cxl_pci_mbox_get(cxlds);
if (rc)
return rc;
- rc = __cxl_pci_mbox_send_cmd(cxlm, cmd);
- cxl_pci_mbox_put(cxlm);
+ rc = __cxl_pci_mbox_send_cmd(cxlds, cmd);
+ cxl_pci_mbox_put(cxlds);
return rc;
}
-static int cxl_pci_setup_mailbox(struct cxl_mem *cxlm)
+static int cxl_pci_setup_mailbox(struct cxl_dev_state *cxlds)
{
- const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
+ const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
- cxlm->mbox_send = cxl_pci_mbox_send;
- cxlm->payload_size =
+ cxlds->mbox_send = cxl_pci_mbox_send;
+ cxlds->payload_size =
1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);
/*
@@ -293,15 +293,15 @@ static int cxl_pci_setup_mailbox(struct cxl_mem *cxlm)
* there's no point in going forward. If the size is too large, there's
* no harm in soft limiting it.
*/
- cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M);
- if (cxlm->payload_size < 256) {
- dev_err(cxlm->dev, "Mailbox is too small (%zub)",
- cxlm->payload_size);
+ cxlds->payload_size = min_t(size_t, cxlds->payload_size, SZ_1M);
+ if (cxlds->payload_size < 256) {
+ dev_err(cxlds->dev, "Mailbox is too small (%zub)",
+ cxlds->payload_size);
return -ENXIO;
}
- dev_dbg(cxlm->dev, "Mailbox payload sized %zu",
- cxlm->payload_size);
+ dev_dbg(cxlds->dev, "Mailbox payload sized %zu",
+ cxlds->payload_size);
return 0;
}
@@ -379,18 +379,18 @@ static int cxl_probe_regs(struct pci_dev *pdev, struct cxl_register_map *map)
return 0;
}
-static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map)
+static int cxl_map_regs(struct cxl_dev_state *cxlds, struct cxl_register_map *map)
{
- struct device *dev = cxlm->dev;
+ struct device *dev = cxlds->dev;
struct pci_dev *pdev = to_pci_dev(dev);
switch (map->reg_type) {
case CXL_REGLOC_RBI_COMPONENT:
- cxl_map_component_regs(pdev, &cxlm->regs.component, map);
+ cxl_map_component_regs(pdev, &cxlds->regs.component, map);
dev_dbg(dev, "Mapping component registers...\n");
break;
case CXL_REGLOC_RBI_MEMDEV:
- cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map);
+ cxl_map_device_regs(pdev, &cxlds->regs.device_regs, map);
dev_dbg(dev, "Probing device registers...\n");
break;
default:
@@ -475,7 +475,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct cxl_register_map map;
struct cxl_memdev *cxlmd;
- struct cxl_mem *cxlm;
+ struct cxl_dev_state *cxlds;
int rc;
/*
@@ -489,39 +489,39 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- cxlm = cxl_mem_create(&pdev->dev);
- if (IS_ERR(cxlm))
- return PTR_ERR(cxlm);
+ cxlds = cxl_dev_state_create(&pdev->dev);
+ if (IS_ERR(cxlds))
+ return PTR_ERR(cxlds);
rc = cxl_setup_regs(pdev, CXL_REGLOC_RBI_MEMDEV, &map);
if (rc)
return rc;
- rc = cxl_map_regs(cxlm, &map);
+ rc = cxl_map_regs(cxlds, &map);
if (rc)
return rc;
- rc = cxl_pci_setup_mailbox(cxlm);
+ rc = cxl_pci_setup_mailbox(cxlds);
if (rc)
return rc;
- rc = cxl_mem_enumerate_cmds(cxlm);
+ rc = cxl_enumerate_cmds(cxlds);
if (rc)
return rc;
- rc = cxl_mem_identify(cxlm);
+ rc = cxl_dev_state_identify(cxlds);
if (rc)
return rc;
- rc = cxl_mem_create_range_info(cxlm);
+ rc = cxl_mem_create_range_info(cxlds);
if (rc)
return rc;
- cxlmd = devm_cxl_add_memdev(cxlm);
+ cxlmd = devm_cxl_add_memdev(cxlds);
if (IS_ERR(cxlmd))
return PTR_ERR(cxlmd);
- if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
+ if (range_len(&cxlds->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);
return rc;
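
The numbered comments (#2 through #8) above track the mailbox protocol steps from the CXL 2.0 specification, and step #5 hinges on polling the doorbell until the device clears it. A minimal sketch of that poll, assuming a fixed 2-second budget and reusing only the register offsets visible in this hunk (the helper name and timeout are illustrative, not the driver's exact implementation):

static int doorbell_wait_sketch(struct cxl_dev_state *cxlds)
{
	/* assumed poll budget; the real driver derives its timeout elsewhere */
	unsigned long end = jiffies + msecs_to_jiffies(2000);

	while (readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &
	       CXLDEV_MBOX_CTRL_DOORBELL) {
		if (time_after(jiffies, end))
			return -ETIMEDOUT;
		cpu_relax();
	}
	return 0;
}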
diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c
index ceb2115981e5..b65a272a2d6d 100644
--- a/drivers/cxl/pmem.c
+++ b/drivers/cxl/pmem.c
@@ -19,9 +19,9 @@ static struct workqueue_struct *cxl_pmem_wq;
static __read_mostly DECLARE_BITMAP(exclusive_cmds, CXL_MEM_COMMAND_ID_MAX);
-static void clear_exclusive(void *cxlm)
+static void clear_exclusive(void *cxlds)
{
- clear_exclusive_cxl_commands(cxlm, exclusive_cmds);
+ clear_exclusive_cxl_commands(cxlds, exclusive_cmds);
}
static void unregister_nvdimm(void *nvdimm)
@@ -34,7 +34,7 @@ static int cxl_nvdimm_probe(struct device *dev)
struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
unsigned long flags = 0, cmd_mask = 0;
- struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_nvdimm_bridge *cxl_nvb;
struct nvdimm *nvdimm;
int rc;
@@ -49,8 +49,8 @@ static int cxl_nvdimm_probe(struct device *dev)
goto out;
}
- set_exclusive_cxl_commands(cxlm, exclusive_cmds);
- rc = devm_add_action_or_reset(dev, clear_exclusive, cxlm);
+ set_exclusive_cxl_commands(cxlds, exclusive_cmds);
+ rc = devm_add_action_or_reset(dev, clear_exclusive, cxlds);
if (rc)
goto out;
@@ -80,7 +80,7 @@ static struct cxl_driver cxl_nvdimm_driver = {
.id = CXL_DEVICE_NVDIMM,
};
-static int cxl_pmem_get_config_size(struct cxl_mem *cxlm,
+static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds,
struct nd_cmd_get_config_size *cmd,
unsigned int buf_len)
{
@@ -88,14 +88,14 @@ static int cxl_pmem_get_config_size(struct cxl_mem *cxlm,
return -EINVAL;
*cmd = (struct nd_cmd_get_config_size) {
- .config_size = cxlm->lsa_size,
- .max_xfer = cxlm->payload_size,
+ .config_size = cxlds->lsa_size,
+ .max_xfer = cxlds->payload_size,
};
return 0;
}
-static int cxl_pmem_get_config_data(struct cxl_mem *cxlm,
+static int cxl_pmem_get_config_data(struct cxl_dev_state *cxlds,
struct nd_cmd_get_config_data_hdr *cmd,
unsigned int buf_len)
{
@@ -112,15 +112,14 @@ static int cxl_pmem_get_config_data(struct cxl_mem *cxlm,
.length = cmd->in_length,
};
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_GET_LSA, &get_lsa,
- sizeof(get_lsa), cmd->out_buf,
- cmd->in_length);
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_GET_LSA, &get_lsa,
+ sizeof(get_lsa), cmd->out_buf, cmd->in_length);
cmd->status = 0;
return rc;
}
-static int cxl_pmem_set_config_data(struct cxl_mem *cxlm,
+static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds,
struct nd_cmd_set_config_hdr *cmd,
unsigned int buf_len)
{
@@ -144,9 +143,9 @@ static int cxl_pmem_set_config_data(struct cxl_mem *cxlm,
};
memcpy(set_lsa->data, cmd->in_buf, cmd->in_length);
- rc = cxl_mem_mbox_send_cmd(cxlm, CXL_MBOX_OP_SET_LSA, set_lsa,
- struct_size(set_lsa, data, cmd->in_length),
- NULL, 0);
+ rc = cxl_mbox_send_cmd(cxlds, CXL_MBOX_OP_SET_LSA, set_lsa,
+ struct_size(set_lsa, data, cmd->in_length),
+ NULL, 0);
/*
* Set "firmware" status (4-packed bytes at the end of the input
@@ -164,18 +163,18 @@ static int cxl_pmem_nvdimm_ctl(struct nvdimm *nvdimm, unsigned int cmd,
struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm);
unsigned long cmd_mask = nvdimm_cmd_mask(nvdimm);
struct cxl_memdev *cxlmd = cxl_nvd->cxlmd;
- struct cxl_mem *cxlm = cxlmd->cxlm;
+ struct cxl_dev_state *cxlds = cxlmd->cxlds;
if (!test_bit(cmd, &cmd_mask))
return -ENOTTY;
switch (cmd) {
case ND_CMD_GET_CONFIG_SIZE:
- return cxl_pmem_get_config_size(cxlm, buf, buf_len);
+ return cxl_pmem_get_config_size(cxlds, buf, buf_len);
case ND_CMD_GET_CONFIG_DATA:
- return cxl_pmem_get_config_data(cxlm, buf, buf_len);
+ return cxl_pmem_get_config_data(cxlds, buf, buf_len);
case ND_CMD_SET_CONFIG_DATA:
- return cxl_pmem_set_config_data(cxlm, buf, buf_len);
+ return cxl_pmem_set_config_data(cxlds, buf, buf_len);
default:
return -ENOTTY;
}
@@ -266,14 +265,24 @@ static void cxl_nvb_update_state(struct work_struct *work)
put_device(&cxl_nvb->dev);
}
+static void cxl_nvdimm_bridge_state_work(struct cxl_nvdimm_bridge *cxl_nvb)
+{
+ /*
+ * Take a reference that the workqueue will drop if new work
+ * gets queued.
+ */
+ get_device(&cxl_nvb->dev);
+ if (!queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
+ put_device(&cxl_nvb->dev);
+}
+
static void cxl_nvdimm_bridge_remove(struct device *dev)
{
struct cxl_nvdimm_bridge *cxl_nvb = to_cxl_nvdimm_bridge(dev);
if (cxl_nvb->state == CXL_NVB_ONLINE)
cxl_nvb->state = CXL_NVB_OFFLINE;
- if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
- get_device(&cxl_nvb->dev);
+ cxl_nvdimm_bridge_state_work(cxl_nvb);
}
static int cxl_nvdimm_bridge_probe(struct device *dev)
@@ -294,8 +303,7 @@ static int cxl_nvdimm_bridge_probe(struct device *dev)
}
cxl_nvb->state = CXL_NVB_ONLINE;
- if (queue_work(cxl_pmem_wq, &cxl_nvb->state_work))
- get_device(&cxl_nvb->dev);
+ cxl_nvdimm_bridge_state_work(cxl_nvb);
return 0;
}
@@ -307,6 +315,31 @@ static struct cxl_driver cxl_nvdimm_bridge_driver = {
.id = CXL_DEVICE_NVDIMM_BRIDGE,
};
+/*
+ * Return all bridges to the CXL_NVB_NEW state to invalidate any
+ * ->state_work referring to the now destroyed cxl_pmem_wq.
+ */
+static int cxl_nvdimm_bridge_reset(struct device *dev, void *data)
+{
+ struct cxl_nvdimm_bridge *cxl_nvb;
+
+ if (!is_cxl_nvdimm_bridge(dev))
+ return 0;
+
+ cxl_nvb = to_cxl_nvdimm_bridge(dev);
+ device_lock(dev);
+ cxl_nvb->state = CXL_NVB_NEW;
+ device_unlock(dev);
+
+ return 0;
+}
+
+static void destroy_cxl_pmem_wq(void)
+{
+ destroy_workqueue(cxl_pmem_wq);
+ bus_for_each_dev(&cxl_bus_type, NULL, NULL, cxl_nvdimm_bridge_reset);
+}
+
static __init int cxl_pmem_init(void)
{
int rc;
@@ -332,7 +365,7 @@ static __init int cxl_pmem_init(void)
err_nvdimm:
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
err_bridge:
- destroy_workqueue(cxl_pmem_wq);
+ destroy_cxl_pmem_wq();
return rc;
}
@@ -340,7 +373,7 @@ static __exit void cxl_pmem_exit(void)
{
cxl_driver_unregister(&cxl_nvdimm_driver);
cxl_driver_unregister(&cxl_nvdimm_bridge_driver);
- destroy_workqueue(cxl_pmem_wq);
+ destroy_cxl_pmem_wq();
}
MODULE_LICENSE("GPL v2");
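
The set_exclusive/clear_exclusive pairing above is the usual devm acquire-with-undo pattern: devm_add_action_or_reset() runs the release callback immediately when registration fails, so the probe path cannot leak the acquired state. A generic sketch of the pattern, with demo_set_flags()/demo_clear_flags() standing in for the CXL helpers:

static void demo_release(void *state)
{
	/* runs on driver unbind, or at once if the action failed to register */
	demo_clear_flags(state);
}

static int demo_acquire(struct device *dev, void *state)
{
	demo_set_flags(state);
	return devm_add_action_or_reset(dev, demo_release, state);
}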
diff --git a/drivers/dax/Kconfig b/drivers/dax/Kconfig
index d2834c2cfa10..5fdf269a822e 100644
--- a/drivers/dax/Kconfig
+++ b/drivers/dax/Kconfig
@@ -1,8 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-config DAX_DRIVER
- select DAX
- bool
-
menuconfig DAX
tristate "DAX: direct access to differentiated memory"
select SRCU
@@ -70,13 +66,4 @@ config DEV_DAX_KMEM
Say N if unsure.
-config DEV_DAX_PMEM_COMPAT
- tristate "PMEM DAX: support the deprecated /sys/class/dax interface"
- depends on m && DEV_DAX_PMEM=m
- default DEV_DAX_PMEM
- help
- Older versions of the libdaxctl library expect to find all
- device-dax instances under /sys/class/dax. If libdaxctl in
- your distribution is older than v58 say M, otherwise say N.
-
endif
diff --git a/drivers/dax/Makefile b/drivers/dax/Makefile
index 9d4ba672d305..90a56ca3b345 100644
--- a/drivers/dax/Makefile
+++ b/drivers/dax/Makefile
@@ -2,10 +2,11 @@
obj-$(CONFIG_DAX) += dax.o
obj-$(CONFIG_DEV_DAX) += device_dax.o
obj-$(CONFIG_DEV_DAX_KMEM) += kmem.o
+obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
dax-y := super.o
dax-y += bus.o
device_dax-y := device.o
+dax_pmem-y := pmem.o
-obj-y += pmem/
obj-y += hmem/
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 6cc4da4c713d..1dad813ee4a6 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -10,8 +10,6 @@
#include "dax-private.h"
#include "bus.h"
-static struct class *dax_class;
-
static DEFINE_MUTEX(dax_bus_lock);
#define DAX_NAME_LEN 30
@@ -129,11 +127,35 @@ ATTRIBUTE_GROUPS(dax_drv);
static int dax_bus_match(struct device *dev, struct device_driver *drv);
+/*
+ * Static dax regions are regions created by an external subsystem (e.g.
+ * nvdimm) where a single range is assigned. Their boundaries are set by the
+ * external subsystem and are usually limited to one physical memory range. For
+ * example, for PMEM they are usually defined by NVDIMM Namespace boundaries
+ * (i.e. a single contiguous range).
+ *
+ * In dynamic dax regions, the assigned region can be partitioned by the dax
+ * core into multiple subdivisions. A subdivision is represented by one
+ * /dev/daxN.M device composed of one or more potentially discontiguous ranges.
+ *
+ * When allocating a dax region, drivers must set whether it's static
+ * (IORESOURCE_DAX_STATIC). On static dax devices, the @pgmap is pre-assigned
+ * to dax core when calling devm_create_dev_dax(), whereas in dynamic dax
+ * devices it is NULL but afterwards allocated by dax core on device ->probe().
+ * Care is needed to make sure that dynamic dax devices are torn down with a
+ * cleared @pgmap field (see kill_dev_dax()).
+ */
static bool is_static(struct dax_region *dax_region)
{
return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0;
}
+bool static_dev_dax(struct dev_dax *dev_dax)
+{
+ return is_static(dev_dax->region);
+}
+EXPORT_SYMBOL_GPL(static_dev_dax);
+
static u64 dev_dax_size(struct dev_dax *dev_dax)
{
u64 size = 0;
@@ -363,6 +385,14 @@ void kill_dev_dax(struct dev_dax *dev_dax)
kill_dax(dax_dev);
unmap_mapping_range(inode->i_mapping, 0, 0, 1);
+
+ /*
+ * Dynamic dax regions have the pgmap allocated via devm_kzalloc()
+ * and thus freed by devm. Clear the pgmap so that no stale pgmap
+ * ranges from previous reconfigurations of region devices survive into probe().
+ */
+ if (!static_dev_dax(dev_dax))
+ dev_dax->pgmap = NULL;
}
EXPORT_SYMBOL_GPL(kill_dev_dax);
@@ -1323,14 +1353,17 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
}
/*
- * No 'host' or dax_operations since there is no access to this
- * device outside of mmap of the resulting character device.
+ * No dax_operations since there is no access to this device outside of
+ * mmap of the resulting character device.
*/
- dax_dev = alloc_dax(dev_dax, NULL, NULL, DAXDEV_F_SYNC);
+ dax_dev = alloc_dax(dev_dax, NULL);
if (IS_ERR(dax_dev)) {
rc = PTR_ERR(dax_dev);
goto err_alloc_dax;
}
+ set_dax_synchronous(dax_dev);
+ set_dax_nocache(dax_dev);
+ set_dax_nomc(dax_dev);
/* a device_dax instance is dead while the driver is not attached */
kill_dax(dax_dev);
@@ -1343,10 +1376,7 @@ struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data)
inode = dax_inode(dax_dev);
dev->devt = inode->i_rdev;
- if (data->subsys == DEV_DAX_BUS)
- dev->bus = &dax_bus_type;
- else
- dev->class = dax_class;
+ dev->bus = &dax_bus_type;
dev->parent = parent;
dev->type = &dev_dax_type;
@@ -1445,22 +1475,10 @@ EXPORT_SYMBOL_GPL(dax_driver_unregister);
int __init dax_bus_init(void)
{
- int rc;
-
- if (IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)) {
- dax_class = class_create(THIS_MODULE, "dax");
- if (IS_ERR(dax_class))
- return PTR_ERR(dax_class);
- }
-
- rc = bus_register(&dax_bus_type);
- if (rc)
- class_destroy(dax_class);
- return rc;
+ return bus_register(&dax_bus_type);
}
void __exit dax_bus_exit(void)
{
bus_unregister(&dax_bus_type);
- class_destroy(dax_class);
}
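
Per the static/dynamic rules documented in the comment block added above, a static region arrives with its pgmap already populated when devm_create_dev_dax() is called. A sketch of such a registration under those rules (the pgmap contents and range are placeholders to be filled by the registering driver):

struct dev_pagemap pgmap = { };		/* populated by the registering driver */
struct dev_dax_data data = {
	.dax_region = dax_region,	/* created with IORESOURCE_DAX_STATIC */
	.id = 0,
	.pgmap = &pgmap,		/* pre-assigned, as static regions require */
	.size = range_len(&range),
};
struct dev_dax *dev_dax = devm_create_dev_dax(&data);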
diff --git a/drivers/dax/bus.h b/drivers/dax/bus.h
index 1e946ad7780a..fbb940293d6d 100644
--- a/drivers/dax/bus.h
+++ b/drivers/dax/bus.h
@@ -16,24 +16,15 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id,
struct range *range, int target_node, unsigned int align,
unsigned long flags);
-enum dev_dax_subsys {
- DEV_DAX_BUS = 0, /* zeroed dev_dax_data picks this by default */
- DEV_DAX_CLASS,
-};
-
struct dev_dax_data {
struct dax_region *dax_region;
struct dev_pagemap *pgmap;
- enum dev_dax_subsys subsys;
resource_size_t size;
int id;
};
struct dev_dax *devm_create_dev_dax(struct dev_dax_data *data);
-/* to be deleted when DEV_DAX_CLASS is removed */
-struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys);
-
struct dax_device_driver {
struct device_driver drv;
struct list_head ids;
@@ -48,10 +39,7 @@ int __dax_driver_register(struct dax_device_driver *dax_drv,
__dax_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
void dax_driver_unregister(struct dax_device_driver *dax_drv);
void kill_dev_dax(struct dev_dax *dev_dax);
-
-#if IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)
-int dev_dax_probe(struct dev_dax *dev_dax);
-#endif
+bool static_dev_dax(struct dev_dax *dev_dax);
/*
* While run_dax() is potentially a generic operation that could be
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index dd8222a42808..d33a0613ed0c 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -73,11 +73,39 @@ __weak phys_addr_t dax_pgoff_to_phys(struct dev_dax *dev_dax, pgoff_t pgoff,
return -1;
}
+static void dax_set_mapping(struct vm_fault *vmf, pfn_t pfn,
+ unsigned long fault_size)
+{
+ unsigned long i, nr_pages = fault_size / PAGE_SIZE;
+ struct file *filp = vmf->vma->vm_file;
+ struct dev_dax *dev_dax = filp->private_data;
+ pgoff_t pgoff;
+
+ /* mapping is only set on the head */
+ if (dev_dax->pgmap->vmemmap_shift)
+ nr_pages = 1;
+
+ pgoff = linear_page_index(vmf->vma,
+ ALIGN(vmf->address, fault_size));
+
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
+
+ page = compound_head(page);
+ if (page->mapping)
+ continue;
+
+ page->mapping = filp->f_mapping;
+ page->index = pgoff + i;
+ }
+}
+
static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
- struct vm_fault *vmf, pfn_t *pfn)
+ struct vm_fault *vmf)
{
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
+ pfn_t pfn;
unsigned int fault_size = PAGE_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
@@ -98,18 +126,21 @@ static vm_fault_t __dev_dax_pte_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
- return vmf_insert_mixed(vmf->vma, vmf->address, *pfn);
+ dax_set_mapping(vmf, pfn, fault_size);
+
+ return vmf_insert_mixed(vmf->vma, vmf->address, pfn);
}
static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
- struct vm_fault *vmf, pfn_t *pfn)
+ struct vm_fault *vmf)
{
unsigned long pmd_addr = vmf->address & PMD_MASK;
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
pgoff_t pgoff;
+ pfn_t pfn;
unsigned int fault_size = PMD_SIZE;
if (check_vma(dev_dax, vmf->vma, __func__))
@@ -138,19 +169,22 @@ static vm_fault_t __dev_dax_pmd_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
- return vmf_insert_pfn_pmd(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
+ dax_set_mapping(vmf, pfn, fault_size);
+
+ return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
- struct vm_fault *vmf, pfn_t *pfn)
+ struct vm_fault *vmf)
{
unsigned long pud_addr = vmf->address & PUD_MASK;
struct device *dev = &dev_dax->dev;
phys_addr_t phys;
pgoff_t pgoff;
+ pfn_t pfn;
unsigned int fault_size = PUD_SIZE;
@@ -180,13 +214,15 @@ static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
return VM_FAULT_SIGBUS;
}
- *pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
+ pfn = phys_to_pfn_t(phys, PFN_DEV|PFN_MAP);
- return vmf_insert_pfn_pud(vmf, *pfn, vmf->flags & FAULT_FLAG_WRITE);
+ dax_set_mapping(vmf, pfn, fault_size);
+
+ return vmf_insert_pfn_pud(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}
#else
static vm_fault_t __dev_dax_pud_fault(struct dev_dax *dev_dax,
- struct vm_fault *vmf, pfn_t *pfn)
+ struct vm_fault *vmf)
{
return VM_FAULT_FALLBACK;
}
@@ -196,10 +232,8 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
enum page_entry_size pe_size)
{
struct file *filp = vmf->vma->vm_file;
- unsigned long fault_size;
vm_fault_t rc = VM_FAULT_SIGBUS;
int id;
- pfn_t pfn;
struct dev_dax *dev_dax = filp->private_data;
dev_dbg(&dev_dax->dev, "%s: %s (%#lx - %#lx) size = %d\n", current->comm,
@@ -209,43 +243,18 @@ static vm_fault_t dev_dax_huge_fault(struct vm_fault *vmf,
id = dax_read_lock();
switch (pe_size) {
case PE_SIZE_PTE:
- fault_size = PAGE_SIZE;
- rc = __dev_dax_pte_fault(dev_dax, vmf, &pfn);
+ rc = __dev_dax_pte_fault(dev_dax, vmf);
break;
case PE_SIZE_PMD:
- fault_size = PMD_SIZE;
- rc = __dev_dax_pmd_fault(dev_dax, vmf, &pfn);
+ rc = __dev_dax_pmd_fault(dev_dax, vmf);
break;
case PE_SIZE_PUD:
- fault_size = PUD_SIZE;
- rc = __dev_dax_pud_fault(dev_dax, vmf, &pfn);
+ rc = __dev_dax_pud_fault(dev_dax, vmf);
break;
default:
rc = VM_FAULT_SIGBUS;
}
- if (rc == VM_FAULT_NOPAGE) {
- unsigned long i;
- pgoff_t pgoff;
-
- /*
- * In the device-dax case the only possibility for a
- * VM_FAULT_NOPAGE result is when device-dax capacity is
- * mapped. No need to consider the zero page, or racing
- * conflicting mappings.
- */
- pgoff = linear_page_index(vmf->vma, vmf->address
- & ~(fault_size - 1));
- for (i = 0; i < fault_size / PAGE_SIZE; i++) {
- struct page *page;
-
- page = pfn_to_page(pfn_t_to_pfn(pfn) + i);
- if (page->mapping)
- continue;
- page->mapping = filp->f_mapping;
- page->index = pgoff + i;
- }
- }
dax_read_unlock(id);
return rc;
@@ -398,17 +407,34 @@ int dev_dax_probe(struct dev_dax *dev_dax)
void *addr;
int rc, i;
- pgmap = dev_dax->pgmap;
- if (dev_WARN_ONCE(dev, pgmap && dev_dax->nr_range > 1,
- "static pgmap / multi-range device conflict\n"))
- return -EINVAL;
+ if (static_dev_dax(dev_dax)) {
+ if (dev_dax->nr_range > 1) {
+ dev_warn(dev,
+ "static pgmap / multi-range device conflict\n");
+ return -EINVAL;
+ }
- if (!pgmap) {
- pgmap = devm_kzalloc(dev, sizeof(*pgmap) + sizeof(struct range)
- * (dev_dax->nr_range - 1), GFP_KERNEL);
+ pgmap = dev_dax->pgmap;
+ } else {
+ if (dev_dax->pgmap) {
+ dev_warn(dev,
+ "dynamic-dax with pre-populated page map\n");
+ return -EINVAL;
+ }
+
+ pgmap = devm_kzalloc(dev,
+ struct_size(pgmap, ranges, dev_dax->nr_range - 1),
+ GFP_KERNEL);
if (!pgmap)
return -ENOMEM;
+
pgmap->nr_range = dev_dax->nr_range;
+ dev_dax->pgmap = pgmap;
+
+ for (i = 0; i < dev_dax->nr_range; i++) {
+ struct range *range = &dev_dax->ranges[i].range;
+ pgmap->ranges[i] = *range;
+ }
}
for (i = 0; i < dev_dax->nr_range; i++) {
@@ -420,12 +446,12 @@ int dev_dax_probe(struct dev_dax *dev_dax)
i, range->start, range->end);
return -EBUSY;
}
- /* don't update the range for static pgmap */
- if (!dev_dax->pgmap)
- pgmap->ranges[i] = *range;
}
pgmap->type = MEMORY_DEVICE_GENERIC;
+ if (dev_dax->align > PAGE_SIZE)
+ pgmap->vmemmap_shift =
+ order_base_2(dev_dax->align >> PAGE_SHIFT);
addr = devm_memremap_pages(dev, pgmap);
if (IS_ERR(addr))
return PTR_ERR(addr);
@@ -433,11 +459,7 @@ int dev_dax_probe(struct dev_dax *dev_dax)
inode = dax_inode(dax_dev);
cdev = inode->i_cdev;
cdev_init(cdev, &dax_fops);
- if (dev->class) {
- /* for the CONFIG_DEV_DAX_PMEM_COMPAT case */
- cdev->owner = dev->parent->driver->owner;
- } else
- cdev->owner = dev->driver->owner;
+ cdev->owner = dev->driver->owner;
cdev_set_parent(cdev, &dev->kobj);
rc = cdev_add(cdev, dev->devt, 1);
if (rc)
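
The new vmemmap_shift assignment maps the device-dax alignment to a compound-page order. A worked example, assuming 4KiB base pages (PAGE_SHIFT == 12):

/*
 * align = 2MiB:
 *   SZ_2M >> PAGE_SHIFT  == 512 pages per mapping
 *   order_base_2(512)    == 9, so pgmap->vmemmap_shift == 9
 * Each PMD-sized mapping is then a single 512-page compound, which is
 * why dax_set_mapping() above sets page->mapping on the head page only.
 */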
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem.c
index 062e8bc14223..f050ea78bb83 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem.c
@@ -3,11 +3,11 @@
#include <linux/memremap.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
-#include "../../nvdimm/pfn.h"
-#include "../../nvdimm/nd.h"
-#include "../bus.h"
+#include "../nvdimm/pfn.h"
+#include "../nvdimm/nd.h"
+#include "bus.h"
-struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
+static struct dev_dax *__dax_pmem_probe(struct device *dev)
{
struct range range;
int rc, id, region_id;
@@ -63,7 +63,6 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
.dax_region = dax_region,
.id = id,
.pgmap = &pgmap,
- .subsys = subsys,
.size = range_len(&range),
};
dev_dax = devm_create_dev_dax(&data);
@@ -73,7 +72,32 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
return dev_dax;
}
-EXPORT_SYMBOL_GPL(__dax_pmem_probe);
+
+static int dax_pmem_probe(struct device *dev)
+{
+ return PTR_ERR_OR_ZERO(__dax_pmem_probe(dev));
+}
+
+static struct nd_device_driver dax_pmem_driver = {
+ .probe = dax_pmem_probe,
+ .drv = {
+ .name = "dax_pmem",
+ },
+ .type = ND_DRIVER_DAX_PMEM,
+};
+
+static int __init dax_pmem_init(void)
+{
+ return nd_driver_register(&dax_pmem_driver);
+}
+module_init(dax_pmem_init);
+
+static void __exit dax_pmem_exit(void)
+{
+ driver_unregister(&dax_pmem_driver.drv);
+}
+module_exit(dax_pmem_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
+MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
diff --git a/drivers/dax/pmem/Makefile b/drivers/dax/pmem/Makefile
index 010269f61d41..191c31f0d4f0 100644
--- a/drivers/dax/pmem/Makefile
+++ b/drivers/dax/pmem/Makefile
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem.o
obj-$(CONFIG_DEV_DAX_PMEM) += dax_pmem_core.o
-obj-$(CONFIG_DEV_DAX_PMEM_COMPAT) += dax_pmem_compat.o
dax_pmem-y := pmem.o
dax_pmem_core-y := core.o
diff --git a/drivers/dax/pmem/compat.c b/drivers/dax/pmem/compat.c
deleted file mode 100644
index d81dc35fd65d..000000000000
--- a/drivers/dax/pmem/compat.c
+++ /dev/null
@@ -1,72 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2016 - 2018 Intel Corporation. All rights reserved. */
-#include <linux/percpu-refcount.h>
-#include <linux/memremap.h>
-#include <linux/module.h>
-#include <linux/pfn_t.h>
-#include <linux/nd.h>
-#include "../bus.h"
-
-/* we need the private definitions to implement compat suport */
-#include "../dax-private.h"
-
-static int dax_pmem_compat_probe(struct device *dev)
-{
- struct dev_dax *dev_dax = __dax_pmem_probe(dev, DEV_DAX_CLASS);
- int rc;
-
- if (IS_ERR(dev_dax))
- return PTR_ERR(dev_dax);
-
- if (!devres_open_group(&dev_dax->dev, dev_dax, GFP_KERNEL))
- return -ENOMEM;
-
- device_lock(&dev_dax->dev);
- rc = dev_dax_probe(dev_dax);
- device_unlock(&dev_dax->dev);
-
- devres_close_group(&dev_dax->dev, dev_dax);
- if (rc)
- devres_release_group(&dev_dax->dev, dev_dax);
-
- return rc;
-}
-
-static int dax_pmem_compat_release(struct device *dev, void *data)
-{
- device_lock(dev);
- devres_release_group(dev, to_dev_dax(dev));
- device_unlock(dev);
-
- return 0;
-}
-
-static void dax_pmem_compat_remove(struct device *dev)
-{
- device_for_each_child(dev, NULL, dax_pmem_compat_release);
-}
-
-static struct nd_device_driver dax_pmem_compat_driver = {
- .probe = dax_pmem_compat_probe,
- .remove = dax_pmem_compat_remove,
- .drv = {
- .name = "dax_pmem_compat",
- },
- .type = ND_DRIVER_DAX_PMEM,
-};
-
-static int __init dax_pmem_compat_init(void)
-{
- return nd_driver_register(&dax_pmem_compat_driver);
-}
-module_init(dax_pmem_compat_init);
-
-static void __exit dax_pmem_compat_exit(void)
-{
- driver_unregister(&dax_pmem_compat_driver.drv);
-}
-module_exit(dax_pmem_compat_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Intel Corporation");
-MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
diff --git a/drivers/dax/pmem/pmem.c b/drivers/dax/pmem/pmem.c
index 0ae4238a0ef8..dfe91a2990fe 100644
--- a/drivers/dax/pmem/pmem.c
+++ b/drivers/dax/pmem/pmem.c
@@ -7,34 +7,4 @@
#include <linux/nd.h>
#include "../bus.h"
-static int dax_pmem_probe(struct device *dev)
-{
- return PTR_ERR_OR_ZERO(__dax_pmem_probe(dev, DEV_DAX_BUS));
-}
-static struct nd_device_driver dax_pmem_driver = {
- .probe = dax_pmem_probe,
- .drv = {
- .name = "dax_pmem",
- },
- .type = ND_DRIVER_DAX_PMEM,
-};
-
-static int __init dax_pmem_init(void)
-{
- return nd_driver_register(&dax_pmem_driver);
-}
-module_init(dax_pmem_init);
-
-static void __exit dax_pmem_exit(void)
-{
- driver_unregister(&dax_pmem_driver.drv);
-}
-module_exit(dax_pmem_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Intel Corporation");
-#if !IS_ENABLED(CONFIG_DEV_DAX_PMEM_COMPAT)
-/* For compat builds, don't load this module by default */
-MODULE_ALIAS_ND_DEVICE(ND_DEVICE_DAX_PMEM);
-#endif
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index b882cf8106ea..e3029389d809 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -7,10 +7,8 @@
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/magic.h>
-#include <linux/genhd.h>
#include <linux/pfn_t.h>
#include <linux/cdev.h>
-#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/dax.h>
@@ -21,15 +19,12 @@
* struct dax_device - anchor object for dax services
* @inode: core vfs
* @cdev: optional character interface for "device dax"
- * @host: optional name for lookups where the device path is not available
* @private: dax driver private data
* @flags: state and boolean properties
*/
struct dax_device {
- struct hlist_node list;
struct inode inode;
struct cdev cdev;
- const char *host;
void *private;
unsigned long flags;
const struct dax_operations *ops;
@@ -42,10 +37,6 @@ static DEFINE_IDA(dax_minor_ida);
static struct kmem_cache *dax_cache __read_mostly;
static struct super_block *dax_superblock __read_mostly;
-#define DAX_HASH_SIZE (PAGE_SIZE / sizeof(struct hlist_head))
-static struct hlist_head dax_host_list[DAX_HASH_SIZE];
-static DEFINE_SPINLOCK(dax_host_lock);
-
int dax_read_lock(void)
{
return srcu_read_lock(&dax_srcu);
@@ -58,169 +49,54 @@ void dax_read_unlock(int id)
}
EXPORT_SYMBOL_GPL(dax_read_unlock);
-static int dax_host_hash(const char *host)
-{
- return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
-}
-
-#ifdef CONFIG_BLOCK
+#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
#include <linux/blkdev.h>
-int bdev_dax_pgoff(struct block_device *bdev, sector_t sector, size_t size,
- pgoff_t *pgoff)
+static DEFINE_XARRAY(dax_hosts);
+
+int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
- sector_t start_sect = bdev ? get_start_sect(bdev) : 0;
- phys_addr_t phys_off = (start_sect + sector) * 512;
+ return xa_insert(&dax_hosts, (unsigned long)disk, dax_dev, GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(dax_add_host);
- if (pgoff)
- *pgoff = PHYS_PFN(phys_off);
- if (phys_off % PAGE_SIZE || size % PAGE_SIZE)
- return -EINVAL;
- return 0;
+void dax_remove_host(struct gendisk *disk)
+{
+ xa_erase(&dax_hosts, (unsigned long)disk);
}
-EXPORT_SYMBOL(bdev_dax_pgoff);
+EXPORT_SYMBOL_GPL(dax_remove_host);
-#if IS_ENABLED(CONFIG_FS_DAX)
/**
- * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
- * @host: alternate name for the device registered by a dax driver
+ * fs_dax_get_by_bdev() - temporary lookup mechanism for filesystem-dax
+ * @bdev: block device to find a dax_device for
+ * @start_off: returns the byte offset into the dax_device that @bdev starts
*/
-static struct dax_device *dax_get_by_host(const char *host)
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev, u64 *start_off)
{
- struct dax_device *dax_dev, *found = NULL;
- int hash, id;
-
- if (!host)
- return NULL;
-
- hash = dax_host_hash(host);
-
- id = dax_read_lock();
- spin_lock(&dax_host_lock);
- hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
- if (!dax_alive(dax_dev)
- || strcmp(host, dax_dev->host) != 0)
- continue;
-
- if (igrab(&dax_dev->inode))
- found = dax_dev;
- break;
- }
- spin_unlock(&dax_host_lock);
- dax_read_unlock(id);
-
- return found;
-}
+ struct dax_device *dax_dev;
+ u64 part_size;
+ int id;
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
-{
if (!blk_queue_dax(bdev->bd_disk->queue))
return NULL;
- return dax_get_by_host(bdev->bd_disk->disk_name);
-}
-EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
-
-bool generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors)
-{
- bool dax_enabled = false;
- pgoff_t pgoff, pgoff_end;
- void *kaddr, *end_kaddr;
- pfn_t pfn, end_pfn;
- sector_t last_page;
- long len, len2;
- int err, id;
-
- if (blocksize != PAGE_SIZE) {
- pr_info("%pg: error: unsupported blocksize for dax\n", bdev);
- return false;
- }
-
- if (!dax_dev) {
- pr_debug("%pg: error: dax unsupported by block device\n", bdev);
- return false;
- }
- err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
- if (err) {
+ *start_off = get_start_sect(bdev) * SECTOR_SIZE;
+ part_size = bdev_nr_sectors(bdev) * SECTOR_SIZE;
+ if (*start_off % PAGE_SIZE || part_size % PAGE_SIZE) {
pr_info("%pg: error: unaligned partition for dax\n", bdev);
- return false;
- }
-
- last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
- err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
- if (err) {
- pr_info("%pg: error: unaligned partition for dax\n", bdev);
- return false;
+ return NULL;
}
id = dax_read_lock();
- len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
- len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
-
- if (len < 1 || len2 < 1) {
- pr_info("%pg: error: dax access failed (%ld)\n",
- bdev, len < 1 ? len : len2);
- dax_read_unlock(id);
- return false;
- }
-
- if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
- /*
- * An arch that has enabled the pmem api should also
- * have its drivers support pfn_t_devmap()
- *
- * This is a developer warning and should not trigger in
- * production. dax_flush() will crash since it depends
- * on being able to do (page_address(pfn_to_page())).
- */
- WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
- dax_enabled = true;
- } else if (pfn_t_devmap(pfn) && pfn_t_devmap(end_pfn)) {
- struct dev_pagemap *pgmap, *end_pgmap;
-
- pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
- end_pgmap = get_dev_pagemap(pfn_t_to_pfn(end_pfn), NULL);
- if (pgmap && pgmap == end_pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX
- && pfn_t_to_page(pfn)->pgmap == pgmap
- && pfn_t_to_page(end_pfn)->pgmap == pgmap
- && pfn_t_to_pfn(pfn) == PHYS_PFN(__pa(kaddr))
- && pfn_t_to_pfn(end_pfn) == PHYS_PFN(__pa(end_kaddr)))
- dax_enabled = true;
- put_dev_pagemap(pgmap);
- put_dev_pagemap(end_pgmap);
-
- }
+ dax_dev = xa_load(&dax_hosts, (unsigned long)bdev->bd_disk);
+ if (!dax_dev || !dax_alive(dax_dev) || !igrab(&dax_dev->inode))
+ dax_dev = NULL;
dax_read_unlock(id);
- if (!dax_enabled) {
- pr_info("%pg: error: dax support not enabled\n", bdev);
- return false;
- }
- return true;
-}
-EXPORT_SYMBOL_GPL(generic_fsdax_supported);
-
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len)
-{
- bool ret = false;
- int id;
-
- if (!dax_dev)
- return false;
-
- id = dax_read_lock();
- if (dax_alive(dax_dev) && dax_dev->ops->dax_supported)
- ret = dax_dev->ops->dax_supported(dax_dev, bdev, blocksize,
- start, len);
- dax_read_unlock(id);
- return ret;
+ return dax_dev;
}
-EXPORT_SYMBOL_GPL(dax_supported);
-#endif /* CONFIG_FS_DAX */
-#endif /* CONFIG_BLOCK */
+EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
+#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
enum dax_device_flags {
/* !alive + rcu grace period == no new operations / mappings */
@@ -229,6 +105,10 @@ enum dax_device_flags {
DAXDEV_WRITE_CACHE,
/* flag to check if device supports synchronous flush */
DAXDEV_SYNC,
+ /* do not leave the caches dirty after writes */
+ DAXDEV_NOCACHE,
+ /* handle CPU fetch exceptions during reads */
+ DAXDEV_NOMC,
};
/**
@@ -270,9 +150,15 @@ size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
if (!dax_alive(dax_dev))
return 0;
- return dax_dev->ops->copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+ /*
+ * The userspace address for the memory copy has already been validated
+ * via access_ok() in vfs_write, so use the 'no check' version to bypass
+ * the HARDENED_USERCOPY overhead.
+ */
+ if (test_bit(DAXDEV_NOCACHE, &dax_dev->flags))
+ return _copy_from_iter_flushcache(addr, bytes, i);
+ return _copy_from_iter(addr, bytes, i);
}
-EXPORT_SYMBOL_GPL(dax_copy_from_iter);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i)
@@ -280,9 +166,15 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
if (!dax_alive(dax_dev))
return 0;
- return dax_dev->ops->copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+ /*
+ * The userspace address for the memory copy has already been validated
+ * via access_ok() in vfs_read, so use the 'no check' version to bypass
+ * the HARDENED_USERCOPY overhead.
+ */
+ if (test_bit(DAXDEV_NOMC, &dax_dev->flags))
+ return _copy_mc_to_iter(addr, bytes, i);
+ return _copy_to_iter(addr, bytes, i);
}
-EXPORT_SYMBOL_GPL(dax_copy_to_iter);
int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages)
@@ -332,17 +224,29 @@ bool dax_write_cache_enabled(struct dax_device *dax_dev)
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
-bool __dax_synchronous(struct dax_device *dax_dev)
+bool dax_synchronous(struct dax_device *dax_dev)
{
return test_bit(DAXDEV_SYNC, &dax_dev->flags);
}
-EXPORT_SYMBOL_GPL(__dax_synchronous);
+EXPORT_SYMBOL_GPL(dax_synchronous);
-void __set_dax_synchronous(struct dax_device *dax_dev)
+void set_dax_synchronous(struct dax_device *dax_dev)
{
set_bit(DAXDEV_SYNC, &dax_dev->flags);
}
-EXPORT_SYMBOL_GPL(__set_dax_synchronous);
+EXPORT_SYMBOL_GPL(set_dax_synchronous);
+
+void set_dax_nocache(struct dax_device *dax_dev)
+{
+ set_bit(DAXDEV_NOCACHE, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(set_dax_nocache);
+
+void set_dax_nomc(struct dax_device *dax_dev)
+{
+ set_bit(DAXDEV_NOMC, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(set_dax_nomc);
bool dax_alive(struct dax_device *dax_dev)
{
@@ -363,12 +267,7 @@ void kill_dax(struct dax_device *dax_dev)
return;
clear_bit(DAXDEV_ALIVE, &dax_dev->flags);
-
synchronize_srcu(&dax_srcu);
-
- spin_lock(&dax_host_lock);
- hlist_del_init(&dax_dev->list);
- spin_unlock(&dax_host_lock);
}
EXPORT_SYMBOL_GPL(kill_dax);
@@ -400,8 +299,6 @@ static struct dax_device *to_dax_dev(struct inode *inode)
static void dax_free_inode(struct inode *inode)
{
struct dax_device *dax_dev = to_dax_dev(inode);
- kfree(dax_dev->host);
- dax_dev->host = NULL;
if (inode->i_rdev)
ida_simple_remove(&dax_minor_ida, iminor(inode));
kmem_cache_free(dax_cache, dax_dev);
@@ -476,65 +373,30 @@ static struct dax_device *dax_dev_get(dev_t devt)
return dax_dev;
}
-static void dax_add_host(struct dax_device *dax_dev, const char *host)
-{
- int hash;
-
- /*
- * Unconditionally init dax_dev since it's coming from a
- * non-zeroed slab cache
- */
- INIT_HLIST_NODE(&dax_dev->list);
- dax_dev->host = host;
- if (!host)
- return;
-
- hash = dax_host_hash(host);
- spin_lock(&dax_host_lock);
- hlist_add_head(&dax_dev->list, &dax_host_list[hash]);
- spin_unlock(&dax_host_lock);
-}
-
-struct dax_device *alloc_dax(void *private, const char *__host,
- const struct dax_operations *ops, unsigned long flags)
+struct dax_device *alloc_dax(void *private, const struct dax_operations *ops)
{
struct dax_device *dax_dev;
- const char *host;
dev_t devt;
int minor;
- if (ops && !ops->zero_page_range) {
- pr_debug("%s: error: device does not provide dax"
- " operation zero_page_range()\n",
- __host ? __host : "Unknown");
+ if (WARN_ON_ONCE(ops && !ops->zero_page_range))
return ERR_PTR(-EINVAL);
- }
-
- host = kstrdup(__host, GFP_KERNEL);
- if (__host && !host)
- return ERR_PTR(-ENOMEM);
minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
if (minor < 0)
- goto err_minor;
+ return ERR_PTR(-ENOMEM);
devt = MKDEV(MAJOR(dax_devt), minor);
dax_dev = dax_dev_get(devt);
if (!dax_dev)
goto err_dev;
- dax_add_host(dax_dev, host);
dax_dev->ops = ops;
dax_dev->private = private;
- if (flags & DAXDEV_F_SYNC)
- set_dax_synchronous(dax_dev);
-
return dax_dev;
err_dev:
ida_simple_remove(&dax_minor_ida, minor);
- err_minor:
- kfree(host);
return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
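
With the host string and the DAXDEV_F_SYNC flag removed, callers pair the two-argument alloc_dax() with the new setter helpers, as the bus.c hunk earlier in this patch does. A sketch of the updated call sequence ("private" and "ops" are placeholders; ops must provide zero_page_range(), per the WARN_ON_ONCE() above):

dax_dev = alloc_dax(private, ops);
if (IS_ERR(dax_dev))
	return PTR_ERR(dax_dev);
set_dax_synchronous(dax_dev);	/* replaces the DAXDEV_F_SYNC flag */
set_dax_nocache(dax_dev);	/* writes use _copy_from_iter_flushcache() */
set_dax_nomc(dax_dev);		/* reads use _copy_mc_to_iter() */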
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c
index 053baadcada9..2bba0babcb62 100644
--- a/drivers/dma-buf/dma-buf-sysfs-stats.c
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.c
@@ -132,7 +132,7 @@ void dma_buf_stats_teardown(struct dma_buf *dmabuf)
/* Statistics files do not need to send uevents. */
-static int dmabuf_sysfs_uevent_filter(struct kset *kset, struct kobject *kobj)
+static int dmabuf_sysfs_uevent_filter(struct kobject *kobj)
{
return 0;
}
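
This hunk follows a tree-wide kobject change that dropped the kset argument from the uevent callbacks; the wiring of the filter itself is unchanged. A sketch of how such a filter is hooked up (the ops variable name is illustrative):

static const struct kset_uevent_ops demo_uevent_ops = {
	.filter = dmabuf_sysfs_uevent_filter,	/* new signature: (struct kobject *) */
};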
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index 56bf5ad01ad5..8f5848aa144f 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -14,6 +14,7 @@
#include <linux/xarray.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/dma-heap.h>
@@ -135,6 +136,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
return -EINVAL;
+ nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));
/* Get the kernel ioctl cmd that matches */
kcmd = dma_heap_ioctl_cmds[nr];
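
array_index_nospec() clamps the index even under speculative execution, so the bounds check above cannot be bypassed by a Spectre-v1 gadget before the dependent table load. The general shape of the hardening (idx, n, and table are illustrative):

if (idx >= n)
	return -EINVAL;
/* clamp after the check and before the dependent load */
idx = array_index_nospec(idx, n);
val = table[idx];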
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index 0c05b79870f9..83f02bd51dda 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -124,10 +124,11 @@ static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
struct cma_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a;
+ mutex_lock(&buffer->lock);
+
if (buffer->vmap_cnt)
invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
- mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
@@ -144,10 +145,11 @@ static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
struct cma_heap_buffer *buffer = dmabuf->priv;
struct dma_heap_attachment *a;
+ mutex_lock(&buffer->lock);
+
if (buffer->vmap_cnt)
flush_kernel_vmap_range(buffer->vaddr, buffer->len);
- mutex_lock(&buffer->lock);
list_for_each_entry(a, &buffer->attachments, list) {
if (!a->mapped)
continue;
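
Taking buffer->lock before testing vmap_cnt makes the count check and the cache maintenance atomic with respect to the heap's vmap/vunmap paths, which update vmap_cnt and vaddr under the same lock; an unlocked check could race with a concurrent vunmap releasing vaddr. Schematically:

mutex_lock(&buffer->lock);
if (buffer->vmap_cnt)		/* stable only while the lock is held */
	flush_kernel_vmap_range(buffer->vaddr, buffer->len);
/* ... per-attachment cache maintenance on the mapped sg tables ... */
mutex_unlock(&buffer->lock);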
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 275a76f188ae..1476156af74b 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -99,6 +99,7 @@
#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
+#define AT_XDMAC_CNDC_NDVIEW_MASK GENMASK(28, 27)
#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
@@ -252,15 +253,15 @@ struct at_xdmac {
/* Linked List Descriptor */
struct at_xdmac_lld {
- dma_addr_t mbr_nda; /* Next Descriptor Member */
- u32 mbr_ubc; /* Microblock Control Member */
- dma_addr_t mbr_sa; /* Source Address Member */
- dma_addr_t mbr_da; /* Destination Address Member */
- u32 mbr_cfg; /* Configuration Register */
- u32 mbr_bc; /* Block Control Register */
- u32 mbr_ds; /* Data Stride Register */
- u32 mbr_sus; /* Source Microblock Stride Register */
- u32 mbr_dus; /* Destination Microblock Stride Register */
+ u32 mbr_nda; /* Next Descriptor Member */
+ u32 mbr_ubc; /* Microblock Control Member */
+ u32 mbr_sa; /* Source Address Member */
+ u32 mbr_da; /* Destination Address Member */
+ u32 mbr_cfg; /* Configuration Register */
+ u32 mbr_bc; /* Block Control Register */
+ u32 mbr_ds; /* Data Stride Register */
+ u32 mbr_sus; /* Source Microblock Stride Register */
+ u32 mbr_dus; /* Destination Microblock Stride Register */
};
/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
@@ -385,9 +386,6 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
- if (at_xdmac_chan_is_enabled(atchan))
- return;
-
/* Set transfer as active to not try to start it again. */
first->active_xfer = true;
@@ -405,7 +403,8 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
*/
if (at_xdmac_chan_is_cyclic(atchan))
reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
- else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
+ else if ((first->lld.mbr_ubc &
+ AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
else
reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
@@ -476,13 +475,12 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
spin_lock_irqsave(&atchan->lock, irqflags);
cookie = dma_cookie_assign(tx);
+ list_add_tail(&desc->xfer_node, &atchan->xfers_list);
+ spin_unlock_irqrestore(&atchan->lock, irqflags);
+
dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
__func__, atchan, desc);
- list_add_tail(&desc->xfer_node, &atchan->xfers_list);
- if (list_is_singular(&atchan->xfers_list))
- at_xdmac_start_xfer(atchan, desc);
- spin_unlock_irqrestore(&atchan->lock, irqflags);
return cookie;
}
@@ -733,7 +731,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (!desc) {
dev_err(chan2dev(chan), "can't get descriptor\n");
if (first)
- list_splice_init(&first->descs_list, &atchan->free_descs_list);
+ list_splice_tail_init(&first->descs_list,
+ &atchan->free_descs_list);
goto spin_unlock;
}
@@ -821,7 +820,8 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
if (!desc) {
dev_err(chan2dev(chan), "can't get descriptor\n");
if (first)
- list_splice_init(&first->descs_list, &atchan->free_descs_list);
+ list_splice_tail_init(&first->descs_list,
+ &atchan->free_descs_list);
spin_unlock_irqrestore(&atchan->lock, irqflags);
return NULL;
}
@@ -1055,8 +1055,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
src_addr, dst_addr,
xt, chunk);
if (!desc) {
- list_splice_init(&first->descs_list,
- &atchan->free_descs_list);
+ list_splice_tail_init(&first->descs_list,
+ &atchan->free_descs_list);
return NULL;
}
@@ -1136,7 +1136,8 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (!desc) {
dev_err(chan2dev(chan), "can't get descriptor\n");
if (first)
- list_splice_init(&first->descs_list, &atchan->free_descs_list);
+ list_splice_tail_init(&first->descs_list,
+ &atchan->free_descs_list);
return NULL;
}
@@ -1312,8 +1313,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
sg_dma_len(sg),
value);
if (!desc && first)
- list_splice_init(&first->descs_list,
- &atchan->free_descs_list);
+ list_splice_tail_init(&first->descs_list,
+ &atchan->free_descs_list);
if (!first)
first = desc;
@@ -1586,20 +1587,6 @@ spin_unlock:
return ret;
}
-/* Call must be protected by lock. */
-static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
- struct at_xdmac_desc *desc)
-{
- dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
-
- /*
- * Remove the transfer from the transfer list then move the transfer
- * descriptors into the free descriptors list.
- */
- list_del(&desc->xfer_node);
- list_splice_init(&desc->descs_list, &atchan->free_descs_list);
-}
-
static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
struct at_xdmac_desc *desc;
@@ -1608,14 +1595,14 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
* If channel is enabled, do nothing, advance_work will be triggered
* after the interruption.
*/
- if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
- desc = list_first_entry(&atchan->xfers_list,
- struct at_xdmac_desc,
- xfer_node);
- dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
- if (!desc->active_xfer)
- at_xdmac_start_xfer(atchan, desc);
- }
+ if (at_xdmac_chan_is_enabled(atchan) || list_empty(&atchan->xfers_list))
+ return;
+
+ desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+ xfer_node);
+ dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+ if (!desc->active_xfer)
+ at_xdmac_start_xfer(atchan, desc);
}
static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1623,16 +1610,22 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
struct at_xdmac_desc *desc;
struct dma_async_tx_descriptor *txd;
- if (!list_empty(&atchan->xfers_list)) {
- desc = list_first_entry(&atchan->xfers_list,
- struct at_xdmac_desc, xfer_node);
- txd = &desc->tx_dma_desc;
-
- if (txd->flags & DMA_PREP_INTERRUPT)
- dmaengine_desc_get_callback_invoke(txd, NULL);
+ spin_lock_irq(&atchan->lock);
+ dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
+ __func__, atchan->irq_status);
+ if (list_empty(&atchan->xfers_list)) {
+ spin_unlock_irq(&atchan->lock);
+ return;
}
+ desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+ xfer_node);
+ spin_unlock_irq(&atchan->lock);
+ txd = &desc->tx_dma_desc;
+ if (txd->flags & DMA_PREP_INTERRUPT)
+ dmaengine_desc_get_callback_invoke(txd, NULL);
}
+/* Called with atchan->lock held. */
static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
{
struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
@@ -1651,8 +1644,6 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
- spin_lock_irq(&atchan->lock);
-
/* Channel must be disabled first as it's not done automatically */
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
@@ -1662,8 +1653,6 @@ static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
struct at_xdmac_desc,
xfer_node);
- spin_unlock_irq(&atchan->lock);
-
/* Print bad descriptor's details if needed */
dev_dbg(chan2dev(&atchan->chan),
"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
@@ -1677,50 +1666,54 @@ static void at_xdmac_tasklet(struct tasklet_struct *t)
{
struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
struct at_xdmac_desc *desc;
+ struct dma_async_tx_descriptor *txd;
u32 error_mask;
+ if (at_xdmac_chan_is_cyclic(atchan))
+ return at_xdmac_handle_cyclic(atchan);
+
+ error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
+ AT_XDMAC_CIS_ROIS;
+
+ spin_lock_irq(&atchan->lock);
+
dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
__func__, atchan->irq_status);
- error_mask = AT_XDMAC_CIS_RBEIS
- | AT_XDMAC_CIS_WBEIS
- | AT_XDMAC_CIS_ROIS;
-
- if (at_xdmac_chan_is_cyclic(atchan)) {
- at_xdmac_handle_cyclic(atchan);
- } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
- || (atchan->irq_status & error_mask)) {
- struct dma_async_tx_descriptor *txd;
-
- if (atchan->irq_status & error_mask)
- at_xdmac_handle_error(atchan);
-
- spin_lock_irq(&atchan->lock);
- desc = list_first_entry(&atchan->xfers_list,
- struct at_xdmac_desc,
- xfer_node);
- dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
- if (!desc->active_xfer) {
- dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
- spin_unlock_irq(&atchan->lock);
- return;
- }
+ if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
+ !(atchan->irq_status & error_mask)) {
+ spin_unlock_irq(&atchan->lock);
+ return;
+ }
- txd = &desc->tx_dma_desc;
+ if (atchan->irq_status & error_mask)
+ at_xdmac_handle_error(atchan);
- at_xdmac_remove_xfer(atchan, desc);
+ desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
+ xfer_node);
+ dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
+ if (!desc->active_xfer) {
+ dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
spin_unlock_irq(&atchan->lock);
+ return;
+ }
- dma_cookie_complete(txd);
- if (txd->flags & DMA_PREP_INTERRUPT)
- dmaengine_desc_get_callback_invoke(txd, NULL);
+ txd = &desc->tx_dma_desc;
+ dma_cookie_complete(txd);
+ /* Remove the transfer from the transfer list. */
+ list_del(&desc->xfer_node);
+ spin_unlock_irq(&atchan->lock);
- dma_run_dependencies(txd);
+ if (txd->flags & DMA_PREP_INTERRUPT)
+ dmaengine_desc_get_callback_invoke(txd, NULL);
- spin_lock_irq(&atchan->lock);
- at_xdmac_advance_work(atchan);
- spin_unlock_irq(&atchan->lock);
- }
+ dma_run_dependencies(txd);
+
+ spin_lock_irq(&atchan->lock);
+ /* Move the xfer descriptors into the free descriptors list. */
+ list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
+ at_xdmac_advance_work(atchan);
+ spin_unlock_irq(&atchan->lock);
}
static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
@@ -1784,11 +1777,9 @@ static void at_xdmac_issue_pending(struct dma_chan *chan)
dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
- if (!at_xdmac_chan_is_cyclic(atchan)) {
- spin_lock_irqsave(&atchan->lock, flags);
- at_xdmac_advance_work(atchan);
- spin_unlock_irqrestore(&atchan->lock, flags);
- }
+ spin_lock_irqsave(&atchan->lock, flags);
+ at_xdmac_advance_work(atchan);
+ spin_unlock_irqrestore(&atchan->lock, flags);
return;
}
@@ -1866,8 +1857,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
cpu_relax();
/* Cancel all pending transfers. */
- list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
- at_xdmac_remove_xfer(atchan, desc);
+ list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
+ list_del(&desc->xfer_node);
+ list_splice_tail_init(&desc->descs_list,
+ &atchan->free_descs_list);
+ }
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
@@ -2031,7 +2025,7 @@ static int __maybe_unused atmel_xdmac_resume(struct device *dev)
static int at_xdmac_probe(struct platform_device *pdev)
{
struct at_xdmac *atxdmac;
- int irq, size, nr_channels, i, ret;
+ int irq, nr_channels, i, ret;
void __iomem *base;
u32 reg;
@@ -2056,9 +2050,9 @@ static int at_xdmac_probe(struct platform_device *pdev)
return -EINVAL;
}
- size = sizeof(*atxdmac);
- size += nr_channels * sizeof(struct at_xdmac_chan);
- atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ atxdmac = devm_kzalloc(&pdev->dev,
+ struct_size(atxdmac, chan, nr_channels),
+ GFP_KERNEL);
if (!atxdmac) {
dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
return -ENOMEM;
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index 96701dedcac8..fc513eb2b289 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -104,10 +104,10 @@
* descriptor base address in the upper 8 bits.
*/
struct jz4780_dma_hwdesc {
- uint32_t dcm;
- uint32_t dsa;
- uint32_t dta;
- uint32_t dtc;
+ u32 dcm;
+ u32 dsa;
+ u32 dta;
+ u32 dtc;
};
/* Size of allocations for hardware descriptor blocks. */
@@ -122,7 +122,8 @@ struct jz4780_dma_desc {
dma_addr_t desc_phys;
unsigned int count;
enum dma_transaction_type type;
- uint32_t status;
+ u32 transfer_type;
+ u32 status;
};
struct jz4780_dma_chan {
@@ -130,8 +131,8 @@ struct jz4780_dma_chan {
unsigned int id;
struct dma_pool *desc_pool;
- uint32_t transfer_type;
- uint32_t transfer_shift;
+ u32 transfer_type_tx, transfer_type_rx;
+ u32 transfer_shift;
struct dma_slave_config config;
struct jz4780_dma_desc *desc;
@@ -152,12 +153,12 @@ struct jz4780_dma_dev {
unsigned int irq;
const struct jz4780_dma_soc_data *soc_data;
- uint32_t chan_reserved;
+ u32 chan_reserved;
struct jz4780_dma_chan chan[];
};
struct jz4780_dma_filter_data {
- uint32_t transfer_type;
+ u32 transfer_type_tx, transfer_type_rx;
int channel;
};
@@ -179,26 +180,26 @@ static inline struct jz4780_dma_dev *jz4780_dma_chan_parent(
dma_device);
}
-static inline uint32_t jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
+static inline u32 jz4780_dma_chn_readl(struct jz4780_dma_dev *jzdma,
unsigned int chn, unsigned int reg)
{
return readl(jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}
static inline void jz4780_dma_chn_writel(struct jz4780_dma_dev *jzdma,
- unsigned int chn, unsigned int reg, uint32_t val)
+ unsigned int chn, unsigned int reg, u32 val)
{
writel(val, jzdma->chn_base + reg + JZ_DMA_REG_CHAN(chn));
}
-static inline uint32_t jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
+static inline u32 jz4780_dma_ctrl_readl(struct jz4780_dma_dev *jzdma,
unsigned int reg)
{
return readl(jzdma->ctrl_base + reg);
}
static inline void jz4780_dma_ctrl_writel(struct jz4780_dma_dev *jzdma,
- unsigned int reg, uint32_t val)
+ unsigned int reg, u32 val)
{
writel(val, jzdma->ctrl_base + reg);
}
@@ -226,9 +227,10 @@ static inline void jz4780_dma_chan_disable(struct jz4780_dma_dev *jzdma,
jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DCKEC, BIT(chn));
}
-static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
- struct jz4780_dma_chan *jzchan, unsigned int count,
- enum dma_transaction_type type)
+static struct jz4780_dma_desc *
+jz4780_dma_desc_alloc(struct jz4780_dma_chan *jzchan, unsigned int count,
+ enum dma_transaction_type type,
+ enum dma_transfer_direction direction)
{
struct jz4780_dma_desc *desc;
@@ -248,6 +250,12 @@ static struct jz4780_dma_desc *jz4780_dma_desc_alloc(
desc->count = count;
desc->type = type;
+
+ if (direction == DMA_DEV_TO_MEM)
+ desc->transfer_type = jzchan->transfer_type_rx;
+ else
+ desc->transfer_type = jzchan->transfer_type_tx;
+
return desc;
}
@@ -260,8 +268,8 @@ static void jz4780_dma_desc_free(struct virt_dma_desc *vdesc)
kfree(desc);
}
-static uint32_t jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
- unsigned long val, uint32_t *shift)
+static u32 jz4780_dma_transfer_size(struct jz4780_dma_chan *jzchan,
+ unsigned long val, u32 *shift)
{
struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
int ord = ffs(val) - 1;
@@ -303,7 +311,7 @@ static int jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan,
enum dma_transfer_direction direction)
{
struct dma_slave_config *config = &jzchan->config;
- uint32_t width, maxburst, tsz;
+ u32 width, maxburst, tsz;
if (direction == DMA_MEM_TO_DEV) {
desc->dcm = JZ_DMA_DCM_SAI;
@@ -361,7 +369,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_slave_sg(
unsigned int i;
int err;
- desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE);
+ desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE, direction);
if (!desc)
return NULL;
@@ -410,7 +418,7 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_cyclic(
periods = buf_len / period_len;
- desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC);
+ desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC, direction);
if (!desc)
return NULL;
@@ -453,16 +461,16 @@ static struct dma_async_tx_descriptor *jz4780_dma_prep_dma_memcpy(
{
struct jz4780_dma_chan *jzchan = to_jz4780_dma_chan(chan);
struct jz4780_dma_desc *desc;
- uint32_t tsz;
+ u32 tsz;
- desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY);
+ desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY, 0);
if (!desc)
return NULL;
tsz = jz4780_dma_transfer_size(jzchan, dest | src | len,
&jzchan->transfer_shift);
- jzchan->transfer_type = JZ_DMA_DRT_AUTO;
+ desc->transfer_type = JZ_DMA_DRT_AUTO;
desc->desc[0].dsa = src;
desc->desc[0].dta = dest;
@@ -528,7 +536,7 @@ static void jz4780_dma_begin(struct jz4780_dma_chan *jzchan)
/* Set transfer type. */
jz4780_dma_chn_writel(jzdma, jzchan->id, JZ_DMA_REG_DRT,
- jzchan->transfer_type);
+ jzchan->desc->transfer_type);
/*
* Set the transfer count. This is redundant for a descriptor-driven
@@ -670,7 +678,7 @@ static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
{
const unsigned int soc_flags = jzdma->soc_data->flags;
struct jz4780_dma_desc *desc = jzchan->desc;
- uint32_t dcs;
+ u32 dcs;
bool ack = true;
spin_lock(&jzchan->vchan.lock);
@@ -727,7 +735,7 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
struct jz4780_dma_dev *jzdma = data;
unsigned int nb_channels = jzdma->soc_data->nb_channels;
unsigned long pending;
- uint32_t dmac;
+ u32 dmac;
int i;
pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
@@ -788,7 +796,8 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
return false;
}
- jzchan->transfer_type = data->transfer_type;
+ jzchan->transfer_type_tx = data->transfer_type_tx;
+ jzchan->transfer_type_rx = data->transfer_type_rx;
return true;
}
@@ -800,11 +809,17 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
dma_cap_mask_t mask = jzdma->dma_device.cap_mask;
struct jz4780_dma_filter_data data;
- if (dma_spec->args_count != 2)
+ if (dma_spec->args_count == 2) {
+ data.transfer_type_tx = dma_spec->args[0];
+ data.transfer_type_rx = dma_spec->args[0];
+ data.channel = dma_spec->args[1];
+ } else if (dma_spec->args_count == 3) {
+ data.transfer_type_tx = dma_spec->args[0];
+ data.transfer_type_rx = dma_spec->args[1];
+ data.channel = dma_spec->args[2];
+ } else {
return NULL;
-
- data.transfer_type = dma_spec->args[0];
- data.channel = dma_spec->args[1];
+ }
if (data.channel > -1) {
if (data.channel >= jzdma->soc_data->nb_channels) {
@@ -822,7 +837,8 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
return NULL;
}
- jzdma->chan[data.channel].transfer_type = data.transfer_type;
+ jzdma->chan[data.channel].transfer_type_tx = data.transfer_type_tx;
+ jzdma->chan[data.channel].transfer_type_rx = data.transfer_type_rx;
return dma_get_slave_channel(
&jzdma->chan[data.channel].vchan.chan);
@@ -938,6 +954,14 @@ static int jz4780_dma_probe(struct platform_device *pdev)
jzchan->vchan.desc_free = jz4780_dma_desc_free;
}
+ /*
+ * On JZ4760, chan0 won't enable properly the first time.
+ * Enabling then disabling chan1 will magically make chan0 work
+ * correctly.
+ */
+ jz4780_dma_chan_enable(jzdma, 1);
+ jz4780_dma_chan_disable(jzdma, 1);
+
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto err_disable_clk;
@@ -1011,12 +1035,36 @@ static const struct jz4780_dma_soc_data jz4760_dma_soc_data = {
.flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
};
+static const struct jz4780_dma_soc_data jz4760_mdma_soc_data = {
+ .nb_channels = 2,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
+static const struct jz4780_dma_soc_data jz4760_bdma_soc_data = {
+ .nb_channels = 3,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
+};
+
static const struct jz4780_dma_soc_data jz4760b_dma_soc_data = {
.nb_channels = 5,
.transfer_ord_max = 6,
.flags = JZ_SOC_DATA_PER_CHAN_PM,
};
+static const struct jz4780_dma_soc_data jz4760b_mdma_soc_data = {
+ .nb_channels = 2,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
+static const struct jz4780_dma_soc_data jz4760b_bdma_soc_data = {
+ .nb_channels = 3,
+ .transfer_ord_max = 6,
+ .flags = JZ_SOC_DATA_PER_CHAN_PM,
+};
+
static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
.nb_channels = 6,
.transfer_ord_max = 6,
@@ -1045,7 +1093,11 @@ static const struct of_device_id jz4780_dma_dt_match[] = {
{ .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data },
{ .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data },
{ .compatible = "ingenic,jz4760-dma", .data = &jz4760_dma_soc_data },
+ { .compatible = "ingenic,jz4760-mdma", .data = &jz4760_mdma_soc_data },
+ { .compatible = "ingenic,jz4760-bdma", .data = &jz4760_bdma_soc_data },
{ .compatible = "ingenic,jz4760b-dma", .data = &jz4760b_dma_soc_data },
+ { .compatible = "ingenic,jz4760b-mdma", .data = &jz4760b_mdma_soc_data },
+ { .compatible = "ingenic,jz4760b-bdma", .data = &jz4760b_bdma_soc_data },
{ .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data },
{ .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data },
{ .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data },
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index d9f7c097cfd6..2cfa8458b51b 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -1159,6 +1159,13 @@ int dma_async_device_register(struct dma_device *device)
return -EIO;
}
+ if (dma_has_cap(DMA_MEMCPY_SG, device->cap_mask) && !device->device_prep_dma_memcpy_sg) {
+ dev_err(device->dev,
+ "Device claims capability %s, but op is not defined\n",
+ "DMA_MEMCPY_SG");
+ return -EIO;
+ }
+
if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
dev_err(device->dev,
"Device claims capability %s, but op is not defined\n",
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index fab412349f7f..573ad8b86804 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -19,30 +19,6 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd);
static void idxd_wq_disable_cleanup(struct idxd_wq *wq);
/* Interrupt control bits */
-void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id)
-{
- struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
-
- pci_msi_mask_irq(data);
-}
-
-void idxd_mask_msix_vectors(struct idxd_device *idxd)
-{
- struct pci_dev *pdev = idxd->pdev;
- int msixcnt = pci_msix_vec_count(pdev);
- int i;
-
- for (i = 0; i < msixcnt; i++)
- idxd_mask_msix_vector(idxd, i);
-}
-
-void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
-{
- struct irq_data *data = irq_get_irq_data(idxd->irq_entries[vec_id].vector);
-
- pci_msi_unmask_irq(data);
-}
-
void idxd_unmask_error_interrupts(struct idxd_device *idxd)
{
union genctrl_reg genctrl;
@@ -382,14 +358,24 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq)
lockdep_assert_held(&wq->wq_lock);
memset(wq->wqcfg, 0, idxd->wqcfg_size);
wq->type = IDXD_WQT_NONE;
- wq->size = 0;
- wq->group = NULL;
wq->threshold = 0;
wq->priority = 0;
wq->ats_dis = 0;
+ wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
+ wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
+ wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+}
+
+static void idxd_wq_device_reset_cleanup(struct idxd_wq *wq)
+{
+ lockdep_assert_held(&wq->wq_lock);
+
+ idxd_wq_disable_cleanup(wq);
+ wq->size = 0;
+ wq->group = NULL;
}
static void idxd_wq_ref_release(struct percpu_ref *ref)
@@ -404,19 +390,31 @@ int idxd_wq_init_percpu_ref(struct idxd_wq *wq)
int rc;
memset(&wq->wq_active, 0, sizeof(wq->wq_active));
- rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release, 0, GFP_KERNEL);
+ rc = percpu_ref_init(&wq->wq_active, idxd_wq_ref_release,
+ PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
if (rc < 0)
return rc;
reinit_completion(&wq->wq_dead);
+ reinit_completion(&wq->wq_resurrect);
return 0;
}
-void idxd_wq_quiesce(struct idxd_wq *wq)
+void __idxd_wq_quiesce(struct idxd_wq *wq)
{
+ lockdep_assert_held(&wq->wq_lock);
+ reinit_completion(&wq->wq_resurrect);
percpu_ref_kill(&wq->wq_active);
+ complete_all(&wq->wq_resurrect);
wait_for_completion(&wq->wq_dead);
}
+void idxd_wq_quiesce(struct idxd_wq *wq)
+{
+ mutex_lock(&wq->wq_lock);
+ __idxd_wq_quiesce(wq);
+ mutex_unlock(&wq->wq_lock);
+}
+
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
@@ -572,7 +570,6 @@ void idxd_device_reset(struct idxd_device *idxd)
idxd_device_clear_state(idxd);
idxd->state = IDXD_DEV_DISABLED;
idxd_unmask_error_interrupts(idxd);
- idxd_msix_perm_setup(idxd);
spin_unlock(&idxd->dev_lock);
}
@@ -681,9 +678,9 @@ static void idxd_groups_clear_state(struct idxd_device *idxd)
memset(&group->grpcfg, 0, sizeof(group->grpcfg));
group->num_engines = 0;
group->num_wqs = 0;
- group->use_token_limit = false;
- group->tokens_allowed = 0;
- group->tokens_reserved = 0;
+ group->use_rdbuf_limit = false;
+ group->rdbufs_allowed = 0;
+ group->rdbufs_reserved = 0;
group->tc_a = -1;
group->tc_b = -1;
}
@@ -699,6 +696,7 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd)
if (wq->state == IDXD_WQ_ENABLED) {
idxd_wq_disable_cleanup(wq);
+ idxd_wq_device_reset_cleanup(wq);
wq->state = IDXD_WQ_DISABLED;
}
}
@@ -711,36 +709,6 @@ void idxd_device_clear_state(struct idxd_device *idxd)
idxd_device_wqs_clear_state(idxd);
}
-void idxd_msix_perm_setup(struct idxd_device *idxd)
-{
- union msix_perm mperm;
- int i, msixcnt;
-
- msixcnt = pci_msix_vec_count(idxd->pdev);
- if (msixcnt < 0)
- return;
-
- mperm.bits = 0;
- mperm.pasid = idxd->pasid;
- mperm.pasid_en = device_pasid_enabled(idxd);
- for (i = 1; i < msixcnt; i++)
- iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-}
-
-void idxd_msix_perm_clear(struct idxd_device *idxd)
-{
- union msix_perm mperm;
- int i, msixcnt;
-
- msixcnt = pci_msix_vec_count(idxd->pdev);
- if (msixcnt < 0)
- return;
-
- mperm.bits = 0;
- for (i = 1; i < msixcnt; i++)
- iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
-}
-
static void idxd_group_config_write(struct idxd_group *group)
{
struct idxd_device *idxd = group->idxd;
@@ -780,10 +748,10 @@ static int idxd_groups_config_write(struct idxd_device *idxd)
int i;
struct device *dev = &idxd->pdev->dev;
- /* Setup bandwidth token limit */
- if (idxd->hw.gen_cap.config_en && idxd->token_limit) {
+ /* Setup bandwidth rdbuf limit */
+ if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) {
reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
- reg.token_limit = idxd->token_limit;
+ reg.rdbuf_limit = idxd->rdbuf_limit;
iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
}
@@ -827,15 +795,12 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
wq->wqcfg->bits[i] = ioread32(idxd->reg_base + wq_offset);
}
+ if (wq->size == 0 && wq->type != IDXD_WQT_NONE)
+ wq->size = WQ_DEFAULT_QUEUE_DEPTH;
+
/* byte 0-3 */
wq->wqcfg->wq_size = wq->size;
- if (wq->size == 0) {
- idxd->cmd_status = IDXD_SCMD_WQ_NO_SIZE;
- dev_warn(dev, "Incorrect work queue size: 0\n");
- return -EINVAL;
- }
-
/* bytes 4-7 */
wq->wqcfg->wq_thresh = wq->threshold;
@@ -924,13 +889,12 @@ static void idxd_group_flags_setup(struct idxd_device *idxd)
group->tc_b = group->grpcfg.flags.tc_b = 1;
else
group->grpcfg.flags.tc_b = group->tc_b;
- group->grpcfg.flags.use_token_limit = group->use_token_limit;
- group->grpcfg.flags.tokens_reserved = group->tokens_reserved;
- if (group->tokens_allowed)
- group->grpcfg.flags.tokens_allowed =
- group->tokens_allowed;
+ group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit;
+ group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved;
+ if (group->rdbufs_allowed)
+ group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed;
else
- group->grpcfg.flags.tokens_allowed = idxd->max_tokens;
+ group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs;
}
}
@@ -981,8 +945,6 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
if (!wq->group)
continue;
- if (!wq->size)
- continue;
if (wq_shared(wq) && !device_swq_supported(idxd)) {
idxd->cmd_status = IDXD_SCMD_WQ_NO_SWQ_SUPPORT;
@@ -1123,7 +1085,7 @@ int idxd_device_load_config(struct idxd_device *idxd)
int i, rc;
reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
- idxd->token_limit = reg.token_limit;
+ idxd->rdbuf_limit = reg.rdbuf_limit;
for (i = 0; i < idxd->max_groups; i++) {
struct idxd_group *group = idxd->groups[i];
@@ -1142,6 +1104,106 @@ int idxd_device_load_config(struct idxd_device *idxd)
return 0;
}
+static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
+{
+ struct idxd_desc *desc, *itr;
+ struct llist_node *head;
+ LIST_HEAD(flist);
+ enum idxd_complete_type ctype;
+
+ spin_lock(&ie->list_lock);
+ head = llist_del_all(&ie->pending_llist);
+ if (head) {
+ llist_for_each_entry_safe(desc, itr, head, llnode)
+ list_add_tail(&desc->list, &ie->work_list);
+ }
+
+ list_for_each_entry_safe(desc, itr, &ie->work_list, list)
+ list_move_tail(&desc->list, &flist);
+ spin_unlock(&ie->list_lock);
+
+ list_for_each_entry_safe(desc, itr, &flist, list) {
+ list_del(&desc->list);
+ ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+ idxd_dma_complete_txd(desc, ctype, true);
+ }
+}
+
+static void idxd_device_set_perm_entry(struct idxd_device *idxd,
+ struct idxd_irq_entry *ie)
+{
+ union msix_perm mperm;
+
+ if (ie->pasid == INVALID_IOASID)
+ return;
+
+ mperm.bits = 0;
+ mperm.pasid = ie->pasid;
+ mperm.pasid_en = 1;
+ iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
+}
+
+static void idxd_device_clear_perm_entry(struct idxd_device *idxd,
+ struct idxd_irq_entry *ie)
+{
+ iowrite32(0, idxd->reg_base + idxd->msix_perm_offset + ie->id * 8);
+}
+
+void idxd_wq_free_irq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_irq_entry *ie = &wq->ie;
+
+ synchronize_irq(ie->vector);
+ free_irq(ie->vector, ie);
+ idxd_flush_pending_descs(ie);
+ if (idxd->request_int_handles)
+ idxd_device_release_int_handle(idxd, ie->int_handle, IDXD_IRQ_MSIX);
+ idxd_device_clear_perm_entry(idxd, ie);
+ ie->vector = -1;
+ ie->int_handle = INVALID_INT_HANDLE;
+ ie->pasid = INVALID_IOASID;
+}
+
+int idxd_wq_request_irq(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ struct idxd_irq_entry *ie;
+ int rc;
+
+ ie = &wq->ie;
+ ie->vector = pci_irq_vector(pdev, ie->id);
+ ie->pasid = device_pasid_enabled(idxd) ? idxd->pasid : INVALID_IOASID;
+ idxd_device_set_perm_entry(idxd, ie);
+
+ rc = request_threaded_irq(ie->vector, NULL, idxd_wq_thread, 0, "idxd-portal", ie);
+ if (rc < 0) {
+ dev_err(dev, "Failed to request irq %d.\n", ie->vector);
+ goto err_irq;
+ }
+
+ if (idxd->request_int_handles) {
+ rc = idxd_device_request_int_handle(idxd, ie->id, &ie->int_handle,
+ IDXD_IRQ_MSIX);
+ if (rc < 0)
+ goto err_int_handle;
+ } else {
+ ie->int_handle = ie->id;
+ }
+
+ return 0;
+
+err_int_handle:
+ ie->int_handle = INVALID_INT_HANDLE;
+ free_irq(ie->vector, ie);
+err_irq:
+ idxd_device_clear_perm_entry(idxd, ie);
+ ie->pasid = INVALID_IOASID;
+ return rc;
+}
+
int __drv_enable_wq(struct idxd_wq *wq)
{
struct idxd_device *idxd = wq->idxd;
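idxd_wq_request_irq() above uses the usual kernel unwind idiom: each failure label releases exactly what was acquired before the failing step, in reverse order. A standalone sketch of the shape, with hypothetical resource names standing in for the vector/perm-entry, irq, and int-handle steps:

#include <stdio.h>

static int get_vector(void)      { puts("vector + perm entry set"); return 0; }
static void put_vector(void)     { puts("perm entry cleared"); }
static int get_irq(void)         { puts("irq requested"); return 0; }
static void put_irq(void)        { puts("irq freed"); }
static int get_int_handle(int f) { return f ? -1 : 0; }

static int wq_request_irq(int fail_handle)
{
	int rc = get_vector();
	if (rc)
		return rc;
	rc = get_irq();
	if (rc)
		goto err_irq;
	rc = get_int_handle(fail_handle);
	if (rc)
		goto err_int_handle;
	return 0;

err_int_handle:
	put_irq();		/* undo get_irq() */
err_irq:
	put_vector();		/* undo get_vector() */
	return rc;
}

int main(void)
{
	return wq_request_irq(1) ? 0 : 1;	/* exercise the unwind path */
}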
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index c39e9483206a..bfff59617d04 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -21,20 +21,27 @@ static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c)
}
void idxd_dma_complete_txd(struct idxd_desc *desc,
- enum idxd_complete_type comp_type)
+ enum idxd_complete_type comp_type,
+ bool free_desc)
{
+ struct idxd_device *idxd = desc->wq->idxd;
struct dma_async_tx_descriptor *tx;
struct dmaengine_result res;
int complete = 1;
- if (desc->completion->status == DSA_COMP_SUCCESS)
+ if (desc->completion->status == DSA_COMP_SUCCESS) {
res.result = DMA_TRANS_NOERROR;
- else if (desc->completion->status)
+ } else if (desc->completion->status) {
+ if (idxd->request_int_handles && comp_type != IDXD_COMPLETE_ABORT &&
+ desc->completion->status == DSA_COMP_INT_HANDLE_INVAL &&
+ idxd_queue_int_handle_resubmit(desc))
+ return;
res.result = DMA_TRANS_WRITE_FAILED;
- else if (comp_type == IDXD_COMPLETE_ABORT)
+ } else if (comp_type == IDXD_COMPLETE_ABORT) {
res.result = DMA_TRANS_ABORTED;
- else
+ } else {
complete = 0;
+ }
tx = &desc->txd;
if (complete && tx->cookie) {
@@ -44,6 +51,9 @@ void idxd_dma_complete_txd(struct idxd_desc *desc,
tx->callback = NULL;
tx->callback_result = NULL;
}
+
+ if (free_desc)
+ idxd_free_desc(desc->wq, desc);
}
static void op_flag_setup(unsigned long flags, u32 *desc_flags)
@@ -153,8 +163,10 @@ static dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx)
cookie = dma_cookie_assign(tx);
rc = idxd_submit_desc(wq, desc);
- if (rc < 0)
+ if (rc < 0) {
+ idxd_free_desc(wq, desc);
return rc;
+ }
return cookie;
}
@@ -277,6 +289,14 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
mutex_lock(&wq->wq_lock);
wq->type = IDXD_WQT_KERNEL;
+
+ rc = idxd_wq_request_irq(wq);
+ if (rc < 0) {
+ idxd->cmd_status = IDXD_SCMD_WQ_IRQ_ERR;
+ dev_dbg(dev, "WQ %d irq setup failed: %d\n", wq->id, rc);
+ goto err_irq;
+ }
+
rc = __drv_enable_wq(wq);
if (rc < 0) {
dev_dbg(dev, "Enable wq %d failed: %d\n", wq->id, rc);
@@ -310,13 +330,15 @@ static int idxd_dmaengine_drv_probe(struct idxd_dev *idxd_dev)
return 0;
err_dma:
- idxd_wq_quiesce(wq);
+ __idxd_wq_quiesce(wq);
percpu_ref_exit(&wq->wq_active);
err_ref:
idxd_wq_free_resources(wq);
err_res_alloc:
__drv_disable_wq(wq);
err:
+ idxd_wq_free_irq(wq);
+err_irq:
wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
return rc;
@@ -327,11 +349,13 @@ static void idxd_dmaengine_drv_remove(struct idxd_dev *idxd_dev)
struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
mutex_lock(&wq->wq_lock);
- idxd_wq_quiesce(wq);
+ __idxd_wq_quiesce(wq);
idxd_unregister_dma_channel(wq);
idxd_wq_free_resources(wq);
__drv_disable_wq(wq);
percpu_ref_exit(&wq->wq_active);
+ idxd_wq_free_irq(wq);
+ wq->type = IDXD_WQT_NONE;
mutex_unlock(&wq->wq_lock);
}
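idxd_dma_complete_txd() now takes a free_desc flag, so one completion helper can either consume the descriptor or leave it with the caller, as the abort and resubmit paths above need. A standalone sketch of the ownership rule:

#include <stdio.h>
#include <stdlib.h>

struct desc { int id; };

/*
 * If free_desc is set, the helper consumes the descriptor; otherwise the
 * caller keeps ownership (e.g. to requeue or inspect it afterwards).
 */
static void complete_txd(struct desc *d, int free_desc)
{
	printf("completed desc %d\n", d->id);
	if (free_desc)
		free(d);
}

int main(void)
{
	struct desc *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	d->id = 7;
	complete_txd(d, 0);	/* caller still owns d here */
	complete_txd(d, 1);	/* now the helper frees it */
	return 0;
}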
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 0cf8d3145870..da72eb15f610 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -10,6 +10,7 @@
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/pci.h>
+#include <linux/ioasid.h>
#include <linux/perf_event.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
@@ -51,6 +52,9 @@ enum idxd_type {
#define IDXD_NAME_SIZE 128
#define IDXD_PMU_EVENT_MAX 64
+#define IDXD_ENQCMDS_RETRIES 32
+#define IDXD_ENQCMDS_MAX_RETRIES 64
+
struct idxd_device_driver {
const char *name;
enum idxd_dev_type *type;
@@ -64,8 +68,8 @@ extern struct idxd_device_driver idxd_drv;
extern struct idxd_device_driver idxd_dmaengine_drv;
extern struct idxd_device_driver idxd_user_drv;
+#define INVALID_INT_HANDLE -1
struct idxd_irq_entry {
- struct idxd_device *idxd;
int id;
int vector;
struct llist_head pending_llist;
@@ -75,6 +79,8 @@ struct idxd_irq_entry {
* and irq thread processing error descriptor.
*/
spinlock_t list_lock;
+ int int_handle;
+ ioasid_t pasid;
};
struct idxd_group {
@@ -84,9 +90,9 @@ struct idxd_group {
int id;
int num_engines;
int num_wqs;
- bool use_token_limit;
- u8 tokens_allowed;
- u8 tokens_reserved;
+ bool use_rdbuf_limit;
+ u8 rdbufs_allowed;
+ u8 rdbufs_reserved;
int tc_a;
int tc_b;
};
@@ -145,6 +151,10 @@ struct idxd_cdev {
#define WQ_NAME_SIZE 1024
#define WQ_TYPE_SIZE 10
+#define WQ_DEFAULT_QUEUE_DEPTH 16
+#define WQ_DEFAULT_MAX_XFER SZ_2M
+#define WQ_DEFAULT_MAX_BATCH 32
+
enum idxd_op_type {
IDXD_OP_BLOCK = 0,
IDXD_OP_NONBLOCK = 1,
@@ -164,13 +174,16 @@ struct idxd_dma_chan {
struct idxd_wq {
void __iomem *portal;
u32 portal_offset;
+ unsigned int enqcmds_retries;
struct percpu_ref wq_active;
struct completion wq_dead;
+ struct completion wq_resurrect;
struct idxd_dev idxd_dev;
struct idxd_cdev *idxd_cdev;
struct wait_queue_head err_queue;
struct idxd_device *idxd;
int id;
+ struct idxd_irq_entry ie;
enum idxd_wq_type type;
struct idxd_group *group;
int client_count;
@@ -251,6 +264,7 @@ struct idxd_device {
int id;
int major;
u32 cmd_status;
+ struct idxd_irq_entry ie; /* misc irq, msix 0 */
struct pci_dev *pdev;
void __iomem *reg_base;
@@ -266,6 +280,8 @@ struct idxd_device {
unsigned int pasid;
int num_groups;
+ int irq_cnt;
+ bool request_int_handles;
u32 msix_perm_offset;
u32 wqcfg_offset;
@@ -276,24 +292,20 @@ struct idxd_device {
u32 max_batch_size;
int max_groups;
int max_engines;
- int max_tokens;
+ int max_rdbufs;
int max_wqs;
int max_wq_size;
- int token_limit;
- int nr_tokens; /* non-reserved tokens */
+ int rdbuf_limit;
+ int nr_rdbufs; /* non-reserved read buffers */
unsigned int wqcfg_size;
union sw_err_reg sw_err;
wait_queue_head_t cmd_waitq;
- int num_wq_irqs;
- struct idxd_irq_entry *irq_entries;
struct idxd_dma_dev *idxd_dma;
struct workqueue_struct *wq;
struct work_struct work;
- int *int_handles;
-
struct idxd_pmu *idxd_pmu;
};
@@ -380,6 +392,21 @@ static inline void idxd_dev_set_type(struct idxd_dev *idev, int type)
idev->type = type;
}
+static inline struct idxd_irq_entry *idxd_get_ie(struct idxd_device *idxd, int idx)
+{
+ return (idx == 0) ? &idxd->ie : &idxd->wqs[idx - 1]->ie;
+}
+
+static inline struct idxd_wq *ie_to_wq(struct idxd_irq_entry *ie)
+{
+ return container_of(ie, struct idxd_wq, ie);
+}
+
+static inline struct idxd_device *ie_to_idxd(struct idxd_irq_entry *ie)
+{
+ return container_of(ie, struct idxd_device, ie);
+}
+
extern struct bus_type dsa_bus_type;
extern bool support_enqcmd;
@@ -518,17 +545,13 @@ void idxd_unregister_devices(struct idxd_device *idxd);
int idxd_register_driver(void);
void idxd_unregister_driver(void);
void idxd_wqs_quiesce(struct idxd_device *idxd);
+bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc);
/* device interrupt control */
-void idxd_msix_perm_setup(struct idxd_device *idxd);
-void idxd_msix_perm_clear(struct idxd_device *idxd);
irqreturn_t idxd_misc_thread(int vec, void *data);
irqreturn_t idxd_wq_thread(int irq, void *data);
void idxd_mask_error_interrupts(struct idxd_device *idxd);
void idxd_unmask_error_interrupts(struct idxd_device *idxd);
-void idxd_mask_msix_vectors(struct idxd_device *idxd);
-void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id);
-void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
/* device control */
int idxd_register_idxd_drv(void);
@@ -564,13 +587,17 @@ int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
int idxd_wq_disable_pasid(struct idxd_wq *wq);
+void __idxd_wq_quiesce(struct idxd_wq *wq);
void idxd_wq_quiesce(struct idxd_wq *wq);
int idxd_wq_init_percpu_ref(struct idxd_wq *wq);
+void idxd_wq_free_irq(struct idxd_wq *wq);
+int idxd_wq_request_irq(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype);
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc);
+int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc);
/* dmaengine */
int idxd_register_dma_device(struct idxd_device *idxd);
@@ -579,7 +606,7 @@ int idxd_register_dma_channel(struct idxd_wq *wq);
void idxd_unregister_dma_channel(struct idxd_wq *wq);
void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res);
void idxd_dma_complete_txd(struct idxd_desc *desc,
- enum idxd_complete_type comp_type);
+ enum idxd_complete_type comp_type, bool free_desc);
/* cdev */
int idxd_cdev_register(void);
@@ -603,10 +630,4 @@ static inline void perfmon_init(void) {}
static inline void perfmon_exit(void) {}
#endif
-static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
-{
- idxd_dma_complete_txd(desc, reason);
- idxd_free_desc(desc->wq, desc);
-}
-
#endif
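The new ie_to_wq() and ie_to_idxd() helpers above rely on container_of(): given a pointer to an embedded member, recover the enclosing structure by subtracting the member's offset. A standalone demonstration with simplified types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct irq_entry { int id; };

struct wq {
	int wq_id;
	struct irq_entry ie;	/* embedded, like struct idxd_wq::ie */
};

int main(void)
{
	struct wq w = { .wq_id = 3, .ie = { .id = 4 } };
	struct irq_entry *ie = &w.ie;
	struct wq *back = container_of(ie, struct wq, ie);

	printf("irq entry %d belongs to wq %d\n", ie->id, back->wq_id);
	return back == &w ? 0 : 1;
}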
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 7bf03f371ce1..08a5f4310188 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -72,7 +72,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
struct device *dev = &pdev->dev;
- struct idxd_irq_entry *irq_entry;
+ struct idxd_irq_entry *ie;
int i, msixcnt;
int rc = 0;
@@ -81,6 +81,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
dev_err(dev, "Not MSI-X interrupt capable.\n");
return -ENOSPC;
}
+ idxd->irq_cnt = msixcnt;
rc = pci_alloc_irq_vectors(pdev, msixcnt, msixcnt, PCI_IRQ_MSIX);
if (rc != msixcnt) {
@@ -89,87 +90,34 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
}
dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt);
- /*
- * We implement 1 completion list per MSI-X entry except for
- * entry 0, which is for errors and others.
- */
- idxd->irq_entries = kcalloc_node(msixcnt, sizeof(struct idxd_irq_entry),
- GFP_KERNEL, dev_to_node(dev));
- if (!idxd->irq_entries) {
- rc = -ENOMEM;
- goto err_irq_entries;
- }
-
- for (i = 0; i < msixcnt; i++) {
- idxd->irq_entries[i].id = i;
- idxd->irq_entries[i].idxd = idxd;
- idxd->irq_entries[i].vector = pci_irq_vector(pdev, i);
- spin_lock_init(&idxd->irq_entries[i].list_lock);
- }
-
- idxd_msix_perm_setup(idxd);
- irq_entry = &idxd->irq_entries[0];
- rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
- 0, "idxd-misc", irq_entry);
+ ie = idxd_get_ie(idxd, 0);
+ ie->vector = pci_irq_vector(pdev, 0);
+ rc = request_threaded_irq(ie->vector, NULL, idxd_misc_thread, 0, "idxd-misc", ie);
if (rc < 0) {
dev_err(dev, "Failed to allocate misc interrupt.\n");
goto err_misc_irq;
}
+ dev_dbg(dev, "Requested idxd-misc handler on msix vector %d\n", ie->vector);
- dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", irq_entry->vector);
-
- /* first MSI-X entry is not for wq interrupts */
- idxd->num_wq_irqs = msixcnt - 1;
+ for (i = 0; i < idxd->max_wqs; i++) {
+ int msix_idx = i + 1;
- for (i = 1; i < msixcnt; i++) {
- irq_entry = &idxd->irq_entries[i];
+ ie = idxd_get_ie(idxd, msix_idx);
+ ie->id = msix_idx;
+ ie->int_handle = INVALID_INT_HANDLE;
+ ie->pasid = INVALID_IOASID;
- init_llist_head(&idxd->irq_entries[i].pending_llist);
- INIT_LIST_HEAD(&idxd->irq_entries[i].work_list);
- rc = request_threaded_irq(irq_entry->vector, NULL,
- idxd_wq_thread, 0, "idxd-portal", irq_entry);
- if (rc < 0) {
- dev_err(dev, "Failed to allocate irq %d.\n", irq_entry->vector);
- goto err_wq_irqs;
- }
-
- dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", i, irq_entry->vector);
- if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
- /*
- * The MSIX vector enumeration starts at 1 with vector 0 being the
- * misc interrupt that handles non I/O completion events. The
- * interrupt handles are for IMS enumeration on guest. The misc
- * interrupt vector does not require a handle and therefore we start
- * the int_handles at index 0. Since 'i' starts at 1, the first
- * int_handles index will be 0.
- */
- rc = idxd_device_request_int_handle(idxd, i, &idxd->int_handles[i - 1],
- IDXD_IRQ_MSIX);
- if (rc < 0) {
- free_irq(irq_entry->vector, irq_entry);
- goto err_wq_irqs;
- }
- dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i - 1]);
- }
+ spin_lock_init(&ie->list_lock);
+ init_llist_head(&ie->pending_llist);
+ INIT_LIST_HEAD(&ie->work_list);
}
idxd_unmask_error_interrupts(idxd);
return 0;
- err_wq_irqs:
- while (--i >= 0) {
- irq_entry = &idxd->irq_entries[i];
- free_irq(irq_entry->vector, irq_entry);
- if (i != 0)
- idxd_device_release_int_handle(idxd,
- idxd->int_handles[i], IDXD_IRQ_MSIX);
- }
err_misc_irq:
- /* Disable error interrupt generation */
idxd_mask_error_interrupts(idxd);
- idxd_msix_perm_clear(idxd);
- err_irq_entries:
pci_free_irq_vectors(pdev);
dev_err(dev, "No usable interrupts\n");
return rc;
@@ -178,26 +126,16 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
static void idxd_cleanup_interrupts(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
- struct idxd_irq_entry *irq_entry;
- int i, msixcnt;
+ struct idxd_irq_entry *ie;
+ int msixcnt;
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt <= 0)
return;
- irq_entry = &idxd->irq_entries[0];
- free_irq(irq_entry->vector, irq_entry);
-
- for (i = 1; i < msixcnt; i++) {
-
- irq_entry = &idxd->irq_entries[i];
- if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE))
- idxd_device_release_int_handle(idxd, idxd->int_handles[i],
- IDXD_IRQ_MSIX);
- free_irq(irq_entry->vector, irq_entry);
- }
-
+ ie = idxd_get_ie(idxd, 0);
idxd_mask_error_interrupts(idxd);
+ free_irq(ie->vector, ie);
pci_free_irq_vectors(pdev);
}
@@ -237,8 +175,10 @@ static int idxd_setup_wqs(struct idxd_device *idxd)
mutex_init(&wq->wq_lock);
init_waitqueue_head(&wq->err_queue);
init_completion(&wq->wq_dead);
- wq->max_xfer_bytes = idxd->max_xfer_bytes;
- wq->max_batch_size = idxd->max_batch_size;
+ init_completion(&wq->wq_resurrect);
+ wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER;
+ wq->max_batch_size = WQ_DEFAULT_MAX_BATCH;
+ wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES;
wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev));
if (!wq->wqcfg) {
put_device(conf_dev);
@@ -379,13 +319,6 @@ static int idxd_setup_internals(struct idxd_device *idxd)
init_waitqueue_head(&idxd->cmd_waitq);
- if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE)) {
- idxd->int_handles = kcalloc_node(idxd->max_wqs, sizeof(int), GFP_KERNEL,
- dev_to_node(dev));
- if (!idxd->int_handles)
- return -ENOMEM;
- }
-
rc = idxd_setup_wqs(idxd);
if (rc < 0)
goto err_wqs;
@@ -416,7 +349,6 @@ static int idxd_setup_internals(struct idxd_device *idxd)
for (i = 0; i < idxd->max_wqs; i++)
put_device(wq_confdev(idxd->wqs[i]));
err_wqs:
- kfree(idxd->int_handles);
return rc;
}
@@ -451,6 +383,10 @@ static void idxd_read_caps(struct idxd_device *idxd)
dev_dbg(dev, "cmd_cap: %#x\n", idxd->hw.cmd_cap);
}
+ /* reading command capabilities */
+ if (idxd->hw.cmd_cap & BIT(IDXD_CMD_REQUEST_INT_HANDLE))
+ idxd->request_int_handles = true;
+
idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift;
dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes);
idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift;
@@ -464,9 +400,9 @@ static void idxd_read_caps(struct idxd_device *idxd)
dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits);
idxd->max_groups = idxd->hw.group_cap.num_groups;
dev_dbg(dev, "max groups: %u\n", idxd->max_groups);
- idxd->max_tokens = idxd->hw.group_cap.total_tokens;
- dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens);
- idxd->nr_tokens = idxd->max_tokens;
+ idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs;
+ dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs);
+ idxd->nr_rdbufs = idxd->max_rdbufs;
/* read engine capabilities */
idxd->hw.engine_cap.bits =
@@ -611,8 +547,6 @@ static int idxd_probe(struct idxd_device *idxd)
if (rc)
goto err_config;
- dev_dbg(dev, "IDXD interrupt setup complete.\n");
-
idxd->major = idxd_cdev_get_major(idxd);
rc = perfmon_pmu_init(idxd);
@@ -708,32 +642,6 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
}
-static void idxd_flush_pending_llist(struct idxd_irq_entry *ie)
-{
- struct idxd_desc *desc, *itr;
- struct llist_node *head;
-
- head = llist_del_all(&ie->pending_llist);
- if (!head)
- return;
-
- llist_for_each_entry_safe(desc, itr, head, llnode) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
- idxd_free_desc(desc->wq, desc);
- }
-}
-
-static void idxd_flush_work_list(struct idxd_irq_entry *ie)
-{
- struct idxd_desc *desc, *iter;
-
- list_for_each_entry_safe(desc, iter, &ie->work_list, list) {
- list_del(&desc->list);
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT);
- idxd_free_desc(desc->wq, desc);
- }
-}
-
void idxd_wqs_quiesce(struct idxd_device *idxd)
{
struct idxd_wq *wq;
@@ -746,47 +654,19 @@ void idxd_wqs_quiesce(struct idxd_device *idxd)
}
}
-static void idxd_release_int_handles(struct idxd_device *idxd)
-{
- struct device *dev = &idxd->pdev->dev;
- int i, rc;
-
- for (i = 0; i < idxd->num_wq_irqs; i++) {
- if (idxd->hw.cmd_cap & BIT(IDXD_CMD_RELEASE_INT_HANDLE)) {
- rc = idxd_device_release_int_handle(idxd, idxd->int_handles[i],
- IDXD_IRQ_MSIX);
- if (rc < 0)
- dev_warn(dev, "irq handle %d release failed\n",
- idxd->int_handles[i]);
- else
- dev_dbg(dev, "int handle requested: %u\n", idxd->int_handles[i]);
- }
- }
-}
-
static void idxd_shutdown(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
- int rc, i;
struct idxd_irq_entry *irq_entry;
- int msixcnt = pci_msix_vec_count(pdev);
+ int rc;
rc = idxd_device_disable(idxd);
if (rc)
dev_err(&pdev->dev, "Disabling device failed\n");
- dev_dbg(&pdev->dev, "%s called\n", __func__);
- idxd_mask_msix_vectors(idxd);
+ irq_entry = &idxd->ie;
+ synchronize_irq(irq_entry->vector);
idxd_mask_error_interrupts(idxd);
-
- for (i = 0; i < msixcnt; i++) {
- irq_entry = &idxd->irq_entries[i];
- synchronize_irq(irq_entry->vector);
- if (i == 0)
- continue;
- idxd_flush_pending_llist(irq_entry);
- idxd_flush_work_list(irq_entry);
- }
flush_workqueue(idxd->wq);
}
@@ -794,8 +674,6 @@ static void idxd_remove(struct pci_dev *pdev)
{
struct idxd_device *idxd = pci_get_drvdata(pdev);
struct idxd_irq_entry *irq_entry;
- int msixcnt = pci_msix_vec_count(pdev);
- int i;
idxd_unregister_devices(idxd);
/*
@@ -811,12 +689,8 @@ static void idxd_remove(struct pci_dev *pdev)
if (device_pasid_enabled(idxd))
idxd_disable_system_pasid(idxd);
- for (i = 0; i < msixcnt; i++) {
- irq_entry = &idxd->irq_entries[i];
- free_irq(irq_entry->vector, irq_entry);
- }
- idxd_msix_perm_clear(idxd);
- idxd_release_int_handles(idxd);
+ irq_entry = idxd_get_ie(idxd, 0);
+ free_irq(irq_entry->vector, irq_entry);
pci_free_irq_vectors(pdev);
pci_iounmap(pdev, idxd->reg_base);
iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
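After the rework above, probe only requests the misc interrupt; per-wq vectors are requested when the corresponding wq driver attaches. The resulting MSI-X ownership is fixed: vector 0 is the device's misc interrupt and vector N (N >= 1) belongs to work queue N-1, matching idxd_get_ie(). A standalone sketch of that mapping:

#include <stdio.h>

/* vector 0 -> device misc entry, vector n -> wq[n - 1], as in idxd_get_ie() */
static int vector_to_wq(int vec)
{
	return vec == 0 ? -1 : vec - 1;	/* -1: not a wq vector */
}

int main(void)
{
	for (int vec = 0; vec < 5; vec++) {
		int wq = vector_to_wq(vec);

		if (wq < 0)
			printf("vector %d -> misc\n", vec);
		else
			printf("vector %d -> wq %d\n", vec, wq);
	}
	return 0;
}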
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index cf2c8bc4f147..743ead5ebc57 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/dmaengine.h>
+#include <linux/delay.h>
#include <uapi/linux/idxd.h>
#include "../dmaengine.h"
#include "idxd.h"
@@ -22,6 +23,16 @@ struct idxd_fault {
struct idxd_device *idxd;
};
+struct idxd_resubmit {
+ struct work_struct work;
+ struct idxd_desc *desc;
+};
+
+struct idxd_int_handle_revoke {
+ struct work_struct work;
+ struct idxd_device *idxd;
+};
+
static void idxd_device_reinit(struct work_struct *work)
{
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
@@ -55,6 +66,162 @@ static void idxd_device_reinit(struct work_struct *work)
idxd_device_clear_state(idxd);
}
+/*
+ * The function sends a drain descriptor for the interrupt handle. The drain
+ * ensures that all descriptors with this interrupt handle are flushed and
+ * that the interrupt will allow the cleanup of the outstanding descriptors.
+ */
+static void idxd_int_handle_revoke_drain(struct idxd_irq_entry *ie)
+{
+ struct idxd_wq *wq = ie_to_wq(ie);
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ struct dsa_hw_desc desc = {};
+ void __iomem *portal;
+ int rc;
+
+ /* Issue a simple drain operation with interrupt but no completion record */
+ desc.flags = IDXD_OP_FLAG_RCI;
+ desc.opcode = DSA_OPCODE_DRAIN;
+ desc.priv = 1;
+
+ if (ie->pasid != INVALID_IOASID)
+ desc.pasid = ie->pasid;
+ desc.int_handle = ie->int_handle;
+ portal = idxd_wq_portal_addr(wq);
+
+ /*
+ * The wmb() makes sure that the descriptor is all there before we
+ * issue.
+ */
+ wmb();
+ if (wq_dedicated(wq)) {
+ iosubmit_cmds512(portal, &desc, 1);
+ } else {
+ rc = idxd_enqcmds(wq, portal, &desc);
+ /* This should not fail unless hardware failed. */
+ if (rc < 0)
+ dev_warn(dev, "Failed to submit drain desc on wq %d\n", wq->id);
+ }
+}
+
+static void idxd_abort_invalid_int_handle_descs(struct idxd_irq_entry *ie)
+{
+ LIST_HEAD(flist);
+ struct idxd_desc *d, *t;
+ struct llist_node *head;
+
+ spin_lock(&ie->list_lock);
+ head = llist_del_all(&ie->pending_llist);
+ if (head) {
+ llist_for_each_entry_safe(d, t, head, llnode)
+ list_add_tail(&d->list, &ie->work_list);
+ }
+
+ list_for_each_entry_safe(d, t, &ie->work_list, list) {
+ if (d->completion->status == DSA_COMP_INT_HANDLE_INVAL)
+ list_move_tail(&d->list, &flist);
+ }
+ spin_unlock(&ie->list_lock);
+
+ list_for_each_entry_safe(d, t, &flist, list) {
+ list_del(&d->list);
+ idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true);
+ }
+}
+
+static void idxd_int_handle_revoke(struct work_struct *work)
+{
+ struct idxd_int_handle_revoke *revoke =
+ container_of(work, struct idxd_int_handle_revoke, work);
+ struct idxd_device *idxd = revoke->idxd;
+ struct pci_dev *pdev = idxd->pdev;
+ struct device *dev = &pdev->dev;
+ int i, new_handle, rc;
+
+ if (!idxd->request_int_handles) {
+ kfree(revoke);
+ dev_warn(dev, "Unexpected int handle refresh interrupt.\n");
+ return;
+ }
+
+ /*
+	 * The loop attempts to acquire a new interrupt handle for every
+	 * interrupt vector that supports one. If a new interrupt handle is
+	 * acquired and the wq is kernel type, the driver will kill the
+	 * percpu_ref to pause all ongoing descriptor submissions. The
+	 * interrupt handle is then changed. After the change, the percpu_ref
+	 * is revived and all pending submitters are woken to try again. A
+	 * drain is sent for the interrupt handle at the end to make sure all
+	 * descriptors with the invalid handle are processed.
+ */
+ for (i = 1; i < idxd->irq_cnt; i++) {
+ struct idxd_irq_entry *ie = idxd_get_ie(idxd, i);
+ struct idxd_wq *wq = ie_to_wq(ie);
+
+ if (ie->int_handle == INVALID_INT_HANDLE)
+ continue;
+
+ rc = idxd_device_request_int_handle(idxd, i, &new_handle, IDXD_IRQ_MSIX);
+ if (rc < 0) {
+ dev_warn(dev, "get int handle %d failed: %d\n", i, rc);
+			 * Failed to acquire a new interrupt handle. Kill the WQ
+			 * and release all the pending submitters. The submitters
+			 * will get an error return code and handle it appropriately.
+ * get error return code and handle appropriately.
+ */
+ ie->int_handle = INVALID_INT_HANDLE;
+ idxd_wq_quiesce(wq);
+ idxd_abort_invalid_int_handle_descs(ie);
+ continue;
+ }
+
+ /* No change in interrupt handle, nothing needs to be done */
+ if (ie->int_handle == new_handle)
+ continue;
+
+ if (wq->state != IDXD_WQ_ENABLED || wq->type != IDXD_WQT_KERNEL) {
+ /*
+ * All the MSIX interrupts are allocated at once during probe.
+ * Therefore we need to update all interrupts even if the WQ
+			 * doesn't support interrupt operations.
+ */
+ ie->int_handle = new_handle;
+ continue;
+ }
+
+ mutex_lock(&wq->wq_lock);
+ reinit_completion(&wq->wq_resurrect);
+
+ /* Kill percpu_ref to pause additional descriptor submissions */
+ percpu_ref_kill(&wq->wq_active);
+
+		/* Wait for all submitters to quiesce before we change the interrupt handle */
+ wait_for_completion(&wq->wq_dead);
+
+ ie->int_handle = new_handle;
+
+ /* Revive percpu ref and wake up all the waiting submitters */
+ percpu_ref_reinit(&wq->wq_active);
+ complete_all(&wq->wq_resurrect);
+ mutex_unlock(&wq->wq_lock);
+
+ /*
+		 * The delay here waits for any MOVDIR64B issued before
+		 * percpu_ref_kill() took effect to have reached the PCIe domain
+		 * before the drain is issued. The driver needs to ensure that
+		 * the drain descriptor does not pass the other issued
+		 * descriptors that carry the invalid interrupt handle, so that
+		 * the drain descriptor's interrupt will allow the cleanup of
+		 * all the descriptors with the invalid interrupt handle.
+ */
+ if (wq_dedicated(wq))
+ udelay(100);
+ idxd_int_handle_revoke_drain(ie);
+ }
+ kfree(revoke);
+}
+
static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
struct device *dev = &idxd->pdev->dev;
@@ -101,6 +268,23 @@ static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
err = true;
}
+ if (cause & IDXD_INTC_INT_HANDLE_REVOKED) {
+ struct idxd_int_handle_revoke *revoke;
+
+ val |= IDXD_INTC_INT_HANDLE_REVOKED;
+
+ revoke = kzalloc(sizeof(*revoke), GFP_ATOMIC);
+ if (revoke) {
+ revoke->idxd = idxd;
+ INIT_WORK(&revoke->work, idxd_int_handle_revoke);
+ queue_work(idxd->wq, &revoke->work);
+
+ } else {
+ dev_err(dev, "Failed to allocate work for int handle revoke\n");
+ idxd_wqs_quiesce(idxd);
+ }
+ }
+
if (cause & IDXD_INTC_CMD) {
val |= IDXD_INTC_CMD;
complete(idxd->cmd_done);
@@ -157,7 +341,7 @@ halt:
irqreturn_t idxd_misc_thread(int vec, void *data)
{
struct idxd_irq_entry *irq_entry = data;
- struct idxd_device *idxd = irq_entry->idxd;
+ struct idxd_device *idxd = ie_to_idxd(irq_entry);
int rc;
u32 cause;
@@ -177,6 +361,51 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
return IRQ_HANDLED;
}
+static void idxd_int_handle_resubmit_work(struct work_struct *work)
+{
+ struct idxd_resubmit *irw = container_of(work, struct idxd_resubmit, work);
+ struct idxd_desc *desc = irw->desc;
+ struct idxd_wq *wq = desc->wq;
+ int rc;
+
+ desc->completion->status = 0;
+ rc = idxd_submit_desc(wq, desc);
+ if (rc < 0) {
+ dev_dbg(&wq->idxd->pdev->dev, "Failed to resubmit desc %d to wq %d.\n",
+ desc->id, wq->id);
+ /*
+		 * If the error is not -EAGAIN, the submission failed because
+		 * the wq was killed rather than because ENQCMDS failed. Here
+		 * the driver needs to notify the submitter of the failure by
+		 * reporting abort status.
+ *
+ * -EAGAIN comes from ENQCMDS failure. idxd_submit_desc() will handle the
+ * abort.
+ */
+ if (rc != -EAGAIN) {
+ desc->completion->status = IDXD_COMP_DESC_ABORT;
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, false);
+ }
+ idxd_free_desc(wq, desc);
+ }
+ kfree(irw);
+}
+
+bool idxd_queue_int_handle_resubmit(struct idxd_desc *desc)
+{
+ struct idxd_wq *wq = desc->wq;
+ struct idxd_device *idxd = wq->idxd;
+ struct idxd_resubmit *irw;
+
+ irw = kzalloc(sizeof(*irw), GFP_KERNEL);
+ if (!irw)
+ return false;
+
+ irw->desc = desc;
+ INIT_WORK(&irw->work, idxd_int_handle_resubmit_work);
+ queue_work(idxd->wq, &irw->work);
+ return true;
+}
+
static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
{
struct idxd_desc *desc, *t;
@@ -195,11 +424,11 @@ static void irq_process_pending_llist(struct idxd_irq_entry *irq_entry)
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
- complete_desc(desc, IDXD_COMPLETE_ABORT);
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
continue;
}
- complete_desc(desc, IDXD_COMPLETE_NORMAL);
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
} else {
spin_lock(&irq_entry->list_lock);
list_add_tail(&desc->list,
@@ -238,11 +467,11 @@ static void irq_process_work_list(struct idxd_irq_entry *irq_entry)
* and 0xff, which DSA_COMP_STATUS_MASK can mask out.
*/
if (unlikely(desc->completion->status == IDXD_COMP_DESC_ABORT)) {
- complete_desc(desc, IDXD_COMPLETE_ABORT);
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT, true);
continue;
}
- complete_desc(desc, IDXD_COMPLETE_NORMAL);
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL, true);
}
}
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 262c8220adbd..aa642aecdc0b 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -64,9 +64,9 @@ union wq_cap_reg {
union group_cap_reg {
struct {
u64 num_groups:8;
- u64 total_tokens:8;
- u64 token_en:1;
- u64 token_limit:1;
+ u64 total_rdbufs:8; /* formerly total_tokens */
+ u64 rdbuf_ctrl:1; /* formerly token_en */
+ u64 rdbuf_limit:1; /* formerly token_limit */
u64 rsvd:46;
};
u64 bits;
@@ -110,7 +110,7 @@ union offsets_reg {
#define IDXD_GENCFG_OFFSET 0x80
union gencfg_reg {
struct {
- u32 token_limit:8;
+ u32 rdbuf_limit:8;
u32 rsvd:4;
u32 user_int_en:1;
u32 rsvd2:19;
@@ -158,6 +158,7 @@ enum idxd_device_reset_type {
#define IDXD_INTC_OCCUPY 0x04
#define IDXD_INTC_PERFMON_OVFL 0x08
#define IDXD_INTC_HALT_STATE 0x10
+#define IDXD_INTC_INT_HANDLE_REVOKED 0x80000000
#define IDXD_CMD_OFFSET 0xa0
union idxd_command_reg {
@@ -287,10 +288,10 @@ union group_flags {
u32 tc_a:3;
u32 tc_b:3;
u32 rsvd:1;
- u32 use_token_limit:1;
- u32 tokens_reserved:8;
+ u32 use_rdbuf_limit:1;
+ u32 rdbufs_reserved:8;
u32 rsvd2:4;
- u32 tokens_allowed:8;
+ u32 rdbufs_allowed:8;
u32 rsvd3:4;
};
u32 bits;
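The renamed GRPCAP fields keep the exact bit layout; only the token* names became rdbuf*. A standalone sketch of how such a register union decodes a raw 64-bit value (the sample value is made up):

#include <stdio.h>
#include <stdint.h>

union group_cap {
	struct {
		uint64_t num_groups:8;
		uint64_t total_rdbufs:8;	/* formerly total_tokens */
		uint64_t rdbuf_ctrl:1;		/* formerly token_en */
		uint64_t rdbuf_limit:1;		/* formerly token_limit */
		uint64_t rsvd:46;
	};
	uint64_t bits;
};

int main(void)
{
	union group_cap cap = { .bits = 0x36004ULL };	/* arbitrary example */

	printf("groups=%llu rdbufs=%llu ctrl=%llu limit=%llu\n",
	       (unsigned long long)cap.num_groups,
	       (unsigned long long)cap.total_rdbufs,
	       (unsigned long long)cap.rdbuf_ctrl,
	       (unsigned long long)cap.rdbuf_limit);
	return 0;
}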
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 83452fbbb168..e289fd48711a 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -21,15 +21,6 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
if (device_pasid_enabled(idxd))
desc->hw->pasid = idxd->pasid;
- /*
- * On host, MSIX vecotr 0 is used for misc interrupt. Therefore when we match
- * vector 1:1 to the WQ id, we need to add 1
- */
- if (!idxd->int_handles)
- desc->hw->int_handle = wq->id + 1;
- else
- desc->hw->int_handle = idxd->int_handles[wq->id];
-
return desc;
}
@@ -134,35 +125,58 @@ static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
spin_unlock(&ie->list_lock);
if (found)
- complete_desc(found, IDXD_COMPLETE_ABORT);
+ idxd_dma_complete_txd(found, IDXD_COMPLETE_ABORT, false);
/*
- * complete_desc() will return desc to allocator and the desc can be
- * acquired by a different process and the desc->list can be modified.
- * Delete desc from list so the list trasversing does not get corrupted
- * by the other process.
+	 * Completing the descriptor returns desc to the allocator, where it
+	 * can be acquired by a different process that may then modify
+	 * desc->list. Delete desc from the list so that list traversal does
+	 * not get corrupted by the other process.
*/
list_for_each_entry_safe(d, t, &flist, list) {
list_del_init(&d->list);
- complete_desc(d, IDXD_COMPLETE_NORMAL);
+		idxd_dma_complete_txd(d, IDXD_COMPLETE_ABORT, true);
}
}
+/*
+ * ENQCMDS typically fails when the WQ is inactive or busy. For host submission,
+ * the driver can bound the number of descriptors submitted to a shared wq by
+ * limiting the number of driver-allocated descriptors to the wq size. However,
+ * when the swq is exported to a guest kernel, it may be shared with multiple
+ * guest kernels, which makes it significantly more likely that submission to
+ * the swq returns busy. A tunable retry mechanism allows the driver to keep
+ * trying for a while before giving up. The sysfs knob can be tuned by the
+ * system administrator.
+ */
+int idxd_enqcmds(struct idxd_wq *wq, void __iomem *portal, const void *desc)
+{
+ int rc, retries = 0;
+
+ do {
+ rc = enqcmds(portal, desc);
+ if (rc == 0)
+ break;
+ cpu_relax();
+ } while (retries++ < wq->enqcmds_retries);
+
+ return rc;
+}
+
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
struct idxd_device *idxd = wq->idxd;
struct idxd_irq_entry *ie = NULL;
+ u32 desc_flags = desc->hw->flags;
void __iomem *portal;
int rc;
- if (idxd->state != IDXD_DEV_ENABLED) {
- idxd_free_desc(wq, desc);
+ if (idxd->state != IDXD_DEV_ENABLED)
return -EIO;
- }
if (!percpu_ref_tryget_live(&wq->wq_active)) {
- idxd_free_desc(wq, desc);
- return -ENXIO;
+ wait_for_completion(&wq->wq_resurrect);
+ if (!percpu_ref_tryget_live(&wq->wq_active))
+ return -ENXIO;
}
portal = idxd_wq_portal_addr(wq);
@@ -178,28 +192,21 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
* Pending the descriptor to the lockless list for the irq_entry
* that we designated the descriptor to.
*/
- if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
- ie = &idxd->irq_entries[wq->id + 1];
+ if (desc_flags & IDXD_OP_FLAG_RCI) {
+ ie = &wq->ie;
+ desc->hw->int_handle = ie->int_handle;
llist_add(&desc->llnode, &ie->pending_llist);
}
if (wq_dedicated(wq)) {
iosubmit_cmds512(portal, desc->hw, 1);
} else {
- /*
- * It's not likely that we would receive queue full rejection
- * since the descriptor allocation gates at wq size. If we
- * receive a -EAGAIN, that means something went wrong such as the
- * device is not accepting descriptor at all.
- */
- rc = enqcmds(portal, desc->hw);
+ rc = idxd_enqcmds(wq, portal, desc->hw);
if (rc < 0) {
percpu_ref_put(&wq->wq_active);
/* abort operation frees the descriptor */
if (ie)
llist_abort_desc(wq, ie, desc);
- else
- idxd_free_desc(wq, desc);
return rc;
}
}
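idxd_enqcmds() above is a plain bounded retry around the ENQCMDS instruction wrapper: keep resubmitting while the portal reports busy, up to the per-wq enqcmds_retries cap. A standalone sketch with a simulated portal:

#include <stdio.h>
#include <errno.h>

static int busy_left = 3;	/* simulate a portal that is busy 3 times */

static int fake_enqcmds(void)
{
	return busy_left-- > 0 ? -EAGAIN : 0;
}

static int submit_with_retries(unsigned int max_retries)
{
	unsigned int retries = 0;
	int rc;

	do {
		rc = fake_enqcmds();
		if (rc == 0)
			break;
		/* the kernel inserts cpu_relax() here */
	} while (retries++ < max_retries);

	return rc;
}

int main(void)
{
	printf("rc=%d\n", submit_with_retries(32));	/* succeeds */
	busy_left = 100;
	printf("rc=%d\n", submit_with_retries(2));	/* gives up: -EAGAIN */
	return 0;
}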
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index a9025be940db..7e19ab92b61a 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -99,31 +99,39 @@ struct device_type idxd_engine_device_type = {
/* Group attributes */
-static void idxd_set_free_tokens(struct idxd_device *idxd)
+static void idxd_set_free_rdbufs(struct idxd_device *idxd)
{
- int i, tokens;
+ int i, rdbufs;
- for (i = 0, tokens = 0; i < idxd->max_groups; i++) {
+ for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) {
struct idxd_group *g = idxd->groups[i];
- tokens += g->tokens_reserved;
+ rdbufs += g->rdbufs_reserved;
}
- idxd->nr_tokens = idxd->max_tokens - tokens;
+ idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs;
+}
+
+static ssize_t group_read_buffers_reserved_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+
+ return sysfs_emit(buf, "%u\n", group->rdbufs_reserved);
}
static ssize_t group_tokens_reserved_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group = confdev_to_group(dev);
-
- return sysfs_emit(buf, "%u\n", group->tokens_reserved);
+ dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
+ return group_read_buffers_reserved_show(dev, attr, buf);
}
-static ssize_t group_tokens_reserved_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t group_read_buffers_reserved_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
@@ -143,33 +151,53 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
- if (val > idxd->max_tokens)
+ if (val > idxd->max_rdbufs)
return -EINVAL;
- if (val > idxd->nr_tokens + group->tokens_reserved)
+ if (val > idxd->nr_rdbufs + group->rdbufs_reserved)
return -EINVAL;
- group->tokens_reserved = val;
- idxd_set_free_tokens(idxd);
+ group->rdbufs_reserved = val;
+ idxd_set_free_rdbufs(idxd);
return count;
}
+static ssize_t group_tokens_reserved_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ dev_warn_once(dev, "attribute deprecated, see read_buffers_reserved.\n");
+ return group_read_buffers_reserved_store(dev, attr, buf, count);
+}
+
static struct device_attribute dev_attr_group_tokens_reserved =
__ATTR(tokens_reserved, 0644, group_tokens_reserved_show,
group_tokens_reserved_store);
+static struct device_attribute dev_attr_group_read_buffers_reserved =
+ __ATTR(read_buffers_reserved, 0644, group_read_buffers_reserved_show,
+ group_read_buffers_reserved_store);
+
+static ssize_t group_read_buffers_allowed_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+
+ return sysfs_emit(buf, "%u\n", group->rdbufs_allowed);
+}
+
static ssize_t group_tokens_allowed_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group = confdev_to_group(dev);
-
- return sysfs_emit(buf, "%u\n", group->tokens_allowed);
+ dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
+ return group_read_buffers_allowed_show(dev, attr, buf);
}
-static ssize_t group_tokens_allowed_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t group_read_buffers_allowed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
@@ -190,29 +218,49 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
return -EPERM;
if (val < 4 * group->num_engines ||
- val > group->tokens_reserved + idxd->nr_tokens)
+ val > group->rdbufs_reserved + idxd->nr_rdbufs)
return -EINVAL;
- group->tokens_allowed = val;
+ group->rdbufs_allowed = val;
return count;
}
+static ssize_t group_tokens_allowed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ dev_warn_once(dev, "attribute deprecated, see read_buffers_allowed.\n");
+ return group_read_buffers_allowed_store(dev, attr, buf, count);
+}
+
static struct device_attribute dev_attr_group_tokens_allowed =
__ATTR(tokens_allowed, 0644, group_tokens_allowed_show,
group_tokens_allowed_store);
+static struct device_attribute dev_attr_group_read_buffers_allowed =
+ __ATTR(read_buffers_allowed, 0644, group_read_buffers_allowed_show,
+ group_read_buffers_allowed_store);
+
+static ssize_t group_use_read_buffer_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct idxd_group *group = confdev_to_group(dev);
+
+ return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit);
+}
+
static ssize_t group_use_token_limit_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct idxd_group *group = confdev_to_group(dev);
-
- return sysfs_emit(buf, "%u\n", group->use_token_limit);
+ dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
+ return group_use_read_buffer_limit_show(dev, attr, buf);
}
-static ssize_t group_use_token_limit_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t group_use_read_buffer_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct idxd_group *group = confdev_to_group(dev);
struct idxd_device *idxd = group->idxd;
@@ -232,17 +280,29 @@ static ssize_t group_use_token_limit_store(struct device *dev,
if (idxd->state == IDXD_DEV_ENABLED)
return -EPERM;
- if (idxd->token_limit == 0)
+ if (idxd->rdbuf_limit == 0)
return -EPERM;
- group->use_token_limit = !!val;
+ group->use_rdbuf_limit = !!val;
return count;
}
+static ssize_t group_use_token_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ dev_warn_once(dev, "attribute deprecated, see use_read_buffer_limit.\n");
+ return group_use_read_buffer_limit_store(dev, attr, buf, count);
+}
+
static struct device_attribute dev_attr_group_use_token_limit =
__ATTR(use_token_limit, 0644, group_use_token_limit_show,
group_use_token_limit_store);
+static struct device_attribute dev_attr_group_use_read_buffer_limit =
+ __ATTR(use_read_buffer_limit, 0644, group_use_read_buffer_limit_show,
+ group_use_read_buffer_limit_store);
+
static ssize_t group_engines_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -387,8 +447,11 @@ static struct attribute *idxd_group_attributes[] = {
&dev_attr_group_work_queues.attr,
&dev_attr_group_engines.attr,
&dev_attr_group_use_token_limit.attr,
+ &dev_attr_group_use_read_buffer_limit.attr,
&dev_attr_group_tokens_allowed.attr,
+ &dev_attr_group_read_buffers_allowed.attr,
&dev_attr_group_tokens_reserved.attr,
+ &dev_attr_group_read_buffers_reserved.attr,
&dev_attr_group_traffic_class_a.attr,
&dev_attr_group_traffic_class_b.attr,
NULL,
@@ -945,6 +1008,41 @@ static ssize_t wq_occupancy_show(struct device *dev, struct device_attribute *at
static struct device_attribute dev_attr_wq_occupancy =
__ATTR(occupancy, 0444, wq_occupancy_show, NULL);
+static ssize_t wq_enqcmds_retries_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+
+ if (wq_dedicated(wq))
+ return -EOPNOTSUPP;
+
+ return sysfs_emit(buf, "%u\n", wq->enqcmds_retries);
+}
+
+static ssize_t wq_enqcmds_retries_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = confdev_to_wq(dev);
+ int rc;
+ unsigned int retries;
+
+ if (wq_dedicated(wq))
+ return -EOPNOTSUPP;
+
+ rc = kstrtouint(buf, 10, &retries);
+ if (rc < 0)
+ return rc;
+
+ if (retries > IDXD_ENQCMDS_MAX_RETRIES)
+ retries = IDXD_ENQCMDS_MAX_RETRIES;
+
+ wq->enqcmds_retries = retries;
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_enqcmds_retries =
+ __ATTR(enqcmds_retries, 0644, wq_enqcmds_retries_show, wq_enqcmds_retries_store);
+
static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_clients.attr,
&dev_attr_wq_state.attr,
@@ -961,6 +1059,7 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_max_batch_size.attr,
&dev_attr_wq_ats_disable.attr,
&dev_attr_wq_occupancy.attr,
+ &dev_attr_wq_enqcmds_retries.attr,
NULL,
};
@@ -1156,26 +1255,42 @@ static ssize_t errors_show(struct device *dev,
}
static DEVICE_ATTR_RO(errors);
+static ssize_t max_read_buffers_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd = confdev_to_idxd(dev);
+
+ return sysfs_emit(buf, "%u\n", idxd->max_rdbufs);
+}
+
static ssize_t max_tokens_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ dev_warn_once(dev, "attribute deprecated, see max_read_buffers.\n");
+ return max_read_buffers_show(dev, attr, buf);
+}
+
+static DEVICE_ATTR_RO(max_tokens); /* deprecated */
+static DEVICE_ATTR_RO(max_read_buffers);
+
+static ssize_t read_buffer_limit_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
struct idxd_device *idxd = confdev_to_idxd(dev);
- return sysfs_emit(buf, "%u\n", idxd->max_tokens);
+ return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit);
}
-static DEVICE_ATTR_RO(max_tokens);
static ssize_t token_limit_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct idxd_device *idxd = confdev_to_idxd(dev);
-
- return sysfs_emit(buf, "%u\n", idxd->token_limit);
+ dev_warn_once(dev, "attribute deprecated, see read_buffer_limit.\n");
+ return read_buffer_limit_show(dev, attr, buf);
}
-static ssize_t token_limit_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t read_buffer_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct idxd_device *idxd = confdev_to_idxd(dev);
unsigned long val;
@@ -1191,16 +1306,26 @@ static ssize_t token_limit_store(struct device *dev,
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
- if (!idxd->hw.group_cap.token_limit)
+ if (!idxd->hw.group_cap.rdbuf_limit)
return -EPERM;
- if (val > idxd->hw.group_cap.total_tokens)
+ if (val > idxd->hw.group_cap.total_rdbufs)
return -EINVAL;
- idxd->token_limit = val;
+ idxd->rdbuf_limit = val;
return count;
}
-static DEVICE_ATTR_RW(token_limit);
+
+static ssize_t token_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ dev_warn_once(dev, "attribute deprecated, see read_buffer_limit\n");
+ return read_buffer_limit_store(dev, attr, buf, count);
+}
+
+static DEVICE_ATTR_RW(token_limit); /* deprecated */
+static DEVICE_ATTR_RW(read_buffer_limit);
static ssize_t cdev_major_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1246,7 +1371,9 @@ static struct attribute *idxd_device_attributes[] = {
&dev_attr_state.attr,
&dev_attr_errors.attr,
&dev_attr_max_tokens.attr,
+ &dev_attr_max_read_buffers.attr,
&dev_attr_token_limit.attr,
+ &dev_attr_read_buffer_limit.attr,
&dev_attr_cdev_major.attr,
&dev_attr_cmd_status.attr,
NULL,
@@ -1268,8 +1395,6 @@ static void idxd_conf_device_release(struct device *dev)
kfree(idxd->groups);
kfree(idxd->wqs);
kfree(idxd->engines);
- kfree(idxd->irq_entries);
- kfree(idxd->int_handles);
ida_free(&idxd_ida, idxd->id);
kfree(idxd);
}
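
The idxd hunks above keep the old token-based sysfs names alive as warn-once shims over the renamed read-buffer attributes, so existing tooling keeps working while users migrate. A minimal sketch of that deprecation pattern, with hypothetical names (struct foo, old_name/new_name) standing in for the real attributes:

#include <linux/device.h>
#include <linux/sysfs.h>

struct foo {
	unsigned int value;
};

static ssize_t new_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct foo *foo = dev_get_drvdata(dev);

	return sysfs_emit(buf, "%u\n", foo->value);
}
static DEVICE_ATTR_RO(new_name);

static ssize_t old_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	/* Warn once, then defer to the renamed attribute. */
	dev_warn_once(dev, "attribute deprecated, see new_name.\n");
	return new_name_show(dev, attr, buf);
}
static DEVICE_ATTR_RO(old_name);	/* deprecated alias */

Both names are then listed in the attribute array, as the idxd hunk does, so the deprecated spelling stays visible until it is finally removed.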
diff --git a/drivers/dma/ioat/sysfs.c b/drivers/dma/ioat/sysfs.c
index aa44bcd6a356..168adf28c5b1 100644
--- a/drivers/dma/ioat/sysfs.c
+++ b/drivers/dma/ioat/sysfs.c
@@ -158,8 +158,9 @@ static struct attribute *ioat_attrs[] = {
&intr_coalesce_attr.attr,
NULL,
};
+ATTRIBUTE_GROUPS(ioat);
struct kobj_type ioat_ktype = {
.sysfs_ops = &ioat_sysfs_ops,
- .default_attrs = ioat_attrs,
+ .default_groups = ioat_groups,
};
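
The ioat conversion above is the stock recipe for the removal of kobj_type::default_attrs: wrap the existing attribute array with ATTRIBUTE_GROUPS() and point default_groups at the generated array. A sketch under hypothetical foo_* names:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t foo_cap_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "1\n");
}
static struct kobj_attribute foo_cap_attr = __ATTR_RO(foo_cap);

static struct attribute *foo_attrs[] = {
	&foo_cap_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(foo);			/* emits foo_groups[] from foo_attrs[] */

static struct kobj_type foo_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= foo_groups,	/* replaces the removed .default_attrs */
};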
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index a23563cd118b..5a53d7fcef01 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -727,12 +727,6 @@ static int mmp_pdma_config_write(struct dma_chan *dchan,
chan->dir = direction;
chan->dev_addr = addr;
- /* FIXME: drivers should be ported over to use the filter
- * function. Once that's done, the following two lines can
- * be removed.
- */
- if (cfg->slave_id)
- chan->drcmr = cfg->slave_id;
return 0;
}
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 9b0d463f89bb..9c8b4084ba2f 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -149,7 +149,7 @@ struct mv_xor_v2_descriptor {
* @desc_size: HW descriptor size
* @npendings: number of pending descriptors (for which tx_submit has
* been called, but not yet issue_pending)
* @hw_queue_idx: HW queue index
- * @msi_desc: local interrupt descriptor information
+ * @irq: The Linux interrupt number
*/
struct mv_xor_v2_device {
@@ -168,7 +168,7 @@ struct mv_xor_v2_device {
int desc_size;
unsigned int npendings;
unsigned int hw_queue_idx;
- struct msi_desc *msi_desc;
+ unsigned int irq;
};
/**
@@ -718,7 +718,6 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
int i, ret = 0;
struct dma_device *dma_dev;
struct mv_xor_v2_sw_desc *sw_desc;
- struct msi_desc *msi_desc;
BUILD_BUG_ON(sizeof(struct mv_xor_v2_descriptor) !=
MV_XOR_V2_EXT_DESC_SIZE);
@@ -770,14 +769,9 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
if (ret)
goto disable_clk;
- msi_desc = first_msi_entry(&pdev->dev);
- if (!msi_desc) {
- ret = -ENODEV;
- goto free_msi_irqs;
- }
- xor_dev->msi_desc = msi_desc;
+ xor_dev->irq = msi_get_virq(&pdev->dev, 0);
- ret = devm_request_irq(&pdev->dev, msi_desc->irq,
+ ret = devm_request_irq(&pdev->dev, xor_dev->irq,
mv_xor_v2_interrupt_handler, 0,
dev_name(&pdev->dev), xor_dev);
if (ret)
@@ -892,7 +886,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev)
xor_dev->desc_size * MV_XOR_V2_DESC_NUM,
xor_dev->hw_desq_virt, xor_dev->hw_desq);
- devm_free_irq(&pdev->dev, xor_dev->msi_desc->irq, xor_dev);
+ devm_free_irq(&pdev->dev, xor_dev->irq, xor_dev);
platform_msi_domain_free_irqs(&pdev->dev);
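
This hunk is part of the tree-wide move away from walking struct msi_desc lists: msi_get_virq() maps a device plus MSI index straight to a Linux interrupt number. A minimal sketch of the lookup-and-request step, assuming a platform-MSI device whose vectors have already been allocated (foo_request_msi0() is illustrative):

#include <linux/interrupt.h>
#include <linux/msi.h>

static int foo_request_msi0(struct device *dev, irq_handler_t handler,
			    void *data)
{
	unsigned int virq = msi_get_virq(dev, 0);	/* MSI index 0 */

	if (!virq)		/* 0 means no interrupt at that index */
		return -ENODEV;

	return devm_request_irq(dev, virq, handler, 0, dev_name(dev), data);
}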
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 1da04112fcdb..c359decc07a3 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -835,7 +835,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
goto err_disable_pdev;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "Cannot set proper DMA config\n");
goto err_free_res;
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index e2b5129c5f84..5e46e347e28b 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -3240,7 +3240,6 @@ static int ppc440spe_adma_dma2rxor_prep_src(
struct ppc440spe_rxor *cursor, int index,
int src_cnt, u32 addr)
{
- int rval = 0;
u32 sign;
struct ppc440spe_adma_desc_slot *desc = hdesc;
int i;
@@ -3348,7 +3347,7 @@ static int ppc440spe_adma_dma2rxor_prep_src(
break;
}
- return rval;
+ return 0;
}
/**
diff --git a/drivers/dma/ptdma/ptdma-dev.c b/drivers/dma/ptdma/ptdma-dev.c
index 8a6bf291a73f..daafea5bc35d 100644
--- a/drivers/dma/ptdma/ptdma-dev.c
+++ b/drivers/dma/ptdma/ptdma-dev.c
@@ -207,7 +207,7 @@ int pt_core_init(struct pt_device *pt)
if (!cmd_q->qbase) {
dev_err(dev, "unable to allocate command queue\n");
ret = -ENOMEM;
- goto e_dma_alloc;
+ goto e_destroy_pool;
}
cmd_q->qidx = 0;
@@ -229,8 +229,10 @@ int pt_core_init(struct pt_device *pt)
/* Request an irq */
ret = request_irq(pt->pt_irq, pt_core_irq_handler, 0, dev_name(pt->dev), pt);
- if (ret)
- goto e_pool;
+ if (ret) {
+ dev_err(dev, "unable to allocate an IRQ\n");
+ goto e_free_dma;
+ }
/* Update the device registers with queue information. */
cmd_q->qcontrol &= ~CMD_Q_SIZE;
@@ -250,21 +252,20 @@ int pt_core_init(struct pt_device *pt)
/* Register the DMA engine support */
ret = pt_dmaengine_register(pt);
if (ret)
- goto e_dmaengine;
+ goto e_free_irq;
/* Set up debugfs entries */
ptdma_debugfs_setup(pt);
return 0;
-e_dmaengine:
+e_free_irq:
free_irq(pt->pt_irq, pt);
-e_dma_alloc:
+e_free_dma:
dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase, cmd_q->qbase_dma);
-e_pool:
- dev_err(dev, "unable to allocate an IRQ\n");
+e_destroy_pool:
dma_pool_destroy(pt->cmd_q.dma_pool);
return ret;
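
The relabelled exit path in pt_core_init() restores the usual convention: error labels are named for the cleanup they perform and unwind in exact reverse order of setup, with each dev_err() next to the failure it reports. A schematic of the pattern with stubbed steps (in the driver these are pool, DMA buffer, and IRQ allocation):

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return 0; }
static void undo_a(void) { }
static void undo_b(void) { }

static int foo_init(void)
{
	int ret;

	ret = step_a();
	if (ret)
		return ret;

	ret = step_b();
	if (ret)
		goto e_undo_a;

	ret = step_c();
	if (ret)
		goto e_undo_b;	/* each label undoes everything before it */

	return 0;

e_undo_b:
	undo_b();
e_undo_a:
	undo_a();
	return ret;
}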
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 52d04641e361..6078cc81892e 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -909,13 +909,6 @@ static void pxad_get_config(struct pxad_chan *chan,
*dcmd |= PXA_DCMD_BURST16;
else if (maxburst == 32)
*dcmd |= PXA_DCMD_BURST32;
-
- /* FIXME: drivers should be ported over to use the filter
- * function. Once that's done, the following two lines can
- * be removed.
- */
- if (chan->cfg.slave_id)
- chan->drcmr = chan->cfg.slave_id;
}
static struct dma_async_tx_descriptor *
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index 1a1b7d8458c9..94f3648f7483 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -2206,10 +2206,8 @@ static int gpi_probe(struct platform_device *pdev)
/* set up irq */
ret = platform_get_irq(pdev, i);
- if (ret < 0) {
- dev_err(gpi_dev->dev, "platform_get_irq failed for %d:%d\n", i, ret);
+ if (ret < 0)
return ret;
- }
gpii->irq = ret;
/* set up channel specific register info */
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 23d64489d25f..65d054bb11aa 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -666,7 +666,7 @@ static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
struct device *dev = msi_desc_to_dev(desc);
struct hidma_dev *dmadev = dev_get_drvdata(dev);
- if (!desc->platform.msi_index) {
+ if (!desc->msi_index) {
writel(msg->address_lo, dmadev->dev_evca + 0x118);
writel(msg->address_hi, dmadev->dev_evca + 0x11C);
writel(msg->data, dmadev->dev_evca + 0x120);
@@ -678,11 +678,13 @@ static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
struct device *dev = dmadev->ddev.dev;
- struct msi_desc *desc;
+ int i, virq;
- /* free allocated MSI interrupts above */
- for_each_msi_entry(desc, dev)
- devm_free_irq(dev, desc->irq, &dmadev->lldev);
+ for (i = 0; i < HIDMA_MSI_INTS; i++) {
+ virq = msi_get_virq(dev, i);
+ if (virq)
+ devm_free_irq(dev, virq, &dmadev->lldev);
+ }
platform_msi_domain_free_irqs(dev);
#endif
@@ -692,45 +694,37 @@ static int hidma_request_msi(struct hidma_dev *dmadev,
struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- int rc;
- struct msi_desc *desc;
- struct msi_desc *failed_desc = NULL;
+ int rc, i, virq;
rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
hidma_write_msi_msg);
if (rc)
return rc;
- for_each_msi_entry(desc, &pdev->dev) {
- if (!desc->platform.msi_index)
- dmadev->msi_virqbase = desc->irq;
-
- rc = devm_request_irq(&pdev->dev, desc->irq,
+ for (i = 0; i < HIDMA_MSI_INTS; i++) {
+ virq = msi_get_virq(&pdev->dev, i);
+ rc = devm_request_irq(&pdev->dev, virq,
hidma_chirq_handler_msi,
0, "qcom-hidma-msi",
&dmadev->lldev);
- if (rc) {
- failed_desc = desc;
+ if (rc)
break;
- }
+ if (!i)
+ dmadev->msi_virqbase = virq;
}
if (rc) {
/* free allocated MSI interrupts above */
- for_each_msi_entry(desc, &pdev->dev) {
- if (desc == failed_desc)
- break;
- devm_free_irq(&pdev->dev, desc->irq,
- &dmadev->lldev);
+ for (--i; i >= 0; i--) {
+ virq = msi_get_virq(&pdev->dev, i);
+ devm_free_irq(&pdev->dev, virq, &dmadev->lldev);
}
+ dev_warn(&pdev->dev,
+ "failed to request MSI irq, falling back to wired IRQ\n");
} else {
/* Add callback to free MSIs on teardown */
hidma_ll_setup_irq(dmadev->lldev, true);
-
}
- if (rc)
- dev_warn(&pdev->dev,
- "failed to request MSI irq, falling back to wired IRQ\n");
return rc;
#else
return -EINVAL;
diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c
index ee78bed8d60d..facdacf8aede 100644
--- a/drivers/dma/qcom/qcom_adm.c
+++ b/drivers/dma/qcom/qcom_adm.c
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dma/qcom_adm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -140,6 +141,8 @@ struct adm_chan {
struct adm_async_desc *curr_txd;
struct dma_slave_config slave;
+ u32 crci;
+ u32 mux;
struct list_head node;
int error;
@@ -379,8 +382,8 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
return ERR_PTR(-EINVAL);
}
- crci = achan->slave.slave_id & 0xf;
- if (!crci || achan->slave.slave_id > 0x1f) {
+ crci = achan->crci & 0xf;
+ if (!crci || achan->crci > 0x1f) {
dev_err(adev->dev, "invalid crci value\n");
return ERR_PTR(-EINVAL);
}
@@ -403,9 +406,7 @@ static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
if (!async_desc)
return ERR_PTR(-ENOMEM);
- if (crci)
- async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ?
- ADM_CRCI_CTL_MUX_SEL : 0;
+ async_desc->mux = achan->mux ? ADM_CRCI_CTL_MUX_SEL : 0;
async_desc->crci = crci;
async_desc->blk_size = blk_size;
async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
@@ -488,10 +489,13 @@ static int adm_terminate_all(struct dma_chan *chan)
static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
struct adm_chan *achan = to_adm_chan(chan);
+ struct qcom_adm_peripheral_config *config = cfg->peripheral_config;
unsigned long flag;
spin_lock_irqsave(&achan->vc.lock, flag);
memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
+ if (cfg->peripheral_size == sizeof(*config))
+ achan->crci = config->crci;
spin_unlock_irqrestore(&achan->vc.lock, flag);
return 0;
@@ -694,6 +698,45 @@ static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
achan->vc.desc_free = adm_dma_free_desc;
}
+/**
+ * adm_dma_xlate - translate a device tree DMA specifier into a dma channel
+ * @dma_spec: pointer to DMA specifier as found in the device tree
+ * @ofdma: pointer to DMA controller data
+ *
+ * This supports both the 1-cell and 2-cell formats: the first cell
+ * identifies the slave device, while the optional second cell
+ * contains the crci value.
+ *
+ * Returns a pointer to the appropriate dma channel on success or NULL on error.
+ */
+static struct dma_chan *adm_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ struct dma_device *dev = ofdma->of_dma_data;
+ struct dma_chan *chan, *candidate = NULL;
+ struct adm_chan *achan;
+
+ if (!dev || dma_spec->args_count > 2)
+ return NULL;
+
+ list_for_each_entry(chan, &dev->channels, device_node)
+ if (chan->chan_id == dma_spec->args[0]) {
+ candidate = chan;
+ break;
+ }
+
+ if (!candidate)
+ return NULL;
+
+ achan = to_adm_chan(candidate);
+ if (dma_spec->args_count == 2)
+ achan->crci = dma_spec->args[1];
+ else
+ achan->crci = 0;
+
+ return dma_get_slave_channel(candidate);
+}
+
static int adm_dma_probe(struct platform_device *pdev)
{
struct adm_device *adev;
@@ -838,8 +881,7 @@ static int adm_dma_probe(struct platform_device *pdev)
goto err_disable_clks;
}
- ret = of_dma_controller_register(pdev->dev.of_node,
- of_dma_xlate_by_chan_id,
+ ret = of_dma_controller_register(pdev->dev.of_node, adm_dma_xlate,
&adev->common);
if (ret)
goto err_unregister_dma;
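
With the slave_id field gone from dma_slave_config, the ADM driver now takes the CRCI either from the second DT cell (via adm_dma_xlate() above) or from the typed peripheral_config side channel. A hedged sketch of the consumer side, with an assumed CRCI number and FIFO address (channel lookup and error handling elided):

#include <linux/dmaengine.h>
#include <linux/dma/qcom_adm.h>

static int foo_setup_chan(struct dma_chan *chan, dma_addr_t fifo_addr)
{
	struct qcom_adm_peripheral_config periph = {
		.crci = 1,		/* assumed CRCI for the peripheral */
	};
	struct dma_slave_config cfg = {
		.direction	   = DMA_MEM_TO_DEV,
		.dst_addr	   = fifo_addr,
		.dst_addr_width	   = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.peripheral_config = &periph,
		.peripheral_size   = sizeof(periph),	/* struct size, not pointer size */
	};

	return dmaengine_slave_config(chan, &cfg);
}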
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 5c7716fd6bc5..13d12d660cc2 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -236,7 +236,7 @@ struct rcar_dmac_of_data {
#define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8)
#define RCAR_DMAOR_AE (1 << 2)
#define RCAR_DMAOR_DME (1 << 0)
-#define RCAR_DMACHCLR 0x0080 /* Not on R-Car V3U */
+#define RCAR_DMACHCLR 0x0080 /* Not on R-Car Gen4 */
#define RCAR_DMADPSEC 0x00a0
#define RCAR_DMASAR 0x0000
@@ -299,8 +299,8 @@ struct rcar_dmac_of_data {
#define RCAR_DMAFIXDAR 0x0014
#define RCAR_DMAFIXDPBASE 0x0060
-/* For R-Car V3U */
-#define RCAR_V3U_DMACHCLR 0x0100
+/* For R-Car Gen4 */
+#define RCAR_GEN4_DMACHCLR 0x0100
/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE 4
@@ -345,7 +345,7 @@ static void rcar_dmac_chan_clear(struct rcar_dmac *dmac,
struct rcar_dmac_chan *chan)
{
if (dmac->chan_base)
- rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
+ rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1);
else
rcar_dmac_write(dmac, RCAR_DMACHCLR, BIT(chan->index));
}
@@ -357,7 +357,7 @@ static void rcar_dmac_chan_clear_all(struct rcar_dmac *dmac)
if (dmac->chan_base) {
for_each_rcar_dmac_chan(i, dmac, chan)
- rcar_dmac_chan_write(chan, RCAR_V3U_DMACHCLR, 1);
+ rcar_dmac_chan_write(chan, RCAR_GEN4_DMACHCLR, 1);
} else {
rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
}
@@ -1868,8 +1868,13 @@ static int rcar_dmac_probe(struct platform_device *pdev)
dmac->dev = &pdev->dev;
platform_set_drvdata(pdev, dmac);
- dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
- dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+ ret = dma_set_max_seg_size(dmac->dev, RCAR_DMATCR_MASK);
+ if (ret)
+ return ret;
+
+ ret = dma_set_mask_and_coherent(dmac->dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
ret = rcar_dmac_parse_of(&pdev->dev, dmac);
if (ret < 0)
@@ -2009,7 +2014,7 @@ static const struct rcar_dmac_of_data rcar_dmac_data = {
.chan_offset_stride = 0x80,
};
-static const struct rcar_dmac_of_data rcar_v3u_dmac_data = {
+static const struct rcar_dmac_of_data rcar_gen4_dmac_data = {
.chan_offset_base = 0x0,
.chan_offset_stride = 0x1000,
};
@@ -2019,8 +2024,11 @@ static const struct of_device_id rcar_dmac_of_ids[] = {
.compatible = "renesas,rcar-dmac",
.data = &rcar_dmac_data,
}, {
+ .compatible = "renesas,rcar-gen4-dmac",
+ .data = &rcar_gen4_dmac_data,
+ }, {
.compatible = "renesas,dmac-r8a779a0",
- .data = &rcar_v3u_dmac_data,
+ .data = &rcar_gen4_dmac_data,
},
{ /* Sentinel */ }
};
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 7f72b3f4cd1a..b26ed690f03c 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -115,8 +115,10 @@ static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
ret = pm_runtime_get(schan->dev);
spin_unlock_irq(&schan->chan_lock);
- if (ret < 0)
+ if (ret < 0) {
dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
+ pm_runtime_put(schan->dev);
+ }
pm_runtime_barrier(schan->dev);
@@ -787,14 +789,6 @@ static int shdma_config(struct dma_chan *chan,
return -EINVAL;
/*
- * overriding the slave_id through dma_slave_config is deprecated,
- * but possibly some out-of-tree drivers still do it.
- */
- if (WARN_ON_ONCE(config->slave_id &&
- config->slave_id != schan->real_slave_id))
- schan->real_slave_id = config->slave_id;
-
- /*
* We could lock this, but you shouldn't be configuring the
* channel, while using it...
*/
@@ -1042,9 +1036,7 @@ EXPORT_SYMBOL(shdma_cleanup);
static int __init shdma_enter(void)
{
- shdma_slave_used = kcalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG),
- sizeof(long),
- GFP_KERNEL);
+ shdma_slave_used = bitmap_zalloc(slave_num, GFP_KERNEL);
if (!shdma_slave_used)
return -ENOMEM;
return 0;
@@ -1053,7 +1045,7 @@ module_init(shdma_enter);
static void __exit shdma_exit(void)
{
- kfree(shdma_slave_used);
+ bitmap_free(shdma_slave_used);
}
module_exit(shdma_exit);
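
bitmap_zalloc()/bitmap_free() replace the open-coded DIV_ROUND_UP/kcalloc() sizing; the helpers take a bit count and round up to longs internally. Sketch (foo_used is a placeholder):

#include <linux/bitmap.h>
#include <linux/slab.h>

static unsigned long *foo_used;

static int foo_init(unsigned int nbits)
{
	foo_used = bitmap_zalloc(nbits, GFP_KERNEL);
	if (!foo_used)
		return -ENOMEM;
	return 0;
}

static void foo_exit(void)
{
	bitmap_free(foo_used);	/* pairs with bitmap_zalloc() */
}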
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index 4357d2395e6b..7f158ef5672d 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -795,9 +795,6 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
return dst_datawidth;
}
- if (slave_cfg->slave_id)
- schan->dev_id = slave_cfg->slave_id;
-
hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
/*
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index a42164389ebc..d5d55732adba 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -292,10 +292,12 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
ret = of_dma_router_register(node, stm32_dmamux_route_allocate,
&stm32_dmamux->dmarouter);
if (ret)
- goto err_clk;
+ goto pm_disable;
return 0;
+pm_disable:
+ pm_runtime_disable(&pdev->dev);
err_clk:
clk_disable_unprepare(stm32_dmamux->clk);
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index d30a4a28d3bf..6f57ff0e7b37 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -10,6 +10,7 @@
* Inspired by stm32-dma.c and dma-jz4780.c
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
@@ -32,13 +33,6 @@
#include "virt-dma.h"
-/* MDMA Generic getter/setter */
-#define STM32_MDMA_SHIFT(n) (ffs(n) - 1)
-#define STM32_MDMA_SET(n, mask) (((n) << STM32_MDMA_SHIFT(mask)) & \
- (mask))
-#define STM32_MDMA_GET(n, mask) (((n) & (mask)) >> \
- STM32_MDMA_SHIFT(mask))
-
#define STM32_MDMA_GISR0 0x0000 /* MDMA Int Status Reg 1 */
#define STM32_MDMA_GISR1 0x0004 /* MDMA Int Status Reg 2 */
@@ -80,8 +74,7 @@
#define STM32_MDMA_CCR_HEX BIT(13)
#define STM32_MDMA_CCR_BEX BIT(12)
#define STM32_MDMA_CCR_PL_MASK GENMASK(7, 6)
-#define STM32_MDMA_CCR_PL(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CCR_PL_MASK)
+#define STM32_MDMA_CCR_PL(n) FIELD_PREP(STM32_MDMA_CCR_PL_MASK, (n))
#define STM32_MDMA_CCR_TCIE BIT(5)
#define STM32_MDMA_CCR_BTIE BIT(4)
#define STM32_MDMA_CCR_BRTIE BIT(3)
@@ -99,48 +92,33 @@
#define STM32_MDMA_CTCR_BWM BIT(31)
#define STM32_MDMA_CTCR_SWRM BIT(30)
#define STM32_MDMA_CTCR_TRGM_MSK GENMASK(29, 28)
-#define STM32_MDMA_CTCR_TRGM(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_TRGM_MSK)
-#define STM32_MDMA_CTCR_TRGM_GET(n) STM32_MDMA_GET((n), \
- STM32_MDMA_CTCR_TRGM_MSK)
+#define STM32_MDMA_CTCR_TRGM(n) FIELD_PREP(STM32_MDMA_CTCR_TRGM_MSK, (n))
+#define STM32_MDMA_CTCR_TRGM_GET(n) FIELD_GET(STM32_MDMA_CTCR_TRGM_MSK, (n))
#define STM32_MDMA_CTCR_PAM_MASK GENMASK(27, 26)
-#define STM32_MDMA_CTCR_PAM(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CTCR_PAM_MASK)
+#define STM32_MDMA_CTCR_PAM(n) FIELD_PREP(STM32_MDMA_CTCR_PAM_MASK, (n))
#define STM32_MDMA_CTCR_PKE BIT(25)
#define STM32_MDMA_CTCR_TLEN_MSK GENMASK(24, 18)
-#define STM32_MDMA_CTCR_TLEN(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_TLEN_MSK)
-#define STM32_MDMA_CTCR_TLEN_GET(n) STM32_MDMA_GET((n), \
- STM32_MDMA_CTCR_TLEN_MSK)
+#define STM32_MDMA_CTCR_TLEN(n) FIELD_PREP(STM32_MDMA_CTCR_TLEN_MSK, (n))
+#define STM32_MDMA_CTCR_TLEN_GET(n) FIELD_GET(STM32_MDMA_CTCR_TLEN_MSK, (n))
#define STM32_MDMA_CTCR_LEN2_MSK GENMASK(25, 18)
-#define STM32_MDMA_CTCR_LEN2(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_LEN2_MSK)
-#define STM32_MDMA_CTCR_LEN2_GET(n) STM32_MDMA_GET((n), \
- STM32_MDMA_CTCR_LEN2_MSK)
+#define STM32_MDMA_CTCR_LEN2(n) FIELD_PREP(STM32_MDMA_CTCR_LEN2_MSK, (n))
+#define STM32_MDMA_CTCR_LEN2_GET(n) FIELD_GET(STM32_MDMA_CTCR_LEN2_MSK, (n))
#define STM32_MDMA_CTCR_DBURST_MASK GENMASK(17, 15)
-#define STM32_MDMA_CTCR_DBURST(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CTCR_DBURST_MASK)
+#define STM32_MDMA_CTCR_DBURST(n) FIELD_PREP(STM32_MDMA_CTCR_DBURST_MASK, (n))
#define STM32_MDMA_CTCR_SBURST_MASK GENMASK(14, 12)
-#define STM32_MDMA_CTCR_SBURST(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CTCR_SBURST_MASK)
+#define STM32_MDMA_CTCR_SBURST(n) FIELD_PREP(STM32_MDMA_CTCR_SBURST_MASK, (n))
#define STM32_MDMA_CTCR_DINCOS_MASK GENMASK(11, 10)
-#define STM32_MDMA_CTCR_DINCOS(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_DINCOS_MASK)
+#define STM32_MDMA_CTCR_DINCOS(n) FIELD_PREP(STM32_MDMA_CTCR_DINCOS_MASK, (n))
#define STM32_MDMA_CTCR_SINCOS_MASK GENMASK(9, 8)
-#define STM32_MDMA_CTCR_SINCOS(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_SINCOS_MASK)
+#define STM32_MDMA_CTCR_SINCOS(n) FIELD_PREP(STM32_MDMA_CTCR_SINCOS_MASK, (n))
#define STM32_MDMA_CTCR_DSIZE_MASK GENMASK(7, 6)
-#define STM32_MDMA_CTCR_DSIZE(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CTCR_DSIZE_MASK)
+#define STM32_MDMA_CTCR_DSIZE(n) FIELD_PREP(STM32_MDMA_CTCR_DSIZE_MASK, (n))
#define STM32_MDMA_CTCR_SSIZE_MASK GENMASK(5, 4)
-#define STM32_MDMA_CTCR_SSIZE(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CTCR_SSIZE_MASK)
+#define STM32_MDMA_CTCR_SSIZE(n) FIELD_PREP(STM32_MDMA_CTCR_SSIZE_MASK, (n))
#define STM32_MDMA_CTCR_DINC_MASK GENMASK(3, 2)
-#define STM32_MDMA_CTCR_DINC(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_DINC_MASK)
+#define STM32_MDMA_CTCR_DINC(n) FIELD_PREP(STM32_MDMA_CTCR_DINC_MASK, (n))
#define STM32_MDMA_CTCR_SINC_MASK GENMASK(1, 0)
-#define STM32_MDMA_CTCR_SINC(n) STM32_MDMA_SET((n), \
- STM32_MDMA_CTCR_SINC_MASK)
+#define STM32_MDMA_CTCR_SINC(n) FIELD_PREP(STM32_MDMA_CTCR_SINC_MASK, (n))
#define STM32_MDMA_CTCR_CFG_MASK (STM32_MDMA_CTCR_SINC_MASK \
| STM32_MDMA_CTCR_DINC_MASK \
| STM32_MDMA_CTCR_SINCOS_MASK \
@@ -151,16 +129,13 @@
/* MDMA Channel x block number of data register */
#define STM32_MDMA_CBNDTR(x) (0x54 + 0x40 * (x))
#define STM32_MDMA_CBNDTR_BRC_MK GENMASK(31, 20)
-#define STM32_MDMA_CBNDTR_BRC(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CBNDTR_BRC_MK)
-#define STM32_MDMA_CBNDTR_BRC_GET(n) STM32_MDMA_GET((n), \
- STM32_MDMA_CBNDTR_BRC_MK)
+#define STM32_MDMA_CBNDTR_BRC(n) FIELD_PREP(STM32_MDMA_CBNDTR_BRC_MK, (n))
+#define STM32_MDMA_CBNDTR_BRC_GET(n) FIELD_GET(STM32_MDMA_CBNDTR_BRC_MK, (n))
#define STM32_MDMA_CBNDTR_BRDUM BIT(19)
#define STM32_MDMA_CBNDTR_BRSUM BIT(18)
#define STM32_MDMA_CBNDTR_BNDT_MASK GENMASK(16, 0)
-#define STM32_MDMA_CBNDTR_BNDT(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CBNDTR_BNDT_MASK)
+#define STM32_MDMA_CBNDTR_BNDT(n) FIELD_PREP(STM32_MDMA_CBNDTR_BNDT_MASK, (n))
/* MDMA Channel x source address register */
#define STM32_MDMA_CSAR(x) (0x58 + 0x40 * (x))
@@ -171,11 +146,9 @@
/* MDMA Channel x block repeat address update register */
#define STM32_MDMA_CBRUR(x) (0x60 + 0x40 * (x))
#define STM32_MDMA_CBRUR_DUV_MASK GENMASK(31, 16)
-#define STM32_MDMA_CBRUR_DUV(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CBRUR_DUV_MASK)
+#define STM32_MDMA_CBRUR_DUV(n) FIELD_PREP(STM32_MDMA_CBRUR_DUV_MASK, (n))
#define STM32_MDMA_CBRUR_SUV_MASK GENMASK(15, 0)
-#define STM32_MDMA_CBRUR_SUV(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CBRUR_SUV_MASK)
+#define STM32_MDMA_CBRUR_SUV(n) FIELD_PREP(STM32_MDMA_CBRUR_SUV_MASK, (n))
/* MDMA Channel x link address register */
#define STM32_MDMA_CLAR(x) (0x64 + 0x40 * (x))
@@ -184,9 +157,8 @@
#define STM32_MDMA_CTBR(x) (0x68 + 0x40 * (x))
#define STM32_MDMA_CTBR_DBUS BIT(17)
#define STM32_MDMA_CTBR_SBUS BIT(16)
-#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(7, 0)
-#define STM32_MDMA_CTBR_TSEL(n) STM32_MDMA_SET(n, \
- STM32_MDMA_CTBR_TSEL_MASK)
+#define STM32_MDMA_CTBR_TSEL_MASK GENMASK(5, 0)
+#define STM32_MDMA_CTBR_TSEL(n) FIELD_PREP(STM32_MDMA_CTBR_TSEL_MASK, (n))
/* MDMA Channel x mask address register */
#define STM32_MDMA_CMAR(x) (0x70 + 0x40 * (x))
@@ -1279,7 +1251,7 @@ static size_t stm32_mdma_desc_residue(struct stm32_mdma_chan *chan,
u32 curr_hwdesc)
{
struct stm32_mdma_device *dmadev = stm32_mdma_get_dev(chan);
- struct stm32_mdma_hwdesc *hwdesc = desc->node[0].hwdesc;
+ struct stm32_mdma_hwdesc *hwdesc;
u32 cbndtr, residue, modulo, burst_size;
int i;
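
The macro churn above swaps the driver-local SET/GET helpers for FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>: the field is described once by a GENMASK() and the shift is derived from the mask at compile time. An illustrative register field (the layout is invented for the example):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define FOO_CTRL_PL_MASK	GENMASK(7, 6)	/* two-bit priority field */

static u32 foo_set_pl(u32 reg, u32 pl)
{
	reg &= ~FOO_CTRL_PL_MASK;
	reg |= FIELD_PREP(FOO_CTRL_PL_MASK, pl);	/* shift and mask the value in */
	return reg;
}

static u32 foo_get_pl(u32 reg)
{
	return FIELD_GET(FOO_CTRL_PL_MASK, reg);	/* mask and shift the value out */
}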
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index b7260749e8ee..eaafcbe4ca94 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -343,12 +343,6 @@ static int tegra_dma_slave_config(struct dma_chan *dc,
}
memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
- if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
- sconfig->device_fc) {
- if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
- return -EINVAL;
- tdc->slave_id = sconfig->slave_id;
- }
tdc->config_init = true;
return 0;
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index bd496efadff7..1d4081a049b7 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o \
k3-psil-am654.o \
k3-psil-j721e.o \
k3-psil-j7200.o \
- k3-psil-am64.o
+ k3-psil-am64.o \
+ k3-psil-j721s2.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
index 35d81bd857f1..08e47f44d325 100644
--- a/drivers/dma/ti/edma.c
+++ b/drivers/dma/ti/edma.c
@@ -1681,8 +1681,7 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
emr = val;
- for (i = find_next_bit(&emr, 32, 0); i < 32;
- i = find_next_bit(&emr, 32, i + 1)) {
+ for_each_set_bit(i, &emr, 32) {
int k = (j << 5) + i;
/* Clear the corresponding EMR bits */
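
for_each_set_bit() expresses the open-coded find_next_bit() loop directly and visits only the asserted bits. Sketch (foo_handle_errors() is illustrative):

#include <linux/bitops.h>
#include <linux/printk.h>

static void foo_handle_errors(unsigned long emr)
{
	unsigned int i;

	/* Iterate over each set bit of the 32-bit error mask. */
	for_each_set_bit(i, &emr, 32)
		pr_debug("error bit %u set\n", i);
}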
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
new file mode 100644
index 000000000000..4c4172a4d271
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_PDMA_MCASP(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .pdma_acc32 = 1, \
+ .pdma_burst = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ }, \
+ }
+
+#define PSIL_SA2UL(x, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep j721s2_src_ep_map[] = {
+ /* PDMA_MCASP - McASP0-4 */
+ PSIL_PDMA_MCASP(0x4400),
+ PSIL_PDMA_MCASP(0x4401),
+ PSIL_PDMA_MCASP(0x4402),
+ PSIL_PDMA_MCASP(0x4403),
+ PSIL_PDMA_MCASP(0x4404),
+ /* PDMA_SPI_G0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4600),
+ PSIL_PDMA_XY_PKT(0x4601),
+ PSIL_PDMA_XY_PKT(0x4602),
+ PSIL_PDMA_XY_PKT(0x4603),
+ PSIL_PDMA_XY_PKT(0x4604),
+ PSIL_PDMA_XY_PKT(0x4605),
+ PSIL_PDMA_XY_PKT(0x4606),
+ PSIL_PDMA_XY_PKT(0x4607),
+ PSIL_PDMA_XY_PKT(0x4608),
+ PSIL_PDMA_XY_PKT(0x4609),
+ PSIL_PDMA_XY_PKT(0x460a),
+ PSIL_PDMA_XY_PKT(0x460b),
+ PSIL_PDMA_XY_PKT(0x460c),
+ PSIL_PDMA_XY_PKT(0x460d),
+ PSIL_PDMA_XY_PKT(0x460e),
+ PSIL_PDMA_XY_PKT(0x460f),
+ /* PDMA_SPI_G1 - SPI4-7 */
+ PSIL_PDMA_XY_PKT(0x4610),
+ PSIL_PDMA_XY_PKT(0x4611),
+ PSIL_PDMA_XY_PKT(0x4612),
+ PSIL_PDMA_XY_PKT(0x4613),
+ PSIL_PDMA_XY_PKT(0x4614),
+ PSIL_PDMA_XY_PKT(0x4615),
+ PSIL_PDMA_XY_PKT(0x4616),
+ PSIL_PDMA_XY_PKT(0x4617),
+ PSIL_PDMA_XY_PKT(0x4618),
+ PSIL_PDMA_XY_PKT(0x4619),
+ PSIL_PDMA_XY_PKT(0x461a),
+ PSIL_PDMA_XY_PKT(0x461b),
+ PSIL_PDMA_XY_PKT(0x461c),
+ PSIL_PDMA_XY_PKT(0x461d),
+ PSIL_PDMA_XY_PKT(0x461e),
+ PSIL_PDMA_XY_PKT(0x461f),
+ /* PDMA_USART_G0 - UART0-1 */
+ PSIL_PDMA_XY_PKT(0x4700),
+ PSIL_PDMA_XY_PKT(0x4701),
+ /* PDMA_USART_G1 - UART2-3 */
+ PSIL_PDMA_XY_PKT(0x4702),
+ PSIL_PDMA_XY_PKT(0x4703),
+ /* PDMA_USART_G2 - UART4-9 */
+ PSIL_PDMA_XY_PKT(0x4704),
+ PSIL_PDMA_XY_PKT(0x4705),
+ PSIL_PDMA_XY_PKT(0x4706),
+ PSIL_PDMA_XY_PKT(0x4707),
+ PSIL_PDMA_XY_PKT(0x4708),
+ PSIL_PDMA_XY_PKT(0x4709),
+ /* CPSW0 */
+ PSIL_ETHERNET(0x7000),
+ /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */
+ PSIL_PDMA_XY_PKT(0x7100),
+ PSIL_PDMA_XY_PKT(0x7101),
+ PSIL_PDMA_XY_PKT(0x7102),
+ PSIL_PDMA_XY_PKT(0x7103),
+ /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */
+ PSIL_PDMA_XY_PKT(0x7200),
+ PSIL_PDMA_XY_PKT(0x7201),
+ PSIL_PDMA_XY_PKT(0x7202),
+ PSIL_PDMA_XY_PKT(0x7203),
+ PSIL_PDMA_XY_PKT(0x7204),
+ PSIL_PDMA_XY_PKT(0x7205),
+ PSIL_PDMA_XY_PKT(0x7206),
+ PSIL_PDMA_XY_PKT(0x7207),
+ /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */
+ PSIL_PDMA_XY_PKT(0x7300),
+ /* MCU_PDMA_ADC - ADC0-1 */
+ PSIL_PDMA_XY_TR(0x7400),
+ PSIL_PDMA_XY_TR(0x7401),
+ PSIL_PDMA_XY_TR(0x7402),
+ PSIL_PDMA_XY_TR(0x7403),
+ /* SA2UL */
+ PSIL_SA2UL(0x7500, 0),
+ PSIL_SA2UL(0x7501, 0),
+ PSIL_SA2UL(0x7502, 0),
+ PSIL_SA2UL(0x7503, 0),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep j721s2_dst_ep_map[] = {
+ /* CPSW0 */
+ PSIL_ETHERNET(0xf000),
+ PSIL_ETHERNET(0xf001),
+ PSIL_ETHERNET(0xf002),
+ PSIL_ETHERNET(0xf003),
+ PSIL_ETHERNET(0xf004),
+ PSIL_ETHERNET(0xf005),
+ PSIL_ETHERNET(0xf006),
+ PSIL_ETHERNET(0xf007),
+ /* SA2UL */
+ PSIL_SA2UL(0xf500, 1),
+ PSIL_SA2UL(0xf501, 1),
+};
+
+struct psil_ep_map j721s2_ep_map = {
+ .name = "j721s2",
+ .src = j721s2_src_ep_map,
+ .src_count = ARRAY_SIZE(j721s2_src_ep_map),
+ .dst = j721s2_dst_ep_map,
+ .dst_count = ARRAY_SIZE(j721s2_dst_ep_map),
+};
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
index b74e192e3c2d..e51e179cdb56 100644
--- a/drivers/dma/ti/k3-psil-priv.h
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -41,5 +41,6 @@ extern struct psil_ep_map am654_ep_map;
extern struct psil_ep_map j721e_ep_map;
extern struct psil_ep_map j7200_ep_map;
extern struct psil_ep_map am64_ep_map;
+extern struct psil_ep_map j721s2_ep_map;
#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index 13ce7367d870..8867b4bd0c51 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -21,6 +21,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "J721E", .data = &j721e_ep_map },
{ .family = "J7200", .data = &j7200_ep_map },
{ .family = "AM64X", .data = &am64_ep_map },
+ { .family = "J721S2", .data = &j721s2_ep_map },
{ /* sentinel */ }
};
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index aada84f40723..d4f1e4e9603a 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -168,8 +168,7 @@ int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id)
{
const struct udma_oes_offsets *oes = &ud->soc_data->oes;
- return ti_sci_inta_msi_get_virq(ud->dev, udma_tflow_id +
- oes->pktdma_tchan_flow);
+ return msi_get_virq(ud->dev, udma_tflow_id + oes->pktdma_tchan_flow);
}
EXPORT_SYMBOL(xudma_pktdma_tflow_get_irq);
@@ -177,7 +176,6 @@ int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id)
{
const struct udma_oes_offsets *oes = &ud->soc_data->oes;
- return ti_sci_inta_msi_get_virq(ud->dev, udma_rflow_id +
- oes->pktdma_rchan_flow);
+ return msi_get_virq(ud->dev, udma_rflow_id + oes->pktdma_rchan_flow);
}
EXPORT_SYMBOL(xudma_pktdma_rflow_get_irq);
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 6e56d1cef5ee..d2d4cbe63e44 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -2313,8 +2313,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
/* Event from UDMA (TR events) only needed for slave TR mode channels */
if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
- uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
- irq_udma_idx);
+ uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
if (uc->irq_num_udma <= 0) {
dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
irq_udma_idx);
@@ -2486,7 +2485,7 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan)
uc->psil_paired = true;
}
- uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
+ uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
if (uc->irq_num_ring <= 0) {
dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
irq_ring_idx);
@@ -2503,8 +2502,7 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan)
/* Event from BCDMA (TR events) only needed for slave channels */
if (is_slave_direction(uc->config.dir)) {
- uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
- irq_udma_idx);
+ uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
if (uc->irq_num_udma <= 0) {
dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
irq_udma_idx);
@@ -2672,7 +2670,7 @@ static int pktdma_alloc_chan_resources(struct dma_chan *chan)
uc->psil_paired = true;
- uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
+ uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
if (uc->irq_num_ring <= 0) {
dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
irq_ring_idx);
@@ -4376,6 +4374,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "J721E", .data = &j721e_soc_data },
{ .family = "J7200", .data = &j7200_soc_data },
{ .family = "AM64X", .data = &am64_soc_data },
+ { .family = "J721S2", .data = &j721e_soc_data},
{ /* sentinel */ }
};
@@ -5336,9 +5335,9 @@ static int udma_probe(struct platform_device *pdev)
if (IS_ERR(ud->ringacc))
return PTR_ERR(ud->ringacc);
- dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_TI_SCI_INTA_MSI);
- if (!dev->msi_domain) {
+ if (!dev->msi.domain) {
dev_err(dev, "Failed to get MSI domain\n");
return -EPROBE_DEFER;
}
diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c
index d6b8a202474f..290836b7e1be 100644
--- a/drivers/dma/uniphier-xdmac.c
+++ b/drivers/dma/uniphier-xdmac.c
@@ -131,8 +131,9 @@ uniphier_xdmac_next_desc(struct uniphier_xdmac_chan *xc)
static void uniphier_xdmac_chan_start(struct uniphier_xdmac_chan *xc,
struct uniphier_xdmac_desc *xd)
{
- u32 src_mode, src_addr, src_width;
- u32 dst_mode, dst_addr, dst_width;
+ u32 src_mode, src_width;
+ u32 dst_mode, dst_width;
+ dma_addr_t src_addr, dst_addr;
u32 val, its, tnum;
enum dma_slave_buswidth buswidth;
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 4677ce08ed40..cd62bbb50e8b 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2128,6 +2128,126 @@ error:
}
/**
+ * xilinx_cdma_prep_memcpy_sg - prepare descriptors for a memcpy_sg transaction
+ * @dchan: DMA channel
+ * @dst_sg: Destination scatter list
+ * @dst_sg_len: Number of entries in destination scatter list
+ * @src_sg: Source scatter list
+ * @src_sg_len: Number of entries in source scatter list
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *xilinx_cdma_prep_memcpy_sg(
+ struct dma_chan *dchan, struct scatterlist *dst_sg,
+ unsigned int dst_sg_len, struct scatterlist *src_sg,
+ unsigned int src_sg_len, unsigned long flags)
+{
+ struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dma_tx_descriptor *desc;
+ struct xilinx_cdma_tx_segment *segment, *prev = NULL;
+ struct xilinx_cdma_desc_hw *hw;
+ size_t len, dst_avail, src_avail;
+ dma_addr_t dma_dst, dma_src;
+
+ if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
+ return NULL;
+
+ if (unlikely(!dst_sg || !src_sg))
+ return NULL;
+
+ desc = xilinx_dma_alloc_tx_descriptor(chan);
+ if (!desc)
+ return NULL;
+
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+ /*
+ * loop until there is either no more source or no more destination
+ * scatterlist entry
+ */
+ while (true) {
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, chan->xdev->max_buffer_len);
+ if (len == 0)
+ goto fetch;
+
+ /* Allocate the link descriptor from DMA pool */
+ segment = xilinx_cdma_alloc_tx_segment(chan);
+ if (!segment)
+ goto error;
+
+ dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
+ dst_avail;
+ dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
+ src_avail;
+ hw = &segment->hw;
+ hw->control = len;
+ hw->src_addr = dma_src;
+ hw->dest_addr = dma_dst;
+ if (chan->ext_addr) {
+ hw->src_addr_msb = upper_32_bits(dma_src);
+ hw->dest_addr_msb = upper_32_bits(dma_dst);
+ }
+
+ if (prev) {
+ prev->hw.next_desc = segment->phys;
+ if (chan->ext_addr)
+ prev->hw.next_desc_msb =
+ upper_32_bits(segment->phys);
+ }
+
+ prev = segment;
+ dst_avail -= len;
+ src_avail -= len;
+ list_add_tail(&segment->node, &desc->segments);
+
+fetch:
+ /* Fetch the next dst scatterlist entry */
+ if (dst_avail == 0) {
+ if (dst_sg_len == 0)
+ break;
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+ dst_sg_len--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+ /* Fetch the next src scatterlist entry */
+ if (src_avail == 0) {
+ if (src_sg_len == 0)
+ break;
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+ src_sg_len--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ if (list_empty(&desc->segments)) {
+ dev_err(chan->xdev->dev,
+ "%s: Zero-size SG transfer requested\n", __func__);
+ goto error;
+ }
+
+ /* Link the last hardware descriptor with the first. */
+ segment = list_first_entry(&desc->segments,
+ struct xilinx_cdma_tx_segment, node);
+ desc->async_tx.phys = segment->phys;
+ prev->hw.next_desc = segment->phys;
+
+ return &desc->async_tx;
+
+error:
+ xilinx_dma_free_tx_descriptor(chan, desc);
+ return NULL;
+}
+
+/**
* xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
* @dchan: DMA channel
* @sgl: scatterlist to transfer to/from
@@ -2860,7 +2980,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
}
/* Request the interrupt */
- chan->irq = irq_of_parse_and_map(node, chan->tdest);
+ chan->irq = of_irq_get(node, chan->tdest);
+ if (chan->irq < 0)
+ return dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n");
err = request_irq(chan->irq, xdev->dma_config->irq_handler,
IRQF_SHARED, "xilinx-dma-controller", chan);
if (err) {
@@ -2934,8 +3056,11 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
dev_warn(xdev->dev, "missing dma-channels property\n");
- for (i = 0; i < nr_channels; i++)
- xilinx_dma_chan_probe(xdev, node);
+ for (i = 0; i < nr_channels; i++) {
+ ret = xilinx_dma_chan_probe(xdev, node);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -3115,7 +3240,9 @@ static int xilinx_dma_probe(struct platform_device *pdev)
DMA_RESIDUE_GRANULARITY_SEGMENT;
} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+ dma_cap_set(DMA_MEMCPY_SG, xdev->common.cap_mask);
xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
+ xdev->common.device_prep_dma_memcpy_sg = xilinx_cdma_prep_memcpy_sg;
/* Residue calculation is supported by only AXI DMA and CDMA */
xdev->common.residue_granularity =
DMA_RESIDUE_GRANULARITY_SEGMENT;
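
A DMA_MEMCPY_SG client hands the new device op two already-mapped scatterlists; this patch adds the op itself, so the sketch below calls it directly through the dma_device (the foo_copy_sg() wrapper and its error handling are hypothetical):

#include <linux/dmaengine.h>

static dma_cookie_t foo_copy_sg(struct dma_chan *chan,
				struct scatterlist *dst_sg, unsigned int dst_n,
				struct scatterlist *src_sg, unsigned int src_n)
{
	struct dma_async_tx_descriptor *tx;

	/* Both scatterlists must already be dma_map_sg()-mapped. */
	tx = chan->device->device_prep_dma_memcpy_sg(chan, dst_sg, dst_n,
						     src_sg, src_n,
						     DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	return dmaengine_submit(tx);	/* caller then calls dma_async_issue_pending() */
}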
diff --git a/drivers/dma/xilinx/xilinx_dpdma.c b/drivers/dma/xilinx/xilinx_dpdma.c
index ce5c66e6897d..b0f4948b00a5 100644
--- a/drivers/dma/xilinx/xilinx_dpdma.c
+++ b/drivers/dma/xilinx/xilinx_dpdma.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
+#include <linux/dma/xilinx_dpdma.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
@@ -1273,6 +1274,7 @@ static int xilinx_dpdma_config(struct dma_chan *dchan,
struct dma_slave_config *config)
{
struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
+ struct xilinx_dpdma_peripheral_config *pconfig;
unsigned long flags;
/*
@@ -1282,15 +1284,18 @@ static int xilinx_dpdma_config(struct dma_chan *dchan,
* fixed both on the DPDMA side and on the DP controller side.
*/
- spin_lock_irqsave(&chan->lock, flags);
-
/*
- * Abuse the slave_id to indicate that the channel is part of a video
- * group.
+ * Use the peripheral_config to indicate that the channel is part
+ * of a video group. This requires matching use of the custom
+ * structure in each driver.
*/
- if (chan->id <= ZYNQMP_DPDMA_VIDEO2)
- chan->video_group = config->slave_id != 0;
+ pconfig = config->peripheral_config;
+ if (WARN_ON(pconfig && config->peripheral_size != sizeof(*pconfig)))
+ return -EINVAL;
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->id <= ZYNQMP_DPDMA_VIDEO2 && pconfig)
+ chan->video_group = pconfig->video_group;
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 3a6d2416cb0f..5dd29789f97d 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -350,7 +350,7 @@ static int altr_sdram_probe(struct platform_device *pdev)
if (irq < 0) {
edac_printk(KERN_ERR, EDAC_MC,
"No irq %d in DT\n", irq);
- return -ENODEV;
+ return irq;
}
/* Arria10 has a 2nd IRQ */
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 9d9aabdec96b..f5677d81bd2d 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -215,7 +215,7 @@ void *edac_align_ptr(void **p, unsigned int size, int n_elems)
else
return (char *)ptr;
- r = (unsigned long)p % align;
+ r = (unsigned long)ptr % align;
if (r == 0)
return (char *)ptr;
diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c
index 2ccd1db5e98f..7197f9fa0245 100644
--- a/drivers/edac/xgene_edac.c
+++ b/drivers/edac/xgene_edac.c
@@ -1919,7 +1919,7 @@ static int xgene_edac_probe(struct platform_device *pdev)
irq = platform_get_irq_optional(pdev, i);
if (irq < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
- rc = -EINVAL;
+ rc = irq;
goto out_err;
}
rc = devm_request_irq(&pdev->dev, irq,
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index 0cb440bdd5cb..f2b65d967384 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* drivers/extcon/extcon-usb-gpio.c - USB GPIO extcon driver
*
* Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index e7a9561a826d..a09e704fd0fa 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -576,19 +576,7 @@ EXPORT_SYMBOL_GPL(extcon_set_state);
*/
int extcon_set_state_sync(struct extcon_dev *edev, unsigned int id, bool state)
{
- int ret, index;
- unsigned long flags;
-
- index = find_cable_index_by_id(edev, id);
- if (index < 0)
- return index;
-
- /* Check whether the external connector's state is changed. */
- spin_lock_irqsave(&edev->lock, flags);
- ret = is_extcon_changed(edev, index, state);
- spin_unlock_irqrestore(&edev->lock, flags);
- if (!ret)
- return 0;
+ int ret;
ret = extcon_set_state(edev, id, state);
if (ret < 0)
diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c
index 87039c5c03fd..eefcc4146749 100644
--- a/drivers/firmware/arm_scmi/virtio.c
+++ b/drivers/firmware/arm_scmi/virtio.c
@@ -452,7 +452,7 @@ static void scmi_vio_remove(struct virtio_device *vdev)
* outstanding message on any vqueue to be ignored by complete_cb: now
* we can just stop processing buffers and destroy the vqueues.
*/
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
/* Ensure scmi_vdev is visible as NULL */
smp_store_mb(scmi_vdev, NULL);
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 948dd8382686..e48108e694f8 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -12,16 +12,10 @@
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/firmware.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/workqueue.h>
#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/firmware/cirrus/wmfw.h>
@@ -622,7 +616,8 @@ static void cs_dsp_halo_show_fw_status(struct cs_dsp *dsp)
offs[0], offs[1], offs[2], offs[3]);
}
-static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg)
+static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg,
+ unsigned int off)
{
const struct cs_dsp_alg_region *alg_region = &ctl->alg_region;
struct cs_dsp *dsp = ctl->dsp;
@@ -635,7 +630,7 @@ static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg
return -EINVAL;
}
- *reg = dsp->ops->region_to_reg(mem, ctl->alg_region.base + ctl->offset);
+ *reg = dsp->ops->region_to_reg(mem, ctl->alg_region.base + ctl->offset + off);
return 0;
}
@@ -659,10 +654,12 @@ int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int
unsigned int reg;
int i, ret;
+ lockdep_assert_held(&dsp->pwr_lock);
+
if (!dsp->running)
return -EPERM;
- ret = cs_dsp_coeff_base_reg(ctl, &reg);
+ ret = cs_dsp_coeff_base_reg(ctl, &reg, 0);
if (ret)
return ret;
@@ -716,14 +713,14 @@ int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int
EXPORT_SYMBOL_GPL(cs_dsp_coeff_write_acked_control);
static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
- const void *buf, size_t len)
+ unsigned int off, const void *buf, size_t len)
{
struct cs_dsp *dsp = ctl->dsp;
void *scratch;
int ret;
unsigned int reg;
- ret = cs_dsp_coeff_base_reg(ctl, &reg);
+ ret = cs_dsp_coeff_base_reg(ctl, &reg, off);
if (ret)
return ret;
@@ -749,38 +746,49 @@ static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
/**
* cs_dsp_coeff_write_ctrl() - Writes the given buffer to the given coefficient control
* @ctl: pointer to coefficient control
+ * @off: word offset at which data should be written
* @buf: the buffer to write to the given control
- * @len: the length of the buffer
+ * @len: the length of the buffer in bytes
*
* Must be called with pwr_lock held.
*
* Return: Zero for success, a negative number on error.
*/
-int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, const void *buf, size_t len)
+int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl,
+ unsigned int off, const void *buf, size_t len)
{
int ret = 0;
+ if (!ctl)
+ return -ENOENT;
+
+ lockdep_assert_held(&ctl->dsp->pwr_lock);
+
+ if (len + off * sizeof(u32) > ctl->len)
+ return -EINVAL;
+
if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
ret = -EPERM;
else if (buf != ctl->cache)
- memcpy(ctl->cache, buf, len);
+ memcpy(ctl->cache + off * sizeof(u32), buf, len);
ctl->set = 1;
if (ctl->enabled && ctl->dsp->running)
- ret = cs_dsp_coeff_write_ctrl_raw(ctl, buf, len);
+ ret = cs_dsp_coeff_write_ctrl_raw(ctl, off, buf, len);
return ret;
}
EXPORT_SYMBOL_GPL(cs_dsp_coeff_write_ctrl);
-static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len)
+static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
+ unsigned int off, void *buf, size_t len)
{
struct cs_dsp *dsp = ctl->dsp;
void *scratch;
int ret;
unsigned int reg;
- ret = cs_dsp_coeff_base_reg(ctl, &reg);
+ ret = cs_dsp_coeff_base_reg(ctl, &reg, off);
if (ret)
return ret;
@@ -806,28 +814,38 @@ static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, void *buf, s
/**
* cs_dsp_coeff_read_ctrl() - Reads the given coefficient control into the given buffer
* @ctl: pointer to coefficient control
+ * @off: word offset at which data should be read
* @buf: the buffer to store to the given control
- * @len: the length of the buffer
+ * @len: the length of the buffer in bytes
*
* Must be called with pwr_lock held.
*
* Return: Zero for success, a negative number on error.
*/
-int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len)
+int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl,
+ unsigned int off, void *buf, size_t len)
{
int ret = 0;
+ if (!ctl)
+ return -ENOENT;
+
+ lockdep_assert_held(&ctl->dsp->pwr_lock);
+
+ if (len + off * sizeof(u32) > ctl->len)
+ return -EINVAL;
+
if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) {
if (ctl->enabled && ctl->dsp->running)
- return cs_dsp_coeff_read_ctrl_raw(ctl, buf, len);
+ return cs_dsp_coeff_read_ctrl_raw(ctl, off, buf, len);
else
return -EPERM;
} else {
if (!ctl->flags && ctl->enabled && ctl->dsp->running)
- ret = cs_dsp_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len);
+ ret = cs_dsp_coeff_read_ctrl_raw(ctl, 0, ctl->cache, ctl->len);
if (buf != ctl->cache)
- memcpy(buf, ctl->cache, len);
+ memcpy(buf, ctl->cache + off * sizeof(u32), len);
}
return ret;
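
After this change both accessors take a word offset while len stays in bytes, and both lockdep-assert pwr_lock, so callers must hold it explicitly. A minimal caller sketch (foo_poke_ctl() and the chosen offset are illustrative):

#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/mutex.h>

static int foo_poke_ctl(struct cs_dsp *dsp, struct cs_dsp_coeff_ctl *ctl,
			__be32 val)
{
	int ret;

	mutex_lock(&dsp->pwr_lock);
	/* off = 2 words into the control; len is in bytes. */
	ret = cs_dsp_coeff_write_ctrl(ctl, 2, &val, sizeof(val));
	mutex_unlock(&dsp->pwr_lock);

	return ret;
}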
@@ -851,7 +869,7 @@ static int cs_dsp_coeff_init_control_caches(struct cs_dsp *dsp)
* created so we don't need to do anything.
*/
if (!ctl->flags || (ctl->flags & WMFW_CTL_FLAG_READABLE)) {
- ret = cs_dsp_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len);
+ ret = cs_dsp_coeff_read_ctrl_raw(ctl, 0, ctl->cache, ctl->len);
if (ret < 0)
return ret;
}
@@ -869,7 +887,7 @@ static int cs_dsp_coeff_sync_controls(struct cs_dsp *dsp)
if (!ctl->enabled)
continue;
if (ctl->set && !(ctl->flags & WMFW_CTL_FLAG_VOLATILE)) {
- ret = cs_dsp_coeff_write_ctrl_raw(ctl, ctl->cache,
+ ret = cs_dsp_coeff_write_ctrl_raw(ctl, 0, ctl->cache,
ctl->len);
if (ret < 0)
return ret;
@@ -1159,6 +1177,7 @@ static int cs_dsp_parse_coeff(struct cs_dsp *dsp,
return -EINVAL;
break;
case WMFW_CTL_TYPE_HOSTEVENT:
+ case WMFW_CTL_TYPE_FWEVENT:
ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
WMFW_CTL_FLAG_SYS |
WMFW_CTL_FLAG_VOLATILE |
@@ -1459,6 +1478,8 @@ struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, in
{
struct cs_dsp_coeff_ctl *pos, *rslt = NULL;
+ lockdep_assert_held(&dsp->pwr_lock);
+
list_for_each_entry(pos, &dsp->ctl_list, list) {
if (!pos->subname)
continue;
@@ -1554,6 +1575,8 @@ struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
{
struct cs_dsp_alg_region *alg_region;
+ lockdep_assert_held(&dsp->pwr_lock);
+
list_for_each_entry(alg_region, &dsp->alg_regions, list) {
if (id == alg_region->alg && type == alg_region->type)
return alg_region;
@@ -1565,7 +1588,7 @@ EXPORT_SYMBOL_GPL(cs_dsp_find_alg_region);
static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp,
int type, __be32 id,
- __be32 base)
+ __be32 ver, __be32 base)
{
struct cs_dsp_alg_region *alg_region;
@@ -1575,6 +1598,7 @@ static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp,
alg_region->type = type;
alg_region->alg = be32_to_cpu(id);
+ alg_region->ver = be32_to_cpu(ver);
alg_region->base = be32_to_cpu(base);
list_add_tail(&alg_region->list, &dsp->alg_regions);
@@ -1624,14 +1648,14 @@ static void cs_dsp_parse_wmfw_v3_id_header(struct cs_dsp *dsp,
nalgs);
}
-static int cs_dsp_create_regions(struct cs_dsp *dsp, __be32 id, int nregions,
- const int *type, __be32 *base)
+static int cs_dsp_create_regions(struct cs_dsp *dsp, __be32 id, __be32 ver,
+ int nregions, const int *type, __be32 *base)
{
struct cs_dsp_alg_region *alg_region;
int i;
for (i = 0; i < nregions; i++) {
- alg_region = cs_dsp_create_region(dsp, type[i], id, base[i]);
+ alg_region = cs_dsp_create_region(dsp, type[i], id, ver, base[i]);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
}
@@ -1666,12 +1690,14 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp)
cs_dsp_parse_wmfw_id_header(dsp, &adsp1_id.fw, n_algs);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM,
- adsp1_id.fw.id, adsp1_id.zm);
+ adsp1_id.fw.id, adsp1_id.fw.ver,
+ adsp1_id.zm);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM,
- adsp1_id.fw.id, adsp1_id.dm);
+ adsp1_id.fw.id, adsp1_id.fw.ver,
+ adsp1_id.dm);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
@@ -1694,6 +1720,7 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp)
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM,
adsp1_alg[i].alg.id,
+ adsp1_alg[i].alg.ver,
adsp1_alg[i].dm);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
@@ -1715,6 +1742,7 @@ static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp)
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM,
adsp1_alg[i].alg.id,
+ adsp1_alg[i].alg.ver,
adsp1_alg[i].zm);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
@@ -1767,17 +1795,20 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
cs_dsp_parse_wmfw_id_header(dsp, &adsp2_id.fw, n_algs);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
- adsp2_id.fw.id, adsp2_id.xm);
+ adsp2_id.fw.id, adsp2_id.fw.ver,
+ adsp2_id.xm);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM,
- adsp2_id.fw.id, adsp2_id.ym);
+ adsp2_id.fw.id, adsp2_id.fw.ver,
+ adsp2_id.ym);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM,
- adsp2_id.fw.id, adsp2_id.zm);
+ adsp2_id.fw.id, adsp2_id.fw.ver,
+ adsp2_id.zm);
if (IS_ERR(alg_region))
return PTR_ERR(alg_region);
@@ -1802,6 +1833,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
adsp2_alg[i].alg.id,
+ adsp2_alg[i].alg.ver,
adsp2_alg[i].xm);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
@@ -1823,6 +1855,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM,
adsp2_alg[i].alg.id,
+ adsp2_alg[i].alg.ver,
adsp2_alg[i].ym);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
@@ -1844,6 +1877,7 @@ static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM,
adsp2_alg[i].alg.id,
+ adsp2_alg[i].alg.ver,
adsp2_alg[i].zm);
if (IS_ERR(alg_region)) {
ret = PTR_ERR(alg_region);
@@ -1869,7 +1903,7 @@ out:
return ret;
}
-static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id,
+static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id, __be32 ver,
__be32 xm_base, __be32 ym_base)
{
static const int types[] = {
@@ -1878,7 +1912,7 @@ static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id,
};
__be32 bases[] = { xm_base, xm_base, ym_base, ym_base };
- return cs_dsp_create_regions(dsp, id, ARRAY_SIZE(types), types, bases);
+ return cs_dsp_create_regions(dsp, id, ver, ARRAY_SIZE(types), types, bases);
}
static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
@@ -1906,7 +1940,7 @@ static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
cs_dsp_parse_wmfw_v3_id_header(dsp, &halo_id.fw, n_algs);
- ret = cs_dsp_halo_create_regions(dsp, halo_id.fw.id,
+ ret = cs_dsp_halo_create_regions(dsp, halo_id.fw.id, halo_id.fw.ver,
halo_id.xm_base, halo_id.ym_base);
if (ret)
return ret;
@@ -1930,6 +1964,7 @@ static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
be32_to_cpu(halo_alg[i].ym_base));
ret = cs_dsp_halo_create_regions(dsp, halo_alg[i].alg.id,
+ halo_alg[i].alg.ver,
halo_alg[i].xm_base,
halo_alg[i].ym_base);
if (ret)
@@ -1951,7 +1986,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
const struct cs_dsp_region *mem;
struct cs_dsp_alg_region *alg_region;
const char *region_name;
- int ret, pos, blocks, type, offset, reg;
+ int ret, pos, blocks, type, offset, reg, version;
+ char *text = NULL;
struct cs_dsp_buf *buf;
if (!firmware)
@@ -1973,6 +2009,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
switch (be32_to_cpu(hdr->rev) & 0xff) {
case 1:
+ case 2:
break;
default:
cs_dsp_err(dsp, "%s: Unsupported coefficient file format %d\n",
@@ -1995,6 +2032,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
type = le16_to_cpu(blk->type);
offset = le16_to_cpu(blk->offset);
+ version = le32_to_cpu(blk->ver) >> 8;
cs_dsp_dbg(dsp, "%s.%d: %x v%d.%d.%d\n",
file, blocks, le32_to_cpu(blk->id),
@@ -2008,6 +2046,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
region_name = "Unknown";
switch (type) {
case (WMFW_NAME_TEXT << 8):
+ text = kzalloc(le32_to_cpu(blk->len) + 1, GFP_KERNEL);
+ break;
case (WMFW_INFO_TEXT << 8):
case (WMFW_METADATA << 8):
break;
@@ -2052,6 +2092,16 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
alg_region = cs_dsp_find_alg_region(dsp, type,
le32_to_cpu(blk->id));
if (alg_region) {
+ if (version != alg_region->ver)
+ cs_dsp_warn(dsp,
+ "Algorithm coefficient version %d.%d.%d but expected %d.%d.%d\n",
+ (version >> 16) & 0xFF,
+ (version >> 8) & 0xFF,
+ version & 0xFF,
+ (alg_region->ver >> 16) & 0xFF,
+ (alg_region->ver >> 8) & 0xFF,
+ alg_region->ver & 0xFF);
+
reg = alg_region->base;
reg = dsp->ops->region_to_reg(mem, reg);
reg += offset;
@@ -2067,6 +2117,13 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
break;
}
+ if (text) {
+ memcpy(text, blk->data, le32_to_cpu(blk->len));
+ cs_dsp_info(dsp, "%s: %s\n", dsp->fw_name, text);
+ kfree(text);
+ text = NULL;
+ }
+
if (reg) {
if (le32_to_cpu(blk->len) >
firmware->size - pos - sizeof(*blk)) {
@@ -2117,6 +2174,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
out_fw:
regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
+ kfree(text);
return ret;
}
@@ -2600,6 +2658,12 @@ int cs_dsp_run(struct cs_dsp *dsp)
goto err;
}
+ if (dsp->client_ops->pre_run) {
+ ret = dsp->client_ops->pre_run(dsp);
+ if (ret)
+ goto err;
+ }
+
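The new pre_run callback gives client drivers a hook after memory setup but before control sync; a client could wire it up along these lines (hypothetical names, assuming the usual struct cs_dsp_client_ops layout):

	static int my_codec_pre_run(struct cs_dsp *dsp)
	{
		/* e.g. program boot-time tunables before the core starts */
		return 0;
	}

	static const struct cs_dsp_client_ops my_codec_dsp_ops = {
		.pre_run = my_codec_pre_run,
	};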
/* Sync set controls */
ret = cs_dsp_coeff_sync_controls(dsp);
if (ret != 0)
@@ -2680,10 +2744,16 @@ EXPORT_SYMBOL_GPL(cs_dsp_stop);
static int cs_dsp_halo_start_core(struct cs_dsp *dsp)
{
- return regmap_update_bits(dsp->regmap,
- dsp->base + HALO_CCM_CORE_CONTROL,
- HALO_CORE_RESET | HALO_CORE_EN,
- HALO_CORE_RESET | HALO_CORE_EN);
+ int ret;
+
+ ret = regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL,
+ HALO_CORE_RESET | HALO_CORE_EN,
+ HALO_CORE_RESET | HALO_CORE_EN);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL,
+ HALO_CORE_RESET, 0);
}
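The start sequence now pulses reset explicitly: the first update asserts HALO_CORE_RESET together with HALO_CORE_EN, the second clears only the reset bit so the core starts running. Both calls rely on regmap_update_bits() being a masked read-modify-write, conceptually:

	/* new = (old & ~mask) | (val & mask), written back if changed */
	regmap_update_bits(map, reg, mask, val);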
static void cs_dsp_halo_stop_core(struct cs_dsp *dsp)
@@ -2789,6 +2859,8 @@ int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int me
unsigned int reg;
int ret;
+ lockdep_assert_held(&dsp->pwr_lock);
+
if (!mem)
return -EINVAL;
@@ -2842,6 +2914,8 @@ int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_ad
__be32 val = cpu_to_be32(data & 0x00ffffffu);
unsigned int reg;
+ lockdep_assert_held(&dsp->pwr_lock);
+
if (!mem)
return -EINVAL;
diff --git a/drivers/firmware/dmi-sysfs.c b/drivers/firmware/dmi-sysfs.c
index 8b8127fa8955..3a353776bd34 100644
--- a/drivers/firmware/dmi-sysfs.c
+++ b/drivers/firmware/dmi-sysfs.c
@@ -302,12 +302,12 @@ static struct attribute *dmi_sysfs_sel_attrs[] = {
&dmi_sysfs_attr_sel_per_log_type_descriptor_length.attr,
NULL,
};
-
+ATTRIBUTE_GROUPS(dmi_sysfs_sel);
+
static struct kobj_type dmi_system_event_log_ktype = {
.release = dmi_entry_free,
.sysfs_ops = &dmi_sysfs_specialize_attr_ops,
- .default_attrs = dmi_sysfs_sel_attrs,
+ .default_groups = dmi_sysfs_sel_groups,
};
typedef u8 (*sel_io_reader)(const struct dmi_system_event_log *sel,
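For context, ATTRIBUTE_GROUPS(dmi_sysfs_sel) generates the wrappers that .default_groups consumes; the sysfs.h macro expands to roughly:

	static const struct attribute_group dmi_sysfs_sel_group = {
		.attrs = dmi_sysfs_sel_attrs,
	};
	static const struct attribute_group *dmi_sysfs_sel_groups[] = {
		&dmi_sysfs_sel_group,
		NULL,
	};

The same pattern applies to the other ATTRIBUTE_GROUPS() conversions below.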
@@ -518,6 +518,7 @@ static struct attribute *dmi_sysfs_entry_attrs[] = {
&dmi_sysfs_attr_entry_position.attr,
NULL,
};
+ATTRIBUTE_GROUPS(dmi_sysfs_entry);
static ssize_t dmi_entry_raw_read_helper(struct dmi_sysfs_entry *entry,
const struct dmi_header *dh,
@@ -565,7 +566,7 @@ static void dmi_sysfs_entry_release(struct kobject *kobj)
static struct kobj_type dmi_sysfs_entry_ktype = {
.release = dmi_sysfs_entry_release,
.sysfs_ops = &dmi_sysfs_attr_ops,
- .default_attrs = dmi_sysfs_entry_attrs,
+ .default_groups = dmi_sysfs_entry_groups,
};
static struct kset *dmi_kset;
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index 14d0970a7198..69353dd0ea22 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -574,14 +574,6 @@ static EDD_DEVICE_ATTR(interface, 0444, edd_show_interface, edd_has_edd30);
static EDD_DEVICE_ATTR(host_bus, 0444, edd_show_host_bus, edd_has_edd30);
static EDD_DEVICE_ATTR(mbr_signature, 0444, edd_show_mbr_signature, edd_has_mbr_signature);
-
-/* These are default attributes that are added for every edd
- * device discovered. There are none.
- */
-static struct attribute * def_attrs[] = {
- NULL,
-};
-
/* These attributes are conditional and only added for some devices. */
static struct edd_attribute * edd_attrs[] = {
&edd_attr_raw_data,
@@ -619,7 +611,6 @@ static void edd_release(struct kobject * kobj)
static struct kobj_type edd_ktype = {
.release = edd_release,
.sysfs_ops = &edd_attr_ops,
- .default_attrs = def_attrs,
};
static struct kset *edd_kset;
diff --git a/drivers/firmware/efi/efi-init.c b/drivers/firmware/efi/efi-init.c
index b19ce1a83f91..b2c829e95bd1 100644
--- a/drivers/firmware/efi/efi-init.c
+++ b/drivers/firmware/efi/efi-init.c
@@ -235,6 +235,11 @@ void __init efi_init(void)
}
reserve_regions();
+ /*
+ * Any capping of the usable memory range must come after the
+ * memblock_add() calls; memblock is fully populated at this point,
+ * so it is time to apply the cap.
+ */
+ early_init_dt_check_for_usable_mem_range();
efi_esrt_init();
efi_mokvar_table_init();
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index ae79c3300129..7de3f5b6e8d0 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -722,6 +722,13 @@ void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
systab_hdr->revision >> 16,
systab_hdr->revision & 0xffff,
vendor);
+
+ if (IS_ENABLED(CONFIG_X86_64) &&
+ systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
+ !strcmp(vendor, "Apple")) {
+ pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
+ efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
+ }
}
static __initdata char memory_type_name[][13] = {
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 2363fee9211c..9cc556013d08 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -119,9 +119,9 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
if (image->image_base != _text)
efi_err("FIRMWARE BUG: efi_loaded_image_t::image_base has bogus value\n");
- if (!IS_ALIGNED((u64)_text, EFI_KIMG_ALIGN))
- efi_err("FIRMWARE BUG: kernel image not aligned on %ldk boundary\n",
- EFI_KIMG_ALIGN >> 10);
+ if (!IS_ALIGNED((u64)_text, SEGMENT_ALIGN))
+ efi_err("FIRMWARE BUG: kernel image not aligned on %dk boundary\n",
+ SEGMENT_ALIGN >> 10);
kernel_size = _edata - _text;
kernel_memsize = kernel_size + (_end - _edata);
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index e87e7f1b1a33..da93864d7abc 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -40,6 +40,8 @@
#ifdef CONFIG_ARM64
# define EFI_RT_VIRTUAL_LIMIT DEFAULT_MAP_WINDOW_64
+#elif defined(CONFIG_RISCV)
+# define EFI_RT_VIRTUAL_LIMIT TASK_SIZE_MIN
#else
# define EFI_RT_VIRTUAL_LIMIT TASK_SIZE
#endif
diff --git a/drivers/firmware/google/Kconfig b/drivers/firmware/google/Kconfig
index 97968aece54f..931544c9f63d 100644
--- a/drivers/firmware/google/Kconfig
+++ b/drivers/firmware/google/Kconfig
@@ -3,9 +3,9 @@ menuconfig GOOGLE_FIRMWARE
bool "Google Firmware Drivers"
default n
help
- These firmware drivers are used by Google's servers. They are
- only useful if you are working directly on one of their
- proprietary servers. If in doubt, say "N".
+ These firmware drivers are used by Google servers,
+ Chromebooks and other devices using coreboot firmware.
+ If in doubt, say "N".
if GOOGLE_FIRMWARE
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 24945e2da77b..8e59be3782cb 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -69,6 +69,7 @@ static struct attribute *def_attrs[] = {
&memmap_type_attr.attr,
NULL
};
+ATTRIBUTE_GROUPS(def);
static const struct sysfs_ops memmap_attr_ops = {
.show = memmap_attr_show,
@@ -118,7 +119,7 @@ static void __meminit release_firmware_map_entry(struct kobject *kobj)
static struct kobj_type __refdata memmap_ktype = {
.release = release_firmware_map_entry,
.sysfs_ops = &memmap_attr_ops,
- .default_attrs = def_attrs,
+ .default_groups = def_groups,
};
/*
diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c
index 172c751a4f6c..a69399a6b7c0 100644
--- a/drivers/firmware/qemu_fw_cfg.c
+++ b/drivers/firmware/qemu_fw_cfg.c
@@ -388,14 +388,13 @@ static void fw_cfg_sysfs_cache_cleanup(void)
struct fw_cfg_sysfs_entry *entry, *next;
list_for_each_entry_safe(entry, next, &fw_cfg_entry_cache, list) {
- /* will end up invoking fw_cfg_sysfs_cache_delist()
- * via each object's release() method (i.e. destructor)
- */
+ fw_cfg_sysfs_cache_delist(entry);
+ kobject_del(&entry->kobj);
kobject_put(&entry->kobj);
}
}
-/* default_attrs: per-entry attributes and show methods */
+/* per-entry attributes and show methods */
#define FW_CFG_SYSFS_ATTR(_attr) \
struct fw_cfg_sysfs_attribute fw_cfg_sysfs_attr_##_attr = { \
@@ -428,6 +427,7 @@ static struct attribute *fw_cfg_sysfs_entry_attrs[] = {
&fw_cfg_sysfs_attr_name.attr,
NULL,
};
+ATTRIBUTE_GROUPS(fw_cfg_sysfs_entry);
/* sysfs_ops: find fw_cfg_[entry, attribute] and call appropriate show method */
static ssize_t fw_cfg_sysfs_attr_show(struct kobject *kobj, struct attribute *a,
@@ -448,13 +448,12 @@ static void fw_cfg_sysfs_release_entry(struct kobject *kobj)
{
struct fw_cfg_sysfs_entry *entry = to_entry(kobj);
- fw_cfg_sysfs_cache_delist(entry);
kfree(entry);
}
/* kobj_type: ties together all properties required to register an entry */
static struct kobj_type fw_cfg_sysfs_entry_ktype = {
- .default_attrs = fw_cfg_sysfs_entry_attrs,
+ .default_groups = fw_cfg_sysfs_entry_groups,
.sysfs_ops = &fw_cfg_sysfs_attr_ops,
.release = fw_cfg_sysfs_release_entry,
};
@@ -601,20 +600,18 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
/* set file entry information */
entry->size = be32_to_cpu(f->size);
entry->select = be16_to_cpu(f->select);
- memcpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
+ strscpy(entry->name, f->name, FW_CFG_MAX_FILE_PATH);
/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
fw_cfg_sel_ko, "%d", entry->select);
- if (err) {
- kobject_put(&entry->kobj);
- return err;
- }
+ if (err)
+ goto err_put_entry;
/* add raw binary content access */
err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
if (err)
- goto err_add_raw;
+ goto err_del_entry;
/* try adding "/sys/firmware/qemu_fw_cfg/by_name/" symlink */
fw_cfg_build_symlink(fw_cfg_fname_kset, &entry->kobj, entry->name);
@@ -623,9 +620,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
fw_cfg_sysfs_cache_enlist(entry);
return 0;
-err_add_raw:
+err_del_entry:
kobject_del(&entry->kobj);
- kfree(entry);
+err_put_entry:
+ kobject_put(&entry->kobj);
return err;
}
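The reworked paths follow the kobject rule that once kobject_init_and_add() has run, the object must be dropped with kobject_put() (which ends up in the ktype's release()) rather than freed directly; the general shape is:

	err = kobject_init_and_add(&entry->kobj, &ktype, parent, "%d", sel);
	if (err) {
		kobject_put(&entry->kobj);	/* release() frees entry */
		return err;
	}

The memcpy() to strscpy() switch additionally guarantees the copied name is NUL-terminated within FW_CFG_MAX_FILE_PATH.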
diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
index b86761904949..303a491e520d 100644
--- a/drivers/firmware/sysfb_simplefb.c
+++ b/drivers/firmware/sysfb_simplefb.c
@@ -113,12 +113,16 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
sysfb_apply_efi_quirks(pd);
ret = platform_device_add_resources(pd, &res, 1);
- if (ret)
+ if (ret) {
+ platform_device_put(pd);
return ret;
+ }
ret = platform_device_add_data(pd, mode, sizeof(*mode));
- if (ret)
+ if (ret) {
+ platform_device_put(pd);
return ret;
+ }
return platform_device_add(pd);
}
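Before platform_device_add() succeeds, the device holds only the reference taken at allocation, so these early-exit paths must drop it with platform_device_put() instead of leaking it; sketch of the fixed flow:

	ret = platform_device_add_data(pd, mode, sizeof(*mode));
	if (ret) {
		platform_device_put(pd);	/* undoes the earlier allocation */
		return ret;
	}
	return platform_device_add(pd);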
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 0dd117860b63..450c5f6a1cbf 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -23,6 +23,7 @@
#include <linux/hashtable.h>
#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/firmware/xlnx-event-manager.h>
#include "zynqmp-debug.h"
/* Max HashMap Order for PM API feature check (1<<7 = 128) */
@@ -38,6 +39,8 @@
static bool feature_check_enabled;
static DEFINE_HASHTABLE(pm_api_features_map, PM_API_FEATURE_CHECK_MAX_ORDER);
+static struct platform_device *em_dev;
+
/**
* struct pm_api_feature_data - PM API Feature data
* @pm_api_id: PM API Id, used as key to index into hashmap
@@ -160,7 +163,7 @@ static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2,
*
* Return: Returns status, either success or error+reason
*/
-static int zynqmp_pm_feature(u32 api_id)
+int zynqmp_pm_feature(const u32 api_id)
{
int ret;
u32 ret_payload[PAYLOAD_ARG_CNT];
@@ -197,6 +200,7 @@ static int zynqmp_pm_feature(u32 api_id)
return ret;
}
+EXPORT_SYMBOL_GPL(zynqmp_pm_feature);
/**
* zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer
@@ -1117,6 +1121,29 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out)
EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
/**
+ * zynqmp_pm_register_notifier() - PM API to register a subsystem to be
+ *                                 notified about a specific event/error
+ * @node:	Node ID to which the event is related.
+ * @event:	Mask of the error events to be notified about.
+ * @wake:	Wake the subsystem upon capturing the event if 1.
+ * @enable:	Enable the registration if 1, disable it if 0.
+ *
+ * This function is used to register or unregister for a particular node-event
+ * combination in firmware.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable)
+{
+ return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, node, event,
+ wake, enable, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_register_notifier);
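A hypothetical caller would register and later unregister a node/event pair like this (node_id and event_mask are caller-specific values):

	ret = zynqmp_pm_register_notifier(node_id, event_mask, 0, 1);
	if (ret)
		return ret;
	/* ... handle notifications ... */
	zynqmp_pm_register_notifier(node_id, event_mask, 0, 0);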
+
+/**
* zynqmp_pm_system_shutdown - PM call to request a system shutdown or restart
* @type: Shutdown or restart? 0 for shutdown, 1 for restart
* @subtype: Specifies which system should be restarted or shut down
@@ -1471,6 +1498,15 @@ static int zynqmp_firmware_probe(struct platform_device *pdev)
zynqmp_pm_api_debugfs_init();
+ np = of_find_compatible_node(NULL, NULL, "xlnx,versal");
+ if (np) {
+ em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager",
+ -1, NULL, 0);
+ if (IS_ERR(em_dev))
+ dev_err_probe(&pdev->dev, PTR_ERR(em_dev), "failed to register Xilinx event manager device\n");
+ }
+ of_node_put(np);
+
return of_platform_populate(dev->of_node, NULL, NULL, dev);
}
@@ -1488,6 +1524,8 @@ static int zynqmp_firmware_remove(struct platform_device *pdev)
kfree(feature_data);
}
+ platform_device_unregister(em_dev);
+
return 0;
}
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index ccf4546eff29..4ffb9da537d8 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -652,19 +652,15 @@ static int altera_cvp_probe(struct pci_dev *pdev,
snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s",
ALTERA_CVP_MGR_NAME, pci_name(pdev));
- mgr = devm_fpga_mgr_create(&pdev->dev, conf->mgr_name,
- &altera_cvp_ops, conf);
- if (!mgr) {
- ret = -ENOMEM;
+ mgr = fpga_mgr_register(&pdev->dev, conf->mgr_name,
+ &altera_cvp_ops, conf);
+ if (IS_ERR(mgr)) {
+ ret = PTR_ERR(mgr);
goto err_unmap;
}
pci_set_drvdata(pdev, mgr);
- ret = fpga_mgr_register(mgr);
- if (ret)
- goto err_unmap;
-
return 0;
err_unmap:
diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c
index a78e49c63c64..ff3a646fd9e3 100644
--- a/drivers/fpga/altera-fpga2sdram.c
+++ b/drivers/fpga/altera-fpga2sdram.c
@@ -121,17 +121,13 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
/* Get f2s bridge configuration saved in handoff register */
regmap_read(sysmgr, SYSMGR_ISWGRP_HANDOFF3, &priv->mask);
- br = devm_fpga_bridge_create(dev, F2S_BRIDGE_NAME,
- &altera_fpga2sdram_br_ops, priv);
- if (!br)
- return -ENOMEM;
+ br = fpga_bridge_register(dev, F2S_BRIDGE_NAME,
+ &altera_fpga2sdram_br_ops, priv);
+ if (IS_ERR(br))
+ return PTR_ERR(br);
platform_set_drvdata(pdev, br);
- ret = fpga_bridge_register(br);
- if (ret)
- return ret;
-
dev_info(dev, "driver initialized with handoff %08x\n", priv->mask);
if (!of_property_read_u32(dev->of_node, "bridge-enable", &enable)) {
diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c
index 7d22a44d652e..445f4b011167 100644
--- a/drivers/fpga/altera-freeze-bridge.c
+++ b/drivers/fpga/altera-freeze-bridge.c
@@ -246,14 +246,14 @@ static int altera_freeze_br_probe(struct platform_device *pdev)
priv->base_addr = base_addr;
- br = devm_fpga_bridge_create(dev, FREEZE_BRIDGE_NAME,
- &altera_freeze_br_br_ops, priv);
- if (!br)
- return -ENOMEM;
+ br = fpga_bridge_register(dev, FREEZE_BRIDGE_NAME,
+ &altera_freeze_br_br_ops, priv);
+ if (IS_ERR(br))
+ return PTR_ERR(br);
platform_set_drvdata(pdev, br);
- return fpga_bridge_register(br);
+ return 0;
}
static int altera_freeze_br_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c
index 77b95f251821..aa758426c22b 100644
--- a/drivers/fpga/altera-hps2fpga.c
+++ b/drivers/fpga/altera-hps2fpga.c
@@ -180,19 +180,15 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev)
}
}
- br = devm_fpga_bridge_create(dev, priv->name,
- &altera_hps2fpga_br_ops, priv);
- if (!br) {
- ret = -ENOMEM;
+ br = fpga_bridge_register(dev, priv->name,
+ &altera_hps2fpga_br_ops, priv);
+ if (IS_ERR(br)) {
+ ret = PTR_ERR(br);
goto err;
}
platform_set_drvdata(pdev, br);
- ret = fpga_bridge_register(br);
- if (ret)
- goto err;
-
return 0;
err:
diff --git a/drivers/fpga/altera-pr-ip-core.c b/drivers/fpga/altera-pr-ip-core.c
index dfdf21ed34c4..be0667968d33 100644
--- a/drivers/fpga/altera-pr-ip-core.c
+++ b/drivers/fpga/altera-pr-ip-core.c
@@ -191,11 +191,8 @@ int alt_pr_register(struct device *dev, void __iomem *reg_base)
(val & ALT_PR_CSR_STATUS_MSK) >> ALT_PR_CSR_STATUS_SFT,
(int)(val & ALT_PR_CSR_PR_START));
- mgr = devm_fpga_mgr_create(dev, dev_name(dev), &alt_pr_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(dev, mgr);
+ mgr = devm_fpga_mgr_register(dev, dev_name(dev), &alt_pr_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
}
EXPORT_SYMBOL_GPL(alt_pr_register);
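PTR_ERR_OR_ZERO(), used here and throughout the conversions below, collapses the usual error-pointer check:

	/* equivalent to: IS_ERR(mgr) ? PTR_ERR(mgr) : 0 */
	return PTR_ERR_OR_ZERO(mgr);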
diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
index 23bfd4d1ad0f..5e1e009dba89 100644
--- a/drivers/fpga/altera-ps-spi.c
+++ b/drivers/fpga/altera-ps-spi.c
@@ -302,12 +302,9 @@ static int altera_ps_probe(struct spi_device *spi)
snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s %s",
dev_driver_string(&spi->dev), dev_name(&spi->dev));
- mgr = devm_fpga_mgr_create(&spi->dev, conf->mgr_name,
- &altera_ps_ops, conf);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(&spi->dev, mgr);
+ mgr = devm_fpga_mgr_register(&spi->dev, conf->mgr_name,
+ &altera_ps_ops, conf);
+ return PTR_ERR_OR_ZERO(mgr);
}
static const struct spi_device_id altera_ps_spi_ids[] = {
diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c
index 3ff9f3a687ce..808d1f4d76df 100644
--- a/drivers/fpga/dfl-fme-br.c
+++ b/drivers/fpga/dfl-fme-br.c
@@ -68,14 +68,14 @@ static int fme_br_probe(struct platform_device *pdev)
priv->pdata = dev_get_platdata(dev);
- br = devm_fpga_bridge_create(dev, "DFL FPGA FME Bridge",
- &fme_bridge_ops, priv);
- if (!br)
- return -ENOMEM;
+ br = fpga_bridge_register(dev, "DFL FPGA FME Bridge",
+ &fme_bridge_ops, priv);
+ if (IS_ERR(br))
+ return PTR_ERR(br);
platform_set_drvdata(pdev, br);
- return fpga_bridge_register(br);
+ return 0;
}
static int fme_br_remove(struct platform_device *pdev)
diff --git a/drivers/fpga/dfl-fme-mgr.c b/drivers/fpga/dfl-fme-mgr.c
index 313420405d5e..af0785783b52 100644
--- a/drivers/fpga/dfl-fme-mgr.c
+++ b/drivers/fpga/dfl-fme-mgr.c
@@ -276,7 +276,7 @@ static void fme_mgr_get_compat_id(void __iomem *fme_pr,
static int fme_mgr_probe(struct platform_device *pdev)
{
struct dfl_fme_mgr_pdata *pdata = dev_get_platdata(&pdev->dev);
- struct fpga_compat_id *compat_id;
+ struct fpga_manager_info info = { 0 };
struct device *dev = &pdev->dev;
struct fme_mgr_priv *priv;
struct fpga_manager *mgr;
@@ -296,20 +296,16 @@ static int fme_mgr_probe(struct platform_device *pdev)
return PTR_ERR(priv->ioaddr);
}
- compat_id = devm_kzalloc(dev, sizeof(*compat_id), GFP_KERNEL);
- if (!compat_id)
+ info.name = "DFL FME FPGA Manager";
+ info.mops = &fme_mgr_ops;
+ info.priv = priv;
+ info.compat_id = devm_kzalloc(dev, sizeof(*info.compat_id), GFP_KERNEL);
+ if (!info.compat_id)
return -ENOMEM;
- fme_mgr_get_compat_id(priv->ioaddr, compat_id);
-
- mgr = devm_fpga_mgr_create(dev, "DFL FME FPGA Manager",
- &fme_mgr_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- mgr->compat_id = compat_id;
-
- return devm_fpga_mgr_register(dev, mgr);
+ fme_mgr_get_compat_id(priv->ioaddr, info.compat_id);
+ mgr = devm_fpga_mgr_register_full(dev, &info);
+ return PTR_ERR_OR_ZERO(mgr);
}
static struct platform_driver fme_mgr_driver = {
diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c
index 1eeb42af1012..4aebde0a7f1c 100644
--- a/drivers/fpga/dfl-fme-region.c
+++ b/drivers/fpga/dfl-fme-region.c
@@ -30,6 +30,7 @@ static int fme_region_get_bridges(struct fpga_region *region)
static int fme_region_probe(struct platform_device *pdev)
{
struct dfl_fme_region_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct fpga_region_info info = { 0 };
struct device *dev = &pdev->dev;
struct fpga_region *region;
struct fpga_manager *mgr;
@@ -39,20 +40,18 @@ static int fme_region_probe(struct platform_device *pdev)
if (IS_ERR(mgr))
return -EPROBE_DEFER;
- region = devm_fpga_region_create(dev, mgr, fme_region_get_bridges);
- if (!region) {
- ret = -ENOMEM;
+ info.mgr = mgr;
+ info.compat_id = mgr->compat_id;
+ info.get_bridges = fme_region_get_bridges;
+ info.priv = pdata;
+ region = fpga_region_register_full(dev, &info);
+ if (IS_ERR(region)) {
+ ret = PTR_ERR(region);
goto eprobe_mgr_put;
}
- region->priv = pdata;
- region->compat_id = mgr->compat_id;
platform_set_drvdata(pdev, region);
- ret = fpga_region_register(region);
- if (ret)
- goto eprobe_mgr_put;
-
dev_dbg(dev, "DFL FME FPGA Region probed\n");
return 0;
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index f86666cf2c6a..599bb21d86af 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -1407,19 +1407,15 @@ dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
if (!cdev)
return ERR_PTR(-ENOMEM);
- cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
- if (!cdev->region) {
- ret = -ENOMEM;
- goto free_cdev_exit;
- }
-
cdev->parent = info->dev;
mutex_init(&cdev->lock);
INIT_LIST_HEAD(&cdev->port_dev_list);
- ret = fpga_region_register(cdev->region);
- if (ret)
+ cdev->region = fpga_region_register(info->dev, NULL, NULL);
+ if (IS_ERR(cdev->region)) {
+ ret = PTR_ERR(cdev->region);
goto free_cdev_exit;
+ }
/* create and init build info for enumeration */
binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index 798f55670646..16f2b164a178 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -312,36 +312,41 @@ static struct attribute *fpga_bridge_attrs[] = {
ATTRIBUTE_GROUPS(fpga_bridge);
/**
- * fpga_bridge_create - create and initialize a struct fpga_bridge
+ * fpga_bridge_register - create and register an FPGA Bridge device
* @parent: FPGA bridge device from pdev
* @name: FPGA bridge name
* @br_ops: pointer to structure of fpga bridge ops
* @priv: FPGA bridge private data
*
- * The caller of this function is responsible for freeing the bridge with
- * fpga_bridge_free(). Using devm_fpga_bridge_create() instead is recommended.
- *
- * Return: struct fpga_bridge or NULL
+ * Return: struct fpga_bridge pointer or ERR_PTR()
*/
-struct fpga_bridge *fpga_bridge_create(struct device *parent, const char *name,
- const struct fpga_bridge_ops *br_ops,
- void *priv)
+struct fpga_bridge *
+fpga_bridge_register(struct device *parent, const char *name,
+ const struct fpga_bridge_ops *br_ops,
+ void *priv)
{
struct fpga_bridge *bridge;
int id, ret;
+ if (!br_ops) {
+ dev_err(parent, "Attempt to register without fpga_bridge_ops\n");
+ return ERR_PTR(-EINVAL);
+ }
+
if (!name || !strlen(name)) {
dev_err(parent, "Attempt to register with no name!\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
if (!bridge)
- return NULL;
+ return ERR_PTR(-ENOMEM);
id = ida_simple_get(&fpga_bridge_ida, 0, 0, GFP_KERNEL);
- if (id < 0)
+ if (id < 0) {
+ ret = id;
goto error_kfree;
+ }
mutex_init(&bridge->mutex);
INIT_LIST_HEAD(&bridge->node);
@@ -350,17 +355,23 @@ struct fpga_bridge *fpga_bridge_create(struct device *parent, const char *name,
bridge->br_ops = br_ops;
bridge->priv = priv;
- device_initialize(&bridge->dev);
bridge->dev.groups = br_ops->groups;
bridge->dev.class = fpga_bridge_class;
bridge->dev.parent = parent;
bridge->dev.of_node = parent->of_node;
bridge->dev.id = id;
+ of_platform_populate(bridge->dev.of_node, NULL, NULL, &bridge->dev);
ret = dev_set_name(&bridge->dev, "br%d", id);
if (ret)
goto error_device;
+ ret = device_register(&bridge->dev);
+ if (ret) {
+ put_device(&bridge->dev);
+ return ERR_PTR(ret);
+ }
+
return bridge;
error_device:
@@ -368,88 +379,7 @@ error_device:
error_kfree:
kfree(bridge);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(fpga_bridge_create);
-
-/**
- * fpga_bridge_free - free an fpga bridge created by fpga_bridge_create()
- * @bridge: FPGA bridge struct
- */
-void fpga_bridge_free(struct fpga_bridge *bridge)
-{
- ida_simple_remove(&fpga_bridge_ida, bridge->dev.id);
- kfree(bridge);
-}
-EXPORT_SYMBOL_GPL(fpga_bridge_free);
-
-static void devm_fpga_bridge_release(struct device *dev, void *res)
-{
- struct fpga_bridge *bridge = *(struct fpga_bridge **)res;
-
- fpga_bridge_free(bridge);
-}
-
-/**
- * devm_fpga_bridge_create - create and init a managed struct fpga_bridge
- * @parent: FPGA bridge device from pdev
- * @name: FPGA bridge name
- * @br_ops: pointer to structure of fpga bridge ops
- * @priv: FPGA bridge private data
- *
- * This function is intended for use in an FPGA bridge driver's probe function.
- * After the bridge driver creates the struct with devm_fpga_bridge_create(), it
- * should register the bridge with fpga_bridge_register(). The bridge driver's
- * remove function should call fpga_bridge_unregister(). The bridge struct
- * allocated with this function will be freed automatically on driver detach.
- * This includes the case of a probe function returning error before calling
- * fpga_bridge_register(), the struct will still get cleaned up.
- *
- * Return: struct fpga_bridge or NULL
- */
-struct fpga_bridge
-*devm_fpga_bridge_create(struct device *parent, const char *name,
- const struct fpga_bridge_ops *br_ops, void *priv)
-{
- struct fpga_bridge **ptr, *bridge;
-
- ptr = devres_alloc(devm_fpga_bridge_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
-
- bridge = fpga_bridge_create(parent, name, br_ops, priv);
- if (!bridge) {
- devres_free(ptr);
- } else {
- *ptr = bridge;
- devres_add(parent, ptr);
- }
-
- return bridge;
-}
-EXPORT_SYMBOL_GPL(devm_fpga_bridge_create);
-
-/**
- * fpga_bridge_register - register an FPGA bridge
- *
- * @bridge: FPGA bridge struct
- *
- * Return: 0 for success, error code otherwise.
- */
-int fpga_bridge_register(struct fpga_bridge *bridge)
-{
- struct device *dev = &bridge->dev;
- int ret;
-
- ret = device_add(dev);
- if (ret)
- return ret;
-
- of_platform_populate(dev->of_node, NULL, NULL, dev);
-
- dev_info(dev->parent, "fpga bridge [%s] registered\n", bridge->name);
-
- return 0;
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(fpga_bridge_register);
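With creation and registration combined, a typical non-devm bridge probe reduces to this shape (names hypothetical):

	br = fpga_bridge_register(dev, "my-bridge", &my_br_ops, priv);
	if (IS_ERR(br))
		return PTR_ERR(br);
	platform_set_drvdata(pdev, br);
	return 0;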
@@ -475,6 +405,10 @@ EXPORT_SYMBOL_GPL(fpga_bridge_unregister);
static void fpga_bridge_dev_release(struct device *dev)
{
+ struct fpga_bridge *bridge = to_fpga_bridge(dev);
+
+ ida_simple_remove(&fpga_bridge_ida, bridge->dev.id);
+ kfree(bridge);
}
static int __init fpga_bridge_dev_init(void)
diff --git a/drivers/fpga/fpga-mgr.c b/drivers/fpga/fpga-mgr.c
index aa30889e2320..d49a9ce34568 100644
--- a/drivers/fpga/fpga-mgr.c
+++ b/drivers/fpga/fpga-mgr.c
@@ -592,49 +592,49 @@ void fpga_mgr_unlock(struct fpga_manager *mgr)
EXPORT_SYMBOL_GPL(fpga_mgr_unlock);
/**
- * fpga_mgr_create - create and initialize an FPGA manager struct
+ * fpga_mgr_register_full - create and register an FPGA Manager device
* @parent: fpga manager device from pdev
- * @name: fpga manager name
- * @mops: pointer to structure of fpga manager ops
- * @priv: fpga manager private data
+ * @info: parameters for fpga manager
*
- * The caller of this function is responsible for freeing the struct with
- * fpga_mgr_free(). Using devm_fpga_mgr_create() instead is recommended.
+ * The caller of this function is responsible for calling fpga_mgr_unregister().
+ * Using devm_fpga_mgr_register_full() instead is recommended.
*
- * Return: pointer to struct fpga_manager or NULL
+ * Return: pointer to struct fpga_manager or ERR_PTR()
*/
-struct fpga_manager *fpga_mgr_create(struct device *parent, const char *name,
- const struct fpga_manager_ops *mops,
- void *priv)
+struct fpga_manager *
+fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
{
+ const struct fpga_manager_ops *mops = info->mops;
struct fpga_manager *mgr;
int id, ret;
if (!mops) {
dev_err(parent, "Attempt to register without fpga_manager_ops\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
- if (!name || !strlen(name)) {
+ if (!info->name || !strlen(info->name)) {
dev_err(parent, "Attempt to register with no name!\n");
- return NULL;
+ return ERR_PTR(-EINVAL);
}
mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
if (!mgr)
- return NULL;
+ return ERR_PTR(-ENOMEM);
id = ida_simple_get(&fpga_mgr_ida, 0, 0, GFP_KERNEL);
- if (id < 0)
+ if (id < 0) {
+ ret = id;
goto error_kfree;
+ }
mutex_init(&mgr->ref_mutex);
- mgr->name = name;
- mgr->mops = mops;
- mgr->priv = priv;
+ mgr->name = info->name;
+ mgr->mops = info->mops;
+ mgr->priv = info->priv;
+ mgr->compat_id = info->compat_id;
- device_initialize(&mgr->dev);
mgr->dev.class = fpga_mgr_class;
mgr->dev.groups = mops->groups;
mgr->dev.parent = parent;
@@ -645,6 +645,19 @@ struct fpga_manager *fpga_mgr_create(struct device *parent, const char *name,
if (ret)
goto error_device;
+ /*
+ * Initialize framework state by requesting low level driver read state
+ * from device. FPGA may be in reset mode or may have been programmed
+ * by bootloader or EEPROM.
+ */
+ mgr->state = fpga_mgr_state(mgr);
+
+ ret = device_register(&mgr->dev);
+ if (ret) {
+ put_device(&mgr->dev);
+ return ERR_PTR(ret);
+ }
+
return mgr;
error_device:
@@ -652,96 +665,36 @@ error_device:
error_kfree:
kfree(mgr);
- return NULL;
+ return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(fpga_mgr_create);
+EXPORT_SYMBOL_GPL(fpga_mgr_register_full);
/**
- * fpga_mgr_free - free an FPGA manager created with fpga_mgr_create()
- * @mgr: fpga manager struct
- */
-void fpga_mgr_free(struct fpga_manager *mgr)
-{
- ida_simple_remove(&fpga_mgr_ida, mgr->dev.id);
- kfree(mgr);
-}
-EXPORT_SYMBOL_GPL(fpga_mgr_free);
-
-static void devm_fpga_mgr_release(struct device *dev, void *res)
-{
- struct fpga_mgr_devres *dr = res;
-
- fpga_mgr_free(dr->mgr);
-}
-
-/**
- * devm_fpga_mgr_create - create and initialize a managed FPGA manager struct
+ * fpga_mgr_register - create and register an FPGA Manager device
* @parent: fpga manager device from pdev
* @name: fpga manager name
* @mops: pointer to structure of fpga manager ops
* @priv: fpga manager private data
*
- * This function is intended for use in an FPGA manager driver's probe function.
- * After the manager driver creates the manager struct with
- * devm_fpga_mgr_create(), it should register it with fpga_mgr_register(). The
- * manager driver's remove function should call fpga_mgr_unregister(). The
- * manager struct allocated with this function will be freed automatically on
- * driver detach. This includes the case of a probe function returning error
- * before calling fpga_mgr_register(), the struct will still get cleaned up.
+ * The caller of this function is responsible for calling fpga_mgr_unregister().
+ * Using devm_fpga_mgr_register() instead is recommended. This simple
+ * version of the register function should be sufficient for most users. The
+ * fpga_mgr_register_full() function is available for users that need to pass
+ * additional, optional parameters.
*
- * Return: pointer to struct fpga_manager or NULL
+ * Return: pointer to struct fpga_manager or ERR_PTR()
*/
-struct fpga_manager *devm_fpga_mgr_create(struct device *parent, const char *name,
- const struct fpga_manager_ops *mops,
- void *priv)
+struct fpga_manager *
+fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv)
{
- struct fpga_mgr_devres *dr;
+ struct fpga_manager_info info = { 0 };
- dr = devres_alloc(devm_fpga_mgr_release, sizeof(*dr), GFP_KERNEL);
- if (!dr)
- return NULL;
+ info.name = name;
+ info.mops = mops;
+ info.priv = priv;
- dr->mgr = fpga_mgr_create(parent, name, mops, priv);
- if (!dr->mgr) {
- devres_free(dr);
- return NULL;
- }
-
- devres_add(parent, dr);
-
- return dr->mgr;
-}
-EXPORT_SYMBOL_GPL(devm_fpga_mgr_create);
-
-/**
- * fpga_mgr_register - register an FPGA manager
- * @mgr: fpga manager struct
- *
- * Return: 0 on success, negative error code otherwise.
- */
-int fpga_mgr_register(struct fpga_manager *mgr)
-{
- int ret;
-
- /*
- * Initialize framework state by requesting low level driver read state
- * from device. FPGA may be in reset mode or may have been programmed
- * by bootloader or EEPROM.
- */
- mgr->state = fpga_mgr_state(mgr);
-
- ret = device_add(&mgr->dev);
- if (ret)
- goto error_device;
-
- dev_info(&mgr->dev, "%s registered\n", mgr->name);
-
- return 0;
-
-error_device:
- ida_simple_remove(&fpga_mgr_ida, mgr->dev.id);
-
- return ret;
+ return fpga_mgr_register_full(parent, &info);
}
EXPORT_SYMBOL_GPL(fpga_mgr_register);
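Drivers that need the optional fields (such as compat_id) fill a struct fpga_manager_info and call the _full variant, as the dfl-fme-mgr conversion above does; a minimal sketch:

	struct fpga_manager_info info = { 0 };

	info.name = "example manager";
	info.mops = &example_mops;
	info.priv = priv;
	info.compat_id = compat_id;	/* optional */
	mgr = devm_fpga_mgr_register_full(dev, &info);
	return PTR_ERR_OR_ZERO(mgr);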
@@ -765,14 +718,6 @@ void fpga_mgr_unregister(struct fpga_manager *mgr)
}
EXPORT_SYMBOL_GPL(fpga_mgr_unregister);
-static int fpga_mgr_devres_match(struct device *dev, void *res,
- void *match_data)
-{
- struct fpga_mgr_devres *dr = res;
-
- return match_data == dr->mgr;
-}
-
static void devm_fpga_mgr_unregister(struct device *dev, void *res)
{
struct fpga_mgr_devres *dr = res;
@@ -781,45 +726,67 @@ static void devm_fpga_mgr_unregister(struct device *dev, void *res)
}
/**
- * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
- * @dev: managing device for this FPGA manager
- * @mgr: fpga manager struct
+ * devm_fpga_mgr_register_full - resource managed variant of fpga_mgr_register_full()
+ * @parent: fpga manager device from pdev
+ * @info: parameters for fpga manager
*
- * This is the devres variant of fpga_mgr_register() for which the unregister
+ * This is the devres variant of fpga_mgr_register_full() for which the unregister
* function will be called automatically when the managing device is detached.
*/
-int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr)
+struct fpga_manager *
+devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info)
{
struct fpga_mgr_devres *dr;
- int ret;
-
- /*
- * Make sure that the struct fpga_manager * that is passed in is
- * managed itself.
- */
- if (WARN_ON(!devres_find(dev, devm_fpga_mgr_release,
- fpga_mgr_devres_match, mgr)))
- return -EINVAL;
+ struct fpga_manager *mgr;
dr = devres_alloc(devm_fpga_mgr_unregister, sizeof(*dr), GFP_KERNEL);
if (!dr)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
- ret = fpga_mgr_register(mgr);
- if (ret) {
+ mgr = fpga_mgr_register_full(parent, info);
+ if (IS_ERR(mgr)) {
devres_free(dr);
- return ret;
+ return mgr;
}
dr->mgr = mgr;
- devres_add(dev, dr);
+ devres_add(parent, dr);
- return 0;
+ return mgr;
+}
+EXPORT_SYMBOL_GPL(devm_fpga_mgr_register_full);
+
+/**
+ * devm_fpga_mgr_register - resource managed variant of fpga_mgr_register()
+ * @parent: fpga manager device from pdev
+ * @name: fpga manager name
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
+ *
+ * This is the devres variant of fpga_mgr_register() for which the
+ * unregister function will be called automatically when the managing
+ * device is detached.
+ */
+struct fpga_manager *
+devm_fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv)
+{
+ struct fpga_manager_info info = { 0 };
+
+ info.name = name;
+ info.mops = mops;
+ info.priv = priv;
+
+ return devm_fpga_mgr_register_full(parent, &info);
}
EXPORT_SYMBOL_GPL(devm_fpga_mgr_register);
static void fpga_mgr_dev_release(struct device *dev)
{
+ struct fpga_manager *mgr = to_fpga_manager(dev);
+
+ ida_simple_remove(&fpga_mgr_ida, mgr->dev.id);
+ kfree(mgr);
}
static int __init fpga_mgr_class_init(void)
diff --git a/drivers/fpga/fpga-region.c b/drivers/fpga/fpga-region.c
index a4838715221f..b0ac18de4885 100644
--- a/drivers/fpga/fpga-region.c
+++ b/drivers/fpga/fpga-region.c
@@ -180,39 +180,42 @@ static struct attribute *fpga_region_attrs[] = {
ATTRIBUTE_GROUPS(fpga_region);
/**
- * fpga_region_create - alloc and init a struct fpga_region
+ * fpga_region_register_full - create and register an FPGA Region device
* @parent: device parent
- * @mgr: manager that programs this region
- * @get_bridges: optional function to get bridges to a list
- *
- * The caller of this function is responsible for freeing the resulting region
- * struct with fpga_region_free(). Using devm_fpga_region_create() instead is
- * recommended.
+ * @info: parameters for FPGA Region
*
- * Return: struct fpga_region or NULL
+ * Return: struct fpga_region or ERR_PTR()
*/
-struct fpga_region
-*fpga_region_create(struct device *parent,
- struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *))
+struct fpga_region *
+fpga_region_register_full(struct device *parent, const struct fpga_region_info *info)
{
struct fpga_region *region;
int id, ret = 0;
+ if (!info) {
+ dev_err(parent,
+ "Attempt to register without required info structure\n");
+ return ERR_PTR(-EINVAL);
+ }
+
region = kzalloc(sizeof(*region), GFP_KERNEL);
if (!region)
- return NULL;
+ return ERR_PTR(-ENOMEM);
id = ida_simple_get(&fpga_region_ida, 0, 0, GFP_KERNEL);
- if (id < 0)
+ if (id < 0) {
+ ret = id;
goto err_free;
+ }
+
+ region->mgr = info->mgr;
+ region->compat_id = info->compat_id;
+ region->priv = info->priv;
+ region->get_bridges = info->get_bridges;
- region->mgr = mgr;
- region->get_bridges = get_bridges;
mutex_init(&region->mutex);
INIT_LIST_HEAD(&region->bridge_list);
- device_initialize(&region->dev);
region->dev.class = fpga_region_class;
region->dev.parent = parent;
region->dev.of_node = parent->of_node;
@@ -222,6 +225,12 @@ struct fpga_region
if (ret)
goto err_remove;
+ ret = device_register(&region->dev);
+ if (ret) {
+ put_device(&region->dev);
+ return ERR_PTR(ret);
+ }
+
return region;
err_remove:
@@ -229,76 +238,32 @@ err_remove:
err_free:
kfree(region);
- return NULL;
-}
-EXPORT_SYMBOL_GPL(fpga_region_create);
-
-/**
- * fpga_region_free - free an FPGA region created by fpga_region_create()
- * @region: FPGA region
- */
-void fpga_region_free(struct fpga_region *region)
-{
- ida_simple_remove(&fpga_region_ida, region->dev.id);
- kfree(region);
-}
-EXPORT_SYMBOL_GPL(fpga_region_free);
-
-static void devm_fpga_region_release(struct device *dev, void *res)
-{
- struct fpga_region *region = *(struct fpga_region **)res;
-
- fpga_region_free(region);
+ return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(fpga_region_register_full);
/**
- * devm_fpga_region_create - create and initialize a managed FPGA region struct
+ * fpga_region_register - create and register an FPGA Region device
* @parent: device parent
* @mgr: manager that programs this region
* @get_bridges: optional function to get bridges to a list
*
- * This function is intended for use in an FPGA region driver's probe function.
- * After the region driver creates the region struct with
- * devm_fpga_region_create(), it should register it with fpga_region_register().
- * The region driver's remove function should call fpga_region_unregister().
- * The region struct allocated with this function will be freed automatically on
- * driver detach. This includes the case of a probe function returning error
- * before calling fpga_region_register(), the struct will still get cleaned up.
+ * This simple version of the register function should be sufficient for most users.
+ * The fpga_region_register_full() function is available for users that need to
+ * pass additional, optional parameters.
*
- * Return: struct fpga_region or NULL
+ * Return: struct fpga_region or ERR_PTR()
*/
-struct fpga_region
-*devm_fpga_region_create(struct device *parent,
- struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *))
+struct fpga_region *
+fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *))
{
- struct fpga_region **ptr, *region;
-
- ptr = devres_alloc(devm_fpga_region_release, sizeof(*ptr), GFP_KERNEL);
- if (!ptr)
- return NULL;
+ struct fpga_region_info info = { 0 };
- region = fpga_region_create(parent, mgr, get_bridges);
- if (!region) {
- devres_free(ptr);
- } else {
- *ptr = region;
- devres_add(parent, ptr);
- }
+ info.mgr = mgr;
+ info.get_bridges = get_bridges;
- return region;
-}
-EXPORT_SYMBOL_GPL(devm_fpga_region_create);
-
-/**
- * fpga_region_register - register an FPGA region
- * @region: FPGA region
- *
- * Return: 0 or -errno
- */
-int fpga_region_register(struct fpga_region *region)
-{
- return device_add(&region->dev);
+ return fpga_region_register_full(parent, &info);
}
EXPORT_SYMBOL_GPL(fpga_region_register);
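Likewise, a region driver needing compat_id or priv goes through the _full variant, mirroring the dfl-fme-region conversion earlier (sketch):

	struct fpga_region_info info = { 0 };

	info.mgr = mgr;
	info.compat_id = mgr->compat_id;
	info.get_bridges = my_get_bridges;	/* optional */
	region = fpga_region_register_full(dev, &info);
	if (IS_ERR(region))
		return PTR_ERR(region);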
@@ -316,6 +281,10 @@ EXPORT_SYMBOL_GPL(fpga_region_unregister);
static void fpga_region_dev_release(struct device *dev)
{
+ struct fpga_region *region = to_fpga_region(dev);
+
+ ida_simple_remove(&fpga_region_ida, region->dev.id);
+ kfree(region);
}
/**
diff --git a/drivers/fpga/ice40-spi.c b/drivers/fpga/ice40-spi.c
index 029d3cdb918d..7cbb3558b844 100644
--- a/drivers/fpga/ice40-spi.c
+++ b/drivers/fpga/ice40-spi.c
@@ -178,12 +178,9 @@ static int ice40_fpga_probe(struct spi_device *spi)
return ret;
}
- mgr = devm_fpga_mgr_create(dev, "Lattice iCE40 FPGA Manager",
- &ice40_fpga_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(dev, mgr);
+ mgr = devm_fpga_mgr_register(dev, "Lattice iCE40 FPGA Manager",
+ &ice40_fpga_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
}
static const struct of_device_id ice40_fpga_of_match[] = {
diff --git a/drivers/fpga/machxo2-spi.c b/drivers/fpga/machxo2-spi.c
index ea2ec3c6815c..905607992a12 100644
--- a/drivers/fpga/machxo2-spi.c
+++ b/drivers/fpga/machxo2-spi.c
@@ -370,12 +370,9 @@ static int machxo2_spi_probe(struct spi_device *spi)
return -EINVAL;
}
- mgr = devm_fpga_mgr_create(dev, "Lattice MachXO2 SPI FPGA Manager",
- &machxo2_ops, spi);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(dev, mgr);
+ mgr = devm_fpga_mgr_register(dev, "Lattice MachXO2 SPI FPGA Manager",
+ &machxo2_ops, spi);
+ return PTR_ERR_OR_ZERO(mgr);
}
#ifdef CONFIG_OF
diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c
index e3c25576b6b9..50b83057c048 100644
--- a/drivers/fpga/of-fpga-region.c
+++ b/drivers/fpga/of-fpga-region.c
@@ -405,16 +405,12 @@ static int of_fpga_region_probe(struct platform_device *pdev)
if (IS_ERR(mgr))
return -EPROBE_DEFER;
- region = devm_fpga_region_create(dev, mgr, of_fpga_region_get_bridges);
- if (!region) {
- ret = -ENOMEM;
+ region = fpga_region_register(dev, mgr, of_fpga_region_get_bridges);
+ if (IS_ERR(region)) {
+ ret = PTR_ERR(region);
goto eprobe_mgr_put;
}
- ret = fpga_region_register(region);
- if (ret)
- goto eprobe_mgr_put;
-
of_platform_populate(np, fpga_region_of_match, NULL, &region->dev);
platform_set_drvdata(pdev, region);
@@ -448,7 +444,7 @@ static struct platform_driver of_fpga_region_driver = {
};
/**
- * fpga_region_init - init function for fpga_region class
+ * of_fpga_region_init - init function for fpga_region class
* Creates the fpga_region class and registers a reconfig notifier.
*/
static int __init of_fpga_region_init(void)
diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c
index 573d88bdf730..ac8e89b8a5cc 100644
--- a/drivers/fpga/socfpga-a10.c
+++ b/drivers/fpga/socfpga-a10.c
@@ -508,19 +508,15 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev)
return -EBUSY;
}
- mgr = devm_fpga_mgr_create(dev, "SoCFPGA Arria10 FPGA Manager",
- &socfpga_a10_fpga_mgr_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, mgr);
-
- ret = fpga_mgr_register(mgr);
- if (ret) {
+ mgr = fpga_mgr_register(dev, "SoCFPGA Arria10 FPGA Manager",
+ &socfpga_a10_fpga_mgr_ops, priv);
+ if (IS_ERR(mgr)) {
clk_disable_unprepare(priv->clk);
- return ret;
+ return PTR_ERR(mgr);
}
+ platform_set_drvdata(pdev, mgr);
+
return 0;
}
diff --git a/drivers/fpga/socfpga.c b/drivers/fpga/socfpga.c
index 1f467173fc1f..7e0741f99696 100644
--- a/drivers/fpga/socfpga.c
+++ b/drivers/fpga/socfpga.c
@@ -571,12 +571,9 @@ static int socfpga_fpga_probe(struct platform_device *pdev)
if (ret)
return ret;
- mgr = devm_fpga_mgr_create(dev, "Altera SOCFPGA FPGA Manager",
- &socfpga_fpga_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(dev, mgr);
+ mgr = devm_fpga_mgr_register(dev, "Altera SOCFPGA FPGA Manager",
+ &socfpga_fpga_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
}
#ifdef CONFIG_OF
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c
index 047fd7f23706..357cea58ec98 100644
--- a/drivers/fpga/stratix10-soc.c
+++ b/drivers/fpga/stratix10-soc.c
@@ -419,23 +419,16 @@ static int s10_probe(struct platform_device *pdev)
init_completion(&priv->status_return_completion);
- mgr = fpga_mgr_create(dev, "Stratix10 SOC FPGA Manager",
- &s10_ops, priv);
- if (!mgr) {
- dev_err(dev, "unable to create FPGA manager\n");
- ret = -ENOMEM;
- goto probe_err;
- }
-
- ret = fpga_mgr_register(mgr);
- if (ret) {
+ mgr = fpga_mgr_register(dev, "Stratix10 SOC FPGA Manager",
+ &s10_ops, priv);
+ if (IS_ERR(mgr)) {
dev_err(dev, "unable to register FPGA manager\n");
- fpga_mgr_free(mgr);
+ ret = PTR_ERR(mgr);
goto probe_err;
}
platform_set_drvdata(pdev, mgr);
- return ret;
+ return 0;
probe_err:
stratix10_svc_free_channel(priv->chan);
@@ -448,7 +441,6 @@ static int s10_remove(struct platform_device *pdev)
struct s10_priv *priv = mgr->priv;
fpga_mgr_unregister(mgr);
- fpga_mgr_free(mgr);
stratix10_svc_free_channel(priv->chan);
return 0;
diff --git a/drivers/fpga/ts73xx-fpga.c b/drivers/fpga/ts73xx-fpga.c
index 167abb0b08d4..8e6e9c840d9d 100644
--- a/drivers/fpga/ts73xx-fpga.c
+++ b/drivers/fpga/ts73xx-fpga.c
@@ -116,12 +116,9 @@ static int ts73xx_fpga_probe(struct platform_device *pdev)
if (IS_ERR(priv->io_base))
return PTR_ERR(priv->io_base);
- mgr = devm_fpga_mgr_create(kdev, "TS-73xx FPGA Manager",
- &ts73xx_fpga_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(kdev, mgr);
+ mgr = devm_fpga_mgr_register(kdev, "TS-73xx FPGA Manager",
+ &ts73xx_fpga_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
}
static struct platform_driver ts73xx_fpga_driver = {
diff --git a/drivers/fpga/versal-fpga.c b/drivers/fpga/versal-fpga.c
index 5b0dda304bd2..e1601b3a345b 100644
--- a/drivers/fpga/versal-fpga.c
+++ b/drivers/fpga/versal-fpga.c
@@ -54,12 +54,9 @@ static int versal_fpga_probe(struct platform_device *pdev)
return ret;
}
- mgr = devm_fpga_mgr_create(dev, "Xilinx Versal FPGA Manager",
- &versal_fpga_ops, NULL);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(dev, mgr);
+ mgr = devm_fpga_mgr_register(dev, "Xilinx Versal FPGA Manager",
+ &versal_fpga_ops, NULL);
+ return PTR_ERR_OR_ZERO(mgr);
}
static const struct of_device_id versal_fpga_of_match[] = {
diff --git a/drivers/fpga/xilinx-pr-decoupler.c b/drivers/fpga/xilinx-pr-decoupler.c
index e986ed47c4ed..2d9c491f7be9 100644
--- a/drivers/fpga/xilinx-pr-decoupler.c
+++ b/drivers/fpga/xilinx-pr-decoupler.c
@@ -140,22 +140,17 @@ static int xlnx_pr_decoupler_probe(struct platform_device *pdev)
clk_disable(priv->clk);
- br = devm_fpga_bridge_create(&pdev->dev, priv->ipconfig->name,
- &xlnx_pr_decoupler_br_ops, priv);
- if (!br) {
- err = -ENOMEM;
- goto err_clk;
- }
-
- platform_set_drvdata(pdev, br);
-
- err = fpga_bridge_register(br);
- if (err) {
+ br = fpga_bridge_register(&pdev->dev, priv->ipconfig->name,
+ &xlnx_pr_decoupler_br_ops, priv);
+ if (IS_ERR(br)) {
+ err = PTR_ERR(br);
dev_err(&pdev->dev, "unable to register %s",
priv->ipconfig->name);
goto err_clk;
}
+ platform_set_drvdata(pdev, br);
+
return 0;
err_clk:
diff --git a/drivers/fpga/xilinx-spi.c b/drivers/fpga/xilinx-spi.c
index b6bcf1d9233d..e1a227e7ff2a 100644
--- a/drivers/fpga/xilinx-spi.c
+++ b/drivers/fpga/xilinx-spi.c
@@ -247,13 +247,10 @@ static int xilinx_spi_probe(struct spi_device *spi)
return dev_err_probe(&spi->dev, PTR_ERR(conf->done),
"Failed to get DONE gpio\n");
- mgr = devm_fpga_mgr_create(&spi->dev,
- "Xilinx Slave Serial FPGA Manager",
- &xilinx_spi_ops, conf);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(&spi->dev, mgr);
+ mgr = devm_fpga_mgr_register(&spi->dev,
+ "Xilinx Slave Serial FPGA Manager",
+ &xilinx_spi_ops, conf);
+ return PTR_ERR_OR_ZERO(mgr);
}
#ifdef CONFIG_OF
diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c
index 9b75bd4f93d8..426aa34c6a0d 100644
--- a/drivers/fpga/zynq-fpga.c
+++ b/drivers/fpga/zynq-fpga.c
@@ -609,20 +609,16 @@ static int zynq_fpga_probe(struct platform_device *pdev)
clk_disable(priv->clk);
- mgr = devm_fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager",
- &zynq_fpga_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, mgr);
-
- err = fpga_mgr_register(mgr);
- if (err) {
+ mgr = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager",
+ &zynq_fpga_ops, priv);
+ if (IS_ERR(mgr)) {
dev_err(dev, "unable to register FPGA manager\n");
clk_unprepare(priv->clk);
- return err;
+ return PTR_ERR(mgr);
}
+ platform_set_drvdata(pdev, mgr);
+
return 0;
}
diff --git a/drivers/fpga/zynqmp-fpga.c b/drivers/fpga/zynqmp-fpga.c
index 7d3d5650c322..c60f20949c47 100644
--- a/drivers/fpga/zynqmp-fpga.c
+++ b/drivers/fpga/zynqmp-fpga.c
@@ -95,12 +95,9 @@ static int zynqmp_fpga_probe(struct platform_device *pdev)
priv->dev = dev;
- mgr = devm_fpga_mgr_create(dev, "Xilinx ZynqMP FPGA Manager",
- &zynqmp_fpga_ops, priv);
- if (!mgr)
- return -ENOMEM;
-
- return devm_fpga_mgr_register(dev, mgr);
+ mgr = devm_fpga_mgr_register(dev, "Xilinx ZynqMP FPGA Manager",
+ &zynqmp_fpga_ops, priv);
+ return PTR_ERR_OR_ZERO(mgr);
}
#ifdef CONFIG_OF
diff --git a/drivers/gnss/Kconfig b/drivers/gnss/Kconfig
index bd12e3d57baa..d7fe265c2869 100644
--- a/drivers/gnss/Kconfig
+++ b/drivers/gnss/Kconfig
@@ -54,4 +54,15 @@ config GNSS_UBX_SERIAL
If unsure, say N.
+config GNSS_USB
+ tristate "USB GNSS receiver support"
+ depends on USB
+ help
+ Say Y here if you have a GNSS receiver which uses a USB interface.
+
+ To compile this driver as a module, choose M here: the module will
+ be called gnss-usb.
+
+ If unsure, say N.
+
endif # GNSS
diff --git a/drivers/gnss/Makefile b/drivers/gnss/Makefile
index 451f11401ecc..bb2cbada3435 100644
--- a/drivers/gnss/Makefile
+++ b/drivers/gnss/Makefile
@@ -17,3 +17,6 @@ gnss-sirf-y := sirf.o
obj-$(CONFIG_GNSS_UBX_SERIAL) += gnss-ubx.o
gnss-ubx-y := ubx.o
+
+obj-$(CONFIG_GNSS_USB) += gnss-usb.o
+gnss-usb-y := usb.o
diff --git a/drivers/gnss/mtk.c b/drivers/gnss/mtk.c
index d1fc55560daf..c62b1211f4fe 100644
--- a/drivers/gnss/mtk.c
+++ b/drivers/gnss/mtk.c
@@ -126,7 +126,7 @@ static void mtk_remove(struct serdev_device *serdev)
if (data->vbackup)
regulator_disable(data->vbackup);
gnss_serial_free(gserial);
-};
+}
#ifdef CONFIG_OF
static const struct of_device_id mtk_of_match[] = {
diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c
index def64b36d994..5d8e9bfb24d0 100644
--- a/drivers/gnss/serial.c
+++ b/drivers/gnss/serial.c
@@ -165,7 +165,7 @@ void gnss_serial_free(struct gnss_serial *gserial)
{
gnss_put_device(gserial->gdev);
kfree(gserial);
-};
+}
EXPORT_SYMBOL_GPL(gnss_serial_free);
int gnss_serial_register(struct gnss_serial *gserial)
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 2ecb1d3e8eeb..bcb53ccfee4d 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -551,7 +551,7 @@ static void sirf_remove(struct serdev_device *serdev)
regulator_disable(data->vcc);
gnss_put_device(data->gdev);
-};
+}
#ifdef CONFIG_OF
static const struct of_device_id sirf_of_match[] = {
diff --git a/drivers/gnss/ubx.c b/drivers/gnss/ubx.c
index 7b05bc40532e..c951be202ca2 100644
--- a/drivers/gnss/ubx.c
+++ b/drivers/gnss/ubx.c
@@ -126,7 +126,7 @@ static void ubx_remove(struct serdev_device *serdev)
if (data->v_bckp)
regulator_disable(data->v_bckp);
gnss_serial_free(gserial);
-};
+}
#ifdef CONFIG_OF
static const struct of_device_id ubx_of_match[] = {
diff --git a/drivers/gnss/usb.c b/drivers/gnss/usb.c
new file mode 100644
index 000000000000..028ce56b20ea
--- /dev/null
+++ b/drivers/gnss/usb.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic USB GNSS receiver driver
+ *
+ * Copyright (C) 2021 Johan Hovold <johan@kernel.org>
+ */
+
+#include <linux/errno.h>
+#include <linux/gnss.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#define GNSS_USB_READ_BUF_LEN 512
+#define GNSS_USB_WRITE_TIMEOUT 1000
+
+static const struct usb_device_id gnss_usb_id_table[] = {
+ { USB_DEVICE(0x1199, 0xb000) }, /* Sierra Wireless XM1210 */
+ { }
+};
+MODULE_DEVICE_TABLE(usb, gnss_usb_id_table);
+
+struct gnss_usb {
+ struct usb_device *udev;
+ struct usb_interface *intf;
+ struct gnss_device *gdev;
+ struct urb *read_urb;
+ unsigned int write_pipe;
+};
+
+static void gnss_usb_rx_complete(struct urb *urb)
+{
+ struct gnss_usb *gusb = urb->context;
+ struct gnss_device *gdev = gusb->gdev;
+ int status = urb->status;
+ int len;
+ int ret;
+
+ switch (status) {
+ case 0:
+ break;
+ case -ENOENT:
+ case -ECONNRESET:
+ case -ESHUTDOWN:
+ dev_dbg(&gdev->dev, "urb stopped: %d\n", status);
+ return;
+ case -EPIPE:
+ dev_err(&gdev->dev, "urb stopped: %d\n", status);
+ return;
+ default:
+ dev_dbg(&gdev->dev, "nonzero urb status: %d\n", status);
+ goto resubmit;
+ }
+
+ len = urb->actual_length;
+ if (len == 0)
+ goto resubmit;
+
+ ret = gnss_insert_raw(gdev, urb->transfer_buffer, len);
+ if (ret < len)
+ dev_dbg(&gdev->dev, "dropped %d bytes\n", len - ret);
+resubmit:
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret && ret != -EPERM && ret != -ENODEV)
+ dev_err(&gdev->dev, "failed to resubmit urb: %d\n", ret);
+}
+
+static int gnss_usb_open(struct gnss_device *gdev)
+{
+ struct gnss_usb *gusb = gnss_get_drvdata(gdev);
+ int ret;
+
+ ret = usb_submit_urb(gusb->read_urb, GFP_KERNEL);
+ if (ret) {
+ if (ret != -EPERM && ret != -ENODEV)
+ dev_err(&gdev->dev, "failed to submit urb: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void gnss_usb_close(struct gnss_device *gdev)
+{
+ struct gnss_usb *gusb = gnss_get_drvdata(gdev);
+
+ usb_kill_urb(gusb->read_urb);
+}
+
+static int gnss_usb_write_raw(struct gnss_device *gdev,
+ const unsigned char *buf, size_t count)
+{
+ struct gnss_usb *gusb = gnss_get_drvdata(gdev);
+ void *tbuf;
+ int ret;
+
+ tbuf = kmemdup(buf, count, GFP_KERNEL);
+ if (!tbuf)
+ return -ENOMEM;
+
+ ret = usb_bulk_msg(gusb->udev, gusb->write_pipe, tbuf, count, NULL,
+ GNSS_USB_WRITE_TIMEOUT);
+ kfree(tbuf);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct gnss_operations gnss_usb_gnss_ops = {
+ .open = gnss_usb_open,
+ .close = gnss_usb_close,
+ .write_raw = gnss_usb_write_raw,
+};
+
+static int gnss_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct usb_endpoint_descriptor *in, *out;
+ struct gnss_device *gdev;
+ struct gnss_usb *gusb;
+ struct urb *urb;
+ size_t buf_len;
+ void *buf;
+ int ret;
+
+ ret = usb_find_common_endpoints(intf->cur_altsetting, &in, &out, NULL,
+ NULL);
+ if (ret)
+ return ret;
+
+ gusb = kzalloc(sizeof(*gusb), GFP_KERNEL);
+ if (!gusb)
+ return -ENOMEM;
+
+ gdev = gnss_allocate_device(&intf->dev);
+ if (!gdev) {
+ ret = -ENOMEM;
+ goto err_free_gusb;
+ }
+
+ gdev->ops = &gnss_usb_gnss_ops;
+ gdev->type = GNSS_TYPE_NMEA;
+ gnss_set_drvdata(gdev, gusb);
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ ret = -ENOMEM;
+ goto err_put_gdev;
+ }
+
+ buf_len = max(usb_endpoint_maxp(in), GNSS_USB_READ_BUF_LEN);
+
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_free_urb;
+ }
+
+ usb_fill_bulk_urb(urb, udev,
+ usb_rcvbulkpipe(udev, usb_endpoint_num(in)),
+ buf, buf_len, gnss_usb_rx_complete, gusb);
+
+ gusb->intf = intf;
+ gusb->udev = udev;
+ gusb->gdev = gdev;
+ gusb->read_urb = urb;
+ gusb->write_pipe = usb_sndbulkpipe(udev, usb_endpoint_num(out));
+
+ ret = gnss_register_device(gdev);
+ if (ret)
+ goto err_free_buf;
+
+ usb_set_intfdata(intf, gusb);
+
+ return 0;
+
+err_free_buf:
+ kfree(buf);
+err_free_urb:
+ usb_free_urb(urb);
+err_put_gdev:
+ gnss_put_device(gdev);
+err_free_gusb:
+ kfree(gusb);
+
+ return ret;
+}
+
+static void gnss_usb_disconnect(struct usb_interface *intf)
+{
+ struct gnss_usb *gusb = usb_get_intfdata(intf);
+
+ gnss_deregister_device(gusb->gdev);
+
+ kfree(gusb->read_urb->transfer_buffer);
+ usb_free_urb(gusb->read_urb);
+ gnss_put_device(gusb->gdev);
+ kfree(gusb);
+}
+
+static struct usb_driver gnss_usb_driver = {
+ .name = "gnss-usb",
+ .probe = gnss_usb_probe,
+ .disconnect = gnss_usb_disconnect,
+ .id_table = gnss_usb_id_table,
+};
+module_usb_driver(gnss_usb_driver);
+
+MODULE_AUTHOR("Johan Hovold <johan@kernel.org>");
+MODULE_DESCRIPTION("Generic USB GNSS receiver driver");
+MODULE_LICENSE("GPL v2");
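The new driver matches purely on VID/PID and discovers its bulk endpoints at probe time, so supporting another NMEA-over-bulk receiver is normally just one more table entry. A minimal sketch of such an extension, where the second ID is a placeholder and not a device known to this driver:

	static const struct usb_device_id gnss_usb_id_table[] = {
		{ USB_DEVICE(0x1199, 0xb000) },	/* Sierra Wireless XM1210 */
		{ USB_DEVICE(0x1234, 0x5678) },	/* placeholder: another NMEA receiver */
		{ }
	};
	MODULE_DEVICE_TABLE(usb, gnss_usb_id_table);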
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 869dc952cf45..0cb2664085cf 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -278,7 +278,8 @@ static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
- return gpiod_get_value(fwd->descs[offset]);
+ return chip->can_sleep ? gpiod_get_value_cansleep(fwd->descs[offset])
+ : gpiod_get_value(fwd->descs[offset]);
}
static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
@@ -293,7 +294,10 @@ static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
for_each_set_bit(i, mask, fwd->chip.ngpio)
descs[j++] = fwd->descs[i];
- error = gpiod_get_array_value(j, descs, NULL, values);
+ if (fwd->chip.can_sleep)
+ error = gpiod_get_array_value_cansleep(j, descs, NULL, values);
+ else
+ error = gpiod_get_array_value(j, descs, NULL, values);
if (error)
return error;
@@ -328,7 +332,10 @@ static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct gpiochip_fwd *fwd = gpiochip_get_data(chip);
- gpiod_set_value(fwd->descs[offset], value);
+ if (chip->can_sleep)
+ gpiod_set_value_cansleep(fwd->descs[offset], value);
+ else
+ gpiod_set_value(fwd->descs[offset], value);
}
static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
@@ -343,7 +350,10 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask,
descs[j++] = fwd->descs[i];
}
- gpiod_set_array_value(j, descs, NULL, values);
+ if (fwd->chip.can_sleep)
+ gpiod_set_array_value_cansleep(j, descs, NULL, values);
+ else
+ gpiod_set_array_value(j, descs, NULL, values);
}
static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip,
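The aggregator forwards requests to descriptors that may belong to a sleeping controller (an I2C expander, say) or to an atomic memory-mapped one, so every accessor dispatches on the chip's can_sleep flag instead of hard-coding one variant. A minimal sketch of the same idiom for a single line, assuming a hypothetical forwarder with a descriptor already looked up:

	static void fwd_set_one(struct gpio_chip *chip, struct gpio_desc *desc,
				int value)
	{
		/* The _cansleep variant may block and is only legal in
		 * process context; the plain variant must be used when
		 * the underlying chip cannot sleep. */
		if (chip->can_sleep)
			gpiod_set_value_cansleep(desc, value);
		else
			gpiod_set_value(desc, value);
	}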
diff --git a/drivers/gpio/gpio-idt3243x.c b/drivers/gpio/gpio-idt3243x.c
index 50003ad2e589..52b8b72ded77 100644
--- a/drivers/gpio/gpio-idt3243x.c
+++ b/drivers/gpio/gpio-idt3243x.c
@@ -132,7 +132,7 @@ static int idt_gpio_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct gpio_irq_chip *girq;
struct idt_gpio_ctrl *ctrl;
- unsigned int parent_irq;
+ int parent_irq;
int ngpios;
int ret;
@@ -164,8 +164,8 @@ static int idt_gpio_probe(struct platform_device *pdev)
return PTR_ERR(ctrl->pic);
parent_irq = platform_get_irq(pdev, 0);
- if (!parent_irq)
- return -EINVAL;
+ if (parent_irq < 0)
+ return parent_irq;
girq = &ctrl->gc.irq;
girq->chip = &idt_gpio_irqchip;
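Both IRQ lookup fixes in this group apply the same rule: platform_get_irq() returns a negative errno on failure and, by design, never returns 0 as a valid interrupt, so testing for 0 both misses real errors and swallows the errno. A minimal sketch of the idiom in a hypothetical probe (example_handler is assumed, not a real function):

	static int example_probe(struct platform_device *pdev)
	{
		int irq;

		/* A negative return is an errno; propagate it as-is. */
		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		return devm_request_irq(&pdev->dev, irq, example_handler, 0,
					dev_name(&pdev->dev), NULL);
	}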
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index d26bff29157b..8943cea92764 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -491,27 +491,6 @@ static void gpio_mockup_unregister_pdevs(void)
}
}
-static __init char **gpio_mockup_make_line_names(const char *label,
- unsigned int num_lines)
-{
- unsigned int i;
- char **names;
-
- names = kcalloc(num_lines + 1, sizeof(char *), GFP_KERNEL);
- if (!names)
- return NULL;
-
- for (i = 0; i < num_lines; i++) {
- names[i] = kasprintf(GFP_KERNEL, "%s-%u", label, i);
- if (!names[i]) {
- kfree_strarray(names, i);
- return NULL;
- }
- }
-
- return names;
-}
-
static int __init gpio_mockup_register_chip(int idx)
{
struct property_entry properties[GPIO_MOCKUP_MAX_PROP];
@@ -538,7 +517,7 @@ static int __init gpio_mockup_register_chip(int idx)
properties[prop++] = PROPERTY_ENTRY_U16("nr-gpios", ngpio);
if (gpio_mockup_named_lines) {
- line_names = gpio_mockup_make_line_names(chip_label, ngpio);
+ line_names = kasprintf_strarray(GFP_KERNEL, chip_label, ngpio);
if (!line_names)
return -ENOMEM;
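kasprintf_strarray() produces the same NULL-terminated array of "<prefix>-<n>" strings that the removed helper built by hand, and its result is released with kfree_strarray(). A minimal usage sketch, assuming those documented semantics:

	char **names;

	/* Yields { "gpio-mockup-A-0", ..., "gpio-mockup-A-15", NULL } */
	names = kasprintf_strarray(GFP_KERNEL, "gpio-mockup-A", 16);
	if (!names)
		return -ENOMEM;
	/* ... hand the array to device properties ... */
	kfree_strarray(names, 16);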
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 70d6ae20b1da..a964e25ea620 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -47,7 +47,7 @@ struct mpc8xxx_gpio_chip {
unsigned offset, int value);
struct irq_domain *irq;
- unsigned int irqn;
+ int irqn;
};
/*
@@ -388,8 +388,8 @@ static int mpc8xxx_probe(struct platform_device *pdev)
}
mpc8xxx_gc->irqn = platform_get_irq(pdev, 0);
- if (!mpc8xxx_gc->irqn)
- return 0;
+ if (mpc8xxx_gc->irqn < 0)
+ return mpc8xxx_gc->irqn;
mpc8xxx_gc->irq = irq_domain_create_linear(fwnode,
MPC8XXX_GPIO_PINS,
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 403f9e833d6a..7d82388b4ab7 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -223,7 +223,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
NULL,
chip->base + SIFIVE_GPIO_OUTPUT_EN,
chip->base + SIFIVE_GPIO_INPUT_EN,
- 0);
+ BGPIOF_READ_OUTPUT_REG_SET);
if (ret) {
dev_err(dev, "unable to init generic GPIO\n");
return ret;
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index 838bbfed11d3..153fe79e1bf3 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -570,6 +570,11 @@ static struct gpio_sim_bank *to_gpio_sim_bank(struct config_item *item)
return container_of(group, struct gpio_sim_bank, group);
}
+static bool gpio_sim_bank_has_label(struct gpio_sim_bank *bank)
+{
+ return bank->label && *bank->label;
+}
+
static struct gpio_sim_device *
gpio_sim_bank_get_device(struct gpio_sim_bank *bank)
{
@@ -770,9 +775,15 @@ static int gpio_sim_add_hogs(struct gpio_sim_device *dev)
* point the device doesn't exist yet and so dev_name()
* is not available.
*/
- hog->chip_label = kasprintf(GFP_KERNEL,
- "gpio-sim.%u-%s", dev->id,
- fwnode_get_name(bank->swnode));
+ if (gpio_sim_bank_has_label(bank))
+ hog->chip_label = kstrdup(bank->label,
+ GFP_KERNEL);
+ else
+ hog->chip_label = kasprintf(GFP_KERNEL,
+ "gpio-sim.%u-%s",
+ dev->id,
+ fwnode_get_name(
+ bank->swnode));
if (!hog->chip_label) {
gpio_sim_remove_hogs(dev);
return -ENOMEM;
@@ -816,7 +827,7 @@ gpio_sim_make_bank_swnode(struct gpio_sim_bank *bank,
properties[prop_idx++] = PROPERTY_ENTRY_U32("ngpios", bank->num_lines);
- if (bank->label)
+ if (gpio_sim_bank_has_label(bank))
properties[prop_idx++] = PROPERTY_ENTRY_STRING("gpio-sim,label",
bank->label);
diff --git a/drivers/gpio/gpio-virtio.c b/drivers/gpio/gpio-virtio.c
index 9f4941bc5760..fcc5e8c08973 100644
--- a/drivers/gpio/gpio-virtio.c
+++ b/drivers/gpio/gpio-virtio.c
@@ -450,7 +450,7 @@ static void virtio_gpio_request_vq(struct virtqueue *vq)
static void virtio_gpio_free_vqs(struct virtio_device *vdev)
{
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
}
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index c7b5446d01fd..ffa0256cad5a 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -330,7 +330,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
goto out_free_lh;
}
- ret = gpiod_request(desc, lh->label);
+ ret = gpiod_request_user(desc, lh->label);
if (ret)
goto out_free_lh;
lh->descs[i] = desc;
@@ -1378,7 +1378,7 @@ static int linereq_create(struct gpio_device *gdev, void __user *ip)
goto out_free_linereq;
}
- ret = gpiod_request(desc, lr->label);
+ ret = gpiod_request_user(desc, lr->label);
if (ret)
goto out_free_linereq;
@@ -1764,7 +1764,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
}
- ret = gpiod_request(desc, le->label);
+ ret = gpiod_request_user(desc, le->label);
if (ret)
goto out_free_le;
le->desc = desc;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 4098bc7f88b7..44c1ad51b3fe 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -475,12 +475,9 @@ static ssize_t export_store(struct class *class,
* they may be undone on its behalf too.
*/
- status = gpiod_request(desc, "sysfs");
- if (status) {
- if (status == -EPROBE_DEFER)
- status = -ENODEV;
+ status = gpiod_request_user(desc, "sysfs");
+ if (status)
goto done;
- }
status = gpiod_set_transitory(desc, false);
if (!status) {
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 30bc3f80f83e..c31f4626915d 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -135,6 +135,18 @@ struct gpio_desc {
int gpiod_request(struct gpio_desc *desc, const char *label);
void gpiod_free(struct gpio_desc *desc);
+
+static inline int gpiod_request_user(struct gpio_desc *desc, const char *label)
+{
+ int ret;
+
+ ret = gpiod_request(desc, label);
+ if (ret == -EPROBE_DEFER)
+ ret = -ENODEV;
+
+ return ret;
+}
+
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags);
int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
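The helper exists because -EPROBE_DEFER is meaningful only inside the driver core; leaked to user space through the character device or sysfs it would surface as a nonsensical errno, so it is remapped to -ENODEV at the kernel/user boundary. A minimal sketch of a converted call site, mirroring the cdev and sysfs changes above:

	/* "label" originates from user space; any defer becomes -ENODEV. */
	ret = gpiod_request_user(desc, label);
	if (ret)
		return ret;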
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index d8b854fcbffa..9a53a4de2bb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1408,12 +1408,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
-bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
-static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
@@ -1422,6 +1420,14 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
enum amdgpu_ss ss_state) { return 0; }
#endif
+#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND)
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev);
+bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
+#else
+static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
+static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; }
+#endif
+
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
uint64_t addr, struct amdgpu_bo **bo,
struct amdgpu_bo_va_mapping **mapping);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4811b0faafd9..0e12315fa0cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -1031,6 +1031,20 @@ void amdgpu_acpi_detect(void)
}
}
+#if IS_ENABLED(CONFIG_SUSPEND)
+/**
+ * amdgpu_acpi_is_s3_active
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the platform will use S3 for system suspend, false if not.
+ */
+bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev)
+{
+ return !(adev->flags & AMD_IS_APU) ||
+ (pm_suspend_target_state == PM_SUSPEND_MEM);
+}
+
/**
* amdgpu_acpi_is_s0ix_active
*
@@ -1040,11 +1054,24 @@ void amdgpu_acpi_detect(void)
*/
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
{
-#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
- if (adev->flags & AMD_IS_APU)
- return pm_suspend_target_state == PM_SUSPEND_TO_IDLE;
+ if (!(adev->flags & AMD_IS_APU) ||
+ (pm_suspend_target_state != PM_SUSPEND_TO_IDLE))
+ return false;
+
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) {
+ dev_warn_once(adev->dev,
+ "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n"
+ "To use suspend-to-idle change the sleep mode in BIOS setup.\n");
+ return false;
}
-#endif
+
+#if !IS_ENABLED(CONFIG_AMD_PMC)
+ dev_warn_once(adev->dev,
+ "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n");
return false;
+#else
+ return true;
+#endif /* CONFIG_AMD_PMC */
}
+
+#endif /* CONFIG_SUSPEND */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 776a947b45df..6ca1db3c243f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -514,13 +514,6 @@ out_put:
return r;
}
-uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev)
-{
- struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
-
- return amdgpu_vram_mgr_usage(vram_man);
-}
-
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
struct amdgpu_device *src)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 61f899e54fd5..ac841ae8f5cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -223,7 +223,6 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
uint64_t *bo_size, void *metadata_buffer,
size_t buffer_size, uint32_t *metadata_size,
uint32_t *flags);
-uint64_t amdgpu_amdkfd_get_vram_usage(struct amdgpu_device *adev);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
struct amdgpu_device *src);
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 0311d799a010..06d07502a1f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -298,7 +298,6 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
{
s64 time_us, increment_us;
u64 free_vram, total_vram, used_vram;
- struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
/* Allow a maximum of 200 accumulated ms. This is basically per-IB
* throttling.
*
@@ -315,7 +314,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
}
total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
- used_vram = amdgpu_vram_mgr_usage(vram_man);
+ used_vram = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;
spin_lock(&adev->mm_stats.lock);
@@ -362,7 +361,7 @@ static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
u64 total_vis_vram = adev->gmc.visible_vram_size;
u64 used_vis_vram =
- amdgpu_vram_mgr_vis_usage(vram_man);
+ amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
if (used_vis_vram < total_vis_vram) {
u64 free_vis_vram = total_vis_vram - used_vis_vram;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a8b08a72b71b..ed077de426d9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -552,7 +552,7 @@ void amdgpu_device_wreg(struct amdgpu_device *adev,
}
/**
- * amdgpu_mm_wreg_mmio_rlc - write register either with mmio or with RLC path if in range
+ * amdgpu_mm_wreg_mmio_rlc - write register either with direct/indirect mmio or with RLC path if in range
*
* this function is invoked only for the debugfs register access
*/
@@ -567,6 +567,8 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
adev->gfx.rlc.funcs->is_rlcg_access_range) {
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
+ } else if ((reg * 4) >= adev->rmmio_size) {
+ adev->pcie_wreg(adev, reg * 4, v);
} else {
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
@@ -1448,7 +1450,7 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
break;
default:
- return -EINVAL;
+ break;
}
return 0;
@@ -2352,7 +2354,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
}
if (amdgpu_sriov_vf(adev))
- amdgpu_virt_exchange_data(adev);
+ amdgpu_virt_init_data_exchange(adev);
r = amdgpu_ib_pool_init(adev);
if (r) {
@@ -3496,9 +3498,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->psp.mutex);
mutex_init(&adev->notifier_lock);
- r = amdgpu_device_init_apu_flags(adev);
- if (r)
- return r;
+ amdgpu_device_init_apu_flags(adev);
r = amdgpu_device_check_arguments(adev);
if (r)
@@ -3833,6 +3833,7 @@ failed:
static void amdgpu_device_unmap_mmio(struct amdgpu_device *adev)
{
+
/* Clear all CPU mappings pointing to this device */
unmap_mapping_range(adev->ddev.anon_inode->i_mapping, 0, 0, 1);
@@ -3913,6 +3914,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
+ int idx;
+
amdgpu_fence_driver_sw_fini(adev);
amdgpu_device_ip_fini(adev);
release_firmware(adev->firmware.gpu_info_fw);
@@ -3937,6 +3940,14 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_unregister(adev->pdev);
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+
+ iounmap(adev->rmmio);
+ adev->rmmio = NULL;
+ amdgpu_device_doorbell_fini(adev);
+ drm_dev_exit(idx);
+ }
+
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
if (adev->mman.discovery_bin)
@@ -3957,8 +3968,8 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
*/
static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
{
- /* No need to evict vram on APUs for suspend to ram */
- if (adev->in_s3 && (adev->flags & AMD_IS_APU))
+ /* No need to evict vram on APUs for suspend to ram or s2idle */
+ if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
return;
if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
@@ -4005,16 +4016,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (!adev->in_s0ix)
amdgpu_amdkfd_suspend(adev, adev->in_runpm);
- /* First evict vram memory */
amdgpu_device_evict_resources(adev);
amdgpu_fence_driver_hw_fini(adev);
amdgpu_device_ip_suspend_phase2(adev);
- /* This second call to evict device resources is to evict
- * the gart page table using the CPU.
- */
- amdgpu_device_evict_resources(adev);
return 0;
}
@@ -4359,8 +4365,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
goto error;
amdgpu_virt_init_data_exchange(adev);
- /* we need recover gart prior to run SMC/CP/SDMA resume */
- amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
r = amdgpu_device_fw_loading(adev);
if (r)
@@ -4446,33 +4450,24 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
if (amdgpu_gpu_recovery == -1) {
switch (adev->asic_type) {
- case CHIP_BONAIRE:
- case CHIP_HAWAII:
- case CHIP_TOPAZ:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- case CHIP_VEGAM:
- case CHIP_VEGA20:
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_VANGOGH:
- case CHIP_ALDEBARAN:
- break;
- default:
+#ifdef CONFIG_DRM_AMDGPU_SI
+ case CHIP_VERDE:
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ case CHIP_OLAND:
+ case CHIP_HAINAN:
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_KAVERI:
+ case CHIP_KABINI:
+ case CHIP_MULLINS:
+#endif
+ case CHIP_CARRIZO:
+ case CHIP_STONEY:
+ case CHIP_CYAN_SKILLFISH:
goto disabled;
+ default:
+ break;
}
}
@@ -4680,10 +4675,6 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
amdgpu_inc_vram_lost(tmp_adev);
}
- r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
- if (r)
- goto out;
-
r = amdgpu_device_fw_loading(tmp_adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 028190d42bb2..81bfee978b74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -243,6 +243,30 @@ static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
}
+static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
+{
+ /*
+ * So far, apply this quirk only on those Navy Flounder boards which
+ * have a bad VCN config in their harvest table.
+ */
+ if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
+ switch (adev->pdev->revision) {
+ case 0xC1:
+ case 0xC2:
+ case 0xC3:
+ case 0xC5:
+ case 0xC7:
+ case 0xCF:
+ case 0xDF:
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
static int amdgpu_discovery_init(struct amdgpu_device *adev)
{
struct table_info *info;
@@ -548,10 +572,9 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
break;
}
}
- /* some IP discovery tables on Navy Flounder don't have this set correctly */
- if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
- (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)))
- adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
+
+ amdgpu_discovery_harvest_config_quirk(adev);
+
if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b63ed1ddf713..63a089992645 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1525,6 +1525,87 @@ static const u16 amdgpu_unsupported_pciidlist[] = {
0x99A0,
0x99A2,
0x99A4,
+ /* radeon secondary ids */
+ 0x3171,
+ 0x3e70,
+ 0x4164,
+ 0x4165,
+ 0x4166,
+ 0x4168,
+ 0x4170,
+ 0x4171,
+ 0x4172,
+ 0x4173,
+ 0x496e,
+ 0x4a69,
+ 0x4a6a,
+ 0x4a6b,
+ 0x4a70,
+ 0x4a74,
+ 0x4b69,
+ 0x4b6b,
+ 0x4b6c,
+ 0x4c6e,
+ 0x4e64,
+ 0x4e65,
+ 0x4e66,
+ 0x4e67,
+ 0x4e68,
+ 0x4e69,
+ 0x4e6a,
+ 0x4e71,
+ 0x4f73,
+ 0x5569,
+ 0x556b,
+ 0x556d,
+ 0x556f,
+ 0x5571,
+ 0x5854,
+ 0x5874,
+ 0x5940,
+ 0x5941,
+ 0x5b72,
+ 0x5b73,
+ 0x5b74,
+ 0x5b75,
+ 0x5d44,
+ 0x5d45,
+ 0x5d6d,
+ 0x5d6f,
+ 0x5d72,
+ 0x5d77,
+ 0x5e6b,
+ 0x5e6d,
+ 0x7120,
+ 0x7124,
+ 0x7129,
+ 0x712e,
+ 0x712f,
+ 0x7162,
+ 0x7163,
+ 0x7166,
+ 0x7167,
+ 0x7172,
+ 0x7173,
+ 0x71a0,
+ 0x71a1,
+ 0x71a3,
+ 0x71a7,
+ 0x71bb,
+ 0x71e0,
+ 0x71e1,
+ 0x71e2,
+ 0x71e6,
+ 0x71e7,
+ 0x71f2,
+ 0x7269,
+ 0x726b,
+ 0x726e,
+ 0x72a0,
+ 0x72a8,
+ 0x72b1,
+ 0x72b3,
+ 0x793f,
};
static const struct pci_device_id pciidlist[] = {
@@ -1930,11 +2011,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return -ENODEV;
}
- if (flags == 0) {
- DRM_INFO("Unsupported asic. Remove me when IP discovery init is in place.\n");
- return -ENODEV;
- }
-
if (amdgpu_virtual_display ||
amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
supports_atomic = true;
@@ -2170,13 +2246,20 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
static int amdgpu_pmops_prepare(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = drm_to_adev(drm_dev);
/* Return a positive number here so
* DPM_FLAG_SMART_SUSPEND works properly
*/
if (amdgpu_device_supports_boco(drm_dev))
- return pm_runtime_suspended(dev) &&
- pm_suspend_via_firmware();
+ return pm_runtime_suspended(dev);
+
+ /* if the device supports neither s3 nor s2idle
+ * then skip suspend
+ */
+ if (!amdgpu_acpi_is_s0ix_active(adev) &&
+ !amdgpu_acpi_is_s3_active(adev))
+ return 1;
return 0;
}
@@ -2194,9 +2277,9 @@ static int amdgpu_pmops_suspend(struct device *dev)
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
- adev->in_s3 = true;
+ else
+ adev->in_s3 = true;
r = amdgpu_device_suspend(drm_dev, true);
- adev->in_s3 = false;
if (r)
return r;
if (!adev->in_s0ix)
@@ -2217,6 +2300,8 @@ static int amdgpu_pmops_resume(struct device *dev)
r = amdgpu_device_resume(drm_dev, true);
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = false;
+ else
+ adev->in_s3 = false;
return r;
}
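A positive return from a .prepare() callback tells the PM core it may skip the device's remaining suspend/resume callbacks, which is how the driver now avoids a pointless suspend cycle when neither S3 nor S0ix will actually be entered. A minimal sketch of the decision, assuming the two ACPI helpers introduced above:

	static int example_pmops_prepare(struct device *dev)
	{
		struct drm_device *drm_dev = dev_get_drvdata(dev);
		struct amdgpu_device *adev = drm_to_adev(drm_dev);

		/* Positive: the PM core may skip this device entirely. */
		if (!amdgpu_acpi_is_s0ix_active(adev) &&
		    !amdgpu_acpi_is_s3_active(adev))
			return 1;

		return 0;
	}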
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index d3e4203f6217..645950a653a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -114,80 +114,12 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
*/
int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
{
- int r;
-
- if (adev->gart.bo == NULL) {
- struct amdgpu_bo_param bp;
-
- memset(&bp, 0, sizeof(bp));
- bp.size = adev->gart.table_size;
- bp.byte_align = PAGE_SIZE;
- bp.domain = AMDGPU_GEM_DOMAIN_VRAM;
- bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
- AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- bp.type = ttm_bo_type_kernel;
- bp.resv = NULL;
- bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-
- r = amdgpu_bo_create(adev, &bp, &adev->gart.bo);
- if (r) {
- return r;
- }
- }
- return 0;
-}
-
-/**
- * amdgpu_gart_table_vram_pin - pin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Pin the GART page table in vram so it will not be moved
- * by the memory manager (pcie r4xx, r5xx+). These asics require the
- * gart table to be in video memory.
- * Returns 0 for success, error for failure.
- */
-int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev)
-{
- int r;
-
- r = amdgpu_bo_reserve(adev->gart.bo, false);
- if (unlikely(r != 0))
- return r;
- r = amdgpu_bo_pin(adev->gart.bo, AMDGPU_GEM_DOMAIN_VRAM);
- if (r) {
- amdgpu_bo_unreserve(adev->gart.bo);
- return r;
- }
- r = amdgpu_bo_kmap(adev->gart.bo, &adev->gart.ptr);
- if (r)
- amdgpu_bo_unpin(adev->gart.bo);
- amdgpu_bo_unreserve(adev->gart.bo);
- return r;
-}
-
-/**
- * amdgpu_gart_table_vram_unpin - unpin gart page table in vram
- *
- * @adev: amdgpu_device pointer
- *
- * Unpin the GART page table in vram (pcie r4xx, r5xx+).
- * These asics require the gart table to be in video memory.
- */
-void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
-{
- int r;
+ if (adev->gart.bo != NULL)
+ return 0;
- if (adev->gart.bo == NULL) {
- return;
- }
- r = amdgpu_bo_reserve(adev->gart.bo, true);
- if (likely(r == 0)) {
- amdgpu_bo_kunmap(adev->gart.bo);
- amdgpu_bo_unpin(adev->gart.bo);
- amdgpu_bo_unreserve(adev->gart.bo);
- adev->gart.ptr = NULL;
- }
+ return amdgpu_bo_create_kernel(adev, adev->gart.table_size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_VRAM, &adev->gart.bo,
+ NULL, (void *)&adev->gart.ptr);
}
/**
@@ -201,11 +133,7 @@ void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev)
*/
void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
{
- if (adev->gart.bo == NULL) {
- return;
- }
- amdgpu_bo_unref(&adev->gart.bo);
- adev->gart.ptr = NULL;
+ amdgpu_bo_free_kernel(&adev->gart.bo, NULL, (void *)&adev->gart.ptr);
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 9a6507af1670..c0d8f40a5b45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -264,9 +264,6 @@ static int amdgpu_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_str
!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
vma->vm_flags &= ~VM_MAYWRITE;
- if (bo->kfd_bo)
- vma->vm_flags |= VM_DONTCOPY;
-
return drm_gem_ttm_mmap(obj, vma);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 675a72ef305d..72022df264f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -77,10 +77,8 @@ static ssize_t amdgpu_mem_info_gtt_used_show(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- struct ttm_resource_manager *man;
- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(man));
+ return sysfs_emit(buf, "%llu\n", amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr));
}
static DEVICE_ATTR(mem_info_gtt_total, S_IRUGO,
@@ -206,30 +204,27 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
/**
* amdgpu_gtt_mgr_usage - return usage of GTT domain
*
- * @man: TTM memory type manager
+ * @mgr: amdgpu_gtt_mgr pointer
*
* Return how many bytes are used in the GTT domain
*/
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
+uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr)
{
- struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
-
return atomic64_read(&mgr->used) * PAGE_SIZE;
}
/**
* amdgpu_gtt_mgr_recover - re-init gart
*
- * @man: TTM memory type manager
+ * @mgr: amdgpu_gtt_mgr pointer
*
* Re-init the gart for each known BO in the GTT.
*/
-int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man)
+int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr)
{
- struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
- struct amdgpu_device *adev;
struct amdgpu_gtt_node *node;
struct drm_mm_node *mm_node;
+ struct amdgpu_device *adev;
int r = 0;
adev = container_of(mgr, typeof(*adev), mman.gtt_mgr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 09ad17944eb2..1ebb91db2274 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -678,13 +678,13 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
ui64 = atomic64_read(&adev->num_vram_cpu_page_faults);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VRAM_USAGE:
- ui64 = amdgpu_vram_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
+ ui64 = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_VIS_VRAM_USAGE:
- ui64 = amdgpu_vram_mgr_vis_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM));
+ ui64 = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GTT_USAGE:
- ui64 = amdgpu_gtt_mgr_usage(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));
+ ui64 = amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr);
return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
case AMDGPU_INFO_GDS_CONFIG: {
struct drm_amdgpu_info_gds gds_info;
@@ -715,8 +715,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
case AMDGPU_INFO_MEMORY: {
struct drm_amdgpu_memory_info mem;
- struct ttm_resource_manager *vram_man =
- ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
struct ttm_resource_manager *gtt_man =
ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
memset(&mem, 0, sizeof(mem));
@@ -725,7 +723,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
atomic64_read(&adev->vram_pin_size) -
AMDGPU_VM_RESERVED_VRAM;
mem.vram.heap_usage =
- amdgpu_vram_mgr_usage(vram_man);
+ amdgpu_vram_mgr_usage(&adev->mman.vram_mgr);
mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4;
mem.cpu_accessible_vram.total_heap_size =
@@ -735,7 +733,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
atomic64_read(&adev->visible_pin_size),
mem.vram.usable_heap_size);
mem.cpu_accessible_vram.heap_usage =
- amdgpu_vram_mgr_vis_usage(vram_man);
+ amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);
mem.cpu_accessible_vram.max_allocation =
mem.cpu_accessible_vram.usable_heap_size * 3 / 4;
@@ -744,7 +742,7 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
mem.gtt.usable_heap_size = mem.gtt.total_heap_size -
atomic64_read(&adev->gart_pin_size);
mem.gtt.heap_usage =
- amdgpu_gtt_mgr_usage(gtt_man);
+ amdgpu_gtt_mgr_usage(&adev->mman.gtt_mgr);
mem.gtt.max_allocation = mem.gtt.usable_heap_size * 3 / 4;
return copy_to_user(out, &mem,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3a7b56e57cec..5661b82d84d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/dma-buf.h>
+#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
@@ -1061,7 +1062,18 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
*/
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
+ int idx;
+
amdgpu_ttm_fini(adev);
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ arch_phys_wc_del(adev->gmc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+ }
+ drm_dev_exit(idx);
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 91e6e87562ac..8f47c14ecbc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1592,6 +1592,7 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
/* Let IP handle its data, maybe we need to get the output
* from the callback to update the error type/count, etc
*/
+ memset(&err_data, 0, sizeof(err_data));
ret = data->cb(obj->adev, &err_data, &entry);
/* ue will trigger an interrupt, and in that case
* we need do a reset to recovery the whole system.
@@ -1838,8 +1839,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
- status = amdgpu_vram_mgr_query_page_status(
- ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
+ status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
data->bps[i].retired_page);
if (status == -EBUSY)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
@@ -1940,8 +1940,7 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
goto out;
}
- amdgpu_vram_mgr_reserve_range(
- ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
+ amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
AMDGPU_GPU_PAGE_SIZE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index fb0d8bffdce2..4655702a5e00 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -43,6 +43,7 @@
#include <linux/sizes.h>
#include <linux/module.h>
+#include <drm/drm_drv.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@@ -1804,6 +1805,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
*/
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
+ int idx;
if (!adev->mman.initialized)
return;
@@ -1818,6 +1820,15 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+
+ if (adev->mman.aper_base_kaddr)
+ iounmap(adev->mman.aper_base_kaddr);
+ adev->mman.aper_base_kaddr = NULL;
+
+ drm_dev_exit(idx);
+ }
+
amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
amdgpu_preempt_mgr_fini(adev);
@@ -1893,7 +1904,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
unsigned i;
int r;
- if (direct_submit && !ring->sched.ready) {
+ if (!direct_submit && !ring->sched.ready) {
DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 7346ecff4438..f8f48be16d80 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -114,8 +114,8 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev);
void amdgpu_vram_mgr_fini(struct amdgpu_device *adev);
bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem);
-uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man);
-int amdgpu_gtt_mgr_recover(struct ttm_resource_manager *man);
+uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr);
+int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr);
uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man);
@@ -129,11 +129,11 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
void amdgpu_vram_mgr_free_sgt(struct device *dev,
enum dma_data_direction dir,
struct sg_table *sgt);
-uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man);
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man);
-int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
+uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr);
+uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr);
+int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
uint64_t start, uint64_t size);
-int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
+int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start);
int amdgpu_ttm_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index f8e574cc0e22..07bc0f504713 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -553,7 +553,6 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
struct amd_sriov_msg_vf2pf_info *vf2pf_info;
- struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;
@@ -576,8 +575,8 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->driver_cert = 0;
vf2pf_info->os_info.all = 0;
- vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20;
- vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20;
+ vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(&adev->mman.vram_mgr) >> 20;
+ vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr) >> 20;
vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;
@@ -626,20 +625,20 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
adev->virt.fw_reserve.p_vf2pf = NULL;
adev->virt.vf2pf_update_interval_ms = 0;
- if (adev->bios != NULL) {
- adev->virt.vf2pf_update_interval_ms = 2000;
+ if (adev->mman.fw_vram_usage_va != NULL) {
+ /* taken during ip_init and on reset, to initialize the vf2pf workqueue */
+ amdgpu_virt_exchange_data(adev);
+ INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
+ schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
+ } else if (adev->bios != NULL) {
+ /* taken during the early init stage, to get necessary flags, e.g. rlcg_acc related */
adev->virt.fw_reserve.p_pf2vf =
(struct amd_sriov_msg_pf2vf_info_header *)
(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
amdgpu_virt_read_pf2vf_data(adev);
}
-
- if (adev->virt.vf2pf_update_interval_ms != 0) {
- INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
- schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
- }
}
@@ -675,12 +674,6 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
if (adev->virt.ras_init_done)
amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
}
- } else if (adev->bios != NULL) {
- adev->virt.fw_reserve.p_pf2vf =
- (struct amd_sriov_msg_pf2vf_info_header *)
- (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
-
- amdgpu_virt_read_pf2vf_data(adev);
}
}
@@ -727,6 +720,10 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev)
vi_set_virt_ops(adev);
break;
case CHIP_VEGA10:
+ soc15_set_virt_ops(adev);
+ /* send a dummy GPU_INIT_DATA request to host on vega10 */
+ amdgpu_virt_request_init_data(adev);
+ break;
case CHIP_VEGA20:
case CHIP_ARCTURUS:
case CHIP_ALDEBARAN:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
index 2dcc68e04e84..d99c8779b51e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
@@ -144,15 +144,16 @@ static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ unsigned long flags;
if (crtc->state->event) {
- spin_lock(&crtc->dev->event_lock);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
if (drm_crtc_vblank_get(crtc) != 0)
drm_crtc_send_vblank_event(crtc, crtc->state->event);
else
drm_crtc_arm_vblank_event(crtc, crtc->state->event);
- spin_unlock(&crtc->dev->event_lock);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
crtc->state->event = NULL;
}
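The event_lock is also taken from the vblank interrupt path, so the atomic-flush hook has to disable local interrupts while holding it; a plain spin_lock() here could deadlock if the IRQ fired on the same CPU inside the critical section. A minimal sketch of the irqsave idiom:

	unsigned long flags;

	/* Save and disable local IRQ state around a lock that is
	 * shared with an interrupt handler, then restore it. */
	spin_lock_irqsave(&crtc->dev->event_lock, flags);
	/* ... consume crtc->state->event ... */
	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);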
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 7b2b0980ec41..7a2b487db57c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -96,10 +96,9 @@ static ssize_t amdgpu_mem_info_vram_used_show(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- struct ttm_resource_manager *man;
- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_usage(man));
+ return sysfs_emit(buf, "%llu\n",
+ amdgpu_vram_mgr_usage(&adev->mman.vram_mgr));
}
/**
@@ -116,10 +115,9 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- struct ttm_resource_manager *man;
- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- return sysfs_emit(buf, "%llu\n", amdgpu_vram_mgr_vis_usage(man));
+ return sysfs_emit(buf, "%llu\n",
+ amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr));
}
/**
@@ -263,16 +261,15 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
/**
* amdgpu_vram_mgr_reserve_range - Reserve a range from VRAM
*
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
* @start: start address of the range in VRAM
* @size: size of the range
*
- * Reserve memory from start addess with the specified size in VRAM
+ * Reserve memory from start address with the specified size in VRAM
*/
-int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
+int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
uint64_t start, uint64_t size)
{
- struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_vram_reservation *rsv;
rsv = kzalloc(sizeof(*rsv), GFP_KERNEL);
@@ -285,7 +282,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
spin_lock(&mgr->lock);
list_add_tail(&mgr->reservations_pending, &rsv->node);
- amdgpu_vram_mgr_do_reserve(man);
+ amdgpu_vram_mgr_do_reserve(&mgr->manager);
spin_unlock(&mgr->lock);
return 0;
@@ -294,7 +291,7 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
/**
* amdgpu_vram_mgr_query_page_status - query the reservation status
*
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
* @start: start address of a page in VRAM
*
* Returns:
@@ -302,10 +299,9 @@ int amdgpu_vram_mgr_reserve_range(struct ttm_resource_manager *man,
* 0: the page has been reserved
* -ENOENT: the input page is not a reservation
*/
-int amdgpu_vram_mgr_query_page_status(struct ttm_resource_manager *man,
+int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
uint64_t start)
{
- struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
struct amdgpu_vram_reservation *rsv;
int ret;
@@ -632,28 +628,24 @@ void amdgpu_vram_mgr_free_sgt(struct device *dev,
/**
* amdgpu_vram_mgr_usage - how many bytes are used in this domain
*
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
*
* Returns how many bytes are used in this domain.
*/
-uint64_t amdgpu_vram_mgr_usage(struct ttm_resource_manager *man)
+uint64_t amdgpu_vram_mgr_usage(struct amdgpu_vram_mgr *mgr)
{
- struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
-
return atomic64_read(&mgr->usage);
}
/**
* amdgpu_vram_mgr_vis_usage - how many bytes are used in the visible part
*
- * @man: TTM memory type manager
+ * @mgr: amdgpu_vram_mgr pointer
*
* Returns how many bytes are used in the visible part of VRAM
*/
-uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_resource_manager *man)
+uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr)
{
- struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
-
return atomic64_read(&mgr->vis_usage);
}
@@ -675,8 +667,8 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
spin_unlock(&mgr->lock);
drm_printf(printer, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
- man->size, amdgpu_vram_mgr_usage(man) >> 20,
- amdgpu_vram_mgr_vis_usage(man) >> 20);
+ man->size, amdgpu_vram_mgr_usage(mgr) >> 20,
+ amdgpu_vram_mgr_vis_usage(mgr) >> 20);
}
static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index a38c6a747fa4..e8b8f28c2f72 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -208,6 +208,7 @@ static struct attribute *amdgpu_xgmi_hive_attrs[] = {
&amdgpu_xgmi_hive_id,
NULL
};
+ATTRIBUTE_GROUPS(amdgpu_xgmi_hive);
static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
struct attribute *attr, char *buf)
@@ -237,7 +238,7 @@ static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
struct kobj_type amdgpu_xgmi_hive_type = {
.release = amdgpu_xgmi_hive_release,
.sysfs_ops = &amdgpu_xgmi_hive_ops,
- .default_attrs = amdgpu_xgmi_hive_attrs,
+ .default_groups = amdgpu_xgmi_hive_groups,
};
static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
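kobj_type's default_attrs is being retired in favor of default_groups; the ATTRIBUTE_GROUPS() macro generates the <name>_groups array from an existing <name>_attrs list, so the conversion is mechanical. A minimal sketch of the pattern, with hypothetical names:

	static struct attribute *example_attrs[] = {
		&example_attr.attr,
		NULL,
	};
	ATTRIBUTE_GROUPS(example);	/* emits example_groups[] */

	static struct kobj_type example_ktype = {
		.default_groups = example_groups,
	};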
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 54f28c075f21..f10ce740a29c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1428,6 +1428,10 @@ static int cik_asic_reset(struct amdgpu_device *adev)
{
int r;
+ /* APUs don't have full asic reset */
+ if (adev->flags & AMD_IS_APU)
+ return 0;
+
if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
dev_info(adev->dev, "BACO reset\n");
r = amdgpu_dpm_baco_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index b4eddf6e98a6..ff738e9725ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -543,7 +543,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se *
adev->gfx.config.max_shader_engines);
- if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) {
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
/* Get SA disabled bitmap from eFuse setting */
efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
@@ -566,6 +568,9 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
disabled_sa = tmp;
WREG32_SOC15(GC, 0, mmGCUTCL2_HARVEST_BYPASS_GROUPS_YELLOW_CARP, disabled_sa);
+ break;
+ default:
+ break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index 3d5d47a799e3..a2f8ed0e6a64 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -989,7 +989,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
goto skip_pin_bo;
- r = amdgpu_gart_table_vram_pin(adev);
+ r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
if (r)
return r;
@@ -1060,7 +1060,6 @@ static void gmc_v10_0_gart_disable(struct amdgpu_device *adev)
{
adev->gfxhub.funcs->gart_disable(adev);
adev->mmhub.funcs->gart_disable(adev);
- amdgpu_gart_table_vram_unpin(adev);
}
static int gmc_v10_0_hw_fini(void *handle)
@@ -1141,6 +1140,9 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 3))
+ return;
+
adev->mmhub.funcs->get_clockgating(adev, flags);
if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 0fe714f54cca..cd6c38e083d0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -476,7 +476,7 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
- r = amdgpu_gart_table_vram_pin(adev);
+ r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
if (r)
return r;
@@ -608,7 +608,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
WREG32(mmVM_L2_CNTL3,
VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
(0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
- amdgpu_gart_table_vram_unpin(adev);
}
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 0a50fdaced7e..ab8adbff9e2d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -620,7 +620,7 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
- r = amdgpu_gart_table_vram_pin(adev);
+ r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
if (r)
return r;
@@ -758,7 +758,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
WREG32(mmVM_L2_CNTL, tmp);
WREG32(mmVM_L2_CNTL2, 0);
- amdgpu_gart_table_vram_unpin(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 63b890f1e8af..054733838292 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -844,7 +844,7 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
- r = amdgpu_gart_table_vram_pin(adev);
+ r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
if (r)
return r;
@@ -999,7 +999,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
WREG32(mmVM_L2_CNTL, tmp);
WREG32(mmVM_L2_CNTL2, 0);
- amdgpu_gart_table_vram_unpin(adev);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 57f2729a7bd0..88c1eb9ad068 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -72,6 +72,9 @@
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x049d
#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2
+
static const char *gfxhub_client_ids[] = {
"CB",
@@ -1134,6 +1137,8 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
u32 d1vga_control = RREG32_SOC15(DCE, 0, mmD1VGA_CONTROL);
unsigned size;
+ /* TODO move to DC so GMC doesn't need to hard-code DCN registers */
+
if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
size = AMDGPU_VBIOS_VGA_ALLOCATION;
} else {
@@ -1142,7 +1147,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
switch (adev->ip_versions[DCE_HWIP][0]) {
case IP_VERSION(1, 0, 0):
case IP_VERSION(1, 0, 1):
- case IP_VERSION(2, 1, 0):
viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
size = (REG_GET_FIELD(viewport,
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
@@ -1150,6 +1154,14 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
4);
break;
+ case IP_VERSION(2, 1, 0):
+ viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2);
+ size = (REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
+ REG_GET_FIELD(viewport,
+ HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
+ 4);
+ break;
default:
viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
@@ -1743,7 +1755,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev))
goto skip_pin_bo;
- r = amdgpu_gart_table_vram_pin(adev);
+ r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
if (r)
return r;
@@ -1821,7 +1833,6 @@ static void gmc_v9_0_gart_disable(struct amdgpu_device *adev)
{
adev->gfxhub.funcs->gart_disable(adev);
adev->mmhub.funcs->gart_disable(adev);
- amdgpu_gart_table_vram_unpin(adev);
}
static int gmc_v9_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 0077e738db31..56da5ab82987 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -180,6 +180,11 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
}
+ } else if (req == IDH_REQ_GPU_INIT_DATA) {
+ /* Dummy REQ_GPU_INIT_DATA handling */
+ r = xgpu_ai_poll_msg(adev, IDH_REQ_GPU_INIT_DATA_READY);
+ /* version set to 0 since this is a dummy request */
+ adev->virt.req_init_data_ver = 0;
}
return 0;
@@ -381,10 +386,16 @@ void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
+static int xgpu_ai_request_init_data(struct amdgpu_device *adev)
+{
+ return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
+}
+
const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
.req_full_gpu = xgpu_ai_request_full_gpu_access,
.rel_full_gpu = xgpu_ai_release_full_gpu_access,
.reset_gpu = xgpu_ai_request_reset,
.wait_reset = NULL,
.trans_msg = xgpu_ai_mailbox_trans_msg,
+ .req_init_data = xgpu_ai_request_init_data,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index f9aa4d0bb638..fa7e13e0459e 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -35,6 +35,7 @@ enum idh_request {
IDH_REQ_GPU_FINI_ACCESS,
IDH_REL_GPU_FINI_ACCESS,
IDH_REQ_GPU_RESET_ACCESS,
+ IDH_REQ_GPU_INIT_DATA,
IDH_LOG_VF_ERROR = 200,
IDH_READY_TO_RESET = 201,
@@ -48,6 +49,7 @@ enum idh_event {
IDH_SUCCESS,
IDH_FAIL,
IDH_QUERY_ALIVE,
+ IDH_REQ_GPU_INIT_DATA_READY,
IDH_TEXT_MESSAGE = 255,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e8e4749e9c79..f0638db57111 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -2057,6 +2057,10 @@ static int sdma_v4_0_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* SMU saves SDMA state for us */
+ if (adev->in_s0ix)
+ return 0;
+
return sdma_v4_0_hw_fini(adev);
}
@@ -2064,6 +2068,10 @@ static int sdma_v4_0_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* SMU restores SDMA state for us */
+ if (adev->in_s0ix)
+ return 0;
+
return sdma_v4_0_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index fe9a7cc8d9eb..6645ebbd2696 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -956,6 +956,10 @@ static int vi_asic_reset(struct amdgpu_device *adev)
{
int r;
+ /* APUs don't have full asic reset */
+ if (adev->flags & AMD_IS_APU)
+ return 0;
+
if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
dev_info(adev->dev, "BACO reset\n");
r = amdgpu_dpm_baco_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index f187596faf66..9624bbe8b501 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -1060,6 +1060,9 @@ static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
return -ENODEV;
/* same everything but the other direction */
props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
+ if (!props2)
+ return -ENOMEM;
+
props2->node_from = id_to;
props2->node_to = id_from;
props2->kobj = NULL;
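The kfd_crat.c fix above guards the kmemdup() of the reverse-direction link properties. A minimal userspace sketch of the same pattern, with illustrative names rather than the driver's:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct iolink_props { int node_from, node_to; };

/* Duplicate props for the reverse direction; fail cleanly on OOM
 * instead of dereferencing a NULL duplicate. */
static int add_reverse_link(const struct iolink_props *props,
			    struct iolink_props **out)
{
	struct iolink_props *props2 = malloc(sizeof(*props2));

	if (!props2)
		return -ENOMEM;
	memcpy(props2, props, sizeof(*props2));
	props2->node_from = props->node_to;	/* same everything but */
	props2->node_to = props->node_from;	/* the other direction */
	*out = props2;
	return 0;
}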
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 127d41d0e4f0..2b65d0acae2c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -68,20 +68,20 @@ static void kfd_device_info_set_sdma_queue_num(struct kfd_dev *kfd)
case IP_VERSION(4, 0, 1):/* VEGA12 */
case IP_VERSION(4, 1, 0):/* RAVEN */
case IP_VERSION(4, 1, 1):/* RAVEN */
- case IP_VERSION(4, 1, 2):/* RENIOR */
+ case IP_VERSION(4, 1, 2):/* RENOIR */
case IP_VERSION(5, 2, 1):/* VANGOGH */
case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
kfd->device_info.num_sdma_queues_per_engine = 2;
break;
case IP_VERSION(4, 2, 0):/* VEGA20 */
- case IP_VERSION(4, 2, 2):/* ARCTUTUS */
+ case IP_VERSION(4, 2, 2):/* ARCTURUS */
case IP_VERSION(4, 4, 0):/* ALDEBARAN */
case IP_VERSION(5, 0, 0):/* NAVI10 */
case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
case IP_VERSION(5, 0, 2):/* NAVI14 */
case IP_VERSION(5, 0, 5):/* NAVI12 */
case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
- case IP_VERSION(5, 2, 2):/* NAVY_FLOUDER */
+ case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
kfd->device_info.num_sdma_queues_per_engine = 8;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 19890e350107..4b6814949aad 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1004,14 +1004,17 @@ static void uninitialize(struct device_queue_manager *dqm)
static int start_nocpsch(struct device_queue_manager *dqm)
{
+ int r = 0;
+
pr_info("SW scheduler is used");
init_interrupts(dqm);
if (dqm->dev->adev->asic_type == CHIP_HAWAII)
- return pm_init(&dqm->packet_mgr, dqm);
- dqm->sched_running = true;
+ r = pm_init(&dqm->packet_mgr, dqm);
+ if (!r)
+ dqm->sched_running = true;
- return 0;
+ return r;
}
static int stop_nocpsch(struct device_queue_manager *dqm)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index b8ac28fb1231..e8bc28009c22 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -197,6 +197,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
*/
return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
source_id == SOC15_INTSRC_SDMA_TRAP ||
+ source_id == SOC15_INTSRC_SDMA_ECC ||
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
((client_id == SOC15_IH_CLIENTID_VMC ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index f1930ff2c74a..d1145da5348f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -461,6 +461,7 @@ static struct attribute *procfs_queue_attrs[] = {
&attr_queue_gpuid,
NULL
};
+ATTRIBUTE_GROUPS(procfs_queue);
static const struct sysfs_ops procfs_queue_ops = {
.show = kfd_procfs_queue_show,
@@ -468,7 +469,7 @@ static const struct sysfs_ops procfs_queue_ops = {
static struct kobj_type procfs_queue_type = {
.sysfs_ops = &procfs_queue_ops,
- .default_attrs = procfs_queue_attrs,
+ .default_groups = procfs_queue_groups,
};
static const struct sysfs_ops procfs_stats_ops = {
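The .default_attrs to .default_groups conversion works because ATTRIBUTE_GROUPS(procfs_queue) generates the group wrappers from the existing attribute array. Roughly what the macro from <linux/sysfs.h> hand-expands to:

static const struct attribute_group procfs_queue_group = {
	.attrs = procfs_queue_attrs,
};

/* NULL-terminated array consumed by kobj_type.default_groups */
static const struct attribute_group *procfs_queue_groups[] = {
	&procfs_queue_group,
	NULL,
};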
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index aa5ee91cd595..f2805ba74c80 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -107,7 +107,7 @@ static void svm_range_add_to_svms(struct svm_range *prange)
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx]\n", prange->svms,
prange, prange->start, prange->last);
- list_add_tail(&prange->list, &prange->svms->list);
+ list_move_tail(&prange->list, &prange->svms->list);
prange->it_node.start = prange->start;
prange->it_node.last = prange->last;
interval_tree_insert(&prange->it_node, &prange->svms->objects);
@@ -295,8 +295,6 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start,
prange->last = last;
INIT_LIST_HEAD(&prange->list);
INIT_LIST_HEAD(&prange->update_list);
- INIT_LIST_HEAD(&prange->remove_list);
- INIT_LIST_HEAD(&prange->insert_list);
INIT_LIST_HEAD(&prange->svm_bo_list);
INIT_LIST_HEAD(&prange->deferred_list);
INIT_LIST_HEAD(&prange->child_list);
@@ -1018,7 +1016,7 @@ svm_range_split_tail(struct svm_range *prange,
int r = svm_range_split(prange, prange->start, new_last, &tail);
if (!r)
- list_add(&tail->insert_list, insert_list);
+ list_add(&tail->list, insert_list);
return r;
}
@@ -1030,7 +1028,7 @@ svm_range_split_head(struct svm_range *prange,
int r = svm_range_split(prange, new_start, prange->last, &head);
if (!r)
- list_add(&head->insert_list, insert_list);
+ list_add(&head->list, insert_list);
return r;
}
@@ -1898,8 +1896,8 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
goto out;
}
- list_add(&old->remove_list, remove_list);
- list_add(&prange->insert_list, insert_list);
+ list_add(&old->update_list, remove_list);
+ list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list);
if (node->start < start) {
@@ -1931,7 +1929,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
goto out;
}
- list_add(&prange->insert_list, insert_list);
+ list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list);
}
@@ -1946,13 +1944,13 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
r = -ENOMEM;
goto out;
}
- list_add(&prange->insert_list, insert_list);
+ list_add(&prange->list, insert_list);
list_add(&prange->update_list, update_list);
}
out:
if (r)
- list_for_each_entry_safe(prange, tmp, insert_list, insert_list)
+ list_for_each_entry_safe(prange, tmp, insert_list, list)
svm_range_free(prange);
return r;
@@ -3236,7 +3234,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
goto out;
}
/* Apply changes as a transaction */
- list_for_each_entry_safe(prange, next, &insert_list, insert_list) {
+ list_for_each_entry_safe(prange, next, &insert_list, list) {
svm_range_add_to_svms(prange);
svm_range_add_notifier_locked(mm, prange);
}
@@ -3244,8 +3242,7 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
svm_range_apply_attrs(p, prange, nattr, attrs);
/* TODO: unmap ranges from GPU that lost access */
}
- list_for_each_entry_safe(prange, next, &remove_list,
- remove_list) {
+ list_for_each_entry_safe(prange, next, &remove_list, update_list) {
pr_debug("unlink old 0x%p prange 0x%p [0x%lx 0x%lx]\n",
prange->svms, prange, prange->start,
prange->last);
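The kfd_svm rework above drops the dedicated insert_list/remove_list nodes and reuses the existing list/update_list members. A hedged sketch of why that is safe, with illustrative names: list_move_tail() unlinks a node from whatever list currently holds it, so one embedded list_head can sit on a temporary staging list and later migrate to the long-lived one.

#include <linux/list.h>

struct range {
	struct list_head list;	/* staging list first, then svms list */
};

static void commit_staged_ranges(struct list_head *staging,
				 struct list_head *svms)
{
	struct range *r, *tmp;

	/* ranges were staged earlier with list_add(&r->list, staging);
	 * list_move_tail() does a list_del() first, so each node leaves
	 * the staging list as it joins the long-lived list. */
	list_for_each_entry_safe(r, tmp, staging, list)
		list_move_tail(&r->list, svms);
}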
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index 2f8a95e86dcb..949b477e2f4c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -76,8 +76,6 @@ struct svm_work_list_item {
* aligned, page size is (last - start + 1)
* @list: link list node, used to scan all ranges of svms
* @update_list:link list node used to add to update_list
- * @remove_list:link list node used to add to remove list
- * @insert_list:link list node used to add to insert list
* @mapping: bo_va mapping structure to create and update GPU page table
* @npages: number of pages
* @dma_addr: dma mapping address on each GPU for system memory physical page
@@ -113,8 +111,6 @@ struct svm_range {
struct interval_tree_node it_node;
struct list_head list;
struct list_head update_list;
- struct list_head remove_list;
- struct list_head insert_list;
uint64_t npages;
dma_addr_t *dma_addr[MAX_GPU_INSTANCE];
struct ttm_resource *ttm_res;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 2f0b14f8f833..7c1c623ba799 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -658,7 +658,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
struct drm_connector_list_iter iter;
struct dc_link *link;
uint8_t link_index = 0;
- struct drm_device *dev = adev->dm.ddev;
+ struct drm_device *dev;
if (adev == NULL)
return;
@@ -675,6 +675,7 @@ static void dmub_hpd_callback(struct amdgpu_device *adev,
link_index = notify->link_index;
link = adev->dm.dc->links[link_index];
+ dev = adev->dm.ddev;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -1161,6 +1162,32 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
return 0;
}
+static void dm_dmub_hw_resume(struct amdgpu_device *adev)
+{
+ struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
+ enum dmub_status status;
+ bool init;
+
+ if (!dmub_srv) {
+ /* DMUB isn't supported on the ASIC. */
+ return;
+ }
+
+ status = dmub_srv_is_hw_init(dmub_srv, &init);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("DMUB hardware init check failed: %d\n", status);
+
+ if (status == DMUB_STATUS_OK && init) {
+ /* Wait for firmware load to finish. */
+ status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
+ if (status != DMUB_STATUS_OK)
+ DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
+ } else {
+ /* Perform the full hardware initialization. */
+ dm_dmub_hw_init(adev);
+ }
+}
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
{
@@ -2637,9 +2664,7 @@ static int dm_resume(void *handle)
amdgpu_dm_outbox_init(adev);
/* Before powering on DC we need to re-initialize DMUB. */
- r = dm_dmub_hw_init(adev);
- if (r)
- DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+ dm_dmub_hw_resume(adev);
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
@@ -3628,7 +3653,7 @@ static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
/* Use GRPH_PFLIP interrupt */
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
- i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
+ i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
if (r) {
@@ -6073,6 +6098,7 @@ static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
struct dsc_dec_dpcd_caps *dsc_caps)
{
stream->timing.flags.DSC = 0;
+ dsc_caps->is_dsc_supported = false;
if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
sink->sink_signal == SIGNAL_TYPE_EDP)) {
@@ -10737,6 +10763,8 @@ static int dm_update_plane_state(struct dc *dc,
dm_new_plane_state->dc_state = dc_new_plane_state;
+ dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY);
+
/* Tell DC to do a full surface update every time there
* is a plane change. Inefficient, but works for now.
*/
@@ -10889,7 +10917,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
enum dc_status status;
int ret, i;
bool lock_and_validation_needed = false;
- struct dm_crtc_state *dm_old_crtc_state;
+ struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_mst_fairness_vars vars[MAX_PIPES];
struct drm_dp_mst_topology_state *mst_state;
@@ -11071,6 +11099,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ if (dm_new_crtc_state->mpo_requested)
+ DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc);
+ }
+
/* Check cursor planes scaling */
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index c98e402eab0c..b9a69b0cef23 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -626,6 +626,8 @@ struct dm_crtc_state {
bool cm_has_degamma;
bool cm_is_degamma_srgb;
+ bool mpo_requested;
+
int update_type;
int active_planes;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index ff5bb152ef49..e6ef36de0825 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -2033,10 +2033,10 @@ static void calculate_bandwidth(
kfree(surface_type);
free_tiling_mode:
kfree(tiling_mode);
-free_yclk:
- kfree(yclk);
free_sclk:
kfree(sclk);
+free_yclk:
+ kfree(yclk);
}
/*******************************************************************************
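The label swap above restores the usual goto-unwind convention: cleanup labels run in reverse allocation order and fall through, so both a mid-function failure and the normal exit free every temporary. A compilable sketch of the convention, names illustrative:

#include <errno.h>
#include <stdlib.h>

static int calculate(void)
{
	int ret = 0;
	int *yclk, *sclk;

	yclk = calloc(8, sizeof(*yclk));	/* allocated first */
	if (!yclk)
		return -ENOMEM;

	sclk = calloc(8, sizeof(*sclk));	/* allocated second */
	if (!sclk) {
		ret = -ENOMEM;
		goto free_yclk;		/* only yclk needs freeing here */
	}

	/* ... bandwidth math; failures here use "goto free_sclk" ... */
	if (sclk[0] != 0) {		/* illustrative failure check */
		ret = -EINVAL;
		goto free_sclk;
	}

free_sclk:				/* freed first: allocated last */
	free(sclk);
free_yclk:				/* fall through: freed last */
	free(yclk);
	return ret;
}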
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index ec19678a0702..e447c74be713 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -503,7 +503,6 @@ static void dcn_bw_calc_rq_dlg_ttu(
//input[in_idx].dout.output_standard;
/*todo: soc->sr_enter_plus_exit_time??*/
- dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src);
dml1_extract_rq_regs(dml, rq_regs, rq_param);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 9f35f2e8f971..cac80ba69072 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -38,7 +38,6 @@
#include "clk/clk_11_0_0_offset.h"
#include "clk/clk_11_0_0_sh_mask.h"
-#include "irq/dcn20/irq_service_dcn20.h"
#undef FN
#define FN(reg_name, field_name) \
@@ -223,8 +222,6 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
bool force_reset = false;
bool p_state_change_support;
int total_plane_count;
- int irq_src;
- uint32_t hpd_state;
if (dc->work_arounds.skip_clock_update)
return;
@@ -242,13 +239,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->res_pool->pp_smu)
pp_smu = &dc->res_pool->pp_smu->nv_funcs;
- for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) {
- hpd_state = dc_get_hpd_state_dcn20(dc->res_pool->irqs, irq_src);
- if (hpd_state)
- break;
- }
-
- if (display_count == 0 && !hpd_state)
+ if (display_count == 0)
enter_display_off = true;
if (enter_display_off == safe_to_lower) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index fbda42313bfe..f4dee0e48a67 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -42,7 +42,6 @@
#include "clk/clk_10_0_2_sh_mask.h"
#include "renoir_ip_offset.h"
-#include "irq/dcn21/irq_service_dcn21.h"
/* Constants */
@@ -129,11 +128,9 @@ static void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
- int irq_src;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
- uint32_t hpd_state;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
@@ -150,14 +147,8 @@ static void rn_update_clocks(struct clk_mgr *clk_mgr_base,
display_count = rn_get_active_display_cnt_wa(dc, context);
- for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD5; irq_src++) {
- hpd_state = dc_get_hpd_state_dcn21(dc->res_pool->irqs, irq_src);
- if (hpd_state)
- break;
- }
-
/* if we can go lower, go lower */
- if (display_count == 0 && !hpd_state) {
+ if (display_count == 0) {
rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 48005def1164..bc4ddc36fe58 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -570,32 +570,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 7.95,
- .sr_enter_plus_exit_time_us = 9,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.82,
- .sr_enter_plus_exit_time_us = 11.196,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.89,
- .sr_enter_plus_exit_time_us = 11.24,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.748,
- .sr_enter_plus_exit_time_us = 11.102,
+ .sr_exit_time_us = 13.5,
+ .sr_enter_plus_exit_time_us = 16.5,
.valid = true,
},
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 4162ce40089b..9d17c5a5ae01 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -329,38 +329,38 @@ static struct clk_bw_params dcn31_bw_params = {
};
-static struct wm_table ddr4_wm_table = {
+static struct wm_table ddr5_wm_table = {
.entries = {
{
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 6.09,
- .sr_enter_plus_exit_time_us = 7.14,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 9,
+ .sr_enter_plus_exit_time_us = 11,
.valid = true,
},
}
@@ -687,7 +687,7 @@ void dcn31_clk_mgr_construct(
if (ctx->dc_bios->integrated_info->memory_type == LpDdr5MemType) {
dcn31_bw_params.wm_table = lpddr5_wm_table;
} else {
- dcn31_bw_params.wm_table = ddr4_wm_table;
+ dcn31_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
dcn31_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
index b7ace235a2d5..de3f4643eeef 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
@@ -119,6 +119,16 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr,
result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000);
+ if (result == VBIOSSMC_Result_Failed) {
+ if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu &&
+ param == TABLE_WATERMARKS)
+ DC_LOG_WARNING("Watermarks table not configured properly by SMU");
+ else
+ ASSERT(0);
+ REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK);
+ return -1;
+ }
+
if (IS_SMU_TIMEOUT(result)) {
ASSERT(0);
dm_helpers_smu_timeout(CTX, msg_id, param, 10 * 200000);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 01c8849b9db2..d18e9f3ea998 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1220,6 +1220,8 @@ struct dc *dc_create(const struct dc_init_data *init_params)
dc->caps.max_dp_protocol_version = DP_VERSION_1_4;
+ dc->caps.max_otg_num = dc->res_pool->res_cap->num_timing_generator;
+
if (dc->res_pool->dmcu != NULL)
dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
}
@@ -1404,20 +1406,34 @@ static void program_timing_sync(
status->timing_sync_info.master = false;
}
- /* remove any other unblanked pipes as they have already been synced */
- for (j = j + 1; j < group_size; j++) {
- bool is_blanked;
- if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
- is_blanked =
- pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
- else
- is_blanked =
- pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
- if (!is_blanked) {
- group_size--;
- pipe_set[j] = pipe_set[group_size];
- j--;
+ /* remove any other pipes that have already been synced */
+ if (dc->config.use_pipe_ctx_sync_logic) {
+ /* check each pipe's syncd status to decide which pipes to remove */
+ for (j = 1; j < group_size; j++) {
+ if (pipe_set[j]->pipe_idx_syncd == pipe_set[0]->pipe_idx_syncd) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ } else
+ /* link slave pipe's syncd with master pipe */
+ pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
+ }
+ } else {
+ for (j = j + 1; j < group_size; j++) {
+ bool is_blanked;
+
+ if (pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked)
+ is_blanked =
+ pipe_set[j]->stream_res.opp->funcs->dpg_is_blanked(pipe_set[j]->stream_res.opp);
+ else
+ is_blanked =
+ pipe_set[j]->stream_res.tg->funcs->is_blanked(pipe_set[j]->stream_res.tg);
+ if (!is_blanked) {
+ group_size--;
+ pipe_set[j] = pipe_set[group_size];
+ j--;
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index dc1380b6c5e0..b5e570d33ca9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -3971,102 +3971,73 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
struct link_encoder *link_enc = NULL;
-#endif
+ struct cp_psp_stream_config config = {0};
+ enum dp_panel_mode panel_mode =
+ dp_get_panel_mode(pipe_ctx->stream->link);
- if (cp_psp && cp_psp->funcs.update_stream_config) {
- struct cp_psp_stream_config config = {0};
- enum dp_panel_mode panel_mode =
- dp_get_panel_mode(pipe_ctx->stream->link);
+ if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL)
+ return;
- config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
- /*stream_enc_inst*/
- config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
- config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
-#if defined(CONFIG_DRM_AMD_DC_DCN)
- config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
-
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY ||
- pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
- link_enc = pipe_ctx->stream->link->link_enc;
- else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
- if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
- link_enc = link_enc_cfg_get_link_enc_used_by_stream(
- pipe_ctx->stream->ctx->dc,
- pipe_ctx->stream);
- }
- ASSERT(link_enc);
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
+ link_enc = pipe_ctx->stream->link->link_enc;
+ else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(
+ pipe_ctx->stream->ctx->dc,
+ pipe_ctx->stream);
+ ASSERT(link_enc);
+ if (link_enc == NULL)
+ return;
- // Initialize PHY ID with ABCDE - 01234 mapping except when it is B0
- config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+ /* otg instance */
+ config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
- // Add flag to guard new A0 DIG mapping
- if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true &&
- pipe_ctx->stream->link->dc->ctx->dce_version == DCN_VERSION_3_1) {
- config.dig_be = link_enc->preferred_engine;
- config.dio_output_type = pipe_ctx->stream->link->ep_type;
- config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
- } else {
- config.dio_output_type = 0;
- config.dio_output_idx = 0;
- }
+ /* dig front end */
+ config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
- // Add flag to guard B0 implementation
- if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true &&
- link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
- if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
- // enum ID 1-4 maps to DPIA PHY ID 0-3
- config.phy_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1;
- } else { // for non DPIA mode over B0, ABCDE maps to 01564
-
- switch (link_enc->transmitter) {
- case TRANSMITTER_UNIPHY_A:
- config.phy_idx = 0;
- break;
- case TRANSMITTER_UNIPHY_B:
- config.phy_idx = 1;
- break;
- case TRANSMITTER_UNIPHY_C:
- config.phy_idx = 5;
- break;
- case TRANSMITTER_UNIPHY_D:
- config.phy_idx = 6;
- break;
- case TRANSMITTER_UNIPHY_E:
- config.phy_idx = 4;
- break;
- default:
- config.phy_idx = 0;
- break;
- }
+ /* stream encoder index */
+ config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ config.stream_enc_idx =
+ pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
+#endif
- }
- }
- } else if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
- link_enc = link_enc_cfg_get_link_enc_used_by_stream(
- pipe_ctx->stream->ctx->dc,
- pipe_ctx->stream);
- config.phy_idx = 0; /* Clear phy_idx for non-physical display endpoints. */
- }
- ASSERT(link_enc);
- if (link_enc)
- config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
- if (is_dp_128b_132b_signal(pipe_ctx)) {
- config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
+ /* dig back end */
+ config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
- config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
- config.dp2_enabled = 1;
- }
+ /* link encoder index */
+ config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
#endif
- config.dpms_off = dpms_off;
- config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
- config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP);
- config.mst_enabled = (pipe_ctx->stream->signal ==
- SIGNAL_TYPE_DISPLAY_PORT_MST);
- cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
- }
+ /* dio output index */
+ config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
+ /* phy index */
+ config.phy_idx = resource_transmitter_to_phy_idx(
+ pipe_ctx->stream->link->dc, link_enc->transmitter);
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ /* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */
+ config.phy_idx = 0;
+
+ /* stream properties */
+ config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0;
+ config.mst_enabled = (pipe_ctx->stream->signal ==
+ SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0;
+#endif
+ config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ?
+ 1 : 0;
+ config.dpms_off = dpms_off;
+
+ /* dm stream context */
+ config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+
+ cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 05e216524370..61b8f29a0c30 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -202,7 +202,7 @@ void dp_wait_for_training_aux_rd_interval(
uint32_t wait_in_micro_secs)
{
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (wait_in_micro_secs > 16000)
+ if (wait_in_micro_secs > 1000)
msleep(wait_in_micro_secs/1000);
else
udelay(wait_in_micro_secs);
@@ -5597,6 +5597,26 @@ static bool retrieve_link_cap(struct dc_link *link)
dp_hw_fw_revision.ieee_fw_rev,
sizeof(dp_hw_fw_revision.ieee_fw_rev));
+ /* Quirk for Apple MBP 2018 15" Retina panels: wrong DP_MAX_LINK_RATE */
+ {
+ uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 };
+ uint8_t fwrev_mbp_2018[] = { 7, 4 };
+ uint8_t fwrev_mbp_2018_vega[] = { 8, 4 };
+
+ /* We also check for the firmware revision as 16,1 models have an
+ * identical device id and are incorrectly quirked otherwise.
+ */
+ if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+ !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018,
+ sizeof(str_mbp_2018)) &&
+ (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018,
+ sizeof(fwrev_mbp_2018)) ||
+ !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega,
+ sizeof(fwrev_mbp_2018_vega)))) {
+ link->reported_link_cap.link_rate = LINK_RATE_RBR2;
+ }
+ }
+
memset(&link->dpcd_caps.dsc_caps, '\0',
sizeof(link->dpcd_caps.dsc_caps));
memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap));
@@ -6935,7 +6955,7 @@ bool dpcd_write_128b_132b_sst_payload_allocation_table(
}
}
retries++;
- udelay(5000);
+ msleep(5);
}
if (!result && retries == max_retries) {
@@ -6987,7 +7007,7 @@ bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link)
break;
}
- udelay(5000);
+ msleep(5);
}
if (result == ACT_FAILED) {
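Both udelay(5000) to msleep(5) conversions, and the 16000 to 1000 threshold change in dp_wait_for_training_aux_rd_interval(), follow the kernel's timer guidance (Documentation/timers/timers-howto.rst): busy-wait only for sub-millisecond delays, and sleep once the delay reaches the millisecond range in non-atomic context. A sketch of the resulting selection rule:

#include <linux/delay.h>

/* Non-atomic context assumed; msleep() may sleep longer than asked
 * (timer granularity), which is acceptable for these polling waits. */
static void dp_delay_us(unsigned int us)
{
	if (us >= 1000)
		msleep(us / 1000);	/* yield the CPU, >= 1 ms */
	else
		udelay(us);		/* short busy-wait */
}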
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index de5c7d1e0267..b3912ff9dc91 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -3216,3 +3216,90 @@ struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
return hpo_dp_link_enc;
}
#endif
+
+void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
+ struct dc_state *context)
+{
+ int i, j;
+ struct pipe_ctx *pipe_ctx_old, *pipe_ctx, *pipe_ctx_syncd;
+
+ /* If pipe backend is reset, need to reset pipe syncd status */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx_old = &dc->current_state->res_ctx.pipe_ctx[i];
+ pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ continue;
+
+ if (!pipe_ctx->stream ||
+ pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
+
+ /* Reset all the syncd pipes from the disabled pipe */
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ pipe_ctx_syncd = &context->res_ctx.pipe_ctx[j];
+ if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_syncd) == pipe_ctx_old->pipe_idx) ||
+ !IS_PIPE_SYNCD_VALID(pipe_ctx_syncd))
+ SET_PIPE_SYNCD_TO_PIPE(pipe_ctx_syncd, j);
+ }
+ }
+ }
+}
+
+void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
+ struct dc_state *context,
+ uint8_t disabled_master_pipe_idx)
+{
+ int i;
+ struct pipe_ctx *pipe_ctx, *pipe_ctx_check;
+
+ pipe_ctx = &context->res_ctx.pipe_ctx[disabled_master_pipe_idx];
+ if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx) != disabled_master_pipe_idx) ||
+ !IS_PIPE_SYNCD_VALID(pipe_ctx))
+ SET_PIPE_SYNCD_TO_PIPE(pipe_ctx, disabled_master_pipe_idx);
+
+ /* for the disabled pipe, check whether any slave pipe is still syncd to it and assert */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe_ctx_check = &context->res_ctx.pipe_ctx[i];
+
+ if ((GET_PIPE_SYNCD_FROM_PIPE(pipe_ctx_check) == disabled_master_pipe_idx) &&
+ IS_PIPE_SYNCD_VALID(pipe_ctx_check) && (i != disabled_master_pipe_idx))
+ DC_ERR("DC: Failure: pipe_idx[%d] syncd with disabled master pipe_idx[%d]\n",
+ i, disabled_master_pipe_idx);
+ }
+}
+
+uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter)
+{
+ /* TODO - get transmitter to phy idx mapping from DMUB */
+ uint8_t phy_idx = transmitter - TRANSMITTER_UNIPHY_A;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc->ctx->dce_version == DCN_VERSION_3_1 &&
+ dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ switch (transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ phy_idx = 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ phy_idx = 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ phy_idx = 5;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ phy_idx = 6;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ phy_idx = 4;
+ break;
+ default:
+ phy_idx = 0;
+ break;
+ }
+ }
+#endif
+ return phy_idx;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index da2c78ce14d6..b51864890621 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -202,6 +202,7 @@ struct dc_caps {
bool edp_dsc_support;
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
+ uint32_t max_otg_num;
};
struct dc_bug_wa {
@@ -344,6 +345,7 @@ struct dc_config {
uint8_t vblank_alignment_max_frame_time_diff;
bool is_asymmetric_memory;
bool is_single_rank_dimm;
+ bool use_pipe_ctx_sync_logic;
};
enum visual_confirm {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 78192ecba102..eb2755bdb30e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1566,6 +1566,10 @@ static enum dc_status apply_single_controller_ctx_to_hw(
&pipe_ctx->stream->audio_info);
}
+ /* make sure no pipes are syncd to the pipe being enabled */
+ if (!pipe_ctx->stream->apply_seamless_boot_optimization && dc->config.use_pipe_ctx_sync_logic)
+ check_syncd_pipes_for_disabled_master_pipe(dc, context, pipe_ctx->pipe_idx);
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* DCN3.1 FPGA Workaround
* Need to enable HPO DP Stream Encoder before setting OTG master enable.
@@ -1604,11 +1608,6 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
- if (dc_is_dp_signal(pipe_ctx->stream->signal) &&
- pipe_ctx->stream_res.stream_enc->funcs->reset_fifo)
- pipe_ctx->stream_res.stream_enc->funcs->reset_fifo(
- pipe_ctx->stream_res.stream_enc);
-
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
@@ -1835,9 +1834,29 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
break;
}
}
- // We are trying to enable eDP, don't power down VDD
- if (can_apply_edp_fast_boot)
+
+ /*
+ * TO-DO: So far the code logic below only addresses single eDP case.
+ * For dual eDP case, there are a few things that need to be
+ * implemented first:
+ *
+ * 1. Change the fastboot logic above, so eDP link[0 or 1]'s
+ * stream[0 or 1] will all be checked.
+ *
+ * 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on
+ * for each eDP.
+ *
+ * Once above 2 things are completed, we can then change the logic below
+ * correspondingly, so dual eDP case will be fully covered.
+ */
+
+ // We are trying to enable eDP; don't power down VDD if an eDP stream exists
+ if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) {
keep_edp_vdd_on = true;
+ DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n");
+ } else {
+ DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n");
+ }
}
// Check seamless boot support
@@ -2297,6 +2316,10 @@ enum dc_status dce110_apply_ctx_to_hw(
enum dc_status status;
int i;
+ /* reset syncd pipes from disabled pipes */
+ if (dc->config.use_pipe_ctx_sync_logic)
+ reset_syncd_pipes_from_disabled_pipes(dc, context);
+
/* Reset old context */
/* look up the targets that have been removed since last commit */
hws->funcs.reset_hw_ctx_wrap(dc, context);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index f19015413ce3..530a72e3eefe 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1365,7 +1365,12 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
uint32_t opp_id_src1 = OPP_ID_INVALID;
// Step 1: To find out which OPTC is running & OPTC DSC is ON
- for (i = 0; i < dc->res_pool->res_cap->num_timing_generator; i++) {
+ // We can't use res_pool->res_cap->num_timing_generator here because
+ // it records the default number of display pipes built into the driver,
+ // not the number on the current chip; some ASICs are fused to fewer
+ // pipes than the default. dcnxx_resource_construct() obtains the real
+ // count, stored in timing_generator_count.
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
uint32_t optc_dsc_state = 0;
struct timing_generator *tg = dc->res_pool->timing_generators[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index bf4436d7aaab..b0c08ee6bc2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -902,19 +902,6 @@ void enc1_stream_encoder_stop_dp_info_packets(
}
-void enc1_stream_encoder_reset_fifo(
- struct stream_encoder *enc)
-{
- struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
-
- /* set DIG_START to 0x1 to reset FIFO */
- REG_UPDATE(DIG_FE_CNTL, DIG_START, 1);
- udelay(100);
-
- /* write 0 to take the FIFO out of reset */
- REG_UPDATE(DIG_FE_CNTL, DIG_START, 0);
-}
-
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc)
@@ -1600,8 +1587,6 @@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index a146a41f68e9..687d7e4bf7ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -626,9 +626,6 @@ void enc1_stream_encoder_send_immediate_sdp_message(
void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
-void enc1_stream_encoder_reset_fifo(
- struct stream_encoder *enc);
-
void enc1_stream_encoder_dp_blank(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 2bc93df023ad..2a72517e2b28 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1069,7 +1069,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 8a70f92795c2..aab25ca8343a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -593,8 +593,6 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = {
enc1_stream_encoder_send_immediate_sdp_message,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 8daa12730bc1..a04ca4a98392 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -789,8 +789,6 @@ static const struct stream_encoder_funcs dcn30_str_enc_funcs = {
enc3_stream_encoder_update_dp_info_packets,
.stop_dp_info_packets =
enc1_stream_encoder_stop_dp_info_packets,
- .reset_fifo =
- enc1_stream_encoder_reset_fifo,
.dp_blank =
enc1_stream_encoder_dp_blank,
.dp_unblank =
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index 602ec9a08549..8ca26383b568 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1878,7 +1878,6 @@ noinline bool dcn30_internal_validate_bw(
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate);
- DC_FP_START();
if (!pipe_cnt) {
out = true;
goto validate_out;
@@ -2104,7 +2103,6 @@ validate_fail:
out = false;
validate_out:
- DC_FP_END();
return out;
}
@@ -2306,7 +2304,9 @@ bool dcn30_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
+ DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+ DC_FP_END();
if (pipe_cnt == 0)
goto validate_out;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index c1c6e602b06c..5d9637b07429 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_clock_gate = true,
.disable_pplib_clock_request = true,
.disable_pplib_wm_range = true,
- .pipe_split_policy = MPC_SPLIT_DYNAMIC,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -1380,6 +1380,17 @@ static void set_wm_ranges(
pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges);
}
+static void dcn301_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel)
+{
+ DC_FP_START();
+ dcn301_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_END();
+}
+
static struct resource_funcs dcn301_res_pool_funcs = {
.destroy = dcn301_destroy_resource_pool,
.link_enc_create = dcn301_link_encoder_create,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index 71c359f9cdd2..8b9b1a5309ba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -100,6 +100,35 @@ static uint8_t phy_id_from_transmitter(enum transmitter t)
return phy_id;
}
+static bool has_query_dp_alt(struct link_encoder *enc)
+{
+ struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+
+ /* Supports development firmware and firmware >= 4.0.11 */
+ return dc_dmub_srv &&
+ !(dc_dmub_srv->dmub->fw_version >= DMUB_FW_VERSION(4, 0, 0) &&
+ dc_dmub_srv->dmub->fw_version <= DMUB_FW_VERSION(4, 0, 10));
+}
+
+static bool query_dp_alt_from_dmub(struct link_encoder *enc,
+ union dmub_rb_cmd *cmd)
+{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+ struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS;
+ cmd->query_dp_alt.header.sub_type =
+ DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
+ cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
+ cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+
+ if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, cmd))
+ return false;
+
+ return true;
+}
+
void dcn31_link_encoder_set_dio_phy_mux(
struct link_encoder *enc,
enum encoder_type_select sel,
@@ -569,45 +598,90 @@ void dcn31_link_encoder_disable_output(
bool dcn31_link_encoder_is_in_alt_mode(struct link_encoder *enc)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
union dmub_rb_cmd cmd;
- bool is_usb_c_alt_mode = false;
+ uint32_t dp_alt_mode_disable;
- if (enc->features.flags.bits.DP_IS_USB_C && dc_dmub_srv) {
- memset(&cmd, 0, sizeof(cmd));
- cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS;
- cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
- cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.panel_cntl.data);
- cmd.query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+ /* Only applicable to USB-C PHY. */
+ if (!enc->features.flags.bits.DP_IS_USB_C)
+ return false;
- if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd))
+ /*
+ * Use the new interface from DMCUB if available.
+ * Avoids hanging the RDPCSPIPE if DMCUB wasn't already running.
+ */
+ if (has_query_dp_alt(enc)) {
+ if (!query_dp_alt_from_dmub(enc, &cmd))
return false;
- is_usb_c_alt_mode = (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
+ return (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
}
- return is_usb_c_alt_mode;
+ /* Legacy path, avoid if possible. */
+ if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
+ &dp_alt_mode_disable);
+ } else {
+ /*
+ * B0 phys use a new set of registers to check whether alt mode is disabled.
+ * if value == 1 alt mode is disabled, otherwise it is enabled.
+ */
+ if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) ||
+ (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) ||
+ (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
+ &dp_alt_mode_disable);
+ } else {
+ REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE,
+ &dp_alt_mode_disable);
+ }
+ }
+
+ return (dp_alt_mode_disable == 0);
}
void dcn31_link_encoder_get_max_link_cap(struct link_encoder *enc, struct dc_link_settings *link_settings)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- struct dc_dmub_srv *dc_dmub_srv = enc->ctx->dmub_srv;
union dmub_rb_cmd cmd;
+ uint32_t is_in_usb_c_dp4_mode = 0;
dcn10_link_encoder_get_max_link_cap(enc, link_settings);
- if (enc->features.flags.bits.DP_IS_USB_C && dc_dmub_srv) {
- memset(&cmd, 0, sizeof(cmd));
- cmd.query_dp_alt.header.type = DMUB_CMD__VBIOS;
- cmd.query_dp_alt.header.sub_type = DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
- cmd.query_dp_alt.header.payload_bytes = sizeof(cmd.panel_cntl.data);
- cmd.query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+ /* Take the link cap directly if not USB */
+ if (!enc->features.flags.bits.DP_IS_USB_C)
+ return;
- if (!dc_dmub_srv_cmd_with_reply_data(dc_dmub_srv, &cmd))
+ /*
+ * Use the new interface from DMCUB if available.
+ * Avoids hanging the RDPCSPIPE if DMCUB wasn't already running.
+ */
+ if (has_query_dp_alt(enc)) {
+ if (!query_dp_alt_from_dmub(enc, &cmd))
return;
- if (cmd.query_dp_alt.data.is_usb && cmd.query_dp_alt.data.is_dp4 == 0)
+ if (cmd.query_dp_alt.data.is_usb &&
+ cmd.query_dp_alt.data.is_dp4 == 0)
link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
+
+ return;
}
+
+ /* Legacy path, avoid if possible. */
+ if (enc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_B0) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
+ &is_in_usb_c_dp4_mode);
+ } else {
+ if ((enc10->base.transmitter == TRANSMITTER_UNIPHY_A) ||
+ (enc10->base.transmitter == TRANSMITTER_UNIPHY_B) ||
+ (enc10->base.transmitter == TRANSMITTER_UNIPHY_E)) {
+ REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
+ &is_in_usb_c_dp4_mode);
+ } else {
+ REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4,
+ &is_in_usb_c_dp4_mode);
+ }
+ }
+
+ if (!is_in_usb_c_dp4_mode)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
}
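The has_query_dp_alt() gate above encodes its in-tree comment ("development firmware and firmware >= 4.0.11") as a negated window: DMUB_FW_VERSION() packs major/minor/revision into one comparable integer, and development builds are assumed to report a version below 4.0.0 (typically 0), so only the [4.0.0, 4.0.10] range falls back to the legacy register path. A standalone restatement, with a local stand-in for the packing macro (assumed layout, not the driver's exact definition):

#include <stdbool.h>
#include <stdint.h>

/* Local stand-in for the driver's DMUB_FW_VERSION() macro
 * (assumed major.minor.revision packing). */
#define FW_VERSION(major, minor, rev) \
	(((uint32_t)(major) << 24) | ((uint32_t)(minor) << 16) | (rev))

static bool fw_has_query_dp_alt(uint32_t fw_version)
{
	/* true for dev firmware (< 4.0.0) and releases >= 4.0.11 */
	return !(fw_version >= FW_VERSION(4, 0, 0) &&
		 fw_version <= FW_VERSION(4, 0, 10));
}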
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
index 90c73a1cb986..5e3bcaf12cac 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubbub.c
@@ -138,8 +138,11 @@ static uint32_t convert_and_clamp(
ret_val = wm_ns * refclk_mhz;
ret_val /= 1000;
- if (ret_val > clamp_value)
+ if (ret_val > clamp_value) {
+ /* clamping WMs is abnormal and unexpected, and may lead to underflow */
+ ASSERT(0);
ret_val = clamp_value;
+ }
return ret_val;
}
@@ -159,7 +162,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->a.urgent_ns > hubbub2->watermarks.a.urgent_ns) {
hubbub2->watermarks.a.urgent_ns = watermarks->a.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value);
@@ -193,7 +196,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub2->watermarks.a.urgent_latency_ns) {
hubbub2->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
} else if (watermarks->a.urgent_latency_ns < hubbub2->watermarks.a.urgent_latency_ns)
@@ -203,7 +206,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->b.urgent_ns > hubbub2->watermarks.b.urgent_ns) {
hubbub2->watermarks.b.urgent_ns = watermarks->b.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B, prog_wm_value);
@@ -237,7 +240,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub2->watermarks.b.urgent_latency_ns) {
hubbub2->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
} else if (watermarks->b.urgent_latency_ns < hubbub2->watermarks.b.urgent_latency_ns)
@@ -247,7 +250,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->c.urgent_ns > hubbub2->watermarks.c.urgent_ns) {
hubbub2->watermarks.c.urgent_ns = watermarks->c.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C, prog_wm_value);
@@ -281,7 +284,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub2->watermarks.c.urgent_latency_ns) {
hubbub2->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
} else if (watermarks->c.urgent_latency_ns < hubbub2->watermarks.c.urgent_latency_ns)
@@ -291,7 +294,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->d.urgent_ns > hubbub2->watermarks.d.urgent_ns) {
hubbub2->watermarks.d.urgent_ns = watermarks->d.urgent_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, 0,
DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D, prog_wm_value);
@@ -325,7 +328,7 @@ static bool hubbub31_program_urgent_watermarks(
if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub2->watermarks.d.urgent_latency_ns) {
hubbub2->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0x3fff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
} else if (watermarks->d.urgent_latency_ns < hubbub2->watermarks.d.urgent_latency_ns)
@@ -351,7 +354,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
@@ -367,7 +370,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->a.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
@@ -383,7 +386,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_A calculated =%d\n"
@@ -399,7 +402,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->a.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_A calculated =%d\n"
@@ -416,7 +419,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
@@ -432,7 +435,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->b.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
@@ -448,7 +451,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_B calculated =%d\n"
@@ -464,7 +467,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->b.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_B calculated =%d\n"
@@ -481,7 +484,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
@@ -497,7 +500,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->c.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
@@ -513,7 +516,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_C calculated =%d\n"
@@ -529,7 +532,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->c.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_C calculated =%d\n"
@@ -546,7 +549,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
@@ -562,7 +565,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->d.cstate_pstate.cstate_exit_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_exit_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
@@ -578,7 +581,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_enter_plus_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, 0,
DCHUBBUB_ARB_ALLOW_SR_ENTER_WATERMARK_Z8_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_WATERMARK_Z8_D calculated =%d\n"
@@ -594,7 +597,7 @@ static bool hubbub31_program_stutter_watermarks(
watermarks->d.cstate_pstate.cstate_exit_z8_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.cstate_exit_z8_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, 0,
DCHUBBUB_ARB_ALLOW_SR_EXIT_WATERMARK_Z8_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_Z8_D calculated =%d\n"
@@ -625,7 +628,7 @@ static bool hubbub31_program_pstate_watermarks(
watermarks->a.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->a.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
@@ -642,7 +645,7 @@ static bool hubbub31_program_pstate_watermarks(
watermarks->b.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->b.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
@@ -659,7 +662,7 @@ static bool hubbub31_program_pstate_watermarks(
watermarks->c.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->c.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
@@ -676,7 +679,7 @@ static bool hubbub31_program_pstate_watermarks(
watermarks->d.cstate_pstate.pstate_change_ns;
prog_wm_value = convert_and_clamp(
watermarks->d.cstate_pstate.pstate_change_ns,
- refclk_mhz, 0x1fffff);
+ refclk_mhz, 0xffff);
REG_SET(DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, 0,
DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, prog_wm_value);
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 4d9c64d982d7..8d64187478e4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -1984,7 +1984,7 @@ static void dcn31_calculate_wm_and_dlg_fp(
pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- if (dc->config.forced_clocks) {
+ if (dc->config.forced_clocks || dc->debug.max_disp_clk) {
pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
}
@@ -2260,6 +2260,9 @@ static bool dcn31_resource_construct(
dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
dc->caps.color.mpc.ocsc = 1;
+ /* Use pipe-context-based OTG sync logic */
+ dc->config.use_pipe_ctx_sync_logic = true;
+
/* read VBIOS LTTPR caps */
{
if (ctx->dc_bios->funcs->get_lttpr_caps) {
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 511f9e1159c7..4229369c57f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -34,12 +34,12 @@ struct cp_psp_stream_config {
uint8_t dig_fe;
uint8_t link_enc_idx;
uint8_t stream_enc_idx;
- uint8_t phy_idx;
uint8_t dio_output_idx;
- uint8_t dio_output_type;
+ uint8_t phy_idx;
uint8_t assr_enabled;
uint8_t mst_enabled;
uint8_t dp2_enabled;
+ uint8_t usb4_enabled;
void *dm_stream_ctx;
bool dpms_off;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 246071c72f6b..548cdef8a8ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -1576,8 +1576,6 @@ void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
e2e_pipe_param,
num_pipes);
- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 015e7f2c0b16..0fc9f3e3ffae 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -1577,8 +1577,6 @@ void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
e2e_pipe_param,
num_pipes);
- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 8bc27de4c104..618f4b682ab1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -1688,8 +1688,6 @@ void dml21_rq_dlg_get_dlg_reg(
mode_lib,
e2e_pipe_param,
num_pipes);
- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index aef854270054..747167083dea 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -1858,8 +1858,6 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib,
e2e_pipe_param,
num_pipes);
- dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
- / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
index 94c32832a0e7..0a7a33864973 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -327,7 +327,7 @@ void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
}
-void dcn301_calculate_wm_and_dlg(struct dc *dc,
+void dcn301_calculate_wm_and_dlg_fp(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
index fc7065d17842..774b0fdfc80b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
@@ -34,7 +34,7 @@ void dcn301_fpu_set_wm_ranges(int i,
void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info);
-void dcn301_calculate_wm_and_dlg(struct dc *dc,
+void dcn301_calculate_wm_and_dlg_fp(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index d46a2733024c..8f9f1d607f7c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -546,7 +546,6 @@ struct _vcs_dpi_display_dlg_sys_params_st {
double t_sr_wm_us;
double t_extra_us;
double mem_trip_us;
- double t_srx_delay_us;
double deepsleep_dcfclk_mhz;
double total_flip_bw;
unsigned int total_flip_bytes;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index 71ea503cb32f..412e75eb4704 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -142,9 +142,6 @@ void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _v
dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param->t_sr_wm_us);
dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param->t_extra_us);
dml_print(
- "DML_RQ_DLG_CALC: t_srx_delay_us = %3.2f\n",
- dlg_sys_param->t_srx_delay_us);
- dml_print(
"DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n",
dlg_sys_param->deepsleep_dcfclk_mhz);
dml_print(
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index 59dc2c5b58dd..3df559c591f8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -1331,10 +1331,6 @@ void dml1_rq_dlg_get_dlg_params(
if (dual_plane)
DTRACE("DLG: %s: swath_height_c = %d", __func__, swath_height_c);
- DTRACE(
- "DLG: %s: t_srx_delay_us = %3.2f",
- __func__,
- (double) dlg_sys_param->t_srx_delay_us);
DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us);
DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset);
DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 890280026e69..943240e2809e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -382,6 +382,7 @@ struct pipe_ctx {
struct pll_settings pll_settings;
uint8_t pipe_idx;
+ uint8_t pipe_idx_syncd;
struct pipe_ctx *top_pipe;
struct pipe_ctx *bottom_pipe;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 073f8b667eff..c88e113b94d1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -164,10 +164,6 @@ struct stream_encoder_funcs {
void (*stop_dp_info_packets)(
struct stream_encoder *enc);
- void (*reset_fifo)(
- struct stream_encoder *enc
- );
-
void (*dp_blank)(
struct dc_link *link,
struct stream_encoder *enc);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index e589cbe67307..dbfe6690ded8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -34,6 +34,10 @@
#define MEMORY_TYPE_HBM 2
+#define IS_PIPE_SYNCD_VALID(pipe) ((((pipe)->pipe_idx_syncd) & 0x80)?1:0)
+#define GET_PIPE_SYNCD_FROM_PIPE(pipe) ((pipe)->pipe_idx_syncd & 0x7F)
+#define SET_PIPE_SYNCD_TO_PIPE(pipe, pipe_syncd) ((pipe)->pipe_idx_syncd = (0x80 | pipe_syncd))
+
enum dce_version resource_parse_asic_id(
struct hw_asic_id asic_id);
@@ -208,4 +212,13 @@ struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt(
const struct dc_link *link);
#endif
+void reset_syncd_pipes_from_disabled_pipes(struct dc *dc,
+ struct dc_state *context);
+
+void check_syncd_pipes_for_disabled_master_pipe(struct dc *dc,
+ struct dc_state *context,
+ uint8_t disabled_master_pipe_idx);
+
+uint8_t resource_transmitter_to_phy_idx(const struct dc *dc, enum transmitter transmitter);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
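The IS/GET/SET_PIPE_SYNCD macros added above pack two things into the single pipe_idx_syncd byte introduced in core_types.h further down: bit 7 flags the field as programmed, and the low 7 bits carry the synced master pipe index. A small round-trip illustration (use_master_idx() is a hypothetical consumer):

struct pipe_ctx pipe = { 0 };          /* pipe_idx_syncd == 0: not valid */

SET_PIPE_SYNCD_TO_PIPE(&pipe, 3);      /* stores 0x80 | 3 == 0x83 */

if (IS_PIPE_SYNCD_VALID(&pipe))        /* bit 7 set -> 1 */
        use_master_idx(GET_PIPE_SYNCD_FROM_PIPE(&pipe));   /* yields 3 */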
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index 9ccafe007b23..c4b067d01895 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -132,31 +132,6 @@ enum dc_irq_source to_dal_irq_source_dcn20(
}
}
-uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source)
-{
- const struct irq_source_info *info;
- uint32_t addr;
- uint32_t value;
- uint32_t current_status;
-
- info = find_irq_source_info(irq_service, source);
- if (!info)
- return 0;
-
- addr = info->status_reg;
- if (!addr)
- return 0;
-
- value = dm_read_reg(irq_service->ctx, addr);
- current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE);
-
- return current_status;
-}
-
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
index 4d69ab24ca25..aee4b37999f1 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
@@ -31,6 +31,4 @@
struct irq_service *dal_irq_service_dcn20_create(
struct irq_service_init_data *init_data);
-uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index 235294534c43..0f15bcada4e9 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -134,31 +134,6 @@ static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_servic
return DC_IRQ_SOURCE_INVALID;
}
-uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source)
-{
- const struct irq_source_info *info;
- uint32_t addr;
- uint32_t value;
- uint32_t current_status;
-
- info = find_irq_source_info(irq_service, source);
- if (!info)
- return 0;
-
- addr = info->status_reg;
- if (!addr)
- return 0;
-
- value = dm_read_reg(irq_service->ctx, addr);
- current_status =
- get_reg_field_value(
- value,
- HPD0_DC_HPD_INT_STATUS,
- DC_HPD_SENSE);
-
- return current_status;
-}
-
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
index 616470e32380..da2bd0e93d7a 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
@@ -31,6 +31,4 @@
struct irq_service *dal_irq_service_dcn21_create(
struct irq_service_init_data *init_data);
-uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source);
-
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index 4db1133e4466..a2a4fbeb83f8 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service)
*irq_service = NULL;
}
-const struct irq_source_info *find_irq_source_info(
+static const struct irq_source_info *find_irq_source_info(
struct irq_service *irq_service,
enum dc_irq_source source)
{
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
index e60b82480093..dbfcb096eedd 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
@@ -69,10 +69,6 @@ struct irq_service {
const struct irq_service_funcs *funcs;
};
-const struct irq_source_info *find_irq_source_info(
- struct irq_service *irq_service,
- enum dc_irq_source source);
-
void dal_irq_service_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data);
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index 6d648c889866..f7420c3f5672 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -104,6 +104,7 @@ struct mod_hdcp_displayport {
uint8_t rev;
uint8_t assr_enabled;
uint8_t mst_enabled;
+ uint8_t usb4_enabled;
};
struct mod_hdcp_hdmi {
@@ -249,7 +250,6 @@ struct mod_hdcp_link {
uint8_t ddc_line;
uint8_t link_enc_idx;
uint8_t phy_idx;
- uint8_t dio_output_type;
uint8_t dio_output_id;
uint8_t hdcp_supported_informational;
union {
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index e2cae97f4ff1..48cc009d9bdf 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -3462,8 +3462,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_power2_cap_min.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap.dev_attr.attr ||
attr == &sensor_dev_attr_power2_cap_default.dev_attr.attr ||
- attr == &sensor_dev_attr_power2_label.dev_attr.attr ||
- attr == &sensor_dev_attr_power1_label.dev_attr.attr))
+ attr == &sensor_dev_attr_power2_label.dev_attr.attr))
return 0;
return effective_mode;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 777f717c37ae..2320bd750876 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -1238,21 +1238,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
&dpm_context->dpm_tables.soc_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
pstate_table->gfxclk_pstate.min = gfx_table->min;
pstate_table->gfxclk_pstate.peak = gfx_table->max;
- if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK)
- pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
- if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK)
- pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.min = soc_table->min;
pstate_table->socclk_pstate.peak = soc_table->max;
- if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK)
+
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ case CHIP_DIMGREY_CAVEFISH:
+ pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK;
+ pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ case CHIP_BEIGE_GOBY:
+ pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK;
+ pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -3696,14 +3712,14 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
{
- struct smu_table_context *table_context = &smu->smu_table;
- PPTable_t *smc_pptable = table_context->driver_pptable;
+ uint16_t *mgpu_fan_boost_limit_rpm;
+ GET_PPTABLE_MEMBER(MGpuFanBoostLimitRpm, &mgpu_fan_boost_limit_rpm);
/*
* Skip the MGpuFanBoost setting for those ASICs
* which do not support it
*/
- if (!smc_pptable->MGpuFanBoostLimitRpm)
+ if (*mgpu_fan_boost_limit_rpm == 0)
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
index 38cd0ece24f6..42f705c7a36f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
@@ -33,6 +33,14 @@ typedef enum {
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676
+
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000
+
extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 380811b91350..4885c4ae78b7 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1625,10 +1625,18 @@ static int aldebaran_set_df_cstate(struct smu_context *smu,
static int aldebaran_allow_xgmi_power_down(struct smu_context *smu, bool en)
{
- return smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_GmiPwrDnControl,
- en ? 0 : 1,
- NULL);
+ struct amdgpu_device *adev = smu->adev;
+
+ /* The message only works on the master die; a NACK is sent
+ back for other dies, so only send it on the master die. */
+ if (!adev->smuio.funcs->get_socket_id(adev) &&
+ !adev->smuio.funcs->get_die_id(adev))
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GmiPwrDnControl,
+ en ? 0 : 1,
+ NULL);
+ else
+ return 0;
}
static const struct throttling_logging_label {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index caf1775d48ef..0bc84b709a93 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -282,14 +282,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu)
static int yellow_carp_mode_reset(struct smu_context *smu, int type)
{
- int ret = 0, index = 0;
-
- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
- SMU_MSG_GfxDeviceDriverReset);
- if (index < 0)
- return index == -EACCES ? 0 : index;
+ int ret = 0;
- ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
if (ret)
dev_err(smu->adev->dev, "Failed to mode reset!\n");
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h
index d9eb353a4bf0..dbe1cc620f6e 100644
--- a/drivers/gpu/drm/ast/ast_tables.h
+++ b/drivers/gpu/drm/ast/ast_tables.h
@@ -282,8 +282,6 @@ static const struct ast_vbios_enhtable res_1360x768[] = {
};
static const struct ast_vbios_enhtable res_1600x900[] = {
- {1800, 1600, 24, 80, 1000, 900, 1, 3, VCLK108, /* 60Hz */
- (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 3, 0x3A },
{1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75, /* 60Hz CVT RB */
(SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
AST2500PreCatchCRT), 60, 1, 0x3A },
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index a7389a0facfb..af07eeb47ca0 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/math64.h>
@@ -196,12 +197,9 @@ static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps)
/*
* ui2bc - UI time periods to byte clock cycles
*/
-static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui)
+static u32 ui2bc(unsigned int ui)
{
- u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format);
-
- return DIV64_U64_ROUND_UP(ui * dsi->lanes,
- dsi->mode.clock * 1000 * bpp);
+ return DIV_ROUND_UP(ui, BITS_PER_BYTE);
}
/*
@@ -232,12 +230,12 @@ static int nwl_dsi_config_host(struct nwl_dsi *dsi)
}
/* values in byte clock cycles */
- cycles = ui2bc(dsi, cfg->clk_pre);
+ cycles = ui2bc(cfg->clk_pre);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles);
nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles);
cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles);
- cycles += ui2bc(dsi, cfg->clk_pre);
+ cycles += ui2bc(cfg->clk_pre);
DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles);
nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles);
cycles = ps2bc(dsi, cfg->hs_exit);
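The ui2bc() rewrite above works because cfg->clk_pre is specified in UI (unit intervals, one bit time on the DSI link) and the byte clock ticks once per eight bits, so the lane count and pixel clock in the old formula were the wrong inputs for this field. Converting UI to byte-clock cycles is a round-up divide by eight, for example:

u32 cycles;

cycles = DIV_ROUND_UP(8, BITS_PER_BYTE);    /*  8 UI -> 1 byte clock  */
cycles = DIV_ROUND_UP(10, BITS_PER_BYTE);   /* 10 UI -> 2 byte clocks */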
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 21174efd91be..88cd992df356 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1327,8 +1327,10 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
drm_dbg_atomic(dev, "checking %p\n", state);
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
- requested_crtc |= drm_crtc_mask(crtc);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->enable)
+ requested_crtc |= drm_crtc_mask(crtc);
+ }
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
ret = drm_atomic_plane_check(old_plane_state, new_plane_state);
@@ -1377,8 +1379,10 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}
- for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
- affected_crtc |= drm_crtc_mask(crtc);
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->enable)
+ affected_crtc |= drm_crtc_mask(crtc);
+ }
/*
* For commits that allow modesets drivers can add other CRTCs to the
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index aef2fbd676e5..9603193d2fa1 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -828,8 +828,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
}
if (!crtc_state->enable && !can_update_disabled) {
- drm_dbg_kms(plane_state->crtc->dev,
- "Cannot update plane of a disabled CRTC.\n");
+ drm_dbg_kms(plane_state->plane->dev,
+ "Cannot update plane of a disabled CRTC.\n");
return -EINVAL;
}
@@ -839,8 +839,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
if (hscale < 0 || vscale < 0) {
- drm_dbg_kms(plane_state->crtc->dev,
- "Invalid scaling of plane\n");
+ drm_dbg_kms(plane_state->plane->dev,
+ "Invalid scaling of plane\n");
drm_rect_debug_print("src: ", &plane_state->src, true);
drm_rect_debug_print("dst: ", &plane_state->dst, false);
return -ERANGE;
@@ -864,8 +864,8 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
return 0;
if (!can_position && !drm_rect_equals(dst, &clip)) {
- drm_dbg_kms(plane_state->crtc->dev,
- "Plane must cover entire CRTC\n");
+ drm_dbg_kms(plane_state->plane->dev,
+ "Plane must cover entire CRTC\n");
drm_rect_debug_print("dst: ", dst, false);
drm_rect_debug_print("clip: ", &clip, false);
return -EINVAL;
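The three drm_dbg_kms() fixes above are the same bug: plane_state->crtc is NULL while a plane is being disabled, so dereferencing it for the debug message could oops on exactly the paths the message is meant to report, while plane_state->plane is always non-NULL. Reduced to its essence (a sketch, not quoted code):

/* While disabling a plane, the CRTC link is already gone: */
plane_state->crtc = NULL;
dev = plane_state->plane->dev;   /* safe: a state always has its plane */
/* plane_state->crtc->dev would NULL-dereference right here */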
@@ -1016,7 +1016,7 @@ crtc_needs_disable(struct drm_crtc_state *old_state,
* it's in self refresh mode and needs to be fully disabled.
*/
return old_state->active ||
- (old_state->self_refresh_active && !new_state->enable) ||
+ (old_state->self_refresh_active && !new_state->active) ||
new_state->self_refresh_active;
}
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 9781722519c3..54d62fdb4ef9 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -76,15 +76,17 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
state->mode_blob = NULL;
if (mode) {
+ struct drm_property_blob *blob;
+
drm_mode_convert_to_umode(&umode, mode);
- state->mode_blob =
- drm_property_create_blob(state->crtc->dev,
- sizeof(umode),
- &umode);
- if (IS_ERR(state->mode_blob))
- return PTR_ERR(state->mode_blob);
+ blob = drm_property_create_blob(crtc->dev,
+ sizeof(umode), &umode);
+ if (IS_ERR(blob))
+ return PTR_ERR(blob);
drm_mode_copy(&state->mode, mode);
+
+ state->mode_blob = blob;
state->enable = true;
drm_dbg_atomic(crtc->dev,
"Set [MODE:%s] for [CRTC:%d:%s] state %p\n",
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index f3d79eda94bb..8b3822142fed 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -5511,6 +5511,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mutex_init(&mgr->probe_lock);
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
mutex_init(&mgr->topology_ref_history_lock);
+ stack_depot_init();
#endif
INIT_LIST_HEAD(&mgr->tx_msg_downq);
INIT_LIST_HEAD(&mgr->destroy_port_list);
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index cefd0cbf9deb..dc275c466c9c 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -512,6 +512,7 @@ int drm_gem_cma_mmap(struct drm_gem_cma_object *cma_obj, struct vm_area_struct *
*/
vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node);
vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_DONTEXPAND;
if (cma_obj->map_noncoherent) {
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index ded8968b3e8a..0327d595e028 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -209,11 +209,11 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
- src = data[0].vaddr; /* TODO: Use mapping abstraction properly */
ret = drm_gem_fb_vmap(fb, map, data);
if (ret)
goto out_drm_gem_fb_end_cpu_access;
+ src = data[0].vaddr; /* TODO: Use mapping abstraction properly */
switch (fb->format->format) {
case DRM_FORMAT_RGB565:
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 7d1c578388d3..8257f9d4f619 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -980,6 +980,10 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
add_hole(&mm->head_node);
mm->scan_active = 0;
+
+#ifdef CONFIG_DRM_DEBUG_MM
+ stack_depot_init();
+#endif
}
EXPORT_SYMBOL(drm_mm_init);
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index c97323365675..918065982db4 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -107,6 +107,11 @@ static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
kfree(buf);
}
+
+static void __drm_stack_depot_init(void)
+{
+ stack_depot_init();
+}
#else /* CONFIG_DRM_DEBUG_MODESET_LOCK */
static depot_stack_handle_t __drm_stack_depot_save(void)
{
@@ -115,6 +120,9 @@ static depot_stack_handle_t __drm_stack_depot_save(void)
static void __drm_stack_depot_print(depot_stack_handle_t stack_depot)
{
}
+static void __drm_stack_depot_init(void)
+{
+}
#endif /* CONFIG_DRM_DEBUG_MODESET_LOCK */
/**
@@ -359,6 +367,7 @@ void drm_modeset_lock_init(struct drm_modeset_lock *lock)
{
ww_mutex_init(&lock->mutex, &crtc_ww_class);
INIT_LIST_HEAD(&lock->head);
+ __drm_stack_depot_init();
}
EXPORT_SYMBOL(drm_modeset_lock_init);
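The empty __drm_stack_depot_init() stub for the CONFIG_DRM_DEBUG_MODESET_LOCK=n build is the standard kernel idiom for keeping #ifdefs out of call sites: drm_modeset_lock_init() calls it unconditionally and the compiler discards the no-op. In general form (CONFIG_FEATURE and do_real_init() are placeholders):

#ifdef CONFIG_FEATURE
static void feature_init(void)
{
        do_real_init();         /* only built when the feature is on */
}
#else
static void feature_init(void)
{
}
#endif

void common_path(void)
{
        feature_init();         /* no #ifdef needed at the call site */
}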
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index 042bb80383c9..b910978d3e48 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -115,6 +115,12 @@ static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1600x2560_leftside_up = {
+ .width = 1600,
+ .height = 2560,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
+};
+
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@@ -275,6 +281,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
},
.driver_data = (void *)&onegx1_pro,
+ }, { /* OneXPlayer */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ONE-NETBOOK TECHNOLOGY CO., LTD."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ONE XPLAYER"),
+ },
+ .driver_data = (void *)&lcd1600x2560_leftside_up,
}, { /* Samsung GalaxyBook 10.6 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
diff --git a/drivers/gpu/drm/drm_privacy_screen.c b/drivers/gpu/drm/drm_privacy_screen.c
index beaf99e9120a..b688841c18e4 100644
--- a/drivers/gpu/drm/drm_privacy_screen.c
+++ b/drivers/gpu/drm/drm_privacy_screen.c
@@ -269,7 +269,7 @@ EXPORT_SYMBOL(drm_privacy_screen_get_state);
*
* The notifier is called with no locks held. The new hw_state and sw_state
* can be retrieved using the drm_privacy_screen_get_state() function.
- * A pointer to the drm_privacy_screen's struct is passed as the void *data
+ * A pointer to the drm_privacy_screen's struct is passed as the ``void *data``
* argument of the notifier_block's notifier_call.
*
* The notifier will NOT be called when changes are made through
diff --git a/drivers/gpu/drm/drm_privacy_screen_x86.c b/drivers/gpu/drm/drm_privacy_screen_x86.c
index a2cafb294ca6..e7aa74ad0b24 100644
--- a/drivers/gpu/drm/drm_privacy_screen_x86.c
+++ b/drivers/gpu/drm/drm_privacy_screen_x86.c
@@ -33,6 +33,9 @@ static bool __init detect_thinkpad_privacy_screen(void)
unsigned long long output;
acpi_status status;
+ if (acpi_disabled)
+ return false;
+
/* Get embedded-controller handle */
status = acpi_get_devices("PNP0C09", acpi_set_handle, NULL, &ec_handle);
if (ACPI_FAILURE(status) || !ec_handle)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index b03c20c14ca1..a17313282e8b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -469,8 +469,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
return -EINVAL;
}
- if (args->stream_size > SZ_64K || args->nr_relocs > SZ_64K ||
- args->nr_bos > SZ_64K || args->nr_pmrs > 128) {
+ if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
+ args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
DRM_ERROR("submit arguments out of size limits\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index ba5fd012a40a..37018bc55810 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1047,7 +1047,7 @@ pm_put:
void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
{
- unsigned int i = 0;
+ unsigned int i;
dev_err(gpu->dev, "recover hung GPU!\n");
@@ -1060,7 +1060,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
/* complete all events, the GPU won't do it after the reset */
spin_lock(&gpu->event_spinlock);
- for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS)
+ for_each_set_bit(i, gpu->event_bitmap, ETNA_NR_EVENTS)
complete(&gpu->event_free);
bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS);
spin_unlock(&gpu->event_spinlock);
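The etnaviv change above is subtle: for_each_set_bit_from() resumes scanning from the iterator's current value, so it only behaves like for_each_set_bit() when the iterator was explicitly zeroed beforehand. Dropping the "= 0" initializer and switching to for_each_set_bit(), which initializes the iterator itself, removes the trap. For a bitmap with bits 1 and 5 set (handle() is a placeholder):

unsigned long map = BIT(1) | BIT(5);
unsigned int i;

for_each_set_bit(i, &map, 8)        /* visits bit 1, then bit 5 */
        handle(i);

i = 4;
for_each_set_bit_from(i, &map, 8)   /* resumes at bit 4: visits only 5 */
        handle(i);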
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index a4c94dc2e216..cfd932514da2 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -101,6 +101,7 @@ config DRM_I915_USERPTR
config DRM_I915_GVT
bool "Enable Intel GVT-g graphics virtualization host support"
depends on DRM_I915
+ depends on X86
depends on 64BIT
default n
help
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 9c9d574f0b8c..cab505277595 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1298,6 +1298,28 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port),
DKL_TX_DP20BITMODE, 0);
+
+ if (IS_ALDERLAKE_P(dev_priv)) {
+ u32 val;
+
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ if (ln == 0) {
+ val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0);
+ val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(2);
+ } else {
+ val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(3);
+ val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(3);
+ }
+ } else {
+ val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0);
+ val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0);
+ }
+
+ intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port),
+ DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK |
+ DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK,
+ val);
+ }
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index 1e689d573512..e2dfb93a82bd 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -477,14 +477,14 @@ static const struct intel_ddi_buf_trans icl_combo_phy_trans_hdmi = {
static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_dp[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
- { .icl = { 0xA, 0x47, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */
- { .icl = { 0xC, 0x64, 0x34, 0x00, 0x0B } }, /* 350 700 6.0 */
- { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 350 900 8.2 */
+ { .icl = { 0xA, 0x47, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */
+ { .icl = { 0xC, 0x64, 0x33, 0x00, 0x0C } }, /* 350 700 6.0 */
+ { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 350 900 8.2 */
{ .icl = { 0xA, 0x46, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
- { .icl = { 0xC, 0x64, 0x38, 0x00, 0x07 } }, /* 500 700 2.9 */
+ { .icl = { 0xC, 0x64, 0x37, 0x00, 0x08 } }, /* 500 700 2.9 */
{ .icl = { 0x6, 0x7F, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */
{ .icl = { 0xC, 0x61, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */
- { .icl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } }, /* 600 900 3.5 */
+ { .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 600 900 3.5 */
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index bf7ce684dd8e..bb4a85445fc6 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -10673,6 +10673,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
vlv_wm_sanitize(dev_priv);
} else if (DISPLAY_VER(dev_priv) >= 9) {
skl_wm_get_hw_state(dev_priv);
+ skl_wm_sanitize(dev_priv);
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_wm_get_hw_state(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
index c1439fcb5a95..3ff149df4a77 100644
--- a/drivers/gpu/drm/i915/display/intel_drrs.c
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -405,6 +405,7 @@ intel_drrs_init(struct intel_connector *connector,
struct drm_display_mode *fixed_mode)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = connector->encoder;
struct drm_display_mode *downclock_mode = NULL;
INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work);
@@ -416,6 +417,13 @@ intel_drrs_init(struct intel_connector *connector,
return NULL;
}
+ if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) &&
+ encoder->port != PORT_A) {
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS only supported on eDP port A\n");
+ return NULL;
+ }
+
if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
return NULL;
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 160fd2bdafe5..957feeccff3f 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -1115,7 +1115,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
if (DISPLAY_VER(i915) >= 11 &&
- (plane_state->view.color_plane[0].y + drm_rect_height(&plane_state->uapi.src)) & 3) {
+ (plane_state->view.color_plane[0].y +
+ (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
plane_state->no_fbc_reason = "plane end Y offset misaligned";
return false;
}
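The FBC fix above hinges on drm_rect src coordinates being 16.16 fixed point: drm_rect_height(&plane_state->uapi.src) returns the height scaled by 65536, so it must be shifted down by 16 before being combined with the integer Y offset for the Wa_22010751166 alignment check. For instance:

int height_fp = 1080 << 16;          /* a 1080-line src rect in 16.16 */
int height    = height_fp >> 16;     /* back to 1080 integer lines */

/* old: (y + height_fp) & 3  -- tests a fixed-point value, wrong
 * new: (y + height)    & 3  -- tests the real end-of-plane line  */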
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 0065111593a6..4a2662838cd8 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -360,6 +360,21 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
port++;
}
+ /*
+ * The port numbering and mapping here are bizarre. The now-obsolete
+ * SWSCI spec supports ports numbered [0..4]. Port E is handled as a
+ * special case, but port F and beyond are not. The functionality is
+ * supposed to be obsolete for new platforms. Just bail out if the port
+ * number is out of bounds after mapping.
+ */
+ if (port > 4) {
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] port %c (index %u) out of bounds for display power state notification\n",
+ intel_encoder->base.base.id, intel_encoder->base.name,
+ port_name(intel_encoder->port), port);
+ return -EINVAL;
+ }
+
if (!enable)
parm |= 4 << 8;
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 1a376e9a1ff3..d610e48cab94 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -959,6 +959,9 @@ static int check_overlay_dst(struct intel_overlay *overlay,
const struct intel_crtc_state *pipe_config =
overlay->crtc->config;
+ if (rec->dst_height == 0 || rec->dst_width == 0)
+ return -EINVAL;
+
if (rec->dst_x < pipe_config->pipe_src_w &&
rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w &&
rec->dst_y < pipe_config->pipe_src_h &&
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 40faa18947c9..dbd7d0d83a14 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -345,10 +345,11 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port);
struct intel_uncore *uncore = &i915->uncore;
u32 val;
- val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx));
+ val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
if (val == 0xffffffff) {
drm_dbg_kms(&i915->drm,
"Port %s: PHY in TCCOLD, assuming not complete\n",
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 3a5b247be738..1736efa43339 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2505,9 +2505,14 @@ static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
timeout) < 0) {
i915_request_put(rq);
- tl = intel_context_timeline_lock(ce);
+ /*
+ * Error path; we cannot use intel_context_timeline_lock() as
+ * that is user-interruptible and this cleanup step must be
+ * done.
+ */
+ mutex_lock(&ce->timeline->mutex);
intel_context_exit(ce);
- intel_context_timeline_unlock(tl);
+ mutex_unlock(&ce->timeline->mutex);
if (nonblock)
return -EWOULDBLOCK;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index aaf970c37aa2..1478c02a82cb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -538,6 +538,9 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
struct i915_mmap_offset *mmo, *mn;
+ if (obj->ops->unmap_virtual)
+ obj->ops->unmap_virtual(obj);
+
spin_lock(&obj->mmo.lock);
rbtree_postorder_for_each_entry_safe(mmo, mn,
&obj->mmo.offsets, offset) {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index f9f7e44099fe..0dd107dcecc2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -67,6 +67,7 @@ struct drm_i915_gem_object_ops {
int (*pwrite)(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_pwrite *arg);
u64 (*mmap_offset)(struct drm_i915_gem_object *obj);
+ void (*unmap_virtual)(struct drm_i915_gem_object *obj);
int (*dmabuf_export)(struct drm_i915_gem_object *obj);
@@ -310,6 +311,7 @@ struct drm_i915_gem_object {
#define I915_BO_READONLY BIT(6)
#define I915_TILING_QUIRK_BIT 7 /* unknown swizzling; do not release! */
#define I915_BO_PROTECTED BIT(8)
+#define I915_BO_WAS_BOUND_BIT 9
/**
* @mem_flags - Mutable placement-related flags
*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 89b70f5cde7a..a50f884973bc 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -10,6 +10,8 @@
#include "i915_gem_lmem.h"
#include "i915_gem_mman.h"
+#include "gt/intel_gt.h"
+
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
struct sg_table *pages,
unsigned int sg_page_sizes)
@@ -161,7 +163,6 @@ retry:
/* Immediately discard the backing storage */
int i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
- drm_gem_free_mmap_offset(&obj->base);
if (obj->ops->truncate)
return obj->ops->truncate(obj);
@@ -222,6 +223,14 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
__i915_gem_object_reset_page_iter(obj);
obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+ if (test_and_clear_bit(I915_BO_WAS_BOUND_BIT, &obj->flags)) {
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm_if_active(&i915->runtime_pm, wakeref)
+ intel_gt_invalidate_tlbs(to_gt(i915));
+ }
+
return pages;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 923cc7ad8d70..1f880c8c66e7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -556,6 +556,20 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
return intel_region_ttm_resource_to_rsgt(obj->mm.region, res);
}
+static int i915_ttm_truncate(struct drm_i915_gem_object *obj)
+{
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ int err;
+
+ WARN_ON_ONCE(obj->mm.madv == I915_MADV_WILLNEED);
+
+ err = i915_ttm_move_notify(bo);
+ if (err)
+ return err;
+
+ return i915_ttm_purge(obj);
+}
+
static void i915_ttm_swap_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
@@ -828,11 +842,9 @@ void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
} else if (obj->mm.madv != I915_MADV_WILLNEED) {
bo->priority = I915_TTM_PRIO_PURGE;
} else if (!i915_gem_object_has_pages(obj)) {
- if (bo->priority < I915_TTM_PRIO_HAS_PAGES)
- bo->priority = I915_TTM_PRIO_HAS_PAGES;
+ bo->priority = I915_TTM_PRIO_NO_PAGES;
} else {
- if (bo->priority > I915_TTM_PRIO_NO_PAGES)
- bo->priority = I915_TTM_PRIO_NO_PAGES;
+ bo->priority = I915_TTM_PRIO_HAS_PAGES;
}
ttm_bo_move_to_lru_tail(bo, bo->resource, NULL);
@@ -883,6 +895,11 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
if (ret)
return ret;
+ if (obj->mm.madv != I915_MADV_WILLNEED) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
+ }
+
if (drm_dev_enter(dev, &idx)) {
ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
TTM_BO_VM_NUM_PREFAULT);
@@ -945,6 +962,11 @@ static u64 i915_ttm_mmap_offset(struct drm_i915_gem_object *obj)
return drm_vma_node_offset_addr(&obj->base.vma_node);
}
+static void i915_ttm_unmap_virtual(struct drm_i915_gem_object *obj)
+{
+ ttm_bo_unmap_virtual(i915_gem_to_ttm(obj));
+}
+
static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.name = "i915_gem_object_ttm",
.flags = I915_GEM_OBJECT_IS_SHRINKABLE |
@@ -952,7 +974,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.get_pages = i915_ttm_get_pages,
.put_pages = i915_ttm_put_pages,
- .truncate = i915_ttm_purge,
+ .truncate = i915_ttm_truncate,
.shrinker_release_pages = i915_ttm_shrinker_release_pages,
.adjust_lru = i915_ttm_adjust_lru,
@@ -960,6 +982,7 @@ static const struct drm_i915_gem_object_ops i915_gem_ttm_obj_ops = {
.migrate = i915_ttm_migrate,
.mmap_offset = i915_ttm_mmap_offset,
+ .unmap_virtual = i915_ttm_unmap_virtual,
.mmap_ops = &vm_ops_ttm,
};
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index ee9612a3ee5e..e130c820ae4e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -427,11 +427,17 @@ __i915_ttm_move(struct ttm_buffer_object *bo,
if (!IS_ERR(fence))
goto out;
- } else if (move_deps) {
- int err = i915_deps_sync(move_deps, ctx);
+ } else {
+ int err = PTR_ERR(fence);
+
+ if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
+ return fence;
- if (err)
- return ERR_PTR(err);
+ if (move_deps) {
+ err = i915_deps_sync(move_deps, ctx);
+ if (err)
+ return ERR_PTR(err);
+ }
}
/* Error intercept failed or no accelerated migration to start with */
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 743e6ab2c40b..c6291429b00c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1368,20 +1368,10 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
}
}
- if (!obj->ops->mmap_ops) {
- err = check_absent(addr, obj->base.size);
- if (err) {
- pr_err("%s: was not absent\n", obj->mm.region->name);
- goto out_unmap;
- }
- } else {
- /* ttm allows access to evicted regions by design */
-
- err = check_present(addr, obj->base.size);
- if (err) {
- pr_err("%s: was not present\n", obj->mm.region->name);
- goto out_unmap;
- }
+ err = check_absent(addr, obj->base.size);
+ if (err) {
+ pr_err("%s: was not absent\n", obj->mm.region->name);
+ goto out_unmap;
}
out_unmap:
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index f98f0fb21efb..35d0fcd3a86c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -29,6 +29,8 @@ void __intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
spin_lock_init(&gt->irq_lock);
+ mutex_init(&gt->tlb_invalidate_lock);
+
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -912,3 +914,109 @@ void intel_gt_info_print(const struct intel_gt_info *info,
intel_sseu_dump(&info->sseu, p);
}
+
+struct reg_and_bit {
+ i915_reg_t reg;
+ u32 bit;
+};
+
+static struct reg_and_bit
+get_reg_and_bit(const struct intel_engine_cs *engine, const bool gen8,
+ const i915_reg_t *regs, const unsigned int num)
+{
+ const unsigned int class = engine->class;
+ struct reg_and_bit rb = { };
+
+ if (drm_WARN_ON_ONCE(&engine->i915->drm,
+ class >= num || !regs[class].reg))
+ return rb;
+
+ rb.reg = regs[class];
+ if (gen8 && class == VIDEO_DECODE_CLASS)
+ rb.reg.reg += 4 * engine->instance; /* GEN8_M2TCR */
+ else
+ rb.bit = engine->instance;
+
+ rb.bit = BIT(rb.bit);
+
+ return rb;
+}
+
+void intel_gt_invalidate_tlbs(struct intel_gt *gt)
+{
+ static const i915_reg_t gen8_regs[] = {
+ [RENDER_CLASS] = GEN8_RTCR,
+ [VIDEO_DECODE_CLASS] = GEN8_M1TCR, /* , GEN8_M2TCR */
+ [VIDEO_ENHANCEMENT_CLASS] = GEN8_VTCR,
+ [COPY_ENGINE_CLASS] = GEN8_BTCR,
+ };
+ static const i915_reg_t gen12_regs[] = {
+ [RENDER_CLASS] = GEN12_GFX_TLB_INV_CR,
+ [VIDEO_DECODE_CLASS] = GEN12_VD_TLB_INV_CR,
+ [VIDEO_ENHANCEMENT_CLASS] = GEN12_VE_TLB_INV_CR,
+ [COPY_ENGINE_CLASS] = GEN12_BLT_TLB_INV_CR,
+ };
+ struct drm_i915_private *i915 = gt->i915;
+ struct intel_uncore *uncore = gt->uncore;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ const i915_reg_t *regs;
+ unsigned int num = 0;
+
+ if (I915_SELFTEST_ONLY(gt->awake == -ENODEV))
+ return;
+
+ if (GRAPHICS_VER(i915) == 12) {
+ regs = gen12_regs;
+ num = ARRAY_SIZE(gen12_regs);
+ } else if (GRAPHICS_VER(i915) >= 8 && GRAPHICS_VER(i915) <= 11) {
+ regs = gen8_regs;
+ num = ARRAY_SIZE(gen8_regs);
+ } else if (GRAPHICS_VER(i915) < 8) {
+ return;
+ }
+
+ if (drm_WARN_ONCE(&i915->drm, !num,
+ "Platform does not implement TLB invalidation!"))
+ return;
+
+ GEM_TRACE("\n");
+
+ assert_rpm_wakelock_held(&i915->runtime_pm);
+
+ mutex_lock(&gt->tlb_invalidate_lock);
+ intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
+
+ for_each_engine(engine, gt, id) {
+ /*
+ * HW architecture suggests a typical invalidation time of 40us,
+ * with pessimistic cases up to 100us and a recommendation to
+ * cap at 1ms. We go a bit higher just in case.
+ */
+ const unsigned int timeout_us = 100;
+ const unsigned int timeout_ms = 4;
+ struct reg_and_bit rb;
+
+ rb = get_reg_and_bit(engine, regs == gen8_regs, regs, num);
+ if (!i915_mmio_reg_offset(rb.reg))
+ continue;
+
+ intel_uncore_write_fw(uncore, rb.reg, rb.bit);
+ if (__intel_wait_for_register_fw(uncore,
+ rb.reg, rb.bit, 0,
+ timeout_us, timeout_ms,
+ NULL))
+ drm_err_ratelimited(&gt->i915->drm,
+ "%s TLB invalidation did not complete in %ums!\n",
+ engine->name, timeout_ms);
+ }
+
+ /*
+ * Use delayed put since a) we mostly expect a flurry of TLB
+ * invalidations, so it is good to avoid paying the forcewake cost,
+ * and b) it works around a bug in Icelake which cannot cope with
+ * too-rapid transitions.
+ */
+ intel_uncore_forcewake_put_delayed(uncore, FORCEWAKE_ALL);
+ mutex_unlock(&gt->tlb_invalidate_lock);
+}
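
The sequence above is a write-then-poll handshake: set the engine's bit in its invalidation register, then spin until the hardware clears it, capped by the timeouts noted in the comment. A minimal sketch of the same pattern built on the generic readl_poll_timeout() helper follows; flush_engine_tlb(), mmio and inval_reg are hypothetical names, and the real i915 code uses its own __intel_wait_for_register_fw() because it must run with forcewake held.

#include <linux/io.h>
#include <linux/iopoll.h>

/*
 * Illustrative sketch only: request a TLB flush by writing the engine's
 * bit, then poll the same register until the HW acks by clearing it.
 */
static int flush_engine_tlb(void __iomem *mmio, unsigned int inval_reg, u32 bit)
{
	u32 val;

	writel(bit, mmio + inval_reg);

	/* sleep up to 100us between reads, give up after 4ms */
	return readl_poll_timeout(mmio + inval_reg, val, !(val & bit),
				  100, 4000);
}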
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 3ace129eb2af..a913fb6ffec3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -91,4 +91,6 @@ void intel_gt_info_print(const struct intel_gt_info *info,
void intel_gt_watchdog_work(struct work_struct *work);
+void intel_gt_invalidate_tlbs(struct intel_gt *gt);
+
#endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 14216cc471b1..f20687796490 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -73,6 +73,8 @@ struct intel_gt {
struct intel_uc uc;
+ struct mutex tlb_invalidate_lock;
+
struct i915_wa_list wa_list;
struct intel_gt_timelines {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index f9240d4baa69..3aabe164c329 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -206,6 +206,11 @@ struct intel_guc {
* context usage for overflows.
*/
struct delayed_work work;
+
+ /**
+ * @shift: Right shift value for the gpm timestamp
+ */
+ u32 shift;
} timestamp;
#ifdef CONFIG_DRM_I915_SELFTEST
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index e7517206af82..154ad726e266 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1113,6 +1113,19 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
if (new_start == lower_32_bits(*prev_start))
return;
+ /*
+ * When gt is unparked, we update the gt timestamp and start the ping
+ * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt
+ * is unparked, all switched-in contexts will have a start time that is
+ * within +/- POLL_TIME_CLKS of the most recent gt_stamp.
+ *
+ * If neither gt_stamp nor new_start has rolled over, then the
+ * gt_stamp_hi does not need to be adjusted, however if one of them has
+ * rolled over, we need to adjust gt_stamp_hi accordingly.
+ *
+ * The conditions below address the cases of new_start rollover and
+ * gt_stamp_last rollover, respectively.
+ */
if (new_start < gt_stamp_last &&
(new_start - gt_stamp_last) <= POLL_TIME_CLKS)
gt_stamp_hi++;
@@ -1124,17 +1137,45 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start)
*prev_start = ((u64)gt_stamp_hi << 32) | new_start;
}
-static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+/*
+ * GuC updates shared memory and KMD reads it. Since this is not synchronized,
+ * we run into a race where the value read is inconsistent. When the race
+ * occurs, the inconsistency tends to be in the upper MSB bytes of the last_in
+ * value: two kinds of corruption are seen - the upper 8 bits are zero, or the
+ * upper 24 bits are zero. Since the truncated results are still plausible
+ * values, it is non-trivial to determine their validity. Instead we read the
+ * values multiple times until they are consistent. In test runs, 3 attempts
+ * result in consistent values. The upper bound is set to 6 attempts and may
+ * need to be tuned for any new occurrences.
+ */
+static void __get_engine_usage_record(struct intel_engine_cs *engine,
+ u32 *last_in, u32 *id, u32 *total)
{
struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine);
+ int i = 0;
+
+ do {
+ *last_in = READ_ONCE(rec->last_switch_in_stamp);
+ *id = READ_ONCE(rec->current_context_index);
+ *total = READ_ONCE(rec->total_runtime);
+
+ if (READ_ONCE(rec->last_switch_in_stamp) == *last_in &&
+ READ_ONCE(rec->current_context_index) == *id &&
+ READ_ONCE(rec->total_runtime) == *total)
+ break;
+ } while (++i < 6);
+}
+
+static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
+{
struct intel_engine_guc_stats *stats = &engine->stats.guc;
struct intel_guc *guc = &engine->gt->uc.guc;
- u32 last_switch = rec->last_switch_in_stamp;
- u32 ctx_id = rec->current_context_index;
- u32 total = rec->total_runtime;
+ u32 last_switch, ctx_id, total;
lockdep_assert_held(&guc->timestamp.lock);
+ __get_engine_usage_record(engine, &last_switch, &ctx_id, &total);
+
stats->running = ctx_id != ~0U && last_switch;
if (stats->running)
__extend_last_switch(guc, &stats->start_gt_clk, last_switch);
@@ -1149,23 +1190,51 @@ static void guc_update_engine_gt_clks(struct intel_engine_cs *engine)
}
}
-static void guc_update_pm_timestamp(struct intel_guc *guc,
- struct intel_engine_cs *engine,
- ktime_t *now)
+static u32 gpm_timestamp_shift(struct intel_gt *gt)
+{
+ intel_wakeref_t wakeref;
+ u32 reg, shift;
+
+ with_intel_runtime_pm(gt->uncore->rpm, wakeref)
+ reg = intel_uncore_read(gt->uncore, RPM_CONFIG0);
+
+ shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+ GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT;
+
+ return 3 - shift;
+}
+
+static u64 gpm_timestamp(struct intel_gt *gt)
+{
+ u32 lo, hi, old_hi, loop = 0;
+
+ hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
+ do {
+ lo = intel_uncore_read(gt->uncore, MISC_STATUS0);
+ old_hi = hi;
+ hi = intel_uncore_read(gt->uncore, MISC_STATUS1);
+ } while (old_hi != hi && loop++ < 2);
+
+ return ((u64)hi << 32) | lo;
+}
+
+static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now)
{
- u32 gt_stamp_now, gt_stamp_hi;
+ struct intel_gt *gt = guc_to_gt(guc);
+ u32 gt_stamp_lo, gt_stamp_hi;
+ u64 gpm_ts;
lockdep_assert_held(&guc->timestamp.lock);
gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp);
- gt_stamp_now = intel_uncore_read(engine->uncore,
- RING_TIMESTAMP(engine->mmio_base));
+ gpm_ts = gpm_timestamp(gt) >> guc->timestamp.shift;
+ gt_stamp_lo = lower_32_bits(gpm_ts);
*now = ktime_get();
- if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp))
+ if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp))
gt_stamp_hi++;
- guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now;
+ guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo;
}
/*
@@ -1208,8 +1277,12 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
+ /*
+ * Update gt_clks, then gt timestamp to simplify the 'gt_stamp -
+ * start_gt_clk' calculation below for active engines.
+ */
guc_update_engine_gt_clks(engine);
- guc_update_pm_timestamp(guc, engine, now);
+ guc_update_pm_timestamp(guc, now);
intel_gt_pm_put_async(gt);
if (i915_reset_count(gpu_error) != reset_count) {
*stats = stats_saved;
@@ -1241,8 +1314,8 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
spin_lock_irqsave(&guc->timestamp.lock, flags);
+ guc_update_pm_timestamp(guc, &unused);
for_each_engine(engine, gt, id) {
- guc_update_pm_timestamp(guc, engine, &unused);
guc_update_engine_gt_clks(engine);
engine->stats.guc.prev_total = 0;
}
@@ -1259,10 +1332,11 @@ static void __update_guc_busyness_stats(struct intel_guc *guc)
ktime_t unused;
spin_lock_irqsave(&guc->timestamp.lock, flags);
- for_each_engine(engine, gt, id) {
- guc_update_pm_timestamp(guc, engine, &unused);
+
+ guc_update_pm_timestamp(guc, &unused);
+ for_each_engine(engine, gt, id)
guc_update_engine_gt_clks(engine);
- }
+
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
@@ -1335,10 +1409,15 @@ void intel_guc_busyness_park(struct intel_gt *gt)
void intel_guc_busyness_unpark(struct intel_gt *gt)
{
struct intel_guc *guc = &gt->uc.guc;
+ unsigned long flags;
+ ktime_t unused;
if (!guc_submission_initialized(guc))
return;
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ guc_update_pm_timestamp(guc, &unused);
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
mod_delayed_work(system_highpri_wq, &guc->timestamp.work,
guc->timestamp.ping_delay);
}
@@ -1783,6 +1862,7 @@ int intel_guc_submission_init(struct intel_guc *guc)
spin_lock_init(&guc->timestamp.lock);
INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping);
guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ;
+ guc->timestamp.shift = gpm_timestamp_shift(gt);
return 0;
}
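
Two patterns above are worth isolating. __get_engine_usage_record() re-reads a GuC-written record until two consecutive snapshots agree, and guc_update_pm_timestamp() extends a 32-bit hardware counter into a monotonic 64-bit value by carrying into the high word whenever the low word appears to move backwards. A self-contained sketch of the extension logic (plain C; names are illustrative, not i915 code):

#include <assert.h>
#include <stdint.h>

/* Extend a 32-bit HW counter reading into a monotonic 64-bit value. */
static uint64_t extend_counter(uint64_t prev, uint32_t now_lo)
{
	uint32_t hi = (uint32_t)(prev >> 32);

	/* low word went backwards => it wrapped, carry into the high word */
	if (now_lo < (uint32_t)prev)
		hi++;

	return ((uint64_t)hi << 32) | now_lo;
}

int main(void)
{
	uint64_t ts = 0xfffffff0ull;		/* just before a 32-bit wrap */

	ts = extend_counter(ts, 0x10);		/* wrapped reading */
	assert(ts == 0x100000010ull);

	ts = extend_counter(ts, 0x20);		/* normal forward step */
	assert(ts == 0x100000020ull);
	return 0;
}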
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 99d1781fa5f0..af79b39048f7 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1148,7 +1148,7 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
ops->set_pfn(se, s->shadow_page.mfn);
}
-/**
+/*
* Check if can do 2M page
* @vgpu: target vgpu
* @entry: target pfn's gtt entry
@@ -2193,7 +2193,7 @@ static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
}
/**
- * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
+ * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
* @vgpu: a vGPU
* @off: register offset
* @p_data: data will be returned to guest
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 5ae812d60abe..0633888a411e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1522,7 +1522,7 @@ capture_engine(struct intel_engine_cs *engine,
struct i915_request *rq = NULL;
unsigned long flags;
- ee = intel_engine_coredump_alloc(engine, GFP_KERNEL);
+ ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL);
if (!ee)
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h
index 76f1d53bdf34..3ad22bbe80eb 100644
--- a/drivers/gpu/drm/i915/i915_mm.h
+++ b/drivers/gpu/drm/i915/i915_mm.h
@@ -6,6 +6,7 @@
#ifndef __I915_MM_H__
#define __I915_MM_H__
+#include <linux/bug.h>
#include <linux/types.h>
struct vm_area_struct;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 170bba913c30..e27f3b7cf094 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -4273,26 +4273,6 @@ static struct ctl_table oa_table[] = {
{}
};
-static struct ctl_table i915_root[] = {
- {
- .procname = "i915",
- .maxlen = 0,
- .mode = 0555,
- .child = oa_table,
- },
- {}
-};
-
-static struct ctl_table dev_root[] = {
- {
- .procname = "dev",
- .maxlen = 0,
- .mode = 0555,
- .child = i915_root,
- },
- {}
-};
-
static void oa_init_supported_formats(struct i915_perf *perf)
{
struct drm_i915_private *i915 = perf->i915;
@@ -4488,7 +4468,7 @@ static int destroy_config(int id, void *p, void *data)
int i915_perf_sysctl_register(void)
{
- sysctl_header = register_sysctl_table(dev_root);
+ sysctl_header = register_sysctl("dev/i915", oa_table);
return 0;
}
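
register_sysctl() creates the intermediate path components itself, which is what lets the patch delete the hand-built "dev" and "i915" directory tables. A minimal sketch of the same idiom for a hypothetical driver (table entries elided; "dev/mydrv" is an invented path, not a real one):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>

static struct ctl_table_header *hdr;

static struct ctl_table my_table[] = {
	/* leaf entries would go here */
	{}
};

static int __init my_sysctl_init(void)
{
	/* registers /proc/sys/dev/mydrv/ without hand-built parent tables */
	hdr = register_sysctl("dev/mydrv", my_table);
	return hdr ? 0 : -ENOMEM;
}

static void __exit my_sysctl_exit(void)
{
	unregister_sysctl_table(hdr);
}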
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4c28dadf8d69..c2bb33febb68 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2684,7 +2684,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */
#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */
-#define GUCPMTIMESTAMP _MMIO(0xC3E8)
+#define MISC_STATUS0 _MMIO(0xA500)
+#define MISC_STATUS1 _MMIO(0xA504)
/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */
#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8)
@@ -2721,6 +2722,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1 << 28)
#define GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT (1 << 24)
+#define GEN8_RTCR _MMIO(0x4260)
+#define GEN8_M1TCR _MMIO(0x4264)
+#define GEN8_M2TCR _MMIO(0x4268)
+#define GEN8_BTCR _MMIO(0x426c)
+#define GEN8_VTCR _MMIO(0x4270)
+
#if 0
#define PRB0_TAIL _MMIO(0x2030)
#define PRB0_HEAD _MMIO(0x2034)
@@ -2819,6 +2826,11 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define FAULT_VA_HIGH_BITS (0xf << 0)
#define FAULT_GTT_SEL (1 << 4)
+#define GEN12_GFX_TLB_INV_CR _MMIO(0xced8)
+#define GEN12_VD_TLB_INV_CR _MMIO(0xcedc)
+#define GEN12_VE_TLB_INV_CR _MMIO(0xcee0)
+#define GEN12_BLT_TLB_INV_CR _MMIO(0xcee4)
+
#define GEN12_AUX_ERR_DBG _MMIO(0x43f4)
#define FPGA_DBG _MMIO(0x42300)
@@ -11166,8 +11178,12 @@ enum skl_power_gate {
_DKL_PHY2_BASE) + \
_DKL_TX_DPCNTL1)
-#define _DKL_TX_DPCNTL2 0x2C8
-#define DKL_TX_DP20BITMODE (1 << 2)
+#define _DKL_TX_DPCNTL2 0x2C8
+#define DKL_TX_DP20BITMODE REG_BIT(2)
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK REG_GENMASK(4, 3)
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK, (val))
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK REG_GENMASK(6, 5)
+#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, (val))
#define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \
_DKL_PHY1_BASE, \
_DKL_PHY2_BASE) + \
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 29a858c53bdd..c0d6d5526abe 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -457,6 +457,9 @@ int i915_vma_bind(struct i915_vma *vma,
vma->ops->bind_vma(vma->vm, NULL, vma, cache_level, bind_flags);
}
+ if (vma->obj)
+ set_bit(I915_BO_WAS_BOUND_BIT, &vma->obj->flags);
+
atomic_or(bind_flags, &vma->flags);
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 434b1f8b7fe3..32bc155f5dc0 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4717,6 +4717,10 @@ static const struct dbuf_slice_conf_entry dg2_allowed_dbufs[] = {
};
static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
+ /*
+ * Keep the join_mbus cases first so check_mbus_joined()
+ * will prefer them over the !join_mbus cases.
+ */
{
.active_pipes = BIT(PIPE_A),
.dbuf_mask = {
@@ -4732,6 +4736,20 @@ static const struct dbuf_slice_conf_entry adlp_allowed_dbufs[] = {
.join_mbus = true,
},
{
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ .join_mbus = false,
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S3) | BIT(DBUF_S4),
+ },
+ .join_mbus = false,
+ },
+ {
.active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
.dbuf_mask = {
[PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
@@ -4835,7 +4853,7 @@ static bool check_mbus_joined(u8 active_pipes,
{
int i;
- for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
if (dbuf_slices[i].active_pipes == active_pipes)
return dbuf_slices[i].join_mbus;
}
@@ -4847,13 +4865,14 @@ static bool adlp_check_mbus_joined(u8 active_pipes)
return check_mbus_joined(active_pipes, adlp_allowed_dbufs);
}
-static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus,
const struct dbuf_slice_conf_entry *dbuf_slices)
{
int i;
- for (i = 0; i < dbuf_slices[i].active_pipes; i++) {
- if (dbuf_slices[i].active_pipes == active_pipes)
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes &&
+ dbuf_slices[i].join_mbus == join_mbus)
return dbuf_slices[i].dbuf_mask[pipe];
}
return 0;
@@ -4864,7 +4883,7 @@ static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
* returns correspondent DBuf slice mask as stated in BSpec for particular
* platform.
*/
-static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
/*
* FIXME: For ICL this is still a bit unclear as prev BSpec revision
@@ -4878,37 +4897,41 @@ static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
* still here - we will need it once those additional constraints
* pop up.
*/
- return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ icl_allowed_dbufs);
}
-static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
- return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ tgl_allowed_dbufs);
}
-static u32 adlp_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 adlp_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
- return compute_dbuf_slices(pipe, active_pipes, adlp_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ adlp_allowed_dbufs);
}
-static u32 dg2_compute_dbuf_slices(enum pipe pipe, u32 active_pipes)
+static u8 dg2_compute_dbuf_slices(enum pipe pipe, u8 active_pipes, bool join_mbus)
{
- return compute_dbuf_slices(pipe, active_pipes, dg2_allowed_dbufs);
+ return compute_dbuf_slices(pipe, active_pipes, join_mbus,
+ dg2_allowed_dbufs);
}
-static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes)
+static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes, bool join_mbus)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
if (IS_DG2(dev_priv))
- return dg2_compute_dbuf_slices(pipe, active_pipes);
+ return dg2_compute_dbuf_slices(pipe, active_pipes, join_mbus);
else if (IS_ALDERLAKE_P(dev_priv))
- return adlp_compute_dbuf_slices(pipe, active_pipes);
+ return adlp_compute_dbuf_slices(pipe, active_pipes, join_mbus);
else if (DISPLAY_VER(dev_priv) == 12)
- return tgl_compute_dbuf_slices(pipe, active_pipes);
+ return tgl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
else if (DISPLAY_VER(dev_priv) == 11)
- return icl_compute_dbuf_slices(pipe, active_pipes);
+ return icl_compute_dbuf_slices(pipe, active_pipes, join_mbus);
/*
* For anything else just return one slice yet.
* Should be extended for other platforms.
@@ -6127,11 +6150,16 @@ skl_compute_ddb(struct intel_atomic_state *state)
return ret;
}
+ if (IS_ALDERLAKE_P(dev_priv))
+ new_dbuf_state->joined_mbus =
+ adlp_check_mbus_joined(new_dbuf_state->active_pipes);
+
for_each_intel_crtc(&dev_priv->drm, crtc) {
enum pipe pipe = crtc->pipe;
new_dbuf_state->slices[pipe] =
- skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes);
+ skl_compute_dbuf_slices(crtc, new_dbuf_state->active_pipes,
+ new_dbuf_state->joined_mbus);
if (old_dbuf_state->slices[pipe] == new_dbuf_state->slices[pipe])
continue;
@@ -6143,9 +6171,6 @@ skl_compute_ddb(struct intel_atomic_state *state)
new_dbuf_state->enabled_slices = intel_dbuf_enabled_slices(new_dbuf_state);
- if (IS_ALDERLAKE_P(dev_priv))
- new_dbuf_state->joined_mbus = adlp_check_mbus_joined(new_dbuf_state->active_pipes);
-
if (old_dbuf_state->enabled_slices != new_dbuf_state->enabled_slices ||
old_dbuf_state->joined_mbus != new_dbuf_state->joined_mbus) {
ret = intel_atomic_serialize_global_state(&new_dbuf_state->base);
@@ -6626,6 +6651,7 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
enum pipe pipe = crtc->pipe;
unsigned int mbus_offset;
enum plane_id plane_id;
+ u8 slices;
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
crtc_state->wm.skl.raw = crtc_state->wm.skl.optimal;
@@ -6645,19 +6671,22 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
skl_ddb_entry_union(&dbuf_state->ddb[pipe], ddb_uv);
}
- dbuf_state->slices[pipe] =
- skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes);
-
dbuf_state->weight[pipe] = intel_crtc_ddb_weight(crtc_state);
/*
* Used for checking overlaps, so we need absolute
* offsets instead of MBUS relative offsets.
*/
- mbus_offset = mbus_ddb_offset(dev_priv, dbuf_state->slices[pipe]);
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ mbus_offset = mbus_ddb_offset(dev_priv, slices);
crtc_state->wm.skl.ddb.start = mbus_offset + dbuf_state->ddb[pipe].start;
crtc_state->wm.skl.ddb.end = mbus_offset + dbuf_state->ddb[pipe].end;
+ /* The slices actually used by the planes on the pipe */
+ dbuf_state->slices[pipe] =
+ skl_ddb_dbuf_slice_mask(dev_priv, &crtc_state->wm.skl.ddb);
+
drm_dbg_kms(&dev_priv->drm,
"[CRTC:%d:%s] dbuf slices 0x%x, ddb (%d - %d), active pipes 0x%x, mbus joined: %s\n",
crtc->base.base.id, crtc->base.name,
@@ -6669,6 +6698,74 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
dbuf_state->enabled_slices = dev_priv->dbuf.enabled_slices;
}
+static bool skl_dbuf_is_misconfigured(struct drm_i915_private *i915)
+{
+ const struct intel_dbuf_state *dbuf_state =
+ to_intel_dbuf_state(i915->dbuf.obj.state);
+ struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ entries[crtc->pipe] = crtc_state->wm.skl.ddb;
+ }
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ u8 slices;
+
+ slices = skl_compute_dbuf_slices(crtc, dbuf_state->active_pipes,
+ dbuf_state->joined_mbus);
+ if (dbuf_state->slices[crtc->pipe] & ~slices)
+ return true;
+
+ if (skl_ddb_allocation_overlaps(&crtc_state->wm.skl.ddb, entries,
+ I915_MAX_PIPES, crtc->pipe))
+ return true;
+ }
+
+ return false;
+}
+
+void skl_wm_sanitize(struct drm_i915_private *i915)
+{
+ struct intel_crtc *crtc;
+
+ /*
+ * On TGL/RKL (at least) the BIOS likes to assign the planes
+ * to the wrong DBUF slices. This will cause an infinite loop
+ * in skl_commit_modeset_enables() as it can't find a way to
+ * transition between the old bogus DBUF layout to the new
+ * proper DBUF layout without DBUF allocation overlaps between
+ * the planes (which cannot be allowed or else the hardware
+ * may hang). If we detect a bogus DBUF layout just turn off
+ * all the planes so that skl_commit_modeset_enables() can
+ * simply ignore them.
+ */
+ if (!skl_dbuf_is_misconfigured(i915))
+ return;
+
+ drm_dbg_kms(&i915->drm, "BIOS has misprogrammed the DBUF, disabling all planes\n");
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (plane_state->uapi.visible)
+ intel_plane_disable_noatomic(crtc, plane);
+
+ drm_WARN_ON(&i915->drm, crtc_state->active_planes != 0);
+
+ memset(&crtc_state->wm.skl.ddb, 0, sizeof(crtc_state->wm.skl.ddb));
+ }
+}
+
static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
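
The loop-condition fixes above are easy to misread: the old form "for (i = 0; i < dbuf_slices[i].active_pipes; i++)" compared the index against a field of the very entry it indexes, and only terminated by luck. These tables end with a zeroed sentinel entry, so the scan should run until active_pipes reads 0. A standalone illustration of the corrected idiom (struct and data invented for the example):

#include <stdio.h>

struct entry {
	unsigned int key;	/* 0 marks the sentinel */
	unsigned int value;
};

static const struct entry table[] = {
	{ .key = 1, .value = 10 },
	{ .key = 3, .value = 30 },
	{}			/* zeroed sentinel terminates the array */
};

static unsigned int lookup(unsigned int key)
{
	int i;

	/* walk until the sentinel, not until some accidental bound */
	for (i = 0; table[i].key != 0; i++) {
		if (table[i].key == key)
			return table[i].value;
	}
	return 0;
}

int main(void)
{
	printf("%u %u\n", lookup(3), lookup(7));	/* prints: 30 0 */
	return 0;
}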
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 990cdcaf85ce..d2243653a893 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -47,6 +47,7 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
void vlv_wm_sanitize(struct drm_i915_private *dev_priv);
+void skl_wm_sanitize(struct drm_i915_private *dev_priv);
bool intel_can_enable_sagv(struct drm_i915_private *dev_priv,
const struct intel_bw_state *bw_state);
void intel_sagv_pre_plane_update(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 22dab36afcb6..64c2708efc9e 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -68,6 +68,7 @@ static noinline depot_stack_handle_t __save_depot_stack(void)
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
spin_lock_init(&rpm->debug.lock);
+ stack_depot_init();
}
static noinline depot_stack_handle_t
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index fc25ebf1a593..778da3179b3c 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -724,7 +724,8 @@ void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
}
static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
- enum forcewake_domains fw_domains)
+ enum forcewake_domains fw_domains,
+ bool delayed)
{
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
@@ -739,7 +740,11 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
continue;
}
- fw_domains_put(uncore, domain->mask);
+ if (delayed &&
+ !(domain->uncore->fw_domains_timer & domain->mask))
+ fw_domain_arm_timer(domain);
+ else
+ fw_domains_put(uncore, domain->mask);
}
}
@@ -760,7 +765,20 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore,
return;
spin_lock_irqsave(&uncore->lock, irqflags);
- __intel_uncore_forcewake_put(uncore, fw_domains);
+ __intel_uncore_forcewake_put(uncore, fw_domains, false);
+ spin_unlock_irqrestore(&uncore->lock, irqflags);
+}
+
+void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
+ enum forcewake_domains fw_domains)
+{
+ unsigned long irqflags;
+
+ if (!uncore->fw_get_funcs)
+ return;
+
+ spin_lock_irqsave(&uncore->lock, irqflags);
+ __intel_uncore_forcewake_put(uncore, fw_domains, true);
spin_unlock_irqrestore(&uncore->lock, irqflags);
}
@@ -802,7 +820,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
if (!uncore->fw_get_funcs)
return;
- __intel_uncore_forcewake_put(uncore, fw_domains);
+ __intel_uncore_forcewake_put(uncore, fw_domains, false);
}
void assert_forcewakes_inactive(struct intel_uncore *uncore)
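
intel_uncore_forcewake_put_delayed() hands the release to the per-domain timer instead of dropping the domain synchronously, so a burst of get/put cycles pays the wake/sleep transition once. The debounce idea in miniature, as an illustrative userspace sketch (not the i915 mechanism itself):

#include <stdio.h>

struct res {
	int refcount;
	long deadline;	/* 0 = no release pending */
};

static void res_get(struct res *r)
{
	r->refcount++;
	r->deadline = 0;	/* a new user cancels any pending drop */
}

/* Instead of releasing immediately, schedule the drop for later. */
static void res_put_delayed(struct res *r, long now, long delay)
{
	if (--r->refcount == 0)
		r->deadline = now + delay;
}

/* Periodic tick: only power down once the deadline passes untouched. */
static void res_tick(struct res *r, long now)
{
	if (r->refcount == 0 && r->deadline && now >= r->deadline) {
		r->deadline = 0;
		printf("powered down at t=%ld\n", now);
	}
}

int main(void)
{
	struct res r = { .refcount = 1 };
	long t;

	res_put_delayed(&r, 0, 5);	/* would power down at t=5 */
	res_get(&r);			/* burst continues, drop cancelled */
	res_put_delayed(&r, 2, 5);	/* rescheduled for t=7 */

	for (t = 0; t < 10; t++)
		res_tick(&r, t);	/* fires once, at t=7 */
	return 0;
}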
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 210fe2a71612..2a15b2b2e2fc 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -246,6 +246,8 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct intel_uncore *uncore,
enum forcewake_domains domains);
+void intel_uncore_forcewake_put_delayed(struct intel_uncore *uncore,
+ enum forcewake_domains domains);
void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
enum forcewake_domains fw_domains);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
index 195b2323ec00..4b6f5655fab5 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -107,9 +107,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev,
static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
struct device *tee_kdev, void *data)
{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+ intel_wakeref_t wakeref;
- intel_pxp_fini_hw(pxp);
+ with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref)
+ intel_pxp_fini_hw(pxp);
mutex_lock(&pxp->tee_mutex);
pxp->pxp_component = NULL;
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 00404ba4126d..2735b8eb3537 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -158,12 +158,6 @@ static void kmb_plane_atomic_disable(struct drm_plane *plane,
case LAYER_1:
kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL2_ENABLE;
break;
- case LAYER_2:
- kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL1_ENABLE;
- break;
- case LAYER_3:
- kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL2_ENABLE;
- break;
}
kmb->plane_status[plane_id].disable = true;
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 5d90d2eb0019..bced4c7d668e 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -786,18 +786,101 @@ void mtk_dsi_ddp_stop(struct device *dev)
mtk_dsi_poweroff(dsi);
}
+static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
+{
+ int ret;
+
+ ret = drm_simple_encoder_init(drm, &dsi->encoder,
+ DRM_MODE_ENCODER_DSI);
+ if (ret) {
+ DRM_ERROR("Failed to encoder init to drm\n");
+ return ret;
+ }
+
+ dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
+
+ ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret)
+ goto err_cleanup_encoder;
+
+ dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
+ if (IS_ERR(dsi->connector)) {
+ DRM_ERROR("Unable to create bridge connector\n");
+ ret = PTR_ERR(dsi->connector);
+ goto err_cleanup_encoder;
+ }
+ drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
+
+ return 0;
+
+err_cleanup_encoder:
+ drm_encoder_cleanup(&dsi->encoder);
+ return ret;
+}
+
+static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ int ret;
+ struct drm_device *drm = data;
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+ ret = mtk_dsi_encoder_init(drm, dsi);
+ if (ret)
+ return ret;
+
+ return device_reset_optional(dev);
+}
+
+static void mtk_dsi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct mtk_dsi *dsi = dev_get_drvdata(dev);
+
+ drm_encoder_cleanup(&dsi->encoder);
+}
+
+static const struct component_ops mtk_dsi_component_ops = {
+ .bind = mtk_dsi_bind,
+ .unbind = mtk_dsi_unbind,
+};
+
static int mtk_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct mtk_dsi *dsi = host_to_dsi(host);
+ struct device *dev = host->dev;
+ int ret;
dsi->lanes = device->lanes;
dsi->format = device->format;
dsi->mode_flags = device->mode_flags;
+ dsi->next_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(dsi->next_bridge))
+ return PTR_ERR(dsi->next_bridge);
+
+ drm_bridge_add(&dsi->bridge);
+
+ ret = component_add(host->dev, &mtk_dsi_component_ops);
+ if (ret) {
+ DRM_ERROR("failed to add dsi_host component: %d\n", ret);
+ drm_bridge_remove(&dsi->bridge);
+ return ret;
+ }
return 0;
}
+static int mtk_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *device)
+{
+ struct mtk_dsi *dsi = host_to_dsi(host);
+
+ component_del(host->dev, &mtk_dsi_component_ops);
+ drm_bridge_remove(&dsi->bridge);
+ return 0;
+}
+
static void mtk_dsi_wait_for_idle(struct mtk_dsi *dsi)
{
int ret;
@@ -938,73 +1021,14 @@ static ssize_t mtk_dsi_host_transfer(struct mipi_dsi_host *host,
static const struct mipi_dsi_host_ops mtk_dsi_ops = {
.attach = mtk_dsi_host_attach,
+ .detach = mtk_dsi_host_detach,
.transfer = mtk_dsi_host_transfer,
};
-static int mtk_dsi_encoder_init(struct drm_device *drm, struct mtk_dsi *dsi)
-{
- int ret;
-
- ret = drm_simple_encoder_init(drm, &dsi->encoder,
- DRM_MODE_ENCODER_DSI);
- if (ret) {
- DRM_ERROR("Failed to encoder init to drm\n");
- return ret;
- }
-
- dsi->encoder.possible_crtcs = mtk_drm_find_possible_crtc_by_comp(drm, dsi->host.dev);
-
- ret = drm_bridge_attach(&dsi->encoder, &dsi->bridge, NULL,
- DRM_BRIDGE_ATTACH_NO_CONNECTOR);
- if (ret)
- goto err_cleanup_encoder;
-
- dsi->connector = drm_bridge_connector_init(drm, &dsi->encoder);
- if (IS_ERR(dsi->connector)) {
- DRM_ERROR("Unable to create bridge connector\n");
- ret = PTR_ERR(dsi->connector);
- goto err_cleanup_encoder;
- }
- drm_connector_attach_encoder(dsi->connector, &dsi->encoder);
-
- return 0;
-
-err_cleanup_encoder:
- drm_encoder_cleanup(&dsi->encoder);
- return ret;
-}
-
-static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
-{
- int ret;
- struct drm_device *drm = data;
- struct mtk_dsi *dsi = dev_get_drvdata(dev);
-
- ret = mtk_dsi_encoder_init(drm, dsi);
- if (ret)
- return ret;
-
- return device_reset_optional(dev);
-}
-
-static void mtk_dsi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct mtk_dsi *dsi = dev_get_drvdata(dev);
-
- drm_encoder_cleanup(&dsi->encoder);
-}
-
-static const struct component_ops mtk_dsi_component_ops = {
- .bind = mtk_dsi_bind,
- .unbind = mtk_dsi_unbind,
-};
-
static int mtk_dsi_probe(struct platform_device *pdev)
{
struct mtk_dsi *dsi;
struct device *dev = &pdev->dev;
- struct drm_panel *panel;
struct resource *regs;
int irq_num;
int ret;
@@ -1021,19 +1045,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
return ret;
}
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &panel, &dsi->next_bridge);
- if (ret)
- goto err_unregister_host;
-
- if (panel) {
- dsi->next_bridge = devm_drm_panel_bridge_add(dev, panel);
- if (IS_ERR(dsi->next_bridge)) {
- ret = PTR_ERR(dsi->next_bridge);
- goto err_unregister_host;
- }
- }
-
dsi->driver_data = of_device_get_match_data(dev);
dsi->engine_clk = devm_clk_get(dev, "engine");
@@ -1098,14 +1109,6 @@ static int mtk_dsi_probe(struct platform_device *pdev)
dsi->bridge.of_node = dev->of_node;
dsi->bridge.type = DRM_MODE_CONNECTOR_DSI;
- drm_bridge_add(&dsi->bridge);
-
- ret = component_add(&pdev->dev, &mtk_dsi_component_ops);
- if (ret) {
- dev_err(&pdev->dev, "failed to add component: %d\n", ret);
- goto err_unregister_host;
- }
-
return 0;
err_unregister_host:
@@ -1118,8 +1121,6 @@ static int mtk_dsi_remove(struct platform_device *pdev)
struct mtk_dsi *dsi = platform_get_drvdata(pdev);
mtk_output_dsi_disable(dsi);
- drm_bridge_remove(&dsi->bridge);
- component_del(&pdev->dev, &mtk_dsi_component_ops);
mipi_dsi_host_unregister(&dsi->host);
return 0;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 51b83776951b..17cfad6424db 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1560,6 +1560,8 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
for (i = 0; i < gpu->nr_rings; i++)
a6xx_gpu->shadow[i] = 0;
+ gpu->suspend_count++;
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 93005839b5da..fb261930ad1c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -608,9 +608,27 @@ static int adreno_resume(struct device *dev)
return gpu->funcs->pm_resume(gpu);
}
+static int active_submits(struct msm_gpu *gpu)
+{
+ int active_submits;
+ mutex_lock(&gpu->active_lock);
+ active_submits = gpu->active_submits;
+ mutex_unlock(&gpu->active_lock);
+ return active_submits;
+}
+
static int adreno_suspend(struct device *dev)
{
struct msm_gpu *gpu = dev_to_gpu(dev);
+ int remaining;
+
+ remaining = wait_event_timeout(gpu->retire_event,
+ active_submits(gpu) == 0,
+ msecs_to_jiffies(1000));
+ if (remaining == 0) {
+ dev_err(dev, "Timeout waiting for GPU to suspend\n");
+ return -EBUSY;
+ }
return gpu->funcs->pm_suspend(gpu);
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index a98e964c3b6f..355894a3b48c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -26,9 +26,16 @@ static void dpu_setup_dspp_pcc(struct dpu_hw_dspp *ctx,
struct dpu_hw_pcc_cfg *cfg)
{
- u32 base = ctx->cap->sblk->pcc.base;
+ u32 base;
- if (!ctx || !base) {
+ if (!ctx) {
+ DRM_ERROR("invalid ctx %pK\n", ctx);
+ return;
+ }
+
+ base = ctx->cap->sblk->pcc.base;
+
+ if (!base) {
DRM_ERROR("invalid ctx %pK pcc base 0x%x\n", ctx, base);
return;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 052548883d27..0fe02529b5e7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -40,7 +40,12 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
of_node_put(phy_node);
- if (!phy_pdev || !msm_dsi->phy) {
+ if (!phy_pdev) {
+ DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
+ return -EPROBE_DEFER;
+ }
+ if (!msm_dsi->phy) {
+ put_device(&phy_pdev->dev);
DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
return -EPROBE_DEFER;
}
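
This fix, and the identical one in msm/hdmi below, plug the same leak: of_find_device_by_node() returns the device with a reference held, so every early-exit path taken after a successful lookup must drop it with put_device(). A hedged sketch of the corrected shape (my_ctx and get_phy are illustrative names, not driver code):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_platform.h>

struct my_ctx {
	void *phy;
	struct device *phy_dev;
};

static int get_phy(struct device_node *phy_node, struct my_ctx *ctx)
{
	struct platform_device *phy_pdev;

	phy_pdev = of_find_device_by_node(phy_node);	/* takes a reference */
	if (!phy_pdev)
		return -EPROBE_DEFER;	/* nothing held, plain return is fine */

	if (!ctx->phy) {
		put_device(&phy_pdev->dev);	/* drop it on the error path */
		return -EPROBE_DEFER;
	}

	ctx->phy_dev = get_device(&phy_pdev->dev);
	return 0;
}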
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index c2ed177717c7..2027b38617ab 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -808,12 +808,14 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
struct msm_dsi_phy_clk_request *clk_req,
struct msm_dsi_phy_shared_timings *shared_timings)
{
- struct device *dev = &phy->pdev->dev;
+ struct device *dev;
int ret;
if (!phy || !phy->cfg->ops.enable)
return -EINVAL;
+ dev = &phy->pdev->dev;
+
ret = dsi_phy_enable_resource(phy);
if (ret) {
DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 3acdeae25caf..719720709e9e 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -97,10 +97,15 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
of_node_put(phy_node);
- if (!phy_pdev || !hdmi->phy) {
+ if (!phy_pdev) {
DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
return -EPROBE_DEFER;
}
+ if (!hdmi->phy) {
+ DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
+ put_device(&phy_pdev->dev);
+ return -EPROBE_DEFER;
+ }
hdmi->phy_dev = get_device(&phy_pdev->dev);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ad35a5d94053..555666e3f960 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -461,7 +461,7 @@ static int msm_init_vram(struct drm_device *dev)
of_node_put(node);
if (ret)
return ret;
- size = r.end - r.start;
+ size = r.end - r.start + 1;
DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
/* if we have no IOMMU, then we need to use carveout allocator.
@@ -510,7 +510,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
struct msm_drm_private *priv = dev_get_drvdata(dev);
struct drm_device *ddev;
struct msm_kms *kms;
- struct msm_mdss *mdss;
int ret, i;
ddev = drm_dev_alloc(drv, dev);
@@ -521,8 +520,6 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
ddev->dev_private = priv;
priv->dev = ddev;
- mdss = priv->mdss;
-
priv->wq = alloc_ordered_workqueue("msm", 0);
priv->hangcheck_period = DRM_MSM_HANGCHECK_DEFAULT_PERIOD;
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 0f78c2615272..2c1049c0ea14 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -703,6 +703,8 @@ static void retire_submits(struct msm_gpu *gpu)
}
}
}
+
+ wake_up_all(&gpu->retire_event);
}
static void retire_worker(struct kthread_work *work)
@@ -848,6 +850,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
INIT_LIST_HEAD(&gpu->active_list);
mutex_init(&gpu->active_lock);
mutex_init(&gpu->lock);
+ init_waitqueue_head(&gpu->retire_event);
kthread_init_work(&gpu->retire_work, retire_worker);
kthread_init_work(&gpu->recover_work, recover_worker);
kthread_init_work(&gpu->fault_work, fault_worker);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 445c6bfd4b6b..92aa1e9196c6 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -230,6 +230,9 @@ struct msm_gpu {
/* work for handling GPU recovery: */
struct kthread_work recover_work;
+ /** retire_event: notified when submits are retired: */
+ wait_queue_head_t retire_event;
+
/* work for handling active-list retiring: */
struct kthread_work retire_work;
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 62405e980925..9bf319be11f6 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -133,6 +133,18 @@ void msm_devfreq_init(struct msm_gpu *gpu)
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
+static void cancel_idle_work(struct msm_gpu_devfreq *df)
+{
+ hrtimer_cancel(&df->idle_work.timer);
+ kthread_cancel_work_sync(&df->idle_work.work);
+}
+
+static void cancel_boost_work(struct msm_gpu_devfreq *df)
+{
+ hrtimer_cancel(&df->boost_work.timer);
+ kthread_cancel_work_sync(&df->boost_work.work);
+}
+
void msm_devfreq_cleanup(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
@@ -152,7 +164,12 @@ void msm_devfreq_resume(struct msm_gpu *gpu)
void msm_devfreq_suspend(struct msm_gpu *gpu)
{
- devfreq_suspend_device(gpu->devfreq.devfreq);
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
+ devfreq_suspend_device(df->devfreq);
+
+ cancel_idle_work(df);
+ cancel_boost_work(df);
}
static void msm_devfreq_boost_work(struct kthread_work *work)
@@ -196,7 +213,7 @@ void msm_devfreq_active(struct msm_gpu *gpu)
/*
* Cancel any pending transition to idle frequency:
*/
- hrtimer_cancel(&df->idle_work.timer);
+ cancel_idle_work(df);
idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
index 0655582ae8ed..4cfb6c001679 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c
@@ -361,7 +361,11 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
bridge_state =
drm_atomic_get_new_bridge_state(state,
mxsfb->bridge);
- bus_format = bridge_state->input_bus_cfg.format;
+ if (!bridge_state)
+ bus_format = MEDIA_BUS_FMT_FIXED;
+ else
+ bus_format = bridge_state->input_bus_cfg.format;
+
if (bus_format == MEDIA_BUS_FMT_FIXED) {
dev_warn_once(drm->dev,
"Bridge does not provide bus format, assuming MEDIA_BUS_FMT_RGB888_1X24.\n"
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index d0f52d59fc2f..64e423dddd9e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
*addr += bios->imaged_addr;
}
- if (unlikely(*addr + size >= bios->size)) {
+ if (unlikely(*addr + size > bios->size)) {
nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
return false;
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9e46db5e359c..3c08f9827acf 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -588,6 +588,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
err = panel_dpi_probe(dev, panel);
if (err)
goto free_ddc;
+ desc = panel->desc;
} else {
if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
panel_simple_parse_panel_timing_node(dev, panel, &dt);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 0fce73b9a646..70bd84b7ef2b 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -198,7 +198,8 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
* so don't register a backlight device
*/
if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
- (rdev->pdev->device == 0x6741))
+ (rdev->pdev->device == 0x6741) &&
+ !dmi_match(DMI_PRODUCT_NAME, "iMac12,1"))
return;
if (!radeon_encoder->enc_priv)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index e2488559cc9f..11ad210919c8 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -666,18 +666,18 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
if (unlikely(!fpriv)) {
r = -ENOMEM;
- goto out_suspend;
+ goto err_suspend;
}
if (rdev->accel_working) {
vm = &fpriv->vm;
r = radeon_vm_init(rdev, vm);
if (r)
- goto out_fpriv;
+ goto err_fpriv;
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r)
- goto out_vm_fini;
+ goto err_vm_fini;
/* map the ib pool buffer read only into
* virtual address space */
@@ -685,7 +685,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
rdev->ring_tmp_bo.bo);
if (!vm->ib_bo_va) {
r = -ENOMEM;
- goto out_vm_fini;
+ goto err_vm_fini;
}
r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
@@ -693,19 +693,21 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
if (r)
- goto out_vm_fini;
+ goto err_vm_fini;
}
file_priv->driver_priv = fpriv;
}
- if (!r)
- goto out_suspend;
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+ return 0;
-out_vm_fini:
+err_vm_fini:
radeon_vm_fini(rdev, vm);
-out_fpriv:
+err_fpriv:
kfree(fpriv);
-out_suspend:
+
+err_suspend:
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
return r;
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 830bdd5e9b7c..8677c8271678 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -529,13 +529,6 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
- ret = clk_prepare_enable(hdmi->vpll_clk);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
- ret);
- return ret;
- }
-
hdmi->phy = devm_phy_optional_get(dev, "hdmi");
if (IS_ERR(hdmi->phy)) {
ret = PTR_ERR(hdmi->phy);
@@ -544,6 +537,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master,
return ret;
}
+ ret = clk_prepare_enable(hdmi->vpll_clk);
+ if (ret) {
+ DRM_DEV_ERROR(hdmi->dev, "Failed to enable HDMI vpll: %d\n",
+ ret);
+ return ret;
+ }
+
drm_encoder_helper_add(encoder, &dw_hdmi_rockchip_encoder_helper_funcs);
drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 1f7353f0684a..798b542e5916 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -902,6 +902,7 @@ static const struct vop_win_phy rk3399_win01_data = {
.enable = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 0),
.format = VOP_REG(RK3288_WIN0_CTRL0, 0x7, 1),
.rb_swap = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 12),
+ .x_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 21),
.y_mir_en = VOP_REG(RK3288_WIN0_CTRL0, 0x1, 22),
.act_info = VOP_REG(RK3288_WIN0_ACT_INFO, 0x1fff1fff, 0),
.dsp_info = VOP_REG(RK3288_WIN0_DSP_INFO, 0x0fff0fff, 0),
@@ -912,6 +913,7 @@ static const struct vop_win_phy rk3399_win01_data = {
.uv_vir = VOP_REG(RK3288_WIN0_VIR, 0x3fff, 16),
.src_alpha_ctl = VOP_REG(RK3288_WIN0_SRC_ALPHA_CTRL, 0xff, 0),
.dst_alpha_ctl = VOP_REG(RK3288_WIN0_DST_ALPHA_CTRL, 0xff, 0),
+ .channel = VOP_REG(RK3288_WIN0_CTRL2, 0xff, 0),
};
/*
@@ -922,11 +924,11 @@ static const struct vop_win_phy rk3399_win01_data = {
static const struct vop_win_data rk3399_vop_win_data[] = {
{ .base = 0x00, .phy = &rk3399_win01_data,
.type = DRM_PLANE_TYPE_PRIMARY },
- { .base = 0x40, .phy = &rk3288_win01_data,
+ { .base = 0x40, .phy = &rk3368_win01_data,
.type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x00, .phy = &rk3288_win23_data,
+ { .base = 0x00, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_OVERLAY },
- { .base = 0x50, .phy = &rk3288_win23_data,
+ { .base = 0x50, .phy = &rk3368_win23_data,
.type = DRM_PLANE_TYPE_CURSOR },
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
index b64d93da651d..5e2b0175df36 100644
--- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
+++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c
@@ -658,8 +658,10 @@ int sun8i_hdmi_phy_get(struct sun8i_dw_hdmi *hdmi, struct device_node *node)
return -EPROBE_DEFER;
phy = platform_get_drvdata(pdev);
- if (!phy)
+ if (!phy) {
+ put_device(&pdev->dev);
return -EPROBE_DEFER;
+ }
hdmi->phy = phy;
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 0037eefe3239..a3ad7c9736ec 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -68,9 +68,11 @@ pgprot_t ttm_prot_from_caching(enum ttm_caching caching, pgprot_t tmp)
#if defined(__i386__) || defined(__x86_64__)
if (caching == ttm_write_combined)
tmp = pgprot_writecombine(tmp);
+#ifndef CONFIG_UML
else if (boot_cpu_data.x86 > 3)
tmp = pgprot_noncached(tmp);
-#endif
+#endif /* CONFIG_UML */
+#endif /* __i386__ || __x86_64__ */
#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
defined(__powerpc__) || defined(__mips__)
if (caching == ttm_write_combined)
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 287dbc89ad64..e6cc47470e03 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -671,7 +671,6 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder);
- mode = &crtc_state->adjusted_mode;
if (vc4_encoder->type == VC4_ENCODER_TYPE_HDMI0) {
vc4_state->hvs_load = max(mode->clock * mode->hdisplay / mode->htotal + 1000,
mode->clock * 9 / 10) * 1000;
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index a229da58962a..9300d3354c51 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1262,7 +1262,6 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct vc4_dsi *dsi = host_to_dsi(host);
- int ret;
dsi->lanes = device->lanes;
dsi->channel = device->channel;
@@ -1297,18 +1296,15 @@ static int vc4_dsi_host_attach(struct mipi_dsi_host *host,
return 0;
}
- ret = component_add(&dsi->pdev->dev, &vc4_dsi_ops);
- if (ret) {
- mipi_dsi_host_unregister(&dsi->dsi_host);
- return ret;
- }
-
- return 0;
+ return component_add(&dsi->pdev->dev, &vc4_dsi_ops);
}
static int vc4_dsi_host_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
+ struct vc4_dsi *dsi = host_to_dsi(host);
+
+ component_del(&dsi->pdev->dev, &vc4_dsi_ops);
return 0;
}
@@ -1686,9 +1682,7 @@ static int vc4_dsi_dev_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct vc4_dsi *dsi = dev_get_drvdata(dev);
- component_del(&pdev->dev, &vc4_dsi_ops);
mipi_dsi_host_unregister(&dsi->dsi_host);
-
return 0;
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 053fbaf765ca..b30500405fa7 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -196,14 +196,8 @@ vc4_hdmi_connector_detect(struct drm_connector *connector, bool force)
if (gpiod_get_value_cansleep(vc4_hdmi->hpd_gpio))
connected = true;
} else {
- unsigned long flags;
- u32 hotplug;
-
- spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
- hotplug = HDMI_READ(HDMI_HOTPLUG);
- spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
-
- if (hotplug & VC4_HDMI_HOTPLUG_CONNECTED)
+ if (vc4_hdmi->variant->hp_detect &&
+ vc4_hdmi->variant->hp_detect(vc4_hdmi))
connected = true;
}
@@ -1251,6 +1245,7 @@ static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
unsigned long long tmds_rate;
if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ !(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
(mode->hsync_end % 2) || (mode->htotal % 2)))
return -EINVAL;
@@ -1298,6 +1293,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder,
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
if (vc4_hdmi->variant->unsupported_odd_h_timings &&
+ !(mode->flags & DRM_MODE_FLAG_DBLCLK) &&
((mode->hdisplay % 2) || (mode->hsync_start % 2) ||
(mode->hsync_end % 2) || (mode->htotal % 2)))
return MODE_H_ILLEGAL;
@@ -1343,6 +1339,18 @@ static u32 vc5_hdmi_channel_map(struct vc4_hdmi *vc4_hdmi, u32 channel_mask)
return channel_map;
}
+static bool vc5_hdmi_hp_detect(struct vc4_hdmi *vc4_hdmi)
+{
+ unsigned long flags;
+ u32 hotplug;
+
+ spin_lock_irqsave(&vc4_hdmi->hw_lock, flags);
+ hotplug = HDMI_READ(HDMI_HOTPLUG);
+ spin_unlock_irqrestore(&vc4_hdmi->hw_lock, flags);
+
+ return !!(hotplug & VC4_HDMI_HOTPLUG_CONNECTED);
+}
+
/* HDMI audio codec callbacks */
static void vc4_hdmi_audio_set_mai_clock(struct vc4_hdmi *vc4_hdmi,
unsigned int samplerate)
@@ -2504,7 +2512,8 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
* vc4_hdmi_disable_scrambling() will thus run at boot, make
* sure it's disabled, and avoid any inconsistency.
*/
- vc4_hdmi->scdc_enabled = true;
+ if (variant->max_pixel_clock > HDMI_14_MAX_TMDS_CLK)
+ vc4_hdmi->scdc_enabled = true;
ret = variant->init_resources(vc4_hdmi);
if (ret)
@@ -2723,6 +2732,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
.supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
};
static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
@@ -2751,6 +2761,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
.supports_hdr = true,
+ .hp_detect = vc5_hdmi_hp_detect,
};
static const struct of_device_id vc4_hdmi_dt_match[] = {
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 36c0b082a43b..31b77a94c526 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -102,6 +102,9 @@ struct vc4_hdmi_variant {
/* Enables HDR metadata */
bool supports_hdr;
+
+ /* Callback for hardware specific hotplug detect */
+ bool (*hp_detect)(struct vc4_hdmi *vc4_hdmi);
};
/* HDMI audio information */
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 21f410901694..3313b92db531 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -279,7 +279,7 @@ void virtio_gpu_deinit(struct drm_device *dev)
flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work);
flush_work(&vgdev->config_changed_work);
- vgdev->vdev->config->reset(vgdev->vdev);
+ virtio_reset_device(vgdev->vdev);
vgdev->vdev->config->del_vqs(vgdev->vdev);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index d6b66636a19b..ea3ecdda561d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1140,15 +1140,14 @@ extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
struct vmw_private *dev_priv,
struct vmw_fence_obj **p_fence,
uint32_t *p_handle);
-extern void vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
+extern int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp,
int ret,
struct drm_vmw_fence_rep __user
*user_fence_rep,
struct vmw_fence_obj *fence,
uint32_t fence_handle,
- int32_t out_fence_fd,
- struct sync_file *sync_file);
+ int32_t out_fence_fd);
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 44ca23b0ea4e..dd2ff441068e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3879,17 +3879,17 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
* Also if copying fails, user-space will be unable to signal the fence object
* so we wait for it immediately, and then unreference the user-space reference.
*/
-void
+int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
struct vmw_fpriv *vmw_fp, int ret,
struct drm_vmw_fence_rep __user *user_fence_rep,
struct vmw_fence_obj *fence, uint32_t fence_handle,
- int32_t out_fence_fd, struct sync_file *sync_file)
+ int32_t out_fence_fd)
{
struct drm_vmw_fence_rep fence_rep;
if (user_fence_rep == NULL)
- return;
+ return 0;
memset(&fence_rep, 0, sizeof(fence_rep));
@@ -3917,19 +3917,13 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
* handle.
*/
if (unlikely(ret != 0) && (fence_rep.error == 0)) {
- if (sync_file)
- fput(sync_file->file);
-
- if (fence_rep.fd != -1) {
- put_unused_fd(fence_rep.fd);
- fence_rep.fd = -1;
- }
-
ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
VMW_DEBUG_USER("Fence copy error. Syncing.\n");
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
}
+
+ return ret ? -EFAULT : 0;
}
/**
@@ -4266,16 +4260,23 @@ int vmw_execbuf_process(struct drm_file *file_priv,
(void) vmw_fence_obj_wait(fence, false, false,
VMW_FENCE_WAIT_TIMEOUT);
+ }
+ }
+
+ ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
+ user_fence_rep, fence, handle, out_fence_fd);
+
+ if (sync_file) {
+ if (ret) {
+ /* usercopy of fence failed, put the file object */
+ fput(sync_file->file);
+ put_unused_fd(out_fence_fd);
} else {
/* Link the fence with the FD created earlier */
fd_install(out_fence_fd, sync_file->file);
}
}
- vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
- user_fence_rep, fence, handle, out_fence_fd,
- sync_file);
-
/* Don't unreference when handing fence out */
if (unlikely(out_fence != NULL)) {
*out_fence = fence;
@@ -4293,7 +4294,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
*/
vmw_validation_unref_lists(&val_ctx);
- return 0;
+ return ret;
out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index c60d395f9e2e..5001b87aebe8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -1128,7 +1128,7 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
}
vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
- handle, -1, NULL);
+ handle, -1);
vmw_fence_obj_unreference(&fence);
return 0;
out_no_create:
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 4e693e8de2c3..bbd2f4ec08ec 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2501,7 +2501,7 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv,
if (file_priv)
vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv),
ret, user_fence_rep, fence,
- handle, -1, NULL);
+ handle, -1);
if (out_fence)
*out_fence = fence;
else
diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c
index ff2b308d8651..11c409cbc88e 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_disp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c
@@ -24,6 +24,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dma/xilinx_dpdma.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
@@ -1058,14 +1059,18 @@ static void zynqmp_disp_layer_set_format(struct zynqmp_disp_layer *layer,
zynqmp_disp_avbuf_set_format(layer->disp, layer, layer->disp_fmt);
/*
- * Set slave_id for each DMA channel to indicate they're part of a
+ * Set pconfig for each DMA channel to indicate they're part of a
* video group.
*/
for (i = 0; i < info->num_planes; i++) {
struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
+ struct xilinx_dpdma_peripheral_config pconfig = {
+ .video_group = true,
+ };
struct dma_slave_config config = {
.direction = DMA_MEM_TO_DEV,
- .slave_id = 1,
+ .peripheral_config = &pconfig,
+ .peripheral_size = sizeof(pconfig),
};
dmaengine_slave_config(dma->chan, &config);
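The old ->slave_id was a bare integer with driver-defined meaning; dmaengine replaced it with an opaque blob passed through peripheral_config/peripheral_size, whose layout is owned by the DMA controller driver (here linux/dma/xilinx_dpdma.h). A sketch of the client side, assuming a channel has already been requested:

#include <linux/dmaengine.h>
#include <linux/dma/xilinx_dpdma.h>

static int example_join_video_group(struct dma_chan *chan)
{
	/* Controller-specific config, defined by the dpdma driver. */
	struct xilinx_dpdma_peripheral_config pconfig = {
		.video_group = true,
	};
	struct dma_slave_config config = {
		.direction = DMA_MEM_TO_DEV,
		.peripheral_config = &pconfig,
		.peripheral_size = sizeof(pconfig),
	};

	return dmaengine_slave_config(chan, &config);
}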
diff --git a/drivers/greybus/es2.c b/drivers/greybus/es2.c
index 15661c7f3633..e89cca015095 100644
--- a/drivers/greybus/es2.c
+++ b/drivers/greybus/es2.c
@@ -78,7 +78,7 @@ struct es2_cport_in {
* @hd: pointer to our gb_host_device structure
*
* @cport_in: endpoint, urbs and buffer for cport in messages
- * @cport_out_endpoint: endpoint for for cport out messages
+ * @cport_out_endpoint: endpoint for cport out messages
* @cport_out_urb: array of urbs for the CPort out messages
* @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
* not.
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
index 2503be0253d3..19fa734a9a79 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.c
@@ -37,11 +37,11 @@ static int amd_sfh_wait_response_v2(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_
{
union cmd_response cmd_resp;
- /* Get response with status within a max of 800 ms timeout */
+ /* Get response with status within a max of 1600 ms timeout */
if (!readl_poll_timeout(mp2->mmio + AMD_P2C_MSG(0), cmd_resp.resp,
(cmd_resp.response_v2.response == sensor_sts &&
cmd_resp.response_v2.status == 0 && (sid == 0xff ||
- cmd_resp.response_v2.sensor_id == sid)), 500, 800000))
+ cmd_resp.response_v2.sensor_id == sid)), 500, 1600000))
return cmd_resp.response_v2.response;
return SENSOR_DISABLED;
@@ -53,6 +53,7 @@ static void amd_start_sensor_v2(struct amd_mp2_dev *privdata, struct amd_mp2_sen
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = ENABLE_SENSOR;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = info.period;
cmd_base.cmd_v2.sensor_id = info.sensor_idx;
cmd_base.cmd_v2.length = 16;
@@ -70,6 +71,7 @@ static void amd_stop_sensor_v2(struct amd_mp2_dev *privdata, u16 sensor_idx)
cmd_base.ul = 0;
cmd_base.cmd_v2.cmd_id = DISABLE_SENSOR;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = sensor_idx;
cmd_base.cmd_v2.length = 16;
@@ -83,12 +85,51 @@ static void amd_stop_all_sensor_v2(struct amd_mp2_dev *privdata)
union sfh_cmd_base cmd_base;
cmd_base.cmd_v2.cmd_id = STOP_ALL_SENSORS;
+ cmd_base.cmd_v2.intr_disable = 1;
cmd_base.cmd_v2.period = 0;
cmd_base.cmd_v2.sensor_id = 0;
writel(cmd_base.ul, privdata->mmio + AMD_C2P_MSG0);
}
+static void amd_sfh_clear_intr_v2(struct amd_mp2_dev *privdata)
+{
+ if (readl(privdata->mmio + AMD_P2C_MSG(4))) {
+ writel(0, privdata->mmio + AMD_P2C_MSG(4));
+ writel(0xf, privdata->mmio + AMD_P2C_MSG(5));
+ }
+}
+
+static void amd_sfh_clear_intr(struct amd_mp2_dev *privdata)
+{
+ if (privdata->mp2_ops->clear_intr)
+ privdata->mp2_ops->clear_intr(privdata);
+}
+
+static irqreturn_t amd_sfh_irq_handler(int irq, void *data)
+{
+ amd_sfh_clear_intr(data);
+
+ return IRQ_HANDLED;
+}
+
+static int amd_sfh_irq_init_v2(struct amd_mp2_dev *privdata)
+{
+ int rc;
+
+ pci_intx(privdata->pdev, true);
+
+ rc = devm_request_irq(&privdata->pdev->dev, privdata->pdev->irq,
+ amd_sfh_irq_handler, 0, DRIVER_NAME, privdata);
+ if (rc) {
+ dev_err(&privdata->pdev->dev, "failed to request irq %d err=%d\n",
+ privdata->pdev->irq, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
void amd_start_sensor(struct amd_mp2_dev *privdata, struct amd_mp2_sensor_info info)
{
union sfh_cmd_param cmd_param;
@@ -193,6 +234,8 @@ static void amd_mp2_pci_remove(void *privdata)
struct amd_mp2_dev *mp2 = privdata;
amd_sfh_hid_client_deinit(privdata);
mp2->mp2_ops->stop_all(mp2);
+ pci_intx(mp2->pdev, false);
+ amd_sfh_clear_intr(mp2);
}
static const struct amd_mp2_ops amd_sfh_ops_v2 = {
@@ -200,6 +243,8 @@ static const struct amd_mp2_ops amd_sfh_ops_v2 = {
.stop = amd_stop_sensor_v2,
.stop_all = amd_stop_all_sensor_v2,
.response = amd_sfh_wait_response_v2,
+ .clear_intr = amd_sfh_clear_intr_v2,
+ .init_intr = amd_sfh_irq_init_v2,
};
static const struct amd_mp2_ops amd_sfh_ops = {
@@ -225,6 +270,14 @@ static void mp2_select_ops(struct amd_mp2_dev *privdata)
}
}
+static int amd_sfh_irq_init(struct amd_mp2_dev *privdata)
+{
+ if (privdata->mp2_ops->init_intr)
+ return privdata->mp2_ops->init_intr(privdata);
+
+ return 0;
+}
+
static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct amd_mp2_dev *privdata;
@@ -261,9 +314,20 @@ static int amd_mp2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i
mp2_select_ops(privdata);
+ rc = amd_sfh_irq_init(privdata);
+ if (rc) {
+ dev_err(&pdev->dev, "amd_sfh_irq_init failed\n");
+ return rc;
+ }
+
rc = amd_sfh_hid_client_init(privdata);
- if (rc)
+ if (rc) {
+ amd_sfh_clear_intr(privdata);
+ dev_err(&pdev->dev, "amd_sfh_hid_client_init failed\n");
return rc;
+ }
+
+ amd_sfh_clear_intr(privdata);
return devm_add_action_or_reset(&pdev->dev, amd_mp2_pci_remove, privdata);
}
@@ -290,6 +354,9 @@ static int __maybe_unused amd_mp2_pci_resume(struct device *dev)
}
}
+ schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
+ amd_sfh_clear_intr(mp2);
+
return 0;
}
@@ -312,6 +379,9 @@ static int __maybe_unused amd_mp2_pci_suspend(struct device *dev)
}
}
+ cancel_delayed_work_sync(&cl_data->work_buffer);
+ amd_sfh_clear_intr(mp2);
+
return 0;
}
diff --git a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
index ae30e059f847..97b99861fae2 100644
--- a/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
+++ b/drivers/hid/amd-sfh-hid/amd_sfh_pcie.h
@@ -49,7 +49,7 @@ union sfh_cmd_base {
} s;
struct {
u32 cmd_id : 4;
- u32 intr_enable : 1;
+ u32 intr_disable : 1;
u32 rsvd1 : 3;
u32 length : 7;
u32 mem_type : 1;
@@ -141,5 +141,7 @@ struct amd_mp2_ops {
void (*stop)(struct amd_mp2_dev *privdata, u16 sensor_idx);
void (*stop_all)(struct amd_mp2_dev *privdata);
int (*response)(struct amd_mp2_dev *mp2, u8 sid, u32 sensor_sts);
+ void (*clear_intr)(struct amd_mp2_dev *privdata);
+ int (*init_intr)(struct amd_mp2_dev *privdata);
};
#endif
diff --git a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
index be41f83b0289..76095bd53c65 100644
--- a/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
+++ b/drivers/hid/amd-sfh-hid/hid_descriptor/amd_sfh_hid_desc.c
@@ -27,6 +27,7 @@
#define HID_USAGE_SENSOR_STATE_READY_ENUM 0x02
#define HID_USAGE_SENSOR_STATE_INITIALIZING_ENUM 0x05
#define HID_USAGE_SENSOR_EVENT_DATA_UPDATED_ENUM 0x04
+#define ILLUMINANCE_MASK GENMASK(14, 0)
int get_report_descriptor(int sensor_idx, u8 *rep_desc)
{
@@ -246,7 +247,8 @@ u8 get_input_report(u8 current_index, int sensor_idx, int report_id, struct amd_
get_common_inputs(&als_input.common_property, report_id);
/* For ALS, V2 platforms use the C2P_MSG5 register instead of the DRAM access method */
if (supported_input == V2_STATUS)
- als_input.illuminance_value = (int)readl(privdata->mmio + AMD_C2P_MSG(5));
+ als_input.illuminance_value =
+ readl(privdata->mmio + AMD_C2P_MSG(5)) & ILLUMINANCE_MASK;
else
als_input.illuminance_value =
(int)sensor_virt_addr[0] / AMD_SFH_FW_MULTIPLIER;
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 24802a4a636e..7dc89dc6b0f0 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -691,49 +691,49 @@ static const struct hid_device_id apple_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
- .driver_data = APPLE_HAS_FN },
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
.driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
index 8e960d7b233b..9b42b0cdeef0 100644
--- a/drivers/hid/hid-elo.c
+++ b/drivers/hid/hid-elo.c
@@ -262,6 +262,7 @@ static int elo_probe(struct hid_device *hdev, const struct hid_device_id *id)
return 0;
err_free:
+ usb_put_dev(udev);
kfree(priv);
return ret;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 26cee452ec44..78bd3ddda442 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -400,6 +400,7 @@
#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
#define I2C_DEVICE_ID_HP_ENVY_X360_15 0x2d05
+#define I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100 0x29CF
#define I2C_DEVICE_ID_HP_SPECTRE_X360_15 0x2817
#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
@@ -1369,6 +1370,7 @@
#define USB_VENDOR_ID_UGTIZER 0x2179
#define USB_DEVICE_ID_UGTIZER_TABLET_GP0610 0x0053
#define USB_DEVICE_ID_UGTIZER_TABLET_GT5040 0x0077
+#define USB_DEVICE_ID_UGTIZER_TABLET_WP5540 0x0004
#define USB_VENDOR_ID_VIEWSONIC 0x0543
#define USB_DEVICE_ID_VIEWSONIC_PD1011 0xe621
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 1ce75e8b49d5..112901d2d8d2 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -330,6 +330,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15),
HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_ENVY_X360_15T_DR100),
+ HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_HP_SPECTRE_X360_15),
HID_BATTERY_QUIRK_IGNORE },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, I2C_DEVICE_ID_SURFACE_GO_TOUCHSCREEN),
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 9af1dc8ae3a2..c066ba901867 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -187,6 +187,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD), HID_QUIRK_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60), HID_QUIRK_MULTI_INPUT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_WP5540), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET), HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/hid-vivaldi.c b/drivers/hid/hid-vivaldi.c
index 72957a9f7117..efa6140915f4 100644
--- a/drivers/hid/hid-vivaldi.c
+++ b/drivers/hid/hid-vivaldi.c
@@ -6,16 +6,17 @@
* Author: Sean O'Brien <seobrien@chromium.org>
*/
+#include <linux/device.h>
#include <linux/hid.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/sysfs.h>
#define MIN_FN_ROW_KEY 1
#define MAX_FN_ROW_KEY 24
#define HID_VD_FN_ROW_PHYSMAP 0x00000001
#define HID_USAGE_FN_ROW_PHYSMAP (HID_UP_GOOGLEVENDOR | HID_VD_FN_ROW_PHYSMAP)
-static struct hid_driver hid_vivaldi;
-
struct vivaldi_data {
u32 function_row_physmap[MAX_FN_ROW_KEY - MIN_FN_ROW_KEY + 1];
int max_function_row_key;
@@ -40,7 +41,7 @@ static ssize_t function_row_physmap_show(struct device *dev,
return size;
}
-DEVICE_ATTR_RO(function_row_physmap);
+static DEVICE_ATTR_RO(function_row_physmap);
static struct attribute *sysfs_attrs[] = {
&dev_attr_function_row_physmap.attr,
NULL
@@ -74,10 +75,11 @@ static void vivaldi_feature_mapping(struct hid_device *hdev,
struct hid_usage *usage)
{
struct vivaldi_data *drvdata = hid_get_drvdata(hdev);
+ struct hid_report *report = field->report;
int fn_key;
int ret;
u32 report_len;
- u8 *buf;
+ u8 *report_data, *buf;
if (field->logical != HID_USAGE_FN_ROW_PHYSMAP ||
(usage->hid & HID_USAGE_PAGE) != HID_UP_ORDINAL)
@@ -89,12 +91,24 @@ static void vivaldi_feature_mapping(struct hid_device *hdev,
if (fn_key > drvdata->max_function_row_key)
drvdata->max_function_row_key = fn_key;
- buf = hid_alloc_report_buf(field->report, GFP_KERNEL);
- if (!buf)
+ report_data = buf = hid_alloc_report_buf(report, GFP_KERNEL);
+ if (!report_data)
return;
- report_len = hid_report_len(field->report);
- ret = hid_hw_raw_request(hdev, field->report->id, buf,
+ report_len = hid_report_len(report);
+ if (!report->id) {
+ /*
+ * hid_hw_raw_request() will stuff report ID (which will be 0)
+ * into the first byte of the buffer even for unnumbered
+ * reports, so we need to account for this to avoid getting
+ * -EOVERFLOW in return.
+ * Note that hid_alloc_report_buf() adds 7 bytes to the size
+ * so we can safely say that we have space for an extra byte.
+ */
+ report_len++;
+ }
+
+ ret = hid_hw_raw_request(hdev, report->id, report_data,
report_len, HID_FEATURE_REPORT,
HID_REQ_GET_REPORT);
if (ret < 0) {
@@ -103,7 +117,16 @@ static void vivaldi_feature_mapping(struct hid_device *hdev,
goto out;
}
- ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, buf,
+ if (!report->id) {
+ /*
+ * Undo the damage from hid_hw_raw_request() for unnumbered
+ * reports.
+ */
+ report_data++;
+ report_len--;
+ }
+
+ ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, report_data,
report_len, 0);
if (ret) {
dev_warn(&hdev->dev, "failed to report feature %d\n",
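The two report->id checks above exist because hid_hw_raw_request() always stores a report-ID byte at buf[0], even for unnumbered reports (ID 0): the request length must grow by one byte, and parsing must skip that byte afterwards. The same dance condensed into a sketch, error paths trimmed and names illustrative:

#include <linux/hid.h>
#include <linux/slab.h>

static int example_fetch_feature(struct hid_device *hdev,
				 struct hid_report *report)
{
	u32 len = hid_report_len(report);
	u8 *buf, *data;
	int ret;

	/* hid_alloc_report_buf() pads the size, so +1 below is safe. */
	buf = data = hid_alloc_report_buf(report, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (!report->id)
		len++;			/* room for the stuffed ID byte */

	ret = hid_hw_raw_request(hdev, report->id, data, len,
				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
	if (ret < 0)
		goto out;

	if (!report->id) {
		data++;			/* skip the bogus ID byte */
		len--;
	}

	ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, data, len, 0);
out:
	kfree(buf);
	return ret;
}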
diff --git a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
index b4dad66fa954..ec6c73f75ffe 100644
--- a/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
+++ b/drivers/hid/i2c-hid/i2c-hid-of-goodix.c
@@ -27,7 +27,6 @@ struct i2c_hid_of_goodix {
struct regulator *vdd;
struct notifier_block nb;
- struct mutex regulator_mutex;
struct gpio_desc *reset_gpio;
const struct goodix_i2c_hid_timing_data *timings;
};
@@ -67,8 +66,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
container_of(nb, struct i2c_hid_of_goodix, nb);
int ret = NOTIFY_OK;
- mutex_lock(&ihid_goodix->regulator_mutex);
-
switch (event) {
case REGULATOR_EVENT_PRE_DISABLE:
gpiod_set_value_cansleep(ihid_goodix->reset_gpio, 1);
@@ -87,8 +84,6 @@ static int ihid_goodix_vdd_notify(struct notifier_block *nb,
break;
}
- mutex_unlock(&ihid_goodix->regulator_mutex);
-
return ret;
}
@@ -102,8 +97,6 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
if (!ihid_goodix)
return -ENOMEM;
- mutex_init(&ihid_goodix->regulator_mutex);
-
ihid_goodix->ops.power_up = goodix_i2c_hid_power_up;
ihid_goodix->ops.power_down = goodix_i2c_hid_power_down;
@@ -130,25 +123,28 @@ static int i2c_hid_of_goodix_probe(struct i2c_client *client,
* long. Holding the controller in reset apparently draws extra
* power.
*/
- mutex_lock(&ihid_goodix->regulator_mutex);
ihid_goodix->nb.notifier_call = ihid_goodix_vdd_notify;
ret = devm_regulator_register_notifier(ihid_goodix->vdd, &ihid_goodix->nb);
- if (ret) {
- mutex_unlock(&ihid_goodix->regulator_mutex);
+ if (ret)
return dev_err_probe(&client->dev, ret,
"regulator notifier request failed\n");
- }
/*
* If someone else is holding the regulator on (or the regulator is
* an always-on one) we might never be told to deassert reset. Do it
- * now. Here we'll assume that someone else might have _just
- * barely_ turned the regulator on so we'll do the full
- * "post_power_delay" just in case.
+ * now... and temporarily bump the regulator reference count just to
+ * make sure it is impossible for this to race with our own notifier!
+ * We also assume that someone else might have _just barely_ turned
+ * the regulator on so we'll do the full "post_power_delay" just in
+ * case.
*/
- if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd))
+ if (ihid_goodix->reset_gpio && regulator_is_enabled(ihid_goodix->vdd)) {
+ ret = regulator_enable(ihid_goodix->vdd);
+ if (ret)
+ return ret;
goodix_i2c_hid_deassert_reset(ihid_goodix, true);
- mutex_unlock(&ihid_goodix->regulator_mutex);
+ regulator_disable(ihid_goodix->vdd);
+ }
return i2c_hid_core_probe(client, &ihid_goodix->ops, 0x0001, 0);
}
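The mutex is dropped in favour of a transient regulator reference: regulator_enable() pins the supply, so the PRE_DISABLE notifier (which re-asserts reset) cannot fire while reset is being deasserted. The idiom in isolation, with illustrative names (error reporting from regulator_is_enabled() is ignored here for brevity):

#include <linux/regulator/consumer.h>

static int example_deassert_if_powered(struct regulator *vdd,
				       void (*deassert)(void))
{
	int ret;

	if (!regulator_is_enabled(vdd))
		return 0;

	ret = regulator_enable(vdd);	/* pin the supply */
	if (ret)
		return ret;

	deassert();

	regulator_disable(vdd);		/* drop our pin */
	return 0;
}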
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index 8fe3efcb8327..614adb510dbd 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -28,11 +28,22 @@
struct uhid_device {
struct mutex devlock;
+
+ /* This flag tracks whether the HID device is usable for commands from
+ * userspace. The flag is already set before hid_add_device(), which
+ * runs in workqueue context, to allow hid_add_device() to communicate
+ * with userspace.
+ * However, if hid_add_device() fails, the flag is cleared without
+ * holding devlock.
+ * We guarantee that if @running changes from true to false while you're
+ * holding @devlock, it's still fine to access @hid.
+ */
bool running;
__u8 *rd_data;
uint rd_size;
+ /* When this is NULL, userspace may use UHID_CREATE/UHID_CREATE2. */
struct hid_device *hid;
struct uhid_event input_buf;
@@ -63,9 +74,18 @@ static void uhid_device_add_worker(struct work_struct *work)
if (ret) {
hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret);
- hid_destroy_device(uhid->hid);
- uhid->hid = NULL;
- uhid->running = false;
+ /* We used to call hid_destroy_device() here, but that's really
+ * messy to get right because we have to coordinate with
+ * concurrent writes from userspace that might be in the middle
+ * of using uhid->hid.
+ * Just leave uhid->hid as-is for now, and clean it up when
+ * userspace tries to close or reinitialize the uhid instance.
+ *
+ * However, we do have to clear the ->running flag and do a
+ * wakeup to make sure userspace knows that the device is gone.
+ */
+ WRITE_ONCE(uhid->running, false);
+ wake_up_interruptible(&uhid->report_wait);
}
}
@@ -174,9 +194,9 @@ static int __uhid_report_queue_and_wait(struct uhid_device *uhid,
spin_unlock_irqrestore(&uhid->qlock, flags);
ret = wait_event_interruptible_timeout(uhid->report_wait,
- !uhid->report_running || !uhid->running,
+ !uhid->report_running || !READ_ONCE(uhid->running),
5 * HZ);
- if (!ret || !uhid->running || uhid->report_running)
+ if (!ret || !READ_ONCE(uhid->running) || uhid->report_running)
ret = -EIO;
else if (ret < 0)
ret = -ERESTARTSYS;
@@ -217,7 +237,7 @@ static int uhid_hid_get_report(struct hid_device *hid, unsigned char rnum,
struct uhid_event *ev;
int ret;
- if (!uhid->running)
+ if (!READ_ONCE(uhid->running))
return -EIO;
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -259,7 +279,7 @@ static int uhid_hid_set_report(struct hid_device *hid, unsigned char rnum,
struct uhid_event *ev;
int ret;
- if (!uhid->running || count > UHID_DATA_MAX)
+ if (!READ_ONCE(uhid->running) || count > UHID_DATA_MAX)
return -EIO;
ev = kzalloc(sizeof(*ev), GFP_KERNEL);
@@ -474,7 +494,7 @@ static int uhid_dev_create2(struct uhid_device *uhid,
void *rd_data;
int ret;
- if (uhid->running)
+ if (uhid->hid)
return -EALREADY;
rd_size = ev->u.create2.rd_size;
@@ -556,15 +576,16 @@ static int uhid_dev_create(struct uhid_device *uhid,
static int uhid_dev_destroy(struct uhid_device *uhid)
{
- if (!uhid->running)
+ if (!uhid->hid)
return -EINVAL;
- uhid->running = false;
+ WRITE_ONCE(uhid->running, false);
wake_up_interruptible(&uhid->report_wait);
cancel_work_sync(&uhid->worker);
hid_destroy_device(uhid->hid);
+ uhid->hid = NULL;
kfree(uhid->rd_data);
return 0;
@@ -572,7 +593,7 @@ static int uhid_dev_destroy(struct uhid_device *uhid)
static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
- if (!uhid->running)
+ if (!READ_ONCE(uhid->running))
return -EINVAL;
hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
@@ -583,7 +604,7 @@ static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
{
- if (!uhid->running)
+ if (!READ_ONCE(uhid->running))
return -EINVAL;
hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input2.data,
@@ -595,7 +616,7 @@ static int uhid_dev_input2(struct uhid_device *uhid, struct uhid_event *ev)
static int uhid_dev_get_report_reply(struct uhid_device *uhid,
struct uhid_event *ev)
{
- if (!uhid->running)
+ if (!READ_ONCE(uhid->running))
return -EINVAL;
uhid_report_wake_up(uhid, ev->u.get_report_reply.id, ev);
@@ -605,7 +626,7 @@ static int uhid_dev_get_report_reply(struct uhid_device *uhid,
static int uhid_dev_set_report_reply(struct uhid_device *uhid,
struct uhid_event *ev)
{
- if (!uhid->running)
+ if (!READ_ONCE(uhid->running))
return -EINVAL;
uhid_report_wake_up(uhid, ev->u.set_report_reply.id, ev);
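Since ->running can now flip to false from the add-device worker without devlock held, every access outside the lock is wrapped in READ_ONCE()/WRITE_ONCE() so the compiler can neither cache nor tear the flag, and "has the device been created" checks move to uhid->hid under devlock. The bare idiom, names illustrative:

#include <linux/compiler.h>
#include <linux/wait.h>

struct example_dev {
	bool running;
	wait_queue_head_t wait;	/* set up with init_waitqueue_head() */
};

/* One side clears the flag locklessly and wakes any waiters. */
static void example_mark_dead(struct example_dev *d)
{
	WRITE_ONCE(d->running, false);
	wake_up_interruptible(&d->wait);
}

/* Waiters re-read the flag with READ_ONCE() in the wait condition. */
static int example_wait_idle(struct example_dev *d, bool *done)
{
	return wait_event_interruptible(d->wait,
					*done || !READ_ONCE(d->running));
}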
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 2a4cc39962e7..a7176fc0635d 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2588,6 +2588,24 @@ static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
}
}
+static bool wacom_wac_slot_is_active(struct input_dev *dev, int key)
+{
+ struct input_mt *mt = dev->mt;
+ struct input_mt_slot *s;
+
+ if (!mt)
+ return false;
+
+ for (s = mt->slots; s != mt->slots + mt->num_slots; s++) {
+ if (s->key == key &&
+ input_mt_get_value(s, ABS_MT_TRACKING_ID) >= 0) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
static void wacom_wac_finger_event(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage, __s32 value)
{
@@ -2638,9 +2656,14 @@ static void wacom_wac_finger_event(struct hid_device *hdev,
}
if (usage->usage_index + 1 == field->report_count) {
- if (equivalent_usage == wacom_wac->hid_data.last_slot_field &&
- wacom_wac->hid_data.confidence)
- wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
+ if (equivalent_usage == wacom_wac->hid_data.last_slot_field) {
+ bool touch_removed = wacom_wac_slot_is_active(wacom_wac->touch_input,
+ wacom_wac->hid_data.id) && !wacom_wac->hid_data.tipswitch;
+
+ if (wacom_wac->hid_data.confidence || touch_removed) {
+ wacom_wac_finger_slot(wacom_wac, wacom_wac->touch_input);
+ }
+ }
}
}
@@ -2659,6 +2682,10 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
hid_data->confidence = true;
+ hid_data->cc_report = 0;
+ hid_data->cc_index = -1;
+ hid_data->cc_value_index = -1;
+
for (i = 0; i < report->maxfield; i++) {
struct hid_field *field = report->field[i];
int j;
@@ -2692,11 +2719,14 @@ static void wacom_wac_finger_pre_report(struct hid_device *hdev,
hid_data->cc_index >= 0) {
struct hid_field *field = report->field[hid_data->cc_index];
int value = field->value[hid_data->cc_value_index];
- if (value)
+ if (value) {
hid_data->num_expected = value;
+ hid_data->num_received = 0;
+ }
}
else {
hid_data->num_expected = wacom_wac->features.touch_max;
+ hid_data->num_received = 0;
}
}
@@ -2724,6 +2754,7 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
input_sync(input);
wacom_wac->hid_data.num_received = 0;
+ wacom_wac->hid_data.num_expected = 0;
/* keep touch state for pen event */
wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(wacom_wac);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2829575fd9b7..60375879612f 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -1554,7 +1554,7 @@ int vmbus_request_offers(void)
struct vmbus_channel_msginfo *msginfo;
int ret;
- msginfo = kmalloc(sizeof(*msginfo) +
+ msginfo = kzalloc(sizeof(*msginfo) +
sizeof(struct vmbus_channel_message_header),
GFP_KERNEL);
if (!msginfo)
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index ca873a3b98db..f2d05bff4245 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1660,6 +1660,13 @@ static int balloon_connect_vsp(struct hv_device *dev)
unsigned long t;
int ret;
+ /*
+ * max_pkt_size should be large enough for one vmbus packet header plus
+ * our receive buffer size. Hyper-V sends messages up to
+ * HV_HYP_PAGE_SIZE bytes long on balloon channel.
+ */
+ dev->channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;
+
ret = vmbus_open(dev->channel, dm_ring_size, dm_ring_size, NULL, 0,
balloon_onchannelcallback, dev);
if (ret)
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 7be173a99f27..181d16bbf49d 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -44,10 +44,10 @@ EXPORT_SYMBOL_GPL(hv_vp_index);
u32 hv_max_vp_index;
EXPORT_SYMBOL_GPL(hv_max_vp_index);
-void __percpu **hyperv_pcpu_input_arg;
+void * __percpu *hyperv_pcpu_input_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_input_arg);
-void __percpu **hyperv_pcpu_output_arg;
+void * __percpu *hyperv_pcpu_output_arg;
EXPORT_SYMBOL_GPL(hyperv_pcpu_output_arg);
/*
@@ -295,3 +295,14 @@ u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_s
return HV_STATUS_INVALID_PARAMETER;
}
EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);
+
+void __weak *hv_map_memory(void *addr, unsigned long size)
+{
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(hv_map_memory);
+
+void __weak hv_unmap_memory(void *addr)
+{
+}
+EXPORT_SYMBOL_GPL(hv_unmap_memory);
diff --git a/drivers/hv/hv_utils_transport.c b/drivers/hv/hv_utils_transport.c
index eb2833d2b5d0..832885198643 100644
--- a/drivers/hv/hv_utils_transport.c
+++ b/drivers/hv/hv_utils_transport.c
@@ -13,7 +13,7 @@
#include "hv_utils_transport.h"
static DEFINE_SPINLOCK(hvt_list_lock);
-static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list);
+static LIST_HEAD(hvt_list);
static void hvt_reset(struct hvutil_transport *hvt)
{
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 7ae04ccb1043..12a2b37e87f3 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -33,6 +33,7 @@
#include <linux/random.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
+#include <linux/dma-map-ops.h>
#include <clocksource/hyperv_timer.h>
#include "hyperv_vmbus.h"
@@ -2027,8 +2028,10 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
kobj->kset = dev->channels_kset;
ret = kobject_init_and_add(kobj, &vmbus_chan_ktype, NULL,
"%u", relid);
- if (ret)
+ if (ret) {
+ kobject_put(kobj);
return ret;
+ }
ret = sysfs_create_group(kobj, &vmbus_chan_group);
@@ -2037,6 +2040,7 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel)
* The calling functions' error handling paths will cleanup the
* empty channel directory.
*/
+ kobject_put(kobj);
dev_err(device, "Unable to set up channel sysfs files\n");
return ret;
}
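Both added kobject_put() calls follow the standard kobject rule: once kobject_init_and_add() has run, even unsuccessfully, the object carries a reference and internal state, so cleanup must go through kobject_put() (which ends up in the ktype's release()) rather than kfree(). A sketch, assuming a ktype whose release() frees the containing object:

#include <linux/kobject.h>
#include <linux/slab.h>

static int example_add_child(struct kobject *parent, struct kobj_type *ktype)
{
	struct kobject *kobj = kzalloc(sizeof(*kobj), GFP_KERNEL);
	int ret;

	if (!kobj)
		return -ENOMEM;

	ret = kobject_init_and_add(kobj, ktype, parent, "child%u", 0);
	if (ret)
		kobject_put(kobj);	/* not kfree(): release() frees it */

	return ret;
}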
@@ -2118,6 +2122,9 @@ int vmbus_device_register(struct hv_device *child_device_obj)
}
hv_debug_add_dev_dir(child_device_obj);
+ child_device_obj->device.dma_parms = &child_device_obj->dma_parms;
+ child_device_obj->device.dma_mask = &child_device_obj->dma_mask;
+ dma_set_mask(&child_device_obj->device, DMA_BIT_MASK(64));
return 0;
err_kset_unregister:
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index d519aca4a9d6..fb6d14d213a1 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -662,6 +662,9 @@ static int adt7470_fan_write(struct device *dev, u32 attr, int channel, long val
struct adt7470_data *data = dev_get_drvdata(dev);
int err;
+ if (val <= 0)
+ return -EINVAL;
+
val = FAN_RPM_TO_PERIOD(val);
val = clamp_val(val, 1, 65534);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index d401f9acf450..9949eeb79378 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -451,7 +451,7 @@ static int i8k_get_power_status(void)
static long i8k_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
- struct dell_smm_data *data = PDE_DATA(file_inode(fp));
+ struct dell_smm_data *data = pde_data(file_inode(fp));
int __user *argp = (int __user *)arg;
int speed, err;
int val = 0;
@@ -585,7 +585,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
static int i8k_open_fs(struct inode *inode, struct file *file)
{
- return single_open(file, i8k_proc_show, PDE_DATA(inode));
+ return single_open(file, i8k_proc_show, pde_data(inode));
}
static const struct proc_ops i8k_proc_ops = {
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 74019dff2550..1c9493c70813 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -373,7 +373,7 @@ static const struct lm90_params lm90_params[] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
| LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
.alert_alarms = 0x7c,
- .max_convrate = 8,
+ .max_convrate = 7,
},
[lm86] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
@@ -394,12 +394,13 @@ static const struct lm90_params lm90_params[] = {
.max_convrate = 9,
},
[max6646] = {
- .flags = LM90_HAVE_CRIT,
+ .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT,
.alert_alarms = 0x7c,
.max_convrate = 6,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
},
[max6654] = {
+ .flags = LM90_HAVE_BROKEN_ALERT,
.alert_alarms = 0x7c,
.max_convrate = 7,
.reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
@@ -418,7 +419,7 @@ static const struct lm90_params lm90_params[] = {
},
[max6680] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
- | LM90_HAVE_CRIT_ALRM_SWP,
+ | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT,
.alert_alarms = 0x7c,
.max_convrate = 7,
},
@@ -848,7 +849,7 @@ static int lm90_update_device(struct device *dev)
* Re-enable ALERT# output if it was originally enabled and
* relevant alarms are all clear
*/
- if (!(data->config_orig & 0x80) &&
+ if ((client->irq || !(data->config_orig & 0x80)) &&
!(data->alarms & data->alert_alarms)) {
if (data->config & 0x80) {
dev_dbg(&client->dev, "Re-enabling ALERT#\n");
@@ -1807,22 +1808,22 @@ static bool lm90_is_tripped(struct i2c_client *client, u16 *status)
if (st & LM90_STATUS_LLOW)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_min, 0);
+ hwmon_temp_min_alarm, 0);
if (st & LM90_STATUS_RLOW)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_min, 1);
+ hwmon_temp_min_alarm, 1);
if (st2 & MAX6696_STATUS2_R2LOW)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_min, 2);
+ hwmon_temp_min_alarm, 2);
if (st & LM90_STATUS_LHIGH)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_max, 0);
+ hwmon_temp_max_alarm, 0);
if (st & LM90_STATUS_RHIGH)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_max, 1);
+ hwmon_temp_max_alarm, 1);
if (st2 & MAX6696_STATUS2_R2HIGH)
hwmon_notify_event(data->hwmon_dev, hwmon_temp,
- hwmon_temp_max, 2);
+ hwmon_temp_max_alarm, 2);
return true;
}
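The last lm90 hunk corrects the uevent payload: hwmon_notify_event() must be given the *_alarm attribute that userspace actually polls, not the min/max threshold attribute itself. A sketch of a trip handler using the corrected attributes; the helper name is illustrative:

#include <linux/hwmon.h>

static void example_report_trip(struct device *hwmon_dev, bool low, int chan)
{
	hwmon_notify_event(hwmon_dev, hwmon_temp,
			   low ? hwmon_temp_min_alarm : hwmon_temp_max_alarm,
			   chan);
}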
diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c
index 2a4bed0ab226..7352d2b3c756 100644
--- a/drivers/hwmon/ltc2992.c
+++ b/drivers/hwmon/ltc2992.c
@@ -248,8 +248,7 @@ static int ltc2992_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask
gpio_status = reg;
- gpio_nr = 0;
- for_each_set_bit_from(gpio_nr, mask, LTC2992_GPIO_NR) {
+ for_each_set_bit(gpio_nr, mask, LTC2992_GPIO_NR) {
if (test_bit(LTC2992_GPIO_BIT(gpio_nr), &gpio_status))
set_bit(gpio_nr, bits);
}
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index fd3f91cb01c6..098d12b9ecda 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -1175,7 +1175,7 @@ static inline u8 in_to_reg(u32 val, u8 nr)
struct nct6775_data {
int addr; /* IO base of hw monitor block */
- int sioreg; /* SIO register address */
+ struct nct6775_sio_data *sio_data;
enum kinds kind;
const char *name;
@@ -3559,7 +3559,7 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nct6775_data *data = dev_get_drvdata(dev);
- struct nct6775_sio_data *sio_data = dev_get_platdata(dev);
+ struct nct6775_sio_data *sio_data = data->sio_data;
int nr = to_sensor_dev_attr(attr)->index - INTRUSION_ALARM_BASE;
unsigned long val;
u8 reg;
@@ -3967,7 +3967,7 @@ static int nct6775_probe(struct platform_device *pdev)
return -ENOMEM;
data->kind = sio_data->kind;
- data->sioreg = sio_data->sioreg;
+ data->sio_data = sio_data;
if (sio_data->access == access_direct) {
data->addr = res->start;
diff --git a/drivers/hwmon/pmbus/ir38064.c b/drivers/hwmon/pmbus/ir38064.c
index 0ea7e1c18bdc..09276e397194 100644
--- a/drivers/hwmon/pmbus/ir38064.c
+++ b/drivers/hwmon/pmbus/ir38064.c
@@ -62,7 +62,7 @@ static const struct i2c_device_id ir38064_id[] = {
MODULE_DEVICE_TABLE(i2c, ir38064_id);
-static const struct of_device_id ir38064_of_match[] = {
+static const struct of_device_id __maybe_unused ir38064_of_match[] = {
{ .compatible = "infineon,ir38060" },
{ .compatible = "infineon,ir38064" },
{ .compatible = "infineon,ir38164" },
diff --git a/drivers/hwspinlock/stm32_hwspinlock.c b/drivers/hwspinlock/stm32_hwspinlock.c
index 3ad0ce0da4d9..5bd11a7fab65 100644
--- a/drivers/hwspinlock/stm32_hwspinlock.c
+++ b/drivers/hwspinlock/stm32_hwspinlock.c
@@ -54,8 +54,23 @@ static const struct hwspinlock_ops stm32_hwspinlock_ops = {
.relax = stm32_hwspinlock_relax,
};
+static void stm32_hwspinlock_disable_clk(void *data)
+{
+ struct platform_device *pdev = data;
+ struct stm32_hwspinlock *hw = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_get_sync(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
+
+ clk_disable_unprepare(hw->clk);
+}
+
static int stm32_hwspinlock_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct stm32_hwspinlock *hw;
void __iomem *io_base;
size_t array_size;
@@ -66,41 +81,43 @@ static int stm32_hwspinlock_probe(struct platform_device *pdev)
return PTR_ERR(io_base);
array_size = STM32_MUTEX_NUM_LOCKS * sizeof(struct hwspinlock);
- hw = devm_kzalloc(&pdev->dev, sizeof(*hw) + array_size, GFP_KERNEL);
+ hw = devm_kzalloc(dev, sizeof(*hw) + array_size, GFP_KERNEL);
if (!hw)
return -ENOMEM;
- hw->clk = devm_clk_get(&pdev->dev, "hsem");
+ hw->clk = devm_clk_get(dev, "hsem");
if (IS_ERR(hw->clk))
return PTR_ERR(hw->clk);
- for (i = 0; i < STM32_MUTEX_NUM_LOCKS; i++)
- hw->bank.lock[i].priv = io_base + i * sizeof(u32);
+ ret = clk_prepare_enable(hw->clk);
+ if (ret) {
+ dev_err(dev, "Failed to prepare_enable clock\n");
+ return ret;
+ }
platform_set_drvdata(pdev, hw);
- pm_runtime_enable(&pdev->dev);
- ret = hwspin_lock_register(&hw->bank, &pdev->dev, &stm32_hwspinlock_ops,
- 0, STM32_MUTEX_NUM_LOCKS);
+ pm_runtime_get_noresume(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_put(dev);
- if (ret)
- pm_runtime_disable(&pdev->dev);
+ ret = devm_add_action_or_reset(dev, stm32_hwspinlock_disable_clk, pdev);
+ if (ret) {
+ dev_err(dev, "Failed to register action\n");
+ return ret;
+ }
- return ret;
-}
+ for (i = 0; i < STM32_MUTEX_NUM_LOCKS; i++)
+ hw->bank.lock[i].priv = io_base + i * sizeof(u32);
-static int stm32_hwspinlock_remove(struct platform_device *pdev)
-{
- struct stm32_hwspinlock *hw = platform_get_drvdata(pdev);
- int ret;
+ ret = devm_hwspin_lock_register(dev, &hw->bank, &stm32_hwspinlock_ops,
+ 0, STM32_MUTEX_NUM_LOCKS);
- ret = hwspin_lock_unregister(&hw->bank);
if (ret)
- dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
-
- pm_runtime_disable(&pdev->dev);
+ dev_err(dev, "Failed to register hwspinlock\n");
- return 0;
+ return ret;
}
static int __maybe_unused stm32_hwspinlock_runtime_suspend(struct device *dev)
@@ -135,7 +152,6 @@ MODULE_DEVICE_TABLE(of, stm32_hwpinlock_ids);
static struct platform_driver stm32_hwspinlock_driver = {
.probe = stm32_hwspinlock_probe,
- .remove = stm32_hwspinlock_remove,
.driver = {
.name = "stm32_hwspinlock",
.of_match_table = stm32_hwpinlock_ids,
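The probe above is rebuilt around managed teardown: the clock, runtime-PM state and hwspinlock registration are all undone automatically, with a custom devm action replacing the deleted ->remove(). Devres runs actions in reverse registration order on unbind or probe failure. The core of the idiom, reduced to the clock half with illustrative names:

#include <linux/clk.h>
#include <linux/device.h>

static void example_clk_off(void *data)
{
	clk_disable_unprepare(data);
}

static int example_enable_clk(struct device *dev, struct clk *clk)
{
	int ret = clk_prepare_enable(clk);

	if (ret)
		return ret;

	/* Undone automatically, after later-registered resources. */
	return devm_add_action_or_reset(dev, example_clk_off, clk);
}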
diff --git a/drivers/hwtracing/coresight/coresight-cfg-preload.c b/drivers/hwtracing/coresight/coresight-cfg-preload.c
index 751af3710d56..e237a4edfa09 100644
--- a/drivers/hwtracing/coresight/coresight-cfg-preload.c
+++ b/drivers/hwtracing/coresight/coresight-cfg-preload.c
@@ -24,8 +24,13 @@ static struct cscfg_config_desc *preload_cfgs[] = {
NULL
};
+static struct cscfg_load_owner_info preload_owner = {
+ .type = CSCFG_OWNER_PRELOAD,
+};
+
/* preload called on initialisation */
-int cscfg_preload(void)
+int cscfg_preload(void *owner_handle)
{
- return cscfg_load_config_sets(preload_cfgs, preload_feats);
+ preload_owner.owner_handle = owner_handle;
+ return cscfg_load_config_sets(preload_cfgs, preload_feats, &preload_owner);
}
diff --git a/drivers/hwtracing/coresight/coresight-config.h b/drivers/hwtracing/coresight/coresight-config.h
index 25eb6c632692..9bd44b940add 100644
--- a/drivers/hwtracing/coresight/coresight-config.h
+++ b/drivers/hwtracing/coresight/coresight-config.h
@@ -97,6 +97,8 @@ struct cscfg_regval_desc {
* @params_desc: array of parameters used.
* @nr_regs: number of registers used.
* @regs_desc: array of registers used.
+ * @load_owner: handle to load owner for dynamic load and unload of features.
+ * @fs_group: reference to configfs group for dynamic unload.
*/
struct cscfg_feature_desc {
const char *name;
@@ -107,6 +109,8 @@ struct cscfg_feature_desc {
struct cscfg_parameter_desc *params_desc;
int nr_regs;
struct cscfg_regval_desc *regs_desc;
+ void *load_owner;
+ struct config_group *fs_group;
};
/**
@@ -128,7 +132,8 @@ struct cscfg_feature_desc {
* @presets: Array of preset values.
* @event_ea: Extended attribute for perf event value
* @active_cnt: ref count for activate on this configuration.
- *
+ * @load_owner: handle to load owner for dynamic load and unload of configs.
+ * @fs_group: reference to configfs group for dynamic unload.
*/
struct cscfg_config_desc {
const char *name;
@@ -141,6 +146,8 @@ struct cscfg_config_desc {
const u64 *presets; /* nr_presets * nr_total_params */
struct dev_ext_attribute *event_ea;
atomic_t active_cnt;
+ void *load_owner;
+ struct config_group *fs_group;
};
/**
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index 8a18c71df37a..88653d1c06a4 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -729,7 +729,7 @@ static inline void coresight_put_ref(struct coresight_device *csdev)
* coresight_grab_device - Power up this device and any of the helper
* devices connected to it for trace operation. Since the helper devices
* don't appear on the trace path, they should be handled along with the
- * the master device.
+ * master device.
*/
static int coresight_grab_device(struct coresight_device *csdev)
{
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index 86a313857b58..bf18128cf5de 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -722,7 +722,16 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
{
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
struct etm4_enable_arg arg = { };
- int ret;
+ unsigned long cfg_hash;
+ int ret, preset;
+
+ /* enable any config activated by configfs */
+ cscfg_config_sysfs_get_active_cfg(&cfg_hash, &preset);
+ if (cfg_hash) {
+ ret = cscfg_csdev_enable_active_config(csdev, cfg_hash, preset);
+ if (ret)
+ return ret;
+ }
spin_lock(&drvdata->spinlock);
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 58062a5a8238..bb14a3a8a921 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -856,13 +856,11 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret;
void __iomem *base;
- unsigned long *guaranteed;
struct device *dev = &adev->dev;
struct coresight_platform_data *pdata = NULL;
struct stm_drvdata *drvdata;
struct resource *res = &adev->res;
struct resource ch_res;
- size_t bitmap_size;
struct coresight_desc desc = { 0 };
desc.name = coresight_alloc_device_name(&stm_devs, dev);
@@ -904,12 +902,10 @@ static int stm_probe(struct amba_device *adev, const struct amba_id *id)
else
drvdata->numsp = stm_num_stimulus_port(drvdata);
- bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);
-
- guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
- if (!guaranteed)
+ drvdata->chs.guaranteed = devm_bitmap_zalloc(dev, drvdata->numsp,
+ GFP_KERNEL);
+ if (!drvdata->chs.guaranteed)
return -ENOMEM;
- drvdata->chs.guaranteed = guaranteed;
spin_lock_init(&drvdata->spinlock);
diff --git a/drivers/hwtracing/coresight/coresight-syscfg-configfs.c b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c
index c547816b9000..433ede94dd63 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg-configfs.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg-configfs.c
@@ -6,6 +6,7 @@
#include <linux/configfs.h>
+#include "coresight-config.h"
#include "coresight-syscfg-configfs.h"
/* create a default ci_type. */
@@ -87,9 +88,75 @@ static ssize_t cscfg_cfg_values_show(struct config_item *item, char *page)
}
CONFIGFS_ATTR_RO(cscfg_cfg_, values);
+static ssize_t cscfg_cfg_enable_show(struct config_item *item, char *page)
+{
+ struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
+ struct cscfg_fs_config, group);
+
+ return scnprintf(page, PAGE_SIZE, "%d\n", fs_config->active);
+}
+
+static ssize_t cscfg_cfg_enable_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
+ struct cscfg_fs_config, group);
+ int err;
+ bool val;
+
+ err = kstrtobool(page, &val);
+ if (!err)
+ err = cscfg_config_sysfs_activate(fs_config->config_desc, val);
+ if (!err) {
+ fs_config->active = val;
+ if (val)
+ cscfg_config_sysfs_set_preset(fs_config->preset);
+ }
+ return err ? err : count;
+}
+CONFIGFS_ATTR(cscfg_cfg_, enable);
+
+static ssize_t cscfg_cfg_preset_show(struct config_item *item, char *page)
+{
+ struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
+ struct cscfg_fs_config, group);
+
+ return scnprintf(page, PAGE_SIZE, "%d\n", fs_config->preset);
+}
+
+static ssize_t cscfg_cfg_preset_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct cscfg_fs_config *fs_config = container_of(to_config_group(item),
+ struct cscfg_fs_config, group);
+ int preset, err;
+
+ err = kstrtoint(page, 0, &preset);
+ if (!err) {
+ /*
+ * presets start at 1, and go up to max (15),
+ * but the config may provide fewer.
+ */
+ if ((preset < 1) || (preset > fs_config->config_desc->nr_presets))
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ /* set new value */
+ fs_config->preset = preset;
+ /* set on system if active */
+ if (fs_config->active)
+ cscfg_config_sysfs_set_preset(fs_config->preset);
+ }
+ return err ? err : count;
+}
+CONFIGFS_ATTR(cscfg_cfg_, preset);
+
static struct configfs_attribute *cscfg_config_view_attrs[] = {
&cscfg_cfg_attr_description,
&cscfg_cfg_attr_feature_refs,
+ &cscfg_cfg_attr_enable,
+ &cscfg_cfg_attr_preset,
NULL,
};
@@ -334,9 +401,19 @@ int cscfg_configfs_add_config(struct cscfg_config_desc *config_desc)
if (IS_ERR(new_group))
return PTR_ERR(new_group);
err = configfs_register_group(&cscfg_configs_grp, new_group);
+ if (!err)
+ config_desc->fs_group = new_group;
return err;
}
+void cscfg_configfs_del_config(struct cscfg_config_desc *config_desc)
+{
+ if (config_desc->fs_group) {
+ configfs_unregister_group(config_desc->fs_group);
+ config_desc->fs_group = NULL;
+ }
+}
+
static struct config_item_type cscfg_features_type = {
.ct_owner = THIS_MODULE,
};
@@ -358,9 +435,19 @@ int cscfg_configfs_add_feature(struct cscfg_feature_desc *feat_desc)
if (IS_ERR(new_group))
return PTR_ERR(new_group);
err = configfs_register_group(&cscfg_features_grp, new_group);
+ if (!err)
+ feat_desc->fs_group = new_group;
return err;
}
+void cscfg_configfs_del_feature(struct cscfg_feature_desc *feat_desc)
+{
+ if (feat_desc->fs_group) {
+ configfs_unregister_group(feat_desc->fs_group);
+ feat_desc->fs_group = NULL;
+ }
+}
+
int cscfg_configfs_init(struct cscfg_manager *cscfg_mgr)
{
struct configfs_subsystem *subsys;
diff --git a/drivers/hwtracing/coresight/coresight-syscfg-configfs.h b/drivers/hwtracing/coresight/coresight-syscfg-configfs.h
index 7d6ffe35ca4c..373d84d43268 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg-configfs.h
+++ b/drivers/hwtracing/coresight/coresight-syscfg-configfs.h
@@ -15,6 +15,8 @@
struct cscfg_fs_config {
struct cscfg_config_desc *config_desc;
struct config_group group;
+ bool active;
+ int preset;
};
/* container for feature view */
@@ -41,5 +43,7 @@ int cscfg_configfs_init(struct cscfg_manager *cscfg_mgr);
void cscfg_configfs_release(struct cscfg_manager *cscfg_mgr);
int cscfg_configfs_add_config(struct cscfg_config_desc *config_desc);
int cscfg_configfs_add_feature(struct cscfg_feature_desc *feat_desc);
+void cscfg_configfs_del_config(struct cscfg_config_desc *config_desc);
+void cscfg_configfs_del_feature(struct cscfg_feature_desc *feat_desc);
#endif /* CORESIGHT_SYSCFG_CONFIGFS_H */
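The new enable and preset files follow the usual configfs attribute pattern: a _show/_store pair is bound into a struct configfs_attribute by CONFIGFS_ATTR() (which names it <prefix>attr_<name>) and then listed in the group's attribute array. Reduced to a skeleton with illustrative names:

#include <linux/configfs.h>
#include <linux/kernel.h>

static bool example_flag;

static ssize_t example_grp_flag_show(struct config_item *item, char *page)
{
	return scnprintf(page, PAGE_SIZE, "%d\n", example_flag);
}

static ssize_t example_grp_flag_store(struct config_item *item,
				      const char *page, size_t count)
{
	bool val;
	int err = kstrtobool(page, &val);

	if (err)
		return err;
	example_flag = val;
	return count;
}
CONFIGFS_ATTR(example_grp_, flag);

static struct configfs_attribute *example_grp_attrs[] = {
	&example_grp_attr_flag,		/* generated by CONFIGFS_ATTR() */
	NULL,
};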
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.c b/drivers/hwtracing/coresight/coresight-syscfg.c
index 43054568430f..098fc34c4829 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg.c
+++ b/drivers/hwtracing/coresight/coresight-syscfg.c
@@ -250,6 +250,13 @@ static int cscfg_check_feat_for_cfg(struct cscfg_config_desc *config_desc)
static int cscfg_load_feat(struct cscfg_feature_desc *feat_desc)
{
int err;
+ struct cscfg_feature_desc *feat_desc_exist;
+
+ /* new feature must have unique name */
+ list_for_each_entry(feat_desc_exist, &cscfg_mgr->feat_desc_list, item) {
+ if (!strcmp(feat_desc_exist->name, feat_desc->name))
+ return -EEXIST;
+ }
/* add feature to any matching registered devices */
err = cscfg_add_feat_to_csdevs(feat_desc);
@@ -267,6 +274,13 @@ static int cscfg_load_feat(struct cscfg_feature_desc *feat_desc)
static int cscfg_load_config(struct cscfg_config_desc *config_desc)
{
int err;
+ struct cscfg_config_desc *config_desc_exist;
+
+ /* new configuration must have a unique name */
+ list_for_each_entry(config_desc_exist, &cscfg_mgr->config_desc_list, item) {
+ if (!strcmp(config_desc_exist->name, config_desc->name))
+ return -EEXIST;
+ }
/* validate features are present */
err = cscfg_check_feat_for_cfg(config_desc);
@@ -354,6 +368,92 @@ unlock_exit:
return err;
}
+/*
+ * Conditionally up reference count on owner to prevent unload.
+ *
+ * module loaded configs need to be locked in to prevent premature unload.
+ */
+static int cscfg_owner_get(struct cscfg_load_owner_info *owner_info)
+{
+ if ((owner_info->type == CSCFG_OWNER_MODULE) &&
+ (!try_module_get(owner_info->owner_handle)))
+ return -EINVAL;
+ return 0;
+}
+
+/* conditionally lower ref count on an owner */
+static void cscfg_owner_put(struct cscfg_load_owner_info *owner_info)
+{
+ if (owner_info->type == CSCFG_OWNER_MODULE)
+ module_put(owner_info->owner_handle);
+}
+
+static void cscfg_remove_owned_csdev_configs(struct coresight_device *csdev, void *load_owner)
+{
+ struct cscfg_config_csdev *config_csdev, *tmp;
+
+ if (list_empty(&csdev->config_csdev_list))
+ return;
+
+ list_for_each_entry_safe(config_csdev, tmp, &csdev->config_csdev_list, node) {
+ if (config_csdev->config_desc->load_owner == load_owner)
+ list_del(&config_csdev->node);
+ }
+}
+
+static void cscfg_remove_owned_csdev_features(struct coresight_device *csdev, void *load_owner)
+{
+ struct cscfg_feature_csdev *feat_csdev, *tmp;
+
+ if (list_empty(&csdev->feature_csdev_list))
+ return;
+
+ list_for_each_entry_safe(feat_csdev, tmp, &csdev->feature_csdev_list, node) {
+ if (feat_csdev->feat_desc->load_owner == load_owner)
+ list_del(&feat_csdev->node);
+ }
+}
+
+/*
+ * removal is relatively easy - just remove from all lists, anything that
+ * matches the owner. Memory for the descriptors will be managed by the owner,
+ * memory for the csdev items is devm_ allocated with the individual csdev
+ * devices.
+ */
+static void cscfg_unload_owned_cfgs_feats(void *load_owner)
+{
+ struct cscfg_config_desc *config_desc, *cfg_tmp;
+ struct cscfg_feature_desc *feat_desc, *feat_tmp;
+ struct cscfg_registered_csdev *csdev_item;
+
+ /* remove from each csdev instance feature and config lists */
+ list_for_each_entry(csdev_item, &cscfg_mgr->csdev_desc_list, item) {
+ /*
+ * for each csdev, check the loaded lists and remove if
+ * referenced descriptor is owned
+ */
+ cscfg_remove_owned_csdev_configs(csdev_item->csdev, load_owner);
+ cscfg_remove_owned_csdev_features(csdev_item->csdev, load_owner);
+ }
+
+ /* remove from the config descriptor lists */
+ list_for_each_entry_safe(config_desc, cfg_tmp, &cscfg_mgr->config_desc_list, item) {
+ if (config_desc->load_owner == load_owner) {
+ cscfg_configfs_del_config(config_desc);
+ etm_perf_del_symlink_cscfg(config_desc);
+ list_del(&config_desc->item);
+ }
+ }
+
+ /* remove from the feature descriptor lists */
+ list_for_each_entry_safe(feat_desc, feat_tmp, &cscfg_mgr->feat_desc_list, item) {
+ if (feat_desc->load_owner == load_owner) {
+ cscfg_configfs_del_feature(feat_desc);
+ list_del(&feat_desc->item);
+ }
+ }
+}
+
/**
* cscfg_load_config_sets - API function to load feature and config sets.
*
@@ -361,13 +461,22 @@ unlock_exit:
* descriptors and load into the system.
* Features are loaded first to ensure configuration dependencies can be met.
*
+ * To facilitate dynamic loading and unloading, features and configurations
+ * have a "load_owner", to allow later unload by the same owner. An owner may
+ * be a loadable module or configuration dynamically created via configfs.
+ * As later loaded configurations can use earlier loaded features, creating load
+ * dependencies, a load order list is maintained. Unload is strictly in the
+ * reverse order to load.
+ *
* @config_descs: 0 terminated array of configuration descriptors.
* @feat_descs: 0 terminated array of feature descriptors.
+ * @owner_info: Information on the owner of this set.
*/
int cscfg_load_config_sets(struct cscfg_config_desc **config_descs,
- struct cscfg_feature_desc **feat_descs)
+ struct cscfg_feature_desc **feat_descs,
+ struct cscfg_load_owner_info *owner_info)
{
- int err, i = 0;
+ int err = 0, i = 0;
mutex_lock(&cscfg_mutex);
@@ -380,8 +489,10 @@ int cscfg_load_config_sets(struct cscfg_config_desc **config_descs,
if (err) {
pr_err("coresight-syscfg: Failed to load feature %s\n",
feat_descs[i]->name);
+ cscfg_unload_owned_cfgs_feats(owner_info);
goto exit_unlock;
}
+ feat_descs[i]->load_owner = owner_info;
i++;
}
}
@@ -396,18 +507,86 @@ int cscfg_load_config_sets(struct cscfg_config_desc **config_descs,
if (err) {
pr_err("coresight-syscfg: Failed to load configuration %s\n",
config_descs[i]->name);
+ cscfg_unload_owned_cfgs_feats(owner_info);
goto exit_unlock;
}
+ config_descs[i]->load_owner = owner_info;
i++;
}
}
+ /* add the load owner to the load order list */
+ list_add_tail(&owner_info->item, &cscfg_mgr->load_order_list);
+ if (!list_is_singular(&cscfg_mgr->load_order_list)) {
+ /* lock previous item in load order list */
+ err = cscfg_owner_get(list_prev_entry(owner_info, item));
+ if (err) {
+ cscfg_unload_owned_cfgs_feats(owner_info);
+ list_del(&owner_info->item);
+ }
+ }
+
exit_unlock:
mutex_unlock(&cscfg_mutex);
return err;
}
EXPORT_SYMBOL_GPL(cscfg_load_config_sets);
+/**
+ * cscfg_unload_config_sets - unload a set of configurations by owner.
+ *
+ * Dynamic unload of configuration and feature sets is done on the basis of
+ * the load owner of that set. Later loaded configurations can depend on
+ * features loaded earlier.
+ *
+ * Therefore, unload is only possible if:-
+ * 1) no configurations are active.
+ * 2) the set being unloaded was the last to be loaded to maintain dependencies.
+ *
+ * @owner_info: Information on owner for set being unloaded.
+ */
+int cscfg_unload_config_sets(struct cscfg_load_owner_info *owner_info)
+{
+ int err = 0;
+ struct cscfg_load_owner_info *load_list_item = NULL;
+
+ mutex_lock(&cscfg_mutex);
+
+ /* cannot unload if anything is active */
+ if (atomic_read(&cscfg_mgr->sys_active_cnt)) {
+ err = -EBUSY;
+ goto exit_unlock;
+ }
+
+ /* cannot unload if not last loaded in load order */
+ if (!list_empty(&cscfg_mgr->load_order_list)) {
+ load_list_item = list_last_entry(&cscfg_mgr->load_order_list,
+ struct cscfg_load_owner_info, item);
+ if (load_list_item != owner_info)
+ load_list_item = NULL;
+ }
+
+ if (!load_list_item) {
+ err = -EINVAL;
+ goto exit_unlock;
+ }
+
+ /* unload all belonging to load_owner */
+ cscfg_unload_owned_cfgs_feats(owner_info);
+
+ /* remove from load order list */
+ if (!list_is_singular(&cscfg_mgr->load_order_list)) {
+ /* unlock previous item in load order list */
+ cscfg_owner_put(list_prev_entry(owner_info, item));
+ }
+ list_del(&owner_info->item);
+
+exit_unlock:
+ mutex_unlock(&cscfg_mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cscfg_unload_config_sets);
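Unload safety rests on two mechanisms visible above: strict reverse-order unloading via load_order_list, and pinning module-backed owners with try_module_get() while their configs are active or depended upon. The pinning half in isolation; names are illustrative:

#include <linux/module.h>

static int example_pin_owner(struct module *owner)
{
	/* try_module_get() fails if the module is already unloading. */
	if (owner && !try_module_get(owner))
		return -ENODEV;
	return 0;
}

static void example_unpin_owner(struct module *owner)
{
	module_put(owner);	/* module_put(NULL) is a no-op */
}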
+
/* Handle coresight device registration and add configs and features to devices */
/* iterate through config lists and load matching configs to device */
@@ -566,32 +745,26 @@ unlock_exit:
}
EXPORT_SYMBOL_GPL(cscfg_csdev_reset_feats);
-/**
- * cscfg_activate_config - Mark a configuration descriptor as active.
- *
- * This will be seen when csdev devices are enabled in the system.
- * Only activated configurations can be enabled on individual devices.
- * Activation protects the configuration from alteration or removal while
- * active.
- *
- * Selection by hash value - generated from the configuration name when it
- * was loaded and added to the cs_etm/configurations file system for selection
- * by perf.
+/*
+ * This activates a configuration for either perf or sysfs. Perf can have
+ * multiple active configs, selected per event; sysfs is limited to one.
*
* Increments the configuration descriptor active count and the global active
* count.
*
* @cfg_hash: Hash value of the selected configuration name.
*/
-int cscfg_activate_config(unsigned long cfg_hash)
+static int _cscfg_activate_config(unsigned long cfg_hash)
{
struct cscfg_config_desc *config_desc;
int err = -EINVAL;
- mutex_lock(&cscfg_mutex);
-
list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
if ((unsigned long)config_desc->event_ea->var == cfg_hash) {
+			/* must ensure that the config cannot be unloaded while in use */
+ err = cscfg_owner_get(config_desc->load_owner);
+ if (err)
+ break;
/*
* increment the global active count - control changes to
* active configurations
@@ -609,6 +782,101 @@ int cscfg_activate_config(unsigned long cfg_hash)
break;
}
}
+ return err;
+}
+
+static void _cscfg_deactivate_config(unsigned long cfg_hash)
+{
+ struct cscfg_config_desc *config_desc;
+
+ list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
+ if ((unsigned long)config_desc->event_ea->var == cfg_hash) {
+ atomic_dec(&config_desc->active_cnt);
+ atomic_dec(&cscfg_mgr->sys_active_cnt);
+ cscfg_owner_put(config_desc->load_owner);
+ dev_dbg(cscfg_device(), "Deactivate config %s.\n", config_desc->name);
+ break;
+ }
+ }
+}
+
+/*
+ * called from configfs to set/clear the active configuration for use when
+ * using sysfs to control trace.
+ */
+int cscfg_config_sysfs_activate(struct cscfg_config_desc *config_desc, bool activate)
+{
+ unsigned long cfg_hash;
+ int err = 0;
+
+ mutex_lock(&cscfg_mutex);
+
+ cfg_hash = (unsigned long)config_desc->event_ea->var;
+
+ if (activate) {
+		/* cannot activate if a sysfs config is already active */
+ if (cscfg_mgr->sysfs_active_config) {
+ err = -EBUSY;
+ goto exit_unlock;
+ }
+ err = _cscfg_activate_config(cfg_hash);
+ if (!err)
+ cscfg_mgr->sysfs_active_config = cfg_hash;
+ } else {
+ /* disable if matching current value */
+ if (cscfg_mgr->sysfs_active_config == cfg_hash) {
+ _cscfg_deactivate_config(cfg_hash);
+ cscfg_mgr->sysfs_active_config = 0;
+ } else
+ err = -EINVAL;
+ }
+
+exit_unlock:
+ mutex_unlock(&cscfg_mutex);
+ return err;
+}
+
+/* set the sysfs preset value */
+void cscfg_config_sysfs_set_preset(int preset)
+{
+ mutex_lock(&cscfg_mutex);
+ cscfg_mgr->sysfs_active_preset = preset;
+ mutex_unlock(&cscfg_mutex);
+}
+
+/*
+ * Used by a device to get the config and preset selected as active in configfs,
+ * when using sysfs to control trace.
+ */
+void cscfg_config_sysfs_get_active_cfg(unsigned long *cfg_hash, int *preset)
+{
+ mutex_lock(&cscfg_mutex);
+ *preset = cscfg_mgr->sysfs_active_preset;
+ *cfg_hash = cscfg_mgr->sysfs_active_config;
+ mutex_unlock(&cscfg_mutex);
+}
+EXPORT_SYMBOL_GPL(cscfg_config_sysfs_get_active_cfg);
+
+/**
+ * cscfg_activate_config - Mark a configuration descriptor as active.
+ *
+ * This will be seen when csdev devices are enabled in the system.
+ * Only activated configurations can be enabled on individual devices.
+ * Activation protects the configuration from alteration or removal while
+ * active.
+ *
+ * Selection by hash value - generated from the configuration name when it
+ * was loaded and added to the cs_etm/configurations file system for selection
+ * by perf.
+ *
+ * @cfg_hash: Hash value of the selected configuration name.
+ */
+int cscfg_activate_config(unsigned long cfg_hash)
+{
+ int err = 0;
+
+ mutex_lock(&cscfg_mutex);
+ err = _cscfg_activate_config(cfg_hash);
mutex_unlock(&cscfg_mutex);
return err;
@@ -624,18 +892,8 @@ EXPORT_SYMBOL_GPL(cscfg_activate_config);
*/
void cscfg_deactivate_config(unsigned long cfg_hash)
{
- struct cscfg_config_desc *config_desc;
-
mutex_lock(&cscfg_mutex);
-
- list_for_each_entry(config_desc, &cscfg_mgr->config_desc_list, item) {
- if ((unsigned long)config_desc->event_ea->var == cfg_hash) {
- atomic_dec(&config_desc->active_cnt);
- atomic_dec(&cscfg_mgr->sys_active_cnt);
- dev_dbg(cscfg_device(), "Deactivate config %s.\n", config_desc->name);
- break;
- }
- }
+ _cscfg_deactivate_config(cfg_hash);
mutex_unlock(&cscfg_mutex);
}
EXPORT_SYMBOL_GPL(cscfg_deactivate_config);
@@ -827,10 +1085,11 @@ int __init cscfg_init(void)
INIT_LIST_HEAD(&cscfg_mgr->csdev_desc_list);
INIT_LIST_HEAD(&cscfg_mgr->feat_desc_list);
INIT_LIST_HEAD(&cscfg_mgr->config_desc_list);
+ INIT_LIST_HEAD(&cscfg_mgr->load_order_list);
atomic_set(&cscfg_mgr->sys_active_cnt, 0);
/* preload built-in configurations */
- err = cscfg_preload();
+ err = cscfg_preload(THIS_MODULE);
if (err)
goto exit_err;
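For context, a minimal sketch of how a loadable module might drive this owner-based API. Everything named example_* is hypothetical; only cscfg_load_config_sets(), cscfg_unload_config_sets(), struct cscfg_load_owner_info and CSCFG_OWNER_MODULE come from the patch itself.

/* Hypothetical module-side usage of the owner-based load/unload API. */
static struct cscfg_config_desc *example_cfgs[] = {
	/* &example_cfg_desc, -- descriptor definitions omitted */
	NULL			/* 0 terminated, as the API requires */
};
static struct cscfg_feature_desc *example_feats[] = { NULL };

static struct cscfg_load_owner_info example_owner = {
	.type		= CSCFG_OWNER_MODULE,
	.owner_handle	= THIS_MODULE,	/* interpreted according to .type */
};

static int __init example_init(void)
{
	/* appends example_owner to the tail of load_order_list */
	return cscfg_load_config_sets(example_cfgs, example_feats,
				      &example_owner);
}

static void __exit example_exit(void)
{
	/*
	 * Refused with -EBUSY while any configuration is active, and
	 * with -EINVAL if this owner is not the most recently loaded.
	 */
	WARN_ON(cscfg_unload_config_sets(&example_owner));
}

module_init(example_init);
module_exit(example_exit);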
diff --git a/drivers/hwtracing/coresight/coresight-syscfg.h b/drivers/hwtracing/coresight/coresight-syscfg.h
index 8d018efd6ead..9106ffab4833 100644
--- a/drivers/hwtracing/coresight/coresight-syscfg.h
+++ b/drivers/hwtracing/coresight/coresight-syscfg.h
@@ -25,16 +25,22 @@
* @csdev_desc_list: List of coresight devices registered with the configuration manager.
* @feat_desc_list: List of feature descriptors to load into registered devices.
* @config_desc_list: List of system configuration descriptors to load into registered devices.
+ * @load_order_list: Ordered list of owners for dynamically loaded configurations.
* @sys_active_cnt: Total number of active config descriptor references.
* @cfgfs_subsys: configfs subsystem used to manage configurations.
+ * @sysfs_active_config: Active config hash used if CoreSight is controlled from sysfs.
+ * @sysfs_active_preset: Active preset index used if CoreSight is controlled from sysfs.
*/
struct cscfg_manager {
struct device dev;
struct list_head csdev_desc_list;
struct list_head feat_desc_list;
struct list_head config_desc_list;
+ struct list_head load_order_list;
atomic_t sys_active_cnt;
struct configfs_subsystem cfgfs_subsys;
+ u32 sysfs_active_config;
+ int sysfs_active_preset;
};
/* get reference to dev in cscfg_manager */
@@ -56,18 +62,44 @@ struct cscfg_registered_csdev {
struct list_head item;
};
+/* owner types for loading and unloading of config and feature sets */
+enum cscfg_load_owner_type {
+ CSCFG_OWNER_PRELOAD,
+ CSCFG_OWNER_MODULE,
+};
+
+/**
+ * Load item - item to add to the load order list allowing dynamic load and
+ *             unload of configurations and features. The caller loading a
+ *             config set provides a context handle for unload. The API
+ *             ensures that items are unloaded strictly in reverse order
+ *             from load so that dependencies are respected.
+ *
+ * @item: list entry for load order list.
+ * @type: type of owner - allows interpretation of owner_handle.
+ * @owner_handle: load context - handle for owner of loaded configs.
+ */
+struct cscfg_load_owner_info {
+ struct list_head item;
+ int type;
+ void *owner_handle;
+};
+
/* internal core operations for cscfg */
int __init cscfg_init(void);
void cscfg_exit(void);
-int cscfg_preload(void);
+int cscfg_preload(void *owner_handle);
const struct cscfg_feature_desc *cscfg_get_named_feat_desc(const char *name);
int cscfg_update_feat_param_val(struct cscfg_feature_desc *feat_desc,
int param_idx, u64 value);
-
+int cscfg_config_sysfs_activate(struct cscfg_config_desc *cfg_desc, bool activate);
+void cscfg_config_sysfs_set_preset(int preset);
/* syscfg manager external API */
int cscfg_load_config_sets(struct cscfg_config_desc **cfg_descs,
- struct cscfg_feature_desc **feat_descs);
+ struct cscfg_feature_desc **feat_descs,
+ struct cscfg_load_owner_info *owner_info);
+int cscfg_unload_config_sets(struct cscfg_load_owner_info *owner_info);
int cscfg_register_csdev(struct coresight_device *csdev, u32 match_flags,
struct cscfg_csdev_feat_ops *ops);
void cscfg_unregister_csdev(struct coresight_device *csdev);
@@ -77,5 +109,6 @@ void cscfg_csdev_reset_feats(struct coresight_device *csdev);
int cscfg_csdev_enable_active_config(struct coresight_device *csdev,
unsigned long cfg_hash, int preset);
void cscfg_csdev_disable_active_config(struct coresight_device *csdev);
+void cscfg_config_sysfs_get_active_cfg(unsigned long *cfg_hash, int *preset);
#endif /* CORESIGHT_SYSCFG_H */
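The load_order_list turns unload into a strict LIFO: only the tail entry may be removed, so an earlier set can never vanish from under a later one that depends on it. A sketch of that invariant, assuming only the standard <linux/list.h> helpers (the function name is illustrative):

/* Sketch: an owner is unloadable only if it is the newest entry. */
static bool cscfg_owner_is_tail(struct list_head *load_order_list,
				struct cscfg_load_owner_info *owner)
{
	return !list_empty(load_order_list) &&
	       list_last_entry(load_order_list,
			       struct cscfg_load_owner_info, item) == owner;
}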
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c6b854a9e476..8a6c6ee28556 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -488,7 +488,7 @@ config I2C_BRCMSTB
config I2C_CADENCE
tristate "Cadence I2C Controller"
- depends on ARCH_ZYNQ || ARM64 || XTENSA
+ depends on ARCH_ZYNQ || ARM64 || XTENSA || COMPILE_TEST
help
Say yes here to select Cadence I2C Host Controller. This controller is
e.g. used by Xilinx Zynq.
@@ -617,7 +617,7 @@ config I2C_EXYNOS5
help
High-speed I2C controller on Samsung Exynos5 and newer Samsung SoCs:
Exynos5250, Exynos5260, Exynos5410, Exynos542x, Exynos5800,
- Exynos5433 and Exynos7.
+ Exynos5433, Exynos7, Exynos850 and ExynosAutoV9.
Choose Y here only if you build for such Samsung SoC.
config I2C_GPIO
@@ -680,7 +680,7 @@ config I2C_IMG
config I2C_IMX
tristate "IMX I2C interface"
- depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE
+ depends on ARCH_MXC || ARCH_LAYERSCAPE || COLDFIRE || COMPILE_TEST
select I2C_SLAVE
help
Say Y here if you want to use the IIC bus controller on
@@ -935,7 +935,7 @@ config I2C_QCOM_GENI
config I2C_QUP
tristate "Qualcomm QUP based I2C controller"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM || COMPILE_TEST
help
If you say yes to this option, support will be included for the
built-in I2C interface on the Qualcomm SoCs.
@@ -1153,22 +1153,12 @@ config I2C_XILINX
This driver can also be built as a module. If so, the module
will be called xilinx_i2c.
-config I2C_XLR
- tristate "Netlogic XLR I2C support"
- depends on CPU_XLR || COMPILE_TEST
- help
- This driver enables support for the on-chip I2C interface of
- the Netlogic XLR/XLS MIPS processors and Sigma Designs SOCs.
-
- This driver can also be built as a module. If so, the module
- will be called i2c-xlr.
-
config I2C_XLP9XX
- tristate "XLP9XX I2C support"
- depends on CPU_XLP || ARCH_THUNDER2 || COMPILE_TEST
+ tristate "Cavium ThunderX2 I2C support"
+ depends on ARCH_THUNDER2 || COMPILE_TEST
help
This driver enables support for the on-chip I2C interface of
- the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors.
+ the Cavium ThunderX2 processors. (Originally on Netlogic XLP SoCs.)
This driver can also be built as a module. If so, the module will
be called i2c-xlp9xx.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index d85899fef8c7..1d00dce77098 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -119,7 +119,6 @@ obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o
i2c-thunderx-objs := i2c-octeon-core.o i2c-thunderx-pcidrv.o
obj-$(CONFIG_I2C_THUNDERX) += i2c-thunderx.o
obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o
-obj-$(CONFIG_I2C_XLR) += i2c-xlr.o
obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o
obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o
diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c
index 67e8b97c0c95..771e53d3d197 100644
--- a/drivers/i2c/busses/i2c-aspeed.c
+++ b/drivers/i2c/busses/i2c-aspeed.c
@@ -16,8 +16,6 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
-#include <linux/irqchip/chained_irq.h>
-#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 37443edbf754..5149454eef4a 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -23,6 +23,11 @@
#define BCM2835_I2C_FIFO 0x10
#define BCM2835_I2C_DIV 0x14
#define BCM2835_I2C_DEL 0x18
+/*
+ * 16-bit field for the number of SCL cycles to wait after rising SCL
+ * before deciding the slave is not responding. 0 disables the
+ * timeout detection.
+ */
#define BCM2835_I2C_CLKT 0x1c
#define BCM2835_I2C_C_READ BIT(0)
@@ -402,7 +407,7 @@ static const struct i2c_adapter_quirks bcm2835_i2c_quirks = {
static int bcm2835_i2c_probe(struct platform_device *pdev)
{
struct bcm2835_i2c_dev *i2c_dev;
- struct resource *mem, *irq;
+ struct resource *mem;
int ret;
struct i2c_adapter *adap;
struct clk *mclk;
@@ -452,12 +457,9 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
return ret;
}
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(&pdev->dev, "No IRQ resource\n");
- return -ENODEV;
- }
- i2c_dev->irq = irq->start;
+ i2c_dev->irq = platform_get_irq(pdev, 0);
+ if (i2c_dev->irq < 0)
+ return i2c_dev->irq;
ret = request_irq(i2c_dev->irq, bcm2835_i2c_isr, IRQF_SHARED,
dev_name(&pdev->dev), i2c_dev);
@@ -477,6 +479,12 @@ static int bcm2835_i2c_probe(struct platform_device *pdev)
adap->dev.of_node = pdev->dev.of_node;
adap->quirks = of_device_get_match_data(&pdev->dev);
+ /*
+	 * Disable the hardware clock stretching timeout. SMBus
+ * specifies a limit for how long the device can stretch the
+ * clock, but core I2C doesn't.
+ */
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_CLKT, 0);
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, 0);
ret = i2c_add_adapter(adap);
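The BCM2835 conversion above is the generic platform_get_irq() idiom: the helper logs its own error and returns a negative errno (including -EPROBE_DEFER), so the old IORESOURCE_IRQ lookup and dev_err() disappear. A minimal sketch with a hypothetical driver:

static irqreturn_t foo_isr(int irq, void *dev_id);

static int foo_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);	/* logs on failure itself */
	if (irq < 0)
		return irq;		/* may be -EPROBE_DEFER; propagate */

	/* foo_isr is hypothetical; shared IRQs need a non-NULL dev_id */
	return request_irq(irq, foo_isr, IRQF_SHARED,
			   dev_name(&pdev->dev), pdev);
}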
diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
index 490ee3962645..b00f35c0b066 100644
--- a/drivers/i2c/busses/i2c-brcmstb.c
+++ b/drivers/i2c/busses/i2c-brcmstb.c
@@ -673,7 +673,7 @@ static int brcmstb_i2c_probe(struct platform_device *pdev)
/* set the data in/out register size for compatible SoCs */
if (of_device_is_compatible(dev->device->of_node,
- "brcmstb,brcmper-i2c"))
+ "brcm,brcmper-i2c"))
dev->data_regsz = sizeof(u8);
else
dev->data_regsz = sizeof(u32);
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 60a2e750cee9..4b26cba40139 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -191,23 +191,26 @@ struct reset_control;
* @cmd_complete: tx completion indicator
* @clk: input reference clock
* @pclk: clock required to access the registers
+ * @rst: optional reset for the controller
* @slave: represent an I2C slave device
+ * @get_clk_rate_khz: callback to retrieve the IP specific clock rate, in kHz
 * @cmd_err: run time hardware error code
* @msgs: points to an array of messages currently being transferred
* @msgs_num: the number of elements in msgs
- * @msg_write_idx: the element index of the current tx message in the msgs
- * array
+ * @msg_write_idx: the element index of the current tx message in the msgs array
* @tx_buf_len: the length of the current tx buffer
* @tx_buf: the current tx buffer
- * @msg_read_idx: the element index of the current rx message in the msgs
- * array
+ * @msg_read_idx: the element index of the current rx message in the msgs array
* @rx_buf_len: the length of the current rx buffer
* @rx_buf: the current rx buffer
* @msg_err: error status of the current transfer
* @status: i2c master status, one of STATUS_*
* @abort_source: copy of the TX_ABRT_SOURCE register
* @irq: interrupt number for the i2c master
+ * @flags: platform specific flags like type of IO accessors or model
* @adapter: i2c subsystem adapter node
+ * @functionality: I2C_FUNC_* ORed bits to reflect what the controller supports
+ * @master_cfg: configuration for the master device
* @slave_cfg: configuration for the slave device
* @tx_fifo_depth: depth of the hardware tx fifo
* @rx_fifo_depth: depth of the hardware rx fifo
@@ -228,7 +231,9 @@ struct reset_control;
* @disable: function to disable the controller
* @disable_int: function to disable all interrupts
* @init: function to initialize the I2C hardware
+ * @set_sda_hold_time: callback to set IP specific SDA hold timing
* @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE
+ * @rinfo: I²C GPIO recovery information
* @suspended: set to true if the controller is suspended
*
* HCNT and LCNT parameters can be used if the platform knows more accurate
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 9b08bb5df38d..9177463c2cbb 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -701,7 +701,8 @@ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
regmap_read(dev->map, DW_IC_CLR_RX_DONE, &dummy);
if (stat & DW_IC_INTR_ACTIVITY)
regmap_read(dev->map, DW_IC_CLR_ACTIVITY, &dummy);
- if (stat & DW_IC_INTR_STOP_DET)
+ if ((stat & DW_IC_INTR_STOP_DET) &&
+ ((dev->rx_outstanding == 0) || (stat & DW_IC_INTR_RX_FULL)))
regmap_read(dev->map, DW_IC_CLR_STOP_DET, &dummy);
if (stat & DW_IC_INTR_START_DET)
regmap_read(dev->map, DW_IC_CLR_START_DET, &dummy);
@@ -723,6 +724,7 @@ static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev)
if (stat & DW_IC_INTR_TX_ABRT) {
dev->cmd_err |= DW_IC_ERR_TX_ABRT;
dev->status = STATUS_IDLE;
+ dev->rx_outstanding = 0;
/*
* Anytime TX_ABRT is set, the contents of the tx/rx
@@ -745,7 +747,8 @@ static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev)
*/
tx_aborted:
- if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
+ if (((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) &&
+ (dev->rx_outstanding == 0))
complete(&dev->cmd_complete);
else if (unlikely(dev->flags & ACCESS_INTR_MASK)) {
/* Workaround to trigger pending interrupt */
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 0f409a4c2da0..ef4250f8852b 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -38,11 +38,18 @@ enum dw_pci_ctl_id_t {
navi_amd,
};
+/*
+ * This is a legacy structure to describe the hardware counters
+ * to configure signal timings on the bus. For Device Tree platforms
+ * one should use the respective properties and for ACPI there is
+ * a set of ACPI methods that provide these counters. No new
+ * platform should use this structure.
+ */
struct dw_scl_sda_cfg {
- u32 ss_hcnt;
- u32 fs_hcnt;
- u32 ss_lcnt;
- u32 fs_lcnt;
+ u16 ss_hcnt;
+ u16 fs_hcnt;
+ u16 ss_lcnt;
+ u16 fs_lcnt;
u32 sda_hold;
};
@@ -206,8 +213,7 @@ static struct dw_pci_controller dw_pci_controllers[] = {
},
};
-#ifdef CONFIG_PM
-static int i2c_dw_pci_suspend(struct device *dev)
+static int __maybe_unused i2c_dw_pci_suspend(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
@@ -217,7 +223,7 @@ static int i2c_dw_pci_suspend(struct device *dev)
return 0;
}
-static int i2c_dw_pci_resume(struct device *dev)
+static int __maybe_unused i2c_dw_pci_resume(struct device *dev)
{
struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
int ret;
@@ -227,7 +233,6 @@ static int i2c_dw_pci_resume(struct device *dev)
return ret;
}
-#endif
static UNIVERSAL_DEV_PM_OPS(i2c_dw_pm_ops, i2c_dw_pci_suspend,
i2c_dw_pci_resume, NULL);
@@ -241,28 +246,24 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
struct dw_pci_controller *controller;
struct dw_scl_sda_cfg *cfg;
- if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers)) {
- dev_err(&pdev->dev, "%s: invalid driver data %ld\n", __func__,
- id->driver_data);
- return -EINVAL;
- }
+ if (id->driver_data >= ARRAY_SIZE(dw_pci_controllers))
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "Invalid driver data %ld\n",
+ id->driver_data);
controller = &dw_pci_controllers[id->driver_data];
r = pcim_enable_device(pdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to enable I2C PCI device (%d)\n",
- r);
- return r;
- }
+ if (r)
+ return dev_err_probe(&pdev->dev, r,
+ "Failed to enable I2C PCI device\n");
pci_set_master(pdev);
r = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
- if (r) {
- dev_err(&pdev->dev, "I/O memory remapping failed\n");
- return r;
- }
+ if (r)
+ return dev_err_probe(&pdev->dev, r,
+ "I/O memory remapping failed\n");
dev = devm_kzalloc(&pdev->dev, sizeof(struct dw_i2c_dev), GFP_KERNEL);
if (!dev)
@@ -352,9 +353,6 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdev);
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("i2c_designware-pci");
-
static const struct pci_device_id i2_designware_pci_ids[] = {
/* Medfield */
{ PCI_VDEVICE(INTEL, 0x0817), medfield },
@@ -411,9 +409,10 @@ static struct pci_driver dw_i2c_driver = {
.pm = &i2c_dw_pm_ops,
},
};
-
module_pci_driver(dw_i2c_driver);
+/* Work with hotplug and coldplug */
+MODULE_ALIAS("i2c_designware-pci");
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("Synopsys DesignWare PCI I2C bus adapter");
MODULE_LICENSE("GPL");
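The probe-path rewrites above rely on dev_err_probe(), which logs the message, silently records the reason for -EPROBE_DEFER instead of spamming the log, and returns the error code so the whole failure path fits in one statement. A hedged sketch (the "core" clock name is illustrative):

static int foo_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk))
		/* logs (or notes -EPROBE_DEFER) and returns the error */
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get core clock\n");

	return 0;
}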
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 21113665ddea..2bd81abc86f6 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -293,6 +293,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
DPM_FLAG_MAY_SKIP_RESUME);
}
+ device_enable_async_suspend(&pdev->dev);
+
/* The code below assumes runtime PM to be disabled. */
WARN_ON(pm_runtime_enabled(&pdev->dev));
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index 97d4f3ac0abd..b812d1090c0f 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -169,6 +169,7 @@
enum i2c_type_exynos {
I2C_TYPE_EXYNOS5,
I2C_TYPE_EXYNOS7,
+ I2C_TYPE_EXYNOSAUTOV9,
};
struct exynos5_i2c {
@@ -181,7 +182,8 @@ struct exynos5_i2c {
unsigned int irq;
void __iomem *regs;
- struct clk *clk;
+ struct clk *clk; /* operating clock */
+ struct clk *pclk; /* bus clock */
struct device *dev;
int state;
@@ -230,6 +232,11 @@ static const struct exynos_hsi2c_variant exynos7_hsi2c_data = {
.hw = I2C_TYPE_EXYNOS7,
};
+static const struct exynos_hsi2c_variant exynosautov9_hsi2c_data = {
+ .fifo_depth = 64,
+ .hw = I2C_TYPE_EXYNOSAUTOV9,
+};
+
static const struct of_device_id exynos5_i2c_match[] = {
{
.compatible = "samsung,exynos5-hsi2c",
@@ -243,6 +250,9 @@ static const struct of_device_id exynos5_i2c_match[] = {
}, {
.compatible = "samsung,exynos7-hsi2c",
.data = &exynos7_hsi2c_data
+ }, {
+ .compatible = "samsung,exynosautov9-hsi2c",
+ .data = &exynosautov9_hsi2c_data
}, {},
};
MODULE_DEVICE_TABLE(of, exynos5_i2c_match);
@@ -282,6 +292,31 @@ static int exynos5_i2c_set_timing(struct exynos5_i2c *i2c, bool hs_timings)
int div, clk_cycle, temp;
/*
+ * In case of HSI2C controllers in ExynosAutoV9:
+ *
+ * FSCL = IPCLK / ((CLK_DIV + 1) * 16)
+ * T_SCL_LOW = IPCLK * (CLK_DIV + 1) * (N + M)
+ * [N : number of 0's in the TSCL_H_HS]
+ * [M : number of 0's in the TSCL_L_HS]
+ * T_SCL_HIGH = IPCLK * (CLK_DIV + 1) * (N + M)
+ * [N : number of 1's in the TSCL_H_HS]
+ * [M : number of 1's in the TSCL_L_HS]
+ *
+ * Result of (N + M) is always 8.
+	 * In the general case, we don't need to control timing_s1 and timing_s2.
+ */
+ if (i2c->variant->hw == I2C_TYPE_EXYNOSAUTOV9) {
+ div = ((clkin / (16 * i2c->op_clock)) - 1);
+ i2c_timing_s3 = div << 16;
+ if (hs_timings)
+ writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_HS3);
+ else
+ writel(i2c_timing_s3, i2c->regs + HSI2C_TIMING_FS3);
+
+ return 0;
+ }
+
+ /*
* In case of HSI2C controller in Exynos5 series
* FPCLK / FI2C =
* (CLK_DIV + 1) * (TSCLK_L + TSCLK_H + 2) + 8 + 2 * FLT_CYCLE
@@ -422,7 +457,10 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
writel(int_status, i2c->regs + HSI2C_INT_STATUS);
/* handle interrupt related to the transfer status */
- if (i2c->variant->hw == I2C_TYPE_EXYNOS7) {
+ switch (i2c->variant->hw) {
+ case I2C_TYPE_EXYNOSAUTOV9:
+ fallthrough;
+ case I2C_TYPE_EXYNOS7:
if (int_status & HSI2C_INT_TRANS_DONE) {
i2c->trans_done = 1;
i2c->state = 0;
@@ -443,7 +481,12 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
i2c->state = -ETIMEDOUT;
goto stop;
}
- } else if (int_status & HSI2C_INT_I2C) {
+
+ break;
+ case I2C_TYPE_EXYNOS5:
+ if (!(int_status & HSI2C_INT_I2C))
+ break;
+
trans_status = readl(i2c->regs + HSI2C_TRANS_STATUS);
if (trans_status & HSI2C_NO_DEV_ACK) {
dev_dbg(i2c->dev, "No ACK from device\n");
@@ -465,6 +508,8 @@ static irqreturn_t exynos5_i2c_irq(int irqno, void *dev_id)
i2c->trans_done = 1;
i2c->state = 0;
}
+
+ break;
}
if ((i2c->msg->flags & I2C_M_RD) && (int_status &
@@ -569,13 +614,13 @@ static void exynos5_i2c_bus_check(struct exynos5_i2c *i2c)
{
unsigned long timeout;
- if (i2c->variant->hw != I2C_TYPE_EXYNOS7)
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS5)
return;
/*
- * HSI2C_MASTER_ST_LOSE state in EXYNOS7 variant before transaction
- * indicates that bus is stuck (SDA is low). In such case bus recovery
- * can be performed.
+ * HSI2C_MASTER_ST_LOSE state (in Exynos7 and ExynosAutoV9 variants)
+	 * before a transaction indicates that the bus is stuck (SDA is low).
+	 * In such a case, bus recovery can be performed.
*/
timeout = jiffies + msecs_to_jiffies(100);
for (;;) {
@@ -611,10 +656,10 @@ static void exynos5_i2c_message_start(struct exynos5_i2c *i2c, int stop)
unsigned long flags;
unsigned short trig_lvl;
- if (i2c->variant->hw == I2C_TYPE_EXYNOS7)
- int_en |= HSI2C_INT_I2C_TRANS;
- else
+ if (i2c->variant->hw == I2C_TYPE_EXYNOS5)
int_en |= HSI2C_INT_I2C;
+ else
+ int_en |= HSI2C_INT_I2C_TRANS;
i2c_ctl = readl(i2c->regs + HSI2C_CTL);
i2c_ctl &= ~(HSI2C_TXCHON | HSI2C_RXCHON);
@@ -713,10 +758,14 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
struct exynos5_i2c *i2c = adap->algo_data;
int i, ret;
- ret = clk_enable(i2c->clk);
+ ret = clk_enable(i2c->pclk);
if (ret)
return ret;
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ goto err_pclk;
+
for (i = 0; i < num; ++i) {
ret = exynos5_i2c_xfer_msg(i2c, msgs + i, i + 1 == num);
if (ret)
@@ -724,6 +773,8 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
}
clk_disable(i2c->clk);
+err_pclk:
+ clk_disable(i2c->pclk);
return ret ?: num;
}
@@ -763,10 +814,20 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
return -ENOENT;
}
- ret = clk_prepare_enable(i2c->clk);
+ i2c->pclk = devm_clk_get_optional(&pdev->dev, "hsi2c_pclk");
+ if (IS_ERR(i2c->pclk)) {
+ return dev_err_probe(&pdev->dev, PTR_ERR(i2c->pclk),
+ "cannot get pclk");
+ }
+
+ ret = clk_prepare_enable(i2c->pclk);
if (ret)
return ret;
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ goto err_pclk;
+
i2c->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(i2c->regs)) {
ret = PTR_ERR(i2c->regs);
@@ -809,11 +870,15 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
clk_disable(i2c->clk);
+ clk_disable(i2c->pclk);
return 0;
err_clk:
clk_disable_unprepare(i2c->clk);
+
+ err_pclk:
+ clk_disable_unprepare(i2c->pclk);
return ret;
}
@@ -824,6 +889,7 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
clk_unprepare(i2c->clk);
+ clk_unprepare(i2c->pclk);
return 0;
}
@@ -835,6 +901,7 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
i2c_mark_adapter_suspended(&i2c->adap);
clk_unprepare(i2c->clk);
+ clk_unprepare(i2c->pclk);
return 0;
}
@@ -844,21 +911,30 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
struct exynos5_i2c *i2c = dev_get_drvdata(dev);
int ret = 0;
- ret = clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->pclk);
if (ret)
return ret;
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ goto err_pclk;
+
ret = exynos5_hsi2c_clock_setup(i2c);
- if (ret) {
- clk_disable_unprepare(i2c->clk);
- return ret;
- }
+ if (ret)
+ goto err_clk;
exynos5_i2c_init(i2c);
clk_disable(i2c->clk);
+ clk_disable(i2c->pclk);
i2c_mark_adapter_resumed(&i2c->adap);
return 0;
+
+err_clk:
+ clk_disable_unprepare(i2c->clk);
+err_pclk:
+ clk_disable_unprepare(i2c->pclk);
+ return ret;
}
#endif
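The Exynos changes thread an optional second clock (pclk, the bus clock) through every path; devm_clk_get_optional() returns NULL when the clock is absent and the clk_* calls accept NULL, so the same code serves both old and new SoCs. The enable/unwind ordering, sketched under those assumptions (helper names hypothetical):

/* Sketch of the two-clock bring-up with strict reverse-order unwind. */
static int hsi2c_clocks_on(struct exynos5_i2c *i2c)
{
	int ret;

	ret = clk_prepare_enable(i2c->pclk);	/* bus clock first */
	if (ret)
		return ret;

	ret = clk_prepare_enable(i2c->clk);	/* then operating clock */
	if (ret)
		clk_disable_unprepare(i2c->pclk);	/* undo on failure */

	return ret;
}

static void hsi2c_clocks_off(struct exynos5_i2c *i2c)
{
	clk_disable_unprepare(i2c->clk);	/* reverse order of enable */
	clk_disable_unprepare(i2c->pclk);
}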
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 41446f9cc52d..7428cc6af5cc 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -328,22 +328,14 @@ static int i801_check_pre(struct i801_priv *priv)
status = inb_p(SMBHSTSTS(priv));
if (status & SMBHSTSTS_HOST_BUSY) {
- dev_err(&priv->pci_dev->dev, "SMBus is busy, can't use it!\n");
+ pci_err(priv->pci_dev, "SMBus is busy, can't use it!\n");
return -EBUSY;
}
status &= STATUS_FLAGS;
if (status) {
- dev_dbg(&priv->pci_dev->dev, "Clearing status flags (%02x)\n",
- status);
+ pci_dbg(priv->pci_dev, "Clearing status flags (%02x)\n", status);
outb_p(status, SMBHSTSTS(priv));
- status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
- if (status) {
- dev_err(&priv->pci_dev->dev,
- "Failed clearing status flags (%02x)\n",
- status);
- return -EBUSY;
- }
}
/*
@@ -356,27 +348,14 @@ static int i801_check_pre(struct i801_priv *priv)
if (priv->features & FEATURE_SMBUS_PEC) {
status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE;
if (status) {
- dev_dbg(&priv->pci_dev->dev,
- "Clearing aux status flags (%02x)\n", status);
+ pci_dbg(priv->pci_dev, "Clearing aux status flags (%02x)\n", status);
outb_p(status, SMBAUXSTS(priv));
- status = inb_p(SMBAUXSTS(priv)) & SMBAUXSTS_CRCE;
- if (status) {
- dev_err(&priv->pci_dev->dev,
- "Failed clearing aux status flags (%02x)\n",
- status);
- return -EBUSY;
- }
}
}
return 0;
}
-/*
- * Convert the status register to an error code, and clear it.
- * Note that status only contains the bits we want to clear, not the
- * actual register value.
- */
static int i801_check_post(struct i801_priv *priv, int status)
{
int result = 0;
@@ -401,7 +380,6 @@ static int i801_check_post(struct i801_priv *priv, int status)
!(status & SMBHSTSTS_FAILED))
dev_err(&priv->pci_dev->dev,
"Failed terminating the transaction\n");
- outb_p(STATUS_FLAGS, SMBHSTSTS(priv));
return -ETIMEDOUT;
}
@@ -440,9 +418,6 @@ static int i801_check_post(struct i801_priv *priv, int status)
dev_dbg(&priv->pci_dev->dev, "Lost arbitration\n");
}
- /* Clear status flags except BYTE_DONE, to be cleared by caller */
- outb_p(status, SMBHSTSTS(priv));
-
return result;
}
@@ -523,9 +498,11 @@ static int i801_block_transaction_by_block(struct i801_priv *priv,
return -EOPNOTSUPP;
}
+ /* Set block buffer mode */
+ outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
+
inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
- /* Use 32-byte buffer to process this transaction */
if (read_write == I2C_SMBUS_WRITE) {
len = data->block[0];
outb_p(len, SMBHSTDAT0(priv));
@@ -760,14 +737,6 @@ exit:
return i801_check_post(priv, status);
}
-static int i801_set_block_buffer_mode(struct i801_priv *priv)
-{
- outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
- if ((inb_p(SMBAUXCTL(priv)) & SMBAUXCTL_E32B) == 0)
- return -EIO;
- return 0;
-}
-
/* Block transaction function */
static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data *data,
char read_write, int command)
@@ -775,6 +744,11 @@ static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data *
int result = 0;
unsigned char hostc;
+ if (read_write == I2C_SMBUS_READ && command == I2C_SMBUS_BLOCK_DATA)
+ data->block[0] = I2C_SMBUS_BLOCK_MAX;
+ else if (data->block[0] < 1 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
+ return -EPROTO;
+
if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
if (read_write == I2C_SMBUS_WRITE) {
/* set I2C_EN bit in configuration register */
@@ -788,22 +762,11 @@ static int i801_block_transaction(struct i801_priv *priv, union i2c_smbus_data *
}
}
- if (read_write == I2C_SMBUS_WRITE
- || command == I2C_SMBUS_I2C_BLOCK_DATA) {
- if (data->block[0] < 1)
- data->block[0] = 1;
- if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
- data->block[0] = I2C_SMBUS_BLOCK_MAX;
- } else {
- data->block[0] = 32; /* max for SMBus block reads */
- }
-
/* Experience has shown that the block buffer can only be used for
SMBus (not I2C) block transactions, even though the datasheet
doesn't mention this limitation. */
- if ((priv->features & FEATURE_BLOCK_BUFFER)
- && command != I2C_SMBUS_I2C_BLOCK_DATA
- && i801_set_block_buffer_mode(priv) == 0)
+ if ((priv->features & FEATURE_BLOCK_BUFFER) &&
+ command != I2C_SMBUS_I2C_BLOCK_DATA)
result = i801_block_transaction_by_block(priv, data,
read_write,
command);
@@ -951,8 +914,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
}
out:
- /* Unlock the SMBus device for use by BIOS/ACPI */
- outb_p(SMBHSTSTS_INUSE_STS, SMBHSTSTS(priv));
+ /*
+ * Unlock the SMBus device for use by BIOS/ACPI,
+ * and clear status flags if not done already.
+ */
+ outb_p(SMBHSTSTS_INUSE_STS | STATUS_FLAGS, SMBHSTSTS(priv));
pm_runtime_mark_last_busy(&priv->pci_dev->dev);
pm_runtime_put_autosuspend(&priv->pci_dev->dev);
@@ -1009,66 +975,72 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = i801_func,
};
+#define FEATURES_ICH5 (FEATURE_BLOCK_PROC | FEATURE_I2C_BLOCK_READ | \
+ FEATURE_IRQ | FEATURE_SMBUS_PEC | \
+ FEATURE_BLOCK_BUFFER | FEATURE_HOST_NOTIFY)
+#define FEATURES_ICH4 (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER | \
+ FEATURE_HOST_NOTIFY)
+
static const struct pci_device_id i801_ids[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_3) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_3) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_3) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_4) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_16) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_17) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_17) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_5) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_6) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EP80579_1) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COLETOCREEK_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GEMINILAKE_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BRASWELL_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EBG_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICELAKE_N_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS) },
- { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS) },
+ { PCI_DEVICE_DATA(INTEL, 82801AA_3, 0) },
+ { PCI_DEVICE_DATA(INTEL, 82801AB_3, 0) },
+ { PCI_DEVICE_DATA(INTEL, 82801BA_2, 0) },
+ { PCI_DEVICE_DATA(INTEL, 82801CA_3, FEATURE_HOST_NOTIFY) },
+ { PCI_DEVICE_DATA(INTEL, 82801DB_3, FEATURES_ICH4) },
+ { PCI_DEVICE_DATA(INTEL, 82801EB_3, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ESB_4, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH6_16, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH7_17, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ESB2_17, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH8_5, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH9_6, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, EP80579_1, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH10_4, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, ICH10_5, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, 5_3400_SERIES_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, COUGARPOINT_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF0, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF1, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, PATSBURG_SMBUS_IDF2, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, DH89XXCC_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, PANTHERPOINT_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, LYNXPOINT_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, LYNXPOINT_LP_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, AVOTON_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS0, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS1, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, WELLSBURG_SMBUS_MS2, FEATURES_ICH5 | FEATURE_IDF) },
+ { PCI_DEVICE_DATA(INTEL, COLETOCREEK_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, GEMINILAKE_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, WILDCATPOINT_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, WILDCATPOINT_LP_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, BAYTRAIL_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, BRASWELL_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, SUNRISEPOINT_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, SUNRISEPOINT_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, CDF_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, DNV_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, EBG_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, BROXTON_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, LEWISBURG_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, LEWISBURG_SSKU_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, KABYLAKE_PCH_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, CANNONLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, CANNONLAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ICELAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ICELAKE_N_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, COMETLAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, COMETLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, COMETLAKE_V_SMBUS, FEATURES_ICH5 | FEATURE_TCO_SPT) },
+ { PCI_DEVICE_DATA(INTEL, ELKHART_LAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, TIGERLAKE_LP_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, TIGERLAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, JASPER_LAKE_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
+ { PCI_DEVICE_DATA(INTEL, ALDER_LAKE_M_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ 0, }
};
@@ -1493,15 +1465,14 @@ static inline unsigned int i801_get_adapter_class(struct i801_priv *priv)
}
#endif
-static const struct itco_wdt_platform_data spt_tco_platform_data = {
- .name = "Intel PCH",
- .version = 4,
-};
-
static struct platform_device *
i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
struct resource *tco_res)
{
+ static const struct itco_wdt_platform_data pldata = {
+ .name = "Intel PCH",
+ .version = 4,
+ };
struct resource *res;
unsigned int devfn;
u64 base64_addr;
@@ -1544,22 +1515,20 @@ i801_add_tco_spt(struct i801_priv *priv, struct pci_dev *pci_dev,
res->flags = IORESOURCE_MEM;
return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
- tco_res, 2, &spt_tco_platform_data,
- sizeof(spt_tco_platform_data));
+ tco_res, 2, &pldata, sizeof(pldata));
}
-static const struct itco_wdt_platform_data cnl_tco_platform_data = {
- .name = "Intel PCH",
- .version = 6,
-};
-
static struct platform_device *
i801_add_tco_cnl(struct i801_priv *priv, struct pci_dev *pci_dev,
struct resource *tco_res)
{
- return platform_device_register_resndata(&pci_dev->dev,
- "iTCO_wdt", -1, tco_res, 1, &cnl_tco_platform_data,
- sizeof(cnl_tco_platform_data));
+ static const struct itco_wdt_platform_data pldata = {
+ .name = "Intel PCH",
+ .version = 6,
+ };
+
+ return platform_device_register_resndata(&pci_dev->dev, "iTCO_wdt", -1,
+ tco_res, 1, &pldata, sizeof(pldata));
}
static void i801_add_tco(struct i801_priv *priv)
@@ -1697,72 +1666,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
mutex_init(&priv->acpi_lock);
priv->pci_dev = dev;
- switch (dev->device) {
- case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
- case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
- case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
- case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
- case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
- case PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS:
- case PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS:
- priv->features |= FEATURE_BLOCK_PROC;
- priv->features |= FEATURE_I2C_BLOCK_READ;
- priv->features |= FEATURE_IRQ;
- priv->features |= FEATURE_SMBUS_PEC;
- priv->features |= FEATURE_BLOCK_BUFFER;
- priv->features |= FEATURE_TCO_SPT;
- priv->features |= FEATURE_HOST_NOTIFY;
- break;
-
- case PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS:
- case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS:
- case PCI_DEVICE_ID_INTEL_CDF_SMBUS:
- case PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS:
- case PCI_DEVICE_ID_INTEL_ICELAKE_N_SMBUS:
- case PCI_DEVICE_ID_INTEL_COMETLAKE_SMBUS:
- case PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS:
- case PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS:
- case PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS:
- case PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS:
- case PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS:
- case PCI_DEVICE_ID_INTEL_EBG_SMBUS:
- case PCI_DEVICE_ID_INTEL_ALDER_LAKE_S_SMBUS:
- case PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS:
- case PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS:
- priv->features |= FEATURE_BLOCK_PROC;
- priv->features |= FEATURE_I2C_BLOCK_READ;
- priv->features |= FEATURE_IRQ;
- priv->features |= FEATURE_SMBUS_PEC;
- priv->features |= FEATURE_BLOCK_BUFFER;
- priv->features |= FEATURE_TCO_CNL;
- priv->features |= FEATURE_HOST_NOTIFY;
- break;
-
- case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0:
- case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1:
- case PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2:
- case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS0:
- case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1:
- case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2:
- priv->features |= FEATURE_IDF;
- fallthrough;
- default:
- priv->features |= FEATURE_BLOCK_PROC;
- priv->features |= FEATURE_I2C_BLOCK_READ;
- priv->features |= FEATURE_IRQ;
- fallthrough;
- case PCI_DEVICE_ID_INTEL_82801DB_3:
- priv->features |= FEATURE_SMBUS_PEC;
- priv->features |= FEATURE_BLOCK_BUFFER;
- fallthrough;
- case PCI_DEVICE_ID_INTEL_82801CA_3:
- priv->features |= FEATURE_HOST_NOTIFY;
- fallthrough;
- case PCI_DEVICE_ID_INTEL_82801BA_2:
- case PCI_DEVICE_ID_INTEL_82801AB_3:
- case PCI_DEVICE_ID_INTEL_82801AA_3:
- break;
- }
+ priv->features = id->driver_data;
/* Disable features on user request */
for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
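The i801 table rewrite works because PCI_DEVICE_DATA() stores its third argument in .driver_data, which probe() now copies verbatim into priv->features, replacing the large switch. The idiom in miniature (the flag and its use are illustrative):

#define FOO_FEATURE_X	BIT(0)	/* hypothetical per-device feature flag */

static const struct pci_device_id foo_ids[] = {
	/* .driver_data carries the feature mask for this device */
	{ PCI_DEVICE_DATA(INTEL, 82801AA_3, FOO_FEATURE_X) },
	{ 0, }
};

static int foo_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	unsigned long features = id->driver_data;	/* replaces switch () */

	if (features & FOO_FEATURE_X)
		dev_info(&dev->dev, "feature X present\n");

	return 0;
}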
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 3576b63a6c03..27f969b3dc07 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -37,6 +37,8 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -51,6 +53,8 @@
/* This will be the driver name the kernel reports */
#define DRIVER_NAME "imx-i2c"
+#define I2C_IMX_CHECK_DELAY 30000 /* Time before checking for bus idle, in ns */
+
/*
* Enable DMA if transfer byte size is bigger than this threshold.
 * As the hardware requires, it must be bigger than 4 bytes.
@@ -210,6 +214,10 @@ struct imx_i2c_struct {
struct imx_i2c_dma *dma;
struct i2c_client *slave;
enum i2c_slave_event last_slave_event;
+
+ /* For checking slave events. */
+ spinlock_t slave_lock;
+ struct hrtimer slave_timer;
};
static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
@@ -680,7 +688,7 @@ static void i2c_imx_slave_event(struct imx_i2c_struct *i2c_imx,
static void i2c_imx_slave_finish_op(struct imx_i2c_struct *i2c_imx)
{
- u8 val;
+ u8 val = 0;
while (i2c_imx->last_slave_event != I2C_SLAVE_STOP) {
switch (i2c_imx->last_slave_event) {
@@ -701,10 +709,11 @@ static void i2c_imx_slave_finish_op(struct imx_i2c_struct *i2c_imx)
}
}
-static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
- unsigned int status, unsigned int ctl)
+/* Handle slave mode events; re-arms the bus-idle poll timer as needed. */
+static irqreturn_t i2c_imx_slave_handle(struct imx_i2c_struct *i2c_imx,
+ unsigned int status, unsigned int ctl)
{
- u8 value;
+ u8 value = 0;
if (status & I2SR_IAL) { /* Arbitration lost */
i2c_imx_clear_irq(i2c_imx, I2SR_IAL);
@@ -712,6 +721,16 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
return IRQ_HANDLED;
}
+ if (!(status & I2SR_IBB)) {
+		/* No master on the bus; that could mean a stop condition. */
+ i2c_imx_slave_finish_op(i2c_imx);
+ return IRQ_HANDLED;
+ }
+
+ if (!(status & I2SR_ICF))
+ /* Data transfer still in progress, ignore this. */
+ goto out;
+
if (status & I2SR_IAAS) { /* Addressed as a slave */
i2c_imx_slave_finish_op(i2c_imx);
if (status & I2SR_SRW) { /* Master wants to read from us*/
@@ -737,16 +756,9 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
}
} else if (!(ctl & I2CR_MTX)) { /* Receive mode */
- if (status & I2SR_IBB) { /* No STOP signal detected */
- value = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
- i2c_imx_slave_event(i2c_imx,
- I2C_SLAVE_WRITE_RECEIVED, &value);
- } else { /* STOP signal is detected */
- dev_dbg(&i2c_imx->adapter.dev,
- "STOP signal detected");
- i2c_imx_slave_event(i2c_imx,
- I2C_SLAVE_STOP, &value);
- }
+ value = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+ i2c_imx_slave_event(i2c_imx,
+ I2C_SLAVE_WRITE_RECEIVED, &value);
} else if (!(status & I2SR_RXAK)) { /* Transmit mode received ACK */
ctl |= I2CR_MTX;
imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
@@ -755,15 +767,43 @@ static irqreturn_t i2c_imx_slave_isr(struct imx_i2c_struct *i2c_imx,
I2C_SLAVE_READ_PROCESSED, &value);
imx_i2c_write_reg(value, i2c_imx, IMX_I2C_I2DR);
- } else { /* Transmit mode received NAK */
+ } else { /* Transmit mode received NAK, operation is done */
ctl &= ~I2CR_MTX;
imx_i2c_write_reg(ctl, i2c_imx, IMX_I2C_I2CR);
imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
+ i2c_imx_slave_finish_op(i2c_imx);
+ return IRQ_HANDLED;
}
+out:
+ /*
+ * No need to check the return value here. If it returns 0 or
+	 * 1, then everything is fine. If it returns -1, the timer
+	 * callback is currently running; the re-arm still works,
+	 * though it may be redone (or already have been done) by the
+ * timer function.
+ */
+ hrtimer_try_to_cancel(&i2c_imx->slave_timer);
+ hrtimer_forward_now(&i2c_imx->slave_timer, I2C_IMX_CHECK_DELAY);
+ hrtimer_restart(&i2c_imx->slave_timer);
return IRQ_HANDLED;
}
+static enum hrtimer_restart i2c_imx_slave_timeout(struct hrtimer *t)
+{
+ struct imx_i2c_struct *i2c_imx = container_of(t, struct imx_i2c_struct,
+ slave_timer);
+ unsigned int ctl, status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&i2c_imx->slave_lock, flags);
+ status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
+ ctl = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
+ i2c_imx_slave_handle(i2c_imx, status, ctl);
+ spin_unlock_irqrestore(&i2c_imx->slave_lock, flags);
+ return HRTIMER_NORESTART;
+}
+
static void i2c_imx_slave_init(struct imx_i2c_struct *i2c_imx)
{
int temp;
@@ -843,7 +883,9 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
{
struct imx_i2c_struct *i2c_imx = dev_id;
unsigned int ctl, status;
+ unsigned long flags;
+ spin_lock_irqsave(&i2c_imx->slave_lock, flags);
status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
ctl = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
@@ -851,14 +893,20 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
i2c_imx_clear_irq(i2c_imx, I2SR_IIF);
if (i2c_imx->slave) {
if (!(ctl & I2CR_MSTA)) {
- return i2c_imx_slave_isr(i2c_imx, status, ctl);
- } else if (i2c_imx->last_slave_event !=
- I2C_SLAVE_STOP) {
- i2c_imx_slave_finish_op(i2c_imx);
+ irqreturn_t ret;
+
+ ret = i2c_imx_slave_handle(i2c_imx,
+ status, ctl);
+ spin_unlock_irqrestore(&i2c_imx->slave_lock,
+ flags);
+ return ret;
}
+ i2c_imx_slave_finish_op(i2c_imx);
}
+ spin_unlock_irqrestore(&i2c_imx->slave_lock, flags);
return i2c_imx_master_isr(i2c_imx, status);
}
+ spin_unlock_irqrestore(&i2c_imx->slave_lock, flags);
return IRQ_NONE;
}
@@ -1378,6 +1426,10 @@ static int i2c_imx_probe(struct platform_device *pdev)
if (!i2c_imx)
return -ENOMEM;
+ spin_lock_init(&i2c_imx->slave_lock);
+ hrtimer_init(&i2c_imx->slave_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ i2c_imx->slave_timer.function = i2c_imx_slave_timeout;
+
match = device_get_match_data(&pdev->dev);
if (match)
i2c_imx->hwdata = match;
@@ -1491,6 +1543,8 @@ static int i2c_imx_remove(struct platform_device *pdev)
if (ret < 0)
return ret;
+ hrtimer_cancel(&i2c_imx->slave_timer);
+
/* remove adapter */
dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n");
i2c_del_adapter(&i2c_imx->adapter);
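The i.MX slave rework pairs a spinlock-protected handler with a one-shot hrtimer that the ISR re-arms to poll for bus idle. The re-arm idiom in isolation (names hypothetical; the 30000 ns interval mirrors I2C_IMX_CHECK_DELAY above):

static struct hrtimer foo_timer;	/* needs <linux/hrtimer.h> */

static enum hrtimer_restart foo_timeout(struct hrtimer *t)
{
	/* recheck the hardware state here */
	return HRTIMER_NORESTART;	/* one-shot: the ISR re-arms it */
}

static void foo_rearm(void)
{
	/* returns -1 if the callback is currently running; still safe */
	hrtimer_try_to_cancel(&foo_timer);
	hrtimer_forward_now(&foo_timer, ns_to_ktime(30000));
	hrtimer_restart(&foo_timer);
}

static void foo_setup(void)
{
	hrtimer_init(&foo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	foo_timer.function = foo_timeout;
}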
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index db26cc36e13f..6c698c10d3cd 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -119,23 +119,30 @@ static inline void writeccr(struct mpc_i2c *i2c, u32 x)
/* Sometimes 9th clock pulse isn't generated, and slave doesn't release
* the bus, because it wants to send ACK.
* Following sequence of enabling/disabling and sending start/stop generates
- * the 9 pulses, so it's all OK.
+ * the 9 pulses, each preceded by a START and ended by a final STOP, so it's all OK.
*/
static void mpc_i2c_fixup(struct mpc_i2c *i2c)
{
int k;
- u32 delay_val = 1000000 / i2c->real_clk + 1;
-
- if (delay_val < 2)
- delay_val = 2;
+ unsigned long flags;
for (k = 9; k; k--) {
writeccr(i2c, 0);
- writeccr(i2c, CCR_MSTA | CCR_MTX | CCR_MEN);
+ writeb(0, i2c->base + MPC_I2C_SR); /* clear any status bits */
+ writeccr(i2c, CCR_MEN | CCR_MSTA); /* START */
+ readb(i2c->base + MPC_I2C_DR); /* init xfer */
+ udelay(15); /* let it hit the bus */
+ local_irq_save(flags); /* should not be delayed further */
+ writeccr(i2c, CCR_MEN | CCR_MSTA | CCR_RSTA); /* delay SDA */
readb(i2c->base + MPC_I2C_DR);
- writeccr(i2c, CCR_MEN);
- udelay(delay_val << 1);
+ if (k != 1)
+ udelay(5);
+ local_irq_restore(flags);
}
+ writeccr(i2c, CCR_MEN); /* Initiate STOP */
+ readb(i2c->base + MPC_I2C_DR);
+ udelay(15); /* Let STOP propagate */
+ writeccr(i2c, 0);
}
static int i2c_mpc_wait_sr(struct mpc_i2c *i2c, int mask)
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index c1de8eb66169..cf54f1cb4c57 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -558,7 +558,7 @@ static int cci_probe(struct platform_device *pdev)
cci->master[idx].adap.quirks = &cci->data->quirks;
cci->master[idx].adap.algo = &cci_algo;
cci->master[idx].adap.dev.parent = dev;
- cci->master[idx].adap.dev.of_node = child;
+ cci->master[idx].adap.dev.of_node = of_node_get(child);
cci->master[idx].master = idx;
cci->master[idx].cci = cci;
@@ -643,8 +643,10 @@ static int cci_probe(struct platform_device *pdev)
continue;
ret = i2c_add_adapter(&cci->master[i].adap);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(cci->master[i].adap.dev.of_node);
goto error_i2c;
+ }
}
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
@@ -655,9 +657,11 @@ static int cci_probe(struct platform_device *pdev)
return 0;
error_i2c:
- for (; i >= 0; i--) {
- if (cci->master[i].cci)
+	for (--i; i >= 0; i--) {
+ if (cci->master[i].cci) {
i2c_del_adapter(&cci->master[i].adap);
+ of_node_put(cci->master[i].adap.dev.of_node);
+ }
}
error:
disable_irq(cci->irq);
@@ -673,8 +677,10 @@ static int cci_remove(struct platform_device *pdev)
int i;
for (i = 0; i < cci->data->num_masters; i++) {
- if (cci->master[i].cci)
+ if (cci->master[i].cci) {
i2c_del_adapter(&cci->master[i].adap);
+ of_node_put(cci->master[i].adap.dev.of_node);
+ }
cci_halt(cci, i);
}
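The CCI fix is the standard OF refcount pairing: take a reference with of_node_get() when a child node pointer is stored beyond the enumeration loop, and drop it with of_node_put() on every teardown path, including the partially-initialised error path. Sketched minimally (struct foo_adap is hypothetical):

struct foo_adap {
	struct device_node *of_node;
};

static void foo_store_child(struct foo_adap *a, struct device_node *child)
{
	a->of_node = of_node_get(child);	/* keep ref past the loop */
}

static void foo_drop_child(struct foo_adap *a)
{
	of_node_put(a->of_node);		/* balances of_node_get() */
	a->of_node = NULL;
}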
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index fc13511f4562..f71c730f9838 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -367,11 +367,15 @@ static void rcar_i2c_next_msg(struct rcar_i2c_priv *priv)
rcar_i2c_prepare_msg(priv);
}
-static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
+static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv, bool terminate)
{
struct dma_chan *chan = priv->dma_direction == DMA_FROM_DEVICE
? priv->dma_rx : priv->dma_tx;
+ /* only allowed from thread context! */
+ if (terminate)
+ dmaengine_terminate_sync(chan);
+
dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
sg_dma_len(&priv->sg), priv->dma_direction);
@@ -386,25 +390,13 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICDMAER, 0);
}
-static void rcar_i2c_cleanup_dma(struct rcar_i2c_priv *priv)
-{
- if (priv->dma_direction == DMA_NONE)
- return;
- else if (priv->dma_direction == DMA_FROM_DEVICE)
- dmaengine_terminate_all(priv->dma_rx);
- else if (priv->dma_direction == DMA_TO_DEVICE)
- dmaengine_terminate_all(priv->dma_tx);
-
- rcar_i2c_dma_unmap(priv);
-}
-
static void rcar_i2c_dma_callback(void *data)
{
struct rcar_i2c_priv *priv = data;
priv->pos += sg_dma_len(&priv->sg);
- rcar_i2c_dma_unmap(priv);
+ rcar_i2c_cleanup_dma(priv, false);
}
static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
@@ -456,7 +448,7 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc) {
dev_dbg(dev, "dma prep slave sg failed, using PIO\n");
- rcar_i2c_cleanup_dma(priv);
+ rcar_i2c_cleanup_dma(priv, false);
return false;
}
@@ -466,7 +458,7 @@ static bool rcar_i2c_dma(struct rcar_i2c_priv *priv)
cookie = dmaengine_submit(txdesc);
if (dma_submit_error(cookie)) {
dev_dbg(dev, "submitting dma failed, using PIO\n");
- rcar_i2c_cleanup_dma(priv);
+ rcar_i2c_cleanup_dma(priv, false);
return false;
}
@@ -846,7 +838,7 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
/* cleanup DMA if it couldn't complete properly due to an error */
if (priv->dma_direction != DMA_NONE)
- rcar_i2c_cleanup_dma(priv);
+ rcar_i2c_cleanup_dma(priv, true);
if (!time_left) {
rcar_i2c_init(priv);
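
Note: both the rcar and (below) sh_mobile conversions fold DMA termination into one cleanup helper gated by a flag, because dmaengine_terminate_sync() may sleep and is therefore only legal from thread context, while the completion callback merely unmaps. A sketch of the shape, with a hypothetical demo_priv:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

struct demo_priv {
	struct dma_chan *chan;
	dma_addr_t dma_addr;
	size_t len;
	enum dma_data_direction dir;
};

static void demo_cleanup_dma(struct demo_priv *priv, bool terminate)
{
	/* terminate == true is only allowed from sleepable (thread) context */
	if (terminate)
		dmaengine_terminate_sync(priv->chan);

	dma_unmap_single(priv->chan->device->dev, priv->dma_addr,
			 priv->len, priv->dir);
	priv->dir = DMA_NONE;
}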
diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
index 78b84445ee6a..8dfd27dc6149 100644
--- a/drivers/i2c/busses/i2c-riic.c
+++ b/drivers/i2c/busses/i2c-riic.c
@@ -433,12 +433,12 @@ static int riic_i2c_probe(struct platform_device *pdev)
}
for (i = 0; i < ARRAY_SIZE(riic_irqs); i++) {
- res = platform_get_resource(pdev, IORESOURCE_IRQ, riic_irqs[i].res_num);
- if (!res)
- return -ENODEV;
+ ret = platform_get_irq(pdev, riic_irqs[i].res_num);
+ if (ret < 0)
+ return ret;
- ret = devm_request_irq(&pdev->dev, res->start, riic_irqs[i].isr,
- 0, riic_irqs[i].name, riic);
+ ret = devm_request_irq(&pdev->dev, ret, riic_irqs[i].isr,
+ 0, riic_irqs[i].name, riic);
if (ret) {
dev_err(&pdev->dev, "failed to request irq %s\n", riic_irqs[i].name);
return ret;
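
Note: the riic change is the stock conversion away from platform_get_resource(IORESOURCE_IRQ, ...), which no longer works once IRQs are mapped through an irqdomain instead of static resources. platform_get_irq() returns a ready-to-use Linux IRQ number or a negative errno. The call sequence inside probe, with demo_isr standing in for the driver's handler:

	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* negative errno, including -EPROBE_DEFER */

	ret = devm_request_irq(&pdev->dev, irq, demo_isr,
			       0, dev_name(&pdev->dev), priv);
	if (ret)
		return ret;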
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 02ddb237f69a..989040a73626 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -1338,8 +1338,15 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
goto err_pclk;
}
+ ret = clk_enable(i2c->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Can't enable bus clk: %d\n", ret);
+ goto err_clk_notifier;
+ }
+
clk_rate = clk_get_rate(i2c->clk);
rk3x_i2c_adapt_div(i2c, clk_rate);
+ clk_disable(i2c->clk);
ret = i2c_add_adapter(&i2c->adap);
if (ret < 0)
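
Note: the rk3x hunk brackets clk_get_rate() with clk_enable()/clk_disable() because the clk API only guarantees a meaningful rate while the clock is enabled, and this driver keeps the bus clock gated between transfers. Roughly:

	ret = clk_enable(i2c->clk);		/* clock was clk_prepare()d earlier */
	if (ret)
		return ret;

	clk_rate = clk_get_rate(i2c->clk);	/* sample while the clock is running */
	/* ... derive bus dividers from clk_rate ... */
	clk_disable(i2c->clk);			/* gate it again until a transfer needs it */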
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index db8fa4186814..72f024a0c363 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -442,34 +442,26 @@ static irqreturn_t sh_mobile_i2c_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void sh_mobile_i2c_dma_unmap(struct sh_mobile_i2c_data *pd)
+static void sh_mobile_i2c_cleanup_dma(struct sh_mobile_i2c_data *pd, bool terminate)
{
struct dma_chan *chan = pd->dma_direction == DMA_FROM_DEVICE
? pd->dma_rx : pd->dma_tx;
+ /* only allowed from thread context! */
+ if (terminate)
+ dmaengine_terminate_sync(chan);
+
dma_unmap_single(chan->device->dev, sg_dma_address(&pd->sg),
pd->msg->len, pd->dma_direction);
pd->dma_direction = DMA_NONE;
}
-static void sh_mobile_i2c_cleanup_dma(struct sh_mobile_i2c_data *pd)
-{
- if (pd->dma_direction == DMA_NONE)
- return;
- else if (pd->dma_direction == DMA_FROM_DEVICE)
- dmaengine_terminate_sync(pd->dma_rx);
- else if (pd->dma_direction == DMA_TO_DEVICE)
- dmaengine_terminate_sync(pd->dma_tx);
-
- sh_mobile_i2c_dma_unmap(pd);
-}
-
static void sh_mobile_i2c_dma_callback(void *data)
{
struct sh_mobile_i2c_data *pd = data;
- sh_mobile_i2c_dma_unmap(pd);
+ sh_mobile_i2c_cleanup_dma(pd, false);
pd->pos = pd->msg->len;
pd->stop_after_dma = true;
@@ -549,7 +541,7 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc) {
dev_dbg(pd->dev, "dma prep slave sg failed, using PIO\n");
- sh_mobile_i2c_cleanup_dma(pd);
+ sh_mobile_i2c_cleanup_dma(pd, false);
return;
}
@@ -559,7 +551,7 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
cookie = dmaengine_submit(txdesc);
if (dma_submit_error(cookie)) {
dev_dbg(pd->dev, "submitting dma failed, using PIO\n");
- sh_mobile_i2c_cleanup_dma(pd);
+ sh_mobile_i2c_cleanup_dma(pd, false);
return;
}
@@ -698,7 +690,7 @@ static int sh_mobile_xfer(struct sh_mobile_i2c_data *pd,
if (!time_left) {
dev_err(pd->dev, "Transfer request timed out\n");
if (pd->dma_direction != DMA_NONE)
- sh_mobile_i2c_cleanup_dma(pd);
+ sh_mobile_i2c_cleanup_dma(pd, true);
err = -ETIMEDOUT;
break;
@@ -838,20 +830,38 @@ static void sh_mobile_i2c_release_dma(struct sh_mobile_i2c_data *pd)
static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, struct sh_mobile_i2c_data *pd)
{
- struct resource *res;
- resource_size_t n;
+ struct device_node *np = dev_of_node(&dev->dev);
int k = 0, ret;
- while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) {
- for (n = res->start; n <= res->end; n++) {
- ret = devm_request_irq(&dev->dev, n, sh_mobile_i2c_isr,
- 0, dev_name(&dev->dev), pd);
+ if (np) {
+ int irq;
+
+ while ((irq = platform_get_irq_optional(dev, k)) != -ENXIO) {
+ if (irq < 0)
+ return irq;
+ ret = devm_request_irq(&dev->dev, irq, sh_mobile_i2c_isr,
+ 0, dev_name(&dev->dev), pd);
if (ret) {
- dev_err(&dev->dev, "cannot request IRQ %pa\n", &n);
+ dev_err(&dev->dev, "cannot request IRQ %d\n", irq);
return ret;
}
+ k++;
+ }
+ } else {
+ struct resource *res;
+ resource_size_t n;
+
+ while ((res = platform_get_resource(dev, IORESOURCE_IRQ, k))) {
+ for (n = res->start; n <= res->end; n++) {
+ ret = devm_request_irq(&dev->dev, n, sh_mobile_i2c_isr,
+ 0, dev_name(&dev->dev), pd);
+ if (ret) {
+ dev_err(&dev->dev, "cannot request IRQ %pa\n", &n);
+ return ret;
+ }
+ }
+ k++;
}
- k++;
}
return k > 0 ? 0 : -ENOENT;
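
Note: for DT platforms with a variable IRQ count, the sh_mobile hunk probes with platform_get_irq_optional() until it sees -ENXIO (no such IRQ) and treats every other negative value as a real error. The loop shape, with a hypothetical demo_isr:

	int k = 0, irq, ret;

	while ((irq = platform_get_irq_optional(pdev, k)) != -ENXIO) {
		if (irq < 0)
			return irq;		/* e.g. -EPROBE_DEFER */

		ret = devm_request_irq(&pdev->dev, irq, demo_isr,
				       0, dev_name(&pdev->dev), pd);
		if (ret)
			return ret;
		k++;
	}

	return k ? 0 : -ENOENT;			/* at least one IRQ is required */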
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index 66145d2b9b55..6d4aa64b195d 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -828,18 +828,14 @@ static void stm32f7_i2c_smbus_reload(struct stm32f7_i2c_dev *i2c_dev)
writel_relaxed(cr2, i2c_dev->base + STM32F7_I2C_CR2);
}
-static int stm32f7_i2c_release_bus(struct i2c_adapter *i2c_adap)
+static void stm32f7_i2c_release_bus(struct i2c_adapter *i2c_adap)
{
struct stm32f7_i2c_dev *i2c_dev = i2c_get_adapdata(i2c_adap);
- dev_info(i2c_dev->dev, "Trying to recover bus\n");
-
stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1,
STM32F7_I2C_CR1_PE);
stm32f7_i2c_hw_config(i2c_dev);
-
- return 0;
}
static int stm32f7_i2c_wait_free_bus(struct stm32f7_i2c_dev *i2c_dev)
@@ -854,13 +850,7 @@ static int stm32f7_i2c_wait_free_bus(struct stm32f7_i2c_dev *i2c_dev)
if (!ret)
return 0;
- dev_info(i2c_dev->dev, "bus busy\n");
-
- ret = stm32f7_i2c_release_bus(&i2c_dev->adap);
- if (ret) {
- dev_err(i2c_dev->dev, "Failed to recover the bus (%d)\n", ret);
- return ret;
- }
+ stm32f7_i2c_release_bus(&i2c_dev->adap);
return -EBUSY;
}
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index b3184c422826..03cea102ab76 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -6,6 +6,7 @@
* Author: Colin Cross <ccross@android.com>
*/
+#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -245,7 +246,7 @@ struct tegra_i2c_hw_feature {
* @msg_buf: pointer to current message data
* @msg_buf_remaining: size of unsent data in the message buffer
* @msg_read: indicates that the transfer is a read access
- * @bus_clk_rate: current I2C bus clock rate
+ * @timings: i2c timings information like bus frequency
* @multimaster_mode: indicates that I2C controller is in multi-master mode
* @tx_dma_chan: DMA transmit channel
* @rx_dma_chan: DMA receive channel
@@ -272,7 +273,7 @@ struct tegra_i2c_dev {
unsigned int nclocks;
struct clk *div_clk;
- u32 bus_clk_rate;
+ struct i2c_timings timings;
struct completion msg_complete;
size_t msg_buf_remaining;
@@ -608,6 +609,8 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev)
static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
{
u32 val, clk_divisor, clk_multiplier, tsu_thd, tlow, thigh, non_hs_mode;
+ acpi_handle handle = ACPI_HANDLE(i2c_dev->dev);
+ struct i2c_timings *t = &i2c_dev->timings;
int err;
/*
@@ -618,7 +621,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
* emit a noisy warning on error, which won't stay unnoticed and
* won't hose machine entirely.
*/
- err = reset_control_reset(i2c_dev->rst);
+ if (handle)
+ err = acpi_evaluate_object(handle, "_RST", NULL, NULL);
+ else
+ err = reset_control_reset(i2c_dev->rst);
+
WARN_ON_ONCE(err);
if (i2c_dev->is_dvc)
@@ -636,14 +643,14 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
if (i2c_dev->is_vi)
tegra_i2c_vi_init(i2c_dev);
- switch (i2c_dev->bus_clk_rate) {
+ switch (t->bus_freq_hz) {
case I2C_MAX_STANDARD_MODE_FREQ + 1 ... I2C_MAX_FAST_MODE_PLUS_FREQ:
default:
tlow = i2c_dev->hw->tlow_fast_fastplus_mode;
thigh = i2c_dev->hw->thigh_fast_fastplus_mode;
tsu_thd = i2c_dev->hw->setup_hold_time_fast_fast_plus_mode;
- if (i2c_dev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ)
+ if (t->bus_freq_hz > I2C_MAX_FAST_MODE_FREQ)
non_hs_mode = i2c_dev->hw->clk_divisor_fast_plus_mode;
else
non_hs_mode = i2c_dev->hw->clk_divisor_fast_mode;
@@ -679,7 +686,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
clk_multiplier = (tlow + thigh + 2) * (non_hs_mode + 1);
err = clk_set_rate(i2c_dev->div_clk,
- i2c_dev->bus_clk_rate * clk_multiplier);
+ t->bus_freq_hz * clk_multiplier);
if (err) {
dev_err(i2c_dev->dev, "failed to set div-clk rate: %d\n", err);
return err;
@@ -718,7 +725,7 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
* before disabling the controller so that the STOP condition has
* been delivered properly.
*/
- udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
+ udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->timings.bus_freq_hz));
cnfg = i2c_readl(i2c_dev, I2C_CNFG);
if (cnfg & I2C_CNFG_PACKET_MODE_EN)
@@ -1248,7 +1255,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
* Total bits = 9 bits per byte (including ACK bit) + Start & stop bits
*/
xfer_time += DIV_ROUND_CLOSEST(((xfer_size * 9) + 2) * MSEC_PER_SEC,
- i2c_dev->bus_clk_rate);
+ i2c_dev->timings.bus_freq_hz);
int_mask = I2C_INT_NO_ACK | I2C_INT_ARBITRATION_LOST;
tegra_i2c_unmask_irq(i2c_dev, int_mask);
@@ -1625,14 +1632,10 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
{
struct device_node *np = i2c_dev->dev->of_node;
bool multi_mode;
- int err;
- err = of_property_read_u32(np, "clock-frequency",
- &i2c_dev->bus_clk_rate);
- if (err)
- i2c_dev->bus_clk_rate = I2C_MAX_STANDARD_MODE_FREQ;
+ i2c_parse_fw_timings(i2c_dev->dev, &i2c_dev->timings, true);
- multi_mode = of_property_read_bool(np, "multi-master");
+ multi_mode = device_property_read_bool(i2c_dev->dev, "multi-master");
i2c_dev->multimaster_mode = multi_mode;
if (of_device_is_compatible(np, "nvidia,tegra20-i2c-dvc"))
@@ -1642,10 +1645,26 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev)
i2c_dev->is_vi = true;
}
+static int tegra_i2c_init_reset(struct tegra_i2c_dev *i2c_dev)
+{
+ if (ACPI_HANDLE(i2c_dev->dev))
+ return 0;
+
+ i2c_dev->rst = devm_reset_control_get_exclusive(i2c_dev->dev, "i2c");
+ if (IS_ERR(i2c_dev->rst))
+ return dev_err_probe(i2c_dev->dev, PTR_ERR(i2c_dev->rst),
+ "failed to get reset control\n");
+
+ return 0;
+}
+
static int tegra_i2c_init_clocks(struct tegra_i2c_dev *i2c_dev)
{
int err;
+ if (ACPI_HANDLE(i2c_dev->dev))
+ return 0;
+
i2c_dev->clocks[i2c_dev->nclocks++].id = "div-clk";
if (i2c_dev->hw == &tegra20_i2c_hw || i2c_dev->hw == &tegra30_i2c_hw)
@@ -1720,7 +1739,7 @@ static int tegra_i2c_probe(struct platform_device *pdev)
init_completion(&i2c_dev->msg_complete);
init_completion(&i2c_dev->dma_complete);
- i2c_dev->hw = of_device_get_match_data(&pdev->dev);
+ i2c_dev->hw = device_get_match_data(&pdev->dev);
i2c_dev->cont_id = pdev->id;
i2c_dev->dev = &pdev->dev;
@@ -1746,15 +1765,12 @@ static int tegra_i2c_probe(struct platform_device *pdev)
if (err)
return err;
- i2c_dev->rst = devm_reset_control_get_exclusive(i2c_dev->dev, "i2c");
- if (IS_ERR(i2c_dev->rst)) {
- dev_err_probe(i2c_dev->dev, PTR_ERR(i2c_dev->rst),
- "failed to get reset control\n");
- return PTR_ERR(i2c_dev->rst);
- }
-
tegra_i2c_parse_dt(i2c_dev);
+ err = tegra_i2c_init_reset(i2c_dev);
+ if (err)
+ return err;
+
err = tegra_i2c_init_clocks(i2c_dev);
if (err)
return err;
@@ -1923,12 +1939,21 @@ static const struct dev_pm_ops tegra_i2c_pm = {
NULL)
};
+static const struct acpi_device_id tegra_i2c_acpi_match[] = {
+ {.id = "NVDA0101", .driver_data = (kernel_ulong_t)&tegra210_i2c_hw},
+ {.id = "NVDA0201", .driver_data = (kernel_ulong_t)&tegra186_i2c_hw},
+ {.id = "NVDA0301", .driver_data = (kernel_ulong_t)&tegra194_i2c_hw},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, tegra_i2c_acpi_match);
+
static struct platform_driver tegra_i2c_driver = {
.probe = tegra_i2c_probe,
.remove = tegra_i2c_remove,
.driver = {
.name = "tegra-i2c",
.of_match_table = tegra_i2c_of_match,
+ .acpi_match_table = tegra_i2c_acpi_match,
.pm = &tegra_i2c_pm,
},
};
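
Note: the tegra conversion swaps OF-only helpers for fwnode-aware ones so a single probe path serves both the DT table and the new ACPI ID table; device_get_match_data() resolves driver data from whichever table matched. A hedged fragment (the -ENODEV handling is an assumption, not from the patch):

#include <linux/property.h>

	/* works for both OF and ACPI enumeration; returns the .data /
	 * .driver_data of the matching table entry, or NULL
	 */
	i2c_dev->hw = device_get_match_data(&pdev->dev);
	if (!i2c_dev->hw)
		return -ENODEV;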
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index 41eb0dcc3204..4b9536f50800 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -165,7 +165,7 @@ err_free:
static void virtio_i2c_del_vqs(struct virtio_device *vdev)
{
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
}
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 6d24dc385522..4e3b11c0f732 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -572,12 +572,6 @@ static int xlp9xx_i2c_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id xlp9xx_i2c_of_match[] = {
- { .compatible = "netlogic,xlp980-i2c", },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, xlp9xx_i2c_of_match);
-
#ifdef CONFIG_ACPI
static const struct acpi_device_id xlp9xx_i2c_acpi_ids[] = {
{"BRCM9007", 0},
@@ -592,7 +586,6 @@ static struct platform_driver xlp9xx_i2c_driver = {
.remove = xlp9xx_i2c_remove,
.driver = {
.name = "xlp9xx-i2c",
- .of_match_table = xlp9xx_i2c_of_match,
.acpi_match_table = ACPI_PTR(xlp9xx_i2c_acpi_ids),
},
};
diff --git a/drivers/i2c/busses/i2c-xlr.c b/drivers/i2c/busses/i2c-xlr.c
deleted file mode 100644
index 9ce20652d494..000000000000
--- a/drivers/i2c/busses/i2c-xlr.c
+++ /dev/null
@@ -1,470 +0,0 @@
-/*
- * Copyright 2011, Netlogic Microsystems Inc.
- * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/err.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/i2c.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/of_device.h>
-#include <linux/clk.h>
-#include <linux/interrupt.h>
-#include <linux/wait.h>
-
-/* XLR I2C REGISTERS */
-#define XLR_I2C_CFG 0x00
-#define XLR_I2C_CLKDIV 0x01
-#define XLR_I2C_DEVADDR 0x02
-#define XLR_I2C_ADDR 0x03
-#define XLR_I2C_DATAOUT 0x04
-#define XLR_I2C_DATAIN 0x05
-#define XLR_I2C_STATUS 0x06
-#define XLR_I2C_STARTXFR 0x07
-#define XLR_I2C_BYTECNT 0x08
-#define XLR_I2C_HDSTATIM 0x09
-
-/* Sigma Designs additional registers */
-#define XLR_I2C_INT_EN 0x09
-#define XLR_I2C_INT_STAT 0x0a
-
-/* XLR I2C REGISTERS FLAGS */
-#define XLR_I2C_BUS_BUSY 0x01
-#define XLR_I2C_SDOEMPTY 0x02
-#define XLR_I2C_RXRDY 0x04
-#define XLR_I2C_ACK_ERR 0x08
-#define XLR_I2C_ARB_STARTERR 0x30
-
-/* Register Values */
-#define XLR_I2C_CFG_ADDR 0xF8
-#define XLR_I2C_CFG_NOADDR 0xFA
-#define XLR_I2C_STARTXFR_ND 0x02 /* No Data */
-#define XLR_I2C_STARTXFR_RD 0x01 /* Read */
-#define XLR_I2C_STARTXFR_WR 0x00 /* Write */
-
-#define XLR_I2C_TIMEOUT 10 /* timeout per byte in msec */
-
-/*
- * On XLR/XLS, we need to use __raw_ IO to read the I2C registers
- * because they are in the big-endian MMIO area on the SoC.
- *
- * The readl/writel implementation on XLR/XLS byteswaps, because
- * those are for its little-endian PCI space (see arch/mips/Kconfig).
- */
-static inline void xlr_i2c_wreg(u32 __iomem *base, unsigned int reg, u32 val)
-{
- __raw_writel(val, base + reg);
-}
-
-static inline u32 xlr_i2c_rdreg(u32 __iomem *base, unsigned int reg)
-{
- return __raw_readl(base + reg);
-}
-
-#define XLR_I2C_FLAG_IRQ 1
-
-struct xlr_i2c_config {
- u32 flags; /* optional feature support */
- u32 status_busy; /* value of STATUS[0] when busy */
- u32 cfg_extra; /* extra CFG bits to set */
-};
-
-struct xlr_i2c_private {
- struct i2c_adapter adap;
- u32 __iomem *iobase;
- int irq;
- int pos;
- struct i2c_msg *msg;
- const struct xlr_i2c_config *cfg;
- wait_queue_head_t wait;
- struct clk *clk;
-};
-
-static int xlr_i2c_busy(struct xlr_i2c_private *priv, u32 status)
-{
- return (status & XLR_I2C_BUS_BUSY) == priv->cfg->status_busy;
-}
-
-static int xlr_i2c_idle(struct xlr_i2c_private *priv)
-{
- return !xlr_i2c_busy(priv, xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS));
-}
-
-static int xlr_i2c_wait(struct xlr_i2c_private *priv, unsigned long timeout)
-{
- int status;
- int t;
-
- t = wait_event_timeout(priv->wait, xlr_i2c_idle(priv),
- msecs_to_jiffies(timeout));
- if (!t)
- return -ETIMEDOUT;
-
- status = xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS);
-
- return status & XLR_I2C_ACK_ERR ? -EIO : 0;
-}
-
-static void xlr_i2c_tx_irq(struct xlr_i2c_private *priv, u32 status)
-{
- struct i2c_msg *msg = priv->msg;
-
- if (status & XLR_I2C_SDOEMPTY)
- xlr_i2c_wreg(priv->iobase, XLR_I2C_DATAOUT,
- msg->buf[priv->pos++]);
-}
-
-static void xlr_i2c_rx_irq(struct xlr_i2c_private *priv, u32 status)
-{
- struct i2c_msg *msg = priv->msg;
-
- if (status & XLR_I2C_RXRDY)
- msg->buf[priv->pos++] =
- xlr_i2c_rdreg(priv->iobase, XLR_I2C_DATAIN);
-}
-
-static irqreturn_t xlr_i2c_irq(int irq, void *dev_id)
-{
- struct xlr_i2c_private *priv = dev_id;
- struct i2c_msg *msg = priv->msg;
- u32 int_stat, status;
-
- int_stat = xlr_i2c_rdreg(priv->iobase, XLR_I2C_INT_STAT);
- if (!int_stat)
- return IRQ_NONE;
-
- xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_STAT, int_stat);
-
- if (!msg)
- return IRQ_HANDLED;
-
- status = xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS);
-
- if (priv->pos < msg->len) {
- if (msg->flags & I2C_M_RD)
- xlr_i2c_rx_irq(priv, status);
- else
- xlr_i2c_tx_irq(priv, status);
- }
-
- if (!xlr_i2c_busy(priv, status))
- wake_up(&priv->wait);
-
- return IRQ_HANDLED;
-}
-
-static int xlr_i2c_tx(struct xlr_i2c_private *priv, u16 len,
- u8 *buf, u16 addr)
-{
- struct i2c_adapter *adap = &priv->adap;
- unsigned long timeout, stoptime, checktime;
- u32 i2c_status;
- int pos, timedout;
- u8 offset;
- u32 xfer;
-
- offset = buf[0];
- xlr_i2c_wreg(priv->iobase, XLR_I2C_ADDR, offset);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_DEVADDR, addr);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_CFG,
- XLR_I2C_CFG_ADDR | priv->cfg->cfg_extra);
-
- timeout = msecs_to_jiffies(XLR_I2C_TIMEOUT);
- stoptime = jiffies + timeout;
- timedout = 0;
-
- if (len == 1) {
- xlr_i2c_wreg(priv->iobase, XLR_I2C_BYTECNT, len - 1);
- xfer = XLR_I2C_STARTXFR_ND;
- pos = 1;
- } else {
- xlr_i2c_wreg(priv->iobase, XLR_I2C_BYTECNT, len - 2);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_DATAOUT, buf[1]);
- xfer = XLR_I2C_STARTXFR_WR;
- pos = 2;
- }
-
- priv->pos = pos;
-
-retry:
- /* retry can only happen on the first byte */
- xlr_i2c_wreg(priv->iobase, XLR_I2C_STARTXFR, xfer);
-
- if (priv->irq > 0)
- return xlr_i2c_wait(priv, XLR_I2C_TIMEOUT * len);
-
- while (!timedout) {
- checktime = jiffies;
- i2c_status = xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS);
-
- if ((i2c_status & XLR_I2C_SDOEMPTY) && pos < len) {
- xlr_i2c_wreg(priv->iobase, XLR_I2C_DATAOUT, buf[pos++]);
-
- /* reset timeout on successful xmit */
- stoptime = jiffies + timeout;
- }
- timedout = time_after(checktime, stoptime);
-
- if (i2c_status & XLR_I2C_ARB_STARTERR) {
- if (timedout)
- break;
- goto retry;
- }
-
- if (i2c_status & XLR_I2C_ACK_ERR)
- return -EIO;
-
- if (!xlr_i2c_busy(priv, i2c_status) && pos >= len)
- return 0;
- }
- dev_err(&adap->dev, "I2C transmit timeout\n");
- return -ETIMEDOUT;
-}
-
-static int xlr_i2c_rx(struct xlr_i2c_private *priv, u16 len, u8 *buf, u16 addr)
-{
- struct i2c_adapter *adap = &priv->adap;
- u32 i2c_status;
- unsigned long timeout, stoptime, checktime;
- int nbytes, timedout;
-
- xlr_i2c_wreg(priv->iobase, XLR_I2C_CFG,
- XLR_I2C_CFG_NOADDR | priv->cfg->cfg_extra);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_BYTECNT, len - 1);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_DEVADDR, addr);
-
- priv->pos = 0;
-
- timeout = msecs_to_jiffies(XLR_I2C_TIMEOUT);
- stoptime = jiffies + timeout;
- timedout = 0;
- nbytes = 0;
-retry:
- xlr_i2c_wreg(priv->iobase, XLR_I2C_STARTXFR, XLR_I2C_STARTXFR_RD);
-
- if (priv->irq > 0)
- return xlr_i2c_wait(priv, XLR_I2C_TIMEOUT * len);
-
- while (!timedout) {
- checktime = jiffies;
- i2c_status = xlr_i2c_rdreg(priv->iobase, XLR_I2C_STATUS);
- if (i2c_status & XLR_I2C_RXRDY) {
- if (nbytes >= len)
- return -EIO; /* should not happen */
-
- buf[nbytes++] =
- xlr_i2c_rdreg(priv->iobase, XLR_I2C_DATAIN);
-
- /* reset timeout on successful read */
- stoptime = jiffies + timeout;
- }
-
- timedout = time_after(checktime, stoptime);
- if (i2c_status & XLR_I2C_ARB_STARTERR) {
- if (timedout)
- break;
- goto retry;
- }
-
- if (i2c_status & XLR_I2C_ACK_ERR)
- return -EIO;
-
- if (!xlr_i2c_busy(priv, i2c_status))
- return 0;
- }
-
- dev_err(&adap->dev, "I2C receive timeout\n");
- return -ETIMEDOUT;
-}
-
-static int xlr_i2c_xfer(struct i2c_adapter *adap,
- struct i2c_msg *msgs, int num)
-{
- struct i2c_msg *msg;
- int i;
- int ret = 0;
- struct xlr_i2c_private *priv = i2c_get_adapdata(adap);
-
- ret = clk_enable(priv->clk);
- if (ret)
- return ret;
-
- if (priv->irq)
- xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_EN, 0xf);
-
-
- for (i = 0; ret == 0 && i < num; i++) {
- msg = &msgs[i];
- priv->msg = msg;
- if (msg->flags & I2C_M_RD)
- ret = xlr_i2c_rx(priv, msg->len, &msg->buf[0],
- msg->addr);
- else
- ret = xlr_i2c_tx(priv, msg->len, &msg->buf[0],
- msg->addr);
- }
-
- if (priv->irq)
- xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_EN, 0);
-
- clk_disable(priv->clk);
- priv->msg = NULL;
-
- return (ret != 0) ? ret : num;
-}
-
-static u32 xlr_func(struct i2c_adapter *adap)
-{
- /* Emulate SMBUS over I2C */
- return (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK) | I2C_FUNC_I2C;
-}
-
-static const struct i2c_algorithm xlr_i2c_algo = {
- .master_xfer = xlr_i2c_xfer,
- .functionality = xlr_func,
-};
-
-static const struct i2c_adapter_quirks xlr_i2c_quirks = {
- .flags = I2C_AQ_NO_ZERO_LEN,
-};
-
-static const struct xlr_i2c_config xlr_i2c_config_default = {
- .status_busy = XLR_I2C_BUS_BUSY,
- .cfg_extra = 0,
-};
-
-static const struct xlr_i2c_config xlr_i2c_config_tangox = {
- .flags = XLR_I2C_FLAG_IRQ,
- .status_busy = 0,
- .cfg_extra = 1 << 8,
-};
-
-static const struct of_device_id xlr_i2c_dt_ids[] = {
- {
- .compatible = "sigma,smp8642-i2c",
- .data = &xlr_i2c_config_tangox,
- },
- { }
-};
-MODULE_DEVICE_TABLE(of, xlr_i2c_dt_ids);
-
-static int xlr_i2c_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match;
- struct xlr_i2c_private *priv;
- struct clk *clk;
- unsigned long clk_rate;
- unsigned long clk_div;
- u32 busfreq;
- int irq;
- int ret;
-
- priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- match = of_match_device(xlr_i2c_dt_ids, &pdev->dev);
- if (match)
- priv->cfg = match->data;
- else
- priv->cfg = &xlr_i2c_config_default;
-
- priv->iobase = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(priv->iobase))
- return PTR_ERR(priv->iobase);
-
- irq = platform_get_irq(pdev, 0);
-
- if (irq > 0 && (priv->cfg->flags & XLR_I2C_FLAG_IRQ)) {
- priv->irq = irq;
-
- xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_EN, 0);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_INT_STAT, 0xf);
-
- ret = devm_request_irq(&pdev->dev, priv->irq, xlr_i2c_irq,
- IRQF_SHARED, dev_name(&pdev->dev),
- priv);
- if (ret)
- return ret;
-
- init_waitqueue_head(&priv->wait);
- }
-
- if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
- &busfreq))
- busfreq = I2C_MAX_STANDARD_MODE_FREQ;
-
- clk = devm_clk_get(&pdev->dev, NULL);
- if (!IS_ERR(clk)) {
- ret = clk_prepare_enable(clk);
- if (ret)
- return ret;
-
- clk_rate = clk_get_rate(clk);
- clk_div = DIV_ROUND_UP(clk_rate, 2 * busfreq);
- xlr_i2c_wreg(priv->iobase, XLR_I2C_CLKDIV, clk_div);
-
- clk_disable(clk);
- priv->clk = clk;
- }
-
- priv->adap.dev.parent = &pdev->dev;
- priv->adap.dev.of_node = pdev->dev.of_node;
- priv->adap.owner = THIS_MODULE;
- priv->adap.algo_data = priv;
- priv->adap.algo = &xlr_i2c_algo;
- priv->adap.quirks = &xlr_i2c_quirks;
- priv->adap.nr = pdev->id;
- priv->adap.class = I2C_CLASS_HWMON;
- snprintf(priv->adap.name, sizeof(priv->adap.name), "xlr-i2c");
-
- i2c_set_adapdata(&priv->adap, priv);
- ret = i2c_add_numbered_adapter(&priv->adap);
- if (ret < 0)
- goto err_unprepare_clk;
-
- platform_set_drvdata(pdev, priv);
- dev_info(&priv->adap.dev, "Added I2C Bus.\n");
- return 0;
-
-err_unprepare_clk:
- clk_unprepare(clk);
- return ret;
-}
-
-static int xlr_i2c_remove(struct platform_device *pdev)
-{
- struct xlr_i2c_private *priv;
-
- priv = platform_get_drvdata(pdev);
- i2c_del_adapter(&priv->adap);
- clk_unprepare(priv->clk);
-
- return 0;
-}
-
-static struct platform_driver xlr_i2c_driver = {
- .probe = xlr_i2c_probe,
- .remove = xlr_i2c_remove,
- .driver = {
- .name = "xlr-i2cbus",
- .of_match_table = xlr_i2c_dt_ids,
- },
-};
-
-module_platform_driver(xlr_i2c_driver);
-
-MODULE_AUTHOR("Ganesan Ramalingam <ganesanr@netlogicmicro.com>");
-MODULE_DESCRIPTION("XLR/XLS SoC I2C Controller driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:xlr-i2cbus");
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 73253e667de1..2c59dd748a49 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -953,6 +953,7 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
client->dev.of_node = of_node_get(info->of_node);
client->dev.fwnode = info->fwnode;
+ device_enable_async_suspend(&client->dev);
i2c_dev_set_name(adap, client, info);
if (info->swnode) {
@@ -1482,6 +1483,7 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
if (res)
goto out_reg;
+ device_enable_async_suspend(&adap->dev);
pm_runtime_no_callbacks(&adap->dev);
pm_suspend_ignore_children(&adap->dev, true);
pm_runtime_enable(&adap->dev);
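
Note: both core hunks opt I2C clients and adapters into asynchronous system suspend/resume, letting the PM core process them in parallel with other async-capable devices while still honouring parent/child ordering. The opt-in is a single call per device:

	device_enable_async_suspend(&client->dev);	/* declared in linux/pm.h */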
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index bac415a52b78..73a23e117ebe 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -7,6 +7,7 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
+#include <linux/overflow.h>
#include <linux/platform_data/i2c-mux-gpio.h>
#include <linux/platform_device.h>
#include <linux/module.h>
@@ -49,49 +50,11 @@ static int i2c_mux_gpio_deselect(struct i2c_mux_core *muxc, u32 chan)
return 0;
}
-#ifdef CONFIG_ACPI
-
-static int i2c_mux_gpio_get_acpi_adr(struct device *dev,
- struct fwnode_handle *fwdev,
- unsigned int *adr)
-
-{
- unsigned long long adr64;
- acpi_status status;
-
- status = acpi_evaluate_integer(ACPI_HANDLE_FWNODE(fwdev),
- METHOD_NAME__ADR,
- NULL, &adr64);
-
- if (!ACPI_SUCCESS(status)) {
- dev_err(dev, "Cannot get address\n");
- return -EINVAL;
- }
-
- *adr = adr64;
- if (*adr != adr64) {
- dev_err(dev, "Address out of range\n");
- return -ERANGE;
- }
-
- return 0;
-}
-
-#else
-
-static int i2c_mux_gpio_get_acpi_adr(struct device *dev,
- struct fwnode_handle *fwdev,
- unsigned int *adr)
-{
- return -EINVAL;
-}
-
-#endif
-
static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct device_node *np = dev->of_node;
struct device_node *adapter_np;
struct i2c_adapter *adapter = NULL;
@@ -99,7 +62,7 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
unsigned *values;
int rc, i = 0;
- if (is_of_node(dev->fwnode)) {
+ if (is_of_node(fwnode)) {
if (!np)
return -ENODEV;
@@ -111,7 +74,7 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
adapter = of_find_i2c_adapter_by_node(adapter_np);
of_node_put(adapter_np);
- } else if (is_acpi_node(dev->fwnode)) {
+ } else if (is_acpi_node(fwnode)) {
/*
* In ACPI land the mux should be a direct child of the i2c
* bus it muxes.
@@ -141,16 +104,16 @@ static int i2c_mux_gpio_probe_fw(struct gpiomux *mux,
fwnode_property_read_u32(child, "reg", values + i);
} else if (is_acpi_node(child)) {
- rc = i2c_mux_gpio_get_acpi_adr(dev, child, values + i);
+ rc = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), values + i);
if (rc)
- return rc;
+ return dev_err_probe(dev, rc, "Cannot get address\n");
}
i++;
}
mux->data.values = values;
- if (fwnode_property_read_u32(dev->fwnode, "idle-state", &mux->data.idle))
+ if (device_property_read_u32(dev, "idle-state", &mux->data.idle))
mux->data.idle = I2C_MUX_GPIO_NO_IDLE;
return 0;
@@ -190,7 +153,7 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
muxc = i2c_mux_alloc(parent, &pdev->dev, mux->data.n_values,
- ngpios * sizeof(*mux->gpios), 0,
+ array_size(ngpios, sizeof(*mux->gpios)), 0,
i2c_mux_gpio_select, NULL);
if (!muxc) {
ret = -ENOMEM;
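
Note: the mux change is one of the overflow-helper conversions; array_size() saturates at SIZE_MAX instead of wrapping, so a firmware-controlled count cannot yield a short allocation. For instance:

#include <linux/overflow.h>

	/* n * elem_size computed with saturation; SIZE_MAX makes the
	 * subsequent allocation fail instead of succeeding undersized
	 */
	size_t bytes = array_size(ngpios, sizeof(*mux->gpios));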
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index c3b4c677b442..dfe18dcd008d 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -343,7 +343,8 @@ struct bus_type i3c_bus_type = {
static enum i3c_addr_slot_status
i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
{
- int status, bitpos = addr * 2;
+ unsigned long status;
+ int bitpos = addr * 2;
if (addr > I2C_MAX_ADDR)
return I3C_ADDR_SLOT_RSVD;
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index 03a368da51b9..51a8608203de 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -793,6 +793,10 @@ static int dw_i3c_master_daa(struct i3c_master_controller *m)
return -ENOMEM;
pos = dw_i3c_master_get_free_pos(master);
+ if (pos < 0) {
+ dw_i3c_master_free_xfer(xfer);
+ return pos;
+ }
cmd = &xfer->cmds[0];
cmd->cmd_hi = 0x1;
cmd->cmd_lo = COMMAND_PORT_DEV_COUNT(master->maxdevs - pos) |
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 1b73647cc3b1..8c01123dc4ed 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -662,7 +662,7 @@ static int i3c_hci_init(struct i3c_hci *hci)
/* Make sure our data ordering fits the host's */
regval = reg_read(HC_CONTROL);
- if (IS_ENABLED(CONFIG_BIG_ENDIAN)) {
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
regval |= HC_CONTROL_DATA_BIG_ENDIAN;
reg_write(HC_CONTROL, regval);
diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
index 783e551a2c85..97bb49ff5b53 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
@@ -160,9 +160,7 @@ static int hci_dat_v1_get_index(struct i3c_hci *hci, u8 dev_addr)
unsigned int dat_idx;
u32 dat_w0;
- for (dat_idx = find_first_bit(hci->DAT_data, hci->DAT_entries);
- dat_idx < hci->DAT_entries;
- dat_idx = find_next_bit(hci->DAT_data, hci->DAT_entries, dat_idx)) {
+ for_each_set_bit(dat_idx, hci->DAT_data, hci->DAT_entries) {
dat_w0 = dat_w0_read(dat_idx);
if (FIELD_GET(DAT_0_DYNAMIC_ADDRESS, dat_w0) == dev_addr)
return dat_idx;
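
Note: for_each_set_bit() is the canonical replacement for a hand-rolled find_first_bit()/find_next_bit() loop; observe that the open-coded version above passes dat_idx rather than dat_idx + 1 to find_next_bit(), which re-finds the same bit. Usage, with names from the hunk:

#include <linux/bitops.h>

	unsigned int idx;

	for_each_set_bit(idx, hci->DAT_data, hci->DAT_entries) {
		/* body runs once per set bit, in ascending index order */
	}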
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index af873a9be050..2990ac9eaade 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -223,7 +223,7 @@ static int hci_dma_init(struct i3c_hci *hci)
}
if (nr_rings > XFER_RINGS)
nr_rings = XFER_RINGS;
- rings = kzalloc(sizeof(*rings) + nr_rings * sizeof(*rh), GFP_KERNEL);
+ rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL);
if (!rings)
return -ENOMEM;
hci->io_data = rings;
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
index 80beb1d5be8f..f109923f6c3f 100644
--- a/drivers/i3c/master/mipi-i3c-hci/hci.h
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -98,7 +98,7 @@ struct hci_xfer {
static inline struct hci_xfer *hci_alloc_xfer(unsigned int n)
{
- return kzalloc(sizeof(struct hci_xfer) * n, GFP_KERNEL);
+ return kcalloc(n, sizeof(struct hci_xfer), GFP_KERNEL);
}
static inline void hci_free_xfer(struct hci_xfer *xfer, unsigned int n)
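
Note: the two allocation hunks above are the same overflow hardening as the mux one: kcalloc() for n fixed-size elements, struct_size() for a header with a flexible trailing array, both saturating instead of wrapping. Side by side:

#include <linux/overflow.h>
#include <linux/slab.h>

	xfer  = kcalloc(n, sizeof(struct hci_xfer), GFP_KERNEL);	    /* n elements */
	rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL); /* header + flex array */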
diff --git a/drivers/i3c/master/svc-i3c-master.c b/drivers/i3c/master/svc-i3c-master.c
index 879e5a64acaf..7550dad64ecf 100644
--- a/drivers/i3c/master/svc-i3c-master.c
+++ b/drivers/i3c/master/svc-i3c-master.c
@@ -17,7 +17,9 @@
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
/* Master Mode Registers */
#define SVC_I3C_MCONFIG 0x000
@@ -119,6 +121,7 @@
#define SVC_MDYNADDR_ADDR(x) FIELD_PREP(GENMASK(7, 1), (x))
#define SVC_I3C_MAX_DEVS 32
+#define SVC_I3C_PM_TIMEOUT_MS 1000
/* This parameter depends on the implementation and may be tuned */
#define SVC_I3C_FIFO_SIZE 16
@@ -236,6 +239,40 @@ static void svc_i3c_master_disable_interrupts(struct svc_i3c_master *master)
writel(mask, master->regs + SVC_I3C_MINTCLR);
}
+static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
+{
+ /* Clear pending warnings */
+ writel(readl(master->regs + SVC_I3C_MERRWARN),
+ master->regs + SVC_I3C_MERRWARN);
+}
+
+static void svc_i3c_master_flush_fifo(struct svc_i3c_master *master)
+{
+ /* Flush FIFOs */
+ writel(SVC_I3C_MDATACTRL_FLUSHTB | SVC_I3C_MDATACTRL_FLUSHRB,
+ master->regs + SVC_I3C_MDATACTRL);
+}
+
+static void svc_i3c_master_reset_fifo_trigger(struct svc_i3c_master *master)
+{
+ u32 reg;
+
+	/* Set RX and TX trigger levels, flush FIFOs */
+ reg = SVC_I3C_MDATACTRL_FLUSHTB |
+ SVC_I3C_MDATACTRL_FLUSHRB |
+ SVC_I3C_MDATACTRL_UNLOCK_TRIG |
+ SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
+ SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
+ writel(reg, master->regs + SVC_I3C_MDATACTRL);
+}
+
+static void svc_i3c_master_reset(struct svc_i3c_master *master)
+{
+ svc_i3c_master_clear_merrwarn(master);
+ svc_i3c_master_reset_fifo_trigger(master);
+ svc_i3c_master_disable_interrupts(master);
+}
+
static inline struct svc_i3c_master *
to_svc_i3c_master(struct i3c_master_controller *master)
{
@@ -279,12 +316,6 @@ static void svc_i3c_master_emit_stop(struct svc_i3c_master *master)
udelay(1);
}
-static void svc_i3c_master_clear_merrwarn(struct svc_i3c_master *master)
-{
- writel(readl(master->regs + SVC_I3C_MERRWARN),
- master->regs + SVC_I3C_MERRWARN);
-}
-
static int svc_i3c_master_handle_ibi(struct svc_i3c_master *master,
struct i3c_dev_desc *dev)
{
@@ -449,13 +480,23 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
struct i3c_device_info info = {};
unsigned long fclk_rate, fclk_period_ns;
unsigned int high_period_ns, od_low_period_ns;
- u32 ppbaud, pplow, odhpp, odbaud, i2cbaud, reg;
+ u32 ppbaud, pplow, odhpp, odbaud, odstop, i2cbaud, reg;
int ret;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev,
+ "<%s> cannot resume i3c bus master, err: %d\n",
+ __func__, ret);
+ return ret;
+ }
+
/* Timings derivation */
fclk_rate = clk_get_rate(master->fclk);
- if (!fclk_rate)
- return -EINVAL;
+ if (!fclk_rate) {
+ ret = -EINVAL;
+ goto rpm_out;
+ }
fclk_period_ns = DIV_ROUND_UP(1000000000, fclk_rate);
@@ -479,6 +520,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
switch (bus->mode) {
case I3C_BUS_MODE_PURE:
i2cbaud = 0;
+ odstop = 0;
break;
case I3C_BUS_MODE_MIXED_FAST:
case I3C_BUS_MODE_MIXED_LIMITED:
@@ -487,6 +529,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
* between the high and low period does not really matter.
*/
i2cbaud = DIV_ROUND_UP(1000, od_low_period_ns) - 2;
+ odstop = 1;
break;
case I3C_BUS_MODE_MIXED_SLOW:
/*
@@ -494,15 +537,16 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
* constraints as the FM+ mode.
*/
i2cbaud = DIV_ROUND_UP(2500, od_low_period_ns) - 2;
+ odstop = 1;
break;
default:
- return -EINVAL;
+ goto rpm_out;
}
reg = SVC_I3C_MCONFIG_MASTER_EN |
SVC_I3C_MCONFIG_DISTO(0) |
SVC_I3C_MCONFIG_HKEEP(0) |
- SVC_I3C_MCONFIG_ODSTOP(0) |
+ SVC_I3C_MCONFIG_ODSTOP(odstop) |
SVC_I3C_MCONFIG_PPBAUD(ppbaud) |
SVC_I3C_MCONFIG_PPLOW(pplow) |
SVC_I3C_MCONFIG_ODBAUD(odbaud) |
@@ -514,7 +558,7 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
/* Master core's registration */
ret = i3c_master_get_free_addr(m, 0);
if (ret < 0)
- return ret;
+ goto rpm_out;
info.dyn_addr = ret;
@@ -523,21 +567,33 @@ static int svc_i3c_master_bus_init(struct i3c_master_controller *m)
ret = i3c_master_set_info(&master->base, &info);
if (ret)
- return ret;
+ goto rpm_out;
- svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
+rpm_out:
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
- return 0;
+ return ret;
}
static void svc_i3c_master_bus_cleanup(struct i3c_master_controller *m)
{
struct svc_i3c_master *master = to_svc_i3c_master(m);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return;
+ }
svc_i3c_master_disable_interrupts(master);
/* Disable master */
writel(0, master->regs + SVC_I3C_MCONFIG);
+
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
}
static int svc_i3c_master_reserve_slot(struct svc_i3c_master *master)
@@ -656,8 +712,10 @@ static int svc_i3c_master_readb(struct svc_i3c_master *master, u8 *dst,
u32 reg;
for (i = 0; i < len; i++) {
- ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
- SVC_I3C_MSTATUS_RXPEND(reg), 0, 1000);
+ ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
+ reg,
+ SVC_I3C_MSTATUS_RXPEND(reg),
+ 0, 1000);
if (ret)
return ret;
@@ -687,10 +745,11 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
* Either one slave will send its ID, or the assignment process
* is done.
*/
- ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
- SVC_I3C_MSTATUS_RXPEND(reg) |
- SVC_I3C_MSTATUS_MCTRLDONE(reg),
- 1, 1000);
+ ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
+ reg,
+ SVC_I3C_MSTATUS_RXPEND(reg) |
+ SVC_I3C_MSTATUS_MCTRLDONE(reg),
+ 1, 1000);
if (ret)
return ret;
@@ -744,11 +803,12 @@ static int svc_i3c_master_do_daa_locked(struct svc_i3c_master *master,
}
/* Wait for the slave to be ready to receive its address */
- ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
- SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
- SVC_I3C_MSTATUS_STATE_DAA(reg) &&
- SVC_I3C_MSTATUS_BETWEEN(reg),
- 0, 1000);
+ ret = readl_poll_timeout_atomic(master->regs + SVC_I3C_MSTATUS,
+ reg,
+ SVC_I3C_MSTATUS_MCTRLDONE(reg) &&
+ SVC_I3C_MSTATUS_STATE_DAA(reg) &&
+ SVC_I3C_MSTATUS_BETWEEN(reg),
+ 0, 1000);
if (ret)
return ret;
@@ -832,31 +892,36 @@ static int svc_i3c_master_do_daa(struct i3c_master_controller *m)
unsigned int dev_nb;
int ret, i;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return ret;
+ }
+
spin_lock_irqsave(&master->xferqueue.lock, flags);
ret = svc_i3c_master_do_daa_locked(master, addrs, &dev_nb);
spin_unlock_irqrestore(&master->xferqueue.lock, flags);
- if (ret)
- goto emit_stop;
+ if (ret) {
+ svc_i3c_master_emit_stop(master);
+ svc_i3c_master_clear_merrwarn(master);
+ goto rpm_out;
+ }
/* Register all devices who participated to the core */
for (i = 0; i < dev_nb; i++) {
ret = i3c_master_add_i3c_dev_locked(m, addrs[i]);
if (ret)
- return ret;
+ goto rpm_out;
}
/* Configure IBI auto-rules */
ret = svc_i3c_update_ibirules(master);
- if (ret) {
+ if (ret)
dev_err(master->dev, "Cannot handle such a list of devices");
- return ret;
- }
- return 0;
-
-emit_stop:
- svc_i3c_master_emit_stop(master);
- svc_i3c_master_clear_merrwarn(master);
+rpm_out:
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
return ret;
}
@@ -864,27 +929,35 @@ emit_stop:
static int svc_i3c_master_read(struct svc_i3c_master *master,
u8 *in, unsigned int len)
{
- int offset = 0, i, ret;
- u32 mdctrl;
+ int offset = 0, i;
+ u32 mdctrl, mstatus;
+ bool completed = false;
+ unsigned int count;
+ unsigned long start = jiffies;
- while (offset < len) {
- unsigned int count;
+ while (!completed) {
+ mstatus = readl(master->regs + SVC_I3C_MSTATUS);
+ if (SVC_I3C_MSTATUS_COMPLETE(mstatus) != 0)
+ completed = true;
- ret = readl_poll_timeout(master->regs + SVC_I3C_MDATACTRL,
- mdctrl,
- !(mdctrl & SVC_I3C_MDATACTRL_RXEMPTY),
- 0, 1000);
- if (ret)
- return ret;
+ if (time_after(jiffies, start + msecs_to_jiffies(1000))) {
+ dev_dbg(master->dev, "I3C read timeout\n");
+ return -ETIMEDOUT;
+ }
+ mdctrl = readl(master->regs + SVC_I3C_MDATACTRL);
count = SVC_I3C_MDATACTRL_RXCOUNT(mdctrl);
+ if (offset + count > len) {
+ dev_err(master->dev, "I3C receive length too long!\n");
+ return -EINVAL;
+ }
for (i = 0; i < count; i++)
in[offset + i] = readl(master->regs + SVC_I3C_MRDATAB);
offset += count;
}
- return 0;
+ return offset;
}
static int svc_i3c_master_write(struct svc_i3c_master *master,
@@ -917,7 +990,7 @@ static int svc_i3c_master_write(struct svc_i3c_master *master,
static int svc_i3c_master_xfer(struct svc_i3c_master *master,
bool rnw, unsigned int xfer_type, u8 addr,
u8 *in, const u8 *out, unsigned int xfer_len,
- unsigned int read_len, bool continued)
+ unsigned int *read_len, bool continued)
{
u32 reg;
int ret;
@@ -927,7 +1000,7 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
SVC_I3C_MCTRL_IBIRESP_NACK |
SVC_I3C_MCTRL_DIR(rnw) |
SVC_I3C_MCTRL_ADDR(addr) |
- SVC_I3C_MCTRL_RDTERM(read_len),
+ SVC_I3C_MCTRL_RDTERM(*read_len),
master->regs + SVC_I3C_MCTRL);
ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
@@ -939,17 +1012,27 @@ static int svc_i3c_master_xfer(struct svc_i3c_master *master,
ret = svc_i3c_master_read(master, in, xfer_len);
else
ret = svc_i3c_master_write(master, out, xfer_len);
- if (ret)
+ if (ret < 0)
goto emit_stop;
+ if (rnw)
+ *read_len = ret;
+
ret = readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
SVC_I3C_MSTATUS_COMPLETE(reg), 0, 1000);
if (ret)
goto emit_stop;
- if (!continued)
+ writel(SVC_I3C_MINT_COMPLETE, master->regs + SVC_I3C_MSTATUS);
+
+ if (!continued) {
svc_i3c_master_emit_stop(master);
+ /* Wait idle if stop is sent. */
+ readl_poll_timeout(master->regs + SVC_I3C_MSTATUS, reg,
+ SVC_I3C_MSTATUS_STATE_IDLE(reg), 0, 1000);
+ }
+
return 0;
emit_stop:
@@ -1007,17 +1090,29 @@ static void svc_i3c_master_start_xfer_locked(struct svc_i3c_master *master)
if (!xfer)
return;
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return;
+ }
+
+ svc_i3c_master_clear_merrwarn(master);
+ svc_i3c_master_flush_fifo(master);
+
for (i = 0; i < xfer->ncmds; i++) {
struct svc_i3c_cmd *cmd = &xfer->cmds[i];
ret = svc_i3c_master_xfer(master, cmd->rnw, xfer->type,
cmd->addr, cmd->in, cmd->out,
- cmd->len, cmd->read_len,
+ cmd->len, &cmd->read_len,
cmd->continued);
if (ret)
break;
}
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+
xfer->ret = ret;
complete(&xfer->comp);
@@ -1141,6 +1236,9 @@ static int svc_i3c_master_send_direct_ccc_cmd(struct svc_i3c_master *master,
if (!wait_for_completion_timeout(&xfer->comp, msecs_to_jiffies(1000)))
svc_i3c_master_dequeue_xfer(master, xfer);
+ if (cmd->read_len != xfer_len)
+ ccc->dests[0].payload.len = cmd->read_len;
+
ret = xfer->ret;
svc_i3c_master_free_xfer(xfer);
@@ -1291,6 +1389,16 @@ static void svc_i3c_master_free_ibi(struct i3c_dev_desc *dev)
static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(master->dev);
+ if (ret < 0) {
+ dev_err(master->dev, "<%s> Cannot get runtime PM.\n", __func__);
+ return ret;
+ }
+
+ svc_i3c_master_enable_interrupts(master, SVC_I3C_MINT_SLVSTART);
return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
}
@@ -1298,8 +1406,17 @@ static int svc_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
static int svc_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
{
struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct svc_i3c_master *master = to_svc_i3c_master(m);
+ int ret;
+
+ svc_i3c_master_disable_interrupts(master);
- return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+ ret = i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+
+ pm_runtime_mark_last_busy(master->dev);
+ pm_runtime_put_autosuspend(master->dev);
+
+ return ret;
}
static void svc_i3c_master_recycle_ibi_slot(struct i3c_dev_desc *dev,
@@ -1330,23 +1447,35 @@ static const struct i3c_master_controller_ops svc_i3c_master_ops = {
.disable_ibi = svc_i3c_master_disable_ibi,
};
-static void svc_i3c_master_reset(struct svc_i3c_master *master)
+static int svc_i3c_master_prepare_clks(struct svc_i3c_master *master)
{
- u32 reg;
+ int ret = 0;
- /* Clear pending warnings */
- writel(readl(master->regs + SVC_I3C_MERRWARN),
- master->regs + SVC_I3C_MERRWARN);
+ ret = clk_prepare_enable(master->pclk);
+ if (ret)
+ return ret;
- /* Set RX and TX tigger levels, flush FIFOs */
- reg = SVC_I3C_MDATACTRL_FLUSHTB |
- SVC_I3C_MDATACTRL_FLUSHRB |
- SVC_I3C_MDATACTRL_UNLOCK_TRIG |
- SVC_I3C_MDATACTRL_TXTRIG_FIFO_NOT_FULL |
- SVC_I3C_MDATACTRL_RXTRIG_FIFO_NOT_EMPTY;
- writel(reg, master->regs + SVC_I3C_MDATACTRL);
+ ret = clk_prepare_enable(master->fclk);
+ if (ret) {
+ clk_disable_unprepare(master->pclk);
+ return ret;
+ }
- svc_i3c_master_disable_interrupts(master);
+ ret = clk_prepare_enable(master->sclk);
+ if (ret) {
+ clk_disable_unprepare(master->pclk);
+ clk_disable_unprepare(master->fclk);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void svc_i3c_master_unprepare_clks(struct svc_i3c_master *master)
+{
+ clk_disable_unprepare(master->pclk);
+ clk_disable_unprepare(master->fclk);
+ clk_disable_unprepare(master->sclk);
}
static int svc_i3c_master_probe(struct platform_device *pdev)
@@ -1381,26 +1510,16 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
master->dev = dev;
- svc_i3c_master_reset(master);
-
- ret = clk_prepare_enable(master->pclk);
+ ret = svc_i3c_master_prepare_clks(master);
if (ret)
return ret;
- ret = clk_prepare_enable(master->fclk);
- if (ret)
- goto err_disable_pclk;
-
- ret = clk_prepare_enable(master->sclk);
- if (ret)
- goto err_disable_fclk;
-
INIT_WORK(&master->hj_work, svc_i3c_master_hj_work);
INIT_WORK(&master->ibi_work, svc_i3c_master_ibi_work);
ret = devm_request_irq(dev, master->irq, svc_i3c_master_irq_handler,
IRQF_NO_SUSPEND, "svc-i3c-irq", master);
if (ret)
- goto err_disable_sclk;
+ goto err_disable_clks;
master->free_slots = GENMASK(SVC_I3C_MAX_DEVS - 1, 0);
@@ -1414,27 +1533,38 @@ static int svc_i3c_master_probe(struct platform_device *pdev)
GFP_KERNEL);
if (!master->ibi.slots) {
ret = -ENOMEM;
- goto err_disable_sclk;
+ goto err_disable_clks;
}
platform_set_drvdata(pdev, master);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SVC_I3C_PM_TIMEOUT_MS);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ svc_i3c_master_reset(master);
+
/* Register the master */
ret = i3c_master_register(&master->base, &pdev->dev,
&svc_i3c_master_ops, false);
if (ret)
- goto err_disable_sclk;
+ goto rpm_disable;
- return 0;
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
-err_disable_sclk:
- clk_disable_unprepare(master->sclk);
+ return 0;
-err_disable_fclk:
- clk_disable_unprepare(master->fclk);
+rpm_disable:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
-err_disable_pclk:
- clk_disable_unprepare(master->pclk);
+err_disable_clks:
+ svc_i3c_master_unprepare_clks(master);
return ret;
}
@@ -1448,17 +1578,45 @@ static int svc_i3c_master_remove(struct platform_device *pdev)
if (ret)
return ret;
- clk_disable_unprepare(master->pclk);
- clk_disable_unprepare(master->fclk);
- clk_disable_unprepare(master->sclk);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
+static int __maybe_unused svc_i3c_runtime_suspend(struct device *dev)
+{
+ struct svc_i3c_master *master = dev_get_drvdata(dev);
+
+ svc_i3c_master_unprepare_clks(master);
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused svc_i3c_runtime_resume(struct device *dev)
+{
+ struct svc_i3c_master *master = dev_get_drvdata(dev);
+ int ret = 0;
+
+ pinctrl_pm_select_default_state(dev);
+ svc_i3c_master_prepare_clks(master);
+
+ return ret;
+}
+
+static const struct dev_pm_ops svc_i3c_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(svc_i3c_runtime_suspend,
+ svc_i3c_runtime_resume, NULL)
+};
+
static const struct of_device_id svc_i3c_master_of_match_tbl[] = {
{ .compatible = "silvaco,i3c-master" },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, svc_i3c_master_of_match_tbl);
static struct platform_driver svc_i3c_master = {
.probe = svc_i3c_master_probe,
@@ -1466,6 +1624,7 @@ static struct platform_driver svc_i3c_master = {
.driver = {
.name = "silvaco-i3c-master",
.of_match_table = svc_i3c_master_of_match_tbl,
+ .pm = &svc_i3c_pm_ops,
},
};
module_platform_driver(svc_i3c_master);
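
Note: the svc-i3c conversion is the standard runtime-PM-with-autosuspend probe sequence: hold a usage count while probing, then release it with autosuspend so the controller can power down between transfers. A skeleton of the ordering as used here (constants from the patch):

	pm_runtime_set_autosuspend_delay(dev, SVC_I3C_PM_TIMEOUT_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_get_noresume(dev);	/* usage count up; hardware already active */
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	/* ... hardware init and registration; on failure unwind with
	 * pm_runtime_dont_use_autosuspend()/put_noidle()/set_suspended()/disable()
	 */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* may now autosuspend when idle */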
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 2334ad249b46..b190846c3dc2 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -70,6 +70,7 @@ config IIO_TRIGGERED_EVENT
source "drivers/iio/accel/Kconfig"
source "drivers/iio/adc/Kconfig"
+source "drivers/iio/addac/Kconfig"
source "drivers/iio/afe/Kconfig"
source "drivers/iio/amplifiers/Kconfig"
source "drivers/iio/cdc/Kconfig"
@@ -77,6 +78,7 @@ source "drivers/iio/chemical/Kconfig"
source "drivers/iio/common/Kconfig"
source "drivers/iio/dac/Kconfig"
source "drivers/iio/dummy/Kconfig"
+source "drivers/iio/filter/Kconfig"
source "drivers/iio/frequency/Kconfig"
source "drivers/iio/gyro/Kconfig"
source "drivers/iio/health/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 65e39bd4f934..3be08cdadd7e 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
obj-y += accel/
obj-y += adc/
+obj-y += addac/
obj-y += afe/
obj-y += amplifiers/
obj-y += buffer/
@@ -24,6 +25,7 @@ obj-y += common/
obj-y += dac/
obj-y += dummy/
obj-y += gyro/
+obj-y += filter/
obj-y += frequency/
obj-y += health/
obj-y += humidity/
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 2edfcb4819b7..d8a454c266d5 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -658,7 +658,7 @@ static const struct iio_chan_spec_ext_info bma023_ext_info[] = {
static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
IIO_ENUM("power_mode", IIO_SHARED_BY_TYPE, &bma180_power_mode_enum),
- IIO_ENUM_AVAILABLE("power_mode", &bma180_power_mode_enum),
+ IIO_ENUM_AVAILABLE("power_mode", IIO_SHARED_BY_TYPE, &bma180_power_mode_enum),
IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, bma180_accel_get_mount_matrix),
{ }
};
@@ -938,7 +938,7 @@ static int bma180_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
data->client = client;
if (client->dev.of_node)
- chip = (enum chip_ids)of_device_get_match_data(dev);
+ chip = (uintptr_t)of_device_get_match_data(dev);
else
chip = id->driver_data;
data->part_info = &bma180_part_info[chip];
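
Note: the bma180 cast through uintptr_t is the usual cure for clang's pointer-to-enum warning; match data is a void * carrying a small integer, so narrow it via the pointer-sized integer type rather than casting a pointer straight to an enum:

	chip = (uintptr_t)of_device_get_match_data(dev);	/* not (enum chip_ids)ptr */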
diff --git a/drivers/iio/accel/bma220_spi.c b/drivers/iio/accel/bma220_spi.c
index bc4c626e454d..74024d7ce5ac 100644
--- a/drivers/iio/accel/bma220_spi.c
+++ b/drivers/iio/accel/bma220_spi.c
@@ -27,7 +27,6 @@
#define BMA220_CHIP_ID 0xDD
#define BMA220_READ_MASK BIT(7)
#define BMA220_RANGE_MASK GENMASK(1, 0)
-#define BMA220_DATA_SHIFT 2
#define BMA220_SUSPEND_SLEEP 0xFF
#define BMA220_SUSPEND_WAKE 0x00
@@ -45,7 +44,7 @@
.sign = 's', \
.realbits = 6, \
.storagebits = 8, \
- .shift = BMA220_DATA_SHIFT, \
+ .shift = 2, \
.endianness = IIO_CPU, \
}, \
}
@@ -125,7 +124,8 @@ static int bma220_read_raw(struct iio_dev *indio_dev,
ret = bma220_read_reg(data->spi_device, chan->address);
if (ret < 0)
return -EINVAL;
- *val = sign_extend32(ret >> BMA220_DATA_SHIFT, 5);
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
ret = bma220_read_reg(data->spi_device, BMA220_REG_RANGE);
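
Note: several of the accel hunks in this series replace magic shift/width constants with the channel's own scan_type, so the raw-read path can never drift from the buffer description. The recurring idiom (raw and chan are assumed from the surrounding read_raw callback):

#include <linux/bitops.h>

	/* shift drops status bits; realbits - 1 is the sign bit position
	 * of the left-justified sample
	 */
	*val = sign_extend32(raw >> chan->scan_type.shift,
			     chan->scan_type.realbits - 1);
	return IIO_VAL_INT;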
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index b0678c351e82..e6081dd0a880 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -170,7 +170,7 @@ static const struct {
{1000, 0, 0x0E},
{2000, 0, 0x0F} };
-static const struct {
+static __maybe_unused const struct {
int bw_bits;
int msec;
} bmc150_accel_sample_upd_time[] = { {0x08, 64},
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 24c9387c2968..0fe570316848 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -315,7 +315,7 @@ static const char *const kxtf9_samp_freq_avail =
"25 50 100 200 400 800";
/* Refer to section 4 of the specification */
-static const struct {
+static __maybe_unused const struct {
int odr_bits;
int usec;
} odr_start_up_times[KX_MAX_CHIPS][12] = {
@@ -927,7 +927,8 @@ static int kxcjk1013_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->mutex);
return ret;
}
- *val = sign_extend32(ret >> 4, 11);
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
ret = kxcjk1013_set_power_state(data, false);
}
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index 777c6c384b09..e6739ba74edf 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -134,7 +134,8 @@ static int mma7455_read_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- *val = sign_extend32(le16_to_cpu(data), 9);
+ *val = sign_extend32(le16_to_cpu(data),
+ chan->scan_type.realbits - 1);
return IIO_VAL_INT;
diff --git a/drivers/iio/accel/mma7660.c b/drivers/iio/accel/mma7660.c
index cd6cdf2c51b0..24b83ccdb950 100644
--- a/drivers/iio/accel/mma7660.c
+++ b/drivers/iio/accel/mma7660.c
@@ -210,10 +210,16 @@ static int mma7660_probe(struct i2c_client *client,
static int mma7660_remove(struct i2c_client *client)
{
struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ int ret;
iio_device_unregister(indio_dev);
- return mma7660_set_mode(iio_priv(indio_dev), MMA7660_MODE_STANDBY);
+ ret = mma7660_set_mode(iio_priv(indio_dev), MMA7660_MODE_STANDBY);
+ if (ret)
+ dev_warn(&client->dev, "Failed to put device in stand-by mode (%pe), ignoring\n",
+ ERR_PTR(ret));
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
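
Note: the mma7660 hunk reflects the rule that an i2c remove() callback cannot meaningfully fail: the core removes the device regardless, so errors from the final power-down are logged and swallowed. A sketch with a hypothetical demo_power_down() helper:

static int demo_remove(struct i2c_client *client)
{
	int ret = demo_power_down(client);	/* hypothetical helper */

	if (ret)
		dev_warn(&client->dev,
			 "failed to power down (%pe), ignoring\n", ERR_PTR(ret));

	return 0;	/* never abort removal */
}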
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 09c7f10fefb6..64b82b4503ad 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1053,7 +1053,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
{
struct iio_dev *indio_dev = p;
struct mma8452_data *data = iio_priv(indio_dev);
- int ret = IRQ_NONE;
+ irqreturn_t ret = IRQ_NONE;
int src;
src = i2c_smbus_read_byte_data(data->client, MMA8452_INT_SRC);
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index ba3ecb3b57dc..0570ab1cc064 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -917,7 +917,7 @@ static const struct iio_enum mma9553_calibgender_enum = {
static const struct iio_chan_spec_ext_info mma9553_ext_info[] = {
IIO_ENUM("calibgender", IIO_SHARED_BY_TYPE, &mma9553_calibgender_enum),
- IIO_ENUM_AVAILABLE("calibgender", &mma9553_calibgender_enum),
+ IIO_ENUM_AVAILABLE("calibgender", IIO_SHARED_BY_TYPE, &mma9553_calibgender_enum),
{},
};
diff --git a/drivers/iio/accel/sca3000.c b/drivers/iio/accel/sca3000.c
index c6b75308148a..43ecacbdc95a 100644
--- a/drivers/iio/accel/sca3000.c
+++ b/drivers/iio/accel/sca3000.c
@@ -534,6 +534,13 @@ static const struct iio_chan_spec sca3000_channels_with_temp[] = {
BIT(IIO_CHAN_INFO_OFFSET),
/* No buffer support */
.scan_index = -1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 9,
+ .storagebits = 16,
+ .shift = 5,
+ .endianness = IIO_BE,
+ },
},
{
.type = IIO_ACCEL,
@@ -730,8 +737,9 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&st->lock);
return ret;
}
- *val = (be16_to_cpup((__be16 *)st->rx) >> 3) & 0x1FFF;
- *val = sign_extend32(*val, 12);
+ *val = sign_extend32(be16_to_cpup((__be16 *)st->rx) >>
+ chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
} else {
/* get the temperature when available */
ret = sca3000_read_data_short(st,
@@ -741,8 +749,9 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&st->lock);
return ret;
}
- *val = ((st->rx[0] & 0x3F) << 3) |
- ((st->rx[1] & 0xE0) >> 5);
+ *val = (be16_to_cpup((__be16 *)st->rx) >>
+ chan->scan_type.shift) &
+ GENMASK(chan->scan_type.realbits - 1, 0);
}
mutex_unlock(&st->lock);
return IIO_VAL_INT;
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index 43c621d0f11e..de0cdf8c1f94 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -355,7 +355,7 @@ static int stk8312_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
return ret;
}
- *val = sign_extend32(ret, 7);
+ *val = sign_extend32(ret, chan->scan_type.realbits - 1);
ret = stk8312_set_mode(data,
data->mode & (~STK8312_MODE_ACTIVE));
mutex_unlock(&data->lock);
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index e137a34b5c9a..517c57ed9e94 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -227,7 +227,8 @@ static int stk8ba50_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
return -EINVAL;
}
- *val = sign_extend32(ret >> STK8BA50_DATA_SHIFT, 9);
+ *val = sign_extend32(ret >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
stk8ba50_set_power(data, STK8BA50_MODE_SUSPEND);
mutex_unlock(&data->lock);
return IIO_VAL_INT;
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 3363af15a43f..4fdc8bfbb407 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -1146,7 +1146,7 @@ config TI_ADS7950
config TI_ADS8344
tristate "Texas Instruments ADS8344"
- depends on SPI && OF
+ depends on SPI
help
If you say yes here you get support for Texas Instruments ADS8344
ADC chips
@@ -1156,7 +1156,7 @@ config TI_ADS8344
config TI_ADS8688
tristate "Texas Instruments ADS8688"
- depends on SPI && OF
+ depends on SPI
help
If you say yes here you get support for Texas Instruments ADS8684 and
ADS8688 ADC chips
@@ -1166,7 +1166,7 @@ config TI_ADS8688
config TI_ADS124S08
tristate "Texas Instruments ADS124S08"
- depends on SPI && OF
+ depends on SPI
help
If you say yes here you get support for Texas Instruments ADS124S08
and ADS124S06 ADC chips
@@ -1288,4 +1288,19 @@ config XILINX_XADC
The driver can also be build as a module. If so, the module will be called
xilinx-xadc.
+config XILINX_AMS
+ tristate "Xilinx AMS driver"
+ depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on HAS_IOMEM
+ help
+ Say yes here to have support for the Xilinx AMS for Ultrascale/Ultrascale+
+ System Monitor. With this you can measure and monitor the voltage and
+ temperature values on the SoC.
+
+ The driver supports voltage and temperature monitoring on Xilinx Ultrascale
+ devices.
+
+ The driver can also be built as a module. If so, the module will be called
+ xilinx-ams.
+
endmenu
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index d3f53549720c..4a8f1833993b 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -115,4 +115,5 @@ obj-$(CONFIG_VF610_ADC) += vf610_adc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
xilinx-xadc-y := xilinx-xadc-core.o xilinx-xadc-events.o
obj-$(CONFIG_XILINX_XADC) += xilinx-xadc.o
+obj-$(CONFIG_XILINX_AMS) += xilinx-ams.o
obj-$(CONFIG_SD_ADC_MODULATOR) += sd_adc_modulator.o
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index e45c600fccc0..bc2cfa5f9592 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -347,7 +347,7 @@ static int ad7124_find_free_config_slot(struct ad7124_state *st)
{
unsigned int free_cfg_slot;
- free_cfg_slot = find_next_zero_bit(&st->cfg_slots_status, AD7124_MAX_CONFIGS, 0);
+ free_cfg_slot = find_first_zero_bit(&st->cfg_slots_status, AD7124_MAX_CONFIGS);
if (free_cfg_slot == AD7124_MAX_CONFIGS)
return -1;
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index 2121a812b0c3..cc990205f306 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -257,7 +257,8 @@ static const struct iio_chan_spec_ext_info ad7192_calibsys_ext_info[] = {
},
IIO_ENUM("sys_calibration_mode", IIO_SEPARATE,
&ad7192_syscalib_mode_enum),
- IIO_ENUM_AVAILABLE("sys_calibration_mode", &ad7192_syscalib_mode_enum),
+ IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE,
+ &ad7192_syscalib_mode_enum),
{}
};
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index a8ec3efd659e..1d345d66742d 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -159,7 +159,8 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
*val = (*val >> 2) & 0xfff;
if (chan->scan_type.sign == 's')
- *val = sign_extend32(*val, 11);
+ *val = sign_extend32(*val,
+ chan->scan_type.realbits - 1);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 9350ef1f63b5..4f82d7c9acfd 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -62,7 +62,7 @@ struct ad7606_chip_info {
* struct ad7606_state - driver instance specific data
* @dev pointer to kernel device
* @chip_info entry in the table of chips that describes this device
- * @reg regulator info for the the power supply of the device
+ * @reg regulator info for the power supply of the device
* @bops bus operations (SPI or parallel)
* @range voltage range selection, selects which scale to apply
* @oversampling oversampling selection
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 1d652d9b2f5c..cd418bd8bd87 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -467,9 +467,6 @@ int ad_sd_validate_trigger(struct iio_dev *indio_dev, struct iio_trigger *trig)
}
EXPORT_SYMBOL_GPL(ad_sd_validate_trigger);
-static const struct iio_trigger_ops ad_sd_trigger_ops = {
-};
-
static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_dev)
{
struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
@@ -486,7 +483,6 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de
if (sigma_delta->trig == NULL)
return -ENOMEM;
- sigma_delta->trig->ops = &ad_sd_trigger_ops;
init_completion(&sigma_delta->completion);
sigma_delta->irq_dis = true;
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 92a57cf10fba..854b1f81d807 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -1662,10 +1662,9 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev,
}
}
-static void at91_adc_dma_init(struct platform_device *pdev)
+static void at91_adc_dma_init(struct at91_adc_state *st)
{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct at91_adc_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->indio_dev->dev;
struct dma_slave_config config = {0};
/* we have 2 bytes for each channel */
unsigned int sample_size = st->soc_info.platform->nr_channels * 2;
@@ -1680,9 +1679,9 @@ static void at91_adc_dma_init(struct platform_device *pdev)
if (st->dma_st.dma_chan)
return;
- st->dma_st.dma_chan = dma_request_chan(&pdev->dev, "rx");
+ st->dma_st.dma_chan = dma_request_chan(dev, "rx");
if (IS_ERR(st->dma_st.dma_chan)) {
- dev_info(&pdev->dev, "can't get DMA channel\n");
+ dev_info(dev, "can't get DMA channel\n");
st->dma_st.dma_chan = NULL;
goto dma_exit;
}
@@ -1692,7 +1691,7 @@ static void at91_adc_dma_init(struct platform_device *pdev)
&st->dma_st.rx_dma_buf,
GFP_KERNEL);
if (!st->dma_st.rx_buf) {
- dev_info(&pdev->dev, "can't allocate coherent DMA area\n");
+ dev_info(dev, "can't allocate coherent DMA area\n");
goto dma_chan_disable;
}
@@ -1705,11 +1704,11 @@ static void at91_adc_dma_init(struct platform_device *pdev)
config.dst_maxburst = 1;
if (dmaengine_slave_config(st->dma_st.dma_chan, &config)) {
- dev_info(&pdev->dev, "can't configure DMA slave\n");
+ dev_info(dev, "can't configure DMA slave\n");
goto dma_free_area;
}
- dev_info(&pdev->dev, "using %s for rx DMA transfers\n",
+ dev_info(dev, "using %s for rx DMA transfers\n",
dma_chan_name(st->dma_st.dma_chan));
return;
@@ -1721,13 +1720,12 @@ dma_chan_disable:
dma_release_channel(st->dma_st.dma_chan);
st->dma_st.dma_chan = NULL;
dma_exit:
- dev_info(&pdev->dev, "continuing without DMA support\n");
+ dev_info(dev, "continuing without DMA support\n");
}
-static void at91_adc_dma_disable(struct platform_device *pdev)
+static void at91_adc_dma_disable(struct at91_adc_state *st)
{
- struct iio_dev *indio_dev = platform_get_drvdata(pdev);
- struct at91_adc_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->indio_dev->dev;
/* we have 2 bytes for each channel */
unsigned int sample_size = st->soc_info.platform->nr_channels * 2;
unsigned int pages = DIV_ROUND_UP(AT91_HWFIFO_MAX_SIZE *
@@ -1745,7 +1743,7 @@ static void at91_adc_dma_disable(struct platform_device *pdev)
dma_release_channel(st->dma_st.dma_chan);
st->dma_st.dma_chan = NULL;
- dev_info(&pdev->dev, "continuing without DMA support\n");
+ dev_info(dev, "continuing without DMA support\n");
}
static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
@@ -1771,9 +1769,9 @@ static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
*/
if (val == 1)
- at91_adc_dma_disable(to_platform_device(&indio_dev->dev));
+ at91_adc_dma_disable(st);
else if (val > 1)
- at91_adc_dma_init(to_platform_device(&indio_dev->dev));
+ at91_adc_dma_init(st);
/*
* We can start the DMA only after setting the watermark and
@@ -1781,7 +1779,7 @@ static int at91_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
*/
ret = at91_adc_buffer_prepare(indio_dev);
if (ret)
- at91_adc_dma_disable(to_platform_device(&indio_dev->dev));
+ at91_adc_dma_disable(st);
return ret;
}
@@ -1828,7 +1826,7 @@ static void at91_adc_hw_init(struct iio_dev *indio_dev)
static ssize_t at91_adc_get_fifo_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct at91_adc_state *st = iio_priv(indio_dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", !!st->dma_st.dma_chan);
@@ -1837,7 +1835,7 @@ static ssize_t at91_adc_get_fifo_state(struct device *dev,
static ssize_t at91_adc_get_watermark(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct at91_adc_state *st = iio_priv(indio_dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", st->dma_st.watermark);
@@ -2078,7 +2076,7 @@ static int at91_adc_probe(struct platform_device *pdev)
return 0;
dma_disable:
- at91_adc_dma_disable(pdev);
+ at91_adc_dma_disable(st);
per_clk_disable_unprepare:
clk_disable_unprepare(st->per_clk);
vref_disable:
@@ -2095,7 +2093,7 @@ static int at91_adc_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
- at91_adc_dma_disable(pdev);
+ at91_adc_dma_disable(st);
clk_disable_unprepare(st->per_clk);
diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c
index df99f1365c39..53bf7d4899d2 100644
--- a/drivers/iio/adc/axp20x_adc.c
+++ b/drivers/iio/adc/axp20x_adc.c
@@ -186,6 +186,8 @@ static const struct iio_chan_spec axp20x_adc_channels[] = {
AXP20X_BATT_CHRG_I_H),
AXP20X_ADC_CHANNEL(AXP20X_BATT_DISCHRG_I, "batt_dischrg_i", IIO_CURRENT,
AXP20X_BATT_DISCHRG_I_H),
+ AXP20X_ADC_CHANNEL(AXP20X_TS_IN, "ts_v", IIO_VOLTAGE,
+ AXP20X_TS_IN_H),
};
static const struct iio_chan_spec axp22x_adc_channels[] = {
@@ -203,6 +205,8 @@ static const struct iio_chan_spec axp22x_adc_channels[] = {
AXP20X_BATT_CHRG_I_H),
AXP20X_ADC_CHANNEL(AXP22X_BATT_DISCHRG_I, "batt_dischrg_i", IIO_CURRENT,
AXP20X_BATT_DISCHRG_I_H),
+ AXP20X_ADC_CHANNEL(AXP22X_TS_IN, "ts_v", IIO_VOLTAGE,
+ AXP22X_TS_ADC_H),
};
static const struct iio_chan_spec axp813_adc_channels[] = {
@@ -222,6 +226,8 @@ static const struct iio_chan_spec axp813_adc_channels[] = {
AXP20X_BATT_CHRG_I_H),
AXP20X_ADC_CHANNEL(AXP22X_BATT_DISCHRG_I, "batt_dischrg_i", IIO_CURRENT,
AXP20X_BATT_DISCHRG_I_H),
+ AXP20X_ADC_CHANNEL(AXP813_TS_IN, "ts_v", IIO_VOLTAGE,
+ AXP288_TS_ADC_H),
};
static int axp20x_adc_raw(struct iio_dev *indio_dev,
@@ -296,11 +302,36 @@ static int axp20x_adc_scale_voltage(int channel, int *val, int *val2)
*val2 = 400000;
return IIO_VAL_INT_PLUS_MICRO;
+ case AXP20X_TS_IN:
+ /* 0.8 mV per LSB */
+ *val = 0;
+ *val2 = 800000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
default:
return -EINVAL;
}
}
+static int axp22x_adc_scale_voltage(int channel, int *val, int *val2)
+{
+ switch (channel) {
+ case AXP22X_BATT_V:
+ /* 1.1 mV per LSB */
+ *val = 1;
+ *val2 = 100000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ case AXP22X_TS_IN:
+ /* 0.8 mV per LSB */
+ *val = 0;
+ *val2 = 800000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ default:
+ return -EINVAL;
+ }
+}
static int axp813_adc_scale_voltage(int channel, int *val, int *val2)
{
switch (channel) {
@@ -314,6 +345,12 @@ static int axp813_adc_scale_voltage(int channel, int *val, int *val2)
*val2 = 100000;
return IIO_VAL_INT_PLUS_MICRO;
+ case AXP813_TS_IN:
+ /* 0.8 mV per LSB */
+ *val = 0;
+ *val2 = 800000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
default:
return -EINVAL;
}
@@ -367,12 +404,7 @@ static int axp22x_adc_scale(struct iio_chan_spec const *chan, int *val,
{
switch (chan->type) {
case IIO_VOLTAGE:
- if (chan->channel != AXP22X_BATT_V)
- return -EINVAL;
-
- *val = 1;
- *val2 = 100000;
- return IIO_VAL_INT_PLUS_MICRO;
+ return axp22x_adc_scale_voltage(chan->channel, val, val2);
case IIO_CURRENT:
*val = 1;
@@ -476,6 +508,7 @@ static int axp22x_read_raw(struct iio_dev *indio_dev,
{
switch (mask) {
case IIO_CHAN_INFO_OFFSET:
+ /* For PMIC temp only */
*val = -2677;
return IIO_VAL_INT;
diff --git a/drivers/iio/adc/envelope-detector.c b/drivers/iio/adc/envelope-detector.c
index d73eac36153f..e911c25d106d 100644
--- a/drivers/iio/adc/envelope-detector.c
+++ b/drivers/iio/adc/envelope-detector.c
@@ -31,14 +31,13 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/iio/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
diff --git a/drivers/iio/adc/hi8435.c b/drivers/iio/adc/hi8435.c
index 8b353e26668e..e665e14c6e54 100644
--- a/drivers/iio/adc/hi8435.c
+++ b/drivers/iio/adc/hi8435.c
@@ -350,7 +350,7 @@ static const struct iio_enum hi8435_sensing_mode = {
static const struct iio_chan_spec_ext_info hi8435_ext_info[] = {
IIO_ENUM("sensing_mode", IIO_SEPARATE, &hi8435_sensing_mode),
- IIO_ENUM_AVAILABLE("sensing_mode", &hi8435_sensing_mode),
+ IIO_ENUM_AVAILABLE("sensing_mode", IIO_SHARED_BY_TYPE, &hi8435_sensing_mode),
{},
};
diff --git a/drivers/iio/adc/imx7d_adc.c b/drivers/iio/adc/imx7d_adc.c
index 092f8d296527..12f5b8e34c84 100644
--- a/drivers/iio/adc/imx7d_adc.c
+++ b/drivers/iio/adc/imx7d_adc.c
@@ -522,12 +522,11 @@ static int imx7d_adc_probe(struct platform_device *pdev)
imx7d_adc_feature_config(info);
- ret = imx7d_adc_enable(&indio_dev->dev);
+ ret = imx7d_adc_enable(dev);
if (ret)
return ret;
- ret = devm_add_action_or_reset(dev, __imx7d_adc_disable,
- &indio_dev->dev);
+ ret = devm_add_action_or_reset(dev, __imx7d_adc_disable, dev);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index a4b2ff9e0dd5..4f9992a51e64 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -550,7 +550,7 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
bool val;
int ret;
- ret = strtobool((const char *) buf, &val);
+ ret = strtobool(buf, &val);
if (ret)
return ret;
@@ -842,15 +842,13 @@ static int ina2xx_buffer_enable(struct iio_dev *indio_dev)
dev_dbg(&indio_dev->dev, "Async readout mode: %d\n",
chip->allow_async_readout);
- task = kthread_create(ina2xx_capture_thread, (void *)indio_dev,
- "%s:%d-%uus", indio_dev->name,
- iio_device_id(indio_dev),
- sampling_us);
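+	/*
+	 * kthread_run() starts the thread immediately. The capture loop
+	 * only exits via kthread_stop(), so no extra task reference is
+	 * needed here.
+	 */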
+ task = kthread_run(ina2xx_capture_thread, (void *)indio_dev,
+ "%s:%d-%uus", indio_dev->name,
+ iio_device_id(indio_dev),
+ sampling_us);
if (IS_ERR(task))
return PTR_ERR(task);
- get_task_struct(task);
- wake_up_process(task);
chip->task = task;
return 0;
@@ -862,7 +860,6 @@ static int ina2xx_buffer_disable(struct iio_dev *indio_dev)
if (chip->task) {
kthread_stop(chip->task);
- put_task_struct(chip->task);
chip->task = NULL;
}
@@ -974,7 +971,7 @@ static int ina2xx_probe(struct i2c_client *client,
}
if (client->dev.of_node)
- type = (enum ina2xx_ids)of_device_get_match_data(&client->dev);
+ type = (uintptr_t)of_device_get_match_data(&client->dev);
else
type = id->driver_data;
chip->config = &ina2xx_config[type];
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
index ceefa4d793cf..ae9c9384f23e 100644
--- a/drivers/iio/adc/lpc18xx_adc.c
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -157,9 +157,6 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(adc->clk),
"error getting clock\n");
- rate = clk_get_rate(adc->clk);
- clkdiv = DIV_ROUND_UP(rate, LPC18XX_ADC_CLK_TARGET);
-
adc->vref = devm_regulator_get(&pdev->dev, "vref");
if (IS_ERR(adc->vref))
return dev_err_probe(&pdev->dev, PTR_ERR(adc->vref),
@@ -192,6 +189,9 @@ static int lpc18xx_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
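+	/* Read the rate only after the clock has been prepared and enabled. */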
+ rate = clk_get_rate(adc->clk);
+ clkdiv = DIV_ROUND_UP(rate, LPC18XX_ADC_CLK_TARGET);
+
adc->cr_reg = (clkdiv << LPC18XX_ADC_CR_CLKDIV_SHIFT) |
LPC18XX_ADC_CR_PDN;
writel(adc->cr_reg, adc->base + LPC18XX_ADC_CR);
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index 052ab23f10b2..01a4275e9c46 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -22,7 +22,8 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#define DRIVER_NAME "max9611"
@@ -513,11 +514,9 @@ static int max9611_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
const char * const shunt_res_prop = "shunt-resistor-micro-ohms";
- const struct device_node *of_node = client->dev.of_node;
- const struct of_device_id *of_id =
- of_match_device(max9611_of_table, &client->dev);
struct max9611_dev *max9611;
struct iio_dev *indio_dev;
+ struct device *dev = &client->dev;
unsigned int of_shunt;
int ret;
@@ -528,15 +527,14 @@ static int max9611_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
max9611 = iio_priv(indio_dev);
- max9611->dev = &client->dev;
+ max9611->dev = dev;
max9611->i2c_client = client;
mutex_init(&max9611->lock);
- ret = of_property_read_u32(of_node, shunt_res_prop, &of_shunt);
+ ret = device_property_read_u32(dev, shunt_res_prop, &of_shunt);
if (ret) {
- dev_err(&client->dev,
- "Missing %s property for %pOF node\n",
- shunt_res_prop, of_node);
+ dev_err(dev, "Missing %s property for %pfw node\n",
+ shunt_res_prop, dev_fwnode(dev));
return ret;
}
max9611->shunt_resistor_uohm = of_shunt;
@@ -545,13 +543,13 @@ static int max9611_probe(struct i2c_client *client,
if (ret)
return ret;
- indio_dev->name = of_id->data;
+ indio_dev->name = device_get_match_data(dev);
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &indio_info;
indio_dev->channels = max9611_channels;
indio_dev->num_channels = ARRAY_SIZE(max9611_channels);
- return devm_iio_device_register(&client->dev, indio_dev);
+ return devm_iio_device_register(dev, indio_dev);
}
static struct i2c_driver max9611_driver = {
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index e573da5397bb..13535f148c4c 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -10,6 +10,8 @@
#include <linux/err.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -200,12 +202,13 @@ static const struct iio_info mcp3911_info = {
.write_raw = mcp3911_write_raw,
};
-static int mcp3911_config(struct mcp3911 *adc, struct device_node *of_node)
+static int mcp3911_config(struct mcp3911 *adc)
{
+ struct device *dev = &adc->spi->dev;
u32 configreg;
int ret;
- of_property_read_u32(of_node, "device-addr", &adc->dev_addr);
+ device_property_read_u32(dev, "device-addr", &adc->dev_addr);
if (adc->dev_addr > 3) {
dev_err(&adc->spi->dev,
"invalid device address (%i). Must be in range 0-3.\n",
@@ -289,7 +292,7 @@ static int mcp3911_probe(struct spi_device *spi)
}
}
- ret = mcp3911_config(adc, spi->dev.of_node);
+ ret = mcp3911_config(adc);
if (ret)
goto clk_disable;
diff --git a/drivers/iio/adc/rcar-gyroadc.c b/drivers/iio/adc/rcar-gyroadc.c
index a48895046408..727ea6c68049 100644
--- a/drivers/iio/adc/rcar-gyroadc.c
+++ b/drivers/iio/adc/rcar-gyroadc.c
@@ -511,8 +511,7 @@ static int rcar_gyroadc_probe(struct platform_device *pdev)
if (ret)
return ret;
- priv->model = (enum rcar_gyroadc_model)
- of_device_get_match_data(&pdev->dev);
+ priv->model = (uintptr_t)of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, indio_dev);
diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c
index 32fbf57c362f..9d5be52bd948 100644
--- a/drivers/iio/adc/rzg2l_adc.c
+++ b/drivers/iio/adc/rzg2l_adc.c
@@ -506,10 +506,8 @@ static int rzg2l_adc_probe(struct platform_device *pdev)
}
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "no irq resource\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_irq(dev, irq, rzg2l_adc_isr,
0, dev_name(dev), adc);
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 8cd258cb2682..897166d9e45c 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -2025,7 +2025,8 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
if (strlen(name) >= STM32_ADC_CH_SZ) {
dev_err(&indio_dev->dev, "Label %s exceeds %d characters\n",
name, STM32_ADC_CH_SZ);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err;
}
strncpy(adc->chan_name[val], name, STM32_ADC_CH_SZ);
ret = stm32_adc_populate_int_ch(indio_dev, name, val);
diff --git a/drivers/iio/adc/stmpe-adc.c b/drivers/iio/adc/stmpe-adc.c
index fba659bfdb40..d2d405388499 100644
--- a/drivers/iio/adc/stmpe-adc.c
+++ b/drivers/iio/adc/stmpe-adc.c
@@ -256,6 +256,7 @@ static int stmpe_adc_probe(struct platform_device *pdev)
struct stmpe_adc *info;
struct device_node *np;
u32 norequest_mask = 0;
+ unsigned long bits;
int irq_temp, irq_adc;
int num_chan = 0;
int i = 0;
@@ -309,8 +310,8 @@ static int stmpe_adc_probe(struct platform_device *pdev)
of_property_read_u32(np, "st,norequest-mask", &norequest_mask);
- for_each_clear_bit(i, (unsigned long *) &norequest_mask,
- (STMPE_ADC_LAST_NR + 1)) {
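+	/*
+	 * for_each_clear_bit() operates on an unsigned long; copy the u32
+	 * mask into one so the helper never reads past the variable on
+	 * 64-bit systems.
+	 */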
+ bits = norequest_mask;
+ for_each_clear_bit(i, &bits, (STMPE_ADC_LAST_NR + 1)) {
stmpe_adc_voltage_chan(&info->stmpe_adc_iio_channels[num_chan], i);
num_chan++;
}
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 16fc608db36a..bd48b073e720 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
+#include <linux/property.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -156,13 +157,16 @@ static int adc081c_probe(struct i2c_client *client,
{
struct iio_dev *iio;
struct adc081c *adc;
- struct adcxx1c_model *model;
+ const struct adcxx1c_model *model;
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EOPNOTSUPP;
- model = &adcxx1c_models[id->driver_data];
+ if (dev_fwnode(&client->dev))
+ model = device_get_match_data(&client->dev);
+ else
+ model = &adcxx1c_models[id->driver_data];
iio = devm_iio_device_alloc(&client->dev, sizeof(*adc));
if (!iio)
@@ -210,10 +214,17 @@ static const struct i2c_device_id adc081c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, adc081c_id);
+static const struct acpi_device_id adc081c_acpi_match[] = {
+ /* Used on some AAEON boards */
+ { "ADC081C", (kernel_ulong_t)&adcxx1c_models[ADC081C] },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, adc081c_acpi_match);
+
static const struct of_device_id adc081c_of_match[] = {
- { .compatible = "ti,adc081c" },
- { .compatible = "ti,adc101c" },
- { .compatible = "ti,adc121c" },
+ { .compatible = "ti,adc081c", .data = &adcxx1c_models[ADC081C] },
+ { .compatible = "ti,adc101c", .data = &adcxx1c_models[ADC101C] },
+ { .compatible = "ti,adc121c", .data = &adcxx1c_models[ADC121C] },
{ }
};
MODULE_DEVICE_TABLE(of, adc081c_of_match);
@@ -222,6 +233,7 @@ static struct i2c_driver adc081c_driver = {
.driver = {
.name = "adc081c",
.of_match_table = adc081c_of_match,
+ .acpi_match_table = adc081c_acpi_match,
},
.probe = adc081c_probe,
.id_table = adc081c_id,
diff --git a/drivers/iio/adc/ti-adc12138.c b/drivers/iio/adc/ti-adc12138.c
index fcd5d39dd03e..6eb62b564dae 100644
--- a/drivers/iio/adc/ti-adc12138.c
+++ b/drivers/iio/adc/ti-adc12138.c
@@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/clk.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -239,7 +240,8 @@ static int adc12138_read_raw(struct iio_dev *iio,
if (ret)
return ret;
- *value = sign_extend32(be16_to_cpu(data) >> 3, 12);
+ *value = sign_extend32(be16_to_cpu(data) >> channel->scan_type.shift,
+ channel->scan_type.realbits - 1);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
@@ -429,8 +431,8 @@ static int adc12138_probe(struct spi_device *spi)
return -EINVAL;
}
- ret = of_property_read_u32(spi->dev.of_node, "ti,acquisition-time",
- &adc->acquisition_time);
+ ret = device_property_read_u32(&spi->dev, "ti,acquisition-time",
+ &adc->acquisition_time);
if (ret)
adc->acquisition_time = 10;
@@ -516,8 +518,6 @@ static int adc12138_remove(struct spi_device *spi)
return 0;
}
-#ifdef CONFIG_OF
-
static const struct of_device_id adc12138_dt_ids[] = {
{ .compatible = "ti,adc12130", },
{ .compatible = "ti,adc12132", },
@@ -526,8 +526,6 @@ static const struct of_device_id adc12138_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, adc12138_dt_ids);
-#endif
-
static const struct spi_device_id adc12138_id[] = {
{ "adc12130", adc12130 },
{ "adc12132", adc12132 },
@@ -539,7 +537,7 @@ MODULE_DEVICE_TABLE(spi, adc12138_id);
static struct spi_driver adc12138_driver = {
.driver = {
.name = "adc12138",
- .of_match_table = of_match_ptr(adc12138_dt_ids),
+ .of_match_table = adc12138_dt_ids,
},
.probe = adc12138_probe,
.remove = adc12138_remove,
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index b0352e91ac16..068efbce1710 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -464,9 +464,7 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
mutex_lock(&data->lock);
switch (mask) {
- case IIO_CHAN_INFO_RAW: {
- int shift = chan->scan_type.shift;
-
+ case IIO_CHAN_INFO_RAW:
ret = iio_device_claim_direct_mode(indio_dev);
if (ret)
break;
@@ -487,7 +485,8 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
goto release_direct;
}
- *val = sign_extend32(*val >> shift, 15 - shift);
+ *val = sign_extend32(*val >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
ret = ads1015_set_power_state(data, false);
if (ret < 0)
@@ -497,7 +496,6 @@ static int ads1015_read_raw(struct iio_dev *indio_dev,
release_direct:
iio_device_release_direct_mode(indio_dev);
break;
- }
case IIO_CHAN_INFO_SCALE:
idx = data->channel_data[chan->address].pga;
*val = ads1015_fullscale_range[idx];
@@ -952,7 +950,7 @@ static int ads1015_probe(struct i2c_client *client,
indio_dev->name = ADS1015_DRV_NAME;
indio_dev->modes = INDIO_DIRECT_MODE;
- chip = (enum chip_ids)device_get_match_data(&client->dev);
+ chip = (uintptr_t)device_get_match_data(&client->dev);
if (chip == ADSXXXX)
chip = id->driver_data;
switch (chip) {
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index 17d0da5877a9..767b3b634809 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -8,8 +8,7 @@
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 79c803537dc4..2e24717d7f55 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -281,12 +281,10 @@ static int ads8688_write_reg_range(struct iio_dev *indio_dev,
enum ads8688_range range)
{
unsigned int tmp;
- int ret;
tmp = ADS8688_PROG_REG_RANGE_CH(chan->channel);
- ret = ads8688_prog_write(indio_dev, tmp, range);
- return ret;
+ return ads8688_prog_write(indio_dev, tmp, range);
}
static int ads8688_write_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
new file mode 100644
index 000000000000..8343c5f74121
--- /dev/null
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -0,0 +1,1451 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx AMS driver
+ *
+ * Copyright (C) 2021 Xilinx, Inc.
+ *
+ * Manish Narani <mnarani@xilinx.com>
+ * Rajnikant Bhojani <rajnikant.bhojani@xilinx.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/overflow.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+
+/* AMS registers definitions */
+#define AMS_ISR_0 0x010
+#define AMS_ISR_1 0x014
+#define AMS_IER_0 0x020
+#define AMS_IER_1 0x024
+#define AMS_IDR_0 0x028
+#define AMS_IDR_1 0x02C
+#define AMS_PS_CSTS 0x040
+#define AMS_PL_CSTS 0x044
+
+#define AMS_VCC_PSPLL0 0x060
+#define AMS_VCC_PSPLL3 0x06C
+#define AMS_VCCINT 0x078
+#define AMS_VCCBRAM 0x07C
+#define AMS_VCCAUX 0x080
+#define AMS_PSDDRPLL 0x084
+#define AMS_PSINTFPDDR 0x09C
+
+#define AMS_VCC_PSPLL0_CH 48
+#define AMS_VCC_PSPLL3_CH 51
+#define AMS_VCCINT_CH 54
+#define AMS_VCCBRAM_CH 55
+#define AMS_VCCAUX_CH 56
+#define AMS_PSDDRPLL_CH 57
+#define AMS_PSINTFPDDR_CH 63
+
+#define AMS_REG_CONFIG0 0x100
+#define AMS_REG_CONFIG1 0x104
+#define AMS_REG_CONFIG3 0x10C
+#define AMS_REG_CONFIG4 0x110
+#define AMS_REG_SEQ_CH0 0x120
+#define AMS_REG_SEQ_CH1 0x124
+#define AMS_REG_SEQ_CH2 0x118
+
+#define AMS_VUSER0_MASK BIT(0)
+#define AMS_VUSER1_MASK BIT(1)
+#define AMS_VUSER2_MASK BIT(2)
+#define AMS_VUSER3_MASK BIT(3)
+
+#define AMS_TEMP 0x000
+#define AMS_SUPPLY1 0x004
+#define AMS_SUPPLY2 0x008
+#define AMS_VP_VN 0x00C
+#define AMS_VREFP 0x010
+#define AMS_VREFN 0x014
+#define AMS_SUPPLY3 0x018
+#define AMS_SUPPLY4 0x034
+#define AMS_SUPPLY5 0x038
+#define AMS_SUPPLY6 0x03C
+#define AMS_SUPPLY7 0x200
+#define AMS_SUPPLY8 0x204
+#define AMS_SUPPLY9 0x208
+#define AMS_SUPPLY10 0x20C
+#define AMS_VCCAMS 0x210
+#define AMS_TEMP_REMOTE 0x214
+
+#define AMS_REG_VAUX(x) (0x40 + 4 * (x))
+
+#define AMS_PS_RESET_VALUE 0xFFFF
+#define AMS_PL_RESET_VALUE 0xFFFF
+
+#define AMS_CONF0_CHANNEL_NUM_MASK GENMASK(6, 0)
+
+#define AMS_CONF1_SEQ_MASK GENMASK(15, 12)
+#define AMS_CONF1_SEQ_DEFAULT FIELD_PREP(AMS_CONF1_SEQ_MASK, 0)
+#define AMS_CONF1_SEQ_CONTINUOUS FIELD_PREP(AMS_CONF1_SEQ_MASK, 1)
+#define AMS_CONF1_SEQ_SINGLE_CHANNEL FIELD_PREP(AMS_CONF1_SEQ_MASK, 2)
+
+#define AMS_REG_SEQ0_MASK GENMASK(15, 0)
+#define AMS_REG_SEQ2_MASK GENMASK(21, 16)
+#define AMS_REG_SEQ1_MASK GENMASK_ULL(37, 22)
+
+#define AMS_PS_SEQ_MASK GENMASK(21, 0)
+#define AMS_PL_SEQ_MASK GENMASK_ULL(59, 22)
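+/*
+ * A sequencer mask is programmed a register at a time: SEQ_CH0 takes
+ * bits 15:0, SEQ_CH2 bits 21:16 and SEQ_CH1 (PL only) bits 37:22 of
+ * the respective PS or PL scan mask, as used in
+ * ams_enable_channel_sequence() below.
+ */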
+
+#define AMS_ALARM_TEMP 0x140
+#define AMS_ALARM_SUPPLY1 0x144
+#define AMS_ALARM_SUPPLY2 0x148
+#define AMS_ALARM_SUPPLY3 0x160
+#define AMS_ALARM_SUPPLY4 0x164
+#define AMS_ALARM_SUPPLY5 0x168
+#define AMS_ALARM_SUPPLY6 0x16C
+#define AMS_ALARM_SUPPLY7 0x180
+#define AMS_ALARM_SUPPLY8 0x184
+#define AMS_ALARM_SUPPLY9 0x188
+#define AMS_ALARM_SUPPLY10 0x18C
+#define AMS_ALARM_VCCAMS 0x190
+#define AMS_ALARM_TEMP_REMOTE 0x194
+#define AMS_ALARM_THRESHOLD_OFF_10 0x10
+#define AMS_ALARM_THRESHOLD_OFF_20 0x20
+
+#define AMS_ALARM_THR_DIRECT_MASK BIT(1)
+#define AMS_ALARM_THR_MIN 0x0000
+#define AMS_ALARM_THR_MAX (BIT(16) - 1)
+
+#define AMS_ALARM_MASK GENMASK_ULL(63, 0)
+#define AMS_NO_OF_ALARMS 32
+#define AMS_PL_ALARM_START 16
+#define AMS_PL_ALARM_MASK GENMASK(31, 16)
+#define AMS_ISR0_ALARM_MASK GENMASK(31, 0)
+#define AMS_ISR1_ALARM_MASK (GENMASK(31, 29) | GENMASK(4, 0))
+#define AMS_ISR1_EOC_MASK BIT(3)
+#define AMS_ISR1_INTR_MASK GENMASK_ULL(63, 32)
+#define AMS_ISR0_ALARM_2_TO_0_MASK GENMASK(2, 0)
+#define AMS_ISR0_ALARM_6_TO_3_MASK GENMASK(6, 3)
+#define AMS_ISR0_ALARM_12_TO_7_MASK GENMASK(13, 8)
+#define AMS_CONF1_ALARM_2_TO_0_MASK GENMASK(3, 1)
+#define AMS_CONF1_ALARM_6_TO_3_MASK GENMASK(11, 8)
+#define AMS_CONF1_ALARM_12_TO_7_MASK GENMASK(5, 0)
+#define AMS_REGCFG1_ALARM_MASK \
+ (AMS_CONF1_ALARM_2_TO_0_MASK | AMS_CONF1_ALARM_6_TO_3_MASK | BIT(0))
+#define AMS_REGCFG3_ALARM_MASK AMS_CONF1_ALARM_12_TO_7_MASK
+
+#define AMS_PS_CSTS_PS_READY (BIT(27) | BIT(16))
+#define AMS_PL_CSTS_ACCESS_MASK BIT(1)
+
+#define AMS_PL_MAX_FIXED_CHANNEL 10
+#define AMS_PL_MAX_EXT_CHANNEL 20
+
+#define AMS_INIT_POLL_TIME_US 200
+#define AMS_INIT_TIMEOUT_US 10000
+#define AMS_UNMASK_TIMEOUT_MS 500
+
+/*
+ * The following scale and offset values are derived from
+ * UG580 (v1.7), December 20, 2016.
+ */
+#define AMS_SUPPLY_SCALE_1VOLT_mV 1000
+#define AMS_SUPPLY_SCALE_3VOLT_mV 3000
+#define AMS_SUPPLY_SCALE_6VOLT_mV 6000
+#define AMS_SUPPLY_SCALE_DIV_BIT 16
+
+#define AMS_TEMP_SCALE 509314
+#define AMS_TEMP_SCALE_DIV_BIT 16
+#define AMS_TEMP_OFFSET -((280230LL << 16) / 509314)
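+/*
+ * With the IIO (raw + offset) * scale convention, the constants above
+ * give T[milli-deg C] = raw * 509314 / 2^16 - 280230, i.e. the UG580
+ * transfer function T[deg C] = raw * 509.314 / 65536 - 280.23.
+ */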
+
+enum ams_alarm_bit {
+ AMS_ALARM_BIT_TEMP = 0,
+ AMS_ALARM_BIT_SUPPLY1 = 1,
+ AMS_ALARM_BIT_SUPPLY2 = 2,
+ AMS_ALARM_BIT_SUPPLY3 = 3,
+ AMS_ALARM_BIT_SUPPLY4 = 4,
+ AMS_ALARM_BIT_SUPPLY5 = 5,
+ AMS_ALARM_BIT_SUPPLY6 = 6,
+ AMS_ALARM_BIT_RESERVED = 7,
+ AMS_ALARM_BIT_SUPPLY7 = 8,
+ AMS_ALARM_BIT_SUPPLY8 = 9,
+ AMS_ALARM_BIT_SUPPLY9 = 10,
+ AMS_ALARM_BIT_SUPPLY10 = 11,
+ AMS_ALARM_BIT_VCCAMS = 12,
+ AMS_ALARM_BIT_TEMP_REMOTE = 13,
+};
+
+enum ams_seq {
+ AMS_SEQ_VCC_PSPLL = 0,
+ AMS_SEQ_VCC_PSBATT = 1,
+ AMS_SEQ_VCCINT = 2,
+ AMS_SEQ_VCCBRAM = 3,
+ AMS_SEQ_VCCAUX = 4,
+ AMS_SEQ_PSDDRPLL = 5,
+ AMS_SEQ_INTDDR = 6,
+};
+
+enum ams_ps_pl_seq {
+ AMS_SEQ_CALIB = 0,
+ AMS_SEQ_RSVD_1 = 1,
+ AMS_SEQ_RSVD_2 = 2,
+ AMS_SEQ_TEST = 3,
+ AMS_SEQ_RSVD_4 = 4,
+ AMS_SEQ_SUPPLY4 = 5,
+ AMS_SEQ_SUPPLY5 = 6,
+ AMS_SEQ_SUPPLY6 = 7,
+ AMS_SEQ_TEMP = 8,
+ AMS_SEQ_SUPPLY2 = 9,
+ AMS_SEQ_SUPPLY1 = 10,
+ AMS_SEQ_VP_VN = 11,
+ AMS_SEQ_VREFP = 12,
+ AMS_SEQ_VREFN = 13,
+ AMS_SEQ_SUPPLY3 = 14,
+ AMS_SEQ_CURRENT_MON = 15,
+ AMS_SEQ_SUPPLY7 = 16,
+ AMS_SEQ_SUPPLY8 = 17,
+ AMS_SEQ_SUPPLY9 = 18,
+ AMS_SEQ_SUPPLY10 = 19,
+ AMS_SEQ_VCCAMS = 20,
+ AMS_SEQ_TEMP_REMOTE = 21,
+ AMS_SEQ_MAX = 22
+};
+
+#define AMS_PS_SEQ_MAX AMS_SEQ_MAX
+#define AMS_SEQ(x) (AMS_SEQ_MAX + (x))
+#define PS_SEQ(x) (x)
+#define PL_SEQ(x) (AMS_PS_SEQ_MAX + (x))
+#define AMS_CTRL_SEQ_BASE (AMS_PS_SEQ_MAX * 3)
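+/*
+ * Resulting scan index layout: PS channels occupy indices 0..21, PL
+ * channels 22..65 (fixed channels first, then VAUX) and the VCC
+ * control channels start at AMS_CTRL_SEQ_BASE (66).
+ */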
+
+#define AMS_CHAN_TEMP(_scan_index, _addr) { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .event_spec = ams_temp_events, \
+ .scan_index = _scan_index, \
+ .num_event_specs = ARRAY_SIZE(ams_temp_events), \
+}
+
+#define AMS_CHAN_VOLTAGE(_scan_index, _addr, _alarm) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = (_addr), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .event_spec = (_alarm) ? ams_voltage_events : NULL, \
+ .scan_index = _scan_index, \
+ .num_event_specs = (_alarm) ? ARRAY_SIZE(ams_voltage_events) : 0, \
+}
+
+#define AMS_PS_CHAN_TEMP(_scan_index, _addr) \
+ AMS_CHAN_TEMP(PS_SEQ(_scan_index), _addr)
+#define AMS_PS_CHAN_VOLTAGE(_scan_index, _addr) \
+ AMS_CHAN_VOLTAGE(PS_SEQ(_scan_index), _addr, true)
+
+#define AMS_PL_CHAN_TEMP(_scan_index, _addr) \
+ AMS_CHAN_TEMP(PL_SEQ(_scan_index), _addr)
+#define AMS_PL_CHAN_VOLTAGE(_scan_index, _addr, _alarm) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(_scan_index), _addr, _alarm)
+#define AMS_PL_AUX_CHAN_VOLTAGE(_auxno) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(_auxno)), AMS_REG_VAUX(_auxno), false)
+#define AMS_CTRL_CHAN_VOLTAGE(_scan_index, _addr) \
+ AMS_CHAN_VOLTAGE(PL_SEQ(AMS_SEQ(AMS_SEQ(_scan_index))), _addr, false)
+
+/**
+ * struct ams - Xilinx AMS driver private state
+ * @base: mapped base address of the common AMS registers
+ * @ps_base: mapped base address of the PS (processing system) sysmon registers
+ * @pl_base: mapped base address of the PL (programmable logic) sysmon registers
+ * @clk: clocks associated with the device
+ * @dev: pointer to device struct
+ * @lock: protects against concurrent access from userspace
+ * @intr_lock: to protect interrupt mask values
+ * @alarm_mask: alarm configuration
+ * @current_masked_alarm: alarms currently masked while their condition is active
+ * @intr_mask: interrupt configuration
+ * @ams_unmask_work: re-enables alarm events once their condition clears
+ */
+struct ams {
+ void __iomem *base;
+ void __iomem *ps_base;
+ void __iomem *pl_base;
+ struct clk *clk;
+ struct device *dev;
+ struct mutex lock;
+ spinlock_t intr_lock;
+ unsigned int alarm_mask;
+ unsigned int current_masked_alarm;
+ u64 intr_mask;
+ struct delayed_work ams_unmask_work;
+};
+
+static inline void ams_ps_update_reg(struct ams *ams, unsigned int offset,
+ u32 mask, u32 data)
+{
+ u32 val, regval;
+
+ val = readl(ams->ps_base + offset);
+ regval = (val & ~mask) | (data & mask);
+ writel(regval, ams->ps_base + offset);
+}
+
+static inline void ams_pl_update_reg(struct ams *ams, unsigned int offset,
+ u32 mask, u32 data)
+{
+ u32 val, regval;
+
+ val = readl(ams->pl_base + offset);
+ regval = (val & ~mask) | (data & mask);
+ writel(regval, ams->pl_base + offset);
+}
+
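+/*
+ * Interrupts are controlled through separate enable (IER) and disable
+ * (IDR) registers rather than one read-modify-write mask register, so
+ * every mask update programs both: unmasked bits are written to IER
+ * and masked bits to IDR.
+ */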
+static void ams_update_intrmask(struct ams *ams, u64 mask, u64 val)
+{
+ u32 regval;
+
+ ams->intr_mask = (ams->intr_mask & ~mask) | (val & mask);
+
+ regval = ~(ams->intr_mask | ams->current_masked_alarm);
+ writel(regval, ams->base + AMS_IER_0);
+
+ regval = ~(FIELD_GET(AMS_ISR1_INTR_MASK, ams->intr_mask));
+ writel(regval, ams->base + AMS_IER_1);
+
+ regval = ams->intr_mask | ams->current_masked_alarm;
+ writel(regval, ams->base + AMS_IDR_0);
+
+ regval = FIELD_GET(AMS_ISR1_INTR_MASK, ams->intr_mask);
+ writel(regval, ams->base + AMS_IDR_1);
+}
+
+static void ams_disable_all_alarms(struct ams *ams)
+{
+ /* disable PS module alarm */
+ if (ams->ps_base) {
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
+ AMS_REGCFG1_ALARM_MASK);
+ ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
+ AMS_REGCFG3_ALARM_MASK);
+ }
+
+ /* disable PL module alarm */
+ if (ams->pl_base) {
+ ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK,
+ AMS_REGCFG1_ALARM_MASK);
+ ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK,
+ AMS_REGCFG3_ALARM_MASK);
+ }
+}
+
+static void ams_update_ps_alarm(struct ams *ams, unsigned long alarm_mask)
+{
+ u32 cfg;
+ u32 val;
+
+ val = FIELD_GET(AMS_ISR0_ALARM_2_TO_0_MASK, alarm_mask);
+ cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_2_TO_0_MASK, val));
+
+ val = FIELD_GET(AMS_ISR0_ALARM_6_TO_3_MASK, alarm_mask);
+ cfg &= ~(FIELD_PREP(AMS_CONF1_ALARM_6_TO_3_MASK, val));
+
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK, cfg);
+
+ val = FIELD_GET(AMS_ISR0_ALARM_12_TO_7_MASK, alarm_mask);
+ cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_12_TO_7_MASK, val));
+ ams_ps_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
+}
+
+static void ams_update_pl_alarm(struct ams *ams, unsigned long alarm_mask)
+{
+ unsigned long pl_alarm_mask;
+ u32 cfg;
+ u32 val;
+
+ pl_alarm_mask = FIELD_GET(AMS_PL_ALARM_MASK, alarm_mask);
+
+ val = FIELD_GET(AMS_ISR0_ALARM_2_TO_0_MASK, pl_alarm_mask);
+ cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_2_TO_0_MASK, val));
+
+ val = FIELD_GET(AMS_ISR0_ALARM_6_TO_3_MASK, pl_alarm_mask);
+ cfg &= ~(FIELD_PREP(AMS_CONF1_ALARM_6_TO_3_MASK, val));
+
+ ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_REGCFG1_ALARM_MASK, cfg);
+
+ val = FIELD_GET(AMS_ISR0_ALARM_12_TO_7_MASK, pl_alarm_mask);
+ cfg = ~(FIELD_PREP(AMS_CONF1_ALARM_12_TO_7_MASK, val));
+ ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
+}
+
+static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
+{
+ unsigned long flags;
+
+ if (ams->ps_base)
+ ams_update_ps_alarm(ams, alarm_mask);
+
+ if (ams->pl_base)
+ ams_update_pl_alarm(ams, alarm_mask);
+
+ spin_lock_irqsave(&ams->intr_lock, flags);
+ ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
+ spin_unlock_irqrestore(&ams->intr_lock, flags);
+}
+
+static void ams_enable_channel_sequence(struct iio_dev *indio_dev)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned long long scan_mask;
+ int i;
+ u32 regval;
+
+ /*
+	 * Enable the channel sequence. The first 22 bits of scan_mask
+	 * represent PS channels; the remaining bits represent PL channels.
+ */
+
+ /* Run calibration of PS & PL as part of the sequence */
+ scan_mask = BIT(0) | BIT(AMS_PS_SEQ_MAX);
+ for (i = 0; i < indio_dev->num_channels; i++)
+ scan_mask |= BIT_ULL(indio_dev->channels[i].scan_index);
+
+ if (ams->ps_base) {
+ /* put sysmon in a soft reset to change the sequence */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+
+ /* configure basic channels */
+ regval = FIELD_GET(AMS_REG_SEQ0_MASK, scan_mask);
+ writel(regval, ams->ps_base + AMS_REG_SEQ_CH0);
+
+ regval = FIELD_GET(AMS_REG_SEQ2_MASK, scan_mask);
+ writel(regval, ams->ps_base + AMS_REG_SEQ_CH2);
+
+ /* set continuous sequence mode */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_CONTINUOUS);
+ }
+
+ if (ams->pl_base) {
+ /* put sysmon in a soft reset to change the sequence */
+ ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+
+ /* configure basic channels */
+ scan_mask = FIELD_GET(AMS_PL_SEQ_MASK, scan_mask);
+
+ regval = FIELD_GET(AMS_REG_SEQ0_MASK, scan_mask);
+ writel(regval, ams->pl_base + AMS_REG_SEQ_CH0);
+
+ regval = FIELD_GET(AMS_REG_SEQ1_MASK, scan_mask);
+ writel(regval, ams->pl_base + AMS_REG_SEQ_CH1);
+
+ regval = FIELD_GET(AMS_REG_SEQ2_MASK, scan_mask);
+ writel(regval, ams->pl_base + AMS_REG_SEQ_CH2);
+
+ /* set continuous sequence mode */
+ ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_CONTINUOUS);
+ }
+}
+
+static int ams_init_device(struct ams *ams)
+{
+ u32 expect = AMS_PS_CSTS_PS_READY;
+ u32 reg, value;
+ int ret;
+
+ /* reset AMS */
+ if (ams->ps_base) {
+ writel(AMS_PS_RESET_VALUE, ams->ps_base + AMS_VP_VN);
+
+ ret = readl_poll_timeout(ams->base + AMS_PS_CSTS, reg, (reg & expect),
+ AMS_INIT_POLL_TIME_US, AMS_INIT_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ /* put sysmon in a default state */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+ }
+
+ if (ams->pl_base) {
+ value = readl(ams->base + AMS_PL_CSTS);
+ if (value == 0)
+ return 0;
+
+ writel(AMS_PL_RESET_VALUE, ams->pl_base + AMS_VP_VN);
+
+ /* put sysmon in a default state */
+ ams_pl_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_DEFAULT);
+ }
+
+ ams_disable_all_alarms(ams);
+
+ /* Disable interrupt */
+ ams_update_intrmask(ams, AMS_ALARM_MASK, AMS_ALARM_MASK);
+
+ /* Clear any pending interrupt */
+ writel(AMS_ISR0_ALARM_MASK, ams->base + AMS_ISR_0);
+ writel(AMS_ISR1_ALARM_MASK, ams->base + AMS_ISR_1);
+
+ return 0;
+}
+
+static int ams_enable_single_channel(struct ams *ams, unsigned int offset)
+{
+ u8 channel_num;
+
+ switch (offset) {
+ case AMS_VCC_PSPLL0:
+ channel_num = AMS_VCC_PSPLL0_CH;
+ break;
+ case AMS_VCC_PSPLL3:
+ channel_num = AMS_VCC_PSPLL3_CH;
+ break;
+ case AMS_VCCINT:
+ channel_num = AMS_VCCINT_CH;
+ break;
+ case AMS_VCCBRAM:
+ channel_num = AMS_VCCBRAM_CH;
+ break;
+ case AMS_VCCAUX:
+ channel_num = AMS_VCCAUX_CH;
+ break;
+ case AMS_PSDDRPLL:
+ channel_num = AMS_PSDDRPLL_CH;
+ break;
+ case AMS_PSINTFPDDR:
+ channel_num = AMS_PSINTFPDDR_CH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set single channel, sequencer off mode */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG1, AMS_CONF1_SEQ_MASK,
+ AMS_CONF1_SEQ_SINGLE_CHANNEL);
+
+ /* write the channel number */
+ ams_ps_update_reg(ams, AMS_REG_CONFIG0, AMS_CONF0_CHANNEL_NUM_MASK,
+ channel_num);
+
+ return 0;
+}
+
+static int ams_read_vcc_reg(struct ams *ams, unsigned int offset, u32 *data)
+{
+ u32 expect = AMS_ISR1_EOC_MASK;
+ u32 reg;
+ int ret;
+
+ ret = ams_enable_single_channel(ams, offset);
+ if (ret)
+ return ret;
+
+ ret = readl_poll_timeout(ams->base + AMS_ISR_1, reg, (reg & expect),
+ AMS_INIT_POLL_TIME_US, AMS_INIT_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ *data = readl(ams->base + offset);
+
+ return 0;
+}
+
+static int ams_get_ps_scale(int address)
+{
+ int val;
+
+ switch (address) {
+ case AMS_SUPPLY1:
+ case AMS_SUPPLY2:
+ case AMS_SUPPLY3:
+ case AMS_SUPPLY4:
+ case AMS_SUPPLY9:
+ case AMS_SUPPLY10:
+ case AMS_VCCAMS:
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ case AMS_SUPPLY5:
+ case AMS_SUPPLY6:
+ case AMS_SUPPLY7:
+ case AMS_SUPPLY8:
+ val = AMS_SUPPLY_SCALE_6VOLT_mV;
+ break;
+ default:
+ val = AMS_SUPPLY_SCALE_1VOLT_mV;
+ break;
+ }
+
+ return val;
+}
+
+static int ams_get_pl_scale(struct ams *ams, int address)
+{
+ int val, regval;
+
+ switch (address) {
+ case AMS_SUPPLY1:
+ case AMS_SUPPLY2:
+ case AMS_SUPPLY3:
+ case AMS_SUPPLY4:
+ case AMS_SUPPLY5:
+ case AMS_SUPPLY6:
+ case AMS_VCCAMS:
+ case AMS_VREFP:
+ case AMS_VREFN:
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ case AMS_SUPPLY7:
+ regval = readl(ams->pl_base + AMS_REG_CONFIG4);
+ if (FIELD_GET(AMS_VUSER0_MASK, regval))
+ val = AMS_SUPPLY_SCALE_6VOLT_mV;
+ else
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ case AMS_SUPPLY8:
+ regval = readl(ams->pl_base + AMS_REG_CONFIG4);
+ if (FIELD_GET(AMS_VUSER1_MASK, regval))
+ val = AMS_SUPPLY_SCALE_6VOLT_mV;
+ else
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ case AMS_SUPPLY9:
+ regval = readl(ams->pl_base + AMS_REG_CONFIG4);
+ if (FIELD_GET(AMS_VUSER2_MASK, regval))
+ val = AMS_SUPPLY_SCALE_6VOLT_mV;
+ else
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ case AMS_SUPPLY10:
+ regval = readl(ams->pl_base + AMS_REG_CONFIG4);
+ if (FIELD_GET(AMS_VUSER3_MASK, regval))
+ val = AMS_SUPPLY_SCALE_6VOLT_mV;
+ else
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ case AMS_VP_VN:
+ case AMS_REG_VAUX(0) ... AMS_REG_VAUX(15):
+ val = AMS_SUPPLY_SCALE_1VOLT_mV;
+ break;
+ default:
+ val = AMS_SUPPLY_SCALE_1VOLT_mV;
+ break;
+ }
+
+ return val;
+}
+
+static int ams_get_ctrl_scale(int address)
+{
+ int val;
+
+ switch (address) {
+ case AMS_VCC_PSPLL0:
+ case AMS_VCC_PSPLL3:
+ case AMS_VCCINT:
+ case AMS_VCCBRAM:
+ case AMS_VCCAUX:
+ case AMS_PSDDRPLL:
+ case AMS_PSINTFPDDR:
+ val = AMS_SUPPLY_SCALE_3VOLT_mV;
+ break;
+ default:
+ val = AMS_SUPPLY_SCALE_1VOLT_mV;
+ break;
+ }
+
+ return val;
+}
+
+static int ams_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&ams->lock);
+ if (chan->scan_index >= AMS_CTRL_SEQ_BASE) {
+ ret = ams_read_vcc_reg(ams, chan->address, val);
+ if (ret)
+ goto unlock_mutex;
+ ams_enable_channel_sequence(indio_dev);
+		} else if (chan->scan_index >= AMS_PS_SEQ_MAX) {
+			*val = readl(ams->pl_base + chan->address);
+		} else {
+			*val = readl(ams->ps_base + chan->address);
+		}
+
+ ret = IIO_VAL_INT;
+unlock_mutex:
+ mutex_unlock(&ams->lock);
+ return ret;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->scan_index < AMS_PS_SEQ_MAX)
+ *val = ams_get_ps_scale(chan->address);
+ else if (chan->scan_index >= AMS_PS_SEQ_MAX &&
+ chan->scan_index < AMS_CTRL_SEQ_BASE)
+ *val = ams_get_pl_scale(ams, chan->address);
+ else
+ *val = ams_get_ctrl_scale(chan->address);
+
+ *val2 = AMS_SUPPLY_SCALE_DIV_BIT;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_TEMP:
+ *val = AMS_TEMP_SCALE;
+ *val2 = AMS_TEMP_SCALE_DIV_BIT;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /* Only the temperature channel has an offset */
+ *val = AMS_TEMP_OFFSET;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
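+/*
+ * Each alarm's rising (upper) threshold sits at the channel's base
+ * alarm register; the falling (lower) threshold sits 0x10 bytes above
+ * it for the first alarm bank and 0x20 bytes above it from supply 7
+ * onwards.
+ */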
+static int ams_get_alarm_offset(int scan_index, enum iio_event_direction dir)
+{
+ int offset;
+
+ if (scan_index >= AMS_PS_SEQ_MAX)
+ scan_index -= AMS_PS_SEQ_MAX;
+
+ if (dir == IIO_EV_DIR_FALLING) {
+ if (scan_index < AMS_SEQ_SUPPLY7)
+ offset = AMS_ALARM_THRESHOLD_OFF_10;
+ else
+ offset = AMS_ALARM_THRESHOLD_OFF_20;
+ } else {
+ offset = 0;
+ }
+
+ switch (scan_index) {
+ case AMS_SEQ_TEMP:
+ return AMS_ALARM_TEMP + offset;
+ case AMS_SEQ_SUPPLY1:
+ return AMS_ALARM_SUPPLY1 + offset;
+ case AMS_SEQ_SUPPLY2:
+ return AMS_ALARM_SUPPLY2 + offset;
+ case AMS_SEQ_SUPPLY3:
+ return AMS_ALARM_SUPPLY3 + offset;
+ case AMS_SEQ_SUPPLY4:
+ return AMS_ALARM_SUPPLY4 + offset;
+ case AMS_SEQ_SUPPLY5:
+ return AMS_ALARM_SUPPLY5 + offset;
+ case AMS_SEQ_SUPPLY6:
+ return AMS_ALARM_SUPPLY6 + offset;
+ case AMS_SEQ_SUPPLY7:
+ return AMS_ALARM_SUPPLY7 + offset;
+ case AMS_SEQ_SUPPLY8:
+ return AMS_ALARM_SUPPLY8 + offset;
+ case AMS_SEQ_SUPPLY9:
+ return AMS_ALARM_SUPPLY9 + offset;
+ case AMS_SEQ_SUPPLY10:
+ return AMS_ALARM_SUPPLY10 + offset;
+ case AMS_SEQ_VCCAMS:
+ return AMS_ALARM_VCCAMS + offset;
+ case AMS_SEQ_TEMP_REMOTE:
+ return AMS_ALARM_TEMP_REMOTE + offset;
+ default:
+ return 0;
+ }
+}
+
+static const struct iio_chan_spec *ams_event_to_channel(struct iio_dev *dev,
+ u32 event)
+{
+ int scan_index = 0, i;
+
+ if (event >= AMS_PL_ALARM_START) {
+ event -= AMS_PL_ALARM_START;
+ scan_index = AMS_PS_SEQ_MAX;
+ }
+
+ switch (event) {
+ case AMS_ALARM_BIT_TEMP:
+ scan_index += AMS_SEQ_TEMP;
+ break;
+ case AMS_ALARM_BIT_SUPPLY1:
+ scan_index += AMS_SEQ_SUPPLY1;
+ break;
+ case AMS_ALARM_BIT_SUPPLY2:
+ scan_index += AMS_SEQ_SUPPLY2;
+ break;
+ case AMS_ALARM_BIT_SUPPLY3:
+ scan_index += AMS_SEQ_SUPPLY3;
+ break;
+ case AMS_ALARM_BIT_SUPPLY4:
+ scan_index += AMS_SEQ_SUPPLY4;
+ break;
+ case AMS_ALARM_BIT_SUPPLY5:
+ scan_index += AMS_SEQ_SUPPLY5;
+ break;
+ case AMS_ALARM_BIT_SUPPLY6:
+ scan_index += AMS_SEQ_SUPPLY6;
+ break;
+ case AMS_ALARM_BIT_SUPPLY7:
+ scan_index += AMS_SEQ_SUPPLY7;
+ break;
+ case AMS_ALARM_BIT_SUPPLY8:
+ scan_index += AMS_SEQ_SUPPLY8;
+ break;
+ case AMS_ALARM_BIT_SUPPLY9:
+ scan_index += AMS_SEQ_SUPPLY9;
+ break;
+ case AMS_ALARM_BIT_SUPPLY10:
+ scan_index += AMS_SEQ_SUPPLY10;
+ break;
+ case AMS_ALARM_BIT_VCCAMS:
+ scan_index += AMS_SEQ_VCCAMS;
+ break;
+ case AMS_ALARM_BIT_TEMP_REMOTE:
+ scan_index += AMS_SEQ_TEMP_REMOTE;
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < dev->num_channels; i++)
+ if (dev->channels[i].scan_index == scan_index)
+ break;
+
+ return &dev->channels[i];
+}
+
+static int ams_get_alarm_mask(int scan_index)
+{
+ int bit = 0;
+
+ if (scan_index >= AMS_PS_SEQ_MAX) {
+ bit = AMS_PL_ALARM_START;
+ scan_index -= AMS_PS_SEQ_MAX;
+ }
+
+ switch (scan_index) {
+ case AMS_SEQ_TEMP:
+ return BIT(AMS_ALARM_BIT_TEMP + bit);
+ case AMS_SEQ_SUPPLY1:
+ return BIT(AMS_ALARM_BIT_SUPPLY1 + bit);
+ case AMS_SEQ_SUPPLY2:
+ return BIT(AMS_ALARM_BIT_SUPPLY2 + bit);
+ case AMS_SEQ_SUPPLY3:
+ return BIT(AMS_ALARM_BIT_SUPPLY3 + bit);
+ case AMS_SEQ_SUPPLY4:
+ return BIT(AMS_ALARM_BIT_SUPPLY4 + bit);
+ case AMS_SEQ_SUPPLY5:
+ return BIT(AMS_ALARM_BIT_SUPPLY5 + bit);
+ case AMS_SEQ_SUPPLY6:
+ return BIT(AMS_ALARM_BIT_SUPPLY6 + bit);
+ case AMS_SEQ_SUPPLY7:
+ return BIT(AMS_ALARM_BIT_SUPPLY7 + bit);
+ case AMS_SEQ_SUPPLY8:
+ return BIT(AMS_ALARM_BIT_SUPPLY8 + bit);
+ case AMS_SEQ_SUPPLY9:
+ return BIT(AMS_ALARM_BIT_SUPPLY9 + bit);
+ case AMS_SEQ_SUPPLY10:
+ return BIT(AMS_ALARM_BIT_SUPPLY10 + bit);
+ case AMS_SEQ_VCCAMS:
+ return BIT(AMS_ALARM_BIT_VCCAMS + bit);
+ case AMS_SEQ_TEMP_REMOTE:
+ return BIT(AMS_ALARM_BIT_TEMP_REMOTE + bit);
+ default:
+ return 0;
+ }
+}
+
+static int ams_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ams *ams = iio_priv(indio_dev);
+
+ return !!(ams->alarm_mask & ams_get_alarm_mask(chan->scan_index));
+}
+
+static int ams_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int alarm;
+
+ alarm = ams_get_alarm_mask(chan->scan_index);
+
+ mutex_lock(&ams->lock);
+
+ if (state)
+ ams->alarm_mask |= alarm;
+ else
+ ams->alarm_mask &= ~alarm;
+
+ ams_update_alarm(ams, ams->alarm_mask);
+
+ mutex_unlock(&ams->lock);
+
+ return 0;
+}
+
+static int ams_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int offset = ams_get_alarm_offset(chan->scan_index, dir);
+
+ mutex_lock(&ams->lock);
+
+ if (chan->scan_index >= AMS_PS_SEQ_MAX)
+ *val = readl(ams->pl_base + offset);
+ else
+ *val = readl(ams->ps_base + offset);
+
+ mutex_unlock(&ams->lock);
+
+ return IIO_VAL_INT;
+}
+
+static int ams_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ unsigned int offset;
+
+ mutex_lock(&ams->lock);
+
+ /* Set temperature channel threshold to direct threshold */
+ if (chan->type == IIO_TEMP) {
+ offset = ams_get_alarm_offset(chan->scan_index, IIO_EV_DIR_FALLING);
+
+ if (chan->scan_index >= AMS_PS_SEQ_MAX)
+ ams_pl_update_reg(ams, offset,
+ AMS_ALARM_THR_DIRECT_MASK,
+ AMS_ALARM_THR_DIRECT_MASK);
+ else
+ ams_ps_update_reg(ams, offset,
+ AMS_ALARM_THR_DIRECT_MASK,
+ AMS_ALARM_THR_DIRECT_MASK);
+ }
+
+ offset = ams_get_alarm_offset(chan->scan_index, dir);
+ if (chan->scan_index >= AMS_PS_SEQ_MAX)
+ writel(val, ams->pl_base + offset);
+ else
+ writel(val, ams->ps_base + offset);
+
+ mutex_unlock(&ams->lock);
+
+ return 0;
+}
+
+static void ams_handle_event(struct iio_dev *indio_dev, u32 event)
+{
+ const struct iio_chan_spec *chan;
+
+ chan = ams_event_to_channel(indio_dev, event);
+
+ if (chan->type == IIO_TEMP) {
+ /*
+ * The temperature channel only supports over-temperature
+ * events.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns(indio_dev));
+ } else {
+ /*
+		 * For other channels we don't know whether it is an upper or
+ * lower threshold event. Userspace will have to check the
+ * channel value if it wants to know.
+ */
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(chan->type, chan->channel,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+ }
+}
+
+static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events)
+{
+ unsigned int bit;
+
+ for_each_set_bit(bit, &events, AMS_NO_OF_ALARMS)
+ ams_handle_event(indio_dev, bit);
+}
+
+/**
+ * ams_unmask_worker - ams alarm interrupt unmask worker
+ * @work: work to be done
+ *
+ * The ZynqMP threshold interrupts are level sensitive. Since we can't make the
+ * threshold condition go away from within the interrupt handler, we would
+ * re-enter the interrupt handler again and again for as long as the condition
+ * is present. To work around this we mask all active threshold interrupts in
+ * the interrupt handler and schedule delayed work that polls the interrupt
+ * status, unmasking an interrupt only once it is inactive again.
+ */
+static void ams_unmask_worker(struct work_struct *work)
+{
+ struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
+ unsigned int status, unmask;
+
+ spin_lock_irq(&ams->intr_lock);
+
+ status = readl(ams->base + AMS_ISR_0);
+
+ /* Clear those bits which are not active anymore */
+ unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
+
+ /* Clear status of disabled alarm */
+ unmask |= ams->intr_mask;
+
+ ams->current_masked_alarm &= status;
+
+ /* Also clear those which are masked out anyway */
+ ams->current_masked_alarm &= ~ams->intr_mask;
+
+ /* Clear the interrupts before we unmask them */
+ writel(unmask, ams->base + AMS_ISR_0);
+
+ ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
+
+ spin_unlock_irq(&ams->intr_lock);
+
+	/* If any alarm is still pending, re-schedule the unmask work */
+ if (ams->current_masked_alarm)
+ schedule_delayed_work(&ams->ams_unmask_work,
+ msecs_to_jiffies(AMS_UNMASK_TIMEOUT_MS));
+}
+
+static irqreturn_t ams_irq(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct ams *ams = iio_priv(indio_dev);
+ u32 isr0;
+
+ spin_lock(&ams->intr_lock);
+
+ isr0 = readl(ams->base + AMS_ISR_0);
+
+ /* Only process alarms that are not masked */
+ isr0 &= ~((ams->intr_mask & AMS_ISR0_ALARM_MASK) | ams->current_masked_alarm);
+ if (!isr0) {
+ spin_unlock(&ams->intr_lock);
+ return IRQ_NONE;
+ }
+
+ /* Clear interrupt */
+ writel(isr0, ams->base + AMS_ISR_0);
+
+ /* Mask the alarm interrupts until cleared */
+ ams->current_masked_alarm |= isr0;
+ ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
+
+ ams_handle_events(indio_dev, isr0);
+
+ schedule_delayed_work(&ams->ams_unmask_work,
+ msecs_to_jiffies(AMS_UNMASK_TIMEOUT_MS));
+
+ spin_unlock(&ams->intr_lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct iio_event_spec ams_temp_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) | BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
+static const struct iio_event_spec ams_voltage_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec ams_ps_channels[] = {
+ AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP),
+ AMS_PS_CHAN_TEMP(AMS_SEQ_TEMP_REMOTE, AMS_TEMP_REMOTE),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10),
+ AMS_PS_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS),
+};
+
+static const struct iio_chan_spec ams_pl_channels[] = {
+ AMS_PL_CHAN_TEMP(AMS_SEQ_TEMP, AMS_TEMP),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY1, AMS_SUPPLY1, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY2, AMS_SUPPLY2, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFP, AMS_VREFP, false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VREFN, AMS_VREFN, false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY3, AMS_SUPPLY3, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY4, AMS_SUPPLY4, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY5, AMS_SUPPLY5, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY6, AMS_SUPPLY6, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VCCAMS, AMS_VCCAMS, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_VP_VN, AMS_VP_VN, false),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY7, AMS_SUPPLY7, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY8, AMS_SUPPLY8, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY9, AMS_SUPPLY9, true),
+ AMS_PL_CHAN_VOLTAGE(AMS_SEQ_SUPPLY10, AMS_SUPPLY10, true),
+ AMS_PL_AUX_CHAN_VOLTAGE(0),
+ AMS_PL_AUX_CHAN_VOLTAGE(1),
+ AMS_PL_AUX_CHAN_VOLTAGE(2),
+ AMS_PL_AUX_CHAN_VOLTAGE(3),
+ AMS_PL_AUX_CHAN_VOLTAGE(4),
+ AMS_PL_AUX_CHAN_VOLTAGE(5),
+ AMS_PL_AUX_CHAN_VOLTAGE(6),
+ AMS_PL_AUX_CHAN_VOLTAGE(7),
+ AMS_PL_AUX_CHAN_VOLTAGE(8),
+ AMS_PL_AUX_CHAN_VOLTAGE(9),
+ AMS_PL_AUX_CHAN_VOLTAGE(10),
+ AMS_PL_AUX_CHAN_VOLTAGE(11),
+ AMS_PL_AUX_CHAN_VOLTAGE(12),
+ AMS_PL_AUX_CHAN_VOLTAGE(13),
+ AMS_PL_AUX_CHAN_VOLTAGE(14),
+ AMS_PL_AUX_CHAN_VOLTAGE(15),
+};
+
+static const struct iio_chan_spec ams_ctrl_channels[] = {
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSPLL, AMS_VCC_PSPLL0),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCC_PSBATT, AMS_VCC_PSPLL3),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCINT, AMS_VCCINT),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCBRAM, AMS_VCCBRAM),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_VCCAUX, AMS_VCCAUX),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_PSDDRPLL, AMS_PSDDRPLL),
+ AMS_CTRL_CHAN_VOLTAGE(AMS_SEQ_INTDDR, AMS_PSINTFPDDR),
+};
+
+static int ams_get_ext_chan(struct fwnode_handle *chan_node,
+ struct iio_chan_spec *channels, int num_channels)
+{
+ struct iio_chan_spec *chan;
+ struct fwnode_handle *child;
+ unsigned int reg, ext_chan;
+ int ret;
+
+ fwnode_for_each_child_node(chan_node, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret || reg > AMS_PL_MAX_EXT_CHANNEL + 30)
+ continue;
+
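+		/*
+		 * External channel "reg" values appear to start at 30 in the
+		 * binding; rebasing by (AMS_PL_MAX_FIXED_CHANNEL - 30) maps
+		 * each one onto its template entry in ams_pl_channels[].
+		 */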
+ chan = &channels[num_channels];
+ ext_chan = reg + AMS_PL_MAX_FIXED_CHANNEL - 30;
+ memcpy(chan, &ams_pl_channels[ext_chan], sizeof(*channels));
+
+ if (fwnode_property_read_bool(child, "xlnx,bipolar"))
+ chan->scan_type.sign = 's';
+
+ num_channels++;
+ }
+
+ return num_channels;
+}
+
+static void ams_iounmap_ps(void *data)
+{
+ struct ams *ams = data;
+
+ iounmap(ams->ps_base);
+}
+
+static void ams_iounmap_pl(void *data)
+{
+ struct ams *ams = data;
+
+ iounmap(ams->pl_base);
+}
+
+static int ams_init_module(struct iio_dev *indio_dev,
+ struct fwnode_handle *fwnode,
+ struct iio_chan_spec *channels)
+{
+ struct device *dev = indio_dev->dev.parent;
+ struct ams *ams = iio_priv(indio_dev);
+ int num_channels = 0;
+ int ret;
+
+ if (fwnode_property_match_string(fwnode, "compatible",
+ "xlnx,zynqmp-ams-ps") == 0) {
+ ams->ps_base = fwnode_iomap(fwnode, 0);
+ if (!ams->ps_base)
+ return -ENXIO;
+ ret = devm_add_action_or_reset(dev, ams_iounmap_ps, ams);
+ if (ret < 0)
+ return ret;
+
+ /* add PS channels to iio device channels */
+ memcpy(channels, ams_ps_channels, sizeof(ams_ps_channels));
+ } else if (fwnode_property_match_string(fwnode, "compatible",
+ "xlnx,zynqmp-ams-pl") == 0) {
+ ams->pl_base = fwnode_iomap(fwnode, 0);
+ if (!ams->pl_base)
+ return -ENXIO;
+
+ ret = devm_add_action_or_reset(dev, ams_iounmap_pl, ams);
+ if (ret < 0)
+ return ret;
+
+		/* Copy only the fixed channels */
+ memcpy(channels, ams_pl_channels, AMS_PL_MAX_FIXED_CHANNEL * sizeof(*channels));
+ num_channels += AMS_PL_MAX_FIXED_CHANNEL;
+ num_channels = ams_get_ext_chan(fwnode, channels,
+ num_channels);
+ } else if (fwnode_property_match_string(fwnode, "compatible",
+ "xlnx,zynqmp-ams") == 0) {
+ /* add AMS channels to iio device channels */
+ memcpy(channels, ams_ctrl_channels, sizeof(ams_ctrl_channels));
+ num_channels += ARRAY_SIZE(ams_ctrl_channels);
+ } else {
+ return -EINVAL;
+ }
+
+ return num_channels;
+}
+
+static int ams_parse_firmware(struct iio_dev *indio_dev)
+{
+ struct ams *ams = iio_priv(indio_dev);
+ struct iio_chan_spec *ams_channels, *dev_channels;
+ struct device *dev = indio_dev->dev.parent;
+ struct fwnode_handle *child = NULL;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ size_t ams_size, dev_size;
+ int ret, ch_cnt = 0, i, rising_off, falling_off;
+ unsigned int num_channels = 0;
+
+ ams_size = ARRAY_SIZE(ams_ps_channels) + ARRAY_SIZE(ams_pl_channels) +
+ ARRAY_SIZE(ams_ctrl_channels);
+
+ /* Initialize buffer for channel specification */
+ ams_channels = devm_kcalloc(dev, ams_size, sizeof(*ams_channels), GFP_KERNEL);
+ if (!ams_channels)
+ return -ENOMEM;
+
+ if (fwnode_device_is_available(fwnode)) {
+ ret = ams_init_module(indio_dev, fwnode, ams_channels);
+ if (ret < 0)
+ return ret;
+
+ num_channels += ret;
+ }
+
+ fwnode_for_each_child_node(fwnode, child) {
+ if (fwnode_device_is_available(child)) {
+ ret = ams_init_module(indio_dev, child, ams_channels + num_channels);
+ if (ret < 0) {
+ fwnode_handle_put(child);
+ return ret;
+ }
+
+ num_channels += ret;
+ }
+ }
+
+ for (i = 0; i < num_channels; i++) {
+ ams_channels[i].channel = ch_cnt++;
+
+ if (ams_channels[i].scan_index < AMS_CTRL_SEQ_BASE) {
+			/* Default each alarm to the widest window: min falling, max rising */
+ falling_off =
+ ams_get_alarm_offset(ams_channels[i].scan_index,
+ IIO_EV_DIR_FALLING);
+ rising_off =
+ ams_get_alarm_offset(ams_channels[i].scan_index,
+ IIO_EV_DIR_RISING);
+ if (ams_channels[i].scan_index >= AMS_PS_SEQ_MAX) {
+ writel(AMS_ALARM_THR_MIN,
+ ams->pl_base + falling_off);
+ writel(AMS_ALARM_THR_MAX,
+ ams->pl_base + rising_off);
+ } else {
+ writel(AMS_ALARM_THR_MIN,
+ ams->ps_base + falling_off);
+ writel(AMS_ALARM_THR_MAX,
+ ams->ps_base + rising_off);
+ }
+ }
+ }
+
+ dev_size = array_size(sizeof(*dev_channels), num_channels);
+ if (dev_size == SIZE_MAX)
+ return -ENOMEM;
+
+ dev_channels = devm_krealloc(dev, ams_channels, dev_size, GFP_KERNEL);
+ if (!dev_channels)
+		return -ENOMEM;
+
+ indio_dev->channels = dev_channels;
+ indio_dev->num_channels = num_channels;
+
+ return 0;
+}
+
+static const struct iio_info iio_ams_info = {
+ .read_raw = &ams_read_raw,
+ .read_event_config = &ams_read_event_config,
+ .write_event_config = &ams_write_event_config,
+ .read_event_value = &ams_read_event_value,
+ .write_event_value = &ams_write_event_value,
+};
+
+static const struct of_device_id ams_of_match_table[] = {
+ { .compatible = "xlnx,zynqmp-ams" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ams_of_match_table);
+
+static void ams_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static void ams_cancel_delayed_work(void *data)
+{
+ cancel_delayed_work(data);
+}
+
+static int ams_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct ams *ams;
+ int ret;
+ int irq;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*ams));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ams = iio_priv(indio_dev);
+ mutex_init(&ams->lock);
+ spin_lock_init(&ams->intr_lock);
+
+ indio_dev->name = "xilinx-ams";
+
+ indio_dev->info = &iio_ams_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ams->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ams->base))
+ return PTR_ERR(ams->base);
+
+ ams->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ams->clk))
+ return PTR_ERR(ams->clk);
+
+ ret = clk_prepare_enable(ams->clk);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(&pdev->dev, ams_clk_disable_unprepare, ams->clk);
+ if (ret < 0)
+ return ret;
+
+ INIT_DELAYED_WORK(&ams->ams_unmask_work, ams_unmask_worker);
+ ret = devm_add_action_or_reset(&pdev->dev, ams_cancel_delayed_work,
+ &ams->ams_unmask_work);
+ if (ret < 0)
+ return ret;
+
+ ret = ams_parse_firmware(indio_dev);
+ if (ret)
+		return dev_err_probe(&pdev->dev, ret, "failed to parse DT\n");
+
+ ret = ams_init_device(ams);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret, "failed to initialize AMS\n");
+
+ ams_enable_channel_sequence(indio_dev);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+		return irq;
+
+ ret = devm_request_irq(&pdev->dev, irq, &ams_irq, 0, "ams-irq",
+ indio_dev);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "failed to register interrupt\n");
+
+ platform_set_drvdata(pdev, indio_dev);
+
+ return devm_iio_device_register(&pdev->dev, indio_dev);
+}
+
+static int __maybe_unused ams_suspend(struct device *dev)
+{
+ struct ams *ams = iio_priv(dev_get_drvdata(dev));
+
+ clk_disable_unprepare(ams->clk);
+
+ return 0;
+}
+
+static int __maybe_unused ams_resume(struct device *dev)
+{
+ struct ams *ams = iio_priv(dev_get_drvdata(dev));
+
+ return clk_prepare_enable(ams->clk);
+}
+
+static SIMPLE_DEV_PM_OPS(ams_pm_ops, ams_suspend, ams_resume);
+
+static struct platform_driver ams_driver = {
+ .probe = ams_probe,
+ .driver = {
+ .name = "xilinx-ams",
+ .pm = &ams_pm_ops,
+ .of_match_table = ams_of_match_table,
+ },
+};
+module_platform_driver(ams_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xilinx, Inc.");
diff --git a/drivers/iio/adc/xilinx-xadc-core.c b/drivers/iio/adc/xilinx-xadc-core.c
index 83bea5ef765d..823c8e5f9809 100644
--- a/drivers/iio/adc/xilinx-xadc-core.c
+++ b/drivers/iio/adc/xilinx-xadc-core.c
@@ -107,6 +107,7 @@ static const unsigned int XADC_ZYNQ_UNMASK_TIMEOUT = 500;
#define XADC_AXI_INT_ALARM_MASK 0x3c0f
#define XADC_FLAGS_BUFFERED BIT(0)
+#define XADC_FLAGS_IRQ_OPTIONAL BIT(1)
/*
* The XADC hardware supports a samplerate of up to 1MSPS. Unfortunately it does
@@ -562,7 +563,7 @@ static const struct xadc_ops xadc_7s_axi_ops = {
.get_dclk_rate = xadc_axi_get_dclk,
.update_alarm = xadc_axi_update_alarm,
.interrupt_handler = xadc_axi_interrupt_handler,
- .flags = XADC_FLAGS_BUFFERED,
+ .flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
.type = XADC_TYPE_S7,
};
@@ -573,7 +574,7 @@ static const struct xadc_ops xadc_us_axi_ops = {
.get_dclk_rate = xadc_axi_get_dclk,
.update_alarm = xadc_axi_update_alarm,
.interrupt_handler = xadc_axi_interrupt_handler,
- .flags = XADC_FLAGS_BUFFERED,
+ .flags = XADC_FLAGS_BUFFERED | XADC_FLAGS_IRQ_OPTIONAL,
.type = XADC_TYPE_US,
};
@@ -943,7 +944,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
*val = 1000;
break;
}
- *val2 = chan->scan_type.realbits;
+ *val2 = bits;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_TEMP:
/* Temp in C = (val * 503.975) / 2**bits - 273.15 */
@@ -1182,7 +1183,7 @@ static const struct of_device_id xadc_of_match_table[] = {
MODULE_DEVICE_TABLE(of, xadc_of_match_table);
static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
- unsigned int *conf)
+ unsigned int *conf, int irq)
{
struct device *dev = indio_dev->dev.parent;
struct xadc *xadc = iio_priv(indio_dev);
@@ -1195,6 +1196,7 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
u32 ext_mux_chan;
u32 reg;
int ret;
+ int i;
*conf = 0;
@@ -1273,6 +1275,14 @@ static int xadc_parse_dt(struct iio_dev *indio_dev, struct device_node *np,
}
of_node_put(chan_node);
+ /* No IRQ => no events */
+ if (irq <= 0) {
+ for (i = 0; i < num_channels; i++) {
+ channels[i].event_spec = NULL;
+ channels[i].num_event_specs = 0;
+ }
+ }
+
indio_dev->num_channels = num_channels;
indio_dev->channels = devm_krealloc(dev, channels,
sizeof(*channels) * num_channels,
@@ -1307,6 +1317,7 @@ static int xadc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct of_device_id *id;
+ const struct xadc_ops *ops;
struct iio_dev *indio_dev;
unsigned int bipolar_mask;
unsigned int conf0;
@@ -1322,9 +1333,12 @@ static int xadc_probe(struct platform_device *pdev)
if (!id)
return -EINVAL;
- irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
- return -ENXIO;
+ ops = id->data;
+
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0 &&
+ (irq != -ENXIO || !(ops->flags & XADC_FLAGS_IRQ_OPTIONAL)))
+ return irq;
indio_dev = devm_iio_device_alloc(dev, sizeof(*xadc));
if (!indio_dev)
@@ -1345,7 +1359,7 @@ static int xadc_probe(struct platform_device *pdev)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &xadc_info;
- ret = xadc_parse_dt(indio_dev, dev->of_node, &conf0);
+ ret = xadc_parse_dt(indio_dev, dev->of_node, &conf0, irq);
if (ret)
return ret;
@@ -1357,14 +1371,16 @@ static int xadc_probe(struct platform_device *pdev)
if (ret)
return ret;
- xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
- if (IS_ERR(xadc->convst_trigger))
- return PTR_ERR(xadc->convst_trigger);
+ if (irq > 0) {
+ xadc->convst_trigger = xadc_alloc_trigger(indio_dev, "convst");
+ if (IS_ERR(xadc->convst_trigger))
+ return PTR_ERR(xadc->convst_trigger);
- xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
- "samplerate");
- if (IS_ERR(xadc->samplerate_trigger))
- return PTR_ERR(xadc->samplerate_trigger);
+ xadc->samplerate_trigger = xadc_alloc_trigger(indio_dev,
+ "samplerate");
+ if (IS_ERR(xadc->samplerate_trigger))
+ return PTR_ERR(xadc->samplerate_trigger);
+ }
}
xadc->clk = devm_clk_get(dev, NULL);
@@ -1396,15 +1412,17 @@ static int xadc_probe(struct platform_device *pdev)
}
}
- ret = devm_request_irq(dev, irq, xadc->ops->interrupt_handler, 0,
- dev_name(dev), indio_dev);
- if (ret)
- return ret;
+ if (irq > 0) {
+ ret = devm_request_irq(dev, irq, xadc->ops->interrupt_handler,
+ 0, dev_name(dev), indio_dev);
+ if (ret)
+ return ret;
- ret = devm_add_action_or_reset(dev, xadc_cancel_delayed_work,
- &xadc->zynq_unmask_work);
- if (ret)
- return ret;
+ ret = devm_add_action_or_reset(dev, xadc_cancel_delayed_work,
+ &xadc->zynq_unmask_work);
+ if (ret)
+ return ret;
+ }
ret = xadc->ops->setup(pdev, indio_dev, irq);
if (ret)
diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig
new file mode 100644
index 000000000000..138492362f20
--- /dev/null
+++ b/drivers/iio/addac/Kconfig
@@ -0,0 +1,20 @@
+#
+# ADC DAC drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Analog to digital and digital to analog converters"
+
+config AD74413R
+ tristate "Analog Devices AD74412R/AD74413R driver"
+ depends on GPIOLIB && SPI
+ select REGMAP_SPI
+ select CRC8
+ help
+ Say yes here to build support for Analog Devices AD74412R/AD74413R
+ quad-channel software configurable input/output solution.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad74413r.
+
+endmenu
diff --git a/drivers/iio/addac/Makefile b/drivers/iio/addac/Makefile
new file mode 100644
index 000000000000..cfd4bbe64ad3
--- /dev/null
+++ b/drivers/iio/addac/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for industrial I/O ADDAC drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AD74413R) += ad74413r.o
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
new file mode 100644
index 000000000000..5271073bb74e
--- /dev/null
+++ b/drivers/iio/addac/ad74413r.c
@@ -0,0 +1,1475 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Analog Devices, Inc.
+ * Author: Cosmin Tanislav <cosmin.tanislav@analog.com>
+ */
+
+#include <asm/unaligned.h>
+#include <linux/bitfield.h>
+#include <linux/crc8.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <dt-bindings/iio/addac/adi,ad74413r.h>
+
+#define AD74413R_CRC_POLYNOMIAL 0x7
+DECLARE_CRC8_TABLE(ad74413r_crc8_table);
+
+#define AD74413R_CHANNEL_MAX 4
+
+#define AD74413R_FRAME_SIZE 4
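+/*
+ * Every SPI access is a single 4-byte frame: an 8-bit register address, a
+ * 16-bit big-endian value and a trailing CRC-8 computed over the first three
+ * bytes (see ad74413r_format_reg_write() and ad74413r_crc() below).
+ */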
+
+struct ad74413r_chip_info {
+ const char *name;
+ bool hart_support;
+};
+
+struct ad74413r_channel_config {
+ u32 func;
+ bool gpo_comparator;
+ bool initialized;
+};
+
+struct ad74413r_channels {
+ struct iio_chan_spec *channels;
+ unsigned int num_channels;
+};
+
+struct ad74413r_state {
+ struct ad74413r_channel_config channel_configs[AD74413R_CHANNEL_MAX];
+ unsigned int gpo_gpio_offsets[AD74413R_CHANNEL_MAX];
+ unsigned int comp_gpio_offsets[AD74413R_CHANNEL_MAX];
+ struct gpio_chip gpo_gpiochip;
+ struct gpio_chip comp_gpiochip;
+ struct completion adc_data_completion;
+ unsigned int num_gpo_gpios;
+ unsigned int num_comparator_gpios;
+ u32 sense_resistor_ohms;
+
+ /*
+ * Synchronize consecutive operations when doing a one-shot
+ * conversion and when updating the ADC samples SPI message.
+ */
+ struct mutex lock;
+
+ const struct ad74413r_chip_info *chip_info;
+ struct spi_device *spi;
+ struct regulator *refin_reg;
+ struct regmap *regmap;
+ struct device *dev;
+ struct iio_trigger *trig;
+
+ size_t adc_active_channels;
+ struct spi_message adc_samples_msg;
+ struct spi_transfer adc_samples_xfer[AD74413R_CHANNEL_MAX + 1];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ struct {
+ u8 rx_buf[AD74413R_FRAME_SIZE * AD74413R_CHANNEL_MAX];
+ s64 timestamp;
+ } adc_samples_buf ____cacheline_aligned;
+
+ u8 adc_samples_tx_buf[AD74413R_FRAME_SIZE * AD74413R_CHANNEL_MAX];
+ u8 reg_tx_buf[AD74413R_FRAME_SIZE];
+ u8 reg_rx_buf[AD74413R_FRAME_SIZE];
+};
+
+#define AD74413R_REG_NOP 0x00
+
+#define AD74413R_REG_CH_FUNC_SETUP_X(x) (0x01 + (x))
+#define AD74413R_CH_FUNC_SETUP_MASK GENMASK(3, 0)
+
+#define AD74413R_REG_ADC_CONFIG_X(x) (0x05 + (x))
+#define AD74413R_ADC_CONFIG_RANGE_MASK GENMASK(7, 5)
+#define AD74413R_ADC_CONFIG_REJECTION_MASK GENMASK(4, 3)
+#define AD74413R_ADC_RANGE_10V 0b000
+#define AD74413R_ADC_RANGE_2P5V_EXT_POW 0b001
+#define AD74413R_ADC_RANGE_2P5V_INT_POW 0b010
+#define AD74413R_ADC_RANGE_5V_BI_DIR 0b011
+#define AD74413R_ADC_REJECTION_50_60 0b00
+#define AD74413R_ADC_REJECTION_NONE 0b01
+#define AD74413R_ADC_REJECTION_50_60_HART 0b10
+#define AD74413R_ADC_REJECTION_HART 0b11
+
+#define AD74413R_REG_DIN_CONFIG_X(x) (0x09 + (x))
+#define AD74413R_DIN_DEBOUNCE_MASK GENMASK(4, 0)
+#define AD74413R_DIN_DEBOUNCE_LEN BIT(5)
+
+#define AD74413R_REG_DAC_CODE_X(x) (0x16 + (x))
+#define AD74413R_DAC_CODE_MAX GENMASK(12, 0)
+#define AD74413R_DAC_VOLTAGE_MAX 11000
+
+#define AD74413R_REG_GPO_PAR_DATA 0x0d
+#define AD74413R_REG_GPO_CONFIG_X(x) (0x0e + (x))
+#define AD74413R_GPO_CONFIG_DATA_MASK BIT(3)
+#define AD74413R_GPO_CONFIG_SELECT_MASK GENMASK(2, 0)
+#define AD74413R_GPO_CONFIG_100K_PULL_DOWN 0b000
+#define AD74413R_GPO_CONFIG_LOGIC 0b001
+#define AD74413R_GPO_CONFIG_LOGIC_PARALLEL 0b010
+#define AD74413R_GPO_CONFIG_COMPARATOR 0b011
+#define AD74413R_GPO_CONFIG_HIGH_IMPEDANCE 0b100
+
+#define AD74413R_REG_ADC_CONV_CTRL 0x23
+#define AD74413R_CONV_SEQ_MASK GENMASK(9, 8)
+#define AD74413R_CONV_SEQ_ON 0b00
+#define AD74413R_CONV_SEQ_SINGLE 0b01
+#define AD74413R_CONV_SEQ_CONTINUOUS 0b10
+#define AD74413R_CONV_SEQ_OFF 0b11
+#define AD74413R_CH_EN_MASK(x) BIT(x)
+
+#define AD74413R_REG_DIN_COMP_OUT 0x25
+#define AD74413R_DIN_COMP_OUT_SHIFT_X(x) x
+
+#define AD74413R_REG_ADC_RESULT_X(x) (0x26 + (x))
+#define AD74413R_ADC_RESULT_MAX GENMASK(15, 0)
+
+#define AD74413R_REG_READ_SELECT 0x41
+
+#define AD74413R_REG_CMD_KEY 0x44
+#define AD74413R_CMD_KEY_LDAC 0x953a
+#define AD74413R_CMD_KEY_RESET1 0x15fa
+#define AD74413R_CMD_KEY_RESET2 0xaf51
+
+static const int ad74413r_adc_sampling_rates[] = {
+ 20, 4800,
+};
+
+static const int ad74413r_adc_sampling_rates_hart[] = {
+ 10, 20, 1200, 4800,
+};
+
+static int ad74413r_crc(u8 *buf)
+{
+ return crc8(ad74413r_crc8_table, buf, 3, 0);
+}
+
+static void ad74413r_format_reg_write(u8 reg, u16 val, u8 *buf)
+{
+ buf[0] = reg;
+ put_unaligned_be16(val, &buf[1]);
+ buf[3] = ad74413r_crc(buf);
+}
+
+static int ad74413r_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct ad74413r_state *st = context;
+
+ ad74413r_format_reg_write(reg, val, st->reg_tx_buf);
+
+ return spi_write(st->spi, st->reg_tx_buf, AD74413R_FRAME_SIZE);
+}
+
+static int ad74413r_crc_check(struct ad74413r_state *st, u8 *buf)
+{
+ u8 expected_crc = ad74413r_crc(buf);
+
+ if (buf[3] != expected_crc) {
+ dev_err(st->dev, "Bad CRC %02x for %02x%02x%02x\n",
+ buf[3], buf[0], buf[1], buf[2]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ad74413r_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct ad74413r_state *st = context;
+ struct spi_transfer reg_read_xfer[] = {
+ {
+ .tx_buf = st->reg_tx_buf,
+ .len = AD74413R_FRAME_SIZE,
+ .cs_change = 1,
+ },
+ {
+ .rx_buf = st->reg_rx_buf,
+ .len = AD74413R_FRAME_SIZE,
+ },
+ };
+ int ret;
+
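+	/*
+	 * A register read takes two chained frames: the first writes the
+	 * target address into READ_SELECT, the second clocks the selected
+	 * register's value (plus CRC) back out.
+	 */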
+ ad74413r_format_reg_write(AD74413R_REG_READ_SELECT, reg,
+ st->reg_tx_buf);
+
+ ret = spi_sync_transfer(st->spi, reg_read_xfer,
+ ARRAY_SIZE(reg_read_xfer));
+ if (ret)
+ return ret;
+
+ ret = ad74413r_crc_check(st, st->reg_rx_buf);
+ if (ret)
+ return ret;
+
+ *val = get_unaligned_be16(&st->reg_rx_buf[1]);
+
+ return 0;
+}
+
+static const struct regmap_config ad74413r_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 16,
+ .reg_read = ad74413r_reg_read,
+ .reg_write = ad74413r_reg_write,
+};
+
+static int ad74413r_set_gpo_config(struct ad74413r_state *st,
+ unsigned int offset, u8 mode)
+{
+ return regmap_update_bits(st->regmap, AD74413R_REG_GPO_CONFIG_X(offset),
+ AD74413R_GPO_CONFIG_SELECT_MASK, mode);
+}
+
+static const unsigned int ad74413r_debounce_map[AD74413R_DIN_DEBOUNCE_LEN] = {
+ 0, 13, 18, 24, 32, 42, 56, 75,
+ 100, 130, 180, 240, 320, 420, 560, 750,
+ 1000, 1300, 1800, 2400, 3200, 4200, 5600, 7500,
+ 10000, 13000, 18000, 24000, 32000, 42000, 56000, 75000,
+};
+
+static int ad74413r_set_comp_debounce(struct ad74413r_state *st,
+ unsigned int offset,
+ unsigned int debounce)
+{
+ unsigned int val = AD74413R_DIN_DEBOUNCE_LEN - 1;
+ unsigned int i;
+
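+	/*
+	 * Pick the first table entry that covers the requested time, e.g. a
+	 * 150us request selects the 180us setting; requests beyond the last
+	 * entry fall back to the maximum (75000us).
+	 */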
+ for (i = 0; i < AD74413R_DIN_DEBOUNCE_LEN; i++)
+ if (debounce <= ad74413r_debounce_map[i]) {
+ val = i;
+ break;
+ }
+
+ return regmap_update_bits(st->regmap,
+ AD74413R_REG_DIN_CONFIG_X(offset),
+ AD74413R_DIN_DEBOUNCE_MASK,
+ val);
+}
+
+static void ad74413r_gpio_set(struct gpio_chip *chip,
+ unsigned int offset, int val)
+{
+ struct ad74413r_state *st = gpiochip_get_data(chip);
+ unsigned int real_offset = st->gpo_gpio_offsets[offset];
+ int ret;
+
+ ret = ad74413r_set_gpo_config(st, real_offset,
+ AD74413R_GPO_CONFIG_LOGIC);
+ if (ret)
+ return;
+
+ regmap_update_bits(st->regmap, AD74413R_REG_GPO_CONFIG_X(real_offset),
+ AD74413R_GPO_CONFIG_DATA_MASK,
+ val ? AD74413R_GPO_CONFIG_DATA_MASK : 0);
+}
+
+static void ad74413r_gpio_set_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ struct ad74413r_state *st = gpiochip_get_data(chip);
+ unsigned long real_mask = 0;
+ unsigned long real_bits = 0;
+ unsigned int offset = 0;
+ int ret;
+
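+	/*
+	 * Switch each pin in the mask to "logic parallel" mode first,
+	 * presumably so the single GPO_PAR_DATA write below updates them all
+	 * at once.
+	 */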
+ for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) {
+ unsigned int real_offset = st->gpo_gpio_offsets[offset];
+
+ ret = ad74413r_set_gpo_config(st, real_offset,
+ AD74413R_GPO_CONFIG_LOGIC_PARALLEL);
+ if (ret)
+ return;
+
+ real_mask |= BIT(real_offset);
+		if (*bits & BIT(offset))
+ real_bits |= BIT(real_offset);
+ }
+
+ regmap_update_bits(st->regmap, AD74413R_REG_GPO_PAR_DATA,
+ real_mask, real_bits);
+}
+
+static int ad74413r_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct ad74413r_state *st = gpiochip_get_data(chip);
+ unsigned int real_offset = st->comp_gpio_offsets[offset];
+ unsigned int status;
+ int ret;
+
+ ret = regmap_read(st->regmap, AD74413R_REG_DIN_COMP_OUT, &status);
+ if (ret)
+ return ret;
+
+ status &= AD74413R_DIN_COMP_OUT_SHIFT_X(real_offset);
+
+ return status ? 1 : 0;
+}
+
+static int ad74413r_gpio_get_multiple(struct gpio_chip *chip,
+ unsigned long *mask,
+ unsigned long *bits)
+{
+ struct ad74413r_state *st = gpiochip_get_data(chip);
+ unsigned int offset = 0;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(st->regmap, AD74413R_REG_DIN_COMP_OUT, &val);
+ if (ret)
+ return ret;
+
+ for_each_set_bit_from(offset, mask, AD74413R_CHANNEL_MAX) {
+ unsigned int real_offset = st->comp_gpio_offsets[offset];
+
+ if (val & BIT(real_offset))
+			*bits |= BIT(offset);
+ }
+
+ return ret;
+}
+
+static int ad74413r_gpio_get_gpo_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int ad74413r_gpio_get_comp_direction(struct gpio_chip *chip,
+ unsigned int offset)
+{
+ return GPIO_LINE_DIRECTION_IN;
+}
+
+static int ad74413r_gpio_set_gpo_config(struct gpio_chip *chip,
+ unsigned int offset,
+ unsigned long config)
+{
+ struct ad74413r_state *st = gpiochip_get_data(chip);
+ unsigned int real_offset = st->gpo_gpio_offsets[offset];
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ return ad74413r_set_gpo_config(st, real_offset,
+ AD74413R_GPO_CONFIG_100K_PULL_DOWN);
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ return ad74413r_set_gpo_config(st, real_offset,
+ AD74413R_GPO_CONFIG_HIGH_IMPEDANCE);
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ad74413r_gpio_set_comp_config(struct gpio_chip *chip,
+ unsigned int offset,
+ unsigned long config)
+{
+ struct ad74413r_state *st = gpiochip_get_data(chip);
+ unsigned int real_offset = st->comp_gpio_offsets[offset];
+
+ switch (pinconf_to_config_param(config)) {
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ return ad74413r_set_comp_debounce(st, real_offset,
+ pinconf_to_config_argument(config));
+ default:
+ return -ENOTSUPP;
+ }
+}
+
+static int ad74413r_reset(struct ad74413r_state *st)
+{
+ int ret;
+
+ ret = regmap_write(st->regmap, AD74413R_REG_CMD_KEY,
+ AD74413R_CMD_KEY_RESET1);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, AD74413R_REG_CMD_KEY,
+ AD74413R_CMD_KEY_RESET2);
+}
+
+static int ad74413r_set_channel_dac_code(struct ad74413r_state *st,
+ unsigned int channel, int dac_code)
+{
+ struct reg_sequence reg_seq[2] = {
+ { AD74413R_REG_DAC_CODE_X(channel), dac_code },
+ { AD74413R_REG_CMD_KEY, AD74413R_CMD_KEY_LDAC },
+ };
+
+ return regmap_multi_reg_write(st->regmap, reg_seq, 2);
+}
+
+static int ad74413r_set_channel_function(struct ad74413r_state *st,
+ unsigned int channel, u8 func)
+{
+ return regmap_update_bits(st->regmap,
+ AD74413R_REG_CH_FUNC_SETUP_X(channel),
+ AD74413R_CH_FUNC_SETUP_MASK, func);
+}
+
+static int ad74413r_set_adc_conv_seq(struct ad74413r_state *st,
+ unsigned int status)
+{
+ int ret;
+
+ /*
+ * These bits do not clear when a conversion completes.
+ * To enable a subsequent conversion, repeat the write.
+ */
+ ret = regmap_write_bits(st->regmap, AD74413R_REG_ADC_CONV_CTRL,
+ AD74413R_CONV_SEQ_MASK,
+ FIELD_PREP(AD74413R_CONV_SEQ_MASK, status));
+ if (ret)
+ return ret;
+
+ /*
+ * Wait 100us before starting conversions.
+ */
+ usleep_range(100, 120);
+
+ return 0;
+}
+
+static int ad74413r_set_adc_channel_enable(struct ad74413r_state *st,
+ unsigned int channel,
+ bool status)
+{
+ return regmap_update_bits(st->regmap, AD74413R_REG_ADC_CONV_CTRL,
+ AD74413R_CH_EN_MASK(channel),
+ status ? AD74413R_CH_EN_MASK(channel) : 0);
+}
+
+static int ad74413r_get_adc_range(struct ad74413r_state *st,
+ unsigned int channel,
+ unsigned int *val)
+{
+ int ret;
+
+ ret = regmap_read(st->regmap, AD74413R_REG_ADC_CONFIG_X(channel), val);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(AD74413R_ADC_CONFIG_RANGE_MASK, *val);
+
+ return 0;
+}
+
+static int ad74413r_get_adc_rejection(struct ad74413r_state *st,
+ unsigned int channel,
+ unsigned int *val)
+{
+ int ret;
+
+ ret = regmap_read(st->regmap, AD74413R_REG_ADC_CONFIG_X(channel), val);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(AD74413R_ADC_CONFIG_REJECTION_MASK, *val);
+
+ return 0;
+}
+
+static int ad74413r_set_adc_rejection(struct ad74413r_state *st,
+ unsigned int channel,
+ unsigned int val)
+{
+ return regmap_update_bits(st->regmap,
+ AD74413R_REG_ADC_CONFIG_X(channel),
+ AD74413R_ADC_CONFIG_REJECTION_MASK,
+ FIELD_PREP(AD74413R_ADC_CONFIG_REJECTION_MASK,
+ val));
+}
+
+static int ad74413r_rejection_to_rate(struct ad74413r_state *st,
+ unsigned int rej, int *val)
+{
+ switch (rej) {
+ case AD74413R_ADC_REJECTION_50_60:
+ *val = 20;
+ return 0;
+ case AD74413R_ADC_REJECTION_NONE:
+ *val = 4800;
+ return 0;
+ case AD74413R_ADC_REJECTION_50_60_HART:
+ *val = 10;
+ return 0;
+ case AD74413R_ADC_REJECTION_HART:
+ *val = 1200;
+ return 0;
+ default:
+ dev_err(st->dev, "ADC rejection invalid\n");
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_rate_to_rejection(struct ad74413r_state *st,
+ int rate, unsigned int *val)
+{
+ switch (rate) {
+ case 20:
+ *val = AD74413R_ADC_REJECTION_50_60;
+ return 0;
+ case 4800:
+ *val = AD74413R_ADC_REJECTION_NONE;
+ return 0;
+ case 10:
+ *val = AD74413R_ADC_REJECTION_50_60_HART;
+ return 0;
+ case 1200:
+ *val = AD74413R_ADC_REJECTION_HART;
+ return 0;
+ default:
+ dev_err(st->dev, "ADC rate invalid\n");
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_range_to_voltage_range(struct ad74413r_state *st,
+ unsigned int range, int *val)
+{
+ switch (range) {
+ case AD74413R_ADC_RANGE_10V:
+ *val = 10000;
+ return 0;
+ case AD74413R_ADC_RANGE_2P5V_EXT_POW:
+ case AD74413R_ADC_RANGE_2P5V_INT_POW:
+ *val = 2500;
+ return 0;
+ case AD74413R_ADC_RANGE_5V_BI_DIR:
+ *val = 5000;
+ return 0;
+ default:
+ dev_err(st->dev, "ADC range invalid\n");
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_range_to_voltage_offset(struct ad74413r_state *st,
+ unsigned int range, int *val)
+{
+ switch (range) {
+ case AD74413R_ADC_RANGE_10V:
+ case AD74413R_ADC_RANGE_2P5V_EXT_POW:
+ *val = 0;
+ return 0;
+ case AD74413R_ADC_RANGE_2P5V_INT_POW:
+ case AD74413R_ADC_RANGE_5V_BI_DIR:
+ *val = -2500;
+ return 0;
+ default:
+ dev_err(st->dev, "ADC range invalid\n");
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_range_to_voltage_offset_raw(struct ad74413r_state *st,
+ unsigned int range, int *val)
+{
+ switch (range) {
+ case AD74413R_ADC_RANGE_10V:
+ case AD74413R_ADC_RANGE_2P5V_EXT_POW:
+ *val = 0;
+ return 0;
+ case AD74413R_ADC_RANGE_2P5V_INT_POW:
+ *val = -((int)AD74413R_ADC_RESULT_MAX);
+ return 0;
+ case AD74413R_ADC_RANGE_5V_BI_DIR:
+ *val = -((int)AD74413R_ADC_RESULT_MAX / 2);
+ return 0;
+ default:
+ dev_err(st->dev, "ADC range invalid\n");
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_get_output_voltage_scale(struct ad74413r_state *st,
+ int *val, int *val2)
+{
+ *val = AD74413R_DAC_VOLTAGE_MAX;
+ *val2 = AD74413R_DAC_CODE_MAX;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int ad74413r_get_output_current_scale(struct ad74413r_state *st,
+ int *val, int *val2)
+{
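+	/*
+	 * refin is reported in microvolts; dividing by (sense resistance *
+	 * code span * 1000) yields a per-code scale in milliamps (uV / ohm =
+	 * uA, then /1000 -> mA), matching the IIO current unit.
+	 */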
+ *val = regulator_get_voltage(st->refin_reg);
+ *val2 = st->sense_resistor_ohms * AD74413R_DAC_CODE_MAX * 1000;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int ad74413r_get_input_voltage_scale(struct ad74413r_state *st,
+ unsigned int channel,
+ int *val, int *val2)
+{
+ unsigned int range;
+ int ret;
+
+ ret = ad74413r_get_adc_range(st, channel, &range);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_range_to_voltage_range(st, range, val);
+ if (ret)
+ return ret;
+
+ *val2 = AD74413R_ADC_RESULT_MAX;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int ad74413r_get_input_voltage_offset(struct ad74413r_state *st,
+ unsigned int channel, int *val)
+{
+ unsigned int range;
+ int ret;
+
+ ret = ad74413r_get_adc_range(st, channel, &range);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_range_to_voltage_offset_raw(st, range, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+}
+
+static int ad74413r_get_input_current_scale(struct ad74413r_state *st,
+ unsigned int channel, int *val,
+ int *val2)
+{
+ unsigned int range;
+ int ret;
+
+ ret = ad74413r_get_adc_range(st, channel, &range);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_range_to_voltage_range(st, range, val);
+ if (ret)
+ return ret;
+
+ *val2 = AD74413R_ADC_RESULT_MAX * st->sense_resistor_ohms;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int ad74413_get_input_current_offset(struct ad74413r_state *st,
+ unsigned int channel, int *val)
+{
+ unsigned int range;
+ int voltage_range;
+ int voltage_offset;
+ int ret;
+
+ ret = ad74413r_get_adc_range(st, channel, &range);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_range_to_voltage_range(st, range, &voltage_range);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_range_to_voltage_offset(st, range, &voltage_offset);
+ if (ret)
+ return ret;
+
+ *val = voltage_offset * AD74413R_ADC_RESULT_MAX / voltage_range;
+
+ return IIO_VAL_INT;
+}
+
+static int ad74413r_get_adc_rate(struct ad74413r_state *st,
+ unsigned int channel, int *val)
+{
+ unsigned int rejection;
+ int ret;
+
+ ret = ad74413r_get_adc_rejection(st, channel, &rejection);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_rejection_to_rate(st, rejection, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+}
+
+static int ad74413r_set_adc_rate(struct ad74413r_state *st,
+ unsigned int channel, int val)
+{
+ unsigned int rejection;
+ int ret;
+
+ ret = ad74413r_rate_to_rejection(st, val, &rejection);
+ if (ret)
+ return ret;
+
+ return ad74413r_set_adc_rejection(st, channel, rejection);
+}
+
+static irqreturn_t ad74413r_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad74413r_state *st = iio_priv(indio_dev);
+ u8 *rx_buf = st->adc_samples_buf.rx_buf;
+ unsigned int i;
+ int ret;
+
+ ret = spi_sync(st->spi, &st->adc_samples_msg);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < st->adc_active_channels; i++)
+ ad74413r_crc_check(st, &rx_buf[i * AD74413R_FRAME_SIZE]);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &st->adc_samples_buf,
+ iio_get_time_ns(indio_dev));
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ad74413r_adc_data_interrupt(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct ad74413r_state *st = iio_priv(indio_dev);
+
+ if (iio_buffer_enabled(indio_dev))
+ iio_trigger_poll(st->trig);
+ else
+ complete(&st->adc_data_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int _ad74413r_get_single_adc_result(struct ad74413r_state *st,
+ unsigned int channel, int *val)
+{
+ unsigned int uval;
+ int ret;
+
+ reinit_completion(&st->adc_data_completion);
+
+ ret = ad74413r_set_adc_channel_enable(st, channel, true);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_SINGLE);
+ if (ret)
+ return ret;
+
+ ret = wait_for_completion_timeout(&st->adc_data_completion,
+ msecs_to_jiffies(1000));
+	if (!ret)
+		return -ETIMEDOUT;
+
+ ret = regmap_read(st->regmap, AD74413R_REG_ADC_RESULT_X(channel),
+ &uval);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_OFF);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_set_adc_channel_enable(st, channel, false);
+ if (ret)
+ return ret;
+
+ *val = uval;
+
+ return IIO_VAL_INT;
+}
+
+static int ad74413r_get_single_adc_result(struct iio_dev *indio_dev,
+ unsigned int channel, int *val)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ mutex_lock(&st->lock);
+ ret = _ad74413r_get_single_adc_result(st, channel, val);
+ mutex_unlock(&st->lock);
+
+ iio_device_release_direct_mode(indio_dev);
+
+ return ret;
+}
+
+static void ad74413r_adc_to_resistance_result(int adc_result, int *val)
+{
+ if (adc_result == AD74413R_ADC_RESULT_MAX)
+ adc_result = AD74413R_ADC_RESULT_MAX - 1;
+
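+	/*
+	 * The constant suggests the input is sensed against a 2.1 kohm
+	 * reference resistor: R = 2100 * code / (full_scale - code), so a
+	 * mid-scale code reads back as exactly 2100 ohms. The clamp above
+	 * avoids a division by zero at full scale.
+	 */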
+ *val = DIV_ROUND_CLOSEST(adc_result * 2100,
+ AD74413R_ADC_RESULT_MAX - adc_result);
+}
+
+static int ad74413r_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *active_scan_mask)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+ struct spi_transfer *xfer = st->adc_samples_xfer;
+ u8 *rx_buf = &st->adc_samples_buf.rx_buf[-1 * AD74413R_FRAME_SIZE];
+ u8 *tx_buf = st->adc_samples_tx_buf;
+ unsigned int channel;
+ int ret = -EINVAL;
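+	/*
+	 * Each frame returns the result selected by the previous frame, so
+	 * the receive pointer starts one frame early; the first transfer
+	 * leaves its rx_buf NULL and the pointer lines up from the second
+	 * transfer onward.
+	 */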
+
+ mutex_lock(&st->lock);
+
+ spi_message_init(&st->adc_samples_msg);
+ st->adc_active_channels = 0;
+
+ for_each_clear_bit(channel, active_scan_mask, AD74413R_CHANNEL_MAX) {
+ ret = ad74413r_set_adc_channel_enable(st, channel, false);
+ if (ret)
+ goto out;
+ }
+
+ if (*active_scan_mask == 0)
+ goto out;
+
+ /*
+ * The read select register is used to select which register's value
+ * will be sent by the slave on the next SPI frame.
+ *
+ * Create an SPI message that, on each step, writes to the read select
+ * register to select the ADC result of the next enabled channel, and
+ * reads the ADC result of the previous enabled channel.
+ *
+ * Example:
+	 * W: [WCH1] [WCH2] [WCH3] [WCH4] [    ]
+	 * R: [    ] [RCH1] [RCH2] [RCH3] [RCH4]
+ */
+
+ for_each_set_bit(channel, active_scan_mask, AD74413R_CHANNEL_MAX) {
+ ret = ad74413r_set_adc_channel_enable(st, channel, true);
+ if (ret)
+ goto out;
+
+ st->adc_active_channels++;
+
+ if (xfer == st->adc_samples_xfer)
+ xfer->rx_buf = NULL;
+ else
+ xfer->rx_buf = rx_buf;
+
+ xfer->tx_buf = tx_buf;
+ xfer->len = AD74413R_FRAME_SIZE;
+ xfer->cs_change = 1;
+
+ ad74413r_format_reg_write(AD74413R_REG_READ_SELECT,
+ AD74413R_REG_ADC_RESULT_X(channel),
+ tx_buf);
+
+ spi_message_add_tail(xfer, &st->adc_samples_msg);
+
+ xfer++;
+ tx_buf += AD74413R_FRAME_SIZE;
+ rx_buf += AD74413R_FRAME_SIZE;
+ }
+
+ xfer->rx_buf = rx_buf;
+ xfer->tx_buf = NULL;
+ xfer->len = AD74413R_FRAME_SIZE;
+ xfer->cs_change = 0;
+
+ spi_message_add_tail(xfer, &st->adc_samples_msg);
+
+out:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad74413r_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+
+ return ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_CONTINUOUS);
+}
+
+static int ad74413r_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+
+ return ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_OFF);
+}
+
+static int ad74413r_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output)
+ return ad74413r_get_output_voltage_scale(st,
+ val, val2);
+ else
+ return ad74413r_get_input_voltage_scale(st,
+ chan->channel, val, val2);
+ case IIO_CURRENT:
+ if (chan->output)
+ return ad74413r_get_output_current_scale(st,
+ val, val2);
+ else
+ return ad74413r_get_input_current_scale(st,
+ chan->channel, val, val2);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ return ad74413r_get_input_voltage_offset(st,
+ chan->channel, val);
+ case IIO_CURRENT:
+ return ad74413_get_input_current_offset(st,
+ chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ if (chan->output)
+ return -EINVAL;
+
+ return ad74413r_get_single_adc_result(indio_dev, chan->channel,
+ val);
+ case IIO_CHAN_INFO_PROCESSED: {
+ int ret;
+
+ ret = ad74413r_get_single_adc_result(indio_dev, chan->channel,
+ val);
+		if (ret < 0)
+ return ret;
+
+ ad74413r_adc_to_resistance_result(*val, val);
+
+ return ret;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return ad74413r_get_adc_rate(st, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ if (!chan->output)
+ return -EINVAL;
+
+ if (val < 0 || val > AD74413R_DAC_CODE_MAX) {
+ dev_err(st->dev, "Invalid DAC code\n");
+ return -EINVAL;
+ }
+
+ return ad74413r_set_channel_dac_code(st, chan->channel, val);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return ad74413r_set_adc_rate(st, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad74413r_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long info)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (st->chip_info->hart_support) {
+ *vals = ad74413r_adc_sampling_rates_hart;
+ *length = ARRAY_SIZE(ad74413r_adc_sampling_rates_hart);
+ } else {
+ *vals = ad74413r_adc_sampling_rates;
+ *length = ARRAY_SIZE(ad74413r_adc_sampling_rates);
+ }
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_buffer_setup_ops ad74413r_buffer_ops = {
+ .postenable = &ad74413r_buffer_postenable,
+ .predisable = &ad74413r_buffer_predisable,
+};
+
+static const struct iio_trigger_ops ad74413r_trigger_ops = {
+ .validate_device = iio_trigger_validate_own_device,
+};
+
+static const struct iio_info ad74413r_info = {
+ .read_raw = &ad74413r_read_raw,
+ .write_raw = &ad74413r_write_raw,
+ .read_avail = &ad74413r_read_avail,
+ .update_scan_mode = &ad74413r_update_scan_mode,
+};
+
+#define AD74413R_DAC_CHANNEL(_type, extra_mask_separate) \
+ { \
+ .type = (_type), \
+ .indexed = 1, \
+ .output = 1, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
+ | (extra_mask_separate), \
+ }
+
+#define AD74413R_ADC_CHANNEL(_type, extra_mask_separate) \
+ { \
+ .type = (_type), \
+ .indexed = 1, \
+ .output = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
+ | BIT(IIO_CHAN_INFO_SAMP_FREQ) \
+ | (extra_mask_separate), \
+ .info_mask_separate_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 32, \
+ .shift = 8, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define AD74413R_ADC_VOLTAGE_CHANNEL \
+ AD74413R_ADC_CHANNEL(IIO_VOLTAGE, BIT(IIO_CHAN_INFO_SCALE) \
+ | BIT(IIO_CHAN_INFO_OFFSET))
+
+#define AD74413R_ADC_CURRENT_CHANNEL \
+ AD74413R_ADC_CHANNEL(IIO_CURRENT, BIT(IIO_CHAN_INFO_SCALE) \
+ | BIT(IIO_CHAN_INFO_OFFSET))
+
+static struct iio_chan_spec ad74413r_voltage_output_channels[] = {
+ AD74413R_DAC_CHANNEL(IIO_VOLTAGE, BIT(IIO_CHAN_INFO_SCALE)),
+ AD74413R_ADC_CURRENT_CHANNEL,
+};
+
+static struct iio_chan_spec ad74413r_current_output_channels[] = {
+ AD74413R_DAC_CHANNEL(IIO_CURRENT, BIT(IIO_CHAN_INFO_SCALE)),
+ AD74413R_ADC_VOLTAGE_CHANNEL,
+};
+
+static struct iio_chan_spec ad74413r_voltage_input_channels[] = {
+ AD74413R_ADC_VOLTAGE_CHANNEL,
+};
+
+static struct iio_chan_spec ad74413r_current_input_channels[] = {
+ AD74413R_ADC_CURRENT_CHANNEL,
+};
+
+static struct iio_chan_spec ad74413r_resistance_input_channels[] = {
+ AD74413R_ADC_CHANNEL(IIO_RESISTANCE, BIT(IIO_CHAN_INFO_PROCESSED)),
+};
+
+static struct iio_chan_spec ad74413r_digital_input_channels[] = {
+ AD74413R_ADC_VOLTAGE_CHANNEL,
+};
+
+#define _AD74413R_CHANNELS(_channels) \
+ { \
+ .channels = _channels, \
+ .num_channels = ARRAY_SIZE(_channels), \
+ }
+
+#define AD74413R_CHANNELS(name) \
+ _AD74413R_CHANNELS(ad74413r_ ## name ## _channels)
+
+static const struct ad74413r_channels ad74413r_channels_map[] = {
+ [CH_FUNC_HIGH_IMPEDANCE] = AD74413R_CHANNELS(voltage_input),
+ [CH_FUNC_VOLTAGE_OUTPUT] = AD74413R_CHANNELS(voltage_output),
+ [CH_FUNC_CURRENT_OUTPUT] = AD74413R_CHANNELS(current_output),
+ [CH_FUNC_VOLTAGE_INPUT] = AD74413R_CHANNELS(voltage_input),
+ [CH_FUNC_CURRENT_INPUT_EXT_POWER] = AD74413R_CHANNELS(current_input),
+ [CH_FUNC_CURRENT_INPUT_LOOP_POWER] = AD74413R_CHANNELS(current_input),
+ [CH_FUNC_RESISTANCE_INPUT] = AD74413R_CHANNELS(resistance_input),
+ [CH_FUNC_DIGITAL_INPUT_LOGIC] = AD74413R_CHANNELS(digital_input),
+ [CH_FUNC_DIGITAL_INPUT_LOOP_POWER] = AD74413R_CHANNELS(digital_input),
+ [CH_FUNC_CURRENT_INPUT_EXT_POWER_HART] = AD74413R_CHANNELS(current_input),
+ [CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART] = AD74413R_CHANNELS(current_input),
+};
+
+static int ad74413r_parse_channel_config(struct iio_dev *indio_dev,
+ struct fwnode_handle *channel_node)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+ struct ad74413r_channel_config *config;
+ u32 index;
+ int ret;
+
+ ret = fwnode_property_read_u32(channel_node, "reg", &index);
+ if (ret) {
+ dev_err(st->dev, "Failed to read channel reg: %d\n", ret);
+ return ret;
+ }
+
+ if (index >= AD74413R_CHANNEL_MAX) {
+ dev_err(st->dev, "Channel index %u is too large\n", index);
+ return -EINVAL;
+ }
+
+ config = &st->channel_configs[index];
+ if (config->initialized) {
+ dev_err(st->dev, "Channel %u already initialized\n", index);
+ return -EINVAL;
+ }
+
+ config->func = CH_FUNC_HIGH_IMPEDANCE;
+ fwnode_property_read_u32(channel_node, "adi,ch-func", &config->func);
+
+ if (config->func < CH_FUNC_MIN || config->func > CH_FUNC_MAX) {
+ dev_err(st->dev, "Invalid channel function %u\n", config->func);
+ return -EINVAL;
+ }
+
+ if (!st->chip_info->hart_support &&
+ (config->func == CH_FUNC_CURRENT_INPUT_EXT_POWER_HART ||
+ config->func == CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART)) {
+ dev_err(st->dev, "Unsupported HART function %u\n", config->func);
+ return -EINVAL;
+ }
+
+ if (config->func == CH_FUNC_DIGITAL_INPUT_LOGIC ||
+ config->func == CH_FUNC_DIGITAL_INPUT_LOOP_POWER)
+ st->num_comparator_gpios++;
+
+ config->gpo_comparator = fwnode_property_read_bool(channel_node,
+ "adi,gpo-comparator");
+
+ if (!config->gpo_comparator)
+ st->num_gpo_gpios++;
+
+ indio_dev->num_channels += ad74413r_channels_map[config->func].num_channels;
+
+ config->initialized = true;
+
+ return 0;
+}
+
+static int ad74413r_parse_channel_configs(struct iio_dev *indio_dev)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+ struct fwnode_handle *channel_node = NULL;
+ int ret;
+
+ fwnode_for_each_available_child_node(dev_fwnode(st->dev), channel_node) {
+ ret = ad74413r_parse_channel_config(indio_dev, channel_node);
+ if (ret)
+ goto put_channel_node;
+ }
+
+ return 0;
+
+put_channel_node:
+ fwnode_handle_put(channel_node);
+
+ return ret;
+}
+
+static int ad74413r_setup_channels(struct iio_dev *indio_dev)
+{
+ struct ad74413r_state *st = iio_priv(indio_dev);
+ struct ad74413r_channel_config *config;
+ struct iio_chan_spec *channels, *chans;
+ unsigned int i, num_chans, chan_i;
+ int ret;
+
+	channels = devm_kcalloc(st->dev, indio_dev->num_channels,
+				sizeof(*channels), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ indio_dev->channels = channels;
+
+ for (i = 0; i < AD74413R_CHANNEL_MAX; i++) {
+ config = &st->channel_configs[i];
+ chans = ad74413r_channels_map[config->func].channels;
+ num_chans = ad74413r_channels_map[config->func].num_channels;
+
+ memcpy(channels, chans, num_chans * sizeof(*chans));
+
+ for (chan_i = 0; chan_i < num_chans; chan_i++) {
+ struct iio_chan_spec *chan = &channels[chan_i];
+
+ chan->channel = i;
+ if (chan->output)
+ chan->scan_index = -1;
+ else
+ chan->scan_index = i;
+ }
+
+ ret = ad74413r_set_channel_function(st, i, config->func);
+ if (ret)
+ return ret;
+
+ channels += num_chans;
+ }
+
+ return 0;
+}
+
+static int ad74413r_setup_gpios(struct ad74413r_state *st)
+{
+ struct ad74413r_channel_config *config;
+ unsigned int comp_gpio_i = 0;
+ unsigned int gpo_gpio_i = 0;
+ unsigned int i;
+ u8 gpo_config;
+ int ret;
+
+ for (i = 0; i < AD74413R_CHANNEL_MAX; i++) {
+ config = &st->channel_configs[i];
+
+ if (config->gpo_comparator) {
+ gpo_config = AD74413R_GPO_CONFIG_COMPARATOR;
+ } else {
+ gpo_config = AD74413R_GPO_CONFIG_LOGIC;
+ st->gpo_gpio_offsets[gpo_gpio_i++] = i;
+ }
+
+ if (config->func == CH_FUNC_DIGITAL_INPUT_LOGIC ||
+ config->func == CH_FUNC_DIGITAL_INPUT_LOOP_POWER)
+ st->comp_gpio_offsets[comp_gpio_i++] = i;
+
+ ret = ad74413r_set_gpo_config(st, i, gpo_config);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ad74413r_regulator_disable(void *regulator)
+{
+ regulator_disable(regulator);
+}
+
+static int ad74413r_probe(struct spi_device *spi)
+{
+ struct ad74413r_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ st->spi = spi;
+ st->dev = &spi->dev;
+ st->chip_info = device_get_match_data(&spi->dev);
+ mutex_init(&st->lock);
+ init_completion(&st->adc_data_completion);
+
+ st->regmap = devm_regmap_init(st->dev, NULL, st,
+ &ad74413r_regmap_config);
+ if (IS_ERR(st->regmap))
+ return PTR_ERR(st->regmap);
+
+ st->refin_reg = devm_regulator_get(st->dev, "refin");
+ if (IS_ERR(st->refin_reg))
+ return dev_err_probe(st->dev, PTR_ERR(st->refin_reg),
+ "Failed to get refin regulator\n");
+
+ ret = regulator_enable(st->refin_reg);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(st->dev, ad74413r_regulator_disable,
+ st->refin_reg);
+ if (ret)
+ return ret;
+
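+	/*
+	 * The property is specified in micro-ohms; default to 100 ohms
+	 * (100000000 micro-ohms) when it is absent.
+	 */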
+ st->sense_resistor_ohms = 100000000;
+ device_property_read_u32(st->dev, "shunt-resistor-micro-ohms",
+ &st->sense_resistor_ohms);
+ st->sense_resistor_ohms /= 1000000;
+
+ st->trig = devm_iio_trigger_alloc(st->dev, "%s-dev%d",
+ st->chip_info->name, iio_device_id(indio_dev));
+ if (!st->trig)
+ return -ENOMEM;
+
+ st->trig->ops = &ad74413r_trigger_ops;
+ iio_trigger_set_drvdata(st->trig, st);
+
+ ret = devm_iio_trigger_register(st->dev, st->trig);
+ if (ret)
+ return ret;
+
+ indio_dev->name = st->chip_info->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ad74413r_info;
+ indio_dev->trig = iio_trigger_get(st->trig);
+
+ ret = ad74413r_reset(st);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_parse_channel_configs(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_setup_channels(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad74413r_setup_gpios(st);
+ if (ret)
+ return ret;
+
+ if (st->num_gpo_gpios) {
+ st->gpo_gpiochip.owner = THIS_MODULE;
+ st->gpo_gpiochip.label = st->chip_info->name;
+ st->gpo_gpiochip.base = -1;
+ st->gpo_gpiochip.ngpio = st->num_gpo_gpios;
+ st->gpo_gpiochip.parent = st->dev;
+ st->gpo_gpiochip.can_sleep = true;
+ st->gpo_gpiochip.set = ad74413r_gpio_set;
+ st->gpo_gpiochip.set_multiple = ad74413r_gpio_set_multiple;
+ st->gpo_gpiochip.set_config = ad74413r_gpio_set_gpo_config;
+ st->gpo_gpiochip.get_direction =
+ ad74413r_gpio_get_gpo_direction;
+
+ ret = devm_gpiochip_add_data(st->dev, &st->gpo_gpiochip, st);
+ if (ret)
+ return ret;
+ }
+
+ if (st->num_comparator_gpios) {
+ st->comp_gpiochip.owner = THIS_MODULE;
+ st->comp_gpiochip.label = st->chip_info->name;
+ st->comp_gpiochip.base = -1;
+ st->comp_gpiochip.ngpio = st->num_comparator_gpios;
+ st->comp_gpiochip.parent = st->dev;
+ st->comp_gpiochip.can_sleep = true;
+ st->comp_gpiochip.get = ad74413r_gpio_get;
+ st->comp_gpiochip.get_multiple = ad74413r_gpio_get_multiple;
+ st->comp_gpiochip.set_config = ad74413r_gpio_set_comp_config;
+ st->comp_gpiochip.get_direction =
+ ad74413r_gpio_get_comp_direction;
+
+ ret = devm_gpiochip_add_data(st->dev, &st->comp_gpiochip, st);
+ if (ret)
+ return ret;
+ }
+
+ ret = ad74413r_set_adc_conv_seq(st, AD74413R_CONV_SEQ_OFF);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(st->dev, spi->irq, ad74413r_adc_data_interrupt,
+ 0, st->chip_info->name, indio_dev);
+ if (ret)
+ return dev_err_probe(st->dev, ret, "Failed to request irq\n");
+
+ ret = devm_iio_triggered_buffer_setup(st->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ &ad74413r_trigger_handler,
+ &ad74413r_buffer_ops);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(st->dev, indio_dev);
+}
+
+static int ad74413r_unregister_driver(struct spi_driver *spi)
+{
+ spi_unregister_driver(spi);
+
+ return 0;
+}
+
+static int __init ad74413r_register_driver(struct spi_driver *spi)
+{
+ crc8_populate_msb(ad74413r_crc8_table, AD74413R_CRC_POLYNOMIAL);
+
+ return spi_register_driver(spi);
+}
+
+static const struct ad74413r_chip_info ad74412r_chip_info_data = {
+ .hart_support = false,
+ .name = "ad74412r",
+};
+
+static const struct ad74413r_chip_info ad74413r_chip_info_data = {
+ .hart_support = true,
+ .name = "ad74413r",
+};
+
+static const struct of_device_id ad74413r_dt_id[] = {
+ {
+ .compatible = "adi,ad74412r",
+ .data = &ad74412r_chip_info_data,
+ },
+ {
+ .compatible = "adi,ad74413r",
+ .data = &ad74413r_chip_info_data,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ad74413r_dt_id);
+
+static struct spi_driver ad74413r_driver = {
+ .driver = {
+ .name = "ad74413r",
+ .of_match_table = ad74413r_dt_id,
+ },
+ .probe = ad74413r_probe,
+};
+
+module_driver(ad74413r_driver,
+ ad74413r_register_driver,
+ ad74413r_unregister_driver);
+
+MODULE_AUTHOR("Cosmin Tanislav <cosmin.tanislav@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD74413R ADDAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c
index 9efa692151f0..16c0a77f6a1c 100644
--- a/drivers/iio/amplifiers/hmc425a.c
+++ b/drivers/iio/amplifiers/hmc425a.c
@@ -192,7 +192,7 @@ static int hmc425a_probe(struct platform_device *pdev)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->type = (enum hmc425a_type)of_device_get_match_data(&pdev->dev);
+ st->type = (uintptr_t)of_device_get_match_data(&pdev->dev);
st->chip_info = &hmc425a_chip_info_tbl[st->type];
indio_dev->num_channels = st->chip_info->num_channels;
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 1ac94c4e9792..f8ce26a24c57 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -67,7 +67,7 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
dma_cookie_t cookie;
block->bytes_used = min(block->size, dmaengine_buffer->max_size);
- block->bytes_used = rounddown(block->bytes_used,
+ block->bytes_used = round_down(block->bytes_used,
dmaengine_buffer->align);
desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
diff --git a/drivers/iio/chemical/atlas-sensor.c b/drivers/iio/chemical/atlas-sensor.c
index 9cb99585b6ff..04b44a327614 100644
--- a/drivers/iio/chemical/atlas-sensor.c
+++ b/drivers/iio/chemical/atlas-sensor.c
@@ -434,9 +434,6 @@ static int atlas_buffer_predisable(struct iio_dev *indio_dev)
return 0;
}
-static const struct iio_trigger_ops atlas_interrupt_trigger_ops = {
-};
-
static const struct iio_buffer_setup_ops atlas_buffer_setup_ops = {
.postenable = atlas_buffer_postenable,
.predisable = atlas_buffer_predisable,
@@ -645,7 +642,6 @@ static int atlas_probe(struct i2c_client *client,
data->client = client;
data->trig = trig;
data->chip = chip;
- trig->ops = &atlas_interrupt_trigger_ops;
iio_trigger_set_drvdata(trig, indio_dev);
i2c_set_clientdata(client, indio_dev);
diff --git a/drivers/iio/chemical/sunrise_co2.c b/drivers/iio/chemical/sunrise_co2.c
index 233bd0f379c9..8440dc0c77cf 100644
--- a/drivers/iio/chemical/sunrise_co2.c
+++ b/drivers/iio/chemical/sunrise_co2.c
@@ -407,24 +407,24 @@ static int sunrise_read_raw(struct iio_dev *iio_dev,
mutex_lock(&sunrise->lock);
ret = sunrise_read_word(sunrise, SUNRISE_CO2_FILTERED_COMP_REG,
&value);
- *val = value;
mutex_unlock(&sunrise->lock);
if (ret)
return ret;
+ *val = value;
return IIO_VAL_INT;
case IIO_TEMP:
mutex_lock(&sunrise->lock);
ret = sunrise_read_word(sunrise, SUNRISE_CHIP_TEMPERATURE_REG,
&value);
- *val = value;
mutex_unlock(&sunrise->lock);
if (ret)
return ret;
+ *val = value;
return IIO_VAL_INT;
default:
diff --git a/drivers/iio/chemical/vz89x.c b/drivers/iio/chemical/vz89x.c
index 23b22a5f5c1c..e7e1c74a351e 100644
--- a/drivers/iio/chemical/vz89x.c
+++ b/drivers/iio/chemical/vz89x.c
@@ -242,7 +242,7 @@ static int vz89x_get_resistance_reading(struct vz89x_data *data,
struct iio_chan_spec const *chan,
int *val)
{
- u8 *tmp = (u8 *) &data->buffer[chan->address];
+ u8 *tmp = &data->buffer[chan->address];
switch (chan->scan_type.endianness) {
case IIO_LE:
diff --git a/drivers/iio/common/scmi_sensors/scmi_iio.c b/drivers/iio/common/scmi_sensors/scmi_iio.c
index 7cf2bf282cef..d538bf3ab1ef 100644
--- a/drivers/iio/common/scmi_sensors/scmi_iio.c
+++ b/drivers/iio/common/scmi_sensors/scmi_iio.c
@@ -279,6 +279,52 @@ static int scmi_iio_get_odr_val(struct iio_dev *iio_dev, int *val, int *val2)
return 0;
}
+static int scmi_iio_read_channel_data(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *ch, int *val, int *val2)
+{
+ struct scmi_iio_priv *sensor = iio_priv(iio_dev);
+ u32 sensor_config;
+ struct scmi_sensor_reading readings[SCMI_IIO_NUM_OF_AXIS];
+ int err;
+
+ sensor_config = FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK,
+ SCMI_SENS_CFG_SENSOR_ENABLE);
+ err = sensor->sensor_ops->config_set(
+ sensor->ph, sensor->sensor_info->id, sensor_config);
+ if (err) {
+ dev_err(&iio_dev->dev,
+ "Error in enabling sensor %s err %d",
+ sensor->sensor_info->name, err);
+ return err;
+ }
+
+ err = sensor->sensor_ops->reading_get_timestamped(
+ sensor->ph, sensor->sensor_info->id,
+ sensor->sensor_info->num_axis, readings);
+ if (err) {
+ dev_err(&iio_dev->dev,
+ "Error in reading raw attribute for sensor %s err %d",
+ sensor->sensor_info->name, err);
+ return err;
+ }
+
+ sensor_config = FIELD_PREP(SCMI_SENS_CFG_SENSOR_ENABLED_MASK,
+ SCMI_SENS_CFG_SENSOR_DISABLE);
+ err = sensor->sensor_ops->config_set(
+ sensor->ph, sensor->sensor_info->id, sensor_config);
+ if (err) {
+ dev_err(&iio_dev->dev,
+ "Error in disabling sensor %s err %d",
+ sensor->sensor_info->name, err);
+ return err;
+ }
+
+ *val = lower_32_bits(readings[ch->scan_index].value);
+ *val2 = upper_32_bits(readings[ch->scan_index].value);
+
+ return IIO_VAL_INT_64;
+}
+
static int scmi_iio_read_raw(struct iio_dev *iio_dev,
struct iio_chan_spec const *ch, int *val,
int *val2, long mask)
@@ -300,6 +346,14 @@ static int scmi_iio_read_raw(struct iio_dev *iio_dev,
case IIO_CHAN_INFO_SAMP_FREQ:
ret = scmi_iio_get_odr_val(iio_dev, val, val2);
return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(iio_dev);
+ if (ret)
+ return ret;
+
+ ret = scmi_iio_read_channel_data(iio_dev, ch, val, val2);
+ iio_device_release_direct_mode(iio_dev);
+ return ret;
default:
return -EINVAL;
}
@@ -381,7 +435,8 @@ static void scmi_iio_set_data_channel(struct iio_chan_spec *iio_chan,
iio_chan->type = type;
iio_chan->modified = 1;
iio_chan->channel2 = mod;
- iio_chan->info_mask_separate = BIT(IIO_CHAN_INFO_SCALE);
+ iio_chan->info_mask_separate =
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_RAW);
iio_chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ);
iio_chan->info_mask_shared_by_type_available =
BIT(IIO_CHAN_INFO_SAMP_FREQ);
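
scmi_iio_read_channel_data() returns IIO_VAL_INT_64, which transports one
64-bit reading through the two int slots of read_raw(): the low word in *val
and the high word in *val2. A minimal sketch of the split and the matching
reassembly (the helpers mirror the kernel's lower_32_bits()/upper_32_bits()):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t reading = 0x123456789ULL;

	/* Driver side: split into the two read_raw() outputs. */
	int32_t val  = (int32_t)lower_32_bits(reading);
	int32_t val2 = (int32_t)upper_32_bits(reading);

	/* Consumer side: reassemble val2:val into the 64-bit value. */
	uint64_t out = ((uint64_t)(uint32_t)val2 << 32) | (uint32_t)val;

	printf("0x%" PRIx64 "\n", out);	/* 0x123456789 */
	return 0;
}
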
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 1de395bda03e..eb452d0c423c 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -638,7 +638,7 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
int i, len = 0;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct st_sensor_data *sdata = iio_priv(indio_dev);
mutex_lock(&indio_dev->mlock);
@@ -660,7 +660,7 @@ ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
struct device_attribute *attr, char *buf)
{
int i, len = 0, q, r;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct st_sensor_data *sdata = iio_priv(indio_dev);
mutex_lock(&indio_dev->mlock);
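
The st_sensors fix works because these sysfs callbacks receive the struct
device that is embedded in the iio_dev itself, so dev_to_iio_dev(), a
container_of() on the embedded member, is the correct lookup, while
dev_get_drvdata() only works when matching drvdata happens to be set. A
minimal user-space sketch of the container_of() idea:

#include <stddef.h>
#include <stdio.h>

struct device { int dummy; };
struct iio_dev { int id; struct device dev; };

/* Recover the wrapping iio_dev from the embedded device, as
 * dev_to_iio_dev() does with container_of(). */
static struct iio_dev *dev_to_iio_dev(struct device *dev)
{
	return (struct iio_dev *)((char *)dev - offsetof(struct iio_dev, dev));
}

int main(void)
{
	struct iio_dev indio = { .id = 7 };

	printf("id = %d\n", dev_to_iio_dev(&indio.dev)->id);	/* id = 7 */
	return 0;
}
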
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 75e1f2b48638..bfcf7568de32 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -6,6 +6,16 @@
menu "Digital to analog converters"
+config AD3552R
+ tristate "Analog Devices AD3552R DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD3552R
+ Digital to Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad3552r.
+
config AD5064
tristate "Analog Devices AD5064 and similar multi-channel DAC driver"
depends on (SPI_MASTER && I2C!=m) || I2C
@@ -221,6 +231,17 @@ config AD5791
To compile this driver as a module, choose M here: the
module will be called ad5791.
+config AD7293
+ tristate "Analog Devices AD7293 Power Amplifier Current Controller"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD7293
+ Power Amplifier Current Controller with
+ ADC, DACs, and Temperature and Current Sensors.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7293.
+
config AD7303
tristate "Analog Devices AD7303 DAC driver"
depends on SPI
@@ -329,7 +350,6 @@ config MAX517
config MAX5821
tristate "Maxim MAX5821 DAC driver"
depends on I2C
- depends on OF
help
Say yes here to build support for Maxim MAX5821
10 bits DAC.
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 33e16f14902a..01a50131572f 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -4,6 +4,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AD3552R) += ad3552r.o
obj-$(CONFIG_AD5360) += ad5360.o
obj-$(CONFIG_AD5380) += ad5380.o
obj-$(CONFIG_AD5421) += ad5421.o
@@ -25,6 +26,7 @@ obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_AD5686_SPI) += ad5686-spi.o
obj-$(CONFIG_AD5696_I2C) += ad5696-i2c.o
+obj-$(CONFIG_AD7293) += ad7293.o
obj-$(CONFIG_AD7303) += ad7303.o
obj-$(CONFIG_AD8801) += ad8801.o
obj-$(CONFIG_CIO_DAC) += cio-dac.o
diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
new file mode 100644
index 000000000000..97f13c0b9631
--- /dev/null
+++ b/drivers/iio/dac/ad3552r.c
@@ -0,0 +1,1138 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices AD3552R
+ * Digital to Analog converter driver
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+#include <asm/unaligned.h>
+#include <linux/device.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+/* Register addresses */
+/* Primary address space */
+#define AD3552R_REG_ADDR_INTERFACE_CONFIG_A 0x00
+#define AD3552R_MASK_SOFTWARE_RESET (BIT(7) | BIT(0))
+#define AD3552R_MASK_ADDR_ASCENSION BIT(5)
+#define AD3552R_MASK_SDO_ACTIVE BIT(4)
+#define AD3552R_REG_ADDR_INTERFACE_CONFIG_B 0x01
+#define AD3552R_MASK_SINGLE_INST BIT(7)
+#define AD3552R_MASK_SHORT_INSTRUCTION BIT(3)
+#define AD3552R_REG_ADDR_DEVICE_CONFIG 0x02
+#define AD3552R_MASK_DEVICE_STATUS(n) BIT(4 + (n))
+#define AD3552R_MASK_CUSTOM_MODES GENMASK(3, 2)
+#define AD3552R_MASK_OPERATING_MODES GENMASK(1, 0)
+#define AD3552R_REG_ADDR_CHIP_TYPE 0x03
+#define AD3552R_MASK_CLASS GENMASK(7, 0)
+#define AD3552R_REG_ADDR_PRODUCT_ID_L 0x04
+#define AD3552R_REG_ADDR_PRODUCT_ID_H 0x05
+#define AD3552R_REG_ADDR_CHIP_GRADE 0x06
+#define AD3552R_MASK_GRADE GENMASK(7, 4)
+#define AD3552R_MASK_DEVICE_REVISION GENMASK(3, 0)
+#define AD3552R_REG_ADDR_SCRATCH_PAD 0x0A
+#define AD3552R_REG_ADDR_SPI_REVISION 0x0B
+#define AD3552R_REG_ADDR_VENDOR_L 0x0C
+#define AD3552R_REG_ADDR_VENDOR_H 0x0D
+#define AD3552R_REG_ADDR_STREAM_MODE 0x0E
+#define AD3552R_MASK_LENGTH GENMASK(7, 0)
+#define AD3552R_REG_ADDR_TRANSFER_REGISTER 0x0F
+#define AD3552R_MASK_MULTI_IO_MODE GENMASK(7, 6)
+#define AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE BIT(2)
+#define AD3552R_REG_ADDR_INTERFACE_CONFIG_C 0x10
+#define AD3552R_MASK_CRC_ENABLE (GENMASK(7, 6) |\
+ GENMASK(1, 0))
+#define AD3552R_MASK_STRICT_REGISTER_ACCESS BIT(5)
+#define AD3552R_REG_ADDR_INTERFACE_STATUS_A 0x11
+#define AD3552R_MASK_INTERFACE_NOT_READY BIT(7)
+#define AD3552R_MASK_CLOCK_COUNTING_ERROR BIT(5)
+#define AD3552R_MASK_INVALID_OR_NO_CRC BIT(3)
+#define AD3552R_MASK_WRITE_TO_READ_ONLY_REGISTER BIT(2)
+#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS BIT(1)
+#define AD3552R_MASK_REGISTER_ADDRESS_INVALID BIT(0)
+#define AD3552R_REG_ADDR_INTERFACE_CONFIG_D 0x14
+#define AD3552R_MASK_ALERT_ENABLE_PULLUP BIT(6)
+#define AD3552R_MASK_MEM_CRC_EN BIT(4)
+#define AD3552R_MASK_SDO_DRIVE_STRENGTH GENMASK(3, 2)
+#define AD3552R_MASK_DUAL_SPI_SYNCHROUNOUS_EN BIT(1)
+#define AD3552R_MASK_SPI_CONFIG_DDR BIT(0)
+#define AD3552R_REG_ADDR_SH_REFERENCE_CONFIG 0x15
+#define AD3552R_MASK_IDUMP_FAST_MODE BIT(6)
+#define AD3552R_MASK_SAMPLE_HOLD_DIFFERENTIAL_USER_EN BIT(5)
+#define AD3552R_MASK_SAMPLE_HOLD_USER_TRIM GENMASK(4, 3)
+#define AD3552R_MASK_SAMPLE_HOLD_USER_ENABLE BIT(2)
+#define AD3552R_MASK_REFERENCE_VOLTAGE_SEL GENMASK(1, 0)
+#define AD3552R_REG_ADDR_ERR_ALARM_MASK 0x16
+#define AD3552R_MASK_REF_RANGE_ALARM BIT(6)
+#define AD3552R_MASK_CLOCK_COUNT_ERR_ALARM BIT(5)
+#define AD3552R_MASK_MEM_CRC_ERR_ALARM BIT(4)
+#define AD3552R_MASK_SPI_CRC_ERR_ALARM BIT(3)
+#define AD3552R_MASK_WRITE_TO_READ_ONLY_ALARM BIT(2)
+#define AD3552R_MASK_PARTIAL_REGISTER_ACCESS_ALARM BIT(1)
+#define AD3552R_MASK_REGISTER_ADDRESS_INVALID_ALARM BIT(0)
+#define AD3552R_REG_ADDR_ERR_STATUS 0x17
+#define AD3552R_MASK_REF_RANGE_ERR_STATUS BIT(6)
+#define AD3552R_MASK_DUAL_SPI_STREAM_EXCEEDS_DAC_ERR_STATUS BIT(5)
+#define AD3552R_MASK_MEM_CRC_ERR_STATUS BIT(4)
+#define AD3552R_MASK_RESET_STATUS BIT(0)
+#define AD3552R_REG_ADDR_POWERDOWN_CONFIG 0x18
+#define AD3552R_MASK_CH_DAC_POWERDOWN(ch) BIT(4 + (ch))
+#define AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(ch) BIT(ch)
+#define AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE 0x19
+#define AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch) ((ch) ? GENMASK(7, 4) :\
+ GENMASK(3, 0))
+#define AD3552R_REG_ADDR_CH_OFFSET(ch) (0x1B + (ch) * 2)
+#define AD3552R_MASK_CH_OFFSET_BITS_0_7 GENMASK(7, 0)
+#define AD3552R_REG_ADDR_CH_GAIN(ch) (0x1C + (ch) * 2)
+#define AD3552R_MASK_CH_RANGE_OVERRIDE BIT(7)
+#define AD3552R_MASK_CH_GAIN_SCALING_N GENMASK(6, 5)
+#define AD3552R_MASK_CH_GAIN_SCALING_P GENMASK(4, 3)
+#define AD3552R_MASK_CH_OFFSET_POLARITY BIT(2)
+#define AD3552R_MASK_CH_OFFSET_BIT_8 BIT(0)
+/*
+ * Secondary region
+ * For multibyte registers specify the highest address because the access is
+ * done in descending order
+ */
+#define AD3552R_SECONDARY_REGION_START 0x28
+#define AD3552R_REG_ADDR_HW_LDAC_16B 0x28
+#define AD3552R_REG_ADDR_CH_DAC_16B(ch) (0x2C - (1 - ch) * 2)
+#define AD3552R_REG_ADDR_DAC_PAGE_MASK_16B 0x2E
+#define AD3552R_REG_ADDR_CH_SELECT_16B 0x2F
+#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_16B 0x31
+#define AD3552R_REG_ADDR_SW_LDAC_16B 0x32
+#define AD3552R_REG_ADDR_CH_INPUT_16B(ch) (0x36 - (1 - ch) * 2)
+/* 3 bytes registers */
+#define AD3552R_REG_START_24B 0x37
+#define AD3552R_REG_ADDR_HW_LDAC_24B 0x37
+#define AD3552R_REG_ADDR_CH_DAC_24B(ch) (0x3D - (1 - ch) * 3)
+#define AD3552R_REG_ADDR_DAC_PAGE_MASK_24B 0x40
+#define AD3552R_REG_ADDR_CH_SELECT_24B 0x41
+#define AD3552R_REG_ADDR_INPUT_PAGE_MASK_24B 0x44
+#define AD3552R_REG_ADDR_SW_LDAC_24B 0x45
+#define AD3552R_REG_ADDR_CH_INPUT_24B(ch) (0x4B - (1 - ch) * 3)
+
+/* Useful defines */
+#define AD3552R_NUM_CH 2
+#define AD3552R_MASK_CH(ch) BIT(ch)
+#define AD3552R_MASK_ALL_CH GENMASK(1, 0)
+#define AD3552R_MAX_REG_SIZE 3
+#define AD3552R_READ_BIT BIT(7)
+#define AD3552R_ADDR_MASK GENMASK(6, 0)
+#define AD3552R_MASK_DAC_12B 0xFFF0
+#define AD3552R_DEFAULT_CONFIG_B_VALUE 0x8
+#define AD3552R_SCRATCH_PAD_TEST_VAL1 0x34
+#define AD3552R_SCRATCH_PAD_TEST_VAL2 0xB2
+#define AD3552R_GAIN_SCALE 1000
+#define AD3552R_LDAC_PULSE_US 100
+
+enum ad3552r_ch_vref_select {
+ /* Internal source with Vref I/O floating */
+ AD3552R_INTERNAL_VREF_PIN_FLOATING,
+ /* Internal source with Vref I/O at 2.5V */
+ AD3552R_INTERNAL_VREF_PIN_2P5V,
+ /* External source with Vref I/O as input */
+ AD3552R_EXTERNAL_VREF_PIN_INPUT
+};
+
+enum ad3542r_id {
+ AD3542R_ID = 0x4008,
+ AD3552R_ID = 0x4009,
+};
+
+enum ad3552r_ch_output_range {
+ /* Range from 0 V to 2.5 V. Requires Rfb1x connection */
+ AD3552R_CH_OUTPUT_RANGE_0__2P5V,
+ /* Range from 0 V to 5 V. Requires Rfb1x connection */
+ AD3552R_CH_OUTPUT_RANGE_0__5V,
+ /* Range from 0 V to 10 V. Requires Rfb2x connection */
+ AD3552R_CH_OUTPUT_RANGE_0__10V,
+ /* Range from -5 V to 5 V. Requires Rfb2x connection */
+ AD3552R_CH_OUTPUT_RANGE_NEG_5__5V,
+ /* Range from -10 V to 10 V. Requires Rfb4x connection */
+ AD3552R_CH_OUTPUT_RANGE_NEG_10__10V,
+};
+
+static const s32 ad3552r_ch_ranges[][2] = {
+ [AD3552R_CH_OUTPUT_RANGE_0__2P5V] = {0, 2500},
+ [AD3552R_CH_OUTPUT_RANGE_0__5V] = {0, 5000},
+ [AD3552R_CH_OUTPUT_RANGE_0__10V] = {0, 10000},
+ [AD3552R_CH_OUTPUT_RANGE_NEG_5__5V] = {-5000, 5000},
+ [AD3552R_CH_OUTPUT_RANGE_NEG_10__10V] = {-10000, 10000}
+};
+
+enum ad3542r_ch_output_range {
+ /* Range from 0 V to 2.5 V. Requires Rfb1x connection */
+ AD3542R_CH_OUTPUT_RANGE_0__2P5V,
+ /* Range from 0 V to 3 V. Requires Rfb1x connection */
+ AD3542R_CH_OUTPUT_RANGE_0__3V,
+ /* Range from 0 V to 5 V. Requires Rfb1x connection */
+ AD3542R_CH_OUTPUT_RANGE_0__5V,
+ /* Range from 0 V to 10 V. Requires Rfb2x connection */
+ AD3542R_CH_OUTPUT_RANGE_0__10V,
+ /* Range from -2.5 V to 7.5 V. Requires Rfb2x connection */
+ AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V,
+ /* Range from -5 V to 5 V. Requires Rfb2x connection */
+ AD3542R_CH_OUTPUT_RANGE_NEG_5__5V,
+};
+
+static const s32 ad3542r_ch_ranges[][2] = {
+ [AD3542R_CH_OUTPUT_RANGE_0__2P5V] = {0, 2500},
+ [AD3542R_CH_OUTPUT_RANGE_0__3V] = {0, 3000},
+ [AD3542R_CH_OUTPUT_RANGE_0__5V] = {0, 5000},
+ [AD3542R_CH_OUTPUT_RANGE_0__10V] = {0, 10000},
+ [AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V] = {-2500, 7500},
+ [AD3542R_CH_OUTPUT_RANGE_NEG_5__5V] = {-5000, 5000}
+};
+
+enum ad3552r_ch_gain_scaling {
+ /* Gain scaling of 1 */
+ AD3552R_CH_GAIN_SCALING_1,
+ /* Gain scaling of 0.5 */
+ AD3552R_CH_GAIN_SCALING_0_5,
+ /* Gain scaling of 0.25 */
+ AD3552R_CH_GAIN_SCALING_0_25,
+ /* Gain scaling of 0.125 */
+ AD3552R_CH_GAIN_SCALING_0_125,
+};
+
+/* Gain * AD3552R_GAIN_SCALE */
+static const s32 gains_scaling_table[] = {
+ [AD3552R_CH_GAIN_SCALING_1] = 1000,
+ [AD3552R_CH_GAIN_SCALING_0_5] = 500,
+ [AD3552R_CH_GAIN_SCALING_0_25] = 250,
+ [AD3552R_CH_GAIN_SCALING_0_125] = 125
+};
+
+enum ad3552r_dev_attributes {
+ /* - Direct register values */
+ /* From 0-3 */
+ AD3552R_SDO_DRIVE_STRENGTH,
+ /*
+ * 0 -> Internal Vref, vref_io pin floating (default)
+ * 1 -> Internal Vref, vref_io driven by internal vref
+ * 2 or 3 -> External Vref
+ */
+ AD3552R_VREF_SELECT,
+ /* Read registers in ascending order if set, descending otherwise */
+ AD3552R_ADDR_ASCENSION,
+};
+
+enum ad3552r_ch_attributes {
+ /* DAC powerdown */
+ AD3552R_CH_DAC_POWERDOWN,
+ /* DAC amplifier powerdown */
+ AD3552R_CH_AMPLIFIER_POWERDOWN,
+ /* Select the output range. Select from enum ad3552r_ch_output_range */
+ AD3552R_CH_OUTPUT_RANGE_SEL,
+ /*
+ * Override the range selector in order to manually set the output
+ * voltage range
+ */
+ AD3552R_CH_RANGE_OVERRIDE,
+ /* Manually set the offset voltage */
+ AD3552R_CH_GAIN_OFFSET,
+ /* Sets the polarity of the offset. */
+ AD3552R_CH_GAIN_OFFSET_POLARITY,
+ /* PDAC gain scaling */
+ AD3552R_CH_GAIN_SCALING_P,
+ /* NDAC gain scaling */
+ AD3552R_CH_GAIN_SCALING_N,
+ /* Rfb value */
+ AD3552R_CH_RFB,
+ /* Channel select. When set allow Input -> DAC and Mask -> DAC */
+ AD3552R_CH_SELECT,
+};
+
+struct ad3552r_ch_data {
+ s32 scale_int;
+ s32 scale_dec;
+ s32 offset_int;
+ s32 offset_dec;
+ s16 gain_offset;
+ u16 rfb;
+ u8 n;
+ u8 p;
+ u8 range;
+ bool range_override;
+};
+
+struct ad3552r_desc {
+ /* Used to lock the SPI bus for atomic operations where needed */
+ struct mutex lock;
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_ldac;
+ struct spi_device *spi;
+ struct ad3552r_ch_data ch_data[AD3552R_NUM_CH];
+ struct iio_chan_spec channels[AD3552R_NUM_CH + 1];
+ unsigned long enabled_ch;
+ unsigned int num_ch;
+ enum ad3542r_id chip_id;
+};
+
+static const u16 addr_mask_map[][2] = {
+ [AD3552R_ADDR_ASCENSION] = {
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
+ AD3552R_MASK_ADDR_ASCENSION
+ },
+ [AD3552R_SDO_DRIVE_STRENGTH] = {
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
+ AD3552R_MASK_SDO_DRIVE_STRENGTH
+ },
+ [AD3552R_VREF_SELECT] = {
+ AD3552R_REG_ADDR_SH_REFERENCE_CONFIG,
+ AD3552R_MASK_REFERENCE_VOLTAGE_SEL
+ },
+};
+
+/* 0 -> reg addr, 1->ch0 mask, 2->ch1 mask */
+static const u16 addr_mask_map_ch[][3] = {
+ [AD3552R_CH_DAC_POWERDOWN] = {
+ AD3552R_REG_ADDR_POWERDOWN_CONFIG,
+ AD3552R_MASK_CH_DAC_POWERDOWN(0),
+ AD3552R_MASK_CH_DAC_POWERDOWN(1)
+ },
+ [AD3552R_CH_AMPLIFIER_POWERDOWN] = {
+ AD3552R_REG_ADDR_POWERDOWN_CONFIG,
+ AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(0),
+ AD3552R_MASK_CH_AMPLIFIER_POWERDOWN(1)
+ },
+ [AD3552R_CH_OUTPUT_RANGE_SEL] = {
+ AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE,
+ AD3552R_MASK_CH_OUTPUT_RANGE_SEL(0),
+ AD3552R_MASK_CH_OUTPUT_RANGE_SEL(1)
+ },
+ [AD3552R_CH_SELECT] = {
+ AD3552R_REG_ADDR_CH_SELECT_16B,
+ AD3552R_MASK_CH(0),
+ AD3552R_MASK_CH(1)
+ }
+};
+
+static u8 _ad3552r_reg_len(u8 addr)
+{
+ switch (addr) {
+ case AD3552R_REG_ADDR_HW_LDAC_16B:
+ case AD3552R_REG_ADDR_CH_SELECT_16B:
+ case AD3552R_REG_ADDR_SW_LDAC_16B:
+ case AD3552R_REG_ADDR_HW_LDAC_24B:
+ case AD3552R_REG_ADDR_CH_SELECT_24B:
+ case AD3552R_REG_ADDR_SW_LDAC_24B:
+ return 1;
+ default:
+ break;
+ }
+
+ if (addr > AD3552R_REG_ADDR_HW_LDAC_24B)
+ return 3;
+ if (addr > AD3552R_REG_ADDR_HW_LDAC_16B)
+ return 2;
+
+ return 1;
+}
+
+/* SPI transfer to device */
+static int ad3552r_transfer(struct ad3552r_desc *dac, u8 addr, u32 len,
+ u8 *data, bool is_read)
+{
+ /* Maximum transfer: Addr (1B) + 2 * (Data Reg (3B)) + SW LDAC(1B) */
+ u8 buf[8];
+
+ buf[0] = addr & AD3552R_ADDR_MASK;
+ buf[0] |= is_read ? AD3552R_READ_BIT : 0;
+ if (is_read)
+ return spi_write_then_read(dac->spi, buf, 1, data, len);
+
+ memcpy(buf + 1, data, len);
+ return spi_write_then_read(dac->spi, buf, len + 1, NULL, 0);
+}
+
+static int ad3552r_write_reg(struct ad3552r_desc *dac, u8 addr, u16 val)
+{
+ u8 reg_len;
+ u8 buf[AD3552R_MAX_REG_SIZE] = { 0 };
+
+ reg_len = _ad3552r_reg_len(addr);
+ if (reg_len == 2)
+ /* Only DAC registers are 2 bytes wide */
+ val &= AD3552R_MASK_DAC_12B;
+ if (reg_len == 1)
+ buf[0] = val & 0xFF;
+ else
+ /* reg_len can be 2 or 3, but the 3rd byte needs to be set to 0 */
+ put_unaligned_be16(val, buf);
+
+ return ad3552r_transfer(dac, addr, reg_len, buf, false);
+}
+
+static int ad3552r_read_reg(struct ad3552r_desc *dac, u8 addr, u16 *val)
+{
+ int err;
+ u8 reg_len, buf[AD3552R_MAX_REG_SIZE] = { 0 };
+
+ reg_len = _ad3552r_reg_len(addr);
+ err = ad3552r_transfer(dac, addr, reg_len, buf, true);
+ if (err)
+ return err;
+
+ if (reg_len == 1)
+ *val = buf[0];
+ else
+ /* reg_len can be 2 or 3, but only the first 2 bytes are relevant */
+ *val = get_unaligned_be16(buf);
+
+ return 0;
+}
+
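+/*
+ * Shift val into the field selected by mask, e.g. for
+ * mask = GENMASK(6, 5) = 0x60: __ffs(mask) = 5, so val = 2 becomes
+ * (2 << 5) & 0x60 = 0x40. A run-time-mask equivalent of FIELD_PREP().
+ */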
+static u16 ad3552r_field_prep(u16 val, u16 mask)
+{
+ return (val << __ffs(mask)) & mask;
+}
+
+/* Update field of a register, shift val if needed */
+static int ad3552r_update_reg_field(struct ad3552r_desc *dac, u8 addr, u16 mask,
+ u16 val)
+{
+ int ret;
+ u16 reg;
+
+ ret = ad3552r_read_reg(dac, addr, &reg);
+ if (ret < 0)
+ return ret;
+
+ reg &= ~mask;
+ reg |= ad3552r_field_prep(val, mask);
+
+ return ad3552r_write_reg(dac, addr, reg);
+}
+
+static int ad3552r_set_ch_value(struct ad3552r_desc *dac,
+ enum ad3552r_ch_attributes attr,
+ u8 ch,
+ u16 val)
+{
+ /* Update register related to attributes in chip */
+ return ad3552r_update_reg_field(dac, addr_mask_map_ch[attr][0],
+ addr_mask_map_ch[attr][ch + 1], val);
+}
+
+#define AD3552R_CH_DAC(_idx) ((struct iio_chan_spec) { \
+ .type = IIO_VOLTAGE, \
+ .output = true, \
+ .indexed = true, \
+ .channel = _idx, \
+ .scan_index = _idx, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_ENABLE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+})
+
+static int ad3552r_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long mask)
+{
+ struct ad3552r_desc *dac = iio_priv(indio_dev);
+ u16 tmp_val;
+ int err;
+ u8 ch = chan->channel;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&dac->lock);
+ err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_CH_DAC_24B(ch),
+ &tmp_val);
+ mutex_unlock(&dac->lock);
+ if (err < 0)
+ return err;
+ *val = tmp_val;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ mutex_lock(&dac->lock);
+ err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_POWERDOWN_CONFIG,
+ &tmp_val);
+ mutex_unlock(&dac->lock);
+ if (err < 0)
+ return err;
+ *val = !((tmp_val & AD3552R_MASK_CH_DAC_POWERDOWN(ch)) >>
+ __ffs(AD3552R_MASK_CH_DAC_POWERDOWN(ch)));
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = dac->ch_data[ch].scale_int;
+ *val2 = dac->ch_data[ch].scale_dec;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = dac->ch_data[ch].offset_int;
+ *val2 = dac->ch_data[ch].offset_dec;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad3552r_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad3552r_desc *dac = iio_priv(indio_dev);
+ int err;
+
+ mutex_lock(&dac->lock);
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ err = ad3552r_write_reg(dac,
+ AD3552R_REG_ADDR_CH_DAC_24B(chan->channel),
+ val);
+ break;
+ case IIO_CHAN_INFO_ENABLE:
+ err = ad3552r_set_ch_value(dac, AD3552R_CH_DAC_POWERDOWN,
+ chan->channel, !val);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ mutex_unlock(&dac->lock);
+
+ return err;
+}
+
+static const struct iio_info ad3552r_iio_info = {
+ .read_raw = ad3552r_read_raw,
+ .write_raw = ad3552r_write_raw
+};
+
+static int32_t ad3552r_trigger_hw_ldac(struct gpio_desc *ldac)
+{
+ gpiod_set_value_cansleep(ldac, 0);
+ usleep_range(AD3552R_LDAC_PULSE_US, AD3552R_LDAC_PULSE_US + 10);
+ gpiod_set_value_cansleep(ldac, 1);
+
+ return 0;
+}
+
+static int ad3552r_write_all_channels(struct ad3552r_desc *dac, u8 *data)
+{
+ int err, len;
+ u8 addr, buff[AD3552R_NUM_CH * AD3552R_MAX_REG_SIZE + 1];
+
+ addr = AD3552R_REG_ADDR_CH_INPUT_24B(1);
+ /* CH1 */
+ memcpy(buff, data + 2, 2);
+ buff[2] = 0;
+ /* CH0 */
+ memcpy(buff + 3, data, 2);
+ buff[5] = 0;
+ len = 6;
+ if (!dac->gpio_ldac) {
+ /* Software LDAC */
+ buff[6] = AD3552R_MASK_ALL_CH;
+ ++len;
+ }
+ err = ad3552r_transfer(dac, addr, len, buff, false);
+ if (err)
+ return err;
+
+ if (dac->gpio_ldac)
+ return ad3552r_trigger_hw_ldac(dac->gpio_ldac);
+
+ return 0;
+}
+
+static int ad3552r_write_codes(struct ad3552r_desc *dac, u32 mask, u8 *data)
+{
+ int err;
+ u8 addr, buff[AD3552R_MAX_REG_SIZE];
+
+ if (mask == AD3552R_MASK_ALL_CH) {
+ if (memcmp(data, data + 2, 2) != 0)
+ return ad3552r_write_all_channels(dac, data);
+
+ addr = AD3552R_REG_ADDR_INPUT_PAGE_MASK_24B;
+ } else {
+ addr = AD3552R_REG_ADDR_CH_INPUT_24B(__ffs(mask));
+ }
+
+ memcpy(buff, data, 2);
+ buff[2] = 0;
+ err = ad3552r_transfer(dac, addr, 3, buff, false);
+ if (err)
+ return err;
+
+ if (dac->gpio_ldac)
+ return ad3552r_trigger_hw_ldac(dac->gpio_ldac);
+
+ return ad3552r_write_reg(dac, AD3552R_REG_ADDR_SW_LDAC_24B, mask);
+}
+
+static irqreturn_t ad3552r_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct iio_buffer *buf = indio_dev->buffer;
+ struct ad3552r_desc *dac = iio_priv(indio_dev);
+ /* Maximum size of a scan */
+ u8 buff[AD3552R_NUM_CH * AD3552R_MAX_REG_SIZE];
+ int err;
+
+ memset(buff, 0, sizeof(buff));
+ err = iio_pop_from_buffer(buf, buff);
+ if (err)
+ goto end;
+
+ mutex_lock(&dac->lock);
+ ad3552r_write_codes(dac, *indio_dev->active_scan_mask, buff);
+ mutex_unlock(&dac->lock);
+end:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int ad3552r_check_scratch_pad(struct ad3552r_desc *dac)
+{
+ const u16 val1 = AD3552R_SCRATCH_PAD_TEST_VAL1;
+ const u16 val2 = AD3552R_SCRATCH_PAD_TEST_VAL2;
+ u16 val;
+ int err;
+
+ err = ad3552r_write_reg(dac, AD3552R_REG_ADDR_SCRATCH_PAD, val1);
+ if (err < 0)
+ return err;
+
+ err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_SCRATCH_PAD, &val);
+ if (err < 0)
+ return err;
+
+ if (val1 != val)
+ return -ENODEV;
+
+ err = ad3552r_write_reg(dac, AD3552R_REG_ADDR_SCRATCH_PAD, val2);
+ if (err < 0)
+ return err;
+
+ err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_SCRATCH_PAD, &val);
+ if (err < 0)
+ return err;
+
+ if (val2 != val)
+ return -ENODEV;
+
+ return 0;
+}
+
+struct reg_addr_pool {
+ struct ad3552r_desc *dac;
+ u8 addr;
+};
+
+static int ad3552r_read_reg_wrapper(struct reg_addr_pool *addr)
+{
+ int err;
+ u16 val;
+
+ err = ad3552r_read_reg(addr->dac, addr->addr, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+static int ad3552r_reset(struct ad3552r_desc *dac)
+{
+ struct reg_addr_pool addr;
+ int ret;
+ u16 val;
+
+ dac->gpio_reset = devm_gpiod_get_optional(&dac->spi->dev, "reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(dac->gpio_reset))
+ return dev_err_probe(&dac->spi->dev, PTR_ERR(dac->gpio_reset),
+ "Error while getting gpio reset");
+
+ if (dac->gpio_reset) {
+ /* Perform hardware reset */
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(dac->gpio_reset, 1);
+ } else {
+ /* Perform software reset if no GPIO provided */
+ ret = ad3552r_update_reg_field(dac,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
+ AD3552R_MASK_SOFTWARE_RESET,
+ AD3552R_MASK_SOFTWARE_RESET);
+ if (ret < 0)
+ return ret;
+
+ }
+
+ addr.dac = dac;
+ addr.addr = AD3552R_REG_ADDR_INTERFACE_CONFIG_B;
+ ret = readx_poll_timeout(ad3552r_read_reg_wrapper, &addr, val,
+ val == AD3552R_DEFAULT_CONFIG_B_VALUE ||
+ val < 0,
+ 5000, 50000);
+ if (val < 0)
+ ret = val;
+ if (ret) {
+ dev_err(&dac->spi->dev, "Error while resetting");
+ return ret;
+ }
+
+ ret = readx_poll_timeout(ad3552r_read_reg_wrapper, &addr, val,
+ !(val & AD3552R_MASK_INTERFACE_NOT_READY) ||
+ val < 0,
+ 5000, 50000);
+ if (val < 0)
+ ret = val;
+ if (ret) {
+ dev_err(&dac->spi->dev, "Error while resetting");
+ return ret;
+ }
+
+ return ad3552r_update_reg_field(dac,
+ addr_mask_map[AD3552R_ADDR_ASCENSION][0],
+ addr_mask_map[AD3552R_ADDR_ASCENSION][1],
+ val);
+}
+
+static void ad3552r_get_custom_range(struct ad3552r_desc *dac, s32 i, s32 *v_min,
+ s32 *v_max)
+{
+ s64 vref, tmp, common, offset, gn, gp;
+ /*
+ * From datasheet formula (In Volts):
+ * Vmin = 2.5 + [(GainN + Offset / 1024) * 2.5 * Rfb * 1.03]
+ * Vmax = 2.5 - [(GainP + Offset / 1024) * 2.5 * Rfb * 1.03]
+ * Calculations are done in millivolts
+ */
+ vref = 2500;
+ /* 2.5 * 1.03 * 1000 (To mV) */
+ common = 2575 * dac->ch_data[i].rfb;
+ offset = dac->ch_data[i].gain_offset;
+
+ gn = gains_scaling_table[dac->ch_data[i].n];
+ tmp = (1024 * gn + AD3552R_GAIN_SCALE * offset) * common;
+ tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE);
+ *v_max = vref + tmp;
+
+ gp = gains_scaling_table[dac->ch_data[i].p];
+ tmp = (1024 * gp - AD3552R_GAIN_SCALE * offset) * common;
+ tmp = div_s64(tmp, 1024 * AD3552R_GAIN_SCALE);
+ *v_min = vref - tmp;
+}
+
+static void ad3552r_calc_gain_and_offset(struct ad3552r_desc *dac, s32 ch)
+{
+ s32 idx, v_max, v_min, span, rem;
+ s64 tmp;
+
+ if (dac->ch_data[ch].range_override) {
+ ad3552r_get_custom_range(dac, ch, &v_min, &v_max);
+ } else {
+ /* Normal range */
+ idx = dac->ch_data[ch].range;
+ if (dac->chip_id == AD3542R_ID) {
+ v_min = ad3542r_ch_ranges[idx][0];
+ v_max = ad3542r_ch_ranges[idx][1];
+ } else {
+ v_min = ad3552r_ch_ranges[idx][0];
+ v_max = ad3552r_ch_ranges[idx][1];
+ }
+ }
+
+ /*
+ * From datasheet formula:
+ * Vout = Span * (D / 65536) + Vmin
+ * Converted to scale and offset:
+ * Scale = Span / 65536
+ * Offset = 65536 * Vmin / Span
+ *
+ * Remainders are in micros in order to be printed as
+ * IIO_VAL_INT_PLUS_MICRO
+ */
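+ /*
+ * Worked example for the -10 V .. +10 V range: span = 20000 mV,
+ * so scale_int = 0, rem = 20000, and
+ * scale_dec = 20000 * 1000000 / 65536 ~= 305176 (0.305176 mV/LSB);
+ * offset_int = -10000 * 65536 / 20000 = -32768, offset_dec = 0.
+ */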
+ span = v_max - v_min;
+ dac->ch_data[ch].scale_int = div_s64_rem(span, 65536, &rem);
+ /* Do operations in microvolts */
+ dac->ch_data[ch].scale_dec = DIV_ROUND_CLOSEST((s64)rem * 1000000,
+ 65536);
+
+ dac->ch_data[ch].offset_int = div_s64_rem(v_min * 65536, span, &rem);
+ tmp = (s64)rem * 1000000;
+ dac->ch_data[ch].offset_dec = div_s64(tmp, span);
+}
+
+static int ad3552r_find_range(u16 id, s32 *vals)
+{
+ int i, len;
+ const s32 (*ranges)[2];
+
+ if (id == AD3542R_ID) {
+ len = ARRAY_SIZE(ad3542r_ch_ranges);
+ ranges = ad3542r_ch_ranges;
+ } else {
+ len = ARRAY_SIZE(ad3552r_ch_ranges);
+ ranges = ad3552r_ch_ranges;
+ }
+
+ for (i = 0; i < len; i++)
+ if (vals[0] == ranges[i][0] * 1000 &&
+ vals[1] == ranges[i][1] * 1000)
+ return i;
+
+ return -EINVAL;
+}
+
+static int ad3552r_configure_custom_gain(struct ad3552r_desc *dac,
+ struct fwnode_handle *child,
+ u32 ch)
+{
+ struct device *dev = &dac->spi->dev;
+ struct fwnode_handle *gain_child;
+ u32 val;
+ int err;
+ u8 addr;
+ u16 reg = 0, offset;
+
+ gain_child = fwnode_get_named_child_node(child,
+ "custom-output-range-config");
+ if (!gain_child) {
+ dev_err(dev,
+ "mandatory custom-output-range-config property missing\n");
+ return -EINVAL;
+ }
+
+ dac->ch_data[ch].range_override = 1;
+ reg |= ad3552r_field_prep(1, AD3552R_MASK_CH_RANGE_OVERRIDE);
+
+ err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-p", &val);
+ if (err) {
+ dev_err(dev, "mandatory adi,gain-scaling-p property missing\n");
+ goto put_child;
+ }
+ reg |= ad3552r_field_prep(val, AD3552R_MASK_CH_GAIN_SCALING_P);
+ dac->ch_data[ch].p = val;
+
+ err = fwnode_property_read_u32(gain_child, "adi,gain-scaling-n", &val);
+ if (err) {
+ dev_err(dev, "mandatory adi,gain-scaling-n property missing\n");
+ goto put_child;
+ }
+ reg |= ad3552r_field_prep(val, AD3552R_MASK_CH_GAIN_SCALING_N);
+ dac->ch_data[ch].n = val;
+
+ err = fwnode_property_read_u32(gain_child, "adi,rfb-ohms", &val);
+ if (err) {
+ dev_err(dev, "mandatory adi,rfb-ohms property missing\n");
+ goto put_child;
+ }
+ dac->ch_data[ch].rfb = val;
+
+ err = fwnode_property_read_u32(gain_child, "adi,gain-offset", &val);
+ if (err) {
+ dev_err(dev, "mandatory adi,gain-offset property missing\n");
+ goto put_child;
+ }
+ dac->ch_data[ch].gain_offset = val;
+
+ offset = abs((s32)val);
+ reg |= ad3552r_field_prep((offset >> 8), AD3552R_MASK_CH_OFFSET_BIT_8);
+
+ reg |= ad3552r_field_prep((s32)val < 0, AD3552R_MASK_CH_OFFSET_POLARITY);
+ addr = AD3552R_REG_ADDR_CH_GAIN(ch);
+ err = ad3552r_write_reg(dac, addr,
+ offset & AD3552R_MASK_CH_OFFSET_BITS_0_7);
+ if (err) {
+ dev_err(dev, "Error writing register\n");
+ goto put_child;
+ }
+
+ err = ad3552r_write_reg(dac, addr, reg);
+ if (err) {
+ dev_err(dev, "Error writing register\n");
+ goto put_child;
+ }
+
+put_child:
+ fwnode_handle_put(gain_child);
+
+ return err;
+}
+
+static void ad3552r_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
+static int ad3552r_configure_device(struct ad3552r_desc *dac)
+{
+ struct device *dev = &dac->spi->dev;
+ struct fwnode_handle *child;
+ struct regulator *vref;
+ int err, cnt = 0, voltage, delta = 100000;
+ u32 vals[2], val, ch;
+
+ dac->gpio_ldac = devm_gpiod_get_optional(dev, "ldac", GPIOD_OUT_HIGH);
+ if (IS_ERR(dac->gpio_ldac))
+ return dev_err_probe(dev, PTR_ERR(dac->gpio_ldac),
+ "Error getting gpio ldac");
+
+ vref = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(vref)) {
+ if (PTR_ERR(vref) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(vref),
+ "Error getting vref");
+
+ if (device_property_read_bool(dev, "adi,vref-out-en"))
+ val = AD3552R_INTERNAL_VREF_PIN_2P5V;
+ else
+ val = AD3552R_INTERNAL_VREF_PIN_FLOATING;
+ } else {
+ err = regulator_enable(vref);
+ if (err) {
+ dev_err(dev, "Failed to enable external vref supply\n");
+ return err;
+ }
+
+ err = devm_add_action_or_reset(dev, ad3552r_reg_disable, vref);
+ if (err) {
+ regulator_disable(vref);
+ return err;
+ }
+
+ voltage = regulator_get_voltage(vref);
+ if (voltage > 2500000 + delta || voltage < 2500000 - delta) {
+ dev_warn(dev, "vref-supply must be 2.5V");
+ return -EINVAL;
+ }
+ val = AD3552R_EXTERNAL_VREF_PIN_INPUT;
+ }
+
+ err = ad3552r_update_reg_field(dac,
+ addr_mask_map[AD3552R_VREF_SELECT][0],
+ addr_mask_map[AD3552R_VREF_SELECT][1],
+ val);
+ if (err)
+ return err;
+
+ err = device_property_read_u32(dev, "adi,sdo-drive-strength", &val);
+ if (!err) {
+ if (val > 3) {
+ dev_err(dev, "adi,sdo-drive-strength must be less than 4\n");
+ return -EINVAL;
+ }
+
+ err = ad3552r_update_reg_field(dac,
+ addr_mask_map[AD3552R_SDO_DRIVE_STRENGTH][0],
+ addr_mask_map[AD3552R_SDO_DRIVE_STRENGTH][1],
+ val);
+ if (err)
+ return err;
+ }
+
+ dac->num_ch = device_get_child_node_count(dev);
+ if (!dac->num_ch) {
+ dev_err(dev, "No channels defined\n");
+ return -ENODEV;
+ }
+
+ device_for_each_child_node(dev, child) {
+ err = fwnode_property_read_u32(child, "reg", &ch);
+ if (err) {
+ dev_err(dev, "mandatory reg property missing\n");
+ goto put_child;
+ }
+ if (ch >= AD3552R_NUM_CH) {
+ dev_err(dev, "reg must be less than %d\n",
+ AD3552R_NUM_CH);
+ err = -EINVAL;
+ goto put_child;
+ }
+
+ if (fwnode_property_present(child, "adi,output-range-microvolt")) {
+ err = fwnode_property_read_u32_array(child,
+ "adi,output-range-microvolt",
+ vals,
+ 2);
+ if (err) {
+ dev_err(dev,
+ "adi,output-range-microvolt property could not be parsed\n");
+ goto put_child;
+ }
+
+ err = ad3552r_find_range(dac->chip_id, vals);
+ if (err < 0) {
+ dev_err(dev,
+ "Invalid adi,output-range-microvolt value\n");
+ goto put_child;
+ }
+ val = err;
+ err = ad3552r_set_ch_value(dac,
+ AD3552R_CH_OUTPUT_RANGE_SEL,
+ ch, val);
+ if (err)
+ goto put_child;
+
+ dac->ch_data[ch].range = val;
+ } else if (dac->chip_id == AD3542R_ID) {
+ dev_err(dev,
+ "adi,output-range-microvolt is required for ad3542r\n");
+ err = -EINVAL;
+ goto put_child;
+ } else {
+ err = ad3552r_configure_custom_gain(dac, child, ch);
+ if (err)
+ goto put_child;
+ }
+
+ ad3552r_calc_gain_and_offset(dac, ch);
+ dac->enabled_ch |= BIT(ch);
+
+ err = ad3552r_set_ch_value(dac, AD3552R_CH_SELECT, ch, 1);
+ if (err < 0)
+ goto put_child;
+
+ dac->channels[cnt] = AD3552R_CH_DAC(ch);
+ ++cnt;
+
+ }
+
+ /* Disable unused channels */
+ for_each_clear_bit(ch, &dac->enabled_ch, AD3552R_NUM_CH) {
+ err = ad3552r_set_ch_value(dac, AD3552R_CH_AMPLIFIER_POWERDOWN,
+ ch, 1);
+ if (err)
+ return err;
+ }
+
+ dac->num_ch = cnt;
+
+ return 0;
+put_child:
+ fwnode_handle_put(child);
+
+ return err;
+}
+
+static int ad3552r_init(struct ad3552r_desc *dac)
+{
+ int err;
+ u16 val, id;
+
+ err = ad3552r_reset(dac);
+ if (err) {
+ dev_err(&dac->spi->dev, "Reset failed\n");
+ return err;
+ }
+
+ err = ad3552r_check_scratch_pad(dac);
+ if (err) {
+ dev_err(&dac->spi->dev, "Scratch pad test failed\n");
+ return err;
+ }
+
+ err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_PRODUCT_ID_L, &val);
+ if (err) {
+ dev_err(&dac->spi->dev, "Fail read PRODUCT_ID_L\n");
+ return err;
+ }
+
+ id = val;
+ err = ad3552r_read_reg(dac, AD3552R_REG_ADDR_PRODUCT_ID_H, &val);
+ if (err) {
+ dev_err(&dac->spi->dev, "Fail read PRODUCT_ID_H\n");
+ return err;
+ }
+
+ id |= val << 8;
+ if (id != dac->chip_id) {
+ dev_err(&dac->spi->dev, "Product id not matching\n");
+ return -ENODEV;
+ }
+
+ return ad3552r_configure_device(dac);
+}
+
+static int ad3552r_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct ad3552r_desc *dac;
+ struct iio_dev *indio_dev;
+ int err;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*dac));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ dac = iio_priv(indio_dev);
+ dac->spi = spi;
+ dac->chip_id = id->driver_data;
+
+ mutex_init(&dac->lock);
+
+ err = ad3552r_init(dac);
+ if (err)
+ return err;
+
+ /* Config triggered buffer device */
+ if (dac->chip_id == AD3552R_ID)
+ indio_dev->name = "ad3552r";
+ else
+ indio_dev->name = "ad3542r";
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->info = &ad3552r_iio_info;
+ indio_dev->num_channels = dac->num_ch;
+ indio_dev->channels = dac->channels;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ err = devm_iio_triggered_buffer_setup_ext(&indio_dev->dev, indio_dev, NULL,
+ &ad3552r_trigger_handler,
+ IIO_BUFFER_DIRECTION_OUT,
+ NULL,
+ NULL);
+ if (err)
+ return err;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ad3552r_id[] = {
+ { "ad3542r", AD3542R_ID },
+ { "ad3552r", AD3552R_ID },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad3552r_id);
+
+static const struct of_device_id ad3552r_of_match[] = {
+ { .compatible = "adi,ad3542r"},
+ { .compatible = "adi,ad3552r"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad3552r_of_match);
+
+static struct spi_driver ad3552r_driver = {
+ .driver = {
+ .name = "ad3552r",
+ .of_match_table = ad3552r_of_match,
+ },
+ .probe = ad3552r_probe,
+ .id_table = ad3552r_id
+};
+module_spi_driver(ad3552r_driver);
+
+MODULE_AUTHOR("Mihail Chindris <mihail.chindris@analog.com>");
+MODULE_DESCRIPTION("Analog Device AD3552R DAC");
+MODULE_LICENSE("GPL v2");
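
The single-byte instruction framing in ad3552r_transfer() puts the 7-bit
register address in the low bits and the read flag in bit 7. A minimal
sketch of building that instruction byte, reusing the driver's
AD3552R_READ_BIT / AD3552R_ADDR_MASK values:

#include <stdint.h>
#include <stdio.h>

#define AD3552R_READ_BIT	0x80	/* BIT(7) */
#define AD3552R_ADDR_MASK	0x7F	/* GENMASK(6, 0) */

static uint8_t instr_byte(uint8_t addr, int is_read)
{
	uint8_t b = addr & AD3552R_ADDR_MASK;

	return is_read ? (b | AD3552R_READ_BIT) : b;
}

int main(void)
{
	/* Read of the CH0 16-bit DAC register at 0x2A -> 0xAA */
	printf("0x%02X\n", instr_byte(0x2A, 1));
	/* A write keeps bit 7 clear -> 0x2A */
	printf("0x%02X\n", instr_byte(0x2A, 0));
	return 0;
}
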
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index fd9cac4f6321..27ee2c63c5d4 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -377,7 +377,7 @@ static const struct iio_chan_spec_ext_info ad5064_ext_info[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5064_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5064_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5064_powerdown_mode_enum),
{ },
};
@@ -389,7 +389,7 @@ static const struct iio_chan_spec_ext_info ltc2617_ext_info[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ltc2617_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ltc2617_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ltc2617_powerdown_mode_enum),
{ },
};
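
This and the following hunks adapt callers to IIO_ENUM_AVAILABLE() taking an
explicit sharing argument instead of hard-coding IIO_SHARED_BY_TYPE, which is
also why ad5766 can drop its private IIO_ENUM_AVAILABLE_SHARED copy later in
this series. Judging from that removed helper, the updated macro expands
roughly as follows (a sketch, not the verbatim header):

/* The caller now picks the sharing level for the "_available" attribute. */
#define IIO_ENUM_AVAILABLE(_name, _shared, _e)		\
{							\
	.name = (_name "_available"),			\
	.shared = (_shared),				\
	.read = iio_enum_available_read,		\
	.private = (uintptr_t)(_e),			\
}
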
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index 8ca26bb4b62f..e38860a6a9f3 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -249,7 +249,7 @@ static const struct iio_chan_spec_ext_info ad5380_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
&ad5380_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5380_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5380_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index 3cc5513a6cbf..1c9b54c012a7 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -142,7 +142,7 @@ static const struct iio_chan_spec_ext_info ad5446_ext_info_powerdown[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5446_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5446_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5446_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 19cdf9890d02..b631261efa97 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -241,7 +241,7 @@ static const struct iio_chan_spec_ext_info ad5504_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
&ad5504_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5504_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5504_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index 530529feebb5..3c98941b9f99 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -159,7 +159,7 @@ static const struct iio_chan_spec_ext_info ad5624r_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
&ad5624r_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5624r_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5624r_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 8f001db775f4..e592a995f404 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -184,7 +184,7 @@ static const struct iio_chan_spec_ext_info ad5686_ext_info[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &ad5686_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5686_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5686_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index cabc38d54085..7a62e6e1d5f1 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -13,10 +13,10 @@
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
-#include <linux/of.h>
+#include <linux/property.h>
+
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/platform_data/ad5755.h>
#define AD5755_NUM_CHANNELS 4
@@ -63,6 +63,101 @@
#define AD5755_SLEW_RATE_SHIFT 3
#define AD5755_SLEW_ENABLE BIT(12)
+enum ad5755_mode {
+ AD5755_MODE_VOLTAGE_0V_5V = 0,
+ AD5755_MODE_VOLTAGE_0V_10V = 1,
+ AD5755_MODE_VOLTAGE_PLUSMINUS_5V = 2,
+ AD5755_MODE_VOLTAGE_PLUSMINUS_10V = 3,
+ AD5755_MODE_CURRENT_4mA_20mA = 4,
+ AD5755_MODE_CURRENT_0mA_20mA = 5,
+ AD5755_MODE_CURRENT_0mA_24mA = 6,
+};
+
+enum ad5755_dc_dc_phase {
+ AD5755_DC_DC_PHASE_ALL_SAME_EDGE = 0,
+ AD5755_DC_DC_PHASE_A_B_SAME_EDGE_C_D_OPP_EDGE = 1,
+ AD5755_DC_DC_PHASE_A_C_SAME_EDGE_B_D_OPP_EDGE = 2,
+ AD5755_DC_DC_PHASE_90_DEGREE = 3,
+};
+
+enum ad5755_dc_dc_freq {
+ AD5755_DC_DC_FREQ_250kHZ = 0,
+ AD5755_DC_DC_FREQ_410kHZ = 1,
+ AD5755_DC_DC_FREQ_650kHZ = 2,
+};
+
+enum ad5755_dc_dc_maxv {
+ AD5755_DC_DC_MAXV_23V = 0,
+ AD5755_DC_DC_MAXV_24V5 = 1,
+ AD5755_DC_DC_MAXV_27V = 2,
+ AD5755_DC_DC_MAXV_29V5 = 3,
+};
+
+enum ad5755_slew_rate {
+ AD5755_SLEW_RATE_64k = 0,
+ AD5755_SLEW_RATE_32k = 1,
+ AD5755_SLEW_RATE_16k = 2,
+ AD5755_SLEW_RATE_8k = 3,
+ AD5755_SLEW_RATE_4k = 4,
+ AD5755_SLEW_RATE_2k = 5,
+ AD5755_SLEW_RATE_1k = 6,
+ AD5755_SLEW_RATE_500 = 7,
+ AD5755_SLEW_RATE_250 = 8,
+ AD5755_SLEW_RATE_125 = 9,
+ AD5755_SLEW_RATE_64 = 10,
+ AD5755_SLEW_RATE_32 = 11,
+ AD5755_SLEW_RATE_16 = 12,
+ AD5755_SLEW_RATE_8 = 13,
+ AD5755_SLEW_RATE_4 = 14,
+ AD5755_SLEW_RATE_0_5 = 15,
+};
+
+enum ad5755_slew_step_size {
+ AD5755_SLEW_STEP_SIZE_1 = 0,
+ AD5755_SLEW_STEP_SIZE_2 = 1,
+ AD5755_SLEW_STEP_SIZE_4 = 2,
+ AD5755_SLEW_STEP_SIZE_8 = 3,
+ AD5755_SLEW_STEP_SIZE_16 = 4,
+ AD5755_SLEW_STEP_SIZE_32 = 5,
+ AD5755_SLEW_STEP_SIZE_64 = 6,
+ AD5755_SLEW_STEP_SIZE_128 = 7,
+ AD5755_SLEW_STEP_SIZE_256 = 8,
+};
+
+/**
+ * struct ad5755_platform_data - AD5755 DAC driver platform data
+ * @ext_dc_dc_compenstation_resistor: Whether an external DC-DC converter
+ * compensation resistor is used.
+ * @dc_dc_phase: DC-DC converter phase.
+ * @dc_dc_freq: DC-DC converter frequency.
+ * @dc_dc_maxv: DC-DC maximum allowed boost voltage.
+ * @dac: Per DAC instance parameters.
+ * @dac.mode: The mode to be used for the DAC output.
+ * @dac.ext_current_sense_resistor: Whether an external current sense resistor
+ * is used.
+ * @dac.enable_voltage_overrange: Whether to enable 20% voltage output overrange.
+ * @dac.slew.enable: Whether to enable digital slew.
+ * @dac.slew.rate: Slew rate of the digital slew.
+ * @dac.slew.step_size: Slew step size of the digital slew.
+ **/
+struct ad5755_platform_data {
+ bool ext_dc_dc_compenstation_resistor;
+ enum ad5755_dc_dc_phase dc_dc_phase;
+ enum ad5755_dc_dc_freq dc_dc_freq;
+ enum ad5755_dc_dc_maxv dc_dc_maxv;
+
+ struct {
+ enum ad5755_mode mode;
+ bool ext_current_sense_resistor;
+ bool enable_voltage_overrange;
+ struct {
+ bool enable;
+ enum ad5755_slew_rate rate;
+ enum ad5755_slew_step_size step_size;
+ } slew;
+ } dac[4];
+};
+
/**
* struct ad5755_chip_info - chip specific information
* @channel_template: channel specification
@@ -111,7 +206,6 @@ enum ad5755_type {
ID_AD5737,
};
-#ifdef CONFIG_OF
static const int ad5755_dcdc_freq_table[][2] = {
{ 250000, AD5755_DC_DC_FREQ_250kHZ },
{ 410000, AD5755_DC_DC_FREQ_410kHZ },
@@ -154,7 +248,6 @@ static const int ad5755_slew_step_table[][2] = {
{ 2, AD5755_SLEW_STEP_SIZE_2 },
{ 1, AD5755_SLEW_STEP_SIZE_1 },
};
-#endif
static int ad5755_write_unlocked(struct iio_dev *indio_dev,
unsigned int reg, unsigned int val)
@@ -604,30 +697,29 @@ static const struct ad5755_platform_data ad5755_default_pdata = {
},
};
-#ifdef CONFIG_OF
-static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
+static struct ad5755_platform_data *ad5755_parse_fw(struct device *dev)
{
- struct device_node *np = dev->of_node;
- struct device_node *pp;
+ struct fwnode_handle *pp;
struct ad5755_platform_data *pdata;
unsigned int tmp;
unsigned int tmparray[3];
int devnr, i;
+ if (!dev_fwnode(dev))
+ return NULL;
+
pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
pdata->ext_dc_dc_compenstation_resistor =
- of_property_read_bool(np, "adi,ext-dc-dc-compenstation-resistor");
+ device_property_read_bool(dev, "adi,ext-dc-dc-compenstation-resistor");
- if (!of_property_read_u32(np, "adi,dc-dc-phase", &tmp))
- pdata->dc_dc_phase = tmp;
- else
- pdata->dc_dc_phase = AD5755_DC_DC_PHASE_ALL_SAME_EDGE;
+ pdata->dc_dc_phase = AD5755_DC_DC_PHASE_ALL_SAME_EDGE;
+ device_property_read_u32(dev, "adi,dc-dc-phase", &pdata->dc_dc_phase);
pdata->dc_dc_freq = AD5755_DC_DC_FREQ_410kHZ;
- if (!of_property_read_u32(np, "adi,dc-dc-freq-hz", &tmp)) {
+ if (!device_property_read_u32(dev, "adi,dc-dc-freq-hz", &tmp)) {
for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_freq_table); i++) {
if (tmp == ad5755_dcdc_freq_table[i][0]) {
pdata->dc_dc_freq = ad5755_dcdc_freq_table[i][1];
@@ -641,7 +733,7 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
}
pdata->dc_dc_maxv = AD5755_DC_DC_MAXV_23V;
- if (!of_property_read_u32(np, "adi,dc-dc-max-microvolt", &tmp)) {
+ if (!device_property_read_u32(dev, "adi,dc-dc-max-microvolt", &tmp)) {
for (i = 0; i < ARRAY_SIZE(ad5755_dcdc_maxv_table); i++) {
if (tmp == ad5755_dcdc_maxv_table[i][0]) {
pdata->dc_dc_maxv = ad5755_dcdc_maxv_table[i][1];
@@ -654,25 +746,23 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
}
devnr = 0;
- for_each_child_of_node(np, pp) {
+ device_for_each_child_node(dev, pp) {
if (devnr >= AD5755_NUM_CHANNELS) {
dev_err(dev,
"There are too many channels defined in DT\n");
goto error_out;
}
- if (!of_property_read_u32(pp, "adi,mode", &tmp))
- pdata->dac[devnr].mode = tmp;
- else
- pdata->dac[devnr].mode = AD5755_MODE_CURRENT_4mA_20mA;
+ pdata->dac[devnr].mode = AD5755_MODE_CURRENT_4mA_20mA;
+ fwnode_property_read_u32(pp, "adi,mode", &pdata->dac[devnr].mode);
pdata->dac[devnr].ext_current_sense_resistor =
- of_property_read_bool(pp, "adi,ext-current-sense-resistor");
+ fwnode_property_read_bool(pp, "adi,ext-current-sense-resistor");
pdata->dac[devnr].enable_voltage_overrange =
- of_property_read_bool(pp, "adi,enable-voltage-overrange");
+ fwnode_property_read_bool(pp, "adi,enable-voltage-overrange");
- if (!of_property_read_u32_array(pp, "adi,slew", tmparray, 3)) {
+ if (!fwnode_property_read_u32_array(pp, "adi,slew", tmparray, 3)) {
pdata->dac[devnr].slew.enable = tmparray[0];
pdata->dac[devnr].slew.rate = AD5755_SLEW_RATE_64k;
@@ -715,18 +805,11 @@ static struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
devm_kfree(dev, pdata);
return NULL;
}
-#else
-static
-struct ad5755_platform_data *ad5755_parse_dt(struct device *dev)
-{
- return NULL;
-}
-#endif
static int ad5755_probe(struct spi_device *spi)
{
enum ad5755_type type = spi_get_device_id(spi)->driver_data;
- const struct ad5755_platform_data *pdata = dev_get_platdata(&spi->dev);
+ const struct ad5755_platform_data *pdata;
struct iio_dev *indio_dev;
struct ad5755_state *st;
int ret;
@@ -751,13 +834,10 @@ static int ad5755_probe(struct spi_device *spi)
mutex_init(&st->lock);
- if (spi->dev.of_node)
- pdata = ad5755_parse_dt(&spi->dev);
- else
- pdata = spi->dev.platform_data;
+ pdata = ad5755_parse_fw(&spi->dev);
if (!pdata) {
- dev_warn(&spi->dev, "no platform data? using default\n");
+ dev_warn(&spi->dev, "no firmware provided parameters? using default\n");
pdata = &ad5755_default_pdata;
}
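
The fwnode conversion also simplifies optional properties: assign the default
first and let the property read overwrite it only on success, instead of
branching on the read's return value. A minimal sketch of the pattern with a
stubbed lookup (prop_read_u32() is hypothetical, matching the contract of
device_property_read_u32()):

#include <stdio.h>

/* Returns 0 and writes *val if the property exists, a negative errno
 * otherwise; on failure *val is left untouched. */
static int prop_read_u32(const char *name, unsigned int *val)
{
	(void)name;
	(void)val;
	return -22;	/* pretend the property is absent */
}

int main(void)
{
	unsigned int dc_dc_phase = 0;	/* default: ALL_SAME_EDGE */

	prop_read_u32("adi,dc-dc-phase", &dc_dc_phase);

	printf("phase = %u\n", dc_dc_phase);	/* default survives */
	return 0;
}
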
diff --git a/drivers/iio/dac/ad5758.c b/drivers/iio/dac/ad5758.c
index 0572ef518101..98771e37a7b5 100644
--- a/drivers/iio/dac/ad5758.c
+++ b/drivers/iio/dac/ad5758.c
@@ -10,9 +10,8 @@
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/property.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/spi/spi.h>
#include <linux/gpio/consumer.h>
diff --git a/drivers/iio/dac/ad5766.c b/drivers/iio/dac/ad5766.c
index b0d220c3a126..43189af2fb1f 100644
--- a/drivers/iio/dac/ad5766.c
+++ b/drivers/iio/dac/ad5766.c
@@ -426,14 +426,6 @@ static ssize_t ad5766_write_ext(struct iio_dev *indio_dev,
.shared = _shared, \
}
-#define IIO_ENUM_AVAILABLE_SHARED(_name, _shared, _e) \
-{ \
- .name = (_name "_available"), \
- .shared = _shared, \
- .read = iio_enum_available_read, \
- .private = (uintptr_t)(_e), \
-}
-
static const struct iio_chan_spec_ext_info ad5766_ext_info[] = {
_AD5766_CHAN_EXT_INFO("dither_enable", AD5766_DITHER_ENABLE,
@@ -443,9 +435,8 @@ static const struct iio_chan_spec_ext_info ad5766_ext_info[] = {
_AD5766_CHAN_EXT_INFO("dither_source", AD5766_DITHER_SOURCE,
IIO_SEPARATE),
IIO_ENUM("dither_scale", IIO_SEPARATE, &ad5766_dither_scale_enum),
- IIO_ENUM_AVAILABLE_SHARED("dither_scale",
- IIO_SEPARATE,
- &ad5766_dither_scale_enum),
+ IIO_ENUM_AVAILABLE("dither_scale", IIO_SEPARATE,
+ &ad5766_dither_scale_enum),
{}
};
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index a0923b76e8b6..7b4579d73d18 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -285,7 +285,7 @@ static const struct iio_chan_spec_ext_info ad5791_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE,
&ad5791_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ad5791_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ad5791_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/ad7293.c b/drivers/iio/dac/ad7293.c
new file mode 100644
index 000000000000..59a38ca4c3c7
--- /dev/null
+++ b/drivers/iio/dac/ad7293.c
@@ -0,0 +1,934 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AD7293 driver
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <asm/unaligned.h>
+
+#define AD7293_R1B BIT(16)
+#define AD7293_R2B BIT(17)
+#define AD7293_PAGE_ADDR_MSK GENMASK(15, 8)
+#define AD7293_PAGE(x) FIELD_PREP(AD7293_PAGE_ADDR_MSK, x)
+
+/* AD7293 Register Map Common */
+#define AD7293_REG_NO_OP (AD7293_R1B | AD7293_PAGE(0x0) | 0x0)
+#define AD7293_REG_PAGE_SELECT (AD7293_R1B | AD7293_PAGE(0x0) | 0x1)
+#define AD7293_REG_CONV_CMD (AD7293_R2B | AD7293_PAGE(0x0) | 0x2)
+#define AD7293_REG_RESULT (AD7293_R1B | AD7293_PAGE(0x0) | 0x3)
+#define AD7293_REG_DAC_EN (AD7293_R1B | AD7293_PAGE(0x0) | 0x4)
+#define AD7293_REG_DEVICE_ID (AD7293_R2B | AD7293_PAGE(0x0) | 0xC)
+#define AD7293_REG_SOFT_RESET (AD7293_R2B | AD7293_PAGE(0x0) | 0xF)
+
+/* AD7293 Register Map Page 0x0 */
+#define AD7293_REG_VIN0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x10)
+#define AD7293_REG_VIN1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x11)
+#define AD7293_REG_VIN2 (AD7293_R2B | AD7293_PAGE(0x0) | 0x12)
+#define AD7293_REG_VIN3 (AD7293_R2B | AD7293_PAGE(0x0) | 0x13)
+#define AD7293_REG_TSENSE_INT (AD7293_R2B | AD7293_PAGE(0x0) | 0x20)
+#define AD7293_REG_TSENSE_D0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x21)
+#define AD7293_REG_TSENSE_D1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x22)
+#define AD7293_REG_ISENSE_0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x28)
+#define AD7293_REG_ISENSE_1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x29)
+#define AD7293_REG_ISENSE_2 (AD7293_R2B | AD7293_PAGE(0x0) | 0x2A)
+#define AD7293_REG_ISENSE_3 (AD7293_R2B | AD7293_PAGE(0x0) | 0x2B)
+#define AD7293_REG_UNI_VOUT0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x30)
+#define AD7293_REG_UNI_VOUT1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x31)
+#define AD7293_REG_UNI_VOUT2 (AD7293_R2B | AD7293_PAGE(0x0) | 0x32)
+#define AD7293_REG_UNI_VOUT3 (AD7293_R2B | AD7293_PAGE(0x0) | 0x33)
+#define AD7293_REG_BI_VOUT0 (AD7293_R2B | AD7293_PAGE(0x0) | 0x34)
+#define AD7293_REG_BI_VOUT1 (AD7293_R2B | AD7293_PAGE(0x0) | 0x35)
+#define AD7293_REG_BI_VOUT2 (AD7293_R2B | AD7293_PAGE(0x0) | 0x36)
+#define AD7293_REG_BI_VOUT3 (AD7293_R2B | AD7293_PAGE(0x0) | 0x37)
+
+/* AD7293 Register Map Page 0x2 */
+#define AD7293_REG_DIGITAL_OUT_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x11)
+#define AD7293_REG_DIGITAL_INOUT_FUNC (AD7293_R2B | AD7293_PAGE(0x2) | 0x12)
+#define AD7293_REG_DIGITAL_FUNC_POL (AD7293_R2B | AD7293_PAGE(0x2) | 0x13)
+#define AD7293_REG_GENERAL (AD7293_R2B | AD7293_PAGE(0x2) | 0x14)
+#define AD7293_REG_VINX_RANGE0 (AD7293_R2B | AD7293_PAGE(0x2) | 0x15)
+#define AD7293_REG_VINX_RANGE1 (AD7293_R2B | AD7293_PAGE(0x2) | 0x16)
+#define AD7293_REG_VINX_DIFF_SE (AD7293_R2B | AD7293_PAGE(0x2) | 0x17)
+#define AD7293_REG_VINX_FILTER (AD7293_R2B | AD7293_PAGE(0x2) | 0x18)
+#define AD7293_REG_BG_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x19)
+#define AD7293_REG_CONV_DELAY (AD7293_R2B | AD7293_PAGE(0x2) | 0x1A)
+#define AD7293_REG_TSENSE_BG_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x1B)
+#define AD7293_REG_ISENSE_BG_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x1C)
+#define AD7293_REG_ISENSE_GAIN (AD7293_R2B | AD7293_PAGE(0x2) | 0x1D)
+#define AD7293_REG_DAC_SNOOZE_O (AD7293_R2B | AD7293_PAGE(0x2) | 0x1F)
+#define AD7293_REG_DAC_SNOOZE_1 (AD7293_R2B | AD7293_PAGE(0x2) | 0x20)
+#define AD7293_REG_RSX_MON_BG_EN (AD7293_R2B | AD7293_PAGE(0x2) | 0x23)
+#define AD7293_REG_INTEGR_CL (AD7293_R2B | AD7293_PAGE(0x2) | 0x28)
+#define AD7293_REG_PA_ON_CTRL (AD7293_R2B | AD7293_PAGE(0x2) | 0x29)
+#define AD7293_REG_RAMP_TIME_0 (AD7293_R2B | AD7293_PAGE(0x2) | 0x2A)
+#define AD7293_REG_RAMP_TIME_1 (AD7293_R2B | AD7293_PAGE(0x2) | 0x2B)
+#define AD7293_REG_RAMP_TIME_2 (AD7293_R2B | AD7293_PAGE(0x2) | 0x2C)
+#define AD7293_REG_RAMP_TIME_3 (AD7293_R2B | AD7293_PAGE(0x2) | 0x2D)
+#define AD7293_REG_CL_FR_IT (AD7293_R2B | AD7293_PAGE(0x2) | 0x2E)
+#define AD7293_REG_INTX_AVSS_AVDD (AD7293_R2B | AD7293_PAGE(0x2) | 0x2F)
+
+/* AD7293 Register Map Page 0x3 */
+#define AD7293_REG_VINX_SEQ (AD7293_R2B | AD7293_PAGE(0x3) | 0x10)
+#define AD7293_REG_ISENSEX_TSENSEX_SEQ (AD7293_R2B | AD7293_PAGE(0x3) | 0x11)
+#define AD7293_REG_RSX_MON_BI_VOUTX_SEQ (AD7293_R2B | AD7293_PAGE(0x3) | 0x12)
+
+/* AD7293 Register Map Page 0xE */
+#define AD7293_REG_VIN0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x10)
+#define AD7293_REG_VIN1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x11)
+#define AD7293_REG_VIN2_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x12)
+#define AD7293_REG_VIN3_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x13)
+#define AD7293_REG_TSENSE_INT_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x20)
+#define AD7293_REG_TSENSE_D0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x21)
+#define AD7293_REG_TSENSE_D1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x22)
+#define AD7293_REG_ISENSE0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x28)
+#define AD7293_REG_ISENSE1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x29)
+#define AD7293_REG_ISENSE2_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x2A)
+#define AD7293_REG_ISENSE3_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x2B)
+#define AD7293_REG_UNI_VOUT0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x30)
+#define AD7293_REG_UNI_VOUT1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x31)
+#define AD7293_REG_UNI_VOUT2_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x32)
+#define AD7293_REG_UNI_VOUT3_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x33)
+#define AD7293_REG_BI_VOUT0_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x34)
+#define AD7293_REG_BI_VOUT1_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x35)
+#define AD7293_REG_BI_VOUT2_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x36)
+#define AD7293_REG_BI_VOUT3_OFFSET (AD7293_R1B | AD7293_PAGE(0xE) | 0x37)
+
+/* AD7293 Miscellaneous Definitions */
+#define AD7293_READ BIT(7)
+#define AD7293_TRANSF_LEN_MSK GENMASK(17, 16)
+
+#define AD7293_REG_ADDR_MSK GENMASK(7, 0)
+#define AD7293_REG_VOUT_OFFSET_MSK GENMASK(5, 4)
+#define AD7293_REG_DATA_RAW_MSK GENMASK(15, 4)
+#define AD7293_REG_VINX_RANGE_GET_CH_MSK(x, ch) (((x) >> (ch)) & 0x1)
+#define AD7293_REG_VINX_RANGE_SET_CH_MSK(x, ch) (((x) & 0x1) << (ch))
+#define AD7293_CHIP_ID 0x18
+
+enum ad7293_ch_type {
+ AD7293_ADC_VINX,
+ AD7293_ADC_TSENSE,
+ AD7293_ADC_ISENSE,
+ AD7293_DAC,
+};
+
+enum ad7293_max_offset {
+ AD7293_TSENSE_MIN_OFFSET_CH = 4,
+ AD7293_ISENSE_MIN_OFFSET_CH = 7,
+ AD7293_VOUT_MIN_OFFSET_CH = 11,
+ AD7293_VOUT_MAX_OFFSET_CH = 18,
+};
+
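+/* Raw register values advertised through the *_available attributes */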
+static const int dac_offset_table[] = {0, 1, 2};
+
+static const int isense_gain_table[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+
+static const int adc_range_table[] = {0, 1, 2, 3};
+
+struct ad7293_state {
+ struct spi_device *spi;
+ /* Protect against concurrent accesses to the device, page selection and data content */
+ struct mutex lock;
+ struct gpio_desc *gpio_reset;
+ struct regulator *reg_avdd;
+ struct regulator *reg_vdrive;
+ u8 page_select;
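+ /*
+  * DMA (thus cache coherency maintenance) requires the
+  * transfer buffers to live in their own cache lines.
+  */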
+ u8 data[3] ____cacheline_aligned;
+};
+
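+/*
+ * The AD7293 register map is paged: each register definition encodes both its
+ * page and its 8-bit in-page address. A new page has to be latched through
+ * AD7293_REG_PAGE_SELECT before the in-page address can be accessed; the
+ * cached page_select value avoids redundant SPI writes while the page is
+ * unchanged.
+ */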
+static int ad7293_page_select(struct ad7293_state *st, unsigned int reg)
+{
+ int ret;
+
+ if (st->page_select != FIELD_GET(AD7293_PAGE_ADDR_MSK, reg)) {
+ st->data[0] = FIELD_GET(AD7293_REG_ADDR_MSK, AD7293_REG_PAGE_SELECT);
+ st->data[1] = FIELD_GET(AD7293_PAGE_ADDR_MSK, reg);
+
+ ret = spi_write(st->spi, &st->data[0], 2);
+ if (ret)
+ return ret;
+
+ st->page_select = FIELD_GET(AD7293_PAGE_ADDR_MSK, reg);
+ }
+
+ return 0;
+}
+
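+/*
+ * The transfer length (one or two data bytes, AD7293_R1B/AD7293_R2B) is also
+ * encoded in the register definition and recovered via AD7293_TRANSF_LEN_MSK.
+ */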
+static int __ad7293_spi_read(struct ad7293_state *st, unsigned int reg,
+ u16 *val)
+{
+ int ret;
+ unsigned int length;
+ struct spi_transfer t = {0};
+
+ length = FIELD_GET(AD7293_TRANSF_LEN_MSK, reg);
+
+ ret = ad7293_page_select(st, reg);
+ if (ret)
+ return ret;
+
+ st->data[0] = AD7293_READ | FIELD_GET(AD7293_REG_ADDR_MSK, reg);
+ st->data[1] = 0x0;
+ st->data[2] = 0x0;
+
+ t.tx_buf = &st->data[0];
+ t.rx_buf = &st->data[0];
+ t.len = length + 1;
+
+ ret = spi_sync_transfer(st->spi, &t, 1);
+ if (ret)
+ return ret;
+
+ if (length == 1)
+ *val = st->data[1];
+ else
+ *val = get_unaligned_be16(&st->data[1]);
+
+ return 0;
+}
+
+static int ad7293_spi_read(struct ad7293_state *st, unsigned int reg,
+ u16 *val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __ad7293_spi_read(st, reg, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __ad7293_spi_write(struct ad7293_state *st, unsigned int reg,
+ u16 val)
+{
+ int ret;
+ unsigned int length;
+
+ length = FIELD_GET(AD7293_TRANSF_LEN_MSK, reg);
+
+ ret = ad7293_page_select(st, reg);
+ if (ret)
+ return ret;
+
+ st->data[0] = FIELD_GET(AD7293_REG_ADDR_MSK, reg);
+
+ if (length == 1)
+ st->data[1] = val;
+ else
+ put_unaligned_be16(val, &st->data[1]);
+
+ return spi_write(st->spi, &st->data[0], length + 1);
+}
+
+static int ad7293_spi_write(struct ad7293_state *st, unsigned int reg,
+ u16 val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __ad7293_spi_write(st, reg, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __ad7293_spi_update_bits(struct ad7293_state *st, unsigned int reg,
+ u16 mask, u16 val)
+{
+ int ret;
+ u16 data, temp;
+
+ ret = __ad7293_spi_read(st, reg, &data);
+ if (ret)
+ return ret;
+
+ temp = (data & ~mask) | (val & mask);
+
+ return __ad7293_spi_write(st, reg, temp);
+}
+
+static int ad7293_spi_update_bits(struct ad7293_state *st, unsigned int reg,
+ u16 mask, u16 val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __ad7293_spi_update_bits(st, reg, mask, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad7293_adc_get_scale(struct ad7293_state *st, unsigned int ch,
+ u16 *range)
+{
+ int ret;
+ u16 data;
+
+ mutex_lock(&st->lock);
+
+ ret = __ad7293_spi_read(st, AD7293_REG_VINX_RANGE1, &data);
+ if (ret)
+ goto exit;
+
+ *range = AD7293_REG_VINX_RANGE_GET_CH_MSK(data, ch);
+
+ ret = __ad7293_spi_read(st, AD7293_REG_VINX_RANGE0, &data);
+ if (ret)
+ goto exit;
+
+ *range |= AD7293_REG_VINX_RANGE_GET_CH_MSK(data, ch) << 1;
+
+exit:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad7293_adc_set_scale(struct ad7293_state *st, unsigned int ch,
+ u16 range)
+{
+ int ret;
+ unsigned int ch_msk = BIT(ch);
+
+ mutex_lock(&st->lock);
+ ret = __ad7293_spi_update_bits(st, AD7293_REG_VINX_RANGE1, ch_msk,
+ AD7293_REG_VINX_RANGE_SET_CH_MSK(range, ch));
+ if (ret)
+ goto exit;
+
+ ret = __ad7293_spi_update_bits(st, AD7293_REG_VINX_RANGE0, ch_msk,
+ AD7293_REG_VINX_RANGE_SET_CH_MSK((range >> 1), ch));
+
+exit:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
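+/*
+ * Offset channels are indexed linearly: 0-3 map to VIN0-VIN3, 4-6 to the
+ * internal and the two external diode temperature sensors, 7-10 to
+ * ISENSE0-ISENSE3 and 11-18 to the DAC outputs (see enum ad7293_max_offset).
+ */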
+static int ad7293_get_offset(struct ad7293_state *st, unsigned int ch,
+ u16 *offset)
+{
+ if (ch < AD7293_TSENSE_MIN_OFFSET_CH)
+ return ad7293_spi_read(st, AD7293_REG_VIN0_OFFSET + ch, offset);
+ else if (ch < AD7293_ISENSE_MIN_OFFSET_CH)
+ return ad7293_spi_read(st, AD7293_REG_TSENSE_INT_OFFSET + (ch - 4), offset);
+ else if (ch < AD7293_VOUT_MIN_OFFSET_CH)
+ return ad7293_spi_read(st, AD7293_REG_ISENSE0_OFFSET + (ch - 7), offset);
+ else if (ch <= AD7293_VOUT_MAX_OFFSET_CH)
+ return ad7293_spi_read(st, AD7293_REG_UNI_VOUT0_OFFSET + (ch - 11), offset);
+
+ return -EINVAL;
+}
+
+static int ad7293_set_offset(struct ad7293_state *st, unsigned int ch,
+ u16 offset)
+{
+ if (ch < AD7293_TSENSE_MIN_OFFSET_CH)
+ return ad7293_spi_write(st, AD7293_REG_VIN0_OFFSET + ch,
+ offset);
+ else if (ch < AD7293_ISENSE_MIN_OFFSET_CH)
+ return ad7293_spi_write(st,
+ AD7293_REG_TSENSE_INT_OFFSET +
+ (ch - AD7293_TSENSE_MIN_OFFSET_CH),
+ offset);
+ else if (ch < AD7293_VOUT_MIN_OFFSET_CH)
+ return ad7293_spi_write(st,
+ AD7293_REG_ISENSE0_OFFSET +
+ (ch - AD7293_ISENSE_MIN_OFFSET_CH),
+ offset);
+ else if (ch <= AD7293_VOUT_MAX_OFFSET_CH)
+ return ad7293_spi_update_bits(st,
+ AD7293_REG_UNI_VOUT0_OFFSET +
+ (ch - AD7293_VOUT_MIN_OFFSET_CH),
+ AD7293_REG_VOUT_OFFSET_MSK,
+ FIELD_PREP(AD7293_REG_VOUT_OFFSET_MSK, offset));
+
+ return -EINVAL;
+}
+
+static int ad7293_isense_set_scale(struct ad7293_state *st, unsigned int ch,
+ u16 gain)
+{
+ unsigned int ch_msk = (0xf << (4 * ch));
+
+ return ad7293_spi_update_bits(st, AD7293_REG_ISENSE_GAIN, ch_msk,
+ gain << (4 * ch));
+}
+
+static int ad7293_isense_get_scale(struct ad7293_state *st, unsigned int ch,
+ u16 *gain)
+{
+ int ret;
+
+ ret = ad7293_spi_read(st, AD7293_REG_ISENSE_GAIN, gain);
+ if (ret)
+ return ret;
+
+ *gain = (*gain >> (4 * ch)) & 0xf;
+
+ return ret;
+}
+
+static int ad7293_dac_write_raw(struct ad7293_state *st, unsigned int ch,
+ u16 raw)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+
+ ret = __ad7293_spi_update_bits(st, AD7293_REG_DAC_EN, BIT(ch), BIT(ch));
+ if (ret)
+ goto exit;
+
+ ret = __ad7293_spi_write(st, AD7293_REG_UNI_VOUT0 + ch,
+ FIELD_PREP(AD7293_REG_DATA_RAW_MSK, raw));
+
+exit:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
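+/*
+ * ADC readings are one-shot: enable the matching band gap for the T/I sense
+ * channels (with a settling delay), select the channel in the relevant
+ * sequence register, kick a conversion through AD7293_REG_CONV_CMD and then
+ * read back the result register.
+ */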
+static int ad7293_ch_read_raw(struct ad7293_state *st, enum ad7293_ch_type type,
+ unsigned int ch, u16 *raw)
+{
+ int ret;
+ unsigned int reg_wr, reg_rd, data_wr;
+
+ switch (type) {
+ case AD7293_ADC_VINX:
+ reg_wr = AD7293_REG_VINX_SEQ;
+ reg_rd = AD7293_REG_VIN0 + ch;
+ data_wr = BIT(ch);
+
+ break;
+ case AD7293_ADC_TSENSE:
+ reg_wr = AD7293_REG_ISENSEX_TSENSEX_SEQ;
+ reg_rd = AD7293_REG_TSENSE_INT + ch;
+ data_wr = BIT(ch);
+
+ break;
+ case AD7293_ADC_ISENSE:
+ reg_wr = AD7293_REG_ISENSEX_TSENSEX_SEQ;
+ reg_rd = AD7293_REG_ISENSE_0 + ch;
+ data_wr = BIT(ch) << 8;
+
+ break;
+ case AD7293_DAC:
+ reg_rd = AD7293_REG_UNI_VOUT0 + ch;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&st->lock);
+
+ if (type != AD7293_DAC) {
+ if (type == AD7293_ADC_TSENSE) {
+ ret = __ad7293_spi_write(st, AD7293_REG_TSENSE_BG_EN,
+ BIT(ch));
+ if (ret)
+ goto exit;
+
+ usleep_range(9000, 9900);
+ } else if (type == AD7293_ADC_ISENSE) {
+ ret = __ad7293_spi_write(st, AD7293_REG_ISENSE_BG_EN,
+ BIT(ch));
+ if (ret)
+ goto exit;
+
+ usleep_range(2000, 7000);
+ }
+
+ ret = __ad7293_spi_write(st, reg_wr, data_wr);
+ if (ret)
+ goto exit;
+
+ ret = __ad7293_spi_write(st, AD7293_REG_CONV_CMD, 0x82);
+ if (ret)
+ goto exit;
+ }
+
+ ret = __ad7293_spi_read(st, reg_rd, raw);
+ if (ret)
+ goto exit;
+
+ *raw = FIELD_GET(AD7293_REG_DATA_RAW_MSK, *raw);
+
+exit:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int ad7293_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct ad7293_state *st = iio_priv(indio_dev);
+ int ret;
+ u16 data;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output)
+ ret = ad7293_ch_read_raw(st, AD7293_DAC,
+ chan->channel, &data);
+ else
+ ret = ad7293_ch_read_raw(st, AD7293_ADC_VINX,
+ chan->channel, &data);
+
+ break;
+ case IIO_CURRENT:
+ ret = ad7293_ch_read_raw(st, AD7293_ADC_ISENSE,
+ chan->channel, &data);
+
+ break;
+ case IIO_TEMP:
+ ret = ad7293_ch_read_raw(st, AD7293_ADC_TSENSE,
+ chan->channel, &data);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ *val = data;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output) {
+ ret = ad7293_get_offset(st,
+ chan->channel + AD7293_VOUT_MIN_OFFSET_CH,
+ &data);
+
+ data = FIELD_GET(AD7293_REG_VOUT_OFFSET_MSK, data);
+ } else {
+ ret = ad7293_get_offset(st, chan->channel, &data);
+ }
+
+ break;
+ case IIO_CURRENT:
+ ret = ad7293_get_offset(st,
+ chan->channel + AD7293_ISENSE_MIN_OFFSET_CH,
+ &data);
+
+ break;
+ case IIO_TEMP:
+ ret = ad7293_get_offset(st,
+ chan->channel + AD7293_TSENSE_MIN_OFFSET_CH,
+ &data);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret)
+ return ret;
+
+ *val = data;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ ret = ad7293_adc_get_scale(st, chan->channel, &data);
+ if (ret)
+ return ret;
+
+ *val = data;
+
+ return IIO_VAL_INT;
+ case IIO_CURRENT:
+ ret = ad7293_isense_get_scale(st, chan->channel, &data);
+ if (ret)
+ return ret;
+
+ *val = data;
+
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ *val = 1;
+ *val2 = 8;
+
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7293_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct ad7293_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (!chan->output)
+ return -EINVAL;
+
+ return ad7293_dac_write_raw(st, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output)
+ return ad7293_set_offset(st,
+ chan->channel +
+ AD7293_VOUT_MIN_OFFSET_CH,
+ val);
+ else
+ return ad7293_set_offset(st, chan->channel, val);
+ case IIO_CURRENT:
+ return ad7293_set_offset(st,
+ chan->channel +
+ AD7293_ISENSE_MIN_OFFSET_CH,
+ val);
+ case IIO_TEMP:
+ return ad7293_set_offset(st,
+ chan->channel +
+ AD7293_TSENSE_MIN_OFFSET_CH,
+ val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ return ad7293_adc_set_scale(st, chan->channel, val);
+ case IIO_CURRENT:
+ return ad7293_isense_set_scale(st, chan->channel, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7293_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int write_val,
+ unsigned int *read_val)
+{
+ struct ad7293_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (read_val) {
+ u16 temp;
+ ret = ad7293_spi_read(st, reg, &temp);
+ *read_val = temp;
+ } else {
+ ret = ad7293_spi_write(st, reg, (u16)write_val);
+ }
+
+ return ret;
+}
+
+static int ad7293_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_OFFSET:
+ *vals = dac_offset_table;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(dac_offset_table);
+
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SCALE:
+ *type = IIO_VAL_INT;
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *vals = adc_range_table;
+ *length = ARRAY_SIZE(adc_range_table);
+ return IIO_AVAIL_LIST;
+ case IIO_CURRENT:
+ *vals = isense_gain_table;
+ *length = ARRAY_SIZE(isense_gain_table);
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+#define AD7293_CHAN_ADC(_channel) { \
+ .type = IIO_VOLTAGE, \
+ .output = 0, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE) \
+}
+
+#define AD7293_CHAN_DAC(_channel) { \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_OFFSET) \
+}
+
+#define AD7293_CHAN_ISENSE(_channel) { \
+ .type = IIO_CURRENT, \
+ .output = 0, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE) \
+}
+
+#define AD7293_CHAN_TEMP(_channel) { \
+ .type = IIO_TEMP, \
+ .output = 0, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) \
+}
+
+static const struct iio_chan_spec ad7293_channels[] = {
+ AD7293_CHAN_ADC(0),
+ AD7293_CHAN_ADC(1),
+ AD7293_CHAN_ADC(2),
+ AD7293_CHAN_ADC(3),
+ AD7293_CHAN_ISENSE(0),
+ AD7293_CHAN_ISENSE(1),
+ AD7293_CHAN_ISENSE(2),
+ AD7293_CHAN_ISENSE(3),
+ AD7293_CHAN_TEMP(0),
+ AD7293_CHAN_TEMP(1),
+ AD7293_CHAN_TEMP(2),
+ AD7293_CHAN_DAC(0),
+ AD7293_CHAN_DAC(1),
+ AD7293_CHAN_DAC(2),
+ AD7293_CHAN_DAC(3),
+ AD7293_CHAN_DAC(4),
+ AD7293_CHAN_DAC(5),
+ AD7293_CHAN_DAC(6),
+ AD7293_CHAN_DAC(7)
+};
+
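+/*
+ * A soft reset is triggered by writing the key 0x7293 followed by 0x0000 to
+ * the soft reset register.
+ */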
+static int ad7293_soft_reset(struct ad7293_state *st)
+{
+ int ret;
+
+ ret = __ad7293_spi_write(st, AD7293_REG_SOFT_RESET, 0x7293);
+ if (ret)
+ return ret;
+
+ return __ad7293_spi_write(st, AD7293_REG_SOFT_RESET, 0x0000);
+}
+
+static int ad7293_reset(struct ad7293_state *st)
+{
+ if (st->gpio_reset) {
+ gpiod_set_value(st->gpio_reset, 0);
+ usleep_range(100, 1000);
+ gpiod_set_value(st->gpio_reset, 1);
+ usleep_range(100, 1000);
+
+ return 0;
+ }
+
+ /* Perform a software reset */
+ return ad7293_soft_reset(st);
+}
+
+static int ad7293_properties_parse(struct ad7293_state *st)
+{
+ struct spi_device *spi = st->spi;
+
+ st->gpio_reset = devm_gpiod_get_optional(&st->spi->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(st->gpio_reset))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_reset),
+ "failed to get the reset GPIO\n");
+
+ st->reg_avdd = devm_regulator_get(&spi->dev, "avdd");
+ if (IS_ERR(st->reg_avdd))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->reg_avdd),
+ "failed to get the AVDD voltage\n");
+
+ st->reg_vdrive = devm_regulator_get(&spi->dev, "vdrive");
+ if (IS_ERR(st->reg_vdrive))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->reg_vdrive),
+ "failed to get the VDRIVE voltage\n");
+
+ return 0;
+}
+
+static void ad7293_reg_disable(void *data)
+{
+ regulator_disable(data);
+}
+
+static int ad7293_init(struct ad7293_state *st)
+{
+ int ret;
+ u16 chip_id;
+ struct spi_device *spi = st->spi;
+
+ ret = ad7293_properties_parse(st);
+ if (ret)
+ return ret;
+
+ ret = ad7293_reset(st);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(st->reg_avdd);
+ if (ret) {
+ dev_err(&spi->dev,
+ "Failed to enable specified AVDD Voltage!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev, ad7293_reg_disable,
+ st->reg_avdd);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(st->reg_vdrive);
+ if (ret) {
+ dev_err(&spi->dev,
+ "Failed to enable specified VDRIVE Voltage!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev, ad7293_reg_disable,
+ st->reg_vdrive);
+ if (ret)
+ return ret;
+
+ ret = regulator_get_voltage(st->reg_avdd);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Failed to read avdd regulator: %d\n", ret);
+ return ret;
+ }
+
+ if (ret > 5500000 || ret < 4500000)
+ return -EINVAL;
+
+ ret = regulator_get_voltage(st->reg_vdrive);
+ if (ret < 0) {
+ dev_err(&spi->dev,
+ "Failed to read vdrive regulator: %d\n", ret);
+ return ret;
+ }
+ if (ret > 5500000 || ret < 1700000)
+ return -EINVAL;
+
+ /* Check Chip ID */
+ ret = __ad7293_spi_read(st, AD7293_REG_DEVICE_ID, &chip_id);
+ if (ret)
+ return ret;
+
+ if (chip_id != AD7293_CHIP_ID) {
+ dev_err(&spi->dev, "Invalid Chip ID.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct iio_info ad7293_info = {
+ .read_raw = ad7293_read_raw,
+ .write_raw = ad7293_write_raw,
+ .read_avail = &ad7293_read_avail,
+ .debugfs_reg_access = &ad7293_reg_access,
+};
+
+static int ad7293_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct ad7293_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ indio_dev->info = &ad7293_info;
+ indio_dev->name = "ad7293";
+ indio_dev->channels = ad7293_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7293_channels);
+
+ st->spi = spi;
+ st->page_select = 0;
+
+ mutex_init(&st->lock);
+
+ ret = ad7293_init(st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id ad7293_id[] = {
+ { "ad7293", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad7293_id);
+
+static const struct of_device_id ad7293_of_match[] = {
+ { .compatible = "adi,ad7293" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ad7293_of_match);
+
+static struct spi_driver ad7293_driver = {
+ .driver = {
+ .name = "ad7293",
+ .of_match_table = ad7293_of_match,
+ },
+ .probe = ad7293_probe,
+ .id_table = ad7293_id,
+};
+module_spi_driver(ad7293_driver);
+
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7293");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/dpot-dac.c b/drivers/iio/dac/dpot-dac.c
index 5d1819448102..83ce9489259c 100644
--- a/drivers/iio/dac/dpot-dac.c
+++ b/drivers/iio/dac/dpot-dac.c
@@ -30,7 +30,7 @@
#include <linux/iio/consumer.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c
index 5502e4f62f0d..60467c6f2c6e 100644
--- a/drivers/iio/dac/lpc18xx_dac.c
+++ b/drivers/iio/dac/lpc18xx_dac.c
@@ -16,9 +16,8 @@
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/iio/dac/max5821.c b/drivers/iio/dac/max5821.c
index 7da4710a6408..fce640b7f1c8 100644
--- a/drivers/iio/dac/max5821.c
+++ b/drivers/iio/dac/max5821.c
@@ -137,7 +137,7 @@ static const struct iio_chan_spec_ext_info max5821_ext_info[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &max5821_powerdown_mode_enum),
- IIO_ENUM_AVAILABLE("powerdown_mode", &max5821_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &max5821_powerdown_mode_enum),
{ },
};
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index 34b14aafb630..842bad57cb88 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -221,8 +221,8 @@ static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE,
&mcp472x_powerdown_mode_enum[MCP4725]),
- IIO_ENUM_AVAILABLE("powerdown_mode",
- &mcp472x_powerdown_mode_enum[MCP4725]),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE,
+ &mcp472x_powerdown_mode_enum[MCP4725]),
{ },
};
@@ -235,8 +235,8 @@ static const struct iio_chan_spec_ext_info mcp4726_ext_info[] = {
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE,
&mcp472x_powerdown_mode_enum[MCP4726]),
- IIO_ENUM_AVAILABLE("powerdown_mode",
- &mcp472x_powerdown_mode_enum[MCP4726]),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE,
+ &mcp472x_powerdown_mode_enum[MCP4726]),
{ },
};
@@ -386,7 +386,7 @@ static int mcp4725_probe(struct i2c_client *client,
i2c_set_clientdata(client, indio_dev);
data->client = client;
if (dev_fwnode(&client->dev))
- data->id = (enum chip_id)device_get_match_data(&client->dev);
+ data->id = (uintptr_t)device_get_match_data(&client->dev);
else
data->id = id->driver_data;
pdata = dev_get_platdata(&client->dev);
diff --git a/drivers/iio/dac/stm32-dac.c b/drivers/iio/dac/stm32-dac.c
index dd2e306824e7..cd71cc4553a7 100644
--- a/drivers/iio/dac/stm32-dac.c
+++ b/drivers/iio/dac/stm32-dac.c
@@ -246,7 +246,7 @@ static const struct iio_chan_spec_ext_info stm32_dac_ext_info[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &stm32_dac_powerdown_mode_en),
- IIO_ENUM_AVAILABLE("powerdown_mode", &stm32_dac_powerdown_mode_en),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &stm32_dac_powerdown_mode_en),
{},
};
diff --git a/drivers/iio/dac/ti-dac082s085.c b/drivers/iio/dac/ti-dac082s085.c
index 5c14bfb16521..6beda2193683 100644
--- a/drivers/iio/dac/ti-dac082s085.c
+++ b/drivers/iio/dac/ti-dac082s085.c
@@ -160,7 +160,7 @@ static const struct iio_chan_spec_ext_info ti_dac_ext_info[] = {
.shared = IIO_SHARED_BY_TYPE,
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ti_dac_powerdown_mode),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
{ },
};
diff --git a/drivers/iio/dac/ti-dac5571.c b/drivers/iio/dac/ti-dac5571.c
index 546a4cf6c5ef..4a3b8d875518 100644
--- a/drivers/iio/dac/ti-dac5571.c
+++ b/drivers/iio/dac/ti-dac5571.c
@@ -212,7 +212,7 @@ static const struct iio_chan_spec_ext_info dac5571_ext_info[] = {
.shared = IIO_SEPARATE,
},
IIO_ENUM("powerdown_mode", IIO_SEPARATE, &dac5571_powerdown_mode),
- IIO_ENUM_AVAILABLE("powerdown_mode", &dac5571_powerdown_mode),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &dac5571_powerdown_mode),
{},
};
diff --git a/drivers/iio/dac/ti-dac7311.c b/drivers/iio/dac/ti-dac7311.c
index 09218c3029f0..99f275829ec2 100644
--- a/drivers/iio/dac/ti-dac7311.c
+++ b/drivers/iio/dac/ti-dac7311.c
@@ -146,7 +146,7 @@ static const struct iio_chan_spec_ext_info ti_dac_ext_info[] = {
.shared = IIO_SHARED_BY_TYPE,
},
IIO_ENUM("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
- IIO_ENUM_AVAILABLE("powerdown_mode", &ti_dac_powerdown_mode),
+ IIO_ENUM_AVAILABLE("powerdown_mode", IIO_SHARED_BY_TYPE, &ti_dac_powerdown_mode),
{ },
};
diff --git a/drivers/iio/dummy/iio_simple_dummy_buffer.c b/drivers/iio/dummy/iio_simple_dummy_buffer.c
index 59aa60d4ca37..d81c2b2dad82 100644
--- a/drivers/iio/dummy/iio_simple_dummy_buffer.c
+++ b/drivers/iio/dummy/iio_simple_dummy_buffer.c
@@ -45,7 +45,6 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
- int len = 0;
u16 *data;
data = kmalloc(indio_dev->scan_bytes, GFP_KERNEL);
@@ -79,7 +78,6 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
indio_dev->masklength, j);
/* random access read from the 'device' */
data[i] = fakedata[j];
- len += 2;
}
}
diff --git a/drivers/iio/filter/Kconfig b/drivers/iio/filter/Kconfig
new file mode 100644
index 000000000000..3ae35817ad82
--- /dev/null
+++ b/drivers/iio/filter/Kconfig
@@ -0,0 +1,18 @@
+#
+# Filter drivers
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Filters"
+
+config ADMV8818
+ tristate "Analog Devices ADMV8818 High-Pass and Low-Pass Filter"
+ depends on SPI && COMMON_CLK && 64BIT
+ help
+ Say yes here to build support for Analog Devices ADMV8818
+ 2 GHz to 18 GHz, Digitally Tunable, High-Pass and Low-Pass Filter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called admv8818.
+
+endmenu
diff --git a/drivers/iio/filter/Makefile b/drivers/iio/filter/Makefile
new file mode 100644
index 000000000000..55e228c0dd20
--- /dev/null
+++ b/drivers/iio/filter/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for industrial I/O Filter drivers
+#
+
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_ADMV8818) += admv8818.o
diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c
new file mode 100644
index 000000000000..68de45fe21b4
--- /dev/null
+++ b/drivers/iio/filter/admv8818.c
@@ -0,0 +1,665 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ADMV8818 driver
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+/* ADMV8818 Register Map */
+#define ADMV8818_REG_SPI_CONFIG_A 0x0
+#define ADMV8818_REG_SPI_CONFIG_B 0x1
+#define ADMV8818_REG_CHIPTYPE 0x3
+#define ADMV8818_REG_PRODUCT_ID_L 0x4
+#define ADMV8818_REG_PRODUCT_ID_H 0x5
+#define ADMV8818_REG_FAST_LATCH_POINTER 0x10
+#define ADMV8818_REG_FAST_LATCH_STOP 0x11
+#define ADMV8818_REG_FAST_LATCH_START 0x12
+#define ADMV8818_REG_FAST_LATCH_DIRECTION 0x13
+#define ADMV8818_REG_FAST_LATCH_STATE 0x14
+#define ADMV8818_REG_WR0_SW 0x20
+#define ADMV8818_REG_WR0_FILTER 0x21
+#define ADMV8818_REG_WR1_SW 0x22
+#define ADMV8818_REG_WR1_FILTER 0x23
+#define ADMV8818_REG_WR2_SW 0x24
+#define ADMV8818_REG_WR2_FILTER 0x25
+#define ADMV8818_REG_WR3_SW 0x26
+#define ADMV8818_REG_WR3_FILTER 0x27
+#define ADMV8818_REG_WR4_SW 0x28
+#define ADMV8818_REG_WR4_FILTER 0x29
+#define ADMV8818_REG_LUT0_SW 0x100
+#define ADMV8818_REG_LUT0_FILTER 0x101
+#define ADMV8818_REG_LUT127_SW 0x1FE
+#define ADMV8818_REG_LUT127_FILTER 0x1FF
+
+/* ADMV8818_REG_SPI_CONFIG_A Map */
+#define ADMV8818_SOFTRESET_N_MSK BIT(7)
+#define ADMV8818_LSB_FIRST_N_MSK BIT(6)
+#define ADMV8818_ENDIAN_N_MSK BIT(5)
+#define ADMV8818_SDOACTIVE_N_MSK BIT(4)
+#define ADMV8818_SDOACTIVE_MSK BIT(3)
+#define ADMV8818_ENDIAN_MSK BIT(2)
+#define ADMV8818_LSBFIRST_MSK BIT(1)
+#define ADMV8818_SOFTRESET_MSK BIT(0)
+
+/* ADMV8818_REG_SPI_CONFIG_B Map */
+#define ADMV8818_SINGLE_INSTRUCTION_MSK BIT(7)
+#define ADMV8818_CSB_STALL_MSK BIT(6)
+#define ADMV8818_MASTER_SLAVE_RB_MSK BIT(5)
+#define ADMV8818_MASTER_SLAVE_TRANSFER_MSK BIT(0)
+
+/* ADMV8818_REG_WR0_SW Map */
+#define ADMV8818_SW_IN_SET_WR0_MSK BIT(7)
+#define ADMV8818_SW_OUT_SET_WR0_MSK BIT(6)
+#define ADMV8818_SW_IN_WR0_MSK GENMASK(5, 3)
+#define ADMV8818_SW_OUT_WR0_MSK GENMASK(2, 0)
+
+/* ADMV8818_REG_WR0_FILTER Map */
+#define ADMV8818_HPF_WR0_MSK GENMASK(7, 4)
+#define ADMV8818_LPF_WR0_MSK GENMASK(3, 0)
+
+enum {
+ ADMV8818_BW_FREQ,
+ ADMV8818_CENTER_FREQ
+};
+
+enum {
+ ADMV8818_AUTO_MODE,
+ ADMV8818_MANUAL_MODE,
+};
+
+struct admv8818_state {
+ struct spi_device *spi;
+ struct regmap *regmap;
+ struct clk *clkin;
+ struct notifier_block nb;
+ /* Protect against concurrent accesses to the device and data content */
+ struct mutex lock;
+ unsigned int filter_mode;
+ u64 cf_hz;
+};
+
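+/* Tunable 3 dB corner-frequency ranges (Hz) of the four HPF/LPF bands */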
+static const unsigned long long freq_range_hpf[4][2] = {
+ {1750000000ULL, 3550000000ULL},
+ {3400000000ULL, 7250000000ULL},
+ {6600000000ULL, 12000000000ULL},
+ {12500000000ULL, 19900000000ULL}
+};
+
+static const unsigned long long freq_range_lpf[4][2] = {
+ {2050000000ULL, 3850000000ULL},
+ {3350000000ULL, 7250000000ULL},
+ {7000000000ULL, 13000000000ULL},
+ {12550000000ULL, 18500000000ULL}
+};
+
+static const struct regmap_config admv8818_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .read_flag_mask = 0x80,
+ .max_register = 0x1FF,
+};
+
+static const char * const admv8818_modes[] = {
+ [0] = "auto",
+ [1] = "manual"
+};
+
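+/*
+ * Each HPF/LPF band is tuned in 16 equal steps across its frequency range.
+ * Pick the lowest band/step whose corner does not exceed the target; below
+ * the first band the filter stays bypassed (band 0), above the last one the
+ * highest available corner is used.
+ */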
+static int __admv8818_hpf_select(struct admv8818_state *st, u64 freq)
+{
+ unsigned int hpf_step = 0, hpf_band = 0, i, j;
+ u64 freq_step;
+ int ret;
+
+ if (freq < freq_range_hpf[0][0])
+ goto hpf_write;
+
+ if (freq > freq_range_hpf[3][1]) {
+ hpf_step = 15;
+ hpf_band = 4;
+
+ goto hpf_write;
+ }
+
+ for (i = 0; i < 4; i++) {
+ freq_step = div_u64((freq_range_hpf[i][1] -
+ freq_range_hpf[i][0]), 15);
+
+ if (freq > freq_range_hpf[i][0] &&
+ (freq < freq_range_hpf[i][1] + freq_step)) {
+ hpf_band = i + 1;
+
+ for (j = 1; j <= 16; j++) {
+ if (freq < (freq_range_hpf[i][0] + (freq_step * j))) {
+ hpf_step = j - 1;
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+ /* Close HPF frequency gap between 12 and 12.5 GHz */
+ if (freq >= 12000 * HZ_PER_MHZ && freq <= 12500 * HZ_PER_MHZ) {
+ hpf_band = 3;
+ hpf_step = 15;
+ }
+
+hpf_write:
+ ret = regmap_update_bits(st->regmap, ADMV8818_REG_WR0_SW,
+ ADMV8818_SW_IN_SET_WR0_MSK |
+ ADMV8818_SW_IN_WR0_MSK,
+ FIELD_PREP(ADMV8818_SW_IN_SET_WR0_MSK, 1) |
+ FIELD_PREP(ADMV8818_SW_IN_WR0_MSK, hpf_band));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(st->regmap, ADMV8818_REG_WR0_FILTER,
+ ADMV8818_HPF_WR0_MSK,
+ FIELD_PREP(ADMV8818_HPF_WR0_MSK, hpf_step));
+}
+
+static int admv8818_hpf_select(struct admv8818_state *st, u64 freq)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv8818_hpf_select(st, freq);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __admv8818_lpf_select(struct admv8818_state *st, u64 freq)
+{
+ unsigned int lpf_step = 0, lpf_band = 0, i, j;
+ u64 freq_step;
+ int ret;
+
+ if (freq > freq_range_lpf[3][1])
+ goto lpf_write;
+
+ if (freq < freq_range_lpf[0][0]) {
+ lpf_band = 1;
+
+ goto lpf_write;
+ }
+
+ for (i = 0; i < 4; i++) {
+ if (freq > freq_range_lpf[i][0] && freq < freq_range_lpf[i][1]) {
+ lpf_band = i + 1;
+ freq_step = div_u64((freq_range_lpf[i][1] - freq_range_lpf[i][0]), 15);
+
+ for (j = 0; j <= 15; j++) {
+ if (freq < (freq_range_lpf[i][0] + (freq_step * j))) {
+ lpf_step = j;
+ break;
+ }
+ }
+ break;
+ }
+ }
+
+lpf_write:
+ ret = regmap_update_bits(st->regmap, ADMV8818_REG_WR0_SW,
+ ADMV8818_SW_OUT_SET_WR0_MSK |
+ ADMV8818_SW_OUT_WR0_MSK,
+ FIELD_PREP(ADMV8818_SW_OUT_SET_WR0_MSK, 1) |
+ FIELD_PREP(ADMV8818_SW_OUT_WR0_MSK, lpf_band));
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(st->regmap, ADMV8818_REG_WR0_FILTER,
+ ADMV8818_LPF_WR0_MSK,
+ FIELD_PREP(ADMV8818_LPF_WR0_MSK, lpf_step));
+}
+
+static int admv8818_lpf_select(struct admv8818_state *st, u64 freq)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv8818_lpf_select(st, freq);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int admv8818_rfin_band_select(struct admv8818_state *st)
+{
+ int ret;
+
+ st->cf_hz = clk_get_rate(st->clkin);
+
+ mutex_lock(&st->lock);
+
+ ret = __admv8818_hpf_select(st, st->cf_hz);
+ if (ret)
+ goto exit;
+
+ ret = __admv8818_lpf_select(st, st->cf_hz);
+exit:
+ mutex_unlock(&st->lock);
+ return ret;
+}
+
+static int __admv8818_read_hpf_freq(struct admv8818_state *st, u64 *hpf_freq)
+{
+ unsigned int data, hpf_band, hpf_state;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADMV8818_REG_WR0_SW, &data);
+ if (ret)
+ return ret;
+
+ hpf_band = FIELD_GET(ADMV8818_SW_IN_WR0_MSK, data);
+ if (!hpf_band) {
+ *hpf_freq = 0;
+ return ret;
+ }
+
+ ret = regmap_read(st->regmap, ADMV8818_REG_WR0_FILTER, &data);
+ if (ret)
+ return ret;
+
+ hpf_state = FIELD_GET(ADMV8818_HPF_WR0_MSK, data);
+
+ *hpf_freq = div_u64(freq_range_hpf[hpf_band - 1][1] - freq_range_hpf[hpf_band - 1][0], 15);
+ *hpf_freq = freq_range_hpf[hpf_band - 1][0] + (*hpf_freq * hpf_state);
+
+ return ret;
+}
+
+static int admv8818_read_hpf_freq(struct admv8818_state *st, u64 *hpf_freq)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv8818_read_hpf_freq(st, hpf_freq);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __admv8818_read_lpf_freq(struct admv8818_state *st, u64 *lpf_freq)
+{
+ unsigned int data, lpf_band, lpf_state;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADMV8818_REG_WR0_SW, &data);
+ if (ret)
+ return ret;
+
+ lpf_band = FIELD_GET(ADMV8818_SW_OUT_WR0_MSK, data);
+ if (!lpf_band) {
+ *lpf_freq = 0;
+ return ret;
+ }
+
+ ret = regmap_read(st->regmap, ADMV8818_REG_WR0_FILTER, &data);
+ if (ret)
+ return ret;
+
+ lpf_state = FIELD_GET(ADMV8818_LPF_WR0_MSK, data);
+
+ *lpf_freq = div_u64(freq_range_lpf[lpf_band - 1][1] - freq_range_lpf[lpf_band - 1][0], 15);
+ *lpf_freq = freq_range_lpf[lpf_band - 1][0] + (*lpf_freq * lpf_state);
+
+ return ret;
+}
+
+static int admv8818_read_lpf_freq(struct admv8818_state *st, u64 *lpf_freq)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv8818_read_lpf_freq(st, lpf_freq);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int admv8818_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct admv8818_state *st = iio_priv(indio_dev);
+
+ u64 freq = ((u64)val2 << 32 | (u32)val);
+
+ switch (info) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ return admv8818_lpf_select(st, freq);
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ return admv8818_hpf_select(st, freq);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admv8818_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct admv8818_state *st = iio_priv(indio_dev);
+ int ret;
+ u64 freq;
+
+ switch (info) {
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ ret = admv8818_read_lpf_freq(st, &freq);
+ if (ret)
+ return ret;
+
+ *val = (u32)freq;
+ *val2 = (u32)(freq >> 32);
+
+ return IIO_VAL_INT_64;
+ case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
+ ret = admv8818_read_hpf_freq(st, &freq);
+ if (ret)
+ return ret;
+
+ *val = (u32)freq;
+ *val2 = (u32)(freq >> 32);
+
+ return IIO_VAL_INT_64;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admv8818_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int write_val,
+ unsigned int *read_val)
+{
+ struct admv8818_state *st = iio_priv(indio_dev);
+
+ if (read_val)
+ return regmap_read(st->regmap, reg, read_val);
+ else
+ return regmap_write(st->regmap, reg, write_val);
+}
+
+static int admv8818_get_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct admv8818_state *st = iio_priv(indio_dev);
+
+ return st->filter_mode;
+}
+
+static int admv8818_set_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct admv8818_state *st = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (!st->clkin) {
+ if (mode == ADMV8818_MANUAL_MODE)
+ return 0;
+
+ return -EINVAL;
+ }
+
+ switch (mode) {
+ case ADMV8818_AUTO_MODE:
+ if (!st->filter_mode)
+ return 0;
+
+ ret = clk_prepare_enable(st->clkin);
+ if (ret)
+ return ret;
+
+ ret = clk_notifier_register(st->clkin, &st->nb);
+ if (ret) {
+ clk_disable_unprepare(st->clkin);
+
+ return ret;
+ }
+
+ break;
+ case ADMV8818_MANUAL_MODE:
+ if (st->filter_mode)
+ return 0;
+
+ clk_disable_unprepare(st->clkin);
+
+ ret = clk_notifier_unregister(st->clkin, &st->nb);
+ if (ret)
+ return ret;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ st->filter_mode = mode;
+
+ return ret;
+}
+
+static const struct iio_info admv8818_info = {
+ .write_raw = admv8818_write_raw,
+ .read_raw = admv8818_read_raw,
+ .debugfs_reg_access = &admv8818_reg_access,
+};
+
+static const struct iio_enum admv8818_mode_enum = {
+ .items = admv8818_modes,
+ .num_items = ARRAY_SIZE(admv8818_modes),
+ .get = admv8818_get_mode,
+ .set = admv8818_set_mode,
+};
+
+static const struct iio_chan_spec_ext_info admv8818_ext_info[] = {
+ IIO_ENUM("filter_mode", IIO_SHARED_BY_ALL, &admv8818_mode_enum),
+ IIO_ENUM_AVAILABLE("filter_mode", IIO_SHARED_BY_ALL, &admv8818_mode_enum),
+ { },
+};
+
+#define ADMV8818_CHAN(_channel) { \
+ .type = IIO_ALTVOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY) | \
+ BIT(IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY) \
+}
+
+#define ADMV8818_CHAN_BW_CF(_channel, _admv8818_ext_info) { \
+ .type = IIO_ALTVOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .ext_info = _admv8818_ext_info, \
+}
+
+static const struct iio_chan_spec admv8818_channels[] = {
+ ADMV8818_CHAN(0),
+ ADMV8818_CHAN_BW_CF(0, admv8818_ext_info),
+};
+
+static int admv8818_freq_change(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct admv8818_state *st = container_of(nb, struct admv8818_state, nb);
+
+ if (action == POST_RATE_CHANGE)
+ return notifier_from_errno(admv8818_rfin_band_select(st));
+
+ return NOTIFY_OK;
+}
+
+static void admv8818_clk_notifier_unreg(void *data)
+{
+ struct admv8818_state *st = data;
+
+ if (st->filter_mode == 0)
+ clk_notifier_unregister(st->clkin, &st->nb);
+}
+
+static void admv8818_clk_disable(void *data)
+{
+ struct admv8818_state *st = data;
+
+ if (st->filter_mode == 0)
+ clk_disable_unprepare(st->clkin);
+}
+
+static int admv8818_init(struct admv8818_state *st)
+{
+ int ret;
+ struct spi_device *spi = st->spi;
+ unsigned int chip_id;
+
+ ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+ ADMV8818_SOFTRESET_N_MSK |
+ ADMV8818_SOFTRESET_MSK,
+ FIELD_PREP(ADMV8818_SOFTRESET_N_MSK, 1) |
+ FIELD_PREP(ADMV8818_SOFTRESET_MSK, 1));
+ if (ret) {
+ dev_err(&spi->dev, "ADMV8818 Soft Reset failed.\n");
+ return ret;
+ }
+
+ ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+ ADMV8818_SDOACTIVE_N_MSK |
+ ADMV8818_SDOACTIVE_MSK,
+ FIELD_PREP(ADMV8818_SDOACTIVE_N_MSK, 1) |
+ FIELD_PREP(ADMV8818_SDOACTIVE_MSK, 1));
+ if (ret) {
+ dev_err(&spi->dev, "ADMV8818 SDO Enable failed.\n");
+ return ret;
+ }
+
+ ret = regmap_read(st->regmap, ADMV8818_REG_CHIPTYPE, &chip_id);
+ if (ret) {
+ dev_err(&spi->dev, "ADMV8818 Chip ID read failed.\n");
+ return ret;
+ }
+
+ if (chip_id != 0x1) {
+ dev_err(&spi->dev, "ADMV8818 Invalid Chip ID.\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_B,
+ ADMV8818_SINGLE_INSTRUCTION_MSK,
+ FIELD_PREP(ADMV8818_SINGLE_INSTRUCTION_MSK, 1));
+ if (ret) {
+ dev_err(&spi->dev, "ADMV8818 Single Instruction failed.\n");
+ return ret;
+ }
+
+ if (st->clkin)
+ return admv8818_rfin_band_select(st);
+ else
+ return 0;
+}
+
+static int admv8818_clk_setup(struct admv8818_state *st)
+{
+ struct spi_device *spi = st->spi;
+ int ret;
+
+ st->clkin = devm_clk_get_optional(&spi->dev, "rf_in");
+ if (IS_ERR(st->clkin))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->clkin),
+ "failed to get the input clock\n");
+ else if (!st->clkin)
+ return 0;
+
+ ret = clk_prepare_enable(st->clkin);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, admv8818_clk_disable, st);
+ if (ret)
+ return ret;
+
+ st->nb.notifier_call = admv8818_freq_change;
+ ret = clk_notifier_register(st->clkin, &st->nb);
+ if (ret < 0)
+ return ret;
+
+ return devm_add_action_or_reset(&spi->dev, admv8818_clk_notifier_unreg, st);
+}
+
+static int admv8818_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ struct admv8818_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ regmap = devm_regmap_init_spi(spi, &admv8818_regmap_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ st = iio_priv(indio_dev);
+ st->regmap = regmap;
+
+ indio_dev->info = &admv8818_info;
+ indio_dev->name = "admv8818";
+ indio_dev->channels = admv8818_channels;
+ indio_dev->num_channels = ARRAY_SIZE(admv8818_channels);
+
+ st->spi = spi;
+
+ ret = admv8818_clk_setup(st);
+ if (ret)
+ return ret;
+
+ mutex_init(&st->lock);
+
+ ret = admv8818_init(st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id admv8818_id[] = {
+ { "admv8818", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, admv8818_id);
+
+static const struct of_device_id admv8818_of_match[] = {
+ { .compatible = "adi,admv8818" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, admv8818_of_match);
+
+static struct spi_driver admv8818_driver = {
+ .driver = {
+ .name = "admv8818",
+ .of_match_table = admv8818_of_match,
+ },
+ .probe = admv8818_probe,
+ .id_table = admv8818_id,
+};
+module_spi_driver(admv8818_driver);
+
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADMV8818");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig
index 2c9e0559e8a4..b44036f843af 100644
--- a/drivers/iio/frequency/Kconfig
+++ b/drivers/iio/frequency/Kconfig
@@ -50,6 +50,16 @@ config ADF4371
To compile this driver as a module, choose M here: the
module will be called adf4371.
+config ADMV1013
+ tristate "Analog Devices ADMV1013 Microwave Upconverter"
+ depends on SPI && COMMON_CLK
+ help
+ Say yes here to build support for Analog Devices ADMV1013
+ 24 GHz to 44 GHz, Wideband, Microwave Upconverter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called admv1013.
+
config ADRF6780
tristate "Analog Devices ADRF6780 Microwave Upconverter"
depends on SPI
diff --git a/drivers/iio/frequency/Makefile b/drivers/iio/frequency/Makefile
index ae3136c79202..ae6899856c99 100644
--- a/drivers/iio/frequency/Makefile
+++ b/drivers/iio/frequency/Makefile
@@ -7,4 +7,5 @@
obj-$(CONFIG_AD9523) += ad9523.o
obj-$(CONFIG_ADF4350) += adf4350.o
obj-$(CONFIG_ADF4371) += adf4371.o
+obj-$(CONFIG_ADMV1013) += admv1013.o
obj-$(CONFIG_ADRF6780) += adrf6780.o
diff --git a/drivers/iio/frequency/admv1013.c b/drivers/iio/frequency/admv1013.c
new file mode 100644
index 000000000000..6cdeb50143af
--- /dev/null
+++ b/drivers/iio/frequency/admv1013.c
@@ -0,0 +1,656 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * ADMV1013 driver
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/notifier.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+#include <asm/unaligned.h>
+
+/* ADMV1013 Register Map */
+#define ADMV1013_REG_SPI_CONTROL 0x00
+#define ADMV1013_REG_ALARM 0x01
+#define ADMV1013_REG_ALARM_MASKS 0x02
+#define ADMV1013_REG_ENABLE 0x03
+#define ADMV1013_REG_LO_AMP_I 0x05
+#define ADMV1013_REG_LO_AMP_Q 0x06
+#define ADMV1013_REG_OFFSET_ADJUST_I 0x07
+#define ADMV1013_REG_OFFSET_ADJUST_Q 0x08
+#define ADMV1013_REG_QUAD 0x09
+#define ADMV1013_REG_VVA_TEMP_COMP 0x0A
+
+/* ADMV1013_REG_SPI_CONTROL Map */
+#define ADMV1013_PARITY_EN_MSK BIT(15)
+#define ADMV1013_SPI_SOFT_RESET_MSK BIT(14)
+#define ADMV1013_CHIP_ID_MSK GENMASK(11, 4)
+#define ADMV1013_CHIP_ID 0xA
+#define ADMV1013_REVISION_ID_MSK GENMASK(3, 0)
+
+/* ADMV1013_REG_ALARM Map */
+#define ADMV1013_PARITY_ERROR_MSK BIT(15)
+#define ADMV1013_TOO_FEW_ERRORS_MSK BIT(14)
+#define ADMV1013_TOO_MANY_ERRORS_MSK BIT(13)
+#define ADMV1013_ADDRESS_RANGE_ERROR_MSK BIT(12)
+
+/* ADMV1013_REG_ENABLE Map */
+#define ADMV1013_VGA_PD_MSK BIT(15)
+#define ADMV1013_MIXER_PD_MSK BIT(14)
+#define ADMV1013_QUAD_PD_MSK GENMASK(13, 11)
+#define ADMV1013_BG_PD_MSK BIT(10)
+#define ADMV1013_MIXER_IF_EN_MSK BIT(7)
+#define ADMV1013_DET_EN_MSK BIT(5)
+
+/* ADMV1013_REG_LO_AMP Map */
+#define ADMV1013_LOAMP_PH_ADJ_FINE_MSK GENMASK(13, 7)
+#define ADMV1013_MIXER_VGATE_MSK GENMASK(6, 0)
+
+/* ADMV1013_REG_OFFSET_ADJUST Map */
+#define ADMV1013_MIXER_OFF_ADJ_P_MSK GENMASK(15, 9)
+#define ADMV1013_MIXER_OFF_ADJ_N_MSK GENMASK(8, 2)
+
+/* ADMV1013_REG_QUAD Map */
+#define ADMV1013_QUAD_SE_MODE_MSK GENMASK(9, 6)
+#define ADMV1013_QUAD_FILTERS_MSK GENMASK(3, 0)
+
+/* ADMV1013_REG_VVA_TEMP_COMP Map */
+#define ADMV1013_VVA_TEMP_COMP_MSK GENMASK(15, 0)
+
+/* ADMV1013 Miscellaneous Defines */
+#define ADMV1013_READ BIT(7)
+#define ADMV1013_REG_ADDR_READ_MSK GENMASK(6, 1)
+#define ADMV1013_REG_ADDR_WRITE_MSK GENMASK(22, 17)
+#define ADMV1013_REG_DATA_MSK GENMASK(16, 1)
+
+enum {
+ ADMV1013_IQ_MODE,
+ ADMV1013_IF_MODE
+};
+
+enum {
+ ADMV1013_RFMOD_I_CALIBPHASE,
+ ADMV1013_RFMOD_Q_CALIBPHASE,
+};
+
+enum {
+ ADMV1013_SE_MODE_POS = 6,
+ ADMV1013_SE_MODE_NEG = 9,
+ ADMV1013_SE_MODE_DIFF = 12
+};
+
+struct admv1013_state {
+ struct spi_device *spi;
+ struct clk *clkin;
+ /* Protect against concurrent accesses to the device and to data */
+ struct mutex lock;
+ struct regulator *reg;
+ struct notifier_block nb;
+ unsigned int input_mode;
+ unsigned int quad_se_mode;
+ bool det_en;
+ u8 data[3] ____cacheline_aligned;
+};
+
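+/*
+ * SPI frames are 24 bits wide. Reads place the R/W flag and the 6-bit
+ * address in the first byte (ADMV1013_REG_ADDR_READ_MSK), writes shift the
+ * address into ADMV1013_REG_ADDR_WRITE_MSK; the 16-bit payload sits in
+ * ADMV1013_REG_DATA_MSK in both directions.
+ */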
+static int __admv1013_spi_read(struct admv1013_state *st, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+ struct spi_transfer t = {0};
+
+ st->data[0] = ADMV1013_READ | FIELD_PREP(ADMV1013_REG_ADDR_READ_MSK, reg);
+ st->data[1] = 0x0;
+ st->data[2] = 0x0;
+
+ t.rx_buf = &st->data[0];
+ t.tx_buf = &st->data[0];
+ t.len = 3;
+
+ ret = spi_sync_transfer(st->spi, &t, 1);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(ADMV1013_REG_DATA_MSK, get_unaligned_be24(&st->data[0]));
+
+ return ret;
+}
+
+static int admv1013_spi_read(struct admv1013_state *st, unsigned int reg,
+ unsigned int *val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv1013_spi_read(st, reg, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __admv1013_spi_write(struct admv1013_state *st,
+ unsigned int reg,
+ unsigned int val)
+{
+ put_unaligned_be24(FIELD_PREP(ADMV1013_REG_DATA_MSK, val) |
+ FIELD_PREP(ADMV1013_REG_ADDR_WRITE_MSK, reg), &st->data[0]);
+
+ return spi_write(st->spi, &st->data[0], 3);
+}
+
+static int admv1013_spi_write(struct admv1013_state *st, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv1013_spi_write(st, reg, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int __admv1013_spi_update_bits(struct admv1013_state *st, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int ret;
+ unsigned int data, temp;
+
+ ret = __admv1013_spi_read(st, reg, &data);
+ if (ret)
+ return ret;
+
+ temp = (data & ~mask) | (val & mask);
+
+ return __admv1013_spi_write(st, reg, temp);
+}
+
+static int admv1013_spi_update_bits(struct admv1013_state *st, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = __admv1013_spi_update_bits(st, reg, mask, val);
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int admv1013_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long info)
+{
+ struct admv1013_state *st = iio_priv(indio_dev);
+ unsigned int data, addr;
+ int ret;
+
+ switch (info) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->channel2) {
+ case IIO_MOD_I:
+ addr = ADMV1013_REG_OFFSET_ADJUST_I;
+ break;
+ case IIO_MOD_Q:
+ addr = ADMV1013_REG_OFFSET_ADJUST_Q;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = admv1013_spi_read(st, addr, &data);
+ if (ret)
+ return ret;
+
+ if (!chan->channel)
+ *val = FIELD_GET(ADMV1013_MIXER_OFF_ADJ_P_MSK, data);
+ else
+ *val = FIELD_GET(ADMV1013_MIXER_OFF_ADJ_N_MSK, data);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admv1013_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct admv1013_state *st = iio_priv(indio_dev);
+ unsigned int addr, data, msk;
+
+ switch (info) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->channel2) {
+ case IIO_MOD_I:
+ addr = ADMV1013_REG_OFFSET_ADJUST_I;
+ break;
+ case IIO_MOD_Q:
+ addr = ADMV1013_REG_OFFSET_ADJUST_Q;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!chan->channel) {
+ msk = ADMV1013_MIXER_OFF_ADJ_P_MSK;
+ data = FIELD_PREP(ADMV1013_MIXER_OFF_ADJ_P_MSK, val);
+ } else {
+ msk = ADMV1013_MIXER_OFF_ADJ_N_MSK;
+ data = FIELD_PREP(ADMV1013_MIXER_OFF_ADJ_N_MSK, val);
+ }
+
+ return admv1013_spi_update_bits(st, addr, msk, data);
+ default:
+ return -EINVAL;
+ }
+}
+
+static ssize_t admv1013_read(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct admv1013_state *st = iio_priv(indio_dev);
+ unsigned int data, addr;
+ int ret;
+
+ switch ((u32)private) {
+ case ADMV1013_RFMOD_I_CALIBPHASE:
+ addr = ADMV1013_REG_LO_AMP_I;
+ break;
+ case ADMV1013_RFMOD_Q_CALIBPHASE:
+ addr = ADMV1013_REG_LO_AMP_Q;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = admv1013_spi_read(st, addr, &data);
+ if (ret)
+ return ret;
+
+ data = FIELD_GET(ADMV1013_LOAMP_PH_ADJ_FINE_MSK, data);
+
+ return sysfs_emit(buf, "%u\n", data);
+}
+
+static ssize_t admv1013_write(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct admv1013_state *st = iio_priv(indio_dev);
+ unsigned int data;
+ int ret;
+
+ ret = kstrtou32(buf, 10, &data);
+ if (ret)
+ return ret;
+
+ data = FIELD_PREP(ADMV1013_LOAMP_PH_ADJ_FINE_MSK, data);
+
+ switch ((u32)private) {
+ case ADMV1013_RFMOD_I_CALIBPHASE:
+ ret = admv1013_spi_update_bits(st, ADMV1013_REG_LO_AMP_I,
+ ADMV1013_LOAMP_PH_ADJ_FINE_MSK,
+ data);
+ if (ret)
+ return ret;
+ break;
+ case ADMV1013_RFMOD_Q_CALIBPHASE:
+ ret = admv1013_spi_update_bits(st, ADMV1013_REG_LO_AMP_Q,
+ ADMV1013_LOAMP_PH_ADJ_FINE_MSK,
+ data);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret ? ret : len;
+}
+
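+/* Select the quadrature filter band that matches the current LO rate. */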
+static int admv1013_update_quad_filters(struct admv1013_state *st)
+{
+ unsigned int filt_raw;
+ u64 rate = clk_get_rate(st->clkin);
+
+ if (rate >= (5400 * HZ_PER_MHZ) && rate <= (7000 * HZ_PER_MHZ))
+ filt_raw = 15;
+ else if (rate >= (5400 * HZ_PER_MHZ) && rate <= (8000 * HZ_PER_MHZ))
+ filt_raw = 10;
+ else if (rate >= (6600 * HZ_PER_MHZ) && rate <= (9200 * HZ_PER_MHZ))
+ filt_raw = 5;
+ else
+ filt_raw = 0;
+
+ return __admv1013_spi_update_bits(st, ADMV1013_REG_QUAD,
+ ADMV1013_QUAD_FILTERS_MSK,
+ FIELD_PREP(ADMV1013_QUAD_FILTERS_MSK, filt_raw));
+}
+
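+/*
+ * The mixer gate voltage follows the common-mode supply through a
+ * piecewise-linear relation; vcm is in microvolts as returned by the
+ * regulator framework.
+ */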
+static int admv1013_update_mixer_vgate(struct admv1013_state *st)
+{
+ unsigned int mixer_vgate;
+ int vcm;
+
+ vcm = regulator_get_voltage(st->reg);
+ if (vcm < 0)
+ return vcm;
+
+ if (vcm < 1800000)
+ mixer_vgate = (2389 * vcm / 1000000 + 8100) / 100;
+ else if (vcm > 1800000 && vcm < 2600000)
+ mixer_vgate = (2375 * vcm / 1000000 + 125) / 100;
+ else
+ return -EINVAL;
+
+ return __admv1013_spi_update_bits(st, ADMV1013_REG_LO_AMP_I,
+ ADMV1013_MIXER_VGATE_MSK,
+ FIELD_PREP(ADMV1013_MIXER_VGATE_MSK, mixer_vgate));
+}
+
+static int admv1013_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int write_val,
+ unsigned int *read_val)
+{
+ struct admv1013_state *st = iio_priv(indio_dev);
+
+ if (read_val)
+ return admv1013_spi_read(st, reg, read_val);
+ else
+ return admv1013_spi_write(st, reg, write_val);
+}
+
+static const struct iio_info admv1013_info = {
+ .read_raw = admv1013_read_raw,
+ .write_raw = admv1013_write_raw,
+ .debugfs_reg_access = &admv1013_reg_access,
+};
+
+static int admv1013_freq_change(struct notifier_block *nb, unsigned long action, void *data)
+{
+ struct admv1013_state *st = container_of(nb, struct admv1013_state, nb);
+ int ret;
+
+ if (action == POST_RATE_CHANGE) {
+ mutex_lock(&st->lock);
+ ret = notifier_from_errno(admv1013_update_quad_filters(st));
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ return NOTIFY_OK;
+}
+
+#define _ADMV1013_EXT_INFO(_name, _shared, _ident) { \
+ .name = _name, \
+ .read = admv1013_read, \
+ .write = admv1013_write, \
+ .private = _ident, \
+ .shared = _shared, \
+}
+
+static const struct iio_chan_spec_ext_info admv1013_ext_info[] = {
+ _ADMV1013_EXT_INFO("i_calibphase", IIO_SEPARATE, ADMV1013_RFMOD_I_CALIBPHASE),
+ _ADMV1013_EXT_INFO("q_calibphase", IIO_SEPARATE, ADMV1013_RFMOD_Q_CALIBPHASE),
+ { },
+};
+
+#define ADMV1013_CHAN_PHASE(_channel, _channel2, _admv1013_ext_info) { \
+ .type = IIO_ALTVOLTAGE, \
+ .output = 0, \
+ .indexed = 1, \
+ .channel2 = _channel2, \
+ .channel = _channel, \
+ .differential = 1, \
+ .ext_info = _admv1013_ext_info, \
+ }
+
+#define ADMV1013_CHAN_CALIB(_channel, rf_comp) { \
+ .type = IIO_ALTVOLTAGE, \
+ .output = 0, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .channel2 = IIO_MOD_##rf_comp, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_CALIBBIAS), \
+ }
+
+static const struct iio_chan_spec admv1013_channels[] = {
+ ADMV1013_CHAN_PHASE(0, 1, admv1013_ext_info),
+ ADMV1013_CHAN_CALIB(0, I),
+ ADMV1013_CHAN_CALIB(0, Q),
+ ADMV1013_CHAN_CALIB(1, I),
+ ADMV1013_CHAN_CALIB(1, Q),
+};
+
+static int admv1013_init(struct admv1013_state *st)
+{
+ int ret;
+ unsigned int data;
+ struct spi_device *spi = st->spi;
+
+ /* Perform a software reset */
+ ret = __admv1013_spi_update_bits(st, ADMV1013_REG_SPI_CONTROL,
+ ADMV1013_SPI_SOFT_RESET_MSK,
+ FIELD_PREP(ADMV1013_SPI_SOFT_RESET_MSK, 1));
+ if (ret)
+ return ret;
+
+ ret = __admv1013_spi_update_bits(st, ADMV1013_REG_SPI_CONTROL,
+ ADMV1013_SPI_SOFT_RESET_MSK,
+ FIELD_PREP(ADMV1013_SPI_SOFT_RESET_MSK, 0));
+ if (ret)
+ return ret;
+
+ ret = __admv1013_spi_read(st, ADMV1013_REG_SPI_CONTROL, &data);
+ if (ret)
+ return ret;
+
+ data = FIELD_GET(ADMV1013_CHIP_ID_MSK, data);
+ if (data != ADMV1013_CHIP_ID) {
+ dev_err(&spi->dev, "Invalid Chip ID.\n");
+ return -EINVAL;
+ }
+
+ ret = __admv1013_spi_write(st, ADMV1013_REG_VVA_TEMP_COMP, 0xE700);
+ if (ret)
+ return ret;
+
+ data = FIELD_PREP(ADMV1013_QUAD_SE_MODE_MSK, st->quad_se_mode);
+
+ ret = __admv1013_spi_update_bits(st, ADMV1013_REG_QUAD,
+ ADMV1013_QUAD_SE_MODE_MSK, data);
+ if (ret)
+ return ret;
+
+ ret = admv1013_update_mixer_vgate(st);
+ if (ret)
+ return ret;
+
+ ret = admv1013_update_quad_filters(st);
+ if (ret)
+ return ret;
+
+ return __admv1013_spi_update_bits(st, ADMV1013_REG_ENABLE,
+ ADMV1013_DET_EN_MSK |
+ ADMV1013_MIXER_IF_EN_MSK,
+ st->det_en |
+ st->input_mode);
+}
+
+static void admv1013_clk_disable(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static void admv1013_reg_disable(void *data)
+{
+ regulator_disable(data);
+}
+
+static void admv1013_powerdown(void *data)
+{
+ unsigned int enable_reg, enable_reg_msk;
+
+ /* Disable all components in the Enable Register */
+ enable_reg_msk = ADMV1013_VGA_PD_MSK |
+ ADMV1013_MIXER_PD_MSK |
+ ADMV1013_QUAD_PD_MSK |
+ ADMV1013_BG_PD_MSK |
+ ADMV1013_MIXER_IF_EN_MSK |
+ ADMV1013_DET_EN_MSK;
+
+ enable_reg = FIELD_PREP(ADMV1013_VGA_PD_MSK, 1) |
+ FIELD_PREP(ADMV1013_MIXER_PD_MSK, 1) |
+ FIELD_PREP(ADMV1013_QUAD_PD_MSK, 7) |
+ FIELD_PREP(ADMV1013_BG_PD_MSK, 1) |
+ FIELD_PREP(ADMV1013_MIXER_IF_EN_MSK, 0) |
+ FIELD_PREP(ADMV1013_DET_EN_MSK, 0);
+
+ admv1013_spi_update_bits(data, ADMV1013_REG_ENABLE, enable_reg_msk, enable_reg);
+}
+
+static int admv1013_properties_parse(struct admv1013_state *st)
+{
+ int ret;
+ const char *str;
+ struct spi_device *spi = st->spi;
+
+ st->det_en = device_property_read_bool(&spi->dev, "adi,detector-enable");
+
+ ret = device_property_read_string(&spi->dev, "adi,input-mode", &str);
+ if (ret)
+ st->input_mode = ADMV1013_IQ_MODE;
+ else if (!strcmp(str, "iq"))
+ st->input_mode = ADMV1013_IQ_MODE;
+ else if (!strcmp(str, "if"))
+ st->input_mode = ADMV1013_IF_MODE;
+ else
+ return -EINVAL;
+
+ ret = device_property_read_string(&spi->dev, "adi,quad-se-mode", &str);
+ if (ret)
+ st->quad_se_mode = ADMV1013_SE_MODE_DIFF;
+ else if (!strcmp(str, "diff"))
+ st->quad_se_mode = ADMV1013_SE_MODE_DIFF;
+ else if (!strcmp(str, "se-pos"))
+ st->quad_se_mode = ADMV1013_SE_MODE_POS;
+ else if (!strcmp(str, "se-neg"))
+ st->quad_se_mode = ADMV1013_SE_MODE_NEG;
+ else
+ return -EINVAL;
+
+ st->reg = devm_regulator_get(&spi->dev, "vcm");
+ if (IS_ERR(st->reg))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->reg),
+ "failed to get the common-mode voltage\n");
+
+ st->clkin = devm_clk_get(&spi->dev, "lo_in");
+ if (IS_ERR(st->clkin))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->clkin),
+ "failed to get the LO input clock\n");
+
+ return 0;
+}
+
+static int admv1013_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct admv1013_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ indio_dev->info = &admv1013_info;
+ indio_dev->name = "admv1013";
+ indio_dev->channels = admv1013_channels;
+ indio_dev->num_channels = ARRAY_SIZE(admv1013_channels);
+
+ st->spi = spi;
+
+ ret = admv1013_properties_parse(st);
+ if (ret)
+ return ret;
+
+ ret = regulator_enable(st->reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable specified Common-Mode Voltage!\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev, admv1013_reg_disable,
+ st->reg);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(st->clkin);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&spi->dev, admv1013_clk_disable, st->clkin);
+ if (ret)
+ return ret;
+
+ st->nb.notifier_call = admv1013_freq_change;
+ ret = devm_clk_notifier_register(&spi->dev, st->clkin, &st->nb);
+ if (ret)
+ return ret;
+
+ mutex_init(&st->lock);
+
+ ret = admv1013_init(st);
+ if (ret) {
+ dev_err(&spi->dev, "admv1013 init failed\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&spi->dev, admv1013_powerdown, st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
+}
+
+static const struct spi_device_id admv1013_id[] = {
+ { "admv1013", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, admv1013_id);
+
+static const struct of_device_id admv1013_of_match[] = {
+ { .compatible = "adi,admv1013" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, admv1013_of_match);
+
+static struct spi_driver admv1013_driver = {
+ .driver = {
+ .name = "admv1013",
+ .of_match_table = admv1013_of_match,
+ },
+ .probe = admv1013_probe,
+ .id_table = admv1013_id,
+};
+module_spi_driver(admv1013_driver);
+
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADMV1013");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 97b82f9a8e45..273f16dcaff8 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -345,9 +345,6 @@ err:
return IRQ_HANDLED;
}
-static const struct iio_trigger_ops afe4403_trigger_ops = {
-};
-
#define AFE4403_TIMING_PAIRS \
{ AFE440X_LED2STC, 0x000050 }, \
{ AFE440X_LED2ENDC, 0x0003e7 }, \
@@ -530,8 +527,6 @@ static int afe4403_probe(struct spi_device *spi)
iio_trigger_set_drvdata(afe->trig, indio_dev);
- afe->trig->ops = &afe4403_trigger_ops;
-
ret = iio_trigger_register(afe->trig);
if (ret) {
dev_err(afe->dev, "Unable to register IIO trigger\n");
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 7ef3f5e34de5..aa9311e1e655 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -347,9 +347,6 @@ err:
return IRQ_HANDLED;
}
-static const struct iio_trigger_ops afe4404_trigger_ops = {
-};
-
/* Default timings from data-sheet */
#define AFE4404_TIMING_PAIRS \
{ AFE440X_PRPCOUNT, 39999 }, \
@@ -537,8 +534,6 @@ static int afe4404_probe(struct i2c_client *client,
iio_trigger_set_drvdata(afe->trig, indio_dev);
- afe->trig->ops = &afe4404_trigger_ops;
-
ret = iio_trigger_register(afe->trig);
if (ret) {
dev_err(afe->dev, "Unable to register IIO trigger\n");
diff --git a/drivers/iio/iio_core.h b/drivers/iio/iio_core.h
index 61e318431de9..501e286702ef 100644
--- a/drivers/iio/iio_core.h
+++ b/drivers/iio/iio_core.h
@@ -16,7 +16,7 @@ struct iio_buffer;
struct iio_chan_spec;
struct iio_dev;
-extern struct device_type iio_device_type;
+extern const struct device_type iio_device_type;
struct iio_dev_buffer_pair {
struct iio_dev *indio_dev;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
index 85b1934cec60..33d9afb1ba91 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_i2c.c
@@ -58,7 +58,7 @@ static int inv_icm42600_probe(struct i2c_client *client)
match = device_get_match_data(&client->dev);
if (!match)
return -EINVAL;
- chip = (enum inv_icm42600_chip)match;
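+ /* cast via uintptr_t to avoid a pointer-to-enum conversion warning */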
+ chip = (uintptr_t)match;
regmap = devm_regmap_init_i2c(client, &inv_icm42600_regmap_config);
if (IS_ERR(regmap))
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
index 323789697a08..e6305e5fa975 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_spi.c
@@ -57,7 +57,7 @@ static int inv_icm42600_probe(struct spi_device *spi)
match = device_get_match_data(&spi->dev);
if (!match)
return -EINVAL;
- chip = (enum inv_icm42600_chip)match;
+ chip = (uintptr_t)match;
regmap = devm_regmap_init_spi(spi, &inv_icm42600_regmap_config);
if (IS_ERR(regmap))
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 3ef17e3f50e2..fe03707ec2d3 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -110,7 +110,7 @@ static int inv_mpu_probe(struct i2c_client *client,
match = device_get_match_data(&client->dev);
if (match) {
- chip_type = (enum inv_devices)match;
+ chip_type = (uintptr_t)match;
name = client->name;
} else if (id) {
chip_type = (enum inv_devices)
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index b056f3fe2561..6800356b25fb 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -45,7 +45,7 @@ static int inv_mpu_probe(struct spi_device *spi)
chip_type = (enum inv_devices)spi_id->driver_data;
name = spi_id->name;
} else if ((match = device_get_match_data(&spi->dev))) {
- chip_type = (enum inv_devices)match;
+ chip_type = (uintptr_t)match;
name = dev_name(&spi->dev);
} else {
return -ENODEV;
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index f2cbbc756459..727b4b6ac696 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -2244,7 +2244,9 @@ int st_lsm6dsx_probe(struct device *dev, int irq, int hw_id,
return err;
hub_settings = &hw->settings->shub_settings;
- if (hub_settings->master_en.addr) {
+ if (hub_settings->master_en.addr &&
+ (!dev_fwnode(dev) ||
+ !device_property_read_bool(dev, "st,disable-sensor-hub"))) {
err = st_lsm6dsx_shub_probe(hw, name);
if (err < 0)
return err;
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index e180728914c0..208b5193c621 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -1569,9 +1569,17 @@ static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg
}
if (copy_to_user(ival, &fd, sizeof(fd))) {
- put_unused_fd(fd);
- ret = -EFAULT;
- goto error_free_ib;
+ /*
+ * "Leak" the fd, as there's not much we can do about this
+ * anyway. 'fd' might have been closed already, as
+ * anon_inode_getfd() called fd_install() on it, which made
+ * it reachable by userland.
+ *
+ * Instead of allowing a malicious user to play tricks with
+ * us, rely on the process exit path to do any necessary
+ * cleanup, as in releasing the file, if still needed.
+ */
+ return -EFAULT;
}
return 0;
@@ -1727,8 +1735,7 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
const struct iio_chan_spec *channels;
struct iio_buffer *buffer;
- int unwind_idx;
- int ret, i;
+ int ret, i, idx;
size_t sz;
channels = indio_dev->channels;
@@ -1743,15 +1750,12 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
if (!iio_dev_opaque->attached_buffers_cnt)
return 0;
- for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
- buffer = iio_dev_opaque->attached_buffers[i];
- ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, i);
- if (ret) {
- unwind_idx = i - 1;
+ for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) {
+ buffer = iio_dev_opaque->attached_buffers[idx];
+ ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx);
+ if (ret)
goto error_unwind_sysfs_and_mask;
- }
}
- unwind_idx = iio_dev_opaque->attached_buffers_cnt - 1;
sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
@@ -1767,9 +1771,9 @@ int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
return 0;
error_unwind_sysfs_and_mask:
- for (; unwind_idx >= 0; unwind_idx--) {
- buffer = iio_dev_opaque->attached_buffers[unwind_idx];
- __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, unwind_idx);
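+ /* idx is one past the last buffer that was successfully initialised */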
+ while (idx--) {
+ buffer = iio_dev_opaque->attached_buffers[idx];
+ __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx);
}
return ret;
}
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 463a63d5bf56..409c278a4c2c 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -702,6 +702,9 @@ static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
}
case IIO_VAL_CHAR:
return sysfs_emit_at(buf, offset, "%c", (char)vals[0]);
+ case IIO_VAL_INT_64:
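+ /* vals[0] holds the lower 32 bits, vals[1] the upper 32 bits */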
+ tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]);
+ return sysfs_emit_at(buf, offset, "%lld", tmp2);
default:
return 0;
}
@@ -1619,7 +1622,7 @@ static void iio_dev_release(struct device *device)
kfree(iio_dev_opaque);
}
-struct device_type iio_device_type = {
+const struct device_type iio_device_type = {
.name = "iio_device",
.release = iio_dev_release,
};
@@ -1653,7 +1656,6 @@ struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
indio_dev->dev.type = &iio_device_type;
indio_dev->dev.bus = &iio_bus_type;
device_initialize(&indio_dev->dev);
- iio_device_set_drvdata(indio_dev, (void *)indio_dev);
mutex_init(&indio_dev->mlock);
mutex_init(&iio_dev_opaque->info_exist_lock);
INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index 93990ff1dfe3..f504ed351b3e 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -162,6 +162,39 @@ static struct iio_trigger *iio_trigger_acquire_by_name(const char *name)
return trig;
}
+static void iio_reenable_work_fn(struct work_struct *work)
+{
+ struct iio_trigger *trig = container_of(work, struct iio_trigger,
+ reenable_work);
+
+ /*
+ * This 'might' occur after the trigger state is set to disabled -
+ * in that case the driver should skip reenabling.
+ */
+ trig->ops->reenable(trig);
+}
+
+/*
+ * In general, reenable callbacks may need to sleep and this path is
+ * not performance sensitive, so just queue up a work item
+ * to reenable the trigger for us.
+ *
+ * Races that can cause this:
+ * 1) A handler runs entirely in interrupt context, so the final
+ * decrement of the use counter happens in that interrupt context.
+ * 2) The trigger has been removed, but one last interrupt gets through.
+ *
+ * For (1) we must call reenable, but not in atomic context.
+ * For (2) it should be safe to call reenable, if drivers never blindly
+ * reenable after state is off.
+ */
+static void iio_trigger_notify_done_atomic(struct iio_trigger *trig)
+{
+ if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
+ trig->ops->reenable)
+ schedule_work(&trig->reenable_work);
+}
+
void iio_trigger_poll(struct iio_trigger *trig)
{
int i;
@@ -173,7 +206,7 @@ void iio_trigger_poll(struct iio_trigger *trig)
if (trig->subirqs[i].enabled)
generic_handle_irq(trig->subirq_base + i);
else
- iio_trigger_notify_done(trig);
+ iio_trigger_notify_done_atomic(trig);
}
}
}
@@ -535,6 +568,7 @@ struct iio_trigger *viio_trigger_alloc(struct device *parent,
trig->dev.type = &iio_trig_type;
trig->dev.bus = &iio_bus_type;
device_initialize(&trig->dev);
+ INIT_WORK(&trig->reenable_work, iio_reenable_work_fn);
mutex_init(&trig->pool_lock);
trig->subirq_base = irq_alloc_descs(-1, 0,
diff --git a/drivers/iio/light/cm3605.c b/drivers/iio/light/cm3605.c
index 3e7fb16ab1f6..50d34a98839c 100644
--- a/drivers/iio/light/cm3605.c
+++ b/drivers/iio/light/cm3605.c
@@ -10,6 +10,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
@@ -18,7 +19,7 @@
#include <linux/init.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
-#include <linux/of.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
@@ -156,7 +157,6 @@ static int cm3605_probe(struct platform_device *pdev)
struct cm3605 *cm3605;
struct iio_dev *indio_dev;
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
enum iio_chan_type ch_type;
u32 rset;
int irq;
@@ -171,7 +171,7 @@ static int cm3605_probe(struct platform_device *pdev)
cm3605->dev = dev;
cm3605->dir = IIO_EV_DIR_FALLING;
- ret = of_property_read_u32(np, "capella,aset-resistance-ohms", &rset);
+ ret = device_property_read_u32(dev, "capella,aset-resistance-ohms", &rset);
if (ret) {
dev_info(dev, "no RSET specified, assuming 100K\n");
rset = 100000;
diff --git a/drivers/iio/light/gp2ap020a00f.c b/drivers/iio/light/gp2ap020a00f.c
index d1d9f2d319e4..b820041159f7 100644
--- a/drivers/iio/light/gp2ap020a00f.c
+++ b/drivers/iio/light/gp2ap020a00f.c
@@ -1467,9 +1467,6 @@ static const struct iio_buffer_setup_ops gp2ap020a00f_buffer_setup_ops = {
.predisable = &gp2ap020a00f_buffer_predisable,
};
-static const struct iio_trigger_ops gp2ap020a00f_trigger_ops = {
-};
-
static int gp2ap020a00f_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1550,8 +1547,6 @@ static int gp2ap020a00f_probe(struct i2c_client *client,
goto error_uninit_buffer;
}
- data->trig->ops = &gp2ap020a00f_trigger_ops;
-
init_irq_work(&data->work, gp2ap020a00f_iio_trigger_work);
err = iio_trigger_register(data->trig);
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index b2983b1a9ed1..47d61ec2bb50 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * ltr501.c - Support for Lite-On LTR501 ambient light and proximity sensor
+ * Support for Lite-On LTR501 and similar ambient light and proximity sensors.
*
* Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
*
@@ -98,6 +98,7 @@ enum {
ltr501 = 0,
ltr559,
ltr301,
+ ltr303,
};
struct ltr501_gain {
@@ -165,6 +166,7 @@ struct ltr501_data {
struct regmap_field *reg_ps_rate;
struct regmap_field *reg_als_prst;
struct regmap_field *reg_ps_prst;
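+ /* threshold from the proximity-near-level property, exposed via ext_info */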
+ uint32_t near_level;
};
static const struct ltr501_samp_table ltr501_als_samp_table[] = {
@@ -524,6 +526,25 @@ static int ltr501_write_intr_prst(struct ltr501_data *data,
return -EINVAL;
}
+static ssize_t ltr501_read_near_level(struct iio_dev *indio_dev,
+ uintptr_t priv,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ltr501_data *data = iio_priv(indio_dev);
+
+ return sprintf(buf, "%u\n", data->near_level);
+}
+
+static const struct iio_chan_spec_ext_info ltr501_ext_info[] = {
+ {
+ .name = "nearlevel",
+ .shared = IIO_SEPARATE,
+ .read = ltr501_read_near_level,
+ },
+ { /* sentinel */ }
+};
+
static const struct iio_event_spec ltr501_als_event_spec[] = {
{
.type = IIO_EV_TYPE_THRESH,
@@ -608,6 +629,7 @@ static const struct iio_chan_spec ltr501_channels[] = {
},
.event_spec = ltr501_pxs_event_spec,
.num_event_specs = ARRAY_SIZE(ltr501_pxs_event_spec),
+ .ext_info = ltr501_ext_info,
},
IIO_CHAN_SOFT_TIMESTAMP(3),
};
@@ -1231,6 +1253,18 @@ static const struct ltr501_chip_info ltr501_chip_info_tbl[] = {
.channels = ltr301_channels,
.no_channels = ARRAY_SIZE(ltr301_channels),
},
+ [ltr303] = {
+ .partid = 0x0A,
+ .als_gain = ltr559_als_gain_tbl,
+ .als_gain_tbl_size = ARRAY_SIZE(ltr559_als_gain_tbl),
+ .als_mode_active = BIT(0),
+ .als_gain_mask = BIT(2) | BIT(3) | BIT(4),
+ .als_gain_shift = 2,
+ .info = &ltr301_info,
+ .info_no_irq = &ltr301_info_no_irq,
+ .channels = ltr301_channels,
+ .no_channels = ARRAY_SIZE(ltr301_channels),
+ },
};
static int ltr501_write_contr(struct ltr501_data *data, u8 als_val, u8 ps_val)
@@ -1518,6 +1552,10 @@ static int ltr501_probe(struct i2c_client *client,
if ((partid >> 4) != data->chip_info->partid)
return -ENODEV;
+ if (device_property_read_u32(&client->dev, "proximity-near-level",
+ &data->near_level))
+ data->near_level = 0;
+
indio_dev->info = data->chip_info->info;
indio_dev->channels = data->chip_info->channels;
indio_dev->num_channels = data->chip_info->no_channels;
@@ -1605,6 +1643,7 @@ static const struct i2c_device_id ltr501_id[] = {
{ "ltr501", ltr501},
{ "ltr559", ltr559},
{ "ltr301", ltr301},
+ { "ltr303", ltr303},
{ }
};
MODULE_DEVICE_TABLE(i2c, ltr501_id);
@@ -1613,6 +1652,7 @@ static const struct of_device_id ltr501_of_match[] = {
{ .compatible = "liteon,ltr501", },
{ .compatible = "liteon,ltr559", },
{ .compatible = "liteon,ltr301", },
+ { .compatible = "liteon,ltr303", },
{}
};
MODULE_DEVICE_TABLE(of, ltr501_of_match);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 6e82dc54a417..55879a20ae52 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -929,7 +929,7 @@ static int ak8975_probe(struct i2c_client *client,
/* id will be NULL when enumerated via ACPI */
match = device_get_match_data(&client->dev);
if (match) {
- chipset = (enum asahi_compass_chipset)(match);
+ chipset = (uintptr_t)match;
name = dev_name(&client->dev);
} else if (id) {
chipset = (enum asahi_compass_chipset)(id->driver_data);
diff --git a/drivers/iio/magnetometer/hmc5843_core.c b/drivers/iio/magnetometer/hmc5843_core.c
index f08726bf5ec3..5a730d9bdbb0 100644
--- a/drivers/iio/magnetometer/hmc5843_core.c
+++ b/drivers/iio/magnetometer/hmc5843_core.c
@@ -246,7 +246,7 @@ static const struct iio_enum hmc5843_meas_conf_enum = {
static const struct iio_chan_spec_ext_info hmc5843_ext_info[] = {
IIO_ENUM("meas_conf", IIO_SHARED_BY_TYPE, &hmc5843_meas_conf_enum),
- IIO_ENUM_AVAILABLE("meas_conf", &hmc5843_meas_conf_enum),
+ IIO_ENUM_AVAILABLE("meas_conf", IIO_SHARED_BY_TYPE, &hmc5843_meas_conf_enum),
IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, hmc5843_get_mount_matrix),
{ }
};
@@ -260,7 +260,7 @@ static const struct iio_enum hmc5983_meas_conf_enum = {
static const struct iio_chan_spec_ext_info hmc5983_ext_info[] = {
IIO_ENUM("meas_conf", IIO_SHARED_BY_TYPE, &hmc5983_meas_conf_enum),
- IIO_ENUM_AVAILABLE("meas_conf", &hmc5983_meas_conf_enum),
+ IIO_ENUM_AVAILABLE("meas_conf", IIO_SHARED_BY_TYPE, &hmc5983_meas_conf_enum),
IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, hmc5843_get_mount_matrix),
{ }
};
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index c96415a1aead..17c62d806218 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -291,7 +291,8 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
goto release;
*val = sign_extend32(
- be16_to_cpu(buffer[chan->scan_index]), 15);
+ be16_to_cpu(buffer[chan->scan_index]),
+ chan->scan_type.realbits - 1);
ret = IIO_VAL_INT;
break;
case IIO_TEMP: /* in 1 C / LSB */
@@ -306,7 +307,8 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
if (ret < 0)
goto release;
- *val = sign_extend32(ret, 7);
+ *val = sign_extend32(ret,
+ chan->scan_type.realbits - 1);
ret = IIO_VAL_INT;
break;
default:
diff --git a/drivers/iio/potentiometer/mcp41010.c b/drivers/iio/potentiometer/mcp41010.c
index 79ccac6d4be0..30a4594d4e11 100644
--- a/drivers/iio/potentiometer/mcp41010.c
+++ b/drivers/iio/potentiometer/mcp41010.c
@@ -21,9 +21,9 @@
#include <linux/iio/iio.h>
#include <linux/iio/types.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/spi/spi.h>
#define MCP41010_MAX_WIPERS 2
@@ -146,7 +146,7 @@ static int mcp41010_probe(struct spi_device *spi)
data = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
data->spi = spi;
- data->cfg = of_device_get_match_data(&spi->dev);
+ data->cfg = device_get_match_data(&spi->dev);
if (!data->cfg)
data->cfg = &mcp41010_cfg[spi_get_device_id(spi)->driver_data];
diff --git a/drivers/iio/potentiostat/lmp91000.c b/drivers/iio/potentiostat/lmp91000.c
index ed30bdaa10ec..fe514f0b5506 100644
--- a/drivers/iio/potentiostat/lmp91000.c
+++ b/drivers/iio/potentiostat/lmp91000.c
@@ -271,9 +271,6 @@ static int lmp91000_buffer_cb(const void *val, void *private)
return 0;
}
-static const struct iio_trigger_ops lmp91000_trigger_ops = {
-};
-
static int lmp91000_buffer_postenable(struct iio_dev *indio_dev)
{
struct lmp91000_data *data = iio_priv(indio_dev);
@@ -330,7 +327,6 @@ static int lmp91000_probe(struct i2c_client *client,
return -ENOMEM;
}
- data->trig->ops = &lmp91000_trigger_ops;
init_completion(&data->completion);
ret = lmp91000_read_config(data);
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 6b7da40f99c8..bf8167f43c56 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -1138,7 +1138,6 @@ int bmp280_common_probe(struct device *dev,
}
EXPORT_SYMBOL(bmp280_common_probe);
-#ifdef CONFIG_PM
static int bmp280_runtime_suspend(struct device *dev)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
@@ -1159,15 +1158,9 @@ static int bmp280_runtime_resume(struct device *dev)
usleep_range(data->start_up_time, data->start_up_time + 100);
return data->chip_info->chip_config(data);
}
-#endif /* CONFIG_PM */
-const struct dev_pm_ops bmp280_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
- pm_runtime_force_resume)
- SET_RUNTIME_PM_OPS(bmp280_runtime_suspend,
- bmp280_runtime_resume, NULL)
-};
-EXPORT_SYMBOL(bmp280_dev_pm_ops);
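+/* paired with pm_ptr() in the bus drivers so the ops drop out when CONFIG_PM=n */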
+EXPORT_RUNTIME_DEV_PM_OPS(bmp280_dev_pm_ops, bmp280_runtime_suspend,
+ bmp280_runtime_resume, NULL);
MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor");
diff --git a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c
index 8b03ea15c0d0..35045bd92846 100644
--- a/drivers/iio/pressure/bmp280-i2c.c
+++ b/drivers/iio/pressure/bmp280-i2c.c
@@ -58,7 +58,7 @@ static struct i2c_driver bmp280_i2c_driver = {
.driver = {
.name = "bmp280",
.of_match_table = bmp280_of_i2c_match,
- .pm = &bmp280_dev_pm_ops,
+ .pm = pm_ptr(&bmp280_dev_pm_ops),
},
.probe = bmp280_i2c_probe,
.id_table = bmp280_i2c_id,
diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c
index 625b86878ad8..41f6cc56d229 100644
--- a/drivers/iio/pressure/bmp280-spi.c
+++ b/drivers/iio/pressure/bmp280-spi.c
@@ -109,7 +109,7 @@ static struct spi_driver bmp280_spi_driver = {
.driver = {
.name = "bmp280",
.of_match_table = bmp280_of_spi_match,
- .pm = &bmp280_dev_pm_ops,
+ .pm = pm_ptr(&bmp280_dev_pm_ops),
},
.id_table = bmp280_spi_id,
.probe = bmp280_spi_probe,
diff --git a/drivers/iio/pressure/mpl3115.c b/drivers/iio/pressure/mpl3115.c
index 1eb9e7b29e05..e95b9a5475b4 100644
--- a/drivers/iio/pressure/mpl3115.c
+++ b/drivers/iio/pressure/mpl3115.c
@@ -74,7 +74,6 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct mpl3115_data *data = iio_priv(indio_dev);
- __be32 tmp = 0;
int ret;
switch (mask) {
@@ -84,7 +83,9 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
return ret;
switch (chan->type) {
- case IIO_PRESSURE: /* in 0.25 pascal / LSB */
+ case IIO_PRESSURE: { /* in 0.25 pascal / LSB */
+ __be32 tmp = 0;
+
mutex_lock(&data->lock);
ret = mpl3115_request(data);
if (ret < 0) {
@@ -96,10 +97,13 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
if (ret < 0)
break;
- *val = be32_to_cpu(tmp) >> 12;
+ *val = be32_to_cpu(tmp) >> chan->scan_type.shift;
ret = IIO_VAL_INT;
break;
- case IIO_TEMP: /* in 0.0625 celsius / LSB */
+ }
+ case IIO_TEMP: { /* in 0.0625 celsius / LSB */
+ __be16 tmp;
+
mutex_lock(&data->lock);
ret = mpl3115_request(data);
if (ret < 0) {
@@ -111,9 +115,11 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&data->lock);
if (ret < 0)
break;
- *val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
+ *val = sign_extend32(be16_to_cpu(tmp) >> chan->scan_type.shift,
+ chan->scan_type.realbits - 1);
ret = IIO_VAL_INT;
break;
+ }
default:
ret = -EINVAL;
break;
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index 86b1c4b1820d..cbc9349c342a 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -50,9 +50,9 @@ struct ms5611_state {
const struct ms5611_osr *pressure_osr;
const struct ms5611_osr *temp_osr;
- int (*reset)(struct device *dev);
- int (*read_prom_word)(struct device *dev, int index, u16 *word);
- int (*read_adc_temp_and_pressure)(struct device *dev,
+ int (*reset)(struct ms5611_state *st);
+ int (*read_prom_word)(struct ms5611_state *st, int index, u16 *word);
+ int (*read_adc_temp_and_pressure)(struct ms5611_state *st,
s32 *temp, s32 *pressure);
struct ms5611_chip_info *chip_info;
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index ee75f08655c9..a4d0b54cde9b 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -85,8 +85,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev)
struct ms5611_state *st = iio_priv(indio_dev);
for (i = 0; i < MS5611_PROM_WORDS_NB; i++) {
- ret = st->read_prom_word(&indio_dev->dev,
- i, &st->chip_info->prom[i]);
+ ret = st->read_prom_word(st, i, &st->chip_info->prom[i]);
if (ret < 0) {
dev_err(&indio_dev->dev,
"failed to read prom at %d\n", i);
@@ -108,7 +107,7 @@ static int ms5611_read_temp_and_pressure(struct iio_dev *indio_dev,
int ret;
struct ms5611_state *st = iio_priv(indio_dev);
- ret = st->read_adc_temp_and_pressure(&indio_dev->dev, temp, pressure);
+ ret = st->read_adc_temp_and_pressure(st, temp, pressure);
if (ret < 0) {
dev_err(&indio_dev->dev,
"failed to read temperature and pressure\n");
@@ -196,7 +195,7 @@ static int ms5611_reset(struct iio_dev *indio_dev)
int ret;
struct ms5611_state *st = iio_priv(indio_dev);
- ret = st->reset(&indio_dev->dev);
+ ret = st->reset(st);
if (ret < 0) {
dev_err(&indio_dev->dev, "failed to reset device\n");
return ret;
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 5c82d80f85b6..1047a85527a9 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -20,17 +20,15 @@
#include "ms5611.h"
-static int ms5611_i2c_reset(struct device *dev)
+static int ms5611_i2c_reset(struct ms5611_state *st)
{
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
-
return i2c_smbus_write_byte(st->client, MS5611_RESET);
}
-static int ms5611_i2c_read_prom_word(struct device *dev, int index, u16 *word)
+static int ms5611_i2c_read_prom_word(struct ms5611_state *st, int index,
+ u16 *word)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
ret = i2c_smbus_read_word_swapped(st->client,
MS5611_READ_PROM_WORD + (index << 1));
@@ -57,11 +55,10 @@ static int ms5611_i2c_read_adc(struct ms5611_state *st, s32 *val)
return 0;
}
-static int ms5611_i2c_read_adc_temp_and_pressure(struct device *dev,
+static int ms5611_i2c_read_adc_temp_and_pressure(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
const struct ms5611_osr *osr = st->temp_osr;
ret = i2c_smbus_write_byte(st->client, osr->cmd);
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 79bed64c9b68..9fa2dcd71760 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -15,18 +15,17 @@
#include "ms5611.h"
-static int ms5611_spi_reset(struct device *dev)
+static int ms5611_spi_reset(struct ms5611_state *st)
{
u8 cmd = MS5611_RESET;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
return spi_write_then_read(st->client, &cmd, 1, NULL, 0);
}
-static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word)
+static int ms5611_spi_read_prom_word(struct ms5611_state *st, int index,
+ u16 *word)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
ret = spi_w8r16be(st->client, MS5611_READ_PROM_WORD + (index << 1));
if (ret < 0)
@@ -37,11 +36,10 @@ static int ms5611_spi_read_prom_word(struct device *dev, int index, u16 *word)
return 0;
}
-static int ms5611_spi_read_adc(struct device *dev, s32 *val)
+static int ms5611_spi_read_adc(struct ms5611_state *st, s32 *val)
{
int ret;
u8 buf[3] = { MS5611_READ_ADC };
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
ret = spi_write_then_read(st->client, buf, 1, buf, 3);
if (ret < 0)
@@ -52,11 +50,10 @@ static int ms5611_spi_read_adc(struct device *dev, s32 *val)
return 0;
}
-static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
+static int ms5611_spi_read_adc_temp_and_pressure(struct ms5611_state *st,
s32 *temp, s32 *pressure)
{
int ret;
- struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
const struct ms5611_osr *osr = st->temp_osr;
/*
@@ -68,7 +65,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
return ret;
usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
- ret = ms5611_spi_read_adc(dev, temp);
+ ret = ms5611_spi_read_adc(st, temp);
if (ret < 0)
return ret;
@@ -78,7 +75,7 @@ static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
return ret;
usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
- return ms5611_spi_read_adc(dev, pressure);
+ return ms5611_spi_read_adc(st, pressure);
}
static int ms5611_spi_probe(struct spi_device *spi)
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 3797a8f54276..51f4f92ae84a 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -133,7 +133,7 @@ static ssize_t as3935_sensor_sensitivity_store(struct device *dev,
unsigned long val;
int ret;
- ret = kstrtoul((const char *) buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return -EINVAL;
@@ -238,9 +238,6 @@ err_read:
return IRQ_HANDLED;
}
-static const struct iio_trigger_ops iio_interrupt_trigger_ops = {
-};
-
static void as3935_event_work(struct work_struct *work)
{
struct as3935_state *st;
@@ -417,7 +414,6 @@ static int as3935_probe(struct spi_device *spi)
st->trig = trig;
st->noise_tripped = jiffies - HZ;
iio_trigger_set_drvdata(trig, indio_dev);
- trig->ops = &iio_interrupt_trigger_ops;
ret = devm_iio_trigger_register(dev, trig);
if (ret) {
diff --git a/drivers/iio/test/iio-test-format.c b/drivers/iio/test/iio-test-format.c
index f1e951eddb43..237321436b83 100644
--- a/drivers/iio/test/iio-test-format.c
+++ b/drivers/iio/test/iio-test-format.c
@@ -14,10 +14,13 @@
static void iio_test_iio_format_value_integer(struct kunit *test)
{
- char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ char *buf;
int val;
int ret;
+ buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
val = 42;
ret = iio_format_value(buf, IIO_VAL_INT, 1, &val);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "42\n");
@@ -41,153 +44,219 @@ static void iio_test_iio_format_value_integer(struct kunit *test)
static void iio_test_iio_format_value_fixedpoint(struct kunit *test)
{
- char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
int values[2];
+ char *buf;
int ret;
+ buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
/* positive >= 1 */
values[0] = 1;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1.000010\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1.000010 dB\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1.000000010\n");
/* positive < 1 */
values[0] = 0;
values[1] = 12;
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000012\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000012 dB\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000000012\n");
/* negative <= -1 */
values[0] = -1;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1.000010\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1.000010 dB\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1.000000010\n");
/* negative > -1 */
values[0] = 0;
values[1] = -123;
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000123\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO_DB, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000123 dB\n");
- ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_INT_PLUS_NANO, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000000123\n");
}
static void iio_test_iio_format_value_fractional(struct kunit *test)
{
- char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
int values[2];
+ char *buf;
int ret;
+ buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
/* positive < 1 */
values[0] = 1;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.100000000\n");
/* positive >= 1 */
values[0] = 100;
values[1] = 3;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "33.333333333\n");
/* negative > -1 */
values[0] = -1;
values[1] = 1000000000;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.000000001\n");
/* negative <= -1 */
values[0] = -200;
values[1] = 3;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-66.666666666\n");
/* Zero */
values[0] = 0;
values[1] = -10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000000000\n");
}
static void iio_test_iio_format_value_fractional_log2(struct kunit *test)
{
- char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
int values[2];
+ char *buf;
int ret;
+ buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
/* positive < 1 */
values[0] = 123;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.120117187\n");
/* positive >= 1 */
values[0] = 1234567;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1205.631835937\n");
/* negative > -1 */
values[0] = -123;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-0.120117187\n");
/* negative <= -1 */
values[0] = -1234567;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-1205.631835937\n");
/* Zero */
values[0] = 0;
values[1] = 10;
- ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, 2, values);
+ ret = iio_format_value(buf, IIO_VAL_FRACTIONAL_LOG2, ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0.000000000\n");
}
static void iio_test_iio_format_value_multiple(struct kunit *test)
{
- char *buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
int values[] = {1, -2, 3, -4, 5};
+ char *buf;
int ret;
+ buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
ret = iio_format_value(buf, IIO_VAL_INT_MULTIPLE,
ARRAY_SIZE(values), values);
IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "1 -2 3 -4 5 \n");
}
+static void iio_test_iio_format_value_integer_64(struct kunit *test)
+{
+ int values[2];
+ s64 value;
+ char *buf;
+ int ret;
+
+ buf = kunit_kmalloc(test, PAGE_SIZE, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+
+ value = 24;
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "24\n");
+
+ value = -24;
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-24\n");
+
+ value = 0;
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "0\n");
+
+ value = UINT_MAX;
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "4294967295\n");
+
+ value = -((s64)UINT_MAX);
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-4294967295\n");
+
+ value = LLONG_MAX;
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "9223372036854775807\n");
+
+ value = LLONG_MIN;
+ values[0] = lower_32_bits(value);
+ values[1] = upper_32_bits(value);
+ ret = iio_format_value(buf, IIO_VAL_INT_64, ARRAY_SIZE(values), values);
+ IIO_TEST_FORMAT_EXPECT_EQ(test, buf, ret, "-9223372036854775808\n");
+}
+
static struct kunit_case iio_format_test_cases[] = {
KUNIT_CASE(iio_test_iio_format_value_integer),
KUNIT_CASE(iio_test_iio_format_value_fixedpoint),
KUNIT_CASE(iio_test_iio_format_value_fractional),
KUNIT_CASE(iio_test_iio_format_value_fractional_log2),
KUNIT_CASE(iio_test_iio_format_value_multiple),
+ KUNIT_CASE(iio_test_iio_format_value_integer_64),
{}
};
diff --git a/drivers/iio/trigger/iio-trig-interrupt.c b/drivers/iio/trigger/iio-trig-interrupt.c
index f746c460bf2a..5f49cd105fae 100644
--- a/drivers/iio/trigger/iio-trig-interrupt.c
+++ b/drivers/iio/trigger/iio-trig-interrupt.c
@@ -25,9 +25,6 @@ static irqreturn_t iio_interrupt_trigger_poll(int irq, void *private)
return IRQ_HANDLED;
}
-static const struct iio_trigger_ops iio_interrupt_trigger_ops = {
-};
-
static int iio_interrupt_trigger_probe(struct platform_device *pdev)
{
struct iio_interrupt_trigger_info *trig_info;
@@ -58,7 +55,6 @@ static int iio_interrupt_trigger_probe(struct platform_device *pdev)
}
iio_trigger_set_drvdata(trig, trig_info);
trig_info->irq = irq;
- trig->ops = &iio_interrupt_trigger_ops;
ret = request_irq(irq, iio_interrupt_trigger_poll,
irqflags, trig->name, trig);
if (ret) {
diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c
index e9adfff45b39..2a4b75897910 100644
--- a/drivers/iio/trigger/iio-trig-sysfs.c
+++ b/drivers/iio/trigger/iio-trig-sysfs.c
@@ -124,9 +124,6 @@ static const struct attribute_group *iio_sysfs_trigger_attr_groups[] = {
NULL
};
-static const struct iio_trigger_ops iio_sysfs_trigger_ops = {
-};
-
static int iio_sysfs_trigger_probe(int id)
{
struct iio_sysfs_trig *t;
@@ -156,7 +153,6 @@ static int iio_sysfs_trigger_probe(int id)
}
t->trig->dev.groups = iio_sysfs_trigger_attr_groups;
- t->trig->ops = &iio_sysfs_trigger_ops;
iio_trigger_set_drvdata(t->trig, t);
t->work = IRQ_WORK_INIT_HARD(iio_sysfs_trigger_work);
diff --git a/drivers/iio/trigger/stm32-timer-trigger.c b/drivers/iio/trigger/stm32-timer-trigger.c
index 4353b749ecef..4f9461e1412c 100644
--- a/drivers/iio/trigger/stm32-timer-trigger.c
+++ b/drivers/iio/trigger/stm32-timer-trigger.c
@@ -696,9 +696,9 @@ static const struct iio_chan_spec_ext_info stm32_trigger_count_info[] = {
.write = stm32_count_set_preset
},
IIO_ENUM("enable_mode", IIO_SEPARATE, &stm32_enable_mode_enum),
- IIO_ENUM_AVAILABLE("enable_mode", &stm32_enable_mode_enum),
+ IIO_ENUM_AVAILABLE("enable_mode", IIO_SHARED_BY_TYPE, &stm32_enable_mode_enum),
IIO_ENUM("trigger_mode", IIO_SEPARATE, &stm32_trigger_mode_enum),
- IIO_ENUM_AVAILABLE("trigger_mode", &stm32_trigger_mode_enum),
+ IIO_ENUM_AVAILABLE("trigger_mode", IIO_SHARED_BY_TYPE, &stm32_trigger_mode_enum),
{}
};
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index b79f816a7203..f6aa1a964573 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -956,7 +956,7 @@ int rdma_query_gid(struct ib_device *device, u32 port_num,
{
struct ib_gid_table *table;
unsigned long flags;
- int res = -EINVAL;
+ int res;
if (!rdma_is_port_valid(device, port_num))
return -EINVAL;
@@ -964,9 +964,15 @@ int rdma_query_gid(struct ib_device *device, u32 port_num,
table = rdma_gid_table(device, port_num);
read_lock_irqsave(&table->rwlock, flags);
- if (index < 0 || index >= table->sz ||
- !is_gid_entry_valid(table->data_vec[index]))
+ if (index < 0 || index >= table->sz) {
+ res = -EINVAL;
goto done;
+ }
+
+ if (!is_gid_entry_valid(table->data_vec[index])) {
+ res = -ENOENT;
+ goto done;
+ }
memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid));
res = 0;
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index c903b74f46a4..35f0d5e7533d 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3322,7 +3322,7 @@ static int cm_lap_handler(struct cm_work *work)
ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av);
if (ret) {
rdma_destroy_ah_attr(&ah_attr);
- return -EINVAL;
+ goto deref;
}
spin_lock_irq(&cm_id_priv->lock);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 835ac54d4a24..c447526288f4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -67,8 +67,8 @@ static const char * const cma_events[] = {
[RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
};
-static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
- union ib_gid *mgid);
+static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
+ enum ib_gid_type gid_type);
const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
{
@@ -766,6 +766,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
unsigned int p;
u16 pkey, index;
enum ib_port_state port_state;
+ int ret;
int i;
cma_dev = NULL;
@@ -784,9 +785,14 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
continue;
- for (i = 0; !rdma_query_gid(cur_dev->device,
- p, i, &gid);
- i++) {
+
+ for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len;
+ ++i) {
+ ret = rdma_query_gid(cur_dev->device, p, i,
+ &gid);
+ if (ret)
+ continue;
+
if (!memcmp(&gid, dgid, sizeof(gid))) {
cma_dev = cur_dev;
sgid = gid;
@@ -1840,17 +1846,19 @@ static void destroy_mc(struct rdma_id_private *id_priv,
if (dev_addr->bound_dev_if)
ndev = dev_get_by_index(dev_addr->net,
dev_addr->bound_dev_if);
- if (ndev) {
+ if (ndev && !send_only) {
+ enum ib_gid_type gid_type;
union ib_gid mgid;
- cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
- &mgid);
-
- if (!send_only)
- cma_igmp_send(ndev, &mgid, false);
-
- dev_put(ndev);
+ gid_type = id_priv->cma_dev->default_gid_type
+ [id_priv->id.port_num -
+ rdma_start_port(
+ id_priv->cma_dev->device)];
+ cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid,
+ gid_type);
+ cma_igmp_send(ndev, &mgid, false);
}
+ dev_put(ndev);
cancel_work_sync(&mc->iboe_join.work);
}
@@ -4033,8 +4041,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
- req.private_data_len = offset + conn_param->private_data_len;
- if (req.private_data_len < conn_param->private_data_len)
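+ /* check_add_overflow() stores the sum and returns true on wraparound */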
+ if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
return -EINVAL;
if (req.private_data_len) {
@@ -4093,8 +4100,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
- req.private_data_len = offset + conn_param->private_data_len;
- if (req.private_data_len < conn_param->private_data_len)
+ if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len))
return -EINVAL;
if (req.private_data_len) {
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 22a4adda7981..a311df07b1bd 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2461,7 +2461,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
++i) {
ret = rdma_query_gid(device, port, i, &tmp_gid);
if (ret)
- return ret;
+ continue;
+
if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
*port_num = port;
if (index)
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index a3f84b50c46a..84c53bd2a52d 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -433,6 +433,7 @@ static struct attribute *port_default_attrs[] = {
&ib_port_attr_link_layer.attr,
NULL
};
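+/* generates port_default_groups[] for the kobj_type below */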
+ATTRIBUTE_GROUPS(port_default);
static ssize_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf)
{
@@ -774,7 +775,7 @@ static void ib_port_gid_attr_release(struct kobject *kobj)
static struct kobj_type port_type = {
.release = ib_port_release,
.sysfs_ops = &port_sysfs_ops,
- .default_attrs = port_default_attrs
+ .default_groups = port_default_groups,
};
static struct kobj_type gid_attr_type = {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 2b72c4fa9550..9d6ac9dff39a 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -95,6 +95,7 @@ struct ucma_context {
u64 uid;
struct list_head list;
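+ /* protected by multicast_table's xa_lock */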
+ struct list_head mc_list;
struct work_struct close_work;
};
@@ -105,6 +106,7 @@ struct ucma_multicast {
u64 uid;
u8 join_state;
+ struct list_head list;
struct sockaddr_storage addr;
};
@@ -198,6 +200,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
INIT_WORK(&ctx->close_work, ucma_close_id);
init_completion(&ctx->comp);
+ INIT_LIST_HEAD(&ctx->mc_list);
/* So list_del() will work if we don't do ucma_finish_ctx() */
INIT_LIST_HEAD(&ctx->list);
ctx->file = file;
@@ -484,19 +487,19 @@ err1:
static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
- struct ucma_multicast *mc;
- unsigned long index;
+ struct ucma_multicast *mc, *tmp;
- xa_for_each(&multicast_table, index, mc) {
- if (mc->ctx != ctx)
- continue;
+ xa_lock(&multicast_table);
+ list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
+ list_del(&mc->list);
/*
* At this point mc->ctx->ref is 0 so the mc cannot leave the
* lock on the reader and this is enough serialization
*/
- xa_erase(&multicast_table, index);
+ __xa_erase(&multicast_table, mc->id);
kfree(mc);
}
+ xa_unlock(&multicast_table);
}
static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
@@ -1469,12 +1472,16 @@ static ssize_t ucma_process_join(struct ucma_file *file,
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
- if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
+ xa_lock(&multicast_table);
+ if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b,
GFP_KERNEL)) {
ret = -ENOMEM;
goto err_free_mc;
}
+ list_add_tail(&mc->list, &ctx->mc_list);
+ xa_unlock(&multicast_table);
+
mutex_lock(&ctx->mutex);
ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
join_state, mc);
@@ -1500,8 +1507,11 @@ err_leave_multicast:
mutex_unlock(&ctx->mutex);
ucma_cleanup_mc_events(mc);
err_xa_erase:
- xa_erase(&multicast_table, mc->id);
+ xa_lock(&multicast_table);
+ list_del(&mc->list);
+ __xa_erase(&multicast_table, mc->id);
err_free_mc:
+ xa_unlock(&multicast_table);
kfree(mc);
err_put_ctx:
ucma_put_ctx(ctx);
@@ -1569,15 +1579,17 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
mc = ERR_PTR(-EINVAL);
else if (!refcount_inc_not_zero(&mc->ctx->ref))
mc = ERR_PTR(-ENXIO);
- else
- __xa_erase(&multicast_table, mc->id);
- xa_unlock(&multicast_table);
if (IS_ERR(mc)) {
+ xa_unlock(&multicast_table);
ret = PTR_ERR(mc);
goto out;
}
+ list_del(&mc->list);
+ __xa_erase(&multicast_table, mc->id);
+ xa_unlock(&multicast_table);
+
mutex_lock(&mc->ctx->mutex);
rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
mutex_unlock(&mc->ctx->mutex);
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 7a47343d11f9..aead24c1a682 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -227,7 +227,6 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
const struct mmu_interval_notifier_ops *ops)
{
struct ib_umem_odp *umem_odp;
- struct mm_struct *mm;
int ret;
if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
@@ -241,7 +240,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
umem_odp->umem.length = size;
umem_odp->umem.address = addr;
umem_odp->umem.writable = ib_access_writable(access);
- umem_odp->umem.owning_mm = mm = current->mm;
+ umem_odp->umem.owning_mm = current->mm;
umem_odp->notifier.ops = ops;
umem_odp->page_shift = PAGE_SHIFT;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index d1345d76d9b1..6b6393176b3c 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1399,7 +1399,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
IB_SIGNAL_REQ_WR;
attr.qp_type = cmd->qp_type;
- attr.create_flags = 0;
attr.cap.max_send_wr = cmd->max_send_wr;
attr.cap.max_recv_wr = cmd->max_recv_wr;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 29cc0d14399a..3224f18a66e5 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -262,13 +262,12 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str)
int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num,
u16 index, u16 *pkey)
{
- struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ if (index > 0)
+ return -EINVAL;
- /* Ignore port_num */
+ *pkey = IB_DEFAULT_PKEY_FULL;
- memset(pkey, 0, sizeof(*pkey));
- return bnxt_qplib_get_pkey(&rdev->qplib_res,
- &rdev->qplib_res.pkey_tbl, index, pkey);
+ return 0;
}
int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index b44944fb9b24..3d6834d3d4fb 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -893,7 +893,6 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq,
qplib_srq);
struct ib_event ib_event;
- int rc = 0;
ib_event.device = &srq->rdev->ibdev;
ib_event.element.srq = &srq->ib_srq;
@@ -907,7 +906,7 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq,
(*srq->ib_srq.event_handler)(&ib_event,
srq->ib_srq.srq_context);
}
- return rc;
+ return 0;
}
static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index ca88849559bf..96e581ced50e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -46,6 +46,7 @@
#include <linux/delay.h>
#include <linux/prefetch.h>
#include <linux/if_ether.h>
+#include <rdma/ib_mad.h>
#include "roce_hsi.h"
@@ -1232,7 +1233,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_modify_qp req;
struct creq_modify_qp_resp resp;
- u16 cmd_flags = 0, pkey;
+ u16 cmd_flags = 0;
u32 temp32[4];
u32 bmask;
int rc;
@@ -1255,11 +1256,9 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS)
req.access = qp->access;
- if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) {
- if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl,
- qp->pkey_index, &pkey))
- req.pkey = cpu_to_le16(pkey);
- }
+ if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY)
+ req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL);
+
if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY)
req.qkey = cpu_to_le32(qp->qkey);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 3de854727460..061b2895dd9b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -555,7 +555,7 @@ skip_ctx_setup:
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{
- kfree(rcfw->cmdq.cmdq_bitmap);
+ bitmap_free(rcfw->cmdq.cmdq_bitmap);
kfree(rcfw->qp_tbl);
kfree(rcfw->crsqe_tbl);
bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq);
@@ -572,7 +572,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
struct bnxt_qplib_sg_info sginfo = {};
struct bnxt_qplib_cmdq_ctx *cmdq;
struct bnxt_qplib_creq_ctx *creq;
- u32 bmap_size = 0;
rcfw->pdev = res->pdev;
cmdq = &rcfw->cmdq;
@@ -613,13 +612,10 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
if (!rcfw->crsqe_tbl)
goto fail;
- bmap_size = BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long);
- cmdq->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
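+ /* bitmap_zalloc() sizes the allocation from the bit count directly */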
+ cmdq->cmdq_bitmap = bitmap_zalloc(rcfw->cmdq_depth, GFP_KERNEL);
if (!cmdq->cmdq_bitmap)
goto fail;
- cmdq->bmap_size = bmap_size;
-
/* Allocate one extra to hold the QP1 entries */
rcfw->qp_tbl_size = qp_tbl_sz + 1;
rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
@@ -667,8 +663,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
iounmap(cmdq->cmdq_mbox.reg.bar_reg);
iounmap(creq->creq_db.reg.bar_reg);
- indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size);
- if (indx != cmdq->bmap_size)
+ indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth);
+ if (indx != rcfw->cmdq_depth)
dev_err(&rcfw->pdev->dev,
"disabling RCFW with pending cmd-bit %lx\n", indx);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 82faa4e4cda8..0a3d8e7da3d4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -152,7 +152,6 @@ struct bnxt_qplib_cmdq_ctx {
wait_queue_head_t waitq;
unsigned long flags;
unsigned long *cmdq_bitmap;
- u32 bmap_size;
u32 seq_num;
};
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index bc1ba4b51ba4..126d4f26f75a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -649,31 +649,6 @@ static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl,
memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max);
}
-static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl)
-{
- if (!pkey_tbl->tbl)
- dev_dbg(&res->pdev->dev, "PKEY tbl not present\n");
- else
- kfree(pkey_tbl->tbl);
-
- pkey_tbl->tbl = NULL;
- pkey_tbl->max = 0;
- pkey_tbl->active = 0;
-}
-
-static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl,
- u16 max)
-{
- pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL);
- if (!pkey_tbl->tbl)
- return -ENOMEM;
-
- pkey_tbl->max = max;
- return 0;
-};
-
/* PDs */
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd)
{
@@ -843,24 +818,6 @@ unmap_io:
return -ENOMEM;
}
-/* PKEYs */
-static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl)
-{
- memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
- pkey_tbl->active = 0;
-}
-
-static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl)
-{
- u16 pkey = 0xFFFF;
-
- memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max);
-
- /* pkey default = 0xFFFF */
- bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false);
-}
-
/* Stats */
static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
struct bnxt_qplib_stats *stats)
@@ -891,21 +848,18 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res)
{
- bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl);
bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl);
}
int bnxt_qplib_init_res(struct bnxt_qplib_res *res)
{
bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev);
- bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl);
return 0;
}
void bnxt_qplib_free_res(struct bnxt_qplib_res *res)
{
- bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl);
bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl);
bnxt_qplib_free_pd_tbl(&res->pd_tbl);
bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl);
@@ -924,10 +878,6 @@ int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev,
if (rc)
goto fail;
- rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey);
- if (rc)
- goto fail;
-
rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd);
if (rc)
goto fail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index e1411a2352a7..982e2c96dac2 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -185,12 +185,6 @@ struct bnxt_qplib_sgid_tbl {
u8 *vlan;
};
-struct bnxt_qplib_pkey_tbl {
- u16 *tbl;
- u16 max;
- u16 active;
-};
-
struct bnxt_qplib_dpi {
u32 dpi;
void __iomem *dbr;
@@ -258,7 +252,6 @@ struct bnxt_qplib_res {
struct bnxt_qplib_rcfw *rcfw;
struct bnxt_qplib_pd_tbl pd_tbl;
struct bnxt_qplib_sgid_tbl sgid_tbl;
- struct bnxt_qplib_pkey_tbl pkey_tbl;
struct bnxt_qplib_dpi_tbl dpi_tbl;
bool prio;
bool is_vf;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 379e715ebd30..b802981b7171 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -146,17 +146,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->max_srq = le16_to_cpu(sb->max_srq);
attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1;
attr->max_srq_sges = sb->max_srq_sge;
- attr->max_pkey = le32_to_cpu(sb->max_pkeys);
- /*
- * Some versions of FW reports more than 0xFFFF.
- * Restrict it for now to 0xFFFF to avoid
- * reporting trucated value
- */
- if (attr->max_pkey > 0xFFFF) {
- /* ib_port_attr::pkey_tbl_len is u16 */
- attr->max_pkey = 0xFFFF;
- }
-
+ attr->max_pkey = 1;
attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
attr->l2_db_size = (sb->l2_db_space_size + 1) *
(0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
@@ -414,93 +404,6 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
return rc;
}
-/* pkeys */
-int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
- u16 *pkey)
-{
- if (index == 0xFFFF) {
- *pkey = 0xFFFF;
- return 0;
- }
- if (index >= pkey_tbl->max) {
- dev_err(&res->pdev->dev,
- "Index %d exceeded PKEY table max (%d)\n",
- index, pkey_tbl->max);
- return -EINVAL;
- }
- memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey));
- return 0;
-}
-
-int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
- bool update)
-{
- int i, rc = 0;
-
- if (!pkey_tbl) {
- dev_err(&res->pdev->dev, "PKEY table not allocated\n");
- return -EINVAL;
- }
-
- /* Do we need a pkey_lock here? */
- if (!pkey_tbl->active) {
- dev_err(&res->pdev->dev, "PKEY table has no active entries\n");
- return -ENOMEM;
- }
- for (i = 0; i < pkey_tbl->max; i++) {
- if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
- break;
- }
- if (i == pkey_tbl->max) {
- dev_err(&res->pdev->dev,
- "PKEY 0x%04x not found in the pkey table\n", *pkey);
- return -ENOMEM;
- }
- memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey));
- pkey_tbl->active--;
-
- /* unlock */
- return rc;
-}
-
-int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
- bool update)
-{
- int i, free_idx, rc = 0;
-
- if (!pkey_tbl) {
- dev_err(&res->pdev->dev, "PKEY table not allocated\n");
- return -EINVAL;
- }
-
- /* Do we need a pkey_lock here? */
- if (pkey_tbl->active == pkey_tbl->max) {
- dev_err(&res->pdev->dev, "PKEY table is full\n");
- return -ENOMEM;
- }
- free_idx = pkey_tbl->max;
- for (i = 0; i < pkey_tbl->max; i++) {
- if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey)))
- return -EALREADY;
- else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max)
- free_idx = i;
- }
- if (free_idx == pkey_tbl->max) {
- dev_err(&res->pdev->dev,
- "PKEY table is FULL but count is not MAX??\n");
- return -ENOMEM;
- }
- /* Add PKEY to the pkey_tbl */
- memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey));
- pkey_tbl->active++;
-
- /* unlock */
- return rc;
-}
-
/* AH */
int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
bool block)
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index a18f568cb23e..5939e8fc8353 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -255,15 +255,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
const u8 *smac);
-int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
- u16 *pkey);
-int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
- bool update);
-int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res,
- struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey,
- bool update);
int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_dev_attr *attr, bool vf);
int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res,
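
Taken together, the qplib_res.c/qplib_sp.c/qplib_sp.h hunks delete the driver-side P_Key table outright: RoCE exposes exactly one partition key (the full-membership default 0xFFFF), so max_pkey is pinned to 1 and the add/del/get helpers have no callers left. A hedged sketch of how a pkey query reduces under that model (names are illustrative; the matching verbs-layer change is not part of these hunks):

#include <linux/errno.h>
#include <rdma/ib_verbs.h>

#define EXAMPLE_DEFAULT_PKEY 0xFFFF	/* full-membership default */

static int example_query_pkey(struct ib_device *ibdev, u32 port_num,
			      u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;	/* table length is 1 */

	*pkey = EXAMPLE_DEFAULT_PKEY;
	return 0;
}
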
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 913f39ee4416..c16017f6e8db 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -2471,7 +2471,8 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
skb_get(skb);
rpl = cplhdr(skb);
if (!is_t4(adapter_type)) {
- skb_trim(skb, roundup(sizeof(*rpl5), 16));
+ BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16));
+ skb_trim(skb, sizeof(*rpl5));
rpl5 = (void *)rpl;
INIT_TP_WR(rpl5, ep->hwtid);
} else {
@@ -2487,7 +2488,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
opt2 |= T5_ISS_F;
rpl5 = (void *)rpl;
- memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
+ memset_after(rpl5, 0, iss);
if (peer2peer)
isn += 4;
rpl5->iss = cpu_to_be32(isn);
diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c
index 724d23297b35..f64e7e02b129 100644
--- a/drivers/infiniband/hw/cxgb4/id_table.c
+++ b/drivers/infiniband/hw/cxgb4/id_table.c
@@ -59,7 +59,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc)
alloc->last = obj + 1;
if (alloc->last >= alloc->max)
alloc->last = 0;
- set_bit(obj, alloc->table);
+ __set_bit(obj, alloc->table);
obj += alloc->start;
} else
obj = -1;
@@ -75,37 +75,32 @@ void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj)
obj -= alloc->start;
spin_lock_irqsave(&alloc->lock, flags);
- clear_bit(obj, alloc->table);
+ __clear_bit(obj, alloc->table);
spin_unlock_irqrestore(&alloc->lock, flags);
}
int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num,
u32 reserved, u32 flags)
{
- int i;
-
alloc->start = start;
alloc->flags = flags;
if (flags & C4IW_ID_TABLE_F_RANDOM)
alloc->last = prandom_u32() % RANDOM_SKIP;
else
alloc->last = 0;
- alloc->max = num;
+ alloc->max = num;
spin_lock_init(&alloc->lock);
- alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long),
- GFP_KERNEL);
+ alloc->table = bitmap_zalloc(num, GFP_KERNEL);
if (!alloc->table)
return -ENOMEM;
- bitmap_zero(alloc->table, num);
if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY))
- for (i = 0; i < reserved; ++i)
- set_bit(i, alloc->table);
+ bitmap_set(alloc->table, 0, reserved);
return 0;
}
void c4iw_id_table_free(struct c4iw_id_table *alloc)
{
- kfree(alloc->table);
+ bitmap_free(alloc->table);
}
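
id_table.c gets two related cleanups: allocation goes through bitmap_zalloc()/bitmap_free() with bitmap_set() replacing the reserve loop, and set_bit()/clear_bit() drop to their non-atomic __ forms because every access is already serialized by alloc->lock, so the locked read-modify-write buys nothing. Sketch of the pattern, names illustrative:

#include <linux/bitmap.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_ids {
	spinlock_t lock;
	unsigned long *table;
};

static unsigned long *example_table_init(unsigned int num, unsigned int reserved)
{
	unsigned long *table = bitmap_zalloc(num, GFP_KERNEL);

	if (table)
		bitmap_set(table, 0, reserved);	/* mark ids [0, reserved) busy */
	return table;
}

static void example_reserve(struct example_ids *ids, unsigned int id)
{
	unsigned long flags;

	spin_lock_irqsave(&ids->lock, flags);
	__set_bit(id, ids->table);	/* non-atomic: the lock serializes us */
	spin_unlock_irqrestore(&ids->lock, flags);
}
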
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 0c8fd5a85fcb..89f36a3a9af0 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -41,6 +41,7 @@
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/inetdevice.h>
+#include <net/addrconf.h>
#include <linux/io.h>
#include <asm/irq.h>
@@ -264,7 +265,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
return -EINVAL;
dev = to_c4iw_dev(ibdev);
- memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+ addrconf_addr_eui48((u8 *)&props->sys_image_guid,
+ dev->rdev.lldi.ports[0]->dev_addr);
props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);
props->fw_ver = dev->rdev.lldi.fw_vers;
props->device_cap_flags = dev->device_cap_flags;
@@ -525,8 +527,8 @@ void c4iw_register_device(struct work_struct *work)
struct c4iw_dev *dev = ctx->dev;
pr_debug("c4iw_dev %p\n", dev);
- memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
- memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);
+ addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
+ dev->rdev.lldi.ports[0]->dev_addr);
dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW;
if (fastreg_support)
dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index d20b4ef2c853..ffbd9a89981e 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -2460,6 +2460,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
memset(attr, 0, sizeof(*attr));
memset(init_attr, 0, sizeof(*init_attr));
attr->qp_state = to_ib_qp_state(qhp->attr.state);
+ attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
index 909122934246..aec60d4888eb 100644
--- a/drivers/infiniband/hw/hfi1/ipoib.h
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -55,7 +55,7 @@ union hfi1_ipoib_flow {
*/
struct ipoib_txreq {
struct sdma_txreq txreq;
- struct hfi1_sdma_header sdma_hdr;
+ struct hfi1_sdma_header *sdma_hdr;
int sdma_status;
int complete;
struct hfi1_ipoib_dev_priv *priv;
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
index e1a2b02bbd91..5d814afdf7f3 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_main.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -22,26 +22,35 @@ static int hfi1_ipoib_dev_init(struct net_device *dev)
int ret;
dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
ret = priv->netdev_ops->ndo_init(dev);
if (ret)
- return ret;
+ goto out_ret;
ret = hfi1_netdev_add_data(priv->dd,
qpn_from_mac(priv->netdev->dev_addr),
dev);
if (ret < 0) {
priv->netdev_ops->ndo_uninit(dev);
- return ret;
+ goto out_ret;
}
return 0;
+out_ret:
+ free_percpu(dev->tstats);
+ dev->tstats = NULL;
+ return ret;
}
static void hfi1_ipoib_dev_uninit(struct net_device *dev)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ free_percpu(dev->tstats);
+ dev->tstats = NULL;
+
hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr));
priv->netdev_ops->ndo_uninit(dev);
@@ -166,12 +175,7 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
hfi1_ipoib_rxq_deinit(priv->netdev);
free_percpu(dev->tstats);
-}
-
-static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev)
-{
- hfi1_ipoib_netdev_dtor(dev);
- free_netdev(dev);
+ dev->tstats = NULL;
}
static void hfi1_ipoib_set_id(struct net_device *dev, int id)
@@ -211,24 +215,23 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device,
priv->port_num = port_num;
priv->netdev_ops = netdev->netdev_ops;
- netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
-
ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey);
rc = hfi1_ipoib_txreq_init(priv);
if (rc) {
dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc);
- hfi1_ipoib_free_rdma_netdev(netdev);
return rc;
}
rc = hfi1_ipoib_rxq_init(netdev);
if (rc) {
dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc);
- hfi1_ipoib_free_rdma_netdev(netdev);
+ hfi1_ipoib_txreq_deinit(priv);
return rc;
}
+ netdev->netdev_ops = &hfi1_ipoib_netdev_ops;
+
netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
netdev->needs_free_netdev = true;
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index f4010890309f..d6bbdb8fcb50 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -122,7 +122,7 @@ static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
dd_dev_warn(priv->dd,
"%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n",
__func__, tx->sdma_status,
- le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx,
+ le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx,
tx->txq->sde->this_idx);
}
@@ -231,7 +231,7 @@ static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
{
struct hfi1_devdata *dd = txp->dd;
struct sdma_txreq *txreq = &tx->txreq;
- struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
u16 pkt_bytes =
sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len;
int ret;
@@ -256,7 +256,7 @@ static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
struct ipoib_txparms *txp)
{
struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
- struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
+ struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr;
struct sk_buff *skb = tx->skb;
struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
struct rdma_ah_attr *ah_attr = txp->ah_attr;
@@ -483,7 +483,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
if (likely(!ret)) {
tx_ok:
trace_sdma_output_ibhdr(txq->priv->dd,
- &tx->sdma_hdr.hdr,
+ &tx->sdma_hdr->hdr,
ib_is_sc5(txp->flow.sc5));
hfi1_ipoib_check_queue_depth(txq);
return NETDEV_TX_OK;
@@ -547,7 +547,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
hfi1_ipoib_check_queue_depth(txq);
trace_sdma_output_ibhdr(txq->priv->dd,
- &tx->sdma_hdr.hdr,
+ &tx->sdma_hdr->hdr,
ib_is_sc5(txp->flow.sc5));
if (!netdev_xmit_more())
@@ -683,7 +683,8 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
{
struct net_device *dev = priv->netdev;
u32 tx_ring_size, tx_item_size;
- int i;
+ struct hfi1_ipoib_circ_buf *tx_ring;
+ int i, j;
/*
* Ring holds 1 less than tx_ring_size
@@ -701,7 +702,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
for (i = 0; i < dev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+ struct ipoib_txreq *tx;
+ tx_ring = &txq->tx_ring;
iowait_init(&txq->wait,
0,
hfi1_ipoib_flush_txq,
@@ -725,14 +728,19 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
priv->dd->node);
txq->tx_ring.items =
- kcalloc_node(tx_ring_size, tx_item_size,
- GFP_KERNEL, priv->dd->node);
+ kvzalloc_node(array_size(tx_ring_size, tx_item_size),
+ GFP_KERNEL, priv->dd->node);
if (!txq->tx_ring.items)
goto free_txqs;
txq->tx_ring.max_items = tx_ring_size;
- txq->tx_ring.shift = ilog2(tx_ring_size);
+ txq->tx_ring.shift = ilog2(tx_item_size);
txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
+ tx_ring = &txq->tx_ring;
+ for (j = 0; j < tx_ring_size; j++)
+ hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr =
+ kzalloc_node(sizeof(*tx->sdma_hdr),
+ GFP_KERNEL, priv->dd->node);
netif_tx_napi_add(dev, &txq->napi,
hfi1_ipoib_poll_tx_ring,
@@ -746,7 +754,10 @@ free_txqs:
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
netif_napi_del(&txq->napi);
- kfree(txq->tx_ring.items);
+ tx_ring = &txq->tx_ring;
+ for (j = 0; j < tx_ring_size; j++)
+ kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
+ kvfree(tx_ring->items);
}
kfree(priv->txqs);
@@ -780,17 +791,20 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
{
- int i;
+ int i, j;
for (i = 0; i < priv->netdev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
iowait_cancel_work(&txq->wait);
iowait_sdma_drain(&txq->wait);
hfi1_ipoib_drain_tx_list(txq);
netif_napi_del(&txq->napi);
hfi1_ipoib_drain_tx_ring(txq);
- kfree(txq->tx_ring.items);
+ for (j = 0; j < tx_ring->max_items; j++)
+ kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr);
+ kvfree(tx_ring->items);
}
kfree(priv->txqs);
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 5b11c8282744..a71c5a36ceba 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -161,9 +161,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
if (!pq->reqs)
goto pq_reqs_nomem;
- pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
- sizeof(*pq->req_in_use),
- GFP_KERNEL);
+ pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL);
if (!pq->req_in_use)
goto pq_reqs_no_in_use;
@@ -210,7 +208,7 @@ cq_comps_nomem:
cq_nomem:
kmem_cache_destroy(pq->txreq_cache);
pq_txreq_nomem:
- kfree(pq->req_in_use);
+ bitmap_free(pq->req_in_use);
pq_reqs_no_in_use:
kfree(pq->reqs);
pq_reqs_nomem:
@@ -257,7 +255,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
pq->wait,
!atomic_read(&pq->n_reqs));
kfree(pq->reqs);
- kfree(pq->req_in_use);
+ bitmap_free(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
flush_pq_iowait(pq);
kfree(pq);
diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig
index 18d10ebf900b..ab3fbba70789 100644
--- a/drivers/infiniband/hw/hns/Kconfig
+++ b/drivers/infiniband/hw/hns/Kconfig
@@ -5,22 +5,9 @@ config INFINIBAND_HNS
depends on ARM64 || (COMPILE_TEST && 64BIT)
depends on (HNS_DSAF && HNS_ENET) || HNS3
help
- This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
- is used in Hisilicon Hip06 and more further ICT SoC based on
- platform device.
+ This is a RoCE/RDMA driver for the Hisilicon RoCE engine.
- To compile HIP06 or HIP08 driver as module, choose M here.
-
-config INFINIBAND_HNS_HIP06
- bool "Hisilicon Hip06 Family RoCE support"
- depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
- depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y)
- help
- RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
- Hip07 SoC. These RoCE engines are platform devices.
-
- To compile this driver, choose Y here: if INFINIBAND_HNS is m, this
- module will be called hns-roce-hw-v1
+ To compile the HIP08 driver as a module, choose M here.
config INFINIBAND_HNS_HIP08
bool "Hisilicon Hip08 Family RoCE support"
diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile
index e105945b94a1..9f04f25d9631 100644
--- a/drivers/infiniband/hw/hns/Makefile
+++ b/drivers/infiniband/hw/hns/Makefile
@@ -9,11 +9,6 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o
-ifdef CONFIG_INFINIBAND_HNS_HIP06
-hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs)
-obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o
-endif
-
ifdef CONFIG_INFINIBAND_HNS_HIP08
hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs)
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o
diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
index cc258edec331..492b122d0521 100644
--- a/drivers/infiniband/hw/hns/hns_roce_ah.c
+++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
@@ -61,7 +60,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
struct hns_roce_ah *ah = to_hr_ah(ibah);
int ret = 0;
- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && udata)
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata)
return -EOPNOTSUPP;
ah->av.port = rdma_ah_get_port_num(ah_attr);
@@ -80,7 +79,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr,
memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN);
/* HIP08 needs to record vlan info in Address Vector */
- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) {
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr,
&ah->av.vlan_id, NULL);
if (ret)
diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c
index d4fa0fd52294..11a78ceae568 100644
--- a/drivers/infiniband/hw/hns/hns_roce_alloc.c
+++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c
@@ -31,10 +31,9 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <linux/vmalloc.h>
-#include "hns_roce_device.h"
#include <rdma/ib_umem.h>
+#include "hns_roce_device.h"
void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c
index 84f3f2b5f097..4b693d542ace 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c
@@ -31,7 +31,6 @@
*/
#include <linux/dmapool.h>
-#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_cmd.h"
@@ -61,7 +60,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
CMD_POLL_TOKEN, 0);
if (ret) {
dev_err_ratelimited(hr_dev->dev,
- "failed to post mailbox %x in poll mode, ret = %d.\n",
+ "failed to post mailbox 0x%x in poll mode, ret = %d.\n",
op, ret);
return ret;
}
@@ -91,7 +90,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status,
if (unlikely(token != context->token)) {
dev_err_ratelimited(hr_dev->dev,
- "[cmd] invalid ae token %x,context token is %x!\n",
+ "[cmd] invalid ae token 0x%x, context token is 0x%x.\n",
token, context->token);
return;
}
@@ -130,14 +129,14 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
context->token, 1);
if (ret) {
dev_err_ratelimited(dev,
- "failed to post mailbox %x in event mode, ret = %d.\n",
+ "failed to post mailbox 0x%x in event mode, ret = %d.\n",
op, ret);
goto out;
}
if (!wait_for_completion_timeout(&context->done,
msecs_to_jiffies(timeout))) {
- dev_err_ratelimited(dev, "[cmd] token %x mailbox %x timeout.\n",
+ dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n",
context->token, op);
ret = -EBUSY;
goto out;
@@ -145,7 +144,7 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
ret = context->result;
if (ret)
- dev_err_ratelimited(dev, "[cmd] token %x mailbox %x error %d\n",
+ dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n",
context->token, op, ret);
out:
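
The message edits above are cosmetic but deliberate: hex values in the log now carry an explicit 0x prefix, so a token of 0x11 cannot be misread as decimal 11 when it sits next to a genuinely decimal return code, and the messages end uniformly with a period. The convention in one helper, names illustrative:

#include <linux/device.h>

static void example_log(struct device *dev, u16 token, u16 op, int ret)
{
	/* hex values get "0x%x"; the errno stays bare decimal */
	dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n",
			    token, op, ret);
}
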
diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h
index b73e55de83ac..465d1f914b6c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_common.h
+++ b/drivers/infiniband/hw/hns/hns_roce_common.h
@@ -104,208 +104,6 @@
#define hr_reg_read(ptr, field) _hr_reg_read(ptr, field)
-#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3
-#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4
-
-#define ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S 5
-
-#define ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S 6
-
-#define ROCEE_GLB_CFG_ROCEE_PORT_ST_S 10
-#define ROCEE_GLB_CFG_ROCEE_PORT_ST_M \
- (((1UL << 6) - 1) << ROCEE_GLB_CFG_ROCEE_PORT_ST_S)
-
-#define ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S 16
-
-#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S 0
-#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M \
- (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S)
-
-#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S 24
-#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M \
- (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S)
-
-#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S 0
-#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M \
- (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S)
-
-#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S 24
-#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M \
- (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S)
-
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S 0
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M \
- (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S)
-
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S 16
-#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M \
- (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S)
-
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S 0
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M \
- (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S)
-
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S 16
-#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M \
- (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S)
-
-#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_S 0
-#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_M \
- (((1UL << 8) - 1) << ROCEE_RAQ_WL_ROCEE_RAQ_WL_S)
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S 0
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M \
- (((1UL << 15) - 1) << \
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S)
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S 16
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M \
- (((1UL << 4) - 1) << \
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S)
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S 20
-
-#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE 21
-
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S 0
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M \
- (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S)
-
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S 5
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M \
- (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S)
-
-#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S 0
-#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M \
- (((1UL << 5) - 1) << ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S)
-
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S 5
-#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M \
- (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S)
-
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S 0
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M \
- (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S)
-
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S 8
-#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M \
- (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S 0
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M \
- (((1UL << 19) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_S 19
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S 20
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M \
- (((1UL << 2) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S 22
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M \
- (((1UL << 5) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S)
-
-#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S 31
-
-#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S 0
-#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M \
- (((1UL << 3) - 1) << ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S)
-
-#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S 0
-#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M \
- (((1UL << 15) - 1) << ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S)
-
-#define ROCEE_MB6_ROCEE_MB_CMD_S 0
-#define ROCEE_MB6_ROCEE_MB_CMD_M \
- (((1UL << 8) - 1) << ROCEE_MB6_ROCEE_MB_CMD_S)
-
-#define ROCEE_MB6_ROCEE_MB_CMD_MDF_S 8
-#define ROCEE_MB6_ROCEE_MB_CMD_MDF_M \
- (((1UL << 4) - 1) << ROCEE_MB6_ROCEE_MB_CMD_MDF_S)
-
-#define ROCEE_MB6_ROCEE_MB_EVENT_S 14
-
-#define ROCEE_MB6_ROCEE_MB_HW_RUN_S 15
-
-#define ROCEE_MB6_ROCEE_MB_TOKEN_S 16
-#define ROCEE_MB6_ROCEE_MB_TOKEN_M \
- (((1UL << 16) - 1) << ROCEE_MB6_ROCEE_MB_TOKEN_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S 0
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M \
- (((1UL << 24) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S 24
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M \
- (((1UL << 4) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S 28
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M \
- (((1UL << 3) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S)
-
-#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S 31
-
-#define ROCEE_SMAC_H_ROCEE_SMAC_H_S 0
-#define ROCEE_SMAC_H_ROCEE_SMAC_H_M \
- (((1UL << 16) - 1) << ROCEE_SMAC_H_ROCEE_SMAC_H_S)
-
-#define ROCEE_SMAC_H_ROCEE_PORT_MTU_S 16
-#define ROCEE_SMAC_H_ROCEE_PORT_MTU_M \
- (((1UL << 4) - 1) << ROCEE_SMAC_H_ROCEE_PORT_MTU_S)
-
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S 0
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M \
- (((1UL << 2) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S)
-
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S 8
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M \
- (((1UL << 4) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S)
-
-#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S 17
-
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S 0
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M \
- (((1UL << 5) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S)
-
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S 16
-#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M \
- (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S)
-
-#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S 0
-#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M \
- (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S)
-
-#define ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S 16
-#define ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S 1
-#define ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S 0
-
-#define ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S 0
-#define ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S 1
-
-#define ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S 0
-
-#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S 0
-#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M \
- (((1UL << 28) - 1) << ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S)
-
-#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S 0
-#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \
- (((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S)
-
-#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0
-#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \
- (((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S)
-
-#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S 0
-#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M \
- (((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S)
-
-#define ROCEE_SDB_CNT_CMP_BITS 16
-
-#define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S 20
-
-#define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0
-
/*************ROCEE_REG DEFINITION****************/
#define ROCEE_VENDOR_ID_REG 0x0
#define ROCEE_VENDOR_PART_ID_REG 0x4
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index d763f097599f..55057dcbb2dc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
#include "hns_roce_device.h"
@@ -406,15 +405,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr,
goto err_cqn;
}
- /*
- * For the QP created by kernel space, tptr value should be initialized
- * to zero; For the QP created by user space, it will cause synchronous
- * problems if tptr is set to zero here, so we initialize it in user
- * space.
- */
- if (!udata && hr_cq->tptr_addr)
- *hr_cq->tptr_addr = 0;
-
if (udata) {
resp.cqn = hr_cq->cqn;
ret = ib_copy_to_udata(udata, &resp,
@@ -441,9 +431,6 @@ int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
- if (hr_dev->hw->destroy_cq)
- hr_dev->hw->destroy_cq(ib_cq, udata);
-
free_cqc(hr_dev, hr_cq);
free_cqn(hr_dev, hr_cq->cqn);
free_cq_db(hr_dev, hr_cq, udata);
diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c
index 751470c7a2ce..5c4c0480832b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_db.c
+++ b/drivers/infiniband/hw/hns/hns_roce_db.c
@@ -4,7 +4,6 @@
* Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
*/
-#include <linux/platform_device.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 43e17d61cb63..1e0bae136997 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -36,36 +36,18 @@
#include <rdma/ib_verbs.h>
#include <rdma/hns-abi.h>
-#define DRV_NAME "hns_roce"
-
#define PCI_REVISION_ID_HIP08 0x21
#define PCI_REVISION_ID_HIP09 0x30
-#define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6')
-
#define HNS_ROCE_MAX_MSG_LEN 0x80000000
#define HNS_ROCE_IB_MIN_SQ_STRIDE 6
#define BA_BYTE_LEN 8
-/* Hardware specification only for v1 engine */
#define HNS_ROCE_MIN_CQE_NUM 0x40
-#define HNS_ROCE_MIN_WQE_NUM 0x20
#define HNS_ROCE_MIN_SRQ_WQE_NUM 1
-/* Hardware specification only for v1 engine */
-#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7
-#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000
-
-#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20
-#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \
- (5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS)
-#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2
-#define HNS_ROCE_MIN_CQE_CNT 16
-
-#define HNS_ROCE_RESERVED_SGE 1
-
#define HNS_ROCE_MAX_IRQ_NUM 128
#define HNS_ROCE_SGE_IN_WQE 2
@@ -102,18 +84,12 @@
#define HNS_ROCE_FRMR_MAX_PA 512
#define PKEY_ID 0xffff
-#define GUID_LEN 8
#define NODE_DESC_SIZE 64
#define DB_REG_OFFSET 0x1000
/* Configure to HW for PAGE_SIZE larger than 4KB */
#define PG_SHIFT_OFFSET (PAGE_SHIFT - 12)
-#define PAGES_SHIFT_8 8
-#define PAGES_SHIFT_16 16
-#define PAGES_SHIFT_24 24
-#define PAGES_SHIFT_32 32
-
#define HNS_ROCE_IDX_QUE_ENTRY_SZ 4
#define SRQ_DB_REG 0x230
@@ -122,11 +98,6 @@
#define CQ_BANKID_SHIFT 2
-/* The chip implementation of the consumer index is calculated
- * according to twice the actual EQ depth
- */
-#define EQ_DEPTH_COEFF 2
-
enum {
SERV_TYPE_RC,
SERV_TYPE_UC,
@@ -182,6 +153,7 @@ enum {
HNS_ROCE_CAP_FLAG_FRMR = BIT(8),
HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9),
HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10),
+ HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12),
HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14),
HNS_ROCE_CAP_FLAG_STASH = BIT(17),
};
@@ -227,7 +199,7 @@ struct hns_roce_uar {
enum hns_roce_mmap_type {
HNS_ROCE_MMAP_TYPE_DB = 1,
- HNS_ROCE_MMAP_TYPE_TPTR,
+ HNS_ROCE_MMAP_TYPE_DWQE,
};
struct hns_user_mmap_entry {
@@ -242,7 +214,6 @@ struct hns_roce_ucontext {
struct list_head page_list;
struct mutex page_mutex;
struct hns_user_mmap_entry *db_mmap_entry;
- struct hns_user_mmap_entry *tptr_mmap_entry;
};
struct hns_roce_pd {
@@ -345,19 +316,16 @@ struct hns_roce_mw {
u32 pbl_buf_pg_sz;
};
-/* Only support 4K page size for mr register */
-#define MR_SIZE_4K 0
-
struct hns_roce_mr {
struct ib_mr ibmr;
u64 iova; /* MR's virtual original addr */
u64 size; /* Address range of MR */
u32 key; /* Key of MR */
u32 pd; /* PD num of MR */
- u32 access; /* Access permission of MR */
+ u32 access; /* Access permission of MR */
int enabled; /* MR's active status */
- int type; /* MR's register type */
- u32 pbl_hop_num; /* multi-hop number */
+ int type; /* MR's register type */
+ u32 pbl_hop_num; /* multi-hop number */
struct hns_roce_mtr pbl_mtr;
u32 npages;
dma_addr_t *page_list;
@@ -374,17 +342,17 @@ struct hns_roce_wq {
u32 wqe_cnt; /* WQE num */
u32 max_gs;
u32 rsv_sge;
- int offset;
- int wqe_shift; /* WQE size */
+ u32 offset;
+ u32 wqe_shift; /* WQE size */
u32 head;
u32 tail;
void __iomem *db_reg;
};
struct hns_roce_sge {
- unsigned int sge_cnt; /* SGE num */
- int offset;
- int sge_shift; /* SGE size */
+ unsigned int sge_cnt; /* SGE num */
+ u32 offset;
+ u32 sge_shift; /* SGE size */
};
struct hns_roce_buf_list {
@@ -453,7 +421,6 @@ struct hns_roce_cq {
u32 cons_index;
u32 *set_ci_db;
void __iomem *db_reg;
- u16 *tptr_addr;
int arm_sn;
int cqe_size;
unsigned long cqn;
@@ -468,7 +435,7 @@ struct hns_roce_cq {
struct hns_roce_idx_que {
struct hns_roce_mtr mtr;
- int entry_shift;
+ u32 entry_shift;
unsigned long *bitmap;
u32 head;
u32 tail;
@@ -480,7 +447,7 @@ struct hns_roce_srq {
u32 wqe_cnt;
int max_gs;
u32 rsv_sge;
- int wqe_shift;
+ u32 wqe_shift;
u32 cqn;
u32 xrcdn;
void __iomem *db_reg;
@@ -539,10 +506,6 @@ struct hns_roce_srq_table {
struct hns_roce_hem_table table;
};
-struct hns_roce_raq_table {
- struct hns_roce_buf_list *e_raq_buf;
-};
-
struct hns_roce_av {
u8 port;
u8 gid_index;
@@ -627,10 +590,6 @@ struct hns_roce_work {
u32 queue_num;
};
-enum {
- HNS_ROCE_QP_CAP_DIRECT_WQE = BIT(5),
-};
-
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_wq rq;
@@ -650,9 +609,7 @@ struct hns_roce_qp {
u8 sl;
u8 resp_depth;
u8 state;
- u32 access_flags;
u32 atomic_rd_en;
- u32 pkey_index;
u32 qkey;
void (*event)(struct hns_roce_qp *qp,
enum hns_roce_event event_type);
@@ -672,9 +629,10 @@ struct hns_roce_qp {
unsigned long flush_flag;
struct hns_roce_work flush_work;
struct hns_roce_rinl_buf rq_inl_buf;
- struct list_head node; /* all qps are on a list */
- struct list_head rq_node; /* all recv qps are on a list */
- struct list_head sq_node; /* all send qps are on a list */
+ struct list_head node; /* all qps are on a list */
+ struct list_head rq_node; /* all recv qps are on a list */
+ struct list_head sq_node; /* all send qps are on a list */
+ struct hns_user_mmap_entry *dwqe_mmap_entry;
};
struct hns_roce_ib_iboe {
@@ -684,11 +642,6 @@ struct hns_roce_ib_iboe {
u8 phy_port[HNS_ROCE_MAX_PORTS];
};
-enum {
- HNS_ROCE_EQ_STAT_INVALID = 0,
- HNS_ROCE_EQ_STAT_VALID = 2,
-};
-
struct hns_roce_ceqe {
__le32 comp;
__le32 rsv[15];
@@ -720,12 +673,9 @@ struct hns_roce_eq {
int type_flag; /* Aeq:1 ceq:0 */
int eqn;
u32 entries;
- u32 log_entries;
int eqe_size;
int irq;
- int log_page_size;
u32 cons_index;
- struct hns_roce_buf_list *buf_list;
int over_ignore;
int coalesce;
int arm_st;
@@ -740,7 +690,6 @@ struct hns_roce_eq {
struct hns_roce_eq_table {
struct hns_roce_eq *eq;
- void __iomem **eqc_base; /* only for hw v1 */
};
enum cong_type {
@@ -767,7 +716,7 @@ struct hns_roce_caps {
u32 reserved_qps;
int num_qpc_timer;
int num_cqc_timer;
- int num_srqs;
+ u32 num_srqs;
u32 max_wqes;
u32 max_srq_wrs;
u32 max_srq_sges;
@@ -781,7 +730,7 @@ struct hns_roce_caps {
u32 min_cqes;
u32 min_wqes;
u32 reserved_cqs;
- int reserved_srqs;
+ u32 reserved_srqs;
int num_aeq_vectors;
int num_comp_vectors;
int num_other_vectors;
@@ -855,7 +804,7 @@ struct hns_roce_caps {
u32 cqc_timer_ba_pg_sz;
u32 cqc_timer_buf_pg_sz;
u32 cqc_timer_hop_num;
- u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
+ u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */
u32 cqe_buf_pg_sz;
u32 cqe_hop_num;
u32 srqwqe_ba_pg_sz;
@@ -874,7 +823,7 @@ struct hns_roce_caps {
u32 gmv_hop_num;
u32 sl_num;
u32 llm_buf_pg_sz;
- u32 chunk_sz; /* chunk size in non multihop mode */
+ u32 chunk_sz; /* chunk size in non multihop mode */
u64 flags;
u16 default_ceq_max_cnt;
u16 default_ceq_period;
@@ -897,7 +846,6 @@ enum hns_roce_device_state {
};
struct hns_roce_hw {
- int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
int (*cmq_init)(struct hns_roce_dev *hr_dev);
void (*cmq_exit)(struct hns_roce_dev *hr_dev);
int (*hw_profile)(struct hns_roce_dev *hr_dev);
@@ -909,14 +857,12 @@ struct hns_roce_hw {
int (*poll_mbox_done)(struct hns_roce_dev *hr_dev,
unsigned int timeout);
bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
- int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index,
+ int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index,
const union ib_gid *gid, const struct ib_gid_attr *attr);
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
const u8 *addr);
- void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
- enum ib_mtu mtu);
int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
- struct hns_roce_mr *mr, unsigned long mtpt_idx);
+ struct hns_roce_mr *mr);
int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int flags,
void *mb_buf);
@@ -936,9 +882,6 @@ struct hns_roce_hw {
enum ib_qp_state new_state);
int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp);
- int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
- struct ib_udata *udata);
- int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata);
int (*init_eq)(struct hns_roce_dev *hr_dev);
void (*cleanup_eq)(struct hns_roce_dev *hr_dev);
int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf);
@@ -948,13 +891,11 @@ struct hns_roce_hw {
struct hns_roce_dev {
struct ib_device ib_dev;
- struct platform_device *pdev;
struct pci_dev *pci_dev;
struct device *dev;
struct hns_roce_uar priv_uar;
const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
spinlock_t sm_lock;
- spinlock_t bt_cmd_lock;
bool active;
bool is_reset;
bool dis_db;
@@ -1001,8 +942,6 @@ struct hns_roce_dev {
int loop_idc;
u32 sdb_offset;
u32 odb_offset;
- dma_addr_t tptr_dma_addr; /* only for hw v1 */
- u32 tptr_size; /* only for hw v1 */
const struct hns_roce_hw *hw;
void *priv;
struct workqueue_struct *irq_workq;
@@ -1010,6 +949,7 @@ struct hns_roce_dev {
u32 func_num;
u32 is_vf;
u32 cong_algo_tmpl_id;
+ u64 dwqe_page;
};
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
@@ -1158,7 +1098,7 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
/* hns roce hw need current block and next block addr from mtt */
#define MTT_MIN_COUNT 2
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
+ u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct hns_roce_buf_attr *buf_attr,
unsigned int page_shift, struct ib_udata *udata,
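
A signedness cleanup also runs through this header: offsets, strides and shifts describe queue geometry and can never be negative, so int fields (wq offset/wqe_shift, sge offset/sge_shift, idx_que entry_shift, the mtr_find offset parameter) become u32 and arithmetic on them stops mixing signednesses. A made-up before/after:

#include <linux/types.h>

struct example_wq {
	u32 offset;	/* was: int offset */
	u32 wqe_shift;	/* was: int wqe_shift; WQE size = 1 << wqe_shift */
};

static inline u32 example_wqe_offset(const struct example_wq *wq, u32 n)
{
	/* all operands unsigned: no sign extension, no mixed-sign compare */
	return wq->offset + (n << wq->wqe_shift);
}
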
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index fa15d79eabb3..8917365cc6b8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
deleted file mode 100644
index f4af3992ba95..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ /dev/null
@@ -1,4675 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/platform_device.h>
-#include <linux/acpi.h>
-#include <linux/etherdevice.h>
-#include <linux/interrupt.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <rdma/ib_umem.h>
-#include "hns_roce_common.h"
-#include "hns_roce_device.h"
-#include "hns_roce_cmd.h"
-#include "hns_roce_hem.h"
-#include "hns_roce_hw_v1.h"
-
-/**
- * hns_get_gid_index - Get gid index.
- * @hr_dev: pointer to structure hns_roce_dev.
- * @port: port, value range: 0 ~ MAX
- * @gid_index: gid_index, value range: 0 ~ MAX
- * Description:
- * N ports shared gids, allocation method as follow:
- * GID[0][0], GID[1][0],.....GID[N - 1][0],
- * GID[0][0], GID[1][0],.....GID[N - 1][0],
- * And so on
- */
-u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index)
-{
- return gid_index * hr_dev->caps.num_ports + port;
-}
-
-static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg)
-{
- dseg->lkey = cpu_to_le32(sg->lkey);
- dseg->addr = cpu_to_le64(sg->addr);
- dseg->len = cpu_to_le32(sg->length);
-}
-
-static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr,
- u32 rkey)
-{
- rseg->raddr = cpu_to_le64(remote_addr);
- rseg->rkey = cpu_to_le32(rkey);
- rseg->len = 0;
-}
-
-static int hns_roce_v1_post_send(struct ib_qp *ibqp,
- const struct ib_send_wr *wr,
- const struct ib_send_wr **bad_wr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah);
- struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL;
- struct hns_roce_wqe_ctrl_seg *ctrl = NULL;
- struct hns_roce_wqe_data_seg *dseg = NULL;
- struct hns_roce_qp *qp = to_hr_qp(ibqp);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_sq_db sq_db = {};
- int ps_opcode, i;
- unsigned long flags = 0;
- void *wqe = NULL;
- __le32 doorbell[2];
- const u8 *smac;
- int ret = 0;
- int loopback;
- u32 wqe_idx;
- int nreq;
-
- if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
- ibqp->qp_type != IB_QPT_RC)) {
- dev_err(dev, "un-supported QP type\n");
- *bad_wr = NULL;
- return -EOPNOTSUPP;
- }
-
- spin_lock_irqsave(&qp->sq.lock, flags);
-
- for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
- ret = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
-
- wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
-
- if (unlikely(wr->num_sge > qp->sq.max_gs)) {
- dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
- wr->num_sge, qp->sq.max_gs);
- ret = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- wqe = hns_roce_get_send_wqe(qp, wqe_idx);
- qp->sq.wrid[wqe_idx] = wr->wr_id;
-
- /* Corresponding to the RC and RD type wqe process separately */
- if (ibqp->qp_type == IB_QPT_GSI) {
- ud_sq_wqe = wqe;
- roce_set_field(ud_sq_wqe->dmac_h,
- UD_SEND_WQE_U32_4_DMAC_0_M,
- UD_SEND_WQE_U32_4_DMAC_0_S,
- ah->av.mac[0]);
- roce_set_field(ud_sq_wqe->dmac_h,
- UD_SEND_WQE_U32_4_DMAC_1_M,
- UD_SEND_WQE_U32_4_DMAC_1_S,
- ah->av.mac[1]);
- roce_set_field(ud_sq_wqe->dmac_h,
- UD_SEND_WQE_U32_4_DMAC_2_M,
- UD_SEND_WQE_U32_4_DMAC_2_S,
- ah->av.mac[2]);
- roce_set_field(ud_sq_wqe->dmac_h,
- UD_SEND_WQE_U32_4_DMAC_3_M,
- UD_SEND_WQE_U32_4_DMAC_3_S,
- ah->av.mac[3]);
-
- roce_set_field(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_DMAC_4_M,
- UD_SEND_WQE_U32_8_DMAC_4_S,
- ah->av.mac[4]);
- roce_set_field(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_DMAC_5_M,
- UD_SEND_WQE_U32_8_DMAC_5_S,
- ah->av.mac[5]);
-
- smac = (const u8 *)hr_dev->dev_addr[qp->port];
- loopback = ether_addr_equal_unaligned(ah->av.mac,
- smac) ? 1 : 0;
- roce_set_bit(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S,
- loopback);
-
- roce_set_field(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_OPERATION_TYPE_M,
- UD_SEND_WQE_U32_8_OPERATION_TYPE_S,
- HNS_ROCE_WQE_OPCODE_SEND);
- roce_set_field(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M,
- UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S,
- 2);
- roce_set_bit(ud_sq_wqe->u32_8,
- UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S,
- 1);
-
- ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ?
- cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
- (wr->send_flags & IB_SEND_SOLICITED ?
- cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
- ((wr->opcode == IB_WR_SEND_WITH_IMM) ?
- cpu_to_le32(HNS_ROCE_WQE_IMM) : 0);
-
- roce_set_field(ud_sq_wqe->u32_16,
- UD_SEND_WQE_U32_16_DEST_QP_M,
- UD_SEND_WQE_U32_16_DEST_QP_S,
- ud_wr(wr)->remote_qpn);
- roce_set_field(ud_sq_wqe->u32_16,
- UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M,
- UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S,
- ah->av.stat_rate);
-
- roce_set_field(ud_sq_wqe->u32_36,
- UD_SEND_WQE_U32_36_FLOW_LABEL_M,
- UD_SEND_WQE_U32_36_FLOW_LABEL_S,
- ah->av.flowlabel);
- roce_set_field(ud_sq_wqe->u32_36,
- UD_SEND_WQE_U32_36_PRIORITY_M,
- UD_SEND_WQE_U32_36_PRIORITY_S,
- ah->av.sl);
- roce_set_field(ud_sq_wqe->u32_36,
- UD_SEND_WQE_U32_36_SGID_INDEX_M,
- UD_SEND_WQE_U32_36_SGID_INDEX_S,
- hns_get_gid_index(hr_dev, qp->phy_port,
- ah->av.gid_index));
-
- roce_set_field(ud_sq_wqe->u32_40,
- UD_SEND_WQE_U32_40_HOP_LIMIT_M,
- UD_SEND_WQE_U32_40_HOP_LIMIT_S,
- ah->av.hop_limit);
- roce_set_field(ud_sq_wqe->u32_40,
- UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M,
- UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S,
- ah->av.tclass);
-
- memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);
-
- ud_sq_wqe->va0_l =
- cpu_to_le32((u32)wr->sg_list[0].addr);
- ud_sq_wqe->va0_h =
- cpu_to_le32((wr->sg_list[0].addr) >> 32);
- ud_sq_wqe->l_key0 =
- cpu_to_le32(wr->sg_list[0].lkey);
-
- ud_sq_wqe->va1_l =
- cpu_to_le32((u32)wr->sg_list[1].addr);
- ud_sq_wqe->va1_h =
- cpu_to_le32((wr->sg_list[1].addr) >> 32);
- ud_sq_wqe->l_key1 =
- cpu_to_le32(wr->sg_list[1].lkey);
- } else if (ibqp->qp_type == IB_QPT_RC) {
- u32 tmp_len = 0;
-
- ctrl = wqe;
- memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
- for (i = 0; i < wr->num_sge; i++)
- tmp_len += wr->sg_list[i].length;
-
- ctrl->msg_length =
- cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);
-
- ctrl->sgl_pa_h = 0;
- ctrl->flag = 0;
-
- switch (wr->opcode) {
- case IB_WR_SEND_WITH_IMM:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- ctrl->imm_data = wr->ex.imm_data;
- break;
- case IB_WR_SEND_WITH_INV:
- ctrl->inv_key =
- cpu_to_le32(wr->ex.invalidate_rkey);
- break;
- default:
- ctrl->imm_data = 0;
- break;
- }
-
- /* Ctrl field, ctrl set type: sig, solic, imm, fence */
- /* SO wait for conforming application scenarios */
- ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ?
- cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) |
- (wr->send_flags & IB_SEND_SOLICITED ?
- cpu_to_le32(HNS_ROCE_WQE_SE) : 0) |
- ((wr->opcode == IB_WR_SEND_WITH_IMM ||
- wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ?
- cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) |
- (wr->send_flags & IB_SEND_FENCE ?
- (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0);
-
- wqe += sizeof(struct hns_roce_wqe_ctrl_seg);
-
- switch (wr->opcode) {
- case IB_WR_RDMA_READ:
- ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
- set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
- rdma_wr(wr)->rkey);
- break;
- case IB_WR_RDMA_WRITE:
- case IB_WR_RDMA_WRITE_WITH_IMM:
- ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
- set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
- rdma_wr(wr)->rkey);
- break;
- case IB_WR_SEND:
- case IB_WR_SEND_WITH_INV:
- case IB_WR_SEND_WITH_IMM:
- ps_opcode = HNS_ROCE_WQE_OPCODE_SEND;
- break;
- case IB_WR_LOCAL_INV:
- case IB_WR_ATOMIC_CMP_AND_SWP:
- case IB_WR_ATOMIC_FETCH_AND_ADD:
- case IB_WR_LSO:
- default:
- ps_opcode = HNS_ROCE_WQE_OPCODE_MASK;
- break;
- }
- ctrl->flag |= cpu_to_le32(ps_opcode);
- wqe += sizeof(struct hns_roce_wqe_raddr_seg);
-
- dseg = wqe;
- if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
- if (le32_to_cpu(ctrl->msg_length) >
- hr_dev->caps.max_sq_inline) {
- ret = -EINVAL;
- *bad_wr = wr;
- dev_err(dev, "inline len(1-%d)=%d, illegal",
- le32_to_cpu(ctrl->msg_length),
- hr_dev->caps.max_sq_inline);
- goto out;
- }
- for (i = 0; i < wr->num_sge; i++) {
- memcpy(wqe, ((void *) (uintptr_t)
- wr->sg_list[i].addr),
- wr->sg_list[i].length);
- wqe += wr->sg_list[i].length;
- }
- ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
- } else {
- /* sqe num is two */
- for (i = 0; i < wr->num_sge; i++)
- set_data_seg(dseg + i, wr->sg_list + i);
-
- ctrl->flag |= cpu_to_le32(wr->num_sge <<
- HNS_ROCE_WQE_SGE_NUM_BIT);
- }
- }
- }
-
-out:
- /* Set DB return */
- if (likely(nreq)) {
- qp->sq.head += nreq;
-
- roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M,
- SQ_DOORBELL_U32_4_SQ_HEAD_S,
- (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
- roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M,
- SQ_DOORBELL_U32_4_SL_S, qp->sl);
- roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M,
- SQ_DOORBELL_U32_4_PORT_S, qp->phy_port);
- roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M,
- SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
- roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
-
- doorbell[0] = sq_db.u32_4;
- doorbell[1] = sq_db.u32_8;
-
- hns_roce_write64_k(doorbell, qp->sq.db_reg);
- }
-
- spin_unlock_irqrestore(&qp->sq.lock, flags);
-
- return ret;
-}
-
-static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
- const struct ib_recv_wr *wr,
- const struct ib_recv_wr **bad_wr)
-{
- struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
- struct hns_roce_wqe_data_seg *scat = NULL;
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_rq_db rq_db = {};
- __le32 doorbell[2] = {0};
- unsigned long flags = 0;
- unsigned int wqe_idx;
- int ret = 0;
- int nreq;
- int i;
- u32 reg_val;
-
- spin_lock_irqsave(&hr_qp->rq.lock, flags);
-
- for (nreq = 0; wr; ++nreq, wr = wr->next) {
- if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
- hr_qp->ibqp.recv_cq)) {
- ret = -ENOMEM;
- *bad_wr = wr;
- goto out;
- }
-
- wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
-
- if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
- dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
- wr->num_sge, hr_qp->rq.max_gs);
- ret = -EINVAL;
- *bad_wr = wr;
- goto out;
- }
-
- ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx);
-
- roce_set_field(ctrl->rwqe_byte_12,
- RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
- RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S,
- wr->num_sge);
-
- scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1);
-
- for (i = 0; i < wr->num_sge; i++)
- set_data_seg(scat + i, wr->sg_list + i);
-
- hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
- }
-
-out:
- if (likely(nreq)) {
- hr_qp->rq.head += nreq;
-
- if (ibqp->qp_type == IB_QPT_GSI) {
- __le32 tmp;
-
- /* SW update GSI rq header */
- reg_val = roce_read(to_hr_dev(ibqp->device),
- ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->phy_port);
- tmp = cpu_to_le32(reg_val);
- roce_set_field(tmp,
- ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M,
- ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S,
- hr_qp->rq.head);
- reg_val = le32_to_cpu(tmp);
- roce_write(to_hr_dev(ibqp->device),
- ROCEE_QP1C_CFG3_0_REG +
- QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val);
- } else {
- roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M,
- RQ_DOORBELL_U32_4_RQ_HEAD_S,
- hr_qp->rq.head);
- roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M,
- RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
- roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M,
- RQ_DOORBELL_U32_8_CMD_S, 1);
- roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
- 1);
-
- doorbell[0] = rq_db.u32_4;
- doorbell[1] = rq_db.u32_8;
-
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
- }
- }
- spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
-
- return ret;
-}
-
-static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev,
- int sdb_mode, int odb_mode)
-{
- __le32 tmp;
- u32 val;
-
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- tmp = cpu_to_le32(val);
- roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode);
- roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
-}
-
-static int hns_roce_v1_set_hem(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, int obj,
- int step_idx)
-{
- spinlock_t *lock = &hr_dev->bt_cmd_lock;
- struct device *dev = hr_dev->dev;
- struct hns_roce_hem_iter iter;
- void __iomem *bt_cmd;
- __le32 bt_cmd_val[2];
- __le32 bt_cmd_h = 0;
- unsigned long flags;
- __le32 bt_cmd_l;
- int ret = 0;
- u64 bt_ba;
- long end;
-
- /* Find the HEM(Hardware Entry Memory) entry */
- unsigned long i = obj / (table->table_chunk_size / table->obj_size);
-
- switch (table->type) {
- case HEM_TYPE_QPC:
- case HEM_TYPE_MTPT:
- case HEM_TYPE_CQC:
- case HEM_TYPE_SRQC:
- roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
- break;
- default:
- return ret;
- }
-
- roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
- roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
- roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
-
-	/* Currently iterate over only one chunk */
- for (hns_roce_hem_first(table->hem[i], &iter);
- !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
- bt_ba = hns_roce_hem_addr(&iter) >> HNS_HW_PAGE_SHIFT;
-
- spin_lock_irqsave(lock, flags);
-
- bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
-
- end = HW_SYNC_TIMEOUT_MSECS;
- while (end > 0) {
- if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT))
- break;
-
- mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
- end -= HW_SYNC_SLEEP_TIME_INTERVAL;
- }
-
- if (end <= 0) {
-			dev_err(dev, "Write bt_cmd err: hw_sync is not zero.\n");
- spin_unlock_irqrestore(lock, flags);
- return -EBUSY;
- }
-
- bt_cmd_l = cpu_to_le32(bt_ba);
- roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
- upper_32_bits(bt_ba));
-
- bt_cmd_val[0] = bt_cmd_l;
- bt_cmd_val[1] = bt_cmd_h;
- hns_roce_write64_k(bt_cmd_val,
- hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
- spin_unlock_irqrestore(lock, flags);
- }
-
- return ret;
-}
-
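
The HW_SYNC loop in hns_roce_v1_set_hem() is a bounded busy-wait: poll a
"busy" bit, sleep a fixed interval, give up once the time budget is spent.
A minimal sketch of that pattern; hw_busy, poll_ready() and the constants
are illustrative assumptions:

#include <stdbool.h>
#include <unistd.h>

#define TIMEOUT_MS	10
#define INTERVAL_MS	1

static volatile int hw_busy = 1; /* stands in for readl(bt_cmd) >> SYNC_SHIFT */

static bool poll_ready(void)
{
	long budget = TIMEOUT_MS;

	while (budget > 0) {
		if (!hw_busy)
			return true;	/* hardware consumed the last command */
		usleep(INTERVAL_MS * 1000);
		budget -= INTERVAL_MS;
	}
	return false;			/* the caller would return -EBUSY */
}

int main(void)
{
	hw_busy = 0;			/* pretend the hardware went idle */
	return poll_ready() ? 0 : 1;
}
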
-static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode,
- u32 odb_mode)
-{
- __le32 tmp;
- u32 val;
-
- /* Configure SDB/ODB extend mode */
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- tmp = cpu_to_le32(val);
- roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode);
- roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
-}
-
-static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept,
- u32 sdb_alful)
-{
- __le32 tmp;
- u32 val;
-
- /* Configure SDB */
- val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M,
- ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful);
- roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M,
- ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val);
-}
-
-static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept,
- u32 odb_alful)
-{
- __le32 tmp;
- u32 val;
-
- /* Configure ODB */
- val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M,
- ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful);
- roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M,
- ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val);
-}
-
-static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept,
- u32 ext_sdb_alful)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t sdb_dma_addr;
- __le32 tmp;
- u32 val;
-
- /* Configure extend SDB threshold */
- roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept);
- roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful);
-
- /* Configure extend SDB base addr */
- sdb_dma_addr = db->ext_db->sdb_buf_list->map;
- roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12));
-
- /* Configure extend SDB depth */
- val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M,
- ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S,
- db->ext_db->esdb_dep);
- /*
-	 * 44 = 32 + 12: the address written to hardware is shifted right by
-	 * 12 because the base is 4K aligned, and by another 32 to extract
-	 * the high 32 bits of that shifted value.
- */
- roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M,
- ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val);
-
- dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep);
- dev_dbg(dev, "ext SDB threshold: empty: 0x%x, ful: 0x%x\n",
- ext_sdb_alept, ext_sdb_alful);
-}
-
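
The "44 = 32 + 12" comments are easier to see as arithmetic: a 4K-aligned
base is programmed as addr >> 12 in a low register and addr >> 44 in a
high field, and hardware reassembles the full address. A self-checking
sketch with a made-up address:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0x123456789000ULL;	/* 4K-aligned DMA address */
	uint32_t lo = (uint32_t)(base >> 12);	/* low register value */
	uint32_t hi = (uint32_t)(base >> 44);	/* high register field */

	/* hardware rebuilds ((u64)hi << 32 | lo) << 12 */
	uint64_t rebuilt = (((uint64_t)hi << 32) | lo) << 12;

	assert(rebuilt == base);
	printf("lo=0x%x hi=0x%x\n", lo, hi);
	return 0;
}
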
-static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept,
- u32 ext_odb_alful)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t odb_dma_addr;
- __le32 tmp;
- u32 val;
-
- /* Configure extend ODB threshold */
- roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept);
- roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful);
-
- /* Configure extend ODB base addr */
- odb_dma_addr = db->ext_db->odb_buf_list->map;
- roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12));
-
- /* Configure extend ODB depth */
- val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M,
- ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S,
- db->ext_db->eodb_dep);
-	roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M,
-		       ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S,
-		       odb_dma_addr >> 44);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val);
-
- dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep);
- dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n",
- ext_odb_alept, ext_odb_alful);
-}
-
-static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod,
- u32 odb_ext_mod)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t sdb_dma_addr;
- dma_addr_t odb_dma_addr;
- int ret = 0;
-
- db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL);
- if (!db->ext_db)
- return -ENOMEM;
-
- if (sdb_ext_mod) {
- db->ext_db->sdb_buf_list = kmalloc(
- sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL);
- if (!db->ext_db->sdb_buf_list) {
- ret = -ENOMEM;
- goto ext_sdb_buf_fail_out;
- }
-
- db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev,
- HNS_ROCE_V1_EXT_SDB_SIZE,
- &sdb_dma_addr, GFP_KERNEL);
- if (!db->ext_db->sdb_buf_list->buf) {
- ret = -ENOMEM;
- goto alloc_sq_db_buf_fail;
- }
- db->ext_db->sdb_buf_list->map = sdb_dma_addr;
-
- db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH);
- hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT,
- HNS_ROCE_V1_EXT_SDB_ALFUL);
- } else
- hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT,
- HNS_ROCE_V1_SDB_ALFUL);
-
- if (odb_ext_mod) {
- db->ext_db->odb_buf_list = kmalloc(
- sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL);
- if (!db->ext_db->odb_buf_list) {
- ret = -ENOMEM;
- goto ext_odb_buf_fail_out;
- }
-
- db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev,
- HNS_ROCE_V1_EXT_ODB_SIZE,
- &odb_dma_addr, GFP_KERNEL);
- if (!db->ext_db->odb_buf_list->buf) {
- ret = -ENOMEM;
- goto alloc_otr_db_buf_fail;
- }
- db->ext_db->odb_buf_list->map = odb_dma_addr;
-
- db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH);
- hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT,
- HNS_ROCE_V1_EXT_ODB_ALFUL);
- } else
- hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT,
- HNS_ROCE_V1_ODB_ALFUL);
-
- hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod);
-
- return 0;
-
-alloc_otr_db_buf_fail:
- kfree(db->ext_db->odb_buf_list);
-
-ext_odb_buf_fail_out:
- if (sdb_ext_mod) {
- dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
- db->ext_db->sdb_buf_list->buf,
- db->ext_db->sdb_buf_list->map);
- }
-
-alloc_sq_db_buf_fail:
- if (sdb_ext_mod)
- kfree(db->ext_db->sdb_buf_list);
-
-ext_sdb_buf_fail_out:
- kfree(db->ext_db);
- return ret;
-}
-
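
hns_roce_db_ext_init() uses the kernel's goto-unwind idiom: each failure
label frees exactly what was acquired before the failing step, in reverse
order. A reduced user-space sketch of the same shape; the resource names
are illustrative:

#include <stdlib.h>

struct ctx {
	void *a;
	void *b;
};

static int ctx_init(struct ctx *c)
{
	c->a = malloc(64);
	if (!c->a)
		goto err_a;

	c->b = malloc(64);
	if (!c->b)
		goto err_b;

	return 0;

err_b:
	free(c->a);	/* undo only what succeeded */
err_a:
	return -1;	/* -ENOMEM in kernel code */
}

int main(void)
{
	struct ctx c;

	if (ctx_init(&c))
		return 1;
	free(c.b);
	free(c.a);
	return 0;
}
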
-static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev,
- struct ib_pd *pd)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct ib_qp_init_attr init_attr;
- struct ib_qp *qp;
-
- memset(&init_attr, 0, sizeof(struct ib_qp_init_attr));
- init_attr.qp_type = IB_QPT_RC;
- init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
- init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM;
- init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM;
-
- qp = ib_create_qp(pd, &init_attr);
- if (IS_ERR(qp)) {
-		dev_err(dev, "Create loop qp for mr free failed!\n");
- return NULL;
- }
-
- return to_hr_qp(qp);
-}
-
-static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct hns_roce_caps *caps = &hr_dev->caps;
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct device *dev = &hr_dev->pdev->dev;
- struct ib_cq_init_attr cq_init_attr;
- struct ib_qp_attr attr = { 0 };
- struct hns_roce_qp *hr_qp;
- struct ib_cq *cq;
- struct ib_pd *pd;
- union ib_gid dgid;
- __be64 subnet_prefix;
- int attr_mask = 0;
- int ret;
- int i, j;
- u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
- u8 phy_port;
- u32 port = 0;
- u8 sl;
-
- /* Reserved cq for loop qp */
- cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2;
- cq_init_attr.comp_vector = 0;
-
- cq = rdma_zalloc_drv_obj(ibdev, ib_cq);
- if (!cq)
- return -ENOMEM;
-
- ret = hns_roce_create_cq(cq, &cq_init_attr, NULL);
- if (ret) {
- dev_err(dev, "Create cq for reserved loop qp failed!");
- goto alloc_cq_failed;
- }
- free_mr->mr_free_cq = to_hr_cq(cq);
- free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev;
- free_mr->mr_free_cq->ib_cq.uobject = NULL;
- free_mr->mr_free_cq->ib_cq.comp_handler = NULL;
- free_mr->mr_free_cq->ib_cq.event_handler = NULL;
- free_mr->mr_free_cq->ib_cq.cq_context = NULL;
- atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0);
-
- pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
- if (!pd) {
- ret = -ENOMEM;
- goto alloc_mem_failed;
- }
-
- pd->device = ibdev;
- ret = hns_roce_alloc_pd(pd, NULL);
- if (ret)
- goto alloc_pd_failed;
-
- free_mr->mr_free_pd = to_hr_pd(pd);
- free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev;
- free_mr->mr_free_pd->ibpd.uobject = NULL;
- free_mr->mr_free_pd->ibpd.__internal_mr = NULL;
- atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0);
-
- attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE;
- attr.pkey_index = 0;
- attr.min_rnr_timer = 0;
- /* Disable read ability */
- attr.max_dest_rd_atomic = 0;
- attr.max_rd_atomic = 0;
- /* Use arbitrary values as rq_psn and sq_psn */
- attr.rq_psn = 0x0808;
- attr.sq_psn = 0x0808;
- attr.retry_cnt = 7;
- attr.rnr_retry = 7;
- attr.timeout = 0x12;
- attr.path_mtu = IB_MTU_256;
- attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
- rdma_ah_set_static_rate(&attr.ah_attr, 3);
-
- subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
- for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
- phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
- (i % HNS_ROCE_MAX_PORTS);
- sl = i / HNS_ROCE_MAX_PORTS;
-
- for (j = 0; j < caps->num_ports; j++) {
- if (hr_dev->iboe.phy_port[j] == phy_port) {
- queue_en[i] = 1;
- port = j;
- break;
- }
- }
-
- if (!queue_en[i])
- continue;
-
- free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
- if (!free_mr->mr_free_qp[i]) {
- dev_err(dev, "Create loop qp failed!\n");
- ret = -ENOMEM;
- goto create_lp_qp_failed;
- }
- hr_qp = free_mr->mr_free_qp[i];
-
- hr_qp->port = port;
- hr_qp->phy_port = phy_port;
- hr_qp->ibqp.qp_type = IB_QPT_RC;
- hr_qp->ibqp.device = &hr_dev->ib_dev;
- hr_qp->ibqp.uobject = NULL;
- atomic_set(&hr_qp->ibqp.usecnt, 0);
- hr_qp->ibqp.pd = pd;
- hr_qp->ibqp.recv_cq = cq;
- hr_qp->ibqp.send_cq = cq;
-
- rdma_ah_set_port_num(&attr.ah_attr, port + 1);
- rdma_ah_set_sl(&attr.ah_attr, sl);
- attr.port_num = port + 1;
-
- attr.dest_qp_num = hr_qp->qpn;
- memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
- hr_dev->dev_addr[port],
- ETH_ALEN);
-
- memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
- memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
- memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
- dgid.raw[11] = 0xff;
- dgid.raw[12] = 0xfe;
- dgid.raw[8] ^= 2;
- rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
-
- ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
- IB_QPS_RESET, IB_QPS_INIT);
- if (ret) {
- dev_err(dev, "modify qp failed(%d)!\n", ret);
- goto create_lp_qp_failed;
- }
-
- ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN,
- IB_QPS_INIT, IB_QPS_RTR);
- if (ret) {
- dev_err(dev, "modify qp failed(%d)!\n", ret);
- goto create_lp_qp_failed;
- }
-
- ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
- IB_QPS_RTR, IB_QPS_RTS);
- if (ret) {
- dev_err(dev, "modify qp failed(%d)!\n", ret);
- goto create_lp_qp_failed;
- }
- }
-
- return 0;
-
-create_lp_qp_failed:
- for (i -= 1; i >= 0; i--) {
- hr_qp = free_mr->mr_free_qp[i];
- if (ib_destroy_qp(&hr_qp->ibqp))
- dev_err(dev, "Destroy qp %d for mr free failed!\n", i);
- }
-
- hns_roce_dealloc_pd(pd, NULL);
-
-alloc_pd_failed:
- kfree(pd);
-
-alloc_mem_failed:
- hns_roce_destroy_cq(cq, NULL);
-alloc_cq_failed:
- kfree(cq);
- return ret;
-}
-
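
The dgid built in hns_roce_v1_rsv_lp_qp() is the modified EUI-64 recipe:
an fe80::/64 prefix, the MAC split around an ff:fe filler, and the
universal/local bit flipped. A standalone sketch of that mapping; the MAC
value is a made-up example:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void mac_to_gid(const uint8_t mac[6], uint8_t gid[16])
{
	static const uint8_t prefix[8] = { 0xfe, 0x80, 0, 0, 0, 0, 0, 0 };

	memcpy(gid, prefix, 8);		/* fe80:: subnet prefix */
	memcpy(&gid[8], mac, 3);	/* upper half of the MAC */
	gid[11] = 0xff;			/* EUI-64 filler bytes */
	gid[12] = 0xfe;
	memcpy(&gid[13], mac + 3, 3);	/* lower half of the MAC */
	gid[8] ^= 2;			/* flip the universal/local bit */
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t gid[16];

	mac_to_gid(mac, gid);
	for (int i = 0; i < 16; i++)
		printf("%02x%s", gid[i], i == 15 ? "\n" : ":");
	return 0;
}
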
-static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_qp *hr_qp;
- int ret;
- int i;
-
- for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
- hr_qp = free_mr->mr_free_qp[i];
- if (!hr_qp)
- continue;
-
- ret = ib_destroy_qp(&hr_qp->ibqp);
- if (ret)
- dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
- i, ret);
- }
-
- hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL);
- kfree(&free_mr->mr_free_cq->ib_cq);
- hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL);
- kfree(&free_mr->mr_free_pd->ibpd);
-}
-
-static int hns_roce_db_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
- u32 sdb_ext_mod;
- u32 odb_ext_mod;
- u32 sdb_evt_mod;
- u32 odb_evt_mod;
- int ret;
-
- memset(db, 0, sizeof(*db));
-
- /* Default DB mode */
- sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE;
- odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE;
- sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE;
- odb_evt_mod = HNS_ROCE_ODB_POLL_MODE;
-
- db->sdb_ext_mod = sdb_ext_mod;
- db->odb_ext_mod = odb_ext_mod;
-
- /* Init extend DB */
- ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod);
- if (ret) {
- dev_err(dev, "Failed in extend DB configuration.\n");
- return ret;
- }
-
- hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod);
-
- return 0;
-}
-
-static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work)
-{
- struct hns_roce_recreate_lp_qp_work *lp_qp_work;
- struct hns_roce_dev *hr_dev;
-
- lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work,
- work);
- hr_dev = to_hr_dev(lp_qp_work->ib_dev);
-
- hns_roce_v1_release_lp_qp(hr_dev);
-
- if (hns_roce_v1_rsv_lp_qp(hr_dev))
-		dev_err(&hr_dev->pdev->dev, "create reserved qp failed\n");
-
- if (lp_qp_work->comp_flag)
- complete(lp_qp_work->comp);
-
- kfree(lp_qp_work);
-}
-
-static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev)
-{
- long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS;
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct hns_roce_recreate_lp_qp_work *lp_qp_work;
- struct device *dev = &hr_dev->pdev->dev;
- struct completion comp;
-
- lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work),
- GFP_KERNEL);
- if (!lp_qp_work)
- return -ENOMEM;
-
- INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn);
-
- lp_qp_work->ib_dev = &(hr_dev->ib_dev);
- lp_qp_work->comp = &comp;
- lp_qp_work->comp_flag = 1;
-
- init_completion(lp_qp_work->comp);
-
- queue_work(free_mr->free_mr_wq, &(lp_qp_work->work));
-
- while (end > 0) {
- if (try_wait_for_completion(&comp))
- return 0;
- msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE);
- end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE;
- }
-
- lp_qp_work->comp_flag = 0;
- if (try_wait_for_completion(&comp))
- return 0;
-
-	dev_warn(dev, "recreate lp qp timed out after 20s, returning failure!\n");
- return -ETIMEDOUT;
-}
-
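
hns_roce_v1_recreate_lp_qp() waits by repeatedly try-waiting with a sleep;
once the deadline passes it clears comp_flag so the worker will not signal
a stale completion, then try-waits one last time. A simplified
single-process sketch of that polled wait; the names are illustrative:

#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

static atomic_bool done;

static bool wait_done(long timeout_ms, long step_ms)
{
	for (long left = timeout_ms; left > 0; left -= step_ms) {
		if (atomic_load(&done))
			return true;
		usleep(step_ms * 1000);
	}
	return atomic_load(&done);	/* one final try, as above */
}

int main(void)
{
	atomic_store(&done, true);	/* stands in for complete(comp) */
	return wait_done(100, 10) ? 0 : 1;
}
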
-static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
- struct device *dev = &hr_dev->pdev->dev;
- struct ib_send_wr send_wr;
- const struct ib_send_wr *bad_wr;
- int ret;
-
- memset(&send_wr, 0, sizeof(send_wr));
- send_wr.next = NULL;
- send_wr.num_sge = 0;
- send_wr.send_flags = 0;
- send_wr.sg_list = NULL;
- send_wr.wr_id = (unsigned long long)&send_wr;
- send_wr.opcode = IB_WR_RDMA_WRITE;
-
- ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr);
- if (ret) {
-		dev_err(dev, "Post write wqe for mr free failed(%d)!\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
-{
- unsigned long end =
- msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
- struct hns_roce_mr_free_work *mr_work =
- container_of(work, struct hns_roce_mr_free_work, work);
- struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev);
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq;
- struct hns_roce_mr *hr_mr = mr_work->mr;
- struct device *dev = &hr_dev->pdev->dev;
- struct ib_wc wc[HNS_ROCE_V1_RESV_QP];
- struct hns_roce_qp *hr_qp;
- int ne = 0;
- int ret;
- int i;
-
- for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
- hr_qp = free_mr->mr_free_qp[i];
- if (!hr_qp)
- continue;
- ne++;
-
- ret = hns_roce_v1_send_lp_wqe(hr_qp);
- if (ret) {
- dev_err(dev,
- "Send wqe (qp:0x%lx) for mr free failed(%d)!\n",
- hr_qp->qpn, ret);
- goto free_work;
- }
- }
-
- if (!ne) {
- dev_err(dev, "Reserved loop qp is absent!\n");
- goto free_work;
- }
-
- do {
- ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
- if (ret < 0 && hr_qp) {
- dev_err(dev,
-				"Poll cqe on qp 0x%lx failed(%d) for mr 0x%x free! Remain %d cqe\n",
- hr_qp->qpn, ret, hr_mr->key, ne);
- goto free_work;
- }
- ne -= ret;
- usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
- (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
- } while (ne && time_before_eq(jiffies, end));
-
- if (ne != 0)
- dev_err(dev,
- "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n",
- hr_mr->key, ne);
-
-free_work:
- if (mr_work->comp_flag)
- complete(mr_work->comp);
- kfree(mr_work);
-}
-
-static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev,
- struct hns_roce_mr *mr, struct ib_udata *udata)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS;
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_mr_free_work *mr_work;
- unsigned long start = jiffies;
- struct completion comp;
- int ret = 0;
-
- if (mr->enabled) {
- if (hns_roce_hw_destroy_mpt(hr_dev, NULL,
- key_to_hw_index(mr->key) &
- (hr_dev->caps.num_mtpts - 1)))
- dev_warn(dev, "DESTROY_MPT failed!\n");
- }
-
- mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL);
- if (!mr_work) {
- ret = -ENOMEM;
- goto free_mr;
- }
-
- INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn);
-
- mr_work->ib_dev = &(hr_dev->ib_dev);
- mr_work->comp = &comp;
- mr_work->comp_flag = 1;
- mr_work->mr = (void *)mr;
- init_completion(mr_work->comp);
-
- queue_work(free_mr->free_mr_wq, &(mr_work->work));
-
- while (end > 0) {
- if (try_wait_for_completion(&comp))
- goto free_mr;
- msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
- end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE;
- }
-
- mr_work->comp_flag = 0;
- if (try_wait_for_completion(&comp))
- goto free_mr;
-
-	dev_warn(dev, "Free mr work 0x%x timed out after 50s!\n", mr->key);
- ret = -ETIMEDOUT;
-
-free_mr:
- dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n",
- mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start));
-
- ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)key_to_hw_index(mr->key));
- hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr);
- kfree(mr);
-
- return ret;
-}
-
-static void hns_roce_db_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_db_table *db = &priv->db_table;
- struct device *dev = &hr_dev->pdev->dev;
-
- if (db->sdb_ext_mod) {
- dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE,
- db->ext_db->sdb_buf_list->buf,
- db->ext_db->sdb_buf_list->map);
- kfree(db->ext_db->sdb_buf_list);
- }
-
- if (db->odb_ext_mod) {
- dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE,
- db->ext_db->odb_buf_list->buf,
- db->ext_db->odb_buf_list->map);
- kfree(db->ext_db->odb_buf_list);
- }
-
- kfree(db->ext_db);
-}
-
-static int hns_roce_raq_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_raq_table *raq = &priv->raq_table;
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t addr;
- int raq_shift;
- __le32 tmp;
- u32 val;
- int ret;
-
- raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL);
- if (!raq->e_raq_buf)
- return -ENOMEM;
-
- raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE,
- &addr, GFP_KERNEL);
- if (!raq->e_raq_buf->buf) {
- ret = -ENOMEM;
- goto err_dma_alloc_raq;
- }
- raq->e_raq_buf->map = addr;
-
-	/* Configure raq extended address: 48-bit, 4K aligned */
- roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12);
-
- /* Configure raq_shift */
- raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY);
- val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M,
- ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift);
- /*
-	 * 44 = 32 + 12: the address written to hardware is shifted right by
-	 * 12 because the base is 4K aligned, and by another 32 to extract
-	 * the high 32 bits of that shifted value.
- */
- roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M,
- ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S,
- raq->e_raq_buf->map >> 44);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val);
- dev_dbg(dev, "Configure raq_shift 0x%x.\n", val);
-
- /* Configure raq threshold */
- val = roce_read(hr_dev, ROCEE_RAQ_WL_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M,
- ROCEE_RAQ_WL_ROCEE_RAQ_WL_S,
- HNS_ROCE_V1_EXT_RAQ_WF);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_RAQ_WL_REG, val);
- dev_dbg(dev, "Configure raq_wl 0x%x.\n", val);
-
- /* Enable extend raq */
- val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S,
- POL_TIME_INTERVAL_VAL);
- roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1);
- roce_set_field(tmp,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S,
- 2);
- roce_set_bit(tmp,
- ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val);
- dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val);
-
- /* Enable raq drop */
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- tmp = cpu_to_le32(val);
- roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
- dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val);
-
- return 0;
-
-err_dma_alloc_raq:
- kfree(raq->e_raq_buf);
- return ret;
-}
-
-static void hns_roce_raq_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_raq_table *raq = &priv->raq_table;
- struct device *dev = &hr_dev->pdev->dev;
-
- dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf,
- raq->e_raq_buf->map);
- kfree(raq->e_raq_buf);
-}
-
-static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag)
-{
- __le32 tmp;
- u32 val;
-
- if (enable_flag) {
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- /* Open all ports */
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
- ROCEE_GLB_CFG_ROCEE_PORT_ST_S,
- ALL_PORT_VAL_OPEN);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
- } else {
- val = roce_read(hr_dev, ROCEE_GLB_CFG_REG);
- /* Close all ports */
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M,
- ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_GLB_CFG_REG, val);
- }
-}
-
-static int hns_roce_bt_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct device *dev = &hr_dev->pdev->dev;
- int ret;
-
- priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev,
- HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map,
- GFP_KERNEL);
- if (!priv->bt_table.qpc_buf.buf)
- return -ENOMEM;
-
- priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev,
- HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map,
- GFP_KERNEL);
- if (!priv->bt_table.mtpt_buf.buf) {
- ret = -ENOMEM;
- goto err_failed_alloc_mtpt_buf;
- }
-
- priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev,
- HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map,
- GFP_KERNEL);
- if (!priv->bt_table.cqc_buf.buf) {
- ret = -ENOMEM;
- goto err_failed_alloc_cqc_buf;
- }
-
- return 0;
-
-err_failed_alloc_cqc_buf:
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
-
-err_failed_alloc_mtpt_buf:
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
-
- return ret;
-}
-
-static void hns_roce_bt_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct device *dev = &hr_dev->pdev->dev;
-
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map);
-
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map);
-
- dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE,
- priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map);
-}
-
-static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
- struct device *dev = &hr_dev->pdev->dev;
-
- /*
-	 * This buffer will be used for the CQ's tptr (tail pointer), also
-	 * named ci (consumer index). Every CQ uses 2 bytes to save its
-	 * cqe ci in hip06. Hardware reads this area to get the new ci
-	 * when the queue is almost full.
- */
- tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
- &tptr_buf->map, GFP_KERNEL);
- if (!tptr_buf->buf)
- return -ENOMEM;
-
- hr_dev->tptr_dma_addr = tptr_buf->map;
- hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE;
-
- return 0;
-}
-
-static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
- struct device *dev = &hr_dev->pdev->dev;
-
- dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE,
- tptr_buf->buf, tptr_buf->map);
-}
-
-static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
- struct device *dev = &hr_dev->pdev->dev;
- int ret;
-
- free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr");
- if (!free_mr->free_mr_wq) {
- dev_err(dev, "Create free mr workqueue failed!\n");
- return -ENOMEM;
- }
-
- ret = hns_roce_v1_rsv_lp_qp(hr_dev);
- if (ret) {
- dev_err(dev, "Reserved loop qp failed(%d)!\n", ret);
- destroy_workqueue(free_mr->free_mr_wq);
- }
-
- return ret;
-}
-
-static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_free_mr *free_mr = &priv->free_mr;
-
- destroy_workqueue(free_mr->free_mr_wq);
-
- hns_roce_v1_release_lp_qp(hr_dev);
-}
-
-/**
- * hns_roce_v1_reset - reset RoCE
- * @hr_dev: RoCE device struct pointer
- * @dereset: true -- drop reset, false -- reset
- * Return: 0 on success, negative errno on failure
- */
-static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset)
-{
- struct device_node *dsaf_node;
- struct device *dev = &hr_dev->pdev->dev;
- struct device_node *np = dev->of_node;
- struct fwnode_handle *fwnode;
- int ret;
-
- /* check if this is DT/ACPI case */
- if (dev_of_node(dev)) {
- dsaf_node = of_parse_phandle(np, "dsaf-handle", 0);
- if (!dsaf_node) {
- dev_err(dev, "could not find dsaf-handle\n");
- return -EINVAL;
- }
- fwnode = &dsaf_node->fwnode;
- } else if (is_acpi_device_node(dev->fwnode)) {
- struct fwnode_reference_args args;
-
- ret = acpi_node_get_property_reference(dev->fwnode,
- "dsaf-handle", 0, &args);
- if (ret) {
- dev_err(dev, "could not find dsaf-handle\n");
- return ret;
- }
- fwnode = args.fwnode;
- } else {
- dev_err(dev, "cannot read data from DT or ACPI\n");
- return -ENXIO;
- }
-
- ret = hns_dsaf_roce_reset(fwnode, false);
- if (ret)
- return ret;
-
- if (dereset) {
- msleep(SLEEP_TIME_INTERVAL);
- ret = hns_dsaf_roce_reset(fwnode, true);
- }
-
- return ret;
-}
-
-static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_caps *caps = &hr_dev->caps;
- int i;
-
- hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG);
- hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG);
- hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) |
- ((u64)roce_read(hr_dev,
- ROCEE_SYS_IMAGE_GUID_H_REG) << 32);
- hr_dev->hw_rev = HNS_ROCE_HW_VER1;
-
- caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM;
- caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM;
- caps->min_wqes = HNS_ROCE_MIN_WQE_NUM;
- caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM;
- caps->min_cqes = HNS_ROCE_MIN_CQE_NUM;
- caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM;
- caps->max_sq_sg = HNS_ROCE_V1_SG_NUM;
- caps->max_rq_sg = HNS_ROCE_V1_SG_NUM;
- caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE;
- caps->num_uars = HNS_ROCE_V1_UAR_NUM;
- caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM;
- caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM;
- caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM;
- caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM;
- caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM;
- caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS;
- caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM;
- caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA;
- caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA;
- caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ;
- caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ;
- caps->qpc_sz = HNS_ROCE_V1_QPC_SIZE;
- caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE;
- caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE;
- caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE;
- caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE;
- caps->cqe_sz = HNS_ROCE_V1_CQE_SIZE;
- caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT;
- caps->reserved_lkey = 0;
- caps->reserved_pds = 0;
- caps->reserved_mrws = 1;
- caps->reserved_uars = 0;
- caps->reserved_cqs = 0;
-	caps->reserved_qps = 12; /* 2 SQPs per port, 6 ports make 12 */
- caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE;
-
- for (i = 0; i < caps->num_ports; i++)
- caps->pkey_table_len[i] = 1;
-
- for (i = 0; i < caps->num_ports; i++) {
-		/* The six ports share 16 GID entries in the v1 engine */
- if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports))
- caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
- caps->num_ports;
- else
- caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM /
- caps->num_ports + 1;
- }
-
- caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM;
- caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM;
- caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG);
- caps->max_mtu = IB_MTU_2048;
-
- return 0;
-}
-
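
The gid_table_len loop in hns_roce_v1_profile() spreads
HNS_ROCE_V1_GID_NUM entries over num_ports ports, giving the first
(16 % 6) ports one extra entry. A tiny sketch that checks the v1 numbers
add back up to 16:

#include <stdio.h>

int main(void)
{
	const int gids = 16, ports = 6;
	int total = 0;

	for (int i = 0; i < ports; i++) {
		int len = gids / ports + (i < gids % ports ? 1 : 0);

		printf("port %d: %d GIDs\n", i, len);	/* 3 3 3 3 2 2 */
		total += len;
	}
	return total == gids ? 0 : 1;
}
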
-static int hns_roce_v1_init(struct hns_roce_dev *hr_dev)
-{
- int ret;
- u32 val;
- __le32 tmp;
- struct device *dev = &hr_dev->pdev->dev;
-
- /* DMAE user config */
- val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M,
- ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf);
- roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M,
- ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S,
- 1 << PAGES_SHIFT_16);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val);
-
- val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M,
- ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf);
- roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M,
- ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S,
- 1 << PAGES_SHIFT_16);
-
- ret = hns_roce_db_init(hr_dev);
- if (ret) {
- dev_err(dev, "doorbell init failed!\n");
- return ret;
- }
-
- ret = hns_roce_raq_init(hr_dev);
- if (ret) {
- dev_err(dev, "raq init failed!\n");
- goto error_failed_raq_init;
- }
-
- ret = hns_roce_bt_init(hr_dev);
- if (ret) {
- dev_err(dev, "bt init failed!\n");
- goto error_failed_bt_init;
- }
-
- ret = hns_roce_tptr_init(hr_dev);
- if (ret) {
- dev_err(dev, "tptr init failed!\n");
- goto error_failed_tptr_init;
- }
-
- ret = hns_roce_free_mr_init(hr_dev);
- if (ret) {
- dev_err(dev, "free mr init failed!\n");
- goto error_failed_free_mr_init;
- }
-
- hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP);
-
- return 0;
-
-error_failed_free_mr_init:
- hns_roce_tptr_free(hr_dev);
-
-error_failed_tptr_init:
- hns_roce_bt_free(hr_dev);
-
-error_failed_bt_init:
- hns_roce_raq_free(hr_dev);
-
-error_failed_raq_init:
- hns_roce_db_free(hr_dev);
- return ret;
-}
-
-static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev)
-{
- hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN);
- hns_roce_free_mr_free(hr_dev);
- hns_roce_tptr_free(hr_dev);
- hns_roce_bt_free(hr_dev);
- hns_roce_raq_free(hr_dev);
- hns_roce_db_free(hr_dev);
-}
-
-static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev)
-{
- u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG);
-
- return (!!(status & (1 << HCR_GO_BIT)));
-}
-
-static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param,
- u64 out_param, u32 in_modifier, u8 op_modifier,
- u16 op, u16 token, int event)
-{
- u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG);
- unsigned long end;
- u32 val = 0;
- __le32 tmp;
-
- end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
- while (hns_roce_v1_cmd_pending(hr_dev)) {
- if (time_after(jiffies, end)) {
-			dev_err(hr_dev->dev, "mbox go bit timeout: jiffies=%lu end=%lu\n",
-				jiffies, end);
- return -EAGAIN;
- }
- cond_resched();
- }
-
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
- op);
- roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
- ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
- roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
- roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
- roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M,
- ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
-
- val = le32_to_cpu(tmp);
- writeq(in_param, hcr + 0);
- writeq(out_param, hcr + 2);
- writel(in_modifier, hcr + 4);
- /* Memory barrier */
- wmb();
-
- writel(val, hcr + 5);
-
- return 0;
-}
-
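
hns_roce_v1_post_mbox() follows the usual doorbell discipline: write the
command payload, order it with wmb(), and only then write the word that
sets the hardware-run bit. A sketch of that ordering with MMIO faked as
plain memory; HW_RUN_BIT's position is an illustrative assumption:

#include <stdint.h>

#define HW_RUN_BIT	(1U << 31)	/* illustrative bit position */

static volatile uint64_t mbox_param[2];
static volatile uint32_t mbox_ctrl;

static void post_mbox(uint64_t in, uint64_t out, uint32_t cmd)
{
	mbox_param[0] = in;		/* payload registers first */
	mbox_param[1] = out;
	__atomic_thread_fence(__ATOMIC_RELEASE); /* plays the role of wmb() */
	mbox_ctrl = cmd | HW_RUN_BIT;	/* hardware starts on this last write */
}

int main(void)
{
	post_mbox(0x1000, 0x2000, 7);
	return (mbox_ctrl & HW_RUN_BIT) ? 0 : 1;
}
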
-static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev,
- unsigned int timeout)
-{
- u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG;
- unsigned long end;
- u32 status = 0;
-
- end = msecs_to_jiffies(timeout) + jiffies;
- while (hns_roce_v1_cmd_pending(hr_dev) && time_before(jiffies, end))
- cond_resched();
-
- if (hns_roce_v1_cmd_pending(hr_dev)) {
-		dev_err(hr_dev->dev, "[cmd_poll] hw run cmd timed out!\n");
- return -ETIMEDOUT;
- }
-
- status = le32_to_cpu((__force __le32)
- __raw_readl(hcr + HCR_STATUS_OFFSET));
- if ((status & STATUS_MASK) != 0x1) {
- dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status);
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port,
- int gid_index, const union ib_gid *gid,
- const struct ib_gid_attr *attr)
-{
- unsigned long flags;
- u32 *p = NULL;
- u8 gid_idx;
-
- gid_idx = hns_get_gid_index(hr_dev, port, gid_index);
-
- spin_lock_irqsave(&hr_dev->iboe.lock, flags);
-
- p = (u32 *)&gid->raw[0];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG +
- (HNS_ROCE_V1_GID_NUM * gid_idx));
-
- p = (u32 *)&gid->raw[4];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG +
- (HNS_ROCE_V1_GID_NUM * gid_idx));
-
- p = (u32 *)&gid->raw[8];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG +
- (HNS_ROCE_V1_GID_NUM * gid_idx));
-
- p = (u32 *)&gid->raw[0xc];
- roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG +
- (HNS_ROCE_V1_GID_NUM * gid_idx));
-
- spin_unlock_irqrestore(&hr_dev->iboe.lock, flags);
-
- return 0;
-}
-
-static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
- const u8 *addr)
-{
- u32 reg_smac_l;
- u16 reg_smac_h;
- __le32 tmp;
- u16 *p_h;
- u32 *p;
- u32 val;
-
- /*
-	 * When the MAC changes, loopback may fail because the smac is no
-	 * longer equal to the dmac. We need to release and recreate the
-	 * reserved qp.
- */
- if (hr_dev->hw->dereg_mr) {
- int ret;
-
-		dev_err(dev, "Create cq for reserved loop qp failed!\n");
- if (ret && ret != -ETIMEDOUT)
- return ret;
- }
-
- p = (u32 *)(&addr[0]);
- reg_smac_l = *p;
- roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG +
- PHY_PORT_OFFSET * phy_port);
-
- val = roce_read(hr_dev,
- ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
- tmp = cpu_to_le32(val);
- p_h = (u16 *)(&addr[4]);
- reg_smac_h = *p_h;
- roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M,
- ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
- val);
-
- return 0;
-}
-
-static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
- enum ib_mtu mtu)
-{
- __le32 tmp;
- u32 val;
-
- val = roce_read(hr_dev,
- ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET);
- tmp = cpu_to_le32(val);
- roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M,
- ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu);
- val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET,
- val);
-}
-
-static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
- struct hns_roce_mr *mr,
- unsigned long mtpt_idx)
-{
- u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
- struct ib_device *ibdev = &hr_dev->ib_dev;
- struct hns_roce_v1_mpt_entry *mpt_entry;
- dma_addr_t pbl_ba;
- int count;
- int i;
-
- /* MPT filled into mailbox buf */
- mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf;
- memset(mpt_entry, 0, sizeof(*mpt_entry));
-
- roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M,
- MPT_BYTE_4_KEY_STATE_S, KEY_VALID);
- roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M,
- MPT_BYTE_4_KEY_S, mr->key);
- roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M,
- MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_BIND_ENABLE_S,
- (mr->access & IB_ACCESS_MW_BIND ? 1 : 0));
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0);
- roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M,
- MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S,
- (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0));
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S,
- (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0));
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S,
- (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0));
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S,
- 0);
- roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0);
-
- roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
- MPT_BYTE_12_PBL_ADDR_H_S, 0);
- roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M,
- MPT_BYTE_12_MW_BIND_COUNTER_S, 0);
-
- mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova);
- mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32));
- mpt_entry->length = cpu_to_le32((u32)mr->size);
-
- roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M,
- MPT_BYTE_28_PD_S, mr->pd);
- roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M,
- MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx);
- roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M,
- MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT);
-
- /* DMA memory register */
- if (mr->type == MR_TYPE_DMA)
- return 0;
-
- count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
- ARRAY_SIZE(pages), &pbl_ba);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find PBL mtr, count = %d.", count);
- return -ENOBUFS;
- }
-
- /* Register user mr */
- for (i = 0; i < count; i++) {
- switch (i) {
- case 0:
- mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_36,
- MPT_BYTE_36_PA0_H_M,
- MPT_BYTE_36_PA0_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_32));
- break;
- case 1:
- roce_set_field(mpt_entry->mpt_byte_36,
- MPT_BYTE_36_PA1_L_M,
- MPT_BYTE_36_PA1_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_40,
- MPT_BYTE_40_PA1_H_M,
- MPT_BYTE_40_PA1_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_24));
- break;
- case 2:
- roce_set_field(mpt_entry->mpt_byte_40,
- MPT_BYTE_40_PA2_L_M,
- MPT_BYTE_40_PA2_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_44,
- MPT_BYTE_44_PA2_H_M,
- MPT_BYTE_44_PA2_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_16));
- break;
- case 3:
- roce_set_field(mpt_entry->mpt_byte_44,
- MPT_BYTE_44_PA3_L_M,
- MPT_BYTE_44_PA3_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_48,
- MPT_BYTE_48_PA3_H_M,
- MPT_BYTE_48_PA3_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_8));
- break;
- case 4:
- mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_56,
- MPT_BYTE_56_PA4_H_M,
- MPT_BYTE_56_PA4_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_32));
- break;
- case 5:
- roce_set_field(mpt_entry->mpt_byte_56,
- MPT_BYTE_56_PA5_L_M,
- MPT_BYTE_56_PA5_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_60,
- MPT_BYTE_60_PA5_H_M,
- MPT_BYTE_60_PA5_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_24));
- break;
- case 6:
- roce_set_field(mpt_entry->mpt_byte_60,
- MPT_BYTE_60_PA6_L_M,
- MPT_BYTE_60_PA6_L_S, (u32)(pages[i]));
- roce_set_field(mpt_entry->mpt_byte_64,
- MPT_BYTE_64_PA6_H_M,
- MPT_BYTE_64_PA6_H_S,
- (u32)(pages[i] >> PAGES_SHIFT_16));
- break;
- default:
- break;
- }
- }
-
- mpt_entry->pbl_addr_l = cpu_to_le32(pbl_ba);
- roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M,
- MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba));
-
- return 0;
-}
-
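
The PBL switch in hns_roce_v1_write_mtpt() above scatters each 64-bit page
address across register fields split at byte boundaries
(PAGES_SHIFT_8/16/24/32). A self-checking sketch of splitting a 64-bit
value at an arbitrary bit position:

#include <assert.h>
#include <stdint.h>

static void split64(uint64_t v, unsigned int split,
		    uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(v & ((1ULL << split) - 1));	/* low slice */
	*hi = (uint32_t)(v >> split);			/* remaining bits */
}

int main(void)
{
	uint32_t lo, hi;

	split64(0x0000123456789abcULL, 24, &lo, &hi);
	assert(lo == 0x789abc && hi == 0x123456);
	return 0;
}
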
-static void *get_cqe(struct hns_roce_cq *hr_cq, int n)
-{
- return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE);
-}
-
-static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)
-{
- struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);
-
-	/* Get the cqe when its owner bit is the inverse of the MSB of cons_idx */
- return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^
- !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;
-}
-
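
get_sw_cqe() above decides ownership by parity: a cqe belongs to software
when its owner bit differs from the bit of the consumer index at the
queue-depth position, which flips once per lap around the ring. A minimal
sketch; the cqe layout here is an illustrative stand-in:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define CQ_DEPTH 64	/* power of two */

struct cqe {
	uint32_t owner;	/* toggled by hardware every lap */
};

static bool cqe_is_sw_owned(const struct cqe *c, uint32_t n)
{
	return (c->owner ^ !!(n & CQ_DEPTH)) != 0;
}

int main(void)
{
	struct cqe c = { .owner = 1 };

	assert(cqe_is_sw_owned(&c, 0));		/* first lap: owner=1 valid */
	assert(!cqe_is_sw_owned(&c, CQ_DEPTH));	/* next lap: owner must be 0 */
	return 0;
}
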
-static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)
-{
- return get_sw_cqe(hr_cq, hr_cq->cons_index);
-}
-
-static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
-{
- __le32 doorbell[2];
-
- doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1));
- doorbell[1] = 0;
- roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn);
-
- hns_roce_write64_k(doorbell, hr_cq->db_reg);
-}
-
-static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
- struct hns_roce_srq *srq)
-{
- struct hns_roce_cqe *cqe, *dest;
- u32 prod_index;
- int nfreed = 0;
- u8 owner_bit;
-
- for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index);
- ++prod_index) {
- if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe)
- break;
- }
-
- /*
- * Now backwards through the CQ, removing CQ entries
- * that match our QP by overwriting them with next entries.
- */
- while ((int) --prod_index - (int) hr_cq->cons_index >= 0) {
- cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe);
- if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
- CQE_BYTE_16_LOCAL_QPN_S) &
- HNS_ROCE_CQE_QPN_MASK) == qpn) {
-			/* The v1 engine does not support SRQ */
- ++nfreed;
- } else if (nfreed) {
- dest = get_cqe(hr_cq, (prod_index + nfreed) &
- hr_cq->ib_cq.cqe);
- owner_bit = roce_get_bit(dest->cqe_byte_4,
- CQE_BYTE_4_OWNER_S);
- memcpy(dest, cqe, sizeof(*cqe));
- roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S,
- owner_bit);
- }
- }
-
- if (nfreed) {
- hr_cq->cons_index += nfreed;
- hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
- }
-}
-
-static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
- struct hns_roce_srq *srq)
-{
- spin_lock_irq(&hr_cq->lock);
- __hns_roce_v1_cq_clean(hr_cq, qpn, srq);
- spin_unlock_irq(&hr_cq->lock);
-}
-
-static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev,
- struct hns_roce_cq *hr_cq, void *mb_buf,
- u64 *mtts, dma_addr_t dma_handle)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf;
- struct hns_roce_cq_context *cq_context = mb_buf;
- dma_addr_t tptr_dma_addr;
- int offset;
-
- memset(cq_context, 0, sizeof(*cq_context));
-
- /* Get the tptr for this CQ. */
- offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE;
- tptr_dma_addr = tptr_buf->map + offset;
- hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset);
-
- /* Register cq_context members */
- roce_set_field(cq_context->cqc_byte_4,
- CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M,
- CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID);
- roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M,
- CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn);
-
- cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle);
-
- roce_set_field(cq_context->cqc_byte_12,
- CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M,
- CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S,
- ((u64)dma_handle >> 32));
- roce_set_field(cq_context->cqc_byte_12,
- CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M,
- CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S,
- ilog2(hr_cq->cq_depth));
- roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M,
- CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector);
-
- cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0]));
-
- roce_set_field(cq_context->cqc_byte_20,
- CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M,
- CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32);
- /* Dedicated hardware, directly set 0 */
- roce_set_field(cq_context->cqc_byte_20,
- CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M,
- CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0);
-	/*
-	 * 44 = 32 + 12: the address written to hardware is shifted right by
-	 * 12 because the base is 4K aligned, and by another 32 to extract
-	 * the high 32 bits of that shifted value.
-	 */
- roce_set_field(cq_context->cqc_byte_20,
- CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M,
- CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S,
- tptr_dma_addr >> 44);
-
- cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12));
-
- roce_set_field(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M,
- CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0);
- roce_set_bit(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0);
- roce_set_bit(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0);
- roce_set_bit(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0);
- roce_set_bit(cq_context->cqc_byte_32,
- CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S,
- 0);
- /* The initial value of cq's ci is 0 */
- roce_set_field(cq_context->cqc_byte_32,
- CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M,
- CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0);
-}
-
-static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq,
- enum ib_cq_notify_flags flags)
-{
- struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
- u32 notification_flag;
- __le32 doorbell[2] = {};
-
- notification_flag = (flags & IB_CQ_SOLICITED_MASK) ==
- IB_CQ_SOLICITED ? CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL;
- /*
-	 * flags = 0: Notification Flag = 1, next
-	 * flags = 1: Notification Flag = 0, solicited
- */
- doorbell[0] =
- cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1));
- roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1);
- roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M,
- ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S,
- hr_cq->cqn | notification_flag);
-
- hns_roce_write64_k(doorbell, hr_cq->db_reg);
-
- return 0;
-}
-
-static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
- struct hns_roce_qp **cur_qp, struct ib_wc *wc)
-{
- int qpn;
- int is_send;
- u16 wqe_ctr;
- u32 status;
- u32 opcode;
- struct hns_roce_cqe *cqe;
- struct hns_roce_qp *hr_qp;
- struct hns_roce_wq *wq;
- struct hns_roce_wqe_ctrl_seg *sq_wqe;
- struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device);
- struct device *dev = &hr_dev->pdev->dev;
-
-	/* Find the cqe according to the consumer index */
- cqe = next_cqe_sw(hr_cq);
- if (!cqe)
- return -EAGAIN;
-
- ++hr_cq->cons_index;
- /* Memory barrier */
- rmb();
- /* 0->SQ, 1->RQ */
- is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S));
-
-	/* Local_qpn in a UD cqe is at most 1, so the real qpn must be computed */
- if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
- CQE_BYTE_16_LOCAL_QPN_S) <= 1) {
- qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M,
- CQE_BYTE_20_PORT_NUM_S) +
- roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
- CQE_BYTE_16_LOCAL_QPN_S) *
- HNS_ROCE_MAX_PORTS;
- } else {
- qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M,
- CQE_BYTE_16_LOCAL_QPN_S);
- }
-
- if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) {
- hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
- if (unlikely(!hr_qp)) {
- dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n",
- hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK));
- return -EINVAL;
- }
-
- *cur_qp = hr_qp;
- }
-
- wc->qp = &(*cur_qp)->ibqp;
- wc->vendor_err = 0;
-
- status = roce_get_field(cqe->cqe_byte_4,
- CQE_BYTE_4_STATUS_OF_THE_OPERATION_M,
- CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) &
- HNS_ROCE_CQE_STATUS_MASK;
- switch (status) {
- case HNS_ROCE_CQE_SUCCESS:
- wc->status = IB_WC_SUCCESS;
- break;
- case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR:
- wc->status = IB_WC_LOC_LEN_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR:
- wc->status = IB_WC_LOC_QP_OP_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR:
- wc->status = IB_WC_LOC_PROT_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR:
- wc->status = IB_WC_WR_FLUSH_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR:
- wc->status = IB_WC_MW_BIND_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR:
- wc->status = IB_WC_BAD_RESP_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR:
- wc->status = IB_WC_LOC_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR:
- wc->status = IB_WC_REM_INV_REQ_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR:
- wc->status = IB_WC_REM_ACCESS_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR:
- wc->status = IB_WC_REM_OP_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR:
- wc->status = IB_WC_RETRY_EXC_ERR;
- break;
- case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR:
- wc->status = IB_WC_RNR_RETRY_EXC_ERR;
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
-
- /* CQE status error, directly return */
- if (wc->status != IB_WC_SUCCESS)
- return 0;
-
- if (is_send) {
-		/* This CQE corresponds to the SQ */
- sq_wqe = hns_roce_get_send_wqe(*cur_qp,
- roce_get_field(cqe->cqe_byte_4,
- CQE_BYTE_4_WQE_INDEX_M,
- CQE_BYTE_4_WQE_INDEX_S) &
-				       ((*cur_qp)->sq.wqe_cnt - 1));
- switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
- case HNS_ROCE_WQE_OPCODE_SEND:
- wc->opcode = IB_WC_SEND;
- break;
- case HNS_ROCE_WQE_OPCODE_RDMA_READ:
- wc->opcode = IB_WC_RDMA_READ;
- wc->byte_len = le32_to_cpu(cqe->byte_cnt);
- break;
- case HNS_ROCE_WQE_OPCODE_RDMA_WRITE:
- wc->opcode = IB_WC_RDMA_WRITE;
- break;
- case HNS_ROCE_WQE_OPCODE_LOCAL_INV:
- wc->opcode = IB_WC_LOCAL_INV;
- break;
- case HNS_ROCE_WQE_OPCODE_UD_SEND:
- wc->opcode = IB_WC_SEND;
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
- wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
- IB_WC_WITH_IMM : 0);
-
- wq = &(*cur_qp)->sq;
- if ((*cur_qp)->sq_signal_bits) {
- /*
-			 * If sq_signal_bits is set, first update the tail
-			 * pointer to the wqe that the current cqe
-			 * corresponds to
- */
- wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4,
- CQE_BYTE_4_WQE_INDEX_M,
- CQE_BYTE_4_WQE_INDEX_S);
- wq->tail += (wqe_ctr - (u16)wq->tail) &
- (wq->wqe_cnt - 1);
- }
- wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
- ++wq->tail;
- } else {
-		/* This CQE corresponds to the RQ */
- wc->byte_len = le32_to_cpu(cqe->byte_cnt);
- opcode = roce_get_field(cqe->cqe_byte_4,
- CQE_BYTE_4_OPERATION_TYPE_M,
- CQE_BYTE_4_OPERATION_TYPE_S) &
- HNS_ROCE_CQE_OPCODE_MASK;
- switch (opcode) {
- case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE:
- wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data =
- cpu_to_be32(le32_to_cpu(cqe->immediate_data));
- break;
- case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE:
- if (roce_get_bit(cqe->cqe_byte_4,
- CQE_BYTE_4_IMM_INDICATOR_S)) {
- wc->opcode = IB_WC_RECV;
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->ex.imm_data = cpu_to_be32(
- le32_to_cpu(cqe->immediate_data));
- } else {
- wc->opcode = IB_WC_RECV;
- wc->wc_flags = 0;
- }
- break;
- default:
- wc->status = IB_WC_GENERAL_ERR;
- break;
- }
-
- /* Update tail pointer, record wr_id */
- wq = &(*cur_qp)->rq;
- wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
- ++wq->tail;
- wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M,
- CQE_BYTE_20_SL_S);
- wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20,
- CQE_BYTE_20_REMOTE_QPN_M,
- CQE_BYTE_20_REMOTE_QPN_S);
- wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20,
- CQE_BYTE_20_GRH_PRESENT_S) ?
- IB_WC_GRH : 0);
- wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28,
- CQE_BYTE_28_P_KEY_IDX_M,
- CQE_BYTE_28_P_KEY_IDX_S);
- }
-
- return 0;
-}
-
-int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
-{
- struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
- struct hns_roce_qp *cur_qp = NULL;
- unsigned long flags;
- int npolled;
-	int ret = 0;
-
- spin_lock_irqsave(&hr_cq->lock, flags);
-
- for (npolled = 0; npolled < num_entries; ++npolled) {
- ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled);
- if (ret)
- break;
- }
-
- if (npolled) {
- *hr_cq->tptr_addr = hr_cq->cons_index &
- ((hr_cq->cq_depth << 1) - 1);
-
- hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index);
- }
-
- spin_unlock_irqrestore(&hr_cq->lock, flags);
-
- if (ret == 0 || ret == -EAGAIN)
- return npolled;
- else
- return ret;
-}
-
-static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev,
- struct hns_roce_hem_table *table, int obj,
- int step_idx)
-{
- struct hns_roce_v1_priv *priv = hr_dev->priv;
- struct device *dev = &hr_dev->pdev->dev;
- long end = HW_SYNC_TIMEOUT_MSECS;
- __le32 bt_cmd_val[2] = {0};
- unsigned long flags = 0;
- void __iomem *bt_cmd;
- u64 bt_ba = 0;
-
- switch (table->type) {
- case HEM_TYPE_QPC:
- bt_ba = priv->bt_table.qpc_buf.map >> 12;
- break;
- case HEM_TYPE_MTPT:
- bt_ba = priv->bt_table.mtpt_buf.map >> 12;
- break;
- case HEM_TYPE_CQC:
- bt_ba = priv->bt_table.cqc_buf.map >> 12;
- break;
- case HEM_TYPE_SRQC:
-		dev_dbg(dev, "HEM_TYPE_SRQC not supported.\n");
- return -EINVAL;
- default:
- return 0;
- }
- roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type);
- roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
- roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
- roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);
-
- spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags);
-
- bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;
-
- while (1) {
- if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
- if (!end) {
-				dev_err(dev, "Write bt_cmd err: hw_sync is not zero.\n");
- spin_unlock_irqrestore(&hr_dev->bt_cmd_lock,
- flags);
- return -EBUSY;
- }
- } else {
- break;
- }
- mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
- end -= HW_SYNC_SLEEP_TIME_INTERVAL;
- }
-
- bt_cmd_val[0] = cpu_to_le32(bt_ba);
- roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
- ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32);
- hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
-
- spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags);
-
- return 0;
-}
-
-static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev,
- enum hns_roce_qp_state cur_state,
- enum hns_roce_qp_state new_state,
- struct hns_roce_qp_context *context,
- struct hns_roce_qp *hr_qp)
-{
- static const u16
- op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = {
- [HNS_ROCE_QP_STATE_RST] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
- },
- [HNS_ROCE_QP_STATE_INIT] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
-			/* Note: In v1 engine, HW doesn't support INIT2INIT,
-			 * so the RST2INIT cmd is used in place of INIT2INIT.
-			 */
- [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP,
- [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP,
- },
- [HNS_ROCE_QP_STATE_RTR] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP,
- },
- [HNS_ROCE_QP_STATE_RTS] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP,
- [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP,
- },
- [HNS_ROCE_QP_STATE_SQD] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP,
- [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP,
- },
- [HNS_ROCE_QP_STATE_ERR] = {
- [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP,
- [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP,
- }
- };
-
- struct hns_roce_cmd_mailbox *mailbox;
- struct device *dev = &hr_dev->pdev->dev;
- int ret;
-
- if (cur_state >= HNS_ROCE_QP_NUM_STATE ||
- new_state >= HNS_ROCE_QP_NUM_STATE ||
- !op[cur_state][new_state]) {
-		dev_err(dev, "[modify_qp] unsupported state transition %d to %d\n",
- cur_state, new_state);
- return -EINVAL;
- }
-
- if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP)
- return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
- HNS_ROCE_CMD_2RST_QP,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-
- if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP)
- return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2,
- HNS_ROCE_CMD_2ERR_QP,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- memcpy(mailbox->buf, context, sizeof(*context));
-
- ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0,
- op[cur_state][new_state],
- HNS_ROCE_CMD_TIMEOUT_MSECS);
-
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
- return ret;
-}
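
The nested designated-initializer array above is a compact encoding of a
sparse transition table: unlisted entries default to zero, which doubles as
"unsupported". A standalone sketch of the pattern (hypothetical states and
command values, not the driver's):

    enum example_state { EX_RST, EX_INIT, EX_RTS, EX_NUM };

    /* Zero means the transition is unsupported. */
    static const unsigned short ex_cmd[EX_NUM][EX_NUM] = {
            [EX_RST]  = { [EX_INIT] = 1 },
            [EX_INIT] = { [EX_RTS]  = 2 },
    };

    static int lookup_cmd(enum example_state cur, enum example_state nxt)
    {
            if (cur >= EX_NUM || nxt >= EX_NUM || !ex_cmd[cur][nxt])
                    return -1; /* reject, as the driver does */
            return ex_cmd[cur][nxt];
    }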
-
-static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
- u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba)
-{
- struct ib_device *ibdev = &hr_dev->ib_dev;
- int count;
-
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba);
- if (count < 1) {
- ibdev_err(ibdev, "Failed to find SQ ba\n");
- return -ENOBUFS;
- }
-
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, rq_ba,
- 1, NULL);
-	if (count < 1) {
- ibdev_err(ibdev, "Failed to find RQ ba\n");
- return -ENOBUFS;
- }
-
- return 0;
-}
-
-static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
- int attr_mask, enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_sqp_context *context;
- dma_addr_t dma_handle = 0;
- u32 __iomem *addr;
- u64 sq_ba = 0;
- u64 rq_ba = 0;
- __le32 tmp;
- u32 reg_val;
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- /* Search QP buf's MTTs */
- if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
- goto out;
-
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
- roce_set_field(context->qp1c_bytes_4,
- QP1C_BYTES_4_SQ_WQE_SHIFT_M,
- QP1C_BYTES_4_SQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(context->qp1c_bytes_4,
- QP1C_BYTES_4_RQ_WQE_SHIFT_M,
- QP1C_BYTES_4_RQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
- roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M,
- QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn);
-
- context->sq_rq_bt_l = cpu_to_le32(dma_handle);
- roce_set_field(context->qp1c_bytes_12,
- QP1C_BYTES_12_SQ_RQ_BT_H_M,
- QP1C_BYTES_12_SQ_RQ_BT_H_S,
- upper_32_bits(dma_handle));
-
- roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M,
- QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head);
- roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M,
- QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port);
- roce_set_bit(context->qp1c_bytes_16,
- QP1C_BYTES_16_SIGNALING_TYPE_S,
- hr_qp->sq_signal_bits);
- roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S,
- 1);
- roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S,
- 1);
- roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S,
- 0);
-
- roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M,
- QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
- roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M,
- QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index);
-
- context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
-
- roce_set_field(context->qp1c_bytes_28,
- QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M,
- QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S,
- upper_32_bits(rq_ba));
- roce_set_field(context->qp1c_bytes_28,
- QP1C_BYTES_28_RQ_CUR_IDX_M,
- QP1C_BYTES_28_RQ_CUR_IDX_S, 0);
-
- roce_set_field(context->qp1c_bytes_32,
- QP1C_BYTES_32_RX_CQ_NUM_M,
- QP1C_BYTES_32_RX_CQ_NUM_S,
- to_hr_cq(ibqp->recv_cq)->cqn);
- roce_set_field(context->qp1c_bytes_32,
- QP1C_BYTES_32_TX_CQ_NUM_M,
- QP1C_BYTES_32_TX_CQ_NUM_S,
- to_hr_cq(ibqp->send_cq)->cqn);
-
- context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
-
- roce_set_field(context->qp1c_bytes_40,
- QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M,
- QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S,
- upper_32_bits(sq_ba));
- roce_set_field(context->qp1c_bytes_40,
- QP1C_BYTES_40_SQ_CUR_IDX_M,
- QP1C_BYTES_40_SQ_CUR_IDX_S, 0);
-
- /* Copy context to QP1C register */
- addr = (u32 __iomem *)(hr_dev->reg_base +
- ROCEE_QP1C_CFG0_0_REG +
- hr_qp->phy_port * sizeof(*context));
-
- writel(le32_to_cpu(context->qp1c_bytes_4), addr);
- writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1);
- writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2);
- writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3);
- writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4);
- writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5);
- writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6);
- writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7);
- writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8);
- writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9);
- }
-
- /* Modify QP1C status */
- reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG +
- hr_qp->phy_port * sizeof(*context));
- tmp = cpu_to_le32(reg_val);
- roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M,
- ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state);
- reg_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG +
- hr_qp->phy_port * sizeof(*context), reg_val);
-
- hr_qp->state = new_state;
- if (new_state == IB_QPS_RESET) {
- hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
- ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
- if (ibqp->send_cq != ibqp->recv_cq)
- hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
- hr_qp->qpn, NULL);
-
- hr_qp->rq.head = 0;
- hr_qp->rq.tail = 0;
- hr_qp->sq.head = 0;
- hr_qp->sq.tail = 0;
- }
-
- kfree(context);
- return 0;
-
-out:
- kfree(context);
- return -EINVAL;
-}
-
-static bool check_qp_state(enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
- static const bool sm[][IB_QPS_ERR + 1] = {
- [IB_QPS_RESET] = { [IB_QPS_RESET] = true,
- [IB_QPS_INIT] = true },
- [IB_QPS_INIT] = { [IB_QPS_RESET] = true,
- [IB_QPS_INIT] = true,
- [IB_QPS_RTR] = true,
- [IB_QPS_ERR] = true },
- [IB_QPS_RTR] = { [IB_QPS_RESET] = true,
- [IB_QPS_RTS] = true,
- [IB_QPS_ERR] = true },
- [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true },
- [IB_QPS_SQD] = {},
- [IB_QPS_SQE] = {},
- [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
- };
-
- return sm[cur_state][new_state];
-}
-
-static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
- int attr_mask, enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_qp_context *context;
- const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
- dma_addr_t dma_handle_2 = 0;
- dma_addr_t dma_handle = 0;
- __le32 doorbell[2] = {0};
- u64 *mtts_2 = NULL;
- int ret = -EINVAL;
- const u8 *smac;
- u64 sq_ba = 0;
- u64 rq_ba = 0;
- u32 port;
- u32 port_num;
- u8 *dmac;
-
- if (!check_qp_state(cur_state, new_state)) {
- ibdev_err(ibqp->device,
-			  "unsupported QP(%u) state transition from %d to %d\n",
- ibqp->qp_num, cur_state, new_state);
- return -EINVAL;
- }
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- /* Search qp buf's mtts */
- if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle))
- goto out;
-
- /* Search IRRL's mtts */
- mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
- hr_qp->qpn, &dma_handle_2);
- if (mtts_2 == NULL) {
- dev_err(dev, "qp irrl_table find failed\n");
- goto out;
- }
-
- /*
- * Reset to init
- * Mandatory param:
- * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS
- * Optional param: NA
- */
- if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) {
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
- QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
- to_hr_qp_type(hr_qp->ibqp.qp_type));
-
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ));
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
- );
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S,
- !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)
- );
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
- QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
- QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_PD_M,
- QP_CONTEXT_QPC_BYTES_4_PD_S,
- to_hr_pd(ibqp->pd)->pdn);
- hr_qp->access_flags = attr->qp_access_flags;
- roce_set_field(context->qpc_bytes_8,
- QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
- QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
- to_hr_cq(ibqp->send_cq)->cqn);
- roce_set_field(context->qpc_bytes_8,
- QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
- QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
- to_hr_cq(ibqp->recv_cq)->cqn);
-
- if (ibqp->srq)
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
- QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
- to_hr_srq(ibqp->srq)->srqn);
-
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
- attr->pkey_index);
- hr_qp->pkey_index = attr->pkey_index;
- roce_set_field(context->qpc_bytes_16,
- QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
- QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
- } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M,
- QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S,
- to_hr_qp_type(hr_qp->ibqp.qp_type));
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0);
- if (attr_mask & IB_QP_ACCESS_FLAGS) {
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
- !!(attr->qp_access_flags &
- IB_ACCESS_REMOTE_READ));
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
- !!(attr->qp_access_flags &
- IB_ACCESS_REMOTE_WRITE));
- } else {
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S,
- !!(hr_qp->access_flags &
- IB_ACCESS_REMOTE_READ));
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S,
- !!(hr_qp->access_flags &
- IB_ACCESS_REMOTE_WRITE));
- }
-
- roce_set_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1);
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M,
- QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->sq.wqe_cnt));
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M,
- QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S,
- ilog2((unsigned int)hr_qp->rq.wqe_cnt));
- roce_set_field(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTES_4_PD_M,
- QP_CONTEXT_QPC_BYTES_4_PD_S,
- to_hr_pd(ibqp->pd)->pdn);
-
- roce_set_field(context->qpc_bytes_8,
- QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M,
- QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S,
- to_hr_cq(ibqp->send_cq)->cqn);
- roce_set_field(context->qpc_bytes_8,
- QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M,
- QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S,
- to_hr_cq(ibqp->recv_cq)->cqn);
-
- if (ibqp->srq)
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M,
- QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S,
- to_hr_srq(ibqp->srq)->srqn);
- if (attr_mask & IB_QP_PKEY_INDEX)
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
- attr->pkey_index);
- else
- roce_set_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S,
- hr_qp->pkey_index);
-
- roce_set_field(context->qpc_bytes_16,
- QP_CONTEXT_QPC_BYTES_16_QP_NUM_M,
- QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn);
- } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
- if ((attr_mask & IB_QP_ALT_PATH) ||
- (attr_mask & IB_QP_ACCESS_FLAGS) ||
- (attr_mask & IB_QP_PKEY_INDEX) ||
- (attr_mask & IB_QP_QKEY)) {
- dev_err(dev, "INIT2RTR attr_mask error\n");
- goto out;
- }
-
- dmac = (u8 *)attr->ah_attr.roce.dmac;
-
- context->sq_rq_bt_l = cpu_to_le32(dma_handle);
- roce_set_field(context->qpc_bytes_24,
- QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M,
- QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S,
- upper_32_bits(dma_handle));
- roce_set_bit(context->qpc_bytes_24,
- QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S,
- 1);
- roce_set_field(context->qpc_bytes_24,
- QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
- QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S,
- attr->min_rnr_timer);
- context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2));
- roce_set_field(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M,
- QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S,
- ((u32)(dma_handle_2 >> 32)) &
- QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M);
- roce_set_field(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M,
- QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0);
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S,
- 1);
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S,
- hr_qp->sq_signal_bits);
-
- port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
- hr_qp->port;
- smac = (const u8 *)hr_dev->dev_addr[port];
-		/* loop back when dmac equals smac or loop_idc is 1 */
- if (ether_addr_equal_unaligned(dmac, smac) ||
- hr_dev->loop_idc == 0x1)
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1);
-
- roce_set_bit(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S,
- rdma_ah_get_ah_flags(&attr->ah_attr));
- roce_set_field(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
- QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S,
- ilog2((unsigned int)attr->max_dest_rd_atomic));
-
- if (attr_mask & IB_QP_DEST_QPN)
- roce_set_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_S,
- attr->dest_qp_num);
-
- /* Configure GID index */
- port_num = rdma_ah_get_port_num(&attr->ah_attr);
- roce_set_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S,
- hns_get_gid_index(hr_dev,
- port_num - 1,
- grh->sgid_index));
-
- memcpy(&(context->dmac_l), dmac, 4);
-
- roce_set_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
- QP_CONTEXT_QPC_BYTES_44_DMAC_H_S,
- *((u16 *)(&dmac[4])));
- roce_set_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M,
- QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S,
- rdma_ah_get_static_rate(&attr->ah_attr));
- roce_set_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_S,
- grh->hop_limit);
-
- roce_set_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S,
- grh->flow_label);
- roce_set_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_S,
- grh->traffic_class);
- roce_set_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_MTU_M,
- QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu);
-
- memcpy(context->dgid, grh->dgid.raw,
- sizeof(grh->dgid.raw));
-
-		dev_dbg(dev, "dmac low: 0x%x, high: 0x%lx\n", context->dmac_l,
- roce_get_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_DMAC_H_M,
- QP_CONTEXT_QPC_BYTES_44_DMAC_H_S));
-
- roce_set_field(context->qpc_bytes_68,
- QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M,
- QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S,
- hr_qp->rq.head);
- roce_set_field(context->qpc_bytes_68,
- QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0);
-
- context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba);
-
- roce_set_field(context->qpc_bytes_76,
- QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M,
- QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S,
- upper_32_bits(rq_ba));
- roce_set_field(context->qpc_bytes_76,
- QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M,
- QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0);
-
- context->rx_rnr_time = 0;
-
- roce_set_field(context->qpc_bytes_84,
- QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M,
- QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S,
- attr->rq_psn - 1);
- roce_set_field(context->qpc_bytes_84,
- QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M,
- QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0);
-
- roce_set_field(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S,
- attr->rq_psn);
- roce_set_bit(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0);
- roce_set_bit(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0);
- roce_set_field(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M,
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S,
- 0);
- roce_set_field(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M,
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S,
- 0);
-
- context->dma_length = 0;
- context->r_key = 0;
- context->va_l = 0;
- context->va_h = 0;
-
- roce_set_field(context->qpc_bytes_108,
- QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M,
- QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0);
- roce_set_bit(context->qpc_bytes_108,
- QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0);
- roce_set_bit(context->qpc_bytes_108,
- QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0);
-
- roce_set_field(context->qpc_bytes_112,
- QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M,
- QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0);
- roce_set_field(context->qpc_bytes_112,
- QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M,
- QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0);
-
- /* For chip resp ack */
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
- hr_qp->phy_port);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_SL_M,
- QP_CONTEXT_QPC_BYTES_156_SL_S,
- rdma_ah_get_sl(&attr->ah_attr));
- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
- } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) {
-		/* If any optional param is present, return an error */
- if ((attr_mask & IB_QP_ALT_PATH) ||
- (attr_mask & IB_QP_ACCESS_FLAGS) ||
- (attr_mask & IB_QP_QKEY) ||
- (attr_mask & IB_QP_PATH_MIG_STATE) ||
- (attr_mask & IB_QP_CUR_STATE) ||
- (attr_mask & IB_QP_MIN_RNR_TIMER)) {
- dev_err(dev, "RTR2RTS attr_mask error\n");
- goto out;
- }
-
- context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
-
- roce_set_field(context->qpc_bytes_120,
- QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M,
- QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S,
- upper_32_bits(sq_ba));
-
- roce_set_field(context->qpc_bytes_124,
- QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M,
- QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0);
- roce_set_field(context->qpc_bytes_124,
- QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M,
- QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0);
-
- roce_set_field(context->qpc_bytes_128,
- QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M,
- QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S,
- attr->sq_psn);
- roce_set_bit(context->qpc_bytes_128,
- QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0);
- roce_set_field(context->qpc_bytes_128,
- QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M,
- QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S,
- 0);
- roce_set_bit(context->qpc_bytes_128,
- QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0);
-
- roce_set_field(context->qpc_bytes_132,
- QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M,
- QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0);
- roce_set_field(context->qpc_bytes_132,
- QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M,
- QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0);
-
- roce_set_field(context->qpc_bytes_136,
- QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M,
- QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S,
- attr->sq_psn);
- roce_set_field(context->qpc_bytes_136,
- QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M,
- QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S,
- attr->sq_psn);
-
- roce_set_field(context->qpc_bytes_140,
- QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M,
- QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S,
- (attr->sq_psn >> SQ_PSN_SHIFT));
- roce_set_field(context->qpc_bytes_140,
- QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M,
- QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0);
- roce_set_bit(context->qpc_bytes_140,
- QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0);
-
- roce_set_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M,
- QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0);
- roce_set_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S,
- attr->retry_cnt);
- roce_set_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S,
- attr->rnr_retry);
- roce_set_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_LSN_M,
- QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100);
-
- context->rnr_retry = 0;
-
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M,
- QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S,
- attr->retry_cnt);
- if (attr->timeout < 0x12) {
-			dev_info(dev, "ack timeout value (0x%x) must be at least 0x12.\n",
- attr->timeout);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
- 0x12);
- } else {
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S,
- attr->timeout);
- }
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M,
- QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S,
- attr->rnr_retry);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M,
- QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S,
- hr_qp->phy_port);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_SL_M,
- QP_CONTEXT_QPC_BYTES_156_SL_S,
- rdma_ah_get_sl(&attr->ah_attr));
- hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
- QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S,
- ilog2((unsigned int)attr->max_rd_atomic));
- roce_set_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0);
- context->pkt_use_len = 0;
-
- roce_set_field(context->qpc_bytes_164,
- QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
- QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn);
- roce_set_field(context->qpc_bytes_164,
- QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M,
- QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0);
-
- roce_set_field(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M,
- QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S,
- attr->sq_psn);
- roce_set_field(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M,
- QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0);
- roce_set_field(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M,
- QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0);
- roce_set_bit(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0);
- roce_set_bit(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0);
- roce_set_bit(context->qpc_bytes_168,
- QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0);
- context->sge_use_len = 0;
-
- roce_set_field(context->qpc_bytes_176,
- QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0);
- roce_set_field(context->qpc_bytes_176,
- QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S,
- 0);
- roce_set_field(context->qpc_bytes_180,
- QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0);
- roce_set_field(context->qpc_bytes_180,
- QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M,
- QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0);
-
- context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba);
-
- roce_set_field(context->qpc_bytes_188,
- QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M,
- QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S,
- upper_32_bits(sq_ba));
- roce_set_bit(context->qpc_bytes_188,
- QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0);
- roce_set_field(context->qpc_bytes_188,
- QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M,
- QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S,
- 0);
- }
-
-	/* Every state migration must update the QP state field */
- roce_set_field(context->qpc_bytes_144,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state);
-
- /* SW pass context to HW */
- ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state),
- to_hns_roce_state(new_state), context,
- hr_qp);
- if (ret) {
- dev_err(dev, "hns_roce_qp_modify failed\n");
- goto out;
- }
-
-	/*
-	 * The driver uses rst2init in place of init2init, so the hardware
-	 * needs to refresh the RQ head via the doorbell again.
-	 */
- if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) {
- roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M,
- RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head);
- roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M,
- RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn);
- roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M,
- RQ_DOORBELL_U32_8_CMD_S, 1);
- roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1);
-
- if (ibqp->uobject) {
- hr_qp->rq.db_reg = hr_dev->reg_base +
- hr_dev->odb_offset +
- DB_REG_OFFSET * hr_dev->priv_uar.index;
- }
-
- hns_roce_write64_k(doorbell, hr_qp->rq.db_reg);
- }
-
- hr_qp->state = new_state;
-
- if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
- hr_qp->resp_depth = attr->max_dest_rd_atomic;
- if (attr_mask & IB_QP_PORT) {
- hr_qp->port = attr->port_num - 1;
- hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
- }
-
- if (new_state == IB_QPS_RESET && !ibqp->uobject) {
- hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn,
- ibqp->srq ? to_hr_srq(ibqp->srq) : NULL);
- if (ibqp->send_cq != ibqp->recv_cq)
- hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq),
- hr_qp->qpn, NULL);
-
- hr_qp->rq.head = 0;
- hr_qp->rq.tail = 0;
- hr_qp->sq.head = 0;
- hr_qp->sq.tail = 0;
- }
-out:
- kfree(context);
- return ret;
-}
-
-static int hns_roce_v1_modify_qp(struct ib_qp *ibqp,
- const struct ib_qp_attr *attr, int attr_mask,
- enum ib_qp_state cur_state,
- enum ib_qp_state new_state)
-{
- if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
- return -EOPNOTSUPP;
-
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
- return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state,
- new_state);
- else
- return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state,
- new_state);
-}
-
-static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state)
-{
- switch (state) {
- case HNS_ROCE_QP_STATE_RST:
- return IB_QPS_RESET;
- case HNS_ROCE_QP_STATE_INIT:
- return IB_QPS_INIT;
- case HNS_ROCE_QP_STATE_RTR:
- return IB_QPS_RTR;
- case HNS_ROCE_QP_STATE_RTS:
- return IB_QPS_RTS;
- case HNS_ROCE_QP_STATE_SQD:
- return IB_QPS_SQD;
- case HNS_ROCE_QP_STATE_ERR:
- return IB_QPS_ERR;
- default:
- return IB_QPS_ERR;
- }
-}
-
-static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev,
- struct hns_roce_qp *hr_qp,
- struct hns_roce_qp_context *hr_context)
-{
- struct hns_roce_cmd_mailbox *mailbox;
- int ret;
-
- mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
- HNS_ROCE_CMD_QUERY_QP,
- HNS_ROCE_CMD_TIMEOUT_MSECS);
- if (!ret)
- memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
- else
- dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n");
-
- hns_roce_free_cmd_mailbox(hr_dev, mailbox);
-
- return ret;
-}
-
-static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask,
- struct ib_qp_init_attr *qp_init_attr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_sqp_context context;
- u32 addr;
-
- mutex_lock(&hr_qp->mutex);
-
- if (hr_qp->state == IB_QPS_RESET) {
- qp_attr->qp_state = IB_QPS_RESET;
- goto done;
- }
-
- addr = ROCEE_QP1C_CFG0_0_REG +
- hr_qp->port * sizeof(struct hns_roce_sqp_context);
- context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr));
- context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1));
- context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2));
- context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3));
- context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4));
- context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5));
- context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6));
- context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7));
- context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8));
- context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9));
-
- hr_qp->state = roce_get_field(context.qp1c_bytes_4,
- QP1C_BYTES_4_QP_STATE_M,
- QP1C_BYTES_4_QP_STATE_S);
- qp_attr->qp_state = hr_qp->state;
- qp_attr->path_mtu = IB_MTU_256;
- qp_attr->path_mig_state = IB_MIG_ARMED;
- qp_attr->qkey = QKEY_VAL;
- qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- qp_attr->rq_psn = 0;
- qp_attr->sq_psn = 0;
- qp_attr->dest_qp_num = 1;
- qp_attr->qp_access_flags = 6;
-
- qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20,
- QP1C_BYTES_20_PKEY_IDX_M,
- QP1C_BYTES_20_PKEY_IDX_S);
- qp_attr->port_num = hr_qp->port + 1;
- qp_attr->sq_draining = 0;
- qp_attr->max_rd_atomic = 0;
- qp_attr->max_dest_rd_atomic = 0;
- qp_attr->min_rnr_timer = 0;
- qp_attr->timeout = 0;
- qp_attr->retry_cnt = 0;
- qp_attr->rnr_retry = 0;
- qp_attr->alt_timeout = 0;
-
-done:
- qp_attr->cur_qp_state = qp_attr->qp_state;
- qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
- qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
- qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
- qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
- qp_attr->cap.max_inline_data = 0;
- qp_init_attr->cap = qp_attr->cap;
- qp_init_attr->create_flags = 0;
-
- mutex_unlock(&hr_qp->mutex);
-
- return 0;
-}
-
-static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask,
- struct ib_qp_init_attr *qp_init_attr)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_qp_context *context;
- int tmp_qp_state;
- int ret = 0;
- int state;
-
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context)
- return -ENOMEM;
-
- memset(qp_attr, 0, sizeof(*qp_attr));
- memset(qp_init_attr, 0, sizeof(*qp_init_attr));
-
- mutex_lock(&hr_qp->mutex);
-
- if (hr_qp->state == IB_QPS_RESET) {
- qp_attr->qp_state = IB_QPS_RESET;
- goto done;
- }
-
- ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context);
- if (ret) {
- dev_err(dev, "query qpc error\n");
- ret = -EINVAL;
- goto out;
- }
-
- state = roce_get_field(context->qpc_bytes_144,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_M,
- QP_CONTEXT_QPC_BYTES_144_QP_STATE_S);
- tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state);
- if (tmp_qp_state == -1) {
- dev_err(dev, "to_ib_qp_state error\n");
- ret = -EINVAL;
- goto out;
- }
- hr_qp->state = (u8)tmp_qp_state;
- qp_attr->qp_state = (enum ib_qp_state)hr_qp->state;
- qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_MTU_M,
- QP_CONTEXT_QPC_BYTES_48_MTU_S);
- qp_attr->path_mig_state = IB_MIG_ARMED;
- qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
- if (hr_qp->ibqp.qp_type == IB_QPT_UD)
- qp_attr->qkey = QKEY_VAL;
-
- qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M,
- QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S);
- qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164,
- QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M,
- QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S);
- qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_M,
- QP_CONTEXT_QPC_BYTES_36_DEST_QP_S);
- qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) |
- ((roce_get_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) |
- ((roce_get_bit(context->qpc_bytes_4,
- QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3);
-
- if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
- struct ib_global_route *grh =
- rdma_ah_retrieve_grh(&qp_attr->ah_attr);
-
- rdma_ah_set_sl(&qp_attr->ah_attr,
- roce_get_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_SL_M,
- QP_CONTEXT_QPC_BYTES_156_SL_S));
- rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH);
- grh->flow_label =
- roce_get_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M,
- QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S);
- grh->sgid_index =
- roce_get_field(context->qpc_bytes_36,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M,
- QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S);
- grh->hop_limit =
- roce_get_field(context->qpc_bytes_44,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_M,
- QP_CONTEXT_QPC_BYTES_44_HOPLMT_S);
- grh->traffic_class =
- roce_get_field(context->qpc_bytes_48,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_M,
- QP_CONTEXT_QPC_BYTES_48_TCLASS_S);
-
- memcpy(grh->dgid.raw, context->dgid,
- sizeof(grh->dgid.raw));
- }
-
- qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M,
- QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S);
- qp_attr->port_num = hr_qp->port + 1;
- qp_attr->sq_draining = 0;
- qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M,
- QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S);
- qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32,
- QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M,
- QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S);
- qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24,
- QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M,
- QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S));
- qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M,
- QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S));
- qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M,
- QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S);
- qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry);
-
-done:
- qp_attr->cur_qp_state = qp_attr->qp_state;
- qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt;
- qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs;
-
- if (!ibqp->uobject) {
- qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
- qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
- } else {
- qp_attr->cap.max_send_wr = 0;
- qp_attr->cap.max_send_sge = 0;
- }
-
- qp_init_attr->cap = qp_attr->cap;
-
-out:
- mutex_unlock(&hr_qp->mutex);
- kfree(context);
- return ret;
-}
-
-static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
- int qp_attr_mask,
- struct ib_qp_init_attr *qp_init_attr)
-{
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
-
- return hr_qp->doorbell_qpn <= 1 ?
- hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) :
- hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr);
-}
-
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
- struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
- struct hns_roce_cq *send_cq, *recv_cq;
- int ret;
-
- ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET);
- if (ret)
- return ret;
-
- send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
- recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL;
-
- hns_roce_lock_cqs(send_cq, recv_cq);
- if (!udata) {
- if (recv_cq)
- __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn,
- (hr_qp->ibqp.srq ?
- to_hr_srq(hr_qp->ibqp.srq) :
- NULL));
-
- if (send_cq && send_cq != recv_cq)
- __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL);
- }
- hns_roce_qp_remove(hr_dev, hr_qp);
- hns_roce_unlock_cqs(send_cq, recv_cq);
-
- hns_roce_qp_destroy(hr_dev, hr_qp, udata);
-
- return 0;
-}
-
-static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
-{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device);
- struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);
- struct device *dev = &hr_dev->pdev->dev;
- u32 cqe_cnt_ori;
- u32 cqe_cnt_cur;
- int wait_time = 0;
-
- /*
-	 * Before freeing the cq buffer, we need to ensure that outstanding
-	 * CQEs have been written by checking the CQE counter.
- */
- cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
- while (1) {
- if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) &
- HNS_ROCE_CQE_WCMD_EMPTY_BIT)
- break;
-
- cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT);
- if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT)
- break;
-
- msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS);
- if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) {
-			dev_warn(dev, "Destroy cq 0x%lx timed out!\n",
- hr_cq->cqn);
- break;
- }
- wait_time++;
- }
- return 0;
-}
-
-static void set_eq_cons_index_v1(struct hns_roce_eq *eq, u32 req_not)
-{
- roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) |
- (req_not << eq->log_entries), eq->db_reg);
-}
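
The doorbell write above packs two fields into one 32-bit store: the masked
consumer index in the low bits and the request-notification flag just above
the index field. A sketch of that encoding (hypothetical helper mirroring
the expression in the deleted function):

    /* Hypothetical sketch: build an EQ doorbell value from a masked
     * consumer index plus a notify flag shifted past the index field.
     */
    static unsigned int eq_db_val(unsigned int cons_index,
                                  unsigned int idx_mask,
                                  unsigned int req_not,
                                  unsigned int log_entries)
    {
            return (cons_index & idx_mask) | (req_not << log_entries);
    }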
-
-static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe, int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LWQCE_QPC_ERROR:
- dev_warn(dev, "QP %d, QPC error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_MTU_ERROR:
- dev_warn(dev, "QP %d, MTU error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
- dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
- dev_warn(dev, "QP %d, WQE shift error\n", qpn);
- break;
- case HNS_ROCE_LWQCE_SL_ERROR:
- dev_warn(dev, "QP %d, SL error.\n", qpn);
- break;
- case HNS_ROCE_LWQCE_PORT_ERROR:
- dev_warn(dev, "QP %d, port error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int qpn)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- dev_warn(dev, "Local Access Violation Work Queue Error.\n");
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
- dev_warn(dev, "QP %d, R_key violation.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_LENGTH_ERROR:
- dev_warn(dev, "QP %d, length error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_VA_ERROR:
- dev_warn(dev, "QP %d, VA error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_PD_ERROR:
- dev_err(dev, "QP %d, PD error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
- dev_warn(dev, "QP %d, rw acc error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
- dev_warn(dev, "QP %d, key state error.\n", qpn);
- break;
- case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
- dev_warn(dev, "QP %d, MR operation error.\n", qpn);
- break;
- default:
- break;
- }
-}
-
-static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- int phy_port;
- int qpn;
-
- qpn = roce_get_field(aeqe->event.queue_event.num,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
- phy_port = roce_get_field(aeqe->event.queue_event.num,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
- HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
- if (qpn <= 1)
- qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
- "QP %d, phy_port %d.\n", qpn, phy_port);
- break;
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- hns_roce_v1_wq_catas_err_handle(hr_dev, aeqe, qpn);
- break;
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn);
- break;
- default:
- break;
- }
-
- hns_roce_qp_event(hr_dev, qpn, event_type);
-}
-
-static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe,
- int event_type)
-{
- struct device *dev = &hr_dev->pdev->dev;
- u32 cqn;
-
- cqn = roce_get_field(aeqe->event.queue_event.num,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
- HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S);
-
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- dev_warn(dev, "CQ 0x%x access err.\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- dev_warn(dev, "CQ 0x%x overflow\n", cqn);
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
- break;
- default:
- break;
- }
-
- hns_roce_cq_event(hr_dev, cqn, event_type);
-}
-
-static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev,
- struct hns_roce_aeqe *aeqe)
-{
- struct device *dev = &hr_dev->pdev->dev;
-
- switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
- case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
- dev_warn(dev, "SDB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
- dev_warn(dev, "SDB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
- dev_warn(dev, "SDB almost empty.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
- dev_warn(dev, "ODB overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
- dev_warn(dev, "ODB almost overflow.\n");
- break;
- case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
-		dev_warn(dev, "ODB almost empty.\n");
- break;
- default:
- break;
- }
-}
-
-static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQE_SIZE;
-
- return (struct hns_roce_aeqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq)
-{
- struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index);
-
- return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
- !!(eq->cons_index & eq->entries)) ? aeqe : NULL;
-}
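
The XOR above is the classic ownership-bit test for a ring that software
sweeps repeatedly: hardware flips the owner bit each time it laps the ring,
and software compares that bit against the parity of its consumer index
(eq->entries is a power of two, so cons_index & entries is exactly the wrap
bit). A standalone sketch:

    /* Hypothetical sketch: an entry is ready when its owner bit differs
     * in the expected way from the consumer's wrap parity. entries must
     * be a power of two.
     */
    static int eqe_is_ready(unsigned int owner_bit,
                            unsigned int cons_index,
                            unsigned int entries)
    {
            return owner_bit ^ !!(cons_index & entries);
    }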
-
-static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_aeqe *aeqe;
- int aeqes_found = 0;
- int event_type;
-
- while ((aeqe = next_aeqe_sw_v1(eq))) {
- /* Make sure we read the AEQ entry after we have checked the
- * ownership bit
- */
- dma_rmb();
-
- dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n",
- aeqe,
- roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
- event_type = roce_get_field(aeqe->asyn,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
- HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
- switch (event_type) {
- case HNS_ROCE_EVENT_TYPE_PATH_MIG:
- dev_warn(dev, "PATH MIG not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_COMM_EST:
- dev_warn(dev, "COMMUNICATION established\n");
- break;
- case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
- dev_warn(dev, "SQ DRAINED not supported\n");
- break;
- case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
- dev_warn(dev, "PATH MIG failed\n");
- break;
- case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
- case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
- hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
- case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
- case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
-			dev_warn(dev, "SRQ not supported!\n");
- break;
- case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
- case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
- case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
- hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type);
- break;
- case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
- dev_warn(dev, "port change.\n");
- break;
- case HNS_ROCE_EVENT_TYPE_MB:
- hns_roce_cmd_event(hr_dev,
- le16_to_cpu(aeqe->event.cmd.token),
- aeqe->event.cmd.status,
- le64_to_cpu(aeqe->event.cmd.out_param
- ));
- break;
- case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
- hns_roce_v1_db_overflow_handle(hr_dev, aeqe);
- break;
- default:
- dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n",
- event_type, eq->eqn, eq->cons_index);
- break;
- }
-
- eq->cons_index++;
- aeqes_found = 1;
-
- if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1)
- eq->cons_index = 0;
- }
-
- set_eq_cons_index_v1(eq, 0);
-
- return aeqes_found;
-}
-
-static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry)
-{
- unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQE_SIZE;
-
- return (struct hns_roce_ceqe *)((u8 *)
- (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
- off % HNS_ROCE_BA_SIZE);
-}
-
-static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index);
-
- return (!!(roce_get_bit(ceqe->comp,
- HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^
- (!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
-}
-
-static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- struct hns_roce_ceqe *ceqe;
- int ceqes_found = 0;
- u32 cqn;
-
- while ((ceqe = next_ceqe_sw_v1(eq))) {
- /* Make sure we read CEQ entry after we have checked the
- * ownership bit
- */
- dma_rmb();
-
- cqn = roce_get_field(ceqe->comp,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
- HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
- hns_roce_cq_completion(hr_dev, cqn);
-
- ++eq->cons_index;
- ceqes_found = 1;
-
- if (eq->cons_index >
- EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1)
- eq->cons_index = 0;
- }
-
- set_eq_cons_index_v1(eq, 0);
-
- return ceqes_found;
-}
-
-static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr)
-{
- struct hns_roce_eq *eq = eq_ptr;
- struct hns_roce_dev *hr_dev = eq->hr_dev;
- int int_work;
-
- if (eq->type_flag == HNS_ROCE_CEQ)
-		/* CEQ irq routine; CEQ is a pulse irq, no clearing needed */
- int_work = hns_roce_v1_ceq_int(hr_dev, eq);
- else
-		/* AEQ irq routine; AEQ is a pulse irq, no clearing needed */
- int_work = hns_roce_v1_aeq_int(hr_dev, eq);
-
- return IRQ_RETVAL(int_work);
-}
-
-static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id)
-{
- struct hns_roce_dev *hr_dev = dev_id;
- struct device *dev = &hr_dev->pdev->dev;
- int int_work = 0;
- u32 caepaemask_val;
- u32 cealmovf_val;
- u32 caepaest_val;
- u32 aeshift_val;
- u32 ceshift_val;
- u32 cemask_val;
- __le32 tmp;
- int i;
-
-	/*
-	 * Abnormal interrupts (AEQ overflow, ECC multi-bit error, CEQ
-	 * overflow) must be cleared explicitly: mask the irq, clear the
-	 * interrupt state, then cancel the mask.
-	 */
- aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);
- tmp = cpu_to_le32(aeshift_val);
-
- /* AEQE overflow */
- if (roce_get_bit(tmp,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "AEQ overflow!\n");
-
- /* Set mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- tmp = cpu_to_le32(caepaemask_val);
- roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- caepaemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
- tmp = cpu_to_le32(caepaest_val);
- roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
- caepaest_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);
-
- /* Clear mask */
- caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- tmp = cpu_to_le32(caepaemask_val);
- roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- caepaemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
- }
-
- /* CEQ almost overflow */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
- i * CEQ_REG_OFFSET);
- tmp = cpu_to_le32(ceshift_val);
-
- if (roce_get_bit(tmp,
- ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
- dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
- int_work++;
-
- /* Set mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- tmp = cpu_to_le32(cemask_val);
- roce_set_bit(tmp,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_ENABLE);
- cemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
-
- /* Clear int state(INT_WC : write 1 clear) */
- cealmovf_val = roce_read(hr_dev,
- ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET);
- tmp = cpu_to_le32(cealmovf_val);
- roce_set_bit(tmp,
- ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
- 1);
- cealmovf_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
- i * CEQ_REG_OFFSET, cealmovf_val);
-
- /* Clear mask */
- cemask_val = roce_read(hr_dev,
- ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET);
- tmp = cpu_to_le32(cemask_val);
- roce_set_bit(tmp,
- ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
- HNS_ROCE_INT_MASK_DISABLE);
- cemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, cemask_val);
- }
- }
-
- /* ECC multi-bit error alarm */
- dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));
-
- dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
- roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));
-
- return IRQ_RETVAL(int_work);
-}
-
-static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev)
-{
- u32 aemask_val;
- int masken = 0;
- __le32 tmp;
- int i;
-
- /* AEQ INT */
- aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
- tmp = cpu_to_le32(aemask_val);
- roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
- masken);
- roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
- aemask_val = le32_to_cpu(tmp);
- roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);
-
- /* CEQ INT */
- for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
- /* IRQ mask */
- roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
- i * CEQ_REG_OFFSET, masken);
- }
-}
-
-static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
- int i;
-
- if (!eq->buf_list)
- return;
-
- for (i = 0; i < npages; ++i)
- dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
- eq->buf_list[i].buf, eq->buf_list[i].map);
-
- kfree(eq->buf_list);
-}
-
-static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
- int enable_flag)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
- __le32 tmp;
- u32 val;
-
- val = readl(eqc);
- tmp = cpu_to_le32(val);
-
- if (enable_flag)
- roce_set_field(tmp,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_VALID);
- else
- roce_set_field(tmp,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
-
- val = le32_to_cpu(tmp);
- writel(val, eqc);
-}
-
-static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev,
- struct hns_roce_eq *eq)
-{
- void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
- struct device *dev = &hr_dev->pdev->dev;
- dma_addr_t tmp_dma_addr;
- u32 eqcuridx_val;
- u32 eqconsindx_val;
- u32 eqshift_val;
- __le32 tmp2 = 0;
- __le32 tmp1 = 0;
- __le32 tmp = 0;
- int num_bas;
- int ret;
- int i;
-
- num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
- HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
-
- if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
-		dev_err(dev, "eq buf size %d exceeds ba size (%d), bas needed = %d\n",
- (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
- num_bas);
- return -EINVAL;
- }
-
- eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
- if (!eq->buf_list)
- return -ENOMEM;
-
- for (i = 0; i < num_bas; ++i) {
- eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
- &tmp_dma_addr,
- GFP_KERNEL);
- if (!eq->buf_list[i].buf) {
- ret = -ENOMEM;
- goto err_out_free_pages;
- }
-
- eq->buf_list[i].map = tmp_dma_addr;
- }
- eq->cons_index = 0;
- roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
- HNS_ROCE_EQ_STAT_INVALID);
- roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
- ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
- eq->log_entries);
- eqshift_val = le32_to_cpu(tmp);
- writel(eqshift_val, eqc);
-
-	/* Configure eq extended address bits 12~44 */
- writel((u32)(eq->buf_list[0].map >> 12), eqc + 4);
-
-	/*
-	 * Configure eq extended address bits 45~49.
-	 * 44 = 32 + 12: the address handed to hardware is shifted by 12
-	 * because 4K pages are used, and by a further 32 to extract the
-	 * high 32-bit value written to this register.
-	 */
- roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
- eq->buf_list[0].map >> 44);
- roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
- ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
- eqcuridx_val = le32_to_cpu(tmp1);
- writel(eqcuridx_val, eqc + 8);
-
- /* Configure eq consumer index */
- roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
- ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
- eqconsindx_val = le32_to_cpu(tmp2);
- writel(eqconsindx_val, eqc + 0xc);
-
- return 0;
-
-err_out_free_pages:
- for (i -= 1; i >= 0; i--)
- dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
- eq->buf_list[i].map);
-
- kfree(eq->buf_list);
- return ret;
-}
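
The register writes above split one DMA base address across two fields:
bits 12 and up (the 4 KiB page frame number) into one register, and the
bits above that 32-bit field into another, per the 44 = 32 + 12 comment.
A small sketch of the split (illustrative only; field widths as the
comment states):

    #include <stdint.h>

    /* Hypothetical sketch: split a bus address into the two register
     * fields, dropping the 4 KiB page offset first.
     */
    static void split_eq_base(uint64_t map, uint32_t *lo, uint32_t *hi)
    {
            *lo = (uint32_t)(map >> 12); /* page frame, bits 12..43 */
            *hi = (uint32_t)(map >> 44); /* remaining high bits */
    }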
-
-static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
- struct device *dev = &hr_dev->pdev->dev;
- struct hns_roce_eq *eq;
- int irq_num;
- int eq_num;
- int ret;
- int i, j;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- irq_num = eq_num + hr_dev->caps.num_other_vectors;
-
- eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
- if (!eq_table->eq)
- return -ENOMEM;
-
- eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
- GFP_KERNEL);
- if (!eq_table->eqc_base) {
- ret = -ENOMEM;
- goto err_eqc_base_alloc_fail;
- }
-
- for (i = 0; i < eq_num; i++) {
- eq = &eq_table->eq[i];
- eq->hr_dev = hr_dev;
- eq->eqn = i;
- eq->irq = hr_dev->irq[i];
- eq->log_page_size = PAGE_SHIFT;
-
- if (i < hr_dev->caps.num_comp_vectors) {
- /* CEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_SHIFT_0_REG +
- CEQ_REG_OFFSET * i;
- eq->type_flag = HNS_ROCE_CEQ;
- eq->db_reg = hr_dev->reg_base +
- ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
- CEQ_REG_OFFSET * i;
- eq->entries = hr_dev->caps.ceqe_depth;
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = HNS_ROCE_CEQE_SIZE;
- } else {
- /* AEQ */
- eq_table->eqc_base[i] = hr_dev->reg_base +
- ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
- eq->type_flag = HNS_ROCE_AEQ;
- eq->db_reg = hr_dev->reg_base +
- ROCEE_CAEP_AEQE_CONS_IDX_REG;
- eq->entries = hr_dev->caps.aeqe_depth;
- eq->log_entries = ilog2(eq->entries);
- eq->eqe_size = HNS_ROCE_AEQE_SIZE;
- }
- }
-
- /* Disable irq */
- hns_roce_v1_int_mask_enable(hr_dev);
-
- /* Configure ce int interval */
- roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_INTERVAL);
-
- /* Configure ce int burst num */
- roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
- HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
-
- for (i = 0; i < eq_num; i++) {
- ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]);
- if (ret) {
- dev_err(dev, "eq create failed\n");
- goto err_create_eq_fail;
- }
- }
-
- for (j = 0; j < irq_num; j++) {
- if (j < eq_num)
- ret = request_irq(hr_dev->irq[j],
- hns_roce_v1_msix_interrupt_eq, 0,
- hr_dev->irq_names[j],
- &eq_table->eq[j]);
- else
- ret = request_irq(hr_dev->irq[j],
- hns_roce_v1_msix_interrupt_abn, 0,
- hr_dev->irq_names[j], hr_dev);
-
- if (ret) {
- dev_err(dev, "request irq error!\n");
- goto err_request_irq_fail;
- }
- }
-
- for (i = 0; i < eq_num; i++)
- hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE);
-
- return 0;
-
-err_request_irq_fail:
- for (j -= 1; j >= 0; j--)
- free_irq(hr_dev->irq[j], &eq_table->eq[j]);
-
-err_create_eq_fail:
- for (i -= 1; i >= 0; i--)
- hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
-
- kfree(eq_table->eqc_base);
-
-err_eqc_base_alloc_fail:
- kfree(eq_table->eq);
-
- return ret;
-}
-
-static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
-{
- struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
- int irq_num;
- int eq_num;
- int i;
-
- eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
- irq_num = eq_num + hr_dev->caps.num_other_vectors;
- for (i = 0; i < eq_num; i++) {
- /* Disable EQ */
- hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE);
-
- free_irq(hr_dev->irq[i], &eq_table->eq[i]);
-
- hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]);
- }
- for (i = eq_num; i < irq_num; i++)
- free_irq(hr_dev->irq[i], hr_dev);
-
- kfree(eq_table->eqc_base);
- kfree(eq_table->eq);
-}
-
-static const struct ib_device_ops hns_roce_v1_dev_ops = {
- .destroy_qp = hns_roce_v1_destroy_qp,
- .poll_cq = hns_roce_v1_poll_cq,
- .post_recv = hns_roce_v1_post_recv,
- .post_send = hns_roce_v1_post_send,
- .query_qp = hns_roce_v1_query_qp,
- .req_notify_cq = hns_roce_v1_req_notify_cq,
-};
-
-static const struct hns_roce_hw hns_roce_hw_v1 = {
- .reset = hns_roce_v1_reset,
- .hw_profile = hns_roce_v1_profile,
- .hw_init = hns_roce_v1_init,
- .hw_exit = hns_roce_v1_exit,
- .post_mbox = hns_roce_v1_post_mbox,
- .poll_mbox_done = hns_roce_v1_chk_mbox,
- .set_gid = hns_roce_v1_set_gid,
- .set_mac = hns_roce_v1_set_mac,
- .set_mtu = hns_roce_v1_set_mtu,
- .write_mtpt = hns_roce_v1_write_mtpt,
- .write_cqc = hns_roce_v1_write_cqc,
- .set_hem = hns_roce_v1_set_hem,
- .clear_hem = hns_roce_v1_clear_hem,
- .modify_qp = hns_roce_v1_modify_qp,
- .dereg_mr = hns_roce_v1_dereg_mr,
- .destroy_cq = hns_roce_v1_destroy_cq,
- .init_eq = hns_roce_v1_init_eq_table,
- .cleanup_eq = hns_roce_v1_cleanup_eq_table,
- .hns_roce_dev_ops = &hns_roce_v1_dev_ops,
-};
-
-static const struct of_device_id hns_roce_of_match[] = {
- { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, },
- {},
-};
-MODULE_DEVICE_TABLE(of, hns_roce_of_match);
-
-static const struct acpi_device_id hns_roce_acpi_match[] = {
- { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 },
- {},
-};
-MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match);
-
-static struct platform_device *
-hns_roce_find_pdev(struct fwnode_handle *fwnode)
-{
- struct device *dev;
-
- /* get the 'device' corresponding to the matching 'fwnode' */
- dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
- /* get the platform device */
- return dev ? to_platform_device(dev) : NULL;
-}
-
-static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev)
-{
- struct device *dev = &hr_dev->pdev->dev;
- struct platform_device *pdev = NULL;
- struct net_device *netdev = NULL;
- struct device_node *net_node;
- int port_cnt = 0;
- u8 phy_port;
- int ret;
- int i;
-
- /* check if we are compatible with the underlying SoC */
- if (dev_of_node(dev)) {
- const struct of_device_id *of_id;
-
- of_id = of_match_node(hns_roce_of_match, dev->of_node);
- if (!of_id) {
- dev_err(dev, "device is not compatible!\n");
- return -ENXIO;
- }
- hr_dev->hw = (const struct hns_roce_hw *)of_id->data;
- if (!hr_dev->hw) {
- dev_err(dev, "couldn't get H/W specific DT data!\n");
- return -ENXIO;
- }
- } else if (is_acpi_device_node(dev->fwnode)) {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(hns_roce_acpi_match, dev);
- if (!acpi_id) {
- dev_err(dev, "device is not compatible!\n");
- return -ENXIO;
- }
- hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data;
- if (!hr_dev->hw) {
- dev_err(dev, "couldn't get H/W specific ACPI data!\n");
- return -ENXIO;
- }
- } else {
- dev_err(dev, "can't read compatibility data from DT or ACPI\n");
- return -ENXIO;
- }
-
- /* get the mapped register base address */
- hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0);
- if (IS_ERR(hr_dev->reg_base))
- return PTR_ERR(hr_dev->reg_base);
-
- /* read the node_guid of IB device from the DT or ACPI */
- ret = device_property_read_u8_array(dev, "node-guid",
- (u8 *)&hr_dev->ib_dev.node_guid,
- GUID_LEN);
- if (ret) {
- dev_err(dev, "couldn't get node_guid from DT or ACPI!\n");
- return ret;
- }
-
- /* get the RoCE associated ethernet ports or netdevices */
- for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) {
- if (dev_of_node(dev)) {
- net_node = of_parse_phandle(dev->of_node, "eth-handle",
- i);
- if (!net_node)
- continue;
- pdev = of_find_device_by_node(net_node);
- } else if (is_acpi_device_node(dev->fwnode)) {
- struct fwnode_reference_args args;
-
- ret = acpi_node_get_property_reference(dev->fwnode,
- "eth-handle",
- i, &args);
- if (ret)
- continue;
- pdev = hns_roce_find_pdev(args.fwnode);
- } else {
- dev_err(dev, "cannot read data from DT or ACPI\n");
- return -ENXIO;
- }
-
- if (pdev) {
- netdev = platform_get_drvdata(pdev);
- phy_port = (u8)i;
- if (netdev) {
- hr_dev->iboe.netdevs[port_cnt] = netdev;
- hr_dev->iboe.phy_port[port_cnt] = phy_port;
- } else {
- dev_err(dev, "no netdev found with pdev %s\n",
- pdev->name);
- return -ENODEV;
- }
- port_cnt++;
- }
- }
-
- if (port_cnt == 0) {
- dev_err(dev, "unable to get eth-handle for available ports!\n");
- return -EINVAL;
- }
-
- hr_dev->caps.num_ports = port_cnt;
-
- /* cmd issue mode: 0 is poll, 1 is event */
- hr_dev->cmd_mod = 1;
- hr_dev->loop_idc = 0;
- hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG;
- hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG;
-
- /* read the interrupt names from the DT or ACPI */
- ret = device_property_read_string_array(dev, "interrupt-names",
- hr_dev->irq_names,
- HNS_ROCE_V1_MAX_IRQ_NUM);
- if (ret < 0) {
- dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n");
- return ret;
- }
-
- /* fetch the interrupt numbers */
- for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) {
- hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i);
- if (hr_dev->irq[i] <= 0)
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
- * hns_roce_probe - RoCE driver entry point
- * @pdev: pointer to platform device
- *
- * Return: 0 on success, or a negative errno on failure.
- */
-static int hns_roce_probe(struct platform_device *pdev)
-{
- int ret;
- struct hns_roce_dev *hr_dev;
- struct device *dev = &pdev->dev;
-
- hr_dev = ib_alloc_device(hns_roce_dev, ib_dev);
- if (!hr_dev)
- return -ENOMEM;
-
- hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL);
- if (!hr_dev->priv) {
- ret = -ENOMEM;
- goto error_failed_kzalloc;
- }
-
- hr_dev->pdev = pdev;
- hr_dev->dev = dev;
- platform_set_drvdata(pdev, hr_dev);
-
- if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) &&
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) {
- dev_err(dev, "Not usable DMA addressing mode\n");
- ret = -EIO;
- goto error_failed_get_cfg;
- }
-
- ret = hns_roce_get_cfg(hr_dev);
- if (ret) {
- dev_err(dev, "Get Configuration failed!\n");
- goto error_failed_get_cfg;
- }
-
- ret = hns_roce_init(hr_dev);
- if (ret) {
- dev_err(dev, "RoCE engine init failed!\n");
- goto error_failed_get_cfg;
- }
-
- return 0;
-
-error_failed_get_cfg:
- kfree(hr_dev->priv);
-
-error_failed_kzalloc:
- ib_dealloc_device(&hr_dev->ib_dev);
-
- return ret;
-}
-
-/**
- * hns_roce_remove - remove RoCE device
- * @pdev: pointer to platform device
- */
-static int hns_roce_remove(struct platform_device *pdev)
-{
- struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev);
-
- hns_roce_exit(hr_dev);
- kfree(hr_dev->priv);
- ib_dealloc_device(&hr_dev->ib_dev);
-
- return 0;
-}
-
-static struct platform_driver hns_roce_driver = {
- .probe = hns_roce_probe,
- .remove = hns_roce_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = hns_roce_of_match,
- .acpi_match_table = ACPI_PTR(hns_roce_acpi_match),
- },
-};
-
-module_platform_driver(hns_roce_driver);
-
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>");
-MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>");
-MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>");
-MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver");
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
deleted file mode 100644
index 60fdcbae6729..000000000000
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h
+++ /dev/null
@@ -1,1147 +0,0 @@
-/*
- * Copyright (c) 2016 Hisilicon Limited.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _HNS_ROCE_HW_V1_H
-#define _HNS_ROCE_HW_V1_H
-
-#define CQ_STATE_VALID 2
-
-#define HNS_ROCE_V1_MAX_PD_NUM 0x8000
-#define HNS_ROCE_V1_MAX_CQ_NUM 0x10000
-#define HNS_ROCE_V1_MAX_CQE_NUM 0x8000
-
-#define HNS_ROCE_V1_MAX_QP_NUM 0x40000
-#define HNS_ROCE_V1_MAX_WQE_NUM 0x4000
-
-#define HNS_ROCE_V1_MAX_MTPT_NUM 0x80000
-
-#define HNS_ROCE_V1_MAX_MTT_SEGS 0x100000
-
-#define HNS_ROCE_V1_MAX_QP_INIT_RDMA 128
-#define HNS_ROCE_V1_MAX_QP_DEST_RDMA 128
-
-#define HNS_ROCE_V1_MAX_SQ_DESC_SZ 64
-#define HNS_ROCE_V1_MAX_RQ_DESC_SZ 64
-#define HNS_ROCE_V1_SG_NUM 2
-#define HNS_ROCE_V1_INLINE_SIZE 32
-
-#define HNS_ROCE_V1_UAR_NUM 256
-#define HNS_ROCE_V1_PHY_UAR_NUM 8
-
-#define HNS_ROCE_V1_GID_NUM 16
-#define HNS_ROCE_V1_RESV_QP 8
-
-#define HNS_ROCE_V1_MAX_IRQ_NUM 34
-#define HNS_ROCE_V1_COMP_VEC_NUM 32
-#define HNS_ROCE_V1_AEQE_VEC_NUM 1
-#define HNS_ROCE_V1_ABNORMAL_VEC_NUM 1
-
-#define HNS_ROCE_V1_COMP_EQE_NUM 0x8000
-#define HNS_ROCE_V1_ASYNC_EQE_NUM 0x400
-
-#define HNS_ROCE_V1_QPC_SIZE 256
-#define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8
-#define HNS_ROCE_V1_CQC_ENTRY_SIZE 64
-#define HNS_ROCE_V1_MTPT_ENTRY_SIZE 64
-#define HNS_ROCE_V1_MTT_ENTRY_SIZE 64
-
-#define HNS_ROCE_V1_CQE_SIZE 32
-#define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000
-
-#define HNS_ROCE_V1_TABLE_CHUNK_SIZE (1 << 17)
-
-#define HNS_ROCE_V1_EXT_RAQ_WF 8
-#define HNS_ROCE_V1_RAQ_ENTRY 64
-#define HNS_ROCE_V1_RAQ_DEPTH 32768
-#define HNS_ROCE_V1_RAQ_SIZE (HNS_ROCE_V1_RAQ_ENTRY * HNS_ROCE_V1_RAQ_DEPTH)
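-/* for reference: 64-byte entries * 32768-entry depth = 2 MiB of RAQ space */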
-
-#define HNS_ROCE_V1_SDB_DEPTH 0x400
-#define HNS_ROCE_V1_ODB_DEPTH 0x400
-
-#define HNS_ROCE_V1_DB_RSVD 0x80
-
-#define HNS_ROCE_V1_SDB_ALEPT HNS_ROCE_V1_DB_RSVD
-#define HNS_ROCE_V1_SDB_ALFUL (HNS_ROCE_V1_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD)
-#define HNS_ROCE_V1_ODB_ALEPT HNS_ROCE_V1_DB_RSVD
-#define HNS_ROCE_V1_ODB_ALFUL (HNS_ROCE_V1_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
-
-#define HNS_ROCE_V1_EXT_SDB_DEPTH 0x4000
-#define HNS_ROCE_V1_EXT_ODB_DEPTH 0x4000
-#define HNS_ROCE_V1_EXT_SDB_ENTRY 16
-#define HNS_ROCE_V1_EXT_ODB_ENTRY 16
-#define HNS_ROCE_V1_EXT_SDB_SIZE \
- (HNS_ROCE_V1_EXT_SDB_DEPTH * HNS_ROCE_V1_EXT_SDB_ENTRY)
-#define HNS_ROCE_V1_EXT_ODB_SIZE \
- (HNS_ROCE_V1_EXT_ODB_DEPTH * HNS_ROCE_V1_EXT_ODB_ENTRY)
-
-#define HNS_ROCE_V1_EXT_SDB_ALEPT HNS_ROCE_V1_DB_RSVD
-#define HNS_ROCE_V1_EXT_SDB_ALFUL \
- (HNS_ROCE_V1_EXT_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD)
-#define HNS_ROCE_V1_EXT_ODB_ALEPT HNS_ROCE_V1_DB_RSVD
-#define HNS_ROCE_V1_EXT_ODB_ALFUL \
- (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD)
-
-#define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS 50000
-#define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS 10000
-#define HNS_ROCE_V1_FREE_MR_WAIT_VALUE 5
-#define HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE 20
-
-#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17)
-
-#define HNS_ROCE_V1_TPTR_ENTRY_SIZE 2
-#define HNS_ROCE_V1_TPTR_BUF_SIZE \
- (HNS_ROCE_V1_TPTR_ENTRY_SIZE * HNS_ROCE_V1_MAX_CQ_NUM)
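-/* for reference: 2 bytes per CQ * 0x10000 CQs = a 128 KiB tail-pointer buffer */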
-
-#define HNS_ROCE_ODB_POLL_MODE 0
-
-#define HNS_ROCE_SDB_NORMAL_MODE 0
-#define HNS_ROCE_SDB_EXTEND_MODE 1
-
-#define HNS_ROCE_ODB_EXTEND_MODE 1
-
-#define KEY_VALID 0x02
-
-#define HNS_ROCE_CQE_QPN_MASK 0x3ffff
-#define HNS_ROCE_CQE_STATUS_MASK 0x1f
-#define HNS_ROCE_CQE_OPCODE_MASK 0xf
-
-#define HNS_ROCE_CQE_SUCCESS 0x00
-#define HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR 0x01
-#define HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR 0x02
-#define HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR 0x03
-#define HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR 0x04
-#define HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR 0x05
-#define HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR 0x06
-#define HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR 0x07
-#define HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR 0x08
-#define HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR 0x09
-#define HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR 0x0a
-#define HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR 0x0b
-#define HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR 0x0c
-
-#define QP1C_CFGN_OFFSET 0x28
-#define PHY_PORT_OFFSET 0x8
-#define MTPT_IDX_SHIFT 16
-#define ALL_PORT_VAL_OPEN 0x3f
-#define POL_TIME_INTERVAL_VAL 0x80
-#define SLEEP_TIME_INTERVAL 20
-#define SQ_PSN_SHIFT 8
-#define QKEY_VAL 0x80010000
-#define SDB_INV_CNT_OFFSET 8
-
-#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10
-#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10
-
-#define HNS_ROCE_INT_MASK_DISABLE 0
-#define HNS_ROCE_INT_MASK_ENABLE 1
-
-#define CEQ_REG_OFFSET 0x18
-
-#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0
-
-#define HNS_ROCE_V1_CONS_IDX_M GENMASK(15, 0)
-
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16
-#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M GENMASK(31, 16)
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16
-#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M GENMASK(23, 16)
-
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24
-#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M GENMASK(30, 24)
-
-#define HNS_ROCE_AEQE_U32_4_OWNER_S 31
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M GENMASK(23, 0)
-
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25
-#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M GENMASK(27, 25)
-
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M GENMASK(15, 0)
-
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0
-#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0)
-
-/* Local Work Queue Catastrophic Error, SUBTYPE 0x5 */
-enum {
- HNS_ROCE_LWQCE_QPC_ERROR = 1,
- HNS_ROCE_LWQCE_MTU_ERROR,
- HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR,
- HNS_ROCE_LWQCE_WQE_ADDR_ERROR,
- HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR,
- HNS_ROCE_LWQCE_SL_ERROR,
- HNS_ROCE_LWQCE_PORT_ERROR,
-};
-
-/* Local Access Violation Work Queue Error, SUBTYPE 0x7 */
-enum {
- HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1,
- HNS_ROCE_LAVWQE_LENGTH_ERROR,
- HNS_ROCE_LAVWQE_VA_ERROR,
- HNS_ROCE_LAVWQE_PD_ERROR,
- HNS_ROCE_LAVWQE_RW_ACC_ERROR,
- HNS_ROCE_LAVWQE_KEY_STATE_ERROR,
- HNS_ROCE_LAVWQE_MR_OPERATION_ERROR,
-};
-
-/* DOORBELL overflow subtype */
-enum {
- HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1,
- HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF,
- HNS_ROCE_DB_SUBTYPE_ODB_OVF,
- HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF,
- HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP,
- HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP,
-};
-
-enum {
-	/* RQ & SRQ related operations */
- HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06,
- HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE,
-};
-
-enum {
- HNS_ROCE_PORT_DOWN = 0,
- HNS_ROCE_PORT_UP,
-};
-
-struct hns_roce_cq_context {
- __le32 cqc_byte_4;
- __le32 cq_bt_l;
- __le32 cqc_byte_12;
- __le32 cur_cqe_ba0_l;
- __le32 cqc_byte_20;
- __le32 cqe_tptr_addr_l;
- __le32 cur_cqe_ba1_l;
- __le32 cqc_byte_32;
-};
-
-#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S 0
-#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M \
- (((1UL << 2) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S)
-
-#define CQ_CONTEXT_CQC_BYTE_4_CQN_S 16
-#define CQ_CONTEXT_CQC_BYTE_4_CQN_M \
- (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQN_S)
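-/*
- * The (((1UL << n) - 1) << s) pattern used throughout this header builds
- * an n-bit field mask at bit offset s; e.g. the CQN mask above evaluates
- * to 0xFFFF0000, i.e. bits 16~31 of cqc_byte_4.
- */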
-
-#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S 0
-#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M \
- (((1UL << 17) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S)
-
-#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S 20
-#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M \
- (((1UL << 4) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S)
-
-#define CQ_CONTEXT_CQC_BYTE_12_CEQN_S 24
-#define CQ_CONTEXT_CQC_BYTE_12_CEQN_M \
- (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_12_CEQN_S)
-
-#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S 0
-#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M \
- (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S)
-
-#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S 16
-#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M \
- (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S)
-
-#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S 8
-#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M \
- (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S)
-
-#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S 0
-#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M \
- (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S)
-
-#define CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S 9
-
-#define CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S 8
-#define CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S 14
-#define CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S 15
-
-#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S 16
-#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M \
- (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S)
-
-struct hns_roce_cqe {
- __le32 cqe_byte_4;
- union {
- __le32 r_key;
- __le32 immediate_data;
- };
- __le32 byte_cnt;
- __le32 cqe_byte_16;
- __le32 cqe_byte_20;
- __le32 s_mac_l;
- __le32 cqe_byte_28;
- __le32 reserved;
-};
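-/* eight __le32 words, matching HNS_ROCE_V1_CQE_SIZE (32 bytes) above */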
-
-#define CQE_BYTE_4_OWNER_S 7
-#define CQE_BYTE_4_SQ_RQ_FLAG_S 14
-
-#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_S 8
-#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_M \
- (((1UL << 5) - 1) << CQE_BYTE_4_STATUS_OF_THE_OPERATION_S)
-
-#define CQE_BYTE_4_WQE_INDEX_S 16
-#define CQE_BYTE_4_WQE_INDEX_M (((1UL << 14) - 1) << CQE_BYTE_4_WQE_INDEX_S)
-
-#define CQE_BYTE_4_OPERATION_TYPE_S 0
-#define CQE_BYTE_4_OPERATION_TYPE_M \
- (((1UL << 4) - 1) << CQE_BYTE_4_OPERATION_TYPE_S)
-
-#define CQE_BYTE_4_IMM_INDICATOR_S 15
-
-#define CQE_BYTE_16_LOCAL_QPN_S 0
-#define CQE_BYTE_16_LOCAL_QPN_M (((1UL << 24) - 1) << CQE_BYTE_16_LOCAL_QPN_S)
-
-#define CQE_BYTE_20_PORT_NUM_S 26
-#define CQE_BYTE_20_PORT_NUM_M (((1UL << 3) - 1) << CQE_BYTE_20_PORT_NUM_S)
-
-#define CQE_BYTE_20_SL_S 24
-#define CQE_BYTE_20_SL_M (((1UL << 2) - 1) << CQE_BYTE_20_SL_S)
-
-#define CQE_BYTE_20_REMOTE_QPN_S 0
-#define CQE_BYTE_20_REMOTE_QPN_M \
- (((1UL << 24) - 1) << CQE_BYTE_20_REMOTE_QPN_S)
-
-#define CQE_BYTE_20_GRH_PRESENT_S 29
-
-#define CQE_BYTE_28_P_KEY_IDX_S 16
-#define CQE_BYTE_28_P_KEY_IDX_M (((1UL << 16) - 1) << CQE_BYTE_28_P_KEY_IDX_S)
-
-#define CQ_DB_REQ_NOT_SOL 0
-#define CQ_DB_REQ_NOT (1 << 16)
-
-struct hns_roce_v1_mpt_entry {
- __le32 mpt_byte_4;
- __le32 pbl_addr_l;
- __le32 mpt_byte_12;
- __le32 virt_addr_l;
- __le32 virt_addr_h;
- __le32 length;
- __le32 mpt_byte_28;
- __le32 pa0_l;
- __le32 mpt_byte_36;
- __le32 mpt_byte_40;
- __le32 mpt_byte_44;
- __le32 mpt_byte_48;
- __le32 pa4_l;
- __le32 mpt_byte_56;
- __le32 mpt_byte_60;
- __le32 mpt_byte_64;
-};
-
-#define MPT_BYTE_4_KEY_STATE_S 0
-#define MPT_BYTE_4_KEY_STATE_M (((1UL << 2) - 1) << MPT_BYTE_4_KEY_STATE_S)
-
-#define MPT_BYTE_4_KEY_S 8
-#define MPT_BYTE_4_KEY_M (((1UL << 8) - 1) << MPT_BYTE_4_KEY_S)
-
-#define MPT_BYTE_4_PAGE_SIZE_S 16
-#define MPT_BYTE_4_PAGE_SIZE_M (((1UL << 2) - 1) << MPT_BYTE_4_PAGE_SIZE_S)
-
-#define MPT_BYTE_4_MW_TYPE_S 20
-
-#define MPT_BYTE_4_MW_BIND_ENABLE_S 21
-
-#define MPT_BYTE_4_OWN_S 22
-
-#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_S 24
-#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_M \
- (((1UL << 2) - 1) << MPT_BYTE_4_MEMORY_LOCATION_TYPE_S)
-
-#define MPT_BYTE_4_REMOTE_ATOMIC_S 26
-#define MPT_BYTE_4_LOCAL_WRITE_S 27
-#define MPT_BYTE_4_REMOTE_WRITE_S 28
-#define MPT_BYTE_4_REMOTE_READ_S 29
-#define MPT_BYTE_4_REMOTE_INVAL_ENABLE_S 30
-#define MPT_BYTE_4_ADDRESS_TYPE_S 31
-
-#define MPT_BYTE_12_PBL_ADDR_H_S 0
-#define MPT_BYTE_12_PBL_ADDR_H_M \
- (((1UL << 17) - 1) << MPT_BYTE_12_PBL_ADDR_H_S)
-
-#define MPT_BYTE_12_MW_BIND_COUNTER_S 17
-#define MPT_BYTE_12_MW_BIND_COUNTER_M \
- (((1UL << 15) - 1) << MPT_BYTE_12_MW_BIND_COUNTER_S)
-
-#define MPT_BYTE_28_PD_S 0
-#define MPT_BYTE_28_PD_M (((1UL << 16) - 1) << MPT_BYTE_28_PD_S)
-
-#define MPT_BYTE_28_L_KEY_IDX_L_S 16
-#define MPT_BYTE_28_L_KEY_IDX_L_M \
- (((1UL << 16) - 1) << MPT_BYTE_28_L_KEY_IDX_L_S)
-
-#define MPT_BYTE_36_PA0_H_S 0
-#define MPT_BYTE_36_PA0_H_M (((1UL << 5) - 1) << MPT_BYTE_36_PA0_H_S)
-
-#define MPT_BYTE_36_PA1_L_S 8
-#define MPT_BYTE_36_PA1_L_M (((1UL << 24) - 1) << MPT_BYTE_36_PA1_L_S)
-
-#define MPT_BYTE_40_PA1_H_S 0
-#define MPT_BYTE_40_PA1_H_M (((1UL << 13) - 1) << MPT_BYTE_40_PA1_H_S)
-
-#define MPT_BYTE_40_PA2_L_S 16
-#define MPT_BYTE_40_PA2_L_M (((1UL << 16) - 1) << MPT_BYTE_40_PA2_L_S)
-
-#define MPT_BYTE_44_PA2_H_S 0
-#define MPT_BYTE_44_PA2_H_M (((1UL << 21) - 1) << MPT_BYTE_44_PA2_H_S)
-
-#define MPT_BYTE_44_PA3_L_S 24
-#define MPT_BYTE_44_PA3_L_M (((1UL << 8) - 1) << MPT_BYTE_44_PA3_L_S)
-
-#define MPT_BYTE_48_PA3_H_S 0
-#define MPT_BYTE_48_PA3_H_M (((1UL << 29) - 1) << MPT_BYTE_48_PA3_H_S)
-
-#define MPT_BYTE_56_PA4_H_S 0
-#define MPT_BYTE_56_PA4_H_M (((1UL << 5) - 1) << MPT_BYTE_56_PA4_H_S)
-
-#define MPT_BYTE_56_PA5_L_S 8
-#define MPT_BYTE_56_PA5_L_M (((1UL << 24) - 1) << MPT_BYTE_56_PA5_L_S)
-
-#define MPT_BYTE_60_PA5_H_S 0
-#define MPT_BYTE_60_PA5_H_M (((1UL << 13) - 1) << MPT_BYTE_60_PA5_H_S)
-
-#define MPT_BYTE_60_PA6_L_S 16
-#define MPT_BYTE_60_PA6_L_M (((1UL << 16) - 1) << MPT_BYTE_60_PA6_L_S)
-
-#define MPT_BYTE_64_PA6_H_S 0
-#define MPT_BYTE_64_PA6_H_M (((1UL << 21) - 1) << MPT_BYTE_64_PA6_H_S)
-
-#define MPT_BYTE_64_L_KEY_IDX_H_S 24
-#define MPT_BYTE_64_L_KEY_IDX_H_M \
- (((1UL << 8) - 1) << MPT_BYTE_64_L_KEY_IDX_H_S)
-
-struct hns_roce_wqe_ctrl_seg {
- __le32 sgl_pa_h;
- __le32 flag;
- union {
- __be32 imm_data;
- __le32 inv_key;
- };
- __le32 msg_length;
-};
-
-struct hns_roce_wqe_data_seg {
- __le64 addr;
- __le32 lkey;
- __le32 len;
-};
-
-struct hns_roce_wqe_raddr_seg {
- __le32 rkey;
- __le32 len; /* reserved */
- __le64 raddr;
-};
-
-struct hns_roce_rq_wqe_ctrl {
- __le32 rwqe_byte_4;
- __le32 rocee_sgl_ba_l;
- __le32 rwqe_byte_12;
- __le32 reserved[5];
-};
-
-#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S 16
-#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M \
- (((1UL << 6) - 1) << RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S)
-
-#define HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS 10000
-
-#define GID_LEN 16
-
-struct hns_roce_ud_send_wqe {
- __le32 dmac_h;
- __le32 u32_8;
- __le32 immediate_data;
-
- __le32 u32_16;
- union {
- unsigned char dgid[GID_LEN];
- struct {
- __le32 u32_20;
- __le32 u32_24;
- __le32 u32_28;
- __le32 u32_32;
- };
- };
-
- __le32 u32_36;
- __le32 u32_40;
-
- __le32 va0_l;
- __le32 va0_h;
- __le32 l_key0;
-
- __le32 va1_l;
- __le32 va1_h;
- __le32 l_key1;
-};
-
-#define UD_SEND_WQE_U32_4_DMAC_0_S 0
-#define UD_SEND_WQE_U32_4_DMAC_0_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_0_S)
-
-#define UD_SEND_WQE_U32_4_DMAC_1_S 8
-#define UD_SEND_WQE_U32_4_DMAC_1_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_1_S)
-
-#define UD_SEND_WQE_U32_4_DMAC_2_S 16
-#define UD_SEND_WQE_U32_4_DMAC_2_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_2_S)
-
-#define UD_SEND_WQE_U32_4_DMAC_3_S 24
-#define UD_SEND_WQE_U32_4_DMAC_3_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_3_S)
-
-#define UD_SEND_WQE_U32_8_DMAC_4_S 0
-#define UD_SEND_WQE_U32_8_DMAC_4_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_4_S)
-
-#define UD_SEND_WQE_U32_8_DMAC_5_S 8
-#define UD_SEND_WQE_U32_8_DMAC_5_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S)
-
-#define UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S 22
-
-#define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16
-#define UD_SEND_WQE_U32_8_OPERATION_TYPE_M \
- (((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S)
-
-#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S 24
-#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M \
- (((1UL << 6) - 1) << UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S)
-
-#define UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S 31
-
-#define UD_SEND_WQE_U32_16_DEST_QP_S 0
-#define UD_SEND_WQE_U32_16_DEST_QP_M \
- (((1UL << 24) - 1) << UD_SEND_WQE_U32_16_DEST_QP_S)
-
-#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S 24
-#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S)
-
-#define UD_SEND_WQE_U32_36_FLOW_LABEL_S 0
-#define UD_SEND_WQE_U32_36_FLOW_LABEL_M \
- (((1UL << 20) - 1) << UD_SEND_WQE_U32_36_FLOW_LABEL_S)
-
-#define UD_SEND_WQE_U32_36_PRIORITY_S 20
-#define UD_SEND_WQE_U32_36_PRIORITY_M \
- (((1UL << 4) - 1) << UD_SEND_WQE_U32_36_PRIORITY_S)
-
-#define UD_SEND_WQE_U32_36_SGID_INDEX_S 24
-#define UD_SEND_WQE_U32_36_SGID_INDEX_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_36_SGID_INDEX_S)
-
-#define UD_SEND_WQE_U32_40_HOP_LIMIT_S 0
-#define UD_SEND_WQE_U32_40_HOP_LIMIT_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_HOP_LIMIT_S)
-
-#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S 8
-#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M \
- (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S)
-
-struct hns_roce_sqp_context {
- __le32 qp1c_bytes_4;
- __le32 sq_rq_bt_l;
- __le32 qp1c_bytes_12;
- __le32 qp1c_bytes_16;
- __le32 qp1c_bytes_20;
- __le32 cur_rq_wqe_ba_l;
- __le32 qp1c_bytes_28;
- __le32 qp1c_bytes_32;
- __le32 cur_sq_wqe_ba_l;
- __le32 qp1c_bytes_40;
-};
-
-#define QP1C_BYTES_4_QP_STATE_S 0
-#define QP1C_BYTES_4_QP_STATE_M \
- (((1UL << 3) - 1) << QP1C_BYTES_4_QP_STATE_S)
-
-#define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8
-#define QP1C_BYTES_4_SQ_WQE_SHIFT_M \
- (((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S)
-
-#define QP1C_BYTES_4_RQ_WQE_SHIFT_S 12
-#define QP1C_BYTES_4_RQ_WQE_SHIFT_M \
- (((1UL << 4) - 1) << QP1C_BYTES_4_RQ_WQE_SHIFT_S)
-
-#define QP1C_BYTES_4_PD_S 16
-#define QP1C_BYTES_4_PD_M (((1UL << 16) - 1) << QP1C_BYTES_4_PD_S)
-
-#define QP1C_BYTES_12_SQ_RQ_BT_H_S 0
-#define QP1C_BYTES_12_SQ_RQ_BT_H_M \
- (((1UL << 17) - 1) << QP1C_BYTES_12_SQ_RQ_BT_H_S)
-
-#define QP1C_BYTES_16_RQ_HEAD_S 0
-#define QP1C_BYTES_16_RQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_16_RQ_HEAD_S)
-
-#define QP1C_BYTES_16_PORT_NUM_S 16
-#define QP1C_BYTES_16_PORT_NUM_M \
- (((1UL << 3) - 1) << QP1C_BYTES_16_PORT_NUM_S)
-
-#define QP1C_BYTES_16_SIGNALING_TYPE_S 27
-#define QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S 28
-#define QP1C_BYTES_16_RQ_BA_FLG_S 29
-#define QP1C_BYTES_16_SQ_BA_FLG_S 30
-#define QP1C_BYTES_16_QP1_ERR_S 31
-
-#define QP1C_BYTES_20_SQ_HEAD_S 0
-#define QP1C_BYTES_20_SQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_20_SQ_HEAD_S)
-
-#define QP1C_BYTES_20_PKEY_IDX_S 16
-#define QP1C_BYTES_20_PKEY_IDX_M \
- (((1UL << 16) - 1) << QP1C_BYTES_20_PKEY_IDX_S)
-
-#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S 0
-#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S)
-
-#define QP1C_BYTES_28_RQ_CUR_IDX_S 16
-#define QP1C_BYTES_28_RQ_CUR_IDX_M \
- (((1UL << 15) - 1) << QP1C_BYTES_28_RQ_CUR_IDX_S)
-
-#define QP1C_BYTES_32_TX_CQ_NUM_S 0
-#define QP1C_BYTES_32_TX_CQ_NUM_M \
- (((1UL << 16) - 1) << QP1C_BYTES_32_TX_CQ_NUM_S)
-
-#define QP1C_BYTES_32_RX_CQ_NUM_S 16
-#define QP1C_BYTES_32_RX_CQ_NUM_M \
- (((1UL << 16) - 1) << QP1C_BYTES_32_RX_CQ_NUM_S)
-
-#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S 0
-#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S)
-
-#define QP1C_BYTES_40_SQ_CUR_IDX_S 16
-#define QP1C_BYTES_40_SQ_CUR_IDX_M \
- (((1UL << 15) - 1) << QP1C_BYTES_40_SQ_CUR_IDX_S)
-
-#define HNS_ROCE_WQE_INLINE (1UL<<31)
-#define HNS_ROCE_WQE_SE (1UL<<30)
-
-#define HNS_ROCE_WQE_SGE_NUM_BIT 24
-#define HNS_ROCE_WQE_IMM (1UL<<23)
-#define HNS_ROCE_WQE_FENCE (1UL<<21)
-#define HNS_ROCE_WQE_CQ_NOTIFY (1UL<<20)
-
-#define HNS_ROCE_WQE_OPCODE_SEND (0<<16)
-#define HNS_ROCE_WQE_OPCODE_RDMA_READ (1<<16)
-#define HNS_ROCE_WQE_OPCODE_RDMA_WRITE (2<<16)
-#define HNS_ROCE_WQE_OPCODE_LOCAL_INV (4<<16)
-#define HNS_ROCE_WQE_OPCODE_UD_SEND (7<<16)
-#define HNS_ROCE_WQE_OPCODE_MASK (15<<16)
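-/* the opcode lives in bits 16~19 of the flag word: 15 << 16 == 0xF0000 */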
-
-struct hns_roce_qp_context {
- __le32 qpc_bytes_4;
- __le32 qpc_bytes_8;
- __le32 qpc_bytes_12;
- __le32 qpc_bytes_16;
- __le32 sq_rq_bt_l;
- __le32 qpc_bytes_24;
- __le32 irrl_ba_l;
- __le32 qpc_bytes_32;
- __le32 qpc_bytes_36;
- __le32 dmac_l;
- __le32 qpc_bytes_44;
- __le32 qpc_bytes_48;
- u8 dgid[16];
- __le32 qpc_bytes_68;
- __le32 cur_rq_wqe_ba_l;
- __le32 qpc_bytes_76;
- __le32 rx_rnr_time;
- __le32 qpc_bytes_84;
- __le32 qpc_bytes_88;
- union {
- __le32 rx_sge_len;
- __le32 dma_length;
- };
- union {
- __le32 rx_sge_num;
- __le32 rx_send_pktn;
- __le32 r_key;
- };
- __le32 va_l;
- __le32 va_h;
- __le32 qpc_bytes_108;
- __le32 qpc_bytes_112;
- __le32 rx_cur_sq_wqe_ba_l;
- __le32 qpc_bytes_120;
- __le32 qpc_bytes_124;
- __le32 qpc_bytes_128;
- __le32 qpc_bytes_132;
- __le32 qpc_bytes_136;
- __le32 qpc_bytes_140;
- __le32 qpc_bytes_144;
- __le32 qpc_bytes_148;
- union {
- __le32 rnr_retry;
- __le32 ack_time;
- };
- __le32 qpc_bytes_156;
- __le32 pkt_use_len;
- __le32 qpc_bytes_164;
- __le32 qpc_bytes_168;
- union {
- __le32 sge_use_len;
- __le32 pa_use_len;
- };
- __le32 qpc_bytes_176;
- __le32 qpc_bytes_180;
- __le32 tx_cur_sq_wqe_ba_l;
- __le32 qpc_bytes_188;
- __le32 rvd21;
-};
-
-#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S 0
-#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S)
-
-#define QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S 3
-#define QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S 4
-#define QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S 5
-#define QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S 6
-#define QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S 7
-
-#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S 8
-#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M \
- (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S)
-
-#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S 12
-#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M \
- (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S)
-
-#define QP_CONTEXT_QPC_BYTES_4_PD_S 16
-#define QP_CONTEXT_QPC_BYTES_4_PD_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_4_PD_S)
-
-#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S 0
-#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S)
-
-#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S 16
-#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S)
-
-#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S 0
-#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S)
-
-#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_S 0
-#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_16_QP_NUM_S)
-
-#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S 0
-#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M \
- (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S 18
-#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S)
-
-#define QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S 23
-
-#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S 0
-#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M \
- (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S 18
-#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S)
-
-#define QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S 20
-#define QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S 21
-#define QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S 22
-#define QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S 23
-
-#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S 24
-#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S)
-
-#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_S 0
-#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_36_DEST_QP_S)
-
-#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S 24
-#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_S 0
-#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_44_DMAC_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S 16
-#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S)
-
-#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_S 24
-#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_HOPLMT_S)
-
-#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S 0
-#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M \
- (((1UL << 20) - 1) << QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S)
-
-#define QP_CONTEXT_QPC_BYTES_48_TCLASS_S 20
-#define QP_CONTEXT_QPC_BYTES_48_TCLASS_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_48_TCLASS_S)
-
-#define QP_CONTEXT_QPC_BYTES_48_MTU_S 28
-#define QP_CONTEXT_QPC_BYTES_48_MTU_M \
- (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_48_MTU_S)
-
-#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S 0
-#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S)
-
-#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S 0
-#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S 8
-#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S 24
-#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S)
-
-#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S 0
-#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S 24
-#define QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S 25
-
-#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S 26
-#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M \
- (((1UL << 2) - 1) << \
- QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S)
-
-#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S 29
-#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S)
-
-#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S 24
-#define QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S 25
-
-#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S 24
-#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S)
-
-#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S 0
-#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S 0
-#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S 16
-#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S)
-
-#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S 0
-#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S 24
-
-#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S 25
-#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S)
-
-#define QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S 27
-
-#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S 24
-#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S)
-
-#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S 24
-#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S)
-
-#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S 0
-#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S 16
-#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S 31
-
-#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_S 0
-#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_144_QP_STATE_S)
-
-#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S 0
-#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S)
-
-#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S 2
-#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S)
-
-#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S 5
-#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S)
-
-#define QP_CONTEXT_QPC_BYTES_148_LSN_S 8
-#define QP_CONTEXT_QPC_BYTES_148_LSN_M \
- (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_148_LSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S 0
-#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S 3
-#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S 8
-#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S 11
-#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M \
- (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_SL_S 14
-#define QP_CONTEXT_QPC_BYTES_156_SL_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_SL_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S 16
-#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S)
-
-#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S 24
-#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S)
-
-#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S 24
-#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M \
- (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S)
-
-#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S 0
-#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M \
- (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S)
-
-#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S 24
-#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S)
-
-#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S 26
-#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M \
- (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S)
-
-#define QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S 28
-#define QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S 29
-#define QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S 30
-
-#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S 0
-#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S 0
-#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S)
-
-#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S)
-
-#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S 0
-#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M \
- (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S)
-
-#define QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S 8
-
-#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S 16
-#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \
- (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S)
-
-#define STATUS_MASK 0xff
-#define GO_BIT_TIMEOUT_MSECS 10000
-#define HCR_STATUS_OFFSET 0x18
-#define HCR_GO_BIT 15
-
-struct hns_roce_rq_db {
- __le32 u32_4;
- __le32 u32_8;
-};
-
-#define RQ_DOORBELL_U32_4_RQ_HEAD_S 0
-#define RQ_DOORBELL_U32_4_RQ_HEAD_M \
- (((1UL << 15) - 1) << RQ_DOORBELL_U32_4_RQ_HEAD_S)
-
-#define RQ_DOORBELL_U32_8_QPN_S 0
-#define RQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << RQ_DOORBELL_U32_8_QPN_S)
-
-#define RQ_DOORBELL_U32_8_CMD_S 28
-#define RQ_DOORBELL_U32_8_CMD_M (((1UL << 3) - 1) << RQ_DOORBELL_U32_8_CMD_S)
-
-#define RQ_DOORBELL_U32_8_HW_SYNC_S 31
-
-struct hns_roce_sq_db {
- __le32 u32_4;
- __le32 u32_8;
-};
-
-#define SQ_DOORBELL_U32_4_SQ_HEAD_S 0
-#define SQ_DOORBELL_U32_4_SQ_HEAD_M \
- (((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S)
-
-#define SQ_DOORBELL_U32_4_SL_S 16
-#define SQ_DOORBELL_U32_4_SL_M \
- (((1UL << 2) - 1) << SQ_DOORBELL_U32_4_SL_S)
-
-#define SQ_DOORBELL_U32_4_PORT_S 18
-#define SQ_DOORBELL_U32_4_PORT_M (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S)
-
-#define SQ_DOORBELL_U32_8_QPN_S 0
-#define SQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << SQ_DOORBELL_U32_8_QPN_S)
-
-#define SQ_DOORBELL_HW_SYNC_S 31
-
-struct hns_roce_ext_db {
- int esdb_dep;
- int eodb_dep;
- struct hns_roce_buf_list *sdb_buf_list;
- struct hns_roce_buf_list *odb_buf_list;
-};
-
-struct hns_roce_db_table {
- int sdb_ext_mod;
- int odb_ext_mod;
- struct hns_roce_ext_db *ext_db;
-};
-
-#define HW_SYNC_SLEEP_TIME_INTERVAL 20
-#define HW_SYNC_TIMEOUT_MSECS (25 * HW_SYNC_SLEEP_TIME_INTERVAL)
-#define BT_CMD_SYNC_SHIFT 31
-#define HNS_ROCE_BA_SIZE (32 * 4096)
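-/* for reference: the sync timeout above works out to 500 ms, one BA to 128 KiB */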
-
-struct hns_roce_bt_table {
- struct hns_roce_buf_list qpc_buf;
- struct hns_roce_buf_list mtpt_buf;
- struct hns_roce_buf_list cqc_buf;
-};
-
-struct hns_roce_tptr_table {
- struct hns_roce_buf_list tptr_buf;
-};
-
-struct hns_roce_qp_work {
- struct work_struct work;
- struct ib_device *ib_dev;
- struct hns_roce_qp *qp;
- u32 db_wait_stage;
- u32 sdb_issue_ptr;
- u32 sdb_inv_cnt;
- u32 sche_cnt;
-};
-
-struct hns_roce_mr_free_work {
- struct work_struct work;
- struct ib_device *ib_dev;
- struct completion *comp;
- int comp_flag;
- void *mr;
-};
-
-struct hns_roce_recreate_lp_qp_work {
- struct work_struct work;
- struct ib_device *ib_dev;
- struct completion *comp;
- int comp_flag;
-};
-
-struct hns_roce_free_mr {
- struct workqueue_struct *free_mr_wq;
- struct hns_roce_qp *mr_free_qp[HNS_ROCE_V1_RESV_QP];
- struct hns_roce_cq *mr_free_cq;
- struct hns_roce_pd *mr_free_pd;
-};
-
-struct hns_roce_v1_priv {
- struct hns_roce_db_table db_table;
- struct hns_roce_raq_table raq_table;
- struct hns_roce_bt_table bt_table;
- struct hns_roce_tptr_table tptr_table;
- struct hns_roce_free_mr free_mr;
-};
-
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
-int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
-
-#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index eb0defa80d0d..b33e948fd060 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -678,6 +678,7 @@ static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
void *wqe)
{
+#define HNS_ROCE_SL_SHIFT 2
struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
/* All kinds of DirectWQE have the same header field layout */
@@ -685,7 +686,8 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M,
V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl);
roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M,
- V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2);
+ V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S,
+ qp->sl >> HNS_ROCE_SL_SHIFT);
roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);
@@ -1305,14 +1307,14 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
continue;
dev_err_ratelimited(hr_dev->dev,
- "Cmdq IO error, opcode = %x, return = %x\n",
+ "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
desc->opcode, desc_ret);
ret = -EIO;
}
} else {
/* FW/HW reset or incorrect number of desc */
tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
- dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n",
+ dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
csq->head, tail);
csq->head = tail;
@@ -1571,7 +1573,7 @@ static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
struct hns_roce_cmq_desc desc;
int ret;
- if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) {
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
hr_dev->func_num = 1;
return 0;
}
@@ -2003,7 +2005,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
- caps->flags |= HNS_ROCE_CAP_FLAG_STASH;
+ caps->flags |= HNS_ROCE_CAP_FLAG_STASH |
+ HNS_ROCE_CAP_FLAG_DIRECT_WQE;
caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
} else {
caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
@@ -2144,7 +2147,6 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
- caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
@@ -2161,6 +2163,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
(u32)priv->handle->rinfo.num_vectors - 2);
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
@@ -2181,6 +2184,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
} else {
u32 func_num = max_t(u32, 1, hr_dev->func_num);
+ caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
caps->gid_table_len[0] /= func_num;
@@ -2393,7 +2397,7 @@ static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
struct hns_roce_caps *caps = &hr_dev->caps;
int ret;
- if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
return 0;
ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
@@ -2967,8 +2971,8 @@ static int config_gmv_table(struct hns_roce_dev *hr_dev,
return hns_roce_cmq_send(hr_dev, desc, 2);
}
-static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
- int gid_index, const union ib_gid *gid,
+static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index,
+ const union ib_gid *gid,
const struct ib_gid_attr *attr)
{
enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
@@ -3063,8 +3067,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
}
static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
- void *mb_buf, struct hns_roce_mr *mr,
- unsigned long mtpt_idx)
+ void *mb_buf, struct hns_roce_mr *mr)
{
struct hns_roce_v2_mpt_entry *mpt_entry;
int ret;
@@ -4488,14 +4491,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
return 0;
}
-static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
-{
- if (!fl)
- fl = rdma_calc_flow_label(lqpn, rqpn);
-
- return rdma_flow_label_to_udp_sport(fl);
-}
-
static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
u32 *dip_idx)
{
@@ -4712,8 +4707,9 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
}
hr_reg_write(context, QPC_UDPSPN,
- is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num,
- attr->dest_qp_num) : 0);
+ is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num,
+ attr->dest_qp_num) :
+ 0);
hr_reg_clear(qpc_mask, QPC_UDPSPN);
@@ -4739,7 +4735,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr);
if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) {
ibdev_err(ibdev,
- "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n",
+ "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n",
hr_qp->sl, MAX_SERVICE_LEVEL);
return -EINVAL;
}
@@ -4768,7 +4764,8 @@ static bool check_qp_state(enum ib_qp_state cur_state,
[IB_QPS_ERR] = true },
[IB_QPS_SQD] = {},
[IB_QPS_SQE] = {},
- [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }
+ [IB_QPS_ERR] = { [IB_QPS_RESET] = true,
+ [IB_QPS_ERR] = true }
};
return sm[cur_state][new_state];
@@ -5868,7 +5865,7 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev,
roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag);
}
-static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
+static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn)
{
struct device *dev = hr_dev->dev;
int ret;
@@ -5882,7 +5879,7 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn)
0, HNS_ROCE_CMD_DESTROY_AEQC,
HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret)
- dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn);
+ dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn);
}
static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
@@ -6394,7 +6391,7 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle)
if (!id)
return 0;
- if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09)
+ if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08)
return 0;
ret = __hns_roce_hw_v2_init_instance(handle);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index 35c61da7ba15..12be85f0986e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -35,26 +35,15 @@
#include <linux/bitops.h>
-#define HNS_ROCE_VF_QPC_BT_NUM 256
-#define HNS_ROCE_VF_SCCC_BT_NUM 64
-#define HNS_ROCE_VF_SRQC_BT_NUM 64
-#define HNS_ROCE_VF_CQC_BT_NUM 64
-#define HNS_ROCE_VF_MPT_BT_NUM 64
-#define HNS_ROCE_VF_SMAC_NUM 32
-#define HNS_ROCE_VF_SL_NUM 8
-#define HNS_ROCE_VF_GMV_BT_NUM 256
-
#define HNS_ROCE_V2_MAX_QP_NUM 0x1000
#define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200
#define HNS_ROCE_V2_MAX_WQE_NUM 0x8000
-#define HNS_ROCE_V2_MAX_SRQ 0x100000
#define HNS_ROCE_V2_MAX_SRQ_WR 0x8000
#define HNS_ROCE_V2_MAX_SRQ_SGE 64
#define HNS_ROCE_V2_MAX_CQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100
#define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000
#define HNS_ROCE_V2_MAX_CQE_NUM 0x400000
-#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000
#define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64
#define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000
@@ -63,13 +52,10 @@
#define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32
#define HNS_ROCE_V2_UAR_NUM 256
#define HNS_ROCE_V2_PHY_UAR_NUM 1
-#define HNS_ROCE_V2_MAX_IRQ_NUM 65
-#define HNS_ROCE_V2_COMP_VEC_NUM 63
#define HNS_ROCE_V2_AEQE_VEC_NUM 1
#define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1
#define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000
#define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000
-#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000
#define HNS_ROCE_V2_MAX_PD_NUM 0x1000000
@@ -81,7 +67,6 @@
#define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16
#define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64
#define HNS_ROCE_V2_IRRL_ENTRY_SZ 64
-#define HNS_ROCE_V2_TRRL_ENTRY_SZ 48
#define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100
#define HNS_ROCE_V2_CQC_ENTRY_SZ 64
#define HNS_ROCE_V2_SRQC_ENTRY_SZ 64
@@ -103,7 +88,6 @@
#define HNS_ROCE_INVALID_LKEY 0x0
#define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000
#define HNS_ROCE_CMQ_TX_TIMEOUT 30000
-#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2
#define HNS_ROCE_V2_RSV_QPS 8
#define HNS_ROCE_V2_HW_RST_TIMEOUT 1000
@@ -117,12 +101,14 @@
#define HNS_ROCE_CQE_HOP_NUM 1
#define HNS_ROCE_SRQWQE_HOP_NUM 1
#define HNS_ROCE_PBL_HOP_NUM 2
-#define HNS_ROCE_EQE_HOP_NUM 2
#define HNS_ROCE_IDX_HOP_NUM 1
#define HNS_ROCE_SQWQE_HOP_NUM 2
#define HNS_ROCE_EXT_SGE_HOP_NUM 1
#define HNS_ROCE_RQWQE_HOP_NUM 2
+#define HNS_ROCE_V2_EQE_HOP_NUM 2
+#define HNS_ROCE_V3_EQE_HOP_NUM 1
+
#define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6
#define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2
#define HNS_ROCE_V2_GID_INDEX_NUM 16
@@ -1441,7 +1427,7 @@ struct hns_roce_v2_priv {
struct hns_roce_dip {
u8 dgid[GID_LEN_V2];
u32 dip_idx;
- struct list_head node; /* all dips are on a list */
+ struct list_head node; /* all dips are on a list */
};
/* only for RNR timeout issue of HIP08 */
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 4194b626f3c6..f73ba619f375 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
#include <linux/acpi.h>
-#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <rdma/ib_addr.h>
@@ -70,7 +69,7 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context)
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
- ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr);
+ ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr);
return ret;
}
@@ -84,7 +83,7 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context)
if (port >= hr_dev->caps.num_ports)
return -EINVAL;
- ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, NULL, NULL);
+ ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL);
return ret;
}
@@ -152,9 +151,6 @@ static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev)
u8 i;
for (i = 0; i < hr_dev->caps.num_ports; i++) {
- if (hr_dev->hw->set_mtu)
- hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i],
- hr_dev->caps.max_mtu);
ret = hns_roce_set_mac(hr_dev, i,
hr_dev->iboe.netdevs[i]->dev_addr);
if (ret)
@@ -270,6 +266,9 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device,
static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index,
u16 *pkey)
{
+ if (index > 0)
+ return -EINVAL;
+
*pkey = PKEY_ID;
return 0;
@@ -307,9 +306,22 @@ hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
entry->address = address;
entry->mmap_type = mmap_type;
- ret = rdma_user_mmap_entry_insert_exact(
- ucontext, &entry->rdma_entry, length,
- mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1);
+ switch (mmap_type) {
+ /* pgoff 0 must be used by DB for compatibility */
+ case HNS_ROCE_MMAP_TYPE_DB:
+ ret = rdma_user_mmap_entry_insert_exact(
+ ucontext, &entry->rdma_entry, length, 0);
+ break;
+ case HNS_ROCE_MMAP_TYPE_DWQE:
+ ret = rdma_user_mmap_entry_insert_range(
+ ucontext, &entry->rdma_entry, length, 1,
+ U32_MAX);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
if (ret) {
kfree(entry);
return NULL;
@@ -323,18 +335,12 @@ static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
if (context->db_mmap_entry)
rdma_user_mmap_entry_remove(
&context->db_mmap_entry->rdma_entry);
-
- if (context->tptr_mmap_entry)
- rdma_user_mmap_entry_remove(
- &context->tptr_mmap_entry->rdma_entry);
}
static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
{
struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
- struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
u64 address;
- int ret;
address = context->uar.pfn << PAGE_SHIFT;
context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
@@ -342,27 +348,7 @@ static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
if (!context->db_mmap_entry)
return -ENOMEM;
- if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
- return 0;
-
- /*
- * FIXME: using io_remap_pfn_range on the dma address returned
- * by dma_alloc_coherent is totally wrong.
- */
- context->tptr_mmap_entry =
- hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr,
- hr_dev->tptr_size,
- HNS_ROCE_MMAP_TYPE_TPTR);
- if (!context->tptr_mmap_entry) {
- ret = -ENOMEM;
- goto err;
- }
-
return 0;
-
-err:
- hns_roce_dealloc_uar_entry(context);
- return ret;
}
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
@@ -436,10 +422,15 @@ static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
entry = to_hns_mmap(rdma_entry);
pfn = entry->address >> PAGE_SHIFT;
- prot = vma->vm_page_prot;
- if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR)
- prot = pgprot_noncached(prot);
+ switch (entry->mmap_type) {
+ case HNS_ROCE_MMAP_TYPE_DB:
+ case HNS_ROCE_MMAP_TYPE_DWQE:
+ prot = pgprot_device(vma->vm_page_prot);
+ break;
+ default:
+ return -EINVAL;
+ }
ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
prot, rdma_entry);
@@ -816,7 +807,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
int ret;
spin_lock_init(&hr_dev->sm_lock);
- spin_lock_init(&hr_dev->bt_cmd_lock);
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
@@ -907,20 +897,13 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
struct device *dev = hr_dev->dev;
int ret;
- if (hr_dev->hw->reset) {
- ret = hr_dev->hw->reset(hr_dev, true);
- if (ret) {
- dev_err(dev, "Reset RoCE engine failed!\n");
- return ret;
- }
- }
hr_dev->is_reset = false;
if (hr_dev->hw->cmq_init) {
ret = hr_dev->hw->cmq_init(hr_dev);
if (ret) {
dev_err(dev, "Init RoCE Command Queue failed!\n");
- goto error_failed_cmq_init;
+ return ret;
}
}
@@ -1003,12 +986,6 @@ error_failed_cmd_init:
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
-error_failed_cmq_init:
- if (hr_dev->hw->reset) {
- if (hr_dev->hw->reset(hr_dev, false))
- dev_err(dev, "Dereset RoCE engine failed!\n");
- }
-
return ret;
}
@@ -1028,8 +1005,6 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
hns_roce_cmd_cleanup(hr_dev);
if (hr_dev->hw->cmq_exit)
hr_dev->hw->cmq_exit(hr_dev);
- if (hr_dev->hw->reset)
- hr_dev->hw->reset(hr_dev, false);
}
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 7089ac780291..2ee06b906b60 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -31,7 +31,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include "hns_roce_device.h"
@@ -81,7 +80,7 @@ static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
return -ENOMEM;
}
- mr->key = hw_index_to_key(id); /* MR key */
+ mr->key = hw_index_to_key(id); /* MR key */
err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table,
(unsigned long)id);
@@ -173,8 +172,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
}
if (mr->type != MR_TYPE_FRMR)
- ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
- mtpt_idx);
+ ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
else
ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
if (ret) {
@@ -363,12 +361,8 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
struct hns_roce_mr *mr = to_hr_mr(ibmr);
int ret = 0;
- if (hr_dev->hw->dereg_mr) {
- ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
- } else {
- hns_roce_mr_free(hr_dev, mr);
- kfree(mr);
- }
+ hns_roce_mr_free(hr_dev, mr);
+ kfree(mr);
return ret;
}
@@ -614,10 +608,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return -ENOBUFS;
for (i = 0; i < count && npage < max_count; i++) {
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- addr = to_hr_hw_page_addr(pages[npage]);
- else
- addr = pages[npage];
+ addr = pages[npage];
mtts[i] = cpu_to_le64(addr);
npage++;
@@ -824,11 +815,11 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
}
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
+ u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
{
struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
int mtt_count, left;
- int start_index;
+ u32 start_index;
int total = 0;
__le64 *mtts;
u32 npage;
@@ -847,10 +838,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
continue;
addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- mtt_buf[total] = to_hr_hw_page_addr(addr);
- else
- mtt_buf[total] = addr;
+ mtt_buf[total] = addr;
total++;
}
@@ -884,10 +872,10 @@ done:
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
struct hns_roce_buf_attr *attr,
struct hns_roce_hem_cfg *cfg,
- unsigned int *buf_page_shift, int unalinged_size)
+ unsigned int *buf_page_shift, u64 unalinged_size)
{
struct hns_roce_buf_region *r;
- int first_region_padding;
+ u64 first_region_padding;
int page_cnt, region_cnt;
unsigned int page_shift;
size_t buf_size;
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 81ffad77ae42..783e71852c50 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -30,7 +30,6 @@
* SOFTWARE.
*/
-#include <linux/platform_device.h>
#include <linux/pci.h>
#include "hns_roce_device.h"
@@ -86,7 +85,6 @@ int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{
struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
- struct resource *res;
int id;
/* Using bitmap to manager UAR index */
@@ -104,18 +102,9 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
else
uar->index = 0;
- if (!dev_is_pci(hr_dev->dev)) {
- res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ida_free(&uar_ida->ida, id);
- dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
- return -EINVAL;
- }
- uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
- } else {
- uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
- >> PAGE_SHIFT);
- }
+ uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
+ hr_dev->dwqe_page = pci_resource_start(hr_dev->pci_dev, 4);
return 0;
}
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 9af4509894e6..d78373e10aab 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -32,7 +32,6 @@
*/
#include <linux/pci.h>
-#include <linux/platform_device.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_ioctl.h>
@@ -110,12 +109,11 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
return;
}
- if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
- (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
- event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
+ if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
+ event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) {
qp->state = IB_QPS_ERR;
flush_cqe(hr_dev, qp);
@@ -219,13 +217,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
int ret;
if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
- /* when hw version is v1, the sqpn is allocated */
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- num = HNS_ROCE_MAX_PORTS +
- hr_dev->iboe.phy_port[hr_qp->port];
- else
- num = 1;
-
+ num = 1;
hr_qp->doorbell_qpn = 1;
} else {
mutex_lock(&qp_table->bank_mutex);
@@ -324,11 +316,6 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
if (!hr_qp->qpn)
return -EINVAL;
- /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
- hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- return 0;
-
/* Alloc memory for QPC */
ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
if (ret) {
@@ -379,6 +366,11 @@ err_out:
return ret;
}
+static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp)
+{
+ rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry);
+}
+
void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
struct xarray *xa = &hr_dev->qp_table_xa;
@@ -402,11 +394,6 @@ static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
{
struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
- hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- return;
-
if (hr_dev->caps.trrl_entry_sz)
hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
@@ -535,11 +522,6 @@ static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
- hr_qp->sq.max_gs = HNS_ROCE_SGE_IN_WQE;
- return;
- }
-
hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp);
@@ -780,7 +762,11 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
goto err_inline;
}
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;
+
return 0;
+
err_inline:
free_rq_inline_buf(hr_qp);
@@ -822,6 +808,35 @@ static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
hns_roce_qp_has_rq(init_attr));
}
+static int qp_mmap_entry(struct hns_roce_qp *hr_qp,
+ struct hns_roce_dev *hr_dev,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp_resp *resp)
+{
+ struct hns_roce_ucontext *uctx =
+ rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ struct rdma_user_mmap_entry *rdma_entry;
+ u64 address;
+
+ address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;
+
+ hr_qp->dwqe_mmap_entry =
+ hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
+ HNS_ROCE_DWQE_SIZE,
+ HNS_ROCE_MMAP_TYPE_DWQE);
+
+ if (!hr_qp->dwqe_mmap_entry) {
+ ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");
+ return -ENOMEM;
+ }
+
+ rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry;
+ resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry);
+
+ return 0;
+}
+
static int alloc_user_qp_db(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr,
@@ -909,10 +924,16 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;
if (udata) {
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) {
+ ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp);
+ if (ret)
+ return ret;
+ }
+
ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd,
resp);
if (ret)
- return ret;
+ goto err_remove_qp;
} else {
ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
if (ret)
@@ -920,6 +941,12 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
}
return 0;
+
+err_remove_qp:
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
+ qp_user_mmap_entry_remove(hr_qp);
+
+ return ret;
}
static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
@@ -933,6 +960,8 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
+ qp_user_mmap_entry_remove(hr_qp);
} else {
if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
hns_roce_free_db(hr_dev, &hr_qp->rdb);
@@ -1158,7 +1187,7 @@ static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
goto out;
break;
case IB_QPT_UD:
- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 &&
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
is_user)
goto out;
break;
@@ -1391,7 +1420,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
}
}
-static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
+static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
{
return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
}
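
/*
 * The consumer side of qp_mmap_entry() above, sketched from the userspace
 * provider's point of view (this is not the actual rdma-core code): the
 * dwqe_mmap_key returned in the create-QP response is an opaque offset
 * produced by rdma_user_mmap_get_offset(), so the only contract is to pass
 * it through unchanged as the mmap() offset on the uverbs command fd.
 * Assumes a 64-bit off_t.
 */
#include <stdint.h>
#include <sys/mman.h>

static void *sketch_map_dwqe(int cmd_fd, uint64_t dwqe_mmap_key,
			     size_t page_size)
{
	/* one write-combining page per QP; write access suffices for WQEs */
	return mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
		    cmd_fd, dwqe_mmap_key);
}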
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index b4c657f5f2f9..89234d04cc65 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -550,7 +550,7 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
struct irdma_sc_dev *dev = &rf->sc_dev;
dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
- irq_set_affinity_hint(msix_vec->irq, NULL);
+ irq_update_affinity_hint(msix_vec->irq, NULL);
free_irq(msix_vec->irq, dev_id);
}
@@ -1100,7 +1100,7 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
}
cpumask_clear(&msix_vec->mask);
cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
- irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
+ irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
if (status) {
ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
return IRDMA_ERR_CFG;
@@ -1709,14 +1709,14 @@ clean_msixtbl:
*/
static void irdma_get_used_rsrc(struct irdma_device *iwdev)
{
- iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds,
- iwdev->rf->max_pd, 0);
- iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps,
- iwdev->rf->max_qp, 0);
- iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs,
- iwdev->rf->max_cq, 0);
- iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs,
- iwdev->rf->max_mr, 0);
+ iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
+ iwdev->rf->max_pd);
+ iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
+ iwdev->rf->max_qp);
+ iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
+ iwdev->rf->max_cq);
+ iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
+ iwdev->rf->max_mr);
}
void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
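
/*
 * Both conversions in this file are behavior-preserving where it matters:
 * find_next_zero_bit(bm, size, 0) returns exactly what
 * find_first_zero_bit(bm, size) does, and irq_update_affinity_hint() only
 * publishes the hint (consumed by irqbalance and friends) without also
 * forcing it onto the IRQ the way irq_set_affinity_hint() did.  The bitop
 * equivalence, spelled out:
 */
static unsigned long sketch_first_free(const unsigned long *bm,
				       unsigned long size)
{
	/* same result as find_next_zero_bit(bm, size, 0) */
	return find_first_zero_bit(bm, size);
}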
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
index d219f64b2c3d..43e962b97d6a 100644
--- a/drivers/infiniband/hw/irdma/i40iw_if.c
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -198,7 +198,7 @@ static void i40iw_remove(struct auxiliary_device *aux_dev)
aux_dev);
struct i40e_info *cdev_info = i40e_adev->ldev;
- return i40e_client_device_unregister(cdev_info);
+ i40e_client_device_unregister(cdev_info);
}
static const struct auxiliary_device_id i40iw_auxiliary_id_table[] = {
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 3fda7b78a9af..9fab29039f1c 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -207,7 +207,7 @@ static void irdma_remove(struct auxiliary_device *aux_dev)
struct iidc_auxiliary_dev,
adev);
struct ice_pf *pf = iidc_adev->pf;
- struct irdma_device *iwdev = dev_get_drvdata(&aux_dev->dev);
+ struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
irdma_ib_unregister_device(iwdev);
ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false);
@@ -295,7 +295,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, true);
ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
- dev_set_drvdata(&aux_dev->dev, iwdev);
+ auxiliary_set_drvdata(aux_dev, iwdev);
return 0;
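
/*
 * auxiliary_set_drvdata()/auxiliary_get_drvdata() are thin wrappers around
 * the generic drvdata accessors on adev->dev; the stored pointer is the
 * same, callers just stop dereferencing the embedded struct device by
 * hand.  What the helpers in <linux/auxiliary_bus.h> boil down to:
 */
static inline void sketch_aux_set(struct auxiliary_device *adev, void *data)
{
	dev_set_drvdata(&adev->dev, data);
}

static inline void *sketch_aux_get(struct auxiliary_device *adev)
{
	return dev_get_drvdata(&adev->dev);
}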
diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h
index aa20827dcc9d..d0d4f2b77d34 100644
--- a/drivers/infiniband/hw/irdma/pble.h
+++ b/drivers/infiniband/hw/irdma/pble.h
@@ -69,7 +69,7 @@ struct irdma_add_page_info {
struct irdma_chunk {
struct list_head list;
struct irdma_dma_info dmainfo;
- void *bitmapbuf;
+ unsigned long *bitmapbuf;
u32 sizeofbitmap;
u64 size;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 8cd5f9261692..460e757d3fe6 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -21,7 +21,8 @@ static int irdma_query_device(struct ib_device *ibdev,
return -EINVAL;
memset(props, 0, sizeof(*props));
- ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
+ addrconf_addr_eui48((u8 *)&props->sys_image_guid,
+ iwdev->netdev->dev_addr);
props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 |
irdma_fw_minor_ver(&rf->sc_dev);
props->device_cap_flags = iwdev->device_cap_flags;
@@ -1170,6 +1171,10 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
udp_info->ttl = attr->ah_attr.grh.hop_limit;
udp_info->flow_label = attr->ah_attr.grh.flow_label;
udp_info->tos = attr->ah_attr.grh.traffic_class;
+ udp_info->src_port =
+ rdma_get_udp_sport(udp_info->flow_label,
+ ibqp->qp_num,
+ roce_info->dest_qp);
irdma_qp_rem_qos(&iwqp->sc_qp);
dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
ctx_info->user_pri = rt_tos2priority(udp_info->tos);
@@ -4321,24 +4326,6 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
return IB_LINK_LAYER_ETHERNET;
}
-static __be64 irdma_mac_to_guid(struct net_device *ndev)
-{
- const unsigned char *mac = ndev->dev_addr;
- __be64 guid;
- unsigned char *dst = (unsigned char *)&guid;
-
- dst[0] = mac[0] ^ 2;
- dst[1] = mac[1];
- dst[2] = mac[2];
- dst[3] = 0xff;
- dst[4] = 0xfe;
- dst[5] = mac[3];
- dst[6] = mac[4];
- dst[7] = mac[5];
-
- return guid;
-}
-
static const struct ib_device_ops irdma_roce_dev_ops = {
.attach_mcast = irdma_attach_mcast,
.create_ah = irdma_create_ah,
@@ -4408,7 +4395,8 @@ static const struct ib_device_ops irdma_dev_ops = {
static void irdma_init_roce_device(struct irdma_device *iwdev)
{
iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
- iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
+ addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+ iwdev->netdev->dev_addr);
ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
}
@@ -4421,7 +4409,8 @@ static int irdma_init_iw_device(struct irdma_device *iwdev)
struct net_device *netdev = iwdev->netdev;
iwdev->ibdev.node_type = RDMA_NODE_RNIC;
- ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr);
+ addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+ netdev->dev_addr);
iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref;
iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref;
iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
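
/*
 * The deleted irdma_mac_to_guid() (and ocrdma_get_guid() later in this
 * series) open-coded the EUI-48 -> EUI-64 expansion that
 * addrconf_addr_eui48() from <net/addrconf.h> already provides: flip the
 * universal/local bit of the first octet and splice 0xff,0xfe between the
 * OUI and the NIC-specific half.  The transform, standalone:
 */
static void sketch_mac_to_eui64(u8 eui[8], const u8 mac[6])
{
	eui[0] = mac[0] ^ 2;	/* toggle the universal/local bit */
	eui[1] = mac[1];
	eui[2] = mac[2];
	eui[3] = 0xff;		/* EUI-48 -> EUI-64 filler bytes */
	eui[4] = 0xfe;
	eui[5] = mac[3];
	eui[6] = mac[4];
	eui[7] = mac[5];
}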
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 0d2fa3338784..93b1650eacfa 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -85,14 +85,6 @@ static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
static struct workqueue_struct *wq;
-static void init_query_mad(struct ib_smp *mad)
-{
- mad->base_version = 1;
- mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- mad->class_version = 1;
- mad->method = IB_MGMT_METHOD_GET;
-}
-
static int check_flow_steering_support(struct mlx4_dev *dev)
{
int eth_num_ports = 0;
@@ -471,7 +463,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
@@ -669,7 +661,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u32 port,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -721,7 +713,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u32 port,
/* If reported active speed is QDR, check if is FDR-10 */
if (props->active_speed == IB_SPEED_QDR) {
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -848,7 +840,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -870,7 +862,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
}
}
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
@@ -917,7 +909,7 @@ static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
in_mad->attr_mod = 0;
@@ -971,7 +963,7 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
@@ -1990,7 +1982,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
if (mlx4_is_master(dev->dev))
mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
@@ -2784,10 +2776,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
if (err)
goto err_counter;
- ibdev->ib_uc_qpns_bitmap =
- kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count),
- sizeof(long),
- GFP_KERNEL);
+ ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count,
+ GFP_KERNEL);
if (!ibdev->ib_uc_qpns_bitmap)
goto err_steer_qp_release;
@@ -2875,7 +2865,7 @@ err_diag_counters:
mlx4_ib_diag_cleanup(ibdev);
err_steer_free_bitmap:
- kfree(ibdev->ib_uc_qpns_bitmap);
+ bitmap_free(ibdev->ib_uc_qpns_bitmap);
err_steer_qp_release:
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
@@ -2988,7 +2978,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
ibdev->steer_qpn_count);
- kfree(ibdev->ib_uc_qpns_bitmap);
+ bitmap_free(ibdev->ib_uc_qpns_bitmap);
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
@@ -3247,7 +3237,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
ew = kmalloc(sizeof *ew, GFP_ATOMIC);
if (!ew)
- break;
+ return;
INIT_WORK(&ew->work, handle_port_mgmt_change_event);
memcpy(&ew->ib_eqe, eqe, sizeof *eqe);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index a190fb581591..08371a80fdc2 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -328,8 +328,11 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
}
wc->vendor_err = cqe->vendor_err_synd;
- if (dump)
+ if (dump) {
+ mlx5_ib_warn(dev, "WC error: %d, Message: %s\n", wc->status,
+ ib_wc_status_msg(wc->status));
dump_cqe(dev, cqe);
+ }
}
static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index ec242a5a17a3..293ed709e5ed 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -291,7 +291,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -318,7 +318,7 @@ static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev,
if (!in_mad)
return -ENOMEM;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad,
@@ -405,7 +405,7 @@ int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
@@ -430,7 +430,7 @@ int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
@@ -456,7 +456,7 @@ int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
@@ -485,7 +485,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -496,7 +496,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
memcpy(gid->raw, out_mad->data + 8, 8);
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
@@ -530,7 +530,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
/* props being zeroed by the caller, avoid zeroing it here */
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -584,6 +584,11 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP)
props->active_speed = IB_SPEED_HDR;
break;
+ case 8:
+ if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP &&
+ props->port_cap_flags2 & IB_PORT_LINK_SPEED_NDR_SUP)
+ props->active_speed = IB_SPEED_NDR;
+ break;
}
}
@@ -591,7 +596,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
if (props->active_speed == 4) {
if (dev->port_caps[port - 1].ext_port_cap &
MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 5ec8bd2f0b2f..85f526c861e9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -4422,7 +4422,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev,
}
mutex_unlock(&mlx5_ib_multiport_mutex);
- dev_set_drvdata(&adev->dev, mpi);
+ auxiliary_set_drvdata(adev, mpi);
return 0;
}
@@ -4430,7 +4430,7 @@ static void mlx5r_mp_remove(struct auxiliary_device *adev)
{
struct mlx5_ib_multiport_info *mpi;
- mpi = dev_get_drvdata(&adev->dev);
+ mpi = auxiliary_get_drvdata(adev);
mutex_lock(&mlx5_ib_multiport_mutex);
if (mpi->ibdev)
mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
@@ -4480,7 +4480,7 @@ static int mlx5r_probe(struct auxiliary_device *adev,
return ret;
}
- dev_set_drvdata(&adev->dev, dev);
+ auxiliary_set_drvdata(adev, dev);
return 0;
}
@@ -4488,7 +4488,7 @@ static void mlx5r_remove(struct auxiliary_device *adev)
{
struct mlx5_ib_dev *dev;
- dev = dev_get_drvdata(&adev->dev);
+ dev = auxiliary_get_drvdata(adev);
__mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e3c33be9c5a0..cbc20e400be0 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -665,9 +665,9 @@ struct mlx5_ib_mr {
/* User MR data */
struct mlx5_cache_ent *cache_ent;
+ /* Everything after cache_ent is zero'd when MR allocated */
struct ib_umem *umem;
- /* This is zero'd when the MR is allocated */
union {
/* Used only while the MR is in the cache */
struct {
@@ -719,7 +719,7 @@ struct mlx5_ib_mr {
/* Zero the fields in the mr that are variant depending on usage */
static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
{
- memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out));
+ memset_after(mr, 0, cache_ent);
}
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
@@ -1466,14 +1466,6 @@ extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
extern const struct uapi_definition mlx5_ib_std_types_defs[];
-static inline void init_query_mad(struct ib_smp *mad)
-{
- mad->base_version = 1;
- mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- mad->class_version = 1;
- mad->method = IB_MGMT_METHOD_GET;
-}
-
static inline int is_qp1(enum ib_qp_type qp_type)
{
return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
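
/*
 * memset_after(mr, 0, cache_ent) zeroes every byte of *mr that follows the
 * cache_ent member -- the span the relocated comment now documents -- and
 * names the boundary member explicitly, so the compiler verifies it
 * exists.  A rough expansion, using offsetofend() from <linux/stddef.h>:
 */
static void sketch_clear_mr_tail(struct mlx5_ib_mr *mr)
{
	size_t off = offsetofend(struct mlx5_ib_mr, cache_ent);

	memset((u8 *)mr + off, 0, sizeof(*mr) - off);
}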
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index aef1d274a14e..9f0f79d02d3c 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -51,7 +51,7 @@ u32 mthca_alloc(struct mthca_alloc *alloc)
}
if (obj < alloc->max) {
- set_bit(obj, alloc->table);
+ __set_bit(obj, alloc->table);
obj |= alloc->top;
} else
obj = -1;
@@ -69,7 +69,7 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj)
spin_lock_irqsave(&alloc->lock, flags);
- clear_bit(obj, alloc->table);
+ __clear_bit(obj, alloc->table);
alloc->last = min(alloc->last, obj);
alloc->top = (alloc->top + alloc->max) & alloc->mask;
@@ -79,8 +79,6 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj)
int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
u32 reserved)
{
- int i;
-
/* num must be a power of 2 */
if (num != 1 << (ffs(num) - 1))
return -EINVAL;
@@ -90,21 +88,18 @@ int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask,
alloc->max = num;
alloc->mask = mask;
spin_lock_init(&alloc->lock);
- alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long),
- GFP_KERNEL);
+ alloc->table = bitmap_zalloc(num, GFP_KERNEL);
if (!alloc->table)
return -ENOMEM;
- bitmap_zero(alloc->table, num);
- for (i = 0; i < reserved; ++i)
- set_bit(i, alloc->table);
+ bitmap_set(alloc->table, 0, reserved);
return 0;
}
void mthca_alloc_cleanup(struct mthca_alloc *alloc)
{
- kfree(alloc->table);
+ bitmap_free(alloc->table);
}
/*
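
/*
 * mthca_alloc_init() above is the canonical before/after for the bitmap
 * conversions running through this series (mthca, mlx4, ocrdma, pvrdma):
 * bitmap_zalloc(nbits, gfp) replaces the open-coded
 * kmalloc_array(BITS_TO_LONGS(nbits), sizeof(long), gfp) + bitmap_zero()
 * pair, bitmap_free() replaces kfree(), and a reservation loop of set_bit()
 * calls collapses into one bitmap_set().  The resulting shape:
 */
static unsigned long *sketch_alloc_table(unsigned int nbits,
					 unsigned int reserved)
{
	unsigned long *table = bitmap_zalloc(nbits, GFP_KERNEL);

	if (!table)
		return NULL;

	bitmap_set(table, 0, reserved);	/* mark reserved entries as in use */
	return table;
}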
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index ce0e0867e488..192f83fd7c8a 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -101,13 +101,13 @@ static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
return -1;
found:
- clear_bit(seg, buddy->bits[o]);
+ __clear_bit(seg, buddy->bits[o]);
--buddy->num_free[o];
while (o > order) {
--o;
seg <<= 1;
- set_bit(seg ^ 1, buddy->bits[o]);
+ __set_bit(seg ^ 1, buddy->bits[o]);
++buddy->num_free[o];
}
@@ -125,13 +125,13 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
spin_lock(&buddy->lock);
while (test_bit(seg ^ 1, buddy->bits[order])) {
- clear_bit(seg ^ 1, buddy->bits[order]);
+ __clear_bit(seg ^ 1, buddy->bits[order]);
--buddy->num_free[order];
seg >>= 1;
++order;
}
- set_bit(seg, buddy->bits[order]);
+ __set_bit(seg, buddy->bits[order]);
++buddy->num_free[order];
spin_unlock(&buddy->lock);
@@ -139,7 +139,7 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
- int i, s;
+ int i;
buddy->max_order = max_order;
spin_lock_init(&buddy->lock);
@@ -152,22 +152,20 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
goto err_out;
for (i = 0; i <= buddy->max_order; ++i) {
- s = BITS_TO_LONGS(1 << (buddy->max_order - i));
- buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL);
+ buddy->bits[i] = bitmap_zalloc(1 << (buddy->max_order - i),
+ GFP_KERNEL);
if (!buddy->bits[i])
goto err_out_free;
- bitmap_zero(buddy->bits[i],
- 1 << (buddy->max_order - i));
}
- set_bit(0, buddy->bits[buddy->max_order]);
+ __set_bit(0, buddy->bits[buddy->max_order]);
buddy->num_free[buddy->max_order] = 1;
return 0;
err_out_free:
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ bitmap_free(buddy->bits[i]);
err_out:
kfree(buddy->bits);
@@ -181,7 +179,7 @@ static void mthca_buddy_cleanup(struct mthca_buddy *buddy)
int i;
for (i = 0; i <= buddy->max_order; ++i)
- kfree(buddy->bits[i]);
+ bitmap_free(buddy->bits[i]);
kfree(buddy->bits);
kfree(buddy->num_free);
@@ -469,8 +467,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
mpt_entry->start = cpu_to_be64(iova);
mpt_entry->length = cpu_to_be64(total_size);
- memset(&mpt_entry->lkey, 0,
- sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
+ memset_startat(mpt_entry, 0, lkey);
if (mr->mtt)
mpt_entry->mtt_seg =
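
/*
 * memset_startat(mpt_entry, 0, lkey) zeroes from the lkey member through
 * the end of the struct -- the same bytes as the removed
 * "sizeof *mpt_entry - offsetof(..., lkey)" arithmetic, with the start
 * point spelled as a member name.  A rough expansion:
 */
static void sketch_clear_from_lkey(struct mthca_mpt_entry *mpt_entry)
{
	size_t off = offsetof(struct mthca_mpt_entry, lkey);

	memset((u8 *)mpt_entry + off, 0, sizeof(*mpt_entry) - off);
}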
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index ceee23ebc0f2..c46df53f26cf 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -50,14 +50,6 @@
#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"
-static void init_query_mad(struct ib_smp *mad)
-{
- mad->base_version = 1;
- mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- mad->class_version = 1;
- mad->method = IB_MGMT_METHOD_GET;
-}
-
static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
struct ib_udata *uhw)
{
@@ -78,7 +70,7 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr
props->fw_ver = mdev->fw_ver;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
err = mthca_MAD_IFC(mdev, 1, 1,
@@ -140,7 +132,7 @@ static int mthca_query_port(struct ib_device *ibdev,
/* props being zeroed by the caller, avoid zeroing it here */
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -234,7 +226,7 @@ static int mthca_query_pkey(struct ib_device *ibdev,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
in_mad->attr_mod = cpu_to_be32(index / 32);
@@ -263,7 +255,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u32 port,
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
in_mad->attr_mod = cpu_to_be32(port);
@@ -274,7 +266,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u32 port,
memcpy(gid->raw, out_mad->data + 8, 8);
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
in_mad->attr_mod = cpu_to_be32(index / 8);
@@ -1006,7 +998,7 @@ static int mthca_init_node_data(struct mthca_dev *dev)
if (!in_mad || !out_mad)
goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
err = mthca_MAD_IFC(dev, 1, 1,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index c51c3f40700e..265a581133dc 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1506,7 +1506,6 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
{
int status = -ENOMEM;
- size_t pd_bitmap_size;
struct ocrdma_alloc_pd_range *cmd;
struct ocrdma_alloc_pd_range_rsp *rsp;
@@ -1528,10 +1527,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
dev->pd_mgr->max_dpp_pd = rsp->pd_count;
- pd_bitmap_size =
- BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
- dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
- GFP_KERNEL);
+ dev->pd_mgr->pd_dpp_bitmap = bitmap_zalloc(rsp->pd_count,
+ GFP_KERNEL);
}
kfree(cmd);
}
@@ -1547,9 +1544,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
dev->pd_mgr->max_normal_pd = rsp->pd_count;
- pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
- dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
- GFP_KERNEL);
+ dev->pd_mgr->pd_norm_bitmap = bitmap_zalloc(rsp->pd_count,
+ GFP_KERNEL);
}
kfree(cmd);
@@ -1611,8 +1607,8 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
{
ocrdma_mbx_dealloc_pd_range(dev);
- kfree(dev->pd_mgr->pd_norm_bitmap);
- kfree(dev->pd_mgr->pd_dpp_bitmap);
+ bitmap_free(dev->pd_mgr->pd_norm_bitmap);
+ bitmap_free(dev->pd_mgr->pd_dpp_bitmap);
kfree(dev->pd_mgr);
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 7abf6cf1e937..5d4b3bc16493 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -62,20 +62,6 @@ MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("Dual BSD/GPL");
-void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)
-{
- u8 mac_addr[6];
-
- memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN);
- guid[0] = mac_addr[0] ^ 2;
- guid[1] = mac_addr[1];
- guid[2] = mac_addr[2];
- guid[3] = 0xff;
- guid[4] = 0xfe;
- guid[5] = mac_addr[3];
- guid[6] = mac_addr[4];
- guid[7] = mac_addr[5];
-}
static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
u32 port_num)
{
@@ -203,7 +189,8 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
{
int ret;
- ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
+ addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid,
+ dev->nic_info.mac_addr);
BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
sizeof(OCRDMA_NODE_DESC));
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 735123d0e9ec..acf9970ec245 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -41,6 +41,7 @@
*/
#include <linux/dma-mapping.h>
+#include <net/addrconf.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
@@ -74,7 +75,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
memset(attr, 0, sizeof *attr);
memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
- ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ dev->nic_info.mac_addr);
attr->max_mr_size = dev->attr.max_mr_size;
attr->page_size_cap = 0xffff000;
attr->vendor_id = dev->nic_info.pdev->vendor;
@@ -245,13 +247,13 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
u16 pd_bitmap_idx = 0;
- const unsigned long *pd_bitmap;
+ unsigned long *pd_bitmap;
if (dpp_pool) {
pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
dev->pd_mgr->max_dpp_pd);
- __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
+ __set_bit(pd_bitmap_idx, pd_bitmap);
dev->pd_mgr->pd_dpp_count++;
if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
@@ -259,7 +261,7 @@ static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
dev->pd_mgr->max_normal_pd);
- __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
+ __set_bit(pd_bitmap_idx, pd_bitmap);
dev->pd_mgr->pd_norm_count++;
if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
@@ -1844,12 +1846,10 @@ int ocrdma_modify_srq(struct ib_srq *ibsrq,
int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
- int status;
struct ocrdma_srq *srq;
srq = get_ocrdma_srq(ibsrq);
- status = ocrdma_mbx_query_srq(srq, srq_attr);
- return status;
+ return ocrdma_mbx_query_srq(srq, srq_attr);
}
int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
@@ -1960,7 +1960,6 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
const struct ib_send_wr *wr)
{
- int status;
struct ocrdma_sge *sge;
u32 wqe_size = sizeof(*hdr);
@@ -1972,8 +1971,7 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
sge = (struct ocrdma_sge *)(hdr + 1);
}
- status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
- return status;
+ return ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
}
static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index b73d742a520c..f860b7fcef33 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -59,7 +59,6 @@ int ocrdma_query_port(struct ib_device *ibdev, u32 port,
enum rdma_protocol_type
ocrdma_query_protocol(struct ib_device *device, u32 port_num);
-void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey);
int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 9100009f0a23..a53476653b0d 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1931,6 +1931,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
/* db offset was calculated in copy_qp_uresp, now set in the user q */
if (qedr_qp_has_sq(qp)) {
qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset;
+ qp->sq.max_wr = attrs->cap.max_send_wr;
rc = qedr_db_recovery_add(dev, qp->usq.db_addr,
&qp->usq.db_rec_data->db_data,
DB_REC_WIDTH_32B,
@@ -1941,6 +1942,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
if (qedr_qp_has_rq(qp)) {
qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset;
+ qp->rq.max_wr = attrs->cap.max_recv_wr;
rc = qedr_db_recovery_add(dev, qp->urq.db_addr,
&qp->urq.db_rec_data->db_data,
DB_REC_WIDTH_32B,
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 9363bccfc6e7..a8e1c30c370f 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -196,7 +196,7 @@ struct qib_ctxtdata {
pid_t pid;
pid_t subpid[QLOGIC_IB_MAX_SUBCTXT];
/* same size as task_struct .comm[], command that opened context */
- char comm[16];
+ char comm[TASK_COMM_LEN];
/* pkeys set by this use of this ctxt */
u16 pkeys[4];
/* so file ops can get at unit */
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 63854f4b6524..aa290928cf96 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1321,7 +1321,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
rcd->tid_pg_list = ptmp;
rcd->pid = current->pid;
init_waitqueue_head(&dd->rcd[ctxt]->wait);
- strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
+ get_task_comm(rcd->comm, current);
ctxt_fp(fp) = rcd;
qib_stats.sps_ctxts++;
dd->freectxts--;
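
/*
 * get_task_comm() improves on the open-coded strlcpy() in two ways: the
 * macro BUILD_BUG_ON()s unless the destination is exactly TASK_COMM_LEN
 * bytes (hence the rcd->comm size change in qib.h above), and the copy in
 * __get_task_comm() runs under task_lock(), so it cannot race a concurrent
 * comm update.  Minimal usage sketch with an illustrative struct:
 */
struct sketch_ctxt {
	char comm[TASK_COMM_LEN];	/* any other size fails the build */
};

static void sketch_record_opener(struct sketch_ctxt *rcd)
{
	get_task_comm(rcd->comm, current);
}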
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index a9b83bc13f4a..aea571943768 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3030,7 +3030,7 @@ static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
/* Does read/modify/write to appropriate registers to
* set output and direction bits selected by mask.
- * these are in their canonical postions (e.g. lsb of
+ * these are in their canonical positions (e.g. lsb of
* dir will end up in D48 of extctrl on existing chips).
* returns contents of GP Inputs.
*/
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index d1c0bc31869f..80a8dd6c7814 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -3742,7 +3742,7 @@ static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
/*
* Does read/modify/write to appropriate registers to
* set output and direction bits selected by mask.
- * these are in their canonical postions (e.g. lsb of
+ * these are in their canonical positions (e.g. lsb of
* dir will end up in D48 of extctrl on existing chips).
* returns contents of GP Inputs.
*/
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ab98b6a3ae1e..ceed302cf6a0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -5665,7 +5665,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
/*
* Does read/modify/write to appropriate registers to
* set output and direction bits selected by mask.
- * these are in their canonical postions (e.g. lsb of
+ * these are in their canonical positions (e.g. lsb of
* dir will end up in D48 of extctrl on existing chips).
* returns contents of GP Inputs.
*/
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index ef91bff5c23c..0080f0be72fe 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -425,7 +425,7 @@ static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
}
#endif
-static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
+static void qib_copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss,
u32 length, unsigned flush_wc)
{
u32 extra = 0;
@@ -975,7 +975,7 @@ static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr,
qib_pio_copy(piobuf, addr, dwords);
goto done;
}
- copy_io(piobuf, ss, len, flush_wc);
+ qib_copy_io(piobuf, ss, len, flush_wc);
done:
if (dd->flags & QIB_USE_SPCL_TRIG) {
u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
index 586b0e52ba7f..7d868f033bbf 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c
@@ -243,10 +243,11 @@ static struct attribute *usnic_ib_qpn_default_attrs[] = {
&qpn_attr_summary.attr,
NULL
};
+ATTRIBUTE_GROUPS(usnic_ib_qpn_default);
static struct kobj_type usnic_ib_qpn_type = {
.sysfs_ops = &usnic_ib_qpn_sysfs_ops,
- .default_attrs = usnic_ib_qpn_default_attrs
+ .default_groups = usnic_ib_qpn_default_groups,
};
int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev)
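
/*
 * ATTRIBUTE_GROUPS(usnic_ib_qpn_default) expands, roughly, to the pair
 * below, which is what allows the kobj_type to move from the deprecated
 * ->default_attrs pointer to ->default_groups:
 */
static const struct attribute_group usnic_ib_qpn_default_group = {
	.attrs = usnic_ib_qpn_default_attrs,
};

static const struct attribute_group *usnic_ib_qpn_default_groups[] = {
	&usnic_ib_qpn_default_group,
	NULL,
};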
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
index 756a83bcff58..5a0e26cd648e 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
@@ -442,12 +442,10 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
int usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
struct usnic_ib_pd *pd = to_upd(ibpd);
- void *umem_pd;
- umem_pd = pd->umem_pd = usnic_uiom_alloc_pd();
- if (IS_ERR_OR_NULL(umem_pd)) {
- return umem_pd ? PTR_ERR(umem_pd) : -ENOMEM;
- }
+ pd->umem_pd = usnic_uiom_alloc_pd();
+ if (IS_ERR(pd->umem_pd))
+ return PTR_ERR(pd->umem_pd);
return 0;
}
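
/*
 * The simplification above leans on one invariant: usnic_uiom_alloc_pd()
 * reports failure as ERR_PTR(-errno) and never returns NULL, so the
 * IS_ERR_OR_NULL() test and the "umem_pd ? PTR_ERR : -ENOMEM" fallback
 * were dead code.  A sketch of the producer contract the caller now
 * assumes (illustrative body, not the driver's actual allocator):
 */
static struct usnic_uiom_pd *sketch_alloc_pd(void)
{
	struct usnic_uiom_pd *pd = kzalloc(sizeof(*pd), GFP_KERNEL);

	if (!pd)
		return ERR_PTR(-ENOMEM);	/* error pointer, never NULL */

	return pd;
}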
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
index bf51357ea3aa..9a4de962e947 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c
@@ -63,12 +63,12 @@ int pvrdma_uar_table_init(struct pvrdma_dev *dev)
tbl->max = num;
tbl->mask = mask;
spin_lock_init(&tbl->lock);
- tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL);
+ tbl->table = bitmap_zalloc(num, GFP_KERNEL);
if (!tbl->table)
return -ENOMEM;
/* 0th UAR is taken by the device. */
- set_bit(0, tbl->table);
+ __set_bit(0, tbl->table);
return 0;
}
@@ -77,7 +77,7 @@ void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev)
{
struct pvrdma_id_table *tbl = &dev->uar_table.tbl;
- kfree(tbl->table);
+ bitmap_free(tbl->table);
}
int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
@@ -100,7 +100,7 @@ int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
return -ENOMEM;
}
- set_bit(obj, tbl->table);
+ __set_bit(obj, tbl->table);
obj |= tbl->top;
spin_unlock_irqrestore(&tbl->lock, flags);
@@ -120,7 +120,7 @@ void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar)
obj = uar->index & (tbl->max - 1);
spin_lock_irqsave(&tbl->lock, flags);
- clear_bit(obj, tbl->table);
+ __clear_bit(obj, tbl->table);
tbl->last = min(tbl->last, obj);
tbl->top = (tbl->top + tbl->max) & tbl->mask;
spin_unlock_irqrestore(&tbl->lock, flags);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 3305f2744bfa..ae50b56e8913 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -3073,6 +3073,8 @@ do_write:
case IB_WR_ATOMIC_FETCH_AND_ADD:
if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
goto inv_err;
+ if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1)))
+ goto inv_err;
if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
wqe->atomic_wr.remote_addr,
wqe->atomic_wr.rkey,
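
/*
 * The added check encodes the IBA requirement that ATOMIC_CMP_AND_SWP and
 * ATOMIC_FETCH_AND_ADD target a single, naturally aligned 64-bit quantity;
 * a misaligned remote_addr is now rejected through the existing inv_err
 * path instead of being acted on.  The test is the usual power-of-two
 * alignment idiom:
 */
static bool sketch_atomic_addr_ok(u64 remote_addr)
{
	return (remote_addr & (sizeof(u64) - 1)) == 0;
}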
diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile
index 1e24673e9318..5395a581f4bb 100644
--- a/drivers/infiniband/sw/rxe/Makefile
+++ b/drivers/infiniband/sw/rxe/Makefile
@@ -22,5 +22,4 @@ rdma_rxe-y := \
rxe_mcast.o \
rxe_task.o \
rxe_net.o \
- rxe_sysfs.o \
rxe_hw_counters.o
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 8e0f9c489cab..fab291245366 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -13,8 +13,6 @@ MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL");
-bool rxe_initialized;
-
/* free resources for a rxe device all objects created for this device must
* have been destroyed
*/
@@ -290,7 +288,6 @@ static int __init rxe_module_init(void)
return err;
rdma_link_register(&rxe_link_ops);
- rxe_initialized = true;
pr_info("loaded\n");
return 0;
}
@@ -301,7 +298,6 @@ static void __exit rxe_module_exit(void)
ib_unregister_driver(RDMA_DRIVER_RXE);
rxe_net_exit();
- rxe_initialized = false;
pr_info("unloaded\n");
}
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index 1bb3fb618bf5..fb9066e6f5f0 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -39,8 +39,6 @@
#define RXE_ROCE_V2_SPORT (0xc000)
-extern bool rxe_initialized;
-
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu);
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name);
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index d771ba8449a1..f363fe3fa414 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -458,8 +458,6 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
struct rxe_pkt_info *pkt,
struct rxe_send_wqe *wqe)
{
- unsigned long flags;
-
if (wqe->has_rd_atomic) {
wqe->has_rd_atomic = 0;
atomic_inc(&qp->req.rd_atomic);
@@ -472,11 +470,11 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
/* state_lock used by requester & completer */
- spin_lock_irqsave(&qp->state_lock, flags);
+ spin_lock_bh(&qp->state_lock);
if ((qp->req.state == QP_STATE_DRAIN) &&
(qp->comp.psn == qp->req.psn)) {
qp->req.state = QP_STATE_DRAINED;
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ spin_unlock_bh(&qp->state_lock);
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -488,7 +486,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
qp->ibqp.qp_context);
}
} else {
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ spin_unlock_bh(&qp->state_lock);
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index 6848426c074f..6baaaa34458e 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -42,14 +42,13 @@ err1:
static void rxe_send_complete(struct tasklet_struct *t)
{
struct rxe_cq *cq = from_tasklet(cq, t, comp_task);
- unsigned long flags;
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
if (cq->is_dying) {
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
return;
}
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
@@ -106,15 +105,14 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe,
int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
{
struct ib_event ev;
- unsigned long flags;
int full;
void *addr;
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (unlikely(full)) {
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
if (cq->ibcq.event_handler) {
ev.device = cq->ibcq.device;
ev.element.cq = &cq->ibcq;
@@ -130,7 +128,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
if ((cq->notify == IB_CQ_NEXT_COMP) ||
(cq->notify == IB_CQ_SOLICITED && solicited)) {
@@ -143,16 +141,14 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
void rxe_cq_disable(struct rxe_cq *cq)
{
- unsigned long flags;
-
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
cq->is_dying = true;
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
}
-void rxe_cq_cleanup(struct rxe_pool_entry *arg)
+void rxe_cq_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem);
+ struct rxe_cq *cq = container_of(elem, typeof(*cq), elem);
if (cq->queue)
rxe_queue_cleanup(cq->queue);
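rxe_cq_cleanup() above also picks up the rxe_pool_entry to rxe_pool_elem rename: each rxe object embeds the pool bookkeeping struct as a member now called elem, and the cleanup callback recovers the containing object with container_of(). A simplified sketch of that pattern, assuming a made-up demo_cq layout (only the embedding and the pointer math mirror the patch):

#include <linux/kernel.h>

struct demo_pool_elem {
	int index;
};

struct demo_cq {
	struct demo_pool_elem elem;	/* embedded pool bookkeeping */
	void *queue;
};

static void demo_cq_cleanup(struct demo_pool_elem *elem)
{
	/* Step back from the embedded member to the enclosing object,
	 * as rxe_cq_cleanup() does with
	 * container_of(elem, typeof(*cq), elem).
	 */
	struct demo_cq *cq = container_of(elem, struct demo_cq, elem);

	if (cq->queue)
		cq->queue = NULL;	/* stand-in for rxe_queue_cleanup() */
}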
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 1ca43b859d80..b1e174afb1d4 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -37,7 +37,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited);
void rxe_cq_disable(struct rxe_cq *cq);
-void rxe_cq_cleanup(struct rxe_pool_entry *arg);
+void rxe_cq_cleanup(struct rxe_pool_elem *arg);
/* rxe_mcast.c */
int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
@@ -51,7 +51,7 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp,
void rxe_drop_all_mcast_groups(struct rxe_qp *qp);
-void rxe_mc_cleanup(struct rxe_pool_entry *arg);
+void rxe_mc_cleanup(struct rxe_pool_elem *arg);
/* rxe_mmap.c */
struct rxe_mmap_info {
@@ -89,7 +89,7 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey);
int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr);
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
-void rxe_mr_cleanup(struct rxe_pool_entry *arg);
+void rxe_mr_cleanup(struct rxe_pool_elem *arg);
/* rxe_mw.c */
int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata);
@@ -97,7 +97,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw);
int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey);
struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey);
-void rxe_mw_cleanup(struct rxe_pool_entry *arg);
+void rxe_mw_cleanup(struct rxe_pool_elem *arg);
/* rxe_net.c */
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
@@ -131,7 +131,7 @@ void rxe_qp_error(struct rxe_qp *qp);
void rxe_qp_destroy(struct rxe_qp *qp);
-void rxe_qp_cleanup(struct rxe_pool_entry *arg);
+void rxe_qp_cleanup(struct rxe_pool_elem *elem);
static inline int qp_num(struct rxe_qp *qp)
{
diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c b/drivers/infiniband/sw/rxe/rxe_mcast.c
index 1c1d1b53312d..bd1ac88b8700 100644
--- a/drivers/infiniband/sw/rxe/rxe_mcast.c
+++ b/drivers/infiniband/sw/rxe/rxe_mcast.c
@@ -40,12 +40,11 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
int err;
struct rxe_mc_grp *grp;
struct rxe_pool *pool = &rxe->mc_grp_pool;
- unsigned long flags;
if (rxe->attr.max_mcast_qp_attach == 0)
return -EINVAL;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
grp = rxe_pool_get_key_locked(pool, mgid);
if (grp)
@@ -53,13 +52,13 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid,
grp = create_grp(rxe, pool, mgid);
if (IS_ERR(grp)) {
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
err = PTR_ERR(grp);
return err;
}
done:
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
*grp_p = grp;
return 0;
}
@@ -169,9 +168,9 @@ void rxe_drop_all_mcast_groups(struct rxe_qp *qp)
}
}
-void rxe_mc_cleanup(struct rxe_pool_entry *arg)
+void rxe_mc_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem);
+ struct rxe_mc_grp *grp = container_of(elem, typeof(*grp), elem);
struct rxe_dev *rxe = grp->rxe;
rxe_drop_key(grp);
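rxe_mcast_get_grp() keeps its lookup-or-create logic under a single write_lock_bh() section, so two callers racing to attach the same mgid cannot both create a group; only the lock flavor changes here. A condensed sketch of that pattern with illustrative demo_* names (the real function also takes references and jumps to a done label on a lookup hit):

#include <linux/spinlock.h>
#include <linux/err.h>

struct demo_grp;

struct demo_grp *demo_lookup(void *key);	/* assumed helpers */
struct demo_grp *demo_create(void *key);

static DEFINE_RWLOCK(demo_pool_lock);

static int demo_get_grp(void *key, struct demo_grp **grp_p)
{
	struct demo_grp *grp;

	write_lock_bh(&demo_pool_lock);
	grp = demo_lookup(key);
	if (!grp) {
		/* Still holding the lock, so no second creator can race us. */
		grp = demo_create(key);
		if (IS_ERR(grp)) {
			write_unlock_bh(&demo_pool_lock);
			return PTR_ERR(grp);
		}
	}
	write_unlock_bh(&demo_pool_lock);

	*grp_p = grp;
	return 0;
}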
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index bcf717bcf0b3..453ef3c9d535 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -50,7 +50,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
static void rxe_mr_init(int access, struct rxe_mr *mr)
{
- u32 lkey = mr->pelem.index << 8 | rxe_get_next_key(-1);
+ u32 lkey = mr->elem.index << 8 | rxe_get_next_key(-1);
u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
/* set ibmr->l/rkey and also copy into private l/rkey
@@ -697,9 +697,9 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return 0;
}
-void rxe_mr_cleanup(struct rxe_pool_entry *arg)
+void rxe_mr_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
+ struct rxe_mr *mr = container_of(elem, typeof(*mr), elem);
ib_umem_release(mr->umem);
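The mr->pelem.index to mr->elem.index change in rxe_mr_init() is mechanical, but it is worth noting the key layout it preserves: the pool index sits in the upper 24 bits and an 8-bit key variant fills the low byte, which is why lookups elsewhere in the driver shift an rkey right by 8 to recover the index. A sketch of that split, with hypothetical helper names:

#include <linux/types.h>

/* Compose an lkey/rkey the way rxe_mr_init() does:
 * index in the high bits, variant in the low byte.
 */
static u32 demo_make_key(u32 index, u8 variant)
{
	return (index << 8) | variant;
}

/* Inverse used by lookups such as
 * rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8).
 */
static u32 demo_key_to_index(u32 key)
{
	return key >> 8;
}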
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 9534a7fe1a98..32dd8c0b8b9e 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -21,7 +21,7 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
}
rxe_add_index(mw);
- mw->rkey = ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1);
+ mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1);
mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
spin_lock_init(&mw->lock);
@@ -56,11 +56,10 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
{
struct rxe_mw *mw = to_rmw(ibmw);
struct rxe_pd *pd = to_rpd(ibmw->pd);
- unsigned long flags;
- spin_lock_irqsave(&mw->lock, flags);
+ spin_lock_bh(&mw->lock);
rxe_do_dealloc_mw(mw);
- spin_unlock_irqrestore(&mw->lock, flags);
+ spin_unlock_bh(&mw->lock);
rxe_drop_ref(mw);
rxe_drop_ref(pd);
@@ -197,7 +196,6 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
- unsigned long flags;
mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
if (unlikely(!mw)) {
@@ -225,7 +223,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
mr = NULL;
}
- spin_lock_irqsave(&mw->lock, flags);
+ spin_lock_bh(&mw->lock);
ret = rxe_check_bind_mw(qp, wqe, mw, mr);
if (ret)
@@ -233,7 +231,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
rxe_do_bind_mw(qp, wqe, mw, mr);
err_unlock:
- spin_unlock_irqrestore(&mw->lock, flags);
+ spin_unlock_bh(&mw->lock);
err_drop_mr:
if (mr)
rxe_drop_ref(mr);
@@ -280,7 +278,6 @@ static void rxe_do_invalidate_mw(struct rxe_mw *mw)
int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
{
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
- unsigned long flags;
struct rxe_mw *mw;
int ret;
@@ -295,7 +292,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
goto err_drop_ref;
}
- spin_lock_irqsave(&mw->lock, flags);
+ spin_lock_bh(&mw->lock);
ret = rxe_check_invalidate_mw(qp, mw);
if (ret)
@@ -303,7 +300,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
rxe_do_invalidate_mw(mw);
err_unlock:
- spin_unlock_irqrestore(&mw->lock, flags);
+ spin_unlock_bh(&mw->lock);
err_drop_ref:
rxe_drop_ref(mw);
err:
@@ -333,9 +330,9 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
return mw;
}
-void rxe_mw_cleanup(struct rxe_pool_entry *elem)
+void rxe_mw_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem);
+ struct rxe_mw *mw = container_of(elem, typeof(*mw), elem);
rxe_drop_index(mw);
}
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 2cb810cb890a..be72bdbfb4ba 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -22,24 +22,20 @@ static struct rxe_recv_sockets recv_sockets;
int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
- int err;
unsigned char ll_addr[ETH_ALEN];
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- err = dev_mc_add(rxe->ndev, ll_addr);
- return err;
+ return dev_mc_add(rxe->ndev, ll_addr);
}
int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
- int err;
unsigned char ll_addr[ETH_ALEN];
ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
- err = dev_mc_del(rxe->ndev, ll_addr);
- return err;
+ return dev_mc_del(rxe->ndev, ll_addr);
}
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
@@ -444,7 +440,6 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
else
err = rxe_send(skb, pkt);
if (err) {
- rxe->xmit_errors++;
rxe_counter_inc(rxe, RXE_CNT_SEND_ERR);
return err;
}
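The rxe_net.c hunks are pure simplification: the multicast helpers return the dev_mc_add()/dev_mc_del() result directly instead of staging it in a local, and the xmit path drops the hand-rolled rxe->xmit_errors counter that duplicated the RXE_CNT_SEND_ERR statistic. A before/after of the first change, with an illustrative demo_ wrapper:

#include <linux/netdevice.h>

/* before: the local variable adds nothing */
static int demo_mc_add_old(struct net_device *ndev, const unsigned char *ll_addr)
{
	int err;

	err = dev_mc_add(ndev, ll_addr);
	return err;
}

/* after: return the call result directly */
static int demo_mc_add_new(struct net_device *ndev, const unsigned char *ll_addr)
{
	return dev_mc_add(ndev, ll_addr);
}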
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c
index 3ef5a10a6efd..df596ba7527d 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.c
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.c
@@ -108,8 +108,8 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = {
struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
[IB_OPCODE_RC_SEND_FIRST] = {
.name = "IB_OPCODE_RC_SEND_FIRST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -117,9 +117,9 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
}
},
[IB_OPCODE_RC_SEND_MIDDLE] = {
- .name = "IB_OPCODE_RC_SEND_MIDDLE]",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK
- | RXE_MIDDLE_MASK,
+ .name = "IB_OPCODE_RC_SEND_MIDDLE",
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -128,8 +128,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_SEND_LAST] = {
.name = "IB_OPCODE_RC_SEND_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -138,21 +138,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_SEND_ONLY] = {
.name = "IB_OPCODE_RC_SEND_ONLY",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -161,33 +161,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_FIRST] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_FIRST",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_MIDDLE",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -196,8 +196,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_RDMA_WRITE_LAST] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -206,69 +206,69 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_ONLY] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_ONLY",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK
- | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_REQUEST] = {
.name = "IB_OPCODE_RC_RDMA_READ_REQUEST",
- .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = {
.name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST",
- .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_START_MASK,
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = {
@@ -282,109 +282,110 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = {
.name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST",
- .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = {
.name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY",
- .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RC_ACKNOWLEDGE",
- .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK
- | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE",
- .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_AETH] = RXE_BTH_BYTES,
- [RXE_ATMACK] = RXE_BTH_BYTES
- + RXE_AETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_ATMACK_BYTES + RXE_AETH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES +
+ RXE_AETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMACK_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RC_COMPARE_SWAP] = {
.name = "IB_OPCODE_RC_COMPARE_SWAP",
- .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_ATMETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_ATMETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMETH_BYTES,
}
},
[IB_OPCODE_RC_FETCH_ADD] = {
.name = "IB_OPCODE_RC_FETCH_ADD",
- .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_ATMETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_ATMETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_ATMETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_ATMETH_BYTES,
}
},
[IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = {
.name = "IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE",
- .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IETH_BYTES,
}
},
[IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = {
.name = "IB_OPCODE_RC_SEND_ONLY_INV",
- .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_END_MASK | RXE_START_MASK,
+ .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_END_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IETH_BYTES,
}
},
/* UC */
[IB_OPCODE_UC_SEND_FIRST] = {
.name = "IB_OPCODE_UC_SEND_FIRST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -393,8 +394,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_MIDDLE] = {
.name = "IB_OPCODE_UC_SEND_MIDDLE",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -403,8 +404,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_LAST] = {
.name = "IB_OPCODE_UC_SEND_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -413,21 +414,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_UC_SEND_ONLY] = {
.name = "IB_OPCODE_UC_SEND_ONLY",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK
- | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK |
+ RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -436,33 +437,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_FIRST] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_FIRST",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_MIDDLE",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -471,8 +472,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_RDMA_WRITE_LAST] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_LAST",
- .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_END_MASK,
+ .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES,
.offset = {
[RXE_BTH] = 0,
@@ -481,460 +482,460 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
},
[IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE",
- .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_END_MASK,
+ .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_IMMDT] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_ONLY] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_ONLY",
- .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK
- | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RETH] = RXE_BTH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
/* RD */
[IB_OPCODE_RD_SEND_FIRST] = {
.name = "IB_OPCODE_RD_SEND_FIRST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_MIDDLE] = {
.name = "IB_OPCODE_RD_SEND_MIDDLE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_SEND_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_SEND_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_LAST] = {
.name = "IB_OPCODE_RD_SEND_LAST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK
- | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_SEND_MASK
- | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_SEND_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_SEND_ONLY] = {
.name = "IB_OPCODE_RD_SEND_ONLY",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_FIRST] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_FIRST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK,
- .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_MIDDLE] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_MIDDLE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_LAST] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_LAST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_ONLY] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_ONLY",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_WRITE_MASK | RXE_START_MASK
- | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_WRITE_MASK | RXE_START_MASK |
+ RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_WRITE_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES
- + RXE_DETH_BYTES + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_WRITE_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES +
+ RXE_DETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_REQUEST] = {
.name = "IB_OPCODE_RD_RDMA_READ_REQUEST",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK
- | RXE_REQ_MASK | RXE_READ_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK |
+ RXE_REQ_MASK | RXE_READ_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_RETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RETH_BYTES
- + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_RETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK
- | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_START_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK |
+ RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_START_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE",
- .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK
- | RXE_MIDDLE_MASK,
+ .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK |
+ RXE_MIDDLE_MASK,
.length = RXE_BTH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK
- | RXE_ACK_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_ACK_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY] = {
.name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK
- | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK |
+ RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RD_ACKNOWLEDGE",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE] = {
.name = "IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE",
- .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK
- | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK |
+ RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_AETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_ATMACK] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_AETH_BYTES,
+ [RXE_AETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMACK] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_AETH_BYTES,
}
},
[IB_OPCODE_RD_COMPARE_SWAP] = {
.name = "RD_COMPARE_SWAP",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK
- | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK |
+ RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_ATMETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
[RXE_PAYLOAD] = RXE_BTH_BYTES +
- + RXE_ATMETH_BYTES
- + RXE_DETH_BYTES +
- + RXE_RDETH_BYTES,
+ RXE_ATMETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
}
},
[IB_OPCODE_RD_FETCH_ADD] = {
.name = "IB_OPCODE_RD_FETCH_ADD",
- .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK
- | RXE_REQ_MASK | RXE_ATOMIC_MASK
- | RXE_START_MASK | RXE_END_MASK,
- .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES
- + RXE_RDETH_BYTES,
+ .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK |
+ RXE_REQ_MASK | RXE_ATOMIC_MASK |
+ RXE_START_MASK | RXE_END_MASK,
+ .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_RDETH] = RXE_BTH_BYTES,
- [RXE_DETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES,
- [RXE_ATMETH] = RXE_BTH_BYTES
- + RXE_RDETH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_DETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES,
+ [RXE_ATMETH] = RXE_BTH_BYTES +
+ RXE_RDETH_BYTES +
+ RXE_DETH_BYTES,
[RXE_PAYLOAD] = RXE_BTH_BYTES +
- + RXE_ATMETH_BYTES
- + RXE_DETH_BYTES +
- + RXE_RDETH_BYTES,
+ RXE_ATMETH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_RDETH_BYTES,
}
},
/* UD */
[IB_OPCODE_UD_SEND_ONLY] = {
.name = "IB_OPCODE_UD_SEND_ONLY",
- .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
- | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
- | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK |
+ RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK |
+ RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_DETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_DETH] = RXE_BTH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES,
}
},
[IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = {
.name = "IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE",
- .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK
- | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK
- | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
+ .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK |
+ RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK |
+ RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK,
.length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES,
.offset = {
[RXE_BTH] = 0,
[RXE_DETH] = RXE_BTH_BYTES,
- [RXE_IMMDT] = RXE_BTH_BYTES
- + RXE_DETH_BYTES,
- [RXE_PAYLOAD] = RXE_BTH_BYTES
- + RXE_DETH_BYTES
- + RXE_IMMDT_BYTES,
+ [RXE_IMMDT] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES,
+ [RXE_PAYLOAD] = RXE_BTH_BYTES +
+ RXE_DETH_BYTES +
+ RXE_IMMDT_BYTES,
}
},
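Nearly everything in the rxe_opcode.c diff above is a style pass, apart from two small fixes it carries along: the stray ']' in the IB_OPCODE_RC_SEND_MIDDLE name string, and the doubled '+' (a harmless unary plus) in the RD atomic payload offsets. Continuation lines now end with the binary operator instead of starting with it, the form checkpatch prefers. A minimal illustration, with made-up DEMO_* flags:

#include <linux/bits.h>

#define DEMO_A	BIT(0)
#define DEMO_B	BIT(1)
#define DEMO_C	BIT(2)

/* old style: operator leads the continuation line */
static const unsigned long demo_mask_old = DEMO_A | DEMO_B
		| DEMO_C;

/* new style: operator trails, as in the patch */
static const unsigned long demo_mask_new = DEMO_A | DEMO_B |
		DEMO_C;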
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index 2e80bb6aa957..4cb003885e00 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -5,13 +5,14 @@
*/
#include "rxe.h"
-#include "rxe_loc.h"
+
+#define RXE_POOL_ALIGN (16)
static const struct rxe_type_info {
const char *name;
size_t size;
size_t elem_offset;
- void (*cleanup)(struct rxe_pool_entry *obj);
+ void (*cleanup)(struct rxe_pool_elem *obj);
enum rxe_pool_flags flags;
u32 min_index;
u32 max_index;
@@ -21,19 +22,19 @@ static const struct rxe_type_info {
[RXE_TYPE_UC] = {
.name = "rxe-uc",
.size = sizeof(struct rxe_ucontext),
- .elem_offset = offsetof(struct rxe_ucontext, pelem),
+ .elem_offset = offsetof(struct rxe_ucontext, elem),
.flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_PD] = {
.name = "rxe-pd",
.size = sizeof(struct rxe_pd),
- .elem_offset = offsetof(struct rxe_pd, pelem),
+ .elem_offset = offsetof(struct rxe_pd, elem),
.flags = RXE_POOL_NO_ALLOC,
},
[RXE_TYPE_AH] = {
.name = "rxe-ah",
.size = sizeof(struct rxe_ah),
- .elem_offset = offsetof(struct rxe_ah, pelem),
+ .elem_offset = offsetof(struct rxe_ah, elem),
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_AH_INDEX,
.max_index = RXE_MAX_AH_INDEX,
@@ -41,7 +42,7 @@ static const struct rxe_type_info {
[RXE_TYPE_SRQ] = {
.name = "rxe-srq",
.size = sizeof(struct rxe_srq),
- .elem_offset = offsetof(struct rxe_srq, pelem),
+ .elem_offset = offsetof(struct rxe_srq, elem),
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_SRQ_INDEX,
.max_index = RXE_MAX_SRQ_INDEX,
@@ -49,7 +50,7 @@ static const struct rxe_type_info {
[RXE_TYPE_QP] = {
.name = "rxe-qp",
.size = sizeof(struct rxe_qp),
- .elem_offset = offsetof(struct rxe_qp, pelem),
+ .elem_offset = offsetof(struct rxe_qp, elem),
.cleanup = rxe_qp_cleanup,
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_QP_INDEX,
@@ -58,14 +59,14 @@ static const struct rxe_type_info {
[RXE_TYPE_CQ] = {
.name = "rxe-cq",
.size = sizeof(struct rxe_cq),
- .elem_offset = offsetof(struct rxe_cq, pelem),
+ .elem_offset = offsetof(struct rxe_cq, elem),
.flags = RXE_POOL_NO_ALLOC,
.cleanup = rxe_cq_cleanup,
},
[RXE_TYPE_MR] = {
.name = "rxe-mr",
.size = sizeof(struct rxe_mr),
- .elem_offset = offsetof(struct rxe_mr, pelem),
+ .elem_offset = offsetof(struct rxe_mr, elem),
.cleanup = rxe_mr_cleanup,
.flags = RXE_POOL_INDEX,
.min_index = RXE_MIN_MR_INDEX,
@@ -74,7 +75,7 @@ static const struct rxe_type_info {
[RXE_TYPE_MW] = {
.name = "rxe-mw",
.size = sizeof(struct rxe_mw),
- .elem_offset = offsetof(struct rxe_mw, pelem),
+ .elem_offset = offsetof(struct rxe_mw, elem),
.cleanup = rxe_mw_cleanup,
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
.min_index = RXE_MIN_MW_INDEX,
@@ -83,7 +84,7 @@ static const struct rxe_type_info {
[RXE_TYPE_MC_GRP] = {
.name = "rxe-mc_grp",
.size = sizeof(struct rxe_mc_grp),
- .elem_offset = offsetof(struct rxe_mc_grp, pelem),
+ .elem_offset = offsetof(struct rxe_mc_grp, elem),
.cleanup = rxe_mc_cleanup,
.flags = RXE_POOL_KEY,
.key_offset = offsetof(struct rxe_mc_grp, mgid),
@@ -92,15 +93,10 @@ static const struct rxe_type_info {
[RXE_TYPE_MC_ELEM] = {
.name = "rxe-mc_elem",
.size = sizeof(struct rxe_mc_elem),
- .elem_offset = offsetof(struct rxe_mc_elem, pelem),
+ .elem_offset = offsetof(struct rxe_mc_elem, elem),
},
};
-static inline const char *pool_name(struct rxe_pool *pool)
-{
- return rxe_type_info[pool->type].name;
-}
-
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
int err = 0;
@@ -130,35 +126,36 @@ int rxe_pool_init(
enum rxe_elem_type type,
unsigned int max_elem)
{
+ const struct rxe_type_info *info = &rxe_type_info[type];
int err = 0;
- size_t size = rxe_type_info[type].size;
memset(pool, 0, sizeof(*pool));
pool->rxe = rxe;
+ pool->name = info->name;
pool->type = type;
pool->max_elem = max_elem;
- pool->elem_size = ALIGN(size, RXE_POOL_ALIGN);
- pool->flags = rxe_type_info[type].flags;
- pool->index.tree = RB_ROOT;
- pool->key.tree = RB_ROOT;
- pool->cleanup = rxe_type_info[type].cleanup;
+ pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN);
+ pool->elem_offset = info->elem_offset;
+ pool->flags = info->flags;
+ pool->cleanup = info->cleanup;
atomic_set(&pool->num_elem, 0);
rwlock_init(&pool->pool_lock);
- if (rxe_type_info[type].flags & RXE_POOL_INDEX) {
- err = rxe_pool_init_index(pool,
- rxe_type_info[type].max_index,
- rxe_type_info[type].min_index);
+ if (pool->flags & RXE_POOL_INDEX) {
+ pool->index.tree = RB_ROOT;
+ err = rxe_pool_init_index(pool, info->max_index,
+ info->min_index);
if (err)
goto out;
}
- if (rxe_type_info[type].flags & RXE_POOL_KEY) {
- pool->key.key_offset = rxe_type_info[type].key_offset;
- pool->key.key_size = rxe_type_info[type].key_size;
+ if (pool->flags & RXE_POOL_KEY) {
+ pool->key.tree = RB_ROOT;
+ pool->key.key_offset = info->key_offset;
+ pool->key.key_size = info->key_size;
}
out:
@@ -169,9 +166,10 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
{
if (atomic_read(&pool->num_elem) > 0)
pr_warn("%s pool destroyed with unfree'd elem\n",
- pool_name(pool));
+ pool->name);
- bitmap_free(pool->index.table);
+ if (pool->flags & RXE_POOL_INDEX)
+ bitmap_free(pool->index.table);
}
static u32 alloc_index(struct rxe_pool *pool)
@@ -189,15 +187,15 @@ static u32 alloc_index(struct rxe_pool *pool)
return index + pool->index.min_index;
}
-static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_elem *new)
{
struct rb_node **link = &pool->index.tree.rb_node;
struct rb_node *parent = NULL;
- struct rxe_pool_entry *elem;
+ struct rxe_pool_elem *elem;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, index_node);
+ elem = rb_entry(parent, struct rxe_pool_elem, index_node);
if (elem->index == new->index) {
pr_warn("element already exists!\n");
@@ -216,19 +214,20 @@ static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new)
return 0;
}
-static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
+static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_elem *new)
{
struct rb_node **link = &pool->key.tree.rb_node;
struct rb_node *parent = NULL;
- struct rxe_pool_entry *elem;
+ struct rxe_pool_elem *elem;
int cmp;
while (*link) {
parent = *link;
- elem = rb_entry(parent, struct rxe_pool_entry, key_node);
+ elem = rb_entry(parent, struct rxe_pool_elem, key_node);
cmp = memcmp((u8 *)elem + pool->key.key_offset,
- (u8 *)new + pool->key.key_offset, pool->key.key_size);
+ (u8 *)new + pool->key.key_offset,
+ pool->key.key_size);
if (cmp == 0) {
pr_warn("key already exists!\n");
@@ -247,7 +246,7 @@ static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new)
return 0;
}
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key)
{
struct rxe_pool *pool = elem->pool;
int err;
@@ -258,37 +257,35 @@ int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key)
return err;
}
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key)
+int __rxe_add_key(struct rxe_pool_elem *elem, void *key)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
int err;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
err = __rxe_add_key_locked(elem, key);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
return err;
}
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem)
+void __rxe_drop_key_locked(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
rb_erase(&elem->key_node, &pool->key.tree);
}
-void __rxe_drop_key(struct rxe_pool_entry *elem)
+void __rxe_drop_key(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
__rxe_drop_key_locked(elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
}
-int __rxe_add_index_locked(struct rxe_pool_entry *elem)
+int __rxe_add_index_locked(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
int err;
@@ -299,20 +296,19 @@ int __rxe_add_index_locked(struct rxe_pool_entry *elem)
return err;
}
-int __rxe_add_index(struct rxe_pool_entry *elem)
+int __rxe_add_index(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
int err;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
err = __rxe_add_index_locked(elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
return err;
}
-void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
+void __rxe_drop_index_locked(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
@@ -320,32 +316,31 @@ void __rxe_drop_index_locked(struct rxe_pool_entry *elem)
rb_erase(&elem->index_node, &pool->index.tree);
}
-void __rxe_drop_index(struct rxe_pool_entry *elem)
+void __rxe_drop_index(struct rxe_pool_elem *elem)
{
struct rxe_pool *pool = elem->pool;
- unsigned long flags;
- write_lock_irqsave(&pool->pool_lock, flags);
+ write_lock_bh(&pool->pool_lock);
__rxe_drop_index_locked(elem);
- write_unlock_irqrestore(&pool->pool_lock, flags);
+ write_unlock_bh(&pool->pool_lock);
}
void *rxe_alloc_locked(struct rxe_pool *pool)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
- obj = kzalloc(info->size, GFP_ATOMIC);
+ obj = kzalloc(pool->elem_size, GFP_ATOMIC);
if (!obj)
goto out_cnt;
- elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+ elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
elem->pool = pool;
+ elem->obj = obj;
kref_init(&elem->ref_cnt);
return obj;
@@ -357,20 +352,20 @@ out_cnt:
void *rxe_alloc(struct rxe_pool *pool)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
- obj = kzalloc(info->size, GFP_KERNEL);
+ obj = kzalloc(pool->elem_size, GFP_KERNEL);
if (!obj)
goto out_cnt;
- elem = (struct rxe_pool_entry *)(obj + info->elem_offset);
+ elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset);
elem->pool = pool;
+ elem->obj = obj;
kref_init(&elem->ref_cnt);
return obj;
@@ -380,12 +375,13 @@ out_cnt:
return NULL;
}
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem)
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem)
{
if (atomic_inc_return(&pool->num_elem) > pool->max_elem)
goto out_cnt;
elem->pool = pool;
+ elem->obj = (u8 *)elem - pool->elem_offset;
kref_init(&elem->ref_cnt);
return 0;
@@ -397,17 +393,16 @@ out_cnt:
void rxe_elem_release(struct kref *kref)
{
- struct rxe_pool_entry *elem =
- container_of(kref, struct rxe_pool_entry, ref_cnt);
+ struct rxe_pool_elem *elem =
+ container_of(kref, struct rxe_pool_elem, ref_cnt);
struct rxe_pool *pool = elem->pool;
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
- u8 *obj;
+ void *obj;
if (pool->cleanup)
pool->cleanup(elem);
if (!(pool->flags & RXE_POOL_NO_ALLOC)) {
- obj = (u8 *)elem - info->elem_offset;
+ obj = elem->obj;
kfree(obj);
}
@@ -416,15 +411,14 @@ void rxe_elem_release(struct kref *kref)
void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rb_node *node;
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
node = pool->index.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, index_node);
+ elem = rb_entry(node, struct rxe_pool_elem, index_node);
if (elem->index > index)
node = node->rb_left;
@@ -436,7 +430,7 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
if (node) {
kref_get(&elem->ref_cnt);
- obj = (u8 *)elem - info->elem_offset;
+ obj = elem->obj;
} else {
obj = NULL;
}
@@ -446,28 +440,26 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
{
- u8 *obj;
- unsigned long flags;
+ void *obj;
- read_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_bh(&pool->pool_lock);
obj = rxe_pool_get_index_locked(pool, index);
- read_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_bh(&pool->pool_lock);
return obj;
}
void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
{
- const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rb_node *node;
- struct rxe_pool_entry *elem;
- u8 *obj;
+ struct rxe_pool_elem *elem;
+ void *obj;
int cmp;
node = pool->key.tree.rb_node;
while (node) {
- elem = rb_entry(node, struct rxe_pool_entry, key_node);
+ elem = rb_entry(node, struct rxe_pool_elem, key_node);
cmp = memcmp((u8 *)elem + pool->key.key_offset,
key, pool->key.key_size);
@@ -482,7 +474,7 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
if (node) {
kref_get(&elem->ref_cnt);
- obj = (u8 *)elem - info->elem_offset;
+ obj = elem->obj;
} else {
obj = NULL;
}
@@ -492,12 +484,11 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
void *rxe_pool_get_key(struct rxe_pool *pool, void *key)
{
- u8 *obj;
- unsigned long flags;
+ void *obj;
- read_lock_irqsave(&pool->pool_lock, flags);
+ read_lock_bh(&pool->pool_lock);
obj = rxe_pool_get_key_locked(pool, key);
- read_unlock_irqrestore(&pool->pool_lock, flags);
+ read_unlock_bh(&pool->pool_lock);
return obj;
}
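The rxe_pool.c hunks above all make the same move: instead of recomputing the wrapping object from a per-type table (rxe_type_info[pool->type]) plus pointer arithmetic on every lookup, the element now carries a back-pointer that is set once when the object is created. A minimal stand-alone sketch of the two styles, with illustrative types (not the kernel structures themselves):

#include <linux/types.h>

struct pool_elem {
	void *obj;		/* cached back-pointer, set once at alloc */
};

struct pool {
	size_t elem_offset;	/* offset of the elem inside the object */
};

/* before: every lookup redid the arithmetic */
static void *obj_from_elem_old(struct pool *pool, struct pool_elem *elem)
{
	return (u8 *)elem - pool->elem_offset;
}

/* after: a single load, no per-type table walk */
static void *obj_from_elem_new(struct pool_elem *elem)
{
	return elem->obj;
}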
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 8ecd9f870aea..214279310f4d 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -7,9 +7,6 @@
#ifndef RXE_POOL_H
#define RXE_POOL_H
-#define RXE_POOL_ALIGN (16)
-#define RXE_POOL_CACHE_FLAGS (0)
-
enum rxe_pool_flags {
RXE_POOL_INDEX = BIT(1),
RXE_POOL_KEY = BIT(2),
@@ -30,10 +27,9 @@ enum rxe_elem_type {
RXE_NUM_TYPES, /* keep me last */
};
-struct rxe_pool_entry;
-
-struct rxe_pool_entry {
+struct rxe_pool_elem {
struct rxe_pool *pool;
+ void *obj;
struct kref ref_cnt;
struct list_head list;
@@ -47,14 +43,16 @@ struct rxe_pool_entry {
struct rxe_pool {
struct rxe_dev *rxe;
+ const char *name;
rwlock_t pool_lock; /* protects pool add/del/search */
- size_t elem_size;
- void (*cleanup)(struct rxe_pool_entry *obj);
+ void (*cleanup)(struct rxe_pool_elem *obj);
enum rxe_pool_flags flags;
enum rxe_elem_type type;
unsigned int max_elem;
atomic_t num_elem;
+ size_t elem_size;
+ size_t elem_offset;
/* only used if indexed */
struct {
@@ -89,51 +87,51 @@ void *rxe_alloc_locked(struct rxe_pool *pool);
void *rxe_alloc(struct rxe_pool *pool);
/* connect already allocated object to pool */
-int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem);
+int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem);
-#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem)
+#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem)
/* assign an index to an indexed object and insert object into
* pool's rb tree holding and not holding the pool_lock
*/
-int __rxe_add_index_locked(struct rxe_pool_entry *elem);
+int __rxe_add_index_locked(struct rxe_pool_elem *elem);
-#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem)
+#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->elem)
-int __rxe_add_index(struct rxe_pool_entry *elem);
+int __rxe_add_index(struct rxe_pool_elem *elem);
-#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem)
+#define rxe_add_index(obj) __rxe_add_index(&(obj)->elem)
/* drop an index and remove object from rb tree
* holding and not holding the pool_lock
*/
-void __rxe_drop_index_locked(struct rxe_pool_entry *elem);
+void __rxe_drop_index_locked(struct rxe_pool_elem *elem);
-#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem)
+#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->elem)
-void __rxe_drop_index(struct rxe_pool_entry *elem);
+void __rxe_drop_index(struct rxe_pool_elem *elem);
-#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem)
+#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->elem)
/* assign a key to a keyed object and insert object into
* pool's rb tree holding and not holding pool_lock
*/
-int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key);
-#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key)
+#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->elem, key)
-int __rxe_add_key(struct rxe_pool_entry *elem, void *key);
+int __rxe_add_key(struct rxe_pool_elem *elem, void *key);
-#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key)
+#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->elem, key)
/* remove elem from rb tree holding and not holding the pool_lock */
-void __rxe_drop_key_locked(struct rxe_pool_entry *elem);
+void __rxe_drop_key_locked(struct rxe_pool_elem *elem);
-#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem)
+#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->elem)
-void __rxe_drop_key(struct rxe_pool_entry *elem);
+void __rxe_drop_key(struct rxe_pool_elem *elem);
-#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem)
+#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->elem)
/* lookup an indexed object from index holding and not holding the pool_lock.
* takes a reference on object
@@ -153,9 +151,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key);
void rxe_elem_release(struct kref *kref);
/* take a reference on an object */
-#define rxe_add_ref(elem) kref_get(&(elem)->pelem.ref_cnt)
+#define rxe_add_ref(obj) kref_get(&(obj)->elem.ref_cnt)
/* drop a reference on an object */
-#define rxe_drop_ref(elem) kref_put(&(elem)->pelem.ref_cnt, rxe_elem_release)
+#define rxe_drop_ref(obj) kref_put(&(obj)->elem.ref_cnt, rxe_elem_release)
#endif /* RXE_POOL_H */
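The renamed field matters beyond readability: every wrapper macro above expands &(obj)->elem, so any structure registered with a pool must embed its struct rxe_pool_elem under exactly that name. A hedged sketch of how the wrappers compose (struct my_obj is a made-up stand-in for a verbs object such as struct rxe_pd):

struct my_obj {
	struct rxe_pool_elem elem;	/* field name must be "elem" */
};

static int my_obj_setup(struct rxe_pool *pool, struct my_obj *obj)
{
	int err;

	/* expands to __rxe_add_to_pool(pool, &obj->elem) */
	err = rxe_add_to_pool(pool, obj);
	if (err)
		return err;

	rxe_add_ref(obj);	/* kref_get(&obj->elem.ref_cnt) */
	rxe_drop_ref(obj);	/* kref_put(..., rxe_elem_release) */
	return 0;
}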
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 54b8711321c1..5018b9387694 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -167,7 +167,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->attr.path_mtu = 1;
qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu);
- qpn = qp->pelem.index;
+ qpn = qp->elem.index;
port = &rxe->port;
switch (init->qp_type) {
@@ -217,8 +217,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
* the port number must be in the Dynamic Ports range
* (0xc000 - 0xffff).
*/
- qp->src_port = RXE_ROCE_V2_SPORT +
- (hash_32_generic(qp_num(qp), 14) & 0x3fff);
+ qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
qp->sq.max_wr = init->cap.max_send_wr;
/* These caps are limited by rxe_qp_chk_cap() done by the caller */
@@ -832,9 +831,9 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
}
/* called when the last reference to the qp is dropped */
-void rxe_qp_cleanup(struct rxe_pool_entry *arg)
+void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
- struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem);
+ struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);
execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}
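In the rxe_qp_init_req() hunk above, hash_32() replaces hash_32_generic(), which was an implementation detail rather than the intended entry point; the 14-bit hash of the QPN is what keeps the UDP source port inside the dynamic range. A sketch of the derivation, assuming RXE_ROCE_V2_SPORT is 0xc000 as in the driver headers:

#include <linux/hash.h>

static u16 rxe_udp_sport(u32 qpn)
{
	/* hash_32(v, 14) already returns 14 bits; the mask is belt and
	 * braces, so the result always lands in 0xc000..0xffff
	 */
	return 0xc000 + (hash_32(qpn, 14) & 0x3fff);
}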
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 6e6e023c1b45..a1b283dd2d4c 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -151,7 +151,6 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
struct rxe_queue *new_q;
unsigned int num_elem = *num_elem_p;
int err;
- unsigned long flags = 0, flags1;
new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type);
if (!new_q)
@@ -165,17 +164,17 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
goto err1;
}
- spin_lock_irqsave(consumer_lock, flags1);
+ spin_lock_bh(consumer_lock);
if (producer_lock) {
- spin_lock_irqsave(producer_lock, flags);
+ spin_lock_bh(producer_lock);
err = resize_finish(q, new_q, num_elem);
- spin_unlock_irqrestore(producer_lock, flags);
+ spin_unlock_bh(producer_lock);
} else {
err = resize_finish(q, new_q, num_elem);
}
- spin_unlock_irqrestore(consumer_lock, flags1);
+ spin_unlock_bh(consumer_lock);
rxe_queue_cleanup(new_q); /* new/old dep on err */
if (err)
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 0c9d2af15f3d..5eb89052dd66 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -110,7 +110,6 @@ void rnr_nak_timer(struct timer_list *t)
static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
{
struct rxe_send_wqe *wqe;
- unsigned long flags;
struct rxe_queue *q = qp->sq.queue;
unsigned int index = qp->req.wqe_index;
unsigned int cons;
@@ -124,25 +123,23 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
/* check to see if we are drained;
* state_lock used by requester and completer
*/
- spin_lock_irqsave(&qp->state_lock, flags);
+ spin_lock_bh(&qp->state_lock);
do {
if (qp->req.state != QP_STATE_DRAIN) {
/* comp just finished */
- spin_unlock_irqrestore(&qp->state_lock,
- flags);
+ spin_unlock_bh(&qp->state_lock);
break;
}
if (wqe && ((index != cons) ||
(wqe->state != wqe_state_posted))) {
/* comp not done yet */
- spin_unlock_irqrestore(&qp->state_lock,
- flags);
+ spin_unlock_bh(&qp->state_lock);
break;
}
qp->req.state = QP_STATE_DRAINED;
- spin_unlock_irqrestore(&qp->state_lock, flags);
+ spin_unlock_bh(&qp->state_lock);
if (qp->ibqp.event_handler) {
struct ib_event ev;
@@ -372,7 +369,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
int pad = (-payload) & 0x3;
int paylen;
int solicited;
- u16 pkey;
u32 qp_num;
int ack_req;
@@ -404,8 +400,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
(pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) ==
(RXE_WRITE_MASK | RXE_IMMDT_MASK));
- pkey = IB_DEFAULT_PKEY_FULL;
-
qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn :
qp->attr.dest_qp_num;
@@ -414,7 +408,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
if (ack_req)
qp->req.noack_pkts = 0;
- bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num,
+ bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num,
ack_req, pkt->psn);
/* init optional headers */
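Context for the pad arithmetic kept above: the BTH pad count must round the payload up to a 4-byte boundary, and (-payload) & 0x3 computes that pad without a branch. The equivalent long form, as a sketch:

/* (-n) & 3 == (4 - (n & 3)) & 3 for the two's-complement int used here */
static int pad_to_4(int payload)
{
	return (4 - (payload & 0x3)) & 0x3;
}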
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index eb1c4c3b3a78..0c0721f04357 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -83,7 +83,7 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
srq->ibsrq.event_handler = init->event_handler;
srq->ibsrq.srq_context = init->srq_context;
srq->limit = init->attr.srq_limit;
- srq->srq_num = srq->pelem.index;
+ srq->srq_num = srq->elem.index;
srq->rq.max_wr = init->attr.max_wr;
srq->rq.max_sge = init->attr.max_sge;
diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c
deleted file mode 100644
index 666202ddff48..000000000000
--- a/drivers/infiniband/sw/rxe/rxe_sysfs.c
+++ /dev/null
@@ -1,119 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/*
- * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
- * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
- */
-
-#include "rxe.h"
-#include "rxe_net.h"
-
-/* Copy argument and remove trailing CR. Return the new length. */
-static int sanitize_arg(const char *val, char *intf, int intf_len)
-{
- int len;
-
- if (!val)
- return 0;
-
- /* Remove newline. */
- for (len = 0; len < intf_len - 1 && val[len] && val[len] != '\n'; len++)
- intf[len] = val[len];
- intf[len] = 0;
-
- if (len == 0 || (val[len] != 0 && val[len] != '\n'))
- return 0;
-
- return len;
-}
-
-static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
-{
- int len;
- int err = 0;
- char intf[32];
- struct net_device *ndev;
- struct rxe_dev *exists;
-
- if (!rxe_initialized) {
- pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n");
- return -EAGAIN;
- }
-
- len = sanitize_arg(val, intf, sizeof(intf));
- if (!len) {
- pr_err("add: invalid interface name\n");
- return -EINVAL;
- }
-
- ndev = dev_get_by_name(&init_net, intf);
- if (!ndev) {
- pr_err("interface %s not found\n", intf);
- return -EINVAL;
- }
-
- if (is_vlan_dev(ndev)) {
- pr_err("rxe creation allowed on top of a real device only\n");
- err = -EPERM;
- goto err;
- }
-
- exists = rxe_get_dev_from_net(ndev);
- if (exists) {
- ib_device_put(&exists->ib_dev);
- pr_err("already configured on %s\n", intf);
- err = -EINVAL;
- goto err;
- }
-
- err = rxe_net_add("rxe%d", ndev);
- if (err) {
- pr_err("failed to add %s\n", intf);
- goto err;
- }
-
-err:
- dev_put(ndev);
- return err;
-}
-
-static int rxe_param_set_remove(const char *val, const struct kernel_param *kp)
-{
- int len;
- char intf[32];
- struct ib_device *ib_dev;
-
- len = sanitize_arg(val, intf, sizeof(intf));
- if (!len) {
- pr_err("add: invalid interface name\n");
- return -EINVAL;
- }
-
- if (strncmp("all", intf, len) == 0) {
- pr_info("rxe_sys: remove all");
- ib_unregister_driver(RDMA_DRIVER_RXE);
- return 0;
- }
-
- ib_dev = ib_device_get_by_name(intf, RDMA_DRIVER_RXE);
- if (!ib_dev) {
- pr_err("not configured on %s\n", intf);
- return -EINVAL;
- }
-
- ib_unregister_device_and_put(ib_dev);
-
- return 0;
-}
-
-static const struct kernel_param_ops rxe_add_ops = {
- .set = rxe_param_set_add,
-};
-
-static const struct kernel_param_ops rxe_remove_ops = {
- .set = rxe_param_set_remove,
-};
-
-module_param_cb(add, &rxe_add_ops, NULL, 0200);
-MODULE_PARM_DESC(add, "DEPRECATED. Create RXE device over network interface");
-module_param_cb(remove, &rxe_remove_ops, NULL, 0200);
-MODULE_PARM_DESC(remove, "DEPRECATED. Remove RXE device over network interface");
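With rxe_sysfs.c deleted, the deprecated add/remove module parameters disappear and the netlink path becomes the only way to manage rxe devices; the usual iproute2 invocations (shown as examples, adjust the device and netdev names to taste) are rdma link add rxe0 type rxe netdev eth0 and rdma link delete rxe0.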
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 6951fdcb31bf..0c4db5bb17d7 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -32,25 +32,24 @@ void rxe_do_task(struct tasklet_struct *t)
{
int cont;
int ret;
- unsigned long flags;
struct rxe_task *task = from_tasklet(task, t, tasklet);
- spin_lock_irqsave(&task->state_lock, flags);
+ spin_lock_bh(&task->state_lock);
switch (task->state) {
case TASK_STATE_START:
task->state = TASK_STATE_BUSY;
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
break;
case TASK_STATE_BUSY:
task->state = TASK_STATE_ARMED;
fallthrough;
case TASK_STATE_ARMED:
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
return;
default:
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
pr_warn("%s failed with bad state %d\n", __func__, task->state);
return;
}
@@ -59,7 +58,7 @@ void rxe_do_task(struct tasklet_struct *t)
cont = 0;
ret = task->func(task->arg);
- spin_lock_irqsave(&task->state_lock, flags);
+ spin_lock_bh(&task->state_lock);
switch (task->state) {
case TASK_STATE_BUSY:
if (ret)
@@ -81,7 +80,7 @@ void rxe_do_task(struct tasklet_struct *t)
pr_warn("%s failed with bad state %d\n", __func__,
task->state);
}
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
} while (cont);
task->ret = ret;
@@ -106,7 +105,6 @@ int rxe_init_task(void *obj, struct rxe_task *task,
void rxe_cleanup_task(struct rxe_task *task)
{
- unsigned long flags;
bool idle;
/*
@@ -116,9 +114,9 @@ void rxe_cleanup_task(struct rxe_task *task)
task->destroyed = true;
do {
- spin_lock_irqsave(&task->state_lock, flags);
+ spin_lock_bh(&task->state_lock);
idle = (task->state == TASK_STATE_START);
- spin_unlock_irqrestore(&task->state_lock, flags);
+ spin_unlock_bh(&task->state_lock);
} while (!idle);
tasklet_kill(&task->tasklet);
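The rxe_task.c conversion above is representative of the whole series: rxe takes these locks only from process context and softirq (tasklet) context, never from hard-IRQ context, so saving and restoring the interrupt flags bought nothing. A minimal sketch of the before/after pattern:

#include <linux/spinlock.h>

static void old_style(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);		/* masks hard IRQs too */
	/* ...critical section... */
	spin_unlock_irqrestore(lock, flags);
}

static void new_style(spinlock_t *lock)
{
	spin_lock_bh(lock);			/* masks only bottom halves */
	/* ...critical section... */
	spin_unlock_bh(lock);
}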
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 0aa0d7e52773..915ad6664321 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -182,7 +182,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
/* create index > 0 */
rxe_add_index(ah);
- ah->ah_num = ah->pelem.index;
+ ah->ah_num = ah->elem.index;
if (uresp) {
/* only if new user provider */
@@ -383,10 +383,9 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr)
{
int err = 0;
- unsigned long flags;
struct rxe_srq *srq = to_rsrq(ibsrq);
- spin_lock_irqsave(&srq->rq.producer_lock, flags);
+ spin_lock_bh(&srq->rq.producer_lock);
while (wr) {
err = post_one_recv(&srq->rq, wr);
@@ -395,7 +394,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
wr = wr->next;
}
- spin_unlock_irqrestore(&srq->rq.producer_lock, flags);
+ spin_unlock_bh(&srq->rq.producer_lock);
if (err)
*bad_wr = wr;
@@ -469,6 +468,11 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (err)
goto err1;
+ if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH))
+ qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label,
+ qp->ibqp.qp_num,
+ qp->attr.dest_qp_num);
+
return 0;
err1:
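The new rxe_modify_qp() hunk recomputes the UDP source port whenever an address vector carrying a GRH is installed. rdma_get_udp_sport() (declared in rdma/ib_verbs.h) behaves roughly like the following sketch — simplified, not the exact kernel implementation:

static u16 sketch_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
{
	if (!fl)	/* no flow label from userspace: derive one */
		fl = rdma_calc_flow_label(lqpn, rqpn);

	return rdma_flow_label_to_udp_sport(fl);
}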
@@ -634,19 +638,18 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
int err;
struct rxe_sq *sq = &qp->sq;
struct rxe_send_wqe *send_wqe;
- unsigned long flags;
int full;
err = validate_send_wr(qp, ibwr, mask, length);
if (err)
return err;
- spin_lock_irqsave(&qp->sq.sq_lock, flags);
+ spin_lock_bh(&qp->sq.sq_lock);
full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
if (unlikely(full)) {
- spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ spin_unlock_bh(&qp->sq.sq_lock);
return -ENOMEM;
}
@@ -655,7 +658,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
- spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
+ spin_unlock_bh(&qp->sq.sq_lock);
return 0;
}
@@ -735,7 +738,6 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
int err = 0;
struct rxe_qp *qp = to_rqp(ibqp);
struct rxe_rq *rq = &qp->rq;
- unsigned long flags;
if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
*bad_wr = wr;
@@ -749,7 +751,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
goto err1;
}
- spin_lock_irqsave(&rq->producer_lock, flags);
+ spin_lock_bh(&rq->producer_lock);
while (wr) {
err = post_one_recv(rq, wr);
@@ -760,7 +762,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
wr = wr->next;
}
- spin_unlock_irqrestore(&rq->producer_lock, flags);
+ spin_unlock_bh(&rq->producer_lock);
if (qp->resp.state == QP_STATE_ERROR)
rxe_run_task(&qp->resp.task, 1);
@@ -841,9 +843,8 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
int i;
struct rxe_cq *cq = to_rcq(ibcq);
struct rxe_cqe *cqe;
- unsigned long flags;
- spin_lock_irqsave(&cq->cq_lock, flags);
+ spin_lock_bh(&cq->cq_lock);
for (i = 0; i < num_entries; i++) {
cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
if (!cqe)
@@ -852,7 +853,7 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
memcpy(wc++, &cqe->ibwc, sizeof(*wc));
queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
}
- spin_unlock_irqrestore(&cq->cq_lock, flags);
+ spin_unlock_bh(&cq->cq_lock);
return i;
}
@@ -870,11 +871,10 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
struct rxe_cq *cq = to_rcq(ibcq);
- unsigned long irq_flags;
int ret = 0;
int empty;
- spin_lock_irqsave(&cq->cq_lock, irq_flags);
+ spin_lock_bh(&cq->cq_lock);
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = flags & IB_CQ_SOLICITED_MASK;
@@ -883,7 +883,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
ret = 1;
- spin_unlock_irqrestore(&cq->cq_lock, irq_flags);
+ spin_unlock_bh(&cq->cq_lock);
return ret;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 35e041450090..e48969e8d4c8 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -35,17 +35,17 @@ static inline int psn_compare(u32 psn_a, u32 psn_b)
struct rxe_ucontext {
struct ib_ucontext ibuc;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
};
struct rxe_pd {
struct ib_pd ibpd;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
};
struct rxe_ah {
struct ib_ah ibah;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct rxe_av av;
bool is_user;
int ah_num;
@@ -60,7 +60,7 @@ struct rxe_cqe {
struct rxe_cq {
struct ib_cq ibcq;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct rxe_queue *queue;
spinlock_t cq_lock;
u8 notify;
@@ -95,7 +95,7 @@ struct rxe_rq {
struct rxe_srq {
struct ib_srq ibsrq;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct rxe_pd *pd;
struct rxe_rq rq;
u32 srq_num;
@@ -209,7 +209,7 @@ struct rxe_resp_info {
struct rxe_qp {
struct ib_qp ibqp;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct ib_qp_attr attr;
unsigned int valid;
unsigned int mtu;
@@ -309,7 +309,7 @@ static inline int rkey_is_mw(u32 rkey)
}
struct rxe_mr {
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct ib_mr ibmr;
struct ib_umem *umem;
@@ -342,7 +342,7 @@ enum rxe_mw_state {
struct rxe_mw {
struct ib_mw ibmw;
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
spinlock_t lock;
enum rxe_mw_state state;
struct rxe_qp *qp; /* Type 2 only */
@@ -354,7 +354,7 @@ struct rxe_mw {
};
struct rxe_mc_grp {
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
spinlock_t mcg_lock; /* guard group */
struct rxe_dev *rxe;
struct list_head qp_list;
@@ -365,7 +365,7 @@ struct rxe_mc_grp {
};
struct rxe_mc_elem {
- struct rxe_pool_entry pelem;
+ struct rxe_pool_elem elem;
struct list_head qp_list;
struct list_head grp_list;
struct rxe_qp *qp;
@@ -392,8 +392,6 @@ struct rxe_dev {
struct net_device *ndev;
- int xmit_errors;
-
struct rxe_pool uc_pool;
struct rxe_pool pd_pool;
struct rxe_pool ah_pool;
@@ -484,6 +482,6 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
-void rxe_mc_cleanup(struct rxe_pool_entry *arg);
+void rxe_mc_cleanup(struct rxe_pool_elem *elem);
#endif /* RXE_VERBS_H */
diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h
index 368959ae9a8c..df03d84c6868 100644
--- a/drivers/infiniband/sw/siw/siw.h
+++ b/drivers/infiniband/sw/siw/siw.h
@@ -644,14 +644,9 @@ static inline struct siw_sqe *orq_get_current(struct siw_qp *qp)
return &qp->orq[qp->orq_get % qp->attrs.orq_size];
}
-static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp)
-{
- return &qp->orq[qp->orq_put % qp->attrs.orq_size];
-}
-
static inline struct siw_sqe *orq_get_free(struct siw_qp *qp)
{
- struct siw_sqe *orq_e = orq_get_tail(qp);
+ struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size];
if (READ_ONCE(orq_e->flags) == 0)
return orq_e;
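Folding orq_get_tail() into its single caller leaves the ORQ logic in one place: the outbound read queue is a ring indexed modulo orq_size, and a slot at the producer index is free exactly when its flags word reads zero. A stand-alone model with illustrative types:

struct sqe { unsigned long flags; };

struct orq {
	struct sqe *slots;
	unsigned int size, put, get;	/* producer / consumer indices */
};

static struct sqe *orq_free_slot(struct orq *q)
{
	struct sqe *e = &q->slots[q->put % q->size];

	/* flags != 0 means the consumer has not retired it: ring full */
	return READ_ONCE(e->flags) == 0 ? e : NULL;
}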
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 9093e6a80b26..e5c586913d0b 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -98,15 +98,14 @@ static int siw_create_tx_threads(void)
continue;
siw_tx_thread[cpu] =
- kthread_create(siw_run_sq, (unsigned long *)(long)cpu,
- "siw_tx/%d", cpu);
+ kthread_run_on_cpu(siw_run_sq,
+ (unsigned long *)(long)cpu,
+ cpu, "siw_tx/%u");
if (IS_ERR(siw_tx_thread[cpu])) {
siw_tx_thread[cpu] = NULL;
continue;
}
- kthread_bind(siw_tx_thread[cpu], cpu);
- wake_up_process(siw_tx_thread[cpu]);
assigned++;
}
return assigned;
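kthread_run_on_cpu() is the helper this kernel cycle introduces; it bundles the create/bind/wake triple that every per-CPU kthread user used to open-code. The equivalence, sketched with the same arguments as above:

struct task_struct *t;
void *data = NULL;	/* stand-in for the per-cpu argument */

/* old three-step form */
t = kthread_create(siw_run_sq, data, "siw_tx/%d", cpu);
if (!IS_ERR(t)) {
	kthread_bind(t, cpu);	/* must pin before the first wakeup */
	wake_up_process(t);
}

/* new single call; the helper formats the cpu into the name */
t = kthread_run_on_cpu(siw_run_sq, data, cpu, "siw_tx/%u");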
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 60116f20653c..875ea6f1b04a 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -1153,11 +1153,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)
spin_lock_irqsave(&qp->orq_lock, flags);
- rreq = orq_get_current(qp);
-
/* free current orq entry */
+ rreq = orq_get_current(qp);
WRITE_ONCE(rreq->flags, 0);
+ qp->orq_get++;
+
if (qp->tx_ctx.orq_fence) {
if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) {
pr_warn("siw: [QP %u]: fence resume: bad status %d\n",
@@ -1165,10 +1166,12 @@ static int siw_check_tx_fence(struct siw_qp *qp)
rv = -EPROTO;
goto out;
}
- /* resume SQ processing */
+ /* resume SQ processing, if possible */
if (tx_waiting->sqe.opcode == SIW_OP_READ ||
tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) {
- rreq = orq_get_tail(qp);
+
+ /* SQ processing was stopped because of a full ORQ */
+ rreq = orq_get_free(qp);
if (unlikely(!rreq)) {
pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp));
rv = -EPROTO;
@@ -1181,15 +1184,14 @@ static int siw_check_tx_fence(struct siw_qp *qp)
resume_tx = 1;
} else if (siw_orq_empty(qp)) {
+ /*
+ * SQ processing was stopped by a fenced work request.
+ * Resume since all previous Reads are now completed.
+ */
qp->tx_ctx.orq_fence = 0;
resume_tx = 1;
- } else {
- pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n",
- qp_id(qp), qp->orq_get, qp->orq_put);
- rv = -EPROTO;
}
}
- qp->orq_get++;
out:
spin_unlock_irqrestore(&qp->orq_lock, flags);
diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c
index 1b36350601fa..54ef367b074a 100644
--- a/drivers/infiniband/sw/siw/siw_verbs.c
+++ b/drivers/infiniband/sw/siw/siw_verbs.c
@@ -8,6 +8,7 @@
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
+#include <net/addrconf.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
@@ -155,7 +156,8 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
attr->vendor_id = SIW_VENDOR_ID;
attr->vendor_part_id = sdev->vendor_part_id;
- memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6);
+ addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
+ sdev->netdev->dev_addr);
return 0;
}
@@ -311,7 +313,8 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
siw_dbg(base_dev, "too many QP's\n");
- return -ENOMEM;
+ rv = -ENOMEM;
+ goto err_atomic;
}
if (attrs->qp_type != IB_QPT_RC) {
siw_dbg(base_dev, "only RC QP's supported\n");
@@ -660,7 +663,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
kbuf += core_sge->length;
core_sge++;
}
- sqe->sge[0].length = bytes > 0 ? bytes : 0;
+ sqe->sge[0].length = max(bytes, 0);
sqe->num_sge = bytes > 0 ? 1 : 0;
return bytes;
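The err_atomic change in siw_create_qp() above fixes a counter leak: once atomic_inc_return() has charged the QP against SIW_MAX_QP, every failure path must give the charge back, hence the goto in place of the bare return. The general shape, as a sketch:

#include <linux/atomic.h>
#include <linux/errno.h>

static int reserve_slot(atomic_t *nr, int max)
{
	if (atomic_inc_return(nr) > max)
		goto err_dec;	/* over the limit: undo the charge */

	/* ...further setup, each failure branching to err_dec... */

	return 0;

err_dec:
	atomic_dec(nr);
	return -ENOMEM;
}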
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 776e46ee95da..07e47021a71f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -113,10 +113,6 @@ bool iser_pi_enable = false;
module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
-int iser_pi_guard;
-module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO);
-MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]");
-
static int iscsi_iser_set(const char *val, const struct kernel_param *kp)
{
int ret;
@@ -139,9 +135,8 @@ static int iscsi_iser_set(const char *val, const struct kernel_param *kp)
* Notes: In case of data length errors or iscsi PDU completion failures
* this routine will signal iscsi layer of connection failure.
*/
-void
-iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
- char *rx_data, int rx_data_len)
+void iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
+ char *rx_data, int rx_data_len)
{
int rc = 0;
int datalen;
@@ -176,8 +171,7 @@ error:
* Notes: This routine can't fail, it just assigns the iscsi task
* hdr and max hdr size.
*/
-static int
-iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
+static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -198,9 +192,8 @@ iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
* state mutex to avoid dereferencing the IB device which
* may have already been terminated.
*/
-int
-iser_initialize_task_headers(struct iscsi_task *task,
- struct iser_tx_desc *tx_desc)
+int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc)
{
struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->ib_conn.device;
@@ -237,8 +230,7 @@ iser_initialize_task_headers(struct iscsi_task *task,
* Return: Returns zero on success or -ENOMEM when failing
* to init task headers (dma mapping error).
*/
-static int
-iscsi_iser_task_init(struct iscsi_task *task)
+static int iscsi_iser_task_init(struct iscsi_task *task)
{
struct iscsi_iser_task *iser_task = task->dd_data;
int ret;
@@ -272,8 +264,8 @@ iscsi_iser_task_init(struct iscsi_task *task)
* xmit.
*
**/
-static int
-iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+static int iscsi_iser_mtask_xmit(struct iscsi_conn *conn,
+ struct iscsi_task *task)
{
int error = 0;
@@ -290,9 +282,8 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
return error;
}
-static int
-iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
- struct iscsi_task *task)
+static int iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn,
+ struct iscsi_task *task)
{
struct iscsi_r2t_info *r2t = &task->unsol_r2t;
struct iscsi_data hdr;
@@ -326,8 +317,7 @@ iscsi_iser_task_xmit_unsol_data_exit:
*
* Return: zero on success or escalates $error on failure.
*/
-static int
-iscsi_iser_task_xmit(struct iscsi_task *task)
+static int iscsi_iser_task_xmit(struct iscsi_task *task)
{
struct iscsi_conn *conn = task->conn;
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -410,8 +400,7 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
*
* In addition the error sector is marked.
*/
-static u8
-iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
+static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector)
{
struct iscsi_iser_task *iser_task = task->dd_data;
enum iser_data_dir dir = iser_task->dir[ISER_DIR_IN] ?
@@ -460,11 +449,9 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session,
* -EINVAL in case the end-point doesn't exist anymore or iser connection
* state is not UP (teardown already started).
*/
-static int
-iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
- struct iscsi_cls_conn *cls_conn,
- uint64_t transport_eph,
- int is_leading)
+static int iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
+ struct iscsi_cls_conn *cls_conn,
+ uint64_t transport_eph, int is_leading)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *iser_conn;
@@ -519,8 +506,7 @@ out:
* from this point iscsi must call conn_stop in session/connection
* teardown so iser transport must wait for it.
*/
-static int
-iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
+static int iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
{
struct iscsi_conn *iscsi_conn;
struct iser_conn *iser_conn;
@@ -542,8 +528,7 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
* handle, so we call it under iser the state lock to protect against
* this kind of race.
*/
-static void
-iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
+static void iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iser_conn *iser_conn = conn->dd_data;
@@ -578,8 +563,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
*
* Removes and frees the iscsi host.
*/
-static void
-iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
+static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
{
struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
@@ -588,8 +572,7 @@ iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_host_free(shost);
}
-static inline unsigned int
-iser_dif_prot_caps(int prot_caps)
+static inline unsigned int iser_dif_prot_caps(int prot_caps)
{
int ret = 0;
@@ -708,9 +691,8 @@ free_host:
return NULL;
}
-static int
-iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
- enum iscsi_param param, char *buf, int buflen)
+static int iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
+ enum iscsi_param param, char *buf, int buflen)
{
int value;
@@ -760,8 +742,8 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn,
*
* Output connection statistics.
*/
-static void
-iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
+static void iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
{
struct iscsi_conn *conn = cls_conn->dd_data;
@@ -812,9 +794,9 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
* Return: iscsi_endpoint created by iscsi layer or ERR_PTR(error)
* if fails.
*/
-static struct iscsi_endpoint *
-iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
- int non_blocking)
+static struct iscsi_endpoint *iscsi_iser_ep_connect(struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
+ int non_blocking)
{
int err;
struct iser_conn *iser_conn;
@@ -857,8 +839,7 @@ failure:
* or more likely iser connection state transitioned to TERMINATING or
* DOWN during the wait period.
*/
-static int
-iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+static int iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
struct iser_conn *iser_conn = ep->dd_data;
int rc;
@@ -893,8 +874,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
* and cleanup or actually call it immediately in case we didn't pass
* iscsi conn bind/start stage, thus it is safe.
*/
-static void
-iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
+static void iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
{
struct iser_conn *iser_conn = ep->dd_data;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9f6ac0a09a78..20af46c4e954 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -119,8 +119,6 @@
#define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX)
-#define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2)
-
/* the max TX (send) WR supported by the iSER QP is defined by *
* max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
* to have at max for SCSI command. The tx posting & completion handling code *
@@ -148,8 +146,6 @@
- ISER_MAX_RX_MISC_PDUS) / \
(1 + ISER_INFLIGHT_DATAOUTS))
-#define ISER_SIGNAL_CMD_COUNT 32
-
/* Constant PDU lengths calculations */
#define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr))
@@ -366,9 +362,6 @@ struct iser_fr_pool {
* @qp: Connection Queue-pair
* @cq: Connection completion queue
* @cq_size: The number of max outstanding completions
- * @post_recv_buf_count: post receive counter
- * @sig_count: send work request signal count
- * @rx_wr: receive work request for batch posts
* @device: reference to iser device
* @fr_pool: connection fast registration pool
* @pi_support: Indicate device T10-PI support
@@ -379,9 +372,6 @@ struct ib_conn {
struct ib_qp *qp;
struct ib_cq *cq;
u32 cq_size;
- int post_recv_buf_count;
- u8 sig_count;
- struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
struct iser_device *device;
struct iser_fr_pool fr_pool;
bool pi_support;
@@ -397,8 +387,6 @@ struct ib_conn {
* @state: connection logical state
* @qp_max_recv_dtos: maximum number of data outs, corresponds
* to max number of post recvs
- * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1)
- * @min_posted_rx: (qp_max_recv_dtos >> 2)
* @max_cmds: maximum cmds allowed for this connection
* @name: connection peer portal
* @release_work: deferred work for release job
@@ -409,7 +397,6 @@ struct ib_conn {
* (state is ISER_CONN_UP)
* @conn_list: entry in ig conn list
* @login_desc: login descriptor
- * @rx_desc_head: head of rx_descs cyclic buffer
* @rx_descs: rx buffers array (cyclic buffer)
* @num_rx_descs: number of rx descriptors
* @scsi_sg_tablesize: scsi host sg_tablesize
@@ -422,8 +409,6 @@ struct iser_conn {
struct iscsi_endpoint *ep;
enum iser_conn_state state;
unsigned qp_max_recv_dtos;
- unsigned qp_max_recv_dtos_mask;
- unsigned min_posted_rx;
u16 max_cmds;
char name[ISER_OBJECT_NAME_SIZE];
struct work_struct release_work;
@@ -433,7 +418,6 @@ struct iser_conn {
struct completion up_completion;
struct list_head conn_list;
struct iser_login_desc login_desc;
- unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs;
u32 num_rx_descs;
unsigned short scsi_sg_tablesize;
@@ -486,7 +470,6 @@ struct iser_global {
extern struct iser_global ig;
extern int iser_debug_level;
extern bool iser_pi_enable;
-extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
extern bool iser_always_reg;
@@ -543,9 +526,9 @@ int iser_connect(struct iser_conn *iser_conn,
int non_blocking);
int iser_post_recvl(struct iser_conn *iser_conn);
-int iser_post_recvm(struct iser_conn *iser_conn, int count);
-int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
- bool signal);
+int iser_post_recvm(struct iser_conn *iser_conn,
+ struct iser_rx_desc *rx_desc);
+int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc);
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 27a6f75a9912..2490150d3085 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -95,11 +95,8 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
* task->data[ISER_DIR_OUT].data_len, Protection size
* is stored at task->prot[ISER_DIR_OUT].data_len
*/
-static int
-iser_prepare_write_cmd(struct iscsi_task *task,
- unsigned int imm_sz,
- unsigned int unsol_sz,
- unsigned int edtl)
+static int iser_prepare_write_cmd(struct iscsi_task *task, unsigned int imm_sz,
+ unsigned int unsol_sz, unsigned int edtl)
{
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_mem_reg *mem_reg;
@@ -160,8 +157,8 @@ iser_prepare_write_cmd(struct iscsi_task *task,
}
/* creates a new tx descriptor and adds header regd buffer */
-static void iser_create_send_desc(struct iser_conn *iser_conn,
- struct iser_tx_desc *tx_desc)
+static void iser_create_send_desc(struct iser_conn *iser_conn,
+ struct iser_tx_desc *tx_desc)
{
struct iser_device *device = iser_conn->ib_conn.device;
@@ -247,8 +244,6 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iser_device *device = ib_conn->device;
iser_conn->qp_max_recv_dtos = session->cmds_max;
- iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
- iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max,
iser_conn->pages_per_mr))
@@ -280,7 +275,6 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
rx_sg->lkey = device->pd->local_dma_lkey;
}
- iser_conn->rx_desc_head = 0;
return 0;
rx_desc_dma_map_failed:
@@ -322,37 +316,35 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
struct iser_conn *iser_conn = conn->dd_data;
- struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iscsi_session *session = conn->session;
+ int err = 0;
+ int i;
iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
/* check if this is the last login - going to full feature phase */
if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
- return 0;
-
- /*
- * Check that there is one posted recv buffer
- * (for the last login response).
- */
- WARN_ON(ib_conn->post_recv_buf_count != 1);
+ goto out;
if (session->discovery_sess) {
iser_info("Discovery session, re-using login RX buffer\n");
- return 0;
- } else
- iser_info("Normal session, posting batch of RX %d buffers\n",
- iser_conn->min_posted_rx);
-
- /* Initial post receive buffers */
- if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx))
- return -ENOMEM;
+ goto out;
+ }
- return 0;
-}
+ iser_info("Normal session, posting batch of RX %d buffers\n",
+ iser_conn->qp_max_recv_dtos - 1);
-static inline bool iser_signal_comp(u8 sig_count)
-{
- return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0);
+ /*
+ * Initial post receive buffers.
+ * There is one already posted recv buffer (for the last login
+ * response). Therefore, the first recv buffer is skipped here.
+ */
+ for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) {
+ err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]);
+ if (err)
+ goto out;
+ }
+out:
+ return err;
}
/**
@@ -360,8 +352,7 @@ static inline bool iser_signal_comp(u8 sig_count)
* @conn: link to matching iscsi connection
* @task: SCSI command task
*/
-int iser_send_command(struct iscsi_conn *conn,
- struct iscsi_task *task)
+int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -371,7 +362,6 @@ int iser_send_command(struct iscsi_conn *conn,
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
struct scsi_cmnd *sc = task->sc;
struct iser_tx_desc *tx_desc = &iser_task->desc;
- u8 sig_count = ++iser_conn->ib_conn.sig_count;
edtl = ntohl(hdr->data_length);
@@ -418,8 +408,7 @@ int iser_send_command(struct iscsi_conn *conn,
iser_task->status = ISER_TASK_STATUS_STARTED;
- err = iser_post_send(&iser_conn->ib_conn, tx_desc,
- iser_signal_comp(sig_count));
+ err = iser_post_send(&iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
@@ -434,8 +423,7 @@ send_command_error:
* @task: SCSI command task
* @hdr: pointer to the LLD's iSCSI message header
*/
-int iser_send_data_out(struct iscsi_conn *conn,
- struct iscsi_task *task,
+int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task,
struct iscsi_data *hdr)
{
struct iser_conn *iser_conn = conn->dd_data;
@@ -487,7 +475,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
itt, buf_offset, data_seg_len);
- err = iser_post_send(&iser_conn->ib_conn, tx_desc, true);
+ err = iser_post_send(&iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
@@ -497,8 +485,7 @@ send_data_out_error:
return err;
}
-int iser_send_control(struct iscsi_conn *conn,
- struct iscsi_task *task)
+int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task)
{
struct iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
@@ -550,7 +537,7 @@ int iser_send_control(struct iscsi_conn *conn,
goto send_control_error;
}
- err = iser_post_send(&iser_conn->ib_conn, mdesc, true);
+ err = iser_post_send(&iser_conn->ib_conn, mdesc);
if (!err)
return 0;
@@ -590,11 +577,14 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc)
desc->rsp_dma, ISER_RX_LOGIN_SIZE,
DMA_FROM_DEVICE);
- ib_conn->post_recv_buf_count--;
+ if (iser_conn->iscsi_conn->session->discovery_sess)
+ return;
+
+ /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */
+ iser_post_recvm(iser_conn, iser_conn->rx_descs);
}
-static inline int
-iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
+static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
{
if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) ||
(desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) {
@@ -607,10 +597,8 @@ iser_inv_desc(struct iser_fr_desc *desc, u32 rkey)
return 0;
}
-static int
-iser_check_remote_inv(struct iser_conn *iser_conn,
- struct ib_wc *wc,
- struct iscsi_hdr *hdr)
+static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc,
+ struct iscsi_hdr *hdr)
{
if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
struct iscsi_task *task;
@@ -657,8 +645,7 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
struct iser_conn *iser_conn = to_iser_conn(ib_conn);
struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);
struct iscsi_hdr *hdr;
- int length;
- int outstanding, count, err;
+ int length, err;
if (unlikely(wc->status != IB_WC_SUCCESS)) {
iser_err_comp(wc, "task_rsp");
@@ -687,20 +674,9 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc)
desc->dma_addr, ISER_RX_PAYLOAD_SIZE,
DMA_FROM_DEVICE);
- /* decrementing conn->post_recv_buf_count only --after-- freeing the *
- * task eliminates the need to worry on tasks which are completed in *
- * parallel to the execution of iser_conn_term. So the code that waits *
- * for the posted rx bufs refcount to become zero handles everything */
- ib_conn->post_recv_buf_count--;
-
- outstanding = ib_conn->post_recv_buf_count;
- if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
- count = min(iser_conn->qp_max_recv_dtos - outstanding,
- iser_conn->min_posted_rx);
- err = iser_post_recvm(iser_conn, count);
- if (err)
- iser_err("posting %d rx bufs err %d\n", count, err);
- }
+ err = iser_post_recvm(iser_conn, desc);
+ if (err)
+ iser_err("posting rx buffer err %d\n", err);
}
void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc)
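Taken together with the iscsi_iser.h hunks, the receive path is now self-replenishing: the rx_desc_head/mask/min_posted_rx bookkeeping can go because every completion reposts the very descriptor it just consumed, which keeps a constant number of buffers outstanding by construction. The handler shape, sketched (process_pdu() is a hypothetical stand-in for the PDU hand-off):

static void rx_done(struct iser_conn *iser_conn, struct iser_rx_desc *desc)
{
	process_pdu(desc);			/* hypothetical consumer */

	/* hand the same buffer straight back to the receive queue */
	if (iser_post_recvm(iser_conn, desc))
		iser_err("repost failed\n");
}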
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 9776b755d848..660982625488 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -44,8 +44,7 @@ void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc)
iser_err_comp(wc, "memreg");
}
-static struct iser_fr_desc *
-iser_reg_desc_get_fr(struct ib_conn *ib_conn)
+static struct iser_fr_desc *iser_reg_desc_get_fr(struct ib_conn *ib_conn)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
struct iser_fr_desc *desc;
@@ -60,9 +59,8 @@ iser_reg_desc_get_fr(struct ib_conn *ib_conn)
return desc;
}
-static void
-iser_reg_desc_put_fr(struct ib_conn *ib_conn,
- struct iser_fr_desc *desc)
+static void iser_reg_desc_put_fr(struct ib_conn *ib_conn,
+ struct iser_fr_desc *desc)
{
struct iser_fr_pool *fr_pool = &ib_conn->fr_pool;
unsigned long flags;
@@ -73,9 +71,9 @@ iser_reg_desc_put_fr(struct ib_conn *ib_conn,
}
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *data,
- enum iser_data_dir iser_dir,
- enum dma_data_direction dma_dir)
+ struct iser_data_buf *data,
+ enum iser_data_dir iser_dir,
+ enum dma_data_direction dma_dir)
{
struct ib_device *dev;
@@ -100,9 +98,8 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
ib_dma_unmap_sg(dev, data->sg, data->size, dir);
}
-static int
-iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
- struct iser_mem_reg *reg)
+static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem,
+ struct iser_mem_reg *reg)
{
struct scatterlist *sg = mem->sg;
@@ -154,8 +151,8 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
reg->mem_h = NULL;
}
-static void
-iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
+static void iser_set_dif_domain(struct scsi_cmnd *sc,
+ struct ib_sig_domain *domain)
{
domain->sig_type = IB_SIG_TYPE_T10_DIF;
domain->sig.dif.pi_interval = scsi_prot_interval(sc);
@@ -171,8 +168,8 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain)
domain->sig.dif.ref_remap = true;
}
-static int
-iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
+static int iser_set_sig_attrs(struct scsi_cmnd *sc,
+ struct ib_sig_attrs *sig_attrs)
{
switch (scsi_get_prot_op(sc)) {
case SCSI_PROT_WRITE_INSERT:
@@ -205,8 +202,7 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs)
return 0;
}
-static inline void
-iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
+static inline void iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
{
*mask = 0;
if (sc->prot_flags & SCSI_PROT_REF_CHECK)
@@ -215,11 +211,8 @@ iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask)
*mask |= IB_SIG_CHECK_GUARD;
}
-static inline void
-iser_inv_rkey(struct ib_send_wr *inv_wr,
- struct ib_mr *mr,
- struct ib_cqe *cqe,
- struct ib_send_wr *next_wr)
+static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr,
+ struct ib_cqe *cqe, struct ib_send_wr *next_wr)
{
inv_wr->opcode = IB_WR_LOCAL_INV;
inv_wr->wr_cqe = cqe;
@@ -229,12 +222,11 @@ iser_inv_rkey(struct ib_send_wr *inv_wr,
inv_wr->next = next_wr;
}
-static int
-iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
- struct iser_data_buf *mem,
- struct iser_data_buf *sig_mem,
- struct iser_reg_resources *rsc,
- struct iser_mem_reg *sig_reg)
+static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
+ struct iser_data_buf *mem,
+ struct iser_data_buf *sig_mem,
+ struct iser_reg_resources *rsc,
+ struct iser_mem_reg *sig_reg)
{
struct iser_tx_desc *tx_desc = &iser_task->desc;
struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe;
@@ -335,12 +327,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
return 0;
}
-static int
-iser_reg_data_sg(struct iscsi_iser_task *task,
- struct iser_data_buf *mem,
- struct iser_fr_desc *desc,
- bool use_dma_key,
- struct iser_mem_reg *reg)
+static int iser_reg_data_sg(struct iscsi_iser_task *task,
+ struct iser_data_buf *mem,
+ struct iser_fr_desc *desc, bool use_dma_key,
+ struct iser_mem_reg *reg)
{
struct iser_device *device = task->iser_conn->ib_conn.device;
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index b566f7cb7797..8bf87b073d9b 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -265,14 +265,14 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
memset(&init_attr, 0, sizeof(init_attr));
init_attr.event_handler = iser_qp_event_callback;
- init_attr.qp_context = (void *)ib_conn;
- init_attr.send_cq = ib_conn->cq;
- init_attr.recv_cq = ib_conn->cq;
- init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
+ init_attr.qp_context = (void *)ib_conn;
+ init_attr.send_cq = ib_conn->cq;
+ init_attr.recv_cq = ib_conn->cq;
+ init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
init_attr.cap.max_send_sge = 2;
init_attr.cap.max_recv_sge = 1;
- init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
- init_attr.qp_type = IB_QPT_RC;
+ init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ init_attr.qp_type = IB_QPT_RC;
init_attr.cap.max_send_wr = max_send_wr;
if (ib_conn->pi_support)
init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN;
@@ -283,9 +283,8 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn)
goto out_err;
ib_conn->qp = ib_conn->cma_id->qp;
- iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n",
- ib_conn, ib_conn->cma_id,
- ib_conn->cma_id->qp, max_send_wr);
+ iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", ib_conn,
+ ib_conn->cma_id, ib_conn->cma_id->qp, max_send_wr);
return ret;
out_err:
@@ -313,7 +312,7 @@ struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
goto inc_refcnt;
device = kzalloc(sizeof *device, GFP_KERNEL);
- if (device == NULL)
+ if (!device)
goto out;
/* assign this device to the device */
@@ -392,8 +391,7 @@ void iser_release_work(struct work_struct *work)
* so the cm_id removal is out of here. It is safe to
* be invoked multiple times.
*/
-static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
- bool destroy)
+static void iser_free_ib_conn_res(struct iser_conn *iser_conn, bool destroy)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
@@ -401,7 +399,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
iser_info("freeing conn %p cma_id %p qp %p\n",
iser_conn, ib_conn->cma_id, ib_conn->qp);
- if (ib_conn->qp != NULL) {
+ if (ib_conn->qp) {
rdma_destroy_qp(ib_conn->cma_id);
ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);
ib_conn->qp = NULL;
@@ -411,7 +409,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
if (iser_conn->rx_descs)
iser_free_rx_descriptors(iser_conn);
- if (device != NULL) {
+ if (device) {
iser_device_try_release(device);
ib_conn->device = NULL;
}
@@ -445,7 +443,7 @@ void iser_conn_release(struct iser_conn *iser_conn)
iser_free_ib_conn_res(iser_conn, true);
mutex_unlock(&iser_conn->state_mutex);
- if (ib_conn->cma_id != NULL) {
+ if (ib_conn->cma_id) {
rdma_destroy_id(ib_conn->cma_id);
ib_conn->cma_id = NULL;
}
@@ -501,13 +499,12 @@ static void iser_connect_error(struct rdma_cm_id *cma_id)
{
struct iser_conn *iser_conn;
- iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = cma_id->context;
iser_conn->state = ISER_CONN_TERMINATING;
}
-static void
-iser_calc_scsi_params(struct iser_conn *iser_conn,
- unsigned int max_sectors)
+static void iser_calc_scsi_params(struct iser_conn *iser_conn,
+ unsigned int max_sectors)
{
struct iser_device *device = iser_conn->ib_conn.device;
struct ib_device_attr *attr = &device->ib_device->attrs;
@@ -545,11 +542,11 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
struct iser_device *device;
- struct iser_conn *iser_conn;
- struct ib_conn *ib_conn;
+ struct iser_conn *iser_conn;
+ struct ib_conn *ib_conn;
int ret;
- iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = cma_id->context;
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
@@ -593,9 +590,9 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id)
static void iser_route_handler(struct rdma_cm_id *cma_id)
{
struct rdma_conn_param conn_param;
- int ret;
+ int ret;
struct iser_cm_hdr req_hdr;
- struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ struct iser_conn *iser_conn = cma_id->context;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct ib_device *ib_dev = ib_conn->device->ib_device;
@@ -609,9 +606,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id)
memset(&conn_param, 0, sizeof conn_param);
conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom;
- conn_param.initiator_depth = 1;
- conn_param.retry_count = 7;
- conn_param.rnr_retry_count = 6;
+ conn_param.initiator_depth = 1;
+ conn_param.retry_count = 7;
+ conn_param.rnr_retry_count = 6;
memset(&req_hdr, 0, sizeof(req_hdr));
req_hdr.flags = ISER_ZBVA_NOT_SUP;
@@ -638,7 +635,7 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id,
struct ib_qp_attr attr;
struct ib_qp_init_attr init_attr;
- iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = cma_id->context;
if (iser_conn->state != ISER_CONN_PENDING)
/* bailout */
return;
@@ -661,7 +658,7 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id,
static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
- struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ struct iser_conn *iser_conn = cma_id->context;
if (iser_conn_terminate(iser_conn)) {
if (iser_conn->iscsi_conn)
@@ -675,7 +672,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
bool destroy)
{
- struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
+ struct iser_conn *iser_conn = cma_id->context;
/*
* We are not guaranteed that we visited disconnected_handler
@@ -687,12 +684,13 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
complete(&iser_conn->ib_completion);
}
-static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
+static int iser_cma_handler(struct rdma_cm_id *cma_id,
+ struct rdma_cm_event *event)
{
struct iser_conn *iser_conn;
int ret = 0;
- iser_conn = (struct iser_conn *)cma_id->context;
+ iser_conn = cma_id->context;
iser_info("%s (%d): status %d conn %p id %p\n",
rdma_event_msg(event->event), event->event,
event->status, cma_id->context, cma_id);
@@ -757,7 +755,6 @@ void iser_conn_init(struct iser_conn *iser_conn)
INIT_LIST_HEAD(&iser_conn->conn_list);
mutex_init(&iser_conn->state_mutex);
- ib_conn->post_recv_buf_count = 0;
ib_conn->reg_cqe.done = iser_reg_comp;
}
@@ -765,10 +762,8 @@ void iser_conn_init(struct iser_conn *iser_conn)
* starts the process of connecting to the target
* sleeps until the connection is established or rejected
*/
-int iser_connect(struct iser_conn *iser_conn,
- struct sockaddr *src_addr,
- struct sockaddr *dst_addr,
- int non_blocking)
+int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr,
+ struct sockaddr *dst_addr, int non_blocking)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
int err = 0;
@@ -785,8 +780,7 @@ int iser_connect(struct iser_conn *iser_conn,
iser_conn->state = ISER_CONN_PENDING;
ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler,
- (void *)iser_conn,
- RDMA_PS_TCP, IB_QPT_RC);
+ iser_conn, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(ib_conn->cma_id)) {
err = PTR_ERR(ib_conn->cma_id);
iser_err("rdma_create_id failed: %d\n", err);
@@ -829,7 +823,7 @@ int iser_post_recvl(struct iser_conn *iser_conn)
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_login_desc *desc = &iser_conn->login_desc;
struct ib_recv_wr wr;
- int ib_ret;
+ int ret;
desc->sge.addr = desc->rsp_dma;
desc->sge.length = ISER_RX_LOGIN_SIZE;
@@ -841,46 +835,30 @@ int iser_post_recvl(struct iser_conn *iser_conn)
wr.num_sge = 1;
wr.next = NULL;
- ib_conn->post_recv_buf_count++;
- ib_ret = ib_post_recv(ib_conn->qp, &wr, NULL);
- if (ib_ret) {
- iser_err("ib_post_recv failed ret=%d\n", ib_ret);
- ib_conn->post_recv_buf_count--;
- }
+ ret = ib_post_recv(ib_conn->qp, &wr, NULL);
+ if (unlikely(ret))
+ iser_err("ib_post_recv login failed ret=%d\n", ret);
- return ib_ret;
+ return ret;
}
-int iser_post_recvm(struct iser_conn *iser_conn, int count)
+int iser_post_recvm(struct iser_conn *iser_conn, struct iser_rx_desc *rx_desc)
{
struct ib_conn *ib_conn = &iser_conn->ib_conn;
- unsigned int my_rx_head = iser_conn->rx_desc_head;
- struct iser_rx_desc *rx_desc;
- struct ib_recv_wr *wr;
- int i, ib_ret;
-
- for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) {
- rx_desc = &iser_conn->rx_descs[my_rx_head];
- rx_desc->cqe.done = iser_task_rsp;
- wr->wr_cqe = &rx_desc->cqe;
- wr->sg_list = &rx_desc->rx_sg;
- wr->num_sge = 1;
- wr->next = wr + 1;
- my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask;
- }
+ struct ib_recv_wr wr;
+ int ret;
- wr--;
- wr->next = NULL; /* mark end of work requests list */
+ rx_desc->cqe.done = iser_task_rsp;
+ wr.wr_cqe = &rx_desc->cqe;
+ wr.sg_list = &rx_desc->rx_sg;
+ wr.num_sge = 1;
+ wr.next = NULL;
- ib_conn->post_recv_buf_count += count;
- ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL);
- if (unlikely(ib_ret)) {
- iser_err("ib_post_recv failed ret=%d\n", ib_ret);
- ib_conn->post_recv_buf_count -= count;
- } else
- iser_conn->rx_desc_head = my_rx_head;
+ ret = ib_post_recv(ib_conn->qp, &wr, NULL);
+ if (unlikely(ret))
+ iser_err("ib_post_recv failed ret=%d\n", ret);
- return ib_ret;
+ return ret;
}
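
The rewritten iser_post_recvm() above posts one receive work request per descriptor instead of chaining a batch through the rx_wr array and tracking a ring head, which is why rx_desc_head and post_recv_buf_count disappear. A sketch of the resulting pattern, mirroring the hunk; post_one_recv is a hypothetical name and the snippet reuses the driver's types, so it is not compilable in isolation:

	static int post_one_recv(struct ib_conn *ib_conn,
				 struct iser_rx_desc *rx_desc)
	{
		struct ib_recv_wr wr;
		int ret;

		rx_desc->cqe.done = iser_task_rsp;	/* completion callback */
		wr.wr_cqe  = &rx_desc->cqe;
		wr.sg_list = &rx_desc->rx_sg;
		wr.num_sge = 1;
		wr.next    = NULL;			/* single WR, no chain */

		ret = ib_post_recv(ib_conn->qp, &wr, NULL);
		if (unlikely(ret))
			iser_err("ib_post_recv failed ret=%d\n", ret);
		return ret;
	}
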
@@ -888,16 +866,14 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count)
* iser_post_send - Initiate a Send DTO operation
* @ib_conn: connection RDMA resources
* @tx_desc: iSER TX descriptor
- * @signal: true to send work request as SIGNALED
*
* Return: 0 on success, -1 on failure
*/
-int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
- bool signal)
+int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
struct ib_send_wr *wr = &tx_desc->send_wr;
struct ib_send_wr *first_wr;
- int ib_ret;
+ int ret;
ib_dma_sync_single_for_device(ib_conn->device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN,
@@ -908,7 +884,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
wr->sg_list = tx_desc->tx_sg;
wr->num_sge = tx_desc->num_sge;
wr->opcode = IB_WR_SEND;
- wr->send_flags = signal ? IB_SEND_SIGNALED : 0;
+ wr->send_flags = IB_SEND_SIGNALED;
if (tx_desc->inv_wr.next)
first_wr = &tx_desc->inv_wr;
@@ -917,12 +893,12 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc,
else
first_wr = wr;
- ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL);
- if (unlikely(ib_ret))
+ ret = ib_post_send(ib_conn->qp, first_wr, NULL);
+ if (unlikely(ret))
iser_err("ib_post_send failed, ret:%d opcode:%d\n",
- ib_ret, wr->opcode);
+ ret, wr->opcode);
- return ib_ret;
+ return ret;
}
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
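
With the signal parameter gone, every send is posted IB_SEND_SIGNALED, so each work request produces a completion and no unsignaled WRs can accumulate in the send queue. A hypothetical call site under the new signature (error text is illustrative):

	err = iser_post_send(&iser_conn->ib_conn, tx_desc);
	if (err)
		iser_err("posting send failed, err=%d\n", err);
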
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
index 76e4352fe3f6..385a19846c24 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -13,8 +13,8 @@
void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- struct rtrs_clt_stats *stats = sess->stats;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_stats *stats = clt_path->stats;
struct rtrs_clt_stats_pcpu *s;
int cpu;
@@ -180,8 +180,8 @@ static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
{
struct rtrs_clt_con *con = req->con;
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- struct rtrs_clt_stats *stats = sess->stats;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_stats *stats = clt_path->stats;
unsigned int len;
len = req->usr_len + req->data_len;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index 0e69180c3771..b4fa473b7888 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -16,21 +16,21 @@
#define MIN_MAX_RECONN_ATT -1
#define MAX_MAX_RECONN_ATT 9999
-static void rtrs_clt_sess_release(struct kobject *kobj)
+static void rtrs_clt_path_release(struct kobject *kobj)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
- free_sess(sess);
+ free_path(clt_path);
}
static struct kobj_type ktype_sess = {
.sysfs_ops = &kobj_sysfs_ops,
- .release = rtrs_clt_sess_release
+ .release = rtrs_clt_path_release
};
-static void rtrs_clt_sess_stats_release(struct kobject *kobj)
+static void rtrs_clt_path_stats_release(struct kobject *kobj)
{
struct rtrs_clt_stats *stats;
@@ -43,14 +43,15 @@ static void rtrs_clt_sess_stats_release(struct kobject *kobj)
static struct kobj_type ktype_stats = {
.sysfs_ops = &kobj_sysfs_ops,
- .release = rtrs_clt_sess_stats_release,
+ .release = rtrs_clt_path_stats_release,
};
static ssize_t max_reconnect_attempts_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
- struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
return sysfs_emit(page, "%d\n",
rtrs_clt_get_max_reconnect_attempts(clt));
@@ -63,7 +64,8 @@ static ssize_t max_reconnect_attempts_store(struct device *dev,
{
int value;
int ret;
- struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
ret = kstrtoint(buf, 10, &value);
if (ret) {
@@ -90,9 +92,9 @@ static ssize_t mpath_policy_show(struct device *dev,
struct device_attribute *attr,
char *page)
{
- struct rtrs_clt *clt;
+ struct rtrs_clt_sess *clt;
- clt = container_of(dev, struct rtrs_clt, dev);
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
switch (clt->mp_policy) {
case MP_POLICY_RR:
@@ -114,12 +116,12 @@ static ssize_t mpath_policy_store(struct device *dev,
const char *buf,
size_t count)
{
- struct rtrs_clt *clt;
+ struct rtrs_clt_sess *clt;
int value;
int ret;
size_t len = 0;
- clt = container_of(dev, struct rtrs_clt, dev);
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
ret = kstrtoint(buf, 10, &value);
if (!ret && (value == MP_POLICY_RR ||
@@ -169,12 +171,12 @@ static ssize_t add_path_store(struct device *dev,
.src = &srcaddr,
.dst = &dstaddr
};
- struct rtrs_clt *clt;
+ struct rtrs_clt_sess *clt;
const char *nl;
size_t len;
int err;
- clt = container_of(dev, struct rtrs_clt, dev);
+ clt = container_of(dev, struct rtrs_clt_sess, dev);
nl = strchr(buf, '\n');
if (nl)
@@ -197,10 +199,10 @@ static DEVICE_ATTR_RW(add_path);
static ssize_t rtrs_clt_state_show(struct kobject *kobj,
struct kobj_attribute *attr, char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
- if (sess->state == RTRS_CLT_CONNECTED)
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ if (clt_path->state == RTRS_CLT_CONNECTED)
return sysfs_emit(page, "connected\n");
return sysfs_emit(page, "disconnected\n");
@@ -219,16 +221,16 @@ static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int ret;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
if (!sysfs_streq(buf, "1")) {
- rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
- ret = rtrs_clt_reconnect_from_sysfs(sess);
+ ret = rtrs_clt_reconnect_from_sysfs(clt_path);
if (ret)
return ret;
@@ -249,15 +251,15 @@ static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
if (!sysfs_streq(buf, "1")) {
- rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
- rtrs_clt_close_conns(sess, true);
+ rtrs_clt_close_conns(clt_path, true);
return count;
}
@@ -276,16 +278,16 @@ static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int ret;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
if (!sysfs_streq(buf, "1")) {
- rtrs_err(sess->clt, "%s: unknown value: '%s'\n",
+ rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
- ret = rtrs_clt_remove_path_from_sysfs(sess, &attr->attr);
+ ret = rtrs_clt_remove_path_from_sysfs(clt_path, &attr->attr);
if (ret)
return ret;
@@ -333,11 +335,11 @@ static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, typeof(*sess), kobj);
+ clt_path = container_of(kobj, typeof(*clt_path), kobj);
- return sysfs_emit(page, "%u\n", sess->hca_port);
+ return sysfs_emit(page, "%u\n", clt_path->hca_port);
}
static struct kobj_attribute rtrs_clt_hca_port_attr =
@@ -347,11 +349,11 @@ static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
- return sysfs_emit(page, "%s\n", sess->hca_name);
+ return sysfs_emit(page, "%s\n", clt_path->hca_name);
}
static struct kobj_attribute rtrs_clt_hca_name_attr =
@@ -361,12 +363,12 @@ static ssize_t rtrs_clt_cur_latency_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
return sysfs_emit(page, "%lld ns\n",
- ktime_to_ns(sess->s.hb_cur_latency));
+ ktime_to_ns(clt_path->s.hb_cur_latency));
}
static struct kobj_attribute rtrs_clt_cur_latency_attr =
@@ -376,11 +378,11 @@ static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int len;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
- len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page,
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&clt_path->s.src_addr, page,
PAGE_SIZE);
len += sysfs_emit_at(page, len, "\n");
return len;
@@ -393,11 +395,11 @@ static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int len;
- sess = container_of(kobj, struct rtrs_clt_sess, kobj);
- len = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, page,
+ clt_path = container_of(kobj, struct rtrs_clt_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&clt_path->s.dst_addr, page,
PAGE_SIZE);
len += sysfs_emit_at(page, len, "\n");
return len;
@@ -406,7 +408,7 @@ static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj,
static struct kobj_attribute rtrs_clt_dst_addr_attr =
__ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL);
-static struct attribute *rtrs_clt_sess_attrs[] = {
+static struct attribute *rtrs_clt_path_attrs[] = {
&rtrs_clt_hca_name_attr.attr,
&rtrs_clt_hca_port_attr.attr,
&rtrs_clt_src_addr_attr.attr,
@@ -419,42 +421,43 @@ static struct attribute *rtrs_clt_sess_attrs[] = {
NULL,
};
-static const struct attribute_group rtrs_clt_sess_attr_group = {
- .attrs = rtrs_clt_sess_attrs,
+static const struct attribute_group rtrs_clt_path_attr_group = {
+ .attrs = rtrs_clt_path_attrs,
};
-int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
+int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
char str[NAME_MAX];
int err;
struct rtrs_addr path = {
- .src = &sess->s.src_addr,
- .dst = &sess->s.dst_addr,
+ .src = &clt_path->s.src_addr,
+ .dst = &clt_path->s.dst_addr,
};
rtrs_addr_to_str(&path, str, sizeof(str));
- err = kobject_init_and_add(&sess->kobj, &ktype_sess, clt->kobj_paths,
+ err = kobject_init_and_add(&clt_path->kobj, &ktype_sess,
+ clt->kobj_paths,
"%s", str);
if (err) {
pr_err("kobject_init_and_add: %d\n", err);
- kobject_put(&sess->kobj);
+ kobject_put(&clt_path->kobj);
return err;
}
- err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+ err = sysfs_create_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
if (err) {
pr_err("sysfs_create_group(): %d\n", err);
goto put_kobj;
}
- err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
- &sess->kobj, "stats");
+ err = kobject_init_and_add(&clt_path->stats->kobj_stats, &ktype_stats,
+ &clt_path->kobj, "stats");
if (err) {
pr_err("kobject_init_and_add: %d\n", err);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_put(&clt_path->stats->kobj_stats);
goto remove_group;
}
- err = sysfs_create_group(&sess->stats->kobj_stats,
+ err = sysfs_create_group(&clt_path->stats->kobj_stats,
&rtrs_clt_stats_attr_group);
if (err) {
pr_err("failed to create stats sysfs group, err: %d\n", err);
@@ -464,25 +467,25 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess)
return 0;
put_kobj_stats:
- kobject_del(&sess->stats->kobj_stats);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_del(&clt_path->stats->kobj_stats);
+ kobject_put(&clt_path->stats->kobj_stats);
remove_group:
- sysfs_remove_group(&sess->kobj, &rtrs_clt_sess_attr_group);
+ sysfs_remove_group(&clt_path->kobj, &rtrs_clt_path_attr_group);
put_kobj:
- kobject_del(&sess->kobj);
- kobject_put(&sess->kobj);
+ kobject_del(&clt_path->kobj);
+ kobject_put(&clt_path->kobj);
return err;
}
-void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path,
const struct attribute *sysfs_self)
{
- kobject_del(&sess->stats->kobj_stats);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_del(&clt_path->stats->kobj_stats);
+ kobject_put(&clt_path->stats->kobj_stats);
if (sysfs_self)
- sysfs_remove_file_self(&sess->kobj, sysfs_self);
- kobject_del(&sess->kobj);
+ sysfs_remove_file_self(&clt_path->kobj, sysfs_self);
+ kobject_del(&clt_path->kobj);
}
static struct attribute *rtrs_clt_attrs[] = {
@@ -496,12 +499,12 @@ static const struct attribute_group rtrs_clt_attr_group = {
.attrs = rtrs_clt_attrs,
};
-int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt)
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt)
{
return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group);
}
-void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt)
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt)
{
sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 15c0077dd27e..7c3f98e57889 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -46,21 +46,21 @@ static struct rtrs_rdma_dev_pd dev_pd = {
static struct workqueue_struct *rtrs_wq;
static struct class *rtrs_clt_dev_class;
-static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt)
+static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
bool connected = false;
rcu_read_lock();
- list_for_each_entry_rcu(sess, &clt->paths_list, s.entry)
- connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED;
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry)
+ connected |= READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED;
rcu_read_unlock();
return connected;
}
static struct rtrs_permit *
-__rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
+__rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type)
{
size_t max_depth = clt->queue_depth;
struct rtrs_permit *permit;
@@ -87,7 +87,7 @@ __rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type)
return permit;
}
-static inline void __rtrs_put_permit(struct rtrs_clt *clt,
+static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt,
struct rtrs_permit *permit)
{
clear_bit_unlock(permit->mem_id, clt->permits_map);
@@ -107,7 +107,7 @@ static inline void __rtrs_put_permit(struct rtrs_clt *clt,
* Context:
* Can sleep if @wait == RTRS_PERMIT_WAIT
*/
-struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt,
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt,
enum rtrs_clt_con_type con_type,
enum wait_type can_wait)
{
@@ -142,7 +142,8 @@ EXPORT_SYMBOL(rtrs_clt_get_permit);
* Context:
* Does not matter
*/
-void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
+void rtrs_clt_put_permit(struct rtrs_clt_sess *clt,
+ struct rtrs_permit *permit)
{
if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map)))
return;
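
The hunks above show the permit pool as a bitmap: clear_bit_unlock() releases a slot in clt->permits_map and test_bit() sanity-checks ownership on put. A single-threaded userspace model of that slot map, for illustration only; the kernel side uses atomic bitops, and QUEUE_DEPTH plus the helper names here are assumptions:

	#include <stdio.h>

	#define QUEUE_DEPTH 8

	static unsigned long permits_map;

	static int get_permit(void)
	{
		for (int id = 0; id < QUEUE_DEPTH; id++) {
			if (!(permits_map & (1UL << id))) {
				permits_map |= 1UL << id;	/* claim slot */
				return id;
			}
		}
		return -1;					/* exhausted */
	}

	static void put_permit(int id)
	{
		permits_map &= ~(1UL << id);			/* release slot */
	}

	int main(void)
	{
		int a = get_permit(), b = get_permit();

		printf("got permits %d and %d\n", a, b);
		put_permit(a);
		put_permit(b);
		return 0;
	}
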
@@ -163,29 +164,29 @@ EXPORT_SYMBOL(rtrs_clt_put_permit);
/**
* rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
- * @sess: client session pointer
+ * @clt_path: client path pointer
* @permit: permit for the allocation of the RDMA buffer
* Note:
* IO connection starts from 1.
* 0 connection is for user messages.
*/
static
-struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
+struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path,
struct rtrs_permit *permit)
{
int id = 0;
if (permit->con_type == RTRS_IO_CON)
- id = (permit->cpu_id % (sess->s.irq_con_num - 1)) + 1;
+ id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1;
- return to_clt_con(sess->s.con[id]);
+ return to_clt_con(clt_path->s.con[id]);
}
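
The mapping above spreads IO permits over connections 1..irq_con_num-1 by CPU id, keeping connection 0 for user messages. A runnable model of that arithmetic, with an illustrative irq_con_num:

	#include <stdio.h>

	int main(void)
	{
		int irq_con_num = 5;	/* example: 1 user + 4 IO connections */

		for (int cpu_id = 0; cpu_id < 8; cpu_id++) {
			int id = (cpu_id % (irq_con_num - 1)) + 1;

			printf("cpu %d -> con %d\n", cpu_id, id);
		}
		return 0;	/* cons 1..4 round-robin; con 0 is never used */
	}
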
/**
* rtrs_clt_change_state() - change the session state through session state
* machine.
*
- * @sess: client session to change the state of.
+ * @clt_path: client path to change the state of.
* @new_state: state to change to.
*
* returns true if sess's state is changed to the new state, otherwise returns false.
@@ -193,15 +194,15 @@ struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess,
* Locks:
* state_wq lock must be held.
*/
-static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
+static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path,
enum rtrs_clt_state new_state)
{
enum rtrs_clt_state old_state;
bool changed = false;
- lockdep_assert_held(&sess->state_wq.lock);
+ lockdep_assert_held(&clt_path->state_wq.lock);
- old_state = sess->state;
+ old_state = clt_path->state;
switch (new_state) {
case RTRS_CLT_CONNECTING:
switch (old_state) {
@@ -275,42 +276,42 @@ static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess,
break;
}
if (changed) {
- sess->state = new_state;
- wake_up_locked(&sess->state_wq);
+ clt_path->state = new_state;
+ wake_up_locked(&clt_path->state_wq);
}
return changed;
}
-static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess,
+static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path,
enum rtrs_clt_state old_state,
enum rtrs_clt_state new_state)
{
bool changed = false;
- spin_lock_irq(&sess->state_wq.lock);
- if (sess->state == old_state)
- changed = rtrs_clt_change_state(sess, new_state);
- spin_unlock_irq(&sess->state_wq.lock);
+ spin_lock_irq(&clt_path->state_wq.lock);
+ if (clt_path->state == old_state)
+ changed = rtrs_clt_change_state(clt_path, new_state);
+ spin_unlock_irq(&clt_path->state_wq.lock);
return changed;
}
static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
- if (rtrs_clt_change_state_from_to(sess,
+ if (rtrs_clt_change_state_from_to(clt_path,
RTRS_CLT_CONNECTED,
RTRS_CLT_RECONNECTING)) {
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
unsigned int delay_ms;
/*
* Normal scenario, reconnect if we were successfully connected
*/
delay_ms = clt->reconnect_delay_sec * 1000;
- queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
msecs_to_jiffies(delay_ms +
prandom_u32() % RTRS_RECONNECT_SEED));
} else {
@@ -319,7 +320,7 @@ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
* so notify waiter with error state, waiter is responsible
* for cleaning the rest and reconnect if needed.
*/
- rtrs_clt_change_state_from_to(sess,
+ rtrs_clt_change_state_from_to(clt_path,
RTRS_CLT_CONNECTING,
RTRS_CLT_CONNECTING_ERR);
}
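
The reconnect path above schedules the delayed work with a fixed delay plus pseudo-random jitter, so many paths failing at once do not all reconnect in the same instant. A userspace model of the delay computation; the RTRS_RECONNECT_SEED value here is illustrative, not the driver's constant:

	#include <stdio.h>
	#include <stdlib.h>

	#define RTRS_RECONNECT_SEED 8192

	int main(void)
	{
		unsigned int reconnect_delay_sec = 5;
		unsigned int delay_ms = reconnect_delay_sec * 1000;
		unsigned int jitter = rand() % RTRS_RECONNECT_SEED;

		printf("reconnect scheduled in %u ms\n", delay_ms + jitter);
		return 0;
	}
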
@@ -330,7 +331,7 @@ static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n",
+ rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
@@ -350,7 +351,7 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n",
+ rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
@@ -380,14 +381,14 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
bool notify, bool can_wait)
{
struct rtrs_clt_con *con = req->con;
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int err;
if (WARN_ON(!req->in_use))
return;
if (WARN_ON(!req->con))
return;
- sess = to_clt_sess(con->c.sess);
+ clt_path = to_clt_path(con->c.path);
if (req->sg_cnt) {
if (req->dir == DMA_FROM_DEVICE && req->need_inv) {
@@ -417,7 +418,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
refcount_inc(&req->ref);
err = rtrs_inv_rkey(req);
if (err) {
- rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n",
+ rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n",
req->mr->rkey, err);
} else if (can_wait) {
wait_for_completion(&req->inv_comp);
@@ -433,21 +434,21 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno,
if (!refcount_dec_and_test(&req->ref))
return;
}
- ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
}
if (!refcount_dec_and_test(&req->ref))
return;
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
- atomic_dec(&sess->stats->inflight);
+ atomic_dec(&clt_path->stats->inflight);
req->in_use = false;
req->con = NULL;
if (errno) {
- rtrs_err_rl(con->c.sess, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
- errno, kobject_name(&sess->kobj), sess->hca_name,
- sess->hca_port, notify);
+ rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n",
+ errno, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port, notify);
}
if (notify)
@@ -459,12 +460,12 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
struct rtrs_rbuf *rbuf, u32 off,
u32 imm, struct ib_send_wr *wr)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
enum ib_send_flags flags;
struct ib_sge sge;
if (!req->sg_size) {
- rtrs_wrn(con->c.sess,
+ rtrs_wrn(con->c.path,
"Doing RDMA Write failed, no data supplied\n");
return -EINVAL;
}
@@ -472,16 +473,17 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
/* user data and user message in the first list element */
sge.addr = req->iu->dma_addr;
sge.length = req->sg_size;
- sge.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
/*
* From time to time we have to post signalled sends,
* or send queue will fill up and only QP reset can help.
*/
- flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
+ flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
0 : IB_SEND_SIGNALED;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ req->iu->dma_addr,
req->sg_size, DMA_TO_DEVICE);
return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1,
@@ -489,15 +491,15 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con,
imm, flags, wr, NULL);
}
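
The comment in the hunk above explains the cadence: only every signal_interval-th work request is posted IB_SEND_SIGNALED, enough to drain the send queue without paying for a completion per WR. A runnable model of that modulo pattern, with an illustrative interval:

	#include <stdio.h>

	int main(void)
	{
		int signal_interval = 4;	/* illustrative value */
		int wr_cnt = 0;

		for (int i = 0; i < 10; i++) {
			int signaled = (++wr_cnt % signal_interval) == 0;

			printf("wr %2d: %s\n", wr_cnt,
			       signaled ? "SIGNALED" : "unsignaled");
		}
		return 0;
	}
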
-static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id,
+static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id,
s16 errno, bool w_inval)
{
struct rtrs_clt_io_req *req;
- if (WARN_ON(msg_id >= sess->queue_depth))
+ if (WARN_ON(msg_id >= clt_path->queue_depth))
return;
- req = &sess->reqs[msg_id];
+ req = &clt_path->reqs[msg_id];
/* Drop need_inv if server responded with send with invalidation */
req->need_inv &= !w_inval;
complete_rdma_req(req, errno, true, false);
@@ -507,21 +509,21 @@ static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc)
{
struct rtrs_iu *iu;
int err;
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
- WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
+ WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
iu = container_of(wc->wr_cqe, struct rtrs_iu,
cqe);
err = rtrs_iu_post_recv(&con->c, iu);
if (err) {
- rtrs_err(con->c.sess, "post iu failed %d\n", err);
+ rtrs_err(con->c.path, "post iu failed %d\n", err);
rtrs_rdma_error_recovery(con);
}
}
static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_msg_rkey_rsp *msg;
u32 imm_type, imm_payload;
bool w_inval = false;
@@ -529,25 +531,26 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
u32 buf_id;
int err;
- WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0);
+ WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0);
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
if (wc->byte_len < sizeof(*msg)) {
- rtrs_err(con->c.sess, "rkey response is malformed: size %d\n",
+ rtrs_err(con->c.path, "rkey response is malformed: size %d\n",
wc->byte_len);
goto out;
}
- ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) {
- rtrs_err(sess->clt, "rkey response is malformed: type %d\n",
+ rtrs_err(clt_path->clt,
+ "rkey response is malformed: type %d\n",
le16_to_cpu(msg->type));
goto out;
}
buf_id = le16_to_cpu(msg->buf_id);
- if (WARN_ON(buf_id >= sess->queue_depth))
+ if (WARN_ON(buf_id >= clt_path->queue_depth))
goto out;
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);
@@ -560,10 +563,10 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc)
if (WARN_ON(buf_id != msg_id))
goto out;
- sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
- process_io_rsp(sess, msg_id, err, w_inval);
+ clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey);
+ process_io_rsp(clt_path, msg_id, err, w_inval);
}
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr,
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
return rtrs_clt_recv_done(con, wc);
out:
@@ -600,14 +603,14 @@ static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
u32 imm_type, imm_payload;
bool w_inval = false;
int err;
if (wc->status != IB_WC_SUCCESS) {
if (wc->status != IB_WC_WR_FLUSH_ERR) {
- rtrs_err(sess->clt, "RDMA failed: %s\n",
+ rtrs_err(clt_path->clt, "RDMA failed: %s\n",
ib_wc_status_msg(wc->status));
rtrs_rdma_error_recovery(con);
}
@@ -632,21 +635,21 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM);
rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
- process_io_rsp(sess, msg_id, err, w_inval);
+ process_io_rsp(clt_path, msg_id, err, w_inval);
} else if (imm_type == RTRS_HB_MSG_IMM) {
WARN_ON(con->c.cid);
- rtrs_send_hb_ack(&sess->s);
- if (sess->flags & RTRS_MSG_NEW_RKEY_F)
+ rtrs_send_hb_ack(&clt_path->s);
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
- sess->s.hb_missed_cnt = 0;
- sess->s.hb_cur_latency =
- ktime_sub(ktime_get(), sess->s.hb_last_sent);
- if (sess->flags & RTRS_MSG_NEW_RKEY_F)
+ clt_path->s.hb_missed_cnt = 0;
+ clt_path->s.hb_cur_latency =
+ ktime_sub(ktime_get(), clt_path->s.hb_last_sent);
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F)
return rtrs_clt_recv_done(con, wc);
} else {
- rtrs_wrn(con->c.sess, "Unknown IMM type %u\n",
+ rtrs_wrn(con->c.path, "Unknown IMM type %u\n",
imm_type);
}
if (w_inval)
@@ -658,7 +661,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
else
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
if (err) {
- rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n",
+ rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n",
err);
rtrs_rdma_error_recovery(con);
}
@@ -670,7 +673,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE ||
wc->wc_flags & IB_WC_WITH_IMM));
WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done);
- if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
if (wc->wc_flags & IB_WC_WITH_INVALIDATE)
return rtrs_clt_recv_done(con, wc);
@@ -685,7 +688,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
break;
default:
- rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode);
+ rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode);
return;
}
}
@@ -693,10 +696,10 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
{
int err, i;
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
for (i = 0; i < q_size; i++) {
- if (sess->flags & RTRS_MSG_NEW_RKEY_F) {
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
struct rtrs_iu *iu = &con->rsp_ius[i];
err = rtrs_iu_post_recv(&con->c, iu);
@@ -710,16 +713,16 @@ static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
return 0;
}
-static int post_recv_sess(struct rtrs_clt_sess *sess)
+static int post_recv_path(struct rtrs_clt_path *clt_path)
{
size_t q_size = 0;
int err, cid;
- for (cid = 0; cid < sess->s.con_num; cid++) {
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
if (cid == 0)
q_size = SERVICE_CON_QUEUE_DEPTH;
else
- q_size = sess->queue_depth;
+ q_size = clt_path->queue_depth;
/*
* x2 for RDMA read responses + FR key invalidations,
@@ -727,9 +730,10 @@ static int post_recv_sess(struct rtrs_clt_sess *sess)
*/
q_size *= 2;
- err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size);
+ err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size);
if (err) {
- rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err);
+ rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n",
+ err);
return err;
}
}
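
post_recv_path() above sizes each connection's receive queue: the service connection (cid 0) gets a fixed depth, IO connections get queue_depth, and both are doubled for RDMA read responses plus FR key invalidations. A model of that sizing with assumed values (the real SERVICE_CON_QUEUE_DEPTH may differ):

	#include <stdio.h>

	#define SERVICE_CON_QUEUE_DEPTH 512

	int main(void)
	{
		int queue_depth = 128, con_num = 3;

		for (int cid = 0; cid < con_num; cid++) {
			int q_size = (cid == 0) ? SERVICE_CON_QUEUE_DEPTH
						: queue_depth;

			q_size *= 2;	/* read responses + FR invalidations */
			printf("cid %d: post %d recvs\n", cid, q_size);
		}
		return 0;
	}
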
@@ -740,8 +744,8 @@ static int post_recv_sess(struct rtrs_clt_sess *sess)
struct path_it {
int i;
struct list_head skip_list;
- struct rtrs_clt *clt;
- struct rtrs_clt_sess *(*next_path)(struct path_it *it);
+ struct rtrs_clt_sess *clt;
+ struct rtrs_clt_path *(*next_path)(struct path_it *it);
};
/**
@@ -773,11 +777,11 @@ struct path_it {
* Locks:
* rcu_read_lock() must be held.
*/
-static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
+static struct rtrs_clt_path *get_next_path_rr(struct path_it *it)
{
- struct rtrs_clt_sess __rcu **ppcpu_path;
- struct rtrs_clt_sess *path;
- struct rtrs_clt *clt;
+ struct rtrs_clt_path __rcu **ppcpu_path;
+ struct rtrs_clt_path *path;
+ struct rtrs_clt_sess *clt;
clt = it->clt;
@@ -811,26 +815,26 @@ static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it)
* Locks:
* rcu_read_lock() must be held.
*/
-static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
+static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it)
{
- struct rtrs_clt_sess *min_path = NULL;
- struct rtrs_clt *clt = it->clt;
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *min_path = NULL;
+ struct rtrs_clt_sess *clt = it->clt;
+ struct rtrs_clt_path *clt_path;
int min_inflight = INT_MAX;
int inflight;
- list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
- if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))
+ if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
continue;
- inflight = atomic_read(&sess->stats->inflight);
+ inflight = atomic_read(&clt_path->stats->inflight);
if (inflight < min_inflight) {
min_inflight = inflight;
- min_path = sess;
+ min_path = clt_path;
}
}
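
The min-inflight policy above walks the path list, skips anything not connected, and picks the path with the fewest requests in flight. A self-contained userspace model of the same selection loop (the path struct and helper are assumptions, not driver types):

	#include <stdio.h>
	#include <limits.h>

	struct path {
		int connected;
		int inflight;
	};

	static struct path *min_inflight_path(struct path *paths, int n)
	{
		struct path *min_path = NULL;
		int min_inflight = INT_MAX;

		for (int i = 0; i < n; i++) {
			if (!paths[i].connected)
				continue;
			if (paths[i].inflight < min_inflight) {
				min_inflight = paths[i].inflight;
				min_path = &paths[i];
			}
		}
		return min_path;	/* may be NULL: caller must check */
	}

	int main(void)
	{
		struct path paths[] = { { 1, 7 }, { 0, 1 }, { 1, 3 } };
		struct path *p = min_inflight_path(paths, 3);

		printf("picked path with %d inflight\n", p ? p->inflight : -1);
		return 0;
	}
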
@@ -862,26 +866,26 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it)
* Therefore the caller MUST check whether the returned
* path is NULL and, if so, trigger the IO error.
*/
-static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it)
+static struct rtrs_clt_path *get_next_path_min_latency(struct path_it *it)
{
- struct rtrs_clt_sess *min_path = NULL;
- struct rtrs_clt *clt = it->clt;
- struct rtrs_clt_sess *sess;
- ktime_t min_latency = INT_MAX;
+ struct rtrs_clt_path *min_path = NULL;
+ struct rtrs_clt_sess *clt = it->clt;
+ struct rtrs_clt_path *clt_path;
+ ktime_t min_latency = KTIME_MAX;
ktime_t latency;
- list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) {
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
+ list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
- if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))
+ if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry)))
continue;
- latency = sess->s.hb_cur_latency;
+ latency = clt_path->s.hb_cur_latency;
if (latency < min_latency) {
min_latency = latency;
- min_path = sess;
+ min_path = clt_path;
}
}
@@ -895,7 +899,7 @@ static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it)
return min_path;
}
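
Note the sentinel fix in the hunk above: ktime_t counts nanoseconds in 64 bits, so initializing min_latency to INT_MAX (about 2.147 seconds) would wrongly exclude any path whose heartbeat latency exceeds that; KTIME_MAX is the correct upper bound. A quick demonstration of the overflow in the old comparison:

	#include <stdio.h>
	#include <stdint.h>
	#include <limits.h>

	int main(void)
	{
		int64_t latency_ns = 3000000000LL;	/* a 3 s latency */

		printf("latency < INT_MAX?   %d\n",
		       latency_ns < (int64_t)INT_MAX);	/* 0: never chosen */
		printf("latency < INT64_MAX? %d\n",
		       latency_ns < INT64_MAX);		/* 1: chosen */
		return 0;
	}
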
-static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt)
+static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt)
{
INIT_LIST_HEAD(&it->skip_list);
it->clt = clt;
@@ -928,7 +932,7 @@ static inline void path_it_deinit(struct path_it *it)
* the corresponding buffer of rtrs_iu (req->iu->buf), which later on will
* also hold the control message of rtrs.
* @req: an io request holding information about IO.
- * @sess: client session
+ * @clt_path: client path
* @conf: confirmation callback function to notify the upper layer.
* @permit: permit for allocation of RDMA remote buffer
* @priv: private pointer
@@ -940,7 +944,7 @@ static inline void path_it_deinit(struct path_it *it)
* @dir: direction of the IO.
*/
static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
- struct rtrs_clt_sess *sess,
+ struct rtrs_clt_path *clt_path,
void (*conf)(void *priv, int errno),
struct rtrs_permit *permit, void *priv,
const struct kvec *vec, size_t usr_len,
@@ -958,13 +962,13 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
req->sg_cnt = sg_cnt;
req->priv = priv;
req->dir = dir;
- req->con = rtrs_permit_to_clt_con(sess, permit);
+ req->con = rtrs_permit_to_clt_con(clt_path, permit);
req->conf = conf;
req->need_inv = false;
req->need_inv_comp = false;
req->inv_errno = 0;
refcount_set(&req->ref, 1);
- req->mp_policy = sess->clt->mp_policy;
+ req->mp_policy = clt_path->clt->mp_policy;
iov_iter_kvec(&iter, READ, vec, 1, usr_len);
len = _copy_from_iter(req->iu->buf, usr_len, &iter);
@@ -974,7 +978,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req,
}
static struct rtrs_clt_io_req *
-rtrs_clt_get_req(struct rtrs_clt_sess *sess,
+rtrs_clt_get_req(struct rtrs_clt_path *clt_path,
void (*conf)(void *priv, int errno),
struct rtrs_permit *permit, void *priv,
const struct kvec *vec, size_t usr_len,
@@ -983,14 +987,14 @@ rtrs_clt_get_req(struct rtrs_clt_sess *sess,
{
struct rtrs_clt_io_req *req;
- req = &sess->reqs[permit->mem_id];
- rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len,
+ req = &clt_path->reqs[permit->mem_id];
+ rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len,
sg, sg_cnt, data_len, dir);
return req;
}
static struct rtrs_clt_io_req *
-rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
+rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path,
struct rtrs_clt_io_req *fail_req)
{
struct rtrs_clt_io_req *req;
@@ -999,8 +1003,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess,
.iov_len = fail_req->usr_len
};
- req = &alive_sess->reqs[fail_req->permit->mem_id];
- rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit,
+ req = &alive_path->reqs[fail_req->permit->mem_id];
+ rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit,
fail_req->priv, &vec, fail_req->usr_len,
fail_req->sglist, fail_req->sg_cnt,
fail_req->data_len, fail_req->dir);
@@ -1013,7 +1017,7 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
u32 size, u32 imm, struct ib_send_wr *wr,
struct ib_send_wr *tail)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct ib_sge *sge = req->sge;
enum ib_send_flags flags;
struct scatterlist *sg;
@@ -1033,22 +1037,23 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con,
for_each_sg(req->sglist, sg, req->sg_cnt, i) {
sge[i].addr = sg_dma_address(sg);
sge[i].length = sg_dma_len(sg);
- sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
}
num_sge = 1 + req->sg_cnt;
}
sge[i].addr = req->iu->dma_addr;
sge[i].length = size;
- sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey;
/*
* From time to time we have to post signalled sends,
* or send queue will fill up and only QP reset can help.
*/
- flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ?
+ flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
0 : IB_SEND_SIGNALED;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr,
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ req->iu->dma_addr,
size, DMA_TO_DEVICE);
return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge,
@@ -1074,8 +1079,8 @@ static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count)
static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
{
struct rtrs_clt_con *con = req->con;
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
struct rtrs_msg_rdma_write *msg;
struct rtrs_rbuf *rbuf;
@@ -1088,13 +1093,13 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
- if (tsize > sess->chunk_size) {
+ if (tsize > clt_path->chunk_size) {
rtrs_wrn(s, "Write request failed, size too big %zu > %d\n",
- tsize, sess->chunk_size);
+ tsize, clt_path->chunk_size);
return -EMSGSIZE;
}
if (req->sg_cnt) {
- count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist,
+ count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
if (!count) {
rtrs_wrn(s, "Write request failed, map failed\n");
@@ -1111,7 +1116,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
imm = rtrs_to_io_req_imm(imm);
buf_id = req->permit->mem_id;
req->sg_size = tsize;
- rbuf = &sess->rbufs[buf_id];
+ rbuf = &clt_path->rbufs[buf_id];
if (count) {
ret = rtrs_map_sg_fr(req, count);
@@ -1119,7 +1124,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
rtrs_err_rl(s,
"Write request failed, failed to map fast reg. data, err: %d\n",
ret);
- ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
return ret;
}
@@ -1153,12 +1158,12 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
if (ret) {
rtrs_err_rl(s,
"Write request failed: error=%d path=%s [%s:%u]\n",
- ret, kobject_name(&sess->kobj), sess->hca_name,
- sess->hca_port);
+ ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
- atomic_dec(&sess->stats->inflight);
+ atomic_dec(&clt_path->stats->inflight);
if (req->sg_cnt)
- ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist,
+ ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist,
req->sg_cnt, req->dir);
}
@@ -1168,10 +1173,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req)
static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
{
struct rtrs_clt_con *con = req->con;
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
struct rtrs_msg_rdma_read *msg;
- struct rtrs_ib_dev *dev = sess->s.dev;
+ struct rtrs_ib_dev *dev = clt_path->s.dev;
struct ib_reg_wr rwr;
struct ib_send_wr *wr = NULL;
@@ -1181,10 +1186,10 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len;
- if (tsize > sess->chunk_size) {
+ if (tsize > clt_path->chunk_size) {
rtrs_wrn(s,
"Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n",
- tsize, sess->chunk_size);
+ tsize, clt_path->chunk_size);
return -EMSGSIZE;
}
@@ -1254,15 +1259,15 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
*/
rtrs_clt_update_all_stats(req, READ);
- ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id],
+ ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id],
req->data_len, imm, wr);
if (ret) {
rtrs_err_rl(s,
"Read request failed: error=%d path=%s [%s:%u]\n",
- ret, kobject_name(&sess->kobj), sess->hca_name,
- sess->hca_port);
+ ret, kobject_name(&clt_path->kobj), clt_path->hca_name,
+ clt_path->hca_port);
if (req->mp_policy == MP_POLICY_MIN_INFLIGHT)
- atomic_dec(&sess->stats->inflight);
+ atomic_dec(&clt_path->stats->inflight);
req->need_inv = false;
if (req->sg_cnt)
ib_dma_unmap_sg(dev->ib_dev, req->sglist,
@@ -1277,21 +1282,21 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req)
* @clt: clt context
* @fail_req: a failed io request.
*/
-static int rtrs_clt_failover_req(struct rtrs_clt *clt,
+static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt,
struct rtrs_clt_io_req *fail_req)
{
- struct rtrs_clt_sess *alive_sess;
+ struct rtrs_clt_path *alive_path;
struct rtrs_clt_io_req *req;
int err = -ECONNABORTED;
struct path_it it;
rcu_read_lock();
for (path_it_init(&it, clt);
- (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num;
+ (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num;
it.i++) {
- if (READ_ONCE(alive_sess->state) != RTRS_CLT_CONNECTED)
+ if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED)
continue;
- req = rtrs_clt_get_copy_req(alive_sess, fail_req);
+ req = rtrs_clt_get_copy_req(alive_path, fail_req);
if (req->dir == DMA_TO_DEVICE)
err = rtrs_clt_write_req(req);
else
@@ -1301,7 +1306,7 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt,
continue;
}
/* Success path */
- rtrs_clt_inc_failover_cnt(alive_sess->stats);
+ rtrs_clt_inc_failover_cnt(alive_path->stats);
break;
}
path_it_deinit(&it);
@@ -1310,16 +1315,16 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt,
return err;
}
-static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
+static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
struct rtrs_clt_io_req *req;
int i, err;
- if (!sess->reqs)
+ if (!clt_path->reqs)
return;
- for (i = 0; i < sess->queue_depth; ++i) {
- req = &sess->reqs[i];
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
if (!req->in_use)
continue;
@@ -1337,38 +1342,39 @@ static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess)
}
}
-static void free_sess_reqs(struct rtrs_clt_sess *sess)
+static void free_path_reqs(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_io_req *req;
int i;
- if (!sess->reqs)
+ if (!clt_path->reqs)
return;
- for (i = 0; i < sess->queue_depth; ++i) {
- req = &sess->reqs[i];
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
if (req->mr)
ib_dereg_mr(req->mr);
kfree(req->sge);
- rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1);
}
- kfree(sess->reqs);
- sess->reqs = NULL;
+ kfree(clt_path->reqs);
+ clt_path->reqs = NULL;
}
-static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
+static int alloc_path_reqs(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_io_req *req;
int i, err = -ENOMEM;
- sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs),
- GFP_KERNEL);
- if (!sess->reqs)
+ clt_path->reqs = kcalloc(clt_path->queue_depth,
+ sizeof(*clt_path->reqs),
+ GFP_KERNEL);
+ if (!clt_path->reqs)
return -ENOMEM;
- for (i = 0; i < sess->queue_depth; ++i) {
- req = &sess->reqs[i];
- req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL,
- sess->s.dev->ib_dev,
+ for (i = 0; i < clt_path->queue_depth; ++i) {
+ req = &clt_path->reqs[i];
+ req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL,
+ clt_path->s.dev->ib_dev,
DMA_TO_DEVICE,
rtrs_clt_rdma_done);
if (!req->iu)
@@ -1378,13 +1384,14 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
if (!req->sge)
goto out;
- req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
- sess->max_pages_per_mr);
+ req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd,
+ IB_MR_TYPE_MEM_REG,
+ clt_path->max_pages_per_mr);
if (IS_ERR(req->mr)) {
err = PTR_ERR(req->mr);
req->mr = NULL;
- pr_err("Failed to alloc sess->max_pages_per_mr %d\n",
- sess->max_pages_per_mr);
+ pr_err("Failed to alloc clt_path->max_pages_per_mr %d\n",
+ clt_path->max_pages_per_mr);
goto out;
}
@@ -1394,12 +1401,12 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess)
return 0;
out:
- free_sess_reqs(sess);
+ free_path_reqs(clt_path);
return err;
}
-static int alloc_permits(struct rtrs_clt *clt)
+static int alloc_permits(struct rtrs_clt_sess *clt)
{
unsigned int chunk_bits;
int err, i;
@@ -1433,7 +1440,7 @@ out_err:
return err;
}
-static void free_permits(struct rtrs_clt *clt)
+static void free_permits(struct rtrs_clt_sess *clt)
{
if (clt->permits_map) {
size_t sz = clt->queue_depth;
@@ -1447,13 +1454,13 @@ static void free_permits(struct rtrs_clt *clt)
clt->permits = NULL;
}
-static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
+static void query_fast_reg_mode(struct rtrs_clt_path *clt_path)
{
struct ib_device *ib_dev;
u64 max_pages_per_mr;
int mr_page_shift;
- ib_dev = sess->s.dev->ib_dev;
+ ib_dev = clt_path->s.dev->ib_dev;
/*
* Use the smallest page size supported by the HCA, down to a
@@ -1463,24 +1470,24 @@ static void query_fast_reg_mode(struct rtrs_clt_sess *sess)
mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1);
max_pages_per_mr = ib_dev->attrs.max_mr_size;
do_div(max_pages_per_mr, (1ull << mr_page_shift));
- sess->max_pages_per_mr =
- min3(sess->max_pages_per_mr, (u32)max_pages_per_mr,
+ clt_path->max_pages_per_mr =
+ min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr,
ib_dev->attrs.max_fast_reg_page_list_len);
- sess->clt->max_segments =
- min(sess->max_pages_per_mr, sess->clt->max_segments);
+ clt_path->clt->max_segments =
+ min(clt_path->max_pages_per_mr, clt_path->clt->max_segments);
}
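
query_fast_reg_mode() above derives the per-MR page budget: divide the HCA's maximum MR size by the smallest supported page size (floored at 4 KiB), then clamp against the fast-reg page-list limit and the current value. A model of that min3() computation with illustrative device attributes:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t max_mr_size = 1ULL << 32;	/* 4 GiB, example */
		int mr_page_shift = 12;			/* 4 KiB pages */
		uint32_t max_fast_reg = 256;		/* page-list limit */
		uint32_t cur_max = 128;			/* current bound */

		uint64_t pages = max_mr_size >> mr_page_shift;
		uint32_t res = cur_max;

		if (pages < res)
			res = (uint32_t)pages;
		if (max_fast_reg < res)
			res = max_fast_reg;

		printf("max_pages_per_mr = %u\n", res);	/* min of all three */
		return 0;
	}
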
-static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess,
+static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path,
enum rtrs_clt_state new_state,
enum rtrs_clt_state *old_state)
{
bool changed;
- spin_lock_irq(&sess->state_wq.lock);
+ spin_lock_irq(&clt_path->state_wq.lock);
if (old_state)
- *old_state = sess->state;
- changed = rtrs_clt_change_state(sess, new_state);
- spin_unlock_irq(&sess->state_wq.lock);
+ *old_state = clt_path->state;
+ changed = rtrs_clt_change_state(clt_path, new_state);
+ spin_unlock_irq(&clt_path->state_wq.lock);
return changed;
}
@@ -1492,9 +1499,9 @@ static void rtrs_clt_hb_err_handler(struct rtrs_con *c)
rtrs_rdma_error_recovery(con);
}
-static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
+static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path)
{
- rtrs_init_hb(&sess->s, &io_comp_cqe,
+ rtrs_init_hb(&clt_path->s, &io_comp_cqe,
RTRS_HB_INTERVAL_MS,
RTRS_HB_MISSED_MAX,
rtrs_clt_hb_err_handler,
@@ -1504,17 +1511,17 @@ static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess)
static void rtrs_clt_reconnect_work(struct work_struct *work);
static void rtrs_clt_close_work(struct work_struct *work);
-static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
+static struct rtrs_clt_path *alloc_path(struct rtrs_clt_sess *clt,
const struct rtrs_addr *path,
size_t con_num, u32 nr_poll_queues)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int err = -ENOMEM;
int cpu;
size_t total_con;
- sess = kzalloc(sizeof(*sess), GFP_KERNEL);
- if (!sess)
+ clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL);
+ if (!clt_path)
goto err;
/*
@@ -1522,20 +1529,21 @@ static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
* +1: Extra connection for user messages
*/
total_con = con_num + nr_poll_queues + 1;
- sess->s.con = kcalloc(total_con, sizeof(*sess->s.con), GFP_KERNEL);
- if (!sess->s.con)
- goto err_free_sess;
+ clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con),
+ GFP_KERNEL);
+ if (!clt_path->s.con)
+ goto err_free_path;
- sess->s.con_num = total_con;
- sess->s.irq_con_num = con_num + 1;
+ clt_path->s.con_num = total_con;
+ clt_path->s.irq_con_num = con_num + 1;
- sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
- if (!sess->stats)
+ clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL);
+ if (!clt_path->stats)
goto err_free_con;
- mutex_init(&sess->init_mutex);
- uuid_gen(&sess->s.uuid);
- memcpy(&sess->s.dst_addr, path->dst,
+ mutex_init(&clt_path->init_mutex);
+ uuid_gen(&clt_path->s.uuid);
+ memcpy(&clt_path->s.dst_addr, path->dst,
rdma_addr_size((struct sockaddr *)path->dst));
/*
@@ -1544,53 +1552,54 @@ static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt,
* the sess->src_addr will contain only zeros, which is then fine.
*/
if (path->src)
- memcpy(&sess->s.src_addr, path->src,
+ memcpy(&clt_path->s.src_addr, path->src,
rdma_addr_size((struct sockaddr *)path->src));
- strscpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname));
- sess->clt = clt;
- sess->max_pages_per_mr = RTRS_MAX_SEGMENTS;
- init_waitqueue_head(&sess->state_wq);
- sess->state = RTRS_CLT_CONNECTING;
- atomic_set(&sess->connected_cnt, 0);
- INIT_WORK(&sess->close_work, rtrs_clt_close_work);
- INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work);
- rtrs_clt_init_hb(sess);
-
- sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry));
- if (!sess->mp_skip_entry)
+ strscpy(clt_path->s.sessname, clt->sessname,
+ sizeof(clt_path->s.sessname));
+ clt_path->clt = clt;
+ clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS;
+ init_waitqueue_head(&clt_path->state_wq);
+ clt_path->state = RTRS_CLT_CONNECTING;
+ atomic_set(&clt_path->connected_cnt, 0);
+ INIT_WORK(&clt_path->close_work, rtrs_clt_close_work);
+ INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work);
+ rtrs_clt_init_hb(clt_path);
+
+ clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry));
+ if (!clt_path->mp_skip_entry)
goto err_free_stats;
for_each_possible_cpu(cpu)
- INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu));
+ INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu));
- err = rtrs_clt_init_stats(sess->stats);
+ err = rtrs_clt_init_stats(clt_path->stats);
if (err)
goto err_free_percpu;
- return sess;
+ return clt_path;
err_free_percpu:
- free_percpu(sess->mp_skip_entry);
+ free_percpu(clt_path->mp_skip_entry);
err_free_stats:
- kfree(sess->stats);
+ kfree(clt_path->stats);
err_free_con:
- kfree(sess->s.con);
-err_free_sess:
- kfree(sess);
+ kfree(clt_path->s.con);
+err_free_path:
+ kfree(clt_path);
err:
return ERR_PTR(err);
}
-void free_sess(struct rtrs_clt_sess *sess)
+void free_path(struct rtrs_clt_path *clt_path)
{
- free_percpu(sess->mp_skip_entry);
- mutex_destroy(&sess->init_mutex);
- kfree(sess->s.con);
- kfree(sess->rbufs);
- kfree(sess);
+ free_percpu(clt_path->mp_skip_entry);
+ mutex_destroy(&clt_path->init_mutex);
+ kfree(clt_path->s.con);
+ kfree(clt_path->rbufs);
+ kfree(clt_path);
}
-static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
+static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid)
{
struct rtrs_clt_con *con;
@@ -1601,28 +1610,28 @@ static int create_con(struct rtrs_clt_sess *sess, unsigned int cid)
/* Map first two connections to the first CPU */
con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
con->c.cid = cid;
- con->c.sess = &sess->s;
+ con->c.path = &clt_path->s;
/* Align with srv, init as 1 */
atomic_set(&con->c.wr_cnt, 1);
mutex_init(&con->con_mutex);
- sess->s.con[cid] = &con->c;
+ clt_path->s.con[cid] = &con->c;
return 0;
}
static void destroy_con(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
- sess->s.con[con->c.cid] = NULL;
+ clt_path->s.con[con->c.cid] = NULL;
mutex_destroy(&con->con_mutex);
kfree(con);
}
static int create_con_cq_qp(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit;
int err, cq_vector;
struct rtrs_msg_rkey_rsp *rsp;
@@ -1631,7 +1640,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
if (con->c.cid == 0) {
max_send_sge = 1;
/* We must be the first here */
- if (WARN_ON(sess->s.dev))
+ if (WARN_ON(clt_path->s.dev))
return -EINVAL;
/*
@@ -1639,16 +1648,16 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
* Be careful not to close user connection before ib dev
* is gracefully put.
*/
- sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
+ clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device,
&dev_pd);
- if (!sess->s.dev) {
- rtrs_wrn(sess->clt,
+ if (!clt_path->s.dev) {
+ rtrs_wrn(clt_path->clt,
"rtrs_ib_dev_find_get_or_add(): no memory\n");
return -ENOMEM;
}
- sess->s.dev_ref = 1;
- query_fast_reg_mode(sess);
- wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
+ clt_path->s.dev_ref = 1;
+ query_fast_reg_mode(clt_path);
+ wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
/*
* Two (request + registration) completion for send
* Two for recv if always_invalidate is set on server
@@ -1665,27 +1674,28 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
* This is always true if user connection (cid == 0) is
* established first.
*/
- if (WARN_ON(!sess->s.dev))
+ if (WARN_ON(!clt_path->s.dev))
return -EINVAL;
- if (WARN_ON(!sess->queue_depth))
+ if (WARN_ON(!clt_path->queue_depth))
return -EINVAL;
- wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
+ wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr;
/* Shared between connections */
- sess->s.dev_ref++;
+ clt_path->s.dev_ref++;
max_send_wr = min_t(int, wr_limit,
/* QD * (REQ + RSP + FR REGS or INVS) + drain */
- sess->queue_depth * 3 + 1);
+ clt_path->queue_depth * 3 + 1);
max_recv_wr = min_t(int, wr_limit,
- sess->queue_depth * 3 + 1);
+ clt_path->queue_depth * 3 + 1);
max_send_sge = 2;
}
atomic_set(&con->c.sq_wr_avail, max_send_wr);
cq_num = max_send_wr + max_recv_wr;
/* alloc iu to recv new rkey reply when server reports flags set */
- if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
+ if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) {
con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp),
- GFP_KERNEL, sess->s.dev->ib_dev,
+ GFP_KERNEL,
+ clt_path->s.dev->ib_dev,
DMA_FROM_DEVICE,
rtrs_clt_rdma_done);
if (!con->rsp_ius)
@@ -1693,13 +1703,13 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
con->queue_num = cq_num;
}
cq_num = max_send_wr + max_recv_wr;
- cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors;
- if (con->c.cid >= sess->s.irq_con_num)
- err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
+ cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
+ if (con->c.cid >= clt_path->s.irq_con_num)
+ err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
cq_vector, cq_num, max_send_wr,
max_recv_wr, IB_POLL_DIRECT);
else
- err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge,
+ err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
cq_vector, cq_num, max_send_wr,
max_recv_wr, IB_POLL_SOFTIRQ);
/*
@@ -1711,7 +1721,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con)
static void destroy_con_cq_qp(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
/*
* Be careful here: destroy_con_cq_qp() can be called even
@@ -1720,13 +1730,14 @@ static void destroy_con_cq_qp(struct rtrs_clt_con *con)
lockdep_assert_held(&con->con_mutex);
rtrs_cq_qp_destroy(&con->c);
if (con->rsp_ius) {
- rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_num);
+ rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev,
+ con->queue_num);
con->rsp_ius = NULL;
con->queue_num = 0;
}
- if (sess->s.dev_ref && !--sess->s.dev_ref) {
- rtrs_ib_dev_put(sess->s.dev);
- sess->s.dev = NULL;
+ if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) {
+ rtrs_ib_dev_put(clt_path->s.dev);
+ clt_path->s.dev = NULL;
}
}
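
The "if (dev_ref && !--dev_ref)" pattern above releases the shared IB device only when the last sharing connection drops its reference, and tolerates teardown paths that never took one. A single-threaded toy of the same idiom, assuming the caller serializes access (the driver holds con_mutex; this sketch takes no locks):

#include <stdio.h>

struct dev { int dev_ref; };

/* Last holder releases; a zero count means we never attached a device. */
static void put_dev(struct dev *d)
{
	if (d->dev_ref && !--d->dev_ref)
		printf("releasing device\n");	/* rtrs_ib_dev_put() stand-in */
}

int main(void)
{
	struct dev d = { .dev_ref = 2 };	/* two connections share it */

	put_dev(&d);	/* ref drops to 1 */
	put_dev(&d);	/* ref hits 0: release */
	put_dev(&d);	/* already released: no-op */
	return 0;
}
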
@@ -1745,7 +1756,7 @@ static void destroy_cm(struct rtrs_clt_con *con)
static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
{
- struct rtrs_sess *s = con->c.sess;
+ struct rtrs_path *s = con->c.path;
int err;
mutex_lock(&con->con_mutex);
@@ -1764,8 +1775,8 @@ static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con)
static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_sess *clt = clt_path->clt;
struct rtrs_msg_conn_req msg;
struct rdma_conn_param param;
@@ -1782,11 +1793,11 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
.magic = cpu_to_le16(RTRS_MAGIC),
.version = cpu_to_le16(RTRS_PROTO_VER),
.cid = cpu_to_le16(con->c.cid),
- .cid_num = cpu_to_le16(sess->s.con_num),
- .recon_cnt = cpu_to_le16(sess->s.recon_cnt),
+ .cid_num = cpu_to_le16(clt_path->s.con_num),
+ .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt),
};
- msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0;
- uuid_copy(&msg.sess_uuid, &sess->s.uuid);
+ msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0;
+ uuid_copy(&msg.sess_uuid, &clt_path->s.uuid);
uuid_copy(&msg.paths_uuid, &clt->paths_uuid);
err = rdma_connect_locked(con->c.cm_id, &param);
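
The connect request above is built with explicit cpu_to_le16() conversions so both peers agree on byte order regardless of host endianness. A userspace sketch of the same discipline using glibc's htole16()/le16toh(); the struct is an illustrative subset of rtrs_msg_conn_req and the constants are placeholders, not the driver's real values:

#include <endian.h>	/* glibc: htole16()/le16toh() */
#include <stdint.h>
#include <stdio.h>

/* Illustrative subset; the real message also carries recon_cnt,
 * first_conn and two UUIDs. */
struct conn_req {
	uint16_t magic;
	uint16_t version;
	uint16_t cid;
	uint16_t cid_num;
};

int main(void)
{
	struct conn_req msg = {
		.magic   = htole16(0x1BBD),	/* placeholder magic */
		.version = htole16(2),		/* placeholder version */
		.cid     = htole16(0),
		.cid_num = htole16(32),
	};

	/* Every field is little endian on the wire; decode symmetrically. */
	printf("cid_num decoded: %u\n", le16toh(msg.cid_num));
	return 0;
}
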
@@ -1799,8 +1810,8 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con)
static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
struct rdma_cm_event *ev)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
+ struct rtrs_clt_sess *clt = clt_path->clt;
const struct rtrs_msg_conn_rsp *msg;
u16 version, queue_depth;
int errno;
@@ -1831,31 +1842,32 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
if (con->c.cid == 0) {
queue_depth = le16_to_cpu(msg->queue_depth);
- if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) {
+ if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) {
rtrs_err(clt, "Error: queue depth changed\n");
/*
* Stop any more reconnection attempts
*/
- sess->reconnect_attempts = -1;
+ clt_path->reconnect_attempts = -1;
rtrs_err(clt,
"Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n");
return -ECONNRESET;
}
- if (!sess->rbufs) {
- sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs),
- GFP_KERNEL);
- if (!sess->rbufs)
+ if (!clt_path->rbufs) {
+ clt_path->rbufs = kcalloc(queue_depth,
+ sizeof(*clt_path->rbufs),
+ GFP_KERNEL);
+ if (!clt_path->rbufs)
return -ENOMEM;
}
- sess->queue_depth = queue_depth;
- sess->s.signal_interval = min_not_zero(queue_depth,
+ clt_path->queue_depth = queue_depth;
+ clt_path->s.signal_interval = min_not_zero(queue_depth,
(unsigned short) SERVICE_CON_QUEUE_DEPTH);
- sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
- sess->max_io_size = le32_to_cpu(msg->max_io_size);
- sess->flags = le32_to_cpu(msg->flags);
- sess->chunk_size = sess->max_io_size + sess->max_hdr_size;
+ clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size);
+ clt_path->max_io_size = le32_to_cpu(msg->max_io_size);
+ clt_path->flags = le32_to_cpu(msg->flags);
+ clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size;
/*
* Global IO size is always a minimum.
@@ -1866,20 +1878,20 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
* connections in parallel, use lock.
*/
mutex_lock(&clt->paths_mutex);
- clt->queue_depth = sess->queue_depth;
- clt->max_io_size = min_not_zero(sess->max_io_size,
+ clt->queue_depth = clt_path->queue_depth;
+ clt->max_io_size = min_not_zero(clt_path->max_io_size,
clt->max_io_size);
mutex_unlock(&clt->paths_mutex);
/*
* Cache the hca_port and hca_name for sysfs
*/
- sess->hca_port = con->c.cm_id->port_num;
- scnprintf(sess->hca_name, sizeof(sess->hca_name),
- sess->s.dev->ib_dev->name);
- sess->s.src_addr = con->c.cm_id->route.addr.src_addr;
+ clt_path->hca_port = con->c.cm_id->port_num;
+ scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name),
+ clt_path->s.dev->ib_dev->name);
+ clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr;
/* set for_new_clt, to allow future reconnect on any path */
- sess->for_new_clt = 1;
+ clt_path->for_new_clt = 1;
}
return 0;
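
To make the negotiation above concrete: on the first (user) connection the client adopts the server-reported queue depth, allocates one rbuf per slot, and derives chunk_size as max_io_size + max_hdr_size; a depth that changes across reconnects is fatal. A hedged sketch of that validation with invented message values:

#include <stdio.h>

struct path_caps {
	unsigned short queue_depth;	/* 0 until the first handshake */
	unsigned int max_io_size, max_hdr_size, chunk_size;
};

/* Returns 0 on success, -1 when a reconnect reports a different depth. */
static int adopt_caps(struct path_caps *p, unsigned short qd,
		      unsigned int io, unsigned int hdr)
{
	if (p->queue_depth && qd != p->queue_depth)
		return -1;
	p->queue_depth = qd;
	p->max_io_size = io;
	p->max_hdr_size = hdr;
	p->chunk_size = io + hdr;	/* one chunk per queue slot */
	return 0;
}

int main(void)
{
	struct path_caps p = { 0 };

	if (adopt_caps(&p, 128, 1 << 19, 4096))	/* made-up sizes */
		return 1;
	printf("chunk_size = %u\n", p.chunk_size);
	printf("reconnect with qd=64 %s\n",
	       adopt_caps(&p, 64, 1 << 19, 4096) ? "rejected" : "accepted");
	return 0;
}
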
@@ -1887,16 +1899,16 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con,
static inline void flag_success_on_conn(struct rtrs_clt_con *con)
{
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
- atomic_inc(&sess->connected_cnt);
+ atomic_inc(&clt_path->connected_cnt);
con->cm_err = 1;
}
static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
struct rdma_cm_event *ev)
{
- struct rtrs_sess *s = con->c.sess;
+ struct rtrs_path *s = con->c.path;
const struct rtrs_msg_conn_rsp *msg;
const char *rej_msg;
int status, errno;
@@ -1924,23 +1936,23 @@ static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con,
return -ECONNRESET;
}
-void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait)
+void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait)
{
- if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSING, NULL))
- queue_work(rtrs_wq, &sess->close_work);
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL))
+ queue_work(rtrs_wq, &clt_path->close_work);
if (wait)
- flush_work(&sess->close_work);
+ flush_work(&clt_path->close_work);
}
static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err)
{
if (con->cm_err == 1) {
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = to_clt_sess(con->c.sess);
- if (atomic_dec_and_test(&sess->connected_cnt))
+ clt_path = to_clt_path(con->c.path);
+ if (atomic_dec_and_test(&clt_path->connected_cnt))
- wake_up(&sess->state_wq);
+ wake_up(&clt_path->state_wq);
}
con->cm_err = cm_err;
}
@@ -1949,8 +1961,8 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *ev)
{
struct rtrs_clt_con *con = cm_id->context;
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
int cm_err = 0;
switch (ev->event) {
@@ -1968,7 +1980,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
* i.e. wake up without state change, but we set cm_err.
*/
flag_success_on_conn(con);
- wake_up(&sess->state_wq);
+ wake_up(&clt_path->state_wq);
return 0;
}
break;
@@ -1997,7 +2009,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
/*
* Device removal is a special case. Queue close and return 0.
*/
- rtrs_clt_close_conns(sess, false);
+ rtrs_clt_close_conns(clt_path, false);
return 0;
default:
rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n",
@@ -2020,13 +2032,13 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
static int create_cm(struct rtrs_clt_con *con)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_clt_sess *sess = to_clt_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_clt_path *clt_path = to_clt_path(s);
struct rdma_cm_id *cm_id;
int err;
cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
- sess->s.dst_addr.ss_family == AF_IB ?
+ clt_path->s.dst_addr.ss_family == AF_IB ?
RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id)) {
err = PTR_ERR(cm_id);
@@ -2042,8 +2054,8 @@ static int create_cm(struct rtrs_clt_con *con)
rtrs_err(s, "Set address reuse failed, err: %d\n", err);
goto destroy_cm;
}
- err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr,
- (struct sockaddr *)&sess->s.dst_addr,
+ err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
+ (struct sockaddr *)&clt_path->s.dst_addr,
RTRS_CONNECT_TIMEOUT_MS);
if (err) {
rtrs_err(s, "Failed to resolve address, err: %d\n", err);
@@ -2055,8 +2067,8 @@ static int create_cm(struct rtrs_clt_con *con)
* or session state was really changed to error by device removal.
*/
err = wait_event_interruptible_timeout(
- sess->state_wq,
- con->cm_err || sess->state != RTRS_CLT_CONNECTING,
+ clt_path->state_wq,
+ con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
if (err == 0 || err == -ERESTARTSYS) {
if (err == 0)
@@ -2068,7 +2080,7 @@ static int create_cm(struct rtrs_clt_con *con)
err = con->cm_err;
goto errr;
}
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) {
/* Device removal */
err = -ECONNABORTED;
goto errr;
@@ -2087,9 +2099,9 @@ destroy_cm:
return err;
}
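
The wait above parks the caller until either the CM callback records a result in con->cm_err or the path leaves CONNECTING, bounded by the connect timeout. A userspace analogue of wait_event_interruptible_timeout() built on a condition variable (signal handling is omitted; CLOCK_REALTIME and the 2000 ms bound are arbitrary choices for the sketch):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

enum { CONNECTING, CONNECTED, CONNECTING_ERR };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
static int state = CONNECTING;

/* Block until the state leaves CONNECTING or timeout_ms elapses;
 * returns 0 only if we ended up CONNECTED. */
static int wait_connected(int timeout_ms)
{
	struct timespec ts;
	int rc = 0, ok;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec  += timeout_ms / 1000;
	ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&lock);
	while (state == CONNECTING && rc != ETIMEDOUT)
		rc = pthread_cond_timedwait(&wq, &lock, &ts);
	ok = (state == CONNECTED);
	pthread_mutex_unlock(&lock);
	return ok ? 0 : -1;
}

/* Stand-in for the CM event handler flipping the state. */
static void *cm_handler(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	state = CONNECTED;
	pthread_cond_broadcast(&wq);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, cm_handler, NULL);
	printf("wait_connected() -> %d\n", wait_connected(2000));
	pthread_join(t, NULL);
	return 0;
}
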
-static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
+static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
int up;
/*
@@ -2113,19 +2125,19 @@ static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess)
mutex_unlock(&clt->paths_ev_mutex);
/* Mark session as established */
- sess->established = true;
- sess->reconnect_attempts = 0;
- sess->stats->reconnects.successful_cnt++;
+ clt_path->established = true;
+ clt_path->reconnect_attempts = 0;
+ clt_path->stats->reconnects.successful_cnt++;
}
-static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
+static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
- if (!sess->established)
+ if (!clt_path->established)
return;
- sess->established = false;
+ clt_path->established = false;
mutex_lock(&clt->paths_ev_mutex);
WARN_ON(!clt->paths_up);
if (--clt->paths_up == 0)
@@ -2133,19 +2145,19 @@ static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess)
mutex_unlock(&clt->paths_ev_mutex);
}
-static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
+static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path)
{
struct rtrs_clt_con *con;
unsigned int cid;
- WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED);
+ WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED);
/*
* Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes
* exactly in between. Start destroying after it finishes.
*/
- mutex_lock(&sess->init_mutex);
- mutex_unlock(&sess->init_mutex);
+ mutex_lock(&clt_path->init_mutex);
+ mutex_unlock(&clt_path->init_mutex);
/*
* All IO paths must observe !CONNECTED state before we
@@ -2153,7 +2165,7 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
*/
synchronize_rcu();
- rtrs_stop_hb(&sess->s);
+ rtrs_stop_hb(&clt_path->s);
/*
* The order is utterly crucial: first disconnect and complete all
@@ -2162,15 +2174,15 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
* eventually notify upper layer about session disconnection.
*/
- for (cid = 0; cid < sess->s.con_num; cid++) {
- if (!sess->s.con[cid])
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ if (!clt_path->s.con[cid])
break;
- con = to_clt_con(sess->s.con[cid]);
+ con = to_clt_con(clt_path->s.con[cid]);
stop_cm(con);
}
- fail_all_outstanding_reqs(sess);
- free_sess_reqs(sess);
- rtrs_clt_sess_down(sess);
+ fail_all_outstanding_reqs(clt_path);
+ free_path_reqs(clt_path);
+ rtrs_clt_path_down(clt_path);
/*
* Wait for graceful shutdown, namely when peer side invokes
@@ -2180,13 +2192,14 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
* since CM does not fire anything. That is fine, we are not in
* hurry.
*/
- wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt),
+ wait_event_timeout(clt_path->state_wq,
+ !atomic_read(&clt_path->connected_cnt),
msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
- for (cid = 0; cid < sess->s.con_num; cid++) {
- if (!sess->s.con[cid])
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ if (!clt_path->s.con[cid])
break;
- con = to_clt_con(sess->s.con[cid]);
+ con = to_clt_con(clt_path->s.con[cid]);
mutex_lock(&con->con_mutex);
destroy_con_cq_qp(con);
mutex_unlock(&con->con_mutex);
@@ -2195,26 +2208,26 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess)
}
}
-static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path,
- struct rtrs_clt_sess *sess,
- struct rtrs_clt_sess *next)
+static inline bool xchg_paths(struct rtrs_clt_path __rcu **rcu_ppcpu_path,
+ struct rtrs_clt_path *clt_path,
+ struct rtrs_clt_path *next)
{
- struct rtrs_clt_sess **ppcpu_path;
+ struct rtrs_clt_path **ppcpu_path;
/* Call cmpxchg() without sparse warnings */
ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path;
- return sess == cmpxchg(ppcpu_path, sess, next);
+ return clt_path == cmpxchg(ppcpu_path, clt_path, next);
}
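
xchg_paths() leans on cmpxchg() semantics: the per-CPU slot is replaced only if it still holds the path being removed, so a concurrent IO path that already advanced the pointer is never clobbered. A C11 sketch of the same contract, with atomic_compare_exchange_strong() standing in for the kernel's cmpxchg():

#include <stdatomic.h>
#include <stdio.h>

struct path { const char *name; };

/* Replace *slot with next only if it still points at old; returns
 * nonzero on success, mirroring "old == cmpxchg(slot, old, next)". */
static int xchg_path(_Atomic(struct path *) *slot,
		     struct path *old, struct path *next)
{
	return atomic_compare_exchange_strong(slot, &old, next);
}

int main(void)
{
	struct path a = { "pathA" }, b = { "pathB" };
	_Atomic(struct path *) slot = &a;

	printf("a -> b: %d\n", xchg_path(&slot, &a, &b));	/* 1: swapped */
	printf("a -> b: %d\n", xchg_path(&slot, &a, &b));	/* 0: lost race */
	printf("slot holds %s\n", atomic_load(&slot)->name);
	return 0;
}
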
-static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
+static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
- struct rtrs_clt_sess *next;
+ struct rtrs_clt_sess *clt = clt_path->clt;
+ struct rtrs_clt_path *next;
bool wait_for_grace = false;
int cpu;
mutex_lock(&clt->paths_mutex);
- list_del_rcu(&sess->s.entry);
+ list_del_rcu(&clt_path->s.entry);
/* Make sure everybody observes path removal. */
synchronize_rcu();
@@ -2255,7 +2268,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
* removed. If @clt_path is the last element, then @next is NULL.
*/
rcu_read_lock();
- next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry,
+ next = list_next_or_null_rr_rcu(&clt->paths_list, &clt_path->s.entry,
typeof(*next), s.entry);
rcu_read_unlock();
@@ -2264,11 +2277,11 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
* removed, so change the pointer manually.
*/
for_each_possible_cpu(cpu) {
- struct rtrs_clt_sess __rcu **ppcpu_path;
+ struct rtrs_clt_path __rcu **ppcpu_path;
ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu);
if (rcu_dereference_protected(*ppcpu_path,
- lockdep_is_held(&clt->paths_mutex)) != sess)
+ lockdep_is_held(&clt->paths_mutex)) != clt_path)
/*
* synchronize_rcu() was called just after deleting
* entry from the list, thus IO code path cannot
@@ -2281,7 +2294,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
* We race with IO code path, which also changes pointer,
* thus we have to be careful not to overwrite it.
*/
- if (xchg_sessions(ppcpu_path, sess, next))
+ if (xchg_paths(ppcpu_path, clt_path, next))
/*
* @ppcpu_path was successfully replaced with @next,
* that means that someone could also pick up the
@@ -2296,29 +2309,29 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess)
mutex_unlock(&clt->paths_mutex);
}
-static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess)
+static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt *clt = sess->clt;
+ struct rtrs_clt_sess *clt = clt_path->clt;
mutex_lock(&clt->paths_mutex);
clt->paths_num++;
- list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+ list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
mutex_unlock(&clt->paths_mutex);
}
static void rtrs_clt_close_work(struct work_struct *work)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = container_of(work, struct rtrs_clt_sess, close_work);
+ clt_path = container_of(work, struct rtrs_clt_path, close_work);
- cancel_delayed_work_sync(&sess->reconnect_dwork);
- rtrs_clt_stop_and_destroy_conns(sess);
- rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSED, NULL);
+ cancel_delayed_work_sync(&clt_path->reconnect_dwork);
+ rtrs_clt_stop_and_destroy_conns(clt_path);
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL);
}
-static int init_conns(struct rtrs_clt_sess *sess)
+static int init_conns(struct rtrs_clt_path *clt_path)
{
unsigned int cid;
int err;
@@ -2328,31 +2341,31 @@ static int init_conns(struct rtrs_clt_sess *sess)
* to avoid clashes with previous sessions not yet closed
* on the server side.
*/
- sess->s.recon_cnt++;
+ clt_path->s.recon_cnt++;
/* Establish all RDMA connections */
- for (cid = 0; cid < sess->s.con_num; cid++) {
- err = create_con(sess, cid);
+ for (cid = 0; cid < clt_path->s.con_num; cid++) {
+ err = create_con(clt_path, cid);
if (err)
goto destroy;
- err = create_cm(to_clt_con(sess->s.con[cid]));
+ err = create_cm(to_clt_con(clt_path->s.con[cid]));
if (err) {
- destroy_con(to_clt_con(sess->s.con[cid]));
+ destroy_con(to_clt_con(clt_path->s.con[cid]));
goto destroy;
}
}
- err = alloc_sess_reqs(sess);
+ err = alloc_path_reqs(clt_path);
if (err)
goto destroy;
- rtrs_start_hb(&sess->s);
+ rtrs_start_hb(&clt_path->s);
return 0;
destroy:
while (cid--) {
- struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]);
+ struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]);
stop_cm(con);
@@ -2367,7 +2380,7 @@ destroy:
* doing rdma_resolve_addr(), switch to CONNECTION_ERR state
* manually to keep reconnecting.
*/
- rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
return err;
}
@@ -2375,31 +2388,32 @@ destroy:
static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_iu *iu;
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
- rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(sess->clt, "Sess info request send failed: %s\n",
+ rtrs_err(clt_path->clt, "Path info request send failed: %s\n",
ib_wc_status_msg(wc->status));
- rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
+ rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL);
return;
}
rtrs_clt_update_wc_stats(con);
}
-static int process_info_rsp(struct rtrs_clt_sess *sess,
+static int process_info_rsp(struct rtrs_clt_path *clt_path,
const struct rtrs_msg_info_rsp *msg)
{
unsigned int sg_cnt, total_len;
int i, sgi;
sg_cnt = le16_to_cpu(msg->sg_cnt);
- if (!sg_cnt || (sess->queue_depth % sg_cnt)) {
- rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n",
+ if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) {
+ rtrs_err(clt_path->clt,
+ "Incorrect sg_cnt %d, is not multiple\n",
sg_cnt);
return -EINVAL;
}
@@ -2408,15 +2422,15 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
* Check if IB immediate data size is enough to hold the mem_id and
* the offset inside the memory chunk.
*/
- if ((ilog2(sg_cnt - 1) + 1) + (ilog2(sess->chunk_size - 1) + 1) >
+ if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) >
MAX_IMM_PAYL_BITS) {
- rtrs_err(sess->clt,
+ rtrs_err(clt_path->clt,
"RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n",
- MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size);
+ MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size);
return -EINVAL;
}
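
The check above budgets the RDMA immediate field: ilog2(sg_cnt - 1) + 1 bits for the buffer index plus ilog2(chunk_size - 1) + 1 bits for the offset must fit into MAX_IMM_PAYL_BITS. A standalone sketch of that bit accounting; 28 is a placeholder for the driver's constant and the sizes are invented:

#include <stdio.h>

#define MAX_IMM_PAYL_BITS 28	/* placeholder; see rtrs-pri.h for the real one */

/* Smallest bit count representing values 0..n-1, i.e. ilog2(n-1)+1. */
static int bits_for(unsigned int n)
{
	int b = 0;

	for (n--; n; n >>= 1)
		b++;
	return b ? b : 1;
}

int main(void)
{
	unsigned int sg_cnt = 128, chunk_size = 1u << 19;	/* invented */
	int need = bits_for(sg_cnt) + bits_for(chunk_size);

	printf("need %d of %d imm bits: %s\n", need, MAX_IMM_PAYL_BITS,
	       need > MAX_IMM_PAYL_BITS ? "too large" : "fits");
	return 0;
}
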
total_len = 0;
- for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) {
+ for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) {
const struct rtrs_sg_desc *desc = &msg->desc[sgi];
u32 len, rkey;
u64 addr;
@@ -2427,26 +2441,28 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
total_len += len;
- if (!len || (len % sess->chunk_size)) {
- rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi,
+ if (!len || (len % clt_path->chunk_size)) {
+ rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n",
+ sgi,
len);
return -EINVAL;
}
- for ( ; len && i < sess->queue_depth; i++) {
- sess->rbufs[i].addr = addr;
- sess->rbufs[i].rkey = rkey;
+ for ( ; len && i < clt_path->queue_depth; i++) {
+ clt_path->rbufs[i].addr = addr;
+ clt_path->rbufs[i].rkey = rkey;
- len -= sess->chunk_size;
- addr += sess->chunk_size;
+ len -= clt_path->chunk_size;
+ addr += clt_path->chunk_size;
}
}
/* Sanity check */
- if (sgi != sg_cnt || i != sess->queue_depth) {
- rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n");
+ if (sgi != sg_cnt || i != clt_path->queue_depth) {
+ rtrs_err(clt_path->clt,
+ "Incorrect sg vector, not fully mapped\n");
return -EINVAL;
}
- if (total_len != sess->chunk_size * sess->queue_depth) {
- rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len);
+ if (total_len != clt_path->chunk_size * clt_path->queue_depth) {
+ rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len);
return -EINVAL;
}
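
Putting the loop above in miniature: each descriptor advertises a region of len bytes, and the client fans it out into len / chunk_size consecutive (addr, rkey) rbuf slots until the queue is fully mapped. A sketch with one fabricated descriptor:

#include <stdint.h>
#include <stdio.h>

struct rbuf { uint64_t addr; uint32_t rkey; };

int main(void)
{
	enum { QUEUE_DEPTH = 8, CHUNK = 4096 };	/* illustrative sizes */
	struct rbuf rbufs[QUEUE_DEPTH];

	/* One fabricated region covering all eight chunks. */
	uint64_t addr = 0x10000000;
	uint32_t rkey = 0xabcd;
	unsigned int len = QUEUE_DEPTH * CHUNK, i = 0;

	/* Same expansion as the driver: one slot per chunk of the region. */
	for (; len && i < QUEUE_DEPTH; i++) {
		rbufs[i].addr = addr;
		rbufs[i].rkey = rkey;
		len  -= CHUNK;
		addr += CHUNK;
	}
	printf("mapped %u slots, last addr 0x%llx\n", i,
	       (unsigned long long)rbufs[i - 1].addr);
	return 0;
}
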
@@ -2456,7 +2472,7 @@ static int process_info_rsp(struct rtrs_clt_sess *sess,
static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context);
- struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
+ struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
struct rtrs_msg_info_rsp *msg;
enum rtrs_clt_state state;
struct rtrs_iu *iu;
@@ -2468,37 +2484,37 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(con->c.cid);
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
if (wc->status != IB_WC_SUCCESS) {
- rtrs_err(sess->clt, "Sess info response recv failed: %s\n",
+ rtrs_err(clt_path->clt, "Path info response recv failed: %s\n",
ib_wc_status_msg(wc->status));
goto out;
}
WARN_ON(wc->opcode != IB_WC_RECV);
if (wc->byte_len < sizeof(*msg)) {
- rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
wc->byte_len);
goto out;
}
- ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) {
- rtrs_err(sess->clt, "Sess info response is malformed: type %d\n",
+ rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n",
le16_to_cpu(msg->type));
goto out;
}
rx_sz = sizeof(*msg);
rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt);
if (wc->byte_len < rx_sz) {
- rtrs_err(sess->clt, "Sess info response is malformed: size %d\n",
+ rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n",
wc->byte_len);
goto out;
}
- err = process_info_rsp(sess, msg);
+ err = process_info_rsp(clt_path, msg);
if (err)
goto out;
- err = post_recv_sess(sess);
+ err = post_recv_path(clt_path);
if (err)
goto out;
@@ -2506,25 +2522,25 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
out:
rtrs_clt_update_wc_stats(con);
- rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
- rtrs_clt_change_state_get_old(sess, state, NULL);
+ rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1);
+ rtrs_clt_change_state_get_old(clt_path, state, NULL);
}
-static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
+static int rtrs_send_path_info(struct rtrs_clt_path *clt_path)
{
- struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]);
+ struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]);
struct rtrs_msg_info_req *msg;
struct rtrs_iu *tx_iu, *rx_iu;
size_t rx_sz;
int err;
rx_sz = sizeof(struct rtrs_msg_info_rsp);
- rx_sz += sizeof(struct rtrs_sg_desc) * sess->queue_depth;
+ rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth;
tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL,
- sess->s.dev->ib_dev, DMA_TO_DEVICE,
+ clt_path->s.dev->ib_dev, DMA_TO_DEVICE,
rtrs_clt_info_req_done);
- rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev,
DMA_FROM_DEVICE, rtrs_clt_info_rsp_done);
if (!tx_iu || !rx_iu) {
err = -ENOMEM;
@@ -2533,33 +2549,34 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
/* Prepare for getting info response */
err = rtrs_iu_post_recv(&usr_con->c, rx_iu);
if (err) {
- rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err);
+ rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err);
goto out;
}
rx_iu = NULL;
msg = tx_iu->buf;
msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ);
- memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname));
+ memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname));
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev,
+ tx_iu->dma_addr,
tx_iu->size, DMA_TO_DEVICE);
/* Send info request */
err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL);
if (err) {
- rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err);
+ rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err);
goto out;
}
tx_iu = NULL;
/* Wait for state change */
- wait_event_interruptible_timeout(sess->state_wq,
- sess->state != RTRS_CLT_CONNECTING,
+ wait_event_interruptible_timeout(clt_path->state_wq,
+ clt_path->state != RTRS_CLT_CONNECTING,
msecs_to_jiffies(
RTRS_CONNECT_TIMEOUT_MS));
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) {
- if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR)
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) {
+ if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR)
err = -ECONNRESET;
else
err = -ETIMEDOUT;
@@ -2567,82 +2584,82 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess)
out:
if (tx_iu)
- rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1);
if (rx_iu)
- rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1);
if (err)
/* If we've never taken async path because of malloc problems */
- rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL);
+ rtrs_clt_change_state_get_old(clt_path,
+ RTRS_CLT_CONNECTING_ERR, NULL);
return err;
}
/**
- * init_sess() - establishes all session connections and does handshake
- * @sess: client session.
+ * init_path() - establishes all path connections and does handshake
+ * @clt_path: client path.
* In case of error, a full close or reconnect procedure should be taken,
* because reconnect or close async works can be started.
*/
-static int init_sess(struct rtrs_clt_sess *sess)
+static int init_path(struct rtrs_clt_path *clt_path)
{
int err;
char str[NAME_MAX];
struct rtrs_addr path = {
- .src = &sess->s.src_addr,
- .dst = &sess->s.dst_addr,
+ .src = &clt_path->s.src_addr,
+ .dst = &clt_path->s.dst_addr,
};
rtrs_addr_to_str(&path, str, sizeof(str));
- mutex_lock(&sess->init_mutex);
- err = init_conns(sess);
+ mutex_lock(&clt_path->init_mutex);
+ err = init_conns(clt_path);
if (err) {
- rtrs_err(sess->clt,
+ rtrs_err(clt_path->clt,
"init_conns() failed: err=%d path=%s [%s:%u]\n", err,
- str, sess->hca_name, sess->hca_port);
+ str, clt_path->hca_name, clt_path->hca_port);
goto out;
}
- err = rtrs_send_sess_info(sess);
+ err = rtrs_send_path_info(clt_path);
if (err) {
- rtrs_err(
- sess->clt,
- "rtrs_send_sess_info() failed: err=%d path=%s [%s:%u]\n",
- err, str, sess->hca_name, sess->hca_port);
+ rtrs_err(clt_path->clt,
+ "rtrs_send_path_info() failed: err=%d path=%s [%s:%u]\n",
+ err, str, clt_path->hca_name, clt_path->hca_port);
goto out;
}
- rtrs_clt_sess_up(sess);
+ rtrs_clt_path_up(clt_path);
out:
- mutex_unlock(&sess->init_mutex);
+ mutex_unlock(&clt_path->init_mutex);
return err;
}
static void rtrs_clt_reconnect_work(struct work_struct *work)
{
- struct rtrs_clt_sess *sess;
- struct rtrs_clt *clt;
+ struct rtrs_clt_path *clt_path;
+ struct rtrs_clt_sess *clt;
unsigned int delay_ms;
int err;
- sess = container_of(to_delayed_work(work), struct rtrs_clt_sess,
- reconnect_dwork);
- clt = sess->clt;
+ clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path,
+ reconnect_dwork);
+ clt = clt_path->clt;
- if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING)
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING)
return;
- if (sess->reconnect_attempts >= clt->max_reconnect_attempts) {
- /* Close a session completely if max attempts is reached */
- rtrs_clt_close_conns(sess, false);
+ if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) {
+ /* Close a path completely if max attempts is reached */
+ rtrs_clt_close_conns(clt_path, false);
return;
}
- sess->reconnect_attempts++;
+ clt_path->reconnect_attempts++;
/* Stop everything */
- rtrs_clt_stop_and_destroy_conns(sess);
+ rtrs_clt_stop_and_destroy_conns(clt_path);
msleep(RTRS_RECONNECT_BACKOFF);
- if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING, NULL)) {
- err = init_sess(sess);
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) {
+ err = init_path(clt_path);
if (err)
goto reconnect_again;
}
@@ -2650,10 +2667,10 @@ static void rtrs_clt_reconnect_work(struct work_struct *work)
return;
reconnect_again:
- if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, NULL)) {
- sess->stats->reconnects.fail_cnt++;
+ if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) {
+ clt_path->stats->reconnects.fail_cnt++;
delay_ms = clt->reconnect_delay_sec * 1000;
- queue_delayed_work(rtrs_wq, &sess->reconnect_dwork,
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork,
msecs_to_jiffies(delay_ms +
prandom_u32() %
RTRS_RECONNECT_SEED));
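
The requeue above adds random jitter to the configured delay so that many paths failing together do not retry in lockstep. A userspace sketch of that delay computation; RTRS_RECONNECT_SEED is given a placeholder value here and rand() stands in for prandom_u32():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define RTRS_RECONNECT_SEED 8192	/* placeholder jitter window, ms */

/* Configured base delay plus bounded random jitter. */
static unsigned int reconnect_delay_ms(unsigned int delay_sec)
{
	return delay_sec * 1000 + (unsigned int)rand() % RTRS_RECONNECT_SEED;
}

int main(void)
{
	srand((unsigned int)time(NULL));
	for (int i = 0; i < 3; i++)
		printf("attempt %d sleeps %u ms\n", i, reconnect_delay_ms(30));
	return 0;
}
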
@@ -2662,19 +2679,20 @@ reconnect_again:
static void rtrs_clt_dev_release(struct device *dev)
{
- struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev);
+ struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess,
+ dev);
kfree(clt);
}
-static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num,
+static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num,
u16 port, size_t pdu_sz, void *priv,
void (*link_ev)(void *priv,
enum rtrs_clt_link_ev ev),
unsigned int reconnect_delay_sec,
unsigned int max_reconnect_attempts)
{
- struct rtrs_clt *clt;
+ struct rtrs_clt_sess *clt;
int err;
if (!paths_num || paths_num > MAX_PATHS_NUM)
@@ -2749,7 +2767,7 @@ err:
return ERR_PTR(err);
}
-static void free_clt(struct rtrs_clt *clt)
+static void free_clt(struct rtrs_clt_sess *clt)
{
free_permits(clt);
free_percpu(clt->pcpu_path);
@@ -2760,7 +2778,7 @@ static void free_clt(struct rtrs_clt *clt)
}
/**
- * rtrs_clt_open() - Open a session to an RTRS server
+ * rtrs_clt_open() - Open a path to an RTRS server
* @ops: holds the link event callback and the private pointer.
* @pathname: name of the path
* @paths: Paths to be established defined by their src and dst addresses
@@ -2777,24 +2795,24 @@ static void free_clt(struct rtrs_clt *clt)
*
* Return a valid pointer on success otherwise PTR_ERR.
*/
-struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
- const char *sessname,
+struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *pathname,
const struct rtrs_addr *paths,
size_t paths_num, u16 port,
size_t pdu_sz, u8 reconnect_delay_sec,
s16 max_reconnect_attempts, u32 nr_poll_queues)
{
- struct rtrs_clt_sess *sess, *tmp;
- struct rtrs_clt *clt;
+ struct rtrs_clt_path *clt_path, *tmp;
+ struct rtrs_clt_sess *clt;
int err, i;
- if (strchr(sessname, '/') || strchr(sessname, '.')) {
- pr_err("sessname cannot contain / and .\n");
+ if (strchr(pathname, '/') || strchr(pathname, '.')) {
+ pr_err("pathname cannot contain / and .\n");
err = -EINVAL;
goto out;
}
- clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
+ clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv,
ops->link_ev,
reconnect_delay_sec,
max_reconnect_attempts);
@@ -2803,49 +2821,49 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
goto out;
}
for (i = 0; i < paths_num; i++) {
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
- sess = alloc_sess(clt, &paths[i], nr_cpu_ids,
+ clt_path = alloc_path(clt, &paths[i], nr_cpu_ids,
nr_poll_queues);
- if (IS_ERR(sess)) {
- err = PTR_ERR(sess);
- goto close_all_sess;
+ if (IS_ERR(clt_path)) {
+ err = PTR_ERR(clt_path);
+ goto close_all_path;
}
if (!i)
- sess->for_new_clt = 1;
- list_add_tail_rcu(&sess->s.entry, &clt->paths_list);
+ clt_path->for_new_clt = 1;
+ list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list);
- err = init_sess(sess);
+ err = init_path(clt_path);
if (err) {
- list_del_rcu(&sess->s.entry);
- rtrs_clt_close_conns(sess, true);
- free_percpu(sess->stats->pcpu_stats);
- kfree(sess->stats);
- free_sess(sess);
- goto close_all_sess;
+ list_del_rcu(&clt_path->s.entry);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
+ goto close_all_path;
}
- err = rtrs_clt_create_sess_files(sess);
+ err = rtrs_clt_create_path_files(clt_path);
if (err) {
- list_del_rcu(&sess->s.entry);
- rtrs_clt_close_conns(sess, true);
- free_percpu(sess->stats->pcpu_stats);
- kfree(sess->stats);
- free_sess(sess);
- goto close_all_sess;
+ list_del_rcu(&clt_path->s.entry);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
+ goto close_all_path;
}
}
err = alloc_permits(clt);
if (err)
- goto close_all_sess;
+ goto close_all_path;
return clt;
-close_all_sess:
- list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
- rtrs_clt_destroy_sess_files(sess, NULL);
- rtrs_clt_close_conns(sess, true);
- kobject_put(&sess->kobj);
+close_all_path:
+ list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_destroy_path_files(clt_path, NULL);
+ rtrs_clt_close_conns(clt_path, true);
+ kobject_put(&clt_path->kobj);
}
rtrs_clt_destroy_sysfs_root(clt);
free_clt(clt);
@@ -2856,37 +2874,38 @@ out:
EXPORT_SYMBOL(rtrs_clt_open);
/**
- * rtrs_clt_close() - Close a session
+ * rtrs_clt_close() - Close a path
* @clt: Session handle. Session is freed upon return.
*/
-void rtrs_clt_close(struct rtrs_clt *clt)
+void rtrs_clt_close(struct rtrs_clt_sess *clt)
{
- struct rtrs_clt_sess *sess, *tmp;
+ struct rtrs_clt_path *clt_path, *tmp;
/* Firstly forbid sysfs access */
rtrs_clt_destroy_sysfs_root(clt);
/* Now it is safe to iterate over all paths without locks */
- list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) {
- rtrs_clt_close_conns(sess, true);
- rtrs_clt_destroy_sess_files(sess, NULL);
- kobject_put(&sess->kobj);
+ list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) {
+ rtrs_clt_close_conns(clt_path, true);
+ rtrs_clt_destroy_path_files(clt_path, NULL);
+ kobject_put(&clt_path->kobj);
}
free_clt(clt);
}
EXPORT_SYMBOL(rtrs_clt_close);
-int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path)
{
enum rtrs_clt_state old_state;
int err = -EBUSY;
bool changed;
- changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING,
+ changed = rtrs_clt_change_state_get_old(clt_path,
+ RTRS_CLT_RECONNECTING,
&old_state);
if (changed) {
- sess->reconnect_attempts = 0;
- queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0);
+ clt_path->reconnect_attempts = 0;
+ queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0);
}
if (changed || old_state == RTRS_CLT_RECONNECTING) {
/*
@@ -2894,15 +2913,15 @@ int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess)
* execution, so do the flush if we have queued something
* right now or work is pending.
*/
- flush_delayed_work(&sess->reconnect_dwork);
- err = (READ_ONCE(sess->state) ==
+ flush_delayed_work(&clt_path->reconnect_dwork);
+ err = (READ_ONCE(clt_path->state) ==
RTRS_CLT_CONNECTED ? 0 : -ENOTCONN);
}
return err;
}
-int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path,
const struct attribute *sysfs_self)
{
enum rtrs_clt_state old_state;
@@ -2918,27 +2937,27 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
* removing the path.
*/
do {
- rtrs_clt_close_conns(sess, true);
- changed = rtrs_clt_change_state_get_old(sess,
+ rtrs_clt_close_conns(clt_path, true);
+ changed = rtrs_clt_change_state_get_old(clt_path,
RTRS_CLT_DEAD,
&old_state);
} while (!changed && old_state != RTRS_CLT_DEAD);
if (changed) {
- rtrs_clt_remove_path_from_arr(sess);
- rtrs_clt_destroy_sess_files(sess, sysfs_self);
- kobject_put(&sess->kobj);
+ rtrs_clt_remove_path_from_arr(clt_path);
+ rtrs_clt_destroy_path_files(clt_path, sysfs_self);
+ kobject_put(&clt_path->kobj);
}
return 0;
}
-void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value)
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value)
{
clt->max_reconnect_attempts = (unsigned int)value;
}
-int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt)
{
return (int)clt->max_reconnect_attempts;
}
@@ -2968,12 +2987,12 @@ int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt)
* On dir=WRITE rtrs client will rdma write data in sg to server side.
*/
int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
- struct rtrs_clt *clt, struct rtrs_permit *permit,
- const struct kvec *vec, size_t nr, size_t data_len,
- struct scatterlist *sg, unsigned int sg_cnt)
+ struct rtrs_clt_sess *clt, struct rtrs_permit *permit,
+ const struct kvec *vec, size_t nr, size_t data_len,
+ struct scatterlist *sg, unsigned int sg_cnt)
{
struct rtrs_clt_io_req *req;
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
enum dma_data_direction dma_dir;
int err = -ECONNABORTED, i;
@@ -2995,19 +3014,19 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
rcu_read_lock();
for (path_it_init(&it, clt);
- (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
+ (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
- if (usr_len + hdr_len > sess->max_hdr_size) {
- rtrs_wrn_rl(sess->clt,
+ if (usr_len + hdr_len > clt_path->max_hdr_size) {
+ rtrs_wrn_rl(clt_path->clt,
"%s request failed, user message size is %zu and header length %zu, but max size is %u\n",
dir == READ ? "Read" : "Write",
- usr_len, hdr_len, sess->max_hdr_size);
+ usr_len, hdr_len, clt_path->max_hdr_size);
err = -EMSGSIZE;
break;
}
- req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv,
+ req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv,
vec, usr_len, sg, sg_cnt, data_len,
dma_dir);
if (dir == READ)
@@ -3028,21 +3047,21 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
}
EXPORT_SYMBOL(rtrs_clt_request);
-int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index)
+int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index)
{
/* If no path, return -1 so the block layer does not try again */
int cnt = -1;
struct rtrs_con *con;
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
struct path_it it;
rcu_read_lock();
for (path_it_init(&it, clt);
- (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
- if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)
+ (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) {
+ if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED)
continue;
- con = sess->s.con[index + 1];
+ con = clt_path->s.con[index + 1];
cnt = ib_process_cq_direct(con->cq, -1);
if (cnt)
break;
@@ -3062,7 +3081,7 @@ EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct);
* 0 on success
* -ECOMM no connection to the server
*/
-int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
+int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr)
{
if (!rtrs_clt_is_connected(clt))
return -ECOMM;
@@ -3077,15 +3096,15 @@ int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr)
}
EXPORT_SYMBOL(rtrs_clt_query);
-int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
struct rtrs_addr *addr)
{
- struct rtrs_clt_sess *sess;
+ struct rtrs_clt_path *clt_path;
int err;
- sess = alloc_sess(clt, addr, nr_cpu_ids, 0);
- if (IS_ERR(sess))
- return PTR_ERR(sess);
+ clt_path = alloc_path(clt, addr, nr_cpu_ids, 0);
+ if (IS_ERR(clt_path))
+ return PTR_ERR(clt_path);
mutex_lock(&clt->paths_mutex);
if (clt->paths_num == 0) {
@@ -3094,7 +3113,7 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
* the addition of the first path is like a new session for
* the storage server
*/
- sess->for_new_clt = 1;
+ clt_path->for_new_clt = 1;
}
mutex_unlock(&clt->paths_mutex);
@@ -3104,24 +3123,24 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
* IO will never grab it. Also it is very important to add
* path before init, since init fires LINK_CONNECTED event.
*/
- rtrs_clt_add_path_to_arr(sess);
+ rtrs_clt_add_path_to_arr(clt_path);
- err = init_sess(sess);
+ err = init_path(clt_path);
if (err)
- goto close_sess;
+ goto close_path;
- err = rtrs_clt_create_sess_files(sess);
+ err = rtrs_clt_create_path_files(clt_path);
if (err)
- goto close_sess;
+ goto close_path;
return 0;
-close_sess:
- rtrs_clt_remove_path_from_arr(sess);
- rtrs_clt_close_conns(sess, true);
- free_percpu(sess->stats->pcpu_stats);
- kfree(sess->stats);
- free_sess(sess);
+close_path:
+ rtrs_clt_remove_path_from_arr(clt_path);
+ rtrs_clt_close_conns(clt_path, true);
+ free_percpu(clt_path->stats->pcpu_stats);
+ kfree(clt_path->stats);
+ free_path(clt_path);
return err;
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index 9afffccff973..d1b18a154ae0 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -124,9 +124,9 @@ struct rtrs_rbuf {
u32 rkey;
};
-struct rtrs_clt_sess {
- struct rtrs_sess s;
- struct rtrs_clt *clt;
+struct rtrs_clt_path {
+ struct rtrs_path s;
+ struct rtrs_clt_sess *clt;
wait_queue_head_t state_wq;
enum rtrs_clt_state state;
atomic_t connected_cnt;
@@ -153,10 +153,10 @@ struct rtrs_clt_sess {
*mp_skip_entry;
};
-struct rtrs_clt {
+struct rtrs_clt_sess {
struct list_head paths_list; /* rcu protected list */
size_t paths_num;
- struct rtrs_clt_sess
+ struct rtrs_clt_path
__rcu * __percpu *pcpu_path;
uuid_t paths_uuid;
int paths_up;
@@ -186,31 +186,32 @@ static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c)
return container_of(c, struct rtrs_clt_con, c);
}
-static inline struct rtrs_clt_sess *to_clt_sess(struct rtrs_sess *s)
+static inline struct rtrs_clt_path *to_clt_path(struct rtrs_path *s)
{
- return container_of(s, struct rtrs_clt_sess, s);
+ return container_of(s, struct rtrs_clt_path, s);
}
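
to_clt_path() is the usual container_of() downcast: given a pointer to the embedded rtrs_path member, recover the enclosing client-path object. A freestanding sketch with a local container_of() (the kernel's definition additionally type-checks the member):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct path { int id; };
struct clt_path { int state; struct path s; };

static struct clt_path *to_clt_path(struct path *s)
{
	return container_of(s, struct clt_path, s);
}

int main(void)
{
	struct clt_path cp = { .state = 1, .s = { .id = 7 } };
	struct path *s = &cp.s;	/* what generic code passes around */

	printf("state via downcast: %d\n", to_clt_path(s)->state);
	return 0;
}
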
-static inline int permit_size(struct rtrs_clt *clt)
+static inline int permit_size(struct rtrs_clt_sess *clt)
{
return sizeof(struct rtrs_permit) + clt->pdu_sz;
}
-static inline struct rtrs_permit *get_permit(struct rtrs_clt *clt, int idx)
+static inline struct rtrs_permit *get_permit(struct rtrs_clt_sess *clt,
+ int idx)
{
return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx);
}
-int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess);
-void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait);
-int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
+int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *path);
+void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait);
+int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt,
struct rtrs_addr *addr);
-int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
+int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *path,
const struct attribute *sysfs_self);
-void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value);
-int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt);
-void free_sess(struct rtrs_clt_sess *sess);
+void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value);
+int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt);
+void free_path(struct rtrs_clt_path *clt_path);
/* rtrs-clt-stats.c */
@@ -239,11 +240,11 @@ ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
/* rtrs-clt-sysfs.c */
-int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt);
-void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt);
+int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt);
+void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt);
-int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess);
-void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
+int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path);
+void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path,
const struct attribute *sysfs_self);
#endif /* RTRS_CLT_H */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index 78eac9a4f703..9a1e5c2ae55c 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -90,7 +90,7 @@ struct rtrs_ib_dev {
};
struct rtrs_con {
- struct rtrs_sess *sess;
+ struct rtrs_path *path;
struct ib_qp *qp;
struct ib_cq *cq;
struct rdma_cm_id *cm_id;
@@ -100,7 +100,7 @@ struct rtrs_con {
atomic_t sq_wr_avail;
};
-struct rtrs_sess {
+struct rtrs_path {
struct list_head entry;
struct sockaddr_storage dst_addr;
struct sockaddr_storage src_addr;
@@ -229,11 +229,11 @@ struct rtrs_msg_conn_rsp {
/**
* struct rtrs_msg_info_req
* @type: @RTRS_MSG_INFO_REQ
- * @sessname: Session name chosen by client
+ * @pathname: Path name chosen by client
*/
struct rtrs_msg_info_req {
__le16 type;
- u8 sessname[NAME_MAX];
+ u8 pathname[NAME_MAX];
u8 reserved[15];
};
@@ -313,19 +313,19 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu,
int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe);
-int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
u32 max_send_sge, int cq_vector, int nr_cqe,
u32 max_send_wr, u32 max_recv_wr,
enum ib_poll_context poll_ctx);
void rtrs_cq_qp_destroy(struct rtrs_con *con);
-void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
unsigned int interval_ms, unsigned int missed_max,
void (*err_handler)(struct rtrs_con *con),
struct workqueue_struct *wq);
-void rtrs_start_hb(struct rtrs_sess *sess);
-void rtrs_stop_hb(struct rtrs_sess *sess);
-void rtrs_send_hb_ack(struct rtrs_sess *sess);
+void rtrs_start_hb(struct rtrs_path *path);
+void rtrs_stop_hb(struct rtrs_path *path);
+void rtrs_send_hb_ack(struct rtrs_path *path);
void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags,
struct rtrs_rdma_dev_pd *pool);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index 9c43ce5ba1c1..b94ae12c2795 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -15,10 +15,10 @@
static void rtrs_srv_release(struct kobject *kobj)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- kfree(sess);
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ kfree(srv_path);
}
static struct kobj_type ktype = {
@@ -36,24 +36,25 @@ static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
- struct rtrs_srv_sess *sess;
- struct rtrs_sess *s;
+ struct rtrs_srv_path *srv_path;
+ struct rtrs_path *s;
char str[MAXHOSTNAMELEN];
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- s = &sess->s;
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ s = &srv_path->s;
if (!sysfs_streq(buf, "1")) {
rtrs_err(s, "%s: invalid value: '%s'\n",
attr->attr.name, buf);
return -EINVAL;
}
- sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str));
+ sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr, str,
+ sizeof(str));
rtrs_info(s, "disconnect for path %s requested\n", str);
/* first remove sysfs itself to avoid deadlock */
- sysfs_remove_file_self(&sess->kobj, &attr->attr);
- close_sess(sess);
+ sysfs_remove_file_self(&srv_path->kobj, &attr->attr);
+ close_path(srv_path);
return count;
}
@@ -66,11 +67,11 @@ static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
struct rtrs_con *usr_con;
- sess = container_of(kobj, typeof(*sess), kobj);
- usr_con = sess->s.con[0];
+ srv_path = container_of(kobj, typeof(*srv_path), kobj);
+ usr_con = srv_path->s.con[0];
return sysfs_emit(page, "%u\n", usr_con->cm_id->port_num);
}
@@ -82,11 +83,11 @@ static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
- return sysfs_emit(page, "%s\n", sess->s.dev->ib_dev->name);
+ return sysfs_emit(page, "%s\n", srv_path->s.dev->ib_dev->name);
}
static struct kobj_attribute rtrs_srv_hca_name_attr =
@@ -96,11 +97,11 @@ static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
int cnt;
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ cnt = sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr,
page, PAGE_SIZE);
return cnt + sysfs_emit_at(page, cnt, "\n");
}
@@ -112,11 +113,11 @@ static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *page)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
int len;
- sess = container_of(kobj, struct rtrs_srv_sess, kobj);
- len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page,
+ srv_path = container_of(kobj, struct rtrs_srv_path, kobj);
+ len = sockaddr_to_str((struct sockaddr *)&srv_path->s.src_addr, page,
PAGE_SIZE);
len += sysfs_emit_at(page, len, "\n");
return len;
@@ -125,7 +126,7 @@ static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj,
static struct kobj_attribute rtrs_srv_dst_addr_attr =
__ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL);
-static struct attribute *rtrs_srv_sess_attrs[] = {
+static struct attribute *rtrs_srv_path_attrs[] = {
&rtrs_srv_hca_name_attr.attr,
&rtrs_srv_hca_port_attr.attr,
&rtrs_srv_src_addr_attr.attr,
@@ -134,8 +135,8 @@ static struct attribute *rtrs_srv_sess_attrs[] = {
NULL,
};
-static const struct attribute_group rtrs_srv_sess_attr_group = {
- .attrs = rtrs_srv_sess_attrs,
+static const struct attribute_group rtrs_srv_path_attr_group = {
+ .attrs = rtrs_srv_path_attrs,
};
STAT_ATTR(struct rtrs_srv_stats, rdma,
@@ -151,9 +152,9 @@ static const struct attribute_group rtrs_srv_stats_attr_group = {
.attrs = rtrs_srv_stats_attrs,
};
-static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
int err = 0;
mutex_lock(&srv->paths_mutex);
@@ -164,7 +165,7 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
goto unlock;
}
srv->dev.class = rtrs_dev_class;
- err = dev_set_name(&srv->dev, "%s", sess->s.sessname);
+ err = dev_set_name(&srv->dev, "%s", srv_path->s.sessname);
if (err)
goto unlock;
@@ -196,9 +197,9 @@ unlock:
}
static void
-rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
+rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
mutex_lock(&srv->paths_mutex);
if (!--srv->dev_ref) {
@@ -213,7 +214,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
}
}
-static void rtrs_srv_sess_stats_release(struct kobject *kobj)
+static void rtrs_srv_path_stats_release(struct kobject *kobj)
{
struct rtrs_srv_stats *stats;
@@ -224,22 +225,22 @@ static void rtrs_srv_sess_stats_release(struct kobject *kobj)
static struct kobj_type ktype_stats = {
.sysfs_ops = &kobj_sysfs_ops,
- .release = rtrs_srv_sess_stats_release,
+ .release = rtrs_srv_path_stats_release,
};
-static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
+static int rtrs_srv_create_stats_files(struct rtrs_srv_path *srv_path)
{
int err;
- struct rtrs_sess *s = &sess->s;
+ struct rtrs_path *s = &srv_path->s;
- err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats,
- &sess->kobj, "stats");
+ err = kobject_init_and_add(&srv_path->stats->kobj_stats, &ktype_stats,
+ &srv_path->kobj, "stats");
if (err) {
rtrs_err(s, "kobject_init_and_add(): %d\n", err);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_put(&srv_path->stats->kobj_stats);
return err;
}
- err = sysfs_create_group(&sess->stats->kobj_stats,
+ err = sysfs_create_group(&srv_path->stats->kobj_stats,
&rtrs_srv_stats_attr_group);
if (err) {
rtrs_err(s, "sysfs_create_group(): %d\n", err);
@@ -249,64 +250,64 @@ static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess)
return 0;
err:
- kobject_del(&sess->stats->kobj_stats);
- kobject_put(&sess->stats->kobj_stats);
+ kobject_del(&srv_path->stats->kobj_stats);
+ kobject_put(&srv_path->stats->kobj_stats);
return err;
}
-int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess)
+int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
- struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *s = &srv_path->s;
char str[NAME_MAX];
int err;
struct rtrs_addr path = {
- .src = &sess->s.dst_addr,
- .dst = &sess->s.src_addr,
+ .src = &srv_path->s.dst_addr,
+ .dst = &srv_path->s.src_addr,
};
rtrs_addr_to_str(&path, str, sizeof(str));
- err = rtrs_srv_create_once_sysfs_root_folders(sess);
+ err = rtrs_srv_create_once_sysfs_root_folders(srv_path);
if (err)
return err;
- err = kobject_init_and_add(&sess->kobj, &ktype, srv->kobj_paths,
+ err = kobject_init_and_add(&srv_path->kobj, &ktype, srv->kobj_paths,
"%s", str);
if (err) {
rtrs_err(s, "kobject_init_and_add(): %d\n", err);
goto destroy_root;
}
- err = sysfs_create_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ err = sysfs_create_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
if (err) {
rtrs_err(s, "sysfs_create_group(): %d\n", err);
goto put_kobj;
}
- err = rtrs_srv_create_stats_files(sess);
+ err = rtrs_srv_create_stats_files(srv_path);
if (err)
goto remove_group;
return 0;
remove_group:
- sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
+ sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
put_kobj:
- kobject_del(&sess->kobj);
+ kobject_del(&srv_path->kobj);
destroy_root:
- kobject_put(&sess->kobj);
- rtrs_srv_destroy_once_sysfs_root_folders(sess);
+ kobject_put(&srv_path->kobj);
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
return err;
}
-void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess)
+void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path)
{
- if (sess->kobj.state_in_sysfs) {
- kobject_del(&sess->stats->kobj_stats);
- kobject_put(&sess->stats->kobj_stats);
- sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group);
- kobject_put(&sess->kobj);
+ if (srv_path->kobj.state_in_sysfs) {
+ kobject_del(&srv_path->stats->kobj_stats);
+ kobject_put(&srv_path->stats->kobj_stats);
+ sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group);
+ kobject_put(&srv_path->kobj);
- rtrs_srv_destroy_once_sysfs_root_folders(sess);
+ rtrs_srv_destroy_once_sysfs_root_folders(srv_path);
}
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 7df71f8cf149..24024bce2566 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -62,19 +62,19 @@ static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c)
return container_of(c, struct rtrs_srv_con, c);
}
-static inline struct rtrs_srv_sess *to_srv_sess(struct rtrs_sess *s)
+static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s)
{
- return container_of(s, struct rtrs_srv_sess, s);
+ return container_of(s, struct rtrs_srv_path, s);
}
-static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
+static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
enum rtrs_srv_state new_state)
{
enum rtrs_srv_state old_state;
bool changed = false;
- spin_lock_irq(&sess->state_lock);
- old_state = sess->state;
+ spin_lock_irq(&srv_path->state_lock);
+ old_state = srv_path->state;
switch (new_state) {
case RTRS_SRV_CONNECTED:
if (old_state == RTRS_SRV_CONNECTING)
@@ -93,8 +93,8 @@ static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess,
break;
}
if (changed)
- sess->state = new_state;
- spin_unlock_irq(&sess->state_lock);
+ srv_path->state = new_state;
+ spin_unlock_irq(&srv_path->state_lock);
return changed;
}
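
rtrs_srv_change_state() above is a compare-and-transition guard: take the state lock, permit only whitelisted old-to-new transitions, and report whether the write happened so callers such as close_path() can act exactly once. A reduced sketch of the same idea, assuming a made-up three-state machine (none of these names come from the driver):

#include <linux/spinlock.h>

enum demo_state { DEMO_CONNECTING, DEMO_CONNECTED, DEMO_CLOSING };

struct demo_path {
	spinlock_t state_lock;
	enum demo_state state;
};

static bool demo_change_state(struct demo_path *p, enum demo_state new_state)
{
	bool changed = false;

	spin_lock_irq(&p->state_lock);
	switch (new_state) {
	case DEMO_CONNECTED:
		changed = (p->state == DEMO_CONNECTING);
		break;
	case DEMO_CLOSING:
		changed = (p->state != DEMO_CLOSING);
		break;
	default:
		break;
	}
	if (changed)
		p->state = new_state;
	spin_unlock_irq(&p->state_lock);

	return changed;
}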
@@ -106,16 +106,16 @@ static void free_id(struct rtrs_srv_op *id)
kfree(id);
}
-static void rtrs_srv_free_ops_ids(struct rtrs_srv_sess *sess)
+static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
int i;
- if (sess->ops_ids) {
+ if (srv_path->ops_ids) {
for (i = 0; i < srv->queue_depth; i++)
- free_id(sess->ops_ids[i]);
- kfree(sess->ops_ids);
- sess->ops_ids = NULL;
+ free_id(srv_path->ops_ids[i]);
+ kfree(srv_path->ops_ids);
+ srv_path->ops_ids = NULL;
}
}
@@ -127,21 +127,24 @@ static struct ib_cqe io_comp_cqe = {
static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref)
{
- struct rtrs_srv_sess *sess = container_of(ref, struct rtrs_srv_sess, ids_inflight_ref);
+ struct rtrs_srv_path *srv_path = container_of(ref,
+ struct rtrs_srv_path,
+ ids_inflight_ref);
- percpu_ref_exit(&sess->ids_inflight_ref);
- complete(&sess->complete_done);
+ percpu_ref_exit(&srv_path->ids_inflight_ref);
+ complete(&srv_path->complete_done);
}
-static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
+static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_op *id;
int i, ret;
- sess->ops_ids = kcalloc(srv->queue_depth, sizeof(*sess->ops_ids),
- GFP_KERNEL);
- if (!sess->ops_ids)
+ srv_path->ops_ids = kcalloc(srv->queue_depth,
+ sizeof(*srv_path->ops_ids),
+ GFP_KERNEL);
+ if (!srv_path->ops_ids)
goto err;
for (i = 0; i < srv->queue_depth; ++i) {
@@ -149,44 +152,44 @@ static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess)
if (!id)
goto err;
- sess->ops_ids[i] = id;
+ srv_path->ops_ids[i] = id;
}
- ret = percpu_ref_init(&sess->ids_inflight_ref,
+ ret = percpu_ref_init(&srv_path->ids_inflight_ref,
rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
if (ret) {
pr_err("Percpu reference init failed\n");
goto err;
}
- init_completion(&sess->complete_done);
+ init_completion(&srv_path->complete_done);
return 0;
err:
- rtrs_srv_free_ops_ids(sess);
+ rtrs_srv_free_ops_ids(srv_path);
return -ENOMEM;
}
-static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_sess *sess)
+static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path)
{
- percpu_ref_get(&sess->ids_inflight_ref);
+ percpu_ref_get(&srv_path->ids_inflight_ref);
}
-static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_sess *sess)
+static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path)
{
- percpu_ref_put(&sess->ids_inflight_ref);
+ percpu_ref_put(&srv_path->ids_inflight_ref);
}
static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "REG MR failed: %s\n",
ib_wc_status_msg(wc->status));
- close_sess(sess);
+ close_path(srv_path);
return;
}
}
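
The ids_inflight_ref handling above is the standard percpu_ref pattern for cheap in-flight I/O accounting: percpu_ref_get()/percpu_ref_put() per request on the fast path, and at teardown percpu_ref_kill() switches the counter to atomic mode so the release callback fires once the count drains; rtrs_srv_close_work() further down waits on complete_done for exactly that event. A self-contained sketch of the lifecycle, all identifiers invented:

#include <linux/percpu-refcount.h>
#include <linux/completion.h>

struct demo_path {
	struct percpu_ref inflight;
	struct completion drained;
};

static void demo_inflight_release(struct percpu_ref *ref)
{
	struct demo_path *p = container_of(ref, struct demo_path, inflight);

	percpu_ref_exit(&p->inflight);
	complete(&p->drained);		/* wakes the waiter in shutdown */
}

static int demo_init(struct demo_path *p)
{
	init_completion(&p->drained);
	return percpu_ref_init(&p->inflight, demo_inflight_release,
			       0, GFP_KERNEL);
}

/* Per request: percpu_ref_get(&p->inflight) / percpu_ref_put(&p->inflight) */

static void demo_shutdown(struct demo_path *p)
{
	percpu_ref_kill(&p->inflight);		/* no new refs; go atomic */
	wait_for_completion(&p->drained);	/* until the last put() */
}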
@@ -197,9 +200,9 @@ static struct ib_cqe local_reg_cqe = {
static int rdma_write_sg(struct rtrs_srv_op *id)
{
- struct rtrs_sess *s = id->con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- dma_addr_t dma_addr = sess->dma_addr[id->msg_id];
+ struct rtrs_path *s = id->con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id];
struct rtrs_srv_mr *srv_mr;
struct ib_send_wr inv_wr;
struct ib_rdma_wr imm_wr;
@@ -233,7 +236,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
return -EINVAL;
}
- plist->lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
offset += plist->length;
wr->wr.sg_list = plist;
@@ -284,7 +287,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
if (always_invalidate) {
struct rtrs_msg_rkey_rsp *msg;
- srv_mr = &sess->mrs[id->msg_id];
+ srv_mr = &srv_path->mrs[id->msg_id];
rwr.wr.opcode = IB_WR_REG_MR;
rwr.wr.wr_cqe = &local_reg_cqe;
rwr.wr.num_sge = 0;
@@ -300,11 +303,11 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
list.addr = srv_mr->iu->dma_addr;
list.length = sizeof(*msg);
- list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
imm_wr.wr.sg_list = &list;
imm_wr.wr.num_sge = 1;
imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
srv_mr->iu->dma_addr,
srv_mr->iu->size, DMA_TO_DEVICE);
} else {
@@ -317,7 +320,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
0, need_inval));
imm_wr.wr.wr_cqe = &io_comp_cqe;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr,
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr,
offset, DMA_BIDIRECTIONAL);
err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL);
@@ -341,8 +344,8 @@ static int rdma_write_sg(struct rtrs_srv_op *id)
static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
int errno)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct ib_send_wr inv_wr, *wr = NULL;
struct ib_rdma_wr imm_wr;
struct ib_reg_wr rwr;
@@ -402,7 +405,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
struct ib_sge list;
struct rtrs_msg_rkey_rsp *msg;
- srv_mr = &sess->mrs[id->msg_id];
+ srv_mr = &srv_path->mrs[id->msg_id];
rwr.wr.next = &imm_wr.wr;
rwr.wr.opcode = IB_WR_REG_MR;
rwr.wr.wr_cqe = &local_reg_cqe;
@@ -419,11 +422,11 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
list.addr = srv_mr->iu->dma_addr;
list.length = sizeof(*msg);
- list.lkey = sess->s.dev->ib_pd->local_dma_lkey;
+ list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey;
imm_wr.wr.sg_list = &list;
imm_wr.wr.num_sge = 1;
imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM;
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev,
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
srv_mr->iu->dma_addr,
srv_mr->iu->size, DMA_TO_DEVICE);
} else {
@@ -444,11 +447,11 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id,
return err;
}
-void close_sess(struct rtrs_srv_sess *sess)
+void close_path(struct rtrs_srv_path *srv_path)
{
- if (rtrs_srv_change_state(sess, RTRS_SRV_CLOSING))
- queue_work(rtrs_wq, &sess->close_work);
- WARN_ON(sess->state != RTRS_SRV_CLOSING);
+ if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING))
+ queue_work(rtrs_wq, &srv_path->close_work);
+ WARN_ON(srv_path->state != RTRS_SRV_CLOSING);
}
static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
@@ -480,35 +483,35 @@ static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state)
*/
bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
struct rtrs_srv_con *con;
- struct rtrs_sess *s;
+ struct rtrs_path *s;
int err;
if (WARN_ON(!id))
return true;
con = id->con;
- s = con->c.sess;
- sess = to_srv_sess(s);
+ s = con->c.path;
+ srv_path = to_srv_path(s);
id->status = status;
- if (sess->state != RTRS_SRV_CONNECTED) {
+ if (srv_path->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
- "Sending I/O response failed, session %s is disconnected, sess state %s\n",
- kobject_name(&sess->kobj),
- rtrs_srv_state_str(sess->state));
+ "Sending I/O response failed, server path %s is disconnected, path state %s\n",
+ kobject_name(&srv_path->kobj),
+ rtrs_srv_state_str(srv_path->state));
goto out;
}
if (always_invalidate) {
- struct rtrs_srv_mr *mr = &sess->mrs[id->msg_id];
+ struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id];
ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey));
}
if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) {
- rtrs_err(s, "IB send queue full: sess=%s cid=%d\n",
- kobject_name(&sess->kobj),
+ rtrs_err(s, "IB send queue full: srv_path=%s cid=%d\n",
+ kobject_name(&srv_path->kobj),
con->c.cid);
atomic_add(1, &con->c.sq_wr_avail);
spin_lock(&con->rsp_wr_wait_lock);
@@ -523,12 +526,12 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status)
err = rdma_write_sg(id);
if (err) {
- rtrs_err_rl(s, "IO response failed: %d: sess=%s\n", err,
- kobject_name(&sess->kobj));
- close_sess(sess);
+ rtrs_err_rl(s, "IO response failed: %d: srv_path=%s\n", err,
+ kobject_name(&srv_path->kobj));
+ close_path(srv_path);
}
out:
- rtrs_srv_put_ops_ids(sess);
+ rtrs_srv_put_ops_ids(srv_path);
return true;
}
EXPORT_SYMBOL(rtrs_srv_resp_rdma);
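
rtrs_srv_resp_rdma() above also shows the driver's send-queue backpressure: sq_wr_avail is a credit counter decremented per work request, and when it would go negative the response is parked on rsp_wr_wait_list until completions return credits. A hedged sketch of just the credit check (list handling elided, names illustrative):

#include <linux/atomic.h>

/* Returns true if a send credit was taken; false means the queue is
 * full: park the request and retry from the completion path. */
static bool demo_take_send_credit(atomic_t *sq_wr_avail)
{
	if (atomic_sub_return(1, sq_wr_avail) < 0) {
		atomic_add(1, sq_wr_avail);	/* undo the reservation */
		return false;
	}
	return true;
}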
@@ -538,33 +541,33 @@ EXPORT_SYMBOL(rtrs_srv_resp_rdma);
* @srv: Session pointer
* @priv: The private pointer that is associated with the session.
*/
-void rtrs_srv_set_sess_priv(struct rtrs_srv *srv, void *priv)
+void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv)
{
srv->priv = priv;
}
EXPORT_SYMBOL(rtrs_srv_set_sess_priv);
-static void unmap_cont_bufs(struct rtrs_srv_sess *sess)
+static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
{
int i;
- for (i = 0; i < sess->mrs_num; i++) {
+ for (i = 0; i < srv_path->mrs_num; i++) {
struct rtrs_srv_mr *srv_mr;
- srv_mr = &sess->mrs[i];
- rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
+ srv_mr = &srv_path->mrs[i];
+ rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
ib_dereg_mr(srv_mr->mr);
- ib_dma_unmap_sg(sess->s.dev->ib_dev, srv_mr->sgt.sgl,
+ ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
sg_free_table(&srv_mr->sgt);
}
- kfree(sess->mrs);
+ kfree(srv_path->mrs);
}
-static int map_cont_bufs(struct rtrs_srv_sess *sess)
+static int map_cont_bufs(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
- struct rtrs_sess *ss = &sess->s;
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *ss = &srv_path->s;
int i, mri, err, mrs_num;
unsigned int chunk_bits;
int chunks_per_mr = 1;
@@ -581,19 +584,19 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
mrs_num = srv->queue_depth;
} else {
chunks_per_mr =
- sess->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
+ srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr);
chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num);
}
- sess->mrs = kcalloc(mrs_num, sizeof(*sess->mrs), GFP_KERNEL);
- if (!sess->mrs)
+ srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL);
+ if (!srv_path->mrs)
return -ENOMEM;
- sess->mrs_num = mrs_num;
+ srv_path->mrs_num = mrs_num;
for (mri = 0; mri < mrs_num; mri++) {
- struct rtrs_srv_mr *srv_mr = &sess->mrs[mri];
+ struct rtrs_srv_mr *srv_mr = &srv_path->mrs[mri];
struct sg_table *sgt = &srv_mr->sgt;
struct scatterlist *s;
struct ib_mr *mr;
@@ -612,13 +615,13 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
sg_set_page(s, srv->chunks[chunks + i],
max_chunk_size, 0);
- nr = ib_dma_map_sg(sess->s.dev->ib_dev, sgt->sgl,
+ nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl,
sgt->nents, DMA_BIDIRECTIONAL);
if (nr < sgt->nents) {
err = nr < 0 ? nr : -EINVAL;
goto free_sg;
}
- mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
+ mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG,
sgt->nents);
if (IS_ERR(mr)) {
err = PTR_ERR(mr);
@@ -634,7 +637,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
if (always_invalidate) {
srv_mr->iu = rtrs_iu_alloc(1,
sizeof(struct rtrs_msg_rkey_rsp),
- GFP_KERNEL, sess->s.dev->ib_dev,
+ GFP_KERNEL, srv_path->s.dev->ib_dev,
DMA_TO_DEVICE, rtrs_srv_rdma_done);
if (!srv_mr->iu) {
err = -ENOMEM;
@@ -644,7 +647,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
}
/* Eventually dma addr for each chunk can be cached */
for_each_sg(sgt->sgl, s, sgt->orig_nents, i)
- sess->dma_addr[chunks + i] = sg_dma_address(s);
+ srv_path->dma_addr[chunks + i] = sg_dma_address(s);
ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));
srv_mr->mr = mr;
@@ -652,75 +655,75 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess)
continue;
err:
while (mri--) {
- srv_mr = &sess->mrs[mri];
+ srv_mr = &srv_path->mrs[mri];
sgt = &srv_mr->sgt;
mr = srv_mr->mr;
- rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
dereg_mr:
ib_dereg_mr(mr);
unmap_sg:
- ib_dma_unmap_sg(sess->s.dev->ib_dev, sgt->sgl,
+ ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl,
sgt->nents, DMA_BIDIRECTIONAL);
free_sg:
sg_free_table(sgt);
}
- kfree(sess->mrs);
+ kfree(srv_path->mrs);
return err;
}
chunk_bits = ilog2(srv->queue_depth - 1) + 1;
- sess->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
+ srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits);
return 0;
}
static void rtrs_srv_hb_err_handler(struct rtrs_con *c)
{
- close_sess(to_srv_sess(c->sess));
+ close_path(to_srv_path(c->path));
}
-static void rtrs_srv_init_hb(struct rtrs_srv_sess *sess)
+static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path)
{
- rtrs_init_hb(&sess->s, &io_comp_cqe,
+ rtrs_init_hb(&srv_path->s, &io_comp_cqe,
RTRS_HB_INTERVAL_MS,
RTRS_HB_MISSED_MAX,
rtrs_srv_hb_err_handler,
rtrs_wq);
}
-static void rtrs_srv_start_hb(struct rtrs_srv_sess *sess)
+static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path)
{
- rtrs_start_hb(&sess->s);
+ rtrs_start_hb(&srv_path->s);
}
-static void rtrs_srv_stop_hb(struct rtrs_srv_sess *sess)
+static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path)
{
- rtrs_stop_hb(&sess->s);
+ rtrs_stop_hb(&srv_path->s);
}
static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_iu *iu;
iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe);
- rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "Sess info response send failed: %s\n",
ib_wc_status_msg(wc->status));
- close_sess(sess);
+ close_path(srv_path);
return;
}
WARN_ON(wc->opcode != IB_WC_SEND);
}
-static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess)
+static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
int up;
@@ -731,18 +734,18 @@ static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess)
mutex_unlock(&srv->paths_ev_mutex);
/* Mark session as established */
- sess->established = true;
+ srv_path->established = true;
}
-static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess)
+static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
- if (!sess->established)
+ if (!srv_path->established)
return;
- sess->established = false;
+ srv_path->established = false;
mutex_lock(&srv->paths_ev_mutex);
WARN_ON(!srv->paths_up);
if (--srv->paths_up == 0)
@@ -750,11 +753,11 @@ static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess)
mutex_unlock(&srv->paths_ev_mutex);
}
-static bool exist_sessname(struct rtrs_srv_ctx *ctx,
- const char *sessname, const uuid_t *path_uuid)
+static bool exist_pathname(struct rtrs_srv_ctx *ctx,
+ const char *pathname, const uuid_t *path_uuid)
{
- struct rtrs_srv *srv;
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_sess *srv;
+ struct rtrs_srv_path *srv_path;
bool found = false;
mutex_lock(&ctx->srv_mutex);
@@ -767,9 +770,9 @@ static bool exist_sessname(struct rtrs_srv_ctx *ctx,
continue;
}
- list_for_each_entry(sess, &srv->paths_list, s.entry) {
- if (strlen(sess->s.sessname) == strlen(sessname) &&
- !strcmp(sess->s.sessname, sessname)) {
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
+ if (strlen(srv_path->s.sessname) == strlen(pathname) &&
+ !strcmp(srv_path->s.sessname, pathname)) {
found = true;
break;
}
@@ -782,14 +785,14 @@ static bool exist_sessname(struct rtrs_srv_ctx *ctx,
return found;
}
-static int post_recv_sess(struct rtrs_srv_sess *sess);
+static int post_recv_path(struct rtrs_srv_path *srv_path);
static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno);
static int process_info_req(struct rtrs_srv_con *con,
struct rtrs_msg_info_req *msg)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct ib_send_wr *reg_wr = NULL;
struct rtrs_msg_info_rsp *rsp;
struct rtrs_iu *tx_iu;
@@ -797,31 +800,32 @@ static int process_info_req(struct rtrs_srv_con *con,
int mri, err;
size_t tx_sz;
- err = post_recv_sess(sess);
+ err = post_recv_path(srv_path);
if (err) {
- rtrs_err(s, "post_recv_sess(), err: %d\n", err);
+ rtrs_err(s, "post_recv_path(), err: %d\n", err);
return err;
}
- if (strchr(msg->sessname, '/') || strchr(msg->sessname, '.')) {
- rtrs_err(s, "sessname cannot contain / and .\n");
+ if (strchr(msg->pathname, '/') || strchr(msg->pathname, '.')) {
+ rtrs_err(s, "pathname cannot contain / and .\n");
return -EINVAL;
}
- if (exist_sessname(sess->srv->ctx,
- msg->sessname, &sess->srv->paths_uuid)) {
- rtrs_err(s, "sessname is duplicated: %s\n", msg->sessname);
+ if (exist_pathname(srv_path->srv->ctx,
+ msg->pathname, &srv_path->srv->paths_uuid)) {
+ rtrs_err(s, "pathname is duplicated: %s\n", msg->pathname);
return -EPERM;
}
- strscpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname));
+ strscpy(srv_path->s.sessname, msg->pathname,
+ sizeof(srv_path->s.sessname));
- rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL);
+ rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL);
if (!rwr)
return -ENOMEM;
tx_sz = sizeof(*rsp);
- tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num;
- tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, sess->s.dev->ib_dev,
+ tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num;
+ tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev,
DMA_TO_DEVICE, rtrs_srv_info_rsp_done);
if (!tx_iu) {
err = -ENOMEM;
@@ -830,10 +834,10 @@ static int process_info_req(struct rtrs_srv_con *con,
rsp = tx_iu->buf;
rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP);
- rsp->sg_cnt = cpu_to_le16(sess->mrs_num);
+ rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num);
- for (mri = 0; mri < sess->mrs_num; mri++) {
- struct ib_mr *mr = sess->mrs[mri].mr;
+ for (mri = 0; mri < srv_path->mrs_num; mri++) {
+ struct ib_mr *mr = srv_path->mrs[mri].mr;
rsp->desc[mri].addr = cpu_to_le64(mr->iova);
rsp->desc[mri].key = cpu_to_le32(mr->rkey);
@@ -854,13 +858,13 @@ static int process_info_req(struct rtrs_srv_con *con,
reg_wr = &rwr[mri].wr;
}
- err = rtrs_srv_create_sess_files(sess);
+ err = rtrs_srv_create_path_files(srv_path);
if (err)
goto iu_free;
- kobject_get(&sess->kobj);
- get_device(&sess->srv->dev);
- rtrs_srv_change_state(sess, RTRS_SRV_CONNECTED);
- rtrs_srv_start_hb(sess);
+ kobject_get(&srv_path->kobj);
+ get_device(&srv_path->srv->dev);
+ rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED);
+ rtrs_srv_start_hb(srv_path);
/*
* We do not account number of established connections at the current
@@ -868,9 +872,10 @@ static int process_info_req(struct rtrs_srv_con *con,
* all connections are successfully established. Thus, simply notify
* listener with a proper event if we are the first path.
*/
- rtrs_srv_sess_up(sess);
+ rtrs_srv_path_up(srv_path);
- ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr,
+ ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev,
+ tx_iu->dma_addr,
tx_iu->size, DMA_TO_DEVICE);
/* Send info response */
@@ -878,7 +883,7 @@ static int process_info_req(struct rtrs_srv_con *con,
if (err) {
rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err);
iu_free:
- rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1);
}
rwr_free:
kfree(rwr);
@@ -889,8 +894,8 @@ rwr_free:
static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_msg_info_req *msg;
struct rtrs_iu *iu;
int err;
@@ -910,7 +915,7 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
wc->byte_len);
goto close;
}
- ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr,
+ ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr,
iu->size, DMA_FROM_DEVICE);
msg = iu->buf;
if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) {
@@ -923,22 +928,22 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc)
goto close;
out:
- rtrs_iu_free(iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1);
return;
close:
- close_sess(sess);
+ close_path(srv_path);
goto out;
}
static int post_recv_info_req(struct rtrs_srv_con *con)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_iu *rx_iu;
int err;
rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req),
- GFP_KERNEL, sess->s.dev->ib_dev,
+ GFP_KERNEL, srv_path->s.dev->ib_dev,
DMA_FROM_DEVICE, rtrs_srv_info_req_done);
if (!rx_iu)
return -ENOMEM;
@@ -946,7 +951,7 @@ static int post_recv_info_req(struct rtrs_srv_con *con)
err = rtrs_iu_post_recv(&con->c, rx_iu);
if (err) {
rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err);
- rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1);
+ rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1);
return err;
}
@@ -966,20 +971,20 @@ static int post_recv_io(struct rtrs_srv_con *con, size_t q_size)
return 0;
}
-static int post_recv_sess(struct rtrs_srv_sess *sess)
+static int post_recv_path(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
- struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *s = &srv_path->s;
size_t q_size;
int err, cid;
- for (cid = 0; cid < sess->s.con_num; cid++) {
+ for (cid = 0; cid < srv_path->s.con_num; cid++) {
if (cid == 0)
q_size = SERVICE_CON_QUEUE_DEPTH;
else
q_size = srv->queue_depth;
- err = post_recv_io(to_srv_con(sess->s.con[cid]), q_size);
+ err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
if (err) {
rtrs_err(s, "post_recv_io(), err: %d\n", err);
return err;
@@ -993,9 +998,9 @@ static void process_read(struct rtrs_srv_con *con,
struct rtrs_msg_rdma_read *msg,
u32 buf_id, u32 off)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
struct rtrs_srv_op *id;
@@ -1003,10 +1008,10 @@ static void process_read(struct rtrs_srv_con *con,
void *data;
int ret;
- if (sess->state != RTRS_SRV_CONNECTED) {
+ if (srv_path->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
"Processing read request failed, session is disconnected, sess state %s\n",
- rtrs_srv_state_str(sess->state));
+ rtrs_srv_state_str(srv_path->state));
return;
}
if (msg->sg_cnt != 1 && msg->sg_cnt != 0) {
@@ -1014,9 +1019,9 @@ static void process_read(struct rtrs_srv_con *con,
"Processing read request failed, invalid message\n");
return;
}
- rtrs_srv_get_ops_ids(sess);
- rtrs_srv_update_rdma_stats(sess->stats, off, READ);
- id = sess->ops_ids[buf_id];
+ rtrs_srv_get_ops_ids(srv_path);
+ rtrs_srv_update_rdma_stats(srv_path->stats, off, READ);
+ id = srv_path->ops_ids[buf_id];
id->con = con;
id->dir = READ;
id->msg_id = buf_id;
@@ -1042,18 +1047,18 @@ send_err_msg:
rtrs_err_rl(s,
"Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n",
buf_id, ret);
- close_sess(sess);
+ close_path(srv_path);
}
- rtrs_srv_put_ops_ids(sess);
+ rtrs_srv_put_ops_ids(srv_path);
}
static void process_write(struct rtrs_srv_con *con,
struct rtrs_msg_rdma_write *req,
u32 buf_id, u32 off)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_srv_ctx *ctx = srv->ctx;
struct rtrs_srv_op *id;
@@ -1061,15 +1066,15 @@ static void process_write(struct rtrs_srv_con *con,
void *data;
int ret;
- if (sess->state != RTRS_SRV_CONNECTED) {
+ if (srv_path->state != RTRS_SRV_CONNECTED) {
rtrs_err_rl(s,
"Processing write request failed, session is disconnected, sess state %s\n",
- rtrs_srv_state_str(sess->state));
+ rtrs_srv_state_str(srv_path->state));
return;
}
- rtrs_srv_get_ops_ids(sess);
- rtrs_srv_update_rdma_stats(sess->stats, off, WRITE);
- id = sess->ops_ids[buf_id];
+ rtrs_srv_get_ops_ids(srv_path);
+ rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE);
+ id = srv_path->ops_ids[buf_id];
id->con = con;
id->dir = WRITE;
id->msg_id = buf_id;
@@ -1094,20 +1099,21 @@ send_err_msg:
rtrs_err_rl(s,
"Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n",
buf_id, ret);
- close_sess(sess);
+ close_path(srv_path);
}
- rtrs_srv_put_ops_ids(sess);
+ rtrs_srv_put_ops_ids(srv_path);
}
static void process_io_req(struct rtrs_srv_con *con, void *msg,
u32 id, u32 off)
{
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
struct rtrs_msg_rdma_hdr *hdr;
unsigned int type;
- ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, sess->dma_addr[id],
+ ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev,
+ srv_path->dma_addr[id],
max_chunk_size, DMA_BIDIRECTIONAL);
hdr = msg;
type = le16_to_cpu(hdr->type);
@@ -1129,7 +1135,7 @@ static void process_io_req(struct rtrs_srv_con *con, void *msg,
return;
err:
- close_sess(sess);
+ close_path(srv_path);
}
static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
@@ -1137,16 +1143,16 @@ static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
struct rtrs_srv_mr *mr =
container_of(wc->wr_cqe, typeof(*mr), inv_cqe);
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
u32 msg_id, off;
void *data;
if (wc->status != IB_WC_SUCCESS) {
rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n",
ib_wc_status_msg(wc->status));
- close_sess(sess);
+ close_path(srv_path);
}
msg_id = mr->msg_id;
off = mr->msg_off;
@@ -1194,9 +1200,9 @@ static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con)
static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context);
- struct rtrs_sess *s = con->c.sess;
- struct rtrs_srv_sess *sess = to_srv_sess(s);
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_path *s = con->c.path;
+ struct rtrs_srv_path *srv_path = to_srv_path(s);
+ struct rtrs_srv_sess *srv = srv_path->srv;
u32 imm_type, imm_payload;
int err;
@@ -1206,7 +1212,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
"%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n",
ib_wc_status_msg(wc->status), wc->wr_cqe,
wc->opcode, wc->vendor_err, wc->byte_len);
- close_sess(sess);
+ close_path(srv_path);
}
return;
}
@@ -1222,7 +1228,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
if (err) {
rtrs_err(s, "rtrs_post_recv(), err: %d\n", err);
- close_sess(sess);
+ close_path(srv_path);
break;
}
rtrs_from_imm(be32_to_cpu(wc->ex.imm_data),
@@ -1231,16 +1237,16 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
u32 msg_id, off;
void *data;
- msg_id = imm_payload >> sess->mem_bits;
- off = imm_payload & ((1 << sess->mem_bits) - 1);
+ msg_id = imm_payload >> srv_path->mem_bits;
+ off = imm_payload & ((1 << srv_path->mem_bits) - 1);
if (msg_id >= srv->queue_depth || off >= max_chunk_size) {
rtrs_err(s, "Wrong msg_id %u, off %u\n",
msg_id, off);
- close_sess(sess);
+ close_path(srv_path);
return;
}
if (always_invalidate) {
- struct rtrs_srv_mr *mr = &sess->mrs[msg_id];
+ struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id];
mr->msg_off = off;
mr->msg_id = msg_id;
@@ -1248,7 +1254,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
if (err) {
rtrs_err(s, "rtrs_post_recv(), err: %d\n",
err);
- close_sess(sess);
+ close_path(srv_path);
break;
}
} else {
@@ -1257,10 +1263,10 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
}
} else if (imm_type == RTRS_HB_MSG_IMM) {
WARN_ON(con->c.cid);
- rtrs_send_hb_ack(&sess->s);
+ rtrs_send_hb_ack(&srv_path->s);
} else if (imm_type == RTRS_HB_ACK_IMM) {
WARN_ON(con->c.cid);
- sess->s.hb_missed_cnt = 0;
+ srv_path->s.hb_missed_cnt = 0;
} else {
rtrs_wrn(s, "Unknown IMM type %u\n", imm_type);
}
@@ -1284,22 +1290,23 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
}
/**
- * rtrs_srv_get_sess_name() - Get rtrs_srv peer hostname.
+ * rtrs_srv_get_path_name() - Get rtrs_srv peer hostname.
* @srv: Session
- * @sessname: Sessname buffer
+ * @pathname: Pathname buffer
* @len: Length of sessname buffer
*/
-int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len)
+int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname,
+ size_t len)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
int err = -ENOTCONN;
mutex_lock(&srv->paths_mutex);
- list_for_each_entry(sess, &srv->paths_list, s.entry) {
- if (sess->state != RTRS_SRV_CONNECTED)
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
+ if (srv_path->state != RTRS_SRV_CONNECTED)
continue;
- strscpy(sessname, sess->s.sessname,
- min_t(size_t, sizeof(sess->s.sessname), len));
+ strscpy(pathname, srv_path->s.sessname,
+ min_t(size_t, sizeof(srv_path->s.sessname), len));
err = 0;
break;
}
@@ -1307,44 +1314,45 @@ int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len)
return err;
}
-EXPORT_SYMBOL(rtrs_srv_get_sess_name);
+EXPORT_SYMBOL(rtrs_srv_get_path_name);
/**
* rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth.
* @srv: Session
*/
-int rtrs_srv_get_queue_depth(struct rtrs_srv *srv)
+int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv)
{
return srv->queue_depth;
}
EXPORT_SYMBOL(rtrs_srv_get_queue_depth);
-static int find_next_bit_ring(struct rtrs_srv_sess *sess)
+static int find_next_bit_ring(struct rtrs_srv_path *srv_path)
{
- struct ib_device *ib_dev = sess->s.dev->ib_dev;
+ struct ib_device *ib_dev = srv_path->s.dev->ib_dev;
int v;
- v = cpumask_next(sess->cur_cq_vector, &cq_affinity_mask);
+ v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask);
if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
v = cpumask_first(&cq_affinity_mask);
return v;
}
-static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess)
+static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path)
{
- sess->cur_cq_vector = find_next_bit_ring(sess);
+ srv_path->cur_cq_vector = find_next_bit_ring(srv_path);
- return sess->cur_cq_vector;
+ return srv_path->cur_cq_vector;
}
static void rtrs_srv_dev_release(struct device *dev)
{
- struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
+ struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess,
+ dev);
kfree(srv);
}
-static void free_srv(struct rtrs_srv *srv)
+static void free_srv(struct rtrs_srv_sess *srv)
{
int i;
@@ -1358,11 +1366,11 @@ static void free_srv(struct rtrs_srv *srv)
put_device(&srv->dev);
}
-static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx,
+static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx,
const uuid_t *paths_uuid,
bool first_conn)
{
- struct rtrs_srv *srv;
+ struct rtrs_srv_sess *srv;
int i;
mutex_lock(&ctx->srv_mutex);
@@ -1424,7 +1432,7 @@ err_free_srv:
return ERR_PTR(-ENOMEM);
}
-static void put_srv(struct rtrs_srv *srv)
+static void put_srv(struct rtrs_srv_sess *srv)
{
if (refcount_dec_and_test(&srv->refcount)) {
struct rtrs_srv_ctx *ctx = srv->ctx;
@@ -1438,23 +1446,23 @@ static void put_srv(struct rtrs_srv *srv)
}
}
-static void __add_path_to_srv(struct rtrs_srv *srv,
- struct rtrs_srv_sess *sess)
+static void __add_path_to_srv(struct rtrs_srv_sess *srv,
+ struct rtrs_srv_path *srv_path)
{
- list_add_tail(&sess->s.entry, &srv->paths_list);
+ list_add_tail(&srv_path->s.entry, &srv->paths_list);
srv->paths_num++;
WARN_ON(srv->paths_num >= MAX_PATHS_NUM);
}
-static void del_path_from_srv(struct rtrs_srv_sess *sess)
+static void del_path_from_srv(struct rtrs_srv_path *srv_path)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
if (WARN_ON(!srv))
return;
mutex_lock(&srv->paths_mutex);
- list_del(&sess->s.entry);
+ list_del(&srv_path->s.entry);
WARN_ON(!srv->paths_num);
srv->paths_num--;
mutex_unlock(&srv->paths_mutex);
@@ -1484,47 +1492,47 @@ static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b)
}
}
-static bool __is_path_w_addr_exists(struct rtrs_srv *srv,
+static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv,
struct rdma_addr *addr)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
- list_for_each_entry(sess, &srv->paths_list, s.entry)
- if (!sockaddr_cmp((struct sockaddr *)&sess->s.dst_addr,
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry)
+ if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr,
(struct sockaddr *)&addr->dst_addr) &&
- !sockaddr_cmp((struct sockaddr *)&sess->s.src_addr,
+ !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr,
(struct sockaddr *)&addr->src_addr))
return true;
return false;
}
-static void free_sess(struct rtrs_srv_sess *sess)
+static void free_path(struct rtrs_srv_path *srv_path)
{
- if (sess->kobj.state_in_sysfs) {
- kobject_del(&sess->kobj);
- kobject_put(&sess->kobj);
+ if (srv_path->kobj.state_in_sysfs) {
+ kobject_del(&srv_path->kobj);
+ kobject_put(&srv_path->kobj);
} else {
- kfree(sess->stats);
- kfree(sess);
+ kfree(srv_path->stats);
+ kfree(srv_path);
}
}
static void rtrs_srv_close_work(struct work_struct *work)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
struct rtrs_srv_con *con;
int i;
- sess = container_of(work, typeof(*sess), close_work);
+ srv_path = container_of(work, typeof(*srv_path), close_work);
- rtrs_srv_destroy_sess_files(sess);
- rtrs_srv_stop_hb(sess);
+ rtrs_srv_destroy_path_files(srv_path);
+ rtrs_srv_stop_hb(srv_path);
- for (i = 0; i < sess->s.con_num; i++) {
- if (!sess->s.con[i])
+ for (i = 0; i < srv_path->s.con_num; i++) {
+ if (!srv_path->s.con[i])
continue;
- con = to_srv_con(sess->s.con[i]);
+ con = to_srv_con(srv_path->s.con[i]);
rdma_disconnect(con->c.cm_id);
ib_drain_qp(con->c.qp);
}
@@ -1533,41 +1541,41 @@ static void rtrs_srv_close_work(struct work_struct *work)
* Degrade ref count to the usual model with a single shared
* atomic_t counter
*/
- percpu_ref_kill(&sess->ids_inflight_ref);
+ percpu_ref_kill(&srv_path->ids_inflight_ref);
/* Wait for all completion */
- wait_for_completion(&sess->complete_done);
+ wait_for_completion(&srv_path->complete_done);
/* Notify upper layer if we are the last path */
- rtrs_srv_sess_down(sess);
+ rtrs_srv_path_down(srv_path);
- unmap_cont_bufs(sess);
- rtrs_srv_free_ops_ids(sess);
+ unmap_cont_bufs(srv_path);
+ rtrs_srv_free_ops_ids(srv_path);
- for (i = 0; i < sess->s.con_num; i++) {
- if (!sess->s.con[i])
+ for (i = 0; i < srv_path->s.con_num; i++) {
+ if (!srv_path->s.con[i])
continue;
- con = to_srv_con(sess->s.con[i]);
+ con = to_srv_con(srv_path->s.con[i]);
rtrs_cq_qp_destroy(&con->c);
rdma_destroy_id(con->c.cm_id);
kfree(con);
}
- rtrs_ib_dev_put(sess->s.dev);
+ rtrs_ib_dev_put(srv_path->s.dev);
- del_path_from_srv(sess);
- put_srv(sess->srv);
- sess->srv = NULL;
- rtrs_srv_change_state(sess, RTRS_SRV_CLOSED);
+ del_path_from_srv(srv_path);
+ put_srv(srv_path->srv);
+ srv_path->srv = NULL;
+ rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED);
- kfree(sess->dma_addr);
- kfree(sess->s.con);
- free_sess(sess);
+ kfree(srv_path->dma_addr);
+ kfree(srv_path->s.con);
+ free_path(srv_path);
}
-static int rtrs_rdma_do_accept(struct rtrs_srv_sess *sess,
+static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path,
struct rdma_cm_id *cm_id)
{
- struct rtrs_srv *srv = sess->srv;
+ struct rtrs_srv_sess *srv = srv_path->srv;
struct rtrs_msg_conn_rsp msg;
struct rdma_conn_param param;
int err;
@@ -1615,25 +1623,25 @@ static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno)
return errno;
}
-static struct rtrs_srv_sess *
-__find_sess(struct rtrs_srv *srv, const uuid_t *sess_uuid)
+static struct rtrs_srv_path *
+__find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
- list_for_each_entry(sess, &srv->paths_list, s.entry) {
- if (uuid_equal(&sess->s.uuid, sess_uuid))
- return sess;
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry) {
+ if (uuid_equal(&srv_path->s.uuid, sess_uuid))
+ return srv_path;
}
return NULL;
}
-static int create_con(struct rtrs_srv_sess *sess,
+static int create_con(struct rtrs_srv_path *srv_path,
struct rdma_cm_id *cm_id,
unsigned int cid)
{
- struct rtrs_srv *srv = sess->srv;
- struct rtrs_sess *s = &sess->s;
+ struct rtrs_srv_sess *srv = srv_path->srv;
+ struct rtrs_path *s = &srv_path->s;
struct rtrs_srv_con *con;
u32 cq_num, max_send_wr, max_recv_wr, wr_limit;
@@ -1648,10 +1656,10 @@ static int create_con(struct rtrs_srv_sess *sess,
spin_lock_init(&con->rsp_wr_wait_lock);
INIT_LIST_HEAD(&con->rsp_wr_wait_list);
con->c.cm_id = cm_id;
- con->c.sess = &sess->s;
+ con->c.path = &srv_path->s;
con->c.cid = cid;
atomic_set(&con->c.wr_cnt, 1);
- wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr;
+ wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr;
if (con->c.cid == 0) {
/*
@@ -1684,10 +1692,10 @@ static int create_con(struct rtrs_srv_sess *sess,
}
cq_num = max_send_wr + max_recv_wr;
atomic_set(&con->c.sq_wr_avail, max_send_wr);
- cq_vector = rtrs_srv_get_next_cq_vector(sess);
+ cq_vector = rtrs_srv_get_next_cq_vector(srv_path);
/* TODO: SOFTIRQ can be faster, but be careful with softirq context */
- err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_num,
+ err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num,
max_send_wr, max_recv_wr,
IB_POLL_WORKQUEUE);
if (err) {
@@ -1699,8 +1707,8 @@ static int create_con(struct rtrs_srv_sess *sess,
if (err)
goto free_cqqp;
}
- WARN_ON(sess->s.con[cid]);
- sess->s.con[cid] = &con->c;
+ WARN_ON(srv_path->s.con[cid]);
+ srv_path->s.con[cid] = &con->c;
/*
* Change context from server to current connection. The other
@@ -1719,13 +1727,13 @@ err:
return err;
}
-static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
+static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv,
struct rdma_cm_id *cm_id,
unsigned int con_num,
unsigned int recon_cnt,
const uuid_t *uuid)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
int err = -ENOMEM;
char str[NAME_MAX];
struct rtrs_addr path;
@@ -1739,74 +1747,76 @@ static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
pr_err("Path with same addr exists\n");
goto err;
}
- sess = kzalloc(sizeof(*sess), GFP_KERNEL);
- if (!sess)
+ srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL);
+ if (!srv_path)
goto err;
- sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL);
- if (!sess->stats)
+ srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL);
+ if (!srv_path->stats)
goto err_free_sess;
- sess->stats->sess = sess;
+ srv_path->stats->srv_path = srv_path;
- sess->dma_addr = kcalloc(srv->queue_depth, sizeof(*sess->dma_addr),
- GFP_KERNEL);
- if (!sess->dma_addr)
+ srv_path->dma_addr = kcalloc(srv->queue_depth,
+ sizeof(*srv_path->dma_addr),
+ GFP_KERNEL);
+ if (!srv_path->dma_addr)
goto err_free_stats;
- sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL);
- if (!sess->s.con)
+ srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con),
+ GFP_KERNEL);
+ if (!srv_path->s.con)
goto err_free_dma_addr;
- sess->state = RTRS_SRV_CONNECTING;
- sess->srv = srv;
- sess->cur_cq_vector = -1;
- sess->s.dst_addr = cm_id->route.addr.dst_addr;
- sess->s.src_addr = cm_id->route.addr.src_addr;
+ srv_path->state = RTRS_SRV_CONNECTING;
+ srv_path->srv = srv;
+ srv_path->cur_cq_vector = -1;
+ srv_path->s.dst_addr = cm_id->route.addr.dst_addr;
+ srv_path->s.src_addr = cm_id->route.addr.src_addr;
/* temporary until receiving session-name from client */
- path.src = &sess->s.src_addr;
- path.dst = &sess->s.dst_addr;
+ path.src = &srv_path->s.src_addr;
+ path.dst = &srv_path->s.dst_addr;
rtrs_addr_to_str(&path, str, sizeof(str));
- strscpy(sess->s.sessname, str, sizeof(sess->s.sessname));
-
- sess->s.con_num = con_num;
- sess->s.irq_con_num = con_num;
- sess->s.recon_cnt = recon_cnt;
- uuid_copy(&sess->s.uuid, uuid);
- spin_lock_init(&sess->state_lock);
- INIT_WORK(&sess->close_work, rtrs_srv_close_work);
- rtrs_srv_init_hb(sess);
-
- sess->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
- if (!sess->s.dev) {
+ strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname));
+
+ srv_path->s.con_num = con_num;
+ srv_path->s.irq_con_num = con_num;
+ srv_path->s.recon_cnt = recon_cnt;
+ uuid_copy(&srv_path->s.uuid, uuid);
+ spin_lock_init(&srv_path->state_lock);
+ INIT_WORK(&srv_path->close_work, rtrs_srv_close_work);
+ rtrs_srv_init_hb(srv_path);
+
+ srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd);
+ if (!srv_path->s.dev) {
err = -ENOMEM;
goto err_free_con;
}
- err = map_cont_bufs(sess);
+ err = map_cont_bufs(srv_path);
if (err)
goto err_put_dev;
- err = rtrs_srv_alloc_ops_ids(sess);
+ err = rtrs_srv_alloc_ops_ids(srv_path);
if (err)
goto err_unmap_bufs;
- __add_path_to_srv(srv, sess);
+ __add_path_to_srv(srv, srv_path);
- return sess;
+ return srv_path;
err_unmap_bufs:
- unmap_cont_bufs(sess);
+ unmap_cont_bufs(srv_path);
err_put_dev:
- rtrs_ib_dev_put(sess->s.dev);
+ rtrs_ib_dev_put(srv_path->s.dev);
err_free_con:
- kfree(sess->s.con);
+ kfree(srv_path->s.con);
err_free_dma_addr:
- kfree(sess->dma_addr);
+ kfree(srv_path->dma_addr);
err_free_stats:
- kfree(sess->stats);
+ kfree(srv_path->stats);
err_free_sess:
- kfree(sess);
+ kfree(srv_path);
err:
return ERR_PTR(err);
}
@@ -1816,8 +1826,8 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
size_t len)
{
struct rtrs_srv_ctx *ctx = cm_id->context;
- struct rtrs_srv_sess *sess;
- struct rtrs_srv *srv;
+ struct rtrs_srv_path *srv_path;
+ struct rtrs_srv_sess *srv;
u16 version, con_num, cid;
u16 recon_cnt;
@@ -1857,16 +1867,16 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
goto reject_w_err;
}
mutex_lock(&srv->paths_mutex);
- sess = __find_sess(srv, &msg->sess_uuid);
- if (sess) {
- struct rtrs_sess *s = &sess->s;
+ srv_path = __find_path(srv, &msg->sess_uuid);
+ if (srv_path) {
+ struct rtrs_path *s = &srv_path->s;
/* Session already holds a reference */
put_srv(srv);
- if (sess->state != RTRS_SRV_CONNECTING) {
+ if (srv_path->state != RTRS_SRV_CONNECTING) {
rtrs_err(s, "Session in wrong state: %s\n",
- rtrs_srv_state_str(sess->state));
+ rtrs_srv_state_str(srv_path->state));
mutex_unlock(&srv->paths_mutex);
goto reject_w_err;
}
@@ -1886,19 +1896,19 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
goto reject_w_err;
}
} else {
- sess = __alloc_sess(srv, cm_id, con_num, recon_cnt,
+ srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt,
&msg->sess_uuid);
- if (IS_ERR(sess)) {
+ if (IS_ERR(srv_path)) {
mutex_unlock(&srv->paths_mutex);
put_srv(srv);
- err = PTR_ERR(sess);
+ err = PTR_ERR(srv_path);
pr_err("RTRS server session allocation failed: %d\n", err);
goto reject_w_err;
}
}
- err = create_con(sess, cm_id, cid);
+ err = create_con(srv_path, cm_id, cid);
if (err) {
- rtrs_err((&sess->s), "create_con(), error %d\n", err);
+ rtrs_err((&srv_path->s), "create_con(), error %d\n", err);
rtrs_rdma_do_reject(cm_id, err);
/*
* Since session has other connections we follow normal way
@@ -1907,9 +1917,9 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id,
*/
goto close_and_return_err;
}
- err = rtrs_rdma_do_accept(sess, cm_id);
+ err = rtrs_rdma_do_accept(srv_path, cm_id);
if (err) {
- rtrs_err((&sess->s), "rtrs_rdma_do_accept(), error %d\n", err);
+ rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err);
rtrs_rdma_do_reject(cm_id, err);
/*
* Since current connection was successfully added to the
@@ -1929,7 +1939,7 @@ reject_w_err:
close_and_return_err:
mutex_unlock(&srv->paths_mutex);
- close_sess(sess);
+ close_path(srv_path);
return err;
}
@@ -1937,14 +1947,14 @@ close_and_return_err:
static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *ev)
{
- struct rtrs_srv_sess *sess = NULL;
- struct rtrs_sess *s = NULL;
+ struct rtrs_srv_path *srv_path = NULL;
+ struct rtrs_path *s = NULL;
if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
struct rtrs_con *c = cm_id->context;
- s = c->sess;
- sess = to_srv_sess(s);
+ s = c->path;
+ srv_path = to_srv_path(s);
}
switch (ev->event) {
@@ -1968,7 +1978,7 @@ static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id,
case RDMA_CM_EVENT_ADDR_CHANGE:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
case RDMA_CM_EVENT_DEVICE_REMOVAL:
- close_sess(sess);
+ close_path(srv_path);
break;
default:
pr_err("Ignoring unexpected CM event %s, err %d\n",
@@ -2176,23 +2186,23 @@ struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port)
}
EXPORT_SYMBOL(rtrs_srv_open);
-static void close_sessions(struct rtrs_srv *srv)
+static void close_paths(struct rtrs_srv_sess *srv)
{
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
mutex_lock(&srv->paths_mutex);
- list_for_each_entry(sess, &srv->paths_list, s.entry)
- close_sess(sess);
+ list_for_each_entry(srv_path, &srv->paths_list, s.entry)
+ close_path(srv_path);
mutex_unlock(&srv->paths_mutex);
}
static void close_ctx(struct rtrs_srv_ctx *ctx)
{
- struct rtrs_srv *srv;
+ struct rtrs_srv_sess *srv;
mutex_lock(&ctx->srv_mutex);
list_for_each_entry(srv, &ctx->srv_list, ctx_list)
- close_sessions(srv);
+ close_paths(srv);
mutex_unlock(&ctx->srv_mutex);
flush_workqueue(rtrs_wq);
}
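
One computation in this file is worth unpacking: map_cont_bufs() sets chunk_bits = ilog2(queue_depth - 1) + 1 and mem_bits = MAX_IMM_PAYL_BITS - chunk_bits, and rtrs_srv_rdma_done() reverses the split with msg_id = imm_payload >> mem_bits and off = imm_payload & ((1 << mem_bits) - 1), so a single 32-bit immediate carries both the chunk index and the offset. A standalone round-trip check, with the widths chosen purely for illustration (28 payload bits, queue depth 512, not the driver's actual constants):

#include <stdint.h>
#include <assert.h>

enum {
	IMM_PAYL_BITS = 28,	/* illustrative stand-in for MAX_IMM_PAYL_BITS */
	CHUNK_BITS    = 9,	/* ilog2(512 - 1) + 1 */
	MEM_BITS      = IMM_PAYL_BITS - CHUNK_BITS,
};

static uint32_t demo_pack(uint32_t msg_id, uint32_t off)
{
	return (msg_id << MEM_BITS) | (off & ((1u << MEM_BITS) - 1));
}

int main(void)
{
	uint32_t imm = demo_pack(137, 4096);

	assert((imm >> MEM_BITS) == 137);
	assert((imm & ((1u << MEM_BITS) - 1)) == 4096);
	return 0;
}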
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 7d403c12faf3..6292e87f6afd 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -37,7 +37,7 @@ struct rtrs_srv_stats_rdma_stats {
struct rtrs_srv_stats {
struct kobject kobj_stats;
struct rtrs_srv_stats_rdma_stats rdma_stats;
- struct rtrs_srv_sess *sess;
+ struct rtrs_srv_path *srv_path;
};
struct rtrs_srv_con {
@@ -71,9 +71,9 @@ struct rtrs_srv_mr {
struct rtrs_iu *iu; /* send buffer for new rkey msg */
};
-struct rtrs_srv_sess {
- struct rtrs_sess s;
- struct rtrs_srv *srv;
+struct rtrs_srv_path {
+ struct rtrs_path s;
+ struct rtrs_srv_sess *srv;
struct work_struct close_work;
enum rtrs_srv_state state;
spinlock_t state_lock;
@@ -90,7 +90,7 @@ struct rtrs_srv_sess {
struct rtrs_srv_stats *stats;
};
-struct rtrs_srv {
+struct rtrs_srv_sess {
struct list_head paths_list;
int paths_up;
struct mutex paths_ev_mutex;
@@ -125,7 +125,7 @@ struct rtrs_srv_ib_ctx {
extern struct class *rtrs_dev_class;
-void close_sess(struct rtrs_srv_sess *sess);
+void close_path(struct rtrs_srv_path *srv_path);
static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
size_t size, int d)
@@ -142,7 +142,7 @@ ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
char *page, size_t len);
/* functions which are implemented in rtrs-srv-sysfs.c */
-int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess);
-void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess);
+int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path);
+void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path);
#endif /* RTRS_SRV_H */
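
The header also makes the renaming mechanics visible: struct rtrs_srv_path embeds struct rtrs_path as member s, and to_srv_path() in rtrs-srv.c recovers the outer struct from a pointer to the embedded one via container_of(). A freestanding illustration of the idiom, with invented types and the macro spelled out in essence as the kernel defines it:

#include <stddef.h>

struct inner { int id; };

struct outer {
	struct inner in;	/* embedded, like rtrs_path inside rtrs_srv_path */
	int extra;
};

#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct outer *to_outer(struct inner *i)
{
	return demo_container_of(i, struct outer, in);
}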
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index 37952c8e768c..4da889103a5f 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -69,16 +69,16 @@ EXPORT_SYMBOL_GPL(rtrs_iu_free);
int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu)
{
- struct rtrs_sess *sess = con->sess;
+ struct rtrs_path *path = con->path;
struct ib_recv_wr wr;
struct ib_sge list;
list.addr = iu->dma_addr;
list.length = iu->size;
- list.lkey = sess->dev->ib_pd->local_dma_lkey;
+ list.lkey = path->dev->ib_pd->local_dma_lkey;
if (list.length == 0) {
- rtrs_wrn(con->sess,
+ rtrs_wrn(con->path,
"Posting receive work request failed, sg list is empty\n");
return -EINVAL;
}
@@ -126,7 +126,7 @@ static int rtrs_post_send(struct ib_qp *qp, struct ib_send_wr *head,
int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
struct ib_send_wr *head)
{
- struct rtrs_sess *sess = con->sess;
+ struct rtrs_path *path = con->path;
struct ib_send_wr wr;
struct ib_sge list;
@@ -135,7 +135,7 @@ int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size,
list.addr = iu->dma_addr;
list.length = size;
- list.lkey = sess->dev->ib_pd->local_dma_lkey;
+ list.lkey = path->dev->ib_pd->local_dma_lkey;
wr = (struct ib_send_wr) {
.wr_cqe = &iu->cqe,
@@ -188,11 +188,11 @@ static int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con,
struct ib_send_wr *head)
{
struct ib_rdma_wr wr;
- struct rtrs_sess *sess = con->sess;
+ struct rtrs_path *path = con->path;
enum ib_send_flags sflags;
atomic_dec_if_positive(&con->sq_wr_avail);
- sflags = (atomic_inc_return(&con->wr_cnt) % sess->signal_interval) ?
+ sflags = (atomic_inc_return(&con->wr_cnt) % path->signal_interval) ?
0 : IB_SEND_SIGNALED;
wr = (struct ib_rdma_wr) {
@@ -211,12 +211,12 @@ static void qp_event_handler(struct ib_event *ev, void *ctx)
switch (ev->event) {
case IB_EVENT_COMM_EST:
- rtrs_info(con->sess, "QP event %s (%d) received\n",
+ rtrs_info(con->path, "QP event %s (%d) received\n",
ib_event_msg(ev->event), ev->event);
rdma_notify(con->cm_id, IB_EVENT_COMM_EST);
break;
default:
- rtrs_info(con->sess, "Unhandled QP event %s (%d) received\n",
+ rtrs_info(con->path, "Unhandled QP event %s (%d) received\n",
ib_event_msg(ev->event), ev->event);
break;
}
@@ -224,7 +224,7 @@ static void qp_event_handler(struct ib_event *ev, void *ctx)
static bool is_pollqueue(struct rtrs_con *con)
{
- return con->cid >= con->sess->irq_con_num;
+ return con->cid >= con->path->irq_con_num;
}
static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
@@ -240,7 +240,7 @@ static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
if (IS_ERR(cq)) {
- rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
+ rtrs_err(con->path, "Creating completion queue failed, errno: %ld\n",
PTR_ERR(cq));
return PTR_ERR(cq);
}
@@ -271,7 +271,7 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
ret = rdma_create_qp(cm_id, pd, &init_attr);
if (ret) {
- rtrs_err(con->sess, "Creating QP failed, err: %d\n", ret);
+ rtrs_err(con->path, "Creating QP failed, err: %d\n", ret);
return ret;
}
con->qp = cm_id->qp;
@@ -290,7 +290,7 @@ static void destroy_cq(struct rtrs_con *con)
con->cq = NULL;
}
-int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
+int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con,
u32 max_send_sge, int cq_vector, int nr_cqe,
u32 max_send_wr, u32 max_recv_wr,
enum ib_poll_context poll_ctx)
@@ -301,13 +301,13 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
if (err)
return err;
- err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
+ err = create_qp(con, path->dev->ib_pd, max_send_wr, max_recv_wr,
max_send_sge);
if (err) {
destroy_cq(con);
return err;
}
- con->sess = sess;
+ con->path = path;
return 0;
}
@@ -323,24 +323,24 @@ void rtrs_cq_qp_destroy(struct rtrs_con *con)
}
EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);
-static void schedule_hb(struct rtrs_sess *sess)
+static void schedule_hb(struct rtrs_path *path)
{
- queue_delayed_work(sess->hb_wq, &sess->hb_dwork,
- msecs_to_jiffies(sess->hb_interval_ms));
+ queue_delayed_work(path->hb_wq, &path->hb_dwork,
+ msecs_to_jiffies(path->hb_interval_ms));
}
-void rtrs_send_hb_ack(struct rtrs_sess *sess)
+void rtrs_send_hb_ack(struct rtrs_path *path)
{
- struct rtrs_con *usr_con = sess->con[0];
+ struct rtrs_con *usr_con = path->con[0];
u32 imm;
int err;
imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0);
- err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
NULL);
if (err) {
- rtrs_err(sess, "send HB ACK failed, errno: %d\n", err);
- sess->hb_err_handler(usr_con);
+ rtrs_err(path, "send HB ACK failed, errno: %d\n", err);
+ path->hb_err_handler(usr_con);
return;
}
}
@@ -349,63 +349,63 @@ EXPORT_SYMBOL_GPL(rtrs_send_hb_ack);
static void hb_work(struct work_struct *work)
{
struct rtrs_con *usr_con;
- struct rtrs_sess *sess;
+ struct rtrs_path *path;
u32 imm;
int err;
- sess = container_of(to_delayed_work(work), typeof(*sess), hb_dwork);
- usr_con = sess->con[0];
+ path = container_of(to_delayed_work(work), typeof(*path), hb_dwork);
+ usr_con = path->con[0];
- if (sess->hb_missed_cnt > sess->hb_missed_max) {
- rtrs_err(sess, "HB missed max reached.\n");
- sess->hb_err_handler(usr_con);
+ if (path->hb_missed_cnt > path->hb_missed_max) {
+ rtrs_err(path, "HB missed max reached.\n");
+ path->hb_err_handler(usr_con);
return;
}
- if (sess->hb_missed_cnt++) {
+ if (path->hb_missed_cnt++) {
/* Reschedule work without sending hb */
- schedule_hb(sess);
+ schedule_hb(path);
return;
}
- sess->hb_last_sent = ktime_get();
+ path->hb_last_sent = ktime_get();
imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0);
- err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm,
+ err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm,
NULL);
if (err) {
- rtrs_err(sess, "HB send failed, errno: %d\n", err);
- sess->hb_err_handler(usr_con);
+ rtrs_err(path, "HB send failed, errno: %d\n", err);
+ path->hb_err_handler(usr_con);
return;
}
- schedule_hb(sess);
+ schedule_hb(path);
}
-void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe,
+void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe,
unsigned int interval_ms, unsigned int missed_max,
void (*err_handler)(struct rtrs_con *con),
struct workqueue_struct *wq)
{
- sess->hb_cqe = cqe;
- sess->hb_interval_ms = interval_ms;
- sess->hb_err_handler = err_handler;
- sess->hb_wq = wq;
- sess->hb_missed_max = missed_max;
- sess->hb_missed_cnt = 0;
- INIT_DELAYED_WORK(&sess->hb_dwork, hb_work);
+ path->hb_cqe = cqe;
+ path->hb_interval_ms = interval_ms;
+ path->hb_err_handler = err_handler;
+ path->hb_wq = wq;
+ path->hb_missed_max = missed_max;
+ path->hb_missed_cnt = 0;
+ INIT_DELAYED_WORK(&path->hb_dwork, hb_work);
}
EXPORT_SYMBOL_GPL(rtrs_init_hb);
-void rtrs_start_hb(struct rtrs_sess *sess)
+void rtrs_start_hb(struct rtrs_path *path)
{
- schedule_hb(sess);
+ schedule_hb(path);
}
EXPORT_SYMBOL_GPL(rtrs_start_hb);
-void rtrs_stop_hb(struct rtrs_sess *sess)
+void rtrs_stop_hb(struct rtrs_path *path)
{
- cancel_delayed_work_sync(&sess->hb_dwork);
- sess->hb_missed_cnt = 0;
+ cancel_delayed_work_sync(&path->hb_dwork);
+ path->hb_missed_cnt = 0;
}
EXPORT_SYMBOL_GPL(rtrs_stop_hb);
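
The heartbeat code renamed above is a self-rescheduling delayed work item: each run either re-arms itself after hb_interval_ms or, once hb_missed_cnt exceeds hb_missed_max, hands the connection to the error handler; an ACK from the peer resets the miss counter. A trimmed sketch of that pattern, every name invented:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct demo_hb {
	struct delayed_work dwork;
	struct workqueue_struct *wq;
	unsigned int interval_ms;
	unsigned int missed, missed_max;	/* missed is reset on peer ACK */
};

static void demo_hb_work(struct work_struct *work)
{
	struct demo_hb *hb = container_of(to_delayed_work(work),
					  struct demo_hb, dwork);

	if (hb->missed++ > hb->missed_max) {
		pr_err("demo: heartbeat missed max reached\n");
		return;		/* stop rescheduling; caller tears down */
	}
	/* send the heartbeat message here, then re-arm */
	queue_delayed_work(hb->wq, &hb->dwork,
			   msecs_to_jiffies(hb->interval_ms));
}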
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
index 859c79685daf..5e57a7ccc7fb 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -13,9 +13,9 @@
#include <linux/scatterlist.h>
struct rtrs_permit;
-struct rtrs_clt;
+struct rtrs_clt_sess;
struct rtrs_srv_ctx;
-struct rtrs_srv;
+struct rtrs_srv_sess;
struct rtrs_srv_op;
/*
@@ -52,14 +52,14 @@ struct rtrs_clt_ops {
void (*link_ev)(void *priv, enum rtrs_clt_link_ev ev);
};
-struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
- const char *sessname,
+struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops,
+ const char *pathname,
const struct rtrs_addr *paths,
size_t path_cnt, u16 port,
size_t pdu_sz, u8 reconnect_delay_sec,
s16 max_reconnect_attempts, u32 nr_poll_queues);
-void rtrs_clt_close(struct rtrs_clt *sess);
+void rtrs_clt_close(struct rtrs_clt_sess *clt);
enum wait_type {
RTRS_PERMIT_NOWAIT = 0,
@@ -77,11 +77,12 @@ enum rtrs_clt_con_type {
RTRS_IO_CON
};
-struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *sess,
- enum rtrs_clt_con_type con_type,
- enum wait_type wait);
+struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *sess,
+ enum rtrs_clt_con_type con_type,
+ enum wait_type wait);
-void rtrs_clt_put_permit(struct rtrs_clt *sess, struct rtrs_permit *permit);
+void rtrs_clt_put_permit(struct rtrs_clt_sess *sess,
+ struct rtrs_permit *permit);
/**
* rtrs_clt_req_ops - it holds the request confirmation callback
@@ -98,10 +99,10 @@ struct rtrs_clt_req_ops {
};
int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops,
- struct rtrs_clt *sess, struct rtrs_permit *permit,
+ struct rtrs_clt_sess *sess, struct rtrs_permit *permit,
const struct kvec *vec, size_t nr, size_t len,
struct scatterlist *sg, unsigned int sg_cnt);
-int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index);
+int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index);
/**
* rtrs_attrs - RTRS session attributes
@@ -112,7 +113,7 @@ struct rtrs_attrs {
u32 max_segments;
};
-int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr);
+int rtrs_clt_query(struct rtrs_clt_sess *sess, struct rtrs_attrs *attr);
/*
* Here goes RTRS server API
@@ -163,7 +164,7 @@ struct rtrs_srv_ops {
* @priv: Private data from user if previously set with
* rtrs_srv_set_sess_priv()
*/
- int (*link_ev)(struct rtrs_srv *sess, enum rtrs_srv_link_ev ev,
+ int (*link_ev)(struct rtrs_srv_sess *sess, enum rtrs_srv_link_ev ev,
void *priv);
};
@@ -173,11 +174,12 @@ void rtrs_srv_close(struct rtrs_srv_ctx *ctx);
bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int errno);
-void rtrs_srv_set_sess_priv(struct rtrs_srv *sess, void *priv);
+void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *sess, void *priv);
-int rtrs_srv_get_sess_name(struct rtrs_srv *sess, char *sessname, size_t len);
+int rtrs_srv_get_path_name(struct rtrs_srv_sess *sess, char *pathname,
+ size_t len);
-int rtrs_srv_get_queue_depth(struct rtrs_srv *sess);
+int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *sess);
int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port,
struct rtrs_addr *addr);
diff --git a/drivers/input/ff-core.c b/drivers/input/ff-core.c
index 1cf5deda06e1..fa8d1a466014 100644
--- a/drivers/input/ff-core.c
+++ b/drivers/input/ff-core.c
@@ -67,7 +67,7 @@ static int compat_effect(struct ff_device *ff, struct ff_effect *effect)
effect->type = FF_PERIODIC;
effect->u.periodic.waveform = FF_SINE;
effect->u.periodic.period = 50;
- effect->u.periodic.magnitude = max(magnitude, 0x7fff);
+ effect->u.periodic.magnitude = magnitude;
effect->u.periodic.offset = 0;
effect->u.periodic.phase = 0;
effect->u.periodic.envelope.attack_length = 0;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index ccaeb2426385..c3139bc2aa0d 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -2285,6 +2285,12 @@ int input_register_device(struct input_dev *dev)
/* KEY_RESERVED is not supposed to be transmitted to userspace. */
__clear_bit(KEY_RESERVED, dev->keybit);
+ /* Buttonpads should not map BTN_RIGHT and/or BTN_MIDDLE. */
+ if (test_bit(INPUT_PROP_BUTTONPAD, dev->propbit)) {
+ __clear_bit(BTN_RIGHT, dev->keybit);
+ __clear_bit(BTN_MIDDLE, dev->keybit);
+ }
+
/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
input_cleanse_bitmasks(dev);
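With the hunk above, input_register_device() now enforces that a device advertising INPUT_PROP_BUTTONPAD exposes only BTN_LEFT. A hedged sketch of a registration that would previously have leaked BTN_RIGHT to userspace (register_buttonpad is a hypothetical helper):

#include <linux/device.h>
#include <linux/input.h>

static int register_buttonpad(struct device *parent)
{
	struct input_dev *dev = devm_input_allocate_device(parent);

	if (!dev)
		return -ENOMEM;

	__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
	input_set_capability(dev, EV_KEY, BTN_LEFT);
	/* A buttonpad has one physical button; this bit is now cleared
	 * again by input_register_device() before userspace sees it. */
	input_set_capability(dev, EV_KEY, BTN_RIGHT);

	return input_register_device(dev);
}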
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 8dbf1e69c90a..d75a8b179a8a 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -247,7 +247,7 @@ static ssize_t gpio_keys_attr_store_helper(struct gpio_keys_drvdata *ddata,
ssize_t error;
int i;
- bits = bitmap_zalloc(n_events, GFP_KERNEL);
+ bits = bitmap_alloc(n_events, GFP_KERNEL);
if (!bits)
return -ENOMEM;
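bitmap_zalloc() is bitmap_alloc() plus a memset; the change above drops the zeroing, presumably because the buffer is fully written before it is read (the sysfs store path parses the user string into it). A hedged sketch of that allocate-parse-free shape, using a hypothetical helper:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/slab.h>

/* Parse a comma/range list ("0,3-5") into a fresh bitmap.
 * bitmap_alloc() leaves the bits uninitialized, which is safe here
 * only because bitmap_parselist() writes every bit before any read. */
static int parse_bits(const char *buf, unsigned int nbits)
{
	unsigned long *bits = bitmap_alloc(nbits, GFP_KERNEL);
	int error;

	if (!bits)
		return -ENOMEM;

	error = bitmap_parselist(buf, bits, nbits);

	bitmap_free(bits);
	return error;
}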
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index e09b1fae42e1..04da7916eb70 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -206,11 +206,8 @@ ATTRIBUTE_GROUPS(axp20x);
static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
{
- struct axp20x_pek *axp20x_pek = pwr;
- struct input_dev *idev = axp20x_pek->input;
-
- if (!idev)
- return IRQ_HANDLED;
+ struct input_dev *idev = pwr;
+ struct axp20x_pek *axp20x_pek = input_get_drvdata(idev);
/*
* The power-button is connected to ground so a falling edge (dbf)
@@ -229,9 +226,22 @@ static irqreturn_t axp20x_pek_irq(int irq, void *pwr)
static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
struct platform_device *pdev)
{
+ struct axp20x_dev *axp20x = axp20x_pek->axp20x;
struct input_dev *idev;
int error;
+ axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
+ if (axp20x_pek->irq_dbr < 0)
+ return axp20x_pek->irq_dbr;
+ axp20x_pek->irq_dbr = regmap_irq_get_virq(axp20x->regmap_irqc,
+ axp20x_pek->irq_dbr);
+
+ axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
+ if (axp20x_pek->irq_dbf < 0)
+ return axp20x_pek->irq_dbf;
+ axp20x_pek->irq_dbf = regmap_irq_get_virq(axp20x->regmap_irqc,
+ axp20x_pek->irq_dbf);
+
axp20x_pek->input = devm_input_allocate_device(&pdev->dev);
if (!axp20x_pek->input)
return -ENOMEM;
@@ -246,6 +256,24 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
input_set_drvdata(idev, axp20x_pek);
+ error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
+ axp20x_pek_irq, 0,
+ "axp20x-pek-dbr", idev);
+ if (error < 0) {
+ dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
+ axp20x_pek->irq_dbr, error);
+ return error;
+ }
+
+ error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
+ axp20x_pek_irq, 0,
+ "axp20x-pek-dbf", idev);
+ if (error < 0) {
+ dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
+ axp20x_pek->irq_dbf, error);
+ return error;
+ }
+
error = input_register_device(idev);
if (error) {
dev_err(&pdev->dev, "Can't register input device: %d\n",
@@ -253,6 +281,8 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
return error;
}
+ device_init_wakeup(&pdev->dev, true);
+
return 0;
}
@@ -293,18 +323,6 @@ static int axp20x_pek_probe(struct platform_device *pdev)
axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent);
- axp20x_pek->irq_dbr = platform_get_irq_byname(pdev, "PEK_DBR");
- if (axp20x_pek->irq_dbr < 0)
- return axp20x_pek->irq_dbr;
- axp20x_pek->irq_dbr = regmap_irq_get_virq(
- axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbr);
-
- axp20x_pek->irq_dbf = platform_get_irq_byname(pdev, "PEK_DBF");
- if (axp20x_pek->irq_dbf < 0)
- return axp20x_pek->irq_dbf;
- axp20x_pek->irq_dbf = regmap_irq_get_virq(
- axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbf);
-
if (axp20x_pek_should_register_input(axp20x_pek)) {
error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
if (error)
@@ -313,26 +331,6 @@ static int axp20x_pek_probe(struct platform_device *pdev)
axp20x_pek->info = (struct axp20x_info *)match->driver_data;
- error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbr,
- axp20x_pek_irq, 0,
- "axp20x-pek-dbr", axp20x_pek);
- if (error < 0) {
- dev_err(&pdev->dev, "Failed to request dbr IRQ#%d: %d\n",
- axp20x_pek->irq_dbr, error);
- return error;
- }
-
- error = devm_request_any_context_irq(&pdev->dev, axp20x_pek->irq_dbf,
- axp20x_pek_irq, 0,
- "axp20x-pek-dbf", axp20x_pek);
- if (error < 0) {
- dev_err(&pdev->dev, "Failed to request dbf IRQ#%d: %d\n",
- axp20x_pek->irq_dbf, error);
- return error;
- }
-
- device_init_wakeup(&pdev->dev, true);
-
platform_set_drvdata(pdev, axp20x_pek);
return 0;
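The rework above passes the input device as the IRQ cookie and recovers the driver data with input_get_drvdata(); requesting the IRQs from the input-device probe path also means they can no longer fire before the input device exists, which is what made the old NULL check necessary. A condensed sketch with hypothetical my_* names:

#include <linux/input.h>
#include <linux/interrupt.h>

struct my_button {
	struct input_dev *input;
};

static irqreturn_t my_button_irq(int irq, void *cookie)
{
	/* The cookie is the input device itself. */
	struct input_dev *idev = cookie;
	struct my_button *btn = input_get_drvdata(idev);

	if (!btn)
		return IRQ_NONE;

	input_report_key(idev, KEY_POWER, 1);
	input_sync(idev);
	return IRQ_HANDLED;
}

static int my_button_wire_irq(struct device *dev, struct my_button *btn,
			      int irq)
{
	/* Set drvdata before the IRQ can fire, so the handler can
	 * always dereference it. */
	input_set_drvdata(btn->input, btn);
	return devm_request_any_context_irq(dev, irq, my_button_irq, 0,
					    "my-button", btn->input);
}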
diff --git a/drivers/input/misc/palmas-pwrbutton.c b/drivers/input/misc/palmas-pwrbutton.c
index f9b05cf09ff5..2213e06b611d 100644
--- a/drivers/input/misc/palmas-pwrbutton.c
+++ b/drivers/input/misc/palmas-pwrbutton.c
@@ -15,6 +15,7 @@
* GNU General Public License for more details.
*/
+#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/interrupt.h>
@@ -115,8 +116,8 @@ static void palmas_pwron_params_ofinit(struct device *dev,
struct device_node *np;
u32 val;
int i, error;
- u8 lpk_times[] = { 6, 8, 10, 12 };
- int pwr_on_deb_ms[] = { 15, 100, 500, 1000 };
+ static const u8 lpk_times[] = { 6, 8, 10, 12 };
+ static const int pwr_on_deb_ms[] = { 15, 100, 500, 1000 };
memset(config, 0, sizeof(*config));
@@ -192,8 +193,8 @@ static int palmas_pwron_probe(struct platform_device *pdev)
* Setup default hardware shutdown option (long key press)
* and debounce.
*/
- val = config.long_press_time_val << __ffs(PALMAS_LPK_TIME_MASK);
- val |= config.pwron_debounce_val << __ffs(PALMAS_PWRON_DEBOUNCE_MASK);
+ val = FIELD_PREP(PALMAS_LPK_TIME_MASK, config.long_press_time_val) |
+ FIELD_PREP(PALMAS_PWRON_DEBOUNCE_MASK, config.pwron_debounce_val);
error = palmas_update_bits(palmas, PALMAS_PMU_CONTROL_BASE,
PALMAS_LONG_PRESS_KEY,
PALMAS_LPK_TIME_MASK |
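FIELD_PREP() from <linux/bitfield.h> shifts a value into position under a mask in one step, replacing the open-coded `val << __ffs(MASK)` idiom the hunk removes. A small illustration with hypothetical masks (the PALMAS_* masks play the same role above):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define LPK_TIME_MASK		GENMASK(3, 2)	/* hypothetical field */
#define PWRON_DEBOUNCE_MASK	GENMASK(1, 0)	/* hypothetical field */

static inline u8 pack_pwron(u8 lpk, u8 deb)
{
	/* FIELD_PREP() masks and shifts in one expression, and for
	 * compile-time constants it rejects values wider than the field. */
	return FIELD_PREP(LPK_TIME_MASK, lpk) |
	       FIELD_PREP(PWRON_DEBOUNCE_MASK, deb);
}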
diff --git a/drivers/input/mouse/byd.c b/drivers/input/mouse/byd.c
index 6e0c5f5a2713..221a553f45cd 100644
--- a/drivers/input/mouse/byd.c
+++ b/drivers/input/mouse/byd.c
@@ -191,7 +191,7 @@
/*
* The touchpad generates a mixture of absolute and relative packets, indicated
- * by the the last byte of each packet being set to one of the following:
+ * by the last byte of each packet being set to one of the following:
*/
#define BYD_PACKET_ABSOLUTE 0xf8
#define BYD_PACKET_RELATIVE 0x00
diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c
index a472489ccbad..164f6c757f6b 100644
--- a/drivers/input/mouse/psmouse-smbus.c
+++ b/drivers/input/mouse/psmouse-smbus.c
@@ -75,6 +75,8 @@ static void psmouse_smbus_detach_i2c_client(struct i2c_client *client)
"Marking SMBus companion %s as gone\n",
dev_name(&smbdev->client->dev));
smbdev->dead = true;
+ device_link_remove(&smbdev->client->dev,
+ &smbdev->psmouse->ps2dev.serio->dev);
serio_rescan(smbdev->psmouse->ps2dev.serio);
} else {
list_del(&smbdev->node);
@@ -174,6 +176,8 @@ static void psmouse_smbus_disconnect(struct psmouse *psmouse)
kfree(smbdev);
} else {
smbdev->dead = true;
+ device_link_remove(&smbdev->client->dev,
+ &psmouse->ps2dev.serio->dev);
psmouse_dbg(smbdev->psmouse,
"posting removal request for SMBus companion %s\n",
dev_name(&smbdev->client->dev));
@@ -270,6 +274,12 @@ int psmouse_smbus_init(struct psmouse *psmouse,
if (smbdev->client) {
/* We have our companion device */
+ if (!device_link_add(&smbdev->client->dev,
+ &psmouse->ps2dev.serio->dev,
+ DL_FLAG_STATELESS))
+ psmouse_warn(psmouse,
+ "failed to set up link with iSMBus companion %s\n",
+ dev_name(&smbdev->client->dev));
return 0;
}
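Links created with DL_FLAG_STATELESS are not torn down by the driver core on unbind, so the two new device_link_remove() calls above are the mandatory counterpart of the device_link_add() in psmouse_smbus_init(). A hedged sketch of the pairing, with hypothetical helper names:

#include <linux/device.h>

/* Tie the SMBus companion (consumer) to the serio port (supplier). */
static int link_companion(struct device *consumer, struct device *supplier)
{
	if (!device_link_add(consumer, supplier, DL_FLAG_STATELESS))
		return -EINVAL;	/* add failed; the caller may only warn */
	return 0;
}

/* Stateless links must be removed by whoever created them. */
static void unlink_companion(struct device *consumer, struct device *supplier)
{
	device_link_remove(consumer, supplier);
}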
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c
index 17eb8f2aa48d..669a728095b8 100644
--- a/drivers/input/serio/serport.c
+++ b/drivers/input/serio/serport.c
@@ -207,8 +207,8 @@ static void serport_set_type(struct tty_struct *tty, unsigned long type)
* serport_ldisc_ioctl() allows to set the port protocol, and device ID
*/
-static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int serport_ldisc_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
if (cmd == SPIOCSTYPE) {
unsigned long type;
@@ -226,7 +226,6 @@ static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file,
#ifdef CONFIG_COMPAT
#define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t)
static int serport_ldisc_compat_ioctl(struct tty_struct *tty,
- struct file *file,
unsigned int cmd, unsigned long arg)
{
if (cmd == COMPAT_SPIOCSTYPE) {
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index aaa3c455e01e..a3bfc7a41679 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -297,6 +297,108 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
return -ENOMSG;
}
+static struct input_dev *goodix_create_pen_input(struct goodix_ts_data *ts)
+{
+ struct device *dev = &ts->client->dev;
+ struct input_dev *input;
+
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return NULL;
+
+ input_alloc_absinfo(input);
+ if (!input->absinfo) {
+ input_free_device(input);
+ return NULL;
+ }
+
+ input->absinfo[ABS_X] = ts->input_dev->absinfo[ABS_MT_POSITION_X];
+ input->absinfo[ABS_Y] = ts->input_dev->absinfo[ABS_MT_POSITION_Y];
+ __set_bit(ABS_X, input->absbit);
+ __set_bit(ABS_Y, input->absbit);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 255, 0, 0);
+
+ input_set_capability(input, EV_KEY, BTN_TOUCH);
+ input_set_capability(input, EV_KEY, BTN_TOOL_PEN);
+ input_set_capability(input, EV_KEY, BTN_STYLUS);
+ input_set_capability(input, EV_KEY, BTN_STYLUS2);
+ __set_bit(INPUT_PROP_DIRECT, input->propbit);
+ /*
+ * The resolution of these touchscreens is about 10 units/mm; the actual
+ * resolution does not matter much since we set INPUT_PROP_DIRECT.
+ * Userspace wants something here though, so just set it to 10 units/mm.
+ */
+ input_abs_set_res(input, ABS_X, 10);
+ input_abs_set_res(input, ABS_Y, 10);
+
+ input->name = "Goodix Active Pen";
+ input->phys = "input/pen";
+ input->id.bustype = BUS_I2C;
+ input->id.vendor = 0x0416;
+ if (kstrtou16(ts->id, 10, &input->id.product))
+ input->id.product = 0x1001;
+ input->id.version = ts->version;
+
+ if (input_register_device(input) != 0) {
+ input_free_device(input);
+ return NULL;
+ }
+
+ return input;
+}
+
+static void goodix_ts_report_pen_down(struct goodix_ts_data *ts, u8 *data)
+{
+ int input_x, input_y, input_w;
+ u8 key_value;
+
+ if (!ts->input_pen) {
+ ts->input_pen = goodix_create_pen_input(ts);
+ if (!ts->input_pen)
+ return;
+ }
+
+ if (ts->contact_size == 9) {
+ input_x = get_unaligned_le16(&data[4]);
+ input_y = get_unaligned_le16(&data[6]);
+ input_w = get_unaligned_le16(&data[8]);
+ } else {
+ input_x = get_unaligned_le16(&data[2]);
+ input_y = get_unaligned_le16(&data[4]);
+ input_w = get_unaligned_le16(&data[6]);
+ }
+
+ touchscreen_report_pos(ts->input_pen, &ts->prop, input_x, input_y, false);
+ input_report_abs(ts->input_pen, ABS_PRESSURE, input_w);
+
+ input_report_key(ts->input_pen, BTN_TOUCH, 1);
+ input_report_key(ts->input_pen, BTN_TOOL_PEN, 1);
+
+ if (data[0] & GOODIX_HAVE_KEY) {
+ key_value = data[1 + ts->contact_size];
+ input_report_key(ts->input_pen, BTN_STYLUS, key_value & 0x10);
+ input_report_key(ts->input_pen, BTN_STYLUS2, key_value & 0x20);
+ } else {
+ input_report_key(ts->input_pen, BTN_STYLUS, 0);
+ input_report_key(ts->input_pen, BTN_STYLUS2, 0);
+ }
+
+ input_sync(ts->input_pen);
+}
+
+static void goodix_ts_report_pen_up(struct goodix_ts_data *ts)
+{
+ if (!ts->input_pen)
+ return;
+
+ input_report_key(ts->input_pen, BTN_TOUCH, 0);
+ input_report_key(ts->input_pen, BTN_TOOL_PEN, 0);
+ input_report_key(ts->input_pen, BTN_STYLUS, 0);
+ input_report_key(ts->input_pen, BTN_STYLUS2, 0);
+
+ input_sync(ts->input_pen);
+}
+
static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data)
{
int id = coor_data[0] & 0x0F;
@@ -327,6 +429,14 @@ static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data)
input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
}
+static void goodix_ts_release_keys(struct goodix_ts_data *ts)
+{
+ int i;
+
+ for (i = 0; i < GOODIX_MAX_KEYS; i++)
+ input_report_key(ts->input_dev, ts->keymap[i], 0);
+}
+
static void goodix_ts_report_key(struct goodix_ts_data *ts, u8 *data)
{
int touch_num;
@@ -341,8 +451,7 @@ static void goodix_ts_report_key(struct goodix_ts_data *ts, u8 *data)
input_report_key(ts->input_dev,
ts->keymap[i], 1);
} else {
- for (i = 0; i < GOODIX_MAX_KEYS; i++)
- input_report_key(ts->input_dev, ts->keymap[i], 0);
+ goodix_ts_release_keys(ts);
}
}
@@ -364,6 +473,15 @@ static void goodix_process_events(struct goodix_ts_data *ts)
if (touch_num < 0)
return;
+ /* The pen being down is always reported as a single touch */
+ if (touch_num == 1 && (point_data[1] & 0x80)) {
+ goodix_ts_report_pen_down(ts, point_data);
+ goodix_ts_release_keys(ts);
+ goto sync; /* Release any previously registered touches */
+ } else {
+ goodix_ts_report_pen_up(ts);
+ }
+
goodix_ts_report_key(ts, point_data);
for (i = 0; i < touch_num; i++)
@@ -374,6 +492,7 @@ static void goodix_process_events(struct goodix_ts_data *ts)
goodix_ts_report_touch_8b(ts,
&point_data[1 + ts->contact_size * i]);
+sync:
input_mt_sync_frame(ts->input_dev);
input_sync(ts->input_dev);
}
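The dispatch added above hinges on one protocol detail: a pen contact arrives as a single "touch" whose first coordinate byte has bit 7 set. A condensed, hypothetical restatement of that branch (type and report helpers are the driver's own; the structure is simplified):

/* Sketch only; mirrors goodix_process_events() above. */
#define PEN_FLAG	0x80	/* bit 7 of point_data[1] marks the pen */

static void dispatch(struct goodix_ts_data *ts, u8 *point_data, int touch_num)
{
	if (touch_num == 1 && (point_data[1] & PEN_FLAG)) {
		goodix_ts_report_pen_down(ts, point_data);
		goodix_ts_release_keys(ts);
	} else {
		goodix_ts_report_pen_up(ts);
		/* ... report finger touches as before ... */
	}

	/* Either way the MT frame is closed, releasing stale contacts. */
	input_mt_sync_frame(ts->input_dev);
	input_sync(ts->input_dev);
}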
@@ -857,7 +976,7 @@ retry_get_irq_gpio:
if (IS_ERR(gpiod)) {
error = PTR_ERR(gpiod);
if (error != -EPROBE_DEFER)
- dev_dbg(dev, "Failed to get %s GPIO: %d\n",
+ dev_err(dev, "Failed to get %s GPIO: %d\n",
GOODIX_GPIO_INT_NAME, error);
return error;
}
@@ -874,7 +993,7 @@ retry_get_irq_gpio:
if (IS_ERR(gpiod)) {
error = PTR_ERR(gpiod);
if (error != -EPROBE_DEFER)
- dev_dbg(dev, "Failed to get %s GPIO: %d\n",
+ dev_err(dev, "Failed to get %s GPIO: %d\n",
GOODIX_GPIO_RST_NAME, error);
return error;
}
diff --git a/drivers/input/touchscreen/goodix.h b/drivers/input/touchscreen/goodix.h
index 02065d1c3263..fa8602e78a64 100644
--- a/drivers/input/touchscreen/goodix.h
+++ b/drivers/input/touchscreen/goodix.h
@@ -76,6 +76,7 @@ struct goodix_chip_data {
struct goodix_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
+ struct input_dev *input_pen;
const struct goodix_chip_data *chip;
const char *firmware_name;
struct touchscreen_properties prop;
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index 1ee760bac0cf..3eef8c01090f 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -67,6 +67,7 @@ struct silead_ts_data {
struct i2c_client *client;
struct gpio_desc *gpio_power;
struct input_dev *input;
+ struct input_dev *pen_input;
struct regulator_bulk_data regulators[2];
char fw_name[64];
struct touchscreen_properties prop;
@@ -75,6 +76,13 @@ struct silead_ts_data {
struct input_mt_pos pos[SILEAD_MAX_FINGERS];
int slots[SILEAD_MAX_FINGERS];
int id[SILEAD_MAX_FINGERS];
+ u32 efi_fw_min_max[4];
+ bool efi_fw_min_max_set;
+ bool pen_supported;
+ bool pen_down;
+ u32 pen_x_res;
+ u32 pen_y_res;
+ int pen_up_count;
};
struct silead_fw_data {
@@ -82,6 +90,35 @@ struct silead_fw_data {
u32 val;
};
+static void silead_apply_efi_fw_min_max(struct silead_ts_data *data)
+{
+ struct input_absinfo *absinfo_x = &data->input->absinfo[ABS_MT_POSITION_X];
+ struct input_absinfo *absinfo_y = &data->input->absinfo[ABS_MT_POSITION_Y];
+
+ if (!data->efi_fw_min_max_set)
+ return;
+
+ absinfo_x->minimum = data->efi_fw_min_max[0];
+ absinfo_x->maximum = data->efi_fw_min_max[1];
+ absinfo_y->minimum = data->efi_fw_min_max[2];
+ absinfo_y->maximum = data->efi_fw_min_max[3];
+
+ if (data->prop.invert_x) {
+ absinfo_x->maximum -= absinfo_x->minimum;
+ absinfo_x->minimum = 0;
+ }
+
+ if (data->prop.invert_y) {
+ absinfo_y->maximum -= absinfo_y->minimum;
+ absinfo_y->minimum = 0;
+ }
+
+ if (data->prop.swap_x_y) {
+ swap(absinfo_x->minimum, absinfo_y->minimum);
+ swap(absinfo_x->maximum, absinfo_y->maximum);
+ }
+}
+
static int silead_ts_request_input_dev(struct silead_ts_data *data)
{
struct device *dev = &data->client->dev;
@@ -97,6 +134,7 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data)
input_set_abs_params(data->input, ABS_MT_POSITION_X, 0, 4095, 0, 0);
input_set_abs_params(data->input, ABS_MT_POSITION_Y, 0, 4095, 0, 0);
touchscreen_parse_properties(data->input, true, &data->prop);
+ silead_apply_efi_fw_min_max(data);
input_mt_init_slots(data->input, data->max_fingers,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED |
@@ -112,6 +150,40 @@ static int silead_ts_request_input_dev(struct silead_ts_data *data)
error = input_register_device(data->input);
if (error) {
dev_err(dev, "Failed to register input device: %d\n", error);
+ return error;
+ }
+
+ return 0;
+}
+
+static int silead_ts_request_pen_input_dev(struct silead_ts_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int error;
+
+ if (!data->pen_supported)
+ return 0;
+
+ data->pen_input = devm_input_allocate_device(dev);
+ if (!data->pen_input)
+ return -ENOMEM;
+
+ input_set_abs_params(data->pen_input, ABS_X, 0, 4095, 0, 0);
+ input_set_abs_params(data->pen_input, ABS_Y, 0, 4095, 0, 0);
+ input_set_capability(data->pen_input, EV_KEY, BTN_TOUCH);
+ input_set_capability(data->pen_input, EV_KEY, BTN_TOOL_PEN);
+ set_bit(INPUT_PROP_DIRECT, data->pen_input->propbit);
+ touchscreen_parse_properties(data->pen_input, false, &data->prop);
+ input_abs_set_res(data->pen_input, ABS_X, data->pen_x_res);
+ input_abs_set_res(data->pen_input, ABS_Y, data->pen_y_res);
+
+ data->pen_input->name = SILEAD_TS_NAME " pen";
+ data->pen_input->phys = "input/pen";
+ data->input->id.bustype = BUS_I2C;
+
+ error = input_register_device(data->pen_input);
+ if (error) {
+ dev_err(dev, "Failed to register pen input device: %d\n", error);
return error;
}
@@ -129,6 +201,45 @@ static void silead_ts_set_power(struct i2c_client *client,
}
}
+static bool silead_ts_handle_pen_data(struct silead_ts_data *data, u8 *buf)
+{
+ u8 *coord = buf + SILEAD_POINT_DATA_LEN;
+ struct input_mt_pos pos;
+
+ if (!data->pen_supported || buf[2] != 0x00 || buf[3] != 0x00)
+ return false;
+
+ if (buf[0] == 0x00 && buf[1] == 0x00 && data->pen_down) {
+ data->pen_up_count++;
+ if (data->pen_up_count == 6) {
+ data->pen_down = false;
+ goto sync;
+ }
+ return true;
+ }
+
+ if (buf[0] == 0x01 && buf[1] == 0x08) {
+ touchscreen_set_mt_pos(&pos, &data->prop,
+ get_unaligned_le16(&coord[SILEAD_POINT_X_OFF]) & 0xfff,
+ get_unaligned_le16(&coord[SILEAD_POINT_Y_OFF]) & 0xfff);
+
+ input_report_abs(data->pen_input, ABS_X, pos.x);
+ input_report_abs(data->pen_input, ABS_Y, pos.y);
+
+ data->pen_up_count = 0;
+ data->pen_down = true;
+ goto sync;
+ }
+
+ return false;
+
+sync:
+ input_report_key(data->pen_input, BTN_TOOL_PEN, data->pen_down);
+ input_report_key(data->pen_input, BTN_TOUCH, data->pen_down);
+ input_sync(data->pen_input);
+ return true;
+}
+
static void silead_ts_read_data(struct i2c_client *client)
{
struct silead_ts_data *data = i2c_get_clientdata(client);
@@ -151,6 +262,9 @@ static void silead_ts_read_data(struct i2c_client *client)
buf[0] = data->max_fingers;
}
+ if (silead_ts_handle_pen_data(data, buf))
+ goto sync; /* Pen is down, release all previous touches */
+
touch_nr = 0;
bufp = buf + SILEAD_POINT_DATA_LEN;
for (i = 0; i < buf[0]; i++, bufp += SILEAD_POINT_DATA_LEN) {
@@ -193,6 +307,7 @@ static void silead_ts_read_data(struct i2c_client *client)
data->pos[i].y, data->id[i], data->slots[i]);
}
+sync:
input_mt_sync_frame(input);
input_report_key(input, KEY_LEFTMETA, softbutton_pressed);
input_sync(input);
@@ -282,17 +397,56 @@ static int silead_ts_load_fw(struct i2c_client *client)
{
struct device *dev = &client->dev;
struct silead_ts_data *data = i2c_get_clientdata(client);
- unsigned int fw_size, i;
- const struct firmware *fw;
+ const struct firmware *fw = NULL;
struct silead_fw_data *fw_data;
+ unsigned int fw_size, i;
int error;
dev_dbg(dev, "Firmware file name: %s", data->fw_name);
- error = firmware_request_platform(&fw, data->fw_name, dev);
+ /*
+ * Unfortunately, at the time of writing this comment, we have been unable to
+ * get permission from Silead, or from device OEMs, to distribute the necessary
+ * Silead firmware files in linux-firmware.
+ *
+ * On a whole bunch of devices the UEFI BIOS code contains a touchscreen driver,
+ * which contains an embedded copy of the firmware. The fw-loader code has a
+ * "platform" fallback mechanism, which together with info on the firmware
+ * from drivers/platform/x86/touchscreen_dmi.c will use the firmware from the
+ * UEFI driver when the firmware is missing from /lib/firmware. This makes the
+ * touchscreen work OOTB without users needing to manually download the firmware.
+ *
+ * The firmware bundled with the original Windows/Android is usually newer than
+ * the firmware in the UEFI driver and it is better calibrated. This better
+ * calibration can lead to significant differences in the reported min/max
+ * coordinates.
+ *
+ * To deal with this we first try to load the firmware without "platform"
+ * fallback. If that fails we retry with "platform" fallback and if that
+ * succeeds we apply an (optional) set of alternative min/max values from the
+ * "silead,efi-fw-min-max" property.
+ */
+ error = firmware_request_nowarn(&fw, data->fw_name, dev);
if (error) {
- dev_err(dev, "Firmware request error %d\n", error);
- return error;
+ error = firmware_request_platform(&fw, data->fw_name, dev);
+ if (error) {
+ dev_err(dev, "Firmware request error %d\n", error);
+ return error;
+ }
+
+ error = device_property_read_u32_array(dev, "silead,efi-fw-min-max",
+ data->efi_fw_min_max,
+ ARRAY_SIZE(data->efi_fw_min_max));
+ if (!error)
+ data->efi_fw_min_max_set = true;
+
+ /* The EFI (platform) embedded fw does not have pen support */
+ if (data->pen_supported) {
+ dev_warn(dev, "Warning loading '%s' from filesystem failed, using EFI embedded copy.\n",
+ data->fw_name);
+ dev_warn(dev, "Warning pen support is known to be broken in the EFI embedded fw version\n");
+ data->pen_supported = false;
+ }
}
fw_size = fw->size / sizeof(*fw_data);
@@ -450,6 +604,10 @@ static void silead_ts_read_props(struct i2c_client *client)
"silead/%s", str);
else
dev_dbg(dev, "Firmware file name read error. Using default.");
+
+ data->pen_supported = device_property_read_bool(dev, "silead,pen-supported");
+ device_property_read_u32(dev, "silead,pen-resolution-x", &data->pen_x_res);
+ device_property_read_u32(dev, "silead,pen-resolution-y", &data->pen_y_res);
}
#ifdef CONFIG_ACPI
@@ -562,6 +720,10 @@ static int silead_ts_probe(struct i2c_client *client,
if (error)
return error;
+ error = silead_ts_request_pen_input_dev(data);
+ if (error)
+ return error;
+
error = devm_request_threaded_irq(dev, client->irq,
NULL, silead_ts_threaded_irq_handler,
IRQF_ONESHOT, client->name, data);
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 83e685557a19..f2fb6a9a1a57 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -126,12 +126,13 @@ static int titsc_config_wires(struct titsc *ts_dev)
static void titsc_step_config(struct titsc *ts_dev)
{
unsigned int config;
- int i;
+ int i, n;
int end_step, first_step, tsc_steps;
u32 stepenable;
config = STEPCONFIG_MODE_HWSYNC |
- STEPCONFIG_AVG_16 | ts_dev->bit_xp;
+ STEPCONFIG_AVG_16 | ts_dev->bit_xp |
+ STEPCONFIG_INM_ADCREFM;
switch (ts_dev->wires) {
case 4:
config |= STEPCONFIG_INP(ts_dev->inp_yp) | ts_dev->bit_xn;
@@ -150,9 +151,11 @@ static void titsc_step_config(struct titsc *ts_dev)
first_step = TOTAL_STEPS - tsc_steps;
/* Steps 16 to 16-coordinate_readouts is for X */
end_step = first_step + tsc_steps;
+ n = 0;
for (i = end_step - ts_dev->coordinate_readouts; i < end_step; i++) {
titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
- titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
+ titsc_writel(ts_dev, REG_STEPDELAY(i),
+ n++ == 0 ? STEPCONFIG_OPENDLY : 0);
}
config = 0;
@@ -174,9 +177,11 @@ static void titsc_step_config(struct titsc *ts_dev)
/* 1 ... coordinate_readouts is for Y */
end_step = first_step + ts_dev->coordinate_readouts;
+ n = 0;
for (i = first_step; i < end_step; i++) {
titsc_writel(ts_dev, REG_STEPCONFIG(i), config);
- titsc_writel(ts_dev, REG_STEPDELAY(i), STEPCONFIG_OPENDLY);
+ titsc_writel(ts_dev, REG_STEPDELAY(i),
+ n++ == 0 ? STEPCONFIG_OPENDLY : 0);
}
/* Make CHARGECONFIG same as IDLECONFIG */
@@ -195,7 +200,10 @@ static void titsc_step_config(struct titsc *ts_dev)
STEPCONFIG_OPENDLY);
end_step++;
- config |= STEPCONFIG_INP(ts_dev->inp_yn);
+ config = STEPCONFIG_MODE_HWSYNC |
+ STEPCONFIG_AVG_16 | ts_dev->bit_yp |
+ ts_dev->bit_xn | STEPCONFIG_INM_ADCREFM |
+ STEPCONFIG_INP(ts_dev->inp_yn);
titsc_writel(ts_dev, REG_STEPCONFIG(end_step), config);
titsc_writel(ts_dev, REG_STEPDELAY(end_step),
STEPCONFIG_OPENDLY);
@@ -310,7 +318,7 @@ static irqreturn_t titsc_irq(int irq, void *dev)
/*
* Calculate pressure using formula
* Resistance(touch) = x plate resistance *
- * x postion/4096 * ((z2 / z1) - 1)
+ * x position/4096 * ((z2 / z1) - 1)
*/
z = z1 - z2;
z *= x;
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index e3f2c940ef3d..dfd3b35590c3 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -186,7 +186,6 @@ static irqreturn_t ucb1400_irq(int irqnr, void *devid)
{
struct ucb1400_ts *ucb = devid;
unsigned int x, y, p;
- bool penup;
if (unlikely(irqnr != ucb->irq))
return IRQ_NONE;
@@ -196,8 +195,7 @@ static irqreturn_t ucb1400_irq(int irqnr, void *devid)
/* Start with a small delay before checking pendown state */
msleep(UCB1400_TS_POLL_PERIOD);
- while (!ucb->stopped && !(penup = ucb1400_ts_pen_up(ucb))) {
-
+ while (!ucb->stopped && !ucb1400_ts_pen_up(ucb)) {
ucb1400_adc_enable(ucb->ac97);
x = ucb1400_ts_read_xpos(ucb);
y = ucb1400_ts_read_ypos(ucb);
diff --git a/drivers/input/touchscreen/wacom_i2c.c b/drivers/input/touchscreen/wacom_i2c.c
index fe4ea6204a4e..141754b2764c 100644
--- a/drivers/input/touchscreen/wacom_i2c.c
+++ b/drivers/input/touchscreen/wacom_i2c.c
@@ -24,12 +24,19 @@
#define WACOM_IN_PROXIMITY BIT(5)
/* Registers */
-#define WACOM_CMD_QUERY0 0x04
-#define WACOM_CMD_QUERY1 0x00
-#define WACOM_CMD_QUERY2 0x33
-#define WACOM_CMD_QUERY3 0x02
-#define WACOM_CMD_THROW0 0x05
-#define WACOM_CMD_THROW1 0x00
+#define WACOM_COMMAND_LSB 0x04
+#define WACOM_COMMAND_MSB 0x00
+
+#define WACOM_DATA_LSB 0x05
+#define WACOM_DATA_MSB 0x00
+
+/* Report types */
+#define REPORT_FEATURE 0x30
+
+/* Requests / operations */
+#define OPCODE_GET_REPORT 0x02
+
+#define WACOM_QUERY_REPORT 3
#define WACOM_QUERY_SIZE 19
struct wacom_features {
@@ -50,23 +57,24 @@ struct wacom_i2c {
static int wacom_query_device(struct i2c_client *client,
struct wacom_features *features)
{
- int ret;
- u8 cmd1[] = { WACOM_CMD_QUERY0, WACOM_CMD_QUERY1,
- WACOM_CMD_QUERY2, WACOM_CMD_QUERY3 };
- u8 cmd2[] = { WACOM_CMD_THROW0, WACOM_CMD_THROW1 };
+ u8 get_query_data_cmd[] = {
+ WACOM_COMMAND_LSB,
+ WACOM_COMMAND_MSB,
+ REPORT_FEATURE | WACOM_QUERY_REPORT,
+ OPCODE_GET_REPORT,
+ WACOM_DATA_LSB,
+ WACOM_DATA_MSB,
+ };
u8 data[WACOM_QUERY_SIZE];
+ int ret;
+
struct i2c_msg msgs[] = {
+ /* Request reading of feature ReportID: 3 (Pen Query Data) */
{
.addr = client->addr,
.flags = 0,
- .len = sizeof(cmd1),
- .buf = cmd1,
- },
- {
- .addr = client->addr,
- .flags = 0,
- .len = sizeof(cmd2),
- .buf = cmd2,
+ .len = sizeof(get_query_data_cmd),
+ .buf = get_query_data_cmd,
},
{
.addr = client->addr,
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 78d2ee99f37a..1b58611c8084 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -615,10 +615,9 @@ static int wm97xx_register_touch(struct wm97xx *wm)
* extensions)
*/
wm->touch_dev = platform_device_alloc("wm97xx-touch", -1);
- if (!wm->touch_dev) {
- ret = -ENOMEM;
- goto touch_err;
- }
+ if (!wm->touch_dev)
+ return -ENOMEM;
+
platform_set_drvdata(wm->touch_dev, wm);
wm->touch_dev->dev.parent = wm->dev;
wm->touch_dev->dev.platform_data = pdata;
@@ -629,9 +628,6 @@ static int wm97xx_register_touch(struct wm97xx *wm)
return 0;
touch_reg_err:
platform_device_put(wm->touch_dev);
-touch_err:
- input_unregister_device(wm->input_dev);
- wm->input_dev = NULL;
return ret;
}
@@ -639,8 +635,6 @@ touch_err:
static void wm97xx_unregister_touch(struct wm97xx *wm)
{
platform_device_unregister(wm->touch_dev);
- input_unregister_device(wm->input_dev);
- wm->input_dev = NULL;
}
static int _wm97xx_probe(struct wm97xx *wm)
diff --git a/drivers/input/touchscreen/zinitix.c b/drivers/input/touchscreen/zinitix.c
index 1e70b8d2a8d7..129ebc810de8 100644
--- a/drivers/input/touchscreen/zinitix.c
+++ b/drivers/input/touchscreen/zinitix.c
@@ -252,16 +252,27 @@ static int zinitix_init_touch(struct bt541_ts_data *bt541)
static int zinitix_init_regulators(struct bt541_ts_data *bt541)
{
- struct i2c_client *client = bt541->client;
+ struct device *dev = &bt541->client->dev;
int error;
- bt541->supplies[0].supply = "vdd";
- bt541->supplies[1].supply = "vddo";
- error = devm_regulator_bulk_get(&client->dev,
+ /*
+ * Some older device trees have erroneous names for the regulators,
+ * so check if "vddo" is present and in that case use these names.
+ * Else use the proper supply names on the component.
+ */
+ if (of_find_property(dev->of_node, "vddo-supply", NULL)) {
+ bt541->supplies[0].supply = "vdd";
+ bt541->supplies[1].supply = "vddo";
+ } else {
+ /* Else use the proper supply names */
+ bt541->supplies[0].supply = "vcca";
+ bt541->supplies[1].supply = "vdd";
+ }
+ error = devm_regulator_bulk_get(dev,
ARRAY_SIZE(bt541->supplies),
bt541->supplies);
if (error < 0) {
- dev_err(&client->dev, "Failed to get regulators: %d\n", error);
+ dev_err(dev, "Failed to get regulators: %d\n", error);
return error;
}
@@ -560,7 +571,20 @@ static SIMPLE_DEV_PM_OPS(zinitix_pm_ops, zinitix_suspend, zinitix_resume);
#ifdef CONFIG_OF
static const struct of_device_id zinitix_of_match[] = {
+ { .compatible = "zinitix,bt402" },
+ { .compatible = "zinitix,bt403" },
+ { .compatible = "zinitix,bt404" },
+ { .compatible = "zinitix,bt412" },
+ { .compatible = "zinitix,bt413" },
+ { .compatible = "zinitix,bt431" },
+ { .compatible = "zinitix,bt432" },
+ { .compatible = "zinitix,bt531" },
+ { .compatible = "zinitix,bt532" },
+ { .compatible = "zinitix,bt538" },
{ .compatible = "zinitix,bt541" },
+ { .compatible = "zinitix,bt548" },
+ { .compatible = "zinitix,bt554" },
+ { .compatible = "zinitix,at100" },
{ }
};
MODULE_DEVICE_TABLE(of, zinitix_of_match);
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index daf1e25f6042..91353e651a52 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -35,6 +35,15 @@ config INTERCONNECT_QCOM_MSM8974
This is a driver for the Qualcomm Network-on-Chip on msm8974-based
platforms.
+config INTERCONNECT_QCOM_MSM8996
+ tristate "Qualcomm MSM8996 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8996-based
+ platforms.
+
config INTERCONNECT_QCOM_OSM_L3
tristate "Qualcomm OSM L3 interconnect driver"
depends on INTERCONNECT_QCOM || COMPILE_TEST
@@ -42,6 +51,15 @@ config INTERCONNECT_QCOM_OSM_L3
Say y here to support the Operating State Manager (OSM) interconnect
driver which controls the scaling of L3 caches on Qualcomm SoCs.
+config INTERCONNECT_QCOM_QCM2290
+ tristate "Qualcomm QCM2290 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on qcm2290-based
+ platforms.
+
config INTERCONNECT_QCOM_QCS404
tristate "Qualcomm QCS404 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -146,5 +164,14 @@ config INTERCONNECT_QCOM_SM8350
This is a driver for the Qualcomm Network-on-Chip on SM8350-based
platforms.
+config INTERCONNECT_QCOM_SM8450
+ tristate "Qualcomm SM8450 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on SM8450-based
+ platforms.
+
config INTERCONNECT_QCOM_SMD_RPM
tristate
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 69300b1d48ef..ceae9bb566c6 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -4,7 +4,9 @@ icc-bcm-voter-objs := bcm-voter.o
qnoc-msm8916-objs := msm8916.o
qnoc-msm8939-objs := msm8939.o
qnoc-msm8974-objs := msm8974.o
+qnoc-msm8996-objs := msm8996.o
icc-osm-l3-objs := osm-l3.o
+qnoc-qcm2290-objs := qcm2290.o
qnoc-qcs404-objs := qcs404.o
icc-rpmh-obj := icc-rpmh.o
qnoc-sc7180-objs := sc7180.o
@@ -16,13 +18,16 @@ qnoc-sdx55-objs := sdx55.o
qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
qnoc-sm8350-objs := sm8350.o
+qnoc-sm8450-objs := sm8450.o
icc-smd-rpm-objs := smd-rpm.o icc-rpm.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8939) += qnoc-msm8939.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8996) += qnoc-msm8996.o
obj-$(CONFIG_INTERCONNECT_QCOM_OSM_L3) += icc-osm-l3.o
+obj-$(CONFIG_INTERCONNECT_QCOM_QCM2290) += qnoc-qcm2290.o
obj-$(CONFIG_INTERCONNECT_QCOM_QCS404) += qnoc-qcs404.o
obj-$(CONFIG_INTERCONNECT_QCOM_RPMH) += icc-rpmh.o
obj-$(CONFIG_INTERCONNECT_QCOM_SC7180) += qnoc-sc7180.o
@@ -34,4 +39,5 @@ obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM8450) += qnoc-sm8450.o
obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o
diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c
index ef7999a08c8b..34125e8f8b60 100644
--- a/drivers/interconnect/qcom/icc-rpm.c
+++ b/drivers/interconnect/qcom/icc-rpm.c
@@ -11,12 +11,20 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include "smd-rpm.h"
#include "icc-rpm.h"
+/* QNOC QoS */
+#define QNOC_QOS_MCTL_LOWn_ADDR(n) (0x8 + (n * 0x1000))
+#define QNOC_QOS_MCTL_DFLT_PRIO_MASK 0x70
+#define QNOC_QOS_MCTL_DFLT_PRIO_SHIFT 4
+#define QNOC_QOS_MCTL_URGFWD_EN_MASK 0x8
+#define QNOC_QOS_MCTL_URGFWD_EN_SHIFT 3
+
/* BIMC QoS */
#define M_BKE_REG_BASE(n) (0x300 + (0x4000 * n))
#define M_BKE_EN_ADDR(n) (M_BKE_REG_BASE(n))
@@ -39,6 +47,27 @@
#define NOC_QOS_MODEn_ADDR(n) (0xc + (n * 0x1000))
#define NOC_QOS_MODEn_MASK 0x3
+static int qcom_icc_set_qnoc_qos(struct icc_node *src, u64 max_bw)
+{
+ struct icc_provider *provider = src->provider;
+ struct qcom_icc_provider *qp = to_qcom_provider(provider);
+ struct qcom_icc_node *qn = src->data;
+ struct qcom_icc_qos *qos = &qn->qos;
+ int rc;
+
+ rc = regmap_update_bits(qp->regmap,
+ qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
+ QNOC_QOS_MCTL_DFLT_PRIO_MASK,
+ qos->areq_prio << QNOC_QOS_MCTL_DFLT_PRIO_SHIFT);
+ if (rc)
+ return rc;
+
+ return regmap_update_bits(qp->regmap,
+ qp->qos_offset + QNOC_QOS_MCTL_LOWn_ADDR(qos->qos_port),
+ QNOC_QOS_MCTL_URGFWD_EN_MASK,
+ !!qos->urg_fwd_en << QNOC_QOS_MCTL_URGFWD_EN_SHIFT);
+}
+
static int qcom_icc_bimc_set_qos_health(struct qcom_icc_provider *qp,
struct qcom_icc_qos *qos,
int regnum)
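The new QNOC path programs each port's default priority and urgent-forwarding bits with two read-modify-write cycles on the same MCTL_LOW register; regmap_update_bits() touches only the bits under the mask. A hedged sketch of that shape (hypothetical helper; masks and shifts mirror the QNOC_QOS_MCTL_* defines above):

#include <linux/regmap.h>

static int set_port_qos(struct regmap *map, u32 qos_offset, u32 port,
			u32 prio, bool urg_fwd)
{
	u32 reg = qos_offset + 0x8 + port * 0x1000;	/* MCTL_LOWn */
	int ret;

	/* Default priority field, bits [6:4]; other bits are preserved. */
	ret = regmap_update_bits(map, reg, 0x70, prio << 4);
	if (ret)
		return ret;

	/* Urgent forwarding enable, bit 3. */
	return regmap_update_bits(map, reg, 0x8, (u32)urg_fwd << 3);
}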
@@ -76,7 +105,7 @@ static int qcom_icc_set_bimc_qos(struct icc_node *src, u64 max_bw)
provider = src->provider;
qp = to_qcom_provider(provider);
- if (qn->qos.qos_mode != -1)
+ if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
mode = qn->qos.qos_mode;
/* QoS Priority: The QoS Health parameters are getting considered
@@ -137,7 +166,7 @@ static int qcom_icc_set_noc_qos(struct icc_node *src, u64 max_bw)
return 0;
}
- if (qn->qos.qos_mode != -1)
+ if (qn->qos.qos_mode != NOC_QOS_MODE_INVALID)
mode = qn->qos.qos_mode;
if (mode == NOC_QOS_MODE_FIXED) {
@@ -163,10 +192,14 @@ static int qcom_icc_qos_set(struct icc_node *node, u64 sum_bw)
dev_dbg(node->provider->dev, "Setting QoS for %s\n", qn->name);
- if (qp->is_bimc_node)
+ switch (qp->type) {
+ case QCOM_ICC_BIMC:
return qcom_icc_set_bimc_qos(node, sum_bw);
-
- return qcom_icc_set_noc_qos(node, sum_bw);
+ case QCOM_ICC_QNOC:
+ return qcom_icc_set_qnoc_qos(node, sum_bw);
+ default:
+ return qcom_icc_set_noc_qos(node, sum_bw);
+ }
}
static int qcom_icc_rpm_set(int mas_rpm_id, int slv_rpm_id, u64 sum_bw)
@@ -239,6 +272,7 @@ static int qcom_icc_set(struct icc_node *src, struct icc_node *dst)
rate = max(sum_bw, max_peak_bw);
do_div(rate, qn->buswidth);
+ rate = min_t(u64, rate, LONG_MAX);
if (qn->rate == rate)
return 0;
@@ -307,7 +341,7 @@ int qnoc_probe(struct platform_device *pdev)
qp->bus_clks[i].id = cds[i];
qp->num_clks = cd_num;
- qp->is_bimc_node = desc->is_bimc_node;
+ qp->type = desc->type;
qp->qos_offset = desc->qos_offset;
if (desc->regmap_cfg) {
@@ -315,8 +349,13 @@ int qnoc_probe(struct platform_device *pdev)
void __iomem *mmio;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
+ if (!res) {
+ /* Try parent's regmap */
+ qp->regmap = dev_get_regmap(dev->parent, NULL);
+ if (qp->regmap)
+ goto regmap_done;
return -ENODEV;
+ }
mmio = devm_ioremap_resource(dev, res);
@@ -332,6 +371,7 @@ int qnoc_probe(struct platform_device *pdev)
}
}
+regmap_done:
ret = devm_clk_bulk_get(dev, qp->num_clks, qp->bus_clks);
if (ret)
return ret;
@@ -340,6 +380,12 @@ int qnoc_probe(struct platform_device *pdev)
if (ret)
return ret;
+ if (desc->has_bus_pd) {
+ ret = dev_pm_domain_attach(dev, true);
+ if (ret)
+ return ret;
+ }
+
provider = &qp->provider;
INIT_LIST_HEAD(&provider->nodes);
provider->dev = dev;
@@ -377,6 +423,10 @@ int qnoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qp);
+ /* Populate child NoC devices if any */
+ if (of_get_child_count(dev->of_node) > 0)
+ return of_platform_populate(dev->of_node, NULL, NULL, dev);
+
return 0;
err:
icc_nodes_remove(provider);
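Two probe changes above make child NoCs possible (QCM2290 models some NoCs as children of a parent interconnect): if the node has no MMIO resource of its own, the probe borrows the parent's regmap, and after registration it populates any child nodes with of_platform_populate(). A hedged sketch of the regmap half, with a hypothetical helper:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static struct regmap *get_noc_regmap(struct platform_device *pdev,
				     const struct regmap_config *cfg)
{
	struct device *dev = &pdev->dev;
	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

	if (!IS_ERR(base))
		return devm_regmap_init_mmio(dev, base, cfg);

	/* No own register space: a child NoC reuses the parent's regmap. */
	return dev_get_regmap(dev->parent, NULL);
}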
diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h
index f5744de4da19..26dad006034f 100644
--- a/drivers/interconnect/qcom/icc-rpm.h
+++ b/drivers/interconnect/qcom/icc-rpm.h
@@ -12,19 +12,25 @@
#define to_qcom_provider(_provider) \
container_of(_provider, struct qcom_icc_provider, provider)
+enum qcom_icc_type {
+ QCOM_ICC_NOC,
+ QCOM_ICC_BIMC,
+ QCOM_ICC_QNOC,
+};
+
/**
* struct qcom_icc_provider - Qualcomm specific interconnect provider
* @provider: generic interconnect provider
* @bus_clks: the clk_bulk_data table of bus clocks
* @num_clks: the total number of clk_bulk_data entries
- * @is_bimc_node: indicates whether to use bimc specific setting
+ * @type: the ICC provider type
* @qos_offset: offset to QoS registers
* @regmap: regmap for QoS registers read/write access
*/
struct qcom_icc_provider {
struct icc_provider provider;
int num_clks;
- bool is_bimc_node;
+ enum qcom_icc_type type;
struct regmap *regmap;
unsigned int qos_offset;
struct clk_bulk_data bus_clks[];
@@ -38,6 +44,7 @@ struct qcom_icc_provider {
* @ap_owned: indicates if the node is owned by the AP or by the RPM
* @qos_mode: default qos mode for this node
* @qos_port: qos port number for finding qos registers of this node
+ * @urg_fwd_en: enable urgent forwarding
*/
struct qcom_icc_qos {
u32 areq_prio;
@@ -46,6 +53,7 @@ struct qcom_icc_qos {
bool ap_owned;
int qos_mode;
int qos_port;
+ bool urg_fwd_en;
};
/**
@@ -77,7 +85,8 @@ struct qcom_icc_desc {
size_t num_nodes;
const char * const *clocks;
size_t num_clocks;
- bool is_bimc_node;
+ bool has_bus_pd;
+ enum qcom_icc_type type;
const struct regmap_config *regmap_cfg;
unsigned int qos_offset;
};
diff --git a/drivers/interconnect/qcom/icc-rpmh.c b/drivers/interconnect/qcom/icc-rpmh.c
index 3eb7936d2cf6..2c8e12549804 100644
--- a/drivers/interconnect/qcom/icc-rpmh.c
+++ b/drivers/interconnect/qcom/icc-rpmh.c
@@ -21,13 +21,18 @@ void qcom_icc_pre_aggregate(struct icc_node *node)
{
size_t i;
struct qcom_icc_node *qn;
+ struct qcom_icc_provider *qp;
qn = node->data;
+ qp = to_qcom_provider(node->provider);
for (i = 0; i < QCOM_ICC_NUM_BUCKETS; i++) {
qn->sum_avg[i] = 0;
qn->max_peak[i] = 0;
}
+
+ for (i = 0; i < qn->num_bcms; i++)
+ qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
}
EXPORT_SYMBOL_GPL(qcom_icc_pre_aggregate);
@@ -45,10 +50,8 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
{
size_t i;
struct qcom_icc_node *qn;
- struct qcom_icc_provider *qp;
qn = node->data;
- qp = to_qcom_provider(node->provider);
if (!tag)
tag = QCOM_ICC_TAG_ALWAYS;
@@ -68,9 +71,6 @@ int qcom_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
*agg_avg += avg_bw;
*agg_peak = max_t(u32, *agg_peak, peak_bw);
- for (i = 0; i < qn->num_bcms; i++)
- qcom_icc_bcm_voter_add(qp->voter, qn->bcms[i]);
-
return 0;
}
EXPORT_SYMBOL_GPL(qcom_icc_aggregate);
diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c
index e3c995b11357..2f397a7c3322 100644
--- a/drivers/interconnect/qcom/msm8916.c
+++ b/drivers/interconnect/qcom/msm8916.c
@@ -1229,6 +1229,7 @@ static const struct regmap_config msm8916_snoc_regmap_config = {
};
static struct qcom_icc_desc msm8916_snoc = {
+ .type = QCOM_ICC_NOC,
.nodes = msm8916_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_snoc_nodes),
.regmap_cfg = &msm8916_snoc_regmap_config,
@@ -1256,9 +1257,9 @@ static const struct regmap_config msm8916_bimc_regmap_config = {
};
static struct qcom_icc_desc msm8916_bimc = {
+ .type = QCOM_ICC_BIMC,
.nodes = msm8916_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_bimc_nodes),
- .is_bimc_node = true,
.regmap_cfg = &msm8916_bimc_regmap_config,
.qos_offset = 0x8000,
};
@@ -1325,6 +1326,7 @@ static const struct regmap_config msm8916_pcnoc_regmap_config = {
};
static struct qcom_icc_desc msm8916_pcnoc = {
+ .type = QCOM_ICC_NOC,
.nodes = msm8916_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8916_pcnoc_nodes),
.regmap_cfg = &msm8916_pcnoc_regmap_config,
diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c
index 16272a477bd8..d188f3636e4c 100644
--- a/drivers/interconnect/qcom/msm8939.c
+++ b/drivers/interconnect/qcom/msm8939.c
@@ -1282,6 +1282,7 @@ static const struct regmap_config msm8939_snoc_regmap_config = {
};
static struct qcom_icc_desc msm8939_snoc = {
+ .type = QCOM_ICC_NOC,
.nodes = msm8939_snoc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_snoc_nodes),
.regmap_cfg = &msm8939_snoc_regmap_config,
@@ -1309,6 +1310,7 @@ static const struct regmap_config msm8939_snoc_mm_regmap_config = {
};
static struct qcom_icc_desc msm8939_snoc_mm = {
+ .type = QCOM_ICC_NOC,
.nodes = msm8939_snoc_mm_nodes,
.num_nodes = ARRAY_SIZE(msm8939_snoc_mm_nodes),
.regmap_cfg = &msm8939_snoc_mm_regmap_config,
@@ -1336,9 +1338,9 @@ static const struct regmap_config msm8939_bimc_regmap_config = {
};
static struct qcom_icc_desc msm8939_bimc = {
+ .type = QCOM_ICC_BIMC,
.nodes = msm8939_bimc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_bimc_nodes),
- .is_bimc_node = true,
.regmap_cfg = &msm8939_bimc_regmap_config,
.qos_offset = 0x8000,
};
@@ -1407,6 +1409,7 @@ static const struct regmap_config msm8939_pcnoc_regmap_config = {
};
static struct qcom_icc_desc msm8939_pcnoc = {
+ .type = QCOM_ICC_NOC,
.nodes = msm8939_pcnoc_nodes,
.num_nodes = ARRAY_SIZE(msm8939_pcnoc_nodes),
.regmap_cfg = &msm8939_pcnoc_regmap_config,
diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c
new file mode 100644
index 000000000000..499e11fbbd2e
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8996.c
@@ -0,0 +1,2110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Qualcomm MSM8996 Network-on-Chip (NoC) QoS driver
+ *
+ * Copyright (c) 2021 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/interconnect/qcom,msm8996.h>
+
+#include "icc-rpm.h"
+#include "smd-rpm.h"
+#include "msm8996.h"
+
+static const char * const bus_mm_clocks[] = {
+ "bus",
+ "bus_a",
+ "iface"
+};
+
+static const char * const bus_a0noc_clocks[] = {
+ "aggre0_snoc_axi",
+ "aggre0_cnoc_ahb",
+ "aggre0_noc_mpu_cfg"
+};
+
+static const u16 mas_a0noc_common_links[] = {
+ MSM8996_SLAVE_A0NOC_SNOC
+};
+
+static struct qcom_icc_node mas_pcie_0 = {
+ .name = "mas_pcie_0",
+ .id = MSM8996_MASTER_PCIE_0,
+ .buswidth = 8,
+ .mas_rpm_id = 65,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_a0noc_common_links),
+ .links = mas_a0noc_common_links
+};
+
+static struct qcom_icc_node mas_pcie_1 = {
+ .name = "mas_pcie_1",
+ .id = MSM8996_MASTER_PCIE_1,
+ .buswidth = 8,
+ .mas_rpm_id = 66,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 1,
+ .num_links = ARRAY_SIZE(mas_a0noc_common_links),
+ .links = mas_a0noc_common_links
+};
+
+static struct qcom_icc_node mas_pcie_2 = {
+ .name = "mas_pcie_2",
+ .id = MSM8996_MASTER_PCIE_2,
+ .buswidth = 8,
+ .mas_rpm_id = 119,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_a0noc_common_links),
+ .links = mas_a0noc_common_links
+};
+
+static const u16 mas_a1noc_common_links[] = {
+ MSM8996_SLAVE_A1NOC_SNOC
+};
+
+static struct qcom_icc_node mas_cnoc_a1noc = {
+ .name = "mas_cnoc_a1noc",
+ .id = MSM8996_MASTER_CNOC_A1NOC,
+ .buswidth = 8,
+ .mas_rpm_id = 116,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_a1noc_common_links),
+ .links = mas_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_crypto_c0 = {
+ .name = "mas_crypto_c0",
+ .id = MSM8996_MASTER_CRYPTO_CORE0,
+ .buswidth = 8,
+ .mas_rpm_id = 23,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_a1noc_common_links),
+ .links = mas_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_pnoc_a1noc = {
+ .name = "mas_pnoc_a1noc",
+ .id = MSM8996_MASTER_PNOC_A1NOC,
+ .buswidth = 8,
+ .mas_rpm_id = 117,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = false,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 1,
+ .num_links = ARRAY_SIZE(mas_a1noc_common_links),
+ .links = mas_a1noc_common_links
+};
+
+static const u16 mas_a2noc_common_links[] = {
+ MSM8996_SLAVE_A2NOC_SNOC
+};
+
+static struct qcom_icc_node mas_usb3 = {
+ .name = "mas_usb3",
+ .id = MSM8996_MASTER_USB3,
+ .buswidth = 8,
+ .mas_rpm_id = 32,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 3,
+ .num_links = ARRAY_SIZE(mas_a2noc_common_links),
+ .links = mas_a2noc_common_links
+};
+
+static struct qcom_icc_node mas_ipa = {
+ .name = "mas_ipa",
+ .id = MSM8996_MASTER_IPA,
+ .buswidth = 8,
+ .mas_rpm_id = 59,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = -1,
+ .num_links = ARRAY_SIZE(mas_a2noc_common_links),
+ .links = mas_a2noc_common_links
+};
+
+static struct qcom_icc_node mas_ufs = {
+ .name = "mas_ufs",
+ .id = MSM8996_MASTER_UFS,
+ .buswidth = 8,
+ .mas_rpm_id = 68,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_a2noc_common_links),
+ .links = mas_a2noc_common_links
+};
+
+static const u16 mas_apps_proc_links[] = {
+ MSM8996_SLAVE_BIMC_SNOC_1,
+ MSM8996_SLAVE_EBI_CH0,
+ MSM8996_SLAVE_BIMC_SNOC_0
+};
+
+static struct qcom_icc_node mas_apps_proc = {
+ .name = "mas_apps_proc",
+ .id = MSM8996_MASTER_AMPSS_M0,
+ .buswidth = 8,
+ .mas_rpm_id = 0,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_apps_proc_links),
+ .links = mas_apps_proc_links
+};
+
+static const u16 mas_oxili_common_links[] = {
+ MSM8996_SLAVE_BIMC_SNOC_1,
+ MSM8996_SLAVE_HMSS_L3,
+ MSM8996_SLAVE_EBI_CH0,
+ MSM8996_SLAVE_BIMC_SNOC_0
+};
+
+static struct qcom_icc_node mas_oxili = {
+ .name = "mas_oxili",
+ .id = MSM8996_MASTER_GRAPHICS_3D,
+ .buswidth = 8,
+ .mas_rpm_id = 6,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 1,
+ .num_links = ARRAY_SIZE(mas_oxili_common_links),
+ .links = mas_oxili_common_links
+};
+
+static struct qcom_icc_node mas_mnoc_bimc = {
+ .name = "mas_mnoc_bimc",
+ .id = MSM8996_MASTER_MNOC_BIMC,
+ .buswidth = 8,
+ .mas_rpm_id = 2,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_oxili_common_links),
+ .links = mas_oxili_common_links
+};
+
+static const u16 mas_snoc_bimc_links[] = {
+ MSM8996_SLAVE_HMSS_L3,
+ MSM8996_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_snoc_bimc = {
+ .name = "mas_snoc_bimc",
+ .id = MSM8996_MASTER_SNOC_BIMC,
+ .buswidth = 8,
+ .mas_rpm_id = 3,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = false,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_links),
+ .links = mas_snoc_bimc_links
+};
+
+static const u16 mas_snoc_cnoc_links[] = {
+ MSM8996_SLAVE_CLK_CTL,
+ MSM8996_SLAVE_RBCPR_CX,
+ MSM8996_SLAVE_A2NOC_SMMU_CFG,
+ MSM8996_SLAVE_A0NOC_MPU_CFG,
+ MSM8996_SLAVE_MESSAGE_RAM,
+ MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG,
+ MSM8996_SLAVE_PCIE_0_CFG,
+ MSM8996_SLAVE_TLMM,
+ MSM8996_SLAVE_MPM,
+ MSM8996_SLAVE_A0NOC_SMMU_CFG,
+ MSM8996_SLAVE_EBI1_PHY_CFG,
+ MSM8996_SLAVE_BIMC_CFG,
+ MSM8996_SLAVE_PIMEM_CFG,
+ MSM8996_SLAVE_RBCPR_MX,
+ MSM8996_SLAVE_PRNG,
+ MSM8996_SLAVE_PCIE20_AHB2PHY,
+ MSM8996_SLAVE_A2NOC_MPU_CFG,
+ MSM8996_SLAVE_QDSS_CFG,
+ MSM8996_SLAVE_A2NOC_CFG,
+ MSM8996_SLAVE_A0NOC_CFG,
+ MSM8996_SLAVE_UFS_CFG,
+ MSM8996_SLAVE_CRYPTO_0_CFG,
+ MSM8996_SLAVE_PCIE_1_CFG,
+ MSM8996_SLAVE_SNOC_CFG,
+ MSM8996_SLAVE_SNOC_MPU_CFG,
+ MSM8996_SLAVE_A1NOC_MPU_CFG,
+ MSM8996_SLAVE_A1NOC_SMMU_CFG,
+ MSM8996_SLAVE_PCIE_2_CFG,
+ MSM8996_SLAVE_CNOC_MNOC_CFG,
+ MSM8996_SLAVE_QDSS_RBCPR_APU_CFG,
+ MSM8996_SLAVE_PMIC_ARB,
+ MSM8996_SLAVE_IMEM_CFG,
+ MSM8996_SLAVE_A1NOC_CFG,
+ MSM8996_SLAVE_SSC_CFG,
+ MSM8996_SLAVE_TCSR,
+ MSM8996_SLAVE_LPASS_SMMU_CFG,
+ MSM8996_SLAVE_DCC_CFG
+};
+
+static struct qcom_icc_node mas_snoc_cnoc = {
+ .name = "mas_snoc_cnoc",
+ .id = MSM8996_MASTER_SNOC_CNOC,
+ .buswidth = 8,
+ .mas_rpm_id = 52,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_cnoc_links),
+ .links = mas_snoc_cnoc_links
+};
+
+static const u16 mas_qdss_dap_links[] = {
+ MSM8996_SLAVE_QDSS_RBCPR_APU_CFG,
+ MSM8996_SLAVE_RBCPR_CX,
+ MSM8996_SLAVE_A2NOC_SMMU_CFG,
+ MSM8996_SLAVE_A0NOC_MPU_CFG,
+ MSM8996_SLAVE_MESSAGE_RAM,
+ MSM8996_SLAVE_PCIE_0_CFG,
+ MSM8996_SLAVE_TLMM,
+ MSM8996_SLAVE_MPM,
+ MSM8996_SLAVE_A0NOC_SMMU_CFG,
+ MSM8996_SLAVE_EBI1_PHY_CFG,
+ MSM8996_SLAVE_BIMC_CFG,
+ MSM8996_SLAVE_PIMEM_CFG,
+ MSM8996_SLAVE_RBCPR_MX,
+ MSM8996_SLAVE_CLK_CTL,
+ MSM8996_SLAVE_PRNG,
+ MSM8996_SLAVE_PCIE20_AHB2PHY,
+ MSM8996_SLAVE_A2NOC_MPU_CFG,
+ MSM8996_SLAVE_QDSS_CFG,
+ MSM8996_SLAVE_A2NOC_CFG,
+ MSM8996_SLAVE_A0NOC_CFG,
+ MSM8996_SLAVE_UFS_CFG,
+ MSM8996_SLAVE_CRYPTO_0_CFG,
+ MSM8996_SLAVE_CNOC_A1NOC,
+ MSM8996_SLAVE_PCIE_1_CFG,
+ MSM8996_SLAVE_SNOC_CFG,
+ MSM8996_SLAVE_SNOC_MPU_CFG,
+ MSM8996_SLAVE_A1NOC_MPU_CFG,
+ MSM8996_SLAVE_A1NOC_SMMU_CFG,
+ MSM8996_SLAVE_PCIE_2_CFG,
+ MSM8996_SLAVE_CNOC_MNOC_CFG,
+ MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG,
+ MSM8996_SLAVE_PMIC_ARB,
+ MSM8996_SLAVE_IMEM_CFG,
+ MSM8996_SLAVE_A1NOC_CFG,
+ MSM8996_SLAVE_SSC_CFG,
+ MSM8996_SLAVE_TCSR,
+ MSM8996_SLAVE_LPASS_SMMU_CFG,
+ MSM8996_SLAVE_DCC_CFG
+};
+
+static struct qcom_icc_node mas_qdss_dap = {
+ .name = "mas_qdss_dap",
+ .id = MSM8996_MASTER_QDSS_DAP,
+ .buswidth = 8,
+ .mas_rpm_id = 49,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_qdss_dap_links),
+ .links = mas_qdss_dap_links
+};
+
+static const u16 mas_cnoc_mnoc_mmss_cfg_links[] = {
+ MSM8996_SLAVE_MMAGIC_CFG,
+ MSM8996_SLAVE_DSA_MPU_CFG,
+ MSM8996_SLAVE_MMSS_CLK_CFG,
+ MSM8996_SLAVE_CAMERA_THROTTLE_CFG,
+ MSM8996_SLAVE_VENUS_CFG,
+ MSM8996_SLAVE_SMMU_VFE_CFG,
+ MSM8996_SLAVE_MISC_CFG,
+ MSM8996_SLAVE_SMMU_CPP_CFG,
+ MSM8996_SLAVE_GRAPHICS_3D_CFG,
+ MSM8996_SLAVE_DISPLAY_THROTTLE_CFG,
+ MSM8996_SLAVE_VENUS_THROTTLE_CFG,
+ MSM8996_SLAVE_CAMERA_CFG,
+ MSM8996_SLAVE_DISPLAY_CFG,
+ MSM8996_SLAVE_CPR_CFG,
+ MSM8996_SLAVE_SMMU_ROTATOR_CFG,
+ MSM8996_SLAVE_DSA_CFG,
+ MSM8996_SLAVE_SMMU_VENUS_CFG,
+ MSM8996_SLAVE_VMEM_CFG,
+ MSM8996_SLAVE_SMMU_JPEG_CFG,
+ MSM8996_SLAVE_SMMU_MDP_CFG,
+ MSM8996_SLAVE_MNOC_MPU_CFG
+};
+
+static struct qcom_icc_node mas_cnoc_mnoc_mmss_cfg = {
+ .name = "mas_cnoc_mnoc_mmss_cfg",
+ .id = MSM8996_MASTER_CNOC_MNOC_MMSS_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = 4,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_cnoc_mnoc_mmss_cfg_links),
+ .links = mas_cnoc_mnoc_mmss_cfg_links
+};
+
+static const u16 mas_cnoc_mnoc_cfg_links[] = {
+ MSM8996_SLAVE_SERVICE_MNOC
+};
+
+static struct qcom_icc_node mas_cnoc_mnoc_cfg = {
+ .name = "mas_cnoc_mnoc_cfg",
+ .id = MSM8996_MASTER_CNOC_MNOC_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = 5,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_cnoc_mnoc_cfg_links),
+ .links = mas_cnoc_mnoc_cfg_links
+};
+
+static const u16 mas_mnoc_bimc_common_links[] = {
+ MSM8996_SLAVE_MNOC_BIMC
+};
+
+static struct qcom_icc_node mas_cpp = {
+ .name = "mas_cpp",
+ .id = MSM8996_MASTER_CPP,
+ .buswidth = 32,
+ .mas_rpm_id = 115,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_jpeg = {
+ .name = "mas_jpeg",
+ .id = MSM8996_MASTER_JPEG,
+ .buswidth = 32,
+ .mas_rpm_id = 7,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_mdp_p0 = {
+ .name = "mas_mdp_p0",
+ .id = MSM8996_MASTER_MDP_PORT0,
+ .buswidth = 32,
+ .mas_rpm_id = 8,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 1,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_mdp_p1 = {
+ .name = "mas_mdp_p1",
+ .id = MSM8996_MASTER_MDP_PORT1,
+ .buswidth = 32,
+ .mas_rpm_id = 61,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_rotator = {
+ .name = "mas_rotator",
+ .id = MSM8996_MASTER_ROTATOR,
+ .buswidth = 32,
+ .mas_rpm_id = 120,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_venus = {
+ .name = "mas_venus",
+ .id = MSM8996_MASTER_VIDEO_P0,
+ .buswidth = 32,
+ .mas_rpm_id = 9,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 3,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static struct qcom_icc_node mas_vfe = {
+ .name = "mas_vfe",
+ .id = MSM8996_MASTER_VFE,
+ .buswidth = 32,
+ .mas_rpm_id = 11,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_mnoc_bimc_common_links),
+ .links = mas_mnoc_bimc_common_links
+};
+
+static const u16 mas_vmem_common_links[] = {
+ MSM8996_SLAVE_VMEM
+};
+
+static struct qcom_icc_node mas_snoc_vmem = {
+ .name = "mas_snoc_vmem",
+ .id = MSM8996_MASTER_SNOC_VMEM,
+ .buswidth = 32,
+ .mas_rpm_id = 114,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_vmem_common_links),
+ .links = mas_vmem_common_links
+};
+
+static struct qcom_icc_node mas_venus_vmem = {
+ .name = "mas_venus_vmem",
+ .id = MSM8996_MASTER_VIDEO_P0_OCMEM,
+ .buswidth = 32,
+ .mas_rpm_id = 121,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_vmem_common_links),
+ .links = mas_vmem_common_links
+};
+
+static const u16 mas_snoc_pnoc_links[] = {
+ MSM8996_SLAVE_BLSP_1,
+ MSM8996_SLAVE_BLSP_2,
+ MSM8996_SLAVE_SDCC_1,
+ MSM8996_SLAVE_SDCC_2,
+ MSM8996_SLAVE_SDCC_4,
+ MSM8996_SLAVE_TSIF,
+ MSM8996_SLAVE_PDM,
+ MSM8996_SLAVE_AHB2PHY
+};
+
+static struct qcom_icc_node mas_snoc_pnoc = {
+ .name = "mas_snoc_pnoc",
+ .id = MSM8996_MASTER_SNOC_PNOC,
+ .buswidth = 8,
+ .mas_rpm_id = 44,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_pnoc_links),
+ .links = mas_snoc_pnoc_links
+};
+
+static const u16 mas_pnoc_a1noc_common_links[] = {
+ MSM8996_SLAVE_PNOC_A1NOC
+};
+
+static struct qcom_icc_node mas_sdcc_1 = {
+ .name = "mas_sdcc_1",
+ .id = MSM8996_MASTER_SDCC_1,
+ .buswidth = 8,
+ .mas_rpm_id = 33,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_sdcc_2 = {
+ .name = "mas_sdcc_2",
+ .id = MSM8996_MASTER_SDCC_2,
+ .buswidth = 8,
+ .mas_rpm_id = 35,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_sdcc_4 = {
+ .name = "mas_sdcc_4",
+ .id = MSM8996_MASTER_SDCC_4,
+ .buswidth = 8,
+ .mas_rpm_id = 36,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_usb_hs = {
+ .name = "mas_usb_hs",
+ .id = MSM8996_MASTER_USB_HS,
+ .buswidth = 8,
+ .mas_rpm_id = 42,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_blsp_1 = {
+ .name = "mas_blsp_1",
+ .id = MSM8996_MASTER_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = 41,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_blsp_2 = {
+ .name = "mas_blsp_2",
+ .id = MSM8996_MASTER_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = 39,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static struct qcom_icc_node mas_tsif = {
+ .name = "mas_tsif",
+ .id = MSM8996_MASTER_TSIF,
+ .buswidth = 4,
+ .mas_rpm_id = 37,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pnoc_a1noc_common_links),
+ .links = mas_pnoc_a1noc_common_links
+};
+
+static const u16 mas_hmss_links[] = {
+ MSM8996_SLAVE_PIMEM,
+ MSM8996_SLAVE_OCIMEM,
+ MSM8996_SLAVE_SNOC_BIMC
+};
+
+static struct qcom_icc_node mas_hmss = {
+ .name = "mas_hmss",
+ .id = MSM8996_MASTER_HMSS,
+ .buswidth = 8,
+ .mas_rpm_id = 118,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 4,
+ .num_links = ARRAY_SIZE(mas_hmss_links),
+ .links = mas_hmss_links
+};
+
+static const u16 mas_qdss_common_links[] = {
+ MSM8996_SLAVE_PIMEM,
+ MSM8996_SLAVE_USB3,
+ MSM8996_SLAVE_OCIMEM,
+ MSM8996_SLAVE_SNOC_BIMC,
+ MSM8996_SLAVE_SNOC_PNOC
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .name = "mas_qdss_bam",
+ .id = MSM8996_MASTER_QDSS_BAM,
+ .buswidth = 16,
+ .mas_rpm_id = 19,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_qdss_common_links),
+ .links = mas_qdss_common_links
+};
+
+static const u16 mas_snoc_cfg_links[] = {
+ MSM8996_SLAVE_SERVICE_SNOC
+};
+
+static struct qcom_icc_node mas_snoc_cfg = {
+ .name = "mas_snoc_cfg",
+ .id = MSM8996_MASTER_SNOC_CFG,
+ .buswidth = 16,
+ .mas_rpm_id = 20,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_snoc_cfg_links),
+ .links = mas_snoc_cfg_links
+};
+
+static const u16 mas_bimc_snoc_0_links[] = {
+ MSM8996_SLAVE_SNOC_VMEM,
+ MSM8996_SLAVE_USB3,
+ MSM8996_SLAVE_PIMEM,
+ MSM8996_SLAVE_LPASS,
+ MSM8996_SLAVE_APPSS,
+ MSM8996_SLAVE_SNOC_CNOC,
+ MSM8996_SLAVE_SNOC_PNOC,
+ MSM8996_SLAVE_OCIMEM,
+ MSM8996_SLAVE_QDSS_STM
+};
+
+static struct qcom_icc_node mas_bimc_snoc_0 = {
+ .name = "mas_bimc_snoc_0",
+ .id = MSM8996_MASTER_BIMC_SNOC_0,
+ .buswidth = 16,
+ .mas_rpm_id = 21,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_0_links),
+ .links = mas_bimc_snoc_0_links
+};
+
+static const u16 mas_bimc_snoc_1_links[] = {
+ MSM8996_SLAVE_PCIE_2,
+ MSM8996_SLAVE_PCIE_1,
+ MSM8996_SLAVE_PCIE_0
+};
+
+static struct qcom_icc_node mas_bimc_snoc_1 = {
+ .name = "mas_bimc_snoc_1",
+ .id = MSM8996_MASTER_BIMC_SNOC_1,
+ .buswidth = 16,
+ .mas_rpm_id = 109,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_1_links),
+ .links = mas_bimc_snoc_1_links
+};
+
+static const u16 mas_a0noc_snoc_links[] = {
+ MSM8996_SLAVE_SNOC_PNOC,
+ MSM8996_SLAVE_OCIMEM,
+ MSM8996_SLAVE_APPSS,
+ MSM8996_SLAVE_SNOC_BIMC,
+ MSM8996_SLAVE_PIMEM
+};
+
+static struct qcom_icc_node mas_a0noc_snoc = {
+ .name = "mas_a0noc_snoc",
+ .id = MSM8996_MASTER_A0NOC_SNOC,
+ .buswidth = 16,
+ .mas_rpm_id = 110,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mas_a0noc_snoc_links),
+ .links = mas_a0noc_snoc_links
+};
+
+static const u16 mas_a1noc_snoc_links[] = {
+ MSM8996_SLAVE_SNOC_VMEM,
+ MSM8996_SLAVE_USB3,
+ MSM8996_SLAVE_PCIE_0,
+ MSM8996_SLAVE_PIMEM,
+ MSM8996_SLAVE_PCIE_2,
+ MSM8996_SLAVE_LPASS,
+ MSM8996_SLAVE_PCIE_1,
+ MSM8996_SLAVE_APPSS,
+ MSM8996_SLAVE_SNOC_BIMC,
+ MSM8996_SLAVE_SNOC_CNOC,
+ MSM8996_SLAVE_SNOC_PNOC,
+ MSM8996_SLAVE_OCIMEM,
+ MSM8996_SLAVE_QDSS_STM
+};
+
+static struct qcom_icc_node mas_a1noc_snoc = {
+ .name = "mas_a1noc_snoc",
+ .id = MSM8996_MASTER_A1NOC_SNOC,
+ .buswidth = 16,
+ .mas_rpm_id = 111,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_a1noc_snoc_links),
+ .links = mas_a1noc_snoc_links
+};
+
+static const u16 mas_a2noc_snoc_links[] = {
+ MSM8996_SLAVE_SNOC_VMEM,
+ MSM8996_SLAVE_USB3,
+ MSM8996_SLAVE_PCIE_1,
+ MSM8996_SLAVE_PIMEM,
+ MSM8996_SLAVE_PCIE_2,
+ MSM8996_SLAVE_QDSS_STM,
+ MSM8996_SLAVE_LPASS,
+ MSM8996_SLAVE_SNOC_BIMC,
+ MSM8996_SLAVE_SNOC_CNOC,
+ MSM8996_SLAVE_SNOC_PNOC,
+ MSM8996_SLAVE_OCIMEM,
+ MSM8996_SLAVE_PCIE_0
+};
+
+static struct qcom_icc_node mas_a2noc_snoc = {
+ .name = "mas_a2noc_snoc",
+ .id = MSM8996_MASTER_A2NOC_SNOC,
+ .buswidth = 16,
+ .mas_rpm_id = 112,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_a2noc_snoc_links),
+ .links = mas_a2noc_snoc_links
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .name = "mas_qdss_etr",
+ .id = MSM8996_MASTER_QDSS_ETR,
+ .buswidth = 16,
+ .mas_rpm_id = 31,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 3,
+ .num_links = ARRAY_SIZE(mas_qdss_common_links),
+ .links = mas_qdss_common_links
+};
+
+static const u16 slv_a0noc_snoc_links[] = {
+ MSM8996_MASTER_A0NOC_SNOC
+};
+
+static struct qcom_icc_node slv_a0noc_snoc = {
+ .name = "slv_a0noc_snoc",
+ .id = MSM8996_SLAVE_A0NOC_SNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 141,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_a0noc_snoc_links),
+ .links = slv_a0noc_snoc_links
+};
+
+static const u16 slv_a1noc_snoc_links[] = {
+ MSM8996_MASTER_A1NOC_SNOC
+};
+
+static struct qcom_icc_node slv_a1noc_snoc = {
+ .name = "slv_a1noc_snoc",
+ .id = MSM8996_SLAVE_A1NOC_SNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 142,
+ .num_links = ARRAY_SIZE(slv_a1noc_snoc_links),
+ .links = slv_a1noc_snoc_links
+};
+
+static const u16 slv_a2noc_snoc_links[] = {
+ MSM8996_MASTER_A2NOC_SNOC
+};
+
+static struct qcom_icc_node slv_a2noc_snoc = {
+ .name = "slv_a2noc_snoc",
+ .id = MSM8996_SLAVE_A2NOC_SNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 143,
+ .num_links = ARRAY_SIZE(slv_a2noc_snoc_links),
+ .links = slv_a2noc_snoc_links
+};
+
+static struct qcom_icc_node slv_ebi = {
+ .name = "slv_ebi",
+ .id = MSM8996_SLAVE_EBI_CH0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0
+};
+
+static struct qcom_icc_node slv_hmss_l3 = {
+ .name = "slv_hmss_l3",
+ .id = MSM8996_SLAVE_HMSS_L3,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 160
+};
+
+static const u16 slv_bimc_snoc_0_links[] = {
+ MSM8996_MASTER_BIMC_SNOC_0
+};
+
+static struct qcom_icc_node slv_bimc_snoc_0 = {
+ .name = "slv_bimc_snoc_0",
+ .id = MSM8996_SLAVE_BIMC_SNOC_0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_0_links),
+ .links = slv_bimc_snoc_0_links
+};
+
+static const u16 slv_bimc_snoc_1_links[] = {
+ MSM8996_MASTER_BIMC_SNOC_1
+};
+
+static struct qcom_icc_node slv_bimc_snoc_1 = {
+ .name = "slv_bimc_snoc_1",
+ .id = MSM8996_SLAVE_BIMC_SNOC_1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 138,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_1_links),
+ .links = slv_bimc_snoc_1_links
+};
+
+static const u16 slv_cnoc_a1noc_links[] = {
+ MSM8996_MASTER_CNOC_A1NOC
+};
+
+static struct qcom_icc_node slv_cnoc_a1noc = {
+ .name = "slv_cnoc_a1noc",
+ .id = MSM8996_SLAVE_CNOC_A1NOC,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 75,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_cnoc_a1noc_links),
+ .links = slv_cnoc_a1noc_links
+};
+
+static struct qcom_icc_node slv_clk_ctl = {
+ .name = "slv_clk_ctl",
+ .id = MSM8996_SLAVE_CLK_CTL,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 47
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = MSM8996_SLAVE_TCSR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 50
+};
+
+static struct qcom_icc_node slv_tlmm = {
+ .name = "slv_tlmm",
+ .id = MSM8996_SLAVE_TLMM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 51
+};
+
+static struct qcom_icc_node slv_crypto0_cfg = {
+ .name = "slv_crypto0_cfg",
+ .id = MSM8996_SLAVE_CRYPTO_0_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 52,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_mpm = {
+ .name = "slv_mpm",
+ .id = MSM8996_SLAVE_MPM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 62,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pimem_cfg = {
+ .name = "slv_pimem_cfg",
+ .id = MSM8996_SLAVE_PIMEM_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 167,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_imem_cfg = {
+ .name = "slv_imem_cfg",
+ .id = MSM8996_SLAVE_IMEM_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 54,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_message_ram = {
+ .name = "slv_message_ram",
+ .id = MSM8996_SLAVE_MESSAGE_RAM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 55
+};
+
+static struct qcom_icc_node slv_bimc_cfg = {
+ .name = "slv_bimc_cfg",
+ .id = MSM8996_SLAVE_BIMC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 56,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = MSM8996_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 59
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = MSM8996_SLAVE_PRNG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 127,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_dcc_cfg = {
+ .name = "slv_dcc_cfg",
+ .id = MSM8996_SLAVE_DCC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 155,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_rbcpr_mx = {
+ .name = "slv_rbcpr_mx",
+ .id = MSM8996_SLAVE_RBCPR_MX,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 170,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_qdss_cfg = {
+ .name = "slv_qdss_cfg",
+ .id = MSM8996_SLAVE_QDSS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 63,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_rbcpr_cx = {
+ .name = "slv_rbcpr_cx",
+ .id = MSM8996_SLAVE_RBCPR_CX,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 169,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_cpu_apu_cfg = {
+ .name = "slv_cpu_apu_cfg",
+ .id = MSM8996_SLAVE_QDSS_RBCPR_APU_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 168,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_cnoc_mnoc_cfg_links[] = {
+ MSM8996_MASTER_CNOC_MNOC_CFG
+};
+
+static struct qcom_icc_node slv_cnoc_mnoc_cfg = {
+ .name = "slv_cnoc_mnoc_cfg",
+ .id = MSM8996_SLAVE_CNOC_MNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 66,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_cnoc_mnoc_cfg_links),
+ .links = slv_cnoc_mnoc_cfg_links
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = MSM8996_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 70,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_snoc_mpu_cfg = {
+ .name = "slv_snoc_mpu_cfg",
+ .id = MSM8996_SLAVE_SNOC_MPU_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 67,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_ebi1_phy_cfg = {
+ .name = "slv_ebi1_phy_cfg",
+ .id = MSM8996_SLAVE_EBI1_PHY_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 73,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a0noc_cfg = {
+ .name = "slv_a0noc_cfg",
+ .id = MSM8996_SLAVE_A0NOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 144,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie_1_cfg = {
+ .name = "slv_pcie_1_cfg",
+ .id = MSM8996_SLAVE_PCIE_1_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 89,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie_2_cfg = {
+ .name = "slv_pcie_2_cfg",
+ .id = MSM8996_SLAVE_PCIE_2_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 165,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie_0_cfg = {
+ .name = "slv_pcie_0_cfg",
+ .id = MSM8996_SLAVE_PCIE_0_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 88,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie20_ahb2phy = {
+ .name = "slv_pcie20_ahb2phy",
+ .id = MSM8996_SLAVE_PCIE20_AHB2PHY,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 163,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a0noc_mpu_cfg = {
+ .name = "slv_a0noc_mpu_cfg",
+ .id = MSM8996_SLAVE_A0NOC_MPU_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 145,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_ufs_cfg = {
+ .name = "slv_ufs_cfg",
+ .id = MSM8996_SLAVE_UFS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 92,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a1noc_cfg = {
+ .name = "slv_a1noc_cfg",
+ .id = MSM8996_SLAVE_A1NOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 147,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a1noc_mpu_cfg = {
+ .name = "slv_a1noc_mpu_cfg",
+ .id = MSM8996_SLAVE_A1NOC_MPU_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 148,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a2noc_cfg = {
+ .name = "slv_a2noc_cfg",
+ .id = MSM8996_SLAVE_A2NOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 150,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a2noc_mpu_cfg = {
+ .name = "slv_a2noc_mpu_cfg",
+ .id = MSM8996_SLAVE_A2NOC_MPU_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 151,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_ssc_cfg = {
+ .name = "slv_ssc_cfg",
+ .id = MSM8996_SLAVE_SSC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 177,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a0noc_smmu_cfg = {
+ .name = "slv_a0noc_smmu_cfg",
+ .id = MSM8996_SLAVE_A0NOC_SMMU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 146,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a1noc_smmu_cfg = {
+ .name = "slv_a1noc_smmu_cfg",
+ .id = MSM8996_SLAVE_A1NOC_SMMU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 149,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_a2noc_smmu_cfg = {
+ .name = "slv_a2noc_smmu_cfg",
+ .id = MSM8996_SLAVE_A2NOC_SMMU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 152,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_lpass_smmu_cfg = {
+ .name = "slv_lpass_smmu_cfg",
+ .id = MSM8996_SLAVE_LPASS_SMMU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 161,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_cnoc_mnoc_mmss_cfg_links[] = {
+ MSM8996_MASTER_CNOC_MNOC_MMSS_CFG
+};
+
+static struct qcom_icc_node slv_cnoc_mnoc_mmss_cfg = {
+ .name = "slv_cnoc_mnoc_mmss_cfg",
+ .id = MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 58,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_cnoc_mnoc_mmss_cfg_links),
+ .links = slv_cnoc_mnoc_mmss_cfg_links
+};
+
+static struct qcom_icc_node slv_mmagic_cfg = {
+ .name = "slv_mmagic_cfg",
+ .id = MSM8996_SLAVE_MMAGIC_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 162,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_cpr_cfg = {
+ .name = "slv_cpr_cfg",
+ .id = MSM8996_SLAVE_CPR_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 6,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_misc_cfg = {
+ .name = "slv_misc_cfg",
+ .id = MSM8996_SLAVE_MISC_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_venus_throttle_cfg = {
+ .name = "slv_venus_throttle_cfg",
+ .id = MSM8996_SLAVE_VENUS_THROTTLE_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 178,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+ .name = "slv_venus_cfg",
+ .id = MSM8996_SLAVE_VENUS_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 10,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_vmem_cfg = {
+ .name = "slv_vmem_cfg",
+ .id = MSM8996_SLAVE_VMEM_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 180,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_dsa_cfg = {
+ .name = "slv_dsa_cfg",
+ .id = MSM8996_SLAVE_DSA_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 157,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_mnoc_clocks_cfg = {
+ .name = "slv_mnoc_clocks_cfg",
+ .id = MSM8996_SLAVE_MMSS_CLK_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 12,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_dsa_mpu_cfg = {
+ .name = "slv_dsa_mpu_cfg",
+ .id = MSM8996_SLAVE_DSA_MPU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 158,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_mnoc_mpu_cfg = {
+ .name = "slv_mnoc_mpu_cfg",
+ .id = MSM8996_SLAVE_MNOC_MPU_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 14,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_display_cfg = {
+ .name = "slv_display_cfg",
+ .id = MSM8996_SLAVE_DISPLAY_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_display_throttle_cfg = {
+ .name = "slv_display_throttle_cfg",
+ .id = MSM8996_SLAVE_DISPLAY_THROTTLE_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 156,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_camera_cfg = {
+ .name = "slv_camera_cfg",
+ .id = MSM8996_SLAVE_CAMERA_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 3,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_camera_throttle_cfg = {
+ .name = "slv_camera_throttle_cfg",
+ .id = MSM8996_SLAVE_CAMERA_THROTTLE_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 154,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_oxili_cfg = {
+ .name = "slv_oxili_cfg",
+ .id = MSM8996_SLAVE_GRAPHICS_3D_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 11,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_mdp_cfg = {
+ .name = "slv_smmu_mdp_cfg",
+ .id = MSM8996_SLAVE_SMMU_MDP_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 173,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_rot_cfg = {
+ .name = "slv_smmu_rot_cfg",
+ .id = MSM8996_SLAVE_SMMU_ROTATOR_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 174,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_venus_cfg = {
+ .name = "slv_smmu_venus_cfg",
+ .id = MSM8996_SLAVE_SMMU_VENUS_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 175,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_cpp_cfg = {
+ .name = "slv_smmu_cpp_cfg",
+ .id = MSM8996_SLAVE_SMMU_CPP_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 171,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_jpeg_cfg = {
+ .name = "slv_smmu_jpeg_cfg",
+ .id = MSM8996_SLAVE_SMMU_JPEG_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 172,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_smmu_vfe_cfg = {
+ .name = "slv_smmu_vfe_cfg",
+ .id = MSM8996_SLAVE_SMMU_VFE_CFG,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 176,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_mnoc_bimc_links[] = {
+ MSM8996_MASTER_MNOC_BIMC
+};
+
+static struct qcom_icc_node slv_mnoc_bimc = {
+ .name = "slv_mnoc_bimc",
+ .id = MSM8996_SLAVE_MNOC_BIMC,
+ .buswidth = 32,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 16,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_mnoc_bimc_links),
+ .links = slv_mnoc_bimc_links
+};
+
+static struct qcom_icc_node slv_vmem = {
+ .name = "slv_vmem",
+ .id = MSM8996_SLAVE_VMEM,
+ .buswidth = 32,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 179,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_srvc_mnoc = {
+ .name = "slv_srvc_mnoc",
+ .id = MSM8996_SLAVE_SERVICE_MNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 17,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_pnoc_a1noc_links[] = {
+ MSM8996_MASTER_PNOC_A1NOC
+};
+
+static struct qcom_icc_node slv_pnoc_a1noc = {
+ .name = "slv_pnoc_a1noc",
+ .id = MSM8996_SLAVE_PNOC_A1NOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 139,
+ .num_links = ARRAY_SIZE(slv_pnoc_a1noc_links),
+ .links = slv_pnoc_a1noc_links
+};
+
+static struct qcom_icc_node slv_usb_hs = {
+ .name = "slv_usb_hs",
+ .id = MSM8996_SLAVE_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 40
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = MSM8996_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 33
+};
+
+static struct qcom_icc_node slv_sdcc_4 = {
+ .name = "slv_sdcc_4",
+ .id = MSM8996_SLAVE_SDCC_4,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 34
+};
+
+static struct qcom_icc_node slv_tsif = {
+ .name = "slv_tsif",
+ .id = MSM8996_SLAVE_TSIF,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 35
+};
+
+static struct qcom_icc_node slv_blsp_2 = {
+ .name = "slv_blsp_2",
+ .id = MSM8996_SLAVE_BLSP_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 37
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = MSM8996_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 31
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+ .name = "slv_blsp_1",
+ .id = MSM8996_SLAVE_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 39
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = MSM8996_SLAVE_PDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 41
+};
+
+static struct qcom_icc_node slv_ahb2phy = {
+ .name = "slv_ahb2phy",
+ .id = MSM8996_SLAVE_AHB2PHY,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 153,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_hmss = {
+ .name = "slv_hmss",
+ .id = MSM8996_SLAVE_APPSS,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 20,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_lpass = {
+ .name = "slv_lpass",
+ .id = MSM8996_SLAVE_LPASS,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 21,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_usb3 = {
+ .name = "slv_usb3",
+ .id = MSM8996_SLAVE_USB3,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 22,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static const u16 slv_snoc_bimc_links[] = {
+ MSM8996_MASTER_SNOC_BIMC
+};
+
+static struct qcom_icc_node slv_snoc_bimc = {
+ .name = "slv_snoc_bimc",
+ .id = MSM8996_SLAVE_SNOC_BIMC,
+ .buswidth = 32,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 24,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_links),
+ .links = slv_snoc_bimc_links
+};
+
+static const u16 slv_snoc_cnoc_links[] = {
+ MSM8996_MASTER_SNOC_CNOC
+};
+
+static struct qcom_icc_node slv_snoc_cnoc = {
+ .name = "slv_snoc_cnoc",
+ .id = MSM8996_SLAVE_SNOC_CNOC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 25,
+ .num_links = ARRAY_SIZE(slv_snoc_cnoc_links),
+ .links = slv_snoc_cnoc_links
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = MSM8996_SLAVE_OCIMEM,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26
+};
+
+static struct qcom_icc_node slv_pimem = {
+ .name = "slv_pimem",
+ .id = MSM8996_SLAVE_PIMEM,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 166
+};
+
+static const u16 slv_snoc_vmem_links[] = {
+ MSM8996_MASTER_SNOC_VMEM
+};
+
+static struct qcom_icc_node slv_snoc_vmem = {
+ .name = "slv_snoc_vmem",
+ .id = MSM8996_SLAVE_SNOC_VMEM,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 140,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_snoc_vmem_links),
+ .links = slv_snoc_vmem_links
+};
+
+static const u16 slv_snoc_pnoc_links[] = {
+ MSM8996_MASTER_SNOC_PNOC
+};
+
+static struct qcom_icc_node slv_snoc_pnoc = {
+ .name = "slv_snoc_pnoc",
+ .id = MSM8996_SLAVE_SNOC_PNOC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 28,
+ .num_links = ARRAY_SIZE(slv_snoc_pnoc_links),
+ .links = slv_snoc_pnoc_links
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = MSM8996_SLAVE_QDSS_STM,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30
+};
+
+static struct qcom_icc_node slv_pcie_0 = {
+ .name = "slv_pcie_0",
+ .id = MSM8996_SLAVE_PCIE_0,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 84,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie_1 = {
+ .name = "slv_pcie_1",
+ .id = MSM8996_SLAVE_PCIE_1,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 85,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_pcie_2 = {
+ .name = "slv_pcie_2",
+ .id = MSM8996_SLAVE_PCIE_2,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 164,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
+static struct qcom_icc_node slv_srvc_snoc = {
+ .name = "slv_srvc_snoc",
+ .id = MSM8996_SLAVE_SERVICE_SNOC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 29,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID
+};
+
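+/*
+ * Per-NoC node tables. The MASTER_*/SLAVE_* indices are the interconnect
+ * IDs exposed to consumers through the dt-bindings header, and each table
+ * is referenced by the qcom_icc_desc of its NoC below.
+ */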
+static struct qcom_icc_node *a0noc_nodes[] = {
+ [MASTER_PCIE_0] = &mas_pcie_0,
+ [MASTER_PCIE_1] = &mas_pcie_1,
+ [MASTER_PCIE_2] = &mas_pcie_2
+};
+
+static const struct regmap_config msm8996_a0noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9000,
+ .fast_io = true
+};
+
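+/*
+ * The A0NOC is the only provider here marked has_bus_pd, so its bus power
+ * domain must be up, along with its bus clocks, before its registers are
+ * accessed.
+ */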
+static const struct qcom_icc_desc msm8996_a0noc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = a0noc_nodes,
+ .num_nodes = ARRAY_SIZE(a0noc_nodes),
+ .clocks = bus_a0noc_clocks,
+ .num_clocks = ARRAY_SIZE(bus_a0noc_clocks),
+ .has_bus_pd = true,
+ .regmap_cfg = &msm8996_a0noc_regmap_config
+};
+
+static struct qcom_icc_node *a1noc_nodes[] = {
+ [MASTER_CNOC_A1NOC] = &mas_cnoc_a1noc,
+ [MASTER_CRYPTO_CORE0] = &mas_crypto_c0,
+ [MASTER_PNOC_A1NOC] = &mas_pnoc_a1noc
+};
+
+static const struct regmap_config msm8996_a1noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_a1noc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = a1noc_nodes,
+ .num_nodes = ARRAY_SIZE(a1noc_nodes),
+ .regmap_cfg = &msm8996_a1noc_regmap_config
+};
+
+static struct qcom_icc_node *a2noc_nodes[] = {
+ [MASTER_USB3] = &mas_usb3,
+ [MASTER_IPA] = &mas_ipa,
+ [MASTER_UFS] = &mas_ufs
+};
+
+static const struct regmap_config msm8996_a2noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xa000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_a2noc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = a2noc_nodes,
+ .num_nodes = ARRAY_SIZE(a2noc_nodes),
+ .regmap_cfg = &msm8996_a2noc_regmap_config
+};
+
+static struct qcom_icc_node *bimc_nodes[] = {
+ [MASTER_AMPSS_M0] = &mas_apps_proc,
+ [MASTER_GRAPHICS_3D] = &mas_oxili,
+ [MASTER_MNOC_BIMC] = &mas_mnoc_bimc,
+ [MASTER_SNOC_BIMC] = &mas_snoc_bimc,
+ [SLAVE_EBI_CH0] = &slv_ebi,
+ [SLAVE_HMSS_L3] = &slv_hmss_l3,
+ [SLAVE_BIMC_SNOC_0] = &slv_bimc_snoc_0,
+ [SLAVE_BIMC_SNOC_1] = &slv_bimc_snoc_1
+};
+
+static const struct regmap_config msm8996_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x62000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_bimc = {
+ .type = QCOM_ICC_BIMC,
+ .nodes = bimc_nodes,
+ .num_nodes = ARRAY_SIZE(bimc_nodes),
+ .regmap_cfg = &msm8996_bimc_regmap_config
+};
+
+static struct qcom_icc_node *cnoc_nodes[] = {
+ [MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
+ [MASTER_QDSS_DAP] = &mas_qdss_dap,
+ [SLAVE_CNOC_A1NOC] = &slv_cnoc_a1noc,
+ [SLAVE_CLK_CTL] = &slv_clk_ctl,
+ [SLAVE_TCSR] = &slv_tcsr,
+ [SLAVE_TLMM] = &slv_tlmm,
+ [SLAVE_CRYPTO_0_CFG] = &slv_crypto0_cfg,
+ [SLAVE_MPM] = &slv_mpm,
+ [SLAVE_PIMEM_CFG] = &slv_pimem_cfg,
+ [SLAVE_IMEM_CFG] = &slv_imem_cfg,
+ [SLAVE_MESSAGE_RAM] = &slv_message_ram,
+ [SLAVE_BIMC_CFG] = &slv_bimc_cfg,
+ [SLAVE_PMIC_ARB] = &slv_pmic_arb,
+ [SLAVE_PRNG] = &slv_prng,
+ [SLAVE_DCC_CFG] = &slv_dcc_cfg,
+ [SLAVE_RBCPR_MX] = &slv_rbcpr_mx,
+ [SLAVE_QDSS_CFG] = &slv_qdss_cfg,
+ [SLAVE_RBCPR_CX] = &slv_rbcpr_cx,
+ [SLAVE_QDSS_RBCPR_APU] = &slv_cpu_apu_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &slv_cnoc_mnoc_cfg,
+ [SLAVE_SNOC_CFG] = &slv_snoc_cfg,
+ [SLAVE_SNOC_MPU_CFG] = &slv_snoc_mpu_cfg,
+ [SLAVE_EBI1_PHY_CFG] = &slv_ebi1_phy_cfg,
+ [SLAVE_A0NOC_CFG] = &slv_a0noc_cfg,
+ [SLAVE_PCIE_1_CFG] = &slv_pcie_1_cfg,
+ [SLAVE_PCIE_2_CFG] = &slv_pcie_2_cfg,
+ [SLAVE_PCIE_0_CFG] = &slv_pcie_0_cfg,
+ [SLAVE_PCIE20_AHB2PHY] = &slv_pcie20_ahb2phy,
+ [SLAVE_A0NOC_MPU_CFG] = &slv_a0noc_mpu_cfg,
+ [SLAVE_UFS_CFG] = &slv_ufs_cfg,
+ [SLAVE_A1NOC_CFG] = &slv_a1noc_cfg,
+ [SLAVE_A1NOC_MPU_CFG] = &slv_a1noc_mpu_cfg,
+ [SLAVE_A2NOC_CFG] = &slv_a2noc_cfg,
+ [SLAVE_A2NOC_MPU_CFG] = &slv_a2noc_mpu_cfg,
+ [SLAVE_SSC_CFG] = &slv_ssc_cfg,
+ [SLAVE_A0NOC_SMMU_CFG] = &slv_a0noc_smmu_cfg,
+ [SLAVE_A1NOC_SMMU_CFG] = &slv_a1noc_smmu_cfg,
+ [SLAVE_A2NOC_SMMU_CFG] = &slv_a2noc_smmu_cfg,
+ [SLAVE_LPASS_SMMU_CFG] = &slv_lpass_smmu_cfg,
+ [SLAVE_CNOC_MNOC_MMSS_CFG] = &slv_cnoc_mnoc_mmss_cfg
+};
+
+static const struct regmap_config msm8996_cnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_cnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = cnoc_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_nodes),
+ .regmap_cfg = &msm8996_cnoc_regmap_config
+};
+
+static struct qcom_icc_node *mnoc_nodes[] = {
+ [MASTER_CNOC_MNOC_CFG] = &mas_cnoc_mnoc_cfg,
+ [MASTER_CPP] = &mas_cpp,
+ [MASTER_JPEG] = &mas_jpeg,
+ [MASTER_MDP_PORT0] = &mas_mdp_p0,
+ [MASTER_MDP_PORT1] = &mas_mdp_p1,
+ [MASTER_ROTATOR] = &mas_rotator,
+ [MASTER_VIDEO_P0] = &mas_venus,
+ [MASTER_VFE] = &mas_vfe,
+ [MASTER_SNOC_VMEM] = &mas_snoc_vmem,
+ [MASTER_VIDEO_P0_OCMEM] = &mas_venus_vmem,
+ [MASTER_CNOC_MNOC_MMSS_CFG] = &mas_cnoc_mnoc_mmss_cfg,
+ [SLAVE_MNOC_BIMC] = &slv_mnoc_bimc,
+ [SLAVE_VMEM] = &slv_vmem,
+ [SLAVE_SERVICE_MNOC] = &slv_srvc_mnoc,
+ [SLAVE_MMAGIC_CFG] = &slv_mmagic_cfg,
+ [SLAVE_CPR_CFG] = &slv_cpr_cfg,
+ [SLAVE_MISC_CFG] = &slv_misc_cfg,
+ [SLAVE_VENUS_THROTTLE_CFG] = &slv_venus_throttle_cfg,
+ [SLAVE_VENUS_CFG] = &slv_venus_cfg,
+ [SLAVE_VMEM_CFG] = &slv_vmem_cfg,
+ [SLAVE_DSA_CFG] = &slv_dsa_cfg,
+ [SLAVE_MMSS_CLK_CFG] = &slv_mnoc_clocks_cfg,
+ [SLAVE_DSA_MPU_CFG] = &slv_dsa_mpu_cfg,
+ [SLAVE_MNOC_MPU_CFG] = &slv_mnoc_mpu_cfg,
+ [SLAVE_DISPLAY_CFG] = &slv_display_cfg,
+ [SLAVE_DISPLAY_THROTTLE_CFG] = &slv_display_throttle_cfg,
+ [SLAVE_CAMERA_CFG] = &slv_camera_cfg,
+ [SLAVE_CAMERA_THROTTLE_CFG] = &slv_camera_throttle_cfg,
+ [SLAVE_GRAPHICS_3D_CFG] = &slv_oxili_cfg,
+ [SLAVE_SMMU_MDP_CFG] = &slv_smmu_mdp_cfg,
+ [SLAVE_SMMU_ROT_CFG] = &slv_smmu_rot_cfg,
+ [SLAVE_SMMU_VENUS_CFG] = &slv_smmu_venus_cfg,
+ [SLAVE_SMMU_CPP_CFG] = &slv_smmu_cpp_cfg,
+ [SLAVE_SMMU_JPEG_CFG] = &slv_smmu_jpeg_cfg,
+ [SLAVE_SMMU_VFE_CFG] = &slv_smmu_vfe_cfg
+};
+
+static const struct regmap_config msm8996_mnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x20000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_mnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = mnoc_nodes,
+ .num_nodes = ARRAY_SIZE(mnoc_nodes),
+ .clocks = bus_mm_clocks,
+ .num_clocks = ARRAY_SIZE(bus_mm_clocks),
+ .regmap_cfg = &msm8996_mnoc_regmap_config
+};
+
+static struct qcom_icc_node *pnoc_nodes[] = {
+ [MASTER_SNOC_PNOC] = &mas_snoc_pnoc,
+ [MASTER_SDCC_1] = &mas_sdcc_1,
+ [MASTER_SDCC_2] = &mas_sdcc_2,
+ [MASTER_SDCC_4] = &mas_sdcc_4,
+ [MASTER_USB_HS] = &mas_usb_hs,
+ [MASTER_BLSP_1] = &mas_blsp_1,
+ [MASTER_BLSP_2] = &mas_blsp_2,
+ [MASTER_TSIF] = &mas_tsif,
+ [SLAVE_PNOC_A1NOC] = &slv_pnoc_a1noc,
+ [SLAVE_USB_HS] = &slv_usb_hs,
+ [SLAVE_SDCC_2] = &slv_sdcc_2,
+ [SLAVE_SDCC_4] = &slv_sdcc_4,
+ [SLAVE_TSIF] = &slv_tsif,
+ [SLAVE_BLSP_2] = &slv_blsp_2,
+ [SLAVE_SDCC_1] = &slv_sdcc_1,
+ [SLAVE_BLSP_1] = &slv_blsp_1,
+ [SLAVE_PDM] = &slv_pdm,
+ [SLAVE_AHB2PHY] = &slv_ahb2phy
+};
+
+static const struct regmap_config msm8996_pnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x3000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_pnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = pnoc_nodes,
+ .num_nodes = ARRAY_SIZE(pnoc_nodes),
+ .regmap_cfg = &msm8996_pnoc_regmap_config
+};
+
+static struct qcom_icc_node *snoc_nodes[] = {
+ [MASTER_HMSS] = &mas_hmss,
+ [MASTER_QDSS_BAM] = &mas_qdss_bam,
+ [MASTER_SNOC_CFG] = &mas_snoc_cfg,
+ [MASTER_BIMC_SNOC_0] = &mas_bimc_snoc_0,
+ [MASTER_BIMC_SNOC_1] = &mas_bimc_snoc_1,
+ [MASTER_A0NOC_SNOC] = &mas_a0noc_snoc,
+ [MASTER_A1NOC_SNOC] = &mas_a1noc_snoc,
+ [MASTER_A2NOC_SNOC] = &mas_a2noc_snoc,
+ [MASTER_QDSS_ETR] = &mas_qdss_etr,
+ [SLAVE_A0NOC_SNOC] = &slv_a0noc_snoc,
+ [SLAVE_A1NOC_SNOC] = &slv_a1noc_snoc,
+ [SLAVE_A2NOC_SNOC] = &slv_a2noc_snoc,
+ [SLAVE_HMSS] = &slv_hmss,
+ [SLAVE_LPASS] = &slv_lpass,
+ [SLAVE_USB3] = &slv_usb3,
+ [SLAVE_SNOC_BIMC] = &slv_snoc_bimc,
+ [SLAVE_SNOC_CNOC] = &slv_snoc_cnoc,
+ [SLAVE_IMEM] = &slv_imem,
+ [SLAVE_PIMEM] = &slv_pimem,
+ [SLAVE_SNOC_VMEM] = &slv_snoc_vmem,
+ [SLAVE_SNOC_PNOC] = &slv_snoc_pnoc,
+ [SLAVE_QDSS_STM] = &slv_qdss_stm,
+ [SLAVE_PCIE_0] = &slv_pcie_0,
+ [SLAVE_PCIE_1] = &slv_pcie_1,
+ [SLAVE_PCIE_2] = &slv_pcie_2,
+ [SLAVE_SERVICE_SNOC] = &slv_srvc_snoc
+};
+
+static const struct regmap_config msm8996_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x20000,
+ .fast_io = true
+};
+
+static const struct qcom_icc_desc msm8996_snoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = snoc_nodes,
+ .num_nodes = ARRAY_SIZE(snoc_nodes),
+ .regmap_cfg = &msm8996_snoc_regmap_config
+};
+
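+/* Each NoC probes as its own platform device; .data selects its descriptor. */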
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,msm8996-a0noc", .data = &msm8996_a0noc},
+ { .compatible = "qcom,msm8996-a1noc", .data = &msm8996_a1noc},
+ { .compatible = "qcom,msm8996-a2noc", .data = &msm8996_a2noc},
+ { .compatible = "qcom,msm8996-bimc", .data = &msm8996_bimc},
+ { .compatible = "qcom,msm8996-cnoc", .data = &msm8996_cnoc},
+ { .compatible = "qcom,msm8996-mnoc", .data = &msm8996_mnoc},
+ { .compatible = "qcom,msm8996-pnoc", .data = &msm8996_pnoc},
+ { .compatible = "qcom,msm8996-snoc", .data = &msm8996_snoc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-msm8996",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ }
+};
+module_platform_driver(qnoc_driver);
+
+MODULE_AUTHOR("Yassine Oudjana <y.oudjana@protonmail.com>");
+MODULE_DESCRIPTION("Qualcomm MSM8996 NoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/interconnect/qcom/msm8996.h b/drivers/interconnect/qcom/msm8996.h
new file mode 100644
index 000000000000..42b54ffcaa7b
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8996.h
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Qualcomm MSM8996 interconnect IDs
+ *
+ * Copyright (c) 2021 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_MSM8996_H__
+#define __DRIVERS_INTERCONNECT_QCOM_MSM8996_H__
+
+#define MSM8996_MASTER_PCIE_0 1
+#define MSM8996_MASTER_PCIE_1 2
+#define MSM8996_MASTER_PCIE_2 3
+#define MSM8996_MASTER_CNOC_A1NOC 4
+#define MSM8996_MASTER_CRYPTO_CORE0 5
+#define MSM8996_MASTER_PNOC_A1NOC 6
+#define MSM8996_MASTER_USB3 7
+#define MSM8996_MASTER_IPA 8
+#define MSM8996_MASTER_UFS 9
+#define MSM8996_MASTER_AMPSS_M0 10
+#define MSM8996_MASTER_GRAPHICS_3D 11
+#define MSM8996_MASTER_MNOC_BIMC 12
+#define MSM8996_MASTER_SNOC_BIMC 13
+#define MSM8996_MASTER_SNOC_CNOC 14
+#define MSM8996_MASTER_QDSS_DAP 15
+#define MSM8996_MASTER_CNOC_MNOC_MMSS_CFG 16
+#define MSM8996_MASTER_CNOC_MNOC_CFG 17
+#define MSM8996_MASTER_CPP 18
+#define MSM8996_MASTER_JPEG 19
+#define MSM8996_MASTER_MDP_PORT0 20
+#define MSM8996_MASTER_MDP_PORT1 21
+#define MSM8996_MASTER_ROTATOR 22
+#define MSM8996_MASTER_VIDEO_P0 23
+#define MSM8996_MASTER_VFE 24
+#define MSM8996_MASTER_SNOC_VMEM 25
+#define MSM8996_MASTER_VIDEO_P0_OCMEM 26
+#define MSM8996_MASTER_SNOC_PNOC 27
+#define MSM8996_MASTER_SDCC_1 28
+#define MSM8996_MASTER_SDCC_2 29
+#define MSM8996_MASTER_SDCC_4 30
+#define MSM8996_MASTER_USB_HS 31
+#define MSM8996_MASTER_BLSP_1 32
+#define MSM8996_MASTER_BLSP_2 33
+#define MSM8996_MASTER_TSIF 34
+#define MSM8996_MASTER_HMSS 35
+#define MSM8996_MASTER_QDSS_BAM 36
+#define MSM8996_MASTER_SNOC_CFG 37
+#define MSM8996_MASTER_BIMC_SNOC_0 38
+#define MSM8996_MASTER_BIMC_SNOC_1 39
+#define MSM8996_MASTER_A0NOC_SNOC 40
+#define MSM8996_MASTER_A1NOC_SNOC 41
+#define MSM8996_MASTER_A2NOC_SNOC 42
+#define MSM8996_MASTER_QDSS_ETR 43
+
+#define MSM8996_SLAVE_A0NOC_SNOC 44
+#define MSM8996_SLAVE_A1NOC_SNOC 45
+#define MSM8996_SLAVE_A2NOC_SNOC 46
+#define MSM8996_SLAVE_EBI_CH0 47
+#define MSM8996_SLAVE_HMSS_L3 48
+#define MSM8996_SLAVE_BIMC_SNOC_0 49
+#define MSM8996_SLAVE_BIMC_SNOC_1 50
+#define MSM8996_SLAVE_CNOC_A1NOC 51
+#define MSM8996_SLAVE_CLK_CTL 52
+#define MSM8996_SLAVE_TCSR 53
+#define MSM8996_SLAVE_TLMM 54
+#define MSM8996_SLAVE_CRYPTO_0_CFG 55
+#define MSM8996_SLAVE_MPM 56
+#define MSM8996_SLAVE_PIMEM_CFG 57
+#define MSM8996_SLAVE_IMEM_CFG 58
+#define MSM8996_SLAVE_MESSAGE_RAM 59
+#define MSM8996_SLAVE_BIMC_CFG 60
+#define MSM8996_SLAVE_PMIC_ARB 61
+#define MSM8996_SLAVE_PRNG 62
+#define MSM8996_SLAVE_DCC_CFG 63
+#define MSM8996_SLAVE_RBCPR_MX 64
+#define MSM8996_SLAVE_QDSS_CFG 65
+#define MSM8996_SLAVE_RBCPR_CX 66
+#define MSM8996_SLAVE_QDSS_RBCPR_APU_CFG 67
+#define MSM8996_SLAVE_CNOC_MNOC_CFG 68
+#define MSM8996_SLAVE_SNOC_CFG 69
+#define MSM8996_SLAVE_SNOC_MPU_CFG 70
+#define MSM8996_SLAVE_EBI1_PHY_CFG 71
+#define MSM8996_SLAVE_A0NOC_CFG 72
+#define MSM8996_SLAVE_PCIE_1_CFG 73
+#define MSM8996_SLAVE_PCIE_2_CFG 74
+#define MSM8996_SLAVE_PCIE_0_CFG 75
+#define MSM8996_SLAVE_PCIE20_AHB2PHY 76
+#define MSM8996_SLAVE_A0NOC_MPU_CFG 77
+#define MSM8996_SLAVE_UFS_CFG 78
+#define MSM8996_SLAVE_A1NOC_CFG 79
+#define MSM8996_SLAVE_A1NOC_MPU_CFG 80
+#define MSM8996_SLAVE_A2NOC_CFG 81
+#define MSM8996_SLAVE_A2NOC_MPU_CFG 82
+#define MSM8996_SLAVE_SSC_CFG 83
+#define MSM8996_SLAVE_A0NOC_SMMU_CFG 84
+#define MSM8996_SLAVE_A1NOC_SMMU_CFG 85
+#define MSM8996_SLAVE_A2NOC_SMMU_CFG 86
+#define MSM8996_SLAVE_LPASS_SMMU_CFG 87
+#define MSM8996_SLAVE_CNOC_MNOC_MMSS_CFG 88
+#define MSM8996_SLAVE_MMAGIC_CFG 89
+#define MSM8996_SLAVE_CPR_CFG 90
+#define MSM8996_SLAVE_MISC_CFG 91
+#define MSM8996_SLAVE_VENUS_THROTTLE_CFG 92
+#define MSM8996_SLAVE_VENUS_CFG 93
+#define MSM8996_SLAVE_VMEM_CFG 94
+#define MSM8996_SLAVE_DSA_CFG 95
+#define MSM8996_SLAVE_MMSS_CLK_CFG 96
+#define MSM8996_SLAVE_DSA_MPU_CFG 97
+#define MSM8996_SLAVE_MNOC_MPU_CFG 98
+#define MSM8996_SLAVE_DISPLAY_CFG 99
+#define MSM8996_SLAVE_DISPLAY_THROTTLE_CFG 100
+#define MSM8996_SLAVE_CAMERA_CFG 101
+#define MSM8996_SLAVE_CAMERA_THROTTLE_CFG 102
+#define MSM8996_SLAVE_GRAPHICS_3D_CFG 103
+#define MSM8996_SLAVE_SMMU_MDP_CFG 104
+#define MSM8996_SLAVE_SMMU_ROTATOR_CFG 105
+#define MSM8996_SLAVE_SMMU_VENUS_CFG 106
+#define MSM8996_SLAVE_SMMU_CPP_CFG 107
+#define MSM8996_SLAVE_SMMU_JPEG_CFG 108
+#define MSM8996_SLAVE_SMMU_VFE_CFG 109
+#define MSM8996_SLAVE_MNOC_BIMC 110
+#define MSM8996_SLAVE_VMEM 111
+#define MSM8996_SLAVE_SERVICE_MNOC 112
+#define MSM8996_SLAVE_PNOC_A1NOC 113
+#define MSM8996_SLAVE_USB_HS 114
+#define MSM8996_SLAVE_SDCC_2 115
+#define MSM8996_SLAVE_SDCC_4 116
+#define MSM8996_SLAVE_TSIF 117
+#define MSM8996_SLAVE_BLSP_2 118
+#define MSM8996_SLAVE_SDCC_1 119
+#define MSM8996_SLAVE_BLSP_1 120
+#define MSM8996_SLAVE_PDM 121
+#define MSM8996_SLAVE_AHB2PHY 122
+#define MSM8996_SLAVE_APPSS 123
+#define MSM8996_SLAVE_LPASS 124
+#define MSM8996_SLAVE_USB3 125
+#define MSM8996_SLAVE_SNOC_BIMC 126
+#define MSM8996_SLAVE_SNOC_CNOC 127
+#define MSM8996_SLAVE_OCIMEM 128
+#define MSM8996_SLAVE_PIMEM 129
+#define MSM8996_SLAVE_SNOC_VMEM 130
+#define MSM8996_SLAVE_SNOC_PNOC 131
+#define MSM8996_SLAVE_QDSS_STM 132
+#define MSM8996_SLAVE_PCIE_0 133
+#define MSM8996_SLAVE_PCIE_1 134
+#define MSM8996_SLAVE_PCIE_2 135
+#define MSM8996_SLAVE_SERVICE_SNOC 136
+
+#endif /* __DRIVERS_INTERCONNECT_QCOM_MSM8996_H__ */
diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c
index c7af143980de..eec13099a6a3 100644
--- a/drivers/interconnect/qcom/osm-l3.c
+++ b/drivers/interconnect/qcom/osm-l3.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
*/
#include <linux/bitfield.h>
@@ -15,6 +15,7 @@
#include <dt-bindings/interconnect/qcom,osm-l3.h>
#include "sc7180.h"
+#include "sc7280.h"
#include "sc8180x.h"
#include "sdm845.h"
#include "sm8150.h"
@@ -114,6 +115,22 @@ static const struct qcom_osm_l3_desc sc7180_icc_osm_l3 = {
.reg_perf_state = OSM_REG_PERF_STATE,
};
+DEFINE_QNODE(sc7280_epss_apps_l3, SC7280_MASTER_EPSS_L3_APPS, 32, SC7280_SLAVE_EPSS_L3);
+DEFINE_QNODE(sc7280_epss_l3, SC7280_SLAVE_EPSS_L3, 32);
+
+static const struct qcom_osm_l3_node *sc7280_epss_l3_nodes[] = {
+ [MASTER_EPSS_L3_APPS] = &sc7280_epss_apps_l3,
+ [SLAVE_EPSS_L3_SHARED] = &sc7280_epss_l3,
+};
+
+static const struct qcom_osm_l3_desc sc7280_icc_epss_l3 = {
+ .nodes = sc7280_epss_l3_nodes,
+ .num_nodes = ARRAY_SIZE(sc7280_epss_l3_nodes),
+ .lut_row_size = EPSS_LUT_ROW_SIZE,
+ .reg_freq_lut = EPSS_REG_FREQ_LUT,
+ .reg_perf_state = EPSS_REG_PERF_STATE,
+};
+
DEFINE_QNODE(sc8180x_osm_apps_l3, SC8180X_MASTER_OSM_L3_APPS, 32, SC8180X_SLAVE_OSM_L3);
DEFINE_QNODE(sc8180x_osm_l3, SC8180X_SLAVE_OSM_L3, 32);
@@ -326,6 +343,7 @@ err:
static const struct of_device_id osm_l3_of_match[] = {
{ .compatible = "qcom,sc7180-osm-l3", .data = &sc7180_icc_osm_l3 },
+ { .compatible = "qcom,sc7280-epss-l3", .data = &sc7280_icc_epss_l3 },
{ .compatible = "qcom,sdm845-osm-l3", .data = &sdm845_icc_osm_l3 },
{ .compatible = "qcom,sm8150-osm-l3", .data = &sm8150_icc_osm_l3 },
{ .compatible = "qcom,sc8180x-osm-l3", .data = &sc8180x_icc_osm_l3 },
diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c
new file mode 100644
index 000000000000..74404e0b2080
--- /dev/null
+++ b/drivers/interconnect/qcom/qcm2290.c
@@ -0,0 +1,1363 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Qualcomm QCM2290 Network-on-Chip (NoC) QoS driver
+ *
+ * Copyright (c) 2021, Linaro Ltd.
+ *
+ */
+
+#include <dt-bindings/interconnect/qcom,qcm2290.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "icc-rpm.h"
+#include "smd-rpm.h"
+
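+/* Local node IDs, referenced by the link tables below to wire masters to slaves */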
+enum {
+ QCM2290_MASTER_APPSS_PROC = 1,
+ QCM2290_MASTER_SNOC_BIMC_RT,
+ QCM2290_MASTER_SNOC_BIMC_NRT,
+ QCM2290_MASTER_SNOC_BIMC,
+ QCM2290_MASTER_TCU_0,
+ QCM2290_MASTER_GFX3D,
+ QCM2290_MASTER_SNOC_CNOC,
+ QCM2290_MASTER_QDSS_DAP,
+ QCM2290_MASTER_CRYPTO_CORE0,
+ QCM2290_MASTER_SNOC_CFG,
+ QCM2290_MASTER_TIC,
+ QCM2290_MASTER_ANOC_SNOC,
+ QCM2290_MASTER_BIMC_SNOC,
+ QCM2290_MASTER_PIMEM,
+ QCM2290_MASTER_QDSS_BAM,
+ QCM2290_MASTER_QUP_0,
+ QCM2290_MASTER_IPA,
+ QCM2290_MASTER_QDSS_ETR,
+ QCM2290_MASTER_SDCC_1,
+ QCM2290_MASTER_SDCC_2,
+ QCM2290_MASTER_QPIC,
+ QCM2290_MASTER_USB3_0,
+ QCM2290_MASTER_QUP_CORE_0,
+ QCM2290_MASTER_CAMNOC_SF,
+ QCM2290_MASTER_VIDEO_P0,
+ QCM2290_MASTER_VIDEO_PROC,
+ QCM2290_MASTER_CAMNOC_HF,
+ QCM2290_MASTER_MDP0,
+
+ QCM2290_SLAVE_EBI1,
+ QCM2290_SLAVE_BIMC_SNOC,
+ QCM2290_SLAVE_BIMC_CFG,
+ QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ QCM2290_SLAVE_CAMERA_CFG,
+ QCM2290_SLAVE_CLK_CTL,
+ QCM2290_SLAVE_CRYPTO_0_CFG,
+ QCM2290_SLAVE_DISPLAY_CFG,
+ QCM2290_SLAVE_DISPLAY_THROTTLE_CFG,
+ QCM2290_SLAVE_GPU_CFG,
+ QCM2290_SLAVE_HWKM,
+ QCM2290_SLAVE_IMEM_CFG,
+ QCM2290_SLAVE_IPA_CFG,
+ QCM2290_SLAVE_LPASS,
+ QCM2290_SLAVE_MESSAGE_RAM,
+ QCM2290_SLAVE_PDM,
+ QCM2290_SLAVE_PIMEM_CFG,
+ QCM2290_SLAVE_PKA_WRAPPER,
+ QCM2290_SLAVE_PMIC_ARB,
+ QCM2290_SLAVE_PRNG,
+ QCM2290_SLAVE_QDSS_CFG,
+ QCM2290_SLAVE_QM_CFG,
+ QCM2290_SLAVE_QM_MPU_CFG,
+ QCM2290_SLAVE_QPIC,
+ QCM2290_SLAVE_QUP_0,
+ QCM2290_SLAVE_SDCC_1,
+ QCM2290_SLAVE_SDCC_2,
+ QCM2290_SLAVE_SNOC_CFG,
+ QCM2290_SLAVE_TCSR,
+ QCM2290_SLAVE_USB3,
+ QCM2290_SLAVE_VENUS_CFG,
+ QCM2290_SLAVE_VENUS_THROTTLE_CFG,
+ QCM2290_SLAVE_VSENSE_CTRL_CFG,
+ QCM2290_SLAVE_SERVICE_CNOC,
+ QCM2290_SLAVE_APPSS,
+ QCM2290_SLAVE_SNOC_CNOC,
+ QCM2290_SLAVE_IMEM,
+ QCM2290_SLAVE_PIMEM,
+ QCM2290_SLAVE_SNOC_BIMC,
+ QCM2290_SLAVE_SERVICE_SNOC,
+ QCM2290_SLAVE_QDSS_STM,
+ QCM2290_SLAVE_TCU,
+ QCM2290_SLAVE_ANOC_SNOC,
+ QCM2290_SLAVE_QUP_CORE_0,
+ QCM2290_SLAVE_SNOC_BIMC_NRT,
+ QCM2290_SLAVE_SNOC_BIMC_RT,
+};
+
+/* Master nodes */
+static const u16 mas_appss_proc_links[] = {
+ QCM2290_SLAVE_EBI1,
+ QCM2290_SLAVE_BIMC_SNOC,
+};
+
+static struct qcom_icc_node mas_appss_proc = {
+ .id = QCM2290_MASTER_APPSS_PROC,
+ .name = "mas_apps_proc",
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_port = 0,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.prio_level = 0,
+ .qos.areq_prio = 0,
+ .mas_rpm_id = 0,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_appss_proc_links),
+ .links = mas_appss_proc_links,
+};
+
+static const u16 mas_snoc_bimc_rt_links[] = {
+ QCM2290_SLAVE_EBI1,
+};
+
+static struct qcom_icc_node mas_snoc_bimc_rt = {
+ .id = QCM2290_MASTER_SNOC_BIMC_RT,
+ .name = "mas_snoc_bimc_rt",
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_port = 2,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .mas_rpm_id = 163,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_rt_links),
+ .links = mas_snoc_bimc_rt_links,
+};
+
+static const u16 mas_snoc_bimc_nrt_links[] = {
+ QCM2290_SLAVE_EBI1,
+};
+
+static struct qcom_icc_node mas_snoc_bimc_nrt = {
+ .id = QCM2290_MASTER_SNOC_BIMC_NRT,
+ .name = "mas_snoc_bimc_nrt",
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_port = 2,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .mas_rpm_id = 163,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_nrt_links),
+ .links = mas_snoc_bimc_nrt_links,
+};
+
+static const u16 mas_snoc_bimc_links[] = {
+ QCM2290_SLAVE_EBI1,
+};
+
+static struct qcom_icc_node mas_snoc_bimc = {
+ .id = QCM2290_MASTER_SNOC_BIMC,
+ .name = "mas_snoc_bimc",
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_port = 2,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .mas_rpm_id = 164,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_links),
+ .links = mas_snoc_bimc_links,
+};
+
+static const u16 mas_tcu_0_links[] = {
+ QCM2290_SLAVE_EBI1,
+ QCM2290_SLAVE_BIMC_SNOC,
+};
+
+static struct qcom_icc_node mas_tcu_0 = {
+ .id = QCM2290_MASTER_TCU_0,
+ .name = "mas_tcu_0",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 4,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.prio_level = 6,
+ .qos.areq_prio = 6,
+ .mas_rpm_id = 102,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_tcu_0_links),
+ .links = mas_tcu_0_links,
+};
+
+static const u16 mas_snoc_cnoc_links[] = {
+ QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ QCM2290_SLAVE_SDCC_2,
+ QCM2290_SLAVE_SDCC_1,
+ QCM2290_SLAVE_QM_CFG,
+ QCM2290_SLAVE_BIMC_CFG,
+ QCM2290_SLAVE_USB3,
+ QCM2290_SLAVE_QM_MPU_CFG,
+ QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ QCM2290_SLAVE_QDSS_CFG,
+ QCM2290_SLAVE_PDM,
+ QCM2290_SLAVE_IPA_CFG,
+ QCM2290_SLAVE_DISPLAY_THROTTLE_CFG,
+ QCM2290_SLAVE_TCSR,
+ QCM2290_SLAVE_MESSAGE_RAM,
+ QCM2290_SLAVE_PMIC_ARB,
+ QCM2290_SLAVE_LPASS,
+ QCM2290_SLAVE_DISPLAY_CFG,
+ QCM2290_SLAVE_VENUS_CFG,
+ QCM2290_SLAVE_GPU_CFG,
+ QCM2290_SLAVE_IMEM_CFG,
+ QCM2290_SLAVE_SNOC_CFG,
+ QCM2290_SLAVE_SERVICE_CNOC,
+ QCM2290_SLAVE_VENUS_THROTTLE_CFG,
+ QCM2290_SLAVE_PKA_WRAPPER,
+ QCM2290_SLAVE_HWKM,
+ QCM2290_SLAVE_PRNG,
+ QCM2290_SLAVE_VSENSE_CTRL_CFG,
+ QCM2290_SLAVE_CRYPTO_0_CFG,
+ QCM2290_SLAVE_PIMEM_CFG,
+ QCM2290_SLAVE_QUP_0,
+ QCM2290_SLAVE_CAMERA_CFG,
+ QCM2290_SLAVE_CLK_CTL,
+ QCM2290_SLAVE_QPIC,
+};
+
+static struct qcom_icc_node mas_snoc_cnoc = {
+ .id = QCM2290_MASTER_SNOC_CNOC,
+ .name = "mas_snoc_cnoc",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = 52,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_cnoc_links),
+ .links = mas_snoc_cnoc_links,
+};
+
+static const u16 mas_qdss_dap_links[] = {
+ QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ QCM2290_SLAVE_SDCC_2,
+ QCM2290_SLAVE_SDCC_1,
+ QCM2290_SLAVE_QM_CFG,
+ QCM2290_SLAVE_BIMC_CFG,
+ QCM2290_SLAVE_USB3,
+ QCM2290_SLAVE_QM_MPU_CFG,
+ QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ QCM2290_SLAVE_QDSS_CFG,
+ QCM2290_SLAVE_PDM,
+ QCM2290_SLAVE_IPA_CFG,
+ QCM2290_SLAVE_DISPLAY_THROTTLE_CFG,
+ QCM2290_SLAVE_TCSR,
+ QCM2290_SLAVE_MESSAGE_RAM,
+ QCM2290_SLAVE_PMIC_ARB,
+ QCM2290_SLAVE_LPASS,
+ QCM2290_SLAVE_DISPLAY_CFG,
+ QCM2290_SLAVE_VENUS_CFG,
+ QCM2290_SLAVE_GPU_CFG,
+ QCM2290_SLAVE_IMEM_CFG,
+ QCM2290_SLAVE_SNOC_CFG,
+ QCM2290_SLAVE_SERVICE_CNOC,
+ QCM2290_SLAVE_VENUS_THROTTLE_CFG,
+ QCM2290_SLAVE_PKA_WRAPPER,
+ QCM2290_SLAVE_HWKM,
+ QCM2290_SLAVE_PRNG,
+ QCM2290_SLAVE_VSENSE_CTRL_CFG,
+ QCM2290_SLAVE_CRYPTO_0_CFG,
+ QCM2290_SLAVE_PIMEM_CFG,
+ QCM2290_SLAVE_QUP_0,
+ QCM2290_SLAVE_CAMERA_CFG,
+ QCM2290_SLAVE_CLK_CTL,
+ QCM2290_SLAVE_QPIC,
+};
+
+static struct qcom_icc_node mas_qdss_dap = {
+ .id = QCM2290_MASTER_QDSS_DAP,
+ .name = "mas_qdss_dap",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = 49,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qdss_dap_links),
+ .links = mas_qdss_dap_links,
+};
+
+static const u16 mas_crypto_core0_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_crypto_core0 = {
+ .id = QCM2290_MASTER_CRYPTO_CORE0,
+ .name = "mas_crypto_core0",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 22,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 23,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_crypto_core0_links),
+ .links = mas_crypto_core0_links,
+};
+
+static const u16 mas_qup_core_0_links[] = {
+ QCM2290_SLAVE_QUP_CORE_0,
+};
+
+static struct qcom_icc_node mas_qup_core_0 = {
+ .id = QCM2290_MASTER_QUP_CORE_0,
+ .name = "mas_qup_core_0",
+ .buswidth = 4,
+ .mas_rpm_id = 170,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qup_core_0_links),
+ .links = mas_qup_core_0_links,
+};
+
+static const u16 mas_camnoc_sf_links[] = {
+ QCM2290_SLAVE_SNOC_BIMC_NRT,
+};
+
+static struct qcom_icc_node mas_camnoc_sf = {
+ .id = QCM2290_MASTER_CAMNOC_SF,
+ .name = "mas_camnoc_sf",
+ .buswidth = 32,
+ .qos.ap_owned = true,
+ .qos.qos_port = 4,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 3,
+ .mas_rpm_id = 172,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_camnoc_sf_links),
+ .links = mas_camnoc_sf_links,
+};
+
+static const u16 mas_camnoc_hf_links[] = {
+ QCM2290_SLAVE_SNOC_BIMC_RT,
+};
+
+static struct qcom_icc_node mas_camnoc_hf = {
+ .id = QCM2290_MASTER_CAMNOC_HF,
+ .name = "mas_camnoc_hf",
+ .buswidth = 32,
+ .qos.ap_owned = true,
+ .qos.qos_port = 10,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 3,
+ .qos.urg_fwd_en = true,
+ .mas_rpm_id = 173,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_camnoc_hf_links),
+ .links = mas_camnoc_hf_links,
+};
+
+static const u16 mas_mdp0_links[] = {
+ QCM2290_SLAVE_SNOC_BIMC_RT,
+};
+
+static struct qcom_icc_node mas_mdp0 = {
+ .id = QCM2290_MASTER_MDP0,
+ .name = "mas_mdp0",
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_port = 5,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 3,
+ .qos.urg_fwd_en = true,
+ .mas_rpm_id = 8,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_mdp0_links),
+ .links = mas_mdp0_links,
+};
+
+static const u16 mas_video_p0_links[] = {
+ QCM2290_SLAVE_SNOC_BIMC_NRT,
+};
+
+static struct qcom_icc_node mas_video_p0 = {
+ .id = QCM2290_MASTER_VIDEO_P0,
+ .name = "mas_video_p0",
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_port = 9,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 3,
+ .qos.urg_fwd_en = true,
+ .mas_rpm_id = 9,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_video_p0_links),
+ .links = mas_video_p0_links,
+};
+
+static const u16 mas_video_proc_links[] = {
+ QCM2290_SLAVE_SNOC_BIMC_NRT,
+};
+
+static struct qcom_icc_node mas_video_proc = {
+ .id = QCM2290_MASTER_VIDEO_PROC,
+ .name = "mas_video_proc",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 13,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 4,
+ .mas_rpm_id = 168,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_video_proc_links),
+ .links = mas_video_proc_links,
+};
+
+static const u16 mas_snoc_cfg_links[] = {
+ QCM2290_SLAVE_SERVICE_SNOC,
+};
+
+static struct qcom_icc_node mas_snoc_cfg = {
+ .id = QCM2290_MASTER_SNOC_CFG,
+ .name = "mas_snoc_cfg",
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = 20,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_snoc_cfg_links),
+ .links = mas_snoc_cfg_links,
+};
+
+static const u16 mas_tic_links[] = {
+ QCM2290_SLAVE_PIMEM,
+ QCM2290_SLAVE_IMEM,
+ QCM2290_SLAVE_APPSS,
+ QCM2290_SLAVE_SNOC_BIMC,
+ QCM2290_SLAVE_SNOC_CNOC,
+ QCM2290_SLAVE_TCU,
+ QCM2290_SLAVE_QDSS_STM,
+};
+
+static struct qcom_icc_node mas_tic = {
+ .id = QCM2290_MASTER_TIC,
+ .name = "mas_tic",
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_port = 8,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 51,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_tic_links),
+ .links = mas_tic_links,
+};
+
+static const u16 mas_anoc_snoc_links[] = {
+ QCM2290_SLAVE_PIMEM,
+ QCM2290_SLAVE_IMEM,
+ QCM2290_SLAVE_APPSS,
+ QCM2290_SLAVE_SNOC_BIMC,
+ QCM2290_SLAVE_SNOC_CNOC,
+ QCM2290_SLAVE_TCU,
+ QCM2290_SLAVE_QDSS_STM,
+};
+
+static struct qcom_icc_node mas_anoc_snoc = {
+ .id = QCM2290_MASTER_ANOC_SNOC,
+ .name = "mas_anoc_snoc",
+ .buswidth = 16,
+ .mas_rpm_id = 110,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_anoc_snoc_links),
+ .links = mas_anoc_snoc_links,
+};
+
+static const u16 mas_bimc_snoc_links[] = {
+ QCM2290_SLAVE_PIMEM,
+ QCM2290_SLAVE_IMEM,
+ QCM2290_SLAVE_APPSS,
+ QCM2290_SLAVE_SNOC_CNOC,
+ QCM2290_SLAVE_TCU,
+ QCM2290_SLAVE_QDSS_STM,
+};
+
+static struct qcom_icc_node mas_bimc_snoc = {
+ .id = QCM2290_MASTER_BIMC_SNOC,
+ .name = "mas_bimc_snoc",
+ .buswidth = 8,
+ .mas_rpm_id = 21,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_links),
+ .links = mas_bimc_snoc_links,
+};
+
+static const u16 mas_pimem_links[] = {
+ QCM2290_SLAVE_IMEM,
+ QCM2290_SLAVE_SNOC_BIMC,
+};
+
+static struct qcom_icc_node mas_pimem = {
+ .id = QCM2290_MASTER_PIMEM,
+ .name = "mas_pimem",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 20,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 113,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_pimem_links),
+ .links = mas_pimem_links,
+};
+
+static const u16 mas_qdss_bam_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .id = QCM2290_MASTER_QDSS_BAM,
+ .name = "mas_qdss_bam",
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_port = 2,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 19,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qdss_bam_links),
+ .links = mas_qdss_bam_links,
+};
+
+static const u16 mas_qup_0_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_qup_0 = {
+ .id = QCM2290_MASTER_QUP_0,
+ .name = "mas_qup_0",
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_port = 0,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 166,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qup_0_links),
+ .links = mas_qup_0_links,
+};
+
+static const u16 mas_ipa_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_ipa = {
+ .id = QCM2290_MASTER_IPA,
+ .name = "mas_ipa",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 3,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 59,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_ipa_links),
+ .links = mas_ipa_links,
+};
+
+static const u16 mas_qdss_etr_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .id = QCM2290_MASTER_QDSS_ETR,
+ .name = "mas_qdss_etr",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 12,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 31,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qdss_etr_links),
+ .links = mas_qdss_etr_links,
+};
+
+static const u16 mas_sdcc_1_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_sdcc_1 = {
+ .id = QCM2290_MASTER_SDCC_1,
+ .name = "mas_sdcc_1",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 17,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 33,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_sdcc_1_links),
+ .links = mas_sdcc_1_links,
+};
+
+static const u16 mas_sdcc_2_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_sdcc_2 = {
+ .id = QCM2290_MASTER_SDCC_2,
+ .name = "mas_sdcc_2",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 23,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 35,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_sdcc_2_links),
+ .links = mas_sdcc_2_links,
+};
+
+static const u16 mas_qpic_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_qpic = {
+ .id = QCM2290_MASTER_QPIC,
+ .name = "mas_qpic",
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_port = 1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 58,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qpic_links),
+ .links = mas_qpic_links,
+};
+
+static const u16 mas_usb3_0_links[] = {
+ QCM2290_SLAVE_ANOC_SNOC,
+};
+
+static struct qcom_icc_node mas_usb3_0 = {
+ .id = QCM2290_MASTER_USB3_0,
+ .name = "mas_usb3_0",
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_port = 24,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 2,
+ .mas_rpm_id = 32,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_usb3_0_links),
+ .links = mas_usb3_0_links,
+};
+
+static const u16 mas_gfx3d_links[] = {
+ QCM2290_SLAVE_EBI1,
+};
+
+static struct qcom_icc_node mas_gfx3d = {
+ .id = QCM2290_MASTER_GFX3D,
+ .name = "mas_gfx3d",
+ .buswidth = 32,
+ .qos.ap_owned = true,
+ .qos.qos_port = 1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.prio_level = 0,
+ .qos.areq_prio = 0,
+ .mas_rpm_id = 6,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_gfx3d_links),
+ .links = mas_gfx3d_links,
+};
+
+/* Slave nodes */
+static struct qcom_icc_node slv_ebi1 = {
+ .name = "slv_ebi1",
+ .id = QCM2290_SLAVE_EBI1,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0,
+};
+
+static const u16 slv_bimc_snoc_links[] = {
+ QCM2290_MASTER_BIMC_SNOC,
+};
+
+static struct qcom_icc_node slv_bimc_snoc = {
+ .name = "slv_bimc_snoc",
+ .id = QCM2290_SLAVE_BIMC_SNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_links),
+ .links = slv_bimc_snoc_links,
+};
+
+static struct qcom_icc_node slv_bimc_cfg = {
+ .name = "slv_bimc_cfg",
+ .id = QCM2290_SLAVE_BIMC_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 56,
+};
+
+static struct qcom_icc_node slv_camera_nrt_throttle_cfg = {
+ .name = "slv_camera_nrt_throttle_cfg",
+ .id = QCM2290_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 271,
+};
+
+static struct qcom_icc_node slv_camera_rt_throttle_cfg = {
+ .name = "slv_camera_rt_throttle_cfg",
+ .id = QCM2290_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 279,
+};
+
+static struct qcom_icc_node slv_camera_cfg = {
+ .name = "slv_camera_cfg",
+ .id = QCM2290_SLAVE_CAMERA_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 3,
+};
+
+static struct qcom_icc_node slv_clk_ctl = {
+ .name = "slv_clk_ctl",
+ .id = QCM2290_SLAVE_CLK_CTL,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 47,
+};
+
+static struct qcom_icc_node slv_crypto_0_cfg = {
+ .name = "slv_crypto_0_cfg",
+ .id = QCM2290_SLAVE_CRYPTO_0_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 52,
+};
+
+static struct qcom_icc_node slv_display_cfg = {
+ .name = "slv_display_cfg",
+ .id = QCM2290_SLAVE_DISPLAY_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 4,
+};
+
+static struct qcom_icc_node slv_display_throttle_cfg = {
+ .name = "slv_display_throttle_cfg",
+ .id = QCM2290_SLAVE_DISPLAY_THROTTLE_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 156,
+};
+
+static struct qcom_icc_node slv_gpu_cfg = {
+ .name = "slv_gpu_cfg",
+ .id = QCM2290_SLAVE_GPU_CFG,
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 275,
+};
+
+static struct qcom_icc_node slv_hwkm = {
+ .name = "slv_hwkm",
+ .id = QCM2290_SLAVE_HWKM,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 280,
+};
+
+static struct qcom_icc_node slv_imem_cfg = {
+ .name = "slv_imem_cfg",
+ .id = QCM2290_SLAVE_IMEM_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 54,
+};
+
+static struct qcom_icc_node slv_ipa_cfg = {
+ .name = "slv_ipa_cfg",
+ .id = QCM2290_SLAVE_IPA_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 183,
+};
+
+static struct qcom_icc_node slv_lpass = {
+ .name = "slv_lpass",
+ .id = QCM2290_SLAVE_LPASS,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 21,
+};
+
+static struct qcom_icc_node slv_message_ram = {
+ .name = "slv_message_ram",
+ .id = QCM2290_SLAVE_MESSAGE_RAM,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 55,
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = QCM2290_SLAVE_PDM,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 41,
+};
+
+static struct qcom_icc_node slv_pimem_cfg = {
+ .name = "slv_pimem_cfg",
+ .id = QCM2290_SLAVE_PIMEM_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 167,
+};
+
+static struct qcom_icc_node slv_pka_wrapper = {
+ .name = "slv_pka_wrapper",
+ .id = QCM2290_SLAVE_PKA_WRAPPER,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 281,
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = QCM2290_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 59,
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = QCM2290_SLAVE_PRNG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 44,
+};
+
+static struct qcom_icc_node slv_qdss_cfg = {
+ .name = "slv_qdss_cfg",
+ .id = QCM2290_SLAVE_QDSS_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 63,
+};
+
+static struct qcom_icc_node slv_qm_cfg = {
+ .name = "slv_qm_cfg",
+ .id = QCM2290_SLAVE_QM_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 212,
+};
+
+static struct qcom_icc_node slv_qm_mpu_cfg = {
+ .name = "slv_qm_mpu_cfg",
+ .id = QCM2290_SLAVE_QM_MPU_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 231,
+};
+
+static struct qcom_icc_node slv_qpic = {
+ .name = "slv_qpic",
+ .id = QCM2290_SLAVE_QPIC,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 80,
+};
+
+static struct qcom_icc_node slv_qup_0 = {
+ .name = "slv_qup_0",
+ .id = QCM2290_SLAVE_QUP_0,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 261,
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = QCM2290_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 31,
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = QCM2290_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 33,
+};
+
+static const u16 slv_snoc_cfg_links[] = {
+ QCM2290_MASTER_SNOC_CFG,
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = QCM2290_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 70,
+ .num_links = ARRAY_SIZE(slv_snoc_cfg_links),
+ .links = slv_snoc_cfg_links,
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = QCM2290_SLAVE_TCSR,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 50,
+};
+
+static struct qcom_icc_node slv_usb3 = {
+ .name = "slv_usb3",
+ .id = QCM2290_SLAVE_USB3,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 22,
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+ .name = "slv_venus_cfg",
+ .id = QCM2290_SLAVE_VENUS_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 10,
+};
+
+static struct qcom_icc_node slv_venus_throttle_cfg = {
+ .name = "slv_venus_throttle_cfg",
+ .id = QCM2290_SLAVE_VENUS_THROTTLE_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 178,
+};
+
+static struct qcom_icc_node slv_vsense_ctrl_cfg = {
+ .name = "slv_vsense_ctrl_cfg",
+ .id = QCM2290_SLAVE_VSENSE_CTRL_CFG,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 263,
+};
+
+static struct qcom_icc_node slv_service_cnoc = {
+ .name = "slv_service_cnoc",
+ .id = QCM2290_SLAVE_SERVICE_CNOC,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 76,
+};
+
+static struct qcom_icc_node slv_qup_core_0 = {
+ .name = "slv_qup_core_0",
+ .id = QCM2290_SLAVE_QUP_CORE_0,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 264,
+};
+
+static const u16 slv_snoc_bimc_nrt_links[] = {
+ QCM2290_MASTER_SNOC_BIMC_NRT,
+};
+
+static struct qcom_icc_node slv_snoc_bimc_nrt = {
+ .name = "slv_snoc_bimc_nrt",
+ .id = QCM2290_SLAVE_SNOC_BIMC_NRT,
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 259,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_nrt_links),
+ .links = slv_snoc_bimc_nrt_links,
+};
+
+static const u16 slv_snoc_bimc_rt_links[] = {
+ QCM2290_MASTER_SNOC_BIMC_RT,
+};
+
+static struct qcom_icc_node slv_snoc_bimc_rt = {
+ .name = "slv_snoc_bimc_rt",
+ .id = QCM2290_SLAVE_SNOC_BIMC_RT,
+ .buswidth = 16,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 260,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_rt_links),
+ .links = slv_snoc_bimc_rt_links,
+};
+
+static struct qcom_icc_node slv_appss = {
+ .name = "slv_appss",
+ .id = QCM2290_SLAVE_APPSS,
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 20,
+};
+
+static const u16 slv_snoc_cnoc_links[] = {
+ QCM2290_MASTER_SNOC_CNOC,
+};
+
+static struct qcom_icc_node slv_snoc_cnoc = {
+ .name = "slv_snoc_cnoc",
+ .id = QCM2290_SLAVE_SNOC_CNOC,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 25,
+ .num_links = ARRAY_SIZE(slv_snoc_cnoc_links),
+ .links = slv_snoc_cnoc_links,
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = QCM2290_SLAVE_IMEM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26,
+};
+
+static struct qcom_icc_node slv_pimem = {
+ .name = "slv_pimem",
+ .id = QCM2290_SLAVE_PIMEM,
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 166,
+};
+
+static const u16 slv_snoc_bimc_links[] = {
+ QCM2290_MASTER_SNOC_BIMC,
+};
+
+static struct qcom_icc_node slv_snoc_bimc = {
+ .name = "slv_snoc_bimc",
+ .id = QCM2290_SLAVE_SNOC_BIMC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 24,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_links),
+ .links = slv_snoc_bimc_links,
+};
+
+static struct qcom_icc_node slv_service_snoc = {
+ .name = "slv_service_snoc",
+ .id = QCM2290_SLAVE_SERVICE_SNOC,
+ .buswidth = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 29,
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = QCM2290_SLAVE_QDSS_STM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node slv_tcu = {
+ .name = "slv_tcu",
+ .id = QCM2290_SLAVE_TCU,
+ .buswidth = 8,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 133,
+};
+
+static const u16 slv_anoc_snoc_links[] = {
+ QCM2290_MASTER_ANOC_SNOC,
+};
+
+static struct qcom_icc_node slv_anoc_snoc = {
+ .name = "slv_anoc_snoc",
+ .id = QCM2290_SLAVE_ANOC_SNOC,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 141,
+ .num_links = ARRAY_SIZE(slv_anoc_snoc_links),
+ .links = slv_anoc_snoc_links,
+};
+
+/* NoC descriptors */
+static struct qcom_icc_node *qcm2290_bimc_nodes[] = {
+ [MASTER_APPSS_PROC] = &mas_appss_proc,
+ [MASTER_SNOC_BIMC_RT] = &mas_snoc_bimc_rt,
+ [MASTER_SNOC_BIMC_NRT] = &mas_snoc_bimc_nrt,
+ [MASTER_SNOC_BIMC] = &mas_snoc_bimc,
+ [MASTER_TCU_0] = &mas_tcu_0,
+ [MASTER_GFX3D] = &mas_gfx3d,
+ [SLAVE_EBI1] = &slv_ebi1,
+ [SLAVE_BIMC_SNOC] = &slv_bimc_snoc,
+};
+
+static const struct regmap_config qcm2290_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x80000,
+ .fast_io = true,
+};
+
+static struct qcom_icc_desc qcm2290_bimc = {
+ .type = QCOM_ICC_BIMC,
+ .nodes = qcm2290_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(qcm2290_bimc_nodes),
+ .regmap_cfg = &qcm2290_bimc_regmap_config,
+ /* M_REG_BASE() in vendor msm_bus_bimc_adhoc driver */
+ .qos_offset = 0x8000,
+};
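+
+/*
+ * Rough sketch of how .qos_offset is consumed by the shared icc-rpm
+ * provider code (an illustration, not logic added by this patch): QoS
+ * registers are addressed relative to the NoC regmap, approximately
+ *
+ *   regmap_write(regmap, qos_offset + port_qos_reg(node->qos.qos_port), val);
+ *
+ * where port_qos_reg() is a stand-in for the per-port register layout.
+ */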
+
+static struct qcom_icc_node *qcm2290_cnoc_nodes[] = {
+ [MASTER_SNOC_CNOC] = &mas_snoc_cnoc,
+ [MASTER_QDSS_DAP] = &mas_qdss_dap,
+ [SLAVE_BIMC_CFG] = &slv_bimc_cfg,
+ [SLAVE_CAMERA_NRT_THROTTLE_CFG] = &slv_camera_nrt_throttle_cfg,
+ [SLAVE_CAMERA_RT_THROTTLE_CFG] = &slv_camera_rt_throttle_cfg,
+ [SLAVE_CAMERA_CFG] = &slv_camera_cfg,
+ [SLAVE_CLK_CTL] = &slv_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLAVE_DISPLAY_CFG] = &slv_display_cfg,
+ [SLAVE_DISPLAY_THROTTLE_CFG] = &slv_display_throttle_cfg,
+ [SLAVE_GPU_CFG] = &slv_gpu_cfg,
+ [SLAVE_HWKM] = &slv_hwkm,
+ [SLAVE_IMEM_CFG] = &slv_imem_cfg,
+ [SLAVE_IPA_CFG] = &slv_ipa_cfg,
+ [SLAVE_LPASS] = &slv_lpass,
+ [SLAVE_MESSAGE_RAM] = &slv_message_ram,
+ [SLAVE_PDM] = &slv_pdm,
+ [SLAVE_PIMEM_CFG] = &slv_pimem_cfg,
+ [SLAVE_PKA_WRAPPER] = &slv_pka_wrapper,
+ [SLAVE_PMIC_ARB] = &slv_pmic_arb,
+ [SLAVE_PRNG] = &slv_prng,
+ [SLAVE_QDSS_CFG] = &slv_qdss_cfg,
+ [SLAVE_QM_CFG] = &slv_qm_cfg,
+ [SLAVE_QM_MPU_CFG] = &slv_qm_mpu_cfg,
+ [SLAVE_QPIC] = &slv_qpic,
+ [SLAVE_QUP_0] = &slv_qup_0,
+ [SLAVE_SDCC_1] = &slv_sdcc_1,
+ [SLAVE_SDCC_2] = &slv_sdcc_2,
+ [SLAVE_SNOC_CFG] = &slv_snoc_cfg,
+ [SLAVE_TCSR] = &slv_tcsr,
+ [SLAVE_USB3] = &slv_usb3,
+ [SLAVE_VENUS_CFG] = &slv_venus_cfg,
+ [SLAVE_VENUS_THROTTLE_CFG] = &slv_venus_throttle_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &slv_vsense_ctrl_cfg,
+ [SLAVE_SERVICE_CNOC] = &slv_service_cnoc,
+};
+
+static const struct regmap_config qcm2290_cnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8200,
+ .fast_io = true,
+};
+
+static struct qcom_icc_desc qcm2290_cnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = qcm2290_cnoc_nodes,
+ .num_nodes = ARRAY_SIZE(qcm2290_cnoc_nodes),
+ .regmap_cfg = &qcm2290_cnoc_regmap_config,
+};
+
+static struct qcom_icc_node *qcm2290_snoc_nodes[] = {
+ [MASTER_CRYPTO_CORE0] = &mas_crypto_core0,
+ [MASTER_SNOC_CFG] = &mas_snoc_cfg,
+ [MASTER_TIC] = &mas_tic,
+ [MASTER_ANOC_SNOC] = &mas_anoc_snoc,
+ [MASTER_BIMC_SNOC] = &mas_bimc_snoc,
+ [MASTER_PIMEM] = &mas_pimem,
+ [MASTER_QDSS_BAM] = &mas_qdss_bam,
+ [MASTER_QUP_0] = &mas_qup_0,
+ [MASTER_IPA] = &mas_ipa,
+ [MASTER_QDSS_ETR] = &mas_qdss_etr,
+ [MASTER_SDCC_1] = &mas_sdcc_1,
+ [MASTER_SDCC_2] = &mas_sdcc_2,
+ [MASTER_QPIC] = &mas_qpic,
+ [MASTER_USB3_0] = &mas_usb3_0,
+ [SLAVE_APPSS] = &slv_appss,
+ [SLAVE_SNOC_CNOC] = &slv_snoc_cnoc,
+ [SLAVE_IMEM] = &slv_imem,
+ [SLAVE_PIMEM] = &slv_pimem,
+ [SLAVE_SNOC_BIMC] = &slv_snoc_bimc,
+ [SLAVE_SERVICE_SNOC] = &slv_service_snoc,
+ [SLAVE_QDSS_STM] = &slv_qdss_stm,
+ [SLAVE_TCU] = &slv_tcu,
+ [SLAVE_ANOC_SNOC] = &slv_anoc_snoc,
+};
+
+static const struct regmap_config qcm2290_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x60200,
+ .fast_io = true,
+};
+
+static struct qcom_icc_desc qcm2290_snoc = {
+ .type = QCOM_ICC_QNOC,
+ .nodes = qcm2290_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(qcm2290_snoc_nodes),
+ .regmap_cfg = &qcm2290_snoc_regmap_config,
+ /* Vendor DT node fab-sys_noc property 'qcom,base-offset' */
+ .qos_offset = 0x15000,
+};
+
+static struct qcom_icc_node *qcm2290_qup_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &mas_qup_core_0,
+ [SLAVE_QUP_CORE_0] = &slv_qup_core_0,
+};
+
+static struct qcom_icc_desc qcm2290_qup_virt = {
+ .type = QCOM_ICC_QNOC,
+ .nodes = qcm2290_qup_virt_nodes,
+ .num_nodes = ARRAY_SIZE(qcm2290_qup_virt_nodes),
+};
+
+static struct qcom_icc_node *qcm2290_mmnrt_virt_nodes[] = {
+ [MASTER_CAMNOC_SF] = &mas_camnoc_sf,
+ [MASTER_VIDEO_P0] = &mas_video_p0,
+ [MASTER_VIDEO_PROC] = &mas_video_proc,
+ [SLAVE_SNOC_BIMC_NRT] = &slv_snoc_bimc_nrt,
+};
+
+static struct qcom_icc_desc qcm2290_mmnrt_virt = {
+ .type = QCOM_ICC_QNOC,
+ .nodes = qcm2290_mmnrt_virt_nodes,
+ .num_nodes = ARRAY_SIZE(qcm2290_mmnrt_virt_nodes),
+ .regmap_cfg = &qcm2290_snoc_regmap_config,
+ .qos_offset = 0x15000,
+};
+
+static struct qcom_icc_node *qcm2290_mmrt_virt_nodes[] = {
+ [MASTER_CAMNOC_HF] = &mas_camnoc_hf,
+ [MASTER_MDP0] = &mas_mdp0,
+ [SLAVE_SNOC_BIMC_RT] = &slv_snoc_bimc_rt,
+};
+
+static struct qcom_icc_desc qcm2290_mmrt_virt = {
+ .type = QCOM_ICC_QNOC,
+ .nodes = qcm2290_mmrt_virt_nodes,
+ .num_nodes = ARRAY_SIZE(qcm2290_mmrt_virt_nodes),
+ .regmap_cfg = &qcm2290_snoc_regmap_config,
+ .qos_offset = 0x15000,
+};
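+
+/*
+ * The mmrt/mmnrt providers above are virtual NoCs: they carry no regmap
+ * of their own and reuse the SNoC regmap config and QoS offset,
+ * apparently because their masters' QoS ports live in the system NoC
+ * register space.
+ */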
+
+static const struct of_device_id qcm2290_noc_of_match[] = {
+ { .compatible = "qcom,qcm2290-bimc", .data = &qcm2290_bimc },
+ { .compatible = "qcom,qcm2290-cnoc", .data = &qcm2290_cnoc },
+ { .compatible = "qcom,qcm2290-snoc", .data = &qcm2290_snoc },
+ { .compatible = "qcom,qcm2290-qup-virt", .data = &qcm2290_qup_virt },
+ { .compatible = "qcom,qcm2290-mmrt-virt", .data = &qcm2290_mmrt_virt },
+ { .compatible = "qcom,qcm2290-mmnrt-virt", .data = &qcm2290_mmnrt_virt },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qcm2290_noc_of_match);
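+
+/*
+ * Illustrative device-tree sketch for one of the compatibles above (the
+ * unit address and reg size are assumptions made for the example only):
+ *
+ *   snoc: interconnect@1880000 {
+ *           compatible = "qcom,qcm2290-snoc";
+ *           reg = <0x01880000 0x60200>;
+ *           #interconnect-cells = <1>;
+ *   };
+ */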
+
+static struct platform_driver qcm2290_noc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-qcm2290",
+ .of_match_table = qcm2290_noc_of_match,
+ },
+};
+module_platform_driver(qcm2290_noc_driver);
+
+MODULE_DESCRIPTION("Qualcomm QCM2290 NoC driver");
+MODULE_LICENSE("GPL v2");
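+
+/*
+ * Consumer-side sketch using the generic interconnect API (illustrative
+ * only; nothing here is added by this patch): a client driver votes for
+ * bandwidth on a path between two of the endpoints defined above, e.g.
+ *
+ *   struct icc_path *path = of_icc_get(dev, NULL);
+ *
+ *   if (!IS_ERR_OR_NULL(path))
+ *           icc_set_bw(path, kBps_to_icc(1000), kBps_to_icc(2000));
+ */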
diff --git a/drivers/interconnect/qcom/sc7280.h b/drivers/interconnect/qcom/sc7280.h
index 175e400305c5..1fb9839b2c14 100644
--- a/drivers/interconnect/qcom/sc7280.h
+++ b/drivers/interconnect/qcom/sc7280.h
@@ -150,5 +150,7 @@
#define SC7280_SLAVE_PCIE_1 139
#define SC7280_SLAVE_QDSS_STM 140
#define SC7280_SLAVE_TCU 141
+#define SC7280_MASTER_EPSS_L3_APPS 142
+#define SC7280_SLAVE_EPSS_L3 143
#endif
diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c
index 471bb88f8828..274a7139fe1a 100644
--- a/drivers/interconnect/qcom/sdm660.c
+++ b/drivers/interconnect/qcom/sdm660.c
@@ -1513,6 +1513,7 @@ static const struct regmap_config sdm660_a2noc_regmap_config = {
};
static struct qcom_icc_desc sdm660_a2noc = {
+ .type = QCOM_ICC_NOC,
.nodes = sdm660_a2noc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_a2noc_nodes),
.clocks = bus_a2noc_clocks,
@@ -1540,9 +1541,9 @@ static const struct regmap_config sdm660_bimc_regmap_config = {
};
static struct qcom_icc_desc sdm660_bimc = {
+ .type = QCOM_ICC_BIMC,
.nodes = sdm660_bimc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_bimc_nodes),
- .is_bimc_node = true,
.regmap_cfg = &sdm660_bimc_regmap_config,
};
@@ -1594,6 +1595,7 @@ static const struct regmap_config sdm660_cnoc_regmap_config = {
};
static struct qcom_icc_desc sdm660_cnoc = {
+ .type = QCOM_ICC_NOC,
.nodes = sdm660_cnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_cnoc_nodes),
.regmap_cfg = &sdm660_cnoc_regmap_config,
@@ -1614,6 +1616,7 @@ static const struct regmap_config sdm660_gnoc_regmap_config = {
};
static struct qcom_icc_desc sdm660_gnoc = {
+ .type = QCOM_ICC_NOC,
.nodes = sdm660_gnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_gnoc_nodes),
.regmap_cfg = &sdm660_gnoc_regmap_config,
@@ -1653,6 +1656,7 @@ static const struct regmap_config sdm660_mnoc_regmap_config = {
};
static struct qcom_icc_desc sdm660_mnoc = {
+ .type = QCOM_ICC_NOC,
.nodes = sdm660_mnoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_mnoc_nodes),
.clocks = bus_mm_clocks,
@@ -1689,6 +1693,7 @@ static const struct regmap_config sdm660_snoc_regmap_config = {
};
static struct qcom_icc_desc sdm660_snoc = {
+ .type = QCOM_ICC_NOC,
.nodes = sdm660_snoc_nodes,
.num_nodes = ARRAY_SIZE(sdm660_snoc_nodes),
.regmap_cfg = &sdm660_snoc_regmap_config,
diff --git a/drivers/interconnect/qcom/sm8150.c b/drivers/interconnect/qcom/sm8150.c
index 2a85f53802b5..745e3c36a61a 100644
--- a/drivers/interconnect/qcom/sm8150.c
+++ b/drivers/interconnect/qcom/sm8150.c
@@ -535,7 +535,6 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8150",
.of_match_table = qnoc_of_match,
- .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index 8dfb5dea562a..aa707582ea01 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -551,7 +551,6 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8250",
.of_match_table = qnoc_of_match,
- .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/interconnect/qcom/sm8350.c b/drivers/interconnect/qcom/sm8350.c
index 3e26a2175b28..c79f93a1ac73 100644
--- a/drivers/interconnect/qcom/sm8350.c
+++ b/drivers/interconnect/qcom/sm8350.c
@@ -531,7 +531,6 @@ static struct platform_driver qnoc_driver = {
.driver = {
.name = "qnoc-sm8350",
.of_match_table = qnoc_of_match,
- .sync_state = icc_sync_state,
},
};
module_platform_driver(qnoc_driver);
diff --git a/drivers/interconnect/qcom/sm8450.c b/drivers/interconnect/qcom/sm8450.c
new file mode 100644
index 000000000000..8d99ee6421df
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8450.c
@@ -0,0 +1,1987 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,sm8450.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sm8450.h"
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .id = SM8450_MASTER_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A1NOC_SNOC },
+};
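+
+/*
+ * Unlike the RPM-based qcm2290 provider earlier in this series, sm8450
+ * uses the RPMh icc framework (see the icc-rpmh.h and bcm-voter.h
+ * includes above): nodes carry .channels/.buswidth plus static .links
+ * arrays, and bandwidth votes are aggregated through BCMs rather than
+ * per-node RPM master/slave ids.
+ */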
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .id = SM8450_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a1noc_cfg = {
+ .name = "qnm_a1noc_cfg",
+ .id = SM8450_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SM8450_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SM8450_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SM8450_MASTER_USB3_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SM8450_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .id = SM8450_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .id = SM8450_MASTER_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qnm_a2noc_cfg = {
+ .name = "qnm_a2noc_cfg",
+ .id = SM8450_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SM8450_MASTER_CRYPTO,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SM8450_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sensorss_q6 = {
+ .name = "qxm_sensorss_q6",
+ .id = SM8450_MASTER_SENSORS_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qxm_sp = {
+ .name = "qxm_sp",
+ .id = SM8450_MASTER_SP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+ .name = "xm_qdss_etr_0",
+ .id = SM8450_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+ .name = "xm_qdss_etr_1",
+ .id = SM8450_MASTER_QDSS_ETR_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SM8450_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .id = SM8450_MASTER_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_QUP_CORE_0 },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .id = SM8450_MASTER_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_QUP_CORE_1 },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .id = SM8450_MASTER_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_QUP_CORE_2 },
+};
+
+static struct qcom_icc_node qnm_gemnoc_cnoc = {
+ .name = "qnm_gemnoc_cnoc",
+ .id = SM8450_MASTER_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 51,
+ .links = { SM8450_SLAVE_AHB2PHY_SOUTH, SM8450_SLAVE_AHB2PHY_NORTH,
+ SM8450_SLAVE_AOSS, SM8450_SLAVE_CAMERA_CFG,
+ SM8450_SLAVE_CLK_CTL, SM8450_SLAVE_CDSP_CFG,
+ SM8450_SLAVE_RBCPR_CX_CFG, SM8450_SLAVE_RBCPR_MMCX_CFG,
+ SM8450_SLAVE_RBCPR_MXA_CFG, SM8450_SLAVE_RBCPR_MXC_CFG,
+ SM8450_SLAVE_CRYPTO_0_CFG, SM8450_SLAVE_CX_RDPM,
+ SM8450_SLAVE_DISPLAY_CFG, SM8450_SLAVE_GFX3D_CFG,
+ SM8450_SLAVE_IMEM_CFG, SM8450_SLAVE_IPA_CFG,
+ SM8450_SLAVE_IPC_ROUTER_CFG, SM8450_SLAVE_LPASS,
+ SM8450_SLAVE_CNOC_MSS, SM8450_SLAVE_MX_RDPM,
+ SM8450_SLAVE_PCIE_0_CFG, SM8450_SLAVE_PCIE_1_CFG,
+ SM8450_SLAVE_PDM, SM8450_SLAVE_PIMEM_CFG,
+ SM8450_SLAVE_PRNG, SM8450_SLAVE_QDSS_CFG,
+ SM8450_SLAVE_QSPI_0, SM8450_SLAVE_QUP_0,
+ SM8450_SLAVE_QUP_1, SM8450_SLAVE_QUP_2,
+ SM8450_SLAVE_SDCC_2, SM8450_SLAVE_SDCC_4,
+ SM8450_SLAVE_SPSS_CFG, SM8450_SLAVE_TCSR,
+ SM8450_SLAVE_TLMM, SM8450_SLAVE_TME_CFG,
+ SM8450_SLAVE_UFS_MEM_CFG, SM8450_SLAVE_USB3_0,
+ SM8450_SLAVE_VENUS_CFG, SM8450_SLAVE_VSENSE_CTRL_CFG,
+ SM8450_SLAVE_A1NOC_CFG, SM8450_SLAVE_A2NOC_CFG,
+ SM8450_SLAVE_DDRSS_CFG, SM8450_SLAVE_CNOC_MNOC_CFG,
+ SM8450_SLAVE_PCIE_ANOC_CFG, SM8450_SLAVE_SNOC_CFG,
+ SM8450_SLAVE_IMEM, SM8450_SLAVE_PIMEM,
+ SM8450_SLAVE_SERVICE_CNOC, SM8450_SLAVE_QDSS_STM,
+ SM8450_SLAVE_TCU },
+};
+
+static struct qcom_icc_node qnm_gemnoc_pcie = {
+ .name = "qnm_gemnoc_pcie",
+ .id = SM8450_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_PCIE_0, SM8450_SLAVE_PCIE_1 },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .id = SM8450_MASTER_GPU_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .id = SM8450_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .id = SM8450_MASTER_APPSS_PROC,
+ .channels = 3,
+ .buswidth = 32,
+ .num_links = 3,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC,
+ SM8450_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .id = SM8450_MASTER_GFX3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mdsp = {
+ .name = "qnm_mdsp",
+ .id = SM8450_MASTER_MSS_PROC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC,
+ SM8450_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SM8450_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SM8450_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_nsp_gemnoc = {
+ .name = "qnm_nsp_gemnoc",
+ .id = SM8450_MASTER_COMPUTE_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SM8450_MASTER_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SM8450_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SM8450_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 3,
+ .links = { SM8450_SLAVE_GEM_NOC_CNOC, SM8450_SLAVE_LLCC,
+ SM8450_SLAVE_MEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qhm_config_noc = {
+ .name = "qhm_config_noc",
+ .id = SM8450_MASTER_CNOC_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 6,
+ .links = { SM8450_SLAVE_LPASS_CORE_CFG, SM8450_SLAVE_LPASS_LPI_CFG,
+ SM8450_SLAVE_LPASS_MPU_CFG, SM8450_SLAVE_LPASS_TOP_CFG,
+ SM8450_SLAVE_SERVICES_LPASS_AML_NOC, SM8450_SLAVE_SERVICE_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qxm_lpass_dsp = {
+ .name = "qxm_lpass_dsp",
+ .id = SM8450_MASTER_LPASS_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 4,
+ .links = { SM8450_SLAVE_LPASS_TOP_CFG, SM8450_SLAVE_LPASS_SNOC,
+ SM8450_SLAVE_SERVICES_LPASS_AML_NOC, SM8450_SLAVE_SERVICE_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SM8450_MASTER_LLCC,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_EBI1 },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .id = SM8450_MASTER_CAMNOC_HF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp = {
+ .name = "qnm_camnoc_icp",
+ .id = SM8450_MASTER_CAMNOC_ICP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .id = SM8450_MASTER_CAMNOC_SF,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mdp = {
+ .name = "qnm_mdp",
+ .id = SM8450_MASTER_MDP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_mnoc_cfg = {
+ .name = "qnm_mnoc_cfg",
+ .id = SM8450_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qnm_rot = {
+ .name = "qnm_rot",
+ .id = SM8450_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_vapss_hcp = {
+ .name = "qnm_vapss_hcp",
+ .id = SM8450_MASTER_CDSP_HCP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video = {
+ .name = "qnm_video",
+ .id = SM8450_MASTER_VIDEO,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cv_cpu = {
+ .name = "qnm_video_cv_cpu",
+ .id = SM8450_MASTER_VIDEO_CV_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_cvp = {
+ .name = "qnm_video_cvp",
+ .id = SM8450_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+ .name = "qnm_video_v_cpu",
+ .id = SM8450_MASTER_VIDEO_V_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_nsp_noc_config = {
+ .name = "qhm_nsp_noc_config",
+ .id = SM8450_MASTER_CDSP_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SERVICE_NSP_NOC },
+};
+
+static struct qcom_icc_node qxm_nsp = {
+ .name = "qxm_nsp",
+ .id = SM8450_MASTER_CDSP_PROC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_CDSP_MEM_NOC },
+};
+
+static struct qcom_icc_node qnm_pcie_anoc_cfg = {
+ .name = "qnm_pcie_anoc_cfg",
+ .id = SM8450_MASTER_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SERVICE_PCIE_ANOC },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SM8450_MASTER_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_pcie3_1 = {
+ .name = "xm_pcie3_1",
+ .id = SM8450_MASTER_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node qhm_gic = {
+ .name = "qhm_gic",
+ .id = SM8450_MASTER_GIC_AHB,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SM8450_MASTER_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SM8450_MASTER_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_lpass_noc = {
+ .name = "qnm_lpass_noc",
+ .id = SM8450_MASTER_LPASS_ANOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SNOC_GEM_NOC_SF },
+};
+
+static struct qcom_icc_node qnm_snoc_cfg = {
+ .name = "qnm_snoc_cfg",
+ .id = SM8450_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SM8450_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SM8450_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_SNOC_GEM_NOC_GC },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf_disp = {
+ .name = "qnm_mnoc_hf_disp",
+ .id = SM8450_MASTER_MNOC_HF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf_disp = {
+ .name = "qnm_mnoc_sf_disp",
+ .id = SM8450_MASTER_MNOC_SF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node qnm_pcie_disp = {
+ .name = "qnm_pcie_disp",
+ .id = SM8450_MASTER_ANOC_PCIE_GEM_NOC_DISP,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_LLCC_DISP },
+};
+
+static struct qcom_icc_node llcc_mc_disp = {
+ .name = "llcc_mc_disp",
+ .id = SM8450_MASTER_LLCC_DISP,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_EBI1_DISP },
+};
+
+static struct qcom_icc_node qnm_mdp_disp = {
+ .name = "qnm_mdp_disp",
+ .id = SM8450_MASTER_MDP_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qnm_rot_disp = {
+ .name = "qnm_rot_disp",
+ .id = SM8450_MASTER_ROTATOR_DISP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SM8450_SLAVE_A1NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_A1NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SM8450_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SM8450_SLAVE_A2NOC_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_A2NOC_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SM8450_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .id = SM8450_SLAVE_QUP_CORE_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .id = SM8450_SLAVE_QUP_CORE_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .id = SM8450_SLAVE_QUP_CORE_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .id = SM8450_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .id = SM8450_SLAVE_AHB2PHY_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SM8450_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SM8450_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SM8450_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_compute_cfg = {
+ .name = "qhs_compute_cfg",
+ .id = SM8450_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_CDSP_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SM8450_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mmcx = {
+ .name = "qhs_cpr_mmcx",
+ .id = SM8450_SLAVE_RBCPR_MMCX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mxa = {
+ .name = "qhs_cpr_mxa",
+ .id = SM8450_SLAVE_RBCPR_MXA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cpr_mxc = {
+ .name = "qhs_cpr_mxc",
+ .id = SM8450_SLAVE_RBCPR_MXC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SM8450_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_cx_rdpm = {
+ .name = "qhs_cx_rdpm",
+ .id = SM8450_SLAVE_CX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SM8450_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SM8450_SLAVE_GFX3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SM8450_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SM8450_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .id = SM8450_SLAVE_IPC_ROUTER_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lpass_cfg = {
+ .name = "qhs_lpass_cfg",
+ .id = SM8450_SLAVE_LPASS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_CNOC_LPASS_AG_NOC },
+};
+
+static struct qcom_icc_node qhs_mss_cfg = {
+ .name = "qhs_mss_cfg",
+ .id = SM8450_SLAVE_CNOC_MSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_mx_rdpm = {
+ .name = "qhs_mx_rdpm",
+ .id = SM8450_SLAVE_MX_RDPM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .id = SM8450_SLAVE_PCIE_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .id = SM8450_SLAVE_PCIE_1_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SM8450_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SM8450_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SM8450_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SM8450_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .id = SM8450_SLAVE_QSPI_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .id = SM8450_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .id = SM8450_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .id = SM8450_SLAVE_QUP_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SM8450_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SM8450_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_spss_cfg = {
+ .name = "qhs_spss_cfg",
+ .id = SM8450_SLAVE_SPSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SM8450_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .id = SM8450_SLAVE_TLMM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+ .name = "qhs_tme_cfg",
+ .id = SM8450_SLAVE_TME_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SM8450_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SM8450_SLAVE_USB3_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SM8450_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SM8450_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_a1_noc_cfg = {
+ .name = "qns_a1_noc_cfg",
+ .id = SM8450_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qns_a2_noc_cfg = {
+ .name = "qns_a2_noc_cfg",
+ .id = SM8450_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qns_ddrss_cfg = {
+ .name = "qns_ddrss_cfg",
+ .id = SM8450_SLAVE_DDRSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ /* FIXME: num_links is 1, but no .links target is defined */
+};
+
+static struct qcom_icc_node qns_mnoc_cfg = {
+ .name = "qns_mnoc_cfg",
+ .id = SM8450_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qns_pcie_anoc_cfg = {
+ .name = "qns_pcie_anoc_cfg",
+ .id = SM8450_SLAVE_PCIE_ANOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_PCIE_ANOC_CFG },
+};
+
+static struct qcom_icc_node qns_snoc_cfg = {
+ .name = "qns_snoc_cfg",
+ .id = SM8450_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM8450_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SM8450_SLAVE_IMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SM8450_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SM8450_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .id = SM8450_SLAVE_PCIE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .id = SM8450_SLAVE_PCIE_1,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SM8450_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SM8450_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gem_noc_cnoc = {
+ .name = "qns_gem_noc_cnoc",
+ .id = SM8450_SLAVE_GEM_NOC_CNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_GEM_NOC_CNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SM8450_SLAVE_LLCC,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_LLCC },
+};
+
+static struct qcom_icc_node qns_pcie = {
+ .name = "qns_pcie",
+ .id = SM8450_SLAVE_MEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node qhs_lpass_core = {
+ .name = "qhs_lpass_core",
+ .id = SM8450_SLAVE_LPASS_CORE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lpass_lpi = {
+ .name = "qhs_lpass_lpi",
+ .id = SM8450_SLAVE_LPASS_LPI_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lpass_mpu = {
+ .name = "qhs_lpass_mpu",
+ .id = SM8450_SLAVE_LPASS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qhs_lpass_top = {
+ .name = "qhs_lpass_top",
+ .id = SM8450_SLAVE_LPASS_TOP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_sysnoc = {
+ .name = "qns_sysnoc",
+ .id = SM8450_SLAVE_LPASS_SNOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_LPASS_ANOC },
+};
+
+static struct qcom_icc_node srvc_niu_aml_noc = {
+ .name = "srvc_niu_aml_noc",
+ .id = SM8450_SLAVE_SERVICES_LPASS_AML_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node srvc_niu_lpass_agnoc = {
+ .name = "srvc_niu_lpass_agnoc",
+ .id = SM8450_SLAVE_SERVICE_LPASS_AG_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SM8450_SLAVE_EBI1,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SM8450_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .id = SM8450_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SM8450_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_nsp_gemnoc = {
+ .name = "qns_nsp_gemnoc",
+ .id = SM8450_SLAVE_CDSP_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node service_nsp_noc = {
+ .name = "service_nsp_noc",
+ .id = SM8450_SLAVE_SERVICE_NSP_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_pcie_mem_noc = {
+ .name = "qns_pcie_mem_noc",
+ .id = SM8450_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node srvc_pcie_aggre_noc = {
+ .name = "srvc_pcie_aggre_noc",
+ .id = SM8450_SLAVE_SERVICE_PCIE_ANOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SM8450_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM8450_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SM8450_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SM8450_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_llcc_disp = {
+ .name = "qns_llcc_disp",
+ .id = SM8450_SLAVE_LLCC_DISP,
+ .channels = 4,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM8450_MASTER_LLCC_DISP },
+};
+
+static struct qcom_icc_node ebi_disp = {
+ .name = "ebi_disp",
+ .id = SM8450_SLAVE_EBI1_DISP,
+ .channels = 4,
+ .buswidth = 4,
+ .num_links = 0,
+};
+
+static struct qcom_icc_node qns_mem_noc_hf_disp = {
+ .name = "qns_mem_noc_hf_disp",
+ .id = SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_MASTER_MNOC_HF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf_disp = {
+ .name = "qns_mem_noc_sf_disp",
+ .id = SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM8450_MASTER_MNOC_SF_MEM_NOC_DISP },
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 55,
+ .nodes = { &qnm_gemnoc_cnoc, &qnm_gemnoc_pcie,
+ &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_aoss, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_compute_cfg,
+ &qhs_cpr_cx, &qhs_cpr_mmcx,
+ &qhs_cpr_mxa, &qhs_cpr_mxc,
+ &qhs_crypto0_cfg, &qhs_cx_rdpm,
+ &qhs_display_cfg, &qhs_gpuss_cfg,
+ &qhs_imem_cfg, &qhs_ipa,
+ &qhs_ipc_router, &qhs_lpass_cfg,
+ &qhs_mss_cfg, &qhs_mx_rdpm,
+ &qhs_pcie0_cfg, &qhs_pcie1_cfg,
+ &qhs_pdm, &qhs_pimem_cfg,
+ &qhs_prng, &qhs_qdss_cfg,
+ &qhs_qspi, &qhs_qup0,
+ &qhs_qup1, &qhs_qup2,
+ &qhs_sdc2, &qhs_sdc4,
+ &qhs_spss_cfg, &qhs_tcsr,
+ &qhs_tlmm, &qhs_tme_cfg,
+ &qhs_ufs_mem_cfg, &qhs_usb3_0,
+ &qhs_venus_cfg, &qhs_vsense_ctrl_cfg,
+ &qns_a1_noc_cfg, &qns_a2_noc_cfg,
+ &qns_ddrss_cfg, &qns_mnoc_cfg,
+ &qns_pcie_anoc_cfg, &qns_snoc_cfg,
+ &qxs_imem, &qxs_pimem,
+ &srvc_cnoc, &xs_pcie_0,
+ &xs_pcie_1, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .num_nodes = 2,
+ .nodes = { &qxm_nsp, &qns_nsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .num_nodes = 12,
+ .nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp,
+ &qnm_camnoc_sf, &qnm_mdp,
+ &qnm_mnoc_cfg, &qnm_rot,
+ &qnm_vapss_hcp, &qnm_video,
+ &qnm_video_cv_cpu, &qnm_video_cvp,
+ &qnm_video_v_cpu, &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .keepalive = true,
+ .vote_scale = 1,
+ .num_nodes = 1,
+ .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .num_nodes = 7,
+ .nodes = { &alm_gpu_tcu, &alm_sys_tcu,
+ &qnm_nsp_gemnoc, &qnm_pcie,
+ &qnm_snoc_gc, &qns_gem_noc_cnoc,
+ &qns_pcie },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .num_nodes = 4,
+ .nodes = { &qhm_gic, &qxm_pimem,
+ &xm_gic, &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 1,
+ .nodes = { &qnm_lpass_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn7 = {
+ .name = "SN7",
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_mem_noc },
+};
+
+static struct qcom_icc_bcm bcm_acv_disp = {
+ .name = "ACV",
+ .num_nodes = 1,
+ .nodes = { &ebi_disp },
+};
+
+static struct qcom_icc_bcm bcm_mc0_disp = {
+ .name = "MC0",
+ .num_nodes = 1,
+ .nodes = { &ebi_disp },
+};
+
+static struct qcom_icc_bcm bcm_mm0_disp = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf_disp },
+};
+
+static struct qcom_icc_bcm bcm_mm1_disp = {
+ .name = "MM1",
+ .num_nodes = 3,
+ .nodes = { &qnm_mdp_disp, &qnm_rot_disp,
+ &qns_mem_noc_sf_disp },
+};
+
+static struct qcom_icc_bcm bcm_sh0_disp = {
+ .name = "SH0",
+ .num_nodes = 1,
+ .nodes = { &qns_llcc_disp },
+};
+
+static struct qcom_icc_bcm bcm_sh1_disp = {
+ .name = "SH1",
+ .num_nodes = 1,
+ .nodes = { &qnm_pcie_disp },
+};
+
+static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *aggre1_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_A1NOC_CFG] = &qnm_a1noc_cfg,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static struct qcom_icc_desc sm8450_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node *aggre2_noc_nodes[] = {
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_A2NOC_CFG] = &qnm_a2noc_cfg,
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_SENSORS_PROC] = &qxm_sensorss_q6,
+ [MASTER_SP] = &qxm_sp,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr_0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static struct qcom_icc_desc sm8450_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm *clk_virt_bcms[] = {
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+};
+
+static struct qcom_icc_node *clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static struct qcom_icc_desc sm8450_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+};
+
+static struct qcom_icc_bcm *config_noc_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node *config_noc_nodes[] = {
+ [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx,
+ [SLAVE_RBCPR_MXA_CFG] = &qhs_cpr_mxa,
+ [SLAVE_RBCPR_MXC_CFG] = &qhs_cpr_mxc,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CX_RDPM] = &qhs_cx_rdpm,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_LPASS] = &qhs_lpass_cfg,
+ [SLAVE_CNOC_MSS] = &qhs_mss_cfg,
+ [SLAVE_MX_RDPM] = &qhs_mx_rdpm,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SPSS_CFG] = &qhs_spss_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_TME_CFG] = &qhs_tme_cfg,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_A1NOC_CFG] = &qns_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qns_a2_noc_cfg,
+ [SLAVE_DDRSS_CFG] = &qns_ddrss_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qns_mnoc_cfg,
+ [SLAVE_PCIE_ANOC_CFG] = &qns_pcie_anoc_cfg,
+ [SLAVE_SNOC_CFG] = &qns_snoc_cfg,
+ [SLAVE_IMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_desc sm8450_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm *gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+ &bcm_sh0_disp,
+ &bcm_sh1_disp,
+};
+
+static struct qcom_icc_node *gem_noc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_MSS_PROC] = &qnm_mdsp,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_COMPUTE_NOC] = &qnm_nsp_gemnoc,
+ [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
+ [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
+ [MASTER_MNOC_SF_MEM_NOC_DISP] = &qnm_mnoc_sf_disp,
+ [MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp,
+ [SLAVE_LLCC_DISP] = &qns_llcc_disp,
+};
+
+static struct qcom_icc_desc sm8450_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+};
+
+static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+ [MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
+ [MASTER_LPASS_PROC] = &qxm_lpass_dsp,
+ [SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
+ [SLAVE_LPASS_LPI_CFG] = &qhs_lpass_lpi,
+ [SLAVE_LPASS_MPU_CFG] = &qhs_lpass_mpu,
+ [SLAVE_LPASS_TOP_CFG] = &qhs_lpass_top,
+ [SLAVE_LPASS_SNOC] = &qns_sysnoc,
+ [SLAVE_SERVICES_LPASS_AML_NOC] = &srvc_niu_aml_noc,
+ [SLAVE_SERVICE_LPASS_AG_NOC] = &srvc_niu_lpass_agnoc,
+};
+
+static struct qcom_icc_desc sm8450_lpass_ag_noc = {
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+ .bcms = lpass_ag_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
+};
+
+static struct qcom_icc_bcm *mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+ &bcm_acv_disp,
+ &bcm_mc0_disp,
+};
+
+static struct qcom_icc_node *mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+ [MASTER_LLCC_DISP] = &llcc_mc_disp,
+ [SLAVE_EBI1_DISP] = &ebi_disp,
+};
+
+static struct qcom_icc_desc sm8450_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm0_disp,
+ &bcm_mm1_disp,
+};
+
+static struct qcom_icc_node *mmss_noc_nodes[] = {
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_MDP] = &qnm_mdp,
+ [MASTER_CNOC_MNOC_CFG] = &qnm_mnoc_cfg,
+ [MASTER_ROTATOR] = &qnm_rot,
+ [MASTER_CDSP_HCP] = &qnm_vapss_hcp,
+ [MASTER_VIDEO] = &qnm_video,
+ [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
+ [MASTER_VIDEO_PROC] = &qnm_video_cvp,
+ [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+ [MASTER_MDP_DISP] = &qnm_mdp_disp,
+ [MASTER_ROTATOR_DISP] = &qnm_rot_disp,
+ [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
+ [SLAVE_MNOC_SF_MEM_NOC_DISP] = &qns_mem_noc_sf_disp,
+};
+
+static struct qcom_icc_desc sm8450_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm *nsp_noc_bcms[] = {
+ &bcm_co0,
+};
+
+static struct qcom_icc_node *nsp_noc_nodes[] = {
+ [MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
+ [MASTER_CDSP_PROC] = &qxm_nsp,
+ [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc,
+ [SLAVE_SERVICE_NSP_NOC] = &service_nsp_noc,
+};
+
+static struct qcom_icc_desc sm8450_nsp_noc = {
+ .nodes = nsp_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+ .bcms = nsp_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+};
+
+static struct qcom_icc_bcm *pcie_anoc_bcms[] = {
+ &bcm_sn7,
+};
+
+static struct qcom_icc_node *pcie_anoc_nodes[] = {
+ [MASTER_PCIE_ANOC_CFG] = &qnm_pcie_anoc_cfg,
+ [MASTER_PCIE_0] = &xm_pcie3_0,
+ [MASTER_PCIE_1] = &xm_pcie3_1,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
+ [SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc,
+};
+
+static struct qcom_icc_desc sm8450_pcie_anoc = {
+ .nodes = pcie_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_anoc_nodes),
+ .bcms = pcie_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
+};
+
+static struct qcom_icc_bcm *system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node *system_noc_nodes[] = {
+ [MASTER_GIC_AHB] = &qhm_gic,
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_LPASS_ANOC] = &qnm_lpass_noc,
+ [MASTER_SNOC_CFG] = &qnm_snoc_cfg,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+};
+
+static struct qcom_icc_desc sm8450_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static int qnoc_probe(struct platform_device *pdev)
+{
+ const struct qcom_icc_desc *desc;
+ struct icc_onecell_data *data;
+ struct icc_provider *provider;
+ struct qcom_icc_node **qnodes;
+ struct qcom_icc_provider *qp;
+ struct icc_node *node;
+ size_t num_nodes, i;
+ int ret;
+
+ desc = device_get_match_data(&pdev->dev);
+ if (!desc)
+ return -EINVAL;
+
+ qnodes = desc->nodes;
+ num_nodes = desc->num_nodes;
+
+ qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL);
+ if (!qp)
+ return -ENOMEM;
+
+ data = devm_kzalloc(&pdev->dev, struct_size(data, nodes, num_nodes), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ provider = &qp->provider;
+ provider->dev = &pdev->dev;
+ provider->set = qcom_icc_set;
+ provider->pre_aggregate = qcom_icc_pre_aggregate;
+ provider->aggregate = qcom_icc_aggregate;
+ provider->xlate_extended = qcom_icc_xlate_extended;
+ INIT_LIST_HEAD(&provider->nodes);
+ provider->data = data;
+
+ qp->dev = &pdev->dev;
+ qp->bcms = desc->bcms;
+ qp->num_bcms = desc->num_bcms;
+
+ qp->voter = of_bcm_voter_get(qp->dev, NULL);
+ if (IS_ERR(qp->voter))
+ return PTR_ERR(qp->voter);
+
+ ret = icc_provider_add(provider);
+ if (ret) {
+ dev_err(&pdev->dev, "error adding interconnect provider\n");
+ return ret;
+ }
+
+ for (i = 0; i < qp->num_bcms; i++)
+ qcom_icc_bcm_init(qp->bcms[i], &pdev->dev);
+
+ for (i = 0; i < num_nodes; i++) {
+ size_t j;
+
+ if (!qnodes[i])
+ continue;
+
+ node = icc_node_create(qnodes[i]->id);
+ if (IS_ERR(node)) {
+ ret = PTR_ERR(node);
+ goto err;
+ }
+
+ node->name = qnodes[i]->name;
+ node->data = qnodes[i];
+ icc_node_add(node, provider);
+
+ for (j = 0; j < qnodes[i]->num_links; j++)
+ icc_link_create(node, qnodes[i]->links[j]);
+
+ data->nodes[i] = node;
+ }
+ data->num_nodes = num_nodes;
+
+ platform_set_drvdata(pdev, qp);
+
+ return 0;
+err:
+ icc_nodes_remove(provider);
+ icc_provider_del(provider);
+ return ret;
+}
+
+static int qnoc_remove(struct platform_device *pdev)
+{
+ struct qcom_icc_provider *qp = platform_get_drvdata(pdev);
+
+ icc_nodes_remove(&qp->provider);
+ return icc_provider_del(&qp->provider);
+}
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sm8450-aggre1-noc",
+ .data = &sm8450_aggre1_noc},
+ { .compatible = "qcom,sm8450-aggre2-noc",
+ .data = &sm8450_aggre2_noc},
+ { .compatible = "qcom,sm8450-clk-virt",
+ .data = &sm8450_clk_virt},
+ { .compatible = "qcom,sm8450-config-noc",
+ .data = &sm8450_config_noc},
+ { .compatible = "qcom,sm8450-gem-noc",
+ .data = &sm8450_gem_noc},
+ { .compatible = "qcom,sm8450-lpass-ag-noc",
+ .data = &sm8450_lpass_ag_noc},
+ { .compatible = "qcom,sm8450-mc-virt",
+ .data = &sm8450_mc_virt},
+ { .compatible = "qcom,sm8450-mmss-noc",
+ .data = &sm8450_mmss_noc},
+ { .compatible = "qcom,sm8450-nsp-noc",
+ .data = &sm8450_nsp_noc},
+ { .compatible = "qcom,sm8450-pcie-anoc",
+ .data = &sm8450_pcie_anoc},
+ { .compatible = "qcom,sm8450-system-noc",
+ .data = &sm8450_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qnoc_probe,
+ .remove = qnoc_remove,
+ .driver = {
+ .name = "qnoc-sm8450",
+ .of_match_table = qnoc_of_match,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("sm8450 NoC driver");
+MODULE_LICENSE("GPL v2");
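
For context, a minimal sketch (not part of this patch) of how a client driver could vote for bandwidth on a path exposed by this provider through the generic interconnect API. The "memory" path name and the bandwidth figures are illustrative assumptions; the path must be described by interconnects/interconnect-names properties in the consumer's device tree node.

#include <linux/interconnect.h>
#include <linux/platform_device.h>

/* Hypothetical consumer: request bandwidth on a DT-described path. */
static int example_icc_vote(struct platform_device *pdev)
{
	struct icc_path *path;
	int ret;

	/* "memory" must match an interconnect-names entry for this device */
	path = devm_of_icc_get(&pdev->dev, "memory");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* Vote 100 MB/s average and 200 MB/s peak on the whole path */
	ret = icc_set_bw(path, MBps_to_icc(100), MBps_to_icc(200));
	if (ret)
		dev_err(&pdev->dev, "interconnect vote failed: %d\n", ret);

	return ret;
}

The provider aggregates such votes per BCM (the qcom_icc_bcm tables above) and forwards the result to the RPMh hardware through the BCM voter.
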
diff --git a/drivers/interconnect/qcom/sm8450.h b/drivers/interconnect/qcom/sm8450.h
new file mode 100644
index 000000000000..a5790ec6767b
--- /dev/null
+++ b/drivers/interconnect/qcom/sm8450.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * SM8450 interconnect IDs
+ *
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8450_H
+#define __DRIVERS_INTERCONNECT_QCOM_SM8450_H
+
+#define SM8450_MASTER_GPU_TCU 0
+#define SM8450_MASTER_SYS_TCU 1
+#define SM8450_MASTER_APPSS_PROC 2
+#define SM8450_MASTER_LLCC 3
+#define SM8450_MASTER_CNOC_LPASS_AG_NOC 4
+#define SM8450_MASTER_GIC_AHB 5
+#define SM8450_MASTER_CDSP_NOC_CFG 6
+#define SM8450_MASTER_QDSS_BAM 7
+#define SM8450_MASTER_QSPI_0 8
+#define SM8450_MASTER_QUP_0 9
+#define SM8450_MASTER_QUP_1 10
+#define SM8450_MASTER_QUP_2 11
+#define SM8450_MASTER_A1NOC_CFG 12
+#define SM8450_MASTER_A2NOC_CFG 13
+#define SM8450_MASTER_A1NOC_SNOC 14
+#define SM8450_MASTER_A2NOC_SNOC 15
+#define SM8450_MASTER_CAMNOC_HF 16
+#define SM8450_MASTER_CAMNOC_ICP 17
+#define SM8450_MASTER_CAMNOC_SF 18
+#define SM8450_MASTER_GEM_NOC_CNOC 19
+#define SM8450_MASTER_GEM_NOC_PCIE_SNOC 20
+#define SM8450_MASTER_GFX3D 21
+#define SM8450_MASTER_LPASS_ANOC 22
+#define SM8450_MASTER_MDP 23
+#define SM8450_MASTER_MDP0 SM8450_MASTER_MDP
+#define SM8450_MASTER_MDP1 SM8450_MASTER_MDP
+#define SM8450_MASTER_MSS_PROC 24
+#define SM8450_MASTER_CNOC_MNOC_CFG 25
+#define SM8450_MASTER_MNOC_HF_MEM_NOC 26
+#define SM8450_MASTER_MNOC_SF_MEM_NOC 27
+#define SM8450_MASTER_COMPUTE_NOC 28
+#define SM8450_MASTER_ANOC_PCIE_GEM_NOC 29
+#define SM8450_MASTER_PCIE_ANOC_CFG 30
+#define SM8450_MASTER_ROTATOR 31
+#define SM8450_MASTER_SNOC_CFG 32
+#define SM8450_MASTER_SNOC_GC_MEM_NOC 33
+#define SM8450_MASTER_SNOC_SF_MEM_NOC 34
+#define SM8450_MASTER_CDSP_HCP 35
+#define SM8450_MASTER_VIDEO 36
+#define SM8450_MASTER_VIDEO_P0 SM8450_MASTER_VIDEO
+#define SM8450_MASTER_VIDEO_P1 SM8450_MASTER_VIDEO
+#define SM8450_MASTER_VIDEO_CV_PROC 37
+#define SM8450_MASTER_VIDEO_PROC 38
+#define SM8450_MASTER_VIDEO_V_PROC 39
+#define SM8450_MASTER_QUP_CORE_0 40
+#define SM8450_MASTER_QUP_CORE_1 41
+#define SM8450_MASTER_QUP_CORE_2 42
+#define SM8450_MASTER_CRYPTO 43
+#define SM8450_MASTER_IPA 44
+#define SM8450_MASTER_LPASS_PROC 45
+#define SM8450_MASTER_CDSP_PROC 46
+#define SM8450_MASTER_PIMEM 47
+#define SM8450_MASTER_SENSORS_PROC 48
+#define SM8450_MASTER_SP 49
+#define SM8450_MASTER_GIC 50
+#define SM8450_MASTER_PCIE_0 51
+#define SM8450_MASTER_PCIE_1 52
+#define SM8450_MASTER_QDSS_ETR 53
+#define SM8450_MASTER_QDSS_ETR_1 54
+#define SM8450_MASTER_SDCC_2 55
+#define SM8450_MASTER_SDCC_4 56
+#define SM8450_MASTER_UFS_MEM 57
+#define SM8450_MASTER_USB3_0 58
+#define SM8450_SLAVE_EBI1 512
+#define SM8450_SLAVE_AHB2PHY_SOUTH 513
+#define SM8450_SLAVE_AHB2PHY_NORTH 514
+#define SM8450_SLAVE_AOSS 515
+#define SM8450_SLAVE_CAMERA_CFG 516
+#define SM8450_SLAVE_CLK_CTL 517
+#define SM8450_SLAVE_CDSP_CFG 518
+#define SM8450_SLAVE_RBCPR_CX_CFG 519
+#define SM8450_SLAVE_RBCPR_MMCX_CFG 520
+#define SM8450_SLAVE_RBCPR_MXA_CFG 521
+#define SM8450_SLAVE_RBCPR_MXC_CFG 522
+#define SM8450_SLAVE_CRYPTO_0_CFG 523
+#define SM8450_SLAVE_CX_RDPM 524
+#define SM8450_SLAVE_DISPLAY_CFG 525
+#define SM8450_SLAVE_GFX3D_CFG 526
+#define SM8450_SLAVE_IMEM_CFG 527
+#define SM8450_SLAVE_IPA_CFG 528
+#define SM8450_SLAVE_IPC_ROUTER_CFG 529
+#define SM8450_SLAVE_LPASS 530
+#define SM8450_SLAVE_LPASS_CORE_CFG 531
+#define SM8450_SLAVE_LPASS_LPI_CFG 532
+#define SM8450_SLAVE_LPASS_MPU_CFG 533
+#define SM8450_SLAVE_LPASS_TOP_CFG 534
+#define SM8450_SLAVE_CNOC_MSS 535
+#define SM8450_SLAVE_MX_RDPM 536
+#define SM8450_SLAVE_PCIE_0_CFG 537
+#define SM8450_SLAVE_PCIE_1_CFG 538
+#define SM8450_SLAVE_PDM 539
+#define SM8450_SLAVE_PIMEM_CFG 540
+#define SM8450_SLAVE_PRNG 541
+#define SM8450_SLAVE_QDSS_CFG 542
+#define SM8450_SLAVE_QSPI_0 543
+#define SM8450_SLAVE_QUP_0 544
+#define SM8450_SLAVE_QUP_1 545
+#define SM8450_SLAVE_QUP_2 546
+#define SM8450_SLAVE_SDCC_2 547
+#define SM8450_SLAVE_SDCC_4 548
+#define SM8450_SLAVE_SPSS_CFG 549
+#define SM8450_SLAVE_TCSR 550
+#define SM8450_SLAVE_TLMM 551
+#define SM8450_SLAVE_TME_CFG 552
+#define SM8450_SLAVE_UFS_MEM_CFG 553
+#define SM8450_SLAVE_USB3_0 554
+#define SM8450_SLAVE_VENUS_CFG 555
+#define SM8450_SLAVE_VSENSE_CTRL_CFG 556
+#define SM8450_SLAVE_A1NOC_CFG 557
+#define SM8450_SLAVE_A1NOC_SNOC 558
+#define SM8450_SLAVE_A2NOC_CFG 559
+#define SM8450_SLAVE_A2NOC_SNOC 560
+#define SM8450_SLAVE_DDRSS_CFG 561
+#define SM8450_SLAVE_GEM_NOC_CNOC 562
+#define SM8450_SLAVE_SNOC_GEM_NOC_GC 563
+#define SM8450_SLAVE_SNOC_GEM_NOC_SF 564
+#define SM8450_SLAVE_LLCC 565
+#define SM8450_SLAVE_MNOC_HF_MEM_NOC 566
+#define SM8450_SLAVE_MNOC_SF_MEM_NOC 567
+#define SM8450_SLAVE_CNOC_MNOC_CFG 568
+#define SM8450_SLAVE_CDSP_MEM_NOC 569
+#define SM8450_SLAVE_MEM_NOC_PCIE_SNOC 570
+#define SM8450_SLAVE_PCIE_ANOC_CFG 571
+#define SM8450_SLAVE_ANOC_PCIE_GEM_NOC 572
+#define SM8450_SLAVE_SNOC_CFG 573
+#define SM8450_SLAVE_LPASS_SNOC 574
+#define SM8450_SLAVE_QUP_CORE_0 575
+#define SM8450_SLAVE_QUP_CORE_1 576
+#define SM8450_SLAVE_QUP_CORE_2 577
+#define SM8450_SLAVE_IMEM 578
+#define SM8450_SLAVE_PIMEM 579
+#define SM8450_SLAVE_SERVICE_NSP_NOC 580
+#define SM8450_SLAVE_SERVICE_A1NOC 581
+#define SM8450_SLAVE_SERVICE_A2NOC 582
+#define SM8450_SLAVE_SERVICE_CNOC 583
+#define SM8450_SLAVE_SERVICE_MNOC 584
+#define SM8450_SLAVE_SERVICES_LPASS_AML_NOC 585
+#define SM8450_SLAVE_SERVICE_LPASS_AG_NOC 586
+#define SM8450_SLAVE_SERVICE_PCIE_ANOC 587
+#define SM8450_SLAVE_SERVICE_SNOC 588
+#define SM8450_SLAVE_PCIE_0 589
+#define SM8450_SLAVE_PCIE_1 590
+#define SM8450_SLAVE_QDSS_STM 591
+#define SM8450_SLAVE_TCU 592
+#define SM8450_MASTER_LLCC_DISP 1000
+#define SM8450_MASTER_MDP_DISP 1001
+#define SM8450_MASTER_MDP0_DISP SM8450_MASTER_MDP_DISP
+#define SM8450_MASTER_MDP1_DISP SM8450_MASTER_MDP_DISP
+#define SM8450_MASTER_MNOC_HF_MEM_NOC_DISP 1002
+#define SM8450_MASTER_MNOC_SF_MEM_NOC_DISP 1003
+#define SM8450_MASTER_ANOC_PCIE_GEM_NOC_DISP 1004
+#define SM8450_MASTER_ROTATOR_DISP 1005
+#define SM8450_SLAVE_EBI1_DISP 1512
+#define SM8450_SLAVE_LLCC_DISP 1513
+#define SM8450_SLAVE_MNOC_HF_MEM_NOC_DISP 1514
+#define SM8450_SLAVE_MNOC_SF_MEM_NOC_DISP 1515
+
+#endif
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 867535eb0ce9..ffc89c4fb120 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -645,8 +645,6 @@ struct amd_iommu {
/* DebugFS Info */
struct dentry *debugfs;
#endif
- /* IRQ notifier for IntCapXT interrupt */
- struct irq_affinity_notify intcapxt_notify;
};
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 1eacd43cb436..b10fb52ea442 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -21,6 +21,7 @@
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/cc_platform.h>
+#include <linux/iopoll.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
@@ -806,16 +807,27 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
u32 status, i;
+ u64 entry;
if (!iommu->ga_log)
return -EINVAL;
- status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
-
/* Check if already running */
- if (status & (MMIO_STATUS_GALOG_RUN_MASK))
+ status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
+ if (WARN_ON(status & (MMIO_STATUS_GALOG_RUN_MASK)))
return 0;
+ entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
+ memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
+ &entry, sizeof(entry));
+ entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
+ (BIT_ULL(52)-1)) & ~7ULL;
+ memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
+ &entry, sizeof(entry));
+ writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
+
iommu_feature_enable(iommu, CONTROL_GAINT_EN);
iommu_feature_enable(iommu, CONTROL_GALOG_EN);
@@ -823,9 +835,10 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
if (status & (MMIO_STATUS_GALOG_RUN_MASK))
break;
+ udelay(10);
}
- if (i >= LOOP_TIMEOUT)
+ if (WARN_ON(i >= LOOP_TIMEOUT))
return -EINVAL;
#endif /* CONFIG_IRQ_REMAP */
return 0;
@@ -834,8 +847,6 @@ static int iommu_ga_log_enable(struct amd_iommu *iommu)
static int iommu_init_ga_log(struct amd_iommu *iommu)
{
#ifdef CONFIG_IRQ_REMAP
- u64 entry;
-
if (!AMD_IOMMU_GUEST_IR_VAPIC(amd_iommu_guest_ir))
return 0;
@@ -849,16 +860,6 @@ static int iommu_init_ga_log(struct amd_iommu *iommu)
if (!iommu->ga_log_tail)
goto err_out;
- entry = iommu_virt_to_phys(iommu->ga_log) | GA_LOG_SIZE_512;
- memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_BASE_OFFSET,
- &entry, sizeof(entry));
- entry = (iommu_virt_to_phys(iommu->ga_log_tail) &
- (BIT_ULL(52)-1)) & ~7ULL;
- memcpy_toio(iommu->mmio_base + MMIO_GA_LOG_TAIL_OFFSET,
- &entry, sizeof(entry));
- writel(0x00, iommu->mmio_base + MMIO_GA_HEAD_OFFSET);
- writel(0x00, iommu->mmio_base + MMIO_GA_TAIL_OFFSET);
-
return 0;
err_out:
free_ga_log(iommu);
@@ -1523,7 +1524,7 @@ static void amd_iommu_ats_write_check_workaround(struct amd_iommu *iommu)
}
/*
- * This function clues the initialization function for one IOMMU
+ * This function glues the initialization function for one IOMMU
* together and also allocates the command buffer and programs the
* hardware. It does NOT enable the IOMMU. This is done afterwards.
*/
@@ -2016,48 +2017,18 @@ union intcapxt {
};
} __attribute__ ((packed));
-/*
- * There isn't really any need to mask/unmask at the irqchip level because
- * the 64-bit INTCAPXT registers can be updated atomically without tearing
- * when the affinity is being updated.
- */
-static void intcapxt_unmask_irq(struct irq_data *data)
-{
-}
-
-static void intcapxt_mask_irq(struct irq_data *data)
-{
-}
static struct irq_chip intcapxt_controller;
static int intcapxt_irqdomain_activate(struct irq_domain *domain,
struct irq_data *irqd, bool reserve)
{
- struct amd_iommu *iommu = irqd->chip_data;
- struct irq_cfg *cfg = irqd_cfg(irqd);
- union intcapxt xt;
-
- xt.capxt = 0ULL;
- xt.dest_mode_logical = apic->dest_mode_logical;
- xt.vector = cfg->vector;
- xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
- xt.destid_24_31 = cfg->dest_apicid >> 24;
-
- /**
- * Current IOMMU implemtation uses the same IRQ for all
- * 3 IOMMU interrupts.
- */
- writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
- writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
- writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
return 0;
}
static void intcapxt_irqdomain_deactivate(struct irq_domain *domain,
struct irq_data *irqd)
{
- intcapxt_mask_irq(irqd);
}
@@ -2091,6 +2062,38 @@ static void intcapxt_irqdomain_free(struct irq_domain *domain, unsigned int virq
irq_domain_free_irqs_top(domain, virq, nr_irqs);
}
+
+static void intcapxt_unmask_irq(struct irq_data *irqd)
+{
+ struct amd_iommu *iommu = irqd->chip_data;
+ struct irq_cfg *cfg = irqd_cfg(irqd);
+ union intcapxt xt;
+
+ xt.capxt = 0ULL;
+ xt.dest_mode_logical = apic->dest_mode_logical;
+ xt.vector = cfg->vector;
+ xt.destid_0_23 = cfg->dest_apicid & GENMASK(23, 0);
+ xt.destid_24_31 = cfg->dest_apicid >> 24;
+
+ /*
+ * Current IOMMU implementation uses the same IRQ for all
+ * 3 IOMMU interrupts.
+ */
+ writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
+ writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
+ writeq(xt.capxt, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+}
+
+static void intcapxt_mask_irq(struct irq_data *irqd)
+{
+ struct amd_iommu *iommu = irqd->chip_data;
+
+ writeq(0, iommu->mmio_base + MMIO_INTCAPXT_EVT_OFFSET);
+ writeq(0, iommu->mmio_base + MMIO_INTCAPXT_PPR_OFFSET);
+ writeq(0, iommu->mmio_base + MMIO_INTCAPXT_GALOG_OFFSET);
+}
+
+
static int intcapxt_set_affinity(struct irq_data *irqd,
const struct cpumask *mask, bool force)
{
@@ -2100,8 +2103,12 @@ static int intcapxt_set_affinity(struct irq_data *irqd,
ret = parent->chip->irq_set_affinity(parent, mask, force);
if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
return ret;
+ return 0;
+}
- return intcapxt_irqdomain_activate(irqd->domain, irqd, false);
+static int intcapxt_set_wake(struct irq_data *irqd, unsigned int on)
+{
+ return on ? -EOPNOTSUPP : 0;
}
static struct irq_chip intcapxt_controller = {
@@ -2111,7 +2118,8 @@ static struct irq_chip intcapxt_controller = {
.irq_ack = irq_chip_ack_parent,
.irq_retrigger = irq_chip_retrigger_hierarchy,
.irq_set_affinity = intcapxt_set_affinity,
- .flags = IRQCHIP_SKIP_SET_WAKE,
+ .irq_set_wake = intcapxt_set_wake,
+ .flags = IRQCHIP_MASK_ON_SUSPEND,
};
static const struct irq_domain_ops intcapxt_domain_ops = {
@@ -2173,7 +2181,6 @@ static int iommu_setup_intcapxt(struct amd_iommu *iommu)
return ret;
}
- iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
return 0;
}
@@ -2196,6 +2203,10 @@ static int iommu_init_irq(struct amd_iommu *iommu)
iommu->int_enabled = true;
enable_faults:
+
+ if (amd_iommu_xt_mode == IRQ_REMAP_X2APIC_MODE)
+ iommu_feature_enable(iommu, CONTROL_INTCAPXT_EN);
+
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
if (iommu->ppr_log != NULL)
diff --git a/drivers/iommu/amd/io_pgtable.c b/drivers/iommu/amd/io_pgtable.c
index 182c93a43efd..b1bf4125b0f7 100644
--- a/drivers/iommu/amd/io_pgtable.c
+++ b/drivers/iommu/amd/io_pgtable.c
@@ -74,87 +74,61 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
*
****************************************************************************/
-static void free_page_list(struct page *freelist)
+static void free_pt_page(u64 *pt, struct list_head *freelist)
{
- while (freelist != NULL) {
- unsigned long p = (unsigned long)page_address(freelist);
+ struct page *p = virt_to_page(pt);
- freelist = freelist->freelist;
- free_page(p);
- }
+ list_add_tail(&p->lru, freelist);
}
-static struct page *free_pt_page(unsigned long pt, struct page *freelist)
+static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
{
- struct page *p = virt_to_page((void *)pt);
+ u64 *p;
+ int i;
- p->freelist = freelist;
+ for (i = 0; i < 512; ++i) {
+ /* PTE present? */
+ if (!IOMMU_PTE_PRESENT(pt[i]))
+ continue;
- return p;
-}
+ /* Large PTE? */
+ if (PM_PTE_LEVEL(pt[i]) == 0 ||
+ PM_PTE_LEVEL(pt[i]) == 7)
+ continue;
-#define DEFINE_FREE_PT_FN(LVL, FN) \
-static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist) \
-{ \
- unsigned long p; \
- u64 *pt; \
- int i; \
- \
- pt = (u64 *)__pt; \
- \
- for (i = 0; i < 512; ++i) { \
- /* PTE present? */ \
- if (!IOMMU_PTE_PRESENT(pt[i])) \
- continue; \
- \
- /* Large PTE? */ \
- if (PM_PTE_LEVEL(pt[i]) == 0 || \
- PM_PTE_LEVEL(pt[i]) == 7) \
- continue; \
- \
- p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
- freelist = FN(p, freelist); \
- } \
- \
- return free_pt_page((unsigned long)pt, freelist); \
-}
+ /*
+ * Free the next level. No need to look at l1 tables here since
+ * they can only contain leaf PTEs; just free them directly.
+ */
+ p = IOMMU_PTE_PAGE(pt[i]);
+ if (lvl > 2)
+ free_pt_lvl(p, freelist, lvl - 1);
+ else
+ free_pt_page(p, freelist);
+ }
-DEFINE_FREE_PT_FN(l2, free_pt_page)
-DEFINE_FREE_PT_FN(l3, free_pt_l2)
-DEFINE_FREE_PT_FN(l4, free_pt_l3)
-DEFINE_FREE_PT_FN(l5, free_pt_l4)
-DEFINE_FREE_PT_FN(l6, free_pt_l5)
+ free_pt_page(pt, freelist);
+}
-static struct page *free_sub_pt(unsigned long root, int mode,
- struct page *freelist)
+static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
{
switch (mode) {
case PAGE_MODE_NONE:
case PAGE_MODE_7_LEVEL:
break;
case PAGE_MODE_1_LEVEL:
- freelist = free_pt_page(root, freelist);
+ free_pt_page(root, freelist);
break;
case PAGE_MODE_2_LEVEL:
- freelist = free_pt_l2(root, freelist);
- break;
case PAGE_MODE_3_LEVEL:
- freelist = free_pt_l3(root, freelist);
- break;
case PAGE_MODE_4_LEVEL:
- freelist = free_pt_l4(root, freelist);
- break;
case PAGE_MODE_5_LEVEL:
- freelist = free_pt_l5(root, freelist);
- break;
case PAGE_MODE_6_LEVEL:
- freelist = free_pt_l6(root, freelist);
+ free_pt_lvl(root, freelist, mode);
break;
default:
BUG();
}
-
- return freelist;
}
void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
@@ -362,9 +336,9 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
return pte;
}
-static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
+static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
{
- unsigned long pt;
+ u64 *pt;
int mode;
while (cmpxchg64(pte, pteval, 0) != pteval) {
@@ -373,12 +347,12 @@ static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
}
if (!IOMMU_PTE_PRESENT(pteval))
- return freelist;
+ return;
- pt = (unsigned long)IOMMU_PTE_PAGE(pteval);
+ pt = IOMMU_PTE_PAGE(pteval);
mode = IOMMU_PTE_MODE(pteval);
- return free_sub_pt(pt, mode, freelist);
+ free_sub_pt(pt, mode, freelist);
}
/*
@@ -392,7 +366,7 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
- struct page *freelist = NULL;
+ LIST_HEAD(freelist);
bool updated = false;
u64 __pte, *pte;
int ret, i, count;
@@ -412,9 +386,9 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
goto out;
for (i = 0; i < count; ++i)
- freelist = free_clear_pte(&pte[i], pte[i], freelist);
+ free_clear_pte(&pte[i], pte[i], &freelist);
- if (freelist != NULL)
+ if (!list_empty(&freelist))
updated = true;
if (count > 1) {
@@ -449,7 +423,7 @@ out:
}
/* Everything flushed out, free pages now */
- free_page_list(freelist);
+ put_pages_list(&freelist);
return ret;
}
@@ -511,8 +485,7 @@ static void v1_free_pgtable(struct io_pgtable *iop)
{
struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
struct protection_domain *dom;
- struct page *freelist = NULL;
- unsigned long root;
+ LIST_HEAD(freelist);
if (pgtable->mode == PAGE_MODE_NONE)
return;
@@ -529,10 +502,9 @@ static void v1_free_pgtable(struct io_pgtable *iop)
BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
pgtable->mode > PAGE_MODE_6_LEVEL);
- root = (unsigned long)pgtable->root;
- freelist = free_sub_pt(root, pgtable->mode, freelist);
+ free_sub_pt(pgtable->root, pgtable->mode, &freelist);
- free_page_list(freelist);
+ put_pages_list(&freelist);
}
static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
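
As an aside, a minimal sketch of the list-based freelist idiom this hunk converts to, assuming only the list_add_tail()/put_pages_list() calls already used above; the allocation loop exists purely to give the list something to hold.

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/mm.h>

/* Illustrative only: gather pages on a local list, free them in one batch. */
static void example_batched_free(void)
{
	LIST_HEAD(freelist);
	struct page *p;
	int i;

	for (i = 0; i < 4; i++) {
		p = alloc_page(GFP_KERNEL);
		if (!p)
			break;
		/* page->lru belongs to the page's owner; chain pages through it */
		list_add_tail(&p->lru, &freelist);
	}

	/* Releases every page on the list and leaves the list empty */
	put_pages_list(&freelist);
}

This replaces the old scheme of threading a singly linked chain through page->freelist, where each user had to open-code the walk-and-free loop.
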
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
index ee66d1f4cb81..a737ba5f727e 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c
@@ -220,7 +220,7 @@ static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
kfree(mn_to_smmu(mn));
}
-static struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
+static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
.invalidate_range = arm_smmu_mm_invalidate_range,
.release = arm_smmu_mm_release,
.free_notifier = arm_smmu_mmu_notifier_free,
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index f5848b351b19..6dc6d8b6b368 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3142,7 +3142,7 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
phys_addr_t doorbell;
struct device *dev = msi_desc_to_dev(desc);
struct arm_smmu_device *smmu = dev_get_drvdata(dev);
- phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];
+ phys_addr_t *cfg = arm_smmu_msi_cfg[desc->msi_index];
doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
doorbell &= MSI_CFG0_ADDR_MASK;
@@ -3154,7 +3154,6 @@ static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
{
- struct msi_desc *desc;
int ret, nvec = ARM_SMMU_MAX_MSIS;
struct device *dev = smmu->dev;
@@ -3170,7 +3169,7 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
if (!(smmu->features & ARM_SMMU_FEAT_MSI))
return;
- if (!dev->msi_domain) {
+ if (!dev->msi.domain) {
dev_info(smmu->dev, "msi_domain absent - falling back to wired irqs\n");
return;
}
@@ -3182,21 +3181,9 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
return;
}
- for_each_msi_entry(desc, dev) {
- switch (desc->platform.msi_index) {
- case EVTQ_MSI_INDEX:
- smmu->evtq.q.irq = desc->irq;
- break;
- case GERROR_MSI_INDEX:
- smmu->gerr_irq = desc->irq;
- break;
- case PRIQ_MSI_INDEX:
- smmu->priq.q.irq = desc->irq;
- break;
- default: /* Unknown */
- continue;
- }
- }
+ smmu->evtq.q.irq = msi_get_virq(dev, EVTQ_MSI_INDEX);
+ smmu->gerr_irq = msi_get_virq(dev, GERROR_MSI_INDEX);
+ smmu->priq.q.irq = msi_get_virq(dev, PRIQ_MSI_INDEX);
/* Add callback to free MSIs on teardown */
devm_add_action(dev, arm_smmu_free_msis, dev);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 4cb136f07914..cd48590ada30 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -184,7 +184,6 @@
#else
#define Q_MAX_SZ_SHIFT (PAGE_SHIFT + MAX_ORDER - 1)
#endif
-#define Q_MIN_SZ_SHIFT (PAGE_SHIFT)
/*
* Stream table.
@@ -374,7 +373,7 @@
/* Event queue */
#define EVTQ_ENT_SZ_SHIFT 5
#define EVTQ_ENT_DWORDS ((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
-#define EVTQ_MAX_SZ_SHIFT (Q_MIN_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
+#define EVTQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)
#define EVTQ_0_ID GENMASK_ULL(7, 0)
@@ -400,7 +399,7 @@
/* PRI queue */
#define PRIQ_ENT_SZ_SHIFT 4
#define PRIQ_ENT_DWORDS ((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
-#define PRIQ_MAX_SZ_SHIFT (Q_MIN_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
+#define PRIQ_MAX_SZ_SHIFT (Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)
#define PRIQ_0_SID GENMASK_ULL(31, 0)
#define PRIQ_0_SSID GENMASK_ULL(51, 32)
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index ca736b065dd0..ba6298c7140e 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -51,7 +51,7 @@ static void qcom_adreno_smmu_get_fault_info(const void *cookie,
info->fsynr1 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_FSYNR1);
info->far = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_FAR);
info->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
- info->ttbr0 = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_TTBR0);
+ info->ttbr0 = arm_smmu_cb_readq(smmu, cfg->cbndx, ARM_SMMU_CB_TTBR0);
info->contextidr = arm_smmu_cb_read(smmu, cfg->cbndx, ARM_SMMU_CB_CONTEXTIDR);
}
@@ -415,6 +415,7 @@ static const struct of_device_id __maybe_unused qcom_smmu_impl_of_match[] = {
{ .compatible = "qcom,sm8150-smmu-500" },
{ .compatible = "qcom,sm8250-smmu-500" },
{ .compatible = "qcom,sm8350-smmu-500" },
+ { .compatible = "qcom,sm8450-smmu-500" },
{ }
};
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index b42e38a0dbe2..d85d54f2b549 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -9,9 +9,12 @@
*/
#include <linux/acpi_iort.h>
+#include <linux/atomic.h>
+#include <linux/crash_dump.h>
#include <linux/device.h>
-#include <linux/dma-map-ops.h>
+#include <linux/dma-direct.h>
#include <linux/dma-iommu.h>
+#include <linux/dma-map-ops.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
@@ -20,11 +23,10 @@
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pci.h>
-#include <linux/swiotlb.h>
#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
+#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
-#include <linux/crash_dump.h>
-#include <linux/dma-direct.h>
struct iommu_dma_msi_page {
struct list_head list;
@@ -41,7 +43,19 @@ struct iommu_dma_cookie {
enum iommu_dma_cookie_type type;
union {
/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
- struct iova_domain iovad;
+ struct {
+ struct iova_domain iovad;
+
+ struct iova_fq __percpu *fq; /* Flush queue */
+ /* Number of TLB flushes that have been started */
+ atomic64_t fq_flush_start_cnt;
+ /* Number of TLB flushes that have been finished */
+ atomic64_t fq_flush_finish_cnt;
+ /* Timer to regularly empty the flush queues */
+ struct timer_list fq_timer;
+ /* 1 when timer is active, 0 when not */
+ atomic_t fq_timer_on;
+ };
/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
dma_addr_t msi_iova;
};
@@ -64,16 +78,203 @@ static int __init iommu_dma_forcedac_setup(char *str)
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
-static void iommu_dma_entry_dtor(unsigned long data)
+/* Number of entries per flush queue */
+#define IOVA_FQ_SIZE 256
+
+/* Timeout (in ms) after which entries are flushed from the queue */
+#define IOVA_FQ_TIMEOUT 10
+
+/* Flush queue entry for deferred flushing */
+struct iova_fq_entry {
+ unsigned long iova_pfn;
+ unsigned long pages;
+ struct list_head freelist;
+ u64 counter; /* Flush counter when this entry was added */
+};
+
+/* Per-CPU flush queue structure */
+struct iova_fq {
+ struct iova_fq_entry entries[IOVA_FQ_SIZE];
+ unsigned int head, tail;
+ spinlock_t lock;
+};
+
+#define fq_ring_for_each(i, fq) \
+ for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
+
+static inline bool fq_full(struct iova_fq *fq)
+{
+ assert_spin_locked(&fq->lock);
+ return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
+}
+
+static inline unsigned int fq_ring_add(struct iova_fq *fq)
+{
+ unsigned int idx = fq->tail;
+
+ assert_spin_locked(&fq->lock);
+
+ fq->tail = (idx + 1) % IOVA_FQ_SIZE;
+
+ return idx;
+}
+
+static void fq_ring_free(struct iommu_dma_cookie *cookie, struct iova_fq *fq)
+{
+ u64 counter = atomic64_read(&cookie->fq_flush_finish_cnt);
+ unsigned int idx;
+
+ assert_spin_locked(&fq->lock);
+
+ fq_ring_for_each(idx, fq) {
+
+ if (fq->entries[idx].counter >= counter)
+ break;
+
+ put_pages_list(&fq->entries[idx].freelist);
+ free_iova_fast(&cookie->iovad,
+ fq->entries[idx].iova_pfn,
+ fq->entries[idx].pages);
+
+ fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
+ }
+}
+
+static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
{
- struct page *freelist = (struct page *)data;
+ atomic64_inc(&cookie->fq_flush_start_cnt);
+ cookie->fq_domain->ops->flush_iotlb_all(cookie->fq_domain);
+ atomic64_inc(&cookie->fq_flush_finish_cnt);
+}
+
+static void fq_flush_timeout(struct timer_list *t)
+{
+ struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
+ int cpu;
+
+ atomic_set(&cookie->fq_timer_on, 0);
+ fq_flush_iotlb(cookie);
+
+ for_each_possible_cpu(cpu) {
+ unsigned long flags;
+ struct iova_fq *fq;
+
+ fq = per_cpu_ptr(cookie->fq, cpu);
+ spin_lock_irqsave(&fq->lock, flags);
+ fq_ring_free(cookie, fq);
+ spin_unlock_irqrestore(&fq->lock, flags);
+ }
+}
+
+static void queue_iova(struct iommu_dma_cookie *cookie,
+ unsigned long pfn, unsigned long pages,
+ struct list_head *freelist)
+{
+ struct iova_fq *fq;
+ unsigned long flags;
+ unsigned int idx;
+
+ /*
+ * Order against the IOMMU driver's pagetable update from unmapping
+ * @pte, to guarantee that fq_flush_iotlb() observes that update if called
+ * from a different CPU before we release the lock below. Full barrier
+ * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
+ * written fq state here.
+ */
+ smp_mb();
- while (freelist) {
- unsigned long p = (unsigned long)page_address(freelist);
+ fq = raw_cpu_ptr(cookie->fq);
+ spin_lock_irqsave(&fq->lock, flags);
+
+ /*
+ * First remove all entries from the flush queue that have already been
+ * flushed out on another CPU. This makes the fq_full() check below less
+ * likely to be true.
+ */
+ fq_ring_free(cookie, fq);
- freelist = freelist->freelist;
- free_page(p);
+ if (fq_full(fq)) {
+ fq_flush_iotlb(cookie);
+ fq_ring_free(cookie, fq);
}
+
+ idx = fq_ring_add(fq);
+
+ fq->entries[idx].iova_pfn = pfn;
+ fq->entries[idx].pages = pages;
+ fq->entries[idx].counter = atomic64_read(&cookie->fq_flush_start_cnt);
+ list_splice(freelist, &fq->entries[idx].freelist);
+
+ spin_unlock_irqrestore(&fq->lock, flags);
+
+ /* Avoid false sharing as much as possible. */
+ if (!atomic_read(&cookie->fq_timer_on) &&
+ !atomic_xchg(&cookie->fq_timer_on, 1))
+ mod_timer(&cookie->fq_timer,
+ jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+}
+
+static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
+{
+ int cpu, idx;
+
+ if (!cookie->fq)
+ return;
+
+ del_timer_sync(&cookie->fq_timer);
+ /* The IOVAs will be torn down separately, so just free our queued pages */
+ for_each_possible_cpu(cpu) {
+ struct iova_fq *fq = per_cpu_ptr(cookie->fq, cpu);
+
+ fq_ring_for_each(idx, fq)
+ put_pages_list(&fq->entries[idx].freelist);
+ }
+
+ free_percpu(cookie->fq);
+}
+
+/* sysfs updates are serialised by the mutex of the group owning @domain */
+int iommu_dma_init_fq(struct iommu_domain *domain)
+{
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_fq __percpu *queue;
+ int i, cpu;
+
+ if (cookie->fq_domain)
+ return 0;
+
+ atomic64_set(&cookie->fq_flush_start_cnt, 0);
+ atomic64_set(&cookie->fq_flush_finish_cnt, 0);
+
+ queue = alloc_percpu(struct iova_fq);
+ if (!queue) {
+ pr_warn("iova flush queue initialization failed\n");
+ return -ENOMEM;
+ }
+
+ for_each_possible_cpu(cpu) {
+ struct iova_fq *fq = per_cpu_ptr(queue, cpu);
+
+ fq->head = 0;
+ fq->tail = 0;
+
+ spin_lock_init(&fq->lock);
+
+ for (i = 0; i < IOVA_FQ_SIZE; i++)
+ INIT_LIST_HEAD(&fq->entries[i].freelist);
+ }
+
+ cookie->fq = queue;
+
+ timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
+ atomic_set(&cookie->fq_timer_on, 0);
+ /*
+ * Prevent incomplete fq state being observable. Pairs with path from
+ * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
+ */
+ smp_wmb();
+ WRITE_ONCE(cookie->fq_domain, domain);
+ return 0;
}
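
To make the ring indexing above concrete, a standalone sketch (plain C, userspace) of the same head/tail scheme used by struct iova_fq; IOVA_FQ_SIZE is 256 in the patch, shrunk here to keep the demo short. One slot is always left unused so that a full ring and an empty ring stay distinguishable.

#include <stdbool.h>
#include <stdio.h>

#define FQ_SIZE 8 /* stands in for IOVA_FQ_SIZE */

struct ring {
	unsigned int head, tail;
};

static bool ring_full(const struct ring *r)
{
	/* Mirrors fq_full(): advancing tail would collide with head */
	return ((r->tail + 1) % FQ_SIZE) == r->head;
}

static bool ring_push(struct ring *r)
{
	if (ring_full(r))
		return false;
	r->tail = (r->tail + 1) % FQ_SIZE; /* mirrors fq_ring_add() */
	return true;
}

static bool ring_pop(struct ring *r)
{
	if (r->head == r->tail) /* empty */
		return false;
	r->head = (r->head + 1) % FQ_SIZE; /* mirrors fq_ring_free() */
	return true;
}

int main(void)
{
	struct ring r = { 0, 0 };
	int pushed = 0;

	while (ring_push(&r))
		pushed++;
	printf("capacity: %d of %d slots\n", pushed, FQ_SIZE); /* 7 of 8 */
	while (ring_pop(&r))
		;
	return 0;
}
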
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
@@ -156,8 +357,10 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
if (!cookie)
return;
- if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule) {
+ iommu_dma_free_fq(cookie);
put_iova_domain(&cookie->iovad);
+ }
list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
list_del(&msi->list);
@@ -294,17 +497,6 @@ static int iova_reserve_iommu_regions(struct device *dev,
return ret;
}
-static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
-{
- struct iommu_dma_cookie *cookie;
- struct iommu_domain *domain;
-
- cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
- domain = cookie->fq_domain;
-
- domain->ops->flush_iotlb_all(domain);
-}
-
static bool dev_is_untrusted(struct device *dev)
{
return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
@@ -315,30 +507,6 @@ static bool dev_use_swiotlb(struct device *dev)
return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
}
-/* sysfs updates are serialised by the mutex of the group owning @domain */
-int iommu_dma_init_fq(struct iommu_domain *domain)
-{
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
- int ret;
-
- if (cookie->fq_domain)
- return 0;
-
- ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all,
- iommu_dma_entry_dtor);
- if (ret) {
- pr_warn("iova flush queue initialization failed\n");
- return ret;
- }
- /*
- * Prevent incomplete iovad->fq being observable. Pairs with path from
- * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
- */
- smp_wmb();
- WRITE_ONCE(cookie->fq_domain, domain);
- return 0;
-}
-
/**
* iommu_dma_init_domain - Initialise a DMA mapping domain
* @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -442,14 +610,6 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
shift = iova_shift(iovad);
iova_len = size >> shift;
- /*
- * Freeing non-power-of-two-sized allocations back into the IOVA caches
- * will come back to bite us badly, so we have to waste a bit of space
- * rounding up anything cacheable to make sure that can't happen. The
- * order of the unadjusted size will still match upon freeing.
- */
- if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
- iova_len = roundup_pow_of_two(iova_len);
dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);
@@ -477,9 +637,9 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
if (cookie->type == IOMMU_DMA_MSI_COOKIE)
cookie->msi_iova -= size;
else if (gather && gather->queued)
- queue_iova(iovad, iova_pfn(iovad, iova),
+ queue_iova(cookie, iova_pfn(iovad, iova),
size >> iova_shift(iovad),
- (unsigned long)gather->freelist);
+ &gather->freelist);
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index b6a8f3282411..92fea3fbbb11 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -133,11 +133,6 @@ static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
-static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
-{
- return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-
static inline unsigned long mm_to_dma_pfn(unsigned long mm_pfn)
{
return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
@@ -1280,10 +1275,6 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
unsigned long last_pfn,
int retain_level)
{
- BUG_ON(!domain_pfn_supported(domain, start_pfn));
- BUG_ON(!domain_pfn_supported(domain, last_pfn));
- BUG_ON(start_pfn > last_pfn);
-
dma_pte_clear_range(domain, start_pfn, last_pfn);
/* We don't need lock here; nobody else touches the iova range */
@@ -1303,35 +1294,30 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
know the hardware page-walk will no longer touch them.
The 'pte' argument is the *parent* PTE, pointing to the page that is to
be freed. */
-static struct page *dma_pte_list_pagetables(struct dmar_domain *domain,
- int level, struct dma_pte *pte,
- struct page *freelist)
+static void dma_pte_list_pagetables(struct dmar_domain *domain,
+ int level, struct dma_pte *pte,
+ struct list_head *freelist)
{
struct page *pg;
pg = pfn_to_page(dma_pte_addr(pte) >> PAGE_SHIFT);
- pg->freelist = freelist;
- freelist = pg;
+ list_add_tail(&pg->lru, freelist);
if (level == 1)
- return freelist;
+ return;
pte = page_address(pg);
do {
if (dma_pte_present(pte) && !dma_pte_superpage(pte))
- freelist = dma_pte_list_pagetables(domain, level - 1,
- pte, freelist);
+ dma_pte_list_pagetables(domain, level - 1, pte, freelist);
pte++;
} while (!first_pte_in_page(pte));
-
- return freelist;
}
-static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
- struct dma_pte *pte, unsigned long pfn,
- unsigned long start_pfn,
- unsigned long last_pfn,
- struct page *freelist)
+static void dma_pte_clear_level(struct dmar_domain *domain, int level,
+ struct dma_pte *pte, unsigned long pfn,
+ unsigned long start_pfn, unsigned long last_pfn,
+ struct list_head *freelist)
{
struct dma_pte *first_pte = NULL, *last_pte = NULL;
@@ -1350,7 +1336,7 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
/* These subordinate page tables are going away entirely. Don't
bother to clear them; we're just going to *free* them. */
if (level > 1 && !dma_pte_superpage(pte))
- freelist = dma_pte_list_pagetables(domain, level - 1, pte, freelist);
+ dma_pte_list_pagetables(domain, level - 1, pte, freelist);
dma_clear_pte(pte);
if (!first_pte)
@@ -1358,10 +1344,10 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
last_pte = pte;
} else if (level > 1) {
/* Recurse down into a level that isn't *entirely* obsolete */
- freelist = dma_pte_clear_level(domain, level - 1,
- phys_to_virt(dma_pte_addr(pte)),
- level_pfn, start_pfn, last_pfn,
- freelist);
+ dma_pte_clear_level(domain, level - 1,
+ phys_to_virt(dma_pte_addr(pte)),
+ level_pfn, start_pfn, last_pfn,
+ freelist);
}
next:
pfn = level_pfn + level_size(level);
@@ -1370,47 +1356,28 @@ next:
if (first_pte)
domain_flush_cache(domain, first_pte,
(void *)++last_pte - (void *)first_pte);
-
- return freelist;
}
/* We can't just free the pages because the IOMMU may still be walking
the page tables, and may have cached the intermediate levels. The
pages can only be freed after the IOTLB flush has been done. */
-static struct page *domain_unmap(struct dmar_domain *domain,
- unsigned long start_pfn,
- unsigned long last_pfn,
- struct page *freelist)
+static void domain_unmap(struct dmar_domain *domain, unsigned long start_pfn,
+ unsigned long last_pfn, struct list_head *freelist)
{
BUG_ON(!domain_pfn_supported(domain, start_pfn));
BUG_ON(!domain_pfn_supported(domain, last_pfn));
BUG_ON(start_pfn > last_pfn);
/* we don't need lock here; nobody else touches the iova range */
- freelist = dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
- domain->pgd, 0, start_pfn, last_pfn,
- freelist);
+ dma_pte_clear_level(domain, agaw_to_level(domain->agaw),
+ domain->pgd, 0, start_pfn, last_pfn, freelist);
/* free pgd */
if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
struct page *pgd_page = virt_to_page(domain->pgd);
- pgd_page->freelist = freelist;
- freelist = pgd_page;
-
+ list_add_tail(&pgd_page->lru, freelist);
domain->pgd = NULL;
}
-
- return freelist;
-}
-
-static void dma_free_pagelist(struct page *freelist)
-{
- struct page *pg;
-
- while ((pg = freelist)) {
- freelist = pg->freelist;
- free_pgtable_page(page_address(pg));
- }
}
/* iommu handling */
@@ -1878,17 +1845,16 @@ static void iommu_disable_translation(struct intel_iommu *iommu)
static int iommu_init_domains(struct intel_iommu *iommu)
{
- u32 ndomains, nlongs;
+ u32 ndomains;
size_t size;
ndomains = cap_ndoms(iommu->cap);
pr_debug("%s: Number of Domains supported <%d>\n",
iommu->name, ndomains);
- nlongs = BITS_TO_LONGS(ndomains);
spin_lock_init(&iommu->lock);
- iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
+ iommu->domain_ids = bitmap_zalloc(ndomains, GFP_KERNEL);
if (!iommu->domain_ids)
return -ENOMEM;
@@ -1903,7 +1869,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
if (!iommu->domains || !iommu->domains[0]) {
pr_err("%s: Allocating domain array failed\n",
iommu->name);
- kfree(iommu->domain_ids);
+ bitmap_free(iommu->domain_ids);
kfree(iommu->domains);
iommu->domain_ids = NULL;
iommu->domains = NULL;
@@ -1964,7 +1930,7 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
for (i = 0; i < elems; i++)
kfree(iommu->domains[i]);
kfree(iommu->domains);
- kfree(iommu->domain_ids);
+ bitmap_free(iommu->domain_ids);
iommu->domains = NULL;
iommu->domain_ids = NULL;
}
@@ -2095,11 +2061,10 @@ static void domain_exit(struct dmar_domain *domain)
domain_remove_dev_info(domain);
if (domain->pgd) {
- struct page *freelist;
+ LIST_HEAD(freelist);
- freelist = domain_unmap(domain, 0,
- DOMAIN_MAX_PFN(domain->gaw), NULL);
- dma_free_pagelist(freelist);
+ domain_unmap(domain, 0, DOMAIN_MAX_PFN(domain->gaw), &freelist);
+ put_pages_list(&freelist);
}
free_domain_mem(domain);
@@ -2112,10 +2077,10 @@ static void domain_exit(struct dmar_domain *domain)
*/
static inline unsigned long context_get_sm_pds(struct pasid_table *table)
{
- int pds, max_pde;
+ unsigned long pds, max_pde;
max_pde = table->max_pasid >> PASID_PDE_SHIFT;
- pds = find_first_bit((unsigned long *)&max_pde, MAX_NR_PASID_BITS);
+ pds = find_first_bit(&max_pde, MAX_NR_PASID_BITS);
if (pds < 7)
return 0;
@@ -4192,19 +4157,17 @@ static int intel_iommu_memory_notifier(struct notifier_block *nb,
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
- struct page *freelist;
+ LIST_HEAD(freelist);
- freelist = domain_unmap(si_domain,
- start_vpfn, last_vpfn,
- NULL);
+ domain_unmap(si_domain, start_vpfn, last_vpfn, &freelist);
rcu_read_lock();
for_each_active_iommu(iommu, drhd)
iommu_flush_iotlb_psi(iommu, si_domain,
start_vpfn, mhp->nr_pages,
- !freelist, 0);
+ list_empty(&freelist), 0);
rcu_read_unlock();
- dma_free_pagelist(freelist);
+ put_pages_list(&freelist);
}
break;
}
@@ -5211,8 +5174,7 @@ static size_t intel_iommu_unmap(struct iommu_domain *domain,
start_pfn = iova >> VTD_PAGE_SHIFT;
last_pfn = (iova + size - 1) >> VTD_PAGE_SHIFT;
- gather->freelist = domain_unmap(dmar_domain, start_pfn,
- last_pfn, gather->freelist);
+ domain_unmap(dmar_domain, start_pfn, last_pfn, &gather->freelist);
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
@@ -5248,9 +5210,10 @@ static void intel_iommu_tlb_sync(struct iommu_domain *domain,
for_each_domain_iommu(iommu_id, dmar_domain)
iommu_flush_iotlb_psi(g_iommus[iommu_id], dmar_domain,
- start_pfn, nrpages, !gather->freelist, 0);
+ start_pfn, nrpages,
+ list_empty(&gather->freelist), 0);
- dma_free_pagelist(gather->freelist);
+ put_pages_list(&gather->freelist);
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
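
The domain_ids hunks above replace the open-coded kcalloc(BITS_TO_LONGS(n), sizeof(unsigned long)) with bitmap_zalloc()/bitmap_free(), which keeps the sizing arithmetic in one place. A userspace sketch of what that arithmetic does (minimal stand-ins for the kernel helpers; the real bitmap_zalloc() also takes a gfp_t):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long *bitmap_zalloc(unsigned int nbits)
{
	return calloc(BITS_TO_LONGS(nbits), sizeof(unsigned long));
}

static void bitmap_free(unsigned long *bitmap)
{
	free(bitmap);
}

int main(void)
{
	unsigned int ndomains = 65536;	/* e.g. cap_ndoms() on a big IOMMU */
	unsigned long *ids = bitmap_zalloc(ndomains);

	if (!ids)
		return 1;
	printf("%u domain ids -> %zu longs\n",
	       ndomains, (size_t)BITS_TO_LONGS(ndomains));
	bitmap_free(ids);
	return 0;
}
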
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index f912fe45bea2..a67319597884 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -569,9 +569,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
fn, &intel_ir_domain_ops,
iommu);
if (!iommu->ir_domain) {
- irq_domain_free_fwnode(fn);
pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
- goto out_free_bitmap;
+ goto out_free_fwnode;
}
iommu->ir_msi_domain =
arch_create_remap_msi_irq_domain(iommu->ir_domain,
@@ -595,7 +594,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
if (dmar_enable_qi(iommu)) {
pr_err("Failed to enable queued invalidation\n");
- goto out_free_bitmap;
+ goto out_free_ir_domain;
}
}
@@ -619,6 +618,14 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
return 0;
+out_free_ir_domain:
+ if (iommu->ir_msi_domain)
+ irq_domain_remove(iommu->ir_msi_domain);
+ iommu->ir_msi_domain = NULL;
+ irq_domain_remove(iommu->ir_domain);
+ iommu->ir_domain = NULL;
+out_free_fwnode:
+ irq_domain_free_fwnode(fn);
out_free_bitmap:
bitmap_free(bitmap);
out_free_pages:
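
The irq_remapping.c fix is a classic error-unwind repair: each failure point jumps to a label that tears down exactly what was set up before it, in reverse order of acquisition. A self-contained sketch of the idiom (the resources here are placeholders, not the driver's real objects):

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *fwnode = NULL, *bitmap = NULL, *domain = NULL;

	fwnode = malloc(16);
	if (!fwnode)
		goto out;

	bitmap = malloc(16);
	if (!bitmap)
		goto out_free_fwnode;

	domain = malloc(16);
	if (!domain)
		goto out_free_bitmap;

	printf("setup complete\n");
	/* In a driver the caller would own these; freed for a clean exit. */
	free(domain);
	free(bitmap);
	free(fwnode);
	return 0;

	/* Unwind strictly in reverse order of acquisition. */
out_free_bitmap:
	free(bitmap);
out_free_fwnode:
	free(fwnode);
out:
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}
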
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index bfb6acb651e5..be066c1503d3 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -246,13 +246,17 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
__GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
else if (lvl == 2)
table = kmem_cache_zalloc(data->l2_tables, gfp);
+
+ if (!table)
+ return NULL;
+
phys = virt_to_phys(table);
if (phys != (arm_v7s_iopte)phys) {
/* Doesn't fit in PTE */
dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
goto out_free;
}
- if (table && !cfg->coherent_walk) {
+ if (!cfg->coherent_walk) {
dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
if (dma_mapping_error(dev, dma))
goto out_free;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index dd9e47189d0d..94ff319ae8ac 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -315,11 +315,12 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
arm_lpae_iopte *ptep,
arm_lpae_iopte curr,
- struct io_pgtable_cfg *cfg)
+ struct arm_lpae_io_pgtable *data)
{
arm_lpae_iopte old, new;
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
- new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
+ new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
new |= ARM_LPAE_PTE_NSTABLE;
@@ -380,7 +381,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
if (!cptep)
return -ENOMEM;
- pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
+ pte = arm_lpae_install_table(cptep, ptep, 0, data);
if (pte)
__arm_lpae_free_pages(cptep, tblsz, cfg);
} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
@@ -592,7 +593,7 @@ static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
}
- pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
+ pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
if (pte != blk_pte) {
__arm_lpae_free_pages(tablep, tablesz, cfg);
/*
diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c
index 50ee27bbd04e..06fee7416816 100644
--- a/drivers/iommu/ioasid.c
+++ b/drivers/iommu/ioasid.c
@@ -349,6 +349,7 @@ EXPORT_SYMBOL_GPL(ioasid_alloc);
/**
* ioasid_get - obtain a reference to the IOASID
+ * @ioasid: the ID to get
*/
void ioasid_get(ioasid_t ioasid)
{
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index dd7863e453a5..107dcf5938d6 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -207,9 +207,14 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
static void dev_iommu_free(struct device *dev)
{
- iommu_fwspec_free(dev);
- kfree(dev->iommu);
+ struct dev_iommu *param = dev->iommu;
+
dev->iommu = NULL;
+ if (param->fwspec) {
+ fwnode_handle_put(param->fwspec->iommu_fwnode);
+ kfree(param->fwspec);
+ }
+ kfree(param);
}
static int __iommu_probe_device(struct device *dev, struct list_head *group_list)
@@ -288,11 +293,11 @@ int iommu_probe_device(struct device *dev)
*/
mutex_lock(&group->mutex);
iommu_alloc_default_domain(group, dev);
- mutex_unlock(&group->mutex);
if (group->default_domain) {
ret = __iommu_attach_device(group->default_domain, dev);
if (ret) {
+ mutex_unlock(&group->mutex);
iommu_group_put(group);
goto err_release;
}
@@ -300,6 +305,7 @@ int iommu_probe_device(struct device *dev)
iommu_create_device_direct_mappings(group, dev);
+ mutex_unlock(&group->mutex);
iommu_group_put(group);
if (ops->probe_finalize)
@@ -979,17 +985,6 @@ static int iommu_group_device_count(struct iommu_group *group)
return ret;
}
-/**
- * iommu_group_for_each_dev - iterate over each device in the group
- * @group: the group
- * @data: caller opaque data to be passed to callback function
- * @fn: caller supplied callback function
- *
- * This function is called by group users to iterate over group devices.
- * Callers should hold a reference count to the group during callback.
- * The group->mutex is held across callbacks, which will block calls to
- * iommu_group_add/remove_device.
- */
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
@@ -1004,7 +999,17 @@ static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
return ret;
}
-
+/**
+ * iommu_group_for_each_dev - iterate over each device in the group
+ * @group: the group
+ * @data: caller opaque data to be passed to callback function
+ * @fn: caller supplied callback function
+ *
+ * This function is called by group users to iterate over group devices.
+ * Callers should hold a reference count to the group during callback.
+ * The group->mutex is held across callbacks, which will block calls to
+ * iommu_group_add/remove_device.
+ */
int iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
@@ -3031,6 +3036,7 @@ EXPORT_SYMBOL_GPL(iommu_aux_get_pasid);
* iommu_sva_bind_device() - Bind a process address space to a device
* @dev: the device
* @mm: the mm to bind, caller must hold a reference to it
+ * @drvdata: opaque data pointer to pass to bind callback
*
* Create a bond between device and address space, allowing the device to access
* the mm using the returned PASID. If a bond already exists between @device and
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 9e8bc802ac05..b28c9435b898 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -24,8 +24,6 @@ static unsigned long iova_rcache_get(struct iova_domain *iovad,
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
-static void fq_destroy_all_entries(struct iova_domain *iovad);
-static void fq_flush_timeout(struct timer_list *t);
static int iova_cpuhp_dead(unsigned int cpu, struct hlist_node *node)
{
@@ -63,8 +61,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = 1UL << (32 - iova_shift(iovad));
iovad->max32_alloc_size = iovad->dma_32bit_pfn;
- iovad->flush_cb = NULL;
- iovad->fq = NULL;
iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
@@ -73,62 +69,6 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
}
EXPORT_SYMBOL_GPL(init_iova_domain);
-static bool has_iova_flush_queue(struct iova_domain *iovad)
-{
- return !!iovad->fq;
-}
-
-static void free_iova_flush_queue(struct iova_domain *iovad)
-{
- if (!has_iova_flush_queue(iovad))
- return;
-
- if (timer_pending(&iovad->fq_timer))
- del_timer(&iovad->fq_timer);
-
- fq_destroy_all_entries(iovad);
-
- free_percpu(iovad->fq);
-
- iovad->fq = NULL;
- iovad->flush_cb = NULL;
- iovad->entry_dtor = NULL;
-}
-
-int init_iova_flush_queue(struct iova_domain *iovad,
- iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
-{
- struct iova_fq __percpu *queue;
- int cpu;
-
- atomic64_set(&iovad->fq_flush_start_cnt, 0);
- atomic64_set(&iovad->fq_flush_finish_cnt, 0);
-
- queue = alloc_percpu(struct iova_fq);
- if (!queue)
- return -ENOMEM;
-
- iovad->flush_cb = flush_cb;
- iovad->entry_dtor = entry_dtor;
-
- for_each_possible_cpu(cpu) {
- struct iova_fq *fq;
-
- fq = per_cpu_ptr(queue, cpu);
- fq->head = 0;
- fq->tail = 0;
-
- spin_lock_init(&fq->lock);
- }
-
- iovad->fq = queue;
-
- timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
- atomic_set(&iovad->fq_timer_on, 0);
-
- return 0;
-}
-
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long limit_pfn)
{
@@ -497,6 +437,15 @@ alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long iova_pfn;
struct iova *new_iova;
+ /*
+ * Freeing non-power-of-two-sized allocations back into the IOVA caches
+ * will come back to bite us badly, so we have to waste a bit of space
+ * rounding up anything cacheable to make sure that can't happen. The
+ * order of the unadjusted size will still match upon freeing.
+ */
+ if (size < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
+ size = roundup_pow_of_two(size);
+
iova_pfn = iova_rcache_get(iovad, size, limit_pfn + 1);
if (iova_pfn)
return iova_pfn;
@@ -539,144 +488,6 @@ free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
}
EXPORT_SYMBOL_GPL(free_iova_fast);
-#define fq_ring_for_each(i, fq) \
- for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
-
-static inline bool fq_full(struct iova_fq *fq)
-{
- assert_spin_locked(&fq->lock);
- return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
-}
-
-static inline unsigned fq_ring_add(struct iova_fq *fq)
-{
- unsigned idx = fq->tail;
-
- assert_spin_locked(&fq->lock);
-
- fq->tail = (idx + 1) % IOVA_FQ_SIZE;
-
- return idx;
-}
-
-static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
-{
- u64 counter = atomic64_read(&iovad->fq_flush_finish_cnt);
- unsigned idx;
-
- assert_spin_locked(&fq->lock);
-
- fq_ring_for_each(idx, fq) {
-
- if (fq->entries[idx].counter >= counter)
- break;
-
- if (iovad->entry_dtor)
- iovad->entry_dtor(fq->entries[idx].data);
-
- free_iova_fast(iovad,
- fq->entries[idx].iova_pfn,
- fq->entries[idx].pages);
-
- fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
- }
-}
-
-static void iova_domain_flush(struct iova_domain *iovad)
-{
- atomic64_inc(&iovad->fq_flush_start_cnt);
- iovad->flush_cb(iovad);
- atomic64_inc(&iovad->fq_flush_finish_cnt);
-}
-
-static void fq_destroy_all_entries(struct iova_domain *iovad)
-{
- int cpu;
-
- /*
- * This code runs when the iova_domain is being destroyed, so don't
- * bother to free iovas, just call the entry_dtor on all remaining
- * entries.
- */
- if (!iovad->entry_dtor)
- return;
-
- for_each_possible_cpu(cpu) {
- struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
- int idx;
-
- fq_ring_for_each(idx, fq)
- iovad->entry_dtor(fq->entries[idx].data);
- }
-}
-
-static void fq_flush_timeout(struct timer_list *t)
-{
- struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
- int cpu;
-
- atomic_set(&iovad->fq_timer_on, 0);
- iova_domain_flush(iovad);
-
- for_each_possible_cpu(cpu) {
- unsigned long flags;
- struct iova_fq *fq;
-
- fq = per_cpu_ptr(iovad->fq, cpu);
- spin_lock_irqsave(&fq->lock, flags);
- fq_ring_free(iovad, fq);
- spin_unlock_irqrestore(&fq->lock, flags);
- }
-}
-
-void queue_iova(struct iova_domain *iovad,
- unsigned long pfn, unsigned long pages,
- unsigned long data)
-{
- struct iova_fq *fq;
- unsigned long flags;
- unsigned idx;
-
- /*
- * Order against the IOMMU driver's pagetable update from unmapping
- * @pte, to guarantee that iova_domain_flush() observes that if called
- * from a different CPU before we release the lock below. Full barrier
- * so it also pairs with iommu_dma_init_fq() to avoid seeing partially
- * written fq state here.
- */
- smp_mb();
-
- fq = raw_cpu_ptr(iovad->fq);
- spin_lock_irqsave(&fq->lock, flags);
-
- /*
- * First remove all entries from the flush queue that have already been
- * flushed out on another CPU. This makes the fq_full() check below less
- * likely to be true.
- */
- fq_ring_free(iovad, fq);
-
- if (fq_full(fq)) {
- iova_domain_flush(iovad);
- fq_ring_free(iovad, fq);
- }
-
- idx = fq_ring_add(fq);
-
- fq->entries[idx].iova_pfn = pfn;
- fq->entries[idx].pages = pages;
- fq->entries[idx].data = data;
- fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
-
- spin_unlock_irqrestore(&fq->lock, flags);
-
- /* Avoid false sharing as much as possible. */
- if (!atomic_read(&iovad->fq_timer_on) &&
- !atomic_xchg(&iovad->fq_timer_on, 1))
- mod_timer(&iovad->fq_timer,
- jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
-}
-
/**
* put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
@@ -688,8 +499,6 @@ void put_iova_domain(struct iova_domain *iovad)
cpuhp_state_remove_instance_nocalls(CPUHP_IOMMU_IOVA_DEAD,
&iovad->cpuhp_dead);
-
- free_iova_flush_queue(iovad);
free_iova_rcaches(iovad);
rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node)
free_iova_mem(iova);
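
The block removed from iova.c was, at its core, a per-CPU circular ring plus a flush timer, and the ring arithmetic survives unchanged in its new home behind the cookie-based queue_iova() seen earlier. A standalone sketch of that head/tail ring, with the queue size reduced to keep the demo short (the kernel uses a larger IOVA_FQ_SIZE):

#include <stdio.h>

#define FQ_SIZE 4

struct fq {
	unsigned int head, tail;
	int entries[FQ_SIZE];
};

static int fq_full(struct fq *fq)
{
	return ((fq->tail + 1) % FQ_SIZE) == fq->head;
}

static void fq_ring_add(struct fq *fq, int val)
{
	fq->entries[fq->tail] = val;
	fq->tail = (fq->tail + 1) % FQ_SIZE;
}

static void fq_ring_free(struct fq *fq)
{
	/* Drain everything, like fq_ring_free() after an IOTLB flush. */
	while (fq->head != fq->tail) {
		printf("flushing entry %d\n", fq->entries[fq->head]);
		fq->head = (fq->head + 1) % FQ_SIZE;
	}
}

int main(void)
{
	struct fq fq = { 0, 0, { 0 } };

	for (int i = 0; i < 6; i++) {
		if (fq_full(&fq))
			fq_ring_free(&fq);	/* flush, then reuse slots */
		fq_ring_add(&fq, i);
	}
	fq_ring_free(&fq);
	return 0;
}
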
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 91749654fd49..980e4af3f06b 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -1085,7 +1085,7 @@ static __maybe_unused int omap_iommu_runtime_resume(struct device *dev)
}
/**
- * omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation
+ * omap_iommu_prepare - prepare() dev_pm_ops implementation
* @dev: iommu device
*
* This function performs the necessary checks to determine if the IOMMU
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 80930ce04a16..f2aa34f57454 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -71,6 +71,7 @@ struct viommu_domain {
struct rb_root_cached mappings;
unsigned long nr_endpoints;
+ bool bypass;
};
struct viommu_endpoint {
@@ -310,8 +311,8 @@ out_unlock:
*
 * On success, return 0. Otherwise return an errno.
*/
-static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
- phys_addr_t paddr, size_t size, u32 flags)
+static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
+ phys_addr_t paddr, u32 flags)
{
unsigned long irqflags;
struct viommu_mapping *mapping;
@@ -322,7 +323,7 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
mapping->paddr = paddr;
mapping->iova.start = iova;
- mapping->iova.last = iova + size - 1;
+ mapping->iova.last = end;
mapping->flags = flags;
spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
@@ -337,26 +338,24 @@ static int viommu_add_mapping(struct viommu_domain *vdomain, unsigned long iova,
*
* @vdomain: the domain
* @iova: start of the range
- * @size: size of the range. A size of 0 corresponds to the entire address
- * space.
+ * @end: end of the range
*
- * On success, returns the number of unmapped bytes (>= size)
+ * On success, returns the number of unmapped bytes
*/
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
- unsigned long iova, size_t size)
+ u64 iova, u64 end)
{
size_t unmapped = 0;
unsigned long flags;
- unsigned long last = iova + size - 1;
struct viommu_mapping *mapping = NULL;
struct interval_tree_node *node, *next;
spin_lock_irqsave(&vdomain->mappings_lock, flags);
- next = interval_tree_iter_first(&vdomain->mappings, iova, last);
+ next = interval_tree_iter_first(&vdomain->mappings, iova, end);
while (next) {
node = next;
mapping = container_of(node, struct viommu_mapping, iova);
- next = interval_tree_iter_next(node, iova, last);
+ next = interval_tree_iter_next(node, iova, end);
/* Trying to split a mapping? */
if (mapping->iova.start < iova)
@@ -377,6 +376,55 @@ static size_t viommu_del_mappings(struct viommu_domain *vdomain,
}
/*
+ * Fill the domain with identity mappings, skipping the device's reserved
+ * regions.
+ */
+static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
+ struct viommu_domain *vdomain)
+{
+ int ret;
+ struct iommu_resv_region *resv;
+ u64 iova = vdomain->domain.geometry.aperture_start;
+ u64 limit = vdomain->domain.geometry.aperture_end;
+ u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
+ unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);
+
+ iova = ALIGN(iova, granule);
+ limit = ALIGN_DOWN(limit + 1, granule) - 1;
+
+ list_for_each_entry(resv, &vdev->resv_regions, list) {
+ u64 resv_start = ALIGN_DOWN(resv->start, granule);
+ u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;
+
+ if (resv_end < iova || resv_start > limit)
+ /* No overlap */
+ continue;
+
+ if (resv_start > iova) {
+ ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
+ (phys_addr_t)iova, flags);
+ if (ret)
+ goto err_unmap;
+ }
+
+ if (resv_end >= limit)
+ return 0;
+
+ iova = resv_end + 1;
+ }
+
+ ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
+ flags);
+ if (ret)
+ goto err_unmap;
+ return 0;
+
+err_unmap:
+ viommu_del_mappings(vdomain, 0, iova);
+ return ret;
+}
+
+/*
* viommu_replay_mappings - re-send MAP requests
*
* When reattaching a domain that was previously detached from all endpoints,
@@ -422,7 +470,7 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
size_t size;
u64 start64, end64;
phys_addr_t start, end;
- struct iommu_resv_region *region = NULL;
+ struct iommu_resv_region *region = NULL, *next;
unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
start = start64 = le64_to_cpu(mem->start);
@@ -453,7 +501,12 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
if (!region)
return -ENOMEM;
- list_add(&region->list, &vdev->resv_regions);
+ /* Keep the list sorted */
+ list_for_each_entry(next, &vdev->resv_regions, list) {
+ if (next->start > region->start)
+ break;
+ }
+ list_add_tail(&region->list, &next->list);
return 0;
}
@@ -587,7 +640,9 @@ static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
struct viommu_domain *vdomain;
- if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+ if (type != IOMMU_DOMAIN_UNMANAGED &&
+ type != IOMMU_DOMAIN_DMA &&
+ type != IOMMU_DOMAIN_IDENTITY)
return NULL;
vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
@@ -630,6 +685,21 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev,
vdomain->map_flags = viommu->map_flags;
vdomain->viommu = viommu;
+ if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+ if (virtio_has_feature(viommu->vdev,
+ VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
+ vdomain->bypass = true;
+ return 0;
+ }
+
+ ret = viommu_domain_map_identity(vdev, vdomain);
+ if (ret) {
+ ida_free(&viommu->domain_ids, vdomain->id);
+ vdomain->viommu = NULL;
+ return -EOPNOTSUPP;
+ }
+ }
+
return 0;
}
@@ -637,8 +707,8 @@ static void viommu_domain_free(struct iommu_domain *domain)
{
struct viommu_domain *vdomain = to_viommu_domain(domain);
- /* Free all remaining mappings (size 2^64) */
- viommu_del_mappings(vdomain, 0, 0);
+ /* Free all remaining mappings */
+ viommu_del_mappings(vdomain, 0, ULLONG_MAX);
if (vdomain->viommu)
ida_free(&vdomain->viommu->domain_ids, vdomain->id);
@@ -673,7 +743,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
/*
* In the virtio-iommu device, when attaching the endpoint to a new
- * domain, it is detached from the old one and, if as as a result the
+ * domain, it is detached from the old one and, if as a result the
* old domain isn't attached to any endpoint, all mappings are removed
* from the old domain and it is freed.
*
@@ -691,6 +761,9 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
.domain = cpu_to_le32(vdomain->id),
};
+ if (vdomain->bypass)
+ req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);
+
for (i = 0; i < fwspec->num_ids; i++) {
req.endpoint = cpu_to_le32(fwspec->ids[i]);
@@ -720,6 +793,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
{
int ret;
u32 flags;
+ u64 end = iova + size - 1;
struct virtio_iommu_req_map map;
struct viommu_domain *vdomain = to_viommu_domain(domain);
@@ -730,7 +804,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
if (flags & ~vdomain->map_flags)
return -EINVAL;
- ret = viommu_add_mapping(vdomain, iova, paddr, size, flags);
+ ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
if (ret)
return ret;
@@ -739,7 +813,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
.domain = cpu_to_le32(vdomain->id),
.virt_start = cpu_to_le64(iova),
.phys_start = cpu_to_le64(paddr),
- .virt_end = cpu_to_le64(iova + size - 1),
+ .virt_end = cpu_to_le64(end),
.flags = cpu_to_le32(flags),
};
@@ -748,7 +822,7 @@ static int viommu_map(struct iommu_domain *domain, unsigned long iova,
ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
if (ret)
- viommu_del_mappings(vdomain, iova, size);
+ viommu_del_mappings(vdomain, iova, end);
return ret;
}
@@ -761,7 +835,7 @@ static size_t viommu_unmap(struct iommu_domain *domain, unsigned long iova,
struct virtio_iommu_req_unmap unmap;
struct viommu_domain *vdomain = to_viommu_domain(domain);
- unmapped = viommu_del_mappings(vdomain, iova, size);
+ unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
if (unmapped < size)
return 0;
@@ -1115,7 +1189,7 @@ static void viommu_remove(struct virtio_device *vdev)
iommu_device_unregister(&viommu->iommu);
/* Stop all virtqueues */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
dev_info(&vdev->dev, "device removed\n");
@@ -1132,6 +1206,7 @@ static unsigned int features[] = {
VIRTIO_IOMMU_F_DOMAIN_RANGE,
VIRTIO_IOMMU_F_PROBE,
VIRTIO_IOMMU_F_MMIO,
+ VIRTIO_IOMMU_F_BYPASS_CONFIG,
};
static struct virtio_device_id id_table[] = {
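
viommu_domain_map_identity() above is interval arithmetic: walk the (sorted, non-overlapping) reserved regions and emit an identity mapping for every gap between them inside the aperture. A userspace sketch of just the gap computation, with made-up region values and the page-granule alignment omitted:

#include <stdio.h>
#include <stdint.h>

struct resv { uint64_t start, end; };	/* inclusive, sorted */

static void map_identity(uint64_t start, uint64_t end)
{
	printf("identity map [%#llx - %#llx]\n",
	       (unsigned long long)start, (unsigned long long)end);
}

int main(void)
{
	uint64_t iova = 0x1000, limit = 0xffffff;	/* aperture, assumed aligned */
	struct resv regions[] = {
		{ 0x8000,  0x8fff },
		{ 0xa0000, 0xbffff },
	};

	for (unsigned int i = 0; i < 2; i++) {
		struct resv *r = &regions[i];

		if (r->end < iova || r->start > limit)
			continue;	/* no overlap with the aperture */
		if (r->start > iova)
			map_identity(iova, r->start - 1);	/* gap before region */
		if (r->end >= limit)
			return 0;	/* region swallows the rest */
		iova = r->end + 1;
	}
	map_identity(iova, limit);	/* trailing gap */
	return 0;
}
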
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 2543ef65825b..38091ebb9403 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -178,7 +178,6 @@ struct aic_irq_chip {
struct irq_domain *hw_domain;
struct irq_domain *ipi_domain;
int nr_hw;
- int ipi_hwirq;
};
static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 9349fc68b81a..b249d4df899e 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -88,7 +88,6 @@ static struct irq_chip gicv2m_msi_irq_chip = {
.irq_mask = gicv2m_mask_msi_irq,
.irq_unmask = gicv2m_unmask_msi_irq,
.irq_eoi = irq_chip_eoi_parent,
- .irq_write_msi_msg = pci_msi_domain_write_msg,
};
static struct msi_domain_info gicv2m_msi_domain_info = {
@@ -405,7 +404,7 @@ err_free_v2m:
return ret;
}
-static struct of_device_id gicv2m_device_id[] = {
+static const struct of_device_id gicv2m_device_id[] = {
{ .compatible = "arm,gic-v2m-frame", },
{},
};
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index ad2810c017ed..93f77a8196da 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -28,7 +28,6 @@ static struct irq_chip its_msi_irq_chip = {
.irq_unmask = its_unmask_msi_irq,
.irq_mask = its_mask_msi_irq,
.irq_eoi = irq_chip_eoi_parent,
- .irq_write_msi_msg = pci_msi_domain_write_msg,
};
static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 0cb584d9815b..cd772973114a 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -46,6 +46,10 @@
#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
#define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
+#define RD_LOCAL_LPI_ENABLED BIT(0)
+#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1)
+#define RD_LOCAL_MEMRESERVE_DONE BIT(2)
+
static u32 lpi_id_bits;
/*
@@ -3044,7 +3048,7 @@ static void its_cpu_init_lpis(void)
phys_addr_t paddr;
u64 val, tmp;
- if (gic_data_rdist()->lpi_enabled)
+ if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED)
return;
val = readl_relaxed(rbase + GICR_CTLR);
@@ -3063,15 +3067,13 @@ static void its_cpu_init_lpis(void)
paddr &= GENMASK_ULL(51, 16);
WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
- its_free_pending_table(gic_data_rdist()->pend_page);
- gic_data_rdist()->pend_page = NULL;
+ gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED;
goto out;
}
pend_page = gic_data_rdist()->pend_page;
paddr = page_to_phys(pend_page);
- WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
/* set PROPBASE */
val = (gic_rdists->prop_table_pa |
@@ -3158,10 +3160,11 @@ static void its_cpu_init_lpis(void)
/* Make sure the GIC has seen the above */
dsb(sy);
out:
- gic_data_rdist()->lpi_enabled = true;
+ gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED;
pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
smp_processor_id(),
- gic_data_rdist()->pend_page ? "allocated" : "reserved",
+ gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ?
+ "reserved" : "allocated",
&paddr);
}
@@ -4853,6 +4856,38 @@ static struct syscore_ops its_syscore_ops = {
.resume = its_restore_enable,
};
+static void __init __iomem *its_map_one(struct resource *res, int *err)
+{
+ void __iomem *its_base;
+ u32 val;
+
+ its_base = ioremap(res->start, SZ_64K);
+ if (!its_base) {
+ pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
+ *err = -ENOMEM;
+ return NULL;
+ }
+
+ val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
+ if (val != 0x30 && val != 0x40) {
+ pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
+ *err = -ENODEV;
+ goto out_unmap;
+ }
+
+ *err = its_force_quiescent(its_base);
+ if (*err) {
+ pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
+ goto out_unmap;
+ }
+
+ return its_base;
+
+out_unmap:
+ iounmap(its_base);
+ return NULL;
+}
+
static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
{
struct irq_domain *inner_domain;
@@ -4960,29 +4995,14 @@ static int __init its_probe_one(struct resource *res,
{
struct its_node *its;
void __iomem *its_base;
- u32 val, ctlr;
u64 baser, tmp, typer;
struct page *page;
+ u32 ctlr;
int err;
- its_base = ioremap(res->start, SZ_64K);
- if (!its_base) {
- pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
- return -ENOMEM;
- }
-
- val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
- if (val != 0x30 && val != 0x40) {
- pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
- err = -ENODEV;
- goto out_unmap;
- }
-
- err = its_force_quiescent(its_base);
- if (err) {
- pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
- goto out_unmap;
- }
+ its_base = its_map_one(res, &err);
+ if (!its_base)
+ return err;
pr_info("ITS %pR\n", res);
@@ -5138,7 +5158,7 @@ static int redist_disable_lpis(void)
*
* If running with preallocated tables, there is nothing to do.
*/
- if (gic_data_rdist()->lpi_enabled ||
+ if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) ||
(gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
return 0;
@@ -5200,6 +5220,69 @@ int its_cpu_init(void)
return 0;
}
+static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work)
+{
+ cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
+ gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
+}
+
+static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work,
+ rdist_memreserve_cpuhp_cleanup_workfn);
+
+static int its_cpu_memreserve_lpi(unsigned int cpu)
+{
+ struct page *pend_page;
+ int ret = 0;
+
+ /* This gets to run exactly once per CPU */
+ if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE)
+ return 0;
+
+ pend_page = gic_data_rdist()->pend_page;
+ if (WARN_ON(!pend_page)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ /*
+ * If the pending table was pre-programmed, free the memory we
+ * preemptively allocated. Otherwise, reserve that memory for
+ * later kexecs.
+ */
+ if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) {
+ its_free_pending_table(pend_page);
+ gic_data_rdist()->pend_page = NULL;
+ } else {
+ phys_addr_t paddr = page_to_phys(pend_page);
+ WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
+ }
+
+out:
+ /* Last CPU being brought up gets to issue the cleanup */
+ if (!IS_ENABLED(CONFIG_SMP) ||
+ cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
+ schedule_work(&rdist_memreserve_cpuhp_cleanup_work);
+
+ gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE;
+ return ret;
+}
+
+/* Mark all the BASER registers as invalid before they get reprogrammed */
+static int __init its_reset_one(struct resource *res)
+{
+ void __iomem *its_base;
+ int err, i;
+
+ its_base = its_map_one(res, &err);
+ if (!its_base)
+ return err;
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++)
+ gits_write_baser(0, its_base + GITS_BASER + (i << 3));
+
+ iounmap(its_base);
+ return 0;
+}
+
static const struct of_device_id its_device_id[] = {
{ .compatible = "arm,gic-v3-its", },
{},
@@ -5210,6 +5293,26 @@ static int __init its_of_probe(struct device_node *node)
struct device_node *np;
struct resource res;
+ /*
+ * Make sure *all* the ITS are reset before we probe any, as
+ * they may be sharing memory. If any of the ITS fails to
+ * reset, don't even try to go any further, as this could
+ * result in something even worse.
+ */
+ for (np = of_find_matching_node(node, its_device_id); np;
+ np = of_find_matching_node(np, its_device_id)) {
+ int err;
+
+ if (!of_device_is_available(np) ||
+ !of_property_read_bool(np, "msi-controller") ||
+ of_address_to_resource(np, 0, &res))
+ continue;
+
+ err = its_reset_one(&res);
+ if (err)
+ return err;
+ }
+
for (np = of_find_matching_node(node, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
if (!of_device_is_available(np))
@@ -5372,17 +5475,64 @@ dom_err:
return err;
}
+static int __init its_acpi_reset(union acpi_subtable_headers *header,
+ const unsigned long end)
+{
+ struct acpi_madt_generic_translator *its_entry;
+ struct resource res;
+
+ its_entry = (struct acpi_madt_generic_translator *)header;
+ res = (struct resource) {
+ .start = its_entry->base_address,
+ .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1,
+ .flags = IORESOURCE_MEM,
+ };
+
+ return its_reset_one(&res);
+}
+
static void __init its_acpi_probe(void)
{
acpi_table_parse_srat_its();
- acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
- gic_acpi_parse_madt_its, 0);
+ /*
+ * Make sure *all* the ITS are reset before we probe any, as
+ * they may be sharing memory. If any of the ITS fails to
+ * reset, don't even try to go any further, as this could
+ * result in something even worse.
+ */
+ if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ its_acpi_reset, 0) > 0)
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ gic_acpi_parse_madt_its, 0);
acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif
+int __init its_lpi_memreserve_init(void)
+{
+ int state;
+
+ if (!efi_enabled(EFI_CONFIG_TABLES))
+ return 0;
+
+ if (list_empty(&its_nodes))
+ return 0;
+
+ gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID;
+ state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+ "irqchip/arm/gicv3/memreserve:online",
+ its_cpu_memreserve_lpi,
+ NULL);
+ if (state < 0)
+ return state;
+
+ gic_rdists->cpuhp_memreserve_state = state;
+
+ return 0;
+}
+
int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct irq_domain *parent_domain)
{
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index b84c9c2eccdc..a2163d32f17d 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -171,7 +171,6 @@ static struct irq_chip mbi_msi_irq_chip = {
.irq_unmask = mbi_unmask_msi_irq,
.irq_eoi = irq_chip_eoi_parent,
.irq_compose_msi_msg = mbi_compose_msi_msg,
- .irq_write_msi_msg = pci_msi_domain_write_msg,
};
static struct msi_domain_info mbi_msi_domain_info = {
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index daec3309b014..5e935d97207d 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -920,6 +920,22 @@ static int __gic_update_rdist_properties(struct redist_region *region,
{
u64 typer = gic_read_typer(ptr + GICR_TYPER);
+ /* Boot-time cleanup */
+ if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
+ u64 val;
+
+ /* Deactivate any present vPE */
+ val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
+ if (val & GICR_VPENDBASER_Valid)
+ gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
+ ptr + SZ_128K + GICR_VPENDBASER);
+
+ /* Mark the VPE table as invalid */
+ val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
+ val &= ~GICR_VPROPBASER_4_1_VALID;
+ gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
+ }
+
gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
/* RVPEID implies some form of DirectLPI, no matter what the doc says... :-/ */
@@ -1802,6 +1818,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
if (gic_dist_supports_lpis()) {
its_init(handle, &gic_data.rdists, gic_data.domain);
its_cpu_init();
+ its_lpi_memreserve_init();
} else {
if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
gicv2m_init(handle, gic_data.domain);
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 5b5a365dbd5e..b9c22f764b4d 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -26,7 +26,7 @@ struct gpcv2_irqchip_data {
u32 cpu2wakeup;
};
-static struct gpcv2_irqchip_data *imx_gpcv2_instance;
+static struct gpcv2_irqchip_data *imx_gpcv2_instance __ro_after_init;
static void __iomem *gpcv2_idx_to_reg(struct gpcv2_irqchip_data *cd, int i)
{
diff --git a/drivers/irqchip/irq-ingenic-tcu.c b/drivers/irqchip/irq-ingenic-tcu.c
index 34a7d261b710..3363f83bd7e9 100644
--- a/drivers/irqchip/irq-ingenic-tcu.c
+++ b/drivers/irqchip/irq-ingenic-tcu.c
@@ -28,6 +28,7 @@ static void ingenic_tcu_intc_cascade(struct irq_desc *desc)
struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0);
struct regmap *map = gc->private;
uint32_t irq_reg, irq_mask;
+ unsigned long bits;
unsigned int i;
regmap_read(map, TCU_REG_TFR, &irq_reg);
@@ -36,8 +37,9 @@ static void ingenic_tcu_intc_cascade(struct irq_desc *desc)
chained_irq_enter(irq_chip, desc);
irq_reg &= ~irq_mask;
+ bits = irq_reg;
- for_each_set_bit(i, (unsigned long *)&irq_reg, 32)
+ for_each_set_bit(i, &bits, 32)
generic_handle_domain_irq(domain, i);
chained_irq_exit(irq_chip, desc);
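
The ingenic-tcu change fixes a subtle portability bug: casting a u32's address to unsigned long * and handing it to for_each_set_bit() makes the walker read sizeof(unsigned long) bytes, over-reading past the variable on 64-bit. Copying into a real unsigned long first is the safe pattern. A sketch that shows the size mismatch (the buggy cast is undefined behaviour, so only the correct form is exercised):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t irq_reg = 0x5;
	unsigned long bits = irq_reg;	/* widen by value, not by cast */

	printf("sizeof(irq_reg) = %zu, sizeof(bits) = %zu\n",
	       sizeof(irq_reg), sizeof(bits));

	/* Walk the set bits over the correctly sized variable. */
	for (unsigned int i = 0; i < 32; i++)
		if (bits & (1UL << i))
			printf("bit %u set\n", i);
	return 0;
}
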
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index 32562b7e681b..e3801c4a77ed 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -241,7 +241,7 @@ static int pch_msi_init(struct device_node *node,
return 0;
err_map:
- kfree(priv->msi_map);
+ bitmap_free(priv->msi_map);
err_priv:
kfree(priv);
return ret;
diff --git a/drivers/irqchip/irq-mbigen.c b/drivers/irqchip/irq-mbigen.c
index 12df2162108e..f3faf5c99770 100644
--- a/drivers/irqchip/irq-mbigen.c
+++ b/drivers/irqchip/irq-mbigen.c
@@ -207,7 +207,7 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain,
if (err)
return err;
- err = platform_msi_domain_alloc(domain, virq, nr_irqs);
+ err = platform_msi_device_domain_alloc(domain, virq, nr_irqs);
if (err)
return err;
@@ -223,7 +223,7 @@ static int mbigen_irq_domain_alloc(struct irq_domain *domain,
static void mbigen_irq_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
- platform_msi_domain_free(domain, virq, nr_irqs);
+ platform_msi_device_domain_free(domain, virq, nr_irqs);
}
static const struct irq_domain_ops mbigen_domain_ops = {
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 3e7297fc5948..497da344717c 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -221,7 +221,7 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
icu_irqd->icu_group = msi_data->subset_data->icu_group;
icu_irqd->icu = icu;
- err = platform_msi_domain_alloc(domain, virq, nr_irqs);
+ err = platform_msi_device_domain_alloc(domain, virq, nr_irqs);
if (err) {
dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n");
goto free_irqd;
@@ -245,7 +245,7 @@ mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return 0;
free_msi:
- platform_msi_domain_free(domain, virq, nr_irqs);
+ platform_msi_device_domain_free(domain, virq, nr_irqs);
free_irqd:
kfree(icu_irqd);
return err;
@@ -260,7 +260,7 @@ mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq,
kfree(icu_irqd);
- platform_msi_domain_free(domain, virq, nr_irqs);
+ platform_msi_device_domain_free(domain, virq, nr_irqs);
}
static const struct irq_domain_ops mvebu_icu_domain_ops = {
@@ -314,12 +314,12 @@ static int mvebu_icu_subset_probe(struct platform_device *pdev)
msi_data->subset_data = of_device_get_match_data(dev);
}
- dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_PLATFORM_MSI);
- if (!dev->msi_domain)
+ if (!dev->msi.domain)
return -EPROBE_DEFER;
- msi_parent_dn = irq_domain_get_of_node(dev->msi_domain);
+ msi_parent_dn = irq_domain_get_of_node(dev->msi.domain);
if (!msi_parent_dn)
return -ENODEV;
diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c
index fd9f275592d2..50a56820c99b 100644
--- a/drivers/irqchip/irq-realtek-rtl.c
+++ b/drivers/irqchip/irq-realtek-rtl.c
@@ -62,7 +62,7 @@ static struct irq_chip realtek_ictl_irq = {
static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
{
- irq_set_chip_and_handler(hw, &realtek_ictl_irq, handle_level_irq);
+ irq_set_chip_and_handler(irq, &realtek_ictl_irq, handle_level_irq);
return 0;
}
@@ -76,16 +76,20 @@ static void realtek_irq_dispatch(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_domain *domain;
- unsigned int pending;
+ unsigned long pending;
+ unsigned int soc_int;
chained_irq_enter(chip, desc);
pending = readl(REG(RTL_ICTL_GIMR)) & readl(REG(RTL_ICTL_GISR));
+
if (unlikely(!pending)) {
spurious_interrupt();
goto out;
}
+
domain = irq_desc_get_handler_data(desc);
- generic_handle_domain_irq(domain, __ffs(pending));
+ for_each_set_bit(soc_int, &pending, 32)
+ generic_handle_domain_irq(domain, soc_int);
out:
chained_irq_exit(chip, desc);
@@ -95,7 +99,8 @@ out:
* SoC interrupts are cascaded to MIPS CPU interrupts according to the
* interrupt-map in the device tree. Each SoC interrupt gets 4 bits for
* the CPU interrupt in an Interrupt Routing Register. Max 32 SoC interrupts
- * thus go into 4 IRRs.
+ * thus go into 4 IRRs. A routing value of '0' means the interrupt is left
+ * disconnected. Routing values {1..15} connect to output lines {0..14}.
*/
static int __init map_interrupts(struct device_node *node, struct irq_domain *domain)
{
@@ -134,7 +139,7 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do
of_node_put(cpu_ictl);
cpu_int = be32_to_cpup(imap + 2);
- if (cpu_int > 7)
+ if (cpu_int > 7 || cpu_int < 2)
return -EINVAL;
if (!(mips_irqs_set & BIT(cpu_int))) {
@@ -143,7 +148,8 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do
mips_irqs_set |= BIT(cpu_int);
}
- regs[(soc_int * 4) / 32] |= cpu_int << (soc_int * 4) % 32;
+ /* Use routing values (1..6) for CPU interrupts (2..7) */
+ regs[(soc_int * 4) / 32] |= (cpu_int - 1) << (soc_int * 4) % 32;
imap += 3;
}
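
The realtek-rtl routing fix packs one 4-bit routing value per SoC interrupt into 32-bit Interrupt Routing Registers, with value 0 now reserved for "disconnected" and CPU interrupt N encoded as N-1. A sketch of the packing arithmetic with a few sample interrupts (register layout per the comment above; the mappings are illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t regs[4] = { 0 };	/* 32 SoC irqs x 4 bits = 4 registers */
	struct { int soc_int, cpu_int; } map[] = {
		{ 0, 2 }, { 5, 7 }, { 9, 3 },
	};

	for (unsigned int i = 0; i < 3; i++) {
		int soc_int = map[i].soc_int;
		int cpu_int = map[i].cpu_int;

		/* Routing value (1..6) encodes CPU interrupt (2..7);
		 * 0 would leave the line disconnected. */
		regs[(soc_int * 4) / 32] |=
			(uint32_t)(cpu_int - 1) << ((soc_int * 4) % 32);
	}

	for (int r = 0; r < 4; r++)
		printf("IRR%d = 0x%08x\n", r, regs[r]);
	return 0;
}
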
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index cb7f60b3b4a9..37f9a4499fdb 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -375,7 +375,6 @@ static int intc_irqpin_probe(struct platform_device *pdev)
struct intc_irqpin_priv *p;
struct intc_irqpin_iomem *i;
struct resource *io[INTC_IRQPIN_REG_NR];
- struct resource *irq;
struct irq_chip *irq_chip;
void (*enable_fn)(struct irq_data *d);
void (*disable_fn)(struct irq_data *d);
@@ -418,12 +417,14 @@ static int intc_irqpin_probe(struct platform_device *pdev)
/* allow any number of IRQs between 1 and INTC_IRQPIN_MAX */
for (k = 0; k < INTC_IRQPIN_MAX; k++) {
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
- if (!irq)
+ ret = platform_get_irq_optional(pdev, k);
+ if (ret == -ENXIO)
break;
+ if (ret < 0)
+ goto err0;
p->irq[k].p = p;
- p->irq[k].requested_irq = irq->start;
+ p->irq[k].requested_irq = ret;
}
nirqs = k;
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 07a6d8b42b63..909325f88239 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -126,7 +126,6 @@ static int irqc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const char *name = dev_name(dev);
struct irqc_priv *p;
- struct resource *irq;
int ret;
int k;
@@ -142,13 +141,15 @@ static int irqc_probe(struct platform_device *pdev)
/* allow any number of IRQs between 1 and IRQC_IRQ_MAX */
for (k = 0; k < IRQC_IRQ_MAX; k++) {
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, k);
- if (!irq)
+ ret = platform_get_irq_optional(pdev, k);
+ if (ret == -ENXIO)
break;
+ if (ret < 0)
+ goto err_runtime_pm_disable;
p->irq[k].p = p;
p->irq[k].hw_irq = k;
- p->irq[k].requested_irq = irq->start;
+ p->irq[k].requested_irq = ret;
}
p->number_of_irqs = k;
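
Both Renesas hunks replace platform_get_resource(IORESOURCE_IRQ, ...) with platform_get_irq_optional(), whose contract distinguishes "no more IRQs" (-ENXIO, stop probing) from a real failure (any other negative value, abort). A userspace sketch of that control flow; the stand-in provider and its return values are invented for illustration:

#include <stdio.h>
#include <errno.h>

#define MAX_IRQ 8

/* Stand-in: three IRQs exist, then the lookup reports -ENXIO. */
static int get_irq_optional(int k)
{
	static const int irqs[] = { 17, 23, 42 };

	return (k < 3) ? irqs[k] : -ENXIO;
}

int main(void)
{
	int requested[MAX_IRQ];
	int k, ret;

	for (k = 0; k < MAX_IRQ; k++) {
		ret = get_irq_optional(k);
		if (ret == -ENXIO)
			break;		/* no more IRQs: not an error */
		if (ret < 0)
			return 1;	/* a real lookup failure: bail out */
		requested[k] = ret;
	}
	printf("found %d irqs\n", k);
	for (int i = 0; i < k; i++)
		printf("irq[%d] = %d\n", i, requested[i]);
	return 0;
}
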
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 259065d271ef..09cc98266d30 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -398,3 +398,4 @@ out_free_priv:
IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init);
IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */
+IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 8eba08db33b2..5fdbb4358dd0 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -595,7 +595,7 @@ static void ti_sci_inta_msi_set_desc(msi_alloc_info_t *arg,
struct platform_device *pdev = to_platform_device(desc->dev);
arg->desc = desc;
- arg->hwirq = TO_HWIRQ(pdev->id, desc->inta.dev_index);
+ arg->hwirq = TO_HWIRQ(pdev->id, desc->msi_index);
}
static struct msi_domain_ops ti_sci_inta_msi_ops = {
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 1518ba31a80c..7c17a6f643ef 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -149,6 +149,8 @@ static struct spear_shirq spear320_shirq_ras3 = {
.offset = 0,
.nr_irqs = 7,
.mask = ((0x1 << 7) - 1) << 0,
+ .irq_chip = &dummy_irq_chip,
+ .status_reg = SPEAR320_INT_STS_MASK_REG,
};
static struct spear_shirq spear320_shirq_ras1 = {
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ac6688d7a3f4..6090e647daee 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -260,13 +260,6 @@ config LEDS_NET48XX
This option enables support for the Soekris net4801 and net4826 error
LED.
-config LEDS_FSG
- tristate "LED Support for the Freecom FSG-3"
- depends on LEDS_CLASS
- depends on MACH_FSG
- help
- This option enables support for the LEDs on the Freecom FSG-3.
-
config LEDS_WRAP
tristate "LED Support for the WRAP series LEDs"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 1a719caf14c0..e58ecb36360f 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
obj-$(CONFIG_LEDS_CPCAP) += leds-cpcap.o
obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
-obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
diff --git a/drivers/leds/blink/leds-lgm-sso.c b/drivers/leds/blink/leds-lgm-sso.c
index fd8b7573285a..6f270c0272fb 100644
--- a/drivers/leds/blink/leds-lgm-sso.c
+++ b/drivers/leds/blink/leds-lgm-sso.c
@@ -477,7 +477,6 @@ static int sso_gpio_gc_init(struct device *dev, struct sso_led_priv *priv)
gc->ngpio = priv->gpio.pins;
gc->parent = dev;
gc->owner = THIS_MODULE;
- gc->of_node = dev->of_node;
return devm_gpiochip_add_data(dev, gc, priv);
}
diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig
index b230f3d65eb0..d3eb689b193c 100644
--- a/drivers/leds/flash/Kconfig
+++ b/drivers/leds/flash/Kconfig
@@ -48,6 +48,19 @@ config LEDS_MAX77693
multifunction device. It has built-in control for two LEDs in flash
and torch mode.
+config LEDS_MT6360
+ tristate "LED Support for Mediatek MT6360 PMIC"
+ depends on LEDS_CLASS && OF
+ depends on LEDS_CLASS_FLASH || !LEDS_CLASS_FLASH
+ depends on LEDS_CLASS_MULTICOLOR || !LEDS_CLASS_MULTICOLOR
+ depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS
+ depends on MFD_MT6360
+ help
+	  This option enables support for the dual flash LED drivers found on
+	  the Mediatek MT6360 PMIC.
+	  Independent current sources supply each flash LED, supporting both
+	  torch and strobe modes.
+
config LEDS_RT4505
tristate "LED support for RT4505 flashlight controller"
depends on I2C && OF
diff --git a/drivers/leds/flash/Makefile b/drivers/leds/flash/Makefile
index ebea42f9c37e..0acbddc0b91b 100644
--- a/drivers/leds/flash/Makefile
+++ b/drivers/leds/flash/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_LEDS_MT6360) += leds-mt6360.o
obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o
obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o
obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o
diff --git a/drivers/leds/flash/leds-ktd2692.c b/drivers/leds/flash/leds-ktd2692.c
index f341da1503a4..ed1f20a58bf6 100644
--- a/drivers/leds/flash/leds-ktd2692.c
+++ b/drivers/leds/flash/leds-ktd2692.c
@@ -274,7 +274,7 @@ static int ktd2692_parse_dt(struct ktd2692_context *led, struct device *dev,
struct device_node *child_node;
int ret;
- if (!dev_of_node(dev))
+ if (!np)
return -ENXIO;
led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
diff --git a/drivers/leds/flash/leds-mt6360.c b/drivers/leds/flash/leds-mt6360.c
new file mode 100644
index 000000000000..e1066a52d2d2
--- /dev/null
+++ b/drivers/leds/flash/leds-mt6360.c
@@ -0,0 +1,910 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/led-class-flash.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <media/v4l2-flash-led-class.h>
+
+enum {
+ MT6360_LED_ISNK1 = 0,
+ MT6360_LED_ISNK2,
+ MT6360_LED_ISNK3,
+ MT6360_LED_ISNKML,
+ MT6360_LED_FLASH1,
+ MT6360_LED_FLASH2,
+ MT6360_MAX_LEDS
+};
+
+#define MT6360_REG_RGBEN 0x380
+#define MT6360_REG_ISNK(_led_no) (0x381 + (_led_no))
+#define MT6360_ISNK_ENMASK(_led_no) BIT(7 - (_led_no))
+#define MT6360_ISNK_MASK GENMASK(4, 0)
+#define MT6360_CHRINDSEL_MASK BIT(3)
+
+/* Virtual definition for multicolor */
+#define MT6360_VIRTUAL_MULTICOLOR (MT6360_MAX_LEDS + 1)
+#define MULTICOLOR_NUM_CHANNELS 3
+
+#define MT6360_REG_FLEDEN 0x37E
+#define MT6360_REG_STRBTO 0x373
+#define MT6360_REG_FLEDBASE(_id) (0x372 + 4 * (_id - MT6360_LED_FLASH1))
+#define MT6360_REG_FLEDISTRB(_id) (MT6360_REG_FLEDBASE(_id) + 2)
+#define MT6360_REG_FLEDITOR(_id) (MT6360_REG_FLEDBASE(_id) + 3)
+#define MT6360_REG_CHGSTAT2 0x3E1
+#define MT6360_REG_FLEDSTAT1 0x3E9
+#define MT6360_ITORCH_MASK GENMASK(4, 0)
+#define MT6360_ISTROBE_MASK GENMASK(6, 0)
+#define MT6360_STRBTO_MASK GENMASK(6, 0)
+#define MT6360_TORCHEN_MASK BIT(3)
+#define MT6360_STROBEN_MASK BIT(2)
+#define MT6360_FLCSEN_MASK(_id) BIT(MT6360_LED_FLASH2 - _id)
+#define MT6360_FLEDCHGVINOVP_MASK BIT(3)
+#define MT6360_FLED1STRBTO_MASK BIT(11)
+#define MT6360_FLED2STRBTO_MASK BIT(10)
+#define MT6360_FLED1STRB_MASK BIT(9)
+#define MT6360_FLED2STRB_MASK BIT(8)
+#define MT6360_FLED1SHORT_MASK BIT(7)
+#define MT6360_FLED2SHORT_MASK BIT(6)
+#define MT6360_FLEDLVF_MASK BIT(3)
+
+#define MT6360_ISNKRGB_STEPUA 2000
+#define MT6360_ISNKRGB_MAXUA 24000
+#define MT6360_ISNKML_STEPUA 5000
+#define MT6360_ISNKML_MAXUA 150000
+
+#define MT6360_ITORCH_MINUA 25000
+#define MT6360_ITORCH_STEPUA 12500
+#define MT6360_ITORCH_MAXUA 400000
+#define MT6360_ISTRB_MINUA 50000
+#define MT6360_ISTRB_STEPUA 12500
+#define MT6360_ISTRB_MAXUA 1500000
+#define MT6360_STRBTO_MINUS 64000
+#define MT6360_STRBTO_STEPUS 32000
+#define MT6360_STRBTO_MAXUS 2432000
+
+#define STATE_OFF 0
+#define STATE_KEEP 1
+#define STATE_ON 2
+
+struct mt6360_led {
+ union {
+ struct led_classdev isnk;
+ struct led_classdev_mc mc;
+ struct led_classdev_flash flash;
+ };
+ struct v4l2_flash *v4l2_flash;
+ struct mt6360_priv *priv;
+ u32 led_no;
+ u32 default_state;
+};
+
+struct mt6360_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct mutex lock;
+ unsigned int fled_strobe_used;
+ unsigned int fled_torch_used;
+ unsigned int leds_active;
+ unsigned int leds_count;
+ struct mt6360_led leds[];
+};
+
+static int mt6360_mc_brightness_set(struct led_classdev *lcdev,
+ enum led_brightness level)
+{
+ struct led_classdev_mc *mccdev = lcdev_to_mccdev(lcdev);
+ struct mt6360_led *led = container_of(mccdev, struct mt6360_led, mc);
+ struct mt6360_priv *priv = led->priv;
+ u32 real_bright, enable_mask = 0, enable = 0;
+ int i, ret;
+
+ mutex_lock(&priv->lock);
+
+ led_mc_calc_color_components(mccdev, level);
+
+ for (i = 0; i < mccdev->num_colors; i++) {
+ struct mc_subled *subled = mccdev->subled_info + i;
+
+ real_bright = min(lcdev->max_brightness, subled->brightness);
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_ISNK(i),
+ MT6360_ISNK_MASK, real_bright);
+ if (ret)
+ goto out;
+
+ enable_mask |= MT6360_ISNK_ENMASK(subled->channel);
+ if (real_bright)
+ enable |= MT6360_ISNK_ENMASK(subled->channel);
+ }
+
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_RGBEN, enable_mask,
+ enable);
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int mt6360_isnk_brightness_set(struct led_classdev *lcdev,
+ enum led_brightness level)
+{
+ struct mt6360_led *led = container_of(lcdev, struct mt6360_led, isnk);
+ struct mt6360_priv *priv = led->priv;
+ u32 enable_mask = MT6360_ISNK_ENMASK(led->led_no);
+ u32 val = level ? MT6360_ISNK_ENMASK(led->led_no) : 0;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_ISNK(led->led_no),
+ MT6360_ISNK_MASK, level);
+ if (ret)
+ goto out;
+
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_RGBEN, enable_mask,
+ val);
+
+out:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int mt6360_torch_brightness_set(struct led_classdev *lcdev,
+ enum led_brightness level)
+{
+ struct mt6360_led *led =
+ container_of(lcdev, struct mt6360_led, flash.led_cdev);
+ struct mt6360_priv *priv = led->priv;
+ u32 enable_mask = MT6360_TORCHEN_MASK | MT6360_FLCSEN_MASK(led->led_no);
+ u32 val = level ? MT6360_FLCSEN_MASK(led->led_no) : 0;
+ u32 prev = priv->fled_torch_used, curr;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ /*
+ * There is only one set of flash control logic; use the flag to check
+ * whether strobe mode is currently in use.
+ */
+ if (priv->fled_strobe_used) {
+ dev_warn(lcdev->dev, "Please disable strobe first [0x%x]\n",
+ priv->fled_strobe_used);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (level)
+ curr = prev | BIT(led->led_no);
+ else
+ curr = prev & ~BIT(led->led_no);
+
+ if (curr)
+ val |= MT6360_TORCHEN_MASK;
+
+ if (level) {
+ ret = regmap_update_bits(priv->regmap,
+ MT6360_REG_FLEDITOR(led->led_no),
+ MT6360_ITORCH_MASK, level - 1);
+ if (ret)
+ goto unlock;
+ }
+
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_FLEDEN, enable_mask,
+ val);
+ if (ret)
+ goto unlock;
+
+ priv->fled_torch_used = curr;
+
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int mt6360_flash_brightness_set(struct led_classdev_flash *fl_cdev,
+ u32 brightness)
+{
+ /*
+ * Due to the current spike when the flash is turned on, leave the
+ * brightness to be kept by the framework.
+ * This empty function prevents the led_classdev_flash ops check from
+ * failing at registration.
+ */
+ return 0;
+}
+
+static int _mt6360_flash_brightness_set(struct led_classdev_flash *fl_cdev,
+ u32 brightness)
+{
+ struct mt6360_led *led =
+ container_of(fl_cdev, struct mt6360_led, flash);
+ struct mt6360_priv *priv = led->priv;
+ struct led_flash_setting *s = &fl_cdev->brightness;
+ u32 val = (brightness - s->min) / s->step;
+
+ return regmap_update_bits(priv->regmap,
+ MT6360_REG_FLEDISTRB(led->led_no),
+ MT6360_ISTROBE_MASK, val);
+}
+
+static int mt6360_strobe_set(struct led_classdev_flash *fl_cdev, bool state)
+{
+ struct mt6360_led *led =
+ container_of(fl_cdev, struct mt6360_led, flash);
+ struct mt6360_priv *priv = led->priv;
+ struct led_classdev *lcdev = &fl_cdev->led_cdev;
+ struct led_flash_setting *s = &fl_cdev->brightness;
+ u32 enable_mask = MT6360_STROBEN_MASK | MT6360_FLCSEN_MASK(led->led_no);
+ u32 val = state ? MT6360_FLCSEN_MASK(led->led_no) : 0;
+ u32 prev = priv->fled_strobe_used, curr;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ /*
+ * There is only one set of flash control logic; use the flag to check
+ * whether torch mode is currently in use.
+ */
+ if (priv->fled_torch_used) {
+ dev_warn(lcdev->dev, "Please disable torch first [0x%x]\n",
+ priv->fled_torch_used);
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ if (state)
+ curr = prev | BIT(led->led_no);
+ else
+ curr = prev & ~BIT(led->led_no);
+
+ if (curr)
+ val |= MT6360_STROBEN_MASK;
+
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_FLEDEN, enable_mask,
+ val);
+ if (ret) {
+ dev_err(lcdev->dev, "[%d] Failed to control current source (state %d)\n",
+ led->led_no, state);
+ goto unlock;
+ }
+
+ /*
+ * If the flash needs to be on, configure the flash current to ramp up
+ * to the configured value.
+ * Otherwise, always fall back to the minimum level.
+ */
+ ret = _mt6360_flash_brightness_set(fl_cdev, state ? s->val : s->min);
+ if (ret)
+ goto unlock;
+
+ /*
+ * When the flash turns on/off, the HW ramp-up/ramp-down time is
+ * 5ms/500us, respectively.
+ */
+ if (!prev && curr)
+ usleep_range(5000, 6000);
+ else if (prev && !curr)
+ udelay(500);
+
+ priv->fled_strobe_used = curr;
+
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static int mt6360_strobe_get(struct led_classdev_flash *fl_cdev, bool *state)
+{
+ struct mt6360_led *led =
+ container_of(fl_cdev, struct mt6360_led, flash);
+ struct mt6360_priv *priv = led->priv;
+
+ mutex_lock(&priv->lock);
+ *state = !!(priv->fled_strobe_used & BIT(led->led_no));
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int mt6360_timeout_set(struct led_classdev_flash *fl_cdev, u32 timeout)
+{
+ struct mt6360_led *led =
+ container_of(fl_cdev, struct mt6360_led, flash);
+ struct mt6360_priv *priv = led->priv;
+ struct led_flash_setting *s = &fl_cdev->timeout;
+ u32 val = (timeout - s->min) / s->step;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_STRBTO,
+ MT6360_STRBTO_MASK, val);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int mt6360_fault_get(struct led_classdev_flash *fl_cdev, u32 *fault)
+{
+ struct mt6360_led *led =
+ container_of(fl_cdev, struct mt6360_led, flash);
+ struct mt6360_priv *priv = led->priv;
+ u16 fled_stat;
+ unsigned int chg_stat, strobe_timeout_mask, fled_short_mask;
+ u32 rfault = 0;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = regmap_read(priv->regmap, MT6360_REG_CHGSTAT2, &chg_stat);
+ if (ret)
+ goto unlock;
+
+ ret = regmap_raw_read(priv->regmap, MT6360_REG_FLEDSTAT1, &fled_stat,
+ sizeof(fled_stat));
+ if (ret)
+ goto unlock;
+
+ if (led->led_no == MT6360_LED_FLASH1) {
+ strobe_timeout_mask = MT6360_FLED1STRBTO_MASK;
+ fled_short_mask = MT6360_FLED1SHORT_MASK;
+ } else {
+ strobe_timeout_mask = MT6360_FLED2STRBTO_MASK;
+ fled_short_mask = MT6360_FLED2SHORT_MASK;
+ }
+
+ if (chg_stat & MT6360_FLEDCHGVINOVP_MASK)
+ rfault |= LED_FAULT_INPUT_VOLTAGE;
+
+ if (fled_stat & strobe_timeout_mask)
+ rfault |= LED_FAULT_TIMEOUT;
+
+ if (fled_stat & fled_short_mask)
+ rfault |= LED_FAULT_SHORT_CIRCUIT;
+
+ if (fled_stat & MT6360_FLEDLVF_MASK)
+ rfault |= LED_FAULT_UNDER_VOLTAGE;
+
+ *fault = rfault;
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static const struct led_flash_ops mt6360_flash_ops = {
+ .flash_brightness_set = mt6360_flash_brightness_set,
+ .strobe_set = mt6360_strobe_set,
+ .strobe_get = mt6360_strobe_get,
+ .timeout_set = mt6360_timeout_set,
+ .fault_get = mt6360_fault_get,
+};
+
+static int mt6360_isnk_init_default_state(struct mt6360_led *led)
+{
+ struct mt6360_priv *priv = led->priv;
+ unsigned int regval;
+ u32 level;
+ int ret;
+
+ ret = regmap_read(priv->regmap, MT6360_REG_ISNK(led->led_no), &regval);
+ if (ret)
+ return ret;
+ level = regval & MT6360_ISNK_MASK;
+
+ ret = regmap_read(priv->regmap, MT6360_REG_RGBEN, &regval);
+ if (ret)
+ return ret;
+
+ if (!(regval & MT6360_ISNK_ENMASK(led->led_no)))
+ level = LED_OFF;
+
+ switch (led->default_state) {
+ case STATE_ON:
+ led->isnk.brightness = led->isnk.max_brightness;
+ break;
+ case STATE_KEEP:
+ led->isnk.brightness = min(level, led->isnk.max_brightness);
+ break;
+ default:
+ led->isnk.brightness = LED_OFF;
+ }
+
+ return mt6360_isnk_brightness_set(&led->isnk, led->isnk.brightness);
+}
+
+static int mt6360_flash_init_default_state(struct mt6360_led *led)
+{
+ struct led_classdev_flash *flash = &led->flash;
+ struct mt6360_priv *priv = led->priv;
+ u32 enable_mask = MT6360_TORCHEN_MASK | MT6360_FLCSEN_MASK(led->led_no);
+ u32 level;
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(priv->regmap, MT6360_REG_FLEDITOR(led->led_no),
+ &regval);
+ if (ret)
+ return ret;
+ level = regval & MT6360_ITORCH_MASK;
+
+ ret = regmap_read(priv->regmap, MT6360_REG_FLEDEN, &regval);
+ if (ret)
+ return ret;
+
+ if ((regval & enable_mask) == enable_mask)
+ level += 1;
+ else
+ level = LED_OFF;
+
+ switch (led->default_state) {
+ case STATE_ON:
+ flash->led_cdev.brightness = flash->led_cdev.max_brightness;
+ break;
+ case STATE_KEEP:
+ flash->led_cdev.brightness =
+ min(level, flash->led_cdev.max_brightness);
+ break;
+ default:
+ flash->led_cdev.brightness = LED_OFF;
+ }
+
+ return mt6360_torch_brightness_set(&flash->led_cdev,
+ flash->led_cdev.brightness);
+}
+
+#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS)
+static int mt6360_flash_external_strobe_set(struct v4l2_flash *v4l2_flash,
+ bool enable)
+{
+ struct led_classdev_flash *flash = v4l2_flash->fled_cdev;
+ struct mt6360_led *led = container_of(flash, struct mt6360_led, flash);
+ struct mt6360_priv *priv = led->priv;
+ u32 mask = MT6360_FLCSEN_MASK(led->led_no);
+ u32 val = enable ? mask : 0;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_FLEDEN, mask, val);
+ if (ret)
+ goto unlock;
+
+ if (enable)
+ priv->fled_strobe_used |= BIT(led->led_no);
+ else
+ priv->fled_strobe_used &= ~BIT(led->led_no);
+
+unlock:
+ mutex_unlock(&priv->lock);
+ return ret;
+}
+
+static const struct v4l2_flash_ops v4l2_flash_ops = {
+ .external_strobe_set = mt6360_flash_external_strobe_set,
+};
+
+static void mt6360_init_v4l2_flash_config(struct mt6360_led *led,
+ struct v4l2_flash_config *config)
+{
+ struct led_classdev *lcdev;
+ struct led_flash_setting *s = &config->intensity;
+
+ lcdev = &led->flash.led_cdev;
+
+ s->min = MT6360_ITORCH_MINUA;
+ s->step = MT6360_ITORCH_STEPUA;
+ s->val = s->max = s->min + (lcdev->max_brightness - 1) * s->step;
+
+ config->has_external_strobe = 1;
+ strscpy(config->dev_name, lcdev->dev->kobj.name,
+ sizeof(config->dev_name));
+
+ config->flash_faults = LED_FAULT_SHORT_CIRCUIT | LED_FAULT_TIMEOUT |
+ LED_FAULT_INPUT_VOLTAGE |
+ LED_FAULT_UNDER_VOLTAGE;
+}
+#else
+static const struct v4l2_flash_ops v4l2_flash_ops;
+static void mt6360_init_v4l2_flash_config(struct mt6360_led *led,
+ struct v4l2_flash_config *config)
+{
+}
+#endif
+
+static int mt6360_led_register(struct device *parent, struct mt6360_led *led,
+ struct led_init_data *init_data)
+{
+ struct mt6360_priv *priv = led->priv;
+ struct v4l2_flash_config v4l2_config = {0};
+ int ret;
+
+ if ((led->led_no == MT6360_LED_ISNK1 ||
+ led->led_no == MT6360_VIRTUAL_MULTICOLOR) &&
+ (priv->leds_active & BIT(MT6360_LED_ISNK1))) {
+ /*
+ * Change isink1 to SW control mode and disconnect it from the
+ * charger state
+ */
+ ret = regmap_update_bits(priv->regmap, MT6360_REG_RGBEN,
+ MT6360_CHRINDSEL_MASK,
+ MT6360_CHRINDSEL_MASK);
+ if (ret) {
+ dev_err(parent, "Failed to config ISNK1 to SW mode\n");
+ return ret;
+ }
+ }
+
+ switch (led->led_no) {
+ case MT6360_VIRTUAL_MULTICOLOR:
+ ret = mt6360_mc_brightness_set(&led->mc.led_cdev, LED_OFF);
+ if (ret) {
+ dev_err(parent,
+ "Failed to init multicolor brightness\n");
+ return ret;
+ }
+
+ ret = devm_led_classdev_multicolor_register_ext(parent,
+ &led->mc, init_data);
+ if (ret) {
+ dev_err(parent, "Couldn't register multicolor\n");
+ return ret;
+ }
+ break;
+ case MT6360_LED_ISNK1 ... MT6360_LED_ISNKML:
+ ret = mt6360_isnk_init_default_state(led);
+ if (ret) {
+ dev_err(parent, "Failed to init %d isnk state\n",
+ led->led_no);
+ return ret;
+ }
+
+ ret = devm_led_classdev_register_ext(parent, &led->isnk,
+ init_data);
+ if (ret) {
+ dev_err(parent, "Couldn't register isink %d\n",
+ led->led_no);
+ return ret;
+ }
+ break;
+ default:
+ ret = mt6360_flash_init_default_state(led);
+ if (ret) {
+ dev_err(parent, "Failed to init %d flash state\n",
+ led->led_no);
+ return ret;
+ }
+
+ ret = devm_led_classdev_flash_register_ext(parent, &led->flash,
+ init_data);
+ if (ret) {
+ dev_err(parent, "Couldn't register flash %d\n",
+ led->led_no);
+ return ret;
+ }
+
+ mt6360_init_v4l2_flash_config(led, &v4l2_config);
+ led->v4l2_flash = v4l2_flash_init(parent, init_data->fwnode,
+ &led->flash,
+ &v4l2_flash_ops,
+ &v4l2_config);
+ if (IS_ERR(led->v4l2_flash)) {
+ dev_err(parent, "Failed to register %d v4l2 sd\n",
+ led->led_no);
+ return PTR_ERR(led->v4l2_flash);
+ }
+ }
+
+ return 0;
+}
+
+static u32 clamp_align(u32 val, u32 min, u32 max, u32 step)
+{
+ u32 retval;
+
+ retval = clamp_val(val, min, max);
+ if (step > 1)
+ retval = rounddown(retval - min, step) + min;
+
+ return retval;
+}
+
+static int mt6360_init_isnk_properties(struct mt6360_led *led,
+ struct led_init_data *init_data)
+{
+ struct led_classdev *lcdev;
+ struct mt6360_priv *priv = led->priv;
+ struct fwnode_handle *child;
+ u32 step_uA = MT6360_ISNKRGB_STEPUA, max_uA = MT6360_ISNKRGB_MAXUA;
+ u32 val;
+ int num_color = 0, ret;
+
+ if (led->led_no == MT6360_VIRTUAL_MULTICOLOR) {
+ struct mc_subled *sub_led;
+
+ sub_led = devm_kzalloc(priv->dev,
+ sizeof(*sub_led) * MULTICOLOR_NUM_CHANNELS, GFP_KERNEL);
+ if (!sub_led)
+ return -ENOMEM;
+
+ fwnode_for_each_child_node(init_data->fwnode, child) {
+ u32 reg, color;
+
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret || reg > MT6360_LED_ISNK3 ||
+ priv->leds_active & BIT(reg))
+ return -EINVAL;
+
+ ret = fwnode_property_read_u32(child, "color", &color);
+ if (ret) {
+ dev_err(priv->dev,
+ "led %d, no color specified\n",
+ led->led_no);
+ return ret;
+ }
+
+ priv->leds_active |= BIT(reg);
+ sub_led[num_color].color_index = color;
+ sub_led[num_color].channel = reg;
+ num_color++;
+ }
+
+ if (num_color < 2) {
+ dev_err(priv->dev,
+ "Multicolor must include 2 or more led channel\n");
+ return -EINVAL;
+ }
+
+ led->mc.num_colors = num_color;
+ led->mc.subled_info = sub_led;
+
+ lcdev = &led->mc.led_cdev;
+ lcdev->brightness_set_blocking = mt6360_mc_brightness_set;
+ } else {
+ if (led->led_no == MT6360_LED_ISNKML) {
+ step_uA = MT6360_ISNKML_STEPUA;
+ max_uA = MT6360_ISNKML_MAXUA;
+ }
+
+ lcdev = &led->isnk;
+ lcdev->brightness_set_blocking = mt6360_isnk_brightness_set;
+ }
+
+ ret = fwnode_property_read_u32(init_data->fwnode, "led-max-microamp",
+ &val);
+ if (ret) {
+ dev_warn(priv->dev,
+ "led-max-microamp not specified, using the minimum\n");
+ val = step_uA;
+ } else {
+ val = clamp_align(val, 0, max_uA, step_uA);
+ }
+
+ lcdev->max_brightness = val / step_uA;
+
+ fwnode_property_read_string(init_data->fwnode, "linux,default-trigger",
+ &lcdev->default_trigger);
+
+ return 0;
+}
+
+static int mt6360_init_flash_properties(struct mt6360_led *led,
+ struct led_init_data *init_data)
+{
+ struct led_classdev_flash *flash = &led->flash;
+ struct led_classdev *lcdev = &flash->led_cdev;
+ struct mt6360_priv *priv = led->priv;
+ struct led_flash_setting *s;
+ u32 val;
+ int ret;
+
+ ret = fwnode_property_read_u32(init_data->fwnode, "led-max-microamp",
+ &val);
+ if (ret) {
+ dev_warn(priv->dev,
+ "led-max-microamp not specified, using the minimum\n");
+ val = MT6360_ITORCH_MINUA;
+ } else {
+ val = clamp_align(val, MT6360_ITORCH_MINUA, MT6360_ITORCH_MAXUA,
+ MT6360_ITORCH_STEPUA);
+ }
+
+ lcdev->max_brightness =
+ (val - MT6360_ITORCH_MINUA) / MT6360_ITORCH_STEPUA + 1;
+ lcdev->brightness_set_blocking = mt6360_torch_brightness_set;
+ lcdev->flags |= LED_DEV_CAP_FLASH;
+
+ ret = fwnode_property_read_u32(init_data->fwnode, "flash-max-microamp",
+ &val);
+ if (ret) {
+ dev_warn(priv->dev,
+ "flash-max-microamp not specified, using the minimum\n");
+ val = MT6360_ISTRB_MINUA;
+ } else {
+ val = clamp_align(val, MT6360_ISTRB_MINUA, MT6360_ISTRB_MAXUA,
+ MT6360_ISTRB_STEPUA);
+ }
+
+ s = &flash->brightness;
+ s->min = MT6360_ISTRB_MINUA;
+ s->step = MT6360_ISTRB_STEPUA;
+ s->val = s->max = val;
+
+ /*
+ * Always configure the minimum level when off to prevent a flash
+ * current spike.
+ */
+ ret = _mt6360_flash_brightness_set(flash, s->min);
+ if (ret)
+ return ret;
+
+ ret = fwnode_property_read_u32(init_data->fwnode,
+ "flash-max-timeout-us", &val);
+ if (ret) {
+ dev_warn(priv->dev,
+ "flash-max-timeout-us not specified, using the minimum\n");
+ val = MT6360_STRBTO_MINUS;
+ } else {
+ val = clamp_align(val, MT6360_STRBTO_MINUS, MT6360_STRBTO_MAXUS,
+ MT6360_STRBTO_STEPUS);
+ }
+
+ s = &flash->timeout;
+ s->min = MT6360_STRBTO_MINUS;
+ s->step = MT6360_STRBTO_STEPUS;
+ s->val = s->max = val;
+
+ flash->ops = &mt6360_flash_ops;
+
+ return 0;
+}
+
+static int mt6360_init_common_properties(struct mt6360_led *led,
+ struct led_init_data *init_data)
+{
+ const char *const states[] = { "off", "keep", "on" };
+ const char *str;
+ int ret;
+
+ if (!fwnode_property_read_string(init_data->fwnode,
+ "default-state", &str)) {
+ ret = match_string(states, ARRAY_SIZE(states), str);
+ if (ret < 0)
+ ret = STATE_OFF;
+
+ led->default_state = ret;
+ }
+
+ return 0;
+}
+
+static void mt6360_v4l2_flash_release(struct mt6360_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->leds_count; i++) {
+ struct mt6360_led *led = priv->leds + i;
+
+ if (led->v4l2_flash)
+ v4l2_flash_release(led->v4l2_flash);
+ }
+}
+
+static int mt6360_led_probe(struct platform_device *pdev)
+{
+ struct mt6360_priv *priv;
+ struct fwnode_handle *child;
+ size_t count;
+ int i = 0, ret;
+
+ count = device_get_child_node_count(&pdev->dev);
+ if (!count || count > MT6360_MAX_LEDS) {
+ dev_err(&pdev->dev,
+ "No child node or node count over max led number %zu\n",
+ count);
+ return -EINVAL;
+ }
+
+ priv = devm_kzalloc(&pdev->dev,
+ struct_size(priv, leds, count), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->leds_count = count;
+ priv->dev = &pdev->dev;
+ mutex_init(&priv->lock);
+
+ priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!priv->regmap) {
+ dev_err(&pdev->dev, "Failed to get parent regmap\n");
+ return -ENODEV;
+ }
+
+ device_for_each_child_node(&pdev->dev, child) {
+ struct mt6360_led *led = priv->leds + i;
+ struct led_init_data init_data = { .fwnode = child, };
+ u32 reg, led_color;
+
+ ret = fwnode_property_read_u32(child, "color", &led_color);
+ if (ret)
+ goto out_flash_release;
+
+ if (led_color == LED_COLOR_ID_RGB ||
+ led_color == LED_COLOR_ID_MULTI)
+ reg = MT6360_VIRTUAL_MULTICOLOR;
+ else {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret)
+ goto out_flash_release;
+
+ if (reg >= MT6360_MAX_LEDS) {
+ ret = -EINVAL;
+ goto out_flash_release;
+ }
+ }
+
+ if (priv->leds_active & BIT(reg)) {
+ ret = -EINVAL;
+ goto out_flash_release;
+ }
+ priv->leds_active |= BIT(reg);
+
+ led->led_no = reg;
+ led->priv = priv;
+
+ ret = mt6360_init_common_properties(led, &init_data);
+ if (ret)
+ goto out_flash_release;
+
+ if (reg == MT6360_VIRTUAL_MULTICOLOR ||
+ reg <= MT6360_LED_ISNKML)
+ ret = mt6360_init_isnk_properties(led, &init_data);
+ else
+ ret = mt6360_init_flash_properties(led, &init_data);
+
+ if (ret)
+ goto out_flash_release;
+
+ ret = mt6360_led_register(&pdev->dev, led, &init_data);
+ if (ret)
+ goto out_flash_release;
+
+ i++;
+ }
+
+ platform_set_drvdata(pdev, priv);
+ return 0;
+
+out_flash_release:
+ mt6360_v4l2_flash_release(priv);
+ return ret;
+}
+
+static int mt6360_led_remove(struct platform_device *pdev)
+{
+ struct mt6360_priv *priv = platform_get_drvdata(pdev);
+
+ mt6360_v4l2_flash_release(priv);
+ return 0;
+}
+
+static const struct of_device_id __maybe_unused mt6360_led_of_id[] = {
+ { .compatible = "mediatek,mt6360-led", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mt6360_led_of_id);
+
+static struct platform_driver mt6360_led_driver = {
+ .driver = {
+ .name = "mt6360-led",
+ .of_match_table = mt6360_led_of_id,
+ },
+ .probe = mt6360_led_probe,
+ .remove = mt6360_led_remove,
+};
+module_platform_driver(mt6360_led_driver);
+
+MODULE_AUTHOR("Gene Chen <gene_chen@richtek.com>");
+MODULE_DESCRIPTION("MT6360 LED Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index f4bb02f6e042..6a8ea94834fa 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -375,10 +375,8 @@ int led_classdev_register_ext(struct device *parent,
mutex_unlock(&led_cdev->led_access);
return PTR_ERR(led_cdev->dev);
}
- if (init_data && init_data->fwnode) {
- led_cdev->dev->fwnode = init_data->fwnode;
- led_cdev->dev->of_node = to_of_node(init_data->fwnode);
- }
+ if (init_data && init_data->fwnode)
+ device_set_node(led_cdev->dev, init_data->fwnode);
if (ret)
dev_warn(parent, "Led %s renamed to %s due to name collision",
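
The two removed assignments above suggest the shape of the device_set_node() helper that replaces them; a hedged sketch of the equivalent behaviour (example_set_node is a hypothetical stand-in, not the actual helper in the driver core):

/* Hypothetical stand-in mirroring the two removed assignments: the
 * helper sets both the fwnode and, when applicable, the of_node. */
static inline void example_set_node(struct device *dev,
				    struct fwnode_handle *fwnode)
{
	dev->fwnode = fwnode;
	dev->of_node = to_of_node(fwnode);
}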
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
deleted file mode 100644
index bc6b420637d6..000000000000
--- a/drivers/leds/leds-fsg.c
+++ /dev/null
@@ -1,193 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * LED Driver for the Freecom FSG-3
- *
- * Copyright (c) 2008 Rod Whitby <rod@whitby.id.au>
- *
- * Author: Rod Whitby <rod@whitby.id.au>
- *
- * Based on leds-spitz.c
- * Copyright 2005-2006 Openedhand Ltd.
- * Author: Richard Purdie <rpurdie@openedhand.com>
- */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/leds.h>
-#include <linux/module.h>
-#include <linux/io.h>
-#include <mach/hardware.h>
-
-#define FSG_LED_WLAN_BIT 0
-#define FSG_LED_WAN_BIT 1
-#define FSG_LED_SATA_BIT 2
-#define FSG_LED_USB_BIT 4
-#define FSG_LED_RING_BIT 5
-#define FSG_LED_SYNC_BIT 7
-
-static short __iomem *latch_address;
-static unsigned short latch_value;
-
-
-static void fsg_led_wlan_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- if (value) {
- latch_value &= ~(1 << FSG_LED_WLAN_BIT);
- *latch_address = latch_value;
- } else {
- latch_value |= (1 << FSG_LED_WLAN_BIT);
- *latch_address = latch_value;
- }
-}
-
-static void fsg_led_wan_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- if (value) {
- latch_value &= ~(1 << FSG_LED_WAN_BIT);
- *latch_address = latch_value;
- } else {
- latch_value |= (1 << FSG_LED_WAN_BIT);
- *latch_address = latch_value;
- }
-}
-
-static void fsg_led_sata_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- if (value) {
- latch_value &= ~(1 << FSG_LED_SATA_BIT);
- *latch_address = latch_value;
- } else {
- latch_value |= (1 << FSG_LED_SATA_BIT);
- *latch_address = latch_value;
- }
-}
-
-static void fsg_led_usb_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- if (value) {
- latch_value &= ~(1 << FSG_LED_USB_BIT);
- *latch_address = latch_value;
- } else {
- latch_value |= (1 << FSG_LED_USB_BIT);
- *latch_address = latch_value;
- }
-}
-
-static void fsg_led_sync_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- if (value) {
- latch_value &= ~(1 << FSG_LED_SYNC_BIT);
- *latch_address = latch_value;
- } else {
- latch_value |= (1 << FSG_LED_SYNC_BIT);
- *latch_address = latch_value;
- }
-}
-
-static void fsg_led_ring_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- if (value) {
- latch_value &= ~(1 << FSG_LED_RING_BIT);
- *latch_address = latch_value;
- } else {
- latch_value |= (1 << FSG_LED_RING_BIT);
- *latch_address = latch_value;
- }
-}
-
-
-static struct led_classdev fsg_wlan_led = {
- .name = "fsg:blue:wlan",
- .brightness_set = fsg_led_wlan_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-static struct led_classdev fsg_wan_led = {
- .name = "fsg:blue:wan",
- .brightness_set = fsg_led_wan_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-static struct led_classdev fsg_sata_led = {
- .name = "fsg:blue:sata",
- .brightness_set = fsg_led_sata_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-static struct led_classdev fsg_usb_led = {
- .name = "fsg:blue:usb",
- .brightness_set = fsg_led_usb_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-static struct led_classdev fsg_sync_led = {
- .name = "fsg:blue:sync",
- .brightness_set = fsg_led_sync_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-static struct led_classdev fsg_ring_led = {
- .name = "fsg:blue:ring",
- .brightness_set = fsg_led_ring_set,
- .flags = LED_CORE_SUSPENDRESUME,
-};
-
-
-static int fsg_led_probe(struct platform_device *pdev)
-{
- int ret;
-
- /* Map the LED chip select address space */
- latch_address = (unsigned short *) devm_ioremap(&pdev->dev,
- IXP4XX_EXP_BUS_BASE(2), 512);
- if (!latch_address)
- return -ENOMEM;
-
- latch_value = 0xffff;
- *latch_address = latch_value;
-
- ret = devm_led_classdev_register(&pdev->dev, &fsg_wlan_led);
- if (ret < 0)
- return ret;
-
- ret = devm_led_classdev_register(&pdev->dev, &fsg_wan_led);
- if (ret < 0)
- return ret;
-
- ret = devm_led_classdev_register(&pdev->dev, &fsg_sata_led);
- if (ret < 0)
- return ret;
-
- ret = devm_led_classdev_register(&pdev->dev, &fsg_usb_led);
- if (ret < 0)
- return ret;
-
- ret = devm_led_classdev_register(&pdev->dev, &fsg_sync_led);
- if (ret < 0)
- return ret;
-
- ret = devm_led_classdev_register(&pdev->dev, &fsg_ring_led);
- if (ret < 0)
- return ret;
-
- return ret;
-}
-
-static struct platform_driver fsg_led_driver = {
- .probe = fsg_led_probe,
- .driver = {
- .name = "fsg-led",
- },
-};
-
-module_platform_driver(fsg_led_driver);
-
-MODULE_AUTHOR("Rod Whitby <rod@whitby.id.au>");
-MODULE_DESCRIPTION("Freecom FSG-3 LED driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-lp50xx.c b/drivers/leds/leds-lp50xx.c
index 401df1e2e05d..50b195ff96ca 100644
--- a/drivers/leds/leds-lp50xx.c
+++ b/drivers/leds/leds-lp50xx.c
@@ -266,7 +266,6 @@ struct lp50xx_led {
struct led_classdev_mc mc_cdev;
struct lp50xx *priv;
unsigned long bank_modules;
- int led_intensity[LP50XX_LEDS_PER_MODULE];
u8 ctrl_bank_enabled;
int led_number;
};
diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
index d1657c46ee2f..9fdfc1b9a1a0 100644
--- a/drivers/leds/leds-lp55xx-common.c
+++ b/drivers/leds/leds-lp55xx-common.c
@@ -439,6 +439,8 @@ int lp55xx_init_device(struct lp55xx_chip *chip)
return -EINVAL;
if (pdata->enable_gpiod) {
+ gpiod_direction_output(pdata->enable_gpiod, 0);
+
gpiod_set_consumer_name(pdata->enable_gpiod, "LP55xx enable");
gpiod_set_value(pdata->enable_gpiod, 0);
usleep_range(1000, 2000); /* Keep enable down at least 1ms */
@@ -694,7 +696,7 @@ struct lp55xx_platform_data *lp55xx_of_populate_pdata(struct device *dev,
of_property_read_u8(np, "clock-mode", &pdata->clock_mode);
pdata->enable_gpiod = devm_gpiod_get_optional(dev, "enable",
- GPIOD_OUT_LOW);
+ GPIOD_ASIS);
if (IS_ERR(pdata->enable_gpiod))
return ERR_CAST(pdata->enable_gpiod);
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
index 225b765830bd..1473ced8664c 100644
--- a/drivers/leds/leds-tca6507.c
+++ b/drivers/leds/leds-tca6507.c
@@ -242,9 +242,7 @@ static int choose_times(int msec, int *c1p, int *c2p)
if (diff < 65536) {
int actual;
if (msec & 1) {
- c1 = *c2p;
- *c2p = *c1p;
- *c1p = c1;
+ swap(*c2p, *c1p);
}
actual = time_codes[*c1p] + time_codes[*c2p];
if (*c1p < *c2p)
@@ -643,9 +641,6 @@ static int tca6507_probe_gpios(struct device *dev,
tca->gpio.direction_output = tca6507_gpio_direction_output;
tca->gpio.set = tca6507_gpio_set_value;
tca->gpio.parent = dev;
-#ifdef CONFIG_OF_GPIO
- tca->gpio.of_node = of_node_get(dev_of_node(dev));
-#endif
err = gpiochip_add_data(&tca->gpio, tca);
if (err) {
tca->gpio.ngpio = 0;
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index 28b8581b44dd..d8c4d5664145 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -239,33 +239,11 @@ static struct ctl_table mac_hid_files[] = {
{ }
};
-/* dir in /proc/sys/dev */
-static struct ctl_table mac_hid_dir[] = {
- {
- .procname = "mac_hid",
- .maxlen = 0,
- .mode = 0555,
- .child = mac_hid_files,
- },
- { }
-};
-
-/* /proc/sys/dev itself, in case that is not there yet */
-static struct ctl_table mac_hid_root_dir[] = {
- {
- .procname = "dev",
- .maxlen = 0,
- .mode = 0555,
- .child = mac_hid_dir,
- },
- { }
-};
-
static struct ctl_table_header *mac_hid_sysctl_header;
static int __init mac_hid_init(void)
{
- mac_hid_sysctl_header = register_sysctl_table(mac_hid_root_dir);
+ mac_hid_sysctl_header = register_sysctl("dev/mac_hid", mac_hid_files);
if (!mac_hid_sysctl_header)
return -ENOMEM;
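
With the path-based API, the two boilerplate directory tables dropped above become a single call; intermediate directories such as "dev" are resolved from the path string. A hedged sketch of the pattern, reusing the mac_hid_files table from this diff (hdr and the init function name are illustrative):

static struct ctl_table_header *hdr;

static int __init example_sysctl_init(void)
{
	/* registers /proc/sys/dev/mac_hid/<entries in mac_hid_files> */
	hdr = register_sysctl("dev/mac_hid", mac_hid_files);
	return hdr ? 0 : -ENOMEM;
}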
diff --git a/drivers/macintosh/mediabay.c b/drivers/macintosh/mediabay.c
index eab7e83c11c4..b17660c022eb 100644
--- a/drivers/macintosh/mediabay.c
+++ b/drivers/macintosh/mediabay.c
@@ -703,7 +703,7 @@ static const struct mb_ops keylargo_mb_ops = {
* Therefore we do it all by polling the media bay once each tick.
*/
-static struct of_device_id media_bay_match[] =
+static const struct of_device_id media_bay_match[] =
{
{
.name = "media-bay",
diff --git a/drivers/mailbox/apple-mailbox.c b/drivers/mailbox/apple-mailbox.c
index 72942002a54a..496c4951ccb1 100644
--- a/drivers/mailbox/apple-mailbox.c
+++ b/drivers/mailbox/apple-mailbox.c
@@ -364,8 +364,8 @@ static const struct apple_mbox_hw apple_mbox_m3_hw = {
};
static const struct of_device_id apple_mbox_of_match[] = {
- { .compatible = "apple,t8103-asc-mailbox", .data = &apple_mbox_asc_hw },
- { .compatible = "apple,t8103-m3-mailbox", .data = &apple_mbox_m3_hw },
+ { .compatible = "apple,asc-mailbox-v4", .data = &apple_mbox_asc_hw },
+ { .compatible = "apple,m3-mailbox-v2", .data = &apple_mbox_m3_hw },
{}
};
MODULE_DEVICE_TABLE(of, apple_mbox_of_match);
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index 78073ad1f2f1..22acb51531cb 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1298,7 +1298,7 @@ static int flexrm_startup(struct mbox_chan *chan)
val = (num_online_cpus() < val) ? val / num_online_cpus() : 1;
cpumask_set_cpu((ring->num / val) % num_online_cpus(),
&ring->irq_aff_hint);
- ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
+ ret = irq_update_affinity_hint(ring->irq, &ring->irq_aff_hint);
if (ret) {
dev_err(ring->mbox->dev,
"failed to set IRQ affinity hint for ring%d\n",
@@ -1425,7 +1425,7 @@ static void flexrm_shutdown(struct mbox_chan *chan)
/* Release IRQ */
if (ring->irq_requested) {
- irq_set_affinity_hint(ring->irq, NULL);
+ irq_update_affinity_hint(ring->irq, NULL);
free_irq(ring->irq, ring);
ring->irq_requested = false;
}
@@ -1484,7 +1484,7 @@ static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
{
struct device *dev = msi_desc_to_dev(desc);
struct flexrm_mbox *mbox = dev_get_drvdata(dev);
- struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index];
+ struct flexrm_ring *ring = &mbox->rings[desc->msi_index];
/* Configure per-Ring MSI registers */
writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
@@ -1497,7 +1497,6 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
int index, ret = 0;
void __iomem *regs;
void __iomem *regs_end;
- struct msi_desc *desc;
struct resource *iomem;
struct flexrm_ring *ring;
struct flexrm_mbox *mbox;
@@ -1608,10 +1607,8 @@ static int flexrm_mbox_probe(struct platform_device *pdev)
goto fail_destroy_cmpl_pool;
/* Save alloced IRQ numbers for each ring */
- for_each_msi_entry(desc, dev) {
- ring = &mbox->rings[desc->platform.msi_index];
- ring->irq = desc->irq;
- }
+ for (index = 0; index < mbox->num_rings; index++)
+ mbox->rings[index].irq = msi_get_virq(dev, index);
/* Check availability of debugfs */
if (!debugfs_initialized())
diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c
index e41bd2f5ea46..ab24e731a782 100644
--- a/drivers/mailbox/hi3660-mailbox.c
+++ b/drivers/mailbox/hi3660-mailbox.c
@@ -44,14 +44,13 @@
#define MBOX_MSG_LEN 8
/**
- * Hi3660 mailbox channel information
+ * struct hi3660_chan_info - Hi3660 mailbox channel information
+ * @dst_irq: Interrupt vector for remote processor
+ * @ack_irq: Interrupt vector for local processor
*
* A channel can be used for TX or RX, it can trigger remote
* processor interrupt to notify remote processor and can receive
- * interrupt if has incoming message.
- *
- * @dst_irq: Interrupt vector for remote processor
- * @ack_irq: Interrupt vector for local processor
+ * interrupt if it has an incoming message.
*/
struct hi3660_chan_info {
unsigned int dst_irq;
@@ -59,16 +58,15 @@ struct hi3660_chan_info {
};
/**
- * Hi3660 mailbox controller data
- *
- * Mailbox controller includes 32 channels and can allocate
- * channel for message transferring.
- *
+ * struct hi3660_mbox - Hi3660 mailbox controller data
* @dev: Device to which it is attached
* @base: Base address of the register mapping region
* @chan: Representation of channels in mailbox controller
* @mchan: Representation of channel info
* @controller: Representation of a communication channel controller
+ *
+ * The mailbox controller includes 32 channels and can allocate a
+ * channel for message transfer.
*/
struct hi3660_mbox {
struct device *dev;
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index ffe36a6bef9e..544de2db6453 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -563,8 +563,8 @@ static int imx_mu_probe(struct platform_device *pdev)
size = sizeof(struct imx_sc_rpc_msg_max);
priv->msg = devm_kzalloc(dev, size, GFP_KERNEL);
- if (IS_ERR(priv->msg))
- return PTR_ERR(priv->msg);
+ if (!priv->msg)
+ return -ENOMEM;
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
diff --git a/drivers/mailbox/mailbox-mpfs.c b/drivers/mailbox/mailbox-mpfs.c
index 0d6e2231a2c7..4e34854d1238 100644
--- a/drivers/mailbox/mailbox-mpfs.c
+++ b/drivers/mailbox/mailbox-mpfs.c
@@ -232,7 +232,7 @@ static int mpfs_mbox_probe(struct platform_device *pdev)
}
static const struct of_device_id mpfs_mbox_of_match[] = {
- {.compatible = "microchip,polarfire-soc-mailbox", },
+ {.compatible = "microchip,mpfs-mailbox", },
{},
};
MODULE_DEVICE_TABLE(of, mpfs_mbox_of_match);
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index a8845b162dbf..2578e5aaa935 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -573,8 +573,11 @@ static int cmdq_probe(struct platform_device *pdev)
cmdq->clocks[alias_id].id = clk_names[alias_id];
cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
if (IS_ERR(cmdq->clocks[alias_id].clk)) {
- dev_err(dev, "failed to get gce clk: %d\n", alias_id);
- return PTR_ERR(cmdq->clocks[alias_id].clk);
+ of_node_put(node);
+ return dev_err_probe(dev,
+ PTR_ERR(cmdq->clocks[alias_id].clk),
+ "failed to get gce clk: %d\n",
+ alias_id);
}
}
}
@@ -582,8 +585,8 @@ static int cmdq_probe(struct platform_device *pdev)
cmdq->clocks[alias_id].id = clk_name;
cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
if (IS_ERR(cmdq->clocks[alias_id].clk)) {
- dev_err(dev, "failed to get gce clk\n");
- return PTR_ERR(cmdq->clocks[alias_id].clk);
+ return dev_err_probe(dev, PTR_ERR(cmdq->clocks[alias_id].clk),
+ "failed to get gce clk\n");
}
}
@@ -658,13 +661,13 @@ static const struct gce_plat gce_plat_v5 = {
.thread_nr = 24,
.shift = 3,
.control_by_sw = true,
- .gce_num = 2
+ .gce_num = 1
};
static const struct gce_plat gce_plat_v6 = {
.thread_nr = 24,
.shift = 3,
- .control_by_sw = false,
+ .control_by_sw = true,
.gce_num = 2
};
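
The cmdq_probe() conversion above leans on dev_err_probe(), which logs the message and returns the passed-in error in one expression, while quietly recording the reason instead of logging when the error is -EPROBE_DEFER. A minimal sketch of the idiom (the clock name is illustrative):

	clk = devm_clk_get(dev, "gce");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get gce clk\n");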
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 887a3704c12e..ed18936b8ce6 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -241,9 +241,11 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
if (ret)
return IRQ_NONE;
- val &= pchan->cmd_complete.status_mask;
- if (!val)
- return IRQ_NONE;
+ if (val) { /* Ensure GAS exists and value is non-zero */
+ val &= pchan->cmd_complete.status_mask;
+ if (!val)
+ return IRQ_NONE;
+ }
ret = pcc_chan_reg_read(&pchan->error, &val);
if (ret)
@@ -289,7 +291,7 @@ pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
pchan = chan_info + subspace_id;
chan = pchan->chan.mchan;
if (IS_ERR(chan) || chan->cl) {
- dev_err(dev, "Channel not found for idx: %d\n", subspace_id);
+ pr_err("Channel not found for idx: %d\n", subspace_id);
return ERR_PTR(-EBUSY);
}
dev = chan->mbox->dev;
diff --git a/drivers/mailbox/qcom-ipcc.c b/drivers/mailbox/qcom-ipcc.c
index f1d4f4679b17..c5d963222014 100644
--- a/drivers/mailbox/qcom-ipcc.c
+++ b/drivers/mailbox/qcom-ipcc.c
@@ -13,8 +13,6 @@
#include <dt-bindings/mailbox/qcom-ipcc.h>
-#define IPCC_MBOX_MAX_CHAN 48
-
/* IPCC Register offsets */
#define IPCC_REG_SEND_ID 0x0c
#define IPCC_REG_RECV_ID 0x10
@@ -52,9 +50,10 @@ struct qcom_ipcc {
struct device *dev;
void __iomem *base;
struct irq_domain *irq_domain;
- struct mbox_chan chan[IPCC_MBOX_MAX_CHAN];
- struct qcom_ipcc_chan_info mchan[IPCC_MBOX_MAX_CHAN];
+ struct mbox_chan *chans;
+ struct qcom_ipcc_chan_info *mchan;
struct mbox_controller mbox;
+ int num_chans;
int irq;
};
@@ -166,25 +165,37 @@ static struct mbox_chan *qcom_ipcc_mbox_xlate(struct mbox_controller *mbox,
struct qcom_ipcc *ipcc = to_qcom_ipcc(mbox);
struct qcom_ipcc_chan_info *mchan;
struct mbox_chan *chan;
- unsigned int i;
+ struct device *dev;
+ int chan_id;
+
+ dev = ipcc->dev;
if (ph->args_count != 2)
return ERR_PTR(-EINVAL);
- for (i = 0; i < IPCC_MBOX_MAX_CHAN; i++) {
- chan = &ipcc->chan[i];
- if (!chan->con_priv) {
- mchan = &ipcc->mchan[i];
- mchan->client_id = ph->args[0];
- mchan->signal_id = ph->args[1];
- chan->con_priv = mchan;
- break;
- }
+ for (chan_id = 0; chan_id < mbox->num_chans; chan_id++) {
+ chan = &ipcc->chans[chan_id];
+ mchan = chan->con_priv;
- chan = NULL;
+ if (!mchan)
+ break;
+ else if (mchan->client_id == ph->args[0] &&
+ mchan->signal_id == ph->args[1])
+ return ERR_PTR(-EBUSY);
}
- return chan ?: ERR_PTR(-EBUSY);
+ if (chan_id >= mbox->num_chans)
+ return ERR_PTR(-EBUSY);
+
+ mchan = devm_kzalloc(dev, sizeof(*mchan), GFP_KERNEL);
+ if (!mchan)
+ return ERR_PTR(-ENOMEM);
+
+ mchan->client_id = ph->args[0];
+ mchan->signal_id = ph->args[1];
+ chan->con_priv = mchan;
+
+ return chan;
}
static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
@@ -192,15 +203,49 @@ static const struct mbox_chan_ops ipcc_mbox_chan_ops = {
.shutdown = qcom_ipcc_mbox_shutdown,
};
-static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc)
+static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc,
+ struct device_node *controller_dn)
{
+ struct of_phandle_args curr_ph;
+ struct device_node *client_dn;
struct mbox_controller *mbox;
struct device *dev = ipcc->dev;
+ int i, j, ret;
+
+ /*
+ * Find out the number of clients interested in this mailbox
+ * and create channels accordingly.
+ */
+ ipcc->num_chans = 0;
+ for_each_node_with_property(client_dn, "mboxes") {
+ if (!of_device_is_available(client_dn))
+ continue;
+ i = of_count_phandle_with_args(client_dn,
+ "mboxes", "#mbox-cells");
+ for (j = 0; j < i; j++) {
+ ret = of_parse_phandle_with_args(client_dn, "mboxes",
+ "#mbox-cells", j, &curr_ph);
+ of_node_put(curr_ph.np);
+ if (!ret && curr_ph.np == controller_dn) {
+ ipcc->num_chans++;
+ break;
+ }
+ }
+ }
+
+ /* If no clients are found, skip registering as an mbox controller */
+ if (!ipcc->num_chans)
+ return 0;
+
+ ipcc->chans = devm_kcalloc(dev, ipcc->num_chans,
+ sizeof(struct mbox_chan), GFP_KERNEL);
+ if (!ipcc->chans)
+ return -ENOMEM;
mbox = &ipcc->mbox;
mbox->dev = dev;
- mbox->num_chans = IPCC_MBOX_MAX_CHAN;
- mbox->chans = ipcc->chan;
+ mbox->num_chans = ipcc->num_chans;
+ mbox->chans = ipcc->chans;
mbox->ops = &ipcc_mbox_chan_ops;
mbox->of_xlate = qcom_ipcc_mbox_xlate;
mbox->txdone_irq = false;
@@ -212,6 +257,8 @@ static int qcom_ipcc_setup_mbox(struct qcom_ipcc *ipcc)
static int qcom_ipcc_probe(struct platform_device *pdev)
{
struct qcom_ipcc *ipcc;
+ static int id;
+ char *name;
int ret;
ipcc = devm_kzalloc(&pdev->dev, sizeof(*ipcc), GFP_KERNEL);
@@ -228,27 +275,33 @@ static int qcom_ipcc_probe(struct platform_device *pdev)
if (ipcc->irq < 0)
return ipcc->irq;
+ name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "ipcc_%d", id++);
+ if (!name)
+ return -ENOMEM;
+
ipcc->irq_domain = irq_domain_add_tree(pdev->dev.of_node,
&qcom_ipcc_irq_ops, ipcc);
if (!ipcc->irq_domain)
return -ENOMEM;
- ret = qcom_ipcc_setup_mbox(ipcc);
+ ret = qcom_ipcc_setup_mbox(ipcc, pdev->dev.of_node);
if (ret)
goto err_mbox;
ret = devm_request_irq(&pdev->dev, ipcc->irq, qcom_ipcc_irq_fn,
- IRQF_TRIGGER_HIGH, "ipcc", ipcc);
+ IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND, name, ipcc);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register the irq: %d\n", ret);
- goto err_mbox;
+ goto err_req_irq;
}
- enable_irq_wake(ipcc->irq);
platform_set_drvdata(pdev, ipcc);
return 0;
+err_req_irq:
+ if (ipcc->num_chans)
+ mbox_controller_unregister(&ipcc->mbox);
err_mbox:
irq_domain_remove(ipcc->irq_domain);
diff --git a/drivers/mailbox/zynqmp-ipi-mailbox.c b/drivers/mailbox/zynqmp-ipi-mailbox.c
index f44079d62b1a..31a0fa914274 100644
--- a/drivers/mailbox/zynqmp-ipi-mailbox.c
+++ b/drivers/mailbox/zynqmp-ipi-mailbox.c
@@ -655,6 +655,7 @@ static int zynqmp_ipi_probe(struct platform_device *pdev)
mbox->pdata = pdata;
ret = zynqmp_ipi_mbox_probe(mbox, nc);
if (ret) {
+ of_node_put(nc);
dev_err(dev, "failed to probe subdev.\n");
ret = -EINVAL;
goto free_mbox_dev;
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 7af242de3202..eb4b5e52bd6f 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -121,8 +121,10 @@ struct journal_entry {
#define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)
struct journal_sector {
- __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
- __u8 mac[JOURNAL_MAC_PER_SECTOR];
+ struct_group(sectors,
+ __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
+ __u8 mac[JOURNAL_MAC_PER_SECTOR];
+ );
commit_id_t commit_id;
};
@@ -2870,7 +2872,8 @@ static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
wraparound_section(ic, &i);
for (j = 0; j < ic->journal_section_sectors; j++) {
struct journal_sector *js = access_journal(ic, i, j);
- memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
+ BUILD_BUG_ON(sizeof(js->sectors) != JOURNAL_SECTOR_DATA);
+ memset(&js->sectors, 0, sizeof(js->sectors));
js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
}
for (j = 0; j < ic->journal_section_entries; j++) {
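
struct_group() gives the grouped members a named mirror, so the memset above can be sized with sizeof(js->sectors) and cross-checked with BUILD_BUG_ON instead of a bare JOURNAL_SECTOR_DATA constant. A standalone sketch using a simplified stand-in for the macro (the real one in include/linux/stddef.h carries extra attributes; the member sizes here are illustrative):

#include <stdio.h>
#include <string.h>

/* simplified stand-in for the kernel's struct_group() */
#define struct_group(NAME, ...)			\
	union {					\
		struct { __VA_ARGS__ };		\
		struct { __VA_ARGS__ } NAME;	\
	}

struct journal_sector {
	struct_group(sectors,
		unsigned char entries[56];
		unsigned char mac[8];
	);
	unsigned long long commit_id;
};

int main(void)
{
	struct journal_sector js;

	/* one bounded memset covers both grouped members, not commit_id */
	memset(&js.sectors, 0, sizeof(js.sectors));
	printf("%zu\n", sizeof(js.sectors));	/* prints 64 */
	return 0;
}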
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 66ba16713f69..1b97a11d7151 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -162,71 +162,34 @@ static int linear_iterate_devices(struct dm_target *ti,
return fn(ti, lc->dev, lc->start, ti->len, data);
}
-#if IS_ENABLED(CONFIG_DAX_DRIVER)
-static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+#if IS_ENABLED(CONFIG_FS_DAX)
+static struct dax_device *linear_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
{
- long ret;
struct linear_c *lc = ti->private;
- struct block_device *bdev = lc->dev->bdev;
- struct dax_device *dax_dev = lc->dev->dax_dev;
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
- dev_sector = linear_map_sector(ti, sector);
- ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
- if (ret)
- return ret;
- return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
-}
+ sector_t sector = linear_map_sector(ti, *pgoff << PAGE_SECTORS_SHIFT);
-static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- struct linear_c *lc = ti->private;
- struct block_device *bdev = lc->dev->bdev;
- struct dax_device *dax_dev = lc->dev->dax_dev;
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
- dev_sector = linear_map_sector(ti, sector);
- if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
- return 0;
- return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
+ *pgoff = (get_start_sect(lc->dev->bdev) + sector) >> PAGE_SECTORS_SHIFT;
+ return lc->dev->dax_dev;
}
-static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
- struct linear_c *lc = ti->private;
- struct block_device *bdev = lc->dev->bdev;
- struct dax_device *dax_dev = lc->dev->dax_dev;
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
- dev_sector = linear_map_sector(ti, sector);
- if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
- return 0;
- return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+ struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
+
+ return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages)
{
- int ret;
- struct linear_c *lc = ti->private;
- struct block_device *bdev = lc->dev->bdev;
- struct dax_device *dax_dev = lc->dev->dax_dev;
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
-
- dev_sector = linear_map_sector(ti, sector);
- ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
- if (ret)
- return ret;
+ struct dax_device *dax_dev = linear_dax_pgoff(ti, &pgoff);
+
return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}
#else
#define linear_dax_direct_access NULL
-#define linear_dax_copy_from_iter NULL
-#define linear_dax_copy_to_iter NULL
#define linear_dax_zero_page_range NULL
#endif
@@ -244,8 +207,6 @@ static struct target_type linear_target = {
.prepare_ioctl = linear_prepare_ioctl,
.iterate_devices = linear_iterate_devices,
.direct_access = linear_dax_direct_access,
- .dax_copy_from_iter = linear_dax_copy_from_iter,
- .dax_copy_to_iter = linear_dax_copy_to_iter,
.dax_zero_page_range = linear_dax_zero_page_range,
};
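
The rewritten DAX helpers above all reduce the old bdev_dax_pgoff() dance to one line of arithmetic: map the target-relative sector, add the partition start sector, and shift sectors back into a page offset. A standalone sketch of that conversion, assuming 4KiB pages and 512-byte sectors (PAGE_SECTORS_SHIFT == 3); the sample partition offset is illustrative:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SECTORS_SHIFT 3	/* 4KiB pages / 512B sectors */

/* mirrors: *pgoff = (get_start_sect(bdev) + sector) >> PAGE_SECTORS_SHIFT */
static uint64_t dax_pgoff(uint64_t part_start_sect, uint64_t dev_sector)
{
	return (part_start_sect + dev_sector) >> PAGE_SECTORS_SHIFT;
}

int main(void)
{
	/* partition starting at sector 2048 (1MiB), page 5 of the target */
	uint64_t sector = 5 << PAGE_SECTORS_SHIFT;	/* pgoff -> sectors */

	printf("%llu\n", (unsigned long long)dax_pgoff(2048, sector)); /* 261 */
	return 0;
}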
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 0b3ef977ceeb..139b09b06eda 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -901,120 +901,34 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit
limits->io_min = limits->physical_block_size;
}
-#if IS_ENABLED(CONFIG_DAX_DRIVER)
-static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
- struct iov_iter *i)
+#if IS_ENABLED(CONFIG_FS_DAX)
+static struct dax_device *log_writes_dax_pgoff(struct dm_target *ti,
+ pgoff_t *pgoff)
{
- struct pending_block *block;
-
- if (!bytes)
- return 0;
-
- block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
- if (!block) {
- DMERR("Error allocating dax pending block");
- return -ENOMEM;
- }
-
- block->data = kzalloc(bytes, GFP_KERNEL);
- if (!block->data) {
- DMERR("Error allocating dax data space");
- kfree(block);
- return -ENOMEM;
- }
-
- /* write data provided via the iterator */
- if (!copy_from_iter(block->data, bytes, i)) {
- DMERR("Error copying dax data");
- kfree(block->data);
- kfree(block);
- return -EIO;
- }
-
- /* rewind the iterator so that the block driver can use it */
- iov_iter_revert(i, bytes);
-
- block->datalen = bytes;
- block->sector = bio_to_dev_sectors(lc, sector);
- block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;
-
- atomic_inc(&lc->pending_blocks);
- spin_lock_irq(&lc->blocks_lock);
- list_add_tail(&block->list, &lc->unflushed_blocks);
- spin_unlock_irq(&lc->blocks_lock);
- wake_up_process(lc->log_kthread);
+ struct log_writes_c *lc = ti->private;
- return 0;
+ *pgoff += (get_start_sect(lc->dev->bdev) >> PAGE_SECTORS_SHIFT);
+ return lc->dev->dax_dev;
}
static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn)
{
- struct log_writes_c *lc = ti->private;
- sector_t sector = pgoff * PAGE_SECTORS;
- int ret;
-
- ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages * PAGE_SIZE, &pgoff);
- if (ret)
- return ret;
- return dax_direct_access(lc->dev->dax_dev, pgoff, nr_pages, kaddr, pfn);
-}
-
-static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
- pgoff_t pgoff, void *addr, size_t bytes,
- struct iov_iter *i)
-{
- struct log_writes_c *lc = ti->private;
- sector_t sector = pgoff * PAGE_SECTORS;
- int err;
-
- if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
- return 0;
-
- /* Don't bother doing anything if logging has been disabled */
- if (!lc->logging_enabled)
- goto dax_copy;
-
- err = log_dax(lc, sector, bytes, i);
- if (err) {
- DMWARN("Error %d logging DAX write", err);
- return 0;
- }
-dax_copy:
- return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
-}
-
-static size_t log_writes_dax_copy_to_iter(struct dm_target *ti,
- pgoff_t pgoff, void *addr, size_t bytes,
- struct iov_iter *i)
-{
- struct log_writes_c *lc = ti->private;
- sector_t sector = pgoff * PAGE_SECTORS;
+ struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
- if (bdev_dax_pgoff(lc->dev->bdev, sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
- return 0;
- return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
+ return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages)
{
- int ret;
- struct log_writes_c *lc = ti->private;
- sector_t sector = pgoff * PAGE_SECTORS;
-
- ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages << PAGE_SHIFT,
- &pgoff);
- if (ret)
- return ret;
- return dax_zero_page_range(lc->dev->dax_dev, pgoff,
- nr_pages << PAGE_SHIFT);
+ struct dax_device *dax_dev = log_writes_dax_pgoff(ti, &pgoff);
+
+ return dax_zero_page_range(dax_dev, pgoff, nr_pages << PAGE_SHIFT);
}
#else
#define log_writes_dax_direct_access NULL
-#define log_writes_dax_copy_from_iter NULL
-#define log_writes_dax_copy_to_iter NULL
#define log_writes_dax_zero_page_range NULL
#endif
@@ -1032,8 +946,6 @@ static struct target_type log_writes_target = {
.iterate_devices = log_writes_iterate_devices,
.io_hints = log_writes_io_hints,
.direct_access = log_writes_dax_direct_access,
- .dax_copy_from_iter = log_writes_dax_copy_from_iter,
- .dax_copy_to_iter = log_writes_dax_copy_to_iter,
.dax_zero_page_range = log_writes_dax_zero_page_range,
};
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 90dc9cc48881..f4719b65e5e3 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -550,7 +550,6 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
return DM_MAPIO_REQUEUE;
}
clone->bio = clone->biotail = NULL;
- clone->rq_disk = bdev->bd_disk;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
*__clone = clone;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 6660b6b53d5b..e566115ec0bb 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -300,91 +300,40 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
-#if IS_ENABLED(CONFIG_DAX_DRIVER)
-static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+#if IS_ENABLED(CONFIG_FS_DAX)
+static struct dax_device *stripe_dax_pgoff(struct dm_target *ti, pgoff_t *pgoff)
{
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
struct stripe_c *sc = ti->private;
- struct dax_device *dax_dev;
struct block_device *bdev;
+ sector_t dev_sector;
uint32_t stripe;
- long ret;
- stripe_map_sector(sc, sector, &stripe, &dev_sector);
+ stripe_map_sector(sc, *pgoff * PAGE_SECTORS, &stripe, &dev_sector);
dev_sector += sc->stripe[stripe].physical_start;
- dax_dev = sc->stripe[stripe].dev->dax_dev;
bdev = sc->stripe[stripe].dev->bdev;
- ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages * PAGE_SIZE, &pgoff);
- if (ret)
- return ret;
- return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
+ *pgoff = (get_start_sect(bdev) + dev_sector) >> PAGE_SECTORS_SHIFT;
+ return sc->stripe[stripe].dev->dax_dev;
}
-static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
- struct stripe_c *sc = ti->private;
- struct dax_device *dax_dev;
- struct block_device *bdev;
- uint32_t stripe;
-
- stripe_map_sector(sc, sector, &stripe, &dev_sector);
- dev_sector += sc->stripe[stripe].physical_start;
- dax_dev = sc->stripe[stripe].dev->dax_dev;
- bdev = sc->stripe[stripe].dev->bdev;
+ struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
- if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
- return 0;
- return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
-}
-
-static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
- struct stripe_c *sc = ti->private;
- struct dax_device *dax_dev;
- struct block_device *bdev;
- uint32_t stripe;
-
- stripe_map_sector(sc, sector, &stripe, &dev_sector);
- dev_sector += sc->stripe[stripe].physical_start;
- dax_dev = sc->stripe[stripe].dev->dax_dev;
- bdev = sc->stripe[stripe].dev->bdev;
-
- if (bdev_dax_pgoff(bdev, dev_sector, ALIGN(bytes, PAGE_SIZE), &pgoff))
- return 0;
- return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
+ return dax_direct_access(dax_dev, pgoff, nr_pages, kaddr, pfn);
}
static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages)
{
- int ret;
- sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
- struct stripe_c *sc = ti->private;
- struct dax_device *dax_dev;
- struct block_device *bdev;
- uint32_t stripe;
-
- stripe_map_sector(sc, sector, &stripe, &dev_sector);
- dev_sector += sc->stripe[stripe].physical_start;
- dax_dev = sc->stripe[stripe].dev->dax_dev;
- bdev = sc->stripe[stripe].dev->bdev;
+ struct dax_device *dax_dev = stripe_dax_pgoff(ti, &pgoff);
- ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
- if (ret)
- return ret;
return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}
#else
#define stripe_dax_direct_access NULL
-#define stripe_dax_copy_from_iter NULL
-#define stripe_dax_copy_to_iter NULL
#define stripe_dax_zero_page_range NULL
#endif
@@ -521,8 +470,6 @@ static struct target_type stripe_target = {
.iterate_devices = stripe_iterate_devices,
.io_hints = stripe_io_hints,
.direct_access = stripe_dax_direct_access,
- .dax_copy_from_iter = stripe_dax_copy_from_iter,
- .dax_copy_to_iter = stripe_dax_copy_to_iter,
.dax_zero_page_range = stripe_dax_zero_page_range,
};
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index a05fcd50e1b9..e28c92478536 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -112,6 +112,7 @@ static struct attribute *dm_attrs[] = {
&dm_attr_rq_based_seq_io_merge_deadline.attr,
NULL,
};
+ATTRIBUTE_GROUPS(dm);
static const struct sysfs_ops dm_sysfs_ops = {
.show = dm_attr_show,
@@ -120,7 +121,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
static struct kobj_type dm_ktype = {
.sysfs_ops = &dm_sysfs_ops,
- .default_attrs = dm_attrs,
+ .default_groups = dm_groups,
.release = dm_kobject_release,
};
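
kobj_type dropped .default_attrs in favour of .default_groups, and ATTRIBUTE_GROUPS(dm) generates the group wrapper from the existing dm_attrs array. Roughly what the macro expands to (a hedged sketch; the real definition in include/linux/sysfs.h has a few more details):

/* roughly the expansion of ATTRIBUTE_GROUPS(dm) */
static const struct attribute_group dm_group = {
	.attrs = dm_attrs,
};
static const struct attribute_group *dm_groups[] = {
	&dm_group,
	NULL,
};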
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index aa173f5bdc3d..e43096cfe9e2 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -806,12 +806,14 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type)
EXPORT_SYMBOL_GPL(dm_table_set_type);
/* validate the dax capability of the target device span */
-int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
+static int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- int blocksize = *(int *) data;
+ if (dev->dax_dev)
+ return false;
- return !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len);
+ DMDEBUG("%pg: error: dax unsupported by block device", dev->bdev);
+ return true;
}
/* Check devices support synchronous DAX */
@@ -821,8 +823,8 @@ static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_de
return !dev->dax_dev || !dax_synchronous(dev->dax_dev);
}
-bool dm_table_supports_dax(struct dm_table *t,
- iterate_devices_callout_fn iterate_fn, int *blocksize)
+static bool dm_table_supports_dax(struct dm_table *t,
+ iterate_devices_callout_fn iterate_fn)
{
struct dm_target *ti;
unsigned i;
@@ -835,7 +837,7 @@ bool dm_table_supports_dax(struct dm_table *t,
return false;
if (!ti->type->iterate_devices ||
- ti->type->iterate_devices(ti, iterate_fn, blocksize))
+ ti->type->iterate_devices(ti, iterate_fn, NULL))
return false;
}
@@ -862,7 +864,6 @@ static int dm_table_determine_type(struct dm_table *t)
struct dm_target *tgt;
struct list_head *devices = dm_table_get_devices(t);
enum dm_queue_mode live_md_type = dm_get_md_type(t->md);
- int page_size = PAGE_SIZE;
if (t->type != DM_TYPE_NONE) {
/* target already set the table's type */
@@ -906,7 +907,7 @@ static int dm_table_determine_type(struct dm_table *t)
verify_bio_based:
/* We must use this table as bio-based */
t->type = DM_TYPE_BIO_BASED;
- if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) ||
+ if (dm_table_supports_dax(t, device_not_dax_capable) ||
(list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
t->type = DM_TYPE_DAX_BIO_BASED;
}
@@ -1976,7 +1977,6 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
bool wc = false, fua = false;
- int page_size = PAGE_SIZE;
int r;
/*
@@ -2010,9 +2010,9 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
}
blk_queue_write_cache(q, wc, fua);
- if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) {
+ if (dm_table_supports_dax(t, device_not_dax_capable)) {
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
- if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL))
+ if (dm_table_supports_dax(t, device_not_dax_synchronous_capable))
set_dax_synchronous(t->md->dax_dev);
}
else
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 4b8991cde223..4f31591d2d25 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -38,7 +38,7 @@
#define BITMAP_GRANULARITY PAGE_SIZE
#endif
-#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
+#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_FS_DAX)
#define DM_WRITECACHE_HAS_PMEM
#endif
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 662742a310cb..997ace47bbd5 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -489,7 +489,7 @@ static void start_io_acct(struct dm_io *io)
struct mapped_device *md = io->md;
struct bio *bio = io->orig_bio;
- io->start_time = bio_start_io_acct(bio);
+ bio_start_io_acct_time(bio, io->start_time);
if (unlikely(dm_stats_used(&md->stats)))
dm_stats_account_io(&md->stats, bio_data_dir(bio),
bio->bi_iter.bi_sector, bio_sectors(bio),
@@ -535,7 +535,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
io->md = md;
spin_lock_init(&io->endio_lock);
- start_io_acct(io);
+ io->start_time = jiffies;
return io;
}
@@ -637,7 +637,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
struct mapped_device *md)
{
struct block_device *bdev;
-
+ u64 part_off;
int r;
BUG_ON(td->dm_dev.bdev);
@@ -653,7 +653,7 @@ static int open_table_device(struct table_device *td, dev_t dev,
}
td->dm_dev.bdev = bdev;
- td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev);
+ td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off);
return 0;
}
@@ -1027,74 +1027,6 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
return ret;
}
-static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len)
-{
- struct mapped_device *md = dax_get_private(dax_dev);
- struct dm_table *map;
- bool ret = false;
- int srcu_idx;
-
- map = dm_get_live_table(md, &srcu_idx);
- if (!map)
- goto out;
-
- ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize);
-
-out:
- dm_put_live_table(md, srcu_idx);
-
- return ret;
-}
-
-static size_t dm_dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- struct mapped_device *md = dax_get_private(dax_dev);
- sector_t sector = pgoff * PAGE_SECTORS;
- struct dm_target *ti;
- long ret = 0;
- int srcu_idx;
-
- ti = dm_dax_get_live_target(md, sector, &srcu_idx);
-
- if (!ti)
- goto out;
- if (!ti->type->dax_copy_from_iter) {
- ret = copy_from_iter(addr, bytes, i);
- goto out;
- }
- ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i);
- out:
- dm_put_live_table(md, srcu_idx);
-
- return ret;
-}
-
-static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- struct mapped_device *md = dax_get_private(dax_dev);
- sector_t sector = pgoff * PAGE_SECTORS;
- struct dm_target *ti;
- long ret = 0;
- int srcu_idx;
-
- ti = dm_dax_get_live_target(md, sector, &srcu_idx);
-
- if (!ti)
- goto out;
- if (!ti->type->dax_copy_to_iter) {
- ret = copy_to_iter(addr, bytes, i);
- goto out;
- }
- ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i);
- out:
- dm_put_live_table(md, srcu_idx);
-
- return ret;
-}
-
static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
size_t nr_pages)
{
@@ -1510,9 +1442,6 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
ci->sector = bio->bi_iter.bi_sector;
}
-#define __dm_part_stat_sub(part, field, subnd) \
- (part_stat_get(part, field) -= (subnd))
-
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
@@ -1548,23 +1477,12 @@ static void __split_and_process_bio(struct mapped_device *md,
GFP_NOIO, &md->queue->bio_split);
ci.io->orig_bio = b;
- /*
- * Adjust IO stats for each split, otherwise upon queue
- * reentry there will be redundant IO accounting.
- * NOTE: this is a stop-gap fix, a proper fix involves
- * significant refactoring of DM core's bio splitting
- * (by eliminating DM's splitting and just using bio_split)
- */
- part_stat_lock();
- __dm_part_stat_sub(dm_disk(md)->part0,
- sectors[op_stat_group(bio_op(bio))], ci.sector_count);
- part_stat_unlock();
-
bio_chain(b, bio);
trace_block_split(b, bio->bi_iter.bi_sector);
submit_bio_noacct(bio);
}
}
+ start_io_acct(ci.io);
/* drop the extra reference count */
dm_io_dec_pending(ci.io, errno_to_blk_status(error));
@@ -1683,6 +1601,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
bioset_exit(&md->io_bs);
if (md->dax_dev) {
+ dax_remove_host(md->disk);
kill_dax(md->dax_dev);
put_dax(md->dax_dev);
md->dax_dev = NULL;
@@ -1778,15 +1697,21 @@ static struct mapped_device *alloc_dev(int minor)
md->disk->major = _major;
md->disk->first_minor = minor;
md->disk->minors = 1;
+ md->disk->flags |= GENHD_FL_NO_PART;
md->disk->fops = &dm_blk_dops;
md->disk->queue = md->queue;
md->disk->private_data = md;
sprintf(md->disk->disk_name, "dm-%d", minor);
- if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
- md->dax_dev = alloc_dax(md, md->disk->disk_name,
- &dm_dax_ops, 0);
- if (IS_ERR(md->dax_dev))
+ if (IS_ENABLED(CONFIG_FS_DAX)) {
+ md->dax_dev = alloc_dax(md, &dm_dax_ops);
+ if (IS_ERR(md->dax_dev)) {
+ md->dax_dev = NULL;
+ goto bad;
+ }
+ set_dax_nocache(md->dax_dev);
+ set_dax_nomc(md->dax_dev);
+ if (dax_add_host(md->dax_dev, md->disk))
goto bad;
}
@@ -2152,7 +2077,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock);
- blk_set_queue_dying(md->queue);
+ blk_mark_disk_dead(md->disk);
/*
* Take suspend_lock so that presuspend and postsuspend methods
@@ -3040,9 +2965,6 @@ static const struct block_device_operations dm_rq_blk_dops = {
static const struct dax_operations dm_dax_ops = {
.direct_access = dm_dax_direct_access,
- .dax_supported = dm_dax_supported,
- .copy_from_iter = dm_dax_copy_from_iter,
- .copy_to_iter = dm_dax_copy_to_iter,
.zero_page_range = dm_dax_zero_page_range,
};
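The dm.c accounting change above separates recording the start time from starting the accounting: alloc_io() stamps io->start_time, and start_io_acct() charges the statistics only after __split_and_process_bio() has reduced the bio to its final size, which is what lets the __dm_part_stat_sub() stop-gap go away. A minimal sketch of the back-dating helper, assuming kernel context:

    #include <linux/blkdev.h>

    /*
     * bio_start_io_acct(bio) starts accounting at "now"; the _time
     * variant starts it at a jiffies value captured earlier, so a bio
     * split after allocation is charged once, from the moment the
     * original I/O arrived.
     */
    static void start_acct_backdated(struct bio *bio, unsigned long start)
    {
            bio_start_io_acct_time(bio, start);
    }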
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 742d9c80efe1..9013dc1a7b00 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -73,10 +73,6 @@ bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
-bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn,
- int *blocksize);
-int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev,
- sector_t start, sector_t len, void *data);
void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index 7fbd41e156c9..1c8a06b77c85 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -574,7 +574,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
int ret = 0;
if (WARN(mddev->cluster_info->slot_number - 1 == le32_to_cpu(msg->slot),
- "node %d received it's own msg\n", le32_to_cpu(msg->slot)))
+ "node %d received its own msg\n", le32_to_cpu(msg->slot)))
return -1;
switch (le32_to_cpu(msg->type)) {
case METADATA_UPDATED:
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 41d6e2383517..4d38bd7dadd6 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -418,6 +418,12 @@ check_suspended:
rcu_read_lock();
if (is_suspended(mddev, bio)) {
DEFINE_WAIT(__wait);
+ /* Bail out if REQ_NOWAIT is set for the bio */
+ if (bio->bi_opf & REQ_NOWAIT) {
+ rcu_read_unlock();
+ bio_wouldblock_error(bio);
+ return;
+ }
for (;;) {
prepare_to_wait(&mddev->sb_wait, &__wait,
TASK_UNINTERRUPTIBLE);
@@ -3603,6 +3609,7 @@ static struct attribute *rdev_default_attrs[] = {
&rdev_ppl_size.attr,
NULL,
};
+ATTRIBUTE_GROUPS(rdev_default);
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
@@ -3652,7 +3659,7 @@ static const struct sysfs_ops rdev_sysfs_ops = {
static struct kobj_type rdev_ktype = {
.release = rdev_free,
.sysfs_ops = &rdev_sysfs_ops,
- .default_attrs = rdev_default_attrs,
+ .default_groups = rdev_default_groups,
};
int md_rdev_init(struct md_rdev *rdev)
@@ -5708,11 +5715,6 @@ static int md_alloc(dev_t dev, char *name)
mddev->queue = disk->queue;
blk_set_stacking_limits(&mddev->queue->limits);
blk_queue_write_cache(mddev->queue, true, true);
- /* Allow extended partitions. This makes the
- * 'mdp' device redundant, but we can't really
- * remove it now.
- */
- disk->flags |= GENHD_FL_EXT_DEVT;
disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
error = add_disk(disk);
@@ -5793,6 +5795,7 @@ int md_run(struct mddev *mddev)
int err;
struct md_rdev *rdev;
struct md_personality *pers;
+ bool nowait = true;
if (list_empty(&mddev->disks))
/* cannot run an array with no devices.. */
@@ -5863,6 +5866,7 @@ int md_run(struct mddev *mddev)
}
}
sysfs_notify_dirent_safe(rdev->sysfs_state);
+ nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
}
if (!bioset_initialized(&mddev->bio_set)) {
@@ -5875,13 +5879,6 @@ int md_run(struct mddev *mddev)
if (err)
goto exit_bio_set;
}
- if (mddev->level != 1 && mddev->level != 10 &&
- !bioset_initialized(&mddev->io_acct_set)) {
- err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
- offsetof(struct md_io_acct, bio_clone), 0);
- if (err)
- goto exit_sync_set;
- }
spin_lock(&pers_lock);
pers = find_pers(mddev->level, mddev->clevel);
@@ -6009,6 +6006,10 @@ int md_run(struct mddev *mddev)
else
blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
+
+ /* Set the NOWAIT flags if all underlying devices support it */
+ if (nowait)
+ blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
}
if (pers->sync_request) {
if (mddev->kobj.sd &&
@@ -6058,9 +6059,6 @@ bitmap_abort:
module_put(pers->owner);
md_bitmap_destroy(mddev);
abort:
- if (mddev->level != 1 && mddev->level != 10)
- bioset_exit(&mddev->io_acct_set);
-exit_sync_set:
bioset_exit(&mddev->sync_set);
exit_bio_set:
bioset_exit(&mddev->bio_set);
@@ -7010,6 +7008,15 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
if (!mddev->thread)
md_update_sb(mddev, 1);
/*
+ * If the new disk does not support REQ_NOWAIT,
+ * disable it on the whole MD.
+ */
+ if (!blk_queue_nowait(bdev_get_queue(rdev->bdev))) {
+ pr_info("%s: Disabling nowait because %s does not support nowait\n",
+ mdname(mddev), bdevname(rdev->bdev, b));
+ blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, mddev->queue);
+ }
+ /*
* Kick recovery, maybe this spare has to be added to the
* array immediately.
*/
@@ -8407,7 +8414,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
spin_lock(&pers_lock);
/* ensure module won't be unloaded */
if (!md_cluster_ops || !try_module_get(md_cluster_mod)) {
- pr_warn("can't find md-cluster module or get it's reference.\n");
+ pr_warn("can't find md-cluster module or get its reference.\n");
spin_unlock(&pers_lock);
return -ENOENT;
}
@@ -8594,6 +8601,23 @@ void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
}
EXPORT_SYMBOL_GPL(md_submit_discard_bio);
+int acct_bioset_init(struct mddev *mddev)
+{
+ int err = 0;
+
+ if (!bioset_initialized(&mddev->io_acct_set))
+ err = bioset_init(&mddev->io_acct_set, BIO_POOL_SIZE,
+ offsetof(struct md_io_acct, bio_clone), 0);
+ return err;
+}
+EXPORT_SYMBOL_GPL(acct_bioset_init);
+
+void acct_bioset_exit(struct mddev *mddev)
+{
+ bioset_exit(&mddev->io_acct_set);
+}
+EXPORT_SYMBOL_GPL(acct_bioset_exit);
+
static void md_end_io_acct(struct bio *bio)
{
struct md_io_acct *md_io_acct = bio->bi_private;
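The md.c hunks implement the REQ_NOWAIT contract: when forward progress would require sleeping (suspended array, barrier held), the bio must be completed with BLK_STS_AGAIN so callers such as io_uring can retry from a blocking context. A minimal sketch of the bail-out, assuming kernel context:

    #include <linux/bio.h>

    /* Return true when the caller must give up instead of sleeping. */
    static bool bail_if_nowait(struct bio *bio)
    {
            if (bio->bi_opf & REQ_NOWAIT) {
                    bio_wouldblock_error(bio);  /* completes with BLK_STS_AGAIN */
                    return true;
            }
            return false;
    }

hot_add_disk() complements this by clearing QUEUE_FLAG_NOWAIT again when a spare that cannot honor nowait joins the array.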
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 53ea7a6961de..f1bf3625ef4c 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -721,6 +721,8 @@ extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
extern void md_finish_reshape(struct mddev *mddev);
void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev,
struct bio *bio, sector_t start, sector_t size);
+int acct_bioset_init(struct mddev *mddev);
+void acct_bioset_exit(struct mddev *mddev);
void md_account_bio(struct mddev *mddev, struct bio **bio);
extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
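acct_bioset_init()/acct_bioset_exit() move ownership of the io_acct clone pool from md_run() to the personalities that actually call md_account_bio(); raid0 and raid5 below pair them in their run and free paths. A sketch of the pairing (example_run is a hypothetical personality hook):

    static int example_run(struct mddev *mddev)
    {
            if (acct_bioset_init(mddev))    /* allocates io_acct_set once */
                    return -ENOMEM;
            /* ... setup; on any later failure call acct_bioset_exit() ... */
            return 0;
    }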
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index cb670f16e98e..4ead31e0d8ce 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -9,6 +9,9 @@
#include "dm-transaction-manager.h"
#include <linux/export.h>
+#include <linux/device-mapper.h>
+
+#define DM_MSG_PREFIX "btree"
/*
* Removing an entry from a btree
@@ -79,15 +82,23 @@ static void node_shift(struct btree_node *n, int shift)
}
}
-static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
+static int node_copy(struct btree_node *left, struct btree_node *right, int shift)
{
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t value_size = le32_to_cpu(left->header.value_size);
- BUG_ON(value_size != le32_to_cpu(right->header.value_size));
+ if (value_size != le32_to_cpu(right->header.value_size)) {
+ DMERR("mismatched value size");
+ return -EILSEQ;
+ }
if (shift < 0) {
shift = -shift;
- BUG_ON(nr_left + shift > le32_to_cpu(left->header.max_entries));
+
+ if (nr_left + shift > le32_to_cpu(left->header.max_entries)) {
+ DMERR("bad shift");
+ return -EINVAL;
+ }
+
memcpy(key_ptr(left, nr_left),
key_ptr(right, 0),
shift * sizeof(__le64));
@@ -95,7 +106,11 @@ static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
value_ptr(right, 0),
shift * value_size);
} else {
- BUG_ON(shift > le32_to_cpu(right->header.max_entries));
+ if (shift > le32_to_cpu(right->header.max_entries)) {
+ DMERR("bad shift");
+ return -EINVAL;
+ }
+
memcpy(key_ptr(right, 0),
key_ptr(left, nr_left - shift),
shift * sizeof(__le64));
@@ -103,6 +118,7 @@ static void node_copy(struct btree_node *left, struct btree_node *right, int shift)
value_ptr(left, nr_left - shift),
shift * value_size);
}
+ return 0;
}
/*
@@ -170,35 +186,54 @@ static void exit_child(struct dm_btree_info *info, struct child *c)
dm_tm_unlock(info->tm, c->block);
}
-static void shift(struct btree_node *left, struct btree_node *right, int count)
+static int shift(struct btree_node *left, struct btree_node *right, int count)
{
+ int r;
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
uint32_t nr_right = le32_to_cpu(right->header.nr_entries);
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
uint32_t r_max_entries = le32_to_cpu(right->header.max_entries);
- BUG_ON(max_entries != r_max_entries);
- BUG_ON(nr_left - count > max_entries);
- BUG_ON(nr_right + count > max_entries);
+ if (max_entries != r_max_entries) {
+ DMERR("node max_entries mismatch");
+ return -EILSEQ;
+ }
+
+ if (nr_left - count > max_entries) {
+ DMERR("node shift out of bounds");
+ return -EINVAL;
+ }
+
+ if (nr_right + count > max_entries) {
+ DMERR("node shift out of bounds");
+ return -EINVAL;
+ }
if (!count)
- return;
+ return 0;
if (count > 0) {
node_shift(right, count);
- node_copy(left, right, count);
+ r = node_copy(left, right, count);
+ if (r)
+ return r;
} else {
- node_copy(left, right, count);
+ r = node_copy(left, right, count);
+ if (r)
+ return r;
node_shift(right, count);
}
left->header.nr_entries = cpu_to_le32(nr_left - count);
right->header.nr_entries = cpu_to_le32(nr_right + count);
+
+ return 0;
}
-static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
- struct child *l, struct child *r)
+static int __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *r)
{
+ int ret;
struct btree_node *left = l->n;
struct btree_node *right = r->n;
uint32_t nr_left = le32_to_cpu(left->header.nr_entries);
@@ -229,9 +264,12 @@ static void __rebalance2(struct dm_btree_info *info, struct btree_node *parent,
* Rebalance.
*/
unsigned target_left = (nr_left + nr_right) / 2;
- shift(left, right, nr_left - target_left);
+ ret = shift(left, right, nr_left - target_left);
+ if (ret)
+ return ret;
*key_ptr(parent, r->index) = right->keys[0];
}
+ return 0;
}
static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
@@ -253,12 +291,12 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
return r;
}
- __rebalance2(info, parent, &left, &right);
+ r = __rebalance2(info, parent, &left, &right);
exit_child(info, &left);
exit_child(info, &right);
- return 0;
+ return r;
}
/*
@@ -266,21 +304,30 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info,
* in right, then rebalance2. This wastes some cpu, but I want something
* simple atm.
*/
-static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
- struct child *l, struct child *c, struct child *r,
- struct btree_node *left, struct btree_node *center, struct btree_node *right,
- uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+static int delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r,
+ struct btree_node *left, struct btree_node *center, struct btree_node *right,
+ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
unsigned shift = min(max_entries - nr_left, nr_center);
- BUG_ON(nr_left + shift > max_entries);
+ if (nr_left + shift > max_entries) {
+ DMERR("node shift out of bounds");
+ return -EINVAL;
+ }
+
node_copy(left, center, -shift);
left->header.nr_entries = cpu_to_le32(nr_left + shift);
if (shift != nr_center) {
shift = nr_center - shift;
- BUG_ON((nr_right + shift) > max_entries);
+
+ if ((nr_right + shift) > max_entries) {
+ DMERR("node shift out of bounds");
+ return -EINVAL;
+ }
+
node_shift(right, shift);
node_copy(center, right, shift);
right->header.nr_entries = cpu_to_le32(nr_right + shift);
@@ -291,18 +338,18 @@ static void delete_center_node(struct dm_btree_info *info, struct btree_node *parent,
r->index--;
dm_tm_dec(info->tm, dm_block_location(c->block));
- __rebalance2(info, parent, l, r);
+ return __rebalance2(info, parent, l, r);
}
/*
* Redistributes entries among 3 sibling nodes.
*/
-static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
- struct child *l, struct child *c, struct child *r,
- struct btree_node *left, struct btree_node *center, struct btree_node *right,
- uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
+static int redistribute3(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r,
+ struct btree_node *left, struct btree_node *center, struct btree_node *right,
+ uint32_t nr_left, uint32_t nr_center, uint32_t nr_right)
{
- int s;
+ int s, ret;
uint32_t max_entries = le32_to_cpu(left->header.max_entries);
unsigned total = nr_left + nr_center + nr_right;
unsigned target_right = total / 3;
@@ -317,35 +364,55 @@ static void redistribute3(struct dm_btree_info *info, struct btree_node *parent,
if (s < 0 && nr_center < -s) {
/* not enough in central node */
- shift(left, center, -nr_center);
+ ret = shift(left, center, -nr_center);
+ if (ret)
+ return ret;
+
s += nr_center;
- shift(left, right, s);
- nr_right += s;
- } else
- shift(left, center, s);
+ ret = shift(left, right, s);
+ if (ret)
+ return ret;
- shift(center, right, target_right - nr_right);
+ nr_right += s;
+ } else {
+ ret = shift(left, center, s);
+ if (ret)
+ return ret;
+ }
+ ret = shift(center, right, target_right - nr_right);
+ if (ret)
+ return ret;
} else {
s = target_right - nr_right;
if (s > 0 && nr_center < s) {
/* not enough in central node */
- shift(center, right, nr_center);
+ ret = shift(center, right, nr_center);
+ if (ret)
+ return ret;
s -= nr_center;
- shift(left, right, s);
+ ret = shift(left, right, s);
+ if (ret)
+ return ret;
nr_left -= s;
- } else
- shift(center, right, s);
+ } else {
+ ret = shift(center, right, s);
+ if (ret)
+ return ret;
+ }
- shift(left, center, nr_left - target_left);
+ ret = shift(left, center, nr_left - target_left);
+ if (ret)
+ return ret;
}
*key_ptr(parent, c->index) = center->keys[0];
*key_ptr(parent, r->index) = right->keys[0];
+ return 0;
}
-static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
- struct child *l, struct child *c, struct child *r)
+static int __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
+ struct child *l, struct child *c, struct child *r)
{
struct btree_node *left = l->n;
struct btree_node *center = c->n;
@@ -357,15 +424,19 @@ static void __rebalance3(struct dm_btree_info *info, struct btree_node *parent,
unsigned threshold = merge_threshold(left) * 4 + 1;
- BUG_ON(left->header.max_entries != center->header.max_entries);
- BUG_ON(center->header.max_entries != right->header.max_entries);
+ if ((left->header.max_entries != center->header.max_entries) ||
+ (center->header.max_entries != right->header.max_entries)) {
+ DMERR("bad btree metadata, max_entries differ");
+ return -EILSEQ;
+ }
+
+ if ((nr_left + nr_center + nr_right) < threshold) {
+ return delete_center_node(info, parent, l, c, r, left, center, right,
+ nr_left, nr_center, nr_right);
+ }
- if ((nr_left + nr_center + nr_right) < threshold)
- delete_center_node(info, parent, l, c, r, left, center, right,
- nr_left, nr_center, nr_right);
- else
- redistribute3(info, parent, l, c, r, left, center, right,
- nr_left, nr_center, nr_right);
+ return redistribute3(info, parent, l, c, r, left, center, right,
+ nr_left, nr_center, nr_right);
}
static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
@@ -395,13 +466,13 @@ static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info,
return r;
}
- __rebalance3(info, parent, &left, &center, &right);
+ r = __rebalance3(info, parent, &left, &center, &right);
exit_child(info, &left);
exit_child(info, &center);
exit_child(info, &right);
- return 0;
+ return r;
}
static int rebalance_children(struct shadow_spine *s,
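The dm-btree-remove.c conversion is mechanical: each BUG_ON() that fires on corrupt on-disk metadata becomes a logged error return (-EILSEQ for inconsistent metadata, -EINVAL for out-of-range shifts), and the formerly void helpers grow int return types so rebalance2()/rebalance3() can propagate the error instead of crashing the machine. The shape of one conversion, assuming DM_MSG_PREFIX is defined as in the hunk above:

    /* before: BUG_ON(nr > max); after: */
    static int check_nr_entries(uint32_t nr, uint32_t max)
    {
            if (nr > max) {
                    DMERR("bad btree metadata");    /* log, don't panic */
                    return -EILSEQ;
            }
            return 0;
    }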
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index f5bd76ed8fe6..e653458888a7 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -15,10 +15,6 @@
#define BTREE_CSUM_XOR 121107
-static int node_check(struct dm_block_validator *v,
- struct dm_block *b,
- size_t block_size);
-
static void node_prepare_for_write(struct dm_block_validator *v,
struct dm_block *b,
size_t block_size)
@@ -40,7 +36,7 @@ static int node_check(struct dm_block_validator *v,
struct node_header *h = &n->header;
size_t value_size;
__le32 csum_disk;
- uint32_t flags;
+ uint32_t flags, nr_entries, max_entries;
if (dm_block_location(b) != le64_to_cpu(h->blocknr)) {
DMERR_LIMIT("node_check failed: blocknr %llu != wanted %llu",
@@ -57,15 +53,17 @@ static int node_check(struct dm_block_validator *v,
return -EILSEQ;
}
+ nr_entries = le32_to_cpu(h->nr_entries);
+ max_entries = le32_to_cpu(h->max_entries);
value_size = le32_to_cpu(h->value_size);
if (sizeof(struct node_header) +
- (sizeof(__le64) + value_size) * le32_to_cpu(h->max_entries) > block_size) {
+ (sizeof(__le64) + value_size) * max_entries > block_size) {
DMERR_LIMIT("node_check failed: max_entries too large");
return -EILSEQ;
}
- if (le32_to_cpu(h->nr_entries) > le32_to_cpu(h->max_entries)) {
+ if (nr_entries > max_entries) {
DMERR_LIMIT("node_check failed: too many entries");
return -EILSEQ;
}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 0703ca7a7d9a..5ce64e93aae7 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -81,14 +81,16 @@ void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
}
static int insert_at(size_t value_size, struct btree_node *node, unsigned index,
- uint64_t key, void *value)
- __dm_written_to_disk(value)
+ uint64_t key, void *value)
+ __dm_written_to_disk(value)
{
uint32_t nr_entries = le32_to_cpu(node->header.nr_entries);
+ uint32_t max_entries = le32_to_cpu(node->header.max_entries);
__le64 key_le = cpu_to_le64(key);
if (index > nr_entries ||
- index >= le32_to_cpu(node->header.max_entries)) {
+ index >= max_entries ||
+ nr_entries >= max_entries) {
DMERR("too many entries in btree node for insert");
__dm_unbless_for_disk(value);
return -ENOMEM;
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 4a6a2a9b4eb4..bfbfa750e016 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -283,6 +283,11 @@ int sm_ll_lookup_bitmap(struct ll_disk *ll, dm_block_t b, uint32_t *result)
struct disk_index_entry ie_disk;
struct dm_block *blk;
+ if (b >= ll->nr_blocks) {
+ DMERR_LIMIT("metadata block out of bounds");
+ return -EINVAL;
+ }
+
b = do_div(index, ll->entries_per_block);
r = ll->load_ie(ll, index, &ie_disk);
if (r < 0)
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 62c8b6adac70..b59a77b31b90 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -356,7 +356,21 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
return array_sectors;
}
-static void raid0_free(struct mddev *mddev, void *priv);
+static void free_conf(struct mddev *mddev, struct r0conf *conf)
+{
+ kfree(conf->strip_zone);
+ kfree(conf->devlist);
+ kfree(conf);
+ mddev->private = NULL;
+}
+
+static void raid0_free(struct mddev *mddev, void *priv)
+{
+ struct r0conf *conf = priv;
+
+ free_conf(mddev, conf);
+ acct_bioset_exit(mddev);
+}
static int raid0_run(struct mddev *mddev)
{
@@ -370,11 +384,16 @@ static int raid0_run(struct mddev *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
+ if (acct_bioset_init(mddev)) {
+ pr_err("md/raid0:%s: alloc acct bioset failed.\n", mdname(mddev));
+ return -ENOMEM;
+ }
+
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
ret = create_strip_zones(mddev, &conf);
if (ret < 0)
- return ret;
+ goto exit_acct_set;
mddev->private = conf;
}
conf = mddev->private;
@@ -413,17 +432,16 @@ static int raid0_run(struct mddev *mddev)
dump_zones(mddev);
ret = md_integrity_register(mddev);
+ if (ret)
+ goto free;
return ret;
-}
-static void raid0_free(struct mddev *mddev, void *priv)
-{
- struct r0conf *conf = priv;
-
- kfree(conf->strip_zone);
- kfree(conf->devlist);
- kfree(conf);
+free:
+ free_conf(mddev, conf);
+exit_acct_set:
+ acct_bioset_exit(mddev);
+ return ret;
}
static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
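raid0_run() now unwinds with the usual cascading labels, and raid0_free() shares free_conf() so normal teardown and the error path cannot drift apart. The shape, with do_setup() standing in for the strip-zone and limits work (hypothetical name):

    static int example_run(struct mddev *mddev)
    {
            int ret;

            if (acct_bioset_init(mddev))
                    return -ENOMEM;
            ret = do_setup(mddev);          /* hypothetical second step */
            if (ret)
                    goto exit_acct_set;
            return 0;

    exit_acct_set:
            acct_bioset_exit(mddev);
            return ret;
    }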
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
index 54db34163968..83f9a4f3d82e 100644
--- a/drivers/md/raid1-10.c
+++ b/drivers/md/raid1-10.c
@@ -22,12 +22,6 @@
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
-/* When there are this many requests queue to be written by
- * the raid thread, we become 'congested' to provide back-pressure
- * for writeback.
- */
-static int max_queued_requests = 1024;
-
/* for managing resync I/O pages */
struct resync_pages {
void *raid_bio;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 85505424f7a4..e2d8acb1e988 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -929,8 +929,10 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
wake_up(&conf->wait_barrier);
}
-static void _wait_barrier(struct r1conf *conf, int idx)
+static bool _wait_barrier(struct r1conf *conf, int idx, bool nowait)
{
+ bool ret = true;
+
/*
* We need to increase conf->nr_pending[idx] very early here,
* then raise_barrier() can be blocked when it waits for
@@ -961,7 +963,7 @@ static void _wait_barrier(struct r1conf *conf, int idx)
*/
if (!READ_ONCE(conf->array_frozen) &&
!atomic_read(&conf->barrier[idx]))
- return;
+ return ret;
/*
* After holding conf->resync_lock, conf->nr_pending[idx]
@@ -979,18 +981,27 @@ static void _wait_barrier(struct r1conf *conf, int idx)
*/
wake_up(&conf->wait_barrier);
/* Wait for the barrier in same barrier unit bucket to drop. */
- wait_event_lock_irq(conf->wait_barrier,
- !conf->array_frozen &&
- !atomic_read(&conf->barrier[idx]),
- conf->resync_lock);
- atomic_inc(&conf->nr_pending[idx]);
+
+ /* Return false when nowait flag is set */
+ if (nowait) {
+ ret = false;
+ } else {
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->array_frozen &&
+ !atomic_read(&conf->barrier[idx]),
+ conf->resync_lock);
+ atomic_inc(&conf->nr_pending[idx]);
+ }
+
atomic_dec(&conf->nr_waiting[idx]);
spin_unlock_irq(&conf->resync_lock);
+ return ret;
}
-static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
+static bool wait_read_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{
int idx = sector_to_idx(sector_nr);
+ bool ret = true;
/*
* Very similar to _wait_barrier(). The difference is, for read
@@ -1002,7 +1013,7 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
atomic_inc(&conf->nr_pending[idx]);
if (!READ_ONCE(conf->array_frozen))
- return;
+ return ret;
spin_lock_irq(&conf->resync_lock);
atomic_inc(&conf->nr_waiting[idx]);
@@ -1013,19 +1024,28 @@ static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
*/
wake_up(&conf->wait_barrier);
/* Wait for array to be unfrozen */
- wait_event_lock_irq(conf->wait_barrier,
- !conf->array_frozen,
- conf->resync_lock);
- atomic_inc(&conf->nr_pending[idx]);
+
+ /* Return false when nowait flag is set */
+ if (nowait) {
+ ret = false;
+ } else {
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->array_frozen,
+ conf->resync_lock);
+ atomic_inc(&conf->nr_pending[idx]);
+ }
+
atomic_dec(&conf->nr_waiting[idx]);
spin_unlock_irq(&conf->resync_lock);
+ return ret;
}
-static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
+static bool wait_barrier(struct r1conf *conf, sector_t sector_nr, bool nowait)
{
int idx = sector_to_idx(sector_nr);
- _wait_barrier(conf, idx);
+ return _wait_barrier(conf, idx, nowait);
}
static void _allow_barrier(struct r1conf *conf, int idx)
@@ -1236,7 +1256,11 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
* Still need barrier for READ in case that whole
* array is frozen.
*/
- wait_read_barrier(conf, bio->bi_iter.bi_sector);
+ if (!wait_read_barrier(conf, bio->bi_iter.bi_sector,
+ bio->bi_opf & REQ_NOWAIT)) {
+ bio_wouldblock_error(bio);
+ return;
+ }
if (!r1_bio)
r1_bio = alloc_r1bio(mddev, bio);
@@ -1336,6 +1360,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
bio->bi_iter.bi_sector, bio_end_sector(bio))) {
DEFINE_WAIT(w);
+ if (bio->bi_opf & REQ_NOWAIT) {
+ bio_wouldblock_error(bio);
+ return;
+ }
for (;;) {
prepare_to_wait(&conf->wait_barrier,
&w, TASK_IDLE);
@@ -1353,17 +1381,15 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
* thread has put up a bar for new requests.
* Continue immediately if no resync is active currently.
*/
- wait_barrier(conf, bio->bi_iter.bi_sector);
+ if (!wait_barrier(conf, bio->bi_iter.bi_sector,
+ bio->bi_opf & REQ_NOWAIT)) {
+ bio_wouldblock_error(bio);
+ return;
+ }
r1_bio = alloc_r1bio(mddev, bio);
r1_bio->sectors = max_write_sectors;
- if (conf->pending_count >= max_queued_requests) {
- md_wakeup_thread(mddev->thread);
- raid1_log(mddev, "wait queued");
- wait_event(conf->wait_barrier,
- conf->pending_count < max_queued_requests);
- }
/* first select target devices under rcu_lock and
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
@@ -1458,9 +1484,14 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
rdev_dec_pending(conf->mirrors[j].rdev, mddev);
r1_bio->state = 0;
allow_barrier(conf, bio->bi_iter.bi_sector);
+
+ if (bio->bi_opf & REQ_NOWAIT) {
+ bio_wouldblock_error(bio);
+ return;
+ }
raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
- wait_barrier(conf, bio->bi_iter.bi_sector);
+ wait_barrier(conf, bio->bi_iter.bi_sector, false);
goto retry_write;
}
@@ -1688,7 +1719,7 @@ static void close_sync(struct r1conf *conf)
int idx;
for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
- _wait_barrier(conf, idx);
+ _wait_barrier(conf, idx, false);
_allow_barrier(conf, idx);
}
@@ -3410,5 +3441,3 @@ MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");
-
-module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
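In raid1 the barrier helpers now return bool: nowait=true converts the wait_event into an immediate false, and every caller that passes bio->bi_opf & REQ_NOWAIT checks the result and fails the bio, while internal callers that are allowed to sleep (close_sync(), the blocked-rdev retry) pass false. The caller-side shape, as used in the hunks above:

    if (!wait_barrier(conf, bio->bi_iter.bi_sector,
                      bio->bi_opf & REQ_NOWAIT)) {
            bio_wouldblock_error(bio);
            return;
    }

raid10 follows with the same pattern in the next file.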
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index dde98f65bd04..2b969f70a31f 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -952,8 +952,10 @@ static void lower_barrier(struct r10conf *conf)
wake_up(&conf->wait_barrier);
}
-static void wait_barrier(struct r10conf *conf)
+static bool wait_barrier(struct r10conf *conf, bool nowait)
{
+ bool ret = true;
+
spin_lock_irq(&conf->resync_lock);
if (conf->barrier) {
struct bio_list *bio_list = current->bio_list;
@@ -967,27 +969,35 @@ static void wait_barrier(struct r10conf *conf)
* that queue to get the nr_pending
* count down.
*/
- raid10_log(conf->mddev, "wait barrier");
- wait_event_lock_irq(conf->wait_barrier,
- !conf->barrier ||
- (atomic_read(&conf->nr_pending) &&
- bio_list &&
- (!bio_list_empty(&bio_list[0]) ||
- !bio_list_empty(&bio_list[1]))) ||
- /* move on if recovery thread is
- * blocked by us
- */
- (conf->mddev->thread->tsk == current &&
- test_bit(MD_RECOVERY_RUNNING,
- &conf->mddev->recovery) &&
- conf->nr_queued > 0),
- conf->resync_lock);
+ /* Return false when nowait flag is set */
+ if (nowait) {
+ ret = false;
+ } else {
+ raid10_log(conf->mddev, "wait barrier");
+ wait_event_lock_irq(conf->wait_barrier,
+ !conf->barrier ||
+ (atomic_read(&conf->nr_pending) &&
+ bio_list &&
+ (!bio_list_empty(&bio_list[0]) ||
+ !bio_list_empty(&bio_list[1]))) ||
+ /* move on if recovery thread is
+ * blocked by us
+ */
+ (conf->mddev->thread->tsk == current &&
+ test_bit(MD_RECOVERY_RUNNING,
+ &conf->mddev->recovery) &&
+ conf->nr_queued > 0),
+ conf->resync_lock);
+ }
conf->nr_waiting--;
if (!conf->nr_waiting)
wake_up(&conf->wait_barrier);
}
- atomic_inc(&conf->nr_pending);
+ /* Only increment nr_pending when we wait */
+ if (ret)
+ atomic_inc(&conf->nr_pending);
spin_unlock_irq(&conf->resync_lock);
+ return ret;
}
static void allow_barrier(struct r10conf *conf)
@@ -1098,21 +1108,30 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
* currently.
* 2. If IO spans the reshape position. Need to wait for reshape to pass.
*/
-static void regular_request_wait(struct mddev *mddev, struct r10conf *conf,
+static bool regular_request_wait(struct mddev *mddev, struct r10conf *conf,
struct bio *bio, sector_t sectors)
{
- wait_barrier(conf);
+ /* Bail out if REQ_NOWAIT is set for the bio */
+ if (!wait_barrier(conf, bio->bi_opf & REQ_NOWAIT)) {
+ bio_wouldblock_error(bio);
+ return false;
+ }
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
bio->bi_iter.bi_sector < conf->reshape_progress &&
bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
- raid10_log(conf->mddev, "wait reshape");
allow_barrier(conf);
+ if (bio->bi_opf & REQ_NOWAIT) {
+ bio_wouldblock_error(bio);
+ return false;
+ }
+ raid10_log(conf->mddev, "wait reshape");
wait_event(conf->wait_barrier,
conf->reshape_progress <= bio->bi_iter.bi_sector ||
conf->reshape_progress >= bio->bi_iter.bi_sector +
sectors);
- wait_barrier(conf);
+ wait_barrier(conf, false);
}
+ return true;
}
static void raid10_read_request(struct mddev *mddev, struct bio *bio,
@@ -1157,7 +1176,8 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
rcu_read_unlock();
}
- regular_request_wait(mddev, conf, bio, r10_bio->sectors);
+ if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors))
+ return;
rdev = read_balance(conf, r10_bio, &max_sectors);
if (!rdev) {
if (err_rdev) {
@@ -1179,7 +1199,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
bio_chain(split, bio);
allow_barrier(conf);
submit_bio_noacct(bio);
- wait_barrier(conf);
+ wait_barrier(conf, false);
bio = split;
r10_bio->master_bio = bio;
r10_bio->sectors = max_sectors;
@@ -1338,7 +1358,7 @@ retry_wait:
raid10_log(conf->mddev, "%s wait rdev %d blocked",
__func__, blocked_rdev->raid_disk);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
- wait_barrier(conf);
+ wait_barrier(conf, false);
goto retry_wait;
}
}
@@ -1356,6 +1376,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
bio->bi_iter.bi_sector,
bio_end_sector(bio)))) {
DEFINE_WAIT(w);
+ /* Bail out if REQ_NOWAIT is set for the bio */
+ if (bio->bi_opf & REQ_NOWAIT) {
+ bio_wouldblock_error(bio);
+ return;
+ }
for (;;) {
prepare_to_wait(&conf->wait_barrier,
&w, TASK_IDLE);
@@ -1368,7 +1393,8 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
}
sectors = r10_bio->sectors;
- regular_request_wait(mddev, conf, bio, sectors);
+ if (!regular_request_wait(mddev, conf, bio, sectors))
+ return;
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
(mddev->reshape_backwards
? (bio->bi_iter.bi_sector < conf->reshape_safe &&
@@ -1380,6 +1406,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
set_mask_bits(&mddev->sb_flags, 0,
BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
md_wakeup_thread(mddev->thread);
+ if (bio->bi_opf & REQ_NOWAIT) {
+ allow_barrier(conf);
+ bio_wouldblock_error(bio);
+ return;
+ }
raid10_log(conf->mddev, "wait reshape metadata");
wait_event(mddev->sb_wait,
!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
@@ -1387,12 +1418,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
conf->reshape_safe = mddev->reshape_position;
}
- if (conf->pending_count >= max_queued_requests) {
- md_wakeup_thread(mddev->thread);
- raid10_log(mddev, "wait queued");
- wait_event(conf->wait_barrier,
- conf->pending_count < max_queued_requests);
- }
/* first select target devices under rcu_lock and
* inc refcount on their rdev. Record them by setting
* bios[x] to bio
@@ -1482,7 +1507,7 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
bio_chain(split, bio);
allow_barrier(conf);
submit_bio_noacct(bio);
- wait_barrier(conf);
+ wait_barrier(conf, false);
bio = split;
r10_bio->master_bio = bio;
}
@@ -1607,7 +1632,11 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
return -EAGAIN;
- wait_barrier(conf);
+ if (WARN_ON_ONCE(bio->bi_opf & REQ_NOWAIT)) {
+ bio_wouldblock_error(bio);
+ return 0;
+ }
+ wait_barrier(conf, false);
/*
* Check reshape again to avoid reshape happens after checking
@@ -1649,7 +1678,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
allow_barrier(conf);
/* Resend the first split part */
submit_bio_noacct(split);
- wait_barrier(conf);
+ wait_barrier(conf, false);
}
div_u64_rem(bio_end, stripe_size, &remainder);
if (remainder) {
@@ -1660,7 +1689,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
/* Resend the second split part */
submit_bio_noacct(bio);
bio = split;
- wait_barrier(conf);
+ wait_barrier(conf, false);
}
bio_start = bio->bi_iter.bi_sector;
@@ -1816,7 +1845,7 @@ retry_discard:
end_disk_offset += geo->stride;
atomic_inc(&first_r10bio->remaining);
raid_end_discard_bio(r10_bio);
- wait_barrier(conf);
+ wait_barrier(conf, false);
goto retry_discard;
}
@@ -2011,7 +2040,7 @@ static void print_conf(struct r10conf *conf)
static void close_sync(struct r10conf *conf)
{
- wait_barrier(conf);
+ wait_barrier(conf, false);
allow_barrier(conf);
mempool_exit(&conf->r10buf_pool);
@@ -4819,7 +4848,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
if (need_flush ||
time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
/* Need to update reshape_position in metadata */
- wait_barrier(conf);
+ wait_barrier(conf, false);
mddev->reshape_position = conf->reshape_progress;
if (mddev->reshape_backwards)
mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
@@ -5242,5 +5271,3 @@ MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");
-
-module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 9c1a5877cf9f..ffe720c73b0a 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2215,10 +2215,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
struct r5conf *conf = sh->raid_conf;
int level = conf->level;
struct raid5_percpu *percpu;
- unsigned long cpu;
- cpu = get_cpu();
- percpu = per_cpu_ptr(conf->percpu, cpu);
+ local_lock(&conf->percpu->lock);
+ percpu = this_cpu_ptr(conf->percpu);
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
@@ -2271,13 +2270,14 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
BUG();
}
- if (overlap_clear && !sh->batch_head)
+ if (overlap_clear && !sh->batch_head) {
for (i = disks; i--; ) {
struct r5dev *dev = &sh->dev[i];
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
- put_cpu();
+ }
+ local_unlock(&conf->percpu->lock);
}
static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
@@ -5686,6 +5686,10 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
struct stripe_head *sh;
int stripe_sectors;
+ /* We need to handle this when io_uring supports discard/trim */
+ if (WARN_ON_ONCE(bi->bi_opf & REQ_NOWAIT))
+ return;
+
if (mddev->reshape_position != MaxSector)
/* Skip discard while reshape is happening */
return;
@@ -5819,6 +5823,17 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
last_sector = bio_end_sector(bi);
bi->bi_next = NULL;
+ /* Bail out if the bio conflicts with reshape and REQ_NOWAIT is set */
+ if ((bi->bi_opf & REQ_NOWAIT) &&
+ (conf->reshape_progress != MaxSector) &&
+ (mddev->reshape_backwards
+ ? (logical_sector > conf->reshape_progress && logical_sector <= conf->reshape_safe)
+ : (logical_sector >= conf->reshape_safe && logical_sector < conf->reshape_progress))) {
+ bio_wouldblock_error(bi);
+ if (rw == WRITE)
+ md_write_end(mddev);
+ return true;
+ }
md_account_bio(mddev, &bi);
prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE);
for (; logical_sector < last_sector; logical_sector += RAID5_STRIPE_SECTORS(conf)) {
@@ -7052,6 +7067,7 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
return -ENOMEM;
}
+ local_lock_init(&percpu->lock);
return 0;
}
@@ -7446,12 +7462,19 @@ static int raid5_run(struct mddev *mddev)
struct md_rdev *rdev;
struct md_rdev *journal_dev = NULL;
sector_t reshape_offset = 0;
- int i;
+ int i, ret = 0;
long long min_offset_diff = 0;
int first = 1;
- if (mddev_init_writes_pending(mddev) < 0)
+ if (acct_bioset_init(mddev)) {
+ pr_err("md/raid456:%s: alloc acct bioset failed.\n", mdname(mddev));
return -ENOMEM;
+ }
+
+ if (mddev_init_writes_pending(mddev) < 0) {
+ ret = -ENOMEM;
+ goto exit_acct_set;
+ }
if (mddev->recovery_cp != MaxSector)
pr_notice("md/raid:%s: not clean -- starting background reconstruction\n",
@@ -7482,7 +7505,8 @@ static int raid5_run(struct mddev *mddev)
(mddev->bitmap_info.offset || mddev->bitmap_info.file)) {
pr_notice("md/raid:%s: array cannot have both journal and bitmap\n",
mdname(mddev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_acct_set;
}
if (mddev->reshape_position != MaxSector) {
@@ -7507,13 +7531,15 @@ static int raid5_run(struct mddev *mddev)
if (journal_dev) {
pr_warn("md/raid:%s: don't support reshape with journal - aborting.\n",
mdname(mddev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_acct_set;
}
if (mddev->new_level != mddev->level) {
pr_warn("md/raid:%s: unsupported reshape required - aborting.\n",
mdname(mddev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_acct_set;
}
old_disks = mddev->raid_disks - mddev->delta_disks;
/* reshape_position must be on a new-stripe boundary, and one
@@ -7529,7 +7555,8 @@ static int raid5_run(struct mddev *mddev)
if (sector_div(here_new, chunk_sectors * new_data_disks)) {
pr_warn("md/raid:%s: reshape_position not on a stripe boundary\n",
mdname(mddev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_acct_set;
}
reshape_offset = here_new * chunk_sectors;
/* here_new is the stripe we will write to */
@@ -7551,7 +7578,8 @@ static int raid5_run(struct mddev *mddev)
else if (mddev->ro == 0) {
pr_warn("md/raid:%s: in-place reshape must be started in read-only mode - aborting\n",
mdname(mddev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_acct_set;
}
} else if (mddev->reshape_backwards
? (here_new * chunk_sectors + min_offset_diff <=
@@ -7561,7 +7589,8 @@ static int raid5_run(struct mddev *mddev)
/* Reading from the same stripe as writing to - bad */
pr_warn("md/raid:%s: reshape_position too early for auto-recovery - aborting.\n",
mdname(mddev));
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit_acct_set;
}
pr_debug("md/raid:%s: reshape will continue\n", mdname(mddev));
/* OK, we should be able to continue; */
@@ -7585,8 +7614,10 @@ static int raid5_run(struct mddev *mddev)
else
conf = mddev->private;
- if (IS_ERR(conf))
- return PTR_ERR(conf);
+ if (IS_ERR(conf)) {
+ ret = PTR_ERR(conf);
+ goto exit_acct_set;
+ }
if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) {
if (!journal_dev) {
@@ -7783,7 +7814,10 @@ abort:
free_conf(conf);
mddev->private = NULL;
pr_warn("md/raid:%s: failed to run raid set.\n", mdname(mddev));
- return -EIO;
+ ret = -EIO;
+exit_acct_set:
+ acct_bioset_exit(mddev);
+ return ret;
}
static void raid5_free(struct mddev *mddev, void *priv)
@@ -7791,6 +7825,7 @@ static void raid5_free(struct mddev *mddev, void *priv)
struct r5conf *conf = priv;
free_conf(conf);
+ acct_bioset_exit(mddev);
mddev->to_remove = &raid5_attrs_group;
}
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 5c05acf20e1f..9e8486a9e445 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -4,6 +4,7 @@
#include <linux/raid/xor.h>
#include <linux/dmaengine.h>
+#include <linux/local_lock.h>
/*
*
@@ -640,7 +641,8 @@ struct r5conf {
* lists and performing address
* conversions
*/
- int scribble_obj_size;
+ int scribble_obj_size;
+ local_lock_t lock;
} __percpu *percpu;
int scribble_disks;
int scribble_sectors;
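The raid5 change swaps get_cpu()/put_cpu() for a local_lock so that PREEMPT_RT can back the critical section with a per-CPU spinlock rather than relying on disabled preemption; on non-RT configurations local_lock() still reduces to preempt_disable(), so the fast path is unchanged. The access pattern:

    local_lock(&conf->percpu->lock);        /* was: cpu = get_cpu()       */
    percpu = this_cpu_ptr(conf->percpu);    /* was: per_cpu_ptr(..., cpu) */
    /* ... use the per-CPU scratch buffers ... */
    local_unlock(&conf->percpu->lock);      /* was: put_cpu()             */

Each per-CPU lock instance is initialized once in alloc_scratch_buffer() via local_lock_init(), as in the hunk above.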
diff --git a/drivers/media/cec/core/cec-core.c b/drivers/media/cec/core/cec-core.c
index ec67065d5202..a3ab6a43fb14 100644
--- a/drivers/media/cec/core/cec-core.c
+++ b/drivers/media/cec/core/cec-core.c
@@ -106,7 +106,7 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
/* Part 1: Find a free minor number */
mutex_lock(&cec_devnode_lock);
- minor = find_next_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES, 0);
+ minor = find_first_zero_bit(cec_devnode_nums, CEC_NUM_DEVICES);
if (minor == CEC_NUM_DEVICES) {
mutex_unlock(&cec_devnode_lock);
pr_err("could not get a free minor\n");
diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
index f11382afe23b..680fbb3a9340 100644
--- a/drivers/media/mc/mc-devnode.c
+++ b/drivers/media/mc/mc-devnode.c
@@ -217,7 +217,7 @@ int __must_check media_devnode_register(struct media_device *mdev,
/* Part 1: Find a free minor number */
mutex_lock(&media_devnode_lock);
- minor = find_next_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES, 0);
+ minor = find_first_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES);
if (minor == MEDIA_NUM_DEVICES) {
mutex_unlock(&media_devnode_lock);
pr_err("could not get a free minor\n");
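Both media hunks are the same micro-cleanup: find_next_zero_bit(map, size, 0) is exactly find_first_zero_bit(map, size), and the _first_ variant has a dedicated, usually faster implementation. Sketched (nums/NUM stand in for the per-subsystem bitmap and its size):

    minor = find_next_zero_bit(nums, NUM, 0);   /* old spelling          */
    minor = find_first_zero_bit(nums, NUM);     /* equivalent, preferred */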
diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
index 481c5c3b577d..47029746b89e 100644
--- a/drivers/media/tuners/si2157.c
+++ b/drivers/media/tuners/si2157.c
@@ -446,7 +446,8 @@ static int si2157_set_params(struct dvb_frontend *fe)
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret;
struct si2157_cmd cmd;
- u8 bandwidth, delivery_system;
+ u8 bw, delivery_system;
+ u32 bandwidth;
u32 if_frequency = 5000000;
dev_dbg(&client->dev,
@@ -458,18 +459,22 @@ static int si2157_set_params(struct dvb_frontend *fe)
goto err;
}
- if (SUPPORTS_1700KHz(dev) && c->bandwidth_hz <= 1700000)
- bandwidth = 0x09;
- if (c->bandwidth_hz <= 6000000)
- bandwidth = 0x06;
- if (SUPPORTS_1700KHz(dev) && c->bandwidth_hz <= 6100000)
- bandwidth = 0x10;
- else if (c->bandwidth_hz <= 7000000)
- bandwidth = 0x07;
- else if (c->bandwidth_hz <= 8000000)
- bandwidth = 0x08;
- else
- bandwidth = 0x0f;
+ if (SUPPORTS_1700KHz(dev) && c->bandwidth_hz <= 1700000) {
+ bandwidth = 1700000;
+ bw = 9;
+ } else if (c->bandwidth_hz <= 6000000) {
+ bandwidth = 6000000;
+ bw = 6;
+ } else if (SUPPORTS_1700KHz(dev) && c->bandwidth_hz <= 6100000) {
+ bandwidth = 6100000;
+ bw = 10;
+ } else if (c->bandwidth_hz <= 7000000) {
+ bandwidth = 7000000;
+ bw = 7;
+ } else {
+ bandwidth = 8000000;
+ bw = 8;
+ }
switch (c->delivery_system) {
case SYS_ATSC:
@@ -485,6 +490,7 @@ static int si2157_set_params(struct dvb_frontend *fe)
delivery_system = 0x20;
break;
case SYS_DVBC_ANNEX_A:
+ case SYS_DVBC_ANNEX_C:
delivery_system = 0x30;
break;
case SYS_ISDBT:
@@ -499,7 +505,7 @@ static int si2157_set_params(struct dvb_frontend *fe)
}
memcpy(cmd.args, "\x14\x00\x03\x07\x00\x00", 6);
- cmd.args[4] = delivery_system | bandwidth;
+ cmd.args[4] = delivery_system | bw;
if (dev->inversion)
cmd.args[5] = 0x01;
cmd.wlen = 6;
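The si2157 fix separates two values the old code conflated in one u8: bw, the small firmware bandwidth code OR-ed into cmd.args[4], and bandwidth, the rounded bandwidth in Hz, which needs a u32. It also repairs the old chain, where the first two plain ifs could be silently overwritten by a later branch. One case of the new mapping:

    /* e.g. a 5 MHz request on a tuner without 1.7 MHz support: */
    bandwidth = 6000000;    /* rounded-up bandwidth in Hz (u32)       */
    bw = 6;                 /* firmware code, merged into cmd.args[4] */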
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index b94d5e4fdc23..e90adfa57950 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -300,8 +300,8 @@ mpt_is_discovery_complete(MPT_ADAPTER *ioc)
if (!hdr.ExtPageLength)
goto out;
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer)
goto out;
@@ -316,8 +316,8 @@ mpt_is_discovery_complete(MPT_ADAPTER *ioc)
rc = 1;
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return rc;
}
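The rest of the mptbase.c hunks are a mechanical conversion from the deprecated pci_* DMA wrappers to the generic DMA API; the mapping is one-to-one except that the allocation now takes an explicit gfp (GFP_KERNEL here, GFP_ATOMIC in mpt_alloc_fw_memory(), matching the hunks below). Sketched for a struct pci_dev *pdev:

    buf = dma_alloc_coherent(&pdev->dev, sz, &h, GFP_KERNEL);
                            /* was: pci_alloc_consistent(pdev, sz, &h)      */
    dma_free_coherent(&pdev->dev, sz, buf, h);
                            /* was: pci_free_consistent(pdev, sz, buf, h)   */
    dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
                            /* was: pci_set_dma_mask(pdev, ...)             */
    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                            /* was: pci_set_consistent_dma_mask(pdev, ...)  */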
@@ -1274,8 +1274,6 @@ mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req,
static int
mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag)
{
- int r = 0;
-
/* return if in use */
if (CHIPREG_READ32(&ioc->chip->Doorbell)
& MPI_DOORBELL_ACTIVE)
@@ -1289,9 +1287,9 @@ mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int slee
(access_control_value<<12)));
/* Wait for IOC to clear Doorbell Status bit */
- if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
+ if (WaitForDoorbellAck(ioc, 5, sleepFlag) < 0)
return -2;
- }else
+ else
return 0;
}
@@ -1663,16 +1661,14 @@ mpt_mapresources(MPT_ADAPTER *ioc)
const uint64_t required_mask = dma_get_required_mask
(&pdev->dev);
if (required_mask > DMA_BIT_MASK(32)
- && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
- && !pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(64))) {
+ && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))
+ && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
ioc->dma_mask = DMA_BIT_MASK(64);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
ioc->name));
- } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32))) {
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))
+ && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
ioc->dma_mask = DMA_BIT_MASK(32);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
@@ -1683,9 +1679,8 @@ mpt_mapresources(MPT_ADAPTER *ioc)
goto out_pci_release_region;
}
} else {
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32))) {
+ if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))
+ && !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
ioc->dma_mask = DMA_BIT_MASK(32);
dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
@@ -2771,9 +2766,9 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
if (ioc->spi_data.pIocPg4 != NULL) {
sz = ioc->spi_data.IocPg4Sz;
- pci_free_consistent(ioc->pcidev, sz,
- ioc->spi_data.pIocPg4,
- ioc->spi_data.IocPg4_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz,
+ ioc->spi_data.pIocPg4,
+ ioc->spi_data.IocPg4_dma);
ioc->spi_data.pIocPg4 = NULL;
ioc->alloc_total -= sz;
}
@@ -3517,7 +3512,8 @@ mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
rc = 0;
goto out;
}
- ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma);
+ ioc->cached_fw = dma_alloc_coherent(&ioc->pcidev->dev, size,
+ &ioc->cached_fw_dma, GFP_ATOMIC);
if (!ioc->cached_fw) {
printk(MYIOC_s_ERR_FMT "Unable to allocate memory for the cached firmware image!\n",
ioc->name);
@@ -3550,7 +3546,8 @@ mpt_free_fw_memory(MPT_ADAPTER *ioc)
sz = ioc->facts.FWImageSize;
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "free_fw_memory: FW Image @ %p[%p], sz=%d[%x] bytes\n",
ioc->name, ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
- pci_free_consistent(ioc->pcidev, sz, ioc->cached_fw, ioc->cached_fw_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->cached_fw,
+ ioc->cached_fw_dma);
ioc->alloc_total -= sz;
ioc->cached_fw = NULL;
}
@@ -4449,9 +4446,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
*/
if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
ioc->dma_mask > DMA_BIT_MASK(35)) {
- if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32))
- && !pci_set_consistent_dma_mask(ioc->pcidev,
- DMA_BIT_MASK(32))) {
+ if (!dma_set_mask(&ioc->pcidev->dev, DMA_BIT_MASK(32))
+ && !dma_set_coherent_mask(&ioc->pcidev->dev, DMA_BIT_MASK(32))) {
dma_mask = DMA_BIT_MASK(35);
d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"setting 35 bit addressing for "
@@ -4459,10 +4455,10 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
ioc->name));
} else {
/* Resetting DMA mask to 64 bit */
- pci_set_dma_mask(ioc->pcidev,
- DMA_BIT_MASK(64));
- pci_set_consistent_dma_mask(ioc->pcidev,
- DMA_BIT_MASK(64));
+ dma_set_mask(&ioc->pcidev->dev,
+ DMA_BIT_MASK(64));
+ dma_set_coherent_mask(&ioc->pcidev->dev,
+ DMA_BIT_MASK(64));
printk(MYIOC_s_ERR_FMT
"failed setting 35 bit addressing for "
@@ -4597,8 +4593,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
alloc_dma += ioc->reply_sz;
}
- if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
- ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev,
+ if (dma_mask == DMA_BIT_MASK(35) && !dma_set_mask(&ioc->pcidev->dev,
+ ioc->dma_mask) && !dma_set_coherent_mask(&ioc->pcidev->dev,
ioc->dma_mask))
d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"restoring 64 bit addressing\n", ioc->name));
@@ -4622,8 +4618,8 @@ out_fail:
ioc->sense_buf_pool = NULL;
}
- if (dma_mask == DMA_BIT_MASK(35) && !pci_set_dma_mask(ioc->pcidev,
- DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev,
+ if (dma_mask == DMA_BIT_MASK(35) && !dma_set_mask(&ioc->pcidev->dev,
+ DMA_BIT_MASK(64)) && !dma_set_coherent_mask(&ioc->pcidev->dev,
DMA_BIT_MASK(64)))
d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"restoring 64 bit addressing\n", ioc->name));
@@ -4970,7 +4966,8 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
if (hdr.PageLength > 0) {
data_sz = hdr.PageLength * 4;
- ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
+ ppage0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
+ &page0_dma, GFP_KERNEL);
rc = -ENOMEM;
if (ppage0_alloc) {
memset((u8 *)ppage0_alloc, 0, data_sz);
@@ -4984,7 +4981,8 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
}
- pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz,
+ (u8 *)ppage0_alloc, page0_dma);
/* FIXME!
* Normalize endianness of structure data,
@@ -5016,7 +5014,8 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
data_sz = hdr.PageLength * 4;
rc = -ENOMEM;
- ppage1_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
+ ppage1_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
+ &page1_dma, GFP_KERNEL);
if (ppage1_alloc) {
memset((u8 *)ppage1_alloc, 0, data_sz);
cfg.physAddr = page1_dma;
@@ -5028,7 +5027,8 @@ GetLanConfigPages(MPT_ADAPTER *ioc)
memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz);
}
- pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage1_alloc, page1_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz,
+ (u8 *)ppage1_alloc, page1_dma);
/* FIXME!
* Normalize endianness of structure data,
@@ -5317,7 +5317,8 @@ GetIoUnitPage2(MPT_ADAPTER *ioc)
/* Read the config page */
data_sz = hdr.PageLength * 4;
rc = -ENOMEM;
- ppage_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+ ppage_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
+ &page_dma, GFP_KERNEL);
if (ppage_alloc) {
memset((u8 *)ppage_alloc, 0, data_sz);
cfg.physAddr = page_dma;
@@ -5327,7 +5328,8 @@ GetIoUnitPage2(MPT_ADAPTER *ioc)
if ((rc = mpt_config(ioc, &cfg)) == 0)
ioc->biosVersion = le32_to_cpu(ppage_alloc->BiosVersion);
- pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage_alloc, page_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz,
+ (u8 *)ppage_alloc, page_dma);
}
return rc;
@@ -5402,7 +5404,9 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
return -EFAULT;
if (header.PageLength > 0) {
- pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
+ pbuf = dma_alloc_coherent(&ioc->pcidev->dev,
+ header.PageLength * 4, &buf_dma,
+ GFP_KERNEL);
if (pbuf) {
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
cfg.physAddr = buf_dma;
@@ -5458,7 +5462,9 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
}
}
if (pbuf) {
- pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
+ dma_free_coherent(&ioc->pcidev->dev,
+ header.PageLength * 4, pbuf,
+ buf_dma);
}
}
}
@@ -5480,7 +5486,9 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
if (header.PageLength > 0) {
/* Allocate memory and read SCSI Port Page 2
*/
- pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
+ pbuf = dma_alloc_coherent(&ioc->pcidev->dev,
+ header.PageLength * 4, &buf_dma,
+ GFP_KERNEL);
if (pbuf) {
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM;
cfg.physAddr = buf_dma;
@@ -5545,7 +5553,9 @@ mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
}
}
- pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
+ dma_free_coherent(&ioc->pcidev->dev,
+ header.PageLength * 4, pbuf,
+ buf_dma);
}
}
@@ -5661,8 +5671,8 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
if (!hdr.PageLength)
goto out;
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer)
goto out;
@@ -5709,8 +5719,8 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
out:
if (buffer)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
- dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ buffer, dma_handle);
}
/**
@@ -5754,8 +5764,8 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
rc = -ENOMEM;
@@ -5778,8 +5788,8 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
out:
if (buffer)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
- dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ buffer, dma_handle);
return rc;
}
@@ -5821,8 +5831,8 @@ mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
rc = 0;
@@ -5842,8 +5852,8 @@ mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
out:
if (buffer)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
- dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ buffer, dma_handle);
return rc;
}
@@ -5893,8 +5903,8 @@ mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
rc = -ENOMEM;
@@ -5931,8 +5941,8 @@ mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
out:
if (buffer)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
- dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ buffer, dma_handle);
return rc;
}
@@ -5988,7 +5998,8 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
return -EFAULT;
iocpage2sz = header.PageLength * 4;
- pIoc2 = pci_alloc_consistent(ioc->pcidev, iocpage2sz, &ioc2_dma);
+ pIoc2 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage2sz, &ioc2_dma,
+ GFP_KERNEL);
if (!pIoc2)
return -ENOMEM;
@@ -6013,7 +6024,7 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
pIoc2->RaidVolume[i].VolumeID);
out:
- pci_free_consistent(ioc->pcidev, iocpage2sz, pIoc2, ioc2_dma);
+ dma_free_coherent(&ioc->pcidev->dev, iocpage2sz, pIoc2, ioc2_dma);
return rc;
}
@@ -6055,7 +6066,8 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
/* Read Header good, alloc memory
*/
iocpage3sz = header.PageLength * 4;
- pIoc3 = pci_alloc_consistent(ioc->pcidev, iocpage3sz, &ioc3_dma);
+ pIoc3 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage3sz, &ioc3_dma,
+ GFP_KERNEL);
if (!pIoc3)
return 0;
@@ -6072,7 +6084,7 @@ mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
}
}
- pci_free_consistent(ioc->pcidev, iocpage3sz, pIoc3, ioc3_dma);
+ dma_free_coherent(&ioc->pcidev->dev, iocpage3sz, pIoc3, ioc3_dma);
return 0;
}
@@ -6106,7 +6118,8 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
if ( (pIoc4 = ioc->spi_data.pIocPg4) == NULL ) {
iocpage4sz = (header.PageLength + 4) * 4; /* Allow 4 additional SEP's */
- pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma);
+ pIoc4 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage4sz,
+ &ioc4_dma, GFP_KERNEL);
if (!pIoc4)
return;
ioc->alloc_total += iocpage4sz;
@@ -6124,7 +6137,8 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
ioc->spi_data.IocPg4_dma = ioc4_dma;
ioc->spi_data.IocPg4Sz = iocpage4sz;
} else {
- pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma);
+ dma_free_coherent(&ioc->pcidev->dev, iocpage4sz, pIoc4,
+ ioc4_dma);
ioc->spi_data.pIocPg4 = NULL;
ioc->alloc_total -= iocpage4sz;
}
@@ -6161,7 +6175,8 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
/* Read Header good, alloc memory
*/
iocpage1sz = header.PageLength * 4;
- pIoc1 = pci_alloc_consistent(ioc->pcidev, iocpage1sz, &ioc1_dma);
+ pIoc1 = dma_alloc_coherent(&ioc->pcidev->dev, iocpage1sz, &ioc1_dma,
+ GFP_KERNEL);
if (!pIoc1)
return;
@@ -6212,7 +6227,7 @@ mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
}
}
- pci_free_consistent(ioc->pcidev, iocpage1sz, pIoc1, ioc1_dma);
+ dma_free_coherent(&ioc->pcidev->dev, iocpage1sz, pIoc1, ioc1_dma);
return;
}
@@ -6241,7 +6256,8 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
goto out;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
- pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
+ pbuf = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &buf_dma, GFP_KERNEL);
if (!pbuf)
goto out;
@@ -6257,7 +6273,8 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
out:
if (pbuf)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4, pbuf,
+ buf_dma);
}
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
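Everything in the mptbase.c hunks above is one mechanical substitution: the deprecated pci_* DMA compat wrappers become the generic DMA API, which takes the struct device embedded in the pci_dev plus an explicit GFP context. The old wrapper was literally dma_alloc_coherent(&pdev->dev, size, dma, GFP_ATOMIC); spelling the call out lets these sleepable config-page paths use GFP_KERNEL instead. A minimal sketch of the pattern (helper names are illustrative, not driver code):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *cfg_page_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t *dma)
{
	/* generic API: the device and the allocation context are explicit */
	return dma_alloc_coherent(&pdev->dev, sz, dma, GFP_KERNEL);
}

static void cfg_page_free(struct pci_dev *pdev, size_t sz, void *buf,
			  dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, sz, buf, dma);
}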
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index ae433c150b37..03c8fb1795c2 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1041,14 +1041,15 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
* copying the data in this array into the correct place in the
* request and chain buffers.
*/
- sglbuf = pci_alloc_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf_dma);
+ sglbuf = dma_alloc_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES,
+ sglbuf_dma, GFP_KERNEL);
if (sglbuf == NULL)
goto free_and_fail;
if (sgdir & 0x04000000)
- dir = PCI_DMA_TODEVICE;
+ dir = DMA_TO_DEVICE;
else
- dir = PCI_DMA_FROMDEVICE;
+ dir = DMA_FROM_DEVICE;
/* At start:
* sgl = sglbuf = point to beginning of sg buffer
@@ -1062,9 +1063,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
while (bytes_allocd < bytes) {
this_alloc = min(alloc_sz, bytes-bytes_allocd);
buflist[buflist_ent].len = this_alloc;
- buflist[buflist_ent].kptr = pci_alloc_consistent(ioc->pcidev,
- this_alloc,
- &pa);
+ buflist[buflist_ent].kptr = dma_alloc_coherent(&ioc->pcidev->dev,
+ this_alloc,
+ &pa, GFP_KERNEL);
if (buflist[buflist_ent].kptr == NULL) {
alloc_sz = alloc_sz / 2;
if (alloc_sz == 0) {
@@ -1080,8 +1081,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
bytes_allocd += this_alloc;
sgl->FlagsLength = (0x10000000|sgdir|this_alloc);
- dma_addr = pci_map_single(ioc->pcidev,
- buflist[buflist_ent].kptr, this_alloc, dir);
+ dma_addr = dma_map_single(&ioc->pcidev->dev,
+ buflist[buflist_ent].kptr,
+ this_alloc, dir);
sgl->Address = dma_addr;
fragcnt++;
@@ -1140,9 +1142,11 @@ free_and_fail:
kptr = buflist[i].kptr;
len = buflist[i].len;
- pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
+ dma_free_coherent(&ioc->pcidev->dev, len, kptr,
+ dma_addr);
}
- pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf, *sglbuf_dma);
+ dma_free_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES, sglbuf,
+ *sglbuf_dma);
}
kfree(buflist);
return NULL;
@@ -1162,9 +1166,9 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
int n = 0;
if (sg->FlagsLength & 0x04000000)
- dir = PCI_DMA_TODEVICE;
+ dir = DMA_TO_DEVICE;
else
- dir = PCI_DMA_FROMDEVICE;
+ dir = DMA_FROM_DEVICE;
nib = (sg->FlagsLength & 0xF0000000) >> 28;
while (! (nib & 0x4)) { /* eob */
@@ -1179,8 +1183,10 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
dma_addr = sg->Address;
kptr = bl->kptr;
len = bl->len;
- pci_unmap_single(ioc->pcidev, dma_addr, len, dir);
- pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
+ dma_unmap_single(&ioc->pcidev->dev, dma_addr, len,
+ dir);
+ dma_free_coherent(&ioc->pcidev->dev, len, kptr,
+ dma_addr);
n++;
}
sg++;
@@ -1197,12 +1203,12 @@ kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTE
dma_addr = sg->Address;
kptr = bl->kptr;
len = bl->len;
- pci_unmap_single(ioc->pcidev, dma_addr, len, dir);
- pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
+ dma_unmap_single(&ioc->pcidev->dev, dma_addr, len, dir);
+ dma_free_coherent(&ioc->pcidev->dev, len, kptr, dma_addr);
n++;
}
- pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sgl, sgl_dma);
+ dma_free_coherent(&ioc->pcidev->dev, MAX_SGL_BYTES, sgl, sgl_dma);
kfree(buflist);
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "-SG: Free'd 1 SGL buf + %d kbufs!\n",
ioc->name, n));
@@ -2100,8 +2106,9 @@ mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __u
}
flagsLength |= karg.dataOutSize;
bufOut.len = karg.dataOutSize;
- bufOut.kptr = pci_alloc_consistent(
- ioc->pcidev, bufOut.len, &dma_addr_out);
+ bufOut.kptr = dma_alloc_coherent(&ioc->pcidev->dev,
+ bufOut.len,
+ &dma_addr_out, GFP_KERNEL);
if (bufOut.kptr == NULL) {
rc = -ENOMEM;
@@ -2134,8 +2141,9 @@ mptctl_do_mpt_command (MPT_ADAPTER *ioc, struct mpt_ioctl_command karg, void __u
flagsLength |= karg.dataInSize;
bufIn.len = karg.dataInSize;
- bufIn.kptr = pci_alloc_consistent(ioc->pcidev,
- bufIn.len, &dma_addr_in);
+ bufIn.kptr = dma_alloc_coherent(&ioc->pcidev->dev,
+ bufIn.len,
+ &dma_addr_in, GFP_KERNEL);
if (bufIn.kptr == NULL) {
rc = -ENOMEM;
@@ -2283,13 +2291,13 @@ done_free_mem:
/* Free the allocated memory.
*/
if (bufOut.kptr != NULL) {
- pci_free_consistent(ioc->pcidev,
- bufOut.len, (void *) bufOut.kptr, dma_addr_out);
+ dma_free_coherent(&ioc->pcidev->dev, bufOut.len,
+ (void *)bufOut.kptr, dma_addr_out);
}
if (bufIn.kptr != NULL) {
- pci_free_consistent(ioc->pcidev,
- bufIn.len, (void *) bufIn.kptr, dma_addr_in);
+ dma_free_coherent(&ioc->pcidev->dev, bufIn.len,
+ (void *)bufIn.kptr, dma_addr_in);
}
/* mf is null if command issued successfully
@@ -2395,7 +2403,9 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
/* Issue the second config page request */
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
- pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
+ pbuf = dma_alloc_coherent(&ioc->pcidev->dev,
+ hdr.PageLength * 4,
+ &buf_dma, GFP_KERNEL);
if (pbuf) {
cfg.physAddr = buf_dma;
if (mpt_config(ioc, &cfg) == 0) {
@@ -2405,7 +2415,9 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
pdata->BoardTracerNumber, 24);
}
}
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
+ dma_free_coherent(&ioc->pcidev->dev,
+ hdr.PageLength * 4, pbuf,
+ buf_dma);
pbuf = NULL;
}
}
@@ -2470,7 +2482,7 @@ mptctl_hp_hostinfo(MPT_ADAPTER *ioc, unsigned long arg, unsigned int data_size)
else
IstwiRWRequest->DeviceAddr = 0xB0;
- pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
+ pbuf = dma_alloc_coherent(&ioc->pcidev->dev, 4, &buf_dma, GFP_KERNEL);
if (!pbuf)
goto out;
ioc->add_sge((char *)&IstwiRWRequest->SGL,
@@ -2519,7 +2531,7 @@ retry_wait:
SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
if (pbuf)
- pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
+ dma_free_coherent(&ioc->pcidev->dev, 4, pbuf, buf_dma);
/* Copy the data from kernel memory to user memory
*/
@@ -2585,7 +2597,8 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
/* Get the data transfer speeds
*/
data_sz = ioc->spi_data.sdp0length * 4;
- pg0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+ pg0_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz, &page_dma,
+ GFP_KERNEL);
if (pg0_alloc) {
hdr.PageVersion = ioc->spi_data.sdp0version;
hdr.PageLength = data_sz;
@@ -2623,7 +2636,8 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
}
- pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg0_alloc, page_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz, (u8 *)pg0_alloc,
+ page_dma);
}
/* Set defaults
@@ -2649,7 +2663,8 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
/* Issue the second config page request */
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
data_sz = (int) cfg.cfghdr.hdr->PageLength * 4;
- pg3_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+ pg3_alloc = dma_alloc_coherent(&ioc->pcidev->dev, data_sz,
+ &page_dma, GFP_KERNEL);
if (pg3_alloc) {
cfg.physAddr = page_dma;
cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
@@ -2658,7 +2673,8 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
karg.phase_errors = (u32) le16_to_cpu(pg3_alloc->PhaseErrorCount);
karg.parity_errors = (u32) le16_to_cpu(pg3_alloc->ParityErrorCount);
}
- pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg3_alloc, page_dma);
+ dma_free_coherent(&ioc->pcidev->dev, data_sz,
+ (u8 *)pg3_alloc, page_dma);
}
}
hd = shost_priv(ioc->sh);
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 117fa4ebf6d7..142eb5d5d9df 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -516,9 +516,9 @@ mpt_lan_close(struct net_device *dev)
if (priv->RcvCtl[i].skb != NULL) {
/**/ dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
/**/ "is still out\n", i));
- pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
- priv->RcvCtl[i].len,
- PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[i].dma,
+ priv->RcvCtl[i].len, DMA_FROM_DEVICE);
dev_kfree_skb(priv->RcvCtl[i].skb);
}
}
@@ -528,9 +528,9 @@ mpt_lan_close(struct net_device *dev)
for (i = 0; i < priv->tx_max_out; i++) {
if (priv->SendCtl[i].skb != NULL) {
- pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
- priv->SendCtl[i].len,
- PCI_DMA_TODEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev,
+ priv->SendCtl[i].dma,
+ priv->SendCtl[i].len, DMA_TO_DEVICE);
dev_kfree_skb(priv->SendCtl[i].skb);
}
}
@@ -582,8 +582,8 @@ mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
- priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev, priv->SendCtl[ctx].dma,
+ priv->SendCtl[ctx].len, DMA_TO_DEVICE);
dev_kfree_skb_irq(sent);
spin_lock_irqsave(&priv->txfidx_lock, flags);
@@ -648,8 +648,9 @@ mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
__func__, sent));
priv->SendCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
- priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev,
+ priv->SendCtl[ctx].dma,
+ priv->SendCtl[ctx].len, DMA_TO_DEVICE);
dev_kfree_skb_irq(sent);
priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
@@ -720,8 +721,8 @@ mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
skb_reset_mac_header(skb);
skb_pull(skb, 12);
- dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma = dma_map_single(&mpt_dev->pcidev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
priv->SendCtl[ctx].skb = skb;
priv->SendCtl[ctx].dma = dma;
@@ -868,13 +869,17 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
return -ENOMEM;
}
- pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
- pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
goto out;
}
@@ -882,8 +887,8 @@ mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
priv->RcvCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
out:
spin_lock_irqsave(&priv->rxfidx_lock, flags);
@@ -927,8 +932,8 @@ mpt_lan_receive_post_free(struct net_device *dev,
// dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
priv->RcvCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
@@ -1028,16 +1033,16 @@ mpt_lan_receive_post_reply(struct net_device *dev,
// IOC_AND_NETDEV_NAMES_s_s(dev),
// i, l));
- pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
- priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, l), l);
- pci_dma_sync_single_for_device(mpt_dev->pcidev,
- priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
szrem -= l;
@@ -1056,17 +1061,17 @@ mpt_lan_receive_post_reply(struct net_device *dev,
return -ENOMEM;
}
- pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
- priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
skb_copy_from_linear_data(old_skb, skb_put(skb, len), len);
- pci_dma_sync_single_for_device(mpt_dev->pcidev,
- priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&mpt_dev->pcidev->dev,
+ priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len,
+ DMA_FROM_DEVICE);
spin_lock_irqsave(&priv->rxfidx_lock, flags);
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
@@ -1077,8 +1082,8 @@ mpt_lan_receive_post_reply(struct net_device *dev,
priv->RcvCtl[ctx].skb = NULL;
- pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
- priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&mpt_dev->pcidev->dev, priv->RcvCtl[ctx].dma,
+ priv->RcvCtl[ctx].len, DMA_FROM_DEVICE);
priv->RcvCtl[ctx].dma = 0;
priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
@@ -1199,10 +1204,10 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
skb = priv->RcvCtl[ctx].skb;
if (skb && (priv->RcvCtl[ctx].len != len)) {
- pci_unmap_single(mpt_dev->pcidev,
+ dma_unmap_single(&mpt_dev->pcidev->dev,
priv->RcvCtl[ctx].dma,
priv->RcvCtl[ctx].len,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
dev_kfree_skb(priv->RcvCtl[ctx].skb);
skb = priv->RcvCtl[ctx].skb = NULL;
}
@@ -1218,8 +1223,9 @@ mpt_lan_post_receive_buckets(struct mpt_lan_priv *priv)
break;
}
- dma = pci_map_single(mpt_dev->pcidev, skb->data,
- len, PCI_DMA_FROMDEVICE);
+ dma = dma_map_single(&mpt_dev->pcidev->dev,
+ skb->data, len,
+ DMA_FROM_DEVICE);
priv->RcvCtl[ctx].skb = skb;
priv->RcvCtl[ctx].dma = dma;
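mptlan.c exercises the streaming half of the API instead: pci_map_single/pci_unmap_single become dma_map_single/dma_unmap_single and the direction constants change name, but the ownership protocol — sync to the CPU before reading a receive buffer, sync back to the device before reposting it — is unchanged. A sketch of the receive-copy idiom used above (the helper name is illustrative):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static void rx_copy(struct device *dev, struct sk_buff *skb,
		    dma_addr_t dma, unsigned int len, void *dst)
{
	/* hand the buffer to the CPU before touching it ... */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
	skb_copy_from_linear_data(skb, dst, len);
	/* ... and back to the device so the bucket can be reposted */
	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
}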
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 091b45024d34..4acd8f9a48e1 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -702,8 +702,8 @@ mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
if (!hdr.PageLength)
goto out;
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer)
goto out;
@@ -769,8 +769,8 @@ mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
out:
if (buffer)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
- dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ buffer, dma_handle);
}
/**
@@ -1399,8 +1399,8 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -1426,8 +1426,8 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
enclosure->sep_channel = buffer->SEPBus;
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2058,8 +2058,8 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
if (!hdr.ExtPageLength)
return -ENXIO;
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -2081,8 +2081,8 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
le32_to_cpu(buffer->PhyResetProblemCount);
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
return error;
}
@@ -2301,7 +2301,7 @@ static void mptsas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
<< MPI_SGE_FLAGS_SHIFT;
if (!dma_map_sg(&ioc->pcidev->dev, job->request_payload.sg_list,
- 1, PCI_DMA_BIDIRECTIONAL))
+ 1, DMA_BIDIRECTIONAL))
goto put_mf;
flagsLength |= (sg_dma_len(job->request_payload.sg_list) - 4);
@@ -2318,7 +2318,7 @@ static void mptsas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
if (!dma_map_sg(&ioc->pcidev->dev, job->reply_payload.sg_list,
- 1, PCI_DMA_BIDIRECTIONAL))
+ 1, DMA_BIDIRECTIONAL))
goto unmap_out;
flagsLength |= sg_dma_len(job->reply_payload.sg_list) + 4;
ioc->add_sge(psge, flagsLength,
@@ -2356,10 +2356,10 @@ static void mptsas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost,
unmap_in:
dma_unmap_sg(&ioc->pcidev->dev, job->reply_payload.sg_list, 1,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
unmap_out:
dma_unmap_sg(&ioc->pcidev->dev, job->request_payload.sg_list, 1,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
put_mf:
if (mf)
mpt_free_msg_frame(ioc, mf);
@@ -2412,8 +2412,8 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -2452,8 +2452,8 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
}
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2487,8 +2487,8 @@ mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -2509,8 +2509,8 @@ mptsas_sas_io_unit_pg1(MPT_ADAPTER *ioc)
device_missing_delay & MPI_SAS_IOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2551,8 +2551,8 @@ mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -2573,8 +2573,8 @@ mptsas_sas_phy_pg0(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2614,8 +2614,8 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -2654,8 +2654,8 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
device_info->flags = le16_to_cpu(buffer->Flags);
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2697,8 +2697,8 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -2737,8 +2737,8 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
}
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2777,8 +2777,8 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
goto out;
}
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer) {
error = -ENOMEM;
goto out;
@@ -2810,8 +2810,8 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
phy_info->attached.handle = le16_to_cpu(buffer->AttachedDevHandle);
out_free_consistent:
- pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
- buffer, dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.ExtPageLength * 4, buffer,
+ dma_handle);
out:
return error;
}
@@ -2896,7 +2896,8 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
sz = sizeof(struct rep_manu_request) + sizeof(struct rep_manu_reply);
- data_out = pci_alloc_consistent(ioc->pcidev, sz, &data_out_dma);
+ data_out = dma_alloc_coherent(&ioc->pcidev->dev, sz, &data_out_dma,
+ GFP_KERNEL);
if (!data_out) {
printk(KERN_ERR "Memory allocation failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -2987,7 +2988,8 @@ mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,
}
out_free:
if (data_out_dma)
- pci_free_consistent(ioc->pcidev, sz, data_out, data_out_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, data_out,
+ data_out_dma);
put_mf:
if (mf)
mpt_free_msg_frame(ioc, mf);
@@ -4271,8 +4273,8 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
if (!hdr.PageLength)
goto out;
- buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
- &dma_handle);
+ buffer = dma_alloc_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ &dma_handle, GFP_KERNEL);
if (!buffer)
goto out;
@@ -4318,8 +4320,8 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
out:
if (buffer)
- pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
- dma_handle);
+ dma_free_coherent(&ioc->pcidev->dev, hdr.PageLength * 4,
+ buffer, dma_handle);
}
/*
* Work queue thread to handle SAS hotplug events
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index a21cbdf89477..ba0b3eb131f1 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -696,16 +696,6 @@ config MFD_INTEL_PMC_BXT
Register and P-unit access. In addition this creates devices
for iTCO watchdog and telemetry that are part of the PMC.
-config MFD_INTEL_PMT
- tristate "Intel Platform Monitoring Technology (PMT) support"
- depends on X86 && PCI
- select MFD_CORE
- help
- The Intel Platform Monitoring Technology (PMT) is an interface that
- provides access to hardware monitor registers. This driver supports
- Telemetry, Watcher, and Crashlog PMT capabilities/devices for
- platforms starting from Tiger Lake.
-
config MFD_IPAQ_MICRO
bool "Atmel Micro ASIC (iPAQ h3100/h3600/h3700) Support"
depends on SA1100_H3100 || SA1100_H3600
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 4d53e951a92d..df1ecc4a4c95 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -211,7 +211,6 @@ obj-$(CONFIG_MFD_INTEL_LPSS) += intel-lpss.o
obj-$(CONFIG_MFD_INTEL_LPSS_PCI) += intel-lpss-pci.o
obj-$(CONFIG_MFD_INTEL_LPSS_ACPI) += intel-lpss-acpi.o
obj-$(CONFIG_MFD_INTEL_PMC_BXT) += intel_pmc_bxt.o
-obj-$(CONFIG_MFD_INTEL_PMT) += intel_pmt.o
obj-$(CONFIG_MFD_PALMAS) += palmas.o
obj-$(CONFIG_MFD_VIPERBOARD) += viperboard.o
obj-$(CONFIG_MFD_NTXEC) += ntxec.o
diff --git a/drivers/mfd/intel_pmt.c b/drivers/mfd/intel_pmt.c
deleted file mode 100644
index dd7eb614c28e..000000000000
--- a/drivers/mfd/intel_pmt.c
+++ /dev/null
@@ -1,261 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Intel Platform Monitoring Technology PMT driver
- *
- * Copyright (c) 2020, Intel Corporation.
- * All Rights Reserved.
- *
- * Author: David E. Box <david.e.box@linux.intel.com>
- */
-
-#include <linux/bits.h>
-#include <linux/kernel.h>
-#include <linux/mfd/core.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/platform_device.h>
-#include <linux/pm.h>
-#include <linux/pm_runtime.h>
-#include <linux/types.h>
-
-/* Intel DVSEC capability vendor space offsets */
-#define INTEL_DVSEC_ENTRIES 0xA
-#define INTEL_DVSEC_SIZE 0xB
-#define INTEL_DVSEC_TABLE 0xC
-#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
-#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
-#define INTEL_DVSEC_ENTRY_SIZE 4
-
-/* PMT capabilities */
-#define DVSEC_INTEL_ID_TELEMETRY 2
-#define DVSEC_INTEL_ID_WATCHER 3
-#define DVSEC_INTEL_ID_CRASHLOG 4
-
-struct intel_dvsec_header {
- u16 length;
- u16 id;
- u8 num_entries;
- u8 entry_size;
- u8 tbir;
- u32 offset;
-};
-
-enum pmt_quirks {
- /* Watcher capability not supported */
- PMT_QUIRK_NO_WATCHER = BIT(0),
-
- /* Crashlog capability not supported */
- PMT_QUIRK_NO_CRASHLOG = BIT(1),
-
- /* Use shift instead of mask to read discovery table offset */
- PMT_QUIRK_TABLE_SHIFT = BIT(2),
-
- /* DVSEC not present (provided in driver data) */
- PMT_QUIRK_NO_DVSEC = BIT(3),
-};
-
-struct pmt_platform_info {
- unsigned long quirks;
- struct intel_dvsec_header **capabilities;
-};
-
-static const struct pmt_platform_info tgl_info = {
- .quirks = PMT_QUIRK_NO_WATCHER | PMT_QUIRK_NO_CRASHLOG |
- PMT_QUIRK_TABLE_SHIFT,
-};
-
-/* DG1 Platform with DVSEC quirk*/
-static struct intel_dvsec_header dg1_telemetry = {
- .length = 0x10,
- .id = 2,
- .num_entries = 1,
- .entry_size = 3,
- .tbir = 0,
- .offset = 0x466000,
-};
-
-static struct intel_dvsec_header *dg1_capabilities[] = {
- &dg1_telemetry,
- NULL
-};
-
-static const struct pmt_platform_info dg1_info = {
- .quirks = PMT_QUIRK_NO_DVSEC,
- .capabilities = dg1_capabilities,
-};
-
-static int pmt_add_dev(struct pci_dev *pdev, struct intel_dvsec_header *header,
- unsigned long quirks)
-{
- struct device *dev = &pdev->dev;
- struct resource *res, *tmp;
- struct mfd_cell *cell;
- const char *name;
- int count = header->num_entries;
- int size = header->entry_size;
- int id = header->id;
- int i;
-
- switch (id) {
- case DVSEC_INTEL_ID_TELEMETRY:
- name = "pmt_telemetry";
- break;
- case DVSEC_INTEL_ID_WATCHER:
- if (quirks & PMT_QUIRK_NO_WATCHER) {
- dev_info(dev, "Watcher not supported\n");
- return -EINVAL;
- }
- name = "pmt_watcher";
- break;
- case DVSEC_INTEL_ID_CRASHLOG:
- if (quirks & PMT_QUIRK_NO_CRASHLOG) {
- dev_info(dev, "Crashlog not supported\n");
- return -EINVAL;
- }
- name = "pmt_crashlog";
- break;
- default:
- return -EINVAL;
- }
-
- if (!header->num_entries || !header->entry_size) {
- dev_err(dev, "Invalid count or size for %s header\n", name);
- return -EINVAL;
- }
-
- cell = devm_kzalloc(dev, sizeof(*cell), GFP_KERNEL);
- if (!cell)
- return -ENOMEM;
-
- res = devm_kcalloc(dev, count, sizeof(*res), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- if (quirks & PMT_QUIRK_TABLE_SHIFT)
- header->offset >>= 3;
-
- /*
- * The PMT DVSEC contains the starting offset and count for a block of
- * discovery tables, each providing access to monitoring facilities for
- * a section of the device. Create a resource list of these tables to
- * provide to the driver.
- */
- for (i = 0, tmp = res; i < count; i++, tmp++) {
- tmp->start = pdev->resource[header->tbir].start +
- header->offset + i * (size << 2);
- tmp->end = tmp->start + (size << 2) - 1;
- tmp->flags = IORESOURCE_MEM;
- }
-
- cell->resources = res;
- cell->num_resources = count;
- cell->name = name;
-
- return devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, cell, 1, NULL, 0,
- NULL);
-}
-
-static int pmt_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
- struct pmt_platform_info *info;
- unsigned long quirks = 0;
- bool found_devices = false;
- int ret, pos = 0;
-
- ret = pcim_enable_device(pdev);
- if (ret)
- return ret;
-
- info = (struct pmt_platform_info *)id->driver_data;
-
- if (info)
- quirks = info->quirks;
-
- if (info && (info->quirks & PMT_QUIRK_NO_DVSEC)) {
- struct intel_dvsec_header **header;
-
- header = info->capabilities;
- while (*header) {
- ret = pmt_add_dev(pdev, *header, quirks);
- if (ret)
- dev_warn(&pdev->dev,
- "Failed to add device for DVSEC id %d\n",
- (*header)->id);
- else
- found_devices = true;
-
- ++header;
- }
- } else {
- do {
- struct intel_dvsec_header header;
- u32 table;
- u16 vid;
-
- pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
- if (!pos)
- break;
-
- pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vid);
- if (vid != PCI_VENDOR_ID_INTEL)
- continue;
-
- pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2,
- &header.id);
- pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES,
- &header.num_entries);
- pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE,
- &header.entry_size);
- pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE,
- &table);
-
- header.tbir = INTEL_DVSEC_TABLE_BAR(table);
- header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
-
- ret = pmt_add_dev(pdev, &header, quirks);
- if (ret)
- continue;
-
- found_devices = true;
- } while (true);
- }
-
- if (!found_devices)
- return -ENODEV;
-
- pm_runtime_put(&pdev->dev);
- pm_runtime_allow(&pdev->dev);
-
- return 0;
-}
-
-static void pmt_pci_remove(struct pci_dev *pdev)
-{
- pm_runtime_forbid(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
-}
-
-#define PCI_DEVICE_ID_INTEL_PMT_ADL 0x467d
-#define PCI_DEVICE_ID_INTEL_PMT_DG1 0x490e
-#define PCI_DEVICE_ID_INTEL_PMT_OOBMSM 0x09a7
-#define PCI_DEVICE_ID_INTEL_PMT_TGL 0x9a0d
-static const struct pci_device_id pmt_pci_ids[] = {
- { PCI_DEVICE_DATA(INTEL, PMT_ADL, &tgl_info) },
- { PCI_DEVICE_DATA(INTEL, PMT_DG1, &dg1_info) },
- { PCI_DEVICE_DATA(INTEL, PMT_OOBMSM, NULL) },
- { PCI_DEVICE_DATA(INTEL, PMT_TGL, &tgl_info) },
- { }
-};
-MODULE_DEVICE_TABLE(pci, pmt_pci_ids);
-
-static struct pci_driver pmt_pci_driver = {
- .name = "intel-pmt",
- .id_table = pmt_pci_ids,
- .probe = pmt_pci_probe,
- .remove = pmt_pci_remove,
-};
-module_pci_driver(pmt_pci_driver);
-
-MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
-MODULE_DESCRIPTION("Intel Platform Monitoring Technology PMT driver");
-MODULE_LICENSE("GPL v2");
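The intel_pmt MFD driver is deleted outright here, along with its Kconfig entry and Makefile line above. For reference while reading the removed pmt_add_dev(): each discovery-table resource is computed purely from the DVSEC header fields, start = BAR base + offset + i * (entry_size << 2), with entry_size counted in dwords. A standalone sketch of that arithmetic using the DG1 values hard-coded above (the BAR base is an assumed, illustrative number):

#include <stdio.h>

int main(void)
{
	/* values from the dg1_telemetry header in the deleted driver */
	unsigned int bar_start = 0x90000000;	/* illustrative BAR 0 base */
	unsigned int offset = 0x466000;
	int num_entries = 1, entry_size = 3;	/* entry size in dwords */

	for (int i = 0; i < num_entries; i++) {
		unsigned int start = bar_start + offset + i * (entry_size << 2);
		unsigned int end = start + (entry_size << 2) - 1;
		printf("entry %d: 0x%08x-0x%08x\n", i, start, end);
	}
	return 0;
}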
diff --git a/drivers/misc/cxl/Kconfig b/drivers/misc/cxl/Kconfig
index 51aecafdcbdf..5efc4151bf58 100644
--- a/drivers/misc/cxl/Kconfig
+++ b/drivers/misc/cxl/Kconfig
@@ -6,6 +6,7 @@
config CXL_BASE
bool
select PPC_COPRO_BASE
+ select PPC_64S_HASH_MMU
config CXL
tristate "Support for IBM Coherent Accelerators (CXL)"
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index c173a5e88c91..315c43f17dd3 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -570,6 +570,7 @@ static struct attribute *afu_cr_attrs[] = {
&class_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(afu_cr);
static void release_afu_config_record(struct kobject *kobj)
{
@@ -581,7 +582,7 @@ static void release_afu_config_record(struct kobject *kobj)
static struct kobj_type afu_config_record_type = {
.sysfs_ops = &kobj_sysfs_ops,
.release = release_afu_config_record,
- .default_attrs = afu_cr_attrs,
+ .default_groups = afu_cr_groups,
};
static struct afu_config_record *cxl_sysfs_afu_new_cr(struct cxl_afu *afu, int cr_idx)
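The cxl hunk is part of the tree-wide migration from kobj_type.default_attrs to default_groups. The added ATTRIBUTE_GROUPS(afu_cr) line manufactures the group list that the kobj_type now references; it expands approximately to this (simplified from include/linux/sysfs.h):

/* what ATTRIBUTE_GROUPS(afu_cr) generates from afu_cr_attrs, roughly */
static const struct attribute_group afu_cr_group = {
	.attrs = afu_cr_attrs,
};
static const struct attribute_group *afu_cr_groups[] = {
	&afu_cr_group,
	NULL,
};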
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 49ab656e8a96..633e1cf08d6e 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -68,11 +68,6 @@
* which won't work on pure SMBus systems.
*/
-struct at24_client {
- struct i2c_client *client;
- struct regmap *regmap;
-};
-
struct at24_data {
/*
* Lock protects against activities from other Linux tasks,
@@ -94,9 +89,10 @@ struct at24_data {
/*
* Some chips tie up multiple I2C addresses; dummy devices reserve
- * them for us, and we'll use them with SMBus calls.
+ * them for us.
*/
- struct at24_client client[];
+ u8 bank_addr_shift;
+ struct regmap *client_regmaps[];
};
/*
@@ -123,6 +119,7 @@ MODULE_PARM_DESC(at24_write_timeout, "Time (in ms) to try writes (default 25)");
struct at24_chip_data {
u32 byte_len;
u8 flags;
+ u8 bank_addr_shift;
void (*read_post)(unsigned int off, char *buf, size_t count);
};
@@ -137,6 +134,12 @@ struct at24_chip_data {
.read_post = _read_post, \
}
+#define AT24_CHIP_DATA_BS(_name, _len, _flags, _bank_addr_shift) \
+ static const struct at24_chip_data _name = { \
+ .byte_len = _len, .flags = _flags, \
+ .bank_addr_shift = _bank_addr_shift \
+ }
+
static void at24_read_post_vaio(unsigned int off, char *buf, size_t count)
{
int i;
@@ -197,6 +200,7 @@ AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16);
+AT24_CHIP_DATA_BS(at24_data_24c1025, 1048576 / 8, AT24_FLAG_ADDR16, 2);
AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16);
/* identical to 24c08 ? */
AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0);
@@ -225,6 +229,7 @@ static const struct i2c_device_id at24_ids[] = {
{ "24c256", (kernel_ulong_t)&at24_data_24c256 },
{ "24c512", (kernel_ulong_t)&at24_data_24c512 },
{ "24c1024", (kernel_ulong_t)&at24_data_24c1024 },
+ { "24c1025", (kernel_ulong_t)&at24_data_24c1025 },
{ "24c2048", (kernel_ulong_t)&at24_data_24c2048 },
{ "at24", 0 },
{ /* END OF LIST */ }
@@ -254,6 +259,7 @@ static const struct of_device_id at24_of_match[] = {
{ .compatible = "atmel,24c256", .data = &at24_data_24c256 },
{ .compatible = "atmel,24c512", .data = &at24_data_24c512 },
{ .compatible = "atmel,24c1024", .data = &at24_data_24c1024 },
+ { .compatible = "atmel,24c1025", .data = &at24_data_24c1025 },
{ .compatible = "atmel,24c2048", .data = &at24_data_24c2048 },
{ /* END OF LIST */ },
};
@@ -275,8 +281,8 @@ MODULE_DEVICE_TABLE(acpi, at24_acpi_ids);
* set the byte address; on a multi-master board, another master
* may have changed the chip's "current" address pointer.
*/
-static struct at24_client *at24_translate_offset(struct at24_data *at24,
- unsigned int *offset)
+static struct regmap *at24_translate_offset(struct at24_data *at24,
+ unsigned int *offset)
{
unsigned int i;
@@ -288,12 +294,12 @@ static struct at24_client *at24_translate_offset(struct at24_data *at24,
*offset &= 0xff;
}
- return &at24->client[i];
+ return at24->client_regmaps[i];
}
static struct device *at24_base_client_dev(struct at24_data *at24)
{
- return &at24->client[0].client->dev;
+ return regmap_get_device(at24->client_regmaps[0]);
}
static size_t at24_adjust_read_count(struct at24_data *at24,
@@ -324,14 +330,10 @@ static ssize_t at24_regmap_read(struct at24_data *at24, char *buf,
unsigned int offset, size_t count)
{
unsigned long timeout, read_time;
- struct at24_client *at24_client;
- struct i2c_client *client;
struct regmap *regmap;
int ret;
- at24_client = at24_translate_offset(at24, &offset);
- regmap = at24_client->regmap;
- client = at24_client->client;
+ regmap = at24_translate_offset(at24, &offset);
count = at24_adjust_read_count(at24, offset, count);
/* adjust offset for mac and serial read ops */
@@ -346,7 +348,7 @@ static ssize_t at24_regmap_read(struct at24_data *at24, char *buf,
read_time = jiffies;
ret = regmap_bulk_read(regmap, offset, buf, count);
- dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n",
+ dev_dbg(regmap_get_device(regmap), "read %zu@%d --> %d (%ld)\n",
count, offset, ret, jiffies);
if (!ret)
return count;
@@ -387,14 +389,10 @@ static ssize_t at24_regmap_write(struct at24_data *at24, const char *buf,
unsigned int offset, size_t count)
{
unsigned long timeout, write_time;
- struct at24_client *at24_client;
- struct i2c_client *client;
struct regmap *regmap;
int ret;
- at24_client = at24_translate_offset(at24, &offset);
- regmap = at24_client->regmap;
- client = at24_client->client;
+ regmap = at24_translate_offset(at24, &offset);
count = at24_adjust_write_count(at24, offset, count);
timeout = jiffies + msecs_to_jiffies(at24_write_timeout);
@@ -406,7 +404,7 @@ static ssize_t at24_regmap_write(struct at24_data *at24, const char *buf,
write_time = jiffies;
ret = regmap_bulk_write(regmap, offset, buf, count);
- dev_dbg(&client->dev, "write %zu@%d --> %d (%ld)\n",
+ dev_dbg(regmap_get_device(regmap), "write %zu@%d --> %d (%ld)\n",
count, offset, ret, jiffies);
if (!ret)
return count;
@@ -538,17 +536,16 @@ static const struct at24_chip_data *at24_get_chip_data(struct device *dev)
}
static int at24_make_dummy_client(struct at24_data *at24, unsigned int index,
+ struct i2c_client *base_client,
struct regmap_config *regmap_config)
{
- struct i2c_client *base_client, *dummy_client;
+ struct i2c_client *dummy_client;
struct regmap *regmap;
- struct device *dev;
-
- base_client = at24->client[0].client;
- dev = &base_client->dev;
- dummy_client = devm_i2c_new_dummy_device(dev, base_client->adapter,
- base_client->addr + index);
+ dummy_client = devm_i2c_new_dummy_device(&base_client->dev,
+ base_client->adapter,
+ base_client->addr +
+ (index << at24->bank_addr_shift));
if (IS_ERR(dummy_client))
return PTR_ERR(dummy_client);
@@ -556,8 +553,7 @@ static int at24_make_dummy_client(struct at24_data *at24, unsigned int index,
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- at24->client[index].client = dummy_client;
- at24->client[index].regmap = regmap;
+ at24->client_regmaps[index] = regmap;
return 0;
}
@@ -680,7 +676,7 @@ static int at24_probe(struct i2c_client *client)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- at24 = devm_kzalloc(dev, struct_size(at24, client, num_addresses),
+ at24 = devm_kzalloc(dev, struct_size(at24, client_regmaps, num_addresses),
GFP_KERNEL);
if (!at24)
return -ENOMEM;
@@ -690,10 +686,10 @@ static int at24_probe(struct i2c_client *client)
at24->page_size = page_size;
at24->flags = flags;
at24->read_post = cdata->read_post;
+ at24->bank_addr_shift = cdata->bank_addr_shift;
at24->num_addresses = num_addresses;
at24->offset_adj = at24_get_offset_adj(flags, byte_len);
- at24->client[0].client = client;
- at24->client[0].regmap = regmap;
+ at24->client_regmaps[0] = regmap;
at24->vcc_reg = devm_regulator_get(dev, "vcc");
if (IS_ERR(at24->vcc_reg))
@@ -709,7 +705,7 @@ static int at24_probe(struct i2c_client *client)
/* use dummy devices for multiple-address chips */
for (i = 1; i < num_addresses; i++) {
- err = at24_make_dummy_client(at24, i, &regmap_config);
+ err = at24_make_dummy_client(at24, i, client, &regmap_config);
if (err)
return err;
}
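The at24 rework folds the per-bank i2c_client array into a plain regmap array and adds bank_addr_shift for the new 24c1025 entry, whose second 64 KiB bank is selected by I2C address bit 2 rather than bit 0. With the usual 0x50 base address the two banks therefore answer at 0x50 and 0x54, which is what the modified at24_make_dummy_client() computes. A one-function sketch of that addressing:

/* bank address computation as done above; for the 24c1025,
 * bank_addr_shift == 2, so bank index 1 lands at base + 4
 */
static unsigned short at24_bank_addr(unsigned short base, unsigned int index,
				     unsigned int bank_addr_shift)
{
	return base + (index << bank_addr_shift);
}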
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index b38978a3b3ff..bee727ed98db 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -1,28 +1,27 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * at25.c -- support most SPI EEPROMs, such as Atmel AT25 models
- * and Cypress FRAMs FM25 models
+ * Driver for most of the SPI EEPROMs, such as Atmel AT25 models
+ * and Cypress FRAMs FM25 models.
*
* Copyright (C) 2006 David Brownell
*/
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
+#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/property.h>
#include <linux/sched.h>
+#include <linux/slab.h>
-#include <linux/nvmem-provider.h>
-#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
-#include <linux/property.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/math.h>
+#include <linux/spi/spi.h>
+
+#include <linux/nvmem-provider.h>
/*
- * NOTE: this is an *EEPROM* driver. The vagaries of product naming
+ * NOTE: this is an *EEPROM* driver. The vagaries of product naming
* mean that some AT25 products are EEPROMs, and others are FLASH.
* Handle FLASH chips with the drivers/mtd/devices/m25p80.c driver,
* not this one!
@@ -33,9 +32,9 @@
#define FM25_SN_LEN 8 /* serial number length */
struct at25_data {
+ struct spi_eeprom chip;
struct spi_device *spi;
struct mutex lock;
- struct spi_eeprom chip;
unsigned addrlen;
struct nvmem_config nvmem_config;
struct nvmem_device *nvmem;
@@ -58,13 +57,14 @@ struct at25_data {
#define AT25_SR_BP1 0x08
#define AT25_SR_WPEN 0x80 /* writeprotect enable */
-#define AT25_INSTR_BIT3 0x08 /* Additional address bit in instr */
+#define AT25_INSTR_BIT3 0x08 /* additional address bit in instr */
#define FM25_ID_LEN 9 /* ID length */
#define EE_MAXADDRLEN 3 /* 24 bit addresses, up to 2 MBytes */
-/* Specs often allow 5 msec for a page write, sometimes 20 msec;
+/*
+ * Specs often allow 5ms for a page write, sometimes 20ms;
* it's important to recover from write timeouts.
*/
#define EE_TIMEOUT 25
@@ -96,7 +96,7 @@ static int at25_ee_read(void *priv, unsigned int offset,
instr = AT25_READ;
if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
- if (offset >= (1U << (at25->addrlen * 8)))
+ if (offset >= BIT(at25->addrlen * 8))
instr |= AT25_INSTR_BIT3;
*cp++ = instr;
@@ -109,7 +109,7 @@ static int at25_ee_read(void *priv, unsigned int offset,
*cp++ = offset >> 8;
fallthrough;
case 1:
- case 0: /* can't happen: for better codegen */
+ case 0: /* can't happen: for better code generation */
*cp++ = offset >> 0;
}
@@ -126,11 +126,12 @@ static int at25_ee_read(void *priv, unsigned int offset,
mutex_lock(&at25->lock);
- /* Read it all at once.
+ /*
+ * Read it all at once.
*
* REVISIT that's potentially a problem with large chips, if
* other devices on the bus need to be accessed regularly or
- * this chip is clocked very slowly
+ * this chip is clocked very slowly.
*/
status = spi_sync(at25->spi, &m);
dev_dbg(&at25->spi->dev, "read %zu bytes at %d --> %zd\n",
@@ -140,9 +141,7 @@ static int at25_ee_read(void *priv, unsigned int offset,
return status;
}
-/*
- * read extra registers as ID or serial number
- */
+/* Read extra registers as ID or serial number */
static int fm25_aux_read(struct at25_data *at25, u8 *buf, uint8_t command,
int len)
{
@@ -208,7 +207,8 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
if (!bounce)
return -ENOMEM;
- /* For write, rollover is within the page ... so we write at
+ /*
+ * For write, rollover is within the page ... so we write at
* most one page, then manually roll over to the next page.
*/
mutex_lock(&at25->lock);
@@ -229,7 +229,7 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
instr = AT25_WRITE;
if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR)
- if (offset >= (1U << (at25->addrlen * 8)))
+ if (offset >= BIT(at25->addrlen * 8))
instr |= AT25_INSTR_BIT3;
*cp++ = instr;
@@ -242,7 +242,7 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
*cp++ = offset >> 8;
fallthrough;
case 1:
- case 0: /* can't happen: for better codegen */
+ case 0: /* can't happen: for better code generation */
*cp++ = offset >> 0;
}
@@ -258,8 +258,9 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
if (status < 0)
break;
- /* REVISIT this should detect (or prevent) failed writes
- * to readonly sections of the EEPROM...
+ /*
+ * REVISIT this should detect (or prevent) failed writes
+ * to read-only sections of the EEPROM...
*/
/* Wait for non-busy status */
@@ -306,34 +307,37 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count)
static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
{
u32 val;
+ int err;
- memset(chip, 0, sizeof(*chip));
strncpy(chip->name, "at25", sizeof(chip->name));
- if (device_property_read_u32(dev, "size", &val) == 0 ||
- device_property_read_u32(dev, "at25,byte-len", &val) == 0) {
- chip->byte_len = val;
- } else {
+ err = device_property_read_u32(dev, "size", &val);
+ if (err)
+ err = device_property_read_u32(dev, "at25,byte-len", &val);
+ if (err) {
dev_err(dev, "Error: missing \"size\" property\n");
- return -ENODEV;
+ return err;
}
+ chip->byte_len = val;
- if (device_property_read_u32(dev, "pagesize", &val) == 0 ||
- device_property_read_u32(dev, "at25,page-size", &val) == 0) {
- chip->page_size = val;
- } else {
+ err = device_property_read_u32(dev, "pagesize", &val);
+ if (err)
+ err = device_property_read_u32(dev, "at25,page-size", &val);
+ if (err) {
dev_err(dev, "Error: missing \"pagesize\" property\n");
- return -ENODEV;
+ return err;
}
+ chip->page_size = val;
- if (device_property_read_u32(dev, "at25,addr-mode", &val) == 0) {
+ err = device_property_read_u32(dev, "address-width", &val);
+ if (err) {
+ err = device_property_read_u32(dev, "at25,addr-mode", &val);
+ if (err) {
+ dev_err(dev, "Error: missing \"address-width\" property\n");
+ return err;
+ }
chip->flags = (u16)val;
} else {
- if (device_property_read_u32(dev, "address-width", &val)) {
- dev_err(dev,
- "Error: missing \"address-width\" property\n");
- return -ENODEV;
- }
switch (val) {
case 9:
chip->flags |= EE_INSTR_BIT3_IS_ADDR;
@@ -359,16 +363,54 @@ static int at25_fw_to_chip(struct device *dev, struct spi_eeprom *chip)
return 0;
}
+static int at25_fram_to_chip(struct device *dev, struct spi_eeprom *chip)
+{
+ struct at25_data *at25 = container_of(chip, struct at25_data, chip);
+ u8 sernum[FM25_SN_LEN];
+ u8 id[FM25_ID_LEN];
+ int i;
+
+ strncpy(chip->name, "fm25", sizeof(chip->name));
+
+ /* Get ID of chip */
+ fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN);
+ if (id[6] != 0xc2) {
+ dev_err(dev, "Error: no Cypress FRAM (id %02x)\n", id[6]);
+ return -ENODEV;
+ }
+ /* Set size found in ID */
+ if (id[7] < 0x21 || id[7] > 0x26) {
+ dev_err(dev, "Error: unsupported size (id %02x)\n", id[7]);
+ return -ENODEV;
+ }
+
+ chip->byte_len = BIT(id[7] - 0x21 + 4) * 1024;
+ if (chip->byte_len > 64 * 1024)
+ chip->flags |= EE_ADDR3;
+ else
+ chip->flags |= EE_ADDR2;
+
+ if (id[8]) {
+ fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN);
+ /* Swap byte order */
+ for (i = 0; i < FM25_SN_LEN; i++)
+ at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i];
+ }
+
+ chip->page_size = PAGE_SIZE;
+ return 0;
+}
+
static const struct of_device_id at25_of_match[] = {
- { .compatible = "atmel,at25",},
- { .compatible = "cypress,fm25",},
+ { .compatible = "atmel,at25" },
+ { .compatible = "cypress,fm25" },
{ }
};
MODULE_DEVICE_TABLE(of, at25_of_match);
static const struct spi_device_id at25_spi_ids[] = {
- { .name = "at25",},
- { .name = "fm25",},
+ { .name = "at25" },
+ { .name = "fm25" },
{ }
};
MODULE_DEVICE_TABLE(spi, at25_spi_ids);
@@ -378,31 +420,18 @@ static int at25_probe(struct spi_device *spi)
struct at25_data *at25 = NULL;
int err;
int sr;
- u8 id[FM25_ID_LEN];
- u8 sernum[FM25_SN_LEN];
- int i;
- const struct of_device_id *match;
- bool is_fram = 0;
-
- match = of_match_device(of_match_ptr(at25_of_match), &spi->dev);
- if (match && !strcmp(match->compatible, "cypress,fm25"))
- is_fram = 1;
-
- at25 = devm_kzalloc(&spi->dev, sizeof(struct at25_data), GFP_KERNEL);
- if (!at25)
- return -ENOMEM;
-
- /* Chip description */
- if (spi->dev.platform_data) {
- memcpy(&at25->chip, spi->dev.platform_data, sizeof(at25->chip));
- } else if (!is_fram) {
- err = at25_fw_to_chip(&spi->dev, &at25->chip);
- if (err)
- return err;
- }
-
- /* Ping the chip ... the status register is pretty portable,
- * unlike probing manufacturer IDs. We do expect that system
+ struct spi_eeprom *pdata;
+ bool is_fram;
+
+ err = device_property_match_string(&spi->dev, "compatible", "cypress,fm25");
+ if (err >= 0)
+ is_fram = true;
+ else
+ is_fram = false;
+
+ /*
+ * Ping the chip ... the status register is pretty portable,
+ * unlike probing manufacturer IDs. We do expect that system
* firmware didn't write it in the past few milliseconds!
*/
sr = spi_w8r8(spi, AT25_RDSR);
@@ -411,39 +440,25 @@ static int at25_probe(struct spi_device *spi)
return -ENXIO;
}
+ at25 = devm_kzalloc(&spi->dev, sizeof(*at25), GFP_KERNEL);
+ if (!at25)
+ return -ENOMEM;
+
mutex_init(&at25->lock);
at25->spi = spi;
spi_set_drvdata(spi, at25);
- if (is_fram) {
- /* Get ID of chip */
- fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN);
- if (id[6] != 0xc2) {
- dev_err(&spi->dev,
- "Error: no Cypress FRAM (id %02x)\n", id[6]);
- return -ENODEV;
- }
- /* set size found in ID */
- if (id[7] < 0x21 || id[7] > 0x26) {
- dev_err(&spi->dev, "Error: unsupported size (id %02x)\n", id[7]);
- return -ENODEV;
- }
- at25->chip.byte_len = int_pow(2, id[7] - 0x21 + 4) * 1024;
-
- if (at25->chip.byte_len > 64 * 1024)
- at25->chip.flags |= EE_ADDR3;
+ /* Chip description */
+ pdata = dev_get_platdata(&spi->dev);
+ if (pdata) {
+ at25->chip = *pdata;
+ } else {
+ if (is_fram)
+ err = at25_fram_to_chip(&spi->dev, &at25->chip);
else
- at25->chip.flags |= EE_ADDR2;
-
- if (id[8]) {
- fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN);
- /* swap byte order */
- for (i = 0; i < FM25_SN_LEN; i++)
- at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i];
- }
-
- at25->chip.page_size = PAGE_SIZE;
- strncpy(at25->chip.name, "fm25", sizeof(at25->chip.name));
+ err = at25_fw_to_chip(&spi->dev, &at25->chip);
+ if (err)
+ return err;
}
/* For now we only support 8/16/24 bit addressing */
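The FRAM probing moves into the new at25_fram_to_chip(), and the density decode is compact enough to verify by hand: byte_len = 2^(id[7] - 0x21 + 4) KiB, so the supported ID range 0x21..0x26 spans 16 KiB to 512 KiB, and anything above 64 KiB needs a third address byte (EE_ADDR3). A standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	for (int id7 = 0x21; id7 <= 0x26; id7++) {
		unsigned int len = (1u << (id7 - 0x21 + 4)) * 1024;
		printf("id[7]=0x%02x -> %u KiB, %s\n", id7, len / 1024,
		       len > 64 * 1024 ? "EE_ADDR3" : "EE_ADDR2");
	}
	return 0;
}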
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index bb9c4512c968..9fbfe784d710 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -114,6 +114,9 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf,
if (offset + count > EE1004_PAGE_SIZE)
count = EE1004_PAGE_SIZE - offset;
+ if (count > I2C_SMBUS_BLOCK_MAX)
+ count = I2C_SMBUS_BLOCK_MAX;
+
return i2c_smbus_read_i2c_block_data_or_emulated(client, offset, count, buf);
}
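The ee1004 clamp matters because i2c_smbus_read_i2c_block_data_or_emulated() rejects any request longer than I2C_SMBUS_BLOCK_MAX (32 bytes) with -EINVAL; the sysfs read path already loops over the remainder, so capping each chunk is sufficient. The same clamp written with min_t() (a sketch, the helper name is illustrative):

#include <linux/i2c.h>
#include <linux/minmax.h>

static ssize_t ee1004_read_chunk(struct i2c_client *client, u8 offset,
				 size_t count, u8 *buf)
{
	/* at most one SMBus block per call; the caller loops */
	count = min_t(size_t, count, I2C_SMBUS_BLOCK_MAX);
	return i2c_smbus_read_i2c_block_data_or_emulated(client, offset,
							 count, buf);
}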
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 4ccbf43e6bfa..aa1682b94a23 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -1288,7 +1288,14 @@ static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp)
}
if (copy_to_user(argp, &bp, sizeof(bp))) {
- dma_buf_put(buf->dmabuf);
+ /*
+ * The usercopy failed, but we can't do much about it, as
+ * dma_buf_fd() already called fd_install() and made the
+ * file descriptor accessible for the current process. It
+ * might already be closed and dmabuf no longer valid when
+ * we reach this point. Therefore "leak" the fd and rely on
+ * the process exit path to do any required cleanup.
+ */
return -EFAULT;
}
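The fastrpc comment documents a real constraint: once dma_buf_fd() has called fd_install(), another thread of the process may already own the descriptor, so dropping the buffer on a copy_to_user() failure could free memory behind the user's back. The general pattern that avoids the leak is to reserve the fd first, do all fallible work, and only then publish it. A hedged sketch of that ordering — not fastrpc's current code, and the args struct is a hypothetical stand-in for the real uapi type:

#include <linux/dma-buf.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uaccess.h>

struct dmabuf_args { s32 fd; };	/* hypothetical stand-in */

static int publish_dmabuf_fd(struct dma_buf *dmabuf, void __user *argp,
			     struct dmabuf_args *bp)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);

	if (fd < 0)
		return fd;
	bp->fd = fd;
	if (copy_to_user(argp, bp, sizeof(*bp))) {
		put_unused_fd(fd);	/* nothing visible yet, safe to undo */
		return -EFAULT;
	}
	fd_install(fd, dmabuf->file);	/* last step: fd becomes visible */
	return 0;
}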
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index 8132a84698d5..3c0ae07a2d80 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -57,7 +57,7 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
}
va_block->start = virt_addr;
- va_block->end = virt_addr + page_size;
+ va_block->end = virt_addr + page_size - 1;
va_block->size = page_size;
list_add_tail(&va_block->node, &cb->va_block_list);
}
@@ -80,13 +80,13 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
offset += va_block->size;
}
- hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);
+ rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
mutex_unlock(&ctx->mmu_lock);
cb->is_mmu_mapped = true;
- return 0;
+ return rc;
err_va_umap:
list_for_each_entry(va_block, &cb->va_block_list, node) {
@@ -97,7 +97,7 @@ err_va_umap:
offset -= va_block->size;
}
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+ rc = hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
mutex_unlock(&ctx->mmu_lock);
@@ -126,7 +126,7 @@ static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
"Failed to unmap CB's va 0x%llx\n",
va_block->start);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+ hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
mutex_unlock(&ctx->mmu_lock);
@@ -250,8 +250,7 @@ int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr,
* Can't use generic function to check this because of special case
* where we create a CB as part of the reset process
*/
- if ((hdev->disabled) || ((atomic_read(&hdev->in_reset)) &&
- (ctx_id != HL_KERNEL_ASID_ID))) {
+ if ((hdev->disabled) || (hdev->reset_info.in_reset && (ctx_id != HL_KERNEL_ASID_ID))) {
dev_warn_ratelimited(hdev->dev,
"Device is disabled or in reset. Can't create new CBs\n");
rc = -EBUSY;
@@ -380,8 +379,9 @@ int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle)
}
static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
- u64 cb_handle, u32 *usage_cnt)
+ u64 cb_handle, u32 flags, u32 *usage_cnt, u64 *device_va)
{
+ struct hl_vm_va_block *va_block;
struct hl_cb *cb;
u32 handle;
int rc = 0;
@@ -402,7 +402,18 @@ static int hl_cb_info(struct hl_device *hdev, struct hl_cb_mgr *mgr,
goto out;
}
- *usage_cnt = atomic_read(&cb->cs_cnt);
+ if (flags & HL_CB_FLAGS_GET_DEVICE_VA) {
+ va_block = list_first_entry_or_null(&cb->va_block_list, struct hl_vm_va_block, node);
+ if (va_block) {
+ *device_va = va_block->start;
+ } else {
+ dev_err(hdev->dev, "CB is not mapped to the device's MMU\n");
+ rc = -EINVAL;
+ goto out;
+ }
+ } else {
+ *usage_cnt = atomic_read(&cb->cs_cnt);
+ }
out:
spin_unlock(&mgr->cb_lock);
@@ -414,7 +425,7 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
union hl_cb_args *args = data;
struct hl_device *hdev = hpriv->hdev;
enum hl_device_status status;
- u64 handle = 0;
+ u64 handle = 0, device_va;
u32 usage_cnt = 0;
int rc;
@@ -450,13 +461,20 @@ int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data)
case HL_CB_OP_INFO:
rc = hl_cb_info(hdev, &hpriv->cb_mgr, args->in.cb_handle,
- &usage_cnt);
- memset(args, 0, sizeof(*args));
- args->out.usage_cnt = usage_cnt;
+ args->in.flags,
+ &usage_cnt,
+ &device_va);
+
+ memset(&args->out, 0, sizeof(args->out));
+
+ if (args->in.flags & HL_CB_FLAGS_GET_DEVICE_VA)
+ args->out.device_va = device_va;
+ else
+ args->out.usage_cnt = usage_cnt;
break;
default:
- rc = -ENOTTY;
+ rc = -EINVAL;
break;
}
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 4c8000fd246c..0a4ef13d9ac4 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -533,8 +533,8 @@ static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
mcs_compl->stream_master_qid_map)) {
/* extract the timestamp only of first completed CS */
if (!mcs_compl->timestamp)
- mcs_compl->timestamp =
- ktime_to_ns(fence->timestamp);
+ mcs_compl->timestamp = ktime_to_ns(fence->timestamp);
+
complete_all(&mcs_compl->completion);
/*
@@ -733,6 +733,14 @@ static void cs_timedout(struct work_struct *work)
hdev = cs->ctx->hdev;
+ /* Save only the first CS timeout parameters */
+ rc = atomic_cmpxchg(&hdev->last_error.cs_write_disable, 0, 1);
+ if (!rc) {
+ hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime;
+ hdev->last_error.cs_timeout_timestamp = ktime_get();
+ hdev->last_error.cs_timeout_seq = cs->sequence;
+ }
+
switch (cs->type) {
case CS_TYPE_SIGNAL:
dev_err(hdev->dev,
@@ -767,9 +775,9 @@ static void cs_timedout(struct work_struct *work)
if (likely(!skip_reset_on_timeout)) {
if (hdev->reset_on_lockup)
- hl_device_reset(hdev, HL_RESET_TDR);
+ hl_device_reset(hdev, HL_DRV_RESET_TDR);
else
- hdev->needs_reset = true;
+ hdev->reset_info.needs_reset = true;
}
}
@@ -806,7 +814,7 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS);
cs->timeout_jiffies = timeout;
cs->skip_reset_on_timeout =
- hdev->skip_reset_on_timeout ||
+ hdev->reset_info.skip_reset_on_timeout ||
!!(flags & HL_CS_FLAGS_SKIP_RESET_ON_TIMEOUT);
cs->submission_time_jiffies = jiffies;
INIT_LIST_HEAD(&cs->job_list);
@@ -1131,9 +1139,6 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
enum hl_cs_type cs_type;
if (!hl_device_operational(hdev, &status)) {
- dev_warn_ratelimited(hdev->dev,
- "Device is %s. Can't submit new CS\n",
- hdev->status[status]);
return -EBUSY;
}
@@ -1262,7 +1267,8 @@ static u32 get_stream_master_qid_mask(struct hl_device *hdev, u32 qid)
static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
u32 num_chunks, u64 *cs_seq, u32 flags,
- u32 encaps_signals_handle, u32 timeout)
+ u32 encaps_signals_handle, u32 timeout,
+ u16 *signal_initial_sob_count)
{
bool staged_mid, int_queues_only = true;
struct hl_device *hdev = hpriv->hdev;
@@ -1429,6 +1435,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
goto free_cs_object;
}
+ *signal_initial_sob_count = cs->initial_sob_count;
+
rc = HL_CS_STATUS_SUCCESS;
goto put_cs;
@@ -1457,6 +1465,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
int rc = 0, do_ctx_switch;
void __user *chunks;
u32 num_chunks, tmp;
+ u16 sob_count;
int ret;
do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
@@ -1497,7 +1506,7 @@ static int hl_cs_ctx_switch(struct hl_fpriv *hpriv, union hl_cs_args *args,
rc = 0;
} else {
rc = cs_ioctl_default(hpriv, chunks, num_chunks,
- cs_seq, 0, 0, hdev->timeout_jiffies);
+ cs_seq, 0, 0, hdev->timeout_jiffies, &sob_count);
}
mutex_unlock(&hpriv->restore_phase_mutex);
@@ -1813,6 +1822,9 @@ static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
}
handle->count = count;
+
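+ /*
+ * Take a reference on the context so the signals manager remains valid
+ * for the handle release path, even after the user closes the device.
+ */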
+ hl_ctx_get(hdev, hpriv->ctx);
+ handle->ctx = hpriv->ctx;
mgr = &hpriv->ctx->sig_mgr;
spin_lock(&mgr->lock);
@@ -1822,7 +1834,7 @@ static int cs_ioctl_reserve_signals(struct hl_fpriv *hpriv,
if (hdl_id < 0) {
dev_err(hdev->dev, "Failed to allocate IDR for a new signal reservation\n");
rc = -EINVAL;
- goto out;
+ goto put_ctx;
}
handle->id = hdl_id;
@@ -1875,7 +1887,10 @@ remove_idr:
idr_remove(&mgr->handles, hdl_id);
spin_unlock(&mgr->lock);
+put_ctx:
+ hl_ctx_put(handle->ctx);
kfree(handle);
+
out:
return rc;
}
@@ -1935,6 +1950,7 @@ static int cs_ioctl_unreserve_signals(struct hl_fpriv *hpriv, u32 handle_id)
/* Release the id and free allocated memory of the handle */
idr_remove(&mgr->handles, handle_id);
+ hl_ctx_put(encaps_sig_hdl->ctx);
kfree(encaps_sig_hdl);
} else {
rc = -EINVAL;
@@ -1948,7 +1964,8 @@ out:
static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
void __user *chunks, u32 num_chunks,
- u64 *cs_seq, u32 flags, u32 timeout)
+ u64 *cs_seq, u32 flags, u32 timeout,
+ u32 *signal_sob_addr_offset, u16 *signal_initial_sob_count)
{
struct hl_cs_encaps_sig_handle *encaps_sig_hdl = NULL;
bool handle_found = false, is_wait_cs = false,
@@ -2180,6 +2197,9 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
goto free_cs_object;
}
+ *signal_sob_addr_offset = cs->sob_addr_offset;
+ *signal_initial_sob_count = cs->initial_sob_count;
+
rc = HL_CS_STATUS_SUCCESS;
if (is_wait_cs)
wait_cs_submitted = true;
@@ -2210,6 +2230,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
void __user *chunks;
u32 num_chunks, flags, timeout,
signals_count = 0, sob_addr = 0, handle_id = 0;
+ u16 sob_initial_count = 0;
int rc;
rc = hl_cs_sanity_checks(hpriv, args);
@@ -2240,7 +2261,8 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
case CS_TYPE_WAIT:
case CS_TYPE_COLLECTIVE_WAIT:
rc = cs_ioctl_signal_wait(hpriv, cs_type, chunks, num_chunks,
- &cs_seq, args->in.cs_flags, timeout);
+ &cs_seq, args->in.cs_flags, timeout,
+ &sob_addr, &sob_initial_count);
break;
case CS_RESERVE_SIGNALS:
rc = cs_ioctl_reserve_signals(hpriv,
@@ -2256,20 +2278,33 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
rc = cs_ioctl_default(hpriv, chunks, num_chunks, &cs_seq,
args->in.cs_flags,
args->in.encaps_sig_handle_id,
- timeout);
+ timeout, &sob_initial_count);
break;
}
out:
if (rc != -EAGAIN) {
memset(args, 0, sizeof(*args));
- if (cs_type == CS_RESERVE_SIGNALS) {
+ switch (cs_type) {
+ case CS_RESERVE_SIGNALS:
args->out.handle_id = handle_id;
args->out.sob_base_addr_offset = sob_addr;
args->out.count = signals_count;
- } else {
+ break;
+ case CS_TYPE_SIGNAL:
+ args->out.sob_base_addr_offset = sob_addr;
+ args->out.sob_count_before_submission = sob_initial_count;
+ args->out.seq = cs_seq;
+ break;
+ case CS_TYPE_DEFAULT:
+ args->out.sob_count_before_submission = sob_initial_count;
args->out.seq = cs_seq;
+ break;
+ default:
+ args->out.seq = cs_seq;
+ break;
}
+
args->out.status = rc;
}
@@ -2334,16 +2369,18 @@ static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence
* hl_cs_poll_fences - iterate CS fences to check for CS completion
*
* @mcs_data: multi-CS internal data
+ * @mcs_compl: multi-CS completion structure
*
* @return 0 on success, otherwise non 0 error code
*
 * The function iterates over all CS sequences in the list and sets a bit in
 * completion_bitmap for each completed CS.
- * while iterating, the function can extracts the stream map to be later
- * used by the waiting function.
- * this function shall be called after taking context ref
+ * While iterating, the function adds the stream map of each fence in the
+ * fence array to the completion's QID stream map, to be used by CSs to
+ * signal completion to the multi-CS context.
+ * This function shall be called after taking a context ref.
*/
-static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
+static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_completion *mcs_compl)
{
struct hl_fence **fence_ptr = mcs_data->fence_arr;
struct hl_device *hdev = mcs_data->ctx->hdev;
@@ -2360,6 +2397,15 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
return rc;
/*
+ * Re-initialize the completion here to handle 2 possible cases:
+ * 1. A CS completes the multi-CS prior to clearing the completion, in which
+ *    case the fence iteration is guaranteed to catch the CS completion.
+ * 2. The completion occurs after the re-init of the completion, in which
+ *    case we will wake up immediately in wait_for_completion.
+ */
+ reinit_completion(&mcs_compl->completion);
+
+ /*
 * set to maximum time to verify timestamp is valid: if this value is
 * maintained at the end, no timestamp was updated
*/
@@ -2370,6 +2416,21 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
struct hl_fence *fence = *fence_ptr;
/*
+ * To prevent a case where we wait until timeout even though a CS associated
+ * with the multi-CS actually completed, we do things in the below order:
+ * 1. For each fence, set its QID map in the multi-CS completion QID map. This
+ *    way any CS can, potentially, complete the multi-CS for the specific QID
+ *    (note that once the completion is initialized, calling complete* and
+ *    then waiting on the completion will cause it to return at once).
+ * 2. Only after allowing multi-CS completion for the specific QID do we check
+ *    whether the specific CS already completed (and thus the wait-for-completion
+ *    part will be skipped). If the CS has not completed, it is guaranteed
+ *    that the completing CS will wake up the completion.
+ */
+ if (fence)
+ mcs_compl->stream_master_qid_map |= fence->stream_master_qid_map;
+
+ /*
* function won't sleep as it is called with timeout 0 (i.e.
* poll the fence)
*/
@@ -2384,9 +2445,7 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
switch (status) {
case CS_WAIT_STATUS_BUSY:
- /* CS did not finished, keep waiting on its QID*/
- mcs_data->stream_master_qid_map |=
- fence->stream_master_qid_map;
+ /* CS did not finish, QID to wait on already stored */
break;
case CS_WAIT_STATUS_COMPLETED:
/*
@@ -2394,9 +2453,19 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
* returns to user indicating CS completed before it finished
* all of its mcs handling, to avoid race the next time the
* user waits for mcs.
+ * Note: when reaching this case, fence is definitely not NULL,
+ * but a NULL check was added to satisfy static analysis
*/
- if (!fence->mcs_handling_done)
+ if (fence && !fence->mcs_handling_done) {
+ /*
+ * In case the multi-CS is completed but its MCS handling is not done,
+ * we "complete" the multi-CS to prevent it from waiting until
+ * time-out; the "multi-CS handling done" check will get
+ * another chance at the next iteration
+ */
+ complete_all(&mcs_compl->completion);
break;
+ }
mcs_data->completion_bitmap |= BIT(i);
/*
@@ -2456,6 +2525,21 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
return rc;
}
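+/*
+ * Convert a 64-bit microseconds value to jiffies, avoiding both the 32-bit
+ * limit of usecs_to_jiffies() and an overflow when multiplying to
+ * nanoseconds.
+ */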
+static inline unsigned long hl_usecs64_to_jiffies(const u64 usecs)
+{
+ if (usecs <= U32_MAX)
+ return usecs_to_jiffies(usecs);
+
+ /*
+ * If the value converted to nanoseconds would not fit in 64 bits, use
+ * the largest 64-bit value.
+ */
+ if (usecs >= ((u64)(U64_MAX / NSEC_PER_USEC)))
+ return nsecs_to_jiffies(U64_MAX);
+
+ return nsecs_to_jiffies(usecs * NSEC_PER_USEC);
+}
+
/*
* hl_wait_multi_cs_completion_init - init completion structure
*
@@ -2469,9 +2553,7 @@ static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
* the function gets the first available completion (by marking it "used")
* and initialize its values.
*/
-static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
- struct hl_device *hdev,
- u8 stream_master_bitmap)
+static struct multi_cs_completion *hl_wait_multi_cs_completion_init(struct hl_device *hdev)
{
struct multi_cs_completion *mcs_compl;
int i;
@@ -2483,8 +2565,11 @@ static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
if (!mcs_compl->used) {
mcs_compl->used = 1;
mcs_compl->timestamp = 0;
- mcs_compl->stream_master_qid_map = stream_master_bitmap;
- reinit_completion(&mcs_compl->completion);
+ /*
+ * Init the QID map to 0 to avoid completion by CSs. The actual QID map
+ * of the multi-CS CSs will be set incrementally at a later stage
+ */
+ mcs_compl->stream_master_qid_map = 0;
spin_unlock(&mcs_compl->lock);
break;
}
@@ -2492,8 +2577,7 @@ static struct multi_cs_completion *hl_wait_multi_cs_completion_init(
}
if (i == MULTI_CS_MAX_USER_CTX) {
- dev_err(hdev->dev,
- "no available multi-CS completion structure\n");
+ dev_err(hdev->dev, "no available multi-CS completion structure\n");
return ERR_PTR(-ENOMEM);
}
return mcs_compl;
@@ -2524,27 +2608,18 @@ static void hl_wait_multi_cs_completion_fini(
*
* @return 0 on success, otherwise non 0 error code
*/
-static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data)
+static int hl_wait_multi_cs_completion(struct multi_cs_data *mcs_data,
+ struct multi_cs_completion *mcs_compl)
{
- struct hl_device *hdev = mcs_data->ctx->hdev;
- struct multi_cs_completion *mcs_compl;
long completion_rc;
- mcs_compl = hl_wait_multi_cs_completion_init(hdev,
- mcs_data->stream_master_qid_map);
- if (IS_ERR(mcs_compl))
- return PTR_ERR(mcs_compl);
-
- completion_rc = wait_for_completion_interruptible_timeout(
- &mcs_compl->completion,
- usecs_to_jiffies(mcs_data->timeout_us));
+ completion_rc = wait_for_completion_interruptible_timeout(&mcs_compl->completion,
+ mcs_data->timeout_jiffies);
/* update timestamp */
if (completion_rc > 0)
mcs_data->timestamp = mcs_compl->timestamp;
- hl_wait_multi_cs_completion_fini(mcs_compl);
-
mcs_data->wait_status = completion_rc;
return 0;
@@ -2577,6 +2652,7 @@ void hl_multi_cs_completion_init(struct hl_device *hdev)
*/
static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
+ struct multi_cs_completion *mcs_compl;
struct hl_device *hdev = hpriv->hdev;
struct multi_cs_data mcs_data = {0};
union hl_wait_cs_args *args = data;
@@ -2631,9 +2707,17 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
hl_ctx_get(hdev, ctx);
+ /* wait (with timeout) for the first CS to be completed */
+ mcs_data.timeout_jiffies = hl_usecs64_to_jiffies(args->in.timeout_us);
+ mcs_compl = hl_wait_multi_cs_completion_init(hdev);
+ if (IS_ERR(mcs_compl)) {
+ rc = PTR_ERR(mcs_compl);
+ goto put_ctx;
+ }
+
/* poll all CS fences, extract timestamp */
mcs_data.update_ts = true;
- rc = hl_cs_poll_fences(&mcs_data);
+ rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
/*
* skip wait for CS completion when one of the below is true:
* - an error on the poll function
@@ -2641,34 +2725,39 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
* - the user called ioctl with timeout 0
*/
if (rc || mcs_data.completion_bitmap || !args->in.timeout_us)
- goto put_ctx;
+ goto completion_fini;
- /* wait (with timeout) for the first CS to be completed */
- mcs_data.timeout_us = args->in.timeout_us;
- rc = hl_wait_multi_cs_completion(&mcs_data);
- if (rc)
- goto put_ctx;
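+ /*
+ * Loop: a completion may have been triggered by a CS that shares a
+ * stream master but is not in this multi-CS list, in which case we
+ * poll the fences again and keep waiting with the remaining timeout.
+ */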
+ while (true) {
+ rc = hl_wait_multi_cs_completion(&mcs_data, mcs_compl);
+ if (rc || (mcs_data.wait_status == 0))
+ break;
- if (mcs_data.wait_status > 0) {
/*
* poll fences once again to update the CS map.
* no timestamp should be updated this time.
*/
mcs_data.update_ts = false;
- rc = hl_cs_poll_fences(&mcs_data);
+ rc = hl_cs_poll_fences(&mcs_data, mcs_compl);
+
+ if (mcs_data.completion_bitmap)
+ break;
/*
* if hl_wait_multi_cs_completion returned before timeout (i.e.
- * it got a completion) we expect to see at least one CS
- * completed after the poll function.
+ * it got a completion), it was either completed by a CS in the multi-CS list
+ * (in which case the indication is a non-empty completion_bitmap) or by a CS
+ * submitted to one of the shared stream masters but not in the multi-CS list
+ * (in which case we should wait again, with the remaining timeout, and set
+ * the timestamp to zero to let a CS related to the current multi-CS set a
+ * new, relevant, timestamp)
*/
- if (!mcs_data.completion_bitmap) {
- dev_warn_ratelimited(hdev->dev,
- "Multi-CS got completion on wait but no CS completed\n");
- rc = -EFAULT;
- }
+ mcs_data.timeout_jiffies = mcs_data.wait_status;
+ mcs_compl->timestamp = 0;
}
+completion_fini:
+ hl_wait_multi_cs_completion_fini(mcs_compl);
+
put_ctx:
hl_ctx_put(ctx);
kfree(fence_arr);
@@ -2699,7 +2788,7 @@ free_seq_arr:
}
/* update if some CS was gone */
- if (mcs_data.timestamp)
+ if (!mcs_data.timestamp)
args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
} else {
args->out.status = HL_WAIT_CS_STATUS_BUSY;
@@ -2766,37 +2855,129 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
}
static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
- u32 timeout_us, u64 user_address,
- u64 target_value, u16 interrupt_offset,
- enum hl_cs_wait_status *status,
+ struct hl_cb_mgr *cb_mgr, u64 timeout_us,
+ u64 cq_counters_handle, u64 cq_counters_offset,
+ u64 target_value, struct hl_user_interrupt *interrupt,
+ u32 *status,
u64 *timestamp)
{
struct hl_user_pending_interrupt *pend;
- struct hl_user_interrupt *interrupt;
unsigned long timeout, flags;
- u64 completion_value;
long completion_rc;
+ struct hl_cb *cb;
int rc = 0;
+ u32 handle;
- if (timeout_us == U32_MAX)
- timeout = timeout_us;
- else
- timeout = usecs_to_jiffies(timeout_us);
+ timeout = hl_usecs64_to_jiffies(timeout_us);
hl_ctx_get(hdev, ctx);
- pend = kmalloc(sizeof(*pend), GFP_KERNEL);
+ cq_counters_handle >>= PAGE_SHIFT;
+ handle = (u32) cq_counters_handle;
+
+ cb = hl_cb_get(hdev, cb_mgr, handle);
+ if (!cb) {
+ hl_ctx_put(ctx);
+ return -EINVAL;
+ }
+
+ pend = kzalloc(sizeof(*pend), GFP_KERNEL);
if (!pend) {
+ hl_cb_put(cb);
hl_ctx_put(ctx);
return -ENOMEM;
}
hl_fence_init(&pend->fence, ULONG_MAX);
- if (interrupt_offset == HL_COMMON_USER_INTERRUPT_ID)
- interrupt = &hdev->common_user_interrupt;
- else
- interrupt = &hdev->user_interrupt[interrupt_offset];
+ pend->cq_kernel_addr = (u64 *) cb->kernel_address + cq_counters_offset;
+ pend->cq_target_value = target_value;
+
+ /* We check the completion value as the interrupt could have been received
+ * before we added the node to the wait list
+ */
+ if (*pend->cq_kernel_addr >= target_value) {
+ *status = HL_WAIT_CS_STATUS_COMPLETED;
+ /* There was no interrupt, we assume the completion is now. */
+ pend->fence.timestamp = ktime_get();
+ }
+
+ if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED))
+ goto set_timestamp;
+
+ /* Add pending user interrupt to relevant list for the interrupt
+ * handler to monitor
+ */
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
+ /* Wait for interrupt handler to signal completion */
+ completion_rc = wait_for_completion_interruptible_timeout(&pend->fence.completion,
+ timeout);
+ if (completion_rc > 0) {
+ *status = HL_WAIT_CS_STATUS_COMPLETED;
+ } else {
+ if (completion_rc == -ERESTARTSYS) {
+ dev_err_ratelimited(hdev->dev,
+ "user process got signal while waiting for interrupt ID %d\n",
+ interrupt->interrupt_id);
+ rc = -EINTR;
+ *status = HL_WAIT_CS_STATUS_ABORTED;
+ } else {
+ if (pend->fence.error == -EIO) {
+ dev_err_ratelimited(hdev->dev,
+ "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
+ pend->fence.error);
+ rc = -EIO;
+ *status = HL_WAIT_CS_STATUS_ABORTED;
+ } else {
+ dev_err_ratelimited(hdev->dev, "Waiting for interrupt ID %d timed out\n",
+ interrupt->interrupt_id);
+ rc = -ETIMEDOUT;
+ *status = HL_WAIT_CS_STATUS_BUSY;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ list_del(&pend->wait_list_node);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
+set_timestamp:
+ *timestamp = ktime_to_ns(pend->fence.timestamp);
+
+ kfree(pend);
+ hl_cb_put(cb);
+ hl_ctx_put(ctx);
+
+ return rc;
+}
+
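+/*
+ * Same wait semantics as _hl_interrupt_wait_ioctl() above, but the completion
+ * value is polled from a user-space address rather than from a kernel-mapped
+ * CQ counters buffer.
+ */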
+static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx,
+ u64 timeout_us, u64 user_address,
+ u64 target_value, struct hl_user_interrupt *interrupt,
+ u32 *status,
+ u64 *timestamp)
+{
+ struct hl_user_pending_interrupt *pend;
+ unsigned long timeout, flags;
+ u64 completion_value;
+ long completion_rc;
+ int rc = 0;
+
+ timeout = hl_usecs64_to_jiffies(timeout_us);
+
+ hl_ctx_get(hdev, ctx);
+
+ pend = kzalloc(sizeof(*pend), GFP_KERNEL);
+ if (!pend) {
+ hl_ctx_put(ctx);
+ return -ENOMEM;
+ }
+
+ hl_fence_init(&pend->fence, ULONG_MAX);
/* Add pending user interrupt to relevant list for the interrupt
* handler to monitor
@@ -2815,13 +2996,14 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
}
if (completion_value >= target_value) {
- *status = CS_WAIT_STATUS_COMPLETED;
+ *status = HL_WAIT_CS_STATUS_COMPLETED;
/* There was no interrupt, we assume the completion is now. */
pend->fence.timestamp = ktime_get();
- } else
- *status = CS_WAIT_STATUS_BUSY;
+ } else {
+ *status = HL_WAIT_CS_STATUS_BUSY;
+ }
- if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
+ if (!timeout_us || (*status == HL_WAIT_CS_STATUS_COMPLETED))
goto remove_pending_user_interrupt;
wait_again:
@@ -2850,7 +3032,13 @@ wait_again:
}
if (completion_value >= target_value) {
- *status = CS_WAIT_STATUS_COMPLETED;
+ *status = HL_WAIT_CS_STATUS_COMPLETED;
+ } else if (pend->fence.error) {
+ dev_err_ratelimited(hdev->dev,
+ "interrupt based wait ioctl aborted(error:%d) due to a reset cycle initiated\n",
+ pend->fence.error);
+ /* set the command completion status as ABORTED */
+ *status = HL_WAIT_CS_STATUS_ABORTED;
} else {
timeout = completion_rc;
goto wait_again;
@@ -2861,7 +3049,7 @@ wait_again:
interrupt->interrupt_id);
rc = -EINTR;
} else {
- *status = CS_WAIT_STATUS_BUSY;
+ *status = HL_WAIT_CS_STATUS_BUSY;
}
remove_pending_user_interrupt:
@@ -2879,11 +3067,12 @@ remove_pending_user_interrupt:
static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
{
- u16 interrupt_id, interrupt_offset, first_interrupt, last_interrupt;
+ u16 interrupt_id, first_interrupt, last_interrupt;
struct hl_device *hdev = hpriv->hdev;
struct asic_fixed_properties *prop;
+ struct hl_user_interrupt *interrupt;
union hl_wait_cs_args *args = data;
- enum hl_cs_wait_status status;
+ u32 status = HL_WAIT_CS_STATUS_BUSY;
u64 timestamp;
int rc;
@@ -2894,8 +3083,7 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
return -EPERM;
}
- interrupt_id =
- FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
+ interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
first_interrupt = prop->first_available_user_msix_interrupt;
last_interrupt = prop->first_available_user_msix_interrupt +
@@ -2908,15 +3096,21 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
}
if (interrupt_id == HL_COMMON_USER_INTERRUPT_ID)
- interrupt_offset = HL_COMMON_USER_INTERRUPT_ID;
+ interrupt = &hdev->common_user_interrupt;
else
- interrupt_offset = interrupt_id - first_interrupt;
+ interrupt = &hdev->user_interrupt[interrupt_id - first_interrupt];
- rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx,
+ if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
+ rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->cb_mgr,
+ args->in.interrupt_timeout_us, args->in.cq_counters_handle,
+ args->in.cq_counters_offset,
+ args->in.target, interrupt, &status,
+ &timestamp);
+ else
+ rc = _hl_interrupt_wait_ioctl_user_addr(hdev, hpriv->ctx,
args->in.interrupt_timeout_us, args->in.addr,
- args->in.target, interrupt_offset, &status,
+ args->in.target, interrupt, &status,
&timestamp);
-
if (rc) {
if (rc != -EINTR)
dev_err_ratelimited(hdev->dev,
@@ -2926,22 +3120,13 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
}
memset(args, 0, sizeof(*args));
+ args->out.status = status;
if (timestamp) {
args->out.timestamp_nsec = timestamp;
args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
}
- switch (status) {
- case CS_WAIT_STATUS_COMPLETED:
- args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
- break;
- case CS_WAIT_STATUS_BUSY:
- default:
- args->out.status = HL_WAIT_CS_STATUS_BUSY;
- break;
- }
-
return 0;
}
@@ -2955,7 +3140,7 @@ int hl_wait_ioctl(struct hl_fpriv *hpriv, void *data)
* user interrupt
*/
if (!hl_device_operational(hpriv->hdev, NULL))
- return -EPERM;
+ return -EBUSY;
if (flags & HL_WAIT_CS_FLAGS_INTERRUPT)
rc = hl_interrupt_wait_ioctl(hpriv, data);
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index d0aaccd4df2c..c6360e33bce8 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -13,13 +13,13 @@ void hl_encaps_handle_do_release(struct kref *ref)
{
struct hl_cs_encaps_sig_handle *handle =
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
- struct hl_ctx *ctx = handle->hdev->compute_ctx;
- struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr;
+ struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;
spin_lock(&mgr->lock);
idr_remove(&mgr->handles, handle->id);
spin_unlock(&mgr->lock);
+ hl_ctx_put(handle->ctx);
kfree(handle);
}
@@ -27,8 +27,7 @@ static void hl_encaps_handle_do_release_sob(struct kref *ref)
{
struct hl_cs_encaps_sig_handle *handle =
container_of(ref, struct hl_cs_encaps_sig_handle, refcount);
- struct hl_ctx *ctx = handle->hdev->compute_ctx;
- struct hl_encaps_signals_mgr *mgr = &ctx->sig_mgr;
+ struct hl_encaps_signals_mgr *mgr = &handle->ctx->sig_mgr;
/* if we're here, then there was a signals reservation but cs with
* encaps signals wasn't submitted, so need to put refcount
@@ -40,6 +39,7 @@ static void hl_encaps_handle_do_release_sob(struct kref *ref)
idr_remove(&mgr->handles, handle->id);
spin_unlock(&mgr->lock);
+ hl_ctx_put(handle->ctx);
kfree(handle);
}
@@ -97,11 +97,9 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
/* The engines are stopped as there is no executing CS, but the
* Coresight might be still working by accessing addresses
* related to the stopped engines. Hence stop it explicitly.
- * Stop only if this is the compute context, as there can be
- * only one compute context
*/
- if ((hdev->in_debug) && (hdev->compute_ctx == ctx))
- hl_device_set_debug_mode(hdev, false);
+ if (hdev->in_debug)
+ hl_device_set_debug_mode(hdev, ctx, false);
hdev->asic_funcs->ctx_fini(ctx);
hl_cb_va_pool_fini(ctx);
@@ -167,7 +165,7 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
hpriv->ctx = ctx;
/* TODO: remove the following line for multiple process support */
- hdev->compute_ctx = ctx;
+ hdev->is_compute_ctx_active = true;
return 0;
@@ -274,6 +272,27 @@ int hl_ctx_put(struct hl_ctx *ctx)
return kref_put(&ctx->refcount, hl_ctx_do_release);
}
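+
+/*
+ * hl_get_compute_ctx - return the context of the single compute user, with
+ * its refcount incremented, or NULL if the device is not open. The caller
+ * must release the context with hl_ctx_put().
+ */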
+struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
+{
+ struct hl_ctx *ctx = NULL;
+ struct hl_fpriv *hpriv;
+
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
+ /* There can only be a single user that has opened the compute device, so
+ * exit immediately once we find it
+ */
+ ctx = hpriv->ctx;
+ hl_ctx_get(hdev, ctx);
+ break;
+ }
+
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ return ctx;
+}
+
/*
* hl_ctx_get_fence_locked - get CS fence under CS lock
*
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 1f2a3dc6c4e2..fc084ee5106e 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -15,19 +15,25 @@
#define MMU_ADDR_BUF_SIZE 40
#define MMU_ASID_BUF_SIZE 10
#define MMU_KBUF_SIZE (MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
+#define I2C_MAX_TRANSACTION_LEN 8
static struct dentry *hl_debug_root;
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
- u8 i2c_reg, long *val)
+ u8 i2c_reg, u8 i2c_len, u64 *val)
{
struct cpucp_packet pkt;
- u64 result;
int rc;
if (!hl_device_operational(hdev, NULL))
return -EBUSY;
+ if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
+ dev_err(hdev->dev, "I2C transaction length %u, exceeds maximum of %u\n",
+ i2c_len, I2C_MAX_TRANSACTION_LEN);
+ return -EINVAL;
+ }
+
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD <<
@@ -35,12 +41,10 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.i2c_bus = i2c_bus;
pkt.i2c_addr = i2c_addr;
pkt.i2c_reg = i2c_reg;
+ pkt.i2c_len = i2c_len;
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
- 0, &result);
-
- *val = (long) result;
-
+ 0, val);
if (rc)
dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
@@ -48,7 +52,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
}
static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
- u8 i2c_reg, u32 val)
+ u8 i2c_reg, u8 i2c_len, u64 val)
{
struct cpucp_packet pkt;
int rc;
@@ -56,6 +60,12 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
if (!hl_device_operational(hdev, NULL))
return -EBUSY;
+ if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
+ dev_err(hdev->dev, "I2C transaction length %u, exceeds maximum of %u\n",
+ i2c_len, I2C_MAX_TRANSACTION_LEN);
+ return -EINVAL;
+ }
+
memset(&pkt, 0, sizeof(pkt));
pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR <<
@@ -63,6 +73,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.i2c_bus = i2c_bus;
pkt.i2c_addr = i2c_addr;
pkt.i2c_reg = i2c_reg;
+ pkt.i2c_len = i2c_len;
pkt.value = cpu_to_le64(val);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
@@ -235,6 +246,8 @@ static int vm_show(struct seq_file *s, void *data)
struct hl_vm_hash_node *hnode;
struct hl_userptr *userptr;
struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
+ struct hl_va_range *va_range;
+ struct hl_vm_va_block *va_block;
enum vm_type *vm_type;
bool once = true;
u64 j;
@@ -314,6 +327,25 @@ static int vm_show(struct seq_file *s, void *data)
spin_unlock(&dev_entry->ctx_mem_hash_spinlock);
+ ctx = hl_get_compute_ctx(dev_entry->hdev);
+ if (ctx) {
+ seq_puts(s, "\nVA ranges:\n\n");
+ for (i = HL_VA_RANGE_TYPE_HOST ; i < HL_VA_RANGE_TYPE_MAX ; ++i) {
+ va_range = ctx->va_range[i];
+ seq_printf(s, " va_range %d\n", i);
+ seq_puts(s, "---------------------\n");
+ mutex_lock(&va_range->lock);
+ list_for_each_entry(va_block, &va_range->list, node) {
+ seq_printf(s, "%#16llx - %#16llx (%#llx)\n",
+ va_block->start, va_block->end,
+ va_block->size);
+ }
+ mutex_unlock(&va_range->lock);
+ seq_puts(s, "\n");
+ }
+ hl_ctx_put(ctx);
+ }
+
if (!once)
seq_puts(s, "\n");
@@ -407,7 +439,7 @@ static int mmu_show(struct seq_file *s, void *data)
if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
ctx = hdev->kernel_ctx;
else
- ctx = hdev->compute_ctx;
+ ctx = hl_get_compute_ctx(hdev);
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
@@ -495,7 +527,7 @@ static int engines_show(struct seq_file *s, void *data)
struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
struct hl_device *hdev = dev_entry->hdev;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev,
"Can't check device idle during reset\n");
return 0;
@@ -560,7 +592,7 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
u64 *phys_addr)
{
struct hl_vm_phys_pg_pack *phys_pg_pack;
- struct hl_ctx *ctx = hdev->compute_ctx;
+ struct hl_ctx *ctx;
struct hl_vm_hash_node *hnode;
u64 end_address, range_size;
struct hl_userptr *userptr;
@@ -568,6 +600,8 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
bool valid = false;
int i, rc = 0;
+ ctx = hl_get_compute_ctx(hdev);
+
if (!ctx) {
dev_err(hdev->dev, "no ctx available\n");
return -EINVAL;
@@ -624,7 +658,7 @@ static ssize_t hl_data_read32(struct file *f, char __user *buf,
ssize_t rc;
u32 val;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
return 0;
}
@@ -660,7 +694,7 @@ static ssize_t hl_data_write32(struct file *f, const char __user *buf,
u32 value;
ssize_t rc;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
return 0;
}
@@ -697,7 +731,7 @@ static ssize_t hl_data_read64(struct file *f, char __user *buf,
ssize_t rc;
u64 val;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
return 0;
}
@@ -733,7 +767,7 @@ static ssize_t hl_data_write64(struct file *f, const char __user *buf,
u64 value;
ssize_t rc;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
return 0;
}
@@ -768,7 +802,7 @@ static ssize_t hl_dma_size_write(struct file *f, const char __user *buf,
ssize_t rc;
u32 size;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev, "Can't DMA during reset\n");
return 0;
}
@@ -874,22 +908,22 @@ static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
char tmp_buf[32];
- long val;
+ u64 val;
ssize_t rc;
if (*ppos)
return 0;
rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
- entry->i2c_reg, &val);
+ entry->i2c_reg, entry->i2c_len, &val);
if (rc) {
dev_err(hdev->dev,
- "Failed to read from I2C bus %d, addr %d, reg %d\n",
- entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
+ "Failed to read from I2C bus %d, addr %d, reg %d, len %d\n",
+ entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
return rc;
}
- sprintf(tmp_buf, "0x%02lx\n", val);
+ sprintf(tmp_buf, "%#02llx\n", val);
rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
strlen(tmp_buf));
@@ -901,19 +935,19 @@ static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- u32 value;
+ u64 value;
ssize_t rc;
- rc = kstrtouint_from_user(buf, count, 16, &value);
+ rc = kstrtou64_from_user(buf, count, 16, &value);
if (rc)
return rc;
rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
- entry->i2c_reg, value);
+ entry->i2c_reg, entry->i2c_len, value);
if (rc) {
dev_err(hdev->dev,
- "Failed to write 0x%02x to I2C bus %d, addr %d, reg %d\n",
- value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg);
+ "Failed to write %#02llx to I2C bus %d, addr %d, reg %d, len %d\n",
+ value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
return rc;
}
@@ -1043,7 +1077,7 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
u64 value;
ssize_t rc;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev,
"Can't change clock gating during reset\n");
return 0;
@@ -1085,7 +1119,7 @@ static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
u32 value;
ssize_t rc;
- if (atomic_read(&hdev->in_reset)) {
+ if (hdev->reset_info.in_reset) {
dev_warn_ratelimited(hdev->dev,
"Can't change stop on error during reset\n");
return 0;
@@ -1396,6 +1430,11 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry->root,
&dev_entry->i2c_reg);
+ debugfs_create_u8("i2c_len",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_len);
+
debugfs_create_file("i2c_data",
0644,
dev_entry->root,
@@ -1458,7 +1497,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
debugfs_create_x8("skip_reset_on_timeout",
0644,
dev_entry->root,
- &hdev->skip_reset_on_timeout);
+ &hdev->reset_info.skip_reset_on_timeout);
debugfs_create_file("state_dump",
0600,
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 2022e5d7b3ad..733338ab6f1d 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -17,9 +17,9 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
- if (atomic_read(&hdev->in_reset))
+ if (hdev->reset_info.in_reset)
status = HL_DEVICE_STATUS_IN_RESET;
- else if (hdev->needs_reset)
+ else if (hdev->reset_info.needs_reset)
status = HL_DEVICE_STATUS_NEEDS_RESET;
else if (hdev->disabled)
status = HL_DEVICE_STATUS_MALFUNCTION;
@@ -95,14 +95,14 @@ static void hpriv_release(struct kref *ref)
if ((hdev->reset_if_device_not_idle && !device_is_idle)
|| hdev->reset_upon_device_release)
- hl_device_reset(hdev, HL_RESET_DEVICE_RELEASE);
+ hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
- /* Now we can mark the compute_ctx as empty. Even if a reset is running in a different
+ /* Now we can mark the compute_ctx as not active. Even if a reset is running in a different
* thread, we don't care because the in_reset is marked so if a user will try to open
- * the device it will fail on that, even if compute_ctx is NULL.
+ * the device it will fail on that, even if compute_ctx is false.
*/
mutex_lock(&hdev->fpriv_list_lock);
- hdev->compute_ctx = NULL;
+ hdev->is_compute_ctx_active = false;
mutex_unlock(&hdev->fpriv_list_lock);
kfree(hpriv);
@@ -169,9 +169,9 @@ static int hl_device_release_ctrl(struct inode *inode, struct file *filp)
goto out;
}
- mutex_lock(&hdev->fpriv_list_lock);
+ mutex_lock(&hdev->fpriv_ctrl_list_lock);
list_del(&hpriv->dev_node);
- mutex_unlock(&hdev->fpriv_list_lock);
+ mutex_unlock(&hdev->fpriv_ctrl_list_lock);
out:
put_pid(hpriv->taskpid);
@@ -324,16 +324,12 @@ put_devices:
static void device_hard_reset_pending(struct work_struct *work)
{
struct hl_device_reset_work *device_reset_work =
- container_of(work, struct hl_device_reset_work,
- reset_work.work);
+ container_of(work, struct hl_device_reset_work, reset_work.work);
struct hl_device *hdev = device_reset_work->hdev;
u32 flags;
int rc;
- flags = HL_RESET_HARD | HL_RESET_FROM_RESET_THREAD;
-
- if (device_reset_work->fw_reset)
- flags |= HL_RESET_FW;
+ flags = device_reset_work->flags | HL_DRV_RESET_FROM_RESET_THR;
rc = hl_device_reset(hdev, flags);
if ((rc == -EBUSY) && !hdev->device_fini_pending) {
@@ -452,9 +448,12 @@ static int device_early_init(struct hl_device *hdev)
mutex_init(&hdev->debug_lock);
INIT_LIST_HEAD(&hdev->cs_mirror_list);
spin_lock_init(&hdev->cs_mirror_lock);
+ spin_lock_init(&hdev->reset_info.lock);
INIT_LIST_HEAD(&hdev->fpriv_list);
+ INIT_LIST_HEAD(&hdev->fpriv_ctrl_list);
mutex_init(&hdev->fpriv_list_lock);
- atomic_set(&hdev->in_reset, 0);
+ mutex_init(&hdev->fpriv_ctrl_list_lock);
+ mutex_init(&hdev->clk_throttling.lock);
return 0;
@@ -494,6 +493,9 @@ static void device_early_fini(struct hl_device *hdev)
mutex_destroy(&hdev->send_cpu_message_lock);
mutex_destroy(&hdev->fpriv_list_lock);
+ mutex_destroy(&hdev->fpriv_ctrl_list_lock);
+
+ mutex_destroy(&hdev->clk_throttling.lock);
hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr);
@@ -513,22 +515,6 @@ static void device_early_fini(struct hl_device *hdev)
hdev->asic_funcs->early_fini(hdev);
}
-static void set_freq_to_low_job(struct work_struct *work)
-{
- struct hl_device *hdev = container_of(work, struct hl_device,
- work_freq.work);
-
- mutex_lock(&hdev->fpriv_list_lock);
-
- if (!hdev->compute_ctx)
- hl_device_set_frequency(hdev, PLL_LOW);
-
- mutex_unlock(&hdev->fpriv_list_lock);
-
- schedule_delayed_work(&hdev->work_freq,
- usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
-}
-
static void hl_device_heartbeat(struct work_struct *work)
{
struct hl_device *hdev = container_of(work, struct hl_device,
@@ -540,8 +526,10 @@ static void hl_device_heartbeat(struct work_struct *work)
if (!hdev->asic_funcs->send_heartbeat(hdev))
goto reschedule;
- dev_err(hdev->dev, "Device heartbeat failed!\n");
- hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_HEARTBEAT);
+ if (hl_device_operational(hdev, NULL))
+ dev_err(hdev->dev, "Device heartbeat failed!\n");
+
+ hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT);
return;
@@ -552,12 +540,12 @@ reschedule:
* If control reached here, then at least one heartbeat work has been
* scheduled since last reset/init cycle.
* So if the device is not already in reset cycle, reset the flag
- * prev_reset_trigger as no reset occurred with HL_RESET_FW_FATAL_ERR
+ * prev_reset_trigger as no reset occurred with HL_DRV_RESET_FW_FATAL_ERR
 * status for at least one heartbeat. From this point the driver restarts
* tracking future consecutive fatal errors.
*/
- if (!(atomic_read(&hdev->in_reset)))
- hdev->prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
+ if (!hdev->reset_info.in_reset)
+ hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
schedule_delayed_work(&hdev->work_heartbeat,
usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
@@ -586,18 +574,6 @@ static int device_late_init(struct hl_device *hdev)
hdev->high_pll = hdev->asic_prop.high_pll;
- /* force setting to low frequency */
- hdev->curr_pll_profile = PLL_LOW;
-
- if (hdev->pm_mng_profile == PM_AUTO)
- hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
- else
- hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST);
-
- INIT_DELAYED_WORK(&hdev->work_freq, set_freq_to_low_job);
- schedule_delayed_work(&hdev->work_freq,
- usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
-
if (hdev->heartbeat) {
INIT_DELAYED_WORK(&hdev->work_heartbeat, hl_device_heartbeat);
schedule_delayed_work(&hdev->work_heartbeat,
@@ -620,7 +596,6 @@ static void device_late_fini(struct hl_device *hdev)
if (!hdev->late_init_done)
return;
- cancel_delayed_work_sync(&hdev->work_freq);
if (hdev->heartbeat)
cancel_delayed_work_sync(&hdev->work_heartbeat);
@@ -650,36 +625,7 @@ int hl_device_utilization(struct hl_device *hdev, u32 *utilization)
return 0;
}
-/*
- * hl_device_set_frequency - set the frequency of the device
- *
- * @hdev: pointer to habanalabs device structure
- * @freq: the new frequency value
- *
- * Change the frequency if needed. This function has no protection against
- * concurrency, therefore it is assumed that the calling function has protected
- * itself against the case of calling this function from multiple threads with
- * different values
- *
- * Returns 0 if no change was done, otherwise returns 1
- */
-int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
-{
- if ((hdev->pm_mng_profile == PM_MANUAL) ||
- (hdev->curr_pll_profile == freq))
- return 0;
-
- dev_dbg(hdev->dev, "Changing device frequency to %s\n",
- freq == PLL_HIGH ? "high" : "low");
-
- hdev->asic_funcs->set_pll_profile(hdev, freq);
-
- hdev->curr_pll_profile = freq;
-
- return 1;
-}
-
-int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
+int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable)
{
int rc = 0;
@@ -693,12 +639,12 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
goto out;
}
- if (!hdev->hard_reset_pending)
- hdev->asic_funcs->halt_coresight(hdev);
+ if (!hdev->reset_info.hard_reset_pending)
+ hdev->asic_funcs->halt_coresight(hdev, ctx);
hdev->in_debug = 0;
- if (!hdev->hard_reset_pending)
+ if (!hdev->reset_info.hard_reset_pending)
hdev->asic_funcs->set_clock_gating(hdev);
goto out;
@@ -735,6 +681,8 @@ static void take_release_locks(struct hl_device *hdev)
/* Flush anyone that is inside device open */
mutex_lock(&hdev->fpriv_list_lock);
mutex_unlock(&hdev->fpriv_list_lock);
+ mutex_lock(&hdev->fpriv_ctrl_list_lock);
+ mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}
static void cleanup_resources(struct hl_device *hdev, bool hard_reset, bool fw_reset)
@@ -774,11 +722,14 @@ int hl_device_suspend(struct hl_device *hdev)
pci_save_state(hdev->pdev);
/* Block future CS/VM/JOB completion operations */
- rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
- if (rc) {
+ spin_lock(&hdev->reset_info.lock);
+ if (hdev->reset_info.in_reset) {
+ spin_unlock(&hdev->reset_info.lock);
dev_err(hdev->dev, "Can't suspend while in reset\n");
return -EIO;
}
+ hdev->reset_info.in_reset = 1;
+ spin_unlock(&hdev->reset_info.lock);
/* This blocks all other stuff that is not blocked by in_reset */
hdev->disabled = true;
@@ -828,10 +779,12 @@ int hl_device_resume(struct hl_device *hdev)
}
- hdev->disabled = false;
- atomic_set(&hdev->in_reset, 0);
+ /* 'in_reset' was set to true during suspend, now we must clear it in order
+ * for hard reset to be performed
+ */
+ hdev->reset_info.in_reset = 0;
- rc = hl_device_reset(hdev, HL_RESET_HARD);
+ rc = hl_device_reset(hdev, HL_DRV_RESET_HARD);
if (rc) {
dev_err(hdev->dev, "Failed to reset device during resume\n");
goto disable_device;
@@ -846,17 +799,21 @@ disable_device:
return rc;
}
-static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
+static int device_kill_open_processes(struct hl_device *hdev, u32 timeout, bool control_dev)
{
- struct hl_fpriv *hpriv;
struct task_struct *task = NULL;
+ struct list_head *fd_list;
+ struct hl_fpriv *hpriv;
+ struct mutex *fd_lock;
u32 pending_cnt;
+ fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
+ fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
/* Giving time for user to close FD, and for processes that are inside
* hl_device_open to finish
*/
- if (!list_empty(&hdev->fpriv_list))
+ if (!list_empty(fd_list))
ssleep(1);
if (timeout) {
@@ -872,12 +829,12 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
}
}
- mutex_lock(&hdev->fpriv_list_lock);
+ mutex_lock(fd_lock);
/* This section must be protected because we are dereferencing
* pointers that are freed if the process exits
*/
- list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
+ list_for_each_entry(hpriv, fd_list, dev_node) {
task = get_pid_task(hpriv->taskpid, PIDTYPE_PID);
if (task) {
dev_info(hdev->dev, "Killing user process pid=%d\n",
@@ -889,12 +846,12 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
} else {
dev_warn(hdev->dev,
"Can't get task struct for PID so giving up on killing process\n");
- mutex_unlock(&hdev->fpriv_list_lock);
+ mutex_unlock(fd_lock);
return -ETIME;
}
}
- mutex_unlock(&hdev->fpriv_list_lock);
+ mutex_unlock(fd_lock);
/*
* We killed the open users, but that doesn't mean they are closed.
@@ -906,7 +863,7 @@ static int device_kill_open_processes(struct hl_device *hdev, u32 timeout)
*/
wait_for_processes:
- while ((!list_empty(&hdev->fpriv_list)) && (pending_cnt)) {
+ while ((!list_empty(fd_list)) && (pending_cnt)) {
dev_dbg(hdev->dev,
"Waiting for all unmap operations to finish before hard reset\n");
@@ -916,7 +873,7 @@ wait_for_processes:
}
/* All processes exited successfully */
- if (list_empty(&hdev->fpriv_list))
+ if (list_empty(fd_list))
return 0;
/* Give up waiting for processes to exit */
@@ -928,14 +885,19 @@ wait_for_processes:
return -EBUSY;
}
-static void device_disable_open_processes(struct hl_device *hdev)
+static void device_disable_open_processes(struct hl_device *hdev, bool control_dev)
{
+ struct list_head *fd_list;
struct hl_fpriv *hpriv;
+ struct mutex *fd_lock;
- mutex_lock(&hdev->fpriv_list_lock);
- list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
+ fd_lock = control_dev ? &hdev->fpriv_ctrl_list_lock : &hdev->fpriv_list_lock;
+ fd_list = control_dev ? &hdev->fpriv_ctrl_list : &hdev->fpriv_list;
+
+ mutex_lock(fd_lock);
+ list_for_each_entry(hpriv, fd_list, dev_node)
hpriv->hdev = NULL;
- mutex_unlock(&hdev->fpriv_list_lock);
+ mutex_unlock(fd_lock);
}
static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
@@ -948,17 +910,17 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
* ('in_reset' makes sure of it). This makes sure that
* 'reset_cause' will continue holding its 1st recorded reason!
*/
- if (flags & HL_RESET_HEARTBEAT) {
- hdev->curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
- cur_reset_trigger = HL_RESET_HEARTBEAT;
- } else if (flags & HL_RESET_TDR) {
- hdev->curr_reset_cause = HL_RESET_CAUSE_TDR;
- cur_reset_trigger = HL_RESET_TDR;
- } else if (flags & HL_RESET_FW_FATAL_ERR) {
- hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
- cur_reset_trigger = HL_RESET_FW_FATAL_ERR;
+ if (flags & HL_DRV_RESET_HEARTBEAT) {
+ hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
+ cur_reset_trigger = HL_DRV_RESET_HEARTBEAT;
+ } else if (flags & HL_DRV_RESET_TDR) {
+ hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_TDR;
+ cur_reset_trigger = HL_DRV_RESET_TDR;
+ } else if (flags & HL_DRV_RESET_FW_FATAL_ERR) {
+ hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ cur_reset_trigger = HL_DRV_RESET_FW_FATAL_ERR;
} else {
- hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
}
/*
@@ -966,11 +928,11 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
* is set and if this reset is due to a fatal FW error
* device is set to an unstable state.
*/
- if (hdev->prev_reset_trigger != cur_reset_trigger) {
- hdev->prev_reset_trigger = cur_reset_trigger;
- hdev->reset_trigger_repeated = 0;
+ if (hdev->reset_info.prev_reset_trigger != cur_reset_trigger) {
+ hdev->reset_info.prev_reset_trigger = cur_reset_trigger;
+ hdev->reset_info.reset_trigger_repeated = 0;
} else {
- hdev->reset_trigger_repeated = 1;
+ hdev->reset_info.reset_trigger_repeated = 1;
}
/* If reset is due to heartbeat, device CPU is no responsive in
@@ -979,8 +941,8 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
* If F/W is performing the reset, no need to send it a message to disable
* PCI access
*/
- if ((flags & HL_RESET_HARD) &&
- !(flags & (HL_RESET_HEARTBEAT | HL_RESET_FW))) {
+ if ((flags & HL_DRV_RESET_HARD) &&
+ !(flags & (HL_DRV_RESET_HEARTBEAT | HL_DRV_RESET_BYPASS_REQ_TO_FW))) {
 /* Disable PCI access from device F/W so it won't send
* us additional interrupts. We disable MSI/MSI-X at
* the halt_engines function and we can't have the F/W
@@ -1015,34 +977,39 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
*/
int hl_device_reset(struct hl_device *hdev, u32 flags)
{
+ bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false,
+ reset_upon_device_release = false, schedule_hard_reset = false;
u64 idle_mask[HL_BUSY_ENGINES_MASK_EXT_SIZE] = {0};
- bool hard_reset, from_hard_reset_thread, fw_reset, hard_instead_soft = false;
+ struct hl_ctx *ctx;
int i, rc;
if (!hdev->init_done) {
- dev_err(hdev->dev,
- "Can't reset before initialization is done\n");
+ dev_err(hdev->dev, "Can't reset before initialization is done\n");
return 0;
}
- hard_reset = !!(flags & HL_RESET_HARD);
- from_hard_reset_thread = !!(flags & HL_RESET_FROM_RESET_THREAD);
- fw_reset = !!(flags & HL_RESET_FW);
+ hard_reset = !!(flags & HL_DRV_RESET_HARD);
+ from_hard_reset_thread = !!(flags & HL_DRV_RESET_FROM_RESET_THR);
+ fw_reset = !!(flags & HL_DRV_RESET_BYPASS_REQ_TO_FW);
- if (!hard_reset && !hdev->supports_soft_reset) {
+ if (!hard_reset && !hdev->asic_prop.supports_soft_reset) {
hard_instead_soft = true;
hard_reset = true;
}
- if (hdev->reset_upon_device_release &&
- (flags & HL_RESET_DEVICE_RELEASE)) {
- dev_dbg(hdev->dev,
- "Perform %s-reset upon device release\n",
- hard_reset ? "hard" : "soft");
+ if (hdev->reset_upon_device_release && (flags & HL_DRV_RESET_DEV_RELEASE)) {
+ if (hard_reset) {
+ dev_crit(hdev->dev,
+ "Aborting reset because hard-reset is mutually exclusive with reset-on-device-release\n");
+ return -EINVAL;
+ }
+
+ reset_upon_device_release = true;
+
goto do_reset;
}
- if (!hard_reset && !hdev->allow_inference_soft_reset) {
+ if (!hard_reset && !hdev->asic_prop.allow_inference_soft_reset) {
hard_instead_soft = true;
hard_reset = true;
}
@@ -1062,12 +1029,22 @@ do_reset:
*/
if (!from_hard_reset_thread) {
/* Block future CS/VM/JOB completion operations */
- rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
- if (rc)
+ spin_lock(&hdev->reset_info.lock);
+ if (hdev->reset_info.in_reset) {
+ /* We only allow scheduling of a hard reset during soft reset */
+ if (hard_reset && hdev->reset_info.is_in_soft_reset)
+ hdev->reset_info.hard_reset_schedule_flags = flags;
+ spin_unlock(&hdev->reset_info.lock);
return 0;
+ }
+ hdev->reset_info.in_reset = 1;
+ spin_unlock(&hdev->reset_info.lock);
handle_reset_trigger(hdev, flags);
+ /* This still allows the completion of some KDMA ops */
+ hdev->reset_info.is_in_soft_reset = !hard_reset;
+
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
@@ -1075,21 +1052,19 @@ do_reset:
if (hard_reset)
dev_info(hdev->dev, "Going to reset device\n");
- else if (flags & HL_RESET_DEVICE_RELEASE)
- dev_info(hdev->dev,
- "Going to reset device after it was released by user\n");
+ else if (reset_upon_device_release)
+ dev_info(hdev->dev, "Going to reset device after release by user\n");
else
- dev_info(hdev->dev,
- "Going to reset compute engines of inference device\n");
+ dev_info(hdev->dev, "Going to reset engines of inference device\n");
}
again:
if ((hard_reset) && (!from_hard_reset_thread)) {
- hdev->hard_reset_pending = true;
+ hdev->reset_info.hard_reset_pending = true;
hdev->process_kill_trial_cnt = 0;
- hdev->device_reset_work.fw_reset = fw_reset;
+ hdev->device_reset_work.flags = flags;
/*
* Because the reset function can't run from heartbeat work,
@@ -1109,7 +1084,7 @@ kill_processes:
* process can't really exit until all its CSs are done, which
* is what we do in cs rollback
*/
- rc = device_kill_open_processes(hdev, 0);
+ rc = device_kill_open_processes(hdev, 0, false);
if (rc == -EBUSY) {
if (hdev->device_fini_pending) {
@@ -1138,7 +1113,7 @@ kill_processes:
hdev->asic_funcs->hw_fini(hdev, hard_reset, fw_reset);
if (hard_reset) {
- hdev->fw_loader.linux_loaded = false;
+ hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
/* Release kernel context */
if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1)
@@ -1154,24 +1129,23 @@ kill_processes:
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_reset(hdev, &hdev->completion_queue[i]);
- mutex_lock(&hdev->fpriv_list_lock);
-
/* Make sure the context switch phase will run again */
- if (hdev->compute_ctx) {
- atomic_set(&hdev->compute_ctx->thread_ctx_switch_token, 1);
- hdev->compute_ctx->thread_ctx_switch_wait_token = 0;
+ ctx = hl_get_compute_ctx(hdev);
+ if (ctx) {
+ atomic_set(&ctx->thread_ctx_switch_token, 1);
+ ctx->thread_ctx_switch_wait_token = 0;
+ hl_ctx_put(ctx);
}
- mutex_unlock(&hdev->fpriv_list_lock);
-
/* Finished tear-down, starting to re-initialize */
if (hard_reset) {
hdev->device_cpu_disabled = false;
- hdev->hard_reset_pending = false;
+ hdev->reset_info.hard_reset_pending = false;
- if (hdev->reset_trigger_repeated &&
- (hdev->prev_reset_trigger == HL_RESET_FW_FATAL_ERR)) {
+ if (hdev->reset_info.reset_trigger_repeated &&
+ (hdev->reset_info.prev_reset_trigger ==
+ HL_DRV_RESET_FW_FATAL_ERR)) {
/* if there are 2 back-to-back resets from FW,
 * ensure driver puts the device in an unusable state
*/
@@ -1204,7 +1178,7 @@ kill_processes:
goto out_err;
}
- hdev->compute_ctx = NULL;
+ hdev->is_compute_ctx_active = false;
rc = hl_ctx_init(hdev, hdev->kernel_ctx, true);
if (rc) {
@@ -1225,16 +1199,14 @@ kill_processes:
rc = hdev->asic_funcs->hw_init(hdev);
if (rc) {
- dev_err(hdev->dev,
- "failed to initialize the H/W after reset\n");
+ dev_err(hdev->dev, "failed to initialize the H/W after reset\n");
goto out_err;
}
/* If device is not idle fail the reset process */
if (!hdev->asic_funcs->is_device_idle(hdev, idle_mask,
HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL)) {
- dev_err(hdev->dev,
- "device is not idle (mask 0x%llx_%llx) after reset\n",
+ dev_err(hdev->dev, "device is not idle (mask 0x%llx_%llx) after reset\n",
idle_mask[1], idle_mask[0]);
rc = -EIO;
goto out_err;
@@ -1243,43 +1215,56 @@ kill_processes:
/* Check that the communication with the device is working */
rc = hdev->asic_funcs->test_queues(hdev);
if (rc) {
- dev_err(hdev->dev,
- "Failed to detect if device is alive after reset\n");
+ dev_err(hdev->dev, "Failed to detect if device is alive after reset\n");
goto out_err;
}
if (hard_reset) {
rc = device_late_init(hdev);
if (rc) {
- dev_err(hdev->dev,
- "Failed late init after hard reset\n");
+ dev_err(hdev->dev, "Failed late init after hard reset\n");
goto out_err;
}
rc = hl_vm_init(hdev);
if (rc) {
- dev_err(hdev->dev,
- "Failed to init memory module after hard reset\n");
+ dev_err(hdev->dev, "Failed to init memory module after hard reset\n");
goto out_err;
}
hl_set_max_power(hdev);
} else {
- rc = hdev->asic_funcs->soft_reset_late_init(hdev);
+ rc = hdev->asic_funcs->non_hard_reset_late_init(hdev);
if (rc) {
- dev_err(hdev->dev,
- "Failed late init after soft reset\n");
+ if (reset_upon_device_release)
+ dev_err(hdev->dev,
+ "Failed late init in reset after device release\n");
+ else
+ dev_err(hdev->dev, "Failed late init after soft reset\n");
goto out_err;
}
}
- atomic_set(&hdev->in_reset, 0);
- hdev->needs_reset = false;
+ spin_lock(&hdev->reset_info.lock);
+ hdev->reset_info.is_in_soft_reset = false;
+
+ /* Schedule hard reset only if requested and if not already in hard reset.
+	 * We keep 'in_reset' enabled, so no other reset can start while the hard
+	 * reset is scheduled
+ */
+ if (!hard_reset && hdev->reset_info.hard_reset_schedule_flags)
+ schedule_hard_reset = true;
+ else
+ hdev->reset_info.in_reset = 0;
+
+ spin_unlock(&hdev->reset_info.lock);
+
+ hdev->reset_info.needs_reset = false;
dev_notice(hdev->dev, "Successfully finished resetting the device\n");
if (hard_reset) {
- hdev->hard_reset_cnt++;
+ hdev->reset_info.hard_reset_cnt++;
/* After reset is done, we are ready to receive events from
* the F/W. We can't do it before because we will ignore events
@@ -1287,28 +1272,41 @@ kill_processes:
* the device will be operational although it shouldn't be
*/
hdev->asic_funcs->enable_events_from_fw(hdev);
- } else {
- hdev->soft_reset_cnt++;
+ } else if (!reset_upon_device_release) {
+ hdev->reset_info.soft_reset_cnt++;
+ }
+
+ if (schedule_hard_reset) {
+ dev_info(hdev->dev, "Performing hard reset scheduled during soft reset\n");
+ flags = hdev->reset_info.hard_reset_schedule_flags;
+ hdev->reset_info.hard_reset_schedule_flags = 0;
+ hdev->disabled = true;
+ hard_reset = true;
+ handle_reset_trigger(hdev, flags);
+ goto again;
}
return 0;
out_err:
hdev->disabled = true;
+ hdev->reset_info.is_in_soft_reset = false;
if (hard_reset) {
- dev_err(hdev->dev,
- "Failed to reset! Device is NOT usable\n");
- hdev->hard_reset_cnt++;
+ dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n");
+ hdev->reset_info.hard_reset_cnt++;
+ } else if (reset_upon_device_release) {
+ dev_err(hdev->dev, "Failed to reset device after user release\n");
+ hard_reset = true;
+ goto again;
} else {
- dev_err(hdev->dev,
- "Failed to do soft-reset, trying hard reset\n");
- hdev->soft_reset_cnt++;
+ dev_err(hdev->dev, "Failed to do soft-reset\n");
+ hdev->reset_info.soft_reset_cnt++;
hard_reset = true;
goto again;
}
- atomic_set(&hdev->in_reset, 0);
+ hdev->reset_info.in_reset = 0;
return rc;
}
@@ -1455,7 +1453,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto mmu_fini;
}
- hdev->compute_ctx = NULL;
+ hdev->is_compute_ctx_active = false;
hdev->asic_funcs->state_dump_init(hdev);
@@ -1619,6 +1617,7 @@ out_disabled:
*/
void hl_device_fini(struct hl_device *hdev)
{
+ bool device_in_reset;
ktime_t timeout;
u64 reset_sec;
int i, rc;
@@ -1642,10 +1641,22 @@ void hl_device_fini(struct hl_device *hdev)
*/
timeout = ktime_add_us(ktime_get(), reset_sec * 1000 * 1000);
- rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
- while (rc) {
+
+ spin_lock(&hdev->reset_info.lock);
+ device_in_reset = !!hdev->reset_info.in_reset;
+ if (!device_in_reset)
+ hdev->reset_info.in_reset = 1;
+ spin_unlock(&hdev->reset_info.lock);
+
+ while (device_in_reset) {
usleep_range(50, 200);
- rc = atomic_cmpxchg(&hdev->in_reset, 0, 1);
+
+ spin_lock(&hdev->reset_info.lock);
+ device_in_reset = !!hdev->reset_info.in_reset;
+ if (!device_in_reset)
+ hdev->reset_info.in_reset = 1;
+ spin_unlock(&hdev->reset_info.lock);
+
if (ktime_compare(ktime_get(), timeout) > 0) {
dev_crit(hdev->dev,
"Failed to remove device because reset function did not finish\n");
@@ -1667,7 +1678,7 @@ void hl_device_fini(struct hl_device *hdev)
take_release_locks(hdev);
- hdev->hard_reset_pending = true;
+ hdev->reset_info.hard_reset_pending = true;
hl_hwmon_fini(hdev);
@@ -1681,10 +1692,16 @@ void hl_device_fini(struct hl_device *hdev)
"Waiting for all processes to exit (timeout of %u seconds)",
HL_PENDING_RESET_LONG_SEC);
- rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC);
+ rc = device_kill_open_processes(hdev, HL_PENDING_RESET_LONG_SEC, false);
if (rc) {
dev_crit(hdev->dev, "Failed to kill all open processes\n");
- device_disable_open_processes(hdev);
+ device_disable_open_processes(hdev, false);
+ }
+
+ rc = device_kill_open_processes(hdev, 0, true);
+ if (rc) {
+ dev_crit(hdev->dev, "Failed to kill all control device open processes\n");
+ device_disable_open_processes(hdev, true);
}
hl_cb_pool_fini(hdev);
@@ -1692,7 +1709,7 @@ void hl_device_fini(struct hl_device *hdev)
/* Reset the H/W. It will be in idle state after this returns */
hdev->asic_funcs->hw_fini(hdev, true, false);
- hdev->fw_loader.linux_loaded = false;
+ hdev->fw_loader.fw_comp_loaded = FW_TYPE_NONE;
/* Release kernel context */
if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1))
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 4e68fb9d2a6b..6775c5c3166b 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -15,8 +15,6 @@
#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
-#define FW_CPU_STATUS_POLL_INTERVAL_USEC 10000
-
static char *extract_fw_ver_from_str(const char *fw_str)
{
char *str, *fw_ver, *whitespace;
@@ -214,7 +212,8 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
struct asic_fixed_properties *prop = &hdev->asic_prop;
struct cpucp_packet *pkt;
dma_addr_t pkt_dma_addr;
- u32 tmp, expected_ack_val;
+ struct hl_bd *sent_bd;
+ u32 tmp, expected_ack_val, pi;
int rc = 0;
pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
@@ -239,6 +238,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
/* set fence to a non valid value */
pkt->fence = cpu_to_le32(UINT_MAX);
+ pi = queue->pi;
/*
* The CPU queue is a synchronous queue with an effective depth of
@@ -248,7 +248,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
* Which means that we don't need to lock the access to the entire H/W
* queues module when submitting a JOB to the CPU queue.
*/
- hl_hw_queue_submit_bd(hdev, queue, 0, len, pkt_dma_addr);
+ hl_hw_queue_submit_bd(hdev, queue, hl_queue_inc_ptr(queue->pi), len, pkt_dma_addr);
if (prop->fw_app_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN)
expected_ack_val = queue->pi;
@@ -280,6 +280,14 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
*result = le64_to_cpu(pkt->result);
}
+	/* Scrub the previous buffer descriptor's 'ctl' field, which contains the
+ * previous PI value written during packet submission.
+ * We must do this or else F/W can read an old value upon queue wraparound.
+ */
+ sent_bd = queue->kernel_address;
+ sent_bd += hl_pi_2_offset(pi);
+ sent_bd->ctl = cpu_to_le32(UINT_MAX);
+
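/*
 * Editorial sketch, not part of the patch: the PI grows monotonically while
 * the queue storage wraps, so hl_pi_2_offset() is assumed to reduce the PI
 * to a slot index, conceptually pi % queue_len. Submissions pi and
 * pi + queue_len therefore reuse the same slot, and without the scrub above
 * the F/W PI-ack check could match the stale 'ctl' value left behind.
 */
static inline u32 example_slot_for_pi(u32 pi, u32 queue_len)
{
	return pi % queue_len;
}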
out:
mutex_unlock(&hdev->send_cpu_message_lock);
@@ -445,15 +453,6 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
err_exists = true;
}
- if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
- dev_warn(hdev->dev,
- "Device boot warning - Skipped DRAM initialization\n");
- /* This is a warning so we don't want it to disable the
- * device
- */
- err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED;
- }
-
if (err_val & CPU_BOOT_ERR0_BMC_WAIT_SKIPPED) {
if (hdev->bmc_enable) {
dev_err(hdev->dev,
@@ -497,15 +496,6 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
err_exists = true;
}
- if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) {
- dev_warn(hdev->dev,
- "Device boot warning - Failed to load preboot primary image\n");
- /* This is a warning so we don't want it to disable the
- * device as we have a secondary preboot image
- */
- err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL;
- }
-
if (err_val & CPU_BOOT_ERR0_SEC_IMG_VER_FAIL) {
dev_err(hdev->dev, "Device boot error - Failed to load preboot secondary image\n");
err_exists = true;
@@ -525,6 +515,34 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
+	/* All warnings should go here so they are not caught by the unknown-error validation below */
+ if (err_val & CPU_BOOT_ERR0_DRAM_SKIPPED) {
+ dev_warn(hdev->dev,
+ "Device boot warning - Skipped DRAM initialization\n");
+ /* This is a warning so we don't want it to disable the
+ * device
+ */
+ err_val &= ~CPU_BOOT_ERR0_DRAM_SKIPPED;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_PRI_IMG_VER_FAIL) {
+ dev_warn(hdev->dev,
+ "Device boot warning - Failed to load preboot primary image\n");
+ /* This is a warning so we don't want it to disable the
+ * device as we have a secondary preboot image
+ */
+ err_val &= ~CPU_BOOT_ERR0_PRI_IMG_VER_FAIL;
+ }
+
+ if (err_val & CPU_BOOT_ERR0_TPM_FAIL) {
+ dev_warn(hdev->dev,
+ "Device boot warning - TPM failure\n");
+ /* This is a warning so we don't want it to disable the
+ * device
+ */
+ err_val &= ~CPU_BOOT_ERR0_TPM_FAIL;
+ }
+
if (!err_exists && (err_val & ~CPU_BOOT_ERR0_ENABLED)) {
dev_err(hdev->dev,
"Device boot error - unknown ERR0 error 0x%08x\n", err_val);
@@ -961,6 +979,7 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.type = cpu_to_le16(CPUCP_POWER_INPUT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_CPUCP_INFO_TIMEOUT_USEC, &result);
@@ -974,6 +993,92 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power)
return rc;
}
+int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
+ struct cpucp_hbm_row_info *info)
+{
+ struct cpucp_hbm_row_info *cpucp_repl_rows_info_cpu_addr;
+ dma_addr_t cpucp_repl_rows_info_dma_addr;
+ struct cpucp_packet pkt = {};
+ u64 result;
+ int rc;
+
+ cpucp_repl_rows_info_cpu_addr =
+ hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
+ sizeof(struct cpucp_hbm_row_info),
+ &cpucp_repl_rows_info_dma_addr);
+ if (!cpucp_repl_rows_info_cpu_addr) {
+ dev_err(hdev->dev,
+ "Failed to allocate DMA memory for CPU-CP replaced rows info packet\n");
+ return -ENOMEM;
+ }
+
+ memset(cpucp_repl_rows_info_cpu_addr, 0, sizeof(struct cpucp_hbm_row_info));
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.addr = cpu_to_le64(cpucp_repl_rows_info_dma_addr);
+ pkt.data_max_size = cpu_to_le32(sizeof(struct cpucp_hbm_row_info));
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_CPUCP_INFO_TIMEOUT_USEC, &result);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP replaced rows info pkt, error %d\n", rc);
+ goto out;
+ }
+
+ memcpy(info, cpucp_repl_rows_info_cpu_addr, sizeof(*info));
+
+out:
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
+ sizeof(struct cpucp_hbm_row_info),
+ cpucp_repl_rows_info_cpu_addr);
+
+ return rc;
+}
+
+int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num)
+{
+ struct cpucp_packet pkt;
+ u64 result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_HBM_PENDING_ROWS_STATUS << CPUCP_PKT_CTL_OPCODE_SHIFT);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, &result);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to handle CPU-CP pending rows info pkt, error %d\n", rc);
+ goto out;
+ }
+
+ *pend_rows_num = (u32) result;
+out:
+ return rc;
+}
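
/*
 * Editorial sketch, not part of the patch: a hypothetical caller that logs
 * the pending-row count, using only the signature defined above.
 */
static void example_log_pending_rows(struct hl_device *hdev)
{
	u32 pend_rows;

	if (!hl_fw_dram_pending_row_get(hdev, &pend_rows))
		dev_info(hdev->dev, "%u HBM rows pending replacement\n",
				pend_rows);
}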
+
+int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid)
+{
+ struct cpucp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_ENGINE_CORE_ASID_SET << CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.value = cpu_to_le64(asid);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ HL_CPUCP_INFO_TIMEOUT_USEC, NULL);
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed on ASID configuration request for engine core, error %d\n",
+ rc);
+
+ return rc;
+}
+
void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev)
{
struct static_fw_load_mgr *static_loader =
@@ -1028,7 +1133,7 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
switch (status) {
case CPU_BOOT_STATUS_NA:
dev_err(hdev->dev,
- "Device boot progress - BTL did NOT run\n");
+ "Device boot progress - BTL/ROM did NOT run\n");
break;
case CPU_BOOT_STATUS_IN_WFE:
dev_err(hdev->dev,
@@ -1101,9 +1206,8 @@ static int hl_fw_read_preboot_caps(struct hl_device *hdev,
(status == CPU_BOOT_STATUS_DRAM_RDY) ||
(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
- (status == CPU_BOOT_STATUS_SRAM_AVAIL) ||
(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
timeout);
if (rc) {
@@ -1250,8 +1354,7 @@ static void hl_fw_preboot_update_state(struct hl_device *hdev)
* 3. FW application - a. Fetch fw application security status
* b. Check whether hard reset is done by fw app
*/
- prop->hard_reset_done_by_fw =
- !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
+ prop->hard_reset_done_by_fw = !!(cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
dev_dbg(hdev->dev, "Firmware preboot boot device status0 %#x\n",
cpu_boot_dev_sts0);
@@ -1287,11 +1390,7 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
{
int rc;
- /* pldm was added for cases in which we use preboot on pldm and want
- * to load boot fit, but we can't wait for preboot because it runs
- * very slowly
- */
- if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU) || hdev->pldm)
+ if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
return 0;
/*
@@ -1437,7 +1536,7 @@ static int hl_fw_dynamic_wait_for_status(struct hl_device *hdev,
le32_to_cpu(dyn_regs->cpu_cmd_status_to_host),
status,
FIELD_GET(COMMS_STATUS_STATUS_MASK, status) == expected_status,
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
timeout);
if (rc) {
@@ -1703,6 +1802,9 @@ static int hl_fw_dynamic_validate_descriptor(struct hl_device *hdev,
return rc;
}
+	/* Here we can mark the descriptor as valid, as its content has been validated */
+ fw_loader->dynamic_loader.fw_desc_valid = true;
+
return 0;
}
@@ -1759,7 +1861,13 @@ static int hl_fw_dynamic_read_and_validate_descriptor(struct hl_device *hdev,
return rc;
}
- /* extract address copy the descriptor from */
+ /*
+	 * extract the address to copy the descriptor from.
+	 * In addition, as the descriptor is going to be overridden by new data, we mark it
+	 * as invalid here.
+	 * It will be marked as valid again once it passes validation.
+ */
+ fw_loader->dynamic_loader.fw_desc_valid = false;
src = hdev->pcie_bar[region->bar_id] + region->offset_in_bar +
response->ram_offset;
memcpy_fromio(fw_desc, src, sizeof(struct lkd_fw_comms_desc));
@@ -1920,17 +2028,15 @@ static void hl_fw_boot_fit_update_state(struct hl_device *hdev,
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- /* Clear reset status since we need to read it again from boot CPU */
- prop->hard_reset_done_by_fw = false;
+ hdev->fw_loader.fw_comp_loaded |= FW_TYPE_BOOT_CPU;
/* Read boot_cpu status bits */
if (prop->fw_preboot_cpu_boot_dev_sts0 & CPU_BOOT_DEV_STS0_ENABLED) {
prop->fw_bootfit_cpu_boot_dev_sts0 =
RREG32(cpu_boot_dev_sts0_reg);
- if (prop->fw_bootfit_cpu_boot_dev_sts0 &
- CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
- prop->hard_reset_done_by_fw = true;
+ prop->hard_reset_done_by_fw = !!(prop->fw_bootfit_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
dev_dbg(hdev->dev, "Firmware boot CPU status0 %#x\n",
prop->fw_bootfit_cpu_boot_dev_sts0);
@@ -2055,14 +2161,21 @@ static int hl_fw_dynamic_wait_for_boot_fit_active(struct hl_device *hdev,
dyn_loader = &fw_loader->dynamic_loader;
- /* Make sure CPU boot-loader is running */
+ /*
+ * Make sure CPU boot-loader is running
+	 * Note that CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux,
+	 * yet there is a debug scenario in which we load uboot (without Linux),
+	 * which at a later stage is relocated to DRAM. In this case we expect
+	 * uboot to set CPU_BOOT_STATUS_SRAM_AVAIL, and so we add it to the
+	 * poll flags
+ */
rc = hl_poll_timeout(
hdev,
le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
status,
- (status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
- (status == CPU_BOOT_STATUS_READY_TO_BOOT),
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ (status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
+ (status == CPU_BOOT_STATUS_SRAM_AVAIL),
+ hdev->fw_poll_interval_usec,
dyn_loader->wait_for_bl_timeout);
if (rc) {
dev_err(hdev->dev, "failed to wait for boot\n");
@@ -2082,14 +2195,14 @@ static int hl_fw_dynamic_wait_for_linux_active(struct hl_device *hdev,
dyn_loader = &fw_loader->dynamic_loader;
- /* Make sure CPU boot-loader is running */
+ /* Make sure CPU linux is running */
rc = hl_poll_timeout(
hdev,
le32_to_cpu(dyn_loader->comm_desc.cpu_dyn_regs.cpu_boot_status),
status,
(status == CPU_BOOT_STATUS_SRAM_AVAIL),
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
fw_loader->cpu_timeout);
if (rc) {
dev_err(hdev->dev, "failed to wait for Linux\n");
@@ -2121,18 +2234,14 @@ static void hl_fw_linux_update_state(struct hl_device *hdev,
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- hdev->fw_loader.linux_loaded = true;
-
- /* Clear reset status since we need to read again from app */
- prop->hard_reset_done_by_fw = false;
+ hdev->fw_loader.fw_comp_loaded |= FW_TYPE_LINUX;
/* Read FW application security bits */
if (prop->fw_cpu_boot_dev_sts0_valid) {
prop->fw_app_cpu_boot_dev_sts0 = RREG32(cpu_boot_dev_sts0_reg);
- if (prop->fw_app_cpu_boot_dev_sts0 &
- CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
- prop->hard_reset_done_by_fw = true;
+ prop->hard_reset_done_by_fw = !!(prop->fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_FW_HARD_RST_EN);
if (prop->fw_app_cpu_boot_dev_sts0 &
CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN)
@@ -2247,6 +2356,9 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
dev_info(hdev->dev,
"Loading firmware to device, may take some time...\n");
+ /* initialize FW descriptor as invalid */
+ fw_loader->dynamic_loader.fw_desc_valid = false;
+
/*
* In this stage, "cpu_dyn_regs" contains only LKD's hard coded values!
* It will be updated from FW after hl_fw_dynamic_request_descriptor().
@@ -2259,14 +2371,14 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
if (rc)
goto protocol_err;
- if (hdev->curr_reset_cause) {
+ if (hdev->reset_info.curr_reset_cause) {
rc = hl_fw_dynamic_send_msg(hdev, fw_loader,
- HL_COMMS_RESET_CAUSE_TYPE, &hdev->curr_reset_cause);
+ HL_COMMS_RESET_CAUSE_TYPE, &hdev->reset_info.curr_reset_cause);
if (rc)
goto protocol_err;
/* Clear current reset cause */
- hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
}
if (!(hdev->fw_components & FW_TYPE_BOOT_CPU)) {
@@ -2288,6 +2400,15 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
goto protocol_err;
}
+ /*
+	 * When testing FW load (without Linux) on PLDM we don't want to
+	 * wait until the boot fit is active, as that may take several hours.
+	 * Instead, we load the boot fit and let it do all initializations in
+	 * the background.
+ */
+ if (hdev->pldm && !(hdev->fw_components & FW_TYPE_LINUX))
+ return 0;
+
rc = hl_fw_dynamic_wait_for_boot_fit_active(hdev, fw_loader);
if (rc)
goto protocol_err;
@@ -2333,7 +2454,8 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
return 0;
protocol_err:
- fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
+ if (fw_loader->dynamic_loader.fw_desc_valid)
+ fw_read_errors(hdev, le32_to_cpu(dyn_regs->cpu_boot_err0),
le32_to_cpu(dyn_regs->cpu_boot_err1),
le32_to_cpu(dyn_regs->cpu_boot_dev_sts0),
le32_to_cpu(dyn_regs->cpu_boot_dev_sts1));
@@ -2380,7 +2502,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
cpu_boot_status_reg,
status,
status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT,
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
fw_loader->boot_fit_timeout);
if (rc) {
@@ -2403,7 +2525,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
cpu_msg_status_reg,
status,
status == CPU_MSG_OK,
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
fw_loader->boot_fit_timeout);
if (rc) {
@@ -2416,7 +2538,14 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
WREG32(msg_to_cpu_reg, KMD_MSG_NA);
}
- /* Make sure CPU boot-loader is running */
+ /*
+ * Make sure CPU boot-loader is running
+	 * Note that CPU_BOOT_STATUS_SRAM_AVAIL is generally set by Linux,
+	 * yet there is a debug scenario in which we load uboot (without Linux),
+	 * which at a later stage is relocated to DRAM. In this case we expect
+	 * uboot to set CPU_BOOT_STATUS_SRAM_AVAIL, and so we add it to the
+	 * poll flags
+ */
rc = hl_poll_timeout(
hdev,
cpu_boot_status_reg,
@@ -2425,7 +2554,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
(status == CPU_BOOT_STATUS_SRAM_AVAIL),
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
cpu_timeout);
dev_dbg(hdev->dev, "uboot status = %d\n", status);
@@ -2474,7 +2603,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
cpu_boot_status_reg,
status,
(status == CPU_BOOT_STATUS_BMC_WAITING_SKIPPED),
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
cpu_timeout);
if (rc) {
@@ -2494,7 +2623,7 @@ static int hl_fw_static_init_cpu(struct hl_device *hdev,
cpu_boot_status_reg,
status,
(status == CPU_BOOT_STATUS_SRAM_AVAIL),
- FW_CPU_STATUS_POLL_INTERVAL_USEC,
+ hdev->fw_poll_interval_usec,
cpu_timeout);
/* Clear message */
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index a2002cbf794b..cb710fd478b6 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -61,6 +61,8 @@
#define HL_CPUCP_INFO_TIMEOUT_USEC 10000000 /* 10s */
#define HL_CPUCP_EEPROM_TIMEOUT_USEC 10000000 /* 10s */
+#define HL_FW_STATUS_POLL_INTERVAL_USEC 10000 /* 10ms */
+
#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
#define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */
@@ -117,37 +119,37 @@ enum hl_mmu_page_table_location {
/*
* Reset Flags
*
- * - HL_RESET_HARD
+ * - HL_DRV_RESET_HARD
* If set do hard reset to all engines. If not set reset just
* compute/DMA engines.
*
- * - HL_RESET_FROM_RESET_THREAD
+ * - HL_DRV_RESET_FROM_RESET_THR
* Set if the caller is the hard-reset thread
*
- * - HL_RESET_HEARTBEAT
+ * - HL_DRV_RESET_HEARTBEAT
* Set if reset is due to heartbeat
*
- * - HL_RESET_TDR
+ * - HL_DRV_RESET_TDR
* Set if reset is due to TDR
*
- * - HL_RESET_DEVICE_RELEASE
+ * - HL_DRV_RESET_DEV_RELEASE
* Set if reset is due to device release
*
- * - HL_RESET_FW
+ * - HL_DRV_RESET_BYPASS_REQ_TO_FW
* F/W will perform the reset. No need to ask it to reset the device. This is relevant
* only when running with secured f/w
*
- * - HL_RESET_FW_FATAL_ERR
+ * - HL_DRV_RESET_FW_FATAL_ERR
* Set if reset is due to a fatal error from FW
*/
-#define HL_RESET_HARD (1 << 0)
-#define HL_RESET_FROM_RESET_THREAD (1 << 1)
-#define HL_RESET_HEARTBEAT (1 << 2)
-#define HL_RESET_TDR (1 << 3)
-#define HL_RESET_DEVICE_RELEASE (1 << 4)
-#define HL_RESET_FW (1 << 5)
-#define HL_RESET_FW_FATAL_ERR (1 << 6)
+#define HL_DRV_RESET_HARD (1 << 0)
+#define HL_DRV_RESET_FROM_RESET_THR (1 << 1)
+#define HL_DRV_RESET_HEARTBEAT (1 << 2)
+#define HL_DRV_RESET_TDR (1 << 3)
+#define HL_DRV_RESET_DEV_RELEASE (1 << 4)
+#define HL_DRV_RESET_BYPASS_REQ_TO_FW (1 << 5)
+#define HL_DRV_RESET_FW_FATAL_ERR (1 << 6)
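
/*
 * Editorial sketch, not part of the patch: the flags form a bitmask, so a
 * caller combines the reset type with its trigger. A hypothetical heartbeat
 * failure handler would request:
 *
 *	hl_device_reset(hdev, HL_DRV_RESET_HARD | HL_DRV_RESET_HEARTBEAT);
 *
 * so that the bookkeeping in hl_reset_info records the correct cause.
 */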
#define HL_MAX_SOBS_PER_MONITOR 8
@@ -219,6 +221,7 @@ enum hl_fw_component {
/**
* enum hl_fw_types - F/W types present in the system
+ * @FW_TYPE_NONE: no FW component indication
* @FW_TYPE_LINUX: Linux image for device CPU
* @FW_TYPE_BOOT_CPU: Boot image for device CPU
* @FW_TYPE_PREBOOT_CPU: Indicates pre-loaded CPUs are present in the system
@@ -226,6 +229,7 @@ enum hl_fw_component {
* @FW_TYPE_ALL_TYPES: Mask for all types
*/
enum hl_fw_types {
+ FW_TYPE_NONE = 0x0,
FW_TYPE_LINUX = 0x1,
FW_TYPE_BOOT_CPU = 0x2,
FW_TYPE_PREBOOT_CPU = 0x4,
@@ -353,6 +357,21 @@ enum vm_type {
};
/**
+ * enum mmu_op_flags - mmu operation relevant information.
+ * @MMU_OP_USERPTR: operation on user memory (host resident).
+ * @MMU_OP_PHYS_PACK: operation on DRAM (device resident).
+ * @MMU_OP_CLEAR_MEMCACHE: operation has to clear memcache.
+ * @MMU_OP_SKIP_LOW_CACHE_INV: operation is allowed to skip parts of cache invalidation.
+ */
+enum mmu_op_flags {
+ MMU_OP_USERPTR = 0x1,
+ MMU_OP_PHYS_PACK = 0x2,
+ MMU_OP_CLEAR_MEMCACHE = 0x4,
+ MMU_OP_SKIP_LOW_CACHE_INV = 0x8,
+};
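
/*
 * Editorial sketch, not part of the patch: these flags combine as a bitmask.
 * For example, invalidating the MMU cache after unmapping host-resident
 * memory, while allowing partial cache invalidation to be skipped, could
 * look like the following (flag semantics assumed from the documentation
 * above):
 *
 *	hl_mmu_invalidate_cache(hdev, false,
 *			MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV);
 */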
+
+
+/**
* enum hl_device_hw_state - H/W device state. use this to understand whether
* to do reset before hw_init or not
* @HL_DEVICE_HW_STATE_CLEAN: H/W state is clean. i.e. after hard reset
@@ -382,6 +401,7 @@ enum hl_device_hw_state {
* @hop3_mask: mask to get the PTE address in hop 3.
* @hop4_mask: mask to get the PTE address in hop 4.
* @hop5_mask: mask to get the PTE address in hop 5.
+ * @last_mask: mask to get the bit indicating this is the last hop.
* @page_size: default page size used to allocate memory.
* @num_hops: The amount of hops supported by the translation table.
* @host_resident: Should the MMU page table reside in host memory or in the
@@ -402,6 +422,7 @@ struct hl_mmu_properties {
u64 hop3_mask;
u64 hop4_mask;
u64 hop5_mask;
+ u64 last_mask;
u32 page_size;
u32 num_hops;
u8 host_resident;
@@ -524,6 +545,15 @@ struct hl_hints_range {
* @dynamic_fw_load: is dynamic FW load is supported.
* @gic_interrupts_enable: true if FW is not blocking GIC controller,
* false otherwise.
+ * @use_get_power_for_reset_history: To support backward compatibility for Goya
+ * and Gaudi
+ * @supports_soft_reset: is soft reset supported.
+ * @allow_inference_soft_reset: true if the ASIC supports soft reset that is
+ * initiated by user or TDR. This is only true
+ * in inference ASICs, as there is no real-world
+ * use-case of doing soft-reset in training (due
+ * to the fact that training runs on multiple
+ * devices)
*/
struct asic_fixed_properties {
struct hw_queue_properties *hw_queues_props;
@@ -604,6 +634,9 @@ struct asic_fixed_properties {
u8 iatu_done_by_fw;
u8 dynamic_fw_load;
u8 gic_interrupts_enable;
+ u8 use_get_power_for_reset_history;
+ u8 supports_soft_reset;
+ u8 allow_inference_soft_reset;
};
/**
@@ -852,10 +885,15 @@ struct hl_user_interrupt {
* pending on an interrupt
* @wait_list_node: node in the list of user threads pending on an interrupt
* @fence: hl fence object for interrupt completion
+ * @cq_target_value: CQ target value
+ * @cq_kernel_addr: CQ kernel address, to be used in the cq interrupt
+ *			handler for target value comparison
*/
struct hl_user_pending_interrupt {
struct list_head wait_list_node;
struct hl_fence fence;
+ u64 cq_target_value;
+ u64 *cq_kernel_addr;
};
/**
@@ -1010,6 +1048,7 @@ struct fw_response {
* @image_region: region to copy the FW image to
* @fw_image_size: size of FW image to load
* @wait_for_bl_timeout: timeout for waiting for boot loader to respond
+ * @fw_desc_valid: true if FW descriptor has been validated and hence the data can be used
*/
struct dynamic_fw_load_mgr {
struct fw_response response;
@@ -1017,6 +1056,7 @@ struct dynamic_fw_load_mgr {
struct pci_mem_region *image_region;
size_t fw_image_size;
u32 wait_for_bl_timeout;
+ bool fw_desc_valid;
};
/**
@@ -1042,7 +1082,8 @@ struct fw_image_props {
* @skip_bmc: should BMC be skipped
* @sram_bar_id: SRAM bar ID
* @dram_bar_id: DRAM bar ID
- * @linux_loaded: true if linux was loaded so far
+ * @fw_comp_loaded: bitmask of loaded FW components. set bit meaning loaded
+ * component. values are set according to enum hl_fw_types.
*/
struct fw_load_mgr {
union {
@@ -1056,7 +1097,7 @@ struct fw_load_mgr {
u8 skip_bmc;
u8 sram_bar_id;
u8 dram_bar_id;
- u8 linux_loaded;
+ u8 fw_comp_loaded;
};
/**
@@ -1128,7 +1169,7 @@ struct fw_load_mgr {
* @disable_clock_gating: disable clock gating completely
* @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
- * @soft_reset_late_init: perform certain actions needed after soft reset.
+ * @non_hard_reset_late_init: perform certain actions needed after a reset which is not hard-reset
* @hw_queues_lock: acquire H/W queues lock.
* @hw_queues_unlock: release H/W queues lock.
* @get_pci_id: retrieve PCI ID.
@@ -1261,10 +1302,10 @@ struct hl_asic_funcs {
int (*send_heartbeat)(struct hl_device *hdev);
void (*set_clock_gating)(struct hl_device *hdev);
void (*disable_clock_gating)(struct hl_device *hdev);
- int (*debug_coresight)(struct hl_device *hdev, void *data);
+ int (*debug_coresight)(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
bool (*is_device_idle)(struct hl_device *hdev, u64 *mask_arr,
u8 mask_len, struct seq_file *s);
- int (*soft_reset_late_init)(struct hl_device *hdev);
+ int (*non_hard_reset_late_init)(struct hl_device *hdev);
void (*hw_queues_lock)(struct hl_device *hdev);
void (*hw_queues_unlock)(struct hl_device *hdev);
u32 (*get_pci_id)(struct hl_device *hdev);
@@ -1276,7 +1317,7 @@ struct hl_asic_funcs {
int (*init_iatu)(struct hl_device *hdev);
u32 (*rreg)(struct hl_device *hdev, u32 reg);
void (*wreg)(struct hl_device *hdev, u32 reg, u32 val);
- void (*halt_coresight)(struct hl_device *hdev);
+ void (*halt_coresight)(struct hl_device *hdev, struct hl_ctx *ctx);
int (*ctx_init)(struct hl_ctx *ctx);
void (*ctx_fini)(struct hl_ctx *ctx);
int (*get_clk_rate)(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
@@ -1518,6 +1559,9 @@ struct hl_userptr {
* @submission_time_jiffies: submission time of the cs
* @type: CS_TYPE_*.
* @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs.
+ * @sob_addr_offset: sob offset from the configuration base address.
+ * @initial_sob_count: count of completed signals in SOB before current submission of signal or
+ * cs with encaps signals.
* @submitted: true if CS was submitted to H/W.
* @completed: true if CS was completed by device.
* @timedout : true if CS was timedout.
@@ -1553,6 +1597,8 @@ struct hl_cs {
u64 submission_time_jiffies;
enum hl_cs_type type;
u32 encaps_sig_hdl_id;
+ u32 sob_addr_offset;
+ u16 initial_sob_count;
u8 submitted;
u8 completed;
u8 timedout;
@@ -1792,7 +1838,6 @@ struct hl_debug_params {
* @dev_node: node in the device list of file private data
* @refcount: number of related contexts.
* @restore_phase_mutex: lock for context switch and restore phase.
- * @is_control: true for control device, false otherwise
*/
struct hl_fpriv {
struct hl_device *hdev;
@@ -1805,7 +1850,6 @@ struct hl_fpriv {
struct list_head dev_node;
struct kref refcount;
struct mutex restore_phase_mutex;
- u8 is_control;
};
@@ -1864,6 +1908,7 @@ struct hl_debugfs_entry {
* @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
* @i2c_addr: generic u8 debugfs file for address value to use in i2c_data_read.
* @i2c_reg: generic u8 debugfs file for register value to use in i2c_data_read.
+ * @i2c_len: generic u8 debugfs file for length value to use in i2c_data_read.
*/
struct hl_dbg_device_entry {
struct dentry *root;
@@ -1892,6 +1937,7 @@ struct hl_dbg_device_entry {
u8 i2c_bus;
u8 i2c_addr;
u8 i2c_reg;
+ u8 i2c_len;
};
/**
@@ -2180,13 +2226,13 @@ struct hwmon_chip_info;
* @wq: work queue for device reset procedure.
* @reset_work: reset work to be done.
* @hdev: habanalabs device structure.
- * @fw_reset: whether f/w will do the reset without us sending them a message to do it.
+ * @flags: reset flags.
*/
struct hl_device_reset_work {
struct workqueue_struct *wq;
struct delayed_work reset_work;
struct hl_device *hdev;
- bool fw_reset;
+ u32 flags;
};
/**
@@ -2328,12 +2374,10 @@ struct multi_cs_completion {
* @ctx: pointer to the context structure
* @fence_arr: array of fences of all CSs
* @seq_arr: array of CS sequence numbers
- * @timeout_us: timeout in usec for waiting for CS to complete
+ * @timeout_jiffies: timeout in jiffies for waiting for CS to complete
* @timestamp: timestamp of first completed CS
* @wait_status: wait for CS status
* @completion_bitmap: bitmap of completed CSs (1- completed, otherwise 0)
- * @stream_master_qid_map: bitmap of all stream master QIDs on which the
- * multi-CS is waiting
* @arr_len: fence_arr and seq_arr array length
* @gone_cs: indication of gone CS (1- there was gone CS, otherwise 0)
* @update_ts: update timestamp. 1- update the timestamp, otherwise 0.
@@ -2342,17 +2386,114 @@ struct multi_cs_data {
struct hl_ctx *ctx;
struct hl_fence **fence_arr;
u64 *seq_arr;
- s64 timeout_us;
+ s64 timeout_jiffies;
s64 timestamp;
long wait_status;
u32 completion_bitmap;
- u32 stream_master_qid_map;
u8 arr_len;
u8 gone_cs;
u8 update_ts;
};
/**
+ * struct hl_clk_throttle_timestamp - current/last clock throttling timestamp
+ * @start: timestamp taken when 'start' event is received in driver
+ * @end: timestamp taken when 'end' event is received in driver
+ */
+struct hl_clk_throttle_timestamp {
+ ktime_t start;
+ ktime_t end;
+};
+
+/**
+ * struct hl_clk_throttle - keeps current/last clock throttling timestamps
+ * @timestamp: timestamp taken by driver and firmware, index 0 refers to POWER
+ * index 1 refers to THERMAL
+ * @lock: protects this structure as it can be accessed from both event queue
+ * context and info_ioctl context
+ * @current_reason: bitmask represents the current clk throttling reasons
+ * @aggregated_reason: bitmask represents aggregated clk throttling reasons since driver load
+ */
+struct hl_clk_throttle {
+ struct hl_clk_throttle_timestamp timestamp[HL_CLK_THROTTLE_TYPE_MAX];
+ struct mutex lock;
+ u32 current_reason;
+ u32 aggregated_reason;
+};
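
/*
 * Editorial sketch, not part of the patch: a hypothetical event handler for
 * power throttling. The HL_CLK_THROTTLE_TYPE_POWER index and the
 * HL_CLK_THROTTLE_POWER reason bit are assumed siblings of the
 * HL_CLK_THROTTLE_TYPE_MAX constant used above. The mutex is taken because
 * the INFO IOCTL reads the same fields.
 */
static void example_power_throttle_event(struct hl_device *hdev, bool start)
{
	struct hl_clk_throttle *ct = &hdev->clk_throttling;

	mutex_lock(&ct->lock);
	if (start) {
		ct->timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
		ct->current_reason |= HL_CLK_THROTTLE_POWER;
		ct->aggregated_reason |= HL_CLK_THROTTLE_POWER;
	} else {
		ct->timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
		ct->current_reason &= ~HL_CLK_THROTTLE_POWER;
	}
	mutex_unlock(&ct->lock);
}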
+
+/**
+ * struct last_error_session_info - info about last session in which CS timeout or
+ * razwi error occurred.
+ * @open_dev_timestamp: device open timestamp.
+ * @cs_timeout_timestamp: CS timeout timestamp.
+ * @razwi_timestamp: razwi timestamp.
+ * @cs_write_disable: if set, writing to the CS parameters in the structure is disabled so that
+ *                    the first (root cause) CS timeout will not be overwritten.
+ * @razwi_write_disable: if set, writing to the razwi parameters in the structure is disabled so
+ *                       that the first (root cause) razwi will not be overwritten.
+ * @cs_timeout_seq: CS timeout sequence number.
+ * @razwi_addr: address that caused razwi.
+ * @razwi_engine_id_1: engine id of the razwi initiator; if it was initiated by an engine that
+ *                     does not have an engine id, it will be set to U16_MAX.
+ * @razwi_engine_id_2: second engine id of the razwi initiator. The razwi may have two possible
+ *                     initiator engines, one of which caused it. In that case, this will contain
+ *                     the second possible engine id; otherwise it will be set to U16_MAX.
+ * @razwi_non_engine_initiator: in case the initiator of the razwi does not have engine id.
+ * @razwi_type: cause of razwi, page fault or access error, otherwise it will be set to U8_MAX.
+ */
+struct last_error_session_info {
+ ktime_t open_dev_timestamp;
+ ktime_t cs_timeout_timestamp;
+ ktime_t razwi_timestamp;
+ atomic_t cs_write_disable;
+ atomic_t razwi_write_disable;
+ u64 cs_timeout_seq;
+ u64 razwi_addr;
+ u16 razwi_engine_id_1;
+ u16 razwi_engine_id_2;
+ u8 razwi_non_engine_initiator;
+ u8 razwi_type;
+};
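
/*
 * Editorial sketch, not part of the patch: the write-disable fields make the
 * capture write-once per session. A hypothetical CS-timeout recorder; only
 * the first caller after device open (which resets the atomics to 0) gets to
 * store the root-cause data.
 */
static void example_record_cs_timeout(struct hl_device *hdev, u64 seq)
{
	if (atomic_cmpxchg(&hdev->last_error.cs_write_disable, 0, 1) == 0) {
		hdev->last_error.cs_timeout_timestamp = ktime_get();
		hdev->last_error.cs_timeout_seq = seq;
	}
}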
+
+/**
+ * struct hl_reset_info - holds current device reset information.
+ * @lock: lock to protect critical reset flows.
+ * @soft_reset_cnt: number of soft reset since the driver was loaded.
+ * @hard_reset_cnt: number of hard reset since the driver was loaded.
+ * @hard_reset_schedule_flags: when a hard reset is scheduled to run after the current soft
+ *                             reset, these hold the hard reset flags.
+ * @in_reset: is device in reset flow.
+ * @is_in_soft_reset: Device is currently in soft reset process.
+ * @needs_reset: true if reset_on_lockup is false and device should be reset
+ * due to lockup.
+ * @hard_reset_pending: is there a hard reset work pending.
+ * @curr_reset_cause: saves an enumerated reset cause when a hard reset is
+ * triggered, and cleared after it is shared with preboot.
+ * @prev_reset_trigger: saves the previous trigger which caused a reset, overridden
+ * with a new value on next reset
+ * @reset_trigger_repeated: set if device reset is triggered more than once with
+ * same cause.
+ * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
+ * complete instead.
+ */
+struct hl_reset_info {
+ spinlock_t lock;
+ u32 soft_reset_cnt;
+ u32 hard_reset_cnt;
+ u32 hard_reset_schedule_flags;
+ u8 in_reset;
+ u8 is_in_soft_reset;
+ u8 needs_reset;
+ u8 hard_reset_pending;
+
+ u8 curr_reset_cause;
+ u8 prev_reset_trigger;
+ u8 reset_trigger_repeated;
+
+ u8 skip_reset_on_timeout;
+};
+
+/**
* struct hl_device - habanalabs device structure.
* @pdev: pointer to PCI device, can be NULL in case of simulator device.
* @pcie_bar_phys: array of available PCIe bars physical addresses.
@@ -2363,7 +2504,6 @@ struct multi_cs_data {
* @cdev_ctrl: char device for control operations only (INFO IOCTL)
* @dev: related kernel basic device structure.
* @dev_ctrl: related kernel device structure for the control device
- * @work_freq: delayed work to lower device frequency if possible.
* @work_heartbeat: delayed work for CPU-CP is-alive check.
* @device_reset_work: delayed work which performs hard reset
* @asic_name: ASIC specific name.
@@ -2398,7 +2538,6 @@ struct multi_cs_data {
* @asic_specific: ASIC specific information to use only from ASIC files.
* @vm: virtual memory manager for MMU.
* @hwmon_dev: H/W monitor device.
- * @pm_mng_profile: current power management profile.
* @hl_chip_info: ASIC's sensors information.
* @device_status_description: device status description.
* @hl_debugfs: device's debugfs manager.
@@ -2410,8 +2549,10 @@ struct multi_cs_data {
* @internal_cb_va_base: internal cb pool mmu virtual address base
* @fpriv_list: list of file private data structures. Each structure is created
* when a user opens the device
+ * @fpriv_ctrl_list: list of file private data structures. Each structure is created
+ * when a user opens the control device
* @fpriv_list_lock: protects the fpriv_list
- * @compute_ctx: current compute context executing.
+ * @fpriv_ctrl_list_lock: protects the fpriv_ctrl_list
* @aggregated_cs_counters: aggregated cs counters among all contexts
* @mmu_priv: device-specific MMU data.
* @mmu_func: device-related MMU functions.
@@ -2419,6 +2560,10 @@ struct multi_cs_data {
* @pci_mem_region: array of memory regions in the PCI
* @state_dump_specs: constants and dictionaries needed to dump system state.
* @multi_cs_completion: array of multi-CS completion.
+ * @clk_throttling: holds information about current/previous clock throttling events
+ * @reset_info: holds current device reset information.
+ * @last_error: holds information about last session in which CS timeout or razwi error occurred.
+ * @stream_master_qid_arr: pointer to array with QIDs of master streams.
* @dram_used_mem: current DRAM memory consumption.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
@@ -2434,20 +2579,17 @@ struct multi_cs_data {
* device initialization. Mainly used to debug and
* workaround firmware bugs
* @dram_pci_bar_start: start bus address of PCIe bar towards DRAM.
+ * @last_successful_open_ktime: timestamp (ktime) of the last successful device open.
* @last_successful_open_jif: timestamp (jiffies) of the last successful
* device open.
* @last_open_session_duration_jif: duration (jiffies) of the last device open
* session.
* @open_counter: number of successful device open operations.
- * @in_reset: is device in reset flow.
- * @curr_pll_profile: current PLL profile.
+ * @fw_poll_interval_usec: FW status poll interval in usec.
* @card_type: Various ASICs have several card types. This indicates the card
* type of the current device.
* @major: habanalabs kernel driver major.
* @high_pll: high PLL profile frequency.
- * @soft_reset_cnt: number of soft reset since the driver was loaded.
- * @hard_reset_cnt: number of hard reset since the driver was loaded.
- * @clk_throttling_reason: bitmask represents the current clk throttling reasons
* @id: device minor.
* @id_control: minor of the control device
* @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
@@ -2455,7 +2597,6 @@ struct multi_cs_data {
* @disabled: is device disabled.
* @late_init_done: is late init stage was done during initialization.
* @hwmon_initialized: is H/W monitor sensors was initialized.
- * @hard_reset_pending: is there a hard reset work pending.
* @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
* @reset_on_lockup: true if a reset should be done in case of stuck CS, false
* otherwise.
@@ -2467,8 +2608,9 @@ struct multi_cs_data {
* @init_done: is the initialization of the device done.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
* @dma_mask: the dma mask that was set for this device
- * @in_debug: is device under debug. This, together with fpriv_list, enforces
- * that only a single user is configuring the debug infrastructure.
+ * @in_debug: whether the device is in a state where the profiling/tracing infrastructure
+ * can be used. This indication is needed because in some ASICs we need to do
+ * specific operations to enable that infrastructure.
* @power9_64bit_dma_enable: true to enable 64-bit DMA mask support. Relevant
* only to POWER9 machines.
* @cdev_sysfs_created: were char devices and sysfs nodes created.
@@ -2477,34 +2619,18 @@ struct multi_cs_data {
* @sync_stream_queue_idx: helper index for sync stream queues initialization.
* @collective_mon_idx: helper index for collective initialization
* @supports_coresight: is CoreSight supported.
- * @supports_soft_reset: is soft reset supported.
- * @allow_inference_soft_reset: true if the ASIC supports soft reset that is
- * initiated by user or TDR. This is only true
- * in inference ASICs, as there is no real-world
- * use-case of doing soft-reset in training (due
- * to the fact that training runs on multiple
- * devices)
* @supports_cb_mapping: is mapping a CB to the device's MMU supported.
- * @needs_reset: true if reset_on_lockup is false and device should be reset
- * due to lockup.
* @process_kill_trial_cnt: number of trials reset thread tried killing
* user processes
* @device_fini_pending: true if device_fini was called and might be
* waiting for the reset thread to finish
* @supports_staged_submission: true if staged submissions are supported
- * @curr_reset_cause: saves an enumerated reset cause when a hard reset is
- * triggered, and cleared after it is shared with preboot.
- * @prev_reset_trigger: saves the previous trigger which caused a reset, overidden
- * with a new value on next reset
- * @reset_trigger_repeated: set if device reset is triggered more than once with
- * same cause.
- * @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
- * complete instead.
* @device_cpu_is_halted: Flag to indicate whether the device CPU was already
* halted. We can't halt it again because the COMMS
* protocol will throw an error. Relevant only for
* cases where Linux was not loaded to device CPU
* @supports_wait_for_multi_cs: true if wait for multi CS is supported
+ * @is_compute_ctx_active: Whether there is an active compute context executing.
*/
struct hl_device {
struct pci_dev *pdev;
@@ -2515,7 +2641,6 @@ struct hl_device {
struct cdev cdev_ctrl;
struct device *dev;
struct device *dev_ctrl;
- struct delayed_work work_freq;
struct delayed_work work_heartbeat;
struct hl_device_reset_work device_reset_work;
char asic_name[HL_STR_MAX];
@@ -2546,7 +2671,6 @@ struct hl_device {
void *asic_specific;
struct hl_vm vm;
struct device *hwmon_dev;
- enum hl_pm_mng_profile pm_mng_profile;
struct hwmon_chip_info *hl_chip_info;
struct hl_dbg_device_entry hl_debugfs;
@@ -2560,9 +2684,9 @@ struct hl_device {
u64 internal_cb_va_base;
struct list_head fpriv_list;
+ struct list_head fpriv_ctrl_list;
struct mutex fpriv_list_lock;
-
- struct hl_ctx *compute_ctx;
+ struct mutex fpriv_ctrl_list_lock;
struct hl_cs_counters_atomic aggregated_cs_counters;
@@ -2577,6 +2701,11 @@ struct hl_device {
struct multi_cs_completion multi_cs_completion[
MULTI_CS_MAX_USER_CTX];
+ struct hl_clk_throttle clk_throttling;
+ struct last_error_session_info last_error;
+
+ struct hl_reset_info reset_info;
+
u32 *stream_master_qid_arr;
atomic64_t dram_used_mem;
u64 timeout_jiffies;
@@ -2587,21 +2716,17 @@ struct hl_device {
u64 last_successful_open_jif;
u64 last_open_session_duration_jif;
u64 open_counter;
- atomic_t in_reset;
- enum hl_pll_frequency curr_pll_profile;
+ u64 fw_poll_interval_usec;
+ ktime_t last_successful_open_ktime;
enum cpucp_card_types card_type;
u32 major;
u32 high_pll;
- u32 soft_reset_cnt;
- u32 hard_reset_cnt;
- u32 clk_throttling_reason;
u16 id;
u16 id_control;
u16 cpu_pci_msb_addr;
u8 disabled;
u8 late_init_done;
u8 hwmon_initialized;
- u8 hard_reset_pending;
u8 heartbeat;
u8 reset_on_lockup;
u8 dram_default_page_mapping;
@@ -2618,20 +2743,14 @@ struct hl_device {
u8 sync_stream_queue_idx;
u8 collective_mon_idx;
u8 supports_coresight;
- u8 supports_soft_reset;
- u8 allow_inference_soft_reset;
u8 supports_cb_mapping;
- u8 needs_reset;
u8 process_kill_trial_cnt;
u8 device_fini_pending;
u8 supports_staged_submission;
- u8 curr_reset_cause;
- u8 prev_reset_trigger;
- u8 reset_trigger_repeated;
- u8 skip_reset_on_timeout;
u8 device_cpu_is_halted;
u8 supports_wait_for_multi_cs;
u8 stream_master_qid_arr_size;
+ u8 is_compute_ctx_active;
/* Parameters for bring-up */
u64 nic_ports_mask;
@@ -2659,6 +2778,7 @@ struct hl_device {
* wait cs are used to wait of the reserved encaps signals.
* @hdev: pointer to habanalabs device structure.
* @hw_sob: pointer to H/W SOB used in the reservation.
+ * @ctx: pointer to the user's context data structure
* @cs_seq: staged cs sequence which contains encapsulated signals
* @id: idr handler id to be used to fetch the handler info
* @q_idx: stream queue index
@@ -2669,6 +2789,7 @@ struct hl_cs_encaps_sig_handle {
struct kref refcount;
struct hl_device *hdev;
struct hl_hw_sob *hw_sob;
+ struct hl_ctx *ctx;
u64 cs_seq;
u32 id;
u32 q_idx;
@@ -2757,21 +2878,9 @@ static inline bool hl_mem_area_inside_range(u64 address, u64 size,
static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
u64 range_start_address, u64 range_end_address)
{
- u64 end_address = address + size;
+ u64 end_address = address + size - 1;
- if ((address >= range_start_address) &&
- (address < range_end_address))
- return true;
-
- if ((end_address >= range_start_address) &&
- (end_address < range_end_address))
- return true;
-
- if ((address < range_start_address) &&
- (end_address >= range_end_address))
- return true;
-
- return false;
+ return ((address <= range_end_address) && (range_start_address <= end_address));
}
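
/*
 * Editorial sketch, not part of the patch: the rewritten predicate is the
 * standard closed-interval overlap test -- two ranges intersect iff each one
 * starts no later than the other ends. Illustrative checks:
 */
static inline void example_crosses_range_checks(void)
{
	/* Area [0x0F80, 0x107F] overlaps the head of [0x1000, 0x1FFF]. */
	WARN_ON(!hl_mem_area_crosses_range(0x0F80, 0x100, 0x1000, 0x1FFF));
	/* Area [0x2000, 0x20FF] starts one byte past the range end. */
	WARN_ON(hl_mem_area_crosses_range(0x2000, 0x100, 0x1000, 0x1FFF));
}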
int hl_device_open(struct inode *inode, struct file *filp);
@@ -2779,10 +2888,7 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp);
bool hl_device_operational(struct hl_device *hdev,
enum hl_device_status *status);
enum hl_device_status hl_device_status(struct hl_device *hdev);
-int hl_device_set_debug_mode(struct hl_device *hdev, bool enable);
-int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
- enum hl_asic_type asic_type, int minor);
-void destroy_hdev(struct hl_device *hdev);
+int hl_device_set_debug_mode(struct hl_device *hdev, struct hl_ctx *ctx, bool enable);
int hl_hw_queues_create(struct hl_device *hdev);
void hl_hw_queues_destroy(struct hl_device *hdev);
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
@@ -2821,6 +2927,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx);
void hl_ctx_do_release(struct kref *ref);
void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx);
int hl_ctx_put(struct hl_ctx *ctx);
+struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev);
struct hl_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq);
int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
struct hl_fence **fence, u32 arr_len);
@@ -2834,7 +2941,6 @@ int hl_device_resume(struct hl_device *hdev);
int hl_device_reset(struct hl_device *hdev, u32 flags);
void hl_hpriv_get(struct hl_fpriv *hpriv);
int hl_hpriv_put(struct hl_fpriv *hpriv);
-int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
int hl_build_hwmon_channel_info(struct hl_device *hdev,
@@ -2915,6 +3021,9 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size,
int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
u64 phys_addr, u32 size);
int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size);
+int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags);
+int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
+ u32 flags, u32 asid, u64 va, u64 size);
void hl_mmu_swap_out(struct hl_ctx *ctx);
void hl_mmu_swap_in(struct hl_ctx *ctx);
int hl_mmu_if_set_funcs(struct hl_device *hdev);
@@ -2969,6 +3078,10 @@ int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
struct fw_load_mgr *fw_loader,
enum comms_cmd cmd, unsigned int size,
bool wait_ok, u32 timeout);
+int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
+ struct cpucp_hbm_row_info *info);
+int hl_fw_dram_pending_row_get(struct hl_device *hdev, u32 *pend_rows_num);
+int hl_fw_cpucp_engine_core_asid_set(struct hl_device *hdev, u32 asid);
int hl_pci_bars_map(struct hl_device *hdev, const char * const name[3],
bool is_wc[3]);
int hl_pci_elbi_read(struct hl_device *hdev, u64 addr, u32 *data);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 949d1b5c5c41..690b763c7a95 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -153,15 +153,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
goto out_err;
}
- if (hdev->in_debug) {
- dev_err_ratelimited(hdev->dev,
- "Can't open %s because it is being debugged by another user\n",
- dev_name(hdev->dev));
- rc = -EPERM;
- goto out_err;
- }
-
- if (hdev->compute_ctx) {
+ if (hdev->is_compute_ctx_active) {
dev_dbg_ratelimited(hdev->dev,
"Can't open %s because another user is working on it\n",
dev_name(hdev->dev));
@@ -175,20 +167,17 @@ int hl_device_open(struct inode *inode, struct file *filp)
goto out_err;
}
- /* Device is IDLE at this point so it is legal to change PLLs.
- * There is no need to check anything because if the PLL is
- * already HIGH, the set function will return without doing
- * anything
- */
- hl_device_set_frequency(hdev, PLL_HIGH);
-
list_add(&hpriv->dev_node, &hdev->fpriv_list);
mutex_unlock(&hdev->fpriv_list_lock);
hl_debugfs_add_file(hpriv);
+ atomic_set(&hdev->last_error.cs_write_disable, 0);
+ atomic_set(&hdev->last_error.razwi_write_disable, 0);
+
hdev->open_counter++;
hdev->last_successful_open_jif = jiffies;
+ hdev->last_successful_open_ktime = ktime_get();
return 0;
@@ -231,12 +220,11 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
hpriv->hdev = hdev;
filp->private_data = hpriv;
hpriv->filp = filp;
- hpriv->is_control = true;
nonseekable_open(inode, filp);
hpriv->taskpid = find_get_pid(current->pid);
- mutex_lock(&hdev->fpriv_list_lock);
+ mutex_lock(&hdev->fpriv_ctrl_list_lock);
if (!hl_device_operational(hdev, NULL)) {
dev_err_ratelimited(hdev->dev_ctrl,
@@ -246,13 +234,13 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
goto out_err;
}
- list_add(&hpriv->dev_node, &hdev->fpriv_list);
- mutex_unlock(&hdev->fpriv_list_lock);
+ list_add(&hpriv->dev_node, &hdev->fpriv_ctrl_list);
+ mutex_unlock(&hdev->fpriv_ctrl_list_lock);
return 0;
out_err:
- mutex_unlock(&hdev->fpriv_list_lock);
+ mutex_unlock(&hdev->fpriv_ctrl_list_lock);
filp->private_data = NULL;
put_pid(hpriv->taskpid);
@@ -263,6 +251,7 @@ out_err:
static void set_driver_behavior_per_device(struct hl_device *hdev)
{
+ hdev->pldm = 0;
hdev->fw_components = FW_TYPE_ALL_TYPES;
hdev->cpu_queues_enable = 1;
hdev->heartbeat = 1;
@@ -279,23 +268,53 @@ static void set_driver_behavior_per_device(struct hl_device *hdev)
hdev->axi_drain = 0;
}
-/*
+static void copy_kernel_module_params_to_device(struct hl_device *hdev)
+{
+ hdev->major = hl_major;
+ hdev->memory_scrub = memory_scrub;
+ hdev->reset_on_lockup = reset_on_lockup;
+ hdev->boot_error_status_mask = boot_error_status_mask;
+
+ if (timeout_locked)
+ hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
+ else
+ hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+}
+
+static int fixup_device_params(struct hl_device *hdev)
+{
+ hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
+
+ hdev->fw_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
+
+ hdev->stop_on_err = true;
+ hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
+
+ /* Enable only after the initialization of the device */
+ hdev->disabled = true;
+
+ /* Set default DMA mask to 32 bits */
+ hdev->dma_mask = 32;
+
+ return 0;
+}
+
+/**
* create_hdev - create habanalabs device instance
*
* @dev: will hold the pointer to the new habanalabs device structure
* @pdev: pointer to the pci device
- * @asic_type: in case of simulator device, which device is it
- * @minor: in case of simulator device, the minor of the device
*
* Allocate memory for habanalabs device and initialize basic fields
* Identify the ASIC type
* Allocate ID (minor) for the device (only for real devices)
*/
-int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
- enum hl_asic_type asic_type, int minor)
+static int create_hdev(struct hl_device **dev, struct pci_dev *pdev)
{
+ int main_id, ctrl_id = 0, rc = 0;
struct hl_device *hdev;
- int rc, main_id, ctrl_id = 0;
*dev = NULL;
@@ -303,69 +322,39 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
if (!hdev)
return -ENOMEM;
- /* First, we must find out which ASIC are we handling. This is needed
- * to configure the behavior of the driver (kernel parameters)
- */
- if (pdev) {
- hdev->asic_type = get_asic_type(pdev->device);
- if (hdev->asic_type == ASIC_INVALID) {
- dev_err(&pdev->dev, "Unsupported ASIC\n");
- rc = -ENODEV;
- goto free_hdev;
- }
- } else {
- hdev->asic_type = asic_type;
- }
-
- if (pdev)
- hdev->asic_prop.fw_security_enabled =
- is_asic_secured(hdev->asic_type);
- else
- hdev->asic_prop.fw_security_enabled = false;
+ /* can be NULL in case of simulator device */
+ hdev->pdev = pdev;
/* Assign status description string */
- strncpy(hdev->status[HL_DEVICE_STATUS_OPERATIONAL],
- "operational", HL_STR_MAX);
- strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET],
- "in reset", HL_STR_MAX);
- strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION],
- "disabled", HL_STR_MAX);
- strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET],
- "needs reset", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_OPERATIONAL], "operational", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET], "in reset", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_MALFUNCTION], "disabled", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET], "needs reset", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_IN_DEVICE_CREATION],
"in device creation", HL_STR_MAX);
- hdev->major = hl_major;
- hdev->reset_on_lockup = reset_on_lockup;
- hdev->memory_scrub = memory_scrub;
- hdev->boot_error_status_mask = boot_error_status_mask;
- hdev->stop_on_err = true;
+	/* First, we must find out which ASIC we are handling. This is needed
+ * to configure the behavior of the driver (kernel parameters)
+ */
+ hdev->asic_type = get_asic_type(pdev->device);
+ if (hdev->asic_type == ASIC_INVALID) {
+ dev_err(&pdev->dev, "Unsupported ASIC\n");
+ rc = -ENODEV;
+ goto free_hdev;
+ }
- hdev->pldm = 0;
+ copy_kernel_module_params_to_device(hdev);
set_driver_behavior_per_device(hdev);
- hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
- hdev->prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
-
- if (timeout_locked)
- hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
- else
- hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
-
- hdev->disabled = true;
- hdev->pdev = pdev; /* can be NULL in case of simulator device */
-
- /* Set default DMA mask to 32 bits */
- hdev->dma_mask = 32;
+ fixup_device_params(hdev);
mutex_lock(&hl_devs_idr_lock);
/* Always save 2 numbers, 1 for main device and 1 for control.
* They must be consecutive
*/
- main_id = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS,
- GFP_KERNEL);
+ main_id = idr_alloc(&hl_devs_idr, hdev, 0, HL_MAX_MINORS, GFP_KERNEL);
if (main_id >= 0)
ctrl_id = idr_alloc(&hl_devs_idr, hdev, main_id + 1,
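create_hdev() reserves two IDs per device, the main ID and a control ID that must be exactly main_id + 1, which is why the second idr_alloc() is bounded to the single slot after the first. A userspace sketch of the same allocate-consecutive-or-roll-back pattern, modeled with a bitmap instead of the kernel IDR (names hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define MAX_MINORS 256
static bool used[MAX_MINORS];

/* first free id in [start, end), -1 if none: models idr_alloc() */
static int id_alloc(int start, int end)
{
	for (int i = start; i < end && i < MAX_MINORS; i++) {
		if (!used[i]) {
			used[i] = true;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	/* reserve two consecutive ids: main, then control at exactly main+1 */
	int main_id = id_alloc(0, MAX_MINORS);
	int ctrl_id = -1;

	if (main_id >= 0)
		ctrl_id = id_alloc(main_id + 1, main_id + 2);

	if (main_id >= 0 && ctrl_id < 0) {
		used[main_id] = false;	/* roll back on partial failure */
		main_id = -1;
	}
	printf("main=%d ctrl=%d\n", main_id, ctrl_id);
	return 0;
}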
@@ -405,7 +394,7 @@ free_hdev:
* @dev: pointer to the habanalabs device structure
*
*/
-void destroy_hdev(struct hl_device *hdev)
+static void destroy_hdev(struct hl_device *hdev)
{
/* Remove device from the device list */
mutex_lock(&hl_devs_idr_lock);
@@ -444,7 +433,7 @@ static int hl_pmops_resume(struct device *dev)
return hl_device_resume(hdev);
}
-/*
+/**
* hl_pci_probe - probe PCI habanalabs devices
*
* @pdev: pointer to pci device
@@ -454,8 +443,7 @@ static int hl_pmops_resume(struct device *dev)
* Create a new habanalabs device and initialize it according to the
* device's type
*/
-static int hl_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int hl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct hl_device *hdev;
int rc;
@@ -464,7 +452,7 @@ static int hl_pci_probe(struct pci_dev *pdev,
" device found [%04x:%04x] (rev %x)\n",
(int)pdev->vendor, (int)pdev->device, (int)pdev->revision);
- rc = create_hdev(&hdev, pdev, ASIC_INVALID, -1);
+ rc = create_hdev(&hdev, pdev);
if (rc)
return rc;
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 86c3257d9ae1..3ba3a8ffda3e 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -158,7 +158,7 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
}
-static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
+static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args)
{
struct hl_debug_params *params;
void *input = NULL, *output = NULL;
@@ -200,7 +200,7 @@ static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
params->output_size = args->output_size;
}
- rc = hdev->asic_funcs->debug_coresight(hdev, params);
+ rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params);
if (rc) {
dev_err(hdev->dev,
"debug coresight operation failed %d\n", rc);
@@ -269,8 +269,8 @@ static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
if ((!max_size) || (!out))
return -EINVAL;
- reset_count.hard_reset_cnt = hdev->hard_reset_cnt;
- reset_count.soft_reset_cnt = hdev->soft_reset_cnt;
+ reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
+ reset_count.soft_reset_cnt = hdev->reset_info.soft_reset_cnt;
return copy_to_user(out, &reset_count,
min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
@@ -313,15 +313,38 @@ static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct hl_device *hdev = hpriv->hdev;
struct hl_info_clk_throttle clk_throttle = {0};
+ ktime_t end_time, zero_time = ktime_set(0, 0);
u32 max_size = args->return_size;
- void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int i;
if ((!max_size) || (!out))
return -EINVAL;
- clk_throttle.clk_throttling_reason = hdev->clk_throttling_reason;
+ mutex_lock(&hdev->clk_throttling.lock);
+
+ clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason;
+
+ for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) {
+ if (!(hdev->clk_throttling.aggregated_reason & BIT(i)))
+ continue;
+
+ clk_throttle.clk_throttling_timestamp_us[i] =
+ ktime_to_us(hdev->clk_throttling.timestamp[i].start);
+
+ if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time))
+ end_time = hdev->clk_throttling.timestamp[i].end;
+ else
+ end_time = ktime_get();
+
+ clk_throttle.clk_throttling_duration_ns[i] =
+ ktime_to_ns(ktime_sub(end_time,
+ hdev->clk_throttling.timestamp[i].start));
+
+ }
+ mutex_unlock(&hdev->clk_throttling.lock);
return copy_to_user(out, &clk_throttle,
min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
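clk_throttle_info() reports, per throttling reason, the start timestamp and a duration measured either to the recorded end time or, when the end timestamp is still zero (throttling ongoing), to the current time. The same open-window arithmetic as a small standalone model using plain nanosecond counters:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct throttle_window { int64_t start_ns, end_ns; /* end 0 = still active */ };

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

static int64_t throttle_duration_ns(const struct throttle_window *w)
{
	/* open window: measure up to the present moment */
	int64_t end = w->end_ns ? w->end_ns : now_ns();

	return end - w->start_ns;
}

int main(void)
{
	struct throttle_window w = { .start_ns = now_ns() - 5000000, .end_ns = 0 };

	printf("throttled for ~%lld ns so far\n",
	       (long long)throttle_duration_ns(&w));
	return 0;
}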
@@ -480,6 +503,94 @@ static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
}
+static int dram_pending_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ u32 pend_rows_num = 0;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num);
+ if (rc)
+ return rc;
+
+ return copy_to_user(out, &pend_rows_num,
+ min_t(size_t, max_size, sizeof(pend_rows_num))) ? -EFAULT : 0;
+}
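dram_pending_rows_info() and the handlers that follow share one copy-out contract: fill a local struct, then copy at most min(user buffer size, struct size) so an older userspace with a smaller struct layout still gets the fields it knows about. A memcpy-based model of that clamped copy (the driver itself uses copy_to_user(); names here are hypothetical):

#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* copy at most min(dst_size, src_size) bytes; models the ioctl copy-out */
static int copy_out_clamped(void *dst, size_t dst_size,
			    const void *src, size_t src_size)
{
	if (!dst || !dst_size)
		return -EINVAL;
	memcpy(dst, src, dst_size < src_size ? dst_size : src_size);
	return 0;
}

struct pending_rows_v2 { unsigned int pend_rows_num; unsigned int reserved; };

int main(void)
{
	struct pending_rows_v2 info = { .pend_rows_num = 3 };
	unsigned int small_buf;	/* old userspace: only knows the first field */

	if (!copy_out_clamped(&small_buf, sizeof(small_buf), &info, sizeof(info)))
		printf("pending rows: %u\n", small_buf);
	return 0;
}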
+
+static int dram_replaced_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ struct cpucp_hbm_row_info info = {0};
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+ int rc;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ rc = hl_fw_dram_replaced_row_get(hdev, &info);
+ if (rc)
+ return rc;
+
+ return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+}
+
+static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_info_last_err_open_dev_time info = {0};
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ info.timestamp = ktime_to_ns(hdev->last_error.open_dev_timestamp);
+
+ return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+}
+
+static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_info_cs_timeout_event info = {0};
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ info.seq = hdev->last_error.cs_timeout_seq;
+ info.timestamp = ktime_to_ns(hdev->last_error.cs_timeout_timestamp);
+
+ return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+}
+
+static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ struct hl_info_razwi_event info = {0};
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ info.timestamp = ktime_to_ns(hdev->last_error.razwi_timestamp);
+ info.addr = hdev->last_error.razwi_addr;
+ info.engine_id_1 = hdev->last_error.razwi_engine_id_1;
+ info.engine_id_2 = hdev->last_error.razwi_engine_id_2;
+ info.no_engine_id = hdev->last_error.razwi_non_engine_initiator;
+ info.error_type = hdev->last_error.razwi_type;
+
+ return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+}
+
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
struct device *dev)
{
@@ -503,6 +614,33 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_RESET_COUNT:
return get_reset_count(hdev, args);
+ case HL_INFO_HW_EVENTS:
+ return hw_events_info(hdev, false, args);
+
+ case HL_INFO_HW_EVENTS_AGGREGATE:
+ return hw_events_info(hdev, true, args);
+
+ case HL_INFO_CS_COUNTERS:
+ return cs_counters_info(hpriv, args);
+
+ case HL_INFO_CLK_THROTTLE_REASON:
+ return clk_throttle_info(hpriv, args);
+
+ case HL_INFO_SYNC_MANAGER:
+ return sync_manager_info(hpriv, args);
+
+ case HL_INFO_OPEN_STATS:
+ return open_stats_info(hpriv, args);
+
+ case HL_INFO_LAST_ERR_OPEN_DEV_TIME:
+ return last_err_open_dev_info(hpriv, args);
+
+ case HL_INFO_CS_TIMEOUT_EVENT:
+ return cs_timeout_info(hpriv, args);
+
+ case HL_INFO_RAZWI_EVENT:
+ return razwi_info(hpriv, args);
+
default:
break;
}
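The reshuffled _hl_info_ioctl() now dispatches in two stages: the first switch serves ops that must keep working while the device is non-operational (error records, counters, timestamps), and only the remaining ops are gated behind the operational check. A sketch of that two-stage gating layout with hypothetical op codes:

#include <stdbool.h>
#include <stdio.h>

enum op { OP_RESET_COUNT, OP_RAZWI_EVENT, OP_DRAM_USAGE, OP_POWER };

static bool device_operational;

static int do_info_op(enum op op)
{
	/* stage 1: ops that must work even while the device is in reset */
	switch (op) {
	case OP_RESET_COUNT:
	case OP_RAZWI_EVENT:
		return 0;	/* served from driver state, no h/w access */
	default:
		break;
	}

	if (!device_operational)
		return -16;	/* -EBUSY-like: device unavailable */

	/* stage 2: ops that need a working device */
	switch (op) {
	case OP_DRAM_USAGE:
	case OP_POWER:
		return 0;
	default:
		return -22;	/* -EINVAL: unknown op */
	}
}

int main(void)
{
	device_operational = false;
	printf("razwi: %d, power: %d\n",
	       do_info_op(OP_RAZWI_EVENT), do_info_op(OP_POWER));
	return 0;
}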
@@ -515,10 +653,6 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
}
switch (args->op) {
- case HL_INFO_HW_EVENTS:
- rc = hw_events_info(hdev, false, args);
- break;
-
case HL_INFO_DRAM_USAGE:
rc = dram_usage_info(hpriv, args);
break;
@@ -531,10 +665,6 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
rc = device_utilization(hdev, args);
break;
- case HL_INFO_HW_EVENTS_AGGREGATE:
- rc = hw_events_info(hdev, true, args);
- break;
-
case HL_INFO_CLK_RATE:
rc = get_clk_rate(hdev, args);
break;
@@ -542,18 +672,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_TIME_SYNC:
return time_sync_info(hdev, args);
- case HL_INFO_CS_COUNTERS:
- return cs_counters_info(hpriv, args);
-
case HL_INFO_PCI_COUNTERS:
return pci_counters_info(hpriv, args);
- case HL_INFO_CLK_THROTTLE_REASON:
- return clk_throttle_info(hpriv, args);
-
- case HL_INFO_SYNC_MANAGER:
- return sync_manager_info(hpriv, args);
-
case HL_INFO_TOTAL_ENERGY:
return total_energy_consumption_info(hpriv, args);
@@ -563,12 +684,16 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_POWER:
return power_info(hpriv, args);
- case HL_INFO_OPEN_STATS:
- return open_stats_info(hpriv, args);
+
+ case HL_INFO_DRAM_REPLACED_ROWS:
+ return dram_replaced_rows_info(hpriv, args);
+
+ case HL_INFO_DRAM_PENDING_ROWS:
+ return dram_pending_rows_info(hpriv, args);
default:
dev_err(dev, "Invalid request %d\n", args->op);
- rc = -ENOTTY;
+ rc = -EINVAL;
break;
}
@@ -613,16 +738,17 @@ static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
"Rejecting debug configuration request because device not in debug mode\n");
return -EFAULT;
}
- args->input_size =
- min(args->input_size, hl_debug_struct_size[args->op]);
- rc = debug_coresight(hdev, args);
+ args->input_size = min(args->input_size, hl_debug_struct_size[args->op]);
+ rc = debug_coresight(hdev, hpriv->ctx, args);
break;
+
case HL_DEBUG_OP_SET_MODE:
- rc = hl_device_set_debug_mode(hdev, (bool) args->enable);
+ rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable);
break;
+
default:
dev_err(hdev->dev, "Invalid request %d\n", args->op);
- rc = -ENOTTY;
+ rc = -EINVAL;
break;
}
@@ -649,7 +775,6 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
const struct hl_ioctl_desc *ioctl, struct device *dev)
{
struct hl_fpriv *hpriv = filep->private_data;
- struct hl_device *hdev = hpriv->hdev;
unsigned int nr = _IOC_NR(cmd);
char stack_kdata[128] = {0};
char *kdata = NULL;
@@ -658,12 +783,6 @@ static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
u32 hl_size;
int retcode;
- if (hdev->hard_reset_pending) {
- dev_crit_ratelimited(dev,
- "Device HARD reset pending! Please close FD\n");
- return -ENODEV;
- }
-
/* Do not trust userspace, use our own definition */
func = ioctl->func;
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 0743319b10c7..6103e479e855 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -429,6 +429,9 @@ static int init_signal_cs(struct hl_device *hdev,
rc = hl_cs_signal_sob_wraparound_handler(hdev, q_idx, &hw_sob, 1,
false);
+ job->cs->sob_addr_offset = hw_sob->sob_addr;
+ job->cs->initial_sob_count = prop->next_sob_val - 1;
+
return rc;
}
@@ -571,7 +574,7 @@ static int encaps_sig_first_staged_cs_handler
struct hl_encaps_signals_mgr *mgr;
int rc = 0;
- mgr = &hdev->compute_ctx->sig_mgr;
+ mgr = &cs->ctx->sig_mgr;
spin_lock(&mgr->lock);
encaps_sig_hdl = idr_find(&mgr->handles, cs->encaps_sig_hdl_id);
diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c
index e33f65be8a00..57f5d2c48330 100644
--- a/drivers/misc/habanalabs/common/hwmon.c
+++ b/drivers/misc/habanalabs/common/hwmon.c
@@ -10,17 +10,148 @@
#include <linux/pci.h>
#include <linux/hwmon.h>
-#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1)
+#define HWMON_NR_SENSOR_TYPES (hwmon_max)
-int hl_build_hwmon_channel_info(struct hl_device *hdev,
- struct cpucp_sensor *sensors_arr)
+#ifdef _HAS_HWMON_HWMON_T_ENABLE
+
+static u32 fixup_flags_legacy_fw(struct hl_device *hdev, enum hwmon_sensor_types type,
+ u32 cpucp_flags)
{
- u32 counts[HWMON_NR_SENSOR_TYPES] = {0};
- u32 *sensors_by_type[HWMON_NR_SENSOR_TYPES] = {NULL};
+ u32 flags;
+
+ switch (type) {
+ case hwmon_temp:
+ flags = (cpucp_flags << 1) | HWMON_T_ENABLE;
+ break;
+
+ case hwmon_in:
+ flags = (cpucp_flags << 1) | HWMON_I_ENABLE;
+ break;
+
+ case hwmon_curr:
+ flags = (cpucp_flags << 1) | HWMON_C_ENABLE;
+ break;
+
+ case hwmon_fan:
+ flags = (cpucp_flags << 1) | HWMON_F_ENABLE;
+ break;
+
+ case hwmon_power:
+ flags = (cpucp_flags << 1) | HWMON_P_ENABLE;
+ break;
+
+ case hwmon_pwm:
+ /* enable bit was here from day 1, so no need to adjust */
+ flags = cpucp_flags;
+ break;
+
+ default:
+ dev_err(hdev->dev, "unsupported h/w sensor type %d\n", type);
+ flags = cpucp_flags;
+ break;
+ }
+
+ return flags;
+}
+
+static u32 fixup_attr_legacy_fw(u32 attr)
+{
+ return (attr - 1);
+}
+
+#else
+
+static u32 fixup_flags_legacy_fw(struct hl_device *hdev, enum hwmon_sensor_types type,
+ u32 cpucp_flags)
+{
+ return cpucp_flags;
+}
+
+static u32 fixup_attr_legacy_fw(u32 attr)
+{
+ return attr;
+}
+
+#endif /* !_HAS_HWMON_HWMON_T_ENABLE */
+
+static u32 adjust_hwmon_flags(struct hl_device *hdev, enum hwmon_sensor_types type, u32 cpucp_flags)
+{
+ u32 flags, cpucp_input_val;
+ bool use_cpucp_enum;
+
+ use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false;
+
+	/* If the f/w is using its own enum, we need to check if the property values are aligned.
+ * If not, it means we need to adjust the values to the new format that is used in the
+ * kernel since 5.6 (enum values were incremented by 1 by adding a new enable value).
+ */
+ if (use_cpucp_enum) {
+ switch (type) {
+ case hwmon_temp:
+ cpucp_input_val = cpucp_temp_input;
+ if (cpucp_input_val == hwmon_temp_input)
+ flags = cpucp_flags;
+ else
+ flags = (cpucp_flags << 1) | HWMON_T_ENABLE;
+ break;
+
+ case hwmon_in:
+ cpucp_input_val = cpucp_in_input;
+ if (cpucp_input_val == hwmon_in_input)
+ flags = cpucp_flags;
+ else
+ flags = (cpucp_flags << 1) | HWMON_I_ENABLE;
+ break;
+
+ case hwmon_curr:
+ cpucp_input_val = cpucp_curr_input;
+ if (cpucp_input_val == hwmon_curr_input)
+ flags = cpucp_flags;
+ else
+ flags = (cpucp_flags << 1) | HWMON_C_ENABLE;
+ break;
+
+ case hwmon_fan:
+ cpucp_input_val = cpucp_fan_input;
+ if (cpucp_input_val == hwmon_fan_input)
+ flags = cpucp_flags;
+ else
+ flags = (cpucp_flags << 1) | HWMON_F_ENABLE;
+ break;
+
+ case hwmon_pwm:
+ /* enable bit was here from day 1, so no need to adjust */
+ flags = cpucp_flags;
+ break;
+
+ case hwmon_power:
+ cpucp_input_val = CPUCP_POWER_INPUT;
+ if (cpucp_input_val == hwmon_power_input)
+ flags = cpucp_flags;
+ else
+ flags = (cpucp_flags << 1) | HWMON_P_ENABLE;
+ break;
+
+ default:
+ dev_err(hdev->dev, "unsupported h/w sensor type %d\n", type);
+ flags = cpucp_flags;
+ break;
+ }
+ } else {
+ flags = fixup_flags_legacy_fw(hdev, type, cpucp_flags);
+ }
+
+ return flags;
+}
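The legacy-firmware fixups exist because kernel 5.6 prepended a new *_ENABLE value to each hwmon attribute enum, shifting every subsequent value up by one: flags coming from old firmware need (flags << 1) | *_ENABLE, and attribute values need attr - 1 before being sent back to that firmware. A standalone model of the realignment (enum values here are illustrative, not the real hwmon ones):

#include <stdio.h>

/* old firmware enum: input was value 0 */
enum old_temp_attr { OLD_TEMP_INPUT, OLD_TEMP_MAX };
/* new kernel enum: an enable attribute was prepended */
enum new_temp_attr { NEW_TEMP_ENABLE, NEW_TEMP_INPUT, NEW_TEMP_MAX };

#define NEW_T_ENABLE (1u << NEW_TEMP_ENABLE)

static unsigned int fixup_flags(unsigned int old_flags)
{
	/* every old bit moves up one position; enable is always set */
	return (old_flags << 1) | NEW_T_ENABLE;
}

static unsigned int fixup_attr(unsigned int new_attr)
{
	return new_attr - 1;	/* translate new enum back to old firmware's */
}

int main(void)
{
	unsigned int old = 1u << OLD_TEMP_INPUT;

	printf("flags 0x%x -> 0x%x, attr %d -> %d\n",
	       old, fixup_flags(old), NEW_TEMP_INPUT,
	       fixup_attr(NEW_TEMP_INPUT));
	return 0;
}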
+
+int hl_build_hwmon_channel_info(struct hl_device *hdev, struct cpucp_sensor *sensors_arr)
+{
+ u32 num_sensors_for_type, flags, num_active_sensor_types = 0, arr_size = 0, *curr_arr;
u32 sensors_by_type_next_index[HWMON_NR_SENSOR_TYPES] = {0};
+ u32 *sensors_by_type[HWMON_NR_SENSOR_TYPES] = {NULL};
struct hwmon_channel_info **channels_info;
- u32 num_sensors_for_type, num_active_sensor_types = 0,
- arr_size = 0, *curr_arr;
+ u32 counts[HWMON_NR_SENSOR_TYPES] = {0};
enum hwmon_sensor_types type;
int rc, i, j;
@@ -31,8 +162,7 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
break;
if (type >= HWMON_NR_SENSOR_TYPES) {
- dev_err(hdev->dev,
- "Got wrong sensor type %d from device\n", type);
+ dev_err(hdev->dev, "Got wrong sensor type %d from device\n", type);
return -EINVAL;
}
@@ -45,8 +175,9 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
continue;
num_sensors_for_type = counts[i] + 1;
- curr_arr = kcalloc(num_sensors_for_type, sizeof(*curr_arr),
- GFP_KERNEL);
+ dev_dbg(hdev->dev, "num_sensors_for_type %d = %d\n", i, num_sensors_for_type);
+
+ curr_arr = kcalloc(num_sensors_for_type, sizeof(*curr_arr), GFP_KERNEL);
if (!curr_arr) {
rc = -ENOMEM;
goto sensors_type_err;
@@ -59,20 +190,18 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
for (i = 0 ; i < arr_size ; i++) {
type = le32_to_cpu(sensors_arr[i].type);
curr_arr = sensors_by_type[type];
- curr_arr[sensors_by_type_next_index[type]++] =
- le32_to_cpu(sensors_arr[i].flags);
+ flags = adjust_hwmon_flags(hdev, type, le32_to_cpu(sensors_arr[i].flags));
+ curr_arr[sensors_by_type_next_index[type]++] = flags;
}
- channels_info = kcalloc(num_active_sensor_types + 1,
- sizeof(*channels_info), GFP_KERNEL);
+ channels_info = kcalloc(num_active_sensor_types + 1, sizeof(*channels_info), GFP_KERNEL);
if (!channels_info) {
rc = -ENOMEM;
goto channels_info_array_err;
}
for (i = 0 ; i < num_active_sensor_types ; i++) {
- channels_info[i] = kzalloc(sizeof(*channels_info[i]),
- GFP_KERNEL);
+ channels_info[i] = kzalloc(sizeof(*channels_info[i]), GFP_KERNEL);
if (!channels_info[i]) {
rc = -ENOMEM;
goto channel_info_err;
@@ -88,18 +217,19 @@ int hl_build_hwmon_channel_info(struct hl_device *hdev,
j++;
}
- hdev->hl_chip_info->info =
- (const struct hwmon_channel_info **)channels_info;
+ hdev->hl_chip_info->info = (const struct hwmon_channel_info **)channels_info;
return 0;
channel_info_err:
- for (i = 0 ; i < num_active_sensor_types ; i++)
+ for (i = 0 ; i < num_active_sensor_types ; i++) {
if (channels_info[i]) {
kfree(channels_info[i]->config);
kfree(channels_info[i]);
}
+ }
kfree(channels_info);
+
channels_info_array_err:
sensors_type_err:
for (i = 0 ; i < HWMON_NR_SENSOR_TYPES ; i++)
@@ -112,14 +242,16 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
struct hl_device *hdev = dev_get_drvdata(dev);
- int rc;
+ bool use_cpucp_enum;
u32 cpucp_attr;
- bool use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
- CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false;
+ int rc;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
+ use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false;
+
switch (type) {
case hwmon_temp:
switch (attr) {
@@ -151,7 +283,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
rc = hl_get_temperature(hdev, channel, cpucp_attr, val);
else
- rc = hl_get_temperature(hdev, channel, attr, val);
+ rc = hl_get_temperature(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_in:
switch (attr) {
@@ -174,7 +306,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
rc = hl_get_voltage(hdev, channel, cpucp_attr, val);
else
- rc = hl_get_voltage(hdev, channel, attr, val);
+ rc = hl_get_voltage(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_curr:
switch (attr) {
@@ -197,7 +329,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
rc = hl_get_current(hdev, channel, cpucp_attr, val);
else
- rc = hl_get_current(hdev, channel, attr, val);
+ rc = hl_get_current(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_fan:
switch (attr) {
@@ -217,7 +349,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
rc = hl_get_fan_speed(hdev, channel, cpucp_attr, val);
else
- rc = hl_get_fan_speed(hdev, channel, attr, val);
+ rc = hl_get_fan_speed(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_pwm:
switch (attr) {
@@ -234,6 +366,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
rc = hl_get_pwm_info(hdev, channel, cpucp_attr, val);
else
+ /* no need for fixup as pwm was aligned from day 1 */
rc = hl_get_pwm_info(hdev, channel, attr, val);
break;
case hwmon_power:
@@ -251,7 +384,7 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
rc = hl_get_power(hdev, channel, cpucp_attr, val);
else
- rc = hl_get_power(hdev, channel, attr, val);
+ rc = hl_get_power(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
default:
return -EINVAL;
@@ -286,7 +419,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
hl_set_temperature(hdev, channel, cpucp_attr, val);
else
- hl_set_temperature(hdev, channel, attr, val);
+ hl_set_temperature(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_pwm:
switch (attr) {
@@ -303,6 +436,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
hl_set_pwm_info(hdev, channel, cpucp_attr, val);
else
+ /* no need for fixup as pwm was aligned from day 1 */
hl_set_pwm_info(hdev, channel, attr, val);
break;
case hwmon_in:
@@ -317,7 +451,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
hl_set_voltage(hdev, channel, cpucp_attr, val);
else
- hl_set_voltage(hdev, channel, attr, val);
+ hl_set_voltage(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_curr:
switch (attr) {
@@ -331,7 +465,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
hl_set_current(hdev, channel, cpucp_attr, val);
else
- hl_set_current(hdev, channel, attr, val);
+ hl_set_current(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
case hwmon_power:
switch (attr) {
@@ -345,7 +479,7 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
if (use_cpucp_enum)
hl_set_power(hdev, channel, cpucp_attr, val);
else
- hl_set_power(hdev, channel, attr, val);
+ hl_set_power(hdev, channel, fixup_attr_legacy_fw(attr), val);
break;
default:
return -EINVAL;
@@ -444,6 +578,9 @@ int hl_get_temperature(struct hl_device *hdev,
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
+ dev_dbg(hdev->dev, "get temp, ctl 0x%x, sensor %d, type %d\n",
+ pkt.ctl, pkt.sensor_index, pkt.type);
+
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
@@ -677,12 +814,18 @@ int hl_set_power(struct hl_device *hdev,
int sensor_index, u32 attr, long value)
{
struct cpucp_packet pkt;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
int rc;
memset(&pkt, 0, sizeof(pkt));
- pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
+ if (prop->use_get_power_for_reset_history)
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
CPUCP_PKT_CTL_OPCODE_SHIFT);
+ else
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_SET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+
pkt.sensor_index = __cpu_to_le16(sensor_index);
pkt.type = __cpu_to_le16(attr);
pkt.value = __cpu_to_le64(value);
diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c
index 96d82b682674..1b6bdc900c26 100644
--- a/drivers/misc/habanalabs/common/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -145,8 +145,12 @@ static void handle_user_cq(struct hl_device *hdev,
spin_lock(&user_cq->wait_list_lock);
list_for_each_entry(pend, &user_cq->wait_list_head, wait_list_node) {
- pend->fence.timestamp = now;
- complete_all(&pend->fence.completion);
+ if ((pend->cq_kernel_addr &&
+ *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
+ !pend->cq_kernel_addr) {
+ pend->fence.timestamp = now;
+ complete_all(&pend->fence.completion);
+ }
}
spin_unlock(&user_cq->wait_list_lock);
}
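handle_user_cq() no longer completes every waiter on each interrupt: a waiter with a target counter is completed only once the counter value in memory has reached its target, while a waiter without a counter address still completes unconditionally. The gate condition in isolation, as a compilable sketch (field names follow the driver):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct waiter {
	uint64_t *cq_kernel_addr;	/* NULL: complete on any interrupt */
	uint64_t cq_target_value;
};

/* should this waiter be completed on the current interrupt? */
static bool waiter_done(const struct waiter *w)
{
	return !w->cq_kernel_addr || *w->cq_kernel_addr >= w->cq_target_value;
}

int main(void)
{
	uint64_t counter = 2;
	struct waiter w = { .cq_kernel_addr = &counter, .cq_target_value = 3 };

	printf("done=%d\n", waiter_done(&w));	/* 0: counter not there yet */
	counter = 3;
	printf("done=%d\n", waiter_done(&w));	/* 1 */
	return 0;
}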
@@ -245,10 +249,8 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
*/
dma_rmb();
- if (hdev->disabled) {
- dev_warn(hdev->dev,
- "Device disabled but received IRQ %d for EQ\n",
- irq);
+ if (hdev->disabled && !hdev->reset_info.is_in_soft_reset) {
+ dev_warn(hdev->dev, "Device disabled but received an EQ event\n");
goto skip_irq;
}
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 9bd626a00de3..c1eefaebacb6 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -316,7 +316,7 @@ static int free_phys_pg_pack(struct hl_device *hdev,
}
if (rc && !hdev->disabled)
- hl_device_reset(hdev, HL_RESET_HARD);
+ hl_device_reset(hdev, HL_DRV_RESET_HARD);
end:
kvfree(phys_pg_pack->pages);
@@ -477,7 +477,7 @@ static int add_va_block_locked(struct hl_device *hdev,
struct list_head *va_list, u64 start, u64 end)
{
struct hl_vm_va_block *va_block, *res = NULL;
- u64 size = end - start;
+ u64 size = end - start + 1;
print_va_list_locked(hdev, va_list);
@@ -518,7 +518,7 @@ static int add_va_block_locked(struct hl_device *hdev,
/**
* add_va_block() - wrapper for add_va_block_locked.
* @hdev: pointer to the habanalabs device structure.
- * @va_list: pointer to the virtual addresses block list.
+ * @va_range: pointer to the virtual addresses range object.
* @start: start virtual address.
* @end: end virtual address.
*
@@ -538,8 +538,11 @@ static inline int add_va_block(struct hl_device *hdev,
}
/**
- * is_hint_crossing_range() - check if hint address crossing specified reserved
- * range.
+ * is_hint_crossing_range() - check if a hint address crosses a specified reserved range.
+ * @range_type: virtual space range type.
+ * @start_addr: start virtual address.
+ * @size: block size.
+ * @prop: asic properties structure to retrieve reserved ranges from.
*/
static inline bool is_hint_crossing_range(enum hl_va_range_type range_type,
u64 start_addr, u32 size, struct asic_fixed_properties *prop) {
@@ -644,7 +647,7 @@ static u64 get_va_block(struct hl_device *hdev,
continue;
}
- valid_size = va_block->end - valid_start;
+ valid_size = va_block->end - valid_start + 1;
if (valid_size < size)
continue;
@@ -707,7 +710,7 @@ static u64 get_va_block(struct hl_device *hdev,
if (new_va_block->size > size) {
new_va_block->start += size;
- new_va_block->size = new_va_block->end - new_va_block->start;
+ new_va_block->size = new_va_block->end - new_va_block->start + 1;
} else {
list_del(&new_va_block->node);
kfree(new_va_block);
@@ -749,6 +752,7 @@ u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
/**
* hl_get_va_range_type() - get va_range type for the given address and size.
+ * @ctx: context to fetch va_range from.
* @address: the start address of the area we want to validate.
* @size: the size in bytes of the area we want to validate.
* @type: returned va_range type.
@@ -776,8 +780,8 @@ static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
* hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
* @hdev: pointer to the habanalabs device structure
* @ctx: pointer to the context structure.
- * @start: start virtual address.
- * @end: end virtual address.
+ * @start_addr: start virtual address.
+ * @size: number of bytes to unreserve.
*
* This function does the following:
* - Takes the list lock and calls add_va_block_locked.
@@ -1201,17 +1205,13 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
goto map_err;
}
- rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, false,
- *vm_type, ctx->asid, ret_vaddr, phys_pg_pack->total_size);
+ rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
+ ctx->asid, ret_vaddr, phys_pg_pack->total_size);
mutex_unlock(&ctx->mmu_lock);
- if (rc) {
- dev_err(hdev->dev,
- "mapping handle %u failed due to MMU cache invalidation\n",
- handle);
+ if (rc)
goto map_err;
- }
ret_vaddr += phys_pg_pack->offset;
@@ -1349,9 +1349,8 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
* at the loop end rather than for each iteration
*/
if (!ctx_free)
- rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, true,
- *vm_type, ctx->asid, vaddr,
- phys_pg_pack->total_size);
+ rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr,
+ phys_pg_pack->total_size);
mutex_unlock(&ctx->mmu_lock);
@@ -1364,11 +1363,6 @@ static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
if (!ctx_free) {
int tmp_rc;
- if (rc)
- dev_err(hdev->dev,
- "unmapping vaddr 0x%llx failed due to MMU cache invalidation\n",
- vaddr);
-
tmp_rc = add_va_block(hdev, va_range, vaddr,
vaddr + phys_pg_pack->total_size - 1);
if (tmp_rc) {
@@ -2037,7 +2031,7 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
default:
dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
- rc = -ENOTTY;
+ rc = -EINVAL;
break;
}
@@ -2162,7 +2156,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
default:
dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
- rc = -ENOTTY;
+ rc = -EINVAL;
break;
}
@@ -2339,6 +2333,8 @@ void hl_userptr_delete_list(struct hl_device *hdev,
/**
* hl_userptr_is_pinned() - returns whether the given userptr is pinned.
* @hdev: pointer to the habanalabs device structure.
+ * @addr: user address to check.
+ * @size: user block size to check.
* @userptr_list: pointer to the list to clear.
* @userptr: pointer to userptr to check.
*
@@ -2361,9 +2357,10 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
/**
* va_range_init() - initialize virtual addresses range.
* @hdev: pointer to the habanalabs device structure.
- * @va_range: pointer to the range to initialize.
+ * @va_ranges: pointer to va_ranges array.
* @start: range start address.
* @end: range end address.
+ * @page_size: page size for this va_range.
*
* This function does the following:
* - Initializes the virtual addresses list of the given range with the given
@@ -2388,8 +2385,14 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
start += PAGE_SIZE;
}
- if (end & (PAGE_SIZE - 1))
- end &= PAGE_MASK;
+ /*
+ * The end of the range is inclusive, hence we need to align it
+ * to the end of the last full page in the range. For example if
+ * end = 0x3ff5 with page size 0x1000, we need to align it to
+	 * 0x2fff. The remaining 0xff6 bytes do not form a full page.
+ */
+ if ((end + 1) & (PAGE_SIZE - 1))
+ end = ((end + 1) & PAGE_MASK) - 1;
}
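Since va_range ends are now inclusive, sizes become end - start + 1, and trimming a partial tail page means rounding end + 1 down to a page boundary and subtracting one, exactly as in the comment's 0x3ff5 example. A compilable model of that trim, assuming 4 KiB pages:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000u
#define PAGE_MASK (~((uint64_t)PAGE_SIZE - 1))

/* trim an inclusive [start, end] range to whole pages */
static void trim_to_pages(uint64_t *start, uint64_t *end)
{
	if (*start & (PAGE_SIZE - 1))
		*start = (*start & PAGE_MASK) + PAGE_SIZE;	/* round start up */
	if ((*end + 1) & (PAGE_SIZE - 1))
		*end = ((*end + 1) & PAGE_MASK) - 1;		/* round end down */
}

int main(void)
{
	uint64_t start = 0x0000, end = 0x3ff5;

	trim_to_pages(&start, &end);
	/* end becomes 0x2fff: 0x3000..0x3ff5 is not a full page */
	printf("start=0x%llx end=0x%llx size=0x%llx\n",
	       (unsigned long long)start, (unsigned long long)end,
	       (unsigned long long)(end - start + 1));
	return 0;
}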
if (start >= end) {
@@ -2414,7 +2417,7 @@ static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
/**
* va_range_fini() - clear a virtual addresses range.
* @hdev: pointer to the habanalabs structure.
- * va_range: pointer to virtual addresses rang.e
+ * @va_range: pointer to virtual addresses range.
*
* This function does the following:
* - Frees the virtual addresses block list and its lock.
@@ -2434,12 +2437,15 @@ static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
* @ctx: pointer to the habanalabs context structure.
* @host_range_start: host virtual addresses range start.
* @host_range_end: host virtual addresses range end.
+ * @host_page_size: host page size.
* @host_huge_range_start: host virtual addresses range start for memory
* allocated with huge pages.
* @host_huge_range_end: host virtual addresses range end for memory allocated
* with huge pages.
+ * @host_huge_page_size: host huge page size.
* @dram_range_start: dram virtual addresses range start.
* @dram_range_end: dram virtual addresses range end.
+ * @dram_page_size: dram page size.
*
* This function initializes the following:
* - MMU for context.
@@ -2564,14 +2570,14 @@ int hl_vm_ctx_init(struct hl_ctx *ctx)
return 0;
dram_range_start = prop->dmmu.start_addr;
- dram_range_end = prop->dmmu.end_addr;
+ dram_range_end = prop->dmmu.end_addr - 1;
dram_page_size = prop->dram_page_size ?
prop->dram_page_size : prop->dmmu.page_size;
host_range_start = prop->pmmu.start_addr;
- host_range_end = prop->pmmu.end_addr;
+ host_range_end = prop->pmmu.end_addr - 1;
host_page_size = prop->pmmu.page_size;
host_huge_range_start = prop->pmmu_huge.start_addr;
- host_huge_range_end = prop->pmmu_huge.end_addr;
+ host_huge_range_end = prop->pmmu_huge.end_addr - 1;
host_huge_page_size = prop->pmmu_huge.page_size;
return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
@@ -2618,7 +2624,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
* Clearly something went wrong on hard reset so no point in printing
* another side effect error
*/
- if (!hdev->hard_reset_pending && !hash_empty(ctx->mem_hash))
+ if (!hdev->reset_info.hard_reset_pending && !hash_empty(ctx->mem_hash))
dev_dbg(hdev->dev,
"user released device without removing its memory mappings\n");
@@ -2633,8 +2639,8 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx)
mutex_lock(&ctx->mmu_lock);
/* invalidate the cache once after the unmapping loop */
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_PHYS_PACK);
+ hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
+ hl_mmu_invalidate_cache(hdev, true, MMU_OP_PHYS_PACK);
mutex_unlock(&ctx->mmu_lock);
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index aa96917f62e5..9153a1f55175 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -637,3 +637,28 @@ u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr)
{
return addr;
}
+
+int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
+{
+ int rc;
+
+ rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
+ if (rc)
+ dev_err_ratelimited(hdev->dev, "MMU cache invalidation failed\n");
+
+ return rc;
+}
+
+int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
+ u32 flags, u32 asid, u64 va, u64 size)
+{
+ int rc;
+
+ rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, is_hard, flags,
+ asid, va, size);
+ if (rc)
+ dev_err_ratelimited(hdev->dev, "MMU cache range invalidation failed\n");
+
+ return rc;
+}
+
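The new hl_mmu_invalidate_cache() and hl_mmu_invalidate_cache_range() wrappers pull the ratelimited failure message out of every call site, so callers such as map_device_va() can drop their own dev_err() on failure. A generic sketch of this wrap-and-log pattern around a function pointer (names hypothetical):

#include <stdio.h>

struct dev {
	const char *name;
	int (*invalidate)(struct dev *d, int flags);
};

/* single place that logs the failure; callers just check rc */
static int dev_invalidate_cache(struct dev *d, int flags)
{
	int rc = d->invalidate(d, flags);

	if (rc)
		fprintf(stderr, "%s: cache invalidation failed (%d)\n",
			d->name, rc);
	return rc;
}

static int fake_invalidate(struct dev *d, int flags)
{
	(void)d;
	return flags < 0 ? -22 : 0;	/* -EINVAL-like failure */
}

int main(void)
{
	struct dev d = { .name = "hl0", .invalidate = fake_invalidate };

	dev_invalidate_cache(&d, 1);
	dev_invalidate_cache(&d, -1);
	return 0;
}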
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
index 0f536f79dd9c..6134b6ae7615 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
@@ -269,7 +269,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
num_of_hop3 = prop->dram_size_for_default_page_mapping;
do_div(num_of_hop3, prop->dram_page_size);
- do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
+ do_div(num_of_hop3, HOP_PTE_ENTRIES_512);
/* add hop1 and hop2 */
total_hops = num_of_hop3 + 2;
@@ -330,7 +330,7 @@ static int dram_default_mapping_init(struct hl_ctx *ctx)
for (i = 0 ; i < num_of_hop3 ; i++) {
hop3_pte_addr = ctx->dram_default_hops[i];
- for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
+ for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
write_final_pte(ctx, hop3_pte_addr, pte_val);
get_pte(ctx, ctx->dram_default_hops[i]);
hop3_pte_addr += HL_PTE_SIZE;
@@ -369,7 +369,7 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
num_of_hop3 = prop->dram_size_for_default_page_mapping;
do_div(num_of_hop3, prop->dram_page_size);
- do_div(num_of_hop3, PTE_ENTRIES_IN_HOP);
+ do_div(num_of_hop3, HOP_PTE_ENTRIES_512);
hop0_addr = get_hop0_addr(ctx);
/* add hop1 and hop2 */
@@ -379,7 +379,7 @@ static void dram_default_mapping_fini(struct hl_ctx *ctx)
for (i = 0 ; i < num_of_hop3 ; i++) {
hop3_pte_addr = ctx->dram_default_hops[i];
- for (j = 0 ; j < PTE_ENTRIES_IN_HOP ; j++) {
+ for (j = 0 ; j < HOP_PTE_ENTRIES_512 ; j++) {
clear_pte(ctx, hop3_pte_addr);
put_pte(ctx, ctx->dram_default_hops[i]);
hop3_pte_addr += HL_PTE_SIZE;
@@ -573,7 +573,7 @@ static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
curr_pte = *(u64 *) (uintptr_t) hop3_pte_addr;
- is_huge = curr_pte & LAST_MASK;
+ is_huge = curr_pte & mmu_prop->last_mask;
if (is_dram_addr && !is_huge) {
dev_err(hdev->dev,
@@ -597,7 +597,7 @@ static int _hl_mmu_v1_unmap(struct hl_ctx *ctx,
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
- HOP_PHYS_ADDR_MASK) | LAST_MASK |
+ HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
PAGE_PRESENT_MASK;
if (curr_pte == default_pte) {
dev_err(hdev->dev,
@@ -729,7 +729,7 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
if (hdev->dram_default_page_mapping && is_dram_addr) {
u64 default_pte = (prop->mmu_dram_default_page_addr &
- HOP_PHYS_ADDR_MASK) | LAST_MASK |
+ HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask |
PAGE_PRESENT_MASK;
if (curr_pte != default_pte) {
@@ -769,7 +769,7 @@ static int _hl_mmu_v1_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
goto err;
}
- curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | LAST_MASK
+ curr_pte = (phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
| PAGE_PRESENT_MASK;
if (is_huge)
@@ -930,7 +930,7 @@ static int hl_mmu_v1_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
return -EFAULT;
- if (hops->hop_info[i].hop_pte_val & LAST_MASK)
+ if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
break;
}
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 42c1769ad25d..45c715325e2a 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -139,7 +139,7 @@ static ssize_t cpld_ver_show(struct device *dev, struct device_attribute *attr,
struct hl_device *hdev = dev_get_drvdata(dev);
return sprintf(buf, "0x%08x\n",
- hdev->asic_prop.cpucp_info.cpld_version);
+ le32_to_cpu(hdev->asic_prop.cpucp_info.cpld_version));
}
static ssize_t cpucp_kernel_ver_show(struct device *dev,
@@ -163,8 +163,13 @@ static ssize_t infineon_ver_show(struct device *dev,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "0x%04x\n",
- hdev->asic_prop.cpucp_info.infineon_version);
+ if (hdev->asic_prop.cpucp_info.infineon_second_stage_version)
+ return sprintf(buf, "%#04x %#04x\n",
+ le32_to_cpu(hdev->asic_prop.cpucp_info.infineon_version),
+ le32_to_cpu(hdev->asic_prop.cpucp_info.infineon_second_stage_version));
+ else
+ return sprintf(buf, "%#04x\n",
+ le32_to_cpu(hdev->asic_prop.cpucp_info.infineon_version));
}
static ssize_t fuse_ver_show(struct device *dev, struct device_attribute *attr,
@@ -206,7 +211,7 @@ static ssize_t soft_reset_store(struct device *dev,
goto out;
}
- if (!hdev->allow_inference_soft_reset) {
+ if (!hdev->asic_prop.allow_inference_soft_reset) {
dev_err(hdev->dev, "Device does not support inference soft-reset\n");
goto out;
}
@@ -236,7 +241,7 @@ static ssize_t hard_reset_store(struct device *dev,
dev_warn(hdev->dev, "Hard-Reset requested through sysfs\n");
- hl_device_reset(hdev, HL_RESET_HARD);
+ hl_device_reset(hdev, HL_DRV_RESET_HARD);
out:
return count;
@@ -298,7 +303,7 @@ static ssize_t soft_reset_cnt_show(struct device *dev,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hdev->soft_reset_cnt);
+ return sprintf(buf, "%d\n", hdev->reset_info.soft_reset_cnt);
}
static ssize_t hard_reset_cnt_show(struct device *dev,
@@ -306,7 +311,7 @@ static ssize_t hard_reset_cnt_show(struct device *dev,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hdev->hard_reset_cnt);
+ return sprintf(buf, "%d\n", hdev->reset_info.hard_reset_cnt);
}
static ssize_t max_power_show(struct device *dev, struct device_attribute *attr,
@@ -419,8 +424,6 @@ static struct attribute *hl_dev_attrs[] = {
&dev_attr_max_power.attr,
&dev_attr_pci_addr.attr,
&dev_attr_preboot_btl_ver.attr,
- &dev_attr_soft_reset.attr,
- &dev_attr_soft_reset_cnt.attr,
&dev_attr_status.attr,
&dev_attr_thermal_ver.attr,
&dev_attr_uboot_ver.attr,
@@ -445,15 +448,25 @@ static const struct attribute_group *hl_dev_attr_groups[] = {
NULL,
};
+static struct attribute *hl_dev_inference_attrs[] = {
+ &dev_attr_soft_reset.attr,
+ &dev_attr_soft_reset_cnt.attr,
+ NULL,
+};
+
+static struct attribute_group hl_dev_inference_attr_group = {
+ .attrs = hl_dev_inference_attrs,
+};
+
+static const struct attribute_group *hl_dev_inference_attr_groups[] = {
+ &hl_dev_inference_attr_group,
+ NULL,
+};
+
int hl_sysfs_init(struct hl_device *hdev)
{
int rc;
- if (hdev->asic_type == ASIC_GOYA)
- hdev->pm_mng_profile = PM_AUTO;
- else
- hdev->pm_mng_profile = PM_MANUAL;
-
hdev->max_power = hdev->asic_prop.max_power_default;
hdev->asic_funcs->add_device_attr(hdev, &hl_dev_clks_attr_group);
@@ -465,10 +478,25 @@ int hl_sysfs_init(struct hl_device *hdev)
return rc;
}
+ if (!hdev->asic_prop.allow_inference_soft_reset)
+ return 0;
+
+ rc = device_add_groups(hdev->dev, hl_dev_inference_attr_groups);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to add groups to device, error %d\n", rc);
+ return rc;
+ }
+
return 0;
}
void hl_sysfs_fini(struct hl_device *hdev)
{
device_remove_groups(hdev->dev, hl_dev_attr_groups);
+
+ if (!hdev->asic_prop.allow_inference_soft_reset)
+ return;
+
+ device_remove_groups(hdev->dev, hl_dev_inference_attr_groups);
}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 825737dfe381..013c6da2e3ca 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2020 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -593,26 +593,27 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
else
prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
prop->mmu_pte_size = HL_PTE_SIZE;
- prop->mmu_hop_table_size = HOP_TABLE_SIZE;
- prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
+ prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
prop->dram_supports_virtual_memory = false;
- prop->pmmu.hop0_shift = HOP0_SHIFT;
- prop->pmmu.hop1_shift = HOP1_SHIFT;
- prop->pmmu.hop2_shift = HOP2_SHIFT;
- prop->pmmu.hop3_shift = HOP3_SHIFT;
- prop->pmmu.hop4_shift = HOP4_SHIFT;
- prop->pmmu.hop0_mask = HOP0_MASK;
- prop->pmmu.hop1_mask = HOP1_MASK;
- prop->pmmu.hop2_mask = HOP2_MASK;
- prop->pmmu.hop3_mask = HOP3_MASK;
- prop->pmmu.hop4_mask = HOP4_MASK;
+ prop->pmmu.hop0_shift = MMU_V1_1_HOP0_SHIFT;
+ prop->pmmu.hop1_shift = MMU_V1_1_HOP1_SHIFT;
+ prop->pmmu.hop2_shift = MMU_V1_1_HOP2_SHIFT;
+ prop->pmmu.hop3_shift = MMU_V1_1_HOP3_SHIFT;
+ prop->pmmu.hop4_shift = MMU_V1_1_HOP4_SHIFT;
+ prop->pmmu.hop0_mask = MMU_V1_1_HOP0_MASK;
+ prop->pmmu.hop1_mask = MMU_V1_1_HOP1_MASK;
+ prop->pmmu.hop2_mask = MMU_V1_1_HOP2_MASK;
+ prop->pmmu.hop3_mask = MMU_V1_1_HOP3_MASK;
+ prop->pmmu.hop4_mask = MMU_V1_1_HOP4_MASK;
prop->pmmu.start_addr = VA_HOST_SPACE_START;
prop->pmmu.end_addr =
(VA_HOST_SPACE_START + VA_HOST_SPACE_SIZE / 2) - 1;
prop->pmmu.page_size = PAGE_SIZE_4KB;
prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
+ prop->pmmu.last_mask = LAST_MASK;
/* PMMU and HPMMU are the same except of page size */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
@@ -664,6 +665,8 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->clk_pll_index = HL_GAUDI_MME_PLL;
prop->max_freq_value = GAUDI_MAX_CLK_FREQ;
+ prop->use_get_power_for_reset_history = true;
+
return 0;
}
@@ -878,6 +881,11 @@ static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
int rc;
if (hdev->asic_prop.fw_security_enabled) {
+ struct gaudi_device *gaudi = hdev->asic_specific;
+
+ if (!(gaudi->hw_cap_initialized & HW_CAP_CPU_Q))
+ return 0;
+
rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI_CPU_PLL, pll_freq_arr);
if (rc)
@@ -1273,6 +1281,7 @@ static int gaudi_collective_wait_init_cs(struct hl_cs *cs)
container_of(cs->signal_fence, struct hl_cs_compl, base_fence);
struct hl_cs_compl *cs_cmpl =
container_of(cs->fence, struct hl_cs_compl, base_fence);
+ struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
struct gaudi_collective_properties *cprop;
u32 stream, queue_id, sob_group_offset;
struct gaudi_device *gaudi;
@@ -1285,10 +1294,16 @@ static int gaudi_collective_wait_init_cs(struct hl_cs *cs)
gaudi = hdev->asic_specific;
cprop = &gaudi->collective_props;
- /* In encaps signals case the SOB info will be retrieved from
- * the handle in gaudi_collective_slave_init_job.
- */
- if (!cs->encaps_signals) {
+ if (cs->encaps_signals) {
+ cs_cmpl->hw_sob = handle->hw_sob;
+		/* at this checkpoint we only need the hw_sob pointer
+		 * for the completion check before starting to go over the
+		 * jobs of the master/slaves; the sob_value will be taken
+		 * later on in gaudi_collective_slave_init_job, depending
+		 * on each job's wait offset value.
+ */
+ cs_cmpl->sob_val = 0;
+ } else {
/* copy the SOB id and value of the signal CS */
cs_cmpl->hw_sob = signal_cs_cmpl->hw_sob;
cs_cmpl->sob_val = signal_cs_cmpl->sob_val;
@@ -1621,6 +1636,8 @@ static int gaudi_late_init(struct hl_device *hdev)
*/
gaudi_mmu_prepare(hdev, 1);
+ hdev->asic_funcs->set_pll_profile(hdev, PLL_LAST);
+
return 0;
disable_pci_access:
@@ -4006,7 +4023,7 @@ static void gaudi_init_firmware_loader(struct hl_device *hdev)
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
/* fill common fields */
- fw_loader->linux_loaded = false;
+ fw_loader->fw_comp_loaded = FW_TYPE_NONE;
fw_loader->boot_fit_img.image_name = GAUDI_BOOT_FIT_FILE;
fw_loader->linux_img.image_name = GAUDI_LINUX_FW_FILE;
fw_loader->cpu_timeout = GAUDI_CPU_TIMEOUT_USEC;
@@ -4289,13 +4306,31 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset
* via the GIC. Otherwise, we need to use COMMS or the MSG_TO_CPU
* registers in case of old F/Ws
*/
- if (hdev->fw_loader.linux_loaded) {
+ if (hdev->fw_loader.fw_comp_loaded & FW_TYPE_LINUX) {
irq_handler_offset = hdev->asic_prop.gic_interrupts_enable ?
mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR :
le32_to_cpu(dyn_regs->gic_host_halt_irq);
WREG32(irq_handler_offset,
gaudi_irq_map_table[GAUDI_EVENT_HALT_MACHINE].cpu_id);
+
+ /* This is a hail-mary attempt to revive the card in the small chance that the
+		 * f/w has experienced a watchdog event, which caused it to return to preboot.
+ * In that case, triggering reset through GIC won't help. We need to trigger the
+ * reset as if Linux wasn't loaded.
+ *
+ * We do it only if the reset cause was HB, because that would be the indication
+ * of such an event.
+ *
+		 * In case the watchdog hasn't expired but we still got HB, this won't do any
+ * damage.
+ */
+ if (hdev->reset_info.curr_reset_cause == HL_RESET_CAUSE_HEARTBEAT) {
+ if (hdev->asic_prop.hard_reset_done_by_fw)
+ hl_fw_ask_hard_reset_without_linux(hdev);
+ else
+ hl_fw_ask_halt_machine_without_linux(hdev);
+ }
} else {
if (hdev->asic_prop.hard_reset_done_by_fw)
hl_fw_ask_hard_reset_without_linux(hdev);
@@ -6412,6 +6447,7 @@ static int gaudi_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
{
u32 dma_core_sts0, err_cause, cfg1, size_left, pos, size_to_dma;
struct gaudi_device *gaudi = hdev->asic_specific;
+ u32 qm_glbl_sts0, qm_cgm_sts;
u64 dma_offset, qm_offset;
dma_addr_t dma_addr;
void *kernel_addr;
@@ -6436,14 +6472,20 @@ static int gaudi_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size,
dma_offset = dma_id * DMA_CORE_OFFSET;
qm_offset = dma_id * DMA_QMAN_OFFSET;
dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset);
- is_eng_idle = IS_DMA_IDLE(dma_core_sts0);
+ qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + qm_offset);
+ qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + qm_offset);
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
+ IS_DMA_IDLE(dma_core_sts0);
if (!is_eng_idle) {
dma_id = gaudi_dma_assignment[GAUDI_PCI_DMA_2];
dma_offset = dma_id * DMA_CORE_OFFSET;
qm_offset = dma_id * DMA_QMAN_OFFSET;
dma_core_sts0 = RREG32(mmDMA0_CORE_STS0 + dma_offset);
- is_eng_idle = IS_DMA_IDLE(dma_core_sts0);
+ qm_glbl_sts0 = RREG32(mmDMA0_QM_GLBL_STS0 + qm_offset);
+ qm_cgm_sts = RREG32(mmDMA0_QM_CGM_STS + qm_offset);
+ is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_cgm_sts) &&
+ IS_DMA_IDLE(dma_core_sts0);
if (!is_eng_idle) {
dev_err_ratelimited(hdev->dev,
@@ -6522,7 +6564,7 @@ static u64 gaudi_read_pte(struct hl_device *hdev, u64 addr)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- if (hdev->hard_reset_pending)
+ if (hdev->reset_info.hard_reset_pending)
return U64_MAX;
return readq(hdev->pcie_bar[HBM_BAR_ID] +
@@ -6533,7 +6575,7 @@ static void gaudi_write_pte(struct hl_device *hdev, u64 addr, u64 val)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- if (hdev->hard_reset_pending)
+ if (hdev->reset_info.hard_reset_pending)
return;
writeq(val, hdev->pcie_bar[HBM_BAR_ID] +
@@ -6935,8 +6977,9 @@ event_not_supported:
snprintf(desc, size, "N/A");
}
-static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev,
- u32 x_y, bool is_write)
+static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev, u32 x_y,
+ bool is_write, s32 *engine_id_1,
+ s32 *engine_id_2)
{
u32 dma_id[2], dma_offset, err_cause[2], mask, i;
@@ -6976,44 +7019,64 @@ static const char *gaudi_get_razwi_initiator_dma_name(struct hl_device *hdev,
switch (x_y) {
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_S_1:
- if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_0;
return "DMA0";
- else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ } else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_2;
return "DMA2";
- else
+ } else {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_0;
+ *engine_id_2 = GAUDI_ENGINE_ID_DMA_2;
return "DMA0 or DMA2";
+ }
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_S_1:
- if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_1;
return "DMA1";
- else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ } else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_3;
return "DMA3";
- else
+ } else {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_1;
+ *engine_id_2 = GAUDI_ENGINE_ID_DMA_3;
return "DMA1 or DMA3";
+ }
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
- if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_4;
return "DMA4";
- else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ } else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_6;
return "DMA6";
- else
+ } else {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_4;
+ *engine_id_2 = GAUDI_ENGINE_ID_DMA_6;
return "DMA4 or DMA6";
+ }
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
- if ((err_cause[0] & mask) && !(err_cause[1] & mask))
+ if ((err_cause[0] & mask) && !(err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_5;
return "DMA5";
- else if (!(err_cause[0] & mask) && (err_cause[1] & mask))
+ } else if (!(err_cause[0] & mask) && (err_cause[1] & mask)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_7;
return "DMA7";
- else
+ } else {
+ *engine_id_1 = GAUDI_ENGINE_ID_DMA_5;
+ *engine_id_2 = GAUDI_ENGINE_ID_DMA_7;
return "DMA5 or DMA7";
+ }
}
unknown_initiator:
return "unknown initiator";
}
-static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev,
- bool is_write)
+static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev, bool is_write,
+ u32 *engine_id_1, u32 *engine_id_2)
{
u32 val, x_y, axi_id;
@@ -7026,24 +7089,35 @@ static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev,
switch (x_y) {
case RAZWI_INITIATOR_ID_X_Y_TPC0_NIC0:
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_0;
return "TPC0";
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
+ }
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_NIC_0;
return "NIC0";
+ }
break;
case RAZWI_INITIATOR_ID_X_Y_TPC1:
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_1;
return "TPC1";
case RAZWI_INITIATOR_ID_X_Y_MME0_0:
case RAZWI_INITIATOR_ID_X_Y_MME0_1:
+ *engine_id_1 = GAUDI_ENGINE_ID_MME_0;
return "MME0";
case RAZWI_INITIATOR_ID_X_Y_MME1_0:
case RAZWI_INITIATOR_ID_X_Y_MME1_1:
+ *engine_id_1 = GAUDI_ENGINE_ID_MME_1;
return "MME1";
case RAZWI_INITIATOR_ID_X_Y_TPC2:
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_2;
return "TPC2";
case RAZWI_INITIATOR_ID_X_Y_TPC3_PCI_CPU_PSOC:
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_3;
return "TPC3";
+ }
+		/* PCI, CPU and PSOC do not have an engine id */
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_PCI))
return "PCI";
if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_CPU))
@@ -7059,32 +7133,49 @@ static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev,
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_W_N_1:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_0:
case RAZWI_INITIATOR_ID_X_Y_DMA_IF_E_N_1:
- return gaudi_get_razwi_initiator_dma_name(hdev, x_y, is_write);
+ return gaudi_get_razwi_initiator_dma_name(hdev, x_y, is_write,
+ engine_id_1, engine_id_2);
case RAZWI_INITIATOR_ID_X_Y_TPC4_NIC1_NIC2:
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_4;
return "TPC4";
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
+ }
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_NIC_1;
return "NIC1";
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT))
+ }
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_NIC_2;
return "NIC2";
+ }
break;
case RAZWI_INITIATOR_ID_X_Y_TPC5:
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_5;
return "TPC5";
case RAZWI_INITIATOR_ID_X_Y_MME2_0:
case RAZWI_INITIATOR_ID_X_Y_MME2_1:
+ *engine_id_1 = GAUDI_ENGINE_ID_MME_2;
return "MME2";
case RAZWI_INITIATOR_ID_X_Y_MME3_0:
case RAZWI_INITIATOR_ID_X_Y_MME3_1:
+ *engine_id_1 = GAUDI_ENGINE_ID_MME_3;
return "MME3";
case RAZWI_INITIATOR_ID_X_Y_TPC6:
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_6;
return "TPC6";
case RAZWI_INITIATOR_ID_X_Y_TPC7_NIC4_NIC5:
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC))
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_TPC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_TPC_7;
return "TPC7";
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC))
+ }
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_NIC_4;
return "NIC4";
- if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT))
+ }
+ if (axi_id == RAZWI_INITIATOR_ID_AXI_ID(AXI_ID_NIC_FT)) {
+ *engine_id_1 = GAUDI_ENGINE_ID_NIC_5;
return "NIC5";
+ }
break;
default:
break;
@@ -7101,27 +7192,28 @@ static const char *gaudi_get_razwi_initiator_name(struct hl_device *hdev,
return "unknown initiator";
}
-static void gaudi_print_razwi_info(struct hl_device *hdev)
+static void gaudi_print_and_get_razwi_info(struct hl_device *hdev, u32 *engine_id_1,
+ u32 *engine_id_2)
{
if (RREG32(mmMMU_UP_RAZWI_WRITE_VLD)) {
dev_err_ratelimited(hdev->dev,
"RAZWI event caused by illegal write of %s\n",
- gaudi_get_razwi_initiator_name(hdev, true));
+ gaudi_get_razwi_initiator_name(hdev, true, engine_id_1, engine_id_2));
WREG32(mmMMU_UP_RAZWI_WRITE_VLD, 0);
}
if (RREG32(mmMMU_UP_RAZWI_READ_VLD)) {
dev_err_ratelimited(hdev->dev,
"RAZWI event caused by illegal read of %s\n",
- gaudi_get_razwi_initiator_name(hdev, false));
+ gaudi_get_razwi_initiator_name(hdev, false, engine_id_1, engine_id_2));
WREG32(mmMMU_UP_RAZWI_READ_VLD, 0);
}
}
-static void gaudi_print_mmu_error_info(struct hl_device *hdev)
+static void gaudi_print_and_get_mmu_error_info(struct hl_device *hdev, u64 *addr, u8 *type)
{
struct gaudi_device *gaudi = hdev->asic_specific;
- u64 addr;
u32 val;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU))
@@ -7129,24 +7221,24 @@ static void gaudi_print_mmu_error_info(struct hl_device *hdev)
val = RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE);
if (val & MMU_UP_PAGE_ERROR_CAPTURE_ENTRY_VALID_MASK) {
- addr = val & MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
- addr <<= 32;
- addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA);
+ *addr = val & MMU_UP_PAGE_ERROR_CAPTURE_VA_49_32_MASK;
+ *addr <<= 32;
+ *addr |= RREG32(mmMMU_UP_PAGE_ERROR_CAPTURE_VA);
- dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n",
- addr);
+ dev_err_ratelimited(hdev->dev, "MMU page fault on va 0x%llx\n", *addr);
+ *type = HL_RAZWI_PAGE_FAULT;
WREG32(mmMMU_UP_PAGE_ERROR_CAPTURE, 0);
}
val = RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE);
if (val & MMU_UP_ACCESS_ERROR_CAPTURE_ENTRY_VALID_MASK) {
- addr = val & MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK;
- addr <<= 32;
- addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA);
+ *addr = val & MMU_UP_ACCESS_ERROR_CAPTURE_VA_49_32_MASK;
+ *addr <<= 32;
+ *addr |= RREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE_VA);
- dev_err_ratelimited(hdev->dev,
- "MMU access error on va 0x%llx\n", addr);
+ dev_err_ratelimited(hdev->dev, "MMU access error on va 0x%llx\n", *addr);
+ *type = HL_RAZWI_MMU_ACCESS_ERROR;
WREG32(mmMMU_UP_ACCESS_ERROR_CAPTURE, 0);
}
@@ -7665,15 +7757,46 @@ static void gaudi_handle_qman_err(struct hl_device *hdev, u16 event_type)
static void gaudi_print_irq_info(struct hl_device *hdev, u16 event_type,
bool razwi)
{
+ u32 engine_id_1, engine_id_2;
char desc[64] = "";
+ u64 razwi_addr = 0;
+ u8 razwi_type;
+ int rc;
+
+ /*
+ * Initialize the engine ids as invalid by default; they get a valid
+ * value only if the razwi was initiated by an engine that has one.
+ * Initialize the razwi type to its default; it changes only if the
+ * razwi was caused by a page fault or an MMU access error.
+ */
+ engine_id_1 = U16_MAX;
+ engine_id_2 = U16_MAX;
+ razwi_type = U8_MAX;
gaudi_get_event_desc(event_type, desc, sizeof(desc));
dev_err_ratelimited(hdev->dev, "Received H/W interrupt %d [\"%s\"]\n",
event_type, desc);
if (razwi) {
- gaudi_print_razwi_info(hdev);
- gaudi_print_mmu_error_info(hdev);
+ gaudi_print_and_get_razwi_info(hdev, &engine_id_1, &engine_id_2);
+ gaudi_print_and_get_mmu_error_info(hdev, &razwi_addr, &razwi_type);
+
+ /* In case it's the first razwi, save its parameters */
+ rc = atomic_cmpxchg(&hdev->last_error.razwi_write_disable, 0, 1);
+ if (!rc) {
+ hdev->last_error.open_dev_timestamp = hdev->last_successful_open_ktime;
+ hdev->last_error.razwi_timestamp = ktime_get();
+ hdev->last_error.razwi_addr = razwi_addr;
+ hdev->last_error.razwi_engine_id_1 = engine_id_1;
+ hdev->last_error.razwi_engine_id_2 = engine_id_2;
+ /*
+ * If the first engine id holds an invalid value, the razwi
+ * initiator does not have an engine id
+ */
+ hdev->last_error.razwi_non_engine_initiator = (engine_id_1 == U16_MAX);
+ hdev->last_error.razwi_type = razwi_type;
+ }
}
}
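
As an aside, the first-error capture above relies on a common idiom: atomic_cmpxchg() returns the old value, so exactly one caller observes 0 and wins the right to record the details. A minimal sketch with hypothetical names (not part of this patch):

#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/types.h>

struct first_err_info {
	atomic_t write_disable;	/* 0 until the first error is recorded */
	ktime_t timestamp;
	u64 addr;
};

static void record_first_err(struct first_err_info *info, u64 addr)
{
	/* only the first caller sees the old value 0; later errors are dropped */
	if (atomic_cmpxchg(&info->write_disable, 0, 1) == 0) {
		info->timestamp = ktime_get();
		info->addr = addr;
	}
}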
@@ -7696,14 +7819,10 @@ static void gaudi_print_fw_alive_info(struct hl_device *hdev,
fw_alive->thread_id, fw_alive->uptime_seconds);
}
-static int gaudi_soft_reset_late_init(struct hl_device *hdev)
+static int gaudi_non_hard_reset_late_init(struct hl_device *hdev)
{
- struct gaudi_device *gaudi = hdev->asic_specific;
-
- /* Unmask all IRQs since some could have been received
- * during the soft reset
- */
- return hl_fw_unmask_irq_arr(hdev, gaudi->events, sizeof(gaudi->events));
+ /* GAUDI doesn't support any reset except hard-reset */
+ return -EPERM;
}
static int gaudi_hbm_read_interrupts(struct hl_device *hdev, int device,
@@ -7897,27 +8016,39 @@ static int tpc_krn_event_to_tpc_id(u16 tpc_dec_event_type)
static void gaudi_print_clk_change_info(struct hl_device *hdev,
u16 event_type)
{
+ ktime_t zero_time = ktime_set(0, 0);
+
+ mutex_lock(&hdev->clk_throttling.lock);
+
switch (event_type) {
case GAUDI_EVENT_FIX_POWER_ENV_S:
- hdev->clk_throttling_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to power consumption\n");
break;
case GAUDI_EVENT_FIX_POWER_ENV_E:
- hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
dev_info_ratelimited(hdev->dev,
"Power envelop is safe, back to optimal clock\n");
break;
case GAUDI_EVENT_FIX_THERMAL_ENV_S:
- hdev->clk_throttling_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to overheating\n");
break;
case GAUDI_EVENT_FIX_THERMAL_ENV_E:
- hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
dev_info_ratelimited(hdev->dev,
"Thermal envelop is safe, back to optimal clock\n");
break;
@@ -7927,6 +8058,8 @@ static void gaudi_print_clk_change_info(struct hl_device *hdev,
event_type);
break;
}
+
+ mutex_unlock(&hdev->clk_throttling.lock);
}
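
For context, the start/end pair kept per throttling type above reduces to a duration once the end event arrives; an end time of zero marks an event still in progress. A hedged sketch (throttle_duration_ms() is hypothetical):

#include <linux/ktime.h>

static s64 throttle_duration_ms(ktime_t start, ktime_t end)
{
	/* end == 0 means throttling started but has not ended yet */
	if (ktime_compare(end, ktime_set(0, 0)) == 0)
		end = ktime_get();
	return ktime_ms_delta(end, start);
}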
static void gaudi_handle_eqe(struct hl_device *hdev,
@@ -7975,7 +8108,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
- fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR;
+ fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
goto reset_device;
case GAUDI_EVENT_GIC500:
@@ -7983,7 +8116,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_L2_RAM_ECC:
case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
gaudi_print_irq_info(hdev, event_type, false);
- fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR;
+ fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_0:
@@ -7994,7 +8127,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
gaudi_hbm_read_interrupts(hdev,
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
- fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR;
+ fw_fatal_err_flag = HL_DRV_RESET_FW_FATAL_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_1:
@@ -8177,9 +8310,11 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
reset_device:
if (hdev->asic_prop.fw_security_enabled)
- hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FW | fw_fatal_err_flag);
+ hl_device_reset(hdev, HL_DRV_RESET_HARD
+ | HL_DRV_RESET_BYPASS_REQ_TO_FW
+ | fw_fatal_err_flag);
else if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, HL_RESET_HARD | fw_fatal_err_flag);
+ hl_device_reset(hdev, HL_DRV_RESET_HARD | fw_fatal_err_flag);
else
hl_fw_unmask_irq(hdev, event_type);
}
@@ -8206,7 +8341,7 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
int rc;
if (!(gaudi->hw_cap_initialized & HW_CAP_MMU) ||
- hdev->hard_reset_pending)
+ hdev->reset_info.hard_reset_pending)
return 0;
if (hdev->pldm)
@@ -8229,12 +8364,6 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
WREG32(mmSTLB_INV_SET, 0);
- if (rc) {
- dev_err_ratelimited(hdev->dev,
- "MMU cache invalidation timeout\n");
- hl_device_reset(hdev, HL_RESET_HARD);
- }
-
return rc;
}
@@ -8662,7 +8791,7 @@ static int gaudi_internal_cb_pool_init(struct hl_device *hdev,
hdev->internal_cb_pool_dma_addr,
HOST_SPACE_INTERNAL_CB_SZ);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, false, VM_TYPE_USERPTR);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
mutex_unlock(&ctx->mmu_lock);
if (rc)
@@ -8697,7 +8826,7 @@ static void gaudi_internal_cb_pool_fini(struct hl_device *hdev,
HOST_SPACE_INTERNAL_CB_SZ);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base,
HOST_SPACE_INTERNAL_CB_SZ);
- hdev->asic_funcs->mmu_invalidate_cache(hdev, true, VM_TYPE_USERPTR);
+ hdev->asic_funcs->mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
mutex_unlock(&ctx->mmu_lock);
gen_pool_destroy(hdev->internal_cb_pool);
@@ -9458,7 +9587,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.disable_clock_gating = gaudi_disable_clock_gating,
.debug_coresight = gaudi_debug_coresight,
.is_device_idle = gaudi_is_device_idle,
- .soft_reset_late_init = gaudi_soft_reset_late_init,
+ .non_hard_reset_late_init = gaudi_non_hard_reset_late_init,
.hw_queues_lock = gaudi_hw_queues_lock,
.hw_queues_unlock = gaudi_hw_queues_unlock,
.get_pci_id = gaudi_get_pci_id,
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index f325e36a71e6..8ac16a9b7d15 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -357,8 +357,8 @@ void gaudi_init_security(struct hl_device *hdev);
void gaudi_ack_protection_bits_errors(struct hl_device *hdev);
void gaudi_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
-int gaudi_debug_coresight(struct hl_device *hdev, void *data);
-void gaudi_halt_coresight(struct hl_device *hdev);
+int gaudi_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
+void gaudi_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx);
void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid);
#endif /* GAUDIP_H_ */
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index 5349c1be13f9..08108f5fed67 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -848,7 +848,7 @@ static int gaudi_config_spmu(struct hl_device *hdev,
return 0;
}
-int gaudi_debug_coresight(struct hl_device *hdev, void *data)
+int gaudi_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data)
{
struct hl_debug_params *params = data;
int rc = 0;
@@ -887,7 +887,7 @@ int gaudi_debug_coresight(struct hl_device *hdev, void *data)
return rc;
}
-void gaudi_halt_coresight(struct hl_device *hdev)
+void gaudi_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_debug_params params = {};
int i, rc;
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 5536e8c27bd5..fbcc7bbf44b3 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -410,25 +410,26 @@ int goya_set_fixed_properties(struct hl_device *hdev)
else
prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE;
prop->mmu_pte_size = HL_PTE_SIZE;
- prop->mmu_hop_table_size = HOP_TABLE_SIZE;
- prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE;
+ prop->mmu_hop_table_size = HOP_TABLE_SIZE_512_PTE;
+ prop->mmu_hop0_tables_total_size = HOP0_512_PTE_TABLES_TOTAL_SIZE;
prop->dram_page_size = PAGE_SIZE_2MB;
prop->dram_supports_virtual_memory = true;
- prop->dmmu.hop0_shift = HOP0_SHIFT;
- prop->dmmu.hop1_shift = HOP1_SHIFT;
- prop->dmmu.hop2_shift = HOP2_SHIFT;
- prop->dmmu.hop3_shift = HOP3_SHIFT;
- prop->dmmu.hop4_shift = HOP4_SHIFT;
- prop->dmmu.hop0_mask = HOP0_MASK;
- prop->dmmu.hop1_mask = HOP1_MASK;
- prop->dmmu.hop2_mask = HOP2_MASK;
- prop->dmmu.hop3_mask = HOP3_MASK;
- prop->dmmu.hop4_mask = HOP4_MASK;
+ prop->dmmu.hop0_shift = MMU_V1_0_HOP0_SHIFT;
+ prop->dmmu.hop1_shift = MMU_V1_0_HOP1_SHIFT;
+ prop->dmmu.hop2_shift = MMU_V1_0_HOP2_SHIFT;
+ prop->dmmu.hop3_shift = MMU_V1_0_HOP3_SHIFT;
+ prop->dmmu.hop4_shift = MMU_V1_0_HOP4_SHIFT;
+ prop->dmmu.hop0_mask = MMU_V1_0_HOP0_MASK;
+ prop->dmmu.hop1_mask = MMU_V1_0_HOP1_MASK;
+ prop->dmmu.hop2_mask = MMU_V1_0_HOP2_MASK;
+ prop->dmmu.hop3_mask = MMU_V1_0_HOP3_MASK;
+ prop->dmmu.hop4_mask = MMU_V1_0_HOP4_MASK;
prop->dmmu.start_addr = VA_DDR_SPACE_START;
prop->dmmu.end_addr = VA_DDR_SPACE_END;
prop->dmmu.page_size = PAGE_SIZE_2MB;
prop->dmmu.num_hops = MMU_ARCH_5_HOPS;
+ prop->dmmu.last_mask = LAST_MASK;
/* shifts and masks are the same in PMMU and DMMU */
memcpy(&prop->pmmu, &prop->dmmu, sizeof(prop->dmmu));
@@ -436,6 +437,7 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->pmmu.end_addr = VA_HOST_SPACE_END;
prop->pmmu.page_size = PAGE_SIZE_4KB;
prop->pmmu.num_hops = MMU_ARCH_5_HOPS;
+ prop->pmmu.last_mask = LAST_MASK;
/* PMMU and HPMMU are the same except of page size */
memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu));
@@ -473,6 +475,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->clk_pll_index = HL_GOYA_MME_PLL;
+ prop->use_get_power_for_reset_history = true;
+
return 0;
}
@@ -735,6 +739,11 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev)
int rc;
if (hdev->asic_prop.fw_security_enabled) {
+ struct goya_device *goya = hdev->asic_specific;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q))
+ return;
+
rc = hl_fw_cpucp_pll_info_get(hdev, HL_GOYA_PCI_PLL,
pll_freq_arr);
@@ -778,9 +787,59 @@ static void goya_fetch_psoc_frequency(struct hl_device *hdev)
prop->psoc_pci_pll_div_factor = div_fctr;
}
+/*
+ * goya_set_frequency - set the frequency of the device
+ *
+ * @hdev: pointer to habanalabs device structure
+ * @freq: the new frequency value
+ *
+ * Change the frequency if needed. This function has no protection against
+ * concurrency, so it is assumed that the caller serializes calls to it
+ * from multiple threads with different values.
+ *
+ * Returns 0 if no change was done, otherwise returns 1.
+ */
+int goya_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq)
+{
+ struct goya_device *goya = hdev->asic_specific;
+
+ if ((goya->pm_mng_profile == PM_MANUAL) ||
+ (goya->curr_pll_profile == freq))
+ return 0;
+
+ dev_dbg(hdev->dev, "Changing device frequency to %s\n",
+ freq == PLL_HIGH ? "high" : "low");
+
+ goya_set_pll_profile(hdev, freq);
+
+ goya->curr_pll_profile = freq;
+
+ return 1;
+}
+
+static void goya_set_freq_to_low_job(struct work_struct *work)
+{
+ struct goya_work_freq *goya_work = container_of(work,
+ struct goya_work_freq,
+ work_freq.work);
+ struct hl_device *hdev = goya_work->hdev;
+
+ mutex_lock(&hdev->fpriv_list_lock);
+
+ if (!hdev->is_compute_ctx_active)
+ goya_set_frequency(hdev, PLL_LOW);
+
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ schedule_delayed_work(&goya_work->work_freq,
+ usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
+}
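
goya_set_freq_to_low_job() above is the usual self-rescheduling delayed-work idiom; the cancel_delayed_work_sync() added to goya_late_fini() below is what eventually stops the loop. A minimal sketch with hypothetical names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct poll_ctx {
	struct delayed_work dwork;
};

static void poll_fn(struct work_struct *work)
{
	struct poll_ctx *ctx = container_of(work, struct poll_ctx, dwork.work);

	/* ... periodic check goes here ... */

	/* re-arm; cancel_delayed_work_sync(&ctx->dwork) ends the cycle */
	schedule_delayed_work(&ctx->dwork, msecs_to_jiffies(1000));
}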
+
int goya_late_init(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct goya_device *goya = hdev->asic_specific;
int rc;
goya_fetch_psoc_frequency(hdev);
@@ -829,6 +888,16 @@ int goya_late_init(struct hl_device *hdev)
return rc;
}
+ /* force setting to low frequency */
+ goya->curr_pll_profile = PLL_LOW;
+
+ goya->pm_mng_profile = PM_AUTO;
+
+ hdev->asic_funcs->set_pll_profile(hdev, PLL_LOW);
+
+ schedule_delayed_work(&goya->goya_work->work_freq,
+ usecs_to_jiffies(HL_PLL_LOW_JOB_FREQ_USEC));
+
return 0;
}
@@ -842,8 +911,11 @@ int goya_late_init(struct hl_device *hdev)
void goya_late_fini(struct hl_device *hdev)
{
const struct hwmon_channel_info **channel_info_arr;
+ struct goya_device *goya = hdev->asic_specific;
int i = 0;
+ cancel_delayed_work_sync(&goya->goya_work->work_freq);
+
if (!hdev->hl_chip_info->info)
return;
@@ -961,12 +1033,21 @@ static int goya_sw_init(struct hl_device *hdev)
spin_lock_init(&goya->hw_queues_lock);
hdev->supports_coresight = true;
- hdev->supports_soft_reset = true;
- hdev->allow_inference_soft_reset = true;
+ hdev->asic_prop.supports_soft_reset = true;
+ hdev->asic_prop.allow_inference_soft_reset = true;
hdev->supports_wait_for_multi_cs = false;
hdev->asic_funcs->set_pci_memory_regions(hdev);
+ goya->goya_work = kmalloc(sizeof(struct goya_work_freq), GFP_KERNEL);
+ if (!goya->goya_work) {
+ rc = -ENOMEM;
+ goto free_cpu_accessible_dma_pool;
+ }
+
+ goya->goya_work->hdev = hdev;
+ INIT_DELAYED_WORK(&goya->goya_work->work_freq, goya_set_freq_to_low_job);
+
return 0;
free_cpu_accessible_dma_pool:
@@ -1003,6 +1084,7 @@ static int goya_sw_fini(struct hl_device *hdev)
dma_pool_destroy(hdev->dma_pool);
+ kfree(goya->goya_work);
kfree(goya);
return 0;
@@ -2502,7 +2584,7 @@ static void goya_init_firmware_loader(struct hl_device *hdev)
struct fw_load_mgr *fw_loader = &hdev->fw_loader;
/* fill common fields */
- fw_loader->linux_loaded = false;
+ fw_loader->fw_comp_loaded = FW_TYPE_NONE;
fw_loader->boot_fit_img.image_name = GOYA_BOOT_FIT_FILE;
fw_loader->linux_img.image_name = GOYA_LINUX_FW_FILE;
fw_loader->cpu_timeout = GOYA_CPU_TIMEOUT_USEC;
@@ -2619,7 +2701,7 @@ int goya_mmu_init(struct hl_device *hdev)
(~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK));
hdev->asic_funcs->mmu_invalidate_cache(hdev, true,
- VM_TYPE_USERPTR | VM_TYPE_PHYS_PACK);
+ MMU_OP_USERPTR | MMU_OP_PHYS_PACK);
WREG32(mmMMU_MMU_ENABLE, 1);
WREG32(mmMMU_SPI_MASK, 0xF);
@@ -4395,7 +4477,7 @@ static u64 goya_read_pte(struct hl_device *hdev, u64 addr)
{
struct goya_device *goya = hdev->asic_specific;
- if (hdev->hard_reset_pending)
+ if (hdev->reset_info.hard_reset_pending)
return U64_MAX;
return readq(hdev->pcie_bar[DDR_BAR_ID] +
@@ -4406,7 +4488,7 @@ static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val)
{
struct goya_device *goya = hdev->asic_specific;
- if (hdev->hard_reset_pending)
+ if (hdev->reset_info.hard_reset_pending)
return;
writeq(val, hdev->pcie_bar[DDR_BAR_ID] +
@@ -4731,7 +4813,7 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
return rc;
}
-static int goya_soft_reset_late_init(struct hl_device *hdev)
+static int goya_non_hard_reset_late_init(struct hl_device *hdev)
{
/*
* Unmask all IRQs since some could have been received
@@ -4764,24 +4846,39 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type)
{
+ ktime_t zero_time = ktime_set(0, 0);
+
+ mutex_lock(&hdev->clk_throttling.lock);
+
switch (event_type) {
case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_S:
- hdev->clk_throttling_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get();
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to power consumption\n");
break;
+
case GOYA_ASYNC_EVENT_ID_FIX_POWER_ENV_E:
- hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get();
dev_info_ratelimited(hdev->dev,
"Power envelop is safe, back to optimal clock\n");
break;
+
case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_S:
- hdev->clk_throttling_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get();
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time;
dev_info_ratelimited(hdev->dev,
"Clock throttling due to overheating\n");
break;
+
case GOYA_ASYNC_EVENT_ID_FIX_THERMAL_ENV_E:
- hdev->clk_throttling_reason &= ~HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL;
+ hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get();
dev_info_ratelimited(hdev->dev,
"Thermal envelop is safe, back to optimal clock\n");
break;
@@ -4791,6 +4888,8 @@ static void goya_print_clk_change_info(struct hl_device *hdev, u16 event_type)
event_type);
break;
}
+
+ mutex_unlock(&hdev->clk_throttling.lock);
}
void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
@@ -4834,14 +4933,14 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
goya_print_irq_info(hdev, event_type, false);
if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, (HL_RESET_HARD |
- HL_RESET_FW_FATAL_ERR));
+ hl_device_reset(hdev, (HL_DRV_RESET_HARD |
+ HL_DRV_RESET_FW_FATAL_ERR));
break;
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
goya_print_irq_info(hdev, event_type, false);
if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, HL_RESET_HARD);
+ hl_device_reset(hdev, HL_DRV_RESET_HARD);
break;
case GOYA_ASYNC_EVENT_ID_PCIE_DEC:
@@ -4901,7 +5000,7 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
goya_print_irq_info(hdev, event_type, false);
goya_print_out_of_sync_info(hdev, &eq_entry->pkt_sync_err);
if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, HL_RESET_HARD);
+ hl_device_reset(hdev, HL_DRV_RESET_HARD);
else
hl_fw_unmask_irq(hdev, event_type);
break;
@@ -5209,7 +5308,7 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
int rc;
if (!(goya->hw_cap_initialized & HW_CAP_MMU) ||
- hdev->hard_reset_pending)
+ hdev->reset_info.hard_reset_pending)
return 0;
/* no need in L1 only invalidation in Goya */
@@ -5232,12 +5331,6 @@ static int goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
1000,
timeout_usec);
- if (rc) {
- dev_err_ratelimited(hdev->dev,
- "MMU cache invalidation timeout\n");
- hl_device_reset(hdev, HL_RESET_HARD);
- }
-
return rc;
}
@@ -5645,7 +5738,7 @@ static const struct hl_asic_funcs goya_funcs = {
.disable_clock_gating = goya_disable_clock_gating,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,
- .soft_reset_late_init = goya_soft_reset_late_init,
+ .non_hard_reset_late_init = goya_non_hard_reset_late_init,
.hw_queues_lock = goya_hw_queues_lock,
.hw_queues_unlock = goya_hw_queues_unlock,
.get_pci_id = goya_get_pci_id,
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 97add7b04f82..3740fd25bf84 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -153,9 +153,15 @@
#define HW_CAP_GOLDEN 0x00000400
#define HW_CAP_TPC 0x00000800
+struct goya_work_freq {
+ struct hl_device *hdev;
+ struct delayed_work work_freq;
+};
+
struct goya_device {
/* TODO: remove hw_queues_lock after moving to scheduler code */
spinlock_t hw_queues_lock;
+ struct goya_work_freq *goya_work;
u64 mme_clk;
u64 tpc_clk;
@@ -166,6 +172,9 @@ struct goya_device {
u32 events_stat_aggregate[GOYA_ASYNC_EVENT_ID_SIZE];
u32 hw_cap_initialized;
u8 device_cpu_mmu_mappings_done;
+
+ enum hl_pll_frequency curr_pll_profile;
+ enum hl_pm_mng_profile pm_mng_profile;
};
int goya_set_fixed_properties(struct hl_device *hdev);
@@ -211,8 +220,8 @@ void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
void goya_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
int goya_cpucp_info_get(struct hl_device *hdev);
-int goya_debug_coresight(struct hl_device *hdev, void *data);
-void goya_halt_coresight(struct hl_device *hdev);
+int goya_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data);
+void goya_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx);
int goya_suspend(struct hl_device *hdev);
int goya_resume(struct hl_device *hdev);
@@ -237,5 +246,6 @@ void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev);
u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx);
u64 goya_get_device_time(struct hl_device *hdev);
+int goya_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq);
#endif /* GOYAP_H_ */
diff --git a/drivers/misc/habanalabs/goya/goya_coresight.c b/drivers/misc/habanalabs/goya/goya_coresight.c
index c55c100fdd24..2c5133cfae65 100644
--- a/drivers/misc/habanalabs/goya/goya_coresight.c
+++ b/drivers/misc/habanalabs/goya/goya_coresight.c
@@ -652,7 +652,7 @@ static int goya_config_spmu(struct hl_device *hdev,
return 0;
}
-int goya_debug_coresight(struct hl_device *hdev, void *data)
+int goya_debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, void *data)
{
struct hl_debug_params *params = data;
int rc = 0;
@@ -691,7 +691,7 @@ int goya_debug_coresight(struct hl_device *hdev, void *data)
return rc;
}
-void goya_halt_coresight(struct hl_device *hdev)
+void goya_halt_coresight(struct hl_device *hdev, struct hl_ctx *ctx)
{
struct hl_debug_params params = {};
int i, rc;
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index 59b2624ff81a..76b47749affe 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -62,7 +62,7 @@ static ssize_t mme_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- if (hdev->pm_mng_profile == PM_AUTO) {
+ if (goya->pm_mng_profile == PM_AUTO) {
count = -EPERM;
goto fail;
}
@@ -111,7 +111,7 @@ static ssize_t tpc_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- if (hdev->pm_mng_profile == PM_AUTO) {
+ if (goya->pm_mng_profile == PM_AUTO) {
count = -EPERM;
goto fail;
}
@@ -160,7 +160,7 @@ static ssize_t ic_clk_store(struct device *dev, struct device_attribute *attr,
goto fail;
}
- if (hdev->pm_mng_profile == PM_AUTO) {
+ if (goya->pm_mng_profile == PM_AUTO) {
count = -EPERM;
goto fail;
}
@@ -234,13 +234,14 @@ static ssize_t pm_mng_profile_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
+ struct goya_device *goya = hdev->asic_specific;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
return sprintf(buf, "%s\n",
- (hdev->pm_mng_profile == PM_AUTO) ? "auto" :
- (hdev->pm_mng_profile == PM_MANUAL) ? "manual" :
+ (goya->pm_mng_profile == PM_AUTO) ? "auto" :
+ (goya->pm_mng_profile == PM_MANUAL) ? "manual" :
"unknown");
}
@@ -248,6 +249,7 @@ static ssize_t pm_mng_profile_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct hl_device *hdev = dev_get_drvdata(dev);
+ struct goya_device *goya = hdev->asic_specific;
if (!hl_device_operational(hdev, NULL)) {
count = -ENODEV;
@@ -256,7 +258,7 @@ static ssize_t pm_mng_profile_store(struct device *dev,
mutex_lock(&hdev->fpriv_list_lock);
- if (hdev->compute_ctx) {
+ if (hdev->is_compute_ctx_active) {
dev_err(hdev->dev,
"Can't change PM profile while compute context is opened on the device\n");
count = -EPERM;
@@ -265,26 +267,27 @@ static ssize_t pm_mng_profile_store(struct device *dev,
if (strncmp("auto", buf, strlen("auto")) == 0) {
/* Make sure we are in LOW PLL when changing modes */
- if (hdev->pm_mng_profile == PM_MANUAL) {
- hdev->curr_pll_profile = PLL_HIGH;
- hdev->pm_mng_profile = PM_AUTO;
- hl_device_set_frequency(hdev, PLL_LOW);
+ if (goya->pm_mng_profile == PM_MANUAL) {
+ goya->curr_pll_profile = PLL_HIGH;
+ goya->pm_mng_profile = PM_AUTO;
+ goya_set_frequency(hdev, PLL_LOW);
}
} else if (strncmp("manual", buf, strlen("manual")) == 0) {
- if (hdev->pm_mng_profile == PM_AUTO) {
+ if (goya->pm_mng_profile == PM_AUTO) {
/* Must release the lock because the work thread also
* takes this lock. But before we release it, set
* the mode to manual so nothing will change if a user
* suddenly opens the device
*/
- hdev->pm_mng_profile = PM_MANUAL;
+ goya->pm_mng_profile = PM_MANUAL;
mutex_unlock(&hdev->fpriv_list_lock);
/* Flush the current work so we can return to the user
* knowing that he is the only one changing frequencies
*/
- flush_delayed_work(&hdev->work_freq);
+ if (goya->goya_work)
+ flush_delayed_work(&goya->goya_work->work_freq);
return count;
}
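
The manual-mode branch above encodes a locking rule worth spelling out: publish the mode change under the lock, drop the lock, and only then flush the work item, because the work function takes the same lock and flushing while holding it would deadlock. A condensed sketch (all names hypothetical):

#include <linux/mutex.h>
#include <linux/workqueue.h>

enum ctl_mode { MODE_AUTO, MODE_MANUAL };

struct ctl {
	struct mutex lock;
	enum ctl_mode mode;
	struct delayed_work dwork;
};

static void set_manual_mode(struct ctl *c)
{
	mutex_lock(&c->lock);
	c->mode = MODE_MANUAL;	/* the work sees the new mode and backs off */
	mutex_unlock(&c->lock);
	/* flush only after dropping the lock the work function also takes */
	flush_delayed_work(&c->dwork);
}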
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index ae13231fda94..737c39f33f05 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0
*
- * Copyright 2020 HabanaLabs, Ltd.
+ * Copyright 2020-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*
*/
@@ -376,6 +376,19 @@ enum pq_init_status {
* and QMANs. The f/w will return a bitmask where each bit represents
* a different engine or QMAN according to enum cpucp_idle_mask.
* The bit will be 1 if the engine is NOT idle.
+ *
+ * CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET -
+ * Fetch all HBM replaced-rows data, plus the rows pending replacement.
+ *
+ * CPUCP_PACKET_HBM_PENDING_ROWS_STATUS -
+ * Fetch the status of HBM rows that are pending replacement and
+ * need a reboot to be replaced.
+ *
+ * CPUCP_PACKET_POWER_SET -
+ * Reset the device's power history to 0.
+ *
+ * CPUCP_PACKET_ENGINE_CORE_ASID_SET -
+ * Packet to perform engine core ASID configuration
*/
enum cpucp_packet_id {
@@ -421,6 +434,11 @@ enum cpucp_packet_id {
CPUCP_PACKET_NIC_STAT_REGS_CLR, /* internal */
CPUCP_PACKET_NIC_STAT_REGS_ALL_GET, /* internal */
CPUCP_PACKET_IS_IDLE_CHECK, /* internal */
+ CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET,/* internal */
+ CPUCP_PACKET_HBM_PENDING_ROWS_STATUS, /* internal */
+ CPUCP_PACKET_POWER_SET, /* internal */
+ CPUCP_PACKET_RESERVED, /* not used */
+ CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */
};
#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
@@ -480,7 +498,14 @@ struct cpucp_packet {
__u8 i2c_bus;
__u8 i2c_addr;
__u8 i2c_reg;
- __u8 pad; /* unused */
+ /*
+ * In legacy implementations, i2c_len was not present;
+ * the field was unused and just added as pad.
+ * So if i2c_len is 0, the request is treated as legacy
+ * and reads/writes 1 byte; if i2c_len is specified,
+ * it is treated as the new multi-byte r/w support.
+ */
+ __u8 i2c_len;
};
struct {/* For PLL info fetch */
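
A hedged sketch of how a sender might use the new field (fill_i2c_req() is a hypothetical helper, not part of this interface): zero selects the legacy single-byte transfer, any other value the multi-byte path.

static void fill_i2c_req(struct cpucp_packet *pkt, u8 bus, u8 addr,
			 u8 reg, u8 len)
{
	pkt->i2c_bus = bus;
	pkt->i2c_addr = addr;
	pkt->i2c_reg = reg;
	pkt->i2c_len = len;	/* 0 = legacy 1-byte r/w, N = N-byte r/w */
}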
@@ -688,6 +713,7 @@ struct eq_generic_event {
#define CPUCP_MAX_NIC_LANES (CPUCP_MAX_NICS * CPUCP_LANES_PER_NIC)
#define CPUCP_NIC_MASK_ARR_LEN ((CPUCP_MAX_NICS + 63) / 64)
#define CPUCP_NIC_POLARITY_ARR_LEN ((CPUCP_MAX_NIC_LANES + 63) / 64)
+#define CPUCP_HBM_ROW_REPLACE_MAX 32
struct cpucp_sensor {
__le32 type;
@@ -740,6 +766,7 @@ struct cpucp_security_info {
* @fuse_version: silicon production FUSE information.
* @thermal_version: thermald S/W version.
* @cpucp_version: CpuCP S/W version.
+ * @infineon_second_stage_version: Infineon 2nd stage DC-DC version.
* @dram_size: available DRAM size.
* @card_name: card name that will be displayed in HWMON subsystem on the host
* @sec_info: security information
@@ -749,6 +776,10 @@ struct cpucp_security_info {
* @dram_binning_mask: DRAM binning mask, 1 bit per dram instance
* (0 = functional 1 = binned)
* @memory_repair_flag: eFuse flag indicating memory repair
+ * @edma_binning_mask: EDMA binning mask, 1 bit per EDMA instance
+ * (0 = functional 1 = binned)
+ * @xbar_binning_mask: Xbar binning mask, 1 bit per Xbar instance
+ * (0 = functional 1 = binned)
*/
struct cpucp_info {
struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
@@ -761,7 +792,7 @@ struct cpucp_info {
__u8 fuse_version[VERSION_MAX_LEN];
__u8 thermal_version[VERSION_MAX_LEN];
__u8 cpucp_version[VERSION_MAX_LEN];
- __le32 reserved2;
+ __le32 infineon_second_stage_version;
__le64 dram_size;
char card_name[CARD_NAME_MAX_LEN];
__le64 reserved3;
@@ -769,7 +800,9 @@ struct cpucp_info {
__u8 reserved5;
__u8 dram_binning_mask;
__u8 memory_repair_flag;
- __u8 pad[5];
+ __u8 edma_binning_mask;
+ __u8 xbar_binning_mask;
+ __u8 pad[3];
struct cpucp_security_info sec_info;
__le32 reserved6;
__u8 pll_map[PLL_MAP_LEN];
@@ -833,4 +866,25 @@ struct cpucp_nic_status {
__le32 high_ber_cnt;
};
+enum cpucp_hbm_row_replace_cause {
+ REPLACE_CAUSE_DOUBLE_ECC_ERR,
+ REPLACE_CAUSE_MULTI_SINGLE_ECC_ERR,
+};
+
+struct cpucp_hbm_row_info {
+ __u8 hbm_idx;
+ __u8 pc;
+ __u8 sid;
+ __u8 bank_idx;
+ __le16 row_addr;
+ __u8 replaced_row_cause; /* enum cpucp_hbm_row_replace_cause */
+ __u8 pad;
+};
+
+struct cpucp_hbm_row_replaced_rows_info {
+ __le16 num_replaced_rows;
+ __u8 pad[6];
+ struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX];
+};
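
A hedged sketch of a consumer of this report (dump_replaced_rows() is hypothetical); num_replaced_rows and row_addr are little-endian on the wire, hence the le16_to_cpu() conversions, and the count is clamped since it comes from firmware:

static void dump_replaced_rows(const struct cpucp_hbm_row_replaced_rows_info *info)
{
	u16 i, num = le16_to_cpu(info->num_replaced_rows);

	if (num > CPUCP_HBM_ROW_REPLACE_MAX)
		num = CPUCP_HBM_ROW_REPLACE_MAX;

	for (i = 0; i < num; i++)
		pr_info("hbm%u bank %u row 0x%x cause %u\n",
			info->replaced_rows[i].hbm_idx,
			info->replaced_rows[i].bank_idx,
			le16_to_cpu(info->replaced_rows[i].row_addr),
			info->replaced_rows[i].replaced_row_cause);
}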
+
#endif /* CPUCP_IF_H */
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index 2626df6ef3ef..135e21d6edc9 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -32,6 +32,7 @@ enum cpu_boot_err {
CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL = 13,
CPU_BOOT_ERR_BOOT_FW_CRIT_ERR = 18,
CPU_BOOT_ERR_BINNING_FAIL = 19,
+ CPU_BOOT_ERR_TPM_FAIL = 20,
CPU_BOOT_ERR_ENABLED = 31,
CPU_BOOT_ERR_SCND_EN = 63,
CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */
@@ -108,6 +109,8 @@ enum cpu_boot_err {
* malfunctioning components might still be
* in use.
*
+ * CPU_BOOT_ERR0_TPM_FAIL TPM verification flow failed.
+ *
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
* running FW populates the error
@@ -130,6 +133,7 @@ enum cpu_boot_err {
#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL)
#define CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR (1 << CPU_BOOT_ERR_BOOT_FW_CRIT_ERR)
#define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL)
+#define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL)
#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
index dedf20e8f956..758f246627f8 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h
@@ -16,27 +16,18 @@
#define PAGE_PRESENT_MASK 0x0000000000001ull
#define SWAP_OUT_MASK 0x0000000000004ull
#define LAST_MASK 0x0000000000800ull
-#define HOP0_MASK 0x3000000000000ull
-#define HOP1_MASK 0x0FF8000000000ull
-#define HOP2_MASK 0x0007FC0000000ull
-#define HOP3_MASK 0x000003FE00000ull
-#define HOP4_MASK 0x00000001FF000ull
#define FLAGS_MASK 0x0000000000FFFull
-#define HOP0_SHIFT 48
-#define HOP1_SHIFT 39
-#define HOP2_SHIFT 30
-#define HOP3_SHIFT 21
-#define HOP4_SHIFT 12
-
#define MMU_ARCH_5_HOPS 5
#define HOP_PHYS_ADDR_MASK (~FLAGS_MASK)
#define HL_PTE_SIZE sizeof(u64)
-#define HOP_TABLE_SIZE PAGE_SIZE_4KB
-#define PTE_ENTRIES_IN_HOP (HOP_TABLE_SIZE / HL_PTE_SIZE)
-#define HOP0_TABLES_TOTAL_SIZE (HOP_TABLE_SIZE * MAX_ASID)
+
+/* definitions for HOP with 512 PTE entries */
+#define HOP_PTE_ENTRIES_512 512
+#define HOP_TABLE_SIZE_512_PTE (HOP_PTE_ENTRIES_512 * HL_PTE_SIZE)
+#define HOP0_512_PTE_TABLES_TOTAL_SIZE (HOP_TABLE_SIZE_512_PTE * MAX_ASID)
#define MMU_HOP0_PA43_12_SHIFT 12
#define MMU_HOP0_PA49_44_SHIFT (12 + 32)
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
index 8539dd041f2c..86511002e367 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h
@@ -8,8 +8,20 @@
#ifndef INCLUDE_MMU_V1_0_H_
#define INCLUDE_MMU_V1_0_H_
-#define MMU_HOP0_PA43_12 0x490004
-#define MMU_HOP0_PA49_44 0x490008
-#define MMU_ASID_BUSY 0x490000
+#define MMU_V1_0_HOP0_MASK 0x3000000000000ull
+#define MMU_V1_0_HOP1_MASK 0x0FF8000000000ull
+#define MMU_V1_0_HOP2_MASK 0x0007FC0000000ull
+#define MMU_V1_0_HOP3_MASK 0x000003FE00000ull
+#define MMU_V1_0_HOP4_MASK 0x00000001FF000ull
+
+#define MMU_V1_0_HOP0_SHIFT 48
+#define MMU_V1_0_HOP1_SHIFT 39
+#define MMU_V1_0_HOP2_SHIFT 30
+#define MMU_V1_0_HOP3_SHIFT 21
+#define MMU_V1_0_HOP4_SHIFT 12
+
+#define MMU_HOP0_PA43_12 0x490004
+#define MMU_HOP0_PA49_44 0x490008
+#define MMU_ASID_BUSY 0x490000
#endif /* INCLUDE_MMU_V1_0_H_ */
diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
index b2a9570583ac..9c727a5d47b4 100644
--- a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
+++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_1.h
@@ -8,9 +8,21 @@
#ifndef INCLUDE_MMU_V1_1_H_
#define INCLUDE_MMU_V1_1_H_
-#define MMU_ASID 0xC12004
-#define MMU_HOP0_PA43_12 0xC12008
-#define MMU_HOP0_PA49_44 0xC1200C
-#define MMU_BUSY 0xC12000
+#define MMU_V1_1_HOP0_MASK 0x3000000000000ull
+#define MMU_V1_1_HOP1_MASK 0x0FF8000000000ull
+#define MMU_V1_1_HOP2_MASK 0x0007FC0000000ull
+#define MMU_V1_1_HOP3_MASK 0x000003FE00000ull
+#define MMU_V1_1_HOP4_MASK 0x00000001FF000ull
+
+#define MMU_V1_1_HOP0_SHIFT 48
+#define MMU_V1_1_HOP1_SHIFT 39
+#define MMU_V1_1_HOP2_SHIFT 30
+#define MMU_V1_1_HOP3_SHIFT 21
+#define MMU_V1_1_HOP4_SHIFT 12
+
+#define MMU_ASID 0xC12004
+#define MMU_HOP0_PA43_12 0xC12008
+#define MMU_HOP0_PA49_44 0xC1200C
+#define MMU_BUSY 0xC12000
#endif /* INCLUDE_MMU_V1_1_H_ */
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c
index 0f54730c7ed5..98828030b5a4 100644
--- a/drivers/misc/lattice-ecp3-config.c
+++ b/drivers/misc/lattice-ecp3-config.c
@@ -76,12 +76,12 @@ static void firmware_load(const struct firmware *fw, void *context)
if (fw == NULL) {
dev_err(&spi->dev, "Cannot load firmware, aborting\n");
- return;
+ goto out;
}
if (fw->size == 0) {
dev_err(&spi->dev, "Error: Firmware size is 0!\n");
- return;
+ goto out;
}
/* Fill dummy data (24 stuffing bits for commands) */
@@ -103,7 +103,7 @@ static void firmware_load(const struct firmware *fw, void *context)
dev_err(&spi->dev,
"Error: No supported FPGA detected (JEDEC_ID=%08x)!\n",
jedec_id);
- return;
+ goto out;
}
dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name);
@@ -116,7 +116,7 @@ static void firmware_load(const struct firmware *fw, void *context)
buffer = kzalloc(fw->size + 8, GFP_KERNEL);
if (!buffer) {
dev_err(&spi->dev, "Error: Can't allocate memory!\n");
- return;
+ goto out;
}
/*
@@ -155,7 +155,7 @@ static void firmware_load(const struct firmware *fw, void *context)
"Error: Timeout waiting for FPGA to clear (status=%08x)!\n",
status);
kfree(buffer);
- return;
+ goto out;
}
dev_info(&spi->dev, "Configuring the FPGA...\n");
@@ -181,7 +181,7 @@ static void firmware_load(const struct firmware *fw, void *context)
release_firmware(fw);
kfree(buffer);
-
+out:
complete(&data->fw_loaded);
}
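
The fix above illustrates a general rule for asynchronous firmware callbacks: every exit path must signal the completion, or the waiter (typically in remove()) blocks forever. A minimal sketch with hypothetical names:

#include <linux/completion.h>
#include <linux/firmware.h>

struct my_ctx {
	struct completion fw_loaded;
};

static void fw_cb(const struct firmware *fw, void *context)
{
	struct my_ctx *data = context;

	if (!fw)
		goto out;	/* error paths still reach complete() */

	/* ... consume fw ... */
	release_firmware(fw);
out:
	complete(&data->fw_loaded);	/* unblocks wait_for_completion() */
}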
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index aa12097668d3..2e0aa74ac185 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -11,7 +11,7 @@ lkdtm-$(CONFIG_LKDTM) += usercopy.o
lkdtm-$(CONFIG_LKDTM) += stackleak.o
lkdtm-$(CONFIG_LKDTM) += cfi.o
lkdtm-$(CONFIG_LKDTM) += fortify.o
-lkdtm-$(CONFIG_PPC_BOOK3S_64) += powerpc.o
+lkdtm-$(CONFIG_PPC_64S_HASH_MMU) += powerpc.o
KASAN_SANITIZE_rodata.o := n
KASAN_SANITIZE_stackleak.o := n
@@ -20,7 +20,7 @@ CFLAGS_REMOVE_rodata.o += $(CC_FLAGS_LTO)
OBJCOPYFLAGS :=
OBJCOPYFLAGS_rodata_objcopy.o := \
- --rename-section .noinstr.text=.rodata,alloc,readonly,load
+ --rename-section .noinstr.text=.rodata,alloc,readonly,load,contents
targets += rodata.o rodata_objcopy.o
$(obj)/rodata_objcopy.o: $(obj)/rodata.o FORCE
$(call if_changed,objcopy)
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index f4cb94a9aa9c..f21854ac5cc2 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -41,20 +41,22 @@ static DEFINE_SPINLOCK(lock_me_up);
* Make sure compiler does not optimize this function or stack frame away:
* - function marked noinline
* - stack variables are marked volatile
- * - stack variables are written (memset()) and read (pr_info())
- * - function has external effects (pr_info())
- * */
+ * - stack variables are written (memset()) and read (buf[..] passed as arg)
+ * - function may have external effects (memzero_explicit())
+ * - no tail recursion possible
+ */
static int noinline recursive_loop(int remaining)
{
volatile char buf[REC_STACK_SIZE];
+ volatile int ret;
memset((void *)buf, remaining & 0xFF, sizeof(buf));
- pr_info("loop %d/%d ...\n", (int)buf[remaining % sizeof(buf)],
- recur_count);
if (!remaining)
- return 0;
+ ret = 0;
else
- return recursive_loop(remaining - 1);
+ ret = recursive_loop((int)buf[remaining % sizeof(buf)] - 1);
+ memzero_explicit((void *)buf, sizeof(buf));
+ return ret;
}
/* If the depth is negative, use the default, otherwise keep parameter. */
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 609d9ee2acc0..f69b964b9952 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -182,7 +182,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(FORTIFIED_SUBOBJECT),
CRASHTYPE(FORTIFIED_STRSCPY),
CRASHTYPE(DOUBLE_FAULT),
-#ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_PPC_64S_HASH_MMU
CRASHTYPE(PPC_SLB_MULTIHIT),
#endif
};
@@ -212,7 +212,11 @@ module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
"crash point is to be hit to trigger action");
-/* For test debug reporting. */
+/*
+ * For test debug reporting when CI systems provide terse summaries.
+ * TODO: Remove this once reasonable reporting exists in most CI systems:
+ * https://lore.kernel.org/lkml/CAHk-=wiFvfkoFixTapvvyPMN9pq5G-+Dys2eSyBa1vzDGAO5+A@mail.gmail.com
+ */
char *lkdtm_kernel_info;
/* Return the crashtype number or NULL if the name is invalid */
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 0e90591235a6..06734670a732 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -2330,6 +2330,8 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
list_move_tail(&cb->list, &dev->ctrl_rd_list);
}
+ cl->status = 0;
+
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
cl->dma_mapped || cl->status,
@@ -2407,6 +2409,8 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
list_move_tail(&cb->list, &dev->ctrl_rd_list);
}
+ cl->status = 0;
+
mutex_unlock(&dev->device_lock);
wait_event_timeout(cl->wait,
!cl->dma_mapped || cl->status,
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index be41843df75b..cebcca6d6d3e 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -672,10 +672,14 @@ static void mei_hbm_cl_dma_map_res(struct mei_device *dev,
if (!cl)
return;
- dev_dbg(dev->dev, "cl dma map result = %d\n", res->status);
- cl->status = res->status;
- if (!cl->status)
+ if (res->status) {
+ dev_err(dev->dev, "cl dma map failed %d\n", res->status);
+ cl->status = -EFAULT;
+ } else {
+ dev_dbg(dev->dev, "cl dma map succeeded\n");
cl->dma_mapped = 1;
+ cl->status = 0;
+ }
wake_up(&cl->wait);
}
@@ -698,10 +702,14 @@ static void mei_hbm_cl_dma_unmap_res(struct mei_device *dev,
if (!cl)
return;
- dev_dbg(dev->dev, "cl dma unmap result = %d\n", res->status);
- cl->status = res->status;
- if (!cl->status)
+ if (res->status) {
+ dev_err(dev->dev, "cl dma unmap failed %d\n", res->status);
+ cl->status = -EFAULT;
+ } else {
+ dev_dbg(dev->dev, "cl dma unmap succeeded\n");
cl->dma_mapped = 0;
+ cl->status = 0;
+ }
wake_up(&cl->wait);
}
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index a4e854b9b9e6..00652c137cc7 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -994,11 +994,7 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
hhisr &= ~IPC_HHIER_SEC;
}
- generated = generated ||
- (hisr & HISR_INT_STS_MSK) ||
- (ipc_isr & SEC_IPC_HOST_INT_STATUS_PENDING);
-
- if (generated && do_ack) {
+ if (do_ack) {
/* Save the interrupt causes */
hw->intr_cause |= hisr & HISR_INT_STS_MSK;
if (ipc_isr & SEC_IPC_HOST_INT_STATUS_IN_RDY)
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 5c8cb679b997..f79076c67256 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -24,6 +24,7 @@ const char *mei_dev_state_str(int state)
MEI_DEV_STATE(ENABLED);
MEI_DEV_STATE(RESETTING);
MEI_DEV_STATE(DISABLED);
+ MEI_DEV_STATE(POWERING_DOWN);
MEI_DEV_STATE(POWER_DOWN);
MEI_DEV_STATE(POWER_UP);
default:
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index e70525eedaae..d881f5e40ad9 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -74,7 +74,6 @@ static long afu_ioctl_attach(struct ocxl_context *ctx,
{
struct ocxl_ioctl_attach arg;
u64 amr = 0;
- int rc;
pr_debug("%s for context %d\n", __func__, ctx->pasid);
@@ -86,8 +85,7 @@ static long afu_ioctl_attach(struct ocxl_context *ctx,
return -EINVAL;
amr = arg.amr & mfspr(SPRN_UAMOR);
- rc = ocxl_context_attach(ctx, amr, current->mm);
- return rc;
+ return ocxl_context_attach(ctx, amr, current->mm);
}
static long afu_ioctl_get_metadata(struct ocxl_context *ctx,
diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c
index 2ed7e3aaff3a..8f786a225dcf 100644
--- a/drivers/misc/pci_endpoint_test.c
+++ b/drivers/misc/pci_endpoint_test.c
@@ -865,7 +865,7 @@ static int pci_endpoint_test_probe(struct pci_dev *pdev,
goto err_release_irq;
}
misc_device->parent = &pdev->dev;
- misc_device->fops = &pci_endpoint_test_fops,
+ misc_device->fops = &pci_endpoint_test_fops;
err = misc_register(misc_device);
if (err) {
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index 4c26b19f5154..f0e7f02605eb 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -371,6 +371,7 @@ static const struct of_device_id sram_dt_ids[] = {
{ .compatible = "atmel,sama5d2-securam", .data = &atmel_securam_config },
{ .compatible = "nvidia,tegra186-sysram", .data = &tegra_sysram_config },
{ .compatible = "nvidia,tegra194-sysram", .data = &tegra_sysram_config },
+ { .compatible = "nvidia,tegra234-sysram", .data = &tegra_sysram_config },
{}
};
diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c
index 488eeb2811ae..281c54003edc 100644
--- a/drivers/misc/uacce/uacce.c
+++ b/drivers/misc/uacce/uacce.c
@@ -289,7 +289,7 @@ static ssize_t api_show(struct device *dev,
{
struct uacce_device *uacce = to_uacce_device(dev);
- return sprintf(buf, "%s\n", uacce->api_ver);
+ return sysfs_emit(buf, "%s\n", uacce->api_ver);
}
static ssize_t flags_show(struct device *dev,
@@ -297,7 +297,7 @@ static ssize_t flags_show(struct device *dev,
{
struct uacce_device *uacce = to_uacce_device(dev);
- return sprintf(buf, "%u\n", uacce->flags);
+ return sysfs_emit(buf, "%u\n", uacce->flags);
}
static ssize_t available_instances_show(struct device *dev,
@@ -309,7 +309,7 @@ static ssize_t available_instances_show(struct device *dev,
if (!uacce->ops->get_available_instances)
return -ENODEV;
- return sprintf(buf, "%d\n",
+ return sysfs_emit(buf, "%d\n",
uacce->ops->get_available_instances(uacce));
}
@@ -318,7 +318,7 @@ static ssize_t algorithms_show(struct device *dev,
{
struct uacce_device *uacce = to_uacce_device(dev);
- return sprintf(buf, "%s\n", uacce->algs);
+ return sysfs_emit(buf, "%s\n", uacce->algs);
}
static ssize_t region_mmio_size_show(struct device *dev,
@@ -326,7 +326,7 @@ static ssize_t region_mmio_size_show(struct device *dev,
{
struct uacce_device *uacce = to_uacce_device(dev);
- return sprintf(buf, "%lu\n",
+ return sysfs_emit(buf, "%lu\n",
uacce->qf_pg_num[UACCE_QFRT_MMIO] << PAGE_SHIFT);
}
@@ -335,7 +335,7 @@ static ssize_t region_dus_size_show(struct device *dev,
{
struct uacce_device *uacce = to_uacce_device(dev);
- return sprintf(buf, "%lu\n",
+ return sysfs_emit(buf, "%lu\n",
uacce->qf_pg_num[UACCE_QFRT_DUS] << PAGE_SHIFT);
}
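
These conversions track the documented preference for sysfs_emit() over sprintf() in show() callbacks: it knows buf is the PAGE_SIZE buffer sysfs passes in and clamps the output, where sprintf() would silently trust the caller. A minimal sketch (hypothetical attribute):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t version_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", "1.0");
}
static DEVICE_ATTR_RO(version);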
diff --git a/drivers/misc/vmw_vmci/vmci_context.c b/drivers/misc/vmw_vmci/vmci_context.c
index c0b5e339d5a1..6cf3e21c7604 100644
--- a/drivers/misc/vmw_vmci/vmci_context.c
+++ b/drivers/misc/vmw_vmci/vmci_context.c
@@ -687,10 +687,8 @@ int vmci_ctx_remove_notification(u32 context_id, u32 remote_cid)
}
spin_unlock(&context->lock);
- if (found) {
- synchronize_rcu();
- kfree(notifier);
- }
+ if (found)
+ kvfree_rcu(notifier);
vmci_ctx_put(context);
diff --git a/drivers/misc/vmw_vmci/vmci_event.c b/drivers/misc/vmw_vmci/vmci_event.c
index e3436abf39f4..2100297c94ad 100644
--- a/drivers/misc/vmw_vmci/vmci_event.c
+++ b/drivers/misc/vmw_vmci/vmci_event.c
@@ -209,8 +209,7 @@ int vmci_event_unsubscribe(u32 sub_id)
if (!s)
return VMCI_ERROR_NOT_FOUND;
- synchronize_rcu();
- kfree(s);
+ kvfree_rcu(s);
return VMCI_SUCCESS;
}
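
Both vmci conversions above replace the synchronize_rcu(); kfree(); pair with kvfree_rcu(). A hedged sketch of the idiom: the two-argument form below uses an rcu_head embedded in the object and never blocks, while the single-argument form used in this patch may sleep under memory pressure.

#include <linux/rcupdate.h>
#include <linux/list.h>

struct sub {			/* hypothetical subscriber object */
	struct list_head node;
	struct rcu_head rcu;
};

static void drop_sub(struct sub *s)
{
	list_del_rcu(&s->node);	/* unlink under the writer-side lock */
	kvfree_rcu(s, rcu);	/* freed only after an RCU grace period */
}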
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 2483cfdd30ea..8d718aa56d33 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -264,7 +264,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
goto out_put;
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
- blk_execute_rq(NULL, req, 0);
+ blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
blk_mq_free_request(req);
@@ -657,7 +657,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idatas;
req_to_mmc_queue_req(req)->ioc_count = 1;
- blk_execute_rq(NULL, req, 0);
+ blk_execute_rq(req, false);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
blk_mq_free_request(req);
@@ -726,7 +726,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
req_to_mmc_queue_req(req)->drv_op_data = idata;
req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
- blk_execute_rq(NULL, req, 0);
+ blk_execute_rq(req, false);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
/* copy to user if data and response */
@@ -1682,31 +1682,31 @@ static void mmc_blk_read_single(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = mq->card;
struct mmc_host *host = card->host;
blk_status_t error = BLK_STS_OK;
- int retries = 0;
do {
u32 status;
int err;
+ int retries = 0;
- mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
+ while (retries++ <= MMC_READ_SINGLE_RETRIES) {
+ mmc_blk_rw_rq_prep(mqrq, card, 1, mq);
- mmc_wait_for_req(host, mrq);
+ mmc_wait_for_req(host, mrq);
- err = mmc_send_status(card, &status);
- if (err)
- goto error_exit;
-
- if (!mmc_host_is_spi(host) &&
- !mmc_ready_for_data(status)) {
- err = mmc_blk_fix_state(card, req);
+ err = mmc_send_status(card, &status);
if (err)
goto error_exit;
- }
- if (mrq->cmd->error && retries++ < MMC_READ_SINGLE_RETRIES)
- continue;
+ if (!mmc_host_is_spi(host) &&
+ !mmc_ready_for_data(status)) {
+ err = mmc_blk_fix_state(card, req);
+ if (err)
+ goto error_exit;
+ }
- retries = 0;
+ if (!mrq->cmd->error)
+ break;
+ }
if (mrq->cmd->error ||
mrq->data->error ||
@@ -1837,7 +1837,7 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req)
/* Reset if the card is in a bad state */
if (!mmc_host_is_spi(mq->card->host) &&
err && mmc_blk_reset(md, card->host, type)) {
- pr_err("%s: recovery failed!\n", req->rq_disk->disk_name);
+ pr_err("%s: recovery failed!\n", req->q->disk->disk_name);
mqrq->retries = MMC_NO_RETRIES;
return;
}
@@ -2051,7 +2051,8 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req)
mmc_put_card(mq->card, &mq->ctx);
}
-static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
+static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req,
+ bool can_sleep)
{
struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
struct mmc_request *mrq = &mqrq->brq.mrq;
@@ -2063,10 +2064,14 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req)
* Block layer timeouts race with completions which means the normal
* completion path cannot be used during recovery.
*/
- if (mq->in_recovery)
+ if (mq->in_recovery) {
mmc_blk_mq_complete_rq(mq, req);
- else if (likely(!blk_should_fake_timeout(req->q)))
- blk_mq_complete_request(req);
+ } else if (likely(!blk_should_fake_timeout(req->q))) {
+ if (can_sleep)
+ blk_mq_complete_request_direct(req, mmc_blk_mq_complete);
+ else
+ blk_mq_complete_request(req);
+ }
mmc_blk_mq_dec_in_flight(mq, req);
}
@@ -2087,7 +2092,7 @@ void mmc_blk_mq_recovery(struct mmc_queue *mq)
mmc_blk_urgent_bkops(mq, mqrq);
- mmc_blk_mq_post_req(mq, req);
+ mmc_blk_mq_post_req(mq, req, true);
}
static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
@@ -2106,7 +2111,7 @@ static void mmc_blk_mq_complete_prev_req(struct mmc_queue *mq,
if (prev_req)
*prev_req = mq->complete_req;
else
- mmc_blk_mq_post_req(mq, mq->complete_req);
+ mmc_blk_mq_post_req(mq, mq->complete_req, true);
mq->complete_req = NULL;
@@ -2178,7 +2183,8 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
mq->rw_wait = false;
wake_up(&mq->wait);
- mmc_blk_mq_post_req(mq, req);
+ /* context unknown */
+ mmc_blk_mq_post_req(mq, req, false);
}
static bool mmc_blk_rw_wait_cond(struct mmc_queue *mq, int *err)
@@ -2238,7 +2244,7 @@ static int mmc_blk_mq_issue_rw_rq(struct mmc_queue *mq,
err = mmc_start_request(host, &mqrq->brq.mrq);
if (prev_req)
- mmc_blk_mq_post_req(mq, prev_req);
+ mmc_blk_mq_post_req(mq, prev_req, true);
if (err)
mq->rw_wait = false;
@@ -2395,10 +2401,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->disk->private_data = md;
md->parent = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
- md->disk->flags = GENHD_FL_EXT_DEVT;
if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
- md->disk->flags |= GENHD_FL_NO_PART_SCAN
- | GENHD_FL_SUPPRESS_PARTITION_INFO;
+ md->disk->flags |= GENHD_FL_NO_PART;
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -2739,7 +2743,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
- blk_execute_rq(NULL, req, 0);
+ blk_execute_rq(req, false);
ret = req_to_mmc_queue_req(req)->drv_op_result;
if (ret >= 0) {
*val = ret;
@@ -2778,7 +2782,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
}
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
- blk_execute_rq(NULL, req, 0);
+ blk_execute_rq(req, false);
err = req_to_mmc_queue_req(req)->drv_op_result;
blk_mq_free_request(req);
if (err) {
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 45f578793980..bd87012c220c 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -67,7 +67,7 @@ static const unsigned int sd_au_size[] = {
__res & __mask; \
})
-#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 2000
+#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 1000
#define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000
struct sd_busy_data {
@@ -1664,6 +1664,12 @@ static int sd_poweroff_notify(struct mmc_card *card)
goto out;
}
+ /* Find out when the command is completed. */
+ err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false,
+ MMC_BUSY_EXTR_SINGLE);
+ if (err)
+ goto out;
+
cb_data.card = card;
cb_data.reg_buf = reg_buf;
err = __mmc_poll_for_busy(card->host, SD_POWEROFF_NOTIFY_TIMEOUT_MS,
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 5af8494c31b5..52b0b27a6839 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -966,6 +966,7 @@ config MMC_REALTEK_USB
config MMC_SUNXI
tristate "Allwinner sunxi SD/MMC Host Controller support"
depends on ARCH_SUNXI || COMPILE_TEST
+ depends on SUNXI_CCU
help
This selects support for the SD/MMC Host Controller on
Allwinner sunxi SoCs.
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 8c2361e66277..463b707d9e99 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1293,14 +1293,12 @@ static int bcm2835_add_host(struct bcm2835_host *host)
host->dma_cfg_tx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_cfg_tx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- host->dma_cfg_tx.slave_id = 13; /* DREQ channel */
host->dma_cfg_tx.direction = DMA_MEM_TO_DEV;
host->dma_cfg_tx.src_addr = 0;
host->dma_cfg_tx.dst_addr = host->phys_addr + SDDATA;
host->dma_cfg_rx.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
host->dma_cfg_rx.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- host->dma_cfg_rx.slave_id = 13; /* DREQ channel */
host->dma_cfg_rx.direction = DMA_DEV_TO_MEM;
host->dma_cfg_rx.src_addr = host->phys_addr + SDDATA;
host->dma_cfg_rx.dst_addr = 0;
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 7693236c946f..7ab1b38a7be5 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1128,8 +1128,8 @@ static int jz4740_mmc_resume(struct device *dev)
return pinctrl_select_default_state(dev);
}
-DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
- jz4740_mmc_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend,
+ jz4740_mmc_resume);
static struct platform_driver jz4740_mmc_driver = {
.probe = jz4740_mmc_probe,
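Note: DEFINE_SIMPLE_DEV_PM_OPS() defines an ordinary variable, so it can and should be static when the ops are referenced only from the same file, as this hunk and the mxcmmc one below do. A sketch of the surrounding idiom, assuming hypothetical my_suspend/my_resume callbacks:

	#include <linux/pm.h>
	#include <linux/platform_device.h>

	static DEFINE_SIMPLE_DEV_PM_OPS(my_pm_ops, my_suspend, my_resume);

	static struct platform_driver my_driver = {
		.driver = {
			.name = "my-driver",
			.pm = pm_sleep_ptr(&my_pm_ops), /* NULL if !CONFIG_PM_SLEEP */
		},
	};
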
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 16d1c7a43d33..b6eb75f4bbfc 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -705,12 +705,12 @@ static int moxart_remove(struct platform_device *pdev)
if (!IS_ERR_OR_NULL(host->dma_chan_rx))
dma_release_channel(host->dma_chan_rx);
mmc_remove_host(mmc);
- mmc_free_host(mmc);
writel(0, host->base + REG_INTERRUPT_MASK);
writel(0, host->base + REG_POWER_CONTROL);
writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF,
host->base + REG_CLOCK_CONTROL);
+ mmc_free_host(mmc);
return 0;
}
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 98c218bd6669..40b6878bea6c 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -1210,7 +1210,7 @@ static int mxcmci_resume(struct device *dev)
return ret;
}
-DEFINE_SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);
static struct platform_driver mxcmci_driver = {
.probe = mxcmci_probe,
diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h
index 0c45e82ff0de..66d308e73e17 100644
--- a/drivers/mmc/host/renesas_sdhi.h
+++ b/drivers/mmc/host/renesas_sdhi.h
@@ -18,6 +18,8 @@ struct renesas_sdhi_scc {
u32 tap_hs400_4tap; /* sampling clock position for HS400 (4 TAP) */
};
+#define SDHI_FLAG_NEED_CLKH_FALLBACK BIT(0)
+
struct renesas_sdhi_of_data {
unsigned long tmio_flags;
u32 tmio_ocr_mask;
@@ -31,6 +33,7 @@ struct renesas_sdhi_of_data {
int taps_num;
unsigned int max_blk_count;
unsigned short max_segs;
+ unsigned long sdhi_flags;
};
#define SDHI_CALIB_TABLE_MAX 32
@@ -57,6 +60,7 @@ struct tmio_mmc_dma {
struct renesas_sdhi {
struct clk *clk;
+ struct clk *clkh;
struct clk *clk_cd;
struct tmio_mmc_data mmc_data;
struct tmio_mmc_dma dma_priv;
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index f5b2684ad805..2797a9c0f17d 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -127,10 +127,12 @@ static int renesas_sdhi_clk_enable(struct tmio_mmc_host *host)
}
static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
- unsigned int new_clock)
+ unsigned int wanted_clock)
{
struct renesas_sdhi *priv = host_to_priv(host);
+ struct clk *ref_clk = priv->clk;
unsigned int freq, diff, best_freq = 0, diff_min = ~0;
+ unsigned int new_clock, clkh_shift = 0;
int i;
/*
@@ -141,6 +143,16 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2) || mmc_doing_tune(host->mmc))
return clk_get_rate(priv->clk);
+ if (priv->clkh) {
+ bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
+ bool need_slow_clkh = (host->mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
+ (host->mmc->ios.timing == MMC_TIMING_MMC_HS400);
+ clkh_shift = use_4tap && need_slow_clkh ? 1 : 2;
+ ref_clk = priv->clkh;
+ }
+
+ new_clock = wanted_clock << clkh_shift;
+
/*
* We want the bus clock to be as close as possible to, but no
* greater than, new_clock. As we can divide by 1 << i for
@@ -148,11 +160,10 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
* possible, but no greater than, new_clock << i.
*/
for (i = min(9, ilog2(UINT_MAX / new_clock)); i >= 0; i--) {
- freq = clk_round_rate(priv->clk, new_clock << i);
+ freq = clk_round_rate(ref_clk, new_clock << i);
if (freq > (new_clock << i)) {
/* Too fast; look for a slightly slower option */
- freq = clk_round_rate(priv->clk,
- (new_clock << i) / 4 * 3);
+ freq = clk_round_rate(ref_clk, (new_clock << i) / 4 * 3);
if (freq > (new_clock << i))
continue;
}
@@ -164,7 +175,10 @@ static unsigned int renesas_sdhi_clk_update(struct tmio_mmc_host *host,
}
}
- clk_set_rate(priv->clk, best_freq);
+ clk_set_rate(ref_clk, best_freq);
+
+ if (priv->clkh)
+ clk_set_rate(priv->clk, best_freq >> clkh_shift);
return clk_get_rate(priv->clk);
}
@@ -628,7 +642,7 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host)
* is at least SH_MOBILE_SDHI_MIN_TAP_ROW probes long then use the
* center index as the tap, otherwise bail out.
*/
- bitmap_for_each_set_region(bitmap, rs, re, 0, taps_size) {
+ for_each_set_bitrange(rs, re, bitmap, taps_size) {
if (re - rs > tap_cnt) {
tap_end = re;
tap_start = rs;
@@ -904,11 +918,12 @@ int renesas_sdhi_probe(struct platform_device *pdev,
dma_priv = &priv->dma_priv;
priv->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(priv->clk)) {
- ret = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "cannot get clock: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk), "cannot get clock");
+
+ priv->clkh = devm_clk_get_optional(&pdev->dev, "clkh");
+ if (IS_ERR(priv->clkh))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clkh), "cannot get clkh");
/*
* Some controllers provide a 2nd clock just to run the internal card
@@ -921,9 +936,9 @@ int renesas_sdhi_probe(struct platform_device *pdev,
* to the card detect circuit. That leaves us with: if separate clocks
* are presented, we must treat them both as virtually one clock.
*/
- priv->clk_cd = devm_clk_get(&pdev->dev, "cd");
+ priv->clk_cd = devm_clk_get_optional(&pdev->dev, "cd");
if (IS_ERR(priv->clk_cd))
- priv->clk_cd = NULL;
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk_cd), "cannot get cd clock");
priv->pinctrl = devm_pinctrl_get(&pdev->dev);
if (!IS_ERR(priv->pinctrl)) {
@@ -947,6 +962,10 @@ int renesas_sdhi_probe(struct platform_device *pdev,
mmc_data->max_segs = of_data->max_segs;
dma_priv->dma_buswidth = of_data->dma_buswidth;
host->bus_shift = of_data->bus_shift;
+ /* Fallback for old DTs */
+ if (!priv->clkh && of_data->sdhi_flags & SDHI_FLAG_NEED_CLKH_FALLBACK)
+ priv->clkh = clk_get_parent(clk_get_parent(priv->clk));
+
}
host->write16_hook = renesas_sdhi_write16_hook;
@@ -1044,7 +1063,7 @@ int renesas_sdhi_probe(struct platform_device *pdev,
host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR |
MMC_CAP2_HS400_1_8V))) {
const struct renesas_sdhi_scc *taps = of_data->taps;
- bool use_4tap = priv->quirks && priv->quirks->hs400_4taps;
+ bool use_4tap = quirks && quirks->hs400_4taps;
bool hit = false;
for (i = 0; i < of_data->taps_num; i++) {
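Note: the CLKH handling above scales the requested rate before the divider search. A worked example with illustrative numbers: for a 200 MHz SDR104 request on a 4-tap SCC, clkh_shift is 1, so the driver rounds against the CLKH parent at 200 MHz << 1 = 400 MHz and finally programs priv->clk to best_freq >> 1 = 200 MHz. In sketch form (mirrors the shift selection in the hunk, not the committed code):

	static unsigned int clkh_target(unsigned int wanted_clock, bool use_4tap,
					bool sdr104_or_hs400)
	{
		unsigned int clkh_shift = (use_4tap && sdr104_or_hs400) ? 1 : 2;

		return wanted_clock << clkh_shift; /* e.g. 200 MHz -> 400 MHz */
	}
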
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index 7660f7ea74dd..9d2c600fd4ce 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -125,6 +125,22 @@ static const struct renesas_sdhi_of_data of_data_rcar_gen3 = {
/* DMAC can handle 32bit blk count but only 1 segment */
.max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
.max_segs = 1,
+ .sdhi_flags = SDHI_FLAG_NEED_CLKH_FALLBACK,
+};
+
+static const struct renesas_sdhi_of_data of_data_rcar_gen3_no_fallback = {
+ .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_CLK_ACTUAL |
+ TMIO_MMC_HAVE_CBSY | TMIO_MMC_MIN_RCAR2,
+ .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ |
+ MMC_CAP_CMD23 | MMC_CAP_WAIT_WHILE_BUSY,
+ .capabilities2 = MMC_CAP2_NO_WRITE_PROTECT | MMC_CAP2_MERGE_CAPABLE,
+ .bus_shift = 2,
+ .scc_offset = 0x1000,
+ .taps = rcar_gen3_scc_taps,
+ .taps_num = ARRAY_SIZE(rcar_gen3_scc_taps),
+ /* DMAC can handle 32bit blk count but only 1 segment */
+ .max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
+ .max_segs = 1,
};
static const u8 r8a7796_es13_calib_table[2][SDHI_CALIB_TABLE_MAX] = {
@@ -214,6 +230,10 @@ static const struct renesas_sdhi_of_data_with_quirks of_r8a77965_compatible = {
.quirks = &sdhi_quirks_r8a77965,
};
+static const struct renesas_sdhi_of_data_with_quirks of_r8a77970_compatible = {
+ .of_data = &of_data_rcar_gen3_no_fallback,
+};
+
static const struct renesas_sdhi_of_data_with_quirks of_r8a77980_compatible = {
.of_data = &of_data_rcar_gen3,
.quirks = &sdhi_quirks_nohs400,
@@ -235,6 +255,7 @@ static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = {
{ .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, },
{ .compatible = "renesas,sdhi-r8a77961", .data = &of_r8a77961_compatible, },
{ .compatible = "renesas,sdhi-r8a77965", .data = &of_r8a77965_compatible, },
+ { .compatible = "renesas,sdhi-r8a77970", .data = &of_r8a77970_compatible, },
{ .compatible = "renesas,sdhi-r8a77980", .data = &of_r8a77980_compatible, },
{ .compatible = "renesas,sdhi-r8a77990", .data = &of_r8a77990_compatible, },
{ .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, },
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index a593b1fbd69e..0f3658b36513 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -524,12 +524,16 @@ static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask)
static int esdhc_of_enable_dma(struct sdhci_host *host)
{
+ int ret;
u32 value;
struct device *dev = mmc_dev(host->mmc);
if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") ||
- of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc"))
- dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
+ if (ret)
+ return ret;
+ }
value = sdhci_readl(host, ESDHC_DMA_SYSCTL);
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index bcc595c70a9f..104dcd702870 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -405,6 +405,9 @@ static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host,
struct dma_slave_config cfg = { 0, };
res = platform_get_resource(host->pd, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
cfg.direction = direction;
if (direction == DMA_DEV_TO_MEM) {
diff --git a/drivers/most/most_usb.c b/drivers/most/most_usb.c
index acabb7715b42..73258b24fea7 100644
--- a/drivers/most/most_usb.c
+++ b/drivers/most/most_usb.c
@@ -831,7 +831,7 @@ static ssize_t value_show(struct device *dev, struct device_attribute *attr,
int err;
if (sysfs_streq(name, "arb_address"))
- return snprintf(buf, PAGE_SIZE, "%04x\n", dci_obj->reg_addr);
+ return sysfs_emit(buf, "%04x\n", dci_obj->reg_addr);
if (sysfs_streq(name, "arb_value"))
reg_addr = dci_obj->reg_addr;
@@ -843,7 +843,7 @@ static ssize_t value_show(struct device *dev, struct device_attribute *attr,
if (err < 0)
return err;
- return snprintf(buf, PAGE_SIZE, "%04x\n", val);
+ return sysfs_emit(buf, "%04x\n", val);
}
static ssize_t value_store(struct device *dev, struct device_attribute *attr,
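Note: sysfs_emit() bounds the output to PAGE_SIZE internally and warns on misuse, so show() callbacks no longer pass the buffer size themselves. Minimal sketch with a hypothetical attribute and value:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
				char *buf)
	{
		return sysfs_emit(buf, "%04x\n", 0xabcd); /* value illustrative */
	}
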
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index 6ed6c51fac69..d503821a3e60 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -264,16 +264,20 @@ static int phram_setup(const char *val)
}
}
- if (erasesize)
- div_u64_rem(len, (uint32_t)erasesize, &rem);
-
if (len == 0 || erasesize == 0 || erasesize > len
- || erasesize > UINT_MAX || rem) {
+ || erasesize > UINT_MAX) {
parse_err("illegal erasesize or len\n");
ret = -EINVAL;
goto error;
}
+ div_u64_rem(len, (uint32_t)erasesize, &rem);
+ if (rem) {
+ parse_err("len is not multiple of erasesize\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
ret = register_device(name, start, len, (uint32_t)erasesize);
if (ret)
goto error;
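Note: the rework above splits one overloaded check in two, so "len is not a multiple of erasesize" gets its own diagnostic and the division only runs once erasesize is known to be non-zero. A self-contained sketch of the same validation, assuming u64 len and u32 erasesize (the driver's own types differ slightly):

	#include <linux/errno.h>
	#include <linux/math64.h>

	static int phram_check_geometry(u64 len, u32 erasesize)
	{
		u32 rem;

		if (len == 0 || erasesize == 0 || erasesize > len)
			return -EINVAL;
		div_u64_rem(len, erasesize, &rem); /* rem = len % erasesize */
		return rem ? -EINVAL : 0;
	}
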
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 4945caa88345..6a099bbcd8be 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -357,12 +357,6 @@ config MTD_INTEL_VR_NOR
Map driver for a NOR flash bank located on the Expansion Bus of the
Intel Vermilion Range chipset.
-config MTD_RBTX4939
- tristate "Map driver for RBTX4939 board"
- depends on TOSHIBA_RBTX4939 && MTD_CFI && MTD_COMPLEX_MAPPINGS
- help
- Map driver for NOR flash chips on RBTX4939 board.
-
config MTD_PLATRAM
tristate "Map driver for platform device RAM (mtd-ram)"
select MTD_RAM
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 11fea9c8d561..2240b100f66a 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -42,6 +42,5 @@ obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o
obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
-obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
deleted file mode 100644
index 39c86c0b0ec1..000000000000
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ /dev/null
@@ -1,133 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * rbtx4939-flash (based on physmap.c)
- *
- * This is a simplified physmap driver with map_init callback function.
- *
- * Copyright (C) 2009 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-#include <asm/txx9/rbtx4939.h>
-
-struct rbtx4939_flash_info {
- struct mtd_info *mtd;
- struct map_info map;
-};
-
-static int rbtx4939_flash_remove(struct platform_device *dev)
-{
- struct rbtx4939_flash_info *info;
-
- info = platform_get_drvdata(dev);
- if (!info)
- return 0;
-
- if (info->mtd) {
- mtd_device_unregister(info->mtd);
- map_destroy(info->mtd);
- }
- return 0;
-}
-
-static const char * const rom_probe_types[] = {
- "cfi_probe", "jedec_probe", NULL };
-
-static int rbtx4939_flash_probe(struct platform_device *dev)
-{
- struct rbtx4939_flash_data *pdata;
- struct rbtx4939_flash_info *info;
- struct resource *res;
- const char * const *probe_type;
- int err = 0;
- unsigned long size;
-
- pdata = dev_get_platdata(&dev->dev);
- if (!pdata)
- return -ENODEV;
-
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- info = devm_kzalloc(&dev->dev, sizeof(struct rbtx4939_flash_info),
- GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- platform_set_drvdata(dev, info);
-
- size = resource_size(res);
- pr_notice("rbtx4939 platform flash device: %pR\n", res);
-
- if (!devm_request_mem_region(&dev->dev, res->start, size,
- dev_name(&dev->dev)))
- return -EBUSY;
-
- info->map.name = dev_name(&dev->dev);
- info->map.phys = res->start;
- info->map.size = size;
- info->map.bankwidth = pdata->width;
-
- info->map.virt = devm_ioremap(&dev->dev, info->map.phys, size);
- if (!info->map.virt)
- return -EBUSY;
-
- if (pdata->map_init)
- (*pdata->map_init)(&info->map);
- else
- simple_map_init(&info->map);
-
- probe_type = rom_probe_types;
- for (; !info->mtd && *probe_type; probe_type++)
- info->mtd = do_map_probe(*probe_type, &info->map);
- if (!info->mtd) {
- dev_err(&dev->dev, "map_probe failed\n");
- err = -ENXIO;
- goto err_out;
- }
- info->mtd->dev.parent = &dev->dev;
- err = mtd_device_register(info->mtd, pdata->parts, pdata->nr_parts);
-
- if (err)
- goto err_out;
- return 0;
-
-err_out:
- rbtx4939_flash_remove(dev);
- return err;
-}
-
-#ifdef CONFIG_PM
-static void rbtx4939_flash_shutdown(struct platform_device *dev)
-{
- struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
-
- if (mtd_suspend(info->mtd) == 0)
- mtd_resume(info->mtd);
-}
-#else
-#define rbtx4939_flash_shutdown NULL
-#endif
-
-static struct platform_driver rbtx4939_flash_driver = {
- .probe = rbtx4939_flash_probe,
- .remove = rbtx4939_flash_remove,
- .shutdown = rbtx4939_flash_shutdown,
- .driver = {
- .name = "rbtx4939-flash",
- },
-};
-
-module_platform_driver(rbtx4939_flash_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("RBTX4939 MTD map driver");
-MODULE_ALIAS("platform:rbtx4939-flash");
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 4eaba6f4ec68..243f28a3206b 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -46,23 +46,19 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
struct mtd_blktrans_dev *dev,
struct request *req)
{
+ struct req_iterator iter;
+ struct bio_vec bvec;
unsigned long block, nsect;
char *buf;
block = blk_rq_pos(req) << 9 >> tr->blkshift;
nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
- if (req_op(req) == REQ_OP_FLUSH) {
+ switch (req_op(req)) {
+ case REQ_OP_FLUSH:
if (tr->flush(dev))
return BLK_STS_IOERR;
return BLK_STS_OK;
- }
-
- if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
- get_capacity(req->rq_disk))
- return BLK_STS_IOERR;
-
- switch (req_op(req)) {
case REQ_OP_DISCARD:
if (tr->discard(dev, block, nsect))
return BLK_STS_IOERR;
@@ -76,13 +72,17 @@ static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
}
}
kunmap(bio_page(req->bio));
- rq_flush_dcache_pages(req);
+
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
return BLK_STS_OK;
case REQ_OP_WRITE:
if (!tr->writesect)
return BLK_STS_IOERR;
- rq_flush_dcache_pages(req);
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
+
buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
if (tr->writesect(dev, block, buf)) {
@@ -346,7 +346,7 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
gd->minors = 1 << tr->part_bits;
gd->fops = &mtd_block_ops;
- if (tr->part_bits)
+ if (tr->part_bits) {
if (new->devnum < 26)
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%c", tr->name, 'a' + new->devnum);
@@ -355,9 +355,11 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
"%s%c%c", tr->name,
'a' - 1 + new->devnum / 26,
'a' + new->devnum % 26);
- else
+ } else {
snprintf(gd->disk_name, sizeof(gd->disk_name),
"%s%d", tr->name, new->devnum);
+ gd->flags |= GENHD_FL_NO_PART;
+ }
set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);
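Note: rq_flush_dcache_pages() was removed from the block layer, so its remaining users open-code the page walk, as both hunks above do. A sketch of the equivalent helper:

	#include <linux/blk-mq.h>
	#include <linux/highmem.h>

	/* Flush the dcache for every page attached to a request. */
	static void flush_req_dcache(struct request *req)
	{
		struct req_iterator iter;
		struct bio_vec bvec;

		rq_for_each_segment(bvec, req, iter)
			flush_dcache_page(bvec.bv_page);
	}
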
diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig
index 32b8738baa24..d986ab4e4c35 100644
--- a/drivers/mtd/nand/raw/Kconfig
+++ b/drivers/mtd/nand/raw/Kconfig
@@ -42,7 +42,8 @@ config MTD_NAND_OMAP2
tristate "OMAP2, OMAP3, OMAP4 and Keystone NAND controller"
depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
depends on HAS_IOMEM
- select OMAP_GPMC if ARCH_K3
+ select MEMORY
+ select OMAP_GPMC
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
and Keystone platforms.
@@ -309,7 +310,7 @@ config MTD_NAND_DAVINCI
config MTD_NAND_TXX9NDFMC
tristate "TXx9 NAND controller"
- depends on SOC_TX4938 || SOC_TX4939 || COMPILE_TEST
+ depends on SOC_TX4938 || COMPILE_TEST
depends on HAS_IOMEM
help
This enables the NAND flash controller on the TXx9 SoCs.
diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
index f75929783b94..aee78f5f4f15 100644
--- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c
+++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c
@@ -2106,7 +2106,7 @@ static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
mtd->oobsize / trans,
host->hwcfg.sector_size_1k);
- if (!ret) {
+ if (ret != -EBADMSG) {
*err_addr = brcmnand_get_uncorrecc_addr(ctrl);
if (*err_addr)
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
index 1b64c5a5140d..ded4df473928 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
@@ -2285,7 +2285,7 @@ static int gpmi_nfc_exec_op(struct nand_chip *chip,
this->hw.must_apply_timings = false;
ret = gpmi_nfc_apply_timings(this);
if (ret)
- return ret;
+ goto out_pm;
}
dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
@@ -2414,6 +2414,7 @@ unmap:
this->bch = false;
+out_pm:
pm_runtime_mark_last_busy(this->dev);
pm_runtime_put_autosuspend(this->dev);
diff --git a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
index efe0ffe4f1ab..9054559e52dd 100644
--- a/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
+++ b/drivers/mtd/nand/raw/ingenic/ingenic_ecc.c
@@ -68,9 +68,14 @@ static struct ingenic_ecc *ingenic_ecc_get(struct device_node *np)
struct ingenic_ecc *ecc;
pdev = of_find_device_by_node(np);
- if (!pdev || !platform_get_drvdata(pdev))
+ if (!pdev)
return ERR_PTR(-EPROBE_DEFER);
+ if (!platform_get_drvdata(pdev)) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
ecc = platform_get_drvdata(pdev);
clk_prepare_enable(ecc->clk);
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index 04e6f7b26706..1a77542c6d67 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -2,10 +2,10 @@
/*
* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*/
-
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/bitops.h>
+#include <linux/dma/qcom_adm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
@@ -952,6 +952,7 @@ static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
struct dma_async_tx_descriptor *dma_desc;
struct scatterlist *sgl;
struct dma_slave_config slave_conf;
+ struct qcom_adm_peripheral_config periph_conf = {};
enum dma_transfer_direction dir_eng;
int ret;
@@ -983,11 +984,19 @@ static int prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
if (read) {
slave_conf.src_maxburst = 16;
slave_conf.src_addr = nandc->base_dma + reg_off;
- slave_conf.slave_id = nandc->data_crci;
+ if (nandc->data_crci) {
+ periph_conf.crci = nandc->data_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
} else {
slave_conf.dst_maxburst = 16;
slave_conf.dst_addr = nandc->base_dma + reg_off;
- slave_conf.slave_id = nandc->cmd_crci;
+ if (nandc->cmd_crci) {
+ periph_conf.crci = nandc->cmd_crci;
+ slave_conf.peripheral_config = &periph_conf;
+ slave_conf.peripheral_size = sizeof(periph_conf);
+ }
}
ret = dmaengine_slave_config(nandc->chan, &slave_conf);
@@ -3063,10 +3072,6 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (dma_mapping_error(dev, nandc->base_dma))
return -ENXIO;
- ret = qcom_nandc_alloc(nandc);
- if (ret)
- goto err_nandc_alloc;
-
ret = clk_prepare_enable(nandc->core_clk);
if (ret)
goto err_core_clk;
@@ -3075,6 +3080,10 @@ static int qcom_nandc_probe(struct platform_device *pdev)
if (ret)
goto err_aon_clk;
+ ret = qcom_nandc_alloc(nandc);
+ if (ret)
+ goto err_nandc_alloc;
+
ret = qcom_nandc_setup(nandc);
if (ret)
goto err_setup;
@@ -3086,15 +3095,14 @@ static int qcom_nandc_probe(struct platform_device *pdev)
return 0;
err_setup:
+ qcom_nandc_unalloc(nandc);
+err_nandc_alloc:
clk_disable_unprepare(nandc->aon_clk);
err_aon_clk:
clk_disable_unprepare(nandc->core_clk);
err_core_clk:
- qcom_nandc_unalloc(nandc);
-err_nandc_alloc:
dma_unmap_resource(dev, res->start, resource_size(res),
DMA_BIDIRECTIONAL, 0);
-
return ret;
}
diff --git a/drivers/mtd/parsers/qcomsmempart.c b/drivers/mtd/parsers/qcomsmempart.c
index 06a818cd2433..4311b89d8df0 100644
--- a/drivers/mtd/parsers/qcomsmempart.c
+++ b/drivers/mtd/parsers/qcomsmempart.c
@@ -58,11 +58,11 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
const struct mtd_partition **pparts,
struct mtd_part_parser_data *data)
{
+ size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
+ int ret, i, j, tmpparts, numparts = 0;
struct smem_flash_pentry *pentry;
struct smem_flash_ptable *ptable;
- size_t len = SMEM_FLASH_PTABLE_HDR_LEN;
struct mtd_partition *parts;
- int ret, i, numparts;
char *name, *c;
if (IS_ENABLED(CONFIG_MTD_SPI_NOR_USE_4K_SECTORS)
@@ -75,7 +75,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
pr_debug("Parsing partition table info from SMEM\n");
ptable = qcom_smem_get(SMEM_APPS, SMEM_AARM_PARTITION_TABLE, &len);
if (IS_ERR(ptable)) {
- pr_err("Error reading partition table header\n");
+ if (PTR_ERR(ptable) != -EPROBE_DEFER)
+ pr_err("Error reading partition table header\n");
return PTR_ERR(ptable);
}
@@ -87,8 +88,8 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
}
/* Ensure that # of partitions is less than the max we have allocated */
- numparts = le32_to_cpu(ptable->numparts);
- if (numparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
+ tmpparts = le32_to_cpu(ptable->numparts);
+ if (tmpparts > SMEM_FLASH_PTABLE_MAX_PARTS_V4) {
pr_err("Partition numbers exceed the max limit\n");
return -EINVAL;
}
@@ -116,11 +117,17 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
return PTR_ERR(ptable);
}
+ for (i = 0; i < tmpparts; i++) {
+ pentry = &ptable->pentry[i];
+ if (pentry->name[0] != '\0')
+ numparts++;
+ }
+
parts = kcalloc(numparts, sizeof(*parts), GFP_KERNEL);
if (!parts)
return -ENOMEM;
- for (i = 0; i < numparts; i++) {
+ for (i = 0, j = 0; i < tmpparts; i++) {
pentry = &ptable->pentry[i];
if (pentry->name[0] == '\0')
continue;
@@ -135,24 +142,25 @@ static int parse_qcomsmem_part(struct mtd_info *mtd,
for (c = name; *c != '\0'; c++)
*c = tolower(*c);
- parts[i].name = name;
- parts[i].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
- parts[i].mask_flags = pentry->attr;
- parts[i].size = le32_to_cpu(pentry->length) * mtd->erasesize;
+ parts[j].name = name;
+ parts[j].offset = le32_to_cpu(pentry->offset) * mtd->erasesize;
+ parts[j].mask_flags = pentry->attr;
+ parts[j].size = le32_to_cpu(pentry->length) * mtd->erasesize;
pr_debug("%d: %s offs=0x%08x size=0x%08x attr:0x%08x\n",
i, pentry->name, le32_to_cpu(pentry->offset),
le32_to_cpu(pentry->length), pentry->attr);
+ j++;
}
pr_debug("SMEM partition table found: ver: %d len: %d\n",
- le32_to_cpu(ptable->version), numparts);
+ le32_to_cpu(ptable->version), tmpparts);
*pparts = parts;
return numparts;
out_free_parts:
- while (--i >= 0)
- kfree(parts[i].name);
+ while (--j >= 0)
+ kfree(parts[j].name);
kfree(parts);
*pparts = NULL;
@@ -166,6 +174,8 @@ static void parse_qcomsmem_cleanup(const struct mtd_partition *pparts,
for (i = 0; i < nr_parts; i++)
kfree(pparts[i].name);
+
+ kfree(pparts);
}
static const struct of_device_id qcomsmem_of_match_table[] = {
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 062e6c2c45f5..a78fdf3b30f7 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -294,6 +294,8 @@ static void ubiblock_do_work(struct work_struct *work)
int ret;
struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
struct request *req = blk_mq_rq_from_pdu(pdu);
+ struct req_iterator iter;
+ struct bio_vec bvec;
blk_mq_start_request(req);
@@ -305,7 +307,9 @@ static void ubiblock_do_work(struct work_struct *work)
blk_rq_map_sg(req->q, req, pdu->usgl.sg);
ret = ubiblock_read(pdu);
- rq_flush_dcache_pages(req);
+
+ rq_for_each_segment(bvec, req, iter)
+ flush_dcache_page(bvec.bv_page);
blk_mq_end_request(req, errno_to_blk_status(ret));
}
@@ -426,6 +430,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
ret = -ENODEV;
goto out_cleanup_disk;
}
+ gd->flags |= GENHD_FL_NO_PART;
gd->private_data = dev;
sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
set_capacity(gd, disk_capacity);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 6006c2e8fa2b..a86b1f71762e 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -225,7 +225,7 @@ static inline int __check_agg_selection_timer(struct port *port)
if (bond == NULL)
return 0;
- return BOND_AD_INFO(bond).agg_select_timer ? 1 : 0;
+ return atomic_read(&BOND_AD_INFO(bond).agg_select_timer) ? 1 : 0;
}
/**
@@ -1021,8 +1021,8 @@ static void ad_mux_machine(struct port *port, bool *update_slave_arr)
if (port->aggregator &&
port->aggregator->is_active &&
!__port_is_enabled(port)) {
-
__enable_port(port);
+ *update_slave_arr = true;
}
}
break;
@@ -1779,6 +1779,7 @@ static void ad_agg_selection_logic(struct aggregator *agg,
port = port->next_port_in_aggregator) {
__enable_port(port);
}
+ *update_slave_arr = true;
}
}
@@ -1994,7 +1995,7 @@ static void ad_marker_response_received(struct bond_marker *marker,
*/
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
{
- BOND_AD_INFO(bond).agg_select_timer = timeout;
+ atomic_set(&BOND_AD_INFO(bond).agg_select_timer, timeout);
}
/**
@@ -2278,6 +2279,28 @@ void bond_3ad_update_ad_actor_settings(struct bonding *bond)
}
/**
+ * bond_agg_timer_advance - advance agg_select_timer
+ * @bond: bonding structure
+ *
+ * Return true when agg_select_timer reaches 0.
+ */
+static bool bond_agg_timer_advance(struct bonding *bond)
+{
+ int val, nval;
+
+ while (1) {
+ val = atomic_read(&BOND_AD_INFO(bond).agg_select_timer);
+ if (!val)
+ return false;
+ nval = val - 1;
+ if (atomic_cmpxchg(&BOND_AD_INFO(bond).agg_select_timer,
+ val, nval) == val)
+ break;
+ }
+ return nval == 0;
+}
+
+/**
* bond_3ad_state_machine_handler - handle state machines timeout
* @work: work context to fetch bonding struct to work on from
*
@@ -2312,9 +2335,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
if (!bond_has_slaves(bond))
goto re_arm;
- /* check if agg_select_timer timer after initialize is timed out */
- if (BOND_AD_INFO(bond).agg_select_timer &&
- !(--BOND_AD_INFO(bond).agg_select_timer)) {
+ if (bond_agg_timer_advance(bond)) {
slave = bond_first_slave_rcu(bond);
port = slave ? &(SLAVE_AD_INFO(slave)->port) : NULL;
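Note: the cmpxchg loop in bond_agg_timer_advance() atomically decrements the timer and reports the 1 -> 0 transition, so exactly one caller acts on expiry even if several race. An equivalent sketch using atomic_try_cmpxchg(), which refreshes val on failure (illustrative, not the committed code):

	#include <linux/atomic.h>
	#include <linux/types.h>

	static bool agg_timer_tick(atomic_t *timer)
	{
		int val = atomic_read(timer);

		do {
			if (!val)
				return false;	/* already expired */
		} while (!atomic_try_cmpxchg(timer, &val, val - 1));

		return val == 1;		/* this caller made it hit zero */
	}
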
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 07fc603c2fa7..aebeb46e6fa6 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2379,10 +2379,9 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_select_active_slave(bond);
}
- if (!bond_has_slaves(bond)) {
- bond_set_carrier(bond);
+ bond_set_carrier(bond);
+ if (!bond_has_slaves(bond))
eth_hw_addr_random(bond_dev);
- }
unblock_netpoll_tx();
synchronize_rcu();
@@ -3874,8 +3873,8 @@ u32 bond_xmit_hash(struct bonding *bond, struct sk_buff *skb)
skb->l4_hash)
return skb->hash;
- return __bond_xmit_hash(bond, skb, skb->head, skb->protocol,
- skb->mac_header, skb->network_header,
+ return __bond_xmit_hash(bond, skb, skb->data, skb->protocol,
+ skb_mac_offset(skb), skb_network_offset(skb),
skb_headlen(skb));
}
@@ -4133,9 +4132,7 @@ static int bond_eth_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cm
fallthrough;
case SIOCGHWTSTAMP:
- rcu_read_lock();
real_dev = bond_option_active_slave_get_rcu(bond);
- rcu_read_unlock();
if (!real_dev)
return -EOPNOTSUPP;
@@ -4884,25 +4881,39 @@ static netdev_tx_t bond_xmit_broadcast(struct sk_buff *skb,
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave = NULL;
struct list_head *iter;
+ bool xmit_suc = false;
+ bool skb_used = false;
bond_for_each_slave_rcu(bond, slave, iter) {
- if (bond_is_last_slave(bond, slave))
- break;
- if (bond_slave_is_up(slave) && slave->link == BOND_LINK_UP) {
- struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+ struct sk_buff *skb2;
+
+ if (!(bond_slave_is_up(slave) && slave->link == BOND_LINK_UP))
+ continue;
+ if (bond_is_last_slave(bond, slave)) {
+ skb2 = skb;
+ skb_used = true;
+ } else {
+ skb2 = skb_clone(skb, GFP_ATOMIC);
if (!skb2) {
net_err_ratelimited("%s: Error: %s: skb_clone() failed\n",
bond_dev->name, __func__);
continue;
}
- bond_dev_queue_xmit(bond, skb2, slave->dev);
}
+
+ if (bond_dev_queue_xmit(bond, skb2, slave->dev) == NETDEV_TX_OK)
+ xmit_suc = true;
}
- if (slave && bond_slave_is_up(slave) && slave->link == BOND_LINK_UP)
- return bond_dev_queue_xmit(bond, skb, slave->dev);
- return bond_tx_drop(bond_dev, skb);
+ if (!skb_used)
+ dev_kfree_skb_any(skb);
+
+ if (xmit_suc)
+ return NETDEV_TX_OK;
+
+ atomic_long_inc(&bond_dev->tx_dropped);
+ return NET_XMIT_DROP;
}
/*------------------------- Device initialization ---------------------------*/
@@ -5368,9 +5379,7 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
struct net_device *real_dev;
struct phy_device *phydev;
- rcu_read_lock();
real_dev = bond_option_active_slave_get_rcu(bond);
- rcu_read_unlock();
if (real_dev) {
ops = real_dev->ethtool_ops;
phydev = real_dev->phydev;
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 2ec11af5f0cc..46b150e6289e 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -11,7 +11,7 @@
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
loff_t off = 0;
@@ -30,7 +30,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
struct list_head *iter;
struct slave *slave;
bool found = false;
@@ -57,7 +57,7 @@ static void bond_info_seq_stop(struct seq_file *seq, void *v)
static void bond_info_show_master(struct seq_file *seq)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
const struct bond_opt_value *optval;
struct slave *curr, *primary;
int i;
@@ -175,7 +175,7 @@ static void bond_info_show_master(struct seq_file *seq)
static void bond_info_show_slave(struct seq_file *seq,
const struct slave *slave)
{
- struct bonding *bond = PDE_DATA(file_inode(seq->file));
+ struct bonding *bond = pde_data(file_inode(seq->file));
seq_printf(seq, "\nSlave Interface: %s\n", slave->dev->name);
seq_printf(seq, "MII Status: %s\n", bond_slave_link_status(slave->link));
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 91230894692d..444ef6a342f6 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -754,7 +754,7 @@ static void cfv_remove(struct virtio_device *vdev)
debugfs_remove_recursive(cfv->debugfs);
vringh_kiov_cleanup(&cfv->ctx.riov);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->vringh_config->del_vrhs(cfv->vdev);
cfv->vr_rx = NULL;
vdev->config->del_vqs(cfv->vdev);
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index 0bff1884d5cc..74d7fcbfd065 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -296,6 +296,7 @@ static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16 |
+ FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
};
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index fccdff8b1f0f..23fc09a7e10f 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -21,7 +21,7 @@
* Below is some version info we got:
* SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR rece- FD Mode MB
* Filter? connected? Passive detection ption in MB Supported?
- * MCF5441X FlexCAN2 ? no yes no no yes no 16
+ * MCF5441X FlexCAN2 ? no yes no no no no 16
* MX25 FlexCAN2 03.00.00.00 no no no no no no 64
* MX28 FlexCAN2 03.00.04.00 yes yes no no no no 64
* MX35 FlexCAN2 03.00.00.00 no no no no no no 64
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 5b47cd867783..1a4b56f6fa8c 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -336,6 +336,9 @@ m_can_fifo_read(struct m_can_classdev *cdev,
u32 addr_offset = cdev->mcfg[MRAM_RXF0].off + fgi * RXF0_ELEMENT_SIZE +
offset;
+ if (val_count == 0)
+ return 0;
+
return cdev->ops->read_fifo(cdev, addr_offset, val, val_count);
}
@@ -346,6 +349,9 @@ m_can_fifo_write(struct m_can_classdev *cdev,
u32 addr_offset = cdev->mcfg[MRAM_TXB].off + fpi * TXB_ELEMENT_SIZE +
offset;
+ if (val_count == 0)
+ return 0;
+
return cdev->ops->write_fifo(cdev, addr_offset, val, val_count);
}
diff --git a/drivers/net/can/m_can/tcan4x5x-regmap.c b/drivers/net/can/m_can/tcan4x5x-regmap.c
index ca80dbaf7a3f..26e212b8ca7a 100644
--- a/drivers/net/can/m_can/tcan4x5x-regmap.c
+++ b/drivers/net/can/m_can/tcan4x5x-regmap.c
@@ -12,7 +12,7 @@
#define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24)
#define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24)
-#define TCAN4X5X_MAX_REGISTER 0x8ffc
+#define TCAN4X5X_MAX_REGISTER 0x87fc
static int tcan4x5x_regmap_gather_write(void *context,
const void *reg, size_t reg_len,
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index d4c7ce998a34..27783fbf011f 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -673,8 +673,8 @@ static void slcan_hangup(struct tty_struct *tty)
}
/* Perform I/O control on an active SLCAN channel. */
-static int slcan_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct slcan *sl = (struct slcan *) tty->disc_data;
unsigned int tmp;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 7b1457a6e327..0029d279616f 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -36,6 +36,7 @@ config NET_DSA_LANTIQ_GSWIP
config NET_DSA_MT7530
tristate "MediaTek MT753x and MT7621 Ethernet switch support"
select NET_DSA_TAG_MTK
+ select MEDIATEK_GE_PHY
help
This enables support for the MediaTek MT7530, MT7531, and MT7621
Ethernet switch chips.
@@ -81,6 +82,7 @@ config NET_DSA_REALTEK_SMI
config NET_DSA_SMSC_LAN9303
tristate
+ depends on VLAN_8021Q || VLAN_8021Q=n
select NET_DSA_TAG_LAN9303
select REGMAP
help
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 33499fcd8848..6afb5db8244c 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -621,7 +621,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;
- priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
+ priv->slave_mii_bus = mdiobus_alloc();
if (!priv->slave_mii_bus) {
of_node_put(dn);
return -ENOMEM;
@@ -681,8 +681,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
}
err = mdiobus_register(priv->slave_mii_bus);
- if (err && dn)
+ if (err && dn) {
+ mdiobus_free(priv->slave_mii_bus);
of_node_put(dn);
+ }
return err;
}
@@ -690,6 +692,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
static void bcm_sf2_mdio_unregister(struct bcm_sf2_priv *priv)
{
mdiobus_unregister(priv->slave_mii_bus);
+ mdiobus_free(priv->slave_mii_bus);
of_node_put(priv->master_mii_dn);
}
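Note: this and the following DSA hunks all drop devm_mdiobus_alloc() in favour of a manually managed bus, so the bus is unregistered before it is freed in the order the teardown path requires. The balanced lifecycle they converge on, in sketch form (per-driver storage and bus setup abbreviated):

	#include <linux/errno.h>
	#include <linux/phy.h>

	static struct mii_bus *bus;	/* hypothetical; driver-private in practice */

	static int bus_setup(void)
	{
		int err;

		bus = mdiobus_alloc();
		if (!bus)
			return -ENOMEM;
		/* ... fill in name, id, read/write ops, parent ... */
		err = mdiobus_register(bus);
		if (err)
			mdiobus_free(bus);	/* every error path frees the bus */
		return err;
	}

	static void bus_teardown(void)
	{
		mdiobus_unregister(bus);
		mdiobus_free(bus);		/* unregister alone does not free */
	}
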
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index d55784d19fa4..3969d89fa4db 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -10,6 +10,7 @@
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include "lan9303.h"
@@ -1083,21 +1084,27 @@ static void lan9303_adjust_link(struct dsa_switch *ds, int port,
static int lan9303_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
+ vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
return lan9303_enable_processing_port(chip, port);
}
static void lan9303_port_disable(struct dsa_switch *ds, int port)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct lan9303 *chip = ds->priv;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return;
+ vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
}
@@ -1310,7 +1317,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
struct device_node *np)
{
chip->reset_gpio = devm_gpiod_get_optional(chip->dev, "reset",
- GPIOD_OUT_LOW);
+ GPIOD_OUT_HIGH);
if (IS_ERR(chip->reset_gpio))
return PTR_ERR(chip->reset_gpio);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 46ed953e787e..8a7a8093a156 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -498,8 +498,9 @@ static int gswip_mdio_rd(struct mii_bus *bus, int addr, int reg)
static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
{
struct dsa_switch *ds = priv->ds;
+ int err;
- ds->slave_mii_bus = devm_mdiobus_alloc(priv->dev);
+ ds->slave_mii_bus = mdiobus_alloc();
if (!ds->slave_mii_bus)
return -ENOMEM;
@@ -512,7 +513,11 @@ static int gswip_mdio(struct gswip_priv *priv, struct device_node *mdio_np)
ds->slave_mii_bus->parent = priv->dev;
ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
- return of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ err = of_mdiobus_register(ds->slave_mii_bus, mdio_np);
+ if (err)
+ mdiobus_free(ds->slave_mii_bus);
+
+ return err;
}
static int gswip_pce_table_entry_read(struct gswip_priv *priv,
@@ -2145,8 +2150,10 @@ disable_switch:
gswip_mdio_mask(priv, GSWIP_MDIO_GLOB_ENABLE, 0, GSWIP_MDIO_GLOB);
dsa_unregister_switch(priv->ds);
mdio_bus:
- if (mdio_np)
+ if (mdio_np) {
mdiobus_unregister(priv->ds->slave_mii_bus);
+ mdiobus_free(priv->ds->slave_mii_bus);
+ }
put_mdio_node:
of_node_put(mdio_np);
for (i = 0; i < priv->num_gphy_fw; i++)
@@ -2170,6 +2177,7 @@ static int gswip_remove(struct platform_device *pdev)
if (priv->ds->slave_mii_bus) {
mdiobus_unregister(priv->ds->slave_mii_bus);
of_node_put(priv->ds->slave_mii_bus->dev.of_node);
+ mdiobus_free(priv->ds->slave_mii_bus);
}
for (i = 0; i < priv->num_gphy_fw; i++)
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index b82512e5b33b..ff3c267d0f26 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2074,7 +2074,7 @@ mt7530_setup_mdio(struct mt7530_priv *priv)
if (priv->irq)
mt7530_setup_mdio_irq(priv);
- ret = mdiobus_register(bus);
+ ret = devm_mdiobus_register(dev, bus);
if (ret) {
dev_err(dev, "failed to register MDIO bus: %d\n", ret);
if (priv->irq)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 58ca684d73f7..ab1676553714 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2284,6 +2284,13 @@ static int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port,
if (!mv88e6xxx_max_vid(chip))
return -EOPNOTSUPP;
+ /* The ATU removal procedure needs the FID to be mapped in the VTU,
+ * but FDB deletion runs concurrently with VLAN deletion. Flush the DSA
+ * switchdev workqueue to ensure that all FDB entries are deleted
+ * before we remove the VLAN.
+ */
+ dsa_flush_workqueue();
+
mv88e6xxx_reg_lock(chip);
err = mv88e6xxx_port_get_pvid(chip, port, &pvid);
@@ -3399,7 +3406,7 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
return err;
}
- bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
+ bus = mdiobus_alloc_size(sizeof(*mdio_bus));
if (!bus)
return -ENOMEM;
@@ -3424,14 +3431,14 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
if (!external) {
err = mv88e6xxx_g2_irq_mdio_setup(chip, bus);
if (err)
- return err;
+ goto out;
}
err = of_mdiobus_register(bus, np);
if (err) {
dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
mv88e6xxx_g2_irq_mdio_free(chip, bus);
- return err;
+ goto out;
}
if (external)
@@ -3440,21 +3447,26 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
list_add(&mdio_bus->list, &chip->mdios);
return 0;
+
+out:
+ mdiobus_free(bus);
+ return err;
}
static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
{
- struct mv88e6xxx_mdio_bus *mdio_bus;
+ struct mv88e6xxx_mdio_bus *mdio_bus, *p;
struct mii_bus *bus;
- list_for_each_entry(mdio_bus, &chip->mdios, list) {
+ list_for_each_entry_safe(mdio_bus, p, &chip->mdios, list) {
bus = mdio_bus->bus;
if (!mdio_bus->external)
mv88e6xxx_g2_irq_mdio_free(chip, bus);
mdiobus_unregister(bus);
+ mdiobus_free(bus);
}
}
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index bf8d38239e7e..33f0ceae381d 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1061,7 +1061,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return PTR_ERR(hw);
}
- bus = devm_mdiobus_alloc_size(dev, sizeof(*mdio_priv));
+ bus = mdiobus_alloc_size(sizeof(*mdio_priv));
if (!bus)
return -ENOMEM;
@@ -1081,6 +1081,7 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
rc = mdiobus_register(bus);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
+ mdiobus_free(bus);
return rc;
}
@@ -1132,6 +1133,7 @@ static void vsc9959_mdio_bus_free(struct ocelot *ocelot)
lynx_pcs_destroy(phylink_pcs);
}
mdiobus_unregister(felix->imdio);
+ mdiobus_free(felix->imdio);
}
static void vsc9959_sched_speed_set(struct ocelot *ocelot, int port,
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 8c1c9da61602..f2f1608a476c 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -1029,7 +1029,7 @@ static int vsc9953_mdio_bus_alloc(struct ocelot *ocelot)
}
/* Needed in order to initialize the bus mutex lock */
- rc = of_mdiobus_register(bus, NULL);
+ rc = devm_of_mdiobus_register(dev, bus, NULL);
if (rc < 0) {
dev_err(dev, "failed to register MDIO bus\n");
return rc;
@@ -1083,7 +1083,8 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
mdio_device_free(mdio_device);
lynx_pcs_destroy(phylink_pcs);
}
- mdiobus_unregister(felix->imdio);
+
+ /* mdiobus_unregister and mdiobus_free handled by devres */
}
static const struct felix_info seville_info_vsc9953 = {
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index da0d7e68643a..c39de2a4c1fe 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -378,7 +378,7 @@ static int ar9331_sw_mbus_init(struct ar9331_sw_priv *priv)
if (!mnp)
return -ENODEV;
- ret = of_mdiobus_register(mbus, mnp);
+ ret = devm_of_mdiobus_register(dev, mbus, mnp);
of_node_put(mnp);
if (ret)
return ret;
@@ -1091,7 +1091,6 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
}
irq_domain_remove(priv->irqdomain);
- mdiobus_unregister(priv->mbus);
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 481f1df3106c..8aec5d9fbfef 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2278,6 +2278,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *dev;
struct typhoon *tp;
int card_id = (int) ent->driver_data;
+ u8 addr[ETH_ALEN] __aligned(4);
void __iomem *ioaddr;
void *shared;
dma_addr_t shared_dma;
@@ -2409,8 +2410,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto error_out_reset;
}
- *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
- *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ *(__be16 *)&addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
+ *(__be32 *)&addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
+ eth_hw_addr_set(dev, addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
err_msg = "Could not obtain valid ethernet address, aborting";
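Note: netdev->dev_addr is no longer written directly; drivers assemble the address in a local buffer and install it with eth_hw_addr_set(), as the typhoon hunk above does. Sketch, with the source of the bytes elided:

	#include <linux/etherdevice.h>
	#include <linux/string.h>

	static void install_mac(struct net_device *dev, const u8 *hw_bytes)
	{
		u8 addr[ETH_ALEN];

		memcpy(addr, hw_bytes, ETH_ALEN); /* from EEPROM/firmware/etc. */
		eth_hw_addr_set(dev, addr);	  /* dev->dev_addr is const now */
	}
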
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index bd22a534b1c0..e7b879123bb1 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -655,6 +655,7 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
struct ei_device *ei_local;
struct net_device *dev;
struct etherh_priv *eh;
+ u8 addr[ETH_ALEN];
int ret;
ret = ecard_request_resources(ec);
@@ -724,12 +725,13 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
spin_lock_init(&ei_local->page_lock);
if (ec->cid.product == PROD_ANT_ETHERM) {
- etherm_addr(dev->dev_addr);
+ etherm_addr(addr);
ei_local->reg_offset = etherm_regoffsets;
} else {
- etherh_addr(dev->dev_addr, ec);
+ etherh_addr(addr, ec);
ei_local->reg_offset = etherh_regoffsets;
}
+ eth_hw_addr_set(dev, addr);
ei_local->name = dev->name;
ei_local->word16 = 1;
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 849de4564709..621ce742ad21 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -106,9 +106,9 @@ static void emac_update_speed(struct net_device *dev)
/* set EMAC SPEED, depend on PHY */
reg_val = readl(db->membase + EMAC_MAC_SUPP_REG);
- reg_val &= ~(0x1 << 8);
+ reg_val &= ~EMAC_MAC_SUPP_100M;
if (db->speed == SPEED_100)
- reg_val |= 1 << 8;
+ reg_val |= EMAC_MAC_SUPP_100M;
writel(reg_val, db->membase + EMAC_MAC_SUPP_REG);
}
@@ -264,7 +264,7 @@ static void emac_dma_done_callback(void *arg)
/* re-enable interrupt */
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0x01 << 8);
+ reg_val |= EMAC_INT_CTL_RX_EN;
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
db->emacrx_completed_flag = 1;
@@ -429,7 +429,7 @@ static unsigned int emac_powerup(struct net_device *ndev)
/* initial EMAC */
/* flush RX FIFO */
reg_val = readl(db->membase + EMAC_RX_CTL_REG);
- reg_val |= 0x8;
+ reg_val |= EMAC_RX_CTL_FLUSH_FIFO;
writel(reg_val, db->membase + EMAC_RX_CTL_REG);
udelay(1);
@@ -441,8 +441,8 @@ static unsigned int emac_powerup(struct net_device *ndev)
/* set MII clock */
reg_val = readl(db->membase + EMAC_MAC_MCFG_REG);
- reg_val &= (~(0xf << 2));
- reg_val |= (0xD << 2);
+ reg_val &= ~EMAC_MAC_MCFG_MII_CLKD_MASK;
+ reg_val |= EMAC_MAC_MCFG_MII_CLKD_72;
writel(reg_val, db->membase + EMAC_MAC_MCFG_REG);
/* clear RX counter */
@@ -506,7 +506,7 @@ static void emac_init_device(struct net_device *dev)
/* enable RX/TX0/RX Hlevel interrupt */
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
spin_unlock_irqrestore(&db->lock, flags);
@@ -637,7 +637,9 @@ static void emac_rx(struct net_device *dev)
if (!rxcount) {
db->emacrx_completed_flag = 1;
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN |
+ EMAC_INT_CTL_TX_ABRT_EN |
+ EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
/* had one stuck? */
@@ -669,7 +671,9 @@ static void emac_rx(struct net_device *dev)
writel(reg_val | EMAC_CTL_RX_EN,
db->membase + EMAC_CTL_REG);
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN |
+ EMAC_INT_CTL_TX_ABRT_EN |
+ EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
db->emacrx_completed_flag = 1;
@@ -783,20 +787,20 @@ static irqreturn_t emac_interrupt(int irq, void *dev_id)
}
/* Transmit Interrupt check */
- if (int_status & (0x01 | 0x02))
+ if (int_status & EMAC_INT_STA_TX_COMPLETE)
emac_tx_done(dev, db, int_status);
- if (int_status & (0x04 | 0x08))
+ if (int_status & EMAC_INT_STA_TX_ABRT)
netdev_info(dev, " ab : %x\n", int_status);
/* Re-enable interrupt mask */
if (db->emacrx_completed_flag == 1) {
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0) | (0x01 << 8);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN | EMAC_INT_CTL_RX_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
} else {
reg_val = readl(db->membase + EMAC_INT_CTL_REG);
- reg_val |= (0xf << 0);
+ reg_val |= (EMAC_INT_CTL_TX_EN | EMAC_INT_CTL_TX_ABRT_EN);
writel(reg_val, db->membase + EMAC_INT_CTL_REG);
}
@@ -1068,6 +1072,7 @@ out_clk_disable_unprepare:
clk_disable_unprepare(db->clk);
out_dispose_mapping:
irq_dispose_mapping(ndev->irq);
+ dma_release_channel(db->rx_chan);
out_iounmap:
iounmap(db->membase);
out:
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.h b/drivers/net/ethernet/allwinner/sun4i-emac.h
index 38c72d9ec600..90bd9ad77607 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.h
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.h
@@ -38,6 +38,7 @@
#define EMAC_RX_CTL_REG (0x3c)
#define EMAC_RX_CTL_AUTO_DRQ_EN (1 << 1)
#define EMAC_RX_CTL_DMA_EN (1 << 2)
+#define EMAC_RX_CTL_FLUSH_FIFO (1 << 3)
#define EMAC_RX_CTL_PASS_ALL_EN (1 << 4)
#define EMAC_RX_CTL_PASS_CTL_EN (1 << 5)
#define EMAC_RX_CTL_PASS_CRC_ERR_EN (1 << 6)
@@ -61,7 +62,21 @@
#define EMAC_RX_IO_DATA_STATUS_OK (1 << 7)
#define EMAC_RX_FBC_REG (0x50)
#define EMAC_INT_CTL_REG (0x54)
+#define EMAC_INT_CTL_RX_EN (1 << 8)
+#define EMAC_INT_CTL_TX0_EN (1)
+#define EMAC_INT_CTL_TX1_EN (1 << 1)
+#define EMAC_INT_CTL_TX_EN (EMAC_INT_CTL_TX0_EN | EMAC_INT_CTL_TX1_EN)
+#define EMAC_INT_CTL_TX0_ABRT_EN (0x1 << 2)
+#define EMAC_INT_CTL_TX1_ABRT_EN (0x1 << 3)
+#define EMAC_INT_CTL_TX_ABRT_EN (EMAC_INT_CTL_TX0_ABRT_EN | EMAC_INT_CTL_TX1_ABRT_EN)
#define EMAC_INT_STA_REG (0x58)
+#define EMAC_INT_STA_TX0_COMPLETE (0x1)
+#define EMAC_INT_STA_TX1_COMPLETE (0x1 << 1)
+#define EMAC_INT_STA_TX_COMPLETE (EMAC_INT_STA_TX0_COMPLETE | EMAC_INT_STA_TX1_COMPLETE)
+#define EMAC_INT_STA_TX0_ABRT (0x1 << 2)
+#define EMAC_INT_STA_TX1_ABRT (0x1 << 3)
+#define EMAC_INT_STA_TX_ABRT (EMAC_INT_STA_TX0_ABRT | EMAC_INT_STA_TX1_ABRT)
+#define EMAC_INT_STA_RX_COMPLETE (0x1 << 8)
#define EMAC_MAC_CTL0_REG (0x5c)
#define EMAC_MAC_CTL0_RX_FLOW_CTL_EN (1 << 2)
#define EMAC_MAC_CTL0_TX_FLOW_CTL_EN (1 << 3)
@@ -87,8 +102,11 @@
#define EMAC_MAC_CLRT_RM (0x0f)
#define EMAC_MAC_MAXF_REG (0x70)
#define EMAC_MAC_SUPP_REG (0x74)
+#define EMAC_MAC_SUPP_100M (0x1 << 8)
#define EMAC_MAC_TEST_REG (0x78)
#define EMAC_MAC_MCFG_REG (0x7c)
+#define EMAC_MAC_MCFG_MII_CLKD_MASK (0xff << 2)
+#define EMAC_MAC_MCFG_MII_CLKD_72 (0x0d << 2)
#define EMAC_MAC_A0_REG (0x98)
#define EMAC_MAC_A1_REG (0x9c)
#define EMAC_MAC_A2_REG (0xa0)
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index 493b0cefcc2a..ec8df05e7bf6 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -1032,6 +1032,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
int i, ret;
unsigned long esar_base;
unsigned char *esar;
+ u8 addr[ETH_ALEN];
const char *desc;
if (dec_lance_debug && version_printed++ == 0)
@@ -1228,7 +1229,8 @@ static int dec_lance_probe(struct device *bdev, const int type)
break;
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = esar[i * 4];
+ addr[i] = esar[i * 4];
+ eth_hw_addr_set(dev, addr);
printk("%s: %s, addr = %pM, irq = %d\n",
name, desc, dev->dev_addr, dev->irq);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 492ac383f16d..a3593290886f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
if (!channel->tx_ring)
break;
+ /* Deactivate the Tx timer */
del_timer_sync(&channel->tx_timer);
+ channel->tx_timer_active = 0;
}
}
@@ -2550,6 +2552,14 @@ read_again:
buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
len += buf2_len;
+ if (buf2_len > rdata->rx.buf.dma_len) {
+ /* Hardware inconsistency within the descriptors
+ * that has resulted in a length underflow.
+ */
+ error = 1;
+ goto skip_data;
+ }
+
if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata,
buf1_len);
@@ -2579,8 +2589,10 @@ skip_data:
if (!last || context_next)
goto read_again;
- if (!skb)
+ if (!skb || error) {
+ dev_kfree_skb(skb);
goto next_packet;
+ }
/* Be sure we don't exceed the configured MTU */
max_len = netdev->mtu + ETH_HLEN;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index efdcf484a510..2af3da4b2d05 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -425,6 +425,9 @@ static void xgbe_pci_remove(struct pci_dev *pdev)
pci_free_irq_vectors(pdata->pcidev);
+ /* Disable all interrupts in the hardware */
+ XP_IOWRITE(pdata, XP_INT_EN, 0x0);
+
xgbe_free_pdata(pdata);
}
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index 9a650d1c1bdd..4d2ba30c2fbd 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -1237,6 +1237,7 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
struct bmac_data *bp;
const unsigned char *prop_addr;
unsigned char addr[6];
+ u8 macaddr[6];
struct net_device *dev;
int is_bmac_plus = ((int)match->data) != 0;
@@ -1284,7 +1285,9 @@ static int bmac_probe(struct macio_dev *mdev, const struct of_device_id *match)
rev = addr[0] == 0 && addr[1] == 0xA0;
for (j = 0; j < 6; ++j)
- dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+ macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
+
+ eth_hw_addr_set(dev, macaddr);
/* Enable chip without interrupts for now */
bmac_enable_and_reset_chip(dev);
diff --git a/drivers/net/ethernet/apple/mace.c b/drivers/net/ethernet/apple/mace.c
index 4b80e3a52a19..6f8c91eb1263 100644
--- a/drivers/net/ethernet/apple/mace.c
+++ b/drivers/net/ethernet/apple/mace.c
@@ -90,7 +90,7 @@ static void mace_set_timeout(struct net_device *dev);
static void mace_tx_timeout(struct timer_list *t);
static inline void dbdma_reset(volatile struct dbdma_regs __iomem *dma);
static inline void mace_clean_rings(struct mace_data *mp);
-static void __mace_set_address(struct net_device *dev, void *addr);
+static void __mace_set_address(struct net_device *dev, const void *addr);
/*
* If we can't get a skbuff when we need it, we use this area for DMA.
@@ -112,6 +112,7 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
struct net_device *dev;
struct mace_data *mp;
const unsigned char *addr;
+ u8 macaddr[ETH_ALEN];
int j, rev, rc = -EBUSY;
if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) {
@@ -167,8 +168,9 @@ static int mace_probe(struct macio_dev *mdev, const struct of_device_id *match)
rev = addr[0] == 0 && addr[1] == 0xA0;
for (j = 0; j < 6; ++j) {
- dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j];
+ macaddr[j] = rev ? bitrev8(addr[j]): addr[j];
}
+ eth_hw_addr_set(dev, macaddr);
mp->chipid = (in_8(&mp->mace->chipid_hi) << 8) |
in_8(&mp->mace->chipid_lo);
@@ -369,11 +371,12 @@ static void mace_reset(struct net_device *dev)
out_8(&mb->plscc, PORTSEL_GPSI + ENPLSIO);
}
-static void __mace_set_address(struct net_device *dev, void *addr)
+static void __mace_set_address(struct net_device *dev, const void *addr)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
- unsigned char *p = addr;
+ const unsigned char *p = addr;
+ u8 macaddr[ETH_ALEN];
int i;
/* load up the hardware address */
@@ -385,7 +388,10 @@ static void __mace_set_address(struct net_device *dev, void *addr)
;
}
for (i = 0; i < 6; ++i)
- out_8(&mb->padr, dev->dev_addr[i] = p[i]);
+ out_8(&mb->padr, macaddr[i] = p[i]);
+
+ eth_hw_addr_set(dev, macaddr);
+
if (mp->chipid != BROKEN_ADDRCHG_REV)
out_8(&mb->iac, 0);
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 1bc4d33a0ce5..30a573db02bb 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -826,7 +826,6 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int hweight = 0;
int err = 0;
- int i;
if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
return -EOPNOTSUPP;
@@ -837,8 +836,7 @@ int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
- for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
- hweight += hweight_long(aq_nic->active_vlans[i]);
+ hweight = bitmap_weight(aq_nic->active_vlans, VLAN_N_VID);
err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
if (err)
@@ -871,7 +869,7 @@ int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
struct aq_hw_s *aq_hw = aq_nic->aq_hw;
int err = 0;
- memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
+ bitmap_zero(aq_nic->active_vlans, VLAN_N_VID);
aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
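Both hunks swap open-coded bitmap arithmetic for the dedicated helpers: bitmap_weight() replaces the per-word hweight_long() loop, and bitmap_zero() replaces the memset(). A small sketch showing that the two forms compute the same count (VLAN_N_VID comes from <linux/if_vlan.h>):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>

/* Sketch: the helper is equivalent to the loop it replaces. */
static int count_active_vlans(const unsigned long *active_vlans)
{
	int i, hweight = 0;

	for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
		hweight += hweight_long(active_vlans[i]);

	WARN_ON(hweight != bitmap_weight(active_vlans, VLAN_N_VID));
	return hweight;
}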
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index da595242bc13..f50604f3e541 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -900,7 +900,7 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
atl1c_clean_buffer(pdev, buffer_info);
}
- netdev_reset_queue(adapter->netdev);
+ netdev_tx_reset_queue(netdev_get_tx_queue(adapter->netdev, queue));
/* Zero out Tx-buffers */
memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index c6412c523637..b4381cd41979 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -172,6 +172,7 @@ static int bgmac_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct bgmac *bgmac;
+ struct resource *regs;
int ret;
bgmac = bgmac_alloc(&pdev->dev);
@@ -208,15 +209,23 @@ static int bgmac_probe(struct platform_device *pdev)
if (IS_ERR(bgmac->plat.base))
return PTR_ERR(bgmac->plat.base);
- bgmac->plat.idm_base = devm_platform_ioremap_resource_byname(pdev, "idm_base");
- if (IS_ERR(bgmac->plat.idm_base))
- return PTR_ERR(bgmac->plat.idm_base);
- else
+ /* The idm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+ if (regs) {
+ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK;
+ }
- bgmac->plat.nicpm_base = devm_platform_ioremap_resource_byname(pdev, "nicpm_base");
- if (IS_ERR(bgmac->plat.nicpm_base))
- return PTR_ERR(bgmac->plat.nicpm_base);
+ /* The nicpm_base resource is optional for some platforms */
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base");
+ if (regs) {
+ bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev,
+ regs);
+ if (IS_ERR(bgmac->plat.nicpm_base))
+ return PTR_ERR(bgmac->plat.nicpm_base);
+ }
bgmac->read = platform_bgmac_read;
bgmac->write = platform_bgmac_write;
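devm_platform_ioremap_resource_byname() treats a missing region as an error, which is wrong for regions that exist only on some platforms. The fix probes for the resource first and maps it only when present. A minimal sketch of that idiom:

#include <linux/platform_device.h>
#include <linux/io.h>

/* Sketch of the "optional MMIO region" idiom: probe for the named
 * resource first, and only treat a mapping failure as fatal when the
 * resource actually exists.
 */
static void __iomem *map_optional_region(struct platform_device *pdev,
					 const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (!res)
		return NULL;	/* absent: fine for this platform */

	return devm_ioremap_resource(&pdev->dev, res);	/* ERR_PTR on failure */
}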
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 226f4403cfed..87f1056e29ff 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -4020,10 +4020,12 @@ static int bcmgenet_probe(struct platform_device *pdev)
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = true;
- err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
- dev->name, priv);
- if (!err)
- device_set_wakeup_capable(&pdev->dev, 1);
+ if (priv->wol_irq > 0) {
+ err = devm_request_irq(&pdev->dev, priv->wol_irq,
+ bcmgenet_wol_isr, 0, dev->name, priv);
+ if (!err)
+ device_set_wakeup_capable(&pdev->dev, 1);
+ }
/* Set the needed headroom to account for any possible
* features enabling/disabling at runtime
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index f38f40eb966e..a1a38456c9a3 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2183,9 +2183,7 @@ static int sbmac_init(struct platform_device *pldev, long long base)
ea_reg >>= 8;
}
- for (i = 0; i < 6; i++) {
- dev->dev_addr[i] = eaddr[i];
- }
+ eth_hw_addr_set(dev, eaddr);
/*
* Initialize context (get pointers to registers and stuff), then
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index a363da928e8b..98498a76ae16 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -4712,7 +4712,7 @@ static int macb_probe(struct platform_device *pdev)
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
- dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
bp->hw_dma_cap |= HW_DMA_CAP_64B;
}
#endif
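dma_set_mask() configures only the streaming DMA mask; the coherent allocation mask keeps its old value, which can leave dma_alloc_coherent() buffers restricted below what the hardware supports. The combined helper sets both. A short sketch, including the conventional 32-bit fallback (the macb hunk itself ignores the return value, matching the code it replaces):

#include <linux/dma-mapping.h>

/* Sketch: set both streaming and coherent masks, falling back to
 * 32-bit addressing if the wider mask is rejected.
 */
static int example_set_dma_masks(struct device *dev)
{
	int err;

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(44));
	if (err)
		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return err;
}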
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index fed5f93bf620..26433a62d7f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -497,7 +497,7 @@ struct cpl_t5_pass_accept_rpl {
__be32 opt2;
__be64 opt0;
__be32 iss;
- __be32 rsvd;
+ __be32 rsvd[3];
};
struct cpl_act_open_req {
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
index d04a6c163445..da8d10475a08 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_cm.c
@@ -32,6 +32,7 @@
#include <linux/tcp.h>
#include <linux/ipv6.h>
+#include <net/inet_ecn.h>
#include <net/route.h>
#include <net/ip6_route.h>
@@ -99,7 +100,7 @@ cxgb_find_route(struct cxgb4_lld_info *lldi,
rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
peer_port, local_port, IPPROTO_TCP,
- tos, 0);
+ tos & ~INET_ECN_MASK, 0);
if (IS_ERR(rt))
return NULL;
n = dst_neigh_lookup(&rt->dst, &peer_ip);
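The low two bits of the IPv4 tos byte carry ECN state, not DSCP, so feeding the raw tos into a route lookup can select a different route for ECT/CE-marked traffic than for unmarked traffic on the same connection. Masking with ~INET_ECN_MASK keeps only the DSCP bits. A one-line sketch:

#include <net/inet_ecn.h>

/* Sketch: strip the two ECN bits before using tos as a routing key. */
static inline u8 route_tos(u8 tos)
{
	return tos & ~INET_ECN_MASK;	/* keep DSCP, drop ECT/CE bits */
}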
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 4db6889b79ba..1c81b161de52 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -150,10 +150,10 @@ static void enic_set_affinity_hint(struct enic *enic)
!cpumask_available(enic->msix[i].affinity_mask) ||
cpumask_empty(enic->msix[i].affinity_mask))
continue;
- err = irq_set_affinity_hint(enic->msix_entry[i].vector,
- enic->msix[i].affinity_mask);
+ err = irq_update_affinity_hint(enic->msix_entry[i].vector,
+ enic->msix[i].affinity_mask);
if (err)
- netdev_warn(enic->netdev, "irq_set_affinity_hint failed, err %d\n",
+ netdev_warn(enic->netdev, "irq_update_affinity_hint failed, err %d\n",
err);
}
@@ -173,7 +173,7 @@ static void enic_unset_affinity_hint(struct enic *enic)
int i;
for (i = 0; i < enic->intr_count; i++)
- irq_set_affinity_hint(enic->msix_entry[i].vector, NULL);
+ irq_update_affinity_hint(enic->msix_entry[i].vector, NULL);
}
static int enic_udp_tunnel_set_port(struct net_device *netdev,
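irq_set_affinity_hint() both records the hint and immediately forces the IRQ's affinity, overriding whatever userspace or irqbalance configured; irq_update_affinity_hint() records the hint only. That is the behavior these conversions (here and in the be2net, hinic, i40e, and iavf hunks below) want. A minimal sketch of the set/clear pair:

#include <linux/interrupt.h>

/* Sketch: publish an affinity hint without forcing the affinity,
 * and clear it again on teardown.
 */
static void example_hint(unsigned int irq, const struct cpumask *mask)
{
	if (irq_update_affinity_hint(irq, mask))
		pr_warn("irq_update_affinity_hint failed for irq %u\n", irq);
}

static void example_unhint(unsigned int irq)
{
	irq_update_affinity_hint(irq, NULL);	/* drop the published hint */
}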
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ad67b4216079..d0c262f2695a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3491,7 +3491,7 @@ static int be_msix_register(struct be_adapter *adapter)
if (status)
goto err_msix;
- irq_set_affinity_hint(vec, eqo->affinity_mask);
+ irq_update_affinity_hint(vec, eqo->affinity_mask);
}
return 0;
@@ -3552,7 +3552,7 @@ static void be_irq_unregister(struct be_adapter *adapter)
/* MSIx */
for_all_evt_queues(adapter, eqo, i) {
vec = be_msix_vec_get(adapter, eqo);
- irq_set_affinity_hint(vec, NULL);
+ irq_update_affinity_hint(vec, NULL);
free_irq(vec, eqo);
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index d21ba70ef4a3..0f90d2d5bb60 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4246,7 +4246,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
}
irq = ls_dev->irqs[0];
- err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
+ err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
NULL, dpni_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(&ls_dev->dev), &ls_dev->dev);
@@ -4273,7 +4273,7 @@ static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
return 0;
free_irq:
- devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
+ devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
free_mc_irq:
fsl_mc_free_irqs(ls_dev);
@@ -4338,7 +4338,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
}
INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
-
+ mutex_init(&priv->onestep_tstamp_lock);
skb_queue_head_init(&priv->tx_skbs);
priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
@@ -4523,12 +4523,12 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
+
+ unregister_netdev(net_dev);
rtnl_lock();
dpaa2_eth_disconnect_mac(priv);
rtnl_unlock();
- unregister_netdev(net_dev);
-
dpaa2_eth_dl_port_del(priv);
dpaa2_eth_dl_traps_unregister(priv);
dpaa2_eth_dl_free(priv);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
index 32b5faa87bb8..5f5f8c53c4a0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c
@@ -129,7 +129,6 @@ static irqreturn_t dpaa2_ptp_irq_handler_thread(int irq, void *priv)
static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
{
struct device *dev = &mc_dev->dev;
- struct fsl_mc_device_irq *irq;
struct ptp_qoriq *ptp_qoriq;
struct device_node *node;
void __iomem *base;
@@ -177,8 +176,7 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev)
goto err_unmap;
}
- irq = mc_dev->irqs[0];
- ptp_qoriq->irq = irq->msi_desc->irq;
+ ptp_qoriq->irq = mc_dev->irqs[0]->virq;
err = request_threaded_irq(ptp_qoriq->irq, NULL,
dpaa2_ptp_irq_handler_thread,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index d6eefbbf163f..cacd454ac696 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -532,6 +532,7 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
struct flow_dissector *dissector = rule->match.dissector;
struct netlink_ext_ack *extack = cls->common.extack;
+ int ret = -EOPNOTSUPP;
if (dissector->used_keys &
~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
@@ -561,9 +562,10 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
}
*vlan = (u16)match.key->vlan_id;
+ ret = 0;
}
- return 0;
+ return ret;
}
static int
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 9b5512b4f15d..9a561072aa4a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -1554,8 +1554,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
irq = sw_dev->irqs[DPSW_IRQ_INDEX_IF];
- err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
- NULL,
+ err = devm_request_threaded_irq(dev, irq->virq, NULL,
dpaa2_switch_irq0_handler_thread,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
dev_name(dev), dev);
@@ -1581,7 +1580,7 @@ static int dpaa2_switch_setup_irqs(struct fsl_mc_device *sw_dev)
return 0;
free_devm_irq:
- devm_free_irq(dev, irq->msi_desc->irq, dev);
+ devm_free_irq(dev, irq->virq, dev);
free_irq:
fsl_mc_free_irqs(sw_dev);
return err;
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index bbbde9f701c2..be0bd4b44926 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -99,13 +99,13 @@ static void mpc52xx_fec_tx_timeout(struct net_device *dev, unsigned int txqueue)
netif_wake_queue(dev);
}
-static void mpc52xx_fec_set_paddr(struct net_device *dev, u8 *mac)
+static void mpc52xx_fec_set_paddr(struct net_device *dev, const u8 *mac)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
struct mpc52xx_fec __iomem *fec = priv->fec;
- out_be32(&fec->paddr1, *(u32 *)(&mac[0]));
- out_be32(&fec->paddr2, (*(u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
+ out_be32(&fec->paddr1, *(const u32 *)(&mac[0]));
+ out_be32(&fec->paddr2, (*(const u16 *)(&mac[4]) << 16) | FEC_PADDR2_TYPE);
}
static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
@@ -893,13 +893,15 @@ static int mpc52xx_fec_probe(struct platform_device *op)
rv = of_get_ethdev_address(np, ndev);
if (rv) {
struct mpc52xx_fec __iomem *fec = priv->fec;
+ u8 addr[ETH_ALEN] __aligned(4);
/*
* If the MAC address is not provided via DT then read
* it back from the controller regs
*/
- *(u32 *)(&ndev->dev_addr[0]) = in_be32(&fec->paddr1);
- *(u16 *)(&ndev->dev_addr[4]) = in_be32(&fec->paddr2) >> 16;
+ *(u32 *)(&addr[0]) = in_be32(&fec->paddr1);
+ *(u16 *)(&addr[4]) = in_be32(&fec->paddr2) >> 16;
+ eth_hw_addr_set(ndev, addr);
}
/*
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index 5b8b9bcf41a2..266e562bd67a 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -51,6 +51,7 @@ struct tgec_mdio_controller {
struct mdio_fsl_priv {
struct tgec_mdio_controller __iomem *mdio_base;
bool is_little_endian;
+ bool has_a009885;
bool has_a011043;
};
@@ -186,10 +187,10 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
{
struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv;
struct tgec_mdio_controller __iomem *regs = priv->mdio_base;
+ unsigned long flags;
uint16_t dev_addr;
uint32_t mdio_stat;
uint32_t mdio_ctl;
- uint16_t value;
int ret;
bool endian = priv->is_little_endian;
@@ -221,12 +222,18 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
return ret;
}
+ if (priv->has_a009885)
+ /* Once the operation completes, i.e. MDIO_STAT_BSY clears, we
+ * must read back the data register within 16 MDC cycles.
+ */
+ local_irq_save(flags);
+
/* Initiate the read */
xgmac_write32(mdio_ctl | MDIO_CTL_READ, &regs->mdio_ctl, endian);
ret = xgmac_wait_until_done(&bus->dev, regs, endian);
if (ret)
- return ret;
+ goto irq_restore;
/* Return all Fs if nothing was there */
if ((xgmac_read32(&regs->mdio_stat, endian) & MDIO_STAT_RD_ER) &&
@@ -234,13 +241,17 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
dev_dbg(&bus->dev,
"Error while reading PHY%d reg at %d.%hhu\n",
phy_id, dev_addr, regnum);
- return 0xffff;
+ ret = 0xffff;
+ } else {
+ ret = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
+ dev_dbg(&bus->dev, "read %04x\n", ret);
}
- value = xgmac_read32(&regs->mdio_data, endian) & 0xffff;
- dev_dbg(&bus->dev, "read %04x\n", value);
+irq_restore:
+ if (priv->has_a009885)
+ local_irq_restore(flags);
- return value;
+ return ret;
}
static int xgmac_mdio_probe(struct platform_device *pdev)
@@ -287,6 +298,8 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
priv->is_little_endian = device_property_read_bool(&pdev->dev,
"little-endian");
+ priv->has_a009885 = device_property_read_bool(&pdev->dev,
+ "fsl,erratum-a009885");
priv->has_a011043 = device_property_read_bool(&pdev->dev,
"fsl,erratum-a011043");
@@ -318,9 +331,10 @@ err_ioremap:
static int xgmac_mdio_remove(struct platform_device *pdev)
{
struct mii_bus *bus = platform_get_drvdata(pdev);
+ struct mdio_fsl_priv *priv = bus->priv;
mdiobus_unregister(bus);
- iounmap(bus->priv);
+ iounmap(priv->mdio_base);
mdiobus_free(bus);
return 0;
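For erratum A-009885 the data register must be read back within 16 MDC cycles of MDIO_STAT_BSY clearing, so the read sequence is bracketed with local_irq_save()/local_irq_restore() to keep an interrupt from stretching the window. A sketch of the shape, with do_mdio_read() as a hypothetical stand-in for the actual register sequence:

#include <linux/irqflags.h>

int do_mdio_read(void);	/* hypothetical: kick off and collect the read */

/* Sketch: only pay the IRQ-off cost on parts with the erratum. */
static int timed_read(bool has_a009885)
{
	unsigned long flags = 0;
	int val;

	if (has_a009885)
		local_irq_save(flags);	/* keep the 16-MDC-cycle window tight */

	val = do_mdio_read();

	if (has_a009885)
		local_irq_restore(flags);

	return val;
}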
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 5f5d4f7aa813..160735484465 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -843,7 +843,7 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
- enum dma_data_direction);
+ enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
/* tx handling */
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 2ad7f57f7e5b..f7621ab672b9 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -301,7 +301,7 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
*/
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
- u32 tail, head;
+ int tail, head;
int i;
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index f7f65c4bf993..54e51c8221b8 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -766,9 +766,9 @@ static void gve_free_rings(struct gve_priv *priv)
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
struct page **page, dma_addr_t *dma,
- enum dma_data_direction dir)
+ enum dma_data_direction dir, gfp_t gfp_flags)
{
- *page = alloc_page(GFP_KERNEL);
+ *page = alloc_page(gfp_flags);
if (!*page) {
priv->page_alloc_fail++;
return -ENOMEM;
@@ -811,7 +811,7 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
for (i = 0; i < pages; i++) {
err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
- gve_qpl_dma_dir(priv, id));
+ gve_qpl_dma_dir(priv, id), GFP_KERNEL);
/* caller handles clean up */
if (err)
return -ENOMEM;
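The new gfp_t parameter exists because the call sites run in different contexts: queue-page-list setup in the probe path may sleep (GFP_KERNEL), while the Rx buffer refill runs under NAPI in softirq context and must not (GFP_ATOMIC, as the gve_rx.c hunk below shows). A trivial sketch of the plumbing:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch: the caller, not the allocator, knows whether sleeping is safe. */
static struct page *alloc_rx_page(gfp_t gfp)
{
	return alloc_page(gfp);	/* GFP_KERNEL from probe, GFP_ATOMIC from NAPI poll */
}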
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 9ddcc497f48e..e4e98aa7745f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -86,7 +86,8 @@ static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
dma_addr_t dma;
int err;
- err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE);
+ err = gve_alloc_page(priv, dev, &page, &dma, DMA_FROM_DEVICE,
+ GFP_ATOMIC);
if (err)
return err;
@@ -608,6 +609,7 @@ static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
*packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0);
*work_done = work_cnt;
+ skb_record_rx_queue(skb, rx->q_num);
if (skb_is_nonlinear(skb))
napi_gro_frags(napi);
else
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index beb8bb079023..8c939628e2d8 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -157,7 +157,7 @@ static int gve_alloc_page_dqo(struct gve_priv *priv,
int err;
err = gve_alloc_page(priv, &priv->pdev->dev, &buf_state->page_info.page,
- &buf_state->addr, DMA_FROM_DEVICE);
+ &buf_state->addr, DMA_FROM_DEVICE, GFP_KERNEL);
if (err)
return err;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 7df87610ad96..21442a9bb996 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2043,8 +2043,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
break;
}
- if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
- hclgevf_enable_vector(&hdev->misc_vector, true);
+ hclgevf_enable_vector(&hdev->misc_vector, true);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index fed3b6bc0d76..b33ed4d92b71 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -548,7 +548,7 @@ static int rx_request_irq(struct hinic_rxq *rxq)
goto err_req_irq;
cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
- err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
+ err = irq_set_affinity_and_hint(rq->irq, &rq->affinity_mask);
if (err)
goto err_irq_affinity;
@@ -565,7 +565,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
{
struct hinic_rq *rq = rxq->rq;
- irq_set_affinity_hint(rq->irq, NULL);
+ irq_update_affinity_hint(rq->irq, NULL);
free_irq(rq->irq, rxq);
rx_del_napi(rxq);
}
diff --git a/drivers/net/ethernet/i825xx/ether1.c b/drivers/net/ethernet/i825xx/ether1.c
index c612ef526d16..3e7d7c4bafdc 100644
--- a/drivers/net/ethernet/i825xx/ether1.c
+++ b/drivers/net/ethernet/i825xx/ether1.c
@@ -986,6 +986,7 @@ static int
ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct net_device *dev;
+ u8 addr[ETH_ALEN];
int i, ret = 0;
ether1_banner();
@@ -1015,7 +1016,8 @@ ether1_probe(struct expansion_card *ec, const struct ecard_id *id)
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+ addr[i] = readb(IDPROM_ADDRESS + (i << 2));
+ eth_hw_addr_set(dev, addr);
if (ether1_init_2(dev)) {
ret = -ENODEV;
diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c
index 27937c5d7956..daec9ce04531 100644
--- a/drivers/net/ethernet/i825xx/sni_82596.c
+++ b/drivers/net/ethernet/i825xx/sni_82596.c
@@ -117,9 +117,10 @@ static int sni_82596_probe(struct platform_device *dev)
netdevice->dev_addr[5] = readb(eth_addr + 0x06);
iounmap(eth_addr);
- if (!netdevice->irq) {
+ if (netdevice->irq < 0) {
printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
__FILE__, netdevice->base_addr);
+ retval = netdevice->irq;
goto probe_failed;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 59536bd5cab1..29617a86b299 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -110,6 +110,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *tx_scrq);
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
+static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -1424,7 +1425,7 @@ static int __ibmvnic_open(struct net_device *netdev)
rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
if (rc) {
ibmvnic_napi_disable(adapter);
- release_resources(adapter);
+ ibmvnic_disable_irqs(adapter);
return rc;
}
@@ -1474,9 +1475,6 @@ static int ibmvnic_open(struct net_device *netdev)
rc = init_resources(adapter);
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
- release_resources(adapter);
- release_rx_pools(adapter);
- release_tx_pools(adapter);
goto out;
}
}
@@ -1493,6 +1491,13 @@ out:
adapter->state = VNIC_OPEN;
rc = 0;
}
+
+ if (rc) {
+ release_resources(adapter);
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
+ }
+
return rc;
}
@@ -2602,6 +2607,7 @@ static void __ibmvnic_reset(struct work_struct *work)
struct ibmvnic_rwi *rwi;
unsigned long flags;
u32 reset_state;
+ int num_fails = 0;
int rc = 0;
adapter = container_of(work, struct ibmvnic_adapter, ibmvnic_reset);
@@ -2655,11 +2661,23 @@ static void __ibmvnic_reset(struct work_struct *work)
rc = do_hard_reset(adapter, rwi, reset_state);
rtnl_unlock();
}
- if (rc) {
- /* give backing device time to settle down */
+ if (rc)
+ num_fails++;
+ else
+ num_fails = 0;
+
+ /* If auto-priority-failover is enabled we can get
+ * back-to-back failovers during resets, resulting
+ * in at least two failed resets (from the high-priority
+ * backing device to the low-priority one and back).
+ * If resets continue to fail beyond that, give the
+ * adapter some time to settle down before retrying.
+ */
+ if (num_fails >= 3) {
netdev_dbg(adapter->netdev,
- "[S:%s] Hard reset failed, waiting 60 secs\n",
- adapter_state_to_string(adapter->state));
+ "[S:%s] Hard reset failed %d times, waiting 60 secs\n",
+ adapter_state_to_string(adapter->state),
+ num_fails);
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
@@ -3844,11 +3862,25 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
struct device *dev = &adapter->vdev->dev;
union ibmvnic_crq crq;
int max_entries;
+ int cap_reqs;
+
+ /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
+ * the PROMISC flag). Initialize this count upfront. When the tasklet
+ * receives a response to all of these, it will send the next protocol
+ * message (QUERY_IP_OFFLOAD).
+ */
+ if (!(adapter->netdev->flags & IFF_PROMISC) ||
+ adapter->promisc_supported)
+ cap_reqs = 7;
+ else
+ cap_reqs = 6;
if (!retry) {
/* Sub-CRQ entries are 32 byte long */
int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
+ atomic_set(&adapter->running_cap_crqs, cap_reqs);
+
if (adapter->min_tx_entries_per_subcrq > entries_page ||
adapter->min_rx_add_entries_per_subcrq > entries_page) {
dev_err(dev, "Fatal, invalid entries per sub-crq\n");
@@ -3909,44 +3941,45 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
adapter->opt_rx_comp_queues;
adapter->req_rx_add_queues = adapter->max_rx_add_queues;
+ } else {
+ atomic_add(cap_reqs, &adapter->running_cap_crqs);
}
-
memset(&crq, 0, sizeof(crq));
crq.request_capability.first = IBMVNIC_CRQ_CMD;
crq.request_capability.cmd = REQUEST_CAPABILITY;
crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_tx_entries_per_subcrq);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability =
cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
crq.request_capability.number =
cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
crq.request_capability.capability = cpu_to_be16(REQ_MTU);
crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
if (adapter->netdev->flags & IFF_PROMISC) {
@@ -3954,16 +3987,21 @@ static void send_request_cap(struct ibmvnic_adapter *adapter, int retry)
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(1);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
} else {
crq.request_capability.capability =
cpu_to_be16(PROMISC_REQUESTED);
crq.request_capability.number = cpu_to_be64(0);
- atomic_inc(&adapter->running_cap_crqs);
+ cap_reqs--;
ibmvnic_send_crq(adapter, &crq);
}
+
+ /* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+ WARN_ON(cap_reqs != 0);
}
static int pending_scrq(struct ibmvnic_adapter *adapter,
@@ -4357,118 +4395,132 @@ static void send_query_map(struct ibmvnic_adapter *adapter)
static void send_query_cap(struct ibmvnic_adapter *adapter)
{
union ibmvnic_crq crq;
+ int cap_reqs;
+
+ /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
+ * upfront. When the tasklet receives a response to all of these, it
+ * can send out the next protocol message (REQUEST_CAPABILITY).
+ */
+ cap_reqs = 25;
+
+ atomic_set(&adapter->running_cap_crqs, cap_reqs);
- atomic_set(&adapter->running_cap_crqs, 0);
memset(&crq, 0, sizeof(crq));
crq.query_capability.first = IBMVNIC_CRQ_CMD;
crq.query_capability.cmd = QUERY_CAPABILITY;
crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MIN_MTU);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MTU);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability =
cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
- atomic_inc(&adapter->running_cap_crqs);
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
- atomic_inc(&adapter->running_cap_crqs);
+
ibmvnic_send_crq(adapter, &crq);
+ cap_reqs--;
+
+ /* Keep at end to catch any discrepancy between expected and actual
+ * CRQs sent.
+ */
+ WARN_ON(cap_reqs != 0);
}
static void send_query_ip_offload(struct ibmvnic_adapter *adapter)
@@ -4772,6 +4824,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
char *name;
atomic_dec(&adapter->running_cap_crqs);
+ netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
+ atomic_read(&adapter->running_cap_crqs));
switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
case REQ_TX_QUEUES:
req_value = &adapter->req_tx_queues;
@@ -4835,10 +4889,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
}
/* Done receiving requested capabilities, query IP offload support */
- if (atomic_read(&adapter->running_cap_crqs) == 0) {
- adapter->wait_capability = false;
+ if (atomic_read(&adapter->running_cap_crqs) == 0)
send_query_ip_offload(adapter);
- }
}
static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
@@ -5136,10 +5188,8 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
}
out:
- if (atomic_read(&adapter->running_cap_crqs) == 0) {
- adapter->wait_capability = false;
+ if (atomic_read(&adapter->running_cap_crqs) == 0)
send_request_cap(adapter, 0);
- }
}
static int send_query_phys_parms(struct ibmvnic_adapter *adapter)
@@ -5435,33 +5485,21 @@ static void ibmvnic_tasklet(struct tasklet_struct *t)
struct ibmvnic_crq_queue *queue = &adapter->crq;
union ibmvnic_crq *crq;
unsigned long flags;
- bool done = false;
spin_lock_irqsave(&queue->lock, flags);
- while (!done) {
- /* Pull all the valid messages off the CRQ */
- while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
- /* This barrier makes sure ibmvnic_next_crq()'s
- * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
- * before ibmvnic_handle_crq()'s
- * switch(gen_crq->first) and switch(gen_crq->cmd).
- */
- dma_rmb();
- ibmvnic_handle_crq(crq, adapter);
- crq->generic.first = 0;
- }
- /* remain in tasklet until all
- * capabilities responses are received
+ /* Pull all the valid messages off the CRQ */
+ while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
*/
- if (!adapter->wait_capability)
- done = true;
+ dma_rmb();
+ ibmvnic_handle_crq(crq, adapter);
+ crq->generic.first = 0;
}
- /* if capabilities CRQ's were sent in this tasklet, the following
- * tasklet must wait until all responses are received
- */
- if (atomic_read(&adapter->running_cap_crqs) != 0)
- adapter->wait_capability = true;
+
spin_unlock_irqrestore(&queue->lock, flags);
}
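The running_cap_crqs rework replaces per-send atomic_inc() calls, which raced against the tasklet decrementing the same counter, with a total computed upfront and published before the first CRQ goes out; the WARN_ON() catches any drift between the expected and actual number of sends, and the wait_capability flag (and the tasklet loop built around it) becomes unnecessary. A generic sketch of the count-upfront pattern (the function-pointer parameters are illustrative, not the driver's types):

#include <linux/atomic.h>

/* Sketch: publish the expected response count before sending, then
 * fire the next protocol step when the last response drains it.
 */
static void send_batch(atomic_t *pending, int nreqs, void (*send_one)(int))
{
	int i;

	atomic_set(pending, nreqs);	/* visible before any response can arrive */
	for (i = 0; i < nreqs; i++)
		send_one(i);
}

static void on_response(atomic_t *pending, void (*next_step)(void))
{
	if (atomic_dec_and_test(pending))	/* last response arrived */
		next_step();
}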
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 4a8f36e0ab07..4a7a56ff74ce 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -919,7 +919,6 @@ struct ibmvnic_adapter {
int login_rsp_buf_sz;
atomic_t running_cap_crqs;
- bool wait_capability;
struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index c3def0ee7788..8d06c9d8ff8b 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -115,7 +115,8 @@ enum e1000_boards {
board_pch_lpt,
board_pch_spt,
board_pch_cnp,
- board_pch_tgp
+ board_pch_tgp,
+ board_pch_adp
};
struct e1000_ps_page {
@@ -502,6 +503,7 @@ extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_pch_cnp_info;
extern const struct e1000_info e1000_pch_tgp_info;
+extern const struct e1000_info e1000_pch_adp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 5e4fc9b4e2ad..c908c84b86d2 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -6021,3 +6021,23 @@ const struct e1000_info e1000_pch_tgp_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
+
+const struct e1000_info e1000_pch_adp_info = {
+ .mac = e1000_pch_adp,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9022,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 635a95927e93..a42aeb555f34 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -52,6 +52,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch_spt] = &e1000_pch_spt_info,
[board_pch_cnp] = &e1000_pch_cnp_info,
[board_pch_tgp] = &e1000_pch_tgp_info,
+ [board_pch_adp] = &e1000_pch_adp_info,
};
struct e1000_reg_info {
@@ -6341,7 +6342,8 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
u32 mac_data;
u16 phy_data;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
/* Request ME configure the device for S0ix */
mac_data = er32(H2ME);
mac_data |= E1000_H2ME_START_DPG;
@@ -6490,7 +6492,8 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
u16 phy_data;
u32 i = 0;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID &&
+ hw->mac.type >= e1000_pch_adp) {
/* Request ME unconfigure the device from S0ix */
mac_data = er32(H2ME);
mac_data &= ~E1000_H2ME_START_DPG;
@@ -7898,22 +7901,22 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_adp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_adp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 4d939af0a626..80c5cecaf2b5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -144,6 +144,7 @@ enum i40e_state_t {
__I40E_VIRTCHNL_OP_PENDING,
__I40E_RECOVERY_MODE,
__I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */
+ __I40E_IN_REMOVE,
__I40E_VFS_RELEASING,
/* This must be last as it determines the size of the BITMAP */
__I40E_STATE_SIZE__,
@@ -174,7 +175,6 @@ enum i40e_interrupt_policy {
struct i40e_lump_tracking {
u16 num_entries;
- u16 search_hint;
u16 list[0];
#define I40E_PILE_VALID_BIT 0x8000
#define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
@@ -848,12 +848,12 @@ struct i40e_vsi {
struct rtnl_link_stats64 net_stats_offsets;
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
- u32 tx_restart;
- u32 tx_busy;
+ u64 tx_restart;
+ u64 tx_busy;
u64 tx_linearize;
u64 tx_force_wb;
- u32 rx_buf_failed;
- u32 rx_page_failed;
+ u64 rx_buf_failed;
+ u64 rx_page_failed;
/* These are containers of ring pointers, allocated at run-time */
struct i40e_ring **rx_rings;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 2c1b1da1220e..1e57cc8c47d7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -240,7 +240,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
(unsigned long int)vsi->net_stats_offsets.rx_compressed,
(unsigned long int)vsi->net_stats_offsets.tx_compressed);
dev_info(&pf->pdev->dev,
- " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
+ " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
vsi->tx_restart, vsi->tx_busy,
vsi->rx_buf_failed, vsi->rx_page_failed);
rcu_read_lock();
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 61afc220fc6c..0c4b7dfb3b35 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -196,10 +196,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
* @id: an owner id to stick on the items assigned
*
* Returns the base item index of the lump, or negative for error
- *
- * The search_hint trick and lack of advanced fit-finding only work
- * because we're highly likely to have all the same size lump requests.
- * Linear search time and any fragmentation should be minimal.
**/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
u16 needed, u16 id)
@@ -214,8 +210,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
return -EINVAL;
}
- /* start the linear search with an imperfect hint */
- i = pile->search_hint;
+ /* Allocate the last queue in the pile for the FDIR VSI
+ * so it doesn't fragment the qp_pile.
+ */
+ if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
+ if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
+ dev_err(&pf->pdev->dev,
+ "Cannot allocate queue %d for I40E_VSI_FDIR\n",
+ pile->num_entries - 1);
+ return -ENOMEM;
+ }
+ pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
+ return pile->num_entries - 1;
+ }
+
+ i = 0;
while (i < pile->num_entries) {
/* skip already allocated entries */
if (pile->list[i] & I40E_PILE_VALID_BIT) {
@@ -234,7 +243,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
for (j = 0; j < needed; j++)
pile->list[i+j] = id | I40E_PILE_VALID_BIT;
ret = i;
- pile->search_hint = i + j;
break;
}
@@ -257,7 +265,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
{
int valid_id = (id | I40E_PILE_VALID_BIT);
int count = 0;
- int i;
+ u16 i;
if (!pile || index >= pile->num_entries)
return -EINVAL;
@@ -269,8 +277,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
count++;
}
- if (count && index < pile->search_hint)
- pile->search_hint = index;
return count;
}
@@ -772,9 +778,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
struct rtnl_link_stats64 *ns; /* netdev stats */
struct i40e_eth_stats *oes;
struct i40e_eth_stats *es; /* device's eth stats */
- u32 tx_restart, tx_busy;
+ u64 tx_restart, tx_busy;
struct i40e_ring *p;
- u32 rx_page, rx_buf;
+ u64 rx_page, rx_buf;
u64 bytes, packets;
unsigned int start;
u64 tx_linearize;
@@ -3915,10 +3921,10 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
*
* get_cpu_mask returns a static constant mask with
* a permanent lifetime so it's ok to pass to
- * irq_set_affinity_hint without making a copy.
+ * irq_update_affinity_hint without making a copy.
*/
cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
vsi->irqs_ready = true;
@@ -3929,7 +3935,7 @@ free_queue_irqs:
vector--;
irq_num = pf->msix_entries[base + vector].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -4750,7 +4756,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
/* clear the affinity notifier in the IRQ descriptor */
irq_set_affinity_notifier(irq_num, NULL);
/* remove our suggested affinity mask for this IRQ */
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
free_irq(irq_num, vsi->q_vectors[i]);
@@ -5366,7 +5372,15 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
/* There is no need to reset BW when mqprio mode is on. */
if (pf->flags & I40E_FLAG_TC_MQPRIO)
return 0;
- if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) {
+
+ if (!vsi->mqprio_qopt.qopt.hw) {
+ if (pf->flags & I40E_FLAG_DCB_ENABLED)
+ goto skip_reset;
+
+ if (IS_ENABLED(CONFIG_I40E_DCB) &&
+ i40e_dcb_hw_get_num_tc(&pf->hw) == 1)
+ goto skip_reset;
+
ret = i40e_set_bw_limit(vsi, vsi->seid, 0);
if (ret)
dev_info(&pf->pdev->dev,
@@ -5374,6 +5388,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
vsi->seid);
return ret;
}
+
+skip_reset:
memset(&bw_data, 0, sizeof(bw_data));
bw_data.tc_valid_bits = enabled_tc;
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
@@ -10574,15 +10590,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
i40e_get_oem_version(&pf->hw);
- if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
- ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
- hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
- /* The following delay is necessary for 4.33 firmware and older
- * to recover after EMP reset. 200 ms should suffice but we
- * put here 300 ms to be sure that FW is ready to operate
- * after reset.
- */
- mdelay(300);
+ if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
+ /* The following delay is necessary for the firmware update to complete. */
+ mdelay(1000);
}
/* re-verify the eeprom if we just had an EMP reset */
@@ -10853,6 +10863,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
bool lock_acquired)
{
int ret;
+
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return;
/* Now we wait for GRST to settle out.
* We don't have to delete the VEBs or VSIs from the hw switch
* because the reset will make them disappear.
@@ -11792,7 +11805,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
return -ENOMEM;
pf->irq_pile->num_entries = vectors;
- pf->irq_pile->search_hint = 0;
/* track first vector for misc interrupts, ignore return */
(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
@@ -12213,6 +12225,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
vsi->req_queue_pairs = queue_count;
i40e_prep_for_reset(pf);
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return pf->alloc_rss_size;
pf->alloc_rss_size = new_rss_size;
@@ -12595,7 +12609,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
goto sw_init_done;
}
pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
- pf->qp_pile->search_hint = 0;
pf->tx_timeout_recovery_level = 1;
@@ -13040,6 +13053,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
if (need_reset)
i40e_prep_for_reset(pf);
+ /* The VSI is about to be deleted, so just return -EINVAL */
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return -EINVAL;
+
old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset) {
@@ -15930,8 +15947,13 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
- while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
+ /* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE
+ * flags; once they are set, i40e_rebuild should not be called, as
+ * i40e_prep_for_reset always returns early.
+ */
+ while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
usleep_range(1000, 2000);
+ set_bit(__I40E_IN_REMOVE, pf->state);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
set_bit(__I40E_VF_RESETS_DISABLED, pf->state);
@@ -16130,6 +16152,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
+ if (test_bit(__I40E_IN_REMOVE, pf->state))
+ return;
+
i40e_reset_and_rebuild(pf, false, false);
}
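The __I40E_IN_REMOVE bit closes a race between device removal and the reset workqueue: remove() first wins the reset-pending bit, then marks removal, and every reset/rebuild entry point bails out once the bit is visible. A compact sketch of that interlock (bit numbers and helper names are illustrative):

#include <linux/bitops.h>
#include <linux/delay.h>

/* Sketch: removal claims the reset path, then raises the remove flag. */
static void begin_remove(unsigned long *state, int pending_bit, int remove_bit)
{
	while (test_and_set_bit(pending_bit, state))
		usleep_range(1000, 2000);	/* wait out an in-flight reset */
	set_bit(remove_bit, state);
}

/* Reset entry points check the flag and return early. */
static bool reset_allowed(const unsigned long *state, int remove_bit)
{
	return !test_bit(remove_bit, state);
}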
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 8d0588a27a05..1908eed4fa5e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -413,6 +413,9 @@
#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
#define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
#define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index b785d09c19f8..dfdb6e786461 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1377,6 +1377,32 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf,
}
/**
+ * i40e_sync_vfr_reset
+ * @hw: pointer to hw struct
+ * @vf_id: VF identifier
+ *
+ * Before triggering a hardware reset, we need to make sure that no other
+ * process has reserved the hardware for any reset operation. This check
+ * is done by examining the status of the RSTAT1 register used to signal
+ * the reset.
+ **/
+static int i40e_sync_vfr_reset(struct i40e_hw *hw, int vf_id)
+{
+ u32 reg;
+ int i;
+
+ for (i = 0; i < I40E_VFR_WAIT_COUNT; i++) {
+ reg = rd32(hw, I40E_VFINT_ICR0_ENA(vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (reg)
+ return 0;
+
+ usleep_range(100, 200);
+ }
+
+ return -EAGAIN;
+}
+
+/**
* i40e_trigger_vf_reset
* @vf: pointer to the VF structure
* @flr: VFLR was issued or not
@@ -1390,9 +1416,11 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
u32 reg, reg_idx, bit_idx;
+ bool vf_active;
+ u32 radq;
/* warn the VF */
- clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
+ vf_active = test_and_clear_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states);
/* Disable VF's configuration API during reset. The flag is re-enabled
* in i40e_alloc_vf_res(), when it's safe again to access VF's VSI.
@@ -1406,7 +1434,19 @@ static void i40e_trigger_vf_reset(struct i40e_vf *vf, bool flr)
* just need to clean up, so don't hit the VFRTRIG register.
*/
if (!flr) {
- /* reset VF using VPGEN_VFRTRIG reg */
+ /* Sync the VFR reset before triggering the next one */
+ radq = rd32(hw, I40E_VFINT_ICR0_ENA(vf->vf_id)) &
+ I40E_VFINT_ICR0_ADMINQ_MASK;
+ if (vf_active && !radq)
+ /* wait for the VF driver to finish its reset */
+ if (i40e_sync_vfr_reset(hw, vf->vf_id))
+ dev_info(&pf->pdev->dev,
+ "Reset VF %d never finished\n",
+ vf->vf_id);
+
+ /* Reset the VF using the VPGEN_VFRTRIG reg. This also sets
+ * the in-progress state in the RSTAT1 register.
+ */
reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
@@ -2618,6 +2658,59 @@ error_param:
}
/**
+ * i40e_check_enough_queue - find a big enough block of queues
+ * @vf: pointer to the VF info
+ * @needed: the number of items needed
+ *
+ * Returns the base item index of the queue, or negative for error
+ **/
+static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
+{
+ unsigned int i, cur_queues, more, pool_size;
+ struct i40e_lump_tracking *pile;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi;
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ cur_queues = vsi->alloc_queue_pairs;
+
+ /* if the currently allocated queues satisfy the request */
+ if (cur_queues >= needed)
+ return vsi->base_queue;
+
+ pile = pf->qp_pile;
+ if (cur_queues > 0) {
+ /* if some queues are already allocated, just check whether
+ * enough free entries follow the allocated region.
+ */
+ more = needed - cur_queues;
+ for (i = vsi->base_queue + cur_queues;
+ i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT)
+ break;
+
+ if (more-- == 1)
+ /* there is enough */
+ return vsi->base_queue;
+ }
+ }
+
+ pool_size = 0;
+ for (i = 0; i < pile->num_entries; i++) {
+ if (pile->list[i] & I40E_PILE_VALID_BIT) {
+ pool_size = 0;
+ continue;
+ }
+ if (needed <= ++pool_size)
+ /* there is enough */
+ return i;
+ }
+
+ return -ENOMEM;
+}
+
+/**
* i40e_vc_request_queues_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2651,6 +2744,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
req_pairs - cur_pairs,
pf->queues_left);
vfres->num_queue_pairs = pf->queues_left + cur_pairs;
+ } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
+ dev_warn(&pf->pdev->dev,
+ "VF %d requested %d more queues, but there is not enough for it.\n",
+ vf->vf_id,
+ req_pairs - cur_pairs);
+ vfres->num_queue_pairs = cur_pairs;
} else {
/* successful request */
vf->num_req_queues = req_pairs;
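The new i40e_check_enough_queue() above is essentially a first-fit scan over a bitmap-like pile: it first tries to grow the VF's existing region in place, then falls back to searching for any contiguous run of free entries. The driver's version returns the last index of the run and the caller only tests the sign; the self-contained sketch below (hypothetical names, plain C) returns the run's start index for clarity:

#include <stdio.h>

#define PILE_VALID 0x8000	/* stands in for I40E_PILE_VALID_BIT */

/* Return the first index of a run of 'needed' free entries, or -1. */
static int find_free_run(const unsigned short *list, int entries, int needed)
{
	int i, pool_size = 0;

	for (i = 0; i < entries; i++) {
		if (list[i] & PILE_VALID) {
			pool_size = 0;	/* run broken by an allocated entry */
			continue;
		}
		if (needed <= ++pool_size)
			return i - needed + 1;
	}
	return -1;
}

int main(void)
{
	unsigned short pile[8] = { PILE_VALID, 0, 0, 0, PILE_VALID, 0, 0, 0 };

	printf("run of 3 starts at %d\n", find_free_run(pile, 8, 3));
	return 0;
}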
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 49575a640a84..03c42fd0fea1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -19,6 +19,7 @@
#define I40E_MAX_VF_PROMISC_FLAGS 3
#define I40E_VF_STATE_WAIT_COUNT 20
+#define I40E_VFR_WAIT_COUNT 100
/* Various queue ctrls */
enum i40e_queue_ctrl {
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 95116ef2f0ae..8125b9120615 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -492,10 +492,10 @@ iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
/* Spread the IRQ affinity hints across online CPUs. Note that
* get_cpu_mask returns a mask with a permanent lifetime so
- * it's safe to use as a hint for irq_set_affinity_hint.
+ * it's safe to use as a hint for irq_update_affinity_hint.
*/
cpu = cpumask_local_spread(q_vector->v_idx, -1);
- irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
+ irq_update_affinity_hint(irq_num, get_cpu_mask(cpu));
}
return 0;
@@ -505,7 +505,7 @@ free_queue_irqs:
vector--;
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
return err;
@@ -557,7 +557,7 @@ static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
for (vector = 0; vector < q_vectors; vector++) {
irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
free_irq(irq_num, &adapter->q_vectors[vector]);
}
}
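The switch from irq_set_affinity_hint() to irq_update_affinity_hint() here (and in the ixgbe and mlx4 hunks below) is deliberate: the older helper both stores the hint and forces the IRQ's affinity to it, while the _update variant only records the hint for userspace consumers such as irqbalance, leaving any admin-configured affinity alone. A condensed sketch of the intended call pattern (kernel-style, assuming <linux/interrupt.h> and <linux/cpumask.h>; demo_* names are illustrative):

static void demo_setup_irq_hint(unsigned int irq, int q_idx)
{
	int cpu = cpumask_local_spread(q_idx, -1);

	/* record the preferred CPU for irqbalance; do not force it */
	irq_update_affinity_hint(irq, get_cpu_mask(cpu));
}

static void demo_teardown_irq_hint(unsigned int irq)
{
	irq_update_affinity_hint(irq, NULL);	/* clear the hint */
}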
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 4e16d185077d..a9fa701aaa95 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -483,6 +483,7 @@ enum ice_pf_flags {
ICE_FLAG_VF_TRUE_PROMISC_ENA,
ICE_FLAG_MDD_AUTO_RESET_VF,
ICE_FLAG_LINK_LENIENT_MODE_ENA,
+ ICE_FLAG_PLUG_AUX_DEV,
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -887,7 +888,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
set_bit(ICE_FLAG_AUX_ENA, pf->flags);
- ice_plug_aux_dev(pf);
+ set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
}
}
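ice_set_rdma_cap() can be reached from contexts where plugging an auxiliary device (which may sleep) is unsafe, so the patch records the request in a PF flag and lets the service task perform the plug later. The essential pattern is set_bit() in the requester and test_and_clear_bit() in the worker; a runnable userspace analogue using C11 atomics (illustrative only, not the driver's code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool plug_requested;

static void request_plug(void)		/* e.g. set_bit(...) in the driver */
{
	atomic_store(&plug_requested, true);
}

static void service_task(void)		/* e.g. test_and_clear_bit(...) */
{
	if (atomic_exchange(&plug_requested, false))
		printf("plugging aux device from safe context\n");
}

int main(void)
{
	request_plug();
	service_task();	/* performs the deferred work exactly once */
	service_task();	/* flag already consumed: no-op */
	return 0;
}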
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 408d15a5b0e3..a6d7d3eff186 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -3342,7 +3342,8 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
!ice_fw_supports_report_dflt_cfg(hw)) {
struct ice_link_default_override_tlv tlv;
- if (ice_get_link_default_override(&tlv, pi))
+ status = ice_get_link_default_override(&tlv, pi);
+ if (status)
goto out;
if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index e375ac849aec..4f954db01b92 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -204,11 +204,7 @@ ice_lag_unlink(struct ice_lag *lag,
lag->upper_netdev = NULL;
}
- if (lag->peer_netdev) {
- dev_put(lag->peer_netdev);
- lag->peer_netdev = NULL;
- }
-
+ lag->peer_netdev = NULL;
ice_set_sriov_cap(pf);
ice_set_rdma_cap(pf);
lag->bonded = false;
@@ -216,6 +212,32 @@ ice_lag_unlink(struct ice_lag *lag,
}
/**
+ * ice_lag_unregister - handle netdev unregister events
+ * @lag: LAG info struct
+ * @netdev: netdev reporting the event
+ */
+static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
+{
+ struct ice_pf *pf = lag->pf;
+
+ /* check that this event is for this netdev and that
+ * we are currently in an aggregate
+ */
+ if (netdev != lag->netdev || !lag->bonded)
+ return;
+
+ if (lag->upper_netdev) {
+ dev_put(lag->upper_netdev);
+ lag->upper_netdev = NULL;
+ ice_set_sriov_cap(pf);
+ ice_set_rdma_cap(pf);
+ }
+ /* perform some cleanup in case we come back */
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+}
+
+/**
* ice_lag_changeupper_event - handle LAG changeupper event
* @lag: LAG info struct
* @ptr: opaque pointer data
@@ -307,7 +329,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
ice_lag_info_event(lag, ptr);
break;
case NETDEV_UNREGISTER:
- ice_lag_unlink(lag, ptr);
+ ice_lag_unregister(lag, netdev);
break;
default:
break;
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d981dc6f2323..85a612838a89 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -568,6 +568,7 @@ struct ice_tx_ctx_desc {
(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)
#define ICE_TXD_CTX_QW1_MSS_S 50
+#define ICE_TXD_CTX_MIN_MSS 64
#define ICE_TXD_CTX_QW1_VSI_S 50
#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0c187cf04fcf..53256aca27c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1684,6 +1684,12 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
if (status)
dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
vsi_num, status);
+
+ status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
+ ICE_FLOW_SEG_HDR_ESP);
+ if (status)
+ dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
+ vsi_num, status);
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 30814435f779..17a9bb461dc3 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -2253,6 +2253,9 @@ static void ice_service_task(struct work_struct *work)
return;
}
+ if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+ ice_plug_aux_dev(pf);
+
ice_clean_adminq_subtask(pf);
ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
@@ -8525,6 +8528,7 @@ ice_features_check(struct sk_buff *skb,
struct net_device __always_unused *netdev,
netdev_features_t features)
{
+ bool gso = skb_is_gso(skb);
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
@@ -8537,24 +8541,32 @@ ice_features_check(struct sk_buff *skb,
/* We cannot support GSO if the MSS is going to be less than
* 64 bytes. If it is then we need to drop support for GSO.
*/
- if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+ if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
features &= ~NETIF_F_GSO_MASK;
- len = skb_network_header(skb) - skb->data;
+ len = skb_network_offset(skb);
if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
goto out_rm_features;
- len = skb_transport_header(skb) - skb_network_header(skb);
+ len = skb_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
if (skb->encapsulation) {
- len = skb_inner_network_header(skb) - skb_transport_header(skb);
- if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
- goto out_rm_features;
+ /* this must work for VXLAN frames AND IPIP/SIT frames, and in
+ * the case of IPIP frames, the transport header pointer is
+ * after the inner header! So check to make sure that this
+ * is a GRE or UDP_TUNNEL frame before doing that math.
+ */
+ if (gso && (skb_shinfo(skb)->gso_type &
+ (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
+ len = skb_inner_network_header(skb) -
+ skb_transport_header(skb);
+ if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
+ goto out_rm_features;
+ }
- len = skb_inner_transport_header(skb) -
- skb_inner_network_header(skb);
+ len = skb_inner_network_header_len(skb);
if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
goto out_rm_features;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c6ff656b2476..89b467006291 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3247,8 +3247,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
/* If Flow Director is enabled, set interrupt affinity */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
/* assign the mask for this irq */
- irq_set_affinity_hint(entry->vector,
- &q_vector->affinity_mask);
+ irq_update_affinity_hint(entry->vector,
+ &q_vector->affinity_mask);
}
}
@@ -3264,8 +3264,8 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
free_queue_irqs:
while (vector) {
vector--;
- irq_set_affinity_hint(adapter->msix_entries[vector].vector,
- NULL);
+ irq_update_affinity_hint(adapter->msix_entries[vector].vector,
+ NULL);
free_irq(adapter->msix_entries[vector].vector,
adapter->q_vector[vector]);
}
@@ -3398,7 +3398,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
continue;
/* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(entry->vector, NULL);
+ irq_update_affinity_hint(entry->vector, NULL);
free_irq(entry->vector, q_vector);
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 0015fcf1df2b..0f293acd17e8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1984,14 +1984,15 @@ static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
return;
- set_ring_build_skb_enabled(rx_ring);
+ if (PAGE_SIZE < 8192)
+ if (max_frame > IXGBEVF_MAX_FRAME_BUILD_SKB)
+ set_ring_uses_large_buffer(rx_ring);
- if (PAGE_SIZE < 8192) {
- if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
- return;
+ /* 82599 can't rely on RXDCTL.RLPML to restrict the size of the frame */
+ if (adapter->hw.mac.type == ixgbe_mac_82599_vf && !ring_uses_large_buffer(rx_ring))
+ return;
- set_ring_uses_large_buffer(rx_ring);
- }
+ set_ring_build_skb_enabled(rx_ring);
}
/**
diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig
index f99adbf26ab4..04345b929d8e 100644
--- a/drivers/net/ethernet/litex/Kconfig
+++ b/drivers/net/ethernet/litex/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_LITEX
config LITEX_LITEETH
tristate "LiteX Ethernet support"
- depends on OF
+ depends on OF && HAS_IOMEM
help
If you wish to compile a kernel for hardware with a LiteX LiteEth
device then you should answer Y to this.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 186d00a9ab35..3631d612aaca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1570,6 +1570,8 @@ static struct mac_ops cgx_mac_ops = {
.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
.mac_pause_frm_config = cgx_lmac_pause_frm_config,
.mac_enadis_ptp_config = cgx_lmac_ptp_config,
+ .mac_rx_tx_enable = cgx_lmac_rx_tx_enable,
+ .mac_tx_enable = cgx_lmac_tx_enable,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index fc6e7423cbd8..b33e7d1d0851 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -107,6 +107,9 @@ struct mac_ops {
void (*mac_enadis_ptp_config)(void *cgxd,
int lmac_id,
bool enable);
+
+ int (*mac_rx_tx_enable)(void *cgxd, int lmac_id, bool enable);
+ int (*mac_tx_enable)(void *cgxd, int lmac_id, bool enable);
};
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 4e79e918a161..58e2aeebc14f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -732,6 +732,7 @@ enum nix_af_status {
NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
+ NIX_AF_ERR_LINK_CREDITS = -431,
};
/* For NIX RX vtag action */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 0fe7ad35e36f..4180376fa676 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -185,7 +185,6 @@ enum npc_kpu_parser_state {
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
NPC_S_KPU2_EXDSA,
- NPC_S_KPU2_NGIO,
NPC_S_KPU2_CPT_CTAG,
NPC_S_KPU2_CPT_QINQ,
NPC_S_KPU3_CTAG,
@@ -212,6 +211,7 @@ enum npc_kpu_parser_state {
NPC_S_KPU5_NSH,
NPC_S_KPU5_CPT_IP,
NPC_S_KPU5_CPT_IP6,
+ NPC_S_KPU5_NGIO,
NPC_S_KPU6_IP6_EXT,
NPC_S_KPU6_IP6_HOP_DEST,
NPC_S_KPU6_IP6_ROUT,
@@ -1124,15 +1124,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_CTAG,
0xffff,
- NPC_ETYPE_NGIO,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_ETHER, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1968,6 +1959,15 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
},
{
NPC_S_KPU2_CTAG, 0xff,
+ NPC_ETYPE_NGIO,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_PPPOE,
0xffff,
0x0000,
@@ -2750,15 +2750,6 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_NGIO, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_CPT_CTAG, 0xff,
NPC_ETYPE_IP,
0xffff,
@@ -5090,6 +5081,15 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU5_NGIO, 0xff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -8425,14 +8425,6 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 0, 0, 0,
- NPC_S_KPU2_NGIO, 12, 1,
- NPC_LID_LA, NPC_LT_LA_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 0, 0, 0,
NPC_S_KPU2_CTAG2, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
@@ -9196,6 +9188,14 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 2, 0,
+ NPC_S_KPU5_NGIO, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 14, 1,
NPC_LID_LB, NPC_LT_LB_PPPOE,
@@ -9892,14 +9892,6 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LC, NPC_LT_LC_NGIO,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
NPC_S_KPU5_CPT_IP, 6, 1,
NPC_LID_LB, NPC_LT_LB_CTAG,
@@ -11974,6 +11966,14 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 1,
+ NPC_LID_LC, NPC_LT_LC_NGIO,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LC, NPC_EC_UNK,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index e695fa0e82a9..9ea2f6ac38ec 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -30,6 +30,8 @@ static struct mac_ops rpm_mac_ops = {
.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
.mac_pause_frm_config = rpm_lmac_pause_frm_config,
.mac_enadis_ptp_config = rpm_lmac_ptp_config,
+ .mac_rx_tx_enable = rpm_lmac_rx_tx_enable,
+ .mac_tx_enable = rpm_lmac_tx_enable,
};
struct mac_ops *rpm_get_mac_ops(void)
@@ -54,6 +56,43 @@ int rpm_get_nr_lmacs(void *rpmd)
return hweight8(rpm_read(rpm, 0, CGXX_CMRX_RX_LMACS) & 0xFULL);
}
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg, last;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ last = cfg;
+ if (enable)
+ cfg |= RPM_TX_EN;
+ else
+ cfg &= ~(RPM_TX_EN);
+
+ if (cfg != last)
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return !!(last & RPM_TX_EN);
+}
+
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG);
+ if (enable)
+ cfg |= RPM_RX_EN | RPM_TX_EN;
+ else
+ cfg &= ~(RPM_RX_EN | RPM_TX_EN);
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_COMMAND_CONFIG, cfg);
+ return 0;
+}
+
void rpm_lmac_enadis_rx_pause_fwding(void *rpmd, int lmac_id, bool enable)
{
rpm_t *rpm = rpmd;
@@ -252,23 +291,20 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
if (!rpm || lmac_id >= rpm->lmac_count)
return -ENODEV;
lmac_type = rpm->mac_ops->get_lmac_type(rpm, lmac_id);
- if (lmac_type == LMAC_MODE_100G_R) {
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
-
- if (enable)
- cfg |= RPMX_MTI_PCS_LBK;
- else
- cfg &= ~RPMX_MTI_PCS_LBK;
- rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
- } else {
- cfg = rpm_read(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1);
- if (enable)
- cfg |= RPMX_MTI_PCS_LBK;
- else
- cfg &= ~RPMX_MTI_PCS_LBK;
- rpm_write(rpm, lmac_id, RPMX_MTI_LPCSX_CONTROL1, cfg);
+
+ if (lmac_type == LMAC_MODE_QSGMII || lmac_type == LMAC_MODE_SGMII) {
+ dev_err(&rpm->pdev->dev, "loopback not supported for LPC mode\n");
+ return 0;
}
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1);
+
+ if (enable)
+ cfg |= RPMX_MTI_PCS_LBK;
+ else
+ cfg &= ~RPMX_MTI_PCS_LBK;
+ rpm_write(rpm, lmac_id, RPMX_MTI_PCS100X_CONTROL1, cfg);
+
return 0;
}
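Note the return convention of rpm_lmac_tx_enable() above: it returns the previous state of the TX-enable bit (as 0/1) rather than an error code, so callers such as nix_smq_flush() can force TX on for a flush and afterwards restore whatever state was there before. A minimal sketch of that save/restore idiom (hypothetical names, plain C):

#include <stdbool.h>
#include <stdio.h>

static unsigned long long cfg_reg;	/* stands in for COMMAND_CONFIG */
#define TX_EN 0x1ULL

/* Set the TX-enable bit; return the bit's previous state. */
static bool demo_tx_enable(bool enable)
{
	bool was_enabled = cfg_reg & TX_EN;

	if (enable)
		cfg_reg |= TX_EN;
	else
		cfg_reg &= ~TX_EN;
	return was_enabled;
}

int main(void)
{
	bool restore = !demo_tx_enable(true);	/* force TX on for a flush */

	/* ... flush work happens here ... */

	if (restore)
		demo_tx_enable(false);		/* put the old state back */
	printf("TX bit now %llu\n", cfg_reg & TX_EN);
	return 0;
}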
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index 57c8a687b488..ff580311edd0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -43,6 +43,8 @@
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
#define RPM_LMAC_FWI 0xa
+#define RPM_TX_EN BIT_ULL(0)
+#define RPM_RX_EN BIT_ULL(1)
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
@@ -57,4 +59,6 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_rx_tx_enable(void *rpmd, int lmac_id, bool enable);
+int rpm_lmac_tx_enable(void *rpmd, int lmac_id, bool enable);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 3ca6b942ebe2..54e1b27a7dfe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -520,8 +520,11 @@ static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg)
rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0));
err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true);
- if (err)
- dev_err(rvu->dev, "HW block:%d reset failed\n", blkaddr);
+ if (err) {
+ dev_err(rvu->dev, "HW block:%d reset timeout retrying again\n", blkaddr);
+ while (rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true) == -EBUSY)
+ ;
+ }
}
static void rvu_reset_all_blocks(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 66e45d733824..5ed94cfb47d2 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -806,6 +806,7 @@ bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 2ca182a4ce82..8a7ac5a8b821 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -441,16 +441,26 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable)
int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
{
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
+ void *cgxd;
if (!is_cgx_config_permitted(rvu, pcifunc))
return LMAC_AF_ERR_PERM_DENIED;
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ return mac_ops->mac_rx_tx_enable(cgxd, lmac_id, start);
+}
- cgx_lmac_rx_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, start);
+int rvu_cgx_config_tx(void *cgxd, int lmac_id, bool enable)
+{
+ struct mac_ops *mac_ops;
- return 0;
+ mac_ops = get_mac_ops(cgxd);
+ return mac_ops->mac_tx_enable(cgxd, lmac_id, enable);
}
void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index a09a507369ac..d1eddb769a41 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1224,6 +1224,8 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m,
seq_printf(m, "W3: head_offset\t\t\t%d\nW3: smenq_next_sqb_vld\t\t%d\n\n",
sq_ctx->head_offset, sq_ctx->smenq_next_sqb_vld);
+ seq_printf(m, "W3: smq_next_sq_vld\t\t%d\nW3: smq_pend\t\t\t%d\n",
+ sq_ctx->smq_next_sq_vld, sq_ctx->smq_pend);
seq_printf(m, "W4: next_sqb \t\t\t%llx\n\n", sq_ctx->next_sqb);
seq_printf(m, "W5: tail_sqb \t\t\t%llx\n\n", sq_ctx->tail_sqb);
seq_printf(m, "W6: smenq_sqb \t\t\t%llx\n\n", sq_ctx->smenq_sqb);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d8b1948aaa0a..97fb61915379 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -512,11 +512,11 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req,
cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST);
lmac_chan_cnt = cfg & 0xFF;
- cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
- sdp_chan_cnt = cfg & 0xFFF;
-
cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt;
lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1);
+ sdp_chan_cnt = cfg & 0xFFF;
sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt;
pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc);
@@ -2068,8 +2068,8 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
/* enable cgx tx if disabled */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
- lmac_id, true);
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
+ lmac_id, true);
}
cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(smq));
@@ -2092,7 +2092,7 @@ static int nix_smq_flush(struct rvu *rvu, int blkaddr,
rvu_cgx_enadis_rx_bp(rvu, pf, true);
/* restore cgx tx state */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
return err;
}
@@ -3878,7 +3878,7 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
/* Enable cgx tx if disabled for credits to be back */
if (is_pf_cgxmapped(rvu, pf)) {
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
- restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu),
+ restore_tx_en = !rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu),
lmac_id, true);
}
@@ -3891,8 +3891,8 @@ nix_config_link_credits(struct rvu *rvu, int blkaddr, int link,
NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0));
}
- rc = -EBUSY;
- poll_tmo = jiffies + usecs_to_jiffies(10000);
+ rc = NIX_AF_ERR_LINK_CREDITS;
+ poll_tmo = jiffies + usecs_to_jiffies(200000);
/* Wait for credits to return */
do {
if (time_after(jiffies, poll_tmo))
@@ -3918,7 +3918,7 @@ exit:
/* Restore state of cgx tx */
if (restore_tx_en)
- cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
+ rvu_cgx_config_tx(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false);
mutex_unlock(&rvu->rsrc_lock);
return rc;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index c0005a1feee6..91f86d77cd41 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -402,6 +402,7 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, int index, struct mcam_entry *entry,
bool *enable)
{
+ struct rvu_npc_mcam_rule *rule;
u16 owner, target_func;
struct rvu_pfvf *pfvf;
u64 rx_action;
@@ -423,6 +424,12 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam,
test_bit(NIXLF_INITIALIZED, &pfvf->flags)))
*enable = false;
+ /* fixup is not needed for rules added by the user (ntuple filters) */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ return;
+ }
+
/* copy VF default entry action to the VF mcam entry */
rx_action = npc_get_default_entry_action(rvu, mcam, blkaddr,
target_func);
@@ -489,8 +496,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
}
/* PF installing VF rule */
- if (intf == NIX_INTF_RX && actindex < mcam->bmap_entries)
- npc_fixup_vf_rule(rvu, mcam, blkaddr, index, entry, &enable);
+ if (is_npc_intf_rx(intf) && actindex < mcam->bmap_entries)
+ npc_fixup_vf_rule(rvu, mcam, blkaddr, actindex, entry, &enable);
/* Set 'action' */
rvu_write64(rvu, blkaddr,
@@ -916,7 +923,8 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 pcifunc, u64 rx_action)
{
int actindex, index, bank, entry;
- bool enable;
+ struct rvu_npc_mcam_rule *rule;
+ bool enable, update;
if (!(pcifunc & RVU_PFVF_FUNC_MASK))
return;
@@ -924,6 +932,14 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam,
mutex_lock(&mcam->lock);
for (index = 0; index < mcam->bmap_entries; index++) {
if (mcam->entry2target_pffunc[index] == pcifunc) {
+ update = true;
+ /* no update needed for rules added via ntuple filters */
+ list_for_each_entry(rule, &mcam->mcam_rules, list) {
+ if (rule->entry == index)
+ update = false;
+ }
+ if (!update)
+ continue;
bank = npc_get_bank(mcam, index);
actindex = index;
entry = index & (mcam->banksize - 1);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index ff2b21999f36..19c53e591d0d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1098,14 +1098,6 @@ find_rule:
write_req.cntr = rule->cntr;
}
- err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
- &write_rsp);
- if (err) {
- rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
- if (new)
- kfree(rule);
- return err;
- }
/* update rule */
memcpy(&rule->packet, &dummy.packet, sizeof(rule->packet));
memcpy(&rule->mask, &dummy.mask, sizeof(rule->mask));
@@ -1132,6 +1124,18 @@ find_rule:
if (req->default_rule)
pfvf->def_ucast_rule = rule;
+ /* write to mcam entry registers */
+ err = rvu_mbox_handler_npc_mcam_write_entry(rvu, &write_req,
+ &write_rsp);
+ if (err) {
+ rvu_mcam_remove_counter_from_rule(rvu, owner, rule);
+ if (new) {
+ list_del(&rule->list);
+ kfree(rule);
+ }
+ return err;
+ }
+
/* VF's MAC address is being changed via PF */
if (pf_set_vfs_mac) {
ether_addr_copy(pfvf->default_mac, req->packet.dmac);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 61e52812983f..14509fc64cce 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -603,6 +603,7 @@ static inline void __cn10k_aura_freeptr(struct otx2_nic *pfvf, u64 aura,
size++;
tar_addr |= ((size - 1) & 0x7) << 4;
}
+ dma_wmb();
memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
/* Perform LMTST flush */
cn10k_lmt_flush(val, tar_addr);
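The dma_wmb() added above orders the stores that built the free-pointer list against the stores into the LMT region that cn10k_lmt_flush() publishes, so the device cannot observe a partially written burst. A condensed sketch of the ordering requirement (kernel-style, reusing the driver's existing names; this is an illustration of barrier placement, not a buildable unit):

/* Sketch: make earlier pointer-setup stores visible before the
 * LMT-region stores and the flush that hands them to hardware.
 */
static void demo_lmt_store(u64 *lmt_addr, const u64 *ptrs, int num,
			   u64 val, u64 tar_addr)
{
	dma_wmb();				/* order: data before LMT store */
	memcpy(lmt_addr, ptrs, sizeof(u64) * num);
	cn10k_lmt_flush(val, tar_addr);		/* device consumes the data */
}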
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 6080ebd9bd94..d39341e4ab37 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -394,7 +394,12 @@ static int otx2_forward_vf_mbox_msgs(struct otx2_nic *pf,
dst_mdev->msg_size = mbox_hdr->msg_size;
dst_mdev->num_msgs = num_msgs;
err = otx2_sync_mbox_msg(dst_mbox);
- if (err) {
+ /* An -EIO error code indicates a communication failure with
+ * the AF. Any other error code means the AF processed the VF
+ * messages and set the error codes in the response messages
+ * (if any), so simply forward the responses to the VF.
+ */
+ if (err == -EIO) {
dev_warn(pf->dev,
"AF not responding to VF%d messages\n", vf);
/* restore PF mbase and exit */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index a0a5a8e6bd8c..2fd9ef2fe5d6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -283,7 +283,6 @@ struct prestera_router {
struct list_head rif_entry_list;
struct notifier_block inetaddr_nb;
struct notifier_block inetaddr_valid_nb;
- bool aborted;
};
struct prestera_rxtx_params {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index 51fc841b1e7a..e6bfadc874c5 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -1831,8 +1831,8 @@ static int prestera_iface_to_msg(struct prestera_iface *iface,
int prestera_hw_rif_create(struct prestera_switch *sw,
struct prestera_iface *iif, u8 *mac, u16 *rif_id)
{
- struct prestera_msg_rif_req req;
struct prestera_msg_rif_resp resp;
+ struct prestera_msg_rif_req req;
int err;
memcpy(req.mac, mac, ETH_ALEN);
@@ -1868,9 +1868,9 @@ int prestera_hw_rif_delete(struct prestera_switch *sw, u16 rif_id,
int prestera_hw_vr_create(struct prestera_switch *sw, u16 *vr_id)
{
- int err;
struct prestera_msg_vr_resp resp;
struct prestera_msg_vr_req req;
+ int err;
err = prestera_cmd_ret(sw, PRESTERA_CMD_TYPE_ROUTER_VR_CREATE,
&req.cmd, sizeof(req), &resp.ret, sizeof(resp));
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 08fdd1e50388..cad93f747d0c 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -982,6 +982,7 @@ static void prestera_switch_fini(struct prestera_switch *sw)
prestera_event_handlers_unregister(sw);
prestera_rxtx_switch_fini(sw);
prestera_switchdev_fini(sw);
+ prestera_router_fini(sw);
prestera_netdev_event_handler_unregister(sw);
prestera_hw_switch_fini(sw);
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index 8a3b7b664358..6ef4d32b8fdd 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -25,10 +25,10 @@ static int __prestera_inetaddr_port_event(struct net_device *port_dev,
struct netlink_ext_ack *extack)
{
struct prestera_port *port = netdev_priv(port_dev);
- int err;
- struct prestera_rif_entry *re;
struct prestera_rif_entry_key re_key = {};
+ struct prestera_rif_entry *re;
u32 kern_tb_id;
+ int err;
err = prestera_is_valid_mac_addr(port, port_dev->dev_addr);
if (err) {
@@ -45,21 +45,21 @@ static int __prestera_inetaddr_port_event(struct net_device *port_dev,
switch (event) {
case NETDEV_UP:
if (re) {
- NL_SET_ERR_MSG_MOD(extack, "rif_entry already exist");
+ NL_SET_ERR_MSG_MOD(extack, "RIF already exist");
return -EEXIST;
}
re = prestera_rif_entry_create(port->sw, &re_key,
prestera_fix_tb_id(kern_tb_id),
port_dev->dev_addr);
if (!re) {
- NL_SET_ERR_MSG_MOD(extack, "Can't create rif_entry");
+ NL_SET_ERR_MSG_MOD(extack, "Can't create RIF");
return -EINVAL;
}
dev_hold(port_dev);
break;
case NETDEV_DOWN:
if (!re) {
- NL_SET_ERR_MSG_MOD(extack, "rif_entry not exist");
+ NL_SET_ERR_MSG_MOD(extack, "Can't find RIF");
return -EEXIST;
}
prestera_rif_entry_destroy(port->sw, re);
@@ -75,11 +75,11 @@ static int __prestera_inetaddr_event(struct prestera_switch *sw,
unsigned long event,
struct netlink_ext_ack *extack)
{
- if (prestera_netdev_check(dev) && !netif_is_bridge_port(dev) &&
- !netif_is_lag_port(dev) && !netif_is_ovs_port(dev))
- return __prestera_inetaddr_port_event(dev, event, extack);
+ if (!prestera_netdev_check(dev) || netif_is_bridge_port(dev) ||
+ netif_is_lag_port(dev) || netif_is_ovs_port(dev))
+ return 0;
- return 0;
+ return __prestera_inetaddr_port_event(dev, event, extack);
}
static int __prestera_inetaddr_cb(struct notifier_block *nb,
@@ -126,6 +126,8 @@ static int __prestera_inetaddr_valid_cb(struct notifier_block *nb,
goto out;
if (ipv4_is_multicast(ivi->ivi_addr)) {
+ NL_SET_ERR_MSG_MOD(ivi->extack,
+ "Multicast addr on RIF is not supported");
err = -EINVAL;
goto out;
}
@@ -166,7 +168,7 @@ int prestera_router_init(struct prestera_switch *sw)
err_register_inetaddr_notifier:
unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
err_register_inetaddr_validator_notifier:
- /* prestera_router_hw_fini */
+ prestera_router_hw_fini(sw);
err_router_lib_init:
kfree(sw->router);
return err;
@@ -176,7 +178,7 @@ void prestera_router_fini(struct prestera_switch *sw)
{
unregister_inetaddr_notifier(&sw->router->inetaddr_nb);
unregister_inetaddr_validator_notifier(&sw->router->inetaddr_valid_nb);
- /* router_hw_fini */
+ prestera_router_hw_fini(sw);
kfree(sw->router);
sw->router = NULL;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
index 5866a4be50f5..e5592b69ad37 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.c
@@ -29,6 +29,12 @@ int prestera_router_hw_init(struct prestera_switch *sw)
return 0;
}
+void prestera_router_hw_fini(struct prestera_switch *sw)
+{
+ WARN_ON(!list_empty(&sw->router->vr_list));
+ WARN_ON(!list_empty(&sw->router->rif_entry_list));
+}
+
static struct prestera_vr *__prestera_vr_find(struct prestera_switch *sw,
u32 tb_id)
{
@@ -47,13 +53,8 @@ static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw,
struct netlink_ext_ack *extack)
{
struct prestera_vr *vr;
- u16 hw_vr_id;
int err;
- err = prestera_hw_vr_create(sw, &hw_vr_id);
- if (err)
- return ERR_PTR(-ENOMEM);
-
vr = kzalloc(sizeof(*vr), GFP_KERNEL);
if (!vr) {
err = -ENOMEM;
@@ -61,23 +62,26 @@ static struct prestera_vr *__prestera_vr_create(struct prestera_switch *sw,
}
vr->tb_id = tb_id;
- vr->hw_vr_id = hw_vr_id;
+
+ err = prestera_hw_vr_create(sw, &vr->hw_vr_id);
+ if (err)
+ goto err_hw_create;
list_add(&vr->router_node, &sw->router->vr_list);
return vr;
-err_alloc_vr:
- prestera_hw_vr_delete(sw, hw_vr_id);
+err_hw_create:
kfree(vr);
+err_alloc_vr:
return ERR_PTR(err);
}
static void __prestera_vr_destroy(struct prestera_switch *sw,
struct prestera_vr *vr)
{
- prestera_hw_vr_delete(sw, vr->hw_vr_id);
list_del(&vr->router_node);
+ prestera_hw_vr_delete(sw, vr->hw_vr_id);
kfree(vr);
}
@@ -87,17 +91,22 @@ static struct prestera_vr *prestera_vr_get(struct prestera_switch *sw, u32 tb_id
struct prestera_vr *vr;
vr = __prestera_vr_find(sw, tb_id);
- if (!vr)
+ if (vr) {
+ refcount_inc(&vr->refcount);
+ } else {
vr = __prestera_vr_create(sw, tb_id, extack);
- if (IS_ERR(vr))
- return ERR_CAST(vr);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ refcount_set(&vr->refcount, 1);
+ }
return vr;
}
static void prestera_vr_put(struct prestera_switch *sw, struct prestera_vr *vr)
{
- if (!vr->ref_cnt)
+ if (refcount_dec_and_test(&vr->refcount))
__prestera_vr_destroy(sw, vr);
}
@@ -120,7 +129,7 @@ __prestera_rif_entry_key_copy(const struct prestera_rif_entry_key *in,
out->iface.vlan_id = in->iface.vlan_id;
break;
default:
- pr_err("Unsupported iface type");
+ WARN(1, "Unsupported iface type");
return -EINVAL;
}
@@ -158,7 +167,6 @@ void prestera_rif_entry_destroy(struct prestera_switch *sw,
iface.vr_id = e->vr->hw_vr_id;
prestera_hw_rif_delete(sw, e->hw_id, &iface);
- e->vr->ref_cnt--;
prestera_vr_put(sw, e->vr);
kfree(e);
}
@@ -183,7 +191,6 @@ prestera_rif_entry_create(struct prestera_switch *sw,
if (IS_ERR(e->vr))
goto err_vr_get;
- e->vr->ref_cnt++;
memcpy(&e->addr, addr, sizeof(e->addr));
/* HW */
@@ -198,7 +205,6 @@ prestera_rif_entry_create(struct prestera_switch *sw,
return e;
err_hw_create:
- e->vr->ref_cnt--;
prestera_vr_put(sw, e->vr);
err_vr_get:
err_key_copy:
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
index fed53595f7bb..b6b028551868 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router_hw.h
@@ -6,7 +6,7 @@
struct prestera_vr {
struct list_head router_node;
- unsigned int ref_cnt;
+ refcount_t refcount;
u32 tb_id; /* key (kernel fib table id) */
u16 hw_vr_id; /* virtual router ID */
u8 __pad[2];
@@ -32,5 +32,6 @@ prestera_rif_entry_create(struct prestera_switch *sw,
struct prestera_rif_entry_key *k,
u32 tb_id, const unsigned char *addr);
int prestera_router_hw_init(struct prestera_switch *sw);
+void prestera_router_hw_fini(struct prestera_switch *sw);
#endif /* _PRESTERA_ROUTER_HW_H_ */
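Replacing the bare ref_cnt integer with refcount_t moves VR lifetime management to the kernel's saturating refcount API: prestera_vr_get() now takes a reference on the get/create path and prestera_vr_put() frees on the last put, instead of the old manual counting with its inverted `if (!vr->ref_cnt)` check. The canonical shape of the pattern, sketched with the real refcount API (demo_* names are illustrative):

/* Sketch of the get/put lifetime pattern adopted above. */
struct demo_obj {
	refcount_t refcount;
};

static void demo_obj_init(struct demo_obj *o)
{
	refcount_set(&o->refcount, 1);		/* creator holds first ref */
}

static void demo_obj_get(struct demo_obj *o)
{
	refcount_inc(&o->refcount);		/* each user takes a ref */
}

static void demo_obj_put(struct demo_obj *o)
{
	if (refcount_dec_and_test(&o->refcount))
		kfree(o);			/* last ref frees the object */
}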
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b67b4323cff0..f02d07ec5ccb 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -267,7 +267,7 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
phylink_config);
struct mtk_eth *eth = mac->hw;
u32 mcr_cur, mcr_new, sid, i;
- int val, ge_mode, err;
+ int val, ge_mode, err = 0;
/* MT76x8 has no hardware settings between for the MAC */
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 9e48509ed3b2..414e390e6b48 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -244,9 +244,9 @@ static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
cpumask_empty(eq->affinity_mask))
return;
- hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+ hint_err = irq_update_affinity_hint(eq->irq, eq->affinity_mask);
if (hint_err)
- mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+ mlx4_warn(dev, "irq_update_affinity_hint failed, err %d\n", hint_err);
}
#endif
@@ -1123,9 +1123,7 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
free_cpumask_var(eq_table->eq[i].affinity_mask);
-#if defined(CONFIG_SMP)
- irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
-#endif
+ irq_update_affinity_hint(eq_table->eq[i].irq, NULL);
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 812e6810cb3b..c14e06ca64d8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -224,7 +224,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
struct mlx5e_tx_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_eth_seg eth;
- struct mlx5_wqe_data_seg data[0];
+ struct mlx5_wqe_data_seg data[];
};
struct mlx5e_rx_wqe_ll {
@@ -241,8 +241,8 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
union {
- struct mlx5_mtt inline_mtts[0];
- struct mlx5_klm inline_klms[0];
+ DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
+ DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
};
};
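The union in mlx5e_umr_wqe previously used zero-length arrays, which newer compilers reject when a flexible array would otherwise be the sole member of a struct or union; DECLARE_FLEX_ARRAY() wraps the member so a true C99 flexible array can live inside the union. Roughly (an approximation of the macro's effect in GNU C, not its exact expansion):

/* Approximate shape produced by DECLARE_FLEX_ARRAY(type, name):
 * an anonymous struct with an empty leading member, so the flexible
 * array is not the only member and the construct stays valid.
 */
union demo_umr_tail {
	struct {
		struct { } __empty_mtts;
		unsigned long long mtts[];	/* stands in for struct mlx5_mtt */
	};
	struct {
		struct { } __empty_klms;
		unsigned long long klms[];	/* stands in for struct mlx5_klm */
	};
};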
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 00449df98a5e..c1e07496c89c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -570,7 +570,8 @@ static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
{
- *max_average_bw = div_u64(ceil, BYTES_IN_MBIT);
+ /* Hardware treats 0 as "unlimited", so set at least 1. */
+ *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
ceil, *max_average_bw);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index 9c076aa20306..b6f5c1bcdbcd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
{
- struct mlx5e_rep_priv *rpriv;
- struct mlx5e_priv *priv;
-
- /* A given netdev is not a representor or not a slave of LAG configuration */
- if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
- return false;
-
- priv = netdev_priv(netdev);
- rpriv = priv->ppriv;
-
- /* Egress acl forward to vport is supported only non-uplink representor */
- return rpriv->rep->vport != MLX5_VPORT_UPLINK;
+ return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev);
}
static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
@@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt
u16 fwd_vport_num;
int err;
- if (!mlx5e_rep_is_lag_netdev(netdev))
- return;
-
info = ptr;
lag_info = info->lower_state_info;
/* This is not an event of a representor becoming active slave */
@@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr)
struct net_device *lag_dev;
struct mlx5e_priv *priv;
- if (!mlx5e_rep_is_lag_netdev(netdev))
- return;
-
priv = netdev_priv(netdev);
rpriv = priv->ppriv;
lag_dev = info->upper_dev;
@@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_rep_bond *bond;
+ struct mlx5e_priv *priv;
+
+ if (!mlx5e_rep_is_lag_netdev(netdev))
+ return NOTIFY_DONE;
+
+ bond = container_of(nb, struct mlx5e_rep_bond, nb);
+ priv = netdev_priv(netdev);
+ rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH);
+ /* Verify the VF representor is on the same device as the bond handling the netevent. */
+ if (rpriv->uplink_priv.bond != bond)
+ return NOTIFY_DONE;
switch (event) {
case NETDEV_CHANGELOWERSTATE:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index c6d2f8c78db7..48dc121b2cb4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -491,7 +491,7 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
}
br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
- err = register_netdevice_notifier(&br_offloads->netdev_nb);
+ err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
if (err) {
esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
err);
@@ -509,7 +509,9 @@ err_register_swdev_blk:
err_register_swdev:
destroy_workqueue(br_offloads->wq);
err_alloc_wq:
+ rtnl_lock();
mlx5_esw_bridge_cleanup(esw);
+ rtnl_unlock();
}
void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
@@ -524,7 +526,7 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
return;
cancel_delayed_work_sync(&br_offloads->update_work);
- unregister_netdevice_notifier(&br_offloads->netdev_nb);
+ unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
unregister_switchdev_notifier(&br_offloads->nb);
destroy_workqueue(br_offloads->wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 33815246fead..378fc8e3bd97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */
+#include <net/inet_ecn.h>
#include <net/vxlan.h>
#include <net/gre.h>
#include <net/geneve.h>
@@ -235,7 +236,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos;
+ attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
@@ -350,7 +351,7 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
int err;
/* add the IP fields */
- attr.fl.fl4.flowi4_tos = tun_key->tos;
+ attr.fl.fl4.flowi4_tos = tun_key->tos & ~INET_ECN_MASK;
attr.fl.fl4.daddr = tun_key->u.ipv4.dst;
attr.fl.fl4.saddr = tun_key->u.ipv4.src;
attr.ttl = tun_key->ttl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 4cdf8e5b24c2..b789af07829c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -167,6 +167,11 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
+}
+
struct mlx5e_shampo_umr {
u16 len;
};
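mlx5e_shampo_get_cqe_header_index() masks the CQE's raw header index with (hd_per_wq - 1) to wrap it into the header ring; this trick is equivalent to a modulo only when the ring size is a power of two, which the SHAMPO code relies on. A two-line illustration in plain C:

#include <stdio.h>

int main(void)
{
	unsigned int hd_per_wq = 1024;	/* must be a power of two */
	unsigned int raw_index = 1030;	/* index reported in the CQE */

	/* x & (n - 1) == x % n only when n is a power of two */
	printf("%u\n", raw_index & (hd_per_wq - 1));	/* prints 6 */
	return 0;
}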
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 338d65e2c9ce..56e10c84a706 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -341,8 +341,10 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
/* copy the inline part if required */
if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
- memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE);
+ memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+ memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
+ MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start));
dma_len -= MLX5E_XDP_MIN_INLINE;
dma_addr += MLX5E_XDP_MIN_INLINE;
dseg++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 2db9573a3fe6..b56fea142c24 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -157,11 +157,20 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
/* Tunnel mode */
if (mode == XFRM_MODE_TUNNEL) {
eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
if (xo->proto == IPPROTO_IPV6)
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+
+ switch (xo->inner_ipproto) {
+ case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ /* IP | ESP | IP | [TCP | UDP] */
+ eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ default:
+ break;
+ }
return;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index b98db50c3418..428881e0adcb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -131,14 +131,17 @@ static inline bool
mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
- struct xfrm_offload *xo = xfrm_offload(skb);
+ u8 inner_ipproto;
if (!mlx5e_ipsec_eseg_meta(eseg))
return false;
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
- if (xo->inner_ipproto) {
- eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+ inner_ipproto = xfrm_offload(skb)->inner_ipproto;
+ if (inner_ipproto) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+ if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial_inner++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index ac69e0aa09bf..bf80fb612449 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5534,7 +5534,7 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
static int mlx5e_resume(struct auxiliary_device *adev)
{
struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
- struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+ struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = edev->mdev;
int err;
@@ -5557,7 +5557,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
{
- struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+ struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
@@ -5589,7 +5589,7 @@ static int mlx5e_probe(struct auxiliary_device *adev,
mlx5e_build_nic_netdev(netdev);
priv = netdev_priv(netdev);
- dev_set_drvdata(&adev->dev, priv);
+ auxiliary_set_drvdata(adev, priv);
priv->profile = profile;
priv->ppriv = NULL;
@@ -5637,7 +5637,7 @@ err_destroy_netdev:
static void mlx5e_remove(struct auxiliary_device *adev)
{
- struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
+ struct mlx5e_priv *priv = auxiliary_get_drvdata(adev);
pm_message_t state = {};
mlx5e_dcbnl_delete_app(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e86ccc22fb82..ee0a8f5206e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -1117,7 +1117,7 @@ static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr
static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct tcphdr *skb_tcp_hd)
{
- u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
struct tcphdr *last_tcp_hd;
void *last_hd_addr;
@@ -1871,7 +1871,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return skb;
}
-static void
+static struct sk_buff *
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
@@ -1895,7 +1895,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
if (unlikely(!skb))
- return;
+ return NULL;
/* queue up for recycling/reuse */
page_ref_inc(head->page);
@@ -1907,7 +1907,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
ALIGN(head_size, sizeof(long)));
if (unlikely(!skb)) {
rq->stats->buff_alloc_err++;
- return;
+ return NULL;
}
prefetchw(skb->data);
@@ -1918,9 +1918,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
skb->tail += head_size;
skb->len += head_size;
}
- rq->hw_gro_data->skb = skb;
- NAPI_GRO_CB(skb)->count = 1;
- skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size;
+ return skb;
}
static void
@@ -1973,13 +1971,14 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
- u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset);
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
u32 data_offset = wqe_offset & (PAGE_SIZE - 1);
u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
u16 wqe_id = be16_to_cpu(cqe->wqe_id);
u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ u16 head_size = cqe->shampo.header_size;
struct sk_buff **skb = &rq->hw_gro_data->skb;
bool flush = cqe->shampo.flush;
bool match = cqe->shampo.match;
@@ -2011,9 +2010,16 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (!*skb) {
- mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+ if (likely(head_size))
+ *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+ else
+ *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
+ page_idx);
if (unlikely(!*skb))
goto free_hd_entry;
+
+ NAPI_GRO_CB(*skb)->count = 1;
+ skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
} else {
NAPI_GRO_CB(*skb)->count++;
if (NAPI_GRO_CB(*skb)->count == 2 &&
@@ -2027,8 +2033,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
}
- di = &wi->umr.dma_info[page_idx];
- mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+ if (likely(head_size)) {
+ di = &wi->umr.dma_info[page_idx];
+ mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+ }
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
if (flush)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3d908a7e1406..2022fa4a9598 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1414,7 +1414,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_out;
- if (!attr->chain && esw_attr->int_port) {
+ if (!attr->chain && esw_attr->int_port &&
+ attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
/* If decap route device is internal port, change the
* source vport value in reg_c0 back to uplink just in
* case the rule performs goto chain > 0. If we have a miss
@@ -3191,6 +3192,18 @@ actions_match_supported(struct mlx5e_priv *priv,
return false;
}
+ if (!(~actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
+ return false;
+ }
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
+ return false;
+ }
+
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
!modify_header_match_supported(priv, &parse_attr->spec, flow_action,
actions, ct_flow, ct_clear, extack))
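The forward+drop rejection above leans on a compact bitmask idiom: ~actions & mask is zero exactly when every bit of mask is already set in actions. A minimal standalone C sketch of the test (the flag values are made-up stand-ins, not the real MLX5_FLOW_CONTEXT_ACTION_* encoding):

#include <stdio.h>

#define ACT_FWD_DEST (1u << 0) /* illustrative values only */
#define ACT_DROP     (1u << 1)

/* True when all bits of mask are present in actions: ~actions has a
 * 0 wherever actions has a 1, so the AND is zero only if mask selects
 * none of the missing bits.
 */
static int all_set(unsigned int actions, unsigned int mask)
{
	return !(~actions & mask);
}

int main(void)
{
	unsigned int both = ACT_FWD_DEST | ACT_DROP;

	printf("%d\n", all_set(both, both));         /* 1: fwd+drop, rejected */
	printf("%d\n", all_set(ACT_FWD_DEST, both)); /* 0: fwd only, allowed */
	return 0;
}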
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 7fd33b356cc8..ee7ecb88adc1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -208,7 +208,7 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
int cpy1_sz = 2 * ETH_ALEN;
int cpy2_sz = ihs - cpy1_sz;
- memcpy(vhdr, skb->data, cpy1_sz);
+ memcpy(&vhdr->addrs, skb->data, cpy1_sz);
vhdr->h_vlan_proto = skb->vlan_proto;
vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index f690f430f40f..05e08cec5a8c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1574,6 +1574,8 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
{
struct mlx5_esw_bridge_offloads *br_offloads;
+ ASSERT_RTNL();
+
br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
if (!br_offloads)
return ERR_PTR(-ENOMEM);
@@ -1590,6 +1592,8 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
{
struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;
+ ASSERT_RTNL();
+
if (!br_offloads)
return;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
index 3401188e0a60..51ac24e6ec3c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h
@@ -21,7 +21,7 @@ DECLARE_EVENT_CLASS(mlx5_esw_bridge_fdb_template,
__field(unsigned int, used)
),
TP_fast_assign(
- strncpy(__entry->dev_name,
+ strscpy(__entry->dev_name,
netdev_name(fdb->dev),
IFNAMSIZ);
memcpy(__entry->addr, fdb->key.addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 0b0234f9d694..84dbe46d5ede 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -132,7 +132,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
- del_timer(&fw_reset->timer);
+ del_timer_sync(&fw_reset->timer);
}
static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
index d5e47630e284..df58cba37930 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c
@@ -121,12 +121,13 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
{
- if (!mlx5_chains_prios_supported(chains))
- return 1;
-
if (mlx5_chains_ignore_flow_level_supported(chains))
return UINT_MAX;
+ if (!chains->dev->priv.eswitch ||
+ chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS)
+ return 1;
+
/* We should get here only for eswitch case */
return FDB_TC_MAX_PRIO;
}
@@ -211,7 +212,7 @@ static int
create_chain_restore(struct fs_chain *chain)
{
struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
- char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
+ u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
struct mlx5_fs_chains *chains = chain->chains;
enum mlx5e_tc_attr_to_reg chain_to_reg;
struct mlx5_modify_hdr *mod_hdr;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 90fec0649ef5..41807ef55201 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -129,11 +129,11 @@ static void irq_release(struct mlx5_irq *irq)
struct mlx5_irq_pool *pool = irq->pool;
xa_erase(&pool->irqs, irq->index);
- /* free_irq requires that affinity and rmap will be cleared
+ /* free_irq requires that affinity_hint and rmap will be cleared
* before calling it. This is why there is asymmetry with set_rmap
* which should be called after alloc_irq but before request_irq.
*/
- irq_set_affinity_hint(irq->irqn, NULL);
+ irq_update_affinity_hint(irq->irqn, NULL);
free_cpumask_var(irq->mask);
free_irq(irq->irqn, &irq->nh);
kfree(irq);
@@ -238,7 +238,7 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
}
if (affinity) {
cpumask_copy(irq->mask, affinity);
- irq_set_affinity_hint(irq->irqn, irq->mask);
+ irq_set_affinity_and_hint(irq->irqn, irq->mask);
}
irq->pool = pool;
irq->refcount = 1;
@@ -251,7 +251,7 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
}
return irq;
err_xa:
- irq_set_affinity_hint(irq->irqn, NULL);
+ irq_update_affinity_hint(irq->irqn, NULL);
free_cpumask_var(irq->mask);
err_cpumask:
free_irq(irq->irqn, &irq->nh);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 1ef2b6a848c1..7b16a1188aab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -406,23 +406,24 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
switch (module_id) {
case MLX5_MODULE_ID_SFP:
- mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
+ mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
break;
case MLX5_MODULE_ID_QSFP:
case MLX5_MODULE_ID_QSFP_PLUS:
case MLX5_MODULE_ID_QSFP28:
- mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
+ mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
break;
default:
mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
return -EINVAL;
}
- if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH)
+ if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
/* Cross pages read, read until offset 256 in low page */
- size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+ size = MLX5_EEPROM_PAGE_LENGTH - offset;
query.size = size;
+ query.offset = offset;
return mlx5_query_mcia(dev, &query, data);
}
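The rewritten clamp reads more directly than the old subtraction: when offset + size would cross the 256-byte EEPROM page, the read is shortened to end exactly at the page boundary, and offset is only committed to the query afterwards. A small sketch of the same clamping, with the page length hard-coded for illustration:

#include <stdio.h>

#define EEPROM_PAGE_LEN 256 /* mirrors MLX5_EEPROM_PAGE_LENGTH */

/* Shorten a read so it never crosses the page boundary. */
static unsigned int clamp_to_page(unsigned int offset, unsigned int size)
{
	if (offset + size > EEPROM_PAGE_LEN)
		size = EEPROM_PAGE_LEN - offset;
	return size;
}

int main(void)
{
	printf("%u\n", clamp_to_page(250, 16)); /* 6: stops at offset 256 */
	printf("%u\n", clamp_to_page(0, 128));  /* 128: fits, unchanged */
	return 0;
}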
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
index ca5f1177963d..ce5970bdcc6a 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c
@@ -40,11 +40,12 @@ static int lan966x_mac_wait_for_completion(struct lan966x *lan966x)
{
u32 val;
- return readx_poll_timeout(lan966x_mac_get_status,
- lan966x, val,
- (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
- MACACCESS_CMD_IDLE,
- TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
+ return readx_poll_timeout_atomic(lan966x_mac_get_status,
+ lan966x, val,
+ (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
+ MACACCESS_CMD_IDLE,
+ TABLE_UPDATE_SLEEP_US,
+ TABLE_UPDATE_TIMEOUT_US);
}
static void lan966x_mac_select(struct lan966x *lan966x,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 2cb70da63db3..1f60fd125a1d 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -182,9 +182,9 @@ static int lan966x_port_inj_ready(struct lan966x *lan966x, u8 grp)
{
u32 val;
- return readx_poll_timeout(lan966x_port_inj_status, lan966x, val,
- QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
- READL_SLEEP_US, READL_TIMEOUT_US);
+ return readx_poll_timeout_atomic(lan966x_port_inj_status, lan966x, val,
+ QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp),
+ READL_SLEEP_US, READL_TIMEOUT_US);
}
static int lan966x_port_ifh_xmit(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index 59783fc46a7b..10b866e9f726 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1103,7 +1103,7 @@ void sparx5_get_stats64(struct net_device *ndev,
stats->tx_carrier_errors = portstats[spx5_stats_tx_csense_cnt];
stats->tx_window_errors = portstats[spx5_stats_tx_late_coll_cnt];
stats->rx_dropped = portstats[spx5_stats_ana_ac_port_stat_lsb_cnt];
- for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx, ++stats)
+ for (idx = 0; idx < 2 * SPX5_PRIOS; ++idx)
stats->rx_dropped += portstats[spx5_stats_green_p0_rx_port_drop
+ idx];
stats->tx_dropped = portstats[spx5_stats_tx_local_drop];
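The dropped ++stats is the whole bug: stats points at a single struct rtnl_link_stats64, so advancing it on every iteration scattered the accumulated drop counters across whatever memory follows instead of summing them into rx_dropped. A standalone sketch of the corrected accumulation (struct and counts simplified):

#include <stdio.h>

#define NUM_PRIOS 8

struct link_stats {
	unsigned long long rx_dropped;
};

int main(void)
{
	unsigned long long portstats[2 * NUM_PRIOS];
	struct link_stats stats = { 0 };
	int idx;

	for (idx = 0; idx < 2 * NUM_PRIOS; idx++)
		portstats[idx] = 1;

	/* Accumulate into one struct; incrementing the struct pointer
	 * per iteration (the removed ++stats) would walk off the object.
	 */
	for (idx = 0; idx < 2 * NUM_PRIOS; idx++)
		stats.rx_dropped += portstats[idx];

	printf("%llu\n", stats.rx_dropped); /* 16 */
	return 0;
}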
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index dc7e5ea6ec15..148d431fcde4 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -145,9 +145,9 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
skb_put(skb, byte_cnt - ETH_FCS_LEN);
eth_skb_pad(skb);
skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
netdev->stats.rx_bytes += skb->len;
netdev->stats.rx_packets++;
+ netif_rx(skb);
}
static int sparx5_inject(struct sparx5 *sparx5,
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index b1311b656e17..fd3ceb74620d 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -549,14 +549,18 @@ EXPORT_SYMBOL(ocelot_vlan_add);
int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ bool del_pvid = false;
int err;
+ if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ del_pvid = true;
+
err = ocelot_vlan_member_del(ocelot, port, vid);
if (err)
return err;
/* Ingress */
- if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ if (del_pvid)
ocelot_port_set_pvid(ocelot, port, NULL);
/* Egress */
@@ -771,7 +775,10 @@ void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port,
ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port);
- ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause);
+ /* Don't attempt to send PAUSE frames on the NPI port, it's broken */
+ if (port != ocelot->npi)
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA,
+ tx_pause);
/* Undo the effects of ocelot_phylink_mac_link_down:
* enable MAC module
@@ -1429,6 +1436,8 @@ static void
ocelot_populate_ipv4_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv4.dport.value = PTP_EV_PORT;
trap->key.ipv4.dport.mask = 0xffff;
}
@@ -1437,6 +1446,8 @@ static void
ocelot_populate_ipv6_ptp_event_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_EV_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
@@ -1445,6 +1456,8 @@ static void
ocelot_populate_ipv4_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV4;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv4.dport.value = PTP_GEN_PORT;
trap->key.ipv4.dport.mask = 0xffff;
}
@@ -1453,6 +1466,8 @@ static void
ocelot_populate_ipv6_ptp_general_trap_key(struct ocelot_vcap_filter *trap)
{
trap->key_type = OCELOT_VCAP_KEY_IPV6;
+ trap->key.ipv4.proto.value[0] = IPPROTO_UDP;
+ trap->key.ipv4.proto.mask[0] = 0xff;
trap->key.ipv6.dport.value = PTP_GEN_PORT;
trap->key.ipv6.dport.mask = 0xffff;
}
@@ -1734,12 +1749,11 @@ void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data)
}
EXPORT_SYMBOL(ocelot_get_strings);
+/* Caller must hold &ocelot->stats_lock */
static void ocelot_update_stats(struct ocelot *ocelot)
{
int i, j;
- mutex_lock(&ocelot->stats_lock);
-
for (i = 0; i < ocelot->num_phys_ports; i++) {
/* Configure the port to read the stats from */
ocelot_write(ocelot, SYS_STAT_CFG_STAT_VIEW(i), SYS_STAT_CFG);
@@ -1758,8 +1772,6 @@ static void ocelot_update_stats(struct ocelot *ocelot)
~(u64)U32_MAX) + val;
}
}
-
- mutex_unlock(&ocelot->stats_lock);
}
static void ocelot_check_stats_work(struct work_struct *work)
@@ -1768,7 +1780,9 @@ static void ocelot_check_stats_work(struct work_struct *work)
struct ocelot *ocelot = container_of(del_work, struct ocelot,
stats_work);
+ mutex_lock(&ocelot->stats_lock);
ocelot_update_stats(ocelot);
+ mutex_unlock(&ocelot->stats_lock);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
@@ -1778,12 +1792,16 @@ void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data)
{
int i;
+ mutex_lock(&ocelot->stats_lock);
+
/* check and update now */
ocelot_update_stats(ocelot);
/* Copy all counters */
for (i = 0; i < ocelot->num_stats; i++)
*data++ = ocelot->stats[port * ocelot->num_stats + i];
+
+ mutex_unlock(&ocelot->stats_lock);
}
EXPORT_SYMBOL(ocelot_get_ethtool_stats);
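The refactor turns ocelot_update_stats() into a helper that expects &ocelot->stats_lock to be held, so ocelot_get_ethtool_stats() can refresh and copy the counters inside one critical section instead of racing the periodic worker between the update and the copy. A minimal pthread sketch of the caller-must-hold-the-lock convention, with made-up names:

#include <pthread.h>
#include <stdio.h>

#define NUM_COUNTERS 4

static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long long counters[NUM_COUNTERS];

/* Caller must hold stats_lock (mirrors the comment added above). */
static void update_stats(void)
{
	for (int i = 0; i < NUM_COUNTERS; i++)
		counters[i]++;
}

static void get_stats(unsigned long long *out)
{
	pthread_mutex_lock(&stats_lock);
	update_stats();                   /* refresh... */
	for (int i = 0; i < NUM_COUNTERS; i++)
		out[i] = counters[i];     /* ...and snapshot under one lock */
	pthread_mutex_unlock(&stats_lock);
}

int main(void)
{
	unsigned long long snap[NUM_COUNTERS];

	get_stats(snap);
	printf("%llu\n", snap[0]);
	return 0;
}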
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index beb9379424c0..949858891973 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -559,13 +559,6 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
return -EOPNOTSUPP;
}
- if (filter->block_id == VCAP_IS1 &&
- !is_zero_ether_addr(match.mask->dst)) {
- NL_SET_ERR_MSG_MOD(extack,
- "Key type S1_NORMAL cannot match on destination MAC");
- return -EOPNOTSUPP;
- }
-
/* The hw supports mac matches only for the MAC_ETYPE key,
* therefore if other matches (port, tcp flags, etc) are added
* then just bail out

@@ -580,6 +573,14 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
return -EOPNOTSUPP;
flow_rule_match_eth_addrs(rule, &match);
+
+ if (filter->block_id == VCAP_IS1 &&
+ !is_zero_ether_addr(match.mask->dst)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Key type S1_NORMAL cannot match on destination MAC");
+ return -EOPNOTSUPP;
+ }
+
filter->key_type = OCELOT_VCAP_KEY_ETYPE;
ether_addr_copy(filter->key.etype.dmac.value,
match.key->dst);
@@ -805,13 +806,34 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
struct netlink_ext_ack *extack = f->common.extack;
struct ocelot_vcap_filter *filter;
int chain = f->common.chain_index;
- int ret;
+ int block_id, ret;
if (chain && !ocelot_find_vcap_filter_that_points_at(ocelot, chain)) {
NL_SET_ERR_MSG_MOD(extack, "No default GOTO action points to this chain");
return -EOPNOTSUPP;
}
+ block_id = ocelot_chain_to_block(chain, ingress);
+ if (block_id < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Cannot offload to this chain");
+ return -EOPNOTSUPP;
+ }
+
+ filter = ocelot_vcap_block_find_filter_by_id(&ocelot->block[block_id],
+ f->cookie, true);
+ if (filter) {
+ /* Filter already exists on other ports */
+ if (!ingress) {
+ NL_SET_ERR_MSG_MOD(extack, "VCAP ES0 does not support shared filters");
+ return -EOPNOTSUPP;
+ }
+
+ filter->ingress_port_mask |= BIT(port);
+
+ return ocelot_vcap_filter_replace(ocelot, filter);
+ }
+
+ /* Filter didn't exist, create it now */
filter = ocelot_vcap_filter_create(ocelot, port, ingress, f);
if (!filter)
return -ENOMEM;
@@ -874,6 +896,12 @@ int ocelot_cls_flower_destroy(struct ocelot *ocelot, int port,
if (filter->type == OCELOT_VCAP_FILTER_DUMMY)
return ocelot_vcap_dummy_filter_del(ocelot, filter);
+ if (ingress) {
+ filter->ingress_port_mask &= ~BIT(port);
+ if (filter->ingress_port_mask)
+ return ocelot_vcap_filter_replace(ocelot, filter);
+ }
+
return ocelot_vcap_filter_del(ocelot, filter);
}
EXPORT_SYMBOL_GPL(ocelot_cls_flower_destroy);
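Both flower hunks implement one sharing scheme: filters are looked up by tc cookie, each ingress port sets its bit in ingress_port_mask when the rule is replicated to it, clears the bit on destroy, and the hardware entry is deleted only when the mask empties. A small hedged sketch of refcounting by port mask:

#include <stdio.h>

struct filter {
	unsigned long cookie;
	unsigned int ingress_port_mask;
};

static void filter_add_port(struct filter *f, int port)
{
	f->ingress_port_mask |= 1u << port;
}

/* Returns 1 when the last port is gone and the entry can be deleted. */
static int filter_del_port(struct filter *f, int port)
{
	f->ingress_port_mask &= ~(1u << port);
	return f->ingress_port_mask == 0;
}

int main(void)
{
	struct filter f = { .cookie = 42, .ingress_port_mask = 0 };

	filter_add_port(&f, 1);
	filter_add_port(&f, 3);
	printf("%d\n", filter_del_port(&f, 1)); /* 0: port 3 still uses it */
	printf("%d\n", filter_del_port(&f, 3)); /* 1: last user, delete */
	return 0;
}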
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 8115c3db252e..e271b6225b72 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1187,7 +1187,7 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
ocelot_port_bridge_join(ocelot, port, bridge);
err = switchdev_bridge_port_offload(brport_dev, dev, priv,
- &ocelot_netdevice_nb,
+ &ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb,
false, extack);
if (err)
@@ -1201,7 +1201,7 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev,
err_switchdev_sync:
switchdev_bridge_port_unoffload(brport_dev, priv,
- &ocelot_netdevice_nb,
+ &ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb);
err_switchdev_offload:
ocelot_port_bridge_leave(ocelot, port, bridge);
@@ -1214,7 +1214,7 @@ static void ocelot_netdevice_pre_bridge_leave(struct net_device *dev,
struct ocelot_port_private *priv = netdev_priv(dev);
switchdev_bridge_port_unoffload(brport_dev, priv,
- &ocelot_netdevice_nb,
+ &ocelot_switchdev_nb,
&ocelot_switchdev_blocking_nb);
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 784292b16290..1543e47456d5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -723,6 +723,8 @@ static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev)
return true;
if (netif_is_gretap(netdev))
return true;
+ if (netif_is_ip6gretap(netdev))
+ return true;
return false;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index dfb4468fe287..0a326e04e692 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -1011,6 +1011,7 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
struct nfp_flower_repr_priv *repr_priv;
struct nfp_tun_offloaded_mac *entry;
struct nfp_repr *repr;
+ u16 nfp_mac_idx;
int ida_idx;
entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
@@ -1029,8 +1030,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
entry->bridge_count--;
if (!entry->bridge_count && entry->ref_count) {
- u16 nfp_mac_idx;
-
nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
if (__nfp_tunnel_offload_mac(app, mac, nfp_mac_idx,
false)) {
@@ -1046,7 +1045,6 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
- u16 nfp_mac_idx;
int port, err;
repr_priv = list_first_entry(&entry->repr_list,
@@ -1074,8 +1072,14 @@ nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
&entry->ht_node,
offloaded_macs_params));
+
+ if (nfp_flower_is_supported_bridge(netdev))
+ nfp_mac_idx = entry->index & ~NFP_TUN_PRE_TUN_IDX_BIT;
+ else
+ nfp_mac_idx = entry->index;
+
/* If MAC has global ID then extract and free the ida entry. */
- if (nfp_tunnel_is_mac_idx_global(entry->index)) {
+ if (nfp_tunnel_is_mac_idx_global(nfp_mac_idx)) {
ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
}
diff --git a/drivers/net/ethernet/seeq/ether3.c b/drivers/net/ethernet/seeq/ether3.c
index 16a4cbae9326..c672f92d65e9 100644
--- a/drivers/net/ethernet/seeq/ether3.c
+++ b/drivers/net/ethernet/seeq/ether3.c
@@ -749,6 +749,7 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
const struct ether3_data *data = id->data;
struct net_device *dev;
int bus_type, ret;
+ u8 addr[ETH_ALEN];
ether3_banner();
@@ -776,7 +777,8 @@ ether3_probe(struct expansion_card *ec, const struct ecard_id *id)
priv(dev)->seeq = priv(dev)->base + data->base_offset;
dev->irq = ec->irq;
- ether3_addr(dev->dev_addr, ec);
+ ether3_addr(addr, ec);
+ eth_hw_addr_set(dev, addr);
priv(dev)->dev = dev;
timer_setup(&priv(dev)->timer, ether3_ledoff, 0);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index dd6f69ced4ee..fc9cef9dcefc 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1648,7 +1648,7 @@ static int smc911x_ethtool_geteeprom(struct net_device *dev,
return ret;
if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0)
return ret;
- }
+ }
memcpy(data, eebuf+eeprom->offset, eeprom->len);
return 0;
}
@@ -1667,11 +1667,11 @@ static int smc911x_ethtool_seteeprom(struct net_device *dev,
return ret;
/* write byte */
if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0)
- return ret;
+ return ret;
if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0)
return ret;
- }
- return 0;
+ }
+ return 0;
}
static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
index adfeb8d3293d..62a69a91ab22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
@@ -12,6 +12,7 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
@@ -48,46 +49,60 @@
#define DWMAC_RX_VARDELAY(d) ((d) << DWMAC_RX_VARDELAY_SHIFT)
#define DWMAC_RXN_VARDELAY(d) ((d) << DWMAC_RXN_VARDELAY_SHIFT)
+struct oxnas_dwmac;
+
+struct oxnas_dwmac_data {
+ int (*setup)(struct oxnas_dwmac *dwmac);
+};
+
struct oxnas_dwmac {
struct device *dev;
struct clk *clk;
struct regmap *regmap;
+ const struct oxnas_dwmac_data *data;
};
-static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
+static int oxnas_dwmac_setup_ox810se(struct oxnas_dwmac *dwmac)
{
- struct oxnas_dwmac *dwmac = priv;
unsigned int value;
int ret;
- /* Reset HW here before changing the glue configuration */
- ret = device_reset(dwmac->dev);
- if (ret)
+ ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
+ if (ret < 0)
return ret;
- ret = clk_prepare_enable(dwmac->clk);
- if (ret)
- return ret;
+ /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
+ value |= BIT(DWMAC_CKEN_GTX) |
+ /* Use simple mux for 25/125 Mhz clock switching */
+ BIT(DWMAC_SIMPLE_MUX);
+
+ regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
+
+ return 0;
+}
+
+static int oxnas_dwmac_setup_ox820(struct oxnas_dwmac *dwmac)
+{
+ unsigned int value;
+ int ret;
ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
- if (ret < 0) {
- clk_disable_unprepare(dwmac->clk);
+ if (ret < 0)
return ret;
- }
/* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
value |= BIT(DWMAC_CKEN_GTX) |
/* Use simple mux for 25/125 Mhz clock switching */
- BIT(DWMAC_SIMPLE_MUX) |
- /* set auto switch tx clock source */
- BIT(DWMAC_AUTO_TX_SOURCE) |
- /* enable tx & rx vardelay */
- BIT(DWMAC_CKEN_TX_OUT) |
- BIT(DWMAC_CKEN_TXN_OUT) |
- BIT(DWMAC_CKEN_TX_IN) |
- BIT(DWMAC_CKEN_RX_OUT) |
- BIT(DWMAC_CKEN_RXN_OUT) |
- BIT(DWMAC_CKEN_RX_IN);
+ BIT(DWMAC_SIMPLE_MUX) |
+ /* set auto switch tx clock source */
+ BIT(DWMAC_AUTO_TX_SOURCE) |
+ /* enable tx & rx vardelay */
+ BIT(DWMAC_CKEN_TX_OUT) |
+ BIT(DWMAC_CKEN_TXN_OUT) |
+ BIT(DWMAC_CKEN_TX_IN) |
+ BIT(DWMAC_CKEN_RX_OUT) |
+ BIT(DWMAC_CKEN_RXN_OUT) |
+ BIT(DWMAC_CKEN_RX_IN);
regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
/* set tx & rx vardelay */
@@ -100,6 +115,27 @@ static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
return 0;
}
+static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct oxnas_dwmac *dwmac = priv;
+ int ret;
+
+ /* Reset HW here before changing the glue configuration */
+ ret = device_reset(dwmac->dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dwmac->clk);
+ if (ret)
+ return ret;
+
+ ret = dwmac->data->setup(dwmac);
+ if (ret)
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
{
struct oxnas_dwmac *dwmac = priv;
@@ -128,6 +164,12 @@ static int oxnas_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ dwmac->data = (const struct oxnas_dwmac_data *)of_device_get_match_data(&pdev->dev);
+ if (!dwmac->data) {
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
plat_dat->init = oxnas_dwmac_init;
@@ -166,8 +208,23 @@ err_remove_config_dt:
return ret;
}
+static const struct oxnas_dwmac_data ox810se_dwmac_data = {
+ .setup = oxnas_dwmac_setup_ox810se,
+};
+
+static const struct oxnas_dwmac_data ox820_dwmac_data = {
+ .setup = oxnas_dwmac_setup_ox820,
+};
+
static const struct of_device_id oxnas_dwmac_match[] = {
- { .compatible = "oxsemi,ox820-dwmac" },
+ {
+ .compatible = "oxsemi,ox810se-dwmac",
+ .data = &ox810se_dwmac_data,
+ },
+ {
+ .compatible = "oxsemi,ox820-dwmac",
+ .data = &ox820_dwmac_data,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
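The oxnas rework is the standard of_device_id match-data pattern: each compatible string carries a const data structure holding a setup callback, probe fetches it with of_device_get_match_data(), and oxnas_dwmac_init() dispatches through it. A plain C sketch of the dispatch shape, with the device-tree lookup replaced by a table scan for illustration:

#include <stdio.h>
#include <string.h>

struct dwmac {
	const struct dwmac_data *data;
};

struct dwmac_data {
	int (*setup)(struct dwmac *dwmac);
};

static int setup_ox810se(struct dwmac *dwmac) { printf("ox810se setup\n"); return 0; }
static int setup_ox820(struct dwmac *dwmac)   { printf("ox820 setup\n");   return 0; }

static const struct dwmac_data ox810se_data = { .setup = setup_ox810se };
static const struct dwmac_data ox820_data   = { .setup = setup_ox820 };

static const struct {
	const char *compatible;
	const struct dwmac_data *data;
} match_table[] = {
	{ "oxsemi,ox810se-dwmac", &ox810se_data },
	{ "oxsemi,ox820-dwmac",   &ox820_data },
	{ NULL, NULL }
};

/* Stand-in for of_device_get_match_data(). */
static const struct dwmac_data *get_match_data(const char *compat)
{
	for (int i = 0; match_table[i].compatible; i++)
		if (!strcmp(match_table[i].compatible, compat))
			return match_table[i].data;
	return NULL;
}

int main(void)
{
	struct dwmac dwmac = { .data = get_match_data("oxsemi,ox820-dwmac") };

	return dwmac.data ? dwmac.data->setup(&dwmac) : -1;
}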
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 617d0e4c6495..09644ab0d87a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -756,7 +756,7 @@ static int sun8i_dwmac_reset(struct stmmac_priv *priv)
if (err) {
dev_err(priv->device, "EMAC reset timeout\n");
- return -EFAULT;
+ return err;
}
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index e2e0f977875d..c3f10a92b62b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -22,21 +22,21 @@
#define ETHER_CLK_SEL_RMII_CLK_EN BIT(2)
#define ETHER_CLK_SEL_RMII_CLK_RST BIT(3)
#define ETHER_CLK_SEL_DIV_SEL_2 BIT(4)
-#define ETHER_CLK_SEL_DIV_SEL_20 BIT(0)
+#define ETHER_CLK_SEL_DIV_SEL_20 0
#define ETHER_CLK_SEL_FREQ_SEL_125M (BIT(9) | BIT(8))
#define ETHER_CLK_SEL_FREQ_SEL_50M BIT(9)
#define ETHER_CLK_SEL_FREQ_SEL_25M BIT(8)
#define ETHER_CLK_SEL_FREQ_SEL_2P5M 0
-#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN 0
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
-#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN 0
#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC BIT(12)
#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV BIT(13)
-#define ETHER_CLK_SEL_TX_CLK_O_TX_I BIT(0)
+#define ETHER_CLK_SEL_TX_CLK_O_TX_I 0
#define ETHER_CLK_SEL_TX_CLK_O_RMII_I BIT(14)
#define ETHER_CLK_SEL_TX_O_E_N_IN BIT(15)
-#define ETHER_CLK_SEL_RMII_CLK_SEL_IN BIT(0)
+#define ETHER_CLK_SEL_RMII_CLK_SEL_IN 0
#define ETHER_CLK_SEL_RMII_CLK_SEL_RX_C BIT(16)
#define ETHER_CLK_SEL_RX_TX_CLK_EN (ETHER_CLK_SEL_RX_CLK_EN | ETHER_CLK_SEL_TX_CLK_EN)
@@ -49,13 +49,15 @@ struct visconti_eth {
void __iomem *reg;
u32 phy_intf_sel;
struct clk *phy_ref_clk;
+ struct device *dev;
spinlock_t lock; /* lock to protect register update */
};
static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
{
struct visconti_eth *dwmac = priv;
- unsigned int val, clk_sel_val;
+ struct net_device *netdev = dev_get_drvdata(dwmac->dev);
+ unsigned int val, clk_sel_val = 0;
unsigned long flags;
spin_lock_irqsave(&dwmac->lock, flags);
@@ -85,7 +87,9 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
break;
default:
/* No bit control */
- break;
+ netdev_err(netdev, "Unsupported speed request (%d)\n", speed);
+ spin_unlock_irqrestore(&dwmac->lock, flags);
+ return;
}
writel(val, dwmac->reg + MAC_CTRL_REG);
@@ -96,31 +100,41 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
val |= ETHER_CLK_SEL_TX_O_E_N_IN;
writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
switch (dwmac->phy_intf_sel) {
case ETHER_CONFIG_INTF_RGMII:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
case ETHER_CONFIG_INTF_RMII:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RMII_CLK_RST;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
case ETHER_CONFIG_INTF_MII:
default:
val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
- ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
- ETHER_CLK_SEL_RMII_CLK_EN;
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
break;
}
- /* Start clock */
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
- val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
-
- val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
- writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
-
spin_unlock_irqrestore(&dwmac->lock, flags);
}
@@ -219,6 +233,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev)
spin_lock_init(&dwmac->lock);
dwmac->reg = stmmac_res.addr;
+ dwmac->dev = &pdev->dev;
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
index 1914ad698cab..acd70b9a3173 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -150,6 +150,7 @@
#define NUM_DWMAC100_DMA_REGS 9
#define NUM_DWMAC1000_DMA_REGS 23
+#define NUM_DWMAC4_DMA_REGS 27
void dwmac_enable_dma_transmission(void __iomem *ioaddr);
void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 40b5ed94cb54..5b195d5051d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -194,7 +194,6 @@ struct stmmac_priv {
u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
- int tx_coalesce;
int hwts_tx_en;
bool tx_path_in_lpi_mode;
bool tso;
@@ -229,7 +228,6 @@ struct stmmac_priv {
unsigned int flow_ctrl;
unsigned int pause;
struct mii_bus *mii;
- int mii_irq[PHY_MAX_ADDR];
struct phylink_config phylink_config;
struct phylink *phylink;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 164dff5ec32e..abfb3cd5958d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -21,10 +21,18 @@
#include "dwxgmac2.h"
#define REG_SPACE_SIZE 0x1060
+#define GMAC4_REG_SPACE_SIZE 0x116C
#define MAC100_ETHTOOL_NAME "st_mac100"
#define GMAC_ETHTOOL_NAME "st_gmac"
#define XGMAC_ETHTOOL_NAME "st_xgmac"
+/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h
+ *
+ * It is here because dwmac_dma.h and dwmac4_dma.h cannot be included at the
+ * same time due to the conflicting macro names.
+ */
+#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100
+
#define ETHTOOL_DMA_OFFSET 55
struct stmmac_stats {
@@ -434,6 +442,8 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev)
if (priv->plat->has_xgmac)
return XGMAC_REGSIZE * 4;
+ else if (priv->plat->has_gmac4)
+ return GMAC4_REG_SPACE_SIZE;
return REG_SPACE_SIZE;
}
@@ -446,8 +456,13 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
stmmac_dump_mac_regs(priv, priv->hw, reg_space);
stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
- if (!priv->plat->has_xgmac) {
- /* Copy DMA registers to where ethtool expects them */
+ /* Copy DMA registers to where ethtool expects them */
+ if (priv->plat->has_gmac4) {
+ /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */
+ memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
+ &reg_space[GMAC4_DMA_CHAN_BASE_ADDR / 4],
+ NUM_DWMAC4_DMA_REGS * 4);
+ } else if (!priv->plat->has_xgmac) {
memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
&reg_space[DMA_BUS_MODE / 4],
NUM_DWMAC1000_DMA_REGS * 4);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index 074e2cdfb0fa..a7ec9f4d46ce 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -145,15 +145,20 @@ static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
static void get_systime(void __iomem *ioaddr, u64 *systime)
{
- u64 ns;
-
- /* Get the TSSS value */
- ns = readl(ioaddr + PTP_STNSR);
- /* Get the TSS and convert sec time value to nanosecond */
- ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
+ u64 ns, sec0, sec1;
+
+ /* Get the TSS value */
+ sec1 = readl_relaxed(ioaddr + PTP_STSR);
+ do {
+ sec0 = sec1;
+ /* Get the TSSS value */
+ ns = readl_relaxed(ioaddr + PTP_STNSR);
+ /* Get the TSS value */
+ sec1 = readl_relaxed(ioaddr + PTP_STSR);
+ } while (sec0 != sec1);
if (systime)
- *systime = ns;
+ *systime = ns + (sec1 * 1000000000ULL);
}
static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
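The loop fixes a torn read: seconds and nanoseconds live in two 32-bit registers, and a nanosecond rollover between the reads pairs a stale second with a wrapped nanosecond value. Re-reading the seconds register until it is stable brackets the nanosecond read, the usual recipe for sampling any 64-bit quantity split across two 32-bit halves. A self-contained sketch of the pattern, with the registers simulated by volatile variables:

#include <stdio.h>

/* Simulated 32-bit time registers; real hardware updates these. */
static volatile unsigned int reg_sec = 41;
static volatile unsigned int reg_nsec = 999999999u;

static unsigned long long get_systime(void)
{
	unsigned long long ns;
	unsigned int sec0, sec1;

	sec1 = reg_sec;
	do {
		sec0 = sec1;
		ns = reg_nsec;  /* may race with a seconds rollover... */
		sec1 = reg_sec; /* ...so retry if the seconds moved */
	} while (sec0 != sec1);

	return ns + sec1 * 1000000000ULL;
}

int main(void)
{
	printf("%llu\n", get_systime());
	return 0;
}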
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 63ff2dad8c85..bde76ea2deec 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -402,7 +402,7 @@ static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
* Description: this function is to verify and enter in LPI mode in case of
* EEE.
*/
-static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
+static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
{
u32 tx_cnt = priv->plat->tx_queues_to_use;
u32 queue;
@@ -412,13 +412,14 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
if (tx_q->dirty_tx != tx_q->cur_tx)
- return; /* still unfinished work */
+ return -EBUSY; /* still unfinished work */
}
/* Check and enter in LPI mode */
if (!priv->tx_path_in_lpi_mode)
stmmac_set_eee_mode(priv, priv->hw,
priv->plat->en_tx_lpi_clockgating);
+ return 0;
}
/**
@@ -450,8 +451,8 @@ static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
- stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/**
@@ -889,6 +890,9 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
int ret;
+ if (priv->plat->ptp_clk_freq_config)
+ priv->plat->ptp_clk_freq_config(priv);
+
ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
if (ret)
return ret;
@@ -911,8 +915,6 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
priv->hwts_tx_en = 0;
priv->hwts_rx_en = 0;
- stmmac_ptp_register(priv);
-
return 0;
}
@@ -2647,8 +2649,8 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
priv->eee_sw_timer_en) {
- stmmac_enable_eee_mode(priv);
- mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
/* We still have pending packets, let's call for a new scheduling */
@@ -3238,7 +3240,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
/**
* stmmac_hw_setup - setup mac in a usable state.
* @dev : pointer to the device structure.
- * @init_ptp: initialize PTP if set
+ * @ptp_register: register PTP if set
* Description:
* this is the main function to setup the HW in a usable state because the
* dma engine is reset, the core registers are configured (e.g. AXI,
@@ -3248,7 +3250,7 @@ static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
+static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
{
struct stmmac_priv *priv = netdev_priv(dev);
u32 rx_cnt = priv->plat->rx_queues_to_use;
@@ -3305,13 +3307,13 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
stmmac_mmc_setup(priv);
- if (init_ptp) {
- ret = stmmac_init_ptp(priv);
- if (ret == -EOPNOTSUPP)
- netdev_warn(priv->dev, "PTP not supported by HW\n");
- else if (ret)
- netdev_warn(priv->dev, "PTP init failed\n");
- }
+ ret = stmmac_init_ptp(priv);
+ if (ret == -EOPNOTSUPP)
+ netdev_warn(priv->dev, "PTP not supported by HW\n");
+ else if (ret)
+ netdev_warn(priv->dev, "PTP init failed\n");
+ else if (ptp_register)
+ stmmac_ptp_register(priv);
priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
@@ -7159,7 +7161,8 @@ int stmmac_dvr_probe(struct device *device,
pm_runtime_get_noresume(device);
pm_runtime_set_active(device);
- pm_runtime_enable(device);
+ if (!pm_runtime_enabled(device))
+ pm_runtime_enable(device);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI) {
@@ -7249,6 +7252,10 @@ int stmmac_dvr_remove(struct device *dev)
netdev_info(priv->dev, "%s: removing driver", __func__);
+ pm_runtime_get_sync(dev);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
stmmac_stop_all_dma(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
@@ -7267,8 +7274,6 @@ int stmmac_dvr_remove(struct device *dev)
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
reset_control_assert(priv->plat->stmmac_ahb_rst);
- pm_runtime_put(dev);
- pm_runtime_disable(dev);
if (priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 0d24ebd37873..1c9f02f9c317 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -297,9 +297,6 @@ void stmmac_ptp_register(struct stmmac_priv *priv)
{
int i;
- if (priv->plat->ptp_clk_freq_config)
- priv->plat->ptp_clk_freq_config(priv);
-
for (i = 0; i < priv->dma_cap.pps_out_num; i++) {
if (i >= STMMAC_PPS_MAX)
break;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 33142d505fc8..03575c017500 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -349,7 +349,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
struct cpsw_common *cpsw = ndev_to_cpsw(xmeta->ndev);
int pkt_size = cpsw->rx_packet_max;
int ret = 0, port, ch = xmeta->ch;
- int headroom = CPSW_HEADROOM;
+ int headroom = CPSW_HEADROOM_NA;
struct net_device *ndev = xmeta->ndev;
struct cpsw_priv *priv;
struct page_pool *pool;
@@ -392,7 +392,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
- int headroom = CPSW_HEADROOM, size = len;
+ int size = len;
xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
@@ -442,7 +442,7 @@ requeue:
xmeta->ndev = ndev;
xmeta->ch = ch;
- dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
pkt_size, 0);
if (ret < 0) {
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 279e261e4720..bd4b1528cf99 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -283,7 +283,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
{
struct page *new_page, *page = token;
void *pa = page_address(page);
- int headroom = CPSW_HEADROOM;
+ int headroom = CPSW_HEADROOM_NA;
struct cpsw_meta_xdp *xmeta;
struct cpsw_common *cpsw;
struct net_device *ndev;
@@ -336,7 +336,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
}
if (priv->xdp_prog) {
- int headroom = CPSW_HEADROOM, size = len;
+ int size = len;
xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
if (status & CPDMA_RX_VLAN_ENCAP) {
@@ -386,7 +386,7 @@ requeue:
xmeta->ndev = ndev;
xmeta->ch = ch;
- dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM;
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
pkt_size, 0);
if (ret < 0) {
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 3537502e5e8b..8f6817f346ba 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1122,7 +1122,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv)
xmeta->ndev = priv->ndev;
xmeta->ch = ch;
- dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM;
+ dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM_NA;
ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
page, dma,
cpsw->rx_packet_max,
@@ -1146,7 +1146,7 @@ int cpsw_fill_rx_channels(struct cpsw_priv *priv)
static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
int size)
{
- struct page_pool_params pp_params;
+ struct page_pool_params pp_params = {};
struct page_pool *pool;
pp_params.order = 0;
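The `= {}` initializer is the entire fix: struct page_pool_params has members the driver never assigns, and an uninitialized stack declaration hands page_pool_create() whatever garbage was left there. A tiny sketch of the contrast (the empty-brace form is the kernel/GNU spelling; portable C would write { 0 }):

#include <stdio.h>

struct pool_params {
	int order;
	int flags;      /* fields the caller never assigns... */
	void *init_arg; /* ...must still read back as zero/NULL */
};

int main(void)
{
	struct pool_params pp_params = {}; /* every member zeroed */

	pp_params.order = 0;
	printf("flags=%d init_arg=%p\n", pp_params.flags, pp_params.init_arg);
	return 0;
}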
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index cf0917b29e30..5251fc324221 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1091,20 +1091,22 @@ static int tsi108_get_mac(struct net_device *dev)
struct tsi108_prv_data *data = netdev_priv(dev);
u32 word1 = TSI_READ(TSI108_MAC_ADDR1);
u32 word2 = TSI_READ(TSI108_MAC_ADDR2);
+ u8 addr[ETH_ALEN];
/* Note that the octets are reversed from what the manual says,
* producing an even weirder ordering...
*/
if (word2 == 0 && word1 == 0) {
- dev->dev_addr[0] = 0x00;
- dev->dev_addr[1] = 0x06;
- dev->dev_addr[2] = 0xd2;
- dev->dev_addr[3] = 0x00;
- dev->dev_addr[4] = 0x00;
+ addr[0] = 0x00;
+ addr[1] = 0x06;
+ addr[2] = 0xd2;
+ addr[3] = 0x00;
+ addr[4] = 0x00;
if (0x8 == data->phy)
- dev->dev_addr[5] = 0x01;
+ addr[5] = 0x01;
else
- dev->dev_addr[5] = 0x02;
+ addr[5] = 0x02;
+ eth_hw_addr_set(dev, addr);
word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
@@ -1114,12 +1116,13 @@ static int tsi108_get_mac(struct net_device *dev)
TSI_WRITE(TSI108_MAC_ADDR1, word1);
TSI_WRITE(TSI108_MAC_ADDR2, word2);
} else {
- dev->dev_addr[0] = (word2 >> 16) & 0xff;
- dev->dev_addr[1] = (word2 >> 24) & 0xff;
- dev->dev_addr[2] = (word1 >> 0) & 0xff;
- dev->dev_addr[3] = (word1 >> 8) & 0xff;
- dev->dev_addr[4] = (word1 >> 16) & 0xff;
- dev->dev_addr[5] = (word1 >> 24) & 0xff;
+ addr[0] = (word2 >> 16) & 0xff;
+ addr[1] = (word2 >> 24) & 0xff;
+ addr[2] = (word1 >> 0) & 0xff;
+ addr[3] = (word1 >> 8) & 0xff;
+ addr[4] = (word1 >> 16) & 0xff;
+ addr[5] = (word1 >> 24) & 0xff;
+ eth_hw_addr_set(dev, addr);
}
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -1136,14 +1139,12 @@ static int tsi108_set_mac(struct net_device *dev, void *addr)
{
struct tsi108_prv_data *data = netdev_priv(dev);
u32 word1, word2;
- int i;
if (!is_valid_ether_addr(addr))
return -EADDRNOTAVAIL;
- for (i = 0; i < 6; i++)
- /* +2 is for the offset of the HW addr type */
- dev->dev_addr[i] = ((unsigned char *)addr)[i + 2];
+ /* +2 is for the offset of the HW addr type */
+ eth_hw_addr_set(dev, ((unsigned char *)addr) + 2);
word2 = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 24);
diff --git a/drivers/net/ethernet/vertexcom/Kconfig b/drivers/net/ethernet/vertexcom/Kconfig
index 6e2cf062ddba..4184a635fe01 100644
--- a/drivers/net/ethernet/vertexcom/Kconfig
+++ b/drivers/net/ethernet/vertexcom/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_VERTEXCOM
bool "Vertexcom devices"
- default n
+ default y
help
If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 23ac353b35fe..377c94ec2486 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -41,8 +41,9 @@
#include "xilinx_axienet.h"
/* Descriptors defines for Tx and Rx DMA */
-#define TX_BD_NUM_DEFAULT 64
+#define TX_BD_NUM_DEFAULT 128
#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX 4096
#define RX_BD_NUM_MAX 4096
@@ -496,7 +497,8 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
static int __axienet_device_reset(struct axienet_local *lp)
{
- u32 timeout;
+ u32 value;
+ int ret;
/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
* process of Axi DMA takes a while to complete as all pending
@@ -506,15 +508,23 @@ static int __axienet_device_reset(struct axienet_local *lp)
* they both reset the entire DMA core, so only one needs to be used.
*/
axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
- timeout = DELAY_OF_ONE_MILLISEC;
- while (axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET) &
- XAXIDMA_CR_RESET_MASK) {
- udelay(1);
- if (--timeout == 0) {
- netdev_err(lp->ndev, "%s: DMA reset timeout!\n",
- __func__);
- return -ETIMEDOUT;
- }
+ ret = read_poll_timeout(axienet_dma_in32, value,
+ !(value & XAXIDMA_CR_RESET_MASK),
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAXIDMA_TX_CR_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
+ return ret;
+ }
+
+ /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
+ ret = read_poll_timeout(axienet_ior, value,
+ value & XAE_INT_PHYRSTCMPLT_MASK,
+ DELAY_OF_ONE_MILLISEC, 50000, false, lp,
+ XAE_IS_OFFSET);
+ if (ret) {
+ dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
+ return ret;
}
return 0;
@@ -623,6 +633,8 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
break;
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
phys = desc_get_phys_addr(lp, cur_p);
dma_unmap_single(ndev->dev.parent, phys,
(cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
@@ -631,13 +643,15 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
dev_consume_skb_irq(cur_p->skb);
- cur_p->cntrl = 0;
cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app4 = 0;
- cur_p->status = 0;
cur_p->skb = NULL;
+ /* ensure our transmit path and device don't prematurely see status cleared */
+ wmb();
+ cur_p->cntrl = 0;
+ cur_p->status = 0;
if (sizep)
*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
@@ -647,6 +661,32 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
}
/**
+ * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
+ * @lp: Pointer to the axienet_local structure
+ * @num_frag: The number of BDs to check for
+ *
+ * Return: 0, on success
+ * NETDEV_TX_BUSY, if any of the descriptors are not free
+ *
+ * This function is invoked before BDs are allocated and transmission starts.
+ * This function returns 0 if a BD or group of BDs can be allocated for
+ * transmission. If the BD or any of the BDs are not free the function
+ * returns a busy status. This is invoked from axienet_start_xmit.
+ */
+static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
+ int num_frag)
+{
+ struct axidma_bd *cur_p;
+
+ /* Ensure we see all descriptor updates from device or TX IRQ path */
+ rmb();
+ cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
+ if (cur_p->cntrl)
+ return NETDEV_TX_BUSY;
+ return 0;
+}
+
+/**
* axienet_start_xmit_done - Invoked once a transmit is completed by the
* Axi DMA Tx channel.
* @ndev: Pointer to the net_device structure
@@ -675,30 +715,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
/* Matches barrier in axienet_start_xmit */
smp_mb();
- netif_wake_queue(ndev);
-}
-
-/**
- * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
- * @lp: Pointer to the axienet_local structure
- * @num_frag: The number of BDs to check for
- *
- * Return: 0, on success
- * NETDEV_TX_BUSY, if any of the descriptors are not free
- *
- * This function is invoked before BDs are allocated and transmission starts.
- * This function returns 0 if a BD or group of BDs can be allocated for
- * transmission. If the BD or any of the BDs are not free the function
- * returns a busy status. This is invoked from axienet_start_xmit.
- */
-static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
- int num_frag)
-{
- struct axidma_bd *cur_p;
- cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % lp->tx_bd_num];
- if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK)
- return NETDEV_TX_BUSY;
- return 0;
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
}
/**
@@ -730,20 +748,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
num_frag = skb_shinfo(skb)->nr_frags;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
- if (axienet_check_tx_bd_space(lp, num_frag)) {
- if (netif_queue_stopped(ndev))
- return NETDEV_TX_BUSY;
-
+ if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
+ /* Should not happen as last start_xmit call should have
+ * checked for sufficient space and queue should only be
+ * woken when sufficient space is available.
+ */
netif_stop_queue(ndev);
-
- /* Matches barrier in axienet_start_xmit_done */
- smp_mb();
-
- /* Space might have just been freed - check again */
- if (axienet_check_tx_bd_space(lp, num_frag))
- return NETDEV_TX_BUSY;
-
- netif_wake_queue(ndev);
+ if (net_ratelimit())
+ netdev_warn(ndev, "TX ring unexpectedly full\n");
+ return NETDEV_TX_BUSY;
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -804,6 +817,18 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (++lp->tx_bd_tail >= lp->tx_bd_num)
lp->tx_bd_tail = 0;
+ /* Stop queue if next transmit may not have space */
+ if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in axienet_start_xmit_done */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
+ netif_wake_queue(ndev);
+ }
+
return NETDEV_TX_OK;
}
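The stop-then-recheck tail of start_xmit pairs with the wake-after-check added to axienet_start_xmit_done(): the transmit path stops the queue whenever a worst-case frame (MAX_SKB_FRAGS + 1 descriptors) might not fit, and the barrier plus second check close the race where the completion path freed descriptors between the first check and the stop. A hedged stdatomic sketch of that handshake, with the ring reduced to a single free-slot counter:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SLOTS    64
#define MAX_FRAME_BDS 18 /* stands in for MAX_SKB_FRAGS + 1 */

static atomic_int free_slots = RING_SLOTS;
static atomic_bool queue_stopped = false;

/* Producer: mirrors the tail of axienet_start_xmit(). */
static void xmit_one(void)
{
	atomic_fetch_sub(&free_slots, 1); /* consume one descriptor */

	if (atomic_load(&free_slots) < MAX_FRAME_BDS) {
		atomic_store(&queue_stopped, true);
		/* Matches the barrier in the completion path below. */
		atomic_thread_fence(memory_order_seq_cst);
		/* Space might have just been freed - check again. */
		if (atomic_load(&free_slots) >= MAX_FRAME_BDS)
			atomic_store(&queue_stopped, false);
	}
}

/* Consumer: mirrors axienet_start_xmit_done(). */
static void tx_done(int completed)
{
	atomic_fetch_add(&free_slots, completed);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&free_slots) >= MAX_FRAME_BDS)
		atomic_store(&queue_stopped, false);
}

int main(void)
{
	for (int i = 0; i < 50; i++)
		xmit_one();
	printf("stopped=%d\n", (int)atomic_load(&queue_stopped)); /* 1 */
	tx_done(50);
	printf("stopped=%d\n", (int)atomic_load(&queue_stopped)); /* 0 */
	return 0;
}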
@@ -834,6 +859,8 @@ static void axienet_recv(struct net_device *ndev)
tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
+ /* Ensure we see complete descriptor update */
+ dma_rmb();
phys = desc_get_phys_addr(lp, cur_p);
dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
DMA_FROM_DEVICE);
@@ -1352,7 +1379,8 @@ axienet_ethtools_set_ringparam(struct net_device *ndev,
if (ering->rx_pending > RX_BD_NUM_MAX ||
ering->rx_mini_pending ||
ering->rx_jumbo_pending ||
- ering->rx_pending > TX_BD_NUM_MAX)
+ ering->tx_pending < TX_BD_NUM_MIN ||
+ ering->tx_pending > TX_BD_NUM_MAX)
return -EINVAL;
if (netif_running(ndev))
@@ -2027,6 +2055,11 @@ static int axienet_probe(struct platform_device *pdev)
lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
+ /* Reset core now that clocks are enabled, prior to accessing MDIO */
+ ret = __axienet_device_reset(lp);
+ if (ret)
+ goto cleanup_clk;
+
lp->phy_node = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (lp->phy_node) {
ret = axienet_mdio_setup(lp);
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 8a19a06b505d..b1fc153125d9 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -681,8 +681,8 @@ static void sixpack_close(struct tty_struct *tty)
}
/* Perform I/O control on an active 6pack channel. */
-static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int sixpack_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct sixpack *sp = sp_get(tty);
struct net_device *dev;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index edde9c3ae12b..c251e04ae047 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -806,8 +806,8 @@ static void mkiss_close(struct tty_struct *tty)
}
/* Perform I/O control on an active ax25 channel. */
-static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int mkiss_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct mkiss *ax = mkiss_get(tty);
struct net_device *dev;
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 6376b8485976..980f2be32f05 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -950,9 +950,7 @@ static int yam_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __
ym = memdup_user(data, sizeof(struct yamdrv_ioctl_mcs));
if (IS_ERR(ym))
return PTR_ERR(ym);
- if (ym->cmd != SIOCYAMSMCS)
- return -EINVAL;
- if (ym->bitrate > YAM_MAXBITRATE) {
+ if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) {
kfree(ym);
return -EINVAL;
}
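Folding the two tests together is what fixes the leak: the old code returned -EINVAL on the cmd check before reaching any kfree(), so the buffer duplicated by memdup_user() escaped. A minimal sketch of the corrected single-cleanup shape, with userspace stand-ins for the kernel helpers:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define CMD_SET_MCS 1
#define MAX_BITRATE 57600

struct mcs {
	int cmd;
	int bitrate;
};

static int handle_mcs(const struct mcs *user)
{
	struct mcs *ym = malloc(sizeof(*ym)); /* stand-in for memdup_user() */

	if (!ym)
		return -ENOMEM;
	memcpy(ym, user, sizeof(*ym));

	/* One combined check so every early exit frees the copy. */
	if (ym->cmd != CMD_SET_MCS || ym->bitrate > MAX_BITRATE) {
		free(ym);
		return -EINVAL;
	}

	/* ... apply the settings ... */
	free(ym);
	return 0;
}

int main(void)
{
	struct mcs bad = { .cmd = 0, .bitrate = 1200 };

	return handle_mcs(&bad) == -EINVAL ? 0 : 1;
}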
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 315278a7cf88..cf69da0e296c 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -164,6 +164,7 @@ struct hv_netvsc_packet {
u32 total_bytes;
u32 send_buf_index;
u32 total_data_buflen;
+ struct hv_dma_range *dma_range;
};
#define NETVSC_HASH_KEYLEN 40
@@ -1074,6 +1075,7 @@ struct netvsc_device {
/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;
+ void *recv_original_buf;
u32 recv_buf_size; /* allocated bytes */
struct vmbus_gpadl recv_buf_gpadl_handle;
u32 recv_section_cnt;
@@ -1082,6 +1084,7 @@ struct netvsc_device {
/* Send buffer allocated by us */
void *send_buf;
+ void *send_original_buf;
u32 send_buf_size;
struct vmbus_gpadl send_buf_gpadl_handle;
u32 send_section_cnt;
@@ -1731,4 +1734,6 @@ struct rndis_message {
#define RETRY_US_HI 10000
#define RETRY_MAX 2000 /* >10 sec */
+void netvsc_dma_unmap(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet);
#endif /* _HYPERV_NET_H */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5086cd07d1ed..afa81a9480cc 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -153,8 +153,21 @@ static void free_netvsc_device(struct rcu_head *head)
int i;
kfree(nvdev->extension);
- vfree(nvdev->recv_buf);
- vfree(nvdev->send_buf);
+
+ if (nvdev->recv_original_buf) {
+ hv_unmap_memory(nvdev->recv_buf);
+ vfree(nvdev->recv_original_buf);
+ } else {
+ vfree(nvdev->recv_buf);
+ }
+
+ if (nvdev->send_original_buf) {
+ hv_unmap_memory(nvdev->send_buf);
+ vfree(nvdev->send_original_buf);
+ } else {
+ vfree(nvdev->send_buf);
+ }
+
bitmap_free(nvdev->send_section_map);
for (i = 0; i < VRSS_CHANNEL_MAX; i++) {
@@ -337,6 +350,7 @@ static int netvsc_init_buf(struct hv_device *device,
struct nvsp_message *init_packet;
unsigned int buf_size;
int i, ret = 0;
+ void *vaddr;
/* Get receive buffer area. */
buf_size = device_info->recv_sections * device_info->recv_section_size;
@@ -372,6 +386,17 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
}
+ if (hv_isolation_type_snp()) {
+ vaddr = hv_map_memory(net_device->recv_buf, buf_size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ net_device->recv_original_buf = net_device->recv_buf;
+ net_device->recv_buf = vaddr;
+ }
+
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -475,6 +500,17 @@ static int netvsc_init_buf(struct hv_device *device,
goto cleanup;
}
+ if (hv_isolation_type_snp()) {
+ vaddr = hv_map_memory(net_device->send_buf, buf_size);
+ if (!vaddr) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ net_device->send_original_buf = net_device->send_buf;
+ net_device->send_buf = vaddr;
+ }
+
/* Notify the NetVsp of the gpadl handle */
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -764,7 +800,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
/* Notify the layer above us */
if (likely(skb)) {
- const struct hv_netvsc_packet *packet
+ struct hv_netvsc_packet *packet
= (struct hv_netvsc_packet *)skb->cb;
u32 send_index = packet->send_buf_index;
struct netvsc_stats *tx_stats;
@@ -780,6 +816,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
tx_stats->bytes += packet->total_bytes;
u64_stats_update_end(&tx_stats->syncp);
+ netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
napi_consume_skb(skb, budget);
}
@@ -944,6 +981,88 @@ static void netvsc_copy_to_send_buf(struct netvsc_device *net_device,
memset(dest, 0, padding);
}
+void netvsc_dma_unmap(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet)
+{
+ u32 page_count = packet->cp_partial ?
+ packet->page_buf_cnt - packet->rmsg_pgcnt :
+ packet->page_buf_cnt;
+ int i;
+
+ if (!hv_is_isolation_supported())
+ return;
+
+ if (!packet->dma_range)
+ return;
+
+ for (i = 0; i < page_count; i++)
+ dma_unmap_single(&hv_dev->device, packet->dma_range[i].dma,
+ packet->dma_range[i].mapping_size,
+ DMA_TO_DEVICE);
+
+ kfree(packet->dma_range);
+}
+
+/* netvsc_dma_map - Map the swiotlb bounce buffer for the data pages
+ * of a packet sent by vmbus_sendpacket_pagebuffer() in an Isolation
+ * VM.
+ *
+ * In an Isolation VM, the netvsc send buffer has been marked visible
+ * to the host, so data copied into the send buffer does not need a
+ * bounce buffer. The data pages handled by vmbus_sendpacket_pagebuffer()
+ * may not be copied into the send buffer, so those pages must be
+ * mapped through the swiotlb bounce buffer; netvsc_dma_map() does
+ * that, converting the pfns in struct hv_page_buffer to the bounce
+ * buffer's pfns. The loop here is necessary because the entries in
+ * the page buffer array are not necessarily full pages of data:
+ * each entry has its own offset and len, which may be non-zero even
+ * for entries in the middle of the array, and the entries are not
+ * physically contiguous. Each entry must therefore be mapped
+ * individually rather than as one contiguous unit, which is why
+ * dma_map_sg() cannot be used here.
+ */
+static int netvsc_dma_map(struct hv_device *hv_dev,
+ struct hv_netvsc_packet *packet,
+ struct hv_page_buffer *pb)
+{
+ u32 page_count = packet->cp_partial ?
+ packet->page_buf_cnt - packet->rmsg_pgcnt :
+ packet->page_buf_cnt;
+ dma_addr_t dma;
+ int i;
+
+ if (!hv_is_isolation_supported())
+ return 0;
+
+ packet->dma_range = kcalloc(page_count,
+ sizeof(*packet->dma_range),
+ GFP_KERNEL);
+ if (!packet->dma_range)
+ return -ENOMEM;
+
+ for (i = 0; i < page_count; i++) {
+ char *src = phys_to_virt((pb[i].pfn << HV_HYP_PAGE_SHIFT)
+ + pb[i].offset);
+ u32 len = pb[i].len;
+
+ dma = dma_map_single(&hv_dev->device, src, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&hv_dev->device, dma)) {
+ kfree(packet->dma_range);
+ return -ENOMEM;
+ }
+
+ /* pb[].offset and pb[].len are not changed during dma mapping
+ * and so are not reassigned.
+ */
+ packet->dma_range[i].dma = dma;
+ packet->dma_range[i].mapping_size = len;
+ pb[i].pfn = dma >> HV_HYP_PAGE_SHIFT;
+ }
+
+ return 0;
+}
+
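For reference, a minimal sketch of the map/send/unmap lifecycle the two helpers above establish (hedged; local names are illustrative, and the success-path unmap actually happens later, in netvsc_send_tx_complete(), once the host completes the packet):

	ret = netvsc_dma_map(hv_dev, packet, pb);
	if (ret)
		return ret;	/* mapping failed; packet->dma_range was freed */

	ret = vmbus_sendpacket_pagebuffer(channel, pb, packet->page_buf_cnt,
					  &nvmsg, sizeof(nvmsg), req_id);
	if (ret)
		netvsc_dma_unmap(hv_dev, packet);	/* send failed: unmap now */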
static inline int netvsc_send_pkt(
struct hv_device *device,
struct hv_netvsc_packet *packet,
@@ -984,14 +1103,24 @@ static inline int netvsc_send_pkt(
trace_nvsp_send_pkt(ndev, out_channel, rpkt);
+ packet->dma_range = NULL;
if (packet->page_buf_cnt) {
if (packet->cp_partial)
pb += packet->rmsg_pgcnt;
+ ret = netvsc_dma_map(ndev_ctx->device_ctx, packet, pb);
+ if (ret) {
+ ret = -EAGAIN;
+ goto exit;
+ }
+
ret = vmbus_sendpacket_pagebuffer(out_channel,
pb, packet->page_buf_cnt,
&nvmsg, sizeof(nvmsg),
req_id);
+
+ if (ret)
+ netvsc_dma_unmap(ndev_ctx->device_ctx, packet);
} else {
ret = vmbus_sendpacket(out_channel,
&nvmsg, sizeof(nvmsg),
@@ -999,6 +1128,7 @@ static inline int netvsc_send_pkt(
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
+exit:
if (ret == 0) {
atomic_inc_return(&nvchan->queue_sends);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index efa963b7af54..3646469433b1 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2516,6 +2516,7 @@ static int netvsc_probe(struct hv_device *dev,
net->netdev_ops = &device_ops;
net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
+ dma_set_min_align_mask(&dev->device, HV_HYP_PAGE_SIZE - 1);
/* We always need headroom for rndis header */
net->needed_headroom = RNDIS_AND_PPI_SIZE;
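A note on the dma_set_min_align_mask() call just added (hedged reading): with a mask of HV_HYP_PAGE_SIZE - 1, swiotlb picks bounce slots that preserve the original buffer's offset within an HV_HYP_PAGE_SIZE window. That is what lets netvsc_dma_map() rewrite only pb[i].pfn while leaving pb[i].offset untouched. The invariant, with illustrative names:

	/* orig_phys/bounce_dma are hypothetical; their low bits must agree */
	BUG_ON((orig_phys & (HV_HYP_PAGE_SIZE - 1)) !=
	       (bounce_dma & (HV_HYP_PAGE_SIZE - 1)));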
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index f6c9c2a670f9..448fcc325ed7 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -361,6 +361,8 @@ static void rndis_filter_receive_response(struct net_device *ndev,
}
}
+ netvsc_dma_unmap(((struct net_device_context *)
+ netdev_priv(ndev))->device_ctx, &request->pkt);
complete(&request->wait_event);
} else {
netdev_err(ndev,
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 7d67f41387f5..4f5ef8a9a9a8 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -100,6 +100,7 @@ struct at86rf230_local {
unsigned long cal_timeout;
bool is_tx;
bool is_tx_from_off;
+ bool was_tx;
u8 tx_retry;
struct sk_buff *tx_skb;
struct at86rf230_state_change tx;
@@ -343,7 +344,11 @@ at86rf230_async_error_recover_complete(void *context)
if (ctx->free)
kfree(ctx);
- ieee802154_wake_queue(lp->hw);
+ if (lp->was_tx) {
+ lp->was_tx = 0;
+ dev_kfree_skb_any(lp->tx_skb);
+ ieee802154_wake_queue(lp->hw);
+ }
}
static void
@@ -352,7 +357,11 @@ at86rf230_async_error_recover(void *context)
struct at86rf230_state_change *ctx = context;
struct at86rf230_local *lp = ctx->lp;
- lp->is_tx = 0;
+ if (lp->is_tx) {
+ lp->was_tx = 1;
+ lp->is_tx = 0;
+ }
+
at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON,
at86rf230_async_error_recover_complete);
}
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index ece6ff6049f6..2bc730fd260e 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -1771,6 +1771,7 @@ static int ca8210_async_xmit_complete(
status
);
if (status != MAC_TRANSACTION_OVERFLOW) {
+ dev_kfree_skb_any(priv->tx_skb);
ieee802154_wake_queue(priv->hw);
return 0;
}
@@ -2974,8 +2975,8 @@ static void ca8210_hw_setup(struct ieee802154_hw *ca8210_hw)
ca8210_hw->phy->cca.opt = NL802154_CCA_OPT_ENERGY_CARRIER_AND;
ca8210_hw->phy->cca_ed_level = -9800;
ca8210_hw->phy->symbol_duration = 16;
- ca8210_hw->phy->lifs_period = 40;
- ca8210_hw->phy->sifs_period = 12;
+ ca8210_hw->phy->lifs_period = 40 * ca8210_hw->phy->symbol_duration;
+ ca8210_hw->phy->sifs_period = 12 * ca8210_hw->phy->symbol_duration;
ca8210_hw->flags =
IEEE802154_HW_AFILT |
IEEE802154_HW_OMIT_CKSUM |
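Worked numbers for this fix: IEEE 802.15.4 defines macMinLIFSPeriod = 40 symbols and macMinSIFSPeriod = 12 symbols, and a 2.4 GHz O-QPSK symbol lasts 16 us. The periods are expected in microseconds, so lifs_period = 40 * 16 = 640 us and sifs_period = 12 * 16 = 192 us; the old code stored the raw symbol counts (40 and 12) as if they were already microseconds. The mcr20a change further below applies the same correction.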
diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c
index 8caa61ec718f..36f1c5aa98fc 100644
--- a/drivers/net/ieee802154/mac802154_hwsim.c
+++ b/drivers/net/ieee802154/mac802154_hwsim.c
@@ -786,6 +786,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev,
goto err_pib;
}
+ pib->channel = 13;
rcu_assign_pointer(phy->pib, pib);
phy->idx = idx;
INIT_LIST_HEAD(&phy->edges);
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index 8dc04e2590b1..383231b85464 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -976,8 +976,8 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp)
dev_dbg(printdev(lp), "%s\n", __func__);
phy->symbol_duration = 16;
- phy->lifs_period = 40;
- phy->sifs_period = 12;
+ phy->lifs_period = 40 * phy->symbol_duration;
+ phy->sifs_period = 12 * phy->symbol_duration;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
IEEE802154_HW_AFILT |
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 49d9a077d037..68291a3efd04 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1080,27 +1080,38 @@ static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
{
struct gsi *gsi;
u32 backlog;
+ int delta;
- if (!endpoint->replenish_enabled) {
+ if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) {
if (add_one)
atomic_inc(&endpoint->replenish_saved);
return;
}
+ /* If already active, just update the backlog */
+ if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) {
+ if (add_one)
+ atomic_inc(&endpoint->replenish_backlog);
+ return;
+ }
+
while (atomic_dec_not_zero(&endpoint->replenish_backlog))
if (ipa_endpoint_replenish_one(endpoint))
goto try_again_later;
+
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
+
if (add_one)
atomic_inc(&endpoint->replenish_backlog);
return;
try_again_later:
- /* The last one didn't succeed, so fix the backlog */
- backlog = atomic_inc_return(&endpoint->replenish_backlog);
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
- if (add_one)
- atomic_inc(&endpoint->replenish_backlog);
+ /* The last one didn't succeed, so fix the backlog */
+ delta = add_one ? 2 : 1;
+ backlog = atomic_add_return(delta, &endpoint->replenish_backlog);
/* Whenever a receive buffer transaction completes we'll try to
* replenish again. It's unlikely, but if we fail to supply even
@@ -1120,7 +1131,7 @@ static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
u32 max_backlog;
u32 saved;
- endpoint->replenish_enabled = true;
+ set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
atomic_add(saved, &endpoint->replenish_backlog);
@@ -1134,7 +1145,7 @@ static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
u32 backlog;
- endpoint->replenish_enabled = false;
+ clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
atomic_add(backlog, &endpoint->replenish_saved);
}
@@ -1691,7 +1702,8 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
/* RX transactions require a single TRE, so the maximum
* backlog is the same as the maximum outstanding TREs.
*/
- endpoint->replenish_enabled = false;
+ clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
atomic_set(&endpoint->replenish_saved,
gsi_channel_tre_max(gsi, endpoint->channel_id));
atomic_set(&endpoint->replenish_backlog, 0);
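The new IPA_REPLENISH_ACTIVE bit is a standard single-runner pattern built on atomic bitops; a generic sketch (hypothetical names, not driver code):

	if (test_and_set_bit(MY_ACTIVE, flags))
		return;			/* someone else is already replenishing */

	while (have_work())
		do_one_unit();		/* only one context ever runs this loop */

	clear_bit(MY_ACTIVE, flags);

Contexts that lose the test_and_set_bit() race just record demand (here, by bumping the backlog) for the active runner to pick up, instead of racing it through the same loop.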
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 0a859d10312d..0313cdc607de 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -41,6 +41,19 @@ enum ipa_endpoint_name {
#define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
/**
+ * enum ipa_replenish_flag: RX buffer replenish flags
+ *
+ * @IPA_REPLENISH_ENABLED: Whether receive buffer replenishing is enabled
+ * @IPA_REPLENISH_ACTIVE: Whether replenishing is underway
+ * @IPA_REPLENISH_COUNT: Number of defined replenish flags
+ */
+enum ipa_replenish_flag {
+ IPA_REPLENISH_ENABLED,
+ IPA_REPLENISH_ACTIVE,
+ IPA_REPLENISH_COUNT, /* Number of flags (must be last) */
+};
+
+/**
* struct ipa_endpoint - IPA endpoint information
* @ipa: IPA pointer
 * @ee_id: Execution environment endpoint is associated with
@@ -51,7 +64,7 @@ enum ipa_endpoint_name {
* @trans_tre_max: Maximum number of TRE descriptors per transaction
* @evt_ring_id: GSI event ring used by the endpoint
* @netdev: Network device pointer, if endpoint uses one
- * @replenish_enabled: Whether receive buffer replenishing is enabled
+ * @replenish_flags: Replenishing state flags
* @replenish_ready: Number of replenish transactions without doorbell
* @replenish_saved: Replenish requests held while disabled
* @replenish_backlog: Number of buffers needed to fill hardware queue
@@ -72,7 +85,7 @@ struct ipa_endpoint {
struct net_device *netdev;
/* Receive buffer replenishing for RX endpoints */
- bool replenish_enabled;
+ DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT);
u32 replenish_ready;
atomic_t replenish_saved;
atomic_t replenish_backlog;
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index b1c6c0fcb654..f2989aac47a6 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -11,6 +11,8 @@
#include <linux/pm_runtime.h>
#include <linux/bitops.h>
+#include "linux/soc/qcom/qcom_aoss.h"
+
#include "ipa.h"
#include "ipa_power.h"
#include "ipa_endpoint.h"
@@ -64,6 +66,7 @@ enum ipa_power_flag {
* struct ipa_power - IPA power management information
* @dev: IPA device pointer
* @core: IPA core clock
+ * @qmp: QMP handle for AOSS communication
* @spinlock: Protects modem TX queue enable/disable
* @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
@@ -72,6 +75,7 @@ enum ipa_power_flag {
struct ipa_power {
struct device *dev;
struct clk *core;
+ struct qmp *qmp;
spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
@@ -382,6 +386,47 @@ void ipa_power_modem_queue_active(struct ipa *ipa)
clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
}
+static int ipa_power_retention_init(struct ipa_power *power)
+{
+ struct qmp *qmp = qmp_get(power->dev);
+
+ if (IS_ERR(qmp)) {
+ if (PTR_ERR(qmp) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ /* We assume any other error means it's not defined/needed */
+ qmp = NULL;
+ }
+ power->qmp = qmp;
+
+ return 0;
+}
+
+static void ipa_power_retention_exit(struct ipa_power *power)
+{
+ qmp_put(power->qmp);
+ power->qmp = NULL;
+}
+
+/* Control register retention on power collapse */
+void ipa_power_retention(struct ipa *ipa, bool enable)
+{
+ static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
+ struct ipa_power *power = ipa->power;
+ char buf[36]; /* Exactly enough for fmt[]; size a multiple of 4 */
+ int ret;
+
+ if (!power->qmp)
+ return; /* Not needed on this platform */
+
+ (void)snprintf(buf, sizeof(buf), fmt, enable ? '1' : '0');
+
+ ret = qmp_send(power->qmp, buf, sizeof(buf));
+ if (ret)
+ dev_err(power->dev, "error %d sending QMP %sable request\n",
+ ret, enable ? "en" : "dis");
+}
+
int ipa_power_setup(struct ipa *ipa)
{
int ret;
@@ -438,12 +483,18 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
if (ret)
goto err_kfree;
+ ret = ipa_power_retention_init(power);
+ if (ret)
+ goto err_interconnect_exit;
+
pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
return power;
+err_interconnect_exit:
+ ipa_interconnect_exit(power);
err_kfree:
kfree(power);
err_clk_put:
@@ -460,6 +511,7 @@ void ipa_power_exit(struct ipa_power *power)
pm_runtime_disable(dev);
pm_runtime_dont_use_autosuspend(dev);
+ ipa_power_retention_exit(power);
ipa_interconnect_exit(power);
kfree(power);
clk_put(clk);
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 2151805d7fbb..6f84f057a209 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -41,6 +41,13 @@ void ipa_power_modem_queue_wake(struct ipa *ipa);
void ipa_power_modem_queue_active(struct ipa *ipa);
/**
+ * ipa_power_retention() - Control register retention on power collapse
+ * @ipa: IPA pointer
+ * @enable: Whether retention should be enabled or disabled
+ */
+void ipa_power_retention(struct ipa *ipa, bool enable);
+
+/**
* ipa_power_setup() - Set up IPA power management
* @ipa: IPA pointer
*
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 856e55a080a7..fe11910518d9 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -11,6 +11,7 @@
#include "ipa.h"
#include "ipa_uc.h"
+#include "ipa_power.h"
/**
* DOC: The IPA embedded microcontroller
@@ -154,6 +155,7 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
case IPA_UC_RESPONSE_INIT_COMPLETED:
if (ipa->uc_powered) {
ipa->uc_loaded = true;
+ ipa_power_retention(ipa, true);
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
ipa->uc_powered = false;
@@ -184,6 +186,9 @@ void ipa_uc_deconfig(struct ipa *ipa)
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
+ if (ipa->uc_loaded)
+ ipa_power_retention(ipa, false);
+
if (!ipa->uc_powered)
return;
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 16aa3a478e9e..3d0874331763 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3870,6 +3870,18 @@ static void macsec_common_dellink(struct net_device *dev, struct list_head *head
struct macsec_dev *macsec = macsec_priv(dev);
struct net_device *real_dev = macsec->real_dev;
+ /* If h/w offloading is available, propagate to the device */
+ if (macsec_is_offloaded(macsec)) {
+ const struct macsec_ops *ops;
+ struct macsec_context ctx;
+
+ ops = macsec_get_ops(netdev_priv(dev), &ctx);
+ if (ops) {
+ ctx.secy = &macsec->secy;
+ macsec_offload(ops->mdo_del_secy, &ctx);
+ }
+ }
+
unregister_netdevice_queue(dev, head);
list_del_rcu(&macsec->secys);
macsec_del_dev(macsec);
@@ -3884,18 +3896,6 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
struct net_device *real_dev = macsec->real_dev;
struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
- /* If h/w offloading is available, propagate to the device */
- if (macsec_is_offloaded(macsec)) {
- const struct macsec_ops *ops;
- struct macsec_context ctx;
-
- ops = macsec_get_ops(netdev_priv(dev), &ctx);
- if (ops) {
- ctx.secy = &macsec->secy;
- macsec_offload(ops->mdo_del_secy, &ctx);
- }
- }
-
macsec_common_dellink(dev, head);
if (list_empty(&rxd->secys)) {
@@ -4018,6 +4018,15 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
!macsec_check_offload(macsec->offload, macsec))
return -EOPNOTSUPP;
+ /* send_sci must be true when the transmit SCI is explicitly set */
+ if ((data && data[IFLA_MACSEC_SCI]) &&
+ (data && data[IFLA_MACSEC_INC_SCI])) {
+ u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
+
+ if (!send_sci)
+ return -EINVAL;
+ }
+
if (data && data[IFLA_MACSEC_ICV_LEN])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
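The check added above amounts to this validation table (sketch):

	/*
	 *   IFLA_MACSEC_SCI   IFLA_MACSEC_INC_SCI   result
	 *   absent            anything              accepted
	 *   present           absent                accepted
	 *   present           1 (send_sci on)       accepted
	 *   present           0 (send_sci off)      -EINVAL
	 */

i.e. an explicitly configured transmit SCI is only meaningful if it is actually sent.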
diff --git a/drivers/net/mctp/mctp-serial.c b/drivers/net/mctp/mctp-serial.c
index eaa6fb3224bc..62723a7faa2d 100644
--- a/drivers/net/mctp/mctp-serial.c
+++ b/drivers/net/mctp/mctp-serial.c
@@ -403,8 +403,16 @@ static void mctp_serial_tty_receive_buf(struct tty_struct *tty,
mctp_serial_push(dev, c[i]);
}
+static void mctp_serial_uninit(struct net_device *ndev)
+{
+ struct mctp_serial *dev = netdev_priv(ndev);
+
+ cancel_work_sync(&dev->tx_work);
+}
+
static const struct net_device_ops mctp_serial_netdev_ops = {
.ndo_start_xmit = mctp_serial_tx,
+ .ndo_uninit = mctp_serial_uninit,
};
static void mctp_serial_setup(struct net_device *ndev)
@@ -483,7 +491,6 @@ static void mctp_serial_close(struct tty_struct *tty)
int idx = dev->idx;
unregister_netdev(dev->netdev);
- cancel_work_sync(&dev->tx_work);
ida_free(&mctp_serial_ida, idx);
}
diff --git a/drivers/net/mdio/mdio-aspeed.c b/drivers/net/mdio/mdio-aspeed.c
index 966c3b4ad59d..e2273588c75b 100644
--- a/drivers/net/mdio/mdio-aspeed.c
+++ b/drivers/net/mdio/mdio-aspeed.c
@@ -148,6 +148,7 @@ static const struct of_device_id aspeed_mdio_of_match[] = {
{ .compatible = "aspeed,ast2600-mdio", },
{ },
};
+MODULE_DEVICE_TABLE(of, aspeed_mdio_of_match);
static struct platform_driver aspeed_mdio_driver = {
.driver = {
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
index 4300261e2f9e..378ee779061c 100644
--- a/drivers/net/netdevsim/fib.c
+++ b/drivers/net/netdevsim/fib.c
@@ -623,14 +623,14 @@ static int nsim_fib6_rt_append(struct nsim_fib_data *data,
if (err)
goto err_fib6_rt_nh_del;
- fib6_event->rt_arr[i]->trap = true;
+ WRITE_ONCE(fib6_event->rt_arr[i]->trap, true);
}
return 0;
err_fib6_rt_nh_del:
for (i--; i >= 0; i--) {
- fib6_event->rt_arr[i]->trap = false;
+ WRITE_ONCE(fib6_event->rt_arr[i]->trap, false);
nsim_fib6_rt_nh_del(fib6_rt, fib6_event->rt_arr[i]);
}
return err;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index dae95d9a07e8..29aa811af430 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -421,7 +421,7 @@ static int at803x_set_wol(struct phy_device *phydev,
const u8 *mac;
int ret, irq_enabled;
unsigned int i;
- const unsigned int offsets[] = {
+ static const unsigned int offsets[] = {
AT803X_LOC_MAC_ADDR_32_47_OFFSET,
AT803X_LOC_MAC_ADDR_16_31_OFFSET,
AT803X_LOC_MAC_ADDR_0_15_OFFSET,
@@ -1688,19 +1688,19 @@ static int qca808x_read_status(struct phy_device *phydev)
if (ret < 0)
return ret;
- if (phydev->link && phydev->speed == SPEED_2500)
- phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
- else
- phydev->interface = PHY_INTERFACE_MODE_SMII;
-
- /* generate seed as a lower random value to make PHY linked as SLAVE easily,
- * except for master/slave configuration fault detected.
- * the reason for not putting this code into the function link_change_notify is
- * the corner case where the link partner is also the qca8081 PHY and the seed
- * value is configured as the same value, the link can't be up and no link change
- * occurs.
- */
- if (!phydev->link) {
+ if (phydev->link) {
+ if (phydev->speed == SPEED_2500)
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ else
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ } else {
+ /* Generate the seed as a lower random value so the PHY easily
+ * links as SLAVE, except when a master/slave configuration
+ * fault is detected. This code is not put into the function
+ * link_change_notify because of the corner case where the link
+ * partner is also a qca8081 PHY: if both are configured with
+ * the same seed value, the link can't come up and no link
+ * change ever occurs.
+ */
if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR) {
qca808x_phy_ms_seed_enable(phydev, false);
} else {
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index bb5104ae4610..3c683e0e40e9 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -854,6 +854,7 @@ static struct phy_driver broadcom_drivers[] = {
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM54616S",
/* PHY_GBIT_FEATURES */
+ .soft_reset = genphy_soft_reset,
.config_init = bcm54xx_config_init,
.config_aneg = bcm54616s_config_aneg,
.config_intr = bcm_phy_config_intr,
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 739859c0dfb1..2429db614b59 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -189,6 +189,8 @@
#define MII_88E1510_GEN_CTRL_REG_1_MODE_RGMII_SGMII 0x4
#define MII_88E1510_GEN_CTRL_REG_1_RESET 0x8000 /* Soft reset */
+#define MII_88E1510_MSCR_2 0x15
+
#define MII_VCT5_TX_RX_MDI0_COUPLING 0x10
#define MII_VCT5_TX_RX_MDI1_COUPLING 0x11
#define MII_VCT5_TX_RX_MDI2_COUPLING 0x12
@@ -551,9 +553,9 @@ static int m88e1121_config_aneg_rgmii_delays(struct phy_device *phydev)
else
mscr = 0;
- return phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
- MII_88E1121_PHY_MSCR_REG,
- MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
+ return phy_modify_paged_changed(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1121_PHY_MSCR_REG,
+ MII_88E1121_PHY_MSCR_DELAY_MASK, mscr);
}
static int m88e1121_config_aneg(struct phy_device *phydev)
@@ -567,11 +569,13 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
return err;
}
+ changed = err;
+
err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
- changed = err;
+ changed |= err;
err = genphy_config_aneg(phydev);
if (err < 0)
@@ -1211,16 +1215,15 @@ static int m88e1118_config_aneg(struct phy_device *phydev)
{
int err;
- err = genphy_soft_reset(phydev);
+ err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
if (err < 0)
return err;
- err = marvell_set_polarity(phydev, phydev->mdix_ctrl);
+ err = genphy_config_aneg(phydev);
if (err < 0)
return err;
- err = genphy_config_aneg(phydev);
- return 0;
+ return genphy_soft_reset(phydev);
}
static int m88e1118_config_init(struct phy_device *phydev)
@@ -1932,6 +1935,58 @@ static void marvell_get_stats(struct phy_device *phydev,
data[i] = marvell_get_stat(phydev, i);
}
+static int m88e1510_loopback(struct phy_device *phydev, bool enable)
+{
+ int err;
+
+ if (enable) {
+ u16 bmcr_ctl = 0, mscr2_ctl = 0;
+
+ if (phydev->speed == SPEED_1000)
+ bmcr_ctl = BMCR_SPEED1000;
+ else if (phydev->speed == SPEED_100)
+ bmcr_ctl = BMCR_SPEED100;
+
+ if (phydev->duplex == DUPLEX_FULL)
+ bmcr_ctl |= BMCR_FULLDPLX;
+
+ err = phy_write(phydev, MII_BMCR, bmcr_ctl);
+ if (err < 0)
+ return err;
+
+ if (phydev->speed == SPEED_1000)
+ mscr2_ctl = BMCR_SPEED1000;
+ else if (phydev->speed == SPEED_100)
+ mscr2_ctl = BMCR_SPEED100;
+
+ err = phy_modify_paged(phydev, MII_MARVELL_MSCR_PAGE,
+ MII_88E1510_MSCR_2, BMCR_SPEED1000 |
+ BMCR_SPEED100, mscr2_ctl);
+ if (err < 0)
+ return err;
+
+ /* A soft reset is needed for the speed configuration to take effect */
+ err = genphy_soft_reset(phydev);
+ if (err < 0)
+ return err;
+
+ /* FIXME: Based on trial-and-error testing, it seems 1G needs a
+ * delay between soft reset and loopback enablement.
+ */
+ if (phydev->speed == SPEED_1000)
+ msleep(1000);
+
+ return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK,
+ BMCR_LOOPBACK);
+ } else {
+ err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0);
+ if (err < 0)
+ return err;
+
+ return phy_config_aneg(phydev);
+ }
+}
+
static int marvell_vct5_wait_complete(struct phy_device *phydev)
{
int i;
@@ -3078,7 +3133,7 @@ static struct phy_driver marvell_drivers[] = {
.get_sset_count = marvell_get_sset_count,
.get_strings = marvell_get_strings,
.get_stats = marvell_get_stats,
- .set_loopback = genphy_loopback,
+ .set_loopback = m88e1510_loopback,
.get_tunable = m88e1011_get_tunable,
.set_tunable = m88e1011_set_tunable,
.cable_test_start = marvell_vct7_cable_test_start,
diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c
index b7a5ae20edd5..68ee434f9dea 100644
--- a/drivers/net/phy/mediatek-ge.c
+++ b/drivers/net/phy/mediatek-ge.c
@@ -55,9 +55,6 @@ static int mt7530_phy_config_init(struct phy_device *phydev)
static int mt7531_phy_config_init(struct phy_device *phydev)
{
- if (phydev->interface != PHY_INTERFACE_MODE_INTERNAL)
- return -EINVAL;
-
mtk_gephy_config_init(phydev);
/* PHY link down power saving enable */
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 4570cb9535b7..a7ebcdab415b 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1726,8 +1726,8 @@ static struct phy_driver ksphy_driver[] = {
.config_init = kszphy_config_init,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8021,
.phy_id_mask = 0x00ffffff,
@@ -1741,8 +1741,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8031,
.phy_id_mask = 0x00ffffff,
@@ -1756,8 +1756,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8041,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -1788,8 +1788,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.name = "Micrel KSZ8051",
/* PHY_BASIC_FEATURES */
@@ -1802,8 +1802,8 @@ static struct phy_driver ksphy_driver[] = {
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
.match_phy_device = ksz8051_match_phy_device,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8001,
.name = "Micrel KSZ8001 or KS8721",
@@ -1817,8 +1817,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8081,
.name = "Micrel KSZ8081 or KSZ8091",
@@ -1848,8 +1848,8 @@ static struct phy_driver ksphy_driver[] = {
.config_init = ksz8061_config_init,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
@@ -1864,8 +1864,8 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ .suspend = kszphy_suspend,
+ .resume = kszphy_resume,
.read_mmd = genphy_read_mmd_unsupported,
.write_mmd = genphy_write_mmd_unsupported,
}, {
@@ -1883,7 +1883,7 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
+ .suspend = kszphy_suspend,
.resume = kszphy_resume,
}, {
.phy_id = PHY_ID_LAN8814,
@@ -1928,7 +1928,7 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
+ .suspend = kszphy_suspend,
.resume = kszphy_resume,
}, {
.phy_id = PHY_ID_KSZ8873MLL,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 74d8e1dc125f..ce0bb5951b81 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1746,6 +1746,9 @@ void phy_detach(struct phy_device *phydev)
phy_driver_is_genphy_10g(phydev))
device_release_driver(&phydev->mdio.dev);
+ /* Assert the reset signal */
+ phy_device_reset(phydev, 1);
+
/*
* The phydev might go away on the put_device() below, so avoid
* a use-after-free bug by reading the underlying bus first.
@@ -1757,9 +1760,6 @@ void phy_detach(struct phy_device *phydev)
ndev_owner = dev->dev.parent->driver->owner;
if (ndev_owner != bus->owner)
module_put(bus->owner);
-
- /* Assert the reset signal */
- phy_device_reset(phydev, 1);
}
EXPORT_SYMBOL(phy_detach);
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 0c6c0d1843bc..c1512c9925a6 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -651,6 +651,11 @@ struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
else if (ret < 0)
return ERR_PTR(ret);
+ if (!fwnode_device_is_available(ref.fwnode)) {
+ fwnode_handle_put(ref.fwnode);
+ return NULL;
+ }
+
bus = sfp_bus_get(ref.fwnode);
fwnode_handle_put(ref.fwnode);
if (!bus)
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index ab77a9f439ef..4720b24ca51b 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -1641,17 +1641,20 @@ static int sfp_sm_probe_for_phy(struct sfp *sfp)
static int sfp_module_parse_power(struct sfp *sfp)
{
u32 power_mW = 1000;
+ bool supports_a2;
if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL))
power_mW = 1500;
if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL))
power_mW = 2000;
+ supports_a2 = sfp->id.ext.sff8472_compliance !=
+ SFP_SFF8472_COMPLIANCE_NONE ||
+ sfp->id.ext.diagmon & SFP_DIAGMON_DDM;
+
if (power_mW > sfp->max_power_mW) {
/* Module power specification exceeds the allowed maximum. */
- if (sfp->id.ext.sff8472_compliance ==
- SFP_SFF8472_COMPLIANCE_NONE &&
- !(sfp->id.ext.diagmon & SFP_DIAGMON_DDM)) {
+ if (!supports_a2) {
/* The module appears not to implement bus address
* 0xa2, so assume that the module powers up in the
* indicated mode.
@@ -1668,11 +1671,25 @@ static int sfp_module_parse_power(struct sfp *sfp)
}
}
+ if (power_mW <= 1000) {
+ /* Modules below 1W do not require a power change sequence */
+ sfp->module_power_mW = power_mW;
+ return 0;
+ }
+
+ if (!supports_a2) {
+ /* The module power level is below the host maximum and the
+ * module appears not to implement bus address 0xa2, so assume
+ * that the module powers up in the indicated mode.
+ */
+ return 0;
+ }
+
/* If the module requires a higher power mode, but also requires
* an address change sequence, warn the user that the module may
* not be functional.
*/
- if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE && power_mW > 1000) {
+ if (sfp->id.ext.diagmon & SFP_DIAGMON_ADDRMODE) {
dev_warn(sfp->dev,
"Address Change Sequence not supported but module requires %u.%uW, module may not be functional\n",
power_mW / 1000, (power_mW / 100) % 10);
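Background on the new supports_a2 test (hedged): SFF-8472 digital diagnostics live in a second device at I2C address 0xa2 ("A2h"), and the power-level select control is part of that page. A module that declares neither SFF-8472 compliance nor DDM support exposes no A2h page, so the host has no way to switch its power mode and must assume the module powers up in its declared mode.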
diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c
index f4429b93a9c8..15a179631903 100644
--- a/drivers/net/ppp/ppp_async.c
+++ b/drivers/net/ppp/ppp_async.c
@@ -281,8 +281,7 @@ ppp_asynctty_write(struct tty_struct *tty, struct file *file,
*/
static int
-ppp_asynctty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+ppp_asynctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
struct asyncppp *ap = ap_get(tty);
int err, val;
diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c
index b3a71b409a80..18283b7b94bc 100644
--- a/drivers/net/ppp/ppp_synctty.c
+++ b/drivers/net/ppp/ppp_synctty.c
@@ -274,8 +274,7 @@ ppp_sync_write(struct tty_struct *tty, struct file *file,
}
static int
-ppp_synctty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+ppp_synctty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg)
{
struct syncppp *ap = sp_get(tty);
int __user *p = (int __user *)arg;
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 9f3b4c1aa5ce..98f586f910fb 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -1072,8 +1072,8 @@ static void slip_unesc6(struct slip *sl, unsigned char s)
#endif /* CONFIG_SLIP_MODE_SLIP6 */
/* Perform I/O control on an active SLIP channel. */
-static int slip_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int slip_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct slip *sl = tty->disc_data;
unsigned int tmp;
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 1a627ba4b850..a31098981a65 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1468,58 +1468,68 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u16 hdr_off;
u32 *pkt_hdr;
- /* This check is no longer done by usbnet */
- if (skb->len < dev->net->hard_header_len)
+ /* At the end of the SKB, there's a header telling us how many packets
+ * are bundled into this buffer and where we can find an array of
+ * per-packet metadata (which contains elements encoded into u16).
+ */
+ if (skb->len < 4)
return 0;
-
skb_trim(skb, skb->len - 4);
rx_hdr = get_unaligned_le32(skb_tail_pointer(skb));
-
pkt_cnt = (u16)rx_hdr;
hdr_off = (u16)(rx_hdr >> 16);
+
+ if (pkt_cnt == 0)
+ return 0;
+
+ /* Make sure that the bounds of the metadata array are inside the SKB
+ * (and in front of the counter at the end).
+ */
+ if (pkt_cnt * 2 + hdr_off > skb->len)
+ return 0;
pkt_hdr = (u32 *)(skb->data + hdr_off);
- while (pkt_cnt--) {
+ /* Packets must not overlap the metadata array */
+ skb_trim(skb, hdr_off);
+
+ for (; ; pkt_cnt--, pkt_hdr++) {
u16 pkt_len;
le32_to_cpus(pkt_hdr);
pkt_len = (*pkt_hdr >> 16) & 0x1fff;
- /* Check CRC or runt packet */
- if ((*pkt_hdr & AX_RXHDR_CRC_ERR) ||
- (*pkt_hdr & AX_RXHDR_DROP_ERR)) {
- skb_pull(skb, (pkt_len + 7) & 0xFFF8);
- pkt_hdr++;
- continue;
- }
-
- if (pkt_cnt == 0) {
- skb->len = pkt_len;
- /* Skip IP alignment pseudo header */
- skb_pull(skb, 2);
- skb_set_tail_pointer(skb, skb->len);
- skb->truesize = pkt_len + sizeof(struct sk_buff);
- ax88179_rx_checksum(skb, pkt_hdr);
- return 1;
- }
+ if (pkt_len > skb->len)
+ return 0;
- ax_skb = skb_clone(skb, GFP_ATOMIC);
- if (ax_skb) {
+ /* Check CRC or runt packet */
+ if (((*pkt_hdr & (AX_RXHDR_CRC_ERR | AX_RXHDR_DROP_ERR)) == 0) &&
+ pkt_len >= 2 + ETH_HLEN) {
+ bool last = (pkt_cnt == 0);
+
+ if (last) {
+ ax_skb = skb;
+ } else {
+ ax_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!ax_skb)
+ return 0;
+ }
ax_skb->len = pkt_len;
/* Skip IP alignment pseudo header */
skb_pull(ax_skb, 2);
skb_set_tail_pointer(ax_skb, ax_skb->len);
ax_skb->truesize = pkt_len + sizeof(struct sk_buff);
ax88179_rx_checksum(ax_skb, pkt_hdr);
+
+ if (last)
+ return 1;
+
usbnet_skb_return(dev, ax_skb);
- } else {
- return 0;
}
- skb_pull(skb, (pkt_len + 7) & 0xFFF8);
- pkt_hdr++;
+ /* Trim this packet away from the SKB */
+ if (!skb_pull(skb, (pkt_len + 7) & 0xFFF8))
+ return 0;
}
- return 1;
}
static struct sk_buff *
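A sketch of the receive-buffer layout the new bounds checks assume (reconstructed from the code above; hedged):

	/*
	 *  +----------------------+------------------------+--------------+
	 *  | bundled packets      | per-packet metadata    | rx_hdr (u32) |
	 *  | (skb->data..hdr_off) | array at data+hdr_off  | last 4 bytes |
	 *  +----------------------+------------------------+--------------+
	 *
	 * rx_hdr: low 16 bits = pkt_cnt, high 16 bits = hdr_off. Packets
	 * are consumed front to back, each advance rounded up to 8 bytes,
	 * and the skb is trimmed to hdr_off so no packet can overlap the
	 * metadata array.
	 */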
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index eb3817d70f2b..9b4dfa3001d6 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -583,6 +583,11 @@ static const struct usb_device_id products[] = {
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
+#define ZAURUS_FAKE_INTERFACE \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* SA-1100 based Sharp Zaurus ("collie"), or compatible;
* wire-incompatible with true CDC Ethernet implementations.
* (And, it seems, needlessly so...)
@@ -640,6 +645,13 @@ static const struct usb_device_id products[] = {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = 0,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
/* reported with some C860 units */
.idProduct = 0x9050, /* C-860 */
ZAURUS_MASTER_INTERFACE,
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 82bb5ed94c48..c0b8b4aa78f3 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -659,6 +659,11 @@ static const struct usb_device_id mbim_devs[] = {
.driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
},
+ /* Telit FN990 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x1bc7, 0x1071, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_avoid_altsetting_toggle,
+ },
+
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index e303b522efb5..15f91d691bba 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1715,10 +1715,10 @@ int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
{
struct sk_buff *skb;
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- int len;
+ unsigned int len;
int nframes;
int x;
- int offset;
+ unsigned int offset;
union {
struct usb_cdc_ncm_ndp16 *ndp16;
struct usb_cdc_ncm_ndp32 *ndp32;
@@ -1790,8 +1790,8 @@ next_ndp:
break;
}
- /* sanity checking */
- if (((offset + len) > skb_in->len) ||
+ /* sanity checking - watch out for integer wrap */
+ if ((offset > skb_in->len) || (len > skb_in->len - offset) ||
(len > ctx->rx_max) || (len < ETH_HLEN)) {
netif_dbg(dev, rx_err, dev->net,
"invalid frame detected (ignored) offset[%u]=%u, length=%u, skb=%p\n",
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index cd33955df0b6..6a769df0b421 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -121,7 +121,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone)
if (tx_buf == NULL)
goto free_rx_urb;
- rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE,
+ rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
GFP_KERNEL, &rx_urb->transfer_dma);
if (rx_buf == NULL)
goto free_tx_buf;
@@ -146,7 +146,7 @@ error_nomem:
static void ipheth_free_urbs(struct ipheth_device *iphone)
{
- usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf,
+ usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf,
iphone->rx_urb->transfer_dma);
usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf,
iphone->tx_urb->transfer_dma);
@@ -317,7 +317,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags)
usb_fill_bulk_urb(dev->rx_urb, udev,
usb_rcvbulkpipe(udev, dev->bulk_in),
- dev->rx_buf, IPHETH_BUF_SIZE,
+ dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN,
ipheth_rcvbulk_callback,
dev);
dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index f510e8219470..3353e761016d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1316,6 +1316,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x1432, 3)}, /* ZTE ME3620 */
+ {QMI_FIXED_INTF(0x19d2, 0x1485, 5)}, /* ZTE MF286D */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e16, 3)}, /* D-Link DWM-221 */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
@@ -1399,8 +1400,11 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
{QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
+ {QMI_FIXED_INTF(0x413c, 0x81e4, 0)}, /* Dell Wireless 5829e with eSIM support*/
+ {QMI_FIXED_INTF(0x413c, 0x81e6, 0)}, /* Dell Wireless 5829e */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
+ {QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index abe0149ed917..bc1e3dd67c04 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1962,7 +1962,8 @@ static const struct driver_info smsc95xx_info = {
.bind = smsc95xx_bind,
.unbind = smsc95xx_unbind,
.link_reset = smsc95xx_link_reset,
- .reset = smsc95xx_start_phy,
+ .reset = smsc95xx_reset,
+ .check_connect = smsc95xx_start_phy,
.stop = smsc95xx_stop,
.rx_fixup = smsc95xx_rx_fixup,
.tx_fixup = smsc95xx_tx_fixup,
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 8e717a0b559b..7984f2157d22 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -256,6 +256,11 @@ static const struct usb_device_id products [] = {
.bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, \
.bInterfaceProtocol = USB_CDC_PROTO_NONE
+#define ZAURUS_FAKE_INTERFACE \
+ .bInterfaceClass = USB_CLASS_COMM, \
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_MDLM, \
+ .bInterfaceProtocol = USB_CDC_PROTO_NONE
+
/* SA-1100 based Sharp Zaurus ("collie"), or compatible. */
{
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
@@ -315,6 +320,13 @@ static const struct usb_device_id products [] = {
.driver_info = ZAURUS_PXA_INFO,
}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
+ | USB_DEVICE_ID_MATCH_DEVICE,
+ .idVendor = 0x04DD,
+ .idProduct = 0x9032, /* SL-6000 */
+ ZAURUS_FAKE_INTERFACE,
+ .driver_info = (unsigned long)&bogus_mdlm_info,
+}, {
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
/* reported with some C860 units */
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 354a963075c5..d29fb9759cc9 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -265,9 +265,10 @@ static void __veth_xdp_flush(struct veth_rq *rq)
{
/* Write ptr_ring before reading rx_notify_masked */
smp_mb();
- if (!rq->rx_notify_masked) {
- rq->rx_notify_masked = true;
- napi_schedule(&rq->xdp_napi);
+ if (!READ_ONCE(rq->rx_notify_masked) &&
+ napi_schedule_prep(&rq->xdp_napi)) {
+ WRITE_ONCE(rq->rx_notify_masked, true);
+ __napi_schedule(&rq->xdp_napi);
}
}
@@ -912,8 +913,10 @@ static int veth_poll(struct napi_struct *napi, int budget)
/* Write rx_notify_masked before reading ptr_ring */
smp_store_mb(rq->rx_notify_masked, false);
if (unlikely(!__ptr_ring_empty(&rq->xdp_ring))) {
- rq->rx_notify_masked = true;
- napi_schedule(&rq->xdp_napi);
+ if (napi_schedule_prep(&rq->xdp_napi)) {
+ WRITE_ONCE(rq->rx_notify_masked, true);
+ __napi_schedule(&rq->xdp_napi);
+ }
}
}
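The reasoning behind switching to napi_schedule_prep() (hedged): napi_schedule() on an already-scheduled NAPI instance is a no-op, so two CPUs could each observe rx_notify_masked as false, both set it, and the NAPI could end up not actually scheduled while the flag says it is. napi_schedule_prep() atomically claims the instance, so exactly one winner sets the flag and calls __napi_schedule(), and the flag can no longer go stale.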
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 569eecfbc2cd..a801ea40908f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2101,7 +2101,7 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
stragglers = num_cpu >= vi->curr_queue_pairs ?
num_cpu % vi->curr_queue_pairs :
0;
- cpu = cpumask_next(-1, cpu_online_mask);
+ cpu = cpumask_first(cpu_online_mask);
for (i = 0; i < vi->curr_queue_pairs; i++) {
group_size = stride + (i < stragglers ? 1 : 0);
@@ -3312,7 +3312,7 @@ static int virtnet_probe(struct virtio_device *vdev)
return 0;
free_unregister_netdev:
- vi->vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
unregister_netdev(dev);
free_failover:
@@ -3328,7 +3328,7 @@ free:
static void remove_vq_common(struct virtnet_info *vi)
{
- vi->vdev->config->reset(vi->vdev);
+ virtio_reset_device(vi->vdev);
/* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c
index c0cfd9b36c0b..720952b92e78 100644
--- a/drivers/net/wireguard/noise.c
+++ b/drivers/net/wireguard/noise.c
@@ -302,6 +302,41 @@ void wg_noise_set_static_identity_private_key(
static_identity->static_public, private_key);
}
+static void hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen, const size_t keylen)
+{
+ struct blake2s_state state;
+ u8 x_key[BLAKE2S_BLOCK_SIZE] __aligned(__alignof__(u32)) = { 0 };
+ u8 i_hash[BLAKE2S_HASH_SIZE] __aligned(__alignof__(u32));
+ int i;
+
+ if (keylen > BLAKE2S_BLOCK_SIZE) {
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, key, keylen);
+ blake2s_final(&state, x_key);
+ } else
+ memcpy(x_key, key, keylen);
+
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
+ x_key[i] ^= 0x36;
+
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&state, in, inlen);
+ blake2s_final(&state, i_hash);
+
+ for (i = 0; i < BLAKE2S_BLOCK_SIZE; ++i)
+ x_key[i] ^= 0x5c ^ 0x36;
+
+ blake2s_init(&state, BLAKE2S_HASH_SIZE);
+ blake2s_update(&state, x_key, BLAKE2S_BLOCK_SIZE);
+ blake2s_update(&state, i_hash, BLAKE2S_HASH_SIZE);
+ blake2s_final(&state, i_hash);
+
+ memcpy(out, i_hash, BLAKE2S_HASH_SIZE);
+ memzero_explicit(x_key, BLAKE2S_BLOCK_SIZE);
+ memzero_explicit(i_hash, BLAKE2S_HASH_SIZE);
+}
+
/* This is Hugo Krawczyk's HKDF:
* - https://eprint.iacr.org/2010/264.pdf
* - https://tools.ietf.org/html/rfc5869
@@ -322,14 +357,14 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
((third_len || third_dst) && (!second_len || !second_dst))));
/* Extract entropy from data into secret */
- blake2s256_hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
+ hmac(secret, data, chaining_key, data_len, NOISE_HASH_LEN);
if (!first_dst || !first_len)
goto out;
/* Expand first key: key = secret, data = 0x1 */
output[0] = 1;
- blake2s256_hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, 1, BLAKE2S_HASH_SIZE);
memcpy(first_dst, output, first_len);
if (!second_dst || !second_len)
@@ -337,8 +372,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
/* Expand second key: key = secret, data = first-key || 0x2 */
output[BLAKE2S_HASH_SIZE] = 2;
- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
- BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
memcpy(second_dst, output, second_len);
if (!third_dst || !third_len)
@@ -346,8 +380,7 @@ static void kdf(u8 *first_dst, u8 *second_dst, u8 *third_dst, const u8 *data,
/* Expand third key: key = secret, data = second-key || 0x3 */
output[BLAKE2S_HASH_SIZE] = 3;
- blake2s256_hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1,
- BLAKE2S_HASH_SIZE);
+ hmac(output, output, secret, BLAKE2S_HASH_SIZE + 1, BLAKE2S_HASH_SIZE);
memcpy(third_dst, output, third_len);
out:
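For reference, the local hmac() added above is the standard RFC 2104 construction instantiated with BLAKE2s:

	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))

where K' is the key zero-padded to the 64-byte BLAKE2s block (or pre-hashed first if longer), ipad is the byte 0x36 repeated, and opad is 0x5c repeated. The x_key[i] ^= 0x5c ^ 0x36 step converts the ipad-masked key into the opad-masked key in place, which is why the first XOR is never undone. This replaces the library blake2s256_hmac() helper with a local copy, WireGuard having been its only user.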
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index d73b522a0081..de71ad594f34 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -1014,7 +1014,7 @@ static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
}
ab_pci->msi_ep_base_data = msi_desc->msg.data;
- if (msi_desc->msi_attrib.is_64)
+ if (msi_desc->pci.msi_attrib.is_64)
set_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);
ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
index 0eb13e5df517..d99140960a82 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c
@@ -693,7 +693,7 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
{
struct brcmf_fw_item *first = &req->items[0];
struct brcmf_fw *fwctx;
- char *alt_path;
+ char *alt_path = NULL;
int ret;
brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev));
@@ -712,7 +712,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req,
fwctx->done = fw_cb;
/* First try alternative board-specific path if any */
- alt_path = brcm_alt_fw_path(first->path, fwctx->req->board_type);
+ if (fwctx->req->board_type)
+ alt_path = brcm_alt_fw_path(first->path,
+ fwctx->req->board_type);
if (alt_path) {
ret = request_firmware_nowait(THIS_MODULE, true, alt_path,
fwctx->dev, GFP_KERNEL, fwctx,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c
index 2f3c451148db..2f8908074303 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/xtlv.c
@@ -4,6 +4,8 @@
*/
#include <asm/unaligned.h>
+
+#include <linux/math.h>
#include <linux/string.h>
#include <linux/bug.h>
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 45594f003ef7..452d08545d31 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -4672,7 +4672,7 @@ static ssize_t proc_write(struct file *file,
static int proc_status_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *apriv = dev->ml_priv;
CapabilityRid cap_rid;
StatusRid status_rid;
@@ -4756,7 +4756,7 @@ static int proc_stats_rid_open(struct inode *inode,
u16 rid)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *apriv = dev->ml_priv;
StatsRid stats;
int i, j;
@@ -4819,7 +4819,7 @@ static inline int sniffing_mode(struct airo_info *ai)
static void proc_config_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data = file->private_data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
char *line;
@@ -5030,7 +5030,7 @@ static const char *get_rmode(__le16 mode)
static int proc_config_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
int i;
__le16 mode;
@@ -5120,7 +5120,7 @@ static int proc_config_open(struct inode *inode, struct file *file)
static void proc_SSID_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data = file->private_data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
SsidRid SSID_rid;
int i;
@@ -5156,7 +5156,7 @@ static void proc_SSID_on_close(struct inode *inode, struct file *file)
static void proc_APList_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data = file->private_data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
APListRid *APList_rid = &ai->APList;
int i;
@@ -5280,7 +5280,7 @@ static int set_wep_tx_idx(struct airo_info *ai, u16 index, int perm, int lock)
static void proc_wepkey_on_close(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
int i, rc;
char key[16];
@@ -5331,7 +5331,7 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file)
static int proc_wepkey_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
char *ptr;
WepKeyRid wkr;
@@ -5379,7 +5379,7 @@ static int proc_wepkey_open(struct inode *inode, struct file *file)
static int proc_SSID_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
int i;
char *ptr;
@@ -5423,7 +5423,7 @@ static int proc_SSID_open(struct inode *inode, struct file *file)
static int proc_APList_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
int i;
char *ptr;
@@ -5462,7 +5462,7 @@ static int proc_APList_open(struct inode *inode, struct file *file)
static int proc_BSSList_open(struct inode *inode, struct file *file)
{
struct proc_data *data;
- struct net_device *dev = PDE_DATA(inode);
+ struct net_device *dev = pde_data(inode);
struct airo_info *ai = dev->ml_priv;
char *ptr;
BSSListRid BSSList_rid;
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index c21c0c68849a..85e704283755 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -80,19 +80,6 @@ config IWLWIFI_OPMODE_MODULAR
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
depends on IWLDVM=n && IWLMVM=n
-config IWLWIFI_BCAST_FILTERING
- bool "Enable broadcast filtering"
- depends on IWLMVM
- help
- Say Y here to enable default bcast filtering configuration.
-
- Enabling broadcast filtering will drop any incoming wireless
- broadcast frames, except some very specific predefined
- patterns (e.g. incoming arp requests).
-
- If unsure, don't enable this option, as some programs might
- expect incoming broadcasts for their normal operations.
-
menu "Debugging Options"
config IWLWIFI_DEBUG
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 790c96df58cb..c17ab53fcd8f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2022 Intel Corporation
*/
#include <linux/uuid.h>
#include "iwl-drv.h"
@@ -888,10 +888,11 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
* only one using version 36, so skip this version entirely.
*/
return IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) >= 38 ||
- IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 ||
- (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
- ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
- CSR_HW_REV_TYPE_7265D));
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 17 &&
+ fwrt->trans->hw_rev != CSR_HW_REV_TYPE_3160) ||
+ (IWL_UCODE_SERIAL(fwrt->fw->ucode_ver) == 29 &&
+ ((fwrt->trans->hw_rev & CSR_HW_REV_TYPE_MSK) ==
+ CSR_HW_REV_TYPE_7265D));
}
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
index 0703e41403a6..35b8856e511f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h
@@ -502,11 +502,6 @@ enum iwl_legacy_cmds {
DEBUG_LOG_MSG = 0xf7,
/**
- * @BCAST_FILTER_CMD: &struct iwl_bcast_filter_cmd
- */
- BCAST_FILTER_CMD = 0xcf,
-
- /**
* @MCAST_FILTER_CMD: &struct iwl_mcast_filter_cmd
*/
MCAST_FILTER_CMD = 0xd0,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
index dd62a63956b3..e44c70b7c790 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/filter.h
@@ -36,92 +36,4 @@ struct iwl_mcast_filter_cmd {
u8 addr_list[0];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
-#define MAX_BCAST_FILTERS 8
-#define MAX_BCAST_FILTER_ATTRS 2
-
-/**
- * enum iwl_mvm_bcast_filter_attr_offset - written by fw for each Rx packet
- * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
- * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
- * start of ip payload).
- */
-enum iwl_mvm_bcast_filter_attr_offset {
- BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
- BCAST_FILTER_OFFSET_IP_END = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
- * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
- * @offset: starting offset of this pattern.
- * @reserved1: reserved
- * @val: value to match - big endian (MSB is the first
- * byte to match from offset pos).
- * @mask: mask to match (big endian).
- */
-struct iwl_fw_bcast_filter_attr {
- u8 offset_type;
- u8 offset;
- __le16 reserved1;
- __be32 val;
- __be32 mask;
-} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
-
-/**
- * enum iwl_mvm_bcast_filter_frame_type - filter frame type
- * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
- * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
- */
-enum iwl_mvm_bcast_filter_frame_type {
- BCAST_FILTER_FRAME_TYPE_ALL = 0,
- BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
-};
-
-/**
- * struct iwl_fw_bcast_filter - broadcast filter
- * @discard: discard frame (1) or let it pass (0).
- * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
- * @reserved1: reserved
- * @num_attrs: number of valid attributes in this filter.
- * @attrs: attributes of this filter. a filter is considered matched
- * only when all its attributes are matched (i.e. AND relationship)
- */
-struct iwl_fw_bcast_filter {
- u8 discard;
- u8 frame_type;
- u8 num_attrs;
- u8 reserved1;
- struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
-} __packed; /* BCAST_FILTER_S_VER_1 */
-
-/**
- * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
- * @default_discard: default action for this mac (discard (1) / pass (0)).
- * @reserved1: reserved
- * @attached_filters: bitmap of relevant filters for this mac.
- */
-struct iwl_fw_bcast_mac {
- u8 default_discard;
- u8 reserved1;
- __le16 attached_filters;
-} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
-
-/**
- * struct iwl_bcast_filter_cmd - broadcast filtering configuration
- * @disable: enable (0) / disable (1)
- * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
- * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
- * @reserved1: reserved
- * @filters: broadcast filters
- * @macs: broadcast filtering configuration per-mac
- */
-struct iwl_bcast_filter_cmd {
- u8 disable;
- u8 max_bcast_filters;
- u8 max_macs;
- u8 reserved1;
- struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
- struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
-} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
-
#endif /* __iwl_fw_api_filter_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index 173a6991587b..4a7723eb8c1d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -752,7 +752,6 @@ struct iwl_lq_cmd {
u8 iwl_fw_rate_idx_to_plcp(int idx);
u32 iwl_new_rate_from_v1(u32 rate_v1);
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags);
const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx);
const char *iwl_rs_pretty_ant(u8 ant);
const char *iwl_rs_pretty_bw(int bw);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index e4ebda64cd52..efc6540d31af 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -181,7 +181,6 @@ struct iwl_ucode_capa {
* @IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE: new NS offload (large version)
* @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
- * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
* @IWL_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
*/
enum iwl_ucode_tlv_flag {
@@ -196,7 +195,6 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_EBS_SUPPORT = BIT(25),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
- IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
};
typedef unsigned int __bitwise iwl_ucode_tlv_api_t;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
index a21c3befd93b..a835214611ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
@@ -91,6 +91,20 @@ const char *iwl_rs_pretty_bw(int bw)
}
IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw);
+static u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
+ int idx;
+ bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
+ int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
+ int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
+
+ for (idx = offset; idx < last; idx++)
+ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
+ return idx - offset;
+ return IWL_RATE_INVALID;
+}
+
u32 iwl_new_rate_from_v1(u32 rate_v1)
{
u32 rate_v2 = 0;
@@ -144,7 +158,10 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
} else {
u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
- WARN_ON(legacy_rate < 0);
+ if (WARN_ON_ONCE(legacy_rate == IWL_RATE_INVALID))
+ legacy_rate = (rate_v1 & RATE_MCS_CCK_MSK_V1) ?
+ IWL_FIRST_CCK_RATE : IWL_FIRST_OFDM_RATE;
+
rate_v2 |= legacy_rate;
if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK;
@@ -172,20 +189,6 @@ u32 iwl_new_rate_from_v1(u32 rate_v1)
}
IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1);
-u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
-{
- int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
- int idx;
- bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
- int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
- int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
-
- for (idx = offset; idx < last; idx++)
- if (iwl_fw_rate_idx_to_plcp(idx) == rate)
- return idx - offset;
- return -1;
-}
-
int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
{
char *type;
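
The rs.c hunks above also fix a latent type bug: iwl_legacy_rate_to_fw_idx() returns u32, so the old sentinel of -1 became 0xffffffff and the removed WARN_ON(legacy_rate < 0) could never fire. A stand-alone sketch of the pitfall (plain C, not driver code):

	#include <stdio.h>

	/* Mimics the old helper: returns (u32)-1 when no rate index matches. */
	static unsigned int lookup_rate(int found)
	{
		return found ? 7u : (unsigned int)-1;
	}

	int main(void)
	{
		unsigned int rate = lookup_rate(0);

		if (rate < 0)	/* unsigned: always false, a warning at best */
			puts("caught bad rate");
		else
			printf("missed it: rate=0x%x\n", rate); /* 0xffffffff */
		return 0;
	}

Hence the explicit IWL_RATE_INVALID sentinel, checked with WARN_ON_ONCE() and backed by a sane CCK/OFDM fallback.
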
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index f90d4662c164..8e10ba88afb3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -329,6 +329,7 @@ enum {
#define CSR_HW_REV_TYPE_2x00 (0x0000100)
#define CSR_HW_REV_TYPE_105 (0x0000110)
#define CSR_HW_REV_TYPE_135 (0x0000120)
+#define CSR_HW_REV_TYPE_3160 (0x0000164)
#define CSR_HW_REV_TYPE_7265D (0x0000210)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 83e3b731ad29..6651e78b39ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1707,6 +1707,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
out_unbind:
complete(&drv->request_firmware_complete);
device_release_driver(drv->trans->dev);
+ /* drv has just been freed by the release */
+ failure = false;
free:
if (failure)
iwl_dealloc_ucode(drv);
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/main.c b/drivers/net/wireless/intel/iwlwifi/mei/main.c
index d9733aaf6f6e..2f7f0f994ca3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/main.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2021 Intel Corporation
+ * Copyright (C) 2021-2022 Intel Corporation
*/
#include <linux/etherdevice.h>
@@ -146,6 +146,7 @@ struct iwl_mei_filters {
* @csme_taking_ownership: true when CSME is taking ownership. Used to remember
* to send CSME_OWNERSHIP_CONFIRMED when the driver completes its down
* flow.
+ * @link_prot_state: true when we are in link protection PASSIVE
* @csa_throttle_end_wk: used when &csa_throttled is true
* @data_q_lock: protects the access to the data queues which are
* accessed without the mutex.
@@ -165,6 +166,7 @@ struct iwl_mei {
bool amt_enabled;
bool csa_throttled;
bool csme_taking_ownership;
+ bool link_prot_state;
struct delayed_work csa_throttle_end_wk;
spinlock_t data_q_lock;
@@ -229,8 +231,6 @@ static int iwl_mei_alloc_shared_mem(struct mei_cl_device *cldev)
if (IS_ERR(mem->ctrl)) {
int ret = PTR_ERR(mem->ctrl);
- dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
- ret);
mem->ctrl = NULL;
return ret;
@@ -669,6 +669,8 @@ iwl_mei_handle_conn_status(struct mei_cl_device *cldev,
iwl_mei_cache.ops->me_conn_status(iwl_mei_cache.priv, &conn_info);
+ mei->link_prot_state = status->link_prot_state;
+
/*
* Update the Rfkill state in case the host does not own the device:
* if we are in Link Protection, ask to not touch the device, else,
@@ -1663,9 +1665,11 @@ int iwl_mei_register(void *priv, const struct iwl_mei_ops *ops)
mei_cldev_get_drvdata(iwl_mei_global_cldev);
/* we have already a SAP connection */
- if (iwl_mei_is_connected())
+ if (iwl_mei_is_connected()) {
iwl_mei_send_sap_msg(mei->cldev,
SAP_MSG_NOTIF_WIFIDR_UP);
+ ops->rfkill(priv, mei->link_prot_state);
+ }
}
ret = 0;
@@ -1784,6 +1788,8 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
#endif /* CONFIG_DEBUG_FS */
+#define ALLOC_SHARED_MEM_RETRY_MAX_NUM 3
+
/*
* iwl_mei_probe - the probe function called by the mei bus enumeration
*
@@ -1795,6 +1801,7 @@ static void iwl_mei_dbgfs_unregister(struct iwl_mei *mei) {}
static int iwl_mei_probe(struct mei_cl_device *cldev,
const struct mei_cl_device_id *id)
{
+ int alloc_retry = ALLOC_SHARED_MEM_RETRY_MAX_NUM;
struct iwl_mei *mei;
int ret;
@@ -1812,15 +1819,31 @@ static int iwl_mei_probe(struct mei_cl_device *cldev,
mei_cldev_set_drvdata(cldev, mei);
mei->cldev = cldev;
- /*
- * The CSME firmware needs to boot the internal WLAN client. Wait here
- * so that the DMA map request will succeed.
- */
- msleep(20);
+ do {
+ ret = iwl_mei_alloc_shared_mem(cldev);
+ if (!ret)
+ break;
+ /*
+ * The CSME firmware needs to boot the internal WLAN client.
+ * This can take time in certain configurations (usually
+ * upon resume and when the whole CSME firmware is shut down
+ * during suspend).
+ *
+ * Wait a bit before retrying and hope we'll succeed next time.
+ */
- ret = iwl_mei_alloc_shared_mem(cldev);
- if (ret)
+ dev_dbg(&cldev->dev,
+ "Couldn't allocate the shared memory: %d, attempt %d / %d\n",
+ ret, alloc_retry, ALLOC_SHARED_MEM_RETRY_MAX_NUM);
+ msleep(100);
+ alloc_retry--;
+ } while (alloc_retry);
+
+ if (ret) {
+ dev_err(&cldev->dev, "Couldn't allocate the shared memory: %d\n",
+ ret);
goto free;
+ }
iwl_mei_init_shared_mem(mei);
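
The probe change above replaces a single fixed msleep(20) with a bounded retry loop, because the CSME firmware can take considerably longer to boot its internal WLAN client around suspend and resume. The same pattern in isolation (a sketch; the retry count and delay mirror the patch, but the helper name is hypothetical):

	#include <linux/delay.h>

	#define RETRY_MAX	3

	static int alloc_shared_mem_retry(struct mei_cl_device *cldev)
	{
		int retry = RETRY_MAX;
		int ret;

		do {
			ret = iwl_mei_alloc_shared_mem(cldev);
			if (!ret)
				break;
			msleep(100);	/* give CSME time to boot the WLAN client */
		} while (--retry);

		return ret;	/* 0 on success, last error after RETRY_MAX tries */
	}
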
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/net.c b/drivers/net/wireless/intel/iwlwifi/mei/net.c
index 5f966af69720..468102a95e1b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/net.c
+++ b/drivers/net/wireless/intel/iwlwifi/mei/net.c
@@ -195,8 +195,7 @@ static bool iwl_mei_rx_filter_ipv4(struct sk_buff *skb,
bool match;
if (!pskb_may_pull(skb, skb_network_offset(skb) + sizeof(*iphdr)) ||
- !pskb_may_pull(skb, skb_network_offset(skb) +
- sizeof(ip_hdrlen(skb) - sizeof(*iphdr))))
+ !pskb_may_pull(skb, skb_network_offset(skb) + ip_hdrlen(skb)))
return false;
iphdrlen = ip_hdrlen(skb);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index fb4920b01dbb..63432c24eb59 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1369,189 +1369,6 @@ static ssize_t iwl_dbgfs_dbg_time_point_write(struct iwl_mvm *mvm,
return count;
}
-#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bcast_filter_cmd cmd;
- const struct iwl_fw_bcast_filter *filter;
- char *buf;
- int bufsz = 1024;
- int i, j, pos = 0;
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
- ADD_TEXT("None\n");
- mutex_unlock(&mvm->mutex);
- goto out;
- }
- mutex_unlock(&mvm->mutex);
-
- for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
- filter = &cmd.filters[i];
-
- ADD_TEXT("Filter [%d]:\n", i);
- ADD_TEXT("\tDiscard=%d\n", filter->discard);
- ADD_TEXT("\tFrame Type: %s\n",
- filter->frame_type ? "IPv4" : "Generic");
-
- for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
- const struct iwl_fw_bcast_filter_attr *attr;
-
- attr = &filter->attrs[j];
- if (!attr->mask)
- break;
-
- ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
- j, attr->offset,
- attr->offset_type ? "IP End" :
- "Payload Start",
- be32_to_cpu(attr->mask),
- be32_to_cpu(attr->val),
- le16_to_cpu(attr->reserved1));
- }
- }
-out:
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- int pos, next_pos;
- struct iwl_fw_bcast_filter filter = {};
- struct iwl_bcast_filter_cmd cmd;
- u32 filter_id, attr_id, mask, value;
- int err = 0;
-
- if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
- &filter.frame_type, &pos) != 3)
- return -EINVAL;
-
- if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
- filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
- return -EINVAL;
-
- for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
- attr_id++) {
- struct iwl_fw_bcast_filter_attr *attr =
- &filter.attrs[attr_id];
-
- if (pos >= count)
- break;
-
- if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
- &attr->offset, &attr->offset_type,
- &mask, &value, &next_pos) != 4)
- return -EINVAL;
-
- attr->mask = cpu_to_be32(mask);
- attr->val = cpu_to_be32(value);
- if (mask)
- filter.num_attrs++;
-
- pos += next_pos;
- }
-
- mutex_lock(&mvm->mutex);
- memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
- &filter, sizeof(filter));
-
- /* send updated bcast filtering configuration */
- if (iwl_mvm_firmware_running(mvm) &&
- mvm->dbgfs_bcast_filtering.override &&
- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
- mutex_unlock(&mvm->mutex);
-
- return err ?: count;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bcast_filter_cmd cmd;
- char *buf;
- int bufsz = 1024;
- int i, pos = 0;
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
- ADD_TEXT("None\n");
- mutex_unlock(&mvm->mutex);
- goto out;
- }
- mutex_unlock(&mvm->mutex);
-
- for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
- const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
-
- ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
- i, mac->default_discard, mac->attached_filters);
- }
-out:
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
- char *buf, size_t count,
- loff_t *ppos)
-{
- struct iwl_bcast_filter_cmd cmd;
- struct iwl_fw_bcast_mac mac = {};
- u32 mac_id, attached_filters;
- int err = 0;
-
- if (!mvm->bcast_filters)
- return -ENOENT;
-
- if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
- &attached_filters) != 3)
- return -EINVAL;
-
- if (mac_id >= ARRAY_SIZE(cmd.macs) ||
- mac.default_discard > 1 ||
- attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
- return -EINVAL;
-
- mac.attached_filters = cpu_to_le16(attached_filters);
-
- mutex_lock(&mvm->mutex);
- memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
- &mac, sizeof(mac));
-
- /* send updated bcast filtering configuration */
- if (iwl_mvm_firmware_running(mvm) &&
- mvm->dbgfs_bcast_filtering.override &&
- iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
- mutex_unlock(&mvm->mutex);
-
- return err ?: count;
-}
-#endif
-
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -1881,11 +1698,6 @@ MVM_DEBUGFS_WRITE_FILE_OPS(inject_beacon_ie_restore, 512);
MVM_DEBUGFS_READ_FILE_OPS(uapsd_noagg_bssids);
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
-MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
-#endif
-
#ifdef CONFIG_ACPI
MVM_DEBUGFS_READ_FILE_OPS(sar_geo_profile);
#endif
@@ -2097,21 +1909,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(uapsd_noagg_bssids, mvm->debugfs_dir, S_IRUSR);
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
- bcast_dir = debugfs_create_dir("bcast_filtering",
- mvm->debugfs_dir);
-
- debugfs_create_bool("override", 0600, bcast_dir,
- &mvm->dbgfs_bcast_filtering.override);
-
- MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
- bcast_dir, 0600);
- MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
- bcast_dir, 0600);
- }
-#endif
-
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, 0400);
debugfs_create_bool("d3_wake_sysassert", 0600, mvm->debugfs_dir,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 6f4690e56a46..ae589b3b8c46 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -1741,7 +1741,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_sar_init(mvm);
if (ret == 0)
ret = iwl_mvm_sar_geo_init(mvm);
- else if (ret < 0)
+ if (ret < 0)
goto error;
ret = iwl_mvm_sgom_init(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 65f4fe3ef504..4ac599f6ad22 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -55,79 +55,6 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
},
};
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-/*
- * Use the reserved field to indicate magic values.
- * these values will only be used internally by the driver,
- * and won't make it to the fw (reserved will be 0).
- * BC_FILTER_MAGIC_IP - configure the val of this attribute to
- * be the vif's ip address. in case there is not a single
- * ip address (0, or more than 1), this attribute will
- * be skipped.
- * BC_FILTER_MAGIC_MAC - set the val of this attribute to
- * the LSB bytes of the vif's mac address
- */
-enum {
- BC_FILTER_MAGIC_NONE = 0,
- BC_FILTER_MAGIC_IP,
- BC_FILTER_MAGIC_MAC,
-};
-
-static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
- {
- /* arp */
- .discard = 0,
- .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
- .attrs = {
- {
- /* frame type - arp, hw type - ethernet */
- .offset_type =
- BCAST_FILTER_OFFSET_PAYLOAD_START,
- .offset = sizeof(rfc1042_header),
- .val = cpu_to_be32(0x08060001),
- .mask = cpu_to_be32(0xffffffff),
- },
- {
- /* arp dest ip */
- .offset_type =
- BCAST_FILTER_OFFSET_PAYLOAD_START,
- .offset = sizeof(rfc1042_header) + 2 +
- sizeof(struct arphdr) +
- ETH_ALEN + sizeof(__be32) +
- ETH_ALEN,
- .mask = cpu_to_be32(0xffffffff),
- /* mark it as special field */
- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
- },
- },
- },
- {
- /* dhcp offer bcast */
- .discard = 0,
- .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
- .attrs = {
- {
- /* udp dest port - 68 (bootp client)*/
- .offset_type = BCAST_FILTER_OFFSET_IP_END,
- .offset = offsetof(struct udphdr, dest),
- .val = cpu_to_be32(0x00440000),
- .mask = cpu_to_be32(0xffff0000),
- },
- {
- /* dhcp - lsb bytes of client hw address */
- .offset_type = BCAST_FILTER_OFFSET_IP_END,
- .offset = 38,
- .mask = cpu_to_be32(0xffffffff),
- /* mark it as special field */
- .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
- },
- },
- },
- /* last filter must be empty */
- {},
-};
-#endif
-
static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
.max_peers = IWL_MVM_TOF_MAX_APS,
.report_ap_tsf = 1,
@@ -693,11 +620,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
#endif
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- /* assign default bcast filtering configuration */
- mvm->bcast_filters = iwl_mvm_default_bcast_filters;
-#endif
-
ret = iwl_mvm_leds_init(mvm);
if (ret)
return ret;
@@ -1853,162 +1775,6 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
mutex_unlock(&mvm->mutex);
}
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
-struct iwl_bcast_iter_data {
- struct iwl_mvm *mvm;
- struct iwl_bcast_filter_cmd *cmd;
- u8 current_filter;
-};
-
-static void
-iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
- const struct iwl_fw_bcast_filter *in_filter,
- struct iwl_fw_bcast_filter *out_filter)
-{
- struct iwl_fw_bcast_filter_attr *attr;
- int i;
-
- memcpy(out_filter, in_filter, sizeof(*out_filter));
-
- for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
- attr = &out_filter->attrs[i];
-
- if (!attr->mask)
- break;
-
- switch (attr->reserved1) {
- case cpu_to_le16(BC_FILTER_MAGIC_IP):
- if (vif->bss_conf.arp_addr_cnt != 1) {
- attr->mask = 0;
- continue;
- }
-
- attr->val = vif->bss_conf.arp_addr_list[0];
- break;
- case cpu_to_le16(BC_FILTER_MAGIC_MAC):
- attr->val = *(__be32 *)&vif->addr[2];
- break;
- default:
- break;
- }
- attr->reserved1 = 0;
- out_filter->num_attrs++;
- }
-}
-
-static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_bcast_iter_data *data = _data;
- struct iwl_mvm *mvm = data->mvm;
- struct iwl_bcast_filter_cmd *cmd = data->cmd;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_fw_bcast_mac *bcast_mac;
- int i;
-
- if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
- return;
-
- bcast_mac = &cmd->macs[mvmvif->id];
-
- /*
- * enable filtering only for associated stations, but not for P2P
- * Clients
- */
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
- !vif->bss_conf.assoc)
- return;
-
- bcast_mac->default_discard = 1;
-
- /* copy all configured filters */
- for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
- /*
- * Make sure we don't exceed our filters limit.
- * if there is still a valid filter to be configured,
- * be on the safe side and just allow bcast for this mac.
- */
- if (WARN_ON_ONCE(data->current_filter >=
- ARRAY_SIZE(cmd->filters))) {
- bcast_mac->default_discard = 0;
- bcast_mac->attached_filters = 0;
- break;
- }
-
- iwl_mvm_set_bcast_filter(vif,
- &mvm->bcast_filters[i],
- &cmd->filters[data->current_filter]);
-
- /* skip current filter if it contains no attributes */
- if (!cmd->filters[data->current_filter].num_attrs)
- continue;
-
- /* attach the filter to current mac */
- bcast_mac->attached_filters |=
- cpu_to_le16(BIT(data->current_filter));
-
- data->current_filter++;
- }
-}
-
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
- struct iwl_bcast_filter_cmd *cmd)
-{
- struct iwl_bcast_iter_data iter_data = {
- .mvm = mvm,
- .cmd = cmd,
- };
-
- if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL)
- return false;
-
- memset(cmd, 0, sizeof(*cmd));
- cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
- cmd->max_macs = ARRAY_SIZE(cmd->macs);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- /* use debugfs filters/macs if override is configured */
- if (mvm->dbgfs_bcast_filtering.override) {
- memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
- sizeof(cmd->filters));
- memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
- sizeof(cmd->macs));
- return true;
- }
-#endif
-
- /* if no filters are configured, do nothing */
- if (!mvm->bcast_filters)
- return false;
-
- /* configure and attach these filters for each associated sta vif */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_bcast_filter_iterator, &iter_data);
-
- return true;
-}
-
-static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
- struct iwl_bcast_filter_cmd cmd;
-
- if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
- return 0;
-
- if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
- return 0;
-
- return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0,
- sizeof(cmd), &cmd);
-}
-#else
-static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm)
-{
- return 0;
-}
-#endif
-
static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -2520,7 +2286,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
iwl_mvm_recalc_multicast(mvm);
- iwl_mvm_configure_bcast_filter(mvm);
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
@@ -2570,11 +2335,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
}
}
- if (changes & BSS_CHANGED_ARP_FILTER) {
- IWL_DEBUG_MAC80211(mvm, "arp filter changed\n");
- iwl_mvm_configure_bcast_filter(mvm);
- }
-
if (changes & BSS_CHANGED_BANDWIDTH)
iwl_mvm_apply_fw_smps_request(vif);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index 1dcbb0eb63c3..d78f40730594 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -884,17 +884,6 @@ struct iwl_mvm {
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
-#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
- /* broadcast filters to configure for each associated station */
- const struct iwl_fw_bcast_filter *bcast_filters;
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- struct {
- bool override;
- struct iwl_bcast_filter_cmd cmd;
- } dbgfs_bcast_filtering;
-#endif
-#endif
-
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
struct iwl_mvm_int_sta snif_sta;
@@ -1593,8 +1582,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
-bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
- struct iwl_bcast_filter_cmd *cmd);
/*
* FW notifications / CMD responses handlers
@@ -2225,7 +2212,7 @@ static inline void iwl_mvm_mei_device_down(struct iwl_mvm *mvm)
static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
{
bool sw_rfkill =
- mvm->hw_registered ? rfkill_blocked(mvm->hw->wiphy->rfkill) : false;
+ mvm->hw_registered ? rfkill_soft_blocked(mvm->hw->wiphy->rfkill) : false;
if (mvm->mei_registered)
iwl_mei_set_rfkill_state(iwl_mvm_is_radio_killed(mvm),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 87630d38dc52..1f8b97995b94 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -469,7 +469,6 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(MCC_CHUB_UPDATE_CMD),
HCMD_NAME(MARKER_CMD),
HCMD_NAME(BT_PROFILE_NOTIFICATION),
- HCMD_NAME(BCAST_FILTER_CMD),
HCMD_NAME(MCAST_FILTER_CMD),
HCMD_NAME(REPLY_SF_CFG_CMD),
HCMD_NAME(REPLY_BEACON_FILTERING_CMD),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 6fa2c12f7955..9213f8518f10 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1427,7 +1427,7 @@ static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw,
struct ieee80211_tx_rate *r = &info->status.rates[0];
if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
- TX_CMD, 0) > 6)
+ TX_CMD, 0) <= 6)
rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
info->status.antenna =
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index 0febdcacbd42..94f40c4d2421 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -385,8 +385,7 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
+ return -EIO;
}
iwl_enable_rfkill_int(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index a63386a01232..ef14584fc0a1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1329,8 +1329,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/* This may fail if AMT took ownership of the device */
if (iwl_pcie_prepare_card_hw(trans)) {
IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
+ return -EIO;
}
iwl_enable_rfkill_int(trans);
diff --git a/drivers/net/wireless/intersil/hostap/hostap_ap.c b/drivers/net/wireless/intersil/hostap/hostap_ap.c
index 8bcc1cdcb75b..462ccc7d7d1a 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_ap.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_ap.c
@@ -69,7 +69,7 @@ static void prism2_send_mgmt(struct net_device *dev,
#if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS)
static int ap_debug_proc_show(struct seq_file *m, void *v)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
seq_printf(m, "BridgedUnicastFrames=%u\n", ap->bridged_unicast);
seq_printf(m, "BridgedMulticastFrames=%u\n", ap->bridged_multicast);
@@ -320,7 +320,7 @@ void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap,
static int ap_control_proc_show(struct seq_file *m, void *v)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
char *policy_txt;
struct mac_entry *entry;
@@ -352,20 +352,20 @@ static int ap_control_proc_show(struct seq_file *m, void *v)
static void *ap_control_proc_start(struct seq_file *m, loff_t *_pos)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
spin_lock_bh(&ap->mac_restrictions.lock);
return seq_list_start_head(&ap->mac_restrictions.mac_list, *_pos);
}
static void *ap_control_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
return seq_list_next(v, &ap->mac_restrictions.mac_list, _pos);
}
static void ap_control_proc_stop(struct seq_file *m, void *v)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
spin_unlock_bh(&ap->mac_restrictions.lock);
}
@@ -554,20 +554,20 @@ static int prism2_ap_proc_show(struct seq_file *m, void *v)
static void *prism2_ap_proc_start(struct seq_file *m, loff_t *_pos)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
spin_lock_bh(&ap->sta_table_lock);
return seq_list_start_head(&ap->sta_list, *_pos);
}
static void *prism2_ap_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
return seq_list_next(v, &ap->sta_list, _pos);
}
static void prism2_ap_proc_stop(struct seq_file *m, void *v)
{
- struct ap_data *ap = PDE_DATA(file_inode(m->file));
+ struct ap_data *ap = pde_data(file_inode(m->file));
spin_unlock_bh(&ap->sta_table_lock);
}
diff --git a/drivers/net/wireless/intersil/hostap/hostap_download.c b/drivers/net/wireless/intersil/hostap/hostap_download.c
index 7c6a5a6d1d45..3672291ced5c 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_download.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_download.c
@@ -227,7 +227,7 @@ static int prism2_download_aux_dump_proc_open(struct inode *inode, struct file *
sizeof(struct prism2_download_aux_dump));
if (ret == 0) {
struct seq_file *m = file->private_data;
- m->private = PDE_DATA(inode);
+ m->private = pde_data(inode);
}
return ret;
}
diff --git a/drivers/net/wireless/intersil/hostap/hostap_proc.c b/drivers/net/wireless/intersil/hostap/hostap_proc.c
index 51c847d98755..61f68786056f 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_proc.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_proc.c
@@ -97,20 +97,20 @@ static int prism2_wds_proc_show(struct seq_file *m, void *v)
static void *prism2_wds_proc_start(struct seq_file *m, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
read_lock_bh(&local->iface_lock);
return seq_list_start(&local->hostap_interfaces, *_pos);
}
static void *prism2_wds_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
return seq_list_next(v, &local->hostap_interfaces, _pos);
}
static void prism2_wds_proc_stop(struct seq_file *m, void *v)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
read_unlock_bh(&local->iface_lock);
}
@@ -123,7 +123,7 @@ static const struct seq_operations prism2_wds_proc_seqops = {
static int prism2_bss_list_proc_show(struct seq_file *m, void *v)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
struct list_head *ptr = v;
struct hostap_bss_info *bss;
@@ -151,21 +151,21 @@ static int prism2_bss_list_proc_show(struct seq_file *m, void *v)
static void *prism2_bss_list_proc_start(struct seq_file *m, loff_t *_pos)
__acquires(&local->lock)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
spin_lock_bh(&local->lock);
return seq_list_start_head(&local->bss_list, *_pos);
}
static void *prism2_bss_list_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
return seq_list_next(v, &local->bss_list, _pos);
}
static void prism2_bss_list_proc_stop(struct seq_file *m, void *v)
__releases(&local->lock)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
spin_unlock_bh(&local->lock);
}
@@ -198,7 +198,7 @@ static int prism2_crypt_proc_show(struct seq_file *m, void *v)
static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf,
size_t count, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(file));
+ local_info_t *local = pde_data(file_inode(file));
size_t off;
if (local->pda == NULL || *_pos >= PRISM2_PDA_SIZE)
@@ -272,7 +272,7 @@ static int prism2_io_debug_proc_read(char *page, char **start, off_t off,
#ifndef PRISM2_NO_STATION_MODES
static int prism2_scan_results_proc_show(struct seq_file *m, void *v)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
unsigned long entry;
int i, len;
struct hfa384x_hostscan_result *scanres;
@@ -322,7 +322,7 @@ static int prism2_scan_results_proc_show(struct seq_file *m, void *v)
static void *prism2_scan_results_proc_start(struct seq_file *m, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
spin_lock_bh(&local->lock);
/* We have a header (pos 0) + N results to show (pos 1...N) */
@@ -333,7 +333,7 @@ static void *prism2_scan_results_proc_start(struct seq_file *m, loff_t *_pos)
static void *prism2_scan_results_proc_next(struct seq_file *m, void *v, loff_t *_pos)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
++*_pos;
if (*_pos > local->last_scan_results_count)
@@ -343,7 +343,7 @@ static void *prism2_scan_results_proc_next(struct seq_file *m, void *v, loff_t *
static void prism2_scan_results_proc_stop(struct seq_file *m, void *v)
{
- local_info_t *local = PDE_DATA(file_inode(m->file));
+ local_info_t *local = pde_data(file_inode(m->file));
spin_unlock_bh(&local->lock);
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 0307a6677907..fc5725f6daee 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2336,6 +2336,15 @@ static void hw_scan_work(struct work_struct *work)
if (req->ie_len)
skb_put_data(probe, req->ie, req->ie_len);
+ if (!ieee80211_tx_prepare_skb(hwsim->hw,
+ hwsim->hw_scan_vif,
+ probe,
+ hwsim->tmp_chan->band,
+ NULL)) {
+ kfree_skb(probe);
+ continue;
+ }
+
local_bh_disable();
mac80211_hwsim_tx_frame(hwsim->hw, probe,
hwsim->tmp_chan);
@@ -3770,6 +3779,10 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
}
txi->flags |= IEEE80211_TX_STAT_ACK;
}
+
+ if (hwsim_flags & HWSIM_TX_CTL_NO_ACK)
+ txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
ieee80211_tx_status_irqsafe(data2->hw, skb);
return 0;
out:
@@ -4498,7 +4511,7 @@ static void remove_vqs(struct virtio_device *vdev)
{
int i;
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
for (i = 0; i < ARRAY_SIZE(hwsim_vqs); i++) {
struct virtqueue *vq = hwsim_vqs[i];
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index e3a3dc3e45b4..2987ad9271f6 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2746,7 +2746,7 @@ static ssize_t int_proc_write(struct file *file, const char __user *buffer,
nr = nr * 10 + c;
p++;
} while (--len);
- *(int *)PDE_DATA(file_inode(file)) = nr;
+ *(int *)pde_data(file_inode(file)) = nr;
return count;
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_coex.c b/drivers/net/wireless/rsi/rsi_91x_coex.c
index a0c5d02ae88c..8a3d86897ea8 100644
--- a/drivers/net/wireless/rsi/rsi_91x_coex.c
+++ b/drivers/net/wireless/rsi/rsi_91x_coex.c
@@ -63,7 +63,7 @@ static void rsi_coex_scheduler_thread(struct rsi_common *common)
rsi_coex_sched_tx_pkts(coex_cb);
} while (atomic_read(&coex_cb->coex_tx_thread.thread_done) == 0);
- complete_and_exit(&coex_cb->coex_tx_thread.completion, 0);
+ kthread_complete_and_exit(&coex_cb->coex_tx_thread.completion, 0);
}
int rsi_coex_recv_pkt(struct rsi_common *common, u8 *msg)
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index 5d1490fc32db..f9f004446b07 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -264,7 +264,7 @@ static void rsi_tx_scheduler_thread(struct rsi_common *common)
if (common->init_done)
rsi_core_qos_processor(common);
} while (atomic_read(&common->tx_thread.thread_done) == 0);
- complete_and_exit(&common->tx_thread.completion, 0);
+ kthread_complete_and_exit(&common->tx_thread.completion, 0);
}
#ifdef CONFIG_RSI_COEX
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index 8ace1874e5cb..b2b47a0abcbf 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -75,7 +75,7 @@ void rsi_sdio_rx_thread(struct rsi_common *common)
rsi_dbg(INFO_ZONE, "%s: Terminated SDIO RX thread\n", __func__);
atomic_inc(&sdev->rx_thread.thread_done);
- complete_and_exit(&sdev->rx_thread.completion, 0);
+ kthread_complete_and_exit(&sdev->rx_thread.completion, 0);
}
/**
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 4ffcdde1acb1..5130b0e72adc 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -56,6 +56,6 @@ void rsi_usb_rx_thread(struct rsi_common *common)
out:
rsi_dbg(INFO_ZONE, "%s: Terminated thread\n", __func__);
skb_queue_purge(&dev->rx_q);
- complete_and_exit(&dev->rx_thread.completion, 0);
+ kthread_complete_and_exit(&dev->rx_thread.completion, 0);
}
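
All four rsi hunks are the mechanical rename of complete_and_exit() to kthread_complete_and_exit(); the semantics are untouched: signal the completion so the stopping side can proceed, then terminate the current kthread without returning. A minimal hedged sketch (the thread body here is hypothetical):

	#include <linux/kthread.h>
	#include <linux/completion.h>
	#include <linux/delay.h>

	static int example_rx_thread(void *arg)
	{
		struct completion *done = arg;

		while (!kthread_should_stop())
			msleep(20);	/* stand-in for real RX work */

		/* was: complete_and_exit(done, 0); */
		kthread_complete_and_exit(done, 0);
	}
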
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index 71bf9b4f769f..6872782e8dd8 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -385,13 +385,13 @@ static void mhi_net_rx_refill_work(struct work_struct *work)
int err;
while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
- struct sk_buff *skb = alloc_skb(MHI_DEFAULT_MRU, GFP_KERNEL);
+ struct sk_buff *skb = alloc_skb(mbim->mru, GFP_KERNEL);
if (unlikely(!skb))
break;
err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb,
- MHI_DEFAULT_MRU, MHI_EOT);
+ mbim->mru, MHI_EOT);
if (unlikely(err)) {
kfree_skb(skb);
break;
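
The fix above sizes RX buffers by the runtime-negotiated mbim->mru instead of the compile-time MHI_DEFAULT_MRU, keeping the allocation and the queued length in lockstep. Consolidated, the refill loop after the patch looks roughly like this (a sketch of the patched flow, not a verbatim copy; struct mhi_mbim_context is driver-local):

	#include <linux/skbuff.h>
	#include <linux/mhi.h>

	static void refill_rx_sketch(struct mhi_mbim_context *mbim,
				     struct mhi_device *mdev)
	{
		while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
			/* allocate exactly what the hardware may fill */
			struct sk_buff *skb = alloc_skb(mbim->mru, GFP_KERNEL);

			if (unlikely(!skb))
				break;
			if (mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb,
					  mbim->mru, MHI_EOT)) {
				kfree_skb(skb);
				break;
			}
		}
	}
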
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 37d26f01986b..62a0f1a010cb 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -188,7 +188,7 @@ do { \
static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
{
int polarity, retry, ret;
- char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
+ static const char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };
int count = sizeof(rset_cmd);
nfc_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n");
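
The pn544 tweak is a micro-cleanup: the reset frame is never modified, so declaring it static const places the 6 bytes in .rodata once rather than rebuilding the array on the stack on every call, and sizeof() on it still yields 6. In isolation:

	#include <stddef.h>

	/* One shared read-only template rather than a per-call stack copy. */
	static const char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 };

	static size_t rset_cmd_len(void)
	{
		return sizeof(rset_cmd);	/* 6 */
	}
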
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index a43fc4117fa5..c922f10d0d7b 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -316,6 +316,11 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
return -ENOMEM;
transaction->aid_len = skb->data[1];
+
+ /* Checking if the length of the AID is valid */
+ if (transaction->aid_len > sizeof(transaction->aid))
+ return -EINVAL;
+
memcpy(transaction->aid, &skb->data[2],
transaction->aid_len);
@@ -325,6 +330,11 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
return -EPROTO;
transaction->params_len = skb->data[transaction->aid_len + 3];
+
+ /* Total size is allocated (skb->len - 2) minus fixed array members */
+ if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction)))
+ return -EINVAL;
+
memcpy(transaction->params, skb->data +
transaction->aid_len + 4, transaction->params_len);
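
The two added checks above close a controller-controlled buffer overflow: aid_len and params_len arrive from the NFC device, so each must be validated against the room actually available before the memcpy(). The defensive shape of the fix, reduced to stand-alone C (struct and names hypothetical):

	#include <stdint.h>
	#include <string.h>

	struct transaction {
		uint8_t aid_len;
		uint8_t aid[16];	/* fixed-size destination */
	};

	/* 0 on success, -1 if the device-supplied length can't be trusted. */
	static int parse_aid(struct transaction *t, const uint8_t *data, size_t len)
	{
		if (len < 2)
			return -1;
		t->aid_len = data[1];
		if (t->aid_len > sizeof(t->aid))	/* the added bound check */
			return -1;
		if (len < 2u + t->aid_len)		/* don't read past 'data' */
			return -1;
		memcpy(t->aid, &data[2], t->aid_len);
		return 0;
	}
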
diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c
index 87847c380051..04550b1f984c 100644
--- a/drivers/ntb/hw/amd/ntb_hw_amd.c
+++ b/drivers/ntb/hw/amd/ntb_hw_amd.c
@@ -1321,6 +1321,8 @@ static const struct ntb_dev_data dev_data[] = {
static const struct pci_device_id amd_ntb_pci_tbl[] = {
{ PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] },
{ PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] },
+ { PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] },
+ { PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] },
{ PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] },
{ 0, }
};
diff --git a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
index 4c6eb61a6ac6..88ae18b0efa8 100644
--- a/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
+++ b/drivers/ntb/hw/mscc/ntb_hw_switchtec.c
@@ -297,7 +297,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
* (see CMA_CONFIG_ALIGNMENT)
*/
dev_err(&sndev->stdev->dev,
- "ERROR: Memory window address is not aligned to it's size!\n");
+ "ERROR: Memory window address is not aligned to its size!\n");
return -EINVAL;
}
@@ -419,8 +419,10 @@ static void switchtec_ntb_part_link_speed(struct switchtec_ntb *sndev,
enum ntb_width *width)
{
struct switchtec_dev *stdev = sndev->stdev;
+ struct part_cfg_regs __iomem *part_cfg =
+ &stdev->mmio_part_cfg_all[partition];
- u32 pff = ioread32(&stdev->mmio_part_cfg[partition].vep_pff_inst_id);
+ u32 pff = ioread32(&part_cfg->vep_pff_inst_id) & 0xFF;
u32 linksta = ioread32(&stdev->mmio_pff_csr[pff].pci_cap_region[13]);
if (speed)
@@ -840,7 +842,6 @@ static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
u64 tpart_vec;
int self;
u64 part_map;
- int bit;
sndev->ntb.pdev = sndev->stdev->pdev;
sndev->ntb.topo = NTB_TOPO_SWITCH;
@@ -859,31 +860,31 @@ static int switchtec_ntb_init_sndev(struct switchtec_ntb *sndev)
tpart_vec |= ioread32(&sndev->mmio_ntb->ntp_info[self].target_part_low);
part_map = ioread64(&sndev->mmio_ntb->ep_map);
+ tpart_vec &= part_map;
part_map &= ~(1 << sndev->self_partition);
- if (!ffs(tpart_vec)) {
+ if (!tpart_vec) {
if (sndev->stdev->partition_count != 2) {
dev_err(&sndev->stdev->dev,
"ntb target partition not defined\n");
return -ENODEV;
}
- bit = ffs(part_map);
- if (!bit) {
+ if (!part_map) {
dev_err(&sndev->stdev->dev,
"peer partition is not NT partition\n");
return -ENODEV;
}
- sndev->peer_partition = bit - 1;
+ sndev->peer_partition = __ffs64(part_map);
} else {
- if (ffs(tpart_vec) != fls(tpart_vec)) {
+ if (__ffs64(tpart_vec) != (fls64(tpart_vec) - 1)) {
dev_err(&sndev->stdev->dev,
"ntb driver only supports 1 pair of 1-1 ntb mapping\n");
return -ENODEV;
}
- sndev->peer_partition = ffs(tpart_vec) - 1;
+ sndev->peer_partition = __ffs64(tpart_vec);
if (!(part_map & (1ULL << sndev->peer_partition))) {
dev_err(&sndev->stdev->dev,
"ntb target partition is not NT partition\n");
@@ -954,7 +955,7 @@ static int config_req_id_table(struct switchtec_ntb *sndev,
u32 error;
u32 proxy_id;
- if (ioread32(&mmio_ctrl->req_id_table_size) < count) {
+ if (ioread16(&mmio_ctrl->req_id_table_size) < count) {
dev_err(&sndev->stdev->dev,
"Not enough requester IDs available.\n");
return -EFAULT;
@@ -966,9 +967,6 @@ static int config_req_id_table(struct switchtec_ntb *sndev,
if (rc)
return rc;
- iowrite32(NTB_PART_CTRL_ID_PROT_DIS,
- &mmio_ctrl->partition_ctrl);
-
for (i = 0; i < count; i++) {
iowrite32(req_ids[i] << 16 | NTB_CTRL_REQ_ID_EN,
&mmio_ctrl->req_id_table[i]);
@@ -1090,7 +1088,7 @@ static int crosslink_enum_partition(struct switchtec_ntb *sndev,
{
struct part_cfg_regs __iomem *part_cfg =
&sndev->stdev->mmio_part_cfg_all[sndev->peer_partition];
- u32 pff = ioread32(&part_cfg->vep_pff_inst_id);
+ u32 pff = ioread32(&part_cfg->vep_pff_inst_id) & 0xFF;
struct pff_csr_regs __iomem *mmio_pff =
&sndev->stdev->mmio_pff_csr[pff];
const u64 bar_space = 0x1000000000LL;
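
Several of the switchtec changes swap ffs()/fls() for __ffs64()/fls64() on the 64-bit partition masks. Two things go wrong with the old calls: ffs() takes an int, silently dropping the top 32 bits of part_map, and it returns a 1-based index (0 meaning "no bit set"), while __ffs64() is full-width and 0-based. Both pitfalls in a stand-alone demo:

	#include <stdio.h>
	#include <stdint.h>
	#include <strings.h>	/* POSIX ffs() */

	int main(void)
	{
		uint64_t part_map = 1ULL << 40;	/* only a high partition bit set */

		/* ffs() sees only the low 32 bits: reports "no bit set" */
		printf("ffs((int)map) = %d\n", ffs((int)part_map));	/* 0 */

		/* __builtin_ctzll() is the 0-based index __ffs64() computes */
		printf("ctzll(map)    = %d\n", __builtin_ctzll(part_map));	/* 40 */

		/* note the off-by-one convention: ffs(1) == 1, __ffs64(1) == 0 */
		printf("ffs(1)        = %d\n", ffs(1));
		return 0;
	}
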
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
index 3f05cfbc73af..dd683cb58d09 100644
--- a/drivers/ntb/msi.c
+++ b/drivers/ntb/msi.c
@@ -108,8 +108,10 @@ int ntb_msi_setup_mws(struct ntb_dev *ntb)
if (!ntb->msi)
return -EINVAL;
- desc = first_msi_entry(&ntb->pdev->dev);
+ msi_lock_descs(&ntb->pdev->dev);
+ desc = msi_first_desc(&ntb->pdev->dev, MSI_DESC_ASSOCIATED);
addr = desc->msg.address_lo + ((uint64_t)desc->msg.address_hi << 32);
+ msi_unlock_descs(&ntb->pdev->dev);
for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
@@ -260,8 +262,9 @@ static int ntbm_msi_setup_callback(struct ntb_dev *ntb, struct msi_desc *entry,
* @handler: Function to be called when the IRQ occurs
* @thread_fn: Function to be called in a threaded interrupt context. NULL
* for clients which handle everything in @handler
- * @devname: An ascii name for the claiming device, dev_name(dev) if NULL
+ * @name: An ascii name for the claiming device, dev_name(dev) if NULL
* @dev_id: A cookie passed back to the handler function
+ * @msi_desc: MSI descriptor data which triggers the interrupt
*
* This function assigns an interrupt handler to an unused
* MSI interrupt and returns the descriptor used to trigger
@@ -281,13 +284,15 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
const char *name, void *dev_id,
struct ntb_msi_desc *msi_desc)
{
+ struct device *dev = &ntb->pdev->dev;
struct msi_desc *entry;
int ret;
if (!ntb->msi)
return -EINVAL;
- for_each_pci_msi_entry(entry, ntb->pdev) {
+ msi_lock_descs(dev);
+ msi_for_each_desc(entry, dev, MSI_DESC_ASSOCIATED) {
if (irq_has_action(entry->irq))
continue;
@@ -304,14 +309,17 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
ret = ntbm_msi_setup_callback(ntb, entry, msi_desc);
if (ret) {
devm_free_irq(&ntb->dev, entry->irq, dev_id);
- return ret;
+ goto unlock;
}
-
- return entry->irq;
+ ret = entry->irq;
+ goto unlock;
}
+ ret = -ENODEV;
- return -ENODEV;
+unlock:
+ msi_unlock_descs(dev);
+ return ret;
}
EXPORT_SYMBOL(ntbm_msi_request_threaded_irq);
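
The msi.c hunks adapt NTB to the reworked MSI core: descriptors are now walked with msi_first_desc()/msi_for_each_desc() under msi_lock_descs(), and filtered by state (MSI_DESC_ASSOCIATED) rather than by raw list position. The locked-iteration shape, as a sketch:

	#include <linux/device.h>
	#include <linux/interrupt.h>	/* irq_has_action() */
	#include <linux/msi.h>

	/* Sketch: find an MSI descriptor whose IRQ has no handler yet.
	 * The real code keeps the lock held until the IRQ is claimed;
	 * returning the pointer after unlocking, as here, would be racy. */
	static struct msi_desc *first_free_desc(struct device *dev)
	{
		struct msi_desc *desc, *found = NULL;

		msi_lock_descs(dev);
		msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
			if (!irq_has_action(desc->irq)) {
				found = desc;
				break;
			}
		}
		msi_unlock_descs(dev);
		return found;
	}
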
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c
index 88e1f9a0faaf..1fd667852271 100644
--- a/drivers/nubus/proc.c
+++ b/drivers/nubus/proc.c
@@ -93,30 +93,30 @@ struct nubus_proc_pde_data {
static struct nubus_proc_pde_data *
nubus_proc_alloc_pde_data(unsigned char *ptr, unsigned int size)
{
- struct nubus_proc_pde_data *pde_data;
+ struct nubus_proc_pde_data *pded;
- pde_data = kmalloc(sizeof(*pde_data), GFP_KERNEL);
- if (!pde_data)
+ pded = kmalloc(sizeof(*pded), GFP_KERNEL);
+ if (!pded)
return NULL;
- pde_data->res_ptr = ptr;
- pde_data->res_size = size;
- return pde_data;
+ pded->res_ptr = ptr;
+ pded->res_size = size;
+ return pded;
}
static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
{
struct inode *inode = m->private;
- struct nubus_proc_pde_data *pde_data;
+ struct nubus_proc_pde_data *pded;
- pde_data = PDE_DATA(inode);
- if (!pde_data)
+ pded = pde_data(inode);
+ if (!pded)
return 0;
- if (pde_data->res_size > m->size)
+ if (pded->res_size > m->size)
return -EFBIG;
- if (pde_data->res_size) {
+ if (pded->res_size) {
int lanes = (int)proc_get_parent_data(inode);
struct nubus_dirent ent;
@@ -124,11 +124,11 @@ static int nubus_proc_rsrc_show(struct seq_file *m, void *v)
return 0;
ent.mask = lanes;
- ent.base = pde_data->res_ptr;
+ ent.base = pded->res_ptr;
ent.data = 0;
- nubus_seq_write_rsrc_mem(m, &ent, pde_data->res_size);
+ nubus_seq_write_rsrc_mem(m, &ent, pded->res_size);
} else {
- unsigned int data = (unsigned int)pde_data->res_ptr;
+ unsigned int data = (unsigned int)pded->res_ptr;
seq_putc(m, data >> 16);
seq_putc(m, data >> 8);
@@ -142,18 +142,18 @@ void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir,
unsigned int size)
{
char name[9];
- struct nubus_proc_pde_data *pde_data;
+ struct nubus_proc_pde_data *pded;
if (!procdir)
return;
snprintf(name, sizeof(name), "%x", ent->type);
if (size)
- pde_data = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
+ pded = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size);
else
- pde_data = NULL;
+ pded = NULL;
proc_create_single_data(name, S_IFREG | 0444, procdir,
- nubus_proc_rsrc_show, pde_data);
+ nubus_proc_rsrc_show, pded);
}
void nubus_proc_add_rsrc(struct proc_dir_entry *procdir,
diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig
index b7d1eb38b27d..347fe7afa583 100644
--- a/drivers/nvdimm/Kconfig
+++ b/drivers/nvdimm/Kconfig
@@ -22,7 +22,7 @@ if LIBNVDIMM
config BLK_DEV_PMEM
tristate "PMEM: Persistent memory block device support"
default LIBNVDIMM
- select DAX_DRIVER
+ select DAX
select ND_BTT if BTT
select ND_PFN if NVDIMM_PFN
help
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index fe7ece1534e1..58d95242a836 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -301,29 +301,8 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
}
-/*
- * Use the 'no check' versions of copy_from_iter_flushcache() and
- * copy_mc_to_iter() to bypass HARDENED_USERCOPY overhead. Bounds
- * checking, both file offset and device offset, is handled by
- * dax_iomap_actor()
- */
-static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- return _copy_from_iter_flushcache(addr, bytes, i);
-}
-
-static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
-{
- return _copy_mc_to_iter(addr, bytes, i);
-}
-
static const struct dax_operations pmem_dax_ops = {
.direct_access = pmem_dax_direct_access,
- .dax_supported = generic_fsdax_supported,
- .copy_from_iter = pmem_copy_from_iter,
- .copy_to_iter = pmem_copy_to_iter,
.zero_page_range = pmem_dax_zero_page_range,
};
@@ -379,6 +358,7 @@ static void pmem_release_disk(void *__pmem)
{
struct pmem_device *pmem = __pmem;
+ dax_remove_host(pmem->disk);
kill_dax(pmem->dax_dev);
put_dax(pmem->dax_dev);
del_gendisk(pmem->disk);
@@ -402,7 +382,6 @@ static int pmem_attach_disk(struct device *dev,
struct gendisk *disk;
void *addr;
int rc;
- unsigned long flags = 0UL;
pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
if (!pmem)
@@ -495,19 +474,24 @@ static int pmem_attach_disk(struct device *dev,
nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_range);
disk->bb = &pmem->bb;
- if (is_nvdimm_sync(nd_region))
- flags = DAXDEV_F_SYNC;
- dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
+ dax_dev = alloc_dax(pmem, &pmem_dax_ops);
if (IS_ERR(dax_dev)) {
rc = PTR_ERR(dax_dev);
goto out;
}
+ set_dax_nocache(dax_dev);
+ set_dax_nomc(dax_dev);
+ if (is_nvdimm_sync(nd_region))
+ set_dax_synchronous(dax_dev);
+ rc = dax_add_host(dax_dev, disk);
+ if (rc)
+ goto out_cleanup_dax;
dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
pmem->dax_dev = dax_dev;
rc = device_add_disk(dev, disk, pmem_attribute_groups);
if (rc)
- goto out_cleanup_dax;
+ goto out_remove_host;
if (devm_add_action_or_reset(dev, pmem_release_disk, pmem))
return -ENOMEM;
@@ -519,6 +503,8 @@ static int pmem_attach_disk(struct device *dev,
dev_warn(dev, "'badblocks' notification disabled\n");
return 0;
+out_remove_host:
+ dax_remove_host(pmem->disk);
out_cleanup_dax:
kill_dax(pmem->dax_dev);
put_dax(pmem->dax_dev);
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index 726c7354d465..995b6cdc67ed 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -105,7 +105,7 @@ static void virtio_pmem_remove(struct virtio_device *vdev)
nvdimm_bus_unregister(nvdimm_bus);
vdev->config->del_vqs(vdev);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
}
static struct virtio_driver virtio_pmem_driver = {
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1af8a4513708..469f23186159 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -368,6 +368,7 @@ EXPORT_SYMBOL_GPL(nvme_complete_rq);
void nvme_complete_batch_req(struct request *req)
{
+ trace_nvme_complete_rq(req);
nvme_cleanup_cmd(req);
nvme_end_req_zoned(req);
}
@@ -991,7 +992,6 @@ EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
{
struct nvme_command *cmd = nvme_req(req)->cmd;
- struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
blk_status_t ret = BLK_STS_OK;
if (!(req->rq_flags & RQF_DONTPREP))
@@ -1038,8 +1038,6 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
return BLK_STS_IOERR;
}
- if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
- nvme_req(req)->genctr++;
cmd->common.command_id = nvme_cid(req);
trace_nvme_setup_cmd(req, cmd);
return ret;
@@ -1057,7 +1055,7 @@ static int nvme_execute_rq(struct gendisk *disk, struct request *rq,
{
blk_status_t status;
- status = blk_execute_rq(disk, rq, at_head);
+ status = blk_execute_rq(rq, at_head);
if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
return -EINTR;
if (nvme_req(rq)->status)
@@ -1284,7 +1282,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
rq->timeout = ctrl->kato * HZ;
rq->end_io_data = ctrl;
- blk_execute_rq_nowait(NULL, rq, 0, nvme_keep_alive_end_io);
+ blk_execute_rq_nowait(rq, false, nvme_keep_alive_end_io);
}
static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
@@ -2762,9 +2760,7 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
return -EINVAL;
}
subsys->awupf = le16_to_cpu(id->awupf);
-#ifdef CONFIG_NVME_MULTIPATH
- subsys->iopolicy = NVME_IOPOLICY_NUMA;
-#endif
+ nvme_mpath_default_iopolicy(subsys);
subsys->dev.class = nvme_subsys_class;
subsys->dev.release = nvme_release_subsystem;
@@ -4258,7 +4254,14 @@ static void nvme_async_event_work(struct work_struct *work)
container_of(work, struct nvme_ctrl, async_event_work);
nvme_aen_uevent(ctrl);
- ctrl->ops->submit_async_event(ctrl);
+
+ /*
+ * The transport drivers must guarantee AER submission here is safe by
+ * flushing ctrl async_event_work after changing the controller state
+ * from LIVE and before freeing the admin queue.
+ */
+ if (ctrl->state == NVME_CTRL_LIVE)
+ ctrl->ops->submit_async_event(ctrl);
}
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
@@ -4571,7 +4574,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
return;
- blk_set_queue_dying(ns->queue);
+ blk_mark_disk_dead(ns->disk);
nvme_start_ns_queue(ns);
set_capacity_and_notify(ns->disk, 0);
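[Editor's note: moving the genctr increment out of nvme_setup_cmd() and into nvme_try_complete_req() (see the nvme.h hunk further down) keeps the generation number in step with completions rather than submissions. The command id carries a small generation field in its top bits so a stale or spurious completion can be rejected; a standalone sketch of the scheme, with the 4/12 bit split taken as an assumption about this series:

	/* 16-bit command id = 4-bit generation | 12-bit tag */
	#define CID_GEN_SHIFT	12
	#define CID_TAG_MASK	0x0fff

	static inline unsigned short make_cid(unsigned int gen, unsigned int tag)
	{
		return (unsigned short)(((gen & 0xf) << CID_GEN_SHIFT) |
					(tag & CID_TAG_MASK));
	}

	static inline int cid_is_stale(unsigned short cid, unsigned int cur_gen)
	{
		/* completion carries an old generation: drop it */
		return ((cid >> CID_GEN_SHIFT) & 0xf) != (cur_gen & 0xf);
	}

Bumping the generation on completion means a controller that replays an already-completed command id will mismatch the current generation.]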
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 282d54117e0a..f79a66d4e22c 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -1069,15 +1069,34 @@ out_unlock:
return ret ? ret : count;
}
+static void __nvmf_concat_opt_tokens(struct seq_file *seq_file)
+{
+ const struct match_token *tok;
+ int idx;
+
+ /*
+ * Add dummy entries for instance and cntlid to
+ * signal an invalid/non-existing controller
+ */
+ seq_puts(seq_file, "instance=-1,cntlid=-1");
+ for (idx = 0; idx < ARRAY_SIZE(opt_tokens); idx++) {
+ tok = &opt_tokens[idx];
+ if (tok->token == NVMF_OPT_ERR)
+ continue;
+ seq_puts(seq_file, ",");
+ seq_puts(seq_file, tok->pattern);
+ }
+ seq_puts(seq_file, "\n");
+}
+
static int nvmf_dev_show(struct seq_file *seq_file, void *private)
{
struct nvme_ctrl *ctrl;
- int ret = 0;
mutex_lock(&nvmf_dev_mutex);
ctrl = seq_file->private;
if (!ctrl) {
- ret = -EINVAL;
+ __nvmf_concat_opt_tokens(seq_file);
goto out_unlock;
}
@@ -1086,7 +1105,7 @@ static int nvmf_dev_show(struct seq_file *seq_file, void *private)
out_unlock:
mutex_unlock(&nvmf_dev_mutex);
- return ret;
+ return 0;
}
static int nvmf_dev_open(struct inode *inode, struct file *file)
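[Editor's note: with this change, reading /dev/nvme-fabrics before any controller has been created returns the full option grammar (prefixed with the dummy instance=-1,cntlid=-1 entries) instead of failing with -EINVAL, so userspace such as nvme-cli can probe which connect options the running kernel supports. A hedged userspace sketch, assuming the device node exists and is readable:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd = open("/dev/nvme-fabrics", O_RDONLY);

		if (fd < 0)
			return 1;
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			/* e.g. "instance=-1,cntlid=-1,transport=%s,..." */
			fputs(buf, stdout);
		}
		close(fd);
		return 0;
	}
]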
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index c3203ff1c654..1e3a09cad961 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -170,6 +170,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
struct nvmf_ctrl_options *opts)
{
if (ctrl->state == NVME_CTRL_DELETING ||
+ ctrl->state == NVME_CTRL_DELETING_NOIO ||
ctrl->state == NVME_CTRL_DEAD ||
strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
diff --git a/drivers/nvme/host/fault_inject.c b/drivers/nvme/host/fault_inject.c
index 1352159733b0..83d2e6860d38 100644
--- a/drivers/nvme/host/fault_inject.c
+++ b/drivers/nvme/host/fault_inject.c
@@ -56,7 +56,7 @@ void nvme_fault_inject_fini(struct nvme_fault_inject *fault_inject)
void nvme_should_fail(struct request *req)
{
- struct gendisk *disk = req->rq_disk;
+ struct gendisk *disk = req->q->disk;
struct nvme_fault_inject *fault_inject = NULL;
u16 status;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 13e5d503ed07..ff775235534c 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -13,6 +13,42 @@ module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
+static const char *nvme_iopolicy_names[] = {
+ [NVME_IOPOLICY_NUMA] = "numa",
+ [NVME_IOPOLICY_RR] = "round-robin",
+};
+
+static int iopolicy = NVME_IOPOLICY_NUMA;
+
+static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
+{
+ if (!val)
+ return -EINVAL;
+ if (!strncmp(val, "numa", 4))
+ iopolicy = NVME_IOPOLICY_NUMA;
+ else if (!strncmp(val, "round-robin", 11))
+ iopolicy = NVME_IOPOLICY_RR;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+
+static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
+{
+ return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
+}
+
+module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
+ &iopolicy, 0644);
+MODULE_PARM_DESC(iopolicy,
+ "Default multipath I/O policy; 'numa' (default) or 'round-robin'");
+
+void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
+{
+ subsys->iopolicy = iopolicy;
+}
+
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys)
{
struct nvme_ns_head *h;
@@ -706,11 +742,6 @@ void nvme_mpath_stop(struct nvme_ctrl *ctrl)
struct device_attribute subsys_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
-static const char *nvme_iopolicy_names[] = {
- [NVME_IOPOLICY_NUMA] = "numa",
- [NVME_IOPOLICY_RR] = "round-robin",
-};
-
static ssize_t nvme_subsys_iopolicy_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -817,7 +848,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{
if (!head->disk)
return;
- blk_set_queue_dying(head->disk->queue);
+ blk_mark_disk_dead(head->disk);
/* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
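[Editor's note: the new iopolicy module parameter above only sets the default copied into subsequently created subsystems via nvme_mpath_default_iopolicy(); existing subsystems keep their per-subsystem sysfs attribute. It can be set as nvme_core.iopolicy=round-robin on the kernel command line, or at runtime through sysfs. A small sketch of flipping it from C, with the path derived from the parameter name (an assumption, verify on the target system):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/module/nvme_core/parameters/iopolicy", "w");

		if (!f)
			return 1;
		/* subsystems created after this default to round-robin */
		fputs("round-robin\n", f);
		return fclose(f) ? 1 : 0;
	}
]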
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9b095ee01364..a162f6c6da6e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -614,6 +614,10 @@ static inline bool nvme_try_complete_req(struct request *req, __le16 status,
union nvme_result result)
{
struct nvme_request *rq = nvme_req(req);
+ struct nvme_ctrl *ctrl = rq->ctrl;
+
+ if (!(ctrl->quirks & NVME_QUIRK_SKIP_CID_GEN))
+ rq->genctr++;
rq->status = le16_to_cpu(status) >> 1;
rq->result = result;
@@ -763,6 +767,7 @@ static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
void nvme_mpath_unfreeze(struct nvme_subsystem *subsys);
void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
+void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys);
bool nvme_mpath_set_disk_name(struct nvme_ns *ns, char *disk_name, int *flags);
void nvme_failover_req(struct request *req);
void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
@@ -860,6 +865,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
+static inline void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
+{
+}
#endif /* CONFIG_NVME_MULTIPATH */
int nvme_revalidate_zones(struct nvme_ns *ns);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index ca2ee806d74b..6a99ed680915 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -500,22 +500,13 @@ static inline void nvme_write_sq_db(struct nvme_queue *nvmeq, bool write_sq)
nvmeq->last_sq_tail = nvmeq->sq_tail;
}
-/**
- * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
- * @nvmeq: The queue to use
- * @cmd: The command to send
- * @write_sq: whether to write to the SQ doorbell
- */
-static void nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
- bool write_sq)
+static inline void nvme_sq_copy_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd)
{
- spin_lock(&nvmeq->sq_lock);
memcpy(nvmeq->sq_cmds + (nvmeq->sq_tail << nvmeq->sqes),
- cmd, sizeof(*cmd));
+ absolute_pointer(cmd), sizeof(*cmd));
if (++nvmeq->sq_tail == nvmeq->q_depth)
nvmeq->sq_tail = 0;
- nvme_write_sq_db(nvmeq, write_sq);
- spin_unlock(&nvmeq->sq_lock);
}
static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
@@ -912,52 +903,32 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
return BLK_STS_OK;
}
-/*
- * NOTE: ns is NULL when called on the admin queue.
- */
-static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
- const struct blk_mq_queue_data *bd)
+static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
{
- struct nvme_ns *ns = hctx->queue->queuedata;
- struct nvme_queue *nvmeq = hctx->driver_data;
- struct nvme_dev *dev = nvmeq->dev;
- struct request *req = bd->rq;
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
- struct nvme_command *cmnd = &iod->cmd;
blk_status_t ret;
iod->aborted = 0;
iod->npages = -1;
iod->nents = 0;
- /*
- * We should not need to do this, but we're still using this to
- * ensure we can drain requests on a dying queue.
- */
- if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
- return BLK_STS_IOERR;
-
- if (!nvme_check_ready(&dev->ctrl, req, true))
- return nvme_fail_nonready_command(&dev->ctrl, req);
-
- ret = nvme_setup_cmd(ns, req);
+ ret = nvme_setup_cmd(req->q->queuedata, req);
if (ret)
return ret;
if (blk_rq_nr_phys_segments(req)) {
- ret = nvme_map_data(dev, req, cmnd);
+ ret = nvme_map_data(dev, req, &iod->cmd);
if (ret)
goto out_free_cmd;
}
if (blk_integrity_rq(req)) {
- ret = nvme_map_metadata(dev, req, cmnd);
+ ret = nvme_map_metadata(dev, req, &iod->cmd);
if (ret)
goto out_unmap_data;
}
blk_mq_start_request(req);
- nvme_submit_cmd(nvmeq, cmnd, bd->last);
return BLK_STS_OK;
out_unmap_data:
nvme_unmap_data(dev, req);
@@ -966,6 +937,96 @@ out_free_cmd:
return ret;
}
+/*
+ * NOTE: ns is NULL when called on the admin queue.
+ */
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+ const struct blk_mq_queue_data *bd)
+{
+ struct nvme_queue *nvmeq = hctx->driver_data;
+ struct nvme_dev *dev = nvmeq->dev;
+ struct request *req = bd->rq;
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+ blk_status_t ret;
+
+ /*
+ * We should not need to do this, but we're still using this to
+ * ensure we can drain requests on a dying queue.
+ */
+ if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+ return BLK_STS_IOERR;
+
+ if (unlikely(!nvme_check_ready(&dev->ctrl, req, true)))
+ return nvme_fail_nonready_command(&dev->ctrl, req);
+
+ ret = nvme_prep_rq(dev, req);
+ if (unlikely(ret))
+ return ret;
+ spin_lock(&nvmeq->sq_lock);
+ nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+ nvme_write_sq_db(nvmeq, bd->last);
+ spin_unlock(&nvmeq->sq_lock);
+ return BLK_STS_OK;
+}
+
+static void nvme_submit_cmds(struct nvme_queue *nvmeq, struct request **rqlist)
+{
+ spin_lock(&nvmeq->sq_lock);
+ while (!rq_list_empty(*rqlist)) {
+ struct request *req = rq_list_pop(rqlist);
+ struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+ nvme_sq_copy_cmd(nvmeq, &iod->cmd);
+ }
+ nvme_write_sq_db(nvmeq, true);
+ spin_unlock(&nvmeq->sq_lock);
+}
+
+static bool nvme_prep_rq_batch(struct nvme_queue *nvmeq, struct request *req)
+{
+ /*
+ * We should not need to do this, but we're still using this to
+ * ensure we can drain requests on a dying queue.
+ */
+ if (unlikely(!test_bit(NVMEQ_ENABLED, &nvmeq->flags)))
+ return false;
+ if (unlikely(!nvme_check_ready(&nvmeq->dev->ctrl, req, true)))
+ return false;
+
+ req->mq_hctx->tags->rqs[req->tag] = req;
+ return nvme_prep_rq(nvmeq->dev, req) == BLK_STS_OK;
+}
+
+static void nvme_queue_rqs(struct request **rqlist)
+{
+ struct request *req, *next, *prev = NULL;
+ struct request *requeue_list = NULL;
+
+ rq_list_for_each_safe(rqlist, req, next) {
+ struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
+
+ if (!nvme_prep_rq_batch(nvmeq, req)) {
+ /* detach 'req' and add to remainder list */
+ rq_list_move(rqlist, &requeue_list, req, prev);
+
+ req = prev;
+ if (!req)
+ continue;
+ }
+
+ if (!next || req->mq_hctx != next->mq_hctx) {
+ /* detach rest of list, and submit */
+ req->rq_next = NULL;
+ nvme_submit_cmds(nvmeq, rqlist);
+ *rqlist = next;
+ prev = NULL;
+ } else
+ prev = req;
+ }
+
+ *rqlist = requeue_list;
+}
+
static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -1140,7 +1201,11 @@ static void nvme_pci_submit_async_event(struct nvme_ctrl *ctrl)
c.common.opcode = nvme_admin_async_event;
c.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
- nvme_submit_cmd(nvmeq, &c, true);
+
+ spin_lock(&nvmeq->sq_lock);
+ nvme_sq_copy_cmd(nvmeq, &c);
+ nvme_write_sq_db(nvmeq, true);
+ spin_unlock(&nvmeq->sq_lock);
}
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
@@ -1371,7 +1436,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
}
abort_req->end_io_data = NULL;
- blk_execute_rq_nowait(NULL, abort_req, 0, abort_endio);
+ blk_execute_rq_nowait(abort_req, false, abort_endio);
/*
* The aborted req will be completed on receiving the abort req.
@@ -1663,6 +1728,7 @@ static const struct blk_mq_ops nvme_mq_admin_ops = {
static const struct blk_mq_ops nvme_mq_ops = {
.queue_rq = nvme_queue_rq,
+ .queue_rqs = nvme_queue_rqs,
.complete = nvme_pci_complete_rq,
.commit_rqs = nvme_commit_rqs,
.init_hctx = nvme_init_hctx,
@@ -2416,9 +2482,8 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
req->end_io_data = nvmeq;
init_completion(&nvmeq->delete_done);
- blk_execute_rq_nowait(NULL, req, false,
- opcode == nvme_admin_delete_cq ?
- nvme_del_cq_end : nvme_del_queue_end);
+ blk_execute_rq_nowait(req, false, opcode == nvme_admin_delete_cq ?
+ nvme_del_cq_end : nvme_del_queue_end);
return 0;
}
@@ -3326,7 +3391,8 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_DEALLOCATE_ZEROES, },
{ PCI_VDEVICE(INTEL, 0x0a54), /* Intel P4500/P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
- NVME_QUIRK_DEALLOCATE_ZEROES, },
+ NVME_QUIRK_DEALLOCATE_ZEROES |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_VDEVICE(INTEL, 0x0a55), /* Dell Express Flash P4600 */
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DEALLOCATE_ZEROES, },
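[Editor's note: nvme_queue_rqs() above walks the plugged request list once, peels off runs of requests that share an hctx, and takes sq_lock plus the doorbell write once per run instead of once per request; anything failing prep is shunted to a requeue list. The split-by-key walk can be shown on a plain singly linked list as a standalone toy (requeue handling omitted):

	#include <stdio.h>
	#include <stddef.h>

	struct rq {
		struct rq *next;
		int queue;	/* stand-in for req->mq_hctx */
		int id;
	};

	/* submit one run that shares a queue: one "doorbell" write */
	static void submit_run(struct rq *head)
	{
		for (; head; head = head->next)
			printf("  cmd %d\n", head->id);
		printf("  ring doorbell once\n");
	}

	static void queue_rqs(struct rq *list)
	{
		while (list) {
			struct rq *run = list, *prev = list;

			/* extend the run while the queue key matches */
			while (prev->next && prev->next->queue == run->queue)
				prev = prev->next;

			list = prev->next;	/* detach the remainder */
			prev->next = NULL;
			printf("queue %d:\n", run->queue);
			submit_run(run);
		}
	}

	int main(void)
	{
		struct rq c = { NULL, 1, 3 }, b = { &c, 0, 2 }, a = { &b, 0, 1 };

		queue_rqs(&a);
		return 0;
	}

The real driver gets the same effect with rq_list_move()/rq_list_pop() and defers the doorbell to nvme_write_sq_db() at the end of each run.]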
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 850f84d204d0..9c55e4be8a39 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1200,6 +1200,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
struct nvme_rdma_ctrl, err_work);
nvme_stop_keep_alive(&ctrl->ctrl);
+ flush_work(&ctrl->ctrl.async_event_work);
nvme_rdma_teardown_io_queues(ctrl, false);
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_teardown_admin_queue(ctrl, false);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 4ceb28675fdf..891a36d02e7c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -913,7 +913,15 @@ static inline void nvme_tcp_done_send_req(struct nvme_tcp_queue *queue)
static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
{
- nvme_tcp_end_request(blk_mq_rq_from_pdu(req), NVME_SC_HOST_PATH_ERROR);
+ if (nvme_tcp_async_req(req)) {
+ union nvme_result res = {};
+
+ nvme_complete_async_event(&req->queue->ctrl->ctrl,
+ cpu_to_le16(NVME_SC_HOST_PATH_ERROR), &res);
+ } else {
+ nvme_tcp_end_request(blk_mq_rq_from_pdu(req),
+ NVME_SC_HOST_PATH_ERROR);
+ }
}
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
@@ -2096,6 +2104,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
nvme_stop_keep_alive(ctrl);
+ flush_work(&ctrl->async_event_work);
nvme_tcp_teardown_io_queues(ctrl, false);
/* unquiesce to fail fast pending requests */
nvme_start_queues(ctrl);
diff --git a/drivers/nvme/host/trace.h b/drivers/nvme/host/trace.h
index 35bac7a25422..b5f85259461a 100644
--- a/drivers/nvme/host/trace.h
+++ b/drivers/nvme/host/trace.h
@@ -68,7 +68,7 @@ TRACE_EVENT(nvme_setup_cmd,
__entry->nsid = le32_to_cpu(cmd->common.nsid);
__entry->metadata = !!blk_integrity_rq(req);
__entry->fctype = cmd->fabrics.fctype;
- __assign_disk_name(__entry->disk, req->rq_disk);
+ __assign_disk_name(__entry->disk, req->q->disk);
memcpy(__entry->cdw10, &cmd->common.cdw10,
sizeof(__entry->cdw10));
),
@@ -103,7 +103,7 @@ TRACE_EVENT(nvme_complete_rq,
__entry->retries = nvme_req(req)->retries;
__entry->flags = nvme_req(req)->flags;
__entry->status = nvme_req(req)->status;
- __assign_disk_name(__entry->disk, req->rq_disk);
+ __assign_disk_name(__entry->disk, req->q->disk);
),
TP_printk("nvme%d: %sqid=%d, cmdid=%u, res=%#llx, retries=%u, flags=0x%x, status=%#x",
__entry->ctrl_id, __print_disk_name(__entry->disk),
@@ -153,7 +153,7 @@ TRACE_EVENT(nvme_sq,
),
TP_fast_assign(
__entry->ctrl_id = nvme_req(req)->ctrl->instance;
- __assign_disk_name(__entry->disk, req->rq_disk);
+ __assign_disk_name(__entry->disk, req->q->disk);
__entry->qid = nvme_req_qid(req);
__entry->sq_head = le16_to_cpu(sq_head);
__entry->sq_tail = sq_tail;
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index f0efb3537989..9e5b89ae29df 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -284,8 +284,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
schedule_work(&req->p.work);
} else {
rq->end_io_data = req;
- blk_execute_rq_nowait(ns ? ns->disk : NULL, rq, 0,
- nvmet_passthru_req_done);
+ blk_execute_rq_nowait(rq, false, nvmet_passthru_req_done);
}
if (ns)
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index e765d3d0542e..23a38dcf0fc4 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -312,6 +312,8 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
struct device *dev = kobj_to_dev(kobj);
struct nvmem_device *nvmem = to_nvmem_device(dev);
+ attr->size = nvmem->size;
+
return nvmem_bin_attr_get_umode(nvmem);
}
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index 6a537d959f14..e9a375dd84af 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -19,11 +19,12 @@ static int mtk_reg_read(void *context,
unsigned int reg, void *_val, size_t bytes)
{
struct mtk_efuse_priv *priv = context;
- u32 *val = _val;
- int i = 0, words = bytes / 4;
+ void __iomem *addr = priv->base + reg;
+ u8 *val = _val;
+ int i;
- while (words--)
- *val++ = readl(priv->base + reg + (i++ * 4));
+ for (i = 0; i < bytes; i++, val++)
+ *val = readb(addr + i);
return 0;
}
@@ -45,8 +46,8 @@ static int mtk_efuse_probe(struct platform_device *pdev)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
- econfig.stride = 4;
- econfig.word_size = 4;
+ econfig.stride = 1;
+ econfig.word_size = 1;
econfig.reg_read = mtk_reg_read;
econfig.size = resource_size(res);
econfig.priv = priv;
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 61de453b885c..e7d92b67cb8a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -651,6 +651,28 @@ bool of_device_is_available(const struct device_node *device)
EXPORT_SYMBOL(of_device_is_available);
/**
+ * __of_device_is_fail - check if a device has status "fail" or "fail-..."
+ *
+ * @device: Node to check status for, with locks already held
+ *
+ * Return: True if the status property is set to "fail" or "fail-..." (for any
+ * error code suffix), false otherwise
+ */
+static bool __of_device_is_fail(const struct device_node *device)
+{
+ const char *status;
+
+ if (!device)
+ return false;
+
+ status = __of_get_property(device, "status", NULL);
+ if (status == NULL)
+ return false;
+
+ return !strcmp(status, "fail") || !strncmp(status, "fail-", 5);
+}
+
+/**
* of_device_is_big_endian - check if a device has BE registers
*
* @device: Node to check for endianness
@@ -796,6 +818,9 @@ EXPORT_SYMBOL(of_get_next_available_child);
* of_get_next_cpu_node - Iterate on cpu nodes
* @prev: previous child of the /cpus node, or NULL to get first
*
+ * Unusable CPUs (those with the status property set to "fail" or "fail-...")
+ * will be skipped.
+ *
* Return: A cpu node pointer with refcount incremented, use of_node_put()
* on it when done. Returns NULL when prev is the last child. Decrements
* the refcount of prev.
@@ -817,6 +842,8 @@ struct device_node *of_get_next_cpu_node(struct device_node *prev)
of_node_put(node);
}
for (; next; next = next->sibling) {
+ if (__of_device_is_fail(next))
+ continue;
if (!(of_node_name_eq(next, "cpu") ||
__of_node_is_type(next, "cpu")))
continue;
@@ -1349,9 +1376,14 @@ int of_phandle_iterator_next(struct of_phandle_iterator *it)
* property data length
*/
if (it->cur + count > it->list_end) {
- pr_err("%pOF: %s = %d found %d\n",
- it->parent, it->cells_name,
- count, it->cell_count);
+ if (it->cells_name)
+ pr_err("%pOF: %s = %d found %td\n",
+ it->parent, it->cells_name,
+ count, it->list_end - it->cur);
+ else
+ pr_err("%pOF: phandle %s needs %d, found %td\n",
+ it->parent, of_node_full_name(it->node),
+ count, it->list_end - it->cur);
goto err;
}
}
@@ -1388,15 +1420,18 @@ int of_phandle_iterator_args(struct of_phandle_iterator *it,
return count;
}
-static int __of_parse_phandle_with_args(const struct device_node *np,
- const char *list_name,
- const char *cells_name,
- int cell_count, int index,
- struct of_phandle_args *out_args)
+int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int cell_count, int index,
+ struct of_phandle_args *out_args)
{
struct of_phandle_iterator it;
int rc, cur_index = 0;
+ if (index < 0)
+ return -EINVAL;
+
/* Loop over the phandles until the requested entry is found */
of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
/*
@@ -1439,82 +1474,7 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
of_node_put(it.node);
return rc;
}
-
-/**
- * of_parse_phandle - Resolve a phandle property to a device_node pointer
- * @np: Pointer to device node holding phandle property
- * @phandle_name: Name of property holding a phandle value
- * @index: For properties holding a table of phandles, this is the index into
- * the table
- *
- * Return: The device_node pointer with refcount incremented. Use
- * of_node_put() on it when done.
- */
-struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name, int index)
-{
- struct of_phandle_args args;
-
- if (index < 0)
- return NULL;
-
- if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
- index, &args))
- return NULL;
-
- return args.np;
-}
-EXPORT_SYMBOL(of_parse_phandle);
-
-/**
- * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
- * @np: pointer to a device tree node containing a list
- * @list_name: property name that contains a list
- * @cells_name: property name that specifies phandles' arguments count
- * @index: index of a phandle to parse out
- * @out_args: optional pointer to output arguments structure (will be filled)
- *
- * This function is useful to parse lists of phandles and their arguments.
- * Returns 0 on success and fills out_args, on error returns appropriate
- * errno value.
- *
- * Caller is responsible to call of_node_put() on the returned out_args->np
- * pointer.
- *
- * Example::
- *
- * phandle1: node1 {
- * #list-cells = <2>;
- * };
- *
- * phandle2: node2 {
- * #list-cells = <1>;
- * };
- *
- * node3 {
- * list = <&phandle1 1 2 &phandle2 3>;
- * };
- *
- * To get a device_node of the ``node2`` node you may call this:
- * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
- */
-int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
- const char *cells_name, int index,
- struct of_phandle_args *out_args)
-{
- int cell_count = -1;
-
- if (index < 0)
- return -EINVAL;
-
- /* If cells_name is NULL we assume a cell count of 0 */
- if (!cells_name)
- cell_count = 0;
-
- return __of_parse_phandle_with_args(np, list_name, cells_name,
- cell_count, index, out_args);
-}
-EXPORT_SYMBOL(of_parse_phandle_with_args);
+EXPORT_SYMBOL(__of_parse_phandle_with_args);
/**
* of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
@@ -1701,47 +1661,6 @@ free:
EXPORT_SYMBOL(of_parse_phandle_with_args_map);
/**
- * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
- * @np: pointer to a device tree node containing a list
- * @list_name: property name that contains a list
- * @cell_count: number of argument cells following the phandle
- * @index: index of a phandle to parse out
- * @out_args: optional pointer to output arguments structure (will be filled)
- *
- * This function is useful to parse lists of phandles and their arguments.
- * Returns 0 on success and fills out_args, on error returns appropriate
- * errno value.
- *
- * Caller is responsible to call of_node_put() on the returned out_args->np
- * pointer.
- *
- * Example::
- *
- * phandle1: node1 {
- * };
- *
- * phandle2: node2 {
- * };
- *
- * node3 {
- * list = <&phandle1 0 2 &phandle2 2 3>;
- * };
- *
- * To get a device_node of the ``node2`` node you may call this:
- * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
- */
-int of_parse_phandle_with_fixed_args(const struct device_node *np,
- const char *list_name, int cell_count,
- int index, struct of_phandle_args *out_args)
-{
- if (index < 0)
- return -EINVAL;
- return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
- index, out_args);
-}
-EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
-
-/**
* of_count_phandle_with_args() - Find the number of phandles references in a property
* @np: pointer to a device tree node containing a list
* @list_name: property name that contains a list
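[Editor's note: because the status = "fail" / "fail-..." skip happens inside of_get_next_cpu_node(), every existing CPU iterator picks it up for free; an unusable CPU simply never appears. Typical consumer loop, kernel context:

	struct device_node *cpu;

	for_each_of_cpu_node(cpu) {
		/* never visits nodes with status = "fail" or "fail-..." */
		pr_info("usable cpu: %pOF\n", cpu);
	}
]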
diff --git a/drivers/of/device.c b/drivers/of/device.c
index b0800c260f64..874f031442dc 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -28,7 +28,7 @@
const struct of_device_id *of_match_device(const struct of_device_id *matches,
const struct device *dev)
{
- if ((!matches) || (!dev->of_node))
+ if (!matches || !dev->of_node || dev->of_node_reused)
return NULL;
return of_match_node(matches, dev->of_node);
}
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index bdca35284ceb..ad85ff6474ff 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -26,6 +26,7 @@
#include <linux/serial_core.h>
#include <linux/sysfs.h>
#include <linux/random.h>
+#include <linux/kmemleak.h>
#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#include <asm/page.h>
@@ -482,9 +483,11 @@ static int __init early_init_dt_reserve_memory_arch(phys_addr_t base,
if (nomap) {
/*
* If the memory is already reserved (by another region), we
- * should not allow it to be marked nomap.
+ * should not allow it to be marked nomap, but don't worry
+ * if the region isn't memory as it won't be mapped.
*/
- if (memblock_is_region_reserved(base, size))
+ if (memblock_overlaps_region(&memblock.memory, base, size) &&
+ memblock_is_region_reserved(base, size))
return -EBUSY;
return memblock_mark_nomap(base, size);
@@ -522,9 +525,12 @@ static int __init __reserved_mem_reserve_reg(unsigned long node,
size = dt_mem_next_cell(dt_root_size_cells, &prop);
if (size &&
- early_init_dt_reserve_memory_arch(base, size, nomap) == 0)
+ early_init_dt_reserve_memory_arch(base, size, nomap) == 0) {
pr_debug("Reserved memory: reserved region for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
+ if (!nomap)
+ kmemleak_alloc_phys(base, size, 0, 0);
+ }
else
pr_info("Reserved memory: failed to reserve memory for node '%s': base %pa, size %lu MiB\n",
uname, &base, (unsigned long)(size / SZ_1M));
@@ -965,18 +971,22 @@ static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
elfcorehdr_addr, elfcorehdr_size);
}
-static phys_addr_t cap_mem_addr;
-static phys_addr_t cap_mem_size;
+static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;
/**
* early_init_dt_check_for_usable_mem_range - Decode usable memory range
* location from flat tree
- * @node: reference to node containing usable memory range location ('chosen')
*/
-static void __init early_init_dt_check_for_usable_mem_range(unsigned long node)
+void __init early_init_dt_check_for_usable_mem_range(void)
{
const __be32 *prop;
int len;
+ phys_addr_t cap_mem_addr;
+ phys_addr_t cap_mem_size;
+ unsigned long node = chosen_node_offset;
+
+ if ((long)node < 0)
+ return;
pr_debug("Looking for usable-memory-range property... ");
@@ -989,6 +999,8 @@ static void __init early_init_dt_check_for_usable_mem_range(unsigned long node)
pr_debug("cap_mem_start=%pa cap_mem_size=%pa\n", &cap_mem_addr,
&cap_mem_size);
+
+ memblock_cap_memory_range(cap_mem_addr, cap_mem_size);
}
#ifdef CONFIG_SERIAL_EARLYCON
@@ -1042,13 +1054,14 @@ int __init early_init_dt_scan_chosen_stdout(void)
/*
* early_init_dt_scan_root - fetch the top level address and size cells
*/
-int __init early_init_dt_scan_root(unsigned long node, const char *uname,
- int depth, void *data)
+int __init early_init_dt_scan_root(void)
{
const __be32 *prop;
+ const void *fdt = initial_boot_params;
+ int node = fdt_path_offset(fdt, "/");
- if (depth != 0)
- return 0;
+ if (node < 0)
+ return -ENODEV;
dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
@@ -1063,8 +1076,7 @@ int __init early_init_dt_scan_root(unsigned long node, const char *uname,
dt_root_addr_cells = be32_to_cpup(prop);
pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
- /* break now */
- return 1;
+ return 0;
}
u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
@@ -1078,73 +1090,78 @@ u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
/*
* early_init_dt_scan_memory - Look for and parse memory nodes
*/
-int __init early_init_dt_scan_memory(unsigned long node, const char *uname,
- int depth, void *data)
+int __init early_init_dt_scan_memory(void)
{
- const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
- const __be32 *reg, *endp;
- int l;
- bool hotpluggable;
+ int node;
+ const void *fdt = initial_boot_params;
- /* We are scanning "memory" nodes only */
- if (type == NULL || strcmp(type, "memory") != 0)
- return 0;
+ fdt_for_each_subnode(node, fdt, 0) {
+ const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
+ const __be32 *reg, *endp;
+ int l;
+ bool hotpluggable;
- reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
- if (reg == NULL)
- reg = of_get_flat_dt_prop(node, "reg", &l);
- if (reg == NULL)
- return 0;
+ /* We are scanning "memory" nodes only */
+ if (type == NULL || strcmp(type, "memory") != 0)
+ continue;
- endp = reg + (l / sizeof(__be32));
- hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
+ reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
+ if (reg == NULL)
+ reg = of_get_flat_dt_prop(node, "reg", &l);
+ if (reg == NULL)
+ continue;
- pr_debug("memory scan node %s, reg size %d,\n", uname, l);
+ endp = reg + (l / sizeof(__be32));
+ hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
- while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
- u64 base, size;
+ pr_debug("memory scan node %s, reg size %d,\n",
+ fdt_get_name(fdt, node, NULL), l);
- base = dt_mem_next_cell(dt_root_addr_cells, &reg);
- size = dt_mem_next_cell(dt_root_size_cells, &reg);
+ while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
+ u64 base, size;
- if (size == 0)
- continue;
- pr_debug(" - %llx, %llx\n", base, size);
+ base = dt_mem_next_cell(dt_root_addr_cells, &reg);
+ size = dt_mem_next_cell(dt_root_size_cells, &reg);
- early_init_dt_add_memory_arch(base, size);
+ if (size == 0)
+ continue;
+ pr_debug(" - %llx, %llx\n", base, size);
- if (!hotpluggable)
- continue;
+ early_init_dt_add_memory_arch(base, size);
- if (memblock_mark_hotplug(base, size))
- pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
- base, base + size);
- }
+ if (!hotpluggable)
+ continue;
+ if (memblock_mark_hotplug(base, size))
+ pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
+ base, base + size);
+ }
+ }
return 0;
}
-int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
- int depth, void *data)
+int __init early_init_dt_scan_chosen(char *cmdline)
{
- int l;
+ int l, node;
const char *p;
const void *rng_seed;
+ const void *fdt = initial_boot_params;
- pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
+ node = fdt_path_offset(fdt, "/chosen");
+ if (node < 0)
+ node = fdt_path_offset(fdt, "/chosen@0");
+ if (node < 0)
+ return -ENOENT;
- if (depth != 1 || !data ||
- (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
- return 0;
+ chosen_node_offset = node;
early_init_dt_check_for_initrd(node);
early_init_dt_check_for_elfcorehdr(node);
- early_init_dt_check_for_usable_mem_range(node);
/* Retrieve command line */
p = of_get_flat_dt_prop(node, "bootargs", &l);
if (p != NULL && l > 0)
- strlcpy(data, p, min(l, COMMAND_LINE_SIZE));
+ strlcpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
/*
* CONFIG_CMDLINE is meant to be a default in case nothing else
@@ -1153,18 +1170,18 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
*/
#ifdef CONFIG_CMDLINE
#if defined(CONFIG_CMDLINE_EXTEND)
- strlcat(data, " ", COMMAND_LINE_SIZE);
- strlcat(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strlcat(cmdline, " ", COMMAND_LINE_SIZE);
+ strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#elif defined(CONFIG_CMDLINE_FORCE)
- strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ strlcpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#else
/* No arguments from boot loader, use kernel's cmdline */
- if (!((char *)data)[0])
- strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ if (!((char *)cmdline)[0])
+ strlcpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
#endif /* CONFIG_CMDLINE */
- pr_debug("Command line is: %s\n", (char *)data);
+ pr_debug("Command line is: %s\n", (char *)cmdline);
rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
if (rng_seed && l > 0) {
@@ -1178,8 +1195,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
fdt_totalsize(initial_boot_params));
}
- /* break now */
- return 1;
+ return 0;
}
#ifndef MIN_MEMBLOCK_ADDR
@@ -1261,21 +1277,21 @@ bool __init early_init_dt_verify(void *params)
void __init early_init_dt_scan_nodes(void)
{
- int rc = 0;
+ int rc;
/* Initialize {size,address}-cells info */
- of_scan_flat_dt(early_init_dt_scan_root, NULL);
+ early_init_dt_scan_root();
/* Retrieve various information from the /chosen node */
- rc = of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
- if (!rc)
+ rc = early_init_dt_scan_chosen(boot_command_line);
+ if (rc)
pr_warn("No chosen node found, continuing without\n");
/* Setup memory, calling early_init_dt_add_memory_arch */
- of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+ early_init_dt_scan_memory();
/* Handle linux,usable-memory-range property */
- memblock_cap_memory_range(cap_mem_addr, cap_mem_size);
+ early_init_dt_check_for_usable_mem_range();
}
bool __init early_init_dt_scan(void *params)
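[Editor's note: the fdt.c hunks above replace the of_scan_flat_dt() callback walks, which visited every node and returned 1 to stop early, with direct libfdt lookups: fdt_path_offset() for one-off nodes and fdt_for_each_subnode() for the /memory scan, caching the /chosen offset for the later usable-memory pass. A kernel-context sketch of the lookup style:

	const void *fdt = initial_boot_params;
	int chosen, node;

	chosen = fdt_path_offset(fdt, "/chosen");
	if (chosen < 0)
		chosen = fdt_path_offset(fdt, "/chosen@0");
	/* negative means not present; cache the offset, test it later */

	fdt_for_each_subnode(node, fdt, 0) {
		/* each direct child of the root, no callback plumbing */
	}

Dropping the callbacks also removes the void *data argument, which is why early_init_dt_scan_chosen() now takes the cmdline buffer explicitly.]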
diff --git a/drivers/of/property.c b/drivers/of/property.c
index a3483484a5a2..8e90071de6ed 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1075,6 +1075,17 @@ static struct device_node *of_get_compat_node(struct device_node *np)
return np;
}
+static struct device_node *of_get_compat_node_parent(struct device_node *np)
+{
+ struct device_node *parent, *node;
+
+ parent = of_get_parent(np);
+ node = of_get_compat_node(parent);
+ of_node_put(parent);
+
+ return node;
+}
+
/**
* of_link_to_phandle - Add fwnode link to supplier from supplier phandle
* @con_np: consumer device tree node
@@ -1249,7 +1260,9 @@ static struct device_node *parse_##fname(struct device_node *np, \
* @parse_prop.index: For properties holding a list of phandles, this is the
* index into the list
* @optional: Describes whether a supplier is mandatory or not
- * @node_not_dev: The consumer node containing the property is never a device.
+ * @node_not_dev: The consumer node containing the property is never converted
+ * to a struct device. Instead, parse ancestor nodes for the
+ * compatible property to find a node corresponding to a device.
*
* Returns:
* parse_prop() return values are
@@ -1424,7 +1437,7 @@ static int of_link_property(struct device_node *con_np, const char *prop_name)
struct device_node *con_dev_np;
con_dev_np = s->node_not_dev
- ? of_get_compat_node(con_np)
+ ? of_get_compat_node_parent(con_np)
: of_node_get(con_np);
matched = true;
i++;
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 481ba8682ebf..70992103c07d 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -911,11 +911,18 @@ static void __init of_unittest_dma_ranges_one(const char *path,
if (!rc) {
phys_addr_t paddr;
dma_addr_t dma_addr;
- struct device dev_bogus;
+ struct device *dev_bogus;
- dev_bogus.dma_range_map = map;
- paddr = dma_to_phys(&dev_bogus, expect_dma_addr);
- dma_addr = phys_to_dma(&dev_bogus, expect_paddr);
+ dev_bogus = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!dev_bogus) {
+ unittest(0, "kzalloc() failed\n");
+ kfree(map);
+ return;
+ }
+
+ dev_bogus->dma_range_map = map;
+ paddr = dma_to_phys(dev_bogus, expect_dma_addr);
+ dma_addr = phys_to_dma(dev_bogus, expect_paddr);
unittest(paddr == expect_paddr,
"of_dma_get_range: wrong phys addr %pap (expecting %llx) on node %pOF\n",
@@ -925,6 +932,7 @@ static void __init of_unittest_dma_ranges_one(const char *path,
&dma_addr, expect_dma_addr, np);
kfree(map);
+ kfree(dev_bogus);
}
of_node_put(np);
#endif
@@ -934,8 +942,9 @@ static void __init of_unittest_parse_dma_ranges(void)
{
of_unittest_dma_ranges_one("/testcase-data/address-tests/device@70000000",
0x0, 0x20000000);
- of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000",
- 0x100000000, 0x20000000);
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ of_unittest_dma_ranges_one("/testcase-data/address-tests/bus@80000000/device@1000",
+ 0x100000000, 0x20000000);
of_unittest_dma_ranges_one("/testcase-data/address-tests/pci@90000000",
0x80000000, 0x20000000);
}
@@ -1492,7 +1501,7 @@ static int __init unittest_data_add(void)
}
#ifdef CONFIG_OF_OVERLAY
-static int __init overlay_data_apply(const char *overlay_name, int *overlay_id);
+static int __init overlay_data_apply(const char *overlay_name, int *ovcs_id);
static int unittest_probe(struct platform_device *pdev)
{
@@ -1657,7 +1666,7 @@ static void __init of_unittest_overlay_gpio(void)
* The overlays are applied by overlay_data_apply()
* instead of of_unittest_apply_overlay() so that they
* will not be tracked. Thus they will not be removed
- * by of_unittest_destroy_tracked_overlays().
+ * by of_unittest_remove_tracked_overlays().
*
* - apply overlay_gpio_01
* - apply overlay_gpio_02a
@@ -1905,86 +1914,70 @@ static const char *overlay_name_from_nr(int nr)
static const char *bus_path = "/testcase-data/overlay-node/test-bus";
-/* FIXME: it is NOT guaranteed that overlay ids are assigned in sequence */
-
-#define MAX_UNITTEST_OVERLAYS 256
-static unsigned long overlay_id_bits[BITS_TO_LONGS(MAX_UNITTEST_OVERLAYS)];
-static int overlay_first_id = -1;
+#define MAX_TRACK_OVCS_IDS 256
-static long of_unittest_overlay_tracked(int id)
-{
- if (WARN_ON(id >= MAX_UNITTEST_OVERLAYS))
- return 0;
- return overlay_id_bits[BIT_WORD(id)] & BIT_MASK(id);
-}
+static int track_ovcs_id[MAX_TRACK_OVCS_IDS];
+static int track_ovcs_id_overlay_nr[MAX_TRACK_OVCS_IDS];
+static int track_ovcs_id_cnt;
-static void of_unittest_track_overlay(int id)
+static void of_unittest_track_overlay(int ovcs_id, int overlay_nr)
{
- if (overlay_first_id < 0)
- overlay_first_id = id;
- id -= overlay_first_id;
-
- if (WARN_ON(id >= MAX_UNITTEST_OVERLAYS))
+ if (WARN_ON(track_ovcs_id_cnt >= MAX_TRACK_OVCS_IDS))
return;
- overlay_id_bits[BIT_WORD(id)] |= BIT_MASK(id);
+
+ track_ovcs_id[track_ovcs_id_cnt] = ovcs_id;
+ track_ovcs_id_overlay_nr[track_ovcs_id_cnt] = overlay_nr;
+ track_ovcs_id_cnt++;
}
-static void of_unittest_untrack_overlay(int id)
+static void of_unittest_untrack_overlay(int ovcs_id)
{
- if (overlay_first_id < 0)
+ if (WARN_ON(track_ovcs_id_cnt < 1))
return;
- id -= overlay_first_id;
- if (WARN_ON(id >= MAX_UNITTEST_OVERLAYS))
- return;
- overlay_id_bits[BIT_WORD(id)] &= ~BIT_MASK(id);
-}
-static void of_unittest_destroy_tracked_overlays(void)
-{
- int id, ret, defers, ovcs_id;
+ track_ovcs_id_cnt--;
- if (overlay_first_id < 0)
- return;
+ /* If out of sync then test is broken. Do not try to recover. */
+ WARN_ON(track_ovcs_id[track_ovcs_id_cnt] != ovcs_id);
+}
- /* try until no defers */
- do {
- defers = 0;
- /* remove in reverse order */
- for (id = MAX_UNITTEST_OVERLAYS - 1; id >= 0; id--) {
- if (!of_unittest_overlay_tracked(id))
- continue;
+static void of_unittest_remove_tracked_overlays(void)
+{
+ int ret, ovcs_id, overlay_nr, save_ovcs_id;
+ const char *overlay_name;
- ovcs_id = id + overlay_first_id;
- ret = of_overlay_remove(&ovcs_id);
- if (ret == -ENODEV) {
- pr_warn("%s: no overlay to destroy for #%d\n",
- __func__, id + overlay_first_id);
- continue;
- }
- if (ret != 0) {
- defers++;
- pr_warn("%s: overlay destroy failed for #%d\n",
- __func__, id + overlay_first_id);
- continue;
- }
+ while (track_ovcs_id_cnt > 0) {
- of_unittest_untrack_overlay(id);
+ ovcs_id = track_ovcs_id[track_ovcs_id_cnt - 1];
+ overlay_nr = track_ovcs_id_overlay_nr[track_ovcs_id_cnt - 1];
+ save_ovcs_id = ovcs_id;
+ ret = of_overlay_remove(&ovcs_id);
+ if (ret == -ENODEV) {
+ overlay_name = overlay_name_from_nr(overlay_nr);
+ pr_warn("%s: of_overlay_remove() for overlay \"%s\" failed, ret = %d\n",
+ __func__, overlay_name, ret);
}
- } while (defers > 0);
+ of_unittest_untrack_overlay(save_ovcs_id);
+ }
+
}
-static int __init of_unittest_apply_overlay(int overlay_nr, int *overlay_id)
+static int __init of_unittest_apply_overlay(int overlay_nr, int *ovcs_id)
{
+ /*
+ * The overlay will be tracked, thus it will be removed
+ * by of_unittest_remove_tracked_overlays().
+ */
+
const char *overlay_name;
overlay_name = overlay_name_from_nr(overlay_nr);
- if (!overlay_data_apply(overlay_name, overlay_id)) {
- unittest(0, "could not apply overlay \"%s\"\n",
- overlay_name);
+ if (!overlay_data_apply(overlay_name, ovcs_id)) {
+ unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
return -EFAULT;
}
- of_unittest_track_overlay(*overlay_id);
+ of_unittest_track_overlay(*ovcs_id, overlay_nr);
return 0;
}
@@ -2029,7 +2022,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
int unittest_nr, int before, int after,
enum overlay_type ovtype)
{
- int ret, ovcs_id, save_id;
+ int ret, ovcs_id, save_ovcs_id;
/* unittest device must be in before state */
if (of_unittest_device_exists(unittest_nr, ovtype) != before) {
@@ -2057,7 +2050,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
return -EINVAL;
}
- save_id = ovcs_id;
+ save_ovcs_id = ovcs_id;
ret = of_overlay_remove(&ovcs_id);
if (ret != 0) {
unittest(0, "%s failed to be destroyed @\"%s\"\n",
@@ -2065,7 +2058,7 @@ static int __init of_unittest_apply_revert_overlay_check(int overlay_nr,
unittest_path(unittest_nr, ovtype));
return ret;
}
- of_unittest_untrack_overlay(save_id);
+ of_unittest_untrack_overlay(save_ovcs_id);
/* unittest device must be again in before state */
if (of_unittest_device_exists(unittest_nr, PDEV_OVERLAY) != before) {
@@ -2192,7 +2185,7 @@ static void __init of_unittest_overlay_5(void)
/* test overlay application in sequence */
static void __init of_unittest_overlay_6(void)
{
- int i, ov_id[2], ovcs_id;
+ int i, save_ovcs_id[2], ovcs_id;
int overlay_nr = 6, unittest_nr = 6;
int before = 0, after = 1;
const char *overlay_name;
@@ -2225,8 +2218,8 @@ static void __init of_unittest_overlay_6(void)
unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
return;
}
- ov_id[0] = ovcs_id;
- of_unittest_track_overlay(ov_id[0]);
+ save_ovcs_id[0] = ovcs_id;
+ of_unittest_track_overlay(ovcs_id, overlay_nr + 0);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest6/status");
@@ -2242,8 +2235,8 @@ static void __init of_unittest_overlay_6(void)
unittest(0, "could not apply overlay \"%s\"\n", overlay_name);
return;
}
- ov_id[1] = ovcs_id;
- of_unittest_track_overlay(ov_id[1]);
+ save_ovcs_id[1] = ovcs_id;
+ of_unittest_track_overlay(ovcs_id, overlay_nr + 1);
EXPECT_END(KERN_INFO,
"OF: overlay: WARNING: memory leak will occur if overlay removed, property: /testcase-data/overlay-node/test-bus/test-unittest7/status");
@@ -2263,7 +2256,7 @@ static void __init of_unittest_overlay_6(void)
}
for (i = 1; i >= 0; i--) {
- ovcs_id = ov_id[i];
+ ovcs_id = save_ovcs_id[i];
if (of_overlay_remove(&ovcs_id)) {
unittest(0, "%s failed destroy @\"%s\"\n",
overlay_name_from_nr(overlay_nr + i),
@@ -2271,7 +2264,7 @@ static void __init of_unittest_overlay_6(void)
PDEV_OVERLAY));
return;
}
- of_unittest_untrack_overlay(ov_id[i]);
+ of_unittest_untrack_overlay(save_ovcs_id[i]);
}
for (i = 0; i < 2; i++) {
@@ -2294,7 +2287,7 @@ static void __init of_unittest_overlay_6(void)
/* test overlay application in sequence */
static void __init of_unittest_overlay_8(void)
{
- int i, ov_id[2], ovcs_id;
+ int i, save_ovcs_id[2], ovcs_id;
int overlay_nr = 8, unittest_nr = 8;
const char *overlay_name;
int ret;
@@ -2316,8 +2309,8 @@ static void __init of_unittest_overlay_8(void)
if (!ret)
return;
- ov_id[0] = ovcs_id;
- of_unittest_track_overlay(ov_id[0]);
+ save_ovcs_id[0] = ovcs_id;
+ of_unittest_track_overlay(ovcs_id, overlay_nr + 0);
overlay_name = overlay_name_from_nr(overlay_nr + 1);
@@ -2335,11 +2328,11 @@ static void __init of_unittest_overlay_8(void)
return;
}
- ov_id[1] = ovcs_id;
- of_unittest_track_overlay(ov_id[1]);
+ save_ovcs_id[1] = ovcs_id;
+ of_unittest_track_overlay(ovcs_id, overlay_nr + 1);
/* now try to remove first overlay (it should fail) */
- ovcs_id = ov_id[0];
+ ovcs_id = save_ovcs_id[0];
EXPECT_BEGIN(KERN_INFO,
"OF: overlay: node_overlaps_later_cs: #6 overlaps with #7 @/testcase-data/overlay-node/test-bus/test-unittest8");
@@ -2356,6 +2349,10 @@ static void __init of_unittest_overlay_8(void)
"OF: overlay: node_overlaps_later_cs: #6 overlaps with #7 @/testcase-data/overlay-node/test-bus/test-unittest8");
if (!ret) {
+ /*
+ * Should never get here. If we do, expect a lot of
+ * subsequent tracking and overlay removal related errors.
+ */
unittest(0, "%s was destroyed @\"%s\"\n",
overlay_name_from_nr(overlay_nr + 0),
unittest_path(unittest_nr,
@@ -2365,7 +2362,7 @@ static void __init of_unittest_overlay_8(void)
/* removing them in order should work */
for (i = 1; i >= 0; i--) {
- ovcs_id = ov_id[i];
+ ovcs_id = save_ovcs_id[i];
if (of_overlay_remove(&ovcs_id)) {
unittest(0, "%s not destroyed @\"%s\"\n",
overlay_name_from_nr(overlay_nr + i),
@@ -2373,7 +2370,7 @@ static void __init of_unittest_overlay_8(void)
PDEV_OVERLAY));
return;
}
- of_unittest_untrack_overlay(ov_id[i]);
+ of_unittest_untrack_overlay(save_ovcs_id[i]);
}
unittest(1, "overlay test %d passed\n", 8);
@@ -2805,7 +2802,7 @@ static void __init of_unittest_overlay(void)
of_unittest_overlay_gpio();
- of_unittest_destroy_tracked_overlays();
+ of_unittest_remove_tracked_overlays();
out:
of_node_put(bus_np);
@@ -2837,7 +2834,7 @@ struct overlay_info {
uint8_t *dtb_begin;
uint8_t *dtb_end;
int expected_result;
- int overlay_id;
+ int ovcs_id;
char *name;
};
@@ -2991,7 +2988,7 @@ void __init unittest_unflatten_overlay_base(void)
*
* Return 0 on unexpected error.
*/
-static int __init overlay_data_apply(const char *overlay_name, int *overlay_id)
+static int __init overlay_data_apply(const char *overlay_name, int *ovcs_id)
{
struct overlay_info *info;
int found = 0;
@@ -3013,9 +3010,9 @@ static int __init overlay_data_apply(const char *overlay_name, int *overlay_id)
if (!size)
pr_err("no overlay data for %s\n", overlay_name);
- ret = of_overlay_fdt_apply(info->dtb_begin, size, &info->overlay_id);
- if (overlay_id)
- *overlay_id = info->overlay_id;
+ ret = of_overlay_fdt_apply(info->dtb_begin, size, &info->ovcs_id);
+ if (ovcs_id)
+ *ovcs_id = info->ovcs_id;
if (ret < 0)
goto out;
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 059566f54429..9be007c9420f 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -1003,7 +1003,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ioc->usg_calls++;
#endif
- while(sg_dma_len(sglist) && nents--) {
+ while (nents && sg_dma_len(sglist)) {
#ifdef CCIO_COLLECT_STATS
ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
@@ -1011,6 +1011,7 @@ ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ccio_unmap_page(dev, sg_dma_address(sglist),
sg_dma_len(sglist), direction, 0);
++sglist;
+ nents--;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
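[Editor's note: the old condition while (sg_dma_len(sglist) && nents--) dereferences the scatterlist entry before checking the count, so after the last real entry it evaluates sg_dma_len() one element past the list — the data TLB miss this patch fixes — and the post-decrement also leaves nents at -1 for the debug print. Testing the counter first and decrementing in the body avoids both. Standalone demonstration:

	#include <stdio.h>

	int main(void)
	{
		int len[3] = { 8, 8, 8 };	/* pretend len[3] is unmapped */
		int i = 0, nents = 3;

		/* fixed shape: bound check first, decrement in the body */
		while (nents && len[i]) {
			printf("unmap entry %d\n", i);
			i++;
			nents--;	/* only for processed entries */
		}
		/* the old shape would have read len[3] here, nents == -1 */
		printf("nents after loop: %d\n", nents);
		return 0;
	}
]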
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index cf91cb024be3..1e4a5663d011 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -168,14 +168,14 @@ static int led_proc_show(struct seq_file *m, void *v)
static int led_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, led_proc_show, PDE_DATA(inode));
+ return single_open(file, led_proc_show, pde_data(inode));
}
static ssize_t led_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- void *data = PDE_DATA(file_inode(file));
+ void *data = pde_data(file_inode(file));
char *cur, lbuf[32];
int d;
diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
index 9513c39719d1..d9e51036a4fa 100644
--- a/drivers/parisc/pdc_stable.c
+++ b/drivers/parisc/pdc_stable.c
@@ -980,8 +980,10 @@ pdcs_register_pathentries(void)
entry->kobj.kset = paths_kset;
err = kobject_init_and_add(&entry->kobj, &ktype_pdcspath, NULL,
"%s", entry->name);
- if (err)
+ if (err) {
+ kobject_put(&entry->kobj);
return err;
+ }
/* kobject is now registered */
write_lock(&entry->rw_lock);
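[Editor's note: kobject_init_and_add() initializes the kobject and takes the initial reference before attempting the add, so on failure the caller still owns that reference and must drop it with kobject_put(); that put is also what invokes the ktype's release() to free the containing object. General shape of the fix, kernel context with a hypothetical ktype:

	err = kobject_init_and_add(&entry->kobj, &my_ktype, NULL, "%s", name);
	if (err) {
		/* init succeeded even though add failed: drop our
		 * reference so my_ktype.release() frees the object */
		kobject_put(&entry->kobj);
		return err;
	}

Returning without the put leaks both the reference and the memory behind it.]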
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index e60690d38d67..374b9199878d 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -1047,7 +1047,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
- while (sg_dma_len(sglist) && nents--) {
+ while (nents && sg_dma_len(sglist)) {
sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
direction, 0);
@@ -1056,6 +1056,7 @@ sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
ioc->usingle_calls--; /* kluge since call is unmap_sg() */
#endif
++sglist;
+ nents--;
}
DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 43e615aa12ff..d98fafdd0f99 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -184,7 +184,7 @@ config PCI_LABEL
config PCI_HYPERV
tristate "Hyper-V PCI Frontend"
- depends on X86_64 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
+ depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && SYSFS
select PCI_HYPERV_INTERFACE
help
The PCI device frontend driver allows the kernel to import arbitrary
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index d62c4ac4ae1b..37be95adf169 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -5,8 +5,9 @@
obj-$(CONFIG_PCI) += access.o bus.o probe.o host-bridge.o \
remove.o pci.o pci-driver.o search.o \
pci-sysfs.o rom.o setup-res.o irq.o vpd.o \
- setup-bus.o vc.o mmap.o setup-irq.o msi.o
+ setup-bus.o vc.o mmap.o setup-irq.o
+obj-$(CONFIG_PCI) += msi/
obj-$(CONFIG_PCI) += pcie/
ifdef CONFIG_PCI
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 46935695cfb9..0d9f6b21babb 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -42,7 +42,10 @@ int noinline pci_bus_read_config_##size \
if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER; \
pci_lock_config(flags); \
res = bus->ops->read(bus, devfn, pos, len, &data); \
- *value = (type)data; \
+ if (res) \
+ PCI_SET_ERROR_RESPONSE(value); \
+ else \
+ *value = (type)data; \
pci_unlock_config(flags); \
return res; \
}
@@ -80,10 +83,8 @@ int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
void __iomem *addr;
addr = bus->ops->map_bus(bus, devfn, where);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
if (size == 1)
*val = readb(addr);
@@ -122,10 +123,8 @@ int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
void __iomem *addr;
addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
*val = readl(addr);
@@ -228,7 +227,10 @@ int pci_user_read_config_##size \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
raw_spin_unlock_irq(&pci_lock); \
- *val = (type)data; \
+ if (ret) \
+ PCI_SET_ERROR_RESPONSE(val); \
+ else \
+ *val = (type)data; \
return pcibios_err_to_errno(ret); \
} \
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);
@@ -410,9 +412,9 @@ int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
if (pcie_capability_reg_implemented(dev, pos)) {
ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
/*
- * Reset *val to 0 if pci_read_config_word() fails, it may
- * have been written as 0xFFFF if hardware error happens
- * during pci_read_config_word().
+ * Reset *val to 0 if pci_read_config_word() fails; it may
+ * have been written as 0xFFFF (PCI_ERROR_RESPONSE) if the
+ * config read failed on PCI.
*/
if (ret)
*val = 0;
@@ -445,9 +447,9 @@ int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
if (pcie_capability_reg_implemented(dev, pos)) {
ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
/*
- * Reset *val to 0 if pci_read_config_dword() fails, it may
- * have been written as 0xFFFFFFFF if hardware error happens
- * during pci_read_config_dword().
+ * Reset *val to 0 if pci_read_config_dword() fails; it may
+ * have been written as 0xFFFFFFFF (PCI_ERROR_RESPONSE) if
+ * the config read failed on PCI.
*/
if (ret)
*val = 0;
@@ -523,7 +525,7 @@ EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
{
if (pci_dev_is_disconnected(dev)) {
- *val = ~0;
+ PCI_SET_ERROR_RESPONSE(val);
return PCIBIOS_DEVICE_NOT_FOUND;
}
return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
@@ -533,7 +535,7 @@ EXPORT_SYMBOL(pci_read_config_byte);
int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
{
if (pci_dev_is_disconnected(dev)) {
- *val = ~0;
+ PCI_SET_ERROR_RESPONSE(val);
return PCIBIOS_DEVICE_NOT_FOUND;
}
return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
@@ -544,7 +546,7 @@ int pci_read_config_dword(const struct pci_dev *dev, int where,
u32 *val)
{
if (pci_dev_is_disconnected(dev)) {
- *val = ~0;
+ PCI_SET_ERROR_RESPONSE(val);
return PCIBIOS_DEVICE_NOT_FOUND;
}
return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
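[Editor's note: with failed reads now fabricating PCI_ERROR_RESPONSE (all ones for the access width) via PCI_SET_ERROR_RESPONSE(), callers can test the returned value as well as the return code. The consumer side of this series pairs it with a PCI_POSSIBLE_ERROR() check; the macro name is taken from the same patch set, so treat it as an assumption when building against older trees. Sketch, kernel context:

	u32 id;

	pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);
	if (PCI_POSSIBLE_ERROR(id)) {
		/* device gone or read failed: id == 0xFFFFFFFF */
		return -ENODEV;
	}
]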
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 7fc5135ffbbf..601f2531ee91 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -4,7 +4,7 @@ menu "PCI controller drivers"
depends on PCI
config PCI_MVEBU
- bool "Marvell EBU PCIe controller"
+ tristate "Marvell EBU PCIe controller"
depends on ARCH_MVEBU || ARCH_DOVE || COMPILE_TEST
depends on MVEBU_MBUS
depends on ARM
@@ -274,14 +274,14 @@ config PCIE_BRCMSTB
BMIPS_GENERIC || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
- default ARCH_BRCMSTB
+ default ARCH_BRCMSTB || BMIPS_GENERIC
help
Say Y here to enable PCIe host controller support for
Broadcom STB based SoCs, like the Raspberry Pi 4.
config PCI_HYPERV_INTERFACE
tristate "Hyper-V PCI Interface"
- depends on X86 && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN && X86_64
+ depends on ((X86 && X86_64) || ARM64) && HYPERV && PCI_MSI && PCI_MSI_IRQ_DOMAIN
help
The Hyper-V PCI Interface is a helper driver that allows other drivers to
have a common interface with the Hyper-V PCI frontend driver.
@@ -332,8 +332,8 @@ config PCIE_APPLE
If unsure, say Y if you have an Apple Silicon system.
config PCIE_MT7621
- bool "MediaTek MT7621 PCIe Controller"
- depends on SOC_MT7621 || (MIPS && COMPILE_TEST)
+ tristate "MediaTek MT7621 PCIe Controller"
+ depends on SOC_MT7621 || COMPILE_TEST
select PHY_MT7621_PCI
default SOC_MT7621
help
diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c
index 918e11082e6a..768d33f9ebc8 100644
--- a/drivers/pci/controller/cadence/pci-j721e.c
+++ b/drivers/pci/controller/cadence/pci-j721e.c
@@ -51,11 +51,10 @@ enum link_status {
#define MAX_LANES 2
struct j721e_pcie {
- struct device *dev;
+ struct cdns_pcie *cdns_pcie;
struct clk *refclk;
u32 mode;
u32 num_lanes;
- struct cdns_pcie *cdns_pcie;
void __iomem *user_cfg_base;
void __iomem *intd_cfg_base;
u32 linkdown_irq_regfield;
@@ -99,7 +98,7 @@ static inline void j721e_pcie_intd_writel(struct j721e_pcie *pcie, u32 offset,
static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv)
{
struct j721e_pcie *pcie = priv;
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->cdns_pcie->dev;
u32 reg;
reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2);
@@ -165,7 +164,7 @@ static const struct cdns_pcie_ops j721e_pcie_ops = {
static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon,
unsigned int offset)
{
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->cdns_pcie->dev;
u32 mask = J721E_MODE_RC;
u32 mode = pcie->mode;
u32 val = 0;
@@ -184,7 +183,7 @@ static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon,
static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
struct regmap *syscon, unsigned int offset)
{
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->cdns_pcie->dev;
struct device_node *np = dev->of_node;
int link_speed;
u32 val = 0;
@@ -205,7 +204,7 @@ static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie,
static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
struct regmap *syscon, unsigned int offset)
{
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->cdns_pcie->dev;
u32 lanes = pcie->num_lanes;
u32 val = 0;
int ret;
@@ -220,7 +219,7 @@ static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie,
static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie)
{
- struct device *dev = pcie->dev;
+ struct device *dev = pcie->cdns_pcie->dev;
struct device_node *node = dev->of_node;
struct of_phandle_args args;
unsigned int offset = 0;
@@ -354,11 +353,11 @@ static int j721e_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct pci_host_bridge *bridge;
- struct j721e_pcie_data *data;
+ const struct j721e_pcie_data *data;
struct cdns_pcie *cdns_pcie;
struct j721e_pcie *pcie;
- struct cdns_pcie_rc *rc;
- struct cdns_pcie_ep *ep;
+ struct cdns_pcie_rc *rc = NULL;
+ struct cdns_pcie_ep *ep = NULL;
struct gpio_desc *gpiod;
void __iomem *base;
struct clk *clk;
@@ -367,7 +366,7 @@ static int j721e_pcie_probe(struct platform_device *pdev)
int ret;
int irq;
- data = (struct j721e_pcie_data *)of_device_get_match_data(dev);
+ data = of_device_get_match_data(dev);
if (!data)
return -EINVAL;
@@ -377,7 +376,46 @@ static int j721e_pcie_probe(struct platform_device *pdev)
if (!pcie)
return -ENOMEM;
- pcie->dev = dev;
+ switch (mode) {
+ case PCI_MODE_RC:
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST))
+ return -ENODEV;
+
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
+ if (!bridge)
+ return -ENOMEM;
+
+ if (!data->byte_access_allowed)
+ bridge->ops = &cdns_ti_pcie_host_ops;
+ rc = pci_host_bridge_priv(bridge);
+ rc->quirk_retrain_flag = data->quirk_retrain_flag;
+ rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+ cdns_pcie = &rc->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &j721e_pcie_ops;
+ pcie->cdns_pcie = cdns_pcie;
+ break;
+ case PCI_MODE_EP:
+ if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP))
+ return -ENODEV;
+
+ ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
+
+ cdns_pcie = &ep->pcie;
+ cdns_pcie->dev = dev;
+ cdns_pcie->ops = &j721e_pcie_ops;
+ pcie->cdns_pcie = cdns_pcie;
+ break;
+ default:
+ dev_err(dev, "INVALID device type %d\n", mode);
+ return 0;
+ }
+
pcie->mode = mode;
pcie->linkdown_irq_regfield = data->linkdown_irq_regfield;
@@ -428,28 +466,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
switch (mode) {
case PCI_MODE_RC:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) {
- ret = -ENODEV;
- goto err_get_sync;
- }
-
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
- if (!bridge) {
- ret = -ENOMEM;
- goto err_get_sync;
- }
-
- if (!data->byte_access_allowed)
- bridge->ops = &cdns_ti_pcie_host_ops;
- rc = pci_host_bridge_priv(bridge);
- rc->quirk_retrain_flag = data->quirk_retrain_flag;
- rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
-
- cdns_pcie = &rc->pcie;
- cdns_pcie->dev = dev;
- cdns_pcie->ops = &j721e_pcie_ops;
- pcie->cdns_pcie = cdns_pcie;
-
gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
if (IS_ERR(gpiod)) {
ret = PTR_ERR(gpiod);
@@ -499,23 +515,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
break;
case PCI_MODE_EP:
- if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) {
- ret = -ENODEV;
- goto err_get_sync;
- }
-
- ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL);
- if (!ep) {
- ret = -ENOMEM;
- goto err_get_sync;
- }
- ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag;
-
- cdns_pcie = &ep->pcie;
- cdns_pcie->dev = dev;
- cdns_pcie->ops = &j721e_pcie_ops;
- pcie->cdns_pcie = cdns_pcie;
-
ret = cdns_pcie_init_phy(dev, cdns_pcie);
if (ret) {
dev_err(dev, "Failed to init phy\n");
@@ -527,8 +526,6 @@ static int j721e_pcie_probe(struct platform_device *pdev)
goto err_pcie_setup;
break;
- default:
- dev_err(dev, "INVALID device type %d\n", mode);
}
return 0;
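The RC path above relies on devm_pci_alloc_host_bridge() appending a private area to the bridge and pci_host_bridge_priv() handing it back; a compressed sketch of that pairing as used in j721e_pcie_probe():

	/* Sketch: bridge-private allocation; rc is the embedded cdns_pcie_rc */
	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc));
	if (!bridge)
		return -ENOMEM;
	rc = pci_host_bridge_priv(bridge);	/* points into the bridge */
	cdns_pcie = &rc->pcie;			/* the Cadence core handle */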
diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c
index a224afadbcc0..bac0541317c1 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-plat.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c
@@ -45,7 +45,6 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev)
{
const struct cdns_plat_pcie_of_data *data;
struct cdns_plat_pcie *cdns_plat_pcie;
- const struct of_device_id *match;
struct device *dev = &pdev->dev;
struct pci_host_bridge *bridge;
struct cdns_pcie_ep *ep;
@@ -54,11 +53,10 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev)
bool is_rc;
int ret;
- match = of_match_device(cdns_plat_pcie_of_match, dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct cdns_plat_pcie_of_data *)match->data;
is_rc = data->is_rc;
pr_debug(" Started %s with is_rc: %d\n", __func__, is_rc);
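Several drivers below convert from the of_match_device() + cast idiom to of_device_get_match_data(), which returns the matched of_device_id's .data pointer directly. A sketch of the pattern with a hypothetical driver (foo_of_match and struct foo_data are illustrative, not from any driver in this series):

	static int foo_probe_old(struct device *dev)
	{
		const struct of_device_id *match;
		const struct foo_data *data;

		match = of_match_device(foo_of_match, dev);
		if (!match)
			return -EINVAL;
		data = (struct foo_data *)match->data;	/* explicit cast */
		return foo_init(data);
	}

	static int foo_probe_new(struct device *dev)
	{
		const struct foo_data *data = of_device_get_match_data(dev);

		if (!data)	/* covers no-match and NULL .data alike */
			return -EINVAL;
		return foo_init(data);
	}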
diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h
index 262421e5d917..c8a27b6290ce 100644
--- a/drivers/pci/controller/cadence/pcie-cadence.h
+++ b/drivers/pci/controller/cadence/pcie-cadence.h
@@ -310,7 +310,7 @@ struct cdns_pcie {
* single function at a time
* @vendor_id: PCI vendor ID
* @device_id: PCI device ID
- * @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
+ * @avail_ib_bar: Status of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or
* available
* @quirk_retrain_flag: Retrain link as quirk for PCIe Gen2
* @quirk_detect_quiet_flag: LTSSM Detect Quiet min delay set as quirk
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index a4221f6f3629..dfcdeb432dc8 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -213,7 +213,7 @@ static int dra7xx_pcie_handle_msi(struct pcie_port *pp, int index)
if (!val)
return 0;
- pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL, 0);
+ pos = find_first_bit(&val, MAX_MSI_IRQS_PER_CTRL);
while (pos != MAX_MSI_IRQS_PER_CTRL) {
generic_handle_domain_irq(pp->irq_domain,
(index * MAX_MSI_IRQS_PER_CTRL) + pos);
@@ -697,16 +697,14 @@ static int dra7xx_pcie_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
char name[10];
struct gpio_desc *reset;
- const struct of_device_id *match;
const struct dra7xx_pcie_of_data *data;
enum dw_pcie_device_mode mode;
u32 b1co_mode_sel_mask;
- match = of_match_device(of_match_ptr(of_dra7xx_pcie_match), dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct dra7xx_pcie_of_data *)match->data;
mode = (enum dw_pcie_device_mode)data->mode;
b1co_mode_sel_mask = data->b1co_mode_sel_mask;
diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c
index 722dacdd5a17..467c8d1cd7e4 100644
--- a/drivers/pci/controller/dwc/pci-exynos.c
+++ b/drivers/pci/controller/dwc/pci-exynos.c
@@ -217,10 +217,8 @@ static int exynos_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- if (PCI_SLOT(devfn)) {
- *val = ~0;
+ if (PCI_SLOT(devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
*val = dw_pcie_read_dbi(pci, where, size);
return PCIBIOS_SUCCESSFUL;
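The driver-side *val = ~0 stores can be dropped here because the config access core now fabricates the error response whenever an accessor returns non-success. A sketch of that core-side behavior (not the exact access.c code):

	/* Sketch: error fabrication in the word-sized read wrapper */
	static int sketch_read_config_word(struct pci_bus *bus, unsigned int devfn,
					   int where, u16 *val)
	{
		u32 data;
		int ret = bus->ops->read(bus, devfn, where, 2, &data);

		if (ret != PCIBIOS_SUCCESSFUL)
			PCI_SET_ERROR_RESPONSE(val);	/* all ones, u16-sized */
		else
			*val = (u16)data;
		return ret;
	}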
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 26f49f797b0f..6974bd5aa116 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -29,6 +29,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
+#include <linux/phy/phy.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
@@ -49,6 +50,7 @@ enum imx6_pcie_variants {
IMX6QP,
IMX7D,
IMX8MQ,
+ IMX8MM,
};
#define IMX6_PCIE_FLAG_IMX6_PHY BIT(0)
@@ -88,6 +90,7 @@ struct imx6_pcie {
struct device *pd_pcie;
/* power domain for pcie phy */
struct device *pd_pcie_phy;
+ struct phy *phy;
const struct imx6_pcie_drvdata *drvdata;
};
@@ -372,6 +375,8 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
case IMX7D:
case IMX8MQ:
reset_control_assert(imx6_pcie->pciephy_reset);
+ fallthrough;
+ case IMX8MM:
reset_control_assert(imx6_pcie->apps_reset);
break;
case IMX6SX:
@@ -407,7 +412,8 @@ static void imx6_pcie_assert_core_reset(struct imx6_pcie *imx6_pcie)
static unsigned int imx6_pcie_grp_offset(const struct imx6_pcie *imx6_pcie)
{
- WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ);
+ WARN_ON(imx6_pcie->drvdata->variant != IMX8MQ &&
+ imx6_pcie->drvdata->variant != IMX8MM);
return imx6_pcie->controller_id == 1 ? IOMUXC_GPR16 : IOMUXC_GPR14;
}
@@ -446,6 +452,11 @@ static int imx6_pcie_enable_ref_clk(struct imx6_pcie *imx6_pcie)
break;
case IMX7D:
break;
+ case IMX8MM:
+ ret = clk_prepare_enable(imx6_pcie->pcie_aux);
+ if (ret)
+ dev_err(dev, "unable to enable pcie_aux clock\n");
+ break;
case IMX8MQ:
ret = clk_prepare_enable(imx6_pcie->pcie_aux);
if (ret) {
@@ -522,6 +533,14 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
goto err_ref_clk;
}
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MM:
+ if (phy_power_on(imx6_pcie->phy))
+ dev_err(dev, "unable to power on PHY\n");
+ break;
+ default:
+ break;
+ }
/* allow the clocks to stabilize */
usleep_range(200, 500);
@@ -538,6 +557,10 @@ static void imx6_pcie_deassert_core_reset(struct imx6_pcie *imx6_pcie)
case IMX8MQ:
reset_control_deassert(imx6_pcie->pciephy_reset);
break;
+ case IMX8MM:
+ if (phy_init(imx6_pcie->phy))
+ dev_err(dev, "waiting for phy ready timeout!\n");
+ break;
case IMX7D:
reset_control_deassert(imx6_pcie->pciephy_reset);
@@ -614,6 +637,12 @@ static void imx6_pcie_configure_type(struct imx6_pcie *imx6_pcie)
static void imx6_pcie_init_phy(struct imx6_pcie *imx6_pcie)
{
switch (imx6_pcie->drvdata->variant) {
+ case IMX8MM:
+ /*
+ * PHY initialization is already handled by the standalone
+ * PHY driver, so there is nothing to do here.
+ */
+ break;
case IMX8MQ:
/*
* TODO: Currently this code assumes external
@@ -753,6 +782,7 @@ static void imx6_pcie_ltssm_enable(struct device *dev)
break;
case IMX7D:
case IMX8MQ:
+ case IMX8MM:
reset_control_deassert(imx6_pcie->apps_reset);
break;
}
@@ -871,6 +901,7 @@ static void imx6_pcie_ltssm_disable(struct device *dev)
IMX6Q_GPR12_PCIE_CTL_2, 0);
break;
case IMX7D:
+ case IMX8MM:
reset_control_assert(imx6_pcie->apps_reset);
break;
default:
@@ -930,6 +961,7 @@ static void imx6_pcie_clk_disable(struct imx6_pcie *imx6_pcie)
IMX7D_GPR12_PCIE_PHY_REFCLK_SEL);
break;
case IMX8MQ:
+ case IMX8MM:
clk_disable_unprepare(imx6_pcie->pcie_aux);
break;
default:
@@ -945,8 +977,16 @@ static int imx6_pcie_suspend_noirq(struct device *dev)
return 0;
imx6_pcie_pm_turnoff(imx6_pcie);
- imx6_pcie_clk_disable(imx6_pcie);
imx6_pcie_ltssm_disable(dev);
+ imx6_pcie_clk_disable(imx6_pcie);
+ switch (imx6_pcie->drvdata->variant) {
+ case IMX8MM:
+ if (phy_power_off(imx6_pcie->phy))
+ dev_err(dev, "unable to power off PHY\n");
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -1043,11 +1083,6 @@ static int imx6_pcie_probe(struct platform_device *pdev)
}
/* Fetch clocks */
- imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
- if (IS_ERR(imx6_pcie->pcie_phy))
- return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
- "pcie_phy clock source missing or invalid\n");
-
imx6_pcie->pcie_bus = devm_clk_get(dev, "pcie_bus");
if (IS_ERR(imx6_pcie->pcie_bus))
return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_bus),
@@ -1090,9 +1125,34 @@ static int imx6_pcie_probe(struct platform_device *pdev)
return PTR_ERR(imx6_pcie->apps_reset);
}
break;
+ case IMX8MM:
+ imx6_pcie->pcie_aux = devm_clk_get(dev, "pcie_aux");
+ if (IS_ERR(imx6_pcie->pcie_aux))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_aux),
+ "pcie_aux clock source missing or invalid\n");
+ imx6_pcie->apps_reset = devm_reset_control_get_exclusive(dev,
+ "apps");
+ if (IS_ERR(imx6_pcie->apps_reset))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->apps_reset),
+ "failed to get pcie apps reset control\n");
+
+ imx6_pcie->phy = devm_phy_get(dev, "pcie-phy");
+ if (IS_ERR(imx6_pcie->phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->phy),
+ "failed to get pcie phy\n");
+
+ break;
default:
break;
}
+ /* Don't fetch the pcie_phy clock if a dedicated PHY driver handles it */
+ if (imx6_pcie->phy == NULL) {
+ imx6_pcie->pcie_phy = devm_clk_get(dev, "pcie_phy");
+ if (IS_ERR(imx6_pcie->pcie_phy))
+ return dev_err_probe(dev, PTR_ERR(imx6_pcie->pcie_phy),
+ "pcie_phy clock source missing or invalid\n");
+ }
+
/* Grab turnoff reset */
imx6_pcie->turnoff_reset = devm_reset_control_get_optional_exclusive(dev, "turnoff");
@@ -1202,6 +1262,10 @@ static const struct imx6_pcie_drvdata drvdata[] = {
[IMX8MQ] = {
.variant = IMX8MQ,
},
+ [IMX8MM] = {
+ .variant = IMX8MM,
+ .flags = IMX6_PCIE_FLAG_SUPPORTS_SUSPEND,
+ },
};
static const struct of_device_id imx6_pcie_of_match[] = {
@@ -1209,7 +1273,8 @@ static const struct of_device_id imx6_pcie_of_match[] = {
{ .compatible = "fsl,imx6sx-pcie", .data = &drvdata[IMX6SX], },
{ .compatible = "fsl,imx6qp-pcie", .data = &drvdata[IMX6QP], },
{ .compatible = "fsl,imx7d-pcie", .data = &drvdata[IMX7D], },
- { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], } ,
+ { .compatible = "fsl,imx8mq-pcie", .data = &drvdata[IMX8MQ], },
+ { .compatible = "fsl,imx8mm-pcie", .data = &drvdata[IMX8MM], },
{},
};
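The IMX8MM support leans on the generic PHY framework (phy_init()/phy_power_on() and their teardown counterparts) rather than open-coding IOMUXC PHY setup. The driver sequences these calls around its own reset handling; a sketch of the framework's canonical pairing, error paths trimmed:

	#include <linux/phy/phy.h>

	/* Canonical generic-PHY bringup/teardown */
	static int pcie_phy_bringup(struct phy *phy)
	{
		int ret = phy_init(phy);	/* program the PHY */

		if (ret)
			return ret;
		ret = phy_power_on(phy);	/* enable PHY power/clocks */
		if (ret)
			phy_exit(phy);
		return ret;
	}

	static void pcie_phy_teardown(struct phy *phy)
	{
		phy_power_off(phy);
		phy_exit(phy);
	}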
diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c
index 865258d8c53c..1c2ee4e13f1c 100644
--- a/drivers/pci/controller/dwc/pci-keystone.c
+++ b/drivers/pci/controller/dwc/pci-keystone.c
@@ -747,9 +747,9 @@ err:
#ifdef CONFIG_ARM
/*
- * When a PCI device does not exist during config cycles, keystone host gets a
- * bus error instead of returning 0xffffffff. This handler always returns 0
- * for this kind of faults.
+ * When a PCI device does not exist during config cycles, keystone host
+ * gets a bus error instead of returning 0xffffffff (PCI_ERROR_RESPONSE).
+ * This handler always returns 0 for this kind of fault.
*/
static int ks_pcie_fault(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
@@ -775,12 +775,19 @@ static int __init ks_pcie_init_id(struct keystone_pcie *ks_pcie)
struct dw_pcie *pci = ks_pcie->pci;
struct device *dev = pci->dev;
struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
devctrl_regs = syscon_regmap_lookup_by_phandle(np, "ti,syscon-pcie-id");
if (IS_ERR(devctrl_regs))
return PTR_ERR(devctrl_regs);
- ret = regmap_read(devctrl_regs, 0, &id);
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-id", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
+ ret = regmap_read(devctrl_regs, offset, &id);
if (ret)
return ret;
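Each ti,syscon-pcie-* lookup now also parses an optional register offset carried as a phandle argument, defaulting to 0 so older device trees keep working. A helper-style sketch of the parse (the 0x210 offset in the comment is illustrative only):

	/* Assumes a DT entry like: ti,syscon-pcie-id = <&scm_conf 0x210>; */
	static unsigned int ks_syscon_offset(struct device_node *np,
					     const char *prop)
	{
		struct of_phandle_args args;

		/* Older DTs omit the argument; fall back to offset 0 */
		if (of_parse_phandle_with_fixed_args(np, prop, 1, 0, &args))
			return 0;
		return args.args[0];
	}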
@@ -989,6 +996,8 @@ err_phy:
static int ks_pcie_set_mode(struct device *dev)
{
struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
struct regmap *syscon;
u32 val;
u32 mask;
@@ -998,10 +1007,15 @@ static int ks_pcie_set_mode(struct device *dev)
if (IS_ERR(syscon))
return 0;
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
mask = KS_PCIE_DEV_TYPE_MASK | KS_PCIE_SYSCLOCKOUTEN;
val = KS_PCIE_DEV_TYPE(RC) | KS_PCIE_SYSCLOCKOUTEN;
- ret = regmap_update_bits(syscon, 0, mask, val);
+ ret = regmap_update_bits(syscon, offset, mask, val);
if (ret) {
dev_err(dev, "failed to set pcie mode\n");
return ret;
@@ -1014,6 +1028,8 @@ static int ks_pcie_am654_set_mode(struct device *dev,
enum dw_pcie_device_mode mode)
{
struct device_node *np = dev->of_node;
+ struct of_phandle_args args;
+ unsigned int offset = 0;
struct regmap *syscon;
u32 val;
u32 mask;
@@ -1023,6 +1039,11 @@ static int ks_pcie_am654_set_mode(struct device *dev,
if (IS_ERR(syscon))
return 0;
+ /* Do not error out to maintain old DT compatibility */
+ ret = of_parse_phandle_with_fixed_args(np, "ti,syscon-pcie-mode", 1, 0, &args);
+ if (!ret)
+ offset = args.args[0];
+
mask = AM654_PCIE_DEV_TYPE_MASK;
switch (mode) {
@@ -1037,7 +1058,7 @@ static int ks_pcie_am654_set_mode(struct device *dev,
return -EINVAL;
}
- ret = regmap_update_bits(syscon, 0, mask, val);
+ ret = regmap_update_bits(syscon, offset, mask, val);
if (ret) {
dev_err(dev, "failed to set pcie mode\n");
return ret;
@@ -1087,7 +1108,6 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
const struct ks_pcie_of_data *data;
- const struct of_device_id *match;
enum dw_pcie_device_mode mode;
struct dw_pcie *pci;
struct keystone_pcie *ks_pcie;
@@ -1104,8 +1124,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev)
int irq;
int i;
- match = of_match_device(of_match_ptr(ks_pcie_of_match), dev);
- data = (struct ks_pcie_of_data *)match->data;
+ data = of_device_get_match_data(dev);
if (!data)
return -EINVAL;
diff --git a/drivers/pci/controller/dwc/pci-layerscape.c b/drivers/pci/controller/dwc/pci-layerscape.c
index 5b9c625df7b8..6a4f0619bb1c 100644
--- a/drivers/pci/controller/dwc/pci-layerscape.c
+++ b/drivers/pci/controller/dwc/pci-layerscape.c
@@ -3,6 +3,7 @@
* PCIe host controller driver for Freescale Layerscape SoCs
*
* Copyright (C) 2014 Freescale Semiconductor.
+ * Copyright 2021 NXP
*
* Author: Minghuan Lian <Minghuan.Lian@freescale.com>
*/
@@ -22,12 +23,6 @@
#include "pcie-designware.h"
-/* PEX1/2 Misc Ports Status Register */
-#define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4)
-#define LTSSM_STATE_SHIFT 20
-#define LTSSM_STATE_MASK 0x3f
-#define LTSSM_PCIE_L0 0x11 /* L0 state */
-
/* PEX Internal Configuration Registers */
#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */
#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */
@@ -35,20 +30,8 @@
#define PCIE_IATU_NUM 6
-struct ls_pcie_drvdata {
- u32 lut_offset;
- u32 ltssm_shift;
- u32 lut_dbg;
- const struct dw_pcie_host_ops *ops;
- const struct dw_pcie_ops *dw_pcie_ops;
-};
-
struct ls_pcie {
struct dw_pcie *pci;
- void __iomem *lut;
- struct regmap *scfg;
- const struct ls_pcie_drvdata *drvdata;
- int index;
};
#define to_ls_pcie(x) dev_get_drvdata((x)->dev)
@@ -83,38 +66,6 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
iowrite32(val, pci->dbi_base + PCIE_STRFMR1);
}
-static int ls1021_pcie_link_up(struct dw_pcie *pci)
-{
- u32 state;
- struct ls_pcie *pcie = to_ls_pcie(pci);
-
- if (!pcie->scfg)
- return 0;
-
- regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state);
- state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
-
- if (state < LTSSM_PCIE_L0)
- return 0;
-
- return 1;
-}
-
-static int ls_pcie_link_up(struct dw_pcie *pci)
-{
- struct ls_pcie *pcie = to_ls_pcie(pci);
- u32 state;
-
- state = (ioread32(pcie->lut + pcie->drvdata->lut_dbg) >>
- pcie->drvdata->ltssm_shift) &
- LTSSM_STATE_MASK;
-
- if (state < LTSSM_PCIE_L0)
- return 0;
-
- return 1;
-}
-
/* Forward error response of outbound non-posted requests */
static void ls_pcie_fix_error_response(struct ls_pcie *pcie)
{
@@ -139,96 +90,20 @@ static int ls_pcie_host_init(struct pcie_port *pp)
return 0;
}
-static int ls1021_pcie_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct ls_pcie *pcie = to_ls_pcie(pci);
- struct device *dev = pci->dev;
- u32 index[2];
- int ret;
-
- pcie->scfg = syscon_regmap_lookup_by_phandle(dev->of_node,
- "fsl,pcie-scfg");
- if (IS_ERR(pcie->scfg)) {
- ret = PTR_ERR(pcie->scfg);
- dev_err(dev, "No syscfg phandle specified\n");
- pcie->scfg = NULL;
- return ret;
- }
-
- if (of_property_read_u32_array(dev->of_node,
- "fsl,pcie-scfg", index, 2)) {
- pcie->scfg = NULL;
- return -EINVAL;
- }
- pcie->index = index[1];
-
- return ls_pcie_host_init(pp);
-}
-
-static const struct dw_pcie_host_ops ls1021_pcie_host_ops = {
- .host_init = ls1021_pcie_host_init,
-};
-
static const struct dw_pcie_host_ops ls_pcie_host_ops = {
.host_init = ls_pcie_host_init,
};
-static const struct dw_pcie_ops dw_ls1021_pcie_ops = {
- .link_up = ls1021_pcie_link_up,
-};
-
-static const struct dw_pcie_ops dw_ls_pcie_ops = {
- .link_up = ls_pcie_link_up,
-};
-
-static const struct ls_pcie_drvdata ls1021_drvdata = {
- .ops = &ls1021_pcie_host_ops,
- .dw_pcie_ops = &dw_ls1021_pcie_ops,
-};
-
-static const struct ls_pcie_drvdata ls1043_drvdata = {
- .lut_offset = 0x10000,
- .ltssm_shift = 24,
- .lut_dbg = 0x7fc,
- .ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
-};
-
-static const struct ls_pcie_drvdata ls1046_drvdata = {
- .lut_offset = 0x80000,
- .ltssm_shift = 24,
- .lut_dbg = 0x407fc,
- .ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
-};
-
-static const struct ls_pcie_drvdata ls2080_drvdata = {
- .lut_offset = 0x80000,
- .ltssm_shift = 0,
- .lut_dbg = 0x7fc,
- .ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
-};
-
-static const struct ls_pcie_drvdata ls2088_drvdata = {
- .lut_offset = 0x80000,
- .ltssm_shift = 0,
- .lut_dbg = 0x407fc,
- .ops = &ls_pcie_host_ops,
- .dw_pcie_ops = &dw_ls_pcie_ops,
-};
-
static const struct of_device_id ls_pcie_of_match[] = {
- { .compatible = "fsl,ls1012a-pcie", .data = &ls1046_drvdata },
- { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata },
- { .compatible = "fsl,ls1028a-pcie", .data = &ls2088_drvdata },
- { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata },
- { .compatible = "fsl,ls1046a-pcie", .data = &ls1046_drvdata },
- { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata },
- { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata },
- { .compatible = "fsl,ls2088a-pcie", .data = &ls2088_drvdata },
- { .compatible = "fsl,ls1088a-pcie", .data = &ls2088_drvdata },
+ { .compatible = "fsl,ls1012a-pcie", },
+ { .compatible = "fsl,ls1021a-pcie", },
+ { .compatible = "fsl,ls1028a-pcie", },
+ { .compatible = "fsl,ls1043a-pcie", },
+ { .compatible = "fsl,ls1046a-pcie", },
+ { .compatible = "fsl,ls2080a-pcie", },
+ { .compatible = "fsl,ls2085a-pcie", },
+ { .compatible = "fsl,ls2088a-pcie", },
+ { .compatible = "fsl,ls1088a-pcie", },
{ },
};
@@ -247,11 +122,8 @@ static int ls_pcie_probe(struct platform_device *pdev)
if (!pci)
return -ENOMEM;
- pcie->drvdata = of_device_get_match_data(dev);
-
pci->dev = dev;
- pci->ops = pcie->drvdata->dw_pcie_ops;
- pci->pp.ops = pcie->drvdata->ops;
+ pci->pp.ops = &ls_pcie_host_ops;
pcie->pci = pci;
@@ -260,8 +132,6 @@ static int ls_pcie_probe(struct platform_device *pdev)
if (IS_ERR(pci->dbi_base))
return PTR_ERR(pci->dbi_base);
- pcie->lut = pci->dbi_base + pcie->drvdata->lut_offset;
-
if (!ls_pcie_is_bridge(pcie))
return -ENODEV;
diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c
index c91fc1954432..2f15441770e1 100644
--- a/drivers/pci/controller/dwc/pcie-artpec6.c
+++ b/drivers/pci/controller/dwc/pcie-artpec6.c
@@ -380,17 +380,15 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
struct dw_pcie *pci;
struct artpec6_pcie *artpec6_pcie;
int ret;
- const struct of_device_id *match;
const struct artpec_pcie_of_data *data;
enum artpec_pcie_variants variant;
enum dw_pcie_device_mode mode;
u32 val;
- match = of_match_device(artpec6_pcie_of_match, dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct artpec_pcie_of_data *)match->data;
variant = (enum artpec_pcie_variants)data->variant;
mode = (enum dw_pcie_device_mode)data->mode;
diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c
index 8851eb161a0e..0c5de87d3cc6 100644
--- a/drivers/pci/controller/dwc/pcie-designware-plat.c
+++ b/drivers/pci/controller/dwc/pcie-designware-plat.c
@@ -122,15 +122,13 @@ static int dw_plat_pcie_probe(struct platform_device *pdev)
struct dw_plat_pcie *dw_plat_pcie;
struct dw_pcie *pci;
int ret;
- const struct of_device_id *match;
const struct dw_plat_pcie_of_data *data;
enum dw_pcie_device_mode mode;
- match = of_match_device(dw_plat_pcie_of_match, dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct dw_plat_pcie_of_data *)match->data;
mode = (enum dw_pcie_device_mode)data->mode;
dw_plat_pcie = devm_kzalloc(dev, sizeof(*dw_plat_pcie), GFP_KERNEL);
diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c
index 850b4533f4ef..d92c8a25094f 100644
--- a/drivers/pci/controller/dwc/pcie-designware.c
+++ b/drivers/pci/controller/dwc/pcie-designware.c
@@ -672,10 +672,11 @@ void dw_pcie_iatu_detect(struct dw_pcie *pci)
if (!pci->atu_base) {
struct resource *res =
platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu");
- if (res)
+ if (res) {
pci->atu_size = resource_size(res);
- pci->atu_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(pci->atu_base))
+ pci->atu_base = devm_ioremap_resource(dev, res);
+ }
+ if (!pci->atu_base || IS_ERR(pci->atu_base))
pci->atu_base = pci->dbi_base + DEFAULT_DBI_ATU_OFFSET;
}
diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c
index 8fc5960faf28..8904b5b85ee5 100644
--- a/drivers/pci/controller/dwc/pcie-hisi.c
+++ b/drivers/pci/controller/dwc/pcie-hisi.c
@@ -18,6 +18,10 @@
#if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
+struct hisi_pcie {
+ void __iomem *reg_base;
+};
+
static int hisi_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
@@ -58,10 +62,10 @@ static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
struct pci_config_window *cfg = bus->sysdata;
- void __iomem *reg_base = cfg->priv;
+ struct hisi_pcie *pcie = cfg->priv;
if (bus->number == cfg->busr.start)
- return reg_base + where;
+ return pcie->reg_base + where;
else
return pci_ecam_map_bus(bus, devfn, where);
}
@@ -71,12 +75,16 @@ static void __iomem *hisi_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
static int hisi_pcie_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
+ struct hisi_pcie *pcie;
struct acpi_device *adev = to_acpi_device(dev);
struct acpi_pci_root *root = acpi_driver_data(adev);
struct resource *res;
- void __iomem *reg_base;
int ret;
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
+
/*
* Retrieve RC base and size from a HISI0081 device with _UID
* matching our segment.
@@ -91,11 +99,11 @@ static int hisi_pcie_init(struct pci_config_window *cfg)
return -ENOMEM;
}
- reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
- if (!reg_base)
+ pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+ if (!pcie->reg_base)
return -ENOMEM;
- cfg->priv = reg_base;
+ cfg->priv = pcie;
return 0;
}
@@ -115,9 +123,13 @@ const struct pci_ecam_ops hisi_pcie_ops = {
static int hisi_pcie_platform_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
+ struct hisi_pcie *pcie;
struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
- void __iomem *reg_base;
+
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
+ return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (!res) {
@@ -125,11 +137,11 @@ static int hisi_pcie_platform_init(struct pci_config_window *cfg)
return -EINVAL;
}
- reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
- if (!reg_base)
+ pcie->reg_base = devm_pci_remap_cfgspace(dev, res->start, resource_size(res));
+ if (!pcie->reg_base)
return -ENOMEM;
- cfg->priv = reg_base;
+ cfg->priv = pcie;
return 0;
}
diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c
index 86f9d16c50d7..410555dccb6d 100644
--- a/drivers/pci/controller/dwc/pcie-histb.c
+++ b/drivers/pci/controller/dwc/pcie-histb.c
@@ -127,10 +127,8 @@ static int histb_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- if (PCI_SLOT(devfn)) {
- *val = ~0;
+ if (PCI_SLOT(devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
*val = dw_pcie_read_dbi(pci, where, size);
return PCIBIOS_SUCCESSFUL;
diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c
index d15cf35fa7f2..5ba144924ff8 100644
--- a/drivers/pci/controller/dwc/pcie-intel-gw.c
+++ b/drivers/pci/controller/dwc/pcie-intel-gw.c
@@ -62,7 +62,7 @@ struct intel_pcie_soc {
unsigned int pcie_ver;
};
-struct intel_pcie_port {
+struct intel_pcie {
struct dw_pcie pci;
void __iomem *app_base;
struct gpio_desc *reset_gpio;
@@ -83,53 +83,53 @@ static void pcie_update_bits(void __iomem *base, u32 ofs, u32 mask, u32 val)
writel(val, base + ofs);
}
-static inline void pcie_app_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
+static inline void pcie_app_wr(struct intel_pcie *pcie, u32 ofs, u32 val)
{
- writel(val, lpp->app_base + ofs);
+ writel(val, pcie->app_base + ofs);
}
-static void pcie_app_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
+static void pcie_app_wr_mask(struct intel_pcie *pcie, u32 ofs,
u32 mask, u32 val)
{
- pcie_update_bits(lpp->app_base, ofs, mask, val);
+ pcie_update_bits(pcie->app_base, ofs, mask, val);
}
-static inline u32 pcie_rc_cfg_rd(struct intel_pcie_port *lpp, u32 ofs)
+static inline u32 pcie_rc_cfg_rd(struct intel_pcie *pcie, u32 ofs)
{
- return dw_pcie_readl_dbi(&lpp->pci, ofs);
+ return dw_pcie_readl_dbi(&pcie->pci, ofs);
}
-static inline void pcie_rc_cfg_wr(struct intel_pcie_port *lpp, u32 ofs, u32 val)
+static inline void pcie_rc_cfg_wr(struct intel_pcie *pcie, u32 ofs, u32 val)
{
- dw_pcie_writel_dbi(&lpp->pci, ofs, val);
+ dw_pcie_writel_dbi(&pcie->pci, ofs, val);
}
-static void pcie_rc_cfg_wr_mask(struct intel_pcie_port *lpp, u32 ofs,
+static void pcie_rc_cfg_wr_mask(struct intel_pcie *pcie, u32 ofs,
u32 mask, u32 val)
{
- pcie_update_bits(lpp->pci.dbi_base, ofs, mask, val);
+ pcie_update_bits(pcie->pci.dbi_base, ofs, mask, val);
}
-static void intel_pcie_ltssm_enable(struct intel_pcie_port *lpp)
+static void intel_pcie_ltssm_enable(struct intel_pcie *pcie)
{
- pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
+ pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE,
PCIE_APP_CCR_LTSSM_ENABLE);
}
-static void intel_pcie_ltssm_disable(struct intel_pcie_port *lpp)
+static void intel_pcie_ltssm_disable(struct intel_pcie *pcie)
{
- pcie_app_wr_mask(lpp, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
+ pcie_app_wr_mask(pcie, PCIE_APP_CCR, PCIE_APP_CCR_LTSSM_ENABLE, 0);
}
-static void intel_pcie_link_setup(struct intel_pcie_port *lpp)
+static void intel_pcie_link_setup(struct intel_pcie *pcie)
{
u32 val;
- u8 offset = dw_pcie_find_capability(&lpp->pci, PCI_CAP_ID_EXP);
+ u8 offset = dw_pcie_find_capability(&pcie->pci, PCI_CAP_ID_EXP);
- val = pcie_rc_cfg_rd(lpp, offset + PCI_EXP_LNKCTL);
+ val = pcie_rc_cfg_rd(pcie, offset + PCI_EXP_LNKCTL);
val &= ~(PCI_EXP_LNKCTL_LD | PCI_EXP_LNKCTL_ASPMC);
- pcie_rc_cfg_wr(lpp, offset + PCI_EXP_LNKCTL, val);
+ pcie_rc_cfg_wr(pcie, offset + PCI_EXP_LNKCTL, val);
}
static void intel_pcie_init_n_fts(struct dw_pcie *pci)
@@ -148,14 +148,14 @@ static void intel_pcie_init_n_fts(struct dw_pcie *pci)
pci->n_fts[0] = PORT_AFR_N_FTS_GEN12_DFT;
}
-static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
+static int intel_pcie_ep_rst_init(struct intel_pcie *pcie)
{
- struct device *dev = lpp->pci.dev;
+ struct device *dev = pcie->pci.dev;
int ret;
- lpp->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
- if (IS_ERR(lpp->reset_gpio)) {
- ret = PTR_ERR(lpp->reset_gpio);
+ pcie->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(pcie->reset_gpio)) {
+ ret = PTR_ERR(pcie->reset_gpio);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to request PCIe GPIO: %d\n", ret);
return ret;
@@ -167,19 +167,19 @@ static int intel_pcie_ep_rst_init(struct intel_pcie_port *lpp)
return 0;
}
-static void intel_pcie_core_rst_assert(struct intel_pcie_port *lpp)
+static void intel_pcie_core_rst_assert(struct intel_pcie *pcie)
{
- reset_control_assert(lpp->core_rst);
+ reset_control_assert(pcie->core_rst);
}
-static void intel_pcie_core_rst_deassert(struct intel_pcie_port *lpp)
+static void intel_pcie_core_rst_deassert(struct intel_pcie *pcie)
{
/*
* One microsecond delay to make sure the reset pulse is
* wide enough so that the core reset is clean.
*/
udelay(1);
- reset_control_deassert(lpp->core_rst);
+ reset_control_deassert(pcie->core_rst);
/*
* On some SoCs the core reset also resets the PHY; more delay is needed
@@ -188,58 +188,58 @@ static void intel_pcie_core_rst_deassert(struct intel_pcie_port *lpp)
usleep_range(1000, 2000);
}
-static void intel_pcie_device_rst_assert(struct intel_pcie_port *lpp)
+static void intel_pcie_device_rst_assert(struct intel_pcie *pcie)
{
- gpiod_set_value_cansleep(lpp->reset_gpio, 1);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
}
-static void intel_pcie_device_rst_deassert(struct intel_pcie_port *lpp)
+static void intel_pcie_device_rst_deassert(struct intel_pcie *pcie)
{
- msleep(lpp->rst_intrvl);
- gpiod_set_value_cansleep(lpp->reset_gpio, 0);
+ msleep(pcie->rst_intrvl);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
}
-static void intel_pcie_core_irq_disable(struct intel_pcie_port *lpp)
+static void intel_pcie_core_irq_disable(struct intel_pcie *pcie)
{
- pcie_app_wr(lpp, PCIE_APP_IRNEN, 0);
- pcie_app_wr(lpp, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
+ pcie_app_wr(pcie, PCIE_APP_IRNEN, 0);
+ pcie_app_wr(pcie, PCIE_APP_IRNCR, PCIE_APP_IRN_INT);
}
static int intel_pcie_get_resources(struct platform_device *pdev)
{
- struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
- struct dw_pcie *pci = &lpp->pci;
+ struct intel_pcie *pcie = platform_get_drvdata(pdev);
+ struct dw_pcie *pci = &pcie->pci;
struct device *dev = pci->dev;
int ret;
- lpp->core_clk = devm_clk_get(dev, NULL);
- if (IS_ERR(lpp->core_clk)) {
- ret = PTR_ERR(lpp->core_clk);
+ pcie->core_clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->core_clk)) {
+ ret = PTR_ERR(pcie->core_clk);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get clks: %d\n", ret);
return ret;
}
- lpp->core_rst = devm_reset_control_get(dev, NULL);
- if (IS_ERR(lpp->core_rst)) {
- ret = PTR_ERR(lpp->core_rst);
+ pcie->core_rst = devm_reset_control_get(dev, NULL);
+ if (IS_ERR(pcie->core_rst)) {
+ ret = PTR_ERR(pcie->core_rst);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Failed to get resets: %d\n", ret);
return ret;
}
ret = device_property_read_u32(dev, "reset-assert-ms",
- &lpp->rst_intrvl);
+ &pcie->rst_intrvl);
if (ret)
- lpp->rst_intrvl = RESET_INTERVAL_MS;
+ pcie->rst_intrvl = RESET_INTERVAL_MS;
- lpp->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
- if (IS_ERR(lpp->app_base))
- return PTR_ERR(lpp->app_base);
+ pcie->app_base = devm_platform_ioremap_resource_byname(pdev, "app");
+ if (IS_ERR(pcie->app_base))
+ return PTR_ERR(pcie->app_base);
- lpp->phy = devm_phy_get(dev, "pcie");
- if (IS_ERR(lpp->phy)) {
- ret = PTR_ERR(lpp->phy);
+ pcie->phy = devm_phy_get(dev, "pcie");
+ if (IS_ERR(pcie->phy)) {
+ ret = PTR_ERR(pcie->phy);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Couldn't get pcie-phy: %d\n", ret);
return ret;
@@ -248,137 +248,137 @@ static int intel_pcie_get_resources(struct platform_device *pdev)
return 0;
}
-static int intel_pcie_wait_l2(struct intel_pcie_port *lpp)
+static int intel_pcie_wait_l2(struct intel_pcie *pcie)
{
u32 value;
int ret;
- struct dw_pcie *pci = &lpp->pci;
+ struct dw_pcie *pci = &pcie->pci;
if (pci->link_gen < 3)
return 0;
/* Send PME_TURN_OFF message */
- pcie_app_wr_mask(lpp, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
+ pcie_app_wr_mask(pcie, PCIE_APP_MSG_CR, PCIE_APP_MSG_XMT_PM_TURNOFF,
PCIE_APP_MSG_XMT_PM_TURNOFF);
/* Read PMC status and wait for falling into L2 link state */
- ret = readl_poll_timeout(lpp->app_base + PCIE_APP_PMC, value,
+ ret = readl_poll_timeout(pcie->app_base + PCIE_APP_PMC, value,
value & PCIE_APP_PMC_IN_L2, 20,
jiffies_to_usecs(5 * HZ));
if (ret)
- dev_err(lpp->pci.dev, "PCIe link enter L2 timeout!\n");
+ dev_err(pcie->pci.dev, "PCIe link enter L2 timeout!\n");
return ret;
}
-static void intel_pcie_turn_off(struct intel_pcie_port *lpp)
+static void intel_pcie_turn_off(struct intel_pcie *pcie)
{
- if (dw_pcie_link_up(&lpp->pci))
- intel_pcie_wait_l2(lpp);
+ if (dw_pcie_link_up(&pcie->pci))
+ intel_pcie_wait_l2(pcie);
/* Put endpoint device in reset state */
- intel_pcie_device_rst_assert(lpp);
- pcie_rc_cfg_wr_mask(lpp, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
+ intel_pcie_device_rst_assert(pcie);
+ pcie_rc_cfg_wr_mask(pcie, PCI_COMMAND, PCI_COMMAND_MEMORY, 0);
}
-static int intel_pcie_host_setup(struct intel_pcie_port *lpp)
+static int intel_pcie_host_setup(struct intel_pcie *pcie)
{
int ret;
- struct dw_pcie *pci = &lpp->pci;
+ struct dw_pcie *pci = &pcie->pci;
- intel_pcie_core_rst_assert(lpp);
- intel_pcie_device_rst_assert(lpp);
+ intel_pcie_core_rst_assert(pcie);
+ intel_pcie_device_rst_assert(pcie);
- ret = phy_init(lpp->phy);
+ ret = phy_init(pcie->phy);
if (ret)
return ret;
- intel_pcie_core_rst_deassert(lpp);
+ intel_pcie_core_rst_deassert(pcie);
- ret = clk_prepare_enable(lpp->core_clk);
+ ret = clk_prepare_enable(pcie->core_clk);
if (ret) {
- dev_err(lpp->pci.dev, "Core clock enable failed: %d\n", ret);
+ dev_err(pcie->pci.dev, "Core clock enable failed: %d\n", ret);
goto clk_err;
}
pci->atu_base = pci->dbi_base + 0xC0000;
- intel_pcie_ltssm_disable(lpp);
- intel_pcie_link_setup(lpp);
+ intel_pcie_ltssm_disable(pcie);
+ intel_pcie_link_setup(pcie);
intel_pcie_init_n_fts(pci);
dw_pcie_setup_rc(&pci->pp);
dw_pcie_upconfig_setup(pci);
- intel_pcie_device_rst_deassert(lpp);
- intel_pcie_ltssm_enable(lpp);
+ intel_pcie_device_rst_deassert(pcie);
+ intel_pcie_ltssm_enable(pcie);
ret = dw_pcie_wait_for_link(pci);
if (ret)
goto app_init_err;
/* Enable integrated interrupts */
- pcie_app_wr_mask(lpp, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
+ pcie_app_wr_mask(pcie, PCIE_APP_IRNEN, PCIE_APP_IRN_INT,
PCIE_APP_IRN_INT);
return 0;
app_init_err:
- clk_disable_unprepare(lpp->core_clk);
+ clk_disable_unprepare(pcie->core_clk);
clk_err:
- intel_pcie_core_rst_assert(lpp);
- phy_exit(lpp->phy);
+ intel_pcie_core_rst_assert(pcie);
+ phy_exit(pcie->phy);
return ret;
}
-static void __intel_pcie_remove(struct intel_pcie_port *lpp)
+static void __intel_pcie_remove(struct intel_pcie *pcie)
{
- intel_pcie_core_irq_disable(lpp);
- intel_pcie_turn_off(lpp);
- clk_disable_unprepare(lpp->core_clk);
- intel_pcie_core_rst_assert(lpp);
- phy_exit(lpp->phy);
+ intel_pcie_core_irq_disable(pcie);
+ intel_pcie_turn_off(pcie);
+ clk_disable_unprepare(pcie->core_clk);
+ intel_pcie_core_rst_assert(pcie);
+ phy_exit(pcie->phy);
}
static int intel_pcie_remove(struct platform_device *pdev)
{
- struct intel_pcie_port *lpp = platform_get_drvdata(pdev);
- struct pcie_port *pp = &lpp->pci.pp;
+ struct intel_pcie *pcie = platform_get_drvdata(pdev);
+ struct pcie_port *pp = &pcie->pci.pp;
dw_pcie_host_deinit(pp);
- __intel_pcie_remove(lpp);
+ __intel_pcie_remove(pcie);
return 0;
}
static int __maybe_unused intel_pcie_suspend_noirq(struct device *dev)
{
- struct intel_pcie_port *lpp = dev_get_drvdata(dev);
+ struct intel_pcie *pcie = dev_get_drvdata(dev);
int ret;
- intel_pcie_core_irq_disable(lpp);
- ret = intel_pcie_wait_l2(lpp);
+ intel_pcie_core_irq_disable(pcie);
+ ret = intel_pcie_wait_l2(pcie);
if (ret)
return ret;
- phy_exit(lpp->phy);
- clk_disable_unprepare(lpp->core_clk);
+ phy_exit(pcie->phy);
+ clk_disable_unprepare(pcie->core_clk);
return ret;
}
static int __maybe_unused intel_pcie_resume_noirq(struct device *dev)
{
- struct intel_pcie_port *lpp = dev_get_drvdata(dev);
+ struct intel_pcie *pcie = dev_get_drvdata(dev);
- return intel_pcie_host_setup(lpp);
+ return intel_pcie_host_setup(pcie);
}
static int intel_pcie_rc_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct intel_pcie_port *lpp = dev_get_drvdata(pci->dev);
+ struct intel_pcie *pcie = dev_get_drvdata(pci->dev);
- return intel_pcie_host_setup(lpp);
+ return intel_pcie_host_setup(pcie);
}
static u64 intel_pcie_cpu_addr(struct dw_pcie *pcie, u64 cpu_addr)
@@ -402,17 +402,17 @@ static int intel_pcie_probe(struct platform_device *pdev)
{
const struct intel_pcie_soc *data;
struct device *dev = &pdev->dev;
- struct intel_pcie_port *lpp;
+ struct intel_pcie *pcie;
struct pcie_port *pp;
struct dw_pcie *pci;
int ret;
- lpp = devm_kzalloc(dev, sizeof(*lpp), GFP_KERNEL);
- if (!lpp)
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
return -ENOMEM;
- platform_set_drvdata(pdev, lpp);
- pci = &lpp->pci;
+ platform_set_drvdata(pdev, pcie);
+ pci = &pcie->pci;
pci->dev = dev;
pp = &pci->pp;
@@ -420,7 +420,7 @@ static int intel_pcie_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = intel_pcie_ep_rst_init(lpp);
+ ret = intel_pcie_ep_rst_init(pcie);
if (ret)
return ret;
diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c
index 095afbccf9c1..c625fc6bb287 100644
--- a/drivers/pci/controller/dwc/pcie-kirin.c
+++ b/drivers/pci/controller/dwc/pcie-kirin.c
@@ -530,10 +530,8 @@ static int kirin_pcie_rd_own_conf(struct pci_bus *bus, unsigned int devfn,
{
struct dw_pcie *pci = to_dw_pcie_from_pp(bus->sysdata);
- if (PCI_SLOT(devfn)) {
- *val = ~0;
+ if (PCI_SLOT(devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
*val = dw_pcie_read_dbi(pci, where, size);
return PCIBIOS_SUCCESSFUL;
@@ -758,23 +756,28 @@ static int __exit kirin_pcie_remove(struct platform_device *pdev)
return 0;
}
+struct kirin_pcie_data {
+ enum pcie_kirin_phy_type phy_type;
+};
+
+static const struct kirin_pcie_data kirin_960_data = {
+ .phy_type = PCIE_KIRIN_INTERNAL_PHY,
+};
+
+static const struct kirin_pcie_data kirin_970_data = {
+ .phy_type = PCIE_KIRIN_EXTERNAL_PHY,
+};
+
static const struct of_device_id kirin_pcie_match[] = {
- {
- .compatible = "hisilicon,kirin960-pcie",
- .data = (void *)PCIE_KIRIN_INTERNAL_PHY
- },
- {
- .compatible = "hisilicon,kirin970-pcie",
- .data = (void *)PCIE_KIRIN_EXTERNAL_PHY
- },
+ { .compatible = "hisilicon,kirin960-pcie", .data = &kirin_960_data },
+ { .compatible = "hisilicon,kirin970-pcie", .data = &kirin_970_data },
{},
};
static int kirin_pcie_probe(struct platform_device *pdev)
{
- enum pcie_kirin_phy_type phy_type;
- const struct of_device_id *of_id;
struct device *dev = &pdev->dev;
+ const struct kirin_pcie_data *data;
struct kirin_pcie *kirin_pcie;
struct dw_pcie *pci;
int ret;
@@ -784,14 +787,12 @@ static int kirin_pcie_probe(struct platform_device *pdev)
return -EINVAL;
}
- of_id = of_match_device(kirin_pcie_match, dev);
- if (!of_id) {
+ data = of_device_get_match_data(dev);
+ if (!data) {
dev_err(dev, "OF data missing\n");
return -EINVAL;
}
- phy_type = (long)of_id->data;
-
kirin_pcie = devm_kzalloc(dev, sizeof(struct kirin_pcie), GFP_KERNEL);
if (!kirin_pcie)
return -ENOMEM;
@@ -804,7 +805,7 @@ static int kirin_pcie_probe(struct platform_device *pdev)
pci->ops = &kirin_dw_pcie_ops;
pci->pp.ops = &kirin_pcie_host_ops;
kirin_pcie->pci = pci;
- kirin_pcie->type = phy_type;
+ kirin_pcie->type = data->phy_type;
ret = kirin_pcie_get_resource(kirin_pcie, pdev);
if (ret)
diff --git a/drivers/pci/controller/dwc/pcie-qcom-ep.c b/drivers/pci/controller/dwc/pcie-qcom-ep.c
index cfe66bf04c1d..6ce8eddf3a37 100644
--- a/drivers/pci/controller/dwc/pcie-qcom-ep.c
+++ b/drivers/pci/controller/dwc/pcie-qcom-ep.c
@@ -553,10 +553,8 @@ static int qcom_pcie_ep_enable_irq_resources(struct platform_device *pdev,
int irq, ret;
irq = platform_get_irq_byname(pdev, "global");
- if (irq < 0) {
- dev_err(&pdev->dev, "Failed to get Global IRQ\n");
+ if (irq < 0)
return irq;
- }
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
qcom_pcie_ep_global_irq_thread,
@@ -620,7 +618,7 @@ static void qcom_pcie_ep_init(struct dw_pcie_ep *ep)
dw_pcie_ep_reset_bar(pci, bar);
}
-static struct dw_pcie_ep_ops pci_ep_ops = {
+static const struct dw_pcie_ep_ops pci_ep_ops = {
.ep_init = qcom_pcie_ep_init,
.raise_irq = qcom_pcie_ep_raise_irq,
.get_features = qcom_pcie_epc_get_features,
diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
index 1c3d1116bb60..c19cd506ed3f 100644
--- a/drivers/pci/controller/dwc/pcie-qcom.c
+++ b/drivers/pci/controller/dwc/pcie-qcom.c
@@ -1343,7 +1343,7 @@ static int qcom_pcie_config_sid_sm8250(struct qcom_pcie *pcie)
/* Look for an available entry to hold the mapping */
for (i = 0; i < nr_map; i++) {
- u16 bdf_be = cpu_to_be16(map[i].bdf);
+ __be16 bdf_be = cpu_to_be16(map[i].bdf);
u32 val;
u8 hash;
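Declaring bdf_be as __be16 instead of u16 encodes the byte order in the type, so sparse flags any mix with native-endian values. A small illustrative sketch (the function is hypothetical):

	#include <linux/string.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	static void bdf_to_wire(u16 bdf, u8 out[2])
	{
		__be16 bdf_be = cpu_to_be16(bdf);	/* endian-typed */

		/* assigning bdf_be to a plain u16 would trip sparse */
		memcpy(out, &bdf_be, sizeof(bdf_be));
	}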
@@ -1534,6 +1534,12 @@ static int qcom_pcie_probe(struct platform_device *pdev)
const struct qcom_pcie_cfg *pcie_cfg;
int ret;
+ pcie_cfg = of_device_get_match_data(dev);
+ if (!pcie_cfg || !pcie_cfg->ops) {
+ dev_err(dev, "Invalid platform data\n");
+ return -EINVAL;
+ }
+
pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
if (!pcie)
return -ENOMEM;
@@ -1553,12 +1559,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pcie->pci = pci;
- pcie_cfg = of_device_get_match_data(dev);
- if (!pcie_cfg || !pcie_cfg->ops) {
- dev_err(dev, "Invalid platform data\n");
- return -EINVAL;
- }
-
pcie->ops = pcie_cfg->ops;
pcie->pipe_clk_need_muxing = pcie_cfg->pipe_clk_need_muxing;
diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c
index 1a9e353bef55..1569e82b5568 100644
--- a/drivers/pci/controller/dwc/pcie-spear13xx.c
+++ b/drivers/pci/controller/dwc/pcie-spear13xx.c
@@ -69,7 +69,7 @@ struct pcie_app_reg {
static int spear13xx_pcie_start_link(struct dw_pcie *pci)
{
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
- struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
/* enable ltssm */
writel(DEVICE_TYPE_RC | (1 << MISCTRL_EN_ID)
@@ -83,7 +83,7 @@ static int spear13xx_pcie_start_link(struct dw_pcie *pci)
static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
{
struct spear13xx_pcie *spear13xx_pcie = arg;
- struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
struct dw_pcie *pci = spear13xx_pcie->pci;
struct pcie_port *pp = &pci->pp;
unsigned int status;
@@ -102,7 +102,7 @@ static irqreturn_t spear13xx_pcie_irq_handler(int irq, void *arg)
static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pcie)
{
- struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
/* Enable MSI interrupt */
if (IS_ENABLED(CONFIG_PCI_MSI))
@@ -113,7 +113,7 @@ static void spear13xx_pcie_enable_interrupts(struct spear13xx_pcie *spear13xx_pc
static int spear13xx_pcie_link_up(struct dw_pcie *pci)
{
struct spear13xx_pcie *spear13xx_pcie = to_spear13xx_pcie(pci);
- struct pcie_app_reg *app_reg = spear13xx_pcie->app_base;
+ struct pcie_app_reg __iomem *app_reg = spear13xx_pcie->app_base;
if (readl(&app_reg->app_status_1) & XMLH_LINK_UP)
return 1;
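The __iomem annotations on the struct pcie_app_reg pointers document that the mapping is MMIO and let sparse catch plain dereferences, which lack the volatile/ordering guarantees of the accessors. A minimal sketch (register layout hypothetical):

	#include <linux/io.h>
	#include <linux/types.h>

	struct app_regs {
		u32 app_ctrl_0;		/* hypothetical register */
	};

	static u32 app_read_ctrl(struct app_regs __iomem *regs)
	{
		/* readl() gives the required MMIO access semantics; a plain
		 * *regs dereference would be flagged by sparse.
		 */
		return readl(&regs->app_ctrl_0);
	}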
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 904976913081..b1b5f836a806 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -245,7 +245,7 @@ static const unsigned int pcie_gen_freq[] = {
GEN4_CORE_CLK_FREQ
};
-struct tegra_pcie_dw {
+struct tegra194_pcie {
struct device *dev;
struct resource *appl_res;
struct resource *dbi_res;
@@ -289,22 +289,22 @@ struct tegra_pcie_dw {
int ep_state;
};
-struct tegra_pcie_dw_of_data {
+struct tegra194_pcie_of_data {
enum dw_pcie_device_mode mode;
};
-static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
+static inline struct tegra194_pcie *to_tegra_pcie(struct dw_pcie *pci)
{
- return container_of(pci, struct tegra_pcie_dw, pci);
+ return container_of(pci, struct tegra194_pcie, pci);
}
-static inline void appl_writel(struct tegra_pcie_dw *pcie, const u32 value,
+static inline void appl_writel(struct tegra194_pcie *pcie, const u32 value,
const u32 reg)
{
writel_relaxed(value, pcie->appl_base + reg);
}
-static inline u32 appl_readl(struct tegra_pcie_dw *pcie, const u32 reg)
+static inline u32 appl_readl(struct tegra194_pcie *pcie, const u32 reg)
{
return readl_relaxed(pcie->appl_base + reg);
}
@@ -316,7 +316,7 @@ struct tegra_pcie_soc {
static void apply_bad_link_workaround(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
u32 current_link_width;
u16 val;
@@ -349,7 +349,7 @@ static void apply_bad_link_workaround(struct pcie_port *pp)
static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
{
- struct tegra_pcie_dw *pcie = arg;
+ struct tegra194_pcie *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
struct pcie_port *pp = &pci->pp;
u32 val, tmp;
@@ -420,7 +420,7 @@ static irqreturn_t tegra_pcie_rp_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
-static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
+static void pex_ep_event_hot_rst_done(struct tegra194_pcie *pcie)
{
u32 val;
@@ -448,7 +448,7 @@ static void pex_ep_event_hot_rst_done(struct tegra_pcie_dw *pcie)
static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
{
- struct tegra_pcie_dw *pcie = arg;
+ struct tegra194_pcie *pcie = arg;
struct dw_pcie *pci = &pcie->pci;
u32 val, speed;
@@ -494,7 +494,7 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
{
- struct tegra_pcie_dw *pcie = arg;
+ struct tegra194_pcie *pcie = arg;
struct dw_pcie_ep *ep = &pcie->pci.ep;
int spurious = 1;
u32 status_l0, status_l1, link_status;
@@ -537,7 +537,7 @@ static irqreturn_t tegra_pcie_ep_hard_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
+static int tegra194_pcie_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
/*
@@ -554,7 +554,7 @@ static int tegra_pcie_dw_rd_own_conf(struct pci_bus *bus, u32 devfn, int where,
return pci_generic_config_read(bus, devfn, where, size, val);
}
-static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
+static int tegra194_pcie_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 val)
{
/*
@@ -571,8 +571,8 @@ static int tegra_pcie_dw_wr_own_conf(struct pci_bus *bus, u32 devfn, int where,
static struct pci_ops tegra_pci_ops = {
.map_bus = dw_pcie_own_conf_map_bus,
- .read = tegra_pcie_dw_rd_own_conf,
- .write = tegra_pcie_dw_wr_own_conf,
+ .read = tegra194_pcie_rd_own_conf,
+ .write = tegra194_pcie_wr_own_conf,
};
#if defined(CONFIG_PCIEASPM)
@@ -594,7 +594,7 @@ static const u32 event_cntr_data_offset[] = {
0x1dc
};
-static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
+static void disable_aspm_l11(struct tegra194_pcie *pcie)
{
u32 val;
@@ -603,7 +603,7 @@ static void disable_aspm_l11(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}
-static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
+static void disable_aspm_l12(struct tegra194_pcie *pcie)
{
u32 val;
@@ -612,7 +612,7 @@ static void disable_aspm_l12(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(&pcie->pci, pcie->cfg_link_cap_l1sub, val);
}
-static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
+static inline u32 event_counter_prog(struct tegra194_pcie *pcie, u32 event)
{
u32 val;
@@ -629,7 +629,7 @@ static inline u32 event_counter_prog(struct tegra_pcie_dw *pcie, u32 event)
static int aspm_state_cnt(struct seq_file *s, void *data)
{
- struct tegra_pcie_dw *pcie = (struct tegra_pcie_dw *)
+ struct tegra194_pcie *pcie = (struct tegra194_pcie *)
dev_get_drvdata(s->private);
u32 val;
@@ -660,7 +660,7 @@ static int aspm_state_cnt(struct seq_file *s, void *data)
return 0;
}
-static void init_host_aspm(struct tegra_pcie_dw *pcie)
+static void init_host_aspm(struct tegra194_pcie *pcie)
{
struct dw_pcie *pci = &pcie->pci;
u32 val;
@@ -688,22 +688,22 @@ static void init_host_aspm(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, PCIE_PORT_AFR, val);
}
-static void init_debugfs(struct tegra_pcie_dw *pcie)
+static void init_debugfs(struct tegra194_pcie *pcie)
{
debugfs_create_devm_seqfile(pcie->dev, "aspm_state_cnt", pcie->debugfs,
aspm_state_cnt);
}
#else
-static inline void disable_aspm_l12(struct tegra_pcie_dw *pcie) { return; }
-static inline void disable_aspm_l11(struct tegra_pcie_dw *pcie) { return; }
-static inline void init_host_aspm(struct tegra_pcie_dw *pcie) { return; }
-static inline void init_debugfs(struct tegra_pcie_dw *pcie) { return; }
+static inline void disable_aspm_l12(struct tegra194_pcie *pcie) { return; }
+static inline void disable_aspm_l11(struct tegra194_pcie *pcie) { return; }
+static inline void init_host_aspm(struct tegra194_pcie *pcie) { return; }
+static inline void init_debugfs(struct tegra194_pcie *pcie) { return; }
#endif
static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
u32 val;
u16 val_w;
@@ -741,7 +741,7 @@ static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
u32 val;
/* Enable legacy interrupt generation */
@@ -762,7 +762,7 @@ static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
u32 val;
/* Enable MSI interrupt generation */
@@ -775,7 +775,7 @@ static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
/* Clear interrupt statuses before enabling interrupts */
appl_writel(pcie, 0xFFFFFFFF, APPL_INTR_STATUS_L0);
@@ -800,7 +800,7 @@ static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
tegra_pcie_enable_msi_interrupts(pp);
}
-static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
+static void config_gen3_gen4_eq_presets(struct tegra194_pcie *pcie)
{
struct dw_pcie *pci = &pcie->pci;
u32 val, offset, i;
@@ -853,10 +853,10 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
-static int tegra_pcie_dw_host_init(struct pcie_port *pp)
+static int tegra194_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
u32 val;
pp->bridge->ops = &tegra_pci_ops;
@@ -914,10 +914,10 @@ static int tegra_pcie_dw_host_init(struct pcie_port *pp)
return 0;
}
-static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
+static int tegra194_pcie_start_link(struct dw_pcie *pci)
{
u32 val, offset, speed, tmp;
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
struct pcie_port *pp = &pci->pp;
bool retry = true;
@@ -982,7 +982,7 @@ retry_link:
val &= ~PCI_DLF_EXCHANGE_ENABLE;
dw_pcie_writel_dbi(pci, offset, val);
- tegra_pcie_dw_host_init(pp);
+ tegra194_pcie_host_init(pp);
dw_pcie_setup_rc(pp);
retry = false;
@@ -998,32 +998,32 @@ retry_link:
return 0;
}
-static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
+static int tegra194_pcie_link_up(struct dw_pcie *pci)
{
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
u32 val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
-static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
+static void tegra194_pcie_stop_link(struct dw_pcie *pci)
{
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
disable_irq(pcie->pex_rst_irq);
}
static const struct dw_pcie_ops tegra_dw_pcie_ops = {
- .link_up = tegra_pcie_dw_link_up,
- .start_link = tegra_pcie_dw_start_link,
- .stop_link = tegra_pcie_dw_stop_link,
+ .link_up = tegra194_pcie_link_up,
+ .start_link = tegra194_pcie_start_link,
+ .stop_link = tegra194_pcie_stop_link,
};
-static const struct dw_pcie_host_ops tegra_pcie_dw_host_ops = {
- .host_init = tegra_pcie_dw_host_init,
+static const struct dw_pcie_host_ops tegra194_pcie_host_ops = {
+ .host_init = tegra194_pcie_host_init,
};
-static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_disable_phy(struct tegra194_pcie *pcie)
{
unsigned int phy_count = pcie->phy_count;
@@ -1033,7 +1033,7 @@ static void tegra_pcie_disable_phy(struct tegra_pcie_dw *pcie)
}
}
-static int tegra_pcie_enable_phy(struct tegra_pcie_dw *pcie)
+static int tegra_pcie_enable_phy(struct tegra194_pcie *pcie)
{
unsigned int i;
int ret;
@@ -1060,7 +1060,7 @@ phy_exit:
return ret;
}
-static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
+static int tegra194_pcie_parse_dt(struct tegra194_pcie *pcie)
{
struct platform_device *pdev = to_platform_device(pcie->dev);
struct device_node *np = pcie->dev->of_node;
@@ -1156,7 +1156,7 @@ static int tegra_pcie_dw_parse_dt(struct tegra_pcie_dw *pcie)
return 0;
}
-static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
+static int tegra_pcie_bpmp_set_ctrl_state(struct tegra194_pcie *pcie,
bool enable)
{
struct mrq_uphy_response resp;
@@ -1184,7 +1184,7 @@ static int tegra_pcie_bpmp_set_ctrl_state(struct tegra_pcie_dw *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
-static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
+static int tegra_pcie_bpmp_set_pll_state(struct tegra194_pcie *pcie,
bool enable)
{
struct mrq_uphy_response resp;
@@ -1212,7 +1212,7 @@ static int tegra_pcie_bpmp_set_pll_state(struct tegra_pcie_dw *pcie,
return tegra_bpmp_transfer(pcie->bpmp, &msg);
}
-static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_downstream_dev_to_D0(struct tegra194_pcie *pcie)
{
struct pcie_port *pp = &pcie->pci.pp;
struct pci_bus *child, *root_bus = NULL;
@@ -1250,7 +1250,7 @@ static void tegra_pcie_downstream_dev_to_D0(struct tegra_pcie_dw *pcie)
}
}
-static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
+static int tegra_pcie_get_slot_regulators(struct tegra194_pcie *pcie)
{
pcie->slot_ctl_3v3 = devm_regulator_get_optional(pcie->dev, "vpcie3v3");
if (IS_ERR(pcie->slot_ctl_3v3)) {
@@ -1271,7 +1271,7 @@ static int tegra_pcie_get_slot_regulators(struct tegra_pcie_dw *pcie)
return 0;
}
-static int tegra_pcie_enable_slot_regulators(struct tegra_pcie_dw *pcie)
+static int tegra_pcie_enable_slot_regulators(struct tegra194_pcie *pcie)
{
int ret;
@@ -1309,7 +1309,7 @@ fail_12v_enable:
return ret;
}
-static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_disable_slot_regulators(struct tegra194_pcie *pcie)
{
if (pcie->slot_ctl_12v)
regulator_disable(pcie->slot_ctl_12v);
@@ -1317,7 +1317,7 @@ static void tegra_pcie_disable_slot_regulators(struct tegra_pcie_dw *pcie)
regulator_disable(pcie->slot_ctl_3v3);
}
-static int tegra_pcie_config_controller(struct tegra_pcie_dw *pcie,
+static int tegra_pcie_config_controller(struct tegra194_pcie *pcie,
bool en_hw_hot_rst)
{
int ret;
@@ -1414,7 +1414,7 @@ fail_slot_reg_en:
return ret;
}
-static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_unconfig_controller(struct tegra194_pcie *pcie)
{
int ret;
@@ -1442,7 +1442,7 @@ static void tegra_pcie_unconfig_controller(struct tegra_pcie_dw *pcie)
pcie->cid, ret);
}
-static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
+static int tegra_pcie_init_controller(struct tegra194_pcie *pcie)
{
struct dw_pcie *pci = &pcie->pci;
struct pcie_port *pp = &pci->pp;
@@ -1452,7 +1452,7 @@ static int tegra_pcie_init_controller(struct tegra_pcie_dw *pcie)
if (ret < 0)
return ret;
- pp->ops = &tegra_pcie_dw_host_ops;
+ pp->ops = &tegra194_pcie_host_ops;
ret = dw_pcie_host_init(pp);
if (ret < 0) {
@@ -1467,11 +1467,11 @@ fail_host_init:
return ret;
}
-static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
+static int tegra_pcie_try_link_l2(struct tegra194_pcie *pcie)
{
u32 val;
- if (!tegra_pcie_dw_link_up(&pcie->pci))
+ if (!tegra194_pcie_link_up(&pcie->pci))
return 0;
val = appl_readl(pcie, APPL_RADM_STATUS);
@@ -1483,12 +1483,12 @@ static int tegra_pcie_try_link_l2(struct tegra_pcie_dw *pcie)
1, PME_ACK_TIMEOUT);
}
-static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
+static void tegra194_pcie_pme_turnoff(struct tegra194_pcie *pcie)
{
u32 data;
int err;
- if (!tegra_pcie_dw_link_up(&pcie->pci)) {
+ if (!tegra194_pcie_link_up(&pcie->pci)) {
dev_dbg(pcie->dev, "PCIe link is not up...!\n");
return;
}
@@ -1545,15 +1545,15 @@ static void tegra_pcie_dw_pme_turnoff(struct tegra_pcie_dw *pcie)
appl_writel(pcie, data, APPL_PINMUX);
}
-static void tegra_pcie_deinit_controller(struct tegra_pcie_dw *pcie)
+static void tegra_pcie_deinit_controller(struct tegra194_pcie *pcie)
{
tegra_pcie_downstream_dev_to_D0(pcie);
dw_pcie_host_deinit(&pcie->pci.pp);
- tegra_pcie_dw_pme_turnoff(pcie);
+ tegra194_pcie_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
}
-static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
+static int tegra_pcie_config_rp(struct tegra194_pcie *pcie)
{
struct device *dev = pcie->dev;
char *name;
@@ -1580,7 +1580,7 @@ static int tegra_pcie_config_rp(struct tegra_pcie_dw *pcie)
goto fail_pm_get_sync;
}
- pcie->link_state = tegra_pcie_dw_link_up(&pcie->pci);
+ pcie->link_state = tegra194_pcie_link_up(&pcie->pci);
if (!pcie->link_state) {
ret = -ENOMEDIUM;
goto fail_host_init;
@@ -1605,7 +1605,7 @@ fail_pm_get_sync:
return ret;
}
-static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
+static void pex_ep_event_pex_rst_assert(struct tegra194_pcie *pcie)
{
u32 val;
int ret;
@@ -1644,7 +1644,7 @@ static void pex_ep_event_pex_rst_assert(struct tegra_pcie_dw *pcie)
dev_dbg(pcie->dev, "Uninitialization of endpoint is completed\n");
}
-static void pex_ep_event_pex_rst_deassert(struct tegra_pcie_dw *pcie)
+static void pex_ep_event_pex_rst_deassert(struct tegra194_pcie *pcie)
{
struct dw_pcie *pci = &pcie->pci;
struct dw_pcie_ep *ep = &pci->ep;
@@ -1809,7 +1809,7 @@ fail_pll_init:
static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
{
- struct tegra_pcie_dw *pcie = arg;
+ struct tegra194_pcie *pcie = arg;
if (gpiod_get_value(pcie->pex_rst_gpiod))
pex_ep_event_pex_rst_assert(pcie);
@@ -1819,7 +1819,7 @@ static irqreturn_t tegra_pcie_ep_pex_rst_irq(int irq, void *arg)
return IRQ_HANDLED;
}
-static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
+static int tegra_pcie_ep_raise_legacy_irq(struct tegra194_pcie *pcie, u16 irq)
{
/* Tegra194 supports only INTA */
if (irq > 1)
@@ -1831,7 +1831,7 @@ static int tegra_pcie_ep_raise_legacy_irq(struct tegra_pcie_dw *pcie, u16 irq)
return 0;
}
-static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
+static int tegra_pcie_ep_raise_msi_irq(struct tegra194_pcie *pcie, u16 irq)
{
if (unlikely(irq > 31))
return -EINVAL;
@@ -1841,7 +1841,7 @@ static int tegra_pcie_ep_raise_msi_irq(struct tegra_pcie_dw *pcie, u16 irq)
return 0;
}
-static int tegra_pcie_ep_raise_msix_irq(struct tegra_pcie_dw *pcie, u16 irq)
+static int tegra_pcie_ep_raise_msix_irq(struct tegra194_pcie *pcie, u16 irq)
{
struct dw_pcie_ep *ep = &pcie->pci.ep;
@@ -1855,7 +1855,7 @@ static int tegra_pcie_ep_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
u16 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct tegra194_pcie *pcie = to_tegra_pcie(pci);
switch (type) {
case PCI_EPC_IRQ_LEGACY:
@@ -1896,7 +1896,7 @@ static const struct dw_pcie_ep_ops pcie_ep_ops = {
.get_features = tegra_pcie_ep_get_features,
};
-static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
+static int tegra_pcie_config_ep(struct tegra194_pcie *pcie,
struct platform_device *pdev)
{
struct dw_pcie *pci = &pcie->pci;
@@ -1957,12 +1957,12 @@ static int tegra_pcie_config_ep(struct tegra_pcie_dw *pcie,
return 0;
}
-static int tegra_pcie_dw_probe(struct platform_device *pdev)
+static int tegra194_pcie_probe(struct platform_device *pdev)
{
- const struct tegra_pcie_dw_of_data *data;
+ const struct tegra194_pcie_of_data *data;
struct device *dev = &pdev->dev;
struct resource *atu_dma_res;
- struct tegra_pcie_dw *pcie;
+ struct tegra194_pcie *pcie;
struct pcie_port *pp;
struct dw_pcie *pci;
struct phy **phys;
@@ -1988,7 +1988,7 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
pcie->dev = &pdev->dev;
pcie->mode = (enum dw_pcie_device_mode)data->mode;
- ret = tegra_pcie_dw_parse_dt(pcie);
+ ret = tegra194_pcie_parse_dt(pcie);
if (ret < 0) {
const char *level = KERN_ERR;
@@ -2146,9 +2146,9 @@ fail:
return ret;
}
-static int tegra_pcie_dw_remove(struct platform_device *pdev)
+static int tegra194_pcie_remove(struct platform_device *pdev)
{
- struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+ struct tegra194_pcie *pcie = platform_get_drvdata(pdev);
if (!pcie->link_state)
return 0;
@@ -2164,9 +2164,9 @@ static int tegra_pcie_dw_remove(struct platform_device *pdev)
return 0;
}
-static int tegra_pcie_dw_suspend_late(struct device *dev)
+static int tegra194_pcie_suspend_late(struct device *dev)
{
- struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ struct tegra194_pcie *pcie = dev_get_drvdata(dev);
u32 val;
if (!pcie->link_state)
@@ -2182,9 +2182,9 @@ static int tegra_pcie_dw_suspend_late(struct device *dev)
return 0;
}
-static int tegra_pcie_dw_suspend_noirq(struct device *dev)
+static int tegra194_pcie_suspend_noirq(struct device *dev)
{
- struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ struct tegra194_pcie *pcie = dev_get_drvdata(dev);
if (!pcie->link_state)
return 0;
@@ -2193,15 +2193,15 @@ static int tegra_pcie_dw_suspend_noirq(struct device *dev)
pcie->msi_ctrl_int = dw_pcie_readl_dbi(&pcie->pci,
PORT_LOGIC_MSI_CTRL_INT_0_EN);
tegra_pcie_downstream_dev_to_D0(pcie);
- tegra_pcie_dw_pme_turnoff(pcie);
+ tegra194_pcie_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
return 0;
}
-static int tegra_pcie_dw_resume_noirq(struct device *dev)
+static int tegra194_pcie_resume_noirq(struct device *dev)
{
- struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ struct tegra194_pcie *pcie = dev_get_drvdata(dev);
int ret;
if (!pcie->link_state)
@@ -2211,7 +2211,7 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
if (ret < 0)
return ret;
- ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
+ ret = tegra194_pcie_host_init(&pcie->pci.pp);
if (ret < 0) {
dev_err(dev, "Failed to init host: %d\n", ret);
goto fail_host_init;
@@ -2219,7 +2219,7 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
dw_pcie_setup_rc(&pcie->pci.pp);
- ret = tegra_pcie_dw_start_link(&pcie->pci);
+ ret = tegra194_pcie_start_link(&pcie->pci);
if (ret < 0)
goto fail_host_init;
@@ -2234,9 +2234,9 @@ fail_host_init:
return ret;
}
-static int tegra_pcie_dw_resume_early(struct device *dev)
+static int tegra194_pcie_resume_early(struct device *dev)
{
- struct tegra_pcie_dw *pcie = dev_get_drvdata(dev);
+ struct tegra194_pcie *pcie = dev_get_drvdata(dev);
u32 val;
if (pcie->mode == DW_PCIE_EP_TYPE) {
@@ -2259,9 +2259,9 @@ static int tegra_pcie_dw_resume_early(struct device *dev)
return 0;
}
-static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
+static void tegra194_pcie_shutdown(struct platform_device *pdev)
{
- struct tegra_pcie_dw *pcie = platform_get_drvdata(pdev);
+ struct tegra194_pcie *pcie = platform_get_drvdata(pdev);
if (!pcie->link_state)
return;
@@ -2273,50 +2273,50 @@ static void tegra_pcie_dw_shutdown(struct platform_device *pdev)
if (IS_ENABLED(CONFIG_PCI_MSI))
disable_irq(pcie->pci.pp.msi_irq);
- tegra_pcie_dw_pme_turnoff(pcie);
+ tegra194_pcie_pme_turnoff(pcie);
tegra_pcie_unconfig_controller(pcie);
}
-static const struct tegra_pcie_dw_of_data tegra_pcie_dw_rc_of_data = {
+static const struct tegra194_pcie_of_data tegra194_pcie_rc_of_data = {
.mode = DW_PCIE_RC_TYPE,
};
-static const struct tegra_pcie_dw_of_data tegra_pcie_dw_ep_of_data = {
+static const struct tegra194_pcie_of_data tegra194_pcie_ep_of_data = {
.mode = DW_PCIE_EP_TYPE,
};
-static const struct of_device_id tegra_pcie_dw_of_match[] = {
+static const struct of_device_id tegra194_pcie_of_match[] = {
{
.compatible = "nvidia,tegra194-pcie",
- .data = &tegra_pcie_dw_rc_of_data,
+ .data = &tegra194_pcie_rc_of_data,
},
{
.compatible = "nvidia,tegra194-pcie-ep",
- .data = &tegra_pcie_dw_ep_of_data,
+ .data = &tegra194_pcie_ep_of_data,
},
{},
};
-static const struct dev_pm_ops tegra_pcie_dw_pm_ops = {
- .suspend_late = tegra_pcie_dw_suspend_late,
- .suspend_noirq = tegra_pcie_dw_suspend_noirq,
- .resume_noirq = tegra_pcie_dw_resume_noirq,
- .resume_early = tegra_pcie_dw_resume_early,
+static const struct dev_pm_ops tegra194_pcie_pm_ops = {
+ .suspend_late = tegra194_pcie_suspend_late,
+ .suspend_noirq = tegra194_pcie_suspend_noirq,
+ .resume_noirq = tegra194_pcie_resume_noirq,
+ .resume_early = tegra194_pcie_resume_early,
};
-static struct platform_driver tegra_pcie_dw_driver = {
- .probe = tegra_pcie_dw_probe,
- .remove = tegra_pcie_dw_remove,
- .shutdown = tegra_pcie_dw_shutdown,
+static struct platform_driver tegra194_pcie_driver = {
+ .probe = tegra194_pcie_probe,
+ .remove = tegra194_pcie_remove,
+ .shutdown = tegra194_pcie_shutdown,
.driver = {
.name = "tegra194-pcie",
- .pm = &tegra_pcie_dw_pm_ops,
- .of_match_table = tegra_pcie_dw_of_match,
+ .pm = &tegra194_pcie_pm_ops,
+ .of_match_table = tegra194_pcie_of_match,
},
};
-module_platform_driver(tegra_pcie_dw_driver);
+module_platform_driver(tegra194_pcie_driver);
-MODULE_DEVICE_TABLE(of, tegra_pcie_dw_of_match);
+MODULE_DEVICE_TABLE(of, tegra194_pcie_of_match);
MODULE_AUTHOR("Vidya Sagar <vidyas@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA PCIe host controller driver");
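[Reviewer note] The tegra194 hunks above are a mechanical rename (tegra_pcie_dw -> tegra194_pcie); the pattern they preserve is a driver-private object that embeds the DesignWare core and helpers that recover the wrapper from the core pointer. A minimal sketch of that embedded-struct idiom, with illustrative names — the in-tree to_tegra_pcie() macro may equally be implemented via dev_get_drvdata() rather than container_of():

#include <linux/container_of.h>

struct core_obj { int dummy; };		/* stand-in for struct dw_pcie */

struct example_pcie {
	struct core_obj pci;		/* embedded core object, as in tegra194_pcie */
	unsigned int private_state;
};

static inline struct example_pcie *to_example_pcie(struct core_obj *pci)
{
	/* Walk back from the embedded member to the enclosing wrapper */
	return container_of(pci, struct example_pcie, pci);
}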
diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c
index d05be942956e..b45ac3754242 100644
--- a/drivers/pci/controller/dwc/pcie-uniphier.c
+++ b/drivers/pci/controller/dwc/pcie-uniphier.c
@@ -61,9 +61,9 @@
#define PCL_RDLH_LINK_UP BIT(1)
#define PCL_XMLH_LINK_UP BIT(0)
-struct uniphier_pcie_priv {
- void __iomem *base;
+struct uniphier_pcie {
struct dw_pcie pci;
+ void __iomem *base;
struct clk *clk;
struct reset_control *rst;
struct phy *phy;
@@ -72,62 +72,62 @@ struct uniphier_pcie_priv {
#define to_uniphier_pcie(x) dev_get_drvdata((x)->dev)
-static void uniphier_pcie_ltssm_enable(struct uniphier_pcie_priv *priv,
+static void uniphier_pcie_ltssm_enable(struct uniphier_pcie *pcie,
bool enable)
{
u32 val;
- val = readl(priv->base + PCL_APP_READY_CTRL);
+ val = readl(pcie->base + PCL_APP_READY_CTRL);
if (enable)
val |= PCL_APP_LTSSM_ENABLE;
else
val &= ~PCL_APP_LTSSM_ENABLE;
- writel(val, priv->base + PCL_APP_READY_CTRL);
+ writel(val, pcie->base + PCL_APP_READY_CTRL);
}
-static void uniphier_pcie_init_rc(struct uniphier_pcie_priv *priv)
+static void uniphier_pcie_init_rc(struct uniphier_pcie *pcie)
{
u32 val;
/* set RC MODE */
- val = readl(priv->base + PCL_MODE);
+ val = readl(pcie->base + PCL_MODE);
val |= PCL_MODE_REGEN;
val &= ~PCL_MODE_REGVAL;
- writel(val, priv->base + PCL_MODE);
+ writel(val, pcie->base + PCL_MODE);
/* use auxiliary power detection */
- val = readl(priv->base + PCL_APP_PM0);
+ val = readl(pcie->base + PCL_APP_PM0);
val |= PCL_SYS_AUX_PWR_DET;
- writel(val, priv->base + PCL_APP_PM0);
+ writel(val, pcie->base + PCL_APP_PM0);
/* assert PERST# */
- val = readl(priv->base + PCL_PINCTRL0);
+ val = readl(pcie->base + PCL_PINCTRL0);
val &= ~(PCL_PERST_NOE_REGVAL | PCL_PERST_OUT_REGVAL
| PCL_PERST_PLDN_REGVAL);
val |= PCL_PERST_NOE_REGEN | PCL_PERST_OUT_REGEN
| PCL_PERST_PLDN_REGEN;
- writel(val, priv->base + PCL_PINCTRL0);
+ writel(val, pcie->base + PCL_PINCTRL0);
- uniphier_pcie_ltssm_enable(priv, false);
+ uniphier_pcie_ltssm_enable(pcie, false);
usleep_range(100000, 200000);
/* deassert PERST# */
- val = readl(priv->base + PCL_PINCTRL0);
+ val = readl(pcie->base + PCL_PINCTRL0);
val |= PCL_PERST_OUT_REGVAL | PCL_PERST_OUT_REGEN;
- writel(val, priv->base + PCL_PINCTRL0);
+ writel(val, pcie->base + PCL_PINCTRL0);
}
-static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv)
+static int uniphier_pcie_wait_rc(struct uniphier_pcie *pcie)
{
u32 status;
int ret;
/* wait PIPE clock */
- ret = readl_poll_timeout(priv->base + PCL_PIPEMON, status,
+ ret = readl_poll_timeout(pcie->base + PCL_PIPEMON, status,
status & PCL_PCLK_ALIVE, 100000, 1000000);
if (ret) {
- dev_err(priv->pci.dev,
+ dev_err(pcie->pci.dev,
"Failed to initialize controller in RC mode\n");
return ret;
}
@@ -137,10 +137,10 @@ static int uniphier_pcie_wait_rc(struct uniphier_pcie_priv *priv)
static int uniphier_pcie_link_up(struct dw_pcie *pci)
{
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
u32 val, mask;
- val = readl(priv->base + PCL_STATUS_LINK);
+ val = readl(pcie->base + PCL_STATUS_LINK);
mask = PCL_RDLH_LINK_UP | PCL_XMLH_LINK_UP;
return (val & mask) == mask;
@@ -148,39 +148,40 @@ static int uniphier_pcie_link_up(struct dw_pcie *pci)
static int uniphier_pcie_start_link(struct dw_pcie *pci)
{
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
- uniphier_pcie_ltssm_enable(priv, true);
+ uniphier_pcie_ltssm_enable(pcie, true);
return 0;
}
static void uniphier_pcie_stop_link(struct dw_pcie *pci)
{
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
- uniphier_pcie_ltssm_enable(priv, false);
+ uniphier_pcie_ltssm_enable(pcie, false);
}
-static void uniphier_pcie_irq_enable(struct uniphier_pcie_priv *priv)
+static void uniphier_pcie_irq_enable(struct uniphier_pcie *pcie)
{
- writel(PCL_RCV_INT_ALL_ENABLE, priv->base + PCL_RCV_INT);
- writel(PCL_RCV_INTX_ALL_ENABLE, priv->base + PCL_RCV_INTX);
+ writel(PCL_RCV_INT_ALL_ENABLE, pcie->base + PCL_RCV_INT);
+ writel(PCL_RCV_INTX_ALL_ENABLE, pcie->base + PCL_RCV_INTX);
}
+
static void uniphier_pcie_irq_mask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
unsigned long flags;
u32 val;
raw_spin_lock_irqsave(&pp->lock, flags);
- val = readl(priv->base + PCL_RCV_INTX);
+ val = readl(pcie->base + PCL_RCV_INTX);
val |= BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
- writel(val, priv->base + PCL_RCV_INTX);
+ writel(val, pcie->base + PCL_RCV_INTX);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -189,15 +190,15 @@ static void uniphier_pcie_irq_unmask(struct irq_data *d)
{
struct pcie_port *pp = irq_data_get_irq_chip_data(d);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
unsigned long flags;
u32 val;
raw_spin_lock_irqsave(&pp->lock, flags);
- val = readl(priv->base + PCL_RCV_INTX);
+ val = readl(pcie->base + PCL_RCV_INTX);
val &= ~BIT(irqd_to_hwirq(d) + PCL_RCV_INTX_MASK_SHIFT);
- writel(val, priv->base + PCL_RCV_INTX);
+ writel(val, pcie->base + PCL_RCV_INTX);
raw_spin_unlock_irqrestore(&pp->lock, flags);
}
@@ -226,13 +227,13 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
{
struct pcie_port *pp = irq_desc_get_handler_data(desc);
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long reg;
u32 val, bit;
/* INT for debug */
- val = readl(priv->base + PCL_RCV_INT);
+ val = readl(pcie->base + PCL_RCV_INT);
if (val & PCL_CFG_BW_MGT_STATUS)
dev_dbg(pci->dev, "Link Bandwidth Management Event\n");
@@ -243,16 +244,16 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
if (val & PCL_CFG_PME_MSI_STATUS)
dev_dbg(pci->dev, "PME Interrupt\n");
- writel(val, priv->base + PCL_RCV_INT);
+ writel(val, pcie->base + PCL_RCV_INT);
/* INTx */
chained_irq_enter(chip, desc);
- val = readl(priv->base + PCL_RCV_INTX);
+ val = readl(pcie->base + PCL_RCV_INTX);
reg = FIELD_GET(PCL_RCV_INTX_ALL_STATUS, val);
for_each_set_bit(bit, &reg, PCI_NUM_INTX)
- generic_handle_domain_irq(priv->legacy_irq_domain, bit);
+ generic_handle_domain_irq(pcie->legacy_irq_domain, bit);
chained_irq_exit(chip, desc);
}
@@ -260,7 +261,7 @@ static void uniphier_pcie_irq_handler(struct irq_desc *desc)
static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
struct device_node *np = pci->dev->of_node;
struct device_node *np_intc;
int ret = 0;
@@ -278,9 +279,9 @@ static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
goto out_put_node;
}
- priv->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
+ pcie->legacy_irq_domain = irq_domain_add_linear(np_intc, PCI_NUM_INTX,
&uniphier_intx_domain_ops, pp);
- if (!priv->legacy_irq_domain) {
+ if (!pcie->legacy_irq_domain) {
dev_err(pci->dev, "Failed to get INTx domain\n");
ret = -ENODEV;
goto out_put_node;
@@ -297,14 +298,14 @@ out_put_node:
static int uniphier_pcie_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct uniphier_pcie_priv *priv = to_uniphier_pcie(pci);
+ struct uniphier_pcie *pcie = to_uniphier_pcie(pci);
int ret;
ret = uniphier_pcie_config_legacy_irq(pp);
if (ret)
return ret;
- uniphier_pcie_irq_enable(priv);
+ uniphier_pcie_irq_enable(pcie);
return 0;
}
@@ -313,36 +314,36 @@ static const struct dw_pcie_host_ops uniphier_pcie_host_ops = {
.host_init = uniphier_pcie_host_init,
};
-static int uniphier_pcie_host_enable(struct uniphier_pcie_priv *priv)
+static int uniphier_pcie_host_enable(struct uniphier_pcie *pcie)
{
int ret;
- ret = clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(pcie->clk);
if (ret)
return ret;
- ret = reset_control_deassert(priv->rst);
+ ret = reset_control_deassert(pcie->rst);
if (ret)
goto out_clk_disable;
- uniphier_pcie_init_rc(priv);
+ uniphier_pcie_init_rc(pcie);
- ret = phy_init(priv->phy);
+ ret = phy_init(pcie->phy);
if (ret)
goto out_rst_assert;
- ret = uniphier_pcie_wait_rc(priv);
+ ret = uniphier_pcie_wait_rc(pcie);
if (ret)
goto out_phy_exit;
return 0;
out_phy_exit:
- phy_exit(priv->phy);
+ phy_exit(pcie->phy);
out_rst_assert:
- reset_control_assert(priv->rst);
+ reset_control_assert(pcie->rst);
out_clk_disable:
- clk_disable_unprepare(priv->clk);
+ clk_disable_unprepare(pcie->clk);
return ret;
}
@@ -356,41 +357,41 @@ static const struct dw_pcie_ops dw_pcie_ops = {
static int uniphier_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct uniphier_pcie_priv *priv;
+ struct uniphier_pcie *pcie;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
+ if (!pcie)
return -ENOMEM;
- priv->pci.dev = dev;
- priv->pci.ops = &dw_pcie_ops;
+ pcie->pci.dev = dev;
+ pcie->pci.ops = &dw_pcie_ops;
- priv->base = devm_platform_ioremap_resource_byname(pdev, "link");
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
+ pcie->base = devm_platform_ioremap_resource_byname(pdev, "link");
+ if (IS_ERR(pcie->base))
+ return PTR_ERR(pcie->base);
- priv->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(priv->clk))
- return PTR_ERR(priv->clk);
+ pcie->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(pcie->clk))
+ return PTR_ERR(pcie->clk);
- priv->rst = devm_reset_control_get_shared(dev, NULL);
- if (IS_ERR(priv->rst))
- return PTR_ERR(priv->rst);
+ pcie->rst = devm_reset_control_get_shared(dev, NULL);
+ if (IS_ERR(pcie->rst))
+ return PTR_ERR(pcie->rst);
- priv->phy = devm_phy_optional_get(dev, "pcie-phy");
- if (IS_ERR(priv->phy))
- return PTR_ERR(priv->phy);
+ pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(pcie->phy))
+ return PTR_ERR(pcie->phy);
- platform_set_drvdata(pdev, priv);
+ platform_set_drvdata(pdev, pcie);
- ret = uniphier_pcie_host_enable(priv);
+ ret = uniphier_pcie_host_enable(pcie);
if (ret)
return ret;
- priv->pci.pp.ops = &uniphier_pcie_host_ops;
+ pcie->pci.pp.ops = &uniphier_pcie_host_ops;
- return dw_pcie_host_init(&priv->pci.pp);
+ return dw_pcie_host_init(&pcie->pci.pp);
}
static const struct of_device_id uniphier_pcie_match[] = {
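[Reviewer note] Besides the priv -> pcie rename, the uniphier hunk moves the embedded 'struct dw_pcie pci' to the front of the wrapper struct. A sketch of why that layout is conventional, under the assumption of the usual kernel build helpers: with the core object first, its offset is zero, so a pointer to the core and a pointer to the wrapper are numerically identical, matching the layout of the other DWC glue drivers. Nothing here depends on that (the driver uses drvdata lookups); the assertion is purely illustrative.

#include <linux/build_bug.h>
#include <linux/stddef.h>

struct core_obj { int dummy; };		/* stand-in for struct dw_pcie */

struct uniphier_pcie_example {
	struct core_obj pci;		/* first member: offset 0 */
	void __iomem *base;
};

/* Compile-time check that the embedded core sits at offset 0 */
static_assert(offsetof(struct uniphier_pcie_example, pci) == 0);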
diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
index 306950272fd6..d7b7350f02dd 100644
--- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
+++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c
@@ -34,31 +34,31 @@
#define PF_DBG_WE BIT(31)
#define PF_DBG_PABR BIT(27)
-#define to_ls_pcie_g4(x) platform_get_drvdata((x)->pdev)
+#define to_ls_g4_pcie(x) platform_get_drvdata((x)->pdev)
-struct ls_pcie_g4 {
+struct ls_g4_pcie {
struct mobiveil_pcie pci;
struct delayed_work dwork;
int irq;
};
-static inline u32 ls_pcie_g4_pf_readl(struct ls_pcie_g4 *pcie, u32 off)
+static inline u32 ls_g4_pcie_pf_readl(struct ls_g4_pcie *pcie, u32 off)
{
return ioread32(pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}
-static inline void ls_pcie_g4_pf_writel(struct ls_pcie_g4 *pcie,
+static inline void ls_g4_pcie_pf_writel(struct ls_g4_pcie *pcie,
u32 off, u32 val)
{
iowrite32(val, pcie->pci.csr_axi_slave_base + PCIE_PF_OFF + off);
}
-static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
+static int ls_g4_pcie_link_up(struct mobiveil_pcie *pci)
{
- struct ls_pcie_g4 *pcie = to_ls_pcie_g4(pci);
+ struct ls_g4_pcie *pcie = to_ls_g4_pcie(pci);
u32 state;
- state = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ state = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
state = state & PF_DBG_LTSSM_MASK;
if (state == PF_DBG_LTSSM_L0)
@@ -67,14 +67,14 @@ static int ls_pcie_g4_link_up(struct mobiveil_pcie *pci)
return 0;
}
-static void ls_pcie_g4_disable_interrupt(struct ls_pcie_g4 *pcie)
+static void ls_g4_pcie_disable_interrupt(struct ls_g4_pcie *pcie)
{
struct mobiveil_pcie *mv_pci = &pcie->pci;
mobiveil_csr_writel(mv_pci, 0, PAB_INTP_AMBA_MISC_ENB);
}
-static void ls_pcie_g4_enable_interrupt(struct ls_pcie_g4 *pcie)
+static void ls_g4_pcie_enable_interrupt(struct ls_g4_pcie *pcie)
{
struct mobiveil_pcie *mv_pci = &pcie->pci;
u32 val;
@@ -87,7 +87,7 @@ static void ls_pcie_g4_enable_interrupt(struct ls_pcie_g4 *pcie)
mobiveil_csr_writel(mv_pci, val, PAB_INTP_AMBA_MISC_ENB);
}
-static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
+static int ls_g4_pcie_reinit_hw(struct ls_g4_pcie *pcie)
{
struct mobiveil_pcie *mv_pci = &pcie->pci;
struct device *dev = &mv_pci->pdev->dev;
@@ -97,7 +97,7 @@ static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
/* Poll for pab_csb_reset to set and PAB activity to clear */
do {
usleep_range(10, 15);
- val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_INT_STAT);
+ val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_INT_STAT);
act_stat = mobiveil_csr_readl(mv_pci, PAB_ACTIVITY_STAT);
} while (((val & PF_INT_STAT_PABRST) == 0 || act_stat) && to--);
if (to < 0) {
@@ -106,22 +106,22 @@ static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
}
/* clear PEX_RESET bit in PEX_PF0_DBG register */
- val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
val |= PF_DBG_WE;
- ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+ ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);
- val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
val |= PF_DBG_PABR;
- ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+ ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);
- val = ls_pcie_g4_pf_readl(pcie, PCIE_PF_DBG);
+ val = ls_g4_pcie_pf_readl(pcie, PCIE_PF_DBG);
val &= ~PF_DBG_WE;
- ls_pcie_g4_pf_writel(pcie, PCIE_PF_DBG, val);
+ ls_g4_pcie_pf_writel(pcie, PCIE_PF_DBG, val);
mobiveil_host_init(mv_pci, true);
to = 100;
- while (!ls_pcie_g4_link_up(mv_pci) && to--)
+ while (!ls_g4_pcie_link_up(mv_pci) && to--)
usleep_range(200, 250);
if (to < 0) {
dev_err(dev, "PCIe link training timeout\n");
@@ -131,9 +131,9 @@ static int ls_pcie_g4_reinit_hw(struct ls_pcie_g4 *pcie)
return 0;
}
-static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id)
+static irqreturn_t ls_g4_pcie_isr(int irq, void *dev_id)
{
- struct ls_pcie_g4 *pcie = (struct ls_pcie_g4 *)dev_id;
+ struct ls_g4_pcie *pcie = (struct ls_g4_pcie *)dev_id;
struct mobiveil_pcie *mv_pci = &pcie->pci;
u32 val;
@@ -142,7 +142,7 @@ static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id)
return IRQ_NONE;
if (val & PAB_INTP_RESET) {
- ls_pcie_g4_disable_interrupt(pcie);
+ ls_g4_pcie_disable_interrupt(pcie);
schedule_delayed_work(&pcie->dwork, msecs_to_jiffies(1));
}
@@ -151,9 +151,9 @@ static irqreturn_t ls_pcie_g4_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
+static int ls_g4_pcie_interrupt_init(struct mobiveil_pcie *mv_pci)
{
- struct ls_pcie_g4 *pcie = to_ls_pcie_g4(mv_pci);
+ struct ls_g4_pcie *pcie = to_ls_g4_pcie(mv_pci);
struct platform_device *pdev = mv_pci->pdev;
struct device *dev = &pdev->dev;
int ret;
@@ -162,7 +162,7 @@ static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
if (pcie->irq < 0)
return pcie->irq;
- ret = devm_request_irq(dev, pcie->irq, ls_pcie_g4_isr,
+ ret = devm_request_irq(dev, pcie->irq, ls_g4_pcie_isr,
IRQF_SHARED, pdev->name, pcie);
if (ret) {
dev_err(dev, "Can't register PCIe IRQ, errno = %d\n", ret);
@@ -172,11 +172,11 @@ static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci)
return 0;
}
-static void ls_pcie_g4_reset(struct work_struct *work)
+static void ls_g4_pcie_reset(struct work_struct *work)
{
struct delayed_work *dwork = container_of(work, struct delayed_work,
work);
- struct ls_pcie_g4 *pcie = container_of(dwork, struct ls_pcie_g4, dwork);
+ struct ls_g4_pcie *pcie = container_of(dwork, struct ls_g4_pcie, dwork);
struct mobiveil_pcie *mv_pci = &pcie->pci;
u16 ctrl;
@@ -184,26 +184,26 @@ static void ls_pcie_g4_reset(struct work_struct *work)
ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
mobiveil_csr_writew(mv_pci, ctrl, PCI_BRIDGE_CONTROL);
- if (!ls_pcie_g4_reinit_hw(pcie))
+ if (!ls_g4_pcie_reinit_hw(pcie))
return;
- ls_pcie_g4_enable_interrupt(pcie);
+ ls_g4_pcie_enable_interrupt(pcie);
}
-static struct mobiveil_rp_ops ls_pcie_g4_rp_ops = {
- .interrupt_init = ls_pcie_g4_interrupt_init,
+static struct mobiveil_rp_ops ls_g4_pcie_rp_ops = {
+ .interrupt_init = ls_g4_pcie_interrupt_init,
};
-static const struct mobiveil_pab_ops ls_pcie_g4_pab_ops = {
- .link_up = ls_pcie_g4_link_up,
+static const struct mobiveil_pab_ops ls_g4_pcie_pab_ops = {
+ .link_up = ls_g4_pcie_link_up,
};
-static int __init ls_pcie_g4_probe(struct platform_device *pdev)
+static int __init ls_g4_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pci_host_bridge *bridge;
struct mobiveil_pcie *mv_pci;
- struct ls_pcie_g4 *pcie;
+ struct ls_g4_pcie *pcie;
struct device_node *np = dev->of_node;
int ret;
@@ -220,13 +220,13 @@ static int __init ls_pcie_g4_probe(struct platform_device *pdev)
mv_pci = &pcie->pci;
mv_pci->pdev = pdev;
- mv_pci->ops = &ls_pcie_g4_pab_ops;
- mv_pci->rp.ops = &ls_pcie_g4_rp_ops;
+ mv_pci->ops = &ls_g4_pcie_pab_ops;
+ mv_pci->rp.ops = &ls_g4_pcie_rp_ops;
mv_pci->rp.bridge = bridge;
platform_set_drvdata(pdev, pcie);
- INIT_DELAYED_WORK(&pcie->dwork, ls_pcie_g4_reset);
+ INIT_DELAYED_WORK(&pcie->dwork, ls_g4_pcie_reset);
ret = mobiveil_pcie_host_probe(mv_pci);
if (ret) {
@@ -234,22 +234,22 @@ static int __init ls_pcie_g4_probe(struct platform_device *pdev)
return ret;
}
- ls_pcie_g4_enable_interrupt(pcie);
+ ls_g4_pcie_enable_interrupt(pcie);
return 0;
}
-static const struct of_device_id ls_pcie_g4_of_match[] = {
+static const struct of_device_id ls_g4_pcie_of_match[] = {
{ .compatible = "fsl,lx2160a-pcie", },
{ },
};
-static struct platform_driver ls_pcie_g4_driver = {
+static struct platform_driver ls_g4_pcie_driver = {
.driver = {
.name = "layerscape-pcie-gen4",
- .of_match_table = ls_pcie_g4_of_match,
+ .of_match_table = ls_g4_pcie_of_match,
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver_probe(ls_pcie_g4_driver, ls_pcie_g4_probe);
+builtin_platform_driver_probe(ls_g4_pcie_driver, ls_g4_pcie_probe);
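[Reviewer note] The layerscape-gen4 link-up check reduces to reading the LTSSM state field and comparing it against L0; the reinit path then polls that predicate (100 iterations of ~200us in ls_g4_pcie_reinit_hw()). A hedged, reduced sketch of the predicate, reusing the register names from the diff and assuming pf_base already includes the PCIE_PF_OFF offset that ls_g4_pcie_pf_readl() applies:

static bool example_ltssm_in_l0(void __iomem *pf_base)
{
	/* Extract the LTSSM state field from the PF debug register */
	u32 state = ioread32(pf_base + PCIE_PF_DBG) & PF_DBG_LTSSM_MASK;

	return state == PF_DBG_LTSSM_L0;
}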
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index c3b725afa11f..4f5b44827d21 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -115,6 +115,7 @@
#define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
#define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
#define PCIE_MSI_MASK_REG (CONTROL_BASE_ADDR + 0x5C)
+#define PCIE_MSI_ALL_MASK GENMASK(31, 0)
#define PCIE_MSI_PAYLOAD_REG (CONTROL_BASE_ADDR + 0x9C)
#define PCIE_MSI_DATA_MASK GENMASK(15, 0)
@@ -570,6 +571,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, reg, PCIE_CORE_CTRL2_REG);
/* Clear all interrupts */
+ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
@@ -582,7 +584,7 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
/* Unmask all MSIs */
- advk_writel(pcie, 0, PCIE_MSI_MASK_REG);
+ advk_writel(pcie, ~(u32)PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
/* Enable summary interrupt for GIC SPI source */
reg = PCIE_IRQ_ALL_MASK & (~PCIE_IRQ_ENABLE_INTS_MASK);
@@ -872,11 +874,15 @@ advk_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
return PCI_BRIDGE_EMUL_HANDLED;
}
- case PCI_CAP_LIST_ID:
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
+ case PCI_EXP_DEVCAP2:
+ case PCI_EXP_DEVCTL2:
+ case PCI_EXP_LNKCAP2:
+ case PCI_EXP_LNKCTL2:
*value = advk_readl(pcie, PCIE_CORE_PCIEXP_CAP + reg);
return PCI_BRIDGE_EMUL_HANDLED;
+
default:
return PCI_BRIDGE_EMUL_NOT_HANDLED;
}
@@ -890,10 +896,6 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
struct advk_pcie *pcie = bridge->data;
switch (reg) {
- case PCI_EXP_DEVCTL:
- advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
- break;
-
case PCI_EXP_LNKCTL:
advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
if (new & PCI_EXP_LNKCTL_RL)
@@ -915,6 +917,12 @@ advk_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
advk_writel(pcie, new, PCIE_ISR0_REG);
break;
+ case PCI_EXP_DEVCTL:
+ case PCI_EXP_DEVCTL2:
+ case PCI_EXP_LNKCTL2:
+ advk_writel(pcie, new, PCIE_CORE_PCIEXP_CAP + reg);
+ break;
+
default:
break;
}
@@ -953,6 +961,9 @@ static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
/* Support interrupt A for MSI feature */
bridge->conf.intpin = PCIE_CORE_INT_A_ASSERT_ENABLE;
+ /* Aardvark HW provides the PCIe Capability structure in version 2 */
+ bridge->pcie_conf.cap = cpu_to_le16(2);
+
/* Indicates supports for Completion Retry Status */
bridge->pcie_conf.rootcap = cpu_to_le16(PCI_EXP_RTCAP_CRSVIS);
@@ -1017,10 +1028,8 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
u32 reg;
int ret;
- if (!advk_pcie_valid_device(pcie, bus, devfn)) {
- *val = 0xffffffff;
+ if (!advk_pcie_valid_device(pcie, bus, devfn))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
if (pci_is_root_bus(bus))
return pci_bridge_emul_conf_read(&pcie->bridge, where,
@@ -1383,7 +1392,7 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
msi_mask = advk_readl(pcie, PCIE_MSI_MASK_REG);
msi_val = advk_readl(pcie, PCIE_MSI_STATUS_REG);
- msi_status = msi_val & ~msi_mask;
+ msi_status = msi_val & ((~msi_mask) & PCIE_MSI_ALL_MASK);
for (msi_idx = 0; msi_idx < MSI_IRQ_NUM; msi_idx++) {
if (!(BIT(msi_idx) & msi_status))
@@ -1535,8 +1544,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
* only PIO for issuing configuration transfers which does
* not use PCIe window configuration.
*/
- if (type != IORESOURCE_MEM && type != IORESOURCE_MEM_64 &&
- type != IORESOURCE_IO)
+ if (type != IORESOURCE_MEM && type != IORESOURCE_IO)
continue;
/*
@@ -1544,8 +1552,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
* configuration is set to transparent memory access so it
* does not need window configuration.
*/
- if ((type == IORESOURCE_MEM || type == IORESOURCE_MEM_64) &&
- entry->offset == 0)
+ if (type == IORESOURCE_MEM && entry->offset == 0)
continue;
/*
@@ -1677,20 +1684,64 @@ static int advk_pcie_remove(struct platform_device *pdev)
{
struct advk_pcie *pcie = platform_get_drvdata(pdev);
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ u32 val;
int i;
+ /* Remove PCI bus with all devices */
pci_lock_rescan_remove();
pci_stop_root_bus(bridge->bus);
pci_remove_root_bus(bridge->bus);
pci_unlock_rescan_remove();
+ /* Disable Root Bridge I/O space, memory space and bus mastering */
+ val = advk_readl(pcie, PCIE_CORE_CMD_STATUS_REG);
+ val &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ advk_writel(pcie, val, PCIE_CORE_CMD_STATUS_REG);
+
+ /* Disable MSI */
+ val = advk_readl(pcie, PCIE_CORE_CTRL2_REG);
+ val &= ~PCIE_CORE_CTRL2_MSI_ENABLE;
+ advk_writel(pcie, val, PCIE_CORE_CTRL2_REG);
+
+ /* Clear MSI address */
+ advk_writel(pcie, 0, PCIE_MSI_ADDR_LOW_REG);
+ advk_writel(pcie, 0, PCIE_MSI_ADDR_HIGH_REG);
+
+ /* Mask all interrupts */
+ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_MASK_REG);
+ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_MASK_REG);
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_MASK_REG);
+ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_MASK_REG);
+
+ /* Clear all interrupts */
+ advk_writel(pcie, PCIE_MSI_ALL_MASK, PCIE_MSI_STATUS_REG);
+ advk_writel(pcie, PCIE_ISR0_ALL_MASK, PCIE_ISR0_REG);
+ advk_writel(pcie, PCIE_ISR1_ALL_MASK, PCIE_ISR1_REG);
+ advk_writel(pcie, PCIE_IRQ_ALL_MASK, HOST_CTRL_INT_STATUS_REG);
+
+ /* Remove IRQ domains */
advk_pcie_remove_msi_irq_domain(pcie);
advk_pcie_remove_irq_domain(pcie);
+ /* Free config space for emulated root bridge */
+ pci_bridge_emul_cleanup(&pcie->bridge);
+
+ /* Assert PERST# signal which prepares PCIe card for power down */
+ if (pcie->reset_gpio)
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+
+ /* Disable link training */
+ val = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ val &= ~LINK_TRAINING_EN;
+ advk_writel(pcie, val, PCIE_CORE_CTRL0_REG);
+
/* Disable outbound address windows mapping */
for (i = 0; i < OB_WIN_COUNT; i++)
advk_pcie_disable_ob_win(pcie, i);
+ /* Disable phy */
+ advk_pcie_disable_phy(pcie);
+
return 0;
}
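[Reviewer note] The aardvark changes above make the MSI mask arithmetic explicit: a set bit in PCIE_MSI_MASK_REG disables the corresponding MSI, so "unmask all" writes the complement of PCIE_MSI_ALL_MASK (i.e. 0), and the pending set is the status filtered through the unmasked bits. A self-contained sketch of that arithmetic, with an illustrative macro name:

#include <linux/bits.h>

#define EXAMPLE_MSI_ALL_MASK	GENMASK(31, 0)

static u32 example_pending_msis(u32 status, u32 mask)
{
	/* A bit pending in status counts only if it is not masked off */
	return status & (~mask & EXAMPLE_MSI_ALL_MASK);
}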
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index 6733cb14e775..ae0bc2fee4ca 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -43,13 +43,12 @@
#include <linux/pci-ecam.h>
#include <linux/delay.h>
#include <linux/semaphore.h>
-#include <linux/irqdomain.h>
-#include <asm/irqdomain.h>
-#include <asm/apic.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/hyperv.h>
#include <linux/refcount.h>
+#include <linux/irqdomain.h>
+#include <linux/acpi.h>
#include <asm/mshyperv.h>
/*
@@ -583,6 +582,265 @@ struct hv_pci_compl {
static void hv_pci_onchannelcallback(void *context);
+#ifdef CONFIG_X86
+#define DELIVERY_MODE APIC_DELIVERY_MODE_FIXED
+#define FLOW_HANDLER handle_edge_irq
+#define FLOW_NAME "edge"
+
+static int hv_pci_irqchip_init(void)
+{
+ return 0;
+}
+
+static struct irq_domain *hv_pci_get_root_domain(void)
+{
+ return x86_vector_domain;
+}
+
+static unsigned int hv_msi_get_int_vector(struct irq_data *data)
+{
+ struct irq_cfg *cfg = irqd_cfg(data);
+
+ return cfg->vector;
+}
+
+static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
+ struct msi_desc *msi_desc)
+{
+ msi_entry->address.as_uint32 = msi_desc->msg.address_lo;
+ msi_entry->data.as_uint32 = msi_desc->msg.data;
+}
+
+static int hv_msi_prepare(struct irq_domain *domain, struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ return pci_msi_prepare(domain, dev, nvec, info);
+}
+#elif defined(CONFIG_ARM64)
+/*
+ * SPI vectors to use for vPCI; the arch SPI range is [32, 1019], but leave a
+ * bit of room at the start so SPIs can still be specified through ACPI, and
+ * start at a power of two to satisfy the power-of-two multi-MSI requirement.
+ */
+#define HV_PCI_MSI_SPI_START 64
+#define HV_PCI_MSI_SPI_NR (1020 - HV_PCI_MSI_SPI_START)
+#define DELIVERY_MODE 0
+#define FLOW_HANDLER NULL
+#define FLOW_NAME NULL
+#define hv_msi_prepare NULL
+
+struct hv_pci_chip_data {
+ DECLARE_BITMAP(spi_map, HV_PCI_MSI_SPI_NR);
+ struct mutex map_lock;
+};
+
+/* Hyper-V vPCI MSI GIC IRQ domain */
+static struct irq_domain *hv_msi_gic_irq_domain;
+
+/* Hyper-V PCI MSI IRQ chip */
+static struct irq_chip hv_arm64_msi_irq_chip = {
+ .name = "MSI",
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent
+};
+
+static unsigned int hv_msi_get_int_vector(struct irq_data *irqd)
+{
+ return irqd->parent_data->hwirq;
+}
+
+static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry,
+ struct msi_desc *msi_desc)
+{
+ msi_entry->address = ((u64)msi_desc->msg.address_hi << 32) |
+ msi_desc->msg.address_lo;
+ msi_entry->data = msi_desc->msg.data;
+}
+
+/*
+ * @nr_bm_irqs: Indicates the number of IRQs that were allocated from
+ * the bitmap.
+ * @nr_dom_irqs: Indicates the number of IRQs that were allocated from
+ * the parent domain.
+ */
+static void hv_pci_vec_irq_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_bm_irqs,
+ unsigned int nr_dom_irqs)
+{
+ struct hv_pci_chip_data *chip_data = domain->host_data;
+ struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+ int first = d->hwirq - HV_PCI_MSI_SPI_START;
+ int i;
+
+ mutex_lock(&chip_data->map_lock);
+ bitmap_release_region(chip_data->spi_map,
+ first,
+ get_count_order(nr_bm_irqs));
+ mutex_unlock(&chip_data->map_lock);
+ for (i = 0; i < nr_dom_irqs; i++) {
+ if (i)
+ d = irq_domain_get_irq_data(domain, virq + i);
+ irq_domain_reset_irq_data(d);
+ }
+
+ irq_domain_free_irqs_parent(domain, virq, nr_dom_irqs);
+}
+
+static void hv_pci_vec_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ hv_pci_vec_irq_free(domain, virq, nr_irqs, nr_irqs);
+}
+
+static int hv_pci_vec_alloc_device_irq(struct irq_domain *domain,
+ unsigned int nr_irqs,
+ irq_hw_number_t *hwirq)
+{
+ struct hv_pci_chip_data *chip_data = domain->host_data;
+ int index;
+
+ /* Find and allocate region from the SPI bitmap */
+ mutex_lock(&chip_data->map_lock);
+ index = bitmap_find_free_region(chip_data->spi_map,
+ HV_PCI_MSI_SPI_NR,
+ get_count_order(nr_irqs));
+ mutex_unlock(&chip_data->map_lock);
+ if (index < 0)
+ return -ENOSPC;
+
+ *hwirq = index + HV_PCI_MSI_SPI_START;
+
+ return 0;
+}
+
+static int hv_pci_vec_irq_gic_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ irq_hw_number_t hwirq)
+{
+ struct irq_fwspec fwspec;
+ struct irq_data *d;
+ int ret;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 2;
+ fwspec.param[0] = hwirq;
+ fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+ if (ret)
+ return ret;
+
+ /*
+ * Since the interrupt specifier is not coming from ACPI or DT, the
+ * trigger type will need to be set explicitly. Otherwise, it will be
+ * set to whatever is in the GIC configuration.
+ */
+ d = irq_domain_get_irq_data(domain->parent, virq);
+
+ return d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
+}
+
+static int hv_pci_vec_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq, unsigned int nr_irqs,
+ void *args)
+{
+ irq_hw_number_t hwirq;
+ unsigned int i;
+ int ret;
+
+ ret = hv_pci_vec_alloc_device_irq(domain, nr_irqs, &hwirq);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nr_irqs; i++) {
+ ret = hv_pci_vec_irq_gic_domain_alloc(domain, virq + i,
+ hwirq + i);
+ if (ret) {
+ hv_pci_vec_irq_free(domain, virq, nr_irqs, i);
+ return ret;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq + i,
+ hwirq + i,
+ &hv_arm64_msi_irq_chip,
+ domain->host_data);
+ pr_debug("pID:%d vID:%u\n", (int)(hwirq + i), virq + i);
+ }
+
+ return 0;
+}
+
+/*
+ * Pick the first CPU as the temporary IRQ affinity used while composing the
+ * MSI from the hypervisor. The GIC will eventually set the right affinity
+ * for the IRQ, and the 'unmask' will retarget the interrupt to that CPU.
+ */
+static int hv_pci_vec_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *irqd, bool reserve)
+{
+ int cpu = cpumask_first(cpu_present_mask);
+
+ irq_data_update_effective_affinity(irqd, cpumask_of(cpu));
+
+ return 0;
+}
+
+static const struct irq_domain_ops hv_pci_domain_ops = {
+ .alloc = hv_pci_vec_irq_domain_alloc,
+ .free = hv_pci_vec_irq_domain_free,
+ .activate = hv_pci_vec_irq_domain_activate,
+};
+
+static int hv_pci_irqchip_init(void)
+{
+ static struct hv_pci_chip_data *chip_data;
+ struct fwnode_handle *fn = NULL;
+ int ret = -ENOMEM;
+
+ chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
+ if (!chip_data)
+ return ret;
+
+ mutex_init(&chip_data->map_lock);
+ fn = irq_domain_alloc_named_fwnode("hv_vpci_arm64");
+ if (!fn)
+ goto free_chip;
+
+ /*
+ * Once enabled, the IRQ domain should not be removed, since there is
+ * no way to ensure that all the corresponding devices are gone and
+ * that no further interrupts will be generated.
+ */
+ hv_msi_gic_irq_domain = acpi_irq_create_hierarchy(0, HV_PCI_MSI_SPI_NR,
+ fn, &hv_pci_domain_ops,
+ chip_data);
+
+ if (!hv_msi_gic_irq_domain) {
+ pr_err("Failed to create Hyper-V arm64 vPCI MSI IRQ domain\n");
+ goto free_chip;
+ }
+
+ return 0;
+
+free_chip:
+ kfree(chip_data);
+ if (fn)
+ irq_domain_free_fwnode(fn);
+
+ return ret;
+}
+
+static struct irq_domain *hv_pci_get_root_domain(void)
+{
+ return hv_msi_gic_irq_domain;
+}
+#endif /* CONFIG_ARM64 */
+
/**
* hv_pci_generic_compl() - Invoked for a completion packet
* @context: Set up by the sender of the packet.
@@ -1191,17 +1449,11 @@ static void hv_msi_free(struct irq_domain *domain, struct msi_domain_info *info,
put_pcichild(hpdev);
}
-static int hv_set_affinity(struct irq_data *data, const struct cpumask *dest,
- bool force)
-{
- struct irq_data *parent = data->parent_data;
-
- return parent->chip->irq_set_affinity(parent, dest, force);
-}
-
static void hv_irq_mask(struct irq_data *data)
{
pci_msi_mask_irq(data);
+ if (data->parent_data->chip->irq_mask)
+ irq_chip_mask_parent(data);
}
/**
@@ -1217,7 +1469,6 @@ static void hv_irq_mask(struct irq_data *data)
static void hv_irq_unmask(struct irq_data *data)
{
struct msi_desc *msi_desc = irq_data_get_msi_desc(data);
- struct irq_cfg *cfg = irqd_cfg(data);
struct hv_retarget_device_interrupt *params;
struct hv_pcibus_device *hbus;
struct cpumask *dest;
@@ -1246,7 +1497,7 @@ static void hv_irq_unmask(struct irq_data *data)
(hbus->hdev->dev_instance.b[7] << 8) |
(hbus->hdev->dev_instance.b[6] & 0xf8) |
PCI_FUNC(pdev->devfn);
- params->int_target.vector = cfg->vector;
+ params->int_target.vector = hv_msi_get_int_vector(data);
/*
* Honoring apic->delivery_mode set to APIC_DELIVERY_MODE_FIXED by
@@ -1319,6 +1570,8 @@ exit_unlock:
dev_err(&hbus->hdev->device,
"%s() failed: %#llx", __func__, res);
+ if (data->parent_data->chip->irq_unmask)
+ irq_chip_unmask_parent(data);
pci_msi_unmask_irq(data);
}
@@ -1347,7 +1600,7 @@ static u32 hv_compose_msi_req_v1(
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
/*
* Create MSI w/ dummy vCPU set, overwritten by subsequent retarget in
@@ -1377,7 +1630,7 @@ static u32 hv_compose_msi_req_v2(
int_pkt->wslot.slot = slot;
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
hv_cpu_number_to_vp_number(cpu);
@@ -1397,7 +1650,7 @@ static u32 hv_compose_msi_req_v3(
int_pkt->int_desc.vector = vector;
int_pkt->int_desc.reserved = 0;
int_pkt->int_desc.vector_count = 1;
- int_pkt->int_desc.delivery_mode = APIC_DELIVERY_MODE_FIXED;
+ int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
cpu = hv_compose_msi_req_get_cpu(affinity);
int_pkt->int_desc.processor_array[0] =
hv_cpu_number_to_vp_number(cpu);
@@ -1419,7 +1672,6 @@ static u32 hv_compose_msi_req_v3(
*/
static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- struct irq_cfg *cfg = irqd_cfg(data);
struct hv_pcibus_device *hbus;
struct vmbus_channel *channel;
struct hv_pci_dev *hpdev;
@@ -1470,7 +1722,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1,
dest,
hpdev->desc.win_slot.slot,
- cfg->vector);
+ hv_msi_get_int_vector(data));
break;
case PCI_PROTOCOL_VERSION_1_2:
@@ -1478,14 +1730,14 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2,
dest,
hpdev->desc.win_slot.slot,
- cfg->vector);
+ hv_msi_get_int_vector(data));
break;
case PCI_PROTOCOL_VERSION_1_4:
size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3,
dest,
hpdev->desc.win_slot.slot,
- cfg->vector);
+ hv_msi_get_int_vector(data));
break;
default:
@@ -1594,14 +1846,18 @@ return_null_message:
static struct irq_chip hv_msi_irq_chip = {
.name = "Hyper-V PCIe MSI",
.irq_compose_msi_msg = hv_compose_msi_msg,
- .irq_set_affinity = hv_set_affinity,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#ifdef CONFIG_X86
.irq_ack = irq_chip_ack_parent,
+#elif defined(CONFIG_ARM64)
+ .irq_eoi = irq_chip_eoi_parent,
+#endif
.irq_mask = hv_irq_mask,
.irq_unmask = hv_irq_unmask,
};
static struct msi_domain_ops hv_msi_ops = {
- .msi_prepare = pci_msi_prepare,
+ .msi_prepare = hv_msi_prepare,
.msi_free = hv_msi_free,
};
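[Reviewer note] The pci-hyperv hunks select arch-specific behaviour at compile time with plain macros rather than runtime hooks; a NULL callback (FLOW_HANDLER, or hv_msi_prepare on arm64) simply falls back to the MSI core defaults. A reduced sketch of the same pattern, with illustrative EXAMPLE_* names:

#ifdef CONFIG_X86
#define EXAMPLE_DELIVERY_MODE	APIC_DELIVERY_MODE_FIXED
#define EXAMPLE_FLOW_HANDLER	handle_edge_irq
#elif defined(CONFIG_ARM64)
#define EXAMPLE_DELIVERY_MODE	0	/* no APIC delivery mode on arm64 */
#define EXAMPLE_FLOW_HANDLER	NULL	/* MSI core picks the handler */
#endif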
@@ -1625,12 +1881,12 @@ static int hv_pcie_init_irq_domain(struct hv_pcibus_device *hbus)
hbus->msi_info.flags = (MSI_FLAG_USE_DEF_DOM_OPS |
MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_MULTI_PCI_MSI |
MSI_FLAG_PCI_MSIX);
- hbus->msi_info.handler = handle_edge_irq;
- hbus->msi_info.handler_name = "edge";
+ hbus->msi_info.handler = FLOW_HANDLER;
+ hbus->msi_info.handler_name = FLOW_NAME;
hbus->msi_info.data = hbus;
hbus->irq_domain = pci_msi_create_irq_domain(hbus->fwnode,
&hbus->msi_info,
- x86_vector_domain);
+ hv_pci_get_root_domain());
if (!hbus->irq_domain) {
dev_err(&hbus->hdev->device,
"Failed to build an MSI IRQ domain\n");
@@ -1774,7 +2030,7 @@ static void prepopulate_bars(struct hv_pcibus_device *hbus)
* If the memory enable bit is already set, Hyper-V silently ignores
* the below BAR updates, and the related PCI device driver can not
* work, because reading from the device register(s) always returns
- * 0xFFFFFFFF.
+ * 0xFFFFFFFF (PCI_ERROR_RESPONSE).
*/
list_for_each_entry(hpdev, &hbus->children, list_entry) {
_hv_pcifront_read_config(hpdev, PCI_COMMAND, 2, &command);
@@ -1899,8 +2155,17 @@ static void hv_pci_assign_numa_node(struct hv_pcibus_device *hbus)
if (!hv_dev)
continue;
- if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY)
- set_dev_node(&dev->dev, hv_dev->desc.virtual_numa_node);
+ if (hv_dev->desc.flags & HV_PCI_DEVICE_FLAG_NUMA_AFFINITY &&
+ hv_dev->desc.virtual_numa_node < num_possible_nodes())
+ /*
+ * The kernel may boot with some NUMA nodes offline
+ * (e.g. in a KDUMP kernel) or with NUMA disabled via
+ * "numa=off". In those cases, adjust the host provided
+ * NUMA node to a valid NUMA node used by the kernel.
+ */
+ set_dev_node(&dev->dev,
+ numa_map_to_online_node(
+ hv_dev->desc.virtual_numa_node));
put_pcichild(hv_dev);
}
@@ -3445,18 +3710,23 @@ static int hv_pci_suspend(struct hv_device *hdev)
static int hv_pci_restore_msi_msg(struct pci_dev *pdev, void *arg)
{
- struct msi_desc *entry;
struct irq_data *irq_data;
+ struct msi_desc *entry;
+ int ret = 0;
- for_each_pci_msi_entry(entry, pdev) {
+ msi_lock_descs(&pdev->dev);
+ msi_for_each_desc(entry, &pdev->dev, MSI_DESC_ASSOCIATED) {
irq_data = irq_get_irq_data(entry->irq);
- if (WARN_ON_ONCE(!irq_data))
- return -EINVAL;
+ if (WARN_ON_ONCE(!irq_data)) {
+ ret = -EINVAL;
+ break;
+ }
hv_compose_msi_msg(irq_data, &entry->msg);
}
+ msi_unlock_descs(&pdev->dev);
- return 0;
+ return ret;
}
/*
@@ -3542,9 +3812,15 @@ static void __exit exit_hv_pci_drv(void)
static int __init init_hv_pci_drv(void)
{
+ int ret;
+
if (!hv_is_hyperv_initialized())
return -ENODEV;
+ ret = hv_pci_irqchip_init();
+ if (ret)
+ return ret;
+
/* Set the invalid domain number's bit, so it will not be used */
set_bit(HVPCI_DOM_INVALID, hvpci_dom_map);
diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c
index ed13e81cd691..71258ea3d35f 100644
--- a/drivers/pci/controller/pci-mvebu.c
+++ b/drivers/pci/controller/pci-mvebu.c
@@ -6,6 +6,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -51,10 +52,14 @@
PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
PCIE_CONF_ADDR_EN)
#define PCIE_CONF_DATA_OFF 0x18fc
+#define PCIE_INT_CAUSE_OFF 0x1900
+#define PCIE_INT_PM_PME BIT(28)
#define PCIE_MASK_OFF 0x1910
#define PCIE_MASK_ENABLE_INTS 0x0f000000
#define PCIE_CTRL_OFF 0x1a00
#define PCIE_CTRL_X1_MODE 0x0001
+#define PCIE_CTRL_RC_MODE BIT(1)
+#define PCIE_CTRL_MASTER_HOT_RESET BIT(24)
#define PCIE_STAT_OFF 0x1a04
#define PCIE_STAT_BUS 0xff00
#define PCIE_STAT_DEV 0x1f0000
@@ -125,6 +130,11 @@ static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}
+static u8 mvebu_pcie_get_local_bus_nr(struct mvebu_pcie_port *port)
+{
+ return (mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_BUS) >> 8;
+}
+
static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
{
u32 stat;
@@ -145,22 +155,13 @@ static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
mvebu_writel(port, stat, PCIE_STAT_OFF);
}
-/*
- * Setup PCIE BARs and Address Decode Wins:
- * BAR[0] -> internal registers (needed for MSI)
- * BAR[1] -> covers all DRAM banks
- * BAR[2] -> Disabled
- * WIN[0-3] -> DRAM bank[0-3]
- */
-static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
+static void mvebu_pcie_disable_wins(struct mvebu_pcie_port *port)
{
- const struct mbus_dram_target_info *dram;
- u32 size;
int i;
- dram = mv_mbus_dram_info();
+ mvebu_writel(port, 0, PCIE_BAR_LO_OFF(0));
+ mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0));
- /* First, disable and clear BARs and windows. */
for (i = 1; i < 3; i++) {
mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
@@ -176,6 +177,25 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);
+}
+
+/*
+ * Setup PCIE BARs and Address Decode Wins:
+ * BAR[0] -> internal registers (needed for MSI)
+ * BAR[1] -> covers all DRAM banks
+ * BAR[2] -> Disabled
+ * WIN[0-3] -> DRAM bank[0-3]
+ */
+static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
+{
+ const struct mbus_dram_target_info *dram;
+ u32 size;
+ int i;
+
+ dram = mv_mbus_dram_info();
+
+ /* First, disable and clear BARs and windows. */
+ mvebu_pcie_disable_wins(port);
/* Setup windows for DDR banks. Count total DDR size on the fly. */
size = 0;
@@ -213,18 +233,47 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
- u32 cmd, mask;
+ u32 ctrl, cmd, dev_rev, mask;
- /* Point PCIe unit MBUS decode windows to DRAM space. */
- mvebu_pcie_setup_wins(port);
+ /* Setup PCIe controller to Root Complex mode. */
+ ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
+ ctrl |= PCIE_CTRL_RC_MODE;
+ mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
- /* Master + slave enable. */
+ /* Disable Root Bridge I/O space, memory space and bus mastering. */
cmd = mvebu_readl(port, PCIE_CMD_OFF);
- cmd |= PCI_COMMAND_IO;
- cmd |= PCI_COMMAND_MEMORY;
- cmd |= PCI_COMMAND_MASTER;
+ cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
mvebu_writel(port, cmd, PCIE_CMD_OFF);
+ /*
+ * Change the Class Code of the PCI Bridge device to PCI Bridge (0x6004),
+ * because the default value is Memory controller (0x5080).
+ *
+ * Note that this mvebu PCI Bridge does not have a compliant Type 1
+ * Configuration Space: the Header Type is reported as Type 0 and it
+ * has the format of a Type 0 config space.
+ *
+ * Moreover, the Type 0 BAR registers (ranges 0x10 - 0x28 and 0x30 - 0x34)
+ * have the same format in Marvell's specification as in the PCIe
+ * specification, but their meaning is totally different: they are
+ * aliased into internal mvebu registers (e.g. PCIE_BAR_LO_OFF) and
+ * must not be changed or reconfigured by PCI device drivers.
+ *
+ * Therefore the driver uses an emulated PCI Bridge, which provides
+ * access to the configuration space via internal mvebu registers or an
+ * emulated configuration buffer. The driver accesses this PCI Bridge
+ * directly for simplicity, but these registers can also be reached via
+ * the standard mvebu way of accessing PCI config space.
+ */
+ dev_rev = mvebu_readl(port, PCIE_DEV_REV_OFF);
+ dev_rev &= ~0xffffff00;
+ dev_rev |= (PCI_CLASS_BRIDGE_PCI << 8) << 8;
+ mvebu_writel(port, dev_rev, PCIE_DEV_REV_OFF);
+
+ /* Point PCIe unit MBUS decode windows to DRAM space. */
+ mvebu_pcie_setup_wins(port);
+
/* Enable interrupt lines A-D. */
mask = mvebu_readl(port, PCIE_MASK_OFF);
mask |= PCIE_MASK_ENABLE_INTS;
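
/*
 * Illustrative sketch (not part of the patch): how the class code write
 * above composes the Class Code / Revision ID register at config offset
 * 0x08. PCI_CLASS_BRIDGE_PCI is 0x0604, so shifting it left by 16 yields
 * 0x06040000: base class 0x06 (bridge), subclass 0x04 (PCI-to-PCI),
 * prog-if 0x00, while the ~0xffffff00 mask preserves the Revision ID in
 * bits [7:0]. A userspace restatement:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t set_class_code(uint32_t dev_rev, uint16_t class_code)
{
	dev_rev &= ~0xffffff00u;		/* keep Revision ID only */
	dev_rev |= (uint32_t)class_code << 16;	/* class in bits [31:16] */
	return dev_rev;
}

int main(void)
{
	/* Memory controller (0x5080), rev 0x02 -> PCI bridge (0x0604) */
	assert(set_class_code(0x50800002, 0x0604) == 0x06040002);
	return 0;
}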
@@ -250,6 +299,9 @@ static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
case 4:
*val = readl_relaxed(conf_data);
break;
+ default:
+ *val = 0xffffffff;
+ return PCIBIOS_BAD_REGISTER_NUMBER;
}
return PCIBIOS_SUCCESSFUL;
@@ -303,7 +355,7 @@ static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
* areas each having a power of two size. We start from the largest
 * one (i.e. the highest order bit set in the size).
*/
-static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
+static int mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
unsigned int target, unsigned int attribute,
phys_addr_t base, size_t size,
phys_addr_t remap)
@@ -324,7 +376,7 @@ static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
&base, &end, ret);
mvebu_pcie_del_windows(port, base - size_mapped,
size_mapped);
- return;
+ return ret;
}
size -= sz;
@@ -333,16 +385,20 @@ static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
if (remap != MVEBU_MBUS_NO_REMAP)
remap += sz;
}
+
+ return 0;
}
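
/*
 * Illustrative sketch (not part of the patch): mvebu_pcie_add_windows()
 * splits an arbitrarily sized region into power-of-two chunks, largest
 * first, by repeatedly peeling off the highest set bit of the remaining
 * size. A self-contained userspace rendition of that loop:
 */
#include <stdint.h>
#include <stdio.h>

static void split_into_pow2(uint64_t base, uint64_t size)
{
	while (size) {
		/* largest power of two not exceeding the remaining size */
		uint64_t sz = 1ULL << (63 - __builtin_clzll(size));

		printf("window: base=0x%jx size=0x%jx\n",
		       (uintmax_t)base, (uintmax_t)sz);
		base += sz;
		size -= sz;
	}
}

int main(void)
{
	/* 0x30000000 bytes -> one 0x20000000 window + one 0x10000000 */
	split_into_pow2(0xe0000000, 0x30000000);
	return 0;
}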
-static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
+static int mvebu_pcie_set_window(struct mvebu_pcie_port *port,
unsigned int target, unsigned int attribute,
const struct mvebu_pcie_window *desired,
struct mvebu_pcie_window *cur)
{
+ int ret;
+
if (desired->base == cur->base && desired->remap == cur->remap &&
desired->size == cur->size)
- return;
+ return 0;
if (cur->size != 0) {
mvebu_pcie_del_windows(port, cur->base, cur->size);
@@ -357,31 +413,35 @@ static void mvebu_pcie_set_window(struct mvebu_pcie_port *port,
}
if (desired->size == 0)
- return;
+ return 0;
+
+ ret = mvebu_pcie_add_windows(port, target, attribute, desired->base,
+ desired->size, desired->remap);
+ if (ret) {
+ cur->size = 0;
+ cur->base = 0;
+ return ret;
+ }
- mvebu_pcie_add_windows(port, target, attribute, desired->base,
- desired->size, desired->remap);
*cur = *desired;
+ return 0;
}
-static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
+static int mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
struct mvebu_pcie_window desired = {};
struct pci_bridge_emul_conf *conf = &port->bridge.conf;
/* Are the new iobase/iolimit values invalid? */
if (conf->iolimit < conf->iobase ||
- conf->iolimitupper < conf->iobaseupper ||
- !(conf->command & PCI_COMMAND_IO)) {
- mvebu_pcie_set_window(port, port->io_target, port->io_attr,
- &desired, &port->iowin);
- return;
- }
+ conf->iolimitupper < conf->iobaseupper)
+ return mvebu_pcie_set_window(port, port->io_target, port->io_attr,
+ &desired, &port->iowin);
if (!mvebu_has_ioport(port)) {
dev_WARN(&port->pcie->pdev->dev,
"Attempt to set IO when IO is disabled\n");
- return;
+ return -EOPNOTSUPP;
}
/*
@@ -399,22 +459,19 @@ static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
desired.remap) +
1;
- mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
- &port->iowin);
+ return mvebu_pcie_set_window(port, port->io_target, port->io_attr, &desired,
+ &port->iowin);
}
-static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
+static int mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
struct mvebu_pcie_window desired = {.remap = MVEBU_MBUS_NO_REMAP};
struct pci_bridge_emul_conf *conf = &port->bridge.conf;
/* Are the new membase/memlimit values invalid? */
- if (conf->memlimit < conf->membase ||
- !(conf->command & PCI_COMMAND_MEMORY)) {
- mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
- &desired, &port->memwin);
- return;
- }
+ if (conf->memlimit < conf->membase)
+ return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr,
+ &desired, &port->memwin);
/*
* We read the PCI-to-PCI bridge emulated registers, and
@@ -426,8 +483,56 @@ static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
desired.size = (((conf->memlimit & 0xFFF0) << 16) | 0xFFFFF) -
desired.base + 1;
- mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
- &port->memwin);
+ return mvebu_pcie_set_window(port, port->mem_target, port->mem_attr, &desired,
+ &port->memwin);
+}
+
+static pci_bridge_emul_read_status_t
+mvebu_pci_bridge_emul_base_conf_read(struct pci_bridge_emul *bridge,
+ int reg, u32 *value)
+{
+ struct mvebu_pcie_port *port = bridge->data;
+
+ switch (reg) {
+ case PCI_COMMAND:
+ *value = mvebu_readl(port, PCIE_CMD_OFF);
+ break;
+
+ case PCI_PRIMARY_BUS: {
+ /*
+		 * Of the whole 32-bit register, only the secondary bus number
+		 * (the mvebu local bus number) is read from HW. All other
+		 * bits are retrieved from the emulated config buffer.
+ */
+ __le32 *cfgspace = (__le32 *)&bridge->conf;
+ u32 val = le32_to_cpu(cfgspace[PCI_PRIMARY_BUS / 4]);
+ val &= ~0xff00;
+ val |= mvebu_pcie_get_local_bus_nr(port) << 8;
+ *value = val;
+ break;
+ }
+
+ case PCI_INTERRUPT_LINE: {
+ /*
+		 * Of the whole 32-bit register, only one bit is read from HW:
+		 * PCI_BRIDGE_CTL_BUS_RESET. All other bits are retrieved
+		 * from the emulated config buffer.
+ */
+ __le32 *cfgspace = (__le32 *)&bridge->conf;
+ u32 val = le32_to_cpu(cfgspace[PCI_INTERRUPT_LINE / 4]);
+ if (mvebu_readl(port, PCIE_CTRL_OFF) & PCIE_CTRL_MASTER_HOT_RESET)
+ val |= PCI_BRIDGE_CTL_BUS_RESET << 16;
+ else
+ val &= ~(PCI_BRIDGE_CTL_BUS_RESET << 16);
+ *value = val;
+ break;
+ }
+
+ default:
+ return PCI_BRIDGE_EMUL_NOT_HANDLED;
+ }
+
+ return PCI_BRIDGE_EMUL_HANDLED;
}
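
/*
 * Illustrative sketch (assumptions noted, not part of the patch): the
 * read hooks above treat the emulated config space as an array of
 * little-endian 32-bit words and merge in the few bits that live in real
 * hardware. A simplified host-endian version of the merge done for the
 * Secondary Bus Number byte (bits [15:8] of the PCI_PRIMARY_BUS word,
 * config offset 0x18); the le32_to_cpu() conversion is elided here:
 */
#include <stdint.h>

static uint32_t merge_secondary_bus(const uint32_t *cfgspace,
				    uint8_t hw_local_bus)
{
	uint32_t val = cfgspace[0x18 / 4];	/* PCI_PRIMARY_BUS word */

	val &= ~0xff00u;			/* drop emulated secondary bus */
	val |= (uint32_t)hw_local_bus << 8;	/* substitute HW bus number */
	return val;
}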
static pci_bridge_emul_read_status_t
@@ -442,9 +547,7 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
break;
case PCI_EXP_DEVCTL:
- *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
- ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
break;
case PCI_EXP_LNKCAP:
@@ -468,6 +571,18 @@ mvebu_pci_bridge_emul_pcie_conf_read(struct pci_bridge_emul *bridge,
*value = mvebu_readl(port, PCIE_RC_RTSTA);
break;
+ case PCI_EXP_DEVCAP2:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP2);
+ break;
+
+ case PCI_EXP_DEVCTL2:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
+ break;
+
+ case PCI_EXP_LNKCTL2:
+ *value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
+ break;
+
default:
return PCI_BRIDGE_EMUL_NOT_HANDLED;
}
@@ -484,39 +599,62 @@ mvebu_pci_bridge_emul_base_conf_write(struct pci_bridge_emul *bridge,
switch (reg) {
case PCI_COMMAND:
- {
- if (!mvebu_has_ioport(port))
- conf->command &= ~PCI_COMMAND_IO;
-
- if ((old ^ new) & PCI_COMMAND_IO)
- mvebu_pcie_handle_iobase_change(port);
- if ((old ^ new) & PCI_COMMAND_MEMORY)
- mvebu_pcie_handle_membase_change(port);
+ if (!mvebu_has_ioport(port)) {
+ conf->command = cpu_to_le16(
+ le16_to_cpu(conf->command) & ~PCI_COMMAND_IO);
+ new &= ~PCI_COMMAND_IO;
+ }
+ mvebu_writel(port, new, PCIE_CMD_OFF);
break;
- }
case PCI_IO_BASE:
- /*
- * We keep bit 1 set, it is a read-only bit that
- * indicates we support 32 bits addressing for the
- * I/O
- */
- conf->iobase |= PCI_IO_RANGE_TYPE_32;
- conf->iolimit |= PCI_IO_RANGE_TYPE_32;
- mvebu_pcie_handle_iobase_change(port);
+ if ((mask & 0xffff) && mvebu_pcie_handle_iobase_change(port)) {
+ /* On error disable IO range */
+ conf->iobase &= ~0xf0;
+ conf->iolimit &= ~0xf0;
+ conf->iobaseupper = cpu_to_le16(0x0000);
+ conf->iolimitupper = cpu_to_le16(0x0000);
+ if (mvebu_has_ioport(port))
+ conf->iobase |= 0xf0;
+ }
break;
case PCI_MEMORY_BASE:
- mvebu_pcie_handle_membase_change(port);
+ if (mvebu_pcie_handle_membase_change(port)) {
+ /* On error disable mem range */
+ conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) & ~0xfff0);
+ conf->memlimit = cpu_to_le16(le16_to_cpu(conf->memlimit) & ~0xfff0);
+ conf->membase = cpu_to_le16(le16_to_cpu(conf->membase) | 0xfff0);
+ }
break;
case PCI_IO_BASE_UPPER16:
- mvebu_pcie_handle_iobase_change(port);
+ if (mvebu_pcie_handle_iobase_change(port)) {
+ /* On error disable IO range */
+ conf->iobase &= ~0xf0;
+ conf->iolimit &= ~0xf0;
+ conf->iobaseupper = cpu_to_le16(0x0000);
+ conf->iolimitupper = cpu_to_le16(0x0000);
+ if (mvebu_has_ioport(port))
+ conf->iobase |= 0xf0;
+ }
break;
case PCI_PRIMARY_BUS:
- mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
+ if (mask & 0xff00)
+ mvebu_pcie_set_local_bus_nr(port, conf->secondary_bus);
+ break;
+
+ case PCI_INTERRUPT_LINE:
+ if (mask & (PCI_BRIDGE_CTL_BUS_RESET << 16)) {
+ u32 ctrl = mvebu_readl(port, PCIE_CTRL_OFF);
+ if (new & (PCI_BRIDGE_CTL_BUS_RESET << 16))
+ ctrl |= PCIE_CTRL_MASTER_HOT_RESET;
+ else
+ ctrl &= ~PCIE_CTRL_MASTER_HOT_RESET;
+ mvebu_writel(port, ctrl, PCIE_CTRL_OFF);
+ }
break;
default:
@@ -532,13 +670,6 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
switch (reg) {
case PCI_EXP_DEVCTL:
- /*
- * Armada370 data says these bits must always
- * be zero when in root complex mode.
- */
- new &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
- PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
-
mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
break;
@@ -555,12 +686,31 @@ mvebu_pci_bridge_emul_pcie_conf_write(struct pci_bridge_emul *bridge,
break;
case PCI_EXP_RTSTA:
- mvebu_writel(port, new, PCIE_RC_RTSTA);
+ /*
+	 * The PME Status bit in the Root Status Register (PCIE_RC_RTSTA)
+	 * is read-only there and can be cleared only by writing a zero to
+	 * its bit in the Interrupt Cause RW0C register (PCIE_INT_CAUSE_OFF),
+	 * so clear PME via Interrupt Cause.
+ */
+ if (new & PCI_EXP_RTSTA_PME)
+ mvebu_writel(port, ~PCIE_INT_PM_PME, PCIE_INT_CAUSE_OFF);
+ break;
+
+ case PCI_EXP_DEVCTL2:
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL2);
+ break;
+
+ case PCI_EXP_LNKCTL2:
+ mvebu_writel(port, new, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL2);
+ break;
+
+ default:
break;
}
}
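
/*
 * Illustrative sketch (not part of the patch): an RW0C register clears a
 * bit when 0 is written to it and leaves bits written as 1 unchanged --
 * the inverse of the common W1C convention. That is why the PME clear in
 * the PCI_EXP_RTSTA case writes ~PCIE_INT_PM_PME instead of
 * PCIE_INT_PM_PME. Modeled in userspace (the bit position is made up):
 */
#include <assert.h>
#include <stdint.h>

static uint32_t rw0c_write(uint32_t reg, uint32_t val)
{
	return reg & val;	/* zeroes in 'val' clear, ones preserve */
}

int main(void)
{
	uint32_t cause = 0x50000088;	/* pretend PM PME is bit 28 */

	cause = rw0c_write(cause, ~(UINT32_C(1) << 28));
	assert(cause == 0x40000088);
	return 0;
}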
static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
+ .read_base = mvebu_pci_bridge_emul_base_conf_read,
.write_base = mvebu_pci_bridge_emul_base_conf_write,
.read_pcie = mvebu_pci_bridge_emul_pcie_conf_read,
.write_pcie = mvebu_pci_bridge_emul_pcie_conf_write,
@@ -570,9 +720,11 @@ static struct pci_bridge_emul_ops mvebu_pci_bridge_emul_ops = {
* Initialize the configuration space of the PCI-to-PCI bridge
* associated with the given PCIe interface.
*/
-static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
+static int mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
{
struct pci_bridge_emul *bridge = &port->bridge;
+ u32 pcie_cap = mvebu_readl(port, PCIE_CAP_PCIEXP);
+ u8 pcie_cap_ver = ((pcie_cap >> 16) & PCI_EXP_FLAGS_VERS);
bridge->conf.vendor = PCI_VENDOR_ID_MARVELL;
bridge->conf.device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
@@ -585,11 +737,17 @@ static void mvebu_pci_bridge_emul_init(struct mvebu_pcie_port *port)
bridge->conf.iolimit = PCI_IO_RANGE_TYPE_32;
}
+ /*
+	 * Older mvebu hardware provides the PCIe Capability structure only
+	 * in version 1; newer hardware provides it in version 2.
+ */
+ bridge->pcie_conf.cap = cpu_to_le16(pcie_cap_ver);
+
bridge->has_pcie = true;
bridge->data = port;
bridge->ops = &mvebu_pci_bridge_emul_ops;
- pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
+ return pci_bridge_emul_init(bridge, PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR);
}
static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
@@ -606,6 +764,9 @@ static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
for (i = 0; i < pcie->nports; i++) {
struct mvebu_pcie_port *port = &pcie->ports[i];
+ if (!port->base)
+ continue;
+
if (bus->number == 0 && port->devfn == devfn)
return port;
if (bus->number != 0 &&
@@ -653,20 +814,16 @@ static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int ret;
port = mvebu_pcie_find_port(pcie, bus, devfn);
- if (!port) {
- *val = 0xffffffff;
+ if (!port)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
/* Access the emulated PCI-to-PCI bridge */
if (bus->number == 0)
return pci_bridge_emul_conf_read(&port->bridge, where,
size, val);
- if (!mvebu_pcie_link_up(port)) {
- *val = 0xffffffff;
+ if (!mvebu_pcie_link_up(port))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
/* Access the real PCIe interface */
ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
@@ -680,6 +837,15 @@ static struct pci_ops mvebu_pcie_ops = {
.write = mvebu_pcie_wr_conf,
};
+static int mvebu_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+{
+ /* Interrupt support on mvebu emulated bridges is not implemented yet */
+ if (dev->bus->number == 0)
+ return 0; /* Proper return code 0 == NO_IRQ */
+
+ return of_irq_parse_and_map_pci(dev, slot, pin);
+}
+
static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
const struct resource *res,
resource_size_t start,
@@ -781,6 +947,8 @@ static int mvebu_pcie_suspend(struct device *dev)
pcie = dev_get_drvdata(dev);
for (i = 0; i < pcie->nports; i++) {
struct mvebu_pcie_port *port = pcie->ports + i;
+ if (!port->base)
+ continue;
port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
}
@@ -795,6 +963,8 @@ static int mvebu_pcie_resume(struct device *dev)
pcie = dev_get_drvdata(dev);
for (i = 0; i < pcie->nports; i++) {
struct mvebu_pcie_port *port = pcie->ports + i;
+ if (!port->base)
+ continue;
mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
mvebu_pcie_setup_hw(port);
}
@@ -838,6 +1008,11 @@ static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
port->devfn = of_pci_get_devfn(child);
if (port->devfn < 0)
goto skip;
+ if (PCI_FUNC(port->devfn) != 0) {
+ dev_err(dev, "%s: invalid function number, must be zero\n",
+ port->name);
+ goto skip;
+ }
ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
&port->mem_target, &port->mem_attr);
@@ -992,6 +1167,10 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
resource_size(&pcie->io) - 1);
pcie->realio.name = "PCI I/O";
+ ret = devm_pci_remap_iospace(dev, &pcie->realio, pcie->io.start);
+ if (ret)
+ return ret;
+
pci_add_resource(&bridge->windows, &pcie->realio);
ret = devm_request_resource(dev, &ioport_resource, &pcie->realio);
if (ret)
@@ -1001,54 +1180,6 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie)
return 0;
}
-/*
- * This is a copy of pci_host_probe(), except that it does the I/O
- * remap as the last step, once we are sure we won't fail.
- *
- * It should be removed once the I/O remap error handling issue has
- * been sorted out.
- */
-static int mvebu_pci_host_probe(struct pci_host_bridge *bridge)
-{
- struct mvebu_pcie *pcie;
- struct pci_bus *bus, *child;
- int ret;
-
- ret = pci_scan_root_bus_bridge(bridge);
- if (ret < 0) {
- dev_err(bridge->dev.parent, "Scanning root bridge failed");
- return ret;
- }
-
- pcie = pci_host_bridge_priv(bridge);
- if (resource_size(&pcie->io) != 0) {
- unsigned int i;
-
- for (i = 0; i < resource_size(&pcie->realio); i += SZ_64K)
- pci_ioremap_io(i, pcie->io.start + i);
- }
-
- bus = bridge->bus;
-
- /*
- * We insert PCI resources into the iomem_resource and
- * ioport_resource trees in either pci_bus_claim_resources()
- * or pci_bus_assign_resources().
- */
- if (pci_has_flag(PCI_PROBE_ONLY)) {
- pci_bus_claim_resources(bus);
- } else {
- pci_bus_size_bridges(bus);
- pci_bus_assign_resources(bus);
-
- list_for_each_entry(child, &bus->children, node)
- pcie_bus_configure_settings(child);
- }
-
- pci_bus_add_devices(bus);
- return 0;
-}
-
static int mvebu_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1112,9 +1243,93 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
continue;
}
+ ret = mvebu_pci_bridge_emul_init(port);
+ if (ret < 0) {
+ dev_err(dev, "%s: cannot init emulated bridge\n",
+ port->name);
+ devm_iounmap(dev, port->base);
+ port->base = NULL;
+ mvebu_pcie_powerdown(port);
+ continue;
+ }
+
+ /*
+		 * The PCIe topology exported by mvebu hw is quite complicated.
+		 * In reality it has something like N fully independent host
+		 * bridges where each host bridge has one PCIe Root Port (which
+		 * acts as a PCI Bridge device). Each host bridge has its own
+		 * independent internal registers, independent access to PCI
+		 * config space, independent interrupt lines, independent window
+		 * and memory access configuration. But additionally there is
+		 * some kind of peer-to-peer support between PCIe devices behind
+		 * different host bridges, limited just to forwarding of memory
+		 * and I/O transactions (forwarding of error messages and config
+		 * cycles is not supported). So we could say there are N
+		 * independent PCIe Root Complexes.
+		 *
+		 * For this kind of setup the DT should have been structured as
+		 * N independent PCIe controllers / host bridges. But instead
+		 * the structure was defined in the past to put the PCIe Root
+		 * Ports of all host bridges onto one bus zero, like in a
+		 * classic multi-port Root Complex setup with just one host
+		 * bridge.
+		 *
+		 * This means that the pci-mvebu.c driver provides a "virtual"
+		 * bus 0 on which it registers all PCIe Root Ports (PCI Bridge
+		 * devices) specified in DT by their BDF addresses, and
+		 * virtually routes PCI config accesses of each PCI Bridge
+		 * device to the corresponding PCIe host bridge.
+		 *
+		 * Normally a PCI Bridge should choose between Type 0 and Type 1
+		 * config requests based on the primary and secondary bus
+		 * numbers configured on the bridge itself. But because the
+		 * mvebu PCI Bridge does not have registers for primary and
+		 * secondary bus numbers in its config space, it determines the
+		 * type of config requests in its own custom way.
+		 *
+		 * There are two ways in which mvebu determines the type of a
+		 * config request.
+		 *
+		 * 1. If the Secondary Bus Number Enable bit is not set or is
+		 * not available (applies to pre-XP PCIe controllers) then
+		 * Type 0 is used if the target bus number equals the Local Bus
+		 * Number (bits [15:8] in register 0x1a04) and the target device
+		 * number differs from the Local Device Number (bits [20:16] in
+		 * register 0x1a04). Type 1 is used if the target bus number
+		 * differs from the Local Bus Number. And when the target bus
+		 * number equals the Local Bus Number and the target device
+		 * equals the Local Device Number, the request is routed to the
+		 * Local PCI Bridge (PCIe Root Port).
+		 *
+		 * 2. If the Secondary Bus Number Enable bit is set (bit 7 in
+		 * register 0x1a2c) then mvebu hw determines the type of a
+		 * config request like a compliant PCI Bridge, based on the
+		 * primary bus number, configured via the Local Bus Number
+		 * (bits [15:8] in register 0x1a04), and the secondary bus
+		 * number, configured via the Secondary Bus Number (bits [7:0]
+		 * in register 0x1a2c). The Local PCI Bridge (PCIe Root Port)
+		 * is available on the primary bus as the device with the Local
+		 * Device Number (bits [20:16] in register 0x1a04).
+		 *
+		 * The Secondary Bus Number Enable bit is disabled by default
+		 * and option 2. is not available on pre-XP PCIe controllers.
+		 * Hence this driver always uses option 1.
+		 *
+		 * Basically this means that the primary and secondary buses
+		 * share one virtual number configured via the Local Bus Number
+		 * bits, and the Local Device Number bits determine whether the
+		 * primary or the secondary bus is accessed. Set the Local
+		 * Device Number to 1 and redirect all writes of the PCI Bridge
+		 * Secondary Bus Number register to the Local Bus Number (bits
+		 * [15:8] in register 0x1a04).
+		 *
+		 * Accessing devices on buses behind the secondary bus number
+		 * then works correctly, and accesses to device 0 at the
+		 * secondary bus number via config space are correctly routed
+		 * to the secondary bus. Due to issues described in
+		 * mvebu_pcie_setup_hw(), PCI Bridges on the primary bus (zero)
+		 * are not accessed directly via PCI config space but rather
+		 * indirectly via the kernel's emulated PCI bridge driver.
+ */
mvebu_pcie_setup_hw(port);
- mvebu_pcie_set_local_dev_nr(port, 1);
- mvebu_pci_bridge_emul_init(port);
+ mvebu_pcie_set_local_dev_nr(port, 0);
}
pcie->nports = i;
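
/*
 * Illustrative sketch (not part of the patch): option 1 from the comment
 * above, expressed as a decision function. This simplifies the described
 * hardware behaviour; it is not a register-accurate model.
 */
enum cfg_route { ROUTE_LOCAL_BRIDGE, ROUTE_TYPE0, ROUTE_TYPE1 };

static enum cfg_route mvebu_cfg_route(unsigned int target_bus,
				      unsigned int target_dev,
				      unsigned int local_bus,
				      unsigned int local_dev)
{
	if (target_bus != local_bus)
		return ROUTE_TYPE1;		/* towards subordinate buses */
	if (target_dev == local_dev)
		return ROUTE_LOCAL_BRIDGE;	/* the Root Port itself */
	return ROUTE_TYPE0;			/* device on the secondary bus */
}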
@@ -1122,8 +1337,55 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
bridge->sysdata = pcie;
bridge->ops = &mvebu_pcie_ops;
bridge->align_resource = mvebu_pcie_align_resource;
+ bridge->map_irq = mvebu_pcie_map_irq;
+
+ return pci_host_probe(bridge);
+}
+
+static int mvebu_pcie_remove(struct platform_device *pdev)
+{
+ struct mvebu_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ u32 cmd;
+ int i;
+
+ /* Remove PCI bus with all devices. */
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+ pci_unlock_rescan_remove();
+
+ for (i = 0; i < pcie->nports; i++) {
+ struct mvebu_pcie_port *port = &pcie->ports[i];
+
+ if (!port->base)
+ continue;
+
+ /* Disable Root Bridge I/O space, memory space and bus mastering. */
+ cmd = mvebu_readl(port, PCIE_CMD_OFF);
+ cmd &= ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
+ mvebu_writel(port, cmd, PCIE_CMD_OFF);
+
+ /* Mask all interrupt sources. */
+ mvebu_writel(port, 0, PCIE_MASK_OFF);
+
+ /* Free config space for emulated root bridge. */
+ pci_bridge_emul_cleanup(&port->bridge);
- return mvebu_pci_host_probe(bridge);
+ /* Disable and clear BARs and windows. */
+ mvebu_pcie_disable_wins(port);
+
+ /* Delete PCIe IO and MEM windows. */
+ if (port->iowin.size)
+ mvebu_pcie_del_windows(port, port->iowin.base, port->iowin.size);
+ if (port->memwin.size)
+ mvebu_pcie_del_windows(port, port->memwin.base, port->memwin.size);
+
+ /* Power down card and disable clocks. Must be the last step. */
+ mvebu_pcie_powerdown(port);
+ }
+
+ return 0;
}
static const struct of_device_id mvebu_pcie_of_match_table[] = {
@@ -1142,10 +1404,14 @@ static struct platform_driver mvebu_pcie_driver = {
.driver = {
.name = "mvebu-pcie",
.of_match_table = mvebu_pcie_of_match_table,
- /* driver unloading/unbinding currently not supported */
- .suppress_bind_attrs = true,
.pm = &mvebu_pcie_pm_ops,
},
.probe = mvebu_pcie_probe,
+ .remove = mvebu_pcie_remove,
};
-builtin_platform_driver(mvebu_pcie_driver);
+module_platform_driver(mvebu_pcie_driver);
+
+MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@bootlin.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
+MODULE_DESCRIPTION("Marvell EBU PCIe controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c
index afde4aa8f6dc..35804ea394fd 100644
--- a/drivers/pci/controller/pci-rcar-gen2.c
+++ b/drivers/pci/controller/pci-rcar-gen2.c
@@ -93,7 +93,7 @@
#define RCAR_PCI_UNIT_REV_REG (RCAR_AHBPCI_PCICOM_OFFSET + 0x48)
-struct rcar_pci_priv {
+struct rcar_pci {
struct device *dev;
void __iomem *reg;
struct resource mem_res;
@@ -105,7 +105,7 @@ struct rcar_pci_priv {
static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
int where)
{
- struct rcar_pci_priv *priv = bus->sysdata;
+ struct rcar_pci *priv = bus->sysdata;
int slot, val;
if (!pci_is_root_bus(bus) || PCI_FUNC(devfn))
@@ -132,7 +132,7 @@ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn,
static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
{
- struct rcar_pci_priv *priv = pw;
+ struct rcar_pci *priv = pw;
struct device *dev = priv->dev;
u32 status = ioread32(priv->reg + RCAR_PCI_INT_STATUS_REG);
@@ -148,7 +148,7 @@ static irqreturn_t rcar_pci_err_irq(int irq, void *pw)
return IRQ_NONE;
}
-static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
+static void rcar_pci_setup_errirq(struct rcar_pci *priv)
{
struct device *dev = priv->dev;
int ret;
@@ -166,11 +166,11 @@ static void rcar_pci_setup_errirq(struct rcar_pci_priv *priv)
iowrite32(val, priv->reg + RCAR_PCI_INT_ENABLE_REG);
}
#else
-static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { }
+static inline void rcar_pci_setup_errirq(struct rcar_pci *priv) { }
#endif
/* PCI host controller setup */
-static void rcar_pci_setup(struct rcar_pci_priv *priv)
+static void rcar_pci_setup(struct rcar_pci *priv)
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(priv);
struct device *dev = priv->dev;
@@ -279,7 +279,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *cfg_res, *mem_res;
- struct rcar_pci_priv *priv;
+ struct rcar_pci *priv;
struct pci_host_bridge *bridge;
void __iomem *reg;
diff --git a/drivers/pci/controller/pci-thunder-ecam.c b/drivers/pci/controller/pci-thunder-ecam.c
index e9d5ca245f5e..b5bd10a62adb 100644
--- a/drivers/pci/controller/pci-thunder-ecam.c
+++ b/drivers/pci/controller/pci-thunder-ecam.c
@@ -41,10 +41,9 @@ static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus,
}
if (where_a == 0x4) {
addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
+
v = readl(addr);
v &= ~0xf;
v |= 2; /* EA entry-1. Base-L */
@@ -56,10 +55,9 @@ static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus,
u32 barl_rb;
addr = bus->ops->map_bus(bus, devfn, bar); /* BAR 0 */
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
+
barl_orig = readl(addr + 0);
writel(0xffffffff, addr + 0);
barl_rb = readl(addr + 0);
@@ -72,10 +70,9 @@ static int handle_ea_bar(u32 e0, int bar, struct pci_bus *bus,
}
if (where_a == 0xc) {
addr = bus->ops->map_bus(bus, devfn, bar + 4); /* BAR 1 */
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
+
v = readl(addr); /* EA entry-3. Base-H */
set_val(v, where, size, val);
return PCIBIOS_SUCCESSFUL;
@@ -104,10 +101,8 @@ static int thunder_ecam_p2_config_read(struct pci_bus *bus, unsigned int devfn,
}
addr = bus->ops->map_bus(bus, devfn, where_a);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
v = readl(addr);
@@ -135,10 +130,8 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
int where_a = where & ~3;
addr = bus->ops->map_bus(bus, devfn, 0xc);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
v = readl(addr);
@@ -146,10 +139,8 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
cfg_type = (v >> 16) & 0x7f;
addr = bus->ops->map_bus(bus, devfn, 8);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
class_rev = readl(addr);
if (class_rev == 0xffffffff)
@@ -176,10 +167,8 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
}
addr = bus->ops->map_bus(bus, devfn, 0);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
vendor_device = readl(addr);
if (vendor_device == 0xffffffff)
@@ -196,10 +185,9 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
bool is_tns = (vendor_device == 0xa01f177d);
addr = bus->ops->map_bus(bus, devfn, 0x70);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
+
/* E_CAP */
v = readl(addr);
has_msix = (v & 0xff00) != 0;
@@ -211,10 +199,9 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
}
if (where_a == 0xb0) {
addr = bus->ops->map_bus(bus, devfn, where_a);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
+
v = readl(addr);
if (v & 0xff00)
pr_err("Bad MSIX cap header: %08x\n", v);
@@ -268,10 +255,9 @@ static int thunder_ecam_config_read(struct pci_bus *bus, unsigned int devfn,
if (where_a == 0x70) {
addr = bus->ops->map_bus(bus, devfn, where_a);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
+
v = readl(addr);
if (v & 0xff00)
pr_err("Bad PCIe cap header: %08x\n", v);
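
/*
 * Illustrative sketch (an assumption based on this series, not part of
 * the patch): config accessors may now simply return an error code, since
 * the PCI core fabricates the all-ones error response for failed reads
 * itself -- which is why the "*val = ~0" assignments above became
 * redundant. The resulting accessor shape (kernel types assumed):
 */
static int example_config_read(void __iomem *addr, u32 *val)
{
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND; /* core supplies ~0 data */

	*val = readl(addr);
	return PCIBIOS_SUCCESSFUL;
}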
diff --git a/drivers/pci/controller/pci-thunder-pem.c b/drivers/pci/controller/pci-thunder-pem.c
index 0660b9da204f..06a9855cb431 100644
--- a/drivers/pci/controller/pci-thunder-pem.c
+++ b/drivers/pci/controller/pci-thunder-pem.c
@@ -41,10 +41,8 @@ static int thunder_pem_bridge_read(struct pci_bus *bus, unsigned int devfn,
struct pci_config_window *cfg = bus->sysdata;
struct thunder_pem_pci *pem_pci = (struct thunder_pem_pci *)cfg->priv;
- if (devfn != 0 || where >= 2048) {
- *val = ~0;
+ if (devfn != 0 || where >= 2048)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
/*
* 32-bit accesses only. Write the address to the low order
diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c
index c50ff279903c..bfa259781b69 100644
--- a/drivers/pci/controller/pci-xgene-msi.c
+++ b/drivers/pci/controller/pci-xgene-msi.c
@@ -269,9 +269,7 @@ static void xgene_free_domains(struct xgene_msi *msi)
static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
{
- int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long);
-
- xgene_msi->bitmap = kzalloc(size, GFP_KERNEL);
+ xgene_msi->bitmap = bitmap_zalloc(NR_MSI_VEC, GFP_KERNEL);
if (!xgene_msi->bitmap)
return -ENOMEM;
@@ -360,7 +358,7 @@ static int xgene_msi_remove(struct platform_device *pdev)
kfree(msi->msi_groups);
- kfree(msi->bitmap);
+ bitmap_free(msi->bitmap);
msi->bitmap = NULL;
xgene_free_domains(msi);
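
/*
 * Illustrative sketch (not part of the patch): bitmap_zalloc() and
 * bitmap_free() pair up and take a bit count rather than a byte size,
 * which is exactly what the hunk above switches to. Minimal kernel-style
 * usage, assuming <linux/bitmap.h> and process context:
 */
static unsigned long *example_alloc_vec_bitmap(unsigned int nr_vecs)
{
	unsigned long *bm = bitmap_zalloc(nr_vecs, GFP_KERNEL);

	if (!bm)
		return NULL;

	set_bit(0, bm);		/* e.g. reserve vector 0 */
	return bm;		/* caller releases with bitmap_free(bm) */
}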
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index 56d0d50338c8..0d5acbfc7143 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -60,7 +60,7 @@
#define XGENE_PCIE_IP_VER_2 2
#if defined(CONFIG_PCI_XGENE) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS))
-struct xgene_pcie_port {
+struct xgene_pcie {
struct device_node *node;
struct device *dev;
struct clk *clk;
@@ -71,12 +71,12 @@ struct xgene_pcie_port {
u32 version;
};
-static u32 xgene_pcie_readl(struct xgene_pcie_port *port, u32 reg)
+static u32 xgene_pcie_readl(struct xgene_pcie *port, u32 reg)
{
return readl(port->csr_base + reg);
}
-static void xgene_pcie_writel(struct xgene_pcie_port *port, u32 reg, u32 val)
+static void xgene_pcie_writel(struct xgene_pcie *port, u32 reg, u32 val)
{
writel(val, port->csr_base + reg);
}
@@ -86,15 +86,15 @@ static inline u32 pcie_bar_low_val(u32 addr, u32 flags)
return (addr & PCI_BASE_ADDRESS_MEM_MASK) | flags;
}
-static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus)
+static inline struct xgene_pcie *pcie_bus_to_port(struct pci_bus *bus)
{
struct pci_config_window *cfg;
if (acpi_disabled)
- return (struct xgene_pcie_port *)(bus->sysdata);
+ return (struct xgene_pcie *)(bus->sysdata);
cfg = bus->sysdata;
- return (struct xgene_pcie_port *)(cfg->priv);
+ return (struct xgene_pcie *)(cfg->priv);
}
/*
@@ -103,7 +103,7 @@ static inline struct xgene_pcie_port *pcie_bus_to_port(struct pci_bus *bus)
*/
static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
{
- struct xgene_pcie_port *port = pcie_bus_to_port(bus);
+ struct xgene_pcie *port = pcie_bus_to_port(bus);
if (bus->number >= (bus->primary + 1))
return port->cfg_base + AXI_EP_CFG_ACCESS;
@@ -117,7 +117,7 @@ static void __iomem *xgene_pcie_get_cfg_base(struct pci_bus *bus)
*/
static void xgene_pcie_set_rtdid_reg(struct pci_bus *bus, uint devfn)
{
- struct xgene_pcie_port *port = pcie_bus_to_port(bus);
+ struct xgene_pcie *port = pcie_bus_to_port(bus);
unsigned int b, d, f;
u32 rtdid_val = 0;
@@ -164,18 +164,18 @@ static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
static int xgene_pcie_config_read32(struct pci_bus *bus, unsigned int devfn,
int where, int size, u32 *val)
{
- struct xgene_pcie_port *port = pcie_bus_to_port(bus);
+ struct xgene_pcie *port = pcie_bus_to_port(bus);
if (pci_generic_config_read32(bus, devfn, where & ~0x3, 4, val) !=
PCIBIOS_SUCCESSFUL)
return PCIBIOS_DEVICE_NOT_FOUND;
/*
- * The v1 controller has a bug in its Configuration Request
- * Retry Status (CRS) logic: when CRS Software Visibility is
- * enabled and we read the Vendor and Device ID of a non-existent
- * device, the controller fabricates return data of 0xFFFF0001
- * ("device exists but is not ready") instead of 0xFFFFFFFF
+ * The v1 controller has a bug in its Configuration Request Retry
+ * Status (CRS) logic: when CRS Software Visibility is enabled and
+ * we read the Vendor and Device ID of a non-existent device, the
+ * controller fabricates return data of 0xFFFF0001 ("device exists
+ * but is not ready") instead of 0xFFFFFFFF (PCI_ERROR_RESPONSE)
* ("device does not exist"). This causes the PCI core to retry
* the read until it times out. Avoid this by not claiming to
* support CRS SV.
@@ -227,7 +227,7 @@ static int xgene_pcie_ecam_init(struct pci_config_window *cfg, u32 ipversion)
{
struct device *dev = cfg->parent;
struct acpi_device *adev = to_acpi_device(dev);
- struct xgene_pcie_port *port;
+ struct xgene_pcie *port;
struct resource csr;
int ret;
@@ -281,7 +281,7 @@ const struct pci_ecam_ops xgene_v2_pcie_ecam_ops = {
#endif
#if defined(CONFIG_PCI_XGENE)
-static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
+static u64 xgene_pcie_set_ib_mask(struct xgene_pcie *port, u32 addr,
u32 flags, u64 size)
{
u64 mask = (~(size - 1) & PCI_BASE_ADDRESS_MEM_MASK) | flags;
@@ -307,7 +307,7 @@ static u64 xgene_pcie_set_ib_mask(struct xgene_pcie_port *port, u32 addr,
return mask;
}
-static void xgene_pcie_linkup(struct xgene_pcie_port *port,
+static void xgene_pcie_linkup(struct xgene_pcie *port,
u32 *lanes, u32 *speed)
{
u32 val32;
@@ -322,7 +322,7 @@ static void xgene_pcie_linkup(struct xgene_pcie_port *port,
}
}
-static int xgene_pcie_init_port(struct xgene_pcie_port *port)
+static int xgene_pcie_init_port(struct xgene_pcie *port)
{
struct device *dev = port->dev;
int rc;
@@ -342,7 +342,7 @@ static int xgene_pcie_init_port(struct xgene_pcie_port *port)
return 0;
}
-static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
+static int xgene_pcie_map_reg(struct xgene_pcie *port,
struct platform_device *pdev)
{
struct device *dev = port->dev;
@@ -362,7 +362,7 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port,
return 0;
}
-static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
+static void xgene_pcie_setup_ob_reg(struct xgene_pcie *port,
struct resource *res, u32 offset,
u64 cpu_addr, u64 pci_addr)
{
@@ -394,7 +394,7 @@ static void xgene_pcie_setup_ob_reg(struct xgene_pcie_port *port,
xgene_pcie_writel(port, offset + 0x14, upper_32_bits(pci_addr));
}
-static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
+static void xgene_pcie_setup_cfg_reg(struct xgene_pcie *port)
{
u64 addr = port->cfg_addr;
@@ -403,7 +403,7 @@ static void xgene_pcie_setup_cfg_reg(struct xgene_pcie_port *port)
xgene_pcie_writel(port, CFGCTL, EN_REG);
}
-static int xgene_pcie_map_ranges(struct xgene_pcie_port *port)
+static int xgene_pcie_map_ranges(struct xgene_pcie *port)
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
struct resource_entry *window;
@@ -444,7 +444,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port)
return 0;
}
-static void xgene_pcie_setup_pims(struct xgene_pcie_port *port, u32 pim_reg,
+static void xgene_pcie_setup_pims(struct xgene_pcie *port, u32 pim_reg,
u64 pim, u64 size)
{
xgene_pcie_writel(port, pim_reg, lower_32_bits(pim));
@@ -465,7 +465,7 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
return 1;
}
- if ((size > SZ_1K) && (size < SZ_1T) && !(*ib_reg_mask & (1 << 0))) {
+ if ((size > SZ_1K) && (size < SZ_4G) && !(*ib_reg_mask & (1 << 0))) {
*ib_reg_mask |= (1 << 0);
return 0;
}
@@ -478,7 +478,7 @@ static int xgene_pcie_select_ib_reg(u8 *ib_reg_mask, u64 size)
return -EINVAL;
}
-static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
+static void xgene_pcie_setup_ib_reg(struct xgene_pcie *port,
struct resource_entry *entry,
u8 *ib_reg_mask)
{
@@ -529,7 +529,7 @@ static void xgene_pcie_setup_ib_reg(struct xgene_pcie_port *port,
xgene_pcie_setup_pims(port, pim_reg, pci_addr, ~(size - 1));
}
-static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
+static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie *port)
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(port);
struct resource_entry *entry;
@@ -542,7 +542,7 @@ static int xgene_pcie_parse_map_dma_ranges(struct xgene_pcie_port *port)
}
/* clear BAR configuration which was done by firmware */
-static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
+static void xgene_pcie_clear_config(struct xgene_pcie *port)
{
int i;
@@ -550,7 +550,7 @@ static void xgene_pcie_clear_config(struct xgene_pcie_port *port)
xgene_pcie_writel(port, i, 0);
}
-static int xgene_pcie_setup(struct xgene_pcie_port *port)
+static int xgene_pcie_setup(struct xgene_pcie *port)
{
struct device *dev = port->dev;
u32 val, lanes = 0, speed = 0;
@@ -588,7 +588,7 @@ static int xgene_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
- struct xgene_pcie_port *port;
+ struct xgene_pcie *port;
struct pci_host_bridge *bridge;
int ret;
diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c
index 2513e9363236..18b2361d6462 100644
--- a/drivers/pci/controller/pcie-altera.c
+++ b/drivers/pci/controller/pcie-altera.c
@@ -510,10 +510,8 @@ static int altera_pcie_cfg_read(struct pci_bus *bus, unsigned int devfn,
if (altera_pcie_hide_rc_bar(bus, devfn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
- if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn))) {
- *value = 0xffffffff;
+ if (!altera_pcie_valid_device(pcie, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
return _altera_pcie_cfg_read(pcie, bus->number, devfn, where, size,
value);
@@ -767,7 +765,7 @@ static int altera_pcie_probe(struct platform_device *pdev)
struct altera_pcie *pcie;
struct pci_host_bridge *bridge;
int ret;
- const struct of_device_id *match;
+ const struct altera_pcie_data *data;
bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!bridge)
@@ -777,11 +775,11 @@ static int altera_pcie_probe(struct platform_device *pdev)
pcie->pdev = pdev;
platform_set_drvdata(pdev, pcie);
- match = of_match_device(altera_pcie_of_match, &pdev->dev);
- if (!match)
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data)
return -ENODEV;
- pcie->pcie_data = match->data;
+ pcie->pcie_data = data;
ret = altera_pcie_parse_dt(pcie);
if (ret) {
diff --git a/drivers/pci/controller/pcie-apple.c b/drivers/pci/controller/pcie-apple.c
index b090924b41fe..854d95163112 100644
--- a/drivers/pci/controller/pcie-apple.c
+++ b/drivers/pci/controller/pcie-apple.c
@@ -42,8 +42,9 @@
#define CORE_FABRIC_STAT_MASK 0x001F001F
#define CORE_LANE_CFG(port) (0x84000 + 0x4000 * (port))
#define CORE_LANE_CFG_REFCLK0REQ BIT(0)
-#define CORE_LANE_CFG_REFCLK1 BIT(1)
+#define CORE_LANE_CFG_REFCLK1REQ BIT(1)
#define CORE_LANE_CFG_REFCLK0ACK BIT(2)
+#define CORE_LANE_CFG_REFCLK1ACK BIT(3)
#define CORE_LANE_CFG_REFCLKEN (BIT(9) | BIT(10))
#define CORE_LANE_CTL(port) (0x84004 + 0x4000 * (port))
#define CORE_LANE_CTL_CFGACC BIT(15)
@@ -482,9 +483,9 @@ static int apple_pcie_setup_refclk(struct apple_pcie *pcie,
if (res < 0)
return res;
- rmw_set(CORE_LANE_CFG_REFCLK1, pcie->base + CORE_LANE_CFG(port->idx));
+ rmw_set(CORE_LANE_CFG_REFCLK1REQ, pcie->base + CORE_LANE_CFG(port->idx));
res = readl_relaxed_poll_timeout(pcie->base + CORE_LANE_CFG(port->idx),
- stat, stat & CORE_LANE_CFG_REFCLK1,
+ stat, stat & CORE_LANE_CFG_REFCLK1ACK,
100, 50000);
if (res < 0)
@@ -563,6 +564,9 @@ static int apple_pcie_setup_port(struct apple_pcie *pcie,
return ret;
}
+ rmw_clear(PORT_REFCLK_CGDIS, port->base + PORT_REFCLK);
+ rmw_clear(PORT_APPCLK_CGDIS, port->base + PORT_APPCLK);
+
ret = apple_pcie_port_setup_irq(port);
if (ret)
return ret;
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 1fc7bd49a7ad..375c0c40bbf8 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/printk.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -118,6 +119,7 @@
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG 0x4204
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
+#define PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x00800000
#define PCIE_INTR2_CPU_BASE 0x4300
@@ -144,6 +146,9 @@
#define BRCM_INT_PCI_MSI_NR 32
#define BRCM_INT_PCI_MSI_LEGACY_NR 8
#define BRCM_INT_PCI_MSI_SHIFT 0
+#define BRCM_INT_PCI_MSI_MASK GENMASK(BRCM_INT_PCI_MSI_NR - 1, 0)
+#define BRCM_INT_PCI_MSI_LEGACY_MASK GENMASK(31, \
+ 32 - BRCM_INT_PCI_MSI_LEGACY_NR)
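
/*
 * Illustrative sketch (not part of the patch): with BRCM_INT_PCI_MSI_NR
 * == 32 and BRCM_INT_PCI_MSI_LEGACY_NR == 8, the masks above evaluate to
 * GENMASK(31, 0) == 0xffffffff and GENMASK(31, 24) == 0xff000000, i.e.
 * legacy chips keep their MSI bits in the top byte of the shared
 * register. A userspace restatement of GENMASK with compile-time checks:
 */
#include <stdint.h>

#define EXAMPLE_GENMASK(h, l) \
	(((~UINT32_C(0)) << (l)) & (~UINT32_C(0) >> (31 - (h))))

_Static_assert(EXAMPLE_GENMASK(31, 0) == UINT32_C(0xffffffff), "full mask");
_Static_assert(EXAMPLE_GENMASK(31, 24) == UINT32_C(0xff000000), "legacy mask");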
/* MSI target addresses */
#define BRCM_MSI_TARGET_ADDR_LT_4GB 0x0fffffffcULL
@@ -191,6 +196,8 @@ static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie,
static inline void brcm_pcie_perst_set_4908(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
+static int brcm_pcie_linkup(struct brcm_pcie *pcie);
+static int brcm_pcie_add_bus(struct pci_bus *bus);
enum {
RGR1_SW_INIT_1,
@@ -205,6 +212,8 @@ enum {
enum pcie_type {
GENERIC,
+ BCM7425,
+ BCM7435,
BCM4908,
BCM7278,
BCM2711,
@@ -223,6 +232,12 @@ static const int pcie_offsets[] = {
[EXT_CFG_DATA] = 0x9004,
};
+static const int pcie_offsets_bmips_7425[] = {
+ [RGR1_SW_INIT_1] = 0x8010,
+ [EXT_CFG_INDEX] = 0x8300,
+ [EXT_CFG_DATA] = 0x8304,
+};
+
static const struct pcie_cfg_data generic_cfg = {
.offsets = pcie_offsets,
.type = GENERIC,
@@ -230,6 +245,20 @@ static const struct pcie_cfg_data generic_cfg = {
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};
+static const struct pcie_cfg_data bcm7425_cfg = {
+ .offsets = pcie_offsets_bmips_7425,
+ .type = BCM7425,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const struct pcie_cfg_data bcm7435_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM7435,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
static const struct pcie_cfg_data bcm4908_cfg = {
.offsets = pcie_offsets,
.type = BCM4908,
@@ -257,6 +286,14 @@ static const struct pcie_cfg_data bcm2711_cfg = {
.bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
};
+struct subdev_regulators {
+ unsigned int num_supplies;
+ struct regulator_bulk_data supplies[];
+};
+
+static int pci_subdev_regulators_add_bus(struct pci_bus *bus);
+static void pci_subdev_regulators_remove_bus(struct pci_bus *bus);
+
struct brcm_msi {
struct device *dev;
void __iomem *base;
@@ -266,8 +303,7 @@ struct brcm_msi {
struct mutex lock; /* guards the alloc/free operations */
u64 target_addr;
int irq;
- /* used indicates which MSI interrupts have been alloc'd */
- unsigned long used;
+ DECLARE_BITMAP(used, BRCM_INT_PCI_MSI_NR);
bool legacy;
/* Some chips have MSIs in bits [31..24] of a shared register. */
int legacy_shift;
@@ -295,8 +331,16 @@ struct brcm_pcie {
u32 hw_rev;
void (*perst_set)(struct brcm_pcie *pcie, u32 val);
void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+ bool refusal_mode;
+ struct subdev_regulators *sr;
+ bool ep_wakeup_capable;
};
+static inline bool is_bmips(const struct brcm_pcie *pcie)
+{
+ return pcie->type == BCM7435 || pcie->type == BCM7425;
+}
+
/*
* This is to convert the size of the inbound "BAR" region to the
* non-linear values of PCIE_X_MISC_RC_BAR[123]_CONFIG_LO.SIZE
@@ -406,6 +450,99 @@ static int brcm_pcie_set_ssc(struct brcm_pcie *pcie)
return ssc && pll ? 0 : -EIO;
}
+static void *alloc_subdev_regulators(struct device *dev)
+{
+ static const char * const supplies[] = {
+ "vpcie3v3",
+ "vpcie3v3aux",
+ "vpcie12v",
+ };
+ const size_t size = sizeof(struct subdev_regulators)
+ + sizeof(struct regulator_bulk_data) * ARRAY_SIZE(supplies);
+ struct subdev_regulators *sr;
+ int i;
+
+ sr = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (sr) {
+ sr->num_supplies = ARRAY_SIZE(supplies);
+ for (i = 0; i < ARRAY_SIZE(supplies); i++)
+ sr->supplies[i].supply = supplies[i];
+ }
+
+ return sr;
+}
+
+static int pci_subdev_regulators_add_bus(struct pci_bus *bus)
+{
+ struct device *dev = &bus->dev;
+ struct subdev_regulators *sr;
+ int ret;
+
+ if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent))
+ return 0;
+
+ if (dev->driver_data)
+ dev_err(dev, "dev.driver_data unexpectedly non-NULL\n");
+
+ sr = alloc_subdev_regulators(dev);
+ if (!sr)
+ return -ENOMEM;
+
+ dev->driver_data = sr;
+ ret = regulator_bulk_get(dev, sr->num_supplies, sr->supplies);
+ if (ret)
+ return ret;
+
+ ret = regulator_bulk_enable(sr->num_supplies, sr->supplies);
+ if (ret) {
+ dev_err(dev, "failed to enable regulators for downstream device\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int brcm_pcie_add_bus(struct pci_bus *bus)
+{
+ struct device *dev = &bus->dev;
+ struct brcm_pcie *pcie = (struct brcm_pcie *) bus->sysdata;
+ int ret;
+
+ if (!dev->of_node || !bus->parent || !pci_is_root_bus(bus->parent))
+ return 0;
+
+ ret = pci_subdev_regulators_add_bus(bus);
+ if (ret)
+ return ret;
+
+ /* Grab the regulators for suspend/resume */
+ pcie->sr = bus->dev.driver_data;
+
+ /*
+	 * If linkup has failed there is no point in returning an error, as
+	 * currently it would cause a WARNING() from pci_alloc_child_bus().
+	 * We return 0 and turn on "refusal_mode" so that any further
+	 * accesses to the pci_dev just get 0xffffffff.
+ */
+ if (brcm_pcie_linkup(pcie) != 0)
+ pcie->refusal_mode = true;
+
+ return 0;
+}
+
+static void pci_subdev_regulators_remove_bus(struct pci_bus *bus)
+{
+ struct device *dev = &bus->dev;
+ struct subdev_regulators *sr = dev->driver_data;
+
+ if (!sr || !bus->parent || !pci_is_root_bus(bus->parent))
+ return;
+
+ if (regulator_bulk_disable(sr->num_supplies, sr->supplies))
+ dev_err(dev, "failed to disable regulators for downstream device\n");
+ dev->driver_data = NULL;
+}
+
/* Limits operation to a specific generation (1, 2, or 3) */
static void brcm_pcie_set_gen(struct brcm_pcie *pcie, int gen)
{
@@ -443,6 +580,9 @@ static void brcm_pcie_set_outbound_win(struct brcm_pcie *pcie,
PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK);
writel(tmp, pcie->base + PCIE_MEM_WIN0_BASE_LIMIT(win));
+ if (is_bmips(pcie))
+ return;
+
/* Write the cpu & limit addr upper bits */
high_addr_shift =
HWEIGHT32(PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK);
@@ -534,7 +674,7 @@ static int brcm_msi_alloc(struct brcm_msi *msi)
int hwirq;
mutex_lock(&msi->lock);
- hwirq = bitmap_find_free_region(&msi->used, msi->nr, 0);
+ hwirq = bitmap_find_free_region(msi->used, msi->nr, 0);
mutex_unlock(&msi->lock);
return hwirq;
@@ -543,7 +683,7 @@ static int brcm_msi_alloc(struct brcm_msi *msi)
static void brcm_msi_free(struct brcm_msi *msi, unsigned long hwirq)
{
mutex_lock(&msi->lock);
- bitmap_release_region(&msi->used, hwirq, 0);
+ bitmap_release_region(msi->used, hwirq, 0);
mutex_unlock(&msi->lock);
}
@@ -619,7 +759,8 @@ static void brcm_msi_remove(struct brcm_pcie *pcie)
static void brcm_msi_set_regs(struct brcm_msi *msi)
{
- u32 val = __GENMASK(31, msi->legacy_shift);
+ u32 val = msi->legacy ? BRCM_INT_PCI_MSI_LEGACY_MASK :
+ BRCM_INT_PCI_MSI_MASK;
writel(val, msi->intr_base + MSI_INT_MASK_CLR);
writel(val, msi->intr_base + MSI_INT_CLR);
@@ -661,6 +802,12 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
msi->irq = irq;
msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33;
+ /*
+ * Sanity check to make sure that the 'used' bitmap in struct brcm_msi
+ * is large enough.
+ */
+ BUILD_BUG_ON(BRCM_INT_PCI_MSI_LEGACY_NR > BRCM_INT_PCI_MSI_NR);
+
if (msi->legacy) {
msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
@@ -711,6 +858,18 @@ static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
/* Accesses to the RC go right to the RC registers if slot==0 */
if (pci_is_root_bus(bus))
return PCI_SLOT(devfn) ? NULL : base + where;
+ if (pcie->refusal_mode) {
+ /*
+ * At this point we do not have link. There will be a CPU
+ * abort -- a quirk with this controller --if Linux tries
+ * to read any config-space registers besides those
+ * targeting the host bridge. To prevent this we hijack
+		 * abort -- a quirk with this controller -- if Linux tries
+ * 0xffffffff.
+ */
+ writel(0xffffffff, base + PCIE_MISC_RC_BAR2_CONFIG_HI);
+ return base + PCIE_MISC_RC_BAR2_CONFIG_HI + (where & 0x3);
+ }
/* For devices, write to the config space index register */
idx = PCIE_ECAM_OFFSET(bus->number, devfn, 0);
@@ -718,10 +877,35 @@ static void __iomem *brcm_pcie_map_conf(struct pci_bus *bus, unsigned int devfn,
return base + PCIE_EXT_CFG_DATA + where;
}
+static void __iomem *brcm_pcie_map_conf32(struct pci_bus *bus, unsigned int devfn,
+ int where)
+{
+ struct brcm_pcie *pcie = bus->sysdata;
+ void __iomem *base = pcie->base;
+ int idx;
+
+ /* Accesses to the RC go right to the RC registers if slot==0 */
+ if (pci_is_root_bus(bus))
+ return PCI_SLOT(devfn) ? NULL : base + (where & ~0x3);
+
+ /* For devices, write to the config space index register */
+ idx = PCIE_ECAM_OFFSET(bus->number, devfn, (where & ~3));
+ writel(idx, base + IDX_ADDR(pcie));
+ return base + DATA_ADDR(pcie);
+}
+
static struct pci_ops brcm_pcie_ops = {
.map_bus = brcm_pcie_map_conf,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
+ .add_bus = brcm_pcie_add_bus,
+ .remove_bus = pci_subdev_regulators_remove_bus,
+};
+
+static struct pci_ops brcm_pcie_ops32 = {
+ .map_bus = brcm_pcie_map_conf32,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write32,
};
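
/*
 * Illustrative sketch (assumptions noted, not part of the patch): both
 * map_conf variants program an index register with an ECAM-style offset
 * and hand back a pointer into a fixed data window. The standard ECAM
 * encoding packed by PCIE_ECAM_OFFSET() (see <linux/pci-ecam.h>) is
 * restated here for userspace:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t ecam_offset(uint8_t bus, uint8_t dev, uint8_t fn,
			    uint16_t reg)
{
	return ((uint32_t)bus << 20) |	/* bus number, bits [27:20] */
	       ((uint32_t)dev << 15) |	/* device,     bits [19:15] */
	       ((uint32_t)fn  << 12) |	/* function,   bits [14:12] */
	       (reg & 0xfff);		/* register,   bits [11:0]  */
}

int main(void)
{
	assert(ecam_offset(1, 0, 0, 0x10) == 0x100010);
	return 0;
}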
static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
@@ -863,16 +1047,9 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
static int brcm_pcie_setup(struct brcm_pcie *pcie)
{
- struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
u64 rc_bar2_offset, rc_bar2_size;
void __iomem *base = pcie->base;
- struct device *dev = pcie->dev;
- struct resource_entry *entry;
- bool ssc_good = false;
- struct resource *res;
- int num_out_wins = 0;
- u16 nlw, cls, lnksta;
- int i, ret, memc;
+ int ret, memc;
u32 tmp, burst, aspm_support;
/* Reset the bridge */
@@ -883,7 +1060,10 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
pcie->bridge_sw_init_set(pcie, 0);
tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
- tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
+ if (is_bmips(pcie))
+ tmp &= ~PCIE_BMIPS_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
+ else
+ tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
/* Wait for SerDes to be stable */
usleep_range(100, 200);
@@ -893,8 +1073,10 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
* is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it
* is encoded as 0=Rsvd, 1=128, 2=256, 3=512.
*/
- if (pcie->type == BCM2711)
- burst = 0x0; /* 128B */
+ if (is_bmips(pcie))
+ burst = 0x1; /* 256 bytes */
+ else if (pcie->type == BCM2711)
+ burst = 0x0; /* 128 bytes */
else if (pcie->type == BCM7278)
burst = 0x3; /* 512 bytes */
else
@@ -957,6 +1139,40 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
if (pcie->gen)
brcm_pcie_set_gen(pcie, pcie->gen);
+ /* Don't advertise L0s capability if 'aspm-no-l0s' */
+ aspm_support = PCIE_LINK_STATE_L1;
+ if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
+ aspm_support |= PCIE_LINK_STATE_L0S;
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+ u32p_replace_bits(&tmp, aspm_support,
+ PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
+
+ /*
+ * For config space accesses on the RC, show the right class for
+ * a PCIe-PCIe bridge (the default setting is to be EP mode).
+ */
+ tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+ u32p_replace_bits(&tmp, 0x060400,
+ PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
+ writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
+
+ return 0;
+}
+
+static int brcm_pcie_linkup(struct brcm_pcie *pcie)
+{
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+ struct device *dev = pcie->dev;
+ void __iomem *base = pcie->base;
+ struct resource_entry *entry;
+ struct resource *res;
+ int num_out_wins = 0;
+ u16 nlw, cls, lnksta;
+ bool ssc_good = false;
+ u32 tmp;
+ int ret, i;
+
/* Unassert the fundamental reset */
pcie->perst_set(pcie, 0);
@@ -988,30 +1204,25 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
return -EINVAL;
}
+ if (is_bmips(pcie)) {
+ u64 start = res->start;
+ unsigned int j, nwins = resource_size(res) / SZ_128M;
+
+ /* bmips PCIe outbound windows have a 128MB max size */
+ if (nwins > BRCM_NUM_PCIE_OUT_WINS)
+ nwins = BRCM_NUM_PCIE_OUT_WINS;
+ for (j = 0; j < nwins; j++, start += SZ_128M)
+ brcm_pcie_set_outbound_win(pcie, j, start,
+ start - entry->offset,
+ SZ_128M);
+ break;
+ }
brcm_pcie_set_outbound_win(pcie, num_out_wins, res->start,
res->start - entry->offset,
resource_size(res));
num_out_wins++;
}
- /* Don't advertise L0s capability if 'aspm-no-l0s' */
- aspm_support = PCIE_LINK_STATE_L1;
- if (!of_property_read_bool(pcie->np, "aspm-no-l0s"))
- aspm_support |= PCIE_LINK_STATE_L0S;
- tmp = readl(base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
- u32p_replace_bits(&tmp, aspm_support,
- PCIE_RC_CFG_PRIV1_LINK_CAPABILITY_ASPM_SUPPORT_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_LINK_CAPABILITY);
-
- /*
- * For config space accesses on the RC, show the right class for
- * a PCIe-PCIe bridge (the default setting is to be EP mode).
- */
- tmp = readl(base + PCIE_RC_CFG_PRIV1_ID_VAL3);
- u32p_replace_bits(&tmp, 0x060400,
- PCIE_RC_CFG_PRIV1_ID_VAL3_CLASS_CODE_MASK);
- writel(tmp, base + PCIE_RC_CFG_PRIV1_ID_VAL3);
-
if (pcie->ssc) {
ret = brcm_pcie_set_ssc(pcie);
if (ret == 0)
@@ -1140,17 +1351,60 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
pcie->bridge_sw_init_set(pcie, 1);
}
+static int pci_dev_may_wakeup(struct pci_dev *dev, void *data)
+{
+ bool *ret = data;
+
+ if (device_may_wakeup(&dev->dev)) {
+ *ret = true;
+ dev_info(&dev->dev, "disable cancelled for wake-up device\n");
+ }
+ return (int) *ret;
+}
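
/*
 * Illustrative sketch (not part of the patch): pci_walk_bus() calls the
 * callback for every device below 'bus' and stops the walk as soon as the
 * callback returns non-zero -- which is why pci_dev_may_wakeup() above
 * returns the flag it just set. A minimal caller shape (kernel types
 * assumed):
 */
static int example_count_devs(struct pci_dev *dev, void *data)
{
	unsigned int *count = data;

	(*count)++;
	return 0;	/* keep walking; non-zero would stop early */
}

static unsigned int example_count_bus_devs(struct pci_bus *bus)
{
	unsigned int count = 0;

	pci_walk_bus(bus, example_count_devs, &count);
	return count;
}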
+
static int brcm_pcie_suspend(struct device *dev)
{
struct brcm_pcie *pcie = dev_get_drvdata(dev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
int ret;
brcm_pcie_turn_off(pcie);
- ret = brcm_phy_stop(pcie);
- reset_control_rearm(pcie->rescal);
+ /*
+	 * If brcm_phy_stop() returns an error, just dev_err(): returning
+	 * the error would cause the suspend to fail, and this is a
+	 * forgivable offense that will probably be cleared up on resume.
+ */
+ if (brcm_phy_stop(pcie))
+ dev_err(dev, "Could not stop phy for suspend\n");
+
+ ret = reset_control_rearm(pcie->rescal);
+ if (ret) {
+ dev_err(dev, "Could not rearm rescal reset\n");
+ return ret;
+ }
+
+ if (pcie->sr) {
+ /*
+		 * Now turn off the regulators, unless at least one
+		 * downstream device is enabled as a wake-up source, in
+		 * which case leave them on.
+ */
+ pcie->ep_wakeup_capable = false;
+ pci_walk_bus(bridge->bus, pci_dev_may_wakeup,
+ &pcie->ep_wakeup_capable);
+ if (!pcie->ep_wakeup_capable) {
+ ret = regulator_bulk_disable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn off regulators\n");
+ reset_control_reset(pcie->rescal);
+ return ret;
+ }
+ }
+ }
clk_disable_unprepare(pcie->clk);
- return ret;
+ return 0;
}
static int brcm_pcie_resume(struct device *dev)
@@ -1161,11 +1415,32 @@ static int brcm_pcie_resume(struct device *dev)
int ret;
base = pcie->base;
- clk_prepare_enable(pcie->clk);
+ ret = clk_prepare_enable(pcie->clk);
+ if (ret)
+ return ret;
+
+ if (pcie->sr) {
+ if (pcie->ep_wakeup_capable) {
+ /*
+			 * We are resuming from suspend. During suspend the
+			 * power supplies were not disabled, so there is no
+			 * need to enable them (which would falsely increase
+			 * their usage count).
+ */
+ pcie->ep_wakeup_capable = false;
+ } else {
+ ret = regulator_bulk_enable(pcie->sr->num_supplies,
+ pcie->sr->supplies);
+ if (ret) {
+ dev_err(dev, "Could not turn on regulators\n");
+ goto err_disable_clk;
+ }
+ }
+ }
ret = reset_control_reset(pcie->rescal);
if (ret)
- goto err_disable_clk;
+ goto err_regulator;
ret = brcm_phy_start(pcie);
if (ret)
@@ -1186,6 +1461,10 @@ static int brcm_pcie_resume(struct device *dev)
if (ret)
goto err_reset;
+ ret = brcm_pcie_linkup(pcie);
+ if (ret)
+ goto err_reset;
+
if (pcie->msi)
brcm_msi_set_regs(pcie->msi);
@@ -1193,6 +1472,9 @@ static int brcm_pcie_resume(struct device *dev)
err_reset:
reset_control_rearm(pcie->rescal);
+err_regulator:
+ if (pcie->sr)
+ regulator_bulk_disable(pcie->sr->num_supplies, pcie->sr->supplies);
err_disable_clk:
clk_disable_unprepare(pcie->clk);
return ret;
@@ -1202,8 +1484,10 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie)
{
brcm_msi_remove(pcie);
brcm_pcie_turn_off(pcie);
- brcm_phy_stop(pcie);
- reset_control_rearm(pcie->rescal);
+ if (brcm_phy_stop(pcie))
+ dev_err(pcie->dev, "Could not stop phy\n");
+ if (reset_control_rearm(pcie->rescal))
+ dev_err(pcie->dev, "Could not rearm rescal reset\n");
clk_disable_unprepare(pcie->clk);
}
@@ -1226,6 +1510,8 @@ static const struct of_device_id brcm_pcie_match[] = {
{ .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
{ .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
{ .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7435-pcie", .data = &bcm7435_cfg },
+ { .compatible = "brcm,bcm7425-pcie", .data = &bcm7425_cfg },
{},
};
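
[Editor's note: the new compatible entries bind each SoC to its own config via
the .data pointer. The usual retrieval pattern at probe time looks like the
sketch below; of_device_get_match_data() is standard OF API, the function name
is hypothetical:]

    #include <linux/errno.h>
    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    /* Sketch: fetch the per-SoC config selected by the match table. */
    static int example_probe(struct platform_device *pdev)
    {
    	const void *cfg = of_device_get_match_data(&pdev->dev);

    	if (!cfg)
    		return -EINVAL;

    	/* cast cfg to the driver's private config type and use it */
    	return 0;
    }
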
@@ -1315,12 +1601,22 @@ static int brcm_pcie_probe(struct platform_device *pdev)
}
}
- bridge->ops = &brcm_pcie_ops;
+ bridge->ops = pcie->type == BCM7425 ? &brcm_pcie_ops32 : &brcm_pcie_ops;
bridge->sysdata = pcie;
platform_set_drvdata(pdev, pcie);
- return pci_host_probe(bridge);
+ ret = pci_host_probe(bridge);
+ if (!ret && !brcm_pcie_link_up(pcie))
+ ret = -ENODEV;
+
+ if (ret) {
+ brcm_pcie_remove(pdev);
+ return ret;
+ }
+
+ return 0;
+
fail:
__brcm_pcie_remove(pcie);
return ret;
@@ -1329,8 +1625,8 @@ fail:
MODULE_DEVICE_TABLE(of, brcm_pcie_match);
static const struct dev_pm_ops brcm_pcie_pm_ops = {
- .suspend = brcm_pcie_suspend,
- .resume = brcm_pcie_resume,
+ .suspend_noirq = brcm_pcie_suspend,
+ .resume_noirq = brcm_pcie_resume,
};
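
[Editor's note: moving the callbacks to the _noirq phase defers the
controller's power-down until after every endpoint driver's regular suspend
callback has run, since the noirq phase executes last in suspend ordering with
device interrupt handlers disabled. A sketch of an equivalent declaration via
the stock helper macro; not part of the patch:]

    #include <linux/pm.h>

    /*
     * Sketch: SET_NOIRQ_SYSTEM_SLEEP_PM_OPS() fills in
     * .suspend_noirq/.resume_noirq (plus the hibernation _noirq
     * variants) only when CONFIG_PM_SLEEP is enabled.
     */
    static const struct dev_pm_ops example_pm_ops = {
    	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(brcm_pcie_suspend, brcm_pcie_resume)
    };
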
static struct platform_driver brcm_pcie_driver = {
diff --git a/drivers/pci/controller/pcie-iproc-bcma.c b/drivers/pci/controller/pcie-iproc-bcma.c
index f918c713afb0..54b6e6d5bc64 100644
--- a/drivers/pci/controller/pcie-iproc-bcma.c
+++ b/drivers/pci/controller/pcie-iproc-bcma.c
@@ -23,7 +23,7 @@ static void bcma_pcie2_fixup_class(struct pci_dev *dev)
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8011, bcma_pcie2_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_BROADCOM, 0x8012, bcma_pcie2_fixup_class);
-static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int iproc_bcma_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct iproc_pcie *pcie = dev->sysdata;
struct bcma_device *bdev = container_of(pcie->dev, struct bcma_device, dev);
@@ -31,7 +31,7 @@ static int iproc_pcie_bcma_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
return bcma_core_irq(bdev, 5);
}
-static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
+static int iproc_bcma_pcie_probe(struct bcma_device *bdev)
{
struct device *dev = &bdev->dev;
struct iproc_pcie *pcie;
@@ -64,33 +64,33 @@ static int iproc_pcie_bcma_probe(struct bcma_device *bdev)
if (ret)
return ret;
- pcie->map_irq = iproc_pcie_bcma_map_irq;
+ pcie->map_irq = iproc_bcma_pcie_map_irq;
bcma_set_drvdata(bdev, pcie);
return iproc_pcie_setup(pcie, &bridge->windows);
}
-static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
+static void iproc_bcma_pcie_remove(struct bcma_device *bdev)
{
struct iproc_pcie *pcie = bcma_get_drvdata(bdev);
iproc_pcie_remove(pcie);
}
-static const struct bcma_device_id iproc_pcie_bcma_table[] = {
+static const struct bcma_device_id iproc_bcma_pcie_table[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_PCIEG2, BCMA_ANY_REV, BCMA_ANY_CLASS),
{},
};
-MODULE_DEVICE_TABLE(bcma, iproc_pcie_bcma_table);
+MODULE_DEVICE_TABLE(bcma, iproc_bcma_pcie_table);
-static struct bcma_driver iproc_pcie_bcma_driver = {
+static struct bcma_driver iproc_bcma_pcie_driver = {
.name = KBUILD_MODNAME,
- .id_table = iproc_pcie_bcma_table,
- .probe = iproc_pcie_bcma_probe,
- .remove = iproc_pcie_bcma_remove,
+ .id_table = iproc_bcma_pcie_table,
+ .probe = iproc_bcma_pcie_probe,
+ .remove = iproc_bcma_pcie_remove,
};
-module_bcma_driver(iproc_pcie_bcma_driver);
+module_bcma_driver(iproc_bcma_pcie_driver);
MODULE_AUTHOR("Hauke Mehrtens");
MODULE_DESCRIPTION("Broadcom iProc PCIe BCMA driver");
diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c
index b93e7bda101b..538115246c79 100644
--- a/drivers/pci/controller/pcie-iproc-platform.c
+++ b/drivers/pci/controller/pcie-iproc-platform.c
@@ -37,7 +37,7 @@ static const struct of_device_id iproc_pcie_of_match_table[] = {
};
MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table);
-static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
+static int iproc_pltfm_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct iproc_pcie *pcie;
@@ -115,30 +115,30 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
return 0;
}
-static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
+static int iproc_pltfm_pcie_remove(struct platform_device *pdev)
{
struct iproc_pcie *pcie = platform_get_drvdata(pdev);
return iproc_pcie_remove(pcie);
}
-static void iproc_pcie_pltfm_shutdown(struct platform_device *pdev)
+static void iproc_pltfm_pcie_shutdown(struct platform_device *pdev)
{
struct iproc_pcie *pcie = platform_get_drvdata(pdev);
iproc_pcie_shutdown(pcie);
}
-static struct platform_driver iproc_pcie_pltfm_driver = {
+static struct platform_driver iproc_pltfm_pcie_driver = {
.driver = {
.name = "iproc-pcie",
.of_match_table = of_match_ptr(iproc_pcie_of_match_table),
},
- .probe = iproc_pcie_pltfm_probe,
- .remove = iproc_pcie_pltfm_remove,
- .shutdown = iproc_pcie_pltfm_shutdown,
+ .probe = iproc_pltfm_pcie_probe,
+ .remove = iproc_pltfm_pcie_remove,
+ .shutdown = iproc_pltfm_pcie_shutdown,
};
-module_platform_driver(iproc_pcie_pltfm_driver);
+module_platform_driver(iproc_pltfm_pcie_driver);
MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
MODULE_DESCRIPTION("Broadcom iPROC PCIe platform driver");
diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c
index 36b9d2c46cfa..b3e75bc61ff1 100644
--- a/drivers/pci/controller/pcie-iproc.c
+++ b/drivers/pci/controller/pcie-iproc.c
@@ -659,10 +659,8 @@ static int iproc_pci_raw_config_read32(struct iproc_pcie *pcie,
void __iomem *addr;
addr = iproc_pcie_map_cfg_bus(pcie, 0, devfn, where & ~0x3);
- if (!addr) {
- *val = ~0;
+ if (!addr)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
*val = readl(addr);
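
[Editor's note: this hunk, and the matching ones in the MediaTek, R-Car, and
Rockchip drivers further down, drop the manual *val = ~0 because the PCI core
now fabricates the all-ones response for any non-successful return code;
controller drivers only report the error. A sketch of the resulting read-op
shape, with a hypothetical example_map_bus(); not part of the patch:]

    #include <linux/io.h>
    #include <linux/pci.h>

    /*
     * Sketch: on any non-successful return the PCI core itself fills
     * *val with ~0, so the driver no longer does.
     */
    static int example_config_read(struct pci_bus *bus, unsigned int devfn,
    			       int where, int size, u32 *val)
    {
    	void __iomem *addr = example_map_bus(bus, devfn, where & ~0x3);

    	if (!addr)
    		return PCIBIOS_DEVICE_NOT_FOUND;

    	*val = readl(addr);
    	if (size < 4)
    		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

    	return PCIBIOS_SUCCESSFUL;
    }
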
diff --git a/drivers/pci/controller/pcie-mediatek-gen3.c b/drivers/pci/controller/pcie-mediatek-gen3.c
index 17c59b0d6978..7705d61fba4c 100644
--- a/drivers/pci/controller/pcie-mediatek-gen3.c
+++ b/drivers/pci/controller/pcie-mediatek-gen3.c
@@ -79,6 +79,9 @@
#define PCIE_ICMD_PM_REG 0x198
#define PCIE_TURN_OFF_LINK BIT(4)
+#define PCIE_MISC_CTRL_REG 0x348
+#define PCIE_DISABLE_DVFSRC_VLT_REQ BIT(1)
+
#define PCIE_TRANS_TABLE_BASE_REG 0x800
#define PCIE_ATR_SRC_ADDR_MSB_OFFSET 0x4
#define PCIE_ATR_TRSL_ADDR_LSB_OFFSET 0x8
@@ -110,7 +113,7 @@ struct mtk_msi_set {
};
/**
- * struct mtk_pcie_port - PCIe port information
+ * struct mtk_gen3_pcie - PCIe port information
* @dev: pointer to PCIe device
* @base: IO mapped register base
* @reg_base: physical register base
@@ -129,7 +132,7 @@ struct mtk_msi_set {
* @lock: lock protecting IRQ bit map
* @msi_irq_in_use: bit map for assigned MSI IRQ
*/
-struct mtk_pcie_port {
+struct mtk_gen3_pcie {
struct device *dev;
void __iomem *base;
phys_addr_t reg_base;
@@ -162,7 +165,7 @@ struct mtk_pcie_port {
static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
int where, int size)
{
- struct mtk_pcie_port *port = bus->sysdata;
+ struct mtk_gen3_pcie *pcie = bus->sysdata;
int bytes;
u32 val;
@@ -171,15 +174,15 @@ static void mtk_pcie_config_tlp_header(struct pci_bus *bus, unsigned int devfn,
val = PCIE_CFG_FORCE_BYTE_EN | PCIE_CFG_BYTE_EN(bytes) |
PCIE_CFG_HEADER(bus->number, devfn);
- writel_relaxed(val, port->base + PCIE_CFGNUM_REG);
+ writel_relaxed(val, pcie->base + PCIE_CFGNUM_REG);
}
static void __iomem *mtk_pcie_map_bus(struct pci_bus *bus, unsigned int devfn,
int where)
{
- struct mtk_pcie_port *port = bus->sysdata;
+ struct mtk_gen3_pcie *pcie = bus->sysdata;
- return port->base + PCIE_CFG_OFFSET_ADDR + where;
+ return pcie->base + PCIE_CFG_OFFSET_ADDR + where;
}
static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
@@ -207,7 +210,7 @@ static struct pci_ops mtk_pcie_ops = {
.write = mtk_pcie_config_write,
};
-static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
+static int mtk_pcie_set_trans_table(struct mtk_gen3_pcie *pcie,
resource_size_t cpu_addr,
resource_size_t pci_addr,
resource_size_t size,
@@ -217,12 +220,12 @@ static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
u32 val;
if (num >= PCIE_MAX_TRANS_TABLES) {
- dev_err(port->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
+ dev_err(pcie->dev, "not enough translate table for addr: %#llx, limited to [%d]\n",
(unsigned long long)cpu_addr, PCIE_MAX_TRANS_TABLES);
return -ENODEV;
}
- table = port->base + PCIE_TRANS_TABLE_BASE_REG +
+ table = pcie->base + PCIE_TRANS_TABLE_BASE_REG +
num * PCIE_ATR_TLB_SET_OFFSET;
writel_relaxed(lower_32_bits(cpu_addr) | PCIE_ATR_SIZE(fls(size) - 1),
@@ -244,66 +247,71 @@ static int mtk_pcie_set_trans_table(struct mtk_pcie_port *port,
return 0;
}
-static void mtk_pcie_enable_msi(struct mtk_pcie_port *port)
+static void mtk_pcie_enable_msi(struct mtk_gen3_pcie *pcie)
{
int i;
u32 val;
for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
- struct mtk_msi_set *msi_set = &port->msi_sets[i];
+ struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
- msi_set->base = port->base + PCIE_MSI_SET_BASE_REG +
+ msi_set->base = pcie->base + PCIE_MSI_SET_BASE_REG +
i * PCIE_MSI_SET_OFFSET;
- msi_set->msg_addr = port->reg_base + PCIE_MSI_SET_BASE_REG +
+ msi_set->msg_addr = pcie->reg_base + PCIE_MSI_SET_BASE_REG +
i * PCIE_MSI_SET_OFFSET;
/* Configure the MSI capture address */
writel_relaxed(lower_32_bits(msi_set->msg_addr), msi_set->base);
writel_relaxed(upper_32_bits(msi_set->msg_addr),
- port->base + PCIE_MSI_SET_ADDR_HI_BASE +
+ pcie->base + PCIE_MSI_SET_ADDR_HI_BASE +
i * PCIE_MSI_SET_ADDR_HI_OFFSET);
}
- val = readl_relaxed(port->base + PCIE_MSI_SET_ENABLE_REG);
+ val = readl_relaxed(pcie->base + PCIE_MSI_SET_ENABLE_REG);
val |= PCIE_MSI_SET_ENABLE;
- writel_relaxed(val, port->base + PCIE_MSI_SET_ENABLE_REG);
+ writel_relaxed(val, pcie->base + PCIE_MSI_SET_ENABLE_REG);
- val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
val |= PCIE_MSI_ENABLE;
- writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
+ writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
}
-static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
+static int mtk_pcie_startup_port(struct mtk_gen3_pcie *pcie)
{
struct resource_entry *entry;
- struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
unsigned int table_index = 0;
int err;
u32 val;
/* Set as RC mode */
- val = readl_relaxed(port->base + PCIE_SETTING_REG);
+ val = readl_relaxed(pcie->base + PCIE_SETTING_REG);
val |= PCIE_RC_MODE;
- writel_relaxed(val, port->base + PCIE_SETTING_REG);
+ writel_relaxed(val, pcie->base + PCIE_SETTING_REG);
/* Set class code */
- val = readl_relaxed(port->base + PCIE_PCI_IDS_1);
+ val = readl_relaxed(pcie->base + PCIE_PCI_IDS_1);
val &= ~GENMASK(31, 8);
val |= PCI_CLASS(PCI_CLASS_BRIDGE_PCI << 8);
- writel_relaxed(val, port->base + PCIE_PCI_IDS_1);
+ writel_relaxed(val, pcie->base + PCIE_PCI_IDS_1);
/* Mask all INTx interrupts */
- val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
val &= ~PCIE_INTX_ENABLE;
- writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
+ writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
+
+ /* Disable DVFSRC voltage request */
+ val = readl_relaxed(pcie->base + PCIE_MISC_CTRL_REG);
+ val |= PCIE_DISABLE_DVFSRC_VLT_REQ;
+ writel_relaxed(val, pcie->base + PCIE_MISC_CTRL_REG);
/* Assert all reset signals */
- val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
val |= PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB;
- writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
/*
- * Described in PCIe CEM specification setctions 2.2 (PERST# Signal)
+ * Described in PCIe CEM specification sections 2.2 (PERST# Signal)
* and 2.2.1 (Initial Power-Up (G3 to S0)).
* The deassertion of PERST# should be delayed 100ms (TPVPERL)
* for the power and clock to become stable.
@@ -312,19 +320,19 @@ static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
/* De-assert reset signals */
val &= ~(PCIE_MAC_RSTB | PCIE_PHY_RSTB | PCIE_BRG_RSTB | PCIE_PE_RSTB);
- writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
/* Check if the link is up or not */
- err = readl_poll_timeout(port->base + PCIE_LINK_STATUS_REG, val,
+ err = readl_poll_timeout(pcie->base + PCIE_LINK_STATUS_REG, val,
!!(val & PCIE_PORT_LINKUP), 20,
PCI_PM_D3COLD_WAIT * USEC_PER_MSEC);
if (err) {
- val = readl_relaxed(port->base + PCIE_LTSSM_STATUS_REG);
- dev_err(port->dev, "PCIe link down, ltssm reg val: %#x\n", val);
+ val = readl_relaxed(pcie->base + PCIE_LTSSM_STATUS_REG);
+ dev_err(pcie->dev, "PCIe link down, ltssm reg val: %#x\n", val);
return err;
}
- mtk_pcie_enable_msi(port);
+ mtk_pcie_enable_msi(pcie);
/* Set PCIe translation windows */
resource_list_for_each_entry(entry, &host->windows) {
@@ -347,12 +355,12 @@ static int mtk_pcie_startup_port(struct mtk_pcie_port *port)
pci_addr = res->start - entry->offset;
size = resource_size(res);
- err = mtk_pcie_set_trans_table(port, cpu_addr, pci_addr, size,
+ err = mtk_pcie_set_trans_table(pcie, cpu_addr, pci_addr, size,
type, table_index);
if (err)
return err;
- dev_dbg(port->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
+ dev_dbg(pcie->dev, "set %s trans window[%d]: cpu_addr = %#llx, pci_addr = %#llx, size = %#llx\n",
range_type, table_index, (unsigned long long)cpu_addr,
(unsigned long long)pci_addr, (unsigned long long)size);
@@ -396,7 +404,7 @@ static struct msi_domain_info mtk_msi_domain_info = {
static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
- struct mtk_pcie_port *port = data->domain->host_data;
+ struct mtk_gen3_pcie *pcie = data->domain->host_data;
unsigned long hwirq;
hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
@@ -404,7 +412,7 @@ static void mtk_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->address_hi = upper_32_bits(msi_set->msg_addr);
msg->address_lo = lower_32_bits(msi_set->msg_addr);
msg->data = hwirq;
- dev_dbg(port->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
+ dev_dbg(pcie->dev, "msi#%#lx address_hi %#x address_lo %#x data %d\n",
hwirq, msg->address_hi, msg->address_lo, msg->data);
}
@@ -421,33 +429,33 @@ static void mtk_msi_bottom_irq_ack(struct irq_data *data)
static void mtk_msi_bottom_irq_mask(struct irq_data *data)
{
struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
- struct mtk_pcie_port *port = data->domain->host_data;
+ struct mtk_gen3_pcie *pcie = data->domain->host_data;
unsigned long hwirq, flags;
u32 val;
hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
- raw_spin_lock_irqsave(&port->irq_lock, flags);
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
val &= ~BIT(hwirq);
writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
- raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static void mtk_msi_bottom_irq_unmask(struct irq_data *data)
{
struct mtk_msi_set *msi_set = irq_data_get_irq_chip_data(data);
- struct mtk_pcie_port *port = data->domain->host_data;
+ struct mtk_gen3_pcie *pcie = data->domain->host_data;
unsigned long hwirq, flags;
u32 val;
hwirq = data->hwirq % PCIE_MSI_IRQS_PER_SET;
- raw_spin_lock_irqsave(&port->irq_lock, flags);
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
val = readl_relaxed(msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
val |= BIT(hwirq);
writel_relaxed(val, msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
- raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static struct irq_chip mtk_msi_bottom_irq_chip = {
@@ -463,22 +471,22 @@ static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs,
void *arg)
{
- struct mtk_pcie_port *port = domain->host_data;
+ struct mtk_gen3_pcie *pcie = domain->host_data;
struct mtk_msi_set *msi_set;
int i, hwirq, set_idx;
- mutex_lock(&port->lock);
+ mutex_lock(&pcie->lock);
- hwirq = bitmap_find_free_region(port->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
+ hwirq = bitmap_find_free_region(pcie->msi_irq_in_use, PCIE_MSI_IRQS_NUM,
order_base_2(nr_irqs));
- mutex_unlock(&port->lock);
+ mutex_unlock(&pcie->lock);
if (hwirq < 0)
return -ENOSPC;
set_idx = hwirq / PCIE_MSI_IRQS_PER_SET;
- msi_set = &port->msi_sets[set_idx];
+ msi_set = &pcie->msi_sets[set_idx];
for (i = 0; i < nr_irqs; i++)
irq_domain_set_info(domain, virq + i, hwirq + i,
@@ -491,15 +499,15 @@ static int mtk_msi_bottom_domain_alloc(struct irq_domain *domain,
static void mtk_msi_bottom_domain_free(struct irq_domain *domain,
unsigned int virq, unsigned int nr_irqs)
{
- struct mtk_pcie_port *port = domain->host_data;
+ struct mtk_gen3_pcie *pcie = domain->host_data;
struct irq_data *data = irq_domain_get_irq_data(domain, virq);
- mutex_lock(&port->lock);
+ mutex_lock(&pcie->lock);
- bitmap_release_region(port->msi_irq_in_use, data->hwirq,
+ bitmap_release_region(pcie->msi_irq_in_use, data->hwirq,
order_base_2(nr_irqs));
- mutex_unlock(&port->lock);
+ mutex_unlock(&pcie->lock);
irq_domain_free_irqs_common(domain, virq, nr_irqs);
}
@@ -511,28 +519,28 @@ static const struct irq_domain_ops mtk_msi_bottom_domain_ops = {
static void mtk_intx_mask(struct irq_data *data)
{
- struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&port->irq_lock, flags);
- val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
+ val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
val &= ~BIT(data->hwirq + PCIE_INTX_SHIFT);
- writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
- raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+ writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
static void mtk_intx_unmask(struct irq_data *data)
{
- struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 val;
- raw_spin_lock_irqsave(&port->irq_lock, flags);
- val = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ raw_spin_lock_irqsave(&pcie->irq_lock, flags);
+ val = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
val |= BIT(data->hwirq + PCIE_INTX_SHIFT);
- writel_relaxed(val, port->base + PCIE_INT_ENABLE_REG);
- raw_spin_unlock_irqrestore(&port->irq_lock, flags);
+ writel_relaxed(val, pcie->base + PCIE_INT_ENABLE_REG);
+ raw_spin_unlock_irqrestore(&pcie->irq_lock, flags);
}
/**
@@ -545,11 +553,11 @@ static void mtk_intx_unmask(struct irq_data *data)
*/
static void mtk_intx_eoi(struct irq_data *data)
{
- struct mtk_pcie_port *port = irq_data_get_irq_chip_data(data);
+ struct mtk_gen3_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long hwirq;
hwirq = data->hwirq + PCIE_INTX_SHIFT;
- writel_relaxed(BIT(hwirq), port->base + PCIE_INT_STATUS_REG);
+ writel_relaxed(BIT(hwirq), pcie->base + PCIE_INT_STATUS_REG);
}
static struct irq_chip mtk_intx_irq_chip = {
@@ -573,13 +581,13 @@ static const struct irq_domain_ops intx_domain_ops = {
.map = mtk_pcie_intx_map,
};
-static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
+static int mtk_pcie_init_irq_domains(struct mtk_gen3_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
struct device_node *intc_node, *node = dev->of_node;
int ret;
- raw_spin_lock_init(&port->irq_lock);
+ raw_spin_lock_init(&pcie->irq_lock);
/* Setup INTx */
intc_node = of_get_child_by_name(node, "interrupt-controller");
@@ -588,28 +596,28 @@ static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
return -ENODEV;
}
- port->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
- &intx_domain_ops, port);
- if (!port->intx_domain) {
+ pcie->intx_domain = irq_domain_add_linear(intc_node, PCI_NUM_INTX,
+ &intx_domain_ops, pcie);
+ if (!pcie->intx_domain) {
dev_err(dev, "failed to create INTx IRQ domain\n");
return -ENODEV;
}
/* Setup MSI */
- mutex_init(&port->lock);
+ mutex_init(&pcie->lock);
- port->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
- &mtk_msi_bottom_domain_ops, port);
- if (!port->msi_bottom_domain) {
+ pcie->msi_bottom_domain = irq_domain_add_linear(node, PCIE_MSI_IRQS_NUM,
+ &mtk_msi_bottom_domain_ops, pcie);
+ if (!pcie->msi_bottom_domain) {
dev_err(dev, "failed to create MSI bottom domain\n");
ret = -ENODEV;
goto err_msi_bottom_domain;
}
- port->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
+ pcie->msi_domain = pci_msi_create_irq_domain(dev->fwnode,
&mtk_msi_domain_info,
- port->msi_bottom_domain);
- if (!port->msi_domain) {
+ pcie->msi_bottom_domain);
+ if (!pcie->msi_domain) {
dev_err(dev, "failed to create MSI domain\n");
ret = -ENODEV;
goto err_msi_domain;
@@ -618,32 +626,32 @@ static int mtk_pcie_init_irq_domains(struct mtk_pcie_port *port)
return 0;
err_msi_domain:
- irq_domain_remove(port->msi_bottom_domain);
+ irq_domain_remove(pcie->msi_bottom_domain);
err_msi_bottom_domain:
- irq_domain_remove(port->intx_domain);
+ irq_domain_remove(pcie->intx_domain);
return ret;
}
-static void mtk_pcie_irq_teardown(struct mtk_pcie_port *port)
+static void mtk_pcie_irq_teardown(struct mtk_gen3_pcie *pcie)
{
- irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+ irq_set_chained_handler_and_data(pcie->irq, NULL, NULL);
- if (port->intx_domain)
- irq_domain_remove(port->intx_domain);
+ if (pcie->intx_domain)
+ irq_domain_remove(pcie->intx_domain);
- if (port->msi_domain)
- irq_domain_remove(port->msi_domain);
+ if (pcie->msi_domain)
+ irq_domain_remove(pcie->msi_domain);
- if (port->msi_bottom_domain)
- irq_domain_remove(port->msi_bottom_domain);
+ if (pcie->msi_bottom_domain)
+ irq_domain_remove(pcie->msi_bottom_domain);
- irq_dispose_mapping(port->irq);
+ irq_dispose_mapping(pcie->irq);
}
-static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
+static void mtk_pcie_msi_handler(struct mtk_gen3_pcie *pcie, int set_idx)
{
- struct mtk_msi_set *msi_set = &port->msi_sets[set_idx];
+ struct mtk_msi_set *msi_set = &pcie->msi_sets[set_idx];
unsigned long msi_enable, msi_status;
irq_hw_number_t bit, hwirq;
@@ -658,59 +666,59 @@ static void mtk_pcie_msi_handler(struct mtk_pcie_port *port, int set_idx)
for_each_set_bit(bit, &msi_status, PCIE_MSI_IRQS_PER_SET) {
hwirq = bit + set_idx * PCIE_MSI_IRQS_PER_SET;
- generic_handle_domain_irq(port->msi_bottom_domain, hwirq);
+ generic_handle_domain_irq(pcie->msi_bottom_domain, hwirq);
}
} while (true);
}
static void mtk_pcie_irq_handler(struct irq_desc *desc)
{
- struct mtk_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct mtk_gen3_pcie *pcie = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
unsigned long status;
irq_hw_number_t irq_bit = PCIE_INTX_SHIFT;
chained_irq_enter(irqchip, desc);
- status = readl_relaxed(port->base + PCIE_INT_STATUS_REG);
+ status = readl_relaxed(pcie->base + PCIE_INT_STATUS_REG);
for_each_set_bit_from(irq_bit, &status, PCI_NUM_INTX +
PCIE_INTX_SHIFT)
- generic_handle_domain_irq(port->intx_domain,
+ generic_handle_domain_irq(pcie->intx_domain,
irq_bit - PCIE_INTX_SHIFT);
irq_bit = PCIE_MSI_SHIFT;
for_each_set_bit_from(irq_bit, &status, PCIE_MSI_SET_NUM +
PCIE_MSI_SHIFT) {
- mtk_pcie_msi_handler(port, irq_bit - PCIE_MSI_SHIFT);
+ mtk_pcie_msi_handler(pcie, irq_bit - PCIE_MSI_SHIFT);
- writel_relaxed(BIT(irq_bit), port->base + PCIE_INT_STATUS_REG);
+ writel_relaxed(BIT(irq_bit), pcie->base + PCIE_INT_STATUS_REG);
}
chained_irq_exit(irqchip, desc);
}
-static int mtk_pcie_setup_irq(struct mtk_pcie_port *port)
+static int mtk_pcie_setup_irq(struct mtk_gen3_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
int err;
- err = mtk_pcie_init_irq_domains(port);
+ err = mtk_pcie_init_irq_domains(pcie);
if (err)
return err;
- port->irq = platform_get_irq(pdev, 0);
- if (port->irq < 0)
- return port->irq;
+ pcie->irq = platform_get_irq(pdev, 0);
+ if (pcie->irq < 0)
+ return pcie->irq;
- irq_set_chained_handler_and_data(port->irq, mtk_pcie_irq_handler, port);
+ irq_set_chained_handler_and_data(pcie->irq, mtk_pcie_irq_handler, pcie);
return 0;
}
-static int mtk_pcie_parse_port(struct mtk_pcie_port *port)
+static int mtk_pcie_parse_port(struct mtk_gen3_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
struct platform_device *pdev = to_platform_device(dev);
struct resource *regs;
int ret;
@@ -718,77 +726,77 @@ static int mtk_pcie_parse_port(struct mtk_pcie_port *port)
regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcie-mac");
if (!regs)
return -EINVAL;
- port->base = devm_ioremap_resource(dev, regs);
- if (IS_ERR(port->base)) {
+ pcie->base = devm_ioremap_resource(dev, regs);
+ if (IS_ERR(pcie->base)) {
dev_err(dev, "failed to map register base\n");
- return PTR_ERR(port->base);
+ return PTR_ERR(pcie->base);
}
- port->reg_base = regs->start;
+ pcie->reg_base = regs->start;
- port->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
- if (IS_ERR(port->phy_reset)) {
- ret = PTR_ERR(port->phy_reset);
+ pcie->phy_reset = devm_reset_control_get_optional_exclusive(dev, "phy");
+ if (IS_ERR(pcie->phy_reset)) {
+ ret = PTR_ERR(pcie->phy_reset);
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to get PHY reset\n");
return ret;
}
- port->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
- if (IS_ERR(port->mac_reset)) {
- ret = PTR_ERR(port->mac_reset);
+ pcie->mac_reset = devm_reset_control_get_optional_exclusive(dev, "mac");
+ if (IS_ERR(pcie->mac_reset)) {
+ ret = PTR_ERR(pcie->mac_reset);
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to get MAC reset\n");
return ret;
}
- port->phy = devm_phy_optional_get(dev, "pcie-phy");
- if (IS_ERR(port->phy)) {
- ret = PTR_ERR(port->phy);
+ pcie->phy = devm_phy_optional_get(dev, "pcie-phy");
+ if (IS_ERR(pcie->phy)) {
+ ret = PTR_ERR(pcie->phy);
if (ret != -EPROBE_DEFER)
dev_err(dev, "failed to get PHY\n");
return ret;
}
- port->num_clks = devm_clk_bulk_get_all(dev, &port->clks);
- if (port->num_clks < 0) {
+ pcie->num_clks = devm_clk_bulk_get_all(dev, &pcie->clks);
+ if (pcie->num_clks < 0) {
dev_err(dev, "failed to get clocks\n");
- return port->num_clks;
+ return pcie->num_clks;
}
return 0;
}
-static int mtk_pcie_power_up(struct mtk_pcie_port *port)
+static int mtk_pcie_power_up(struct mtk_gen3_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
int err;
/* PHY power on and enable pipe clock */
- reset_control_deassert(port->phy_reset);
+ reset_control_deassert(pcie->phy_reset);
- err = phy_init(port->phy);
+ err = phy_init(pcie->phy);
if (err) {
dev_err(dev, "failed to initialize PHY\n");
goto err_phy_init;
}
- err = phy_power_on(port->phy);
+ err = phy_power_on(pcie->phy);
if (err) {
dev_err(dev, "failed to power on PHY\n");
goto err_phy_on;
}
/* MAC power on and enable transaction layer clocks */
- reset_control_deassert(port->mac_reset);
+ reset_control_deassert(pcie->mac_reset);
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- err = clk_bulk_prepare_enable(port->num_clks, port->clks);
+ err = clk_bulk_prepare_enable(pcie->num_clks, pcie->clks);
if (err) {
dev_err(dev, "failed to enable clocks\n");
goto err_clk_init;
@@ -799,55 +807,55 @@ static int mtk_pcie_power_up(struct mtk_pcie_port *port)
err_clk_init:
pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
- reset_control_assert(port->mac_reset);
- phy_power_off(port->phy);
+ reset_control_assert(pcie->mac_reset);
+ phy_power_off(pcie->phy);
err_phy_on:
- phy_exit(port->phy);
+ phy_exit(pcie->phy);
err_phy_init:
- reset_control_assert(port->phy_reset);
+ reset_control_assert(pcie->phy_reset);
return err;
}
-static void mtk_pcie_power_down(struct mtk_pcie_port *port)
+static void mtk_pcie_power_down(struct mtk_gen3_pcie *pcie)
{
- clk_bulk_disable_unprepare(port->num_clks, port->clks);
+ clk_bulk_disable_unprepare(pcie->num_clks, pcie->clks);
- pm_runtime_put_sync(port->dev);
- pm_runtime_disable(port->dev);
- reset_control_assert(port->mac_reset);
+ pm_runtime_put_sync(pcie->dev);
+ pm_runtime_disable(pcie->dev);
+ reset_control_assert(pcie->mac_reset);
- phy_power_off(port->phy);
- phy_exit(port->phy);
- reset_control_assert(port->phy_reset);
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+ reset_control_assert(pcie->phy_reset);
}
-static int mtk_pcie_setup(struct mtk_pcie_port *port)
+static int mtk_pcie_setup(struct mtk_gen3_pcie *pcie)
{
int err;
- err = mtk_pcie_parse_port(port);
+ err = mtk_pcie_parse_port(pcie);
if (err)
return err;
/* Don't touch the hardware registers before power up */
- err = mtk_pcie_power_up(port);
+ err = mtk_pcie_power_up(pcie);
if (err)
return err;
/* Try link up */
- err = mtk_pcie_startup_port(port);
+ err = mtk_pcie_startup_port(pcie);
if (err)
goto err_setup;
- err = mtk_pcie_setup_irq(port);
+ err = mtk_pcie_setup_irq(pcie);
if (err)
goto err_setup;
return 0;
err_setup:
- mtk_pcie_power_down(port);
+ mtk_pcie_power_down(pcie);
return err;
}
@@ -855,30 +863,30 @@ err_setup:
static int mtk_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct mtk_pcie_port *port;
+ struct mtk_gen3_pcie *pcie;
struct pci_host_bridge *host;
int err;
- host = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!host)
return -ENOMEM;
- port = pci_host_bridge_priv(host);
+ pcie = pci_host_bridge_priv(host);
- port->dev = dev;
- platform_set_drvdata(pdev, port);
+ pcie->dev = dev;
+ platform_set_drvdata(pdev, pcie);
- err = mtk_pcie_setup(port);
+ err = mtk_pcie_setup(pcie);
if (err)
return err;
host->ops = &mtk_pcie_ops;
- host->sysdata = port;
+ host->sysdata = pcie;
err = pci_host_probe(host);
if (err) {
- mtk_pcie_irq_teardown(port);
- mtk_pcie_power_down(port);
+ mtk_pcie_irq_teardown(pcie);
+ mtk_pcie_power_down(pcie);
return err;
}
@@ -887,66 +895,66 @@ static int mtk_pcie_probe(struct platform_device *pdev)
static int mtk_pcie_remove(struct platform_device *pdev)
{
- struct mtk_pcie_port *port = platform_get_drvdata(pdev);
- struct pci_host_bridge *host = pci_host_bridge_from_priv(port);
+ struct mtk_gen3_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
pci_lock_rescan_remove();
pci_stop_root_bus(host->bus);
pci_remove_root_bus(host->bus);
pci_unlock_rescan_remove();
- mtk_pcie_irq_teardown(port);
- mtk_pcie_power_down(port);
+ mtk_pcie_irq_teardown(pcie);
+ mtk_pcie_power_down(pcie);
return 0;
}
-static void __maybe_unused mtk_pcie_irq_save(struct mtk_pcie_port *port)
+static void __maybe_unused mtk_pcie_irq_save(struct mtk_gen3_pcie *pcie)
{
int i;
- raw_spin_lock(&port->irq_lock);
+ raw_spin_lock(&pcie->irq_lock);
- port->saved_irq_state = readl_relaxed(port->base + PCIE_INT_ENABLE_REG);
+ pcie->saved_irq_state = readl_relaxed(pcie->base + PCIE_INT_ENABLE_REG);
for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
- struct mtk_msi_set *msi_set = &port->msi_sets[i];
+ struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
msi_set->saved_irq_state = readl_relaxed(msi_set->base +
PCIE_MSI_SET_ENABLE_OFFSET);
}
- raw_spin_unlock(&port->irq_lock);
+ raw_spin_unlock(&pcie->irq_lock);
}
-static void __maybe_unused mtk_pcie_irq_restore(struct mtk_pcie_port *port)
+static void __maybe_unused mtk_pcie_irq_restore(struct mtk_gen3_pcie *pcie)
{
int i;
- raw_spin_lock(&port->irq_lock);
+ raw_spin_lock(&pcie->irq_lock);
- writel_relaxed(port->saved_irq_state, port->base + PCIE_INT_ENABLE_REG);
+ writel_relaxed(pcie->saved_irq_state, pcie->base + PCIE_INT_ENABLE_REG);
for (i = 0; i < PCIE_MSI_SET_NUM; i++) {
- struct mtk_msi_set *msi_set = &port->msi_sets[i];
+ struct mtk_msi_set *msi_set = &pcie->msi_sets[i];
writel_relaxed(msi_set->saved_irq_state,
msi_set->base + PCIE_MSI_SET_ENABLE_OFFSET);
}
- raw_spin_unlock(&port->irq_lock);
+ raw_spin_unlock(&pcie->irq_lock);
}
-static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
+static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_gen3_pcie *pcie)
{
u32 val;
- val = readl_relaxed(port->base + PCIE_ICMD_PM_REG);
+ val = readl_relaxed(pcie->base + PCIE_ICMD_PM_REG);
val |= PCIE_TURN_OFF_LINK;
- writel_relaxed(val, port->base + PCIE_ICMD_PM_REG);
+ writel_relaxed(val, pcie->base + PCIE_ICMD_PM_REG);
/* Check the link is L2 */
- return readl_poll_timeout(port->base + PCIE_LTSSM_STATUS_REG, val,
+ return readl_poll_timeout(pcie->base + PCIE_LTSSM_STATUS_REG, val,
(PCIE_LTSSM_STATE(val) ==
PCIE_LTSSM_STATE_L2_IDLE), 20,
50 * USEC_PER_MSEC);
@@ -954,46 +962,46 @@ static int __maybe_unused mtk_pcie_turn_off_link(struct mtk_pcie_port *port)
static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
{
- struct mtk_pcie_port *port = dev_get_drvdata(dev);
+ struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
u32 val;
/* Trigger link to L2 state */
- err = mtk_pcie_turn_off_link(port);
+ err = mtk_pcie_turn_off_link(pcie);
if (err) {
- dev_err(port->dev, "cannot enter L2 state\n");
+ dev_err(pcie->dev, "cannot enter L2 state\n");
return err;
}
/* Pull down the PERST# pin */
- val = readl_relaxed(port->base + PCIE_RST_CTRL_REG);
+ val = readl_relaxed(pcie->base + PCIE_RST_CTRL_REG);
val |= PCIE_PE_RSTB;
- writel_relaxed(val, port->base + PCIE_RST_CTRL_REG);
+ writel_relaxed(val, pcie->base + PCIE_RST_CTRL_REG);
- dev_dbg(port->dev, "entered L2 states successfully");
+	dev_dbg(pcie->dev, "entered L2 state successfully\n");
- mtk_pcie_irq_save(port);
- mtk_pcie_power_down(port);
+ mtk_pcie_irq_save(pcie);
+ mtk_pcie_power_down(pcie);
return 0;
}
static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
{
- struct mtk_pcie_port *port = dev_get_drvdata(dev);
+ struct mtk_gen3_pcie *pcie = dev_get_drvdata(dev);
int err;
- err = mtk_pcie_power_up(port);
+ err = mtk_pcie_power_up(pcie);
if (err)
return err;
- err = mtk_pcie_startup_port(port);
+ err = mtk_pcie_startup_port(pcie);
if (err) {
- mtk_pcie_power_down(port);
+ mtk_pcie_power_down(pcie);
return err;
}
- mtk_pcie_irq_restore(port);
+ mtk_pcie_irq_restore(pcie);
return 0;
}
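
[Editor's note: rename aside, the gen3 startup path's link-up wait above is
built on readl_poll_timeout(). A hedged sketch of that idiom; the register
offset, bit, and timeout below are illustrative, not this driver's:]

    #include <linux/bits.h>
    #include <linux/iopoll.h>

    /*
     * Sketch: re-read a status register until the condition holds or
     * the timeout (in microseconds) expires; returns 0 on success or
     * -ETIMEDOUT with 'val' holding the last value read.
     */
    static int wait_for_link_up(void __iomem *base)
    {
    	u32 val;

    	return readl_poll_timeout(base + 0x154, val, val & BIT(8),
    				  20, 100 * USEC_PER_MSEC);
    }
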
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 2f3f974977a3..ddfbd4aebdec 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -365,19 +365,12 @@ static int mtk_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
{
struct mtk_pcie_port *port;
u32 bn = bus->number;
- int ret;
port = mtk_pcie_find_port(bus, devfn);
- if (!port) {
- *val = ~0;
+ if (!port)
return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- ret = mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
- if (ret)
- *val = ~0;
- return ret;
+ return mtk_pcie_hw_rd_cfg(port, bn, devfn, where, size, val);
}
static int mtk_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
@@ -702,6 +695,13 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
*/
writel(PCIE_LINKDOWN_RST_EN, port->base + PCIE_RST_CTRL);
+ /*
+ * Described in PCIe CEM specification sections 2.2 (PERST# Signal) and
+ * 2.2.1 (Initial Power-Up (G3 to S0)). The deassertion of PERST# should
+ * be delayed 100ms (TPVPERL) for the power and clock to become stable.
+ */
+ msleep(100);
+
/* De-assert PHY, PE, PIPE, MAC and configuration reset */
val = readl(port->base + PCIE_RST_CTRL);
val |= PCIE_PHY_RSTB | PCIE_PERSTB | PCIE_PIPE_SRSTB |
diff --git a/drivers/pci/controller/pcie-microchip-host.c b/drivers/pci/controller/pcie-microchip-host.c
index 329f930d17aa..29d8e81e4181 100644
--- a/drivers/pci/controller/pcie-microchip-host.c
+++ b/drivers/pci/controller/pcie-microchip-host.c
@@ -262,7 +262,7 @@ struct mc_msi {
DECLARE_BITMAP(used, MC_NUM_MSI_IRQS);
};
-struct mc_port {
+struct mc_pcie {
void __iomem *axi_base_addr;
struct device *dev;
struct irq_domain *intx_domain;
@@ -382,7 +382,7 @@ static struct {
static char poss_clks[][5] = { "fic0", "fic1", "fic2", "fic3" };
-static void mc_pcie_enable_msi(struct mc_port *port, void __iomem *base)
+static void mc_pcie_enable_msi(struct mc_pcie *port, void __iomem *base)
{
struct mc_msi *msi = &port->msi;
u32 cap_offset = MC_MSI_CAP_CTRL_OFFSET;
@@ -405,7 +405,7 @@ static void mc_pcie_enable_msi(struct mc_port *port, void __iomem *base)
static void mc_handle_msi(struct irq_desc *desc)
{
- struct mc_port *port = irq_desc_get_handler_data(desc);
+ struct mc_pcie *port = irq_desc_get_handler_data(desc);
struct device *dev = port->dev;
struct mc_msi *msi = &port->msi;
void __iomem *bridge_base_addr =
@@ -428,7 +428,7 @@ static void mc_handle_msi(struct irq_desc *desc)
static void mc_msi_bottom_irq_ack(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
u32 bitpos = data->hwirq;
@@ -443,7 +443,7 @@ static void mc_msi_bottom_irq_ack(struct irq_data *data)
static void mc_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
phys_addr_t addr = port->msi.vector_phy;
msg->address_lo = lower_32_bits(addr);
@@ -470,7 +470,7 @@ static struct irq_chip mc_msi_bottom_irq_chip = {
static int mc_irq_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
- struct mc_port *port = domain->host_data;
+ struct mc_pcie *port = domain->host_data;
struct mc_msi *msi = &port->msi;
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
@@ -503,7 +503,7 @@ static void mc_irq_msi_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct mc_port *port = irq_data_get_irq_chip_data(d);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(d);
struct mc_msi *msi = &port->msi;
mutex_lock(&msi->lock);
@@ -534,7 +534,7 @@ static struct msi_domain_info mc_msi_domain_info = {
.chip = &mc_msi_irq_chip,
};
-static int mc_allocate_msi_domains(struct mc_port *port)
+static int mc_allocate_msi_domains(struct mc_pcie *port)
{
struct device *dev = port->dev;
struct fwnode_handle *fwnode = of_node_to_fwnode(dev->of_node);
@@ -562,7 +562,7 @@ static int mc_allocate_msi_domains(struct mc_port *port)
static void mc_handle_intx(struct irq_desc *desc)
{
- struct mc_port *port = irq_desc_get_handler_data(desc);
+ struct mc_pcie *port = irq_desc_get_handler_data(desc);
struct device *dev = port->dev;
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
@@ -585,7 +585,7 @@ static void mc_handle_intx(struct irq_desc *desc)
static void mc_ack_intx_irq(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
u32 mask = BIT(data->hwirq + PM_MSI_INT_INTX_SHIFT);
@@ -595,7 +595,7 @@ static void mc_ack_intx_irq(struct irq_data *data)
static void mc_mask_intx_irq(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
unsigned long flags;
@@ -611,7 +611,7 @@ static void mc_mask_intx_irq(struct irq_data *data)
static void mc_unmask_intx_irq(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
unsigned long flags;
@@ -698,7 +698,7 @@ static u32 local_events(void __iomem *addr)
return val;
}
-static u32 get_events(struct mc_port *port)
+static u32 get_events(struct mc_pcie *port)
{
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
@@ -715,7 +715,7 @@ static u32 get_events(struct mc_port *port)
static irqreturn_t mc_event_handler(int irq, void *dev_id)
{
- struct mc_port *port = dev_id;
+ struct mc_pcie *port = dev_id;
struct device *dev = port->dev;
struct irq_data *data;
@@ -731,7 +731,7 @@ static irqreturn_t mc_event_handler(int irq, void *dev_id)
static void mc_handle_event(struct irq_desc *desc)
{
- struct mc_port *port = irq_desc_get_handler_data(desc);
+ struct mc_pcie *port = irq_desc_get_handler_data(desc);
unsigned long events;
u32 bit;
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -748,7 +748,7 @@ static void mc_handle_event(struct irq_desc *desc)
static void mc_ack_event_irq(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
u32 event = data->hwirq;
void __iomem *addr;
u32 mask;
@@ -763,7 +763,7 @@ static void mc_ack_event_irq(struct irq_data *data)
static void mc_mask_event_irq(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
u32 event = data->hwirq;
void __iomem *addr;
u32 mask;
@@ -793,7 +793,7 @@ static void mc_mask_event_irq(struct irq_data *data)
static void mc_unmask_event_irq(struct irq_data *data)
{
- struct mc_port *port = irq_data_get_irq_chip_data(data);
+ struct mc_pcie *port = irq_data_get_irq_chip_data(data);
u32 event = data->hwirq;
void __iomem *addr;
u32 mask;
@@ -881,7 +881,7 @@ static int mc_pcie_init_clks(struct device *dev)
return 0;
}
-static int mc_pcie_init_irq_domains(struct mc_port *port)
+static int mc_pcie_init_irq_domains(struct mc_pcie *port)
{
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
@@ -957,7 +957,7 @@ static void mc_pcie_setup_window(void __iomem *bridge_base_addr, u32 index,
}
static int mc_pcie_setup_windows(struct platform_device *pdev,
- struct mc_port *port)
+ struct mc_pcie *port)
{
void __iomem *bridge_base_addr =
port->axi_base_addr + MC_PCIE_BRIDGE_ADDR;
@@ -983,7 +983,7 @@ static int mc_platform_init(struct pci_config_window *cfg)
{
struct device *dev = cfg->parent;
struct platform_device *pdev = to_platform_device(dev);
- struct mc_port *port;
+ struct mc_pcie *port;
void __iomem *bridge_base_addr;
void __iomem *ctrl_base_addr;
int ret;
diff --git a/drivers/pci/controller/pcie-mt7621.c b/drivers/pci/controller/pcie-mt7621.c
index b60dfb45ef7b..33eb37a2225c 100644
--- a/drivers/pci/controller/pcie-mt7621.c
+++ b/drivers/pci/controller/pcie-mt7621.c
@@ -93,8 +93,8 @@ struct mt7621_pcie_port {
* reset lines are inverted.
*/
struct mt7621_pcie {
- void __iomem *base;
struct device *dev;
+ void __iomem *base;
struct list_head ports;
bool resets_inverted;
};
@@ -109,15 +109,6 @@ static inline void pcie_write(struct mt7621_pcie *pcie, u32 val, u32 reg)
writel_relaxed(val, pcie->base + reg);
}
-static inline void pcie_rmw(struct mt7621_pcie *pcie, u32 reg, u32 clr, u32 set)
-{
- u32 val = readl_relaxed(pcie->base + reg);
-
- val &= ~clr;
- val |= set;
- writel_relaxed(val, pcie->base + reg);
-}
-
static inline u32 pcie_port_read(struct mt7621_pcie_port *port, u32 reg)
{
return readl_relaxed(port->base + reg);
@@ -129,7 +120,7 @@ static inline void pcie_port_write(struct mt7621_pcie_port *port,
writel_relaxed(val, port->base + reg);
}
-static inline u32 mt7621_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
+static inline u32 mt7621_pcie_get_cfgaddr(unsigned int bus, unsigned int slot,
unsigned int func, unsigned int where)
{
return (((where & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11) |
@@ -140,7 +131,7 @@ static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
struct mt7621_pcie *pcie = bus->sysdata;
- u32 address = mt7621_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
+ u32 address = mt7621_pcie_get_cfgaddr(bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), where);
writel_relaxed(address, pcie->base + RALINK_PCI_CONFIG_ADDR);
@@ -148,7 +139,7 @@ static void __iomem *mt7621_pcie_map_bus(struct pci_bus *bus,
return pcie->base + RALINK_PCI_CONFIG_DATA + (where & 3);
}
-struct pci_ops mt7621_pci_ops = {
+static struct pci_ops mt7621_pcie_ops = {
.map_bus = mt7621_pcie_map_bus,
.read = pci_generic_config_read,
.write = pci_generic_config_write,
@@ -156,7 +147,7 @@ struct pci_ops mt7621_pci_ops = {
static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
{
- u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg);
+ u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg);
pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
return pcie_read(pcie, RALINK_PCI_CONFIG_DATA);
@@ -165,7 +156,7 @@ static u32 read_config(struct mt7621_pcie *pcie, unsigned int dev, u32 reg)
static void write_config(struct mt7621_pcie *pcie, unsigned int dev,
u32 reg, u32 val)
{
- u32 address = mt7621_pci_get_cfgaddr(0, dev, 0, reg);
+ u32 address = mt7621_pcie_get_cfgaddr(0, dev, 0, reg);
pcie_write(pcie, address, RALINK_PCI_CONFIG_ADDR);
pcie_write(pcie, val, RALINK_PCI_CONFIG_DATA);
@@ -208,37 +199,6 @@ static inline void mt7621_control_deassert(struct mt7621_pcie_port *port)
reset_control_assert(port->pcie_rst);
}
-static int setup_cm_memory_region(struct pci_host_bridge *host)
-{
- struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
- struct device *dev = pcie->dev;
- struct resource_entry *entry;
- resource_size_t mask;
-
- entry = resource_list_first_type(&host->windows, IORESOURCE_MEM);
- if (!entry) {
- dev_err(dev, "cannot get memory resource\n");
- return -EINVAL;
- }
-
- if (mips_cps_numiocu(0)) {
- /*
- * FIXME: hardware doesn't accept mask values with 1s after
- * 0s (e.g. 0xffef), so it would be great to warn if that's
- * about to happen
- */
- mask = ~(entry->res->end - entry->res->start);
-
- write_gcr_reg1_base(entry->res->start);
- write_gcr_reg1_mask(mask | CM_GCR_REGn_MASK_CMTGT_IOCU0);
- dev_info(dev, "PCI coherence region base: 0x%08llx, mask/settings: 0x%08llx\n",
- (unsigned long long)read_gcr_reg1_base(),
- (unsigned long long)read_gcr_reg1_mask());
- }
-
- return 0;
-}
-
static int mt7621_pcie_parse_port(struct mt7621_pcie *pcie,
struct device_node *node,
int slot)
@@ -505,16 +465,16 @@ static int mt7621_pcie_register_host(struct pci_host_bridge *host)
{
struct mt7621_pcie *pcie = pci_host_bridge_priv(host);
- host->ops = &mt7621_pci_ops;
+ host->ops = &mt7621_pcie_ops;
host->sysdata = pcie;
return pci_host_probe(host);
}
-static const struct soc_device_attribute mt7621_pci_quirks_match[] = {
+static const struct soc_device_attribute mt7621_pcie_quirks_match[] = {
{ .soc_id = "mt7621", .revision = "E2" }
};
-static int mt7621_pci_probe(struct platform_device *pdev)
+static int mt7621_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
const struct soc_device_attribute *attr;
@@ -535,7 +495,7 @@ static int mt7621_pci_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pcie);
INIT_LIST_HEAD(&pcie->ports);
- attr = soc_device_match(mt7621_pci_quirks_match);
+ attr = soc_device_match(mt7621_pcie_quirks_match);
if (attr)
pcie->resets_inverted = true;
@@ -557,12 +517,6 @@ static int mt7621_pci_probe(struct platform_device *pdev)
goto remove_resets;
}
- err = setup_cm_memory_region(bridge);
- if (err) {
- dev_err(dev, "error setting up iocu mem regions\n");
- goto remove_resets;
- }
-
return mt7621_pcie_register_host(bridge);
remove_resets:
@@ -572,7 +526,7 @@ remove_resets:
return err;
}
-static int mt7621_pci_remove(struct platform_device *pdev)
+static int mt7621_pcie_remove(struct platform_device *pdev)
{
struct mt7621_pcie *pcie = platform_get_drvdata(pdev);
struct mt7621_pcie_port *port;
@@ -583,18 +537,20 @@ static int mt7621_pci_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id mt7621_pci_ids[] = {
+static const struct of_device_id mt7621_pcie_ids[] = {
{ .compatible = "mediatek,mt7621-pci" },
{},
};
-MODULE_DEVICE_TABLE(of, mt7621_pci_ids);
+MODULE_DEVICE_TABLE(of, mt7621_pcie_ids);
-static struct platform_driver mt7621_pci_driver = {
- .probe = mt7621_pci_probe,
- .remove = mt7621_pci_remove,
+static struct platform_driver mt7621_pcie_driver = {
+ .probe = mt7621_pcie_probe,
+ .remove = mt7621_pcie_remove,
.driver = {
.name = "mt7621-pci",
- .of_match_table = of_match_ptr(mt7621_pci_ids),
+ .of_match_table = mt7621_pcie_ids,
},
};
-builtin_platform_driver(mt7621_pci_driver);
+builtin_platform_driver(mt7621_pcie_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c
index e12c2d8be05a..38b6e02edfa9 100644
--- a/drivers/pci/controller/pcie-rcar-host.c
+++ b/drivers/pci/controller/pcie-rcar-host.c
@@ -50,10 +50,10 @@ struct rcar_msi {
*/
static void __iomem *pcie_base;
/*
- * Static copy of bus clock pointer, so we can check whether the clock
- * is enabled or not.
+ * Static copy of PCIe device pointer, so we can check whether the
+ * device is runtime suspended or not.
*/
-static struct clk *pcie_bus_clk;
+static struct device *pcie_dev;
#endif
/* Structure representing the PCIe interface */
@@ -159,10 +159,8 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
ret = rcar_pcie_config_access(host, RCAR_PCI_ACCESS_READ,
bus, devfn, where, val);
- if (ret != PCIBIOS_SUCCESSFUL) {
- *val = 0xffffffff;
+ if (ret != PCIBIOS_SUCCESSFUL)
return ret;
- }
if (size == 1)
*val = (*val >> (BITS_PER_BYTE * (where & 3))) & 0xff;
@@ -792,7 +790,7 @@ static int rcar_pcie_get_resources(struct rcar_pcie_host *host)
#ifdef CONFIG_ARM
/* Cache static copy for L1 link state fixup hook on aarch32 */
pcie_base = pcie->base;
- pcie_bus_clk = host->bus_clk;
+ pcie_dev = pcie->dev;
#endif
return 0;
@@ -1062,7 +1060,7 @@ static int rcar_pcie_aarch32_abort_handler(unsigned long addr,
spin_lock_irqsave(&pmsr_lock, flags);
- if (!pcie_base || !__clk_is_enabled(pcie_bus_clk)) {
+ if (!pcie_base || pm_runtime_suspended(pcie_dev)) {
ret = 1;
goto unlock_exit;
}
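
[Editor's note: caching the device pointer instead of the bus clock lets the
abort handler ask the runtime-PM core directly whether the controller is
powered before touching its registers. A sketch of the guard pattern, with
hypothetical names; not part of the patch:]

    #include <linux/io.h>
    #include <linux/pm_runtime.h>

    /*
     * Sketch: only touch MMIO when the device is not runtime-suspended;
     * pm_runtime_suspended() is true once runtime suspend has completed
     * and the device's clocks/power may be gone.
     */
    static int guarded_read(struct device *dev, void __iomem *base, u32 *out)
    {
    	if (!base || pm_runtime_suspended(dev))
    		return -ENODEV;

    	*out = readl(base);	/* safe: device known to be powered */
    	return 0;
    }
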
diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c
index c52316d0bfd2..45a28880f322 100644
--- a/drivers/pci/controller/pcie-rockchip-host.c
+++ b/drivers/pci/controller/pcie-rockchip-host.c
@@ -221,10 +221,8 @@ static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
{
struct rockchip_pcie *rockchip = bus->sysdata;
- if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) {
- *val = 0xffffffff;
+ if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn)))
return PCIBIOS_DEVICE_NOT_FOUND;
- }
if (pci_is_root_bus(bus))
return rockchip_pcie_rd_own_conf(rockchip, where, size, val);
diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c
index 95426df03200..c7cd44ed4dfc 100644
--- a/drivers/pci/controller/pcie-xilinx-cpm.c
+++ b/drivers/pci/controller/pcie-xilinx-cpm.c
@@ -99,10 +99,10 @@
#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11)
/**
- * struct xilinx_cpm_pcie_port - PCIe port information
+ * struct xilinx_cpm_pcie - PCIe port information
+ * @dev: Device pointer
* @reg_base: Bridge Register Base
* @cpm_base: CPM System Level Control and Status Register(SLCR) Base
- * @dev: Device pointer
* @intx_domain: Legacy IRQ domain pointer
* @cpm_domain: CPM IRQ domain pointer
* @cfg: Holds mappings of config space window
@@ -110,10 +110,10 @@
* @irq: Error interrupt number
* @lock: lock protecting shared register access
*/
-struct xilinx_cpm_pcie_port {
+struct xilinx_cpm_pcie {
+ struct device *dev;
void __iomem *reg_base;
void __iomem *cpm_base;
- struct device *dev;
struct irq_domain *intx_domain;
struct irq_domain *cpm_domain;
struct pci_config_window *cfg;
@@ -122,24 +122,24 @@ struct xilinx_cpm_pcie_port {
raw_spinlock_t lock;
};
-static u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32 reg)
+static u32 pcie_read(struct xilinx_cpm_pcie *port, u32 reg)
{
return readl_relaxed(port->reg_base + reg);
}
-static void pcie_write(struct xilinx_cpm_pcie_port *port,
+static void pcie_write(struct xilinx_cpm_pcie *port,
u32 val, u32 reg)
{
writel_relaxed(val, port->reg_base + reg);
}
-static bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port)
+static bool cpm_pcie_link_up(struct xilinx_cpm_pcie *port)
{
return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) &
XILINX_CPM_PCIE_REG_PSCR_LNKUP);
}
-static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port)
+static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie *port)
{
unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR);
@@ -153,7 +153,7 @@ static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port)
static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
{
- struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data);
+ struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 mask;
u32 val;
@@ -167,7 +167,7 @@ static void xilinx_cpm_mask_leg_irq(struct irq_data *data)
static void xilinx_cpm_unmask_leg_irq(struct irq_data *data)
{
- struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data);
+ struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 mask;
u32 val;
@@ -211,7 +211,7 @@ static const struct irq_domain_ops intx_domain_ops = {
static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
{
- struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long val;
int i;
@@ -229,7 +229,7 @@ static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc)
static void xilinx_cpm_mask_event_irq(struct irq_data *d)
{
- struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d);
+ struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
u32 val;
raw_spin_lock(&port->lock);
@@ -241,7 +241,7 @@ static void xilinx_cpm_mask_event_irq(struct irq_data *d)
static void xilinx_cpm_unmask_event_irq(struct irq_data *d)
{
- struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d);
+ struct xilinx_cpm_pcie *port = irq_data_get_irq_chip_data(d);
u32 val;
raw_spin_lock(&port->lock);
@@ -273,7 +273,7 @@ static const struct irq_domain_ops event_domain_ops = {
static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc)
{
- struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc);
+ struct xilinx_cpm_pcie *port = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long val;
int i;
@@ -327,7 +327,7 @@ static const struct {
static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
{
- struct xilinx_cpm_pcie_port *port = dev_id;
+ struct xilinx_cpm_pcie *port = dev_id;
struct device *dev = port->dev;
struct irq_data *d;
@@ -350,7 +350,7 @@ static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie_port *port)
+static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie *port)
{
if (port->intx_domain) {
irq_domain_remove(port->intx_domain);
@@ -369,7 +369,7 @@ static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie_port *port)
*
* Return: '0' on success and error value on failure
*/
-static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port)
+static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie *port)
{
struct device *dev = port->dev;
struct device_node *node = dev->of_node;
@@ -410,7 +410,7 @@ out:
return -ENOMEM;
}
-static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie_port *port)
+static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie *port)
{
struct device *dev = port->dev;
struct platform_device *pdev = to_platform_device(dev);
@@ -462,7 +462,7 @@ static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie_port *port)
* xilinx_cpm_pcie_init_port - Initialize hardware
* @port: PCIe port information
*/
-static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port)
+static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie *port)
{
if (cpm_pcie_link_up(port))
dev_info(port->dev, "PCIe Link is UP\n");
@@ -497,7 +497,7 @@ static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port)
*
* Return: '0' on success and error value on failure
*/
-static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port,
+static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie *port,
struct resource *bus_range)
{
struct device *dev = port->dev;
@@ -523,7 +523,7 @@ static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port,
return 0;
}
-static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie_port *port)
+static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie *port)
{
irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL);
irq_set_chained_handler_and_data(port->irq, NULL, NULL);
@@ -537,7 +537,7 @@ static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie_port *port)
*/
static int xilinx_cpm_pcie_probe(struct platform_device *pdev)
{
- struct xilinx_cpm_pcie_port *port;
+ struct xilinx_cpm_pcie *port;
struct device *dev = &pdev->dev;
struct pci_host_bridge *bridge;
struct resource_entry *bus;
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index a72b4f9a2b00..40d070e54ad2 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -146,7 +146,7 @@
struct nwl_msi { /* MSI information */
struct irq_domain *msi_domain;
- unsigned long *bitmap;
+ DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR);
struct irq_domain *dev_domain;
struct mutex lock; /* protect bitmap variable */
int irq_msi0;
@@ -335,12 +335,10 @@ static void nwl_pcie_leg_handler(struct irq_desc *desc)
static void nwl_pcie_handle_msi_irq(struct nwl_pcie *pcie, u32 status_reg)
{
- struct nwl_msi *msi;
+ struct nwl_msi *msi = &pcie->msi;
unsigned long status;
u32 bit;
- msi = &pcie->msi;
-
while ((status = nwl_bridge_readl(pcie, status_reg)) != 0) {
for_each_set_bit(bit, &status, 32) {
nwl_bridge_writel(pcie, 1 << bit, status_reg);
@@ -560,30 +558,21 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
struct nwl_msi *msi = &pcie->msi;
unsigned long base;
int ret;
- int size = BITS_TO_LONGS(INT_PCI_MSI_NR) * sizeof(long);
mutex_init(&msi->lock);
- msi->bitmap = kzalloc(size, GFP_KERNEL);
- if (!msi->bitmap)
- return -ENOMEM;
-
/* Get msi_1 IRQ number */
msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1");
- if (msi->irq_msi1 < 0) {
- ret = -EINVAL;
- goto err;
- }
+ if (msi->irq_msi1 < 0)
+ return -EINVAL;
irq_set_chained_handler_and_data(msi->irq_msi1,
nwl_pcie_msi_handler_high, pcie);
/* Get msi_0 IRQ number */
msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0");
- if (msi->irq_msi0 < 0) {
- ret = -EINVAL;
- goto err;
- }
+ if (msi->irq_msi0 < 0)
+ return -EINVAL;
irq_set_chained_handler_and_data(msi->irq_msi0,
nwl_pcie_msi_handler_low, pcie);
@@ -592,8 +581,7 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
ret = nwl_bridge_readl(pcie, I_MSII_CAPABILITIES) & MSII_PRESENT;
if (!ret) {
dev_err(dev, "MSI not present\n");
- ret = -EIO;
- goto err;
+ return -EIO;
}
/* Enable MSII */
@@ -632,10 +620,6 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie)
nwl_bridge_writel(pcie, MSGF_MSI_SR_LO_MASK, MSGF_MSI_MASK_LO);
return 0;
-err:
- kfree(msi->bitmap);
- msi->bitmap = NULL;
- return ret;
}
static int nwl_pcie_bridge_init(struct nwl_pcie *pcie)
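The nwl change above swaps a kzalloc()'d MSI bitmap for one embedded in struct nwl_msi via DECLARE_BITMAP(), which is what lets nwl_pcie_enable_msi() drop its err:/kfree() unwinding and return directly on failure. A minimal user-space sketch of the pattern follows; the macros are re-derived here for illustration (in the kernel they come from <linux/types.h>), and the value of INT_PCI_MSI_NR is an assumption for the sketch:

/*
 * Embedding a fixed-size bitmap in the structure removes the
 * allocation, the NULL check, and every cleanup path for it.
 */
#include <limits.h>
#include <stdio.h>

#define BITS_TO_LONGS(nr) \
	(((nr) + (CHAR_BIT * sizeof(long)) - 1) / (CHAR_BIT * sizeof(long)))
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

#define INT_PCI_MSI_NR 64	/* assumed value, for the sketch only */

struct nwl_msi_model {
	DECLARE_BITMAP(bitmap, INT_PCI_MSI_NR); /* storage lives in-struct */
};

int main(void)
{
	struct nwl_msi_model msi = { { 0 } };

	msi.bitmap[0] |= 1UL << 3;	/* mark hwirq 3 as in use */
	printf("bitmap word 0: %#lx\n", msi.bitmap[0]);
	return 0;
}

Because the storage is part of the enclosing structure, every early return in the setup path is now safe without cleanup.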
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index aa9bdcebc838..cb6e9f7b0152 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -91,18 +91,18 @@
#define XILINX_NUM_MSI_IRQS 128
/**
- * struct xilinx_pcie_port - PCIe port information
- * @reg_base: IO Mapped Register Base
+ * struct xilinx_pcie - PCIe port information
* @dev: Device pointer
+ * @reg_base: IO Mapped Register Base
* @msi_map: Bitmap of allocated MSIs
* @map_lock: Mutex protecting the MSI allocation
* @msi_domain: MSI IRQ domain pointer
* @leg_domain: Legacy IRQ domain pointer
* @resources: Bus Resources
*/
-struct xilinx_pcie_port {
- void __iomem *reg_base;
+struct xilinx_pcie {
struct device *dev;
+ void __iomem *reg_base;
unsigned long msi_map[BITS_TO_LONGS(XILINX_NUM_MSI_IRQS)];
struct mutex map_lock;
struct irq_domain *msi_domain;
@@ -110,35 +110,35 @@ struct xilinx_pcie_port {
struct list_head resources;
};
-static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
+static inline u32 pcie_read(struct xilinx_pcie *pcie, u32 reg)
{
- return readl(port->reg_base + reg);
+ return readl(pcie->reg_base + reg);
}
-static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
+static inline void pcie_write(struct xilinx_pcie *pcie, u32 val, u32 reg)
{
- writel(val, port->reg_base + reg);
+ writel(val, pcie->reg_base + reg);
}
-static inline bool xilinx_pcie_link_up(struct xilinx_pcie_port *port)
+static inline bool xilinx_pcie_link_up(struct xilinx_pcie *pcie)
{
- return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
+ return (pcie_read(pcie, XILINX_PCIE_REG_PSCR) &
XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
}
/**
* xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
- * @port: PCIe port information
+ * @pcie: PCIe port information
*/
-static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
+static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie *pcie)
{
- struct device *dev = port->dev;
- unsigned long val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
+ struct device *dev = pcie->dev;
+ unsigned long val = pcie_read(pcie, XILINX_PCIE_REG_RPEFR);
if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
dev_dbg(dev, "Requester ID %lu\n",
val & XILINX_PCIE_RPEFR_REQ_ID);
- pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
+ pcie_write(pcie, XILINX_PCIE_RPEFR_ALL_MASK,
XILINX_PCIE_REG_RPEFR);
}
}
@@ -152,11 +152,11 @@ static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
*/
static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
{
- struct xilinx_pcie_port *port = bus->sysdata;
+ struct xilinx_pcie *pcie = bus->sysdata;
- /* Check if link is up when trying to access downstream ports */
+ /* Check if link is up when trying to access downstream pcie ports */
if (!pci_is_root_bus(bus)) {
- if (!xilinx_pcie_link_up(port))
+ if (!xilinx_pcie_link_up(pcie))
return false;
} else if (devfn > 0) {
/* Only one device down on each root port */
@@ -177,12 +177,12 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
static void __iomem *xilinx_pcie_map_bus(struct pci_bus *bus,
unsigned int devfn, int where)
{
- struct xilinx_pcie_port *port = bus->sysdata;
+ struct xilinx_pcie *pcie = bus->sysdata;
if (!xilinx_pcie_valid_device(bus, devfn))
return NULL;
- return port->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
+ return pcie->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
}
/* PCIe operations */
@@ -215,7 +215,7 @@ static int xilinx_msi_set_affinity(struct irq_data *d, const struct cpumask *mas
static void xilinx_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
- struct xilinx_pcie_port *pcie = irq_data_get_irq_chip_data(data);
+ struct xilinx_pcie *pcie = irq_data_get_irq_chip_data(data);
phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K);
msg->address_lo = lower_32_bits(pa);
@@ -232,14 +232,14 @@ static struct irq_chip xilinx_msi_bottom_chip = {
static int xilinx_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
- struct xilinx_pcie_port *port = domain->host_data;
+ struct xilinx_pcie *pcie = domain->host_data;
int hwirq, i;
- mutex_lock(&port->map_lock);
+ mutex_lock(&pcie->map_lock);
- hwirq = bitmap_find_free_region(port->msi_map, XILINX_NUM_MSI_IRQS, order_base_2(nr_irqs));
+ hwirq = bitmap_find_free_region(pcie->msi_map, XILINX_NUM_MSI_IRQS, order_base_2(nr_irqs));
- mutex_unlock(&port->map_lock);
+ mutex_unlock(&pcie->map_lock);
if (hwirq < 0)
return -ENOSPC;
@@ -256,13 +256,13 @@ static void xilinx_msi_domain_free(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs)
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
- struct xilinx_pcie_port *port = domain->host_data;
+ struct xilinx_pcie *pcie = domain->host_data;
- mutex_lock(&port->map_lock);
+ mutex_lock(&pcie->map_lock);
- bitmap_release_region(port->msi_map, d->hwirq, order_base_2(nr_irqs));
+ bitmap_release_region(pcie->msi_map, d->hwirq, order_base_2(nr_irqs));
- mutex_unlock(&port->map_lock);
+ mutex_unlock(&pcie->map_lock);
}
static const struct irq_domain_ops xilinx_msi_domain_ops = {
@@ -275,7 +275,7 @@ static struct msi_domain_info xilinx_msi_info = {
.chip = &xilinx_msi_top_chip,
};
-static int xilinx_allocate_msi_domains(struct xilinx_pcie_port *pcie)
+static int xilinx_allocate_msi_domains(struct xilinx_pcie *pcie)
{
struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
struct irq_domain *parent;
@@ -298,7 +298,7 @@ static int xilinx_allocate_msi_domains(struct xilinx_pcie_port *pcie)
return 0;
}
-static void xilinx_free_msi_domains(struct xilinx_pcie_port *pcie)
+static void xilinx_free_msi_domains(struct xilinx_pcie *pcie)
{
struct irq_domain *parent = pcie->msi_domain->parent;
@@ -342,13 +342,13 @@ static const struct irq_domain_ops intx_domain_ops = {
*/
static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
{
- struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
- struct device *dev = port->dev;
+ struct xilinx_pcie *pcie = (struct xilinx_pcie *)data;
+ struct device *dev = pcie->dev;
u32 val, mask, status;
/* Read interrupt decode and mask registers */
- val = pcie_read(port, XILINX_PCIE_REG_IDR);
- mask = pcie_read(port, XILINX_PCIE_REG_IMR);
+ val = pcie_read(pcie, XILINX_PCIE_REG_IDR);
+ mask = pcie_read(pcie, XILINX_PCIE_REG_IMR);
status = val & mask;
if (!status)
@@ -371,23 +371,23 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
if (status & XILINX_PCIE_INTR_CORRECTABLE) {
dev_warn(dev, "Correctable error message\n");
- xilinx_pcie_clear_err_interrupts(port);
+ xilinx_pcie_clear_err_interrupts(pcie);
}
if (status & XILINX_PCIE_INTR_NONFATAL) {
dev_warn(dev, "Non fatal error message\n");
- xilinx_pcie_clear_err_interrupts(port);
+ xilinx_pcie_clear_err_interrupts(pcie);
}
if (status & XILINX_PCIE_INTR_FATAL) {
dev_warn(dev, "Fatal error message\n");
- xilinx_pcie_clear_err_interrupts(port);
+ xilinx_pcie_clear_err_interrupts(pcie);
}
if (status & (XILINX_PCIE_INTR_INTX | XILINX_PCIE_INTR_MSI)) {
struct irq_domain *domain;
- val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+ val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR1);
/* Check whether interrupt valid */
if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
@@ -397,17 +397,17 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
/* Decode the IRQ number */
if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
- val = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
+ val = pcie_read(pcie, XILINX_PCIE_REG_RPIFR2) &
XILINX_PCIE_RPIFR2_MSG_DATA;
- domain = port->msi_domain->parent;
+ domain = pcie->msi_domain->parent;
} else {
val = (val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
XILINX_PCIE_RPIFR1_INTR_SHIFT;
- domain = port->leg_domain;
+ domain = pcie->leg_domain;
}
/* Clear interrupt FIFO register 1 */
- pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+ pcie_write(pcie, XILINX_PCIE_RPIFR1_ALL_MASK,
XILINX_PCIE_REG_RPIFR1);
generic_handle_domain_irq(domain, val);
@@ -442,20 +442,20 @@ static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
error:
/* Clear the Interrupt Decode register */
- pcie_write(port, status, XILINX_PCIE_REG_IDR);
+ pcie_write(pcie, status, XILINX_PCIE_REG_IDR);
return IRQ_HANDLED;
}
/**
* xilinx_pcie_init_irq_domain - Initialize IRQ domain
- * @port: PCIe port information
+ * @pcie: PCIe port information
*
* Return: '0' on success and error value on failure
*/
-static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
+static int xilinx_pcie_init_irq_domain(struct xilinx_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
struct device_node *pcie_intc_node;
int ret;
@@ -466,25 +466,25 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
return -ENODEV;
}
- port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
+ pcie->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
&intx_domain_ops,
- port);
+ pcie);
of_node_put(pcie_intc_node);
- if (!port->leg_domain) {
+ if (!pcie->leg_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENODEV;
}
/* Setup MSI */
if (IS_ENABLED(CONFIG_PCI_MSI)) {
- phys_addr_t pa = ALIGN_DOWN(virt_to_phys(port), SZ_4K);
+ phys_addr_t pa = ALIGN_DOWN(virt_to_phys(pcie), SZ_4K);
- ret = xilinx_allocate_msi_domains(port);
+ ret = xilinx_allocate_msi_domains(pcie);
if (ret)
return ret;
- pcie_write(port, upper_32_bits(pa), XILINX_PCIE_REG_MSIBASE1);
- pcie_write(port, lower_32_bits(pa), XILINX_PCIE_REG_MSIBASE2);
+ pcie_write(pcie, upper_32_bits(pa), XILINX_PCIE_REG_MSIBASE1);
+ pcie_write(pcie, lower_32_bits(pa), XILINX_PCIE_REG_MSIBASE2);
}
return 0;
@@ -492,44 +492,44 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
/**
* xilinx_pcie_init_port - Initialize hardware
- * @port: PCIe port information
+ * @pcie: PCIe port information
*/
-static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
+static void xilinx_pcie_init_port(struct xilinx_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
- if (xilinx_pcie_link_up(port))
+ if (xilinx_pcie_link_up(pcie))
dev_info(dev, "PCIe Link is UP\n");
else
dev_info(dev, "PCIe Link is DOWN\n");
/* Disable all interrupts */
- pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
+ pcie_write(pcie, ~XILINX_PCIE_IDR_ALL_MASK,
XILINX_PCIE_REG_IMR);
/* Clear pending interrupts */
- pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
+ pcie_write(pcie, pcie_read(pcie, XILINX_PCIE_REG_IDR) &
XILINX_PCIE_IMR_ALL_MASK,
XILINX_PCIE_REG_IDR);
/* Enable all interrupts we handle */
- pcie_write(port, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR);
+ pcie_write(pcie, XILINX_PCIE_IMR_ENABLE_MASK, XILINX_PCIE_REG_IMR);
/* Enable the Bridge enable bit */
- pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
+ pcie_write(pcie, pcie_read(pcie, XILINX_PCIE_REG_RPSC) |
XILINX_PCIE_REG_RPSC_BEN,
XILINX_PCIE_REG_RPSC);
}
/**
* xilinx_pcie_parse_dt - Parse Device tree
- * @port: PCIe port information
+ * @pcie: PCIe port information
*
* Return: '0' on success and error value on failure
*/
-static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
+static int xilinx_pcie_parse_dt(struct xilinx_pcie *pcie)
{
- struct device *dev = port->dev;
+ struct device *dev = pcie->dev;
struct device_node *node = dev->of_node;
struct resource regs;
unsigned int irq;
@@ -541,14 +541,14 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
return err;
}
- port->reg_base = devm_pci_remap_cfg_resource(dev, &regs);
- if (IS_ERR(port->reg_base))
- return PTR_ERR(port->reg_base);
+ pcie->reg_base = devm_pci_remap_cfg_resource(dev, &regs);
+ if (IS_ERR(pcie->reg_base))
+ return PTR_ERR(pcie->reg_base);
irq = irq_of_parse_and_map(node, 0);
err = devm_request_irq(dev, irq, xilinx_pcie_intr_handler,
IRQF_SHARED | IRQF_NO_THREAD,
- "xilinx-pcie", port);
+ "xilinx-pcie", pcie);
if (err) {
dev_err(dev, "unable to request irq %d\n", irq);
return err;
@@ -566,41 +566,41 @@ static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
static int xilinx_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct xilinx_pcie_port *port;
+ struct xilinx_pcie *pcie;
struct pci_host_bridge *bridge;
int err;
if (!dev->of_node)
return -ENODEV;
- bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
+ bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
if (!bridge)
return -ENODEV;
- port = pci_host_bridge_priv(bridge);
- mutex_init(&port->map_lock);
- port->dev = dev;
+ pcie = pci_host_bridge_priv(bridge);
+ mutex_init(&pcie->map_lock);
+ pcie->dev = dev;
- err = xilinx_pcie_parse_dt(port);
+ err = xilinx_pcie_parse_dt(pcie);
if (err) {
dev_err(dev, "Parsing DT failed\n");
return err;
}
- xilinx_pcie_init_port(port);
+ xilinx_pcie_init_port(pcie);
- err = xilinx_pcie_init_irq_domain(port);
+ err = xilinx_pcie_init_irq_domain(pcie);
if (err) {
dev_err(dev, "Failed creating IRQ Domain\n");
return err;
}
- bridge->sysdata = port;
+ bridge->sysdata = pcie;
bridge->ops = &xilinx_pcie_ops;
err = pci_host_probe(bridge);
if (err)
- xilinx_free_msi_domains(port);
+ xilinx_free_msi_domains(pcie);
return err;
}
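Throughout the renamed xilinx driver, MSI vectors are handed out with bitmap_find_free_region()/bitmap_release_region() under map_lock, so multi-MSI blocks come out power-of-two sized and naturally aligned, as MSI requires. A user-space model of that region allocator; the kernel helpers operate on unsigned long words, a bool array is used here purely for clarity:

/*
 * Toy version of order-based region allocation: a block of (1 << order)
 * vectors is searched at (1 << order)-aligned offsets only.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_MSI 128	/* mirrors XILINX_NUM_MSI_IRQS */

static bool used[NUM_MSI];

static int find_free_region(int order)
{
	int size = 1 << order;

	for (int base = 0; base + size <= NUM_MSI; base += size) {
		bool free = true;

		for (int i = 0; i < size; i++)
			if (used[base + i])
				free = false;
		if (free) {
			for (int i = 0; i < size; i++)
				used[base + i] = true;
			return base;
		}
	}
	return -1;	/* the driver turns this into -ENOSPC */
}

static void release_region(int base, int order)
{
	for (int i = 0; i < (1 << order); i++)
		used[base + i] = false;
}

int main(void)
{
	int a = find_free_region(2);	/* four vectors, like nr_irqs = 4 */
	int b = find_free_region(0);	/* a single vector */

	printf("order-2 block at %d, single vector at %d\n", a, b);
	release_region(a, 2);
	return 0;
}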
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index a45e8e59d3d4..cc166c683638 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -501,6 +501,40 @@ static inline void vmd_acpi_begin(void) { }
static inline void vmd_acpi_end(void) { }
#endif /* CONFIG_ACPI */
+static void vmd_domain_reset(struct vmd_dev *vmd)
+{
+ u16 bus, max_buses = resource_size(&vmd->resources[0]);
+ u8 dev, functions, fn, hdr_type;
+ char __iomem *base;
+
+ for (bus = 0; bus < max_buses; bus++) {
+ for (dev = 0; dev < 32; dev++) {
+ base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
+ PCI_DEVFN(dev, 0), 0);
+
+ hdr_type = readb(base + PCI_HEADER_TYPE) &
+ PCI_HEADER_TYPE_MASK;
+
+ functions = (hdr_type & 0x80) ? 8 : 1;
+ for (fn = 0; fn < functions; fn++) {
+ base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
+ PCI_DEVFN(dev, fn), 0);
+
+ hdr_type = readb(base + PCI_HEADER_TYPE) &
+ PCI_HEADER_TYPE_MASK;
+
+ if (hdr_type != PCI_HEADER_TYPE_BRIDGE ||
+ (readw(base + PCI_CLASS_DEVICE) !=
+ PCI_CLASS_BRIDGE_PCI))
+ continue;
+
+ memset_io(base + PCI_IO_BASE, 0,
+ PCI_ROM_ADDRESS1 - PCI_IO_BASE);
+ }
+ }
+ }
+}
+
static void vmd_attach_resources(struct vmd_dev *vmd)
{
vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
@@ -541,7 +575,7 @@ static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
int ret;
ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock);
- if (ret || vmlock == ~0)
+ if (ret || PCI_POSSIBLE_ERROR(vmlock))
return -ENODEV;
if (MB2_SHADOW_EN(vmlock)) {
@@ -661,6 +695,21 @@ static int vmd_alloc_irqs(struct vmd_dev *vmd)
return 0;
}
+/*
+ * Since VMD is an aperture to regular PCIe root ports, only allow it to
+ * control features that the OS is allowed to control on the physical PCI bus.
+ */
+static void vmd_copy_host_bridge_flags(struct pci_host_bridge *root_bridge,
+ struct pci_host_bridge *vmd_bridge)
+{
+ vmd_bridge->native_pcie_hotplug = root_bridge->native_pcie_hotplug;
+ vmd_bridge->native_shpc_hotplug = root_bridge->native_shpc_hotplug;
+ vmd_bridge->native_aer = root_bridge->native_aer;
+ vmd_bridge->native_pme = root_bridge->native_pme;
+ vmd_bridge->native_ltr = root_bridge->native_ltr;
+ vmd_bridge->native_dpc = root_bridge->native_dpc;
+}
+
static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
struct pci_sysdata *sd = &vmd->sysdata;
@@ -798,6 +847,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
return -ENODEV;
}
+ vmd_copy_host_bridge_flags(pci_find_host_bridge(vmd->dev->bus),
+ to_pci_host_bridge(vmd->bus->bridge));
+
vmd_attach_resources(vmd);
if (vmd->irq_domain)
dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
@@ -805,6 +857,9 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
vmd_acpi_begin();
pci_scan_child_bus(vmd->bus);
+ vmd_domain_reset(vmd);
+ list_for_each_entry(child, &vmd->bus->children, node)
+ pci_reset_bus(child->self);
pci_assign_unassigned_bus_resources(vmd->bus);
/*
@@ -953,6 +1008,10 @@ static const struct pci_device_id vmd_ids[] = {
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
VMD_FEAT_OFFSET_FIRST_VECTOR,},
+ {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa77f),
+ .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
+ VMD_FEAT_HAS_BUS_RESTRICTIONS |
+ VMD_FEAT_OFFSET_FIRST_VECTOR,},
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
VMD_FEAT_HAS_BUS_RESTRICTIONS |
diff --git a/drivers/pci/endpoint/functions/pci-epf-ntb.c b/drivers/pci/endpoint/functions/pci-epf-ntb.c
index 5a03401f4571..9a00448c7e61 100644
--- a/drivers/pci/endpoint/functions/pci-epf-ntb.c
+++ b/drivers/pci/endpoint/functions/pci-epf-ntb.c
@@ -1262,7 +1262,7 @@ static void epf_ntb_db_mw_bar_cleanup(struct epf_ntb *ntb,
}
/**
- * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capaiblity
+ * epf_ntb_configure_interrupt() - Configure MSI/MSI-X capability
* @ntb: NTB device that facilitates communication between HOST1 and HOST2
* @type: PRIMARY interface or SECONDARY interface
*
diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c
index 38621558d397..3bc9273d0a08 100644
--- a/drivers/pci/endpoint/pci-epc-core.c
+++ b/drivers/pci/endpoint/pci-epc-core.c
@@ -334,7 +334,7 @@ int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
u8 encode_int;
if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
- interrupts > 32)
+ interrupts < 1 || interrupts > 32)
return -EINVAL;
if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
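The added "interrupts < 1" guard matters because the MSI capability carries the vector count as a log2 encoding in a 3-bit field, so only 1, 2, 4, ... 32 are representable and a count of zero has no encoding. A sketch of the constraint; order_base_2() is re-implemented here, and encode_msi_count() is a hypothetical stand-in for the controller's encoding step, not the driver's actual helper:

#include <stdio.h>

static int order_base_2(unsigned int n)
{
	int order = 0;

	while ((1U << order) < n)
		order++;
	return order;	/* ceil(log2(n)); 0 for n == 1 */
}

static int encode_msi_count(unsigned int interrupts)
{
	if (interrupts < 1 || interrupts > 32)
		return -1;	/* -EINVAL, as in pci_epc_set_msi() */
	return order_base_2(interrupts);
}

int main(void)
{
	/* n = 0 and n = 33 are rejected; the rest map to 0..5 */
	for (unsigned int n = 0; n <= 33; n++)
		printf("n=%u -> %d\n", n, encode_msi_count(n));
	return 0;
}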
diff --git a/drivers/pci/hotplug/TODO b/drivers/pci/hotplug/TODO
index cc6194aa24c1..88f217c82b4f 100644
--- a/drivers/pci/hotplug/TODO
+++ b/drivers/pci/hotplug/TODO
@@ -30,11 +30,6 @@ ibmphp:
or ibmphp should store a pointer to its bus in struct slot. Probably the
former.
-* The functions get_max_adapter_speed() and get_bus_name() are commented out.
- Can they be deleted? There are also forward declarations at the top of
- ibmphp_core.c as well as pointers in ibmphp_hotplug_slot_ops, likewise
- commented out.
-
* ibmphp_init_devno() takes a struct slot **, it could instead take a
struct slot *.
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index ed7b58eb64d2..93fd2a621822 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -2273,7 +2273,7 @@ static u32 configure_new_device(struct controller *ctrl, struct pci_func *func
while ((function < max_functions) && (!stop_it)) {
pci_bus_read_config_dword(ctrl->pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);
- if (ID == 0xFFFFFFFF) {
+ if (PCI_POSSIBLE_ERROR(ID)) {
function++;
} else {
/* Setup slot structure. */
@@ -2517,7 +2517,7 @@ static int configure_new_function(struct controller *ctrl, struct pci_func *func
pci_bus_read_config_dword(pci_bus, PCI_DEVFN(device, 0), 0x00, &ID);
pci_bus->number = func->bus;
- if (ID != 0xFFFFFFFF) { /* device present */
+ if (!PCI_POSSIBLE_ERROR(ID)) { /* device present */
/* Setup slot structure. */
new_slot = cpqhp_slot_create(hold_bus_node->base);
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 17124254d897..197997e264a2 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -50,14 +50,6 @@ static int irqs[16]; /* PIC mode IRQs we're using so far (in case MPS
static int init_flag;
-/*
-static int get_max_adapter_speed_1 (struct hotplug_slot *, u8 *, u8);
-
-static inline int get_max_adapter_speed (struct hotplug_slot *hs, u8 *value)
-{
- return get_max_adapter_speed_1 (hs, value, 1);
-}
-*/
static inline int get_cur_bus_info(struct slot **sl)
{
int rc = 1;
@@ -401,69 +393,6 @@ static int get_max_bus_speed(struct slot *slot)
return rc;
}
-/*
-static int get_max_adapter_speed_1(struct hotplug_slot *hotplug_slot, u8 *value, u8 flag)
-{
- int rc = -ENODEV;
- struct slot *pslot;
- struct slot myslot;
-
- debug("get_max_adapter_speed_1 - Entry hotplug_slot[%lx] pvalue[%lx]\n",
- (ulong)hotplug_slot, (ulong) value);
-
- if (flag)
- ibmphp_lock_operations();
-
- if (hotplug_slot && value) {
- pslot = hotplug_slot->private;
- if (pslot) {
- memcpy(&myslot, pslot, sizeof(struct slot));
- rc = ibmphp_hpc_readslot(pslot, READ_SLOTSTATUS,
- &(myslot.status));
-
- if (!(SLOT_LATCH (myslot.status)) &&
- (SLOT_PRESENT (myslot.status))) {
- rc = ibmphp_hpc_readslot(pslot,
- READ_EXTSLOTSTATUS,
- &(myslot.ext_status));
- if (!rc)
- *value = SLOT_SPEED(myslot.ext_status);
- } else
- *value = MAX_ADAPTER_NONE;
- }
- }
-
- if (flag)
- ibmphp_unlock_operations();
-
- debug("get_max_adapter_speed_1 - Exit rc[%d] value[%x]\n", rc, *value);
- return rc;
-}
-
-static int get_bus_name(struct hotplug_slot *hotplug_slot, char *value)
-{
- int rc = -ENODEV;
- struct slot *pslot = NULL;
-
- debug("get_bus_name - Entry hotplug_slot[%lx]\n", (ulong)hotplug_slot);
-
- ibmphp_lock_operations();
-
- if (hotplug_slot) {
- pslot = hotplug_slot->private;
- if (pslot) {
- rc = 0;
- snprintf(value, 100, "Bus %x", pslot->bus);
- }
- } else
- rc = -ENODEV;
-
- ibmphp_unlock_operations();
- debug("get_bus_name - Exit rc[%d] value[%x]\n", rc, *value);
- return rc;
-}
-*/
-
/****************************************************************************
* This routine will initialize the ops data structure used in the validate
* function. It will also power off empty slots that are powered on since BIOS
@@ -1231,9 +1160,6 @@ const struct hotplug_slot_ops ibmphp_hotplug_slot_ops = {
.get_attention_status = get_attention_status,
.get_latch_status = get_latch_status,
.get_adapter_status = get_adapter_present,
-/* .get_max_adapter_speed = get_max_adapter_speed,
- .get_bus_name_status = get_bus_name,
-*/
};
static void ibmphp_unload(void)
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 918dccbc74b6..e0a614acee05 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -75,6 +75,8 @@ extern int pciehp_poll_time;
* @reset_lock: prevents access to the Data Link Layer Link Active bit in the
* Link Status register and to the Presence Detect State bit in the Slot
* Status register during a slot reset which may cause them to flap
+ * @depth: Number of additional hotplug ports in the path to the root bus,
+ * used as lock subclass for @reset_lock
* @ist_running: flag to keep user request waiting while IRQ thread is running
* @request_result: result of last user request submitted to the IRQ thread
* @requester: wait queue to wake up on completion of user request,
@@ -106,6 +108,7 @@ struct controller {
struct hotplug_slot hotplug_slot; /* hotplug core interface */
struct rw_semaphore reset_lock;
+ unsigned int depth;
unsigned int ist_running;
int request_result;
wait_queue_head_t requester;
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index f34114d45259..4042d87d539d 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -166,7 +166,7 @@ static void pciehp_check_presence(struct controller *ctrl)
{
int occupied;
- down_read(&ctrl->reset_lock);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
mutex_lock(&ctrl->state_lock);
occupied = pciehp_card_present_or_link_active(ctrl);
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 83a0fa119cae..1c1ebf3dad43 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -89,7 +89,7 @@ static int pcie_poll_cmd(struct controller *ctrl, int timeout)
do {
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
- if (slot_status == (u16) ~0) {
+ if (PCI_POSSIBLE_ERROR(slot_status)) {
ctrl_info(ctrl, "%s: no response from device\n",
__func__);
return 0;
@@ -165,7 +165,7 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
pcie_wait_cmd(ctrl);
pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
- if (slot_ctrl == (u16) ~0) {
+ if (PCI_POSSIBLE_ERROR(slot_ctrl)) {
ctrl_info(ctrl, "%s: no response from device\n", __func__);
goto out;
}
@@ -236,7 +236,7 @@ int pciehp_check_link_active(struct controller *ctrl)
int ret;
ret = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
- if (ret == PCIBIOS_DEVICE_NOT_FOUND || lnk_status == (u16)~0)
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(lnk_status))
return -ENODEV;
ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
@@ -443,7 +443,7 @@ int pciehp_card_present(struct controller *ctrl)
int ret;
ret = pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
- if (ret == PCIBIOS_DEVICE_NOT_FOUND || slot_status == (u16)~0)
+ if (ret == PCIBIOS_DEVICE_NOT_FOUND || PCI_POSSIBLE_ERROR(slot_status))
return -ENODEV;
return !!(slot_status & PCI_EXP_SLTSTA_PDS);
@@ -583,7 +583,7 @@ static void pciehp_ignore_dpc_link_change(struct controller *ctrl,
* the corresponding link change may have been ignored above.
* Synthesize it to ensure that it is acted on.
*/
- down_read(&ctrl->reset_lock);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
if (!pciehp_check_link_active(ctrl))
pciehp_request(ctrl, PCI_EXP_SLTSTA_DLLSC);
up_read(&ctrl->reset_lock);
@@ -621,7 +621,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
read_status:
pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
- if (status == (u16) ~0) {
+ if (PCI_POSSIBLE_ERROR(status)) {
ctrl_info(ctrl, "%s: no response from device\n", __func__);
if (parent)
pm_runtime_put(parent);
@@ -642,6 +642,8 @@ read_status:
*/
if (ctrl->power_fault_detected)
status &= ~PCI_EXP_SLTSTA_PFD;
+ else if (status & PCI_EXP_SLTSTA_PFD)
+ ctrl->power_fault_detected = true;
events |= status;
if (!events) {
@@ -651,7 +653,7 @@ read_status:
}
if (status) {
- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, status);
/*
* In MSI mode, all event bits must be zero before the port
@@ -725,8 +727,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
}
/* Check Power Fault Detected */
- if ((events & PCI_EXP_SLTSTA_PFD) && !ctrl->power_fault_detected) {
- ctrl->power_fault_detected = 1;
+ if (events & PCI_EXP_SLTSTA_PFD) {
ctrl_err(ctrl, "Slot(%s): Power fault\n", slot_name(ctrl));
pciehp_set_indicators(ctrl, PCI_EXP_SLTCTL_PWR_IND_OFF,
PCI_EXP_SLTCTL_ATTN_IND_ON);
@@ -746,7 +747,7 @@ static irqreturn_t pciehp_ist(int irq, void *dev_id)
* Disable requests have higher priority than Presence Detect Changed
* or Data Link Layer State Changed events.
*/
- down_read(&ctrl->reset_lock);
+ down_read_nested(&ctrl->reset_lock, ctrl->depth);
if (events & DISABLE_SLOT)
pciehp_handle_disable_request(ctrl);
else if (events & (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC))
@@ -906,7 +907,7 @@ int pciehp_reset_slot(struct hotplug_slot *hotplug_slot, bool probe)
if (probe)
return 0;
- down_write(&ctrl->reset_lock);
+ down_write_nested(&ctrl->reset_lock, ctrl->depth);
if (!ATTN_BUTTN(ctrl)) {
ctrl_mask |= PCI_EXP_SLTCTL_PDCE;
@@ -962,6 +963,20 @@ static inline void dbg_ctrl(struct controller *ctrl)
#define FLAG(x, y) (((x) & (y)) ? '+' : '-')
+static inline int pcie_hotplug_depth(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+ int depth = 0;
+
+ while (bus->parent) {
+ bus = bus->parent;
+ if (bus->self && bus->self->is_hotplug_bridge)
+ depth++;
+ }
+
+ return depth;
+}
+
struct controller *pcie_init(struct pcie_device *dev)
{
struct controller *ctrl;
@@ -975,6 +990,7 @@ struct controller *pcie_init(struct pcie_device *dev)
return NULL;
ctrl->pcie = dev;
+ ctrl->depth = pcie_hotplug_depth(dev->port);
pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &slot_cap);
if (pdev->hotplug_user_indicators)
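ctrl->depth exists to feed down_read_nested()/down_write_nested(): every pciehp controller shares one lock class for reset_lock, so with nested hotplug ports (a Thunderbolt daisy chain, say) lockdep would otherwise flag the outer and inner acquisitions as recursive locking. Using the distance from the root bus as the subclass gives each nesting level its own lockdep class. A user-space model of the pcie_hotplug_depth() walk, with stand-in structures for struct pci_bus/struct pci_dev:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dev { bool is_hotplug_bridge; };
struct bus { struct bus *parent; struct dev *self; };

/* Count hotplug bridges strictly above the given bus. */
static int hotplug_depth(struct bus *bus)
{
	int depth = 0;

	while (bus->parent) {
		bus = bus->parent;
		if (bus->self && bus->self->is_hotplug_bridge)
			depth++;
	}
	return depth;
}

int main(void)
{
	struct dev root_port = { true }, tb_port = { true };
	struct bus root = { NULL, NULL };		/* root bus, no self */
	struct bus level1 = { &root, &root_port };	/* below the root port */
	struct bus level2 = { &level1, &tb_port };	/* below a nested port */

	/* prints "depths: 0 0 1" */
	printf("depths: %d %d %d\n", hotplug_depth(&root),
	       hotplug_depth(&level1), hotplug_depth(&level2));
	return 0;
}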
diff --git a/drivers/pci/msi/Makefile b/drivers/pci/msi/Makefile
new file mode 100644
index 000000000000..93ef7b9e404d
--- /dev/null
+++ b/drivers/pci/msi/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the PCI/MSI
+obj-$(CONFIG_PCI) += pcidev_msi.o
+obj-$(CONFIG_PCI_MSI) += msi.o
+obj-$(CONFIG_PCI_MSI_IRQ_DOMAIN) += irqdomain.o
+obj-$(CONFIG_PCI_MSI_ARCH_FALLBACKS) += legacy.o
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
new file mode 100644
index 000000000000..e9cf318e6670
--- /dev/null
+++ b/drivers/pci/msi/irqdomain.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Message Signaled Interrupt (MSI) - irqdomain support
+ */
+#include <linux/acpi_iort.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+
+#include "msi.h"
+
+int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ struct irq_domain *domain;
+
+ domain = dev_get_msi_domain(&dev->dev);
+ if (domain && irq_domain_is_hierarchy(domain))
+ return msi_domain_alloc_irqs_descs_locked(domain, &dev->dev, nvec);
+
+ return pci_msi_legacy_setup_msi_irqs(dev, nvec, type);
+}
+
+void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
+{
+ struct irq_domain *domain;
+
+ domain = dev_get_msi_domain(&dev->dev);
+ if (domain && irq_domain_is_hierarchy(domain))
+ msi_domain_free_irqs_descs_locked(domain, &dev->dev);
+ else
+ pci_msi_legacy_teardown_msi_irqs(dev);
+ msi_free_msi_descs(&dev->dev);
+}
+
+/**
+ * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
+ * @irq_data: Pointer to interrupt data of the MSI interrupt
+ * @msg: Pointer to the message
+ */
+static void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
+{
+ struct msi_desc *desc = irq_data_get_msi_desc(irq_data);
+
+ /*
+ * For MSI-X desc->irq is always equal to irq_data->irq. For
+ * MSI only the first interrupt of MULTI MSI passes the test.
+ */
+ if (desc->irq == irq_data->irq)
+ __pci_write_msi_msg(desc, msg);
+}
+
+/**
+ * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
+ * @desc: Pointer to the MSI descriptor
+ *
+ * The ID number is only used within the irqdomain.
+ */
+static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
+{
+ struct pci_dev *dev = msi_desc_to_pci_dev(desc);
+
+ return (irq_hw_number_t)desc->msi_index |
+ pci_dev_id(dev) << 11 |
+ (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
+}
+
+static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
+{
+ return !desc->pci.msi_attrib.is_msix && desc->nvec_used > 1;
+}
+
+/**
+ * pci_msi_domain_check_cap - Verify that @domain supports the capabilities
+ * for @dev
+ * @domain: The interrupt domain to check
+ * @info: The domain info for verification
+ * @dev: The device to check
+ *
+ * Returns:
+ * 0 if the functionality is supported
+ * 1 if Multi MSI is requested, but the domain does not support it
+ * -ENOTSUPP otherwise
+ */
+static int pci_msi_domain_check_cap(struct irq_domain *domain,
+ struct msi_domain_info *info,
+ struct device *dev)
+{
+ struct msi_desc *desc = msi_first_desc(dev, MSI_DESC_ALL);
+
+ /* Special handling to support __pci_enable_msi_range() */
+ if (pci_msi_desc_is_multi_msi(desc) &&
+ !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
+ return 1;
+
+ if (desc->pci.msi_attrib.is_msix) {
+ if (!(info->flags & MSI_FLAG_PCI_MSIX))
+ return -ENOTSUPP;
+
+ if (info->flags & MSI_FLAG_MSIX_CONTIGUOUS) {
+ unsigned int idx = 0;
+
+ /* Check for gaps in the entry indices */
+ msi_for_each_desc(desc, dev, MSI_DESC_ALL) {
+ if (desc->msi_index != idx++)
+ return -ENOTSUPP;
+ }
+ }
+ }
+ return 0;
+}
+
+static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
+ struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = pci_msi_domain_calc_hwirq(desc);
+}
+
+static struct msi_domain_ops pci_msi_domain_ops_default = {
+ .set_desc = pci_msi_domain_set_desc,
+ .msi_check = pci_msi_domain_check_cap,
+};
+
+static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
+{
+ struct msi_domain_ops *ops = info->ops;
+
+ if (ops == NULL) {
+ info->ops = &pci_msi_domain_ops_default;
+ } else {
+ if (ops->set_desc == NULL)
+ ops->set_desc = pci_msi_domain_set_desc;
+ if (ops->msi_check == NULL)
+ ops->msi_check = pci_msi_domain_check_cap;
+ }
+}
+
+static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
+{
+ struct irq_chip *chip = info->chip;
+
+ BUG_ON(!chip);
+ if (!chip->irq_write_msi_msg)
+ chip->irq_write_msi_msg = pci_msi_domain_write_msg;
+ if (!chip->irq_mask)
+ chip->irq_mask = pci_msi_mask_irq;
+ if (!chip->irq_unmask)
+ chip->irq_unmask = pci_msi_unmask_irq;
+}
+
+/**
+ * pci_msi_create_irq_domain - Create a MSI interrupt domain
+ * @fwnode: Optional fwnode of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a MSI interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
+ struct msi_domain_info *info,
+ struct irq_domain *parent)
+{
+ struct irq_domain *domain;
+
+ if (WARN_ON(info->flags & MSI_FLAG_LEVEL_CAPABLE))
+ info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
+
+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+ pci_msi_domain_update_dom_ops(info);
+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+ pci_msi_domain_update_chip_ops(info);
+
+ info->flags |= MSI_FLAG_ACTIVATE_EARLY | MSI_FLAG_DEV_SYSFS;
+ if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
+ info->flags |= MSI_FLAG_MUST_REACTIVATE;
+
+ /* PCI-MSI is oneshot-safe */
+ info->chip->flags |= IRQCHIP_ONESHOT_SAFE;
+
+ domain = msi_create_irq_domain(fwnode, info, parent);
+ if (!domain)
+ return NULL;
+
+ irq_domain_update_bus_token(domain, DOMAIN_BUS_PCI_MSI);
+ return domain;
+}
+EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain);
+
+/*
+ * Users of the generic MSI infrastructure expect a device to have a single ID,
+ * so with DMA aliases we have to pick the least-worst compromise. Devices with
+ * DMA phantom functions tend to still emit MSIs from the real function number,
+ * so we ignore those and only consider topological aliases where either the
+ * alias device or RID appears on a different bus number. We also make the
+ * reasonable assumption that bridges are walked in an upstream direction (so
+ * the last one seen wins), and the much braver assumption that the most likely
+ * case is that of PCI->PCIe so we should always use the alias RID. This echoes
+ * the logic from intel_irq_remapping's set_msi_sid(), which presumably works
+ * well enough in practice; in the face of the horrible PCIe<->PCI-X conditions
+ * for taking ownership all we can really do is close our eyes and hope...
+ */
+static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
+{
+ u32 *pa = data;
+ u8 bus = PCI_BUS_NUM(*pa);
+
+ if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus)
+ *pa = alias;
+
+ return 0;
+}
+
+/**
+ * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
+ * @domain: The interrupt domain
+ * @pdev: The PCI device.
+ *
+ * The RID for a device is formed from the alias, with a firmware
+ * supplied mapping applied
+ *
+ * Returns: The RID.
+ */
+u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
+{
+ struct device_node *of_node;
+ u32 rid = pci_dev_id(pdev);
+
+ pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+
+ of_node = irq_domain_get_of_node(domain);
+ rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
+ iort_msi_map_id(&pdev->dev, rid);
+
+ return rid;
+}
+
+/**
+ * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
+ * @pdev: The PCI device
+ *
+ * Use the firmware data to find a device-specific MSI domain
+ * (i.e. not one that is set as a default).
+ *
+ * Returns: The corresponding MSI domain or NULL if none has been found.
+ */
+struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
+{
+ struct irq_domain *dom;
+ u32 rid = pci_dev_id(pdev);
+
+ pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+ dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI);
+ if (!dom)
+ dom = iort_get_device_domain(&pdev->dev, rid,
+ DOMAIN_BUS_PCI_MSI);
+ return dom;
+}
+
+/**
+ * pci_dev_has_special_msi_domain - Check whether the device is handled by
+ * a non-standard PCI-MSI domain
+ * @pdev: The PCI device to check.
+ *
+ * Returns: True if the device irqdomain or the bus irqdomain is
+ * non-standard PCI/MSI.
+ */
+bool pci_dev_has_special_msi_domain(struct pci_dev *pdev)
+{
+ struct irq_domain *dom = dev_get_msi_domain(&pdev->dev);
+
+ if (!dom)
+ dom = dev_get_msi_domain(&pdev->bus->dev);
+
+ if (!dom)
+ return true;
+
+ return dom->bus_token != DOMAIN_BUS_PCI_MSI;
+}
diff --git a/drivers/pci/msi/legacy.c b/drivers/pci/msi/legacy.c
new file mode 100644
index 000000000000..db761adef652
--- /dev/null
+++ b/drivers/pci/msi/legacy.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Message Signaled Interrupt (MSI).
+ *
+ * Legacy architecture specific setup and teardown mechanism.
+ */
+#include "msi.h"
+
+/* Arch hooks */
+int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+{
+ return -EINVAL;
+}
+
+void __weak arch_teardown_msi_irq(unsigned int irq)
+{
+}
+
+int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ struct msi_desc *desc;
+ int ret;
+
+ /*
+ * If an architecture wants to support multiple MSI, it needs to
+ * override arch_setup_msi_irqs()
+ */
+ if (type == PCI_CAP_ID_MSI && nvec > 1)
+ return 1;
+
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
+ ret = arch_setup_msi_irq(dev, desc);
+ if (ret)
+ return ret < 0 ? ret : -ENOSPC;
+ }
+
+ return 0;
+}
+
+void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *desc;
+ int i;
+
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED) {
+ for (i = 0; i < desc->nvec_used; i++)
+ arch_teardown_msi_irq(desc->irq + i);
+ }
+}
+
+static int pci_msi_setup_check_result(struct pci_dev *dev, int type, int ret)
+{
+ struct msi_desc *desc;
+ int avail = 0;
+
+ if (type != PCI_CAP_ID_MSIX || ret >= 0)
+ return ret;
+
+ /* Scan the MSI descriptors for successfully allocated ones. */
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED)
+ avail++;
+
+ return avail ? avail : ret;
+}
+
+int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ int ret = arch_setup_msi_irqs(dev, nvec, type);
+
+ ret = pci_msi_setup_check_result(dev, type, ret);
+ if (!ret)
+ ret = msi_device_populate_sysfs(&dev->dev);
+ return ret;
+}
+
+void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev)
+{
+ msi_device_destroy_sysfs(&dev->dev);
+ arch_teardown_msi_irqs(dev);
+}
diff --git a/drivers/pci/msi.c b/drivers/pci/msi/msi.c
index d84cf30bb279..9037a7827eca 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -6,156 +6,29 @@
* Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
* Copyright (C) 2016 Christoph Hellwig.
*/
-
#include <linux/err.h>
-#include <linux/mm.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
#include <linux/export.h>
-#include <linux/ioport.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/msi.h>
-#include <linux/smp.h>
-#include <linux/errno.h>
-#include <linux/io.h>
-#include <linux/acpi_iort.h>
-#include <linux/slab.h>
-#include <linux/irqdomain.h>
-#include <linux/of_irq.h>
-
-#include "pci.h"
-
-#ifdef CONFIG_PCI_MSI
+#include <linux/irq.h>
+
+#include "../pci.h"
+#include "msi.h"
static int pci_msi_enable = 1;
int pci_msi_ignore_mask;
-#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
-
-#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
-static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- struct irq_domain *domain;
-
- domain = dev_get_msi_domain(&dev->dev);
- if (domain && irq_domain_is_hierarchy(domain))
- return msi_domain_alloc_irqs(domain, &dev->dev, nvec);
-
- return arch_setup_msi_irqs(dev, nvec, type);
-}
-
-static void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
-{
- struct irq_domain *domain;
-
- domain = dev_get_msi_domain(&dev->dev);
- if (domain && irq_domain_is_hierarchy(domain))
- msi_domain_free_irqs(domain, &dev->dev);
- else
- arch_teardown_msi_irqs(dev);
-}
-#else
-#define pci_msi_setup_msi_irqs arch_setup_msi_irqs
-#define pci_msi_teardown_msi_irqs arch_teardown_msi_irqs
-#endif
-
-#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
-/* Arch hooks */
-int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
-{
- return -EINVAL;
-}
-
-void __weak arch_teardown_msi_irq(unsigned int irq)
-{
-}
-
-int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- struct msi_desc *entry;
- int ret;
-
- /*
- * If an architecture wants to support multiple MSI, it needs to
- * override arch_setup_msi_irqs()
- */
- if (type == PCI_CAP_ID_MSI && nvec > 1)
- return 1;
-
- for_each_pci_msi_entry(entry, dev) {
- ret = arch_setup_msi_irq(dev, entry);
- if (ret < 0)
- return ret;
- if (ret > 0)
- return -ENOSPC;
- }
-
- return 0;
-}
-
-void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
-{
- int i;
- struct msi_desc *entry;
-
- for_each_pci_msi_entry(entry, dev)
- if (entry->irq)
- for (i = 0; i < entry->nvec_used; i++)
- arch_teardown_msi_irq(entry->irq + i);
-}
-#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
-
-static void default_restore_msi_irq(struct pci_dev *dev, int irq)
-{
- struct msi_desc *entry;
-
- entry = NULL;
- if (dev->msix_enabled) {
- for_each_pci_msi_entry(entry, dev) {
- if (irq == entry->irq)
- break;
- }
- } else if (dev->msi_enabled) {
- entry = irq_get_msi_desc(irq);
- }
-
- if (entry)
- __pci_write_msi_msg(entry, &entry->msg);
-}
-
-void __weak arch_restore_msi_irqs(struct pci_dev *dev)
-{
- return default_restore_msi_irqs(dev);
-}
-
-/*
- * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
- * mask all MSI interrupts by clearing the MSI enable bit does not work
- * reliably as devices without an INTx disable bit will then generate a
- * level IRQ which will never be cleared.
- */
-static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
-{
- /* Don't shift by >= width of type */
- if (desc->msi_attrib.multi_cap >= 5)
- return 0xffffffff;
- return (1 << (1 << desc->msi_attrib.multi_cap)) - 1;
-}
-
static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
{
- raw_spinlock_t *lock = &desc->dev->msi_lock;
+ raw_spinlock_t *lock = &to_pci_dev(desc->dev)->msi_lock;
unsigned long flags;
- if (!desc->msi_attrib.can_mask)
+ if (!desc->pci.msi_attrib.can_mask)
return;
raw_spin_lock_irqsave(lock, flags);
- desc->msi_mask &= ~clear;
- desc->msi_mask |= set;
- pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos,
- desc->msi_mask);
+ desc->pci.msi_mask &= ~clear;
+ desc->pci.msi_mask |= set;
+ pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->pci.mask_pos,
+ desc->pci.msi_mask);
raw_spin_unlock_irqrestore(lock, flags);
}
@@ -171,7 +44,7 @@ static inline void pci_msi_unmask(struct msi_desc *desc, u32 mask)
static inline void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
{
- return desc->mask_base + desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;
+ return desc->pci.mask_base + desc->msi_index * PCI_MSIX_ENTRY_SIZE;
}
/*
@@ -184,27 +57,27 @@ static void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
{
void __iomem *desc_addr = pci_msix_desc_addr(desc);
- if (desc->msi_attrib.can_mask)
+ if (desc->pci.msi_attrib.can_mask)
writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
static inline void pci_msix_mask(struct msi_desc *desc)
{
- desc->msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
- pci_msix_write_vector_ctrl(desc, desc->msix_ctrl);
+ desc->pci.msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
/* Flush write to device */
- readl(desc->mask_base);
+ readl(desc->pci.mask_base);
}
static inline void pci_msix_unmask(struct msi_desc *desc)
{
- desc->msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
- pci_msix_write_vector_ctrl(desc, desc->msix_ctrl);
+ desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
}
static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
{
- if (desc->msi_attrib.is_msix)
+ if (desc->pci.msi_attrib.is_msix)
pci_msix_mask(desc);
else
pci_msi_mask(desc, mask);
@@ -212,7 +85,7 @@ static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
static void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
{
- if (desc->msi_attrib.is_msix)
+ if (desc->pci.msi_attrib.is_msix)
pci_msix_unmask(desc);
else
pci_msi_unmask(desc, mask);
@@ -242,24 +115,16 @@ void pci_msi_unmask_irq(struct irq_data *data)
}
EXPORT_SYMBOL_GPL(pci_msi_unmask_irq);
-void default_restore_msi_irqs(struct pci_dev *dev)
-{
- struct msi_desc *entry;
-
- for_each_pci_msi_entry(entry, dev)
- default_restore_msi_irq(dev, entry->irq);
-}
-
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
struct pci_dev *dev = msi_desc_to_pci_dev(entry);
BUG_ON(dev->current_state != PCI_D0);
- if (entry->msi_attrib.is_msix) {
+ if (entry->pci.msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
- if (WARN_ON_ONCE(entry->msi_attrib.is_virtual))
+ if (WARN_ON_ONCE(entry->pci.msi_attrib.is_virtual))
return;
msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
@@ -271,7 +136,7 @@ void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
&msg->address_lo);
- if (entry->msi_attrib.is_64) {
+ if (entry->pci.msi_attrib.is_64) {
pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
&msg->address_hi);
pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
@@ -289,12 +154,12 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) {
/* Don't touch the hardware now */
- } else if (entry->msi_attrib.is_msix) {
+ } else if (entry->pci.msi_attrib.is_msix) {
void __iomem *base = pci_msix_desc_addr(entry);
- u32 ctrl = entry->msix_ctrl;
+ u32 ctrl = entry->pci.msix_ctrl;
bool unmasked = !(ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT);
- if (entry->msi_attrib.is_virtual)
+ if (entry->pci.msi_attrib.is_virtual)
goto skip;
/*
@@ -323,12 +188,12 @@ void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
msgctl &= ~PCI_MSI_FLAGS_QSIZE;
- msgctl |= entry->msi_attrib.multiple << 4;
+ msgctl |= entry->pci.msi_attrib.multiple << 4;
pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);
pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
msg->address_lo);
- if (entry->msi_attrib.is_64) {
+ if (entry->pci.msi_attrib.is_64) {
pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
msg->address_hi);
pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
@@ -359,30 +224,11 @@ EXPORT_SYMBOL_GPL(pci_write_msi_msg);
static void free_msi_irqs(struct pci_dev *dev)
{
- struct list_head *msi_list = dev_to_msi_list(&dev->dev);
- struct msi_desc *entry, *tmp;
- int i;
-
- for_each_pci_msi_entry(entry, dev)
- if (entry->irq)
- for (i = 0; i < entry->nvec_used; i++)
- BUG_ON(irq_has_action(entry->irq + i));
-
- if (dev->msi_irq_groups) {
- msi_destroy_sysfs(&dev->dev, dev->msi_irq_groups);
- dev->msi_irq_groups = NULL;
- }
-
pci_msi_teardown_msi_irqs(dev);
- list_for_each_entry_safe(entry, tmp, msi_list, list) {
- if (entry->msi_attrib.is_msix) {
- if (list_is_last(&entry->list, msi_list))
- iounmap(entry->mask_base);
- }
-
- list_del(&entry->list);
- free_msi_entry(entry);
+ if (dev->msix_base) {
+ iounmap(dev->msix_base);
+ dev->msix_base = NULL;
}
}
@@ -403,10 +249,19 @@ static void pci_msi_set_enable(struct pci_dev *dev, int enable)
pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
+/*
+ * Architecture override returns true when the PCI MSI message should be
+ * written by the generic restore function.
+ */
+bool __weak arch_restore_msi_irqs(struct pci_dev *dev)
+{
+ return true;
+}
+
static void __pci_restore_msi_state(struct pci_dev *dev)
{
- u16 control;
struct msi_desc *entry;
+ u16 control;
if (!dev->msi_enabled)
return;
@@ -415,12 +270,13 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
pci_intx_for_msi(dev, 0);
pci_msi_set_enable(dev, 0);
- arch_restore_msi_irqs(dev);
+ if (arch_restore_msi_irqs(dev))
+ __pci_write_msi_msg(entry, &entry->msg);
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
pci_msi_update_mask(entry, 0, 0);
control &= ~PCI_MSI_FLAGS_QSIZE;
- control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
+ control |= (entry->pci.msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
@@ -437,19 +293,25 @@ static void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
static void __pci_restore_msix_state(struct pci_dev *dev)
{
struct msi_desc *entry;
+ bool write_msg;
if (!dev->msix_enabled)
return;
- BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
/* route the table */
pci_intx_for_msi(dev, 0);
pci_msix_clear_and_set_ctrl(dev, 0,
PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
- arch_restore_msi_irqs(dev);
- for_each_pci_msi_entry(entry, dev)
- pci_msix_write_vector_ctrl(entry, entry->msix_ctrl);
+ write_msg = arch_restore_msi_irqs(dev);
+
+ msi_lock_descs(&dev->dev);
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
+ if (write_msg)
+ __pci_write_msi_msg(entry, &entry->msg);
+ pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+ }
+ msi_unlock_descs(&dev->dev);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
}
@@ -461,48 +323,79 @@ void pci_restore_msi_state(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
-static struct msi_desc *
-msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd)
+static void pcim_msi_release(void *pcidev)
{
- struct irq_affinity_desc *masks = NULL;
- struct msi_desc *entry;
- u16 control;
+ struct pci_dev *dev = pcidev;
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
+ dev->is_msi_managed = false;
+ pci_free_irq_vectors(dev);
+}
+
+/*
+ * Needs to be separate from pcim_release to prevent an ordering problem
+ * vs. msi_device_data_release() in the MSI core code.
+ */
+static int pcim_setup_msi_release(struct pci_dev *dev)
+{
+ int ret;
+
+ if (!pci_is_managed(dev) || dev->is_msi_managed)
+ return 0;
+
+ ret = devm_add_action(&dev->dev, pcim_msi_release, dev);
+ if (!ret)
+ dev->is_msi_managed = true;
+ return ret;
+}
+
+/*
+ * Ordering vs. devres: msi device data has to be installed first so that
+ * pcim_msi_release() is invoked before it on device release.
+ */
+static int pci_setup_msi_context(struct pci_dev *dev)
+{
+ int ret = msi_setup_device_data(&dev->dev);
+
+ if (!ret)
+ ret = pcim_setup_msi_release(dev);
+ return ret;
+}
+
+static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
+ struct irq_affinity_desc *masks)
+{
+ struct msi_desc desc;
+ u16 control;
/* MSI Entry Initialization */
- entry = alloc_msi_entry(&dev->dev, nvec, masks);
- if (!entry)
- goto out;
+ memset(&desc, 0, sizeof(desc));
pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
/* Lies, damned lies, and MSIs */
if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
control |= PCI_MSI_FLAGS_MASKBIT;
+ /* Respect XEN's mask disabling */
+ if (pci_msi_ignore_mask)
+ control &= ~PCI_MSI_FLAGS_MASKBIT;
- entry->msi_attrib.is_msix = 0;
- entry->msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
- entry->msi_attrib.is_virtual = 0;
- entry->msi_attrib.entry_nr = 0;
- entry->msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !!(control & PCI_MSI_FLAGS_MASKBIT);
- entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
- entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
- entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
+ desc.nvec_used = nvec;
+ desc.pci.msi_attrib.is_64 = !!(control & PCI_MSI_FLAGS_64BIT);
+ desc.pci.msi_attrib.can_mask = !!(control & PCI_MSI_FLAGS_MASKBIT);
+ desc.pci.msi_attrib.default_irq = dev->irq;
+ desc.pci.msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
+ desc.pci.msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
+ desc.affinity = masks;
if (control & PCI_MSI_FLAGS_64BIT)
- entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+ desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
else
- entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
+ desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
/* Save the initial mask status */
- if (entry->msi_attrib.can_mask)
- pci_read_config_dword(dev, entry->mask_pos, &entry->msi_mask);
+ if (desc.pci.msi_attrib.can_mask)
+ pci_read_config_dword(dev, desc.pci.mask_pos, &desc.pci.msi_mask);
-out:
- kfree(masks);
- return entry;
+ return msi_add_msi_desc(&dev->dev, &desc);
}
static int msi_verify_entries(struct pci_dev *dev)
@@ -512,14 +405,14 @@ static int msi_verify_entries(struct pci_dev *dev)
if (!dev->no_64bit_msi)
return 0;
- for_each_pci_msi_entry(entry, dev) {
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
if (entry->msg.address_hi) {
pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
entry->msg.address_hi, entry->msg.address_lo);
- return -EIO;
+ break;
}
}
- return 0;
+ return !entry ? 0 : -EIO;
}
/**
@@ -537,21 +430,29 @@ static int msi_verify_entries(struct pci_dev *dev)
static int msi_capability_init(struct pci_dev *dev, int nvec,
struct irq_affinity *affd)
{
- const struct attribute_group **groups;
+ struct irq_affinity_desc *masks = NULL;
struct msi_desc *entry;
int ret;
- pci_msi_set_enable(dev, 0); /* Disable MSI during set up */
+ /*
+ * Disable MSI during setup in the hardware, but mark it enabled
+ * so that setup code can evaluate it.
+ */
+ pci_msi_set_enable(dev, 0);
+ dev->msi_enabled = 1;
- entry = msi_setup_entry(dev, nvec, affd);
- if (!entry)
- return -ENOMEM;
+ if (affd)
+ masks = irq_create_affinity_masks(nvec, affd);
+
+ msi_lock_descs(&dev->dev);
+ ret = msi_setup_msi_desc(dev, nvec, masks);
+ if (ret)
+ goto fail;
/* All MSIs are unmasked by default; mask them all */
+ entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
pci_msi_mask(entry, msi_multi_mask(entry));
- list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
-
/* Configure MSI capability structure */
ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret)
@@ -561,26 +462,22 @@ static int msi_capability_init(struct pci_dev *dev, int nvec,
if (ret)
goto err;
- groups = msi_populate_sysfs(&dev->dev);
- if (IS_ERR(groups)) {
- ret = PTR_ERR(groups);
- goto err;
- }
-
- dev->msi_irq_groups = groups;
-
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
pci_msi_set_enable(dev, 1);
- dev->msi_enabled = 1;
pcibios_free_irq(dev);
dev->irq = entry->irq;
- return 0;
+ goto unlock;
err:
pci_msi_unmask(entry, msi_multi_mask(entry));
free_msi_irqs(dev);
+fail:
+ dev->msi_enabled = 0;
+unlock:
+ msi_unlock_descs(&dev->dev);
+ kfree(masks);
return ret;
}
@@ -605,70 +502,49 @@ static void __iomem *msix_map_region(struct pci_dev *dev,
return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}
-static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
- struct msix_entry *entries, int nvec,
- struct irq_affinity *affd)
+static int msix_setup_msi_descs(struct pci_dev *dev, void __iomem *base,
+ struct msix_entry *entries, int nvec,
+ struct irq_affinity_desc *masks)
{
- struct irq_affinity_desc *curmsk, *masks = NULL;
- struct msi_desc *entry;
+ int ret = 0, i, vec_count = pci_msix_vec_count(dev);
+ struct irq_affinity_desc *curmsk;
+ struct msi_desc desc;
void __iomem *addr;
- int ret, i;
- int vec_count = pci_msix_vec_count(dev);
-
- if (affd)
- masks = irq_create_affinity_masks(nvec, affd);
- for (i = 0, curmsk = masks; i < nvec; i++) {
- entry = alloc_msi_entry(&dev->dev, 1, curmsk);
- if (!entry) {
- if (!i)
- iounmap(base);
- else
- free_msi_irqs(dev);
- /* No enough memory. Don't try again */
- ret = -ENOMEM;
- goto out;
- }
+ memset(&desc, 0, sizeof(desc));
- entry->msi_attrib.is_msix = 1;
- entry->msi_attrib.is_64 = 1;
+ desc.nvec_used = 1;
+ desc.pci.msi_attrib.is_msix = 1;
+ desc.pci.msi_attrib.is_64 = 1;
+ desc.pci.msi_attrib.default_irq = dev->irq;
+ desc.pci.mask_base = base;
- if (entries)
- entry->msi_attrib.entry_nr = entries[i].entry;
- else
- entry->msi_attrib.entry_nr = i;
+ for (i = 0, curmsk = masks; i < nvec; i++, curmsk++) {
+ desc.msi_index = entries ? entries[i].entry : i;
+ desc.affinity = masks ? curmsk : NULL;
+ desc.pci.msi_attrib.is_virtual = desc.msi_index >= vec_count;
+ desc.pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
+ !desc.pci.msi_attrib.is_virtual;
- entry->msi_attrib.is_virtual =
- entry->msi_attrib.entry_nr >= vec_count;
-
- entry->msi_attrib.can_mask = !pci_msi_ignore_mask &&
- !entry->msi_attrib.is_virtual;
-
- entry->msi_attrib.default_irq = dev->irq;
- entry->mask_base = base;
-
- if (entry->msi_attrib.can_mask) {
- addr = pci_msix_desc_addr(entry);
- entry->msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ if (desc.pci.msi_attrib.can_mask) {
+ addr = pci_msix_desc_addr(&desc);
+ desc.pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
- list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
- if (masks)
- curmsk++;
+ ret = msi_add_msi_desc(&dev->dev, &desc);
+ if (ret)
+ break;
}
- ret = 0;
-out:
- kfree(masks);
return ret;
}
static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
{
- struct msi_desc *entry;
+ struct msi_desc *desc;
- for_each_pci_msi_entry(entry, dev) {
- if (entries) {
- entries->vector = entry->irq;
+ if (entries) {
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) {
+ entries->vector = desc->irq;
entries++;
}
}
@@ -686,6 +562,41 @@ static void msix_mask_all(void __iomem *base, int tsize)
writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
}
+static int msix_setup_interrupts(struct pci_dev *dev, void __iomem *base,
+ struct msix_entry *entries, int nvec,
+ struct irq_affinity *affd)
+{
+ struct irq_affinity_desc *masks = NULL;
+ int ret;
+
+ if (affd)
+ masks = irq_create_affinity_masks(nvec, affd);
+
+ msi_lock_descs(&dev->dev);
+ ret = msix_setup_msi_descs(dev, base, entries, nvec, masks);
+ if (ret)
+ goto out_free;
+
+ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+ if (ret)
+ goto out_free;
+
+ /* Check if all MSI entries honor device restrictions */
+ ret = msi_verify_entries(dev);
+ if (ret)
+ goto out_free;
+
+ msix_update_entries(dev, entries);
+ goto out_unlock;
+
+out_free:
+ free_msi_irqs(dev);
+out_unlock:
+ msi_unlock_descs(&dev->dev);
+ kfree(masks);
+ return ret;
+}
+
/**
* msix_capability_init - configure device's MSI-X capability
* @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -700,7 +611,6 @@ static void msix_mask_all(void __iomem *base, int tsize)
static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
int nvec, struct irq_affinity *affd)
{
- const struct attribute_group **groups;
void __iomem *base;
int ret, tsize;
u16 control;
@@ -713,6 +623,9 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
PCI_MSIX_FLAGS_ENABLE);
+ /* Mark it enabled so setup functions can query it */
+ dev->msix_enabled = 1;
+
pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
/* Request & Map MSI-X table region */
tsize = msix_table_size(control);
@@ -722,32 +635,14 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
goto out_disable;
}
- ret = msix_setup_entries(dev, base, entries, nvec, affd);
- if (ret)
- goto out_disable;
-
- ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
- if (ret)
- goto out_avail;
+ dev->msix_base = base;
- /* Check if all MSI entries honor device restrictions */
- ret = msi_verify_entries(dev);
+ ret = msix_setup_interrupts(dev, base, entries, nvec, affd);
if (ret)
- goto out_free;
-
- msix_update_entries(dev, entries);
-
- groups = msi_populate_sysfs(&dev->dev);
- if (IS_ERR(groups)) {
- ret = PTR_ERR(groups);
- goto out_free;
- }
-
- dev->msi_irq_groups = groups;
+ goto out_disable;
- /* Set MSI-X enabled bits and unmask the function */
+ /* Disable INTX */
pci_intx_for_msi(dev, 0);
- dev->msix_enabled = 1;
/*
* Ensure that all table entries are masked to prevent
@@ -763,27 +658,8 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
pcibios_free_irq(dev);
return 0;
-out_avail:
- if (ret < 0) {
- /*
- * If we had some success, report the number of IRQs
- * we succeeded in setting up.
- */
- struct msi_desc *entry;
- int avail = 0;
-
- for_each_pci_msi_entry(entry, dev) {
- if (entry->irq != 0)
- avail++;
- }
- if (avail != 0)
- ret = avail;
- }
-
-out_free:
- free_msi_irqs(dev);
-
out_disable:
+ dev->msix_enabled = 0;
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);
return ret;
@@ -870,18 +746,17 @@ static void pci_msi_shutdown(struct pci_dev *dev)
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
- BUG_ON(list_empty(dev_to_msi_list(&dev->dev)));
- desc = first_pci_msi_entry(dev);
-
pci_msi_set_enable(dev, 0);
pci_intx_for_msi(dev, 1);
dev->msi_enabled = 0;
/* Return the device with MSI unmasked as initial states */
- pci_msi_unmask(desc, msi_multi_mask(desc));
+ desc = msi_first_desc(&dev->dev, MSI_DESC_ALL);
+ if (!WARN_ON_ONCE(!desc))
+ pci_msi_unmask(desc, msi_multi_mask(desc));
/* Restore dev->irq to its default pin-assertion IRQ */
- dev->irq = desc->msi_attrib.default_irq;
+ dev->irq = desc->pci.msi_attrib.default_irq;
pcibios_alloc_irq(dev);
}
@@ -890,8 +765,10 @@ void pci_disable_msi(struct pci_dev *dev)
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
+ msi_lock_descs(&dev->dev);
pci_msi_shutdown(dev);
free_msi_irqs(dev);
+ msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msi);
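pci_disable_msi() (and pci_disable_msix() below) now hold the per-device MSI
descriptor lock across shutdown and teardown so that concurrent descriptor
walkers cannot observe a half-freed list. The pattern, shown schematically
with the msi_lock_descs()/msi_unlock_descs() API introduced by this series:

	msi_lock_descs(&dev->dev);	/* serialize descriptor-list access */
	/* ... shut down hardware, free MSI descriptors ... */
	msi_unlock_descs(&dev->dev);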
@@ -952,7 +829,7 @@ static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
static void pci_msix_shutdown(struct pci_dev *dev)
{
- struct msi_desc *entry;
+ struct msi_desc *desc;
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
@@ -963,8 +840,8 @@ static void pci_msix_shutdown(struct pci_dev *dev)
}
/* Return the device with MSI-X masked as initial states */
- for_each_pci_msi_entry(entry, dev)
- pci_msix_mask(entry);
+ msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL)
+ pci_msix_mask(desc);
pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
pci_intx_for_msi(dev, 1);
@@ -977,28 +854,13 @@ void pci_disable_msix(struct pci_dev *dev)
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
+ msi_lock_descs(&dev->dev);
pci_msix_shutdown(dev);
free_msi_irqs(dev);
+ msi_unlock_descs(&dev->dev);
}
EXPORT_SYMBOL(pci_disable_msix);
-void pci_no_msi(void)
-{
- pci_msi_enable = 0;
-}
-
-/**
- * pci_msi_enabled - is MSI enabled?
- *
- * Returns true if MSI has not been disabled by the command-line option
- * pci=nomsi.
- **/
-int pci_msi_enabled(void)
-{
- return pci_msi_enable;
-}
-EXPORT_SYMBOL(pci_msi_enabled);
-
static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
struct irq_affinity *affd)
{
@@ -1029,6 +891,10 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
if (nvec > maxvec)
nvec = maxvec;
+ rc = pci_setup_msi_context(dev);
+ if (rc)
+ return rc;
+
for (;;) {
if (affd) {
nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
@@ -1072,6 +938,10 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
if (WARN_ON_ONCE(dev->msix_enabled))
return -EINVAL;
+ rc = pci_setup_msi_context(dev);
+ if (rc)
+ return rc;
+
for (;;) {
if (affd) {
nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
@@ -1194,35 +1064,25 @@ EXPORT_SYMBOL(pci_free_irq_vectors);
/**
* pci_irq_vector - return Linux IRQ number of a device vector
- * @dev: PCI device to operate on
- * @nr: device-relative interrupt vector index (0-based).
+ * @dev: PCI device to operate on
+ * @nr: Interrupt vector index (0-based)
+ *
+ * @nr has the following meanings depending on the interrupt mode:
+ * MSI-X: The index in the MSI-X vector table
+ * MSI: The index of the enabled MSI vectors
+ * INTx: Must be 0
+ *
+ * Return: The Linux interrupt number or -EINVAL if @nr is out of range.
*/
int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
- if (dev->msix_enabled) {
- struct msi_desc *entry;
- int i = 0;
-
- for_each_pci_msi_entry(entry, dev) {
- if (i == nr)
- return entry->irq;
- i++;
- }
- WARN_ON_ONCE(1);
- return -EINVAL;
- }
+ unsigned int irq;
- if (dev->msi_enabled) {
- struct msi_desc *entry = first_pci_msi_entry(dev);
-
- if (WARN_ON_ONCE(nr >= entry->nvec_used))
- return -EINVAL;
- } else {
- if (WARN_ON_ONCE(nr > 0))
- return -EINVAL;
- }
+ if (!dev->msi_enabled && !dev->msix_enabled)
+ return !nr ? dev->irq : -EINVAL;
- return dev->irq + nr;
+ irq = msi_get_virq(&dev->dev, nr);
+ return irq ? irq : -EINVAL;
}
EXPORT_SYMBOL(pci_irq_vector);
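Driver-side usage is unchanged by this rework. A minimal sketch of the
expected call pattern (hypothetical driver fragment, error handling
abbreviated):

static int demo_setup_irqs(struct pci_dev *pdev)
{
	int nvec, i, irq;

	nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		irq = pci_irq_vector(pdev, i);	/* Linux IRQ for vector i */
		if (irq < 0)
			return irq;
		/* request_irq(irq, handler, 0, "demo", data) would go here */
	}
	return 0;
}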
@@ -1230,332 +1090,59 @@ EXPORT_SYMBOL(pci_irq_vector);
* pci_irq_get_affinity - return the affinity of a particular MSI vector
* @dev: PCI device to operate on
* @nr: device-relative interrupt vector index (0-based).
+ *
+ * @nr has the following meanings depending on the interrupt mode:
+ * MSI-X: The index in the MSI-X vector table
+ * MSI: The index of the enabled MSI vectors
+ * INTx: Must be 0
+ *
+ * Return: A cpumask pointer or NULL if @nr is out of range.
*/
const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
{
- if (dev->msix_enabled) {
- struct msi_desc *entry;
- int i = 0;
+ int idx, irq = pci_irq_vector(dev, nr);
+ struct msi_desc *desc;
- for_each_pci_msi_entry(entry, dev) {
- if (i == nr)
- return &entry->affinity->mask;
- i++;
- }
- WARN_ON_ONCE(1);
+ if (WARN_ON_ONCE(irq <= 0))
return NULL;
- } else if (dev->msi_enabled) {
- struct msi_desc *entry = first_pci_msi_entry(dev);
- if (WARN_ON_ONCE(!entry || !entry->affinity ||
- nr >= entry->nvec_used))
- return NULL;
-
- return &entry->affinity[nr].mask;
- } else {
+ desc = irq_get_msi_desc(irq);
+ /* Non-MSI does not have the information handy */
+ if (!desc)
return cpu_possible_mask;
- }
-}
-EXPORT_SYMBOL(pci_irq_get_affinity);
-struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
-{
- return to_pci_dev(desc->dev);
-}
-EXPORT_SYMBOL(msi_desc_to_pci_dev);
-
-void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
-{
- struct pci_dev *dev = msi_desc_to_pci_dev(desc);
-
- return dev->bus->sysdata;
-}
-EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata);
-
-#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
-/**
- * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
- * @irq_data: Pointer to interrupt data of the MSI interrupt
- * @msg: Pointer to the message
- */
-void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
-{
- struct msi_desc *desc = irq_data_get_msi_desc(irq_data);
+ /* MSI[X] interrupts can be allocated without an affinity descriptor */
+ if (!desc->affinity)
+ return NULL;
/*
- * For MSI-X desc->irq is always equal to irq_data->irq. For
- * MSI only the first interrupt of MULTI MSI passes the test.
+ * MSI stores the affinity masks of all vectors in a single descriptor,
+ * while MSI-X has one descriptor (and one mask) per vector.
*/
- if (desc->irq == irq_data->irq)
- __pci_write_msi_msg(desc, msg);
-}
-
-/**
- * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
- * @desc: Pointer to the MSI descriptor
- *
- * The ID number is only used within the irqdomain.
- */
-static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
-{
- struct pci_dev *dev = msi_desc_to_pci_dev(desc);
-
- return (irq_hw_number_t)desc->msi_attrib.entry_nr |
- pci_dev_id(dev) << 11 |
- (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
-}
-
-static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc)
-{
- return !desc->msi_attrib.is_msix && desc->nvec_used > 1;
-}
-
-/**
- * pci_msi_domain_check_cap - Verify that @domain supports the capabilities
- * for @dev
- * @domain: The interrupt domain to check
- * @info: The domain info for verification
- * @dev: The device to check
- *
- * Returns:
- * 0 if the functionality is supported
- * 1 if Multi MSI is requested, but the domain does not support it
- * -ENOTSUPP otherwise
- */
-int pci_msi_domain_check_cap(struct irq_domain *domain,
- struct msi_domain_info *info, struct device *dev)
-{
- struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev));
-
- /* Special handling to support __pci_enable_msi_range() */
- if (pci_msi_desc_is_multi_msi(desc) &&
- !(info->flags & MSI_FLAG_MULTI_PCI_MSI))
- return 1;
- else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX))
- return -ENOTSUPP;
-
- return 0;
-}
-
-static int pci_msi_domain_handle_error(struct irq_domain *domain,
- struct msi_desc *desc, int error)
-{
- /* Special handling to support __pci_enable_msi_range() */
- if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC)
- return 1;
-
- return error;
-}
-
-static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
- struct msi_desc *desc)
-{
- arg->desc = desc;
- arg->hwirq = pci_msi_domain_calc_hwirq(desc);
-}
-
-static struct msi_domain_ops pci_msi_domain_ops_default = {
- .set_desc = pci_msi_domain_set_desc,
- .msi_check = pci_msi_domain_check_cap,
- .handle_error = pci_msi_domain_handle_error,
-};
-
-static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
-{
- struct msi_domain_ops *ops = info->ops;
-
- if (ops == NULL) {
- info->ops = &pci_msi_domain_ops_default;
- } else {
- if (ops->set_desc == NULL)
- ops->set_desc = pci_msi_domain_set_desc;
- if (ops->msi_check == NULL)
- ops->msi_check = pci_msi_domain_check_cap;
- if (ops->handle_error == NULL)
- ops->handle_error = pci_msi_domain_handle_error;
- }
-}
-
-static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
-{
- struct irq_chip *chip = info->chip;
-
- BUG_ON(!chip);
- if (!chip->irq_write_msi_msg)
- chip->irq_write_msi_msg = pci_msi_domain_write_msg;
- if (!chip->irq_mask)
- chip->irq_mask = pci_msi_mask_irq;
- if (!chip->irq_unmask)
- chip->irq_unmask = pci_msi_unmask_irq;
-}
-
-/**
- * pci_msi_create_irq_domain - Create a MSI interrupt domain
- * @fwnode: Optional fwnode of the interrupt controller
- * @info: MSI domain info
- * @parent: Parent irq domain
- *
- * Updates the domain and chip ops and creates a MSI interrupt domain.
- *
- * Returns:
- * A domain pointer or NULL in case of failure.
- */
-struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
- struct msi_domain_info *info,
- struct irq_domain *parent)
-{
- struct irq_domain *domain;
-
- if (WARN_ON(info->flags & MSI_FLAG_LEVEL_CAPABLE))
- info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
-
- if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
- pci_msi_domain_update_dom_ops(info);
- if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
- pci_msi_domain_update_chip_ops(info);
-
- info->flags |= MSI_FLAG_ACTIVATE_EARLY;
- if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
- info->flags |= MSI_FLAG_MUST_REACTIVATE;
-
- /* PCI-MSI is oneshot-safe */
- info->chip->flags |= IRQCHIP_ONESHOT_SAFE;
-
- domain = msi_create_irq_domain(fwnode, info, parent);
- if (!domain)
- return NULL;
-
- irq_domain_update_bus_token(domain, DOMAIN_BUS_PCI_MSI);
- return domain;
-}
-EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain);
-
-/*
- * Users of the generic MSI infrastructure expect a device to have a single ID,
- * so with DMA aliases we have to pick the least-worst compromise. Devices with
- * DMA phantom functions tend to still emit MSIs from the real function number,
- * so we ignore those and only consider topological aliases where either the
- * alias device or RID appears on a different bus number. We also make the
- * reasonable assumption that bridges are walked in an upstream direction (so
- * the last one seen wins), and the much braver assumption that the most likely
- * case is that of PCI->PCIe so we should always use the alias RID. This echoes
- * the logic from intel_irq_remapping's set_msi_sid(), which presumably works
- * well enough in practice; in the face of the horrible PCIe<->PCI-X conditions
- * for taking ownership all we can really do is close our eyes and hope...
- */
-static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
-{
- u32 *pa = data;
- u8 bus = PCI_BUS_NUM(*pa);
-
- if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus)
- *pa = alias;
-
- return 0;
+ idx = dev->msi_enabled ? nr : 0;
+ return &desc->affinity[idx].mask;
}
+EXPORT_SYMBOL(pci_irq_get_affinity);
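A sketch of how a driver might consume the affinity lookup when spreading
queues over CPUs (hypothetical fragment; assumes the vectors were allocated
with PCI_IRQ_AFFINITY so that desc->affinity is populated):

	const struct cpumask *mask = pci_irq_get_affinity(pdev, nr);

	if (mask)
		pr_info("vector %d serviced by CPUs %*pbl\n",
			nr, cpumask_pr_args(mask));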
-/**
- * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
- * @domain: The interrupt domain
- * @pdev: The PCI device.
- *
- * The RID for a device is formed from the alias, with a firmware
- * supplied mapping applied
- *
- * Returns: The RID.
- */
-u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
{
- struct device_node *of_node;
- u32 rid = pci_dev_id(pdev);
-
- pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
-
- of_node = irq_domain_get_of_node(domain);
- rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
- iort_msi_map_id(&pdev->dev, rid);
-
- return rid;
+ return to_pci_dev(desc->dev);
}
+EXPORT_SYMBOL(msi_desc_to_pci_dev);
-/**
- * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
- * @pdev: The PCI device
- *
- * Use the firmware data to find a device-specific MSI domain
- * (i.e. not one that is set as a default).
- *
- * Returns: The corresponding MSI domain or NULL if none has been found.
- */
-struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
+void pci_no_msi(void)
{
- struct irq_domain *dom;
- u32 rid = pci_dev_id(pdev);
-
- pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
- dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI);
- if (!dom)
- dom = iort_get_device_domain(&pdev->dev, rid,
- DOMAIN_BUS_PCI_MSI);
- return dom;
+ pci_msi_enable = 0;
}
/**
- * pci_dev_has_special_msi_domain - Check whether the device is handled by
- * a non-standard PCI-MSI domain
- * @pdev: The PCI device to check.
+ * pci_msi_enabled - is MSI enabled?
*
- * Returns: True if the device irqdomain or the bus irqdomain is
- * non-standard PCI/MSI.
- */
-bool pci_dev_has_special_msi_domain(struct pci_dev *pdev)
-{
- struct irq_domain *dom = dev_get_msi_domain(&pdev->dev);
-
- if (!dom)
- dom = dev_get_msi_domain(&pdev->bus->dev);
-
- if (!dom)
- return true;
-
- return dom->bus_token != DOMAIN_BUS_PCI_MSI;
-}
-
-#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
-#endif /* CONFIG_PCI_MSI */
-
-void pci_msi_init(struct pci_dev *dev)
-{
- u16 ctrl;
-
- /*
- * Disable the MSI hardware to avoid screaming interrupts
- * during boot. This is the power on reset default so
- * usually this should be a noop.
- */
- dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
- if (!dev->msi_cap)
- return;
-
- pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
- if (ctrl & PCI_MSI_FLAGS_ENABLE)
- pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS,
- ctrl & ~PCI_MSI_FLAGS_ENABLE);
-
- if (!(ctrl & PCI_MSI_FLAGS_64BIT))
- dev->no_64bit_msi = 1;
-}
-
-void pci_msix_init(struct pci_dev *dev)
+ * Returns true if MSI has not been disabled by the command-line option
+ * pci=nomsi.
+ **/
+int pci_msi_enabled(void)
{
- u16 ctrl;
-
- dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- if (!dev->msix_cap)
- return;
-
- pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
- if (ctrl & PCI_MSIX_FLAGS_ENABLE)
- pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS,
- ctrl & ~PCI_MSIX_FLAGS_ENABLE);
+ return pci_msi_enable;
}
+EXPORT_SYMBOL(pci_msi_enabled);
diff --git a/drivers/pci/msi/msi.h b/drivers/pci/msi/msi.h
new file mode 100644
index 000000000000..dbeff066bedd
--- /dev/null
+++ b/drivers/pci/msi/msi.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/pci.h>
+#include <linux/msi.h>
+
+#define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
+
+extern int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+extern void pci_msi_teardown_msi_irqs(struct pci_dev *dev);
+
+#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
+extern int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+extern void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev);
+#else
+static inline int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+ WARN_ON_ONCE(1);
+ return -ENODEV;
+}
+
+static inline void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev)
+{
+ WARN_ON_ONCE(1);
+}
+#endif
+
+/*
+ * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
+ * mask all MSI interrupts by clearing the MSI enable bit does not work
+ * reliably as devices without an INTx disable bit will then generate a
+ * level IRQ which will never be cleared.
+ */
+static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
+{
+ /* Don't shift by >= width of type */
+ if (desc->pci.msi_attrib.multi_cap >= 5)
+ return 0xffffffff;
+ return (1 << (1 << desc->pci.msi_attrib.multi_cap)) - 1;
+}
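Both helpers in this header decode "minus one" or log2-encoded hardware
fields; a few worked values (illustrative):

/*
 * msix_table_size(): PCI_MSIX_FLAGS_QSIZE holds the table size minus one,
 *   flags & QSIZE = 0x000 -> 1 entry, 0x7ff -> 2048 entries.
 *
 * msi_multi_mask(): multi_cap is the log2 of the vector count,
 *   multi_cap = 0 -> 1 vector  -> mask 0x00000001
 *   multi_cap = 3 -> 8 vectors -> mask 0x000000ff
 *   multi_cap >= 5 would shift by >= 32 bits, hence the 0xffffffff cap.
 */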
diff --git a/drivers/pci/msi/pcidev_msi.c b/drivers/pci/msi/pcidev_msi.c
new file mode 100644
index 000000000000..5520aff53b56
--- /dev/null
+++ b/drivers/pci/msi/pcidev_msi.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MSI[X] related functions which are available unconditionally.
+ */
+#include "../pci.h"
+
+/*
+ * Disable the MSI[X] hardware to avoid screaming interrupts during boot.
+ * This is the power-on reset default, so usually this should be a no-op.
+ */
+
+void pci_msi_init(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (!dev->msi_cap)
+ return;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
+ if (ctrl & PCI_MSI_FLAGS_ENABLE) {
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS,
+ ctrl & ~PCI_MSI_FLAGS_ENABLE);
+ }
+
+ if (!(ctrl & PCI_MSI_FLAGS_64BIT))
+ dev->no_64bit_msi = 1;
+}
+
+void pci_msix_init(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (!dev->msix_cap)
+ return;
+
+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+ if (ctrl & PCI_MSIX_FLAGS_ENABLE) {
+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS,
+ ctrl & ~PCI_MSIX_FLAGS_ENABLE);
+ }
+}
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index 0b1237cff239..cb2e8351c2cc 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -247,7 +247,7 @@ void of_pci_check_probe_only(void)
else
pci_clear_flags(PCI_PROBE_ONLY);
- pr_info("PROBE_ONLY %sabled\n", val ? "en" : "dis");
+ pr_info("PROBE_ONLY %s\n", val ? "enabled" : "disabled");
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 8d47cb7218d1..1015274bd2fe 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -219,7 +219,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
error = gen_pool_add_owner(p2pdma->pool, (unsigned long)addr,
pci_bus_address(pdev, bar) + offset,
range_len(&pgmap->range), dev_to_node(&pdev->dev),
- pgmap->ref);
+ &pgmap->ref);
if (error)
goto pages_free;
@@ -710,7 +710,7 @@ void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
if (!ret)
goto out;
- if (unlikely(!percpu_ref_tryget_live(ref))) {
+ if (unlikely(!percpu_ref_tryget_live_rcu(ref))) {
gen_pool_free(p2pdma->pool, (unsigned long) ret, size);
ret = NULL;
goto out;
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index db97cddfc85e..c994ebec2360 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -139,8 +139,13 @@ struct pci_bridge_reg_behavior pci_regs_behavior[PCI_STD_HEADER_SIZEOF / 4] = {
.ro = GENMASK(7, 0),
},
+ /*
+ * If the expansion ROM is unsupported, the ROM Base Address register must
+ * be implemented as a read-only register that returns 0 when read, the
+ * same as for unused Base Address registers.
+ */
[PCI_ROM_ADDRESS1 / 4] = {
- .rw = GENMASK(31, 11) | BIT(0),
+ .ro = ~0,
},
/*
@@ -171,41 +176,55 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] =
[PCI_CAP_LIST_ID / 4] = {
/*
* Capability ID, Next Capability Pointer and
- * Capabilities register are all read-only.
+ * bits [14:0] of Capabilities register are all read-only.
+ * Bit 15 of Capabilities register is reserved.
*/
- .ro = ~0,
+ .ro = GENMASK(30, 0),
},
[PCI_EXP_DEVCAP / 4] = {
- .ro = ~0,
+ /*
+ * Bits [31:29] and [17:16] are reserved.
+ * Bits [27:18] are reserved for non-upstream ports.
+ * Bits 28 and [14:6] are reserved for non-endpoint devices.
+ * Other bits are read-only.
+ */
+ .ro = BIT(15) | GENMASK(5, 0),
},
[PCI_EXP_DEVCTL / 4] = {
- /* Device control register is RW */
- .rw = GENMASK(15, 0),
+ /*
+ * Device control register is RW, except bit 15 which is
+ * reserved for non-endpoints or non-PCIe-to-PCI/X bridges.
+ */
+ .rw = GENMASK(14, 0),
/*
* Device status register has bits 6 and [3:0] W1C, [5:4] RO,
- * the rest is reserved
+ * the rest is reserved. Also bit 6 is reserved for non-upstream
+ * ports.
*/
- .w1c = (BIT(6) | GENMASK(3, 0)) << 16,
+ .w1c = GENMASK(3, 0) << 16,
.ro = GENMASK(5, 4) << 16,
},
[PCI_EXP_LNKCAP / 4] = {
- /* All bits are RO, except bit 23 which is reserved */
- .ro = lower_32_bits(~BIT(23)),
+ /*
+ * All bits are RO, except bit 23 which is reserved and
+ * bit 18 which is reserved for non-upstream ports.
+ */
+ .ro = lower_32_bits(~(BIT(23) | PCI_EXP_LNKCAP_CLKPM)),
},
[PCI_EXP_LNKCTL / 4] = {
/*
* Link control has bits [15:14], [11:3] and [1:0] RW, the
- * rest is reserved.
+ * rest is reserved. Bit 8 is reserved for non-upstream ports.
*
* Link status has bits [13:0] RO, and bits [15:14]
* W1C.
*/
- .rw = GENMASK(15, 14) | GENMASK(11, 3) | GENMASK(1, 0),
+ .rw = GENMASK(15, 14) | GENMASK(11, 9) | GENMASK(7, 3) | GENMASK(1, 0),
.ro = GENMASK(13, 0) << 16,
.w1c = GENMASK(15, 14) << 16,
},
@@ -251,6 +270,49 @@ struct pci_bridge_reg_behavior pcie_cap_regs_behavior[PCI_CAP_PCIE_SIZEOF / 4] =
.ro = GENMASK(15, 0) | PCI_EXP_RTSTA_PENDING,
.w1c = PCI_EXP_RTSTA_PME,
},
+
+ [PCI_EXP_DEVCAP2 / 4] = {
+ /*
+ * Device capabilities 2 register has reserved bits [30:27].
+ * Also bits [26:24] are reserved for non-upstream ports.
+ */
+ .ro = BIT(31) | GENMASK(23, 0),
+ },
+
+ [PCI_EXP_DEVCTL2 / 4] = {
+ /*
+ * Device control 2 register is RW. Bit 11 is reserved for
+ * non-upstream ports.
+ *
+ * Device status 2 register is reserved.
+ */
+ .rw = GENMASK(15, 12) | GENMASK(10, 0),
+ },
+
+ [PCI_EXP_LNKCAP2 / 4] = {
+ /* Link capabilities 2 register has reserved bits [30:25] and bit 0. */
+ .ro = BIT(31) | GENMASK(24, 1),
+ },
+
+ [PCI_EXP_LNKCTL2 / 4] = {
+ /*
+ * Link control 2 register is RW.
+ *
+ * Link status 2 register has bits 5, 15 W1C;
+ * bits 10, 11 reserved and others are RO.
+ */
+ .rw = GENMASK(15, 0),
+ .w1c = (BIT(15) | BIT(5)) << 16,
+ .ro = (GENMASK(14, 12) | GENMASK(9, 6) | GENMASK(4, 0)) << 16,
+ },
+
+ [PCI_EXP_SLTCAP2 / 4] = {
+ /* Slot capabilities 2 register is reserved. */
+ },
+
+ [PCI_EXP_SLTCTL2 / 4] = {
+ /* Both Slot control 2 and Slot status 2 registers are reserved. */
+ },
};
/*
@@ -265,7 +327,11 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
{
BUILD_BUG_ON(sizeof(bridge->conf) != PCI_BRIDGE_CONF_END);
- bridge->conf.class_revision |= cpu_to_le32(PCI_CLASS_BRIDGE_PCI << 16);
+ /*
+ * class_revision: The class code is the high 24 bits and the revision ID
+ * is the low 8 bits of this member. The 24-bit class code for a PCI
+ * bridge with normal decode is PCI_CLASS_BRIDGE_PCI << 8.
+ */
+ bridge->conf.class_revision |= cpu_to_le32((PCI_CLASS_BRIDGE_PCI << 8) << 8);
bridge->conf.header_type = PCI_HEADER_TYPE_BRIDGE;
bridge->conf.cache_line_size = 0x10;
bridge->conf.status = cpu_to_le16(PCI_STATUS_CAP_LIST);
@@ -277,11 +343,9 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
if (bridge->has_pcie) {
bridge->conf.capabilities_pointer = PCI_CAP_PCIE_START;
+ bridge->conf.status |= cpu_to_le16(PCI_STATUS_CAP_LIST);
bridge->pcie_conf.cap_id = PCI_CAP_ID_EXP;
- /* Set PCIe v2, root port, slot support */
- bridge->pcie_conf.cap =
- cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
- PCI_EXP_FLAGS_SLOT);
+ bridge->pcie_conf.cap |= cpu_to_le16(PCI_EXP_TYPE_ROOT_PORT << 4);
bridge->pcie_cap_regs_behavior =
kmemdup(pcie_cap_regs_behavior,
sizeof(pcie_cap_regs_behavior),
@@ -290,6 +354,27 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
kfree(bridge->pci_regs_behavior);
return -ENOMEM;
}
+ /* These bits are applicable only for PCI and reserved on PCIe */
+ bridge->pci_regs_behavior[PCI_CACHE_LINE_SIZE / 4].ro &=
+ ~GENMASK(15, 8);
+ bridge->pci_regs_behavior[PCI_COMMAND / 4].ro &=
+ ~((PCI_COMMAND_SPECIAL | PCI_COMMAND_INVALIDATE |
+ PCI_COMMAND_VGA_PALETTE | PCI_COMMAND_WAIT |
+ PCI_COMMAND_FAST_BACK) |
+ (PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK |
+ PCI_STATUS_DEVSEL_MASK) << 16);
+ bridge->pci_regs_behavior[PCI_PRIMARY_BUS / 4].ro &=
+ ~GENMASK(31, 24);
+ bridge->pci_regs_behavior[PCI_IO_BASE / 4].ro &=
+ ~((PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK |
+ PCI_STATUS_DEVSEL_MASK) << 16);
+ bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].rw &=
+ ~((PCI_BRIDGE_CTL_MASTER_ABORT |
+ BIT(8) | BIT(9) | BIT(11)) << 16);
+ bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].ro &=
+ ~((PCI_BRIDGE_CTL_FAST_BACK) << 16);
+ bridge->pci_regs_behavior[PCI_INTERRUPT_LINE / 4].w1c &=
+ ~(BIT(10) << 16);
}
if (flags & PCI_BRIDGE_EMUL_NO_PREFETCHABLE_BAR) {
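The class_revision fix above can be checked by hand (PCI_CLASS_BRIDGE_PCI
is the 16-bit base/sub-class value 0x0604):

/*
 * (PCI_CLASS_BRIDGE_PCI << 8) << 8 = (0x0604 << 8) << 8 = 0x06040000,
 * i.e. class code 0x060400 (PCI-to-PCI bridge, normal decode) in bits
 * 31:8, leaving the revision ID in bits 7:0 untouched.
 */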
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index cfe2f85af09e..602f0fb0b007 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -62,11 +62,8 @@ static ssize_t irq_show(struct device *dev,
* For MSI, show the first MSI IRQ; for all other cases including
* MSI-X, show the legacy INTx IRQ.
*/
- if (pdev->msi_enabled) {
- struct msi_desc *desc = first_pci_msi_entry(pdev);
-
- return sysfs_emit(buf, "%u\n", desc->irq);
- }
+ if (pdev->msi_enabled)
+ return sysfs_emit(buf, "%u\n", pci_irq_vector(pdev, 0));
#endif
return sysfs_emit(buf, "%u\n", pdev->irq);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 3d2fb394986a..9ecce435fb3f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1115,7 +1115,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
return -EIO;
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
- if (pmcsr == (u16) ~0) {
+ if (PCI_POSSIBLE_ERROR(pmcsr)) {
pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
pci_power_name(dev->current_state),
pci_power_name(state));
@@ -1271,16 +1271,16 @@ static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
* After reset, the device should not silently discard config
* requests, but it may still indicate that it needs more time by
* responding to them with CRS completions. The Root Port will
- * generally synthesize ~0 data to complete the read (except when
- * CRS SV is enabled and the read was for the Vendor ID; in that
- * case it synthesizes 0x0001 data).
+ * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
+ * the read (except when CRS SV is enabled and the read was for the
+ * Vendor ID; in that case it synthesizes 0x0001 data).
*
* Wait for the device to return a non-CRS completion. Read the
* Command register instead of Vendor ID so we don't have to
* contend with the CRS SV value.
*/
pci_read_config_dword(dev, PCI_COMMAND, &id);
- while (id == ~0) {
+ while (PCI_POSSIBLE_ERROR(id)) {
if (delay > timeout) {
pci_warn(dev, "not ready %dms after %s; giving up\n",
delay - 1, reset_type);
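The PCI_POSSIBLE_ERROR() conversions in this file test for the all-ones
value that a failed config read synthesizes; the helper introduced by this
series in include/linux/pci.h is essentially:

#define PCI_ERROR_RESPONSE	(~0ULL)
#define PCI_POSSIBLE_ERROR(val)	((val) == ((typeof(val))PCI_ERROR_RESPONSE))

The typeof cast truncates the error pattern to the width of the value that
was read, so the same test works for u16 and u32 config reads.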
@@ -1556,7 +1556,7 @@ static void pci_save_ltr_state(struct pci_dev *dev)
{
int ltr;
struct pci_cap_saved_state *save_state;
- u16 *cap;
+ u32 *cap;
if (!pci_is_pcie(dev))
return;
@@ -1571,25 +1571,25 @@ static void pci_save_ltr_state(struct pci_dev *dev)
return;
}
- cap = (u16 *)&save_state->cap.data[0];
- pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
- pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
+ /* Some broken devices only support dword access to LTR */
+ cap = &save_state->cap.data[0];
+ pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
}
static void pci_restore_ltr_state(struct pci_dev *dev)
{
struct pci_cap_saved_state *save_state;
int ltr;
- u16 *cap;
+ u32 *cap;
save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
if (!save_state || !ltr)
return;
- cap = (u16 *)&save_state->cap.data[0];
- pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
- pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
+ /* Some broken devices only support dword access to LTR */
+ cap = &save_state->cap.data[0];
+ pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
}
/**
@@ -2024,11 +2024,6 @@ static void pcim_release(struct device *gendev, void *res)
struct pci_devres *this = res;
int i;
- if (dev->msi_enabled)
- pci_disable_msi(dev);
- if (dev->msix_enabled)
- pci_disable_msix(dev);
-
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
if (this->region_mask & (1 << i))
pci_release_region(dev, i);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 52c74682601a..a96b7424c9bc 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -41,11 +41,6 @@
#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1 | \
ASPM_STATE_L1SS)
-struct aspm_latency {
- u32 l0s; /* L0s latency (nsec) */
- u32 l1; /* L1 latency (nsec) */
-};
-
struct pcie_link_state {
struct pci_dev *pdev; /* Upstream component of the Link */
struct pci_dev *downstream; /* Downstream component, function 0 */
@@ -65,15 +60,6 @@ struct pcie_link_state {
u32 clkpm_enabled:1; /* Current Clock PM state */
u32 clkpm_default:1; /* Default Clock PM state by BIOS */
u32 clkpm_disable:1; /* Clock PM disabled */
-
- /* Exit latencies */
- struct aspm_latency latency_up; /* Upstream direction exit latency */
- struct aspm_latency latency_dw; /* Downstream direction exit latency */
- /*
- * Endpoint acceptable latencies. A pcie downstream port only
- * has one slot under it, so at most there are 8 functions.
- */
- struct aspm_latency acceptable[8];
};
static int aspm_disabled, aspm_force;
@@ -105,6 +91,20 @@ static const char *policy_str[] = {
#define LINK_RETRAIN_TIMEOUT HZ
+/*
+ * The L1 PM substate capability is only implemented in function 0 in a
+ * multi-function device.
+ */
+static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
+{
+ struct pci_dev *child;
+
+ list_for_each_entry(child, &linkbus->devices, bus_list)
+ if (PCI_FUNC(child->devfn) == 0)
+ return child;
+ return NULL;
+}
+
static int policy_to_aspm_state(struct pcie_link_state *link)
{
switch (aspm_policy) {
@@ -378,8 +378,10 @@ static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
{
- u32 latency, l1_switch_latency = 0;
- struct aspm_latency *acceptable;
+ u32 latency, encoding, lnkcap_up, lnkcap_dw;
+ u32 l1_switch_latency = 0, latency_up_l0s;
+ u32 latency_up_l1, latency_dw_l0s, latency_dw_l1;
+ u32 acceptable_l0s, acceptable_l1;
struct pcie_link_state *link;
/* Device not in D0 doesn't need latency check */
@@ -388,17 +390,36 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
return;
link = endpoint->bus->self->link_state;
- acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
+
+ /* Calculate endpoint L0s acceptable latency */
+ encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L0S) >> 6;
+ acceptable_l0s = calc_l0s_acceptable(encoding);
+
+ /* Calculate endpoint L1 acceptable latency */
+ encoding = (endpoint->devcap & PCI_EXP_DEVCAP_L1) >> 9;
+ acceptable_l1 = calc_l1_acceptable(encoding);
while (link) {
+ struct pci_dev *dev = pci_function_0(link->pdev->subordinate);
+
+ /* Read direction exit latencies */
+ pcie_capability_read_dword(link->pdev, PCI_EXP_LNKCAP,
+ &lnkcap_up);
+ pcie_capability_read_dword(dev, PCI_EXP_LNKCAP,
+ &lnkcap_dw);
+ latency_up_l0s = calc_l0s_latency(lnkcap_up);
+ latency_up_l1 = calc_l1_latency(lnkcap_up);
+ latency_dw_l0s = calc_l0s_latency(lnkcap_dw);
+ latency_dw_l1 = calc_l1_latency(lnkcap_dw);
+
/* Check upstream direction L0s latency */
if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
- (link->latency_up.l0s > acceptable->l0s))
+ (latency_up_l0s > acceptable_l0s))
link->aspm_capable &= ~ASPM_STATE_L0S_UP;
/* Check downstream direction L0s latency */
if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
- (link->latency_dw.l0s > acceptable->l0s))
+ (latency_dw_l0s > acceptable_l0s))
link->aspm_capable &= ~ASPM_STATE_L0S_DW;
/*
* Check L1 latency.
@@ -413,9 +434,9 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
* L1 exit latencies advertised by a device include L1
* substate latencies (and hence do not do any check).
*/
- latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
+ latency = max_t(u32, latency_up_l1, latency_dw_l1);
if ((link->aspm_capable & ASPM_STATE_L1) &&
- (latency + l1_switch_latency > acceptable->l1))
+ (latency + l1_switch_latency > acceptable_l1))
link->aspm_capable &= ~ASPM_STATE_L1;
l1_switch_latency += 1000;
@@ -423,20 +444,6 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint)
}
}
-/*
- * The L1 PM substate capability is only implemented in function 0 in a
- * multi function device.
- */
-static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
-{
- struct pci_dev *child;
-
- list_for_each_entry(child, &linkbus->devices, bus_list)
- if (PCI_FUNC(child->devfn) == 0)
- return child;
- return NULL;
-}
-
static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
u32 clear, u32 set)
{
@@ -496,6 +503,7 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
encode_l12_threshold(l1_2_threshold, &scale, &value);
ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
+ /* Some broken devices only support dword access to L1 SS */
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
@@ -593,8 +601,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
link->aspm_enabled |= ASPM_STATE_L0S_UP;
if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
link->aspm_enabled |= ASPM_STATE_L0S_DW;
- link->latency_up.l0s = calc_l0s_latency(parent_lnkcap);
- link->latency_dw.l0s = calc_l0s_latency(child_lnkcap);
/* Setup L1 state */
if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
@@ -602,8 +608,6 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
link->aspm_enabled |= ASPM_STATE_L1;
- link->latency_up.l1 = calc_l1_latency(parent_lnkcap);
- link->latency_dw.l1 = calc_l1_latency(child_lnkcap);
/* Setup L1 substate */
pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
@@ -660,22 +664,10 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
/* Get and check endpoint acceptable latencies */
list_for_each_entry(child, &linkbus->devices, bus_list) {
- u32 reg32, encoding;
- struct aspm_latency *acceptable =
- &link->acceptable[PCI_FUNC(child->devfn)];
-
if (pci_pcie_type(child) != PCI_EXP_TYPE_ENDPOINT &&
pci_pcie_type(child) != PCI_EXP_TYPE_LEG_END)
continue;
- pcie_capability_read_dword(child, PCI_EXP_DEVCAP, &reg32);
- /* Calculate endpoint L0s acceptable latency */
- encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
- acceptable->l0s = calc_l0s_acceptable(encoding);
- /* Calculate endpoint L1 acceptable latency */
- encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
- acceptable->l1 = calc_l1_acceptable(encoding);
-
pcie_aspm_check_latency(child);
}
}
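The rewritten check accumulates 1 us of additional L1 exit latency per
switch between the endpoint and the root complex, per the rule noted in the
comment above the loop. A worked example under assumed latencies
(illustrative only):

/*
 * Endpoint <-> switch <-> root port, acceptable_l1 = 8 us:
 *   link 1: max(latency_up_l1, latency_dw_l1) = 4 us,
 *           l1_switch_latency = 0    -> 4 us <= 8 us, L1 stays enabled
 *   link 2: max latency = 4 us,
 *           l1_switch_latency = 1 us -> 5 us <= 8 us, L1 stays enabled
 * Each further switch hop adds another 1 us until the endpoint's
 * acceptable latency is exceeded and ASPM_STATE_L1 is cleared.
 */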
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index c556e7beafe3..3e9afee02e8d 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -79,7 +79,7 @@ static bool dpc_completed(struct pci_dev *pdev)
u16 status;
pci_read_config_word(pdev, pdev->dpc_cap + PCI_EXP_DPC_STATUS, &status);
- if ((status != 0xffff) && (status & PCI_EXP_DPC_STATUS_TRIGGER))
+ if ((!PCI_POSSIBLE_ERROR(status)) && (status & PCI_EXP_DPC_STATUS_TRIGGER))
return false;
if (test_bit(PCI_DPC_RECOVERING, &pdev->priv_flags))
@@ -312,7 +312,7 @@ static irqreturn_t dpc_irq(int irq, void *context)
pci_read_config_word(pdev, cap + PCI_EXP_DPC_STATUS, &status);
- if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || status == (u16)(~0))
+ if (!(status & PCI_EXP_DPC_STATUS_INTERRUPT) || PCI_POSSIBLE_ERROR(status))
return IRQ_NONE;
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 1d0dd77fed3a..ef8ce436ead9 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -224,7 +224,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
break;
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
- if (rtsta == (u32) ~0)
+ if (PCI_POSSIBLE_ERROR(rtsta))
break;
if (rtsta & PCI_EXP_RTSTA_PME) {
@@ -274,7 +274,7 @@ static irqreturn_t pcie_pme_irq(int irq, void *context)
spin_lock_irqsave(&data->lock, flags);
pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
- if (rtsta == (u32) ~0 || !(rtsta & PCI_EXP_RTSTA_PME)) {
+ if (PCI_POSSIBLE_ERROR(rtsta) || !(rtsta & PCI_EXP_RTSTA_PME)) {
spin_unlock_irqrestore(&data->lock, flags);
return IRQ_NONE;
}
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index bda630889f95..604feeb84ee4 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -166,6 +166,9 @@ static int pcie_init_service_irqs(struct pci_dev *dev, int *irqs, int mask)
{
int ret, i;
+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++)
+ irqs[i] = -1;
+
/*
* If we support PME but can't use MSI/MSI-X for it, we have to
* fall back to INTx or other interrupts, e.g., a system shared
@@ -314,10 +317,8 @@ static int pcie_device_init(struct pci_dev *pdev, int service, int irq)
*/
int pcie_port_device_register(struct pci_dev *dev)
{
- int status, capabilities, irq_services, i, nr_service;
- int irqs[PCIE_PORT_DEVICE_MAXSERVICES] = {
- [0 ... PCIE_PORT_DEVICE_MAXSERVICES-1] = -1
- };
+ int status, capabilities, i, nr_service;
+ int irqs[PCIE_PORT_DEVICE_MAXSERVICES];
/* Enable PCI Express port device */
status = pci_enable_device(dev);
@@ -330,32 +331,18 @@ int pcie_port_device_register(struct pci_dev *dev)
return 0;
pci_set_master(dev);
-
- irq_services = 0;
- if (IS_ENABLED(CONFIG_PCIE_PME))
- irq_services |= PCIE_PORT_SERVICE_PME;
- if (IS_ENABLED(CONFIG_PCIEAER))
- irq_services |= PCIE_PORT_SERVICE_AER;
- if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
- irq_services |= PCIE_PORT_SERVICE_HP;
- if (IS_ENABLED(CONFIG_PCIE_DPC))
- irq_services |= PCIE_PORT_SERVICE_DPC;
- irq_services &= capabilities;
-
- if (irq_services) {
- /*
- * Initialize service IRQs. Don't use service devices that
- * require interrupts if there is no way to generate them.
- * However, some drivers may have a polling mode (e.g.
- * pciehp_poll_mode) that can be used in the absence of IRQs.
- * Allow them to determine if that is to be used.
- */
- status = pcie_init_service_irqs(dev, irqs, irq_services);
- if (status) {
- irq_services &= PCIE_PORT_SERVICE_HP;
- if (!irq_services)
- goto error_disable;
- }
+ /*
+ * Initialize service irqs. Don't use service devices that
+ * require interrupts if there is no way to generate them.
+ * However, some drivers may have a polling mode (e.g. pciehp_poll_mode)
+ * that can be used in the absence of irqs. Allow them to determine
+ * if that is to be used.
+ */
+ status = pcie_init_service_irqs(dev, irqs, capabilities);
+ if (status) {
+ capabilities &= PCIE_PORT_SERVICE_HP;
+ if (!capabilities)
+ goto error_disable;
}
/* Allocate child services if any */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 087d3658f75c..17a969942d37 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -206,14 +206,14 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
* memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
* 1 must be clear.
*/
- if (sz == 0xffffffff)
+ if (PCI_POSSIBLE_ERROR(sz))
sz = 0;
/*
* I don't know how l can have all bits set. Copied from old code.
* Maybe it fixes a bug on some ancient platform.
*/
- if (l == 0xffffffff)
+ if (PCI_POSSIBLE_ERROR(l))
l = 0;
if (type == pci_bar_unknown) {
@@ -898,8 +898,6 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
bridge->bus = bus;
- /* Temporarily move resources off the list */
- list_splice_init(&bridge->windows, &resources);
bus->sysdata = bridge->sysdata;
bus->ops = bridge->ops;
bus->number = bus->busn_res.start = bridge->busnr;
@@ -925,6 +923,8 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
if (err)
goto free;
+ /* Temporarily move resources off the list */
+ list_splice_init(&bridge->windows, &resources);
err = device_add(&bridge->dev);
if (err) {
put_device(&bridge->dev);
@@ -1579,20 +1579,12 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
static void set_pcie_thunderbolt(struct pci_dev *dev)
{
- int vsec = 0;
- u32 header;
-
- while ((vsec = pci_find_next_ext_capability(dev, vsec,
- PCI_EXT_CAP_ID_VNDR))) {
- pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
+ u16 vsec;
- /* Is the device part of a Thunderbolt controller? */
- if (dev->vendor == PCI_VENDOR_ID_INTEL &&
- PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
- dev->is_thunderbolt = 1;
- return;
- }
- }
+ /* Is the device part of a Thunderbolt controller? */
+ vsec = pci_find_vsec_capability(dev, PCI_VENDOR_ID_INTEL, PCI_VSEC_ID_INTEL_TBT);
+ if (vsec)
+ dev->is_thunderbolt = 1;
}
static void set_pcie_untrusted(struct pci_dev *dev)
@@ -1683,7 +1675,7 @@ static int pci_cfg_space_size_ext(struct pci_dev *dev)
if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
return PCI_CFG_SPACE_SIZE;
- if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
+ if (PCI_POSSIBLE_ERROR(status) || pci_ext_cfg_is_aliased(dev))
return PCI_CFG_SPACE_SIZE;
return PCI_CFG_SPACE_EXP_SIZE;
@@ -2311,7 +2303,9 @@ struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
INIT_LIST_HEAD(&dev->bus_list);
dev->dev.type = &pci_dev_type;
dev->bus = pci_bus_get(bus);
-
+#ifdef CONFIG_PCI_MSI
+ raw_spin_lock_init(&dev->msi_lock);
+#endif
return dev;
}
EXPORT_SYMBOL(pci_alloc_dev);
@@ -2371,8 +2365,8 @@ bool pci_bus_generic_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
return false;
- /* Some broken boards return 0 or ~0 if a slot is empty: */
- if (*l == 0xffffffff || *l == 0x00000000 ||
+ /* Some broken boards return 0 or ~0 (PCI_ERROR_RESPONSE) if a slot is empty: */
+ if (PCI_POSSIBLE_ERROR(*l) || *l == 0x00000000 ||
*l == 0x0000ffff || *l == 0xffff0000)
return false;
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index cb18f8a13ab6..9c7edec64f7e 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -21,14 +21,14 @@ static int proc_initialized; /* = 0 */
static loff_t proc_bus_pci_lseek(struct file *file, loff_t off, int whence)
{
- struct pci_dev *dev = PDE_DATA(file_inode(file));
+ struct pci_dev *dev = pde_data(file_inode(file));
return fixed_size_llseek(file, off, whence, dev->cfg_size);
}
static ssize_t proc_bus_pci_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
{
- struct pci_dev *dev = PDE_DATA(file_inode(file));
+ struct pci_dev *dev = pde_data(file_inode(file));
unsigned int pos = *ppos;
unsigned int cnt, size;
@@ -114,7 +114,7 @@ static ssize_t proc_bus_pci_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct inode *ino = file_inode(file);
- struct pci_dev *dev = PDE_DATA(ino);
+ struct pci_dev *dev = pde_data(ino);
int pos = *ppos;
int size = dev->cfg_size;
int cnt, ret;
@@ -196,7 +196,7 @@ struct pci_filp_private {
static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- struct pci_dev *dev = PDE_DATA(file_inode(file));
+ struct pci_dev *dev = pde_data(file_inode(file));
#ifdef HAVE_PCI_MMAP
struct pci_filp_private *fpriv = file->private_data;
#endif /* HAVE_PCI_MMAP */
@@ -244,7 +244,7 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
#ifdef HAVE_PCI_MMAP
static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
{
- struct pci_dev *dev = PDE_DATA(file_inode(file));
+ struct pci_dev *dev = pde_data(file_inode(file));
struct pci_filp_private *fpriv = file->private_data;
int i, ret, write_combine = 0, res_bit = IORESOURCE_MEM;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index b4cb658cce2b..d2dd6a6cda60 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -980,8 +980,8 @@ static void quirk_via_ioapic(struct pci_dev *dev)
else
tmp = 0x1f; /* all known bits (4-0) routed to external APIC */
- pci_info(dev, "%sbling VIA external APIC routing\n",
- tmp == 0 ? "Disa" : "Ena");
+ pci_info(dev, "%s VIA external APIC routing\n",
+ tmp ? "Enabling" : "Disabling");
/* Offset 0x58: External APIC IRQ output control */
pci_write_config_byte(dev, 0x58, tmp);
@@ -4103,6 +4103,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
quirk_dma_func1_alias);
+/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c136 */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9125,
+ quirk_dma_func1_alias);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
quirk_dma_func1_alias);
/* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
@@ -5683,6 +5686,15 @@ SWITCHTEC_QUIRK(0x4268); /* PAX 68XG4 */
SWITCHTEC_QUIRK(0x4252); /* PAX 52XG4 */
SWITCHTEC_QUIRK(0x4236); /* PAX 36XG4 */
SWITCHTEC_QUIRK(0x4228); /* PAX 28XG4 */
+SWITCHTEC_QUIRK(0x4352); /* PFXA 52XG4 */
+SWITCHTEC_QUIRK(0x4336); /* PFXA 36XG4 */
+SWITCHTEC_QUIRK(0x4328); /* PFXA 28XG4 */
+SWITCHTEC_QUIRK(0x4452); /* PSXA 52XG4 */
+SWITCHTEC_QUIRK(0x4436); /* PSXA 36XG4 */
+SWITCHTEC_QUIRK(0x4428); /* PSXA 28XG4 */
+SWITCHTEC_QUIRK(0x4552); /* PAXA 52XG4 */
+SWITCHTEC_QUIRK(0x4536); /* PAXA 36XG4 */
+SWITCHTEC_QUIRK(0x4528); /* PAXA 28XG4 */
/*
* The PLX NTB uses devfn proxy IDs to move TLPs between NT endpoints.
@@ -5857,3 +5869,13 @@ static void nvidia_ion_ahci_fixup(struct pci_dev *pdev)
pdev->dev_flags |= PCI_DEV_FLAGS_HAS_MSI_MASKING;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, 0x0ab8, nvidia_ion_ahci_fixup);
+
+static void rom_bar_overlap_defect(struct pci_dev *dev)
+{
+ pci_info(dev, "working around ROM BAR overlap defect\n");
+ dev->rom_bar_overlap = 1;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1533, rom_bar_overlap_defect);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1536, rom_bar_overlap_defect);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1537, rom_bar_overlap_defect);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1538, rom_bar_overlap_defect);
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 7f1acb3918d0..439ac5f5907a 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -75,12 +75,16 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno)
* as zero when disabled, so don't update ROM BARs unless
* they're enabled. See
* https://lore.kernel.org/r/43147B3D.1030309@vc.cvut.cz/
+ * But we must update ROM BAR for buggy devices where even a
+ * disabled ROM can conflict with other BARs.
*/
- if (!(res->flags & IORESOURCE_ROM_ENABLE))
+ if (!(res->flags & IORESOURCE_ROM_ENABLE) &&
+ !dev->rom_bar_overlap)
return;
reg = dev->rom_base_reg;
- new |= PCI_ROM_ADDRESS_ENABLE;
+ if (res->flags & IORESOURCE_ROM_ENABLE)
+ new |= PCI_ROM_ADDRESS_ENABLE;
} else
return;
diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
index 751a26668e3a..a0c67191a8b9 100644
--- a/drivers/pci/slot.c
+++ b/drivers/pci/slot.c
@@ -96,11 +96,12 @@ static struct attribute *pci_slot_default_attrs[] = {
&pci_slot_attr_cur_speed.attr,
NULL,
};
+ATTRIBUTE_GROUPS(pci_slot_default);
static struct kobj_type pci_slot_ktype = {
.sysfs_ops = &pci_slot_sysfs_ops,
.release = &pci_slot_release,
- .default_attrs = pci_slot_default_attrs,
+ .default_groups = pci_slot_default_groups,
};
static char *make_slot_name(const char *name)
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 38c2b036fb8e..c36c1238c604 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -122,7 +122,7 @@ static void stuser_set_state(struct switchtec_user *stuser,
{
/* requires the mrpc_mutex to already be held when called */
- const char * const state_names[] = {
+ static const char * const state_names[] = {
[MRPC_IDLE] = "IDLE",
[MRPC_QUEUED] = "QUEUED",
[MRPC_RUNNING] = "RUNNING",
@@ -1779,6 +1779,15 @@ static const struct pci_device_id switchtec_pci_tbl[] = {
SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4), //PAX 52XG4
SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4), //PAX 36XG4
SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4), //PAX 28XG4
+ SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4), //PFXA 52XG4
+ SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4), //PFXA 36XG4
+ SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4), //PFXA 28XG4
+ SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4), //PSXA 52XG4
+ SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4), //PSXA 36XG4
+ SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4), //PSXA 28XG4
+ SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4), //PAXA 52XG4
+ SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4), //PAXA 36XG4
+ SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4), //PAXA 28XG4
{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index d858d25b6cab..d2a7b9fd678b 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -262,8 +262,8 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
}
i = 0;
- for_each_pci_msi_entry(entry, dev) {
- op.msix_entries[i].entry = entry->msi_attrib.entry_nr;
+ msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
+ op.msix_entries[i].entry = entry->msi_index;
/* Vector is useless at this point. */
op.msix_entries[i].vector = -1;
i++;
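The xen-pcifront loop above selects descriptors by state: msi_for_each_desc()
takes an enum msi_desc_filter argument. As introduced by this series in
<linux/msi.h>, the filters are approximately:

enum msi_desc_filter {
	MSI_DESC_ALL,		/* all descriptors */
	MSI_DESC_NOTASSOCIATED,	/* descriptors not associated to a Linux IRQ */
	MSI_DESC_ASSOCIATED,	/* descriptors associated to a Linux IRQ */
};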
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 6b1edfc890a3..92df2c2c5d07 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
+#include <linux/pci.h>
#include <linux/regmap.h>
#include <pcmcia/ss.h>
@@ -230,6 +231,7 @@ static int at91_cf_probe(struct platform_device *pdev)
struct at91_cf_socket *cf;
struct at91_cf_data *board;
struct resource *io;
+ struct resource realio;
int status;
board = devm_kzalloc(&pdev->dev, sizeof(*board), GFP_KERNEL);
@@ -307,7 +309,9 @@ static int at91_cf_probe(struct platform_device *pdev)
* io_offset is set to 0x10000 to avoid the check in static_find_io().
*/
cf->socket.io_offset = 0x10000;
- status = pci_ioremap_io(0x10000, cf->phys_baseaddr + CF_IO_PHYS);
+ realio.start = cf->socket.io_offset;
+ realio.end = realio.start + SZ_64K - 1;
+ status = pci_remap_iospace(&realio, cf->phys_baseaddr + CF_IO_PHYS);
if (status)
goto fail0a;
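pci_remap_iospace() is the generic replacement for the ARM-only pci_ioremap_io(); it takes the logical I/O resource plus the CPU physical address backing it. A sketch of the same 64K window setup in isolation, with cf_io_phys standing in for cf->phys_baseaddr + CF_IO_PHYS:

    struct resource realio = {
            .start = 0x10000,               /* matches socket.io_offset */
            .end   = 0x10000 + SZ_64K - 1,
            .flags = IORESOURCE_IO,
    };
    int ret = pci_remap_iospace(&realio, cf_io_phys);
    if (ret)
            return ret;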
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 1ae19f7301b2..c49108a72865 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -703,7 +703,6 @@ static void smmu_pmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
{
- struct msi_desc *desc;
struct device *dev = pmu->dev;
int ret;
@@ -720,9 +719,7 @@ static void smmu_pmu_setup_msi(struct smmu_pmu *pmu)
return;
}
- desc = first_msi_entry(dev);
- if (desc)
- pmu->irq = desc->irq;
+ pmu->irq = msi_get_virq(dev, 0);
/* Add callback to free MSIs on teardown */
devm_add_action(dev, smmu_pmu_free_msis, dev);
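msi_get_virq() replaces the open-coded first_msi_entry() peek: it looks up the Linux interrupt number for the MSI descriptor at a given index and returns 0 when none exists. A hedged sketch of the same pattern outside this driver (handler and cookie are illustrative):

    unsigned int virq = msi_get_virq(dev, 0);    /* index 0 = first vector */
    if (!virq)
            return -ENXIO;                       /* no MSI allocated */
    ret = devm_request_irq(dev, virq, handler, 0, dev_name(dev), cookie);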
diff --git a/drivers/phy/amlogic/Kconfig b/drivers/phy/amlogic/Kconfig
index db5d0cd757e3..486ca23aba32 100644
--- a/drivers/phy/amlogic/Kconfig
+++ b/drivers/phy/amlogic/Kconfig
@@ -2,6 +2,16 @@
#
# Phy drivers for Amlogic platforms
#
+config PHY_MESON8_HDMI_TX
+ tristate "Meson8, Meson8b and Meson8m2 HDMI TX PHY driver"
+ depends on (ARCH_MESON && ARM) || COMPILE_TEST
+ depends on OF
+ select MFD_SYSCON
+ help
+ Enable this to support the HDMI TX PHYs found in Meson8,
+ Meson8b and Meson8m2 SoCs.
+ If unsure, say N.
+
config PHY_MESON8B_USB2
tristate "Meson8, Meson8b, Meson8m2 and GXBB USB2 PHY driver"
default ARCH_MESON
diff --git a/drivers/phy/amlogic/Makefile b/drivers/phy/amlogic/Makefile
index 8fa07fbd0d92..c0886c850bb0 100644
--- a/drivers/phy/amlogic/Makefile
+++ b/drivers/phy/amlogic/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PHY_MESON8_HDMI_TX) += phy-meson8-hdmi-tx.o
obj-$(CONFIG_PHY_MESON8B_USB2) += phy-meson8b-usb2.o
obj-$(CONFIG_PHY_MESON_GXL_USB2) += phy-meson-gxl-usb2.o
obj-$(CONFIG_PHY_MESON_G12A_USB2) += phy-meson-g12a-usb2.o
diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
index cd2332bf0e31..fdbd64c03e12 100644
--- a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
+++ b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c
@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -250,7 +251,7 @@ static int phy_meson_axg_mipi_dphy_power_on(struct phy *phy)
(DIV_ROUND_UP(priv->config.clk_zero, temp) << 16) |
(DIV_ROUND_UP(priv->config.clk_prepare, temp) << 24));
regmap_write(priv->regmap, MIPI_DSI_CLK_TIM1,
- DIV_ROUND_UP(priv->config.clk_pre, temp));
+ DIV_ROUND_UP(priv->config.clk_pre, BITS_PER_BYTE));
regmap_write(priv->regmap, MIPI_DSI_HS_TIM,
DIV_ROUND_UP(priv->config.hs_exit, temp) |
diff --git a/drivers/phy/amlogic/phy-meson8-hdmi-tx.c b/drivers/phy/amlogic/phy-meson8-hdmi-tx.c
new file mode 100644
index 000000000000..f9a6572c27d8
--- /dev/null
+++ b/drivers/phy/amlogic/phy-meson8-hdmi-tx.c
@@ -0,0 +1,160 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Meson8, Meson8b and Meson8m2 HDMI TX PHY.
+ *
+ * Copyright (C) 2021 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+/*
+ * Unfortunately there is no detailed documentation available for the
+ * HHI_HDMI_PHY_CNTL0 register. CTL0 and CTL1 are all we know about.
+ * Magic register values in the driver below are taken from the vendor
+ * BSP / kernel.
+ */
+#define HHI_HDMI_PHY_CNTL0 0x3a0
+ #define HHI_HDMI_PHY_CNTL0_HDMI_CTL1 GENMASK(31, 16)
+ #define HHI_HDMI_PHY_CNTL0_HDMI_CTL0 GENMASK(15, 0)
+
+#define HHI_HDMI_PHY_CNTL1 0x3a4
+ #define HHI_HDMI_PHY_CNTL1_CLOCK_ENABLE BIT(1)
+ #define HHI_HDMI_PHY_CNTL1_SOFT_RESET BIT(0)
+
+#define HHI_HDMI_PHY_CNTL2 0x3a8
+
+struct phy_meson8_hdmi_tx_priv {
+ struct regmap *hhi;
+ struct clk *tmds_clk;
+};
+
+static int phy_meson8_hdmi_tx_init(struct phy *phy)
+{
+ struct phy_meson8_hdmi_tx_priv *priv = phy_get_drvdata(phy);
+
+ return clk_prepare_enable(priv->tmds_clk);
+}
+
+static int phy_meson8_hdmi_tx_exit(struct phy *phy)
+{
+ struct phy_meson8_hdmi_tx_priv *priv = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(priv->tmds_clk);
+
+ return 0;
+}
+
+static int phy_meson8_hdmi_tx_power_on(struct phy *phy)
+{
+ struct phy_meson8_hdmi_tx_priv *priv = phy_get_drvdata(phy);
+ unsigned int i;
+ u16 hdmi_ctl0;
+
+ if (clk_get_rate(priv->tmds_clk) >= 2970UL * 1000 * 1000)
+ hdmi_ctl0 = 0x1e8b;
+ else
+ hdmi_ctl0 = 0x4d0b;
+
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0,
+ FIELD_PREP(HHI_HDMI_PHY_CNTL0_HDMI_CTL1, 0x08c3) |
+ FIELD_PREP(HHI_HDMI_PHY_CNTL0_HDMI_CTL0, hdmi_ctl0));
+
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1, 0x0);
+
+ /* Reset three times, just like the vendor driver does */
+ for (i = 0; i < 3; i++) {
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1,
+ HHI_HDMI_PHY_CNTL1_CLOCK_ENABLE |
+ HHI_HDMI_PHY_CNTL1_SOFT_RESET);
+ usleep_range(1000, 2000);
+
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL1,
+ HHI_HDMI_PHY_CNTL1_CLOCK_ENABLE);
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
+static int phy_meson8_hdmi_tx_power_off(struct phy *phy)
+{
+ struct phy_meson8_hdmi_tx_priv *priv = phy_get_drvdata(phy);
+
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0,
+ FIELD_PREP(HHI_HDMI_PHY_CNTL0_HDMI_CTL1, 0x0841) |
+ FIELD_PREP(HHI_HDMI_PHY_CNTL0_HDMI_CTL0, 0x8d00));
+
+ return 0;
+}
+
+static const struct phy_ops phy_meson8_hdmi_tx_ops = {
+ .init = phy_meson8_hdmi_tx_init,
+ .exit = phy_meson8_hdmi_tx_exit,
+ .power_on = phy_meson8_hdmi_tx_power_on,
+ .power_off = phy_meson8_hdmi_tx_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int phy_meson8_hdmi_tx_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct phy_meson8_hdmi_tx_priv *priv;
+ struct phy_provider *phy_provider;
+ struct resource *res;
+ struct phy *phy;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->hhi = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(priv->hhi))
+ return PTR_ERR(priv->hhi);
+
+ priv->tmds_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->tmds_clk))
+ return PTR_ERR(priv->tmds_clk);
+
+ phy = devm_phy_create(&pdev->dev, np, &phy_meson8_hdmi_tx_ops);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ phy_set_drvdata(phy, priv);
+
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id phy_meson8_hdmi_tx_of_match[] = {
+ { .compatible = "amlogic,meson8-hdmi-tx-phy" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, phy_meson8_hdmi_tx_of_match);
+
+static struct platform_driver phy_meson8_hdmi_tx_driver = {
+ .probe = phy_meson8_hdmi_tx_probe,
+ .driver = {
+ .name = "phy-meson8-hdmi-tx",
+ .of_match_table = phy_meson8_hdmi_tx_of_match,
+ },
+};
+module_platform_driver(phy_meson8_hdmi_tx_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Meson8, Meson8b and Meson8m2 HDMI TX PHY driver");
+MODULE_LICENSE("GPL v2");
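A sketch of how a display controller would consume this PHY through the generic PHY API; the consumer side is not part of this patch, and "hdmi" is an illustrative con_id:

    struct phy *hdmi_phy = devm_phy_get(dev, "hdmi");
    if (IS_ERR(hdmi_phy))
            return PTR_ERR(hdmi_phy);

    ret = phy_init(hdmi_phy);      /* -> .init: clk_prepare_enable(tmds_clk) */
    if (ret)
            return ret;
    ret = phy_power_on(hdmi_phy);  /* -> .power_on: CNTL0 setup + 3x soft reset */

phy_power_off()/phy_exit() undo the two steps in reverse order.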
diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig
index f81e23742079..849c4204f550 100644
--- a/drivers/phy/broadcom/Kconfig
+++ b/drivers/phy/broadcom/Kconfig
@@ -97,8 +97,7 @@ config PHY_BRCM_USB
depends on OF
select GENERIC_PHY
select SOC_BRCMSTB if ARCH_BRCMSTB
- default ARCH_BCM4908
- default ARCH_BRCMSTB
+ default ARCH_BCM4908 || ARCH_BRCMSTB
help
Enable this to support the Broadcom STB USB PHY.
This driver is required by the USB XHCI, EHCI and OHCI
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb2.c b/drivers/phy/broadcom/phy-bcm-ns-usb2.c
index 4b015b8a71c3..6a36e187d100 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb2.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb2.c
@@ -9,17 +9,23 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
struct bcm_ns_usb2 {
struct device *dev;
struct clk *ref_clk;
struct phy *phy;
+ struct regmap *clkset;
+ void __iomem *base;
+
+ /* Deprecated binding */
void __iomem *dmu;
};
@@ -27,7 +33,6 @@ static int bcm_ns_usb2_phy_init(struct phy *phy)
{
struct bcm_ns_usb2 *usb2 = phy_get_drvdata(phy);
struct device *dev = usb2->dev;
- void __iomem *dmu = usb2->dmu;
u32 ref_clk_rate, usb2ctl, usb_pll_ndiv, usb_pll_pdiv;
int err = 0;
@@ -44,7 +49,10 @@ static int bcm_ns_usb2_phy_init(struct phy *phy)
goto err_clk_off;
}
- usb2ctl = readl(dmu + BCMA_DMU_CRU_USB2_CONTROL);
+ if (usb2->base)
+ usb2ctl = readl(usb2->base);
+ else
+ usb2ctl = readl(usb2->dmu + BCMA_DMU_CRU_USB2_CONTROL);
if (usb2ctl & BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_PDIV_MASK) {
usb_pll_pdiv = usb2ctl;
@@ -58,15 +66,24 @@ static int bcm_ns_usb2_phy_init(struct phy *phy)
usb_pll_ndiv = (1920000000 * usb_pll_pdiv) / ref_clk_rate;
/* Unlock DMU PLL settings with some magic value */
- writel(0x0000ea68, dmu + BCMA_DMU_CRU_CLKSET_KEY);
+ if (usb2->clkset)
+ regmap_write(usb2->clkset, 0, 0x0000ea68);
+ else
+ writel(0x0000ea68, usb2->dmu + BCMA_DMU_CRU_CLKSET_KEY);
/* Write USB 2.0 PLL control setting */
usb2ctl &= ~BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_MASK;
usb2ctl |= usb_pll_ndiv << BCMA_DMU_CRU_USB2_CONTROL_USB_PLL_NDIV_SHIFT;
- writel(usb2ctl, dmu + BCMA_DMU_CRU_USB2_CONTROL);
+ if (usb2->base)
+ writel(usb2ctl, usb2->base);
+ else
+ writel(usb2ctl, usb2->dmu + BCMA_DMU_CRU_USB2_CONTROL);
/* Lock DMU PLL settings */
- writel(0x00000000, dmu + BCMA_DMU_CRU_CLKSET_KEY);
+ if (usb2->clkset)
+ regmap_write(usb2->clkset, 0, 0x00000000);
+ else
+ writel(0x00000000, usb2->dmu + BCMA_DMU_CRU_CLKSET_KEY);
err_clk_off:
clk_disable_unprepare(usb2->ref_clk);
@@ -90,15 +107,32 @@ static int bcm_ns_usb2_probe(struct platform_device *pdev)
return -ENOMEM;
usb2->dev = dev;
- usb2->dmu = devm_platform_ioremap_resource_byname(pdev, "dmu");
- if (IS_ERR(usb2->dmu)) {
- dev_err(dev, "Failed to map DMU regs\n");
- return PTR_ERR(usb2->dmu);
+ if (of_find_property(dev->of_node, "brcm,syscon-clkset", NULL)) {
+ usb2->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(usb2->base)) {
+ dev_err(dev, "Failed to map control reg\n");
+ return PTR_ERR(usb2->base);
+ }
+
+ usb2->clkset = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "brcm,syscon-clkset");
+ if (IS_ERR(usb2->clkset)) {
+ dev_err(dev, "Failed to lookup clkset regmap\n");
+ return PTR_ERR(usb2->clkset);
+ }
+ } else {
+ usb2->dmu = devm_platform_ioremap_resource_byname(pdev, "dmu");
+ if (IS_ERR(usb2->dmu)) {
+ dev_err(dev, "Failed to map DMU regs\n");
+ return PTR_ERR(usb2->dmu);
+ }
+
+ dev_warn(dev, "using deprecated DT binding\n");
}
usb2->ref_clk = devm_clk_get(dev, "phy-ref-clk");
if (IS_ERR(usb2->ref_clk)) {
- dev_err(dev, "Clock not defined\n");
+ dev_err_probe(dev, PTR_ERR(usb2->ref_clk), "failed to get ref clk\n");
return PTR_ERR(usb2->ref_clk);
}
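dev_err_probe() does more than change the message: it stays quiet for -EPROBE_DEFER (recording the reason in /sys/kernel/debug/devices_deferred instead) and returns the error code, so the two lines above could even collapse into one. A sketch:

    usb2->ref_clk = devm_clk_get(dev, "phy-ref-clk");
    if (IS_ERR(usb2->ref_clk))
            return dev_err_probe(dev, PTR_ERR(usb2->ref_clk),
                                 "failed to get ref clk\n");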
diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c
index 116fb23aebd9..0f1deb6e0eab 100644
--- a/drivers/phy/broadcom/phy-brcm-usb.c
+++ b/drivers/phy/broadcom/phy-brcm-usb.c
@@ -18,6 +18,7 @@
#include <linux/soc/brcmstb/brcmstb.h>
#include <dt-bindings/phy/phy.h>
#include <linux/mfd/syscon.h>
+#include <linux/suspend.h>
#include "phy-brcm-usb-init.h"
@@ -70,12 +71,35 @@ struct brcm_usb_phy_data {
int init_count;
int wake_irq;
struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX];
+ struct notifier_block pm_notifier;
+ bool pm_active;
};
static s8 *node_reg_names[BRCM_REGS_MAX] = {
"crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec"
};
+static int brcm_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event,
+ void *unused)
+{
+ struct brcm_usb_phy_data *priv =
+ container_of(notifier, struct brcm_usb_phy_data, pm_notifier);
+
+ switch (pm_event) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ priv->pm_active = true;
+ break;
+ case PM_POST_RESTORE:
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ priv->pm_active = false;
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id)
{
struct phy *gphy = dev_id;
@@ -91,6 +115,9 @@ static int brcm_usb_phy_init(struct phy *gphy)
struct brcm_usb_phy_data *priv =
container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
+ if (priv->pm_active)
+ return 0;
+
/*
* Use a lock to make sure a second caller waits until
* the base phy is inited before using it.
@@ -120,6 +147,9 @@ static int brcm_usb_phy_exit(struct phy *gphy)
struct brcm_usb_phy_data *priv =
container_of(phy, struct brcm_usb_phy_data, phys[phy->id]);
+ if (priv->pm_active)
+ return 0;
+
dev_dbg(&gphy->dev, "EXIT\n");
if (phy->id == BRCM_USB_PHY_2_0)
brcm_usb_uninit_eohci(&priv->ini);
@@ -488,6 +518,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
if (err)
return err;
+ priv->pm_notifier.notifier_call = brcm_pm_notifier;
+ register_pm_notifier(&priv->pm_notifier);
+
mutex_init(&priv->mutex);
/* make sure invert settings are correct */
@@ -528,7 +561,10 @@ static int brcm_usb_phy_probe(struct platform_device *pdev)
static int brcm_usb_phy_remove(struct platform_device *pdev)
{
+ struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev);
+
sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group);
+ unregister_pm_notifier(&priv->pm_notifier);
return 0;
}
@@ -539,6 +575,7 @@ static int brcm_usb_phy_suspend(struct device *dev)
struct brcm_usb_phy_data *priv = dev_get_drvdata(dev);
if (priv->init_count) {
+ dev_dbg(dev, "SUSPEND\n");
priv->ini.wake_enabled = device_may_wakeup(dev);
if (priv->phys[BRCM_USB_PHY_3_0].inited)
brcm_usb_uninit_xhci(&priv->ini);
@@ -578,6 +615,7 @@ static int brcm_usb_phy_resume(struct device *dev)
* Uninitialize anything that wasn't previously initialized.
*/
if (priv->init_count) {
+ dev_dbg(dev, "RESUME\n");
if (priv->wake_irq >= 0)
disable_irq_wake(priv->wake_irq);
brcm_usb_init_common(&priv->ini);
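The pm_active flag exists because PHY consumers (the host-controller drivers) can call phy_init()/phy_exit() while a system-wide suspend or resume is in flight; gating those callbacks during the PM_SUSPEND_PREPARE..PM_POST_SUSPEND window leaves the PHY state to this driver's own suspend/resume hooks. A minimal sketch of the notifier pattern in isolation (the demo_* names are illustrative):

    static int demo_pm_cb(struct notifier_block *nb, unsigned long event,
                          void *data)
    {
            struct demo_priv *p = container_of(nb, struct demo_priv, pm_nb);

            if (event == PM_SUSPEND_PREPARE || event == PM_HIBERNATION_PREPARE)
                    p->pm_active = true;
            else if (event == PM_POST_SUSPEND || event == PM_POST_HIBERNATION)
                    p->pm_active = false;

            return NOTIFY_DONE;
    }

    /* at probe time; pair with unregister_pm_notifier() at remove */
    p->pm_nb.notifier_call = demo_pm_cb;
    register_pm_notifier(&p->pm_nb);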
diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c
index e93818e3991f..e265647e29a2 100644
--- a/drivers/phy/cadence/phy-cadence-sierra.c
+++ b/drivers/phy/cadence/phy-cadence-sierra.c
@@ -23,6 +23,9 @@
#include <dt-bindings/phy/phy.h>
#include <dt-bindings/phy/phy-cadence.h>
+#define NUM_SSC_MODE 3
+#define NUM_PHY_TYPE 4
+
/* PHY register offsets */
#define SIERRA_COMMON_CDB_OFFSET 0x0
#define SIERRA_MACRO_ID_REG 0x0
@@ -31,12 +34,21 @@
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG 0x49
#define SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG 0x4A
#define SIERRA_CMN_PLLLC_LOCK_CNTSTART_PREG 0x4B
+#define SIERRA_CMN_PLLLC_CLK1_PREG 0x4D
#define SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG 0x4F
#define SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG 0x50
+#define SIERRA_CMN_PLLLC_DSMCORR_PREG 0x51
+#define SIERRA_CMN_PLLLC_SS_PREG 0x52
+#define SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG 0x53
+#define SIERRA_CMN_PLLLC_SSTWOPT_PREG 0x54
#define SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG 0x62
+#define SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG 0x63
#define SIERRA_CMN_REFRCV_PREG 0x98
#define SIERRA_CMN_REFRCV1_PREG 0xB8
#define SIERRA_CMN_PLLLC1_GEN_PREG 0xC2
+#define SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG 0xCA
+#define SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG 0xD0
+#define SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG 0xE2
#define SIERRA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
((0x4000 << (block_offset)) + \
@@ -49,7 +61,11 @@
#define SIERRA_DET_STANDEC_E_PREG 0x004
#define SIERRA_PSM_LANECAL_DLY_A1_RESETS_PREG 0x008
#define SIERRA_PSM_A0IN_TMR_PREG 0x009
+#define SIERRA_PSM_A3IN_TMR_PREG 0x00C
#define SIERRA_PSM_DIAG_PREG 0x015
+#define SIERRA_PSC_LN_A3_PREG 0x023
+#define SIERRA_PSC_LN_A4_PREG 0x024
+#define SIERRA_PSC_LN_IDLE_PREG 0x026
#define SIERRA_PSC_TX_A0_PREG 0x028
#define SIERRA_PSC_TX_A1_PREG 0x029
#define SIERRA_PSC_TX_A2_PREG 0x02A
@@ -59,18 +75,22 @@
#define SIERRA_PSC_RX_A2_PREG 0x032
#define SIERRA_PSC_RX_A3_PREG 0x033
#define SIERRA_PLLCTRL_SUBRATE_PREG 0x03A
+#define SIERRA_PLLCTRL_GEN_A_PREG 0x03B
#define SIERRA_PLLCTRL_GEN_D_PREG 0x03E
#define SIERRA_PLLCTRL_CPGAIN_MODE_PREG 0x03F
#define SIERRA_PLLCTRL_STATUS_PREG 0x044
#define SIERRA_CLKPATH_BIASTRIM_PREG 0x04B
#define SIERRA_DFE_BIASTRIM_PREG 0x04C
#define SIERRA_DRVCTRL_ATTEN_PREG 0x06A
+#define SIERRA_DRVCTRL_BOOST_PREG 0x06F
#define SIERRA_CLKPATHCTRL_TMR_PREG 0x081
#define SIERRA_RX_CREQ_FLTR_A_MODE3_PREG 0x085
#define SIERRA_RX_CREQ_FLTR_A_MODE2_PREG 0x086
#define SIERRA_RX_CREQ_FLTR_A_MODE1_PREG 0x087
#define SIERRA_RX_CREQ_FLTR_A_MODE0_PREG 0x088
+#define SIERRA_CREQ_DCBIASATTEN_OVR_PREG 0x08C
#define SIERRA_CREQ_CCLKDET_MODE01_PREG 0x08E
+#define SIERRA_RX_CTLE_CAL_PREG 0x08F
#define SIERRA_RX_CTLE_MAINTENANCE_PREG 0x091
#define SIERRA_CREQ_FSMCLK_SEL_PREG 0x092
#define SIERRA_CREQ_EQ_CTRL_PREG 0x093
@@ -120,15 +140,28 @@
#define SIERRA_DEQ_ALUT12 0x114
#define SIERRA_DEQ_ALUT13 0x115
#define SIERRA_DEQ_DFETAP_CTRL_PREG 0x128
+#define SIERRA_DEQ_DFETAP0 0x129
+#define SIERRA_DEQ_DFETAP1 0x12B
+#define SIERRA_DEQ_DFETAP2 0x12D
+#define SIERRA_DEQ_DFETAP3 0x12F
+#define SIERRA_DEQ_DFETAP4 0x131
#define SIERRA_DFE_EN_1010_IGNORE_PREG 0x134
+#define SIERRA_DEQ_PRECUR_PREG 0x138
+#define SIERRA_DEQ_POSTCUR_PREG 0x140
+#define SIERRA_DEQ_POSTCUR_DECR_PREG 0x142
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
#define SIERRA_DEQ_TAU_CTRL2_PREG 0x151
+#define SIERRA_DEQ_TAU_CTRL3_PREG 0x152
+#define SIERRA_DEQ_OPENEYE_CTRL_PREG 0x158
#define SIERRA_DEQ_PICTRL_PREG 0x161
#define SIERRA_CPICAL_TMRVAL_MODE1_PREG 0x170
#define SIERRA_CPICAL_TMRVAL_MODE0_PREG 0x171
#define SIERRA_CPICAL_PICNT_MODE1_PREG 0x174
#define SIERRA_CPI_OUTBUF_RATESEL_PREG 0x17C
+#define SIERRA_CPI_RESBIAS_BIN_PREG 0x17E
+#define SIERRA_CPI_TRIM_PREG 0x17F
#define SIERRA_CPICAL_RES_STARTCODE_MODE23_PREG 0x183
+#define SIERRA_EPI_CTRL_PREG 0x187
#define SIERRA_LFPSDET_SUPPORT_PREG 0x188
#define SIERRA_LFPSFILT_NS_PREG 0x18A
#define SIERRA_LFPSFILT_RD_PREG 0x18B
@@ -142,15 +175,36 @@
#define SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG 0x14F
#define SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG 0x150
-#define SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset) \
- (0xc000 << (block_offset))
+/* PHY PCS common registers */
+#define SIERRA_PHY_PCS_COMMON_OFFSET(block_offset) \
+ (0xc000 << (block_offset))
+#define SIERRA_PHY_PIPE_CMN_CTRL1 0x0
#define SIERRA_PHY_PLL_CFG 0xe
+/* PHY PCS lane registers */
+#define SIERRA_PHY_PCS_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0xD000 << (block_offset)) + \
+ (((ln) << 8) << (reg_offset)))
+
+#define SIERRA_PHY_ISO_LINK_CTRL 0xB
+
+/* PHY PMA common registers */
+#define SIERRA_PHY_PMA_COMMON_OFFSET(block_offset) \
+ (0xE000 << (block_offset))
+#define SIERRA_PHY_PMA_CMN_CTRL 0x000
+
+/* PHY PMA lane registers */
+#define SIERRA_PHY_PMA_LANE_CDB_OFFSET(ln, block_offset, reg_offset) \
+ ((0xF000 << (block_offset)) + \
+ (((ln) << 8) << (reg_offset)))
+
+#define SIERRA_PHY_PMA_XCVR_CTRL 0x000
+
#define SIERRA_MACRO_ID 0x00007364
#define SIERRA_MAX_LANES 16
#define PLL_LOCK_TIME 100000
-#define CDNS_SIERRA_OUTPUT_CLOCKS 2
+#define CDNS_SIERRA_OUTPUT_CLOCKS 3
#define CDNS_SIERRA_INPUT_CLOCKS 5
enum cdns_sierra_clock_input {
PHY_CLK,
@@ -167,12 +221,21 @@ static const struct reg_field macro_id_type =
REG_FIELD(SIERRA_MACRO_ID_REG, 0, 15);
static const struct reg_field phy_pll_cfg_1 =
REG_FIELD(SIERRA_PHY_PLL_CFG, 1, 1);
+static const struct reg_field pma_cmn_ready =
+ REG_FIELD(SIERRA_PHY_PMA_CMN_CTRL, 0, 0);
static const struct reg_field pllctrl_lock =
REG_FIELD(SIERRA_PLLCTRL_STATUS_PREG, 0, 0);
+static const struct reg_field phy_iso_link_ctrl_1 =
+ REG_FIELD(SIERRA_PHY_ISO_LINK_CTRL, 1, 1);
+static const struct reg_field cmn_plllc_clk1outdiv_preg =
+ REG_FIELD(SIERRA_CMN_PLLLC_CLK1_PREG, 0, 6);
+static const struct reg_field cmn_plllc_clk1_en_preg =
+ REG_FIELD(SIERRA_CMN_PLLLC_CLK1_PREG, 12, 12);
static const char * const clk_names[] = {
[CDNS_SIERRA_PLL_CMNLC] = "pll_cmnlc",
[CDNS_SIERRA_PLL_CMNLC1] = "pll_cmnlc1",
+ [CDNS_SIERRA_DERIVED_REFCLK] = "refclk_der",
};
enum cdns_sierra_cmn_plllc {
@@ -215,14 +278,41 @@ static const int pll_mux_parent_index[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
[CMN_PLLLC1] = { PLL1_REFCLK, PLL0_REFCLK },
};
-static u32 cdns_sierra_pll_mux_table[] = { 0, 1 };
+static u32 cdns_sierra_pll_mux_table[][SIERRA_NUM_CMN_PLLC_PARENTS] = {
+ [CMN_PLLLC] = { 0, 1 },
+ [CMN_PLLLC1] = { 1, 0 },
+};
+
+struct cdns_sierra_derived_refclk {
+ struct clk_hw hw;
+ struct regmap_field *cmn_plllc_clk1outdiv_preg;
+ struct regmap_field *cmn_plllc_clk1_en_preg;
+ struct clk_init_data clk_data;
+};
+
+#define to_cdns_sierra_derived_refclk(_hw) \
+ container_of(_hw, struct cdns_sierra_derived_refclk, hw)
+
+enum cdns_sierra_phy_type {
+ TYPE_NONE,
+ TYPE_PCIE,
+ TYPE_USB,
+ TYPE_QSGMII
+};
+
+enum cdns_sierra_ssc_mode {
+ NO_SSC,
+ EXTERNAL_SSC,
+ INTERNAL_SSC
+};
struct cdns_sierra_inst {
struct phy *phy;
- u32 phy_type;
+ enum cdns_sierra_phy_type phy_type;
u32 num_lanes;
u32 mlane;
struct reset_control *lnk_rst;
+ enum cdns_sierra_ssc_mode ssc_mode;
};
struct cdns_reg_pairs {
@@ -230,18 +320,23 @@ struct cdns_reg_pairs {
u32 off;
};
+struct cdns_sierra_vals {
+ const struct cdns_reg_pairs *reg_pairs;
+ u32 num_regs;
+};
+
struct cdns_sierra_data {
- u32 id_value;
- u8 block_offset_shift;
- u8 reg_offset_shift;
- u32 pcie_cmn_regs;
- u32 pcie_ln_regs;
- u32 usb_cmn_regs;
- u32 usb_ln_regs;
- const struct cdns_reg_pairs *pcie_cmn_vals;
- const struct cdns_reg_pairs *pcie_ln_vals;
- const struct cdns_reg_pairs *usb_cmn_vals;
- const struct cdns_reg_pairs *usb_ln_vals;
+ u32 id_value;
+ u8 block_offset_shift;
+ u8 reg_offset_shift;
+ struct cdns_sierra_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *phy_pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *pma_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
+ struct cdns_sierra_vals *pma_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE]
+ [NUM_SSC_MODE];
};
struct cdns_regmap_cdb_context {
@@ -253,16 +348,21 @@ struct cdns_regmap_cdb_context {
struct cdns_sierra_phy {
struct device *dev;
struct regmap *regmap;
- struct cdns_sierra_data *init_data;
+ const struct cdns_sierra_data *init_data;
struct cdns_sierra_inst phys[SIERRA_MAX_LANES];
struct reset_control *phy_rst;
struct reset_control *apb_rst;
struct regmap *regmap_lane_cdb[SIERRA_MAX_LANES];
- struct regmap *regmap_phy_config_ctrl;
+ struct regmap *regmap_phy_pcs_common_cdb;
+ struct regmap *regmap_phy_pcs_lane_cdb[SIERRA_MAX_LANES];
+ struct regmap *regmap_phy_pma_common_cdb;
+ struct regmap *regmap_phy_pma_lane_cdb[SIERRA_MAX_LANES];
struct regmap *regmap_common_cdb;
struct regmap_field *macro_id_type;
struct regmap_field *phy_pll_cfg_1;
+ struct regmap_field *pma_cmn_ready;
struct regmap_field *pllctrl_lock[SIERRA_MAX_LANES];
+ struct regmap_field *phy_iso_link_ctrl_1[SIERRA_MAX_LANES];
struct regmap_field *cmn_refrcv_refclk_plllc1en_preg[SIERRA_NUM_CMN_PLLC];
struct regmap_field *cmn_refrcv_refclk_termen_preg[SIERRA_NUM_CMN_PLLC];
struct regmap_field *cmn_plllc_pfdclk1_sel_preg[SIERRA_NUM_CMN_PLLC];
@@ -329,51 +429,141 @@ static const struct regmap_config cdns_sierra_common_cdb_config = {
.reg_read = cdns_regmap_read,
};
-static const struct regmap_config cdns_sierra_phy_config_ctrl_config = {
- .name = "sierra_phy_config_ctrl",
+static const struct regmap_config cdns_sierra_phy_pcs_cmn_cdb_config = {
+ .name = "sierra_phy_pcs_cmn_cdb",
+ .reg_stride = 1,
+ .fast_io = true,
+ .reg_write = cdns_regmap_write,
+ .reg_read = cdns_regmap_read,
+};
+
+#define SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "sierra_phy_pcs_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static const struct regmap_config cdns_sierra_phy_pcs_lane_cdb_config[] = {
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("0"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("1"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("2"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("3"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("4"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("5"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("6"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("7"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("8"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("9"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("10"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("11"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("12"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("13"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("14"),
+ SIERRA_PHY_PCS_LANE_CDB_REGMAP_CONF("15"),
+};
+
+static const struct regmap_config cdns_sierra_phy_pma_cmn_cdb_config = {
+ .name = "sierra_phy_pma_cmn_cdb",
.reg_stride = 1,
.fast_io = true,
.reg_write = cdns_regmap_write,
.reg_read = cdns_regmap_read,
};
+#define SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF(n) \
+{ \
+ .name = "sierra_phy_pma_lane" n "_cdb", \
+ .reg_stride = 1, \
+ .fast_io = true, \
+ .reg_write = cdns_regmap_write, \
+ .reg_read = cdns_regmap_read, \
+}
+
+static const struct regmap_config cdns_sierra_phy_pma_lane_cdb_config[] = {
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("0"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("1"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("2"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("3"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("4"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("5"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("6"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("7"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("8"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("9"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("10"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("11"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("12"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("13"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("14"),
+ SIERRA_PHY_PMA_LANE_CDB_REGMAP_CONF("15"),
+};
+
static int cdns_sierra_phy_init(struct phy *gphy)
{
struct cdns_sierra_inst *ins = phy_get_drvdata(gphy);
struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent);
+ const struct cdns_sierra_data *init_data = phy->init_data;
+ struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ enum cdns_sierra_phy_type phy_type = ins->phy_type;
+ enum cdns_sierra_ssc_mode ssc = ins->ssc_mode;
+ struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_reg_pairs *reg_pairs;
+ struct cdns_sierra_vals *pcs_cmn_vals;
struct regmap *regmap;
+ u32 num_regs;
int i, j;
- const struct cdns_reg_pairs *cmn_vals, *ln_vals;
- u32 num_cmn_regs, num_ln_regs;
/* Initialise the PHY registers, unless auto configured */
- if (phy->autoconf)
+ if (phy->autoconf || phy->nsubnodes > 1)
return 0;
clk_set_rate(phy->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
clk_set_rate(phy->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
- if (ins->phy_type == PHY_TYPE_PCIE) {
- num_cmn_regs = phy->init_data->pcie_cmn_regs;
- num_ln_regs = phy->init_data->pcie_ln_regs;
- cmn_vals = phy->init_data->pcie_cmn_vals;
- ln_vals = phy->init_data->pcie_ln_vals;
- } else if (ins->phy_type == PHY_TYPE_USB3) {
- num_cmn_regs = phy->init_data->usb_cmn_regs;
- num_ln_regs = phy->init_data->usb_ln_regs;
- cmn_vals = phy->init_data->usb_cmn_vals;
- ln_vals = phy->init_data->usb_ln_vals;
- } else {
- return -EINVAL;
+
+ /* PHY PCS common registers configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = phy->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
}
- regmap = phy->regmap_common_cdb;
- for (j = 0; j < num_cmn_regs ; j++)
- regmap_write(regmap, cmn_vals[j].off, cmn_vals[j].val);
+ /* PHY PMA lane registers configurations */
+ phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_type][TYPE_NONE][ssc];
+ if (phy_pma_ln_vals) {
+ reg_pairs = phy_pma_ln_vals->reg_pairs;
+ num_regs = phy_pma_ln_vals->num_regs;
+ for (i = 0; i < ins->num_lanes; i++) {
+ regmap = phy->regmap_phy_pma_lane_cdb[i + ins->mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+ /* PMA common registers configurations */
+ pma_cmn_vals = init_data->pma_cmn_vals[phy_type][TYPE_NONE][ssc];
+ if (pma_cmn_vals) {
+ reg_pairs = pma_cmn_vals->reg_pairs;
+ num_regs = pma_cmn_vals->num_regs;
+ regmap = phy->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
- for (i = 0; i < ins->num_lanes; i++) {
- for (j = 0; j < num_ln_regs ; j++) {
+ /* PMA lane registers configurations */
+ pma_ln_vals = init_data->pma_ln_vals[phy_type][TYPE_NONE][ssc];
+ if (pma_ln_vals) {
+ reg_pairs = pma_ln_vals->reg_pairs;
+ num_regs = pma_ln_vals->num_regs;
+ for (i = 0; i < ins->num_lanes; i++) {
regmap = phy->regmap_lane_cdb[i + ins->mlane];
- regmap_write(regmap, ln_vals[j].off, ln_vals[j].val);
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
}
}
@@ -388,10 +578,13 @@ static int cdns_sierra_phy_on(struct phy *gphy)
u32 val;
int ret;
- ret = reset_control_deassert(sp->phy_rst);
- if (ret) {
- dev_err(dev, "Failed to take the PHY out of reset\n");
- return ret;
+ if (sp->nsubnodes == 1) {
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(sp->phy_rst);
+ if (ret) {
+ dev_err(dev, "Failed to take the PHY out of reset\n");
+ return ret;
+ }
}
/* Take the PHY lane group out of reset */
@@ -401,6 +594,26 @@ static int cdns_sierra_phy_on(struct phy *gphy)
return ret;
}
+ if (ins->phy_type == TYPE_PCIE || ins->phy_type == TYPE_USB) {
+ ret = regmap_field_read_poll_timeout(sp->phy_iso_link_ctrl_1[ins->mlane],
+ val, !val, 1000, PLL_LOCK_TIME);
+ if (ret) {
+ dev_err(dev, "Timeout waiting for PHY status ready\n");
+ return ret;
+ }
+ }
+
+ /*
+ * Wait for cmn_ready assertion
+ * PHY_PMA_CMN_CTRL[0] == 1
+ */
+ ret = regmap_field_read_poll_timeout(sp->pma_cmn_ready, val, val,
+ 1000, PLL_LOCK_TIME);
+ if (ret) {
+ dev_err(dev, "Timeout waiting for CMN ready\n");
+ return ret;
+ }
+
ret = regmap_field_read_poll_timeout(sp->pllctrl_lock[ins->mlane],
val, val, 1000, PLL_LOCK_TIME);
if (ret < 0)
@@ -436,11 +649,25 @@ static const struct phy_ops ops = {
static u8 cdns_sierra_pll_mux_get_parent(struct clk_hw *hw)
{
struct cdns_sierra_pll_mux *mux = to_cdns_sierra_pll_mux(hw);
+ struct regmap_field *plllc1en_field = mux->plllc1en_field;
+ struct regmap_field *termen_field = mux->termen_field;
struct regmap_field *field = mux->pfdclk_sel_preg;
unsigned int val;
+ int index;
regmap_field_read(field, &val);
- return clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table, 0, val);
+
+ if (strstr(clk_hw_get_name(hw), clk_names[CDNS_SIERRA_PLL_CMNLC1])) {
+ index = clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table[CMN_PLLLC1], 0, val);
+ if (index == 1) {
+ regmap_field_write(plllc1en_field, 1);
+ regmap_field_write(termen_field, 1);
+ }
+ } else {
+ index = clk_mux_val_to_index(hw, cdns_sierra_pll_mux_table[CMN_PLLLC], 0, val);
+ }
+
+ return index;
}
static int cdns_sierra_pll_mux_set_parent(struct clk_hw *hw, u8 index)
@@ -458,7 +685,11 @@ static int cdns_sierra_pll_mux_set_parent(struct clk_hw *hw, u8 index)
ret |= regmap_field_write(termen_field, 1);
}
- val = cdns_sierra_pll_mux_table[index];
+ if (strstr(clk_hw_get_name(hw), clk_names[CDNS_SIERRA_PLL_CMNLC1]))
+ val = cdns_sierra_pll_mux_table[CMN_PLLLC1][index];
+ else
+ val = cdns_sierra_pll_mux_table[CMN_PLLLC][index];
+
ret |= regmap_field_write(field, val);
return ret;
@@ -496,8 +727,8 @@ static int cdns_sierra_pll_mux_register(struct cdns_sierra_phy *sp,
for (i = 0; i < num_parents; i++) {
clk = sp->input_clks[pll_mux_parent_index[clk_index][i]];
if (IS_ERR_OR_NULL(clk)) {
- dev_err(dev, "No parent clock for derived_refclk\n");
- return PTR_ERR(clk);
+ dev_err(dev, "No parent clock for PLL mux clocks\n");
+ return IS_ERR(clk) ? PTR_ERR(clk) : -ENOENT;
}
parent_names[i] = __clk_get_name(clk);
}
@@ -551,6 +782,91 @@ static int cdns_sierra_phy_register_pll_mux(struct cdns_sierra_phy *sp)
return 0;
}
+static int cdns_sierra_derived_refclk_enable(struct clk_hw *hw)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+
+ regmap_field_write(derived_refclk->cmn_plllc_clk1_en_preg, 0x1);
+
+ /* Program the divider for a 100MHz output on ref_der_clk_out: 5GHz VCO / 50 = 100MHz */
+ regmap_field_write(derived_refclk->cmn_plllc_clk1outdiv_preg, 0x2E);
+
+ return 0;
+}
+
+static void cdns_sierra_derived_refclk_disable(struct clk_hw *hw)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+
+ regmap_field_write(derived_refclk->cmn_plllc_clk1_en_preg, 0);
+}
+
+static int cdns_sierra_derived_refclk_is_enabled(struct clk_hw *hw)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk = to_cdns_sierra_derived_refclk(hw);
+ int val;
+
+ regmap_field_read(derived_refclk->cmn_plllc_clk1_en_preg, &val);
+
+ return !!val;
+}
+
+static const struct clk_ops cdns_sierra_derived_refclk_ops = {
+ .enable = cdns_sierra_derived_refclk_enable,
+ .disable = cdns_sierra_derived_refclk_disable,
+ .is_enabled = cdns_sierra_derived_refclk_is_enabled,
+};
+
+static int cdns_sierra_derived_refclk_register(struct cdns_sierra_phy *sp)
+{
+ struct cdns_sierra_derived_refclk *derived_refclk;
+ struct device *dev = sp->dev;
+ struct regmap_field *field;
+ struct clk_init_data *init;
+ struct regmap *regmap;
+ char clk_name[100];
+ struct clk *clk;
+
+ derived_refclk = devm_kzalloc(dev, sizeof(*derived_refclk), GFP_KERNEL);
+ if (!derived_refclk)
+ return -ENOMEM;
+
+ snprintf(clk_name, sizeof(clk_name), "%s_%s", dev_name(dev),
+ clk_names[CDNS_SIERRA_DERIVED_REFCLK]);
+
+ init = &derived_refclk->clk_data;
+
+ init->ops = &cdns_sierra_derived_refclk_ops;
+ init->flags = 0;
+ init->name = clk_name;
+
+ regmap = sp->regmap_common_cdb;
+
+ field = devm_regmap_field_alloc(dev, regmap, cmn_plllc_clk1outdiv_preg);
+ if (IS_ERR(field)) {
+ dev_err(dev, "cmn_plllc_clk1outdiv_preg reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ derived_refclk->cmn_plllc_clk1outdiv_preg = field;
+
+ field = devm_regmap_field_alloc(dev, regmap, cmn_plllc_clk1_en_preg);
+ if (IS_ERR(field)) {
+ dev_err(dev, "cmn_plllc_clk1_en_preg reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ derived_refclk->cmn_plllc_clk1_en_preg = field;
+
+ derived_refclk->hw.init = init;
+
+ clk = devm_clk_register(dev, &derived_refclk->hw);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ sp->output_clks[CDNS_SIERRA_DERIVED_REFCLK] = clk;
+
+ return 0;
+}
+
static void cdns_sierra_clk_unregister(struct cdns_sierra_phy *sp)
{
struct device *dev = sp->dev;
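Once registered, the derived reference clock is simply a third entry in the PHY's clock provider, so a consumer picks it up through the standard clk API. A sketch (the con_id lookup assumes a matching clocks/clock-names wiring in DT, which is not shown in this patch):

    struct clk *der = devm_clk_get(dev, "refclk_der");  /* illustrative wiring */
    if (IS_ERR(der))
            return PTR_ERR(der);
    clk_prepare_enable(der);  /* -> .enable: sets CLK1 enable + output divider */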
@@ -571,6 +887,12 @@ static int cdns_sierra_clk_register(struct cdns_sierra_phy *sp)
return ret;
}
+ ret = cdns_sierra_derived_refclk_register(sp);
+ if (ret) {
+ dev_err(dev, "Failed to register derived refclk\n");
+ return ret;
+ }
+
sp->clk_data.clks = sp->output_clks;
sp->clk_data.clk_num = CDNS_SIERRA_OUTPUT_CLOCKS;
ret = of_clk_add_provider(node, of_clk_src_onecell_get, &sp->clk_data);
@@ -583,20 +905,37 @@ static int cdns_sierra_clk_register(struct cdns_sierra_phy *sp)
static int cdns_sierra_get_optional(struct cdns_sierra_inst *inst,
struct device_node *child)
{
+ u32 phy_type;
+
if (of_property_read_u32(child, "reg", &inst->mlane))
return -EINVAL;
if (of_property_read_u32(child, "cdns,num-lanes", &inst->num_lanes))
return -EINVAL;
- if (of_property_read_u32(child, "cdns,phy-type", &inst->phy_type))
+ if (of_property_read_u32(child, "cdns,phy-type", &phy_type))
+ return -EINVAL;
+
+ switch (phy_type) {
+ case PHY_TYPE_PCIE:
+ inst->phy_type = TYPE_PCIE;
+ break;
+ case PHY_TYPE_USB3:
+ inst->phy_type = TYPE_USB;
+ break;
+ case PHY_TYPE_QSGMII:
+ inst->phy_type = TYPE_QSGMII;
+ break;
+ default:
return -EINVAL;
+ }
+
+ inst->ssc_mode = EXTERNAL_SSC;
+ of_property_read_u32(child, "cdns,ssc-mode", &inst->ssc_mode);
return 0;
}
-static const struct of_device_id cdns_sierra_id_table[];
-
static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base,
u32 block_offset, u8 reg_offset_shift,
const struct regmap_config *config)
@@ -656,7 +995,7 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
sp->cmn_refrcv_refclk_termen_preg[i] = field;
}
- regmap = sp->regmap_phy_config_ctrl;
+ regmap = sp->regmap_phy_pcs_common_cdb;
field = devm_regmap_field_alloc(dev, regmap, phy_pll_cfg_1);
if (IS_ERR(field)) {
dev_err(dev, "PHY_PLL_CFG_1 reg field init failed\n");
@@ -664,6 +1003,14 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
}
sp->phy_pll_cfg_1 = field;
+ regmap = sp->regmap_phy_pma_common_cdb;
+ field = devm_regmap_field_alloc(dev, regmap, pma_cmn_ready);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_PMA_CMN_CTRL reg field init failed\n");
+ return PTR_ERR(field);
+ }
+ sp->pma_cmn_ready = field;
+
for (i = 0; i < SIERRA_MAX_LANES; i++) {
regmap = sp->regmap_lane_cdb[i];
field = devm_regmap_field_alloc(dev, regmap, pllctrl_lock);
@@ -671,7 +1018,17 @@ static int cdns_regfield_init(struct cdns_sierra_phy *sp)
dev_err(dev, "P%d_ENABLE reg field init failed\n", i);
return PTR_ERR(field);
}
- sp->pllctrl_lock[i] = field;
+ sp->pllctrl_lock[i] = field;
+ }
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ regmap = sp->regmap_phy_pcs_lane_cdb[i];
+ field = devm_regmap_field_alloc(dev, regmap, phy_iso_link_ctrl_1);
+ if (IS_ERR(field)) {
+ dev_err(dev, "PHY_ISO_LINK_CTRL reg field init for lane %d failed\n", i);
+ return PTR_ERR(field);
+ }
+ sp->phy_iso_link_ctrl_1[i] = field;
}
return 0;
@@ -708,14 +1065,49 @@ static int cdns_regmap_init_blocks(struct cdns_sierra_phy *sp,
}
sp->regmap_common_cdb = regmap;
- block_offset = SIERRA_PHY_CONFIG_CTRL_OFFSET(block_offset_shift);
+ block_offset = SIERRA_PHY_PCS_COMMON_OFFSET(block_offset_shift);
regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
- &cdns_sierra_phy_config_ctrl_config);
+ &cdns_sierra_phy_pcs_cmn_cdb_config);
if (IS_ERR(regmap)) {
- dev_err(dev, "Failed to init PHY config and control regmap\n");
+ dev_err(dev, "Failed to init PHY PCS common CDB regmap\n");
return PTR_ERR(regmap);
}
- sp->regmap_phy_config_ctrl = regmap;
+ sp->regmap_phy_pcs_common_cdb = regmap;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_PHY_PCS_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_sierra_phy_pcs_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PCS lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pcs_lane_cdb[i] = regmap;
+ }
+
+ block_offset = SIERRA_PHY_PMA_COMMON_OFFSET(block_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset, reg_offset_shift,
+ &cdns_sierra_phy_pma_cmn_cdb_config);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PMA common CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pma_common_cdb = regmap;
+
+ for (i = 0; i < SIERRA_MAX_LANES; i++) {
+ block_offset = SIERRA_PHY_PMA_LANE_CDB_OFFSET(i, block_offset_shift,
+ reg_offset_shift);
+ regmap = cdns_regmap_init(dev, base, block_offset,
+ reg_offset_shift,
+ &cdns_sierra_phy_pma_lane_cdb_config[i]);
+ if (IS_ERR(regmap)) {
+ dev_err(dev, "Failed to init PHY PMA lane CDB regmap\n");
+ return PTR_ERR(regmap);
+ }
+ sp->regmap_phy_pma_lane_cdb[i] = regmap;
+ }
return 0;
}
@@ -824,15 +1216,129 @@ static int cdns_sierra_phy_get_resets(struct cdns_sierra_phy *sp,
return 0;
}
+static int cdns_sierra_phy_configure_multilink(struct cdns_sierra_phy *sp)
+{
+ const struct cdns_sierra_data *init_data = sp->init_data;
+ struct cdns_sierra_vals *pma_cmn_vals, *pma_ln_vals;
+ enum cdns_sierra_phy_type phy_t1, phy_t2;
+ struct cdns_sierra_vals *phy_pma_ln_vals;
+ const struct cdns_reg_pairs *reg_pairs;
+ struct cdns_sierra_vals *pcs_cmn_vals;
+ int i, j, node, mlane, num_lanes, ret;
+ enum cdns_sierra_ssc_mode ssc;
+ struct regmap *regmap;
+ u32 num_regs;
+
+ /* Maximum 2 links (subnodes) are supported */
+ if (sp->nsubnodes != 2)
+ return -EINVAL;
+
+ clk_set_rate(sp->input_clks[CMN_REFCLK_DIG_DIV], 25000000);
+ clk_set_rate(sp->input_clks[CMN_REFCLK1_DIG_DIV], 25000000);
+
+ /* PHY configured to use both PLL LC and LC1 */
+ regmap_field_write(sp->phy_pll_cfg_1, 0x1);
+
+ phy_t1 = sp->phys[0].phy_type;
+ phy_t2 = sp->phys[1].phy_type;
+
+ /*
+ * PHY configuration for multi-link operation is done in two steps.
+ * e.g. consider a 4-lane PHY with PCIe using 2 lanes and QSGMII using the other 2 lanes.
+ * Sierra PHY has 2 PLLs, viz. PLLLC and PLLLC1. So in this case, PLLLC is used for PCIe
+ * and PLLLC1 is used for QSGMII. PHY is configured in two steps as described below.
+ *
+ * [1] For first step, phy_t1 = TYPE_PCIE and phy_t2 = TYPE_QSGMII
+ * So the register values are selected as [TYPE_PCIE][TYPE_QSGMII][ssc].
+ * This will configure PHY registers associated for PCIe (i.e. first protocol)
+ * involving PLLLC registers and registers for first 2 lanes of PHY.
+ * [2] In second step, the variables phy_t1 and phy_t2 are swapped. So now,
+ * phy_t1 = TYPE_QSGMII and phy_t2 = TYPE_PCIE. And the register values are selected as
+ * [TYPE_QSGMII][TYPE_PCIE][ssc].
+ * This will configure PHY registers associated for QSGMII (i.e. second protocol)
+ * involving PLLLC1 registers and registers for other 2 lanes of PHY.
+ *
+ * This completes the PHY configuration for multilink operation. This approach enables
+ * dividing the large number of PHY register configurations into smaller,
+ * protocol-specific groups.
+ */
+ for (node = 0; node < sp->nsubnodes; node++) {
+ if (node == 1) {
+ /*
+ * If first link with phy_t1 is configured, then configure the PHY for
+ * second link with phy_t2. Get the array values as [phy_t2][phy_t1][ssc].
+ */
+ swap(phy_t1, phy_t2);
+ }
+
+ mlane = sp->phys[node].mlane;
+ ssc = sp->phys[node].ssc_mode;
+ num_lanes = sp->phys[node].num_lanes;
+
+ /* PHY PCS common registers configurations */
+ pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc];
+ if (pcs_cmn_vals) {
+ reg_pairs = pcs_cmn_vals->reg_pairs;
+ num_regs = pcs_cmn_vals->num_regs;
+ regmap = sp->regmap_phy_pcs_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+ /* PHY PMA lane registers configurations */
+ phy_pma_ln_vals = init_data->phy_pma_ln_vals[phy_t1][phy_t2][ssc];
+ if (phy_pma_ln_vals) {
+ reg_pairs = phy_pma_ln_vals->reg_pairs;
+ num_regs = phy_pma_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = sp->regmap_phy_pma_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+ /* PMA common registers configurations */
+ pma_cmn_vals = init_data->pma_cmn_vals[phy_t1][phy_t2][ssc];
+ if (pma_cmn_vals) {
+ reg_pairs = pma_cmn_vals->reg_pairs;
+ num_regs = pma_cmn_vals->num_regs;
+ regmap = sp->regmap_common_cdb;
+ for (i = 0; i < num_regs; i++)
+ regmap_write(regmap, reg_pairs[i].off, reg_pairs[i].val);
+ }
+
+ /* PMA lane registers configurations */
+ pma_ln_vals = init_data->pma_ln_vals[phy_t1][phy_t2][ssc];
+ if (pma_ln_vals) {
+ reg_pairs = pma_ln_vals->reg_pairs;
+ num_regs = pma_ln_vals->num_regs;
+ for (i = 0; i < num_lanes; i++) {
+ regmap = sp->regmap_lane_cdb[i + mlane];
+ for (j = 0; j < num_regs; j++)
+ regmap_write(regmap, reg_pairs[j].off, reg_pairs[j].val);
+ }
+ }
+
+ if (phy_t1 == TYPE_QSGMII)
+ reset_control_deassert(sp->phys[node].lnk_rst);
+ }
+
+ /* Take the PHY out of reset */
+ ret = reset_control_deassert(sp->phy_rst);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
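To make the two-pass indexing concrete, a worked example as comments, assuming the PCIe + QSGMII split described above with external SSC:

    /*
     * nsubnodes == 2, phys[0] = PCIe on lanes 0-1, phys[1] = QSGMII on lanes 2-3.
     *
     * node 0: phy_t1 = TYPE_PCIE, phy_t2 = TYPE_QSGMII
     *         -> *_vals[TYPE_PCIE][TYPE_QSGMII][EXTERNAL_SSC]
     *         programs PLLLC and lanes 0-1.
     * node 1: swap() -> phy_t1 = TYPE_QSGMII, phy_t2 = TYPE_PCIE
     *         -> *_vals[TYPE_QSGMII][TYPE_PCIE][EXTERNAL_SSC]
     *         programs PLLLC1 and lanes 2-3.
     */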
static int cdns_sierra_phy_probe(struct platform_device *pdev)
{
struct cdns_sierra_phy *sp;
struct phy_provider *phy_provider;
struct device *dev = &pdev->dev;
- const struct of_device_id *match;
- struct cdns_sierra_data *data;
+ const struct cdns_sierra_data *data;
unsigned int id_value;
- int i, ret, node = 0;
+ int ret, node = 0;
void __iomem *base;
struct device_node *dn = dev->of_node, *child;
@@ -840,12 +1346,10 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
return -ENODEV;
/* Get init data for this PHY */
- match = of_match_device(cdns_sierra_id_table, dev);
- if (!match)
+ data = of_device_get_match_data(dev);
+ if (!data)
return -EINVAL;
- data = (struct cdns_sierra_data *)match->data;
-
sp = devm_kzalloc(dev, sizeof(*sp), GFP_KERNEL);
if (!sp)
return -ENOMEM;
@@ -912,7 +1416,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
dev_err(dev, "failed to get reset %s\n",
child->full_name);
ret = PTR_ERR(sp->phys[node].lnk_rst);
- goto put_child2;
+ of_node_put(child);
+ goto put_control;
}
if (!sp->autoconf) {
@@ -920,7 +1425,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
if (ret) {
dev_err(dev, "missing property in node %s\n",
child->name);
- goto put_child;
+ of_node_put(child);
+ reset_control_put(sp->phys[node].lnk_rst);
+ goto put_control;
}
}
@@ -930,7 +1437,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
if (IS_ERR(gphy)) {
ret = PTR_ERR(gphy);
- goto put_child;
+ of_node_put(child);
+ reset_control_put(sp->phys[node].lnk_rst);
+ goto put_control;
}
sp->phys[node].phy = gphy;
phy_set_drvdata(gphy, &sp->phys[node]);
@@ -942,23 +1451,28 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev)
if (sp->num_lanes > SIERRA_MAX_LANES) {
ret = -EINVAL;
dev_err(dev, "Invalid lane configuration\n");
- goto put_child2;
+ goto put_control;
}
/* If more than one subnode, configure the PHY as multilink */
- if (!sp->autoconf && sp->nsubnodes > 1)
- regmap_field_write(sp->phy_pll_cfg_1, 0x1);
+ if (!sp->autoconf && sp->nsubnodes > 1) {
+ ret = cdns_sierra_phy_configure_multilink(sp);
+ if (ret)
+ goto put_control;
+ }
pm_runtime_enable(dev);
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- return PTR_ERR_OR_ZERO(phy_provider);
-
-put_child:
- node++;
-put_child2:
- for (i = 0; i < node; i++)
- reset_control_put(sp->phys[i].lnk_rst);
- of_node_put(child);
+ if (IS_ERR(phy_provider)) {
+ ret = PTR_ERR(phy_provider);
+ goto put_control;
+ }
+
+ return 0;
+
+put_control:
+ while (--node >= 0)
+ reset_control_put(sp->phys[node].lnk_rst);
clk_disable:
cdns_sierra_phy_disable_clocks(sp);
reset_control_assert(sp->apb_rst);
@@ -991,6 +1505,449 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev)
return 0;
}
+/* QSGMII PHY PMA lane configuration */
+static struct cdns_reg_pairs qsgmii_phy_pma_ln_regs[] = {
+ {0x9010, SIERRA_PHY_PMA_XCVR_CTRL}
+};
+
+static struct cdns_sierra_vals qsgmii_phy_pma_ln_vals = {
+ .reg_pairs = qsgmii_phy_pma_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_phy_pma_ln_regs),
+};
+
+/* QSGMII refclk 100MHz, 20b, opt1, No BW cal, no ssc, PLL LC1 */
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_cmn_regs[] = {
+ {0x2085, SIERRA_CMN_PLLLC1_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_BWCAL_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC1_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+static const struct cdns_reg_pairs qsgmii_100_no_ssc_plllc1_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x0252, SIERRA_DET_STANDEC_E_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x0FFE, SIERRA_PSC_RX_A0_PREG},
+ {0x0011, SIERRA_PLLCTRL_SUBRATE_PREG},
+ {0x0001, SIERRA_PLLCTRL_GEN_A_PREG},
+ {0x5233, SIERRA_PLLCTRL_CPGAIN_MODE_PREG},
+ {0x0000, SIERRA_DRVCTRL_ATTEN_PREG},
+ {0x0089, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x3C3C, SIERRA_CREQ_CCLKDET_MODE01_PREG},
+ {0x3222, SIERRA_CREQ_FSMCLK_SEL_PREG},
+ {0x0000, SIERRA_CREQ_EQ_CTRL_PREG},
+ {0x8422, SIERRA_CTLELUT_CTRL_PREG},
+ {0x4111, SIERRA_DFE_ECMP_RATESEL_PREG},
+ {0x4111, SIERRA_DFE_SMP_RATESEL_PREG},
+ {0x0002, SIERRA_DEQ_PHALIGN_CTRL},
+ {0x9595, SIERRA_DEQ_VGATUNE_CTRL_PREG},
+ {0x0186, SIERRA_DEQ_GLUT0},
+ {0x0186, SIERRA_DEQ_GLUT1},
+ {0x0186, SIERRA_DEQ_GLUT2},
+ {0x0186, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x0861, SIERRA_DEQ_ALUT0},
+ {0x07E0, SIERRA_DEQ_ALUT1},
+ {0x079E, SIERRA_DEQ_ALUT2},
+ {0x071D, SIERRA_DEQ_ALUT3},
+ {0x03F5, SIERRA_DEQ_DFETAP_CTRL_PREG},
+ {0x0C01, SIERRA_DEQ_TAU_CTRL1_FAST_MAINT_PREG},
+ {0x3C40, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C04, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0033, SIERRA_DEQ_PICTRL_PREG},
+ {0x0660, SIERRA_CPICAL_TMRVAL_MODE0_PREG},
+ {0x00D5, SIERRA_CPI_OUTBUF_RATESEL_PREG},
+ {0x0B6D, SIERRA_CPI_RESBIAS_BIN_PREG},
+ {0x0102, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x0002, SIERRA_RXBUFFER_RCDFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_cmn_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_plllc1_cmn_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_cmn_regs),
+};
+
+static struct cdns_sierra_vals qsgmii_100_no_ssc_plllc1_ln_vals = {
+ .reg_pairs = qsgmii_100_no_ssc_plllc1_ln_regs,
+ .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_plllc1_ln_regs),
+};
+
+/* PCIE PHY PCS common configuration */
+static struct cdns_reg_pairs pcie_phy_pcs_cmn_regs[] = {
+ {0x0430, SIERRA_PHY_PIPE_CMN_CTRL1}
+};
+
+static struct cdns_sierra_vals pcie_phy_pcs_cmn_vals = {
+ .reg_pairs = pcie_phy_pcs_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_phy_pcs_cmn_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_no_ssc_plllc_cmn_regs[] = {
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_no_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_no_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_no_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_no_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_no_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_no_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_int_ssc_plllc_cmn_regs[] = {
+ {0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
+ {0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
+ {0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
+ {0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
+ {0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
+ {0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_int_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_int_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_int_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_int_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_int_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_int_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc, pcie_links_using_plllc, pipe_bw_3 */
+static const struct cdns_reg_pairs pcie_100_ext_ssc_plllc_cmn_regs[] = {
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x1B1B, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG}
+};
+
+/*
+ * refclk100MHz_32b_PCIe_ln_ext_ssc, multilink, using_plllc,
+ * cmn_pllcy_anaclk0_1Ghz, xcvr_pllclk_fullrt_500mhz
+ */
+static const struct cdns_reg_pairs ml_pcie_100_ext_ssc_ln_regs[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x0004, SIERRA_PSC_LN_A3_PREG},
+ {0x0004, SIERRA_PSC_LN_A4_PREG},
+ {0x0004, SIERRA_PSC_LN_IDLE_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_plllc_cmn_vals = {
+ .reg_pairs = pcie_100_ext_ssc_plllc_cmn_regs,
+ .num_regs = ARRAY_SIZE(pcie_100_ext_ssc_plllc_cmn_regs),
+};
+
+static struct cdns_sierra_vals ml_pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = ml_pcie_100_ext_ssc_ln_regs,
+ .num_regs = ARRAY_SIZE(ml_pcie_100_ext_ssc_ln_regs),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_no_ssc */
+static const struct cdns_reg_pairs cdns_pcie_cmn_regs_no_ssc[] = {
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x2105, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}
+};
+
+/* refclk100MHz_32b_PCIe_ln_no_ssc */
+static const struct cdns_reg_pairs cdns_pcie_ln_regs_no_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x8055, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x80BB, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x8351, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x8349, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_no_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_no_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_no_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_no_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_no_ssc),
+};
+
+/* refclk100MHz_32b_PCIe_cmn_pll_int_ssc */
+static const struct cdns_reg_pairs cdns_pcie_cmn_regs_int_ssc[] = {
+ {0x000E, SIERRA_CMN_PLLLC_MODE_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
+ {0x4006, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG},
+ {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG},
+ {0x0581, SIERRA_CMN_PLLLC_DSMCORR_PREG},
+ {0x7F80, SIERRA_CMN_PLLLC_SS_PREG},
+ {0x0041, SIERRA_CMN_PLLLC_SS_AMP_STEP_SIZE_PREG},
+ {0x0464, SIERRA_CMN_PLLLC_SSTWOPT_PREG},
+ {0x0D0D, SIERRA_CMN_PLLLC_SS_TIME_STEPSIZE_MODE_PREG},
+ {0x0060, SIERRA_CMN_PLLLC_LOCK_DELAY_CTRL_PREG}
+};
+
+/* refclk100MHz_32b_PCIe_ln_int_ssc */
+static const struct cdns_reg_pairs cdns_pcie_ln_regs_int_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
+ {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
+ {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
+ {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
+ {0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_int_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_int_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_int_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_int_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_int_ssc),
+};
+
/* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */
static const struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
{0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG},
@@ -1002,13 +1959,62 @@ static const struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = {
/* refclk100MHz_32b_PCIe_ln_ext_ssc */
static const struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = {
+ {0xFC08, SIERRA_DET_STANDEC_A_PREG},
+ {0x001D, SIERRA_PSM_A3IN_TMR_PREG},
+ {0x1555, SIERRA_DFE_BIASTRIM_PREG},
+ {0x9703, SIERRA_DRVCTRL_BOOST_PREG},
{0x813E, SIERRA_CLKPATHCTRL_TMR_PREG},
{0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE1_PREG},
{0x808F, SIERRA_RX_CREQ_FLTR_A_MODE0_PREG},
+ {0x0002, SIERRA_CREQ_DCBIASATTEN_OVR_PREG},
+ {0x9800, SIERRA_RX_CTLE_CAL_PREG},
{0x033C, SIERRA_RX_CTLE_MAINTENANCE_PREG},
- {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG}
+ {0x44CC, SIERRA_CREQ_EQ_OPEN_EYE_THRESH_PREG},
+ {0x5624, SIERRA_DEQ_CONCUR_CTRL2_PREG},
+ {0x000F, SIERRA_DEQ_EPIPWR_CTRL2_PREG},
+ {0x00FF, SIERRA_DEQ_FAST_MAINT_CYCLES_PREG},
+ {0x4C4C, SIERRA_DEQ_ERRCMP_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_OFFSET_CTRL_PREG},
+ {0x02FA, SIERRA_DEQ_GAIN_CTRL_PREG},
+ {0x0041, SIERRA_DEQ_GLUT0},
+ {0x0082, SIERRA_DEQ_GLUT1},
+ {0x00C3, SIERRA_DEQ_GLUT2},
+ {0x0145, SIERRA_DEQ_GLUT3},
+ {0x0186, SIERRA_DEQ_GLUT4},
+ {0x09E7, SIERRA_DEQ_ALUT0},
+ {0x09A6, SIERRA_DEQ_ALUT1},
+ {0x0965, SIERRA_DEQ_ALUT2},
+ {0x08E3, SIERRA_DEQ_ALUT3},
+ {0x00FA, SIERRA_DEQ_DFETAP0},
+ {0x00FA, SIERRA_DEQ_DFETAP1},
+ {0x00FA, SIERRA_DEQ_DFETAP2},
+ {0x00FA, SIERRA_DEQ_DFETAP3},
+ {0x00FA, SIERRA_DEQ_DFETAP4},
+ {0x000F, SIERRA_DEQ_PRECUR_PREG},
+ {0x0280, SIERRA_DEQ_POSTCUR_PREG},
+ {0x8F00, SIERRA_DEQ_POSTCUR_DECR_PREG},
+ {0x3C0F, SIERRA_DEQ_TAU_CTRL1_SLOW_MAINT_PREG},
+ {0x1C0C, SIERRA_DEQ_TAU_CTRL2_PREG},
+ {0x0100, SIERRA_DEQ_TAU_CTRL3_PREG},
+ {0x5E82, SIERRA_DEQ_OPENEYE_CTRL_PREG},
+ {0x002B, SIERRA_CPI_TRIM_PREG},
+ {0x0003, SIERRA_EPI_CTRL_PREG},
+ {0x803F, SIERRA_SDFILT_H2L_A_PREG},
+ {0x0004, SIERRA_RXBUFFER_CTLECTRL_PREG},
+ {0x2010, SIERRA_RXBUFFER_RCDFECTRL_PREG},
+ {0x4432, SIERRA_RXBUFFER_DFECTRL_PREG}
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_cmn_vals = {
+ .reg_pairs = cdns_pcie_cmn_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
+};
+
+static struct cdns_sierra_vals pcie_100_ext_ssc_ln_vals = {
+ .reg_pairs = cdns_pcie_ln_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
};
/* refclk100MHz_20b_USB_cmn_pll_ext_ssc */
@@ -1118,32 +2124,167 @@ static const struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = {
{0x4243, SIERRA_RXBUFFER_DFECTRL_PREG}
};
+static struct cdns_sierra_vals usb_100_ext_ssc_cmn_vals = {
+ .reg_pairs = cdns_usb_cmn_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
+};
+
+static struct cdns_sierra_vals usb_100_ext_ssc_ln_vals = {
+ .reg_pairs = cdns_usb_ln_regs_ext_ssc,
+ .num_regs = ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
+};
+
static const struct cdns_sierra_data cdns_map_sierra = {
- SIERRA_MACRO_ID,
- 0x2,
- 0x2,
- ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
- cdns_pcie_cmn_regs_ext_ssc,
- cdns_pcie_ln_regs_ext_ssc,
- cdns_usb_cmn_regs_ext_ssc,
- cdns_usb_ln_regs_ext_ssc,
+ .id_value = SIERRA_MACRO_ID,
+ .block_offset_shift = 0x2,
+ .reg_offset_shift = 0x2,
+ .pcs_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .pma_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ },
+ },
+ },
+ .pma_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ },
+ },
+ },
};
static const struct cdns_sierra_data cdns_ti_map_sierra = {
- SIERRA_MACRO_ID,
- 0x0,
- 0x1,
- ARRAY_SIZE(cdns_pcie_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_pcie_ln_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_cmn_regs_ext_ssc),
- ARRAY_SIZE(cdns_usb_ln_regs_ext_ssc),
- cdns_pcie_cmn_regs_ext_ssc,
- cdns_pcie_ln_regs_ext_ssc,
- cdns_usb_cmn_regs_ext_ssc,
- cdns_usb_ln_regs_ext_ssc,
+ .id_value = SIERRA_MACRO_ID,
+ .block_offset_shift = 0x0,
+ .reg_offset_shift = 0x1,
+ .pcs_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_phy_pcs_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ [INTERNAL_SSC] = &pcie_phy_pcs_cmn_vals,
+ },
+ },
+ },
+ .phy_pma_ln_vals = {
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_phy_pma_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_phy_pma_ln_vals,
+ },
+ },
+ },
+ .pma_cmn_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &pcie_100_no_ssc_plllc_cmn_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_plllc_cmn_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_plllc_cmn_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_cmn_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_cmn_vals,
+ },
+ },
+ },
+ .pma_ln_vals = {
+ [TYPE_PCIE] = {
+ [TYPE_NONE] = {
+ [NO_SSC] = &pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &pcie_100_int_ssc_ln_vals,
+ },
+ [TYPE_QSGMII] = {
+ [NO_SSC] = &ml_pcie_100_no_ssc_ln_vals,
+ [EXTERNAL_SSC] = &ml_pcie_100_ext_ssc_ln_vals,
+ [INTERNAL_SSC] = &ml_pcie_100_int_ssc_ln_vals,
+ },
+ },
+ [TYPE_USB] = {
+ [TYPE_NONE] = {
+ [EXTERNAL_SSC] = &usb_100_ext_ssc_ln_vals,
+ },
+ },
+ [TYPE_QSGMII] = {
+ [TYPE_PCIE] = {
+ [NO_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [EXTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ [INTERNAL_SSC] = &qsgmii_100_no_ssc_plllc1_ln_vals,
+ },
+ },
+ },
};
static const struct of_device_id cdns_sierra_id_table[] = {
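The cdns_sierra_data initializers above replace the old positional fields with designated, three-dimensional lookup tables indexed by the PHY type being configured, the PHY type of the companion link (TYPE_NONE for a single link), and the SSC mode. A minimal sketch of how such a table might be consumed, assuming cdns_reg_pairs stores a value followed by a register offset as the initializers suggest (the helper name, field names, and plain-int indices here are illustrative, not taken from this series):

static void cdns_sierra_write_vals(struct regmap *regmap,
				   const struct cdns_sierra_data *data,
				   int phy_t1, int phy_t2, int ssc)
{
	struct cdns_sierra_vals *vals = data->pma_ln_vals[phy_t1][phy_t2][ssc];
	const struct cdns_reg_pairs *regs;
	u32 i;

	if (!vals)
		return;		/* no overrides for this type/type/SSC combination */

	regs = vals->reg_pairs;
	for (i = 0; i < vals->num_regs; i++)
		regmap_write(regmap, regs[i].off, regs[i].val);
}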
diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c
index 5786166133d3..7c4b8050485f 100644
--- a/drivers/phy/cadence/phy-cadence-torrent.c
+++ b/drivers/phy/cadence/phy-cadence-torrent.c
@@ -2278,7 +2278,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals;
enum cdns_torrent_ref_clk ref_clk = cdns_phy->ref_clk_rate;
struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals;
- enum cdns_torrent_phy_type phy_t1, phy_t2, tmp_phy_type;
+ enum cdns_torrent_phy_type phy_t1, phy_t2;
struct cdns_torrent_vals *pcs_cmn_vals;
int i, j, node, mlane, num_lanes, ret;
struct cdns_reg_pairs *reg_pairs;
@@ -2304,9 +2304,7 @@ int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy)
* configure the PHY for second link with phy_t2.
* Get the array values as [phy_t2][phy_t1][ssc].
*/
- tmp_phy_type = phy_t1;
- phy_t1 = phy_t2;
- phy_t2 = tmp_phy_type;
+ swap(phy_t1, phy_t2);
}
mlane = cdns_phy->phys[node].mlane;
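The Torrent hunk above replaces an open-coded three-statement exchange with the kernel's swap() macro (provided by the common kernel headers; linux/minmax.h on recent trees). A standalone before/after illustration of the same exchange:

	int phy_t1 = 1, phy_t2 = 2, tmp;

	/* Before: open-coded exchange */
	tmp = phy_t1;
	phy_t1 = phy_t2;
	phy_t2 = tmp;

	/* After: the same effect in a single statement */
	swap(phy_t1, phy_t2);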
diff --git a/drivers/phy/freescale/Kconfig b/drivers/phy/freescale/Kconfig
index 320630ffe3cd..c3669c28ea9f 100644
--- a/drivers/phy/freescale/Kconfig
+++ b/drivers/phy/freescale/Kconfig
@@ -14,3 +14,11 @@ config PHY_MIXEL_MIPI_DPHY
help
Enable this to add support for the Mixel DSI PHY as found
on NXP's i.MX8 family of SOCs.
+
+config PHY_FSL_IMX8M_PCIE
+ tristate "Freescale i.MX8M PCIE PHY"
+ depends on OF && HAS_IOMEM
+ select GENERIC_PHY
+ help
+	  Enable this to add support for the PCIe PHY as found on the
+	  i.MX8M family of SoCs.
diff --git a/drivers/phy/freescale/Makefile b/drivers/phy/freescale/Makefile
index 1d02e3869b45..55d07c742ab0 100644
--- a/drivers/phy/freescale/Makefile
+++ b/drivers/phy/freescale/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_PHY_FSL_IMX8MQ_USB) += phy-fsl-imx8mq-usb.o
obj-$(CONFIG_PHY_MIXEL_MIPI_DPHY) += phy-fsl-imx8-mipi-dphy.o
+obj-$(CONFIG_PHY_FSL_IMX8M_PCIE) += phy-fsl-imx8m-pcie.o
diff --git a/drivers/phy/freescale/phy-fsl-imx8m-pcie.c b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
new file mode 100644
index 000000000000..04b1aafb29f4
--- /dev/null
+++ b/drivers/phy/freescale/phy-fsl-imx8m-pcie.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021 NXP
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx7-iomuxc-gpr.h>
+#include <linux/module.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <dt-bindings/phy/phy-imx8-pcie.h>
+
+#define IMX8MM_PCIE_PHY_CMN_REG061 0x184
+#define ANA_PLL_CLK_OUT_TO_EXT_IO_EN BIT(0)
+#define IMX8MM_PCIE_PHY_CMN_REG062 0x188
+#define ANA_PLL_CLK_OUT_TO_EXT_IO_SEL BIT(3)
+#define IMX8MM_PCIE_PHY_CMN_REG063 0x18C
+#define AUX_PLL_REFCLK_SEL_SYS_PLL GENMASK(7, 6)
+#define IMX8MM_PCIE_PHY_CMN_REG064 0x190
+#define ANA_AUX_RX_TX_SEL_TX BIT(7)
+#define ANA_AUX_RX_TERM_GND_EN BIT(3)
+#define ANA_AUX_TX_TERM BIT(2)
+#define IMX8MM_PCIE_PHY_CMN_REG065 0x194
+#define ANA_AUX_RX_TERM (BIT(7) | BIT(4))
+#define ANA_AUX_TX_LVL GENMASK(3, 0)
+#define IMX8MM_PCIE_PHY_CMN_REG75 0x1D4
+#define PCIE_PHY_CMN_REG75_PLL_DONE 0x3
+#define PCIE_PHY_TRSV_REG5 0x414
+#define PCIE_PHY_TRSV_REG5_GEN1_DEEMP 0x2D
+#define PCIE_PHY_TRSV_REG6 0x418
+#define PCIE_PHY_TRSV_REG6_GEN2_DEEMP 0xF
+
+#define IMX8MM_GPR_PCIE_REF_CLK_SEL GENMASK(25, 24)
+#define IMX8MM_GPR_PCIE_REF_CLK_PLL FIELD_PREP(IMX8MM_GPR_PCIE_REF_CLK_SEL, 0x3)
+#define IMX8MM_GPR_PCIE_REF_CLK_EXT FIELD_PREP(IMX8MM_GPR_PCIE_REF_CLK_SEL, 0x2)
+#define IMX8MM_GPR_PCIE_AUX_EN BIT(19)
+#define IMX8MM_GPR_PCIE_CMN_RST BIT(18)
+#define IMX8MM_GPR_PCIE_POWER_OFF BIT(17)
+#define IMX8MM_GPR_PCIE_SSC_EN BIT(16)
+#define IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE BIT(9)
+
+struct imx8_pcie_phy {
+ void __iomem *base;
+ struct clk *clk;
+ struct phy *phy;
+ struct regmap *iomuxc_gpr;
+ struct reset_control *reset;
+ u32 refclk_pad_mode;
+ u32 tx_deemph_gen1;
+ u32 tx_deemph_gen2;
+ bool clkreq_unused;
+};
+
+static int imx8_pcie_phy_init(struct phy *phy)
+{
+ int ret;
+ u32 val, pad_mode;
+ struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
+
+ reset_control_assert(imx8_phy->reset);
+
+ pad_mode = imx8_phy->refclk_pad_mode;
+	/* Set AUX_EN_OVERRIDE to 1'b0 when the CLKREQ# signal is not hooked up */
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE,
+ imx8_phy->clkreq_unused ?
+ 0 : IMX8MM_GPR_PCIE_AUX_EN_OVERRIDE);
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_AUX_EN,
+ IMX8MM_GPR_PCIE_AUX_EN);
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_POWER_OFF, 0);
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_SSC_EN, 0);
+
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_REF_CLK_SEL,
+ pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT ?
+ IMX8MM_GPR_PCIE_REF_CLK_EXT :
+ IMX8MM_GPR_PCIE_REF_CLK_PLL);
+ usleep_range(100, 200);
+
+ /* Do the PHY common block reset */
+ regmap_update_bits(imx8_phy->iomuxc_gpr, IOMUXC_GPR14,
+ IMX8MM_GPR_PCIE_CMN_RST,
+ IMX8MM_GPR_PCIE_CMN_RST);
+ usleep_range(200, 500);
+
+ if (pad_mode == IMX8_PCIE_REFCLK_PAD_INPUT) {
+ /* Configure the pad as input */
+ val = readl(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
+ writel(val & ~ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
+ } else if (pad_mode == IMX8_PCIE_REFCLK_PAD_OUTPUT) {
+ /* Configure the PHY to output the refclock via pad */
+ writel(ANA_PLL_CLK_OUT_TO_EXT_IO_EN,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG061);
+ writel(ANA_PLL_CLK_OUT_TO_EXT_IO_SEL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG062);
+ writel(AUX_PLL_REFCLK_SEL_SYS_PLL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG063);
+ val = ANA_AUX_RX_TX_SEL_TX | ANA_AUX_TX_TERM;
+ writel(val | ANA_AUX_RX_TERM_GND_EN,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG064);
+ writel(ANA_AUX_RX_TERM | ANA_AUX_TX_LVL,
+ imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG065);
+ }
+
+ /* Tune PHY de-emphasis setting to pass PCIe compliance. */
+ if (imx8_phy->tx_deemph_gen1)
+ writel(imx8_phy->tx_deemph_gen1,
+ imx8_phy->base + PCIE_PHY_TRSV_REG5);
+ if (imx8_phy->tx_deemph_gen2)
+ writel(imx8_phy->tx_deemph_gen2,
+ imx8_phy->base + PCIE_PHY_TRSV_REG6);
+
+ reset_control_deassert(imx8_phy->reset);
+
+	/* Poll to check whether the PHY is ready. */
+ ret = readl_poll_timeout(imx8_phy->base + IMX8MM_PCIE_PHY_CMN_REG75,
+ val, val == PCIE_PHY_CMN_REG75_PLL_DONE,
+ 10, 20000);
+ return ret;
+}
+
+static int imx8_pcie_phy_power_on(struct phy *phy)
+{
+ struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
+
+ return clk_prepare_enable(imx8_phy->clk);
+}
+
+static int imx8_pcie_phy_power_off(struct phy *phy)
+{
+ struct imx8_pcie_phy *imx8_phy = phy_get_drvdata(phy);
+
+ clk_disable_unprepare(imx8_phy->clk);
+
+ return 0;
+}
+
+static const struct phy_ops imx8_pcie_phy_ops = {
+ .init = imx8_pcie_phy_init,
+ .power_on = imx8_pcie_phy_power_on,
+ .power_off = imx8_pcie_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static int imx8_pcie_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct imx8_pcie_phy *imx8_phy;
+ struct resource *res;
+
+ imx8_phy = devm_kzalloc(dev, sizeof(*imx8_phy), GFP_KERNEL);
+ if (!imx8_phy)
+ return -ENOMEM;
+
+ /* get PHY refclk pad mode */
+ of_property_read_u32(np, "fsl,refclk-pad-mode",
+ &imx8_phy->refclk_pad_mode);
+
+ if (of_property_read_u32(np, "fsl,tx-deemph-gen1",
+ &imx8_phy->tx_deemph_gen1))
+ imx8_phy->tx_deemph_gen1 = 0;
+
+ if (of_property_read_u32(np, "fsl,tx-deemph-gen2",
+ &imx8_phy->tx_deemph_gen2))
+ imx8_phy->tx_deemph_gen2 = 0;
+
+ if (of_property_read_bool(np, "fsl,clkreq-unsupported"))
+ imx8_phy->clkreq_unused = true;
+ else
+ imx8_phy->clkreq_unused = false;
+
+ imx8_phy->clk = devm_clk_get(dev, "ref");
+ if (IS_ERR(imx8_phy->clk)) {
+ dev_err(dev, "failed to get imx pcie phy clock\n");
+ return PTR_ERR(imx8_phy->clk);
+ }
+
+ /* Grab GPR config register range */
+ imx8_phy->iomuxc_gpr =
+ syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imx8_phy->iomuxc_gpr)) {
+ dev_err(dev, "unable to find iomuxc registers\n");
+ return PTR_ERR(imx8_phy->iomuxc_gpr);
+ }
+
+ imx8_phy->reset = devm_reset_control_get_exclusive(dev, "pciephy");
+ if (IS_ERR(imx8_phy->reset)) {
+ dev_err(dev, "Failed to get PCIEPHY reset control\n");
+ return PTR_ERR(imx8_phy->reset);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ imx8_phy->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(imx8_phy->base))
+ return PTR_ERR(imx8_phy->base);
+
+ imx8_phy->phy = devm_phy_create(dev, NULL, &imx8_pcie_phy_ops);
+ if (IS_ERR(imx8_phy->phy))
+ return PTR_ERR(imx8_phy->phy);
+
+ phy_set_drvdata(imx8_phy->phy, imx8_phy);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id imx8_pcie_phy_of_match[] = {
+ {.compatible = "fsl,imx8mm-pcie-phy",},
+ { },
+};
+MODULE_DEVICE_TABLE(of, imx8_pcie_phy_of_match);
+
+static struct platform_driver imx8_pcie_phy_driver = {
+ .probe = imx8_pcie_phy_probe,
+ .driver = {
+ .name = "imx8-pcie-phy",
+ .of_match_table = imx8_pcie_phy_of_match,
+ }
+};
+module_platform_driver(imx8_pcie_phy_driver);
+
+MODULE_DESCRIPTION("FSL IMX8 PCIE PHY driver");
+MODULE_LICENSE("GPL v2");
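A PCIe controller driver would consume this PHY through the generic PHY framework. A hedged sketch of the consumer side (the "pcie-phy" con_id, the function name, and the error handling are illustrative, not mandated by this series):

#include <linux/phy/phy.h>

static int pcie_host_attach_phy(struct device *dev)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_get(dev, "pcie-phy");	/* illustrative con_id */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);		/* runs imx8_pcie_phy_init() */
	if (ret)
		return ret;

	ret = phy_power_on(phy);	/* enables the "ref" clock */
	if (ret)
		phy_exit(phy);

	return ret;
}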
diff --git a/drivers/phy/intel/Kconfig b/drivers/phy/intel/Kconfig
index ac42bb2fb394..18a3cc5b98c0 100644
--- a/drivers/phy/intel/Kconfig
+++ b/drivers/phy/intel/Kconfig
@@ -46,3 +46,13 @@ config PHY_INTEL_LGM_EMMC
select GENERIC_PHY
help
Enable this to support the Intel EMMC PHY
+
+config PHY_INTEL_THUNDERBAY_EMMC
+ tristate "Intel Thunder Bay eMMC PHY driver"
+ depends on OF && (ARCH_THUNDERBAY || COMPILE_TEST)
+ select GENERIC_PHY
+ help
+	  This option enables support for the Intel Thunder Bay SoC eMMC PHY.
+
+ To compile this driver as a module, choose M here: the module
+ will be called phy-intel-thunderbay-emmc.ko.
diff --git a/drivers/phy/intel/Makefile b/drivers/phy/intel/Makefile
index 14550981a707..b7321d56b0bb 100644
--- a/drivers/phy/intel/Makefile
+++ b/drivers/phy/intel/Makefile
@@ -3,3 +3,4 @@ obj-$(CONFIG_PHY_INTEL_KEEMBAY_EMMC) += phy-intel-keembay-emmc.o
obj-$(CONFIG_PHY_INTEL_KEEMBAY_USB) += phy-intel-keembay-usb.o
obj-$(CONFIG_PHY_INTEL_LGM_COMBO) += phy-intel-lgm-combo.o
obj-$(CONFIG_PHY_INTEL_LGM_EMMC) += phy-intel-lgm-emmc.o
+obj-$(CONFIG_PHY_INTEL_THUNDERBAY_EMMC) += phy-intel-thunderbay-emmc.o
diff --git a/drivers/phy/intel/phy-intel-thunderbay-emmc.c b/drivers/phy/intel/phy-intel-thunderbay-emmc.c
new file mode 100644
index 000000000000..593f6970b81e
--- /dev/null
+++ b/drivers/phy/intel/phy-intel-thunderbay-emmc.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel ThunderBay eMMC PHY driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+/* eMMC/SD/SDIO core/phy configuration registers */
+#define CTRL_CFG_0 0x00
+#define CTRL_CFG_1 0x04
+#define CTRL_PRESET_0 0x08
+#define CTRL_PRESET_1 0x0c
+#define CTRL_PRESET_2 0x10
+#define CTRL_PRESET_3 0x14
+#define CTRL_PRESET_4 0x18
+#define CTRL_CFG_2 0x1c
+#define CTRL_CFG_3 0x20
+#define PHY_CFG_0 0x24
+#define PHY_CFG_1 0x28
+#define PHY_CFG_2 0x2c
+#define PHYBIST_CTRL 0x30
+#define SDHC_STAT3 0x34
+#define PHY_STAT 0x38
+#define PHYBIST_STAT_0 0x3c
+#define PHYBIST_STAT_1 0x40
+#define EMMC_AXI 0x44
+
+/* CTRL_PRESET_3 */
+#define CTRL_PRESET3_MASK GENMASK(31, 0)
+#define CTRL_PRESET3_SHIFT 0
+
+/* CTRL_CFG_0 bit fields */
+#define SUPPORT_HS_MASK BIT(26)
+#define SUPPORT_HS_SHIFT 26
+
+#define SUPPORT_8B_MASK BIT(24)
+#define SUPPORT_8B_SHIFT 24
+
+/* CTRL_CFG_1 bit fields */
+#define SUPPORT_SDR50_MASK BIT(28)
+#define SUPPORT_SDR50_SHIFT 28
+#define SLOT_TYPE_MASK GENMASK(27, 26)
+#define SLOT_TYPE_OFFSET 26
+#define SUPPORT_64B_MASK BIT(24)
+#define SUPPORT_64B_SHIFT 24
+#define SUPPORT_HS400_MASK BIT(2)
+#define SUPPORT_HS400_SHIFT 2
+#define SUPPORT_DDR50_MASK BIT(1)
+#define SUPPORT_DDR50_SHIFT 1
+#define SUPPORT_SDR104_MASK BIT(0)
+#define SUPPORT_SDR104_SHIFT 0
+
+/* PHY_CFG_0 bit fields */
+#define SEL_DLY_TXCLK_MASK BIT(29)
+#define SEL_DLY_TXCLK_SHIFT 29
+#define SEL_DLY_RXCLK_MASK BIT(28)
+#define SEL_DLY_RXCLK_SHIFT 28
+
+#define OTAP_DLY_ENA_MASK BIT(27)
+#define OTAP_DLY_ENA_SHIFT 27
+#define OTAP_DLY_SEL_MASK GENMASK(26, 23)
+#define OTAP_DLY_SEL_SHIFT 23
+#define ITAP_CHG_WIN_MASK BIT(22)
+#define ITAP_CHG_WIN_SHIFT 22
+#define ITAP_DLY_ENA_MASK BIT(21)
+#define ITAP_DLY_ENA_SHIFT 21
+#define ITAP_DLY_SEL_MASK GENMASK(20, 16)
+#define ITAP_DLY_SEL_SHIFT 16
+#define RET_ENB_MASK BIT(15)
+#define RET_ENB_SHIFT 15
+#define RET_EN_MASK BIT(14)
+#define RET_EN_SHIFT 14
+#define DLL_IFF_MASK GENMASK(13, 11)
+#define DLL_IFF_SHIFT 11
+#define DLL_EN_MASK BIT(10)
+#define DLL_EN_SHIFT 10
+#define DLL_TRIM_ICP_MASK GENMASK(9, 6)
+#define DLL_TRIM_ICP_SHIFT 6
+#define RETRIM_EN_MASK BIT(5)
+#define RETRIM_EN_SHIFT 5
+#define RETRIM_MASK BIT(4)
+#define RETRIM_SHIFT 4
+#define DR_TY_MASK GENMASK(3, 1)
+#define DR_TY_SHIFT 1
+#define PWR_DOWN_MASK BIT(0)
+#define PWR_DOWN_SHIFT 0
+
+/* PHY_CFG_1 bit fields */
+#define REN_DAT_MASK GENMASK(19, 12)
+#define REN_DAT_SHIFT 12
+#define REN_CMD_MASK BIT(11)
+#define REN_CMD_SHIFT 11
+#define REN_STRB_MASK BIT(10)
+#define REN_STRB_SHIFT 10
+#define PU_STRB_MASK BIT(20)
+#define PU_STRB_SHIFT 20
+
+/* PHY_CFG_2 bit fields */
+#define CLKBUF_MASK GENMASK(24, 21)
+#define CLKBUF_SHIFT 21
+#define SEL_STRB_MASK GENMASK(20, 13)
+#define SEL_STRB_SHIFT 13
+#define SEL_FREQ_MASK GENMASK(12, 10)
+#define SEL_FREQ_SHIFT 10
+
+/* PHY_STAT bit fields */
+#define CAL_DONE BIT(6)
+#define DLL_RDY BIT(5)
+
+#define OTAP_DLY 0x0
+#define ITAP_DLY 0x0
+#define STRB 0x33
+
+/* From ACS_eMMC51_16nFFC_RO1100_Userguide_v1p0.pdf p17 */
+#define FREQSEL_200M_170M 0x0
+#define FREQSEL_170M_140M 0x1
+#define FREQSEL_140M_110M 0x2
+#define FREQSEL_110M_80M 0x3
+#define FREQSEL_80M_50M 0x4
+#define FREQSEL_275M_250M 0x5
+#define FREQSEL_250M_225M 0x6
+#define FREQSEL_225M_200M 0x7
+
+/* Phy power status */
+#define PHY_UNINITIALIZED 0
+#define PHY_INITIALIZED 1
+
+/*
+ * During init (400KHz), phy_settings will be called with a 200MHz clock.
+ * To avoid incorrectly configuring the PHY for init (400KHz), "phy_power_sts"
+ * is used. When the actual clock is set, the PHY is always powered off once
+ * and then powered on again (sdhci_arasan_set_clock). That behaviour is used
+ * to identify whether the settings are for the init phy_power_on or for the
+ * actual-clock phy_power_on:
+ * 0 --> init settings
+ * 1 --> actual settings
+ */
+
+struct thunderbay_emmc_phy {
+ void __iomem *reg_base;
+ struct clk *emmcclk;
+ int phy_power_sts;
+};
+
+static inline void update_reg(struct thunderbay_emmc_phy *tbh_phy, u32 offset,
+ u32 mask, u32 shift, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl(tbh_phy->reg_base + offset);
+ tmp &= ~mask;
+ tmp |= val << shift;
+ writel(tmp, tbh_phy->reg_base + offset);
+}
+
+static int thunderbay_emmc_phy_power(struct phy *phy, bool power_on)
+{
+ struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy);
+ unsigned int freqsel = FREQSEL_200M_170M;
+ unsigned long rate;
+ static int lock;
+ u32 val;
+ int ret;
+
+ /* Disable DLL */
+ rate = clk_get_rate(tbh_phy->emmcclk);
+ switch (rate) {
+ case 200000000:
+		/* Lock the DLL only when it is used, i.e. only if SEL_DLY_TXCLK/RXCLK are 0 */
+ update_reg(tbh_phy, PHY_CFG_0, DLL_EN_MASK, DLL_EN_SHIFT, 0x0);
+ break;
+
+ /* dll lock not required for other frequencies */
+ case 50000000 ... 52000000:
+ case 400000:
+ default:
+ break;
+ }
+
+ if (!power_on)
+ return 0;
+
+ rate = clk_get_rate(tbh_phy->emmcclk);
+ switch (rate) {
+ case 170000001 ... 200000000:
+ freqsel = FREQSEL_200M_170M;
+ break;
+
+ case 140000001 ... 170000000:
+ freqsel = FREQSEL_170M_140M;
+ break;
+
+ case 110000001 ... 140000000:
+ freqsel = FREQSEL_140M_110M;
+ break;
+
+ case 80000001 ... 110000000:
+ freqsel = FREQSEL_110M_80M;
+ break;
+
+ case 50000000 ... 80000000:
+ freqsel = FREQSEL_80M_50M;
+ break;
+
+ case 250000001 ... 275000000:
+ freqsel = FREQSEL_275M_250M;
+ break;
+
+ case 225000001 ... 250000000:
+ freqsel = FREQSEL_250M_225M;
+ break;
+
+ case 200000001 ... 225000000:
+ freqsel = FREQSEL_225M_200M;
+ break;
+ default:
+ break;
+ }
+	/* The clock rate is only checked against the upper limit; it may drop low during init */
+ if (rate > 200000000)
+ dev_warn(&phy->dev, "Unsupported rate: %lu\n", rate);
+
+ udelay(5);
+
+ if (lock == 0) {
+ /* PDB will be done only once per boot */
+ update_reg(tbh_phy, PHY_CFG_0, PWR_DOWN_MASK,
+ PWR_DOWN_SHIFT, 0x1);
+ lock = 1;
+ /*
+		 * The user manual asks the driver to wait 5us for calpad busy
+		 * trimming. However, that value is documented to be PVT (process,
+		 * voltage and temperature) dependent, and some failure cases have
+		 * been observed, which indicates we should be more tolerant of
+		 * calpad busy trimming.
+ */
+ ret = readl_poll_timeout(tbh_phy->reg_base + PHY_STAT,
+ val, (val & CAL_DONE), 10, 50);
+ if (ret) {
+ dev_err(&phy->dev, "caldone failed, ret=%d\n", ret);
+ return ret;
+ }
+ }
+ rate = clk_get_rate(tbh_phy->emmcclk);
+ switch (rate) {
+ case 200000000:
+ /* Set frequency of the DLL operation */
+ update_reg(tbh_phy, PHY_CFG_2, SEL_FREQ_MASK, SEL_FREQ_SHIFT, freqsel);
+
+ /* Enable DLL */
+ update_reg(tbh_phy, PHY_CFG_0, DLL_EN_MASK, DLL_EN_SHIFT, 0x1);
+
+ /*
+ * After enabling analog DLL circuits docs say that we need 10.2 us if
+ * our source clock is at 50 MHz and that lock time scales linearly
+ * with clock speed. If we are powering on the PHY and the card clock
+ * is super slow (like 100kHz) this could take as long as 5.1 ms as
+		 * per the math: 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms.
+		 * Hopefully we won't be running at 100 kHz, but we should still make
+		 * sure we wait long enough.
+ *
+ * NOTE: There appear to be corner cases where the DLL seems to take
+ * extra long to lock for reasons that aren't understood. In some
+ * extreme cases we've seen it take up to over 10ms (!). We'll be
+ * generous and give it 50ms.
+ */
+ ret = readl_poll_timeout(tbh_phy->reg_base + PHY_STAT,
+ val, (val & DLL_RDY), 10, 50 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(&phy->dev, "dllrdy failed, ret=%d\n", ret);
+ return ret;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+static int thunderbay_emmc_phy_init(struct phy *phy)
+{
+ struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy);
+
+ tbh_phy->emmcclk = clk_get(&phy->dev, "emmcclk");
+
+ return PTR_ERR_OR_ZERO(tbh_phy->emmcclk);
+}
+
+static int thunderbay_emmc_phy_exit(struct phy *phy)
+{
+ struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy);
+
+ clk_put(tbh_phy->emmcclk);
+
+ return 0;
+}
+
+static int thunderbay_emmc_phy_power_on(struct phy *phy)
+{
+ struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy);
+ unsigned long rate;
+
+ /* Overwrite capability bits configurable in bootloader */
+ update_reg(tbh_phy, CTRL_CFG_0,
+ SUPPORT_HS_MASK, SUPPORT_HS_SHIFT, 0x1);
+ update_reg(tbh_phy, CTRL_CFG_0,
+ SUPPORT_8B_MASK, SUPPORT_8B_SHIFT, 0x1);
+ update_reg(tbh_phy, CTRL_CFG_1,
+ SUPPORT_SDR50_MASK, SUPPORT_SDR50_SHIFT, 0x1);
+ update_reg(tbh_phy, CTRL_CFG_1,
+ SUPPORT_DDR50_MASK, SUPPORT_DDR50_SHIFT, 0x1);
+ update_reg(tbh_phy, CTRL_CFG_1,
+ SUPPORT_SDR104_MASK, SUPPORT_SDR104_SHIFT, 0x1);
+ update_reg(tbh_phy, CTRL_CFG_1,
+ SUPPORT_HS400_MASK, SUPPORT_HS400_SHIFT, 0x1);
+ update_reg(tbh_phy, CTRL_CFG_1,
+ SUPPORT_64B_MASK, SUPPORT_64B_SHIFT, 0x1);
+
+ if (tbh_phy->phy_power_sts == PHY_UNINITIALIZED) {
+		/* Indicates initialization: use the init settings, same as the 400KHz setting */
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK, SEL_DLY_TXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK, SEL_DLY_RXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK, ITAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK, ITAP_DLY_SEL_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK, OTAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK, OTAP_DLY_SEL_SHIFT, 0);
+ update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK, DLL_TRIM_ICP_SHIFT, 0);
+ update_reg(tbh_phy, PHY_CFG_0, DR_TY_MASK, DR_TY_SHIFT, 0x1);
+
+ } else if (tbh_phy->phy_power_sts == PHY_INITIALIZED) {
+ /* Indicates actual clock setting */
+ rate = clk_get_rate(tbh_phy->emmcclk);
+ switch (rate) {
+ case 200000000:
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK,
+ SEL_DLY_TXCLK_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK,
+ SEL_DLY_RXCLK_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK,
+ ITAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK,
+ ITAP_DLY_SEL_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK,
+ OTAP_DLY_ENA_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK,
+ OTAP_DLY_SEL_SHIFT, 2);
+ update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK,
+ DLL_TRIM_ICP_SHIFT, 0x8);
+ update_reg(tbh_phy, PHY_CFG_0, DR_TY_MASK,
+ DR_TY_SHIFT, 0x1);
+ /* For HS400 only */
+ update_reg(tbh_phy, PHY_CFG_2, SEL_STRB_MASK,
+ SEL_STRB_SHIFT, STRB);
+ break;
+
+ case 50000000 ... 52000000:
+ /* For both HS and DDR52 this setting works */
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK,
+ SEL_DLY_TXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK,
+ SEL_DLY_RXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK,
+ ITAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK,
+ ITAP_DLY_SEL_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK,
+ OTAP_DLY_ENA_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK,
+ OTAP_DLY_SEL_SHIFT, 4);
+ update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK,
+ DLL_TRIM_ICP_SHIFT, 0x8);
+ update_reg(tbh_phy, PHY_CFG_0,
+ DR_TY_MASK, DR_TY_SHIFT, 0x1);
+ break;
+
+ case 400000:
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK,
+ SEL_DLY_TXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK,
+ SEL_DLY_RXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK,
+ ITAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK,
+ ITAP_DLY_SEL_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK,
+ OTAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK,
+ OTAP_DLY_SEL_SHIFT, 0);
+ update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK,
+ DLL_TRIM_ICP_SHIFT, 0);
+ update_reg(tbh_phy, PHY_CFG_0, DR_TY_MASK, DR_TY_SHIFT, 0x1);
+ break;
+
+ default:
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_TXCLK_MASK,
+ SEL_DLY_TXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, SEL_DLY_RXCLK_MASK,
+ SEL_DLY_RXCLK_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_ENA_MASK,
+ ITAP_DLY_ENA_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, ITAP_DLY_SEL_MASK,
+ ITAP_DLY_SEL_SHIFT, 0x0);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_ENA_MASK,
+ OTAP_DLY_ENA_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, OTAP_DLY_SEL_MASK,
+ OTAP_DLY_SEL_SHIFT, 2);
+ update_reg(tbh_phy, PHY_CFG_0, DLL_TRIM_ICP_MASK,
+ DLL_TRIM_ICP_SHIFT, 0x8);
+ update_reg(tbh_phy, PHY_CFG_0, DR_TY_MASK,
+ DR_TY_SHIFT, 0x1);
+ break;
+ }
+		/* Reset the state: a subsequent power-on without phy_power_off indicates an init sequence */
+ tbh_phy->phy_power_sts = PHY_UNINITIALIZED;
+ }
+
+ update_reg(tbh_phy, PHY_CFG_0, RETRIM_EN_MASK, RETRIM_EN_SHIFT, 0x1);
+ update_reg(tbh_phy, PHY_CFG_0, RETRIM_MASK, RETRIM_SHIFT, 0x0);
+
+ return thunderbay_emmc_phy_power(phy, 1);
+}
+
+static int thunderbay_emmc_phy_power_off(struct phy *phy)
+{
+ struct thunderbay_emmc_phy *tbh_phy = phy_get_drvdata(phy);
+
+ tbh_phy->phy_power_sts = PHY_INITIALIZED;
+
+ return thunderbay_emmc_phy_power(phy, 0);
+}
+
+static const struct phy_ops thunderbay_emmc_phy_ops = {
+ .init = thunderbay_emmc_phy_init,
+ .exit = thunderbay_emmc_phy_exit,
+ .power_on = thunderbay_emmc_phy_power_on,
+ .power_off = thunderbay_emmc_phy_power_off,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id thunderbay_emmc_phy_of_match[] = {
+ { .compatible = "intel,thunderbay-emmc-phy",
+ (void *)&thunderbay_emmc_phy_ops },
+ {}
+};
+MODULE_DEVICE_TABLE(of, thunderbay_emmc_phy_of_match);
+
+static int thunderbay_emmc_phy_probe(struct platform_device *pdev)
+{
+ struct thunderbay_emmc_phy *tbh_phy;
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ const struct of_device_id *id;
+ struct phy *generic_phy;
+ struct resource *res;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ tbh_phy = devm_kzalloc(dev, sizeof(*tbh_phy), GFP_KERNEL);
+ if (!tbh_phy)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tbh_phy->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(tbh_phy->reg_base))
+ return PTR_ERR(tbh_phy->reg_base);
+
+ tbh_phy->phy_power_sts = PHY_UNINITIALIZED;
+ id = of_match_node(thunderbay_emmc_phy_of_match, pdev->dev.of_node);
+ if (!id) {
+ dev_err(dev, "failed to get match_node\n");
+ return -EINVAL;
+ }
+
+ generic_phy = devm_phy_create(dev, dev->of_node, id->data);
+ if (IS_ERR(generic_phy)) {
+ dev_err(dev, "failed to create PHY\n");
+ return PTR_ERR(generic_phy);
+ }
+
+ phy_set_drvdata(generic_phy, tbh_phy);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static struct platform_driver thunderbay_emmc_phy_driver = {
+ .probe = thunderbay_emmc_phy_probe,
+ .driver = {
+ .name = "thunderbay-emmc-phy",
+ .of_match_table = thunderbay_emmc_phy_of_match,
+ },
+};
+module_platform_driver(thunderbay_emmc_phy_driver);
+
+MODULE_AUTHOR("Nandhini S <nandhini.srikandan@intel.com>");
+MODULE_AUTHOR("Rashmi A <rashmi.a@intel.com>");
+MODULE_DESCRIPTION("Intel Thunder Bay eMMC PHY driver");
+MODULE_LICENSE("GPL v2");
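update_reg() above is the usual masked read-modify-write idiom, which carries a separate *_SHIFT macro for every *_MASK. A hypothetical alternative using FIELD_PREP() from linux/bitfield.h derives the shift from the mask itself (the mask must be a compile-time constant; the macro name is illustrative):

#include <linux/bitfield.h>
#include <linux/io.h>

/* Hypothetical replacement for update_reg(): no *_SHIFT macros needed. */
#define tbh_phy_rmw(base, offset, mask, val)				\
	do {								\
		void __iomem *__reg = (base) + (offset);		\
		u32 __tmp = readl(__reg);	/* read */		\
									\
		__tmp &= ~(mask);		/* clear the field */	\
		__tmp |= FIELD_PREP(mask, val);	/* insert the value */	\
		writel(__tmp, __reg);		/* write back */	\
	} while (0)

/* Usage sketch: tbh_phy_rmw(tbh_phy->reg_base, PHY_CFG_2, SEL_FREQ_MASK, freqsel); */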
diff --git a/drivers/phy/mediatek/phy-mtk-io.h b/drivers/phy/mediatek/phy-mtk-io.h
new file mode 100644
index 000000000000..500fcdab165d
--- /dev/null
+++ b/drivers/phy/mediatek/phy-mtk-io.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 MediaTek Inc.
+ *
+ * Author: Chunfeng Yun <chunfeng.yun@mediatek.com>
+ */
+
+#ifndef __PHY_MTK_H__
+#define __PHY_MTK_H__
+
+#include <linux/io.h>
+
+static inline void mtk_phy_clear_bits(void __iomem *reg, u32 bits)
+{
+ u32 tmp = readl(reg);
+
+ tmp &= ~bits;
+ writel(tmp, reg);
+}
+
+static inline void mtk_phy_set_bits(void __iomem *reg, u32 bits)
+{
+ u32 tmp = readl(reg);
+
+ tmp |= bits;
+ writel(tmp, reg);
+}
+
+static inline void mtk_phy_update_bits(void __iomem *reg, u32 mask, u32 val)
+{
+ u32 tmp = readl(reg);
+
+ tmp &= ~mask;
+ tmp |= val & mask;
+ writel(tmp, reg);
+}
+
+#endif
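These helpers condense the driver's pervasive readl()/modify/writel() sequences into single calls; the phy-mtk-tphy.c hunks below apply exactly that transformation. One pair from the slew-rate path in this series, shown side by side:

	/* Before: open-coded read-modify-write */
	tmp = readl(com + U3P_USBPHYACR5);
	tmp &= ~PA5_RG_U2_HSTX_SRCTRL;
	tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(calibration_val);
	writel(tmp, com + U3P_USBPHYACR5);

	/* After: one helper call with the same effect */
	mtk_phy_update_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
			    PA5_RG_U2_HSTX_SRCTRL_VAL(calibration_val));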
diff --git a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
index 28ad9403c441..67b005d5b9e3 100644
--- a/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
+++ b/drivers/phy/mediatek/phy-mtk-mipi-dsi.c
@@ -146,6 +146,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
return -ENOMEM;
mipi_tx->driver_data = of_device_get_match_data(dev);
+ if (!mipi_tx->driver_data)
+ return -ENODEV;
mipi_tx->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mipi_tx->regs))
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c
index cdcef865fe9e..8ee7682b8e93 100644
--- a/drivers/phy/mediatek/phy-mtk-tphy.c
+++ b/drivers/phy/mediatek/phy-mtk-tphy.c
@@ -8,16 +8,18 @@
#include <dt-bindings/phy/phy.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include "phy-mtk-io.h"
+
/* version V1 sub-banks offset base address */
/* banks shared by multiple phys */
#define SSUSB_SIFSLV_V1_SPLLC 0x000 /* shared by u3 phys */
@@ -41,6 +43,9 @@
#define SSUSB_SIFSLV_V2_U3PHYD 0x200
#define SSUSB_SIFSLV_V2_U3PHYA 0x400
+#define U3P_MISC_REG1 0x04
+#define MR1_EFUSE_AUTO_LOAD_DIS BIT(6)
+
#define U3P_USBPHYACR0 0x000
#define PA0_RG_U2PLL_FORCE_ON BIT(15)
#define PA0_USB20_PLL_PREDIV GENMASK(7, 6)
@@ -133,6 +138,8 @@
#define P3C_RG_SWRST_U3_PHYD_FORCE_EN BIT(24)
#define U3P_U3_PHYA_REG0 0x000
+#define P3A_RG_IEXT_INTR GENMASK(15, 10)
+#define P3A_RG_IEXT_INTR_VAL(x) ((0x3f & (x)) << 10)
#define P3A_RG_CLKDRV_OFF GENMASK(3, 2)
#define P3A_RG_CLKDRV_OFF_VAL(x) ((0x3 & (x)) << 2)
@@ -187,6 +194,19 @@
#define P3D_RG_FWAKE_TH GENMASK(21, 16)
#define P3D_RG_FWAKE_TH_VAL(x) ((0x3f & (x)) << 16)
+#define U3P_U3_PHYD_IMPCAL0 0x010
+#define P3D_RG_FORCE_TX_IMPEL BIT(31)
+#define P3D_RG_TX_IMPEL GENMASK(28, 24)
+#define P3D_RG_TX_IMPEL_VAL(x) ((0x1f & (x)) << 24)
+
+#define U3P_U3_PHYD_IMPCAL1 0x014
+#define P3D_RG_FORCE_RX_IMPEL BIT(31)
+#define P3D_RG_RX_IMPEL GENMASK(28, 24)
+#define P3D_RG_RX_IMPEL_VAL(x) ((0x1f & (x)) << 24)
+
+#define U3P_U3_PHYD_RSV 0x054
+#define P3D_RG_EFUSE_AUTO_LOAD_DIS BIT(12)
+
#define U3P_U3_PHYD_CDR1 0x05c
#define P3D_RG_CDR_BIR_LTD1 GENMASK(28, 24)
#define P3D_RG_CDR_BIR_LTD1_VAL(x) ((0x1f & (x)) << 24)
@@ -307,6 +327,11 @@ struct mtk_phy_pdata {
* 48M PLL, fix it by switching PLL to 26M from default 48M
*/
bool sw_pll_48m_to_26m;
+ /*
+	 * Some SoCs (e.g. mt8195) may drop a bit when the efuse is auto-loaded,
+	 * so support loading the values by software; optionally support this
+	 * for v2/v3 as well.
+ */
+ bool sw_efuse_supported;
enum mtk_phy_version version;
};
@@ -336,6 +361,10 @@ struct mtk_phy_instance {
struct regmap *type_sw;
u32 type_sw_reg;
u32 type_sw_index;
+ u32 efuse_sw_en;
+ u32 efuse_intr;
+ u32 efuse_tx_imp;
+ u32 efuse_rx_imp;
int eye_src;
int eye_vrt;
int eye_term;
@@ -373,15 +402,11 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
return;
/* enable USB ring oscillator */
- tmp = readl(com + U3P_USBPHYACR5);
- tmp |= PA5_RG_U2_HSTX_SRCAL_EN;
- writel(tmp, com + U3P_USBPHYACR5);
+ mtk_phy_set_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCAL_EN);
udelay(1);
/*enable free run clock */
- tmp = readl(fmreg + U3P_U2FREQ_FMMONR1);
- tmp |= P2F_RG_FRCK_EN;
- writel(tmp, fmreg + U3P_U2FREQ_FMMONR1);
+ mtk_phy_set_bits(fmreg + U3P_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
/* set cycle count as 1024, and select u2 channel */
tmp = readl(fmreg + U3P_U2FREQ_FMCR0);
@@ -393,9 +418,7 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
writel(tmp, fmreg + U3P_U2FREQ_FMCR0);
/* enable frequency meter */
- tmp = readl(fmreg + U3P_U2FREQ_FMCR0);
- tmp |= P2F_RG_FREQDET_EN;
- writel(tmp, fmreg + U3P_U2FREQ_FMCR0);
+ mtk_phy_set_bits(fmreg + U3P_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
/* ignore return value */
readl_poll_timeout(fmreg + U3P_U2FREQ_FMMONR1, tmp,
@@ -404,14 +427,10 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
fm_out = readl(fmreg + U3P_U2FREQ_VALUE);
/* disable frequency meter */
- tmp = readl(fmreg + U3P_U2FREQ_FMCR0);
- tmp &= ~P2F_RG_FREQDET_EN;
- writel(tmp, fmreg + U3P_U2FREQ_FMCR0);
+ mtk_phy_clear_bits(fmreg + U3P_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
/*disable free run clock */
- tmp = readl(fmreg + U3P_U2FREQ_FMMONR1);
- tmp &= ~P2F_RG_FRCK_EN;
- writel(tmp, fmreg + U3P_U2FREQ_FMMONR1);
+ mtk_phy_clear_bits(fmreg + U3P_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
if (fm_out) {
/* ( 1024 / FM_OUT ) x reference clock frequency x coef */
@@ -427,63 +446,44 @@ static void hs_slew_rate_calibrate(struct mtk_tphy *tphy,
tphy->src_ref_clk, tphy->src_coef);
/* set HS slew rate */
- tmp = readl(com + U3P_USBPHYACR5);
- tmp &= ~PA5_RG_U2_HSTX_SRCTRL;
- tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(calibration_val);
- writel(tmp, com + U3P_USBPHYACR5);
+ mtk_phy_update_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
+ PA5_RG_U2_HSTX_SRCTRL_VAL(calibration_val));
/* disable USB ring oscillator */
- tmp = readl(com + U3P_USBPHYACR5);
- tmp &= ~PA5_RG_U2_HSTX_SRCAL_EN;
- writel(tmp, com + U3P_USBPHYACR5);
+ mtk_phy_clear_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCAL_EN);
}
static void u3_phy_instance_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u3phy_banks *u3_banks = &instance->u3_banks;
- u32 tmp;
/* gating PCIe Analog XTAL clock */
- tmp = readl(u3_banks->spllc + U3P_SPLLC_XTALCTL3);
- tmp |= XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD;
- writel(tmp, u3_banks->spllc + U3P_SPLLC_XTALCTL3);
+ mtk_phy_set_bits(u3_banks->spllc + U3P_SPLLC_XTALCTL3,
+ XC3_RG_U3_XTAL_RX_PWD | XC3_RG_U3_FRC_XTAL_RX_PWD);
/* gating XSQ */
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG0);
- tmp &= ~P3A_RG_XTAL_EXT_EN_U3;
- tmp |= P3A_RG_XTAL_EXT_EN_U3_VAL(2);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG0);
-
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG9);
- tmp &= ~P3A_RG_RX_DAC_MUX;
- tmp |= P3A_RG_RX_DAC_MUX_VAL(4);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG9);
-
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG6);
- tmp &= ~P3A_RG_TX_EIDLE_CM;
- tmp |= P3A_RG_TX_EIDLE_CM_VAL(0xe);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG6);
-
- tmp = readl(u3_banks->phyd + U3P_U3_PHYD_CDR1);
- tmp &= ~(P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1);
- tmp |= P3D_RG_CDR_BIR_LTD0_VAL(0xc) | P3D_RG_CDR_BIR_LTD1_VAL(0x3);
- writel(tmp, u3_banks->phyd + U3P_U3_PHYD_CDR1);
-
- tmp = readl(u3_banks->phyd + U3P_U3_PHYD_LFPS1);
- tmp &= ~P3D_RG_FWAKE_TH;
- tmp |= P3D_RG_FWAKE_TH_VAL(0x34);
- writel(tmp, u3_banks->phyd + U3P_U3_PHYD_LFPS1);
-
- tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET1);
- tmp &= ~P3D_RG_RXDET_STB2_SET;
- tmp |= P3D_RG_RXDET_STB2_SET_VAL(0x10);
- writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET1);
-
- tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET2);
- tmp &= ~P3D_RG_RXDET_STB2_SET_P3;
- tmp |= P3D_RG_RXDET_STB2_SET_P3_VAL(0x10);
- writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET2);
+ mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_DA_REG0,
+ P3A_RG_XTAL_EXT_EN_U3, P3A_RG_XTAL_EXT_EN_U3_VAL(2));
+
+ mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG9,
+ P3A_RG_RX_DAC_MUX, P3A_RG_RX_DAC_MUX_VAL(4));
+
+ mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG6,
+ P3A_RG_TX_EIDLE_CM, P3A_RG_TX_EIDLE_CM_VAL(0xe));
+
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_CDR1,
+ P3D_RG_CDR_BIR_LTD0 | P3D_RG_CDR_BIR_LTD1,
+ P3D_RG_CDR_BIR_LTD0_VAL(0xc) | P3D_RG_CDR_BIR_LTD1_VAL(0x3));
+
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_LFPS1,
+ P3D_RG_FWAKE_TH, P3D_RG_FWAKE_TH_VAL(0x34));
+
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET1,
+ P3D_RG_RXDET_STB2_SET, P3D_RG_RXDET_STB2_SET_VAL(0x10));
+
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET2,
+ P3D_RG_RXDET_STB2_SET_P3, P3D_RG_RXDET_STB2_SET_P3_VAL(0x10));
dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index);
}
@@ -493,26 +493,20 @@ static void u2_phy_pll_26m_set(struct mtk_tphy *tphy,
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
- u32 tmp;
if (!tphy->pdata->sw_pll_48m_to_26m)
return;
- tmp = readl(com + U3P_USBPHYACR0);
- tmp &= ~PA0_USB20_PLL_PREDIV;
- tmp |= PA0_USB20_PLL_PREDIV_VAL(0);
- writel(tmp, com + U3P_USBPHYACR0);
+ mtk_phy_update_bits(com + U3P_USBPHYACR0, PA0_USB20_PLL_PREDIV,
+ PA0_USB20_PLL_PREDIV_VAL(0));
- tmp = readl(com + U3P_USBPHYACR2);
- tmp &= ~PA2_RG_U2PLL_BW;
- tmp |= PA2_RG_U2PLL_BW_VAL(3);
- writel(tmp, com + U3P_USBPHYACR2);
+ mtk_phy_update_bits(com + U3P_USBPHYACR2, PA2_RG_U2PLL_BW,
+ PA2_RG_U2PLL_BW_VAL(3));
writel(P2R_RG_U2PLL_FBDIV_26M, com + U3P_U2PHYA_RESV);
- tmp = readl(com + U3P_U2PHYA_RESV1);
- tmp |= P2R_RG_U2PLL_FRA_EN | P2R_RG_U2PLL_REFCLK_SEL;
- writel(tmp, com + U3P_U2PHYA_RESV1);
+ mtk_phy_set_bits(com + U3P_U2PHYA_RESV1,
+ P2R_RG_U2PLL_FRA_EN | P2R_RG_U2PLL_REFCLK_SEL);
}
static void u2_phy_instance_init(struct mtk_tphy *tphy,
@@ -521,58 +515,40 @@ static void u2_phy_instance_init(struct mtk_tphy *tphy,
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
u32 index = instance->index;
- u32 tmp;
/* switch to USB function, and enable usb pll */
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp &= ~(P2C_FORCE_UART_EN | P2C_FORCE_SUSPENDM);
- tmp |= P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0);
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_FORCE_UART_EN | P2C_FORCE_SUSPENDM);
+
+ mtk_phy_update_bits(com + U3P_U2PHYDTM0, P2C_RG_XCVRSEL | P2C_RG_DATAIN,
+ P2C_RG_XCVRSEL_VAL(1) | P2C_RG_DATAIN_VAL(0));
- tmp = readl(com + U3P_U2PHYDTM1);
- tmp &= ~P2C_RG_UART_EN;
- writel(tmp, com + U3P_U2PHYDTM1);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_UART_EN);
- tmp = readl(com + U3P_USBPHYACR0);
- tmp |= PA0_RG_USB20_INTR_EN;
- writel(tmp, com + U3P_USBPHYACR0);
+ mtk_phy_set_bits(com + U3P_USBPHYACR0, PA0_RG_USB20_INTR_EN);
/* disable switch 100uA current to SSUSB */
- tmp = readl(com + U3P_USBPHYACR5);
- tmp &= ~PA5_RG_U2_HS_100U_U3_EN;
- writel(tmp, com + U3P_USBPHYACR5);
-
- if (!index) {
- tmp = readl(com + U3P_U2PHYACR4);
- tmp &= ~P2C_U2_GPIO_CTR_MSK;
- writel(tmp, com + U3P_U2PHYACR4);
- }
+ mtk_phy_clear_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HS_100U_U3_EN);
+
+ if (!index)
+ mtk_phy_clear_bits(com + U3P_U2PHYACR4, P2C_U2_GPIO_CTR_MSK);
if (tphy->pdata->avoid_rx_sen_degradation) {
if (!index) {
- tmp = readl(com + U3P_USBPHYACR2);
- tmp |= PA2_RG_SIF_U2PLL_FORCE_EN;
- writel(tmp, com + U3P_USBPHYACR2);
+ mtk_phy_set_bits(com + U3P_USBPHYACR2, PA2_RG_SIF_U2PLL_FORCE_EN);
- tmp = readl(com + U3D_U2PHYDCR0);
- tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, com + U3D_U2PHYDCR0);
+ mtk_phy_clear_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
} else {
- tmp = readl(com + U3D_U2PHYDCR0);
- tmp |= P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, com + U3D_U2PHYDCR0);
+ mtk_phy_set_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM;
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_set_bits(com + U3P_U2PHYDTM0,
+ P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM);
}
}
- tmp = readl(com + U3P_USBPHYACR6);
- tmp &= ~PA6_RG_U2_BC11_SW_EN; /* DP/DM BC1.1 path Disable */
- tmp &= ~PA6_RG_U2_SQTH;
- tmp |= PA6_RG_U2_SQTH_VAL(2);
- writel(tmp, com + U3P_USBPHYACR6);
+ /* DP/DM BC1.1 path Disable */
+ mtk_phy_clear_bits(com + U3P_USBPHYACR6, PA6_RG_U2_BC11_SW_EN);
+
+ mtk_phy_update_bits(com + U3P_USBPHYACR6, PA6_RG_U2_SQTH, PA6_RG_U2_SQTH_VAL(2));
/* Workaround only for mt8195, HW fix it for others (V3) */
u2_phy_pll_26m_set(tphy, instance);
@@ -586,30 +562,21 @@ static void u2_phy_instance_power_on(struct mtk_tphy *tphy,
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
u32 index = instance->index;
- u32 tmp;
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK);
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM0,
+ P2C_RG_XCVRSEL | P2C_RG_DATAIN | P2C_DTM0_PART_MASK);
/* OTG Enable */
- tmp = readl(com + U3P_USBPHYACR6);
- tmp |= PA6_RG_U2_OTG_VBUSCMP_EN;
- writel(tmp, com + U3P_USBPHYACR6);
+ mtk_phy_set_bits(com + U3P_USBPHYACR6, PA6_RG_U2_OTG_VBUSCMP_EN);
+
+ mtk_phy_set_bits(com + U3P_U2PHYDTM1, P2C_RG_VBUSVALID | P2C_RG_AVALID);
- tmp = readl(com + U3P_U2PHYDTM1);
- tmp |= P2C_RG_VBUSVALID | P2C_RG_AVALID;
- tmp &= ~P2C_RG_SESSEND;
- writel(tmp, com + U3P_U2PHYDTM1);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_SESSEND);
if (tphy->pdata->avoid_rx_sen_degradation && index) {
- tmp = readl(com + U3D_U2PHYDCR0);
- tmp |= P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, com + U3D_U2PHYDCR0);
+ mtk_phy_set_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp |= P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM;
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_set_bits(com + U3P_U2PHYDTM0, P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM);
}
dev_dbg(tphy->dev, "%s(%d)\n", __func__, index);
}
@@ -620,30 +587,20 @@ static void u2_phy_instance_power_off(struct mtk_tphy *tphy,
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
u32 index = instance->index;
- u32 tmp;
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp &= ~(P2C_RG_XCVRSEL | P2C_RG_DATAIN);
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_RG_XCVRSEL | P2C_RG_DATAIN);
/* OTG Disable */
- tmp = readl(com + U3P_USBPHYACR6);
- tmp &= ~PA6_RG_U2_OTG_VBUSCMP_EN;
- writel(tmp, com + U3P_USBPHYACR6);
+ mtk_phy_clear_bits(com + U3P_USBPHYACR6, PA6_RG_U2_OTG_VBUSCMP_EN);
+
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM1, P2C_RG_VBUSVALID | P2C_RG_AVALID);
- tmp = readl(com + U3P_U2PHYDTM1);
- tmp &= ~(P2C_RG_VBUSVALID | P2C_RG_AVALID);
- tmp |= P2C_RG_SESSEND;
- writel(tmp, com + U3P_U2PHYDTM1);
+ mtk_phy_set_bits(com + U3P_U2PHYDTM1, P2C_RG_SESSEND);
if (tphy->pdata->avoid_rx_sen_degradation && index) {
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp &= ~(P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM);
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_RG_SUSPENDM | P2C_FORCE_SUSPENDM);
- tmp = readl(com + U3D_U2PHYDCR0);
- tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, com + U3D_U2PHYDCR0);
+ mtk_phy_clear_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
}
dev_dbg(tphy->dev, "%s(%d)\n", __func__, index);
@@ -655,16 +612,11 @@ static void u2_phy_instance_exit(struct mtk_tphy *tphy,
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
u32 index = instance->index;
- u32 tmp;
if (tphy->pdata->avoid_rx_sen_degradation && index) {
- tmp = readl(com + U3D_U2PHYDCR0);
- tmp &= ~P2C_RG_SIF_U2PLL_FORCE_ON;
- writel(tmp, com + U3D_U2PHYDCR0);
+ mtk_phy_clear_bits(com + U3D_U2PHYDCR0, P2C_RG_SIF_U2PLL_FORCE_ON);
- tmp = readl(com + U3P_U2PHYDTM0);
- tmp &= ~P2C_FORCE_SUSPENDM;
- writel(tmp, com + U3P_U2PHYDTM0);
+ mtk_phy_clear_bits(com + U3P_U2PHYDTM0, P2C_FORCE_SUSPENDM);
}
}
@@ -697,69 +649,50 @@ static void pcie_phy_instance_init(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u3phy_banks *u3_banks = &instance->u3_banks;
- u32 tmp;
+ void __iomem *phya = u3_banks->phya;
if (tphy->pdata->version != MTK_PHY_V1)
return;
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG0);
- tmp &= ~(P3A_RG_XTAL_EXT_PE1H | P3A_RG_XTAL_EXT_PE2H);
- tmp |= P3A_RG_XTAL_EXT_PE1H_VAL(0x2) | P3A_RG_XTAL_EXT_PE2H_VAL(0x2);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG0);
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG0,
+ P3A_RG_XTAL_EXT_PE1H | P3A_RG_XTAL_EXT_PE2H,
+ P3A_RG_XTAL_EXT_PE1H_VAL(0x2) | P3A_RG_XTAL_EXT_PE2H_VAL(0x2));
/* ref clk drive */
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG1);
- tmp &= ~P3A_RG_CLKDRV_AMP;
- tmp |= P3A_RG_CLKDRV_AMP_VAL(0x4);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG1);
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_REG1, P3A_RG_CLKDRV_AMP,
+ P3A_RG_CLKDRV_AMP_VAL(0x4));
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_REG0);
- tmp &= ~P3A_RG_CLKDRV_OFF;
- tmp |= P3A_RG_CLKDRV_OFF_VAL(0x1);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_REG0);
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_REG0, P3A_RG_CLKDRV_OFF,
+ P3A_RG_CLKDRV_OFF_VAL(0x1));
/* SSC delta -5000ppm */
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG20);
- tmp &= ~P3A_RG_PLL_DELTA1_PE2H;
- tmp |= P3A_RG_PLL_DELTA1_PE2H_VAL(0x3c);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG20);
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG20, P3A_RG_PLL_DELTA1_PE2H,
+ P3A_RG_PLL_DELTA1_PE2H_VAL(0x3c));
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG25);
- tmp &= ~P3A_RG_PLL_DELTA_PE2H;
- tmp |= P3A_RG_PLL_DELTA_PE2H_VAL(0x36);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG25);
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG25, P3A_RG_PLL_DELTA_PE2H,
+ P3A_RG_PLL_DELTA_PE2H_VAL(0x36));
/* change pll BW 0.6M */
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG5);
- tmp &= ~(P3A_RG_PLL_BR_PE2H | P3A_RG_PLL_IC_PE2H);
- tmp |= P3A_RG_PLL_BR_PE2H_VAL(0x1) | P3A_RG_PLL_IC_PE2H_VAL(0x1);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG5);
-
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG4);
- tmp &= ~(P3A_RG_PLL_DIVEN_PE2H | P3A_RG_PLL_BC_PE2H);
- tmp |= P3A_RG_PLL_BC_PE2H_VAL(0x3);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG4);
-
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG6);
- tmp &= ~P3A_RG_PLL_IR_PE2H;
- tmp |= P3A_RG_PLL_IR_PE2H_VAL(0x2);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG6);
-
- tmp = readl(u3_banks->phya + U3P_U3_PHYA_DA_REG7);
- tmp &= ~P3A_RG_PLL_BP_PE2H;
- tmp |= P3A_RG_PLL_BP_PE2H_VAL(0xa);
- writel(tmp, u3_banks->phya + U3P_U3_PHYA_DA_REG7);
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG5,
+ P3A_RG_PLL_BR_PE2H | P3A_RG_PLL_IC_PE2H,
+ P3A_RG_PLL_BR_PE2H_VAL(0x1) | P3A_RG_PLL_IC_PE2H_VAL(0x1));
+
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG4,
+ P3A_RG_PLL_DIVEN_PE2H | P3A_RG_PLL_BC_PE2H,
+ P3A_RG_PLL_BC_PE2H_VAL(0x3));
+
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG6, P3A_RG_PLL_IR_PE2H,
+ P3A_RG_PLL_IR_PE2H_VAL(0x2));
+
+ mtk_phy_update_bits(phya + U3P_U3_PHYA_DA_REG7, P3A_RG_PLL_BP_PE2H,
+ P3A_RG_PLL_BP_PE2H_VAL(0xa));
/* Tx Detect Rx Timing: 10us -> 5us */
- tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET1);
- tmp &= ~P3D_RG_RXDET_STB2_SET;
- tmp |= P3D_RG_RXDET_STB2_SET_VAL(0x10);
- writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET1);
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET1,
+ P3D_RG_RXDET_STB2_SET, P3D_RG_RXDET_STB2_SET_VAL(0x10));
- tmp = readl(u3_banks->phyd + U3P_U3_PHYD_RXDET2);
- tmp &= ~P3D_RG_RXDET_STB2_SET_P3;
- tmp |= P3D_RG_RXDET_STB2_SET_P3_VAL(0x10);
- writel(tmp, u3_banks->phyd + U3P_U3_PHYD_RXDET2);
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_RXDET2,
+ P3D_RG_RXDET_STB2_SET_P3, P3D_RG_RXDET_STB2_SET_P3_VAL(0x10));
/* wait for the PCIe subsys registers to become active */
usleep_range(2500, 3000);
@@ -770,15 +703,12 @@ static void pcie_phy_instance_power_on(struct mtk_tphy *tphy,
struct mtk_phy_instance *instance)
{
struct u3phy_banks *bank = &instance->u3_banks;
- u32 tmp;
- tmp = readl(bank->chip + U3P_U3_CHIP_GPIO_CTLD);
- tmp &= ~(P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST);
- writel(tmp, bank->chip + U3P_U3_CHIP_GPIO_CTLD);
+ mtk_phy_clear_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLD,
+ P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST);
- tmp = readl(bank->chip + U3P_U3_CHIP_GPIO_CTLE);
- tmp &= ~(P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD);
- writel(tmp, bank->chip + U3P_U3_CHIP_GPIO_CTLE);
+ mtk_phy_clear_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLE,
+ P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD);
}
static void pcie_phy_instance_power_off(struct mtk_tphy *tphy,
@@ -786,15 +716,12 @@ static void pcie_phy_instance_power_off(struct mtk_tphy *tphy,
{
struct u3phy_banks *bank = &instance->u3_banks;
- u32 tmp;
- tmp = readl(bank->chip + U3P_U3_CHIP_GPIO_CTLD);
- tmp |= P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST;
- writel(tmp, bank->chip + U3P_U3_CHIP_GPIO_CTLD);
+ mtk_phy_set_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLD,
+ P3C_FORCE_IP_SW_RST | P3C_REG_IP_SW_RST);
- tmp = readl(bank->chip + U3P_U3_CHIP_GPIO_CTLE);
- tmp |= P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD;
- writel(tmp, bank->chip + U3P_U3_CHIP_GPIO_CTLE);
+ mtk_phy_set_bits(bank->chip + U3P_U3_CHIP_GPIO_CTLE,
+ P3C_RG_SWRST_U3_PHYD_FORCE_EN | P3C_RG_SWRST_U3_PHYD);
}
static void sata_phy_instance_init(struct mtk_tphy *tphy,
@@ -802,55 +729,42 @@ static void sata_phy_instance_init(struct mtk_tphy *tphy,
{
struct u3phy_banks *u3_banks = &instance->u3_banks;
void __iomem *phyd = u3_banks->phyd;
- u32 tmp;
/* charge current adjustment */
- tmp = readl(phyd + ANA_RG_CTRL_SIGNAL6);
- tmp &= ~(RG_CDR_BIRLTR_GEN1_MSK | RG_CDR_BC_GEN1_MSK);
- tmp |= RG_CDR_BIRLTR_GEN1_VAL(0x6) | RG_CDR_BC_GEN1_VAL(0x1a);
- writel(tmp, phyd + ANA_RG_CTRL_SIGNAL6);
-
- tmp = readl(phyd + ANA_EQ_EYE_CTRL_SIGNAL4);
- tmp &= ~RG_CDR_BIRLTD0_GEN1_MSK;
- tmp |= RG_CDR_BIRLTD0_GEN1_VAL(0x18);
- writel(tmp, phyd + ANA_EQ_EYE_CTRL_SIGNAL4);
-
- tmp = readl(phyd + ANA_EQ_EYE_CTRL_SIGNAL5);
- tmp &= ~RG_CDR_BIRLTD0_GEN3_MSK;
- tmp |= RG_CDR_BIRLTD0_GEN3_VAL(0x06);
- writel(tmp, phyd + ANA_EQ_EYE_CTRL_SIGNAL5);
-
- tmp = readl(phyd + ANA_RG_CTRL_SIGNAL4);
- tmp &= ~(RG_CDR_BICLTR_GEN1_MSK | RG_CDR_BR_GEN2_MSK);
- tmp |= RG_CDR_BICLTR_GEN1_VAL(0x0c) | RG_CDR_BR_GEN2_VAL(0x07);
- writel(tmp, phyd + ANA_RG_CTRL_SIGNAL4);
-
- tmp = readl(phyd + PHYD_CTRL_SIGNAL_MODE4);
- tmp &= ~(RG_CDR_BICLTD0_GEN1_MSK | RG_CDR_BICLTD1_GEN1_MSK);
- tmp |= RG_CDR_BICLTD0_GEN1_VAL(0x08) | RG_CDR_BICLTD1_GEN1_VAL(0x02);
- writel(tmp, phyd + PHYD_CTRL_SIGNAL_MODE4);
-
- tmp = readl(phyd + PHYD_DESIGN_OPTION2);
- tmp &= ~RG_LOCK_CNT_SEL_MSK;
- tmp |= RG_LOCK_CNT_SEL_VAL(0x02);
- writel(tmp, phyd + PHYD_DESIGN_OPTION2);
-
- tmp = readl(phyd + PHYD_DESIGN_OPTION9);
- tmp &= ~(RG_T2_MIN_MSK | RG_TG_MIN_MSK |
- RG_T2_MAX_MSK | RG_TG_MAX_MSK);
- tmp |= RG_T2_MIN_VAL(0x12) | RG_TG_MIN_VAL(0x04) |
- RG_T2_MAX_VAL(0x31) | RG_TG_MAX_VAL(0x0e);
- writel(tmp, phyd + PHYD_DESIGN_OPTION9);
-
- tmp = readl(phyd + ANA_RG_CTRL_SIGNAL1);
- tmp &= ~RG_IDRV_0DB_GEN1_MSK;
- tmp |= RG_IDRV_0DB_GEN1_VAL(0x20);
- writel(tmp, phyd + ANA_RG_CTRL_SIGNAL1);
-
- tmp = readl(phyd + ANA_EQ_EYE_CTRL_SIGNAL1);
- tmp &= ~RG_EQ_DLEQ_LFI_GEN1_MSK;
- tmp |= RG_EQ_DLEQ_LFI_GEN1_VAL(0x03);
- writel(tmp, phyd + ANA_EQ_EYE_CTRL_SIGNAL1);
+ mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL6,
+ RG_CDR_BIRLTR_GEN1_MSK | RG_CDR_BC_GEN1_MSK,
+ RG_CDR_BIRLTR_GEN1_VAL(0x6) | RG_CDR_BC_GEN1_VAL(0x1a));
+
+ mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL4, RG_CDR_BIRLTD0_GEN1_MSK,
+ RG_CDR_BIRLTD0_GEN1_VAL(0x18));
+
+ mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL5, RG_CDR_BIRLTD0_GEN3_MSK,
+ RG_CDR_BIRLTD0_GEN3_VAL(0x06));
+
+ mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL4,
+ RG_CDR_BICLTR_GEN1_MSK | RG_CDR_BR_GEN2_MSK,
+ RG_CDR_BICLTR_GEN1_VAL(0x0c) | RG_CDR_BR_GEN2_VAL(0x07));
+
+ mtk_phy_update_bits(phyd + PHYD_CTRL_SIGNAL_MODE4,
+ RG_CDR_BICLTD0_GEN1_MSK | RG_CDR_BICLTD1_GEN1_MSK,
+ RG_CDR_BICLTD0_GEN1_VAL(0x08) | RG_CDR_BICLTD1_GEN1_VAL(0x02));
+
+ mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION2, RG_LOCK_CNT_SEL_MSK,
+ RG_LOCK_CNT_SEL_VAL(0x02));
+
+ mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION9,
+ RG_T2_MIN_MSK | RG_TG_MIN_MSK,
+ RG_T2_MIN_VAL(0x12) | RG_TG_MIN_VAL(0x04));
+
+ mtk_phy_update_bits(phyd + PHYD_DESIGN_OPTION9,
+ RG_T2_MAX_MSK | RG_TG_MAX_MSK,
+ RG_T2_MAX_VAL(0x31) | RG_TG_MAX_VAL(0x0e));
+
+ mtk_phy_update_bits(phyd + ANA_RG_CTRL_SIGNAL1, RG_IDRV_0DB_GEN1_MSK,
+ RG_IDRV_0DB_GEN1_VAL(0x20));
+
+ mtk_phy_update_bits(phyd + ANA_EQ_EYE_CTRL_SIGNAL1, RG_EQ_DLEQ_LFI_GEN1_MSK,
+ RG_EQ_DLEQ_LFI_GEN1_VAL(0x03));
dev_dbg(tphy->dev, "%s(%d)\n", __func__, instance->index);
}
@@ -938,48 +852,29 @@ static void u2_phy_props_set(struct mtk_tphy *tphy,
{
struct u2phy_banks *u2_banks = &instance->u2_banks;
void __iomem *com = u2_banks->com;
- u32 tmp;
- if (instance->bc12_en) {
- tmp = readl(com + U3P_U2PHYBC12C);
- tmp |= P2C_RG_CHGDT_EN; /* BC1.2 path Enable */
- writel(tmp, com + U3P_U2PHYBC12C);
- }
+ if (instance->bc12_en) /* BC1.2 path Enable */
+ mtk_phy_set_bits(com + U3P_U2PHYBC12C, P2C_RG_CHGDT_EN);
- if (tphy->pdata->version < MTK_PHY_V3 && instance->eye_src) {
- tmp = readl(com + U3P_USBPHYACR5);
- tmp &= ~PA5_RG_U2_HSTX_SRCTRL;
- tmp |= PA5_RG_U2_HSTX_SRCTRL_VAL(instance->eye_src);
- writel(tmp, com + U3P_USBPHYACR5);
- }
+ if (tphy->pdata->version < MTK_PHY_V3 && instance->eye_src)
+ mtk_phy_update_bits(com + U3P_USBPHYACR5, PA5_RG_U2_HSTX_SRCTRL,
+ PA5_RG_U2_HSTX_SRCTRL_VAL(instance->eye_src));
- if (instance->eye_vrt) {
- tmp = readl(com + U3P_USBPHYACR1);
- tmp &= ~PA1_RG_VRT_SEL;
- tmp |= PA1_RG_VRT_SEL_VAL(instance->eye_vrt);
- writel(tmp, com + U3P_USBPHYACR1);
- }
+ if (instance->eye_vrt)
+ mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_VRT_SEL,
+ PA1_RG_VRT_SEL_VAL(instance->eye_vrt));
- if (instance->eye_term) {
- tmp = readl(com + U3P_USBPHYACR1);
- tmp &= ~PA1_RG_TERM_SEL;
- tmp |= PA1_RG_TERM_SEL_VAL(instance->eye_term);
- writel(tmp, com + U3P_USBPHYACR1);
- }
+ if (instance->eye_term)
+ mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_TERM_SEL,
+ PA1_RG_TERM_SEL_VAL(instance->eye_term));
- if (instance->intr) {
- tmp = readl(com + U3P_USBPHYACR1);
- tmp &= ~PA1_RG_INTR_CAL;
- tmp |= PA1_RG_INTR_CAL_VAL(instance->intr);
- writel(tmp, com + U3P_USBPHYACR1);
- }
+ if (instance->intr)
+ mtk_phy_update_bits(com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
+ PA1_RG_INTR_CAL_VAL(instance->intr));
- if (instance->discth) {
- tmp = readl(com + U3P_USBPHYACR6);
- tmp &= ~PA6_RG_U2_DISCTH;
- tmp |= PA6_RG_U2_DISCTH_VAL(instance->discth);
- writel(tmp, com + U3P_USBPHYACR6);
- }
+ if (instance->discth)
+ mtk_phy_update_bits(com + U3P_USBPHYACR6, PA6_RG_U2_DISCTH,
+ PA6_RG_U2_DISCTH_VAL(instance->discth));
}
/* type switch for usb3/pcie/sgmii/sata */
@@ -1040,6 +935,117 @@ static int phy_type_set(struct mtk_phy_instance *instance)
return 0;
}
+static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instance)
+{
+ struct device *dev = &instance->phy->dev;
+ int ret = 0;
+
+ /* tphy v1 doesn't support sw efuse, skip it */
+ if (!tphy->pdata->sw_efuse_supported) {
+ instance->efuse_sw_en = 0;
+ return 0;
+ }
+
+ /* software efuse is optional */
+ instance->efuse_sw_en = device_property_read_bool(dev, "nvmem-cells");
+ if (!instance->efuse_sw_en)
+ return 0;
+
+ switch (instance->type) {
+ case PHY_TYPE_USB2:
+ ret = nvmem_cell_read_variable_le_u32(dev, "intr", &instance->efuse_intr);
+ if (ret) {
+ dev_err(dev, "fail to get u2 intr efuse, %d\n", ret);
+ break;
+ }
+
+ /* no efuse, ignore it */
+ if (!instance->efuse_intr) {
+ dev_warn(dev, "no u2 intr efuse, but dts enables it\n");
+ instance->efuse_sw_en = 0;
+ break;
+ }
+
+ dev_dbg(dev, "u2 efuse - intr %x\n", instance->efuse_intr);
+ break;
+
+ case PHY_TYPE_USB3:
+ case PHY_TYPE_PCIE:
+ ret = nvmem_cell_read_variable_le_u32(dev, "intr", &instance->efuse_intr);
+ if (ret) {
+ dev_err(dev, "fail to get u3 intr efuse, %d\n", ret);
+ break;
+ }
+
+ ret = nvmem_cell_read_variable_le_u32(dev, "rx_imp", &instance->efuse_rx_imp);
+ if (ret) {
+ dev_err(dev, "fail to get u3 rx_imp efuse, %d\n", ret);
+ break;
+ }
+
+ ret = nvmem_cell_read_variable_le_u32(dev, "tx_imp", &instance->efuse_tx_imp);
+ if (ret) {
+ dev_err(dev, "fail to get u3 tx_imp efuse, %d\n", ret);
+ break;
+ }
+
+ /* no efuse, ignore it */
+ if (!instance->efuse_intr &&
+ !instance->efuse_rx_imp &&
+ !instance->efuse_tx_imp) {
+ dev_warn(dev, "no u3 intr efuse, but dts enables it\n");
+ instance->efuse_sw_en = 0;
+ break;
+ }
+
+ dev_dbg(dev, "u3 efuse - intr %x, rx_imp %x, tx_imp %x\n",
+ instance->efuse_intr, instance->efuse_rx_imp, instance->efuse_tx_imp);
+ break;
+ default:
+ dev_err(dev, "no sw efuse for type %d\n", instance->type);
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void phy_efuse_set(struct mtk_phy_instance *instance)
+{
+ struct device *dev = &instance->phy->dev;
+ struct u2phy_banks *u2_banks = &instance->u2_banks;
+ struct u3phy_banks *u3_banks = &instance->u3_banks;
+
+ if (!instance->efuse_sw_en)
+ return;
+
+ switch (instance->type) {
+ case PHY_TYPE_USB2:
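+ /* disable hw efuse auto-load so the sw-read values below take effect */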
+ mtk_phy_set_bits(u2_banks->misc + U3P_MISC_REG1, MR1_EFUSE_AUTO_LOAD_DIS);
+
+ mtk_phy_update_bits(u2_banks->com + U3P_USBPHYACR1, PA1_RG_INTR_CAL,
+ PA1_RG_INTR_CAL_VAL(instance->efuse_intr));
+ break;
+ case PHY_TYPE_USB3:
+ case PHY_TYPE_PCIE:
+ mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_RSV, P3D_RG_EFUSE_AUTO_LOAD_DIS);
+
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_TX_IMPEL,
+ P3D_RG_TX_IMPEL_VAL(instance->efuse_tx_imp));
+ mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL0, P3D_RG_FORCE_TX_IMPEL);
+
+ mtk_phy_update_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_RX_IMPEL,
+ P3D_RG_RX_IMPEL_VAL(instance->efuse_rx_imp));
+ mtk_phy_set_bits(u3_banks->phyd + U3P_U3_PHYD_IMPCAL1, P3D_RG_FORCE_RX_IMPEL);
+
+ mtk_phy_update_bits(u3_banks->phya + U3P_U3_PHYA_REG0, P3A_RG_IEXT_INTR,
+ P3A_RG_IEXT_INTR_VAL(instance->efuse_intr));
+ break;
+ default:
+ dev_warn(dev, "no sw efuse for type %d\n", instance->type);
+ break;
+ }
+}
+
static int mtk_phy_init(struct phy *phy)
{
struct mtk_phy_instance *instance = phy_get_drvdata(phy);
@@ -1050,6 +1056,8 @@ static int mtk_phy_init(struct phy *phy)
if (ret)
return ret;
+ phy_efuse_set(instance);
+
switch (instance->type) {
case PHY_TYPE_USB2:
u2_phy_instance_init(tphy, instance);
@@ -1134,6 +1142,7 @@ static struct phy *mtk_phy_xlate(struct device *dev,
struct mtk_phy_instance *instance = NULL;
struct device_node *phy_np = args->np;
int index;
+ int ret;
if (args->args_count != 1) {
dev_err(dev, "invalid number of cells in 'phy' property\n");
@@ -1174,6 +1183,10 @@ static struct phy *mtk_phy_xlate(struct device *dev,
return ERR_PTR(-EINVAL);
}
+ ret = phy_efuse_get(tphy, instance);
+ if (ret)
+ return ERR_PTR(ret);
+
phy_parse_property(tphy, instance);
phy_type_set(instance);
@@ -1196,10 +1209,12 @@ static const struct mtk_phy_pdata tphy_v1_pdata = {
static const struct mtk_phy_pdata tphy_v2_pdata = {
.avoid_rx_sen_degradation = false,
+ .sw_efuse_supported = true,
.version = MTK_PHY_V2,
};
static const struct mtk_phy_pdata tphy_v3_pdata = {
+ .sw_efuse_supported = true,
.version = MTK_PHY_V3,
};
@@ -1210,6 +1225,7 @@ static const struct mtk_phy_pdata mt8173_pdata = {
static const struct mtk_phy_pdata mt8195_pdata = {
.sw_pll_48m_to_26m = true,
+ .sw_efuse_supported = true,
.version = MTK_PHY_V3,
};
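
The conversions above all funnel through three small read-modify-write helpers from the new phy-mtk-io.h header. A minimal sketch of what they amount to, assuming the header simply wraps readl()/writel() from <linux/io.h>:

	#include <linux/io.h>

	static inline void mtk_phy_clear_bits(void __iomem *reg, u32 bits)
	{
		u32 tmp = readl(reg);

		tmp &= ~bits;
		writel(tmp, reg);
	}

	static inline void mtk_phy_set_bits(void __iomem *reg, u32 bits)
	{
		u32 tmp = readl(reg);

		tmp |= bits;
		writel(tmp, reg);
	}

	/* clear the whole field, then install the new (pre-shifted) value */
	static inline void mtk_phy_update_bits(void __iomem *reg, u32 mask, u32 val)
	{
		u32 tmp = readl(reg);

		tmp &= ~mask;
		tmp |= val & mask;
		writel(tmp, reg);
	}

Each call site then becomes a single expression, and the explicit mask/value split makes it obvious which field is being touched.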
diff --git a/drivers/phy/mediatek/phy-mtk-xsphy.c b/drivers/phy/mediatek/phy-mtk-xsphy.c
index 8c51131945c0..c0cdb78f77fa 100644
--- a/drivers/phy/mediatek/phy-mtk-xsphy.c
+++ b/drivers/phy/mediatek/phy-mtk-xsphy.c
@@ -10,13 +10,14 @@
#include <dt-bindings/phy/phy.h>
#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include "phy-mtk-io.h"
+
/* u2 phy banks */
#define SSUSB_SIFSLV_MISC 0x000
#define SSUSB_SIFSLV_U2FREQ 0x100
@@ -126,26 +127,18 @@ static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy,
return;
/* enable USB ring oscillator */
- tmp = readl(pbase + XSP_USBPHYACR5);
- tmp |= P2A5_RG_HSTX_SRCAL_EN;
- writel(tmp, pbase + XSP_USBPHYACR5);
+ mtk_phy_set_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCAL_EN);
udelay(1); /* wait clock stable */
/* enable free run clock */
- tmp = readl(pbase + XSP_U2FREQ_FMMONR1);
- tmp |= P2F_RG_FRCK_EN;
- writel(tmp, pbase + XSP_U2FREQ_FMMONR1);
+ mtk_phy_set_bits(pbase + XSP_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
/* set cycle count as 1024 */
- tmp = readl(pbase + XSP_U2FREQ_FMCR0);
- tmp &= ~(P2F_RG_CYCLECNT);
- tmp |= P2F_RG_CYCLECNT_VAL(XSP_FM_DET_CYCLE_CNT);
- writel(tmp, pbase + XSP_U2FREQ_FMCR0);
+ mtk_phy_update_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_CYCLECNT,
+ P2F_RG_CYCLECNT_VAL(XSP_FM_DET_CYCLE_CNT));
/* enable frequency meter */
- tmp = readl(pbase + XSP_U2FREQ_FMCR0);
- tmp |= P2F_RG_FREQDET_EN;
- writel(tmp, pbase + XSP_U2FREQ_FMCR0);
+ mtk_phy_set_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
/* ignore return value */
readl_poll_timeout(pbase + XSP_U2FREQ_FMMONR1, tmp,
@@ -154,14 +147,10 @@ static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy,
fm_out = readl(pbase + XSP_U2FREQ_MMONR0);
/* disable frequency meter */
- tmp = readl(pbase + XSP_U2FREQ_FMCR0);
- tmp &= ~P2F_RG_FREQDET_EN;
- writel(tmp, pbase + XSP_U2FREQ_FMCR0);
+ mtk_phy_clear_bits(pbase + XSP_U2FREQ_FMCR0, P2F_RG_FREQDET_EN);
/* disable free run clock */
- tmp = readl(pbase + XSP_U2FREQ_FMMONR1);
- tmp &= ~P2F_RG_FRCK_EN;
- writel(tmp, pbase + XSP_U2FREQ_FMMONR1);
+ mtk_phy_clear_bits(pbase + XSP_U2FREQ_FMMONR1, P2F_RG_FRCK_EN);
if (fm_out) {
/* (1024 / FM_OUT) x reference clock frequency x coefficient */
@@ -177,31 +166,22 @@ static void u2_phy_slew_rate_calibrate(struct mtk_xsphy *xsphy,
xsphy->src_ref_clk, xsphy->src_coef);
/* set HS slew rate */
- tmp = readl(pbase + XSP_USBPHYACR5);
- tmp &= ~P2A5_RG_HSTX_SRCTRL;
- tmp |= P2A5_RG_HSTX_SRCTRL_VAL(calib_val);
- writel(tmp, pbase + XSP_USBPHYACR5);
+ mtk_phy_update_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL,
+ P2A5_RG_HSTX_SRCTRL_VAL(calib_val));
/* disable USB ring oscillator */
- tmp = readl(pbase + XSP_USBPHYACR5);
- tmp &= ~P2A5_RG_HSTX_SRCAL_EN;
- writel(tmp, pbase + XSP_USBPHYACR5);
+ mtk_phy_clear_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCAL_EN);
}
static void u2_phy_instance_init(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst)
{
void __iomem *pbase = inst->port_base;
- u32 tmp;
/* DP/DM BC1.1 path Disable */
- tmp = readl(pbase + XSP_USBPHYACR6);
- tmp &= ~P2A6_RG_BC11_SW_EN;
- writel(tmp, pbase + XSP_USBPHYACR6);
+ mtk_phy_clear_bits(pbase + XSP_USBPHYACR6, P2A6_RG_BC11_SW_EN);
- tmp = readl(pbase + XSP_USBPHYACR0);
- tmp |= P2A0_RG_INTR_EN;
- writel(tmp, pbase + XSP_USBPHYACR0);
+ mtk_phy_set_bits(pbase + XSP_USBPHYACR0, P2A0_RG_INTR_EN);
}
static void u2_phy_instance_power_on(struct mtk_xsphy *xsphy,
@@ -209,16 +189,12 @@ static void u2_phy_instance_power_on(struct mtk_xsphy *xsphy,
{
void __iomem *pbase = inst->port_base;
u32 index = inst->index;
- u32 tmp;
- tmp = readl(pbase + XSP_USBPHYACR6);
- tmp |= P2A6_RG_OTG_VBUSCMP_EN;
- writel(tmp, pbase + XSP_USBPHYACR6);
+ mtk_phy_set_bits(pbase + XSP_USBPHYACR6, P2A6_RG_OTG_VBUSCMP_EN);
- tmp = readl(pbase + XSP_U2PHYDTM1);
- tmp |= P2D_RG_VBUSVALID | P2D_RG_AVALID;
- tmp &= ~P2D_RG_SESSEND;
- writel(tmp, pbase + XSP_U2PHYDTM1);
+ mtk_phy_update_bits(pbase + XSP_U2PHYDTM1,
+ P2D_RG_VBUSVALID | P2D_RG_AVALID | P2D_RG_SESSEND,
+ P2D_RG_VBUSVALID | P2D_RG_AVALID);
dev_dbg(xsphy->dev, "%s(%d)\n", __func__, index);
}
@@ -228,16 +204,12 @@ static void u2_phy_instance_power_off(struct mtk_xsphy *xsphy,
{
void __iomem *pbase = inst->port_base;
u32 index = inst->index;
- u32 tmp;
- tmp = readl(pbase + XSP_USBPHYACR6);
- tmp &= ~P2A6_RG_OTG_VBUSCMP_EN;
- writel(tmp, pbase + XSP_USBPHYACR6);
+ mtk_phy_clear_bits(pbase + XSP_USBPHYACR6, P2A6_RG_OTG_VBUSCMP_EN);
- tmp = readl(pbase + XSP_U2PHYDTM1);
- tmp &= ~(P2D_RG_VBUSVALID | P2D_RG_AVALID);
- tmp |= P2D_RG_SESSEND;
- writel(tmp, pbase + XSP_U2PHYDTM1);
+ mtk_phy_update_bits(pbase + XSP_U2PHYDTM1,
+ P2D_RG_VBUSVALID | P2D_RG_AVALID | P2D_RG_SESSEND,
+ P2D_RG_SESSEND);
dev_dbg(xsphy->dev, "%s(%d)\n", __func__, index);
}
@@ -306,63 +278,43 @@ static void u2_phy_props_set(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst)
{
void __iomem *pbase = inst->port_base;
- u32 tmp;
- if (inst->efuse_intr) {
- tmp = readl(pbase + XSP_USBPHYACR1);
- tmp &= ~P2A1_RG_INTR_CAL;
- tmp |= P2A1_RG_INTR_CAL_VAL(inst->efuse_intr);
- writel(tmp, pbase + XSP_USBPHYACR1);
- }
+ if (inst->efuse_intr)
+ mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_INTR_CAL,
+ P2A1_RG_INTR_CAL_VAL(inst->efuse_intr));
- if (inst->eye_src) {
- tmp = readl(pbase + XSP_USBPHYACR5);
- tmp &= ~P2A5_RG_HSTX_SRCTRL;
- tmp |= P2A5_RG_HSTX_SRCTRL_VAL(inst->eye_src);
- writel(tmp, pbase + XSP_USBPHYACR5);
- }
+ if (inst->eye_src)
+ mtk_phy_update_bits(pbase + XSP_USBPHYACR5, P2A5_RG_HSTX_SRCTRL,
+ P2A5_RG_HSTX_SRCTRL_VAL(inst->eye_src));
- if (inst->eye_vrt) {
- tmp = readl(pbase + XSP_USBPHYACR1);
- tmp &= ~P2A1_RG_VRT_SEL;
- tmp |= P2A1_RG_VRT_SEL_VAL(inst->eye_vrt);
- writel(tmp, pbase + XSP_USBPHYACR1);
- }
+ if (inst->eye_vrt)
+ mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_VRT_SEL,
+ P2A1_RG_VRT_SEL_VAL(inst->eye_vrt));
- if (inst->eye_term) {
- tmp = readl(pbase + XSP_USBPHYACR1);
- tmp &= ~P2A1_RG_TERM_SEL;
- tmp |= P2A1_RG_TERM_SEL_VAL(inst->eye_term);
- writel(tmp, pbase + XSP_USBPHYACR1);
- }
+ if (inst->eye_term)
+ mtk_phy_update_bits(pbase + XSP_USBPHYACR1, P2A1_RG_TERM_SEL,
+ P2A1_RG_TERM_SEL_VAL(inst->eye_term));
}
static void u3_phy_props_set(struct mtk_xsphy *xsphy,
struct xsphy_instance *inst)
{
void __iomem *pbase = inst->port_base;
- u32 tmp;
- if (inst->efuse_intr) {
- tmp = readl(xsphy->glb_base + SSPXTP_PHYA_GLB_00);
- tmp &= ~RG_XTP_GLB_BIAS_INTR_CTRL;
- tmp |= RG_XTP_GLB_BIAS_INTR_CTRL_VAL(inst->efuse_intr);
- writel(tmp, xsphy->glb_base + SSPXTP_PHYA_GLB_00);
- }
+ if (inst->efuse_intr)
+ mtk_phy_update_bits(xsphy->glb_base + SSPXTP_PHYA_GLB_00,
+ RG_XTP_GLB_BIAS_INTR_CTRL,
+ RG_XTP_GLB_BIAS_INTR_CTRL_VAL(inst->efuse_intr));
- if (inst->efuse_tx_imp) {
- tmp = readl(pbase + SSPXTP_PHYA_LN_04);
- tmp &= ~RG_XTP_LN0_TX_IMPSEL;
- tmp |= RG_XTP_LN0_TX_IMPSEL_VAL(inst->efuse_tx_imp);
- writel(tmp, pbase + SSPXTP_PHYA_LN_04);
- }
+ if (inst->efuse_tx_imp)
+ mtk_phy_update_bits(pbase + SSPXTP_PHYA_LN_04,
+ RG_XTP_LN0_TX_IMPSEL,
+ RG_XTP_LN0_TX_IMPSEL_VAL(inst->efuse_tx_imp));
- if (inst->efuse_rx_imp) {
- tmp = readl(pbase + SSPXTP_PHYA_LN_14);
- tmp &= ~RG_XTP_LN0_RX_IMPSEL;
- tmp |= RG_XTP_LN0_RX_IMPSEL_VAL(inst->efuse_rx_imp);
- writel(tmp, pbase + SSPXTP_PHYA_LN_14);
- }
+ if (inst->efuse_rx_imp)
+ mtk_phy_update_bits(pbase + SSPXTP_PHYA_LN_14,
+ RG_XTP_LN0_RX_IMPSEL,
+ RG_XTP_LN0_RX_IMPSEL_VAL(inst->efuse_rx_imp));
}
static int mtk_phy_init(struct phy *phy)
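
For reference, the slew-rate code produced by the calibration above follows the formula in the hunk's comment, (1024 / FM_OUT) x reference clock x coefficient. A hedged sketch of that arithmetic, with illustrative names (ref_clk, coef and divisor stand in for the driver's xsphy->src_ref_clk, xsphy->src_coef and its scaling constant):

	#include <linux/math64.h>

	static u32 hstx_srctrl_from_fm(u32 fm_out, u32 ref_clk, u32 coef, u32 divisor)
	{
		u64 tmp;

		if (!fm_out)
			return 0;	/* meter produced nothing; keep the default */

		/* (1024 / FM_OUT) x reference clock frequency x coefficient */
		tmp = (u64)ref_clk * coef * 1024;	/* 1024 == XSP_FM_DET_CYCLE_CNT */
		tmp = div_u64(tmp, fm_out);

		return (u32)div_u64(tmp + divisor / 2, divisor);	/* round to nearest */
	}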
diff --git a/drivers/phy/microchip/Kconfig b/drivers/phy/microchip/Kconfig
index 3728a284bf64..38039ed0754c 100644
--- a/drivers/phy/microchip/Kconfig
+++ b/drivers/phy/microchip/Kconfig
@@ -11,3 +11,11 @@ config PHY_SPARX5_SERDES
depends on HAS_IOMEM
help
Enable this for support of the 10G/25G SerDes on Microchip Sparx5.
+
+config PHY_LAN966X_SERDES
+ tristate "SerDes PHY driver for Microchip LAN966X"
+ select GENERIC_PHY
+ depends on OF
+ depends on MFD_SYSCON
+ help
+ Enable this to support SerDes muxing on the Microchip LAN966X.
diff --git a/drivers/phy/microchip/Makefile b/drivers/phy/microchip/Makefile
index 7b98345712aa..fd73b87960a5 100644
--- a/drivers/phy/microchip/Makefile
+++ b/drivers/phy/microchip/Makefile
@@ -4,3 +4,4 @@
#
obj-$(CONFIG_PHY_SPARX5_SERDES) += sparx5_serdes.o
+obj-$(CONFIG_PHY_LAN966X_SERDES) += lan966x_serdes.o
diff --git a/drivers/phy/microchip/lan966x_serdes.c b/drivers/phy/microchip/lan966x_serdes.c
new file mode 100644
index 000000000000..e86a879b92b5
--- /dev/null
+++ b/drivers/phy/microchip/lan966x_serdes.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/phy/phy-lan966x-serdes.h>
+#include "lan966x_serdes_regs.h"
+
+#define PLL_CONF_MASK GENMASK(4, 3)
+#define PLL_CONF_25MHZ 0
+#define PLL_CONF_125MHZ 1
+#define PLL_CONF_SERDES_125MHZ 2
+#define PLL_CONF_BYPASS 3
+
+#define lan_offset_(id, tinst, tcnt, \
+ gbase, ginst, gcnt, gwidth, \
+ raddr, rinst, rcnt, rwidth) \
+ (gbase + ((ginst) * gwidth) + raddr + ((rinst) * rwidth))
+#define lan_offset(...) lan_offset_(__VA_ARGS__)
+
+#define lan_rmw(val, mask, reg, off) \
+ lan_rmw_(val, mask, reg, lan_offset(off))
+
+#define SERDES_MUX(_idx, _port, _mode, _submode, _mask, _mux) { \
+ .idx = _idx, \
+ .port = _port, \
+ .mode = _mode, \
+ .submode = _submode, \
+ .mask = _mask, \
+ .mux = _mux, \
+}
+
+#define SERDES_MUX_GMII(i, p, m, c) \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_GMII, m, c)
+#define SERDES_MUX_SGMII(i, p, m, c) \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_SGMII, m, c)
+#define SERDES_MUX_QSGMII(i, p, m, c) \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_QSGMII, m, c)
+#define SERDES_MUX_RGMII(i, p, m, c) \
+ SERDES_MUX(i, p, PHY_MODE_ETHERNET, PHY_INTERFACE_MODE_RGMII, m, c)
+
+static void lan_rmw_(u32 val, u32 mask, void __iomem *mem, u32 offset)
+{
+ u32 v;
+
+ v = readl(mem + offset);
+ v = (v & ~mask) | (val & mask);
+ writel(v, mem + offset);
+}
+
+struct serdes_mux {
+ u8 idx;
+ u8 port;
+ enum phy_mode mode;
+ int submode;
+ u32 mask;
+ u32 mux;
+};
+
+static const struct serdes_mux lan966x_serdes_muxes[] = {
+ SERDES_MUX_QSGMII(SERDES6G(1), 0, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))),
+ SERDES_MUX_QSGMII(SERDES6G(1), 1, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))),
+ SERDES_MUX_QSGMII(SERDES6G(1), 2, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))),
+ SERDES_MUX_QSGMII(SERDES6G(1), 3, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(0))),
+
+ SERDES_MUX_QSGMII(SERDES6G(2), 4, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))),
+ SERDES_MUX_QSGMII(SERDES6G(2), 5, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))),
+ SERDES_MUX_QSGMII(SERDES6G(2), 6, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))),
+ SERDES_MUX_QSGMII(SERDES6G(2), 7, HSIO_HW_CFG_QSGMII_ENA,
+ HSIO_HW_CFG_QSGMII_ENA_SET(BIT(1))),
+
+ SERDES_MUX_GMII(CU(0), 0, HSIO_HW_CFG_GMII_ENA,
+ HSIO_HW_CFG_GMII_ENA_SET(BIT(0))),
+ SERDES_MUX_GMII(CU(1), 1, HSIO_HW_CFG_GMII_ENA,
+ HSIO_HW_CFG_GMII_ENA_SET(BIT(1))),
+
+ SERDES_MUX_SGMII(SERDES6G(0), 0, HSIO_HW_CFG_SD6G_0_CFG, 0),
+ SERDES_MUX_SGMII(SERDES6G(1), 1, HSIO_HW_CFG_SD6G_1_CFG, 0),
+ SERDES_MUX_SGMII(SERDES6G(0), 2, HSIO_HW_CFG_SD6G_0_CFG,
+ HSIO_HW_CFG_SD6G_0_CFG_SET(1)),
+ SERDES_MUX_SGMII(SERDES6G(1), 3, HSIO_HW_CFG_SD6G_1_CFG,
+ HSIO_HW_CFG_SD6G_1_CFG_SET(1)),
+
+ SERDES_MUX_RGMII(RGMII(0), 2, HSIO_HW_CFG_RGMII_0_CFG |
+ HSIO_HW_CFG_RGMII_ENA,
+ HSIO_HW_CFG_RGMII_0_CFG_SET(BIT(0)) |
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(0))),
+ SERDES_MUX_RGMII(RGMII(1), 3, HSIO_HW_CFG_RGMII_1_CFG |
+ HSIO_HW_CFG_RGMII_ENA,
+ HSIO_HW_CFG_RGMII_1_CFG_SET(BIT(0)) |
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(1))),
+ SERDES_MUX_RGMII(RGMII(0), 5, HSIO_HW_CFG_RGMII_0_CFG |
+ HSIO_HW_CFG_RGMII_ENA,
+ HSIO_HW_CFG_RGMII_0_CFG_SET(BIT(0)) |
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(0))),
+ SERDES_MUX_RGMII(RGMII(1), 6, HSIO_HW_CFG_RGMII_1_CFG |
+ HSIO_HW_CFG_RGMII_ENA,
+ HSIO_HW_CFG_RGMII_1_CFG_SET(BIT(0)) |
+ HSIO_HW_CFG_RGMII_ENA_SET(BIT(1))),
+};
+
+struct serdes_ctrl {
+ void __iomem *regs;
+ struct device *dev;
+ struct phy *phys[SERDES_MAX];
+ int ref125;
+};
+
+struct serdes_macro {
+ u8 idx;
+ int port;
+ struct serdes_ctrl *ctrl;
+ int speed;
+ phy_interface_t mode;
+};
+
+enum lan966x_sd6g40_mode {
+ LAN966X_SD6G40_MODE_QSGMII,
+ LAN966X_SD6G40_MODE_SGMII,
+};
+
+enum lan966x_sd6g40_ltx2rx {
+ LAN966X_SD6G40_TX2RX_LOOP_NONE,
+ LAN966X_SD6G40_LTX2RX
+};
+
+struct lan966x_sd6g40_setup_args {
+ enum lan966x_sd6g40_mode mode;
+ enum lan966x_sd6g40_ltx2rx tx2rx_loop;
+ bool txinvert;
+ bool rxinvert;
+ bool refclk125M;
+ bool mute;
+};
+
+struct lan966x_sd6g40_mode_args {
+ enum lan966x_sd6g40_mode mode;
+ u8 lane_10bit_sel;
+ u8 mpll_multiplier;
+ u8 ref_clkdiv2;
+ u8 tx_rate;
+ u8 rx_rate;
+};
+
+struct lan966x_sd6g40_setup {
+ u8 rx_term_en;
+ u8 lane_10bit_sel;
+ u8 tx_invert;
+ u8 rx_invert;
+ u8 mpll_multiplier;
+ u8 lane_loopbk_en;
+ u8 ref_clkdiv2;
+ u8 tx_rate;
+ u8 rx_rate;
+};
+
+static int lan966x_sd6g40_reg_cfg(struct serdes_macro *macro,
+ struct lan966x_sd6g40_setup *res_struct,
+ u32 idx)
+{
+ u32 value;
+
+ /* Note: SerDes HSIO is configured in 1G_LAN mode */
+ lan_rmw(HSIO_SD_CFG_LANE_10BIT_SEL_SET(res_struct->lane_10bit_sel) |
+ HSIO_SD_CFG_RX_RATE_SET(res_struct->rx_rate) |
+ HSIO_SD_CFG_TX_RATE_SET(res_struct->tx_rate) |
+ HSIO_SD_CFG_TX_INVERT_SET(res_struct->tx_invert) |
+ HSIO_SD_CFG_RX_INVERT_SET(res_struct->rx_invert) |
+ HSIO_SD_CFG_LANE_LOOPBK_EN_SET(res_struct->lane_loopbk_en) |
+ HSIO_SD_CFG_RX_RESET_SET(0) |
+ HSIO_SD_CFG_TX_RESET_SET(0),
+ HSIO_SD_CFG_LANE_10BIT_SEL |
+ HSIO_SD_CFG_RX_RATE |
+ HSIO_SD_CFG_TX_RATE |
+ HSIO_SD_CFG_TX_INVERT |
+ HSIO_SD_CFG_RX_INVERT |
+ HSIO_SD_CFG_LANE_LOOPBK_EN |
+ HSIO_SD_CFG_RX_RESET |
+ HSIO_SD_CFG_TX_RESET,
+ macro->ctrl->regs, HSIO_SD_CFG(idx));
+
+ lan_rmw(HSIO_MPLL_CFG_MPLL_MULTIPLIER_SET(res_struct->mpll_multiplier) |
+ HSIO_MPLL_CFG_REF_CLKDIV2_SET(res_struct->ref_clkdiv2),
+ HSIO_MPLL_CFG_MPLL_MULTIPLIER |
+ HSIO_MPLL_CFG_REF_CLKDIV2,
+ macro->ctrl->regs, HSIO_MPLL_CFG(idx));
+
+ lan_rmw(HSIO_SD_CFG_RX_TERM_EN_SET(res_struct->rx_term_en),
+ HSIO_SD_CFG_RX_TERM_EN,
+ macro->ctrl->regs, HSIO_SD_CFG(idx));
+
+ lan_rmw(HSIO_MPLL_CFG_REF_SSP_EN_SET(1),
+ HSIO_MPLL_CFG_REF_SSP_EN,
+ macro->ctrl->regs, HSIO_MPLL_CFG(idx));
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ lan_rmw(HSIO_SD_CFG_PHY_RESET_SET(0),
+ HSIO_SD_CFG_PHY_RESET,
+ macro->ctrl->regs, HSIO_SD_CFG(idx));
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ lan_rmw(HSIO_MPLL_CFG_MPLL_EN_SET(1),
+ HSIO_MPLL_CFG_MPLL_EN,
+ macro->ctrl->regs, HSIO_MPLL_CFG(idx));
+
+ usleep_range(7 * USEC_PER_MSEC, 8 * USEC_PER_MSEC);
+
+ value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx)));
+ value = HSIO_SD_STAT_MPLL_STATE_GET(value);
+ if (value != 0x1) {
+ dev_err(macro->ctrl->dev,
+ "Unexpected sd_sd_stat[%u] mpll_state was 0x1 but is 0x%x\n",
+ idx, value);
+ return -EIO;
+ }
+
+ lan_rmw(HSIO_SD_CFG_TX_CM_EN_SET(1),
+ HSIO_SD_CFG_TX_CM_EN,
+ macro->ctrl->regs, HSIO_SD_CFG(idx));
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx)));
+ value = HSIO_SD_STAT_TX_CM_STATE_GET(value);
+ if (value != 0x1) {
+ dev_err(macro->ctrl->dev,
+ "Unexpected sd_sd_stat[%u] tx_cm_state was 0x1 but is 0x%x\n",
+ idx, value);
+ return -EIO;
+ }
+
+ lan_rmw(HSIO_SD_CFG_RX_PLL_EN_SET(1) |
+ HSIO_SD_CFG_TX_EN_SET(1),
+ HSIO_SD_CFG_RX_PLL_EN |
+ HSIO_SD_CFG_TX_EN,
+ macro->ctrl->regs, HSIO_SD_CFG(idx));
+
+ usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
+
+ /* check that the serdes rx DPLL has locked */
+ value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx)));
+ value = HSIO_SD_STAT_RX_PLL_STATE_GET(value);
+ if (value != 0x1) {
+ dev_err(macro->ctrl->dev,
+ "Unexpected sd_sd_stat[%u] rx_pll_state was 0x1 but is 0x%x\n",
+ idx, value);
+ return -EIO;
+ }
+
+ /* check that the serdes tx is operational */
+ value = readl(macro->ctrl->regs + lan_offset(HSIO_SD_STAT(idx)));
+ value = HSIO_SD_STAT_TX_STATE_GET(value);
+ if (value != 0x1) {
+ dev_err(macro->ctrl->dev,
+ "Unexpected sd_sd_stat[%u] tx_state was 0x1 but is 0x%x\n",
+ idx, value);
+ return -EIO;
+ }
+
+ lan_rmw(HSIO_SD_CFG_TX_DATA_EN_SET(1) |
+ HSIO_SD_CFG_RX_DATA_EN_SET(1),
+ HSIO_SD_CFG_TX_DATA_EN |
+ HSIO_SD_CFG_RX_DATA_EN,
+ macro->ctrl->regs, HSIO_SD_CFG(idx));
+
+ return 0;
+}
+
+static int lan966x_sd6g40_get_conf_from_mode(struct serdes_macro *macro,
+ enum lan966x_sd6g40_mode f_mode,
+ bool ref125M,
+ struct lan966x_sd6g40_mode_args *ret_val)
+{
+ switch (f_mode) {
+ case LAN966X_SD6G40_MODE_QSGMII:
+ ret_val->lane_10bit_sel = 0;
+ if (ref125M) {
+ ret_val->mpll_multiplier = 40;
+ ret_val->ref_clkdiv2 = 0x1;
+ ret_val->tx_rate = 0x0;
+ ret_val->rx_rate = 0x0;
+ } else {
+ ret_val->mpll_multiplier = 100;
+ ret_val->ref_clkdiv2 = 0x0;
+ ret_val->tx_rate = 0x0;
+ ret_val->rx_rate = 0x0;
+ }
+ break;
+
+ case LAN966X_SD6G40_MODE_SGMII:
+ ret_val->lane_10bit_sel = 1;
+ if (ref125M) {
+ ret_val->mpll_multiplier = macro->speed == SPEED_2500 ? 50 : 40;
+ ret_val->ref_clkdiv2 = 0x1;
+ ret_val->tx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2;
+ ret_val->rx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2;
+ } else {
+ ret_val->mpll_multiplier = macro->speed == SPEED_2500 ? 125 : 100;
+ ret_val->ref_clkdiv2 = 0x0;
+ ret_val->tx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2;
+ ret_val->rx_rate = macro->speed == SPEED_2500 ? 0x1 : 0x2;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int lan966x_calc_sd6g40_setup_lane(struct serdes_macro *macro,
+ struct lan966x_sd6g40_setup_args config,
+ struct lan966x_sd6g40_setup *ret_val)
+{
+ struct lan966x_sd6g40_mode_args sd6g40_mode;
+ struct lan966x_sd6g40_mode_args *mode_args = &sd6g40_mode;
+ int ret;
+
+ ret = lan966x_sd6g40_get_conf_from_mode(macro, config.mode,
+ config.refclk125M, mode_args);
+ if (ret)
+ return ret;
+
+ ret_val->lane_10bit_sel = mode_args->lane_10bit_sel;
+ ret_val->rx_rate = mode_args->rx_rate;
+ ret_val->tx_rate = mode_args->tx_rate;
+ ret_val->mpll_multiplier = mode_args->mpll_multiplier;
+ ret_val->ref_clkdiv2 = mode_args->ref_clkdiv2;
+ ret_val->rx_term_en = 0;
+
+ if (config.tx2rx_loop == LAN966X_SD6G40_LTX2RX)
+ ret_val->lane_loopbk_en = 1;
+ else
+ ret_val->lane_loopbk_en = 0;
+
+ ret_val->tx_invert = !!config.txinvert;
+ ret_val->rx_invert = !!config.rxinvert;
+
+ return 0;
+}
+
+static int lan966x_sd6g40_setup_lane(struct serdes_macro *macro,
+ struct lan966x_sd6g40_setup_args config,
+ u32 idx)
+{
+ struct lan966x_sd6g40_setup calc_results = {};
+ int ret;
+
+ ret = lan966x_calc_sd6g40_setup_lane(macro, config, &calc_results);
+ if (ret)
+ return ret;
+
+ return lan966x_sd6g40_reg_cfg(macro, &calc_results, idx);
+}
+
+static int lan966x_sd6g40_setup(struct serdes_macro *macro, u32 idx, int mode)
+{
+ struct lan966x_sd6g40_setup_args conf = {};
+
+ conf.refclk125M = macro->ctrl->ref125;
+
+ if (mode == PHY_INTERFACE_MODE_QSGMII)
+ conf.mode = LAN966X_SD6G40_MODE_QSGMII;
+ else
+ conf.mode = LAN966X_SD6G40_MODE_SGMII;
+
+ return lan966x_sd6g40_setup_lane(macro, conf, idx);
+}
+
+static int serdes_set_mode(struct phy *phy, enum phy_mode mode, int submode)
+{
+ struct serdes_macro *macro = phy_get_drvdata(phy);
+ unsigned int i;
+ int val;
+
+ /* As of now only PHY_MODE_ETHERNET is supported */
+ if (mode != PHY_MODE_ETHERNET)
+ return -EOPNOTSUPP;
+
+ if (submode == PHY_INTERFACE_MODE_2500BASEX)
+ macro->speed = SPEED_2500;
+ else
+ macro->speed = SPEED_1000;
+
+ if (submode == PHY_INTERFACE_MODE_1000BASEX ||
+ submode == PHY_INTERFACE_MODE_2500BASEX)
+ submode = PHY_INTERFACE_MODE_SGMII;
+
+ for (i = 0; i < ARRAY_SIZE(lan966x_serdes_muxes); i++) {
+ if (macro->idx != lan966x_serdes_muxes[i].idx ||
+ mode != lan966x_serdes_muxes[i].mode ||
+ submode != lan966x_serdes_muxes[i].submode ||
+ macro->port != lan966x_serdes_muxes[i].port)
+ continue;
+
+ val = readl(macro->ctrl->regs + lan_offset(HSIO_HW_CFG));
+ val |= lan966x_serdes_muxes[i].mux;
+ lan_rmw(val, lan966x_serdes_muxes[i].mask,
+ macro->ctrl->regs, HSIO_HW_CFG);
+
+ macro->mode = lan966x_serdes_muxes[i].submode;
+
+ if (macro->idx < CU_MAX)
+ return 0;
+
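+ /*
+ * Macro indices are allocated CU range first, then SERDES6G, then
+ * RGMII, so subtracting CU_MAX + 1 below converts macro->idx back
+ * into a 6G lane number.
+ */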
+ if (macro->idx < SERDES6G_MAX)
+ return lan966x_sd6g40_setup(macro,
+ macro->idx - (CU_MAX + 1),
+ macro->mode);
+
+ if (macro->idx < RGMII_MAX)
+ return 0;
+
+ return -EOPNOTSUPP;
+ }
+
+ return -EINVAL;
+}
+
+static const struct phy_ops serdes_ops = {
+ .set_mode = serdes_set_mode,
+ .owner = THIS_MODULE,
+};
+
+static struct phy *serdes_simple_xlate(struct device *dev,
+ struct of_phandle_args *args)
+{
+ struct serdes_ctrl *ctrl = dev_get_drvdata(dev);
+ unsigned int port, idx, i;
+
+ if (args->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ port = args->args[0];
+ idx = args->args[1];
+
+ for (i = 0; i < SERDES_MAX; i++) {
+ struct serdes_macro *macro = phy_get_drvdata(ctrl->phys[i]);
+
+ if (idx != macro->idx)
+ continue;
+
+ macro->port = port;
+ return ctrl->phys[i];
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int serdes_phy_create(struct serdes_ctrl *ctrl, u8 idx, struct phy **phy)
+{
+ struct serdes_macro *macro;
+
+ *phy = devm_phy_create(ctrl->dev, NULL, &serdes_ops);
+ if (IS_ERR(*phy))
+ return PTR_ERR(*phy);
+
+ macro = devm_kzalloc(ctrl->dev, sizeof(*macro), GFP_KERNEL);
+ if (!macro)
+ return -ENOMEM;
+
+ macro->idx = idx;
+ macro->ctrl = ctrl;
+ macro->port = -1;
+
+ phy_set_drvdata(*phy, macro);
+
+ return 0;
+}
+
+static int serdes_probe(struct platform_device *pdev)
+{
+ struct phy_provider *provider;
+ struct serdes_ctrl *ctrl;
+ void __iomem *hw_stat;
+ unsigned int i;
+ u32 val;
+ int ret;
+
+ ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->dev = &pdev->dev;
+ ctrl->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(ctrl->regs))
+ return PTR_ERR(ctrl->regs);
+
+ hw_stat = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+ if (IS_ERR(hw_stat))
+ return PTR_ERR(hw_stat);
+
+ for (i = 0; i < SERDES_MAX; i++) {
+ ret = serdes_phy_create(ctrl, i, &ctrl->phys[i]);
+ if (ret)
+ return ret;
+ }
+
+ val = readl(hw_stat);
+ val = FIELD_GET(PLL_CONF_MASK, val);
+ ctrl->ref125 = (val == PLL_CONF_125MHZ ||
+ val == PLL_CONF_SERDES_125MHZ);
+
+ dev_set_drvdata(&pdev->dev, ctrl);
+
+ provider = devm_of_phy_provider_register(ctrl->dev,
+ serdes_simple_xlate);
+
+ return PTR_ERR_OR_ZERO(provider);
+}
+
+static const struct of_device_id serdes_ids[] = {
+ { .compatible = "microchip,lan966x-serdes", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, serdes_ids);
+
+static struct platform_driver mscc_lan966x_serdes = {
+ .probe = serdes_probe,
+ .driver = {
+ .name = "microchip,lan966x-serdes",
+ .of_match_table = of_match_ptr(serdes_ids),
+ },
+};
+
+module_platform_driver(mscc_lan966x_serdes);
+
+MODULE_DESCRIPTION("Microchip lan966x switch serdes driver");
+MODULE_AUTHOR("Horatiu Vultur <horatiu.vultur@microchip.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/microchip/lan966x_serdes_regs.h b/drivers/phy/microchip/lan966x_serdes_regs.h
new file mode 100644
index 000000000000..ea30f64ffd5c
--- /dev/null
+++ b/drivers/phy/microchip/lan966x_serdes_regs.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _LAN966X_SERDES_REGS_H_
+#define _LAN966X_SERDES_REGS_H_
+
+#include <linux/bitfield.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+
+enum lan966x_target {
+ TARGET_HSIO = 32,
+ NUM_TARGETS = 66
+};
+
+#define __REG(...) __VA_ARGS__
+
+/* HSIO:SD:SD_CFG */
+#define HSIO_SD_CFG(g) __REG(TARGET_HSIO, 0, 1, 8, g, 3, 32, 0, 0, 1, 4)
+
+#define HSIO_SD_CFG_PHY_RESET BIT(27)
+#define HSIO_SD_CFG_PHY_RESET_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_PHY_RESET, x)
+#define HSIO_SD_CFG_PHY_RESET_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_PHY_RESET, x)
+
+#define HSIO_SD_CFG_TX_RESET BIT(18)
+#define HSIO_SD_CFG_TX_RESET_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_TX_RESET, x)
+#define HSIO_SD_CFG_TX_RESET_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_TX_RESET, x)
+
+#define HSIO_SD_CFG_TX_RATE GENMASK(17, 16)
+#define HSIO_SD_CFG_TX_RATE_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_TX_RATE, x)
+#define HSIO_SD_CFG_TX_RATE_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_TX_RATE, x)
+
+#define HSIO_SD_CFG_TX_INVERT BIT(15)
+#define HSIO_SD_CFG_TX_INVERT_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_TX_INVERT, x)
+#define HSIO_SD_CFG_TX_INVERT_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_TX_INVERT, x)
+
+#define HSIO_SD_CFG_TX_EN BIT(14)
+#define HSIO_SD_CFG_TX_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_TX_EN, x)
+#define HSIO_SD_CFG_TX_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_TX_EN, x)
+
+#define HSIO_SD_CFG_TX_DATA_EN BIT(12)
+#define HSIO_SD_CFG_TX_DATA_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_TX_DATA_EN, x)
+#define HSIO_SD_CFG_TX_DATA_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_TX_DATA_EN, x)
+
+#define HSIO_SD_CFG_TX_CM_EN BIT(11)
+#define HSIO_SD_CFG_TX_CM_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_TX_CM_EN, x)
+#define HSIO_SD_CFG_TX_CM_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_TX_CM_EN, x)
+
+#define HSIO_SD_CFG_LANE_10BIT_SEL BIT(10)
+#define HSIO_SD_CFG_LANE_10BIT_SEL_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_LANE_10BIT_SEL, x)
+#define HSIO_SD_CFG_LANE_10BIT_SEL_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_LANE_10BIT_SEL, x)
+
+#define HSIO_SD_CFG_RX_TERM_EN BIT(9)
+#define HSIO_SD_CFG_RX_TERM_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_RX_TERM_EN, x)
+#define HSIO_SD_CFG_RX_TERM_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_RX_TERM_EN, x)
+
+#define HSIO_SD_CFG_RX_RESET BIT(8)
+#define HSIO_SD_CFG_RX_RESET_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_RX_RESET, x)
+#define HSIO_SD_CFG_RX_RESET_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_RX_RESET, x)
+
+#define HSIO_SD_CFG_RX_RATE GENMASK(7, 6)
+#define HSIO_SD_CFG_RX_RATE_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_RX_RATE, x)
+#define HSIO_SD_CFG_RX_RATE_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_RX_RATE, x)
+
+#define HSIO_SD_CFG_RX_PLL_EN BIT(5)
+#define HSIO_SD_CFG_RX_PLL_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_RX_PLL_EN, x)
+#define HSIO_SD_CFG_RX_PLL_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_RX_PLL_EN, x)
+
+#define HSIO_SD_CFG_RX_INVERT BIT(3)
+#define HSIO_SD_CFG_RX_INVERT_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_RX_INVERT, x)
+#define HSIO_SD_CFG_RX_INVERT_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_RX_INVERT, x)
+
+#define HSIO_SD_CFG_RX_DATA_EN BIT(2)
+#define HSIO_SD_CFG_RX_DATA_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_RX_DATA_EN, x)
+#define HSIO_SD_CFG_RX_DATA_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_RX_DATA_EN, x)
+
+#define HSIO_SD_CFG_LANE_LOOPBK_EN BIT(0)
+#define HSIO_SD_CFG_LANE_LOOPBK_EN_SET(x)\
+ FIELD_PREP(HSIO_SD_CFG_LANE_LOOPBK_EN, x)
+#define HSIO_SD_CFG_LANE_LOOPBK_EN_GET(x)\
+ FIELD_GET(HSIO_SD_CFG_LANE_LOOPBK_EN, x)
+
+/* HSIO:SD:MPLL_CFG */
+#define HSIO_MPLL_CFG(g) __REG(TARGET_HSIO, 0, 1, 8, g, 3, 32, 8, 0, 1, 4)
+
+#define HSIO_MPLL_CFG_REF_SSP_EN BIT(18)
+#define HSIO_MPLL_CFG_REF_SSP_EN_SET(x)\
+ FIELD_PREP(HSIO_MPLL_CFG_REF_SSP_EN, x)
+#define HSIO_MPLL_CFG_REF_SSP_EN_GET(x)\
+ FIELD_GET(HSIO_MPLL_CFG_REF_SSP_EN, x)
+
+#define HSIO_MPLL_CFG_REF_CLKDIV2 BIT(17)
+#define HSIO_MPLL_CFG_REF_CLKDIV2_SET(x)\
+ FIELD_PREP(HSIO_MPLL_CFG_REF_CLKDIV2, x)
+#define HSIO_MPLL_CFG_REF_CLKDIV2_GET(x)\
+ FIELD_GET(HSIO_MPLL_CFG_REF_CLKDIV2, x)
+
+#define HSIO_MPLL_CFG_MPLL_EN BIT(16)
+#define HSIO_MPLL_CFG_MPLL_EN_SET(x)\
+ FIELD_PREP(HSIO_MPLL_CFG_MPLL_EN, x)
+#define HSIO_MPLL_CFG_MPLL_EN_GET(x)\
+ FIELD_GET(HSIO_MPLL_CFG_MPLL_EN, x)
+
+#define HSIO_MPLL_CFG_MPLL_MULTIPLIER GENMASK(6, 0)
+#define HSIO_MPLL_CFG_MPLL_MULTIPLIER_SET(x)\
+ FIELD_PREP(HSIO_MPLL_CFG_MPLL_MULTIPLIER, x)
+#define HSIO_MPLL_CFG_MPLL_MULTIPLIER_GET(x)\
+ FIELD_GET(HSIO_MPLL_CFG_MPLL_MULTIPLIER, x)
+
+/* HSIO:SD:SD_STAT */
+#define HSIO_SD_STAT(g) __REG(TARGET_HSIO, 0, 1, 8, g, 3, 32, 12, 0, 1, 4)
+
+#define HSIO_SD_STAT_MPLL_STATE BIT(6)
+#define HSIO_SD_STAT_MPLL_STATE_SET(x)\
+ FIELD_PREP(HSIO_SD_STAT_MPLL_STATE, x)
+#define HSIO_SD_STAT_MPLL_STATE_GET(x)\
+ FIELD_GET(HSIO_SD_STAT_MPLL_STATE, x)
+
+#define HSIO_SD_STAT_TX_STATE BIT(5)
+#define HSIO_SD_STAT_TX_STATE_SET(x)\
+ FIELD_PREP(HSIO_SD_STAT_TX_STATE, x)
+#define HSIO_SD_STAT_TX_STATE_GET(x)\
+ FIELD_GET(HSIO_SD_STAT_TX_STATE, x)
+
+#define HSIO_SD_STAT_TX_CM_STATE BIT(2)
+#define HSIO_SD_STAT_TX_CM_STATE_SET(x)\
+ FIELD_PREP(HSIO_SD_STAT_TX_CM_STATE, x)
+#define HSIO_SD_STAT_TX_CM_STATE_GET(x)\
+ FIELD_GET(HSIO_SD_STAT_TX_CM_STATE, x)
+
+#define HSIO_SD_STAT_RX_PLL_STATE BIT(0)
+#define HSIO_SD_STAT_RX_PLL_STATE_SET(x)\
+ FIELD_PREP(HSIO_SD_STAT_RX_PLL_STATE, x)
+#define HSIO_SD_STAT_RX_PLL_STATE_GET(x)\
+ FIELD_GET(HSIO_SD_STAT_RX_PLL_STATE, x)
+
+/* HSIO:HW_CFGSTAT:HW_CFG */
+#define HSIO_HW_CFG __REG(TARGET_HSIO, 0, 1, 104, 0, 1, 52, 0, 0, 1, 4)
+
+#define HSIO_HW_CFG_RGMII_1_CFG BIT(15)
+#define HSIO_HW_CFG_RGMII_1_CFG_SET(x)\
+ (((x) << 15) & GENMASK(15, 15))
+#define HSIO_HW_CFG_RGMII_1_CFG_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_RGMII_1_CFG, x)
+
+#define HSIO_HW_CFG_RGMII_0_CFG BIT(14)
+#define HSIO_HW_CFG_RGMII_0_CFG_SET(x)\
+ (((x) << 14) & GENMASK(14, 14))
+#define HSIO_HW_CFG_RGMII_0_CFG_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_RGMII_0_CFG, x)
+
+#define HSIO_HW_CFG_RGMII_ENA GENMASK(13, 12)
+#define HSIO_HW_CFG_RGMII_ENA_SET(x)\
+ (((x) << 12) & GENMASK(13, 12))
+#define HSIO_HW_CFG_RGMII_ENA_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_RGMII_ENA, x)
+
+#define HSIO_HW_CFG_SD6G_0_CFG BIT(11)
+#define HSIO_HW_CFG_SD6G_0_CFG_SET(x)\
+ (((x) << 11) & GENMASK(11, 11))
+#define HSIO_HW_CFG_SD6G_0_CFG_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_SD6G_0_CFG, x)
+
+#define HSIO_HW_CFG_SD6G_1_CFG BIT(10)
+#define HSIO_HW_CFG_SD6G_1_CFG_SET(x)\
+ (((x) << 10) & GENMASK(10, 10))
+#define HSIO_HW_CFG_SD6G_1_CFG_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_SD6G_1_CFG, x)
+
+#define HSIO_HW_CFG_GMII_ENA GENMASK(9, 2)
+#define HSIO_HW_CFG_GMII_ENA_SET(x)\
+ (((x) << 2) & GENMASK(9, 2))
+#define HSIO_HW_CFG_GMII_ENA_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_GMII_ENA, x)
+
+#define HSIO_HW_CFG_QSGMII_ENA GENMASK(1, 0)
+#define HSIO_HW_CFG_QSGMII_ENA_SET(x)\
+ ((x) & GENMASK(1, 0))
+#define HSIO_HW_CFG_QSGMII_ENA_GET(x)\
+ FIELD_GET(HSIO_HW_CFG_QSGMII_ENA, x)
+
+#endif /* _LAN966X_SERDES_REGS_H_ */
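
One detail worth noting in this header: the SD_CFG/MPLL_CFG/SD_STAT field helpers use FIELD_PREP()/FIELD_GET(), while the HSIO_HW_CFG _SET() helpers above are open-coded shift-and-mask expressions. A plausible reason (an inference, not stated in the patch): the HW_CFG helpers are used in the file-scope lan966x_serdes_muxes[] initializer, and FIELD_PREP() expands to a GNU statement expression carrying compile-time checks, which is not a constant expression and may only appear inside a function body:

	#include <linux/bitfield.h>

	#define ENA	GENMASK(1, 0)

	/* fine at file scope: plain shift-and-mask is a constant expression */
	static const u32 ok = ((1 << 0) & ENA);

	/*
	 * would not compile at file scope, since FIELD_PREP() is a ({ ... })
	 * statement expression:
	 *
	 * static const u32 bad = FIELD_PREP(ENA, 1);
	 */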
diff --git a/drivers/phy/phy-can-transceiver.c b/drivers/phy/phy-can-transceiver.c
index c2cb93b4df71..6f3fe37dee0e 100644
--- a/drivers/phy/phy-can-transceiver.c
+++ b/drivers/phy/phy-can-transceiver.c
@@ -110,14 +110,14 @@ static int can_transceiver_phy_probe(struct platform_device *pdev)
can_transceiver_phy->generic_phy = phy;
if (drvdata->flags & CAN_TRANSCEIVER_STB_PRESENT) {
- standby_gpio = devm_gpiod_get(dev, "standby", GPIOD_OUT_HIGH);
+ standby_gpio = devm_gpiod_get_optional(dev, "standby", GPIOD_OUT_HIGH);
if (IS_ERR(standby_gpio))
return PTR_ERR(standby_gpio);
can_transceiver_phy->standby_gpio = standby_gpio;
}
if (drvdata->flags & CAN_TRANSCEIVER_EN_PRESENT) {
- enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(enable_gpio))
return PTR_ERR(enable_gpio);
can_transceiver_phy->enable_gpio = enable_gpio;
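
The two hunks above switch devm_gpiod_get() to devm_gpiod_get_optional(), which returns NULL instead of ERR_PTR(-ENOENT) when the line is simply not described in the device tree; the gpiod consumer API treats a NULL descriptor as a no-op. A minimal sketch of the resulting probe pattern:

	/* "standby" is optional on boards that hardwire the STB pin */
	struct gpio_desc *stb;

	stb = devm_gpiod_get_optional(dev, "standby", GPIOD_OUT_HIGH);
	if (IS_ERR(stb))
		return PTR_ERR(stb);	/* real errors only, e.g. -EPROBE_DEFER */

	gpiod_set_value_cansleep(stb, 0);	/* NULL descriptor: silent no-op */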
diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c
index 288c9c67aa74..ccb4045685cd 100644
--- a/drivers/phy/phy-core-mipi-dphy.c
+++ b/drivers/phy/phy-core-mipi-dphy.c
@@ -36,7 +36,7 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock,
cfg->clk_miss = 0;
cfg->clk_post = 60000 + 52 * ui;
- cfg->clk_pre = 8000;
+ cfg->clk_pre = 8;
cfg->clk_prepare = 38000;
cfg->clk_settle = 95000;
cfg->clk_term_en = 0;
@@ -97,7 +97,7 @@ int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg)
if (cfg->clk_post < (60000 + 52 * ui))
return -EINVAL;
- if (cfg->clk_pre < 8000)
+ if (cfg->clk_pre < 8)
return -EINVAL;
if (cfg->clk_prepare < 38000 || cfg->clk_prepare > 95000)
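
The clk_pre change above is a unit fix rather than a retuning: in struct phy_configure_opts_mipi_dphy, clk_pre is expressed in UI (unit intervals, the duration of one HS bit), unlike most of the neighbouring timings, which are in picoseconds, and the D-PHY minimum is 8 UI. A worked example with a hypothetical HS rate:

	/*
	 * At an HS bit rate of 1 Gbit/s, 1 UI = 1000 ps, so the 8 UI minimum
	 * corresponds to 8000 ps; the old default of 8000 would have been
	 * interpreted as 8000 UI, i.e. 8 us of clock lead-in.
	 */
	cfg->clk_pre = 8;	/* UI, not picoseconds */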
diff --git a/drivers/phy/qualcomm/Kconfig b/drivers/phy/qualcomm/Kconfig
index 7f6fcb8ec5ba..5c98850f5a36 100644
--- a/drivers/phy/qualcomm/Kconfig
+++ b/drivers/phy/qualcomm/Kconfig
@@ -18,6 +18,16 @@ config PHY_QCOM_APQ8064_SATA
depends on OF
select GENERIC_PHY
+config PHY_QCOM_EDP
+ tristate "Qualcomm eDP PHY driver"
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on OF
+ depends on COMMON_CLK
+ select GENERIC_PHY
+ help
+ Enable this driver to support the Qualcomm eDP PHY found in various
+ Qualcomm chipsets.
+
config PHY_QCOM_IPQ4019_USB
tristate "Qualcomm IPQ4019 USB PHY driver"
depends on OF && (ARCH_QCOM || COMPILE_TEST)
diff --git a/drivers/phy/qualcomm/Makefile b/drivers/phy/qualcomm/Makefile
index 47acbd7daa3a..e9e3b1a4dbb0 100644
--- a/drivers/phy/qualcomm/Makefile
+++ b/drivers/phy/qualcomm/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_PHY_ATH79_USB) += phy-ath79-usb.o
obj-$(CONFIG_PHY_QCOM_APQ8064_SATA) += phy-qcom-apq8064-sata.o
+obj-$(CONFIG_PHY_QCOM_EDP) += phy-qcom-edp.o
obj-$(CONFIG_PHY_QCOM_IPQ4019_USB) += phy-qcom-ipq4019-usb.o
obj-$(CONFIG_PHY_QCOM_IPQ806X_SATA) += phy-qcom-ipq806x-sata.o
obj-$(CONFIG_PHY_QCOM_PCIE2) += phy-qcom-pcie2.o
diff --git a/drivers/phy/qualcomm/phy-qcom-edp.c b/drivers/phy/qualcomm/phy-qcom-edp.c
new file mode 100644
index 000000000000..a8ecd2e8442d
--- /dev/null
+++ b/drivers/phy/qualcomm/phy-qcom-edp.c
@@ -0,0 +1,674 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017, 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Ltd.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
+#include <linux/slab.h>
+
+#include <dt-bindings/phy/phy.h>
+
+#include "phy-qcom-qmp.h"
+
+/* EDP_PHY registers */
+#define DP_PHY_CFG 0x0010
+#define DP_PHY_CFG_1 0x0014
+#define DP_PHY_PD_CTL 0x001c
+#define DP_PHY_MODE 0x0020
+
+#define DP_PHY_AUX_CFG0 0x0024
+#define DP_PHY_AUX_CFG1 0x0028
+#define DP_PHY_AUX_CFG2 0x002C
+#define DP_PHY_AUX_CFG3 0x0030
+#define DP_PHY_AUX_CFG4 0x0034
+#define DP_PHY_AUX_CFG5 0x0038
+#define DP_PHY_AUX_CFG6 0x003C
+#define DP_PHY_AUX_CFG7 0x0040
+#define DP_PHY_AUX_CFG8 0x0044
+#define DP_PHY_AUX_CFG9 0x0048
+
+#define DP_PHY_AUX_INTERRUPT_MASK 0x0058
+
+#define DP_PHY_VCO_DIV 0x0074
+#define DP_PHY_TX0_TX1_LANE_CTL 0x007c
+#define DP_PHY_TX2_TX3_LANE_CTL 0x00a0
+
+#define DP_PHY_STATUS 0x00e0
+
+/* LANE_TXn registers */
+#define TXn_CLKBUF_ENABLE 0x0000
+#define TXn_TX_EMP_POST1_LVL 0x0004
+
+#define TXn_TX_DRV_LVL 0x0014
+#define TXn_TX_DRV_LVL_OFFSET 0x0018
+#define TXn_RESET_TSYNC_EN 0x001c
+#define TXn_LDO_CONFIG 0x0084
+#define TXn_TX_BAND 0x0028
+
+#define TXn_RES_CODE_LANE_OFFSET_TX0 0x0044
+#define TXn_RES_CODE_LANE_OFFSET_TX1 0x0048
+
+#define TXn_TRANSCEIVER_BIAS_EN 0x0054
+#define TXn_HIGHZ_DRVR_EN 0x0058
+#define TXn_TX_POL_INV 0x005c
+#define TXn_LANE_MODE_1 0x0064
+
+#define TXn_TRAN_DRVR_EMP_EN 0x0078
+
+struct qcom_edp {
+ struct device *dev;
+
+ struct phy *phy;
+
+ void __iomem *edp;
+ void __iomem *tx0;
+ void __iomem *tx1;
+ void __iomem *pll;
+
+ struct clk_hw dp_link_hw;
+ struct clk_hw dp_pixel_hw;
+
+ struct phy_configure_opts_dp dp_opts;
+
+ struct clk_bulk_data clks[2];
+ struct regulator_bulk_data supplies[2];
+};
+
+static int qcom_edp_phy_init(struct phy *phy)
+{
+ struct qcom_edp *edp = phy_get_drvdata(phy);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(edp->supplies), edp->supplies);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(ARRAY_SIZE(edp->clks), edp->clks);
+ if (ret)
+ goto out_disable_supplies;
+
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+ edp->edp + DP_PHY_PD_CTL);
+
+ /* Turn on BIAS current for PHY/PLL */
+ writel(0x17, edp->pll + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN);
+
+ writel(DP_PHY_PD_CTL_PSR_PWRDN, edp->edp + DP_PHY_PD_CTL);
+ msleep(20);
+
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_LANE_0_1_PWRDN | DP_PHY_PD_CTL_LANE_2_3_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+ edp->edp + DP_PHY_PD_CTL);
+
+ writel(0x00, edp->edp + DP_PHY_AUX_CFG0);
+ writel(0x13, edp->edp + DP_PHY_AUX_CFG1);
+ writel(0x24, edp->edp + DP_PHY_AUX_CFG2);
+ writel(0x00, edp->edp + DP_PHY_AUX_CFG3);
+ writel(0x0a, edp->edp + DP_PHY_AUX_CFG4);
+ writel(0x26, edp->edp + DP_PHY_AUX_CFG5);
+ writel(0x0a, edp->edp + DP_PHY_AUX_CFG6);
+ writel(0x03, edp->edp + DP_PHY_AUX_CFG7);
+ writel(0x37, edp->edp + DP_PHY_AUX_CFG8);
+ writel(0x03, edp->edp + DP_PHY_AUX_CFG9);
+
+ writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK |
+ PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK |
+ PHY_AUX_REQ_ERR_MASK, edp->edp + DP_PHY_AUX_INTERRUPT_MASK);
+
+ msleep(20);
+
+ return 0;
+
+out_disable_supplies:
+ regulator_bulk_disable(ARRAY_SIZE(edp->supplies), edp->supplies);
+
+ return ret;
+}
+
+static int qcom_edp_phy_configure(struct phy *phy, union phy_configure_opts *opts)
+{
+ const struct phy_configure_opts_dp *dp_opts = &opts->dp;
+ struct qcom_edp *edp = phy_get_drvdata(phy);
+
+ memcpy(&edp->dp_opts, dp_opts, sizeof(*dp_opts));
+
+ return 0;
+}
+
+static int qcom_edp_configure_ssc(const struct qcom_edp *edp)
+{
+ const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
+ u32 step1;
+ u32 step2;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ case 2700:
+ case 8100:
+ step1 = 0x45;
+ step2 = 0x06;
+ break;
+
+ case 5400:
+ step1 = 0x5c;
+ step2 = 0x08;
+ break;
+
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+
+ writel(0x01, edp->pll + QSERDES_V4_COM_SSC_EN_CENTER);
+ writel(0x00, edp->pll + QSERDES_V4_COM_SSC_ADJ_PER1);
+ writel(0x36, edp->pll + QSERDES_V4_COM_SSC_PER1);
+ writel(0x01, edp->pll + QSERDES_V4_COM_SSC_PER2);
+ writel(step1, edp->pll + QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0);
+ writel(step2, edp->pll + QSERDES_V4_COM_SSC_STEP_SIZE2_MODE0);
+
+ return 0;
+}
+
+static int qcom_edp_configure_pll(const struct qcom_edp *edp)
+{
+ const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
+ u32 div_frac_start2_mode0;
+ u32 div_frac_start3_mode0;
+ u32 dec_start_mode0;
+ u32 lock_cmp1_mode0;
+ u32 lock_cmp2_mode0;
+ u32 hsclk_sel;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ hsclk_sel = 0x5;
+ dec_start_mode0 = 0x69;
+ div_frac_start2_mode0 = 0x80;
+ div_frac_start3_mode0 = 0x07;
+ lock_cmp1_mode0 = 0x6f;
+ lock_cmp2_mode0 = 0x08;
+ break;
+
+ case 2700:
+ hsclk_sel = 0x3;
+ dec_start_mode0 = 0x69;
+ div_frac_start2_mode0 = 0x80;
+ div_frac_start3_mode0 = 0x07;
+ lock_cmp1_mode0 = 0x0f;
+ lock_cmp2_mode0 = 0x0e;
+ break;
+
+ case 5400:
+ hsclk_sel = 0x1;
+ dec_start_mode0 = 0x8c;
+ div_frac_start2_mode0 = 0x00;
+ div_frac_start3_mode0 = 0x0a;
+ lock_cmp1_mode0 = 0x1f;
+ lock_cmp2_mode0 = 0x1c;
+ break;
+
+ case 8100:
+ hsclk_sel = 0x0;
+ dec_start_mode0 = 0x69;
+ div_frac_start2_mode0 = 0x80;
+ div_frac_start3_mode0 = 0x07;
+ lock_cmp1_mode0 = 0x2f;
+ lock_cmp2_mode0 = 0x2a;
+ break;
+
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+
+ writel(0x01, edp->pll + QSERDES_V4_COM_SVS_MODE_CLK_SEL);
+ writel(0x0b, edp->pll + QSERDES_V4_COM_SYSCLK_EN_SEL);
+ writel(0x02, edp->pll + QSERDES_V4_COM_SYS_CLK_CTRL);
+ writel(0x0c, edp->pll + QSERDES_V4_COM_CLK_ENABLE1);
+ writel(0x06, edp->pll + QSERDES_V4_COM_SYSCLK_BUF_ENABLE);
+ writel(0x30, edp->pll + QSERDES_V4_COM_CLK_SELECT);
+ writel(hsclk_sel, edp->pll + QSERDES_V4_COM_HSCLK_SEL);
+ writel(0x0f, edp->pll + QSERDES_V4_COM_PLL_IVCO);
+ writel(0x08, edp->pll + QSERDES_V4_COM_LOCK_CMP_EN);
+ writel(0x36, edp->pll + QSERDES_V4_COM_PLL_CCTRL_MODE0);
+ writel(0x16, edp->pll + QSERDES_V4_COM_PLL_RCTRL_MODE0);
+ writel(0x06, edp->pll + QSERDES_V4_COM_CP_CTRL_MODE0);
+ writel(dec_start_mode0, edp->pll + QSERDES_V4_COM_DEC_START_MODE0);
+ writel(0x00, edp->pll + QSERDES_V4_COM_DIV_FRAC_START1_MODE0);
+ writel(div_frac_start2_mode0, edp->pll + QSERDES_V4_COM_DIV_FRAC_START2_MODE0);
+ writel(div_frac_start3_mode0, edp->pll + QSERDES_V4_COM_DIV_FRAC_START3_MODE0);
+ writel(0x02, edp->pll + QSERDES_V4_COM_CMN_CONFIG);
+ writel(0x3f, edp->pll + QSERDES_V4_COM_INTEGLOOP_GAIN0_MODE0);
+ writel(0x00, edp->pll + QSERDES_V4_COM_INTEGLOOP_GAIN1_MODE0);
+ writel(0x00, edp->pll + QSERDES_V4_COM_VCO_TUNE_MAP);
+ writel(lock_cmp1_mode0, edp->pll + QSERDES_V4_COM_LOCK_CMP1_MODE0);
+ writel(lock_cmp2_mode0, edp->pll + QSERDES_V4_COM_LOCK_CMP2_MODE0);
+
+ writel(0x0a, edp->pll + QSERDES_V4_COM_BG_TIMER);
+ writel(0x14, edp->pll + QSERDES_V4_COM_CORECLK_DIV_MODE0);
+ writel(0x00, edp->pll + QSERDES_V4_COM_VCO_TUNE_CTRL);
+ writel(0x17, edp->pll + QSERDES_V4_COM_BIAS_EN_CLKBUFLR_EN);
+ writel(0x0f, edp->pll + QSERDES_V4_COM_CORE_CLK_EN);
+ writel(0xa0, edp->pll + QSERDES_V4_COM_VCO_TUNE1_MODE0);
+ writel(0x03, edp->pll + QSERDES_V4_COM_VCO_TUNE2_MODE0);
+
+ return 0;
+}
+
+static int qcom_edp_set_vco_div(const struct qcom_edp *edp)
+{
+ const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
+ unsigned long pixel_freq;
+ u32 vco_div;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ vco_div = 0x1;
+ pixel_freq = 1620000000UL / 2;
+ break;
+
+ case 2700:
+ vco_div = 0x1;
+ pixel_freq = 2700000000UL / 2;
+ break;
+
+ case 5400:
+ vco_div = 0x2;
+ pixel_freq = 5400000000UL / 4;
+ break;
+
+ case 8100:
+ vco_div = 0x0;
+ pixel_freq = 8100000000UL / 6;
+ break;
+
+ default:
+ /* Other link rates aren't supported */
+ return -EINVAL;
+ }
+
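+ /*
+ * DP_PHY_VCO_DIV selects the VCO output divider; the pixel clock is
+ * the link rate divided by 2, 4 or 6 as computed above, and the two
+ * clk_hw rates are updated below to match.
+ */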
+ writel(vco_div, edp->edp + DP_PHY_VCO_DIV);
+
+ clk_set_rate(edp->dp_link_hw.clk, dp_opts->link_rate * 100000);
+ clk_set_rate(edp->dp_pixel_hw.clk, pixel_freq);
+
+ return 0;
+}
+
+static int qcom_edp_phy_power_on(struct phy *phy)
+{
+ const struct qcom_edp *edp = phy_get_drvdata(phy);
+ int timeout;
+ int ret;
+ u32 val;
+
+ writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN |
+ DP_PHY_PD_CTL_LANE_0_1_PWRDN | DP_PHY_PD_CTL_LANE_2_3_PWRDN |
+ DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN,
+ edp->edp + DP_PHY_PD_CTL);
+ writel(0xfc, edp->edp + DP_PHY_MODE);
+
+ timeout = readl_poll_timeout(edp->pll + QSERDES_V4_COM_CMN_STATUS,
+ val, val & BIT(7), 5, 200);
+ if (timeout)
+ return timeout;
+
+ writel(0x01, edp->tx0 + TXn_LDO_CONFIG);
+ writel(0x01, edp->tx1 + TXn_LDO_CONFIG);
+ writel(0x00, edp->tx0 + TXn_LANE_MODE_1);
+ writel(0x00, edp->tx1 + TXn_LANE_MODE_1);
+
+ ret = qcom_edp_configure_ssc(edp);
+ if (ret)
+ return ret;
+
+ ret = qcom_edp_configure_pll(edp);
+ if (ret)
+ return ret;
+
+ /* TX Lane configuration */
+ writel(0x05, edp->edp + DP_PHY_TX0_TX1_LANE_CTL);
+ writel(0x05, edp->edp + DP_PHY_TX2_TX3_LANE_CTL);
+
+ /* TX-0 register configuration */
+ writel(0x03, edp->tx0 + TXn_TRANSCEIVER_BIAS_EN);
+ writel(0x0f, edp->tx0 + TXn_CLKBUF_ENABLE);
+ writel(0x03, edp->tx0 + TXn_RESET_TSYNC_EN);
+ writel(0x01, edp->tx0 + TXn_TRAN_DRVR_EMP_EN);
+ writel(0x04, edp->tx0 + TXn_TX_BAND);
+
+ /* TX-1 register configuration */
+ writel(0x03, edp->tx1 + TXn_TRANSCEIVER_BIAS_EN);
+ writel(0x0f, edp->tx1 + TXn_CLKBUF_ENABLE);
+ writel(0x03, edp->tx1 + TXn_RESET_TSYNC_EN);
+ writel(0x01, edp->tx1 + TXn_TRAN_DRVR_EMP_EN);
+ writel(0x04, edp->tx1 + TXn_TX_BAND);
+
+ ret = qcom_edp_set_vco_div(edp);
+ if (ret)
+ return ret;
+
+ writel(0x01, edp->edp + DP_PHY_CFG);
+ writel(0x05, edp->edp + DP_PHY_CFG);
+ writel(0x01, edp->edp + DP_PHY_CFG);
+ writel(0x09, edp->edp + DP_PHY_CFG);
+
+ writel(0x20, edp->pll + QSERDES_V4_COM_RESETSM_CNTRL);
+
+ timeout = readl_poll_timeout(edp->pll + QSERDES_V4_COM_C_READY_STATUS,
+ val, val & BIT(0), 500, 10000);
+ if (timeout)
+ return timeout;
+
+ writel(0x19, edp->edp + DP_PHY_CFG);
+ writel(0x1f, edp->tx0 + TXn_HIGHZ_DRVR_EN);
+ writel(0x04, edp->tx0 + TXn_HIGHZ_DRVR_EN);
+ writel(0x00, edp->tx0 + TXn_TX_POL_INV);
+ writel(0x1f, edp->tx1 + TXn_HIGHZ_DRVR_EN);
+ writel(0x04, edp->tx1 + TXn_HIGHZ_DRVR_EN);
+ writel(0x00, edp->tx1 + TXn_TX_POL_INV);
+ writel(0x10, edp->tx0 + TXn_TX_DRV_LVL_OFFSET);
+ writel(0x10, edp->tx1 + TXn_TX_DRV_LVL_OFFSET);
+ writel(0x11, edp->tx0 + TXn_RES_CODE_LANE_OFFSET_TX0);
+ writel(0x11, edp->tx0 + TXn_RES_CODE_LANE_OFFSET_TX1);
+ writel(0x11, edp->tx1 + TXn_RES_CODE_LANE_OFFSET_TX0);
+ writel(0x11, edp->tx1 + TXn_RES_CODE_LANE_OFFSET_TX1);
+
+ writel(0x10, edp->tx0 + TXn_TX_EMP_POST1_LVL);
+ writel(0x10, edp->tx1 + TXn_TX_EMP_POST1_LVL);
+ writel(0x1f, edp->tx0 + TXn_TX_DRV_LVL);
+ writel(0x1f, edp->tx1 + TXn_TX_DRV_LVL);
+
+ writel(0x4, edp->tx0 + TXn_HIGHZ_DRVR_EN);
+ writel(0x3, edp->tx0 + TXn_TRANSCEIVER_BIAS_EN);
+ writel(0x4, edp->tx1 + TXn_HIGHZ_DRVR_EN);
+ writel(0x0, edp->tx1 + TXn_TRANSCEIVER_BIAS_EN);
+ writel(0x3, edp->edp + DP_PHY_CFG_1);
+
+ writel(0x18, edp->edp + DP_PHY_CFG);
+ usleep_range(100, 1000);
+
+ writel(0x19, edp->edp + DP_PHY_CFG);
+
+ return readl_poll_timeout(edp->edp + DP_PHY_STATUS,
+ val, val & BIT(1), 500, 10000);
+}
+
+static int qcom_edp_phy_power_off(struct phy *phy)
+{
+ const struct qcom_edp *edp = phy_get_drvdata(phy);
+
+ writel(DP_PHY_PD_CTL_PSR_PWRDN, edp->edp + DP_PHY_PD_CTL);
+
+ return 0;
+}
+
+static int qcom_edp_phy_exit(struct phy *phy)
+{
+ struct qcom_edp *edp = phy_get_drvdata(phy);
+
+ clk_bulk_disable_unprepare(ARRAY_SIZE(edp->clks), edp->clks);
+ regulator_bulk_disable(ARRAY_SIZE(edp->supplies), edp->supplies);
+
+ return 0;
+}
+
+static const struct phy_ops qcom_edp_ops = {
+ .init = qcom_edp_phy_init,
+ .configure = qcom_edp_phy_configure,
+ .power_on = qcom_edp_phy_power_on,
+ .power_off = qcom_edp_phy_power_off,
+ .exit = qcom_edp_phy_exit,
+ .owner = THIS_MODULE,
+};
+
+/*
+ * Embedded Display Port PLL driver block diagram for branch clocks
+ *
+ * +------------------------------+
+ * | EDP_VCO_CLK |
+ * | |
+ * | +-------------------+ |
+ * | | (EDP PLL/VCO) | |
+ * | +---------+---------+ |
+ * | v |
+ * | +----------+-----------+ |
+ * | | hsclk_divsel_clk_src | |
+ * | +----------+-----------+ |
+ * +------------------------------+
+ * |
+ * +---------<---------v------------>----------+
+ * | |
+ * +--------v----------------+ |
+ * | edp_phy_pll_link_clk | |
+ * | link_clk | |
+ * +--------+----------------+ |
+ * | |
+ * | |
+ * v v
+ * Input to DISPCC block |
+ * for link clk, crypto clk |
+ * and interface clock |
+ * |
+ * |
+ * +--------<------------+-----------------+---<---+
+ * | | |
+ * +----v---------+ +--------v-----+ +--------v------+
+ * | vco_divided | | vco_divided | | vco_divided |
+ * | _clk_src | | _clk_src | | _clk_src |
+ * | | | | | |
+ * |divsel_six | | divsel_two | | divsel_four |
+ * +-------+------+ +-----+--------+ +--------+------+
+ * | | |
+ * v---->----------v-------------<------v
+ * |
+ * +----------+-----------------+
+ * | edp_phy_pll_vco_div_clk |
+ * +---------+------------------+
+ * |
+ * v
+ * Input to DISPCC block
+ * for EDP pixel clock
+ *
+ */
+static int qcom_edp_dp_pixel_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ switch (req->rate) {
+ case 1620000000UL / 2:
+ case 2700000000UL / 2:
+ /* 5.4 and 8.1 Gbps yield the same pixel clock as 2.7 Gbps, via div 4 and div 6 respectively */
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned long
+qcom_edp_dp_pixel_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ const struct qcom_edp *edp = container_of(hw, struct qcom_edp, dp_pixel_hw);
+ const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ return 1620000000UL / 2;
+ case 2700:
+ return 2700000000UL / 2;
+ case 5400:
+ return 5400000000UL / 4;
+ case 8100:
+ return 8100000000UL / 6;
+ default:
+ return 0;
+ }
+}
+
+static const struct clk_ops qcom_edp_dp_pixel_clk_ops = {
+ .determine_rate = qcom_edp_dp_pixel_clk_determine_rate,
+ .recalc_rate = qcom_edp_dp_pixel_clk_recalc_rate,
+};
+
+static int qcom_edp_dp_link_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ switch (req->rate) {
+ case 162000000:
+ case 270000000:
+ case 540000000:
+ case 810000000:
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static unsigned long
+qcom_edp_dp_link_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+ const struct qcom_edp *edp = container_of(hw, struct qcom_edp, dp_link_hw);
+ const struct phy_configure_opts_dp *dp_opts = &edp->dp_opts;
+
+ switch (dp_opts->link_rate) {
+ case 1620:
+ case 2700:
+ case 5400:
+ case 8100:
+ return dp_opts->link_rate * 100000;
+
+ default:
+ return 0;
+ }
+}
+
+static const struct clk_ops qcom_edp_dp_link_clk_ops = {
+ .determine_rate = qcom_edp_dp_link_clk_determine_rate,
+ .recalc_rate = qcom_edp_dp_link_clk_recalc_rate,
+};
+
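For reference: dp_opts->link_rate is expressed in Mb/s, and with 8b/10b coding the DP link (symbol) clock is the bit rate divided by ten, so the factor of 100000 used in qcom_edp_set_vco_div() and in the case labels above performs both conversions at once. A quick check, using only values already present in this patch:

#include <assert.h>

int main(void)
{
	unsigned long link_rate = 1620;	/* Mb/s, as in dp_opts->link_rate */

	/* Mb/s -> bit/s, then /10 for the symbol clock: 162 MHz */
	assert(link_rate * 1000000UL / 10 == link_rate * 100000UL);
	assert(link_rate * 100000UL == 162000000UL);
	return 0;
}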
+static int qcom_edp_clks_register(struct qcom_edp *edp, struct device_node *np)
+{
+ struct clk_hw_onecell_data *data;
+ struct clk_init_data init = { };
+ int ret;
+
+ data = devm_kzalloc(edp->dev, struct_size(data, hws, 2), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ init.ops = &qcom_edp_dp_link_clk_ops;
+ init.name = "edp_phy_pll_link_clk";
+ edp->dp_link_hw.init = &init;
+ ret = devm_clk_hw_register(edp->dev, &edp->dp_link_hw);
+ if (ret)
+ return ret;
+
+ init.ops = &qcom_edp_dp_pixel_clk_ops;
+ init.name = "edp_phy_pll_vco_div_clk";
+ edp->dp_pixel_hw.init = &init;
+ ret = devm_clk_hw_register(edp->dev, &edp->dp_pixel_hw);
+ if (ret)
+ return ret;
+
+ data->hws[0] = &edp->dp_link_hw;
+ data->hws[1] = &edp->dp_pixel_hw;
+ data->num = 2;
+
+ return devm_of_clk_add_hw_provider(edp->dev, of_clk_hw_onecell_get, data);
+}
+
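For reference: the cell index a consumer passes to this provider is simply the position in hws[]. The consumer label below is illustrative, not taken from the patch:

/*
 * of_clk_hw_onecell_get(clkspec, data) returns data->hws[clkspec->args[0]]:
 *   <&edp_phy 0> -> "edp_phy_pll_link_clk"    (edp->dp_link_hw)
 *   <&edp_phy 1> -> "edp_phy_pll_vco_div_clk" (edp->dp_pixel_hw)
 */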
+static int qcom_edp_phy_probe(struct platform_device *pdev)
+{
+ struct phy_provider *phy_provider;
+ struct device *dev = &pdev->dev;
+ struct qcom_edp *edp;
+ int ret;
+
+ edp = devm_kzalloc(dev, sizeof(*edp), GFP_KERNEL);
+ if (!edp)
+ return -ENOMEM;
+
+ edp->dev = dev;
+
+ edp->edp = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(edp->edp))
+ return PTR_ERR(edp->edp);
+
+ edp->tx0 = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(edp->tx0))
+ return PTR_ERR(edp->tx0);
+
+ edp->tx1 = devm_platform_ioremap_resource(pdev, 2);
+ if (IS_ERR(edp->tx1))
+ return PTR_ERR(edp->tx1);
+
+ edp->pll = devm_platform_ioremap_resource(pdev, 3);
+ if (IS_ERR(edp->pll))
+ return PTR_ERR(edp->pll);
+
+ edp->clks[0].id = "aux";
+ edp->clks[1].id = "cfg_ahb";
+ ret = devm_clk_bulk_get(dev, ARRAY_SIZE(edp->clks), edp->clks);
+ if (ret)
+ return ret;
+
+ edp->supplies[0].supply = "vdda-phy";
+ edp->supplies[1].supply = "vdda-pll";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(edp->supplies), edp->supplies);
+ if (ret)
+ return ret;
+
+ ret = qcom_edp_clks_register(edp, pdev->dev.of_node);
+ if (ret)
+ return ret;
+
+ edp->phy = devm_phy_create(dev, pdev->dev.of_node, &qcom_edp_ops);
+ if (IS_ERR(edp->phy)) {
+ dev_err(dev, "failed to register phy\n");
+ return PTR_ERR(edp->phy);
+ }
+
+ phy_set_drvdata(edp->phy, edp);
+
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ return PTR_ERR_OR_ZERO(phy_provider);
+}
+
+static const struct of_device_id qcom_edp_phy_match_table[] = {
+ { .compatible = "qcom,sc8180x-edp-phy" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_edp_phy_match_table);
+
+static struct platform_driver qcom_edp_phy_driver = {
+ .probe = qcom_edp_phy_probe,
+ .driver = {
+ .name = "qcom-edp-phy",
+ .of_match_table = qcom_edp_phy_match_table,
+ },
+};
+
+module_platform_driver(qcom_edp_phy_driver);
+
+MODULE_AUTHOR("Bjorn Andersson <bjorn.andersson@linaro.org>");
+MODULE_DESCRIPTION("Qualcomm eDP QMP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c
index c96639d5f581..8ea87c69f463 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.c
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.c
@@ -2866,6 +2866,215 @@ static const struct qmp_phy_init_tbl qcm2290_usb3_pcs_tbl[] = {
QMP_PHY_INIT_CFG(QPHY_V3_PCS_RX_SIGDET_LVL, 0x88),
};
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x42),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE0, 0x24),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE2_MODE1, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE1_MODE1, 0xb4),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_HSCLK_SEL, 0x11),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0x68),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0xaa),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0xab),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0, 0x1e),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0, 0xca),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE1, 0x18),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1, 0xa2),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_BUF_ENABLE, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_EN_CENTER, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x4c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_PI_QEC_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_1, 0x75),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_LANE_MODE_4, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_TX, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_TX_RES_CODE_LANE_OFFSET_RX, 0x04),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_LOW, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH, 0xff),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH3, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_00_HIGH4, 0xd8),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_LOW, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH, 0xdc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH2, 0x5c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH3, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_01_HIGH4, 0xa6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH3, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_MODE_10_HIGH4, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_VGA_CAL_CNTRL2, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_GM_CAL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH1, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SB2_THRESH2, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_PI_CONTROLS, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_TX_ADAPT_POST_THRESH, 0xf0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_RX_EQU_ADAPTOR_CNTRL4, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_FO_GAIN, 0x09),
+ QMP_PHY_INIT_CFG(QSERDES_V5_RX_UCDR_SO_GAIN, 0x05),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x77),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RATE_SLEW_CNTRL1, 0x0b),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_REFGEN_REQ_CONFIG1, 0x05),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen3x1_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_EQ_CONFIG2, 0x0f),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_serdes_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER1, 0x31),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_PER2, 0x01),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0, 0x07),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1, 0x97),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_ENABLE1, 0x90),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_IVCO, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE0, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CP_CTRL_MODE1, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE0, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_RCTRL_MODE1, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE0, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_PLL_CCTRL_MODE1, 0x36),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_SYSCLK_EN_SEL, 0x08),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_EN, 0x46),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP_CFG, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE0, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP1_MODE1, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_LOCK_CMP2_MODE1, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE0, 0x82),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DEC_START_MODE1, 0xd0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE0, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE0, 0x03),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START1_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START2_MODE1, 0x55),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_DIV_FRAC_START3_MODE1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_TUNE_MAP, 0x02),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CLK_SELECT, 0x34),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_SEL, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE0, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORECLK_DIV_MODE1, 0x04),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MISC1, 0x88),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CORE_CLK_EN, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_CONFIG, 0x06),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_CMN_MODE, 0x14),
+ QMP_PHY_INIT_CFG(QSERDES_V5_COM_VCO_DC_LEVEL_CTRL, 0x0f),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_tx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_1, 0x05),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_LANE_MODE_2, 0xf6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX, 0x1a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX, 0x0c),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_rx_tbl[] = {
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_PI_CONTROLS, 0x16),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET, 0x38),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1, 0xcc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2, 0x12),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3, 0xcc),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5, 0x4a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6, 0x29),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B0, 0xc5),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B1, 0xad),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B2, 0xb6),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B3, 0xc0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B4, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B5, 0xfb),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE2_B6, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B0, 0xc7),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B1, 0xef),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B2, 0xbf),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B3, 0xa0),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B4, 0x81),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B5, 0xde),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MODE_RATE3_B6, 0x7f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_PHPRE_CTRL, 0x20),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1, 0x3f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3, 0x37),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_3, 0x05),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3, 0x1f),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210, 0x1f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210, 0x1f),
+
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2, 0x0c),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_VGA_CAL_MAN_VAL, 0x0a),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4, 0x0b),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_RX_IDAC_SAOFFSET, 0x10),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_DFE_DAC_ENABLE1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_GM_CAL, 0x0f),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1, 0x00),
+ QMP_PHY_INIT_CFG(QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2, 0x1f),
+};
+
+/* Register names should be validated; they might differ for this PHY */
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG2, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_EQ_CONFIG3, 0x22),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_G3S2_PRE_GAIN, 0x2e),
+ QMP_PHY_INIT_CFG(QPHY_V5_PCS_RX_SIGDET_LVL, 0x99),
+};
+
+static const struct qmp_phy_init_tbl sm8450_qmp_gen4x2_pcie_pcs_misc_tbl[] = {
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE, 0xc1),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS, 0x00),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5, 0x02),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_EQ_CONFIG1, 0x16),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3, 0x28),
+ QMP_PHY_INIT_CFG(QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN, 0x2e),
+};
+
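For reference: these init tables are pure data; elsewhere in phy-qcom-qmp.c a configure helper walks each table and writes every value to its register offset. A minimal sketch of that pattern with invented names (the real table entries also carry a lane mask for multi-lane PHYs):

#include <linux/io.h>

struct qmp_init_entry {
	unsigned int offset;
	unsigned int val;
};

/* program one serdes/tx/rx/pcs region from a table like the ones above */
static void qmp_configure_sketch(void __iomem *base,
				 const struct qmp_init_entry *tbl, int num)
{
	int i;

	for (i = 0; i < num; i++)
		writel(tbl[i].val, base + tbl[i].offset);
}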
struct qmp_phy;
/* struct qmp_phy_cfg - per-PHY initialization config */
@@ -3094,6 +3303,10 @@ static const char * const qmp_v4_sm8250_usbphy_clk_l[] = {
"aux", "ref_clk_src", "com_aux"
};
+static const char * const sm8450_ufs_phy_clk_l[] = {
+ "qref", "ref", "ref_aux",
+};
+
static const char * const sdm845_ufs_phy_clk_l[] = {
"ref", "ref_aux",
};
@@ -4090,6 +4303,94 @@ static const struct qmp_phy_cfg sm8350_usb3_uniphy_cfg = {
.pwrdn_delay_max = POWER_DOWN_DELAY_US_MAX,
};
+static const struct qmp_phy_cfg sm8450_ufsphy_cfg = {
+ .type = PHY_TYPE_UFS,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8350_ufsphy_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8350_ufsphy_serdes_tbl),
+ .tx_tbl = sm8350_ufsphy_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_tx_tbl),
+ .rx_tbl = sm8350_ufsphy_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8350_ufsphy_rx_tbl),
+ .pcs_tbl = sm8350_ufsphy_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8350_ufsphy_pcs_tbl),
+ .clk_list = sm8450_ufs_phy_clk_l,
+ .num_clks = ARRAY_SIZE(sm8450_ufs_phy_clk_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8150_ufsphy_regs_layout,
+
+ .start_ctrl = SERDES_START,
+ .pwrdn_ctrl = SW_PWRDN,
+ .phy_status = PHYSTATUS,
+
+ .is_dual_lane_phy = true,
+};
+
+static const struct qmp_phy_cfg sm8450_qmp_gen3x1_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 1,
+
+ .serdes_tbl = sm8450_qmp_gen3x1_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_serdes_tbl),
+ .tx_tbl = sm8450_qmp_gen3x1_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_tx_tbl),
+ .rx_tbl = sm8450_qmp_gen3x1_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_rx_tbl),
+ .pcs_tbl = sm8450_qmp_gen3x1_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_tbl),
+ .pcs_misc_tbl = sm8450_qmp_gen3x1_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen3x1_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS,
+
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
+static const struct qmp_phy_cfg sm8450_qmp_gen4x2_pciephy_cfg = {
+ .type = PHY_TYPE_PCIE,
+ .nlanes = 2,
+
+ .serdes_tbl = sm8450_qmp_gen4x2_pcie_serdes_tbl,
+ .serdes_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_serdes_tbl),
+ .tx_tbl = sm8450_qmp_gen4x2_pcie_tx_tbl,
+ .tx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_tx_tbl),
+ .rx_tbl = sm8450_qmp_gen4x2_pcie_rx_tbl,
+ .rx_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_rx_tbl),
+ .pcs_tbl = sm8450_qmp_gen4x2_pcie_pcs_tbl,
+ .pcs_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_tbl),
+ .pcs_misc_tbl = sm8450_qmp_gen4x2_pcie_pcs_misc_tbl,
+ .pcs_misc_tbl_num = ARRAY_SIZE(sm8450_qmp_gen4x2_pcie_pcs_misc_tbl),
+ .clk_list = sdm845_pciephy_clk_l,
+ .num_clks = ARRAY_SIZE(sdm845_pciephy_clk_l),
+ .reset_list = sdm845_pciephy_reset_l,
+ .num_resets = ARRAY_SIZE(sdm845_pciephy_reset_l),
+ .vreg_list = qmp_phy_vreg_l,
+ .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l),
+ .regs = sm8250_pcie_regs_layout,
+
+ .start_ctrl = SERDES_START | PCS_START,
+ .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL,
+ .phy_status = PHYSTATUS_4_20,
+
+ .is_dual_lane_phy = true,
+ .has_pwrdn_delay = true,
+ .pwrdn_delay_min = 995, /* us */
+ .pwrdn_delay_max = 1005, /* us */
+};
+
static const struct qmp_phy_cfg qcm2290_usb3phy_cfg = {
.type = PHY_TYPE_USB3,
.nlanes = 1,
@@ -5749,6 +6050,18 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = {
.compatible = "qcom,sm8350-qmp-usb3-uni-phy",
.data = &sm8350_usb3_uniphy_cfg,
}, {
+ .compatible = "qcom,sm8450-qmp-gen3x1-pcie-phy",
+ .data = &sm8450_qmp_gen3x1_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-gen4x2-pcie-phy",
+ .data = &sm8450_qmp_gen4x2_pciephy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-ufs-phy",
+ .data = &sm8450_ufsphy_cfg,
+ }, {
+ .compatible = "qcom,sm8450-qmp-usb3-phy",
+ .data = &sm8350_usb3phy_cfg,
+ }, {
.compatible = "qcom,qcm2290-qmp-usb3-phy",
.data = &qcm2290_usb3phy_cfg,
},
diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h
index e15f461065bb..06b2556ed93a 100644
--- a/drivers/phy/qualcomm/phy-qcom-qmp.h
+++ b/drivers/phy/qualcomm/phy-qcom-qmp.h
@@ -551,6 +551,7 @@
/* Only for QMP V4 PHY - QSERDES COM registers */
#define QSERDES_V4_COM_BG_TIMER 0x00c
#define QSERDES_V4_COM_SSC_EN_CENTER 0x010
+#define QSERDES_V4_COM_SSC_ADJ_PER1 0x014
#define QSERDES_V4_COM_SSC_PER1 0x01c
#define QSERDES_V4_COM_SSC_PER2 0x020
#define QSERDES_V4_COM_SSC_STEP_SIZE1_MODE0 0x024
@@ -1069,6 +1070,16 @@
#define QPHY_V4_20_PCS_LANE1_INSIG_MX_CTRL2 0x828
/* Only for QMP V5 PHY - QSERDES COM registers */
+#define QSERDES_V5_COM_SSC_EN_CENTER 0x010
+#define QSERDES_V5_COM_SSC_PER1 0x01c
+#define QSERDES_V5_COM_SSC_PER2 0x020
+#define QSERDES_V5_COM_SSC_STEP_SIZE1_MODE0 0x024
+#define QSERDES_V5_COM_SSC_STEP_SIZE2_MODE0 0x028
+#define QSERDES_V5_COM_SSC_STEP_SIZE1_MODE1 0x030
+#define QSERDES_V5_COM_SSC_STEP_SIZE2_MODE1 0x034
+#define QSERDES_V5_COM_BIAS_EN_CLKBUFLR_EN 0x044
+#define QSERDES_V5_COM_CLK_ENABLE1 0x048
+#define QSERDES_V5_COM_SYSCLK_BUF_ENABLE 0x050
#define QSERDES_V5_COM_PLL_IVCO 0x058
#define QSERDES_V5_COM_CP_CTRL_MODE0 0x074
#define QSERDES_V5_COM_CP_CTRL_MODE1 0x078
@@ -1078,16 +1089,35 @@
#define QSERDES_V5_COM_PLL_CCTRL_MODE1 0x088
#define QSERDES_V5_COM_SYSCLK_EN_SEL 0x094
#define QSERDES_V5_COM_LOCK_CMP_EN 0x0a4
+#define QSERDES_V5_COM_LOCK_CMP_CFG 0x0a8
#define QSERDES_V5_COM_LOCK_CMP1_MODE0 0x0ac
#define QSERDES_V5_COM_LOCK_CMP2_MODE0 0x0b0
#define QSERDES_V5_COM_LOCK_CMP1_MODE1 0x0b4
#define QSERDES_V5_COM_DEC_START_MODE0 0x0bc
#define QSERDES_V5_COM_LOCK_CMP2_MODE1 0x0b8
#define QSERDES_V5_COM_DEC_START_MODE1 0x0c4
+#define QSERDES_V5_COM_DIV_FRAC_START1_MODE0 0x0cc
+#define QSERDES_V5_COM_DIV_FRAC_START2_MODE0 0x0d0
+#define QSERDES_V5_COM_DIV_FRAC_START3_MODE0 0x0d4
+#define QSERDES_V5_COM_DIV_FRAC_START1_MODE1 0x0d8
+#define QSERDES_V5_COM_DIV_FRAC_START2_MODE1 0x0dc
+#define QSERDES_V5_COM_DIV_FRAC_START3_MODE1 0x0e0
#define QSERDES_V5_COM_VCO_TUNE_MAP 0x10c
+#define QSERDES_V5_COM_VCO_TUNE1_MODE0 0x110
+#define QSERDES_V5_COM_VCO_TUNE2_MODE0 0x114
+#define QSERDES_V5_COM_VCO_TUNE1_MODE1 0x118
+#define QSERDES_V5_COM_VCO_TUNE2_MODE1 0x11c
#define QSERDES_V5_COM_VCO_TUNE_INITVAL2 0x124
+#define QSERDES_V5_COM_CLK_SELECT 0x154
#define QSERDES_V5_COM_HSCLK_SEL 0x158
#define QSERDES_V5_COM_HSCLK_HS_SWITCH_SEL 0x15c
+#define QSERDES_V5_COM_CORECLK_DIV_MODE0 0x168
+#define QSERDES_V5_COM_CORECLK_DIV_MODE1 0x16c
+#define QSERDES_V5_COM_CORE_CLK_EN 0x174
+#define QSERDES_V5_COM_CMN_CONFIG 0x17c
+#define QSERDES_V5_COM_CMN_MISC1 0x19c
+#define QSERDES_V5_COM_CMN_MODE 0x1a4
+#define QSERDES_V5_COM_VCO_DC_LEVEL_CTRL 0x1a8
#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x1ac
#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1b0
#define QSERDES_V5_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x1b4
@@ -1112,6 +1142,12 @@
#define QSERDES_V5_TX_PWM_GEAR_3_DIVIDER_BAND0_1 0x180
#define QSERDES_V5_TX_PWM_GEAR_4_DIVIDER_BAND0_1 0x184
+/* Only for QMP V5_20 PHY - TX registers */
+#define QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_TX 0x30
+#define QSERDES_V5_20_TX_RES_CODE_LANE_OFFSET_RX 0x34
+#define QSERDES_V5_20_TX_LANE_MODE_1 0x78
+#define QSERDES_V5_20_TX_LANE_MODE_2 0x7c
+
/* Only for QMP V5 PHY - RX registers */
#define QSERDES_V5_RX_UCDR_FO_GAIN 0x008
#define QSERDES_V5_RX_UCDR_SO_GAIN 0x014
@@ -1130,6 +1166,7 @@
#define QSERDES_V5_RX_AC_JTAG_ENABLE 0x068
#define QSERDES_V5_RX_AC_JTAG_MODE 0x078
#define QSERDES_V5_RX_RX_TERM_BW 0x080
+#define QSERDES_V5_RX_TX_ADAPT_POST_THRESH 0x0cc
#define QSERDES_V5_RX_VGA_CAL_CNTRL1 0x0d4
#define QSERDES_V5_RX_VGA_CAL_CNTRL2 0x0d8
#define QSERDES_V5_RX_GM_CAL 0x0dc
@@ -1167,6 +1204,73 @@
#define QSERDES_V5_RX_DCC_CTRL1 0x1a8
#define QSERDES_V5_RX_VTH_CODE 0x1b0
+/* Only for QMP V5_20 PHY - RX registers */
+#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE2 0x008
+#define QSERDES_V5_20_RX_UCDR_FO_GAIN_RATE3 0x00c
+#define QSERDES_V5_20_RX_UCDR_PI_CONTROLS 0x020
+#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_0_1 0x02c
+#define QSERDES_V5_20_RX_AUX_DATA_THRESH_BIN_RATE_2_3 0x030
+#define QSERDES_V5_20_RX_RX_IDAC_SAOFFSET 0x07c
+#define QSERDES_V5_20_RX_DFE_3 0x090
+#define QSERDES_V5_20_RX_DFE_DAC_ENABLE1 0x0b4
+#define QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH1 0x0c4
+#define QSERDES_V5_20_RX_TX_ADAPT_POST_THRESH2 0x0c8
+#define QSERDES_V5_20_RX_VGA_CAL_MAN_VAL 0x0dc
+#define QSERDES_V5_20_RX_GM_CAL 0x0ec
+#define QSERDES_V5_20_RX_RX_EQU_ADAPTOR_CNTRL4 0x108
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B1 0x164
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B2 0x168
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B3 0x16c
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B5 0x174
+#define QSERDES_V5_20_RX_RX_MODE_RATE_0_1_B6 0x178
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B0 0x17c
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B1 0x180
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B2 0x184
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B3 0x188
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B4 0x18c
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B5 0x190
+#define QSERDES_V5_20_RX_RX_MODE_RATE2_B6 0x194
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B0 0x198
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B1 0x19c
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B2 0x1a0
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B3 0x1a4
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B4 0x1a8
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B5 0x1ac
+#define QSERDES_V5_20_RX_RX_MODE_RATE3_B6 0x1b0
+#define QSERDES_V5_20_RX_PHPRE_CTRL 0x1b4
+#define QSERDES_V5_20_RX_DFE_CTLE_POST_CAL_OFFSET 0x1c0
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE210 0x1f4
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH1_RATE3 0x1f8
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE210 0x1fc
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH2_RATE3 0x200
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE210 0x204
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH3_RATE3 0x208
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH4_RATE3 0x210
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH5_RATE3 0x218
+#define QSERDES_V5_20_RX_RX_MARG_COARSE_THRESH6_RATE3 0x220
+
+/* Only for QMP V5 PHY - USB/PCIe PCS registers */
+#define QPHY_V5_PCS_REFGEN_REQ_CONFIG1 0x0dc
+#define QPHY_V5_PCS_G3S2_PRE_GAIN 0x170
+#define QPHY_V5_PCS_RX_SIGDET_LVL 0x188
+#define QPHY_V5_PCS_RATE_SLEW_CNTRL1 0x198
+#define QPHY_V5_PCS_EQ_CONFIG2 0x1e0
+#define QPHY_V5_PCS_EQ_CONFIG3 0x1e4
+
+/* Only for QMP V5 PHY - PCS_PCIE registers */
+#define QPHY_V5_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x20
+#define QPHY_V5_PCS_PCIE_INT_AUX_CLK_CONFIG1 0x54
+#define QPHY_V5_PCS_PCIE_OSC_DTCT_ACTIONS 0x94
+#define QPHY_V5_PCS_PCIE_EQ_CONFIG2 0xa8
+
+/* Only for QMP V5_20 PHY - PCIe PCS registers */
+#define QPHY_V5_20_PCS_PCIE_ENDPOINT_REFCLK_DRIVE 0x01c
+#define QPHY_V5_20_PCS_PCIE_OSC_DTCT_ACTIONS 0x090
+#define QPHY_V5_20_PCS_PCIE_EQ_CONFIG1 0x0a0
+#define QPHY_V5_20_PCS_PCIE_G4_EQ_CONFIG5 0x108
+#define QPHY_V5_20_PCS_PCIE_G4_PRE_GAIN 0x15c
+#define QPHY_V5_20_PCS_PCIE_RX_MARGINING_CONFIG3 0x184
+
/* Only for QMP V5 PHY - UFS PCS registers */
#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_MSB 0x00c
#define QPHY_V5_PCS_UFS_TIMER_20US_CORECLK_STEPS_LSB 0x010
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
index 347dc79a18c1..630e01b5c19b 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -5,6 +5,7 @@
* Author: Wyon Bi <bivvy.bi@rock-chips.com>
*/
+#include <linux/bits.h>
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/iopoll.h>
@@ -364,7 +365,7 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno)
* The value of counter for HS Tclk-pre
* Tclk-pre = Tpin_txbyteclkhs * value
*/
- clk_pre = DIV_ROUND_UP(cfg->clk_pre, t_txbyteclkhs);
+ clk_pre = DIV_ROUND_UP(cfg->clk_pre, BITS_PER_BYTE);
/*
* The value of counter for HS Tlpx Time
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
index 1938365abbb3..eca77e44a4c1 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-usb2.c
@@ -204,6 +204,7 @@ struct rockchip_usb2phy_port {
* @dcd_retries: The retry count used to track Data contact
* detection process.
* @edev: extcon device for notification registration
+ * @irq: muxed interrupt for single irq configuration
* @phy_cfg: phy register configuration, assigned by driver data.
* @ports: phy port instance.
*/
@@ -218,6 +219,7 @@ struct rockchip_usb2phy {
enum power_supply_type chg_type;
u8 dcd_retries;
struct extcon_dev *edev;
+ int irq;
const struct rockchip_usb2phy_cfg *phy_cfg;
struct rockchip_usb2phy_port ports[USB2PHY_NUM_PORTS];
};
@@ -750,7 +752,6 @@ static void rockchip_chg_detect_work(struct work_struct *work)
fallthrough;
case USB_CHG_STATE_SECONDARY_DONE:
rphy->chg_state = USB_CHG_STATE_DETECTED;
- delay = 0;
fallthrough;
case USB_CHG_STATE_DETECTED:
/* put the controller in normal mode */
@@ -927,6 +928,102 @@ static irqreturn_t rockchip_usb2phy_otg_mux_irq(int irq, void *data)
return IRQ_NONE;
}
+static irqreturn_t rockchip_usb2phy_irq(int irq, void *data)
+{
+ struct rockchip_usb2phy *rphy = data;
+ struct rockchip_usb2phy_port *rport;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int index;
+
+ for (index = 0; index < rphy->phy_cfg->num_ports; index++) {
+ rport = &rphy->ports[index];
+ if (!rport->phy)
+ continue;
+
+ /* Handle linestate irq for both otg port and host port */
+ ret |= rockchip_usb2phy_linestate_irq(irq, rport);
+ }
+
+ return ret;
+}
+
+static int rockchip_usb2phy_port_irq_init(struct rockchip_usb2phy *rphy,
+ struct rockchip_usb2phy_port *rport,
+ struct device_node *child_np)
+{
+ int ret;
+
+ /*
+ * If the usb2 phy uses a combined irq for the otg and host ports,
+ * there is no need to init the otg and host port irqs separately.
+ */
+ if (rphy->irq > 0)
+ return 0;
+
+ switch (rport->port_id) {
+ case USB2PHY_PORT_HOST:
+ rport->ls_irq = of_irq_get_byname(child_np, "linestate");
+ if (rport->ls_irq < 0) {
+ dev_err(rphy->dev, "no linestate irq provided\n");
+ return rport->ls_irq;
+ }
+
+ ret = devm_request_threaded_irq(rphy->dev, rport->ls_irq, NULL,
+ rockchip_usb2phy_linestate_irq,
+ IRQF_ONESHOT,
+ "rockchip_usb2phy", rport);
+ if (ret) {
+ dev_err(rphy->dev, "failed to request linestate irq handle\n");
+ return ret;
+ }
+ break;
+ case USB2PHY_PORT_OTG:
+ /*
+ * Some SoCs use one interrupt with otg-id/otg-bvalid/linestate
+ * interrupts muxed together, so probe the otg-mux interrupt first,
+ * if not found, then look for the regular interrupts one by one.
+ */
+ rport->otg_mux_irq = of_irq_get_byname(child_np, "otg-mux");
+ if (rport->otg_mux_irq > 0) {
+ ret = devm_request_threaded_irq(rphy->dev, rport->otg_mux_irq,
+ NULL,
+ rockchip_usb2phy_otg_mux_irq,
+ IRQF_ONESHOT,
+ "rockchip_usb2phy_otg",
+ rport);
+ if (ret) {
+ dev_err(rphy->dev,
+ "failed to request otg-mux irq handle\n");
+ return ret;
+ }
+ } else {
+ rport->bvalid_irq = of_irq_get_byname(child_np, "otg-bvalid");
+ if (rport->bvalid_irq < 0) {
+ dev_err(rphy->dev, "no vbus valid irq provided\n");
+ return rport->bvalid_irq;
+ }
+
+ ret = devm_request_threaded_irq(rphy->dev, rport->bvalid_irq,
+ NULL,
+ rockchip_usb2phy_bvalid_irq,
+ IRQF_ONESHOT,
+ "rockchip_usb2phy_bvalid",
+ rport);
+ if (ret) {
+ dev_err(rphy->dev,
+ "failed to request otg-bvalid irq handle\n");
+ return ret;
+ }
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
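For reference: after this refactor the interrupt wiring is resolved in three tiers, as implied by the early return above and the probe changes further down:

/*
 * 1. A combined irq on the platform device (rphy->irq > 0): per-port
 *    setup is skipped here and a single threaded handler,
 *    rockchip_usb2phy_irq(), is requested at the end of probe, after
 *    every rport->phy has been created.
 * 2. Otherwise the otg port first probes for a muxed "otg-mux" irq.
 * 3. Failing that, the per-function irqs are used: "otg-bvalid" for
 *    the otg port and "linestate" for the host port.
 */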
static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy,
struct rockchip_usb2phy_port *rport,
struct device_node *child_np)
@@ -940,18 +1037,9 @@ static int rockchip_usb2phy_host_port_init(struct rockchip_usb2phy *rphy,
mutex_init(&rport->mutex);
INIT_DELAYED_WORK(&rport->sm_work, rockchip_usb2phy_sm_work);
- rport->ls_irq = of_irq_get_byname(child_np, "linestate");
- if (rport->ls_irq < 0) {
- dev_err(rphy->dev, "no linestate irq provided\n");
- return rport->ls_irq;
- }
-
- ret = devm_request_threaded_irq(rphy->dev, rport->ls_irq, NULL,
- rockchip_usb2phy_linestate_irq,
- IRQF_ONESHOT,
- "rockchip_usb2phy", rport);
+ ret = rockchip_usb2phy_port_irq_init(rphy, rport, child_np);
if (ret) {
- dev_err(rphy->dev, "failed to request linestate irq handle\n");
+ dev_err(rphy->dev, "failed to setup host irq\n");
return ret;
}
@@ -1000,43 +1088,10 @@ static int rockchip_usb2phy_otg_port_init(struct rockchip_usb2phy *rphy,
INIT_DELAYED_WORK(&rport->chg_work, rockchip_chg_detect_work);
INIT_DELAYED_WORK(&rport->otg_sm_work, rockchip_usb2phy_otg_sm_work);
- /*
- * Some SoCs use one interrupt with otg-id/otg-bvalid/linestate
- * interrupts muxed together, so probe the otg-mux interrupt first,
- * if not found, then look for the regular interrupts one by one.
- */
- rport->otg_mux_irq = of_irq_get_byname(child_np, "otg-mux");
- if (rport->otg_mux_irq > 0) {
- ret = devm_request_threaded_irq(rphy->dev, rport->otg_mux_irq,
- NULL,
- rockchip_usb2phy_otg_mux_irq,
- IRQF_ONESHOT,
- "rockchip_usb2phy_otg",
- rport);
- if (ret) {
- dev_err(rphy->dev,
- "failed to request otg-mux irq handle\n");
- goto out;
- }
- } else {
- rport->bvalid_irq = of_irq_get_byname(child_np, "otg-bvalid");
- if (rport->bvalid_irq < 0) {
- dev_err(rphy->dev, "no vbus valid irq provided\n");
- ret = rport->bvalid_irq;
- goto out;
- }
-
- ret = devm_request_threaded_irq(rphy->dev, rport->bvalid_irq,
- NULL,
- rockchip_usb2phy_bvalid_irq,
- IRQF_ONESHOT,
- "rockchip_usb2phy_bvalid",
- rport);
- if (ret) {
- dev_err(rphy->dev,
- "failed to request otg-bvalid irq handle\n");
- goto out;
- }
+ ret = rockchip_usb2phy_port_irq_init(rphy, rport, child_np);
+ if (ret) {
+ dev_err(rphy->dev, "failed to init irq for host port\n");
+ goto out;
}
if (!IS_ERR(rphy->edev)) {
@@ -1074,12 +1129,19 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
return -EINVAL;
}
- if (!dev->parent || !dev->parent->of_node)
- return -EINVAL;
+ if (!dev->parent || !dev->parent->of_node) {
+ rphy->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,usbgrf");
+ if (IS_ERR(rphy->grf)) {
+ dev_err(dev, "failed to locate usbgrf\n");
+ return PTR_ERR(rphy->grf);
+ }
+ } else {
- rphy->grf = syscon_node_to_regmap(dev->parent->of_node);
- if (IS_ERR(rphy->grf))
- return PTR_ERR(rphy->grf);
+ rphy->grf = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(rphy->grf))
+ return PTR_ERR(rphy->grf);
+ }
if (of_device_is_compatible(np, "rockchip,rv1108-usb2phy")) {
rphy->usbgrf =
@@ -1091,16 +1153,26 @@ static int rockchip_usb2phy_probe(struct platform_device *pdev)
rphy->usbgrf = NULL;
}
- if (of_property_read_u32(np, "reg", &reg)) {
+ if (of_property_read_u32_index(np, "reg", 0, &reg)) {
dev_err(dev, "the reg property is not assigned in %pOFn node\n",
np);
return -EINVAL;
}
+ /*
+ * With #address-cells = <2> the first reg cell holds the high 32 bits
+ * of the address and may be zero, so fall back to the second cell.
+ */
+ if (reg == 0) {
+ if (of_property_read_u32_index(np, "reg", 1, &reg)) {
+ dev_err(dev, "the reg property is not assigned in %pOFn node\n",
+ np);
+ return -EINVAL;
+ }
+ }
+
rphy->dev = dev;
phy_cfgs = match->data;
rphy->chg_state = USB_CHG_STATE_UNDEFINED;
rphy->chg_type = POWER_SUPPLY_TYPE_UNKNOWN;
+ rphy->irq = platform_get_irq_optional(pdev, 0);
platform_set_drvdata(pdev, rphy);
ret = rockchip_usb2phy_extcon_register(rphy);
@@ -1180,6 +1252,20 @@ next_child:
}
provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+
+ if (rphy->irq > 0) {
+ ret = devm_request_threaded_irq(rphy->dev, rphy->irq, NULL,
+ rockchip_usb2phy_irq,
+ IRQF_ONESHOT,
+ "rockchip_usb2phy",
+ rphy);
+ if (ret) {
+ dev_err(rphy->dev,
+ "failed to request usb2phy irq handle\n");
+ goto put_child;
+ }
+ }
+
return PTR_ERR_OR_ZERO(provider);
put_child:
@@ -1418,6 +1504,69 @@ static const struct rockchip_usb2phy_cfg rk3399_phy_cfgs[] = {
{ /* sentinel */ }
};
+static const struct rockchip_usb2phy_cfg rk3568_phy_cfgs[] = {
+ {
+ .reg = 0xfe8a0000,
+ .num_ports = 2,
+ .clkout_ctl = { 0x0008, 4, 4, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0x0000, 8, 0, 0, 0x1d1 },
+ .bvalid_det_en = { 0x0080, 2, 2, 0, 1 },
+ .bvalid_det_st = { 0x0084, 2, 2, 0, 1 },
+ .bvalid_det_clr = { 0x0088, 2, 2, 0, 1 },
+ .utmi_avalid = { 0x00c0, 10, 10, 0, 1 },
+ .utmi_bvalid = { 0x00c0, 9, 9, 0, 1 },
+ },
+ [USB2PHY_PORT_HOST] = {
+ /* Select suspend control from controller */
+ .phy_sus = { 0x0004, 8, 0, 0x1d2, 0x1d2 },
+ .ls_det_en = { 0x0080, 1, 1, 0, 1 },
+ .ls_det_st = { 0x0084, 1, 1, 0, 1 },
+ .ls_det_clr = { 0x0088, 1, 1, 0, 1 },
+ .utmi_ls = { 0x00c0, 17, 16, 0, 1 },
+ .utmi_hstdet = { 0x00c0, 19, 19, 0, 1 }
+ }
+ },
+ .chg_det = {
+ .opmode = { 0x0000, 3, 0, 5, 1 },
+ .cp_det = { 0x00c0, 24, 24, 0, 1 },
+ .dcp_det = { 0x00c0, 23, 23, 0, 1 },
+ .dp_det = { 0x00c0, 25, 25, 0, 1 },
+ .idm_sink_en = { 0x0008, 8, 8, 0, 1 },
+ .idp_sink_en = { 0x0008, 7, 7, 0, 1 },
+ .idp_src_en = { 0x0008, 9, 9, 0, 1 },
+ .rdm_pdwn_en = { 0x0008, 10, 10, 0, 1 },
+ .vdm_src_en = { 0x0008, 12, 12, 0, 1 },
+ .vdp_src_en = { 0x0008, 11, 11, 0, 1 },
+ },
+ },
+ {
+ .reg = 0xfe8b0000,
+ .num_ports = 2,
+ .clkout_ctl = { 0x0008, 4, 4, 1, 0 },
+ .port_cfgs = {
+ [USB2PHY_PORT_OTG] = {
+ .phy_sus = { 0x0000, 8, 0, 0x1d2, 0x1d1 },
+ .ls_det_en = { 0x0080, 0, 0, 0, 1 },
+ .ls_det_st = { 0x0084, 0, 0, 0, 1 },
+ .ls_det_clr = { 0x0088, 0, 0, 0, 1 },
+ .utmi_ls = { 0x00c0, 5, 4, 0, 1 },
+ .utmi_hstdet = { 0x00c0, 7, 7, 0, 1 }
+ },
+ [USB2PHY_PORT_HOST] = {
+ .phy_sus = { 0x0004, 8, 0, 0x1d2, 0x1d1 },
+ .ls_det_en = { 0x0080, 1, 1, 0, 1 },
+ .ls_det_st = { 0x0084, 1, 1, 0, 1 },
+ .ls_det_clr = { 0x0088, 1, 1, 0, 1 },
+ .utmi_ls = { 0x00c0, 17, 16, 0, 1 },
+ .utmi_hstdet = { 0x00c0, 19, 19, 0, 1 }
+ }
+ },
+ },
+ { /* sentinel */ }
+};
+
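For reference: each five-number tuple above is a usb2phy register-field descriptor; in this driver the struct has the shape {offset, bitend, bitstart, disable, enable} (worth re-checking against the struct definition near the top of the file):

/*
 * Example: .bvalid_det_en = { 0x0080, 2, 2, 0, 1 } means: in the GRF
 * register at offset 0x0080, bits [2:2] are written 1 to enable bvalid
 * detection and 0 to disable it.
 */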
static const struct rockchip_usb2phy_cfg rv1108_phy_cfgs[] = {
{
.reg = 0x100,
@@ -1467,6 +1616,7 @@ static const struct of_device_id rockchip_usb2phy_dt_match[] = {
{ .compatible = "rockchip,rk3328-usb2phy", .data = &rk3328_phy_cfgs },
{ .compatible = "rockchip,rk3366-usb2phy", .data = &rk3366_phy_cfgs },
{ .compatible = "rockchip,rk3399-usb2phy", .data = &rk3399_phy_cfgs },
+ { .compatible = "rockchip,rk3568-usb2phy", .data = &rk3568_phy_cfgs },
{ .compatible = "rockchip,rv1108-usb2phy", .data = &rv1108_phy_cfgs },
{}
};
diff --git a/drivers/phy/socionext/Kconfig b/drivers/phy/socionext/Kconfig
index a3970e0f89da..8ae644756352 100644
--- a/drivers/phy/socionext/Kconfig
+++ b/drivers/phy/socionext/Kconfig
@@ -43,4 +43,4 @@ config PHY_UNIPHIER_AHCI
select GENERIC_PHY
help
Enable this to support PHY implemented in AHCI controller
- on UniPhier SoCs. This driver supports PXs2 and PXs3 SoCs.
+ on UniPhier SoCs. This driver supports Pro4, PXs2 and PXs3 SoCs.
diff --git a/drivers/phy/socionext/phy-uniphier-ahci.c b/drivers/phy/socionext/phy-uniphier-ahci.c
index 7427c40bf4ae..28cf3efe0695 100644
--- a/drivers/phy/socionext/phy-uniphier-ahci.c
+++ b/drivers/phy/socionext/phy-uniphier-ahci.c
@@ -19,8 +19,9 @@
struct uniphier_ahciphy_priv {
struct device *dev;
void __iomem *base;
- struct clk *clk, *clk_parent;
- struct reset_control *rst, *rst_parent;
+ struct clk *clk, *clk_parent, *clk_parent_gio;
+ struct reset_control *rst, *rst_parent, *rst_parent_gio;
+ struct reset_control *rst_pm, *rst_tx, *rst_rx;
const struct uniphier_ahciphy_soc_data *data;
};
@@ -28,10 +29,30 @@ struct uniphier_ahciphy_soc_data {
int (*init)(struct uniphier_ahciphy_priv *priv);
int (*power_on)(struct uniphier_ahciphy_priv *priv);
int (*power_off)(struct uniphier_ahciphy_priv *priv);
+ bool is_legacy;
bool is_ready_high;
bool is_phy_clk;
};
+/* for Pro4 */
+#define CKCTRL0 0x0
+#define CKCTRL0_CK_OFF BIT(9)
+#define CKCTRL0_NCY_MASK GENMASK(8, 4)
+#define CKCTRL0_NCY5_MASK GENMASK(3, 2)
+#define CKCTRL0_PRESCALE_MASK GENMASK(1, 0)
+#define CKCTRL1 0x4
+#define CKCTRL1_LOS_LVL_MASK GENMASK(20, 16)
+#define CKCTRL1_TX_LVL_MASK GENMASK(12, 8)
+#define RXTXCTRL 0x8
+#define RXTXCTRL_RX_EQ_VALL_MASK GENMASK(31, 29)
+#define RXTXCTRL_RX_DPLL_MODE_MASK GENMASK(28, 26)
+#define RXTXCTRL_TX_ATTEN_MASK GENMASK(14, 12)
+#define RXTXCTRL_TX_BOOST_MASK GENMASK(11, 8)
+#define RXTXCTRL_TX_EDGERATE_MASK GENMASK(3, 2)
+#define RXTXCTRL_TX_CKO_EN BIT(0)
+#define RSTPWR 0x30
+#define RSTPWR_RX_EN_VAL BIT(18)
+
/* for PXs2/PXs3 */
#define CKCTRL 0x0
#define CKCTRL_P0_READY BIT(15)
@@ -50,6 +71,128 @@ struct uniphier_ahciphy_soc_data {
#define RXCTRL_LOS_BIAS_MASK GENMASK(10, 8)
#define RXCTRL_RX_EQ_MASK GENMASK(2, 0)
+static int uniphier_ahciphy_pro4_init(struct uniphier_ahciphy_priv *priv)
+{
+ u32 val;
+
+ /* set phy MPLL parameters */
+ val = readl(priv->base + CKCTRL0);
+ val &= ~CKCTRL0_NCY_MASK;
+ val |= FIELD_PREP(CKCTRL0_NCY_MASK, 0x6);
+ val &= ~CKCTRL0_NCY5_MASK;
+ val |= FIELD_PREP(CKCTRL0_NCY5_MASK, 0x2);
+ val &= ~CKCTRL0_PRESCALE_MASK;
+ val |= FIELD_PREP(CKCTRL0_PRESCALE_MASK, 0x1);
+ writel(val, priv->base + CKCTRL0);
+
+ /* setup phy control parameters */
+ val = readl(priv->base + CKCTRL1);
+ val &= ~CKCTRL1_LOS_LVL_MASK;
+ val |= FIELD_PREP(CKCTRL1_LOS_LVL_MASK, 0x10);
+ val &= ~CKCTRL1_TX_LVL_MASK;
+ val |= FIELD_PREP(CKCTRL1_TX_LVL_MASK, 0x06);
+ writel(val, priv->base + CKCTRL1);
+
+ val = readl(priv->base + RXTXCTRL);
+ val &= ~RXTXCTRL_RX_EQ_VALL_MASK;
+ val |= FIELD_PREP(RXTXCTRL_RX_EQ_VALL_MASK, 0x6);
+ val &= ~RXTXCTRL_RX_DPLL_MODE_MASK;
+ val |= FIELD_PREP(RXTXCTRL_RX_DPLL_MODE_MASK, 0x3);
+ val &= ~RXTXCTRL_TX_ATTEN_MASK;
+ val |= FIELD_PREP(RXTXCTRL_TX_ATTEN_MASK, 0x3);
+ val &= ~RXTXCTRL_TX_BOOST_MASK;
+ val |= FIELD_PREP(RXTXCTRL_TX_BOOST_MASK, 0x5);
+ val &= ~RXTXCTRL_TX_EDGERATE_MASK;
+ val |= FIELD_PREP(RXTXCTRL_TX_EDGERATE_MASK, 0x0);
+ writel(val, priv->base + RXTXCTRL);
+
+ return 0;
+}
+
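For reference: the init and power sequences in this file repeat the same read/clear/FIELD_PREP/write dance. A hypothetical helper showing the shape of that read-modify-write pattern (the driver defines no such helper; the name is invented):

#include <linux/io.h>
#include <linux/types.h>

/* clear @mask in the register at @reg, then OR in the pre-shifted @bits */
static void uniphier_rmw_sketch(void __iomem *reg, u32 mask, u32 bits)
{
	u32 val = readl(reg);

	val &= ~mask;
	val |= bits;
	writel(val, reg);
}

A call such as uniphier_rmw_sketch(priv->base + CKCTRL0, CKCTRL0_NCY_MASK, FIELD_PREP(CKCTRL0_NCY_MASK, 0x6)) would replace one triplet of the open-coded sequence above.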
+static int uniphier_ahciphy_pro4_power_on(struct uniphier_ahciphy_priv *priv)
+{
+ u32 val;
+ int ret;
+
+ /* enable reference clock for phy */
+ val = readl(priv->base + CKCTRL0);
+ val &= ~CKCTRL0_CK_OFF;
+ writel(val, priv->base + CKCTRL0);
+
+ /* enable TX clock */
+ val = readl(priv->base + RXTXCTRL);
+ val |= RXTXCTRL_TX_CKO_EN;
+ writel(val, priv->base + RXTXCTRL);
+
+ /* wait until RX is ready */
+ ret = readl_poll_timeout(priv->base + RSTPWR, val,
+ !(val & RSTPWR_RX_EN_VAL), 200, 2000);
+ if (ret) {
+ dev_err(priv->dev, "Failed to check whether Rx is ready\n");
+ goto out_disable_clock;
+ }
+
+ /* release all resets */
+ ret = reset_control_deassert(priv->rst_pm);
+ if (ret) {
+ dev_err(priv->dev, "Failed to release PM reset\n");
+ goto out_disable_clock;
+ }
+
+ ret = reset_control_deassert(priv->rst_tx);
+ if (ret) {
+ dev_err(priv->dev, "Failed to release Tx reset\n");
+ goto out_reset_pm_assert;
+ }
+
+ ret = reset_control_deassert(priv->rst_rx);
+ if (ret) {
+ dev_err(priv->dev, "Failed to release Rx reset\n");
+ goto out_reset_tx_assert;
+ }
+
+ return 0;
+
+out_reset_tx_assert:
+ reset_control_assert(priv->rst_tx);
+out_reset_pm_assert:
+ reset_control_assert(priv->rst_pm);
+
+out_disable_clock:
+ /* disable TX clock */
+ val = readl(priv->base + RXTXCTRL);
+ val &= ~RXTXCTRL_TX_CKO_EN;
+ writel(val, priv->base + RXTXCTRL);
+
+ /* disable reference clock for phy */
+ val = readl(priv->base + CKCTRL0);
+ val |= CKCTRL0_CK_OFF;
+ writel(val, priv->base + CKCTRL0);
+
+ return ret;
+}
+
+static int uniphier_ahciphy_pro4_power_off(struct uniphier_ahciphy_priv *priv)
+{
+ u32 val;
+
+ reset_control_assert(priv->rst_rx);
+ reset_control_assert(priv->rst_tx);
+ reset_control_assert(priv->rst_pm);
+
+ /* disable TX clock */
+ val = readl(priv->base + RXTXCTRL);
+ val &= ~RXTXCTRL_TX_CKO_EN;
+ writel(val, priv->base + RXTXCTRL);
+
+ /* disable reference clock for phy */
+ val = readl(priv->base + CKCTRL0);
+ val |= CKCTRL0_CK_OFF;
+ writel(val, priv->base + CKCTRL0);
+
+ return 0;
+}
+
static void uniphier_ahciphy_pxs2_enable(struct uniphier_ahciphy_priv *priv,
bool enable)
{
@@ -142,14 +285,22 @@ static int uniphier_ahciphy_init(struct phy *phy)
struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy);
int ret;
- ret = clk_prepare_enable(priv->clk_parent);
+ ret = clk_prepare_enable(priv->clk_parent_gio);
if (ret)
return ret;
- ret = reset_control_deassert(priv->rst_parent);
+ ret = clk_prepare_enable(priv->clk_parent);
+ if (ret)
+ goto out_clk_gio_disable;
+
+ ret = reset_control_deassert(priv->rst_parent_gio);
if (ret)
goto out_clk_disable;
+ ret = reset_control_deassert(priv->rst_parent);
+ if (ret)
+ goto out_rst_gio_assert;
+
if (priv->data->init) {
ret = priv->data->init(priv);
if (ret)
@@ -160,8 +311,12 @@ static int uniphier_ahciphy_init(struct phy *phy)
out_rst_assert:
reset_control_assert(priv->rst_parent);
+out_rst_gio_assert:
+ reset_control_assert(priv->rst_parent_gio);
out_clk_disable:
clk_disable_unprepare(priv->clk_parent);
+out_clk_gio_disable:
+ clk_disable_unprepare(priv->clk_parent_gio);
return ret;
}
@@ -171,7 +326,9 @@ static int uniphier_ahciphy_exit(struct phy *phy)
struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy);
reset_control_assert(priv->rst_parent);
+ reset_control_assert(priv->rst_parent_gio);
clk_disable_unprepare(priv->clk_parent);
+ clk_disable_unprepare(priv->clk_parent_gio);
return 0;
}
@@ -265,6 +422,28 @@ static int uniphier_ahciphy_probe(struct platform_device *pdev)
if (IS_ERR(priv->rst))
return PTR_ERR(priv->rst);
+ if (priv->data->is_legacy) {
+ priv->clk_parent_gio = devm_clk_get(dev, "gio");
+ if (IS_ERR(priv->clk_parent_gio))
+ return PTR_ERR(priv->clk_parent_gio);
+ priv->rst_parent_gio =
+ devm_reset_control_get_shared(dev, "gio");
+ if (IS_ERR(priv->rst_parent_gio))
+ return PTR_ERR(priv->rst_parent_gio);
+
+ priv->rst_pm = devm_reset_control_get_shared(dev, "pm");
+ if (IS_ERR(priv->rst_pm))
+ return PTR_ERR(priv->rst_pm);
+
+ priv->rst_tx = devm_reset_control_get_shared(dev, "tx");
+ if (IS_ERR(priv->rst_tx))
+ return PTR_ERR(priv->rst_tx);
+
+ priv->rst_rx = devm_reset_control_get_shared(dev, "rx");
+ if (IS_ERR(priv->rst_rx))
+ return PTR_ERR(priv->rst_rx);
+ }
+
phy = devm_phy_create(dev, dev->of_node, &uniphier_ahciphy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy\n");
@@ -279,9 +458,18 @@ static int uniphier_ahciphy_probe(struct platform_device *pdev)
return 0;
}
+static const struct uniphier_ahciphy_soc_data uniphier_pro4_data = {
+ .init = uniphier_ahciphy_pro4_init,
+ .power_on = uniphier_ahciphy_pro4_power_on,
+ .power_off = uniphier_ahciphy_pro4_power_off,
+ .is_legacy = true,
+ .is_phy_clk = false,
+};
+
static const struct uniphier_ahciphy_soc_data uniphier_pxs2_data = {
.power_on = uniphier_ahciphy_pxs2_power_on,
.power_off = uniphier_ahciphy_pxs2_power_off,
+ .is_legacy = false,
.is_ready_high = false,
.is_phy_clk = false,
};
@@ -290,12 +478,17 @@ static const struct uniphier_ahciphy_soc_data uniphier_pxs3_data = {
.init = uniphier_ahciphy_pxs3_init,
.power_on = uniphier_ahciphy_pxs2_power_on,
.power_off = uniphier_ahciphy_pxs2_power_off,
+ .is_legacy = false,
.is_ready_high = true,
.is_phy_clk = true,
};
static const struct of_device_id uniphier_ahciphy_match[] = {
{
+ .compatible = "socionext,uniphier-pro4-ahci-phy",
+ .data = &uniphier_pro4_data,
+ },
+ {
.compatible = "socionext,uniphier-pxs2-ahci-phy",
.data = &uniphier_pxs2_data,
},
diff --git a/drivers/phy/socionext/phy-uniphier-pcie.c b/drivers/phy/socionext/phy-uniphier-pcie.c
index 6bdbd1f214dd..ebca296ef123 100644
--- a/drivers/phy/socionext/phy-uniphier-pcie.c
+++ b/drivers/phy/socionext/phy-uniphier-pcie.c
@@ -27,6 +27,7 @@
#define TESTI_DAT_MASK GENMASK(13, 6)
#define TESTI_ADR_MASK GENMASK(5, 1)
#define TESTI_WR_EN BIT(0)
+#define TESTIO_PHY_SHIFT 16
#define PCL_PHY_TEST_O 0x2004
#define TESTO_DAT_MASK GENMASK(7, 0)
@@ -39,6 +40,10 @@
#define SG_USBPCIESEL 0x590
#define SG_USBPCIESEL_PCIE BIT(0)
+/* SC */
+#define SC_US3SRCSEL 0x2244
+#define SC_US3SRCSEL_2LANE GENMASK(9, 8)
+
#define PCL_PHY_R00 0
#define RX_EQ_ADJ_EN BIT(3) /* enable for EQ adjustment */
#define PCL_PHY_R06 6
@@ -47,6 +52,9 @@
#define PCL_PHY_R26 26
#define VCO_CTRL GENMASK(7, 4) /* Tx VCO adjustment value */
#define VCO_CTRL_INIT_VAL 5
+#define PCL_PHY_R28 28
+#define VCOPLL_CLMP GENMASK(3, 2) /* Tx VCOPLL clamp mode */
+#define VCOPLL_CLMP_VAL 0
struct uniphier_pciephy_priv {
void __iomem *base;
@@ -58,43 +66,57 @@ struct uniphier_pciephy_priv {
struct uniphier_pciephy_soc_data {
bool is_legacy;
+ bool is_dual_phy;
void (*set_phymode)(struct regmap *regmap);
};
static void uniphier_pciephy_testio_write(struct uniphier_pciephy_priv *priv,
- u32 data)
+ int id, u32 data)
{
+ if (id)
+ data <<= TESTIO_PHY_SHIFT;
+
/* need to read TESTO twice after accessing TESTI */
writel(data, priv->base + PCL_PHY_TEST_I);
readl(priv->base + PCL_PHY_TEST_O);
readl(priv->base + PCL_PHY_TEST_O);
}
+static u32 uniphier_pciephy_testio_read(struct uniphier_pciephy_priv *priv, int id)
+{
+ u32 val = readl(priv->base + PCL_PHY_TEST_O);
+
+ if (id)
+ val >>= TESTIO_PHY_SHIFT;
+
+ return val & TESTO_DAT_MASK;
+}
+
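For reference: on the dual-phy SoCs the second instance's test interface is packed into the upper half of the same registers; the layout implied by TESTIO_PHY_SHIFT and the TESTI_*/TESTO_* masks:

/*
 * PCL_PHY_TEST_I on dual-phy parts:
 *   phy id 0: bits [15:0]  - wr_en [0], addr [5:1], data [13:6]
 *   phy id 1: bits [31:16] - the same fields shifted left by 16
 * PCL_PHY_TEST_O: read data in bits [7:0] (phy 0) or [23:16] (phy 1)
 */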
static void uniphier_pciephy_set_param(struct uniphier_pciephy_priv *priv,
- u32 reg, u32 mask, u32 param)
+ int id, u32 reg, u32 mask, u32 param)
{
u32 val;
/* read previous data */
val = FIELD_PREP(TESTI_DAT_MASK, 1);
val |= FIELD_PREP(TESTI_ADR_MASK, reg);
- uniphier_pciephy_testio_write(priv, val);
- val = readl(priv->base + PCL_PHY_TEST_O) & TESTO_DAT_MASK;
+ uniphier_pciephy_testio_write(priv, id, val);
+ val = uniphier_pciephy_testio_read(priv, id);
/* update value */
val &= ~mask;
val |= mask & param;
val = FIELD_PREP(TESTI_DAT_MASK, val);
val |= FIELD_PREP(TESTI_ADR_MASK, reg);
- uniphier_pciephy_testio_write(priv, val);
- uniphier_pciephy_testio_write(priv, val | TESTI_WR_EN);
- uniphier_pciephy_testio_write(priv, val);
+ uniphier_pciephy_testio_write(priv, id, val);
+ uniphier_pciephy_testio_write(priv, id, val | TESTI_WR_EN);
+ uniphier_pciephy_testio_write(priv, id, val);
/* read current data as dummy */
val = FIELD_PREP(TESTI_DAT_MASK, 1);
val |= FIELD_PREP(TESTI_ADR_MASK, reg);
- uniphier_pciephy_testio_write(priv, val);
- readl(priv->base + PCL_PHY_TEST_O);
+ uniphier_pciephy_testio_write(priv, id, val);
+ uniphier_pciephy_testio_read(priv, id);
}
static void uniphier_pciephy_assert(struct uniphier_pciephy_priv *priv)
@@ -120,7 +142,7 @@ static int uniphier_pciephy_init(struct phy *phy)
{
struct uniphier_pciephy_priv *priv = phy_get_drvdata(phy);
u32 val;
- int ret;
+ int ret, id;
ret = clk_prepare_enable(priv->clk);
if (ret)
@@ -148,12 +170,16 @@ static int uniphier_pciephy_init(struct phy *phy)
if (priv->data->is_legacy)
return 0;
- uniphier_pciephy_set_param(priv, PCL_PHY_R00,
+ for (id = 0; id < (priv->data->is_dual_phy ? 2 : 1); id++) {
+ uniphier_pciephy_set_param(priv, id, PCL_PHY_R00,
RX_EQ_ADJ_EN, RX_EQ_ADJ_EN);
- uniphier_pciephy_set_param(priv, PCL_PHY_R06, RX_EQ_ADJ,
+ uniphier_pciephy_set_param(priv, id, PCL_PHY_R06, RX_EQ_ADJ,
FIELD_PREP(RX_EQ_ADJ, RX_EQ_ADJ_VAL));
- uniphier_pciephy_set_param(priv, PCL_PHY_R26, VCO_CTRL,
+ uniphier_pciephy_set_param(priv, id, PCL_PHY_R26, VCO_CTRL,
FIELD_PREP(VCO_CTRL, VCO_CTRL_INIT_VAL));
+ uniphier_pciephy_set_param(priv, id, PCL_PHY_R28, VCOPLL_CLMP,
+ FIELD_PREP(VCOPLL_CLMP, VCOPLL_CLMP_VAL));
+ }
usleep_range(1, 10);
uniphier_pciephy_deassert(priv);
@@ -261,17 +287,31 @@ static void uniphier_pciephy_ld20_setmode(struct regmap *regmap)
SG_USBPCIESEL_PCIE, SG_USBPCIESEL_PCIE);
}
+static void uniphier_pciephy_nx1_setmode(struct regmap *regmap)
+{
+ regmap_update_bits(regmap, SC_US3SRCSEL,
+ SC_US3SRCSEL_2LANE, SC_US3SRCSEL_2LANE);
+}
+
static const struct uniphier_pciephy_soc_data uniphier_pro5_data = {
.is_legacy = true,
};
static const struct uniphier_pciephy_soc_data uniphier_ld20_data = {
.is_legacy = false,
+ .is_dual_phy = false,
.set_phymode = uniphier_pciephy_ld20_setmode,
};
static const struct uniphier_pciephy_soc_data uniphier_pxs3_data = {
.is_legacy = false,
+ .is_dual_phy = false,
+};
+
+static const struct uniphier_pciephy_soc_data uniphier_nx1_data = {
+ .is_legacy = false,
+ .is_dual_phy = true,
+ .set_phymode = uniphier_pciephy_nx1_setmode,
};
static const struct of_device_id uniphier_pciephy_match[] = {
@@ -287,6 +327,10 @@ static const struct of_device_id uniphier_pciephy_match[] = {
.compatible = "socionext,uniphier-pxs3-pcie-phy",
.data = &uniphier_pxs3_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-pcie-phy",
+ .data = &uniphier_nx1_data,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, uniphier_pciephy_match);
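A note on the dual-PHY test I/O scheme in the hunks above: both PHY instances share a single TESTI/TESTO register pair, and instance 1's data byte is shifted up by TESTIO_PHY_SHIFT before the write (and back down after the read). A minimal user-space sketch of that packing, assuming TESTIO_PHY_SHIFT is 16 and an 8-bit TESTO data field; the real values live in the driver header, outside this hunk:

#include <stdint.h>

#define TESTIO_PHY_SHIFT	16	/* assumed, see lead-in */
#define TESTO_DAT_MASK		0xffu

/* pack a data byte into the lane belonging to PHY instance `id` */
static uint32_t pack_testio(int id, uint32_t data)
{
	return id ? data << TESTIO_PHY_SHIFT : data;
}

/* recover the data byte for PHY instance `id` from the shared TESTO word */
static uint32_t unpack_testio(int id, uint32_t raw)
{
	if (id)
		raw >>= TESTIO_PHY_SHIFT;
	return raw & TESTO_DAT_MASK;
}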
diff --git a/drivers/phy/socionext/phy-uniphier-usb3hs.c b/drivers/phy/socionext/phy-uniphier-usb3hs.c
index a9bc74121f38..8c8673df0084 100644
--- a/drivers/phy/socionext/phy-uniphier-usb3hs.c
+++ b/drivers/phy/socionext/phy-uniphier-usb3hs.c
@@ -447,6 +447,10 @@ static const struct of_device_id uniphier_u3hsphy_match[] = {
.compatible = "socionext,uniphier-pxs3-usb3-hsphy",
.data = &uniphier_pxs3_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-usb3-hsphy",
+ .data = &uniphier_pxs3_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_u3hsphy_match);
diff --git a/drivers/phy/socionext/phy-uniphier-usb3ss.c b/drivers/phy/socionext/phy-uniphier-usb3ss.c
index 6700645bcbe6..f402ed8732fd 100644
--- a/drivers/phy/socionext/phy-uniphier-usb3ss.c
+++ b/drivers/phy/socionext/phy-uniphier-usb3ss.c
@@ -22,11 +22,13 @@
#include <linux/reset.h>
#define SSPHY_TESTI 0x0
-#define SSPHY_TESTO 0x4
#define TESTI_DAT_MASK GENMASK(13, 6)
#define TESTI_ADR_MASK GENMASK(5, 1)
#define TESTI_WR_EN BIT(0)
+#define SSPHY_TESTO 0x4
+#define TESTO_DAT_MASK GENMASK(7, 0)
+
#define PHY_F(regno, msb, lsb) { (regno), (msb), (lsb) }
#define CDR_CPD_TRIM PHY_F(7, 3, 0) /* RxPLL charge pump current */
@@ -84,12 +86,12 @@ static void uniphier_u3ssphy_set_param(struct uniphier_u3ssphy_priv *priv,
val = FIELD_PREP(TESTI_DAT_MASK, 1);
val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
uniphier_u3ssphy_testio_write(priv, val);
- val = readl(priv->base + SSPHY_TESTO);
+ val = readl(priv->base + SSPHY_TESTO) & TESTO_DAT_MASK;
/* update value */
- val &= ~FIELD_PREP(TESTI_DAT_MASK, field_mask);
+ val &= ~field_mask;
data = field_mask & (p->value << p->field.lsb);
- val = FIELD_PREP(TESTI_DAT_MASK, data);
+ val = FIELD_PREP(TESTI_DAT_MASK, data | val);
val |= FIELD_PREP(TESTI_ADR_MASK, p->field.reg_no);
uniphier_u3ssphy_testio_write(priv, val);
uniphier_u3ssphy_testio_write(priv, val | TESTI_WR_EN);
@@ -328,6 +330,10 @@ static const struct of_device_id uniphier_u3ssphy_match[] = {
.compatible = "socionext,uniphier-pxs3-usb3-ssphy",
.data = &uniphier_ld20_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-usb3-ssphy",
+ .data = &uniphier_ld20_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_u3ssphy_match);
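The u3ssphy hunk above is a read-modify-write fix: the old code discarded the value it had just read, clobbering neighbouring bits in the same register byte, while the new code masks the readback with TESTO_DAT_MASK and merges the untouched bits into the write. A hedged sketch of the corrected merge, with the 8-bit data mask assumed from the new TESTO_DAT_MASK definition:

#include <stdint.h>

#define TESTO_DAT_MASK	0xffu

/* return the new data byte: old bits preserved, `field_mask` bits replaced */
static uint8_t update_field(uint32_t readback, uint8_t field_mask,
			    uint8_t value)
{
	uint8_t cur = readback & TESTO_DAT_MASK; /* keep only the data byte */

	cur &= ~field_mask;                      /* clear the target field */
	return cur | (field_mask & value);       /* merge the new value */
}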
diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c
index e4f4a9be5132..007a23c78d56 100644
--- a/drivers/phy/st/phy-stm32-usbphyc.c
+++ b/drivers/phy/st/phy-stm32-usbphyc.c
@@ -304,7 +304,7 @@ static int stm32_usbphyc_pll_enable(struct stm32_usbphyc *usbphyc)
ret = __stm32_usbphyc_pll_disable(usbphyc);
if (ret)
- return ret;
+ goto dec_n_pll_cons;
}
ret = stm32_usbphyc_regulators_enable(usbphyc);
@@ -672,17 +672,15 @@ static int stm32_usbphyc_probe(struct platform_device *pdev)
usbphyc->vdda1v1 = devm_regulator_get(dev, "vdda1v1");
if (IS_ERR(usbphyc->vdda1v1)) {
- ret = PTR_ERR(usbphyc->vdda1v1);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get vdda1v1 supply: %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(usbphyc->vdda1v1),
+ "failed to get vdda1v1 supply\n");
goto clk_disable;
}
usbphyc->vdda1v8 = devm_regulator_get(dev, "vdda1v8");
if (IS_ERR(usbphyc->vdda1v8)) {
- ret = PTR_ERR(usbphyc->vdda1v8);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get vdda1v8 supply: %d\n", ret);
+ ret = dev_err_probe(dev, PTR_ERR(usbphyc->vdda1v8),
+ "failed to get vdda1v8 supply\n");
goto clk_disable;
}
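dev_err_probe() folds the old "print unless we are deferring" pattern into one call: it returns the error code unchanged, logs real failures, and for -EPROBE_DEFER records a deferral reason instead of spamming the log. A hedged kernel-style sketch with a hypothetical "foo" supply name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int get_foo_supply(struct device *dev, struct regulator **out)
{
	*out = devm_regulator_get(dev, "foo"); /* hypothetical supply */
	if (IS_ERR(*out))
		/* logs the failure, or records a deferral reason, and
		 * hands the error code straight back to the caller */
		return dev_err_probe(dev, PTR_ERR(*out),
				     "failed to get foo supply\n");
	return 0;
}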
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 963de5913e50..aa5237eacd29 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -455,7 +455,7 @@ tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type,
name = kasprintf(GFP_KERNEL, "%s-%u", type, index);
if (!name) {
of_node_put(ports);
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
np = of_get_child_by_name(ports, name);
kfree(name);
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index b3384c31637a..da546c35d1d5 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -233,6 +233,7 @@ static const struct clk_div_table clk_div_table[] = {
{ .val = 1, .div = 2, },
{ .val = 2, .div = 4, },
{ .val = 3, .div = 8, },
+ { /* sentinel */ },
};
static const struct wiz_clk_div_sel clk_div_sel[] = {
diff --git a/drivers/phy/ti/phy-omap-control.c b/drivers/phy/ti/phy-omap-control.c
index 47482f106fab..76c5595f0859 100644
--- a/drivers/phy/ti/phy-omap-control.c
+++ b/drivers/phy/ti/phy-omap-control.c
@@ -26,7 +26,7 @@ void omap_control_pcie_pcs(struct device *dev, u8 delay)
u32 val;
struct omap_control_phy *control_phy;
- if (IS_ERR(dev) || !dev) {
+ if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: invalid device\n", __func__);
return;
}
@@ -61,7 +61,7 @@ void omap_control_phy_power(struct device *dev, int on)
unsigned long rate;
struct omap_control_phy *control_phy;
- if (IS_ERR(dev) || !dev) {
+ if (IS_ERR_OR_NULL(dev)) {
pr_err("%s: invalid device\n", __func__);
return;
}
@@ -202,7 +202,7 @@ void omap_control_usb_set_mode(struct device *dev,
{
struct omap_control_phy *ctrl_phy;
- if (IS_ERR(dev) || !dev)
+ if (IS_ERR_OR_NULL(dev))
return;
ctrl_phy = dev_get_drvdata(dev);
diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c
index f478d8a17115..9be9535ad7ab 100644
--- a/drivers/phy/xilinx/phy-zynqmp.c
+++ b/drivers/phy/xilinx/phy-zynqmp.c
@@ -134,7 +134,8 @@
#define PROT_BUS_WIDTH_10 0x0
#define PROT_BUS_WIDTH_20 0x1
#define PROT_BUS_WIDTH_40 0x2
-#define PROT_BUS_WIDTH_SHIFT 2
+#define PROT_BUS_WIDTH_SHIFT(n) ((n) * 2)
+#define PROT_BUS_WIDTH_MASK(n) GENMASK((n) * 2 + 1, (n) * 2)
/* Number of GT lanes */
#define NUM_LANES 4
@@ -445,12 +446,12 @@ static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy)
static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy)
{
struct xpsgtr_dev *gtr_dev = gtr_phy->dev;
+ u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane);
+ u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane);
/* Set SGMII protocol TX and RX bus width to 10 bits. */
- xpsgtr_write(gtr_dev, TX_PROT_BUS_WIDTH,
- PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
- xpsgtr_write(gtr_dev, RX_PROT_BUS_WIDTH,
- PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT));
+ xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val);
+ xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val);
xpsgtr_bypass_scrambler_8b10b(gtr_phy);
}
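The zynqmp hunk replaces a blind register write, which zeroed every other lane's 2-bit width field, with a clear-and-set over just the target lane. A minimal sketch of the per-lane field math, with GENMASK expanded by hand:

#include <stdint.h>

#define PROT_BUS_WIDTH_10	0x0u
#define PROT_BUS_WIDTH_SHIFT(n)	((n) * 2)
#define PROT_BUS_WIDTH_MASK(n)	(0x3u << ((n) * 2))	/* GENMASK(n*2+1, n*2) */

/* update lane `n`'s 2-bit field, leaving the other lanes untouched */
static uint32_t set_lane_width(uint32_t reg, unsigned int n, uint32_t width)
{
	reg &= ~PROT_BUS_WIDTH_MASK(n);
	return reg | (width << PROT_BUS_WIDTH_SHIFT(n));
}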
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 0d5b61e4c21e..6fc56d6598e2 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -31,6 +31,24 @@ config DEBUG_PINCTRL
help
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
+config PINCTRL_AMD
+ tristate "AMD GPIO pin control"
+ depends on HAS_IOMEM
+ depends on ACPI || COMPILE_TEST
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ help
+ The driver for memory-mapped GPIO functionality on AMD platforms
+ (x86 or ARM). Most pins are usually muxed to some other
+ function by firmware, so only a small number are available
+ for GPIO use.
+
+ Requires ACPI/FDT device enumeration code to set up a platform
+ device.
+
config PINCTRL_APPLE_GPIO
tristate "Apple SoC GPIO pin controller driver"
depends on ARCH_APPLE
@@ -69,20 +87,6 @@ config PINCTRL_AS3722
open drain configuration for the GPIO pins of AS3722 devices. It also
supports the GPIO functionality through gpiolib.
-config PINCTRL_AXP209
- tristate "X-Powers AXP209 PMIC pinctrl and GPIO Support"
- depends on MFD_AXP20X
- depends on OF
- select PINMUX
- select GENERIC_PINCONF
- select GPIOLIB
- help
- AXP PMICs provides multiple GPIOs that can be muxed for different
- functions. This driver bundles a pinctrl driver to select the function
- muxing and a GPIO driver to handle the GPIO when the GPIO function is
- selected.
- Say yes to enable pinctrl and GPIO support for the AXP209 PMIC
-
config PINCTRL_AT91
bool "AT91 pinctrl driver"
depends on OF
@@ -109,23 +113,19 @@ config PINCTRL_AT91PIO4
Say Y here to enable the at91 pinctrl/gpio driver for Atmel PIO4
controller available on sama5d2 SoC.
-config PINCTRL_AMD
- tristate "AMD GPIO pin control"
- depends on HAS_IOMEM
- depends on ACPI || COMPILE_TEST
- select GPIOLIB
- select GPIOLIB_IRQCHIP
+config PINCTRL_AXP209
+ tristate "X-Powers AXP209 PMIC pinctrl and GPIO Support"
+ depends on MFD_AXP20X
+ depends on OF
select PINMUX
- select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
help
- driver for memory mapped GPIO functionality on AMD platforms
- (x86 or arm).Most pins are usually muxed to some other
- functionality by firmware,so only a small amount is available
- for gpio use.
-
- Requires ACPI/FDT device enumeration code to set up a platform
- device.
+ AXP PMICs provide multiple GPIOs that can be muxed for different
+ functions. This driver bundles a pinctrl driver to select the function
+ muxing and a GPIO driver to handle the GPIO when the GPIO function is
+ selected.
+ Say Y to enable pinctrl and GPIO support for the AXP209 PMIC.
config PINCTRL_BM1880
bool "Bitmain BM1880 Pinctrl driver"
@@ -136,13 +136,13 @@ config PINCTRL_BM1880
Pinctrl driver for Bitmain BM1880 SoC.
config PINCTRL_DA850_PUPD
- tristate "TI DA850/OMAP-L138/AM18XX pullup/pulldown groups"
+ tristate "TI DA850/OMAP-L138/AM18XX pull-up and pull-down groups"
depends on OF && (ARCH_DAVINCI_DA850 || COMPILE_TEST)
select PINCONF
select GENERIC_PINCONF
help
Driver for TI DA850/OMAP-L138/AM18XX pinconf. Used to control
- pullup/pulldown pin groups.
+ pull-up and pull-down pin groups.
config PINCTRL_DA9062
tristate "Dialog Semiconductor DA9062 PMIC pinctrl and GPIO Support"
@@ -154,7 +154,7 @@ config PINCTRL_DA9062
function muxing and a GPIO driver to handle the GPIO when the GPIO
function is selected.
- Say yes to enable pinctrl and GPIO support for the DA9062 PMIC.
+ Say Y to enable pinctrl and GPIO support for the DA9062 PMIC.
config PINCTRL_DIGICOLOR
bool
@@ -162,12 +162,93 @@ config PINCTRL_DIGICOLOR
select PINMUX
select GENERIC_PINCONF
+config PINCTRL_EQUILIBRIUM
+ tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
+ depends on OF && HAS_IOMEM
+ depends on X86 || COMPILE_TEST
+ select PINMUX
+ select PINCONF
+ select GPIOLIB
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ help
+ The Equilibrium driver is a pinctrl and GPIO driver for the Intel
+ Lightning Mountain network processor SoC that supports both the GPIO
+ and pin control frameworks. It provides interfaces to set up pin
+ muxing, assign desired pin functions, and configure GPIO attributes
+ for LGM SoC pins. Pin muxing and pin config settings are retrieved
+ from the device tree.
+
+config PINCTRL_GEMINI
+ bool
+ depends on ARCH_GEMINI
+ default ARCH_GEMINI
+ select PINMUX
+ select GENERIC_PINCONF
+ select MFD_SYSCON
+
+config PINCTRL_INGENIC
+ bool "Pinctrl driver for the Ingenic JZ47xx SoCs"
+ default MACH_INGENIC
+ depends on OF
+ depends on MIPS || COMPILE_TEST
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select REGMAP_MMIO
+
+config PINCTRL_K210
+ bool "Pinctrl driver for the Canaan Kendryte K210 SoC"
+ depends on RISCV && SOC_CANAAN && OF
+ select GENERIC_PINMUX_FUNCTIONS
+ select GENERIC_PINCONF
+ select GPIOLIB
+ select OF_GPIO
+ select REGMAP_MMIO
+ default SOC_CANAAN
+ help
+ Add support for the Canaan Kendryte K210 RISC-V SoC Field
+ Programmable IO Array (FPIOA) controller.
+
+config PINCTRL_KEEMBAY
+ tristate "Pinctrl driver for Intel Keem Bay SoC"
+ depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST)
+ depends on HAS_IOMEM
+ select PINMUX
+ select PINCONF
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select GPIO_GENERIC
+ help
+ This selects the pin control driver for the Intel Keem Bay SoC.
+ It provides pin config functions such as pull-up, pull-down,
+ interrupt, drive strength, sec lock, Schmitt trigger, slew
+ rate control, and direction control. This module will be
+ called pinctrl-keembay.
+
config PINCTRL_LANTIQ
bool
depends on LANTIQ
select PINMUX
select PINCONF
+config PINCTRL_FALCON
+ bool
+ depends on SOC_FALCON
+ depends on PINCTRL_LANTIQ
+
+config PINCTRL_XWAY
+ bool
+ depends on SOC_TYPE_XWAY
+ depends on PINCTRL_LANTIQ
+
config PINCTRL_LPC18XX
bool "NXP LPC18XX/43XX SCU pinctrl driver"
depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
@@ -177,18 +258,16 @@ config PINCTRL_LPC18XX
help
Pinctrl driver for NXP LPC18xx/43xx System Control Unit (SCU).
-config PINCTRL_FALCON
- bool
- depends on SOC_FALCON
- depends on PINCTRL_LANTIQ
-
-config PINCTRL_GEMINI
- bool
- depends on ARCH_GEMINI
- default ARCH_GEMINI
+config PINCTRL_MAX77620
+ tristate "MAX77620/MAX20024 Pincontrol support"
+ depends on MFD_MAX77620 && OF
select PINMUX
select GENERIC_PINCONF
- select MFD_SYSCON
+ help
+ Say Y here to enable pin control support for the Maxim MAX77620 PMIC.
+ This PMIC has 8 GPIO pins that work as GPIOs as well as special
+ functions in alternate mode. This driver also configures push-pull,
+ open-drain, FPS slots, etc.
config PINCTRL_MCP23S08_I2C
tristate
@@ -212,6 +291,37 @@ config PINCTRL_MCP23S08
This provides a GPIO interface supporting inputs and outputs and a
corresponding interrupt-controller.
+config PINCTRL_MICROCHIP_SGPIO
+ bool "Pinctrl driver for Microsemi/Microchip Serial GPIO"
+ depends on OF
+ depends on HAS_IOMEM
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select OF_GPIO
+ help
+ Support for the serial GPIO interface used on Microsemi and
+ Microchip SoCs. By using a serial interface, the SIO
+ controller significantly extends the number of available
+ GPIOs with a minimum number of additional pins on the
+ device. The primary purpose of the SIO controller is to
+ connect control signals from SFP modules and to act as an
+ LED controller.
+
+config PINCTRL_OCELOT
+ bool "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
+ depends on OF
+ depends on HAS_IOMEM
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select OF_GPIO
+ select REGMAP_MMIO
+
config PINCTRL_OXNAS
bool
depends on OF
@@ -223,6 +333,54 @@ config PINCTRL_OXNAS
select GPIOLIB_IRQCHIP
select MFD_SYSCON
+config PINCTRL_PALMAS
+ tristate "Pinctrl driver for the PALMAS Series MFD devices"
+ depends on OF && MFD_PALMAS
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ The Palmas device supports the configuration of pins for different
+ functionality. This driver supports the pinmux, push-pull and
+ open-drain configuration for Palmas series devices such as the
+ TPS65913 and TPS80036.
+
+config PINCTRL_PIC32
+ bool "Microchip PIC32 pin controller driver"
+ depends on OF
+ depends on MACH_PIC32
+ select PINMUX
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select OF_GPIO
+ help
+ This is the pin controller and GPIO driver for Microchip PIC32
+ microcontrollers. This option is selected automatically when a
+ matching machine and architecture are chosen for the build.
+
+config PINCTRL_PIC32MZDA
+ def_bool y if PIC32MZDA
+ select PINCTRL_PIC32
+
+config PINCTRL_PISTACHIO
+ bool "IMG Pistachio SoC pinctrl driver"
+ depends on OF && (MIPS || COMPILE_TEST)
+ depends on GPIOLIB
+ select PINMUX
+ select GENERIC_PINCONF
+ select GPIOLIB_IRQCHIP
+ select OF_GPIO
+ help
+ This supports the pinctrl and GPIO driver for the IMG Pistachio SoC.
+
+config PINCTRL_RK805
+ tristate "Pinctrl and GPIO driver for RK805 PMIC"
+ depends on MFD_RK808
+ select GPIOLIB
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+ This selects the pinctrl driver for RK805.
+
config PINCTRL_ROCKCHIP
tristate "Rockchip gpio and pinctrl driver"
depends on ARCH_ROCKCHIP || COMPILE_TEST
@@ -235,7 +393,7 @@ config PINCTRL_ROCKCHIP
select OF_GPIO
default ARCH_ROCKCHIP
help
- This support pinctrl and gpio driver for Rockchip SoCs.
+ This supports the pinctrl and GPIO driver for Rockchip SoCs.
config PINCTRL_SINGLE
tristate "One-register-per-pin type device tree based pinctrl driver"
@@ -247,33 +405,6 @@ config PINCTRL_SINGLE
help
This selects the device tree based generic pinctrl driver.
-config PINCTRL_SX150X
- bool "Semtech SX150x I2C GPIO expander pinctrl driver"
- depends on I2C=y
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select GPIOLIB
- select GPIOLIB_IRQCHIP
- select REGMAP
- help
- Say yes here to provide support for Semtech SX150x-series I2C
- GPIO expanders as pinctrl module.
- Compatible models include:
- - 8 bits: sx1508q, sx1502q
- - 16 bits: sx1509q, sx1506q
-
-config PINCTRL_PISTACHIO
- bool "IMG Pistachio SoC pinctrl driver"
- depends on OF && (MIPS || COMPILE_TEST)
- depends on GPIOLIB
- select PINMUX
- select GENERIC_PINCONF
- select GPIOLIB_IRQCHIP
- select OF_GPIO
- help
- This support pinctrl and gpio driver for IMG Pistachio SoC.
-
config PINCTRL_ST
bool
depends on OF
@@ -312,44 +443,45 @@ config PINCTRL_STMFX
and configuring push-pull, open-drain, and can also be used as
interrupt-controller.
-config PINCTRL_MAX77620
- tristate "MAX77620/MAX20024 Pincontrol support"
- depends on MFD_MAX77620 && OF
+config PINCTRL_SX150X
+ bool "Semtech SX150x I2C GPIO expander pinctrl driver"
+ depends on I2C=y
select PINMUX
+ select PINCONF
select GENERIC_PINCONF
+ select GPIOLIB
+ select GPIOLIB_IRQCHIP
+ select REGMAP
help
- Say Yes here to enable Pin control support for Maxim PMIC MAX77620.
- This PMIC has 8 GPIO pins that work as GPIO as well as special
- function in alternate mode. This driver also configure push-pull,
- open drain, FPS slots etc.
+ Say Y here to provide support for Semtech SX150x-series I2C
+ GPIO expanders as a pinctrl module.
+ Compatible models include:
+ - 8 bits: sx1508q, sx1502q
+ - 16 bits: sx1509q, sx1506q
-config PINCTRL_PALMAS
- tristate "Pinctrl driver for the PALMAS Series MFD devices"
- depends on OF && MFD_PALMAS
- select PINMUX
- select GENERIC_PINCONF
- help
- Palmas device supports the configuration of pins for different
- functionality. This driver supports the pinmux, push-pull and
- open drain configuration for the Palmas series devices like
- TPS65913, TPS80036 etc.
+config PINCTRL_TB10X
+ bool
+ depends on OF && ARC_PLAT_TB10X
+ select GPIOLIB
-config PINCTRL_PIC32
- bool "Microchip PIC32 pin controller driver"
- depends on OF
- depends on MACH_PIC32
+config PINCTRL_THUNDERBAY
+ tristate "Generic pinctrl and GPIO driver for Intel Thunder Bay SoC"
+ depends on ARCH_THUNDERBAY || (ARM64 && COMPILE_TEST)
+ depends on HAS_IOMEM
select PINMUX
+ select PINCONF
select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
+ select GPIOLIB
select GPIOLIB_IRQCHIP
- select OF_GPIO
+ select GPIO_GENERIC
help
- This is the pin controller and gpio driver for Microchip PIC32
- microcontrollers. This option is selected automatically when specific
- machine and arch are selected to build.
-
-config PINCTRL_PIC32MZDA
- def_bool y if PIC32MZDA
- select PINCTRL_PIC32
+ This selects the pin control driver for the Intel Thunder Bay SoC.
+ It provides pin config functions such as pull-up, pull-down,
+ interrupt, drive strength, sec lock, Schmitt trigger, slew
+ rate control, and direction control. This module will be
+ called pinctrl-thunderbay.
config PINCTRL_ZYNQ
bool "Pinctrl driver for Xilinx Zynq"
@@ -375,96 +507,15 @@ config PINCTRL_ZYNQMP
This driver can also be built as a module. If so, the module
will be called pinctrl-zynqmp.
-config PINCTRL_INGENIC
- bool "Pinctrl driver for the Ingenic JZ47xx SoCs"
- default MACH_INGENIC
- depends on OF
- depends on MIPS || COMPILE_TEST
- select GENERIC_PINCONF
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
- select GPIOLIB
- select GPIOLIB_IRQCHIP
- select REGMAP_MMIO
-
-config PINCTRL_RK805
- tristate "Pinctrl and GPIO driver for RK805 PMIC"
- depends on MFD_RK808
- select GPIOLIB
- select PINMUX
- select GENERIC_PINCONF
- help
- This selects the pinctrl driver for RK805.
-
-config PINCTRL_OCELOT
- bool "Pinctrl driver for the Microsemi Ocelot and Jaguar2 SoCs"
- depends on OF
- depends on HAS_IOMEM
- select GPIOLIB
- select GPIOLIB_IRQCHIP
- select GENERIC_PINCONF
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
- select OF_GPIO
- select REGMAP_MMIO
-
-config PINCTRL_MICROCHIP_SGPIO
- bool "Pinctrl driver for Microsemi/Microchip Serial GPIO"
- depends on OF
- depends on HAS_IOMEM
- select GPIOLIB
- select GPIOLIB_IRQCHIP
- select GENERIC_PINCONF
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
- select OF_GPIO
- help
- Support for the serial GPIO interface used on Microsemi and
- Microchip SoC's. By using a serial interface, the SIO
- controller significantly extends the number of available
- GPIOs with a minimum number of additional pins on the
- device. The primary purpose of the SIO controller is to
- connect control signals from SFP modules and to act as an
- LED controller.
-
-config PINCTRL_K210
- bool "Pinctrl driver for the Canaan Kendryte K210 SoC"
- depends on RISCV && SOC_CANAAN && OF
- select GENERIC_PINMUX_FUNCTIONS
- select GENERIC_PINCONF
- select GPIOLIB
- select OF_GPIO
- select REGMAP_MMIO
- default SOC_CANAAN
- help
- Add support for the Canaan Kendryte K210 RISC-V SOC Field
- Programmable IO Array (FPIOA) controller.
-
-config PINCTRL_KEEMBAY
- tristate "Pinctrl driver for Intel Keem Bay SoC"
- depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST)
- depends on HAS_IOMEM
- select PINMUX
- select PINCONF
- select GENERIC_PINCONF
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
- select GPIOLIB
- select GPIOLIB_IRQCHIP
- select GPIO_GENERIC
- help
- This selects pin control driver for the Intel Keembay SoC.
- It provides pin config functions such as pullup, pulldown,
- interrupt, drive strength, sec lock, schmitt trigger, slew
- rate control and direction control. This module will be
- called as pinctrl-keembay.
-
source "drivers/pinctrl/actions/Kconfig"
source "drivers/pinctrl/aspeed/Kconfig"
source "drivers/pinctrl/bcm/Kconfig"
source "drivers/pinctrl/berlin/Kconfig"
+source "drivers/pinctrl/cirrus/Kconfig"
source "drivers/pinctrl/freescale/Kconfig"
source "drivers/pinctrl/intel/Kconfig"
+source "drivers/pinctrl/mediatek/Kconfig"
+source "drivers/pinctrl/meson/Kconfig"
source "drivers/pinctrl/mvebu/Kconfig"
source "drivers/pinctrl/nomadik/Kconfig"
source "drivers/pinctrl/nuvoton/Kconfig"
@@ -480,40 +531,7 @@ source "drivers/pinctrl/sunxi/Kconfig"
source "drivers/pinctrl/tegra/Kconfig"
source "drivers/pinctrl/ti/Kconfig"
source "drivers/pinctrl/uniphier/Kconfig"
-source "drivers/pinctrl/vt8500/Kconfig"
-source "drivers/pinctrl/mediatek/Kconfig"
-source "drivers/pinctrl/meson/Kconfig"
-source "drivers/pinctrl/cirrus/Kconfig"
source "drivers/pinctrl/visconti/Kconfig"
-
-config PINCTRL_XWAY
- bool
- depends on SOC_TYPE_XWAY
- depends on PINCTRL_LANTIQ
-
-config PINCTRL_TB10X
- bool
- depends on OF && ARC_PLAT_TB10X
- select GPIOLIB
-
-config PINCTRL_EQUILIBRIUM
- tristate "Generic pinctrl and GPIO driver for Intel Lightning Mountain SoC"
- depends on OF && HAS_IOMEM
- depends on X86 || COMPILE_TEST
- select PINMUX
- select PINCONF
- select GPIOLIB
- select GPIO_GENERIC
- select GPIOLIB_IRQCHIP
- select GENERIC_PINCONF
- select GENERIC_PINCTRL_GROUPS
- select GENERIC_PINMUX_FUNCTIONS
-
- help
- Equilibrium pinctrl driver is a pinctrl & GPIO driver for Intel Lightning
- Mountain network processor SoC that supports both the linux GPIO and pin
- control frameworks. It provides interfaces to setup pinmux, assign desired
- pin functions, configure GPIO attributes for LGM SoC pins. Pinmux and
- pinconf settings are retrieved from device tree.
+source "drivers/pinctrl/vt8500/Kconfig"
endif
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index f5bdd6b209a6..f64d29f614ec 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -6,57 +6,60 @@ subdir-ccflags-$(CONFIG_DEBUG_PINCTRL) += -DDEBUG
obj-y += core.o pinctrl-utils.o
obj-$(CONFIG_PINMUX) += pinmux.o
obj-$(CONFIG_PINCONF) += pinconf.o
-obj-$(CONFIG_OF) += devicetree.o
obj-$(CONFIG_GENERIC_PINCONF) += pinconf-generic.o
+obj-$(CONFIG_OF) += devicetree.o
+
+obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o
obj-$(CONFIG_PINCTRL_APPLE_GPIO) += pinctrl-apple-gpio.o
obj-$(CONFIG_PINCTRL_ARTPEC6) += pinctrl-artpec6.o
obj-$(CONFIG_PINCTRL_AS3722) += pinctrl-as3722.o
-obj-$(CONFIG_PINCTRL_AXP209) += pinctrl-axp209.o
obj-$(CONFIG_PINCTRL_AT91) += pinctrl-at91.o
obj-$(CONFIG_PINCTRL_AT91PIO4) += pinctrl-at91-pio4.o
-obj-$(CONFIG_PINCTRL_AMD) += pinctrl-amd.o
+obj-$(CONFIG_PINCTRL_AXP209) += pinctrl-axp209.o
obj-$(CONFIG_PINCTRL_BM1880) += pinctrl-bm1880.o
obj-$(CONFIG_PINCTRL_DA850_PUPD) += pinctrl-da850-pupd.o
obj-$(CONFIG_PINCTRL_DA9062) += pinctrl-da9062.o
obj-$(CONFIG_PINCTRL_DIGICOLOR) += pinctrl-digicolor.o
-obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
+obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
obj-$(CONFIG_PINCTRL_GEMINI) += pinctrl-gemini.o
+obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
+obj-$(CONFIG_PINCTRL_K210) += pinctrl-k210.o
+obj-$(CONFIG_PINCTRL_KEEMBAY) += pinctrl-keembay.o
+obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
+obj-$(CONFIG_PINCTRL_FALCON) += pinctrl-falcon.o
+obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
+obj-$(CONFIG_PINCTRL_LPC18XX) += pinctrl-lpc18xx.o
obj-$(CONFIG_PINCTRL_MAX77620) += pinctrl-max77620.o
obj-$(CONFIG_PINCTRL_MCP23S08_I2C) += pinctrl-mcp23s08_i2c.o
obj-$(CONFIG_PINCTRL_MCP23S08_SPI) += pinctrl-mcp23s08_spi.o
obj-$(CONFIG_PINCTRL_MCP23S08) += pinctrl-mcp23s08.o
-obj-$(CONFIG_PINCTRL_MESON) += meson/
+obj-$(CONFIG_PINCTRL_MICROCHIP_SGPIO) += pinctrl-microchip-sgpio.o
+obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
obj-$(CONFIG_PINCTRL_OXNAS) += pinctrl-oxnas.o
obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
obj-$(CONFIG_PINCTRL_PIC32) += pinctrl-pic32.o
obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o
+obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
-obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
-obj-$(CONFIG_ARCH_TEGRA) += tegra/
-obj-$(CONFIG_PINCTRL_XWAY) += pinctrl-xway.o
-obj-$(CONFIG_PINCTRL_LANTIQ) += pinctrl-lantiq.o
-obj-$(CONFIG_PINCTRL_LPC18XX) += pinctrl-lpc18xx.o
-obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o
obj-$(CONFIG_PINCTRL_STARFIVE) += pinctrl-starfive.o
obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o
-obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
+obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o
+obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o
+obj-$(CONFIG_PINCTRL_THUNDERBAY) += pinctrl-thunderbay.o
obj-$(CONFIG_PINCTRL_ZYNQMP) += pinctrl-zynqmp.o
-obj-$(CONFIG_PINCTRL_INGENIC) += pinctrl-ingenic.o
-obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o
-obj-$(CONFIG_PINCTRL_OCELOT) += pinctrl-ocelot.o
-obj-$(CONFIG_PINCTRL_MICROCHIP_SGPIO) += pinctrl-microchip-sgpio.o
-obj-$(CONFIG_PINCTRL_EQUILIBRIUM) += pinctrl-equilibrium.o
-obj-$(CONFIG_PINCTRL_K210) += pinctrl-k210.o
-obj-$(CONFIG_PINCTRL_KEEMBAY) += pinctrl-keembay.o
+obj-$(CONFIG_PINCTRL_ZYNQ) += pinctrl-zynq.o
obj-y += actions/
obj-$(CONFIG_ARCH_ASPEED) += aspeed/
obj-y += bcm/
obj-$(CONFIG_PINCTRL_BERLIN) += berlin/
+obj-y += cirrus/
obj-y += freescale/
obj-$(CONFIG_X86) += intel/
+obj-y += mediatek/
+obj-$(CONFIG_PINCTRL_MESON) += meson/
obj-y += mvebu/
obj-y += nomadik/
obj-$(CONFIG_ARCH_NPCM7XX) += nuvoton/
@@ -69,9 +72,8 @@ obj-$(CONFIG_PINCTRL_SPEAR) += spear/
obj-y += sprd/
obj-$(CONFIG_PINCTRL_STM32) += stm32/
obj-$(CONFIG_PINCTRL_SUNXI) += sunxi/
+obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_PINCTRL_UNIPHIER) += uniphier/
-obj-$(CONFIG_ARCH_VT8500) += vt8500/
-obj-y += mediatek/
-obj-y += cirrus/
obj-$(CONFIG_PINCTRL_VISCONTI) += visconti/
+obj-$(CONFIG_ARCH_VT8500) += vt8500/
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index 781f2200ed58..ed46abc15d72 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -874,7 +874,6 @@ static int owl_gpio_init(struct owl_pinctrl *pctrl)
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;
chip->owner = THIS_MODULE;
- chip->of_node = pctrl->dev->of_node;
pctrl->irq_chip.name = chip->of_node->name;
pctrl->irq_chip.irq_ack = owl_gpio_irq_ack;
diff --git a/drivers/pinctrl/aspeed/Kconfig b/drivers/pinctrl/aspeed/Kconfig
index de8b185c4fee..1a4e5b9ed471 100644
--- a/drivers/pinctrl/aspeed/Kconfig
+++ b/drivers/pinctrl/aspeed/Kconfig
@@ -2,7 +2,7 @@
config PINCTRL_ASPEED
bool
depends on (ARCH_ASPEED || COMPILE_TEST) && OF
- depends on MFD_SYSCON
+ select MFD_SYSCON
select PINMUX
select PINCONF
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig
index 8fc1feedd861..ac1e400bbbac 100644
--- a/drivers/pinctrl/bcm/Kconfig
+++ b/drivers/pinctrl/bcm/Kconfig
@@ -35,6 +35,7 @@ config PINCTRL_BCM63XX
select PINCONF
select GENERIC_PINCONF
select GPIOLIB
+ select REGMAP
select GPIO_REGMAP
config PINCTRL_BCM6318
@@ -146,6 +147,8 @@ config PINCTRL_NS
depends on OF && (ARCH_BCM_5301X || COMPILE_TEST)
select PINMUX
select GENERIC_PINCONF
+ select GENERIC_PINCTRL_GROUPS
+ select GENERIC_PINMUX_FUNCTIONS
default ARCH_BCM_5301X
help
Say yes here to enable the Broadcom NS SoC pins driver.
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index b607d10e4cbd..47e433e09c5c 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -313,7 +313,10 @@ static inline void bcm2835_pinctrl_fsel_set(
static int bcm2835_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
- return pinctrl_gpio_direction_input(chip->base + offset);
+ struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
+
+ bcm2835_pinctrl_fsel_set(pc, offset, BCM2835_FSEL_GPIO_IN);
+ return 0;
}
static int bcm2835_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -348,8 +351,11 @@ static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
- bcm2835_gpio_set(chip, offset, value);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
+
+ bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset);
+ bcm2835_pinctrl_fsel_set(pc, offset, BCM2835_FSEL_GPIO_OUT);
+ return 0;
}
static const struct gpio_chip bcm2835_gpio_chip = {
@@ -407,7 +413,7 @@ static void bcm2835_gpio_irq_handler(struct irq_desc *desc)
struct bcm2835_pinctrl *pc = gpiochip_get_data(chip);
struct irq_chip *host_chip = irq_desc_get_chip(desc);
int irq = irq_desc_get_irq(desc);
- int group;
+ int group = 0;
int i;
for (i = 0; i < BCM2835_NUM_IRQS; i++) {
@@ -1222,7 +1228,6 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
pc->gpio_chip = *pdata->gpio_chip;
pc->gpio_chip.parent = dev;
- pc->gpio_chip.of_node = np;
for (i = 0; i < BCM2835_NUM_BANKS; i++) {
unsigned long events;
@@ -1264,16 +1269,18 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
sizeof(*girq->parents),
GFP_KERNEL);
if (!girq->parents) {
- pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out_remove;
}
if (is_7211) {
pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS,
sizeof(*pc->wake_irq),
GFP_KERNEL);
- if (!pc->wake_irq)
- return -ENOMEM;
+ if (!pc->wake_irq) {
+ err = -ENOMEM;
+ goto out_remove;
+ }
}
/*
@@ -1301,8 +1308,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
len = strlen(dev_name(pc->dev)) + 16;
name = devm_kzalloc(pc->dev, len, GFP_KERNEL);
- if (!name)
- return -ENOMEM;
+ if (!name) {
+ err = -ENOMEM;
+ goto out_remove;
+ }
snprintf(name, len, "%s:bank%d", dev_name(pc->dev), i);
@@ -1321,11 +1330,14 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
err = gpiochip_add_data(&pc->gpio_chip, pc);
if (err) {
dev_err(dev, "could not add GPIO chip\n");
- pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
- return err;
+ goto out_remove;
}
return 0;
+
+out_remove:
+ pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range);
+ return err;
}
static struct platform_driver bcm2835_pinctrl_driver = {
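The bcm2835 probe rework above funnels every failure after the GPIO range is registered through a single out_remove label instead of repeating the cleanup at each return. A stripped-down sketch of the shape, with hypothetical stand-in helpers:

#include <errno.h>

static int register_range(void) { return 0; }		/* stand-in: acquire */
static void remove_range(void)  { }			/* stand-in: release */
static int alloc_parents(void)  { return -ENOMEM; }	/* stand-in: can fail */

static int probe_sketch(void)
{
	int err;

	err = register_range();
	if (err)
		return err;		/* nothing to unwind yet */

	err = alloc_parents();
	if (err)
		goto out_remove;	/* every later failure lands here */

	return 0;

out_remove:
	remove_range();
	return err;
}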
diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
index a7a0dd638a26..52fa2f4cd618 100644
--- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c
@@ -836,7 +836,6 @@ static int iproc_gpio_probe(struct platform_device *pdev)
chip->num_banks = (ngpios + NGPIOS_PER_BANK - 1) / NGPIOS_PER_BANK;
gc->label = dev_name(dev);
gc->parent = dev;
- gc->of_node = dev->of_node;
gc->request = iproc_gpio_request;
gc->free = iproc_gpio_free;
gc->direction_input = iproc_gpio_direction_input;
diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c
index d7f8175d2c1c..65a86543c58c 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns.c
@@ -14,6 +14,9 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include "../core.h"
+#include "../pinmux.h"
+
#define FLAG_BCM4708 BIT(1)
#define FLAG_BCM4709 BIT(2)
#define FLAG_BCM53012 BIT(3)
@@ -25,10 +28,6 @@ struct ns_pinctrl {
void __iomem *base;
struct pinctrl_desc pctldesc;
- struct ns_pinctrl_group *groups;
- unsigned int num_groups;
- struct ns_pinctrl_function *functions;
- unsigned int num_functions;
};
/*
@@ -65,22 +64,22 @@ static const struct pinctrl_pin_desc ns_pinctrl_pins[] = {
struct ns_pinctrl_group {
const char *name;
- const unsigned int *pins;
+ unsigned int *pins;
const unsigned int num_pins;
unsigned int chipsets;
};
-static const unsigned int spi_pins[] = { 0, 1, 2, 3 };
-static const unsigned int i2c_pins[] = { 4, 5 };
-static const unsigned int mdio_pins[] = { 6, 7 };
-static const unsigned int pwm0_pins[] = { 8 };
-static const unsigned int pwm1_pins[] = { 9 };
-static const unsigned int pwm2_pins[] = { 10 };
-static const unsigned int pwm3_pins[] = { 11 };
-static const unsigned int uart1_pins[] = { 12, 13, 14, 15 };
-static const unsigned int uart2_pins[] = { 16, 17 };
-static const unsigned int sdio_pwr_pins[] = { 22 };
-static const unsigned int sdio_1p8v_pins[] = { 23 };
+static unsigned int spi_pins[] = { 0, 1, 2, 3 };
+static unsigned int i2c_pins[] = { 4, 5 };
+static unsigned int mdio_pins[] = { 6, 7 };
+static unsigned int pwm0_pins[] = { 8 };
+static unsigned int pwm1_pins[] = { 9 };
+static unsigned int pwm2_pins[] = { 10 };
+static unsigned int pwm3_pins[] = { 11 };
+static unsigned int uart1_pins[] = { 12, 13, 14, 15 };
+static unsigned int uart2_pins[] = { 16, 17 };
+static unsigned int sdio_pwr_pins[] = { 22 };
+static unsigned int sdio_1p8v_pins[] = { 23 };
#define NS_GROUP(_name, _pins, _chipsets) \
{ \
@@ -146,38 +145,10 @@ static const struct ns_pinctrl_function ns_pinctrl_functions[] = {
* Groups code
*/
-static int ns_pinctrl_get_groups_count(struct pinctrl_dev *pctrl_dev)
-{
- struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-
- return ns_pinctrl->num_groups;
-}
-
-static const char *ns_pinctrl_get_group_name(struct pinctrl_dev *pctrl_dev,
- unsigned int selector)
-{
- struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-
- return ns_pinctrl->groups[selector].name;
-}
-
-static int ns_pinctrl_get_group_pins(struct pinctrl_dev *pctrl_dev,
- unsigned int selector,
- const unsigned int **pins,
- unsigned int *num_pins)
-{
- struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-
- *pins = ns_pinctrl->groups[selector].pins;
- *num_pins = ns_pinctrl->groups[selector].num_pins;
-
- return 0;
-}
-
static const struct pinctrl_ops ns_pinctrl_ops = {
- .get_groups_count = ns_pinctrl_get_groups_count,
- .get_group_name = ns_pinctrl_get_group_name,
- .get_group_pins = ns_pinctrl_get_group_pins,
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
.dt_node_to_map = pinconf_generic_dt_node_to_map_group,
.dt_free_map = pinconf_generic_dt_free_map,
};
@@ -186,48 +157,22 @@ static const struct pinctrl_ops ns_pinctrl_ops = {
* Functions code
*/
-static int ns_pinctrl_get_functions_count(struct pinctrl_dev *pctrl_dev)
-{
- struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-
- return ns_pinctrl->num_functions;
-}
-
-static const char *ns_pinctrl_get_function_name(struct pinctrl_dev *pctrl_dev,
- unsigned int selector)
-{
- struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-
- return ns_pinctrl->functions[selector].name;
-}
-
-static int ns_pinctrl_get_function_groups(struct pinctrl_dev *pctrl_dev,
- unsigned int selector,
- const char * const **groups,
- unsigned * const num_groups)
-{
- struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
-
- *groups = ns_pinctrl->functions[selector].groups;
- *num_groups = ns_pinctrl->functions[selector].num_groups;
-
- return 0;
-}
-
static int ns_pinctrl_set_mux(struct pinctrl_dev *pctrl_dev,
unsigned int func_select,
- unsigned int grp_select)
+ unsigned int group_selector)
{
struct ns_pinctrl *ns_pinctrl = pinctrl_dev_get_drvdata(pctrl_dev);
+ struct group_desc *group;
u32 unset = 0;
u32 tmp;
int i;
- for (i = 0; i < ns_pinctrl->groups[grp_select].num_pins; i++) {
- int pin_number = ns_pinctrl->groups[grp_select].pins[i];
+ group = pinctrl_generic_get_group(pctrl_dev, group_selector);
+ if (!group)
+ return -EINVAL;
- unset |= BIT(pin_number);
- }
+ for (i = 0; i < group->num_pins; i++)
+ unset |= BIT(group->pins[i]);
tmp = readl(ns_pinctrl->base);
tmp &= ~unset;
@@ -237,9 +182,9 @@ static int ns_pinctrl_set_mux(struct pinctrl_dev *pctrl_dev,
}
static const struct pinmux_ops ns_pinctrl_pmxops = {
- .get_functions_count = ns_pinctrl_get_functions_count,
- .get_function_name = ns_pinctrl_get_function_name,
- .get_function_groups = ns_pinctrl_get_function_groups,
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
.set_mux = ns_pinctrl_set_mux,
};
@@ -267,8 +212,6 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
struct ns_pinctrl *ns_pinctrl;
struct pinctrl_desc *pctldesc;
struct pinctrl_pin_desc *pin;
- struct ns_pinctrl_group *group;
- struct ns_pinctrl_function *function;
struct resource *res;
int i;
@@ -315,43 +258,33 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
}
}
- ns_pinctrl->groups = devm_kcalloc(dev, ARRAY_SIZE(ns_pinctrl_groups),
- sizeof(struct ns_pinctrl_group),
- GFP_KERNEL);
- if (!ns_pinctrl->groups)
- return -ENOMEM;
- for (i = 0, group = &ns_pinctrl->groups[0];
- i < ARRAY_SIZE(ns_pinctrl_groups); i++) {
- const struct ns_pinctrl_group *src = &ns_pinctrl_groups[i];
+ /* Register */
- if (src->chipsets & ns_pinctrl->chipset_flag) {
- memcpy(group++, src, sizeof(*src));
- ns_pinctrl->num_groups++;
- }
+ ns_pinctrl->pctldev = devm_pinctrl_register(dev, pctldesc, ns_pinctrl);
+ if (IS_ERR(ns_pinctrl->pctldev)) {
+ dev_err(dev, "Failed to register pinctrl\n");
+ return PTR_ERR(ns_pinctrl->pctldev);
}
- ns_pinctrl->functions = devm_kcalloc(dev,
- ARRAY_SIZE(ns_pinctrl_functions),
- sizeof(struct ns_pinctrl_function),
- GFP_KERNEL);
- if (!ns_pinctrl->functions)
- return -ENOMEM;
- for (i = 0, function = &ns_pinctrl->functions[0];
- i < ARRAY_SIZE(ns_pinctrl_functions); i++) {
- const struct ns_pinctrl_function *src = &ns_pinctrl_functions[i];
+ for (i = 0; i < ARRAY_SIZE(ns_pinctrl_groups); i++) {
+ const struct ns_pinctrl_group *group = &ns_pinctrl_groups[i];
- if (src->chipsets & ns_pinctrl->chipset_flag) {
- memcpy(function++, src, sizeof(*src));
- ns_pinctrl->num_functions++;
- }
+ if (!(group->chipsets & ns_pinctrl->chipset_flag))
+ continue;
+
+ pinctrl_generic_add_group(ns_pinctrl->pctldev, group->name,
+ group->pins, group->num_pins, NULL);
}
- /* Register */
+ for (i = 0; i < ARRAY_SIZE(ns_pinctrl_functions); i++) {
+ const struct ns_pinctrl_function *function = &ns_pinctrl_functions[i];
- ns_pinctrl->pctldev = devm_pinctrl_register(dev, pctldesc, ns_pinctrl);
- if (IS_ERR(ns_pinctrl->pctldev)) {
- dev_err(dev, "Failed to register pinctrl\n");
- return PTR_ERR(ns_pinctrl->pctldev);
+ if (!(function->chipsets & ns_pinctrl->chipset_flag))
+ continue;
+
+ pinmux_generic_add_function(ns_pinctrl->pctldev, function->name,
+ function->groups,
+ function->num_groups, NULL);
}
return 0;
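With pinctrl-ns converted to the generic helpers, groups and functions are registered against the pctldev after devm_pinctrl_register() rather than kept in driver-private tables. A hedged sketch of the registration shape, using a made-up "demo" group; argument order and types mirror the call sites in the hunk, and helper availability is assumed from the ../core.h and ../pinmux.h includes added above:

#include <linux/kernel.h>
#include <linux/pinctrl/pinctrl.h>

#include "../core.h"
#include "../pinmux.h"

static unsigned int demo_pins[] = { 0, 1 };		/* hypothetical pins */
static const char *demo_groups[] = { "demo" };		/* hypothetical group */

static int register_demo(struct pinctrl_dev *pctldev)
{
	int ret;

	/* returns the group selector (>= 0) or a negative error */
	ret = pinctrl_generic_add_group(pctldev, "demo", demo_pins,
					ARRAY_SIZE(demo_pins), NULL);
	if (ret < 0)
		return ret;

	ret = pinmux_generic_add_function(pctldev, "demo_fn", demo_groups,
					  ARRAY_SIZE(demo_groups), NULL);
	return ret < 0 ? ret : 0;
}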
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
index e03142895f61..643dbd315033 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c
@@ -648,7 +648,6 @@ static int nsp_gpio_probe(struct platform_device *pdev)
gc->ngpio = val;
gc->label = dev_name(dev);
gc->parent = dev;
- gc->of_node = dev->of_node;
gc->request = gpiochip_generic_request;
gc->free = gpiochip_generic_free;
gc->direction_input = nsp_gpio_direction_input;
diff --git a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
index 670ac53a3141..3fda4446d70b 100644
--- a/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
+++ b/drivers/pinctrl/cirrus/pinctrl-lochnagar.c
@@ -1161,9 +1161,6 @@ static int lochnagar_pin_probe(struct platform_device *pdev)
priv->gpio_chip.can_sleep = true;
priv->gpio_chip.parent = dev;
priv->gpio_chip.base = -1;
-#ifdef CONFIG_OF_GPIO
- priv->gpio_chip.of_node = dev->of_node;
-#endif
switch (lochnagar->type) {
case LOCHNAGAR1:
diff --git a/drivers/pinctrl/cirrus/pinctrl-madera-core.c b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
index dce2626384a9..e1cfbee3643a 100644
--- a/drivers/pinctrl/cirrus/pinctrl-madera-core.c
+++ b/drivers/pinctrl/cirrus/pinctrl-madera-core.c
@@ -8,8 +8,10 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
@@ -1004,13 +1006,14 @@ static int madera_pin_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "%s\n", __func__);
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->dev = &pdev->dev;
priv->madera = madera;
- pdev->dev.of_node = madera->dev->of_node;
switch (madera->type) {
case CS47L15:
diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig
index 21fa21c6547b..8bdafaf40b29 100644
--- a/drivers/pinctrl/freescale/Kconfig
+++ b/drivers/pinctrl/freescale/Kconfig
@@ -173,6 +173,13 @@ config PINCTRL_IMX8ULP
help
Say Y here to enable the imx8ulp pinctrl driver
+config PINCTRL_IMXRT1050
+ bool "IMXRT1050 pinctrl driver"
+ depends on ARCH_MXC
+ select PINCTRL_IMX
+ help
+ Say Y here to enable the imxrt1050 pinctrl driver.
+
config PINCTRL_VF610
bool "Freescale Vybrid VF610 pinctrl driver"
depends on SOC_VF610
diff --git a/drivers/pinctrl/freescale/Makefile b/drivers/pinctrl/freescale/Makefile
index c44930b1b362..565a0350bf09 100644
--- a/drivers/pinctrl/freescale/Makefile
+++ b/drivers/pinctrl/freescale/Makefile
@@ -30,3 +30,4 @@ obj-$(CONFIG_PINCTRL_MXS) += pinctrl-mxs.o
obj-$(CONFIG_PINCTRL_IMX23) += pinctrl-imx23.o
obj-$(CONFIG_PINCTRL_IMX25) += pinctrl-imx25.o
obj-$(CONFIG_PINCTRL_IMX28) += pinctrl-imx28.o
+obj-$(CONFIG_PINCTRL_IMXRT1050) += pinctrl-imxrt1050.o
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index daf28bc5661d..fa3cc0b80ede 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -648,7 +648,8 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
struct device_node *child;
struct function_desc *func;
struct group_desc *grp;
- u32 i = 0;
+ const char **group_names;
+ u32 i;
dev_dbg(pctl->dev, "parse function(%d): %pOFn\n", index, np);
@@ -663,14 +664,18 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
dev_err(ipctl->dev, "no groups defined in %pOF\n", np);
return -EINVAL;
}
- func->group_names = devm_kcalloc(ipctl->dev, func->num_group_names,
- sizeof(char *), GFP_KERNEL);
- if (!func->group_names)
+
+ group_names = devm_kcalloc(ipctl->dev, func->num_group_names,
+ sizeof(char *), GFP_KERNEL);
+ if (!group_names)
return -ENOMEM;
+ i = 0;
+ for_each_child_of_node(np, child)
+ group_names[i++] = child->name;
+ func->group_names = group_names;
+ i = 0;
for_each_child_of_node(np, child) {
- func->group_names[i] = child->name;
-
grp = devm_kzalloc(ipctl->dev, sizeof(struct group_desc),
GFP_KERNEL);
if (!grp) {
diff --git a/drivers/pinctrl/freescale/pinctrl-imxrt1050.c b/drivers/pinctrl/freescale/pinctrl-imxrt1050.c
new file mode 100644
index 000000000000..11f31c90ad30
--- /dev/null
+++ b/drivers/pinctrl/freescale/pinctrl-imxrt1050.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020
+ * Author(s): Giulio Benetti <giulio.benetti@benettiengineering.com>
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/platform_device.h>
+
+#include "pinctrl-imx.h"
+
+enum imxrt1050_pads {
+ IMXRT1050_PAD_RESERVE0 = 0,
+ IMXRT1050_PAD_RESERVE1 = 1,
+ IMXRT1050_PAD_RESERVE2 = 2,
+ IMXRT1050_PAD_RESERVE3 = 3,
+ IMXRT1050_PAD_RESERVE4 = 4,
+ IMXRT1050_PAD_RESERVE5 = 5,
+ IMXRT1050_PAD_RESERVE6 = 6,
+ IMXRT1050_PAD_RESERVE7 = 7,
+ IMXRT1050_PAD_RESERVE8 = 8,
+ IMXRT1050_PAD_RESERVE9 = 9,
+ IMXRT1050_IOMUXC_GPIO1_IO00 = 10,
+ IMXRT1050_IOMUXC_GPIO1_IO01 = 11,
+ IMXRT1050_IOMUXC_GPIO1_IO02 = 12,
+ IMXRT1050_IOMUXC_GPIO1_IO03 = 13,
+ IMXRT1050_IOMUXC_GPIO1_IO04 = 14,
+ IMXRT1050_IOMUXC_GPIO1_IO05 = 15,
+ IMXRT1050_IOMUXC_GPIO1_IO06 = 16,
+ IMXRT1050_IOMUXC_GPIO1_IO07 = 17,
+ IMXRT1050_IOMUXC_GPIO1_IO08 = 18,
+ IMXRT1050_IOMUXC_GPIO1_IO09 = 19,
+ IMXRT1050_IOMUXC_GPIO1_IO10 = 20,
+ IMXRT1050_IOMUXC_GPIO1_IO11 = 21,
+ IMXRT1050_IOMUXC_GPIO1_IO12 = 22,
+ IMXRT1050_IOMUXC_GPIO1_IO13 = 23,
+ IMXRT1050_IOMUXC_GPIO1_IO14 = 24,
+ IMXRT1050_IOMUXC_GPIO1_IO15 = 25,
+ IMXRT1050_IOMUXC_ENET_MDC = 26,
+ IMXRT1050_IOMUXC_ENET_MDIO = 27,
+ IMXRT1050_IOMUXC_ENET_TD3 = 28,
+ IMXRT1050_IOMUXC_ENET_TD2 = 29,
+ IMXRT1050_IOMUXC_ENET_TD1 = 30,
+ IMXRT1050_IOMUXC_ENET_TD0 = 31,
+ IMXRT1050_IOMUXC_ENET_TX_CTL = 32,
+ IMXRT1050_IOMUXC_ENET_TXC = 33,
+ IMXRT1050_IOMUXC_ENET_RX_CTL = 34,
+ IMXRT1050_IOMUXC_ENET_RXC = 35,
+ IMXRT1050_IOMUXC_ENET_RD0 = 36,
+ IMXRT1050_IOMUXC_ENET_RD1 = 37,
+ IMXRT1050_IOMUXC_ENET_RD2 = 38,
+ IMXRT1050_IOMUXC_ENET_RD3 = 39,
+ IMXRT1050_IOMUXC_SD1_CLK = 40,
+ IMXRT1050_IOMUXC_SD1_CMD = 41,
+ IMXRT1050_IOMUXC_SD1_DATA0 = 42,
+ IMXRT1050_IOMUXC_SD1_DATA1 = 43,
+ IMXRT1050_IOMUXC_SD1_DATA2 = 44,
+ IMXRT1050_IOMUXC_SD1_DATA3 = 45,
+ IMXRT1050_IOMUXC_SD1_DATA4 = 46,
+ IMXRT1050_IOMUXC_SD1_DATA5 = 47,
+ IMXRT1050_IOMUXC_SD1_DATA6 = 48,
+ IMXRT1050_IOMUXC_SD1_DATA7 = 49,
+ IMXRT1050_IOMUXC_SD1_RESET_B = 50,
+ IMXRT1050_IOMUXC_SD1_STROBE = 51,
+ IMXRT1050_IOMUXC_SD2_CD_B = 52,
+ IMXRT1050_IOMUXC_SD2_CLK = 53,
+ IMXRT1050_IOMUXC_SD2_CMD = 54,
+ IMXRT1050_IOMUXC_SD2_DATA0 = 55,
+ IMXRT1050_IOMUXC_SD2_DATA1 = 56,
+ IMXRT1050_IOMUXC_SD2_DATA2 = 57,
+ IMXRT1050_IOMUXC_SD2_DATA3 = 58,
+ IMXRT1050_IOMUXC_SD2_RESET_B = 59,
+ IMXRT1050_IOMUXC_SD2_WP = 60,
+ IMXRT1050_IOMUXC_NAND_ALE = 61,
+ IMXRT1050_IOMUXC_NAND_CE0 = 62,
+ IMXRT1050_IOMUXC_NAND_CE1 = 63,
+ IMXRT1050_IOMUXC_NAND_CE2 = 64,
+ IMXRT1050_IOMUXC_NAND_CE3 = 65,
+ IMXRT1050_IOMUXC_NAND_CLE = 66,
+ IMXRT1050_IOMUXC_NAND_DATA00 = 67,
+ IMXRT1050_IOMUXC_NAND_DATA01 = 68,
+ IMXRT1050_IOMUXC_NAND_DATA02 = 69,
+ IMXRT1050_IOMUXC_NAND_DATA03 = 70,
+ IMXRT1050_IOMUXC_NAND_DATA04 = 71,
+ IMXRT1050_IOMUXC_NAND_DATA05 = 72,
+ IMXRT1050_IOMUXC_NAND_DATA06 = 73,
+ IMXRT1050_IOMUXC_NAND_DATA07 = 74,
+ IMXRT1050_IOMUXC_NAND_DQS = 75,
+ IMXRT1050_IOMUXC_NAND_RE_B = 76,
+ IMXRT1050_IOMUXC_NAND_READY_B = 77,
+ IMXRT1050_IOMUXC_NAND_WE_B = 78,
+ IMXRT1050_IOMUXC_NAND_WP_B = 79,
+ IMXRT1050_IOMUXC_SAI5_RXFS = 80,
+ IMXRT1050_IOMUXC_SAI5_RXC = 81,
+ IMXRT1050_IOMUXC_SAI5_RXD0 = 82,
+ IMXRT1050_IOMUXC_SAI5_RXD1 = 83,
+ IMXRT1050_IOMUXC_SAI5_RXD2 = 84,
+ IMXRT1050_IOMUXC_SAI5_RXD3 = 85,
+ IMXRT1050_IOMUXC_SAI5_MCLK = 86,
+ IMXRT1050_IOMUXC_SAI1_RXFS = 87,
+ IMXRT1050_IOMUXC_SAI1_RXC = 88,
+ IMXRT1050_IOMUXC_SAI1_RXD0 = 89,
+ IMXRT1050_IOMUXC_SAI1_RXD1 = 90,
+ IMXRT1050_IOMUXC_SAI1_RXD2 = 91,
+ IMXRT1050_IOMUXC_SAI1_RXD3 = 92,
+ IMXRT1050_IOMUXC_SAI1_RXD4 = 93,
+ IMXRT1050_IOMUXC_SAI1_RXD5 = 94,
+ IMXRT1050_IOMUXC_SAI1_RXD6 = 95,
+ IMXRT1050_IOMUXC_SAI1_RXD7 = 96,
+ IMXRT1050_IOMUXC_SAI1_TXFS = 97,
+ IMXRT1050_IOMUXC_SAI1_TXC = 98,
+ IMXRT1050_IOMUXC_SAI1_TXD0 = 99,
+ IMXRT1050_IOMUXC_SAI1_TXD1 = 100,
+ IMXRT1050_IOMUXC_SAI1_TXD2 = 101,
+ IMXRT1050_IOMUXC_SAI1_TXD3 = 102,
+ IMXRT1050_IOMUXC_SAI1_TXD4 = 103,
+ IMXRT1050_IOMUXC_SAI1_TXD5 = 104,
+ IMXRT1050_IOMUXC_SAI1_TXD6 = 105,
+ IMXRT1050_IOMUXC_SAI1_TXD7 = 106,
+ IMXRT1050_IOMUXC_SAI1_MCLK = 107,
+ IMXRT1050_IOMUXC_SAI2_RXFS = 108,
+ IMXRT1050_IOMUXC_SAI2_RXC = 109,
+ IMXRT1050_IOMUXC_SAI2_RXD0 = 110,
+ IMXRT1050_IOMUXC_SAI2_TXFS = 111,
+ IMXRT1050_IOMUXC_SAI2_TXC = 112,
+ IMXRT1050_IOMUXC_SAI2_TXD0 = 113,
+ IMXRT1050_IOMUXC_SAI2_MCLK = 114,
+ IMXRT1050_IOMUXC_SAI3_RXFS = 115,
+ IMXRT1050_IOMUXC_SAI3_RXC = 116,
+ IMXRT1050_IOMUXC_SAI3_RXD = 117,
+ IMXRT1050_IOMUXC_SAI3_TXFS = 118,
+ IMXRT1050_IOMUXC_SAI3_TXC = 119,
+ IMXRT1050_IOMUXC_SAI3_TXD = 120,
+ IMXRT1050_IOMUXC_SAI3_MCLK = 121,
+ IMXRT1050_IOMUXC_SPDIF_TX = 122,
+ IMXRT1050_IOMUXC_SPDIF_RX = 123,
+ IMXRT1050_IOMUXC_SPDIF_EXT_CLK = 124,
+ IMXRT1050_IOMUXC_ECSPI1_SCLK = 125,
+ IMXRT1050_IOMUXC_ECSPI1_MOSI = 126,
+ IMXRT1050_IOMUXC_ECSPI1_MISO = 127,
+ IMXRT1050_IOMUXC_ECSPI1_SS0 = 128,
+ IMXRT1050_IOMUXC_ECSPI2_SCLK = 129,
+ IMXRT1050_IOMUXC_ECSPI2_MOSI = 130,
+ IMXRT1050_IOMUXC_ECSPI2_MISO = 131,
+ IMXRT1050_IOMUXC_ECSPI2_SS0 = 132,
+ IMXRT1050_IOMUXC_I2C1_SCL = 133,
+ IMXRT1050_IOMUXC_I2C1_SDA = 134,
+ IMXRT1050_IOMUXC_I2C2_SCL = 135,
+ IMXRT1050_IOMUXC_I2C2_SDA = 136,
+ IMXRT1050_IOMUXC_I2C3_SCL = 137,
+ IMXRT1050_IOMUXC_I2C3_SDA = 138,
+ IMXRT1050_IOMUXC_I2C4_SCL = 139,
+ IMXRT1050_IOMUXC_I2C4_SDA = 140,
+ IMXRT1050_IOMUXC_UART1_RXD = 141,
+ IMXRT1050_IOMUXC_UART1_TXD = 142,
+ IMXRT1050_IOMUXC_UART2_RXD = 143,
+ IMXRT1050_IOMUXC_UART2_TXD = 144,
+ IMXRT1050_IOMUXC_UART3_RXD = 145,
+ IMXRT1050_IOMUXC_UART3_TXD = 146,
+ IMXRT1050_IOMUXC_UART4_RXD = 147,
+ IMXRT1050_IOMUXC_UART4_TXD = 148,
+};
+
+/* Pad names for the pinmux subsystem */
+static const struct pinctrl_pin_desc imxrt1050_pinctrl_pads[] = {
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE0),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE1),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE2),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE3),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE4),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE5),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE6),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE7),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE8),
+ IMX_PINCTRL_PIN(IMXRT1050_PAD_RESERVE9),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO00),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO01),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO02),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO03),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO04),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO05),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO06),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO07),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO08),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO09),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO10),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO11),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO12),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO13),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO14),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_GPIO1_IO15),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_MDC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_MDIO),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_TD3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_TD2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_TD1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_TD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_TX_CTL),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_TXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_RX_CTL),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_RXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_RD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_RD1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_RD2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ENET_RD3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_CLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_CMD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA4),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA5),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA6),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_DATA7),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_RESET_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD1_STROBE),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_CD_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_CLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_CMD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_DATA0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_DATA1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_DATA2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_DATA3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_RESET_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SD2_WP),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_ALE),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_CE0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_CE1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_CE2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_CE3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_CLE),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA00),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA01),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA02),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA03),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA04),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA05),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA06),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DATA07),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_DQS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_RE_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_READY_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_WE_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_NAND_WP_B),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_RXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_RXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_RXD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_RXD1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_RXD2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_RXD3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI5_MCLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD4),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD5),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD6),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_RXD7),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD1),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD2),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD3),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD4),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD5),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD6),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_TXD7),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI1_MCLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_RXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_RXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_RXD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_TXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_TXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_TXD0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI2_MCLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_RXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_RXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_RXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_TXFS),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_TXC),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_TXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SAI3_MCLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SPDIF_TX),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SPDIF_RX),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_SPDIF_EXT_CLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI1_SCLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI1_MOSI),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI1_MISO),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI1_SS0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI2_SCLK),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI2_MOSI),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI2_MISO),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_ECSPI2_SS0),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C1_SCL),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C1_SDA),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C2_SCL),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C2_SDA),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C3_SCL),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C3_SDA),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C4_SCL),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_I2C4_SDA),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART1_RXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART1_TXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART2_RXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART2_TXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART3_RXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART3_TXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART4_RXD),
+ IMX_PINCTRL_PIN(IMXRT1050_IOMUXC_UART4_TXD),
+};
+
+static const struct imx_pinctrl_soc_info imxrt1050_pinctrl_info = {
+ .pins = imxrt1050_pinctrl_pads,
+ .npins = ARRAY_SIZE(imxrt1050_pinctrl_pads),
+ .gpr_compatible = "fsl,imxrt1050-iomuxc-gpr",
+};
+
+static const struct of_device_id imxrt1050_pinctrl_of_match[] = {
+ { .compatible = "fsl,imxrt1050-iomuxc", .data = &imxrt1050_pinctrl_info, },
+ { /* sentinel */ }
+};
+
+static int imxrt1050_pinctrl_probe(struct platform_device *pdev)
+{
+ return imx_pinctrl_probe(pdev, &imxrt1050_pinctrl_info);
+}
+
+static struct platform_driver imxrt1050_pinctrl_driver = {
+ .driver = {
+ .name = "imxrt1050-pinctrl",
+ .of_match_table = of_match_ptr(imxrt1050_pinctrl_of_match),
+ .suppress_bind_attrs = true,
+ },
+ .probe = imxrt1050_pinctrl_probe,
+};
+
+static int __init imxrt1050_pinctrl_init(void)
+{
+ return platform_driver_register(&imxrt1050_pinctrl_driver);
+}
+arch_initcall(imxrt1050_pinctrl_init);
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 8f23d126c6a7..4c01333e1406 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1577,7 +1577,7 @@ static int byt_gpio_probe(struct intel_pinctrl *vg)
vg->irqchip.irq_mask = byt_irq_mask,
vg->irqchip.irq_unmask = byt_irq_unmask,
vg->irqchip.irq_set_type = byt_irq_type,
- vg->irqchip.flags = IRQCHIP_SKIP_SET_WAKE,
+ vg->irqchip.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_SET_TYPE_MASKED,
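+	/* SET_TYPE_MASKED: the irq core masks this IRQ while its trigger type is changed */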
girq = &gc->irq;
girq->chip = &vg->irqchip;
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 980099028cf8..1d5818269076 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -73,6 +73,8 @@ struct intel_pad_context {
u32 padctrl1;
};
+#define CHV_INVALID_HWIRQ ((unsigned int)INVALID_HWIRQ)
+
/**
* struct intel_community_context - community context for Cherryview
* @intr_lines: Mapping between 16 HW interrupt wires and GPIO offset (in GPIO number space)
@@ -709,6 +711,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int function, unsigned int group)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = pctrl->dev;
const struct intel_pingroup *grp;
unsigned long flags;
int i;
@@ -720,9 +723,8 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
/* Check first that the pad is not locked */
for (i = 0; i < grp->npins; i++) {
if (chv_pad_locked(pctrl, grp->pins[i])) {
- dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n",
- grp->pins[i]);
raw_spin_unlock_irqrestore(&chv_lock, flags);
+ dev_warn(dev, "unable to set mode for locked pin %u\n", grp->pins[i]);
return -EBUSY;
}
}
@@ -757,8 +759,8 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev,
value |= CHV_PADCTRL1_INVRXTX_TXENABLE;
chv_writel(pctrl, pin, CHV_PADCTRL1, value);
- dev_dbg(pctrl->dev, "configured pin %u mode %u OE %sinverted\n",
- pin, mode, invert_oe ? "" : "not ");
+ dev_dbg(dev, "configured pin %u mode %u OE %sinverted\n", pin, mode,
+ invert_oe ? "" : "not ");
}
raw_spin_unlock_irqrestore(&chv_lock, flags);
@@ -812,7 +814,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev,
/* Reset the interrupt mapping */
for (i = 0; i < ARRAY_SIZE(cctx->intr_lines); i++) {
if (cctx->intr_lines[i] == offset) {
- cctx->intr_lines[i] = 0;
+ cctx->intr_lines[i] = CHV_INVALID_HWIRQ;
break;
}
}
@@ -1058,6 +1060,7 @@ static int chv_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned long *configs, unsigned int nconfigs)
{
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = pctrl->dev;
enum pin_config_param param;
int i, ret;
u32 arg;
@@ -1094,8 +1097,7 @@ static int chv_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
return -ENOTSUPP;
}
- dev_dbg(pctrl->dev, "pin %d set config %d arg %u\n", pin,
- param, arg);
+ dev_dbg(dev, "pin %d set config %d arg %u\n", pin, param, arg);
}
return 0;
@@ -1302,6 +1304,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
if (irqd_get_trigger_type(d) == IRQ_TYPE_NONE) {
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ struct device *dev = pctrl->dev;
struct intel_community_context *cctx = &pctrl->context.communities[0];
unsigned int pin = irqd_to_hwirq(d);
irq_flow_handler_t handler;
@@ -1319,8 +1322,10 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
else
handler = handle_edge_irq;
- if (!cctx->intr_lines[intsel]) {
+ if (cctx->intr_lines[intsel] == CHV_INVALID_HWIRQ) {
irq_set_handler_locked(d, handler);
+ dev_dbg(dev, "using interrupt line %u for IRQ_TYPE_NONE on pin %u\n",
+ intsel, pin);
cctx->intr_lines[intsel] = pin;
}
raw_spin_unlock_irqrestore(&chv_lock, flags);
@@ -1330,17 +1335,74 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d)
return 0;
}
+static int chv_gpio_set_intr_line(struct intel_pinctrl *pctrl, unsigned int pin)
+{
+ struct device *dev = pctrl->dev;
+ struct intel_community_context *cctx = &pctrl->context.communities[0];
+ const struct intel_community *community = &pctrl->communities[0];
+ u32 value, intsel;
+ int i;
+
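+	/* INTSEL in PADCTRL0 holds the interrupt line the BIOS routed this pad to */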
+ value = chv_readl(pctrl, pin, CHV_PADCTRL0);
+ intsel = (value & CHV_PADCTRL0_INTSEL_MASK) >> CHV_PADCTRL0_INTSEL_SHIFT;
+
+ if (cctx->intr_lines[intsel] == pin)
+ return 0;
+
+ if (cctx->intr_lines[intsel] == CHV_INVALID_HWIRQ) {
+ dev_dbg(dev, "using interrupt line %u for pin %u\n", intsel, pin);
+ cctx->intr_lines[intsel] = pin;
+ return 0;
+ }
+
+ /*
+ * The interrupt line selected by the BIOS is already in use by
+	 * another pin; this is a known BIOS bug found on several models.
+	 * But it may also be caused by Linux deciding to use as an IRQ a
+	 * pin which the BIOS authors did not expect to be used as such,
+ * so log this at info level only.
+ */
+ dev_info(dev, "interrupt line %u is used by both pin %u and pin %u\n", intsel,
+ cctx->intr_lines[intsel], pin);
+
+ if (chv_pad_locked(pctrl, pin))
+ return -EBUSY;
+
+ /*
+	 * The BIOS fills the interrupt lines from 0 counting up; start at
+	 * the other end to find a free interrupt line to work around this.
+ */
+ for (i = community->nirqs - 1; i >= 0; i--) {
+ if (cctx->intr_lines[i] == CHV_INVALID_HWIRQ)
+ break;
+ }
+ if (i < 0)
+ return -EBUSY;
+
+ dev_info(dev, "changing the interrupt line for pin %u to %d\n", pin, i);
+
+ value = (value & ~CHV_PADCTRL0_INTSEL_MASK) | (i << CHV_PADCTRL0_INTSEL_SHIFT);
+ chv_writel(pctrl, pin, CHV_PADCTRL0, value);
+ cctx->intr_lines[i] = pin;
+
+ return 0;
+}
+
static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
- struct intel_community_context *cctx = &pctrl->context.communities[0];
unsigned int pin = irqd_to_hwirq(d);
unsigned long flags;
u32 value;
+ int ret;
raw_spin_lock_irqsave(&chv_lock, flags);
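+	/* Claim (or remap) the shared interrupt line before changing the trigger mode */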
+ ret = chv_gpio_set_intr_line(pctrl, pin);
+ if (ret)
+ goto out_unlock;
+
/*
* Pins which can be used as shared interrupt are configured in
* BIOS. Driver trusts BIOS configurations and assigns different
@@ -1375,26 +1437,22 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type)
chv_writel(pctrl, pin, CHV_PADCTRL1, value);
}
- value = chv_readl(pctrl, pin, CHV_PADCTRL0);
- value &= CHV_PADCTRL0_INTSEL_MASK;
- value >>= CHV_PADCTRL0_INTSEL_SHIFT;
-
- cctx->intr_lines[value] = pin;
-
if (type & IRQ_TYPE_EDGE_BOTH)
irq_set_handler_locked(d, handle_edge_irq);
else if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(d, handle_level_irq);
+out_unlock:
raw_spin_unlock_irqrestore(&chv_lock, flags);
- return 0;
+ return ret;
}
static void chv_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+ struct device *dev = pctrl->dev;
const struct intel_community *community = &pctrl->communities[0];
struct intel_community_context *cctx = &pctrl->context.communities[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1412,6 +1470,12 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
unsigned int offset;
offset = cctx->intr_lines[intr_line];
+ if (offset == CHV_INVALID_HWIRQ) {
+ dev_warn_once(dev, "interrupt on unmapped interrupt line %u\n", intr_line);
+ /* Some boards expect hwirq 0 to trigger in this case */
+ offset = 0;
+ }
+
generic_handle_domain_irq(gc->irq.domain, offset);
}
@@ -1512,17 +1576,16 @@ static int chv_gpio_irq_init_hw(struct gpio_chip *chip)
static int chv_gpio_add_pin_ranges(struct gpio_chip *chip)
{
struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
+ struct device *dev = pctrl->dev;
const struct intel_community *community = &pctrl->communities[0];
const struct intel_padgroup *gpp;
int ret, i;
for (i = 0; i < community->ngpps; i++) {
gpp = &community->gpps[i];
- ret = gpiochip_add_pin_range(chip, dev_name(pctrl->dev),
- gpp->base, gpp->base,
- gpp->size);
+ ret = gpiochip_add_pin_range(chip, dev_name(dev), gpp->base, gpp->base, gpp->size);
if (ret) {
- dev_err(pctrl->dev, "failed to add GPIO pin range\n");
+ dev_err(dev, "failed to add GPIO pin range\n");
return ret;
}
}
@@ -1535,15 +1598,16 @@ static int chv_gpio_probe(struct intel_pinctrl *pctrl, int irq)
const struct intel_community *community = &pctrl->communities[0];
const struct intel_padgroup *gpp;
struct gpio_chip *chip = &pctrl->chip;
+ struct device *dev = pctrl->dev;
bool need_valid_mask = !dmi_check_system(chv_no_valid_mask);
int ret, i, irq_base;
*chip = chv_gpio_chip;
chip->ngpio = pctrl->soc->pins[pctrl->soc->npins - 1].number + 1;
- chip->label = dev_name(pctrl->dev);
+ chip->label = dev_name(dev);
chip->add_pin_ranges = chv_gpio_add_pin_ranges;
- chip->parent = pctrl->dev;
+ chip->parent = dev;
chip->base = -1;
pctrl->irq = irq;
@@ -1565,17 +1629,16 @@ static int chv_gpio_probe(struct intel_pinctrl *pctrl, int irq)
if (need_valid_mask) {
chip->irq.init_valid_mask = chv_init_irq_valid_mask;
} else {
- irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0,
- pctrl->soc->npins, NUMA_NO_NODE);
+ irq_base = devm_irq_alloc_descs(dev, -1, 0, pctrl->soc->npins, NUMA_NO_NODE);
if (irq_base < 0) {
- dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n");
+ dev_err(dev, "Failed to allocate IRQ numbers\n");
return irq_base;
}
}
- ret = devm_gpiochip_add_data(pctrl->dev, chip, pctrl);
+ ret = devm_gpiochip_add_data(dev, chip, pctrl);
if (ret) {
- dev_err(pctrl->dev, "Failed to register gpiochip\n");
+ dev_err(dev, "Failed to register gpiochip\n");
return ret;
}
@@ -1617,11 +1680,13 @@ static acpi_status chv_pinctrl_mmio_access_handler(u32 function,
static int chv_pinctrl_probe(struct platform_device *pdev)
{
const struct intel_pinctrl_soc_data *soc_data;
+ struct intel_community_context *cctx;
struct intel_community *community;
struct device *dev = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(dev);
struct intel_pinctrl *pctrl;
acpi_status status;
+ unsigned int i;
int ret, irq;
soc_data = intel_pinctrl_get_soc_data(pdev);
@@ -1663,6 +1728,10 @@ static int chv_pinctrl_probe(struct platform_device *pdev)
if (!pctrl->context.communities)
return -ENOMEM;
+ cctx = &pctrl->context.communities[0];
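+	/* Mark every interrupt line as unmapped until a pin claims it */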
+ for (i = 0; i < ARRAY_SIZE(cctx->intr_lines); i++)
+ cctx->intr_lines[i] = CHV_INVALID_HWIRQ;
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -1767,15 +1836,15 @@ static int chv_pinctrl_resume_noirq(struct device *dev)
val &= ~CHV_PADCTRL0_GPIORXSTATE;
if (ctx->padctrl0 != val) {
chv_writel(pctrl, desc->number, CHV_PADCTRL0, ctx->padctrl0);
- dev_dbg(pctrl->dev, "restored pin %2u ctrl0 0x%08x\n",
- desc->number, chv_readl(pctrl, desc->number, CHV_PADCTRL0));
+ dev_dbg(dev, "restored pin %2u ctrl0 0x%08x\n", desc->number,
+ chv_readl(pctrl, desc->number, CHV_PADCTRL0));
}
val = chv_readl(pctrl, desc->number, CHV_PADCTRL1);
if (ctx->padctrl1 != val) {
chv_writel(pctrl, desc->number, CHV_PADCTRL1, ctx->padctrl1);
- dev_dbg(pctrl->dev, "restored pin %2u ctrl1 0x%08x\n",
- desc->number, chv_readl(pctrl, desc->number, CHV_PADCTRL1));
+ dev_dbg(dev, "restored pin %2u ctrl1 0x%08x\n", desc->number,
+ chv_readl(pctrl, desc->number, CHV_PADCTRL1));
}
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 85750974d182..826d494f3cc6 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -451,8 +451,8 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0)
value &= ~PADCFG0_PMODE_MASK;
value |= PADCFG0_PMODE_GPIO;
- /* Disable input and output buffers */
- value |= PADCFG0_GPIORXDIS;
+ /* Disable TX buffer and enable RX (this will be input) */
+ value &= ~PADCFG0_GPIORXDIS;
value |= PADCFG0_GPIOTXDIS;
/* Disable SCI/SMI/NMI generation */
@@ -497,9 +497,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
intel_gpio_set_gpio_mode(padcfg0);
- /* Disable TX buffer and enable RX (this will be input) */
- __intel_gpio_set_direction(padcfg0, true);
-
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
@@ -1115,9 +1112,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned int type)
intel_gpio_set_gpio_mode(reg);
- /* Disable TX buffer and enable RX (this will be input) */
- __intel_gpio_set_direction(reg, true);
-
value = readl(reg);
value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV);
@@ -1216,6 +1210,39 @@ static irqreturn_t intel_gpio_irq(int irq, void *data)
return IRQ_RETVAL(ret);
}
+static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
+{
+ int i;
+
+ for (i = 0; i < pctrl->ncommunities; i++) {
+ const struct intel_community *community;
+ void __iomem *base;
+ unsigned int gpp;
+
+ community = &pctrl->communities[i];
+ base = community->regs;
+
+ for (gpp = 0; gpp < community->ngpps; gpp++) {
+ /* Mask and clear all interrupts */
+ writel(0, base + community->ie_offset + gpp * 4);
+ writel(0xffff, base + community->is_offset + gpp * 4);
+ }
+ }
+}
+
+static int intel_gpio_irq_init_hw(struct gpio_chip *gc)
+{
+ struct intel_pinctrl *pctrl = gpiochip_get_data(gc);
+
+ /*
+ * Make sure the interrupt lines are in a proper state before
+ * further configuration.
+ */
+ intel_gpio_irq_init(pctrl);
+
+ return 0;
+}
+
static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl,
const struct intel_community *community)
{
@@ -1320,6 +1347,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq)
girq->num_parents = 0;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_bad_irq;
+ girq->init_hw = intel_gpio_irq_init_hw;
ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
if (ret) {
@@ -1695,26 +1723,6 @@ int intel_pinctrl_suspend_noirq(struct device *dev)
}
EXPORT_SYMBOL_GPL(intel_pinctrl_suspend_noirq);
-static void intel_gpio_irq_init(struct intel_pinctrl *pctrl)
-{
- size_t i;
-
- for (i = 0; i < pctrl->ncommunities; i++) {
- const struct intel_community *community;
- void __iomem *base;
- unsigned int gpp;
-
- community = &pctrl->communities[i];
- base = community->regs;
-
- for (gpp = 0; gpp < community->ngpps; gpp++) {
- /* Mask and clear all interrupts */
- writel(0, base + community->ie_offset + gpp * 4);
- writel(0xffff, base + community->is_offset + gpp * 4);
- }
- }
-}
-
static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value)
{
u32 curr, updated;
diff --git a/drivers/pinctrl/mediatek/pinctrl-moore.c b/drivers/pinctrl/mediatek/pinctrl-moore.c
index ad3b67163973..5bfaa84839c7 100644
--- a/drivers/pinctrl/mediatek/pinctrl-moore.c
+++ b/drivers/pinctrl/mediatek/pinctrl-moore.c
@@ -519,7 +519,7 @@ static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
return mtk_eint_set_debounce(hw->eint, desc->eint.eint_n, debounce);
}
-static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
+static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
{
struct gpio_chip *chip = &hw->chip;
int ret;
@@ -536,7 +536,6 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
chip->ngpio = hw->soc->npins;
- chip->of_node = np;
chip->of_gpio_n_cells = 2;
ret = gpiochip_add_data(chip, hw);
@@ -550,7 +549,7 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
* Documentation/devicetree/bindings/gpio/gpio.txt on how to
* bind pinctrl and gpio drivers via the "gpio-ranges" property.
*/
- if (!of_find_property(np, "gpio-ranges", NULL)) {
+ if (!of_find_property(hw->dev->of_node, "gpio-ranges", NULL)) {
ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
chip->ngpio);
if (ret < 0) {
@@ -691,7 +690,7 @@ int mtk_moore_pinctrl_probe(struct platform_device *pdev,
"Failed to add EINT, but pinctrl still can work\n");
/* Build gpiochip should be after pinctrl_enable is done */
- err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+ err = mtk_build_gpiochip(hw);
if (err) {
dev_err(&pdev->dev, "Failed to add gpio_chip\n");
return err;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
index 53779822348d..e1ae3beb9f72 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c
@@ -815,6 +815,8 @@ static int mtk_pinconf_bias_get_rsel(struct mtk_pinctrl *hw,
goto out;
err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_PD, &pd);
+ if (err)
+ goto out;
if (pu == 0 && pd == 0) {
*pullup = 0;
diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c
index d4e02c5d74a8..f9f9110f2107 100644
--- a/drivers/pinctrl/mediatek/pinctrl-paris.c
+++ b/drivers/pinctrl/mediatek/pinctrl-paris.c
@@ -581,7 +581,7 @@ ssize_t mtk_pctrl_show_one_pin(struct mtk_pinctrl *hw,
{
int pinmux, pullup, pullen, len = 0, r1 = -1, r0 = -1, rsel = -1;
const struct mtk_pin_desc *desc;
- u32 try_all_type;
+ u32 try_all_type = 0;
if (gpio >= hw->soc->npins)
return -EINVAL;
@@ -895,7 +895,7 @@ static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
return mtk_eint_set_debounce(hw->eint, desc->eint.eint_n, debounce);
}
-static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
+static int mtk_build_gpiochip(struct mtk_pinctrl *hw)
{
struct gpio_chip *chip = &hw->chip;
int ret;
@@ -913,7 +913,6 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
chip->set_config = mtk_gpio_set_config;
chip->base = -1;
chip->ngpio = hw->soc->npins;
- chip->of_node = np;
chip->of_gpio_n_cells = 2;
ret = gpiochip_add_data(chip, hw);
@@ -1037,7 +1036,7 @@ int mtk_paris_pinctrl_probe(struct platform_device *pdev,
"Failed to add EINT, but pinctrl still can work\n");
/* Build gpiochip should be after pinctrl_enable is done */
- err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+ err = mtk_build_gpiochip(hw);
if (err) {
dev_err(&pdev->dev, "Failed to add gpio_chip\n");
return err;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
index 5cb018f98800..08cad14042e2 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c
@@ -23,6 +23,7 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/string_helpers.h>
#include "../pinctrl-utils.h"
@@ -341,12 +342,12 @@ static int armada_37xx_pmx_set_by_name(struct pinctrl_dev *pctldev,
struct armada_37xx_pin_group *grp)
{
struct armada_37xx_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ struct device *dev = info->dev;
unsigned int reg = SELECTION;
unsigned int mask = grp->reg_mask;
int func, val;
- dev_dbg(info->dev, "enable function %s group %s\n",
- name, grp->name);
+ dev_dbg(dev, "enable function %s group %s\n", name, grp->name);
func = match_string(grp->funcs, NB_FUNCS, name);
if (func < 0)
@@ -722,25 +723,22 @@ static unsigned int armada_37xx_irq_startup(struct irq_data *d)
static int armada_37xx_irqchip_register(struct platform_device *pdev,
struct armada_37xx_pinctrl *info)
{
- struct device_node *np = info->dev->of_node;
struct gpio_chip *gc = &info->gpio_chip;
struct irq_chip *irqchip = &info->irq_chip;
struct gpio_irq_chip *girq = &gc->irq;
struct device *dev = &pdev->dev;
- struct resource res;
+ struct device_node *np;
int ret = -ENODEV, i, nr_irq_parent;
/* Check if we have at least one gpio-controller child node */
- for_each_child_of_node(info->dev->of_node, np) {
+ for_each_child_of_node(dev->of_node, np) {
if (of_property_read_bool(np, "gpio-controller")) {
ret = 0;
break;
}
}
- if (ret) {
- dev_err(dev, "no gpio-controller child node\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "no gpio-controller child node\n");
nr_irq_parent = of_irq_count(np);
spin_lock_init(&info->irq_lock);
@@ -750,12 +748,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
return 0;
}
- if (of_address_to_resource(info->dev->of_node, 1, &res)) {
- dev_err(dev, "cannot find IO resource\n");
- return -ENOENT;
- }
-
- info->base = devm_ioremap_resource(info->dev, &res);
+ info->base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(info->base))
return PTR_ERR(info->base);
@@ -774,8 +767,7 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
* the chained irq with all of them.
*/
girq->num_parents = nr_irq_parent;
- girq->parents = devm_kcalloc(&pdev->dev, nr_irq_parent,
- sizeof(*girq->parents), GFP_KERNEL);
+ girq->parents = devm_kcalloc(dev, nr_irq_parent, sizeof(*girq->parents), GFP_KERNEL);
if (!girq->parents)
return -ENOMEM;
for (i = 0; i < nr_irq_parent; i++) {
@@ -794,11 +786,12 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev,
static int armada_37xx_gpiochip_register(struct platform_device *pdev,
struct armada_37xx_pinctrl *info)
{
+ struct device *dev = &pdev->dev;
struct device_node *np;
struct gpio_chip *gc;
int ret = -ENODEV;
- for_each_child_of_node(info->dev->of_node, np) {
+ for_each_child_of_node(dev->of_node, np) {
if (of_find_property(np, "gpio-controller", NULL)) {
ret = 0;
break;
@@ -811,7 +804,7 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
gc = &info->gpio_chip;
gc->ngpio = info->data->nr_pins;
- gc->parent = &pdev->dev;
+ gc->parent = dev;
gc->base = -1;
gc->of_node = np;
gc->label = info->data->name;
@@ -819,11 +812,8 @@ static int armada_37xx_gpiochip_register(struct platform_device *pdev,
ret = armada_37xx_irqchip_register(pdev, info);
if (ret)
return ret;
- ret = devm_gpiochip_add_data(&pdev->dev, gc, info);
- if (ret)
- return ret;
- return 0;
+ return devm_gpiochip_add_data(dev, gc, info);
}
/**
@@ -874,13 +864,13 @@ static int armada_37xx_add_function(struct armada_37xx_pmx_func *funcs,
static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
{
int n, num = 0, funcsize = info->data->nr_pins;
+ struct device *dev = info->dev;
for (n = 0; n < info->ngroups; n++) {
struct armada_37xx_pin_group *grp = &info->groups[n];
int i, j, f;
- grp->pins = devm_kcalloc(info->dev,
- grp->npins + grp->extra_npins,
+ grp->pins = devm_kcalloc(dev, grp->npins + grp->extra_npins,
sizeof(*grp->pins),
GFP_KERNEL);
if (!grp->pins)
@@ -898,8 +888,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
ret = armada_37xx_add_function(info->funcs, &funcsize,
grp->funcs[f]);
if (ret == -EOVERFLOW)
- dev_err(info->dev,
- "More functions than pins(%d)\n",
+ dev_err(dev, "More functions than pins(%d)\n",
info->data->nr_pins);
if (ret < 0)
continue;
@@ -913,7 +902,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
}
/**
- * armada_37xx_fill_funcs() - complete the funcs array
+ * armada_37xx_fill_func() - complete the funcs array
* @info: info driver instance
*
* Based on the data available from the armada_37xx_pin_group array
@@ -925,6 +914,7 @@ static int armada_37xx_fill_group(struct armada_37xx_pinctrl *info)
static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
{
struct armada_37xx_pmx_func *funcs = info->funcs;
+ struct device *dev = info->dev;
int n;
for (n = 0; n < info->nfuncs; n++) {
@@ -932,8 +922,7 @@ static int armada_37xx_fill_func(struct armada_37xx_pinctrl *info)
const char **groups;
int g;
- funcs[n].groups = devm_kcalloc(info->dev,
- funcs[n].ngroups,
+ funcs[n].groups = devm_kcalloc(dev, funcs[n].ngroups,
sizeof(*(funcs[n].groups)),
GFP_KERNEL);
if (!funcs[n].groups)
@@ -962,6 +951,8 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
const struct armada_37xx_pin_data *pin_data = info->data;
struct pinctrl_desc *ctrldesc = &info->pctl;
struct pinctrl_pin_desc *pindesc, *pdesc;
+ struct device *dev = &pdev->dev;
+ char **pin_names;
int pin, ret;
info->groups = pin_data->groups;
@@ -973,20 +964,21 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
ctrldesc->pmxops = &armada_37xx_pmx_ops;
ctrldesc->confops = &armada_37xx_pinconf_ops;
- pindesc = devm_kcalloc(&pdev->dev,
- pin_data->nr_pins, sizeof(*pindesc),
- GFP_KERNEL);
+ pindesc = devm_kcalloc(dev, pin_data->nr_pins, sizeof(*pindesc), GFP_KERNEL);
if (!pindesc)
return -ENOMEM;
ctrldesc->pins = pindesc;
ctrldesc->npins = pin_data->nr_pins;
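+	/* devm_kasprintf_strarray() builds "<name>-0" .. "<name>-<N-1>" and ties their lifetime to the device */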
+ pin_names = devm_kasprintf_strarray(dev, pin_data->name, pin_data->nr_pins);
+ if (IS_ERR(pin_names))
+ return PTR_ERR(pin_names);
+
pdesc = pindesc;
for (pin = 0; pin < pin_data->nr_pins; pin++) {
pdesc->number = pin;
- pdesc->name = kasprintf(GFP_KERNEL, "%s-%d",
- pin_data->name, pin);
+ pdesc->name = pin_names[pin];
pdesc++;
}
@@ -994,14 +986,10 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
* we allocate functions for number of pins and hope there are
* fewer unique functions than pins available
*/
- info->funcs = devm_kcalloc(&pdev->dev,
- pin_data->nr_pins,
- sizeof(struct armada_37xx_pmx_func),
- GFP_KERNEL);
+ info->funcs = devm_kcalloc(dev, pin_data->nr_pins, sizeof(*info->funcs), GFP_KERNEL);
if (!info->funcs)
return -ENOMEM;
-
ret = armada_37xx_fill_group(info);
if (ret)
return ret;
@@ -1010,11 +998,9 @@ static int armada_37xx_pinctrl_register(struct platform_device *pdev,
if (ret)
return ret;
- info->pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc, info);
- if (IS_ERR(info->pctl_dev)) {
- dev_err(&pdev->dev, "could not register pinctrl driver\n");
- return PTR_ERR(info->pctl_dev);
- }
+ info->pctl_dev = devm_pinctrl_register(dev, ctrldesc, info);
+ if (IS_ERR(info->pctl_dev))
+ return dev_err_probe(dev, PTR_ERR(info->pctl_dev), "could not register pinctrl driver\n");
return 0;
}
@@ -1143,18 +1129,15 @@ static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev)
struct regmap *regmap;
int ret;
- info = devm_kzalloc(dev, sizeof(struct armada_37xx_pinctrl),
- GFP_KERNEL);
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
info->dev = dev;
regmap = syscon_node_to_regmap(np);
- if (IS_ERR(regmap)) {
- dev_err(&pdev->dev, "cannot get regmap\n");
- return PTR_ERR(regmap);
- }
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap), "cannot get regmap\n");
info->regmap = regmap;
info->data = of_device_get_match_data(dev);
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 22e8d4c4040e..f8edcc88ac01 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -46,6 +46,7 @@ static const struct pin_config_item conf_items[] = {
PCONFDUMP(PIN_CONFIG_MODE_LOW_POWER, "pin low power", "mode", true),
PCONFDUMP(PIN_CONFIG_OUTPUT_ENABLE, "output enabled", NULL, false),
PCONFDUMP(PIN_CONFIG_OUTPUT, "pin output", "level", true),
+ PCONFDUMP(PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS, "output impedance", "ohms", true),
PCONFDUMP(PIN_CONFIG_POWER_SOURCE, "pin power source", "selector", true),
PCONFDUMP(PIN_CONFIG_SLEEP_HARDWARE_STATE, "sleep hardware state", NULL, false),
PCONFDUMP(PIN_CONFIG_SLEW_RATE, "slew rate", NULL, true),
@@ -179,6 +180,7 @@ static const struct pinconf_generic_params dt_params[] = {
{ "output-disable", PIN_CONFIG_OUTPUT_ENABLE, 0 },
{ "output-enable", PIN_CONFIG_OUTPUT_ENABLE, 1 },
{ "output-high", PIN_CONFIG_OUTPUT, 1, },
+ { "output-impedance-ohms", PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS, 0 },
{ "output-low", PIN_CONFIG_OUTPUT, 0, },
{ "power-source", PIN_CONFIG_POWER_SOURCE, 0 },
{ "sleep-hardware-state", PIN_CONFIG_SLEEP_HARDWARE_STATE, 0 },
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index ecab9064a845..1a7d686494ff 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -1009,9 +1009,6 @@ static int amd_gpio_probe(struct platform_device *pdev)
gpio_dev->gc.owner = THIS_MODULE;
gpio_dev->gc.parent = &pdev->dev;
gpio_dev->gc.ngpio = resource_size(res) / 4;
-#if defined(CONFIG_OF_GPIO)
- gpio_dev->gc.of_node = pdev->dev.of_node;
-#endif
gpio_dev->hwbank_num = gpio_dev->gc.ngpio / 64;
gpio_dev->groups = kerncz_groups;
diff --git a/drivers/pinctrl/pinctrl-apple-gpio.c b/drivers/pinctrl/pinctrl-apple-gpio.c
index a7861079a650..72f4dd2466e1 100644
--- a/drivers/pinctrl/pinctrl-apple-gpio.c
+++ b/drivers/pinctrl/pinctrl-apple-gpio.c
@@ -11,6 +11,7 @@
*/
#include <dt-bindings/pinctrl/apple.h>
+#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -36,7 +37,7 @@ struct apple_gpio_pinctrl {
struct pinctrl_desc pinctrl_desc;
struct gpio_chip gpio_chip;
struct irq_chip irq_chip;
- u8 irqgrps[0];
+ u8 irqgrps[];
};
#define REG_GPIO(x) (4 * (x))
@@ -70,31 +71,35 @@ struct regmap_config regmap_config = {
.cache_type = REGCACHE_FLAT,
.max_register = 512 * sizeof(u32),
.num_reg_defaults_raw = 512,
- .use_relaxed_mmio = true
+ .use_relaxed_mmio = true,
};
-// No locking needed to mask/unmask IRQs as the interrupt mode is per pin-register.
+/* No locking needed to mask/unmask IRQs as the interrupt mode is per pin-register. */
static void apple_gpio_set_reg(struct apple_gpio_pinctrl *pctl,
- unsigned int pin, u32 mask, u32 value)
+ unsigned int pin, u32 mask, u32 value)
{
regmap_update_bits(pctl->map, REG_GPIO(pin), mask, value);
}
-static uint32_t apple_gpio_get_reg(struct apple_gpio_pinctrl *pctl,
- unsigned int pin)
+static u32 apple_gpio_get_reg(struct apple_gpio_pinctrl *pctl,
+ unsigned int pin)
{
- unsigned int val = 0;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(pctl->map, REG_GPIO(pin), &val);
+ if (ret)
+ return 0;
- regmap_read(pctl->map, REG_GPIO(pin), &val);
return val;
}
/* Pin controller functions */
static int apple_gpio_dt_node_to_map(struct pinctrl_dev *pctldev,
- struct device_node *node,
- struct pinctrl_map **map,
- unsigned *num_maps)
+ struct device_node *node,
+ struct pinctrl_map **map,
+ unsigned *num_maps)
{
unsigned reserved_maps;
struct apple_gpio_pinctrl *pctl;
@@ -114,13 +119,12 @@ static int apple_gpio_dt_node_to_map(struct pinctrl_dev *pctldev,
dev_err(pctl->dev,
"missing or empty pinmux property in node %pOFn.\n",
node);
- return ret;
+ return ret ? ret : -EINVAL;
}
num_pins = ret;
- ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps, num_maps,
- num_pins);
+ ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps, num_maps, num_pins);
if (ret)
return ret;
@@ -138,11 +142,10 @@ static int apple_gpio_dt_node_to_map(struct pinctrl_dev *pctldev,
}
group_name = pinctrl_generic_get_group_name(pctldev, pin);
- function_name =
- pinmux_generic_get_function_name(pctl->pctldev, func);
+ function_name = pinmux_generic_get_function_name(pctl->pctldev, func);
ret = pinctrl_utils_add_map_mux(pctl->pctldev, map,
- &reserved_maps, num_maps,
- group_name, function_name);
+ &reserved_maps, num_maps,
+ group_name, function_name);
if (ret)
goto free_map;
}
@@ -165,7 +168,7 @@ static const struct pinctrl_ops apple_gpio_pinctrl_ops = {
/* Pin multiplexer functions */
static int apple_gpio_pinmux_set(struct pinctrl_dev *pctldev, unsigned func,
- unsigned group)
+ unsigned group)
{
struct apple_gpio_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
@@ -186,14 +189,14 @@ static const struct pinmux_ops apple_gpio_pinmux_ops = {
/* GPIO chip functions */
-static int apple_gpio_get_direction(struct gpio_chip *chip,
- unsigned int offset)
+static int apple_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
unsigned int reg = apple_gpio_get_reg(pctl, offset);
- return (FIELD_GET(REG_GPIOx_MODE, reg) == REG_GPIOx_OUT) ?
- GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN;
+ if (FIELD_GET(REG_GPIOx_MODE, reg) == REG_GPIOx_OUT)
+ return GPIO_LINE_DIRECTION_OUT;
+ return GPIO_LINE_DIRECTION_IN;
}
static int apple_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -211,17 +214,14 @@ static int apple_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!(reg & REG_GPIOx_DATA);
}
-static void apple_gpio_set(struct gpio_chip *chip, unsigned int offset,
- int value)
+static void apple_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
- apple_gpio_set_reg(pctl, offset, REG_GPIOx_DATA,
- value ? REG_GPIOx_DATA : 0);
+ apple_gpio_set_reg(pctl, offset, REG_GPIOx_DATA, value ? REG_GPIOx_DATA : 0);
}
-static int apple_gpio_direction_input(struct gpio_chip *chip,
- unsigned int offset)
+static int apple_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
@@ -234,7 +234,7 @@ static int apple_gpio_direction_input(struct gpio_chip *chip,
}
static int apple_gpio_direction_output(struct gpio_chip *chip,
- unsigned int offset, int value)
+ unsigned int offset, int value)
{
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
@@ -249,13 +249,10 @@ static int apple_gpio_direction_output(struct gpio_chip *chip,
static void apple_gpio_irq_ack(struct irq_data *data)
{
- struct apple_gpio_pinctrl *pctl =
- gpiochip_get_data(irq_data_get_irq_chip_data(data));
- unsigned int irqgrp =
- FIELD_GET(REG_GPIOx_GRP, apple_gpio_get_reg(pctl, data->hwirq));
+ struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ unsigned int irqgrp = FIELD_GET(REG_GPIOx_GRP, apple_gpio_get_reg(pctl, data->hwirq));
- writel(BIT(data->hwirq & 31),
- pctl->base + REG_IRQ(irqgrp, data->hwirq));
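+	/* hwirq % 32 selects the pin's bit within its 32-bit per-group IRQ register */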
+ writel(BIT(data->hwirq % 32), pctl->base + REG_IRQ(irqgrp, data->hwirq));
}
static unsigned int apple_gpio_irq_type(unsigned int type)
@@ -278,20 +275,19 @@ static unsigned int apple_gpio_irq_type(unsigned int type)
static void apple_gpio_irq_mask(struct irq_data *data)
{
- struct apple_gpio_pinctrl *pctl =
- gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data));
+
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
- FIELD_PREP(REG_GPIOx_MODE, REG_GPIOx_IN_IRQ_OFF));
+ FIELD_PREP(REG_GPIOx_MODE, REG_GPIOx_IN_IRQ_OFF));
}
static void apple_gpio_irq_unmask(struct irq_data *data)
{
- struct apple_gpio_pinctrl *pctl =
- gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data));
unsigned int irqtype = apple_gpio_irq_type(irqd_get_trigger_type(data));
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
- FIELD_PREP(REG_GPIOx_MODE, irqtype));
+ FIELD_PREP(REG_GPIOx_MODE, irqtype));
}
static unsigned int apple_gpio_irq_startup(struct irq_data *data)
@@ -300,7 +296,7 @@ static unsigned int apple_gpio_irq_startup(struct irq_data *data)
struct apple_gpio_pinctrl *pctl = gpiochip_get_data(chip);
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_GRP,
- FIELD_PREP(REG_GPIOx_GRP, 0));
+ FIELD_PREP(REG_GPIOx_GRP, 0));
apple_gpio_direction_input(chip, data->hwirq);
apple_gpio_irq_unmask(data);
@@ -308,18 +304,16 @@ static unsigned int apple_gpio_irq_startup(struct irq_data *data)
return 0;
}
-static int apple_gpio_irq_set_type(struct irq_data *data,
- unsigned int type)
+static int apple_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
- struct apple_gpio_pinctrl *pctl =
- gpiochip_get_data(irq_data_get_irq_chip_data(data));
+ struct apple_gpio_pinctrl *pctl = gpiochip_get_data(irq_data_get_irq_chip_data(data));
unsigned int irqtype = apple_gpio_irq_type(type);
if (irqtype == REG_GPIOx_IN_IRQ_OFF)
return -EINVAL;
apple_gpio_set_reg(pctl, data->hwirq, REG_GPIOx_MODE,
- FIELD_PREP(REG_GPIOx_MODE, irqtype));
+ FIELD_PREP(REG_GPIOx_MODE, irqtype));
if (type & IRQ_TYPE_LEVEL_MASK)
irq_set_handler_locked(data, handle_level_irq);
@@ -366,10 +360,6 @@ static int apple_gpio_register(struct apple_gpio_pinctrl *pctl)
void **irq_data = NULL;
int ret;
- if (!of_property_read_bool(pctl->dev->of_node, "gpio-controller"))
- return dev_err_probe(pctl->dev, -ENODEV,
- "No gpio-controller property\n");
-
pctl->irq_chip = apple_gpio_irqchip;
pctl->gpio_chip.label = dev_name(pctl->dev);
@@ -383,7 +373,6 @@ static int apple_gpio_register(struct apple_gpio_pinctrl *pctl)
pctl->gpio_chip.base = -1;
pctl->gpio_chip.ngpio = pctl->pinctrl_desc.npins;
pctl->gpio_chip.parent = pctl->dev;
- pctl->gpio_chip.of_node = pctl->dev->of_node;
if (girq->num_parents) {
int i;
@@ -398,14 +387,13 @@ static int apple_gpio_register(struct apple_gpio_pinctrl *pctl)
GFP_KERNEL);
if (!girq->parents || !irq_data) {
ret = -ENOMEM;
- goto out;
+ goto out_free_irq_data;
}
for (i = 0; i < girq->num_parents; i++) {
- ret = platform_get_irq(to_platform_device(pctl->dev),
- i);
+ ret = platform_get_irq(to_platform_device(pctl->dev), i);
if (ret < 0)
- goto out;
+ goto out_free_irq_data;
girq->parents[i] = ret;
pctl->irqgrps[i] = i;
@@ -419,7 +407,8 @@ static int apple_gpio_register(struct apple_gpio_pinctrl *pctl)
}
ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl);
-out:
+
+out_free_irq_data:
kfree(girq->parents);
kfree(irq_data);
diff --git a/drivers/pinctrl/pinctrl-as3722.c b/drivers/pinctrl/pinctrl-as3722.c
index 13c193156363..4313756b52e6 100644
--- a/drivers/pinctrl/pinctrl-as3722.c
+++ b/drivers/pinctrl/pinctrl-as3722.c
@@ -23,19 +23,20 @@
#include <linux/delay.h>
#include <linux/gpio/driver.h>
#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mfd/as3722.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinmux.h>
-#include <linux/pm.h>
-#include <linux/slab.h>
#include "core.h"
#include "pinconf.h"
@@ -551,12 +552,13 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
struct as3722_pctrl_info *as_pci;
int ret;
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
as_pci = devm_kzalloc(&pdev->dev, sizeof(*as_pci), GFP_KERNEL);
if (!as_pci)
return -ENOMEM;
as_pci->dev = &pdev->dev;
- as_pci->dev->of_node = pdev->dev.parent->of_node;
as_pci->as3722 = dev_get_drvdata(pdev->dev.parent);
platform_set_drvdata(pdev, as_pci);
@@ -578,7 +580,6 @@ static int as3722_pinctrl_probe(struct platform_device *pdev)
as_pci->gpio_chip = as3722_gpio_chip;
as_pci->gpio_chip.parent = &pdev->dev;
- as_pci->gpio_chip.of_node = pdev->dev.parent->of_node;
ret = gpiochip_add_data(&as_pci->gpio_chip, as_pci);
if (ret < 0) {
dev_err(&pdev->dev, "Couldn't register gpiochip, %d\n", ret);
diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c
index 03c32b2c5d30..fafd1f55cba7 100644
--- a/drivers/pinctrl/pinctrl-at91-pio4.c
+++ b/drivers/pinctrl/pinctrl-at91-pio4.c
@@ -1136,7 +1136,6 @@ static int atmel_pinctrl_probe(struct platform_device *pdev)
}
atmel_pioctrl->gpio_chip = &atmel_gpio_chip;
- atmel_pioctrl->gpio_chip->of_node = dev->of_node;
atmel_pioctrl->gpio_chip->ngpio = atmel_pioctrl->npins;
atmel_pioctrl->gpio_chip->label = dev_name(dev);
atmel_pioctrl->gpio_chip->parent = dev;
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 6022496bb6a9..d91a010e65f5 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1868,7 +1868,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
at91_chip->chip = at91_gpio_template;
chip = &at91_chip->chip;
- chip->of_node = np;
chip->label = dev_name(&pdev->dev);
chip->parent = &pdev->dev;
chip->owner = THIS_MODULE;
diff --git a/drivers/pinctrl/pinctrl-da9062.c b/drivers/pinctrl/pinctrl-da9062.c
index 1c08579f0198..0e0ac3f3ffef 100644
--- a/drivers/pinctrl/pinctrl-da9062.c
+++ b/drivers/pinctrl/pinctrl-da9062.c
@@ -14,6 +14,7 @@
#include <linux/bits.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/gpio/driver.h>
@@ -256,6 +257,8 @@ static int da9062_pctl_probe(struct platform_device *pdev)
struct da9062_pctl *pctl;
int i;
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
pctl = devm_kzalloc(&pdev->dev, sizeof(*pctl), GFP_KERNEL);
if (!pctl)
return -ENOMEM;
@@ -277,9 +280,6 @@ static int da9062_pctl_probe(struct platform_device *pdev)
pctl->gc = reference_gc;
pctl->gc.label = dev_name(&pdev->dev);
pctl->gc.parent = &pdev->dev;
-#ifdef CONFIG_OF_GPIO
- pctl->gc.of_node = parent->of_node;
-#endif
platform_set_drvdata(pdev, pctl);
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index ff702cfbaa28..cc3546fc4610 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -233,7 +233,7 @@ static void dc_gpio_set(struct gpio_chip *chip, unsigned gpio, int value)
spin_unlock_irqrestore(&pmap->lock, flags);
}
-static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
+static int dc_gpiochip_add(struct dc_pinmap *pmap)
{
struct gpio_chip *chip = &pmap->chip;
int ret;
@@ -248,7 +248,6 @@ static int dc_gpiochip_add(struct dc_pinmap *pmap, struct device_node *np)
chip->set = dc_gpio_set;
chip->base = -1;
chip->ngpio = PINS_COUNT;
- chip->of_node = np;
chip->of_gpio_n_cells = 2;
spin_lock_init(&pmap->lock);
@@ -326,7 +325,7 @@ static int dc_pinctrl_probe(struct platform_device *pdev)
return PTR_ERR(pmap->pctl);
}
- return dc_gpiochip_add(pmap, pdev->dev.of_node);
+ return dc_gpiochip_add(pmap);
}
static const struct of_device_id dc_pinctrl_ids[] = {
diff --git a/drivers/pinctrl/pinctrl-keembay.c b/drivers/pinctrl/pinctrl-keembay.c
index 2bce563d5b8b..152c35bce8ec 100644
--- a/drivers/pinctrl/pinctrl-keembay.c
+++ b/drivers/pinctrl/pinctrl-keembay.c
@@ -1555,58 +1555,42 @@ static int keembay_pinctrl_reg(struct keembay_pinctrl *kpc, struct device *dev)
}
static int keembay_add_functions(struct keembay_pinctrl *kpc,
- struct function_desc *function)
+ struct function_desc *functions)
{
unsigned int i;
/* Assign the groups for each function */
- for (i = 0; i < kpc->npins; i++) {
- const struct pinctrl_pin_desc *pdesc = keembay_pins + i;
- struct keembay_mux_desc *mux = pdesc->drv_data;
-
- while (mux->name) {
- struct function_desc *func;
- const char **grp;
- size_t grp_size;
- u32 j, grp_num;
-
- for (j = 0; j < kpc->nfuncs; j++) {
- if (!strcmp(mux->name, function[j].name))
- break;
- }
-
- if (j == kpc->nfuncs)
- return -EINVAL;
-
- func = function + j;
- grp_num = func->num_group_names;
- grp_size = sizeof(*func->group_names);
-
- if (!func->group_names) {
- func->group_names = devm_kcalloc(kpc->dev,
- grp_num,
- grp_size,
- GFP_KERNEL);
- if (!func->group_names)
- return -ENOMEM;
+ for (i = 0; i < kpc->nfuncs; i++) {
+ struct function_desc *func = &functions[i];
+ const char **group_names;
+ unsigned int grp_idx = 0;
+ int j;
+
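+		/* num_group_names was counted in keembay_build_functions(); size the name table to match */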
+ group_names = devm_kcalloc(kpc->dev, func->num_group_names,
+ sizeof(*group_names), GFP_KERNEL);
+ if (!group_names)
+ return -ENOMEM;
+
+ for (j = 0; j < kpc->npins; j++) {
+ const struct pinctrl_pin_desc *pdesc = &keembay_pins[j];
+ struct keembay_mux_desc *mux;
+
+ for (mux = pdesc->drv_data; mux->name; mux++) {
+ if (!strcmp(mux->name, func->name))
+ group_names[grp_idx++] = pdesc->name;
}
-
- grp = func->group_names;
- while (*grp)
- grp++;
-
- *grp = pdesc->name;
- mux++;
}
+
+ func->group_names = group_names;
}
/* Add all functions */
for (i = 0; i < kpc->nfuncs; i++) {
pinmux_generic_add_function(kpc->pctrl,
- function[i].name,
- function[i].group_names,
- function[i].num_group_names,
- function[i].data);
+ functions[i].name,
+ functions[i].group_names,
+ functions[i].num_group_names,
+ functions[i].data);
}
return 0;
@@ -1617,37 +1601,38 @@ static int keembay_build_functions(struct keembay_pinctrl *kpc)
struct function_desc *keembay_funcs, *new_funcs;
int i;
- /* Allocate total number of functions */
+ /*
+	 * Allocate the maximum possible number of functions: assume every
+	 * pin is part of 8 (the hw maximum) globally unique muxes.
+ */
kpc->nfuncs = 0;
keembay_funcs = kcalloc(kpc->npins * 8, sizeof(*keembay_funcs), GFP_KERNEL);
if (!keembay_funcs)
return -ENOMEM;
- /* Find total number of functions and each's properties */
+	/* Set up one function for each unique mux */
for (i = 0; i < kpc->npins; i++) {
const struct pinctrl_pin_desc *pdesc = keembay_pins + i;
- struct keembay_mux_desc *mux = pdesc->drv_data;
+ struct keembay_mux_desc *mux;
- while (mux->name) {
- struct function_desc *fdesc = keembay_funcs;
+ for (mux = pdesc->drv_data; mux->name; mux++) {
+ struct function_desc *fdesc;
- while (fdesc->name) {
+			/* Check if we already have a function for this mux */
+ for (fdesc = keembay_funcs; fdesc->name; fdesc++) {
if (!strcmp(mux->name, fdesc->name)) {
fdesc->num_group_names++;
break;
}
-
- fdesc++;
}
+			/* Set up a new function for a mux we haven't seen before */
if (!fdesc->name) {
fdesc->name = mux->name;
fdesc->num_group_names = 1;
fdesc->data = &mux->mode;
kpc->nfuncs++;
}
-
- mux++;
}
}
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
index c643ed43ebbf..1ee94574f0af 100644
--- a/drivers/pinctrl/pinctrl-max77620.c
+++ b/drivers/pinctrl/pinctrl-max77620.c
@@ -10,14 +10,16 @@
*/
#include <linux/mfd/max77620.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinmux.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include "core.h"
#include "pinconf.h"
@@ -551,12 +553,13 @@ static int max77620_pinctrl_probe(struct platform_device *pdev)
struct max77620_pctrl_info *mpci;
int i;
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
mpci = devm_kzalloc(&pdev->dev, sizeof(*mpci), GFP_KERNEL);
if (!mpci)
return -ENOMEM;
mpci->dev = &pdev->dev;
- mpci->dev->of_node = pdev->dev.parent->of_node;
mpci->rmap = max77620->rmap;
mpci->pins = max77620_pins_desc;
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index bccebe43dd6a..695236636d05 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -551,7 +551,6 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
mcp->chip.set = mcp23s08_set;
#ifdef CONFIG_OF_GPIO
mcp->chip.of_gpio_n_cells = 2;
- mcp->chip.of_node = dev->of_node;
#endif
mcp->chip.base = base;
diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c
index 78765faa245a..639f1130e989 100644
--- a/drivers/pinctrl/pinctrl-microchip-sgpio.c
+++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c
@@ -17,6 +17,7 @@
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
#include <linux/property.h>
+#include <linux/regmap.h>
#include <linux/reset.h>
#include "core.h"
@@ -113,7 +114,7 @@ struct sgpio_priv {
u32 bitcount;
u32 ports;
u32 clock;
- u32 __iomem *regs;
+ struct regmap *regs;
const struct sgpio_properties *properties;
};
@@ -134,31 +135,43 @@ static inline int sgpio_addr_to_pin(struct sgpio_priv *priv, int port, int bit)
return bit + port * priv->bitcount;
}
-static inline u32 sgpio_readl(struct sgpio_priv *priv, u32 rno, u32 off)
+static inline u32 sgpio_get_addr(struct sgpio_priv *priv, u32 rno, u32 off)
+{
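+	/* Translate a register block number plus offset into a regmap byte address */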
+ return (priv->properties->regoff[rno] + off) *
+ regmap_get_reg_stride(priv->regs);
+}
+
+static u32 sgpio_readl(struct sgpio_priv *priv, u32 rno, u32 off)
{
- u32 __iomem *reg = &priv->regs[priv->properties->regoff[rno] + off];
+ u32 addr = sgpio_get_addr(priv, rno, off);
+ u32 val = 0;
+ int ret;
- return readl(reg);
+ ret = regmap_read(priv->regs, addr, &val);
+ WARN_ONCE(ret, "error reading sgpio reg %d\n", ret);
+
+ return val;
}
-static inline void sgpio_writel(struct sgpio_priv *priv,
+static void sgpio_writel(struct sgpio_priv *priv,
u32 val, u32 rno, u32 off)
{
- u32 __iomem *reg = &priv->regs[priv->properties->regoff[rno] + off];
+ u32 addr = sgpio_get_addr(priv, rno, off);
+ int ret;
- writel(val, reg);
+ ret = regmap_write(priv->regs, addr, val);
+ WARN_ONCE(ret, "error writing sgpio reg %d\n", ret);
}
static inline void sgpio_clrsetbits(struct sgpio_priv *priv,
u32 rno, u32 off, u32 clear, u32 set)
{
- u32 __iomem *reg = &priv->regs[priv->properties->regoff[rno] + off];
- u32 val = readl(reg);
+ u32 val = sgpio_readl(priv, rno, off);
val &= ~clear;
val |= set;
- writel(val, reg);
+ sgpio_writel(priv, val, rno, off);
}
static inline void sgpio_configure_bitstream(struct sgpio_priv *priv)
@@ -807,7 +820,13 @@ static int microchip_sgpio_probe(struct platform_device *pdev)
struct reset_control *reset;
struct sgpio_priv *priv;
struct clk *clk;
+ u32 __iomem *regs;
u32 val;
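+	/* 32-bit registers at a 4-byte stride, matching the previous direct readl/writel layout */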
+ struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -832,9 +851,14 @@ static int microchip_sgpio_probe(struct platform_device *pdev)
return -EINVAL;
}
- priv->regs = devm_platform_ioremap_resource(pdev, 0);
+ regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ priv->regs = devm_regmap_init_mmio(dev, regs, &regmap_config);
if (IS_ERR(priv->regs))
return PTR_ERR(priv->regs);
+
priv->properties = device_get_match_data(dev);
priv->in.is_input = true;
diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c
index 0a36ec8775a3..fc969208d904 100644
--- a/drivers/pinctrl/pinctrl-ocelot.c
+++ b/drivers/pinctrl/pinctrl-ocelot.c
@@ -57,16 +57,71 @@ enum {
#define OCELOT_FUNC_PER_PIN 4
enum {
+ FUNC_CAN0_a,
+ FUNC_CAN0_b,
+ FUNC_CAN1,
FUNC_NONE,
+ FUNC_FC0_a,
+ FUNC_FC0_b,
+ FUNC_FC0_c,
+ FUNC_FC1_a,
+ FUNC_FC1_b,
+ FUNC_FC1_c,
+ FUNC_FC2_a,
+ FUNC_FC2_b,
+ FUNC_FC3_a,
+ FUNC_FC3_b,
+ FUNC_FC3_c,
+ FUNC_FC4_a,
+ FUNC_FC4_b,
+ FUNC_FC4_c,
+ FUNC_FC_SHRD0,
+ FUNC_FC_SHRD1,
+ FUNC_FC_SHRD2,
+ FUNC_FC_SHRD3,
+ FUNC_FC_SHRD4,
+ FUNC_FC_SHRD5,
+ FUNC_FC_SHRD6,
+ FUNC_FC_SHRD7,
+ FUNC_FC_SHRD8,
+ FUNC_FC_SHRD9,
+ FUNC_FC_SHRD10,
+ FUNC_FC_SHRD11,
+ FUNC_FC_SHRD12,
+ FUNC_FC_SHRD13,
+ FUNC_FC_SHRD14,
+ FUNC_FC_SHRD15,
+ FUNC_FC_SHRD16,
+ FUNC_FC_SHRD17,
+ FUNC_FC_SHRD18,
+ FUNC_FC_SHRD19,
+ FUNC_FC_SHRD20,
FUNC_GPIO,
+ FUNC_IB_TRG_a,
+ FUNC_IB_TRG_b,
+ FUNC_IB_TRG_c,
FUNC_IRQ0,
+ FUNC_IRQ_IN_a,
+ FUNC_IRQ_IN_b,
+ FUNC_IRQ_IN_c,
FUNC_IRQ0_IN,
+ FUNC_IRQ_OUT_a,
+ FUNC_IRQ_OUT_b,
+ FUNC_IRQ_OUT_c,
FUNC_IRQ0_OUT,
FUNC_IRQ1,
FUNC_IRQ1_IN,
FUNC_IRQ1_OUT,
FUNC_EXT_IRQ,
FUNC_MIIM,
+ FUNC_MIIM_a,
+ FUNC_MIIM_b,
+ FUNC_MIIM_c,
+ FUNC_MIIM_Sa,
+ FUNC_MIIM_Sb,
+ FUNC_OB_TRG,
+ FUNC_OB_TRG_a,
+ FUNC_OB_TRG_b,
FUNC_PHY_LED,
FUNC_PCI_WAKE,
FUNC_MD,
@@ -74,65 +129,174 @@ enum {
FUNC_PTP1,
FUNC_PTP2,
FUNC_PTP3,
+ FUNC_PTPSYNC_1,
+ FUNC_PTPSYNC_2,
+ FUNC_PTPSYNC_3,
+ FUNC_PTPSYNC_4,
+ FUNC_PTPSYNC_5,
+ FUNC_PTPSYNC_6,
+ FUNC_PTPSYNC_7,
FUNC_PWM,
+ FUNC_QSPI1,
+ FUNC_QSPI2,
+ FUNC_R,
+ FUNC_RECO_a,
+ FUNC_RECO_b,
FUNC_RECO_CLK,
+ FUNC_SD,
FUNC_SFP,
+ FUNC_SFP_SD,
FUNC_SG0,
FUNC_SG1,
FUNC_SG2,
+ FUNC_SGPIO_a,
+ FUNC_SGPIO_b,
FUNC_SI,
FUNC_SI2,
FUNC_TACHO,
+ FUNC_TACHO_a,
+ FUNC_TACHO_b,
FUNC_TWI,
FUNC_TWI2,
FUNC_TWI3,
FUNC_TWI_SCL_M,
+ FUNC_TWI_SLC_GATE,
+ FUNC_TWI_SLC_GATE_AD,
FUNC_UART,
FUNC_UART2,
FUNC_UART3,
+ FUNC_USB_H_a,
+ FUNC_USB_H_b,
+ FUNC_USB_H_c,
+ FUNC_USB_S_a,
+ FUNC_USB_S_b,
+ FUNC_USB_S_c,
FUNC_PLL_STAT,
FUNC_EMMC,
+ FUNC_EMMC_SD,
FUNC_REF_CLK,
FUNC_RCVRD_CLK,
FUNC_MAX
};
static const char *const ocelot_function_names[] = {
+ [FUNC_CAN0_a] = "can0_a",
+ [FUNC_CAN0_b] = "can0_b",
+ [FUNC_CAN1] = "can1",
[FUNC_NONE] = "none",
+ [FUNC_FC0_a] = "fc0_a",
+ [FUNC_FC0_b] = "fc0_b",
+ [FUNC_FC0_c] = "fc0_c",
+ [FUNC_FC1_a] = "fc1_a",
+ [FUNC_FC1_b] = "fc1_b",
+ [FUNC_FC1_c] = "fc1_c",
+ [FUNC_FC2_a] = "fc2_a",
+ [FUNC_FC2_b] = "fc2_b",
+ [FUNC_FC3_a] = "fc3_a",
+ [FUNC_FC3_b] = "fc3_b",
+ [FUNC_FC3_c] = "fc3_c",
+ [FUNC_FC4_a] = "fc4_a",
+ [FUNC_FC4_b] = "fc4_b",
+ [FUNC_FC4_c] = "fc4_c",
+ [FUNC_FC_SHRD0] = "fc_shrd0",
+ [FUNC_FC_SHRD1] = "fc_shrd1",
+ [FUNC_FC_SHRD2] = "fc_shrd2",
+ [FUNC_FC_SHRD3] = "fc_shrd3",
+ [FUNC_FC_SHRD4] = "fc_shrd4",
+ [FUNC_FC_SHRD5] = "fc_shrd5",
+ [FUNC_FC_SHRD6] = "fc_shrd6",
+ [FUNC_FC_SHRD7] = "fc_shrd7",
+ [FUNC_FC_SHRD8] = "fc_shrd8",
+ [FUNC_FC_SHRD9] = "fc_shrd9",
+ [FUNC_FC_SHRD10] = "fc_shrd10",
+ [FUNC_FC_SHRD11] = "fc_shrd11",
+ [FUNC_FC_SHRD12] = "fc_shrd12",
+ [FUNC_FC_SHRD13] = "fc_shrd13",
+ [FUNC_FC_SHRD14] = "fc_shrd14",
+ [FUNC_FC_SHRD15] = "fc_shrd15",
+ [FUNC_FC_SHRD16] = "fc_shrd16",
+ [FUNC_FC_SHRD17] = "fc_shrd17",
+ [FUNC_FC_SHRD18] = "fc_shrd18",
+ [FUNC_FC_SHRD19] = "fc_shrd19",
+ [FUNC_FC_SHRD20] = "fc_shrd20",
[FUNC_GPIO] = "gpio",
+ [FUNC_IB_TRG_a] = "ib_trig_a",
+ [FUNC_IB_TRG_b] = "ib_trig_b",
+ [FUNC_IB_TRG_c] = "ib_trig_c",
[FUNC_IRQ0] = "irq0",
+ [FUNC_IRQ_IN_a] = "irq_in_a",
+ [FUNC_IRQ_IN_b] = "irq_in_b",
+ [FUNC_IRQ_IN_c] = "irq_in_c",
[FUNC_IRQ0_IN] = "irq0_in",
+ [FUNC_IRQ_OUT_a] = "irq_out_a",
+ [FUNC_IRQ_OUT_b] = "irq_out_b",
+ [FUNC_IRQ_OUT_c] = "irq_out_c",
[FUNC_IRQ0_OUT] = "irq0_out",
[FUNC_IRQ1] = "irq1",
[FUNC_IRQ1_IN] = "irq1_in",
[FUNC_IRQ1_OUT] = "irq1_out",
[FUNC_EXT_IRQ] = "ext_irq",
[FUNC_MIIM] = "miim",
+ [FUNC_MIIM_a] = "miim_a",
+ [FUNC_MIIM_b] = "miim_b",
+ [FUNC_MIIM_c] = "miim_c",
+ [FUNC_MIIM_Sa] = "miim_slave_a",
+ [FUNC_MIIM_Sb] = "miim_slave_b",
[FUNC_PHY_LED] = "phy_led",
[FUNC_PCI_WAKE] = "pci_wake",
[FUNC_MD] = "md",
+ [FUNC_OB_TRG] = "ob_trig",
+ [FUNC_OB_TRG_a] = "ob_trig_a",
+ [FUNC_OB_TRG_b] = "ob_trig_b",
[FUNC_PTP0] = "ptp0",
[FUNC_PTP1] = "ptp1",
[FUNC_PTP2] = "ptp2",
[FUNC_PTP3] = "ptp3",
+ [FUNC_PTPSYNC_1] = "ptpsync_1",
+ [FUNC_PTPSYNC_2] = "ptpsync_2",
+ [FUNC_PTPSYNC_3] = "ptpsync_3",
+ [FUNC_PTPSYNC_4] = "ptpsync_4",
+ [FUNC_PTPSYNC_5] = "ptpsync_5",
+ [FUNC_PTPSYNC_6] = "ptpsync_6",
+ [FUNC_PTPSYNC_7] = "ptpsync_7",
[FUNC_PWM] = "pwm",
+ [FUNC_QSPI1] = "qspi1",
+ [FUNC_QSPI2] = "qspi2",
+ [FUNC_R] = "reserved",
+ [FUNC_RECO_a] = "reco_a",
+ [FUNC_RECO_b] = "reco_b",
[FUNC_RECO_CLK] = "reco_clk",
+ [FUNC_SD] = "sd",
[FUNC_SFP] = "sfp",
+ [FUNC_SFP_SD] = "sfp_sd",
[FUNC_SG0] = "sg0",
[FUNC_SG1] = "sg1",
[FUNC_SG2] = "sg2",
+ [FUNC_SGPIO_a] = "sgpio_a",
+ [FUNC_SGPIO_b] = "sgpio_b",
[FUNC_SI] = "si",
[FUNC_SI2] = "si2",
[FUNC_TACHO] = "tacho",
+ [FUNC_TACHO_a] = "tacho_a",
+ [FUNC_TACHO_b] = "tacho_b",
[FUNC_TWI] = "twi",
[FUNC_TWI2] = "twi2",
[FUNC_TWI3] = "twi3",
[FUNC_TWI_SCL_M] = "twi_scl_m",
+ [FUNC_TWI_SLC_GATE] = "twi_slc_gate",
+ [FUNC_TWI_SLC_GATE_AD] = "twi_slc_gate_ad",
+ [FUNC_USB_H_a] = "usb_host_a",
+ [FUNC_USB_H_b] = "usb_host_b",
+ [FUNC_USB_H_c] = "usb_host_c",
+ [FUNC_USB_S_a] = "usb_slave_a",
+ [FUNC_USB_S_b] = "usb_slave_b",
+ [FUNC_USB_S_c] = "usb_slave_c",
[FUNC_UART] = "uart",
[FUNC_UART2] = "uart2",
[FUNC_UART3] = "uart3",
[FUNC_PLL_STAT] = "pll_stat",
[FUNC_EMMC] = "emmc",
+ [FUNC_EMMC_SD] = "emmc_sd",
[FUNC_REF_CLK] = "ref_clk",
[FUNC_RCVRD_CLK] = "rcvrd_clk",
};
@@ -145,6 +309,7 @@ struct ocelot_pmx_func {
struct ocelot_pin_caps {
unsigned int pin;
unsigned char functions[OCELOT_FUNC_PER_PIN];
+ unsigned char a_functions[OCELOT_FUNC_PER_PIN]; /* Additional functions */
};
struct ocelot_pinctrl {
@@ -152,7 +317,7 @@ struct ocelot_pinctrl {
struct pinctrl_dev *pctl;
struct gpio_chip gpio_chip;
struct regmap *map;
- void __iomem *pincfg;
+ struct regmap *pincfg;
struct pinctrl_desc *desc;
struct ocelot_pmx_func func[FUNC_MAX];
u8 stride;
@@ -676,6 +841,187 @@ static const struct pinctrl_pin_desc sparx5_pins[] = {
SPARX5_PIN(63),
};
+#define LAN966X_P(p, f0, f1, f2, f3, f4, f5, f6, f7) \
+static struct ocelot_pin_caps lan966x_pin_##p = { \
+ .pin = p, \
+ .functions = { \
+ FUNC_##f0, FUNC_##f1, FUNC_##f2, \
+ FUNC_##f3 \
+ }, \
+ .a_functions = { \
+ FUNC_##f4, FUNC_##f5, FUNC_##f6, \
+ FUNC_##f7 \
+ }, \
+}
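+
+/*
+ * Each pin advertises up to eight alternate functions: the first four
+ * live in .functions (mux encodings 0-3) and the remaining four in
+ * .a_functions (encodings 4-7), which ocelot_pin_function_idx() reports
+ * as index + OCELOT_FUNC_PER_PIN.
+ */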
+
+/* Pinmuxing table taken from data sheet */
+/* Pin FUNC0 FUNC1 FUNC2 FUNC3 FUNC4 FUNC5 FUNC6 FUNC7 */
+LAN966X_P(0, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(1, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(2, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(3, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(4, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(5, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(6, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(7, GPIO, NONE, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(8, GPIO, FC0_a, USB_H_b, NONE, USB_S_b, NONE, NONE, R);
+LAN966X_P(9, GPIO, FC0_a, USB_H_b, NONE, NONE, NONE, NONE, R);
+LAN966X_P(10, GPIO, FC0_a, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(11, GPIO, FC1_a, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(12, GPIO, FC1_a, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(13, GPIO, FC1_a, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(14, GPIO, FC2_a, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(15, GPIO, FC2_a, NONE, NONE, NONE, NONE, NONE, R);
+LAN966X_P(16, GPIO, FC2_a, IB_TRG_a, NONE, OB_TRG_a, IRQ_IN_c, IRQ_OUT_c, R);
+LAN966X_P(17, GPIO, FC3_a, IB_TRG_a, NONE, OB_TRG_a, IRQ_IN_c, IRQ_OUT_c, R);
+LAN966X_P(18, GPIO, FC3_a, IB_TRG_a, NONE, OB_TRG_a, IRQ_IN_c, IRQ_OUT_c, R);
+LAN966X_P(19, GPIO, FC3_a, IB_TRG_a, NONE, OB_TRG_a, IRQ_IN_c, IRQ_OUT_c, R);
+LAN966X_P(20, GPIO, FC4_a, IB_TRG_a, NONE, OB_TRG_a, IRQ_IN_c, NONE, R);
+LAN966X_P(21, GPIO, FC4_a, NONE, NONE, OB_TRG_a, NONE, NONE, R);
+LAN966X_P(22, GPIO, FC4_a, NONE, NONE, OB_TRG_a, NONE, NONE, R);
+LAN966X_P(23, GPIO, NONE, NONE, NONE, OB_TRG_a, NONE, NONE, R);
+LAN966X_P(24, GPIO, FC0_b, IB_TRG_a, USB_H_c, OB_TRG_a, IRQ_IN_c, TACHO_a, R);
+LAN966X_P(25, GPIO, FC0_b, IB_TRG_a, USB_H_c, OB_TRG_a, IRQ_OUT_c, SFP_SD, R);
+LAN966X_P(26, GPIO, FC0_b, IB_TRG_a, USB_S_c, OB_TRG_a, CAN0_a, SFP_SD, R);
+LAN966X_P(27, GPIO, NONE, NONE, NONE, OB_TRG_a, CAN0_a, NONE, R);
+LAN966X_P(28, GPIO, MIIM_a, NONE, NONE, OB_TRG_a, IRQ_OUT_c, SFP_SD, R);
+LAN966X_P(29, GPIO, MIIM_a, NONE, NONE, OB_TRG_a, NONE, NONE, R);
+LAN966X_P(30, GPIO, FC3_c, CAN1, NONE, OB_TRG, RECO_b, NONE, R);
+LAN966X_P(31, GPIO, FC3_c, CAN1, NONE, OB_TRG, RECO_b, NONE, R);
+LAN966X_P(32, GPIO, FC3_c, NONE, SGPIO_a, NONE, MIIM_Sa, NONE, R);
+LAN966X_P(33, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R);
+LAN966X_P(34, GPIO, FC1_b, NONE, SGPIO_a, NONE, MIIM_Sa, MIIM_b, R);
+LAN966X_P(35, GPIO, FC1_b, NONE, SGPIO_a, CAN0_b, NONE, NONE, R);
+LAN966X_P(36, GPIO, NONE, PTPSYNC_1, NONE, CAN0_b, NONE, NONE, R);
+LAN966X_P(37, GPIO, FC_SHRD0, PTPSYNC_2, TWI_SLC_GATE_AD, NONE, NONE, NONE, R);
+LAN966X_P(38, GPIO, NONE, PTPSYNC_3, NONE, NONE, NONE, NONE, R);
+LAN966X_P(39, GPIO, NONE, PTPSYNC_4, NONE, NONE, NONE, NONE, R);
+LAN966X_P(40, GPIO, FC_SHRD1, PTPSYNC_5, NONE, NONE, NONE, NONE, R);
+LAN966X_P(41, GPIO, FC_SHRD2, PTPSYNC_6, TWI_SLC_GATE_AD, NONE, NONE, NONE, R);
+LAN966X_P(42, GPIO, FC_SHRD3, PTPSYNC_7, TWI_SLC_GATE_AD, NONE, NONE, NONE, R);
+LAN966X_P(43, GPIO, FC2_b, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, RECO_a, IRQ_IN_a, R);
+LAN966X_P(44, GPIO, FC2_b, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, RECO_a, IRQ_IN_a, R);
+LAN966X_P(45, GPIO, FC2_b, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, NONE, IRQ_IN_a, R);
+LAN966X_P(46, GPIO, FC1_c, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, FC_SHRD4, IRQ_IN_a, R);
+LAN966X_P(47, GPIO, FC1_c, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, FC_SHRD5, IRQ_IN_a, R);
+LAN966X_P(48, GPIO, FC1_c, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, FC_SHRD6, IRQ_IN_a, R);
+LAN966X_P(49, GPIO, FC_SHRD7, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, TWI_SLC_GATE, IRQ_IN_a, R);
+LAN966X_P(50, GPIO, FC_SHRD16, OB_TRG_b, IB_TRG_b, IRQ_OUT_a, TWI_SLC_GATE, NONE, R);
+LAN966X_P(51, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, NONE, IRQ_IN_b, R);
+LAN966X_P(52, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, TACHO_b, IRQ_IN_b, R);
+LAN966X_P(53, GPIO, FC3_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, NONE, IRQ_IN_b, R);
+LAN966X_P(54, GPIO, FC_SHRD8, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, TWI_SLC_GATE, IRQ_IN_b, R);
+LAN966X_P(55, GPIO, FC_SHRD9, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, TWI_SLC_GATE, IRQ_IN_b, R);
+LAN966X_P(56, GPIO, FC4_b, OB_TRG_b, IB_TRG_c, IRQ_OUT_b, FC_SHRD10, IRQ_IN_b, R);
+LAN966X_P(57, GPIO, FC4_b, TWI_SLC_GATE, IB_TRG_c, IRQ_OUT_b, FC_SHRD11, IRQ_IN_b, R);
+LAN966X_P(58, GPIO, FC4_b, TWI_SLC_GATE, IB_TRG_c, IRQ_OUT_b, FC_SHRD12, IRQ_IN_b, R);
+LAN966X_P(59, GPIO, QSPI1, MIIM_c, NONE, NONE, MIIM_Sb, NONE, R);
+LAN966X_P(60, GPIO, QSPI1, MIIM_c, NONE, NONE, MIIM_Sb, NONE, R);
+LAN966X_P(61, GPIO, QSPI1, NONE, SGPIO_b, FC0_c, MIIM_Sb, NONE, R);
+LAN966X_P(62, GPIO, QSPI1, FC_SHRD13, SGPIO_b, FC0_c, TWI_SLC_GATE, SFP_SD, R);
+LAN966X_P(63, GPIO, QSPI1, FC_SHRD14, SGPIO_b, FC0_c, TWI_SLC_GATE, SFP_SD, R);
+LAN966X_P(64, GPIO, QSPI1, FC4_c, SGPIO_b, FC_SHRD15, TWI_SLC_GATE, SFP_SD, R);
+LAN966X_P(65, GPIO, USB_H_a, FC4_c, NONE, IRQ_OUT_c, TWI_SLC_GATE_AD, NONE, R);
+LAN966X_P(66, GPIO, USB_H_a, FC4_c, USB_S_a, IRQ_OUT_c, IRQ_IN_c, NONE, R);
+LAN966X_P(67, GPIO, EMMC_SD, NONE, QSPI2, NONE, NONE, NONE, R);
+LAN966X_P(68, GPIO, EMMC_SD, NONE, QSPI2, NONE, NONE, NONE, R);
+LAN966X_P(69, GPIO, EMMC_SD, NONE, QSPI2, NONE, NONE, NONE, R);
+LAN966X_P(70, GPIO, EMMC_SD, NONE, QSPI2, NONE, NONE, NONE, R);
+LAN966X_P(71, GPIO, EMMC_SD, NONE, QSPI2, NONE, NONE, NONE, R);
+LAN966X_P(72, GPIO, EMMC_SD, NONE, QSPI2, NONE, NONE, NONE, R);
+LAN966X_P(73, GPIO, EMMC, NONE, NONE, SD, NONE, NONE, R);
+LAN966X_P(74, GPIO, EMMC, NONE, FC_SHRD17, SD, TWI_SLC_GATE, NONE, R);
+LAN966X_P(75, GPIO, EMMC, NONE, FC_SHRD18, SD, TWI_SLC_GATE, NONE, R);
+LAN966X_P(76, GPIO, EMMC, NONE, FC_SHRD19, SD, TWI_SLC_GATE, NONE, R);
+LAN966X_P(77, GPIO, EMMC_SD, NONE, FC_SHRD20, NONE, TWI_SLC_GATE, NONE, R);
+
+#define LAN966X_PIN(n) { \
+ .number = n, \
+ .name = "GPIO_"#n, \
+ .drv_data = &lan966x_pin_##n \
+}
+
+static const struct pinctrl_pin_desc lan966x_pins[] = {
+ LAN966X_PIN(0),
+ LAN966X_PIN(1),
+ LAN966X_PIN(2),
+ LAN966X_PIN(3),
+ LAN966X_PIN(4),
+ LAN966X_PIN(5),
+ LAN966X_PIN(6),
+ LAN966X_PIN(7),
+ LAN966X_PIN(8),
+ LAN966X_PIN(9),
+ LAN966X_PIN(10),
+ LAN966X_PIN(11),
+ LAN966X_PIN(12),
+ LAN966X_PIN(13),
+ LAN966X_PIN(14),
+ LAN966X_PIN(15),
+ LAN966X_PIN(16),
+ LAN966X_PIN(17),
+ LAN966X_PIN(18),
+ LAN966X_PIN(19),
+ LAN966X_PIN(20),
+ LAN966X_PIN(21),
+ LAN966X_PIN(22),
+ LAN966X_PIN(23),
+ LAN966X_PIN(24),
+ LAN966X_PIN(25),
+ LAN966X_PIN(26),
+ LAN966X_PIN(27),
+ LAN966X_PIN(28),
+ LAN966X_PIN(29),
+ LAN966X_PIN(30),
+ LAN966X_PIN(31),
+ LAN966X_PIN(32),
+ LAN966X_PIN(33),
+ LAN966X_PIN(34),
+ LAN966X_PIN(35),
+ LAN966X_PIN(36),
+ LAN966X_PIN(37),
+ LAN966X_PIN(38),
+ LAN966X_PIN(39),
+ LAN966X_PIN(40),
+ LAN966X_PIN(41),
+ LAN966X_PIN(42),
+ LAN966X_PIN(43),
+ LAN966X_PIN(44),
+ LAN966X_PIN(45),
+ LAN966X_PIN(46),
+ LAN966X_PIN(47),
+ LAN966X_PIN(48),
+ LAN966X_PIN(49),
+ LAN966X_PIN(50),
+ LAN966X_PIN(51),
+ LAN966X_PIN(52),
+ LAN966X_PIN(53),
+ LAN966X_PIN(54),
+ LAN966X_PIN(55),
+ LAN966X_PIN(56),
+ LAN966X_PIN(57),
+ LAN966X_PIN(58),
+ LAN966X_PIN(59),
+ LAN966X_PIN(60),
+ LAN966X_PIN(61),
+ LAN966X_PIN(62),
+ LAN966X_PIN(63),
+ LAN966X_PIN(64),
+ LAN966X_PIN(65),
+ LAN966X_PIN(66),
+ LAN966X_PIN(67),
+ LAN966X_PIN(68),
+ LAN966X_PIN(69),
+ LAN966X_PIN(70),
+ LAN966X_PIN(71),
+ LAN966X_PIN(72),
+ LAN966X_PIN(73),
+ LAN966X_PIN(74),
+ LAN966X_PIN(75),
+ LAN966X_PIN(76),
+ LAN966X_PIN(77),
+};
+
static int ocelot_get_functions_count(struct pinctrl_dev *pctldev)
{
return ARRAY_SIZE(ocelot_function_names);
@@ -709,6 +1055,9 @@ static int ocelot_pin_function_idx(struct ocelot_pinctrl *info,
for (i = 0; i < OCELOT_FUNC_PER_PIN; i++) {
if (function == p->functions[i])
return i;
+
+ if (function == p->a_functions[i])
+ return i + OCELOT_FUNC_PER_PIN;
}
return -1;
@@ -744,6 +1093,36 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev,
return 0;
}
+static int lan966x_pinmux_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int selector, unsigned int group)
+{
+ struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ struct ocelot_pin_caps *pin = info->desc->pins[group].drv_data;
+ unsigned int p = pin->pin % 32;
+ int f;
+
+ f = ocelot_pin_function_idx(info, group, selector);
+ if (f < 0)
+ return -EINVAL;
+
+ /*
+	 * f is encoded on three bits.
+	 * Bit 0 of f goes in BIT(p) of ALT[0], bit 1 of f goes in BIT(p) of
+	 * ALT[1], and bit 2 of f goes in BIT(p) of ALT[2].
+	 * This is racy because the three registers can't be updated at the
+	 * same time, but it doesn't matter much for now.
+	 * Note: ALT0/ALT1/ALT2 are organized specially for the 78 GPIO targets.
+ */
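+	/*
+	 * Worked example (hypothetical values): pin 35 with f = 5 (0b101)
+	 * gives p = 3, so BIT(3) is set in ALT[0] and ALT[2] and cleared
+	 * in ALT[1].
+	 */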
+ regmap_update_bits(info->map, REG_ALT(0, info, pin->pin),
+ BIT(p), f << p);
+ regmap_update_bits(info->map, REG_ALT(1, info, pin->pin),
+ BIT(p), (f >> 1) << p);
+ regmap_update_bits(info->map, REG_ALT(2, info, pin->pin),
+ BIT(p), (f >> 2) << p);
+
+ return 0;
+}
+
#define REG(r, info, p) ((r) * (info)->stride + (4 * ((p) / 32)))
static int ocelot_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -774,6 +1153,23 @@ static int ocelot_gpio_request_enable(struct pinctrl_dev *pctldev,
return 0;
}
+static int lan966x_gpio_request_enable(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int offset)
+{
+ struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
+ unsigned int p = offset % 32;
+
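+	/* Encoding 0 (all three ALT bits clear) selects the GPIO function */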
+ regmap_update_bits(info->map, REG_ALT(0, info, offset),
+ BIT(p), 0);
+ regmap_update_bits(info->map, REG_ALT(1, info, offset),
+ BIT(p), 0);
+ regmap_update_bits(info->map, REG_ALT(2, info, offset),
+ BIT(p), 0);
+
+ return 0;
+}
+
static const struct pinmux_ops ocelot_pmx_ops = {
.get_functions_count = ocelot_get_functions_count,
.get_function_name = ocelot_get_function_name,
@@ -783,6 +1179,15 @@ static const struct pinmux_ops ocelot_pmx_ops = {
.gpio_request_enable = ocelot_gpio_request_enable,
};
+static const struct pinmux_ops lan966x_pmx_ops = {
+ .get_functions_count = ocelot_get_functions_count,
+ .get_function_name = ocelot_get_function_name,
+ .get_function_groups = ocelot_get_function_groups,
+ .set_mux = lan966x_pinmux_set_mux,
+ .gpio_set_direction = ocelot_gpio_set_direction,
+ .gpio_request_enable = lan966x_gpio_request_enable,
+};
+
static int ocelot_pctl_get_groups_count(struct pinctrl_dev *pctldev)
{
struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
@@ -819,7 +1224,11 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info,
int ret = -EOPNOTSUPP;
if (info->pincfg) {
- u32 regcfg = readl(info->pincfg + (pin * sizeof(u32)));
+ u32 regcfg;
+
+ ret = regmap_read(info->pincfg, pin, &regcfg);
+ if (ret)
+ return ret;
ret = 0;
switch (reg) {
@@ -843,6 +1252,24 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info,
return ret;
}
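+
+/* Read-modify-write helper for the pincfg regmap: clear @clrbits, then set @setbits */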
+static int ocelot_pincfg_clrsetbits(struct ocelot_pinctrl *info, u32 regaddr,
+ u32 clrbits, u32 setbits)
+{
+ u32 val;
+ int ret;
+
+ ret = regmap_read(info->pincfg, regaddr, &val);
+ if (ret)
+ return ret;
+
+ val &= ~clrbits;
+ val |= setbits;
+
+ ret = regmap_write(info->pincfg, regaddr, val);
+
+ return ret;
+}
+
static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
unsigned int pin,
unsigned int reg,
@@ -851,21 +1278,23 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info,
int ret = -EOPNOTSUPP;
if (info->pincfg) {
- void __iomem *regaddr = info->pincfg + (pin * sizeof(u32));
ret = 0;
switch (reg) {
case PINCONF_BIAS:
- ocelot_clrsetbits(regaddr, BIAS_BITS, val);
+ ret = ocelot_pincfg_clrsetbits(info, pin, BIAS_BITS,
+ val);
break;
case PINCONF_SCHMITT:
- ocelot_clrsetbits(regaddr, SCHMITT_BIT, val);
+ ret = ocelot_pincfg_clrsetbits(info, pin, SCHMITT_BIT,
+ val);
break;
case PINCONF_DRIVE_STRENGTH:
if (val <= 3)
- ocelot_clrsetbits(regaddr, DRIVE_BITS, val);
+ ret = ocelot_pincfg_clrsetbits(info, pin,
+ DRIVE_BITS, val);
else
ret = -EINVAL;
break;
@@ -1078,6 +1507,16 @@ static struct pinctrl_desc sparx5_desc = {
.owner = THIS_MODULE,
};
+static struct pinctrl_desc lan966x_desc = {
+ .name = "lan966x-pinctrl",
+ .pins = lan966x_pins,
+ .npins = ARRAY_SIZE(lan966x_pins),
+ .pctlops = &ocelot_pctl_ops,
+ .pmxops = &lan966x_pmx_ops,
+ .confops = &ocelot_confops,
+ .owner = THIS_MODULE,
+};
+
static int ocelot_create_group_func_map(struct device *dev,
struct ocelot_pinctrl *info)
{
@@ -1308,8 +1747,7 @@ static int ocelot_gpiochip_register(struct platform_device *pdev,
gc = &info->gpio_chip;
gc->ngpio = info->desc->npins;
gc->parent = &pdev->dev;
- gc->base = 0;
- gc->of_node = info->dev->of_node;
+ gc->base = -1;
gc->label = "ocelot-gpio";
irq = irq_of_parse_and_map(gc->of_node, 0);
@@ -1337,15 +1775,36 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = {
{ .compatible = "mscc,ocelot-pinctrl", .data = &ocelot_desc },
{ .compatible = "mscc,jaguar2-pinctrl", .data = &jaguar2_desc },
{ .compatible = "microchip,sparx5-pinctrl", .data = &sparx5_desc },
+ { .compatible = "microchip,lan966x-pinctrl", .data = &lan966x_desc },
{},
};
+static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
+{
+ void __iomem *base;
+
+ const struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 32,
+ };
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ dev_dbg(&pdev->dev, "Failed to ioremap config registers (no extended pinconf)\n");
+ return NULL;
+ }
+
+ return devm_regmap_init_mmio(&pdev->dev, base, &regmap_config);
+}
+
static int ocelot_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ocelot_pinctrl *info;
+ struct regmap *pincfg;
void __iomem *base;
- struct resource *res;
int ret;
struct regmap_config regmap_config = {
.reg_bits = 32,
@@ -1378,12 +1837,11 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
/* Pinconf registers */
if (info->desc->confops) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- dev_dbg(dev, "Failed to ioremap config registers (no extended pinconf)\n");
+ pincfg = ocelot_pinctrl_create_pincfg(pdev);
+ if (IS_ERR(pincfg))
+ dev_dbg(dev, "Failed to create pincfg regmap\n");
else
- info->pincfg = base;
+ info->pincfg = pincfg;
}
ret = ocelot_pinctrl_register(pdev, info);
diff --git a/drivers/pinctrl/pinctrl-oxnas.c b/drivers/pinctrl/pinctrl-oxnas.c
index cebd810bd6d1..fb10a8473ebe 100644
--- a/drivers/pinctrl/pinctrl-oxnas.c
+++ b/drivers/pinctrl/pinctrl-oxnas.c
@@ -1232,7 +1232,6 @@ static int oxnas_gpio_probe(struct platform_device *pdev)
bank->id = id;
bank->gpio_chip.parent = &pdev->dev;
- bank->gpio_chip.of_node = np;
bank->gpio_chip.ngpio = ngpios;
girq = &bank->gpio_chip.irq;
girq->chip = &bank->irq_chip;
diff --git a/drivers/pinctrl/pinctrl-pic32.c b/drivers/pinctrl/pinctrl-pic32.c
index 748dabd8db6e..37acfdfc2cae 100644
--- a/drivers/pinctrl/pinctrl-pic32.c
+++ b/drivers/pinctrl/pinctrl-pic32.c
@@ -2241,7 +2241,7 @@ static int pic32_gpio_probe(struct platform_device *pdev)
}
bank->gpio_chip.parent = &pdev->dev;
- bank->gpio_chip.of_node = np;
+
girq = &bank->gpio_chip.irq;
girq->chip = &bank->irq_chip;
girq->parent_handler = pic32_gpio_irq_handler;
diff --git a/drivers/pinctrl/pinctrl-rk805.c b/drivers/pinctrl/pinctrl-rk805.c
index c6f4229eb106..7c1f7408fb9a 100644
--- a/drivers/pinctrl/pinctrl-rk805.c
+++ b/drivers/pinctrl/pinctrl-rk805.c
@@ -13,17 +13,17 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/rk808.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
#include <linux/pinctrl/consumer.h>
#include <linux/pinctrl/machine.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinmux.h>
-#include <linux/pm.h>
-#include <linux/slab.h>
#include "core.h"
#include "pinconf.h"
@@ -420,18 +420,18 @@ static int rk805_pinctrl_probe(struct platform_device *pdev)
struct rk805_pctrl_info *pci;
int ret;
+ device_set_node(&pdev->dev, dev_fwnode(pdev->dev.parent));
+
pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
if (!pci)
return -ENOMEM;
pci->dev = &pdev->dev;
- pci->dev->of_node = pdev->dev.parent->of_node;
pci->rk808 = dev_get_drvdata(pdev->dev.parent);
pci->pinctrl_desc = rk805_pinctrl_desc;
pci->gpio_chip = rk805_gpio_chip;
pci->gpio_chip.parent = &pdev->dev;
- pci->gpio_chip.of_node = pdev->dev.parent->of_node;
platform_set_drvdata(pdev, pci);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 5ce260f152ce..d8dd8415fa81 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -33,13 +33,15 @@
#include <linux/clk.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
+#include <linux/string_helpers.h>
+
#include <dt-bindings/pinctrl/rockchip.h>
#include "core.h"
#include "pinconf.h"
#include "pinctrl-rockchip.h"
-/**
+/*
* Generate a bitmask for setting a value (v) with a write mask bit in hiword
* register 31:16 area.
*/
@@ -285,6 +287,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
{
struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
const struct rockchip_pin_group *grp;
+ struct device *dev = info->dev;
struct pinctrl_map *new_map;
struct device_node *parent;
int map_num = 1;
@@ -296,8 +299,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
*/
grp = pinctrl_name_to_group(info, np->name);
if (!grp) {
- dev_err(info->dev, "unable to find group for node %pOFn\n",
- np);
+ dev_err(dev, "unable to find group for node %pOFn\n", np);
return -EINVAL;
}
@@ -331,7 +333,7 @@ static int rockchip_dt_node_to_map(struct pinctrl_dev *pctldev,
new_map[i].data.configs.num_configs = grp->data[i].nconfigs;
}
- dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
+ dev_dbg(dev, "maps: function %s group %s num %d\n",
(*map)->data.mux.function, (*map)->data.mux.group, map_num);
return 0;
@@ -872,20 +874,20 @@ static int rockchip_verify_mux(struct rockchip_pin_bank *bank,
int pin, int mux)
{
struct rockchip_pinctrl *info = bank->drvdata;
+ struct device *dev = info->dev;
int iomux_num = (pin / 8);
if (iomux_num > 3)
return -EINVAL;
if (bank->iomux[iomux_num].type & IOMUX_UNROUTED) {
- dev_err(info->dev, "pin %d is unrouted\n", pin);
+ dev_err(dev, "pin %d is unrouted\n", pin);
return -EINVAL;
}
if (bank->iomux[iomux_num].type & IOMUX_GPIO_ONLY) {
if (mux != RK_FUNC_GPIO) {
- dev_err(info->dev,
- "pin %d only supports a gpio mux\n", pin);
+ dev_err(dev, "pin %d only supports a gpio mux\n", pin);
return -ENOTSUPP;
}
}
@@ -909,6 +911,7 @@ static int rockchip_verify_mux(struct rockchip_pin_bank *bank,
static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
{
struct rockchip_pinctrl *info = bank->drvdata;
+ struct device *dev = info->dev;
int iomux_num = (pin / 8);
struct regmap *regmap;
int reg, ret, mask, mux_type;
@@ -922,8 +925,7 @@ static int rockchip_set_mux(struct rockchip_pin_bank *bank, int pin, int mux)
if (bank->iomux[iomux_num].type & IOMUX_GPIO_ONLY)
return 0;
- dev_dbg(info->dev, "setting mux of GPIO%d-%d to %d\n",
- bank->bank_num, pin, mux);
+ dev_dbg(dev, "setting mux of GPIO%d-%d to %d\n", bank->bank_num, pin, mux);
regmap = (bank->iomux[iomux_num].type & IOMUX_SOURCE_PMU)
? info->regmap_pmu : info->regmap_base;
@@ -1575,6 +1577,7 @@ static int rockchip_get_drive_perpin(struct rockchip_pin_bank *bank,
{
struct rockchip_pinctrl *info = bank->drvdata;
struct rockchip_pin_ctrl *ctrl = info->ctrl;
+ struct device *dev = info->dev;
struct regmap *regmap;
int reg, ret;
u32 data, temp, rmask_bits;
@@ -1620,7 +1623,7 @@ static int rockchip_get_drive_perpin(struct rockchip_pin_bank *bank,
bit -= 16;
break;
default:
- dev_err(info->dev, "unsupported bit: %d for pinctrl drive type: %d\n",
+ dev_err(dev, "unsupported bit: %d for pinctrl drive type: %d\n",
bit, drv_type);
return -EINVAL;
}
@@ -1632,8 +1635,7 @@ static int rockchip_get_drive_perpin(struct rockchip_pin_bank *bank,
rmask_bits = RK3288_DRV_BITS_PER_PIN;
break;
default:
- dev_err(info->dev, "unsupported pinctrl drive type: %d\n",
- drv_type);
+ dev_err(dev, "unsupported pinctrl drive type: %d\n", drv_type);
return -EINVAL;
}
@@ -1652,13 +1654,14 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
{
struct rockchip_pinctrl *info = bank->drvdata;
struct rockchip_pin_ctrl *ctrl = info->ctrl;
+ struct device *dev = info->dev;
struct regmap *regmap;
int reg, ret, i;
u32 data, rmask, rmask_bits, temp;
u8 bit;
int drv_type = bank->drv[pin_num / 8].drv_type;
- dev_dbg(info->dev, "setting drive of GPIO%d-%d to %d\n",
+ dev_dbg(dev, "setting drive of GPIO%d-%d to %d\n",
bank->bank_num, pin_num, strength);
ctrl->drv_calc_reg(bank, pin_num, &regmap, &reg, &bit);
@@ -1680,8 +1683,7 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
}
if (ret < 0) {
- dev_err(info->dev, "unsupported driver strength %d\n",
- strength);
+ dev_err(dev, "unsupported driver strength %d\n", strength);
return ret;
}
@@ -1720,7 +1722,7 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
bit -= 16;
break;
default:
- dev_err(info->dev, "unsupported bit: %d for pinctrl drive type: %d\n",
+ dev_err(dev, "unsupported bit: %d for pinctrl drive type: %d\n",
bit, drv_type);
return -EINVAL;
}
@@ -1731,8 +1733,7 @@ static int rockchip_set_drive_perpin(struct rockchip_pin_bank *bank,
rmask_bits = RK3288_DRV_BITS_PER_PIN;
break;
default:
- dev_err(info->dev, "unsupported pinctrl drive type: %d\n",
- drv_type);
+ dev_err(dev, "unsupported pinctrl drive type: %d\n", drv_type);
return -EINVAL;
}
@@ -1766,6 +1767,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
{
struct rockchip_pinctrl *info = bank->drvdata;
struct rockchip_pin_ctrl *ctrl = info->ctrl;
+ struct device *dev = info->dev;
struct regmap *regmap;
int reg, ret, pull_type;
u8 bit;
@@ -1800,7 +1802,7 @@ static int rockchip_get_pull(struct rockchip_pin_bank *bank, int pin_num)
return rockchip_pull_list[pull_type][data];
default:
- dev_err(info->dev, "unsupported pinctrl type\n");
+ dev_err(dev, "unsupported pinctrl type\n");
return -EINVAL;
};
}
@@ -1810,13 +1812,13 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
{
struct rockchip_pinctrl *info = bank->drvdata;
struct rockchip_pin_ctrl *ctrl = info->ctrl;
+ struct device *dev = info->dev;
struct regmap *regmap;
int reg, ret, i, pull_type;
u8 bit;
u32 data, rmask;
- dev_dbg(info->dev, "setting pull of GPIO%d-%d to %d\n",
- bank->bank_num, pin_num, pull);
+ dev_dbg(dev, "setting pull of GPIO%d-%d to %d\n", bank->bank_num, pin_num, pull);
	/* rk3066b doesn't support any pulls */
if (ctrl->type == RK3066B)
@@ -1859,8 +1861,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
}
if (ret < 0) {
- dev_err(info->dev, "unsupported pull setting %d\n",
- pull);
+ dev_err(dev, "unsupported pull setting %d\n", pull);
return ret;
}
@@ -1872,7 +1873,7 @@ static int rockchip_set_pull(struct rockchip_pin_bank *bank,
ret = regmap_update_bits(regmap, reg, rmask, data);
break;
default:
- dev_err(info->dev, "unsupported pinctrl type\n");
+ dev_err(dev, "unsupported pinctrl type\n");
return -EINVAL;
}
@@ -1963,12 +1964,13 @@ static int rockchip_set_schmitt(struct rockchip_pin_bank *bank,
{
struct rockchip_pinctrl *info = bank->drvdata;
struct rockchip_pin_ctrl *ctrl = info->ctrl;
+ struct device *dev = info->dev;
struct regmap *regmap;
int reg, ret;
u8 bit;
u32 data, rmask;
- dev_dbg(info->dev, "setting input schmitt of GPIO%d-%d to %d\n",
+ dev_dbg(dev, "setting input schmitt of GPIO%d-%d to %d\n",
bank->bank_num, pin_num, enable);
ret = ctrl->schmitt_calc_reg(bank, pin_num, &regmap, &reg, &bit);
@@ -2028,10 +2030,11 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector,
struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
const unsigned int *pins = info->groups[group].pins;
const struct rockchip_pin_config *data = info->groups[group].data;
+ struct device *dev = info->dev;
struct rockchip_pin_bank *bank;
int cnt, ret = 0;
- dev_dbg(info->dev, "enable function %s group %s\n",
+ dev_dbg(dev, "enable function %s group %s\n",
info->functions[selector].name, info->groups[group].name);
/*
@@ -2310,6 +2313,7 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np,
struct rockchip_pinctrl *info,
u32 index)
{
+ struct device *dev = info->dev;
struct rockchip_pin_bank *bank;
int size;
const __be32 *list;
@@ -2317,7 +2321,7 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np,
int i, j;
int ret;
- dev_dbg(info->dev, "group(%d): %pOFn\n", index, np);
+ dev_dbg(dev, "group(%d): %pOFn\n", index, np);
/* Initialise group */
grp->name = np->name;
@@ -2329,19 +2333,13 @@ static int rockchip_pinctrl_parse_groups(struct device_node *np,
list = of_get_property(np, "rockchip,pins", &size);
/* we do not check return since it's safe node passed down */
size /= sizeof(*list);
- if (!size || size % 4) {
- dev_err(info->dev, "wrong pins number or pins and configs should be by 4\n");
- return -EINVAL;
- }
+ if (!size || size % 4)
+ return dev_err_probe(dev, -EINVAL, "wrong pins number or pins and configs should be by 4\n");
grp->npins = size / 4;
- grp->pins = devm_kcalloc(info->dev, grp->npins, sizeof(unsigned int),
- GFP_KERNEL);
- grp->data = devm_kcalloc(info->dev,
- grp->npins,
- sizeof(struct rockchip_pin_config),
- GFP_KERNEL);
+ grp->pins = devm_kcalloc(dev, grp->npins, sizeof(*grp->pins), GFP_KERNEL);
+ grp->data = devm_kcalloc(dev, grp->npins, sizeof(*grp->data), GFP_KERNEL);
if (!grp->pins || !grp->data)
return -ENOMEM;
@@ -2375,6 +2373,7 @@ static int rockchip_pinctrl_parse_functions(struct device_node *np,
struct rockchip_pinctrl *info,
u32 index)
{
+ struct device *dev = info->dev;
struct device_node *child;
struct rockchip_pmx_func *func;
struct rockchip_pin_group *grp;
@@ -2382,7 +2381,7 @@ static int rockchip_pinctrl_parse_functions(struct device_node *np,
static u32 grp_index;
u32 i = 0;
- dev_dbg(info->dev, "parse function(%d): %pOFn\n", index, np);
+ dev_dbg(dev, "parse function(%d): %pOFn\n", index, np);
func = &info->functions[index];
@@ -2392,8 +2391,7 @@ static int rockchip_pinctrl_parse_functions(struct device_node *np,
if (func->ngroups <= 0)
return 0;
- func->groups = devm_kcalloc(info->dev,
- func->ngroups, sizeof(char *), GFP_KERNEL);
+ func->groups = devm_kcalloc(dev, func->ngroups, sizeof(*func->groups), GFP_KERNEL);
if (!func->groups)
return -ENOMEM;
@@ -2421,20 +2419,14 @@ static int rockchip_pinctrl_parse_dt(struct platform_device *pdev,
rockchip_pinctrl_child_count(info, np);
- dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
- dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
+ dev_dbg(dev, "nfunctions = %d\n", info->nfunctions);
+ dev_dbg(dev, "ngroups = %d\n", info->ngroups);
- info->functions = devm_kcalloc(dev,
- info->nfunctions,
- sizeof(struct rockchip_pmx_func),
- GFP_KERNEL);
+ info->functions = devm_kcalloc(dev, info->nfunctions, sizeof(*info->functions), GFP_KERNEL);
if (!info->functions)
return -ENOMEM;
- info->groups = devm_kcalloc(dev,
- info->ngroups,
- sizeof(struct rockchip_pin_group),
- GFP_KERNEL);
+ info->groups = devm_kcalloc(dev, info->ngroups, sizeof(*info->groups), GFP_KERNEL);
if (!info->groups)
return -ENOMEM;
@@ -2446,7 +2438,7 @@ static int rockchip_pinctrl_parse_dt(struct platform_device *pdev,
ret = rockchip_pinctrl_parse_functions(child, info, i++);
if (ret) {
- dev_err(&pdev->dev, "failed to parse function\n");
+ dev_err(dev, "failed to parse function\n");
of_node_put(child);
return ret;
}
@@ -2461,6 +2453,8 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
struct pinctrl_desc *ctrldesc = &info->pctl;
struct pinctrl_pin_desc *pindesc, *pdesc;
struct rockchip_pin_bank *pin_bank;
+ struct device *dev = &pdev->dev;
+ char **pin_names;
int pin, bank, ret;
int k;
@@ -2470,9 +2464,7 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
ctrldesc->pmxops = &rockchip_pmx_ops;
ctrldesc->confops = &rockchip_pinconf_ops;
- pindesc = devm_kcalloc(&pdev->dev,
- info->ctrl->nr_pins, sizeof(*pindesc),
- GFP_KERNEL);
+ pindesc = devm_kcalloc(dev, info->ctrl->nr_pins, sizeof(*pindesc), GFP_KERNEL);
if (!pindesc)
return -ENOMEM;
@@ -2482,10 +2474,14 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
pdesc = pindesc;
for (bank = 0, k = 0; bank < info->ctrl->nr_banks; bank++) {
pin_bank = &info->ctrl->pin_banks[bank];
+
+ pin_names = devm_kasprintf_strarray(dev, pin_bank->name, pin_bank->nr_pins);
+ if (IS_ERR(pin_names))
+ return PTR_ERR(pin_names);
+
for (pin = 0; pin < pin_bank->nr_pins; pin++, k++) {
pdesc->number = k;
- pdesc->name = kasprintf(GFP_KERNEL, "%s-%d",
- pin_bank->name, pin);
+ pdesc->name = pin_names[pin];
pdesc++;
}
@@ -2497,11 +2493,9 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
if (ret)
return ret;
- info->pctl_dev = devm_pinctrl_register(&pdev->dev, ctrldesc, info);
- if (IS_ERR(info->pctl_dev)) {
- dev_err(&pdev->dev, "could not register pinctrl driver\n");
- return PTR_ERR(info->pctl_dev);
- }
+ info->pctl_dev = devm_pinctrl_register(dev, ctrldesc, info);
+ if (IS_ERR(info->pctl_dev))
+ return dev_err_probe(dev, PTR_ERR(info->pctl_dev), "could not register pinctrl driver\n");
return 0;
}
@@ -2513,8 +2507,9 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
struct rockchip_pinctrl *d,
struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
const struct of_device_id *match;
- struct device_node *node = pdev->dev.of_node;
struct rockchip_pin_ctrl *ctrl;
struct rockchip_pin_bank *bank;
int grf_offs, pmu_offs, drv_grf_offs, drv_pmu_offs, i, j;
@@ -2566,7 +2561,7 @@ static struct rockchip_pin_ctrl *rockchip_pinctrl_get_soc_data(
drv_pmu_offs : drv_grf_offs;
}
- dev_dbg(d->dev, "bank %d, iomux %d has iom_offset 0x%x drv_offset 0x%x\n",
+ dev_dbg(dev, "bank %d, iomux %d has iom_offset 0x%x drv_offset 0x%x\n",
i, j, iom->offset, drv->offset);
/*
@@ -2675,16 +2670,14 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
{
struct rockchip_pinctrl *info;
struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node, *node;
struct rockchip_pin_ctrl *ctrl;
- struct device_node *np = pdev->dev.of_node, *node;
struct resource *res;
void __iomem *base;
int ret;
- if (!dev->of_node) {
- dev_err(dev, "device tree node not found\n");
- return -ENODEV;
- }
+ if (!dev->of_node)
+ return dev_err_probe(dev, -ENODEV, "device tree node not found\n");
info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -2693,10 +2686,8 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
info->dev = dev;
ctrl = rockchip_pinctrl_get_soc_data(info, pdev);
- if (!ctrl) {
- dev_err(dev, "driver data not available\n");
- return -EINVAL;
- }
+ if (!ctrl)
+ return dev_err_probe(dev, -EINVAL, "driver data not available\n");
info->ctrl = ctrl;
node = of_parse_phandle(np, "rockchip,grf", 0);
@@ -2705,32 +2696,28 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
if (IS_ERR(info->regmap_base))
return PTR_ERR(info->regmap_base);
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(base))
return PTR_ERR(base);
rockchip_regmap_config.max_register = resource_size(res) - 4;
rockchip_regmap_config.name = "rockchip,pinctrl";
- info->regmap_base = devm_regmap_init_mmio(&pdev->dev, base,
- &rockchip_regmap_config);
+ info->regmap_base =
+ devm_regmap_init_mmio(dev, base, &rockchip_regmap_config);
/* to check for the old dt-bindings */
info->reg_size = resource_size(res);
/* Honor the old binding, with pull registers as 2nd resource */
if (ctrl->type == RK3188 && info->reg_size < 0x200) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_get_and_ioremap_resource(pdev, 1, &res);
if (IS_ERR(base))
return PTR_ERR(base);
- rockchip_regmap_config.max_register =
- resource_size(res) - 4;
+ rockchip_regmap_config.max_register = resource_size(res) - 4;
rockchip_regmap_config.name = "rockchip,pinctrl-pull";
- info->regmap_pull = devm_regmap_init_mmio(&pdev->dev,
- base,
- &rockchip_regmap_config);
+ info->regmap_pull =
+ devm_regmap_init_mmio(dev, base, &rockchip_regmap_config);
}
}
@@ -2748,11 +2735,9 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
- ret = of_platform_populate(np, rockchip_bank_match, NULL, NULL);
- if (ret) {
- dev_err(&pdev->dev, "failed to register gpio device\n");
- return ret;
- }
+ ret = of_platform_populate(np, NULL, NULL, &pdev->dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register gpio device\n");
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 1482a01dfec7..0fea71fd9a00 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -55,7 +55,7 @@
#define ST_GPIO_DIRECTION_OUT 0x2
#define ST_GPIO_DIRECTION_IN 0x4
-/**
+/*
* Packed style retime configuration.
* There are two registers cfg0 and cfg1 in this style for each bank.
* Each field in this register is 8 bit corresponding to 8 pins in the bank.
@@ -69,7 +69,7 @@
#define RT_P_CFG1_CLKNOTDATA_FIELD(reg) REG_FIELD(reg, 16, 23)
#define RT_P_CFG1_DOUBLE_EDGE_FIELD(reg) REG_FIELD(reg, 24, 31)
-/**
+/*
* Dedicated style retime Configuration register
* each register is dedicated per pin.
*/
@@ -814,26 +814,25 @@ static int st_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
{
struct st_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
const struct st_pctl_group *grp;
+ struct device *dev = info->dev;
struct pinctrl_map *new_map;
struct device_node *parent;
int map_num, i;
grp = st_pctl_find_group_by_name(info, np->name);
if (!grp) {
- dev_err(info->dev, "unable to find group for node %pOFn\n",
- np);
+ dev_err(dev, "unable to find group for node %pOFn\n", np);
return -EINVAL;
}
map_num = grp->npins + 1;
- new_map = devm_kcalloc(pctldev->dev,
- map_num, sizeof(*new_map), GFP_KERNEL);
+ new_map = devm_kcalloc(dev, map_num, sizeof(*new_map), GFP_KERNEL);
if (!new_map)
return -ENOMEM;
parent = of_get_parent(np);
if (!parent) {
- devm_kfree(pctldev->dev, new_map);
+ devm_kfree(dev, new_map);
return -EINVAL;
}
@@ -853,7 +852,7 @@ static int st_pctl_dt_node_to_map(struct pinctrl_dev *pctldev,
new_map[i].data.configs.configs = &grp->pin_conf[i].config;
new_map[i].data.configs.num_configs = 1;
}
- dev_info(pctldev->dev, "maps: function %s group %s num %d\n",
+ dev_info(dev, "maps: function %s group %s num %d\n",
(*map)->data.mux.function, grp->name, map_num);
return 0;
@@ -1173,6 +1172,7 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
/* bank pad direction val altfunction */
const __be32 *list;
struct property *pp;
+ struct device *dev = info->dev;
struct st_pinconf *conf;
struct device_node *pins;
int i = 0, npins = 0, nr_props, ret = 0;
@@ -1197,9 +1197,8 @@ static int st_pctl_dt_parse_groups(struct device_node *np,
grp->npins = npins;
grp->name = np->name;
- grp->pins = devm_kcalloc(info->dev, npins, sizeof(u32), GFP_KERNEL);
- grp->pin_conf = devm_kcalloc(info->dev,
- npins, sizeof(*conf), GFP_KERNEL);
+ grp->pins = devm_kcalloc(dev, npins, sizeof(*grp->pins), GFP_KERNEL);
+ grp->pin_conf = devm_kcalloc(dev, npins, sizeof(*grp->pin_conf), GFP_KERNEL);
if (!grp->pins || !grp->pin_conf) {
ret = -ENOMEM;
@@ -1247,6 +1246,7 @@ out_put_node:
static int st_pctl_parse_functions(struct device_node *np,
struct st_pinctrl *info, u32 index, int *grp_index)
{
+ struct device *dev = info->dev;
struct device_node *child;
struct st_pmx_func *func;
struct st_pctl_group *grp;
@@ -1255,12 +1255,9 @@ static int st_pctl_parse_functions(struct device_node *np,
func = &info->functions[index];
func->name = np->name;
func->ngroups = of_get_child_count(np);
- if (func->ngroups == 0) {
- dev_err(info->dev, "No groups defined\n");
- return -EINVAL;
- }
- func->groups = devm_kcalloc(info->dev,
- func->ngroups, sizeof(char *), GFP_KERNEL);
+ if (func->ngroups == 0)
+ return dev_err_probe(dev, -EINVAL, "No groups defined\n");
+ func->groups = devm_kcalloc(dev, func->ngroups, sizeof(*func->groups), GFP_KERNEL);
if (!func->groups)
return -ENOMEM;
@@ -1275,8 +1272,7 @@ static int st_pctl_parse_functions(struct device_node *np,
return ret;
}
}
- dev_info(info->dev, "Function[%d\t name:%s,\tgroups:%d]\n",
- index, func->name, func->ngroups);
+ dev_info(dev, "Function[%d\t name:%s,\tgroups:%d]\n", index, func->name, func->ngroups);
return 0;
}
@@ -1557,10 +1553,8 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
skip_irq:
err = gpiochip_add_data(&bank->gpio_chip, bank);
- if (err) {
- dev_err(dev, "Failed to add gpiochip(%d)!\n", bank_num);
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to add gpiochip(%d)!\n", bank_num);
dev_info(dev, "%s bank added.\n", range->name);
return 0;
@@ -1577,63 +1571,50 @@ static const struct of_device_id st_pctl_of_match[] = {
static int st_pctl_probe_dt(struct platform_device *pdev,
struct pinctrl_desc *pctl_desc, struct st_pinctrl *info)
{
+ struct device *dev = &pdev->dev;
int ret = 0;
int i = 0, j = 0, k = 0, bank;
struct pinctrl_pin_desc *pdesc;
- struct device_node *np = pdev->dev.of_node;
+ struct device_node *np = dev->of_node;
struct device_node *child;
int grp_index = 0;
int irq = 0;
- struct resource *res;
st_pctl_dt_child_count(info, np);
- if (!info->nbanks) {
- dev_err(&pdev->dev, "you need at least one gpio bank\n");
- return -EINVAL;
- }
+ if (!info->nbanks)
+ return dev_err_probe(dev, -EINVAL, "you need at least one gpio bank\n");
- dev_info(&pdev->dev, "nbanks = %d\n", info->nbanks);
- dev_info(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
- dev_info(&pdev->dev, "ngroups = %d\n", info->ngroups);
+ dev_info(dev, "nbanks = %d\n", info->nbanks);
+ dev_info(dev, "nfunctions = %d\n", info->nfunctions);
+ dev_info(dev, "ngroups = %d\n", info->ngroups);
- info->functions = devm_kcalloc(&pdev->dev,
- info->nfunctions, sizeof(*info->functions), GFP_KERNEL);
+ info->functions = devm_kcalloc(dev, info->nfunctions, sizeof(*info->functions), GFP_KERNEL);
- info->groups = devm_kcalloc(&pdev->dev,
- info->ngroups, sizeof(*info->groups),
- GFP_KERNEL);
+ info->groups = devm_kcalloc(dev, info->ngroups, sizeof(*info->groups), GFP_KERNEL);
- info->banks = devm_kcalloc(&pdev->dev,
- info->nbanks, sizeof(*info->banks), GFP_KERNEL);
+ info->banks = devm_kcalloc(dev, info->nbanks, sizeof(*info->banks), GFP_KERNEL);
if (!info->functions || !info->groups || !info->banks)
return -ENOMEM;
info->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
- if (IS_ERR(info->regmap)) {
- dev_err(info->dev, "No syscfg phandle specified\n");
- return PTR_ERR(info->regmap);
- }
+ if (IS_ERR(info->regmap))
+ return dev_err_probe(dev, PTR_ERR(info->regmap), "No syscfg phandle specified\n");
info->data = of_match_node(st_pctl_of_match, np)->data;
irq = platform_get_irq(pdev, 0);
if (irq > 0) {
- res = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, "irqmux");
- info->irqmux_base = devm_ioremap_resource(&pdev->dev, res);
-
+ info->irqmux_base = devm_platform_ioremap_resource_byname(pdev, "irqmux");
if (IS_ERR(info->irqmux_base))
return PTR_ERR(info->irqmux_base);
irq_set_chained_handler_and_data(irq, st_gpio_irqmux_handler,
info);
-
}
pctl_desc->npins = info->nbanks * ST_GPIO_PINS_PER_BANK;
- pdesc = devm_kcalloc(&pdev->dev,
- pctl_desc->npins, sizeof(*pdesc), GFP_KERNEL);
+ pdesc = devm_kcalloc(dev, pctl_desc->npins, sizeof(*pdesc), GFP_KERNEL);
if (!pdesc)
return -ENOMEM;
@@ -1643,6 +1624,8 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
for_each_child_of_node(np, child) {
if (of_property_read_bool(child, "gpio-controller")) {
const char *bank_name = NULL;
+ char **pin_names;
+
ret = st_gpiolib_register_bank(info, bank, child);
if (ret) {
of_node_put(child);
@@ -1651,10 +1634,16 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
k = info->banks[bank].range.pin_base;
bank_name = info->banks[bank].range.name;
+
+ pin_names = devm_kasprintf_strarray(dev, bank_name, ST_GPIO_PINS_PER_BANK);
+ if (IS_ERR(pin_names)) {
+ of_node_put(child);
+ return PTR_ERR(pin_names);
+ }
+
for (j = 0; j < ST_GPIO_PINS_PER_BANK; j++, k++) {
pdesc->number = k;
- pdesc->name = kasprintf(GFP_KERNEL, "%s[%d]",
- bank_name, j);
+ pdesc->name = pin_names[j];
pdesc++;
}
st_parse_syscfgs(info, bank, child);
@@ -1663,7 +1652,7 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
ret = st_pctl_parse_functions(child, info,
i++, &grp_index);
if (ret) {
- dev_err(&pdev->dev, "No functions found.\n");
+ dev_err(dev, "No functions found.\n");
of_node_put(child);
return ret;
}
@@ -1675,24 +1664,25 @@ static int st_pctl_probe_dt(struct platform_device *pdev,
static int st_pctl_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct st_pinctrl *info;
struct pinctrl_desc *pctl_desc;
int ret, i;
- if (!pdev->dev.of_node) {
- dev_err(&pdev->dev, "device node not found.\n");
+ if (!dev->of_node) {
+ dev_err(dev, "device node not found.\n");
return -EINVAL;
}
- pctl_desc = devm_kzalloc(&pdev->dev, sizeof(*pctl_desc), GFP_KERNEL);
+ pctl_desc = devm_kzalloc(dev, sizeof(*pctl_desc), GFP_KERNEL);
if (!pctl_desc)
return -ENOMEM;
- info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
- info->dev = &pdev->dev;
+ info->dev = dev;
platform_set_drvdata(pdev, info);
ret = st_pctl_probe_dt(pdev, pctl_desc, info);
if (ret)
@@ -1702,13 +1692,11 @@ static int st_pctl_probe(struct platform_device *pdev)
pctl_desc->pctlops = &st_pctlops;
pctl_desc->pmxops = &st_pmxops;
pctl_desc->confops = &st_confops;
- pctl_desc->name = dev_name(&pdev->dev);
+ pctl_desc->name = dev_name(dev);
- info->pctl = devm_pinctrl_register(&pdev->dev, pctl_desc, info);
- if (IS_ERR(info->pctl)) {
- dev_err(&pdev->dev, "Failed pinctrl registration\n");
- return PTR_ERR(info->pctl);
- }
+ info->pctl = devm_pinctrl_register(dev, pctl_desc, info);
+ if (IS_ERR(info->pctl))
+ return dev_err_probe(dev, PTR_ERR(info->pctl), "Failed pinctrl registration\n");
for (i = 0; i < info->nbanks; i++)
pinctrl_add_gpio_range(info->pctl, &info->banks[i].range);
diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c
index 5fa2488fae87..ab4dde40d3ed 100644
--- a/drivers/pinctrl/pinctrl-stmfx.c
+++ b/drivers/pinctrl/pinctrl-stmfx.c
@@ -675,7 +675,6 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev)
pctl->gpio_chip.base = -1;
pctl->gpio_chip.ngpio = pctl->pctl_desc.npins;
pctl->gpio_chip.can_sleep = true;
- pctl->gpio_chip.of_node = np;
pctl->irq_chip.name = dev_name(pctl->dev);
pctl->irq_chip.irq_mask = stmfx_pinctrl_irq_mask;
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index 484a3b9e875c..a87ea3b95cf4 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -1163,9 +1163,6 @@ static int sx150x_probe(struct i2c_client *client,
pctl->gpio.set = sx150x_gpio_set;
pctl->gpio.set_config = gpiochip_generic_config;
pctl->gpio.parent = dev;
-#ifdef CONFIG_OF_GPIO
- pctl->gpio.of_node = dev->of_node;
-#endif
pctl->gpio.can_sleep = true;
pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL);
if (!pctl->gpio.label)
diff --git a/drivers/pinctrl/pinctrl-thunderbay.c b/drivers/pinctrl/pinctrl-thunderbay.c
new file mode 100644
index 000000000000..79d44bca039e
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-thunderbay.c
@@ -0,0 +1,1302 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Thunder Bay SOC pinctrl/GPIO driver
+ *
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "core.h"
+#include "pinconf.h"
+#include "pinctrl-utils.h"
+#include "pinmux.h"
+
+/* Bits 0:2 and 4:6 are used for mode selection */
+#define THB_GPIO_PINMUX_MODE_0 0x00
+#define THB_GPIO_PINMUX_MODE_1 0x11
+#define THB_GPIO_PINMUX_MODE_2 0x22
+#define THB_GPIO_PINMUX_MODE_3 0x33
+#define THB_GPIO_PINMUX_MODE_4 0x44
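+/* e.g. THB_GPIO_PINMUX_MODE_2 (0x22) programs mode 2 into both bit-fields */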
+
+#define THB_GPIO_PORT_SELECT_MASK BIT(8)
+#define THB_GPIO_PAD_DIRECTION_MASK BIT(10)
+#define THB_GPIO_SPU_MASK BIT(11)
+#define THB_GPIO_PULL_ENABLE_MASK BIT(12)
+#define THB_GPIO_PULL_UP_MASK BIT(13)
+#define THB_GPIO_PULL_DOWN_MASK BIT(14)
+#define THB_GPIO_ENAQ_MASK BIT(15)
+/* bit 16-19: Drive Strength for the Pad */
+#define THB_GPIO_DRIVE_STRENGTH_MASK (0xF0000)
+#define THB_GPIO_SLEW_RATE_MASK BIT(20)
+#define THB_GPIO_SCHMITT_TRIGGER_MASK BIT(21)
+
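+/* One 32-bit configuration register per pin */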
+#define THB_GPIO_REG_OFFSET(pin_num) ((pin_num) * (0x4))
+#define THB_MAX_MODE_SUPPORTED (5u)
+#define THB_MAX_NPINS_SUPPORTED (67u)
+
+/* Per-pin status, indexed by pin number */
+static u32 thb_pinx_status[THB_MAX_NPINS_SUPPORTED];
+
+struct thunderbay_mux_desc {
+ u8 mode;
+ const char *name;
+};
+
+#define THUNDERBAY_PIN_DESC(pin_number, pin_name, ...) { \
+ .number = pin_number, \
+ .name = pin_name, \
+ .drv_data = &(struct thunderbay_mux_desc[]) { \
+ __VA_ARGS__, { } }, \
+}
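+/* The empty { } entry terminates each pin's drv_data mux list */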
+
+#define THUNDERBAY_MUX(pin_mode, pin_function) { \
+ .mode = pin_mode, \
+ .name = pin_function, \
+}
+
+struct thunderbay_pin_soc {
+ const struct pinctrl_pin_desc *pins;
+ unsigned int npins;
+};
+
+/**
+ * struct thunderbay_pinctrl - Intel Thunderbay pinctrl structure
+ * @pctrl: Pointer to the pin controller device
+ * @base0: First register base address
+ * @dev: Pointer to the device structure
+ * @chip: GPIO chip used by this pin controller
+ * @soc: Pin control configuration data based on SoC
+ * @ngroups: Number of pin groups available
+ * @nfuncs: Number of pin functions available
+ */
+struct thunderbay_pinctrl {
+ struct pinctrl_dev *pctrl;
+ void __iomem *base0;
+ struct device *dev;
+ struct gpio_chip chip;
+ const struct thunderbay_pin_soc *soc;
+ unsigned int ngroups;
+ unsigned int nfuncs;
+};
+
+static const struct pinctrl_pin_desc thunderbay_pins[] = {
+ THUNDERBAY_PIN_DESC(0, "GPIO0",
+ THUNDERBAY_MUX(0X0, "I2C0_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(1, "GPIO1",
+ THUNDERBAY_MUX(0X0, "I2C0_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(2, "GPIO2",
+ THUNDERBAY_MUX(0X0, "I2C1_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(3, "GPIO3",
+ THUNDERBAY_MUX(0X0, "I2C1_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(4, "GPIO4",
+ THUNDERBAY_MUX(0X0, "I2C2_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(5, "GPIO5",
+ THUNDERBAY_MUX(0X0, "I2C2_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(6, "GPIO6",
+ THUNDERBAY_MUX(0X0, "I2C3_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(7, "GPIO7",
+ THUNDERBAY_MUX(0X0, "I2C3_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(8, "GPIO8",
+ THUNDERBAY_MUX(0X0, "I2C4_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(9, "GPIO9",
+ THUNDERBAY_MUX(0X0, "I2C4_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(10, "GPIO10",
+ THUNDERBAY_MUX(0X0, "UART0_M0"),
+ THUNDERBAY_MUX(0X1, "RT0_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(11, "GPIO11",
+ THUNDERBAY_MUX(0X0, "UART0_M0"),
+ THUNDERBAY_MUX(0X1, "RT0_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(12, "GPIO12",
+ THUNDERBAY_MUX(0X0, "UART0_M0"),
+ THUNDERBAY_MUX(0X1, "RT1_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(13, "GPIO13",
+ THUNDERBAY_MUX(0X0, "UART0_M0"),
+ THUNDERBAY_MUX(0X1, "RT1_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(14, "GPIO14",
+ THUNDERBAY_MUX(0X0, "UART1_M0"),
+ THUNDERBAY_MUX(0X1, "RT2_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "TRIGGER_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(15, "GPIO15",
+ THUNDERBAY_MUX(0X0, "UART1_M0"),
+ THUNDERBAY_MUX(0X1, "RT2_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "TRIGGER_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(16, "GPIO16",
+ THUNDERBAY_MUX(0X0, "UART1_M0"),
+ THUNDERBAY_MUX(0X1, "RT3_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(17, "GPIO17",
+ THUNDERBAY_MUX(0X0, "UART1_M0"),
+ THUNDERBAY_MUX(0X1, "RT3_DSU_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(18, "GPIO18",
+ THUNDERBAY_MUX(0X0, "SPI0_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(19, "GPIO19",
+ THUNDERBAY_MUX(0X0, "SPI0_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(20, "GPIO20",
+ THUNDERBAY_MUX(0X0, "SPI0_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_TRACE_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(21, "GPIO21",
+ THUNDERBAY_MUX(0X0, "SPI0_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_TRACE_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(22, "GPIO22",
+ THUNDERBAY_MUX(0X0, "SPI1_M0"),
+	THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(23, "GPIO23",
+ THUNDERBAY_MUX(0X0, "SPI1_M0"),
+ THUNDERBAY_MUX(0X1, "EMPTY_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(24, "GPIO24",
+ THUNDERBAY_MUX(0X0, "SPI1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_TRACE_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(25, "GPIO25",
+ THUNDERBAY_MUX(0X0, "SPI1_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_TRACE_M1"),
+ THUNDERBAY_MUX(0X2, "EMPTY_M2"),
+ THUNDERBAY_MUX(0X3, "EMPTY_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(26, "GPIO26",
+ THUNDERBAY_MUX(0X0, "ETHER0_M0"),
+ THUNDERBAY_MUX(0X1, "TPIU_DATA_M1"),
+ THUNDERBAY_MUX(0X2, "TPIU_DATA_M2"),
+ THUNDERBAY_MUX(0X3, "DEBUG_M3"),
+ THUNDERBAY_MUX(0X4, "GPIO_M4")),
+ THUNDERBAY_PIN_DESC(27, "GPIO27",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(28, "GPIO28",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(29, "GPIO29",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(30, "GPIO30",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(31, "GPIO31",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(32, "GPIO32",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(33, "GPIO33",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(34, "GPIO34",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DIG_VIEW_0"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(35, "GPIO35",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DIG_VIEW_1"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(36, "GPIO36",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "CPR_IO_OUT_CLK_0"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(37, "GPIO37",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "CPR_IO_OUT_CLK_1"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(38, "GPIO38",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "CPR_IO_OUT_CLK_2"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(39, "GPIO39",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "CPR_IO_OUT_CLK_3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(40, "GPIO40",
+		THUNDERBAY_MUX(0x0, "ETHER0_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "EMPTY_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(41, "GPIO41",
+		THUNDERBAY_MUX(0x0, "POWER_INTERRUPT_MAX_PLATFORM_POWER_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "EMPTY_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(42, "GPIO42",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(43, "GPIO43",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(44, "GPIO44",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(45, "GPIO45",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(46, "GPIO46",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(47, "GPIO47",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(48, "GPIO48",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(49, "GPIO49",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DEBUG_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(50, "GPIO50",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DIG_VIEW_0"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(51, "GPIO51",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "DIG_VIEW_1"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(52, "GPIO52",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "CPR_IO_OUT_CLK_0"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(53, "GPIO53",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "CPR_IO_OUT_CLK_1"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(54, "GPIO54",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "CPR_IO_OUT_CLK_2"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(55, "GPIO55",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "CPR_IO_OUT_CLK_3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(56, "GPIO56",
+		THUNDERBAY_MUX(0x0, "ETHER1_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "POWER_INTERRUPT_ICCMAX_VDDD_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(57, "GPIO57",
+		THUNDERBAY_MUX(0x0, "POWER_INTERRUPT_ICCMAX_VPU_M0"),
+		THUNDERBAY_MUX(0x1, "TPIU_DATA_M1"),
+		THUNDERBAY_MUX(0x2, "TPIU_DATA_M2"),
+		THUNDERBAY_MUX(0x3, "EMPTY_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(58, "GPIO58",
+		THUNDERBAY_MUX(0x0, "THERMTRIP_M0"),
+		THUNDERBAY_MUX(0x1, "EMPTY_M1"),
+		THUNDERBAY_MUX(0x2, "EMPTY_M2"),
+		THUNDERBAY_MUX(0x3, "EMPTY_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(59, "GPIO59",
+		THUNDERBAY_MUX(0x0, "THERMTRIP_M0"),
+		THUNDERBAY_MUX(0x1, "EMPTY_M1"),
+		THUNDERBAY_MUX(0x2, "EMPTY_M2"),
+		THUNDERBAY_MUX(0x3, "EMPTY_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(60, "GPIO60",
+		THUNDERBAY_MUX(0x0, "SMBUS_M0"),
+		THUNDERBAY_MUX(0x1, "EMPTY_M1"),
+		THUNDERBAY_MUX(0x2, "EMPTY_M2"),
+		THUNDERBAY_MUX(0x3, "EMPTY_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(61, "GPIO61",
+		THUNDERBAY_MUX(0x0, "SMBUS_M0"),
+		THUNDERBAY_MUX(0x1, "EMPTY_M1"),
+		THUNDERBAY_MUX(0x2, "EMPTY_M2"),
+		THUNDERBAY_MUX(0x3, "POWER_INTERRUPT_ICCMAX_VDDD_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(62, "GPIO62",
+		THUNDERBAY_MUX(0x0, "PLATFORM_RESET_M0"),
+		THUNDERBAY_MUX(0x1, "EMPTY_M1"),
+		THUNDERBAY_MUX(0x2, "EMPTY_M2"),
+		THUNDERBAY_MUX(0x3, "EMPTY_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(63, "GPIO63",
+		THUNDERBAY_MUX(0x0, "PLATFORM_RESET_M0"),
+		THUNDERBAY_MUX(0x1, "EMPTY_M1"),
+		THUNDERBAY_MUX(0x2, "EMPTY_M2"),
+		THUNDERBAY_MUX(0x3, "EMPTY_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(64, "GPIO64",
+		THUNDERBAY_MUX(0x0, "PLATFORM_SHUTDOWN_M0"),
+		THUNDERBAY_MUX(0x1, "EMPTY_M1"),
+		THUNDERBAY_MUX(0x2, "EMPTY_M2"),
+		THUNDERBAY_MUX(0x3, "EMPTY_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(65, "GPIO65",
+		THUNDERBAY_MUX(0x0, "PLATFORM_SHUTDOWN_M0"),
+		THUNDERBAY_MUX(0x1, "EMPTY_M1"),
+		THUNDERBAY_MUX(0x2, "EMPTY_M2"),
+		THUNDERBAY_MUX(0x3, "EMPTY_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+	THUNDERBAY_PIN_DESC(66, "GPIO66",
+		THUNDERBAY_MUX(0x0, "POWER_INTERRUPT_ICCMAX_MEDIA_M0"),
+		THUNDERBAY_MUX(0x1, "EMPTY_M1"),
+		THUNDERBAY_MUX(0x2, "EMPTY_M2"),
+		THUNDERBAY_MUX(0x3, "EMPTY_M3"),
+		THUNDERBAY_MUX(0x4, "GPIO_M4")),
+};
+
+static const struct thunderbay_pin_soc thunderbay_data = {
+ .pins = thunderbay_pins,
+ .npins = ARRAY_SIZE(thunderbay_pins),
+};
+
+static u32 thb_gpio_read_reg(struct gpio_chip *chip, unsigned int pinnr)
+{
+ struct thunderbay_pinctrl *tpc = gpiochip_get_data(chip);
+
+ return readl(tpc->base0 + THB_GPIO_REG_OFFSET(pinnr));
+}
+
+static int thb_gpio_write_reg(struct gpio_chip *chip, unsigned int pinnr, u32 value)
+{
+ struct thunderbay_pinctrl *tpc = gpiochip_get_data(chip);
+
+ writel(value, (tpc->base0 + THB_GPIO_REG_OFFSET(pinnr)));
+ return 0;
+}
+
+static int thb_read_gpio_data(struct gpio_chip *chip, unsigned int offset, unsigned int pad_dir)
+{
+ int data_offset;
+ u32 data_reg;
+
+	/* Per the GPIO spec, pad_dir is 0 for input and 1 for output */
+ data_offset = 0x2000u + (offset / 32);
+ if (!pad_dir)
+ data_offset += 4;
+ data_reg = thb_gpio_read_reg(chip, data_offset);
+
+ return data_reg & BIT(offset % 32);
+}
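+
+/*
+ * Worked example (illustrative only): for output pin 40 the data word
+ * index is 0x2000 + (40 / 32) = 0x2001 and the pin maps to
+ * BIT(40 % 32) = BIT(8) within that word; an input pin uses the word at
+ * index + 4. The index is still translated through THB_GPIO_REG_OFFSET()
+ * by thb_gpio_read_reg() before the MMIO access.
+ */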
+
+static int thb_write_gpio_data(struct gpio_chip *chip, unsigned int offset, unsigned int value)
+{
+ int data_offset;
+ u32 data_reg;
+
+ data_offset = 0x2000u + (offset / 32);
+
+ data_reg = thb_gpio_read_reg(chip, data_offset);
+
+ if (value > 0)
+ data_reg |= BIT(offset % 32);
+ else
+ data_reg &= ~BIT(offset % 32);
+
+ return thb_gpio_write_reg(chip, data_offset, data_reg);
+}
+
+static int thunderbay_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+ u32 reg = thb_gpio_read_reg(chip, offset);
+
+ /* Return direction only if configured as GPIO else negative error */
+ if (reg & THB_GPIO_PORT_SELECT_MASK)
+ return !(reg & THB_GPIO_PAD_DIRECTION_MASK);
+ return -EINVAL;
+}
+
+static int thunderbay_gpio_set_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ u32 reg = thb_gpio_read_reg(chip, offset);
+
+ /* set pin as input only if it is GPIO else error */
+ if (reg & THB_GPIO_PORT_SELECT_MASK) {
+ reg &= (~THB_GPIO_PAD_DIRECTION_MASK);
+ thb_gpio_write_reg(chip, offset, reg);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void thunderbay_gpio_set_value(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ u32 reg = thb_gpio_read_reg(chip, offset);
+
+	/* Update the pin value only when the pin is configured as GPIO output */
+ if ((reg & THB_GPIO_PORT_SELECT_MASK) && (reg & THB_GPIO_PAD_DIRECTION_MASK))
+ thb_write_gpio_data(chip, offset, value);
+}
+
+static int thunderbay_gpio_set_direction_output(struct gpio_chip *chip,
+ unsigned int offset, int value)
+{
+ u32 reg = thb_gpio_read_reg(chip, offset);
+
+ /* set pin as output only if it is GPIO else error */
+ if (reg & THB_GPIO_PORT_SELECT_MASK) {
+ reg |= THB_GPIO_PAD_DIRECTION_MASK;
+ thb_gpio_write_reg(chip, offset, reg);
+ thunderbay_gpio_set_value(chip, offset, value);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int thunderbay_gpio_get_value(struct gpio_chip *chip, unsigned int offset)
+{
+ u32 reg = thb_gpio_read_reg(chip, offset);
+ int gpio_dir = 0;
+
+ /* Read pin value only if it is GPIO else error */
+ if (reg & THB_GPIO_PORT_SELECT_MASK) {
+ /* 0=in, 1=out */
+ gpio_dir = (reg & THB_GPIO_PAD_DIRECTION_MASK) > 0;
+
+		/* Read the data register that matches the pin direction */
+ return thb_read_gpio_data(chip, offset, gpio_dir);
+ }
+ return -EINVAL;
+}
+
+static int thunderbay_gpiochip_probe(struct thunderbay_pinctrl *tpc)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ int ret;
+
+ chip->label = dev_name(tpc->dev);
+ chip->parent = tpc->dev;
+ chip->request = gpiochip_generic_request;
+ chip->free = gpiochip_generic_free;
+ chip->get_direction = thunderbay_gpio_get_direction;
+ chip->direction_input = thunderbay_gpio_set_direction_input;
+ chip->direction_output = thunderbay_gpio_set_direction_output;
+ chip->get = thunderbay_gpio_get_value;
+ chip->set = thunderbay_gpio_set_value;
+ chip->set_config = gpiochip_generic_config;
+	/*
+	 * A negative base asks gpiolib to allocate the chip's GPIO numbers
+	 * dynamically; always pass -1 here, since the static GPIO number
+	 * space is being phased out.
+	 */
+ chip->base = -1;
+	/* Number of GPIOs handled by this controller; the last is (base + ngpio - 1) */
+ chip->ngpio = THB_MAX_NPINS_SUPPORTED;
+
+	/* Register the Thunder Bay GPIO chip with gpiolib (device-managed) */
+	ret = devm_gpiochip_add_data(tpc->dev, chip, tpc);
+ if (ret)
+ dev_err(tpc->dev, "Failed to add gpiochip\n");
+ return ret;
+}
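+
+/*
+ * Minimal consumer-side sketch (illustrative, not part of this driver):
+ * once the chip is registered, peripheral drivers reach these pins via
+ * the generic gpiod API; the "reset" function name below is a made-up
+ * example binding.
+ *
+ *	struct gpio_desc *gd = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ *
+ *	if (IS_ERR(gd))
+ *		return PTR_ERR(gd);
+ *	gpiod_set_value(gd, 1);
+ *
+ * The final call is routed to thunderbay_gpio_set_value() above.
+ */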
+
+static int thunderbay_request_gpio(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct thunderbay_pinctrl *tpc = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg = 0;
+
+ if (thb_pinx_status[pin] == 0u) {
+ reg = thb_gpio_read_reg(chip, pin);
+		/* Configure the pin as GPIO and select pinmux mode 4 */
+ reg |= (THB_GPIO_PORT_SELECT_MASK | THB_GPIO_PINMUX_MODE_4);
+ thb_gpio_write_reg(chip, pin, reg);
+
+ /* update pin status as busy */
+ thb_pinx_status[pin] = 1u;
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void thunderbay_free_gpio(struct pinctrl_dev *pctldev,
+ struct pinctrl_gpio_range *range,
+ unsigned int pin)
+{
+ struct thunderbay_pinctrl *tpc = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg = 0;
+
+ if (thb_pinx_status[pin] == 1u) {
+ reg = thb_gpio_read_reg(chip, pin);
+
+ /* Updates PIN configuration from GPIO to PORT */
+ reg &= (~THB_GPIO_PORT_SELECT_MASK);
+
+ /* Change Port/gpio mode to default mode-0 */
+ reg &= (~THB_GPIO_PINMUX_MODE_4);
+
+ thb_gpio_write_reg(chip, pin, reg);
+
+ /* update pin status as free */
+ thb_pinx_status[pin] = 0u;
+ }
+}
+
+static int thb_pinctrl_set_mux(struct pinctrl_dev *pctldev,
+ unsigned int func_select, unsigned int group_select)
+{
+ struct thunderbay_pinctrl *tpc = pinctrl_dev_get_drvdata(pctldev);
+ struct gpio_chip *chip = &tpc->chip;
+ struct function_desc *function;
+ unsigned int i, pin_mode;
+ struct group_desc *group;
+ int ret = -EINVAL;
+ u32 reg = 0u;
+
+ group = pinctrl_generic_get_group(pctldev, group_select);
+ if (!group)
+ return -EINVAL;
+
+ function = pinmux_generic_get_function(pctldev, func_select);
+ if (!function)
+ return -EINVAL;
+
+ pin_mode = *(unsigned int *)(function->data);
+
+ /* Change modes for pins in the selected group */
+ for (i = 0; i < group->num_pins; i++) {
+ reg = thb_gpio_read_reg(chip, group->pins[i]);
+
+ switch (pin_mode) {
+ case 0u:
+ reg |= THB_GPIO_PINMUX_MODE_0;
+ break;
+ case 1u:
+ reg |= THB_GPIO_PINMUX_MODE_1;
+ break;
+ case 2u:
+ reg |= THB_GPIO_PINMUX_MODE_2;
+ break;
+ case 3u:
+ reg |= THB_GPIO_PINMUX_MODE_3;
+ break;
+ case 4u:
+ reg |= THB_GPIO_PINMUX_MODE_4;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = thb_gpio_write_reg(chip, group->pins[i], reg);
+		if (!ret) {
+			/* Write succeeded: mark the pin as busy */
+			thb_pinx_status[group->pins[i]] = 1u;
+ }
+ }
+ return ret;
+}
+
+static int thunderbay_build_groups(struct thunderbay_pinctrl *tpc)
+{
+ struct group_desc *thunderbay_groups;
+ int i;
+
+ tpc->ngroups = tpc->soc->npins;
+ thunderbay_groups = devm_kcalloc(tpc->dev, tpc->ngroups,
+ sizeof(*thunderbay_groups), GFP_KERNEL);
+ if (!thunderbay_groups)
+ return -ENOMEM;
+
+ for (i = 0; i < tpc->ngroups; i++) {
+ struct group_desc *group = thunderbay_groups + i;
+ const struct pinctrl_pin_desc *pin_info = thunderbay_pins + i;
+
+ group->name = pin_info->name;
+ group->pins = (int *)&pin_info->number;
+ pinctrl_generic_add_group(tpc->pctrl, group->name,
+ group->pins, 1, NULL);
+ }
+ return 0;
+}
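+
+/*
+ * Resulting layout (for illustration): every pin becomes its own
+ * single-pin group named after the pin, e.g. pin 28 is the group
+ * "GPIO28", so group index equals pin number throughout the driver.
+ */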
+
+static int thunderbay_add_functions(struct thunderbay_pinctrl *tpc, struct function_desc *funcs)
+{
+ int i;
+
+ /* Assign the groups for each function */
+ for (i = 0; i < tpc->nfuncs; i++) {
+ struct function_desc *func = &funcs[i];
+ const char **group_names;
+ unsigned int grp_idx = 0;
+ int j;
+
+ group_names = devm_kcalloc(tpc->dev, func->num_group_names,
+ sizeof(*group_names), GFP_KERNEL);
+		if (!group_names) {
+			kfree(funcs);
+			return -ENOMEM;
+		}
+
+ for (j = 0; j < tpc->soc->npins; j++) {
+ const struct pinctrl_pin_desc *pin_info = &thunderbay_pins[j];
+ struct thunderbay_mux_desc *pin_mux;
+
+ for (pin_mux = pin_info->drv_data; pin_mux->name; pin_mux++) {
+ if (!strcmp(pin_mux->name, func->name))
+ group_names[grp_idx++] = pin_info->name;
+ }
+ }
+
+ func->group_names = group_names;
+ }
+
+ /* Add all functions */
+ for (i = 0; i < tpc->nfuncs; i++) {
+ pinmux_generic_add_function(tpc->pctrl,
+ funcs[i].name,
+ funcs[i].group_names,
+ funcs[i].num_group_names,
+ funcs[i].data);
+ }
+ kfree(funcs);
+ return 0;
+}
+
+static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc)
+{
+ struct function_desc *thunderbay_funcs;
+ void *ptr;
+ int pin;
+
+ /*
+	 * Allocate the maximum possible number of functions: assume each
+	 * pin belongs to up to 8 (the hardware maximum) globally unique muxes.
+ */
+ tpc->nfuncs = 0;
+ thunderbay_funcs = kcalloc(tpc->soc->npins * 8,
+ sizeof(*thunderbay_funcs), GFP_KERNEL);
+ if (!thunderbay_funcs)
+ return -ENOMEM;
+
+ /* Setup 1 function for each unique mux */
+ for (pin = 0; pin < tpc->soc->npins; pin++) {
+ const struct pinctrl_pin_desc *pin_info = thunderbay_pins + pin;
+ struct thunderbay_mux_desc *pin_mux;
+
+ for (pin_mux = pin_info->drv_data; pin_mux->name; pin_mux++) {
+ struct function_desc *func;
+
+ /* Check if we already have function for this mux */
+ for (func = thunderbay_funcs; func->name; func++) {
+ if (!strcmp(pin_mux->name, func->name)) {
+ func->num_group_names++;
+ break;
+ }
+ }
+
+ if (!func->name) {
+ func->name = pin_mux->name;
+ func->num_group_names = 1;
+ func->data = (int *)&pin_mux->mode;
+ tpc->nfuncs++;
+ }
+ }
+ }
+
+ /* Reallocate memory based on actual number of functions */
+ ptr = krealloc(thunderbay_funcs,
+ tpc->nfuncs * sizeof(*thunderbay_funcs), GFP_KERNEL);
+	if (!ptr) {
+		kfree(thunderbay_funcs);
+		return -ENOMEM;
+	}
+
+ thunderbay_funcs = ptr;
+ return thunderbay_add_functions(tpc, thunderbay_funcs);
+}
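+
+/*
+ * Walk-through of the two passes above, on assumed data: if pins 0 and 1
+ * both expose a mux named "ETHER0_M0", the first pass creates a single
+ * function_desc { .name = "ETHER0_M0", .num_group_names = 2 }, and
+ * thunderbay_add_functions() then fills .group_names with
+ * { "GPIO0", "GPIO1" } by rescanning every pin's drv_data.
+ */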
+
+static int thunderbay_pinconf_set_tristate(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ if (config > 0)
+ reg |= THB_GPIO_ENAQ_MASK;
+ else
+ reg &= ~THB_GPIO_ENAQ_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_tristate(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = (reg & THB_GPIO_ENAQ_MASK) > 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_set_pulldown(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ if (config > 0)
+ reg |= THB_GPIO_PULL_DOWN_MASK;
+ else
+ reg &= ~THB_GPIO_PULL_DOWN_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_pulldown(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg = 0;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = ((reg & THB_GPIO_PULL_DOWN_MASK) > 0) ? 1 : 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_set_pullup(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ if (config > 0)
+ reg &= ~THB_GPIO_PULL_UP_MASK;
+ else
+ reg |= THB_GPIO_PULL_UP_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_pullup(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = ((reg & THB_GPIO_PULL_UP_MASK) == 0) ? 1 : 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_set_opendrain(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ if (config > 0)
+ reg &= ~THB_GPIO_PULL_ENABLE_MASK;
+ else
+ reg |= THB_GPIO_PULL_ENABLE_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_opendrain(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = ((reg & THB_GPIO_PULL_ENABLE_MASK) == 0) ? 1 : 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_set_pushpull(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ if (config > 0)
+ reg |= THB_GPIO_PULL_ENABLE_MASK;
+ else
+ reg &= ~THB_GPIO_PULL_ENABLE_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_pushpull(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = ((reg & THB_GPIO_PULL_ENABLE_MASK) > 0) ? 1 : 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_set_drivestrength(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+
+	/* Drive strength argument: 0x0 to 0xF */
+	if (config <= 0xF) {
+		/* Clear the old field, then place the new value at bit 16 */
+		reg &= ~THB_GPIO_DRIVE_STRENGTH_MASK;
+		reg |= config << 16;
+		return thb_gpio_write_reg(chip, pin, reg);
+	}
+
+ return -EINVAL;
+}
+
+static int thunderbay_pinconf_get_drivestrength(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+	*config = (reg & THB_GPIO_DRIVE_STRENGTH_MASK) >> 16;
+
+ return 0;
+}
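+
+/*
+ * Example (illustrative, assuming THB_GPIO_DRIVE_STRENGTH_MASK covers
+ * bits [19:16]): a raw control register value of 0x000a0000 decodes to
+ * a drive-strength argument of (0x000a0000 & mask) >> 16 = 0xa.
+ */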
+
+static int thunderbay_pinconf_set_schmitt(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ if (config > 0)
+ reg |= THB_GPIO_SCHMITT_TRIGGER_MASK;
+ else
+ reg &= ~THB_GPIO_SCHMITT_TRIGGER_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_schmitt(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = ((reg & THB_GPIO_SCHMITT_TRIGGER_MASK) > 0) ? 1 : 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_set_slew_rate(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg = 0;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ if (config > 0)
+ reg |= THB_GPIO_SLEW_RATE_MASK;
+ else
+ reg &= ~THB_GPIO_SLEW_RATE_MASK;
+
+ return thb_gpio_write_reg(chip, pin, reg);
+}
+
+static int thunderbay_pinconf_get_slew_rate(struct thunderbay_pinctrl *tpc,
+ unsigned int pin, u32 *config)
+{
+ struct gpio_chip *chip = &tpc->chip;
+ u32 reg;
+
+ reg = thb_gpio_read_reg(chip, pin);
+ *config = ((reg & THB_GPIO_SLEW_RATE_MASK) > 0) ? 1 : 0;
+
+ return 0;
+}
+
+static int thunderbay_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *config)
+{
+ struct thunderbay_pinctrl *tpc = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ u32 arg;
+ int ret;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ ret = thunderbay_pinconf_get_tristate(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = thunderbay_pinconf_get_pulldown(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = thunderbay_pinconf_get_pullup(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ ret = thunderbay_pinconf_get_opendrain(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ ret = thunderbay_pinconf_get_pushpull(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = thunderbay_pinconf_get_drivestrength(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ ret = thunderbay_pinconf_get_schmitt(tpc, pin, &arg);
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ ret = thunderbay_pinconf_get_slew_rate(tpc, pin, &arg);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, arg);
+
+ return ret;
+}
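+
+/*
+ * For reference, the generic helper packs the result as
+ * pinconf_to_config_packed(param, arg) == (arg << 8) | param, i.e. the
+ * pin_config_param lives in the low 8 bits and the argument sits above
+ * them, so (PIN_CONFIG_DRIVE_STRENGTH, 0xa) packs to 0xa00 | param.
+ */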
+
+static int thunderbay_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
+ unsigned long *configs, unsigned int num_configs)
+{
+ struct thunderbay_pinctrl *tpc = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ unsigned int pinconf;
+ int ret = 0;
+ u32 arg;
+
+ for (pinconf = 0; pinconf < num_configs; pinconf++) {
+ param = pinconf_to_config_param(configs[pinconf]);
+ arg = pinconf_to_config_argument(configs[pinconf]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_HIGH_IMPEDANCE:
+ ret = thunderbay_pinconf_set_tristate(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = thunderbay_pinconf_set_pulldown(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = thunderbay_pinconf_set_pullup(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ ret = thunderbay_pinconf_set_opendrain(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_DRIVE_PUSH_PULL:
+ ret = thunderbay_pinconf_set_pushpull(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ ret = thunderbay_pinconf_set_drivestrength(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ ret = thunderbay_pinconf_set_schmitt(tpc, pin, arg);
+ break;
+
+ case PIN_CONFIG_SLEW_RATE:
+ ret = thunderbay_pinconf_set_slew_rate(tpc, pin, arg);
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+ }
+ return ret;
+}
+
+static const struct pinctrl_ops thunderbay_pctlops = {
+ .get_groups_count = pinctrl_generic_get_group_count,
+ .get_group_name = pinctrl_generic_get_group_name,
+ .get_group_pins = pinctrl_generic_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_all,
+ .dt_free_map = pinconf_generic_dt_free_map,
+};
+
+static const struct pinmux_ops thunderbay_pmxops = {
+ .get_functions_count = pinmux_generic_get_function_count,
+ .get_function_name = pinmux_generic_get_function_name,
+ .get_function_groups = pinmux_generic_get_function_groups,
+ .set_mux = thb_pinctrl_set_mux,
+ .gpio_request_enable = thunderbay_request_gpio,
+ .gpio_disable_free = thunderbay_free_gpio,
+};
+
+static const struct pinconf_ops thunderbay_confops = {
+ .is_generic = true,
+ .pin_config_get = thunderbay_pinconf_get,
+ .pin_config_set = thunderbay_pinconf_set,
+};
+
+static struct pinctrl_desc thunderbay_pinctrl_desc = {
+ .name = "thunderbay-pinmux",
+ .pctlops = &thunderbay_pctlops,
+ .pmxops = &thunderbay_pmxops,
+ .confops = &thunderbay_confops,
+ .owner = THIS_MODULE,
+};
+
+static const struct of_device_id thunderbay_pinctrl_match[] = {
+ {
+ .compatible = "intel,thunderbay-pinctrl",
+ .data = &thunderbay_data
+ },
+ {}
+};
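+
+/*
+ * Example device tree node (illustrative; the unit address and reg
+ * values are placeholders, not taken from a real board file):
+ *
+ *	pinctrl@600b0000 {
+ *		compatible = "intel,thunderbay-pinctrl";
+ *		reg = <0x600b0000 0x20000>;
+ *	};
+ */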
+
+static int thunderbay_pinctrl_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ struct device *dev = &pdev->dev;
+ struct thunderbay_pinctrl *tpc;
+ struct resource *iomem;
+ int ret;
+
+ of_id = of_match_node(thunderbay_pinctrl_match, pdev->dev.of_node);
+ if (!of_id)
+ return -ENODEV;
+
+ tpc = devm_kzalloc(dev, sizeof(*tpc), GFP_KERNEL);
+ if (!tpc)
+ return -ENOMEM;
+
+ tpc->dev = dev;
+ tpc->soc = of_id->data;
+
+ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!iomem)
+ return -ENXIO;
+
+ tpc->base0 = devm_ioremap_resource(dev, iomem);
+ if (IS_ERR(tpc->base0))
+ return PTR_ERR(tpc->base0);
+
+ thunderbay_pinctrl_desc.pins = tpc->soc->pins;
+ thunderbay_pinctrl_desc.npins = tpc->soc->npins;
+
+ /* Register pinctrl */
+ tpc->pctrl = devm_pinctrl_register(dev, &thunderbay_pinctrl_desc, tpc);
+ if (IS_ERR(tpc->pctrl))
+ return PTR_ERR(tpc->pctrl);
+
+ /* Setup pinmux groups */
+ ret = thunderbay_build_groups(tpc);
+ if (ret)
+ return ret;
+
+ /* Setup pinmux functions */
+ ret = thunderbay_build_functions(tpc);
+ if (ret)
+ return ret;
+
+ /* Setup GPIO */
+ ret = thunderbay_gpiochip_probe(tpc);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, tpc);
+
+ return 0;
+}
+
+static int thunderbay_pinctrl_remove(struct platform_device *pdev)
+{
+	/* All allocations and the GPIO chip are device-managed; nothing to release */
+ return 0;
+}
+
+static struct platform_driver thunderbay_pinctrl_driver = {
+ .driver = {
+ .name = "thunderbay-pinctrl",
+ .of_match_table = thunderbay_pinctrl_match,
+ },
+ .probe = thunderbay_pinctrl_probe,
+ .remove = thunderbay_pinctrl_remove,
+};
+
+builtin_platform_driver(thunderbay_pinctrl_driver);
+
+MODULE_AUTHOR("Lakshmi Sowjanya D <lakshmi.sowjanya.d@intel.com>");
+MODULE_AUTHOR("Kiran Kumar S <kiran.kumar1.s@intel.com>");
+MODULE_DESCRIPTION("Intel Thunder Bay Pinctrl/GPIO Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index 5e3f31b55eb7..3a03beb8a755 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -1763,7 +1763,6 @@ static int pinmux_xway_probe(struct platform_device *pdev)
/* register the gpio chip */
xway_chip.parent = &pdev->dev;
xway_chip.owner = THIS_MODULE;
- xway_chip.of_node = pdev->dev.of_node;
ret = devm_gpiochip_add_data(&pdev->dev, &xway_chip, NULL);
if (ret) {
dev_err(&pdev->dev, "Failed to register gpio chip\n");
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 6cdbd9ccf2f0..f94d43b082d9 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -875,7 +875,7 @@ EXPORT_SYMBOL_GPL(pinmux_generic_get_function);
*/
int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
const char *name,
- const char **groups,
+ const char * const *groups,
const unsigned int num_groups,
void *data)
{
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
index 78c3a31be882..72fcf03eaa43 100644
--- a/drivers/pinctrl/pinmux.h
+++ b/drivers/pinctrl/pinmux.h
@@ -129,7 +129,7 @@ static inline void pinmux_init_device_debugfs(struct dentry *devroot,
*/
struct function_desc {
const char *name;
- const char **group_names;
+ const char * const *group_names;
int num_group_names;
void *data;
};
@@ -150,7 +150,7 @@ struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev,
int pinmux_generic_add_function(struct pinctrl_dev *pctldev,
const char *name,
- const char **groups,
+ const char * const *groups,
unsigned const num_groups,
void *data);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 3e0c00766f59..ca6f68a061a8 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -302,6 +302,15 @@ config PINCTRL_SM6350
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM6350 platform.
+config PINCTRL_SDX65
+ tristate "Qualcomm Technologies Inc SDX65 pin controller driver"
+ depends on GPIOLIB && OF
+ depends on PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SDX65 platform.
+
config PINCTRL_SM8150
tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
depends on OF
@@ -328,6 +337,15 @@ config PINCTRL_SM8350
Qualcomm Technologies Inc TLMM block found on the Qualcomm
Technologies Inc SM8350 platform.
+config PINCTRL_SM8450
+ tristate "Qualcomm Technologies Inc SM8450 pin controller driver"
+ depends on GPIOLIB && OF
+ select PINCTRL_MSM
+ help
+ This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+ Qualcomm Technologies Inc TLMM block found on the Qualcomm
+ Technologies Inc SM8450 platform.
+
config PINCTRL_LPASS_LPI
tristate "Qualcomm Technologies Inc LPASS LPI pin controller driver"
select PINMUX
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 49b509080745..709882f54d25 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -35,7 +35,9 @@ obj-$(CONFIG_PINCTRL_SDX55) += pinctrl-sdx55.o
obj-$(CONFIG_PINCTRL_SM6115) += pinctrl-sm6115.o
obj-$(CONFIG_PINCTRL_SM6125) += pinctrl-sm6125.o
obj-$(CONFIG_PINCTRL_SM6350) += pinctrl-sm6350.o
+obj-$(CONFIG_PINCTRL_SDX65) += pinctrl-sdx65.o
obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o
obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o
obj-$(CONFIG_PINCTRL_SM8350) += pinctrl-sm8350.o
+obj-$(CONFIG_PINCTRL_SM8450) += pinctrl-sm8450.o
obj-$(CONFIG_PINCTRL_LPASS_LPI) += pinctrl-lpass-lpi.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 8476a8ac4451..780878dede9e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -185,6 +185,7 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
unsigned int irq = irq_find_mapping(gc->irq.domain, group);
struct irq_data *d = irq_get_irq_data(irq);
unsigned int gpio_func = pctrl->soc->gpio_func;
+ unsigned int egpio_func = pctrl->soc->egpio_func;
const struct msm_pingroup *g;
unsigned long flags;
u32 val, mask;
@@ -218,8 +219,18 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev,
raw_spin_lock_irqsave(&pctrl->lock, flags);
val = msm_readl_ctl(pctrl, g);
- val &= ~mask;
- val |= i << g->mux_bit;
+
+ if (egpio_func && i == egpio_func) {
+ if (val & BIT(g->egpio_present))
+ val &= ~BIT(g->egpio_enable);
+ } else {
+ val &= ~mask;
+ val |= i << g->mux_bit;
+ /* Claim ownership of pin if egpio capable */
+ if (egpio_func && val & BIT(g->egpio_present))
+ val |= BIT(g->egpio_enable);
+ }
+
msm_writel_ctl(val, pctrl, g);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
@@ -1253,7 +1264,6 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl)
chip->label = dev_name(pctrl->dev);
chip->parent = pctrl->dev;
chip->owner = THIS_MODULE;
- chip->of_node = pctrl->dev->of_node;
if (msm_gpio_needs_valid_mask(pctrl))
chip->init_valid_mask = msm_gpio_init_valid_mask;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index e31a5167c91e..dd0d949f7a9e 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -77,6 +77,8 @@ struct msm_pingroup {
unsigned drv_bit:5;
unsigned od_bit:5;
+ unsigned egpio_enable:5;
+ unsigned egpio_present:5;
unsigned oe_bit:5;
unsigned in_bit:5;
unsigned out_bit:5;
@@ -119,6 +121,13 @@ struct msm_gpio_wakeirq_map {
* to be aware that their parent can't handle dual
* edge interrupts.
* @gpio_func: Which function number is GPIO (usually 0).
+ * @egpio_func: If non-zero then this SoC supports eGPIO. Even though in
+ * hardware this is a mux 1-level above the TLMM, we'll treat
+ * it as if this is just another mux state of the TLMM. Since
+ * it doesn't really map to hardware, we'll allocate a virtual
+ * function number for eGPIO and any time we see that function
+ * number used we'll treat it as a request to mux away from
+ * our TLMM towards another owner.
*/
struct msm_pinctrl_soc_data {
const struct pinctrl_pin_desc *pins;
@@ -136,6 +145,7 @@ struct msm_pinctrl_soc_data {
unsigned int nwakeirq_map;
bool wakeirq_dual_edge_errata;
unsigned int gpio_func;
+ unsigned int egpio_func;
};
extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops;
diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280.c b/drivers/pinctrl/qcom/pinctrl-sc7280.c
index 9017ede409c9..31df55c79cb3 100644
--- a/drivers/pinctrl/qcom/pinctrl-sc7280.c
+++ b/drivers/pinctrl/qcom/pinctrl-sc7280.c
@@ -43,6 +43,8 @@
.mux_bit = 2, \
.pull_bit = 0, \
.drv_bit = 6, \
+ .egpio_enable = 12, \
+ .egpio_present = 11, \
.oe_bit = 9, \
.in_bit = 0, \
.out_bit = 1, \
@@ -520,6 +522,7 @@ enum sc7280_functions {
msm_mux_dp_lcd,
msm_mux_edp_hot,
msm_mux_edp_lcd,
+ msm_mux_egpio,
msm_mux_gcc_gp1,
msm_mux_gcc_gp2,
msm_mux_gcc_gp3,
@@ -658,6 +661,14 @@ static const char * const gpio_groups[] = {
"gpio165", "gpio166", "gpio167", "gpio168", "gpio169", "gpio170",
"gpio171", "gpio172", "gpio173", "gpio174",
};
+static const char * const egpio_groups[] = {
+ "gpio144", "gpio145", "gpio146", "gpio147", "gpio148", "gpio149",
+ "gpio150", "gpio151", "gpio152", "gpio153", "gpio154", "gpio155",
+ "gpio156", "gpio157", "gpio158", "gpio159", "gpio160", "gpio161",
+ "gpio162", "gpio163", "gpio164", "gpio165", "gpio166", "gpio167",
+ "gpio168", "gpio169", "gpio170", "gpio171", "gpio172", "gpio173",
+ "gpio174",
+};
static const char * const atest_char_groups[] = {
"gpio81",
};
@@ -1150,6 +1161,7 @@ static const struct msm_function sc7280_functions[] = {
FUNCTION(dp_lcd),
FUNCTION(edp_hot),
FUNCTION(edp_lcd),
+ FUNCTION(egpio),
FUNCTION(gcc_gp1),
FUNCTION(gcc_gp2),
FUNCTION(gcc_gp3),
@@ -1408,37 +1420,37 @@ static const struct msm_pingroup sc7280_groups[] = {
[141] = PINGROUP(141, _, _, _, _, _, _, _, _, _),
[142] = PINGROUP(142, _, _, _, _, _, _, _, _, _),
[143] = PINGROUP(143, _, _, _, _, _, _, _, _, _),
- [144] = PINGROUP(144, _, _, _, _, _, _, _, _, _),
- [145] = PINGROUP(145, _, _, _, _, _, _, _, _, _),
- [146] = PINGROUP(146, _, _, _, _, _, _, _, _, _),
- [147] = PINGROUP(147, _, _, _, _, _, _, _, _, _),
- [148] = PINGROUP(148, _, _, _, _, _, _, _, _, _),
- [149] = PINGROUP(149, _, _, _, _, _, _, _, _, _),
- [150] = PINGROUP(150, qdss, _, _, _, _, _, _, _, _),
- [151] = PINGROUP(151, qdss, _, _, _, _, _, _, _, _),
- [152] = PINGROUP(152, qdss, _, _, _, _, _, _, _, _),
- [153] = PINGROUP(153, qdss, _, _, _, _, _, _, _, _),
- [154] = PINGROUP(154, _, _, _, _, _, _, _, _, _),
- [155] = PINGROUP(155, _, _, _, _, _, _, _, _, _),
- [156] = PINGROUP(156, qdss_cti, _, _, _, _, _, _, _, _),
- [157] = PINGROUP(157, qdss_cti, _, _, _, _, _, _, _, _),
- [158] = PINGROUP(158, _, _, _, _, _, _, _, _, _),
- [159] = PINGROUP(159, _, _, _, _, _, _, _, _, _),
- [160] = PINGROUP(160, _, _, _, _, _, _, _, _, _),
- [161] = PINGROUP(161, _, _, _, _, _, _, _, _, _),
- [162] = PINGROUP(162, _, _, _, _, _, _, _, _, _),
- [163] = PINGROUP(163, _, _, _, _, _, _, _, _, _),
- [164] = PINGROUP(164, _, _, _, _, _, _, _, _, _),
- [165] = PINGROUP(165, qdss_cti, _, _, _, _, _, _, _, _),
- [166] = PINGROUP(166, qdss_cti, _, _, _, _, _, _, _, _),
- [167] = PINGROUP(167, _, _, _, _, _, _, _, _, _),
- [168] = PINGROUP(168, _, _, _, _, _, _, _, _, _),
- [169] = PINGROUP(169, _, _, _, _, _, _, _, _, _),
- [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _),
- [171] = PINGROUP(171, qdss, _, _, _, _, _, _, _, _),
- [172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, _),
- [173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, _),
- [174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, _, _, _, _, _, _, _, _, egpio),
+ [145] = PINGROUP(145, _, _, _, _, _, _, _, _, egpio),
+ [146] = PINGROUP(146, _, _, _, _, _, _, _, _, egpio),
+ [147] = PINGROUP(147, _, _, _, _, _, _, _, _, egpio),
+ [148] = PINGROUP(148, _, _, _, _, _, _, _, _, egpio),
+ [149] = PINGROUP(149, _, _, _, _, _, _, _, _, egpio),
+ [150] = PINGROUP(150, qdss, _, _, _, _, _, _, _, egpio),
+ [151] = PINGROUP(151, qdss, _, _, _, _, _, _, _, egpio),
+ [152] = PINGROUP(152, qdss, _, _, _, _, _, _, _, egpio),
+ [153] = PINGROUP(153, qdss, _, _, _, _, _, _, _, egpio),
+ [154] = PINGROUP(154, _, _, _, _, _, _, _, _, egpio),
+ [155] = PINGROUP(155, _, _, _, _, _, _, _, _, egpio),
+ [156] = PINGROUP(156, qdss_cti, _, _, _, _, _, _, _, egpio),
+ [157] = PINGROUP(157, qdss_cti, _, _, _, _, _, _, _, egpio),
+ [158] = PINGROUP(158, _, _, _, _, _, _, _, _, egpio),
+ [159] = PINGROUP(159, _, _, _, _, _, _, _, _, egpio),
+ [160] = PINGROUP(160, _, _, _, _, _, _, _, _, egpio),
+ [161] = PINGROUP(161, _, _, _, _, _, _, _, _, egpio),
+ [162] = PINGROUP(162, _, _, _, _, _, _, _, _, egpio),
+ [163] = PINGROUP(163, _, _, _, _, _, _, _, _, egpio),
+ [164] = PINGROUP(164, _, _, _, _, _, _, _, _, egpio),
+ [165] = PINGROUP(165, qdss_cti, _, _, _, _, _, _, _, egpio),
+ [166] = PINGROUP(166, qdss_cti, _, _, _, _, _, _, _, egpio),
+ [167] = PINGROUP(167, _, _, _, _, _, _, _, _, egpio),
+ [168] = PINGROUP(168, _, _, _, _, _, _, _, _, egpio),
+ [169] = PINGROUP(169, _, _, _, _, _, _, _, _, egpio),
+ [170] = PINGROUP(170, _, _, _, _, _, _, _, _, egpio),
+ [171] = PINGROUP(171, qdss, _, _, _, _, _, _, _, egpio),
+ [172] = PINGROUP(172, qdss, _, _, _, _, _, _, _, egpio),
+ [173] = PINGROUP(173, qdss, _, _, _, _, _, _, _, egpio),
+ [174] = PINGROUP(174, qdss, _, _, _, _, _, _, _, egpio),
[175] = UFS_RESET(ufs_reset, 0xbe000),
[176] = SDC_QDSD_PINGROUP(sdc1_rclk, 0xb3004, 0, 6),
[177] = SDC_QDSD_PINGROUP(sdc1_clk, 0xb3000, 13, 6),
@@ -1481,6 +1493,7 @@ static const struct msm_pinctrl_soc_data sc7280_pinctrl = {
.ngpios = 176,
.wakeirq_map = sc7280_pdc_map,
.nwakeirq_map = ARRAY_SIZE(sc7280_pdc_map),
+ .egpio_func = 9,
};
static int sc7280_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/pinctrl/qcom/pinctrl-sdx65.c b/drivers/pinctrl/qcom/pinctrl-sdx65.c
new file mode 100644
index 000000000000..e793ea713965
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sdx65.c
@@ -0,0 +1,967 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
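+
+/*
+ * For reference, FUNCTION(gpio) expands to the designated initializer
+ *	[msm_mux_gpio] = { .name = "gpio", .groups = gpio_groups,
+ *			   .ngroups = ARRAY_SIZE(gpio_groups) },
+ * so sdx65_functions[] below is indexed directly by enum sdx65_functions.
+ */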
+
+#define REG_BASE 0x0
+#define REG_SIZE 0x1000
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_BASE + REG_SIZE * id, \
+ .io_reg = REG_BASE + 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .intr_status_reg = REG_BASE + 0xc + REG_SIZE * id, \
+ .intr_target_reg = REG_BASE + 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
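+
+/*
+ * Worked example from the macro above: gpio5 gets ctl_reg = REG_BASE +
+ * REG_SIZE * 5 = 0x5000, io_reg = 0x5004, intr_cfg_reg = 0x5008 and
+ * intr_status_reg = 0x500c, i.e. each TLMM pin owns a 4 KiB register
+ * window.
+ */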
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc sdx65_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "UFS_RESET"),
+ PINCTRL_PIN(109, "SDC1_RCLK"),
+ PINCTRL_PIN(110, "SDC1_CLK"),
+ PINCTRL_PIN(111, "SDC1_CMD"),
+ PINCTRL_PIN(112, "SDC1_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
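+/*
+ * Illustrative expansion: DECLARE_MSM_GPIO_PINS(0); becomes
+ *	static const unsigned int gpio0_pins[] = { 0 };
+ */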
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+
+static const unsigned int ufs_reset_pins[] = { 108 };
+static const unsigned int sdc1_rclk_pins[] = { 109 };
+static const unsigned int sdc1_clk_pins[] = { 110 };
+static const unsigned int sdc1_cmd_pins[] = { 111 };
+static const unsigned int sdc1_data_pins[] = { 112 };
+
+enum sdx65_functions {
+ msm_mux_qlink0_wmss,
+ msm_mux_adsp_ext,
+ msm_mux_atest_char,
+ msm_mux_atest_char0,
+ msm_mux_atest_char1,
+ msm_mux_atest_char2,
+ msm_mux_atest_char3,
+ msm_mux_audio_ref,
+ msm_mux_bimc_dte0,
+ msm_mux_bimc_dte1,
+ msm_mux_blsp_i2c1,
+ msm_mux_blsp_i2c2,
+ msm_mux_blsp_i2c3,
+ msm_mux_blsp_i2c4,
+ msm_mux_blsp_spi1,
+ msm_mux_blsp_spi2,
+ msm_mux_blsp_spi3,
+ msm_mux_blsp_spi4,
+ msm_mux_blsp_uart1,
+ msm_mux_blsp_uart2,
+ msm_mux_blsp_uart3,
+ msm_mux_blsp_uart4,
+ msm_mux_char_exec,
+ msm_mux_coex_uart,
+ msm_mux_coex_uart2,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ebi0_wrcdc,
+ msm_mux_ebi2_a,
+ msm_mux_ebi2_lcd,
+ msm_mux_ext_dbg,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_gcc_plltest,
+ msm_mux_gpio,
+ msm_mux_i2s_mclk,
+ msm_mux_jitter_bist,
+ msm_mux_ldo_en,
+ msm_mux_ldo_update,
+ msm_mux_m_voc,
+ msm_mux_mgpi_clk,
+ msm_mux_native_char,
+ msm_mux_native_tsens,
+ msm_mux_native_tsense,
+ msm_mux_nav_gpio,
+ msm_mux_pa_indicator,
+ msm_mux_pci_e,
+ msm_mux_pcie_clkreq,
+ msm_mux_pll_bist,
+ msm_mux_pll_ref,
+ msm_mux_pri_mi2s,
+ msm_mux_pri_mi2s_ws,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qlink0_en,
+ msm_mux_qlink0_req,
+ msm_mux_qlink1_en,
+ msm_mux_qlink1_req,
+ msm_mux_qlink1_wmss,
+ msm_mux_qlink2_en,
+ msm_mux_qlink2_req,
+ msm_mux_qlink2_wmss,
+ msm_mux_sdc1_tb,
+ msm_mux_sec_mi2s,
+ msm_mux_spmi_coex,
+ msm_mux_spmi_vgi,
+ msm_mux_tgu_ch0,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_uim2_clk,
+ msm_mux_uim2_data,
+ msm_mux_uim2_present,
+ msm_mux_uim2_reset,
+ msm_mux_usb2phy_ac,
+ msm_mux_vsense_trigger,
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107",
+};
+static const char * const uim2_data_groups[] = {
+ "gpio0",
+};
+static const char * const blsp_uart1_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio48", "gpio49", "gpio80",
+ "gpio81",
+};
+static const char * const ebi0_wrcdc_groups[] = {
+ "gpio0", "gpio2",
+};
+static const char * const uim2_present_groups[] = {
+ "gpio1",
+};
+static const char * const uim2_reset_groups[] = {
+ "gpio2",
+};
+static const char * const blsp_i2c1_groups[] = {
+ "gpio2", "gpio3", "gpio82", "gpio83",
+};
+static const char * const uim2_clk_groups[] = {
+ "gpio3",
+};
+static const char * const blsp_spi2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio23", "gpio47", "gpio62",
+};
+static const char * const blsp_uart2_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio63", "gpio64", "gpio65",
+ "gpio66",
+};
+static const char * const blsp_i2c2_groups[] = {
+ "gpio6", "gpio7", "gpio65", "gpio66",
+};
+static const char * const char_exec_groups[] = {
+ "gpio6", "gpio7",
+};
+static const char * const qdss_gpio_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio12", "gpio13",
+ "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19",
+ "gpio33", "gpio42", "gpio63", "gpio64", "gpio65", "gpio66",
+};
+static const char * const blsp_spi3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio23", "gpio47", "gpio62",
+};
+static const char * const blsp_uart3_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const ext_dbg_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+static const char * const ldo_en_groups[] = {
+ "gpio8",
+};
+static const char * const blsp_i2c3_groups[] = {
+ "gpio10", "gpio11",
+};
+static const char * const gcc_gp3_groups[] = {
+ "gpio11",
+};
+static const char * const pri_mi2s_ws_groups[] = {
+ "gpio12",
+};
+static const char * const pri_mi2s_groups[] = {
+ "gpio13", "gpio14", "gpio15",
+};
+static const char * const vsense_trigger_groups[] = {
+ "gpio13",
+};
+static const char * const native_tsens_groups[] = {
+ "gpio14",
+};
+static const char * const bimc_dte0_groups[] = {
+ "gpio14", "gpio59",
+};
+static const char * const bimc_dte1_groups[] = {
+ "gpio15", "gpio61",
+};
+static const char * const sec_mi2s_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+static const char * const blsp_spi4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio23", "gpio47", "gpio62",
+};
+static const char * const blsp_uart4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19", "gpio22", "gpio23", "gpio48",
+ "gpio49",
+};
+static const char * const qdss_cti_groups[] = {
+ "gpio16", "gpio16", "gpio17", "gpio17", "gpio54", "gpio54", "gpio55",
+ "gpio55", "gpio59", "gpio60", "gpio65", "gpio65", "gpio66", "gpio66",
+ "gpio94", "gpio94", "gpio95", "gpio95",
+};
+static const char * const blsp_i2c4_groups[] = {
+ "gpio18", "gpio19", "gpio84", "gpio85",
+};
+static const char * const gcc_gp1_groups[] = {
+ "gpio18",
+};
+static const char * const jitter_bist_groups[] = {
+ "gpio19",
+};
+static const char * const gcc_gp2_groups[] = {
+ "gpio19",
+};
+static const char * const pll_bist_groups[] = {
+ "gpio22",
+};
+static const char * const blsp_spi1_groups[] = {
+ "gpio23", "gpio47", "gpio62", "gpio80", "gpio81", "gpio82", "gpio83",
+};
+static const char * const adsp_ext_groups[] = {
+ "gpio24", "gpio25",
+};
+static const char * const qlink0_wmss_groups[] = {
+ "gpio28",
+};
+static const char * const native_tsense_groups[] = {
+ "gpio29", "gpio72",
+};
+static const char * const nav_gpio_groups[] = {
+ "gpio31", "gpio32",
+};
+static const char * const pll_ref_groups[] = {
+ "gpio32",
+};
+static const char * const pa_indicator_groups[] = {
+ "gpio33",
+};
+static const char * const qlink0_en_groups[] = {
+ "gpio34",
+};
+static const char * const qlink0_req_groups[] = {
+ "gpio35",
+};
+static const char * const dbg_out_groups[] = {
+ "gpio35",
+};
+static const char * const cri_trng_groups[] = {
+ "gpio36",
+};
+static const char * const prng_rosc_groups[] = {
+ "gpio38",
+};
+static const char * const cri_trng0_groups[] = {
+ "gpio40",
+};
+static const char * const cri_trng1_groups[] = {
+ "gpio41",
+};
+static const char * const coex_uart_groups[] = {
+ "gpio44", "gpio45",
+};
+static const char * const ddr_pxi0_groups[] = {
+ "gpio45", "gpio46",
+};
+static const char * const m_voc_groups[] = {
+ "gpio46", "gpio48", "gpio49", "gpio59", "gpio60",
+};
+static const char * const ddr_bist_groups[] = {
+ "gpio46", "gpio47", "gpio48", "gpio49",
+};
+static const char * const pci_e_groups[] = {
+ "gpio53",
+};
+static const char * const tgu_ch0_groups[] = {
+ "gpio55",
+};
+static const char * const pcie_clkreq_groups[] = {
+ "gpio56",
+};
+static const char * const native_char_groups[] = {
+ "gpio26", "gpio29", "gpio33", "gpio42", "gpio57",
+};
+static const char * const mgpi_clk_groups[] = {
+ "gpio61", "gpio71",
+};
+static const char * const qlink2_wmss_groups[] = {
+ "gpio61",
+};
+static const char * const i2s_mclk_groups[] = {
+ "gpio62",
+};
+static const char * const audio_ref_groups[] = {
+ "gpio62",
+};
+static const char * const ldo_update_groups[] = {
+ "gpio62",
+};
+static const char * const atest_char_groups[] = {
+ "gpio63",
+};
+static const char * const atest_char3_groups[] = {
+ "gpio64",
+};
+static const char * const atest_char2_groups[] = {
+ "gpio65",
+};
+static const char * const atest_char1_groups[] = {
+ "gpio66",
+};
+static const char * const uim1_data_groups[] = {
+ "gpio67",
+};
+static const char * const atest_char0_groups[] = {
+ "gpio67",
+};
+static const char * const uim1_present_groups[] = {
+ "gpio68",
+};
+static const char * const uim1_reset_groups[] = {
+ "gpio69",
+};
+static const char * const uim1_clk_groups[] = {
+ "gpio70",
+};
+static const char * const qlink2_en_groups[] = {
+ "gpio71",
+};
+static const char * const qlink1_en_groups[] = {
+ "gpio72",
+};
+static const char * const qlink1_req_groups[] = {
+ "gpio73",
+};
+static const char * const qlink1_wmss_groups[] = {
+ "gpio74",
+};
+static const char * const coex_uart2_groups[] = {
+ "gpio75", "gpio76", "gpio102", "gpio103",
+};
+static const char * const spmi_coex_groups[] = {
+ "gpio75", "gpio76",
+};
+static const char * const qlink2_req_groups[] = {
+ "gpio77",
+};
+static const char * const spmi_vgi_groups[] = {
+ "gpio78", "gpio79",
+};
+static const char * const gcc_plltest_groups[] = {
+ "gpio81", "gpio82",
+};
+static const char * const ebi2_lcd_groups[] = {
+ "gpio84", "gpio85", "gpio90",
+};
+static const char * const ebi2_a_groups[] = {
+ "gpio89",
+};
+static const char * const usb2phy_ac_groups[] = {
+ "gpio93",
+};
+static const char * const sdc1_tb_groups[] = {
+ "gpio106",
+};
+
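+/*
+ * Function table: each FUNCTION(fname) entry binds the function name to its
+ * fname##_groups list defined above.
+ */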
+static const struct msm_function sdx65_functions[] = {
+ FUNCTION(qlink0_wmss),
+ FUNCTION(adsp_ext),
+ FUNCTION(atest_char),
+ FUNCTION(atest_char0),
+ FUNCTION(atest_char1),
+ FUNCTION(atest_char2),
+ FUNCTION(atest_char3),
+ FUNCTION(audio_ref),
+ FUNCTION(bimc_dte0),
+ FUNCTION(bimc_dte1),
+ FUNCTION(blsp_i2c1),
+ FUNCTION(blsp_i2c2),
+ FUNCTION(blsp_i2c3),
+ FUNCTION(blsp_i2c4),
+ FUNCTION(blsp_spi1),
+ FUNCTION(blsp_spi2),
+ FUNCTION(blsp_spi3),
+ FUNCTION(blsp_spi4),
+ FUNCTION(blsp_uart1),
+ FUNCTION(blsp_uart2),
+ FUNCTION(blsp_uart3),
+ FUNCTION(blsp_uart4),
+ FUNCTION(char_exec),
+ FUNCTION(coex_uart),
+ FUNCTION(coex_uart2),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ebi0_wrcdc),
+ FUNCTION(ebi2_a),
+ FUNCTION(ebi2_lcd),
+ FUNCTION(ext_dbg),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(gcc_plltest),
+ FUNCTION(gpio),
+ FUNCTION(i2s_mclk),
+ FUNCTION(jitter_bist),
+ FUNCTION(ldo_en),
+ FUNCTION(ldo_update),
+ FUNCTION(m_voc),
+ FUNCTION(mgpi_clk),
+ FUNCTION(native_char),
+ FUNCTION(native_tsens),
+ FUNCTION(native_tsense),
+ FUNCTION(nav_gpio),
+ FUNCTION(pa_indicator),
+ FUNCTION(pci_e),
+ FUNCTION(pcie_clkreq),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_ref),
+ FUNCTION(pri_mi2s),
+ FUNCTION(pri_mi2s_ws),
+ FUNCTION(prng_rosc),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qlink0_en),
+ FUNCTION(qlink0_req),
+ FUNCTION(qlink1_en),
+ FUNCTION(qlink1_req),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(qlink2_en),
+ FUNCTION(qlink2_req),
+ FUNCTION(qlink2_wmss),
+ FUNCTION(sdc1_tb),
+ FUNCTION(sec_mi2s),
+ FUNCTION(spmi_coex),
+ FUNCTION(spmi_vgi),
+ FUNCTION(tgu_ch0),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(uim2_clk),
+ FUNCTION(uim2_data),
+ FUNCTION(uim2_present),
+ FUNCTION(uim2_reset),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(vsense_trigger),
+};
+
+/*
+ * Every pin is maintained as a single group. Any missing or non-existent pin
+ * is maintained as a dummy group to keep the pin group index in sync with
+ * the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
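+/* A '_' slot below selects msm_mux__, i.e. no function for that mux position. */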
+static const struct msm_pingroup sdx65_groups[] = {
+ [0] = PINGROUP(0, uim2_data, blsp_uart1, ebi0_wrcdc, _, _, _, _, _, _),
+ [1] = PINGROUP(1, uim2_present, blsp_uart1, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, uim2_reset, blsp_uart1, blsp_i2c1, ebi0_wrcdc, _, _, _, _, _),
+ [3] = PINGROUP(3, uim2_clk, blsp_uart1, blsp_i2c1, _, _, _, _, _, _),
+ [4] = PINGROUP(4, blsp_spi2, blsp_uart2, _, qdss_gpio, _, _, _, _, _),
+ [5] = PINGROUP(5, blsp_spi2, blsp_uart2, _, qdss_gpio, _, _, _, _, _),
+ [6] = PINGROUP(6, blsp_spi2, blsp_uart2, blsp_i2c2, char_exec, _, qdss_gpio, _, _, _),
+ [7] = PINGROUP(7, blsp_spi2, blsp_uart2, blsp_i2c2, char_exec, _, qdss_gpio, _, _, _),
+ [8] = PINGROUP(8, blsp_spi3, blsp_uart3, ext_dbg, ldo_en, _, _, _, _, _),
+ [9] = PINGROUP(9, blsp_spi3, blsp_uart3, ext_dbg, _, _, _, _, _, _),
+ [10] = PINGROUP(10, blsp_spi3, blsp_uart3, blsp_i2c3, ext_dbg, _, _, _, _, _),
+ [11] = PINGROUP(11, blsp_spi3, blsp_uart3, blsp_i2c3, ext_dbg, gcc_gp3, _, _, _, _),
+ [12] = PINGROUP(12, pri_mi2s_ws, _, qdss_gpio, _, _, _, _, _, _),
+ [13] = PINGROUP(13, pri_mi2s, _, qdss_gpio, vsense_trigger, _, _, _, _, _),
+ [14] = PINGROUP(14, pri_mi2s, _, _, qdss_gpio, native_tsens, bimc_dte0, _, _, _),
+ [15] = PINGROUP(15, pri_mi2s, _, _, qdss_gpio, bimc_dte1, _, _, _, _),
+ [16] = PINGROUP(16, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, qdss_cti, _, _, qdss_gpio, _),
+ [17] = PINGROUP(17, sec_mi2s, blsp_spi4, blsp_uart4, qdss_cti, qdss_cti, _, qdss_gpio, _, _),
+ [18] = PINGROUP(18, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, gcc_gp1, qdss_gpio, _, _, _),
+ [19] = PINGROUP(19, sec_mi2s, blsp_spi4, blsp_uart4, blsp_i2c4, jitter_bist, gcc_gp2, _, qdss_gpio, _),
+ [20] = PINGROUP(20, _, _, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, _, _, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, blsp_uart4, pll_bist, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, blsp_uart4, blsp_spi2, blsp_spi1, blsp_spi3, blsp_spi4, _, _, _, _),
+ [24] = PINGROUP(24, adsp_ext, _, _, _, _, _, _, _, _),
+ [25] = PINGROUP(25, adsp_ext, _, _, _, _, _, _, _, _),
+ [26] = PINGROUP(26, _, _, _, native_char, _, _, _, _, _),
+ [27] = PINGROUP(27, _, _, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, qlink0_wmss, _, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, _, _, _, native_tsense, native_char, _, _, _, _),
+ [30] = PINGROUP(30, _, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, nav_gpio, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, nav_gpio, pll_ref, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, _, pa_indicator, qdss_gpio, native_char, _, _, _, _, _),
+ [34] = PINGROUP(34, qlink0_en, _, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, qlink0_req, dbg_out, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, _, _, cri_trng, _, _, _, _, _, _),
+ [37] = PINGROUP(37, _, _, _, _, _, _, _, _, _),
+ [38] = PINGROUP(38, _, _, prng_rosc, _, _, _, _, _, _),
+ [39] = PINGROUP(39, _, _, _, _, _, _, _, _, _),
+ [40] = PINGROUP(40, _, _, cri_trng0, _, _, _, _, _, _),
+ [41] = PINGROUP(41, _, _, cri_trng1, _, _, _, _, _, _),
+ [42] = PINGROUP(42, _, qdss_gpio, native_char, _, _, _, _, _, _),
+ [43] = PINGROUP(43, _, _, _, _, _, _, _, _, _),
+ [44] = PINGROUP(44, coex_uart, _, _, _, _, _, _, _, _),
+ [45] = PINGROUP(45, coex_uart, ddr_pxi0, _, _, _, _, _, _, _),
+ [46] = PINGROUP(46, m_voc, ddr_bist, ddr_pxi0, _, _, _, _, _, _),
+ [47] = PINGROUP(47, ddr_bist, blsp_spi1, blsp_spi2, blsp_spi3, blsp_spi4, _, _, _, _),
+ [48] = PINGROUP(48, m_voc, blsp_uart1, blsp_uart4, ddr_bist, _, _, _, _, _),
+ [49] = PINGROUP(49, m_voc, blsp_uart1, blsp_uart4, ddr_bist, _, _, _, _, _),
+ [50] = PINGROUP(50, _, _, _, _, _, _, _, _, _),
+ [51] = PINGROUP(51, _, _, _, _, _, _, _, _, _),
+ [52] = PINGROUP(52, _, _, _, _, _, _, _, _, _),
+ [53] = PINGROUP(53, pci_e, _, _, _, _, _, _, _, _),
+ [54] = PINGROUP(54, qdss_cti, qdss_cti, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, qdss_cti, qdss_cti, tgu_ch0, _, _, _, _, _, _),
+ [56] = PINGROUP(56, pcie_clkreq, _, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, _, native_char, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, _, _, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, qdss_cti, m_voc, bimc_dte0, _, _, _, _, _, _),
+ [60] = PINGROUP(60, qdss_cti, _, m_voc, _, _, _, _, _, _),
+ [61] = PINGROUP(61, mgpi_clk, qlink2_wmss, bimc_dte1, _, _, _, _, _, _),
+ [62] = PINGROUP(62, i2s_mclk, audio_ref, blsp_spi1, blsp_spi2, blsp_spi3, blsp_spi4, ldo_update, _, _),
+ [63] = PINGROUP(63, blsp_uart2, _, qdss_gpio, atest_char, _, _, _, _, _),
+ [64] = PINGROUP(64, blsp_uart2, qdss_gpio, atest_char3, _, _, _, _, _, _),
+ [65] = PINGROUP(65, blsp_uart2, blsp_i2c2, qdss_cti, qdss_cti, _, qdss_gpio, atest_char2, _, _),
+ [66] = PINGROUP(66, blsp_uart2, blsp_i2c2, qdss_cti, qdss_cti, qdss_gpio, atest_char1, _, _, _),
+ [67] = PINGROUP(67, uim1_data, atest_char0, _, _, _, _, _, _, _),
+ [68] = PINGROUP(68, uim1_present, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, uim1_reset, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, uim1_clk, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, mgpi_clk, qlink2_en, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, qlink1_en, _, native_tsense, _, _, _, _, _, _),
+ [73] = PINGROUP(73, qlink1_req, _, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, qlink1_wmss, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, coex_uart2, spmi_coex, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, coex_uart2, spmi_coex, _, _, _, _, _, _, _),
+ [77] = PINGROUP(77, _, qlink2_req, _, _, _, _, _, _, _),
+ [78] = PINGROUP(78, spmi_vgi, _, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, spmi_vgi, _, _, _, _, _, _, _, _),
+ [80] = PINGROUP(80, _, blsp_spi1, _, blsp_uart1, _, _, _, _, _),
+ [81] = PINGROUP(81, _, blsp_spi1, _, blsp_uart1, gcc_plltest, _, _, _, _),
+ [82] = PINGROUP(82, _, blsp_spi1, _, blsp_i2c1, gcc_plltest, _, _, _, _),
+ [83] = PINGROUP(83, _, blsp_spi1, _, blsp_i2c1, _, _, _, _, _),
+ [84] = PINGROUP(84, _, ebi2_lcd, _, blsp_i2c4, _, _, _, _, _),
+ [85] = PINGROUP(85, _, ebi2_lcd, _, blsp_i2c4, _, _, _, _, _),
+ [86] = PINGROUP(86, _, _, _, _, _, _, _, _, _),
+ [87] = PINGROUP(87, _, _, _, _, _, _, _, _, _),
+ [88] = PINGROUP(88, _, _, _, _, _, _, _, _, _),
+ [89] = PINGROUP(89, _, _, _, _, ebi2_a, _, _, _, _),
+ [90] = PINGROUP(90, _, _, _, _, ebi2_lcd, _, _, _, _),
+ [91] = PINGROUP(91, _, _, _, _, _, _, _, _, _),
+ [92] = PINGROUP(92, _, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, _, _, usb2phy_ac, _, _, _, _, _, _),
+ [94] = PINGROUP(94, qdss_cti, qdss_cti, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, qdss_cti, qdss_cti, _, _, _, _, _, _, _),
+ [96] = PINGROUP(96, _, _, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, _, _, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, _, _, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, _, _, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, _, _, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, _, _, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, _, _, coex_uart2, _, _, _, _, _, _),
+ [103] = PINGROUP(103, _, _, coex_uart2, _, _, _, _, _, _),
+ [104] = PINGROUP(104, _, _, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, _, _, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, sdc1_tb, _, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, _, _, _, _, _, _, _, _, _),
+ [108] = UFS_RESET(ufs_reset, 0x0),
+ [109] = SDC_QDSD_PINGROUP(sdc1_rclk, 0x9a000, 15, 0),
+ [110] = SDC_QDSD_PINGROUP(sdc1_clk, 0x9a000, 13, 6),
+ [111] = SDC_QDSD_PINGROUP(sdc1_cmd, 0x9a000, 11, 3),
+ [112] = SDC_QDSD_PINGROUP(sdc1_data, 0x9a000, 9, 0),
+};
+
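+/*
+ * Wakeup map: { gpio, wakeirq } pairs routing wake-capable GPIOs to their
+ * PDC interrupt lines.
+ */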
+static const struct msm_gpio_wakeirq_map sdx65_pdc_map[] = {
+ {1, 20}, {2, 21}, {5, 22}, {6, 23}, {9, 24}, {10, 25},
+ {11, 26}, {12, 27}, {13, 28}, {14, 29}, {15, 30}, {16, 31},
+ {17, 32}, {18, 33}, {19, 34}, {21, 35}, {22, 36}, {23, 70},
+ {24, 37}, {25, 38}, {35, 40}, {43, 41}, {46, 44}, {48, 45},
+ {49, 57}, {50, 46}, {52, 47}, {54, 49}, {55, 50}, {60, 53},
+ {61, 54}, {64, 55}, {65, 81}, {68, 56}, {71, 58}, {73, 59},
+ {77, 77}, {81, 65}, {83, 63}, {84, 64}, {86, 66}, {88, 67},
+ {89, 68}, {90, 69}, {93, 71}, {94, 72}, {95, 73}, {96, 74},
+ {99, 75}, {103, 78}, {104, 79}
+};
+
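+/* SoC description handed to the shared MSM pinctrl core at probe time. */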
+static const struct msm_pinctrl_soc_data sdx65_pinctrl = {
+ .pins = sdx65_pins,
+ .npins = ARRAY_SIZE(sdx65_pins),
+ .functions = sdx65_functions,
+ .nfunctions = ARRAY_SIZE(sdx65_functions),
+ .groups = sdx65_groups,
+ .ngroups = ARRAY_SIZE(sdx65_groups),
+ .ngpios = 109,
+ .wakeirq_map = sdx65_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sdx65_pdc_map),
+};
+
+static int sdx65_pinctrl_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sdx65_pinctrl);
+}
+
+static const struct of_device_id sdx65_pinctrl_of_match[] = {
+ { .compatible = "qcom,sdx65-tlmm", },
+ { },
+};
+
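+/*
+ * Pin groups and functions are selected from the device tree; a hypothetical
+ * consumer snippet, using names from the tables above:
+ *
+ *	uart2-default {
+ *		pins = "gpio4", "gpio5";
+ *		function = "blsp_uart2";
+ *	};
+ */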
+static struct platform_driver sdx65_pinctrl_driver = {
+ .driver = {
+ .name = "sdx65-tlmm",
+ .of_match_table = sdx65_pinctrl_of_match,
+ },
+ .probe = sdx65_pinctrl_probe,
+ .remove = msm_pinctrl_remove,
+};
+
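+/*
+ * Registered at arch_initcall level so the TLMM is up before the drivers
+ * that consume its pins probe.
+ */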
+static int __init sdx65_pinctrl_init(void)
+{
+ return platform_driver_register(&sdx65_pinctrl_driver);
+}
+arch_initcall(sdx65_pinctrl_init);
+
+static void __exit sdx65_pinctrl_exit(void)
+{
+ platform_driver_unregister(&sdx65_pinctrl_driver);
+}
+module_exit(sdx65_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI sdx65 pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sdx65_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-sm8450.c b/drivers/pinctrl/qcom/pinctrl-sm8450.c
new file mode 100644
index 000000000000..c6fa3dbc14a1
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-sm8450.c
@@ -0,0 +1,1689 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
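+/*
+ * FUNCTION(fname) is keyed by its msm_mux_ enum value, so the order of the
+ * sm8450_functions[] entries below is independent of the enum order.
+ */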
+#define FUNCTION(fname) \
+ [msm_mux_##fname] = { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
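+/* Each GPIO owns a 0x1000-byte TLMM register window (see .ctl_reg below). */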
+#define REG_SIZE 0x1000
+
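+/*
+ * PINGROUP(id, f1..f9): a fully muxable pin. Slot 0 of .funcs is always GPIO
+ * mode, f1-f9 fill the remaining mux slots ('_' marks an unused slot), and
+ * the register offsets all derive from the pin id.
+ */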
+#define PINGROUP(id, f1, f2, f3, f4, f5, f6, f7, f8, f9) \
+ { \
+ .name = "gpio" #id, \
+ .pins = gpio##id##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins), \
+ .funcs = (int[]){ \
+ msm_mux_gpio, /* gpio mode */ \
+ msm_mux_##f1, \
+ msm_mux_##f2, \
+ msm_mux_##f3, \
+ msm_mux_##f4, \
+ msm_mux_##f5, \
+ msm_mux_##f6, \
+ msm_mux_##f7, \
+ msm_mux_##f8, \
+ msm_mux_##f9 \
+ }, \
+ .nfuncs = 10, \
+ .ctl_reg = REG_SIZE * id, \
+ .io_reg = 0x4 + REG_SIZE * id, \
+ .intr_cfg_reg = 0x8 + REG_SIZE * id, \
+ .intr_status_reg = 0xc + REG_SIZE * id, \
+ .intr_target_reg = 0x8 + REG_SIZE * id, \
+ .mux_bit = 2, \
+ .pull_bit = 0, \
+ .drv_bit = 6, \
+ .oe_bit = 9, \
+ .in_bit = 0, \
+ .out_bit = 1, \
+ .intr_enable_bit = 0, \
+ .intr_status_bit = 0, \
+ .intr_target_bit = 5, \
+ .intr_target_kpss_val = 3, \
+ .intr_raw_status_bit = 4, \
+ .intr_polarity_bit = 1, \
+ .intr_detection_bit = 2, \
+ .intr_detection_width = 2, \
+ }
+
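+/*
+ * SDC/QDSD pad groups expose only pull and drive-strength controls; fields
+ * set to -1 mark mux, I/O and interrupt capabilities as absent.
+ */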
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = ctl, \
+ .io_reg = 0, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = pull, \
+ .drv_bit = drv, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = -1, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
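+/*
+ * The UFS reset pad is a dedicated output: pull, drive strength and an
+ * output bit are available, but there is no mux or interrupt support.
+ */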
+#define UFS_RESET(pg_name, offset) \
+ { \
+ .name = #pg_name, \
+ .pins = pg_name##_pins, \
+ .npins = (unsigned int)ARRAY_SIZE(pg_name##_pins), \
+ .ctl_reg = offset, \
+ .io_reg = offset + 0x4, \
+ .intr_cfg_reg = 0, \
+ .intr_status_reg = 0, \
+ .intr_target_reg = 0, \
+ .mux_bit = -1, \
+ .pull_bit = 3, \
+ .drv_bit = 0, \
+ .oe_bit = -1, \
+ .in_bit = -1, \
+ .out_bit = 0, \
+ .intr_enable_bit = -1, \
+ .intr_status_bit = -1, \
+ .intr_target_bit = -1, \
+ .intr_raw_status_bit = -1, \
+ .intr_polarity_bit = -1, \
+ .intr_detection_bit = -1, \
+ .intr_detection_width = -1, \
+ }
+
+static const struct pinctrl_pin_desc sm8450_pins[] = {
+ PINCTRL_PIN(0, "GPIO_0"),
+ PINCTRL_PIN(1, "GPIO_1"),
+ PINCTRL_PIN(2, "GPIO_2"),
+ PINCTRL_PIN(3, "GPIO_3"),
+ PINCTRL_PIN(4, "GPIO_4"),
+ PINCTRL_PIN(5, "GPIO_5"),
+ PINCTRL_PIN(6, "GPIO_6"),
+ PINCTRL_PIN(7, "GPIO_7"),
+ PINCTRL_PIN(8, "GPIO_8"),
+ PINCTRL_PIN(9, "GPIO_9"),
+ PINCTRL_PIN(10, "GPIO_10"),
+ PINCTRL_PIN(11, "GPIO_11"),
+ PINCTRL_PIN(12, "GPIO_12"),
+ PINCTRL_PIN(13, "GPIO_13"),
+ PINCTRL_PIN(14, "GPIO_14"),
+ PINCTRL_PIN(15, "GPIO_15"),
+ PINCTRL_PIN(16, "GPIO_16"),
+ PINCTRL_PIN(17, "GPIO_17"),
+ PINCTRL_PIN(18, "GPIO_18"),
+ PINCTRL_PIN(19, "GPIO_19"),
+ PINCTRL_PIN(20, "GPIO_20"),
+ PINCTRL_PIN(21, "GPIO_21"),
+ PINCTRL_PIN(22, "GPIO_22"),
+ PINCTRL_PIN(23, "GPIO_23"),
+ PINCTRL_PIN(24, "GPIO_24"),
+ PINCTRL_PIN(25, "GPIO_25"),
+ PINCTRL_PIN(26, "GPIO_26"),
+ PINCTRL_PIN(27, "GPIO_27"),
+ PINCTRL_PIN(28, "GPIO_28"),
+ PINCTRL_PIN(29, "GPIO_29"),
+ PINCTRL_PIN(30, "GPIO_30"),
+ PINCTRL_PIN(31, "GPIO_31"),
+ PINCTRL_PIN(32, "GPIO_32"),
+ PINCTRL_PIN(33, "GPIO_33"),
+ PINCTRL_PIN(34, "GPIO_34"),
+ PINCTRL_PIN(35, "GPIO_35"),
+ PINCTRL_PIN(36, "GPIO_36"),
+ PINCTRL_PIN(37, "GPIO_37"),
+ PINCTRL_PIN(38, "GPIO_38"),
+ PINCTRL_PIN(39, "GPIO_39"),
+ PINCTRL_PIN(40, "GPIO_40"),
+ PINCTRL_PIN(41, "GPIO_41"),
+ PINCTRL_PIN(42, "GPIO_42"),
+ PINCTRL_PIN(43, "GPIO_43"),
+ PINCTRL_PIN(44, "GPIO_44"),
+ PINCTRL_PIN(45, "GPIO_45"),
+ PINCTRL_PIN(46, "GPIO_46"),
+ PINCTRL_PIN(47, "GPIO_47"),
+ PINCTRL_PIN(48, "GPIO_48"),
+ PINCTRL_PIN(49, "GPIO_49"),
+ PINCTRL_PIN(50, "GPIO_50"),
+ PINCTRL_PIN(51, "GPIO_51"),
+ PINCTRL_PIN(52, "GPIO_52"),
+ PINCTRL_PIN(53, "GPIO_53"),
+ PINCTRL_PIN(54, "GPIO_54"),
+ PINCTRL_PIN(55, "GPIO_55"),
+ PINCTRL_PIN(56, "GPIO_56"),
+ PINCTRL_PIN(57, "GPIO_57"),
+ PINCTRL_PIN(58, "GPIO_58"),
+ PINCTRL_PIN(59, "GPIO_59"),
+ PINCTRL_PIN(60, "GPIO_60"),
+ PINCTRL_PIN(61, "GPIO_61"),
+ PINCTRL_PIN(62, "GPIO_62"),
+ PINCTRL_PIN(63, "GPIO_63"),
+ PINCTRL_PIN(64, "GPIO_64"),
+ PINCTRL_PIN(65, "GPIO_65"),
+ PINCTRL_PIN(66, "GPIO_66"),
+ PINCTRL_PIN(67, "GPIO_67"),
+ PINCTRL_PIN(68, "GPIO_68"),
+ PINCTRL_PIN(69, "GPIO_69"),
+ PINCTRL_PIN(70, "GPIO_70"),
+ PINCTRL_PIN(71, "GPIO_71"),
+ PINCTRL_PIN(72, "GPIO_72"),
+ PINCTRL_PIN(73, "GPIO_73"),
+ PINCTRL_PIN(74, "GPIO_74"),
+ PINCTRL_PIN(75, "GPIO_75"),
+ PINCTRL_PIN(76, "GPIO_76"),
+ PINCTRL_PIN(77, "GPIO_77"),
+ PINCTRL_PIN(78, "GPIO_78"),
+ PINCTRL_PIN(79, "GPIO_79"),
+ PINCTRL_PIN(80, "GPIO_80"),
+ PINCTRL_PIN(81, "GPIO_81"),
+ PINCTRL_PIN(82, "GPIO_82"),
+ PINCTRL_PIN(83, "GPIO_83"),
+ PINCTRL_PIN(84, "GPIO_84"),
+ PINCTRL_PIN(85, "GPIO_85"),
+ PINCTRL_PIN(86, "GPIO_86"),
+ PINCTRL_PIN(87, "GPIO_87"),
+ PINCTRL_PIN(88, "GPIO_88"),
+ PINCTRL_PIN(89, "GPIO_89"),
+ PINCTRL_PIN(90, "GPIO_90"),
+ PINCTRL_PIN(91, "GPIO_91"),
+ PINCTRL_PIN(92, "GPIO_92"),
+ PINCTRL_PIN(93, "GPIO_93"),
+ PINCTRL_PIN(94, "GPIO_94"),
+ PINCTRL_PIN(95, "GPIO_95"),
+ PINCTRL_PIN(96, "GPIO_96"),
+ PINCTRL_PIN(97, "GPIO_97"),
+ PINCTRL_PIN(98, "GPIO_98"),
+ PINCTRL_PIN(99, "GPIO_99"),
+ PINCTRL_PIN(100, "GPIO_100"),
+ PINCTRL_PIN(101, "GPIO_101"),
+ PINCTRL_PIN(102, "GPIO_102"),
+ PINCTRL_PIN(103, "GPIO_103"),
+ PINCTRL_PIN(104, "GPIO_104"),
+ PINCTRL_PIN(105, "GPIO_105"),
+ PINCTRL_PIN(106, "GPIO_106"),
+ PINCTRL_PIN(107, "GPIO_107"),
+ PINCTRL_PIN(108, "GPIO_108"),
+ PINCTRL_PIN(109, "GPIO_109"),
+ PINCTRL_PIN(110, "GPIO_110"),
+ PINCTRL_PIN(111, "GPIO_111"),
+ PINCTRL_PIN(112, "GPIO_112"),
+ PINCTRL_PIN(113, "GPIO_113"),
+ PINCTRL_PIN(114, "GPIO_114"),
+ PINCTRL_PIN(115, "GPIO_115"),
+ PINCTRL_PIN(116, "GPIO_116"),
+ PINCTRL_PIN(117, "GPIO_117"),
+ PINCTRL_PIN(118, "GPIO_118"),
+ PINCTRL_PIN(119, "GPIO_119"),
+ PINCTRL_PIN(120, "GPIO_120"),
+ PINCTRL_PIN(121, "GPIO_121"),
+ PINCTRL_PIN(122, "GPIO_122"),
+ PINCTRL_PIN(123, "GPIO_123"),
+ PINCTRL_PIN(124, "GPIO_124"),
+ PINCTRL_PIN(125, "GPIO_125"),
+ PINCTRL_PIN(126, "GPIO_126"),
+ PINCTRL_PIN(127, "GPIO_127"),
+ PINCTRL_PIN(128, "GPIO_128"),
+ PINCTRL_PIN(129, "GPIO_129"),
+ PINCTRL_PIN(130, "GPIO_130"),
+ PINCTRL_PIN(131, "GPIO_131"),
+ PINCTRL_PIN(132, "GPIO_132"),
+ PINCTRL_PIN(133, "GPIO_133"),
+ PINCTRL_PIN(134, "GPIO_134"),
+ PINCTRL_PIN(135, "GPIO_135"),
+ PINCTRL_PIN(136, "GPIO_136"),
+ PINCTRL_PIN(137, "GPIO_137"),
+ PINCTRL_PIN(138, "GPIO_138"),
+ PINCTRL_PIN(139, "GPIO_139"),
+ PINCTRL_PIN(140, "GPIO_140"),
+ PINCTRL_PIN(141, "GPIO_141"),
+ PINCTRL_PIN(142, "GPIO_142"),
+ PINCTRL_PIN(143, "GPIO_143"),
+ PINCTRL_PIN(144, "GPIO_144"),
+ PINCTRL_PIN(145, "GPIO_145"),
+ PINCTRL_PIN(146, "GPIO_146"),
+ PINCTRL_PIN(147, "GPIO_147"),
+ PINCTRL_PIN(148, "GPIO_148"),
+ PINCTRL_PIN(149, "GPIO_149"),
+ PINCTRL_PIN(150, "GPIO_150"),
+ PINCTRL_PIN(151, "GPIO_151"),
+ PINCTRL_PIN(152, "GPIO_152"),
+ PINCTRL_PIN(153, "GPIO_153"),
+ PINCTRL_PIN(154, "GPIO_154"),
+ PINCTRL_PIN(155, "GPIO_155"),
+ PINCTRL_PIN(156, "GPIO_156"),
+ PINCTRL_PIN(157, "GPIO_157"),
+ PINCTRL_PIN(158, "GPIO_158"),
+ PINCTRL_PIN(159, "GPIO_159"),
+ PINCTRL_PIN(160, "GPIO_160"),
+ PINCTRL_PIN(161, "GPIO_161"),
+ PINCTRL_PIN(162, "GPIO_162"),
+ PINCTRL_PIN(163, "GPIO_163"),
+ PINCTRL_PIN(164, "GPIO_164"),
+ PINCTRL_PIN(165, "GPIO_165"),
+ PINCTRL_PIN(166, "GPIO_166"),
+ PINCTRL_PIN(167, "GPIO_167"),
+ PINCTRL_PIN(168, "GPIO_168"),
+ PINCTRL_PIN(169, "GPIO_169"),
+ PINCTRL_PIN(170, "GPIO_170"),
+ PINCTRL_PIN(171, "GPIO_171"),
+ PINCTRL_PIN(172, "GPIO_172"),
+ PINCTRL_PIN(173, "GPIO_173"),
+ PINCTRL_PIN(174, "GPIO_174"),
+ PINCTRL_PIN(175, "GPIO_175"),
+ PINCTRL_PIN(176, "GPIO_176"),
+ PINCTRL_PIN(177, "GPIO_177"),
+ PINCTRL_PIN(178, "GPIO_178"),
+ PINCTRL_PIN(179, "GPIO_179"),
+ PINCTRL_PIN(180, "GPIO_180"),
+ PINCTRL_PIN(181, "GPIO_181"),
+ PINCTRL_PIN(182, "GPIO_182"),
+ PINCTRL_PIN(183, "GPIO_183"),
+ PINCTRL_PIN(184, "GPIO_184"),
+ PINCTRL_PIN(185, "GPIO_185"),
+ PINCTRL_PIN(186, "GPIO_186"),
+ PINCTRL_PIN(187, "GPIO_187"),
+ PINCTRL_PIN(188, "GPIO_188"),
+ PINCTRL_PIN(189, "GPIO_189"),
+ PINCTRL_PIN(190, "GPIO_190"),
+ PINCTRL_PIN(191, "GPIO_191"),
+ PINCTRL_PIN(192, "GPIO_192"),
+ PINCTRL_PIN(193, "GPIO_193"),
+ PINCTRL_PIN(194, "GPIO_194"),
+ PINCTRL_PIN(195, "GPIO_195"),
+ PINCTRL_PIN(196, "GPIO_196"),
+ PINCTRL_PIN(197, "GPIO_197"),
+ PINCTRL_PIN(198, "GPIO_198"),
+ PINCTRL_PIN(199, "GPIO_199"),
+ PINCTRL_PIN(200, "GPIO_200"),
+ PINCTRL_PIN(201, "GPIO_201"),
+ PINCTRL_PIN(202, "GPIO_202"),
+ PINCTRL_PIN(203, "GPIO_203"),
+ PINCTRL_PIN(204, "GPIO_204"),
+ PINCTRL_PIN(205, "GPIO_205"),
+ PINCTRL_PIN(206, "GPIO_206"),
+ PINCTRL_PIN(207, "GPIO_207"),
+ PINCTRL_PIN(208, "GPIO_208"),
+ PINCTRL_PIN(209, "GPIO_209"),
+ PINCTRL_PIN(210, "UFS_RESET"),
+ PINCTRL_PIN(211, "SDC2_CLK"),
+ PINCTRL_PIN(212, "SDC2_CMD"),
+ PINCTRL_PIN(213, "SDC2_DATA"),
+};
+
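+/* Declare a one-element pin array per GPIO so each pin forms its own group. */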
+#define DECLARE_MSM_GPIO_PINS(pin) \
+ static const unsigned int gpio##pin##_pins[] = { pin }
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+DECLARE_MSM_GPIO_PINS(150);
+DECLARE_MSM_GPIO_PINS(151);
+DECLARE_MSM_GPIO_PINS(152);
+DECLARE_MSM_GPIO_PINS(153);
+DECLARE_MSM_GPIO_PINS(154);
+DECLARE_MSM_GPIO_PINS(155);
+DECLARE_MSM_GPIO_PINS(156);
+DECLARE_MSM_GPIO_PINS(157);
+DECLARE_MSM_GPIO_PINS(158);
+DECLARE_MSM_GPIO_PINS(159);
+DECLARE_MSM_GPIO_PINS(160);
+DECLARE_MSM_GPIO_PINS(161);
+DECLARE_MSM_GPIO_PINS(162);
+DECLARE_MSM_GPIO_PINS(163);
+DECLARE_MSM_GPIO_PINS(164);
+DECLARE_MSM_GPIO_PINS(165);
+DECLARE_MSM_GPIO_PINS(166);
+DECLARE_MSM_GPIO_PINS(167);
+DECLARE_MSM_GPIO_PINS(168);
+DECLARE_MSM_GPIO_PINS(169);
+DECLARE_MSM_GPIO_PINS(170);
+DECLARE_MSM_GPIO_PINS(171);
+DECLARE_MSM_GPIO_PINS(172);
+DECLARE_MSM_GPIO_PINS(173);
+DECLARE_MSM_GPIO_PINS(174);
+DECLARE_MSM_GPIO_PINS(175);
+DECLARE_MSM_GPIO_PINS(176);
+DECLARE_MSM_GPIO_PINS(177);
+DECLARE_MSM_GPIO_PINS(178);
+DECLARE_MSM_GPIO_PINS(179);
+DECLARE_MSM_GPIO_PINS(180);
+DECLARE_MSM_GPIO_PINS(181);
+DECLARE_MSM_GPIO_PINS(182);
+DECLARE_MSM_GPIO_PINS(183);
+DECLARE_MSM_GPIO_PINS(184);
+DECLARE_MSM_GPIO_PINS(185);
+DECLARE_MSM_GPIO_PINS(186);
+DECLARE_MSM_GPIO_PINS(187);
+DECLARE_MSM_GPIO_PINS(188);
+DECLARE_MSM_GPIO_PINS(189);
+DECLARE_MSM_GPIO_PINS(190);
+DECLARE_MSM_GPIO_PINS(191);
+DECLARE_MSM_GPIO_PINS(192);
+DECLARE_MSM_GPIO_PINS(193);
+DECLARE_MSM_GPIO_PINS(194);
+DECLARE_MSM_GPIO_PINS(195);
+DECLARE_MSM_GPIO_PINS(196);
+DECLARE_MSM_GPIO_PINS(197);
+DECLARE_MSM_GPIO_PINS(198);
+DECLARE_MSM_GPIO_PINS(199);
+DECLARE_MSM_GPIO_PINS(200);
+DECLARE_MSM_GPIO_PINS(201);
+DECLARE_MSM_GPIO_PINS(202);
+DECLARE_MSM_GPIO_PINS(203);
+DECLARE_MSM_GPIO_PINS(204);
+DECLARE_MSM_GPIO_PINS(205);
+DECLARE_MSM_GPIO_PINS(206);
+DECLARE_MSM_GPIO_PINS(207);
+DECLARE_MSM_GPIO_PINS(208);
+DECLARE_MSM_GPIO_PINS(209);
+
+static const unsigned int ufs_reset_pins[] = { 210 };
+static const unsigned int sdc2_clk_pins[] = { 211 };
+static const unsigned int sdc2_cmd_pins[] = { 212 };
+static const unsigned int sdc2_data_pins[] = { 213 };
+
+enum sm8450_functions {
+ msm_mux_gpio,
+ msm_mux_aon_cam,
+ msm_mux_atest_char,
+ msm_mux_atest_usb,
+ msm_mux_audio_ref,
+ msm_mux_cam_mclk,
+ msm_mux_cci_async,
+ msm_mux_cci_i2c,
+ msm_mux_cci_timer,
+ msm_mux_cmu_rng,
+ msm_mux_coex_uart1,
+ msm_mux_coex_uart2,
+ msm_mux_cri_trng,
+ msm_mux_cri_trng0,
+ msm_mux_cri_trng1,
+ msm_mux_dbg_out,
+ msm_mux_ddr_bist,
+ msm_mux_ddr_pxi0,
+ msm_mux_ddr_pxi1,
+ msm_mux_ddr_pxi2,
+ msm_mux_ddr_pxi3,
+ msm_mux_dp_hot,
+ msm_mux_gcc_gp1,
+ msm_mux_gcc_gp2,
+ msm_mux_gcc_gp3,
+ msm_mux_ibi_i3c,
+ msm_mux_jitter_bist,
+ msm_mux_mdp_vsync,
+ msm_mux_mdp_vsync0,
+ msm_mux_mdp_vsync1,
+ msm_mux_mdp_vsync2,
+ msm_mux_mdp_vsync3,
+ msm_mux_mi2s0_data0,
+ msm_mux_mi2s0_data1,
+ msm_mux_mi2s0_sck,
+ msm_mux_mi2s0_ws,
+ msm_mux_mi2s2_data0,
+ msm_mux_mi2s2_data1,
+ msm_mux_mi2s2_sck,
+ msm_mux_mi2s2_ws,
+ msm_mux_mss_grfc0,
+ msm_mux_mss_grfc1,
+ msm_mux_mss_grfc10,
+ msm_mux_mss_grfc11,
+ msm_mux_mss_grfc12,
+ msm_mux_mss_grfc2,
+ msm_mux_mss_grfc3,
+ msm_mux_mss_grfc4,
+ msm_mux_mss_grfc5,
+ msm_mux_mss_grfc6,
+ msm_mux_mss_grfc7,
+ msm_mux_mss_grfc8,
+ msm_mux_mss_grfc9,
+ msm_mux_nav,
+ msm_mux_pcie0_clkreqn,
+ msm_mux_pcie1_clkreqn,
+ msm_mux_phase_flag,
+ msm_mux_pll_bist,
+ msm_mux_pll_clk,
+ msm_mux_pri_mi2s,
+ msm_mux_prng_rosc,
+ msm_mux_qdss_cti,
+ msm_mux_qdss_gpio,
+ msm_mux_qlink0_enable,
+ msm_mux_qlink0_request,
+ msm_mux_qlink0_wmss,
+ msm_mux_qlink1_enable,
+ msm_mux_qlink1_request,
+ msm_mux_qlink1_wmss,
+ msm_mux_qlink2_enable,
+ msm_mux_qlink2_request,
+ msm_mux_qlink2_wmss,
+ msm_mux_qspi0,
+ msm_mux_qspi1,
+ msm_mux_qspi2,
+ msm_mux_qspi3,
+ msm_mux_qspi_clk,
+ msm_mux_qspi_cs,
+ msm_mux_qup0,
+ msm_mux_qup1,
+ msm_mux_qup10,
+ msm_mux_qup11,
+ msm_mux_qup12,
+ msm_mux_qup13,
+ msm_mux_qup14,
+ msm_mux_qup15,
+ msm_mux_qup16,
+ msm_mux_qup17,
+ msm_mux_qup18,
+ msm_mux_qup19,
+ msm_mux_qup2,
+ msm_mux_qup20,
+ msm_mux_qup21,
+ msm_mux_qup3,
+ msm_mux_qup4,
+ msm_mux_qup5,
+ msm_mux_qup6,
+ msm_mux_qup7,
+ msm_mux_qup8,
+ msm_mux_qup9,
+ msm_mux_qup_l4,
+ msm_mux_qup_l5,
+ msm_mux_qup_l6,
+ msm_mux_sd_write,
+ msm_mux_sdc40,
+ msm_mux_sdc41,
+ msm_mux_sdc42,
+ msm_mux_sdc43,
+ msm_mux_sdc4_clk,
+ msm_mux_sdc4_cmd,
+ msm_mux_sec_mi2s,
+ msm_mux_tb_trig,
+ msm_mux_tgu_ch0,
+ msm_mux_tgu_ch1,
+ msm_mux_tgu_ch2,
+ msm_mux_tgu_ch3,
+ msm_mux_tmess_prng0,
+ msm_mux_tmess_prng1,
+ msm_mux_tmess_prng2,
+ msm_mux_tmess_prng3,
+ msm_mux_tsense_pwm1,
+ msm_mux_tsense_pwm2,
+ msm_mux_uim0_clk,
+ msm_mux_uim0_data,
+ msm_mux_uim0_present,
+ msm_mux_uim0_reset,
+ msm_mux_uim1_clk,
+ msm_mux_uim1_data,
+ msm_mux_uim1_present,
+ msm_mux_uim1_reset,
+ msm_mux_usb2phy_ac,
+ msm_mux_usb_phy,
+ msm_mux_vfr_0,
+ msm_mux_vfr_1,
+ msm_mux_vsense_trigger,
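+ /* conventionally kept last: the "no function" marker for '_' slots */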
+ msm_mux__,
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+ "gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+ "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+ "gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+ "gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+ "gpio36", "gpio37", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42",
+ "gpio43", "gpio44", "gpio45", "gpio46", "gpio47", "gpio48", "gpio49",
+ "gpio50", "gpio51", "gpio52", "gpio53", "gpio54", "gpio55", "gpio56",
+ "gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+ "gpio64", "gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70",
+ "gpio71", "gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77",
+ "gpio78", "gpio79", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84",
+ "gpio85", "gpio86", "gpio87", "gpio88", "gpio89", "gpio90", "gpio91",
+ "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98",
+ "gpio99", "gpio100", "gpio101", "gpio102", "gpio103", "gpio104",
+ "gpio105", "gpio106", "gpio107", "gpio108", "gpio109", "gpio110",
+ "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio116",
+ "gpio117", "gpio118", "gpio119", "gpio120", "gpio121", "gpio122",
+ "gpio123", "gpio124", "gpio125", "gpio126", "gpio127", "gpio128",
+ "gpio129", "gpio130", "gpio131", "gpio132", "gpio133", "gpio134",
+ "gpio135", "gpio136", "gpio137", "gpio138", "gpio139", "gpio140",
+ "gpio141", "gpio142", "gpio143", "gpio144", "gpio145", "gpio146",
+ "gpio147", "gpio148", "gpio149", "gpio150", "gpio151", "gpio152",
+ "gpio153", "gpio154", "gpio155", "gpio156", "gpio157", "gpio158",
+ "gpio159", "gpio160", "gpio161", "gpio162", "gpio163", "gpio164",
+ "gpio165", "gpio166", "gpio167", "gpio168", "gpio169", "gpio170",
+ "gpio171", "gpio172", "gpio173", "gpio174", "gpio175", "gpio176",
+ "gpio177", "gpio178", "gpio179", "gpio180", "gpio181", "gpio182",
+ "gpio183", "gpio184", "gpio185", "gpio186", "gpio187", "gpio188",
+ "gpio189", "gpio190", "gpio191", "gpio192", "gpio193", "gpio194",
+ "gpio195", "gpio196", "gpio197", "gpio198", "gpio199", "gpio200",
+ "gpio201", "gpio202", "gpio203", "gpio204", "gpio205", "gpio206",
+ "gpio207", "gpio208", "gpio209",
+};
+
+static const char * const aon_cam_groups[] = {
+ "gpio108",
+};
+
+static const char * const atest_char_groups[] = {
+ "gpio86", "gpio87", "gpio88", "gpio89", "gpio90",
+};
+
+static const char * const atest_usb_groups[] = {
+ "gpio37", "gpio39", "gpio55", "gpio148", "gpio149",
+};
+
+static const char * const audio_ref_groups[] = {
+ "gpio124",
+};
+
+static const char * const cam_mclk_groups[] = {
+ "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+};
+
+static const char * const cci_async_groups[] = {
+ "gpio109", "gpio119", "gpio120",
+};
+
+static const char * const cci_i2c_groups[] = {
+ "gpio110", "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio208", "gpio209",
+};
+
+static const char * const cci_timer_groups[] = {
+ "gpio116", "gpio117", "gpio118", "gpio119", "gpio120",
+};
+
+static const char * const cmu_rng_groups[] = {
+ "gpio94", "gpio95", "gpio96", "gpio97",
+};
+
+static const char * const coex_uart1_groups[] = {
+ "gpio148", "gpio149",
+};
+
+static const char * const coex_uart2_groups[] = {
+ "gpio150", "gpio151",
+};
+
+static const char * const cri_trng_groups[] = {
+ "gpio99",
+};
+
+static const char * const cri_trng0_groups[] = {
+ "gpio71",
+};
+
+static const char * const cri_trng1_groups[] = {
+ "gpio72",
+};
+
+static const char * const dbg_out_groups[] = {
+ "gpio9",
+};
+
+static const char * const ddr_bist_groups[] = {
+ "gpio36", "gpio37", "gpio40", "gpio41",
+};
+
+static const char * const ddr_pxi0_groups[] = {
+ "gpio51", "gpio52",
+};
+
+static const char * const ddr_pxi1_groups[] = {
+ "gpio40", "gpio41",
+};
+
+static const char * const ddr_pxi2_groups[] = {
+ "gpio45", "gpio47",
+};
+
+static const char * const ddr_pxi3_groups[] = {
+ "gpio43", "gpio44",
+};
+
+static const char * const dp_hot_groups[] = {
+ "gpio47",
+};
+
+static const char * const gcc_gp1_groups[] = {
+ "gpio86", "gpio134",
+};
+
+static const char * const gcc_gp2_groups[] = {
+ "gpio87", "gpio135",
+};
+
+static const char * const gcc_gp3_groups[] = {
+ "gpio88", "gpio136",
+};
+
+static const char * const ibi_i3c_groups[] = {
+ "gpio28", "gpio29", "gpio32", "gpio33", "gpio56", "gpio57", "gpio60", "gpio61",
+};
+
+static const char * const jitter_bist_groups[] = {
+ "gpio24",
+};
+
+static const char * const mdp_vsync_groups[] = {
+ "gpio46", "gpio47", "gpio86", "gpio87", "gpio88",
+};
+
+static const char * const mdp_vsync0_groups[] = {
+ "gpio86",
+};
+
+static const char * const mdp_vsync1_groups[] = {
+ "gpio86",
+};
+
+static const char * const mdp_vsync2_groups[] = {
+ "gpio87",
+};
+
+static const char * const mdp_vsync3_groups[] = {
+ "gpio87",
+};
+
+static const char * const mi2s0_data0_groups[] = {
+ "gpio127",
+};
+
+static const char * const mi2s0_data1_groups[] = {
+ "gpio128",
+};
+
+static const char * const mi2s0_sck_groups[] = {
+ "gpio126",
+};
+
+static const char * const mi2s0_ws_groups[] = {
+ "gpio129",
+};
+
+static const char * const mi2s2_data0_groups[] = {
+ "gpio122",
+};
+
+static const char * const mi2s2_data1_groups[] = {
+ "gpio124",
+};
+
+static const char * const mi2s2_sck_groups[] = {
+ "gpio121",
+};
+
+static const char * const mi2s2_ws_groups[] = {
+ "gpio123",
+};
+
+static const char * const mss_grfc0_groups[] = {
+ "gpio138", "gpio153",
+};
+
+static const char * const mss_grfc1_groups[] = {
+ "gpio139",
+};
+
+static const char * const mss_grfc10_groups[] = {
+ "gpio150",
+};
+
+static const char * const mss_grfc11_groups[] = {
+ "gpio151",
+};
+
+static const char * const mss_grfc12_groups[] = {
+ "gpio152",
+};
+
+static const char * const mss_grfc2_groups[] = {
+ "gpio140",
+};
+
+static const char * const mss_grfc3_groups[] = {
+ "gpio141",
+};
+
+static const char * const mss_grfc4_groups[] = {
+ "gpio142",
+};
+
+static const char * const mss_grfc5_groups[] = {
+ "gpio143",
+};
+
+static const char * const mss_grfc6_groups[] = {
+ "gpio144",
+};
+
+static const char * const mss_grfc7_groups[] = {
+ "gpio145",
+};
+
+static const char * const mss_grfc8_groups[] = {
+ "gpio146",
+};
+
+static const char * const mss_grfc9_groups[] = {
+ "gpio147",
+};
+
+static const char * const nav_groups[] = {
+ "gpio153", "gpio154", "gpio155",
+};
+
+static const char * const pcie0_clkreqn_groups[] = {
+ "gpio95",
+};
+
+static const char * const pcie1_clkreqn_groups[] = {
+ "gpio98",
+};
+
+static const char * const phase_flag_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7", "gpio10", "gpio11", "gpio12", "gpio13",
+ "gpio14", "gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio25", "gpio26",
+ "gpio76", "gpio77", "gpio78", "gpio79", "gpio81", "gpio82", "gpio83", "gpio92",
+ "gpio93", "gpio94", "gpio95", "gpio96", "gpio97", "gpio98", "gpio99",
+};
+
+static const char * const pll_bist_groups[] = {
+ "gpio20",
+};
+
+static const char * const pll_clk_groups[] = {
+ "gpio107",
+};
+
+static const char * const pri_mi2s_groups[] = {
+ "gpio125",
+};
+
+static const char * const prng_rosc_groups[] = {
+ "gpio73", "gpio75", "gpio81", "gpio83", "gpio81",
+};
+
+static const char * const qdss_cti_groups[] = {
+ "gpio2", "gpio80", "gpio81", "gpio82", "gpio83", "gpio84", "gpio85", "gpio93",
+};
+
+static const char * const qdss_gpio_groups[] = {
+ "gpio100", "gpio101", "gpio102", "gpio103", "gpio104", "gpio105", "gpio106", "gpio107",
+ "gpio110", "gpio111", "gpio112", "gpio113", "gpio114", "gpio115", "gpio117", "gpio118",
+ "gpio119", "gpio120", "gpio188", "gpio189", "gpio190", "gpio191", "gpio192", "gpio193",
+ "gpio194", "gpio195", "gpio196", "gpio197", "gpio198", "gpio199", "gpio200", "gpio201",
+ "gpio202", "gpio203", "gpio204", "gpio205",
+};
+
+static const char * const qlink0_enable_groups[] = {
+ "gpio157",
+};
+
+static const char * const qlink0_request_groups[] = {
+ "gpio156",
+};
+
+static const char * const qlink0_wmss_groups[] = {
+ "gpio158",
+};
+
+static const char * const qlink1_enable_groups[] = {
+ "gpio160",
+};
+
+static const char * const qlink1_request_groups[] = {
+ "gpio159",
+};
+
+static const char * const qlink1_wmss_groups[] = {
+ "gpio161",
+};
+
+static const char * const qlink2_enable_groups[] = {
+ "gpio163",
+};
+
+static const char * const qlink2_request_groups[] = {
+ "gpio162",
+};
+
+static const char * const qlink2_wmss_groups[] = {
+ "gpio164",
+};
+
+static const char * const qspi0_groups[] = {
+ "gpio52",
+};
+
+static const char * const qspi1_groups[] = {
+ "gpio53",
+};
+
+static const char * const qspi2_groups[] = {
+ "gpio48",
+};
+
+static const char * const qspi3_groups[] = {
+ "gpio49",
+};
+
+static const char * const qspi_clk_groups[] = {
+ "gpio50",
+};
+
+static const char * const qspi_cs_groups[] = {
+ "gpio51", "gpio54",
+};
+
+static const char * const qup0_groups[] = {
+ "gpio0", "gpio1", "gpio2", "gpio3",
+};
+
+static const char * const qup1_groups[] = {
+ "gpio4", "gpio5", "gpio6", "gpio7",
+};
+
+static const char * const qup10_groups[] = {
+ "gpio36", "gpio37", "gpio38", "gpio39",
+};
+
+static const char * const qup11_groups[] = {
+ "gpio40", "gpio41", "gpio42", "gpio43",
+};
+
+static const char * const qup12_groups[] = {
+ "gpio44", "gpio45", "gpio46", "gpio47",
+};
+
+static const char * const qup13_groups[] = {
+ "gpio48", "gpio49", "gpio50", "gpio51",
+};
+
+static const char * const qup14_groups[] = {
+ "gpio52", "gpio53", "gpio54", "gpio55",
+};
+
+static const char * const qup15_groups[] = {
+ "gpio56", "gpio57", "gpio58", "gpio59",
+};
+
+static const char * const qup16_groups[] = {
+ "gpio60", "gpio61", "gpio62", "gpio63",
+};
+
+static const char * const qup17_groups[] = {
+ "gpio64", "gpio65", "gpio66", "gpio67",
+};
+
+static const char * const qup18_groups[] = {
+ "gpio68", "gpio69", "gpio70", "gpio71",
+};
+
+static const char * const qup19_groups[] = {
+ "gpio72", "gpio73", "gpio74", "gpio75",
+};
+
+static const char * const qup2_groups[] = {
+ "gpio8", "gpio9", "gpio10", "gpio11",
+};
+
+static const char * const qup20_groups[] = {
+ "gpio76", "gpio77", "gpio78", "gpio79",
+};
+
+static const char * const qup21_groups[] = {
+ "gpio80", "gpio81", "gpio82", "gpio83",
+};
+
+static const char * const qup3_groups[] = {
+ "gpio12", "gpio13", "gpio14", "gpio15",
+};
+
+static const char * const qup4_groups[] = {
+ "gpio16", "gpio17", "gpio18", "gpio19",
+};
+
+static const char * const qup5_groups[] = {
+ "gpio84", "gpio85", "gpio206", "gpio207",
+};
+
+static const char * const qup6_groups[] = {
+ "gpio20", "gpio21", "gpio22", "gpio23",
+};
+
+static const char * const qup7_groups[] = {
+ "gpio24", "gpio25", "gpio26", "gpio27",
+};
+
+static const char * const qup8_groups[] = {
+ "gpio28", "gpio29", "gpio30", "gpio31",
+};
+
+static const char * const qup9_groups[] = {
+ "gpio32", "gpio33", "gpio34", "gpio35",
+};
+
+static const char * const qup_l4_groups[] = {
+ "gpio24", "gpio40", "gpio58", "gpio63",
+};
+
+static const char * const qup_l5_groups[] = {
+ "gpio25", "gpio41", "gpio59", "gpio66",
+};
+
+static const char * const qup_l6_groups[] = {
+ "gpio26", "gpio42", "gpio62", "gpio67",
+};
+
+static const char * const sd_write_groups[] = {
+ "gpio93",
+};
+
+static const char * const sdc40_groups[] = {
+ "gpio52",
+};
+
+static const char * const sdc41_groups[] = {
+ "gpio53",
+};
+
+static const char * const sdc42_groups[] = {
+ "gpio48",
+};
+
+static const char * const sdc43_groups[] = {
+ "gpio49",
+};
+
+static const char * const sdc4_clk_groups[] = {
+ "gpio50",
+};
+
+static const char * const sdc4_cmd_groups[] = {
+ "gpio51",
+};
+
+static const char * const sec_mi2s_groups[] = {
+ "gpio124",
+};
+
+static const char * const tb_trig_groups[] = {
+ "gpio64", "gpio137",
+};
+
+static const char * const tgu_ch0_groups[] = {
+ "gpio64",
+};
+
+static const char * const tgu_ch1_groups[] = {
+ "gpio65",
+};
+
+static const char * const tgu_ch2_groups[] = {
+ "gpio66",
+};
+
+static const char * const tgu_ch3_groups[] = {
+ "gpio67",
+};
+
+static const char * const tmess_prng0_groups[] = {
+ "gpio80",
+};
+
+static const char * const tmess_prng1_groups[] = {
+ "gpio79",
+};
+
+static const char * const tmess_prng2_groups[] = {
+ "gpio77",
+};
+
+static const char * const tmess_prng3_groups[] = {
+ "gpio76",
+};
+
+static const char * const tsense_pwm1_groups[] = {
+ "gpio91",
+};
+
+static const char * const tsense_pwm2_groups[] = {
+ "gpio91",
+};
+
+static const char * const uim0_clk_groups[] = {
+ "gpio131",
+};
+
+static const char * const uim0_data_groups[] = {
+ "gpio130",
+};
+
+static const char * const uim0_present_groups[] = {
+ "gpio133",
+};
+
+static const char * const uim0_reset_groups[] = {
+ "gpio132",
+};
+
+static const char * const uim1_clk_groups[] = {
+ "gpio135",
+};
+
+static const char * const uim1_data_groups[] = {
+ "gpio134",
+};
+
+static const char * const uim1_present_groups[] = {
+ "gpio137",
+};
+
+static const char * const uim1_reset_groups[] = {
+ "gpio136",
+};
+
+static const char * const usb2phy_ac_groups[] = {
+ "gpio90",
+};
+
+static const char * const usb_phy_groups[] = {
+ "gpio91",
+};
+
+static const char * const vfr_0_groups[] = {
+ "gpio89",
+};
+
+static const char * const vfr_1_groups[] = {
+ "gpio155",
+};
+
+static const char * const vsense_trigger_groups[] = {
+ "gpio18",
+};
+
+static const struct msm_function sm8450_functions[] = {
+ FUNCTION(gpio),
+ FUNCTION(aon_cam),
+ FUNCTION(atest_char),
+ FUNCTION(atest_usb),
+ FUNCTION(audio_ref),
+ FUNCTION(cam_mclk),
+ FUNCTION(cci_async),
+ FUNCTION(cci_i2c),
+ FUNCTION(cci_timer),
+ FUNCTION(cmu_rng),
+ FUNCTION(coex_uart1),
+ FUNCTION(coex_uart2),
+ FUNCTION(cri_trng),
+ FUNCTION(cri_trng0),
+ FUNCTION(cri_trng1),
+ FUNCTION(dbg_out),
+ FUNCTION(ddr_bist),
+ FUNCTION(ddr_pxi0),
+ FUNCTION(ddr_pxi1),
+ FUNCTION(ddr_pxi2),
+ FUNCTION(ddr_pxi3),
+ FUNCTION(dp_hot),
+ FUNCTION(gcc_gp1),
+ FUNCTION(gcc_gp2),
+ FUNCTION(gcc_gp3),
+ FUNCTION(ibi_i3c),
+ FUNCTION(jitter_bist),
+ FUNCTION(mdp_vsync),
+ FUNCTION(mdp_vsync0),
+ FUNCTION(mdp_vsync1),
+ FUNCTION(mdp_vsync2),
+ FUNCTION(mdp_vsync3),
+ FUNCTION(mi2s0_data0),
+ FUNCTION(mi2s0_data1),
+ FUNCTION(mi2s0_sck),
+ FUNCTION(mi2s0_ws),
+ FUNCTION(mi2s2_data0),
+ FUNCTION(mi2s2_data1),
+ FUNCTION(mi2s2_sck),
+ FUNCTION(mi2s2_ws),
+ FUNCTION(mss_grfc0),
+ FUNCTION(mss_grfc1),
+ FUNCTION(mss_grfc10),
+ FUNCTION(mss_grfc11),
+ FUNCTION(mss_grfc12),
+ FUNCTION(mss_grfc2),
+ FUNCTION(mss_grfc3),
+ FUNCTION(mss_grfc4),
+ FUNCTION(mss_grfc5),
+ FUNCTION(mss_grfc6),
+ FUNCTION(mss_grfc7),
+ FUNCTION(mss_grfc8),
+ FUNCTION(mss_grfc9),
+ FUNCTION(nav),
+ FUNCTION(pcie0_clkreqn),
+ FUNCTION(pcie1_clkreqn),
+ FUNCTION(phase_flag),
+ FUNCTION(pll_bist),
+ FUNCTION(pll_clk),
+ FUNCTION(pri_mi2s),
+ FUNCTION(prng_rosc),
+ FUNCTION(qdss_cti),
+ FUNCTION(qdss_gpio),
+ FUNCTION(qlink0_enable),
+ FUNCTION(qlink0_request),
+ FUNCTION(qlink0_wmss),
+ FUNCTION(qlink1_enable),
+ FUNCTION(qlink1_request),
+ FUNCTION(qlink1_wmss),
+ FUNCTION(qlink2_enable),
+ FUNCTION(qlink2_request),
+ FUNCTION(qlink2_wmss),
+ FUNCTION(qspi0),
+ FUNCTION(qspi1),
+ FUNCTION(qspi2),
+ FUNCTION(qspi3),
+ FUNCTION(qspi_clk),
+ FUNCTION(qspi_cs),
+ FUNCTION(qup0),
+ FUNCTION(qup1),
+ FUNCTION(qup10),
+ FUNCTION(qup11),
+ FUNCTION(qup12),
+ FUNCTION(qup13),
+ FUNCTION(qup14),
+ FUNCTION(qup15),
+ FUNCTION(qup16),
+ FUNCTION(qup17),
+ FUNCTION(qup18),
+ FUNCTION(qup19),
+ FUNCTION(qup2),
+ FUNCTION(qup20),
+ FUNCTION(qup21),
+ FUNCTION(qup3),
+ FUNCTION(qup4),
+ FUNCTION(qup5),
+ FUNCTION(qup6),
+ FUNCTION(qup7),
+ FUNCTION(qup8),
+ FUNCTION(qup9),
+ FUNCTION(qup_l4),
+ FUNCTION(qup_l5),
+ FUNCTION(qup_l6),
+ FUNCTION(sd_write),
+ FUNCTION(sdc40),
+ FUNCTION(sdc41),
+ FUNCTION(sdc42),
+ FUNCTION(sdc43),
+ FUNCTION(sdc4_clk),
+ FUNCTION(sdc4_cmd),
+ FUNCTION(sec_mi2s),
+ FUNCTION(tb_trig),
+ FUNCTION(tgu_ch0),
+ FUNCTION(tgu_ch1),
+ FUNCTION(tgu_ch2),
+ FUNCTION(tgu_ch3),
+ FUNCTION(tmess_prng0),
+ FUNCTION(tmess_prng1),
+ FUNCTION(tmess_prng2),
+ FUNCTION(tmess_prng3),
+ FUNCTION(tsense_pwm1),
+ FUNCTION(tsense_pwm2),
+ FUNCTION(uim0_clk),
+ FUNCTION(uim0_data),
+ FUNCTION(uim0_present),
+ FUNCTION(uim0_reset),
+ FUNCTION(uim1_clk),
+ FUNCTION(uim1_data),
+ FUNCTION(uim1_present),
+ FUNCTION(uim1_reset),
+ FUNCTION(usb2phy_ac),
+ FUNCTION(usb_phy),
+ FUNCTION(vfr_0),
+ FUNCTION(vfr_1),
+ FUNCTION(vsense_trigger),
+};
+
+/*
+ * Every pin is maintained as a single group. Any missing or non-existent pin
+ * is maintained as a dummy group to keep the pin group index in sync with
+ * the pin descriptors registered with the pinctrl core.
+ * Clients cannot request these dummy pin groups.
+ */
+static const struct msm_pingroup sm8450_groups[] = {
+ [0] = PINGROUP(0, qup0, _, _, _, _, _, _, _, _),
+ [1] = PINGROUP(1, qup0, _, _, _, _, _, _, _, _),
+ [2] = PINGROUP(2, qup0, qdss_cti, _, _, _, _, _, _, _),
+ [3] = PINGROUP(3, qup0, _, _, _, _, _, _, _, _),
+ [4] = PINGROUP(4, qup1, phase_flag, _, _, _, _, _, _, _),
+ [5] = PINGROUP(5, qup1, phase_flag, _, _, _, _, _, _, _),
+ [6] = PINGROUP(6, qup1, phase_flag, _, _, _, _, _, _, _),
+ [7] = PINGROUP(7, qup1, phase_flag, _, _, _, _, _, _, _),
+ [8] = PINGROUP(8, qup2, _, _, _, _, _, _, _, _),
+ [9] = PINGROUP(9, qup2, dbg_out, _, _, _, _, _, _, _),
+ [10] = PINGROUP(10, qup2, phase_flag, _, _, _, _, _, _, _),
+ [11] = PINGROUP(11, qup2, phase_flag, _, _, _, _, _, _, _),
+ [12] = PINGROUP(12, qup3, phase_flag, _, _, _, _, _, _, _),
+ [13] = PINGROUP(13, qup3, phase_flag, _, _, _, _, _, _, _),
+ [14] = PINGROUP(14, qup3, phase_flag, _, _, _, _, _, _, _),
+ [15] = PINGROUP(15, qup3, phase_flag, _, _, _, _, _, _, _),
+ [16] = PINGROUP(16, qup4, phase_flag, _, _, _, _, _, _, _),
+ [17] = PINGROUP(17, qup4, phase_flag, _, _, _, _, _, _, _),
+ [18] = PINGROUP(18, qup4, phase_flag, _, vsense_trigger, _, _, _, _, _),
+ [19] = PINGROUP(19, qup4, phase_flag, _, _, _, _, _, _, _),
+ [20] = PINGROUP(20, qup6, pll_bist, _, _, _, _, _, _, _),
+ [21] = PINGROUP(21, qup6, _, _, _, _, _, _, _, _),
+ [22] = PINGROUP(22, qup6, _, _, _, _, _, _, _, _),
+ [23] = PINGROUP(23, qup6, _, _, _, _, _, _, _, _),
+ [24] = PINGROUP(24, qup7, qup_l4, jitter_bist, _, _, _, _, _, _),
+ [25] = PINGROUP(25, qup7, qup_l5, phase_flag, _, _, _, _, _, _),
+ [26] = PINGROUP(26, qup7, qup_l6, phase_flag, _, _, _, _, _, _),
+ [27] = PINGROUP(27, qup7, _, _, _, _, _, _, _, _),
+ [28] = PINGROUP(28, qup8, ibi_i3c, _, _, _, _, _, _, _),
+ [29] = PINGROUP(29, qup8, ibi_i3c, _, _, _, _, _, _, _),
+ [30] = PINGROUP(30, qup8, _, _, _, _, _, _, _, _),
+ [31] = PINGROUP(31, qup8, _, _, _, _, _, _, _, _),
+ [32] = PINGROUP(32, qup9, ibi_i3c, _, _, _, _, _, _, _),
+ [33] = PINGROUP(33, qup9, ibi_i3c, _, _, _, _, _, _, _),
+ [34] = PINGROUP(34, qup9, _, _, _, _, _, _, _, _),
+ [35] = PINGROUP(35, qup9, _, _, _, _, _, _, _, _),
+ [36] = PINGROUP(36, qup10, ddr_bist, _, _, _, _, _, _, _),
+ [37] = PINGROUP(37, qup10, ddr_bist, atest_usb, _, _, _, _, _, _),
+ [38] = PINGROUP(38, qup10, _, _, _, _, _, _, _, _),
+ [39] = PINGROUP(39, qup10, atest_usb, _, _, _, _, _, _, _),
+ [40] = PINGROUP(40, qup11, qup_l4, ddr_bist, ddr_pxi1, _, _, _, _, _),
+ [41] = PINGROUP(41, qup11, qup_l5, ddr_bist, ddr_pxi1, _, _, _, _, _),
+ [42] = PINGROUP(42, qup11, qup_l6, _, _, _, _, _, _, _),
+ [43] = PINGROUP(43, qup11, ddr_pxi3, _, _, _, _, _, _, _),
+ [44] = PINGROUP(44, qup12, ddr_pxi3, _, _, _, _, _, _, _),
+ [45] = PINGROUP(45, qup12, ddr_pxi2, _, _, _, _, _, _, _),
+ [46] = PINGROUP(46, qup12, mdp_vsync, _, _, _, _, _, _, _),
+ [47] = PINGROUP(47, qup12, dp_hot, mdp_vsync, ddr_pxi2, _, _, _, _, _),
+ [48] = PINGROUP(48, qup13, qspi2, sdc42, _, _, _, _, _, _),
+ [49] = PINGROUP(49, qup13, qspi3, sdc43, _, _, _, _, _, _),
+ [50] = PINGROUP(50, qup13, qspi_clk, sdc4_clk, _, _, _, _, _, _),
+ [51] = PINGROUP(51, qup13, qspi_cs, sdc4_cmd, ddr_pxi0, _, _, _, _, _),
+ [52] = PINGROUP(52, qup14, qspi0, sdc40, ddr_pxi0, _, _, _, _, _),
+ [53] = PINGROUP(53, qup14, qspi1, sdc41, _, _, _, _, _, _),
+ [54] = PINGROUP(54, qup14, qspi_cs, _, _, _, _, _, _, _),
+ [55] = PINGROUP(55, qup14, atest_usb, _, _, _, _, _, _, _),
+ [56] = PINGROUP(56, qup15, ibi_i3c, _, _, _, _, _, _, _),
+ [57] = PINGROUP(57, qup15, ibi_i3c, _, _, _, _, _, _, _),
+ [58] = PINGROUP(58, qup15, qup_l4, _, _, _, _, _, _, _),
+ [59] = PINGROUP(59, qup15, qup_l5, _, _, _, _, _, _, _),
+ [60] = PINGROUP(60, qup16, ibi_i3c, _, _, _, _, _, _, _),
+ [61] = PINGROUP(61, qup16, ibi_i3c, _, _, _, _, _, _, _),
+ [62] = PINGROUP(62, qup16, qup_l6, _, _, _, _, _, _, _),
+ [63] = PINGROUP(63, qup16, qup_l4, _, _, _, _, _, _, _),
+ [64] = PINGROUP(64, qup17, tb_trig, tgu_ch0, _, _, _, _, _, _),
+ [65] = PINGROUP(65, qup17, tgu_ch1, _, _, _, _, _, _, _),
+ [66] = PINGROUP(66, qup17, qup_l5, tgu_ch2, _, _, _, _, _, _),
+ [67] = PINGROUP(67, qup17, qup_l6, tgu_ch3, _, _, _, _, _, _),
+ [68] = PINGROUP(68, qup18, _, _, _, _, _, _, _, _),
+ [69] = PINGROUP(69, qup18, _, _, _, _, _, _, _, _),
+ [70] = PINGROUP(70, qup18, _, _, _, _, _, _, _, _),
+ [71] = PINGROUP(71, qup18, cri_trng0, _, _, _, _, _, _, _),
+ [72] = PINGROUP(72, qup19, cri_trng1, _, _, _, _, _, _, _),
+ [73] = PINGROUP(73, qup19, prng_rosc, _, _, _, _, _, _, _),
+ [74] = PINGROUP(74, qup19, _, _, _, _, _, _, _, _),
+ [75] = PINGROUP(75, qup19, prng_rosc, _, _, _, _, _, _, _),
+ [76] = PINGROUP(76, qup20, phase_flag, tmess_prng3, _, _, _, _, _, _),
+ [77] = PINGROUP(77, qup20, phase_flag, tmess_prng2, _, _, _, _, _, _),
+ [78] = PINGROUP(78, qup20, phase_flag, _, _, _, _, _, _, _),
+ [79] = PINGROUP(79, qup20, phase_flag, tmess_prng1, _, _, _, _, _, _),
+ [80] = PINGROUP(80, qup21, qdss_cti, phase_flag, tmess_prng0, _, _, _, _, _),
+ [81] = PINGROUP(81, qup21, qdss_cti, phase_flag, prng_rosc, _, _, _, _, _),
+ [82] = PINGROUP(82, qup21, qdss_cti, phase_flag, _, _, _, _, _, _),
+ [83] = PINGROUP(83, qup21, qdss_cti, phase_flag, prng_rosc, _, _, _, _, _),
+ [84] = PINGROUP(84, qup5, qdss_cti, _, _, _, _, _, _, _),
+ [85] = PINGROUP(85, qup5, qdss_cti, _, _, _, _, _, _, _),
+ [86] = PINGROUP(86, mdp_vsync, mdp_vsync0, mdp_vsync1, gcc_gp1, atest_char, _, _, _, _),
+ [87] = PINGROUP(87, mdp_vsync, mdp_vsync2, mdp_vsync3, gcc_gp2, atest_char, _, _, _, _),
+ [88] = PINGROUP(88, mdp_vsync, gcc_gp3, atest_char, _, _, _, _, _, _),
+ [89] = PINGROUP(89, vfr_0, atest_char, _, _, _, _, _, _, _),
+ [90] = PINGROUP(90, usb2phy_ac, atest_char, _, _, _, _, _, _, _),
+ [91] = PINGROUP(91, usb_phy, tsense_pwm1, tsense_pwm2, _, _, _, _, _, _),
+ [92] = PINGROUP(92, phase_flag, _, _, _, _, _, _, _, _),
+ [93] = PINGROUP(93, sd_write, qdss_cti, phase_flag, _, _, _, _, _, _),
+ [94] = PINGROUP(94, cmu_rng, phase_flag, _, _, _, _, _, _, _),
+ [95] = PINGROUP(95, pcie0_clkreqn, cmu_rng, phase_flag, _, _, _, _, _, _),
+ [96] = PINGROUP(96, cmu_rng, phase_flag, _, _, _, _, _, _, _),
+ [97] = PINGROUP(97, cmu_rng, phase_flag, _, _, _, _, _, _, _),
+ [98] = PINGROUP(98, pcie1_clkreqn, phase_flag, _, _, _, _, _, _, _),
+ [99] = PINGROUP(99, phase_flag, cri_trng, _, _, _, _, _, _, _),
+ [100] = PINGROUP(100, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [101] = PINGROUP(101, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [102] = PINGROUP(102, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [103] = PINGROUP(103, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [104] = PINGROUP(104, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [105] = PINGROUP(105, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [106] = PINGROUP(106, cam_mclk, qdss_gpio, _, _, _, _, _, _, _),
+ [107] = PINGROUP(107, cam_mclk, qdss_gpio, pll_clk, _, _, _, _, _, _),
+ [108] = PINGROUP(108, aon_cam, _, _, _, _, _, _, _, _),
+ [109] = PINGROUP(109, cci_async, _, _, _, _, _, _, _, _),
+ [110] = PINGROUP(110, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [111] = PINGROUP(111, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [112] = PINGROUP(112, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [113] = PINGROUP(113, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [114] = PINGROUP(114, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [115] = PINGROUP(115, cci_i2c, qdss_gpio, _, _, _, _, _, _, _),
+ [116] = PINGROUP(116, cci_timer, _, _, _, _, _, _, _, _),
+ [117] = PINGROUP(117, cci_timer, qdss_gpio, _, _, _, _, _, _, _),
+ [118] = PINGROUP(118, cci_timer, qdss_gpio, _, _, _, _, _, _, _),
+ [119] = PINGROUP(119, cci_timer, cci_async, qdss_gpio, _, _, _, _, _, _),
+ [120] = PINGROUP(120, cci_timer, cci_async, qdss_gpio, _, _, _, _, _, _),
+ [121] = PINGROUP(121, mi2s2_sck, _, _, _, _, _, _, _, _),
+ [122] = PINGROUP(122, mi2s2_data0, _, _, _, _, _, _, _, _),
+ [123] = PINGROUP(123, mi2s2_ws, _, _, _, _, _, _, _, _),
+ [124] = PINGROUP(124, mi2s2_data1, sec_mi2s, audio_ref, _, _, _, _, _, _),
+ [125] = PINGROUP(125, pri_mi2s, _, _, _, _, _, _, _, _),
+ [126] = PINGROUP(126, mi2s0_sck, _, _, _, _, _, _, _, _),
+ [127] = PINGROUP(127, mi2s0_data0, _, _, _, _, _, _, _, _),
+ [128] = PINGROUP(128, mi2s0_data1, _, _, _, _, _, _, _, _),
+ [129] = PINGROUP(129, mi2s0_ws, _, _, _, _, _, _, _, _),
+ [130] = PINGROUP(130, uim0_data, _, _, _, _, _, _, _, _),
+ [131] = PINGROUP(131, uim0_clk, _, _, _, _, _, _, _, _),
+ [132] = PINGROUP(132, uim0_reset, _, _, _, _, _, _, _, _),
+ [133] = PINGROUP(133, uim0_present, _, _, _, _, _, _, _, _),
+ [134] = PINGROUP(134, uim1_data, gcc_gp1, _, _, _, _, _, _, _),
+ [135] = PINGROUP(135, uim1_clk, gcc_gp2, _, _, _, _, _, _, _),
+ [136] = PINGROUP(136, uim1_reset, gcc_gp3, _, _, _, _, _, _, _),
+ [137] = PINGROUP(137, uim1_present, tb_trig, _, _, _, _, _, _, _),
+ [138] = PINGROUP(138, _, mss_grfc0, _, _, _, _, _, _, _),
+ [139] = PINGROUP(139, _, mss_grfc1, _, _, _, _, _, _, _),
+ [140] = PINGROUP(140, _, mss_grfc2, _, _, _, _, _, _, _),
+ [141] = PINGROUP(141, _, mss_grfc3, _, _, _, _, _, _, _),
+ [142] = PINGROUP(142, _, mss_grfc4, _, _, _, _, _, _, _),
+ [143] = PINGROUP(143, _, mss_grfc5, _, _, _, _, _, _, _),
+ [144] = PINGROUP(144, _, mss_grfc6, _, _, _, _, _, _, _),
+ [145] = PINGROUP(145, _, mss_grfc7, _, _, _, _, _, _, _),
+ [146] = PINGROUP(146, _, mss_grfc8, _, _, _, _, _, _, _),
+ [147] = PINGROUP(147, _, mss_grfc9, _, _, _, _, _, _, _),
+ [148] = PINGROUP(148, coex_uart1, atest_usb, _, _, _, _, _, _, _),
+ [149] = PINGROUP(149, coex_uart1, atest_usb, _, _, _, _, _, _, _),
+ [150] = PINGROUP(150, coex_uart2, mss_grfc10, _, _, _, _, _, _, _),
+ [151] = PINGROUP(151, coex_uart2, mss_grfc11, _, _, _, _, _, _, _),
+ [152] = PINGROUP(152, mss_grfc12, _, _, _, _, _, _, _, _),
+ [153] = PINGROUP(153, mss_grfc0, nav, _, _, _, _, _, _, _),
+ [154] = PINGROUP(154, nav, _, _, _, _, _, _, _, _),
+ [155] = PINGROUP(155, nav, vfr_1, _, _, _, _, _, _, _),
+ [156] = PINGROUP(156, qlink0_request, _, _, _, _, _, _, _, _),
+ [157] = PINGROUP(157, qlink0_enable, _, _, _, _, _, _, _, _),
+ [158] = PINGROUP(158, qlink0_wmss, _, _, _, _, _, _, _, _),
+ [159] = PINGROUP(159, qlink1_request, _, _, _, _, _, _, _, _),
+ [160] = PINGROUP(160, qlink1_enable, _, _, _, _, _, _, _, _),
+ [161] = PINGROUP(161, qlink1_wmss, _, _, _, _, _, _, _, _),
+ [162] = PINGROUP(162, qlink2_request, _, _, _, _, _, _, _, _),
+ [163] = PINGROUP(163, qlink2_enable, _, _, _, _, _, _, _, _),
+ [164] = PINGROUP(164, qlink2_wmss, _, _, _, _, _, _, _, _),
+ [165] = PINGROUP(165, _, _, _, _, _, _, _, _, _),
+ [166] = PINGROUP(166, _, _, _, _, _, _, _, _, _),
+ [167] = PINGROUP(167, _, _, _, _, _, _, _, _, _),
+ [168] = PINGROUP(168, _, _, _, _, _, _, _, _, _),
+ [169] = PINGROUP(169, _, _, _, _, _, _, _, _, _),
+ [170] = PINGROUP(170, _, _, _, _, _, _, _, _, _),
+ [171] = PINGROUP(171, _, _, _, _, _, _, _, _, _),
+ [172] = PINGROUP(172, _, _, _, _, _, _, _, _, _),
+ [173] = PINGROUP(173, _, _, _, _, _, _, _, _, _),
+ [174] = PINGROUP(174, _, _, _, _, _, _, _, _, _),
+ [175] = PINGROUP(175, _, _, _, _, _, _, _, _, _),
+ [176] = PINGROUP(176, _, _, _, _, _, _, _, _, _),
+ [177] = PINGROUP(177, _, _, _, _, _, _, _, _, _),
+ [178] = PINGROUP(178, _, _, _, _, _, _, _, _, _),
+ [179] = PINGROUP(179, _, _, _, _, _, _, _, _, _),
+ [180] = PINGROUP(180, _, _, _, _, _, _, _, _, _),
+ [181] = PINGROUP(181, _, _, _, _, _, _, _, _, _),
+ [182] = PINGROUP(182, _, _, _, _, _, _, _, _, _),
+ [183] = PINGROUP(183, _, _, _, _, _, _, _, _, _),
+ [184] = PINGROUP(184, _, _, _, _, _, _, _, _, _),
+ [185] = PINGROUP(185, _, _, _, _, _, _, _, _, _),
+ [186] = PINGROUP(186, _, _, _, _, _, _, _, _, _),
+ [187] = PINGROUP(187, _, _, _, _, _, _, _, _, _),
+ [188] = PINGROUP(188, _, qdss_gpio, _, _, _, _, _, _, _),
+ [189] = PINGROUP(189, _, qdss_gpio, _, _, _, _, _, _, _),
+ [190] = PINGROUP(190, qdss_gpio, _, _, _, _, _, _, _, _),
+ [191] = PINGROUP(191, qdss_gpio, _, _, _, _, _, _, _, _),
+ [192] = PINGROUP(192, _, qdss_gpio, _, _, _, _, _, _, _),
+ [193] = PINGROUP(193, _, qdss_gpio, _, _, _, _, _, _, _),
+ [194] = PINGROUP(194, _, qdss_gpio, _, _, _, _, _, _, _),
+ [195] = PINGROUP(195, _, qdss_gpio, _, _, _, _, _, _, _),
+ [196] = PINGROUP(196, _, qdss_gpio, _, _, _, _, _, _, _),
+ [197] = PINGROUP(197, _, qdss_gpio, _, _, _, _, _, _, _),
+ [198] = PINGROUP(198, _, qdss_gpio, _, _, _, _, _, _, _),
+ [199] = PINGROUP(199, _, qdss_gpio, _, _, _, _, _, _, _),
+ [200] = PINGROUP(200, _, qdss_gpio, _, _, _, _, _, _, _),
+ [201] = PINGROUP(201, _, qdss_gpio, _, _, _, _, _, _, _),
+ [202] = PINGROUP(202, qdss_gpio, _, _, _, _, _, _, _, _),
+ [203] = PINGROUP(203, qdss_gpio, _, _, _, _, _, _, _, _),
+ [204] = PINGROUP(204, qdss_gpio, _, _, _, _, _, _, _, _),
+ [205] = PINGROUP(205, qdss_gpio, _, _, _, _, _, _, _, _),
+ [206] = PINGROUP(206, qup5, _, _, _, _, _, _, _, _),
+ [207] = PINGROUP(207, qup5, _, _, _, _, _, _, _, _),
+ [208] = PINGROUP(208, cci_i2c, _, _, _, _, _, _, _, _),
+ [209] = PINGROUP(209, cci_i2c, _, _, _, _, _, _, _, _),
+ [210] = UFS_RESET(ufs_reset, 0xde000),
+ [211] = SDC_QDSD_PINGROUP(sdc2_clk, 0xd6000, 14, 6),
+ [212] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xd6000, 11, 3),
+ [213] = SDC_QDSD_PINGROUP(sdc2_data, 0xd6000, 9, 0),
+};
+
+static const struct msm_gpio_wakeirq_map sm8450_pdc_map[] = {
+ { 2, 70 }, { 3, 77 }, { 7, 52 }, { 8, 108 }, { 10, 128 }, { 11, 53 },
+ { 12, 129 }, { 13, 130 }, { 14, 131 }, { 15, 67 }, { 19, 69 }, { 21, 132 },
+ { 23, 54 }, { 26, 56 }, { 27, 71 }, { 28, 57 }, { 31, 55 }, { 32, 58 },
+ { 34, 72 }, { 35, 43 }, { 36, 78 }, { 38, 79 }, { 39, 62 }, { 40, 80 },
+ { 41, 133 }, { 43, 81 }, { 44, 87 }, { 45, 134 }, { 46, 66 }, { 47, 63 },
+ { 50, 88 }, { 51, 89 }, { 55, 90 }, { 56, 59 }, { 59, 82 }, { 60, 60 },
+ { 62, 135 }, { 63, 91 }, { 66, 136 }, { 67, 44 }, { 69, 137 }, { 71, 97 },
+ { 75, 73 }, { 79, 74 }, { 80, 96 }, { 81, 98 }, { 82, 45 }, { 83, 99 },
+ { 84, 94 }, { 85, 100 }, { 86, 101 }, { 87, 102 }, { 88, 92 }, { 89, 83 },
+ { 90, 84 }, { 91, 85 }, { 92, 46 }, { 95, 103 }, { 96, 104 }, { 98, 105 },
+ { 99, 106 }, { 115, 95 }, { 116, 76 }, { 117, 75 }, { 118, 86 }, { 119, 93 },
+ { 133, 47 }, { 137, 42 }, { 148, 61 }, { 150, 68 }, { 153, 65 }, { 154, 48 },
+ { 155, 49 }, { 156, 64 }, { 159, 50 }, { 162, 51 }, { 166, 111 }, { 169, 114 },
+ { 171, 115 }, { 172, 116 }, { 174, 117 }, { 176, 107 }, { 181, 109 },
+ { 182, 110 }, { 185, 112 }, { 187, 113 }, { 188, 118 }, { 190, 122 },
+ { 192, 123 }, { 195, 124 }, { 201, 119 }, { 203, 120 }, { 205, 121 },
+};
+
+static const struct msm_pinctrl_soc_data sm8450_tlmm = {
+ .pins = sm8450_pins,
+ .npins = ARRAY_SIZE(sm8450_pins),
+ .functions = sm8450_functions,
+ .nfunctions = ARRAY_SIZE(sm8450_functions),
+ .groups = sm8450_groups,
+ .ngroups = ARRAY_SIZE(sm8450_groups),
+ .ngpios = 211,
+ .wakeirq_map = sm8450_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(sm8450_pdc_map),
+};
+
+static int sm8450_tlmm_probe(struct platform_device *pdev)
+{
+ return msm_pinctrl_probe(pdev, &sm8450_tlmm);
+}
+
+static const struct of_device_id sm8450_tlmm_of_match[] = {
+ { .compatible = "qcom,sm8450-tlmm", },
+ { },
+};
+
+static struct platform_driver sm8450_tlmm_driver = {
+ .driver = {
+ .name = "sm8450-tlmm",
+ .of_match_table = sm8450_tlmm_of_match,
+ },
+ .probe = sm8450_tlmm_probe,
+ .remove = msm_pinctrl_remove,
+};
+
+static int __init sm8450_tlmm_init(void)
+{
+ return platform_driver_register(&sm8450_tlmm_driver);
+}
+arch_initcall(sm8450_tlmm_init);
+
+static void __exit sm8450_tlmm_exit(void)
+{
+ platform_driver_unregister(&sm8450_tlmm_driver);
+}
+module_exit(sm8450_tlmm_exit);
+
+MODULE_DESCRIPTION("QTI SM8450 TLMM driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, sm8450_tlmm_of_match);
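For context, each entry in the sm8450_pdc_map table above pairs a TLMM GPIO number with its wake-capable PDC interrupt, and the shared msm pinctrl core scans this table when a GPIO is armed as a wake source. A minimal lookup sketch, assuming the msm_gpio_wakeirq_map fields are named gpio and wakeirq as the initializers suggest (the helper name is hypothetical):

/* Hypothetical helper mirroring how the core resolves a PDC wake IRQ. */
static int sm8450_gpio_to_wakeirq(unsigned int gpio)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sm8450_pdc_map); i++)
		if (sm8450_pdc_map[i].gpio == gpio)
			return sm8450_pdc_map[i].wakeirq;

	return -ENOENT;	/* this GPIO cannot wake the system */
}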
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 5283d5e9e8bc..f2eac3b05d67 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1141,6 +1141,7 @@ static int pmic_gpio_remove(struct platform_device *pdev)
}
static const struct of_device_id pmic_gpio_of_match[] = {
+ { .compatible = "qcom,pm2250-gpio", .data = (void *) 10 },
/* pm660 has 13 GPIOs with holes on 1, 5, 6, 7, 8 and 10 */
{ .compatible = "qcom,pm660-gpio", .data = (void *) 13 },
/* pm660l has 12 GPIOs with holes on 1, 2, 10, 11 and 12 */
@@ -1151,6 +1152,7 @@ static const struct of_device_id pmic_gpio_of_match[] = {
{ .compatible = "qcom,pm7325-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pm8005-gpio", .data = (void *) 4 },
{ .compatible = "qcom,pm8008-gpio", .data = (void *) 2 },
+ { .compatible = "qcom,pm8019-gpio", .data = (void *) 6 },
/* pm8150 has 10 GPIOs with holes on 2, 5, 7 and 8 */
{ .compatible = "qcom,pm8150-gpio", .data = (void *) 10 },
{ .compatible = "qcom,pmc8180-gpio", .data = (void *) 10 },
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index b5949f766a7a..1b41adda8129 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -773,7 +773,6 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
pctrl->chip = pm8xxx_gpio_template;
pctrl->chip.base = -1;
pctrl->chip.parent = &pdev->dev;
- pctrl->chip.of_node = pdev->dev.of_node;
pctrl->chip.of_gpio_n_cells = 2;
pctrl->chip.label = dev_name(pctrl->dev);
pctrl->chip.ngpio = pctrl->npins;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 842940594c4a..49893a5133a8 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -857,7 +857,6 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
pctrl->chip = pm8xxx_mpp_template;
pctrl->chip.base = -1;
pctrl->chip.parent = &pdev->dev;
- pctrl->chip.of_node = pdev->dev.of_node;
pctrl->chip.of_gpio_n_cells = 2;
pctrl->chip.label = dev_name(pctrl->dev);
pctrl->chip.ngpio = pctrl->npins;
diff --git a/drivers/pinctrl/renesas/pfc-r8a779a0.c b/drivers/pinctrl/renesas/pfc-r8a779a0.c
index ad6532443a78..83580385c3ca 100644
--- a/drivers/pinctrl/renesas/pfc-r8a779a0.c
+++ b/drivers/pinctrl/renesas/pfc-r8a779a0.c
@@ -3835,7 +3835,7 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = {
{ RCAR_GP_PIN(3, 12), 16, 3 }, /* CANFD5_RX */
{ RCAR_GP_PIN(3, 11), 12, 3 }, /* CANFD5_TX */
{ RCAR_GP_PIN(3, 10), 8, 3 }, /* CANFD4_RX */
- { RCAR_GP_PIN(3, 9), 4, 3 }, /* CANFD4_TX*/
+ { RCAR_GP_PIN(3, 9), 4, 3 }, /* CANFD4_TX */
{ RCAR_GP_PIN(3, 8), 0, 3 }, /* CANFD3_RX */
} },
{ PINMUX_DRIVE_REG("DRV2CTRL3", 0xe6058888) {
@@ -4305,7 +4305,7 @@ static const struct pinmux_bias_reg pinmux_bias_regs[] = {
[11] = RCAR_GP_PIN(6, 11), /* AVB2_TD3 */
[12] = RCAR_GP_PIN(6, 12), /* AVB2_TXCREFCLK */
[13] = RCAR_GP_PIN(6, 13), /* AVB2_MDIO */
- [14] = RCAR_GP_PIN(6, 14), /* AVB2_MDC*/
+ [14] = RCAR_GP_PIN(6, 14), /* AVB2_MDC */
[15] = RCAR_GP_PIN(6, 15), /* AVB2_MAGIC */
[16] = RCAR_GP_PIN(6, 16), /* AVB2_PHY_INT */
[17] = RCAR_GP_PIN(6, 17), /* AVB2_LINK */
diff --git a/drivers/pinctrl/renesas/pinctrl-rza1.c b/drivers/pinctrl/renesas/pinctrl-rza1.c
index 10020fe302b8..c1d6e9512c7a 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza1.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza1.c
@@ -757,9 +757,9 @@ static int rza1_gpio_request(struct gpio_chip *chip, unsigned int gpio)
}
/**
- * rza1_gpio_disable_free() - reset a pin
+ * rza1_gpio_free() - reset a pin
*
- * Surprisingly, disable_free a gpio, is equivalent to request it.
+ * Surprisingly, freeing a gpio is equivalent to requesting it.
* Reset pin to port mode, with input buffer disabled. This overwrites all
 * port direction settings applied with set_direction.
*
@@ -875,7 +875,7 @@ static int rza1_dt_node_pin_count(struct device_node *np)
}
/**
- * rza1_parse_pmx_function() - parse a pin mux sub-node
+ * rza1_parse_pinmux_node() - parse a pin mux sub-node
*
* @rza1_pctl: RZ/A1 pin controller device
* @np: of pmx sub-node
diff --git a/drivers/pinctrl/renesas/pinctrl-rza2.c b/drivers/pinctrl/renesas/pinctrl-rza2.c
index 32829eb9656c..c0a04f1ee994 100644
--- a/drivers/pinctrl/renesas/pinctrl-rza2.c
+++ b/drivers/pinctrl/renesas/pinctrl-rza2.c
@@ -240,7 +240,6 @@ static int rza2_gpio_register(struct rza2_pinctrl_priv *priv)
int ret;
chip.label = devm_kasprintf(priv->dev, GFP_KERNEL, "%pOFn", np);
- chip.of_node = np;
chip.parent = priv->dev;
chip.ngpio = priv->npins;
diff --git a/drivers/pinctrl/renesas/pinctrl-rzg2l.c b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
index 20b2af889ca9..ccee9c9e2e22 100644
--- a/drivers/pinctrl/renesas/pinctrl-rzg2l.c
+++ b/drivers/pinctrl/renesas/pinctrl-rzg2l.c
@@ -35,20 +35,21 @@
#define MUX_FUNC(pinconf) (((pinconf) & MUX_FUNC_MASK) >> MUX_FUNC_OFFS)
/* PIN capabilities */
-#define PIN_CFG_IOLH BIT(0)
-#define PIN_CFG_SR BIT(1)
-#define PIN_CFG_IEN BIT(2)
-#define PIN_CFG_PUPD BIT(3)
-#define PIN_CFG_IOLH_SD0 BIT(4)
-#define PIN_CFG_IOLH_SD1 BIT(5)
-#define PIN_CFG_IOLH_QSPI BIT(6)
-#define PIN_CFG_IOLH_ETH0 BIT(7)
-#define PIN_CFG_IOLH_ETH1 BIT(8)
-#define PIN_CFG_FILONOFF BIT(9)
-#define PIN_CFG_FILNUM BIT(10)
-#define PIN_CFG_FILCLKSEL BIT(11)
-
-#define RZG2L_MPXED_PIN_FUNCS (PIN_CFG_IOLH | \
+#define PIN_CFG_IOLH_A BIT(0)
+#define PIN_CFG_IOLH_B BIT(1)
+#define PIN_CFG_SR BIT(2)
+#define PIN_CFG_IEN BIT(3)
+#define PIN_CFG_PUPD BIT(4)
+#define PIN_CFG_IO_VMC_SD0 BIT(5)
+#define PIN_CFG_IO_VMC_SD1 BIT(6)
+#define PIN_CFG_IO_VMC_QSPI BIT(7)
+#define PIN_CFG_IO_VMC_ETH0 BIT(8)
+#define PIN_CFG_IO_VMC_ETH1 BIT(9)
+#define PIN_CFG_FILONOFF BIT(10)
+#define PIN_CFG_FILNUM BIT(11)
+#define PIN_CFG_FILCLKSEL BIT(12)
+
+#define RZG2L_MPXED_PIN_FUNCS (PIN_CFG_IOLH_A | \
PIN_CFG_SR | \
PIN_CFG_PUPD | \
PIN_CFG_FILONOFF | \
@@ -77,7 +78,7 @@
#define RZG2L_SINGLE_PIN BIT(31)
#define RZG2L_SINGLE_PIN_PACK(p, b, f) (RZG2L_SINGLE_PIN | \
((p) << 24) | ((b) << 20) | (f))
-#define RZG2L_SINGLE_PIN_GET_PORT(x) (((x) & GENMASK(30, 24)) >> 24)
+#define RZG2L_SINGLE_PIN_GET_PORT_OFFSET(x) (((x) & GENMASK(30, 24)) >> 24)
#define RZG2L_SINGLE_PIN_GET_BIT(x) (((x) & GENMASK(22, 20)) >> 20)
#define RZG2L_SINGLE_PIN_GET_CFGS(x) ((x) & GENMASK(19, 0))
@@ -86,6 +87,7 @@
#define PMC(n) (0x0200 + 0x10 + (n))
#define PFC(n) (0x0400 + 0x40 + (n) * 4)
#define PIN(n) (0x0800 + 0x10 + (n))
+#define IOLH(n) (0x1000 + (n) * 8)
#define IEN(n) (0x1800 + (n) * 8)
#define PWPR (0x3014)
#define SD_CH(n) (0x3000 + (n) * 4)
@@ -101,11 +103,13 @@
#define PVDD_MASK 0x01
#define PFC_MASK 0x07
#define IEN_MASK 0x01
+#define IOLH_MASK 0x03
#define PM_INPUT 0x1
#define PM_OUTPUT 0x2
#define RZG2L_PIN_ID_TO_PORT(id) ((id) / RZG2L_PINS_PER_PORT)
+#define RZG2L_PIN_ID_TO_PORT_OFFSET(id) (RZG2L_PIN_ID_TO_PORT(id) + 0x10)
#define RZG2L_PIN_ID_TO_PIN(id) ((id) % RZG2L_PINS_PER_PORT)
struct rzg2l_dedicated_configs {
@@ -137,6 +141,9 @@ struct rzg2l_pinctrl {
spinlock_t lock;
};
+static const unsigned int iolh_groupa_mA[] = { 2, 4, 8, 12 };
+static const unsigned int iolh_groupb_oi[] = { 100, 66, 50, 33 };
+
static void rzg2l_pinctrl_set_pfc_mode(struct rzg2l_pinctrl *pctrl,
u8 port, u8 pin, u8 func)
{
@@ -424,6 +431,56 @@ done:
return ret;
}
+static int rzg2l_validate_gpio_pin(struct rzg2l_pinctrl *pctrl,
+ u32 cfg, u32 port, u8 bit)
+{
+ u8 pincount = RZG2L_GPIO_PORT_GET_PINCNT(cfg);
+ u32 port_index = RZG2L_GPIO_PORT_GET_INDEX(cfg);
+ u32 data;
+
+ if (bit >= pincount || port >= pctrl->data->n_port_pins)
+ return -EINVAL;
+
+ data = pctrl->data->port_pin_configs[port];
+ if (port_index != RZG2L_GPIO_PORT_GET_INDEX(data))
+ return -EINVAL;
+
+ return 0;
+}
+
+static u32 rzg2l_read_pin_config(struct rzg2l_pinctrl *pctrl, u32 offset,
+ u8 bit, u32 mask)
+{
+ void __iomem *addr = pctrl->base + offset;
+
+ /* handle _L/_H for 32-bit register read/write */
+ if (bit >= 4) {
+ bit -= 4;
+ addr += 4;
+ }
+
+ return (readl(addr) >> (bit * 8)) & mask;
+}
+
+static void rzg2l_rmw_pin_config(struct rzg2l_pinctrl *pctrl, u32 offset,
+ u8 bit, u32 mask, u32 val)
+{
+ void __iomem *addr = pctrl->base + offset;
+ unsigned long flags;
+ u32 reg;
+
+ /* handle _L/_H for 32-bit register read/write */
+ if (bit >= 4) {
+ bit -= 4;
+ addr += 4;
+ }
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ reg = readl(addr) & ~(mask << (bit * 8));
+ writel(reg | (val << (bit * 8)), addr);
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+
static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int _pin,
unsigned long *config)
@@ -435,7 +492,7 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
unsigned int arg = 0;
unsigned long flags;
void __iomem *addr;
- u32 port = 0, reg;
+ u32 port_offset;
u32 cfg = 0;
u8 bit = 0;
@@ -443,36 +500,33 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
return -EINVAL;
if (*pin_data & RZG2L_SINGLE_PIN) {
- port = RZG2L_SINGLE_PIN_GET_PORT(*pin_data);
+ port_offset = RZG2L_SINGLE_PIN_GET_PORT_OFFSET(*pin_data);
cfg = RZG2L_SINGLE_PIN_GET_CFGS(*pin_data);
bit = RZG2L_SINGLE_PIN_GET_BIT(*pin_data);
+ } else {
+ cfg = RZG2L_GPIO_PORT_GET_CFGS(*pin_data);
+ port_offset = RZG2L_PIN_ID_TO_PORT_OFFSET(_pin);
+ bit = RZG2L_PIN_ID_TO_PIN(_pin);
+
+ if (rzg2l_validate_gpio_pin(pctrl, *pin_data, RZG2L_PIN_ID_TO_PORT(_pin), bit))
+ return -EINVAL;
}
switch (param) {
case PIN_CONFIG_INPUT_ENABLE:
if (!(cfg & PIN_CFG_IEN))
return -EINVAL;
- spin_lock_irqsave(&pctrl->lock, flags);
- /* handle _L/_H for 32-bit register read/write */
- addr = pctrl->base + IEN(port);
- if (bit >= 4) {
- bit -= 4;
- addr += 4;
- }
-
- reg = readl(addr) & (IEN_MASK << (bit * 8));
- arg = (reg >> (bit * 8)) & 0x1;
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ arg = rzg2l_read_pin_config(pctrl, IEN(port_offset), bit, IEN_MASK);
break;
case PIN_CONFIG_POWER_SOURCE: {
u32 pwr_reg = 0x0;
- if (cfg & PIN_CFG_IOLH_SD0)
+ if (cfg & PIN_CFG_IO_VMC_SD0)
pwr_reg = SD_CH(0);
- else if (cfg & PIN_CFG_IOLH_SD1)
+ else if (cfg & PIN_CFG_IO_VMC_SD1)
pwr_reg = SD_CH(1);
- else if (cfg & PIN_CFG_IOLH_QSPI)
+ else if (cfg & PIN_CFG_IO_VMC_QSPI)
pwr_reg = QSPI;
else
return -EINVAL;
@@ -484,6 +538,28 @@ static int rzg2l_pinctrl_pinconf_get(struct pinctrl_dev *pctldev,
break;
}
+ case PIN_CONFIG_DRIVE_STRENGTH: {
+ unsigned int index;
+
+ if (!(cfg & PIN_CFG_IOLH_A))
+ return -EINVAL;
+
+ index = rzg2l_read_pin_config(pctrl, IOLH(port_offset), bit, IOLH_MASK);
+ arg = iolh_groupa_mA[index];
+ break;
+ }
+
+ case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: {
+ unsigned int index;
+
+ if (!(cfg & PIN_CFG_IOLH_B))
+ return -EINVAL;
+
+ index = rzg2l_read_pin_config(pctrl, IOLH(port_offset), bit, IOLH_MASK);
+ arg = iolh_groupb_oi[index];
+ break;
+ }
+
default:
return -ENOTSUPP;
}
@@ -504,7 +580,7 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
enum pin_config_param param;
unsigned long flags;
void __iomem *addr;
- u32 port = 0, reg;
+ u32 port_offset;
unsigned int i;
u32 cfg = 0;
u8 bit = 0;
@@ -513,9 +589,16 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
return -EINVAL;
if (*pin_data & RZG2L_SINGLE_PIN) {
- port = RZG2L_SINGLE_PIN_GET_PORT(*pin_data);
+ port_offset = RZG2L_SINGLE_PIN_GET_PORT_OFFSET(*pin_data);
cfg = RZG2L_SINGLE_PIN_GET_CFGS(*pin_data);
bit = RZG2L_SINGLE_PIN_GET_BIT(*pin_data);
+ } else {
+ cfg = RZG2L_GPIO_PORT_GET_CFGS(*pin_data);
+ port_offset = RZG2L_PIN_ID_TO_PORT_OFFSET(_pin);
+ bit = RZG2L_PIN_ID_TO_PIN(_pin);
+
+ if (rzg2l_validate_gpio_pin(pctrl, *pin_data, RZG2L_PIN_ID_TO_PORT(_pin), bit))
+ return -EINVAL;
}
for (i = 0; i < num_configs; i++) {
@@ -528,17 +611,7 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
if (!(cfg & PIN_CFG_IEN))
return -EINVAL;
- /* handle _L/_H for 32-bit register read/write */
- addr = pctrl->base + IEN(port);
- if (bit >= 4) {
- bit -= 4;
- addr += 4;
- }
-
- spin_lock_irqsave(&pctrl->lock, flags);
- reg = readl(addr) & ~(IEN_MASK << (bit * 8));
- writel(reg | (arg << (bit * 8)), addr);
- spin_unlock_irqrestore(&pctrl->lock, flags);
+ rzg2l_rmw_pin_config(pctrl, IEN(port_offset), bit, IEN_MASK, !!arg);
break;
}
@@ -549,11 +622,11 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
if (mV != 1800 && mV != 3300)
return -EINVAL;
- if (cfg & PIN_CFG_IOLH_SD0)
+ if (cfg & PIN_CFG_IO_VMC_SD0)
pwr_reg = SD_CH(0);
- else if (cfg & PIN_CFG_IOLH_SD1)
+ else if (cfg & PIN_CFG_IO_VMC_SD1)
pwr_reg = SD_CH(1);
- else if (cfg & PIN_CFG_IOLH_QSPI)
+ else if (cfg & PIN_CFG_IO_VMC_QSPI)
pwr_reg = QSPI;
else
return -EINVAL;
@@ -564,6 +637,43 @@ static int rzg2l_pinctrl_pinconf_set(struct pinctrl_dev *pctldev,
spin_unlock_irqrestore(&pctrl->lock, flags);
break;
}
+
+ case PIN_CONFIG_DRIVE_STRENGTH: {
+ unsigned int arg = pinconf_to_config_argument(_configs[i]);
+ unsigned int index;
+
+ if (!(cfg & PIN_CFG_IOLH_A))
+ return -EINVAL;
+
+ for (index = 0; index < ARRAY_SIZE(iolh_groupa_mA); index++) {
+ if (arg == iolh_groupa_mA[index])
+ break;
+ }
+ if (index >= ARRAY_SIZE(iolh_groupa_mA))
+ return -EINVAL;
+
+ rzg2l_rmw_pin_config(pctrl, IOLH(port_offset), bit, IOLH_MASK, index);
+ break;
+ }
+
+ case PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: {
+ unsigned int arg = pinconf_to_config_argument(_configs[i]);
+ unsigned int index;
+
+ if (!(cfg & PIN_CFG_IOLH_B))
+ return -EINVAL;
+
+ for (index = 0; index < ARRAY_SIZE(iolh_groupb_oi); index++) {
+ if (arg == iolh_groupb_oi[index])
+ break;
+ }
+ if (index >= ARRAY_SIZE(iolh_groupb_oi))
+ return -EINVAL;
+
+ rzg2l_rmw_pin_config(pctrl, IOLH(port_offset), bit, IOLH_MASK, index);
+ break;
+ }
+
default:
return -EOPNOTSUPP;
}
@@ -855,24 +965,24 @@ static const u32 rzg2l_gpio_configs[] = {
RZG2L_GPIO_PORT_PACK(3, 0x21, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(2, 0x22, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(2, 0x23, RZG2L_MPXED_PIN_FUNCS),
- RZG2L_GPIO_PORT_PACK(3, 0x24, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x25, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x26, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x27, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x28, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x29, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x2a, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x2b, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x2c, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH0)),
- RZG2L_GPIO_PORT_PACK(2, 0x2d, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x2e, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x2f, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x30, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x31, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x32, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x33, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(2, 0x34, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
- RZG2L_GPIO_PORT_PACK(3, 0x35, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IOLH_ETH1)),
+ RZG2L_GPIO_PORT_PACK(3, 0x24, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x25, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x26, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x27, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x28, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x29, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2a, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2b, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2c, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH0)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2d, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2e, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x2f, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x30, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x31, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x32, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x33, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(2, 0x34, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
+ RZG2L_GPIO_PORT_PACK(3, 0x35, RZG2L_MPXED_ETH_PIN_FUNCS(PIN_CFG_IO_VMC_ETH1)),
RZG2L_GPIO_PORT_PACK(2, 0x36, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(3, 0x37, RZG2L_MPXED_PIN_FUNCS),
RZG2L_GPIO_PORT_PACK(3, 0x38, RZG2L_MPXED_PIN_FUNCS),
@@ -890,75 +1000,75 @@ static struct rzg2l_dedicated_configs rzg2l_dedicated_pins[] = {
{ "NMI", RZG2L_SINGLE_PIN_PACK(0x1, 0,
(PIN_CFG_FILONOFF | PIN_CFG_FILNUM | PIN_CFG_FILCLKSEL)) },
{ "TMS/SWDIO", RZG2L_SINGLE_PIN_PACK(0x2, 0,
- (PIN_CFG_SR | PIN_CFG_IOLH | PIN_CFG_IEN)) },
+ (PIN_CFG_SR | PIN_CFG_IOLH_A | PIN_CFG_IEN)) },
{ "TDO", RZG2L_SINGLE_PIN_PACK(0x3, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN)) },
+ (PIN_CFG_IOLH_A | PIN_CFG_SR | PIN_CFG_IEN)) },
{ "AUDIO_CLK1", RZG2L_SINGLE_PIN_PACK(0x4, 0, PIN_CFG_IEN) },
{ "AUDIO_CLK2", RZG2L_SINGLE_PIN_PACK(0x4, 1, PIN_CFG_IEN) },
{ "SD0_CLK", RZG2L_SINGLE_PIN_PACK(0x6, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_CMD", RZG2L_SINGLE_PIN_PACK(0x6, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_RST#", RZG2L_SINGLE_PIN_PACK(0x6, 2,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA0", RZG2L_SINGLE_PIN_PACK(0x7, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA1", RZG2L_SINGLE_PIN_PACK(0x7, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA2", RZG2L_SINGLE_PIN_PACK(0x7, 2,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA3", RZG2L_SINGLE_PIN_PACK(0x7, 3,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA4", RZG2L_SINGLE_PIN_PACK(0x7, 4,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA5", RZG2L_SINGLE_PIN_PACK(0x7, 5,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA6", RZG2L_SINGLE_PIN_PACK(0x7, 6,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD0_DATA7", RZG2L_SINGLE_PIN_PACK(0x7, 7,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD0)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD0)) },
{ "SD1_CLK", RZG2L_SINGLE_PIN_PACK(0x8, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_SD1))},
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_SD1)) },
{ "SD1_CMD", RZG2L_SINGLE_PIN_PACK(0x8, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
{ "SD1_DATA0", RZG2L_SINGLE_PIN_PACK(0x9, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
{ "SD1_DATA1", RZG2L_SINGLE_PIN_PACK(0x9, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
{ "SD1_DATA2", RZG2L_SINGLE_PIN_PACK(0x9, 2,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
{ "SD1_DATA3", RZG2L_SINGLE_PIN_PACK(0x9, 3,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IOLH_SD1)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IEN | PIN_CFG_IO_VMC_SD1)) },
{ "QSPI0_SPCLK", RZG2L_SINGLE_PIN_PACK(0xa, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI0_IO0", RZG2L_SINGLE_PIN_PACK(0xa, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI0_IO1", RZG2L_SINGLE_PIN_PACK(0xa, 2,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI0_IO2", RZG2L_SINGLE_PIN_PACK(0xa, 3,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI0_IO3", RZG2L_SINGLE_PIN_PACK(0xa, 4,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI0_SSL", RZG2L_SINGLE_PIN_PACK(0xa, 5,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI1_SPCLK", RZG2L_SINGLE_PIN_PACK(0xb, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI1_IO0", RZG2L_SINGLE_PIN_PACK(0xb, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI1_IO1", RZG2L_SINGLE_PIN_PACK(0xb, 2,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI1_IO2", RZG2L_SINGLE_PIN_PACK(0xb, 3,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI1_IO3", RZG2L_SINGLE_PIN_PACK(0xb, 4,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI1_SSL", RZG2L_SINGLE_PIN_PACK(0xb, 5,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI_RESET#", RZG2L_SINGLE_PIN_PACK(0xc, 0,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
{ "QSPI_WP#", RZG2L_SINGLE_PIN_PACK(0xc, 1,
- (PIN_CFG_IOLH | PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
- { "QSPI_INT#", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_SR | PIN_CFG_IOLH_QSPI)) },
- { "WDTOVF_PERROUT#", RZG2L_SINGLE_PIN_PACK(0xd, 0, (PIN_CFG_IOLH | PIN_CFG_SR)) },
+ (PIN_CFG_IOLH_B | PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "QSPI_INT#", RZG2L_SINGLE_PIN_PACK(0xc, 2, (PIN_CFG_SR | PIN_CFG_IO_VMC_QSPI)) },
+ { "WDTOVF_PERROUT#", RZG2L_SINGLE_PIN_PACK(0xd, 0, (PIN_CFG_IOLH_A | PIN_CFG_SR)) },
{ "RIIC0_SDA", RZG2L_SINGLE_PIN_PACK(0xe, 0, PIN_CFG_IEN) },
{ "RIIC0_SCL", RZG2L_SINGLE_PIN_PACK(0xe, 1, PIN_CFG_IEN) },
{ "RIIC1_SDA", RZG2L_SINGLE_PIN_PACK(0xe, 2, PIN_CFG_IEN) },
diff --git a/drivers/pinctrl/renesas/pinctrl.c b/drivers/pinctrl/renesas/pinctrl.c
index f3eecb20c086..96b9de974246 100644
--- a/drivers/pinctrl/renesas/pinctrl.c
+++ b/drivers/pinctrl/renesas/pinctrl.c
@@ -504,7 +504,6 @@ static u32 sh_pfc_pinconf_find_drive_strength_reg(struct sh_pfc *pfc,
static int sh_pfc_pinconf_get_drive_strength(struct sh_pfc *pfc,
unsigned int pin)
{
- unsigned long flags;
unsigned int offset;
unsigned int size;
u32 reg;
@@ -514,11 +513,7 @@ static int sh_pfc_pinconf_get_drive_strength(struct sh_pfc *pfc,
if (!reg)
return -EINVAL;
- spin_lock_irqsave(&pfc->lock, flags);
- val = sh_pfc_read(pfc, reg);
- spin_unlock_irqrestore(&pfc->lock, flags);
-
- val = (val >> offset) & GENMASK(size - 1, 0);
+ val = (sh_pfc_read(pfc, reg) >> offset) & GENMASK(size - 1, 0);
/* Convert the value to mA based on a full drive strength value of 24mA.
* We can make the full value configurable later if needed.
@@ -648,9 +643,7 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
if (WARN(bit < 0, "invalid pin %#x", _pin))
return bit;
- spin_lock_irqsave(&pfc->lock, flags);
val = sh_pfc_read(pfc, pocctrl);
- spin_unlock_irqrestore(&pfc->lock, flags);
lower_voltage = (pin->configs & SH_PFC_PIN_VOLTAGE_25_33) ?
2500 : 1800;
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
index 6b77fd24571e..2e490e7696f4 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm64.c
@@ -441,6 +441,87 @@ const struct samsung_pinctrl_of_match_data exynos7_of_data __initconst = {
.num_ctrl = ARRAY_SIZE(exynos7_pin_ctrl),
};
+/* pin banks of exynos7885 pin-controller 0 (ALIVE) */
+static const struct samsung_pin_bank_data exynos7885_pin_banks0[] __initconst = {
+ EXYNOS_PIN_BANK_EINTN(3, 0x000, "etc0"),
+ EXYNOS_PIN_BANK_EINTN(3, 0x020, "etc1"),
+ EXYNOS850_PIN_BANK_EINTW(8, 0x040, "gpa0", 0x00),
+ EXYNOS850_PIN_BANK_EINTW(8, 0x060, "gpa1", 0x04),
+ EXYNOS850_PIN_BANK_EINTW(8, 0x080, "gpa2", 0x08),
+ EXYNOS850_PIN_BANK_EINTW(5, 0x0a0, "gpq0", 0x0c),
+};
+
+/* pin banks of exynos7885 pin-controller 1 (DISPAUD) */
+static const struct samsung_pin_bank_data exynos7885_pin_banks1[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTG(5, 0x000, "gpb0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x020, "gpb1", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(5, 0x040, "gpb2", 0x08),
+};
+
+/* pin banks of exynos7885 pin-controller 2 (FSYS) */
+static const struct samsung_pin_bank_data exynos7885_pin_banks2[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTG(4, 0x000, "gpf0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x020, "gpf2", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(6, 0x040, "gpf3", 0x08),
+ EXYNOS850_PIN_BANK_EINTG(6, 0x060, "gpf4", 0x0c),
+};
+
+/* pin banks of exynos7885 pin-controller 3 (TOP) */
+static const struct samsung_pin_bank_data exynos7885_pin_banks3[] __initconst = {
+ EXYNOS850_PIN_BANK_EINTG(4, 0x000, "gpp0", 0x00),
+ EXYNOS850_PIN_BANK_EINTG(3, 0x020, "gpg0", 0x04),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x040, "gpp1", 0x08),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x060, "gpp2", 0x0c),
+ EXYNOS850_PIN_BANK_EINTG(3, 0x080, "gpp3", 0x10),
+ EXYNOS850_PIN_BANK_EINTG(6, 0x0a0, "gpp4", 0x14),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x0c0, "gpp5", 0x18),
+ EXYNOS850_PIN_BANK_EINTG(5, 0x0e0, "gpp6", 0x1c),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x100, "gpp7", 0x20),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x120, "gpp8", 0x24),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x140, "gpg1", 0x28),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x160, "gpg2", 0x2c),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x180, "gpg3", 0x30),
+ EXYNOS850_PIN_BANK_EINTG(2, 0x1a0, "gpg4", 0x34),
+ EXYNOS850_PIN_BANK_EINTG(4, 0x1c0, "gpc0", 0x38),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x1e0, "gpc1", 0x3c),
+ EXYNOS850_PIN_BANK_EINTG(8, 0x200, "gpc2", 0x40),
+};
+
+static const struct samsung_pin_ctrl exynos7885_pin_ctrl[] __initconst = {
+ {
+ /* pin-controller instance 0 Alive data */
+ .pin_banks = exynos7885_pin_banks0,
+ .nr_banks = ARRAY_SIZE(exynos7885_pin_banks0),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 1 DISPAUD data */
+ .pin_banks = exynos7885_pin_banks1,
+ .nr_banks = ARRAY_SIZE(exynos7885_pin_banks1),
+ }, {
+ /* pin-controller instance 2 FSYS data */
+ .pin_banks = exynos7885_pin_banks2,
+ .nr_banks = ARRAY_SIZE(exynos7885_pin_banks2),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ }, {
+ /* pin-controller instance 3 TOP data */
+ .pin_banks = exynos7885_pin_banks3,
+ .nr_banks = ARRAY_SIZE(exynos7885_pin_banks3),
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ },
+};
+
+const struct samsung_pinctrl_of_match_data exynos7885_of_data __initconst = {
+ .ctrl = exynos7885_pin_ctrl,
+ .num_ctrl = ARRAY_SIZE(exynos7885_pin_ctrl),
+};
+
/* pin banks of exynos850 pin-controller 0 (ALIVE) */
static const struct samsung_pin_bank_data exynos850_pin_banks0[] __initconst = {
/* Must start with EINTG banks, ordered by EINT group number. */
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 23f355ae9ca0..0f6e9305fec5 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -1095,7 +1095,6 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
struct samsung_pinctrl_drv_data *drvdata;
const struct samsung_pin_ctrl *ctrl;
struct device *dev = &pdev->dev;
- struct resource *res;
int ret;
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
@@ -1109,9 +1108,11 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
}
drvdata->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res)
- drvdata->irq = res->start;
+ ret = platform_get_irq_optional(pdev, 0);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ if (ret > 0)
+ drvdata->irq = ret;
if (ctrl->retention_data) {
drvdata->retention_ctrl = ctrl->retention_data->init(drvdata,
@@ -1264,6 +1265,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = &exynos5433_of_data },
{ .compatible = "samsung,exynos7-pinctrl",
.data = &exynos7_of_data },
+ { .compatible = "samsung,exynos7885-pinctrl",
+ .data = &exynos7885_of_data },
{ .compatible = "samsung,exynos850-pinctrl",
.data = &exynos850_of_data },
{ .compatible = "samsung,exynosautov9-pinctrl",
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.h b/drivers/pinctrl/samsung/pinctrl-samsung.h
index 547968a31aed..1f8d30ba05af 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -339,6 +339,7 @@ extern const struct samsung_pinctrl_of_match_data exynos5410_of_data;
extern const struct samsung_pinctrl_of_match_data exynos5420_of_data;
extern const struct samsung_pinctrl_of_match_data exynos5433_of_data;
extern const struct samsung_pinctrl_of_match_data exynos7_of_data;
+extern const struct samsung_pinctrl_of_match_data exynos7885_of_data;
extern const struct samsung_pinctrl_of_match_data exynos850_of_data;
extern const struct samsung_pinctrl_of_match_data exynosautov9_of_data;
extern const struct samsung_pinctrl_of_match_data s3c64xx_of_data;
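The samsung probe change above relies on the platform_get_irq_optional() contract: a positive return is a usable Linux IRQ number, -ENXIO means no IRQ is described for the device at all, and any other negative value (such as -EPROBE_DEFER) is a genuine error. Restated as a sketch:

	ret = platform_get_irq_optional(pdev, 0);
	if (ret < 0 && ret != -ENXIO)
		return ret;		/* real failure, e.g. -EPROBE_DEFER */
	if (ret > 0)
		drvdata->irq = ret;	/* IRQ present and usable */
	/* ret == -ENXIO: no IRQ wired up, carry on without one */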
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index 43bb334af1e1..ada401ef4342 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -14,11 +14,13 @@
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/init.h>
+#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
+#include <linux/regmap.h>
#include <linux/spinlock.h>
#define MAX_GPIO_PER_REG 32
@@ -64,7 +66,7 @@ struct plgpio_regs {
*/
struct plgpio {
spinlock_t lock;
- void __iomem *base;
+ struct regmap *regmap;
struct clk *clk;
struct gpio_chip chip;
int (*p2o)(int pin); /* pin_to_offset */
@@ -77,33 +79,38 @@ struct plgpio {
};
/* register manipulation inline functions */
-static inline u32 is_plgpio_set(void __iomem *base, u32 pin, u32 reg)
+static inline u32 is_plgpio_set(struct regmap *regmap, u32 pin, u32 reg)
{
u32 offset = PIN_OFFSET(pin);
- void __iomem *reg_off = REG_OFFSET(base, reg, pin);
- u32 val = readl_relaxed(reg_off);
+ u32 reg_off = REG_OFFSET(0, reg, pin);
+ u32 val;
+
+ regmap_read(regmap, reg_off, &val);
return !!(val & (1 << offset));
}
-static inline void plgpio_reg_set(void __iomem *base, u32 pin, u32 reg)
+static inline void plgpio_reg_set(struct regmap *regmap, u32 pin, u32 reg)
{
u32 offset = PIN_OFFSET(pin);
- void __iomem *reg_off = REG_OFFSET(base, reg, pin);
- u32 val = readl_relaxed(reg_off);
+ u32 reg_off = REG_OFFSET(0, reg, pin);
+ u32 mask;
- writel_relaxed(val | (1 << offset), reg_off);
+ mask = 1 << offset;
+ regmap_update_bits(regmap, reg_off, mask, mask);
}
-static inline void plgpio_reg_reset(void __iomem *base, u32 pin, u32 reg)
+static inline void plgpio_reg_reset(struct regmap *regmap, u32 pin, u32 reg)
{
u32 offset = PIN_OFFSET(pin);
- void __iomem *reg_off = REG_OFFSET(base, reg, pin);
- u32 val = readl_relaxed(reg_off);
+ u32 reg_off = REG_OFFSET(0, reg, pin);
+ u32 mask;
- writel_relaxed(val & ~(1 << offset), reg_off);
+ mask = 1 << offset;
+ regmap_update_bits(regmap, reg_off, mask, 0);
}
+
/* gpio framework specific routines */
static int plgpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
@@ -118,7 +125,7 @@ static int plgpio_direction_input(struct gpio_chip *chip, unsigned offset)
}
spin_lock_irqsave(&plgpio->lock, flags);
- plgpio_reg_set(plgpio->base, offset, plgpio->regs.dir);
+ plgpio_reg_set(plgpio->regmap, offset, plgpio->regs.dir);
spin_unlock_irqrestore(&plgpio->lock, flags);
return 0;
@@ -145,13 +152,13 @@ static int plgpio_direction_output(struct gpio_chip *chip, unsigned offset,
spin_lock_irqsave(&plgpio->lock, flags);
if (value)
- plgpio_reg_set(plgpio->base, wdata_offset,
+ plgpio_reg_set(plgpio->regmap, wdata_offset,
plgpio->regs.wdata);
else
- plgpio_reg_reset(plgpio->base, wdata_offset,
+ plgpio_reg_reset(plgpio->regmap, wdata_offset,
plgpio->regs.wdata);
- plgpio_reg_reset(plgpio->base, dir_offset, plgpio->regs.dir);
+ plgpio_reg_reset(plgpio->regmap, dir_offset, plgpio->regs.dir);
spin_unlock_irqrestore(&plgpio->lock, flags);
return 0;
@@ -171,7 +178,7 @@ static int plgpio_get_value(struct gpio_chip *chip, unsigned offset)
return -EINVAL;
}
- return is_plgpio_set(plgpio->base, offset, plgpio->regs.rdata);
+ return is_plgpio_set(plgpio->regmap, offset, plgpio->regs.rdata);
}
static void plgpio_set_value(struct gpio_chip *chip, unsigned offset, int value)
@@ -189,9 +196,9 @@ static void plgpio_set_value(struct gpio_chip *chip, unsigned offset, int value)
}
if (value)
- plgpio_reg_set(plgpio->base, offset, plgpio->regs.wdata);
+ plgpio_reg_set(plgpio->regmap, offset, plgpio->regs.wdata);
else
- plgpio_reg_reset(plgpio->base, offset, plgpio->regs.wdata);
+ plgpio_reg_reset(plgpio->regmap, offset, plgpio->regs.wdata);
}
static int plgpio_request(struct gpio_chip *chip, unsigned offset)
@@ -234,7 +241,7 @@ static int plgpio_request(struct gpio_chip *chip, unsigned offset)
}
spin_lock_irqsave(&plgpio->lock, flags);
- plgpio_reg_set(plgpio->base, offset, plgpio->regs.enb);
+ plgpio_reg_set(plgpio->regmap, offset, plgpio->regs.enb);
spin_unlock_irqrestore(&plgpio->lock, flags);
return 0;
@@ -266,7 +273,7 @@ static void plgpio_free(struct gpio_chip *chip, unsigned offset)
}
spin_lock_irqsave(&plgpio->lock, flags);
- plgpio_reg_reset(plgpio->base, offset, plgpio->regs.enb);
+ plgpio_reg_reset(plgpio->regmap, offset, plgpio->regs.enb);
spin_unlock_irqrestore(&plgpio->lock, flags);
disable_clk:
@@ -292,7 +299,7 @@ static void plgpio_irq_disable(struct irq_data *d)
}
spin_lock_irqsave(&plgpio->lock, flags);
- plgpio_reg_set(plgpio->base, offset, plgpio->regs.ie);
+ plgpio_reg_set(plgpio->regmap, offset, plgpio->regs.ie);
spin_unlock_irqrestore(&plgpio->lock, flags);
}
@@ -311,7 +318,7 @@ static void plgpio_irq_enable(struct irq_data *d)
}
spin_lock_irqsave(&plgpio->lock, flags);
- plgpio_reg_reset(plgpio->base, offset, plgpio->regs.ie);
+ plgpio_reg_reset(plgpio->regmap, offset, plgpio->regs.ie);
spin_unlock_irqrestore(&plgpio->lock, flags);
}
@@ -320,7 +327,7 @@ static int plgpio_irq_set_type(struct irq_data *d, unsigned trigger)
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct plgpio *plgpio = gpiochip_get_data(gc);
int offset = d->hwirq;
- void __iomem *reg_off;
+ u32 reg_off;
unsigned int supported_type = 0, val;
if (offset >= plgpio->chip.ngpio)
@@ -337,14 +344,14 @@ static int plgpio_irq_set_type(struct irq_data *d, unsigned trigger)
if (plgpio->regs.eit == -1)
return 0;
- reg_off = REG_OFFSET(plgpio->base, plgpio->regs.eit, offset);
- val = readl_relaxed(reg_off);
+ reg_off = REG_OFFSET(0, plgpio->regs.eit, offset);
+ regmap_read(plgpio->regmap, reg_off, &val);
offset = PIN_OFFSET(offset);
if (trigger & IRQ_TYPE_EDGE_RISING)
- writel_relaxed(val | (1 << offset), reg_off);
+ regmap_write(plgpio->regmap, reg_off, val | (1 << offset));
else
- writel_relaxed(val & ~(1 << offset), reg_off);
+ regmap_write(plgpio->regmap, reg_off, val & ~(1 << offset));
return 0;
}
@@ -362,7 +369,8 @@ static void plgpio_irq_handler(struct irq_desc *desc)
struct plgpio *plgpio = gpiochip_get_data(gc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
int regs_count, count, pin, offset, i = 0;
- unsigned long pending;
+ u32 pending;
+ unsigned long pendingl;
count = plgpio->chip.ngpio;
regs_count = DIV_ROUND_UP(count, MAX_GPIO_PER_REG);
@@ -370,14 +378,14 @@ static void plgpio_irq_handler(struct irq_desc *desc)
chained_irq_enter(irqchip, desc);
/* check all plgpio MIS registers for a possible interrupt */
for (; i < regs_count; i++) {
- pending = readl_relaxed(plgpio->base + plgpio->regs.mis +
- i * sizeof(int *));
+ regmap_read(plgpio->regmap, plgpio->regs.mis +
+ i * sizeof(int *), &pending);
if (!pending)
continue;
/* clear interrupts */
- writel_relaxed(~pending, plgpio->base + plgpio->regs.mis +
- i * sizeof(int *));
+ regmap_write(plgpio->regmap, plgpio->regs.mis +
+ i * sizeof(int *), ~pending);
/*
* clear extra bits in last register having gpios < MAX/REG
* ex: Suppose there are max 102 plgpios. then last register
@@ -389,7 +397,8 @@ static void plgpio_irq_handler(struct irq_desc *desc)
if (count < MAX_GPIO_PER_REG)
pending &= (1 << count) - 1;
- for_each_set_bit(offset, &pending, MAX_GPIO_PER_REG) {
+ pendingl = pending;
+ for_each_set_bit(offset, &pendingl, MAX_GPIO_PER_REG) {
/* get correct pin for "offset" */
if (plgpio->o2p && (plgpio->p2o_regs & PTO_MIS_REG)) {
pin = plgpio->o2p(offset);
@@ -511,8 +520,10 @@ static int plgpio_probe_dt(struct platform_device *pdev, struct plgpio *plgpio)
end:
return ret;
}
+
static int plgpio_probe(struct platform_device *pdev)
{
+ struct device_node *regmap_np;
struct plgpio *plgpio;
int ret, irq;
@@ -520,9 +531,23 @@ static int plgpio_probe(struct platform_device *pdev)
if (!plgpio)
return -ENOMEM;
- plgpio->base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(plgpio->base))
- return PTR_ERR(plgpio->base);
+ regmap_np = of_parse_phandle(pdev->dev.of_node, "regmap", 0);
+ if (regmap_np) {
+ plgpio->regmap = device_node_to_regmap(regmap_np);
+ of_node_put(regmap_np);
+ if (IS_ERR(plgpio->regmap)) {
+ dev_err(&pdev->dev, "Retrieve regmap failed (%pe)\n",
+ plgpio->regmap);
+ return PTR_ERR(plgpio->regmap);
+ }
+ } else {
+ plgpio->regmap = device_node_to_regmap(pdev->dev.of_node);
+ if (IS_ERR(plgpio->regmap)) {
+ dev_err(&pdev->dev, "Init regmap failed (%pe)\n",
+ plgpio->regmap);
+ return PTR_ERR(plgpio->regmap);
+ }
+ }
ret = plgpio_probe_dt(pdev, plgpio);
if (ret) {
@@ -556,7 +581,6 @@ static int plgpio_probe(struct platform_device *pdev)
plgpio->chip.label = dev_name(&pdev->dev);
plgpio->chip.parent = &pdev->dev;
plgpio->chip.owner = THIS_MODULE;
- plgpio->chip.of_node = pdev->dev.of_node;
if (!IS_ERR(plgpio->clk)) {
ret = clk_prepare(plgpio->clk);
@@ -607,22 +631,23 @@ static int plgpio_suspend(struct device *dev)
{
struct plgpio *plgpio = dev_get_drvdata(dev);
int i, reg_count = DIV_ROUND_UP(plgpio->chip.ngpio, MAX_GPIO_PER_REG);
- void __iomem *off;
+ u32 off;
for (i = 0; i < reg_count; i++) {
- off = plgpio->base + i * sizeof(int *);
+ off = i * sizeof(int *);
if (plgpio->regs.enb != -1)
- plgpio->csave_regs[i].enb =
- readl_relaxed(plgpio->regs.enb + off);
+ regmap_read(plgpio->regmap, plgpio->regs.enb + off,
+ &plgpio->csave_regs[i].enb);
if (plgpio->regs.eit != -1)
- plgpio->csave_regs[i].eit =
- readl_relaxed(plgpio->regs.eit + off);
- plgpio->csave_regs[i].wdata = readl_relaxed(plgpio->regs.wdata +
- off);
- plgpio->csave_regs[i].dir = readl_relaxed(plgpio->regs.dir +
- off);
- plgpio->csave_regs[i].ie = readl_relaxed(plgpio->regs.ie + off);
+ regmap_read(plgpio->regmap, plgpio->regs.eit + off,
+ &plgpio->csave_regs[i].eit);
+ regmap_read(plgpio->regmap, plgpio->regs.wdata + off,
+ &plgpio->csave_regs[i].wdata);
+ regmap_read(plgpio->regmap, plgpio->regs.dir + off,
+ &plgpio->csave_regs[i].dir);
+ regmap_read(plgpio->regmap, plgpio->regs.ie + off,
+ &plgpio->csave_regs[i].ie);
}
return 0;
@@ -636,7 +661,7 @@ static int plgpio_suspend(struct device *dev)
*/
#define plgpio_prepare_reg(__reg, _off, _mask, _tmp) \
{ \
- _tmp = readl_relaxed(plgpio->regs.__reg + _off); \
+ regmap_read(plgpio->regmap, plgpio->regs.__reg + _off, &_tmp); \
_tmp &= ~_mask; \
plgpio->csave_regs[i].__reg = \
_tmp | (plgpio->csave_regs[i].__reg & _mask); \
@@ -646,11 +671,11 @@ static int plgpio_resume(struct device *dev)
{
struct plgpio *plgpio = dev_get_drvdata(dev);
int i, reg_count = DIV_ROUND_UP(plgpio->chip.ngpio, MAX_GPIO_PER_REG);
- void __iomem *off;
+ u32 off;
u32 mask, tmp;
for (i = 0; i < reg_count; i++) {
- off = plgpio->base + i * sizeof(int *);
+ off = i * sizeof(int *);
if (i == reg_count - 1) {
mask = (1 << (plgpio->chip.ngpio - i *
@@ -667,20 +692,22 @@ static int plgpio_resume(struct device *dev)
plgpio_prepare_reg(ie, off, mask, tmp);
}
- writel_relaxed(plgpio->csave_regs[i].wdata, plgpio->regs.wdata +
- off);
- writel_relaxed(plgpio->csave_regs[i].dir, plgpio->regs.dir +
- off);
+ regmap_write(plgpio->regmap, plgpio->regs.wdata + off,
+ plgpio->csave_regs[i].wdata);
+
+ regmap_write(plgpio->regmap, plgpio->regs.dir + off,
+ plgpio->csave_regs[i].dir);
if (plgpio->regs.eit != -1)
- writel_relaxed(plgpio->csave_regs[i].eit,
- plgpio->regs.eit + off);
+ regmap_write(plgpio->regmap, plgpio->regs.eit + off,
+ plgpio->csave_regs[i].eit);
- writel_relaxed(plgpio->csave_regs[i].ie, plgpio->regs.ie + off);
+ regmap_write(plgpio->regmap, plgpio->regs.ie + off,
+ plgpio->csave_regs[i].ie);
if (plgpio->regs.enb != -1)
- writel_relaxed(plgpio->csave_regs[i].enb,
- plgpio->regs.enb + off);
+ regmap_write(plgpio->regmap, plgpio->regs.enb + off,
+ plgpio->csave_regs[i].enb);
}
return 0;
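The plgpio conversion above is one mechanical idiom applied throughout: every open-coded read-modify-write on an ioremapped register becomes a regmap call, with REG_OFFSET now computed against a zero base so it yields a register offset rather than a pointer. Side by side, as a sketch:

	/* before: raw MMIO read-modify-write under the driver's own lock */
	val = readl_relaxed(base + reg_off);
	writel_relaxed(val | BIT(offset), base + reg_off);

	/* after: one call; the regmap's internal locking covers the RMW */
	regmap_update_bits(regmap, reg_off, BIT(offset), BIT(offset));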
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 948f56abb9ae..e0543c1ad641 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -14,6 +14,7 @@
*/
#include <linux/err.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -367,9 +368,12 @@ int spear_pinctrl_probe(struct platform_device *pdev,
if (!pmx)
return -ENOMEM;
- pmx->vbase = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(pmx->vbase))
- return PTR_ERR(pmx->vbase);
+ pmx->regmap = device_node_to_regmap(np);
+ if (IS_ERR(pmx->regmap)) {
+ dev_err(&pdev->dev, "Init regmap failed (%pe).\n",
+ pmx->regmap);
+ return PTR_ERR(pmx->regmap);
+ }
pmx->dev = &pdev->dev;
pmx->machdata = machdata;
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index db029b148c87..63a0b5ea56ef 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -15,6 +15,7 @@
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/pinctrl/pinctrl.h>
+#include <linux/regmap.h>
#include <linux/types.h>
struct platform_device;
@@ -172,24 +173,27 @@ struct spear_pinctrl_machdata {
* @dev: pointer to struct dev of platform_device registered
* @pctl: pointer to struct pinctrl_dev
* @machdata: pointer to SoC or machine specific structure
- * @vbase: virtual base address of pinmux controller
+ * @regmap: regmap of pinmux controller
*/
struct spear_pmx {
struct device *dev;
struct pinctrl_dev *pctl;
struct spear_pinctrl_machdata *machdata;
- void __iomem *vbase;
+ struct regmap *regmap;
};
/* exported routines */
static inline u32 pmx_readl(struct spear_pmx *pmx, u32 reg)
{
- return readl_relaxed(pmx->vbase + reg);
+ u32 val;
+
+ regmap_read(pmx->regmap, reg, &val);
+ return val;
}
static inline void pmx_writel(struct spear_pmx *pmx, u32 val, u32 reg)
{
- writel_relaxed(val, pmx->vbase + reg);
+ regmap_write(pmx->regmap, reg, val);
}
void pmx_init_addr(struct spear_pinctrl_machdata *machdata, u16 reg);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
index ce1917e230f4..152b71226a80 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c
@@ -363,16 +363,16 @@ static const struct sunxi_desc_pin h616_pins[] = {
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
SUNXI_FUNCTION(0x2, "uart2"), /* CTS */
- SUNXI_FUNCTION(0x3, "i2s3"), /* DO0 */
+ SUNXI_FUNCTION(0x3, "i2s3_dout0"), /* DO0 */
SUNXI_FUNCTION(0x4, "spi1"), /* MISO */
- SUNXI_FUNCTION(0x5, "i2s3"), /* DI1 */
+ SUNXI_FUNCTION(0x5, "i2s3_din1"), /* DI1 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 8)), /* PH_EINT8 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "i2s3"), /* DI0 */
+ SUNXI_FUNCTION(0x3, "i2s3_din0"), /* DI0 */
SUNXI_FUNCTION(0x4, "spi1"), /* CS1 */
- SUNXI_FUNCTION(0x3, "i2s3"), /* DO1 */
+ SUNXI_FUNCTION(0x5, "i2s3_dout1"), /* DO1 */
SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 9)), /* PH_EINT9 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10),
SUNXI_FUNCTION(0x0, "gpio_in"),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 862c84efb718..80d6750c74a6 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -835,7 +835,9 @@ static const struct pinmux_ops sunxi_pmx_ops = {
static int sunxi_pinctrl_gpio_direction_input(struct gpio_chip *chip,
unsigned offset)
{
- return pinctrl_gpio_direction_input(chip->base + offset);
+ struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
+
+ return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, true);
}
static int sunxi_pinctrl_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -885,8 +887,10 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
static int sunxi_pinctrl_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
+ struct sunxi_pinctrl *pctl = gpiochip_get_data(chip);
+
sunxi_pinctrl_gpio_set(chip, offset, value);
- return pinctrl_gpio_direction_output(chip->base + offset);
+ return sunxi_pmx_gpio_set_direction(pctl->pctl_dev, NULL, offset, false);
}
static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 65b97e240196..6fac30de1c6a 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -565,7 +565,6 @@ int wmt_pinctrl_probe(struct platform_device *pdev,
data->gpio_chip = wmt_gpio_chip;
data->gpio_chip.parent = &pdev->dev;
- data->gpio_chip.of_node = pdev->dev.of_node;
data->gpio_chip.ngpio = data->nbanks * 32;
platform_set_drvdata(pdev, data);
diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig
index 8ac149173c64..d421e1482395 100644
--- a/drivers/platform/mips/Kconfig
+++ b/drivers/platform/mips/Kconfig
@@ -30,4 +30,10 @@ config RS780E_ACPI
help
Loongson RS780E PCH ACPI Controller driver.
+config LS2K_RESET
+ bool "Loongson-2K1000 Reset Controller"
+ depends on MACH_LOONGSON64 || COMPILE_TEST
+ help
+ Loongson-2K1000 Reset Controller driver.
+
endif # MIPS_PLATFORM_DEVICES
diff --git a/drivers/platform/mips/Makefile b/drivers/platform/mips/Makefile
index 178149098777..4c71444e453a 100644
--- a/drivers/platform/mips/Makefile
+++ b/drivers/platform/mips/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CPU_HWMON) += cpu_hwmon.o
obj-$(CONFIG_RS780E_ACPI) += rs780e-acpi.o
+obj-$(CONFIG_LS2K_RESET) += ls2k-reset.o
diff --git a/drivers/platform/mips/ls2k-reset.c b/drivers/platform/mips/ls2k-reset.c
new file mode 100644
index 000000000000..8f42d5d16480
--- /dev/null
+++ b/drivers/platform/mips/ls2k-reset.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021, Qing Zhang <zhangqing@loongson.cn>
+ * Loongson-2K1000 reset support
+ */
+
+#include <linux/of_address.h>
+#include <linux/pm.h>
+#include <asm/reboot.h>
+
+#define PM1_STS 0x0c /* Power Management 1 Status Register */
+#define PM1_CNT 0x14 /* Power Management 1 Control Register */
+#define RST_CNT 0x30 /* Reset Control Register */
+
+static void __iomem *base;
+
+static void ls2k_restart(char *command)
+{
+ writel(0x1, base + RST_CNT);
+}
+
+static void ls2k_poweroff(void)
+{
+	/* Clear any pending PM1 status bits (write-1-to-clear) */
+	writel(readl(base + PM1_STS), base + PM1_STS);
+	/* Sleep Enable | Soft Off */
+	writel(GENMASK(12, 10) | BIT(13), base + PM1_CNT);
+}
+
+static int ls2k_reset_init(void)
+{
+ struct device_node *np;
+
+ np = of_find_compatible_node(NULL, NULL, "loongson,ls2k-pm");
+ if (!np) {
+ pr_info("Failed to get PM node\n");
+ return -ENODEV;
+ }
+
+ base = of_iomap(np, 0);
+ of_node_put(np);
+ if (!base) {
+ pr_info("Failed to map PM register base address\n");
+ return -ENOMEM;
+ }
+
+ _machine_restart = ls2k_restart;
+ pm_power_off = ls2k_poweroff;
+
+ return 0;
+}
+
+arch_initcall(ls2k_reset_init);
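Spelling out the value ls2k_poweroff() writes to PM1_CNT, assuming the ACPI-style PM1 control layout (SLP_TYP field in bits 12:10, SLP_EN in bit 13):

	/* GENMASK(12, 10) = 0x1c00  -> SLP_TYP = all-ones (soft off)  */
	/* BIT(13)         = 0x2000  -> SLP_EN, commits the transition */
	/* value written   = 0x3c00                                    */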
diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig
index 5f0578e25f71..463f1ec5c14e 100644
--- a/drivers/platform/surface/Kconfig
+++ b/drivers/platform/surface/Kconfig
@@ -5,6 +5,7 @@
menuconfig SURFACE_PLATFORMS
bool "Microsoft Surface Platform-Specific Device Drivers"
+ depends on ARM64 || X86 || COMPILE_TEST
default y
help
Say Y here to get to see options for platform-specific device drivers
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index f794343d6aaa..4c72ba68b315 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -124,9 +124,10 @@ struct amd_pmc_dev {
u32 cpu_id;
u32 active_ips;
/* SMU version information */
- u16 major;
- u16 minor;
- u16 rev;
+ u8 smu_program;
+ u8 major;
+ u8 minor;
+ u8 rev;
struct device *dev;
struct pci_dev *rdev;
struct mutex lock; /* generic mutex lock */
@@ -180,11 +181,13 @@ static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
if (rc)
return rc;
- dev->major = (val >> 16) & GENMASK(15, 0);
+ dev->smu_program = (val >> 24) & GENMASK(7, 0);
+ dev->major = (val >> 16) & GENMASK(7, 0);
dev->minor = (val >> 8) & GENMASK(7, 0);
dev->rev = (val >> 0) & GENMASK(7, 0);
- dev_dbg(dev->dev, "SMU version is %u.%u.%u\n", dev->major, dev->minor, dev->rev);
+ dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n",
+ dev->smu_program, dev->major, dev->minor, dev->rev);
return 0;
}
@@ -226,7 +229,7 @@ static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp)
return 0;
}
-const struct file_operations amd_pmc_stb_debugfs_fops = {
+static const struct file_operations amd_pmc_stb_debugfs_fops = {
.owner = THIS_MODULE,
.open = amd_pmc_stb_debugfs_open,
.read = amd_pmc_stb_debugfs_read,
diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c
index d4ef8f362ee6..6fd0c9fea82d 100644
--- a/drivers/platform/x86/asus-tf103c-dock.c
+++ b/drivers/platform/x86/asus-tf103c-dock.c
@@ -250,7 +250,7 @@ static int tf103c_dock_hid_raw_request(struct hid_device *hid, u8 reportnum,
return 0;
}
-struct hid_ll_driver tf103c_dock_hid_ll_driver = {
+static struct hid_ll_driver tf103c_dock_hid_ll_driver = {
.parse = tf103c_dock_hid_parse,
.start = tf103c_dock_hid_start,
.stop = tf103c_dock_hid_stop,
@@ -921,7 +921,7 @@ static int __maybe_unused tf103c_dock_resume(struct device *dev)
return 0;
}
-SIMPLE_DEV_PM_OPS(tf103c_dock_pm_ops, tf103c_dock_suspend, tf103c_dock_resume);
+static SIMPLE_DEV_PM_OPS(tf103c_dock_pm_ops, tf103c_dock_suspend, tf103c_dock_resume);
static const struct acpi_device_id tf103c_dock_acpi_match[] = {
{"NPCE69A"},
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a3b83b22a3b1..2104a2621e50 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2223,7 +2223,7 @@ static int fan_curve_check_present(struct asus_wmi *asus, bool *available,
err = fan_curve_get_factory_default(asus, fan_dev);
if (err) {
- if (err == -ENODEV)
+ if (err == -ENODEV || err == -ENODATA)
return 0;
return err;
}
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index 40096b25994a..8e65086bb6c8 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -170,3 +170,14 @@ config INTEL_UNCORE_FREQ_CONTROL
To compile this driver as a module, choose M here: the module
will be called intel-uncore-frequency.
+
+config INTEL_VSEC
+ tristate "Intel Vendor Specific Extended Capabilities Driver"
+ depends on PCI
+ select AUXILIARY_BUS
+ help
+	  Adds support for feature drivers exposed through Intel PCIe VSEC and
+	  DVSEC capabilities.
+
+ To compile this driver as a module, choose M here: the module will
+ be called intel_vsec.
diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile
index 174a80a6c87c..35f2066578b2 100644
--- a/drivers/platform/x86/intel/Makefile
+++ b/drivers/platform/x86/intel/Makefile
@@ -26,6 +26,8 @@ intel_int0002_vgpio-y := int0002_vgpio.o
obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
intel_oaktrail-y := oaktrail.o
obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o
+intel_vsec-y := vsec.o
+obj-$(CONFIG_INTEL_VSEC) += intel_vsec.o
# Intel PMIC / PMC / P-Unit drivers
intel_bxtwc_tmu-y := bxtwc_tmu.o
diff --git a/drivers/platform/x86/intel/crystal_cove_charger.c b/drivers/platform/x86/intel/crystal_cove_charger.c
index 0374bc742513..e4299cfa2205 100644
--- a/drivers/platform/x86/intel/crystal_cove_charger.c
+++ b/drivers/platform/x86/intel/crystal_cove_charger.c
@@ -17,6 +17,7 @@
#include <linux/regmap.h>
#define CHGRIRQ_REG 0x0a
+#define MCHGRIRQ_REG 0x17
struct crystal_cove_charger_data {
struct mutex buslock; /* irq_bus_lock */
@@ -25,8 +26,8 @@ struct crystal_cove_charger_data {
struct irq_domain *irq_domain;
int irq;
int charger_irq;
- bool irq_enabled;
- bool irq_is_enabled;
+ u8 mask;
+ u8 new_mask;
};
static irqreturn_t crystal_cove_charger_irq(int irq, void *data)
@@ -53,13 +54,9 @@ static void crystal_cove_charger_irq_bus_sync_unlock(struct irq_data *data)
{
struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
- if (charger->irq_is_enabled != charger->irq_enabled) {
- if (charger->irq_enabled)
- enable_irq(charger->irq);
- else
- disable_irq(charger->irq);
-
- charger->irq_is_enabled = charger->irq_enabled;
+ if (charger->mask != charger->new_mask) {
+ regmap_write(charger->regmap, MCHGRIRQ_REG, charger->new_mask);
+ charger->mask = charger->new_mask;
}
mutex_unlock(&charger->buslock);
@@ -69,14 +66,14 @@ static void crystal_cove_charger_irq_unmask(struct irq_data *data)
{
struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
- charger->irq_enabled = true;
+ charger->new_mask &= ~BIT(data->hwirq);
}
static void crystal_cove_charger_irq_mask(struct irq_data *data)
{
struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data);
- charger->irq_enabled = false;
+ charger->new_mask |= BIT(data->hwirq);
}
static void crystal_cove_charger_rm_irq_domain(void *data)
@@ -130,10 +127,13 @@ static int crystal_cove_charger_probe(struct platform_device *pdev)
irq_set_nested_thread(charger->charger_irq, true);
irq_set_noprobe(charger->charger_irq);
+ /* Mask the single 2nd level IRQ before enabling the 1st level IRQ */
+ charger->mask = charger->new_mask = BIT(0);
+ regmap_write(charger->regmap, MCHGRIRQ_REG, charger->mask);
+
ret = devm_request_threaded_irq(&pdev->dev, charger->irq, NULL,
crystal_cove_charger_irq,
- IRQF_ONESHOT | IRQF_NO_AUTOEN,
- KBUILD_MODNAME, charger);
+ IRQF_ONESHOT, KBUILD_MODNAME, charger);
if (ret)
return dev_err_probe(&pdev->dev, ret, "requesting irq\n");
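
The conversion above replaces enable_irq()/disable_irq() on the parent interrupt with a dedicated second-level mask register, using the irq_chip bus-lock protocol: mask/unmask only update a cached value, and the potentially sleeping regmap write is deferred to irq_bus_sync_unlock(). A generic sketch of that pattern, with a hypothetical MASK_REG offset:

#include <linux/bits.h>
#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/regmap.h>

#define MASK_REG 0x17	/* hypothetical second-level IRQ mask register */

struct demo_chip_data {
	struct mutex buslock;
	struct regmap *regmap;
	u8 mask;	/* value currently written to hardware */
	u8 new_mask;	/* value requested by mask/unmask callbacks */
};

static void demo_irq_mask(struct irq_data *data)
{
	struct demo_chip_data *d = irq_data_get_irq_chip_data(data);

	d->new_mask |= BIT(data->hwirq);	/* cache only; no I/O in atomic context */
}

static void demo_irq_unmask(struct irq_data *data)
{
	struct demo_chip_data *d = irq_data_get_irq_chip_data(data);

	d->new_mask &= ~BIT(data->hwirq);
}

static void demo_irq_bus_lock(struct irq_data *data)
{
	struct demo_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->buslock);
}

static void demo_irq_bus_sync_unlock(struct irq_data *data)
{
	struct demo_chip_data *d = irq_data_get_irq_chip_data(data);

	if (d->mask != d->new_mask) {	/* flush the cached mask to hardware */
		regmap_write(d->regmap, MASK_REG, d->new_mask);
		d->mask = d->new_mask;
	}
	mutex_unlock(&d->buslock);
}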
diff --git a/drivers/platform/x86/intel/int3472/tps68470_board_data.c b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
index f93d437fd192..525f09a3b5ff 100644
--- a/drivers/platform/x86/intel/int3472/tps68470_board_data.c
+++ b/drivers/platform/x86/intel/int3472/tps68470_board_data.c
@@ -100,7 +100,8 @@ static struct gpiod_lookup_table surface_go_tps68470_gpios = {
.dev_id = "i2c-INT347A:00",
.table = {
GPIO_LOOKUP("tps68470-gpio", 9, "reset", GPIO_ACTIVE_LOW),
- GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW)
+ GPIO_LOOKUP("tps68470-gpio", 7, "powerdown", GPIO_ACTIVE_LOW),
+ { }
}
};
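
The one-line fix above adds the terminating sentinel that gpiod lookup tables require: the gpiolib core walks the table entry by entry and stops at the first zeroed element, so a table without the empty { } entry lets the walk run past the end of the array. A correctly terminated table, with hypothetical device and line names:

#include <linux/gpio/machine.h>

static struct gpiod_lookup_table example_gpios = {
	.dev_id = "i2c-EXAMPLE:00",		/* hypothetical consumer device */
	.table = {
		GPIO_LOOKUP("example-gpio", 3, "reset", GPIO_ACTIVE_LOW),
		{ }	/* sentinel: marks the end of the table for the gpiolib walk */
	},
};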
diff --git a/drivers/platform/x86/intel/pmt/Kconfig b/drivers/platform/x86/intel/pmt/Kconfig
index d630f883a717..e916fc966221 100644
--- a/drivers/platform/x86/intel/pmt/Kconfig
+++ b/drivers/platform/x86/intel/pmt/Kconfig
@@ -17,7 +17,7 @@ config INTEL_PMT_CLASS
config INTEL_PMT_TELEMETRY
tristate "Intel Platform Monitoring Technology (PMT) Telemetry driver"
- depends on MFD_INTEL_PMT
+ depends on INTEL_VSEC
select INTEL_PMT_CLASS
help
	  The Intel Platform Monitoring Technology (PMT) Telemetry driver provides
@@ -29,7 +29,7 @@ config INTEL_PMT_TELEMETRY
config INTEL_PMT_CRASHLOG
tristate "Intel Platform Monitoring Technology (PMT) Crashlog driver"
- depends on MFD_INTEL_PMT
+ depends on INTEL_VSEC
select INTEL_PMT_CLASS
help
The Intel Platform Monitoring Technology (PMT) crashlog driver provides
diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c
index 659b1073033c..1c9e3f3ea41c 100644
--- a/drivers/platform/x86/intel/pmt/class.c
+++ b/drivers/platform/x86/intel/pmt/class.c
@@ -13,6 +13,7 @@
#include <linux/mm.h>
#include <linux/pci.h>
+#include "../vsec.h"
#include "class.h"
#define PMT_XA_START 0
@@ -281,31 +282,29 @@ fail_dev_create:
return ret;
}
-int intel_pmt_dev_create(struct intel_pmt_entry *entry,
- struct intel_pmt_namespace *ns,
- struct platform_device *pdev, int idx)
+int intel_pmt_dev_create(struct intel_pmt_entry *entry, struct intel_pmt_namespace *ns,
+ struct intel_vsec_device *intel_vsec_dev, int idx)
{
+ struct device *dev = &intel_vsec_dev->auxdev.dev;
struct intel_pmt_header header;
struct resource *disc_res;
- int ret = -ENODEV;
+ int ret;
- disc_res = platform_get_resource(pdev, IORESOURCE_MEM, idx);
- if (!disc_res)
- return ret;
+ disc_res = &intel_vsec_dev->resource[idx];
- entry->disc_table = devm_platform_ioremap_resource(pdev, idx);
+ entry->disc_table = devm_ioremap_resource(dev, disc_res);
if (IS_ERR(entry->disc_table))
return PTR_ERR(entry->disc_table);
- ret = ns->pmt_header_decode(entry, &header, &pdev->dev);
+ ret = ns->pmt_header_decode(entry, &header, dev);
if (ret)
return ret;
- ret = intel_pmt_populate_entry(entry, &header, &pdev->dev, disc_res);
+ ret = intel_pmt_populate_entry(entry, &header, dev, disc_res);
if (ret)
return ret;
- return intel_pmt_dev_register(entry, ns, &pdev->dev);
+ return intel_pmt_dev_register(entry, ns, dev);
}
EXPORT_SYMBOL_GPL(intel_pmt_dev_create);
diff --git a/drivers/platform/x86/intel/pmt/class.h b/drivers/platform/x86/intel/pmt/class.h
index 1337019c2873..db11d58867ce 100644
--- a/drivers/platform/x86/intel/pmt/class.h
+++ b/drivers/platform/x86/intel/pmt/class.h
@@ -2,13 +2,14 @@
#ifndef _INTEL_PMT_CLASS_H
#define _INTEL_PMT_CLASS_H
-#include <linux/platform_device.h>
#include <linux/xarray.h>
#include <linux/types.h>
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/io.h>
+#include "../vsec.h"
+
/* PMT access types */
#define ACCESS_BARID 2
#define ACCESS_LOCAL 3
@@ -47,7 +48,7 @@ struct intel_pmt_namespace {
bool intel_pmt_is_early_client_hw(struct device *dev);
int intel_pmt_dev_create(struct intel_pmt_entry *entry,
struct intel_pmt_namespace *ns,
- struct platform_device *pdev, int idx);
+ struct intel_vsec_device *dev, int idx);
void intel_pmt_dev_destroy(struct intel_pmt_entry *entry,
struct intel_pmt_namespace *ns);
#endif
diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c
index 1c1021f04d3c..34daf9df168b 100644
--- a/drivers/platform/x86/intel/pmt/crashlog.c
+++ b/drivers/platform/x86/intel/pmt/crashlog.c
@@ -8,6 +8,7 @@
* Author: "Alexander Duyck" <alexander.h.duyck@linux.intel.com>
*/
+#include <linux/auxiliary_bus.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -15,10 +16,9 @@
#include <linux/uaccess.h>
#include <linux/overflow.h>
+#include "../vsec.h"
#include "class.h"
-#define DRV_NAME "pmt_crashlog"
-
/* Crashlog discovery header types */
#define CRASH_TYPE_OOBMSM 1
@@ -257,34 +257,34 @@ static struct intel_pmt_namespace pmt_crashlog_ns = {
/*
* initialization
*/
-static int pmt_crashlog_remove(struct platform_device *pdev)
+static void pmt_crashlog_remove(struct auxiliary_device *auxdev)
{
- struct pmt_crashlog_priv *priv = platform_get_drvdata(pdev);
+ struct pmt_crashlog_priv *priv = auxiliary_get_drvdata(auxdev);
int i;
for (i = 0; i < priv->num_entries; i++)
intel_pmt_dev_destroy(&priv->entry[i].entry, &pmt_crashlog_ns);
-
- return 0;
}
-static int pmt_crashlog_probe(struct platform_device *pdev)
+static int pmt_crashlog_probe(struct auxiliary_device *auxdev,
+ const struct auxiliary_device_id *id)
{
+ struct intel_vsec_device *intel_vsec_dev = auxdev_to_ivdev(auxdev);
struct pmt_crashlog_priv *priv;
size_t size;
int i, ret;
- size = struct_size(priv, entry, pdev->num_resources);
- priv = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ size = struct_size(priv, entry, intel_vsec_dev->num_resources);
+ priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL);
if (!priv)
return -ENOMEM;
- platform_set_drvdata(pdev, priv);
+ auxiliary_set_drvdata(auxdev, priv);
- for (i = 0; i < pdev->num_resources; i++) {
+ for (i = 0; i < intel_vsec_dev->num_resources; i++) {
struct intel_pmt_entry *entry = &priv->entry[i].entry;
- ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, pdev, i);
+ ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, intel_vsec_dev, i);
if (ret < 0)
goto abort_probe;
if (ret)
@@ -295,26 +295,30 @@ static int pmt_crashlog_probe(struct platform_device *pdev)
return 0;
abort_probe:
- pmt_crashlog_remove(pdev);
+ pmt_crashlog_remove(auxdev);
return ret;
}
-static struct platform_driver pmt_crashlog_driver = {
- .driver = {
- .name = DRV_NAME,
- },
- .remove = pmt_crashlog_remove,
- .probe = pmt_crashlog_probe,
+static const struct auxiliary_device_id pmt_crashlog_id_table[] = {
+ { .name = "intel_vsec.crashlog" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pmt_crashlog_id_table);
+
+static struct auxiliary_driver pmt_crashlog_aux_driver = {
+ .id_table = pmt_crashlog_id_table,
+ .remove = pmt_crashlog_remove,
+ .probe = pmt_crashlog_probe,
};
static int __init pmt_crashlog_init(void)
{
- return platform_driver_register(&pmt_crashlog_driver);
+ return auxiliary_driver_register(&pmt_crashlog_aux_driver);
}
static void __exit pmt_crashlog_exit(void)
{
- platform_driver_unregister(&pmt_crashlog_driver);
+ auxiliary_driver_unregister(&pmt_crashlog_aux_driver);
xa_destroy(&crashlog_array);
}
@@ -323,5 +327,4 @@ module_exit(pmt_crashlog_exit);
MODULE_AUTHOR("Alexander Duyck <alexander.h.duyck@linux.intel.com>");
MODULE_DESCRIPTION("Intel PMT Crashlog driver");
-MODULE_ALIAS("platform:" DRV_NAME);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/pmt/telemetry.c b/drivers/platform/x86/intel/pmt/telemetry.c
index 38d52651c572..6b6f3e2a617a 100644
--- a/drivers/platform/x86/intel/pmt/telemetry.c
+++ b/drivers/platform/x86/intel/pmt/telemetry.c
@@ -8,6 +8,7 @@
* Author: "David E. Box" <david.e.box@linux.intel.com>
*/
+#include <linux/auxiliary_bus.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -15,10 +16,9 @@
#include <linux/uaccess.h>
#include <linux/overflow.h>
+#include "../vsec.h"
#include "class.h"
-#define TELEM_DEV_NAME "pmt_telemetry"
-
#define TELEM_SIZE_OFFSET 0x0
#define TELEM_GUID_OFFSET 0x4
#define TELEM_BASE_OFFSET 0x8
@@ -79,34 +79,33 @@ static struct intel_pmt_namespace pmt_telem_ns = {
.pmt_header_decode = pmt_telem_header_decode,
};
-static int pmt_telem_remove(struct platform_device *pdev)
+static void pmt_telem_remove(struct auxiliary_device *auxdev)
{
- struct pmt_telem_priv *priv = platform_get_drvdata(pdev);
+ struct pmt_telem_priv *priv = auxiliary_get_drvdata(auxdev);
int i;
for (i = 0; i < priv->num_entries; i++)
intel_pmt_dev_destroy(&priv->entry[i], &pmt_telem_ns);
-
- return 0;
}
-static int pmt_telem_probe(struct platform_device *pdev)
+static int pmt_telem_probe(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id)
{
+ struct intel_vsec_device *intel_vsec_dev = auxdev_to_ivdev(auxdev);
struct pmt_telem_priv *priv;
size_t size;
int i, ret;
- size = struct_size(priv, entry, pdev->num_resources);
- priv = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ size = struct_size(priv, entry, intel_vsec_dev->num_resources);
+ priv = devm_kzalloc(&auxdev->dev, size, GFP_KERNEL);
if (!priv)
return -ENOMEM;
- platform_set_drvdata(pdev, priv);
+ auxiliary_set_drvdata(auxdev, priv);
- for (i = 0; i < pdev->num_resources; i++) {
+ for (i = 0; i < intel_vsec_dev->num_resources; i++) {
struct intel_pmt_entry *entry = &priv->entry[i];
- ret = intel_pmt_dev_create(entry, &pmt_telem_ns, pdev, i);
+ ret = intel_pmt_dev_create(entry, &pmt_telem_ns, intel_vsec_dev, i);
if (ret < 0)
goto abort_probe;
if (ret)
@@ -117,32 +116,35 @@ static int pmt_telem_probe(struct platform_device *pdev)
return 0;
abort_probe:
- pmt_telem_remove(pdev);
+ pmt_telem_remove(auxdev);
return ret;
}
-static struct platform_driver pmt_telem_driver = {
- .driver = {
- .name = TELEM_DEV_NAME,
- },
- .remove = pmt_telem_remove,
- .probe = pmt_telem_probe,
+static const struct auxiliary_device_id pmt_telem_id_table[] = {
+ { .name = "intel_vsec.telemetry" },
+ {}
+};
+MODULE_DEVICE_TABLE(auxiliary, pmt_telem_id_table);
+
+static struct auxiliary_driver pmt_telem_aux_driver = {
+ .id_table = pmt_telem_id_table,
+ .remove = pmt_telem_remove,
+ .probe = pmt_telem_probe,
};
static int __init pmt_telem_init(void)
{
- return platform_driver_register(&pmt_telem_driver);
+ return auxiliary_driver_register(&pmt_telem_aux_driver);
}
module_init(pmt_telem_init);
static void __exit pmt_telem_exit(void)
{
- platform_driver_unregister(&pmt_telem_driver);
+ auxiliary_driver_unregister(&pmt_telem_aux_driver);
xa_destroy(&telem_array);
}
module_exit(pmt_telem_exit);
MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
MODULE_DESCRIPTION("Intel PMT Telemetry driver");
-MODULE_ALIAS("platform:" TELEM_DEV_NAME);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
index c9a85eb2e860..e8424e70d81d 100644
--- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
+++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c
@@ -596,7 +596,10 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd,
return ret;
}
-static DEFINE_MUTEX(punit_misc_dev_lock);
+/* Lock to prevent module registration when already opened by user space */
+static DEFINE_MUTEX(punit_misc_dev_open_lock);
+/* Lock to allow one shared misc device for all ISST interfaces */
+static DEFINE_MUTEX(punit_misc_dev_reg_lock);
static int misc_usage_count;
static int misc_device_ret;
static int misc_device_open;
@@ -606,7 +609,7 @@ static int isst_if_open(struct inode *inode, struct file *file)
int i, ret = 0;
/* Fail open, if a module is going away */
- mutex_lock(&punit_misc_dev_lock);
+ mutex_lock(&punit_misc_dev_open_lock);
for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
struct isst_if_cmd_cb *cb = &punit_callbacks[i];
@@ -628,7 +631,7 @@ static int isst_if_open(struct inode *inode, struct file *file)
} else {
misc_device_open++;
}
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
return ret;
}
@@ -637,7 +640,7 @@ static int isst_if_relase(struct inode *inode, struct file *f)
{
int i;
- mutex_lock(&punit_misc_dev_lock);
+ mutex_lock(&punit_misc_dev_open_lock);
misc_device_open--;
for (i = 0; i < ISST_IF_DEV_MAX; ++i) {
struct isst_if_cmd_cb *cb = &punit_callbacks[i];
@@ -645,7 +648,7 @@ static int isst_if_relase(struct inode *inode, struct file *f)
if (cb->registered)
module_put(cb->owner);
}
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
return 0;
}
@@ -662,6 +665,43 @@ static struct miscdevice isst_if_char_driver = {
.fops = &isst_if_char_driver_ops,
};
+static int isst_misc_reg(void)
+{
+ mutex_lock(&punit_misc_dev_reg_lock);
+ if (misc_device_ret)
+ goto unlock_exit;
+
+ if (!misc_usage_count) {
+ misc_device_ret = isst_if_cpu_info_init();
+ if (misc_device_ret)
+ goto unlock_exit;
+
+ misc_device_ret = misc_register(&isst_if_char_driver);
+ if (misc_device_ret) {
+ isst_if_cpu_info_exit();
+ goto unlock_exit;
+ }
+ }
+ misc_usage_count++;
+
+unlock_exit:
+ mutex_unlock(&punit_misc_dev_reg_lock);
+
+ return misc_device_ret;
+}
+
+static void isst_misc_unreg(void)
+{
+ mutex_lock(&punit_misc_dev_reg_lock);
+ if (misc_usage_count)
+ misc_usage_count--;
+ if (!misc_usage_count && !misc_device_ret) {
+ misc_deregister(&isst_if_char_driver);
+ isst_if_cpu_info_exit();
+ }
+ mutex_unlock(&punit_misc_dev_reg_lock);
+}
+
/**
* isst_if_cdev_register() - Register callback for IOCTL
* @device_type: The device type this callback handling.
@@ -679,38 +719,31 @@ static struct miscdevice isst_if_char_driver = {
*/
int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb)
{
- if (misc_device_ret)
- return misc_device_ret;
+ int ret;
if (device_type >= ISST_IF_DEV_MAX)
return -EINVAL;
- mutex_lock(&punit_misc_dev_lock);
+ mutex_lock(&punit_misc_dev_open_lock);
+ /* Device is already open, we don't want to add new callbacks */
if (misc_device_open) {
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
return -EAGAIN;
}
- if (!misc_usage_count) {
- int ret;
-
- misc_device_ret = misc_register(&isst_if_char_driver);
- if (misc_device_ret)
- goto unlock_exit;
-
- ret = isst_if_cpu_info_init();
- if (ret) {
- misc_deregister(&isst_if_char_driver);
- misc_device_ret = ret;
- goto unlock_exit;
- }
- }
memcpy(&punit_callbacks[device_type], cb, sizeof(*cb));
punit_callbacks[device_type].registered = 1;
- misc_usage_count++;
-unlock_exit:
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
- return misc_device_ret;
+ ret = isst_misc_reg();
+ if (ret) {
+ /*
+		 * No mutex is needed here: misc device registration failed,
+		 * so no one can open the device yet, hence no contention.
+ */
+ punit_callbacks[device_type].registered = 0;
+ return ret;
+ }
+ return 0;
}
EXPORT_SYMBOL_GPL(isst_if_cdev_register);
@@ -725,16 +758,12 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register);
*/
void isst_if_cdev_unregister(int device_type)
{
- mutex_lock(&punit_misc_dev_lock);
- misc_usage_count--;
+ isst_misc_unreg();
+ mutex_lock(&punit_misc_dev_open_lock);
punit_callbacks[device_type].registered = 0;
if (device_type == ISST_IF_DEV_MBOX)
isst_delete_hash();
- if (!misc_usage_count && !misc_device_ret) {
- misc_deregister(&isst_if_char_driver);
- isst_if_cpu_info_exit();
- }
- mutex_unlock(&punit_misc_dev_lock);
+ mutex_unlock(&punit_misc_dev_open_lock);
}
EXPORT_SYMBOL_GPL(isst_if_cdev_unregister);
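
The restructuring above boils down to a refcounted register/unregister idiom guarded by its own lock, kept separate from the open/release lock so registration cannot contend with user space opening the device. A condensed sketch of the idiom, with illustrative names:

#include <linux/mutex.h>

static DEFINE_MUTEX(demo_reg_lock);
static int demo_usage_count;

static int demo_reg(int (*do_register)(void))
{
	int ret = 0;

	mutex_lock(&demo_reg_lock);
	if (!demo_usage_count) {
		ret = do_register();	/* first user creates the shared device */
		if (ret)
			goto unlock;
	}
	demo_usage_count++;
unlock:
	mutex_unlock(&demo_reg_lock);
	return ret;
}

static void demo_unreg(void (*do_unregister)(void))
{
	mutex_lock(&demo_reg_lock);
	if (demo_usage_count && !--demo_usage_count)
		do_unregister();	/* last user tears the shared device down */
	mutex_unlock(&demo_reg_lock);
}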
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
new file mode 100644
index 000000000000..c3bdd75ed690
--- /dev/null
+++ b/drivers/platform/x86/intel/vsec.c
@@ -0,0 +1,408 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Vendor Specific Extended Capabilities auxiliary bus driver
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Author: David E. Box <david.e.box@linux.intel.com>
+ *
+ * This driver discovers and creates auxiliary devices for Intel defined PCIe
+ * "Vendor Specific" and "Designated Vendor Specific" Extended Capabilities,
+ * VSEC and DVSEC respectively. The driver supports features on specific PCIe
+ * endpoints whose primary purpose is to expose those features.
+ */
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+#include <linux/kernel.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "vsec.h"
+
+/* Intel DVSEC offsets */
+#define INTEL_DVSEC_ENTRIES 0xA
+#define INTEL_DVSEC_SIZE 0xB
+#define INTEL_DVSEC_TABLE 0xC
+#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0))
+#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3))
+#define TABLE_OFFSET_SHIFT 3
+
+static DEFINE_IDA(intel_vsec_ida);
+
+/**
+ * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers.
+ * @rev: Revision ID of the VSEC/DVSEC register space
+ * @length: Length of the VSEC/DVSEC register space
+ * @id: ID of the feature
+ * @num_entries: Number of instances of the feature
+ * @entry_size: Size of the discovery table for each feature
+ * @tbir: BAR containing the discovery tables
+ * @offset: BAR offset of start of the first discovery table
+ */
+struct intel_vsec_header {
+ u8 rev;
+ u16 length;
+ u16 id;
+ u8 num_entries;
+ u8 entry_size;
+ u8 tbir;
+ u32 offset;
+};
+
+/* Platform specific data */
+struct intel_vsec_platform_info {
+ struct intel_vsec_header **capabilities;
+ unsigned long quirks;
+};
+
+enum intel_vsec_id {
+ VSEC_ID_TELEMETRY = 2,
+ VSEC_ID_WATCHER = 3,
+ VSEC_ID_CRASHLOG = 4,
+};
+
+static enum intel_vsec_id intel_vsec_allow_list[] = {
+ VSEC_ID_TELEMETRY,
+ VSEC_ID_WATCHER,
+ VSEC_ID_CRASHLOG,
+};
+
+static const char *intel_vsec_name(enum intel_vsec_id id)
+{
+ switch (id) {
+ case VSEC_ID_TELEMETRY:
+ return "telemetry";
+
+ case VSEC_ID_WATCHER:
+ return "watcher";
+
+ case VSEC_ID_CRASHLOG:
+ return "crashlog";
+
+ default:
+ return NULL;
+ }
+}
+
+static bool intel_vsec_allowed(u16 id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_vsec_allow_list); i++)
+ if (intel_vsec_allow_list[i] == id)
+ return true;
+
+ return false;
+}
+
+static bool intel_vsec_disabled(u16 id, unsigned long quirks)
+{
+ switch (id) {
+ case VSEC_ID_WATCHER:
+ return !!(quirks & VSEC_QUIRK_NO_WATCHER);
+
+ case VSEC_ID_CRASHLOG:
+ return !!(quirks & VSEC_QUIRK_NO_CRASHLOG);
+
+ default:
+ return false;
+ }
+}
+
+static void intel_vsec_remove_aux(void *data)
+{
+ auxiliary_device_delete(data);
+ auxiliary_device_uninit(data);
+}
+
+static void intel_vsec_dev_release(struct device *dev)
+{
+ struct intel_vsec_device *intel_vsec_dev = dev_to_ivdev(dev);
+
+ ida_free(intel_vsec_dev->ida, intel_vsec_dev->auxdev.id);
+ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+}
+
+static int intel_vsec_add_aux(struct pci_dev *pdev, struct intel_vsec_device *intel_vsec_dev,
+ const char *name)
+{
+ struct auxiliary_device *auxdev = &intel_vsec_dev->auxdev;
+ int ret;
+
+ ret = ida_alloc(intel_vsec_dev->ida, GFP_KERNEL);
+ if (ret < 0) {
+ kfree(intel_vsec_dev);
+ return ret;
+ }
+
+ auxdev->id = ret;
+ auxdev->name = name;
+ auxdev->dev.parent = &pdev->dev;
+ auxdev->dev.release = intel_vsec_dev_release;
+
+ ret = auxiliary_device_init(auxdev);
+ if (ret < 0) {
+ ida_free(intel_vsec_dev->ida, auxdev->id);
+ kfree(intel_vsec_dev->resource);
+ kfree(intel_vsec_dev);
+ return ret;
+ }
+
+ ret = auxiliary_device_add(auxdev);
+ if (ret < 0) {
+ auxiliary_device_uninit(auxdev);
+ return ret;
+ }
+
+ return devm_add_action_or_reset(&pdev->dev, intel_vsec_remove_aux, auxdev);
+}
+
+static int intel_vsec_add_dev(struct pci_dev *pdev, struct intel_vsec_header *header,
+ unsigned long quirks)
+{
+ struct intel_vsec_device *intel_vsec_dev;
+ struct resource *res, *tmp;
+ int i;
+
+ if (!intel_vsec_allowed(header->id) || intel_vsec_disabled(header->id, quirks))
+ return -EINVAL;
+
+ if (!header->num_entries) {
+ dev_dbg(&pdev->dev, "Invalid 0 entry count for header id %d\n", header->id);
+ return -EINVAL;
+ }
+
+ if (!header->entry_size) {
+ dev_dbg(&pdev->dev, "Invalid 0 entry size for header id %d\n", header->id);
+ return -EINVAL;
+ }
+
+ intel_vsec_dev = kzalloc(sizeof(*intel_vsec_dev), GFP_KERNEL);
+ if (!intel_vsec_dev)
+ return -ENOMEM;
+
+ res = kcalloc(header->num_entries, sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ kfree(intel_vsec_dev);
+ return -ENOMEM;
+ }
+
+ if (quirks & VSEC_QUIRK_TABLE_SHIFT)
+ header->offset >>= TABLE_OFFSET_SHIFT;
+
+ /*
+ * The DVSEC/VSEC contains the starting offset and count for a block of
+	 * discovery tables. Create a resource array covering these tables
+	 * to hand to the auxiliary device driver.
+ */
+ for (i = 0, tmp = res; i < header->num_entries; i++, tmp++) {
+ tmp->start = pdev->resource[header->tbir].start +
+ header->offset + i * (header->entry_size * sizeof(u32));
+ tmp->end = tmp->start + (header->entry_size * sizeof(u32)) - 1;
+ tmp->flags = IORESOURCE_MEM;
+ }
+
+ intel_vsec_dev->pcidev = pdev;
+ intel_vsec_dev->resource = res;
+ intel_vsec_dev->num_resources = header->num_entries;
+ intel_vsec_dev->quirks = quirks;
+ intel_vsec_dev->ida = &intel_vsec_ida;
+
+ return intel_vsec_add_aux(pdev, intel_vsec_dev, intel_vsec_name(header->id));
+}
+
+static bool intel_vsec_walk_header(struct pci_dev *pdev, unsigned long quirks,
+ struct intel_vsec_header **header)
+{
+ bool have_devices = false;
+ int ret;
+
+ for ( ; *header; header++) {
+ ret = intel_vsec_add_dev(pdev, *header, quirks);
+ if (ret)
+ dev_info(&pdev->dev, "Could not add device for DVSEC id %d\n",
+ (*header)->id);
+ else
+ have_devices = true;
+ }
+
+ return have_devices;
+}
+
+static bool intel_vsec_walk_dvsec(struct pci_dev *pdev, unsigned long quirks)
+{
+ bool have_devices = false;
+ int pos = 0;
+
+ do {
+ struct intel_vsec_header header;
+ u32 table, hdr;
+ u16 vid;
+ int ret;
+
+ pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_DVSEC);
+ if (!pos)
+ break;
+
+ pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER1, &hdr);
+ vid = PCI_DVSEC_HEADER1_VID(hdr);
+ if (vid != PCI_VENDOR_ID_INTEL)
+ continue;
+
+ /* Support only revision 1 */
+ header.rev = PCI_DVSEC_HEADER1_REV(hdr);
+ if (header.rev != 1) {
+ dev_info(&pdev->dev, "Unsupported DVSEC revision %d\n", header.rev);
+ continue;
+ }
+
+ header.length = PCI_DVSEC_HEADER1_LEN(hdr);
+
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
+ pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);
+
+ header.tbir = INTEL_DVSEC_TABLE_BAR(table);
+ header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+
+ pci_read_config_dword(pdev, pos + PCI_DVSEC_HEADER2, &hdr);
+ header.id = PCI_DVSEC_HEADER2_ID(hdr);
+
+ ret = intel_vsec_add_dev(pdev, &header, quirks);
+ if (ret)
+ continue;
+
+ have_devices = true;
+ } while (true);
+
+ return have_devices;
+}
+
+static bool intel_vsec_walk_vsec(struct pci_dev *pdev, unsigned long quirks)
+{
+ bool have_devices = false;
+ int pos = 0;
+
+ do {
+ struct intel_vsec_header header;
+ u32 table, hdr;
+ int ret;
+
+ pos = pci_find_next_ext_capability(pdev, pos, PCI_EXT_CAP_ID_VNDR);
+ if (!pos)
+ break;
+
+ pci_read_config_dword(pdev, pos + PCI_VNDR_HEADER, &hdr);
+
+ /* Support only revision 1 */
+ header.rev = PCI_VNDR_HEADER_REV(hdr);
+ if (header.rev != 1) {
+ dev_info(&pdev->dev, "Unsupported VSEC revision %d\n", header.rev);
+ continue;
+ }
+
+ header.id = PCI_VNDR_HEADER_ID(hdr);
+ header.length = PCI_VNDR_HEADER_LEN(hdr);
+
+ /* entry, size, and table offset are the same as DVSEC */
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_ENTRIES, &header.num_entries);
+ pci_read_config_byte(pdev, pos + INTEL_DVSEC_SIZE, &header.entry_size);
+ pci_read_config_dword(pdev, pos + INTEL_DVSEC_TABLE, &table);
+
+ header.tbir = INTEL_DVSEC_TABLE_BAR(table);
+ header.offset = INTEL_DVSEC_TABLE_OFFSET(table);
+
+ ret = intel_vsec_add_dev(pdev, &header, quirks);
+ if (ret)
+ continue;
+
+ have_devices = true;
+ } while (true);
+
+ return have_devices;
+}
+
+static int intel_vsec_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct intel_vsec_platform_info *info;
+ bool have_devices = false;
+ unsigned long quirks = 0;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ info = (struct intel_vsec_platform_info *)id->driver_data;
+ if (info)
+ quirks = info->quirks;
+
+ if (intel_vsec_walk_dvsec(pdev, quirks))
+ have_devices = true;
+
+ if (intel_vsec_walk_vsec(pdev, quirks))
+ have_devices = true;
+
+ if (info && (info->quirks & VSEC_QUIRK_NO_DVSEC) &&
+ intel_vsec_walk_header(pdev, quirks, info->capabilities))
+ have_devices = true;
+
+ if (!have_devices)
+ return -ENODEV;
+
+ return 0;
+}
+
+/* TGL info */
+static const struct intel_vsec_platform_info tgl_info = {
+ .quirks = VSEC_QUIRK_NO_WATCHER | VSEC_QUIRK_NO_CRASHLOG | VSEC_QUIRK_TABLE_SHIFT,
+};
+
+/* DG1 info */
+static struct intel_vsec_header dg1_telemetry = {
+ .length = 0x10,
+ .id = 2,
+ .num_entries = 1,
+ .entry_size = 3,
+ .tbir = 0,
+ .offset = 0x466000,
+};
+
+static struct intel_vsec_header *dg1_capabilities[] = {
+ &dg1_telemetry,
+ NULL
+};
+
+static const struct intel_vsec_platform_info dg1_info = {
+ .capabilities = dg1_capabilities,
+ .quirks = VSEC_QUIRK_NO_DVSEC,
+};
+
+#define PCI_DEVICE_ID_INTEL_VSEC_ADL 0x467d
+#define PCI_DEVICE_ID_INTEL_VSEC_DG1 0x490e
+#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
+#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
+static const struct pci_device_id intel_vsec_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, NULL) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
+
+static struct pci_driver intel_vsec_pci_driver = {
+ .name = "intel_vsec",
+ .id_table = intel_vsec_pci_ids,
+ .probe = intel_vsec_pci_probe,
+};
+module_pci_driver(intel_vsec_pci_driver);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Intel Extended Capabilities auxiliary bus driver");
+MODULE_LICENSE("GPL v2");
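
For reference, the INTEL_DVSEC_TABLE dword read by the walkers above encodes both a BAR index and a byte offset. A worked sketch of the decode, reusing the driver's own macros; the sample value mirrors the dg1_telemetry entry (BAR 0, offset 0x466000):

static resource_size_t demo_table_start(struct pci_dev *pdev, u32 table)
{
	u8 tbir = INTEL_DVSEC_TABLE_BAR(table);		/* bits 2:0  - BAR index */
	u32 off = INTEL_DVSEC_TABLE_OFFSET(table);	/* bits 31:3 - byte offset */

	/* e.g. table = 0x00466000 yields tbir = 0, off = 0x466000 */
	return pdev->resource[tbir].start + off;
}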
diff --git a/drivers/platform/x86/intel/vsec.h b/drivers/platform/x86/intel/vsec.h
new file mode 100644
index 000000000000..4cc36678e8c5
--- /dev/null
+++ b/drivers/platform/x86/intel/vsec.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _VSEC_H
+#define _VSEC_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/bits.h>
+
+struct pci_dev;
+struct resource;
+
+enum intel_vsec_quirks {
+ /* Watcher feature not supported */
+ VSEC_QUIRK_NO_WATCHER = BIT(0),
+
+ /* Crashlog feature not supported */
+ VSEC_QUIRK_NO_CRASHLOG = BIT(1),
+
+ /* Use shift instead of mask to read discovery table offset */
+ VSEC_QUIRK_TABLE_SHIFT = BIT(2),
+
+ /* DVSEC not present (provided in driver data) */
+ VSEC_QUIRK_NO_DVSEC = BIT(3),
+};
+
+struct intel_vsec_device {
+ struct auxiliary_device auxdev;
+ struct pci_dev *pcidev;
+ struct resource *resource;
+ struct ida *ida;
+ unsigned long quirks;
+ int num_resources;
+};
+
+static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev)
+{
+ return container_of(dev, struct intel_vsec_device, auxdev.dev);
+}
+
+static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device *auxdev)
+{
+ return container_of(auxdev, struct intel_vsec_device, auxdev);
+}
+#endif
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 82fa6148216c..3424b080db77 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -880,14 +880,14 @@ static int dispatch_proc_show(struct seq_file *m, void *v)
static int dispatch_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, dispatch_proc_show, PDE_DATA(inode));
+ return single_open(file, dispatch_proc_show, pde_data(inode));
}
static ssize_t dispatch_proc_write(struct file *file,
const char __user *userbuf,
size_t count, loff_t *pos)
{
- struct ibm_struct *ibm = PDE_DATA(file_inode(file));
+ struct ibm_struct *ibm = pde_data(file_inode(file));
char *kernbuf;
int ret;
@@ -8679,9 +8679,10 @@ static const struct attribute_group fan_driver_attr_group = {
.attrs = fan_driver_attributes,
};
-#define TPACPI_FAN_Q1 0x0001 /* Unitialized HFSP */
-#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
-#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */
+#define TPACPI_FAN_Q1 0x0001 /* Uninitialized HFSP */
+#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */
+#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */
+#define TPACPI_FAN_NOFAN 0x0008 /* no fan available */
static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
@@ -8702,6 +8703,8 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
	TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL),	/* P1 / X1 Extreme (4th gen) */
TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */
TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */
+ TPACPI_Q_LNV3('N', '3', '7', TPACPI_FAN_2CTL), /* T15g (2nd gen) */
+ TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
};
static int __init fan_init(struct ibm_init_struct *iibm)
@@ -8730,6 +8733,11 @@ static int __init fan_init(struct ibm_init_struct *iibm)
quirks = tpacpi_check_quirks(fan_quirk_table,
ARRAY_SIZE(fan_quirk_table));
+ if (quirks & TPACPI_FAN_NOFAN) {
+ pr_info("No integrated ThinkPad fan available\n");
+ return -ENODEV;
+ }
+
if (gfan_handle) {
/* 570, 600e/x, 770e, 770x */
fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN;
@@ -10112,6 +10120,9 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_CMD_MMC_GET 8 /* To get current MMC function and mode */
#define DYTC_CMD_RESET 0x1ff /* To reset back to default */
+#define DYTC_CMD_FUNC_CAP 3 /* To get DYTC capabilities */
+#define DYTC_FC_MMC 27 /* MMC Mode supported */
+
#define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */
#define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */
@@ -10324,6 +10335,15 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (dytc_version < 5)
return -ENODEV;
+	/* Check which capabilities are supported; currently MMC mode is required */
+ err = dytc_command(DYTC_CMD_FUNC_CAP, &output);
+ if (err)
+ return err;
+ if (!(output & BIT(DYTC_FC_MMC))) {
+ dbg_printk(TPACPI_DBG_INIT, " DYTC MMC mode not supported\n");
+ return -ENODEV;
+ }
+
dbg_printk(TPACPI_DBG_INIT,
"DYTC version %d: thermal mode available\n", dytc_version);
/*
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 352508d30467..f113dec98e21 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1368,7 +1368,7 @@ static int lcd_proc_show(struct seq_file *m, void *v)
static int lcd_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, lcd_proc_show, PDE_DATA(inode));
+ return single_open(file, lcd_proc_show, pde_data(inode));
}
static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
@@ -1404,7 +1404,7 @@ static int set_lcd_status(struct backlight_device *bd)
static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+ struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
char cmd[42];
size_t len;
int levels;
@@ -1469,13 +1469,13 @@ static int video_proc_show(struct seq_file *m, void *v)
static int video_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, video_proc_show, PDE_DATA(inode));
+ return single_open(file, video_proc_show, pde_data(inode));
}
static ssize_t video_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+ struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
char *buffer;
char *cmd;
int lcd_out = -1, crt_out = -1, tv_out = -1;
@@ -1580,13 +1580,13 @@ static int fan_proc_show(struct seq_file *m, void *v)
static int fan_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, fan_proc_show, PDE_DATA(inode));
+ return single_open(file, fan_proc_show, pde_data(inode));
}
static ssize_t fan_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+ struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
char cmd[42];
size_t len;
int value;
@@ -1628,13 +1628,13 @@ static int keys_proc_show(struct seq_file *m, void *v)
static int keys_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, keys_proc_show, PDE_DATA(inode));
+ return single_open(file, keys_proc_show, pde_data(inode));
}
static ssize_t keys_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
+ struct toshiba_acpi_dev *dev = pde_data(file_inode(file));
char cmd[42];
size_t len;
int value;
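
The hunks above are part of the tree-wide PDE_DATA() to pde_data() rename; the handlers are otherwise unchanged. A small sketch of the pairing, with a hypothetical proc entry: the cookie passed to proc_create_data() at creation time is exactly what pde_data(inode) hands back in the open handler.

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	void *cookie = m->private;	/* set by single_open() below */

	seq_printf(m, "cookie at %p\n", cookie);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* pde_data(inode) returns the cookie given to proc_create_data() */
	return single_open(file, demo_show, pde_data(inode));
}

/* creation side (hypothetical): proc_create_data("demo", 0444, NULL, &demo_proc_ops, cookie); */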
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 494f23052678..bc97bfa8e8a6 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -770,6 +770,21 @@ static const struct ts_dmi_data predia_basic_data = {
.properties = predia_basic_props,
};
+static const struct property_entry rwc_nanote_p8_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-min-y", 46),
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1728),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
+ PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ { }
+};
+
+static const struct ts_dmi_data rwc_nanote_p8_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = rwc_nanote_p8_props,
+};
+
static const struct property_entry schneider_sct101ctm_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1715),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1140),
@@ -1395,6 +1410,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* RWC NANOTE P8 */
+ .driver_data = (void *)&rwc_nanote_p8_data,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "AY07J"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "0001")
+ },
+ },
+ {
/* Schneider SCT101CTM */
.driver_data = (void *)&schneider_sct101ctm_data,
.matches = {
diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c
index 3ba63ad91b28..9360a8a92486 100644
--- a/drivers/platform/x86/x86-android-tablets.c
+++ b/drivers/platform/x86/x86-android-tablets.c
@@ -26,6 +26,7 @@
#include <linux/string.h>
/* For gpio_get_desc() which is EXPORT_SYMBOL_GPL() */
#include "../../gpio/gpiolib.h"
+#include "../../gpio/gpiolib-acpi.h"
/*
* Helper code to get Linux IRQ numbers given a description of the IRQ source
@@ -47,7 +48,7 @@ struct x86_acpi_irq_data {
int polarity; /* ACPI_ACTIVE_HIGH / ACPI_ACTIVE_LOW / ACPI_ACTIVE_BOTH */
};
-static int x86_acpi_irq_helper_gpiochip_find(struct gpio_chip *gc, void *data)
+static int gpiochip_find_match_label(struct gpio_chip *gc, void *data)
{
return gc->label && !strcmp(gc->label, data);
}
@@ -73,7 +74,7 @@ static int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data)
return irq;
case X86_ACPI_IRQ_TYPE_GPIOINT:
/* Like acpi_dev_gpio_irq_get(), but without parsing ACPI resources */
- chip = gpiochip_find(data->chip, x86_acpi_irq_helper_gpiochip_find);
+ chip = gpiochip_find(data->chip, gpiochip_find_match_label);
if (!chip) {
pr_err("error cannot find GPIO chip %s\n", data->chip);
return -ENODEV;
@@ -143,14 +144,17 @@ struct x86_serdev_info {
};
struct x86_dev_info {
+ char *invalid_aei_gpiochip;
const char * const *modules;
- struct gpiod_lookup_table **gpiod_lookup_tables;
+ struct gpiod_lookup_table * const *gpiod_lookup_tables;
const struct x86_i2c_client_info *i2c_client_info;
const struct platform_device_info *pdev_info;
const struct x86_serdev_info *serdev_info;
int i2c_client_count;
int pdev_count;
int serdev_count;
+ int (*init)(void);
+ void (*exit)(void);
};
/* Generic / shared bq24190 settings */
@@ -187,8 +191,8 @@ static struct bq24190_platform_data bq24190_pdata = {
};
static const char * const bq24190_modules[] __initconst = {
- "crystal_cove_charger", /* For the bq24190 IRQ */
- "bq24190_charger", /* For the Vbus regulator for intel-int3496 */
+ "intel_crystal_cove_charger", /* For the bq24190 IRQ */
+ "bq24190_charger", /* For the Vbus regulator for intel-int3496 */
NULL
};
@@ -302,7 +306,7 @@ static struct gpiod_lookup_table asus_me176c_goodix_gpios = {
},
};
-static struct gpiod_lookup_table *asus_me176c_gpios[] = {
+static struct gpiod_lookup_table * const asus_me176c_gpios[] = {
&int3496_gpo2_pin22_gpios,
&asus_me176c_goodix_gpios,
NULL
@@ -317,6 +321,7 @@ static const struct x86_dev_info asus_me176c_info __initconst = {
.serdev_count = ARRAY_SIZE(asus_me176c_serdevs),
.gpiod_lookup_tables = asus_me176c_gpios,
.modules = bq24190_modules,
+ .invalid_aei_gpiochip = "INT33FC:02",
};
/* Asus TF103C tablets have an Android factory img with everything hardcoded */
@@ -405,7 +410,7 @@ static const struct x86_i2c_client_info asus_tf103c_i2c_clients[] __initconst =
},
};
-static struct gpiod_lookup_table *asus_tf103c_gpios[] = {
+static struct gpiod_lookup_table * const asus_tf103c_gpios[] = {
&int3496_gpo2_pin22_gpios,
NULL
};
@@ -417,6 +422,7 @@ static const struct x86_dev_info asus_tf103c_info __initconst = {
.pdev_count = ARRAY_SIZE(int3496_pdevs),
.gpiod_lookup_tables = asus_tf103c_gpios,
.modules = bq24190_modules,
+ .invalid_aei_gpiochip = "INT33FC:02",
};
/*
@@ -490,6 +496,39 @@ static const struct x86_dev_info chuwi_hi8_info __initconst = {
.i2c_client_count = ARRAY_SIZE(chuwi_hi8_i2c_clients),
};
+#define CZC_EC_EXTRA_PORT 0x68
+#define CZC_EC_ANDROID_KEYS 0x63
+
+static int __init czc_p10t_init(void)
+{
+ /*
+	 * The device boots up in "Windows 7" mode, in which the home button sends a
+	 * Windows-specific key sequence (Left Meta + D) and the second button
+ * sends an unknown one while also toggling the Radio Kill Switch.
+ * This is a surprising behavior when the second button is labeled "Back".
+ *
+	 * The vendor-supplied Android-x86 build switches the device to an "Android"
+	 * mode by writing value 0x63 to the I/O port 0x68. This seems to just
+ * set bit 6 on address 0x96 in the EC region; switching the bit directly
+ * seems to achieve the same result. It uses a "p10t_switcher" to do the
+ * job. It doesn't seem to be able to do anything else, and no other use
+ * of the port 0x68 is known.
+ *
+ * In the Android mode, the home button sends just a single scancode,
+ * which can be handled in Linux userspace more reasonably and the back
+ * button only sends a scancode without toggling the kill switch.
+ * The scancode can then be mapped either to Back or RF Kill functionality
+ * in userspace, depending on how the button is labeled on that particular
+ * model.
+ */
+ outb(CZC_EC_ANDROID_KEYS, CZC_EC_EXTRA_PORT);
+ return 0;
+}
+
+static const struct x86_dev_info czc_p10t __initconst = {
+ .init = czc_p10t_init,
+};
+
/*
* Whitelabel (sold as various brands) TM800A550L tablets.
* These tablet's DSDT contains a whole bunch of bogus ACPI I2C devices
@@ -559,7 +598,7 @@ static struct gpiod_lookup_table whitelabel_tm800a550l_goodix_gpios = {
},
};
-static struct gpiod_lookup_table *whitelabel_tm800a550l_gpios[] = {
+static struct gpiod_lookup_table * const whitelabel_tm800a550l_gpios[] = {
&whitelabel_tm800a550l_goodix_gpios,
NULL
};
@@ -642,6 +681,24 @@ static const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
.driver_data = (void *)&chuwi_hi8_info,
},
{
+ /* CZC P10T */
+ .ident = "CZC ODEON TPC-10 (\"P10T\")",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "CZC"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ODEON*TPC-10"),
+ },
+ .driver_data = (void *)&czc_p10t,
+ },
+ {
+ /* A variant of CZC P10T */
+ .ident = "ViewSonic ViewPad 10",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ViewSonic"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPAD10"),
+ },
+ .driver_data = (void *)&czc_p10t,
+ },
+ {
/* Whitelabel (sold as various brands) TM800A550L */
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -669,7 +726,8 @@ static int serdev_count;
static struct i2c_client **i2c_clients;
static struct platform_device **pdevs;
static struct serdev_device **serdevs;
-static struct gpiod_lookup_table **gpiod_lookup_tables;
+static struct gpiod_lookup_table * const *gpiod_lookup_tables;
+static void (*exit_handler)(void);
static __init int x86_instantiate_i2c_client(const struct x86_dev_info *dev_info,
int idx)
@@ -787,6 +845,9 @@ static void x86_android_tablet_cleanup(void)
kfree(i2c_clients);
+ if (exit_handler)
+ exit_handler();
+
for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++)
gpiod_remove_lookup_table(gpiod_lookup_tables[i]);
}
@@ -795,6 +856,7 @@ static __init int x86_android_tablet_init(void)
{
const struct x86_dev_info *dev_info;
const struct dmi_system_id *id;
+ struct gpio_chip *chip;
int i, ret = 0;
id = dmi_first_match(x86_android_tablet_ids);
@@ -804,6 +866,20 @@ static __init int x86_android_tablet_init(void)
dev_info = id->driver_data;
/*
+ * The broken DSDTs on these devices often also include broken
+ * _AEI (ACPI Event Interrupt) handlers, disable these.
+ */
+ if (dev_info->invalid_aei_gpiochip) {
+ chip = gpiochip_find(dev_info->invalid_aei_gpiochip,
+ gpiochip_find_match_label);
+ if (!chip) {
+ pr_err("error cannot find GPIO chip %s\n", dev_info->invalid_aei_gpiochip);
+ return -ENODEV;
+ }
+ acpi_gpiochip_free_interrupts(chip);
+ }
+
+ /*
* Since this runs from module_init() it cannot use -EPROBE_DEFER,
* instead pre-load any modules which are listed as requirements.
*/
@@ -814,6 +890,15 @@ static __init int x86_android_tablet_init(void)
for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++)
gpiod_add_lookup_table(gpiod_lookup_tables[i]);
+ if (dev_info->init) {
+ ret = dev_info->init();
+ if (ret < 0) {
+ x86_android_tablet_cleanup();
+ return ret;
+ }
+ exit_handler = dev_info->exit;
+ }
+
i2c_clients = kcalloc(dev_info->i2c_client_count, sizeof(*i2c_clients), GFP_KERNEL);
if (!i2c_clients) {
x86_android_tablet_cleanup();
@@ -865,6 +950,6 @@ static __init int x86_android_tablet_init(void)
module_init(x86_android_tablet_init);
module_exit(x86_android_tablet_cleanup);
-MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_DESCRIPTION("X86 Android tablets DSDT fixups driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c
index 1ae458c02656..55ae72a2818b 100644
--- a/drivers/pnp/isapnp/proc.c
+++ b/drivers/pnp/isapnp/proc.c
@@ -22,7 +22,7 @@ static loff_t isapnp_proc_bus_lseek(struct file *file, loff_t off, int whence)
static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
size_t nbytes, loff_t * ppos)
{
- struct pnp_dev *dev = PDE_DATA(file_inode(file));
+ struct pnp_dev *dev = pde_data(file_inode(file));
int pos = *ppos;
int cnt, size = 256;
diff --git a/drivers/pnp/pnpbios/core.c b/drivers/pnp/pnpbios/core.c
index 669ef4700c1a..f7e86ae9f72f 100644
--- a/drivers/pnp/pnpbios/core.c
+++ b/drivers/pnp/pnpbios/core.c
@@ -160,7 +160,7 @@ static int pnp_dock_thread(void *unused)
* No dock to manage
*/
case PNP_FUNCTION_NOT_SUPPORTED:
- complete_and_exit(&unload_sem, 0);
+ kthread_complete_and_exit(&unload_sem, 0);
case PNP_SYSTEM_NOT_DOCKED:
d = 0;
break;
@@ -170,7 +170,7 @@ static int pnp_dock_thread(void *unused)
default:
pnpbios_print_status("pnp_dock_thread", status);
printk(KERN_WARNING "PnPBIOS: disabling dock monitoring.\n");
- complete_and_exit(&unload_sem, 0);
+ kthread_complete_and_exit(&unload_sem, 0);
}
if (d != docked) {
if (pnp_dock_event(d, &now) == 0) {
@@ -183,7 +183,7 @@ static int pnp_dock_thread(void *unused)
}
}
}
- complete_and_exit(&unload_sem, 0);
+ kthread_complete_and_exit(&unload_sem, 0);
}
static int pnpbios_get_resources(struct pnp_dev *dev)
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c
index a806830e3a40..0f0d819b157f 100644
--- a/drivers/pnp/pnpbios/proc.c
+++ b/drivers/pnp/pnpbios/proc.c
@@ -173,13 +173,13 @@ static int pnpbios_proc_show(struct seq_file *m, void *v)
static int pnpbios_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, pnpbios_proc_show, PDE_DATA(inode));
+ return single_open(file, pnpbios_proc_show, pde_data(inode));
}
static ssize_t pnpbios_proc_write(struct file *file, const char __user *buf,
size_t count, loff_t *pos)
{
- void *data = PDE_DATA(file_inode(file));
+ void *data = pde_data(file_inode(file));
struct pnp_bios_node *node;
int boot = (long)data >> 8;
u8 nodenum = (long)data;
diff --git a/drivers/power/supply/bq256xx_charger.c b/drivers/power/supply/bq256xx_charger.c
index b274942dc46a..01ad84fd147c 100644
--- a/drivers/power/supply/bq256xx_charger.c
+++ b/drivers/power/supply/bq256xx_charger.c
@@ -1523,6 +1523,9 @@ static int bq256xx_hw_init(struct bq256xx_device *bq)
BQ256XX_WDT_BIT_SHIFT);
ret = power_supply_get_battery_info(bq->charger, &bat_info);
+ if (ret == -ENOMEM)
+ return ret;
+
if (ret) {
dev_warn(bq->dev, "battery info missing, default values will be applied\n");
diff --git a/drivers/power/supply/cw2015_battery.c b/drivers/power/supply/cw2015_battery.c
index 0c87ad0dbf71..728e2a6cc9c3 100644
--- a/drivers/power/supply/cw2015_battery.c
+++ b/drivers/power/supply/cw2015_battery.c
@@ -689,7 +689,7 @@ static int cw_bat_probe(struct i2c_client *client)
if (ret) {
/* Allocate an empty battery */
cw_bat->battery = devm_kzalloc(&client->dev,
- sizeof(cw_bat->battery),
+ sizeof(*cw_bat->battery),
GFP_KERNEL);
if (!cw_bat->battery)
return -ENOMEM;
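
The cw2015 fix above is the classic sizeof(pointer) versus sizeof(*pointer) bug: the old code allocated only 4 or 8 bytes (the size of the pointer itself) instead of the full structure. A sketch of the safe idiom, which sizes the pointed-to object:

#include <linux/device.h>
#include <linux/power_supply.h>

static struct power_supply_battery_info *demo_alloc(struct device *dev)
{
	struct power_supply_battery_info *info;

	/* correct: allocate sizeof(*info), the structure's size ... */
	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
	/* ... not sizeof(info), which is only the pointer's 4 or 8 bytes */
	return info;
}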
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 93772ab8d7e3..c7552df32082 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -548,6 +548,73 @@ static void pwm_apply_state_debug(struct pwm_device *pwm,
}
}
+static int pwm_apply_legacy(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+ struct pwm_state initial_state = pwm->state;
+
+ if (state->polarity != pwm->state.polarity) {
+ if (!chip->ops->set_polarity)
+ return -EINVAL;
+
+ /*
+ * Changing the polarity of a running PWM is only allowed when
+ * the PWM driver implements ->apply().
+ */
+ if (pwm->state.enabled) {
+ chip->ops->disable(chip, pwm);
+
+ /*
+ * Update pwm->state already here in case
+ * .set_polarity() or another callback depend on that.
+ */
+ pwm->state.enabled = false;
+ }
+
+ err = chip->ops->set_polarity(chip, pwm, state->polarity);
+ if (err)
+ goto rollback;
+
+ pwm->state.polarity = state->polarity;
+ }
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ chip->ops->disable(chip, pwm);
+
+ return 0;
+ }
+
+ /*
+ * We cannot skip calling ->config even if state->period ==
+ * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
+	 * because the last call to pwm_apply_state might have exited early
+	 * due to !state->enabled, in which case the two values in
+	 * pwm->state might not be configured in hardware.
+ */
+ err = chip->ops->config(pwm->chip, pwm,
+ state->duty_cycle,
+ state->period);
+ if (err)
+ goto rollback;
+
+ pwm->state.period = state->period;
+ pwm->state.duty_cycle = state->duty_cycle;
+
+ if (!pwm->state.enabled) {
+ err = chip->ops->enable(chip, pwm);
+ if (err)
+ goto rollback;
+ }
+
+ return 0;
+
+rollback:
+ pwm->state = initial_state;
+ return err;
+}
+
/**
* pwm_apply_state() - atomically apply a new state to a PWM device
* @pwm: PWM device
@@ -580,70 +647,22 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
state->usage_power == pwm->state.usage_power)
return 0;
- if (chip->ops->apply) {
+ if (chip->ops->apply)
err = chip->ops->apply(chip, pwm, state);
- if (err)
- return err;
-
- trace_pwm_apply(pwm, state);
-
- pwm->state = *state;
-
- /*
- * only do this after pwm->state was applied as some
- * implementations of .get_state depend on this
- */
- pwm_apply_state_debug(pwm, state);
- } else {
- /*
- * FIXME: restore the initial state in case of error.
- */
- if (state->polarity != pwm->state.polarity) {
- if (!chip->ops->set_polarity)
- return -EINVAL;
+ else
+ err = pwm_apply_legacy(chip, pwm, state);
+ if (err)
+ return err;
- /*
- * Changing the polarity of a running PWM is
- * only allowed when the PWM driver implements
- * ->apply().
- */
- if (pwm->state.enabled) {
- chip->ops->disable(chip, pwm);
- pwm->state.enabled = false;
- }
+ trace_pwm_apply(pwm, state);
- err = chip->ops->set_polarity(chip, pwm,
- state->polarity);
- if (err)
- return err;
+ pwm->state = *state;
- pwm->state.polarity = state->polarity;
- }
-
- if (state->period != pwm->state.period ||
- state->duty_cycle != pwm->state.duty_cycle) {
- err = chip->ops->config(pwm->chip, pwm,
- state->duty_cycle,
- state->period);
- if (err)
- return err;
-
- pwm->state.duty_cycle = state->duty_cycle;
- pwm->state.period = state->period;
- }
-
- if (state->enabled != pwm->state.enabled) {
- if (state->enabled) {
- err = chip->ops->enable(chip, pwm);
- if (err)
- return err;
- } else {
- chip->ops->disable(chip, pwm);
- }
-
- pwm->state.enabled = state->enabled;
- }
- }
+ /*
+ * only do this after pwm->state was applied as some
+ * implementations of .get_state depend on this
+ */
+ pwm_apply_state_debug(pwm, state);
return 0;
}
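
With the legacy path factored out, every consumer goes through the same entry point. A consumer-side sketch (device handle and period values hypothetical) showing that one pwm_apply_state() call covers period, duty cycle and enable, regardless of whether the driver implements .apply() or the legacy callbacks:

#include <linux/pwm.h>

static int demo_pwm_setup(struct device *dev)
{
	struct pwm_state state;
	struct pwm_device *pwm;

	pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	pwm_init_state(pwm, &state);	/* start from DT/board defaults */
	state.period = 1000000;		/* 1 ms, hypothetical */
	state.duty_cycle = state.period / 2;
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}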
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index f97f82548293..5996049f66ec 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -128,11 +128,9 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
duty = DIV_ROUND_UP(timebase * duty_ns, period_ns);
- ret = pm_runtime_get_sync(chip->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(chip->dev);
+ ret = pm_runtime_resume_and_get(chip->dev);
+ if (ret < 0)
return ret;
- }
val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG);
val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm));
@@ -184,10 +182,33 @@ static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
pm_runtime_put_autosuspend(chip->dev);
}
+static int img_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ img_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = img_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ err = img_pwm_enable(chip, pwm);
+
+ return err;
+}
+
static const struct pwm_ops img_pwm_ops = {
- .config = img_pwm_config,
- .enable = img_pwm_enable,
- .disable = img_pwm_disable,
+ .apply = img_pwm_apply,
.owner = THIS_MODULE,
};
diff --git a/drivers/pwm/pwm-twl.c b/drivers/pwm/pwm-twl.c
index 203194f2c92e..86567add79db 100644
--- a/drivers/pwm/pwm-twl.c
+++ b/drivers/pwm/pwm-twl.c
@@ -58,9 +58,9 @@ static inline struct twl_pwm_chip *to_twl(struct pwm_chip *chip)
}
static int twl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+ u64 duty_ns, u64 period_ns)
{
- int duty_cycle = DIV_ROUND_UP(duty_ns * TWL_PWM_MAX, period_ns) + 1;
+ int duty_cycle = DIV64_U64_ROUND_UP(duty_ns * TWL_PWM_MAX, period_ns) + 1;
u8 pwm_config[2] = { 1, 0 };
int base, ret;
@@ -279,19 +279,65 @@ out:
mutex_unlock(&twl->mutex);
}
+static int twl4030_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ twl4030_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = twl_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ err = twl4030_pwm_enable(chip, pwm);
+
+ return err;
+}
+
+static int twl6030_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ if (!state->enabled) {
+ if (pwm->state.enabled)
+ twl6030_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ err = twl_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!pwm->state.enabled)
+ err = twl6030_pwm_enable(chip, pwm);
+
+ return err;
+}
+
static const struct pwm_ops twl4030_pwm_ops = {
- .config = twl_pwm_config,
- .enable = twl4030_pwm_enable,
- .disable = twl4030_pwm_disable,
+ .apply = twl4030_pwm_apply,
.request = twl4030_pwm_request,
.free = twl4030_pwm_free,
.owner = THIS_MODULE,
};
static const struct pwm_ops twl6030_pwm_ops = {
- .config = twl_pwm_config,
- .enable = twl6030_pwm_enable,
- .disable = twl6030_pwm_disable,
+ .apply = twl6030_pwm_apply,
.owner = THIS_MODULE,
};
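
The duty-cycle math in twl_pwm_config() now takes u64 arguments, so the plain DIV_ROUND_UP() (which would emit a 64-by-64 division that 32-bit kernels cannot link) is replaced with DIV64_U64_ROUND_UP(). A small sketch of the scaling step under those constraints; EXAMPLE_PWM_MAX stands in for the chip's counter maximum:

#include <linux/math64.h>

#define EXAMPLE_PWM_MAX	0x7f	/* counter maximum; value illustrative */

/* Scale a duty cycle in ns to a counter value, rounding up. Both inputs
 * are u64, so the 64-by-64 helper must be used to stay 32-bit safe. */
static unsigned int duty_to_counter(u64 duty_ns, u64 period_ns)
{
	return DIV64_U64_ROUND_UP(duty_ns * EXAMPLE_PWM_MAX, period_ns);
}
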
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 480bfc29782f..7170a315535b 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -70,7 +70,7 @@ static inline void vt8500_pwm_busy_wait(struct vt8500_chip *vt8500, int nr, u8 b
}
static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+ u64 duty_ns, u64 period_ns)
{
struct vt8500_chip *vt8500 = to_vt8500_chip(chip);
unsigned long long c;
@@ -102,8 +102,8 @@ static int vt8500_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
}
c = (unsigned long long)pv * duty_ns;
- do_div(c, period_ns);
- dc = c;
+
+ dc = div64_u64(c, period_ns);
writel(prescale, vt8500->base + REG_SCALAR(pwm->hwpwm));
vt8500_pwm_busy_wait(vt8500, pwm->hwpwm, STATUS_SCALAR_UPDATE);
@@ -176,11 +176,54 @@ static int vt8500_pwm_set_polarity(struct pwm_chip *chip,
return 0;
}
+static int vt8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ int err;
+ bool enabled = pwm->state.enabled;
+
+ if (state->polarity != pwm->state.polarity) {
+ /*
+ * Changing the polarity of a running PWM is only allowed when
+ * the PWM driver implements ->apply().
+ */
+ if (enabled) {
+ vt8500_pwm_disable(chip, pwm);
+
+ enabled = false;
+ }
+
+ err = vt8500_pwm_set_polarity(chip, pwm, state->polarity);
+ if (err)
+ return err;
+ }
+
+ if (!state->enabled) {
+ if (enabled)
+ vt8500_pwm_disable(chip, pwm);
+
+ return 0;
+ }
+
+ /*
+ * We cannot skip calling ->config even if state->period ==
+ * pwm->state.period && state->duty_cycle == pwm->state.duty_cycle
+ * because we might have exited early in the last call to
+ * pwm_apply_state because of !state->enabled and so the two values in
+ * pwm->state might not be configured in hardware.
+ */
+ err = vt8500_pwm_config(pwm->chip, pwm, state->duty_cycle, state->period);
+ if (err)
+ return err;
+
+ if (!enabled)
+ err = vt8500_pwm_enable(chip, pwm);
+
+ return err;
+}
+
static const struct pwm_ops vt8500_pwm_ops = {
- .enable = vt8500_pwm_enable,
- .disable = vt8500_pwm_disable,
- .config = vt8500_pwm_config,
- .set_polarity = vt8500_pwm_set_polarity,
+ .apply = vt8500_pwm_apply,
.owner = THIS_MODULE,
};
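
vt8500_pwm_config() has the same 32-bit concern: do_div() only accepts a 32-bit divisor, and period_ns is now u64, hence the switch to div64_u64(). Sketch of the distinction (names illustrative):

#include <linux/math64.h>

static u32 scale_sketch(u64 pv, u64 duty_ns, u64 period_ns)
{
	u64 c = pv * duty_ns;

	/*
	 * do_div(c, d) divides in place but requires a 32-bit divisor;
	 * div64_u64() handles a full 64-bit divisor and returns the
	 * quotient directly, which also reads more naturally.
	 */
	return div64_u64(c, period_ns);
}
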
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
index 3e18f9c51e29..02771ba3e54f 100644
--- a/drivers/rapidio/switches/Kconfig
+++ b/drivers/rapidio/switches/Kconfig
@@ -2,22 +2,11 @@
#
# RapidIO switches configuration
#
-config RAPIDIO_TSI57X
- tristate "IDT Tsi57x SRIO switches support"
- help
- Includes support for IDT Tsi57x family of serial RapidIO switches.
-
config RAPIDIO_CPS_XX
tristate "IDT CPS-xx SRIO switches support"
help
Includes support for IDT CPS-16/12/10/8 serial RapidIO switches.
-config RAPIDIO_TSI568
- tristate "Tsi568 SRIO switch support"
- default n
- help
- Includes support for IDT Tsi568 serial RapidIO switch.
-
config RAPIDIO_CPS_GEN2
tristate "IDT CPS Gen.2 SRIO switch support"
default n
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
index 69e7de31e41c..ef1749a79c2b 100644
--- a/drivers/rapidio/switches/Makefile
+++ b/drivers/rapidio/switches/Makefile
@@ -3,8 +3,6 @@
# Makefile for RIO switches
#
-obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o
obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
-obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o
obj-$(CONFIG_RAPIDIO_RXS_GEN3) += idt_gen3.o
diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c
deleted file mode 100644
index 103b48a24980..000000000000
--- a/drivers/rapidio/switches/tsi568.c
+++ /dev/null
@@ -1,195 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RapidIO Tsi568 switch support
- *
- * Copyright 2009-2010 Integrated Device Technology, Inc.
- * Alexandre Bounine <alexandre.bounine@idt.com>
- * - Added EM support
- * - Modified switch operations initialization.
- *
- * Copyright 2005 MontaVista Software, Inc.
- * Matt Porter <mporter@kernel.crashing.org>
- */
-
-#include <linux/rio.h>
-#include <linux/rio_drv.h>
-#include <linux/rio_ids.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include "../rio.h"
-
-/* Global (broadcast) route registers */
-#define SPBC_ROUTE_CFG_DESTID 0x10070
-#define SPBC_ROUTE_CFG_PORT 0x10074
-
-/* Per port route registers */
-#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
-#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
-
-#define TSI568_SP_MODE(n) (0x11004 + 0x100*n)
-#define TSI568_SP_MODE_PW_DIS 0x08000000
-
-static int
-tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
- u16 table, u16 route_destid, u8 route_port)
-{
- if (table == RIO_GLOBAL_TABLE) {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_DESTID, route_destid);
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_PORT, route_port);
- } else {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_DESTID(table),
- route_destid);
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_PORT(table), route_port);
- }
-
- udelay(10);
-
- return 0;
-}
-
-static int
-tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
- u16 table, u16 route_destid, u8 *route_port)
-{
- int ret = 0;
- u32 result;
-
- if (table == RIO_GLOBAL_TABLE) {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_DESTID, route_destid);
- rio_mport_read_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_PORT, &result);
- } else {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_DESTID(table),
- route_destid);
- rio_mport_read_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_PORT(table), &result);
- }
-
- *route_port = result;
- if (*route_port > 15)
- ret = -1;
-
- return ret;
-}
-
-static int
-tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
- u16 table)
-{
- u32 route_idx;
- u32 lut_size;
-
- lut_size = (mport->sys_size) ? 0x1ff : 0xff;
-
- if (table == RIO_GLOBAL_TABLE) {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_DESTID, 0x80000000);
- for (route_idx = 0; route_idx <= lut_size; route_idx++)
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_PORT,
- RIO_INVALID_ROUTE);
- } else {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_DESTID(table),
- 0x80000000);
- for (route_idx = 0; route_idx <= lut_size; route_idx++)
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_PORT(table),
- RIO_INVALID_ROUTE);
- }
-
- return 0;
-}
-
-static int
-tsi568_em_init(struct rio_dev *rdev)
-{
- u32 regval;
- int portnum;
-
- pr_debug("TSI568 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
-
- /* Make sure that Port-Writes are disabled (for all ports) */
- for (portnum = 0;
- portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
- rio_read_config_32(rdev, TSI568_SP_MODE(portnum), &regval);
- rio_write_config_32(rdev, TSI568_SP_MODE(portnum),
- regval | TSI568_SP_MODE_PW_DIS);
- }
-
- return 0;
-}
-
-static struct rio_switch_ops tsi568_switch_ops = {
- .owner = THIS_MODULE,
- .add_entry = tsi568_route_add_entry,
- .get_entry = tsi568_route_get_entry,
- .clr_table = tsi568_route_clr_table,
- .set_domain = NULL,
- .get_domain = NULL,
- .em_init = tsi568_em_init,
- .em_handle = NULL,
-};
-
-static int tsi568_probe(struct rio_dev *rdev, const struct rio_device_id *id)
-{
- pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
-
- spin_lock(&rdev->rswitch->lock);
-
- if (rdev->rswitch->ops) {
- spin_unlock(&rdev->rswitch->lock);
- return -EINVAL;
- }
-
- rdev->rswitch->ops = &tsi568_switch_ops;
- spin_unlock(&rdev->rswitch->lock);
- return 0;
-}
-
-static void tsi568_remove(struct rio_dev *rdev)
-{
- pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
- spin_lock(&rdev->rswitch->lock);
- if (rdev->rswitch->ops != &tsi568_switch_ops) {
- spin_unlock(&rdev->rswitch->lock);
- return;
- }
- rdev->rswitch->ops = NULL;
- spin_unlock(&rdev->rswitch->lock);
-}
-
-static const struct rio_device_id tsi568_id_table[] = {
- {RIO_DEVICE(RIO_DID_TSI568, RIO_VID_TUNDRA)},
- { 0, } /* terminate list */
-};
-
-static struct rio_driver tsi568_driver = {
- .name = "tsi568",
- .id_table = tsi568_id_table,
- .probe = tsi568_probe,
- .remove = tsi568_remove,
-};
-
-static int __init tsi568_init(void)
-{
- return rio_register_driver(&tsi568_driver);
-}
-
-static void __exit tsi568_exit(void)
-{
- rio_unregister_driver(&tsi568_driver);
-}
-
-device_initcall(tsi568_init);
-module_exit(tsi568_exit);
-
-MODULE_DESCRIPTION("IDT Tsi568 Serial RapidIO switch driver");
-MODULE_AUTHOR("Integrated Device Technology, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c
deleted file mode 100644
index 271762046f8c..000000000000
--- a/drivers/rapidio/switches/tsi57x.c
+++ /dev/null
@@ -1,365 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * RapidIO Tsi57x switch family support
- *
- * Copyright 2009-2010 Integrated Device Technology, Inc.
- * Alexandre Bounine <alexandre.bounine@idt.com>
- * - Added EM support
- * - Modified switch operations initialization.
- *
- * Copyright 2005 MontaVista Software, Inc.
- * Matt Porter <mporter@kernel.crashing.org>
- */
-
-#include <linux/rio.h>
-#include <linux/rio_drv.h>
-#include <linux/rio_ids.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include "../rio.h"
-
-/* Global (broadcast) route registers */
-#define SPBC_ROUTE_CFG_DESTID 0x10070
-#define SPBC_ROUTE_CFG_PORT 0x10074
-
-/* Per port route registers */
-#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
-#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
-
-#define TSI578_SP_MODE(n) (0x11004 + n*0x100)
-#define TSI578_SP_MODE_GLBL 0x10004
-#define TSI578_SP_MODE_PW_DIS 0x08000000
-#define TSI578_SP_MODE_LUT_512 0x01000000
-
-#define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100)
-#define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100)
-#define TSI578_SP_CS_TX(n) (0x13014 + n*0x100)
-#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100)
-
-#define TSI578_GLBL_ROUTE_BASE 0x10078
-
-static int
-tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
- u16 table, u16 route_destid, u8 route_port)
-{
- if (table == RIO_GLOBAL_TABLE) {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_DESTID, route_destid);
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_PORT, route_port);
- } else {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_DESTID(table), route_destid);
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_PORT(table), route_port);
- }
-
- udelay(10);
-
- return 0;
-}
-
-static int
-tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
- u16 table, u16 route_destid, u8 *route_port)
-{
- int ret = 0;
- u32 result;
-
- if (table == RIO_GLOBAL_TABLE) {
- /* Use local RT of the ingress port to avoid possible
- race condition */
- rio_mport_read_config_32(mport, destid, hopcount,
- RIO_SWP_INFO_CAR, &result);
- table = (result & RIO_SWP_INFO_PORT_NUM_MASK);
- }
-
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_DESTID(table), route_destid);
- rio_mport_read_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_PORT(table), &result);
-
- *route_port = (u8)result;
- if (*route_port > 15)
- ret = -1;
-
- return ret;
-}
-
-static int
-tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
- u16 table)
-{
- u32 route_idx;
- u32 lut_size;
-
- lut_size = (mport->sys_size) ? 0x1ff : 0xff;
-
- if (table == RIO_GLOBAL_TABLE) {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_DESTID, 0x80000000);
- for (route_idx = 0; route_idx <= lut_size; route_idx++)
- rio_mport_write_config_32(mport, destid, hopcount,
- SPBC_ROUTE_CFG_PORT,
- RIO_INVALID_ROUTE);
- } else {
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_DESTID(table), 0x80000000);
- for (route_idx = 0; route_idx <= lut_size; route_idx++)
- rio_mport_write_config_32(mport, destid, hopcount,
- SPP_ROUTE_CFG_PORT(table) , RIO_INVALID_ROUTE);
- }
-
- return 0;
-}
-
-static int
-tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
- u8 sw_domain)
-{
- u32 regval;
-
- /*
- * Switch domain configuration operates only at global level
- */
-
- /* Turn off flat (LUT_512) mode */
- rio_mport_read_config_32(mport, destid, hopcount,
- TSI578_SP_MODE_GLBL, &regval);
- rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL,
- regval & ~TSI578_SP_MODE_LUT_512);
- /* Set switch domain base */
- rio_mport_write_config_32(mport, destid, hopcount,
- TSI578_GLBL_ROUTE_BASE,
- (u32)(sw_domain << 24));
- return 0;
-}
-
-static int
-tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
- u8 *sw_domain)
-{
- u32 regval;
-
- /*
- * Switch domain configuration operates only at global level
- */
- rio_mport_read_config_32(mport, destid, hopcount,
- TSI578_GLBL_ROUTE_BASE, &regval);
-
- *sw_domain = (u8)(regval >> 24);
-
- return 0;
-}
-
-static int
-tsi57x_em_init(struct rio_dev *rdev)
-{
- u32 regval;
- int portnum;
-
- pr_debug("TSI578 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
-
- for (portnum = 0;
- portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
- /* Make sure that Port-Writes are enabled (for all ports) */
- rio_read_config_32(rdev,
- TSI578_SP_MODE(portnum), &regval);
- rio_write_config_32(rdev,
- TSI578_SP_MODE(portnum),
- regval & ~TSI578_SP_MODE_PW_DIS);
-
- /* Clear all pending interrupts */
- rio_read_config_32(rdev,
- RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
- &regval);
- rio_write_config_32(rdev,
- RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
- regval & 0x07120214);
-
- rio_read_config_32(rdev,
- TSI578_SP_INT_STATUS(portnum), &regval);
- rio_write_config_32(rdev,
- TSI578_SP_INT_STATUS(portnum),
- regval & 0x000700bd);
-
- /* Enable all interrupts to allow ports to send a port-write */
- rio_read_config_32(rdev,
- TSI578_SP_CTL_INDEP(portnum), &regval);
- rio_write_config_32(rdev,
- TSI578_SP_CTL_INDEP(portnum),
- regval | 0x000b0000);
-
- /* Skip next (odd) port if the current port is in x4 mode */
- rio_read_config_32(rdev,
- RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
- &regval);
- if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
- portnum++;
- }
-
- /* set TVAL = ~50us */
- rio_write_config_32(rdev,
- rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8);
-
- return 0;
-}
-
-static int
-tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
-{
- struct rio_mport *mport = rdev->net->hport;
- u32 intstat, err_status;
- int sendcount, checkcount;
- u8 route_port;
- u32 regval;
-
- rio_read_config_32(rdev,
- RIO_DEV_PORT_N_ERR_STS_CSR(rdev, portnum),
- &err_status);
-
- if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) &&
- (err_status & (RIO_PORT_N_ERR_STS_OUT_ES |
- RIO_PORT_N_ERR_STS_INP_ES))) {
- /* Remove any queued packets by locking/unlocking port */
- rio_read_config_32(rdev,
- RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
- &regval);
- if (!(regval & RIO_PORT_N_CTL_LOCKOUT)) {
- rio_write_config_32(rdev,
- RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
- regval | RIO_PORT_N_CTL_LOCKOUT);
- udelay(50);
- rio_write_config_32(rdev,
- RIO_DEV_PORT_N_CTL_CSR(rdev, portnum),
- regval);
- }
-
- /* Read from link maintenance response register to clear
- * valid bit
- */
- rio_read_config_32(rdev,
- RIO_DEV_PORT_N_MNT_RSP_CSR(rdev, portnum),
- &regval);
-
- /* Send a Packet-Not-Accepted/Link-Request-Input-Status control
- * symbol to recover from IES/OES
- */
- sendcount = 3;
- while (sendcount) {
- rio_write_config_32(rdev,
- TSI578_SP_CS_TX(portnum), 0x40fc8000);
- checkcount = 3;
- while (checkcount--) {
- udelay(50);
- rio_read_config_32(rdev,
- RIO_DEV_PORT_N_MNT_RSP_CSR(rdev,
- portnum),
- &regval);
- if (regval & RIO_PORT_N_MNT_RSP_RVAL)
- goto exit_es;
- }
-
- sendcount--;
- }
- }
-
-exit_es:
- /* Clear implementation specific error status bits */
- rio_read_config_32(rdev, TSI578_SP_INT_STATUS(portnum), &intstat);
- pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n",
- rdev->destid, rdev->hopcount, portnum, intstat);
-
- if (intstat & 0x10000) {
- rio_read_config_32(rdev,
- TSI578_SP_LUT_PEINF(portnum), &regval);
- regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24);
- route_port = rdev->rswitch->route_table[regval];
- pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n",
- rio_name(rdev), portnum, regval);
- tsi57x_route_add_entry(mport, rdev->destid, rdev->hopcount,
- RIO_GLOBAL_TABLE, regval, route_port);
- }
-
- rio_write_config_32(rdev, TSI578_SP_INT_STATUS(portnum),
- intstat & 0x000700bd);
-
- return 0;
-}
-
-static struct rio_switch_ops tsi57x_switch_ops = {
- .owner = THIS_MODULE,
- .add_entry = tsi57x_route_add_entry,
- .get_entry = tsi57x_route_get_entry,
- .clr_table = tsi57x_route_clr_table,
- .set_domain = tsi57x_set_domain,
- .get_domain = tsi57x_get_domain,
- .em_init = tsi57x_em_init,
- .em_handle = tsi57x_em_handler,
-};
-
-static int tsi57x_probe(struct rio_dev *rdev, const struct rio_device_id *id)
-{
- pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
-
- spin_lock(&rdev->rswitch->lock);
-
- if (rdev->rswitch->ops) {
- spin_unlock(&rdev->rswitch->lock);
- return -EINVAL;
- }
- rdev->rswitch->ops = &tsi57x_switch_ops;
-
- if (rdev->do_enum) {
- /* Ensure that default routing is disabled on startup */
- rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT,
- RIO_INVALID_ROUTE);
- }
-
- spin_unlock(&rdev->rswitch->lock);
- return 0;
-}
-
-static void tsi57x_remove(struct rio_dev *rdev)
-{
- pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
- spin_lock(&rdev->rswitch->lock);
- if (rdev->rswitch->ops != &tsi57x_switch_ops) {
- spin_unlock(&rdev->rswitch->lock);
- return;
- }
- rdev->rswitch->ops = NULL;
- spin_unlock(&rdev->rswitch->lock);
-}
-
-static const struct rio_device_id tsi57x_id_table[] = {
- {RIO_DEVICE(RIO_DID_TSI572, RIO_VID_TUNDRA)},
- {RIO_DEVICE(RIO_DID_TSI574, RIO_VID_TUNDRA)},
- {RIO_DEVICE(RIO_DID_TSI577, RIO_VID_TUNDRA)},
- {RIO_DEVICE(RIO_DID_TSI578, RIO_VID_TUNDRA)},
- { 0, } /* terminate list */
-};
-
-static struct rio_driver tsi57x_driver = {
- .name = "tsi57x",
- .id_table = tsi57x_id_table,
- .probe = tsi57x_probe,
- .remove = tsi57x_remove,
-};
-
-static int __init tsi57x_init(void)
-{
- return rio_register_driver(&tsi57x_driver);
-}
-
-static void __exit tsi57x_exit(void)
-{
- rio_unregister_driver(&tsi57x_driver);
-}
-
-device_initcall(tsi57x_init);
-module_exit(tsi57x_exit);
-
-MODULE_DESCRIPTION("IDT Tsi57x Serial RapidIO switch family driver");
-MODULE_AUTHOR("Integrated Device Technology, Inc.");
-MODULE_LICENSE("GPL");
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 86aa4141efa9..d2553970a67b 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -6014,9 +6014,8 @@ core_initcall(regulator_init);
static int regulator_late_cleanup(struct device *dev, void *data)
{
struct regulator_dev *rdev = dev_to_rdev(dev);
- const struct regulator_ops *ops = rdev->desc->ops;
struct regulation_constraints *c = rdev->constraints;
- int enabled, ret;
+ int ret;
if (c && c->always_on)
return 0;
@@ -6029,14 +6028,8 @@ static int regulator_late_cleanup(struct device *dev, void *data)
if (rdev->use_count)
goto unlock;
- /* If we can't read the status assume it's always on. */
- if (ops->is_enabled)
- enabled = ops->is_enabled(rdev);
- else
- enabled = 1;
-
- /* But if reading the status failed, assume that it's off. */
- if (enabled <= 0)
+ /* If reading the status failed, assume that it's off. */
+ if (_regulator_is_enabled(rdev) <= 0)
goto unlock;
if (have_full_constraints()) {
diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c
index fbc56b043071..b8bf76c170fe 100644
--- a/drivers/regulator/max20086-regulator.c
+++ b/drivers/regulator/max20086-regulator.c
@@ -7,6 +7,7 @@
#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -140,7 +141,7 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on)
node = of_get_child_by_name(chip->dev->of_node, "regulators");
if (!node) {
dev_err(chip->dev, "regulators node not found\n");
- return PTR_ERR(node);
+ return -ENODEV;
}
for (i = 0; i < chip->info->num_outputs; ++i)
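
The max20086 fix above is worth spelling out: of_get_child_by_name() returns NULL on failure, not an ERR_PTR(), so PTR_ERR(NULL) evaluates to 0 and the old code reported success for a missing node. A hedged sketch of the correct check (error code as in the patch, function name illustrative):

#include <linux/of.h>

/* Illustrative: NULL-returning OF lookups must not go through PTR_ERR(). */
static int find_regulators_node(struct device_node *parent,
				struct device_node **out)
{
	struct device_node *node;

	node = of_get_child_by_name(parent, "regulators");
	if (!node)
		return -ENODEV;	/* PTR_ERR(NULL) would wrongly be 0 */

	*out = node;
	return 0;
}
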
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig
index f2e961f998ca..166019786653 100644
--- a/drivers/remoteproc/Kconfig
+++ b/drivers/remoteproc/Kconfig
@@ -180,6 +180,7 @@ config QCOM_Q6V5_ADSP
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_PIL_INFO
select QCOM_MDT_LOADER
@@ -199,6 +200,7 @@ config QCOM_Q6V5_MSS
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_MDT_LOADER
select QCOM_PIL_INFO
@@ -218,6 +220,7 @@ config QCOM_Q6V5_PAS
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_PIL_INFO
select QCOM_MDT_LOADER
@@ -239,6 +242,7 @@ config QCOM_Q6V5_WCSS
depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n
depends on QCOM_SYSMON || QCOM_SYSMON=n
depends on RPMSG_QCOM_GLINK || RPMSG_QCOM_GLINK=n
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select MFD_SYSCON
select QCOM_MDT_LOADER
select QCOM_PIL_INFO
@@ -283,6 +287,17 @@ config QCOM_WCNSS_PIL
verified and booted with the help of the Peripheral Authentication
System (PAS) in TrustZone.
+config RCAR_REMOTEPROC
+ tristate "Renesas R-Car Gen3 remoteproc support"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ help
+	  Say y here to support the R-Car realtime processor via the
+	  remote processor framework. An ELF firmware can be loaded
+	  through the sysfs remoteproc entries. The remote processor
+	  can be started and stopped.
+	  This can be either built-in or a loadable module.
+	  If compiled as a module (M), the module name is rcar_rproc.
+
config ST_REMOTEPROC
tristate "ST remoteproc support"
depends on ARCH_STI
diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile
index 0ac256b6c977..5478c7cb9e07 100644
--- a/drivers/remoteproc/Makefile
+++ b/drivers/remoteproc/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o
obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o
qcom_wcnss_pil-y += qcom_wcnss.o
qcom_wcnss_pil-y += qcom_wcnss_iris.o
+obj-$(CONFIG_RCAR_REMOTEPROC) += rcar_rproc.o
obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o
obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o
obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index ff8170dbbc3c..7a096f1891e6 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -34,7 +34,8 @@
#define IMX7D_M4_START (IMX7D_ENABLE_M4 | IMX7D_SW_M4P_RST \
| IMX7D_SW_M4C_RST)
-#define IMX7D_M4_STOP IMX7D_SW_M4C_NON_SCLR_RST
+#define IMX7D_M4_STOP (IMX7D_ENABLE_M4 | IMX7D_SW_M4C_RST | \
+ IMX7D_SW_M4C_NON_SCLR_RST)
/* Address: 0x020D8000 */
#define IMX6SX_SRC_SCR 0x00
@@ -45,7 +46,8 @@
#define IMX6SX_M4_START (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \
| IMX6SX_SW_M4C_RST)
-#define IMX6SX_M4_STOP IMX6SX_SW_M4C_NON_SCLR_RST
+#define IMX6SX_M4_STOP (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4C_RST | \
+ IMX6SX_SW_M4C_NON_SCLR_RST)
#define IMX6SX_M4_RST_MASK (IMX6SX_ENABLE_M4 | IMX6SX_SW_M4P_RST \
| IMX6SX_SW_M4C_NON_SCLR_RST \
| IMX6SX_SW_M4C_RST)
@@ -684,7 +686,7 @@ static int imx_rproc_detect_mode(struct imx_rproc *priv)
return ret;
}
- if (!(val & dcfg->src_stop))
+ if ((val & dcfg->src_mask) != dcfg->src_stop)
priv->rproc->state = RPROC_DETACHED;
return 0;
@@ -804,6 +806,7 @@ static int imx_rproc_remove(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
rproc_del(rproc);
imx_rproc_free_mbox(rproc);
+ destroy_workqueue(priv->workqueue);
rproc_free(rproc);
return 0;
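
Two of the imx_rproc changes share a theme: the STOP values now contain several bits, so testing a single bit with !(val & dcfg->src_stop) no longer answers "is the core held in reset?". Comparing the masked field against the full stop pattern does. A generic sketch of that check:

/* Illustrative: deciding "remote core already running?" from a multi-bit
 * reset/control field. 'mask' selects the field, 'stop' is the value the
 * field holds while the core is held in reset. */
static bool core_is_running(u32 val, u32 mask, u32 stop)
{
	/* !(val & stop) is wrong once 'stop' has more than one bit set:
	 * it only says that *all* stop bits are clear. */
	return (val & mask) != stop;
}
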
diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c
index a356738160a4..9902cce28692 100644
--- a/drivers/remoteproc/ingenic_rproc.c
+++ b/drivers/remoteproc/ingenic_rproc.c
@@ -218,14 +218,13 @@ static int ingenic_rproc_probe(struct platform_device *pdev)
if (vpu->irq < 0)
return vpu->irq;
- ret = devm_request_irq(dev, vpu->irq, vpu_interrupt, 0, "VPU", rproc);
+ ret = devm_request_irq(dev, vpu->irq, vpu_interrupt, IRQF_NO_AUTOEN,
+ "VPU", rproc);
if (ret < 0) {
dev_err(dev, "Failed to request IRQ\n");
return ret;
}
- disable_irq(vpu->irq);
-
ret = devm_rproc_add(dev, rproc);
if (ret) {
dev_err(dev, "Failed to register remote processor\n");
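
The ingenic change replaces request-then-disable with IRQF_NO_AUTOEN, which registers the handler with the line left masked and so closes the window in which an interrupt could fire before the separate disable_irq() call. Sketch of the pattern (handler and names illustrative):

#include <linux/interrupt.h>

static irqreturn_t demo_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int demo_request(struct device *dev, int irq, void *ctx)
{
	/* The line stays masked until an explicit enable_irq(). */
	int ret = devm_request_irq(dev, irq, demo_isr, IRQF_NO_AUTOEN,
				   "demo", ctx);
	if (ret)
		return ret;

	/* ... later, once the consumer is ready: enable_irq(irq); */
	return 0;
}
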
diff --git a/drivers/remoteproc/mtk_scp_ipi.c b/drivers/remoteproc/mtk_scp_ipi.c
index 6dc955ecab80..00f041ebcde6 100644
--- a/drivers/remoteproc/mtk_scp_ipi.c
+++ b/drivers/remoteproc/mtk_scp_ipi.c
@@ -23,7 +23,7 @@
*
* Register an ipi function to receive ipi interrupt from SCP.
*
- * Returns 0 if ipi registers successfully, -error on error.
+ * Return: 0 if ipi registers successfully, -error on error.
*/
int scp_ipi_register(struct mtk_scp *scp,
u32 id,
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(scp_ipi_unlock);
* When the processing completes, IPI handler registered
* by scp_ipi_register will be called in interrupt context.
*
- * Returns 0 if sending data successfully, -error on error.
+ * Return: 0 if sending data successfully, -error on error.
**/
int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
unsigned int wait)
diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c
index 7c007dd7b200..aca21560e20b 100644
--- a/drivers/remoteproc/qcom_pil_info.c
+++ b/drivers/remoteproc/qcom_pil_info.c
@@ -104,7 +104,7 @@ int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size)
return -ENOMEM;
found_unused:
- memcpy_toio(entry, image, PIL_RELOC_NAME_LEN);
+ memcpy_toio(entry, image, strnlen(image, PIL_RELOC_NAME_LEN));
found_existing:
/* Use two writel() as base is only aligned to 4 bytes on odd entries */
writel(base, entry + PIL_RELOC_NAME_LEN);
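
The qcom_pil_info fix bounds the copy by the string's actual length: unconditionally copying PIL_RELOC_NAME_LEN bytes read past the end of short image-name strings. A hedged sketch of the bounded copy (NAME_LEN stands in for PIL_RELOC_NAME_LEN):

#include <linux/io.h>
#include <linux/string.h>

#define NAME_LEN 8	/* stands in for PIL_RELOC_NAME_LEN */

/* Copy at most NAME_LEN bytes, but never past the name's terminator,
 * so short strings don't cause an out-of-bounds read of 'image'. */
static void store_name(void __iomem *entry, const char *image)
{
	memcpy_toio(entry, image, strnlen(image, NAME_LEN));
}
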
diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c
index eada7e34f3af..442a388f8102 100644
--- a/drivers/remoteproc/qcom_q6v5.c
+++ b/drivers/remoteproc/qcom_q6v5.c
@@ -10,6 +10,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/soc/qcom/qcom_aoss.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/remoteproc.h>
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index 03857dc9cdc1..184bb7cdf95a 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -524,6 +524,23 @@ static const struct adsp_data sdm845_adsp_resource_init = {
.ssctl_id = 0x14,
};
+static const struct adsp_data sm6350_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
static const struct adsp_data sm8150_adsp_resource = {
.crash_reason_smem = 423,
.firmware_name = "adsp.mdt",
@@ -612,6 +629,23 @@ static const struct adsp_data sdm845_cdsp_resource_init = {
.ssctl_id = 0x17,
};
+static const struct adsp_data sm6350_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .pas_id = 18,
+ .has_aggre2_clk = false,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mx",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
static const struct adsp_data sm8150_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
@@ -652,6 +686,7 @@ static const struct adsp_data sm8350_cdsp_resource = {
.auto_boot = true,
.proxy_pd_names = (char*[]){
"cx",
+ "mxc",
NULL
},
.load_state = "cdsp",
@@ -804,6 +839,9 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sdm845-adsp-pas", .data = &sdm845_adsp_resource_init},
{ .compatible = "qcom,sdm845-cdsp-pas", .data = &sdm845_cdsp_resource_init},
{ .compatible = "qcom,sdx55-mpss-pas", .data = &sdx55_mpss_resource},
+ { .compatible = "qcom,sm6350-adsp-pas", .data = &sm6350_adsp_resource},
+ { .compatible = "qcom,sm6350-cdsp-pas", .data = &sm6350_cdsp_resource},
+ { .compatible = "qcom,sm6350-mpss-pas", .data = &mpss_resource_init},
{ .compatible = "qcom,sm8150-adsp-pas", .data = &sm8150_adsp_resource},
{ .compatible = "qcom,sm8150-cdsp-pas", .data = &sm8150_cdsp_resource},
{ .compatible = "qcom,sm8150-mpss-pas", .data = &mpss_resource_init},
diff --git a/drivers/remoteproc/rcar_rproc.c b/drivers/remoteproc/rcar_rproc.c
new file mode 100644
index 000000000000..aa86154109c7
--- /dev/null
+++ b/drivers/remoteproc/rcar_rproc.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) IoT.bzh 2021
+ */
+
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pm_runtime.h>
+#include <linux/remoteproc.h>
+#include <linux/reset.h>
+#include <linux/soc/renesas/rcar-rst.h>
+
+#include "remoteproc_internal.h"
+
+struct rcar_rproc {
+ struct reset_control *rst;
+};
+
+static int rcar_rproc_mem_alloc(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ struct device *dev = &rproc->dev;
+ void *va;
+
+ dev_dbg(dev, "map memory: %pa+%zx\n", &mem->dma, mem->len);
+ va = ioremap_wc(mem->dma, mem->len);
+ if (!va) {
+ dev_err(dev, "Unable to map memory region: %pa+%zx\n",
+ &mem->dma, mem->len);
+ return -ENOMEM;
+ }
+
+ /* Update memory entry va */
+ mem->va = va;
+
+ return 0;
+}
+
+static int rcar_rproc_mem_release(struct rproc *rproc,
+ struct rproc_mem_entry *mem)
+{
+ dev_dbg(&rproc->dev, "unmap memory: %pa\n", &mem->dma);
+ iounmap(mem->va);
+
+ return 0;
+}
+
+static int rcar_rproc_prepare(struct rproc *rproc)
+{
+ struct device *dev = rproc->dev.parent;
+ struct device_node *np = dev->of_node;
+ struct of_phandle_iterator it;
+ struct rproc_mem_entry *mem;
+ struct reserved_mem *rmem;
+ u32 da;
+
+ /* Register associated reserved memory regions */
+ of_phandle_iterator_init(&it, np, "memory-region", NULL, 0);
+ while (of_phandle_iterator_next(&it) == 0) {
+
+ rmem = of_reserved_mem_lookup(it.node);
+ if (!rmem) {
+ dev_err(&rproc->dev,
+ "unable to acquire memory-region\n");
+ return -EINVAL;
+ }
+
+ if (rmem->base > U32_MAX)
+ return -EINVAL;
+
+	/* No need to translate pa to da, R-Car uses the same map */
+ da = rmem->base;
+ mem = rproc_mem_entry_init(dev, NULL,
+ rmem->base,
+ rmem->size, da,
+ rcar_rproc_mem_alloc,
+ rcar_rproc_mem_release,
+ it.node->name);
+
+ if (!mem)
+ return -ENOMEM;
+
+ rproc_add_carveout(rproc, mem);
+ }
+
+ return 0;
+}
+
+static int rcar_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw)
+{
+ int ret;
+
+ ret = rproc_elf_load_rsc_table(rproc, fw);
+ if (ret)
+ dev_info(&rproc->dev, "No resource table in elf\n");
+
+ return 0;
+}
+
+static int rcar_rproc_start(struct rproc *rproc)
+{
+ struct rcar_rproc *priv = rproc->priv;
+ int err;
+
+ if (!rproc->bootaddr)
+ return -EINVAL;
+
+ err = rcar_rst_set_rproc_boot_addr(rproc->bootaddr);
+ if (err) {
+ dev_err(&rproc->dev, "failed to set rproc boot addr\n");
+ return err;
+ }
+
+ err = reset_control_deassert(priv->rst);
+ if (err)
+ dev_err(&rproc->dev, "failed to deassert reset\n");
+
+ return err;
+}
+
+static int rcar_rproc_stop(struct rproc *rproc)
+{
+ struct rcar_rproc *priv = rproc->priv;
+ int err;
+
+ err = reset_control_assert(priv->rst);
+ if (err)
+ dev_err(&rproc->dev, "failed to assert reset\n");
+
+ return err;
+}
+
+static struct rproc_ops rcar_rproc_ops = {
+ .prepare = rcar_rproc_prepare,
+ .start = rcar_rproc_start,
+ .stop = rcar_rproc_stop,
+ .load = rproc_elf_load_segments,
+ .parse_fw = rcar_rproc_parse_fw,
+ .find_loaded_rsc_table = rproc_elf_find_loaded_rsc_table,
+ .sanity_check = rproc_elf_sanity_check,
+ .get_boot_addr = rproc_elf_get_boot_addr,
+
+};
+
+static int rcar_rproc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct rcar_rproc *priv;
+ struct rproc *rproc;
+ int ret;
+
+ rproc = devm_rproc_alloc(dev, np->name, &rcar_rproc_ops,
+ NULL, sizeof(*priv));
+ if (!rproc)
+ return -ENOMEM;
+
+ priv = rproc->priv;
+
+ priv->rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(priv->rst)) {
+ ret = PTR_ERR(priv->rst);
+		dev_err_probe(dev, ret, "failed to acquire rproc reset\n");
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+	if (ret) {
+		dev_err(dev, "failed to power up\n");
+		goto pm_disable;
+	}
+
+ dev_set_drvdata(dev, rproc);
+
+ /* Manually start the rproc */
+ rproc->auto_boot = false;
+
+ ret = devm_rproc_add(dev, rproc);
+ if (ret) {
+ dev_err(dev, "rproc_add failed\n");
+ goto pm_disable;
+ }
+
+ return 0;
+
+pm_disable:
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int rcar_rproc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static const struct of_device_id rcar_rproc_of_match[] = {
+ { .compatible = "renesas,rcar-cr7" },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, rcar_rproc_of_match);
+
+static struct platform_driver rcar_rproc_driver = {
+ .probe = rcar_rproc_probe,
+ .remove = rcar_rproc_remove,
+ .driver = {
+ .name = "rcar-rproc",
+ .of_match_table = rcar_rproc_of_match,
+ },
+};
+
+module_platform_driver(rcar_rproc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Renesas R-Car Gen3 remote processor control driver");
+MODULE_AUTHOR("Julien Massot <julien.massot@iot.bzh>");
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 775df165eb45..69f51acf235e 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -577,8 +577,8 @@ static int rproc_handle_vdev(struct rproc *rproc, void *ptr,
dma_get_mask(rproc->dev.parent));
if (ret) {
dev_warn(dev,
- "Failed to set DMA mask %llx. Trying to continue... %x\n",
- dma_get_mask(rproc->dev.parent), ret);
+ "Failed to set DMA mask %llx. Trying to continue... (%pe)\n",
+ dma_get_mask(rproc->dev.parent), ERR_PTR(ret));
}
/* parse the vrings */
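
The remoteproc_core tweak prints the errno symbolically: the %pe format specifier, fed an ERR_PTR()-wrapped error code, renders "-ENOMEM"-style names instead of a raw hex value. Minimal sketch:

#include <linux/err.h>
#include <linux/printk.h>

static void report(int ret)
{
	/* Prints e.g. "request failed: -ENOMEM" rather than a number. */
	pr_warn("request failed: %pe\n", ERR_PTR(ret));
}
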
diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c
index c892f433a323..4b093420d98a 100644
--- a/drivers/remoteproc/remoteproc_coredump.c
+++ b/drivers/remoteproc/remoteproc_coredump.c
@@ -166,7 +166,7 @@ static void rproc_copy_segment(struct rproc *rproc, void *dest,
memset(dest, 0xff, size);
} else {
if (is_iomem)
- memcpy_fromio(dest, ptr, size);
+ memcpy_fromio(dest, (void const __iomem *)ptr, size);
else
memcpy(dest, ptr, size);
}
diff --git a/drivers/remoteproc/st_slim_rproc.c b/drivers/remoteproc/st_slim_rproc.c
index 22096adc1ad3..4ed9467897e5 100644
--- a/drivers/remoteproc/st_slim_rproc.c
+++ b/drivers/remoteproc/st_slim_rproc.c
@@ -216,7 +216,7 @@ static const struct rproc_ops slim_rproc_ops = {
* obtains and enables any clocks required by the SLIM core and also
* ioremaps the various IO.
*
- * Returns st_slim_rproc pointer or PTR_ERR() on error.
+ * Return: st_slim_rproc pointer or PTR_ERR() on error.
*/
struct st_slim_rproc *st_slim_rproc_alloc(struct platform_device *pdev,
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index b643efcf995a..7d782ed9e589 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -494,7 +494,7 @@ static int stm32_rproc_stop(struct rproc *rproc)
int err, idx;
/* request shutdown of the remote processor */
- if (rproc->state != RPROC_OFFLINE) {
+ if (rproc->state != RPROC_OFFLINE && rproc->state != RPROC_CRASHED) {
idx = stm32_rproc_mbox_idx(rproc, STM32_MBX_SHUTDOWN);
if (idx >= 0 && ddata->mb[idx].chan) {
err = mbox_send_message(ddata->mb[idx].chan, "detach");
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index c352fa277c8d..939c5d90b562 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -767,6 +767,7 @@ static const struct k3_dsp_dev_data c71_data = {
static const struct of_device_id k3_dsp_of_match[] = {
{ .compatible = "ti,j721e-c66-dsp", .data = &c66_data, },
{ .compatible = "ti,j721e-c71-dsp", .data = &c71_data, },
+ { .compatible = "ti,j721s2-c71-dsp", .data = &c71_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_dsp_of_match);
diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c
index 6499302d00c3..969531c05b13 100644
--- a/drivers/remoteproc/ti_k3_r5_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c
@@ -1535,7 +1535,7 @@ static const struct k3_r5_soc_data am65_j721e_soc_data = {
.single_cpu_mode = false,
};
-static const struct k3_r5_soc_data j7200_soc_data = {
+static const struct k3_r5_soc_data j7200_j721s2_soc_data = {
.tcm_is_double = true,
.tcm_ecc_autoinit = true,
.single_cpu_mode = false,
@@ -1550,8 +1550,9 @@ static const struct k3_r5_soc_data am64_soc_data = {
static const struct of_device_id k3_r5_of_match[] = {
{ .compatible = "ti,am654-r5fss", .data = &am65_j721e_soc_data, },
{ .compatible = "ti,j721e-r5fss", .data = &am65_j721e_soc_data, },
- { .compatible = "ti,j7200-r5fss", .data = &j7200_soc_data, },
+ { .compatible = "ti,j7200-r5fss", .data = &j7200_j721s2_soc_data, },
{ .compatible = "ti,am64-r5fss", .data = &am64_soc_data, },
+ { .compatible = "ti,j721s2-r5fss", .data = &j7200_j721s2_soc_data, },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, k3_r5_of_match);
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 3f377a795b33..1030cfa80e04 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -427,7 +427,7 @@ static void qcom_glink_handle_intent_req_ack(struct qcom_glink *glink,
* Allocates a local channel id and sends a RPM_CMD_OPEN message to the remote.
* Will return with refcount held, regardless of outcome.
*
- * Returns 0 on success, negative errno otherwise.
+ * Return: 0 on success, negative errno otherwise.
*/
static int qcom_glink_send_open_req(struct qcom_glink *glink,
struct glink_channel *channel)
diff --git a/drivers/rpmsg/qcom_smd.c b/drivers/rpmsg/qcom_smd.c
index 8da1b5cb31b3..540e027f08c4 100644
--- a/drivers/rpmsg/qcom_smd.c
+++ b/drivers/rpmsg/qcom_smd.c
@@ -1467,7 +1467,7 @@ ATTRIBUTE_GROUPS(qcom_smd_edge);
* @parent: parent device for the edge
* @node: device_node describing the edge
*
- * Returns an edge reference, or negative ERR_PTR() on failure.
+ * Return: an edge reference, or negative ERR_PTR() on failure.
*/
struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
struct device_node *node)
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index b5907b80727c..5663cf799c95 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -9,6 +9,9 @@
* Based on rpmsg performance statistics driver by Michal Simek, which in turn
* was based on TI & Google OMX rpmsg driver.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
@@ -90,7 +93,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data)
/* wake up any blocked readers */
wake_up_interruptible(&eptdev->readq);
- device_del(&eptdev->dev);
+ cdev_device_del(&eptdev->cdev, &eptdev->dev);
put_device(&eptdev->dev);
return 0;
@@ -333,7 +336,6 @@ static void rpmsg_eptdev_release_device(struct device *dev)
ida_simple_remove(&rpmsg_ept_ida, dev->id);
ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
- cdev_del(&eptdev->cdev);
kfree(eptdev);
}
@@ -378,19 +380,13 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
dev->id = ret;
dev_set_name(dev, "rpmsg%d", ret);
- ret = cdev_add(&eptdev->cdev, dev->devt, 1);
+ ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
if (ret)
goto free_ept_ida;
/* We can now rely on the release function for cleanup */
dev->release = rpmsg_eptdev_release_device;
- ret = device_add(dev);
- if (ret) {
- dev_err(dev, "device_add failed: %d\n", ret);
- put_device(dev);
- }
-
return ret;
free_ept_ida:
@@ -459,7 +455,6 @@ static void rpmsg_ctrldev_release_device(struct device *dev)
ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
- cdev_del(&ctrldev->cdev);
kfree(ctrldev);
}
@@ -494,19 +489,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
dev->id = ret;
dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
- ret = cdev_add(&ctrldev->cdev, dev->devt, 1);
+ ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
if (ret)
goto free_ctrl_ida;
/* We can now rely on the release function for cleanup */
dev->release = rpmsg_ctrldev_release_device;
- ret = device_add(dev);
- if (ret) {
- dev_err(&rpdev->dev, "device_add failed: %d\n", ret);
- put_device(dev);
- }
-
dev_set_drvdata(&rpdev->dev, ctrldev);
return ret;
@@ -532,7 +521,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
if (ret)
dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
- device_del(&ctrldev->dev);
+ cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
put_device(&ctrldev->dev);
}
@@ -550,7 +539,7 @@ static int rpmsg_chrdev_init(void)
ret = alloc_chrdev_region(&rpmsg_major, 0, RPMSG_DEV_MAX, "rpmsg");
if (ret < 0) {
- pr_err("rpmsg: failed to allocate char dev region\n");
+ pr_err("failed to allocate char dev region\n");
return ret;
}
@@ -563,7 +552,7 @@ static int rpmsg_chrdev_init(void)
ret = register_rpmsg_driver(&rpmsg_chrdev_driver);
if (ret < 0) {
- pr_err("rpmsgchr: failed to register rpmsg driver\n");
+ pr_err("failed to register rpmsg driver\n");
class_destroy(rpmsg_class);
unregister_chrdev_region(rpmsg_major, RPMSG_DEV_MAX);
}
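
The rpmsg_char rework adopts the combined helpers: cdev_device_add() registers the cdev and adds the device in one step, tying the cdev's lifetime to the device's kobject, while cdev_device_del() undoes both. That is why the release callbacks above no longer call cdev_del() by hand. A hedged skeleton of the pattern (structure name illustrative):

#include <linux/cdev.h>
#include <linux/device.h>

/* Illustrative pairing; 'my_dev' embeds both objects as rpmsg_char does. */
struct my_dev {
	struct device dev;
	struct cdev cdev;
};

static int my_dev_register(struct my_dev *md)
{
	/* Adds the cdev and the device together; on failure nothing is
	 * left half-registered. */
	return cdev_device_add(&md->cdev, &md->dev);
}

static void my_dev_unregister(struct my_dev *md)
{
	cdev_device_del(&md->cdev, &md->dev);
	put_device(&md->dev);	/* release() frees the structure */
}
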
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index d3eb60059ef1..d9e612f4f0f2 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -26,7 +26,7 @@
* @rpdev: rpmsg device
* @chinfo: channel_info to bind
*
- * Returns a pointer to the new rpmsg device on success, or NULL on error.
+ * Return: a pointer to the new rpmsg device on success, or NULL on error.
*/
struct rpmsg_device *rpmsg_create_channel(struct rpmsg_device *rpdev,
struct rpmsg_channel_info *chinfo)
@@ -48,7 +48,7 @@ EXPORT_SYMBOL(rpmsg_create_channel);
* @rpdev: rpmsg device
* @chinfo: channel_info to bind
*
- * Returns 0 on success or an appropriate error value.
+ * Return: 0 on success or an appropriate error value.
*/
int rpmsg_release_channel(struct rpmsg_device *rpdev,
struct rpmsg_channel_info *chinfo)
@@ -102,7 +102,7 @@ EXPORT_SYMBOL(rpmsg_release_channel);
* dynamically assign them an available rpmsg address (drivers should have
* a very good reason why not to always use RPMSG_ADDR_ANY here).
*
- * Returns a pointer to the endpoint on success, or NULL on error.
+ * Return: a pointer to the endpoint on success, or NULL on error.
*/
struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_device *rpdev,
rpmsg_rx_cb_t cb, void *priv,
@@ -146,7 +146,7 @@ EXPORT_SYMBOL(rpmsg_destroy_ept);
*
* Can only be called from process context (for now).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
int rpmsg_send(struct rpmsg_endpoint *ept, void *data, int len)
{
@@ -175,7 +175,7 @@ EXPORT_SYMBOL(rpmsg_send);
*
* Can only be called from process context (for now).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
int rpmsg_sendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
@@ -206,7 +206,7 @@ EXPORT_SYMBOL(rpmsg_sendto);
*
* Can only be called from process context (for now).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
int rpmsg_send_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len)
@@ -235,7 +235,7 @@ EXPORT_SYMBOL(rpmsg_send_offchannel);
*
* Can only be called from process context (for now).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
int rpmsg_trysend(struct rpmsg_endpoint *ept, void *data, int len)
{
@@ -263,7 +263,7 @@ EXPORT_SYMBOL(rpmsg_trysend);
*
* Can only be called from process context (for now).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst)
{
@@ -282,7 +282,7 @@ EXPORT_SYMBOL(rpmsg_trysendto);
* @filp: file for poll_wait()
* @wait: poll_table for poll_wait()
*
- * Returns mask representing the current state of the endpoint's send buffers
+ * Return: mask representing the current state of the endpoint's send buffers
*/
__poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
poll_table *wait)
@@ -313,7 +313,7 @@ EXPORT_SYMBOL(rpmsg_poll);
*
* Can only be called from process context (for now).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst,
void *data, int len)
@@ -540,13 +540,25 @@ static int rpmsg_dev_probe(struct device *dev)
err = rpdrv->probe(rpdev);
if (err) {
dev_err(dev, "%s: failed: %d\n", __func__, err);
- if (ept)
- rpmsg_destroy_ept(ept);
- goto out;
+ goto destroy_ept;
}
- if (ept && rpdev->ops->announce_create)
+ if (ept && rpdev->ops->announce_create) {
err = rpdev->ops->announce_create(rpdev);
+ if (err) {
+ dev_err(dev, "failed to announce creation\n");
+ goto remove_rpdev;
+ }
+ }
+
+ return 0;
+
+remove_rpdev:
+ if (rpdrv->remove)
+ rpdrv->remove(rpdev);
+destroy_ept:
+ if (ept)
+ rpmsg_destroy_ept(ept);
out:
return err;
}
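
The rpmsg_dev_probe() rework is the standard goto ladder: each failure jumps to the label that unwinds exactly the steps already completed, in reverse order, so a late failure (the announce) also reverses the driver's own probe. A generic sketch of the shape; the step_*() helpers are placeholders, not kernel API:

/* Placeholder steps; any probe with ordered setup fits this shape. */
int step_one(void);
int step_two(void);
int step_three(void);
void undo_step_two(void);
void undo_step_one(void);

static int probe_sketch(void)
{
	int err;

	err = step_one();	/* e.g. create the endpoint */
	if (err)
		return err;

	err = step_two();	/* e.g. call the driver's probe */
	if (err)
		goto undo_one;

	err = step_three();	/* e.g. announce the creation */
	if (err)
		goto undo_two;

	return 0;

undo_two:
	undo_step_two();
undo_one:
	undo_step_one();
	return err;
}
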
@@ -623,7 +635,7 @@ EXPORT_SYMBOL(rpmsg_unregister_device);
* @rpdrv: pointer to a struct rpmsg_driver
* @owner: owning module/driver
*
- * Returns 0 on success, and an appropriate error value on failure.
+ * Return: 0 on success, and an appropriate error value on failure.
*/
int __register_rpmsg_driver(struct rpmsg_driver *rpdrv, struct module *owner)
{
@@ -637,7 +649,7 @@ EXPORT_SYMBOL(__register_rpmsg_driver);
* unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus
* @rpdrv: pointer to a struct rpmsg_driver
*
- * Returns 0 on success, and an appropriate error value on failure.
+ * Return: 0 on success, and an appropriate error value on failure.
*/
void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv)
{
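
The many one-line "Returns ..." to "Return: ..." changes in these rpmsg hunks are kernel-doc conformance fixes: scripts/kernel-doc only recognizes a dedicated "Return:" section, so free-form sentences were invisible to the generated documentation. For reference, the expected shape (function and parameters illustrative):

/**
 * rpmsg_send_sketch() - illustrative kernel-doc comment
 * @ept: the rpmsg endpoint
 * @data: payload buffer
 * @len: payload length
 *
 * Longer description goes here.
 *
 * Return: 0 on success and an appropriate error value on failure.
 */
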
diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c
index 9c112aa65040..ac764e04c898 100644
--- a/drivers/rpmsg/virtio_rpmsg_bus.c
+++ b/drivers/rpmsg/virtio_rpmsg_bus.c
@@ -547,7 +547,7 @@ static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
* should use the appropriate rpmsg_{try}send{to, _offchannel} API
* (see include/linux/rpmsg.h).
*
- * Returns 0 on success and an appropriate error value on failure.
+ * Return: 0 on success and an appropriate error value on failure.
*/
static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev,
u32 src, u32 dst,
@@ -1024,7 +1024,7 @@ static void rpmsg_remove(struct virtio_device *vdev)
size_t total_buf_space = vrp->num_bufs * vrp->buf_size;
int ret;
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device);
if (ret)
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 058e56a10ab8..d85a3c31347c 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1216,6 +1216,17 @@ config RTC_DRV_V3020
This driver can also be built as a module. If so, the module
will be called rtc-v3020.
+config RTC_DRV_GAMECUBE
+ tristate "Nintendo GameCube, Wii and Wii U RTC"
+ depends on GAMECUBE || WII || COMPILE_TEST
+ select REGMAP
+ help
+ If you say yes here you will get support for the RTC subsystem
+ of the Nintendo GameCube, Wii and Wii U.
+
+ This driver can also be built as a module. If so, the module
+ will be called "rtc-gamecube".
+
config RTC_DRV_WM831X
tristate "Wolfson Microelectronics WM831x RTC"
depends on MFD_WM831X
@@ -1444,6 +1455,19 @@ config RTC_DRV_SH
To compile this driver as a module, choose M here: the
module will be called rtc-sh.
+config RTC_DRV_SUNPLUS
+ tristate "Sunplus SP7021 RTC"
+ depends on SOC_SP7021
+ help
+	  Say 'yes' to get support for the real-time clock present in
+	  Sunplus SP7021 - a SoC for industrial applications. It provides
+	  RTC status checks, timer/alarm functionality, user data
+	  retention while the battery is above 2.5V, RTC power status
+	  checks and battery charging.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-sunplus.
+
config RTC_DRV_VR41XX
tristate "NEC VR41XX"
depends on CPU_VR41XX || COMPILE_TEST
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 678a8ef4abae..e92f3e943245 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -111,6 +111,7 @@ obj-$(CONFIG_RTC_DRV_MT7622) += rtc-mt7622.o
obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
obj-$(CONFIG_RTC_DRV_MXC_V2) += rtc-mxc_v2.o
+obj-$(CONFIG_RTC_DRV_GAMECUBE) += rtc-gamecube.o
obj-$(CONFIG_RTC_DRV_NTXEC) += rtc-ntxec.o
obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o
@@ -165,6 +166,7 @@ obj-$(CONFIG_RTC_DRV_STM32) += rtc-stm32.o
obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o
obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o
obj-$(CONFIG_RTC_DRV_SUN6I) += rtc-sun6i.o
+obj-$(CONFIG_RTC_DRV_SUNPLUS) += rtc-sunplus.o
obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o
obj-$(CONFIG_RTC_DRV_TEGRA) += rtc-tegra.o
obj-$(CONFIG_RTC_DRV_TEST) += rtc-test.o
diff --git a/drivers/rtc/dev.c b/drivers/rtc/dev.c
index e104972a28fd..69325aeede1a 100644
--- a/drivers/rtc/dev.c
+++ b/drivers/rtc/dev.c
@@ -391,14 +391,14 @@ static long rtc_dev_ioctl(struct file *file,
}
switch(param.param) {
- long offset;
case RTC_PARAM_FEATURES:
if (param.index != 0)
err = -EINVAL;
param.uvalue = rtc->features[0];
break;
- case RTC_PARAM_CORRECTION:
+ case RTC_PARAM_CORRECTION: {
+ long offset;
mutex_unlock(&rtc->ops_lock);
if (param.index != 0)
return -EINVAL;
@@ -407,7 +407,7 @@ static long rtc_dev_ioctl(struct file *file,
if (err == 0)
param.svalue = offset;
break;
-
+ }
default:
if (rtc->ops->param_get)
err = rtc->ops->param_get(rtc->dev.parent, &param);
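
The rtc dev.c change fixes a C scoping wart: a declaration placed between "switch (...) {" and the first "case" label sits in dead space that no jump into the switch ever initializes, and compilers warn about it; giving the case its own braced block scopes the variable properly. Minimal illustration:

static long demo(int selector)
{
	switch (selector) {
	case 1: {
		/* Declarations belong in a braced block owned by the case,
		 * not in the dead space before the first label. */
		long offset = 42;

		return offset;
	}
	default:
		return -1;
	}
}
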
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 4eb53412b808..7c006c2b125f 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -222,6 +222,8 @@ static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
static int cmos_read_time(struct device *dev, struct rtc_time *t)
{
+ int ret;
+
/*
* If pm_trace abused the RTC for storage, set the timespec to 0,
* which tells the caller that this RTC value is unusable.
@@ -229,7 +231,12 @@ static int cmos_read_time(struct device *dev, struct rtc_time *t)
if (!pm_trace_rtc_valid())
return -EIO;
- mc146818_get_time(t);
+ ret = mc146818_get_time(t);
+ if (ret < 0) {
+ dev_err_ratelimited(dev, "unable to read current time\n");
+ return ret;
+ }
+
return 0;
}
@@ -242,10 +249,46 @@ static int cmos_set_time(struct device *dev, struct rtc_time *t)
return mc146818_set_time(t);
}
+struct cmos_read_alarm_callback_param {
+ struct cmos_rtc *cmos;
+ struct rtc_time *time;
+ unsigned char rtc_control;
+};
+
+static void cmos_read_alarm_callback(unsigned char __always_unused seconds,
+ void *param_in)
+{
+ struct cmos_read_alarm_callback_param *p =
+ (struct cmos_read_alarm_callback_param *)param_in;
+ struct rtc_time *time = p->time;
+
+ time->tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
+ time->tm_min = CMOS_READ(RTC_MINUTES_ALARM);
+ time->tm_hour = CMOS_READ(RTC_HOURS_ALARM);
+
+ if (p->cmos->day_alrm) {
+ /* ignore upper bits on readback per ACPI spec */
+ time->tm_mday = CMOS_READ(p->cmos->day_alrm) & 0x3f;
+ if (!time->tm_mday)
+ time->tm_mday = -1;
+
+ if (p->cmos->mon_alrm) {
+ time->tm_mon = CMOS_READ(p->cmos->mon_alrm);
+ if (!time->tm_mon)
+ time->tm_mon = -1;
+ }
+ }
+
+ p->rtc_control = CMOS_READ(RTC_CONTROL);
+}
+
static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
- unsigned char rtc_control;
+ struct cmos_read_alarm_callback_param p = {
+ .cmos = cmos,
+ .time = &t->time,
+ };
	/* This is not only an rtc_op, but is also called directly */
if (!is_valid_irq(cmos->irq))
@@ -256,28 +299,18 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
* the future.
*/
- spin_lock_irq(&rtc_lock);
- t->time.tm_sec = CMOS_READ(RTC_SECONDS_ALARM);
- t->time.tm_min = CMOS_READ(RTC_MINUTES_ALARM);
- t->time.tm_hour = CMOS_READ(RTC_HOURS_ALARM);
-
- if (cmos->day_alrm) {
- /* ignore upper bits on readback per ACPI spec */
- t->time.tm_mday = CMOS_READ(cmos->day_alrm) & 0x3f;
- if (!t->time.tm_mday)
- t->time.tm_mday = -1;
-
- if (cmos->mon_alrm) {
- t->time.tm_mon = CMOS_READ(cmos->mon_alrm);
- if (!t->time.tm_mon)
- t->time.tm_mon = -1;
- }
- }
-
- rtc_control = CMOS_READ(RTC_CONTROL);
- spin_unlock_irq(&rtc_lock);
+ /* Some Intel chipsets disconnect the alarm registers when the clock
+ * update is in progress - during this time reads return bogus values
+ * and writes may fail silently. See for example "7th Generation Intel®
+ * Processor Family I/O for U/Y Platforms [...] Datasheet", section
+ * 27.7.1
+ *
+ * Use the mc146818_avoid_UIP() function to avoid this.
+ */
+ if (!mc146818_avoid_UIP(cmos_read_alarm_callback, &p))
+ return -EIO;
- if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ if (!(p.rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
if (((unsigned)t->time.tm_sec) < 0x60)
t->time.tm_sec = bcd2bin(t->time.tm_sec);
else
@@ -306,7 +339,7 @@ static int cmos_read_alarm(struct device *dev, struct rtc_wkalrm *t)
}
}
- t->enabled = !!(rtc_control & RTC_AIE);
+ t->enabled = !!(p.rtc_control & RTC_AIE);
t->pending = 0;
return 0;
@@ -437,10 +470,57 @@ static int cmos_validate_alarm(struct device *dev, struct rtc_wkalrm *t)
return 0;
}
+struct cmos_set_alarm_callback_param {
+ struct cmos_rtc *cmos;
+ unsigned char mon, mday, hrs, min, sec;
+ struct rtc_wkalrm *t;
+};
+
+/* Note: this function may be executed by mc146818_avoid_UIP() more than
+ * once.
+ */
+static void cmos_set_alarm_callback(unsigned char __always_unused seconds,
+ void *param_in)
+{
+ struct cmos_set_alarm_callback_param *p =
+ (struct cmos_set_alarm_callback_param *)param_in;
+
+ /* next rtc irq must not be from previous alarm setting */
+ cmos_irq_disable(p->cmos, RTC_AIE);
+
+ /* update alarm */
+ CMOS_WRITE(p->hrs, RTC_HOURS_ALARM);
+ CMOS_WRITE(p->min, RTC_MINUTES_ALARM);
+ CMOS_WRITE(p->sec, RTC_SECONDS_ALARM);
+
+ /* the system may support an "enhanced" alarm */
+ if (p->cmos->day_alrm) {
+ CMOS_WRITE(p->mday, p->cmos->day_alrm);
+ if (p->cmos->mon_alrm)
+ CMOS_WRITE(p->mon, p->cmos->mon_alrm);
+ }
+
+ if (use_hpet_alarm()) {
+ /*
+ * FIXME the HPET alarm glue currently ignores day_alrm
+ * and mon_alrm ...
+ */
+ hpet_set_alarm_time(p->t->time.tm_hour, p->t->time.tm_min,
+ p->t->time.tm_sec);
+ }
+
+ if (p->t->enabled)
+ cmos_irq_enable(p->cmos, RTC_AIE);
+}
+
static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
{
struct cmos_rtc *cmos = dev_get_drvdata(dev);
- unsigned char mon, mday, hrs, min, sec, rtc_control;
+ struct cmos_set_alarm_callback_param p = {
+ .cmos = cmos,
+ .t = t
+ };
+ unsigned char rtc_control;
int ret;
/* This is not only an rtc_op, but is also called directly */
@@ -451,52 +531,33 @@ static int cmos_set_alarm(struct device *dev, struct rtc_wkalrm *t)
if (ret < 0)
return ret;
- mon = t->time.tm_mon + 1;
- mday = t->time.tm_mday;
- hrs = t->time.tm_hour;
- min = t->time.tm_min;
- sec = t->time.tm_sec;
+ p.mon = t->time.tm_mon + 1;
+ p.mday = t->time.tm_mday;
+ p.hrs = t->time.tm_hour;
+ p.min = t->time.tm_min;
+ p.sec = t->time.tm_sec;
+ spin_lock_irq(&rtc_lock);
rtc_control = CMOS_READ(RTC_CONTROL);
+ spin_unlock_irq(&rtc_lock);
+
if (!(rtc_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
/* Writing 0xff means "don't care" or "match all". */
- mon = (mon <= 12) ? bin2bcd(mon) : 0xff;
- mday = (mday >= 1 && mday <= 31) ? bin2bcd(mday) : 0xff;
- hrs = (hrs < 24) ? bin2bcd(hrs) : 0xff;
- min = (min < 60) ? bin2bcd(min) : 0xff;
- sec = (sec < 60) ? bin2bcd(sec) : 0xff;
- }
-
- spin_lock_irq(&rtc_lock);
-
- /* next rtc irq must not be from previous alarm setting */
- cmos_irq_disable(cmos, RTC_AIE);
-
- /* update alarm */
- CMOS_WRITE(hrs, RTC_HOURS_ALARM);
- CMOS_WRITE(min, RTC_MINUTES_ALARM);
- CMOS_WRITE(sec, RTC_SECONDS_ALARM);
-
- /* the system may support an "enhanced" alarm */
- if (cmos->day_alrm) {
- CMOS_WRITE(mday, cmos->day_alrm);
- if (cmos->mon_alrm)
- CMOS_WRITE(mon, cmos->mon_alrm);
+ p.mon = (p.mon <= 12) ? bin2bcd(p.mon) : 0xff;
+ p.mday = (p.mday >= 1 && p.mday <= 31) ? bin2bcd(p.mday) : 0xff;
+ p.hrs = (p.hrs < 24) ? bin2bcd(p.hrs) : 0xff;
+ p.min = (p.min < 60) ? bin2bcd(p.min) : 0xff;
+ p.sec = (p.sec < 60) ? bin2bcd(p.sec) : 0xff;
}
- if (use_hpet_alarm()) {
- /*
- * FIXME the HPET alarm glue currently ignores day_alrm
- * and mon_alrm ...
- */
- hpet_set_alarm_time(t->time.tm_hour, t->time.tm_min,
- t->time.tm_sec);
- }
-
- if (t->enabled)
- cmos_irq_enable(cmos, RTC_AIE);
-
- spin_unlock_irq(&rtc_lock);
+ /*
+ * Some Intel chipsets disconnect the alarm registers when the clock
+ * update is in progress - during this time writes fail silently.
+ *
+ * Use mc146818_avoid_UIP() to avoid this.
+ */
+ if (!mc146818_avoid_UIP(cmos_set_alarm_callback, &p))
+ return -EIO;
cmos->alarm_expires = rtc_tm_to_time64(&t->time);
@@ -790,16 +851,14 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
rename_region(ports, dev_name(&cmos_rtc.rtc->dev));
- spin_lock_irq(&rtc_lock);
-
- /* Ensure that the RTC is accessible. Bit 6 must be 0! */
- if ((CMOS_READ(RTC_VALID) & 0x40) != 0) {
- spin_unlock_irq(&rtc_lock);
- dev_warn(dev, "not accessible\n");
+ if (!mc146818_does_rtc_work()) {
+ dev_warn(dev, "broken or not accessible\n");
retval = -ENXIO;
goto cleanup1;
}
+ spin_lock_irq(&rtc_lock);
+
if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) {
/* force periodic irq to CMOS reset default of 1024Hz;
*
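
A minimal sketch of how a driver can wrap a register-window read in mc146818_avoid_UIP(), following the callback-plus-param pattern this patch introduces for cmos_read_alarm(); the struct and function names here are hypothetical:

	#include <linux/mc146818rtc.h>

	struct my_read_param {			/* hypothetical example type */
		unsigned char minutes;
	};

	static void my_read_callback(unsigned char seconds, void *param_in)
	{
		struct my_read_param *p = param_in;

		/* Runs under rtc_lock with UIP clear; may run more than once. */
		p->minutes = CMOS_READ(RTC_MINUTES);
	}

	static int my_read_minutes(unsigned char *minutes)
	{
		struct my_read_param p;

		if (!mc146818_avoid_UIP(my_read_callback, &p))
			return -EIO;	/* update window never cleared */

		*minutes = p.minutes;
		return 0;
	}
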
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index d4b72a9fa2ba..ee2efb496174 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -475,12 +475,14 @@ static int da9063_rtc_probe(struct platform_device *pdev)
da9063_data_to_tm(data, &rtc->alarm_time, rtc);
rtc->rtc_sync = false;
- /*
- * TODO: some models have alarms on a minute boundary but still support
- * real hardware interrupts. Add this once the core supports it.
- */
- if (config->rtc_data_start != RTC_SEC)
- rtc->rtc_dev->uie_unsupported = 1;
+ if (config->rtc_data_start != RTC_SEC) {
+ set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->rtc_dev->features);
+ /*
+ * TODO: some models have alarms on a minute boundary but still
+ * support real hardware interrupts.
+ */
+ clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->rtc_dev->features);
+ }
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
if (irq_alarm < 0)
@@ -494,6 +496,8 @@ static int da9063_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
irq_alarm, ret);
+ device_init_wakeup(&pdev->dev, true);
+
return devm_rtc_register_device(rtc->rtc_dev);
}
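
The da9063 hunk above is an instance of the rtc core's feature-bit interface replacing the old uie_unsupported flag. A hedged sketch of the idiom on an allocated rtc_device, not taken from this patch:

	#include <linux/rtc.h>

	static void example_configure_features(struct rtc_device *rtc)
	{
		/* alarms only match down to the minute */
		set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rtc->features);
		/* no 1 Hz update interrupt, so UIE cannot be offered */
		clear_bit(RTC_FEATURE_UPDATE_INTERRUPT, rtc->features);
	}
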
diff --git a/drivers/rtc/rtc-ftrtc010.c b/drivers/rtc/rtc-ftrtc010.c
index ad3add5db4c8..53bb08fe1cd4 100644
--- a/drivers/rtc/rtc-ftrtc010.c
+++ b/drivers/rtc/rtc-ftrtc010.c
@@ -141,11 +141,9 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
}
}
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res)
- return -ENODEV;
-
- rtc->rtc_irq = res->start;
+ rtc->rtc_irq = platform_get_irq(pdev, 0);
+ if (rtc->rtc_irq < 0)
+ return rtc->rtc_irq;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
diff --git a/drivers/rtc/rtc-gamecube.c b/drivers/rtc/rtc-gamecube.c
new file mode 100644
index 000000000000..f717b36f4738
--- /dev/null
+++ b/drivers/rtc/rtc-gamecube.c
@@ -0,0 +1,377 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Nintendo GameCube, Wii and Wii U RTC driver
+ *
+ * This driver is for the MX23L4005, more specifically its real-time clock and
+ * SRAM storage. The value returned by the RTC counter must be added to the
+ * offset stored in a bias register in SRAM (on the GameCube and Wii) or in
+ * /config/rtc.xml (on the Wii U). The latter being very impractical to access
+ * from Linux, this driver assumes the bootloader has read it and stored it in
+ * SRAM, as on the other two consoles.
+ *
+ * This device sits on a bus named EXI (which is similar to SPI), channel 0,
+ * device 1. This driver assumes it is the only user of the EXI bus, which is
+ * currently the case, but it would have to be reworked to add support for
+ * other GameCube hardware exposed on this bus.
+ *
+ * References:
+ * - https://wiiubrew.org/wiki/Hardware/RTC
+ * - https://wiibrew.org/wiki/MX23L4005
+ *
+ * Copyright (C) 2018 rw-r-r-0644
+ * Copyright (C) 2021 Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>
+ *
+ * Based on rtc-gcn.c
+ * Copyright (C) 2004-2009 The GameCube Linux Team
+ * Copyright (C) 2005,2008,2009 Albert Herranz
+ * Based on gamecube_time.c from Torben Nielsen.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+#include <linux/time.h>
+
+/* EXI registers */
+#define EXICSR 0
+#define EXICR 12
+#define EXIDATA 16
+
+/* EXI register values */
+#define EXICSR_DEV 0x380
+ #define EXICSR_DEV1 0x100
+#define EXICSR_CLK 0x070
+ #define EXICSR_CLK_1MHZ 0x000
+ #define EXICSR_CLK_2MHZ 0x010
+ #define EXICSR_CLK_4MHZ 0x020
+ #define EXICSR_CLK_8MHZ 0x030
+ #define EXICSR_CLK_16MHZ 0x040
+ #define EXICSR_CLK_32MHZ 0x050
+#define EXICSR_INT 0x008
+ #define EXICSR_INTSET 0x008
+
+#define EXICR_TSTART 0x001
+#define EXICR_TRSMODE 0x002
+ #define EXICR_TRSMODE_IMM 0x000
+#define EXICR_TRSTYPE 0x00C
+ #define EXICR_TRSTYPE_R 0x000
+ #define EXICR_TRSTYPE_W 0x004
+#define EXICR_TLEN 0x030
+ #define EXICR_TLEN32 0x030
+
+/* EXI registers values to access the RTC */
+#define RTC_EXICSR (EXICSR_DEV1 | EXICSR_CLK_8MHZ | EXICSR_INTSET)
+#define RTC_EXICR_W (EXICR_TSTART | EXICR_TRSMODE_IMM | EXICR_TRSTYPE_W | EXICR_TLEN32)
+#define RTC_EXICR_R (EXICR_TSTART | EXICR_TRSMODE_IMM | EXICR_TRSTYPE_R | EXICR_TLEN32)
+#define RTC_EXIDATA_W 0x80000000
+
+/* RTC registers */
+#define RTC_COUNTER 0x200000
+#define RTC_SRAM 0x200001
+#define RTC_SRAM_BIAS 0x200004
+#define RTC_SNAPSHOT 0x204000
+#define RTC_ONTMR 0x210000
+#define RTC_OFFTMR 0x210001
+#define RTC_TEST0 0x210004
+#define RTC_TEST1 0x210005
+#define RTC_TEST2 0x210006
+#define RTC_TEST3 0x210007
+#define RTC_CONTROL0 0x21000c
+#define RTC_CONTROL1 0x21000d
+
+/* RTC flags */
+#define RTC_CONTROL0_UNSTABLE_POWER 0x00000800
+#define RTC_CONTROL0_LOW_BATTERY 0x00000200
+
+struct priv {
+ struct regmap *regmap;
+ void __iomem *iob;
+ u32 rtc_bias;
+};
+
+static int exi_read(void *context, u32 reg, u32 *data)
+{
+ struct priv *d = (struct priv *)context;
+ void __iomem *iob = d->iob;
+
+ /* The spin loops here loop about 15~16 times each, so there is no need
+ * to use a more expensive sleep method.
+ */
+
+ /* Write register offset */
+ iowrite32be(RTC_EXICSR, iob + EXICSR);
+ iowrite32be(reg << 8, iob + EXIDATA);
+ iowrite32be(RTC_EXICR_W, iob + EXICR);
+ while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET))
+ cpu_relax();
+
+ /* Read data */
+ iowrite32be(RTC_EXICSR, iob + EXICSR);
+ iowrite32be(RTC_EXICR_R, iob + EXICR);
+ while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET))
+ cpu_relax();
+ *data = ioread32be(iob + EXIDATA);
+
+ /* Clear channel parameters */
+ iowrite32be(0, iob + EXICSR);
+
+ return 0;
+}
+
+static int exi_write(void *context, u32 reg, u32 data)
+{
+ struct priv *d = (struct priv *)context;
+ void __iomem *iob = d->iob;
+
+ /* The spin loops here loop about 15~16 times each, so there is no need
+ * to use a more expensive sleep method.
+ */
+
+ /* Write register offset */
+ iowrite32be(RTC_EXICSR, iob + EXICSR);
+ iowrite32be(RTC_EXIDATA_W | (reg << 8), iob + EXIDATA);
+ iowrite32be(RTC_EXICR_W, iob + EXICR);
+ while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET))
+ cpu_relax();
+
+ /* Write data */
+ iowrite32be(RTC_EXICSR, iob + EXICSR);
+ iowrite32be(data, iob + EXIDATA);
+ iowrite32be(RTC_EXICR_W, iob + EXICR);
+ while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET))
+ cpu_relax();
+
+ /* Clear channel parameters */
+ iowrite32be(0, iob + EXICSR);
+
+ return 0;
+}
+
+static const struct regmap_bus exi_bus = {
+ /* TODO: is that true? Not that it matters here, but still. */
+ .fast_io = true,
+ .reg_read = exi_read,
+ .reg_write = exi_write,
+};
+
+static int gamecube_rtc_read_time(struct device *dev, struct rtc_time *t)
+{
+ struct priv *d = dev_get_drvdata(dev);
+ int ret;
+ u32 counter;
+ time64_t timestamp;
+
+ ret = regmap_read(d->regmap, RTC_COUNTER, &counter);
+ if (ret)
+ return ret;
+
+ /* Add the counter and the bias to obtain the timestamp */
+ timestamp = (time64_t)d->rtc_bias + counter;
+ rtc_time64_to_tm(timestamp, t);
+
+ return 0;
+}
+
+static int gamecube_rtc_set_time(struct device *dev, struct rtc_time *t)
+{
+ struct priv *d = dev_get_drvdata(dev);
+ time64_t timestamp;
+
+ /* Subtract the timestamp and the bias to obtain the counter value */
+ timestamp = rtc_tm_to_time64(t);
+ return regmap_write(d->regmap, RTC_COUNTER, timestamp - d->rtc_bias);
+}
+
+static int gamecube_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct priv *d = dev_get_drvdata(dev);
+ int value;
+ int control0;
+ int ret;
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ ret = regmap_read(d->regmap, RTC_CONTROL0, &control0);
+ if (ret)
+ return ret;
+
+ value = 0;
+ if (control0 & RTC_CONTROL0_UNSTABLE_POWER)
+ value |= RTC_VL_DATA_INVALID;
+ if (control0 & RTC_CONTROL0_LOW_BATTERY)
+ value |= RTC_VL_BACKUP_LOW;
+ return put_user(value, (unsigned int __user *)arg);
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static const struct rtc_class_ops gamecube_rtc_ops = {
+ .read_time = gamecube_rtc_read_time,
+ .set_time = gamecube_rtc_set_time,
+ .ioctl = gamecube_rtc_ioctl,
+};
+
+static int gamecube_rtc_read_offset_from_sram(struct priv *d)
+{
+ struct device_node *np;
+ int ret;
+ struct resource res;
+ void __iomem *hw_srnprot;
+ u32 old;
+
+ np = of_find_compatible_node(NULL, NULL, "nintendo,latte-srnprot");
+ if (!np)
+ np = of_find_compatible_node(NULL, NULL,
+ "nintendo,hollywood-srnprot");
+ if (!np) {
+ pr_info("HW_SRNPROT not found, assuming a GameCube\n");
+ return regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias);
+ }
+
+ ret = of_address_to_resource(np, 0, &res);
+ if (ret) {
+ pr_err("no io memory range found\n");
+ return -1;
+ }
+
+ hw_srnprot = ioremap(res.start, resource_size(&res));
+ old = ioread32be(hw_srnprot);
+
+ /* TODO: figure out why we use this magic constant. I obtained it by
+ * reading the leftover value after boot, after IOSU already ran.
+ *
+ * On my Wii U, setting this register to 1 prevents the console from
+ * rebooting properly, so wiiubrew.org must be missing something.
+ *
+ * See https://wiiubrew.org/wiki/Hardware/Latte_registers
+ */
+ if (old != 0x7bf)
+ iowrite32be(0x7bf, hw_srnprot);
+
+ /* Get the offset from RTC SRAM.
+ *
+ * Its default location on the GameCube and on the Wii is in the SRAM,
+ * while on the Wii U the bootloader needs to fill it with the contents
+ * of /config/rtc.xml on the SLC (the eMMC). We don’t do that from
+ * Linux since it requires implementing a proprietary filesystem and
+ * doing file decryption; instead we require the bootloader to fill
+ * the same SRAM address as on previous consoles.
+ */
+ ret = regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias);
+ if (ret) {
+ pr_err("failed to get the RTC bias\n");
+ return -1;
+ }
+
+ /* Reset SRAM access to how it was before, our job here is done. */
+ if (old != 0x7bf)
+ iowrite32be(old, hw_srnprot);
+ iounmap(hw_srnprot);
+
+ return 0;
+}
+
+static const struct regmap_range rtc_rd_ranges[] = {
+ regmap_reg_range(0x200000, 0x200010),
+ regmap_reg_range(0x204000, 0x204000),
+ regmap_reg_range(0x210000, 0x210001),
+ regmap_reg_range(0x210004, 0x210007),
+ regmap_reg_range(0x21000c, 0x21000d),
+};
+
+static const struct regmap_access_table rtc_rd_regs = {
+ .yes_ranges = rtc_rd_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rtc_rd_ranges),
+};
+
+static const struct regmap_range rtc_wr_ranges[] = {
+ regmap_reg_range(0x200000, 0x200010),
+ regmap_reg_range(0x204000, 0x204000),
+ regmap_reg_range(0x210000, 0x210001),
+ regmap_reg_range(0x21000d, 0x21000d),
+};
+
+static const struct regmap_access_table rtc_wr_regs = {
+ .yes_ranges = rtc_wr_ranges,
+ .n_yes_ranges = ARRAY_SIZE(rtc_wr_ranges),
+};
+
+static const struct regmap_config gamecube_rtc_regmap_config = {
+ .reg_bits = 24,
+ .val_bits = 32,
+ .rd_table = &rtc_rd_regs,
+ .wr_table = &rtc_wr_regs,
+ .max_register = 0x21000d,
+ .name = "gamecube-rtc",
+};
+
+static int gamecube_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rtc_device *rtc;
+ struct priv *d;
+ int ret;
+
+ d = devm_kzalloc(dev, sizeof(struct priv), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->iob = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(d->iob))
+ return PTR_ERR(d->iob);
+
+ d->regmap = devm_regmap_init(dev, &exi_bus, d,
+ &gamecube_rtc_regmap_config);
+ if (IS_ERR(d->regmap))
+ return PTR_ERR(d->regmap);
+
+ ret = gamecube_rtc_read_offset_from_sram(d);
+ if (ret)
+ return ret;
+ dev_dbg(dev, "SRAM bias: 0x%x", d->rtc_bias);
+
+ dev_set_drvdata(dev, d);
+
+ rtc = devm_rtc_allocate_device(dev);
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+	/* We could represent dates further than that, but it depends on the
+	 * stored bias, which we can’t modify persistently on all supported
+	 * consoles, so here we pretend to be limited to 2106.
+ */
+ rtc->range_min = 0;
+ rtc->range_max = U32_MAX;
+ rtc->ops = &gamecube_rtc_ops;
+
+ devm_rtc_register_device(rtc);
+
+ return 0;
+}
+
+static const struct of_device_id gamecube_rtc_of_match[] = {
+ {.compatible = "nintendo,latte-exi" },
+ {.compatible = "nintendo,hollywood-exi" },
+ {.compatible = "nintendo,flipper-exi" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gamecube_rtc_of_match);
+
+static struct platform_driver gamecube_rtc_driver = {
+ .probe = gamecube_rtc_probe,
+ .driver = {
+ .name = "rtc-gamecube",
+ .of_match_table = gamecube_rtc_of_match,
+ },
+};
+module_platform_driver(gamecube_rtc_driver);
+
+MODULE_AUTHOR("Emmanuel Gil Peyrot <linkmauve@linkmauve.fr>");
+MODULE_DESCRIPTION("Nintendo GameCube, Wii and Wii U RTC driver");
+MODULE_LICENSE("GPL");
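
The gamecube ioctl above maps hardware status bits to the standard RTC_VL_* flags, which userspace can query through the rtc character device. A sketch, assuming the device is exposed as /dev/rtc0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/rtc.h>

	int main(void)
	{
		unsigned int flags;
		int fd = open("/dev/rtc0", O_RDONLY);	/* path is an assumption */

		if (fd < 0 || ioctl(fd, RTC_VL_READ, &flags) < 0) {
			perror("RTC_VL_READ");
			return 1;
		}
		if (flags & RTC_VL_DATA_INVALID)
			printf("power was unstable, time may be invalid\n");
		if (flags & RTC_VL_BACKUP_LOW)
			printf("backup battery is low\n");
		close(fd);
		return 0;
	}
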
diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c
index dcfaf09946ee..ae9f131b43c0 100644
--- a/drivers/rtc/rtc-mc146818-lib.c
+++ b/drivers/rtc/rtc-mc146818-lib.c
@@ -8,48 +8,100 @@
#include <linux/acpi.h>
#endif
-unsigned int mc146818_get_time(struct rtc_time *time)
+/*
+ * Execute a function while the UIP (Update-in-progress) bit of the RTC is
+ * unset.
+ *
+ * Warning: the callback may be executed more than once.
+ */
+bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ void *param)
{
- unsigned char ctrl;
+ int i;
unsigned long flags;
- unsigned char century = 0;
- bool retry;
+ unsigned char seconds;
-#ifdef CONFIG_MACH_DECSTATION
- unsigned int real_year;
-#endif
+ for (i = 0; i < 10; i++) {
+ spin_lock_irqsave(&rtc_lock, flags);
-again:
- spin_lock_irqsave(&rtc_lock, flags);
- /* Ensure that the RTC is accessible. Bit 6 must be 0! */
- if (WARN_ON_ONCE((CMOS_READ(RTC_VALID) & 0x40) != 0)) {
- spin_unlock_irqrestore(&rtc_lock, flags);
- memset(time, 0xff, sizeof(*time));
- return 0;
- }
+ /*
+ * Check whether there is an update in progress during which the
+ * readout is unspecified. The maximum update time is ~2ms. Poll
+ * every msec for completion.
+ *
+		 * Store the seconds value before checking UIP so a long-lasting
+ * NMI which happens to hit after the UIP check cannot make
+ * an update cycle invisible.
+ */
+ seconds = CMOS_READ(RTC_SECONDS);
- /*
- * Check whether there is an update in progress during which the
- * readout is unspecified. The maximum update time is ~2ms. Poll
- * every msec for completion.
- *
- * Store the second value before checking UIP so a long lasting NMI
- * which happens to hit after the UIP check cannot make an update
- * cycle invisible.
- */
- time->tm_sec = CMOS_READ(RTC_SECONDS);
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ mdelay(1);
+ continue;
+ }
- if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
- spin_unlock_irqrestore(&rtc_lock, flags);
- mdelay(1);
- goto again;
- }
+ /* Revalidate the above readout */
+ if (seconds != CMOS_READ(RTC_SECONDS)) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ continue;
+ }
- /* Revalidate the above readout */
- if (time->tm_sec != CMOS_READ(RTC_SECONDS)) {
+ if (callback)
+ callback(seconds, param);
+
+ /*
+ * Check for the UIP bit again. If it is set now then
+ * the above values may contain garbage.
+ */
+ if (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ mdelay(1);
+ continue;
+ }
+
+ /*
+		 * An NMI might have interrupted the above sequence, so check
+		 * whether the seconds value has changed, which indicates that
+		 * the NMI lasted longer than the UIP bit was set. Unlikely,
+		 * but possible - and there is also virt...
+ */
+ if (seconds != CMOS_READ(RTC_SECONDS)) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ continue;
+ }
spin_unlock_irqrestore(&rtc_lock, flags);
- goto again;
+
+ return true;
}
+ return false;
+}
+EXPORT_SYMBOL_GPL(mc146818_avoid_UIP);
+
+/*
+ * If the UIP (Update-in-progress) bit of the RTC is set for more than
+ * 10ms, the RTC is apparently broken or not present.
+ */
+bool mc146818_does_rtc_work(void)
+{
+ return mc146818_avoid_UIP(NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(mc146818_does_rtc_work);
+
+struct mc146818_get_time_callback_param {
+ struct rtc_time *time;
+ unsigned char ctrl;
+#ifdef CONFIG_ACPI
+ unsigned char century;
+#endif
+#ifdef CONFIG_MACH_DECSTATION
+ unsigned int real_year;
+#endif
+};
+
+static void mc146818_get_time_callback(unsigned char seconds, void *param_in)
+{
+ struct mc146818_get_time_callback_param *p = param_in;
/*
* Only the values that we read from the RTC are set. We leave
@@ -57,39 +109,39 @@ again:
* RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
* by the RTC when initially set to a non-zero value.
*/
- time->tm_min = CMOS_READ(RTC_MINUTES);
- time->tm_hour = CMOS_READ(RTC_HOURS);
- time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
- time->tm_mon = CMOS_READ(RTC_MONTH);
- time->tm_year = CMOS_READ(RTC_YEAR);
+ p->time->tm_sec = seconds;
+ p->time->tm_min = CMOS_READ(RTC_MINUTES);
+ p->time->tm_hour = CMOS_READ(RTC_HOURS);
+ p->time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
+ p->time->tm_mon = CMOS_READ(RTC_MONTH);
+ p->time->tm_year = CMOS_READ(RTC_YEAR);
#ifdef CONFIG_MACH_DECSTATION
- real_year = CMOS_READ(RTC_DEC_YEAR);
+ p->real_year = CMOS_READ(RTC_DEC_YEAR);
#endif
#ifdef CONFIG_ACPI
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
- acpi_gbl_FADT.century)
- century = CMOS_READ(acpi_gbl_FADT.century);
+ acpi_gbl_FADT.century) {
+ p->century = CMOS_READ(acpi_gbl_FADT.century);
+ } else {
+ p->century = 0;
+ }
#endif
- ctrl = CMOS_READ(RTC_CONTROL);
- /*
- * Check for the UIP bit again. If it is set now then
- * the above values may contain garbage.
- */
- retry = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
- /*
- * A NMI might have interrupted the above sequence so check whether
- * the seconds value has changed which indicates that the NMI took
- * longer than the UIP bit was set. Unlikely, but possible and
- * there is also virt...
- */
- retry |= time->tm_sec != CMOS_READ(RTC_SECONDS);
- spin_unlock_irqrestore(&rtc_lock, flags);
+ p->ctrl = CMOS_READ(RTC_CONTROL);
+}
- if (retry)
- goto again;
+int mc146818_get_time(struct rtc_time *time)
+{
+ struct mc146818_get_time_callback_param p = {
+ .time = time
+ };
+
+ if (!mc146818_avoid_UIP(mc146818_get_time_callback, &p)) {
+ memset(time, 0, sizeof(*time));
+ return -EIO;
+ }
- if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ if (!(p.ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
{
time->tm_sec = bcd2bin(time->tm_sec);
time->tm_min = bcd2bin(time->tm_min);
@@ -97,15 +149,19 @@ again:
time->tm_mday = bcd2bin(time->tm_mday);
time->tm_mon = bcd2bin(time->tm_mon);
time->tm_year = bcd2bin(time->tm_year);
- century = bcd2bin(century);
+#ifdef CONFIG_ACPI
+ p.century = bcd2bin(p.century);
+#endif
}
#ifdef CONFIG_MACH_DECSTATION
- time->tm_year += real_year - 72;
+ time->tm_year += p.real_year - 72;
#endif
- if (century > 20)
- time->tm_year += (century - 19) * 100;
+#ifdef CONFIG_ACPI
+ if (p.century > 19)
+ time->tm_year += (p.century - 19) * 100;
+#endif
/*
* Account for differences between how the RTC uses the values
@@ -116,7 +172,7 @@ again:
time->tm_mon--;
- return RTC_24H;
+ return 0;
}
EXPORT_SYMBOL_GPL(mc146818_get_time);
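
Note the changed contract: mc146818_get_time() now returns 0 or -EIO rather than RTC_24H, so callers are expected to check the result. A minimal hedged sketch of a caller:

	#include <linux/mc146818rtc.h>
	#include <linux/rtc.h>

	static int example_read_cmos_time(struct rtc_time *tm)
	{
		int ret = mc146818_get_time(tm);

		if (ret)	/* -EIO: RTC broken or stuck in update */
			return ret;

		return rtc_valid_tm(tm);	/* sanity-check the fields */
	}
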
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 56c58b055dff..81a5b1f2e68c 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -748,7 +748,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
/*
* Enable timestamp function and store timestamp of first trigger
- * event until TSF1 and TFS2 interrupt flags are cleared.
+ * event until TSF1 and TSF2 interrupt flags are cleared.
*/
ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_TS_CTRL,
PCF2127_BIT_TS_CTRL_TSOFF |
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index 15e50bb10cf0..df2b072c394d 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -514,21 +514,56 @@ static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063)
}
#endif
-static const struct pcf85063_config pcf85063tp_config = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x0a,
+enum pcf85063_type {
+ PCF85063,
+ PCF85063TP,
+ PCF85063A,
+ RV8263,
+ PCF85063_LAST_ID
+};
+
+static struct pcf85063_config pcf85063_cfg[] = {
+ [PCF85063] = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0a,
+ },
+ },
+ [PCF85063TP] = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x0a,
+ },
+ },
+ [PCF85063A] = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
+ },
+ .has_alarms = 1,
+ },
+ [RV8263] = {
+ .regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x11,
+ },
+ .has_alarms = 1,
+ .force_cap_7000 = 1,
},
};
+static const struct i2c_device_id pcf85063_ids[];
+
static int pcf85063_probe(struct i2c_client *client)
{
struct pcf85063 *pcf85063;
unsigned int tmp;
int err;
- const struct pcf85063_config *config = &pcf85063tp_config;
- const void *data = of_device_get_match_data(&client->dev);
+ const struct pcf85063_config *config;
struct nvmem_config nvmem_cfg = {
.name = "pcf85063_nvram",
.reg_read = pcf85063_nvmem_read,
@@ -544,8 +579,17 @@ static int pcf85063_probe(struct i2c_client *client)
if (!pcf85063)
return -ENOMEM;
- if (data)
- config = data;
+ if (client->dev.of_node) {
+ config = of_device_get_match_data(&client->dev);
+ if (!config)
+ return -ENODEV;
+ } else {
+ enum pcf85063_type type =
+ i2c_match_id(pcf85063_ids, client)->driver_data;
+ if (type >= PCF85063_LAST_ID)
+ return -ENODEV;
+ config = &pcf85063_cfg[type];
+ }
pcf85063->regmap = devm_regmap_init_i2c(client, &config->regmap);
if (IS_ERR(pcf85063->regmap))
@@ -604,31 +648,21 @@ static int pcf85063_probe(struct i2c_client *client)
return devm_rtc_register_device(pcf85063->rtc);
}
-#ifdef CONFIG_OF
-static const struct pcf85063_config pcf85063a_config = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x11,
- },
- .has_alarms = 1,
-};
-
-static const struct pcf85063_config rv8263_config = {
- .regmap = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x11,
- },
- .has_alarms = 1,
- .force_cap_7000 = 1,
+static const struct i2c_device_id pcf85063_ids[] = {
+ { "pcf85063", PCF85063 },
+ { "pcf85063tp", PCF85063TP },
+ { "pcf85063a", PCF85063A },
+ { "rv8263", RV8263 },
+ {}
};
+MODULE_DEVICE_TABLE(i2c, pcf85063_ids);
+#ifdef CONFIG_OF
static const struct of_device_id pcf85063_of_match[] = {
- { .compatible = "nxp,pcf85063", .data = &pcf85063tp_config },
- { .compatible = "nxp,pcf85063tp", .data = &pcf85063tp_config },
- { .compatible = "nxp,pcf85063a", .data = &pcf85063a_config },
- { .compatible = "microcrystal,rv8263", .data = &rv8263_config },
+ { .compatible = "nxp,pcf85063", .data = &pcf85063_cfg[PCF85063] },
+ { .compatible = "nxp,pcf85063tp", .data = &pcf85063_cfg[PCF85063TP] },
+ { .compatible = "nxp,pcf85063a", .data = &pcf85063_cfg[PCF85063A] },
+ { .compatible = "microcrystal,rv8263", .data = &pcf85063_cfg[RV8263] },
{}
};
MODULE_DEVICE_TABLE(of, pcf85063_of_match);
@@ -640,6 +674,7 @@ static struct i2c_driver pcf85063_driver = {
.of_match_table = of_match_ptr(pcf85063_of_match),
},
.probe_new = pcf85063_probe,
+ .id_table = pcf85063_ids,
};
module_i2c_driver(pcf85063_driver);
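
The probe change above lets the driver work both from devicetree and from legacy I2C board instantiation. The general shape of that dual lookup, as a sketch with placeholder names (my_config, my_cfg[], MY_LAST_ID):

	#include <linux/i2c.h>
	#include <linux/of_device.h>

	static const struct my_config *example_get_config(struct i2c_client *client,
							  const struct i2c_device_id *ids)
	{
		const struct i2c_device_id *id;

		if (client->dev.of_node)	/* DT: match data is a pointer */
			return of_device_get_match_data(&client->dev);

		id = i2c_match_id(ids, client);	/* legacy: data is a table index */
		if (!id || id->driver_data >= MY_LAST_ID)
			return NULL;

		return &my_cfg[id->driver_data];
	}
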
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index d2f1d8f754bf..cf8119b6d320 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -330,6 +330,10 @@ static int __init pxa_rtc_probe(struct platform_device *pdev)
if (sa1100_rtc->irq_alarm < 0)
return -ENXIO;
+ sa1100_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(sa1100_rtc->rtc))
+ return PTR_ERR(sa1100_rtc->rtc);
+
pxa_rtc->base = devm_ioremap(dev, pxa_rtc->ress->start,
resource_size(pxa_rtc->ress));
if (!pxa_rtc->base) {
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index 80980414890c..cb15983383f5 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -28,8 +28,10 @@
#define RS5C372_REG_MONTH 5
#define RS5C372_REG_YEAR 6
#define RS5C372_REG_TRIM 7
-# define RS5C372_TRIM_XSL 0x80
+# define RS5C372_TRIM_XSL 0x80 /* only if RS5C372[a|b] */
# define RS5C372_TRIM_MASK 0x7F
+# define R2221TL_TRIM_DEV (1 << 7) /* only if R2221TL */
+# define RS5C372_TRIM_DECR (1 << 6)
#define RS5C_REG_ALARM_A_MIN 8 /* or ALARM_W */
#define RS5C_REG_ALARM_A_HOURS 9
@@ -324,8 +326,12 @@ static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim)
struct rs5c372 *rs5c372 = i2c_get_clientdata(client);
u8 tmp = rs5c372->regs[RS5C372_REG_TRIM];
- if (osc)
- *osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768;
+ if (osc) {
+ if (rs5c372->type == rtc_rs5c372a || rs5c372->type == rtc_rs5c372b)
+ *osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768;
+ else
+ *osc = 32768;
+ }
if (trim) {
dev_dbg(&client->dev, "%s: raw trim=%x\n", __func__, tmp);
@@ -485,6 +491,176 @@ static int rs5c372_rtc_proc(struct device *dev, struct seq_file *seq)
#define rs5c372_rtc_proc NULL
#endif
+#ifdef CONFIG_RTC_INTF_DEV
+static int rs5c372_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
+{
+ struct rs5c372 *rs5c = i2c_get_clientdata(to_i2c_client(dev));
+ unsigned char ctrl2;
+ int addr;
+ unsigned int flags;
+
+ dev_dbg(dev, "%s: cmd=%x\n", __func__, cmd);
+
+ addr = RS5C_ADDR(RS5C_REG_CTRL2);
+ ctrl2 = i2c_smbus_read_byte_data(rs5c->client, addr);
+
+ switch (cmd) {
+ case RTC_VL_READ:
+ flags = 0;
+
+ switch (rs5c->type) {
+ case rtc_r2025sd:
+ case rtc_r2221tl:
+ if ((rs5c->type == rtc_r2025sd && !(ctrl2 & R2x2x_CTRL2_XSTP)) ||
+ (rs5c->type == rtc_r2221tl && (ctrl2 & R2x2x_CTRL2_XSTP))) {
+ flags |= RTC_VL_DATA_INVALID;
+ }
+ if (ctrl2 & R2x2x_CTRL2_VDET)
+ flags |= RTC_VL_BACKUP_LOW;
+ break;
+ default:
+ if (ctrl2 & RS5C_CTRL2_XSTP)
+ flags |= RTC_VL_DATA_INVALID;
+ break;
+ }
+
+ return put_user(flags, (unsigned int __user *)arg);
+ case RTC_VL_CLR:
+ /* clear VDET bit */
+ if (rs5c->type == rtc_r2025sd || rs5c->type == rtc_r2221tl) {
+ ctrl2 &= ~R2x2x_CTRL2_VDET;
+ if (i2c_smbus_write_byte_data(rs5c->client, addr, ctrl2) < 0) {
+ dev_dbg(&rs5c->client->dev, "%s: write error in line %i\n",
+ __func__, __LINE__);
+ return -EIO;
+ }
+ }
+ return 0;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+#else
+#define rs5c372_ioctl NULL
+#endif
+
+static int rs5c372_read_offset(struct device *dev, long *offset)
+{
+ struct rs5c372 *rs5c = i2c_get_clientdata(to_i2c_client(dev));
+ u8 val = rs5c->regs[RS5C372_REG_TRIM];
+ long ppb_per_step = 0;
+ bool decr = val & RS5C372_TRIM_DECR;
+
+ switch (rs5c->type) {
+ case rtc_r2221tl:
+ ppb_per_step = val & R2221TL_TRIM_DEV ? 1017 : 3051;
+ break;
+ case rtc_rs5c372a:
+ case rtc_rs5c372b:
+ ppb_per_step = val & RS5C372_TRIM_XSL ? 3125 : 3051;
+ break;
+ default:
+ ppb_per_step = 3051;
+ break;
+ }
+
+	/* Only bits [0:5] represent the time counts */
+ val &= 0x3F;
+
+ /* If bits[1:5] are all 0, it means no increment or decrement */
+ if (!(val & 0x3E)) {
+ *offset = 0;
+ } else {
+ if (decr)
+ *offset = -(((~val) & 0x3F) + 1) * ppb_per_step;
+ else
+ *offset = (val - 1) * ppb_per_step;
+ }
+
+ return 0;
+}
+
+static int rs5c372_set_offset(struct device *dev, long offset)
+{
+ struct rs5c372 *rs5c = i2c_get_clientdata(to_i2c_client(dev));
+ int addr = RS5C_ADDR(RS5C372_REG_TRIM);
+ u8 val = 0;
+ u8 tmp = 0;
+ long ppb_per_step = 3051;
+ long steps = LONG_MIN;
+
+ switch (rs5c->type) {
+ case rtc_rs5c372a:
+ case rtc_rs5c372b:
+ tmp = rs5c->regs[RS5C372_REG_TRIM];
+ if (tmp & RS5C372_TRIM_XSL) {
+ ppb_per_step = 3125;
+ val |= RS5C372_TRIM_XSL;
+ }
+ break;
+ case rtc_r2221tl:
+ /*
+ * Check if it is possible to use high resolution mode (DEV=1).
+ * In this mode, the minimum resolution is 2 / (32768 * 20 * 3),
+ * which is about 1017 ppb.
+ */
+ steps = DIV_ROUND_CLOSEST(offset, 1017);
+ if (steps >= -0x3E && steps <= 0x3E) {
+ ppb_per_step = 1017;
+ val |= R2221TL_TRIM_DEV;
+ } else {
+ /*
+ * offset is out of the range of high resolution mode.
+ * Try to use low resolution mode (DEV=0). In this mode,
+ * the minimum resolution is 2 / (32768 * 20), which is
+ * about 3051 ppb.
+ */
+ steps = LONG_MIN;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (steps == LONG_MIN) {
+ steps = DIV_ROUND_CLOSEST(offset, ppb_per_step);
+ if (steps > 0x3E || steps < -0x3E)
+ return -ERANGE;
+ }
+
+ if (steps > 0) {
+ val |= steps + 1;
+ } else {
+ val |= RS5C372_TRIM_DECR;
+ val |= (~(-steps - 1)) & 0x3F;
+ }
+
+ if (!steps || !(val & 0x3E)) {
+ /*
+	 * If the offset is too small, set the oscillation adjustment
+	 * register or the time trimming register to its default value,
+	 * which means no increment or decrement. But for rs5c372[a|b],
+	 * the XSL bit should be kept unchanged.
+ */
+ if (rs5c->type == rtc_rs5c372a || rs5c->type == rtc_rs5c372b)
+ val &= RS5C372_TRIM_XSL;
+ else
+ val = 0;
+ }
+
+ dev_dbg(&rs5c->client->dev, "write 0x%x for offset %ld\n", val, offset);
+
+ if (i2c_smbus_write_byte_data(rs5c->client, addr, val) < 0) {
+ dev_err(&rs5c->client->dev, "failed to write 0x%x to reg %d\n", val, addr);
+ return -EIO;
+ }
+
+ rs5c->regs[RS5C372_REG_TRIM] = val;
+
+ return 0;
+}
+
static const struct rtc_class_ops rs5c372_rtc_ops = {
.proc = rs5c372_rtc_proc,
.read_time = rs5c372_rtc_read_time,
@@ -492,6 +668,9 @@ static const struct rtc_class_ops rs5c372_rtc_ops = {
.read_alarm = rs5c_read_alarm,
.set_alarm = rs5c_set_alarm,
.alarm_irq_enable = rs5c_rtc_alarm_irq_enable,
+ .ioctl = rs5c372_ioctl,
+ .read_offset = rs5c372_read_offset,
+ .set_offset = rs5c372_set_offset,
};
#if IS_ENABLED(CONFIG_RTC_INTF_SYSFS)
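
The core of the new offset support is the conversion between a requested offset in parts per billion and the 6-bit trim step count. A hedged sketch of that arithmetic, mirroring rs5c372_set_offset():

	#include <linux/kernel.h>
	#include <linux/errno.h>

	/* Convert an offset in ppb to trim steps at a given step size,
	 * rejecting values outside the +/-0x3E range as the driver does. */
	static long example_offset_to_steps(long offset_ppb, long ppb_per_step)
	{
		long steps = DIV_ROUND_CLOSEST(offset_ppb, ppb_per_step);

		if (steps > 0x3E || steps < -0x3E)
			return -ERANGE;

		return steps;
	}

For instance, a request of +10000 ppb at the default 3051 ppb per step rounds to 3 steps.
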
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index 0d5ed38bf60c..f69e0b1137cd 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -55,6 +55,7 @@
enum rv8803_type {
rv_8803,
+ rx_8804,
rx_8900
};
@@ -601,6 +602,7 @@ static int rv8803_probe(struct i2c_client *client,
static const struct i2c_device_id rv8803_id[] = {
{ "rv8803", rv_8803 },
+ { "rv8804", rx_8804 },
{ "rx8803", rv_8803 },
{ "rx8900", rx_8900 },
{ }
@@ -617,6 +619,10 @@ static const __maybe_unused struct of_device_id rv8803_of_match[] = {
.data = (void *)rv_8803
},
{
+ .compatible = "epson,rx8804",
+ .data = (void *)rx_8804
+ },
+ {
.compatible = "epson,rx8900",
.data = (void *)rx_8900
},
diff --git a/drivers/rtc/rtc-sunplus.c b/drivers/rtc/rtc-sunplus.c
new file mode 100644
index 000000000000..e8e2ab1103fc
--- /dev/null
+++ b/drivers/rtc/rtc-sunplus.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * The RTC driver for Sunplus SP7021
+ *
+ * Copyright (C) 2019 Sunplus Technology Inc. All rights reserved.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ktime.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/rtc.h>
+
+#define RTC_REG_NAME "rtc"
+
+#define RTC_CTRL 0x40
+#define TIMER_FREEZE_MASK_BIT BIT(5 + 16)
+#define TIMER_FREEZE BIT(5)
+#define DIS_SYS_RST_RTC_MASK_BIT BIT(4 + 16)
+#define DIS_SYS_RST_RTC BIT(4)
+#define RTC32K_MODE_RESET_MASK_BIT BIT(3 + 16)
+#define RTC32K_MODE_RESET BIT(3)
+#define ALARM_EN_OVERDUE_MASK_BIT BIT(2 + 16)
+#define ALARM_EN_OVERDUE BIT(2)
+#define ALARM_EN_PMC_MASK_BIT BIT(1 + 16)
+#define ALARM_EN_PMC BIT(1)
+#define ALARM_EN_MASK_BIT BIT(0 + 16)
+#define ALARM_EN BIT(0)
+#define RTC_TIMER_OUT 0x44
+#define RTC_DIVIDER 0x48
+#define RTC_TIMER_SET 0x4c
+#define RTC_ALARM_SET 0x50
+#define RTC_USER_DATA 0x54
+#define RTC_RESET_RECORD 0x58
+#define RTC_BATT_CHARGE_CTRL 0x5c
+#define BAT_CHARGE_RSEL_MASK_BIT GENMASK(3 + 16, 2 + 16)
+#define BAT_CHARGE_RSEL_MASK GENMASK(3, 2)
+#define BAT_CHARGE_RSEL_2K_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 0)
+#define BAT_CHARGE_RSEL_250_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 1)
+#define BAT_CHARGE_RSEL_50_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 2)
+#define BAT_CHARGE_RSEL_0_OHM FIELD_PREP(BAT_CHARGE_RSEL_MASK, 3)
+#define BAT_CHARGE_DSEL_MASK_BIT BIT(1 + 16)
+#define BAT_CHARGE_DSEL_MASK GENMASK(1, 1)
+#define BAT_CHARGE_DSEL_ON FIELD_PREP(BAT_CHARGE_DSEL_MASK, 0)
+#define BAT_CHARGE_DSEL_OFF FIELD_PREP(BAT_CHARGE_DSEL_MASK, 1)
+#define BAT_CHARGE_EN_MASK_BIT BIT(0 + 16)
+#define BAT_CHARGE_EN BIT(0)
+#define RTC_TRIM_CTRL 0x60
+
+struct sunplus_rtc {
+ struct rtc_device *rtc;
+ struct resource *res;
+ struct clk *rtcclk;
+ struct reset_control *rstc;
+ void __iomem *reg_base;
+ int irq;
+};
+
+static void sp_get_seconds(struct device *dev, unsigned long *secs)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+
+ *secs = (unsigned long)readl(sp_rtc->reg_base + RTC_TIMER_OUT);
+}
+
+static void sp_set_seconds(struct device *dev, unsigned long secs)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+
+ writel((u32)secs, sp_rtc->reg_base + RTC_TIMER_SET);
+}
+
+static int sp_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned long secs;
+
+ sp_get_seconds(dev, &secs);
+ rtc_time64_to_tm(secs, tm);
+
+ return 0;
+}
+
+static int sp_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ unsigned long secs;
+
+ secs = rtc_tm_to_time64(tm);
+ dev_dbg(dev, "%s, secs = %lu\n", __func__, secs);
+ sp_set_seconds(dev, secs);
+
+ return 0;
+}
+
+static int sp_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+ unsigned long alarm_time;
+
+ alarm_time = rtc_tm_to_time64(&alrm->time);
+ dev_dbg(dev, "%s, alarm_time: %u\n", __func__, (u32)(alarm_time));
+ writel((u32)alarm_time, sp_rtc->reg_base + RTC_ALARM_SET);
+
+ return 0;
+}
+
+static int sp_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+ unsigned int alarm_time;
+
+ alarm_time = readl(sp_rtc->reg_base + RTC_ALARM_SET);
+ dev_dbg(dev, "%s, alarm_time: %u\n", __func__, alarm_time);
+
+ if (alarm_time == 0)
+ alrm->enabled = 0;
+ else
+ alrm->enabled = 1;
+
+ rtc_time64_to_tm((unsigned long)(alarm_time), &alrm->time);
+
+ return 0;
+}
+
+static int sp_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+
+ if (enabled)
+ writel((TIMER_FREEZE_MASK_BIT | DIS_SYS_RST_RTC_MASK_BIT |
+ RTC32K_MODE_RESET_MASK_BIT | ALARM_EN_OVERDUE_MASK_BIT |
+ ALARM_EN_PMC_MASK_BIT | ALARM_EN_MASK_BIT) |
+ (DIS_SYS_RST_RTC | ALARM_EN_OVERDUE | ALARM_EN_PMC | ALARM_EN),
+ sp_rtc->reg_base + RTC_CTRL);
+ else
+ writel((ALARM_EN_OVERDUE_MASK_BIT | ALARM_EN_PMC_MASK_BIT | ALARM_EN_MASK_BIT) |
+ 0x0, sp_rtc->reg_base + RTC_CTRL);
+
+ return 0;
+}
+
+static const struct rtc_class_ops sp_rtc_ops = {
+ .read_time = sp_rtc_read_time,
+ .set_time = sp_rtc_set_time,
+ .set_alarm = sp_rtc_set_alarm,
+ .read_alarm = sp_rtc_read_alarm,
+ .alarm_irq_enable = sp_rtc_alarm_irq_enable,
+};
+
+static irqreturn_t sp_rtc_irq_handler(int irq, void *dev_id)
+{
+ struct platform_device *plat_dev = dev_id;
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(&plat_dev->dev);
+
+ rtc_update_irq(sp_rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ dev_dbg(&plat_dev->dev, "[RTC] ALARM INT\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * -------------------------------------------------------------------------------------
+ * bat_charge_rsel bat_charge_dsel bat_charge_en Remarks
+ * x x 0 Disable
+ * 0 0 1 0.86mA (2K Ohm with diode)
+ * 1 0 1 1.81mA (250 Ohm with diode)
+ * 2 0 1 2.07mA (50 Ohm with diode)
+ * 3 0 1 16.0mA (0 Ohm with diode)
+ * 0 1 1 1.36mA (2K Ohm without diode)
+ * 1 1 1 3.99mA (250 Ohm without diode)
+ * 2 1 1 4.41mA (50 Ohm without diode)
+ * 3 1 1 16.0mA (0 Ohm without diode)
+ * -------------------------------------------------------------------------------------
+ */
+static void sp_rtc_set_trickle_charger(struct device dev)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(&dev);
+ u32 ohms, rsel;
+ u32 chargeable;
+
+ if (of_property_read_u32(dev.of_node, "trickle-resistor-ohms", &ohms) ||
+ of_property_read_u32(dev.of_node, "aux-voltage-chargeable", &chargeable)) {
+ dev_warn(&dev, "battery charger disabled\n");
+ return;
+ }
+
+ switch (ohms) {
+ case 2000:
+ rsel = BAT_CHARGE_RSEL_2K_OHM;
+ break;
+ case 250:
+ rsel = BAT_CHARGE_RSEL_250_OHM;
+ break;
+ case 50:
+ rsel = BAT_CHARGE_RSEL_50_OHM;
+ break;
+ case 0:
+ rsel = BAT_CHARGE_RSEL_0_OHM;
+ break;
+ default:
+ dev_err(&dev, "invalid charger resistor value (%d)\n", ohms);
+ return;
+ }
+
+ writel(BAT_CHARGE_RSEL_MASK_BIT | rsel, sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL);
+
+ switch (chargeable) {
+ case 0:
+ writel(BAT_CHARGE_DSEL_MASK_BIT | BAT_CHARGE_DSEL_OFF,
+ sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL);
+ break;
+ case 1:
+ writel(BAT_CHARGE_DSEL_MASK_BIT | BAT_CHARGE_DSEL_ON,
+ sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL);
+ break;
+ default:
+ dev_err(&dev, "invalid aux-voltage-chargeable value (%d)\n", chargeable);
+ return;
+ }
+
+ writel(BAT_CHARGE_EN_MASK_BIT | BAT_CHARGE_EN, sp_rtc->reg_base + RTC_BATT_CHARGE_CTRL);
+}
+
+static int sp_rtc_probe(struct platform_device *plat_dev)
+{
+ struct sunplus_rtc *sp_rtc;
+ int ret;
+
+ sp_rtc = devm_kzalloc(&plat_dev->dev, sizeof(*sp_rtc), GFP_KERNEL);
+ if (!sp_rtc)
+ return -ENOMEM;
+
+ sp_rtc->res = platform_get_resource_byname(plat_dev, IORESOURCE_MEM, RTC_REG_NAME);
+ sp_rtc->reg_base = devm_ioremap_resource(&plat_dev->dev, sp_rtc->res);
+ if (IS_ERR(sp_rtc->reg_base))
+ return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->reg_base),
+ "%s devm_ioremap_resource fail\n", RTC_REG_NAME);
+ dev_dbg(&plat_dev->dev, "res = 0x%x, reg_base = 0x%lx\n",
+ sp_rtc->res->start, (unsigned long)sp_rtc->reg_base);
+
+ sp_rtc->irq = platform_get_irq(plat_dev, 0);
+ if (sp_rtc->irq < 0)
+ return dev_err_probe(&plat_dev->dev, sp_rtc->irq, "platform_get_irq failed\n");
+
+ ret = devm_request_irq(&plat_dev->dev, sp_rtc->irq, sp_rtc_irq_handler,
+ IRQF_TRIGGER_RISING, "rtc irq", plat_dev);
+ if (ret)
+		return dev_err_probe(&plat_dev->dev, ret, "devm_request_irq failed\n");
+
+ sp_rtc->rtcclk = devm_clk_get(&plat_dev->dev, NULL);
+ if (IS_ERR(sp_rtc->rtcclk))
+ return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->rtcclk),
+ "devm_clk_get fail\n");
+
+ sp_rtc->rstc = devm_reset_control_get_exclusive(&plat_dev->dev, NULL);
+ if (IS_ERR(sp_rtc->rstc))
+ return dev_err_probe(&plat_dev->dev, PTR_ERR(sp_rtc->rstc),
+ "failed to retrieve reset controller\n");
+
+ ret = clk_prepare_enable(sp_rtc->rtcclk);
+ if (ret)
+ goto free_clk;
+
+ ret = reset_control_deassert(sp_rtc->rstc);
+ if (ret)
+ goto free_reset_assert;
+
+ device_init_wakeup(&plat_dev->dev, 1);
+ dev_set_drvdata(&plat_dev->dev, sp_rtc);
+
+ sp_rtc->rtc = devm_rtc_allocate_device(&plat_dev->dev);
+ if (IS_ERR(sp_rtc->rtc)) {
+ ret = PTR_ERR(sp_rtc->rtc);
+ goto free_reset_assert;
+ }
+
+ sp_rtc->rtc->range_max = U32_MAX;
+ sp_rtc->rtc->range_min = 0;
+ sp_rtc->rtc->ops = &sp_rtc_ops;
+
+ ret = devm_rtc_register_device(sp_rtc->rtc);
+ if (ret)
+ goto free_reset_assert;
+
+ /* Setup trickle charger */
+ if (plat_dev->dev.of_node)
+ sp_rtc_set_trickle_charger(plat_dev->dev);
+
+	/* Prevent a system reset from resetting the RTC */
+ writel(DIS_SYS_RST_RTC_MASK_BIT | DIS_SYS_RST_RTC, sp_rtc->reg_base + RTC_CTRL);
+
+ return 0;
+
+free_reset_assert:
+ reset_control_assert(sp_rtc->rstc);
+free_clk:
+ clk_disable_unprepare(sp_rtc->rtcclk);
+
+ return ret;
+}
+
+static int sp_rtc_remove(struct platform_device *plat_dev)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(&plat_dev->dev);
+
+ device_init_wakeup(&plat_dev->dev, 0);
+ reset_control_assert(sp_rtc->rstc);
+ clk_disable_unprepare(sp_rtc->rtcclk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sp_rtc_suspend(struct device *dev)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(sp_rtc->irq);
+
+ return 0;
+}
+
+static int sp_rtc_resume(struct device *dev)
+{
+ struct sunplus_rtc *sp_rtc = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(sp_rtc->irq);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id sp_rtc_of_match[] = {
+ { .compatible = "sunplus,sp7021-rtc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sp_rtc_of_match);
+
+static SIMPLE_DEV_PM_OPS(sp_rtc_pm_ops, sp_rtc_suspend, sp_rtc_resume);
+
+static struct platform_driver sp_rtc_driver = {
+ .probe = sp_rtc_probe,
+ .remove = sp_rtc_remove,
+ .driver = {
+ .name = "sp7021-rtc",
+ .of_match_table = sp_rtc_of_match,
+ .pm = &sp_rtc_pm_ops,
+ },
+};
+module_platform_driver(sp_rtc_driver);
+
+MODULE_AUTHOR("Vincent Shih <vincent.sunplus@gmail.com>");
+MODULE_DESCRIPTION("Sunplus RTC driver");
+MODULE_LICENSE("GPL v2");
+
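
The writel() calls in this driver rely on the SP7021 convention that each functional bit n in a control register is paired with a write-enable bit at n + 16, so fields can be updated without a read-modify-write. A sketch of the idiom (hypothetical helper, not part of the driver):

	#include <linux/bits.h>
	#include <linux/io.h>

	/* Writing BIT(n + 16) | BIT(n) sets bit n; writing BIT(n + 16)
	 * alone clears it. Bits whose mask half is 0 are left untouched. */
	static void example_update_bit(void __iomem *reg, unsigned int n, bool set)
	{
		writel(BIT(n + 16) | (set ? BIT(n) : 0), reg);
	}
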
diff --git a/drivers/s390/block/Kconfig b/drivers/s390/block/Kconfig
index d0416dbd0cd8..e3710a762aba 100644
--- a/drivers/s390/block/Kconfig
+++ b/drivers/s390/block/Kconfig
@@ -5,7 +5,7 @@ comment "S/390 block device drivers"
config DCSSBLK
def_tristate m
select FS_DAX_LIMITED
- select DAX_DRIVER
+ select DAX
prompt "DCSSBLK support"
depends on S390 && BLOCK
help
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 27ab888b44d0..d614843caf6c 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -44,18 +44,6 @@ static const struct block_device_operations dcssblk_devops = {
.release = dcssblk_release,
};
-static size_t dcssblk_dax_copy_from_iter(struct dax_device *dax_dev,
- pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
-{
- return copy_from_iter(addr, bytes, i);
-}
-
-static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
- pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i)
-{
- return copy_to_iter(addr, bytes, i);
-}
-
static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
pgoff_t pgoff, size_t nr_pages)
{
@@ -72,9 +60,6 @@ static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
static const struct dax_operations dcssblk_dax_ops = {
.direct_access = dcssblk_dax_direct_access,
- .dax_supported = generic_fsdax_supported,
- .copy_from_iter = dcssblk_dax_copy_from_iter,
- .copy_to_iter = dcssblk_dax_copy_to_iter,
.zero_page_range = dcssblk_dax_zero_page_range,
};
@@ -687,18 +672,21 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto put_dev;
- dev_info->dax_dev = alloc_dax(dev_info, dev_info->gd->disk_name,
- &dcssblk_dax_ops, DAXDEV_F_SYNC);
+ dev_info->dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
if (IS_ERR(dev_info->dax_dev)) {
rc = PTR_ERR(dev_info->dax_dev);
dev_info->dax_dev = NULL;
goto put_dev;
}
+ set_dax_synchronous(dev_info->dax_dev);
+ rc = dax_add_host(dev_info->dax_dev, dev_info->gd);
+ if (rc)
+ goto out_dax;
get_device(&dev_info->dev);
rc = device_add_disk(&dev_info->dev, dev_info->gd, NULL);
if (rc)
- goto out_dax;
+ goto out_dax_host;
switch (dev_info->segment_type) {
case SEG_TYPE_SR:
@@ -714,6 +702,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
rc = count;
goto out;
+out_dax_host:
+ dax_remove_host(dev_info->gd);
out_dax:
put_device(&dev_info->dev);
kill_dax(dev_info->dax_dev);
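
The dcssblk conversion follows the reworked DAX registration sequence: allocate the dax_device, mark it synchronous, then attach it to the gendisk. A hedged sketch of that flow with error unwinding (names are placeholders):

	#include <linux/blkdev.h>
	#include <linux/dax.h>
	#include <linux/err.h>

	static int example_register_dax(void *drv_priv, struct gendisk *disk,
					const struct dax_operations *ops)
	{
		struct dax_device *dax_dev;
		int rc;

		dax_dev = alloc_dax(drv_priv, ops);
		if (IS_ERR(dax_dev))
			return PTR_ERR(dax_dev);

		set_dax_synchronous(dax_dev);	/* no flushing required */

		rc = dax_add_host(dax_dev, disk);
		if (rc) {
			kill_dax(dax_dev);
			put_dax(dax_dev);
		}
		return rc;
	}
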
diff --git a/drivers/s390/char/keyboard.h b/drivers/s390/char/keyboard.h
index c467589c7f45..c06d399b9b1f 100644
--- a/drivers/s390/char/keyboard.h
+++ b/drivers/s390/char/keyboard.h
@@ -56,7 +56,7 @@ static inline void
kbd_put_queue(struct tty_port *port, int ch)
{
tty_insert_flip_char(port, ch, 0);
- tty_schedule_flip(port);
+ tty_flip_buffer_push(port);
}
static inline void
@@ -64,5 +64,5 @@ kbd_puts_queue(struct tty_port *port, char *cp)
{
while (*cp)
tty_insert_flip_char(port, *cp++, 0);
- tty_schedule_flip(port);
+ tty_flip_buffer_push(port);
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index cd938a26b76c..3b1cd0c96a74 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1180,7 +1180,7 @@ static int io_subchannel_chp_event(struct subchannel *sch,
else
path_event[chpid] = PE_NONE;
}
- if (cdev)
+ if (cdev && cdev->drv && cdev->drv->path_event)
cdev->drv->path_event(cdev, path_event);
break;
}
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index d24cafe02708..511bf8e0a436 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data)
goto out;
}
+ /* re-init to undo drop from zfcp_fc_adisc() */
+ port->d_id = ntoh24(adisc_resp->adisc_port_id);
/* port is good, unblock rport without going through erp */
zfcp_scsi_schedule_rport_register(port);
out:
@@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
struct zfcp_fc_req *fc_req;
struct zfcp_adapter *adapter = port->adapter;
struct Scsi_Host *shost = adapter->scsi_host;
+ u32 d_id;
int ret;
fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
@@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
- ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
+ d_id = port->d_id; /* remember as destination for send els below */
+ /*
+ * Force fresh GID_PN lookup on next port recovery.
+ * Must happen after request setup and before sending request,
+ * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
+ */
+ port->d_id = 0;
+
+ ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
ZFCP_FC_CTELS_TMO);
if (ret)
kmem_cache_free(zfcp_fc_req_cache, fc_req);
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index b9482da79512..3ebe66151dcb 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1567,8 +1567,6 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
pci_try_set_mwi(pdev);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (retval)
- retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x18, "Failed to set dma mask");
retval = -ENODEV;
@@ -1786,8 +1784,6 @@ static int __maybe_unused twl_resume(struct device *dev)
pci_try_set_mwi(pdev);
retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (retval)
- retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (retval) {
TW_PRINTK(host, TW_DRIVER, 0x25, "Failed to set dma mask during resume");
retval = -ENODEV;
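
This and the similar hunks below (bfa, efct) drop a fallback that is dead code on current kernels: dma_set_mask_and_coherent() only fails when the requested mask is smaller than the platform can address, so a 32-bit retry cannot succeed where a 64-bit request failed. The simplified pattern, as a sketch:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	static int example_set_dma_mask(struct pci_dev *pdev)
	{
		/* One 64-bit request suffices; on failure DMA is unusable. */
		int rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

		if (rc)
			dev_err(&pdev->dev, "no usable DMA configuration\n");

		return rc;
	}
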
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 3ad3ebaca8e9..ad4972c0fc53 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -1507,7 +1507,6 @@ NCR_700_intr(int irq, void *dev_id)
struct scsi_cmnd *SCp = hostdata->cmd;
handled = 1;
- SCp = hostdata->cmd;
if(istat & SCSI_INT_PENDING) {
udelay(10);
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 564ade03b530..d02eb5b213d0 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -904,13 +904,11 @@ static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struc
/**
* inia100_queue_lck - queue command with host
* @cmd: Command block
- * @done: Completion function
*
* Called by the mid layer to queue a command. Process the command
* block, build the host specific scb structures and if there is room
* queue the command down to the controller
*/
-
static int inia100_queue_lck(struct scsi_cmnd *cmd)
{
struct orc_scb *scb;
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 59f6b7b2a70a..b04d039da276 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -271,7 +271,7 @@ MODULE_PARM_DESC(msi, "IRQ handling."
" 0=PIC(default), 1=MSI, 2=MSI-X)");
module_param(startup_timeout, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(startup_timeout, "The duration of time in seconds to wait for"
- " adapter to have it's kernel up and\n"
+ " adapter to have its kernel up and\n"
"running. This is typically adjusted for large systems that do not"
" have a BIOS.");
module_param(aif_timeout, int, S_IRUGO|S_IWUSR);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 5d566d2b2997..928099163f0f 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -755,11 +755,7 @@ ahd_linux_biosparam(struct scsi_device *sdev, struct block_device *bdev,
static int
ahd_linux_abort(struct scsi_cmnd *cmd)
{
- int error;
-
- error = ahd_linux_queue_abort_cmd(cmd);
-
- return error;
+ return ahd_linux_queue_abort_cmd(cmd);
}
/*
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index dcd6fae65a88..7143418d690f 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -614,7 +614,6 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
/**
* atp870u_queuecommand_lck - Queue SCSI command
* @req_p: request block
- * @done: completion function
*
* Queue a command to the ATP queue. Called with the host lock held.
*/
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 440ef32be048..e5aa982ffedc 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -732,9 +732,6 @@ bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
pci_set_master(pdev);
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-
if (rc) {
rc = -ENODEV;
printk(KERN_ERR "dma_set_mask_and_coherent fail %p\n", pdev);
@@ -1560,9 +1557,6 @@ bfad_pci_slot_reset(struct pci_dev *pdev)
rc = dma_set_mask_and_coherent(&bfad->pcidev->dev, DMA_BIT_MASK(64));
if (rc)
- rc = dma_set_mask_and_coherent(&bfad->pcidev->dev,
- DMA_BIT_MASK(32));
- if (rc)
goto out_disable_device;
if (restart_bfa(bfad) == -1)
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index c8b947c16069..f46989bd083c 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -981,7 +981,7 @@ const struct attribute_group *bfad_im_host_groups[] = {
NULL
};
-struct attribute *bfad_im_vport_attrs[] = {
+static struct attribute *bfad_im_vport_attrs[] = {
&dev_attr_serial_number.attr,
&dev_attr_model.attr,
&dev_attr_model_description.attr,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 71fa62bd3083..a826456c6075 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -82,7 +82,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
struct device *parent, int npiv);
-static void bnx2fc_destroy_work(struct work_struct *work);
+static void bnx2fc_port_destroy(struct fcoe_port *port);
static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
@@ -508,7 +508,8 @@ static int bnx2fc_l2_rcv_thread(void *arg)
static void bnx2fc_recv_frame(struct sk_buff *skb)
{
- u32 fr_len;
+ u64 crc_err;
+ u32 fr_len, fr_crc;
struct fc_lport *lport;
struct fcoe_rcv_info *fr;
struct fc_stats *stats;
@@ -542,6 +543,11 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
skb_pull(skb, sizeof(struct fcoe_hdr));
fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ stats->RxFrames++;
+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+ put_cpu();
+
fp = (struct fc_frame *)skb;
fc_frame_init(fp);
fr_dev(fp) = lport;
@@ -624,16 +630,15 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
return;
}
- stats = per_cpu_ptr(lport->stats, smp_processor_id());
- stats->RxFrames++;
- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+ fr_crc = le32_to_cpu(fr_crc(fp));
- if (le32_to_cpu(fr_crc(fp)) !=
- ~crc32(~0, skb->data, fr_len)) {
- if (stats->InvalidCRCCount < 5)
+ if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) {
+ stats = per_cpu_ptr(lport->stats, get_cpu());
+ crc_err = (stats->InvalidCRCCount++);
+ put_cpu();
+ if (crc_err < 5)
printk(KERN_WARNING PFX "dropping frame with "
"CRC error\n");
- stats->InvalidCRCCount++;
kfree_skb(skb);
return;
}
@@ -907,9 +912,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
__bnx2fc_destroy(interface);
}
mutex_unlock(&bnx2fc_dev_lock);
-
- /* Ensure ALL destroy work has been completed before return */
- flush_workqueue(bnx2fc_wq);
return;
default:
@@ -1215,8 +1217,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
mutex_unlock(&n_port->lp_mutex);
bnx2fc_free_vport(interface->hba, port->lport);
bnx2fc_port_shutdown(port->lport);
+ bnx2fc_port_destroy(port);
bnx2fc_interface_put(interface);
- queue_work(bnx2fc_wq, &port->destroy_work);
return 0;
}
@@ -1525,7 +1527,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
port->lport = lport;
port->priv = interface;
port->get_netdev = bnx2fc_netdev;
- INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
/* Configure fcoe_port */
rc = bnx2fc_lport_config(lport);
@@ -1653,8 +1654,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
bnx2fc_interface_cleanup(interface);
bnx2fc_stop(interface);
list_del(&interface->list);
+ bnx2fc_port_destroy(port);
bnx2fc_interface_put(interface);
- queue_work(bnx2fc_wq, &port->destroy_work);
}
/**
@@ -1694,15 +1695,12 @@ netdev_err:
return rc;
}
-static void bnx2fc_destroy_work(struct work_struct *work)
+static void bnx2fc_port_destroy(struct fcoe_port *port)
{
- struct fcoe_port *port;
struct fc_lport *lport;
- port = container_of(work, struct fcoe_port, destroy_work);
lport = port->lport;
-
- BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
+ BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
bnx2fc_if_destroy(lport);
}
@@ -2556,9 +2554,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
__bnx2fc_destroy(interface);
mutex_unlock(&bnx2fc_dev_lock);
- /* Ensure ALL destroy work has been completed before return */
- flush_workqueue(bnx2fc_wq);
-
bnx2fc_ulp_stop(hba);
/* unregister cnic device */
if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
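
The stats fix above uses the standard per-CPU counter idiom: get_cpu() disables preemption so the pointer returned by per_cpu_ptr() stays valid for the increment, and put_cpu() re-enables it. A sketch with a hypothetical counter structure:

	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/types.h>

	struct example_stats {
		u64 rx_frames;
	};

	/* 'counters' would come from alloc_percpu(struct example_stats). */
	static void example_count_rx(struct example_stats __percpu *counters)
	{
		struct example_stats *s = per_cpu_ptr(counters, get_cpu());

		s->rx_frames++;		/* preemption disabled until put_cpu() */
		put_cpu();
	}
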
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 27012908b586..908854869864 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -239,7 +239,7 @@ ch_read_element_status(scsi_changer *ch, u_int elem, char *data)
u_char *buffer;
int result;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if(!buffer)
return -ENOMEM;
@@ -297,7 +297,7 @@ ch_readconfig(scsi_changer *ch)
int result,id,lun,i;
u_int elem;
- buffer = kzalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kzalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -783,7 +783,7 @@ static long ch_ioctl(struct file *file,
return -EINVAL;
elem = ch->firsts[cge.cge_type] + cge.cge_unit;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
mutex_lock(&ch->lock);
@@ -877,7 +877,7 @@ static long ch_ioctl(struct file *file,
}
default:
- return scsi_ioctl(ch->device, NULL, file->f_mode, cmd, argp);
+ return scsi_ioctl(ch->device, file->f_mode, cmd, argp);
}
}
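The ch.c changes drop GFP_DMA from plain kmalloc()/kzalloc() buffers. ZONE_DMA memory (the ISA-reachable first 16 MiB on x86) is not needed here: these buffers are handed to the SCSI midlayer, which maps them through the DMA API against the device's actual addressing mask. A sketch of the corrected allocation:

	/* No GFP_DMA: the DMA mapping layer, not the page-allocator zone,
	 * enforces device addressing limits for midlayer-mapped buffers.
	 */
	u_char *buffer = kmalloc(512, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

The final hunk also tracks the scsi_ioctl() signature, which no longer takes a block_device argument (previously passed as NULL here).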
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 9b8796c9e634..c11916b8ae00 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -946,7 +946,6 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
* layer, invoke 'done' on completion
*
* @cmd: pointer to scsi command object
- * @done: function pointer to be invoked on completion
*
* Returns 1 if the adapter (host) is busy, else returns 0. One
* reason for an adapter to be busy is that the number
@@ -959,7 +958,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
* Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
* and is expected to be held on return.
*
- **/
+ */
static int dc395x_queue_command_lck(struct scsi_cmnd *cmd)
{
void (*done)(struct scsi_cmnd *) = scsi_done;
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
index b2b61bc45f12..b08fc8839808 100644
--- a/drivers/scsi/elx/efct/efct_driver.c
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -261,7 +261,7 @@ efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
dma.size = FW_WRITE_BUFSIZE;
dma.virt = dma_alloc_coherent(&efct->pci->dev,
- dma.size, &dma.phys, GFP_DMA);
+ dma.size, &dma.phys, GFP_KERNEL);
if (!dma.virt)
return -ENOMEM;
@@ -541,13 +541,10 @@ efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, efct);
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
- dev_warn(&pdev->dev, "trying DMA_BIT_MASK(32)\n");
- if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
- dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
- rc = -1;
- goto dma_mask_out;
- }
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
+ goto dma_mask_out;
}
num_interrupts = efct_device_interrupts_required(efct);
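efct_pci_probe() loses its 32-bit retry: on current kernels dma_set_mask_and_coherent() with DMA_BIT_MASK(64) practically never fails on platforms this hardware runs on, so the fallback was dead code and the result can be checked once. A sketch of the simplified probe step, with the error label taken from the surrounding code:

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
		goto dma_mask_out;
	}

The same simplification is applied to the two hisi_sas probe paths further down.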
diff --git a/drivers/scsi/elx/efct/efct_hw.c b/drivers/scsi/elx/efct/efct_hw.c
index ba8256b4c782..d4bb37960a3c 100644
--- a/drivers/scsi/elx/efct/efct_hw.c
+++ b/drivers/scsi/elx/efct/efct_hw.c
@@ -516,7 +516,7 @@ efct_hw_setup_io(struct efct_hw *hw)
dma = &hw->xfer_rdy;
dma->size = sizeof(struct fcp_txrdy) * hw->config.n_io;
dma->virt = dma_alloc_coherent(&efct->pci->dev,
- dma->size, &dma->phys, GFP_DMA);
+ dma->size, &dma->phys, GFP_KERNEL);
if (!dma->virt)
return -ENOMEM;
}
@@ -562,7 +562,7 @@ efct_hw_setup_io(struct efct_hw *hw)
sizeof(struct sli4_sge);
dma->virt = dma_alloc_coherent(&efct->pci->dev,
dma->size, &dma->phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!dma->virt) {
efc_log_err(hw->os, "dma_alloc fail %d\n", i);
memset(&io->def_sgl, 0,
@@ -618,7 +618,7 @@ efct_hw_init_prereg_io(struct efct_hw *hw)
memset(&req, 0, sizeof(struct efc_dma));
req.size = 32 + sgls_per_request * 16;
req.virt = dma_alloc_coherent(&efct->pci->dev, req.size, &req.phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!req.virt) {
kfree(sgls);
return -ENOMEM;
@@ -1063,7 +1063,7 @@ efct_hw_init(struct efct_hw *hw)
dma = &hw->loop_map;
dma->size = SLI4_MIN_LOOP_MAP_BYTES;
dma->virt = dma_alloc_coherent(&hw->os->pci->dev, dma->size, &dma->phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!dma->virt)
return -EIO;
@@ -1192,7 +1192,7 @@ efct_hw_rx_buffer_alloc(struct efct_hw *hw, u32 rqindex, u32 count,
prq->dma.virt = dma_alloc_coherent(&efct->pci->dev,
prq->dma.size,
&prq->dma.phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!prq->dma.virt) {
efc_log_err(hw->os, "DMA allocation failed\n");
kfree(rq_buf);
diff --git a/drivers/scsi/elx/efct/efct_io.c b/drivers/scsi/elx/efct/efct_io.c
index 71e21655916a..c3247b951a76 100644
--- a/drivers/scsi/elx/efct/efct_io.c
+++ b/drivers/scsi/elx/efct/efct_io.c
@@ -48,7 +48,7 @@ efct_io_pool_create(struct efct *efct, u32 num_sgl)
io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
io->rspbuf.size,
- &io->rspbuf.phys, GFP_DMA);
+ &io->rspbuf.phys, GFP_KERNEL);
if (!io->rspbuf.virt) {
efc_log_err(efct, "dma_alloc rspbuf failed\n");
efct_io_pool_free(io_pool);
diff --git a/drivers/scsi/elx/libefc/efc_cmds.c b/drivers/scsi/elx/libefc/efc_cmds.c
index f8665d48904a..da4ac8a4ce12 100644
--- a/drivers/scsi/elx/libefc/efc_cmds.c
+++ b/drivers/scsi/elx/libefc/efc_cmds.c
@@ -179,7 +179,7 @@ efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
nport->dma.size = EFC_SPARAM_DMA_SZ;
nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
nport->dma.size, &nport->dma.phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!nport->dma.virt) {
efc_log_err(efc, "Failed to allocate DMA memory\n");
efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
@@ -466,7 +466,7 @@ efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
domain->dma.size = EFC_SPARAM_DMA_SZ;
domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
domain->dma.size,
- &domain->dma.phys, GFP_DMA);
+ &domain->dma.phys, GFP_KERNEL);
if (!domain->dma.virt) {
efc_log_err(efc, "Failed to allocate DMA memory\n");
return -EIO;
diff --git a/drivers/scsi/elx/libefc/efc_els.c b/drivers/scsi/elx/libefc/efc_els.c
index 24db0accb256..84bc81d7ce76 100644
--- a/drivers/scsi/elx/libefc/efc_els.c
+++ b/drivers/scsi/elx/libefc/efc_els.c
@@ -46,18 +46,14 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
efc = node->efc;
- spin_lock_irqsave(&node->els_ios_lock, flags);
-
if (!node->els_io_enabled) {
efc_log_err(efc, "els io alloc disabled\n");
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return NULL;
}
els = mempool_alloc(efc->els_io_pool, GFP_ATOMIC);
if (!els) {
atomic_add_return(1, &efc->els_io_alloc_failed_count);
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return NULL;
}
@@ -71,16 +67,15 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
/* now allocate DMA for request and response */
els->io.req.size = reqlen;
els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size,
- &els->io.req.phys, GFP_DMA);
+ &els->io.req.phys, GFP_KERNEL);
if (!els->io.req.virt) {
mempool_free(els, efc->els_io_pool);
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return NULL;
}
els->io.rsp.size = rsplen;
els->io.rsp.virt = dma_alloc_coherent(&efc->pci->dev, els->io.rsp.size,
- &els->io.rsp.phys, GFP_DMA);
+ &els->io.rsp.phys, GFP_KERNEL);
if (!els->io.rsp.virt) {
dma_free_coherent(&efc->pci->dev, els->io.req.size,
els->io.req.virt, els->io.req.phys);
@@ -94,10 +89,11 @@ efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen)
/* add els structure to ELS IO list */
INIT_LIST_HEAD(&els->list_entry);
+ spin_lock_irqsave(&node->els_ios_lock, flags);
list_add_tail(&els->list_entry, &node->els_ios_list);
+ spin_unlock_irqrestore(&node->els_ios_lock, flags);
}
- spin_unlock_irqrestore(&node->els_ios_lock, flags);
return els;
}
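The efc_els rework fixes a sleep-under-spinlock: els_ios_lock used to be held across two dma_alloc_coherent() calls, but those allocations may sleep, which is illegal with a spinlock held and interrupts off. The allocations now happen first, and the lock only covers the list insertion it actually protects. A sketch of the resulting shape, names as in the diff:

	els->io.req.virt = dma_alloc_coherent(&efc->pci->dev, els->io.req.size,
					      &els->io.req.phys, GFP_KERNEL);
	if (!els->io.req.virt) {
		mempool_free(els, efc->els_io_pool);
		return NULL;		/* no lock held on any error path */
	}

	spin_lock_irqsave(&node->els_ios_lock, flags);
	list_add_tail(&els->list_entry, &node->els_ios_list);
	spin_unlock_irqrestore(&node->els_ios_lock, flags);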
diff --git a/drivers/scsi/elx/libefc_sli/sli4.c b/drivers/scsi/elx/libefc_sli/sli4.c
index 907d67aeac23..3ea57bd6fb0a 100644
--- a/drivers/scsi/elx/libefc_sli/sli4.c
+++ b/drivers/scsi/elx/libefc_sli/sli4.c
@@ -445,7 +445,7 @@ sli_cmd_rq_create_v2(struct sli4 *sli4, u32 num_rqs,
dma->size = payload_size;
dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
- &dma->phys, GFP_DMA);
+ &dma->phys, GFP_KERNEL);
if (!dma->virt)
return -EIO;
@@ -508,7 +508,7 @@ __sli_queue_init(struct sli4 *sli4, struct sli4_queue *q, u32 qtype,
q->dma.size = size * n_entries;
q->dma.virt = dma_alloc_coherent(&sli4->pci->dev, q->dma.size,
- &q->dma.phys, GFP_DMA);
+ &q->dma.phys, GFP_KERNEL);
if (!q->dma.virt) {
memset(&q->dma, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "%s allocation failed\n", SLI4_QNAME[qtype]);
@@ -849,7 +849,7 @@ static int sli_cmd_cq_set_create(struct sli4 *sli4,
dma->size = payload_size;
dma->virt = dma_alloc_coherent(&sli4->pci->dev, dma->size,
- &dma->phys, GFP_DMA);
+ &dma->phys, GFP_KERNEL);
if (!dma->virt)
return -EIO;
@@ -4413,7 +4413,7 @@ sli_get_ctrl_attributes(struct sli4 *sli4)
psize = sizeof(struct sli4_rsp_cmn_get_cntl_addl_attributes);
data.size = psize;
data.virt = dma_alloc_coherent(&sli4->pci->dev, data.size,
- &data.phys, GFP_DMA);
+ &data.phys, GFP_KERNEL);
if (!data.virt) {
memset(&data, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "Failed to allocate memory for GET_CNTL_ADDL_ATTR\n");
@@ -4653,7 +4653,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
*/
sli4->bmbx.size = SLI4_BMBX_SIZE + sizeof(struct sli4_mcqe);
sli4->bmbx.virt = dma_alloc_coherent(&pdev->dev, sli4->bmbx.size,
- &sli4->bmbx.phys, GFP_DMA);
+ &sli4->bmbx.phys, GFP_KERNEL);
if (!sli4->bmbx.virt) {
memset(&sli4->bmbx, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "bootstrap mailbox allocation failed\n");
@@ -4674,7 +4674,7 @@ sli_setup(struct sli4 *sli4, void *os, struct pci_dev *pdev,
sli4->vpd_data.virt = dma_alloc_coherent(&pdev->dev,
sli4->vpd_data.size,
&sli4->vpd_data.phys,
- GFP_DMA);
+ GFP_KERNEL);
if (!sli4->vpd_data.virt) {
memset(&sli4->vpd_data, 0, sizeof(struct efc_dma));
/* Note that failure isn't fatal in this specific case */
@@ -5070,7 +5070,7 @@ sli_cmd_post_hdr_templates(struct sli4 *sli4, void *buf, struct efc_dma *dma,
payload_dma->size = payload_size;
payload_dma->virt = dma_alloc_coherent(&sli4->pci->dev,
payload_dma->size,
- &payload_dma->phys, GFP_DMA);
+ &payload_dma->phys, GFP_KERNEL);
if (!payload_dma->virt) {
memset(payload_dma, 0, sizeof(struct efc_dma));
efc_log_err(sli4, "mbox payload memory allocation fail\n");
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 2213a91923a5..15a58c955516 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -8,7 +8,6 @@
#define _HISI_SAS_H_
#include <linux/acpi.h>
-#include <linux/async.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/clk.h>
@@ -134,6 +133,11 @@ struct hisi_sas_rst {
bool done;
};
+struct hisi_sas_internal_abort {
+ unsigned int flag;
+ unsigned int tag;
+};
+
#define HISI_SAS_RST_WORK_INIT(r, c) \
{ .hisi_hba = hisi_hba, \
.completion = &c, \
@@ -154,6 +158,7 @@ enum hisi_sas_bit_err_type {
enum hisi_sas_phy_event {
HISI_PHYE_PHY_UP = 0U,
HISI_PHYE_LINK_RESET,
+ HISI_PHYE_PHY_UP_PM,
HISI_PHYES_NUM,
};
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index f206c433de32..ebf5ec38891b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -158,7 +158,7 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx)
{
void *bitmap = hisi_hba->slot_index_tags;
- clear_bit(slot_idx, bitmap);
+ __clear_bit(slot_idx, bitmap);
}
static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx)
@@ -175,7 +175,7 @@ static void hisi_sas_slot_index_set(struct hisi_hba *hisi_hba, int slot_idx)
{
void *bitmap = hisi_hba->slot_index_tags;
- set_bit(slot_idx, bitmap);
+ __set_bit(slot_idx, bitmap);
}
static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
@@ -206,14 +206,6 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba,
return index;
}
-static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba)
-{
- int i;
-
- for (i = 0; i < hisi_hba->slot_index_count; ++i)
- hisi_sas_slot_index_clear(hisi_hba, i);
-}
-
void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
struct hisi_sas_slot *slot)
{
@@ -273,11 +265,11 @@ static void hisi_sas_task_prep_ata(struct hisi_hba *hisi_hba,
}
static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba,
- struct hisi_sas_slot *slot,
- int device_id, int abort_flag, int tag_to_abort)
+ struct hisi_sas_internal_abort *abort,
+ struct hisi_sas_slot *slot, int device_id)
{
hisi_hba->hw->prep_abort(hisi_hba, slot,
- device_id, abort_flag, tag_to_abort);
+ device_id, abort->flag, abort->tag);
}
static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba,
@@ -403,95 +395,19 @@ err_out_dif_dma_unmap:
return rc;
}
-static int hisi_sas_task_prep(struct sas_task *task,
- struct hisi_sas_dq **dq_pointer,
- bool is_tmf, struct hisi_sas_tmf_task *tmf,
- int *pass)
+static
+void hisi_sas_task_deliver(struct hisi_hba *hisi_hba,
+ struct hisi_sas_slot *slot,
+ struct hisi_sas_dq *dq,
+ struct hisi_sas_device *sas_dev,
+ struct hisi_sas_internal_abort *abort)
{
- struct domain_device *device = task->dev;
- struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
- struct hisi_sas_device *sas_dev = device->lldd_dev;
- struct hisi_sas_port *port;
- struct hisi_sas_slot *slot;
- struct hisi_sas_cmd_hdr *cmd_hdr_base;
- struct asd_sas_port *sas_port = device->port;
- struct device *dev = hisi_hba->dev;
- int dlvry_queue_slot, dlvry_queue, rc, slot_idx;
- int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
- struct scsi_cmnd *scmd = NULL;
- struct hisi_sas_dq *dq;
+ struct hisi_sas_cmd_hdr *cmd_hdr_base;
+ int dlvry_queue_slot, dlvry_queue;
+ struct sas_task *task = slot->task;
unsigned long flags;
int wr_q_index;
- if (DEV_IS_GONE(sas_dev)) {
- if (sas_dev)
- dev_info(dev, "task prep: device %d not ready\n",
- sas_dev->device_id);
- else
- dev_info(dev, "task prep: device %016llx not ready\n",
- SAS_ADDR(device->sas_addr));
-
- return -ECOMM;
- }
-
- if (task->uldd_task) {
- struct ata_queued_cmd *qc;
-
- if (dev_is_sata(device)) {
- qc = task->uldd_task;
- scmd = qc->scsicmd;
- } else {
- scmd = task->uldd_task;
- }
- }
-
- if (scmd) {
- unsigned int dq_index;
- u32 blk_tag;
-
- blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
- dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
- *dq_pointer = dq = &hisi_hba->dq[dq_index];
- } else {
- struct Scsi_Host *shost = hisi_hba->shost;
- struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
- int queue = qmap->mq_map[raw_smp_processor_id()];
-
- *dq_pointer = dq = &hisi_hba->dq[queue];
- }
-
- port = to_hisi_sas_port(sas_port);
- if (port && !port->port_attached) {
- dev_info(dev, "task prep: %s port%d not attach device\n",
- (dev_is_sata(device)) ?
- "SATA/STP" : "SAS",
- device->port->id);
-
- return -ECOMM;
- }
-
- rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
- &n_elem_req);
- if (rc < 0)
- goto prep_out;
-
- if (!sas_protocol_ata(task->task_proto)) {
- rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
- if (rc < 0)
- goto err_out_dma_unmap;
- }
-
- if (hisi_hba->hw->slot_index_alloc)
- rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
- else
- rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);
-
- if (rc < 0)
- goto err_out_dif_dma_unmap;
-
- slot_idx = rc;
- slot = &hisi_hba->slot_info[slot_idx];
-
spin_lock(&dq->lock);
wr_q_index = dq->wr_point;
dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
@@ -505,16 +421,11 @@ static int hisi_sas_task_prep(struct sas_task *task,
dlvry_queue_slot = wr_q_index;
slot->device_id = sas_dev->device_id;
- slot->n_elem = n_elem;
- slot->n_elem_dif = n_elem_dif;
slot->dlvry_queue = dlvry_queue;
slot->dlvry_queue_slot = dlvry_queue_slot;
cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
- slot->task = task;
- slot->port = port;
- slot->tmf = tmf;
- slot->is_internal = is_tmf;
+
task->lldd_task = slot;
memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
@@ -534,8 +445,14 @@ static int hisi_sas_task_prep(struct sas_task *task,
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
hisi_sas_task_prep_ata(hisi_hba, slot);
break;
+ case SAS_PROTOCOL_NONE:
+ if (abort) {
+ hisi_sas_task_prep_abort(hisi_hba, abort, slot, sas_dev->device_id);
+ break;
+ }
+ fallthrough;
default:
- dev_err(dev, "task prep: unknown/unsupported proto (0x%x)\n",
+ dev_err(hisi_hba->dev, "task prep: unknown/unsupported proto (0x%x)\n",
task->task_proto);
break;
}
@@ -544,32 +461,27 @@ static int hisi_sas_task_prep(struct sas_task *task,
task->task_state_flags |= SAS_TASK_AT_INITIATOR;
spin_unlock_irqrestore(&task->task_state_lock, flags);
- ++(*pass);
WRITE_ONCE(slot->ready, 1);
- return 0;
-
-err_out_dif_dma_unmap:
- if (!sas_protocol_ata(task->task_proto))
- hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
-err_out_dma_unmap:
- hisi_sas_dma_unmap(hisi_hba, task, n_elem,
- n_elem_req);
-prep_out:
- dev_err(dev, "task prep: failed[%d]!\n", rc);
- return rc;
+ spin_lock(&dq->lock);
+ hisi_hba->hw->start_delivery(dq);
+ spin_unlock(&dq->lock);
}
static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
- bool is_tmf, struct hisi_sas_tmf_task *tmf)
+ struct hisi_sas_tmf_task *tmf)
{
- u32 rc;
- u32 pass = 0;
- struct hisi_hba *hisi_hba;
- struct device *dev;
+ int n_elem = 0, n_elem_dif = 0, n_elem_req = 0;
struct domain_device *device = task->dev;
struct asd_sas_port *sas_port = device->port;
+ struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct scsi_cmnd *scmd = NULL;
struct hisi_sas_dq *dq = NULL;
+ struct hisi_sas_port *port;
+ struct hisi_hba *hisi_hba;
+ struct hisi_sas_slot *slot;
+ struct device *dev;
+ int rc;
if (!sas_port) {
struct task_status_struct *ts = &task->task_status;
@@ -596,17 +508,94 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
up(&hisi_hba->sem);
}
- /* protect task_prep and start_delivery sequence */
- rc = hisi_sas_task_prep(task, &dq, is_tmf, tmf, &pass);
- if (rc)
- dev_err(dev, "task exec: failed[%d]!\n", rc);
+ if (DEV_IS_GONE(sas_dev)) {
+ if (sas_dev)
+ dev_info(dev, "task prep: device %d not ready\n",
+ sas_dev->device_id);
+ else
+ dev_info(dev, "task prep: device %016llx not ready\n",
+ SAS_ADDR(device->sas_addr));
+
+ return -ECOMM;
+ }
+
+ if (task->uldd_task) {
+ struct ata_queued_cmd *qc;
- if (likely(pass)) {
- spin_lock(&dq->lock);
- hisi_hba->hw->start_delivery(dq);
- spin_unlock(&dq->lock);
+ if (dev_is_sata(device)) {
+ qc = task->uldd_task;
+ scmd = qc->scsicmd;
+ } else {
+ scmd = task->uldd_task;
+ }
}
+ if (scmd) {
+ unsigned int dq_index;
+ u32 blk_tag;
+
+ blk_tag = blk_mq_unique_tag(scsi_cmd_to_rq(scmd));
+ dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
+ dq = &hisi_hba->dq[dq_index];
+ } else {
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ int queue = qmap->mq_map[raw_smp_processor_id()];
+
+ dq = &hisi_hba->dq[queue];
+ }
+
+ port = to_hisi_sas_port(sas_port);
+ if (port && !port->port_attached) {
+ dev_info(dev, "task prep: %s port%d not attach device\n",
+ (dev_is_sata(device)) ?
+ "SATA/STP" : "SAS",
+ device->port->id);
+
+ return -ECOMM;
+ }
+
+ rc = hisi_sas_dma_map(hisi_hba, task, &n_elem,
+ &n_elem_req);
+ if (rc < 0)
+ goto prep_out;
+
+ if (!sas_protocol_ata(task->task_proto)) {
+ rc = hisi_sas_dif_dma_map(hisi_hba, &n_elem_dif, task);
+ if (rc < 0)
+ goto err_out_dma_unmap;
+ }
+
+ if (hisi_hba->hw->slot_index_alloc)
+ rc = hisi_hba->hw->slot_index_alloc(hisi_hba, device);
+ else
+ rc = hisi_sas_slot_index_alloc(hisi_hba, scmd);
+
+ if (rc < 0)
+ goto err_out_dif_dma_unmap;
+
+ slot = &hisi_hba->slot_info[rc];
+ slot->n_elem = n_elem;
+ slot->n_elem_dif = n_elem_dif;
+ slot->task = task;
+ slot->port = port;
+
+ slot->tmf = tmf;
+ slot->is_internal = tmf;
+
+ /* protect task_prep and start_delivery sequence */
+ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL);
+
+ return 0;
+
+err_out_dif_dma_unmap:
+ if (!sas_protocol_ata(task->task_proto))
+ hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif);
+err_out_dma_unmap:
+ hisi_sas_dma_unmap(hisi_hba, task, n_elem,
+ n_elem_req);
+prep_out:
+ dev_err(dev, "task exec: failed[%d]!\n", rc);
return rc;
}
@@ -619,12 +608,6 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
if (!phy->phy_attached)
return;
- if (test_bit(HISI_SAS_PM_BIT, &hisi_hba->flags) &&
- !sas_phy->suspended) {
- dev_warn(hisi_hba->dev, "phy%d during suspend filtered out\n", phy_no);
- return;
- }
-
sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);
if (sas_phy->phy) {
@@ -860,10 +843,11 @@ int hisi_sas_scan_finished(struct Scsi_Host *shost, unsigned long time)
}
EXPORT_SYMBOL_GPL(hisi_sas_scan_finished);
-static void hisi_sas_phyup_work(struct work_struct *work)
+static void hisi_sas_phyup_work_common(struct work_struct *work,
+ enum hisi_sas_phy_event event)
{
struct hisi_sas_phy *phy =
- container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP]);
+ container_of(work, typeof(*phy), works[event]);
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
int phy_no = sas_phy->id;
@@ -874,6 +858,11 @@ static void hisi_sas_phyup_work(struct work_struct *work)
hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
}
+static void hisi_sas_phyup_work(struct work_struct *work)
+{
+ hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP);
+}
+
static void hisi_sas_linkreset_work(struct work_struct *work)
{
struct hisi_sas_phy *phy =
@@ -883,9 +872,21 @@ static void hisi_sas_linkreset_work(struct work_struct *work)
hisi_sas_control_phy(sas_phy, PHY_FUNC_LINK_RESET, NULL);
}
+static void hisi_sas_phyup_pm_work(struct work_struct *work)
+{
+ struct hisi_sas_phy *phy =
+ container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ struct device *dev = hisi_hba->dev;
+
+ hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
+ pm_runtime_put_sync(dev);
+}
+
static const work_func_t hisi_sas_phye_fns[HISI_PHYES_NUM] = {
[HISI_PHYE_PHY_UP] = hisi_sas_phyup_work,
[HISI_PHYE_LINK_RESET] = hisi_sas_linkreset_work,
+ [HISI_PHYE_PHY_UP_PM] = hisi_sas_phyup_pm_work,
};
bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy,
@@ -917,10 +918,14 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct device *dev = hisi_hba->dev;
+ unsigned long flags;
dev_dbg(dev, "phy%d OOB ready\n", phy_no);
- if (phy->phy_attached)
+ spin_lock_irqsave(&phy->lock, flags);
+ if (phy->phy_attached) {
+ spin_unlock_irqrestore(&phy->lock, flags);
return;
+ }
if (!timer_pending(&phy->timer)) {
if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
@@ -928,13 +933,17 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
phy->timer.expires = jiffies +
HISI_SAS_WAIT_PHYUP_TIMEOUT;
add_timer(&phy->timer);
- } else {
- dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
- phy_no, phy->wait_phyup_cnt);
- phy->wait_phyup_cnt = 0;
+ spin_unlock_irqrestore(&phy->lock, flags);
+ return;
}
+
+ dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
+ phy_no, phy->wait_phyup_cnt);
+ phy->wait_phyup_cnt = 0;
}
+ spin_unlock_irqrestore(&phy->lock, flags);
}
+
EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no)
@@ -1105,7 +1114,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags)
{
- return hisi_sas_task_exec(task, gfp_flags, 0, NULL);
+ return hisi_sas_task_exec(task, gfp_flags, NULL);
}
static int hisi_sas_phy_set_linkrate(struct hisi_hba *hisi_hba, int phy_no,
@@ -1156,6 +1165,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
u8 sts = phy->phy_attached;
int ret = 0;
+ down(&hisi_hba->sem);
phy->reset_completion = &completion;
switch (func) {
@@ -1199,6 +1209,7 @@ static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
out:
phy->reset_completion = NULL;
+ up(&hisi_hba->sem);
return ret;
}
@@ -1259,8 +1270,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
add_timer(&task->slow_task->timer);
- res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);
-
+ res = hisi_sas_task_exec(task, GFP_KERNEL, tmf);
if (res) {
del_timer_sync(&task->slow_task->timer);
dev_err(dev, "abort tmf: executing internal task failed: %d\n",
@@ -1367,12 +1377,13 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
struct device *dev = hisi_hba->dev;
int s = sizeof(struct host_to_dev_fis);
+ struct hisi_sas_tmf_task tmf = {};
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
- rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL);
+ rc = hisi_sas_exec_internal_tmf_task(device, fis, s, &tmf);
if (rc != TMF_RESP_FUNC_COMPLETE)
break;
}
@@ -1383,7 +1394,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device)
hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis);
rc = hisi_sas_exec_internal_tmf_task(device, fis,
- s, NULL);
+ s, &tmf);
if (rc != TMF_RESP_FUNC_COMPLETE)
dev_err(dev, "ata disk %016llx de-reset failed\n",
SAS_ADDR(device->sas_addr));
@@ -1433,11 +1444,13 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
sas_port = device->port;
port = to_hisi_sas_port(sas_port);
+ spin_lock(&sas_port->phy_list_lock);
list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el)
if (state & BIT(sas_phy->id)) {
phy = sas_phy->lldd_phy;
break;
}
+ spin_unlock(&sas_port->phy_list_lock);
if (phy) {
port->id = phy->port_id;
@@ -1510,26 +1523,24 @@ static void hisi_sas_send_ata_reset_each_phy(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
int s = sizeof(struct host_to_dev_fis);
int rc = TMF_RESP_FUNC_FAILED;
- struct asd_sas_phy *sas_phy;
struct ata_link *link;
u8 fis[20] = {0};
- u32 state;
+ int i;
- state = hisi_hba->hw->get_phys_state(hisi_hba);
- list_for_each_entry(sas_phy, &sas_port->phy_list, port_phy_el) {
- if (!(state & BIT(sas_phy->id)))
+ for (i = 0; i < hisi_hba->n_phy; i++) {
+ if (!(sas_port->phy_mask & BIT(i)))
continue;
ata_for_each_link(link, ap, EDGE) {
int pmp = sata_srst_pmp(link);
- tmf_task.phy_id = sas_phy->id;
+ tmf_task.phy_id = i;
hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis);
rc = hisi_sas_exec_internal_tmf_task(device, fis, s,
&tmf_task);
if (rc != TMF_RESP_FUNC_COMPLETE) {
dev_err(dev, "phy%d ata reset failed rc=%d\n",
- sas_phy->id, rc);
+ i, rc);
break;
}
}
@@ -1581,7 +1592,6 @@ void hisi_sas_controller_reset_prepare(struct hisi_hba *hisi_hba)
{
struct Scsi_Host *shost = hisi_hba->shost;
- down(&hisi_hba->sem);
hisi_hba->phy_state = hisi_hba->hw->get_phys_state(hisi_hba);
scsi_block_requests(shost);
@@ -1606,9 +1616,9 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
if (hisi_hba->reject_stp_links_msk)
hisi_sas_terminate_stp_reject(hisi_hba);
hisi_sas_reset_init_all_devices(hisi_hba);
- up(&hisi_hba->sem);
scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+ up(&hisi_hba->sem);
hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
@@ -1619,8 +1629,11 @@ static int hisi_sas_controller_prereset(struct hisi_hba *hisi_hba)
if (!hisi_hba->hw->soft_reset)
return -1;
- if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
+ down(&hisi_hba->sem);
+ if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags)) {
+ up(&hisi_hba->sem);
return -1;
+ }
if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct)
hisi_hba->hw->debugfs_snapshot_regs(hisi_hba);
@@ -2021,19 +2034,17 @@ static int hisi_sas_query_task(struct sas_task *task)
static int
hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
- struct sas_task *task, int abort_flag,
- int task_tag, struct hisi_sas_dq *dq)
+ struct hisi_sas_internal_abort *abort,
+ struct sas_task *task,
+ struct hisi_sas_dq *dq)
{
struct domain_device *device = task->dev;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct device *dev = hisi_hba->dev;
struct hisi_sas_port *port;
- struct hisi_sas_slot *slot;
struct asd_sas_port *sas_port = device->port;
- struct hisi_sas_cmd_hdr *cmd_hdr_base;
- int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx;
- unsigned long flags;
- int wr_q_index;
+ struct hisi_sas_slot *slot;
+ int slot_idx;
if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags)))
return -EINVAL;
@@ -2044,59 +2055,24 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
port = to_hisi_sas_port(sas_port);
/* simply get a slot and send abort command */
- rc = hisi_sas_slot_index_alloc(hisi_hba, NULL);
- if (rc < 0)
+ slot_idx = hisi_sas_slot_index_alloc(hisi_hba, NULL);
+ if (slot_idx < 0)
goto err_out;
- slot_idx = rc;
slot = &hisi_hba->slot_info[slot_idx];
-
- spin_lock(&dq->lock);
- wr_q_index = dq->wr_point;
- dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS;
- list_add_tail(&slot->delivery, &dq->list);
- spin_unlock(&dq->lock);
- spin_lock(&sas_dev->lock);
- list_add_tail(&slot->entry, &sas_dev->list);
- spin_unlock(&sas_dev->lock);
-
- dlvry_queue = dq->id;
- dlvry_queue_slot = wr_q_index;
-
- slot->device_id = sas_dev->device_id;
- slot->n_elem = n_elem;
- slot->dlvry_queue = dlvry_queue;
- slot->dlvry_queue_slot = dlvry_queue_slot;
- cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue];
- slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot];
+ slot->n_elem = 0;
slot->task = task;
slot->port = port;
slot->is_internal = true;
- task->lldd_task = slot;
- memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr));
- memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ);
- memset(hisi_sas_status_buf_addr_mem(slot), 0,
- sizeof(struct hisi_sas_err_record));
-
- hisi_sas_task_prep_abort(hisi_hba, slot, device_id,
- abort_flag, task_tag);
-
- spin_lock_irqsave(&task->task_state_lock, flags);
- task->task_state_flags |= SAS_TASK_AT_INITIATOR;
- spin_unlock_irqrestore(&task->task_state_lock, flags);
- WRITE_ONCE(slot->ready, 1);
- /* send abort command to the chip */
- spin_lock(&dq->lock);
- hisi_hba->hw->start_delivery(dq);
- spin_unlock(&dq->lock);
+ hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort);
return 0;
err_out:
- dev_err(dev, "internal abort task prep: failed[%d]!\n", rc);
+ dev_err(dev, "internal abort task prep: failed[%d]!\n", slot_idx);
- return rc;
+ return slot_idx;
}
/**
@@ -2118,9 +2094,12 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
{
struct sas_task *task;
struct hisi_sas_device *sas_dev = device->lldd_dev;
+ struct hisi_sas_internal_abort abort = {
+ .flag = abort_flag,
+ .tag = tag,
+ };
struct device *dev = hisi_hba->dev;
int res;
-
/*
* An unimplemented interface means this HW doesn't support internal
* abort, or doesn't need to do one. Then here, we return
@@ -2138,14 +2117,14 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
return -ENOMEM;
task->dev = device;
- task->task_proto = device->tproto;
+ task->task_proto = SAS_PROTOCOL_NONE;
task->task_done = hisi_sas_task_done;
task->slow_task->timer.function = hisi_sas_tmf_timedout;
task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT;
add_timer(&task->slow_task->timer);
res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,
- task, abort_flag, tag, dq);
+ &abort, task, dq);
if (res) {
del_timer_sync(&task->slow_task->timer);
dev_err(dev, "internal task abort: executing internal task failed: %d\n",
@@ -2516,9 +2495,8 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
if (!hisi_hba->breakpoint)
goto err_out;
- hisi_hba->slot_index_count = max_command_entries;
- s = hisi_hba->slot_index_count / BITS_PER_BYTE;
- hisi_hba->slot_index_tags = devm_kzalloc(dev, s, GFP_KERNEL);
+ s = hisi_hba->slot_index_count = max_command_entries;
+ hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, s, GFP_KERNEL);
if (!hisi_hba->slot_index_tags)
goto err_out;
@@ -2536,7 +2514,6 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba)
if (!hisi_hba->sata_breakpoint)
goto err_out;
- hisi_sas_slot_index_init(hisi_hba);
hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT;
hisi_hba->wq = create_singlethread_workqueue(dev_name(dev));
@@ -2687,9 +2664,6 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev,
goto err_out;
error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
- if (error)
- error = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
-
if (error) {
dev_err(dev, "No usable DMA addressing method\n");
goto err_out;
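Two related cleanups in hisi_sas_main.c: the slot-index tag store becomes a devm_bitmap_zalloc() (sized in bits and pre-zeroed, which is what makes the removed hisi_sas_slot_index_init() loop redundant), and the bit flips use the non-atomic __set_bit()/__clear_bit() because every caller already holds the slot lock. A sketch of the allocator side, assuming the driver's existing locking:

	hisi_hba->slot_index_tags = devm_bitmap_zalloc(dev, max_command_entries,
						       GFP_KERNEL);
	if (!hisi_hba->slot_index_tags)
		return -ENOMEM;

	/* Under the slot lock: non-atomic bit ops are sufficient and cheaper. */
	__set_bit(slot_idx, hisi_hba->slot_index_tags);
	__clear_bit(slot_idx, hisi_hba->slot_index_tags);

The larger refactor in this file folds the old hisi_sas_task_prep() into hisi_sas_task_exec(), leaving hisi_sas_task_deliver() as the single point that stamps the slot, builds the command header, and kicks start_delivery() for both normal and internal-abort tasks.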
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 0ef6c21bf081..a01a3a7b706b 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -1484,7 +1484,6 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
struct asd_sas_phy *sas_phy = &phy->sas_phy;
struct device *dev = hisi_hba->dev;
- del_timer(&phy->timer);
hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_PHY_ENA_MSK, 1);
port_id = hisi_sas_read32(hisi_hba, PHY_PORT_NUM_MA);
@@ -1561,9 +1560,18 @@ static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
}
phy->port_id = port_id;
- phy->phy_attached = 1;
- hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP);
+
+ /* Paired with pm_runtime_put_sync() in hisi_sas_phyup_pm_work() */
+ pm_runtime_get_noresume(dev);
+ hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM);
+
res = IRQ_HANDLED;
+
+ spin_lock(&phy->lock);
+ /* Delete timer and set phy_attached atomically */
+ del_timer(&phy->timer);
+ phy->phy_attached = 1;
+ spin_unlock(&phy->lock);
end:
if (phy->reset_completion)
complete(phy->reset_completion);
@@ -4687,8 +4695,6 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_out;
rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
- if (rc)
- rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (rc) {
dev_err(dev, "No usable DMA addressing method\n");
rc = -ENODEV;
@@ -4775,6 +4781,8 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
scsi_scan_host(shost);
+ pm_runtime_set_autosuspend_delay(dev, 5000);
+ pm_runtime_use_autosuspend(dev);
/*
* For the situation that there are ATA disks connected with SAS
* controller, it additionally creates ata_port which will affect the
@@ -4848,6 +4856,7 @@ static void hisi_sas_reset_prepare_v3_hw(struct pci_dev *pdev)
int rc;
dev_info(dev, "FLR prepare\n");
+ down(&hisi_hba->sem);
set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
hisi_sas_controller_reset_prepare(hisi_hba);
@@ -4897,6 +4906,8 @@ static int _suspend_v3_hw(struct device *device)
if (test_and_set_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags))
return -1;
+ dev_warn(dev, "entering suspend state\n");
+
scsi_block_requests(shost);
set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags);
flush_workqueue(hisi_hba->wq);
@@ -4912,11 +4923,11 @@ static int _suspend_v3_hw(struct device *device)
hisi_sas_init_mem(hisi_hba);
- dev_warn(dev, "entering suspend state\n");
-
hisi_sas_release_tasks(hisi_hba);
sas_suspend_ha(sha);
+
+ dev_warn(dev, "end of suspending controller\n");
return 0;
}
@@ -4943,9 +4954,19 @@ static int _resume_v3_hw(struct device *device)
return rc;
}
phys_init_v3_hw(hisi_hba);
- sas_resume_ha(sha);
+
+ /*
+ * If a directly-attached disk is removed during suspend, a deadlock
+ * may occur, as the PHYE_RESUME_TIMEOUT processing will require the
+ * hisi_hba->device to be active, which can only happen when resume
+ * completes. So don't wait for the HA event workqueue to drain upon
+ * resume.
+ */
+ sas_resume_ha_no_sync(sha);
clear_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags);
+ dev_warn(dev, "end of resuming controller\n");
+
return 0;
}
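phy_up_v3_hw() runs in hard-IRQ context, so it may only take a runtime-PM reference that is guaranteed not to resume the device synchronously; pm_runtime_get_noresume() does exactly that, and the paired pm_runtime_put_sync() happens later in hisi_sas_phyup_pm_work(), where sleeping is allowed. Probe arms a 5-second autosuspend so an idle controller can power down between phy events. A sketch of the split, with the driver's event plumbing abbreviated:

	/* IRQ context: take the reference without resuming. */
	pm_runtime_get_noresume(hisi_hba->dev);
	hisi_sas_notify_phy_event(phy, HISI_PHYE_PHY_UP_PM);

	/* Process context (workqueue): drop it, possibly suspending. */
	static void hisi_sas_phyup_pm_work(struct work_struct *work)
	{
		struct hisi_sas_phy *phy =
			container_of(work, typeof(*phy), works[HISI_PHYE_PHY_UP_PM]);

		hisi_sas_phyup_work_common(work, HISI_PHYE_PHY_UP_PM);
		pm_runtime_put_sync(phy->hisi_hba->dev);
	}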
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 8049b00b6766..f69b77cbf538 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -61,6 +61,7 @@ static void scsi_host_cls_release(struct device *dev)
static struct class shost_class = {
.name = "scsi_host",
.dev_release = scsi_host_cls_release,
+ .dev_groups = scsi_shost_groups,
};
/**
@@ -377,7 +378,7 @@ static struct device_type scsi_host_type = {
struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
{
struct Scsi_Host *shost;
- int index, i, j = 0;
+ int index;
shost = kzalloc(sizeof(struct Scsi_Host) + privsize, GFP_KERNEL);
if (!shost)
@@ -483,17 +484,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
shost->shost_dev.parent = &shost->shost_gendev;
shost->shost_dev.class = &shost_class;
dev_set_name(&shost->shost_dev, "host%d", shost->host_no);
- shost->shost_dev.groups = shost->shost_dev_attr_groups;
- shost->shost_dev_attr_groups[j++] = &scsi_shost_attr_group;
- if (sht->shost_groups) {
- for (i = 0; sht->shost_groups[i] &&
- j < ARRAY_SIZE(shost->shost_dev_attr_groups);
- i++, j++) {
- shost->shost_dev_attr_groups[j] =
- sht->shost_groups[i];
- }
- }
- WARN_ON_ONCE(j >= ARRAY_SIZE(shost->shost_dev_attr_groups));
+ shost->shost_dev.groups = sht->shost_groups;
shost->ehandler = kthread_run(scsi_error_handler, shost,
"scsi_eh_%d", shost->host_no);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index cdf3328cc065..a47bcce3c9c7 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -4354,7 +4354,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
int i, ndevs_to_allocate;
int raid_ctlr_position;
bool physical_device;
- DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
currentsd = kcalloc(HPSA_MAX_DEVICES, sizeof(*currentsd), GFP_KERNEL);
physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
@@ -4368,7 +4367,6 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h)
dev_err(&h->pdev->dev, "out of memory\n");
goto out;
}
- memset(lunzerobits, 0, sizeof(lunzerobits));
h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index fd6da96bc51a..5f96ac47d7fd 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2602,13 +2602,11 @@ static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * c
/**
* i91u_queuecommand_lck - Queue a new command if possible
* @cmd: SCSI command block from the mid layer
- * @done: Completion handler
*
* Attempts to queue a new command with the host adapter. Will return
* zero if successful or indicate a host busy condition if not (which
* will cause the mid layer to call us again later with the command)
*/
-
static int i91u_queuecommand_lck(struct scsi_cmnd *cmd)
{
struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
@@ -2849,7 +2847,8 @@ static int initio_probe_one(struct pci_dev *pdev,
for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
i = num_scb * sizeof(struct scsi_ctrl_blk);
- if ((scb = kzalloc(i, GFP_DMA)) != NULL)
+ scb = kzalloc(i, GFP_KERNEL);
+ if (scb)
break;
}
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index 12e1e36d7c04..758213694091 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -8,7 +8,6 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
-#include <linux/async.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include "sas_internal.h"
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index f703115e7a25..3613b9b315bc 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -41,12 +41,25 @@ static int sas_queue_event(int event, struct sas_work *work,
return rc;
}
-
-void __sas_drain_work(struct sas_ha_struct *ha)
+void sas_queue_deferred_work(struct sas_ha_struct *ha)
{
struct sas_work *sw, *_sw;
int ret;
+ spin_lock_irq(&ha->lock);
+ list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
+ list_del_init(&sw->drain_node);
+ ret = sas_queue_work(ha, sw);
+ if (ret != 1) {
+ pm_runtime_put(ha->dev);
+ sas_free_event(to_asd_sas_event(&sw->work));
+ }
+ }
+ spin_unlock_irq(&ha->lock);
+}
+
+void __sas_drain_work(struct sas_ha_struct *ha)
+{
set_bit(SAS_HA_DRAINING, &ha->state);
/* flush submitters */
spin_lock_irq(&ha->lock);
@@ -55,16 +68,8 @@ void __sas_drain_work(struct sas_ha_struct *ha)
drain_workqueue(ha->event_q);
drain_workqueue(ha->disco_q);
- spin_lock_irq(&ha->lock);
clear_bit(SAS_HA_DRAINING, &ha->state);
- list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
- list_del_init(&sw->drain_node);
- ret = sas_queue_work(ha, sw);
- if (ret != 1)
- sas_free_event(to_asd_sas_event(&sw->work));
-
- }
- spin_unlock_irq(&ha->lock);
+ sas_queue_deferred_work(ha);
}
int sas_drain_work(struct sas_ha_struct *ha)
@@ -104,11 +109,15 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
if (!test_and_clear_bit(ev, &d->pending))
continue;
- if (list_empty(&port->phy_list))
+ spin_lock(&port->phy_list_lock);
+ if (list_empty(&port->phy_list)) {
+ spin_unlock(&port->phy_list_lock);
continue;
+ }
sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
port_phy_el);
+ spin_unlock(&port->phy_list_lock);
sas_notify_port_event(sas_phy,
PORTE_BROADCAST_RCVD, GFP_KERNEL);
}
@@ -119,19 +128,43 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
static void sas_port_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *ha = phy->ha;
sas_port_event_fns[ev->event](work);
+ pm_runtime_put(ha->dev);
sas_free_event(ev);
}
static void sas_phy_event_worker(struct work_struct *work)
{
struct asd_sas_event *ev = to_asd_sas_event(work);
+ struct asd_sas_phy *phy = ev->phy;
+ struct sas_ha_struct *ha = phy->ha;
sas_phy_event_fns[ev->event](work);
+ pm_runtime_put(ha->dev);
sas_free_event(ev);
}
+/* Defer events from phys that were not suspended (new phys) while the HA is resuming */
+static bool sas_defer_event(struct asd_sas_phy *phy, struct asd_sas_event *ev)
+{
+ struct sas_ha_struct *ha = phy->ha;
+ unsigned long flags;
+ bool deferred = false;
+
+ spin_lock_irqsave(&ha->lock, flags);
+ if (test_bit(SAS_HA_RESUMING, &ha->state) && !phy->suspended) {
+ struct sas_work *sw = &ev->work;
+
+ list_add_tail(&sw->drain_node, &ha->defer_q);
+ deferred = true;
+ }
+ spin_unlock_irqrestore(&ha->lock, flags);
+ return deferred;
+}
+
int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
gfp_t gfp_flags)
{
@@ -145,11 +178,19 @@ int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
if (!ev)
return -ENOMEM;
+ /* Paired with pm_runtime_put() in sas_port_event_worker() */
+ pm_runtime_get_noresume(ha->dev);
+
INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);
+ if (sas_defer_event(phy, ev))
+ return 0;
+
ret = sas_queue_event(event, &ev->work, ha);
- if (ret != 1)
+ if (ret != 1) {
+ pm_runtime_put(ha->dev);
sas_free_event(ev);
+ }
return ret;
}
@@ -168,11 +209,19 @@ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
if (!ev)
return -ENOMEM;
+ /* Paired with pm_runtime_put() in sas_phy_event_worker() */
+ pm_runtime_get_noresume(ha->dev);
+
INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);
+ if (sas_defer_event(phy, ev))
+ return 0;
+
ret = sas_queue_event(event, &ev->work, ha);
- if (ret != 1)
+ if (ret != 1) {
+ pm_runtime_put(ha->dev);
sas_free_event(ev);
+ }
return ret;
}
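sas_event.c now brackets every queued event with a runtime-PM reference: the notifier takes pm_runtime_get_noresume() (it may be called from atomic context), and the worker drops it with pm_runtime_put() after the handler has run. On top of that, sas_defer_event() parks events from not-yet-suspended (i.e. freshly hotplugged) phys on ha->defer_q while SAS_HA_RESUMING is set, and sas_queue_deferred_work() replays them once resume finishes. A condensed sketch of the notify path, helper names as in the file:

	pm_runtime_get_noresume(ha->dev);	/* put in the event worker */
	INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);

	if (sas_defer_event(phy, ev))		/* parked on ha->defer_q */
		return 0;

	ret = sas_queue_event(event, &ev->work, ha);
	if (ret != 1) {				/* not queued: unwind both */
		pm_runtime_put(ha->dev);
		sas_free_event(ev);
	}
	return ret;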
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index c2150a818423..6abce9dfc17b 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -58,7 +58,9 @@ static int smp_execute_task_sg(struct domain_device *dev,
struct sas_task *task = NULL;
struct sas_internal *i =
to_sas_internal(dev->port->ha->core.shost->transportt);
+ struct sas_ha_struct *ha = dev->port->ha;
+ pm_runtime_get_sync(ha->dev);
mutex_lock(&dev->ex_dev.cmd_mutex);
for (retry = 0; retry < 3; retry++) {
if (test_bit(SAS_DEV_GONE, &dev->state)) {
@@ -131,6 +133,7 @@ static int smp_execute_task_sg(struct domain_device *dev,
}
}
mutex_unlock(&dev->ex_dev.cmd_mutex);
+ pm_runtime_put_sync(ha->dev);
BUG_ON(retry == 3 && task != NULL);
sas_free_task(task);
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index b640e09af6a4..dc35f0f8eae3 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -362,6 +362,7 @@ void sas_prep_resume_ha(struct sas_ha_struct *ha)
int i;
set_bit(SAS_HA_REGISTERED, &ha->state);
+ set_bit(SAS_HA_RESUMING, &ha->state);
/* clear out any stale link events/data from the suspension path */
for (i = 0; i < ha->num_phys; i++) {
@@ -387,7 +388,31 @@ static int phys_suspended(struct sas_ha_struct *ha)
return rc;
}
-void sas_resume_ha(struct sas_ha_struct *ha)
+static void sas_resume_insert_broadcast_ha(struct sas_ha_struct *ha)
+{
+ int i;
+
+ for (i = 0; i < ha->num_phys; i++) {
+ struct asd_sas_port *port = ha->sas_port[i];
+ struct domain_device *dev = port->port_dev;
+
+ if (dev && dev_is_expander(dev->dev_type)) {
+ struct asd_sas_phy *first_phy;
+
+ spin_lock(&port->phy_list_lock);
+ first_phy = list_first_entry_or_null(
+ &port->phy_list, struct asd_sas_phy,
+ port_phy_el);
+ spin_unlock(&port->phy_list_lock);
+
+ if (first_phy)
+ sas_notify_port_event(first_phy,
+ PORTE_BROADCAST_RCVD, GFP_KERNEL);
+ }
+ }
+}
+
+static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain)
{
const unsigned long tmo = msecs_to_jiffies(25000);
int i;
@@ -417,10 +442,30 @@ void sas_resume_ha(struct sas_ha_struct *ha)
* flush out disks that did not return
*/
scsi_unblock_requests(ha->core.shost);
- sas_drain_work(ha);
+ if (drain)
+ sas_drain_work(ha);
+ clear_bit(SAS_HA_RESUMING, &ha->state);
+
+ sas_queue_deferred_work(ha);
+ /* Send PORTE_BROADCAST_RCVD so that disks newly inserted behind an
+ * expander while suspended are identified.
+ */
+ sas_resume_insert_broadcast_ha(ha);
+}
+
+void sas_resume_ha(struct sas_ha_struct *ha)
+{
+ _sas_resume_ha(ha, true);
}
EXPORT_SYMBOL(sas_resume_ha);
+/* A no-sync variant, which does not call sas_drain_work(). */
+void sas_resume_ha_no_sync(struct sas_ha_struct *ha)
+{
+ _sas_resume_ha(ha, false);
+}
+EXPORT_SYMBOL(sas_resume_ha_no_sync);
+
void sas_suspend_ha(struct sas_ha_struct *ha)
{
int i;
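sas_resume_ha() and the new sas_resume_ha_no_sync() share one tail; only the drain differs. Skipping the drain matters on the hisi_sas resume path above, where waiting for the event queue could deadlock against a PHYE_RESUME_TIMEOUT handler that itself needs the resume to finish. A sketch of the shared tail:

	static void _sas_resume_ha(struct sas_ha_struct *ha, bool drain)
	{
		/* ... wake phys, unblock requests ... */
		if (drain)
			sas_drain_work(ha);
		clear_bit(SAS_HA_RESUMING, &ha->state);

		sas_queue_deferred_work(ha);		/* replay deferred events */
		sas_resume_insert_broadcast_ha(ha);	/* rescan expander ports */
	}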
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index d7a1fb5c10c6..acd515c01861 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -14,6 +14,7 @@
#include <scsi/scsi_transport_sas.h>
#include <scsi/libsas.h>
#include <scsi/sas_ata.h>
+#include <linux/pm_runtime.h>
#ifdef pr_fmt
#undef pr_fmt
@@ -56,6 +57,7 @@ void sas_unregister_ports(struct sas_ha_struct *sas_ha);
void sas_disable_revalidation(struct sas_ha_struct *ha);
void sas_enable_revalidation(struct sas_ha_struct *ha);
+void sas_queue_deferred_work(struct sas_ha_struct *ha);
void __sas_drain_work(struct sas_ha_struct *ha);
void sas_deform_port(struct asd_sas_phy *phy, int gone);
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index d337fdf1b9ca..fb19e739a39c 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -37,7 +37,8 @@
static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
{
struct task_status_struct *ts = &task->task_status;
- int hs = 0, stat = 0;
+ enum scsi_host_status hs = DID_OK;
+ enum exec_status stat = SAS_SAM_STAT_GOOD;
if (ts->resp == SAS_TASK_UNDELIVERED) {
/* transport error */
@@ -82,10 +83,10 @@ static void sas_end_task(struct scsi_cmnd *sc, struct sas_task *task)
case SAS_ABORTED_TASK:
hs = DID_ABORT;
break;
- case SAM_STAT_CHECK_CONDITION:
+ case SAS_SAM_STAT_CHECK_CONDITION:
memcpy(sc->sense_buffer, ts->buf,
min(SCSI_SENSE_BUFFERSIZE, ts->buf_valid_size));
- stat = SAM_STAT_CHECK_CONDITION;
+ stat = SAS_SAM_STAT_CHECK_CONDITION;
break;
default:
stat = ts->stat;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 2f8e6d0a926f..98cabe09c040 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -496,52 +496,50 @@ struct lpfc_cgn_info {
__le32 cgn_alarm_hr[24];
__le32 cgn_alarm_day[LPFC_MAX_CGN_DAYS];
- /* Start of congestion statistics */
- uint8_t cgn_stat_npm; /* Notifications per minute */
-
- /* Start Time */
- uint8_t cgn_stat_month;
- uint8_t cgn_stat_day;
- uint8_t cgn_stat_year;
- uint8_t cgn_stat_hour;
- uint8_t cgn_stat_minute;
- uint8_t cgn_pad2[2];
-
- __le32 cgn_notification;
- __le32 cgn_peer_notification;
- __le32 link_integ_notification;
- __le32 delivery_notification;
-
- uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */
- uint8_t cgn_stat_cgn_day;
- uint8_t cgn_stat_cgn_year;
- uint8_t cgn_stat_cgn_hour;
- uint8_t cgn_stat_cgn_min;
- uint8_t cgn_stat_cgn_sec;
-
- uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */
- uint8_t cgn_stat_peer_day;
- uint8_t cgn_stat_peer_year;
- uint8_t cgn_stat_peer_hour;
- uint8_t cgn_stat_peer_min;
- uint8_t cgn_stat_peer_sec;
-
- uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */
- uint8_t cgn_stat_lnk_day;
- uint8_t cgn_stat_lnk_year;
- uint8_t cgn_stat_lnk_hour;
- uint8_t cgn_stat_lnk_min;
- uint8_t cgn_stat_lnk_sec;
-
- uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */
- uint8_t cgn_stat_del_day;
- uint8_t cgn_stat_del_year;
- uint8_t cgn_stat_del_hour;
- uint8_t cgn_stat_del_min;
- uint8_t cgn_stat_del_sec;
-#define LPFC_CGN_STAT_SIZE 48
-#define LPFC_CGN_DATA_SIZE (sizeof(struct lpfc_cgn_info) - \
- LPFC_CGN_STAT_SIZE - sizeof(uint32_t))
+ struct_group(cgn_stat,
+ uint8_t cgn_stat_npm; /* Notifications per minute */
+
+ /* Start Time */
+ uint8_t cgn_stat_month;
+ uint8_t cgn_stat_day;
+ uint8_t cgn_stat_year;
+ uint8_t cgn_stat_hour;
+ uint8_t cgn_stat_minute;
+ uint8_t cgn_pad2[2];
+
+ __le32 cgn_notification;
+ __le32 cgn_peer_notification;
+ __le32 link_integ_notification;
+ __le32 delivery_notification;
+
+ uint8_t cgn_stat_cgn_month; /* Last congestion notification FPIN */
+ uint8_t cgn_stat_cgn_day;
+ uint8_t cgn_stat_cgn_year;
+ uint8_t cgn_stat_cgn_hour;
+ uint8_t cgn_stat_cgn_min;
+ uint8_t cgn_stat_cgn_sec;
+
+ uint8_t cgn_stat_peer_month; /* Last peer congestion FPIN */
+ uint8_t cgn_stat_peer_day;
+ uint8_t cgn_stat_peer_year;
+ uint8_t cgn_stat_peer_hour;
+ uint8_t cgn_stat_peer_min;
+ uint8_t cgn_stat_peer_sec;
+
+ uint8_t cgn_stat_lnk_month; /* Last link integrity FPIN */
+ uint8_t cgn_stat_lnk_day;
+ uint8_t cgn_stat_lnk_year;
+ uint8_t cgn_stat_lnk_hour;
+ uint8_t cgn_stat_lnk_min;
+ uint8_t cgn_stat_lnk_sec;
+
+ uint8_t cgn_stat_del_month; /* Last delivery notification FPIN */
+ uint8_t cgn_stat_del_day;
+ uint8_t cgn_stat_del_year;
+ uint8_t cgn_stat_del_hour;
+ uint8_t cgn_stat_del_min;
+ uint8_t cgn_stat_del_sec;
+ );
__le32 cgn_info_crc;
#define LPFC_CGN_CRC32_MAGIC_NUMBER 0x1EDC6F41
@@ -594,6 +592,7 @@ struct lpfc_vport {
#define FC_VPORT_LOGO_RCVD 0x200 /* LOGO received on vport */
#define FC_RSCN_DISCOVERY 0x400 /* Auth all devices after RSCN */
#define FC_LOGO_RCVD_DID_CHNG 0x800 /* FDISC on phys port detect DID chng*/
+#define FC_PT2PT_NO_NVME 0x1000 /* Don't send NVME PRLI */
#define FC_SCSI_SCAN_TMO 0x4000 /* scsi scan timer running */
#define FC_ABORT_DISCOVERY 0x8000 /* we want to abort discovery */
#define FC_NDISC_ACTIVE 0x10000 /* NPort discovery active */
@@ -669,8 +668,6 @@ struct lpfc_vport {
struct timer_list els_tmofunc;
struct timer_list delayed_disc_tmo;
- int unreg_vpi_cmpl;
-
uint8_t load_flag;
#define FC_LOADING 0x1 /* HBA in process of loading drvr */
#define FC_UNLOADING 0x2 /* HBA in process of unloading drvr */
@@ -1023,7 +1020,6 @@ struct lpfc_hba {
#define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */
#define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */
#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */
-#define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */
#define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */
#define HBA_FORCED_LINK_SPEED 0x40000 /*
* Firmware supports Forced Link Speed
@@ -1031,7 +1027,7 @@ struct lpfc_hba {
*/
#define HBA_PCI_ERR 0x80000 /* The PCI slot is offline */
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
-#define HBA_CGN_RSVD1 0x200000 /* Reserved CGN flag */
+#define HBA_SHORT_CMF 0x200000 /* shorter CMF timer routine */
#define HBA_CGN_DAY_WRAP 0x400000 /* HBA Congestion info day wraps */
#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
#define HBA_SETUP 0x1000000 /* Signifies HBA setup is completed */
@@ -1040,6 +1036,7 @@ struct lpfc_hba {
#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */
#define HBA_FLOGI_OUTSTANDING 0x10000000 /* FLOGI is outstanding */
+ struct completion *fw_dump_cmpl; /* cmpl event tracker for fw_dump */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -1165,6 +1162,16 @@ struct lpfc_hba {
uint32_t cfg_hostmem_hgp;
uint32_t cfg_log_verbose;
uint32_t cfg_enable_fc4_type;
+#define LPFC_ENABLE_FCP 1
+#define LPFC_ENABLE_NVME 2
+#define LPFC_ENABLE_BOTH 3
+#if (IS_ENABLED(CONFIG_NVME_FC))
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_BOTH
+#else
+#define LPFC_MAX_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#define LPFC_DEF_ENBL_FC4_TYPE LPFC_ENABLE_FCP
+#endif
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
uint32_t cfg_request_firmware_upgrade;
@@ -1186,9 +1193,6 @@ struct lpfc_hba {
uint32_t cfg_ras_fwlog_func;
uint32_t cfg_enable_bbcr; /* Enable BB Credit Recovery */
uint32_t cfg_enable_dpp; /* Enable Direct Packet Push */
-#define LPFC_ENABLE_FCP 1
-#define LPFC_ENABLE_NVME 2
-#define LPFC_ENABLE_BOTH 3
uint32_t cfg_enable_pbde;
uint32_t cfg_enable_mi;
struct nvmet_fc_target_port *targetport;
@@ -1604,6 +1608,7 @@ struct lpfc_hba {
#define LPFC_MAX_RXMONITOR_ENTRY 800
#define LPFC_MAX_RXMONITOR_DUMP 32
struct rxtable_entry {
+ uint64_t cmf_bytes; /* Total no of read bytes for CMF_SYNC_WQE */
uint64_t total_bytes; /* Total no of read bytes requested */
uint64_t rcv_bytes; /* Total no of read bytes completed */
uint64_t avg_io_size;
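The lpfc_cgn_info change is mechanical but purposeful: struct_group() wraps the congestion-statistics members in a mirrored anonymous struct, so code can memset()/memcpy() the region through a named, sized member instead of the hand-maintained LPFC_CGN_STAT_SIZE byte count, and FORTIFY_SOURCE can verify the bounds. A self-contained sketch of the idiom, with hypothetical field names:

	#include <linux/stddef.h>
	#include <linux/string.h>

	struct cgn_example {
		__le32 seed;
		struct_group(stat,		/* addressable sub-region */
			u8 npm;
			u8 month, day, year;
		);
		__le32 crc;
	};

	static void reset_stats(struct cgn_example *info)
	{
		/* One sized object; no separate size #define to keep in sync. */
		memset(&info->stat, 0, sizeof(info->stat));
	}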
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index dd4c51b6ef4e..fa8415259cb8 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1315,6 +1315,9 @@ lpfc_issue_lip(struct Scsi_Host *shost)
pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
pmboxq->u.mb.mbxOwner = OWN_HOST;
+ if ((vport->fc_flag & FC_PT2PT) && (vport->fc_flag & FC_PT2PT_NO_NVME))
+ vport->fc_flag &= ~FC_PT2PT_NO_NVME;
+
mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
if ((mbxstatus == MBX_SUCCESS) &&
@@ -1709,25 +1712,25 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
before_fc_flag = phba->pport->fc_flag;
sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
- /* Disable SR-IOV virtual functions if enabled */
- if (phba->cfg_sriov_nr_virtfn) {
- pci_disable_sriov(pdev);
- phba->cfg_sriov_nr_virtfn = 0;
- }
+ if (opcode == LPFC_FW_DUMP) {
+ init_completion(&online_compl);
+ phba->fw_dump_cmpl = &online_compl;
+ } else {
+ /* Disable SR-IOV virtual functions if enabled */
+ if (phba->cfg_sriov_nr_virtfn) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
- if (opcode == LPFC_FW_DUMP)
- phba->hba_flag |= HBA_FW_DUMP_OP;
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
- status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+ if (status != 0)
+ return status;
- if (status != 0) {
- phba->hba_flag &= ~HBA_FW_DUMP_OP;
- return status;
+ /* wait for the device to be quiesced before firmware reset */
+ msleep(100);
}
- /* wait for the device to be quiesced before firmware reset */
- msleep(100);
-
reg_val = readl(phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PDEV_CTL_OFFSET);
@@ -1756,24 +1759,42 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3153 Fail to perform the requested "
"access: x%x\n", reg_val);
+ if (phba->fw_dump_cmpl)
+ phba->fw_dump_cmpl = NULL;
return rc;
}
/* keep the original port state */
- if (before_fc_flag & FC_OFFLINE_MODE)
- goto out;
-
- init_completion(&online_compl);
- job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
- LPFC_EVT_ONLINE);
- if (!job_posted)
+ if (before_fc_flag & FC_OFFLINE_MODE) {
+ if (phba->fw_dump_cmpl)
+ phba->fw_dump_cmpl = NULL;
goto out;
+ }
- wait_for_completion(&online_compl);
+ /* Firmware dump will trigger an HA_ERATT event, and
+ * lpfc_handle_eratt_s4 routine already handles bringing the port back
+ * online.
+ */
+ if (opcode == LPFC_FW_DUMP) {
+ wait_for_completion(phba->fw_dump_cmpl);
+ } else {
+ init_completion(&online_compl);
+ job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (!job_posted)
+ goto out;
+ wait_for_completion(&online_compl);
+ }
out:
/* in any case, restore the virtual functions enabled as before */
if (sriov_nr_virtfn) {
+ /* If fw_dump was performed, first disable to clean up */
+ if (opcode == LPFC_FW_DUMP) {
+ pci_disable_sriov(pdev);
+ phba->cfg_sriov_nr_virtfn = 0;
+ }
+
sriov_err =
lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
if (!sriov_err)
@@ -3960,8 +3981,8 @@ LPFC_ATTR_R(nvmet_mrq_post,
* 3 - register both FCP and NVME
* Supported values are [1,3]. Default value is 3
*/
-LPFC_ATTR_R(enable_fc4_type, LPFC_ENABLE_BOTH,
- LPFC_ENABLE_FCP, LPFC_ENABLE_BOTH,
+LPFC_ATTR_R(enable_fc4_type, LPFC_DEF_ENBL_FC4_TYPE,
+ LPFC_ENABLE_FCP, LPFC_MAX_ENBL_FC4_TYPE,
"Enable FC4 Protocol support - FCP / NVME");
/*
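The lpfc_attr.c rework above drops the old HBA_FW_DUMP_OP flag in favor of a completion: lpfc_sli4_pdev_reg_request() publishes phba->fw_dump_cmpl before requesting the dump, and the HA_ERATT path in lpfc_work_done() completes it once error handling has brought the port back online. Below is a minimal userspace analog of that handshake, using pthreads in place of the kernel completion API; the names and the one-second delay are illustrative only.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* userspace stand-in for the kernel's struct completion */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

static struct completion fw_dump_cmpl = {
	PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
};

static void *eratt_handler(void *arg)	/* plays lpfc_work_done() */
{
	sleep(1);			/* pretend the dump ran */
	complete(&fw_dump_cmpl);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, eratt_handler, NULL);
	wait_for_completion(&fw_dump_cmpl); /* plays lpfc_sli4_pdev_reg_request() */
	puts("dump finished, port back online");
	pthread_join(&t, NULL);
	return 0;
}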
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 08b2e85dcd7d..30fac2f6fb06 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -5484,7 +5484,7 @@ lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
"Truncated . . .\n");
- break;
+ goto out;
}
len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
"%03x: %08x %08x %08x %08x "
@@ -5495,6 +5495,17 @@ lpfc_cgn_buffer_read(struct file *file, char __user *buf, size_t nbytes,
cnt += 32;
ptr += 8;
}
+ if (len > (LPFC_CGN_BUF_SIZE - LPFC_DEBUG_OUT_LINE_SZ)) {
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Truncated . . .\n");
+ goto out;
+ }
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "Parameter Data\n");
+ ptr = (uint32_t *)&phba->cgn_p;
+ len += scnprintf(buffer + len, LPFC_CGN_BUF_SIZE - len,
+ "%08x %08x %08x %08x\n",
+ *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3));
out:
return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
}
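The truncation guard in lpfc_cgn_buffer_read() above follows a common pattern: append with scnprintf() into a fixed buffer and jump to the out label once headroom drops below one output line, now skipping the trailer as well. A hedged userspace sketch of the pattern; scnprintf() is reimplemented here because it is a kernel helper, and the buffer and headroom sizes are invented.

#include <stdarg.h>
#include <stdio.h>

/* userspace model of the kernel's scnprintf(): bytes actually written */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vsnprintf(buf, size, fmt, ap);
	va_end(ap);
	if (n < 0)
		return 0;
	return (size_t)n < size ? n : (int)(size - 1);
}

int main(void)
{
	char buffer[64];
	int len = 0;
	const int headroom = 24;	/* stands in for LPFC_DEBUG_OUT_LINE_SZ */
	int i;

	for (i = 0; i < 100; i++) {
		if (len > (int)sizeof(buffer) - headroom) {
			len += scnprintf(buffer + len, sizeof(buffer) - len,
					 "Truncated . . .\n");
			goto out;	/* as the patch does: skip the trailer too */
		}
		len += scnprintf(buffer + len, sizeof(buffer) - len, "%03x\n", i);
	}
out:
	fwrite(buffer, 1, len, stdout);
	return 0;
}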
@@ -5561,22 +5572,24 @@ lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
start = tail;
len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- " MaxBPI\t Total Data Cmd Total Data Cmpl "
- " Latency(us) Avg IO Size\tMax IO Size IO cnt "
- "Info BWutil(ms)\n");
+ " MaxBPI Tot_Data_CMF Tot_Data_Cmd "
+ "Tot_Data_Cmpl Lat(us) Avg_IO Max_IO "
+ "Bsy IO_cnt Info BWutil(ms)\n");
get_table:
for (i = start; i < last; i++) {
entry = &phba->rxtable[i];
len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
- "%3d:%12lld %12lld\t%12lld\t"
- "%8lldus\t%8lld\t%10lld "
- "%8d %2d %2d(%2d)\n",
+ "%3d:%12lld %12lld %12lld %12lld "
+ "%7lldus %8lld %7lld "
+ "%2d %4d %2d %2d(%2d)\n",
i, entry->max_bytes_per_interval,
+ entry->cmf_bytes,
entry->total_bytes,
entry->rcv_bytes,
entry->avg_io_latency,
entry->avg_io_size,
entry->max_read_cnt,
+ entry->cmf_busy,
entry->io_cnt,
entry->cmf_info,
entry->timer_utilization,
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index a5bf71b34972..6dd361c1fd31 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -282,7 +282,7 @@ struct lpfc_idiag {
void *ptr_private;
};
-#define MAX_DEBUGFS_RX_TABLE_SIZE (100 * LPFC_MAX_RXMONITOR_ENTRY)
+#define MAX_DEBUGFS_RX_TABLE_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
struct lpfc_rx_monitor_debug {
char *i_private;
char *buffer;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e83453bea2ae..f936833c9909 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1072,7 +1072,8 @@ stop_rr_fcf_flogi:
/* FLOGI failed, so there is no fabric */
spin_lock_irq(shost->host_lock);
- vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP);
+ vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP |
+ FC_PT2PT_NO_NVME);
spin_unlock_irq(shost->host_lock);
/* If private loop, then allow max outstanding els to be
@@ -3538,11 +3539,6 @@ lpfc_issue_els_rscn(struct lpfc_vport *vport, uint8_t retry)
return 1;
}
- /* This will cause the callback-function lpfc_cmpl_els_cmd to
- * trigger the release of node.
- */
- if (!(vport->fc_flag & FC_PT2PT))
- lpfc_nlp_put(ndlp);
return 0;
}
@@ -4612,6 +4608,23 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Added for Vendor specific support
* Just keep retrying for these Rsn / Exp codes
*/
+ if ((vport->fc_flag & FC_PT2PT) &&
+ cmd == ELS_CMD_NVMEPRLI) {
+ switch (stat.un.b.lsRjtRsnCode) {
+ case LSRJT_UNABLE_TPC:
+ case LSRJT_INVALID_CMD:
+ case LSRJT_LOGICAL_ERR:
+ case LSRJT_CMD_UNSUPPORTED:
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "0168 NVME PRLI LS_RJT "
+ "reason %x port doesn't "
+ "support NVME, disabling NVME\n",
+ stat.un.b.lsRjtRsnCode);
+ retry = 0;
+ vport->fc_flag |= FC_PT2PT_NO_NVME;
+ goto out_retry;
+ }
+ }
switch (stat.un.b.lsRjtRsnCode) {
case LSRJT_UNABLE_TPC:
/* The driver has a VALID PLOGI but the rport has
@@ -6899,6 +6912,7 @@ static int
lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
{
LPFC_MBOXQ_t *mbox = NULL;
+ struct lpfc_dmabuf *mp;
int rc;
mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -6914,8 +6928,11 @@ lpfc_get_rdp_info(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context)
mbox->mbox_cmpl = lpfc_mbx_cmpl_rdp_page_a0;
mbox->ctx_ndlp = (struct lpfc_rdp_context *)rdp_context;
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
+ mp = (struct lpfc_dmabuf *)mbox->ctx_buf;
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
goto issue_mbox_fail;
+ }
return 0;
@@ -10974,10 +10991,19 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_can_disctmo(vport);
}
+ if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) {
+ /* Wake up lpfc_vport_delete if waiting...*/
+ if (ndlp->logo_waitq)
+ wake_up(ndlp->logo_waitq);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND);
+ ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ spin_unlock_irq(&ndlp->lock);
+ }
+
/* Safe to release resources now. */
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
- vport->unreg_vpi_cmpl = VPORT_ERROR;
}
/**
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 9fe6e5b386ce..816fc406135b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -869,10 +869,16 @@ lpfc_work_done(struct lpfc_hba *phba)
if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
lpfc_sli4_post_async_mbox(phba);
- if (ha_copy & HA_ERATT)
+ if (ha_copy & HA_ERATT) {
/* Handle the error attention event */
lpfc_handle_eratt(phba);
+ if (phba->fw_dump_cmpl) {
+ complete(phba->fw_dump_cmpl);
+ phba->fw_dump_cmpl = NULL;
+ }
+ }
+
if (ha_copy & HA_MBATT)
lpfc_sli_handle_mb_event(phba);
@@ -3928,7 +3934,6 @@ lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
vport->vpi_state &= ~LPFC_VPI_REGISTERED;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
spin_unlock_irq(shost->host_lock);
- vport->unreg_vpi_cmpl = VPORT_OK;
mempool_free(pmb, phba->mbox_mem_pool);
lpfc_cleanup_vports_rrqs(vport, NULL);
/*
@@ -3958,7 +3963,6 @@ lpfc_mbx_unreg_vpi(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"1800 Could not issue unreg_vpi\n");
mempool_free(mbox, phba->mbox_mem_pool);
- vport->unreg_vpi_cmpl = VPORT_ERROR;
return rc;
}
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 634f8fff7425..4461c3d6fc4f 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -3675,19 +3675,26 @@ union sli_var {
};
typedef struct {
+ struct_group_tagged(MAILBOX_word0, bits,
+ union {
+ struct {
#ifdef __BIG_ENDIAN_BITFIELD
- uint16_t mbxStatus;
- uint8_t mbxCommand;
- uint8_t mbxReserved:6;
- uint8_t mbxHc:1;
- uint8_t mbxOwner:1; /* Low order bit first word */
+ uint16_t mbxStatus;
+ uint8_t mbxCommand;
+ uint8_t mbxReserved:6;
+ uint8_t mbxHc:1;
+ uint8_t mbxOwner:1; /* Low order bit first word */
#else /* __LITTLE_ENDIAN_BITFIELD */
- uint8_t mbxOwner:1; /* Low order bit first word */
- uint8_t mbxHc:1;
- uint8_t mbxReserved:6;
- uint8_t mbxCommand;
- uint16_t mbxStatus;
+ uint8_t mbxOwner:1; /* Low order bit first word */
+ uint8_t mbxHc:1;
+ uint8_t mbxReserved:6;
+ uint8_t mbxCommand;
+ uint16_t mbxStatus;
#endif
+ };
+ u32 word0;
+ };
+ );
MAILVARIANTS un;
union sli_var us;
@@ -3746,7 +3753,7 @@ typedef struct {
#define IOERR_ILLEGAL_COMMAND 0x06
#define IOERR_XCHG_DROPPED 0x07
#define IOERR_ILLEGAL_FIELD 0x08
-#define IOERR_BAD_CONTINUE 0x09
+#define IOERR_RPI_SUSPENDED 0x09
#define IOERR_TOO_MANY_BUFFERS 0x0A
#define IOERR_RCV_BUFFER_WAITING 0x0B
#define IOERR_NO_CONNECTION 0x0C
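The MAILBOX_t rework above uses the kernel's struct_group_tagged() helper so that the status/command/owner bitfields and a u32 word0 alias occupy the same storage, letting callers such as lpfc_reset_barrier() read and write the whole first mailbox word without casts. A userspace approximation with a plain anonymous union; this assumes a little-endian host, matching the __LITTLE_ENDIAN_BITFIELD branch, and the real macro additionally emits a tagged struct type.

#include <stdint.h>
#include <stdio.h>

typedef struct {
	union {
		struct {	/* little-endian layout, as in the patch */
			uint8_t  mbxOwner:1;
			uint8_t  mbxHc:1;
			uint8_t  mbxReserved:6;
			uint8_t  mbxCommand;
			uint16_t mbxStatus;
		};
		uint32_t word0;	/* the whole first mailbox word */
	};
} mailbox_word0;

int main(void)
{
	mailbox_word0 mb = { .word0 = 0 };

	mb.mbxCommand = 0x24;	/* hypothetical opcode */
	mb.mbxOwner = 1;
	/* word0 is now ready to hand to writel() in the driver */
	printf("word0 = 0x%08x\n", (unsigned)mb.word0);
	return 0;
}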
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ba17a8f740a9..558f7d2559c4 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -2104,7 +2104,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
}
if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"3143 Port Down: Firmware Update "
"Detected\n");
en_rn_msg = false;
@@ -5373,8 +5373,10 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
*/
if (!(phba->hba_flag & HBA_FCOE_MODE)) {
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
goto out_free_dmabuf;
+ }
return;
}
/*
@@ -5925,7 +5927,7 @@ lpfc_cmf_timer(struct hrtimer *timer)
uint32_t io_cnt;
uint32_t head, tail;
uint32_t busy, max_read;
- uint64_t total, rcv, lat, mbpi, extra;
+ uint64_t total, rcv, lat, mbpi, extra, cnt;
int timer_interval = LPFC_CMF_INTERVAL;
uint32_t ms;
struct lpfc_cgn_stat *cgs;
@@ -5996,20 +5998,28 @@ lpfc_cmf_timer(struct hrtimer *timer)
/* Calculate any extra bytes needed to account for the
* timer accuracy. If we are less than LPFC_CMF_INTERVAL
- * add an extra 3% slop factor, equal to LPFC_CMF_INTERVAL
- * add an extra 2%. The goal is to equalize total with a
- * time > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1
+ * calculate the adjustment needed for total to reflect
+ * a full LPFC_CMF_INTERVAL.
*/
- if (ms == LPFC_CMF_INTERVAL)
- extra = div_u64(total, 50);
- else if (ms < LPFC_CMF_INTERVAL)
- extra = div_u64(total, 33);
+ if (ms && ms < LPFC_CMF_INTERVAL) {
+ cnt = div_u64(total, ms); /* bytes per ms */
+ cnt *= LPFC_CMF_INTERVAL; /* what total should be */
+
+ /* If the timeout is scheduled to be shorter,
+ * this value may skew the data, so cap it at mbpi.
+ */
+ if ((phba->hba_flag & HBA_SHORT_CMF) && cnt > mbpi)
+ cnt = mbpi;
+
+ extra = cnt - total;
+ }
lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
} else {
/* For Monitor mode or link down we want mbpi
* to be the full link speed
*/
mbpi = phba->cmf_link_byte_count;
+ extra = 0;
}
phba->cmf_timer_cnt++;
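The rewritten slop calculation above replaces the fixed 2-3% fudge factors: it now scales the observed byte count up to what a full LPFC_CMF_INTERVAL would have carried, capping at mbpi when the interval was deliberately shortened (the new HBA_SHORT_CMF case). The arithmetic, worked with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical sample: the timer fired 3 ms early */
	uint64_t total = 9700;		/* bytes seen in this (short) interval */
	uint64_t ms = 97;		/* measured interval length */
	uint64_t interval = 100;	/* LPFC_CMF_INTERVAL */
	uint64_t mbpi = 12000;		/* max bytes per interval */
	uint64_t cnt, extra;

	cnt = (total / ms) * interval;	/* what a full interval would carry */
	if (cnt > mbpi)			/* HBA_SHORT_CMF case: cap the skew */
		cnt = mbpi;
	extra = cnt - total;

	printf("extra = %llu bytes\n", (unsigned long long)extra); /* 300 */
	return 0;
}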
@@ -6040,6 +6050,7 @@ lpfc_cmf_timer(struct hrtimer *timer)
LPFC_RXMONITOR_TABLE_IN_USE);
entry = &phba->rxtable[head];
entry->total_bytes = total;
+ entry->cmf_bytes = total + extra;
entry->rcv_bytes = rcv;
entry->cmf_busy = busy;
entry->cmf_info = phba->cmf_active_info;
@@ -6082,6 +6093,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
/* Each minute save Fabric and Driver congestion information */
lpfc_cgn_save_evt_cnt(phba);
+ phba->hba_flag &= ~HBA_SHORT_CMF;
+
/* Since we need to call lpfc_cgn_save_evt_cnt every minute, on the
* minute, adjust our next timer interval, if needed, to ensure a
* 1 minute granularity when we get the next timer interrupt.
@@ -6092,6 +6105,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
jiffies);
if (timer_interval <= 0)
timer_interval = LPFC_CMF_INTERVAL;
+ else
+ phba->hba_flag |= HBA_SHORT_CMF;
/* If we adjust timer_interval, max_bytes_per_interval
* needs to be adjusted as well.
@@ -6337,8 +6352,10 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
}
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED)
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
goto out_free_dmabuf;
+ }
return;
out_free_dmabuf:
@@ -12709,7 +12726,7 @@ lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu)
cpumask_clear(&eqhdl->aff_mask);
cpumask_set_cpu(cpu, &eqhdl->aff_mask);
irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING);
- irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask);
+ irq_set_affinity(eqhdl->irq, &eqhdl->aff_mask);
}
/**
@@ -12998,7 +13015,6 @@ cfg_fail_out:
for (--index; index >= 0; index--) {
eqhdl = lpfc_get_eq_hdl(index);
lpfc_irq_clear_aff(eqhdl);
- irq_set_affinity_hint(eqhdl->irq, NULL);
free_irq(eqhdl->irq, eqhdl);
}
@@ -13159,7 +13175,6 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba)
for (index = 0; index < phba->cfg_irq_chann; index++) {
eqhdl = lpfc_get_eq_hdl(index);
lpfc_irq_clear_aff(eqhdl);
- irq_set_affinity_hint(eqhdl->irq, NULL);
free_irq(eqhdl->irq, eqhdl);
}
} else {
@@ -13466,7 +13481,7 @@ lpfc_init_congestion_buf(struct lpfc_hba *phba)
phba->cgn_evt_minute = 0;
phba->hba_flag &= ~HBA_CGN_DAY_WRAP;
- memset(cp, 0xff, LPFC_CGN_DATA_SIZE);
+ memset(cp, 0xff, offsetof(struct lpfc_cgn_info, cgn_stat));
cp->cgn_info_size = cpu_to_le16(LPFC_CGN_INFO_SZ);
cp->cgn_info_version = LPFC_CGN_INFO_V3;
@@ -13525,7 +13540,7 @@ lpfc_init_congestion_stat(struct lpfc_hba *phba)
return;
cp = (struct lpfc_cgn_info *)phba->cgn_i->virt;
- memset(&cp->cgn_stat_npm, 0, LPFC_CGN_STAT_SIZE);
+ memset(&cp->cgn_stat, 0, sizeof(cp->cgn_stat));
ktime_get_real_ts64(&cmpl_time);
time64_to_tm(cmpl_time.tv_sec, 0, &broken);
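The two memset changes above stop the congestion-buffer init from clobbering live statistics: the 0xff fill now runs only up to the cgn_stat member, and the stat reset covers exactly sizeof(cp->cgn_stat). A runnable sketch of the offsetof()-bounded idiom, with a toy layout standing in for struct lpfc_cgn_info:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct cgn_info {
	unsigned char hdr[16];		/* header portion, safe to 0xff-fill */
	unsigned int  cgn_stat[4];	/* live stats, must survive re-init */
};

int main(void)
{
	struct cgn_info ci = { .cgn_stat = { 1, 2, 3, 4 } };

	/* fill only up to the stats, exactly as the patch does */
	memset(&ci, 0xff, offsetof(struct cgn_info, cgn_stat));

	printf("cgn_stat[0] = %u\n", ci.cgn_stat[0]);	/* still 1 */
	return 0;
}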
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 27263f02ab9f..fdf5e777bf11 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -322,6 +322,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_dmabuf *pcmd;
+ struct lpfc_dmabuf *mp;
uint64_t nlp_portwwn = 0;
uint32_t *lp;
IOCB_t *icmd;
@@ -571,6 +572,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
* a default RPI.
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
+ mp = (struct lpfc_dmabuf *)login_mbox->ctx_buf;
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
mempool_free(login_mbox, phba->mbox_mem_pool);
login_mbox = NULL;
} else {
@@ -1955,8 +1961,9 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* is configured try it.
*/
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
- if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
+ if ((!(vport->fc_flag & FC_PT2PT_NO_NVME)) &&
+ (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH ||
+ vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
/* We need to update the localport also */
lpfc_nvme_update_localport(vport);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 6ccf573acdec..5a3da38a9067 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4393,6 +4393,7 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
if (lpfc_cmd->result == IOERR_INVALID_RPI ||
lpfc_cmd->result == IOERR_NO_RESOURCES ||
lpfc_cmd->result == IOERR_ABORT_REQUESTED ||
+ lpfc_cmd->result == IOERR_RPI_SUSPENDED ||
lpfc_cmd->result == IOERR_SLER_CMD_RCV_FAILURE) {
cmd->result = DID_REQUEUE << 16;
break;
@@ -4448,10 +4449,11 @@ lpfc_fcp_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"9039 Iodone <%d/%llu> cmd x%px, error "
- "x%x SNS x%x x%x Data: x%x x%x\n",
+ "x%x SNS x%x x%x LBA x%llx Data: x%x x%x\n",
cmd->device->id, cmd->device->lun, cmd,
- cmd->result, *lp, *(lp + 3), cmd->retries,
- scsi_get_resid(cmd));
+ cmd->result, *lp, *(lp + 3),
+ (u64)scsi_get_lba(cmd),
+ cmd->retries, scsi_get_resid(cmd));
}
lpfc_update_stats(vport, lpfc_cmd);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 5dedb3de271d..430abebf99f1 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4749,7 +4749,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
{
uint32_t __iomem *resp_buf;
uint32_t __iomem *mbox_buf;
- volatile uint32_t mbox;
+ volatile struct MAILBOX_word0 mbox;
uint32_t hc_copy, ha_copy, resp_data;
int i;
uint8_t hdrtype;
@@ -4783,13 +4783,13 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
phba->pport->stopped = 1;
}
- mbox = 0;
- ((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
- ((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
+ mbox.word0 = 0;
+ mbox.mbxCommand = MBX_KILL_BOARD;
+ mbox.mbxOwner = OWN_CHIP;
writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
mbox_buf = phba->MBslimaddr;
- writel(mbox, mbox_buf);
+ writel(mbox.word0, mbox_buf);
for (i = 0; i < 50; i++) {
if (lpfc_readl((resp_buf + 1), &resp_data))
@@ -4810,12 +4810,12 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
goto clear_errat;
}
- ((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
+ mbox.mbxOwner = OWN_HOST;
resp_data = 0;
for (i = 0; i < 500; i++) {
if (lpfc_readl(resp_buf, &resp_data))
return;
- if (resp_data != mbox)
+ if (resp_data != mbox.word0)
mdelay(1);
else
break;
@@ -5046,12 +5046,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
phba->fcf.fcf_flag = 0;
spin_unlock_irq(&phba->hbalock);
- /* SLI4 INTF 2: if FW dump is being taken skip INIT_PORT */
- if (phba->hba_flag & HBA_FW_DUMP_OP) {
- phba->hba_flag &= ~HBA_FW_DUMP_OP;
- return rc;
- }
-
/* Now physically reset the device */
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0389 Performing PCI function reset!\n");
@@ -5091,9 +5085,8 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
static int
lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
{
- MAILBOX_t *mb;
+ volatile struct MAILBOX_word0 mb;
struct lpfc_sli *psli;
- volatile uint32_t word0;
void __iomem *to_slim;
uint32_t hba_aer_enabled;
@@ -5110,24 +5103,23 @@ lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
(phba->pport) ? phba->pport->port_state : 0,
psli->sli_flag);
- word0 = 0;
- mb = (MAILBOX_t *) &word0;
- mb->mbxCommand = MBX_RESTART;
- mb->mbxHc = 1;
+ mb.word0 = 0;
+ mb.mbxCommand = MBX_RESTART;
+ mb.mbxHc = 1;
lpfc_reset_barrier(phba);
to_slim = phba->MBslimaddr;
- writel(*(uint32_t *) mb, to_slim);
+ writel(mb.word0, to_slim);
readl(to_slim); /* flush */
/* Only skip post after fc_ffinit is completed */
if (phba->pport && phba->pport->port_state)
- word0 = 1; /* This is really setting up word1 */
+ mb.word0 = 1; /* This is really setting up word1 */
else
- word0 = 0; /* This is really setting up word1 */
+ mb.word0 = 0; /* This is really setting up word1 */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
- writel(*(uint32_t *) mb, to_slim);
+ writel(mb.word0, to_slim);
readl(to_slim); /* flush */
lpfc_sli_brdreset(phba);
@@ -13371,6 +13363,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
uint32_t uerr_sta_hi, uerr_sta_lo;
uint32_t if_type, portsmphr;
struct lpfc_register portstat_reg;
+ u32 logmask;
/*
* For now, use the SLI4 device internal unrecoverable error
@@ -13421,7 +13414,12 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
phba->work_status[1] =
readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ logmask = LOG_TRACE_EVENT;
+ if (phba->work_status[0] ==
+ SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
+ logmask = LOG_SLI;
+ lpfc_printf_log(phba, KERN_ERR, logmask,
"2885 Port Status Event: "
"port status reg 0x%x, "
"port smphr reg 0x%x, "
@@ -17990,8 +17988,8 @@ lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
* the driver starts at 0 each time.
*/
spin_lock_irq(&phba->hbalock);
- xri = find_next_zero_bit(phba->sli4_hba.xri_bmask,
- phba->sli4_hba.max_cfg_param.max_xri, 0);
+ xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
+ phba->sli4_hba.max_cfg_param.max_xri);
if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
spin_unlock_irq(&phba->hbalock);
return NO_XRI;
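find_next_zero_bit(map, size, 0) and find_first_zero_bit(map, size) return the same result; the tree-wide preference for the _first_ form (also applied to the rpi and FCF lookups below) is that the dedicated helper is simpler and cheaper when the search always starts at bit 0. A simplified userspace model of the lookup over the xri bitmask:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* simplified model of the helper: index of first clear bit, or nbits */
static unsigned long find_first_zero_bit(const unsigned long *map,
					 unsigned long nbits)
{
	unsigned long i;

	for (i = 0; i < nbits; i++)
		if (!(map[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG))))
			return i;
	return nbits;
}

int main(void)
{
	unsigned long xri_bmask[1] = { 0x0000000bUL };	/* bits 0, 1, 3 in use */

	printf("first free xri: %lu\n", find_first_zero_bit(xri_bmask, 32)); /* 2 */
	return 0;
}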
@@ -19668,7 +19666,7 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
rpi_limit = phba->sli4_hba.next_rpi;
- rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, 0);
+ rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
if (rpi >= rpi_limit)
rpi = LPFC_RPI_ALLOC_ERROR;
else {
@@ -20311,8 +20309,8 @@ next_priority:
* have been tested so that we can detect when we should
* change the priority level.
*/
- next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
- LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+ next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
+ LPFC_SLI4_FCF_TBL_INDX_MAX);
}
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 5a4d3b24fbce..2e9348a6897c 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "14.0.0.3"
+#define LPFC_DRIVER_VERSION "14.0.0.4"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index da9a1f72d938..d694d0cff5a5 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -486,22 +486,67 @@ error_out:
}
static int
+lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
+{
+ int rc;
+ struct lpfc_hba *phba = vport->phba;
+
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+
+ spin_lock_irq(&ndlp->lock);
+ if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO) &&
+ !ndlp->logo_waitq) {
+ ndlp->logo_waitq = &waitq;
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag |= NLP_ISSUE_LOGO;
+ ndlp->save_flags |= NLP_WAIT_FOR_LOGO;
+ }
+ spin_unlock_irq(&ndlp->lock);
+ rc = lpfc_issue_els_npiv_logo(vport, ndlp);
+ if (!rc) {
+ wait_event_timeout(waitq,
+ (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO)),
+ msecs_to_jiffies(phba->fc_ratov * 2000));
+
+ if (!(ndlp->save_flags & NLP_WAIT_FOR_LOGO))
+ goto logo_cmpl;
+ /* LOGO wait failed. Correct status. */
+ rc = -EINTR;
+ } else {
+ rc = -EIO;
+ }
+
+ /* Error - clean up node flags. */
+ spin_lock_irq(&ndlp->lock);
+ ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
+ ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO;
+ spin_unlock_irq(&ndlp->lock);
+
+ logo_cmpl:
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
+ "1824 Issue LOGO completes with status %d\n",
+ rc);
+ spin_lock_irq(&ndlp->lock);
+ ndlp->logo_waitq = NULL;
+ spin_unlock_irq(&ndlp->lock);
+ return rc;
+}
+
+static int
disable_vport(struct fc_vport *fc_vport)
{
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct lpfc_hba *phba = vport->phba;
struct lpfc_nodelist *ndlp = NULL, *next_ndlp = NULL;
- long timeout;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ /* Can't disable during an outstanding delete. */
+ if (vport->load_flag & FC_UNLOADING)
+ return 0;
+
ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (ndlp && phba->link_state >= LPFC_LINK_UP) {
- vport->unreg_vpi_cmpl = VPORT_INVAL;
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_issue_els_npiv_logo(vport, ndlp))
- while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
- timeout = schedule_timeout(timeout);
- }
+ if (ndlp && phba->link_state >= LPFC_LINK_UP)
+ (void)lpfc_send_npiv_logo(vport, ndlp);
lpfc_sli_host_down(vport);
@@ -600,7 +645,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data;
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
struct lpfc_hba *phba = vport->phba;
- long timeout;
+ int rc;
if (vport->port_type == LPFC_PHYSICAL_PORT) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -665,15 +710,14 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
phba->fc_topology != LPFC_TOPOLOGY_LOOP) {
if (vport->cfg_enable_da_id) {
/* Send DA_ID and wait for a completion. */
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0))
- while (vport->ct_flags && timeout)
- timeout = schedule_timeout(timeout);
- else
+ rc = lpfc_ns_cmd(vport, SLI_CTNS_DA_ID, 0, 0);
+ if (rc) {
lpfc_printf_log(vport->phba, KERN_WARNING,
LOG_VPORT,
"1829 CT command failed to "
- "delete objects on fabric\n");
+ "delete objects on fabric, "
+ "rc %d\n", rc);
+ }
}
/*
@@ -688,11 +732,10 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
ndlp = lpfc_findnode_did(vport, Fabric_DID);
if (!ndlp)
goto skip_logo;
- vport->unreg_vpi_cmpl = VPORT_INVAL;
- timeout = msecs_to_jiffies(phba->fc_ratov * 2000);
- if (!lpfc_issue_els_npiv_logo(vport, ndlp))
- while (vport->unreg_vpi_cmpl == VPORT_INVAL && timeout)
- timeout = schedule_timeout(timeout);
+
+ rc = lpfc_send_npiv_logo(vport, ndlp);
+ if (rc)
+ goto skip_logo;
}
if (!(phba->pport->load_flag & FC_UNLOADING))
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 0d31d7a5e335..bf987f3a7f3f 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -192,23 +192,21 @@ mega_query_adapter(adapter_t *adapter)
{
dma_addr_t prod_info_dma_handle;
mega_inquiry3 *inquiry3;
- u8 raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ u8 *raw_mbox = (u8 *)&mbox;
int retval;
/* Initialize adapter inquiry mailbox */
- mbox = (mbox_t *)raw_mbox;
-
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* Try to issue Inquiry3 command
* if that does not succeed, issue the MEGA_MBOXCMD_ADAPTERINQ command and
* update the enquiry3 structure
*/
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;
@@ -232,10 +230,10 @@ mega_query_adapter(adapter_t *adapter)
inq = &ext_inq->raid_inq;
- mbox->m_out.xferaddr = (u32)dma_handle;
+ mbox.xferaddr = (u32)dma_handle;
/*issue old 0x04 command to adapter */
- mbox->m_out.cmd = MEGA_MBOXCMD_ADPEXTINQ;
+ mbox.cmd = MEGA_MBOXCMD_ADPEXTINQ;
issue_scb_block(adapter, raw_mbox);
@@ -262,7 +260,7 @@ mega_query_adapter(adapter_t *adapter)
sizeof(mega_product_info),
DMA_FROM_DEVICE);
- mbox->m_out.xferaddr = prod_info_dma_handle;
+ mbox.xferaddr = prod_info_dma_handle;
raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
@@ -3569,16 +3567,14 @@ mega_n_to_m(void __user *arg, megacmd_t *mc)
static int
mega_is_bios_enabled(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
-
- mbox = (mbox_t *)raw_mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
raw_mbox[0] = IS_BIOS_ENABLED;
raw_mbox[2] = GET_BIOS;
@@ -3600,13 +3596,11 @@ mega_is_bios_enabled(adapter_t *adapter)
static void
mega_enum_raid_scsi(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
int i;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* issue command to find out what channels are raid/scsi
@@ -3616,7 +3610,7 @@ mega_enum_raid_scsi(adapter_t *adapter)
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
/*
* Non-ROMB firmware fail this command, so all channels
@@ -3655,23 +3649,21 @@ static void
mega_get_boot_drv(adapter_t *adapter)
{
struct private_bios_data *prv_bios_data;
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
u16 cksum = 0;
u8 *cksum_p;
u8 boot_pdrv;
int i;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
raw_mbox[0] = BIOS_PVT_DATA;
raw_mbox[2] = GET_BIOS_PVT_DATA;
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
adapter->boot_ldrv_enabled = 0;
adapter->boot_ldrv = 0;
@@ -3721,13 +3713,11 @@ mega_get_boot_drv(adapter_t *adapter)
static int
mega_support_random_del(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
int rval;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* issue command
@@ -3750,13 +3740,11 @@ mega_support_random_del(adapter_t *adapter)
static int
mega_support_ext_cdb(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
int rval;
- mbox = (mbox_t *)raw_mbox;
-
- memset(&mbox->m_out, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
/*
* issue command to find out if controller supports extended CDBs.
*/
@@ -3865,16 +3853,14 @@ mega_do_del_logdrv(adapter_t *adapter, int logdrv)
static void
mega_get_max_sgl(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
- mbox = (mbox_t *)raw_mbox;
-
- memset(mbox, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
raw_mbox[0] = MAIN_MISC_OPCODE;
raw_mbox[2] = GET_MAX_SG_SUPPORT;
@@ -3888,7 +3874,7 @@ mega_get_max_sgl(adapter_t *adapter)
}
else {
adapter->sglen = *((char *)adapter->mega_buffer);
-
+
/*
* Make sure this is not more than the resources we are
* planning to allocate
@@ -3910,16 +3896,14 @@ mega_get_max_sgl(adapter_t *adapter)
static int
mega_support_cluster(adapter_t *adapter)
{
- unsigned char raw_mbox[sizeof(struct mbox_out)];
- mbox_t *mbox;
-
- mbox = (mbox_t *)raw_mbox;
+ struct mbox_out mbox;
+ unsigned char *raw_mbox = (u8 *)&mbox;
- memset(mbox, 0, sizeof(raw_mbox));
+ memset(&mbox, 0, sizeof(mbox));
memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
- mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
+ mbox.xferaddr = (u32)adapter->buf_dma_handle;
/*
* Try to get the initiator id. This command will succeed iff the
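The megaraid.c churn above is one mechanical refactor repeated per function: instead of declaring a u8 raw_mbox[sizeof(struct mbox_out)] and casting it to the larger mbox_t to memset through, each function now declares the real struct mbox_out on the stack and keeps a byte alias for the raw opcode pokes, so every memset length matches the actual object. A sketch with a hypothetical layout (the real struct mbox_out fields differ):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* hypothetical layout standing in for megaraid's struct mbox_out */
struct mbox_out {
	uint8_t  cmd;
	uint8_t  opcode[13];
	uint32_t xferaddr;
};

int main(void)
{
	struct mbox_out mbox;			/* real object, real size */
	uint8_t *raw_mbox = (uint8_t *)&mbox;	/* byte view for opcode pokes */

	memset(&mbox, 0, sizeof(mbox));		/* length matches the object now */
	raw_mbox[0] = 0xa1;			/* e.g. a FC_NEW_CONFIG-style opcode */
	mbox.xferaddr = 0xdeadbeefu;

	printf("cmd=0x%02x xferaddr=0x%08x\n",
	       raw_mbox[0], (unsigned)mbox.xferaddr);
	return 0;
}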
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 14f930d27ca1..2a339d4a7e9d 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1431,7 +1431,6 @@ mbox_post_cmd(adapter_t *adapter, scb_t *scb)
/**
* megaraid_queue_command_lck - generic queue entry point for all LLDs
* @scp : pointer to the scsi command to be executed
- * @done : callback routine to be called after the cmd has be completed
*
* Queue entry point for mailbox based controllers.
*/
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index aeb95f409826..82e1e24257bc 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -5720,7 +5720,7 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe)
"Failed to register IRQ for vector %d.\n", i);
for (j = 0; j < i; j++) {
if (j < instance->low_latency_index_start)
- irq_set_affinity_hint(
+ irq_update_affinity_hint(
pci_irq_vector(pdev, j), NULL);
free_irq(pci_irq_vector(pdev, j),
&instance->irq_context[j]);
@@ -5763,7 +5763,7 @@ megasas_destroy_irqs(struct megasas_instance *instance) {
if (instance->msix_vectors)
for (i = 0; i < instance->msix_vectors; i++) {
if (i < instance->low_latency_index_start)
- irq_set_affinity_hint(
+ irq_update_affinity_hint(
pci_irq_vector(instance->pdev, i), NULL);
free_irq(pci_irq_vector(instance->pdev, i),
&instance->irq_context[i]);
@@ -5894,22 +5894,25 @@ int megasas_get_device_list(struct megasas_instance *instance)
}
/**
- * megasas_set_high_iops_queue_affinity_hint - Set affinity hint for high IOPS queues
- * @instance: Adapter soft state
- * return: void
+ * megasas_set_high_iops_queue_affinity_and_hint - Set affinity and hint
+ * for high IOPS queues
+ * @instance: Adapter soft state
+ * return: void
*/
static inline void
-megasas_set_high_iops_queue_affinity_hint(struct megasas_instance *instance)
+megasas_set_high_iops_queue_affinity_and_hint(struct megasas_instance *instance)
{
int i;
- int local_numa_node;
+ unsigned int irq;
+ const struct cpumask *mask;
if (instance->perf_mode == MR_BALANCED_PERF_MODE) {
- local_numa_node = dev_to_node(&instance->pdev->dev);
+ mask = cpumask_of_node(dev_to_node(&instance->pdev->dev));
- for (i = 0; i < instance->low_latency_index_start; i++)
- irq_set_affinity_hint(pci_irq_vector(instance->pdev, i),
- cpumask_of_node(local_numa_node));
+ for (i = 0; i < instance->low_latency_index_start; i++) {
+ irq = pci_irq_vector(instance->pdev, i);
+ irq_set_affinity_and_hint(irq, mask);
+ }
}
}
@@ -5998,7 +6001,7 @@ megasas_alloc_irq_vectors(struct megasas_instance *instance)
instance->msix_vectors = 0;
if (instance->smp_affinity_enable)
- megasas_set_high_iops_queue_affinity_hint(instance);
+ megasas_set_high_iops_queue_affinity_and_hint(instance);
}
/**
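megaraid_sas here migrates from irq_set_affinity_hint(), which both moved the IRQ and recorded the hint, to the split helpers: irq_update_affinity_hint() only updates the published /proc/irq hint, while irq_set_affinity_and_hint() applies the mask and publishes it. A kernel-context sketch, not a standalone program, showing the split as this series uses it:

/* kernel-context sketch; assumes <linux/pci.h> and <linux/interrupt.h>,
 * and a struct pci_dev *pdev that is already set up
 */
static void example_irq_hints(struct pci_dev *pdev, int vec,
			      const struct cpumask *mask)
{
	unsigned int irq = pci_irq_vector(pdev, vec);

	/* high-IOPS vectors: actually move the IRQ and publish the hint */
	irq_set_affinity_and_hint(irq, mask);

	/* teardown: drop the published hint without forcing a migration */
	irq_update_affinity_hint(irq, NULL);
}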
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
index d43bbecef651..5e1f6ced0e71 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_cnfg.h
@@ -8,7 +8,7 @@
#define MPI3_CONFIG_PAGETYPE_IO_UNIT (0x00)
#define MPI3_CONFIG_PAGETYPE_MANUFACTURING (0x01)
#define MPI3_CONFIG_PAGETYPE_IOC (0x02)
-#define MPI3_CONFIG_PAGETYPE_UEFI_BSD (0x03)
+#define MPI3_CONFIG_PAGETYPE_DRIVER (0x03)
#define MPI3_CONFIG_PAGETYPE_SECURITY (0x04)
#define MPI3_CONFIG_PAGETYPE_ENCLOSURE (0x11)
#define MPI3_CONFIG_PAGETYPE_DEVICE (0x12)
@@ -181,8 +181,17 @@ struct mpi3_config_page_header {
#define MPI3_SAS_HWRATE_MIN_RATE_6_0 (0x0a)
#define MPI3_SAS_HWRATE_MIN_RATE_12_0 (0x0b)
#define MPI3_SAS_HWRATE_MIN_RATE_22_5 (0x0c)
-#define MPI3_SLOT_INVALID (0xffff)
-#define MPI3_SLOT_INDEX_INVALID (0xffff)
+#define MPI3_SLOT_INVALID (0xffff)
+#define MPI3_SLOT_INDEX_INVALID (0xffff)
+#define MPI3_LINK_CHANGE_COUNT_INVALID (0xffff)
+#define MPI3_RATE_CHANGE_COUNT_INVALID (0xffff)
+#define MPI3_TEMP_SENSOR_LOCATION_INTERNAL (0x0)
+#define MPI3_TEMP_SENSOR_LOCATION_INLET (0x1)
+#define MPI3_TEMP_SENSOR_LOCATION_OUTLET (0x2)
+#define MPI3_TEMP_SENSOR_LOCATION_DRAM (0x3)
+#define MPI3_MFGPAGE_VENDORID_BROADCOM (0x1000)
+#define MPI3_MFGPAGE_DEVID_SAS4116 (0x00a5)
+#define MPI3_MFGPAGE_DEVID_SAS4016 (0x00a7)
struct mpi3_man_page0 {
struct mpi3_config_page_header header;
u8 chip_revision[8];
@@ -195,7 +204,7 @@ struct mpi3_man_page0 {
__le32 reserved98;
u8 oem;
u8 sub_oem;
- __le16 reserved9e;
+ __le16 flags;
u8 board_mfg_day;
u8 board_mfg_month;
__le16 board_mfg_year;
@@ -208,6 +217,8 @@ struct mpi3_man_page0 {
};
#define MPI3_MAN0_PAGEVERSION (0x00)
+#define MPI3_MAN0_FLAGS_SWITCH_PRESENT (0x0002)
+#define MPI3_MAN0_FLAGS_EXPANDER_PRESENT (0x0001)
#define MPI3_MAN1_VPD_SIZE (512)
struct mpi3_man_page1 {
struct mpi3_config_page_header header;
@@ -236,7 +247,7 @@ struct mpi3_man_page5 {
#define MPI3_MAN5_PAGEVERSION (0x00)
struct mpi3_man6_gpio_entry {
u8 function_code;
- u8 reserved01;
+ u8 function_flags;
__le16 flags;
u8 param1;
u8 param2;
@@ -253,7 +264,6 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_FUNCTION_PORT_STATUS_YELLOW (0x06)
#define MPI3_MAN6_GPIO_FUNCTION_CABLE_MANAGEMENT (0x07)
#define MPI3_MAN6_GPIO_FUNCTION_BKPLANE_MGMT_TYPE (0x08)
-#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_MUX_RESET (0x09)
#define MPI3_MAN6_GPIO_FUNCTION_ISTWI_RESET (0x0a)
#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET (0x0b)
#define MPI3_MAN6_GPIO_FUNCTION_GLOBAL_FAULT (0x0c)
@@ -263,6 +273,10 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_FUNCTION_CTRL_TYPE (0x10)
#define MPI3_MAN6_GPIO_FUNCTION_LICENSE (0x11)
#define MPI3_MAN6_GPIO_FUNCTION_REFCLK_CONTROL (0x12)
+#define MPI3_MAN6_GPIO_FUNCTION_BACKEND_PCIE_RESET_CLAMP (0x13)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_MASK (0x01)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_ISTWI (0x00)
+#define MPI3_MAN6_GPIO_ISTWI_RESET_FUNCTIONFLAGS_DEVSELECT_RECEPTACLEID (0x01)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_MASK (0xf0)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_GENERIC (0x00)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_SOURCE_CABLE_MGMT (0x10)
@@ -275,8 +289,6 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_MODULE_PRESENT (0x00)
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_ACTIVE_CABLE_ENABLE (0x01)
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_CABLE_MGMT_ENABLE (0x02)
-#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_SPEC_MUX (0x00)
-#define MPI3_MAN6_GPIO_ISTWI_MUX_RESET_PARAM2_ALL_MUXES (0x01)
#define MPI3_MAN6_GPIO_LICENSE_PARAM1_TYPE_IBUTTON (0x00)
#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_MASK (0x0100)
#define MPI3_MAN6_GPIO_FLAGS_SLEW_RATE_FAST_EDGE (0x0100)
@@ -353,6 +365,7 @@ struct mpi3_man8_phy_info {
__le32 reserved0c;
};
+#define MPI3_MAN8_PHY_INFO_RECEPTACLE_ID_HOST_PHY (0xff)
#ifndef MPI3_MAN8_PHY_INFO_MAX
#define MPI3_MAN8_PHY_INFO_MAX (1)
#endif
@@ -373,20 +386,22 @@ struct mpi3_man9_rsrc_entry {
};
enum mpi3_man9_resources {
- MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0,
- MPI3_MAN9_RSRC_TARGET_CMDS = 1,
- MPI3_MAN9_RSRC_SAS_TARGETS = 2,
- MPI3_MAN9_RSRC_PCIE_TARGETS = 3,
- MPI3_MAN9_RSRC_INITIATORS = 4,
- MPI3_MAN9_RSRC_VDS = 5,
- MPI3_MAN9_RSRC_ENCLOSURES = 6,
- MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7,
- MPI3_MAN9_RSRC_EXPANDERS = 8,
- MPI3_MAN9_RSRC_PCIE_SWITCHES = 9,
- MPI3_MAN9_RSRC_PDS = 10,
- MPI3_MAN9_RSRC_HOST_PDS = 11,
- MPI3_MAN9_RSRC_ADV_HOST_PDS = 12,
- MPI3_MAN9_RSRC_RAID_PDS = 13,
+ MPI3_MAN9_RSRC_OUTSTANDING_REQS = 0,
+ MPI3_MAN9_RSRC_TARGET_CMDS = 1,
+ MPI3_MAN9_RSRC_RESERVED02 = 2,
+ MPI3_MAN9_RSRC_NVME = 3,
+ MPI3_MAN9_RSRC_INITIATORS = 4,
+ MPI3_MAN9_RSRC_VDS = 5,
+ MPI3_MAN9_RSRC_ENCLOSURES = 6,
+ MPI3_MAN9_RSRC_ENCLOSURE_PHYS = 7,
+ MPI3_MAN9_RSRC_EXPANDERS = 8,
+ MPI3_MAN9_RSRC_PCIE_SWITCHES = 9,
+ MPI3_MAN9_RSRC_RESERVED10 = 10,
+ MPI3_MAN9_RSRC_HOST_PD_DRIVES = 11,
+ MPI3_MAN9_RSRC_ADV_HOST_PD_DRIVES = 12,
+ MPI3_MAN9_RSRC_RAID_PD_DRIVES = 13,
+ MPI3_MAN9_RSRC_DRV_DIAG_BUF = 14,
+ MPI3_MAN9_RSRC_NAMESPACE_COUNT = 15,
MPI3_MAN9_RSRC_NUM_RESOURCES
};
@@ -402,6 +417,7 @@ enum mpi3_man9_resources {
#define MPI3_MAN9_MIN_ENCLOSURES (0)
#define MPI3_MAN9_MAX_ENCLOSURES (65535)
#define MPI3_MAN9_MIN_ENCLOSURE_PHYS (0)
+#define MPI3_MAN9_MIN_NAMESPACE_COUNT (1)
#define MPI3_MAN9_MIN_EXPANDERS (0)
#define MPI3_MAN9_MAX_EXPANDERS (65535)
#define MPI3_MAN9_MIN_PCIE_SWITCHES (0)
@@ -422,9 +438,14 @@ struct mpi3_man_page9 {
struct mpi3_man10_istwi_ctrlr_entry {
__le16 slave_address;
__le16 flags;
- __le32 reserved04;
+ u8 scl_low_override;
+ u8 scl_high_override;
+ __le16 reserved06;
};
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_MASK (0x000c)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_100K (0x0000)
+#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_BUS_SPEED_400K (0x0004)
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_SLAVE_ENABLED (0x0002)
#define MPI3_MAN10_ISTWI_CTRLR_FLAGS_MASTER_ENABLED (0x0001)
#ifndef MPI3_MAN10_ISTWI_CTRLR_MAX
@@ -451,10 +472,13 @@ struct mpi3_man11_temp_sensor_device_format {
u8 temp_channel[4];
};
-#define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00)
-#define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01)
-#define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02)
-#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_MAX6654 (0x00)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_EMC1442 (0x01)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_ADT7476 (0x02)
+#define MPI3_MAN11_TEMP_SENSOR_TYPE_SE97B (0x03)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_MASK (0xe0)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_LOCATION_SHIFT (5)
+#define MPI3_MAN11_TEMP_SENSOR_CHANNEL_ENABLED (0x01)
struct mpi3_man11_seeprom_device_format {
u8 size;
u8 page_write_size;
@@ -495,31 +519,40 @@ struct mpi3_man11_bkplane_spec_ubm_format {
#define MPI3_MAN11_BKPLANE_UBM_FLAGS_MAX_FRU_SHIFT (4)
#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
#define MPI3_MAN11_BKPLANE_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
-struct mpi3_man11_bkplane_spec_vpp_format {
+struct mpi3_man11_bkplane_spec_non_ubm_format {
__le16 flags;
- __le16 reserved02;
+ u8 reserved02;
+ u8 type;
};
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0040)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_MASK (0x0030)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_PRESENCE_DETECT_REG (0x0010)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_MASK (0x000f)
-#define MPI3_MAN11_BKPLANE_VPP_FLAGS_POLL_INTERVAL_SHIFT (0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_MASK (0xf000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_GROUP_SHIFT (12)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_REFCLK_POLICY_ALWAYS_ENABLED (0x0200)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_MASK (0x0030)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_GPIO (0x0000)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_PRESENCE_DETECT_REG (0x0010)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_MASK (0x000f)
+#define MPI3_MAN11_BKPLANE_NON_UBM_FLAGS_POLL_INTERVAL_SHIFT (0)
+#define MPI3_MAN11_BKPLANE_NON_UBM_TYPE_VPP (0x00)
union mpi3_man11_bkplane_spec_format {
- struct mpi3_man11_bkplane_spec_ubm_format ubm;
- struct mpi3_man11_bkplane_spec_vpp_format vpp;
+ struct mpi3_man11_bkplane_spec_ubm_format ubm;
+ struct mpi3_man11_bkplane_spec_non_ubm_format non_ubm;
};
struct mpi3_man11_bkplane_mgmt_device_format {
u8 type;
u8 receptacle_id;
- __le16 reserved02;
+ u8 reset_info;
+ u8 reserved03;
union mpi3_man11_bkplane_spec_format backplane_mgmt_specific;
};
#define MPI3_MAN11_BKPLANE_MGMT_TYPE_UBM (0x00)
-#define MPI3_MAN11_BKPLANE_MGMT_TYPE_VPP (0x01)
+#define MPI3_MAN11_BKPLANE_MGMT_TYPE_NON_UBM (0x01)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_MASK (0xf0)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_ASSERT_TIME_SHIFT (4)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_MASK (0x0f)
+#define MPI3_MAN11_BACKPLANE_RESETINFO_READY_TIME_SHIFT (0)
struct mpi3_man11_gas_gauge_device_format {
u8 type;
u8 reserved01[3];
@@ -527,6 +560,11 @@ struct mpi3_man11_gas_gauge_device_format {
};
#define MPI3_MAN11_GAS_GAUGE_TYPE_STANDARD (0x00)
+struct mpi3_man11_mgmt_ctrlr_device_format {
+ __le32 reserved00;
+ __le32 reserved04;
+};
+
union mpi3_man11_device_specific_format {
struct mpi3_man11_mux_device_format mux;
struct mpi3_man11_temp_sensor_device_format temp_sensor;
@@ -535,6 +573,7 @@ union mpi3_man11_device_specific_format {
struct mpi3_man11_cable_mgmt_device_format cable_mgmt;
struct mpi3_man11_bkplane_mgmt_device_format bkplane_mgmt;
struct mpi3_man11_gas_gauge_device_format gas_gauge;
+ struct mpi3_man11_mgmt_ctrlr_device_format mgmt_controller;
__le32 words[2];
};
@@ -556,10 +595,8 @@ struct mpi3_man11_istwi_device_format {
#define MPI3_MAN11_ISTWI_DEVTYPE_CABLE_MGMT (0x04)
#define MPI3_MAN11_ISTWI_DEVTYPE_BACKPLANE_MGMT (0x05)
#define MPI3_MAN11_ISTWI_DEVTYPE_GAS_GAUGE (0x06)
+#define MPI3_MAN11_ISTWI_DEVTYPE_MGMT_CONTROLLER (0x07)
#define MPI3_MAN11_ISTWI_FLAGS_MUX_PRESENT (0x01)
-#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_MASK (0x06)
-#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_100KHZ (0x00)
-#define MPI3_MAN11_ISTWI_FLAGS_BUS_SPEED_400KHZ (0x02)
#ifndef MPI3_MAN11_ISTWI_DEVICE_MAX
#define MPI3_MAN11_ISTWI_DEVICE_MAX (1)
#endif
@@ -692,8 +729,8 @@ struct mpi3_man_page14 {
#define MPI3_MAN14_FLAGS_AUTH_SESSION_REQ (0x01)
#define MPI3_MAN14_FLAGS_AUTH_API_MASK (0x0e)
#define MPI3_MAN14_FLAGS_AUTH_API_NONE (0x00)
-#define MPI3_MAN14_FLAGS_AUTH_API_CEREBUS (0x02)
-#define MPI3_MAN14_FLAGS_AUTH_API_DMTF_PMCI (0x04)
+#define MPI3_MAN14_FLAGS_AUTH_API_CERBERUS (0x02)
+#define MPI3_MAN14_FLAGS_AUTH_API_SPDM (0x04)
#ifndef MPI3_MAN15_VERSION_RECORD_MAX
#define MPI3_MAN15_VERSION_RECORD_MAX 1
#endif
@@ -808,7 +845,7 @@ struct mpi3_io_unit_page1 {
struct mpi3_config_page_header header;
__le32 flags;
u8 dmd_io_delay;
- u8 dmd_report_pc_ie;
+ u8 dmd_report_pcie;
u8 dmd_report_sata;
u8 dmd_report_sas;
};
@@ -844,26 +881,30 @@ struct mpi3_io_unit_page2 {
#define MPI3_IOUNIT2_GPIO_SETTING_ON (0x0001)
struct mpi3_io_unit3_sensor {
__le16 flags;
- __le16 reserved02;
- __le16 threshold[4];
+ u8 threshold_margin;
+ u8 reserved03;
+ __le16 threshold[3];
+ __le16 reserved0a;
__le32 reserved0c;
__le32 reserved10;
__le32 reserved14;
};
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T3_ENABLE (0x0008)
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T2_ENABLE (0x0004)
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T1_ENABLE (0x0002)
-#define MPI3_IOUNIT3_SENSOR_FLAGS_T0_ENABLE (0x0001)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_EVENT_ENABLED (0x0010)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_FATAL_ACTION_ENABLED (0x0008)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_EVENT_ENABLED (0x0004)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_CRITICAL_ACTION_ENABLED (0x0002)
+#define MPI3_IOUNIT3_SENSOR_FLAGS_WARNING_EVENT_ENABLED (0x0001)
#ifndef MPI3_IO_UNIT3_SENSOR_MAX
-#define MPI3_IO_UNIT3_SENSOR_MAX (1)
+#define MPI3_IO_UNIT3_SENSOR_MAX (1)
#endif
struct mpi3_io_unit_page3 {
struct mpi3_config_page_header header;
__le32 reserved08;
u8 num_sensors;
- u8 polling_interval;
- __le16 reserved0e;
+ u8 nominal_poll_interval;
+ u8 warning_poll_interval;
+ u8 reserved0f;
struct mpi3_io_unit3_sensor sensor[MPI3_IO_UNIT3_SENSOR_MAX];
};
@@ -873,13 +914,19 @@ struct mpi3_io_unit4_sensor {
__le16 reserved02;
u8 flags;
u8 reserved05[3];
- __le32 reserved08;
+ __le16 istwi_index;
+ u8 channel;
+ u8 reserved0b;
__le32 reserved0c;
};
+#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_MASK (0xe0)
+#define MPI3_IOUNIT4_SENSOR_FLAGS_LOC_SHIFT (5)
#define MPI3_IOUNIT4_SENSOR_FLAGS_TEMP_VALID (0x01)
+#define MPI3_IOUNIT4_SENSOR_ISTWI_INDEX_INTERNAL (0xffff)
+#define MPI3_IOUNIT4_SENSOR_CHANNEL_RESERVED (0xff)
#ifndef MPI3_IO_UNIT4_SENSOR_MAX
-#define MPI3_IO_UNIT4_SENSOR_MAX (1)
+#define MPI3_IO_UNIT4_SENSOR_MAX (1)
#endif
struct mpi3_io_unit_page4 {
struct mpi3_config_page_header header;
@@ -906,8 +953,9 @@ struct mpi3_io_unit_page5 {
struct mpi3_io_unit5_spinup_group spinup_group_parameters[4];
__le32 reserved18;
__le32 reserved1c;
- __le32 reserved20;
- u8 reserved24;
+ __le16 device_shutdown;
+ __le16 reserved22;
+ u8 pcie_device_wait_time;
u8 sata_device_wait_time;
u8 spinup_encl_drive_count;
u8 spinup_encl_delay;
@@ -919,6 +967,22 @@ struct mpi3_io_unit_page5 {
};
#define MPI3_IOUNIT5_PAGEVERSION (0x00)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NO_ACTION (0x00)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_ATTACHED (0x01)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_EXPANDER_ATTACHED (0x02)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SWITCH_ATTACHED (0x02)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_EXPANDER (0x03)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_DIRECT_AND_SWITCH (0x03)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_MASK (0x0300)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_HDD_SHIFT (8)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_MASK (0x00c0)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_HDD_SHIFT (6)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_MASK (0x0030)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_NVME_SSD_SHIFT (4)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_MASK (0x000c)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SATA_SSD_SHIFT (2)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_MASK (0x0003)
+#define MPI3_IOUNIT5_DEVICE_SHUTDOWN_SAS_SSD_SHIFT (0)
#define MPI3_IOUNIT5_FLAGS_POWER_CAPABLE_SPINUP (0x02)
#define MPI3_IOUNIT5_FLAGS_AUTO_PORT_ENABLE (0x01)
#define MPI3_IOUNIT5_PHY_SPINUP_GROUP_MASK (0x03)
@@ -1012,7 +1076,52 @@ struct mpi3_ioc_page2 {
};
#define MPI3_IOC2_PAGEVERSION (0x00)
-struct mpi3_uefibsd_page0 {
+#define MPI3_DRIVER_FLAGS_ADMINRAIDPD_BLOCKED (0x0010)
+#define MPI3_DRIVER_FLAGS_OOBRAIDPD_BLOCKED (0x0008)
+#define MPI3_DRIVER_FLAGS_OOBRAIDVD_BLOCKED (0x0004)
+#define MPI3_DRIVER_FLAGS_OOBADVHOSTPD_BLOCKED (0x0002)
+#define MPI3_DRIVER_FLAGS_OOBHOSTPD_BLOCKED (0x0001)
+struct mpi3_allowed_cmd_scsi {
+ __le16 service_action;
+ u8 operation_code;
+ u8 command_flags;
+};
+
+struct mpi3_allowed_cmd_ata {
+ u8 subcommand;
+ u8 reserved01;
+ u8 command;
+ u8 command_flags;
+};
+
+struct mpi3_allowed_cmd_nvme {
+ u8 reserved00;
+ u8 nvme_cmd_flags;
+ u8 op_code;
+ u8 command_flags;
+};
+
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_MASK (0x80)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_IO (0x00)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_SUBQ_TYPE_ADMIN (0x80)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_MASK (0x3f)
+#define MPI3_DRIVER_ALLOWEDCMD_NVMECMDFLAGS_CMDSET_NVM (0x00)
+union mpi3_allowed_cmd {
+ struct mpi3_allowed_cmd_scsi scsi;
+ struct mpi3_allowed_cmd_ata ata;
+ struct mpi3_allowed_cmd_nvme nvme;
+};
+
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_ADMINRAIDPD_BLOCKED (0x20)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDPD_BLOCKED (0x10)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBRAIDVD_BLOCKED (0x08)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBADVHOSTPD_BLOCKED (0x04)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_OOBHOSTPD_BLOCKED (0x02)
+#define MPI3_DRIVER_ALLOWEDCMD_CMDFLAGS_CHECKSUBCMD_ENABLED (0x01)
+#ifndef MPI3_ALLOWED_CMDS_MAX
+#define MPI3_ALLOWED_CMDS_MAX (1)
+#endif
+struct mpi3_driver_page0 {
struct mpi3_config_page_header header;
__le32 bsd_options;
u8 ssu_timeout;
@@ -1026,13 +1135,122 @@ struct mpi3_uefibsd_page0 {
__le32 reserved18;
};
-#define MPI3_UEFIBSD_PAGEVERSION (0x00)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_MASK (0x00000003)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
-#define MPI3_UEFIBSD_BSDOPTS_REGISTRATION_NONE (0x00000002)
-#define MPI3_UEFIBSD_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
-#define MPI3_UEFIBSD_BSDOPTS_EN_ADV_ADAPTER_CONFIG (0x00000008)
+#define MPI3_DRIVER0_PAGEVERSION (0x00)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_MASK (0x00000003)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_AND_DEVS (0x00000000)
+#define MPI3_DRIVER0_BSDOPTS_REGISTRATION_IOC_ONLY (0x00000001)
+#define MPI3_DRIVER0_BSDOPTS_DIS_HII_CONFIG_UTIL (0x00000004)
+#define MPI3_DRIVER0_BSDOPTS_EN_ADV_ADAPTER_CONFIG (0x00000008)
+struct mpi3_driver_page1 {
+ struct mpi3_config_page_header header;
+ __le32 flags;
+ __le32 reserved0c;
+ __le16 host_diag_trace_max_size;
+ __le16 host_diag_trace_min_size;
+ __le16 host_diag_trace_decrement_size;
+ __le16 reserved16;
+ __le16 host_diag_fw_max_size;
+ __le16 host_diag_fw_min_size;
+ __le16 host_diag_fw_decrement_size;
+ __le16 reserved1e;
+ __le16 host_diag_driver_max_size;
+ __le16 host_diag_driver_min_size;
+ __le16 host_diag_driver_decrement_size;
+ __le16 reserved26;
+};
+
+#define MPI3_DRIVER1_PAGEVERSION (0x00)
+#ifndef MPI3_DRIVER2_TRIGGER_MAX
+#define MPI3_DRIVER2_TRIGGER_MAX (1)
+#endif
+struct mpi3_driver2_trigger_event {
+ u8 type;
+ u8 flags;
+ u8 reserved02;
+ u8 event;
+ __le32 reserved04[3];
+};
+
+struct mpi3_driver2_trigger_scsi_sense {
+ u8 type;
+ u8 flags;
+ __le16 reserved02;
+ u8 ascq;
+ u8 asc;
+ u8 sense_key;
+ u8 reserved07;
+ __le32 reserved08[2];
+};
+
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASCQ_MATCH_ALL (0xff)
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_ASC_MATCH_ALL (0xff)
+#define MPI3_DRIVER2_TRIGGER_SCSI_SENSE_SENSE_KEY_MATCH_ALL (0xff)
+struct mpi3_driver2_trigger_reply {
+ u8 type;
+ u8 flags;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le32 ioc_log_info_mask;
+ __le32 reserved0c;
+};
+
+#define MPI3_DRIVER2_TRIGGER_REPLY_IOCSTATUS_MATCH_ALL (0xffff)
+union mpi3_driver2_trigger_element {
+ struct mpi3_driver2_trigger_event event;
+ struct mpi3_driver2_trigger_scsi_sense scsi_sense;
+ struct mpi3_driver2_trigger_reply reply;
+};
+
+#define MPI3_DRIVER2_TRIGGER_TYPE_EVENT (0x00)
+#define MPI3_DRIVER2_TRIGGER_TYPE_SCSI_SENSE (0x01)
+#define MPI3_DRIVER2_TRIGGER_TYPE_REPLY (0x02)
+#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_TRACE_RELEASE (0x02)
+#define MPI3_DRIVER2_TRIGGER_FLAGS_DIAG_FW_RELEASE (0x01)
+struct mpi3_driver_page2 {
+ struct mpi3_config_page_header header;
+ __le64 master_trigger;
+ __le32 reserved10[3];
+ u8 num_triggers;
+ u8 reserved1d[3];
+ union mpi3_driver2_trigger_element trigger[MPI3_DRIVER2_TRIGGER_MAX];
+};
+
+#define MPI3_DRIVER2_PAGEVERSION (0x00)
+#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_TRACE_RELEASE (0x8000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_DIAG_FW_RELEASE (0x4000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_SNAPDUMP (0x2000000000000000ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_DEVICE_REMOVAL_ENABLED (0x0000000000000004ULL)
+#define MPI3_DRIVER2_MASTERTRIGGER_TASK_MANAGEMENT_ENABLED (0x0000000000000002ULL)
+struct mpi3_driver_page10 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER10_PAGEVERSION (0x00)
+struct mpi3_driver_page20 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER20_PAGEVERSION (0x00)
+struct mpi3_driver_page30 {
+ struct mpi3_config_page_header header;
+ __le16 flags;
+ __le16 reserved0a;
+ u8 num_allowed_commands;
+ u8 reserved0d[3];
+ union mpi3_allowed_cmd allowed_command[MPI3_ALLOWED_CMDS_MAX];
+};
+
+#define MPI3_DRIVER30_PAGEVERSION (0x00)
union mpi3_security_mac {
__le32 dword[16];
__le16 word[32];
@@ -1102,7 +1320,7 @@ struct mpi3_security1_key_record {
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_NOT_VALID (0x00)
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_SAFESTORE (0x01)
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CERT_CHAIN (0x02)
-#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_AUTH_DEV_KEY (0x03)
+#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_DEVICE_KEY (0x03)
#define MPI3_SECURITY1_KEY_RECORD_CONSUMER_CACHE_OFFLOAD (0x04)
struct mpi3_security_page1 {
struct mpi3_config_page_header header;
@@ -1137,16 +1355,30 @@ struct mpi3_sas_io_unit_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
u8 num_phys;
- u8 reserved0d[3];
+ u8 init_status;
+ __le16 reserved0e;
struct mpi3_sas_io_unit0_phy_data phy_data[MPI3_SAS_IO_UNIT0_PHY_MAX];
};
-#define MPI3_SASIOUNIT0_PAGEVERSION (0x00)
-#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08)
-#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
-#define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
-#define MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
-#define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+#define MPI3_SASIOUNIT0_PAGEVERSION (0x00)
+#define MPI3_SASIOUNIT0_INITSTATUS_NO_ERRORS (0x00)
+#define MPI3_SASIOUNIT0_INITSTATUS_NEEDS_INITIALIZATION (0x01)
+#define MPI3_SASIOUNIT0_INITSTATUS_NO_TARGETS_ALLOCATED (0x02)
+#define MPI3_SASIOUNIT0_INITSTATUS_BAD_NUM_PHYS (0x04)
+#define MPI3_SASIOUNIT0_INITSTATUS_UNSUPPORTED_CONFIG (0x05)
+#define MPI3_SASIOUNIT0_INITSTATUS_HOST_PHYS_ENABLED (0x06)
+#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MIN (0xf0)
+#define MPI3_SASIOUNIT0_INITSTATUS_PRODUCT_SPECIFIC_MAX (0xff)
+#define MPI3_SASIOUNIT0_PORTFLAGS_DISC_IN_PROGRESS (0x08)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_MASK (0x03)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_IOUNIT1 (0x00)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_DYNAMIC (0x01)
+#define MPI3_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG_BACKPLANE (0x02)
+#define MPI3_SASIOUNIT0_PHYFLAGS_INIT_PERSIST_CONNECT (0x40)
+#define MPI3_SASIOUNIT0_PHYFLAGS_TARG_PERSIST_CONNECT (0x20)
+#define MPI3_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+#define MPI3_SASIOUNIT0_PHYFLAGS_VIRTUAL_PHY (0x02)
+#define MPI3_SASIOUNIT0_PHYFLAGS_HOST_PHY (0x01)
struct mpi3_sas_io_unit1_phy_data {
u8 io_unit_port;
u8 port_flags;
@@ -1343,6 +1575,26 @@ struct mpi3_sas_expander_page1 {
#define MPI3_SASEXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
#define MPI3_SASEXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
#define MPI3_SASEXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+#ifndef MPI3_SASEXPANDER2_MAX_NUM_PHYS
+#define MPI3_SASEXPANDER2_MAX_NUM_PHYS (1)
+#endif
+struct mpi3_sasexpander2_phy_element {
+ u8 link_change_count;
+ u8 reserved01;
+ __le16 rate_change_count;
+ __le32 reserved04;
+};
+
+struct mpi3_sas_expander_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_phys;
+ u8 reserved09;
+ __le16 dev_handle;
+ __le32 reserved0c;
+ struct mpi3_sasexpander2_phy_element phy[MPI3_SASEXPANDER2_MAX_NUM_PHYS];
+};
+
+#define MPI3_SASEXPANDER2_PAGEVERSION (0x00)
struct mpi3_sas_port_page0 {
struct mpi3_config_page_header header;
u8 port_number;
@@ -1510,6 +1762,14 @@ struct mpi3_sas_phy_page4 {
#define MPI3_PCIE_NEG_LINK_RATE_8_0 (0x04)
#define MPI3_PCIE_NEG_LINK_RATE_16_0 (0x05)
#define MPI3_PCIE_NEG_LINK_RATE_32_0 (0x06)
+#define MPI3_PCIE_ASPM_ENABLE_NONE (0x0)
+#define MPI3_PCIE_ASPM_ENABLE_L0S (0x1)
+#define MPI3_PCIE_ASPM_ENABLE_L1 (0x2)
+#define MPI3_PCIE_ASPM_ENABLE_L0S_L1 (0x3)
+#define MPI3_PCIE_ASPM_SUPPORT_NONE (0x0)
+#define MPI3_PCIE_ASPM_SUPPORT_L0S (0x1)
+#define MPI3_PCIE_ASPM_SUPPORT_L1 (0x2)
+#define MPI3_PCIE_ASPM_SUPPORT_L0S_L1 (0x3)
struct mpi3_pcie_io_unit0_phy_data {
u8 link;
u8 link_flags;
@@ -1540,7 +1800,8 @@ struct mpi3_pcie_io_unit_page0 {
__le32 reserved08;
u8 num_phys;
u8 init_status;
- __le16 reserved0e;
+ u8 aspm;
+ u8 reserved0f;
struct mpi3_pcie_io_unit0_phy_data phy_data[MPI3_PCIE_IO_UNIT0_PHY_MAX];
};
@@ -1556,6 +1817,14 @@ struct mpi3_pcie_io_unit_page0 {
#define MPI3_PCIEIOUNIT0_INITSTATUS_BAD_CLOCKING_MODE (0x08)
#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_START (0xf0)
#define MPI3_PCIEIOUNIT0_INITSTATUS_PROD_SPEC_END (0xff)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_MASK (0xc0)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_STATES_SHIFT (6)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_MASK (0x30)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_STATES_SHIFT (4)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_MASK (0x0c)
+#define MPI3_PCIEIOUNIT0_ASPM_SWITCH_SUPPORT_SHIFT (2)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_MASK (0x03)
+#define MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_SHIFT (0)
struct mpi3_pcie_io_unit1_phy_data {
u8 link;
u8 link_flags;
@@ -1569,16 +1838,16 @@ struct mpi3_pcie_io_unit1_phy_data {
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_DIS_SEPARATE_REFCLK (0x00)
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRIS (0x01)
#define MPI3_PCIEIOUNIT1_LINKFLAGS_PCIE_CLK_MODE_EN_SRNS (0x02)
-#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50)
-#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60)
+#define MPI3_PCIEIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_MASK (0xf0)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_SHIFT (4)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_2_5 (0x20)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_5_0 (0x30)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_8_0 (0x40)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_16_0 (0x50)
+#define MPI3_PCIEIOUNIT1_MMLR_MAX_RATE_32_0 (0x60)
#ifndef MPI3_PCIE_IO_UNIT1_PHY_MAX
-#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1)
+#define MPI3_PCIE_IO_UNIT1_PHY_MAX (1)
#endif
struct mpi3_pcie_io_unit_page1 {
struct mpi3_config_page_header header;
@@ -1586,21 +1855,66 @@ struct mpi3_pcie_io_unit_page1 {
__le32 reserved0c;
u8 num_phys;
u8 reserved11;
- __le16 reserved12;
+ u8 aspm;
+ u8 reserved13;
struct mpi3_pcie_io_unit1_phy_data phy_data[MPI3_PCIE_IO_UNIT1_PHY_MAX];
};
-#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT1_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_OVERRIDE_DISABLE (0x80)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_DISABLE (0x40)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_MASK (0x30)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SHIFT (4)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_SRNS_DISABLED (0x00)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRIS_ENABLED (0x10)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_CLOCK_OVERRIDE_MODE_SRNS_ENABLED (0x20)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MASK (0x0f)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_2_5 (0x02)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_5_0 (0x03)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_8_0 (0x04)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_16_0 (0x05)
+#define MPI3_PCIEIOUNIT1_CONTROL_FLAGS_LINK_RATE_OVERRIDE_MAX_32_0 (0x06)
+#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_MASK (0x0c)
+#define MPI3_PCIEIOUNIT1_ASPM_SWITCH_SHIFT (2)
+#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_MASK (0x03)
+#define MPI3_PCIEIOUNIT1_ASPM_DIRECT_SHIFT (0)
struct mpi3_pcie_io_unit_page2 {
struct mpi3_config_page_header header;
- __le16 nv_me_max_queue_depth;
- __le16 reserved0a;
- u8 nv_me_abort_to;
+ __le16 nvme_max_q_dx1;
+ __le16 nvme_max_q_dx2;
+ u8 nvme_abort_to;
u8 reserved0d;
- __le16 reserved0e;
+ __le16 nvme_max_q_dx4;
};
#define MPI3_PCIEIOUNIT2_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT3_ERROR_RECEIVER_ERROR (0)
+#define MPI3_PCIEIOUNIT3_ERROR_RECOVERY (1)
+#define MPI3_PCIEIOUNIT3_ERROR_CORRECTABLE_ERROR_MSG (2)
+#define MPI3_PCIEIOUNIT3_ERROR_BAD_DLLP (3)
+#define MPI3_PCIEIOUNIT3_ERROR_BAD_TLP (4)
+#define MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX (5)
+struct mpi3_pcie_io_unit3_error {
+ __le16 threshold_count;
+ __le16 reserved02;
+};
+
+struct mpi3_pcie_io_unit_page3 {
+ struct mpi3_config_page_header header;
+ u8 threshold_window;
+ u8 threshold_action;
+ u8 escalation_count;
+ u8 escalation_action;
+ u8 num_errors;
+ u8 reserved0d[3];
+ struct mpi3_pcie_io_unit3_error error[MPI3_PCIEIOUNIT3_NUM_ERROR_INDEX];
+};
+
+#define MPI3_PCIEIOUNIT3_PAGEVERSION (0x00)
+#define MPI3_PCIEIOUNIT3_ACTION_NO_ACTION (0x00)
+#define MPI3_PCIEIOUNIT3_ACTION_HOT_RESET (0x01)
+#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_ONLY (0x02)
+#define MPI3_PCIEIOUNIT3_ACTION_REDUCE_LINK_RATE_NO_ACCESS (0x03)
struct mpi3_pcie_switch_page0 {
struct mpi3_config_page_header header;
u8 io_unit_port;
@@ -1609,7 +1923,7 @@ struct mpi3_pcie_switch_page0 {
__le16 dev_handle;
__le16 parent_dev_handle;
u8 num_ports;
- u8 pc_ie_level;
+ u8 pcie_level;
__le16 reserved12;
__le32 reserved14;
__le32 reserved18;
@@ -1623,7 +1937,8 @@ struct mpi3_pcie_switch_page0 {
struct mpi3_pcie_switch_page1 {
struct mpi3_config_page_header header;
u8 io_unit_port;
- u8 reserved09[3];
+ u8 flags;
+ __le16 reserved0a;
u8 num_ports;
u8 port_num;
__le16 attached_dev_handle;
@@ -1636,15 +1951,43 @@ struct mpi3_pcie_switch_page1 {
};
#define MPI3_PCIESWITCH1_PAGEVERSION (0x00)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_MASK (0x0c)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSTATE_SHIFT (2)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_MASK (0x03)
+#define MPI3_PCIESWITCH1_FLAGS_ASPMSUPPORT_SHIFT (0)
+#ifndef MPI3_PCIESWITCH2_MAX_NUM_PORTS
+#define MPI3_PCIESWITCH2_MAX_NUM_PORTS (1)
+#endif
+struct mpi3_pcieswitch2_port_element {
+ __le16 link_change_count;
+ __le16 rate_change_count;
+ __le32 reserved04;
+};
+
+struct mpi3_pcie_switch_page2 {
+ struct mpi3_config_page_header header;
+ u8 num_ports;
+ u8 reserved09;
+ __le16 dev_handle;
+ __le32 reserved0c;
+ struct mpi3_pcieswitch2_port_element port[MPI3_PCIESWITCH2_MAX_NUM_PORTS];
+};
+
+#define MPI3_PCIESWITCH2_PAGEVERSION (0x00)
struct mpi3_pcie_link_page0 {
struct mpi3_config_page_header header;
u8 link;
u8 reserved09[3];
- __le32 correctable_error_count;
- __le16 n_fatal_error_count;
- __le16 reserved12;
- __le16 fatal_error_count;
- __le16 reserved16;
+ __le32 reserved0c;
+ __le32 receiver_error_count;
+ __le32 recovery_count;
+ __le32 corr_error_msg_count;
+ __le32 non_fatal_error_msg_count;
+ __le32 fatal_error_msg_count;
+ __le32 non_fatal_error_count;
+ __le32 fatal_error_count;
+ __le32 bad_dllp_count;
+ __le32 bad_tlp_count;
};
#define MPI3_PCIELINK0_PAGEVERSION (0x00)
@@ -1654,11 +1997,12 @@ struct mpi3_enclosure_page0 {
__le16 flags;
__le16 enclosure_handle;
__le16 num_slots;
- __le16 start_slot;
+ __le16 reserved16;
u8 io_unit_port;
u8 enclosure_level;
__le16 sep_dev_handle;
- __le32 reserved1c;
+ u8 chassis_slot;
+ u8 reserved1d[3];
};
#define MPI3_ENCLOSURE0_PAGEVERSION (0x00)
@@ -1666,6 +2010,7 @@ struct mpi3_enclosure_page0 {
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_VIRTUAL (0x0000)
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_SAS (0x4000)
#define MPI3_ENCLS0_FLAGS_ENCL_TYPE_PCIE (0x8000)
+#define MPI3_ENCLS0_FLAGS_CHASSIS_SLOT_VALID (0x0020)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT_MASK (0x0010)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_NOT_FOUND (0x0000)
#define MPI3_ENCLS0_FLAGS_ENCL_DEV_PRESENT (0x0010)
@@ -1686,6 +2031,7 @@ struct mpi3_device0_sas_sata_format {
u8 zone_group;
};
+#define MPI3_DEVICE0_SASSATA_FLAGS_WRITE_SAME_UNMAP_NCQ (0x0400)
#define MPI3_DEVICE0_SASSATA_FLAGS_SLUMBER_CAP (0x0200)
#define MPI3_DEVICE0_SASSATA_FLAGS_PARTIAL_CAP (0x0100)
#define MPI3_DEVICE0_SASSATA_FLAGS_ASYNC_NOTIFY (0x0080)
@@ -1707,10 +2053,11 @@ struct mpi3_device0_pcie_format {
__le32 maximum_data_transfer_size;
__le32 capabilities;
__le16 noiob;
- u8 nv_me_abort_to;
+ u8 nvme_abort_to;
u8 page_size;
__le16 shutdown_latency;
- __le16 reserved16;
+ u8 recovery_info;
+ u8 reserved17;
};
#define MPI3_DEVICE0_PCIE_LINK_RATE_32_0_SUPP (0x10)
@@ -1718,16 +2065,38 @@ struct mpi3_device0_pcie_format {
#define MPI3_DEVICE0_PCIE_LINK_RATE_8_0_SUPP (0x04)
#define MPI3_DEVICE0_PCIE_LINK_RATE_5_0_SUPP (0x02)
#define MPI3_DEVICE0_PCIE_LINK_RATE_2_5_SUPP (0x01)
-#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0003)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK (0x0007)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NO_DEVICE (0x0000)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE (0x0001)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SWITCH_DEVICE (0x0002)
#define MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE (0x0003)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_MASK (0x0030)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_ASPM_SHIFT (4)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_MASK (0x00c0)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_SHIFT (6)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_0 (0x0000)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_1 (0x0040)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_2 (0x0080)
+#define MPI3_DEVICE0_PCIE_DEVICE_INFO_PITYPE_3 (0x00c0)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_EXTRA_LENGTH_SUPPORTED (0x00000020)
#define MPI3_DEVICE0_PCIE_CAP_METADATA_SEPARATED (0x00000010)
#define MPI3_DEVICE0_PCIE_CAP_SGL_DWORD_ALIGN_REQUIRED (0x00000008)
-#define MPI3_DEVICE0_PCIE_CAP_NVME_SGL_ENABLED (0x00000004)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_SGL (0x00000004)
+#define MPI3_DEVICE0_PCIE_CAP_SGL_FORMAT_PRP (0x00000000)
#define MPI3_DEVICE0_PCIE_CAP_BIT_BUCKET_SGL_SUPP (0x00000002)
#define MPI3_DEVICE0_PCIE_CAP_SGL_SUPP (0x00000001)
+#define MPI3_DEVICE0_PCIE_CAP_ASPM_MASK (0x000000c0)
+#define MPI3_DEVICE0_PCIE_CAP_ASPM_SHIFT (6)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_MASK (0xe0)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_NS_MGMT (0x00)
+#define MPI3_DEVICE0_PCIE_RECOVER_METHOD_FORMAT (0x20)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_MASK (0x1f)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NS (0x00)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_NO_NSID_1 (0x01)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_TOO_MANY_NS (0x02)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_PROTECTION (0x03)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_METADATA_SZ (0x04)
+#define MPI3_DEVICE0_PCIE_RECOVER_REASON_LBA_DATA_SZ (0x05)
struct mpi3_device0_vd_format {
u8 vd_state;
u8 raid_level;
@@ -1783,6 +2152,8 @@ struct mpi3_device_page0 {
};
#define MPI3_DEVICE0_PAGEVERSION (0x00)
+#define MPI3_DEVICE0_PARENT_INVALID (0xffff)
+#define MPI3_DEVICE0_ENCLOSURE_HANDLE_NO_ENCLOSURE (0x0000)
#define MPI3_DEVICE0_WWID_INVALID (0xffffffffffffffff)
#define MPI3_DEVICE0_PERSISTENTID_INVALID (0xffff)
#define MPI3_DEVICE0_IOUNITPORT_INVALID (0xff)
@@ -1792,9 +2163,13 @@ struct mpi3_device_page0 {
#define MPI3_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x03)
#define MPI3_DEVICE0_ASTATUS_UNAUTHORIZED (0x04)
#define MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY (0x05)
+#define MPI3_DEVICE0_ASTATUS_PREPARE (0x06)
+#define MPI3_DEVICE0_ASTATUS_SAFE_MODE (0x07)
+#define MPI3_DEVICE0_ASTATUS_GENERIC_MAX (0x0f)
#define MPI3_DEVICE0_ASTATUS_SAS_UNKNOWN (0x10)
#define MPI3_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x11)
#define MPI3_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x12)
+#define MPI3_DEVICE0_ASTATUS_SAS_MAX (0x1f)
#define MPI3_DEVICE0_ASTATUS_SIF_UNKNOWN (0x20)
#define MPI3_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x21)
#define MPI3_DEVICE0_ASTATUS_SIF_DIAG (0x22)
@@ -1810,6 +2185,8 @@ struct mpi3_device_page0 {
#define MPI3_DEVICE0_ASTATUS_PCIE_MEM_SPACE_ACCESS (0x31)
#define MPI3_DEVICE0_ASTATUS_PCIE_UNSUPPORTED (0x32)
#define MPI3_DEVICE0_ASTATUS_PCIE_MSIX_REQUIRED (0x33)
+#define MPI3_DEVICE0_ASTATUS_PCIE_ECRC_REQUIRED (0x34)
+#define MPI3_DEVICE0_ASTATUS_PCIE_MAX (0x3f)
#define MPI3_DEVICE0_ASTATUS_NVME_UNKNOWN (0x40)
#define MPI3_DEVICE0_ASTATUS_NVME_READY_TIMEOUT (0x41)
#define MPI3_DEVICE0_ASTATUS_NVME_DEVCFG_UNSUPPORTED (0x42)
@@ -1820,7 +2197,17 @@ struct mpi3_device_page0 {
#define MPI3_DEVICE0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED (0x47)
#define MPI3_DEVICE0_ASTATUS_NVME_IDLE_TIMEOUT (0x48)
#define MPI3_DEVICE0_ASTATUS_NVME_CTRL_FAILURE_STATUS (0x49)
-#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x50)
+#define MPI3_DEVICE0_ASTATUS_NVME_INSUFFICIENT_POWER (0x4a)
+#define MPI3_DEVICE0_ASTATUS_NVME_DOORBELL_STRIDE (0x4b)
+#define MPI3_DEVICE0_ASTATUS_NVME_MEM_PAGE_MIN_SIZE (0x4c)
+#define MPI3_DEVICE0_ASTATUS_NVME_MEMORY_ALLOCATION (0x4d)
+#define MPI3_DEVICE0_ASTATUS_NVME_COMPLETION_TIME (0x4e)
+#define MPI3_DEVICE0_ASTATUS_NVME_BAR (0x4f)
+#define MPI3_DEVICE0_ASTATUS_NVME_NS_DESCRIPTOR (0x50)
+#define MPI3_DEVICE0_ASTATUS_NVME_INCOMPATIBLE_SETTINGS (0x51)
+#define MPI3_DEVICE0_ASTATUS_NVME_MAX (0x5f)
+#define MPI3_DEVICE0_ASTATUS_VD_UNKNOWN (0x80)
+#define MPI3_DEVICE0_ASTATUS_VD_MAX (0x8f)
#define MPI3_DEVICE0_FLAGS_CONTROLLER_DEV_HANDLE (0x0080)
#define MPI3_DEVICE0_FLAGS_HIDDEN (0x0008)
#define MPI3_DEVICE0_FLAGS_ATT_METHOD_MASK (0x0006)
@@ -1870,11 +2257,17 @@ struct mpi3_device_page1 {
struct mpi3_config_page_header header;
__le16 dev_handle;
__le16 reserved0a;
- __le32 reserved0c[12];
+ __le16 link_change_count;
+ __le16 rate_change_count;
+ __le16 tm_count;
+ __le16 reserved12;
+ __le32 reserved14[10];
u8 reserved3c[3];
u8 device_form;
union mpi3_device1_dev_spec_format device_specific;
};
#define MPI3_DEVICE1_PAGEVERSION (0x00)
+#define MPI3_DEVICE1_COUNTER_MAX (0xfffe)
+#define MPI3_DEVICE1_COUNTER_INVALID (0xffff)
#endif
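
The ASPM additions above follow the header's usual mask/shift convention. A minimal sketch of decoding the new aspm byte of mpi3_pcie_io_unit_page0 (the helper name is hypothetical; only the MPI3_* identifiers come from the page definitions):

static u8 example_pcie_aspm_direct_support(const struct mpi3_pcie_io_unit_page0 *pg0)
{
	/* Bits [1:0] report ASPM support of directly attached devices;
	 * the result compares against the MPI3_PCIE_ASPM_SUPPORT_NONE /
	 * _L0S / _L1 / _L0S_L1 encodings.
	 */
	return (pg0->aspm & MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_MASK) >>
		MPI3_PCIEIOUNIT0_ASPM_DIRECT_SUPPORT_SHIFT;
}
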
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_image.h b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
index 169e4f9b7b7c..c29b87de8e18 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_image.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_image.h
@@ -61,6 +61,8 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053)
#define MPI3_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147)
#define MPI3_IMAGE_HEADER_SIGNATURE1_PBLP (0x504c4250)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST (0x464e414d)
+#define MPI3_IMAGE_HEADER_SIGNATURE1_OEM (0x204d454f)
#define MPI3_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_MASK (0x00000030)
#define MPI3_IMAGE_HEADER_FLAGS_DEVICE_KEY_BASIS_CDI (0x00000000)
@@ -94,6 +96,61 @@ struct mpi3_component_image_header {
#define MPI3_IMAGE_HEADER_HASH_EXCLUSION_OFFSET (0x5c)
#define MPI3_IMAGE_HEADER_NEXT_IMAGE_HEADER_OFFSET_OFFSET (0x7c)
#define MPI3_IMAGE_HEADER_SIZE (0x100)
+#ifndef MPI3_CI_MANIFEST_MPI_MAX
+#define MPI3_CI_MANIFEST_MPI_MAX (1)
+#endif
+struct mpi3_ci_manifest_mpi_comp_image_ref {
+ __le32 signature1;
+ __le32 reserved04[3];
+ struct mpi3_comp_image_version component_image_version;
+ __le32 component_image_version_string_offset;
+ __le32 crc;
+};
+
+struct mpi3_ci_manifest_mpi {
+ u8 manifest_type;
+ u8 reserved01[3];
+ __le32 reserved04[3];
+ u8 num_image_references;
+ u8 release_level;
+ __le16 reserved12;
+ __le16 reserved14;
+ __le16 flags;
+ __le32 reserved18[2];
+ __le16 vendor_id;
+ __le16 device_id;
+ __le16 subsystem_vendor_id;
+ __le16 subsystem_id;
+ __le32 reserved28[2];
+ union mpi3_version_union package_security_version;
+ __le32 reserved34;
+ struct mpi3_comp_image_version package_version;
+ __le32 package_version_string_offset;
+ __le32 package_build_date_string_offset;
+ __le32 package_build_time_string_offset;
+ __le32 reserved4c;
+ __le32 diag_authorization_identifier[16];
+ struct mpi3_ci_manifest_mpi_comp_image_ref component_image_ref[MPI3_CI_MANIFEST_MPI_MAX];
+};
+
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_DEV (0x00)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_PREALPHA (0x10)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_ALPHA (0x20)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_BETA (0x30)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_RC (0x40)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_GCA (0x50)
+#define MPI3_CI_MANIFEST_MPI_RELEASE_LEVEL_POINT (0x60)
+#define MPI3_CI_MANIFEST_MPI_FLAGS_DIAG_AUTHORIZATION (0x01)
+#define MPI3_CI_MANIFEST_MPI_SUBSYSTEMID_IGNORED (0xffff)
+#define MPI3_CI_MANIFEST_MPI_PKG_VER_STR_OFF_UNSPECIFIED (0x00000000)
+#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_DATE_STR_OFF_UNSPECIFIED (0x00000000)
+#define MPI3_CI_MANIFEST_MPI_PKG_BUILD_TIME_STR_OFF_UNSPECIFIED (0x00000000)
+union mpi3_ci_manifest {
+ struct mpi3_ci_manifest_mpi mpi;
+ __le32 dword[1];
+};
+
+#define MPI3_CI_MANIFEST_TYPE_MPI (0x00)
struct mpi3_extended_image_header {
u8 image_type;
u8 reserved01[3];
@@ -161,6 +218,7 @@ struct mpi3_encrypted_hash_entry {
#define MPI3_HASH_ALGORITHM_SIZE_UNUSED (0x00)
#define MPI3_HASH_ALGORITHM_SIZE_SHA256 (0x01)
#define MPI3_HASH_ALGORITHM_SIZE_SHA512 (0x02)
+#define MPI3_HASH_ALGORITHM_SIZE_SHA384 (0x03)
#define MPI3_ENCRYPTION_ALGORITHM_UNUSED (0x00)
#define MPI3_ENCRYPTION_ALGORITHM_RSA256 (0x01)
#define MPI3_ENCRYPTION_ALGORITHM_RSA512 (0x02)
@@ -178,7 +236,6 @@ struct mpi3_encrypted_key_with_hash_entry {
u8 reserved03;
__le32 reserved04;
__le32 public_key[MPI3_PUBLIC_KEY_MAX];
- __le32 encrypted_hash[MPI3_ENCRYPTED_HASH_MAX];
};
#ifndef MPI3_ENCRYPTED_HASH_ENTRY_MAX
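
The new manifest structures use the MPI convention of declaring a one-element array (MPI3_CI_MANIFEST_MPI_MAX) that callers size at allocation time, so iteration is bounded by num_image_references rather than the declared array length. A minimal sketch (helper name hypothetical), assuming the manifest buffer was allocated and read in full:

static void example_dump_manifest(const union mpi3_ci_manifest *manifest)
{
	const struct mpi3_ci_manifest_mpi *m = &manifest->mpi;
	u8 i;

	if (m->manifest_type != MPI3_CI_MANIFEST_TYPE_MPI)
		return;
	/* loop bound comes from the firmware-reported count */
	for (i = 0; i < m->num_image_references; i++)
		pr_info("image %u: signature1 0x%08x crc 0x%08x\n", i,
			le32_to_cpu(m->component_image_ref[i].signature1),
			le32_to_cpu(m->component_image_ref[i].crc));
}
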
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_init.h b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
index e02b6d3cfba2..7a208dc81d49 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_init.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_init.h
@@ -13,7 +13,7 @@ struct mpi3_scsi_io_cdb_eedp32 {
__le32 transfer_length;
};
-union mpi3_scso_io_cdb_union {
+union mpi3_scsi_io_cdb_union {
u8 cdb32[32];
struct mpi3_scsi_io_cdb_eedp32 eedp32;
struct mpi3_sge_common sge;
@@ -32,11 +32,12 @@ struct mpi3_scsi_io_request {
__le32 skip_count;
__le32 data_length;
u8 lun[8];
- union mpi3_scso_io_cdb_union cdb;
+ union mpi3_scsi_io_cdb_union cdb;
union mpi3_sge_union sgl[4];
};
#define MPI3_SCSIIO_MSGFLAGS_METASGL_VALID (0x80)
+#define MPI3_SCSIIO_MSGFLAGS_DIVERT_TO_FIRMWARE (0x40)
#define MPI3_SCSIIO_FLAGS_LARGE_CDB (0x60000000)
#define MPI3_SCSIIO_FLAGS_CDB_16_OR_LESS (0x00000000)
#define MPI3_SCSIIO_FLAGS_CDB_GREATER_THAN_16 (0x20000000)
@@ -155,5 +156,13 @@ struct mpi3_scsi_task_mgmt_reply {
__le32 reserved18;
};
-#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE (0x00)
+#define MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME (0x02)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED (0x04)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED (0x05)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED (0x08)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN (0x09)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG (0x0a)
+#define MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC (0x80)
+#define MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED (0x81)
#endif
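
With the full response-code list now spelled out, checking a task-management outcome is a compare against these values; the driver masks the code out of the reply's response information with MPI3MR_RI_MASK_RESPCODE (0x000000FF). A minimal, hypothetical sketch:

static bool example_tm_succeeded(u32 response_data)
{
	u8 rspcode = response_data & 0xff;	/* MPI3MR_RI_MASK_RESPCODE */

	return rspcode == MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED ||
	       rspcode == MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
}
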
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
index 1af99a5382d5..bc56273778d3 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_ioc.h
@@ -29,10 +29,15 @@ struct mpi3_ioc_init_request {
__le64 driver_information_address;
};
-#define MPI3_WHOINIT_NOT_INITIALIZED (0x00)
-#define MPI3_WHOINIT_ROM_BIOS (0x02)
-#define MPI3_WHOINIT_HOST_DRIVER (0x03)
-#define MPI3_WHOINIT_MANUFACTURER (0x04)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_MASK (0x03)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_NOT_USED (0x00)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_SEPARATED (0x01)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_INLINE (0x02)
+#define MPI3_IOCINIT_MSGFLAGS_HOSTMETADATA_BOTH (0x03)
+#define MPI3_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI3_WHOINIT_ROM_BIOS (0x02)
+#define MPI3_WHOINIT_HOST_DRIVER (0x03)
+#define MPI3_WHOINIT_MANUFACTURER (0x04)
struct mpi3_driver_info_layout {
__le32 information_length;
u8 driver_signature[12];
@@ -77,17 +82,17 @@ struct mpi3_ioc_facts_data {
u8 sge_modifier_shift;
u8 protocol_flags;
__le16 max_sas_initiators;
- __le16 max_sas_targets;
+ __le16 reserved2a;
__le16 max_sas_expanders;
__le16 max_enclosures;
__le16 min_dev_handle;
__le16 max_dev_handle;
- __le16 max_pc_ie_switches;
+ __le16 max_pcie_switches;
__le16 max_nvme;
- __le16 max_pds;
+ __le16 reserved38;
__le16 max_vds;
__le16 max_host_pds;
- __le16 max_advanced_host_pds;
+ __le16 max_adv_host_pds;
__le16 max_raid_pds;
__le16 max_posted_cmd_buffers;
__le32 flags;
@@ -97,26 +102,41 @@ struct mpi3_ioc_facts_data {
__le16 reserved4e;
__le32 diag_trace_size;
__le32 diag_fw_size;
+ __le32 diag_driver_size;
+ u8 max_host_pd_ns_count;
+ u8 max_adv_host_pd_ns_count;
+ u8 max_raidpd_ns_count;
+ u8 reserved5f;
};
-#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD (0x00000010)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_MASK (0x80000000)
+#define MPI3_IOCFACTS_CAPABILITY_SUPERVISOR_IOC (0x00000000)
+#define MPI3_IOCFACTS_CAPABILITY_NON_SUPERVISOR_IOC (0x10000000)
+#define MPI3_IOCFACTS_CAPABILITY_COMPLETE_RESET_CAPABLE (0x00000100)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_TRACE_ENABLED (0x00000080)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_FW_ENABLED (0x00000040)
+#define MPI3_IOCFACTS_CAPABILITY_SEG_DIAG_DRIVER_ENABLED (0x00000020)
+#define MPI3_IOCFACTS_CAPABILITY_ADVANCED_HOST_PD_ENABLED (0x00000010)
#define MPI3_IOCFACTS_CAPABILITY_RAID_CAPABLE (0x00000008)
-#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_GRAN_MASK (0x00000001)
-#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_IOC_GRAN (0x00000000)
-#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_REPLY_Q_GRAN (0x00000001)
+#define MPI3_IOCFACTS_CAPABILITY_MULTIPATH_ENABLED (0x00000002)
+#define MPI3_IOCFACTS_CAPABILITY_COALESCE_CTRL_SUPPORTED (0x00000001)
#define MPI3_IOCFACTS_PID_TYPE_MASK (0xf000)
#define MPI3_IOCFACTS_PID_TYPE_SHIFT (12)
#define MPI3_IOCFACTS_PID_PRODUCT_MASK (0x0f00)
#define MPI3_IOCFACTS_PID_PRODUCT_SHIFT (8)
#define MPI3_IOCFACTS_PID_FAMILY_MASK (0x00ff)
#define MPI3_IOCFACTS_PID_FAMILY_SHIFT (0)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_REKEY (0x2000)
+#define MPI3_IOCFACTS_EXCEPT_SAS_DISABLED (0x1000)
#define MPI3_IOCFACTS_EXCEPT_SAFE_MODE (0x0800)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_MASK (0x0700)
#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_NONE (0x0000)
-#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_RAID (0x0100)
-#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0200)
-#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_RAID (0x0300)
-#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0400)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_MGMT (0x0100)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_MGMT (0x0200)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_MGMT (0x0300)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_LOCAL_VIA_OOB (0x0400)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_EXT_VIA_OOB (0x0500)
+#define MPI3_IOCFACTS_EXCEPT_SECURITY_KEY_DRIVE_EXT_VIA_OOB (0x0600)
#define MPI3_IOCFACTS_EXCEPT_PCIE_DISABLED (0x0080)
#define MPI3_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0040)
#define MPI3_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0020)
@@ -175,6 +195,7 @@ struct mpi3_create_request_queue_request {
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_MASK (0x80)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED (0x80)
#define MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_CONTIGUOUS (0x00)
+#define MPI3_CREATE_REQUEST_QUEUE_SIZE_MINIMUM (2)
struct mpi3_delete_request_queue_request {
__le16 host_tag;
u8 ioc_use_only02;
@@ -210,6 +231,7 @@ struct mpi3_create_reply_queue_request {
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_MASK (0x01)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_DISABLE (0x00)
#define MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE (0x01)
+#define MPI3_CREATE_REPLY_QUEUE_SIZE_MINIMUM (2)
struct mpi3_delete_reply_queue_request {
__le16 host_tag;
u8 ioc_use_only02;
@@ -255,7 +277,9 @@ struct mpi3_port_enable_request {
#define MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR (0x19)
#define MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST (0x20)
#define MPI3_EVENT_PCIE_ENUMERATION (0x22)
+#define MPI3_EVENT_PCIE_ERROR_THRESHOLD (0x23)
#define MPI3_EVENT_HARD_RESET_RECEIVED (0x40)
+#define MPI3_EVENT_DIAGNOSTIC_BUFFER_STATUS_CHANGE (0x50)
#define MPI3_EVENT_MIN_PRODUCT_SPECIFIC (0x60)
#define MPI3_EVENT_MAX_PRODUCT_SPECIFIC (0x7f)
#define MPI3_EVENT_NOTIFY_EVENTMASK_WORDS (4)
@@ -311,10 +335,9 @@ struct mpi3_event_data_temp_threshold {
__le32 reserved0c;
};
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD3_EXCEEDED (0x0008)
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD2_EXCEEDED (0x0004)
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD1_EXCEEDED (0x0002)
-#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_THRESHOLD0_EXCEEDED (0x0001)
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_FATAL_THRESHOLD_EXCEEDED (0x0004)
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_CRITICAL_THRESHOLD_EXCEEDED (0x0002)
+#define MPI3_EVENT_TEMP_THRESHOLD_STATUS_WARNING_THRESHOLD_EXCEEDED (0x0001)
struct mpi3_event_data_cable_management {
__le32 active_cable_power_requirement;
u8 status;
@@ -398,8 +421,10 @@ struct mpi3_event_data_sas_discovery {
#define MPI3_SAS_DISC_STATUS_MAX_EXPANDERS_EXCEED (0x40000000)
#define MPI3_SAS_DISC_STATUS_MAX_DEVICES_EXCEED (0x20000000)
#define MPI3_SAS_DISC_STATUS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI3_SAS_DISC_STATUS_INVALID_CEI (0x00010000)
+#define MPI3_SAS_DISC_STATUS_FECEI_MISMATCH (0x00008000)
#define MPI3_SAS_DISC_STATUS_MULTIPLE_DEVICES_IN_SLOT (0x00004000)
-#define MPI3_SAS_DISC_STATUS_SLOT_COUNT_MISMATCH (0x00002000)
+#define MPI3_SAS_DISC_STATUS_NECEI_MISMATCH (0x00002000)
#define MPI3_SAS_DISC_STATUS_TOO_MANY_SLOTS (0x00001000)
#define MPI3_SAS_DISC_STATUS_EXP_MULTI_SUBTRACTIVE (0x00000800)
#define MPI3_SAS_DISC_STATUS_MULTI_PORT_DOMAIN (0x00000400)
@@ -581,6 +606,20 @@ struct mpi3_event_data_pcie_topology_change_list {
#define MPI3_EVENT_PCIE_TOPO_SS_NOT_RESPONDING (0x02)
#define MPI3_EVENT_PCIE_TOPO_SS_RESPONDING (0x03)
#define MPI3_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING (0x04)
+struct mpi3_event_data_pcie_error_threshold {
+ __le64 timestamp;
+ u8 reason_code;
+ u8 port;
+ __le16 switch_dev_handle;
+ u8 error;
+ u8 action;
+ __le16 threshold_count;
+ __le16 attached_dev_handle;
+ __le16 reserved12;
+};
+
+#define MPI3_EVENT_PCI_ERROR_RC_THRESHOLD_EXCEEDED (0x00)
+#define MPI3_EVENT_PCI_ERROR_RC_ESCALATION (0x01)
struct mpi3_event_data_sas_init_dev_status_change {
u8 reason_code;
u8 io_unit_port;
@@ -604,6 +643,16 @@ struct mpi3_event_data_hard_reset_received {
__le16 reserved02;
};
+struct mpi3_event_data_diag_buffer_status_change {
+ u8 type;
+ u8 reason_code;
+ __le16 reserved02;
+ __le32 reserved04;
+};
+
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RELEASED (0x01)
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_PAUSED (0x02)
+#define MPI3_EVENT_DIAG_BUFFER_STATUS_CHANGE_RC_RESUMED (0x03)
#define MPI3_PEL_LOCALE_FLAGS_NON_BLOCKING_BOOT_EVENT (0x0200)
#define MPI3_PEL_LOCALE_FLAGS_BLOCKING_BOOT_EVENT (0x0100)
#define MPI3_PEL_LOCALE_FLAGS_PCIE (0x0080)
@@ -645,21 +694,23 @@ struct mpi3_pel_seq {
};
struct mpi3_pel_entry {
+ __le64 time_stamp;
__le32 sequence_number;
- __le32 time_stamp[2];
__le16 log_code;
__le16 arg_type;
__le16 locale;
u8 class;
- u8 reserved13;
+ u8 flags;
u8 ext_num;
u8 num_exts;
u8 arg_data_size;
- u8 fixed_format_size;
+ u8 fixed_format_strings_size;
__le32 reserved18[2];
__le32 pel_info[24];
};
+#define MPI3_PEL_FLAGS_COMPLETE_RESET_NEEDED (0x02)
+#define MPI3_PEL_FLAGS_ACK_NEEDED (0x01)
struct mpi3_pel_list {
__le32 log_count;
__le32 reserved04;
@@ -837,7 +888,10 @@ struct mpi3_pel_req_action_acknowledge {
__le32 reserved10;
};
-#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT (0x01)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_MASK (0x03)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_NO_GUIDANCE (0x00)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_CONTINUE_OP (0x01)
+#define MPI3_PELACKNOWLEDGE_MSGFLAGS_SAFE_MODE_EXIT_TRANSITION_TO_FAULT (0x02)
struct mpi3_pel_reply {
__le16 host_tag;
u8 ioc_use_only02;
@@ -885,6 +939,7 @@ struct mpi3_ci_download_request {
#define MPI3_CI_DOWNLOAD_ACTION_ONLINE_ACTIVATION (0x02)
#define MPI3_CI_DOWNLOAD_ACTION_OFFLINE_ACTIVATION (0x03)
#define MPI3_CI_DOWNLOAD_ACTION_GET_STATUS (0x04)
+#define MPI3_CI_DOWNLOAD_ACTION_CANCEL_OFFLINE_ACTIVATION (0x05)
struct mpi3_ci_download_reply {
__le16 host_tag;
u8 ioc_use_only02;
@@ -902,6 +957,7 @@ struct mpi3_ci_download_reply {
};
#define MPI3_CI_DOWNLOAD_FLAGS_DOWNLOAD_IN_PROGRESS (0x80)
+#define MPI3_CI_DOWNLOAD_FLAGS_OFFLINE_ACTIVATION_REQUIRED (0x20)
#define MPI3_CI_DOWNLOAD_FLAGS_KEY_UPDATE_PENDING (0x10)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_MASK (0x0e)
#define MPI3_CI_DOWNLOAD_FLAGS_ACTIVATION_STATUS_NOT_NEEDED (0x00)
@@ -939,19 +995,28 @@ struct mpi3_ci_upload_request {
#define MPI3_CTRL_OP_REMOVE_DEVICE (0x10)
#define MPI3_CTRL_OP_CLOSE_PERSISTENT_CONNECTION (0x11)
#define MPI3_CTRL_OP_HIDDEN_ACK (0x12)
+#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS (0x13)
#define MPI3_CTRL_OP_SAS_SEND_PRIMITIVE (0x20)
-#define MPI3_CTRL_OP_SAS_CLEAR_ERROR_LOG (0x21)
-#define MPI3_CTRL_OP_PCIE_CLEAR_ERROR_LOG (0x22)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL (0x21)
+#define MPI3_CTRL_OP_READ_INTERNAL_BUS (0x23)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS (0x24)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL (0x30)
#define MPI3_CTRL_OP_LOOKUP_MAPPING_PARAM8_LOOKUP_METHOD_INDEX (0x00)
#define MPI3_CTRL_OP_UPDATE_TIMESTAMP_PARAM64_TIMESTAMP_INDEX (0x00)
#define MPI3_CTRL_OP_REMOVE_DEVICE_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_CLOSE_PERSIST_CONN_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_HIDDEN_ACK_PARAM16_DEVHANDLE_INDEX (0x00)
+#define MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS_PARAM16_DEVHANDLE_INDEX (0x00)
#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PHY_INDEX (0x00)
#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM8_PRIMSEQ_INDEX (0x01)
#define MPI3_CTRL_OP_SAS_SEND_PRIM_PARAM32_PRIMITIVE_INDEX (0x00)
-#define MPI3_CTRL_OP_SAS_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00)
-#define MPI3_CTRL_OP_PCIE_CLEAR_ERR_LOG_PARAM8_PHY_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_ACTION_INDEX (0x00)
+#define MPI3_CTRL_OP_SAS_PHY_CONTROL_PARAM8_PHY_INDEX (0x01)
+#define MPI3_CTRL_OP_READ_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM64_ADDRESS_INDEX (0x00)
+#define MPI3_CTRL_OP_WRITE_INTERNAL_BUS_PARAM32_VALUE_INDEX (0x00)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_ACTION_INDEX (0x00)
+#define MPI3_CTRL_OP_PCIE_LINK_CONTROL_PARAM8_LINK_INDEX (0x01)
#define MPI3_CTRL_LOOKUP_METHOD_WWID_ADDRESS (0x01)
#define MPI3_CTRL_LOOKUP_METHOD_ENCLOSURE_SLOT (0x02)
#define MPI3_CTRL_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
@@ -966,9 +1031,14 @@ struct mpi3_ci_upload_request {
#define MPI3_CTRL_LOOKUP_METHOD_PERSISTID_PARAM16_PERSISTENT_ID_INDEX (1)
#define MPI3_CTRL_LOOKUP_METHOD_VALUE16_DEVH_INDEX (0)
#define MPI3_CTRL_GET_TIMESTAMP_VALUE64_TIMESTAMP_INDEX (0)
+#define MPI3_CTRL_READ_INTERNAL_BUS_VALUE32_VALUE_INDEX (0)
#define MPI3_CTRL_PRIMFLAGS_SINGLE (0x01)
#define MPI3_CTRL_PRIMFLAGS_TRIPLE (0x03)
#define MPI3_CTRL_PRIMFLAGS_REDUNDANT (0x06)
+#define MPI3_CTRL_ACTION_NOP (0x00)
+#define MPI3_CTRL_ACTION_LINK_RESET (0x01)
+#define MPI3_CTRL_ACTION_HARD_RESET (0x02)
+#define MPI3_CTRL_ACTION_CLEAR_ERROR_LOG (0x05)
struct mpi3_iounit_control_request {
__le16 host_tag;
u8 ioc_use_only02;
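
The *_PARAM8/_PARAM16 defines above are indexes into the parameter arrays of mpi3_iounit_control_request; the struct body lies outside this hunk, so the field names below (operation, param16[]) are assumptions. A minimal sketch of filling a CLEAR_DEVICE_COUNTERS operation:

static void example_clear_device_counters(struct mpi3_iounit_control_request *req,
					  u16 dev_handle)
{
	req->operation = MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS;
	/* the index define names which param16 slot carries the handle */
	req->param16[MPI3_CTRL_OP_CLEAR_DEVICE_COUNTERS_PARAM16_DEVHANDLE_INDEX] =
		cpu_to_le16(dev_handle);
}
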
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_pci.h b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
new file mode 100644
index 000000000000..dbfaf4137560
--- /dev/null
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_pci.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2016-2021 Broadcom Inc. All rights reserved.
+ *
+ */
+#ifndef MPI30_PCI_H
+#define MPI30_PCI_H 1
+#ifndef MPI3_NVME_ENCAP_CMD_MAX
+#define MPI3_NVME_ENCAP_CMD_MAX (1)
+#endif
+struct mpi3_nvme_encapsulated_request {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 change_count;
+ __le16 dev_handle;
+ __le16 encapsulated_command_length;
+ __le16 flags;
+ __le32 reserved10[4];
+ __le32 command[MPI3_NVME_ENCAP_CMD_MAX];
+};
+
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002)
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000)
+#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL (0x0002)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK (0x0001)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
+#define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0001)
+struct mpi3_nvme_encapsulated_error_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le32 nvme_completion_entry[4];
+};
+#endif
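
The flags word of the new encapsulated request selects whether the embedded NVMe command is posted to the device's admin queue or an I/O submission queue. A minimal sketch (hypothetical helper; the rest of the request setup is elided):

static void example_mark_admin_cmd(struct mpi3_nvme_encapsulated_request *req)
{
	u16 flags = le16_to_cpu(req->flags);

	flags &= ~MPI3_NVME_FLAGS_SUBMISSIONQ_MASK;
	flags |= MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN;
	req->flags = cpu_to_le16(flags);
}
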
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
index ba5018702960..298d895e374b 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_sas.h
@@ -30,4 +30,18 @@ struct mpi3_smp_passthrough_request {
struct mpi3_sge_common request_sge;
struct mpi3_sge_common response_sge;
};
+
+struct mpi3_smp_passthrough_reply {
+ __le16 host_tag;
+ u8 ioc_use_only02;
+ u8 function;
+ __le16 ioc_use_only04;
+ u8 ioc_use_only06;
+ u8 msg_flags;
+ __le16 ioc_use_only08;
+ __le16 ioc_status;
+ __le32 ioc_log_info;
+ __le16 response_data_length;
+ __le16 reserved12;
+};
#endif
diff --git a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
index 63e4e81d5397..6d550117ec2e 100644
--- a/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
+++ b/drivers/scsi/mpi3mr/mpi/mpi30_transport.h
@@ -19,8 +19,8 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
-#define MPI3_VERSION_UNIT (0)
-#define MPI3_VERSION_DEV (18)
+#define MPI3_VERSION_UNIT (22)
+#define MPI3_VERSION_DEV (0)
struct mpi3_sysif_oper_queue_indexes {
__le16 producer_index;
__le16 reserved02;
@@ -74,6 +74,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_INFO_HIGH_OFFSET (0x00000004)
#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK (0xff000000)
#define MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT (24)
+#define MPI3_SYSIF_IOC_INFO_LOW_HCB_DISABLED (0x00000001)
#define MPI3_SYSIF_IOC_CONFIG_OFFSET (0x00000014)
#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ (0x00f00000)
#define MPI3_SYSIF_IOC_CONFIG_OPER_RPY_ENT_SZ_SHIFT (20)
@@ -82,12 +83,13 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_MASK (0x0000c000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NO (0x00000000)
#define MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL (0x00004000)
-#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN (0x00002000)
+#define MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ (0x00002000)
#define MPI3_SYSIF_IOC_CONFIG_DIAG_SAVE (0x00000010)
#define MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC (0x00000001)
#define MPI3_SYSIF_IOC_STATUS_OFFSET (0x0000001c)
#define MPI3_SYSIF_IOC_STATUS_RESET_HISTORY (0x00000010)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_MASK (0x0000000c)
+#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_SHIFT (0x00000002)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_NONE (0x00000000)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_IN_PROGRESS (0x00000004)
#define MPI3_SYSIF_IOC_STATUS_SHUTDOWN_COMPLETE (0x00000008)
@@ -107,9 +109,9 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_NO_CHANGE (0x00000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_DISABLE (0x40000000)
#define MPI3_SYSIF_COALESCE_CONTROL_ENABLE_ENABLE (0xc0000000)
-#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x30000000)
-#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_MASK (0x00ff0000)
-#define MPI3_SYSIF_COALESCE_CONTROL_QUEUE_ID_SHIFT (16)
+#define MPI3_SYSIF_COALESCE_CONTROL_VALID (0x20000000)
+#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_MASK (0x01ff0000)
+#define MPI3_SYSIF_COALESCE_CONTROL_MSIX_IDX_SHIFT (16)
#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_MASK (0x0000ff00)
#define MPI3_SYSIF_COALESCE_CONTROL_TIMEOUT_SHIFT (8)
#define MPI3_SYSIF_COALESCE_CONTROL_DEPTH_MASK (0x000000ff)
@@ -117,9 +119,9 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_ADMIN_REQ_Q_PI_OFFSET (0x00001000)
#define MPI3_SYSIF_ADMIN_REPLY_Q_CI_OFFSET (0x00001004)
#define MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET (0x00001008)
-#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(n) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((n) - 1) * 8))
+#define MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(N) (MPI3_SYSIF_OPER_REQ_Q_PI_OFFSET + (((N) - 1) * 8))
#define MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET (0x0000100c)
-#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(n) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((n) - 1) * 8))
+#define MPI3_SYSIF_OPER_REPLY_Q_N_CI_OFFSET(N) (MPI3_SYSIF_OPER_REPLY_Q_CI_OFFSET + (((N) - 1) * 8))
#define MPI3_SYSIF_WRITE_SEQUENCE_OFFSET (0x00001c04)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_MASK (0x0000000f)
#define MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH (0x0)
@@ -133,7 +135,7 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_MASK (0x00000700)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_NO_RESET (0x00000000)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET (0x00000100)
-#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_FLASH_RCVRY_RESET (0x00000200)
+#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_HOST_CONTROL_BOOT_RESET (0x00000200)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_COMPLETE_RESET (0x00000300)
#define MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT (0x00000700)
#define MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS (0x00000080)
@@ -153,8 +155,9 @@ struct mpi3_sysif_registers {
#define MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET (0x0000f001)
#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS (0x0000f002)
#define MPI3_SYSIF_FAULT_CODE_COMPLETE_RESET_NEEDED (0x0000f003)
-#define MPI3_SYSIF_FAULT_CODE_SAFE_MODE_EXIT (0x0000f004)
-#define MPI3_SYSIF_FAULT_CODE_FACTORY_RESET (0x0000f005)
+#define MPI3_SYSIF_FAULT_CODE_SOFT_RESET_NEEDED (0x0000f004)
+#define MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED (0x0000f005)
+#define MPI3_SYSIF_FAULT_CODE_TEMP_THRESHOLD_EXCEEDED (0x0000f006)
#define MPI3_SYSIF_FAULT_INFO0_OFFSET (0x00001c14)
#define MPI3_SYSIF_FAULT_INFO1_OFFSET (0x00001c18)
#define MPI3_SYSIF_FAULT_INFO2_OFFSET (0x00001c1c)
@@ -409,6 +412,8 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_INVALID_STATE (0x0008)
#define MPI3_IOCSTATUS_INSUFFICIENT_POWER (0x000a)
#define MPI3_IOCSTATUS_INVALID_CHANGE_COUNT (0x000b)
+#define MPI3_IOCSTATUS_ALLOWED_CMD_BLOCK (0x000c)
+#define MPI3_IOCSTATUS_SUPERVISOR_ONLY (0x000d)
#define MPI3_IOCSTATUS_FAILURE (0x001f)
#define MPI3_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
#define MPI3_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
@@ -448,8 +453,10 @@ struct mpi3_default_reply {
#define MPI3_IOCSTATUS_CI_UNSUPPORTED (0x00b0)
#define MPI3_IOCSTATUS_CI_UPDATE_SEQUENCE (0x00b1)
#define MPI3_IOCSTATUS_CI_VALIDATION_FAILED (0x00b2)
-#define MPI3_IOCSTATUS_CI_UPDATE_PENDING (0x00b3)
+#define MPI3_IOCSTATUS_CI_KEY_UPDATE_PENDING (0x00b3)
+#define MPI3_IOCSTATUS_CI_KEY_UPDATE_NOT_POSSIBLE (0x00b4)
#define MPI3_IOCSTATUS_SECURITY_KEY_REQUIRED (0x00c0)
+#define MPI3_IOCSTATUS_SECURITY_VIOLATION (0x00c1)
#define MPI3_IOCSTATUS_INVALID_QUEUE_ID (0x0f00)
#define MPI3_IOCSTATUS_INVALID_QUEUE_SIZE (0x0f01)
#define MPI3_IOCSTATUS_INVALID_MSIX_VECTOR (0x0f02)
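
The renamed N-parameter macros compute per-queue doorbell offsets: queue 1 uses the base offset and each further queue adds a stride of 8 bytes. A small illustrative use (function name hypothetical):

static u32 example_oper_req_q_doorbell(u16 qid)
{
	/* qid is 1-based: qid 1 -> 0x1008, qid 2 -> 0x1010, ... */
	return MPI3_SYSIF_OPER_REQ_Q_N_PI_OFFSET(qid);
}
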
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 9787b53a2b59..fc4eaf6d1e47 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -45,6 +45,7 @@
#include "mpi/mpi30_init.h"
#include "mpi/mpi30_ioc.h"
#include "mpi/mpi30_sas.h"
+#include "mpi/mpi30_pci.h"
#include "mpi3mr_debug.h"
/* Global list and lock for storing multiple adapters managed by the driver */
@@ -52,8 +53,8 @@ extern spinlock_t mrioc_list_lock;
extern struct list_head mrioc_list;
extern int prot_mask;
-#define MPI3MR_DRIVER_VERSION "00.255.45.01"
-#define MPI3MR_DRIVER_RELDATE "12-December-2020"
+#define MPI3MR_DRIVER_VERSION "8.0.0.61.0"
+#define MPI3MR_DRIVER_RELDATE "20-December-2021"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@@ -79,7 +80,8 @@ extern int prot_mask;
/* Operational queue management definitions */
#define MPI3MR_OP_REQ_Q_QD 512
-#define MPI3MR_OP_REP_Q_QD 4096
+#define MPI3MR_OP_REP_Q_QD 1024
+#define MPI3MR_OP_REP_Q_QD4K 4096
#define MPI3MR_OP_REQ_Q_SEG_SIZE 4096
#define MPI3MR_OP_REP_Q_SEG_SIZE 4096
#define MPI3MR_MAX_SEG_LIST_SIZE 4096
@@ -90,25 +92,31 @@ extern int prot_mask;
#define MPI3MR_HOSTTAG_IOCTLCMDS 2
#define MPI3MR_HOSTTAG_BLK_TMS 5
-#define MPI3MR_NUM_DEVRMCMD 1
+#define MPI3MR_NUM_DEVRMCMD 16
#define MPI3MR_HOSTTAG_DEVRMCMD_MIN (MPI3MR_HOSTTAG_BLK_TMS + 1)
#define MPI3MR_HOSTTAG_DEVRMCMD_MAX (MPI3MR_HOSTTAG_DEVRMCMD_MIN + \
MPI3MR_NUM_DEVRMCMD - 1)
-#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX
+#define MPI3MR_INTERNAL_CMDS_RESVD MPI3MR_HOSTTAG_DEVRMCMD_MAX
+#define MPI3MR_NUM_EVTACKCMD 4
+#define MPI3MR_HOSTTAG_EVTACKCMD_MIN (MPI3MR_HOSTTAG_DEVRMCMD_MAX + 1)
+#define MPI3MR_HOSTTAG_EVTACKCMD_MAX (MPI3MR_HOSTTAG_EVTACKCMD_MIN + \
+ MPI3MR_NUM_EVTACKCMD - 1)
/* Reduced resource count definition for crash kernel */
#define MPI3MR_HOST_IOS_KDUMP 128
/* command/controller interaction timeout definitions in seconds */
-#define MPI3MR_INTADMCMD_TIMEOUT 10
+#define MPI3MR_INTADMCMD_TIMEOUT 60
#define MPI3MR_PORTENABLE_TIMEOUT 300
-#define MPI3MR_ABORTTM_TIMEOUT 30
-#define MPI3MR_RESETTM_TIMEOUT 30
+#define MPI3MR_ABORTTM_TIMEOUT 60
+#define MPI3MR_RESETTM_TIMEOUT 60
#define MPI3MR_RESET_HOST_IOWAIT_TIMEOUT 5
#define MPI3MR_TSUPDATE_INTERVAL 900
#define MPI3MR_DEFAULT_SHUTDOWN_TIME 120
#define MPI3MR_RAID_ERRREC_RESET_TIMEOUT 180
+#define MPI3MR_PREPARE_FOR_RESET_TIMEOUT 180
+#define MPI3MR_RESET_ACK_TIMEOUT 30
#define MPI3MR_WATCHDOG_INTERVAL		1000 /* in milliseconds */
@@ -121,7 +129,7 @@ extern int prot_mask;
/* Definitions for Event replies and sense buffer allocated per controller */
#define MPI3MR_NUM_EVT_REPLIES 64
-#define MPI3MR_SENSEBUF_SZ 256
+#define MPI3MR_SENSE_BUF_SZ 256
#define MPI3MR_SENSEBUF_FACTOR 3
#define MPI3MR_CHAINBUF_FACTOR 3
#define MPI3MR_CHAINBUFDIX_FACTOR 2
@@ -135,17 +143,11 @@ extern int prot_mask;
/* ResponseCode definitions */
#define MPI3MR_RI_MASK_RESPCODE (0x000000FF)
-#define MPI3MR_RSP_TM_COMPLETE 0x00
-#define MPI3MR_RSP_INVALID_FRAME 0x02
-#define MPI3MR_RSP_TM_NOT_SUPPORTED 0x04
-#define MPI3MR_RSP_TM_FAILED 0x05
-#define MPI3MR_RSP_TM_SUCCEEDED 0x08
-#define MPI3MR_RSP_TM_INVALID_LUN 0x09
-#define MPI3MR_RSP_TM_OVERLAPPED_TAG 0x0A
#define MPI3MR_RSP_IO_QUEUED_ON_IOC \
MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC
#define MPI3MR_DEFAULT_MDTS (128 * 1024)
+#define MPI3MR_DEFAULT_PGSZEXP (12)
/* Command retry count definitions */
#define MPI3MR_DEV_RMHS_RETRY_COUNT 3
@@ -183,20 +185,6 @@ enum mpi3mr_iocstate {
MRIOC_STATE_UNRECOVERABLE,
};
-/* Init type definitions */
-enum mpi3mr_init_type {
- MPI3MR_IT_INIT = 0,
- MPI3MR_IT_RESET,
- MPI3MR_IT_RESUME,
-};
-
-/* Cleanup reason definitions */
-enum mpi3mr_cleanup_reason {
- MPI3MR_COMPLETE_CLEANUP = 0,
- MPI3MR_REINIT_FAILURE,
- MPI3MR_SUSPEND,
-};
-
/* Reset reason code definitions*/
enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_BRINGUP = 1,
@@ -222,7 +210,14 @@ enum mpi3mr_reset_reason {
MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT = 21,
MPI3MR_RESET_FROM_PELABORT_TIMEOUT = 22,
MPI3MR_RESET_FROM_SYSFS = 23,
- MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24
+ MPI3MR_RESET_FROM_SYSFS_TIMEOUT = 24,
+ MPI3MR_RESET_FROM_FIRMWARE = 27,
+};
+
+/* Queue type definitions */
+enum queue_type {
+ MPI3MR_DEFAULT_QUEUE = 0,
+ MPI3MR_POLL_QUEUE,
};
/**
@@ -263,7 +258,7 @@ struct mpi3mr_ioc_facts {
u16 max_vds;
u16 max_hpds;
u16 max_advhpds;
- u16 max_raidpds;
+ u16 max_raid_pds;
u16 min_devhandle;
u16 max_devhandle;
u16 max_op_req_q;
@@ -336,6 +331,7 @@ struct op_req_qinfo {
* @pend_ios: Number of IOs pending in HW for this queue
* @enable_irq_poll: Flag to indicate polling is enabled
* @in_use: Queue is handled by poll/ISR
+ * @qtype: Type of queue (types defined in enum queue_type)
*/
struct op_reply_qinfo {
u16 ci;
@@ -350,6 +346,7 @@ struct op_reply_qinfo {
atomic_t pend_ios;
bool enable_irq_poll;
atomic_t in_use;
+ enum queue_type qtype;
};
/**
@@ -388,6 +385,7 @@ struct tgt_dev_sas_sata {
* @pgsz: Device page size
* @abort_to: Timeout for abort TM
* @reset_to: Timeout for Target/LUN reset TM
+ * @dev_info: Device information bits
*/
struct tgt_dev_pcie {
u32 mdts;
@@ -395,6 +393,7 @@ struct tgt_dev_pcie {
u8 pgsz;
u8 abort_to;
u8 reset_to;
+ u16 dev_info;
};
/**
@@ -499,6 +498,8 @@ static inline void mpi3mr_tgtdev_put(struct mpi3mr_tgt_dev *s)
* @dev_removedelay: Device is waiting to be removed in FW
* @dev_type: Device type
* @tgt_dev: Internal target device pointer
+ * @pend_count: Counter to track pending I/Os during error
+ * handling
*/
struct mpi3mr_stgt_priv_data {
struct scsi_target *starget;
@@ -510,6 +511,7 @@ struct mpi3mr_stgt_priv_data {
u8 dev_removedelay;
u8 dev_type;
struct mpi3mr_tgt_dev *tgt_dev;
+ u32 pend_count;
};
/**
@@ -518,11 +520,14 @@ struct mpi3mr_stgt_priv_data {
* @tgt_priv_data: Scsi_target private data pointer
* @lun_id: LUN ID of the device
* @ncq_prio_enable: NCQ priority enable for SATA device
+ * @pend_count: Counter to track pending I/Os during error
+ * handling
*/
struct mpi3mr_sdev_priv_data {
struct mpi3mr_stgt_priv_data *tgt_priv_data;
u32 lun_id;
u8 ncq_prio_enable;
+ u32 pend_count;
};
/**
@@ -630,6 +635,7 @@ struct scmd_priv {
* @ready_timeout: Controller ready timeout
* @intr_info: Interrupt cookie pointer
* @intr_info_count: Number of interrupt cookies
+ * @is_intr_info_set: Flag to indicate intr info is setup
* @num_queues: Number of operational queues
* @num_op_req_q: Number of operational request queues
* @req_qinfo: Operational request queue info pointer
@@ -681,17 +687,23 @@ struct scmd_priv {
* @chain_buf_lock: Chain buffer list lock
* @host_tm_cmds: Command tracker for task management commands
* @dev_rmhs_cmds: Command tracker for device removal commands
+ * @evtack_cmds: Command tracker for event ack commands
* @devrem_bitmap_sz: Device removal bitmap size
* @devrem_bitmap: Device removal bitmap
* @dev_handle_bitmap_sz: Device handle bitmap size
* @removepend_bitmap: Remove pending bitmap
* @delayed_rmhs_list: Delayed device removal list
+ * @evtack_cmds_bitmap_sz: Event Ack bitmap size
+ * @evtack_cmds_bitmap: Event Ack bitmap
+ * @delayed_evtack_cmds_list: Delayed event acknowledgment list
* @ts_update_counter: Timestamp update counter
- * @fault_dbg: Fault debug flag
* @reset_in_progress: Reset in progress flag
* @unrecoverable: Controller unrecoverable flag
+ * @prev_reset_result: Result of previous reset
* @reset_mutex: Controller reset mutex
* @reset_waitq: Controller reset wait queue
+ * @prepare_for_reset: Prepare for reset event received
+ * @prepare_for_reset_timeout_counter: Prepare for reset timeout
* @diagsave_timeout: Diagnostic information save timeout
* @logging_level: Controller debug logging level
* @flush_io_count: I/O count to flush after reset
@@ -699,6 +711,9 @@ struct scmd_priv {
* @driver_info: Driver, Kernel, OS information to firmware
* @change_count: Topology change count
* @op_reply_q_offset: Operational reply queue offset with MSIx
+ * @default_qcount: Total Default queues
+ * @active_poll_qcount: Currently active poll queue count
+ * @requested_poll_qcount: User requested poll queue count
*/
struct mpi3mr_ioc {
struct list_head list;
@@ -739,6 +754,7 @@ struct mpi3mr_ioc {
struct mpi3mr_intr_info *intr_info;
u16 intr_info_count;
+ bool is_intr_info_set;
u16 num_queues;
u16 num_op_req_q;
@@ -758,6 +774,7 @@ struct mpi3mr_ioc {
dma_addr_t reply_buf_dma_max_address;
u16 reply_free_qsz;
+ u16 reply_sz;
struct dma_pool *reply_free_q_pool;
__le64 *reply_free_q;
dma_addr_t reply_free_q_dma;
@@ -805,19 +822,26 @@ struct mpi3mr_ioc {
struct mpi3mr_drv_cmd host_tm_cmds;
struct mpi3mr_drv_cmd dev_rmhs_cmds[MPI3MR_NUM_DEVRMCMD];
+ struct mpi3mr_drv_cmd evtack_cmds[MPI3MR_NUM_EVTACKCMD];
u16 devrem_bitmap_sz;
void *devrem_bitmap;
u16 dev_handle_bitmap_sz;
void *removepend_bitmap;
struct list_head delayed_rmhs_list;
+ u16 evtack_cmds_bitmap_sz;
+ void *evtack_cmds_bitmap;
+ struct list_head delayed_evtack_cmds_list;
u32 ts_update_counter;
- u8 fault_dbg;
u8 reset_in_progress;
u8 unrecoverable;
+ int prev_reset_result;
struct mutex reset_mutex;
wait_queue_head_t reset_waitq;
+ u8 prepare_for_reset;
+ u16 prepare_for_reset_timeout_counter;
+
u16 diagsave_timeout;
int logging_level;
u16 flush_io_count;
@@ -826,6 +850,10 @@ struct mpi3mr_ioc {
struct mpi3_driver_info_layout driver_info;
u16 change_count;
u16 op_reply_q_offset;
+
+ u16 default_qcount;
+ u16 active_poll_qcount;
+ u16 requested_poll_qcount;
};
/**
@@ -867,10 +895,23 @@ struct delayed_dev_rmhs_node {
u8 iou_rc;
};
+/**
+ * struct delayed_evt_ack_node - Delayed event ack node
+ * @list: list head
+ * @event: MPI3 event ID
+ * @event_ctx: event context
+ */
+struct delayed_evt_ack_node {
+ struct list_head list;
+ u8 event;
+ u32 event_ctx;
+};
+
int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc);
void mpi3mr_cleanup_resources(struct mpi3mr_ioc *mrioc);
-int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type);
-void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason);
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc);
+int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume);
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc);
int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async);
int mpi3mr_admin_request_post(struct mpi3mr_ioc *mrioc, void *admin_req,
u16 admin_req_sz, u8 ignore_reset);
@@ -887,6 +928,7 @@ void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
u64 sense_buf_dma);
void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc);
+void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc);
void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
struct mpi3_event_notification_reply *event_reply);
void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc,
@@ -897,13 +939,11 @@ void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc);
int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
u32 reset_reason, u8 snapdump);
-int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
- u32 reset_reason);
void mpi3mr_ioc_disable_intr(struct mpi3mr_ioc *mrioc);
void mpi3mr_ioc_enable_intr(struct mpi3mr_ioc *mrioc);
enum mpi3mr_iocstate mpi3mr_get_iocstate(struct mpi3mr_ioc *mrioc);
-int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
u32 event_ctx);
void mpi3mr_wait_for_host_io(struct mpi3mr_ioc *mrioc, u32 timeout);
@@ -911,6 +951,11 @@ void mpi3mr_cleanup_fwevt_list(struct mpi3mr_ioc *mrioc);
void mpi3mr_flush_host_io(struct mpi3mr_ioc *mrioc);
void mpi3mr_invalidate_devhandles(struct mpi3mr_ioc *mrioc);
void mpi3mr_rfresh_tgtdevs(struct mpi3mr_ioc *mrioc);
-void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc);
+void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc);
+void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code);
+void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc);
+int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ struct op_reply_qinfo *op_reply_q);
+int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num);
#endif /*MPI3MR_H_INCLUDED*/
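
The enlarged host-tag ranges partition the reserved tags among the internal command trackers, and a reply's host tag maps straight back to its tracker by subtracting the range minimum, as mpi3mr_get_drv_cmd() in mpi3mr_fw.c does. A minimal sketch for the new event-ack range:

static struct mpi3mr_drv_cmd *example_tag_to_evtack(struct mpi3mr_ioc *mrioc,
						    u16 host_tag)
{
	if (host_tag < MPI3MR_HOSTTAG_EVTACKCMD_MIN ||
	    host_tag > MPI3MR_HOSTTAG_EVTACKCMD_MAX)
		return NULL;
	return &mrioc->evtack_cmds[host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN];
}
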
diff --git a/drivers/scsi/mpi3mr/mpi3mr_debug.h b/drivers/scsi/mpi3mr/mpi3mr_debug.h
index c085bb048d41..cef61c5d59d3 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_debug.h
+++ b/drivers/scsi/mpi3mr/mpi3mr_debug.h
@@ -14,27 +14,20 @@
/*
* debug levels
*/
-#define MPI3_DEBUG 0x00000001
-#define MPI3_DEBUG_MSG_FRAME 0x00000002
-#define MPI3_DEBUG_SG 0x00000004
-#define MPI3_DEBUG_EVENTS 0x00000008
-#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000010
-#define MPI3_DEBUG_INIT 0x00000020
-#define MPI3_DEBUG_EXIT 0x00000040
-#define MPI3_DEBUG_FAIL 0x00000080
-#define MPI3_DEBUG_TM 0x00000100
-#define MPI3_DEBUG_REPLY 0x00000200
-#define MPI3_DEBUG_HANDSHAKE 0x00000400
-#define MPI3_DEBUG_CONFIG 0x00000800
-#define MPI3_DEBUG_DL 0x00001000
-#define MPI3_DEBUG_RESET 0x00002000
-#define MPI3_DEBUG_SCSI 0x00004000
-#define MPI3_DEBUG_IOCTL 0x00008000
-#define MPI3_DEBUG_CSMISAS 0x00010000
-#define MPI3_DEBUG_SAS 0x00020000
-#define MPI3_DEBUG_TRANSPORT 0x00040000
-#define MPI3_DEBUG_TASK_SET_FULL 0x00080000
-#define MPI3_DEBUG_TRIGGER_DIAG 0x00200000
+
+#define MPI3_DEBUG_EVENT 0x00000001
+#define MPI3_DEBUG_EVENT_WORK_TASK 0x00000002
+#define MPI3_DEBUG_INIT 0x00000004
+#define MPI3_DEBUG_EXIT 0x00000008
+#define MPI3_DEBUG_TM 0x00000010
+#define MPI3_DEBUG_RESET 0x00000020
+#define MPI3_DEBUG_SCSI_ERROR 0x00000040
+#define MPI3_DEBUG_REPLY 0x00000080
+#define MPI3_DEBUG_IOCTL_ERROR 0x00008000
+#define MPI3_DEBUG_IOCTL_INFO 0x00010000
+#define MPI3_DEBUG_SCSI_INFO 0x00020000
+#define MPI3_DEBUG 0x01000000
+#define MPI3_DEBUG_SG 0x02000000
/*
@@ -50,11 +43,103 @@
#define ioc_info(ioc, fmt, ...) \
pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__)
+#define dprint(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_event_th(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EVENT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_event_bh(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EVENT_WORK_TASK) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_init(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_INIT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_exit(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_EXIT) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_tm(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_TM) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_reply(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_REPLY) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_reset(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_RESET) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_SCSI_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_err(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_SCSI_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
+
+#define dprint_scsi_command(ioc, SCMD, LOG_LEVEL) \
+ do { \
+ if (ioc->logging_level & LOG_LEVEL) \
+ scsi_print_command(SCMD); \
+ } while (0)
+
+
+#define dprint_ioctl_info(ioc, fmt, ...) \
+ do { \
+ if (ioc->logging_level & MPI3_DEBUG_IOCTL_INFO) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
+ } while (0)
-#define dbgprint(IOC, FMT, ...) \
+#define dprint_ioctl_err(ioc, fmt, ...) \
do { \
- if (IOC->logging_level & MPI3_DEBUG) \
- pr_info("%s: " FMT, (IOC)->name, ##__VA_ARGS__); \
+ if (ioc->logging_level & MPI3_DEBUG_IOCTL_ERROR) \
+ pr_info("%s: " fmt, (ioc)->name, ##__VA_ARGS__); \
} while (0)
#endif /* MPT3SAS_DEBUG_H_INCLUDED */
+
+/**
+ * dprint_dump_req - print message frame contents
+ * @req: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+dprint_dump_req(void *req, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)req;
+
+ pr_info("request:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
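
Each dprint_* macro above is gated by one bit of mrioc->logging_level, so the per-area messages can be enabled selectively at runtime. A minimal, hypothetical usage sketch:

static void example_debug_usage(struct mpi3mr_ioc *mrioc)
{
	/* enable init- and reset-time messages only */
	mrioc->logging_level = MPI3_DEBUG_INIT | MPI3_DEBUG_RESET;

	dprint_init(mrioc, "bringing up adapter %s\n", mrioc->name);
	dprint_reset(mrioc, "soft reset requested\n");
}
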
diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c
index aa5d877df6f8..15bdc21ead66 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_fw.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c
@@ -10,6 +10,16 @@
#include "mpi3mr.h"
#include <linux/io-64-nonatomic-lo-hi.h>
+static int
+mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type, u32 reset_reason);
+static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc);
+static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
+ struct mpi3_ioc_facts_data *facts_data);
+
+static int poll_queues;
+module_param(poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of queues for io_uring poll mode. (Range 1 - 126)");
+
#if defined(writeq) && defined(CONFIG_64BIT)
static inline void mpi3mr_writeq(__u64 b, volatile void __iomem *addr)
{
@@ -78,6 +88,7 @@ static void mpi3mr_cleanup_isr(struct mpi3mr_ioc *mrioc)
kfree(mrioc->intr_info);
mrioc->intr_info = NULL;
mrioc->intr_info_count = 0;
+ mrioc->is_intr_info_set = false;
pci_free_irq_vectors(mrioc->pdev);
}
@@ -124,8 +135,9 @@ static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
u64 reply_dma)
{
u32 old_idx = 0;
+ unsigned long flags;
- spin_lock(&mrioc->reply_free_queue_lock);
+ spin_lock_irqsave(&mrioc->reply_free_queue_lock, flags);
old_idx = mrioc->reply_free_queue_host_index;
mrioc->reply_free_queue_host_index = (
(mrioc->reply_free_queue_host_index ==
@@ -134,15 +146,16 @@ static void mpi3mr_repost_reply_buf(struct mpi3mr_ioc *mrioc,
mrioc->reply_free_q[old_idx] = cpu_to_le64(reply_dma);
writel(mrioc->reply_free_queue_host_index,
&mrioc->sysif_regs->reply_free_host_index);
- spin_unlock(&mrioc->reply_free_queue_lock);
+ spin_unlock_irqrestore(&mrioc->reply_free_queue_lock, flags);
}
void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
u64 sense_buf_dma)
{
u32 old_idx = 0;
+ unsigned long flags;
- spin_lock(&mrioc->sbq_lock);
+ spin_lock_irqsave(&mrioc->sbq_lock, flags);
old_idx = mrioc->sbq_host_index;
mrioc->sbq_host_index = ((mrioc->sbq_host_index ==
(mrioc->sense_buf_q_sz - 1)) ? 0 :
@@ -150,7 +163,7 @@ void mpi3mr_repost_sense_buf(struct mpi3mr_ioc *mrioc,
mrioc->sense_buf_q[old_idx] = cpu_to_le64(sense_buf_dma);
writel(mrioc->sbq_host_index,
&mrioc->sysif_regs->sense_buffer_free_host_index);
- spin_unlock(&mrioc->sbq_lock);
+ spin_unlock_irqrestore(&mrioc->sbq_lock, flags);
}
static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
@@ -303,6 +316,12 @@ mpi3mr_get_drv_cmd(struct mpi3mr_ioc *mrioc, u16 host_tag,
return &mrioc->dev_rmhs_cmds[idx];
}
+ if (host_tag >= MPI3MR_HOSTTAG_EVTACKCMD_MIN &&
+ host_tag <= MPI3MR_HOSTTAG_EVTACKCMD_MAX) {
+ idx = host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+ return &mrioc->evtack_cmds[idx];
+ }
+
return NULL;
}
@@ -369,7 +388,7 @@ static void mpi3mr_process_admin_reply_desc(struct mpi3mr_ioc *mrioc,
if (def_reply) {
cmdptr->state |= MPI3MR_CMD_REPLY_VALID;
memcpy((u8 *)cmdptr->reply, (u8 *)def_reply,
- mrioc->facts.reply_sz);
+ mrioc->reply_sz);
}
if (cmdptr->is_waiting) {
complete(&cmdptr->done);
@@ -446,10 +465,21 @@ mpi3mr_get_reply_desc(struct op_reply_qinfo *op_reply_q, u32 reply_ci)
return reply_desc;
}
-static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
- struct mpi3mr_intr_info *intr_info)
+/**
+ * mpi3mr_process_op_reply_q - Operational reply queue handler
+ * @mrioc: Adapter instance reference
+ * @op_reply_q: Operational reply queue info
+ *
+ * Checks the specific operational reply queue and drains the
+ * reply queue entries until the queue is empty, processing the
+ * individual reply descriptors.
+ *
+ * Return: 0 if the queue is already processed, or the number of reply
+ * descriptors processed.
+ */
+int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
+ struct op_reply_qinfo *op_reply_q)
{
- struct op_reply_qinfo *op_reply_q = intr_info->op_reply_q;
struct op_req_qinfo *op_req_q;
u32 exp_phase;
u32 reply_ci;
@@ -500,7 +530,7 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
* Ensure remaining completion happens from threaded ISR.
*/
if (num_op_reply > mrioc->max_host_ios) {
- intr_info->op_reply_q->enable_irq_poll = true;
+ op_reply_q->enable_irq_poll = true;
break;
}
@@ -515,6 +545,34 @@ static int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
return num_op_reply;
}
+/**
+ * mpi3mr_blk_mq_poll - Operational reply queue handler for poll mode
+ * @shost: SCSI Host reference
+ * @queue_num: Request queue number (i.e. the hardware context number,
+ * from the OS's point of view)
+ *
+ * Checks the specific operational reply queue and drains the
+ * reply queue entries until the queue is empty, processing the
+ * individual reply descriptors.
+ *
+ * Return: 0 if the queue is already processed, or the number of reply
+ * descriptors processed.
+ */
+int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ int num_entries = 0;
+ struct mpi3mr_ioc *mrioc;
+
+ mrioc = (struct mpi3mr_ioc *)shost->hostdata;
+
+ if ((mrioc->reset_in_progress || mrioc->prepare_for_reset))
+ return 0;
+
+ num_entries = mpi3mr_process_op_reply_q(mrioc,
+ &mrioc->op_reply_qinfo[queue_num]);
+
+ return num_entries;
+}
+
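For context, a poll handler with this signature is normally exported through the SCSI host template so that blk-mq can invoke it for HCTX_TYPE_POLL hardware queues. A hedged fragment illustrating the hookup; aside from mpi3mr_blk_mq_poll itself, the field values and the mpi3mr_map_queues callback name are assumptions for illustration, not taken from this patch:

#include <linux/module.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template demo_mpi3mr_template = {
	.module     = THIS_MODULE,
	.name       = "demo MPI3 controller",
	/* blk-mq invokes this to reap completions on polled hw queues */
	.mq_poll    = mpi3mr_blk_mq_poll,
	/* assumed callback that maps the last requested_poll_qcount
	 * hardware queues into HCTX_TYPE_POLL */
	.map_queues = mpi3mr_map_queues,
};
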
static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
{
struct mpi3mr_intr_info *intr_info = privdata;
@@ -535,7 +593,8 @@ static irqreturn_t mpi3mr_isr_primary(int irq, void *privdata)
if (!midx)
num_admin_replies = mpi3mr_process_admin_reply_q(mrioc);
if (intr_info->op_reply_q)
- num_op_reply = mpi3mr_process_op_reply_q(mrioc, intr_info);
+ num_op_reply = mpi3mr_process_op_reply_q(mrioc,
+ intr_info->op_reply_q);
if (num_admin_replies || num_op_reply)
return IRQ_HANDLED;
@@ -606,9 +665,10 @@ static irqreturn_t mpi3mr_isr_poll(int irq, void *privdata)
mpi3mr_process_admin_reply_q(mrioc);
if (intr_info->op_reply_q)
num_op_reply +=
- mpi3mr_process_op_reply_q(mrioc, intr_info);
+ mpi3mr_process_op_reply_q(mrioc,
+ intr_info->op_reply_q);
- usleep_range(mrioc->irqpoll_sleep, 10 * mrioc->irqpoll_sleep);
+ usleep_range(MPI3MR_IRQ_POLL_SLEEP, 10 * MPI3MR_IRQ_POLL_SLEEP);
} while (atomic_read(&intr_info->op_reply_q->pend_ios) &&
(num_op_reply < mrioc->max_host_ios));
@@ -652,6 +712,25 @@ static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
return retval;
}
+static void mpi3mr_calc_poll_queues(struct mpi3mr_ioc *mrioc, u16 max_vectors)
+{
+ if (!mrioc->requested_poll_qcount)
+ return;
+
+ /* Reserved for Admin and Default Queue */
+ if (max_vectors > 2 &&
+ (mrioc->requested_poll_qcount < max_vectors - 2)) {
+ ioc_info(mrioc,
+ "enabled polled queues (%d) msix (%d)\n",
+ mrioc->requested_poll_qcount, max_vectors);
+ } else {
+ ioc_info(mrioc,
+ "disabled polled queues (%d) msix (%d) because of no resources for default queue\n",
+ mrioc->requested_poll_qcount, max_vectors);
+ mrioc->requested_poll_qcount = 0;
+ }
+}
+
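Worked example of the check above: with max_vectors = 8, two vectors are reserved (the admin pre-vector and one default queue), so a request only passes `requested_poll_qcount < max_vectors - 2` when it is 5 or less; poll_queues=4 is honored, while poll_queues=6 takes the else branch and is reset to 0, leaving every queue interrupt driven.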
/**
* mpi3mr_setup_isr - Setup ISR for the controller
* @mrioc: Adapter instance reference
@@ -664,48 +743,72 @@ static inline int mpi3mr_request_irq(struct mpi3mr_ioc *mrioc, u16 index)
static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
{
unsigned int irq_flags = PCI_IRQ_MSIX;
- int max_vectors;
+ int max_vectors, min_vec;
int retval;
int i;
- struct irq_affinity desc = { .pre_vectors = 1};
+ struct irq_affinity desc = { .pre_vectors = 1, .post_vectors = 1 };
+
+ if (mrioc->is_intr_info_set)
+ return 0;
mpi3mr_cleanup_isr(mrioc);
- if (setup_one || reset_devices)
+ if (setup_one || reset_devices) {
max_vectors = 1;
- else {
+ retval = pci_alloc_irq_vectors(mrioc->pdev,
+ 1, max_vectors, irq_flags);
+ if (retval < 0) {
+ ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
+ retval);
+ goto out_failed;
+ }
+ } else {
max_vectors =
- min_t(int, mrioc->cpu_count + 1, mrioc->msix_count);
+ min_t(int, mrioc->cpu_count + 1 +
+ mrioc->requested_poll_qcount, mrioc->msix_count);
+
+ mpi3mr_calc_poll_queues(mrioc, max_vectors);
ioc_info(mrioc,
"MSI-X vectors supported: %d, no of cores: %d,",
mrioc->msix_count, mrioc->cpu_count);
ioc_info(mrioc,
- "MSI-x vectors requested: %d\n", max_vectors);
- }
+ "MSI-x vectors requested: %d poll_queues %d\n",
+ max_vectors, mrioc->requested_poll_qcount);
+
+ desc.post_vectors = mrioc->requested_poll_qcount;
+ min_vec = desc.pre_vectors + desc.post_vectors;
+ irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
+
+ retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
+ min_vec, max_vectors, irq_flags, &desc);
+
+ if (retval < 0) {
+ ioc_err(mrioc, "cannot allocate irq vectors, ret %d\n",
+ retval);
+ goto out_failed;
+ }
- irq_flags |= PCI_IRQ_AFFINITY | PCI_IRQ_ALL_TYPES;
- mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
- retval = pci_alloc_irq_vectors_affinity(mrioc->pdev,
- 1, max_vectors, irq_flags, &desc);
- if (retval < 0) {
- ioc_err(mrioc, "Cannot alloc irq vectors\n");
- goto out_failed;
- }
- if (retval != max_vectors) {
- ioc_info(mrioc,
- "allocated vectors (%d) are less than configured (%d)\n",
- retval, max_vectors);
/*
* If only one MSI-x is allocated, then MSI-x 0 will be shared
* between Admin queue and operational queue
*/
- if (retval == 1)
+ if (retval == min_vec)
mrioc->op_reply_q_offset = 0;
+ else if (retval != (max_vectors)) {
+ ioc_info(mrioc,
+ "allocated vectors (%d) are less than configured (%d)\n",
+ retval, max_vectors);
+ }
max_vectors = retval;
+ mrioc->op_reply_q_offset = (max_vectors > 1) ? 1 : 0;
+
+ mpi3mr_calc_poll_queues(mrioc, max_vectors);
+
}
+
mrioc->intr_info = kzalloc(sizeof(struct mpi3mr_intr_info) * max_vectors,
GFP_KERNEL);
if (!mrioc->intr_info) {
@@ -720,6 +823,8 @@ static int mpi3mr_setup_isr(struct mpi3mr_ioc *mrioc, u8 setup_one)
goto out_failed;
}
}
+ if (reset_devices || !setup_one)
+ mrioc->is_intr_info_set = true;
mrioc->intr_info_count = max_vectors;
mpi3mr_ioc_enable_intr(mrioc);
return 0;
@@ -796,6 +901,7 @@ static const struct {
},
{ MPI3MR_RESET_FROM_SYSFS, "sysfs invocation" },
{ MPI3MR_RESET_FROM_SYSFS_TIMEOUT, "sysfs TM timeout" },
+ { MPI3MR_RESET_FROM_FIRMWARE, "firmware asynchronous reset" },
};
/**
@@ -860,7 +966,7 @@ static const char *mpi3mr_reset_type_name(u16 reset_type)
*
* Return: Nothing.
*/
-static void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
+void mpi3mr_print_fault_info(struct mpi3mr_ioc *mrioc)
{
u32 ioc_status, code, code1, code2, code3;
@@ -958,25 +1064,25 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
ioc_config &= ~MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
- timeout = mrioc->ready_timeout * 10;
+ timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
do {
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY)) {
mpi3mr_clear_reset_history(mrioc);
- ioc_config =
- readl(&mrioc->sysif_regs->ioc_configuration);
- if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
- (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
- (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC))) {
- retval = 0;
- break;
- }
+ break;
+ }
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) {
+ mpi3mr_print_fault_info(mrioc);
+ break;
}
msleep(100);
} while (--timeout);
- ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ if (timeout && !((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
+ (ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
+ retval = 0;
ioc_info(mrioc, "Base IOC Sts/Config after %s MUR is (0x%x)/(0x%x)\n",
(!retval) ? "successful" : "failed", ioc_status, ioc_config);
@@ -984,32 +1090,171 @@ static int mpi3mr_issue_and_process_mur(struct mpi3mr_ioc *mrioc,
}
/**
+ * mpi3mr_revalidate_factsdata - validate IOCFacts parameters
+ * during reset/resume
+ * @mrioc: Adapter instance reference
+ *
+ * Return: zero if the new IOCFacts parameters are compatible with the
+ * older values, else -EPERM
+ */
+static int
+mpi3mr_revalidate_factsdata(struct mpi3mr_ioc *mrioc)
+{
+ u16 dev_handle_bitmap_sz;
+ void *removepend_bitmap;
+
+ if (mrioc->facts.reply_sz > mrioc->reply_sz) {
+ ioc_err(mrioc,
+ "cannot increase reply size from %d to %d\n",
+ mrioc->reply_sz, mrioc->facts.reply_sz);
+ return -EPERM;
+ }
+
+ if (mrioc->facts.max_op_reply_q < mrioc->num_op_reply_q) {
+ ioc_err(mrioc,
+ "cannot reduce number of operational reply queues from %d to %d\n",
+ mrioc->num_op_reply_q,
+ mrioc->facts.max_op_reply_q);
+ return -EPERM;
+ }
+
+ if (mrioc->facts.max_op_req_q < mrioc->num_op_req_q) {
+ ioc_err(mrioc,
+ "cannot reduce number of operational request queues from %d to %d\n",
+ mrioc->num_op_req_q, mrioc->facts.max_op_req_q);
+ return -EPERM;
+ }
+
+ dev_handle_bitmap_sz = mrioc->facts.max_devhandle / 8;
+ if (mrioc->facts.max_devhandle % 8)
+ dev_handle_bitmap_sz++;
+ if (dev_handle_bitmap_sz > mrioc->dev_handle_bitmap_sz) {
+ removepend_bitmap = krealloc(mrioc->removepend_bitmap,
+ dev_handle_bitmap_sz, GFP_KERNEL);
+ if (!removepend_bitmap) {
+ ioc_err(mrioc,
+ "failed to increase removepend_bitmap sz from: %d to %d\n",
+ mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
+ return -EPERM;
+ }
+ memset(removepend_bitmap + mrioc->dev_handle_bitmap_sz, 0,
+ dev_handle_bitmap_sz - mrioc->dev_handle_bitmap_sz);
+ mrioc->removepend_bitmap = removepend_bitmap;
+ ioc_info(mrioc,
+ "increased dev_handle_bitmap_sz from %d to %d\n",
+ mrioc->dev_handle_bitmap_sz, dev_handle_bitmap_sz);
+ mrioc->dev_handle_bitmap_sz = dev_handle_bitmap_sz;
+ }
+
+ return 0;
+}
+
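The krealloc() path above only ever grows the removal-pending bitmap, and zeroes just the newly added tail. A user-space sketch of the same grow-only pattern, with realloc() in place of krealloc() and hypothetical demo_* names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow-only resize: never shrink, zero only the newly added bytes. */
static unsigned char *demo_grow_bitmap(unsigned char *bitmap,
				       size_t old_sz, size_t new_sz)
{
	unsigned char *tmp;

	if (new_sz <= old_sz)
		return bitmap;

	tmp = realloc(bitmap, new_sz);
	if (!tmp)
		return NULL;	/* caller keeps using the old bitmap */

	memset(tmp + old_sz, 0, new_sz - old_sz);
	return tmp;
}

int main(void)
{
	size_t max_devhandle = 1034;
	/* one bit per device handle, rounded up to whole bytes */
	size_t new_sz = max_devhandle / 8 + (max_devhandle % 8 ? 1 : 0);
	unsigned char *bitmap = calloc(64, 1);
	unsigned char *grown = demo_grow_bitmap(bitmap, 64, new_sz);

	if (grown)
		bitmap = grown;
	printf("bitmap size: %zu bytes\n", grown ? new_sz : (size_t)64);
	free(bitmap);
	return 0;
}
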
+/**
* mpi3mr_bring_ioc_ready - Bring controller to ready state
* @mrioc: Adapter instance reference
*
* Set Enable IOC bit in IOC configuration register and wait for
* the controller to become ready.
*
- * Return: 0 on success, -1 on failure.
+ * Return: 0 on success, appropriate error on failure.
*/
static int mpi3mr_bring_ioc_ready(struct mpi3mr_ioc *mrioc)
{
- u32 ioc_config, timeout;
- enum mpi3mr_iocstate current_state;
+ u32 ioc_config, ioc_status, timeout;
+ int retval = 0;
+ enum mpi3mr_iocstate ioc_state;
+ u64 base_info;
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
+ ioc_info(mrioc, "ioc_status(0x%08x), ioc_config(0x%08x), ioc_info(0x%016llx) at the bringup\n",
+ ioc_status, ioc_config, base_info);
+
+ /* The timeout value is in 2-second units, convert it to seconds */
+ mrioc->ready_timeout =
+ ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
+ MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
+
+ ioc_info(mrioc, "ready timeout: %d seconds\n", mrioc->ready_timeout);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc, "controller is in %s state during detection\n",
+ mpi3mr_iocstate_name(ioc_state));
+
+ if (ioc_state == MRIOC_STATE_BECOMING_READY ||
+ ioc_state == MRIOC_STATE_RESET_REQUESTED) {
+ timeout = mrioc->ready_timeout * 10;
+ do {
+ msleep(100);
+ } while (--timeout);
+
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_info(mrioc,
+ "controller is in %s state after waiting to reset\n",
+ mpi3mr_iocstate_name(ioc_state));
+ }
+
+ if (ioc_state == MRIOC_STATE_READY) {
+ ioc_info(mrioc, "issuing message unit reset (MUR) to bring to reset state\n");
+ retval = mpi3mr_issue_and_process_mur(mrioc,
+ MPI3MR_RESET_FROM_BRINGUP);
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (retval)
+ ioc_err(mrioc,
+ "message unit reset failed with error %d current state %s\n",
+ retval, mpi3mr_iocstate_name(ioc_state));
+ }
+ if (ioc_state != MRIOC_STATE_RESET) {
+ mpi3mr_print_fault_info(mrioc);
+ ioc_info(mrioc, "issuing soft reset to bring to reset state\n");
+ retval = mpi3mr_issue_reset(mrioc,
+ MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
+ MPI3MR_RESET_FROM_BRINGUP);
+ if (retval) {
+ ioc_err(mrioc,
+ "soft reset failed with error %d\n", retval);
+ goto out_failed;
+ }
+ }
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state != MRIOC_STATE_RESET) {
+ ioc_err(mrioc,
+ "cannot bring controller to reset state, current state: %s\n",
+ mpi3mr_iocstate_name(ioc_state));
+ goto out_failed;
+ }
+ mpi3mr_clear_reset_history(mrioc);
+ retval = mpi3mr_setup_admin_qpair(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to setup admin queues: error %d\n",
+ retval);
+ goto out_failed;
+ }
+
+ ioc_info(mrioc, "bringing controller to ready state\n");
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
ioc_config |= MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
timeout = mrioc->ready_timeout * 10;
do {
- current_state = mpi3mr_get_iocstate(mrioc);
- if (current_state == MRIOC_STATE_READY)
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ if (ioc_state == MRIOC_STATE_READY) {
+ ioc_info(mrioc,
+ "successfully transitioned to %s state\n",
+ mpi3mr_iocstate_name(ioc_state));
return 0;
+ }
msleep(100);
} while (--timeout);
- return -1;
+out_failed:
+ ioc_state = mpi3mr_get_iocstate(mrioc);
+ ioc_err(mrioc,
+ "failed to bring to ready state, current state: %s\n",
+ mpi3mr_iocstate_name(ioc_state));
+ return retval;
}
/**
@@ -1026,7 +1271,6 @@ static inline bool
mpi3mr_soft_reset_success(u32 ioc_status, u32 ioc_config)
{
if (!((ioc_status & MPI3_SYSIF_IOC_STATUS_READY) ||
- (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT) ||
(ioc_config & MPI3_SYSIF_IOC_CONFIG_ENABLE_IOC)))
return true;
return false;
@@ -1049,8 +1293,10 @@ static inline bool mpi3mr_diagfault_success(struct mpi3mr_ioc *mrioc,
if (!(ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT))
return false;
fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
- if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET)
+ if (fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) {
+ mpi3mr_print_fault_info(mrioc);
return true;
+ }
return false;
}
@@ -1089,26 +1335,36 @@ static int mpi3mr_issue_reset(struct mpi3mr_ioc *mrioc, u16 reset_type,
u32 reset_reason)
{
int retval = -1;
- u8 unlock_retry_count, reset_retry_count = 0;
- u32 host_diagnostic, timeout, ioc_status, ioc_config;
+ u8 unlock_retry_count = 0;
+ u32 host_diagnostic, ioc_status, ioc_config;
+ u32 timeout = MPI3MR_RESET_ACK_TIMEOUT * 10;
- pci_cfg_access_lock(mrioc->pdev);
if ((reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) &&
(reset_type != MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT))
- goto out;
+ return retval;
if (mrioc->unrecoverable)
- goto out;
-retry_reset:
- unlock_retry_count = 0;
+ return retval;
+ if (reset_reason == MPI3MR_RESET_FROM_FIRMWARE) {
+ retval = 0;
+ return retval;
+ }
+
+ ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
+ mpi3mr_reset_type_name(reset_type),
+ mpi3mr_reset_rc_name(reset_reason), reset_reason);
+
mpi3mr_clear_reset_history(mrioc);
do {
ioc_info(mrioc,
"Write magic sequence to unlock host diag register (retry=%d)\n",
++unlock_retry_count);
if (unlock_retry_count >= MPI3MR_HOSTDIAG_UNLOCK_RETRY_COUNT) {
- writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
+ ioc_err(mrioc,
+ "%s reset failed due to unlock failure, host_diagnostic(0x%08x)\n",
+ mpi3mr_reset_type_name(reset_type),
+ host_diagnostic);
mrioc->unrecoverable = 1;
- goto out;
+ return retval;
}
writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_FLUSH,
@@ -1133,31 +1389,26 @@ retry_reset:
} while (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_DIAG_WRITE_ENABLE));
writel(reset_reason, &mrioc->sysif_regs->scratchpad[0]);
- ioc_info(mrioc, "%s reset due to %s(0x%x)\n",
- mpi3mr_reset_type_name(reset_type),
- mpi3mr_reset_rc_name(reset_reason), reset_reason);
writel(host_diagnostic | reset_type,
&mrioc->sysif_regs->host_diagnostic);
- timeout = mrioc->ready_timeout * 10;
- if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET) {
+ switch (reset_type) {
+ case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET:
do {
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- if (ioc_status &
- MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
+ ioc_config =
+ readl(&mrioc->sysif_regs->ioc_configuration);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) &&
+ mpi3mr_soft_reset_success(ioc_status, ioc_config)) {
mpi3mr_clear_reset_history(mrioc);
- ioc_config =
- readl(&mrioc->sysif_regs->ioc_configuration);
- if (mpi3mr_soft_reset_success(ioc_status,
- ioc_config)) {
- retval = 0;
- break;
- }
+ retval = 0;
+ break;
}
msleep(100);
} while (--timeout);
- writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
- &mrioc->sysif_regs->write_sequence);
- } else if (reset_type == MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT) {
+ mpi3mr_print_fault_info(mrioc);
+ break;
+ case MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT:
do {
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
if (mpi3mr_diagfault_success(mrioc, ioc_status)) {
@@ -1166,28 +1417,22 @@ retry_reset:
}
msleep(100);
} while (--timeout);
- mpi3mr_clear_reset_history(mrioc);
- writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
- &mrioc->sysif_regs->write_sequence);
- }
- if (retval && ((++reset_retry_count) < MPI3MR_MAX_RESET_RETRY_COUNT)) {
- ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
- ioc_info(mrioc,
- "Base IOC Sts/Config after reset try %d is (0x%x)/(0x%x)\n",
- reset_retry_count, ioc_status, ioc_config);
- goto retry_reset;
+ break;
+ default:
+ break;
}
-out:
- pci_cfg_access_unlock(mrioc->pdev);
- ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ writel(MPI3_SYSIF_WRITE_SEQUENCE_KEY_VALUE_2ND,
+ &mrioc->sysif_regs->write_sequence);
+ ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_info(mrioc,
- "Base IOC Sts/Config after %s reset is (0x%x)/(0x%x)\n",
- (!retval) ? "successful" : "failed", ioc_status,
+ "ioc_status/ioc_onfig after %s reset is (0x%x)/(0x%x)\n",
+ (!retval)?"successful":"failed", ioc_status,
ioc_config);
+ if (retval)
+ mrioc->unrecoverable = 1;
return retval;
}
@@ -1278,7 +1523,7 @@ static void mpi3mr_free_op_req_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
mrioc->op_reply_qinfo[q_idx].q_segment_list = NULL;
}
} else
- size = mrioc->req_qinfo[q_idx].num_requests *
+ size = mrioc->req_qinfo[q_idx].segment_qd *
mrioc->facts.op_req_sz;
for (j = 0; j < mrioc->req_qinfo[q_idx].num_segments; j++) {
@@ -1351,10 +1596,11 @@ static void mpi3mr_free_op_reply_q_segments(struct mpi3mr_ioc *mrioc, u16 q_idx)
static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
{
struct mpi3_delete_reply_queue_request delq_req;
+ struct op_reply_qinfo *op_reply_q = mrioc->op_reply_qinfo + qidx;
int retval = 0;
u16 reply_qid = 0, midx;
- reply_qid = mrioc->op_reply_qinfo[qidx].qid;
+ reply_qid = op_reply_q->qid;
midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(qidx, mrioc->op_reply_q_offset);
@@ -1364,6 +1610,9 @@ static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
goto out;
}
+ (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount-- :
+ mrioc->active_poll_qcount--;
+
memset(&delq_req, 0, sizeof(delq_req));
mutex_lock(&mrioc->init_cmds.mutex);
if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
@@ -1389,13 +1638,9 @@ static int mpi3mr_delete_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "Issue DelRepQ: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ ioc_err(mrioc, "delete reply queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_DELREPQ_TIMEOUT);
- mrioc->unrecoverable = 1;
-
retval = -1;
goto out_unlock;
}
@@ -1565,6 +1810,8 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
reply_qid = qidx + 1;
op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD;
+ if (!mrioc->pdev->revision)
+ op_reply_q->num_replies = MPI3MR_OP_REP_Q_QD4K;
op_reply_q->ci = 0;
op_reply_q->ephase = 1;
atomic_set(&op_reply_q->pend_ios, 0);
@@ -1592,8 +1839,26 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
create_req.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
create_req.function = MPI3_FUNCTION_CREATE_REPLY_QUEUE;
create_req.queue_id = cpu_to_le16(reply_qid);
- create_req.flags = MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
- create_req.msix_index = cpu_to_le16(mrioc->intr_info[midx].msix_index);
+
+ if (midx < (mrioc->intr_info_count - mrioc->requested_poll_qcount))
+ op_reply_q->qtype = MPI3MR_DEFAULT_QUEUE;
+ else
+ op_reply_q->qtype = MPI3MR_POLL_QUEUE;
+
+ if (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) {
+ create_req.flags =
+ MPI3_CREATE_REPLY_QUEUE_FLAGS_INT_ENABLE_ENABLE;
+ create_req.msix_index =
+ cpu_to_le16(mrioc->intr_info[midx].msix_index);
+ } else {
+ create_req.msix_index = cpu_to_le16(mrioc->intr_info_count - 1);
+ ioc_info(mrioc, "create reply queue(polled): for qid(%d), midx(%d)\n",
+ reply_qid, midx);
+ if (!mrioc->active_poll_qcount)
+ disable_irq_nosync(pci_irq_vector(mrioc->pdev,
+ mrioc->intr_info_count - 1));
+ }
+
if (mrioc->enable_segqueue) {
create_req.flags |=
MPI3_CREATE_REQUEST_QUEUE_FLAGS_SEGMENTED_SEGMENTED;
@@ -1615,12 +1880,9 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "CreateRepQ: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ ioc_err(mrioc, "create reply queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_CREATEREPQ_TIMEOUT);
- mrioc->unrecoverable = 1;
retval = -1;
goto out_unlock;
}
@@ -1634,7 +1896,11 @@ static int mpi3mr_create_op_reply_q(struct mpi3mr_ioc *mrioc, u16 qidx)
goto out_unlock;
}
op_reply_q->qid = reply_qid;
- mrioc->intr_info[midx].op_reply_q = op_reply_q;
+ if (midx < mrioc->intr_info_count)
+ mrioc->intr_info[midx].op_reply_q = op_reply_q;
+
+ (op_reply_q->qtype == MPI3MR_DEFAULT_QUEUE) ? mrioc->default_qcount++ :
+ mrioc->active_poll_qcount++;
out_unlock:
mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
@@ -1722,12 +1988,9 @@ static int mpi3mr_create_op_req_q(struct mpi3mr_ioc *mrioc, u16 idx,
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "CreateReqQ: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- if (mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
- MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT))
- mrioc->unrecoverable = 1;
+ ioc_err(mrioc, "create request queue timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_CREATEREQQ_TIMEOUT);
retval = -1;
goto out_unlock;
}
@@ -1771,8 +2034,13 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
mrioc->intr_info_count - mrioc->op_reply_q_offset;
if (!mrioc->num_queues)
mrioc->num_queues = min_t(int, num_queues, msix_count_op_q);
- num_queues = mrioc->num_queues;
- ioc_info(mrioc, "Trying to create %d Operational Q pairs\n",
+ /*
+ * During a reset, set num_queues to the number of queues
+ * that was in use before the reset.
+ */
+ num_queues = mrioc->num_op_reply_q ?
+ mrioc->num_op_reply_q : mrioc->num_queues;
+ ioc_info(mrioc, "trying to create %d operational queue pairs\n",
num_queues);
if (!mrioc->req_qinfo) {
@@ -1814,8 +2082,10 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
goto out_failed;
}
mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
- ioc_info(mrioc, "Successfully created %d Operational Q pairs\n",
- mrioc->num_op_reply_q);
+ ioc_info(mrioc,
+ "successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
+ mrioc->num_op_reply_q, mrioc->default_qcount,
+ mrioc->active_poll_qcount);
return retval;
out_failed:
@@ -1863,7 +2133,7 @@ int mpi3mr_op_request_post(struct mpi3mr_ioc *mrioc,
if (mpi3mr_check_req_qfull(op_req_q)) {
midx = REPLY_QUEUE_IDX_TO_MSIX_IDX(
reply_qidx, mrioc->op_reply_q_offset);
- mpi3mr_process_op_reply_q(mrioc, &mrioc->intr_info[midx]);
+ mpi3mr_process_op_reply_q(mrioc, mrioc->intr_info[midx].op_reply_q);
if (mpi3mr_check_req_qfull(op_req_q)) {
retval = -EAGAIN;
@@ -1901,6 +2171,42 @@ out:
}
/**
+ * mpi3mr_check_rh_fault_ioc - check reset history and fault
+ * controller
+ * @mrioc: Adapter instance reference
+ * @reason_code: reason code for the fault.
+ *
+ * This routine will save snapdump and fault the controller with
+ * the given reason code if it is not already in the fault state or
+ * asynchronously reset. This is used to handle initialization-time
+ * faults/resets/timeouts, as in those cases an immediate soft reset
+ * invocation is not required.
+ *
+ * Return: None.
+ */
+void mpi3mr_check_rh_fault_ioc(struct mpi3mr_ioc *mrioc, u32 reason_code)
+{
+ u32 ioc_status, host_diagnostic, timeout;
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ mpi3mr_print_fault_info(mrioc);
+ return;
+ }
+ mpi3mr_set_diagsave(mrioc);
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ reason_code);
+ timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
+ do {
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ if (!(host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS))
+ break;
+ msleep(100);
+ } while (--timeout);
+}
+
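The routine above uses the same poll-with-timeout convention seen throughout this file: a timeout in seconds is multiplied by 10 and counted down once per 100 ms sleep. A self-contained user-space sketch of that convention; the demo_* names are hypothetical and demo_done() stands in for the register poll:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool demo_done(void) { return false; }	/* stand-in condition */

static int demo_wait(unsigned int seconds)
{
	unsigned int timeout = seconds * 10;	/* 100 ms ticks */

	do {
		if (demo_done())
			return 0;
		usleep(100 * 1000);		/* msleep(100) equivalent */
	} while (--timeout);

	return -1;				/* timed out */
}

int main(void)
{
	printf("wait returned %d\n", demo_wait(1));
	return 0;
}
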
+/**
* mpi3mr_sync_timestamp - Issue time stamp sync request
* @mrioc: Adapter reference
*
@@ -1945,8 +2251,9 @@ static int mpi3mr_sync_timestamp(struct mpi3mr_ioc *mrioc)
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
ioc_err(mrioc, "Issue IOUCTL time_stamp: command timed out\n");
mrioc->init_cmds.is_waiting = 0;
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TSU_TIMEOUT, 1);
retval = -1;
goto out_unlock;
}
@@ -1969,6 +2276,91 @@ out:
}
/**
+ * mpi3mr_print_pkg_ver - display controller fw package version
+ * @mrioc: Adapter reference
+ *
+ * Retrieve firmware package version from the component image
+ * header of the controller flash and display it.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+static int mpi3mr_print_pkg_ver(struct mpi3mr_ioc *mrioc)
+{
+ struct mpi3_ci_upload_request ci_upload;
+ int retval = -1;
+ void *data = NULL;
+ dma_addr_t data_dma;
+ struct mpi3_ci_manifest_mpi *manifest;
+ u32 data_len = sizeof(struct mpi3_ci_manifest_mpi);
+ u8 sgl_flags = MPI3MR_SGEFLAGS_SYSTEM_SIMPLE_END_OF_LIST;
+
+ data = dma_alloc_coherent(&mrioc->pdev->dev, data_len, &data_dma,
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ memset(&ci_upload, 0, sizeof(ci_upload));
+ mutex_lock(&mrioc->init_cmds.mutex);
+ if (mrioc->init_cmds.state & MPI3MR_CMD_PENDING) {
+ ioc_err(mrioc, "sending get package version failed due to command in use\n");
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
+ }
+ mrioc->init_cmds.state = MPI3MR_CMD_PENDING;
+ mrioc->init_cmds.is_waiting = 1;
+ mrioc->init_cmds.callback = NULL;
+ ci_upload.host_tag = cpu_to_le16(MPI3MR_HOSTTAG_INITCMDS);
+ ci_upload.function = MPI3_FUNCTION_CI_UPLOAD;
+ ci_upload.msg_flags = MPI3_CI_UPLOAD_MSGFLAGS_LOCATION_PRIMARY;
+ ci_upload.signature1 = cpu_to_le32(MPI3_IMAGE_HEADER_SIGNATURE1_MANIFEST);
+ ci_upload.image_offset = cpu_to_le32(MPI3_IMAGE_HEADER_SIZE);
+ ci_upload.segment_size = cpu_to_le32(data_len);
+
+ mpi3mr_add_sg_single(&ci_upload.sgl, sgl_flags, data_len,
+ data_dma);
+ init_completion(&mrioc->init_cmds.done);
+ retval = mpi3mr_admin_request_post(mrioc, &ci_upload,
+ sizeof(ci_upload), 1);
+ if (retval) {
+ ioc_err(mrioc, "posting get package version failed\n");
+ goto out_unlock;
+ }
+ wait_for_completion_timeout(&mrioc->init_cmds.done,
+ (MPI3MR_INTADMCMD_TIMEOUT * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "get package version timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_GETPKGVER_TIMEOUT);
+ retval = -1;
+ goto out_unlock;
+ }
+ if ((mrioc->init_cmds.ioc_status & MPI3_IOCSTATUS_STATUS_MASK)
+ == MPI3_IOCSTATUS_SUCCESS) {
+ manifest = (struct mpi3_ci_manifest_mpi *) data;
+ if (manifest->manifest_type == MPI3_CI_MANIFEST_TYPE_MPI) {
+ ioc_info(mrioc,
+ "firmware package version(%d.%d.%d.%d.%05d-%05d)\n",
+ manifest->package_version.gen_major,
+ manifest->package_version.gen_minor,
+ manifest->package_version.phase_major,
+ manifest->package_version.phase_minor,
+ manifest->package_version.customer_id,
+ manifest->package_version.build_num);
+ }
+ }
+ retval = 0;
+out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
+ mutex_unlock(&mrioc->init_cmds.mutex);
+
+out:
+ if (data)
+ dma_free_coherent(&mrioc->pdev->dev, data_len, data,
+ data_dma);
+ return retval;
+}
+
+/**
* mpi3mr_watchdog_work - watchdog thread to monitor faults
* @work: work struct
*
@@ -1984,50 +2376,66 @@ static void mpi3mr_watchdog_work(struct work_struct *work)
container_of(work, struct mpi3mr_ioc, watchdog_work.work);
unsigned long flags;
enum mpi3mr_iocstate ioc_state;
- u32 fault, host_diagnostic;
+ u32 fault, host_diagnostic, ioc_status;
+ u32 reset_reason = MPI3MR_RESET_FROM_FAULT_WATCH;
+
+ if (mrioc->reset_in_progress || mrioc->unrecoverable)
+ return;
if (mrioc->ts_update_counter++ >= MPI3MR_TSUPDATE_INTERVAL) {
mrioc->ts_update_counter = 0;
mpi3mr_sync_timestamp(mrioc);
}
+ if ((mrioc->prepare_for_reset) &&
+ ((mrioc->prepare_for_reset_timeout_counter++) >=
+ MPI3MR_PREPARE_FOR_RESET_TIMEOUT)) {
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_CIACTVRST_TIMER, 1);
+ return;
+ }
+
+ ioc_status = readl(&mrioc->sysif_regs->ioc_status);
+ if (ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) {
+ mpi3mr_soft_reset_handler(mrioc, MPI3MR_RESET_FROM_FIRMWARE, 0);
+ return;
+ }
+
/*Check for fault state every one second and issue Soft reset*/
ioc_state = mpi3mr_get_iocstate(mrioc);
- if (ioc_state == MRIOC_STATE_FAULT) {
- fault = readl(&mrioc->sysif_regs->fault) &
- MPI3_SYSIF_FAULT_CODE_MASK;
- host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
- if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
- if (!mrioc->diagsave_timeout) {
- mpi3mr_print_fault_info(mrioc);
- ioc_warn(mrioc, "Diag save in progress\n");
- }
- if ((mrioc->diagsave_timeout++) <=
- MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
- goto schedule_work;
- } else
- mpi3mr_print_fault_info(mrioc);
- mrioc->diagsave_timeout = 0;
+ if (ioc_state != MRIOC_STATE_FAULT)
+ goto schedule_work;
- if (fault == MPI3_SYSIF_FAULT_CODE_FACTORY_RESET) {
- ioc_info(mrioc,
- "Factory Reset fault occurred marking controller as unrecoverable"
- );
- mrioc->unrecoverable = 1;
- goto out;
+ fault = readl(&mrioc->sysif_regs->fault) & MPI3_SYSIF_FAULT_CODE_MASK;
+ host_diagnostic = readl(&mrioc->sysif_regs->host_diagnostic);
+ if (host_diagnostic & MPI3_SYSIF_HOST_DIAG_SAVE_IN_PROGRESS) {
+ if (!mrioc->diagsave_timeout) {
+ mpi3mr_print_fault_info(mrioc);
+ ioc_warn(mrioc, "diag save in progress\n");
}
+ if ((mrioc->diagsave_timeout++) <= MPI3_SYSIF_DIAG_SAVE_TIMEOUT)
+ goto schedule_work;
+ }
- if ((fault == MPI3_SYSIF_FAULT_CODE_DIAG_FAULT_RESET) ||
- (fault == MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS) ||
- (mrioc->reset_in_progress))
- goto out;
- if (fault == MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET)
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_CIACTIV_FAULT, 0);
- else
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_FAULT_WATCH, 0);
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->diagsave_timeout = 0;
+
+ switch (fault) {
+ case MPI3_SYSIF_FAULT_CODE_POWER_CYCLE_REQUIRED:
+ ioc_info(mrioc,
+ "controller requires system power cycle, marking controller as unrecoverable\n");
+ mrioc->unrecoverable = 1;
+ return;
+ case MPI3_SYSIF_FAULT_CODE_SOFT_RESET_IN_PROGRESS:
+ return;
+ case MPI3_SYSIF_FAULT_CODE_CI_ACTIVATION_RESET:
+ reset_reason = MPI3MR_RESET_FROM_CIACTIV_FAULT;
+ break;
+ default:
+ break;
}
+ mpi3mr_soft_reset_handler(mrioc, reset_reason, 0);
+ return;
schedule_work:
spin_lock_irqsave(&mrioc->watchdog_lock, flags);
@@ -2036,7 +2444,6 @@ schedule_work:
&mrioc->watchdog_work,
msecs_to_jiffies(MPI3MR_WATCHDOG_INTERVAL));
spin_unlock_irqrestore(&mrioc->watchdog_lock, flags);
-out:
return;
}
@@ -2097,41 +2504,6 @@ void mpi3mr_stop_watchdog(struct mpi3mr_ioc *mrioc)
}
/**
- * mpi3mr_kill_ioc - Kill the controller
- * @mrioc: Adapter instance reference
- * @reason: reason for the failure.
- *
- * If fault debug is enabled, display the fault info else issue
- * diag fault and freeze the system for controller debug
- * purpose.
- *
- * Return: Nothing.
- */
-static void mpi3mr_kill_ioc(struct mpi3mr_ioc *mrioc, u32 reason)
-{
- enum mpi3mr_iocstate ioc_state;
-
- if (!mrioc->fault_dbg)
- return;
-
- dump_stack();
-
- ioc_state = mpi3mr_get_iocstate(mrioc);
- if (ioc_state == MRIOC_STATE_FAULT)
- mpi3mr_print_fault_info(mrioc);
- else {
- ioc_err(mrioc, "Firmware is halted due to the reason %d\n",
- reason);
- mpi3mr_diagfault_reset_handler(mrioc, reason);
- }
- if (mrioc->fault_dbg == 2)
- for (;;)
- ;
- else
- panic("panic in %s\n", __func__);
-}
-
-/**
* mpi3mr_setup_admin_qpair - Setup admin queue pair
* @mrioc: Adapter instance reference
*
@@ -2258,12 +2630,9 @@ static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "Issue IOCFacts: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ ioc_err(mrioc, "ioc_facts timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_IOCFACTS_TIMEOUT);
- mrioc->unrecoverable = 1;
retval = -1;
goto out_unlock;
}
@@ -2277,6 +2646,7 @@ static int mpi3mr_issue_iocfacts(struct mpi3mr_ioc *mrioc,
goto out_unlock;
}
memcpy(facts_data, (u8 *)data, data_len);
+ mpi3mr_process_factsdata(mrioc, facts_data);
out_unlock:
mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
mutex_unlock(&mrioc->init_cmds.mutex);
@@ -2374,14 +2744,13 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.reply_sz = le16_to_cpu(facts_data->reply_frame_size) * 4;
mrioc->facts.exceptions = le16_to_cpu(facts_data->ioc_exceptions);
mrioc->facts.max_perids = le16_to_cpu(facts_data->max_persistent_id);
- mrioc->facts.max_pds = le16_to_cpu(facts_data->max_pds);
mrioc->facts.max_vds = le16_to_cpu(facts_data->max_vds);
mrioc->facts.max_hpds = le16_to_cpu(facts_data->max_host_pds);
- mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_advanced_host_pds);
- mrioc->facts.max_raidpds = le16_to_cpu(facts_data->max_raid_pds);
+ mrioc->facts.max_advhpds = le16_to_cpu(facts_data->max_adv_host_pds);
+ mrioc->facts.max_raid_pds = le16_to_cpu(facts_data->max_raid_pds);
mrioc->facts.max_nvme = le16_to_cpu(facts_data->max_nvme);
mrioc->facts.max_pcie_switches =
- le16_to_cpu(facts_data->max_pc_ie_switches);
+ le16_to_cpu(facts_data->max_pcie_switches);
mrioc->facts.max_sasexpanders =
le16_to_cpu(facts_data->max_sas_expanders);
mrioc->facts.max_sasinitiators =
@@ -2415,22 +2784,15 @@ static void mpi3mr_process_factsdata(struct mpi3mr_ioc *mrioc,
mrioc->facts.ioc_num, mrioc->facts.max_op_req_q,
mrioc->facts.max_op_reply_q, mrioc->facts.max_devhandle);
ioc_info(mrioc,
- "maxreqs(%d), mindh(%d) maxPDs(%d) maxvectors(%d) maxperids(%d)\n",
+ "maxreqs(%d), mindh(%d) maxvectors(%d) maxperids(%d)\n",
mrioc->facts.max_reqs, mrioc->facts.min_devhandle,
- mrioc->facts.max_pds, mrioc->facts.max_msix_vectors,
- mrioc->facts.max_perids);
+ mrioc->facts.max_msix_vectors, mrioc->facts.max_perids);
ioc_info(mrioc, "SGEModMask 0x%x SGEModVal 0x%x SGEModShift 0x%x ",
mrioc->facts.sge_mod_mask, mrioc->facts.sge_mod_value,
mrioc->facts.sge_mod_shift);
ioc_info(mrioc, "DMA mask %d InitialPE status 0x%x\n",
mrioc->facts.dma_mask, (facts_flags &
MPI3_IOCFACTS_FLAGS_INITIAL_PORT_ENABLE_MASK));
-
- mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
-
- if (reset_devices)
- mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
- MPI3MR_HOST_IOS_KDUMP);
}
/**
@@ -2446,23 +2808,29 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
{
int retval = 0;
u32 sz, i;
- dma_addr_t phy_addr;
if (mrioc->init_cmds.reply)
- goto post_reply_sbuf;
+ return retval;
- mrioc->init_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
+ mrioc->init_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
if (!mrioc->init_cmds.reply)
goto out_failed;
for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++) {
- mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->facts.reply_sz,
+ mrioc->dev_rmhs_cmds[i].reply = kzalloc(mrioc->reply_sz,
GFP_KERNEL);
if (!mrioc->dev_rmhs_cmds[i].reply)
goto out_failed;
}
- mrioc->host_tm_cmds.reply = kzalloc(mrioc->facts.reply_sz, GFP_KERNEL);
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ mrioc->evtack_cmds[i].reply = kzalloc(mrioc->reply_sz,
+ GFP_KERNEL);
+ if (!mrioc->evtack_cmds[i].reply)
+ goto out_failed;
+ }
+
+ mrioc->host_tm_cmds.reply = kzalloc(mrioc->reply_sz, GFP_KERNEL);
if (!mrioc->host_tm_cmds.reply)
goto out_failed;
@@ -2482,13 +2850,21 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
if (!mrioc->devrem_bitmap)
goto out_failed;
+ mrioc->evtack_cmds_bitmap_sz = MPI3MR_NUM_EVTACKCMD / 8;
+ if (MPI3MR_NUM_EVTACKCMD % 8)
+ mrioc->evtack_cmds_bitmap_sz++;
+ mrioc->evtack_cmds_bitmap = kzalloc(mrioc->evtack_cmds_bitmap_sz,
+ GFP_KERNEL);
+ if (!mrioc->evtack_cmds_bitmap)
+ goto out_failed;
+
mrioc->num_reply_bufs = mrioc->facts.max_reqs + MPI3MR_NUM_EVT_REPLIES;
mrioc->reply_free_qsz = mrioc->num_reply_bufs + 1;
mrioc->num_sense_bufs = mrioc->facts.max_reqs / MPI3MR_SENSEBUF_FACTOR;
mrioc->sense_buf_q_sz = mrioc->num_sense_bufs + 1;
/* reply buffer pool, 16 byte align */
- sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
+ sz = mrioc->num_reply_bufs * mrioc->reply_sz;
mrioc->reply_buf_pool = dma_pool_create("reply_buf pool",
&mrioc->pdev->dev, sz, 16, 0);
if (!mrioc->reply_buf_pool) {
@@ -2517,7 +2893,7 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
goto out_failed;
/* sense buffer pool, 4 byte align */
- sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
mrioc->sense_buf_pool = dma_pool_create("sense_buf pool",
&mrioc->pdev->dev, sz, 4, 0);
if (!mrioc->sense_buf_pool) {
@@ -2542,21 +2918,42 @@ static int mpi3mr_alloc_reply_sense_bufs(struct mpi3mr_ioc *mrioc)
if (!mrioc->sense_buf_q)
goto out_failed;
-post_reply_sbuf:
- sz = mrioc->num_reply_bufs * mrioc->facts.reply_sz;
+ return retval;
+
+out_failed:
+ retval = -1;
+ return retval;
+}
+
+/**
+ * mpimr_initialize_reply_sbuf_queues - initialize reply sense
+ * buffers
+ * @mrioc: Adapter instance reference
+ *
+ * Helper function to initialize reply and sense buffers along
+ * with some debug prints.
+ *
+ * Return: None.
+ */
+static void mpimr_initialize_reply_sbuf_queues(struct mpi3mr_ioc *mrioc)
+{
+ u32 sz, i;
+ dma_addr_t phy_addr;
+
+ sz = mrioc->num_reply_bufs * mrioc->reply_sz;
ioc_info(mrioc,
"reply buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
- mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->facts.reply_sz,
+ mrioc->reply_buf, mrioc->num_reply_bufs, mrioc->reply_sz,
(sz / 1024), (unsigned long long)mrioc->reply_buf_dma);
sz = mrioc->reply_free_qsz * 8;
ioc_info(mrioc,
"reply_free_q pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), reply_dma(0x%llx)\n",
mrioc->reply_free_q, mrioc->reply_free_qsz, 8, (sz / 1024),
(unsigned long long)mrioc->reply_free_q_dma);
- sz = mrioc->num_sense_bufs * MPI3MR_SENSEBUF_SZ;
+ sz = mrioc->num_sense_bufs * MPI3MR_SENSE_BUF_SZ;
ioc_info(mrioc,
"sense_buf pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB), sense_dma(0x%llx)\n",
- mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSEBUF_SZ,
+ mrioc->sense_buf, mrioc->num_sense_bufs, MPI3MR_SENSE_BUF_SZ,
(sz / 1024), (unsigned long long)mrioc->sense_buf_dma);
sz = mrioc->sense_buf_q_sz * 8;
ioc_info(mrioc,
@@ -2566,20 +2963,15 @@ post_reply_sbuf:
/* initialize Reply buffer Queue */
for (i = 0, phy_addr = mrioc->reply_buf_dma;
- i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->facts.reply_sz)
+ i < mrioc->num_reply_bufs; i++, phy_addr += mrioc->reply_sz)
mrioc->reply_free_q[i] = cpu_to_le64(phy_addr);
mrioc->reply_free_q[i] = cpu_to_le64(0);
/* initialize Sense Buffer Queue */
for (i = 0, phy_addr = mrioc->sense_buf_dma;
- i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSEBUF_SZ)
+ i < mrioc->num_sense_bufs; i++, phy_addr += MPI3MR_SENSE_BUF_SZ)
mrioc->sense_buf_q[i] = cpu_to_le64(phy_addr);
mrioc->sense_buf_q[i] = cpu_to_le64(0);
- return retval;
-
-out_failed:
- retval = -1;
- return retval;
}
/**
@@ -2606,6 +2998,8 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
retval = -1;
goto out;
}
+ mpimr_initialize_reply_sbuf_queues(mrioc);
+
drv_info->information_length = cpu_to_le32(data_len);
strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature));
strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name));
@@ -2639,7 +3033,7 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
iocinit_req.reply_free_queue_depth = cpu_to_le16(mrioc->reply_free_qsz);
iocinit_req.reply_free_queue_address =
cpu_to_le64(mrioc->reply_free_q_dma);
- iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSEBUF_SZ);
+ iocinit_req.sense_buffer_length = cpu_to_le16(MPI3MR_SENSE_BUF_SZ);
iocinit_req.sense_buffer_free_queue_depth =
cpu_to_le16(mrioc->sense_buf_q_sz);
iocinit_req.sense_buffer_free_queue_address =
@@ -2659,12 +3053,9 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_IOCINIT_TIMEOUT);
- mrioc->unrecoverable = 1;
- ioc_err(mrioc, "Issue IOCInit: command timed out\n");
+ ioc_err(mrioc, "ioc_init timed out\n");
retval = -1;
goto out_unlock;
}
@@ -2678,6 +3069,13 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc)
goto out_unlock;
}
+ mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
+ writel(mrioc->reply_free_queue_host_index,
+ &mrioc->sysif_regs->reply_free_host_index);
+
+ mrioc->sbq_host_index = mrioc->num_sense_bufs;
+ writel(mrioc->sbq_host_index,
+ &mrioc->sysif_regs->sense_buffer_free_host_index);
out_unlock:
mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
mutex_unlock(&mrioc->init_cmds.mutex);
@@ -2755,12 +3153,9 @@ static int mpi3mr_issue_event_notification(struct mpi3mr_ioc *mrioc)
wait_for_completion_timeout(&mrioc->init_cmds.done,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ ioc_err(mrioc, "event notification timed out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
MPI3MR_RESET_FROM_EVTNOTIFY_TIMEOUT);
- mrioc->unrecoverable = 1;
retval = -1;
goto out_unlock;
}
@@ -2782,17 +3177,17 @@ out:
}
/**
- * mpi3mr_send_event_ack - Send event acknowledgment
+ * mpi3mr_process_event_ack - Process event acknowledgment
* @mrioc: Adapter instance reference
* @event: MPI3 event ID
- * @event_ctx: Event context
+ * @event_ctx: event context
*
* Send event acknowledgment through admin queue and wait for
* it to complete.
*
* Return: 0 on success, non-zero on failures.
*/
-int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+int mpi3mr_process_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
u32 event_ctx)
{
struct mpi3_event_ack_request evtack_req;
@@ -2825,8 +3220,9 @@ int mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
(MPI3MR_INTADMCMD_TIMEOUT * HZ));
if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
ioc_err(mrioc, "Issue EvtNotify: command timed out\n");
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_RESET))
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_EVTACK_TIMEOUT, 1);
retval = -1;
goto out_unlock;
}
@@ -2863,6 +3259,9 @@ static int mpi3mr_alloc_chain_bufs(struct mpi3mr_ioc *mrioc)
u32 sz, i;
u16 num_chains;
+ if (mrioc->chain_sgl_list)
+ return retval;
+
num_chains = mrioc->max_host_ios / MPI3MR_CHAINBUF_FACTOR;
if (prot_mask & (SHOST_DIX_TYPE0_PROTECTION
@@ -2966,29 +3365,28 @@ int mpi3mr_issue_port_enable(struct mpi3mr_ioc *mrioc, u8 async)
ioc_err(mrioc, "Issue PortEnable: Admin Post failed\n");
goto out_unlock;
}
- if (!async) {
- wait_for_completion_timeout(&mrioc->init_cmds.done,
- (pe_timeout * HZ));
- if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "Issue PortEnable: command timed out\n");
- retval = -1;
- mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
- MPI3MR_RESET_FROM_PE_TIMEOUT);
- mrioc->unrecoverable = 1;
- goto out_unlock;
- }
- mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
+ if (async) {
+ mutex_unlock(&mrioc->init_cmds.mutex);
+ goto out;
}
+
+ wait_for_completion_timeout(&mrioc->init_cmds.done, (pe_timeout * HZ));
+ if (!(mrioc->init_cmds.state & MPI3MR_CMD_COMPLETE)) {
+ ioc_err(mrioc, "port enable timed out\n");
+ retval = -1;
+ mpi3mr_check_rh_fault_ioc(mrioc, MPI3MR_RESET_FROM_PE_TIMEOUT);
+ goto out_unlock;
+ }
+ mpi3mr_port_enable_complete(mrioc, &mrioc->init_cmds);
+
out_unlock:
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
mutex_unlock(&mrioc->init_cmds.mutex);
out:
return retval;
}
-/* Protocol type to name mapper structure*/
+/* Protocol type to name mapper structure */
static const struct {
u8 protocol;
char *name;
@@ -3181,6 +3579,10 @@ int mpi3mr_setup_resources(struct mpi3mr_ioc *mrioc)
mrioc->sysif_regs, memap_sz);
ioc_info(mrioc, "Number of MSI-X vectors found in capabilities: (%d)\n",
mrioc->msix_count);
+
+ if (!reset_devices && poll_queues > 0)
+ mrioc->requested_poll_qcount = min_t(int, poll_queues,
+ mrioc->msix_count - 2);
return retval;
out_failed:
@@ -3189,9 +3591,48 @@ out_failed:
}
/**
+ * mpi3mr_enable_events - Enable required events
+ * @mrioc: Adapter instance reference
+ *
+ * This routine unmasks the events required by the driver by
+ * sending the appropriate event mask bitmap through an event
+ * notification request.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+static int mpi3mr_enable_events(struct mpi3mr_ioc *mrioc)
+{
+ int retval = 0;
+ u32 i;
+
+ for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mrioc->event_masks[i] = -1;
+
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_PREPARE_FOR_RESET);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
+ mpi3mr_unmask_events(mrioc, MPI3_EVENT_TEMP_THRESHOLD);
+
+ retval = mpi3mr_issue_event_notification(mrioc);
+ if (retval)
+ ioc_err(mrioc, "failed to issue event notification %d\n",
+ retval);
+ return retval;
+}
+
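A sketch of the word/bit arithmetic that an unmask helper like mpi3mr_unmask_events() plausibly performs on the event_masks array; the assumption here is that each 32-bit word covers 32 event codes and all-ones means fully masked, and the demo_* names are hypothetical:

#include <stdio.h>

#define DEMO_EVENTMASK_WORDS 4	/* stands in for MPI3_EVENT_NOTIFY_EVENTMASK_WORDS */

static unsigned int demo_masks[DEMO_EVENTMASK_WORDS];

/* Clear bit (event % 32) in word (event / 32) to unmask that event. */
static void demo_unmask_event(unsigned char event)
{
	unsigned int word = event / 32;
	unsigned int bit = event % 32;

	if (word < DEMO_EVENTMASK_WORDS)
		demo_masks[word] &= ~(1u << bit);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < DEMO_EVENTMASK_WORDS; i++)
		demo_masks[i] = 0xffffffffu;	/* mask everything */

	demo_unmask_event(0x16);		/* arbitrary event code */

	for (i = 0; i < DEMO_EVENTMASK_WORDS; i++)
		printf("word %u: 0x%08x\n", i, demo_masks[i]);
	return 0;
}
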
+/**
* mpi3mr_init_ioc - Initialize the controller
* @mrioc: Adapter instance reference
- * @init_type: Flag to indicate is the init_type
*
 * This is the controller initialization routine, executed either
* after soft reset or from pci probe callback.
@@ -3204,227 +3645,240 @@ out_failed:
*
* Return: 0 on success and non-zero on failure.
*/
-int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc, u8 init_type)
+int mpi3mr_init_ioc(struct mpi3mr_ioc *mrioc)
{
int retval = 0;
- enum mpi3mr_iocstate ioc_state;
- u64 base_info;
- u32 timeout;
- u32 ioc_status, ioc_config, i;
+ u8 retry = 0;
struct mpi3_ioc_facts_data facts_data;
- mrioc->irqpoll_sleep = MPI3MR_IRQ_POLL_SLEEP;
- mrioc->change_count = 0;
- if (init_type == MPI3MR_IT_INIT) {
- mrioc->cpu_count = num_online_cpus();
- retval = mpi3mr_setup_resources(mrioc);
- if (retval) {
- ioc_err(mrioc, "Failed to setup resources:error %d\n",
- retval);
- goto out_nocleanup;
- }
+retry_init:
+ retval = mpi3mr_bring_ioc_ready(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
+ retval);
+ goto out_failed_noretry;
}
- ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
+ retval = mpi3mr_setup_isr(mrioc, 1);
+ if (retval) {
+ ioc_err(mrioc, "Failed to setup ISR error %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
- ioc_info(mrioc, "SOD status %x configuration %x\n",
- ioc_status, ioc_config);
+ retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
+ retval);
+ goto out_failed;
+ }
- base_info = lo_hi_readq(&mrioc->sysif_regs->ioc_information);
- ioc_info(mrioc, "SOD base_info %llx\n", base_info);
+ mrioc->max_host_ios = mrioc->facts.max_reqs - MPI3MR_INTERNAL_CMDS_RESVD;
- /*The timeout value is in 2sec unit, changing it to seconds*/
- mrioc->ready_timeout =
- ((base_info & MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_MASK) >>
- MPI3_SYSIF_IOC_INFO_LOW_TIMEOUT_SHIFT) * 2;
+ if (reset_devices)
+ mrioc->max_host_ios = min_t(int, mrioc->max_host_ios,
+ MPI3MR_HOST_IOS_KDUMP);
- ioc_info(mrioc, "IOC ready timeout %d\n", mrioc->ready_timeout);
+ mrioc->reply_sz = mrioc->facts.reply_sz;
- ioc_state = mpi3mr_get_iocstate(mrioc);
- ioc_info(mrioc, "IOC in %s state during detection\n",
- mpi3mr_iocstate_name(ioc_state));
+ retval = mpi3mr_check_reset_dma_mask(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Resetting dma mask failed %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
- if (ioc_state == MRIOC_STATE_BECOMING_READY ||
- ioc_state == MRIOC_STATE_RESET_REQUESTED) {
- timeout = mrioc->ready_timeout * 10;
- do {
- msleep(100);
- } while (--timeout);
+ mpi3mr_print_ioc_info(mrioc);
- ioc_state = mpi3mr_get_iocstate(mrioc);
- ioc_info(mrioc,
- "IOC in %s state after waiting for reset time\n",
- mpi3mr_iocstate_name(ioc_state));
+ retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc,
+ "%s :Failed to allocated reply sense buffers %d\n",
+ __func__, retval);
+ goto out_failed_noretry;
}
- if (ioc_state == MRIOC_STATE_READY) {
- retval = mpi3mr_issue_and_process_mur(mrioc,
- MPI3MR_RESET_FROM_BRINGUP);
- if (retval) {
- ioc_err(mrioc, "Failed to MU reset IOC error %d\n",
- retval);
- }
- ioc_state = mpi3mr_get_iocstate(mrioc);
+ retval = mpi3mr_alloc_chain_bufs(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
+ retval);
+ goto out_failed_noretry;
}
- if (ioc_state != MRIOC_STATE_RESET) {
- mpi3mr_print_fault_info(mrioc);
- retval = mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
- MPI3MR_RESET_FROM_BRINGUP);
- if (retval) {
- ioc_err(mrioc,
- "%s :Failed to soft reset IOC error %d\n",
- __func__, retval);
- goto out_failed;
- }
+
+ retval = mpi3mr_issue_iocinit(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
+ retval);
+ goto out_failed;
}
- ioc_state = mpi3mr_get_iocstate(mrioc);
- if (ioc_state != MRIOC_STATE_RESET) {
- retval = -1;
- ioc_err(mrioc, "Cannot bring IOC to reset state\n");
+
+ retval = mpi3mr_print_pkg_ver(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to get package version\n");
goto out_failed;
}
- retval = mpi3mr_setup_admin_qpair(mrioc);
+ retval = mpi3mr_setup_isr(mrioc, 0);
if (retval) {
- ioc_err(mrioc, "Failed to setup admin Qs: error %d\n",
+ ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
+ retval);
+ goto out_failed_noretry;
+ }
+
+ retval = mpi3mr_create_op_queues(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "Failed to create OpQueues error %d\n",
retval);
goto out_failed;
}
- retval = mpi3mr_bring_ioc_ready(mrioc);
+ retval = mpi3mr_enable_events(mrioc);
if (retval) {
- ioc_err(mrioc, "Failed to bring ioc ready: error %d\n",
+ ioc_err(mrioc, "failed to enable events %d\n",
retval);
goto out_failed;
}
- if (init_type != MPI3MR_IT_RESET) {
+ ioc_info(mrioc, "controller initialization completed successfully\n");
+ return retval;
+out_failed:
+ if (retry < 2) {
+ retry++;
+ ioc_warn(mrioc, "retrying controller initialization, retry_count:%d\n",
+ retry);
+ mpi3mr_memset_buffers(mrioc);
+ goto retry_init;
+ }
+out_failed_noretry:
+ ioc_err(mrioc, "controller initialization failed\n");
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP);
+ mrioc->unrecoverable = 1;
+ return retval;
+}
+
+/**
+ * mpi3mr_reinit_ioc - Re-Initialize the controller
+ * @mrioc: Adapter instance reference
+ * @is_resume: Called from resume or reset path
+ *
+ * This is the controller re-initialization routine, executed from
+ * the soft reset handler or resume callback. Creates
+ * operational reply queue pairs, allocate required memory for
+ * reply pool, sense buffer pool, issue IOC init request to the
+ * firmware, unmask the events and issue port enable to discover
+ * SAS/SATA/NVMe devices and RAID volumes.
+ *
+ * Return: 0 on success and non-zero on failure.
+ */
+int mpi3mr_reinit_ioc(struct mpi3mr_ioc *mrioc, u8 is_resume)
+{
+ int retval = 0;
+ u8 retry = 0;
+ struct mpi3_ioc_facts_data facts_data;
+
+retry_init:
+ dprint_reset(mrioc, "bringing up the controller to ready state\n");
+ retval = mpi3mr_bring_ioc_ready(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to bring to ready state\n");
+ goto out_failed_noretry;
+ }
+
+ if (is_resume) {
+ dprint_reset(mrioc, "setting up single ISR\n");
retval = mpi3mr_setup_isr(mrioc, 1);
if (retval) {
- ioc_err(mrioc, "Failed to setup ISR error %d\n",
- retval);
- goto out_failed;
+ ioc_err(mrioc, "failed to setup ISR\n");
+ goto out_failed_noretry;
}
} else
mpi3mr_ioc_enable_intr(mrioc);
+ dprint_reset(mrioc, "getting ioc_facts\n");
retval = mpi3mr_issue_iocfacts(mrioc, &facts_data);
if (retval) {
- ioc_err(mrioc, "Failed to Issue IOC Facts %d\n",
- retval);
+ ioc_err(mrioc, "failed to get ioc_facts\n");
goto out_failed;
}
- mpi3mr_process_factsdata(mrioc, &facts_data);
- if (init_type == MPI3MR_IT_INIT) {
- retval = mpi3mr_check_reset_dma_mask(mrioc);
- if (retval) {
- ioc_err(mrioc, "Resetting dma mask failed %d\n",
- retval);
- goto out_failed;
- }
+ dprint_reset(mrioc, "validating ioc_facts\n");
+ retval = mpi3mr_revalidate_factsdata(mrioc);
+ if (retval) {
+ ioc_err(mrioc, "failed to revalidate ioc_facts data\n");
+ goto out_failed_noretry;
}
mpi3mr_print_ioc_info(mrioc);
- retval = mpi3mr_alloc_reply_sense_bufs(mrioc);
+ dprint_reset(mrioc, "sending ioc_init\n");
+ retval = mpi3mr_issue_iocinit(mrioc);
if (retval) {
- ioc_err(mrioc,
- "%s :Failed to allocated reply sense buffers %d\n",
- __func__, retval);
+ ioc_err(mrioc, "failed to send ioc_init\n");
goto out_failed;
}
- if (init_type == MPI3MR_IT_INIT) {
- retval = mpi3mr_alloc_chain_bufs(mrioc);
- if (retval) {
- ioc_err(mrioc, "Failed to allocated chain buffers %d\n",
- retval);
- goto out_failed;
- }
- }
-
- retval = mpi3mr_issue_iocinit(mrioc);
+ dprint_reset(mrioc, "getting package version\n");
+ retval = mpi3mr_print_pkg_ver(mrioc);
if (retval) {
- ioc_err(mrioc, "Failed to Issue IOC Init %d\n",
- retval);
+ ioc_err(mrioc, "failed to get package version\n");
goto out_failed;
}
- mrioc->reply_free_queue_host_index = mrioc->num_reply_bufs;
- writel(mrioc->reply_free_queue_host_index,
- &mrioc->sysif_regs->reply_free_host_index);
- mrioc->sbq_host_index = mrioc->num_sense_bufs;
- writel(mrioc->sbq_host_index,
- &mrioc->sysif_regs->sense_buffer_free_host_index);
-
- if (init_type != MPI3MR_IT_RESET) {
+ if (is_resume) {
+ dprint_reset(mrioc, "setting up multiple ISR\n");
retval = mpi3mr_setup_isr(mrioc, 0);
if (retval) {
- ioc_err(mrioc, "Failed to re-setup ISR, error %d\n",
- retval);
- goto out_failed;
+ ioc_err(mrioc, "failed to re-setup ISR\n");
+ goto out_failed_noretry;
}
}
+ dprint_reset(mrioc, "creating operational queue pairs\n");
retval = mpi3mr_create_op_queues(mrioc);
if (retval) {
- ioc_err(mrioc, "Failed to create OpQueues error %d\n",
- retval);
+ ioc_err(mrioc, "failed to create operational queue pairs\n");
goto out_failed;
}
- if ((init_type != MPI3MR_IT_INIT) &&
- (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q)) {
- retval = -1;
+ if (mrioc->shost->nr_hw_queues > mrioc->num_op_reply_q) {
ioc_err(mrioc,
- "Cannot create minimum number of OpQueues expected:%d created:%d\n",
+ "cannot create minimum number of operational queues expected:%d created:%d\n",
mrioc->shost->nr_hw_queues, mrioc->num_op_reply_q);
- goto out_failed;
+ goto out_failed_noretry;
}
- for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
- mrioc->event_masks[i] = -1;
-
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_ADDED);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_INFO_CHANGED);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_DEVICE_STATUS_CHANGE);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DISCOVERY);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_SAS_BROADCAST_PRIMITIVE);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_PCIE_ENUMERATION);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_CABLE_MGMT);
- mpi3mr_unmask_events(mrioc, MPI3_EVENT_ENERGY_PACK_CHANGE);
-
- retval = mpi3mr_issue_event_notification(mrioc);
+ dprint_reset(mrioc, "enabling events\n");
+ retval = mpi3mr_enable_events(mrioc);
if (retval) {
- ioc_err(mrioc, "Failed to issue event notification %d\n",
- retval);
+ ioc_err(mrioc, "failed to enable events\n");
goto out_failed;
}
- if (init_type != MPI3MR_IT_INIT) {
- ioc_info(mrioc, "Issuing Port Enable\n");
- retval = mpi3mr_issue_port_enable(mrioc, 0);
- if (retval) {
- ioc_err(mrioc, "Failed to issue port enable %d\n",
- retval);
- goto out_failed;
- }
+ ioc_info(mrioc, "sending port enable\n");
+ retval = mpi3mr_issue_port_enable(mrioc, 0);
+ if (retval) {
+ ioc_err(mrioc, "failed to issue port enable\n");
+ goto out_failed;
}
- return retval;
+ ioc_info(mrioc, "controller %s completed successfully\n",
+ (is_resume)?"resume":"re-initialization");
+ return retval;
out_failed:
- if (init_type == MPI3MR_IT_INIT)
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
- else
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_REINIT_FAILURE);
-out_nocleanup:
+ if (retry < 2) {
+ retry++;
+ ioc_warn(mrioc, "retrying controller %s, retry_count:%d\n",
+ (is_resume)?"resume":"re-initialization", retry);
+ mpi3mr_memset_buffers(mrioc);
+ goto retry_init;
+ }
+out_failed_noretry:
+ ioc_err(mrioc, "controller %s is failed\n",
+ (is_resume)?"resume":"re-initialization");
+ mpi3mr_issue_reset(mrioc, MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT,
+ MPI3MR_RESET_FROM_CTLR_CLEANUP);
+ mrioc->unrecoverable = 1;
return retval;
}
@@ -3488,17 +3942,29 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
{
u16 i;
- memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
- memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
-
- memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
- memset(mrioc->host_tm_cmds.reply, 0,
- sizeof(*mrioc->host_tm_cmds.reply));
- for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
- memset(mrioc->dev_rmhs_cmds[i].reply, 0,
- sizeof(*mrioc->dev_rmhs_cmds[i].reply));
- memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
- memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+ mrioc->change_count = 0;
+ mrioc->active_poll_qcount = 0;
+ mrioc->default_qcount = 0;
+ if (mrioc->admin_req_base)
+ memset(mrioc->admin_req_base, 0, mrioc->admin_req_q_sz);
+ if (mrioc->admin_reply_base)
+ memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
+
+ if (mrioc->init_cmds.reply) {
+ memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
+ memset(mrioc->host_tm_cmds.reply, 0,
+ sizeof(*mrioc->host_tm_cmds.reply));
+ for (i = 0; i < MPI3MR_NUM_DEVRMCMD; i++)
+ memset(mrioc->dev_rmhs_cmds[i].reply, 0,
+ sizeof(*mrioc->dev_rmhs_cmds[i].reply));
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++)
+ memset(mrioc->evtack_cmds[i].reply, 0,
+ sizeof(*mrioc->evtack_cmds[i].reply));
+ memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+ memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
+ memset(mrioc->evtack_cmds_bitmap, 0,
+ mrioc->evtack_cmds_bitmap_sz);
+ }
for (i = 0; i < mrioc->num_queues; i++) {
mrioc->op_reply_qinfo[i].qid = 0;
@@ -3527,7 +3993,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
*
* Return: Nothing.
*/
-static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
+void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
{
u16 i;
struct mpi3mr_intr_info *intr_info;
@@ -3591,12 +4057,20 @@ static void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc)
kfree(mrioc->host_tm_cmds.reply);
mrioc->host_tm_cmds.reply = NULL;
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ kfree(mrioc->evtack_cmds[i].reply);
+ mrioc->evtack_cmds[i].reply = NULL;
+ }
+
kfree(mrioc->removepend_bitmap);
mrioc->removepend_bitmap = NULL;
kfree(mrioc->devrem_bitmap);
mrioc->devrem_bitmap = NULL;
+ kfree(mrioc->evtack_cmds_bitmap);
+ mrioc->evtack_cmds_bitmap = NULL;
+
kfree(mrioc->chain_bitmap);
mrioc->chain_bitmap = NULL;
@@ -3663,7 +4137,7 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
ioc_config = readl(&mrioc->sysif_regs->ioc_configuration);
ioc_config |= MPI3_SYSIF_IOC_CONFIG_SHUTDOWN_NORMAL;
- ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN;
+ ioc_config |= MPI3_SYSIF_IOC_CONFIG_DEVICE_SHUTDOWN_SEND_REQ;
writel(ioc_config, &mrioc->sysif_regs->ioc_configuration);
@@ -3699,21 +4173,17 @@ static void mpi3mr_issue_ioc_shutdown(struct mpi3mr_ioc *mrioc)
/**
* mpi3mr_cleanup_ioc - Cleanup controller
* @mrioc: Adapter instance reference
- * @reason: Cleanup reason
*
* controller cleanup handler, Message unit reset or soft reset
- * and shutdown notification is issued to the controller and the
- * associated memory resources are freed.
+ * and shutdown notification are issued to the controller.
*
* Return: Nothing.
*/
-void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason)
+void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc)
{
enum mpi3mr_iocstate ioc_state;
- if (reason == MPI3MR_COMPLETE_CLEANUP)
- mpi3mr_stop_watchdog(mrioc);
-
+ dprint_exit(mrioc, "cleaning up the controller\n");
mpi3mr_ioc_disable_intr(mrioc);
ioc_state = mpi3mr_get_iocstate(mrioc);
@@ -3725,15 +4195,9 @@ void mpi3mr_cleanup_ioc(struct mpi3mr_ioc *mrioc, u8 reason)
mpi3mr_issue_reset(mrioc,
MPI3_SYSIF_HOST_DIAG_RESET_ACTION_SOFT_RESET,
MPI3MR_RESET_FROM_MUR_FAILURE);
-
- if (reason != MPI3MR_REINIT_FAILURE)
- mpi3mr_issue_ioc_shutdown(mrioc);
- }
-
- if (reason == MPI3MR_COMPLETE_CLEANUP) {
- mpi3mr_free_mem(mrioc);
- mpi3mr_cleanup_resources(mrioc);
+ mpi3mr_issue_ioc_shutdown(mrioc);
}
+ dprint_exit(mrioc, "controller cleanup completed\n");
}
/**
@@ -3782,41 +4246,11 @@ static void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc)
cmdptr = &mrioc->dev_rmhs_cmds[i];
mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
}
-}
-/**
- * mpi3mr_diagfault_reset_handler - Diag fault reset handler
- * @mrioc: Adapter instance reference
- * @reset_reason: Reset reason code
- *
- * This is an handler for issuing diag fault reset from the
- * applications through IOCTL path to stop the execution of the
- * controller
- *
- * Return: 0 on success, non-zero on failure.
- */
-int mpi3mr_diagfault_reset_handler(struct mpi3mr_ioc *mrioc,
- u32 reset_reason)
-{
- int retval = 0;
-
- ioc_info(mrioc, "Entry: reason code: %s\n",
- mpi3mr_reset_rc_name(reset_reason));
- mrioc->reset_in_progress = 1;
-
- mpi3mr_ioc_disable_intr(mrioc);
-
- retval = mpi3mr_issue_reset(mrioc,
- MPI3_SYSIF_HOST_DIAG_RESET_ACTION_DIAG_FAULT, reset_reason);
-
- if (retval) {
- ioc_err(mrioc, "The diag fault reset failed: reason %d\n",
- reset_reason);
- mpi3mr_ioc_enable_intr(mrioc);
+ for (i = 0; i < MPI3MR_NUM_EVTACKCMD; i++) {
+ cmdptr = &mrioc->evtack_cmds[i];
+ mpi3mr_drv_cmd_comp_reset(mrioc, cmdptr);
}
- ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
- mrioc->reset_in_progress = 0;
- return retval;
}
/**
@@ -3847,34 +4281,44 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
unsigned long flags;
u32 host_diagnostic, timeout = MPI3_SYSIF_DIAG_SAVE_TIMEOUT * 10;
- if (mrioc->fault_dbg) {
- if (snapdump)
- mpi3mr_set_diagsave(mrioc);
- mpi3mr_kill_ioc(mrioc, reset_reason);
- }
-
+ /* Block the reset handler while a diag save is in progress */
+ dprint_reset(mrioc,
+ "soft_reset_handler: check and block on diagsave_timeout(%d)\n",
+ mrioc->diagsave_timeout);
+ while (mrioc->diagsave_timeout)
+ ssleep(1);
/*
* Block new resets until the currently executing one is finished and
* return the status of the existing reset for all blocked resets
*/
+ dprint_reset(mrioc, "soft_reset_handler: acquiring reset_mutex\n");
if (!mutex_trylock(&mrioc->reset_mutex)) {
- ioc_info(mrioc, "Another reset in progress\n");
- return -1;
+ ioc_info(mrioc,
+ "controller reset triggered by %s is blocked due to another reset in progress\n",
+ mpi3mr_reset_rc_name(reset_reason));
+ do {
+ ssleep(1);
+ } while (mrioc->reset_in_progress == 1);
+ ioc_info(mrioc,
+ "returning previous reset result(%d) for the reset triggered by %s\n",
+ mrioc->prev_reset_result,
+ mpi3mr_reset_rc_name(reset_reason));
+ return mrioc->prev_reset_result;
}
+ ioc_info(mrioc, "controller reset is triggered by %s\n",
+ mpi3mr_reset_rc_name(reset_reason));
+
mrioc->reset_in_progress = 1;
+ mrioc->prev_reset_result = -1;
if ((!snapdump) && (reset_reason != MPI3MR_RESET_FROM_FAULT_WATCH) &&
+ (reset_reason != MPI3MR_RESET_FROM_FIRMWARE) &&
(reset_reason != MPI3MR_RESET_FROM_CIACTIV_FAULT)) {
for (i = 0; i < MPI3_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
mrioc->event_masks[i] = -1;
- retval = mpi3mr_issue_event_notification(mrioc);
-
- if (retval) {
- ioc_err(mrioc,
- "Failed to turn off events prior to reset %d\n",
- retval);
- }
+ dprint_reset(mrioc, "soft_reset_handler: masking events\n");
+ mpi3mr_issue_event_notification(mrioc);
}
mpi3mr_wait_for_host_io(mrioc, MPI3MR_RESET_HOST_IOWAIT_TIMEOUT);
@@ -3904,15 +4348,20 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
goto out;
}
- mpi3mr_flush_delayed_rmhs_list(mrioc);
+ mpi3mr_flush_delayed_cmd_lists(mrioc);
mpi3mr_flush_drv_cmds(mrioc);
memset(mrioc->devrem_bitmap, 0, mrioc->devrem_bitmap_sz);
memset(mrioc->removepend_bitmap, 0, mrioc->dev_handle_bitmap_sz);
+ memset(mrioc->evtack_cmds_bitmap, 0, mrioc->evtack_cmds_bitmap_sz);
mpi3mr_cleanup_fwevt_list(mrioc);
mpi3mr_flush_host_io(mrioc);
mpi3mr_invalidate_devhandles(mrioc);
+ if (mrioc->prepare_for_reset) {
+ mrioc->prepare_for_reset = 0;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ }
mpi3mr_memset_buffers(mrioc);
- retval = mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESET);
+ retval = mpi3mr_reinit_ioc(mrioc, 0);
if (retval) {
pr_err(IOCNAME "reinit after soft reset failed: reason %d\n",
mrioc->name, reset_reason);
@@ -3922,8 +4371,8 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
out:
if (!retval) {
+ mrioc->diagsave_timeout = 0;
mrioc->reset_in_progress = 0;
- scsi_unblock_requests(mrioc->shost);
mpi3mr_rfresh_tgtdevs(mrioc);
mrioc->ts_update_counter = 0;
spin_lock_irqsave(&mrioc->watchdog_lock, flags);
@@ -3939,8 +4388,9 @@ out:
mrioc->reset_in_progress = 0;
retval = -1;
}
-
+ mrioc->prev_reset_result = retval;
mutex_unlock(&mrioc->reset_mutex);
- ioc_info(mrioc, "%s\n", ((retval == 0) ? "SUCCESS" : "FAILED"));
+ ioc_info(mrioc, "controller reset is %s\n",
+ ((retval == 0) ? "successful" : "failed"));
return retval;
}
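
The trylock-based serialization above means a reset request that loses the race does not queue a second reset; it simply waits out the winner and adopts its result via prev_reset_result. Condensed to its essentials (a sketch, not the full handler; the reset body is elided):

	if (!mutex_trylock(&mrioc->reset_mutex)) {
		/* Lost the race: wait for the winner and adopt its result
		 * instead of stacking a second reset. */
		do {
			ssleep(1);
		} while (mrioc->reset_in_progress == 1);
		return mrioc->prev_reset_result;
	}
	mrioc->reset_in_progress = 1;
	mrioc->prev_reset_result = -1;
	/* ... perform the reset, setting retval ... */
	mrioc->reset_in_progress = 0;
	mrioc->prev_reset_result = retval;	/* published for blocked waiters */
	mutex_unlock(&mrioc->reset_mutex);
	return retval;
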
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index fe10f257b5a4..284117da9086 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -34,6 +34,9 @@ MODULE_PARM_DESC(logging_level,
" bits for enabling additional logging info (default=0)");
/* Forward declarations*/
+static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx);
+
/**
* mpi3mr_host_tag_for_scmd - Get host tag for a scmd
* @mrioc: Adapter instance reference
@@ -418,6 +421,74 @@ out:
}
/**
+ * mpi3mr_count_dev_pending - Count commands pending for a LUN
+ * @rq: Block request
+ * @data: SCSI device reference
+ * @reserved: Unused
+ *
+ * This iterator function is called for each SCSI command in a
+ * host; if the command is pending in the LLD for the specific
+ * device (LUN), the device-specific pending I/O counter in the
+ * device structure is incremented.
+ *
+ * Return: true always.
+ */
+
+static bool mpi3mr_count_dev_pending(struct request *rq,
+ void *data, bool reserved)
+{
+ struct scsi_device *sdev = (struct scsi_device *)data;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = sdev->hostdata;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+ if (scmd->device == sdev)
+ sdev_priv_data->pend_count++;
+ }
+
+out:
+ return true;
+}
+
+/**
+ * mpi3mr_count_tgt_pending - Count commands pending for target
+ * @rq: Block request
+ * @data: SCSI target reference
+ * @reserved: Unused
+ *
+ * This iterator function is called for each SCSI command in a
+ * host; if the command is pending in the LLD for the specific
+ * target, the target-specific pending I/O counter in the target
+ * structure is incremented.
+ *
+ * Return: true always.
+ */
+
+static bool mpi3mr_count_tgt_pending(struct request *rq,
+ void *data, bool reserved)
+{
+ struct scsi_target *starget = (struct scsi_target *)data;
+ struct mpi3mr_stgt_priv_data *stgt_priv_data = starget->hostdata;
+ struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
+ struct scmd_priv *priv;
+
+ if (scmd) {
+ priv = scsi_cmd_priv(scmd);
+ if (!priv->in_lld_scope)
+ goto out;
+ if (scmd->device && (scsi_target(scmd->device) == starget))
+ stgt_priv_data->pend_count++;
+ }
+
+out:
+ return true;
+}
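
blk_mq_tagset_busy_iter() invokes the callback once for every request the block layer considers in flight on the host, so the two counters above only need to filter by LLD ownership and by device or target. A hypothetical host-wide variant illustrating the same iterator contract:

/* Hypothetical variant: count every request still inside the LLD. */
static bool count_all_pending(struct request *rq, void *data, bool reserved)
{
	u32 *count = data;
	struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
	struct scmd_priv *priv;

	if (scmd) {
		priv = scsi_cmd_priv(scmd);
		if (priv->in_lld_scope)
			(*count)++;
	}
	return true;	/* returning false would stop the walk early */
}

/* usage:
 *	u32 n = 0;
 *	blk_mq_tagset_busy_iter(&shost->tag_set, count_all_pending, &n);
 */
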
+
+/**
* mpi3mr_flush_host_io - Flush host I/Os
* @mrioc: Adapter instance reference
*
@@ -742,11 +813,18 @@ mpi3mr_update_sdev(struct scsi_device *sdev, void *data)
switch (tgtdev->dev_type) {
case MPI3_DEVICE_DEVFORM_PCIE:
/*The block layer hw sector size = 512*/
- blk_queue_max_hw_sectors(sdev->request_queue,
- tgtdev->dev_spec.pcie_inf.mdts / 512);
- blk_queue_virt_boundary(sdev->request_queue,
- ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
-
+ if ((tgtdev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgtdev->dev_spec.pcie_inf.mdts / 512);
+ if (tgtdev->dev_spec.pcie_inf.pgsz == 0)
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
+ else
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgtdev->dev_spec.pcie_inf.pgsz) - 1));
+ }
break;
default:
break;
@@ -824,6 +902,17 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
scsi_tgt_priv_data->dev_type = tgtdev->dev_type;
}
+ switch (dev_pg0->access_status) {
+ case MPI3_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI3_DEVICE0_ASTATUS_PREPARE:
+ case MPI3_DEVICE0_ASTATUS_NEEDS_INITIALIZATION:
+ case MPI3_DEVICE0_ASTATUS_DEVICE_MISSING_DELAY:
+ break;
+ default:
+ tgtdev->is_hidden = 1;
+ break;
+ }
+
switch (tgtdev->dev_type) {
case MPI3_DEVICE_DEVFORM_SAS_SATA:
{
@@ -848,6 +937,7 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
&dev_pg0->device_specific.pcie_format;
u16 dev_info = le16_to_cpu(pcieinf->device_info);
+ tgtdev->dev_spec.pcie_inf.dev_info = dev_info;
tgtdev->dev_spec.pcie_inf.capb =
le32_to_cpu(pcieinf->capabilities);
tgtdev->dev_spec.pcie_inf.mdts = MPI3MR_DEFAULT_MDTS;
@@ -858,14 +948,18 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
le32_to_cpu(pcieinf->maximum_data_transfer_size);
tgtdev->dev_spec.pcie_inf.pgsz = pcieinf->page_size;
tgtdev->dev_spec.pcie_inf.reset_to =
- pcieinf->controller_reset_to;
+ max_t(u8, pcieinf->controller_reset_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
tgtdev->dev_spec.pcie_inf.abort_to =
- pcieinf->nv_me_abort_to;
+ max_t(u8, pcieinf->nvme_abort_to,
+ MPI3MR_INTADMCMD_TIMEOUT);
}
if (tgtdev->dev_spec.pcie_inf.mdts > (1024 * 1024))
tgtdev->dev_spec.pcie_inf.mdts = (1024 * 1024);
- if ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
- MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE)
+ if (((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) &&
+ ((dev_info & MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) !=
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_SCSI_DEVICE))
tgtdev->is_hidden = 1;
if (!mrioc->shost)
break;
@@ -1313,7 +1407,7 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
evt_ack:
if (fwevt->send_ack)
- mpi3mr_send_event_ack(mrioc, fwevt->event_id,
+ mpi3mr_process_event_ack(mrioc, fwevt->event_id,
fwevt->evt_ctx);
out:
/* Put fwevt reference count to neutralize kref_init increment */
@@ -1377,24 +1471,33 @@ static int mpi3mr_create_tgtdev(struct mpi3mr_ioc *mrioc,
}
/**
- * mpi3mr_flush_delayed_rmhs_list - Flush pending commands
+ * mpi3mr_flush_delayed_cmd_lists - Flush pending commands
* @mrioc: Adapter instance reference
*
- * Flush pending commands in the delayed removal handshake list
- * due to a controller reset or driver removal as a cleanup.
+ * Flush pending commands in the delayed lists due to a
+ * controller reset or driver removal as a cleanup.
*
* Return: Nothing
*/
-void mpi3mr_flush_delayed_rmhs_list(struct mpi3mr_ioc *mrioc)
+void mpi3mr_flush_delayed_cmd_lists(struct mpi3mr_ioc *mrioc)
{
struct delayed_dev_rmhs_node *_rmhs_node;
+ struct delayed_evt_ack_node *_evtack_node;
+ dprint_reset(mrioc, "flushing delayed dev_remove_hs commands\n");
while (!list_empty(&mrioc->delayed_rmhs_list)) {
_rmhs_node = list_entry(mrioc->delayed_rmhs_list.next,
struct delayed_dev_rmhs_node, list);
list_del(&_rmhs_node->list);
kfree(_rmhs_node);
}
+ dprint_reset(mrioc, "flushing delayed event ack commands\n");
+ while (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
+ _evtack_node = list_entry(mrioc->delayed_evtack_cmds_list.next,
+ struct delayed_evt_ack_node, list);
+ list_del(&_evtack_node->list);
+ kfree(_evtack_node);
+ }
}
/**
@@ -1611,6 +1714,141 @@ out_failed:
}
/**
+ * mpi3mr_complete_evt_ack - event ack request completion
+ * @mrioc: Adapter instance reference
+ * @drv_cmd: Internal command tracker
+ *
+ * This is the completion handler for a non-blocking event
+ * acknowledgment sent to the firmware; it issues any pending
+ * event acknowledgment request.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_complete_evt_ack(struct mpi3mr_ioc *mrioc,
+ struct mpi3mr_drv_cmd *drv_cmd)
+{
+ u16 cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+ struct delayed_evt_ack_node *delayed_evtack = NULL;
+
+ if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
+ dprint_event_th(mrioc,
+ "immediate event ack failed with ioc_status(0x%04x) log_info(0x%08x)\n",
+ (drv_cmd->ioc_status & MPI3_IOCSTATUS_STATUS_MASK),
+ drv_cmd->ioc_loginfo);
+ }
+
+ if (!list_empty(&mrioc->delayed_evtack_cmds_list)) {
+ delayed_evtack =
+ list_entry(mrioc->delayed_evtack_cmds_list.next,
+ struct delayed_evt_ack_node, list);
+ mpi3mr_send_event_ack(mrioc, delayed_evtack->event, drv_cmd,
+ delayed_evtack->event_ctx);
+ list_del(&delayed_evtack->list);
+ kfree(delayed_evtack);
+ return;
+ }
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
+}
+
+/**
+ * mpi3mr_send_event_ack - Issue event acknowledgment request
+ * @mrioc: Adapter instance reference
+ * @event: MPI3 event id
+ * @cmdparam: Internal command tracker
+ * @event_ctx: event context
+ *
+ * Issues an event acknowledgment request to the firmware if
+ * there is a free command to send the event ack; otherwise the
+ * request is added to a pending list and processed on completion
+ * of a prior event acknowledgment.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_send_event_ack(struct mpi3mr_ioc *mrioc, u8 event,
+ struct mpi3mr_drv_cmd *cmdparam, u32 event_ctx)
+{
+ struct mpi3_event_ack_request evtack_req;
+ int retval = 0;
+ u8 retrycount = 5;
+ u16 cmd_idx = MPI3MR_NUM_EVTACKCMD;
+ struct mpi3mr_drv_cmd *drv_cmd = cmdparam;
+ struct delayed_evt_ack_node *delayed_evtack = NULL;
+
+ if (drv_cmd) {
+ dprint_event_th(mrioc,
+ "sending delayed event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
+ event, event_ctx);
+ goto issue_cmd;
+ }
+ dprint_event_th(mrioc,
+ "sending event ack in the top half for event(0x%02x), event_ctx(0x%08x)\n",
+ event, event_ctx);
+ do {
+ cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
+ MPI3MR_NUM_EVTACKCMD);
+ if (cmd_idx < MPI3MR_NUM_EVTACKCMD) {
+ if (!test_and_set_bit(cmd_idx,
+ mrioc->evtack_cmds_bitmap))
+ break;
+ cmd_idx = MPI3MR_NUM_EVTACKCMD;
+ }
+ } while (retrycount--);
+
+ if (cmd_idx >= MPI3MR_NUM_EVTACKCMD) {
+ delayed_evtack = kzalloc(sizeof(*delayed_evtack),
+ GFP_ATOMIC);
+ if (!delayed_evtack)
+ return;
+ INIT_LIST_HEAD(&delayed_evtack->list);
+ delayed_evtack->event = event;
+ delayed_evtack->event_ctx = event_ctx;
+ list_add_tail(&delayed_evtack->list,
+ &mrioc->delayed_evtack_cmds_list);
+ dprint_event_th(mrioc,
+ "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is postponed\n",
+ event, event_ctx);
+ return;
+ }
+ drv_cmd = &mrioc->evtack_cmds[cmd_idx];
+
+issue_cmd:
+ cmd_idx = drv_cmd->host_tag - MPI3MR_HOSTTAG_EVTACKCMD_MIN;
+
+ memset(&evtack_req, 0, sizeof(evtack_req));
+ if (drv_cmd->state & MPI3MR_CMD_PENDING) {
+ dprint_event_th(mrioc,
+ "sending event ack failed due to command in use\n");
+ goto out;
+ }
+ drv_cmd->state = MPI3MR_CMD_PENDING;
+ drv_cmd->is_waiting = 0;
+ drv_cmd->callback = mpi3mr_complete_evt_ack;
+ evtack_req.host_tag = cpu_to_le16(drv_cmd->host_tag);
+ evtack_req.function = MPI3_FUNCTION_EVENT_ACK;
+ evtack_req.event = event;
+ evtack_req.event_context = cpu_to_le32(event_ctx);
+ retval = mpi3mr_admin_request_post(mrioc, &evtack_req,
+ sizeof(evtack_req), 1);
+ if (retval) {
+ dprint_event_th(mrioc,
+ "posting event ack request is failed\n");
+ goto out_failed;
+ }
+
+ dprint_event_th(mrioc,
+ "event ack in the top half for event(0x%02x), event_ctx(0x%08x) is posted\n",
+ event, event_ctx);
+out:
+ return;
+out_failed:
+ drv_cmd->state = MPI3MR_CMD_NOTUSED;
+ drv_cmd->callback = NULL;
+ clear_bit(cmd_idx, mrioc->evtack_cmds_bitmap);
+}
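
The slot allocation above is lock-free: find_first_zero_bit() only nominates a slot, and the atomic test_and_set_bit() is what actually claims it, with the loop retrying a few times on a lost race before falling back to the delayed list. Isolated into a hypothetical helper:

/* Hypothetical helper isolating the reservation loop used above. */
static u16 evtack_slot_get(struct mpi3mr_ioc *mrioc)
{
	u16 cmd_idx;
	u8 retrycount = 5;

	do {
		cmd_idx = find_first_zero_bit(mrioc->evtack_cmds_bitmap,
					      MPI3MR_NUM_EVTACKCMD);
		if (cmd_idx < MPI3MR_NUM_EVTACKCMD &&
		    !test_and_set_bit(cmd_idx, mrioc->evtack_cmds_bitmap))
			return cmd_idx;		/* claimed atomically */
	} while (retrycount--);

	return MPI3MR_NUM_EVTACKCMD;	/* all busy: caller defers to the list */
}
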
+
+/**
* mpi3mr_pcietopochg_evt_th - PCIETopologyChange evt tophalf
* @mrioc: Adapter instance reference
* @event_reply: event data
@@ -1819,6 +2057,40 @@ out:
}
/**
+ * mpi3mr_preparereset_evt_th - Prepare for reset event tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Blocks and unblocks host level I/O based on the reason code
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_prepare_for_reset *evtdata =
+ (struct mpi3_event_data_prepare_for_reset *)event_reply->event_data;
+
+ if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_START) {
+ dprint_event_th(mrioc,
+ "prepare for reset event top half with rc=start\n");
+ if (mrioc->prepare_for_reset)
+ return;
+ mrioc->prepare_for_reset = 1;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ } else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
+ dprint_event_th(mrioc,
+ "prepare for reset top half with rc=abort\n");
+ mrioc->prepare_for_reset = 0;
+ mrioc->prepare_for_reset_timeout_counter = 0;
+ }
+ if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
+ == MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_REQUIRED)
+ mpi3mr_send_event_ack(mrioc, event_reply->event, NULL,
+ le32_to_cpu(event_reply->event_context));
+}
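
An RC_START leaves the driver in the prepare_for_reset state until the firmware either performs the reset or sends an RC_ABORT, so something must bound the wait; the timeout counter reset above suggests a periodic watchdog check. A hedged sketch of such a check; MPI3MR_PREPARE_FOR_RESET_TIMEOUT is an assumed bound, not defined in this hunk:

	/* Hypothetical watchdog fragment: expire a stuck prepare window. */
	if (mrioc->prepare_for_reset &&
	    (++mrioc->prepare_for_reset_timeout_counter >
	     MPI3MR_PREPARE_FOR_RESET_TIMEOUT))	/* assumed constant */
		mpi3mr_soft_reset_handler(mrioc,
		    MPI3MR_RESET_FROM_FIRMWARE, 0);
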
+
+/**
* mpi3mr_energypackchg_evt_th - Energy pack change evt tophalf
* @mrioc: Adapter instance reference
* @event_reply: event data
@@ -1848,6 +2120,66 @@ static void mpi3mr_energypackchg_evt_th(struct mpi3mr_ioc *mrioc,
}
/**
+ * mpi3mr_tempthreshold_evt_th - Temp threshold event tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Displays temperature threshold event details and fault code
+ * if any is hit due to temperature exceeding threshold.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_tempthreshold_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_temp_threshold *evtdata =
+ (struct mpi3_event_data_temp_threshold *)event_reply->event_data;
+
+ ioc_err(mrioc, "Temperature threshold levels %s%s%s exceeded for sensor: %d !!! Current temperature in Celsius: %d\n",
+ (le16_to_cpu(evtdata->status) & 0x1) ? "Warning " : " ",
+ (le16_to_cpu(evtdata->status) & 0x2) ? "Critical " : " ",
+ (le16_to_cpu(evtdata->status) & 0x4) ? "Fatal " : " ", evtdata->sensor_num,
+ le16_to_cpu(evtdata->current_temperature));
+ mpi3mr_print_fault_info(mrioc);
+}
+
+/**
+ * mpi3mr_cablemgmt_evt_th - Cable management event tophalf
+ * @mrioc: Adapter instance reference
+ * @event_reply: event data
+ *
+ * Displays cable management event details.
+ *
+ * Return: Nothing
+ */
+static void mpi3mr_cablemgmt_evt_th(struct mpi3mr_ioc *mrioc,
+ struct mpi3_event_notification_reply *event_reply)
+{
+ struct mpi3_event_data_cable_management *evtdata =
+ (struct mpi3_event_data_cable_management *)event_reply->event_data;
+
+ switch (evtdata->status) {
+ case MPI3_EVENT_CABLE_MGMT_STATUS_INSUFFICIENT_POWER:
+ {
+ ioc_info(mrioc, "An active cable with receptacle_id %d cannot be powered.\n"
+ "Devices connected to this cable are not detected.\n"
+ "This cable requires %d mW of power.\n",
+ evtdata->receptacle_id,
+ le32_to_cpu(evtdata->active_cable_power_requirement));
+ break;
+ }
+ case MPI3_EVENT_CABLE_MGMT_STATUS_DEGRADED:
+ {
+ ioc_info(mrioc, "A cable with receptacle_id %d is not running at optimal speed\n",
+ evtdata->receptacle_id);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/**
* mpi3mr_os_handle_events - Firmware event handler
* @mrioc: Adapter instance reference
* @event_reply: event data
@@ -1905,6 +2237,12 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
mpi3mr_pcietopochg_evt_th(mrioc, event_reply);
break;
}
+ case MPI3_EVENT_PREPARE_FOR_RESET:
+ {
+ mpi3mr_preparereset_evt_th(mrioc, event_reply);
+ ack_req = 0;
+ break;
+ }
case MPI3_EVENT_DEVICE_INFO_CHANGED:
{
process_evt_bh = 1;
@@ -1915,9 +2253,18 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
mpi3mr_energypackchg_evt_th(mrioc, event_reply);
break;
}
+ case MPI3_EVENT_TEMP_THRESHOLD:
+ {
+ mpi3mr_tempthreshold_evt_th(mrioc, event_reply);
+ break;
+ }
+ case MPI3_EVENT_CABLE_MGMT:
+ {
+ mpi3mr_cablemgmt_evt_th(mrioc, event_reply);
+ break;
+ }
case MPI3_EVENT_ENCL_DEVICE_STATUS_CHANGE:
case MPI3_EVENT_SAS_DISCOVERY:
- case MPI3_EVENT_CABLE_MGMT:
case MPI3_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
case MPI3_EVENT_SAS_BROADCAST_PRIMITIVE:
case MPI3_EVENT_PCIE_ENUMERATION:
@@ -2520,49 +2867,63 @@ static int mpi3mr_build_sg_scmd(struct mpi3mr_ioc *mrioc,
}
/**
- * mpi3mr_print_response_code - print TM response as a string
- * @mrioc: Adapter instance reference
+ * mpi3mr_tm_response_name - get TM response as a string
* @resp_code: TM response code
*
- * Print TM response code as a readable string.
+ * Convert a known task management response code to a
+ * readable string.
*
- * Return: Nothing.
+ * Return: response code string.
*/
-static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code)
+static const char *mpi3mr_tm_response_name(u8 resp_code)
{
char *desc;
switch (resp_code) {
- case MPI3MR_RSP_TM_COMPLETE:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
desc = "task management request completed";
break;
- case MPI3MR_RSP_INVALID_FRAME:
+ case MPI3_SCSITASKMGMT_RSPCODE_INVALID_FRAME:
desc = "invalid frame";
break;
- case MPI3MR_RSP_TM_NOT_SUPPORTED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_FUNCTION_NOT_SUPPORTED:
desc = "task management request not supported";
break;
- case MPI3MR_RSP_TM_FAILED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_FAILED:
desc = "task management request failed";
break;
- case MPI3MR_RSP_TM_SUCCEEDED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
desc = "task management request succeeded";
break;
- case MPI3MR_RSP_TM_INVALID_LUN:
- desc = "invalid lun";
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_INVALID_LUN:
+ desc = "invalid LUN";
break;
- case MPI3MR_RSP_TM_OVERLAPPED_TAG:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_OVERLAPPED_TAG:
desc = "overlapped tag attempted";
break;
- case MPI3MR_RSP_IO_QUEUED_ON_IOC:
+ case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
desc = "task queued, however not sent to target";
break;
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_NVME_DENIED:
+ desc = "task management request denied by NVMe device";
+ break;
default:
desc = "unknown";
break;
}
- ioc_info(mrioc, "%s :response_code(0x%01x): %s\n", __func__,
- resp_code, desc);
+
+ return desc;
+}
+
+inline void mpi3mr_poll_pend_io_completions(struct mpi3mr_ioc *mrioc)
+{
+ int i;
+ int num_of_reply_queues =
+ mrioc->num_op_reply_q + mrioc->op_reply_q_offset;
+
+ for (i = mrioc->op_reply_q_offset; i < num_of_reply_queues; i++)
+ mpi3mr_process_op_reply_q(mrioc,
+ mrioc->intr_info[i].op_reply_q);
}
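
This helper synchronously drains every operational reply queue. It is consumed later in the task-management path in a disable/poll/enable/poll sequence: the first pass reaps completions that already landed while interrupts were quiesced, and the second pass closes the window against completions racing with interrupt re-enable:

	mpi3mr_ioc_disable_intr(mrioc);		/* also synchronizes IRQ handlers */
	mpi3mr_poll_pend_io_completions(mrioc);
	mpi3mr_ioc_enable_intr(mrioc);
	mpi3mr_poll_pend_io_completions(mrioc);
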
/**
@@ -2572,9 +2933,10 @@ static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code)
* @handle: Device handle
* @lun: lun ID
* @htag: Host tag of the TM request
+ * @timeout: TM timeout value
* @drv_cmd: Internal command tracker
* @resp_code: Response code place holder
- * @cmd_priv: SCSI command private data
+ * @scmd: SCSI command
*
* Issues a Task Management Request to the controller for a
* specified target, lun and command and wait for its completion
@@ -2586,14 +2948,16 @@ static void mpi3mr_print_response_code(struct mpi3mr_ioc *mrioc, u8 resp_code)
static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
u16 handle, uint lun, u16 htag, ulong timeout,
struct mpi3mr_drv_cmd *drv_cmd,
- u8 *resp_code, struct scmd_priv *cmd_priv)
+ u8 *resp_code, struct scsi_cmnd *scmd)
{
struct mpi3_scsi_task_mgmt_request tm_req;
struct mpi3_scsi_task_mgmt_reply *tm_reply = NULL;
int retval = 0;
struct mpi3mr_tgt_dev *tgtdev = NULL;
struct mpi3mr_stgt_priv_data *scsi_tgt_priv_data = NULL;
- struct op_req_qinfo *op_req_q = NULL;
+ struct scmd_priv *cmd_priv = NULL;
+ struct scsi_device *sdev = NULL;
+ struct mpi3mr_sdev_priv_data *sdev_priv_data = NULL;
ioc_info(mrioc, "%s :Issue TM: TM type (0x%x) for devhandle 0x%04x\n",
__func__, tm_type, handle);
@@ -2630,16 +2994,21 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
tm_req.function = MPI3_FUNCTION_SCSI_TASK_MGMT;
tgtdev = mpi3mr_get_tgtdev_by_handle(mrioc, handle);
- if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata) {
- scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
- tgtdev->starget->hostdata;
- atomic_inc(&scsi_tgt_priv_data->block_io);
- }
- if (cmd_priv) {
- op_req_q = &mrioc->req_qinfo[cmd_priv->req_q_idx];
- tm_req.task_host_tag = cpu_to_le16(cmd_priv->host_tag);
- tm_req.task_request_queue_id = cpu_to_le16(op_req_q->qid);
+
+ if (scmd) {
+ sdev = scmd->device;
+ sdev_priv_data = sdev->hostdata;
+ scsi_tgt_priv_data = ((sdev_priv_data) ?
+ sdev_priv_data->tgt_priv_data : NULL);
+ } else {
+ if (tgtdev && tgtdev->starget && tgtdev->starget->hostdata)
+ scsi_tgt_priv_data = (struct mpi3mr_stgt_priv_data *)
+ tgtdev->starget->hostdata;
}
+
+ if (scsi_tgt_priv_data)
+ atomic_inc(&scsi_tgt_priv_data->block_io);
+
if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
timeout = tgtdev->dev_spec.pcie_inf.abort_to;
@@ -2656,39 +3025,49 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
wait_for_completion_timeout(&drv_cmd->done, (timeout * HZ));
if (!(drv_cmd->state & MPI3MR_CMD_COMPLETE)) {
- ioc_err(mrioc, "%s :Issue TM: command timed out\n", __func__);
drv_cmd->is_waiting = 0;
retval = -1;
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
+ if (!(drv_cmd->state & MPI3MR_CMD_RESET)) {
+ dprint_tm(mrioc,
+ "task management request timed out after %ld seconds\n",
+ timeout);
+ if (mrioc->logging_level & MPI3_DEBUG_TM)
+ dprint_dump_req(&tm_req, sizeof(tm_req)/4);
+ mpi3mr_soft_reset_handler(mrioc,
+ MPI3MR_RESET_FROM_TM_TIMEOUT, 1);
+ }
goto out_unlock;
}
- if (drv_cmd->state & MPI3MR_CMD_REPLY_VALID)
- tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
-
- if (drv_cmd->ioc_status != MPI3_IOCSTATUS_SUCCESS) {
- ioc_err(mrioc,
- "%s :Issue TM: handle(0x%04x) Failed ioc_status(0x%04x) Loginfo(0x%08x)\n",
- __func__, handle, drv_cmd->ioc_status,
- drv_cmd->ioc_loginfo);
+ if (!(drv_cmd->state & MPI3MR_CMD_REPLY_VALID)) {
+ dprint_tm(mrioc, "invalid task management reply message\n");
retval = -1;
goto out_unlock;
}
- if (!tm_reply) {
- ioc_err(mrioc, "%s :Issue TM: No TM Reply message\n", __func__);
+ tm_reply = (struct mpi3_scsi_task_mgmt_reply *)drv_cmd->reply;
+
+ switch (drv_cmd->ioc_status) {
+ case MPI3_IOCSTATUS_SUCCESS:
+ *resp_code = le32_to_cpu(tm_reply->response_data) &
+ MPI3MR_RI_MASK_RESPCODE;
+ break;
+ case MPI3_IOCSTATUS_SCSI_IOC_TERMINATED:
+ *resp_code = MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE;
+ break;
+ default:
+ dprint_tm(mrioc,
+ "task management request to handle(0x%04x) is failed with ioc_status(0x%04x) log_info(0x%08x)\n",
+ handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo);
retval = -1;
goto out_unlock;
}
- *resp_code = le32_to_cpu(tm_reply->response_data) &
- MPI3MR_RI_MASK_RESPCODE;
switch (*resp_code) {
- case MPI3MR_RSP_TM_SUCCEEDED:
- case MPI3MR_RSP_TM_COMPLETE:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_SUCCEEDED:
+ case MPI3_SCSITASKMGMT_RSPCODE_TM_COMPLETE:
break;
- case MPI3MR_RSP_IO_QUEUED_ON_IOC:
+ case MPI3_SCSITASKMGMT_RSPCODE_IO_QUEUED_ON_IOC:
if (tm_type != MPI3_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
retval = -1;
break;
@@ -2697,14 +3076,37 @@ static int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
break;
}
- ioc_info(mrioc,
- "%s :Issue TM: Completed TM type (0x%x) handle(0x%04x) ",
- __func__, tm_type, handle);
- ioc_info(mrioc,
- "with ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
- drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
- le32_to_cpu(tm_reply->termination_count));
- mpi3mr_print_response_code(mrioc, *resp_code);
+ dprint_tm(mrioc,
+ "task management request type(%d) completed for handle(0x%04x) with ioc_status(0x%04x), log_info(0x%08x), termination_count(%d), response:%s(0x%x)\n",
+ tm_type, handle, drv_cmd->ioc_status, drv_cmd->ioc_loginfo,
+ le32_to_cpu(tm_reply->termination_count),
+ mpi3mr_tm_response_name(*resp_code), *resp_code);
+
+ if (!retval) {
+ mpi3mr_ioc_disable_intr(mrioc);
+ mpi3mr_poll_pend_io_completions(mrioc);
+ mpi3mr_ioc_enable_intr(mrioc);
+ mpi3mr_poll_pend_io_completions(mrioc);
+ }
+ switch (tm_type) {
+ case MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (!scsi_tgt_priv_data)
+ break;
+ scsi_tgt_priv_data->pend_count = 0;
+ blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
+ mpi3mr_count_tgt_pending,
+ (void *)scsi_tgt_priv_data->starget);
+ break;
+ case MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (!sdev_priv_data)
+ break;
+ sdev_priv_data->pend_count = 0;
+ blk_mq_tagset_busy_iter(&mrioc->shost->tag_set,
+ mpi3mr_count_dev_pending, (void *)sdev);
+ break;
+ default:
+ break;
+ }
out_unlock:
drv_cmd->state = MPI3MR_CMD_NOTUSED;
@@ -2713,14 +3115,6 @@ out_unlock:
atomic_dec_if_positive(&scsi_tgt_priv_data->block_io);
if (tgtdev)
mpi3mr_tgtdev_put(tgtdev);
- if (!retval) {
- /*
- * Flush all IRQ handlers by calling synchronize_irq().
- * mpi3mr_ioc_disable_intr() takes care of it.
- */
- mpi3mr_ioc_disable_intr(mrioc);
- mpi3mr_ioc_enable_intr(mrioc);
- }
out:
return retval;
}
@@ -2769,17 +3163,49 @@ static int mpi3mr_bios_param(struct scsi_device *sdev,
* mpi3mr_map_queues - Map queues callback handler
* @shost: SCSI host reference
*
- * Call the blk_mq_pci_map_queues with from which operational
- * queue the mapping has to be done
+ * Maps default and poll queues.
*
- * Return: return of blk_mq_pci_map_queues
+ * Return: always 0.
*/
static int mpi3mr_map_queues(struct Scsi_Host *shost)
{
struct mpi3mr_ioc *mrioc = shost_priv(shost);
+ int i, qoff, offset;
+ struct blk_mq_queue_map *map = NULL;
+
+ offset = mrioc->op_reply_q_offset;
+
+ for (i = 0, qoff = 0; i < HCTX_MAX_TYPES; i++) {
+ map = &shost->tag_set.map[i];
+
+ map->nr_queues = 0;
+
+ if (i == HCTX_TYPE_DEFAULT)
+ map->nr_queues = mrioc->default_qcount;
+ else if (i == HCTX_TYPE_POLL)
+ map->nr_queues = mrioc->active_poll_qcount;
+
+ if (!map->nr_queues) {
+ BUG_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
+
+ /*
+ * The poll queues don't have an IRQ (and hence no IRQ
+ * affinity), so use the regular blk-mq CPU mapping
+ */
+ map->queue_offset = qoff;
+ if (i != HCTX_TYPE_POLL)
+ blk_mq_pci_map_queues(map, mrioc->pdev, offset);
+ else
+ blk_mq_map_queues(map);
+
+ qoff += map->nr_queues;
+ offset += map->nr_queues;
+ }
+
+ return 0;
- return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
- mrioc->pdev, mrioc->op_reply_q_offset);
}
/**
@@ -2938,6 +3364,13 @@ static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
stgt_priv_data = sdev_priv_data->tgt_priv_data;
dev_handle = stgt_priv_data->dev_handle;
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s:target(handle = 0x%04x) is removed, target reset is not issued\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
sdev_printk(KERN_INFO, scmd->device,
"Target Reset is issued to handle(0x%04x)\n",
dev_handle);
@@ -2945,15 +3378,22 @@ static int mpi3mr_eh_target_reset(struct scsi_cmnd *scmd)
ret = mpi3mr_issue_tm(mrioc,
MPI3_SCSITASKMGMT_TASKTYPE_TARGET_RESET, dev_handle,
sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
- MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL);
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
if (ret)
goto out;
+ if (stgt_priv_data->pend_count) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: target has %d pending commands, target reset is failed\n",
+ mrioc->name, sdev_priv_data->pend_count);
+ goto out;
+ }
+
retval = SUCCESS;
out:
sdev_printk(KERN_INFO, scmd->device,
- "Target reset is %s for scmd(%p)\n",
+ "%s: target reset is %s for scmd(%p)\n", mrioc->name,
((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
return retval;
@@ -2992,21 +3432,34 @@ static int mpi3mr_eh_dev_reset(struct scsi_cmnd *scmd)
stgt_priv_data = sdev_priv_data->tgt_priv_data;
dev_handle = stgt_priv_data->dev_handle;
+ if (stgt_priv_data->dev_removed) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: device(handle = 0x%04x) is removed, device(LUN) reset is not issued\n",
+ mrioc->name, dev_handle);
+ retval = FAILED;
+ goto out;
+ }
sdev_printk(KERN_INFO, scmd->device,
"Device(lun) Reset is issued to handle(0x%04x)\n", dev_handle);
ret = mpi3mr_issue_tm(mrioc,
MPI3_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, dev_handle,
sdev_priv_data->lun_id, MPI3MR_HOSTTAG_BLK_TMS,
- MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, NULL);
+ MPI3MR_RESETTM_TIMEOUT, &mrioc->host_tm_cmds, &resp_code, scmd);
if (ret)
goto out;
+ if (sdev_priv_data->pend_count) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "%s: device has %d pending commands, device(LUN) reset is failed\n",
+ mrioc->name, sdev_priv_data->pend_count);
+ goto out;
+ }
retval = SUCCESS;
out:
sdev_printk(KERN_INFO, scmd->device,
- "Device(lun) reset is %s for scmd(%p)\n",
+ "%s: device(LUN) reset is %s for scmd(%p)\n", mrioc->name,
((retval == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
return retval;
@@ -3049,32 +3502,42 @@ static int mpi3mr_scan_finished(struct Scsi_Host *shost,
{
struct mpi3mr_ioc *mrioc = shost_priv(shost);
u32 pe_timeout = MPI3MR_PORTENABLE_TIMEOUT;
+ u32 ioc_status = readl(&mrioc->sysif_regs->ioc_status);
- if (time >= (pe_timeout * HZ)) {
+ if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
+ (ioc_status & MPI3_SYSIF_IOC_STATUS_FAULT)) {
+ ioc_err(mrioc, "port enable failed due to fault or reset\n");
+ mpi3mr_print_fault_info(mrioc);
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ mrioc->scan_started = 0;
mrioc->init_cmds.is_waiting = 0;
mrioc->init_cmds.callback = NULL;
mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
- ioc_err(mrioc, "%s :port enable request timed out\n", __func__);
- mrioc->is_driver_loading = 0;
- mpi3mr_soft_reset_handler(mrioc,
- MPI3MR_RESET_FROM_PE_TIMEOUT, 1);
}
- if (mrioc->scan_failed) {
- ioc_err(mrioc,
- "%s :port enable failed with (ioc_status=0x%08x)\n",
- __func__, mrioc->scan_failed);
- mrioc->is_driver_loading = 0;
- mrioc->stop_drv_processing = 1;
- return 1;
+ if (time >= (pe_timeout * HZ)) {
+ ioc_err(mrioc, "port enable failed due to time out\n");
+ mpi3mr_check_rh_fault_ioc(mrioc,
+ MPI3MR_RESET_FROM_PE_TIMEOUT);
+ mrioc->scan_failed = MPI3_IOCSTATUS_INTERNAL_ERROR;
+ mrioc->scan_started = 0;
+ mrioc->init_cmds.is_waiting = 0;
+ mrioc->init_cmds.callback = NULL;
+ mrioc->init_cmds.state = MPI3MR_CMD_NOTUSED;
}
if (mrioc->scan_started)
return 0;
- ioc_info(mrioc, "%s :port enable: SUCCESS\n", __func__);
+
+ if (mrioc->scan_failed) {
+ ioc_err(mrioc,
+ "port enable failed with status=0x%04x\n",
+ mrioc->scan_failed);
+ } else
+ ioc_info(mrioc, "port enable is successfully completed\n");
+
mpi3mr_start_watchdog(mrioc);
mrioc->is_driver_loading = 0;
-
return 1;
}
@@ -3189,10 +3652,18 @@ static int mpi3mr_slave_configure(struct scsi_device *sdev)
switch (tgt_dev->dev_type) {
case MPI3_DEVICE_DEVFORM_PCIE:
/*The block layer hw sector size = 512*/
- blk_queue_max_hw_sectors(sdev->request_queue,
- tgt_dev->dev_spec.pcie_inf.mdts / 512);
- blk_queue_virt_boundary(sdev->request_queue,
- ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
+ if ((tgt_dev->dev_spec.pcie_inf.dev_info &
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_MASK) ==
+ MPI3_DEVICE0_PCIE_DEVICE_INFO_TYPE_NVME_DEVICE) {
+ blk_queue_max_hw_sectors(sdev->request_queue,
+ tgt_dev->dev_spec.pcie_inf.mdts / 512);
+ if (tgt_dev->dev_spec.pcie_inf.pgsz == 0)
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << MPI3MR_DEFAULT_PGSZEXP) - 1));
+ else
+ blk_queue_virt_boundary(sdev->request_queue,
+ ((1 << tgt_dev->dev_spec.pcie_inf.pgsz) - 1));
+ }
break;
default:
break;
@@ -3312,9 +3783,22 @@ static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
struct scsi_cmnd *scmd)
{
unsigned char *buf;
- u16 param_len, desc_len;
-
- param_len = get_unaligned_be16(scmd->cmnd + 7);
+ u16 param_len, desc_len, trunc_param_len;
+
+ trunc_param_len = param_len = get_unaligned_be16(scmd->cmnd + 7);
+
+ if (mrioc->pdev->revision) {
+ if ((param_len > 24) && ((param_len - 8) & 0xF)) {
+ trunc_param_len -= (param_len - 8) & 0xF;
+ dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
+ dprint_scsi_err(mrioc,
+ "truncating param_len from (%d) to (%d)\n",
+ param_len, trunc_param_len);
+ put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
+ dprint_scsi_command(mrioc, scmd, MPI3_DEBUG_SCSI_ERROR);
+ }
+ return false;
+ }
if (!param_len) {
ioc_warn(mrioc,
@@ -3374,12 +3858,12 @@ static bool mpi3mr_check_return_unmap(struct mpi3mr_ioc *mrioc,
}
if (param_len > (desc_len + 8)) {
+ trunc_param_len = desc_len + 8;
scsi_print_command(scmd);
- ioc_warn(mrioc,
- "%s: Truncating param_len(%d) to desc_len+8(%d)\n",
- __func__, param_len, (desc_len + 8));
- param_len = desc_len + 8;
- put_unaligned_be16(param_len, scmd->cmnd + 7);
+ dprint_scsi_err(mrioc,
+ "truncating param_len(%d) to desc_len+8(%d)\n",
+ param_len, trunc_param_len);
+ put_unaligned_be16(trunc_param_len, scmd->cmnd + 7);
scsi_print_command(scmd);
}
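
The revision-dependent truncation above rounds an oversized UNMAP parameter list down to a 16-byte block-descriptor boundary (8-byte header plus whole descriptors). A worked example:

/*
 * With param_len = 62 (8-byte header + three 16-byte descriptors +
 * 6 stray bytes), the condition (param_len > 24) holds and:
 *
 *     (62 - 8) & 0xF = 6    =>    trunc_param_len = 62 - 6 = 56
 *
 * leaving 8 + 3 * 16 = 56 bytes, i.e. whole descriptors only.
 */
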
@@ -3434,6 +3918,7 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
u32 scsiio_flags = 0;
struct request *rq = scsi_cmd_to_rq(scmd);
int iprio_class;
+ u8 is_pcie_dev = 0;
sdev_priv_data = scmd->device->hostdata;
if (!sdev_priv_data || !sdev_priv_data->tgt_priv_data) {
@@ -3478,8 +3963,10 @@ static int mpi3mr_qcmd(struct Scsi_Host *shost,
goto out;
}
- if ((scmd->cmnd[0] == UNMAP) &&
- (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE) &&
+ if (stgt_priv_data->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
+ is_pcie_dev = 1;
+ if ((scmd->cmnd[0] == UNMAP) && is_pcie_dev &&
+ (mrioc->pdev->device == MPI3_MFGPAGE_DEVID_SAS4116) &&
mpi3mr_check_return_unmap(mrioc, scmd))
goto out;
@@ -3559,6 +4046,7 @@ static struct scsi_host_template mpi3mr_driver_template = {
.eh_host_reset_handler = mpi3mr_eh_host_reset,
.bios_param = mpi3mr_bios_param,
.map_queues = mpi3mr_map_queues,
+ .mq_poll = mpi3mr_blk_mq_poll,
.no_write_same = 1,
.can_queue = 1,
.this_id = -1,
@@ -3567,6 +4055,7 @@ static struct scsi_host_template mpi3mr_driver_template = {
*/
.max_sectors = 2048,
.cmd_per_lun = MPI3MR_MAX_CMDS_LUN,
+ .max_segment_size = 0xffffffff,
.track_queue_depth = 1,
.cmd_size = sizeof(struct scmd_priv),
};
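
The template now wires .mq_poll for the interrupt-less poll queues; the callback body sits outside this hunk. A minimal sketch of what such a poll callback does, with the guard condition and queue indexing assumed rather than taken from the patch, and assuming mpi3mr_process_op_reply_q() returns the number of completions it reaped:

static int mpi3mr_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
{
	struct mpi3mr_ioc *mrioc = shost_priv(shost);
	int num_entries = 0;

	if (mrioc->reset_in_progress)	/* guard assumed */
		return 0;

	/* queue_num -> reply queue mapping assumed to be direct here */
	num_entries = mpi3mr_process_op_reply_q(mrioc,
			&mrioc->op_reply_qinfo[queue_num]);

	return num_entries;	/* completions reaped without an IRQ */
}
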
@@ -3714,6 +4203,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&mrioc->fwevt_list);
INIT_LIST_HEAD(&mrioc->tgtdev_list);
INIT_LIST_HEAD(&mrioc->delayed_rmhs_list);
+ INIT_LIST_HEAD(&mrioc->delayed_evtack_cmds_list);
mutex_init(&mrioc->reset_mutex);
mpi3mr_init_drv_cmd(&mrioc->init_cmds, MPI3MR_HOSTTAG_INITCMDS);
@@ -3772,21 +4262,29 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc_err(mrioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
retval = -ENODEV;
- goto out_fwevtthread_failed;
+ goto fwevtthread_failed;
}
mrioc->is_driver_loading = 1;
- if (mpi3mr_init_ioc(mrioc, MPI3MR_IT_INIT)) {
- ioc_err(mrioc, "failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
+ mrioc->cpu_count = num_online_cpus();
+ if (mpi3mr_setup_resources(mrioc)) {
+ ioc_err(mrioc, "setup resources failed\n");
retval = -ENODEV;
- goto out_iocinit_failed;
+ goto resource_alloc_failed;
+ }
+ if (mpi3mr_init_ioc(mrioc)) {
+ ioc_err(mrioc, "initializing IOC failed\n");
+ retval = -ENODEV;
+ goto init_ioc_failed;
}
shost->nr_hw_queues = mrioc->num_op_reply_q;
+ if (mrioc->active_poll_qcount)
+ shost->nr_maps = 3;
+
shost->can_queue = mrioc->max_host_ios;
shost->sg_tablesize = MPI3MR_SG_DEPTH;
- shost->max_id = mrioc->facts.max_perids;
+ shost->max_id = mrioc->facts.max_perids + 1;
retval = scsi_add_host(shost, &pdev->dev);
if (retval) {
@@ -3799,10 +4297,14 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return retval;
addhost_failed:
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
-out_iocinit_failed:
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+init_ioc_failed:
+ mpi3mr_free_mem(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
+resource_alloc_failed:
destroy_workqueue(mrioc->fwevt_worker_thread);
-out_fwevtthread_failed:
+fwevtthread_failed:
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
spin_unlock(&mrioc_list_lock);
@@ -3815,6 +4317,7 @@ shost_failed:
* mpi3mr_remove - PCI remove callback
* @pdev: PCI device instance
*
+ * Cleanup the IOC by issuing MUR and shutdown notification.
* Free up all memory and resources associated with the
* controller and target devices, unregister the shost.
*
@@ -3851,7 +4354,10 @@ static void mpi3mr_remove(struct pci_dev *pdev)
mpi3mr_tgtdev_del_from_list(mrioc, tgtdev);
mpi3mr_tgtdev_put(tgtdev);
}
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+ mpi3mr_free_mem(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
spin_lock(&mrioc_list_lock);
list_del(&mrioc->list);
@@ -3891,7 +4397,10 @@ static void mpi3mr_shutdown(struct pci_dev *pdev)
spin_unlock_irqrestore(&mrioc->fwevt_lock, flags);
if (wq)
destroy_workqueue(wq);
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_COMPLETE_CLEANUP);
+
+ mpi3mr_stop_watchdog(mrioc);
+ mpi3mr_cleanup_ioc(mrioc);
+ mpi3mr_cleanup_resources(mrioc);
}
#ifdef CONFIG_PM
@@ -3921,7 +4430,7 @@ static int mpi3mr_suspend(struct pci_dev *pdev, pm_message_t state)
mpi3mr_cleanup_fwevt_list(mrioc);
scsi_block_requests(shost);
mpi3mr_stop_watchdog(mrioc);
- mpi3mr_cleanup_ioc(mrioc, MPI3MR_SUSPEND);
+ mpi3mr_cleanup_ioc(mrioc);
device_state = pci_choose_state(pdev, state);
ioc_info(mrioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
@@ -3970,7 +4479,11 @@ static int mpi3mr_resume(struct pci_dev *pdev)
mrioc->stop_drv_processing = 0;
mpi3mr_memset_buffers(mrioc);
- mpi3mr_init_ioc(mrioc, MPI3MR_IT_RESUME);
+ r = mpi3mr_reinit_ioc(mrioc, 1);
+ if (r) {
+ ioc_err(mrioc, "resuming controller failed[%d]\n", r);
+ return r;
+ }
scsi_unblock_requests(shost);
mpi3mr_start_watchdog(mrioc);
@@ -3980,8 +4493,8 @@ static int mpi3mr_resume(struct pci_dev *pdev)
static const struct pci_device_id mpi3mr_pci_id_table[] = {
{
- PCI_DEVICE_SUB(PCI_VENDOR_ID_LSI_LOGIC, 0x00A5,
- PCI_ANY_ID, PCI_ANY_ID)
+ PCI_DEVICE_SUB(MPI3_MFGPAGE_VENDORID_BROADCOM,
+ MPI3_MFGPAGE_DEVID_SAS4116, PCI_ANY_ID, PCI_ANY_ID)
},
{ 0 }
};
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 81dab9b82f79..511726f92d9a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3086,6 +3086,7 @@ _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
void
mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
{
+ unsigned int irq;
struct adapter_reply_queue *reply_q, *next;
if (list_empty(&ioc->reply_queue_list))
@@ -3098,9 +3099,10 @@ mpt3sas_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
continue;
}
- if (ioc->smp_affinity_enable)
- irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
- reply_q->msix_index), NULL);
+ if (ioc->smp_affinity_enable) {
+ irq = pci_irq_vector(ioc->pdev, reply_q->msix_index);
+ irq_update_affinity_hint(irq, NULL);
+ }
free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
reply_q);
kfree(reply_q);
@@ -3167,18 +3169,15 @@ out:
* @ioc: per adapter object
*
* The end user would need to set the affinity via /proc/irq/#/smp_affinity
- *
- * It would nice if we could call irq_set_affinity, however it is not
- * an exported symbol
*/
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
- unsigned int cpu, nr_cpus, nr_msix, index = 0;
+ unsigned int cpu, nr_cpus, nr_msix, index = 0, irq;
struct adapter_reply_queue *reply_q;
- int local_numa_node;
int iopoll_q_count = ioc->reply_queue_count -
ioc->iopoll_q_start_index;
+ const struct cpumask *mask;
if (!_base_is_controller_msix_enabled(ioc))
return;
@@ -3201,11 +3200,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
* corresponding to high iops queues.
*/
if (ioc->high_iops_queues) {
- local_numa_node = dev_to_node(&ioc->pdev->dev);
+ mask = cpumask_of_node(dev_to_node(&ioc->pdev->dev));
for (index = 0; index < ioc->high_iops_queues;
index++) {
- irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
- index), cpumask_of_node(local_numa_node));
+ irq = pci_irq_vector(ioc->pdev, index);
+ irq_set_affinity_and_hint(irq, mask);
}
}
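
Both mpt3sas hunks above migrate from irq_set_affinity_hint() to the newer split helpers: irq_update_affinity_hint() only publishes the hint (visible in /proc/irq/N/affinity_hint) without touching the effective affinity, while irq_set_affinity_and_hint() applies the mask as the affinity and publishes it. A hypothetical helper showing the division of labor:

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical helper: one mask, two different effects. */
static void steer_queue_irq(struct pci_dev *pdev, int index, bool apply)
{
	unsigned int irq = pci_irq_vector(pdev, index);
	const struct cpumask *mask = cpumask_of_node(dev_to_node(&pdev->dev));

	if (apply)
		irq_set_affinity_and_hint(irq, mask);	/* affinity + hint */
	else
		irq_update_affinity_hint(irq, mask);	/* hint only */
}
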
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index a0af986633d2..949e98d523e2 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "39.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 39
+#define MPT3SAS_DRIVER_VERSION "40.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 40
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 05b6c6a073c3..d92ca140d298 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3533,11 +3533,31 @@ diag_trigger_master_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_MASTER_TRIGGER_T *master_tg;
unsigned long flags;
ssize_t rc;
+ bool set = 1;
- spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
+
+ if (ioc->supports_trigger_pages) {
+ master_tg = kzalloc(sizeof(struct SL_WH_MASTER_TRIGGER_T),
+ GFP_KERNEL);
+ if (!master_tg)
+ return -ENOMEM;
+
+ memcpy(master_tg, buf, rc);
+ if (!master_tg->MasterData)
+ set = 0;
+ if (mpt3sas_config_update_driver_trigger_pg1(ioc, master_tg,
+ set)) {
+ kfree(master_tg);
+ return -EFAULT;
+ }
+ kfree(master_tg);
+ }
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
memset(&ioc->diag_trigger_master, 0,
sizeof(struct SL_WH_MASTER_TRIGGER_T));
memcpy(&ioc->diag_trigger_master, buf, rc);
@@ -3589,11 +3609,31 @@ diag_trigger_event_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_EVENT_TRIGGERS_T *event_tg;
unsigned long flags;
ssize_t sz;
+ bool set = 1;
- spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
+ if (ioc->supports_trigger_pages) {
+ event_tg = kzalloc(sizeof(struct SL_WH_EVENT_TRIGGERS_T),
+ GFP_KERNEL);
+ if (!event_tg)
+ return -ENOMEM;
+
+ memcpy(event_tg, buf, sz);
+ if (!event_tg->ValidEntries)
+ set = 0;
+ if (mpt3sas_config_update_driver_trigger_pg2(ioc, event_tg,
+ set)) {
+ kfree(event_tg);
+ return -EFAULT;
+ }
+ kfree(event_tg);
+ }
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
memset(&ioc->diag_trigger_event, 0,
sizeof(struct SL_WH_EVENT_TRIGGERS_T));
memcpy(&ioc->diag_trigger_event, buf, sz);
@@ -3644,11 +3684,31 @@ diag_trigger_scsi_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_SCSI_TRIGGERS_T *scsi_tg;
unsigned long flags;
ssize_t sz;
+ bool set = 1;
+
+ sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
+ if (ioc->supports_trigger_pages) {
+ scsi_tg = kzalloc(sizeof(struct SL_WH_SCSI_TRIGGERS_T),
+ GFP_KERNEL);
+ if (!scsi_tg)
+ return -ENOMEM;
+
+ memcpy(scsi_tg, buf, sz);
+ if (!scsi_tg->ValidEntries)
+ set = 0;
+ if (mpt3sas_config_update_driver_trigger_pg3(ioc, scsi_tg,
+ set)) {
+ kfree(scsi_tg);
+ return -EFAULT;
+ }
+ kfree(scsi_tg);
+ }
spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
- sz = min(sizeof(ioc->diag_trigger_scsi), count);
+
memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi));
memcpy(&ioc->diag_trigger_scsi, buf, sz);
if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
@@ -3698,11 +3758,30 @@ diag_trigger_mpi_store(struct device *cdev,
{
struct Scsi_Host *shost = class_to_shost(cdev);
struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct SL_WH_MPI_TRIGGERS_T *mpi_tg;
unsigned long flags;
ssize_t sz;
+ bool set = 1;
- spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
+ if (ioc->supports_trigger_pages) {
+ mpi_tg = kzalloc(sizeof(struct SL_WH_MPI_TRIGGERS_T),
+ GFP_KERNEL);
+ if (!mpi_tg)
+ return -ENOMEM;
+
+ memcpy(mpi_tg, buf, sz);
+ if (!mpi_tg->ValidEntries)
+ set = 0;
+ if (mpt3sas_config_update_driver_trigger_pg4(ioc, mpi_tg,
+ set)) {
+ kfree(mpi_tg);
+ return -EFAULT;
+ }
+ kfree(mpi_tg);
+ }
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
memset(&ioc->diag_trigger_mpi, 0,
sizeof(ioc->diag_trigger_mpi));
memcpy(&ioc->diag_trigger_mpi, buf, sz);
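A hedged sketch of the shape the four diag_trigger_*_store() hunks above share (the patch repeats it per trigger type rather than factoring it out; trigger_store_common() and its update_pg callback are illustrative names, not part of the patch). The point of the reordering: the allocation and the persistent config-page update can sleep, so they now run before diag_trigger_lock is taken, and only the cached copy is touched under the spinlock.

    static ssize_t trigger_store_common(struct MPT3SAS_ADAPTER *ioc,
            const char *buf, size_t count, void *cached, size_t tg_size,
            int (*update_pg)(struct MPT3SAS_ADAPTER *ioc, void *tg, bool set))
    {
        unsigned long flags;
        ssize_t sz = min(tg_size, count);
        void *tg;

        if (ioc->supports_trigger_pages) {
            /* may sleep: keep outside the spinlock */
            tg = kzalloc(tg_size, GFP_KERNEL);
            if (!tg)
                return -ENOMEM;
            memcpy(tg, buf, sz);
            /* the real handlers pass set == false when the copied
             * MasterData/ValidEntries field is zero */
            if (update_pg(ioc, tg, true)) {
                kfree(tg);
                return -EFAULT;
            }
            kfree(tg);
        }

        /* only then refresh the cached copy under the lock */
        spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
        memset(cached, 0, tg_size);
        memcpy(cached, buf, sz);
        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
        return sz;
    }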
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 31d1ea5a5dd2..1e52bc7febfa 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -67,8 +67,10 @@ static struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
while (sha->sas_port[i]) {
if (sha->sas_port[i] == dev->port) {
+ spin_lock(&sha->sas_port[i]->phy_list_lock);
phy = container_of(sha->sas_port[i]->phy_list.next,
struct asd_sas_phy, port_phy_el);
+ spin_unlock(&sha->sas_port[i]->phy_list_lock);
j = 0;
while (sha->sas_phy[j]) {
if (sha->sas_phy[j] == phy)
@@ -96,6 +98,8 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
while (sha->sas_port[i]) {
if (sha->sas_port[i] == dev->port) {
struct asd_sas_phy *phy;
+
+ spin_lock(&sha->sas_port[i]->phy_list_lock);
list_for_each_entry(phy,
&sha->sas_port[i]->phy_list, port_phy_el) {
j = 0;
@@ -109,6 +113,7 @@ static int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
num++;
n++;
}
+ spin_unlock(&sha->sas_port[i]->phy_list_lock);
break;
}
i++;
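The rule these two mvsas hunks enforce, sketched (hedged; port stands for any struct asd_sas_port): every traversal of port->phy_list must hold phy_list_lock, since libsas can add or remove phys from the list concurrently.

    struct asd_sas_phy *phy;

    spin_lock(&port->phy_list_lock);
    list_for_each_entry(phy, &port->phy_list, port_phy_el) {
        /* inspect phy; no sleeping while the spinlock is held */
    }
    spin_unlock(&port->phy_list_lock);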
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index 2a4506a5083e..71585528e8db 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -1674,7 +1674,7 @@ static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
if (sdev->id > MYRB_MAX_TARGETS)
return -ENXIO;
- pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
if (!pdev_info)
return -ENOMEM;
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 6ea323e9a2e3..7eb8c39da366 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -538,13 +538,11 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
cs->fwstat_buf = NULL;
goto out_free;
}
- cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
- GFP_KERNEL | GFP_DMA);
+ cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info), GFP_KERNEL);
if (!cs->ctlr_info)
goto out_free;
- cs->event_buf = kzalloc(sizeof(struct myrs_event),
- GFP_KERNEL | GFP_DMA);
+ cs->event_buf = kzalloc(sizeof(struct myrs_event), GFP_KERNEL);
if (!cs->event_buf)
goto out_free;
@@ -1805,7 +1803,7 @@ static int myrs_slave_alloc(struct scsi_device *sdev)
ldev_num = myrs_translate_ldev(cs, sdev);
- ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
+ ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
if (!ldev_info)
return -ENOMEM;
@@ -1867,7 +1865,7 @@ static int myrs_slave_alloc(struct scsi_device *sdev)
} else {
struct myrs_pdev_info *pdev_info;
- pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
+ pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL);
if (!pdev_info)
return -ENOMEM;
@@ -2269,7 +2267,8 @@ static void myrs_cleanup(struct myrs_hba *cs)
myrs_unmap(cs);
if (cs->mmio_base) {
- cs->disable_intr(cs);
+ if (cs->disable_intr)
+ cs->disable_intr(cs);
iounmap(cs->mmio_base);
cs->mmio_base = NULL;
}
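On the GFP_DMA removals in myrb/myrs above: GFP_DMA draws from the small legacy ISA DMA zone, which these controllers do not need. Memory the hardware actually accesses goes through the DMA-mapping API, which enforces the device's addressing limits itself. A generic, hedged illustration (info, addr, and pdev are placeholders):

    info = kzalloc(sizeof(*info), GFP_KERNEL);    /* no zone restriction */
    if (!info)
        return -ENOMEM;

    /* the mapping layer honours the device's dma_mask, bouncing if the
     * buffer is not directly reachable */
    addr = dma_map_single(&pdev->dev, info, sizeof(*info), DMA_FROM_DEVICE);
    if (dma_mapping_error(&pdev->dev, addr)) {
        kfree(info);
        return -ENOMEM;
    }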
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 8b9e889bc306..92c818a8a84a 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1557,6 +1557,9 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data)
data->MmioAddress = (unsigned long)
ioremap(p_dev->resource[2]->start,
resource_size(p_dev->resource[2]));
+ if (!data->MmioAddress)
+ goto next_entry;
+
data->MmioLength = resource_size(p_dev->resource[2]);
}
/* If we got this far, we're cool! */
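ioremap() returns NULL on failure, so mapped MMIO must be checked like any other allocation before use; the nsp_cs hunk above bails to the next config entry. A hedged generic example (res and REG_CTRL are placeholders):

    void __iomem *base;

    base = ioremap(res->start, resource_size(res));
    if (!base)
        return -ENOMEM;    /* in nsp_cs: fall through to the next entry */

    writel(0, base + REG_CTRL);    /* safe to touch registers only now */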
diff --git a/drivers/scsi/pm8001/Makefile b/drivers/scsi/pm8001/Makefile
index 02b7338999cc..bbb51b7312f1 100644
--- a/drivers/scsi/pm8001/Makefile
+++ b/drivers/scsi/pm8001/Makefile
@@ -6,9 +6,12 @@
obj-$(CONFIG_SCSI_PM8001) += pm80xx.o
+
+CFLAGS_pm80xx_tracepoints.o := -I$(src)
+
pm80xx-y += pm8001_init.o \
pm8001_sas.o \
pm8001_ctl.o \
pm8001_hwi.o \
- pm80xx_hwi.o
-
+ pm80xx_hwi.o \
+ pm80xx_tracepoints.o
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 397eb9f6a1dd..41a63c9b719b 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -889,14 +889,6 @@ static ssize_t pm8001_show_update_fw(struct device *cdev,
static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP,
pm8001_show_update_fw, pm8001_store_update_fw);
-/**
- * ctl_mpi_state_show - controller MPI state check
- * @cdev: pointer to embedded class device
- * @buf: the buffer returned
- *
- * A sysfs 'read-only' shost attribute.
- */
-
static const char *const mpiStateText[] = {
"MPI is not initialized",
"MPI is successfully initialized",
@@ -904,6 +896,14 @@ static const char *const mpiStateText[] = {
"MPI initialization failed with error in [31:16]"
};
+/**
+ * ctl_mpi_state_show - controller MPI state check
+ * @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
+ * @buf: the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
static ssize_t ctl_mpi_state_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -920,11 +920,11 @@ static DEVICE_ATTR_RO(ctl_mpi_state);
/**
* ctl_hmi_error_show - controller MPI initialization fails
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
-
static ssize_t ctl_hmi_error_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -941,11 +941,11 @@ static DEVICE_ATTR_RO(ctl_hmi_error);
/**
* ctl_raae_count_show - controller raae count check
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
-
static ssize_t ctl_raae_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -962,11 +962,11 @@ static DEVICE_ATTR_RO(ctl_raae_count);
/**
* ctl_iop0_count_show - controller iop0 count check
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
-
static ssize_t ctl_iop0_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
@@ -983,11 +983,11 @@ static DEVICE_ATTR_RO(ctl_iop0_count);
/**
* ctl_iop1_count_show - controller iop1 count check
* @cdev: pointer to embedded class device
+ * @attr: device attribute (unused)
* @buf: the buffer returned
*
* A sysfs 'read-only' shost attribute.
*/
-
static ssize_t ctl_iop1_count_show(struct device *cdev,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 124cb69740c6..9ec310b795c3 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -42,6 +42,7 @@
#include "pm8001_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
+ #include "pm80xx_tracepoints.h"
/**
* read_main_config_table - read the configure table and save it.
@@ -1324,8 +1325,14 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
unsigned long flags;
int q_index = circularQ - pm8001_ha->inbnd_q_tbl;
int rv;
+ u32 htag = le32_to_cpu(*(__le32 *)payload);
+
+ trace_pm80xx_mpi_build_cmd(pm8001_ha->id, opCode, htag, q_index,
+ circularQ->producer_idx, le32_to_cpu(circularQ->consumer_index));
+
+ if (WARN_ON(q_index >= pm8001_ha->max_q_num))
+ return -EINVAL;
- WARN_ON(q_index >= PM8001_MAX_INB_NUM);
spin_lock_irqsave(&circularQ->iq_lock, flags);
rv = pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size,
&pMessage);
@@ -2304,21 +2311,17 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
+ param = le32_to_cpu(psataPayload->param);
tag = le32_to_cpu(psataPayload->tag);
if (!tag) {
pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
return;
}
+
ccb = &pm8001_ha->ccb_info[tag];
- param = le32_to_cpu(psataPayload->param);
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- pm8001_dbg(pm8001_ha, FAIL, "ccb null\n");
- return;
- }
+ t = ccb->task;
+ pm8001_dev = ccb->device;
if (t) {
if (t->dev && (t->dev->lldd_dev))
@@ -2335,10 +2338,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
}
ts = &t->task_status;
- if (!ts) {
- pm8001_dbg(pm8001_ha, FAIL, "ts null\n");
- return;
- }
if (status)
pm8001_dbg(pm8001_ha, IOERR,
@@ -2693,16 +2692,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
- unsigned long flags;
- ccb = &pm8001_ha->ccb_info[tag];
-
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- pm8001_dbg(pm8001_ha, FAIL, "No CCB !!!. returning\n");
- }
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
@@ -2733,8 +2723,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
- if (pm8001_dev)
- atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
@@ -2776,7 +2764,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
return;
}
break;
@@ -2862,20 +2849,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
ts->stat = SAS_OPEN_TO;
break;
}
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- t->task_state_flags |= SAS_TASK_STATE_DONE;
- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_dbg(pm8001_ha, FAIL,
- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- }
}
/*See the comments for mpi_ssp_completion */
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index fbfeb0b046dd..d8a2121cb8d9 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -179,7 +179,7 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
}
PM8001_CHIP_DISP->chip_iounmap(pm8001_ha);
flush_workqueue(pm8001_wq);
- kfree(pm8001_ha->tags);
+ bitmap_free(pm8001_ha->tags);
kfree(pm8001_ha);
}
@@ -1192,7 +1192,7 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
can_queue = ccb_count - PM8001_RESERVE_SLOT;
shost->can_queue = can_queue;
- pm8001_ha->tags = kzalloc(ccb_count, GFP_KERNEL);
+ pm8001_ha->tags = bitmap_zalloc(ccb_count, GFP_KERNEL);
if (!pm8001_ha->tags)
goto err_out;
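bitmap_zalloc()/bitmap_free() size the allocation in bits, not bytes: kzalloc(ccb_count, ...) had been allocating one byte per tag where one bit suffices, and the dedicated pair documents intent. Usage sketch:

    unsigned long *tags;

    tags = bitmap_zalloc(ccb_count, GFP_KERNEL);    /* ccb_count bits */
    if (!tags)
        return -ENOMEM;

    set_bit(tag, tags);      /* claim a tag */
    clear_bit(tag, tags);    /* release it */
    bitmap_free(tags);       /* pairs with bitmap_zalloc() */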
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index 83e73009db5c..32edda3e55c6 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -40,6 +40,7 @@
#include <linux/slab.h>
#include "pm8001_sas.h"
+#include "pm80xx_tracepoints.h"
/**
* pm8001_find_tag - from sas task to find out tag that belongs to this task
@@ -527,6 +528,9 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx)
{
+ struct ata_queued_cmd *qc;
+ struct pm8001_device *pm8001_dev;
+
if (!ccb->task)
return;
if (!sas_protocol_ata(task->task_proto))
@@ -549,6 +553,18 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha,
/* do nothing */
break;
}
+
+ if (sas_protocol_ata(task->task_proto)) {
+ // For SCSI/ATA commands uldd_task points to ata_queued_cmd
+ qc = task->uldd_task;
+ pm8001_dev = ccb->device;
+ trace_pm80xx_request_complete(pm8001_ha->id,
+ pm8001_dev ? pm8001_dev->attached_phy : PM8001_MAX_PHYS,
+ ccb_idx, 0 /* ctlr_opcode not known */,
+ qc ? qc->tf.command : 0, // ata opcode
+ pm8001_dev ? atomic_read(&pm8001_dev->running_req) : -1);
+ }
+
task->lldd_task = NULL;
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
@@ -753,8 +769,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev,
res = -TMF_RESP_FUNC_FAILED;
/* Even TMF timed out, return direct. */
if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ struct pm8001_ccb_info *ccb = task->lldd_task;
+
pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n",
tmf->tmf);
+
+ if (ccb)
+ ccb->task = NULL;
goto ex_err;
}
@@ -1183,7 +1204,7 @@ int pm8001_abort_task(struct sas_task *task)
struct pm8001_device *pm8001_dev;
struct pm8001_tmf_task tmf_task;
int rc = TMF_RESP_FUNC_FAILED, ret;
- u32 phy_id;
+ u32 phy_id, port_id;
struct sas_task_slow slow_task;
if (unlikely(!task || !task->lldd_task || !task->dev))
@@ -1230,6 +1251,7 @@ int pm8001_abort_task(struct sas_task *task)
DECLARE_COMPLETION_ONSTACK(completion_reset);
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
+ port_id = phy->port->port_id;
/* 1. Set Device state as Recovery */
pm8001_dev->setds_completion = &completion;
@@ -1281,6 +1303,10 @@ int pm8001_abort_task(struct sas_task *task)
PORT_RESET_TMO);
if (phy->port_reset_status == PORT_RESET_TMO) {
pm8001_dev_gone_notify(dev);
+ PM8001_CHIP_DISP->hw_event_ack_req(
+ pm8001_ha, 0,
+ 0x07, /*HW_EVENT_PHY_DOWN ack*/
+ port_id, phy_id, 0, 0);
goto out;
}
}
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index 83eec16d021d..a17da1cebce1 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -216,6 +216,9 @@ struct pm8001_dispatch {
u32 state);
int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha);
int (*fatal_errors)(struct pm8001_hba_info *pm8001_ha);
+ void (*hw_event_ack_req)(struct pm8001_hba_info *pm8001_ha,
+ u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0,
+ u32 param1);
};
struct pm8001_chip_info {
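The new hw_event_ack_req dispatch entry lets chip-independent code (the abort path in pm8001_sas.c above) ack a hardware event without knowing the chip generation; only the 80xx table is shown wiring it up in this patch (see pm8001_80xx_dispatch below). A hedged, defensive call pattern:

    if (PM8001_CHIP_DISP->hw_event_ack_req)
        PM8001_CHIP_DISP->hw_event_ack_req(pm8001_ha, 0,
                0x07 /* HW_EVENT_PHY_DOWN ack */,
                port_id, phy_id, 0, 0);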
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 2101fc5761c3..9d20f8009b89 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -42,6 +42,7 @@
#include "pm80xx_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"
+#include "pm80xx_tracepoints.h"
#define SMP_DIRECT 1
#define SMP_INDIRECT 2
@@ -2184,9 +2185,9 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
if (t->slow_task)
complete(&t->slow_task->completion);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
@@ -2400,21 +2401,17 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
psataPayload = (struct sata_completion_resp *)(piomb + 4);
status = le32_to_cpu(psataPayload->status);
+ param = le32_to_cpu(psataPayload->param);
tag = le32_to_cpu(psataPayload->tag);
if (!tag) {
pm8001_dbg(pm8001_ha, FAIL, "tag null\n");
return;
}
+
ccb = &pm8001_ha->ccb_info[tag];
- param = le32_to_cpu(psataPayload->param);
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- pm8001_dbg(pm8001_ha, FAIL, "ccb null\n");
- return;
- }
+ t = ccb->task;
+ pm8001_dev = ccb->device;
if (t) {
if (t->dev && (t->dev->lldd_dev))
@@ -2431,10 +2428,6 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
}
ts = &t->task_status;
- if (!ts) {
- pm8001_dbg(pm8001_ha, FAIL, "ts null\n");
- return;
- }
if (status != IO_SUCCESS) {
pm8001_dbg(pm8001_ha, FAIL,
@@ -2801,9 +2794,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
pm8001_dbg(pm8001_ha, FAIL,
"task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
t, status, ts->resp, ts->stat);
+ pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
if (t->slow_task)
complete(&t->slow_task->completion);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
} else {
spin_unlock_irqrestore(&t->task_state_lock, flags);
spin_unlock_irqrestore(&circularQ->oq_lock,
@@ -2828,17 +2821,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
u32 tag = le32_to_cpu(psataPayload->tag);
u32 port_id = le32_to_cpu(psataPayload->port_id);
u32 dev_id = le32_to_cpu(psataPayload->device_id);
- unsigned long flags;
- ccb = &pm8001_ha->ccb_info[tag];
-
- if (ccb) {
- t = ccb->task;
- pm8001_dev = ccb->device;
- } else {
- pm8001_dbg(pm8001_ha, FAIL, "No CCB !!!. returning\n");
- return;
- }
if (event)
pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event);
@@ -2852,6 +2835,10 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
return;
}
+ ccb = &pm8001_ha->ccb_info[tag];
+ t = ccb->task;
+ pm8001_dev = ccb->device;
+
if (unlikely(!t || !t->lldd_task || !t->dev)) {
pm8001_dbg(pm8001_ha, FAIL, "task or dev null\n");
return;
@@ -2866,8 +2853,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DATA_OVERRUN;
ts->residual = 0;
- if (pm8001_dev)
- atomic_dec(&pm8001_dev->running_req);
break;
case IO_XFER_ERROR_BREAK:
pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n");
@@ -2916,11 +2901,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_QUEUE_FULL;
- spin_unlock_irqrestore(&circularQ->oq_lock,
- circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- spin_lock_irqsave(&circularQ->oq_lock,
- circularQ->lock_flags);
return;
}
break;
@@ -3020,24 +3000,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha,
ts->stat = SAS_OPEN_TO;
break;
}
- spin_lock_irqsave(&t->task_state_lock, flags);
- t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
- t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
- t->task_state_flags |= SAS_TASK_STATE_DONE;
- if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- pm8001_dbg(pm8001_ha, FAIL,
- "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n",
- t, event, ts->resp, ts->stat);
- pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
- } else {
- spin_unlock_irqrestore(&t->task_state_lock, flags);
- spin_unlock_irqrestore(&circularQ->oq_lock,
- circularQ->lock_flags);
- pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag);
- spin_lock_irqsave(&circularQ->oq_lock,
- circularQ->lock_flags);
- }
}
/*See the comments for mpi_ssp_completion */
@@ -3522,7 +3484,7 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 status =
le32_to_cpu(pPayload->status);
u32 phy_id =
- le32_to_cpu(pPayload->phyid);
+ le32_to_cpu(pPayload->phyid) & 0xFF;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
pm8001_dbg(pm8001_ha, INIT,
@@ -3724,8 +3686,10 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
case HW_EVENT_PORT_RESET_TIMER_TMO:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
- pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
- port_id, phy_id, 0, 0);
+ if (!pm8001_ha->phy[phy_id].reset_completion) {
+ pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
+ port_id, phy_id, 0, 0);
+ }
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
@@ -3941,6 +3905,7 @@ static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha,
/**
* process_one_iomb - process one outbound Queue memory block
* @pm8001_ha: our hba card information
+ * @circularQ: outbound circular queue
* @piomb: IO message buffer
*/
static void process_one_iomb(struct pm8001_hba_info *pm8001_ha,
@@ -4161,10 +4126,22 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec)
u32 ret = MPI_IO_STATUS_FAIL;
u32 regval;
+ /*
+ * Fatal errors are programmed to be signalled in irq vector
+ * pm8001_ha->max_q_num - 1 through
+ * pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt

+ */
if (vec == (pm8001_ha->max_q_num - 1)) {
+ u32 mipsall_ready;
+
+ if (pm8001_ha->chip_id == chip_8008 ||
+ pm8001_ha->chip_id == chip_8009)
+ mipsall_ready = SCRATCH_PAD_MIPSALL_READY_8PORT;
+ else
+ mipsall_ready = SCRATCH_PAD_MIPSALL_READY_16PORT;
+
regval = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- if ((regval & SCRATCH_PAD_MIPSALL_READY) !=
- SCRATCH_PAD_MIPSALL_READY) {
+ if ((regval & mipsall_ready) != mipsall_ready) {
pm8001_ha->controller_fatal_error = true;
pm8001_dbg(pm8001_ha, FAIL,
"Firmware Fatal error! Regval:0x%x\n",
@@ -4547,6 +4524,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
struct sas_task *task = ccb->task;
struct domain_device *dev = task->dev;
struct pm8001_device *pm8001_ha_dev = dev->lldd_dev;
+ struct ata_queued_cmd *qc = task->uldd_task;
u32 tag = ccb->ccb_tag;
int ret;
u32 q_index, cpu_id;
@@ -4766,6 +4744,11 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
}
}
}
+ trace_pm80xx_request_issue(pm8001_ha->id,
+ ccb->device ? ccb->device->attached_phy : PM8001_MAX_PHYS,
+ ccb->ccb_tag, opc,
+ qc ? qc->tf.command : 0, // ata opcode
+ ccb->device ? atomic_read(&ccb->device->running_req) : 0);
ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc,
&sata_cmd, sizeof(sata_cmd), q_index);
return ret;
@@ -5061,4 +5044,5 @@ const struct pm8001_dispatch pm8001_80xx_dispatch = {
.fw_flash_update_req = pm8001_chip_fw_flash_update_req,
.set_dev_state_req = pm8001_chip_set_dev_state_req,
.fatal_errors = pm80xx_fatal_errors,
+ .hw_event_ack_req = pm80xx_hw_event_ack_req,
};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index c7e5d93bea92..c41ed039c92a 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -1405,8 +1405,12 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0
#define SCRATCH_PAD_IOP0_READY 0xC00
#define SCRATCH_PAD_IOP1_READY 0x3000
-#define SCRATCH_PAD_MIPSALL_READY (SCRATCH_PAD_IOP1_READY | \
+#define SCRATCH_PAD_MIPSALL_READY_16PORT (SCRATCH_PAD_IOP1_READY | \
SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_ILA_READY | \
+ SCRATCH_PAD_RAAE_READY)
+#define SCRATCH_PAD_MIPSALL_READY_8PORT (SCRATCH_PAD_IOP0_READY | \
+ SCRATCH_PAD_ILA_READY | \
SCRATCH_PAD_RAAE_READY)
/* boot loader state */
diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.c b/drivers/scsi/pm8001/pm80xx_tracepoints.c
new file mode 100644
index 000000000000..344aface9cdb
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_tracepoints.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Trace events in pm8001 driver.
+ *
+ * Copyright 2020 Google LLC
+ * Author: Akshat Jain <akshatzen@google.com>
+ */
+
+#define CREATE_TRACE_POINTS
+#include "pm80xx_tracepoints.h"
diff --git a/drivers/scsi/pm8001/pm80xx_tracepoints.h b/drivers/scsi/pm8001/pm80xx_tracepoints.h
new file mode 100644
index 000000000000..5e669a8a9344
--- /dev/null
+++ b/drivers/scsi/pm8001/pm80xx_tracepoints.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events in pm8001 driver.
+ *
+ * Copyright 2020 Google LLC
+ * Author: Akshat Jain <akshatzen@google.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pm80xx
+
+#if !defined(_TRACE_PM80XX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PM80XX_H
+
+#include <linux/tracepoint.h>
+#include "pm8001_sas.h"
+
+TRACE_EVENT(pm80xx_request_issue,
+ TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode,
+ u16 ata_opcode, int running_req),
+
+ TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, phy_id)
+ __field(u32, htag)
+ __field(u32, ctlr_opcode)
+ __field(u16, ata_opcode)
+ __field(int, running_req)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->phy_id = phy_id;
+ __entry->htag = htag;
+ __entry->ctlr_opcode = ctlr_opcode;
+ __entry->ata_opcode = ata_opcode;
+ __entry->running_req = running_req;
+ ),
+
+ TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d",
+ __entry->id, __entry->phy_id, __entry->htag,
+ __entry->ctlr_opcode, __entry->ata_opcode,
+ __entry->running_req)
+);
+
+TRACE_EVENT(pm80xx_request_complete,
+ TP_PROTO(u32 id, u32 phy_id, u32 htag, u32 ctlr_opcode,
+ u16 ata_opcode, int running_req),
+
+ TP_ARGS(id, phy_id, htag, ctlr_opcode, ata_opcode, running_req),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, phy_id)
+ __field(u32, htag)
+ __field(u32, ctlr_opcode)
+ __field(u16, ata_opcode)
+ __field(int, running_req)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->phy_id = phy_id;
+ __entry->htag = htag;
+ __entry->ctlr_opcode = ctlr_opcode;
+ __entry->ata_opcode = ata_opcode;
+ __entry->running_req = running_req;
+ ),
+
+ TP_printk("ctlr_id = %u phy_id = %u htag = %#x, ctlr_opcode = %#x ata_opcode = %#x running_req = %d",
+ __entry->id, __entry->phy_id, __entry->htag,
+ __entry->ctlr_opcode, __entry->ata_opcode,
+ __entry->running_req)
+);
+
+TRACE_EVENT(pm80xx_mpi_build_cmd,
+ TP_PROTO(u32 id, u32 opc, u32 htag, u32 qi, u32 pi, u32 ci),
+
+ TP_ARGS(id, opc, htag, qi, pi, ci),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ __field(u32, opc)
+ __field(u32, htag)
+ __field(u32, qi)
+ __field(u32, pi)
+ __field(u32, ci)
+ ),
+
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->opc = opc;
+ __entry->htag = htag;
+ __entry->qi = qi;
+ __entry->pi = pi;
+ __entry->ci = ci;
+ ),
+
+ TP_printk("ctlr_id = %u opc = %#x htag = %#x QI = %u PI = %u CI = %u",
+ __entry->id, __entry->opc, __entry->htag, __entry->qi,
+ __entry->pi, __entry->ci)
+);
+
+#endif /* _TRACE_PM80XX_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE pm80xx_tracepoints
+
+#include <trace/define_trace.h>
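How this header is wired in, for reference: exactly one translation unit (pm80xx_tracepoints.c above) defines CREATE_TRACE_POINTS before including it, which expands the TRACE_EVENT() entries into real tracepoints, and TRACE_INCLUDE_PATH '.' is why the Makefile hunk adds -I$(src). Call sites then use the generated static inlines, e.g. the one added in pm8001_mpi_build_cmd():

    trace_pm80xx_mpi_build_cmd(pm8001_ha->id, opCode, htag, q_index,
            circularQ->producer_idx,
            le32_to_cpu(circularQ->consumer_index));
    /* at runtime: echo 1 > /sys/kernel/debug/tracing/events/pm80xx/enable */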
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 88046a793767..928532180d32 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -3221,8 +3221,8 @@ static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
return NULL;
sglist->order = order;
- sgl_alloc_order(buflen, order, false,
- GFP_KERNEL | GFP_DMA | __GFP_ZERO, &sglist->num_sg);
+ sgl_alloc_order(buflen, order, false, GFP_KERNEL | __GFP_ZERO,
+ &sglist->num_sg);
return sglist;
}
@@ -3302,7 +3302,6 @@ static int pmcraid_copy_sglist(
/**
* pmcraid_queuecommand_lck - Queue a mid-layer request
* @scsi_cmd: scsi command struct
- * @done: done function
*
* This function queues a request generated by the mid-layer. Midlayer calls
* this routine within host->lock. Some of the functions called by queuecommand
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 99a56ca1fb16..fab43dabe5b3 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -2250,6 +2250,7 @@ process_els:
io_req->tm_flags == FCP_TMF_TGT_RESET) {
clear_bit(QEDF_CMD_OUTSTANDING, &io_req->flags);
io_req->sc_cmd = NULL;
+ kref_put(&io_req->refcount, qedf_release_cmd);
complete(&io_req->tm_done);
}
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 1bf7a22d4948..6ad28bc8e948 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -911,7 +911,7 @@ void qedf_ctx_soft_reset(struct fc_lport *lport)
struct qed_link_output if_link;
if (lport->vport) {
- QEDF_ERR(NULL, "Cannot issue host reset on NPIV port.\n");
+ printk_ratelimited("Cannot issue host reset on NPIV port.\n");
return;
}
@@ -1415,6 +1415,8 @@ static void qedf_upload_connection(struct qedf_ctx *qedf,
*/
term_params = dma_alloc_coherent(&qedf->pdev->dev, QEDF_TERM_BUFF_SIZE,
&term_params_dma, GFP_KERNEL);
+ if (!term_params)
+ return;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_CONN, "Uploading connection "
"port_id=%06x.\n", fcport->rdata->ids.port_id);
@@ -1862,6 +1864,7 @@ static int qedf_vport_create(struct fc_vport *vport, bool disabled)
vport_qedf->cmd_mgr = base_qedf->cmd_mgr;
init_completion(&vport_qedf->flogi_compl);
INIT_LIST_HEAD(&vport_qedf->fcports);
+ INIT_DELAYED_WORK(&vport_qedf->stag_work, qedf_stag_change_work);
rc = qedf_vport_libfc_config(vport, vn_port);
if (rc) {
@@ -3978,7 +3981,9 @@ void qedf_stag_change_work(struct work_struct *work)
struct qedf_ctx *qedf =
container_of(work, struct qedf_ctx, stag_work.work);
- QEDF_ERR(&qedf->dbg_ctx, "Performing software context reset.\n");
+ printk_ratelimited("[%s]:[%s:%d]:%d: Performing software context reset.",
+ dev_name(&qedf->pdev->dev), __func__, __LINE__,
+ qedf->dbg_ctx.host_no);
qedf_ctx_soft_reset(qedf->lport);
}
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 5916ed7662d5..4eb89aa4a39d 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -771,11 +771,10 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
qedi_cmd->list_tmf_work = NULL;
}
}
+ spin_unlock_bh(&qedi_conn->tmf_work_lock);
- if (!found) {
- spin_unlock_bh(&qedi_conn->tmf_work_lock);
+ if (!found)
goto check_cleanup_reqs;
- }
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
"TMF work, cqe->tid=0x%x, tmf flags=0x%x, cid=0x%x\n",
@@ -806,7 +805,6 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
qedi_cmd->state = CLEANUP_RECV;
unlock:
spin_unlock_bh(&conn->session->back_lock);
- spin_unlock_bh(&qedi_conn->tmf_work_lock);
wake_up_interruptible(&qedi_conn->wait_queue);
return;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 1dec814d8788..832a856dd367 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1538,7 +1538,6 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
int i;
struct scsi_bd *pbl;
u64 *list;
- dma_addr_t page;
/* Alloc dma memory for BDQ buffers */
for (i = 0; i < QEDI_BDQ_NUM; i++) {
@@ -1608,11 +1607,9 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
qedi->bdq_pbl_list_num_entries = qedi->bdq_pbl_mem_size /
QEDI_PAGE_SIZE;
list = (u64 *)qedi->bdq_pbl_list;
- page = qedi->bdq_pbl_list_dma;
for (i = 0; i < qedi->bdq_pbl_list_num_entries; i++) {
*list = qedi->bdq_pbl_dma;
list++;
- page += QEDI_PAGE_SIZE;
}
return 0;
@@ -2089,8 +2086,7 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
rc = snprintf(buf, ip_len, fmt, gw);
break;
case ISCSI_BOOT_ETH_FLAGS:
- rc = snprintf(buf, 3, "%hhd\n",
- SYSFS_FLAG_FW_SEL_BOOT);
+ rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = snprintf(buf, 3, "0\n");
@@ -2257,7 +2253,7 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
mchap_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = snprintf(buf, 3, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = snprintf(buf, 3, "0\n");
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 032efb294ee5..db55737000ab 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2700,7 +2700,13 @@ qla2x00_get_starget_port_id(struct scsi_target *starget)
static inline void
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+
rport->dev_loss_tmo = timeout ? timeout : 1;
+
+ if (IS_ENABLED(CONFIG_NVME_FC) && fcport && fcport->nvme_remote_port)
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
+ rport->dev_loss_tmo);
}
static void
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 070b636802d0..1fe4966fc2f6 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5828,13 +5828,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_dfs_create_rport(vha, fcport);
- if (NVME_TARGET(vha->hw, fcport)) {
- qla_nvme_register_remote(vha, fcport);
- qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- return;
- }
-
qla24xx_update_fcport_fcp_prio(vha, fcport);
switch (vha->host->active_mode) {
@@ -5856,6 +5849,9 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
break;
}
+ if (NVME_TARGET(vha->hw, fcport))
+ qla_nvme_register_remote(vha, fcport);
+
qla2x00_set_fcport_state(fcport, FCS_ONLINE);
if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 138ffdb5c92c..e22ec7cb65db 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -43,7 +43,7 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
req.port_name = wwn_to_u64(fcport->port_name);
req.node_name = wwn_to_u64(fcport->node_name);
req.port_role = 0;
- req.dev_loss_tmo = 0;
+ req.dev_loss_tmo = fcport->dev_loss_tmo;
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
@@ -70,6 +70,9 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
return ret;
}
+ nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port,
+ fcport->dev_loss_tmo);
+
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
ql_log(ql_log_info, vha, 0x212a,
"PortID:%06x Supports SLER\n", req.port_id);
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 8987acc24dac..0ae936d839f1 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -5734,7 +5734,7 @@ static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
- rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
rc = sprintf(str, "0\n");
@@ -5843,7 +5843,7 @@ qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type,
(char *)&boot_conn->chap.intr_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = sprintf(str, "%d\n", (char)SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
rc = sprintf(str, "0\n");
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index f6af1562cba4..211aace69c22 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -55,7 +55,6 @@
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
-#include <linux/async.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
@@ -201,11 +200,11 @@ void scsi_finish_command(struct scsi_cmnd *cmd)
/*
- * 1024 is big enough for saturating the fast scsi LUN now
+ * 1024 is big enough for saturating fast SCSI LUNs.
*/
int scsi_device_max_queue_depth(struct scsi_device *sdev)
{
- return max_t(int, sdev->host->can_queue, 1024);
+ return min_t(int, sdev->host->can_queue, 1024);
}
/**
diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c
index 081b84bb7985..b7a464383cc0 100644
--- a/drivers/scsi/scsi_bsg.c
+++ b/drivers/scsi/scsi_bsg.c
@@ -60,7 +60,7 @@ static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
goto out_free_cmd;
bio = rq->bio;
- blk_execute_rq(NULL, rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
+ blk_execute_rq(rq, !(hdr->flags & BSG_FLAG_Q_AT_TAIL));
/*
* fill in all the output members
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c
index d9109771f274..db8517f1a485 100644
--- a/drivers/scsi/scsi_debugfs.c
+++ b/drivers/scsi/scsi_debugfs.c
@@ -9,6 +9,7 @@
static const char *const scsi_cmd_flags[] = {
SCSI_CMD_FLAG_NAME(TAGGED),
SCSI_CMD_FLAG_NAME(INITIALIZED),
+ SCSI_CMD_FLAG_NAME(LAST),
};
#undef SCSI_CMD_FLAG_NAME
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 2371edbc3af4..60a6ae9d1219 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -133,23 +133,6 @@ static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd)
return true;
}
-static void scsi_eh_complete_abort(struct scsi_cmnd *scmd, struct Scsi_Host *shost)
-{
- unsigned long flags;
-
- spin_lock_irqsave(shost->host_lock, flags);
- list_del_init(&scmd->eh_entry);
- /*
- * If the abort succeeds, and there is no further
- * EH action, clear the ->last_reset time.
- */
- if (list_empty(&shost->eh_abort_list) &&
- list_empty(&shost->eh_cmd_q))
- if (shost->eh_deadline != -1)
- shost->last_reset = 0;
- spin_unlock_irqrestore(shost->host_lock, flags);
-}
-
/**
* scmd_eh_abort_handler - Handle command aborts
* @work: command to be aborted.
@@ -166,54 +149,72 @@ scmd_eh_abort_handler(struct work_struct *work)
struct scsi_cmnd *scmd =
container_of(work, struct scsi_cmnd, abort_work.work);
struct scsi_device *sdev = scmd->device;
+ struct Scsi_Host *shost = sdev->host;
enum scsi_disposition rtn;
unsigned long flags;
- if (scsi_host_eh_past_deadline(sdev->host)) {
+ if (scsi_host_eh_past_deadline(shost)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"eh timeout, not aborting\n"));
- } else {
- SCSI_LOG_ERROR_RECOVERY(3,
+ goto out;
+ }
+
+ SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"aborting command\n"));
- rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
- if (rtn == SUCCESS) {
- set_host_byte(scmd, DID_TIME_OUT);
- if (scsi_host_eh_past_deadline(sdev->host)) {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_INFO, scmd,
- "eh timeout, not retrying "
- "aborted command\n"));
- } else if (!scsi_noretry_cmd(scmd) &&
- scsi_cmd_retry_allowed(scmd) &&
- scsi_eh_should_retry_cmd(scmd)) {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_WARNING, scmd,
- "retry aborted command\n"));
- scsi_eh_complete_abort(scmd, sdev->host);
- scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
- return;
- } else {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_WARNING, scmd,
- "finish aborted command\n"));
- scsi_eh_complete_abort(scmd, sdev->host);
- scsi_finish_command(scmd);
- return;
- }
- } else {
- SCSI_LOG_ERROR_RECOVERY(3,
- scmd_printk(KERN_INFO, scmd,
- "cmd abort %s\n",
- (rtn == FAST_IO_FAIL) ?
- "not send" : "failed"));
- }
+ rtn = scsi_try_to_abort_cmd(shost->hostt, scmd);
+ if (rtn != SUCCESS) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "cmd abort %s\n",
+ (rtn == FAST_IO_FAIL) ?
+ "not send" : "failed"));
+ goto out;
+ }
+ set_host_byte(scmd, DID_TIME_OUT);
+ if (scsi_host_eh_past_deadline(shost)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_INFO, scmd,
+ "eh timeout, not retrying "
+ "aborted command\n"));
+ goto out;
}
- spin_lock_irqsave(sdev->host->host_lock, flags);
+ spin_lock_irqsave(shost->host_lock, flags);
list_del_init(&scmd->eh_entry);
- spin_unlock_irqrestore(sdev->host->host_lock, flags);
+
+ /*
+ * If the abort succeeds, and there is no further
+ * EH action, clear the ->last_reset time.
+ */
+ if (list_empty(&shost->eh_abort_list) &&
+ list_empty(&shost->eh_cmd_q))
+ if (shost->eh_deadline != -1)
+ shost->last_reset = 0;
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
+ if (!scsi_noretry_cmd(scmd) &&
+ scsi_cmd_retry_allowed(scmd) &&
+ scsi_eh_should_retry_cmd(scmd)) {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "retry aborted command\n"));
+ scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
+ } else {
+ SCSI_LOG_ERROR_RECOVERY(3,
+ scmd_printk(KERN_WARNING, scmd,
+ "finish aborted command\n"));
+ scsi_finish_command(scmd);
+ }
+ return;
+
+out:
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_del_init(&scmd->eh_entry);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+
scsi_eh_scmd_add(scmd);
}
@@ -1429,7 +1430,8 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
enum scsi_disposition rtn = NEEDS_RETRY;
for (i = 0; rtn == NEEDS_RETRY && i < 2; i++)
- rtn = scsi_send_eh_cmnd(scmd, stu_command, 6, scmd->device->request_queue->rq_timeout, 0);
+ rtn = scsi_send_eh_cmnd(scmd, stu_command, 6,
+ scmd->device->eh_timeout, 0);
if (rtn == SUCCESS)
return 0;
@@ -2040,7 +2042,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
req->timeout = 10 * HZ;
rq->retries = 5;
- blk_execute_rq_nowait(NULL, req, 1, eh_lock_door_done);
+ blk_execute_rq_nowait(req, true, eh_lock_door_done);
}
/**
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 400df3354cd6..e13fd380deb6 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -408,8 +408,7 @@ static int scsi_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
return ret;
}
-static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
- struct sg_io_hdr *hdr, fmode_t mode)
+static int sg_io(struct scsi_device *sdev, struct sg_io_hdr *hdr, fmode_t mode)
{
unsigned long start_time;
ssize_t ret = 0;
@@ -483,7 +482,7 @@ static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
start_time = jiffies;
- blk_execute_rq(disk, rq, at_head);
+ blk_execute_rq(rq, at_head);
hdr->duration = jiffies_to_msecs(jiffies - start_time);
@@ -499,19 +498,12 @@ out_put_request:
/**
* sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
* @q: request queue to send scsi commands down
- * @disk: gendisk to operate on (option)
* @mode: mode used to open the file through which the ioctl has been
* submitted
* @sic: userspace structure describing the command to perform
*
* Send down the scsi command described by @sic to the device below
- * the request queue @q. If @file is non-NULL it's used to perform
- * fine-grained permission checks that allow users to send down
- * non-destructive SCSI commands. If the caller has a struct gendisk
- * available it should be passed in as @disk to allow the low level
- * driver to use the information contained in it. A non-NULL @disk
- * is only allowed if the caller knows that the low level driver doesn't
- * need it (e.g. in the scsi subsystem).
+ * the request queue @q.
*
* Notes:
* - This interface is deprecated - users should use the SG_IO
@@ -530,8 +522,8 @@ out_put_request:
* Positive numbers returned are the compacted SCSI error codes (4
* bytes in one int) where the lowest byte is the SCSI status.
*/
-static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
- fmode_t mode, struct scsi_ioctl_command __user *sic)
+static int sg_scsi_ioctl(struct request_queue *q, fmode_t mode,
+ struct scsi_ioctl_command __user *sic)
{
enum { OMAX_SB_LEN = 16 }; /* For backward compatibility */
struct request *rq;
@@ -620,7 +612,7 @@ static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
goto error;
}
- blk_execute_rq(disk, rq, 0);
+ blk_execute_rq(rq, false);
err = req->result & 0xff; /* only 8 bit SCSI status */
if (err) {
@@ -806,8 +798,8 @@ static int scsi_put_cdrom_generic_arg(const struct cdrom_generic_command *cgc,
return 0;
}
-static int scsi_cdrom_send_packet(struct scsi_device *sdev, struct gendisk *disk,
- fmode_t mode, void __user *arg)
+static int scsi_cdrom_send_packet(struct scsi_device *sdev, fmode_t mode,
+ void __user *arg)
{
struct cdrom_generic_command cgc;
struct sg_io_hdr hdr;
@@ -847,7 +839,7 @@ static int scsi_cdrom_send_packet(struct scsi_device *sdev, struct gendisk *disk
hdr.cmdp = ((struct cdrom_generic_command __user *) arg)->cmd;
hdr.cmd_len = sizeof(cgc.cmd);
- err = sg_io(sdev, disk, &hdr, mode);
+ err = sg_io(sdev, &hdr, mode);
if (err == -EFAULT)
return -EFAULT;
@@ -862,8 +854,8 @@ static int scsi_cdrom_send_packet(struct scsi_device *sdev, struct gendisk *disk
return err;
}
-static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
- fmode_t mode, void __user *argp)
+static int scsi_ioctl_sg_io(struct scsi_device *sdev, fmode_t mode,
+ void __user *argp)
{
struct sg_io_hdr hdr;
int error;
@@ -871,7 +863,7 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
error = get_sg_io_hdr(&hdr, argp);
if (error)
return error;
- error = sg_io(sdev, disk, &hdr, mode);
+ error = sg_io(sdev, &hdr, mode);
if (error == -EFAULT)
return error;
if (put_sg_io_hdr(&hdr, argp))
@@ -882,7 +874,6 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
/**
* scsi_ioctl - Dispatch ioctl to scsi device
* @sdev: scsi device receiving ioctl
- * @disk: disk receiving the ioctl
* @mode: mode the block/char device is opened with
* @cmd: which ioctl is it
* @arg: data associated with ioctl
@@ -891,8 +882,8 @@ static int scsi_ioctl_sg_io(struct scsi_device *sdev, struct gendisk *disk,
* does not take a major/minor number as the dev field. Rather, it takes
* a pointer to a &struct scsi_device.
*/
-int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode,
- int cmd, void __user *arg)
+int scsi_ioctl(struct scsi_device *sdev, fmode_t mode, int cmd,
+ void __user *arg)
{
struct request_queue *q = sdev->request_queue;
struct scsi_sense_hdr sense_hdr;
@@ -927,11 +918,11 @@ int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode,
case SG_EMULATED_HOST:
return sg_emulated_host(q, arg);
case SG_IO:
- return scsi_ioctl_sg_io(sdev, disk, mode, arg);
+ return scsi_ioctl_sg_io(sdev, mode, arg);
case SCSI_IOCTL_SEND_COMMAND:
- return sg_scsi_ioctl(q, disk, mode, arg);
+ return sg_scsi_ioctl(q, mode, arg);
case CDROM_SEND_PACKET:
- return scsi_cdrom_send_packet(sdev, disk, mode, arg);
+ return scsi_cdrom_send_packet(sdev, mode, arg);
case CDROMCLOSETRAY:
return scsi_send_start_stop(sdev, 3);
case CDROMEJECT:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 621d841d819a..0a70aa763a96 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -241,7 +241,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
/*
* head injection *required* here otherwise quiesce won't work
*/
- blk_execute_rq(NULL, req, 1);
+ blk_execute_rq(req, true);
/*
* Some devices (USB mass-storage in particular) may transfer
@@ -543,8 +543,9 @@ static bool scsi_end_request(struct request *req, blk_status_t error,
if (blk_update_request(req, error, bytes))
return true;
+ // XXX:
if (blk_queue_add_random(q))
- add_disk_randomness(req->rq_disk);
+ add_disk_randomness(req->q->disk);
if (!blk_rq_is_passthrough(req)) {
WARN_ON_ONCE(!(cmd->flags & SCMD_INITIALIZED));
@@ -617,6 +618,46 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
}
}
+/**
+ * scsi_rq_err_bytes - determine number of bytes till the next failure boundary
+ * @rq: request to examine
+ *
+ * Description:
+ *     A request could be a merge of IOs which require different failure
+ *     handling. This function determines the number of bytes which
+ *     can be failed from the beginning of the request without
+ *     crossing into an area which needs to be retried further.
+ *
+ * Return:
+ * The number of bytes to fail.
+ */
+static unsigned int scsi_rq_err_bytes(const struct request *rq)
+{
+ unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
+ unsigned int bytes = 0;
+ struct bio *bio;
+
+ if (!(rq->rq_flags & RQF_MIXED_MERGE))
+ return blk_rq_bytes(rq);
+
+ /*
+ * Currently the only 'mixing' which can happen is between
+	 * different failfast types. We can safely fail portions
+ * which have all the failfast bits that the first one has -
+ * the ones which are at least as eager to fail as the first
+ * one.
+ */
+ for (bio = rq->bio; bio; bio = bio->bi_next) {
+ if ((bio->bi_opf & ff) != ff)
+ break;
+ bytes += bio->bi_iter.bi_size;
+ }
+
+ /* this could lead to infinite loop */
+ BUG_ON(blk_rq_bytes(rq) && !bytes);
+ return bytes;
+}
+
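The failfast bits scsi_rq_err_bytes() tests group as follows (this is the existing definition in include/linux/blk_types.h, quoted for reference); a bio in a mixed merge may be failed together with the head of the request only if its failfast set is at least as eager:

    #define REQ_FAILFAST_MASK \
        (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)

    /* per-bio check used above: fail the bio with the request head only
     * when (bio->bi_opf & ff) == ff */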
/* Helper for scsi_io_completion() when "reprep" action required. */
static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
struct request_queue *q)
@@ -794,7 +835,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
scsi_print_command(cmd);
}
}
- if (!scsi_end_request(req, blk_stat, blk_rq_err_bytes(req)))
+ if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
return;
fallthrough;
case ACTION_REPREP:
@@ -2026,7 +2067,6 @@ void scsi_exit_queue(void)
* @sdev: SCSI device to be queried
* @pf: Page format bit (1 == standard, 0 == vendor specific)
* @sp: Save page bit (0 == don't save, 1 == save)
- * @modepage: mode page being requested
* @buffer: request buffer (may not be smaller than eight bytes)
* @len: length of request buffer.
* @timeout: command timeout
@@ -2039,10 +2079,9 @@ void scsi_exit_queue(void)
* status on error
*
*/
-int
-scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
- unsigned char *buffer, int len, int timeout, int retries,
- struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
+int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
+ unsigned char *buffer, int len, int timeout, int retries,
+ struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
unsigned char cmd[10];
unsigned char *real_buffer;
diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c
index ed9572252a42..1f8f80b2dbfc 100644
--- a/drivers/scsi/scsi_logging.c
+++ b/drivers/scsi/scsi_logging.c
@@ -30,7 +30,9 @@ static inline const char *scmd_name(const struct scsi_cmnd *scmd)
{
struct request *rq = scsi_cmd_to_rq((struct scsi_cmnd *)scmd);
- return rq->rq_disk ? rq->rq_disk->disk_name : NULL;
+ if (!rq->q->disk)
+ return NULL;
+ return rq->q->disk->disk_name;
}
static size_t sdev_format_header(char *logbuf, size_t logbuf_len,
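The rq->q->disk access here, and throughout the sd.c hunks below, reflects a series-wide change: struct request no longer carries an rq_disk pointer, so the owning gendisk is reached through the queue. Conversion pattern:

    struct request *rq = scsi_cmd_to_rq(cmd);
    struct scsi_disk *sdkp = scsi_disk(rq->q->disk);    /* was rq->rq_disk */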
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index b5a858c29488..d581613d87c7 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -8,7 +8,6 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
-#include <linux/async.h>
#include <linux/blk-pm.h>
#include <scsi/scsi.h>
@@ -181,7 +180,7 @@ static int sdev_runtime_resume(struct device *dev)
blk_pre_runtime_resume(sdev->request_queue);
if (pm && pm->runtime_resume)
err = pm->runtime_resume(dev);
- blk_post_runtime_resume(sdev->request_queue, err);
+ blk_post_runtime_resume(sdev->request_queue);
return err;
}
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index a278fc8948f4..5c4786310a31 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -3,7 +3,6 @@
#define _SCSI_PRIV_H
#include <linux/device.h>
-#include <linux/async.h>
#include <scsi/scsi_device.h>
#include <linux/sbitmap.h>
@@ -144,7 +143,7 @@ extern struct scsi_transport_template blank_transport_template;
extern void __scsi_remove_device(struct scsi_device *);
extern struct bus_type scsi_bus_type;
-extern const struct attribute_group scsi_shost_attr_group;
+extern const struct attribute_group *scsi_shost_groups[];
/* scsi_netlink.c */
#ifdef CONFIG_SCSI_NETLINK
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c
index d6982d355739..95aee1ad1383 100644
--- a/drivers/scsi/scsi_proc.c
+++ b/drivers/scsi/scsi_proc.c
@@ -49,7 +49,7 @@ static DEFINE_MUTEX(global_host_template_mutex);
static ssize_t proc_scsi_host_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct Scsi_Host *shost = PDE_DATA(file_inode(file));
+ struct Scsi_Host *shost = pde_data(file_inode(file));
ssize_t ret = -ENOMEM;
char *page;
@@ -79,7 +79,7 @@ static int proc_scsi_show(struct seq_file *m, void *v)
static int proc_scsi_host_open(struct inode *inode, struct file *file)
{
- return single_open_size(file, proc_scsi_show, PDE_DATA(inode),
+ return single_open_size(file, proc_scsi_show, pde_data(inode),
4 * PAGE_SIZE);
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 23e1c0acdeae..f4e6c68ac99e 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -97,7 +97,7 @@ MODULE_PARM_DESC(max_luns,
#define SCSI_SCAN_TYPE_DEFAULT "sync"
#endif
-char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
+static char scsi_scan_type[7] = SCSI_SCAN_TYPE_DEFAULT;
module_param_string(scan, scsi_scan_type, sizeof(scsi_scan_type),
S_IRUGO|S_IWUSR);
@@ -122,7 +122,7 @@ struct async_scan_data {
struct completion prev_finished;
};
-/**
+/*
* scsi_enable_async_suspend - Enable async suspend and resume
*/
void scsi_enable_async_suspend(struct device *dev)
@@ -214,6 +214,48 @@ static void scsi_unlock_floptical(struct scsi_device *sdev,
SCSI_TIMEOUT, 3, NULL);
}
+static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
+ unsigned int depth)
+{
+ int new_shift = sbitmap_calculate_shift(depth);
+ bool need_alloc = !sdev->budget_map.map;
+ bool need_free = false;
+ int ret;
+ struct sbitmap sb_backup;
+
+ /*
+	 * Realloc if a new shift is calculated, which happens when a new
+	 * default queue depth is set up after calling ->slave_configure
+ */
+ if (!need_alloc && new_shift != sdev->budget_map.shift)
+ need_alloc = need_free = true;
+
+ if (!need_alloc)
+ return 0;
+
+ /*
+ * Request queue has to be frozen for reallocating budget map,
+ * and here disk isn't added yet, so freezing is pretty fast
+ */
+ if (need_free) {
+ blk_mq_freeze_queue(sdev->request_queue);
+ sb_backup = sdev->budget_map;
+ }
+ ret = sbitmap_init_node(&sdev->budget_map,
+ scsi_device_max_queue_depth(sdev),
+ new_shift, GFP_KERNEL,
+ sdev->request_queue->node, false, true);
+ if (need_free) {
+ if (ret)
+ sdev->budget_map = sb_backup;
+ else
+ sbitmap_free(&sb_backup);
+ ret = 0;
+ blk_mq_unfreeze_queue(sdev->request_queue);
+ }
+ return ret;
+}
+
/**
* scsi_alloc_sdev - allocate and setup a scsi_Device
* @starget: which target to allocate a &scsi_device for
@@ -306,11 +348,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
* default device queue depth to figure out sbitmap shift
* since we use this queue depth most of times.
*/
- if (sbitmap_init_node(&sdev->budget_map,
- scsi_device_max_queue_depth(sdev),
- sbitmap_calculate_shift(depth),
- GFP_KERNEL, sdev->request_queue->node,
- false, true)) {
+ if (scsi_realloc_sdev_budget_map(sdev, depth)) {
put_device(&starget->dev);
kfree(sdev);
goto out;
@@ -1017,6 +1055,13 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
}
return SCSI_SCAN_NO_RESPONSE;
}
+
+ /*
+ * The queue_depth is often changed in ->slave_configure.
+ * Set up budget map again since memory consumption of
+ * the map depends on actual queue depth.
+ */
+ scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);
}
if (sdev->scsi_level >= SCSI_3)
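Why reallocation can be needed at all, sketched (hedged restatement of the decision logic inside scsi_realloc_sdev_budget_map() above): the sbitmap shift is derived from the depth the map was sized for, so a queue_depth changed by ->slave_configure invalidates the shift chosen at allocation time.

    int new_shift = sbitmap_calculate_shift(sdev->queue_depth);

    if (!sdev->budget_map.map || new_shift != sdev->budget_map.shift)
        scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth);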
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index d4edce930a4a..f1e0c131b77c 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -424,10 +424,15 @@ static struct attribute *scsi_sysfs_shost_attrs[] = {
NULL
};
-const struct attribute_group scsi_shost_attr_group = {
+static const struct attribute_group scsi_shost_attr_group = {
.attrs = scsi_sysfs_shost_attrs,
};
+const struct attribute_group *scsi_shost_groups[] = {
+ &scsi_shost_attr_group,
+ NULL
+};
+
static void scsi_device_cls_release(struct device *class_dev)
{
struct scsi_device *sdev;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 65875a598d62..62eb9921cc94 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -51,7 +51,6 @@
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
-#include <linux/async.h>
#include <linux/slab.h>
#include <linux/sed-opal.h>
#include <linux/pm_runtime.h>
@@ -210,7 +209,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
*/
data.device_specific = 0;
- if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
+ if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
sdkp->max_retries, &data, &sshdr)) {
if (scsi_sense_valid(&sshdr))
sd_print_sense_hdr(sdkp, &sshdr);
@@ -872,7 +871,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdp = cmd->device;
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
unsigned int data_len = 24;
@@ -908,7 +907,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
{
struct scsi_device *sdp = cmd->device;
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
@@ -940,7 +939,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
{
struct scsi_device *sdp = cmd->device;
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
@@ -971,7 +970,7 @@ static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -1068,7 +1067,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
struct bio *bio = rq->bio;
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -1116,7 +1115,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
/* flush requests don't perform I/O, zero the S/G table */
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
@@ -1215,7 +1214,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
struct scsi_device *sdp = cmd->device;
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
sector_t threshold;
unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
@@ -1236,7 +1235,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
goto fail;
}
- if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->rq_disk)) {
+ if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
goto fail;
}
@@ -1331,7 +1330,7 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
switch (req_op(rq)) {
case REQ_OP_DISCARD:
- switch (scsi_disk(rq->rq_disk)->provisioning_mode) {
+ switch (scsi_disk(rq->q->disk)->provisioning_mode) {
case SD_LBP_UNMAP:
return sd_setup_unmap_cmnd(cmd);
case SD_LBP_WS16:
@@ -1574,7 +1573,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
if (is_sed_ioctl(cmd))
return sed_ioctl(sdkp->opal_dev, cmd, p);
- return scsi_ioctl(sdp, disk, mode, cmd, p);
+ return scsi_ioctl(sdp, mode, cmd, p);
}
static void set_media_not_present(struct scsi_disk *sdkp)
@@ -1917,7 +1916,7 @@ static const struct block_device_operations sd_fops = {
**/
static void sd_eh_reset(struct scsi_cmnd *scmd)
{
- struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
/* New SCSI EH run, reset gate variable */
sdkp->ignore_medium_access_errors = false;
@@ -1937,7 +1936,7 @@ static void sd_eh_reset(struct scsi_cmnd *scmd)
**/
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
{
- struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
struct scsi_device *sdev = scmd->device;
if (!scsi_device_online(sdev) ||
@@ -2034,7 +2033,7 @@ static int sd_done(struct scsi_cmnd *SCpnt)
unsigned int resid;
struct scsi_sense_hdr sshdr;
struct request *req = scsi_cmd_to_rq(SCpnt);
- struct scsi_disk *sdkp = scsi_disk(req->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(req->q->disk);
int sense_valid = 0;
int sense_deferred = 0;
@@ -3566,7 +3565,6 @@ static int sd_probe(struct device *dev)
sd_revalidate_disk(gd);
- gd->flags = GENHD_FL_EXT_DEVT;
if (sdp->removable) {
gd->flags |= GENHD_FL_REMOVABLE;
gd->events |= DISK_EVENT_MEDIA_CHANGE;
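Every sd.c hunk above performs the same mechanical substitution: the gendisk is now reached through the request queue instead of the removed rq_disk field of the request. Isolated, the pattern is simply:

	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);	/* was scsi_disk(rq->rq_disk) */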
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index ed06798983f8..378d071e47cb 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -61,10 +61,10 @@ static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
zone.capacity = zone.len;
zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
- zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
- if (zone.type != ZBC_ZONE_TYPE_CONV &&
- zone.cond == ZBC_ZONE_COND_FULL)
+ if (zone.cond == ZBC_ZONE_COND_FULL)
zone.wp = zone.start + zone.len;
+ else
+ zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
ret = cb(&zone, idx, data);
if (ret)
@@ -244,7 +244,7 @@ out:
static blk_status_t sd_zbc_cmnd_checks(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
sector_t sector = blk_rq_pos(rq);
if (!sd_is_zoned(sdkp))
@@ -322,7 +322,7 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba,
unsigned int nr_blocks)
{
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
unsigned int wp_offset, zno = blk_rq_zone_no(rq);
unsigned long flags;
blk_status_t ret;
@@ -388,7 +388,7 @@ blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd,
{
struct request *rq = scsi_cmd_to_rq(cmd);
sector_t sector = blk_rq_pos(rq);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
sector_t block = sectors_to_logical(sdkp->device, sector);
blk_status_t ret;
@@ -443,7 +443,7 @@ static unsigned int sd_zbc_zone_wp_update(struct scsi_cmnd *cmd,
{
int result = cmd->result;
struct request *rq = scsi_cmd_to_rq(cmd);
- struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
unsigned int zno = blk_rq_zone_no(rq);
enum req_opf op = req_op(rq);
unsigned long flags;
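The zone-report change above makes the write-pointer assignment exhaustive: a FULL zone always reports a synthesized write pointer at the zone end, and every other zone keeps the device-reported value from bytes 24..31 of the descriptor. With illustrative numbers, a FULL zone works out as:

	zone.start = 0x100000;			/* illustrative values, in sectors */
	zone.len   = 0x80000;
	zone.cond  = ZBC_ZONE_COND_FULL;
	zone.wp    = zone.start + zone.len;	/* 0x180000; the buffer's wp field is ignored */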
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 141099ab9092..6b43e97bd417 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -77,7 +77,7 @@ static int sg_proc_init(void);
#define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ)
-int sg_big_buff = SG_DEF_RESERVED_SIZE;
+static int sg_big_buff = SG_DEF_RESERVED_SIZE;
/* N.B. This variable is readable and writeable via
/proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer
of this size (or less if there is not enough memory) will be reserved
@@ -833,7 +833,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
srp->rq->timeout = timeout;
kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
- blk_execute_rq_nowait(NULL, srp->rq, at_head, sg_rq_end_io);
+ blk_execute_rq_nowait(srp->rq, at_head, sg_rq_end_io);
return 0;
}
@@ -1109,7 +1109,7 @@ sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp,
case SCSI_IOCTL_SEND_COMMAND:
if (atomic_read(&sdp->detaching))
return -ENODEV;
- return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p);
+ return scsi_ioctl(sdp->device, filp->f_mode, cmd_in, p);
case SG_SET_DEBUG:
result = get_user(val, ip);
if (result)
@@ -1165,7 +1165,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p);
if (ret != -ENOIOCTLCMD)
return ret;
- return scsi_ioctl(sdp->device, NULL, filp->f_mode, cmd_in, p);
+ return scsi_ioctl(sdp->device, filp->f_mode, cmd_in, p);
}
static __poll_t
@@ -1634,6 +1634,37 @@ MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element "
MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd");
MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))");
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+
+static struct ctl_table sg_sysctls[] = {
+ {
+ .procname = "sg-big-buff",
+ .data = &sg_big_buff,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+
+static struct ctl_table_header *hdr;
+static void register_sg_sysctls(void)
+{
+ if (!hdr)
+ hdr = register_sysctl("kernel", sg_sysctls);
+}
+
+static void unregister_sg_sysctls(void)
+{
+ if (hdr)
+ unregister_sysctl_table(hdr);
+}
+#else
+#define register_sg_sysctls() do { } while (0)
+#define unregister_sg_sysctls() do { } while (0)
+#endif /* CONFIG_SYSCTL */
+
static int __init
init_sg(void)
{
@@ -1666,6 +1697,7 @@ init_sg(void)
return 0;
}
class_destroy(sg_sysfs_class);
+ register_sg_sysctls();
err_out:
unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS);
return rc;
@@ -1674,6 +1706,7 @@ err_out:
static void __exit
exit_sg(void)
{
+ unregister_sg_sysctls();
#ifdef CONFIG_SCSI_PROC_FS
remove_proc_subtree("scsi/sg", NULL);
#endif /* CONFIG_SCSI_PROC_FS */
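The sg change above is the common pattern for moving a sysctl out of kernel/sysctl.c: the variable becomes static, the driver registers its own table at init and unregisters it at exit, and the whole block compiles away without CONFIG_SYSCTL. A stripped-down sketch for a hypothetical read-only knob (all "example" names are stand-ins):

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>

static int example_knob;			/* hypothetical variable */

static struct ctl_table example_sysctls[] = {
	{
		.procname	= "example-knob",	/* /proc/sys/kernel/example-knob */
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0444,			/* read-only */
		.proc_handler	= proc_dointvec,
	},
	{}						/* table terminator */
};

static struct ctl_table_header *example_hdr;

static void register_example_sysctls(void)
{
	if (!example_hdr)
		example_hdr = register_sysctl("kernel", example_sysctls);
}

static void unregister_example_sysctls(void)
{
	if (example_hdr)
		unregister_sysctl_table(example_hdr);
}
#else
#define register_example_sysctls()	do { } while (0)
#define unregister_example_sysctls()	do { } while (0)
#endif /* CONFIG_SYSCTL */

The module init and exit paths then call the register/unregister helpers, exactly as the init_sg()/exit_sg() hunks above do.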
diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c
index e9ccfb97773f..27e98df83b31 100644
--- a/drivers/scsi/snic/snic_disc.c
+++ b/drivers/scsi/snic/snic_disc.c
@@ -100,7 +100,7 @@ snic_queue_report_tgt_req(struct snic *snic)
SNIC_BUG_ON(ntgts == 0);
buf_len = ntgts * sizeof(struct snic_tgt_id) + SNIC_SG_DESC_ALIGN;
- buf = kzalloc(buf_len, GFP_KERNEL|GFP_DMA);
+ buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf) {
snic_req_free(snic, rqi);
SNIC_HOST_ERR(snic->shost, "Resp Buf Alloc Failed.\n");
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 8e4af111c078..f925b1f1f9ad 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -335,7 +335,7 @@ static int sr_done(struct scsi_cmnd *SCpnt)
int block_sectors = 0;
long error_sector;
struct request *rq = scsi_cmd_to_rq(SCpnt);
- struct scsi_cd *cd = scsi_cd(rq->rq_disk);
+ struct scsi_cd *cd = scsi_cd(rq->q->disk);
#ifdef DEBUG
scmd_printk(KERN_INFO, SCpnt, "done: %x\n", result);
@@ -402,7 +402,7 @@ static blk_status_t sr_init_command(struct scsi_cmnd *SCpnt)
ret = scsi_alloc_sgtables(SCpnt);
if (ret != BLK_STS_OK)
return ret;
- cd = scsi_cd(rq->rq_disk);
+ cd = scsi_cd(rq->q->disk);
SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
"Doing sr request, block = %d\n", block));
@@ -561,8 +561,7 @@ static void sr_block_release(struct gendisk *disk, fmode_t mode)
static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
unsigned long arg)
{
- struct gendisk *disk = bdev->bd_disk;
- struct scsi_cd *cd = scsi_cd(disk);
+ struct scsi_cd *cd = scsi_cd(bdev->bd_disk);
struct scsi_device *sdev = cd->device;
void __user *argp = (void __user *)arg;
int ret;
@@ -584,7 +583,7 @@ static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
if (ret != -ENOSYS)
goto put;
}
- ret = scsi_ioctl(sdev, disk, mode, cmd, argp);
+ ret = scsi_ioctl(sdev, mode, cmd, argp);
put:
scsi_autopm_put_device(sdev);
@@ -684,9 +683,10 @@ static int sr_probe(struct device *dev)
disk->minors = 1;
sprintf(disk->disk_name, "sr%d", minor);
disk->fops = &sr_bdops;
- disk->flags = GENHD_FL_CD | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_NO_PART;
disk->events = DISK_EVENT_MEDIA_CHANGE | DISK_EVENT_EJECT_REQUEST;
- disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
+ disk->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT |
+ DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE;
blk_queue_rq_timeout(sdev->request_queue, SR_TIMEOUT);
@@ -725,7 +725,6 @@ static int sr_probe(struct device *dev)
blk_pm_runtime_init(sdev->request_queue, dev);
dev_set_drvdata(dev, cd);
- disk->flags |= GENHD_FL_REMOVABLE;
sr_revalidate_disk(cd);
error = device_add_disk(&sdev->sdev_gendev, disk, NULL);
@@ -856,7 +855,7 @@ static void get_capabilities(struct scsi_cd *cd)
/* allocate transfer buffer */
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer) {
sr_printk(KERN_ERR, cd, "out of memory.\n");
return;
@@ -994,7 +993,7 @@ static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
rq->timeout = 60 * HZ;
bio = rq->bio;
- blk_execute_rq(disk, rq, 0);
+ blk_execute_rq(rq, false);
if (scsi_req(rq)->result) {
struct scsi_sense_hdr sshdr;
diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c
index 1f988a1b9166..a61635326ae0 100644
--- a/drivers/scsi/sr_vendor.c
+++ b/drivers/scsi/sr_vendor.c
@@ -131,7 +131,7 @@ int sr_set_blocklength(Scsi_CD *cd, int blocklength)
if (cd->vendor == VENDOR_TOSHIBA)
density = (blocklength > 2048) ? 0x81 : 0x83;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -179,7 +179,7 @@ int sr_cd_check(struct cdrom_device_info *cdi)
if (cd->cdi.mask & CDC_MULTI_SESSION)
return 0;
- buffer = kmalloc(512, GFP_KERNEL | GFP_DMA);
+ buffer = kmalloc(512, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index c2d5608f6b1a..e869e90e05af 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -581,7 +581,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
rq->retries = retries;
req->end_io_data = SRpnt;
- blk_execute_rq_nowait(NULL, req, 1, st_scsi_execute_end);
+ blk_execute_rq_nowait(req, true, st_scsi_execute_end);
return 0;
}
@@ -3829,7 +3829,7 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg)
break;
}
- retval = scsi_ioctl(STp->device, NULL, file->f_mode, cmd_in, p);
+ retval = scsi_ioctl(STp->device, file->f_mode, cmd_in, p);
if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) {
/* unload */
STp->rew_at_close = 0;
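The st and sg conversions above reflect one API change: blk_execute_rq_nowait() and blk_execute_rq() lost their struct gendisk parameter, since the request already identifies its queue, and the at_head argument became a bool. The new calling convention, with st_scsi_execute_end standing in for any end_io callback:

	blk_execute_rq_nowait(req, true, st_scsi_execute_end);	/* async, at head */
	blk_execute_rq(rq, false);				/* sync, at tail */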
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 20595c0ba0ae..9a0bba5a51a7 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -21,6 +21,8 @@
#include <linux/device.h>
#include <linux/hyperv.h>
#include <linux/blkdev.h>
+#include <linux/dma-mapping.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -1336,6 +1338,7 @@ static void storvsc_on_channel_callback(void *context)
continue;
}
request = (struct storvsc_cmd_request *)scsi_cmd_priv(scmnd);
+ scsi_dma_unmap(scmnd);
}
storvsc_on_receive(stor_device, packet, request);
@@ -1749,9 +1752,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
struct hv_host_device *host_dev = shost_priv(host);
struct hv_device *dev = host_dev->dev;
struct storvsc_cmd_request *cmd_request = scsi_cmd_priv(scmnd);
- int i;
struct scatterlist *sgl;
- unsigned int sg_count;
struct vmscsi_request *vm_srb;
struct vmbus_packet_mpb_array *payload;
u32 payload_sz;
@@ -1824,17 +1825,17 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);
sgl = (struct scatterlist *)scsi_sglist(scmnd);
- sg_count = scsi_sg_count(scmnd);
length = scsi_bufflen(scmnd);
payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
payload_sz = sizeof(cmd_request->mpb);
- if (sg_count) {
- unsigned int hvpgoff, hvpfns_to_add;
+ if (scsi_sg_count(scmnd)) {
unsigned long offset_in_hvpg = offset_in_hvpage(sgl->offset);
unsigned int hvpg_count = HVPFN_UP(offset_in_hvpg + length);
- u64 hvpfn;
+ struct scatterlist *sg;
+ unsigned long hvpfn, hvpfns_to_add;
+ int j, i = 0, sg_count;
if (hvpg_count > MAX_PAGE_BUFFER_COUNT) {
@@ -1848,21 +1849,24 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
payload->range.len = length;
payload->range.offset = offset_in_hvpg;
+ sg_count = scsi_dma_map(scmnd);
+ if (sg_count < 0) {
+ ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto err_free_payload;
+ }
- for (i = 0; sgl != NULL; sgl = sg_next(sgl)) {
+ for_each_sg(sgl, sg, sg_count, j) {
/*
- * Init values for the current sgl entry. hvpgoff
- * and hvpfns_to_add are in units of Hyper-V size
- * pages. Handling the PAGE_SIZE != HV_HYP_PAGE_SIZE
- * case also handles values of sgl->offset that are
- * larger than PAGE_SIZE. Such offsets are handled
- * even on other than the first sgl entry, provided
- * they are a multiple of PAGE_SIZE.
+ * Init values for the current sgl entry. hvpfns_to_add
+ * is in units of Hyper-V size pages. Handling the
+ * PAGE_SIZE != HV_HYP_PAGE_SIZE case also handles
+ * values of sgl->offset that are larger than PAGE_SIZE.
+ * Such offsets are handled even on other than the first
+ * sgl entry, provided they are a multiple of PAGE_SIZE.
*/
- hvpgoff = HVPFN_DOWN(sgl->offset);
- hvpfn = page_to_hvpfn(sg_page(sgl)) + hvpgoff;
- hvpfns_to_add = HVPFN_UP(sgl->offset + sgl->length) -
- hvpgoff;
+ hvpfn = HVPFN_DOWN(sg_dma_address(sg));
+ hvpfns_to_add = HVPFN_UP(sg_dma_address(sg) +
+ sg_dma_len(sg)) - hvpfn;
/*
* Fill the next portion of the PFN array with
@@ -1872,7 +1876,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
* the PFN array is filled.
*/
while (hvpfns_to_add--)
- payload->range.pfn_array[i++] = hvpfn++;
+ payload->range.pfn_array[i++] = hvpfn++;
}
}
@@ -1884,13 +1888,18 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
put_cpu();
if (ret == -EAGAIN) {
- if (payload_sz > sizeof(cmd_request->mpb))
- kfree(payload);
/* no more space */
- return SCSI_MLQUEUE_DEVICE_BUSY;
+ ret = SCSI_MLQUEUE_DEVICE_BUSY;
+ goto err_free_payload;
}
return 0;
+
+err_free_payload:
+ if (payload_sz > sizeof(cmd_request->mpb))
+ kfree(payload);
+
+ return ret;
}
static struct scsi_host_template scsi_driver = {
@@ -2016,6 +2025,7 @@ static int storvsc_probe(struct hv_device *device,
stor_device->vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
spin_lock_init(&stor_device->lock);
hv_set_drvdata(device, stor_device);
+ dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
stor_device->port_number = host->host_no;
ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
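The storvsc rework above switches the PFN array construction from page-struct arithmetic to DMA-mapped addresses: scsi_dma_map() maps the scatterlist (returning the mapped entry count, negative on failure), and each mapped entry is then expanded into Hyper-V-page-sized frame numbers. The core of that loop, condensed from the hunks above:

	sg_count = scsi_dma_map(scmnd);
	if (sg_count < 0)
		return SCSI_MLQUEUE_DEVICE_BUSY;	/* the real code unwinds via err_free_payload */

	for_each_sg(sgl, sg, sg_count, j) {
		hvpfn = HVPFN_DOWN(sg_dma_address(sg));
		hvpfns_to_add = HVPFN_UP(sg_dma_address(sg) +
					 sg_dma_len(sg)) - hvpfn;
		while (hvpfns_to_add--)
			payload->range.pfn_array[i++] = hvpfn++;
	}

The dma_set_min_align_mask() call added in storvsc_probe() keeps the DMA layer from shifting buffers relative to HV_HYP_PAGE_SIZE boundaries, which this offset arithmetic depends on.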
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index b2521b830be7..9fe27b01904e 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -50,9 +50,11 @@ config SCSI_UFSHCD
However, do not compile this as a module if your root file system
(the one containing the directory /) is located on a UFS device.
+if SCSI_UFSHCD
+
config SCSI_UFSHCD_PCI
tristate "PCI bus based UFS Controller support"
- depends on SCSI_UFSHCD && PCI
+ depends on PCI
help
This selects the PCI UFS Host Controller Interface. Select this if
you have UFS Host Controller with PCI Interface.
@@ -71,7 +73,6 @@ config SCSI_UFS_DWC_TC_PCI
config SCSI_UFSHCD_PLATFORM
tristate "Platform bus based UFS Controller support"
- depends on SCSI_UFSHCD
depends on HAS_IOMEM
help
This selects the UFS host controller support. Select this if
@@ -147,7 +148,6 @@ config SCSI_UFS_TI_J721E
config SCSI_UFS_BSG
bool "Universal Flash Storage BSG device node"
- depends on SCSI_UFSHCD
select BLK_DEV_BSGLIB
help
Universal Flash Storage (UFS) is a SCSI transport specification for
@@ -177,7 +177,7 @@ config SCSI_UFS_EXYNOS
config SCSI_UFS_CRYPTO
bool "UFS Crypto Engine Support"
- depends on SCSI_UFSHCD && BLK_INLINE_ENCRYPTION
+ depends on BLK_INLINE_ENCRYPTION
help
Enable Crypto Engine Support in UFS.
Enabling this makes it possible for the kernel to use the crypto
@@ -186,7 +186,6 @@ config SCSI_UFS_CRYPTO
config SCSI_UFS_HPB
bool "Support UFS Host Performance Booster"
- depends on SCSI_UFSHCD
help
The UFS HPB feature improves random read performance. It caches
the L2P (logical to physical) map of UFS in host DRAM. The driver uses HPB
@@ -195,16 +194,18 @@ config SCSI_UFS_HPB
config SCSI_UFS_FAULT_INJECTION
bool "UFS Fault Injection Support"
- depends on SCSI_UFSHCD && FAULT_INJECTION
+ depends on FAULT_INJECTION
help
Enable fault injection support in the UFS driver. This makes it easier
to test the UFS error handler and abort handler.
config SCSI_UFS_HWMON
- bool "UFS Temperature Notification"
+ bool "UFS Temperature Notification"
depends on SCSI_UFSHCD=HWMON || HWMON=y
help
This provides support for UFS hardware monitoring. If enabled,
a hardware monitoring device will be created for the UFS device.
If unsure, say N.
+
+endif
diff --git a/drivers/scsi/ufs/tc-dwc-g210-pci.c b/drivers/scsi/ufs/tc-dwc-g210-pci.c
index 679289e1a78e..7b08e2e07cc5 100644
--- a/drivers/scsi/ufs/tc-dwc-g210-pci.c
+++ b/drivers/scsi/ufs/tc-dwc-g210-pci.c
@@ -110,7 +110,6 @@ tc_dwc_g210_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- pci_set_drvdata(pdev, hba);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index cd26bc82462e..474a4a064a68 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -853,14 +853,14 @@ static int exynos_ufs_post_pwr_mode(struct ufs_hba *hba,
}
static void exynos_ufs_specify_nexus_t_xfer_req(struct ufs_hba *hba,
- int tag, bool op)
+ int tag, bool is_scsi_cmd)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
u32 type;
type = hci_readl(ufs, HCI_UTRL_NEXUS_TYPE);
- if (op)
+ if (is_scsi_cmd)
hci_writel(ufs, type | (1 << tag), HCI_UTRL_NEXUS_TYPE);
else
hci_writel(ufs, type & ~(1 << tag), HCI_UTRL_NEXUS_TYPE);
diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
index 8c7e8d321746..ab1a7ebd89b1 100644
--- a/drivers/scsi/ufs/ufs-hisi.c
+++ b/drivers/scsi/ufs/ufs-hisi.c
@@ -396,6 +396,12 @@ out:
return ret;
}
+static int ufs_hisi_suspend_prepare(struct device *dev)
+{
+ /* RPM and SPM are different. Refer to ufs_hisi_suspend(). */
+ return __ufshcd_suspend_prepare(dev, false);
+}
+
static int ufs_hisi_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
@@ -578,7 +584,7 @@ static int ufs_hisi_remove(struct platform_device *pdev)
static const struct dev_pm_ops ufs_hisi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
- .prepare = ufshcd_suspend_prepare,
+ .prepare = ufs_hisi_suspend_prepare,
.complete = ufshcd_resume_complete,
};
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 5393b5c9dd9c..86a938075f30 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -557,7 +557,7 @@ static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba)
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
host->reg_va09 = regulator_get(hba->dev, "va09");
- if (!host->reg_va09)
+ if (IS_ERR(host->reg_va09))
dev_info(hba->dev, "failed to get va09");
else
host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;
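The mediatek fix above corrects a classic error-handling bug: regulator_get() never returns NULL on failure, it returns an ERR_PTR() value, so a !pointer check silently accepts the error pointer. The correct shape (the PTR_ERR() print is illustrative, not part of the patch):

	reg = regulator_get(dev, "va09");
	if (IS_ERR(reg))				/* not: if (!reg) */
		dev_info(dev, "failed to get va09: %ld\n", PTR_ERR(reg));
	else
		host->caps |= UFS_MTK_CAP_VA09_PWR_CTRL;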
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index f725248ba57f..f76692053ca1 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -538,8 +538,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- pci_set_drvdata(pdev, hba);
-
hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
err = ufshcd_init(hba, mmio_base, pdev->irq);
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index eaeae83b999f..87975d1a21c8 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -92,6 +92,11 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
clki->min_freq = clkfreq[i];
clki->max_freq = clkfreq[i+1];
clki->name = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!clki->name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
if (!strcmp(name, "ref_clk"))
clki->keep_link_active = true;
dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
@@ -127,6 +132,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
return -ENOMEM;
vreg->name = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!vreg->name)
+ return -ENOMEM;
snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
if (of_property_read_u32(np, prop_name, &vreg->max_uA)) {
@@ -361,8 +368,6 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
goto dealloc_host;
}
- platform_set_drvdata(pdev, hba);
-
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 13c09dbd99b9..9349557b8a01 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -128,8 +128,9 @@ EXPORT_SYMBOL_GPL(ufshcd_dump_regs);
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
- UFSHCD_CMD_PER_LUN = 32,
- UFSHCD_CAN_QUEUE = 32,
+ UFSHCD_NUM_RESERVED = 1,
+ UFSHCD_CMD_PER_LUN = 32 - UFSHCD_NUM_RESERVED,
+ UFSHCD_CAN_QUEUE = 32 - UFSHCD_NUM_RESERVED,
};
static const char *const ufshcd_state_name[] = {
@@ -1069,13 +1070,32 @@ static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
return false;
}
+/*
+ * Determine the number of pending commands by counting the bits in the SCSI
+ * device budget maps. This approach has been selected because a bit is set in
+ * the budget map before scsi_host_queue_ready() checks the host_self_blocked
+ * flag. The host_self_blocked flag can be modified by calling
+ * scsi_block_requests() or scsi_unblock_requests().
+ */
+static u32 ufshcd_pending_cmds(struct ufs_hba *hba)
+{
+ struct scsi_device *sdev;
+ u32 pending = 0;
+
+ lockdep_assert_held(hba->host->host_lock);
+ __shost_for_each_device(sdev, hba->host)
+ pending += sbitmap_weight(&sdev->budget_map);
+
+ return pending;
+}
+
static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
u64 wait_timeout_us)
{
unsigned long flags;
int ret = 0;
u32 tm_doorbell;
- u32 tr_doorbell;
+ u32 tr_pending;
bool timeout = false, do_last_check = false;
ktime_t start;
@@ -1093,8 +1113,8 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
}
tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- if (!tm_doorbell && !tr_doorbell) {
+ tr_pending = ufshcd_pending_cmds(hba);
+ if (!tm_doorbell && !tr_pending) {
timeout = false;
break;
} else if (do_last_check) {
@@ -1114,12 +1134,12 @@ static int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba,
do_last_check = true;
}
spin_lock_irqsave(hba->host->host_lock, flags);
- } while (tm_doorbell || tr_doorbell);
+ } while (tm_doorbell || tr_pending);
if (timeout) {
dev_err(hba->dev,
"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
- __func__, tm_doorbell, tr_doorbell);
+ __func__, tm_doorbell, tr_pending);
ret = -EBUSY;
}
out:
@@ -1352,25 +1372,6 @@ out:
return ret;
}
-static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved)
-{
- int *busy = priv;
-
- WARN_ON_ONCE(reserved);
- (*busy)++;
- return false;
-}
-
-/* Whether or not any tag is in use by a request that is in progress. */
-static bool ufshcd_any_tag_in_use(struct ufs_hba *hba)
-{
- struct request_queue *q = hba->cmd_queue;
- int busy = 0;
-
- blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy);
- return busy;
-}
-
static int ufshcd_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *stat)
{
@@ -1666,7 +1667,8 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
bool flush_result;
unsigned long flags;
- if (!ufshcd_is_clkgating_allowed(hba))
+ if (!ufshcd_is_clkgating_allowed(hba) ||
+ !hba->clk_gating.is_initialized)
goto out;
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
@@ -1769,7 +1771,7 @@ static void ufshcd_gate_work(struct work_struct *work)
if (hba->clk_gating.active_reqs
|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
+ || hba->outstanding_reqs || hba->outstanding_tasks
|| hba->active_uic_cmd || hba->uic_async_done)
goto rel_lock;
@@ -1826,7 +1828,7 @@ static void __ufshcd_release(struct ufs_hba *hba)
if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
- hba->outstanding_tasks ||
+ hba->outstanding_tasks || !hba->clk_gating.is_initialized ||
hba->active_uic_cmd || hba->uic_async_done ||
hba->clk_gating.state == CLKS_OFF)
return;
@@ -1961,11 +1963,15 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
if (!hba->clk_gating.is_initialized)
return;
+
ufshcd_remove_clk_gating_sysfs(hba);
- cancel_work_sync(&hba->clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
- destroy_workqueue(hba->clk_gating.clk_gating_workq);
+
+ /* Ungate the clock if necessary. */
+ ufshcd_hold(hba, false);
hba->clk_gating.is_initialized = false;
+ ufshcd_release(hba);
+
+ destroy_workqueue(hba->clk_gating.clk_gating_workq);
}
/* Must be called with host lock acquired */
@@ -2189,6 +2195,7 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba)
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+ hba->reserved_slot = hba->nutrs - 1;
/* Read crypto capabilities */
err = ufshcd_hba_init_crypto_capabilities(hba);
@@ -2650,17 +2657,42 @@ static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}
-static inline bool is_rpmb_wlun(struct scsi_device *sdev)
-{
- return sdev->lun == ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN);
-}
-
static inline bool is_device_wlun(struct scsi_device *sdev)
{
return sdev->lun ==
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN);
}
+/*
+ * Associate the UFS controller queue with the default and poll HCTX types.
+ * Initialize the mq_map[] arrays.
+ */
+static int ufshcd_map_queues(struct Scsi_Host *shost)
+{
+ int i, ret;
+
+ for (i = 0; i < shost->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &shost->tag_set.map[i];
+
+ switch (i) {
+ case HCTX_TYPE_DEFAULT:
+ case HCTX_TYPE_POLL:
+ map->nr_queues = 1;
+ break;
+ case HCTX_TYPE_READ:
+ map->nr_queues = 0;
+ continue;
+ default:
+ WARN_ON_ONCE(true);
+ }
+ map->queue_offset = 0;
+ ret = blk_mq_map_queues(map);
+ WARN_ON_ONCE(ret);
+ }
+
+ return 0;
+}
+
static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
{
struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
@@ -2696,10 +2728,13 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
struct ufshcd_lrb *lrbp;
int err = 0;
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
+ WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag);
- if (!down_read_trylock(&hba->clk_scaling_lock))
- return SCSI_MLQUEUE_HOST_BUSY;
+ /*
+ * Allows the UFS error handler to wait for prior ufshcd_queuecommand()
+ * calls.
+ */
+ rcu_read_lock();
switch (hba->ufshcd_state) {
case UFSHCD_STATE_OPERATIONAL:
@@ -2779,8 +2814,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
}
ufshcd_send_command(hba, tag);
+
out:
- up_read(&hba->clk_scaling_lock);
+ rcu_read_unlock();
if (ufs_trigger_eh()) {
unsigned long flags;
@@ -2936,30 +2972,15 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type, int timeout)
{
- struct request_queue *q = hba->cmd_queue;
DECLARE_COMPLETION_ONSTACK(wait);
- struct request *req;
+ const u32 tag = hba->reserved_slot;
struct ufshcd_lrb *lrbp;
int err;
- int tag;
- down_read(&hba->clk_scaling_lock);
+ /* Protects use of hba->reserved_slot. */
+ lockdep_assert_held(&hba->dev_cmd.lock);
- /*
- * Get free slot, sleep if slots are unavailable.
- * Even though we use wait_event() which sleeps indefinitely,
- * the maximum wait time is bounded by SCSI request timeout.
- */
- req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out_unlock;
- }
- tag = req->tag;
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
- /* Set the timeout such that the SCSI error handler is not activated. */
- req->timeout = msecs_to_jiffies(2 * timeout);
- blk_mq_start_request(req);
+ down_read(&hba->clk_scaling_lock);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
@@ -2977,8 +2998,6 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out:
- blk_mq_free_request(req);
-out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
@@ -4960,11 +4979,7 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
*/
static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth)
{
- struct ufs_hba *hba = shost_priv(sdev->host);
-
- if (depth > hba->nutrs)
- depth = hba->nutrs;
- return scsi_change_queue_depth(sdev, depth);
+ return scsi_change_queue_depth(sdev, min(depth, sdev->host->can_queue));
}
static void ufshcd_hpb_destroy(struct ufs_hba *hba, struct scsi_device *sdev)
@@ -5256,6 +5271,18 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
return retval;
}
+/* Release the resources allocated for processing a SCSI command. */
+static void ufshcd_release_scsi_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
+{
+ struct scsi_cmnd *cmd = lrbp->cmd;
+
+ scsi_dma_unmap(cmd);
+ lrbp->cmd = NULL; /* Mark the command as completed. */
+ ufshcd_release(hba);
+ ufshcd_clk_scaling_update_busy(hba);
+}
+
/**
* __ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
@@ -5266,9 +5293,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
{
struct ufshcd_lrb *lrbp;
struct scsi_cmnd *cmd;
- int result;
int index;
- bool update_scaling = false;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
@@ -5278,29 +5303,47 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
ufshcd_update_monitor(hba, lrbp);
ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
- result = ufshcd_transfer_rsp_status(hba, lrbp);
- scsi_dma_unmap(cmd);
- cmd->result = result;
- /* Mark completed command as NULL in LRB */
- lrbp->cmd = NULL;
+ cmd->result = ufshcd_transfer_rsp_status(hba, lrbp);
+ ufshcd_release_scsi_cmd(hba, lrbp);
/* Do not touch lrbp after scsi done */
scsi_done(cmd);
- ufshcd_release(hba);
- update_scaling = true;
} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
ufshcd_add_command_trace(hba, index,
UFS_DEV_COMP);
complete(hba->dev_cmd.complete);
- update_scaling = true;
+ ufshcd_clk_scaling_update_busy(hba);
}
}
- if (update_scaling)
- ufshcd_clk_scaling_update_busy(hba);
}
}
+/*
+ * Returns > 0 if one or more commands have been completed or 0 if no
+ * requests have been completed.
+ */
+static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ struct ufs_hba *hba = shost_priv(shost);
+ unsigned long completed_reqs, flags;
+ u32 tr_doorbell;
+
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
+ WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
+ "completed: %#lx; outstanding: %#lx\n", completed_reqs,
+ hba->outstanding_reqs);
+ hba->outstanding_reqs &= ~completed_reqs;
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ if (completed_reqs)
+ __ufshcd_transfer_req_compl(hba, completed_reqs);
+
+ return completed_reqs;
+}
+
/**
* ufshcd_transfer_req_compl - handle SCSI and query command completion
* @hba: per adapter instance
@@ -5311,9 +5354,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
*/
static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
- unsigned long completed_reqs, flags;
- u32 tr_doorbell;
-
/* Resetting interrupt aggregation counters first and reading the
* DOOR_BELL afterward allows us to handle all the completed requests.
* In order to prevent other interrupts starvation the DB is read once
@@ -5328,21 +5368,13 @@ static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba)
if (ufs_fail_completion())
return IRQ_HANDLED;
- spin_lock_irqsave(&hba->outstanding_lock, flags);
- tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- completed_reqs = ~tr_doorbell & hba->outstanding_reqs;
- WARN_ONCE(completed_reqs & ~hba->outstanding_reqs,
- "completed: %#lx; outstanding: %#lx\n", completed_reqs,
- hba->outstanding_reqs);
- hba->outstanding_reqs &= ~completed_reqs;
- spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+ /*
+ * Ignore the ufshcd_poll() return value and return IRQ_HANDLED since we
+ * do not want polling to trigger spurious interrupt complaints.
+ */
+ ufshcd_poll(hba->host, 0);
- if (completed_reqs) {
- __ufshcd_transfer_req_compl(hba, completed_reqs);
- return IRQ_HANDLED;
- } else {
- return IRQ_NONE;
- }
+ return IRQ_HANDLED;
}
int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask)
@@ -5986,8 +6018,7 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
}
ufshcd_scsi_block_requests(hba);
/* Drain ufshcd_queuecommand() */
- down_write(&hba->clk_scaling_lock);
- up_write(&hba->clk_scaling_lock);
+ synchronize_rcu();
cancel_work_sync(&hba->eeh_work);
}
@@ -6594,6 +6625,8 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
spin_lock_irqsave(host->host_lock, flags);
task_tag = req->tag;
+ WARN_ONCE(task_tag < 0 || task_tag >= hba->nutmrs, "Invalid tag %d\n",
+ task_tag);
hba->tmf_rqs[req->tag] = req;
treq->upiu_req.req_header.dword_0 |= cpu_to_be32(task_tag);
@@ -6711,28 +6744,16 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
enum dev_cmd_type cmd_type,
enum query_opcode desc_op)
{
- struct request_queue *q = hba->cmd_queue;
DECLARE_COMPLETION_ONSTACK(wait);
- struct request *req;
+ const u32 tag = hba->reserved_slot;
struct ufshcd_lrb *lrbp;
int err = 0;
- int tag;
u8 upiu_flags;
- down_read(&hba->clk_scaling_lock);
-
- req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
- if (IS_ERR(req)) {
- err = PTR_ERR(req);
- goto out_unlock;
- }
- tag = req->tag;
- WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
+ /* Protects use of hba->reserved_slot. */
+ lockdep_assert_held(&hba->dev_cmd.lock);
- if (unlikely(test_bit(tag, &hba->outstanding_reqs))) {
- err = -EBUSY;
- goto out;
- }
+ down_read(&hba->clk_scaling_lock);
lrbp = &hba->lrb[tag];
WARN_ON(lrbp->cmd);
@@ -6801,9 +6822,6 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
-out:
- blk_mq_free_request(req);
-out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
}
@@ -7033,6 +7051,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
struct ufshcd_lrb *lrbp = &hba->lrb[tag];
unsigned long flags;
int err = FAILED;
+ bool outstanding;
u32 reg;
WARN_ONCE(tag < 0, "Invalid tag %d\n", tag);
@@ -7110,7 +7129,17 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
goto release;
}
- lrbp->cmd = NULL;
+ /*
+ * Clear the corresponding bit from outstanding_reqs since the command
+ * has been aborted successfully.
+ */
+ spin_lock_irqsave(&hba->outstanding_lock, flags);
+ outstanding = __test_and_clear_bit(tag, &hba->outstanding_reqs);
+ spin_unlock_irqrestore(&hba->outstanding_lock, flags);
+
+ if (outstanding)
+ ufshcd_release_scsi_cmd(hba, lrbp);
+
err = SUCCESS;
release:
@@ -7412,7 +7441,7 @@ static inline void ufshcd_blk_pm_runtime_init(struct scsi_device *sdev)
static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
{
int ret = 0;
- struct scsi_device *sdev_boot;
+ struct scsi_device *sdev_boot, *sdev_rpmb;
hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
@@ -7423,14 +7452,14 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
}
scsi_device_put(hba->sdev_ufs_device);
- hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+ sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
- if (IS_ERR(hba->sdev_rpmb)) {
- ret = PTR_ERR(hba->sdev_rpmb);
+ if (IS_ERR(sdev_rpmb)) {
+ ret = PTR_ERR(sdev_rpmb);
goto remove_sdev_ufs_device;
}
- ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
- scsi_device_put(hba->sdev_rpmb);
+ ufshcd_blk_pm_runtime_init(sdev_rpmb);
+ scsi_device_put(sdev_rpmb);
sdev_boot = __scsi_add_device(hba->host, 0, 0,
ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
@@ -7786,7 +7815,7 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
peer_pa_tactivate_us = peer_pa_tactivate *
gran_to_us_table[peer_granularity - 1];
- if (pa_tactivate_us > peer_pa_tactivate_us) {
+ if (pa_tactivate_us >= peer_pa_tactivate_us) {
u32 new_peer_pa_tactivate;
new_peer_pa_tactivate = pa_tactivate_us /
@@ -8156,7 +8185,9 @@ static struct scsi_host_template ufshcd_driver_template = {
.module = THIS_MODULE,
.name = UFSHCD,
.proc_name = UFSHCD,
+ .map_queues = ufshcd_map_queues,
.queuecommand = ufshcd_queuecommand,
+ .mq_poll = ufshcd_poll,
.slave_alloc = ufshcd_slave_alloc,
.slave_configure = ufshcd_slave_configure,
.slave_destroy = ufshcd_slave_destroy,
@@ -8582,7 +8613,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba)
* @pwr_mode: device power mode to set
*
* Returns 0 if requested power mode is set successfully
- * Returns non-zero if failed to set the requested power mode
+ * Returns < 0 if failed to set the requested power mode
*/
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
enum ufs_dev_pwr_mode pwr_mode)
@@ -8636,8 +8667,11 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
sdev_printk(KERN_WARNING, sdp,
"START_STOP failed for power mode: %d, result %x\n",
pwr_mode, ret);
- if (ret > 0 && scsi_sense_valid(&sshdr))
- scsi_print_sense_hdr(sdp, NULL, &sshdr);
+ if (ret > 0) {
+ if (scsi_sense_valid(&sshdr))
+ scsi_print_sense_hdr(sdp, NULL, &sshdr);
+ ret = -EIO;
+ }
}
if (!ret)
@@ -9384,7 +9418,6 @@ void ufshcd_remove(struct ufs_hba *hba)
ufs_sysfs_remove_nodes(hba->dev);
blk_cleanup_queue(hba->tmf_queue);
blk_mq_free_tag_set(&hba->tmf_tag_set);
- blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
@@ -9445,6 +9478,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+ host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
hba->host = host;
hba->dev = dev;
@@ -9486,6 +9520,13 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
struct device *dev = hba->dev;
char eh_wq_name[sizeof("ufs_eh_wq_00")];
+ /*
+ * dev_set_drvdata() must be called before any callbacks are registered
+ * that use dev_get_drvdata() (frequency scaling, clock scaling, hwmon,
+ * sysfs).
+ */
+ dev_set_drvdata(dev, hba);
+
if (!mmio_base) {
dev_err(hba->dev,
"Invalid memory reference for mmio_base is NULL\n");
@@ -9528,8 +9569,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Configure LRB */
ufshcd_host_memory_configure(hba);
- host->can_queue = hba->nutrs;
- host->cmd_per_lun = hba->nutrs;
+ host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED;
+ host->cmd_per_lun = hba->nutrs - UFSHCD_NUM_RESERVED;
host->max_id = UFSHCD_MAX_ID;
host->max_lun = UFS_MAX_LUNS;
host->max_channel = UFSHCD_MAX_CHANNEL;
@@ -9597,12 +9638,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_disable;
}
- hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
- if (IS_ERR(hba->cmd_queue)) {
- err = PTR_ERR(hba->cmd_queue);
- goto out_remove_scsi_host;
- }
-
hba->tmf_tag_set = (struct blk_mq_tag_set) {
.nr_hw_queues = 1,
.queue_depth = hba->nutmrs,
@@ -9611,7 +9646,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
};
err = blk_mq_alloc_tag_set(&hba->tmf_tag_set);
if (err < 0)
- goto free_cmd_queue;
+ goto out_remove_scsi_host;
hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set);
if (IS_ERR(hba->tmf_queue)) {
err = PTR_ERR(hba->tmf_queue);
@@ -9680,8 +9715,6 @@ free_tmf_queue:
blk_cleanup_queue(hba->tmf_queue);
free_tmf_tag_set:
blk_mq_free_tag_set(&hba->tmf_tag_set);
-free_cmd_queue:
- blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
scsi_remove_host(hba->host);
out_disable:
@@ -9703,7 +9736,27 @@ void ufshcd_resume_complete(struct device *dev)
}
EXPORT_SYMBOL_GPL(ufshcd_resume_complete);
-int ufshcd_suspend_prepare(struct device *dev)
+static bool ufshcd_rpm_ok_for_spm(struct ufs_hba *hba)
+{
+ struct device *dev = &hba->sdev_ufs_device->sdev_gendev;
+ enum ufs_dev_pwr_mode dev_pwr_mode;
+ enum uic_link_state link_state;
+ unsigned long flags;
+ bool res;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+ dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl);
+ link_state = ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl);
+ res = pm_runtime_suspended(dev) &&
+ hba->curr_dev_pwr_mode == dev_pwr_mode &&
+ hba->uic_link_state == link_state &&
+ !hba->dev_info.b_rpm_dev_flush_capable;
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return res;
+}
+
+int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
int ret;
@@ -9715,15 +9768,30 @@ int ufshcd_suspend_prepare(struct device *dev)
* Refer ufshcd_resume_complete()
*/
if (hba->sdev_ufs_device) {
- ret = ufshcd_rpm_get_sync(hba);
- if (ret < 0 && ret != -EACCES) {
- ufshcd_rpm_put(hba);
- return ret;
+ /* Prevent runtime suspend */
+ ufshcd_rpm_get_noresume(hba);
+ /*
+ * Check if already runtime suspended in same state as system
+ * suspend would be.
+ */
+ if (!rpm_ok_for_spm || !ufshcd_rpm_ok_for_spm(hba)) {
+ /* RPM state is not ok for SPM, so runtime resume */
+ ret = ufshcd_rpm_resume(hba);
+ if (ret < 0 && ret != -EACCES) {
+ ufshcd_rpm_put(hba);
+ return ret;
+ }
}
hba->complete_put = true;
}
return 0;
}
+EXPORT_SYMBOL_GPL(__ufshcd_suspend_prepare);
+
+int ufshcd_suspend_prepare(struct device *dev)
+{
+ return __ufshcd_suspend_prepare(dev, true);
+}
EXPORT_SYMBOL_GPL(ufshcd_suspend_prepare);
#ifdef CONFIG_PM_SLEEP
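The ufshcd changes above replace block-layer tag allocation for device management commands with a single reserved slot: ufshcd_hba_capabilities() sets reserved_slot to the last transfer request slot, can_queue and cmd_per_lun shrink by UFSHCD_NUM_RESERVED so the SCSI core never hands that tag out, and dev_cmd.lock serializes the slot's users (hence the lockdep assertions). A simplified view of the issue path, assuming dev_cmd.lock is the mutex those assertions refer to:

	mutex_lock(&hba->dev_cmd.lock);			/* serializes reserved_slot users */
	lrbp = &hba->lrb[hba->reserved_slot];		/* tag nutrs - 1, invisible to SCSI */
	/* ... compose the device command UPIU in lrbp and issue it ... */
	mutex_unlock(&hba->dev_cmd.lock);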
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 54750d72c8fb..88c20f3608c2 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -338,7 +338,8 @@ struct ufs_hba_variant_ops {
enum ufs_notify_change_status status,
struct ufs_pa_layer_attr *,
struct ufs_pa_layer_attr *);
- void (*setup_xfer_req)(struct ufs_hba *, int, bool);
+ void (*setup_xfer_req)(struct ufs_hba *hba, int tag,
+ bool is_scsi_cmd);
void (*setup_task_mgmt)(struct ufs_hba *, int, u8);
void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
enum ufs_notify_change_status);
@@ -737,13 +738,13 @@ struct ufs_hba_monitor {
* @host: Scsi_Host instance of the driver
* @dev: device handle
* @lrb: local reference block
- * @cmd_queue: Used to allocate command tags from hba->host->tag_set.
* @outstanding_tasks: Bits representing outstanding task requests
* @outstanding_lock: Protects @outstanding_reqs.
* @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities
* @nutrs: Transfer Request Queue depth supported by controller
* @nutmrs: Task Management Queue depth supported by controller
+ * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock.
* @ufs_version: UFS Version to which controller complies
* @vops: pointer to variant specific operations
* @priv: pointer to variant specific private data
@@ -777,6 +778,7 @@ struct ufs_hba_monitor {
* @clk_list_head: UFS host controller clocks list node head
* @pwr_info: holds current power mode
* @max_pwr_info: keeps the device max valid pwm
+ * @clk_scaling_lock: used to serialize device commands and clock scaling
* @desc_size: descriptor sizes reported by device
* @urgent_bkops_lvl: keeps track of urgent bkops level for device
* @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
@@ -802,13 +804,11 @@ struct ufs_hba {
struct Scsi_Host *host;
struct device *dev;
- struct request_queue *cmd_queue;
/*
* This field is to keep a reference to "scsi_device" corresponding to
* "UFS device" W-LU.
*/
struct scsi_device *sdev_ufs_device;
- struct scsi_device *sdev_rpmb;
#ifdef CONFIG_SCSI_UFS_HWMON
struct device *hwmon_device;
@@ -836,6 +836,7 @@ struct ufs_hba {
u32 capabilities;
int nutrs;
int nutmrs;
+ u32 reserved_slot;
u32 ufs_version;
const struct ufs_hba_variant_ops *vops;
struct ufs_hba_variant_params *vps;
@@ -1211,6 +1212,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);
+int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
void ufshcd_resume_complete(struct device *dev);
/* Wrapper functions for safely calling variant operations */
@@ -1420,6 +1422,16 @@ static inline int ufshcd_rpm_put_sync(struct ufs_hba *hba)
return pm_runtime_put_sync(&hba->sdev_ufs_device->sdev_gendev);
}
+static inline void ufshcd_rpm_get_noresume(struct ufs_hba *hba)
+{
+ pm_runtime_get_noresume(&hba->sdev_ufs_device->sdev_gendev);
+}
+
+static inline int ufshcd_rpm_resume(struct ufs_hba *hba)
+{
+ return pm_runtime_resume(&hba->sdev_ufs_device->sdev_gendev);
+}
+
static inline int ufshcd_rpm_put(struct ufs_hba *hba)
{
return pm_runtime_put(&hba->sdev_ufs_device->sdev_gendev);
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 6a295c88d850..a7ff0e5b5494 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -142,7 +142,8 @@ static inline u32 ufshci_version(u32 major, u32 minor)
#define INT_FATAL_ERRORS (DEVICE_FATAL_ERROR |\
CONTROLLER_FATAL_ERROR |\
SYSTEM_BUS_FATAL_ERROR |\
- CRYPTO_ENGINE_FATAL_ERROR)
+ CRYPTO_ENGINE_FATAL_ERROR |\
+ UIC_LINK_LOST)
/* HCS - Host Controller Status 30h */
#define DEVICE_PRESENT 0x1
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index ded5ba9b1466..2d36a0715fca 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -10,7 +10,6 @@
*/
#include <asm/unaligned.h>
-#include <linux/async.h>
#include "ufshcd.h"
#include "ufshpb.h"
@@ -677,7 +676,7 @@ static void ufshpb_execute_umap_req(struct ufshpb_lu *hpb,
ufshpb_set_unmap_cmd(rq->cmd, rgn);
rq->cmd_len = HPB_WRITE_BUFFER_CMD_LENGTH;
- blk_execute_rq_nowait(NULL, req, 1, ufshpb_umap_req_compl_fn);
+ blk_execute_rq_nowait(req, true, ufshpb_umap_req_compl_fn);
hpb->stats.umap_req_cnt++;
}
@@ -719,7 +718,7 @@ static int ufshpb_execute_map_req(struct ufshpb_lu *hpb,
map_req->rb.srgn_idx, mem_size);
rq->cmd_len = HPB_READ_BUFFER_CMD_LENGTH;
- blk_execute_rq_nowait(NULL, req, 1, ufshpb_map_req_compl_fn);
+ blk_execute_rq_nowait(req, true, ufshpb_map_req_compl_fn);
hpb->stats.map_req_cnt++;
return 0;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 28e1d98ae102..0e6110da69e7 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -528,7 +528,7 @@ static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev,
if (!rq || !scsi_prot_sg_count(sc))
return;
- bi = blk_get_integrity(rq->rq_disk);
+ bi = blk_get_integrity(rq->q->disk);
if (sc->sc_data_direction == DMA_TO_DEVICE)
cmd_pi->pi_bytesout = cpu_to_virtio32(vdev,
@@ -778,7 +778,7 @@ static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
/* Stop all the virtqueues. */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
}
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 72771e018c42..258894ed234b 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -306,10 +306,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
}
lpc_ctrl->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(lpc_ctrl->clk)) {
- dev_err(dev, "couldn't get clock\n");
- return PTR_ERR(lpc_ctrl->clk);
- }
+ if (IS_ERR(lpc_ctrl->clk))
+ return dev_err_probe(dev, PTR_ERR(lpc_ctrl->clk),
+ "couldn't get clock\n");
rc = clk_prepare_enable(lpc_ctrl->clk);
if (rc) {
dev_err(dev, "couldn't enable clock\n");
diff --git a/drivers/soc/canaan/Kconfig b/drivers/soc/canaan/Kconfig
index 853096b7e84c..2527cf5757ec 100644
--- a/drivers/soc/canaan/Kconfig
+++ b/drivers/soc/canaan/Kconfig
@@ -5,7 +5,6 @@ config SOC_K210_SYSCTL
depends on RISCV && SOC_CANAAN && OF
default SOC_CANAAN
select PM
- select SYSCON
select MFD_SYSCON
help
Canaan Kendryte K210 SoC system controller driver.
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index dd948889eeab..5a2edc48dd79 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -88,7 +88,7 @@ static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev)
irq = dpio_dev->irqs[0];
/* clear the affinity hint */
- irq_set_affinity_hint(irq->msi_desc->irq, NULL);
+ irq_set_affinity_hint(irq->virq, NULL);
}
static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
@@ -98,7 +98,7 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
irq = dpio_dev->irqs[0];
error = devm_request_irq(&dpio_dev->dev,
- irq->msi_desc->irq,
+ irq->virq,
dpio_irq_handler,
0,
dev_name(&dpio_dev->dev),
@@ -111,10 +111,10 @@ static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
}
/* set the affinity hint */
- if (irq_set_affinity_hint(irq->msi_desc->irq, cpumask_of(cpu)))
+ if (irq_set_affinity_hint(irq->virq, cpumask_of(cpu)))
dev_err(&dpio_dev->dev,
"irq_set_affinity failed irq %d cpu %d\n",
- irq->msi_desc->irq, cpu);
+ irq->virq, cpu);
return 0;
}
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index acda8a5637c5..4d7b9caee1c4 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -155,7 +155,7 @@ static int bman_portal_probe(struct platform_device *pdev)
}
spin_lock(&bman_lock);
- cpu = cpumask_next_zero(-1, &portal_cpus);
+ cpu = cpumask_first_zero(&portal_cpus);
if (cpu >= nr_cpu_ids) {
__bman_portals_probed = 1;
/* unassigned portal, skip init */
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index 96f74a1dc603..e23b60618c1a 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -248,7 +248,7 @@ static int qman_portal_probe(struct platform_device *pdev)
pcfg->pools = qm_get_pools_sdqcr();
spin_lock(&qman_lock);
- cpu = cpumask_next_zero(-1, &portal_cpus);
+ cpu = cpumask_first_zero(&portal_cpus);
if (cpu >= nr_cpu_ids) {
__qman_portals_probed = 1;
/* unassigned portal, skip init */
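Both portal probes above make the same substitution: cpumask_first_zero(mask) is the direct spelling of the old cpumask_next_zero(-1, mask) idiom, returning the first clear bit, or a value >= nr_cpu_ids when the mask is full. Sketched, with the early return as an illustrative stand-in for the skip-init path:

	cpu = cpumask_first_zero(&portal_cpus);	/* == cpumask_next_zero(-1, ...) */
	if (cpu >= nr_cpu_ids)
		return 0;			/* every CPU already has a portal */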
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index a9f8b224322e..02e319508cc6 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -31,7 +31,7 @@ config EXYNOS_USI
help
Enable support for USI block. USI (Universal Serial Interface) is an
IP-core found in modern Samsung Exynos SoCs, like Exynos850 and
- ExynosAutoV0. USI block can be configured to provide one of the
+ ExynosAutoV9. USI block can be configured to provide one of the
following serial protocols: UART, SPI or High Speed I2C.
This driver allows one to configure USI for desired protocol, which
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 312ba0f98ad7..31ab6c657fec 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -358,8 +358,8 @@ struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
goto out;
if (flags & K3_RINGACC_RING_USE_PROXY) {
- proxy_id = find_next_zero_bit(ringacc->proxy_inuse,
- ringacc->num_proxies, 0);
+ proxy_id = find_first_zero_bit(ringacc->proxy_inuse,
+ ringacc->num_proxies);
if (proxy_id == ringacc->num_proxies)
goto error;
}
@@ -647,7 +647,7 @@ int k3_ringacc_get_ring_irq_num(struct k3_ring *ring)
if (!ring)
return -EINVAL;
- irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id);
+ irq_num = msi_get_virq(ring->parent->dev, ring->ring_id);
if (irq_num <= 0)
irq_num = -EINVAL;
return irq_num;
@@ -1356,9 +1356,9 @@ static int k3_ringacc_init(struct platform_device *pdev,
struct resource *res;
int ret, i;
- dev->msi_domain = of_msi_get_domain(dev, dev->of_node,
+ dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
DOMAIN_BUS_TI_SCI_INTA_MSI);
- if (!dev->msi_domain) {
+ if (!dev->msi.domain) {
dev_err(dev, "Failed to get MSI domain\n");
return -EPROBE_DEFER;
}
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
index a1d9c027022a..991c78b34745 100644
--- a/drivers/soc/ti/ti_sci_inta_msi.c
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -51,6 +51,7 @@ struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnod
struct irq_domain *domain;
ti_sci_inta_msi_update_chip_ops(info);
+ info->flags |= MSI_FLAG_FREE_MSI_DESCS;
domain = msi_create_irq_domain(fwnode, info, parent);
if (domain)
@@ -60,50 +61,32 @@ struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnod
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_create_irq_domain);
-static void ti_sci_inta_msi_free_descs(struct device *dev)
-{
- struct msi_desc *desc, *tmp;
-
- list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
- list_del(&desc->list);
- free_msi_entry(desc);
- }
-}
-
static int ti_sci_inta_msi_alloc_descs(struct device *dev,
struct ti_sci_resource *res)
{
- struct msi_desc *msi_desc;
+ struct msi_desc msi_desc;
int set, i, count = 0;
+ memset(&msi_desc, 0, sizeof(msi_desc));
+ msi_desc.nvec_used = 1;
+
for (set = 0; set < res->sets; set++) {
- for (i = 0; i < res->desc[set].num; i++) {
- msi_desc = alloc_msi_entry(dev, 1, NULL);
- if (!msi_desc) {
- ti_sci_inta_msi_free_descs(dev);
- return -ENOMEM;
- }
-
- msi_desc->inta.dev_index = res->desc[set].start + i;
- INIT_LIST_HEAD(&msi_desc->list);
- list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
- count++;
+ for (i = 0; i < res->desc[set].num; i++, count++) {
+ msi_desc.msi_index = res->desc[set].start + i;
+ if (msi_add_msi_desc(dev, &msi_desc))
+ goto fail;
}
- for (i = 0; i < res->desc[set].num_sec; i++) {
- msi_desc = alloc_msi_entry(dev, 1, NULL);
- if (!msi_desc) {
- ti_sci_inta_msi_free_descs(dev);
- return -ENOMEM;
- }
-
- msi_desc->inta.dev_index = res->desc[set].start_sec + i;
- INIT_LIST_HEAD(&msi_desc->list);
- list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
- count++;
+
+ for (i = 0; i < res->desc[set].num_sec; i++, count++) {
+ msi_desc.msi_index = res->desc[set].start_sec + i;
+ if (msi_add_msi_desc(dev, &msi_desc))
+ goto fail;
}
}
-
return count;
+fail:
+ msi_free_msi_descs(dev);
+ return -ENOMEM;
}
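/*
 * msi_add_msi_desc() copies the template it is given, which is why a
 * single stack-allocated struct msi_desc can be reused across both loops
 * above with only msi_index changing. A condensed sketch of the pattern
 * (hypothetical base/count values; in this driver the descriptor lock is
 * taken by the caller):
 *
 * struct msi_desc template = { .nvec_used = 1 };
 *
 * msi_lock_descs(dev);
 * for (i = 0; i < count; i++) {
 * template.msi_index = base + i;
 * if (msi_add_msi_desc(dev, &template))
 * break;
 * }
 * msi_unlock_descs(dev);
 */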
int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
@@ -120,39 +103,22 @@ int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
if (pdev->id < 0)
return -ENODEV;
- nvec = ti_sci_inta_msi_alloc_descs(dev, res);
- if (nvec <= 0)
- return nvec;
+ ret = msi_setup_device_data(dev);
+ if (ret)
+ return ret;
- ret = msi_domain_alloc_irqs(msi_domain, dev, nvec);
- if (ret) {
- dev_err(dev, "Failed to allocate IRQs %d\n", ret);
- goto cleanup;
+ msi_lock_descs(dev);
+ nvec = ti_sci_inta_msi_alloc_descs(dev, res);
+ if (nvec <= 0) {
+ ret = nvec;
+ goto unlock;
}
- return 0;
-
-cleanup:
- ti_sci_inta_msi_free_descs(&pdev->dev);
+ ret = msi_domain_alloc_irqs_descs_locked(msi_domain, dev, nvec);
+ if (ret)
+ dev_err(dev, "Failed to allocate IRQs %d\n", ret);
+unlock:
+ msi_unlock_descs(dev);
return ret;
}
EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_alloc_irqs);
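/*
 * Note the ordering above: the per-device MSI data is set up first, then
 * the descriptors are added and the interrupts allocated while holding the
 * descriptor lock, which is why the _descs_locked() variant of the
 * domain-alloc API is used.
 */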
-
-void ti_sci_inta_msi_domain_free_irqs(struct device *dev)
-{
- msi_domain_free_irqs(dev->msi_domain, dev);
- ti_sci_inta_msi_free_descs(dev);
-}
-EXPORT_SYMBOL_GPL(ti_sci_inta_msi_domain_free_irqs);
-
-unsigned int ti_sci_inta_msi_get_virq(struct device *dev, u32 dev_index)
-{
- struct msi_desc *desc;
-
- for_each_msi_entry(desc, dev)
- if (desc->inta.dev_index == dev_index)
- return desc->irq;
-
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(ti_sci_inta_msi_get_virq);
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 53af9115dc31..8a755a5c8836 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -25,4 +25,14 @@ config ZYNQMP_PM_DOMAINS
Say yes to enable device power management through PM domains
If in doubt, say N.
+config XLNX_EVENT_MANAGER
+ bool "Enable Xilinx Event Management Driver"
+ depends on ZYNQMP_FIRMWARE
+ default ZYNQMP_FIRMWARE
+ help
+ Say yes to enable event management support for Xilinx platforms.
+ This driver uses the firmware driver as an interface for event/power
+ management requests to the firmware.
+
+ If in doubt, say N.
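+
+# Illustrative defconfig fragment enabling this driver (the symbols are
+# the ones defined above and in the ZynqMP firmware Kconfig):
+#   CONFIG_ZYNQMP_FIRMWARE=y
+#   CONFIG_XLNX_EVENT_MANAGER=y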
endmenu
diff --git a/drivers/soc/xilinx/Makefile b/drivers/soc/xilinx/Makefile
index 9854e6f6086b..41e585bc9c67 100644
--- a/drivers/soc/xilinx/Makefile
+++ b/drivers/soc/xilinx/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_ZYNQMP_POWER) += zynqmp_power.o
obj-$(CONFIG_ZYNQMP_PM_DOMAINS) += zynqmp_pm_domains.o
+obj-$(CONFIG_XLNX_EVENT_MANAGER) += xlnx_event_manager.o
diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c
new file mode 100644
index 000000000000..b27f8853508e
--- /dev/null
+++ b/drivers/soc/xilinx/xlnx_event_manager.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Xilinx Event Management Driver
+ *
+ * Copyright (C) 2021 Xilinx, Inc.
+ *
+ * Abhyuday Godhasara <abhyuday.godhasara@xilinx.com>
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/firmware/xlnx-event-manager.h>
+#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/hashtable.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+static DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number1);
+
+static int virq_sgi;
+static int event_manager_availability = -EACCES;
+
+/* SGI number used for Event management driver */
+#define XLNX_EVENT_SGI_NUM (15)
+
+/* Max number of drivers that can register for the same event */
+#define MAX_DRIVER_PER_EVENT (10U)
+
+/* Max hashtable order for the registered-driver map (1 << 7 = 128 buckets) */
+#define REGISTERED_DRIVER_MAX_ORDER (7)
+
+#define MAX_BITS (32U) /* Number of bits available for error mask */
+
+#define FIRMWARE_VERSION_MASK (0xFFFFU)
+#define REGISTER_NOTIFIER_FIRMWARE_VERSION (2U)
+
+static DEFINE_HASHTABLE(reg_driver_map, REGISTERED_DRIVER_MAX_ORDER);
+static int sgi_num = XLNX_EVENT_SGI_NUM;
+
+/**
+ * struct registered_event_data - Registered Event Data.
+ * @key: combined ID (Node-Id | Event-Id) of type u64; the upper u32
+ * holds the Node-Id and the lower u32 the Event-Id, used as the
+ * key to index into the hashtable.
+ * @agent_data: Data passed back to the handler function.
+ * @cb_type: Type of API callback, such as PM_NOTIFY_CB.
+ * @eve_cb: Function pointer to the registered callback.
+ * @wake: If this flag is set, the firmware will wake up the processor
+ * if it is in a sleep or power-down state.
+ * @hentry: hlist_node that hooks this entry into the hashtable.
+ */
+struct registered_event_data {
+ u64 key;
+ enum pm_api_cb_id cb_type;
+ void *agent_data;
+
+ event_cb_func_t eve_cb;
+ bool wake;
+ struct hlist_node hentry;
+};
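+
+/*
+ * Example of the key packing for one error bit (illustrative values):
+ *
+ * u32 node_id = EVENT_ERROR_PMC_ERR1; upper 32 bits
+ * u32 event = BIT(3); lower 32 bits, a single error bit
+ * u64 key = ((u64)node_id << 32U) | (u64)event;
+ */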
+
+static bool xlnx_is_error_event(const u32 node_id)
+{
+ if (node_id == EVENT_ERROR_PMC_ERR1 ||
+ node_id == EVENT_ERROR_PMC_ERR2 ||
+ node_id == EVENT_ERROR_PSM_ERR1 ||
+ node_id == EVENT_ERROR_PSM_ERR2)
+ return true;
+
+ return false;
+}
+
+static int xlnx_add_cb_for_notify_event(const u32 node_id, const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data)
+{
+ u64 key = 0;
+ struct registered_event_data *eve_data;
+
+ key = ((u64)node_id << 32U) | (u64)event;
+ /* Check for existing entry in hash table for given key id */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
+ if (eve_data->key == key) {
+ pr_err("Found as already registered\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Add new entry if not present */
+ eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
+ if (!eve_data)
+ return -ENOMEM;
+
+ eve_data->key = key;
+ eve_data->cb_type = PM_NOTIFY_CB;
+ eve_data->eve_cb = cb_fun;
+ eve_data->wake = wake;
+ eve_data->agent_data = data;
+
+ hash_add(reg_driver_map, &eve_data->hentry, key);
+
+ return 0;
+}
+
+static int xlnx_add_cb_for_suspend(event_cb_func_t cb_fun, void *data)
+{
+ struct registered_event_data *eve_data;
+
+ /* Check for existing entry in hash table for given cb_type */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
+ if (eve_data->cb_type == PM_INIT_SUSPEND_CB) {
+ pr_err("Found as already registered\n");
+ return -EINVAL;
+ }
+ }
+
+ /* Add new entry if not present */
+ eve_data = kmalloc(sizeof(*eve_data), GFP_KERNEL);
+ if (!eve_data)
+ return -ENOMEM;
+
+ eve_data->key = 0;
+ eve_data->cb_type = PM_INIT_SUSPEND_CB;
+ eve_data->eve_cb = cb_fun;
+ eve_data->agent_data = data;
+
+ hash_add(reg_driver_map, &eve_data->hentry, PM_INIT_SUSPEND_CB);
+
+ return 0;
+}
+
+static int xlnx_remove_cb_for_suspend(event_cb_func_t cb_fun)
+{
+ bool is_callback_found = false;
+ struct registered_event_data *eve_data;
+
+ /* Check for existing entry in hash table for given cb_type */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, PM_INIT_SUSPEND_CB) {
+ if (eve_data->cb_type == PM_INIT_SUSPEND_CB &&
+ eve_data->eve_cb == cb_fun) {
+ is_callback_found = true;
+ /* remove this entry from the hashtable */
+ hash_del(&eve_data->hentry);
+ kfree(eve_data);
+ }
+ }
+ if (!is_callback_found) {
+ pr_warn("Didn't find any registered callback for suspend event\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int xlnx_remove_cb_for_notify_event(const u32 node_id, const u32 event,
+ event_cb_func_t cb_fun)
+{
+ bool is_callback_found = false;
+ struct registered_event_data *eve_data;
+ u64 key = ((u64)node_id << 32U) | (u64)event;
+
+ /* Check for existing entry in hash table for given key id */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
+ if (eve_data->key == key &&
+ eve_data->eve_cb == cb_fun) {
+ is_callback_found = true;
+ /* remove this entry from the hashtable */
+ hash_del(&eve_data->hentry);
+ kfree(eve_data);
+ }
+ }
+ if (!is_callback_found) {
+ pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
+ node_id, event);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * xlnx_register_event() - Register for the event.
+ * @cb_type: Type of callback from pm_api_cb_id,
+ * PM_NOTIFY_CB - for Error Events,
+ * PM_INIT_SUSPEND_CB - for suspend callback.
+ * @node_id: Node-Id related to event.
+ * @event: Event Mask for the Error Event.
+ * @wake: Flag specifying whether the subsystem should be woken upon
+ * event notification.
+ * @cb_fun: Callback function to register.
+ * @data: Pointer for the driver instance.
+ *
+ * Return: 0 on successful registration, or an error code otherwise.
+ */
+int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
+ const bool wake, event_cb_func_t cb_fun, void *data)
+{
+ int ret = 0;
+ u32 eve;
+ int pos;
+
+ if (event_manager_availability)
+ return event_manager_availability;
+
+ if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
+ pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
+ return -EINVAL;
+ }
+
+ if (!cb_fun)
+ return -EFAULT;
+
+ if (cb_type == PM_INIT_SUSPEND_CB) {
+ ret = xlnx_add_cb_for_suspend(cb_fun, data);
+ } else {
+ if (!xlnx_is_error_event(node_id)) {
+ /* Add entry for Node-Id/Event in hash table */
+ ret = xlnx_add_cb_for_notify_event(node_id, event, wake, cb_fun, data);
+ } else {
+ /* Add each set error bit into the hashtable */
+ for (pos = 0; pos < MAX_BITS; pos++) {
+ eve = event & (1 << pos);
+ if (!eve)
+ continue;
+
+ /* Add an entry for this Node-Id/error bit in the hashtable */
+ ret = xlnx_add_cb_for_notify_event(node_id, eve, wake, cb_fun,
+ data);
+ /* Break out of the loop on error */
+ if (ret)
+ break;
+ }
+ if (ret) {
+ /* Skip the event that caused the error */
+ pos--;
+ /* Remove events registered during this call from the hashtable */
+ for ( ; pos >= 0; pos--) {
+ eve = event & (1 << pos);
+ if (!eve)
+ continue;
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+ }
+ }
+ }
+
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
+ event, ret);
+ return ret;
+ }
+
+ /* Register for Node-Id/Event combination in firmware */
+ ret = zynqmp_pm_register_notifier(node_id, event, wake, true);
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__, node_id,
+ event, ret);
+ /* Remove already registered event from hash table */
+ if (xlnx_is_error_event(node_id)) {
+ for (pos = 0; pos < MAX_BITS; pos++) {
+ eve = event & (1 << pos);
+ if (!eve)
+ continue;
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+ }
+ } else {
+ xlnx_remove_cb_for_notify_event(node_id, event, cb_fun);
+ }
+ return ret;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xlnx_register_event);
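+
+/*
+ * A minimal sketch of a client of this API; the callback name and the
+ * node/event values are illustrative only.
+ */
+static void example_err_cb(const u32 *payload, void *data)
+{
+/* payload[1] is the Node-Id, payload[2] the (single) error bit */
+pr_info("error 0x%x on node 0x%x\n", payload[2], payload[1]);
+}
+
+static int __maybe_unused example_register(void)
+{
+/* Two error bits: one hashtable entry is added per set bit */
+return xlnx_register_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
+BIT(0) | BIT(1), false, example_err_cb, NULL);
+}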
+
+/**
+ * xlnx_unregister_event() - Unregister for the event.
+ * @cb_type: Type of callback from pm_api_cb_id,
+ * PM_NOTIFY_CB - for Error Events,
+ * PM_INIT_SUSPEND_CB - for suspend callback.
+ * @node_id: Node-Id related to event.
+ * @event: Event Mask for the Error Event.
+ * @cb_fun: Callback function that was registered.
+ *
+ * Return: 0 on successful unregistration, or an error code otherwise.
+ */
+int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id, const u32 event,
+ event_cb_func_t cb_fun)
+{
+ int ret;
+ u32 eve, pos;
+
+ if (event_manager_availability)
+ return event_manager_availability;
+
+ if (cb_type != PM_NOTIFY_CB && cb_type != PM_INIT_SUSPEND_CB) {
+ pr_err("%s() Unsupported Callback 0x%x\n", __func__, cb_type);
+ return -EINVAL;
+ }
+
+ if (!cb_fun)
+ return -EFAULT;
+
+ if (cb_type == PM_INIT_SUSPEND_CB) {
+ ret = xlnx_remove_cb_for_suspend(cb_fun);
+ } else {
+ /* Remove Node-Id/Event from hash table */
+ if (!xlnx_is_error_event(node_id)) {
+ xlnx_remove_cb_for_notify_event(node_id, event, cb_fun);
+ } else {
+ for (pos = 0; pos < MAX_BITS; pos++) {
+ eve = event & (1 << pos);
+ if (!eve)
+ continue;
+
+ xlnx_remove_cb_for_notify_event(node_id, eve, cb_fun);
+ }
+ }
+
+ /* Un-register for Node-Id/Event combination */
+ ret = zynqmp_pm_register_notifier(node_id, event, false, false);
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\n",
+ __func__, node_id, event, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(xlnx_unregister_event);
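+
+/*
+ * Matching teardown for the registration sketch above:
+ *
+ * xlnx_unregister_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
+ * BIT(0) | BIT(1), example_err_cb);
+ */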
+
+static void xlnx_call_suspend_cb_handler(const u32 *payload)
+{
+ bool is_callback_found = false;
+ struct registered_event_data *eve_data;
+ u32 cb_type = payload[0];
+
+ /* Check for existing entry in hash table for given cb_type */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, cb_type) {
+ if (eve_data->cb_type == cb_type) {
+ eve_data->eve_cb(&payload[0], eve_data->agent_data);
+ is_callback_found = true;
+ }
+ }
+ if (!is_callback_found)
+ pr_warn("Didn't find any registered callback for suspend event\n");
+}
+
+static void xlnx_call_notify_cb_handler(const u32 *payload)
+{
+ bool is_callback_found = false;
+ struct registered_event_data *eve_data;
+ u64 key = ((u64)payload[1] << 32U) | (u64)payload[2];
+ int ret;
+
+ /* Check for existing entry in hash table for given key id */
+ hash_for_each_possible(reg_driver_map, eve_data, hentry, key) {
+ if (eve_data->key == key) {
+ eve_data->eve_cb(&payload[0], eve_data->agent_data);
+ is_callback_found = true;
+
+ /* re-register with the firmware to keep receiving future events */
+ ret = zynqmp_pm_register_notifier(payload[1], payload[2],
+ eve_data->wake, true);
+ if (ret) {
+ pr_err("%s() failed for 0x%x and 0x%x: %d\r\n", __func__,
+ payload[1], payload[2], ret);
+ /* Remove already registered event from hash table */
+ xlnx_remove_cb_for_notify_event(payload[1], payload[2],
+ eve_data->eve_cb);
+ }
+ }
+ }
+ if (!is_callback_found)
+ pr_warn("Didn't find any registered callback for 0x%x 0x%x\n",
+ payload[1], payload[2]);
+}
+
+static void xlnx_get_event_callback_data(u32 *buf)
+{
+ zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
+}
+
+static irqreturn_t xlnx_event_handler(int irq, void *dev_id)
+{
+ u32 cb_type, node_id, event, pos;
+ u32 payload[CB_MAX_PAYLOAD_SIZE] = {0};
+ u32 event_data[CB_MAX_PAYLOAD_SIZE] = {0};
+
+ /* Get event data */
+ xlnx_get_event_callback_data(payload);
+
+ /* First element is callback type, others are callback arguments */
+ cb_type = payload[0];
+
+ if (cb_type == PM_NOTIFY_CB) {
+ node_id = payload[1];
+ event = payload[2];
+ if (!xlnx_is_error_event(node_id)) {
+ xlnx_call_notify_cb_handler(payload);
+ } else {
+ /*
+ * Each callback function expects the payload as an input argument.
+ * A single callback can report multiple error events through the
+ * error mask, so payload[2] may contain several error events at
+ * once, while reg_driver_map stores one entry per individual
+ * node_id/error combination. Therefore copy the payload into
+ * event_data, set event_data[2] to the mask of a single error
+ * event, and pass event_data to the registered callback. For
+ * example, if payload[2] is 0x6 (bits 1 and 2 set), the loop below
+ * dispatches twice, with event_data[2] = 0x2 and then 0x4.
+ */
+ memcpy(event_data, payload, (4 * CB_MAX_PAYLOAD_SIZE));
+ /* Dispatch each error event in the mask individually */
+ for (pos = 0; pos < MAX_BITS; pos++) {
+ if (!(event & (1 << pos)))
+ continue;
+ event_data[2] = (event & (1 << pos));
+ xlnx_call_notify_cb_handler(event_data);
+ }
+ }
+ } else if (cb_type == PM_INIT_SUSPEND_CB) {
+ xlnx_call_suspend_cb_handler(payload);
+ } else {
+ pr_err("%s() Unsupported Callback %d\n", __func__, cb_type);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int xlnx_event_cpuhp_start(unsigned int cpu)
+{
+ enable_percpu_irq(virq_sgi, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int xlnx_event_cpuhp_down(unsigned int cpu)
+{
+ disable_percpu_irq(virq_sgi);
+
+ return 0;
+}
+
+static void xlnx_disable_percpu_irq(void *data)
+{
+ disable_percpu_irq(virq_sgi);
+}
+
+static int xlnx_event_init_sgi(struct platform_device *pdev)
+{
+ int ret = 0;
+ int cpu = smp_processor_id();
+ /*
+ * The IRQ-related structures below ensure that the SGI is mapped by
+ * the GIC IRQ domain and that the corresponding Linux IRQ has a
+ * handler for interrupts received from the remote processor.
+ */
+ struct irq_domain *domain;
+ struct irq_fwspec sgi_fwspec;
+ struct device_node *interrupt_parent = NULL;
+ struct device *parent = pdev->dev.parent;
+
+ /* Find GIC controller to map SGIs. */
+ interrupt_parent = of_irq_find_parent(parent->of_node);
+ if (!interrupt_parent) {
+ dev_err(&pdev->dev, "Failed to find property for Interrupt parent\n");
+ return -EINVAL;
+ }
+
+ /* Each SGI needs to be associated with GIC's IRQ domain. */
+ domain = irq_find_host(interrupt_parent);
+ of_node_put(interrupt_parent);
+
+ /* The GIC domain is needed to create the IRQ mapping below. */
+ sgi_fwspec.fwnode = domain->fwnode;
+
+ /*
+ * The fwspec for an SGI takes a single parameter: the SGI (hwirq)
+ * number, which is set just below.
+ */
+ sgi_fwspec.param_count = 1;
+
+ /* Set SGI's hwirq */
+ sgi_fwspec.param[0] = sgi_num;
+ virq_sgi = irq_create_fwspec_mapping(&sgi_fwspec);
+
+ per_cpu(cpu_number1, cpu) = cpu;
+ ret = request_percpu_irq(virq_sgi, xlnx_event_handler, "xlnx_event_mgmt",
+ &cpu_number1);
+ WARN_ON(ret);
+ if (ret) {
+ irq_dispose_mapping(virq_sgi);
+ return ret;
+ }
+
+ irq_to_desc(virq_sgi);
+ irq_set_status_flags(virq_sgi, IRQ_PER_CPU);
+
+ return ret;
+}
+
+static void xlnx_event_cleanup_sgi(struct platform_device *pdev)
+{
+ int cpu = smp_processor_id();
+
+ per_cpu(cpu_number1, cpu) = cpu;
+
+ cpuhp_remove_state(CPUHP_AP_ONLINE_DYN);
+
+ on_each_cpu(xlnx_disable_percpu_irq, NULL, 1);
+
+ irq_clear_status_flags(virq_sgi, IRQ_PER_CPU);
+ free_percpu_irq(virq_sgi, &cpu_number1);
+ irq_dispose_mapping(virq_sgi);
+}
+
+static int xlnx_event_manager_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = zynqmp_pm_feature(PM_REGISTER_NOTIFIER);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Feature check failed with %d\n", ret);
+ return ret;
+ }
+
+ if ((ret & FIRMWARE_VERSION_MASK) <
+ REGISTER_NOTIFIER_FIRMWARE_VERSION) {
+ dev_err(&pdev->dev, "Register notifier version error. Expected Firmware: v%d - Found: v%d\n",
+ REGISTER_NOTIFIER_FIRMWARE_VERSION,
+ ret & FIRMWARE_VERSION_MASK);
+ return -EOPNOTSUPP;
+ }
+
+ /* Initialize the SGI */
+ ret = xlnx_event_init_sgi(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "SGI Init has been failed with %d\n", ret);
+ return ret;
+ }
+
+ /* Setup function for the CPU hot-plug cases */
+ cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "soc/event:starting",
+ xlnx_event_cpuhp_start, xlnx_event_cpuhp_down);
+
+ ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num,
+ 0, NULL);
+ if (ret) {
+ dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret);
+ xlnx_event_cleanup_sgi(pdev);
+ return ret;
+ }
+
+ event_manager_availability = 0;
+
+ dev_info(&pdev->dev, "SGI %d Registered over TF-A\n", sgi_num);
+ dev_info(&pdev->dev, "Xilinx Event Management driver probed\n");
+
+ return ret;
+}
+
+static int xlnx_event_manager_remove(struct platform_device *pdev)
+{
+ int i;
+ struct registered_event_data *eve_data;
+ struct hlist_node *tmp;
+ int ret;
+
+ hash_for_each_safe(reg_driver_map, i, tmp, eve_data, hentry) {
+ hash_del(&eve_data->hentry);
+ kfree(eve_data);
+ }
+
+ ret = zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, 0, 1, NULL);
+ if (ret)
+ dev_err(&pdev->dev, "SGI unregistration over TF-A failed with %d\n", ret);
+
+ xlnx_event_cleanup_sgi(pdev);
+
+ event_manager_availability = -EACCES;
+
+ return ret;
+}
+
+static struct platform_driver xlnx_event_manager_driver = {
+ .probe = xlnx_event_manager_probe,
+ .remove = xlnx_event_manager_remove,
+ .driver = {
+ .name = "xlnx_event_manager",
+ },
+};
+module_param(sgi_num, uint, 0);
+module_platform_driver(xlnx_event_manager_driver);
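+
+/* The SGI number defaults to 15 (XLNX_EVENT_SGI_NUM) but can be overridden
+ * at boot, e.g. "xlnx_event_manager.sgi_num=14" on the kernel command line
+ * (illustrative value; presumably any SGI left free by the firmware works). */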
diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c
index f8c301984d4f..859dd31b6eff 100644
--- a/drivers/soc/xilinx/zynqmp_power.c
+++ b/drivers/soc/xilinx/zynqmp_power.c
@@ -16,6 +16,7 @@
#include <linux/suspend.h>
#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/firmware/xlnx-event-manager.h>
#include <linux/mailbox/zynqmp-ipi-message.h>
/**
@@ -30,6 +31,7 @@ struct zynqmp_pm_work_struct {
static struct zynqmp_pm_work_struct *zynqmp_pm_init_suspend_work;
static struct mbox_chan *rx_chan;
+static bool event_registered;
enum pm_suspend_mode {
PM_SUSPEND_MODE_FIRST = 0,
@@ -46,17 +48,24 @@ static const char *const suspend_modes[] = {
static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD;
-enum pm_api_cb_id {
- PM_INIT_SUSPEND_CB = 30,
- PM_ACKNOWLEDGE_CB,
- PM_NOTIFY_CB,
-};
-
static void zynqmp_pm_get_callback_data(u32 *buf)
{
zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf);
}
+static void suspend_event_callback(const u32 *payload, void *data)
+{
+ /* First element is callback API ID, others are callback arguments */
+ if (work_pending(&zynqmp_pm_init_suspend_work->callback_work))
+ return;
+
+ /* Copy callback arguments into work's structure */
+ memcpy(zynqmp_pm_init_suspend_work->args, &payload[1],
+ sizeof(zynqmp_pm_init_suspend_work->args));
+
+ queue_work(system_unbound_wq, &zynqmp_pm_init_suspend_work->callback_work);
+}
+
static irqreturn_t zynqmp_pm_isr(int irq, void *data)
{
u32 payload[CB_PAYLOAD_SIZE];
@@ -184,7 +193,32 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
if (pm_api_version < ZYNQMP_PM_VERSION)
return -ENODEV;
- if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) {
+ /*
+ * First try to use the Xilinx Event Manager by registering
+ * suspend_event_callback for the suspend/shutdown event.
+ * If xlnx_register_event() returns -EACCES (the Xilinx Event Manager
+ * is not available to use) or -ENODEV (the Xilinx Event Manager is
+ * not compiled in), fall back to the IPI-mailbox or interrupt method.
+ */
+ ret = xlnx_register_event(PM_INIT_SUSPEND_CB, 0, 0, false,
+ suspend_event_callback, NULL);
+ if (!ret) {
+ zynqmp_pm_init_suspend_work = devm_kzalloc(&pdev->dev,
+ sizeof(struct zynqmp_pm_work_struct),
+ GFP_KERNEL);
+ if (!zynqmp_pm_init_suspend_work) {
+ xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0,
+ suspend_event_callback);
+ return -ENOMEM;
+ }
+ event_registered = true;
+
+ INIT_WORK(&zynqmp_pm_init_suspend_work->callback_work,
+ zynqmp_pm_init_suspend_work_fn);
+ } else if (ret != -EACCES && ret != -ENODEV) {
+ dev_err(&pdev->dev, "Failed to Register with Xilinx Event manager %d\n", ret);
+ return ret;
+ } else if (of_find_property(pdev->dev.of_node, "mboxes", NULL)) {
zynqmp_pm_init_suspend_work =
devm_kzalloc(&pdev->dev,
sizeof(struct zynqmp_pm_work_struct),
@@ -228,6 +262,10 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
if (ret) {
+ if (event_registered) {
+ xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback);
+ event_registered = false;
+ }
dev_err(&pdev->dev, "unable to create sysfs interface\n");
return ret;
}
@@ -238,6 +276,8 @@ static int zynqmp_pm_probe(struct platform_device *pdev)
static int zynqmp_pm_remove(struct platform_device *pdev)
{
sysfs_remove_file(&pdev->dev.kobj, &dev_attr_suspend_mode.attr);
+ if (event_registered)
+ xlnx_unregister_event(PM_INIT_SUSPEND_CB, 0, 0, suspend_event_callback);
if (rx_chan)
mbox_free_channel(rx_chan);
diff --git a/drivers/soundwire/cadence_master.c b/drivers/soundwire/cadence_master.c
index 4fcc3ba93004..558390af44b6 100644
--- a/drivers/soundwire/cadence_master.c
+++ b/drivers/soundwire/cadence_master.c
@@ -1178,9 +1178,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
cdns->pcm.num_bd = config.pcm_bd;
cdns->pcm.num_in = config.pcm_in;
cdns->pcm.num_out = config.pcm_out;
- cdns->pdm.num_bd = config.pdm_bd;
- cdns->pdm.num_in = config.pdm_in;
- cdns->pdm.num_out = config.pdm_out;
/* Allocate PDIs for PCMs */
stream = &cdns->pcm;
@@ -1211,32 +1208,6 @@ int sdw_cdns_pdi_init(struct sdw_cdns *cdns,
stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out;
cdns->num_ports = stream->num_pdi;
- /* Allocate PDIs for PDMs */
- stream = &cdns->pdm;
- ret = cdns_allocate_pdi(cdns, &stream->bd,
- stream->num_bd, offset);
- if (ret)
- return ret;
-
- offset += stream->num_bd;
-
- ret = cdns_allocate_pdi(cdns, &stream->in,
- stream->num_in, offset);
- if (ret)
- return ret;
-
- offset += stream->num_in;
-
- ret = cdns_allocate_pdi(cdns, &stream->out,
- stream->num_out, offset);
-
- if (ret)
- return ret;
-
- /* Update total number of PDM PDIs */
- stream->num_pdi = stream->num_bd + stream->num_in + stream->num_out;
- cdns->num_ports += stream->num_pdi;
-
return 0;
}
EXPORT_SYMBOL(sdw_cdns_pdi_init);
@@ -1681,7 +1652,7 @@ int sdw_cdns_probe(struct sdw_cdns *cdns)
EXPORT_SYMBOL(sdw_cdns_probe);
int cdns_set_sdw_stream(struct snd_soc_dai *dai,
- void *stream, bool pcm, int direction)
+ void *stream, int direction)
{
struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
struct sdw_cdns_dma_data *dma;
@@ -1705,10 +1676,7 @@ int cdns_set_sdw_stream(struct snd_soc_dai *dai,
if (!dma)
return -ENOMEM;
- if (pcm)
- dma->stream_type = SDW_STREAM_PCM;
- else
- dma->stream_type = SDW_STREAM_PDM;
+ dma->stream_type = SDW_STREAM_PCM;
dma->bus = &cdns->bus;
dma->link_id = cdns->instance;
diff --git a/drivers/soundwire/cadence_master.h b/drivers/soundwire/cadence_master.h
index e587aede63bf..595d72c15d97 100644
--- a/drivers/soundwire/cadence_master.h
+++ b/drivers/soundwire/cadence_master.h
@@ -17,7 +17,7 @@
* @h_ch_num: high channel for PDI
* @ch_count: total channel count for PDI
* @dir: data direction
- * @type: stream type, PDM or PCM
+ * @type: stream type (only PCM is supported)
*/
struct sdw_cdns_pdi {
int num;
@@ -62,17 +62,11 @@ struct sdw_cdns_streams {
* @pcm_bd: number of bidirectional PCM streams supported
* @pcm_in: number of input PCM streams supported
* @pcm_out: number of output PCM streams supported
- * @pdm_bd: number of bidirectional PDM streams supported
- * @pdm_in: number of input PDM streams supported
- * @pdm_out: number of output PDM streams supported
*/
struct sdw_cdns_stream_config {
unsigned int pcm_bd;
unsigned int pcm_in;
unsigned int pcm_out;
- unsigned int pdm_bd;
- unsigned int pdm_in;
- unsigned int pdm_out;
};
/**
@@ -86,6 +80,7 @@ struct sdw_cdns_stream_config {
* @link_id: Master link id
* @hw_params: hw_params to be applied in .prepare step
* @suspended: status set when suspended, to be used in .prepare
+ * @paused: status set in .trigger, to be used in suspend
*/
struct sdw_cdns_dma_data {
char *name;
@@ -96,6 +91,7 @@ struct sdw_cdns_dma_data {
int link_id;
struct snd_pcm_hw_params *hw_params;
bool suspended;
+ bool paused;
};
/**
@@ -109,7 +105,6 @@ struct sdw_cdns_dma_data {
* @ports: Data ports
* @num_ports: Total number of data ports
* @pcm: PCM streams
- * @pdm: PDM streams
* @registers: Cadence registers
* @link_up: Link status
* @msg_count: Messages sent on bus
@@ -127,7 +122,6 @@ struct sdw_cdns {
int num_ports;
struct sdw_cdns_streams pcm;
- struct sdw_cdns_streams pdm;
int pdi_loopback_source;
int pdi_loopback_target;
@@ -186,7 +180,7 @@ cdns_xfer_msg_defer(struct sdw_bus *bus,
int cdns_bus_conf(struct sdw_bus *bus, struct sdw_bus_params *params);
int cdns_set_sdw_stream(struct snd_soc_dai *dai,
- void *stream, bool pcm, int direction);
+ void *stream, int direction);
void sdw_cdns_check_self_clearing_bits(struct sdw_cdns *cdns, const char *string,
bool initial_delay, int reset_iterations);
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 78037ffdb09b..122f7a29d8ca 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -564,7 +564,7 @@ static void intel_pdi_init(struct sdw_intel *sdw,
{
void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
- int pcm_cap, pdm_cap;
+ int pcm_cap;
/* PCM Stream Capability */
pcm_cap = intel_readw(shim, SDW_SHIM_PCMSCAP(link_id));
@@ -575,41 +575,25 @@ static void intel_pdi_init(struct sdw_intel *sdw,
dev_dbg(sdw->cdns.dev, "PCM cap bd:%d in:%d out:%d\n",
config->pcm_bd, config->pcm_in, config->pcm_out);
-
- /* PDM Stream Capability */
- pdm_cap = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
-
- config->pdm_bd = FIELD_GET(SDW_SHIM_PDMSCAP_BSS, pdm_cap);
- config->pdm_in = FIELD_GET(SDW_SHIM_PDMSCAP_ISS, pdm_cap);
- config->pdm_out = FIELD_GET(SDW_SHIM_PDMSCAP_OSS, pdm_cap);
-
- dev_dbg(sdw->cdns.dev, "PDM cap bd:%d in:%d out:%d\n",
- config->pdm_bd, config->pdm_in, config->pdm_out);
}
static int
-intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
+intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num)
{
void __iomem *shim = sdw->link_res->shim;
unsigned int link_id = sdw->instance;
int count;
- if (pcm) {
- count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
+ count = intel_readw(shim, SDW_SHIM_PCMSYCHC(link_id, pdi_num));
- /*
- * WORKAROUND: on all existing Intel controllers, pdi
- * number 2 reports channel count as 1 even though it
- * supports 8 channels. Performing hardcoding for pdi
- * number 2.
- */
- if (pdi_num == 2)
- count = 7;
-
- } else {
- count = intel_readw(shim, SDW_SHIM_PDMSCAP(link_id));
- count = FIELD_GET(SDW_SHIM_PDMSCAP_CPSS, count);
- }
+ /*
+ * WORKAROUND: on all existing Intel controllers, pdi
+ * number 2 reports channel count as 1 even though it
+ * supports 8 channels. Hardcode the count for PDI
+ * number 2.
+ */
+ if (pdi_num == 2)
+ count = 7;
/* zero based values for channel count in register */
count++;
@@ -620,12 +604,12 @@ intel_pdi_get_ch_cap(struct sdw_intel *sdw, unsigned int pdi_num, bool pcm)
static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
struct sdw_cdns_pdi *pdi,
unsigned int num_pdi,
- unsigned int *num_ch, bool pcm)
+ unsigned int *num_ch)
{
int i, ch_count = 0;
for (i = 0; i < num_pdi; i++) {
- pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num, pcm);
+ pdi->ch_count = intel_pdi_get_ch_cap(sdw, pdi->num);
ch_count += pdi->ch_count;
pdi++;
}
@@ -635,25 +619,23 @@ static int intel_pdi_get_ch_update(struct sdw_intel *sdw,
}
static int intel_pdi_stream_ch_update(struct sdw_intel *sdw,
- struct sdw_cdns_streams *stream, bool pcm)
+ struct sdw_cdns_streams *stream)
{
intel_pdi_get_ch_update(sdw, stream->bd, stream->num_bd,
- &stream->num_ch_bd, pcm);
+ &stream->num_ch_bd);
intel_pdi_get_ch_update(sdw, stream->in, stream->num_in,
- &stream->num_ch_in, pcm);
+ &stream->num_ch_in);
intel_pdi_get_ch_update(sdw, stream->out, stream->num_out,
- &stream->num_ch_out, pcm);
+ &stream->num_ch_out);
return 0;
}
static int intel_pdi_ch_update(struct sdw_intel *sdw)
{
- /* First update PCM streams followed by PDM streams */
- intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm, true);
- intel_pdi_stream_ch_update(sdw, &sdw->cdns.pdm, false);
+ intel_pdi_stream_ch_update(sdw, &sdw->cdns.pcm);
return 0;
}
@@ -711,7 +693,7 @@ intel_pdi_alh_configure(struct sdw_intel *sdw, struct sdw_cdns_pdi *pdi)
}
static int intel_params_stream(struct sdw_intel *sdw,
- struct snd_pcm_substream *substream,
+ int stream,
struct snd_soc_dai *dai,
struct snd_pcm_hw_params *hw_params,
int link_id, int alh_stream_id)
@@ -719,7 +701,7 @@ static int intel_params_stream(struct sdw_intel *sdw,
struct sdw_intel_link_res *res = sdw->link_res;
struct sdw_intel_stream_params_data params_data;
- params_data.substream = substream;
+ params_data.stream = stream; /* direction */
params_data.dai = dai;
params_data.hw_params = hw_params;
params_data.link_id = link_id;
@@ -732,14 +714,14 @@ static int intel_params_stream(struct sdw_intel *sdw,
}
static int intel_free_stream(struct sdw_intel *sdw,
- struct snd_pcm_substream *substream,
+ int stream,
struct snd_soc_dai *dai,
int link_id)
{
struct sdw_intel_link_res *res = sdw->link_res;
struct sdw_intel_stream_free_data free_data;
- free_data.substream = substream;
+ free_data.stream = stream; /* direction */
free_data.dai = dai;
free_data.link_id = link_id;
@@ -840,7 +822,6 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
struct sdw_port_config *pconfig;
int ch, dir;
int ret;
- bool pcm = true;
dma = snd_soc_dai_get_dma_data(dai, substream);
if (!dma)
@@ -852,13 +833,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
else
dir = SDW_DATA_DIR_TX;
- if (dma->stream_type == SDW_STREAM_PDM)
- pcm = false;
-
- if (pcm)
- pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
- else
- pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pdm, ch, dir, dai->id);
+ pdi = sdw_cdns_alloc_pdi(cdns, &cdns->pcm, ch, dir, dai->id);
if (!pdi) {
ret = -EINVAL;
@@ -871,12 +846,13 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
sdw_cdns_config_stream(cdns, ch, dir, pdi);
/* store pdi and hw_params, may be needed in prepare step */
+ dma->paused = false;
dma->suspended = false;
dma->pdi = pdi;
dma->hw_params = params;
/* Inform DSP about PDI stream number */
- ret = intel_params_stream(sdw, substream, dai, params,
+ ret = intel_params_stream(sdw, substream->stream, dai, params,
sdw->instance,
pdi->intel_alh_id);
if (ret)
@@ -887,12 +863,7 @@ static int intel_hw_params(struct snd_pcm_substream *substream,
sconfig.frame_rate = params_rate(params);
sconfig.type = dma->stream_type;
- if (dma->stream_type == SDW_STREAM_PDM) {
- sconfig.frame_rate *= 50;
- sconfig.bps = 1;
- } else {
- sconfig.bps = snd_pcm_format_width(params_format(params));
- }
+ sconfig.bps = snd_pcm_format_width(params_format(params));
/* Port configuration */
pconfig = kzalloc(sizeof(*pconfig), GFP_KERNEL);
@@ -953,7 +924,7 @@ static int intel_prepare(struct snd_pcm_substream *substream,
sdw_cdns_config_stream(cdns, ch, dir, dma->pdi);
/* Inform DSP about PDI stream number */
- ret = intel_params_stream(sdw, substream, dai,
+ ret = intel_params_stream(sdw, substream->stream, dai,
dma->hw_params,
sdw->instance,
dma->pdi->intel_alh_id);
@@ -987,7 +958,7 @@ intel_hw_free(struct snd_pcm_substream *substream, struct snd_soc_dai *dai)
return ret;
}
- ret = intel_free_stream(sdw, substream, dai, sdw->instance);
+ ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
if (ret < 0) {
dev_err(dai->dev, "intel_free_stream: failed %d\n", ret);
return ret;
@@ -1008,39 +979,10 @@ static void intel_shutdown(struct snd_pcm_substream *substream,
pm_runtime_put_autosuspend(cdns->dev);
}
-static int intel_component_dais_suspend(struct snd_soc_component *component)
-{
- struct sdw_cdns_dma_data *dma;
- struct snd_soc_dai *dai;
-
- for_each_component_dais(component, dai) {
- /*
- * we don't have a .suspend dai_ops, and we don't have access
- * to the substream, so let's mark both capture and playback
- * DMA contexts as suspended
- */
- dma = dai->playback_dma_data;
- if (dma)
- dma->suspended = true;
-
- dma = dai->capture_dma_data;
- if (dma)
- dma->suspended = true;
- }
-
- return 0;
-}
-
static int intel_pcm_set_sdw_stream(struct snd_soc_dai *dai,
void *stream, int direction)
{
- return cdns_set_sdw_stream(dai, stream, true, direction);
-}
-
-static int intel_pdm_set_sdw_stream(struct snd_soc_dai *dai,
- void *stream, int direction)
-{
- return cdns_set_sdw_stream(dai, stream, false, direction);
+ return cdns_set_sdw_stream(dai, stream, direction);
}
static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
@@ -1059,24 +1001,100 @@ static void *intel_get_sdw_stream(struct snd_soc_dai *dai,
return dma->stream;
}
-static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
- .startup = intel_startup,
- .hw_params = intel_hw_params,
- .prepare = intel_prepare,
- .hw_free = intel_hw_free,
- .shutdown = intel_shutdown,
- .set_sdw_stream = intel_pcm_set_sdw_stream,
- .get_sdw_stream = intel_get_sdw_stream,
-};
+static int intel_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai)
+{
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_cdns_dma_data *dma;
+ int ret = 0;
-static const struct snd_soc_dai_ops intel_pdm_dai_ops = {
+ dma = snd_soc_dai_get_dma_data(dai, substream);
+ if (!dma) {
+ dev_err(dai->dev, "failed to get dma data in %s\n",
+ __func__);
+ return -EIO;
+ }
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+
+ /*
+ * The .prepare callback is used to deal with xruns and resume operations.
+ * In the case of xruns, the DMAs and SHIM registers cannot be touched,
+ * but for resume operations the DMAs and SHIM registers need to be initialized.
+ * The .trigger callback is used to track the suspend case only.
+ */
+
+ dma->suspended = true;
+
+ ret = intel_free_stream(sdw, substream->stream, dai, sdw->instance);
+ break;
+
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ dma->paused = true;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ dma->paused = false;
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int intel_component_dais_suspend(struct snd_soc_component *component)
+{
+ struct snd_soc_dai *dai;
+
+ /*
+ * In the corner case where a SUSPEND happens during a PAUSE, the ALSA core
+ * does not issue a TRIGGER_SUSPEND. This leaves the DAIs in an unbalanced state.
+ * Since the component suspend is called last, we can trap this corner case
+ * and force the DAIs to release their resources.
+ */
+ for_each_component_dais(component, dai) {
+ struct sdw_cdns *cdns = snd_soc_dai_get_drvdata(dai);
+ struct sdw_intel *sdw = cdns_to_intel(cdns);
+ struct sdw_cdns_dma_data *dma;
+ int stream;
+ int ret;
+
+ dma = dai->playback_dma_data;
+ stream = SNDRV_PCM_STREAM_PLAYBACK;
+ if (!dma) {
+ dma = dai->capture_dma_data;
+ stream = SNDRV_PCM_STREAM_CAPTURE;
+ }
+
+ if (!dma)
+ continue;
+
+ if (dma->suspended)
+ continue;
+
+ if (dma->paused) {
+ dma->suspended = true;
+
+ ret = intel_free_stream(sdw, stream, dai, sdw->instance);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops intel_pcm_dai_ops = {
.startup = intel_startup,
.hw_params = intel_hw_params,
.prepare = intel_prepare,
.hw_free = intel_hw_free,
+ .trigger = intel_trigger,
.shutdown = intel_shutdown,
- .set_sdw_stream = intel_pdm_set_sdw_stream,
- .get_sdw_stream = intel_get_sdw_stream,
+ .set_stream = intel_pcm_set_sdw_stream,
+ .get_stream = intel_get_sdw_stream,
};
static const struct snd_soc_component_driver dai_component = {
@@ -1087,7 +1105,7 @@ static const struct snd_soc_component_driver dai_component = {
static int intel_create_dai(struct sdw_cdns *cdns,
struct snd_soc_dai_driver *dais,
enum intel_pdi_type type,
- u32 num, u32 off, u32 max_ch, bool pcm)
+ u32 num, u32 off, u32 max_ch)
{
int i;
@@ -1116,10 +1134,7 @@ static int intel_create_dai(struct sdw_cdns *cdns,
dais[i].capture.formats = SNDRV_PCM_FMTBIT_S16_LE;
}
- if (pcm)
- dais[i].ops = &intel_pcm_dai_ops;
- else
- dais[i].ops = &intel_pdm_dai_ops;
+ dais[i].ops = &intel_pcm_dai_ops;
}
return 0;
@@ -1133,7 +1148,7 @@ static int intel_register_dai(struct sdw_intel *sdw)
int num_dai, ret, off = 0;
/* DAIs are created based on total number of PDIs supported */
- num_dai = cdns->pcm.num_pdi + cdns->pdm.num_pdi;
+ num_dai = cdns->pcm.num_pdi;
dais = devm_kcalloc(cdns->dev, num_dai, sizeof(*dais), GFP_KERNEL);
if (!dais)
@@ -1143,39 +1158,19 @@ static int intel_register_dai(struct sdw_intel *sdw)
stream = &cdns->pcm;
ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pcm.num_in,
- off, stream->num_ch_in, true);
+ off, stream->num_ch_in);
if (ret)
return ret;
off += cdns->pcm.num_in;
ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pcm.num_out,
- off, stream->num_ch_out, true);
+ off, stream->num_ch_out);
if (ret)
return ret;
off += cdns->pcm.num_out;
ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pcm.num_bd,
- off, stream->num_ch_bd, true);
- if (ret)
- return ret;
-
- /* Create PDM DAIs */
- stream = &cdns->pdm;
- off += cdns->pcm.num_bd;
- ret = intel_create_dai(cdns, dais, INTEL_PDI_IN, cdns->pdm.num_in,
- off, stream->num_ch_in, false);
- if (ret)
- return ret;
-
- off += cdns->pdm.num_in;
- ret = intel_create_dai(cdns, dais, INTEL_PDI_OUT, cdns->pdm.num_out,
- off, stream->num_ch_out, false);
- if (ret)
- return ret;
-
- off += cdns->pdm.num_out;
- ret = intel_create_dai(cdns, dais, INTEL_PDI_BD, cdns->pdm.num_bd,
- off, stream->num_ch_bd, false);
+ off, stream->num_ch_bd);
if (ret)
return ret;
@@ -1293,7 +1288,7 @@ static int intel_link_probe(struct auxiliary_device *auxdev,
bus->ops = &sdw_intel_ops;
/* set driver data, accessed by snd_soc_dai_get_drvdata() */
- dev_set_drvdata(dev, cdns);
+ auxiliary_set_drvdata(auxdev, cdns);
/* use generic bandwidth allocation algorithm */
sdw->cdns.bus.compute_params = sdw_compute_params;
@@ -1321,7 +1316,7 @@ int intel_link_startup(struct auxiliary_device *auxdev)
{
struct sdw_cdns_stream_config config;
struct device *dev = &auxdev->dev;
- struct sdw_cdns *cdns = dev_get_drvdata(dev);
+ struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
int link_flags;
@@ -1463,7 +1458,7 @@ err_init:
static void intel_link_remove(struct auxiliary_device *auxdev)
{
struct device *dev = &auxdev->dev;
- struct sdw_cdns *cdns = dev_get_drvdata(dev);
+ struct sdw_cdns *cdns = auxiliary_get_drvdata(auxdev);
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
@@ -1488,7 +1483,7 @@ int intel_link_process_wakeen_event(struct auxiliary_device *auxdev)
void __iomem *shim;
u16 wake_sts;
- sdw = dev_get_drvdata(dev);
+ sdw = auxiliary_get_drvdata(auxdev);
bus = &sdw->cdns.bus;
if (bus->prop.hw_disabled || !sdw->startup_done) {
@@ -1549,7 +1544,7 @@ static int __maybe_unused intel_pm_prepare(struct device *dev)
struct sdw_intel *sdw = cdns_to_intel(cdns);
struct sdw_bus *bus = &cdns->bus;
u32 clock_stop_quirks;
- int ret = 0;
+ int ret;
if (bus->prop.hw_disabled || !sdw->startup_done) {
dev_dbg(dev, "SoundWire master %d is disabled or not-started, ignoring\n",
diff --git a/drivers/soundwire/intel_init.c b/drivers/soundwire/intel_init.c
index e329022e1669..d99807765dfe 100644
--- a/drivers/soundwire/intel_init.c
+++ b/drivers/soundwire/intel_init.c
@@ -244,7 +244,7 @@ static struct sdw_intel_ctx
goto err;
link = &ldev->link_res;
- link->cdns = dev_get_drvdata(&ldev->auxdev.dev);
+ link->cdns = auxiliary_get_drvdata(&ldev->auxdev);
if (!link->cdns) {
dev_err(&adev->dev, "failed to get link->cdns\n");
diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c
index a317bea2d42d..54813417ef8e 100644
--- a/drivers/soundwire/qcom.c
+++ b/drivers/soundwire/qcom.c
@@ -1024,8 +1024,8 @@ static int qcom_swrm_startup(struct snd_pcm_substream *substream,
ctrl->sruntime[dai->id] = sruntime;
for_each_rtd_codec_dais(rtd, i, codec_dai) {
- ret = snd_soc_dai_set_sdw_stream(codec_dai, sruntime,
- substream->stream);
+ ret = snd_soc_dai_set_stream(codec_dai, sruntime,
+ substream->stream);
if (ret < 0 && ret != -ENOTSUPP) {
dev_err(dai->dev, "Failed to set sdw stream on %s\n",
codec_dai->name);
@@ -1051,8 +1051,8 @@ static const struct snd_soc_dai_ops qcom_swrm_pdm_dai_ops = {
.hw_free = qcom_swrm_hw_free,
.startup = qcom_swrm_startup,
.shutdown = qcom_swrm_shutdown,
- .set_sdw_stream = qcom_swrm_set_sdw_stream,
- .get_sdw_stream = qcom_swrm_get_sdw_stream,
+ .set_stream = qcom_swrm_set_sdw_stream,
+ .get_stream = qcom_swrm_get_sdw_stream,
};
static const struct snd_soc_component_driver qcom_swrm_dai_component = {
@@ -1156,11 +1156,7 @@ static int qcom_swrm_get_port_config(struct qcom_swrm_ctrl *ctrl)
ret = of_property_read_u8_array(np, "qcom,ports-block-pack-mode",
bp_mode, nports);
if (ret) {
- u32 version;
-
- ctrl->reg_read(ctrl, SWRM_COMP_HW_VERSION, &version);
-
- if (version <= 0x01030000)
+ if (ctrl->version <= 0x01030000)
memset(bp_mode, SWR_INVALID_PARAM, QCOM_SDW_MAX_PORTS);
else
return ret;
diff --git a/drivers/soundwire/stream.c b/drivers/soundwire/stream.c
index 5d4f6b308ef7..980f26d49b66 100644
--- a/drivers/soundwire/stream.c
+++ b/drivers/soundwire/stream.c
@@ -1863,7 +1863,7 @@ static int set_stream(struct snd_pcm_substream *substream,
/* Set stream pointer on all DAIs */
for_each_rtd_dais(rtd, i, dai) {
- ret = snd_soc_dai_set_sdw_stream(dai, sdw_stream, substream->stream);
+ ret = snd_soc_dai_set_stream(dai, sdw_stream, substream->stream);
if (ret < 0) {
dev_err(rtd->dev, "failed to set stream pointer on dai %s\n", dai->name);
break;
@@ -1934,7 +1934,7 @@ void sdw_shutdown_stream(void *sdw_substream)
/* Find stream from first CPU DAI */
dai = asoc_rtd_to_cpu(rtd, 0);
- sdw_stream = snd_soc_dai_get_sdw_stream(dai, substream->stream);
+ sdw_stream = snd_soc_dai_get_stream(dai, substream->stream);
if (IS_ERR(sdw_stream)) {
dev_err(rtd->dev, "no stream found for DAI %s\n", dai->name);
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index c9a769b8594b..86c76211b3d3 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -585,7 +585,7 @@ static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs)
u32 rd = 0;
u32 wr = 0;
- if (qspi->base[CHIP_SELECT]) {
+ if (cs >= 0 && qspi->base[CHIP_SELECT]) {
rd = bcm_qspi_read(qspi, CHIP_SELECT, 0);
wr = (rd & ~0xff) | (1 << cs);
if (rd == wr)
diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c
index c208efeadd18..0bc7daa7afc8 100644
--- a/drivers/spi/spi-meson-spicc.c
+++ b/drivers/spi/spi-meson-spicc.c
@@ -693,6 +693,11 @@ static int meson_spicc_probe(struct platform_device *pdev)
writel_relaxed(0, spicc->base + SPICC_INTREG);
irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out_master;
+ }
+
ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq,
0, NULL, spicc);
if (ret) {
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index a15de10ee286..753bd313e6fd 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -624,7 +624,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
else
mdata->state = MTK_SPI_IDLE;
- if (!master->can_dma(master, master->cur_msg->spi, trans)) {
+ if (!master->can_dma(master, NULL, trans)) {
if (trans->rx_buf) {
cnt = mdata->xfer_len / 4;
ioread32_rep(mdata->base + SPI_RX_DATA_REG,
diff --git a/drivers/spi/spi-pic32.c b/drivers/spi/spi-pic32.c
index 5eb7b61bbb4d..f86433b29260 100644
--- a/drivers/spi/spi-pic32.c
+++ b/drivers/spi/spi-pic32.c
@@ -370,7 +370,6 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
cfg.src_addr_width = dma_width;
cfg.dst_addr_width = dma_width;
/* tx channel */
- cfg.slave_id = pic32s->tx_irq;
cfg.direction = DMA_MEM_TO_DEV;
ret = dmaengine_slave_config(master->dma_tx, &cfg);
if (ret) {
@@ -378,7 +377,6 @@ static int pic32_spi_dma_config(struct pic32_spi *pic32s, u32 dma_width)
return ret;
}
/* rx channel */
- cfg.slave_id = pic32s->rx_irq;
cfg.direction = DMA_DEV_TO_MEM;
ret = dmaengine_slave_config(master->dma_rx, &cfg);
if (ret)
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 514337c86d2c..ffdc55f87e82 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -688,7 +688,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
struct resource *res;
int ret, irq;
- ctrl = spi_alloc_master(dev, sizeof(*qspi));
+ ctrl = devm_spi_alloc_master(dev, sizeof(*qspi));
if (!ctrl)
return -ENOMEM;
@@ -697,58 +697,46 @@ static int stm32_qspi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
qspi->io_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->io_base)) {
- ret = PTR_ERR(qspi->io_base);
- goto err_master_put;
- }
+ if (IS_ERR(qspi->io_base))
+ return PTR_ERR(qspi->io_base);
qspi->phys_base = res->start;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
qspi->mm_base = devm_ioremap_resource(dev, res);
- if (IS_ERR(qspi->mm_base)) {
- ret = PTR_ERR(qspi->mm_base);
- goto err_master_put;
- }
+ if (IS_ERR(qspi->mm_base))
+ return PTR_ERR(qspi->mm_base);
qspi->mm_size = resource_size(res);
- if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) {
- ret = -EINVAL;
- goto err_master_put;
- }
+ if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
+ return -EINVAL;
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- ret = irq;
- goto err_master_put;
- }
+ if (irq < 0)
+ return irq;
ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
dev_name(dev), qspi);
if (ret) {
dev_err(dev, "failed to request irq\n");
- goto err_master_put;
+ return ret;
}
init_completion(&qspi->data_completion);
init_completion(&qspi->match_completion);
qspi->clk = devm_clk_get(dev, NULL);
- if (IS_ERR(qspi->clk)) {
- ret = PTR_ERR(qspi->clk);
- goto err_master_put;
- }
+ if (IS_ERR(qspi->clk))
+ return PTR_ERR(qspi->clk);
qspi->clk_rate = clk_get_rate(qspi->clk);
- if (!qspi->clk_rate) {
- ret = -EINVAL;
- goto err_master_put;
- }
+ if (!qspi->clk_rate)
+ return -EINVAL;
ret = clk_prepare_enable(qspi->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
- goto err_master_put;
+ return ret;
}
rstc = devm_reset_control_get_exclusive(dev, NULL);
@@ -784,7 +772,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_noresume(dev);
- ret = devm_spi_register_master(dev, ctrl);
+ ret = spi_register_master(ctrl);
if (ret)
goto err_pm_runtime_free;
@@ -806,8 +794,6 @@ err_dma_free:
stm32_qspi_dma_free(qspi);
err_clk_disable:
clk_disable_unprepare(qspi->clk);
-err_master_put:
- spi_master_put(qspi->ctrl);
return ret;
}
@@ -817,6 +803,7 @@ static int stm32_qspi_remove(struct platform_device *pdev)
struct stm32_qspi *qspi = platform_get_drvdata(pdev);
pm_runtime_get_sync(qspi->dev);
+ spi_unregister_master(qspi->ctrl);
/* disable qspi */
writel_relaxed(0, qspi->io_base + QSPI_CR);
stm32_qspi_dma_free(qspi);
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 9bd3fd1652f7..7fc24505a72c 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -221,7 +221,6 @@ struct stm32_spi;
* time between frames (if driver has this functionality)
* @set_number_of_data: optional routine to configure registers to desired
* number of data (if driver has this functionality)
- * @can_dma: routine to determine if the transfer is eligible for DMA use
* @transfer_one_dma_start: routine to start transfer a single spi_transfer
* using DMA
* @dma_rx_cb: routine to call after DMA RX channel operation is complete
@@ -232,7 +231,7 @@ struct stm32_spi;
* @baud_rate_div_min: minimum baud rate divisor
* @baud_rate_div_max: maximum baud rate divisor
* @has_fifo: boolean to know if fifo is used for driver
- * @has_startbit: boolean to know if start bit is used to start transfer
+ * @flags: compatible specific SPI controller flags used at registration time
*/
struct stm32_spi_cfg {
const struct stm32_spi_regspec *regs;
@@ -253,6 +252,7 @@ struct stm32_spi_cfg {
unsigned int baud_rate_div_min;
unsigned int baud_rate_div_max;
bool has_fifo;
+ u16 flags;
};
/**
@@ -1722,6 +1722,7 @@ static const struct stm32_spi_cfg stm32f4_spi_cfg = {
.baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN,
.baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX,
.has_fifo = false,
+ .flags = SPI_MASTER_MUST_TX,
};
static const struct stm32_spi_cfg stm32h7_spi_cfg = {
@@ -1854,7 +1855,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
master->prepare_message = stm32_spi_prepare_msg;
master->transfer_one = stm32_spi_transfer_one;
master->unprepare_message = stm32_spi_unprepare_msg;
- master->flags = SPI_MASTER_MUST_TX;
+ master->flags = spi->cfg->flags;
spi->dma_tx = dma_request_chan(spi->dev, "tx");
if (IS_ERR(spi->dma_tx)) {
diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c
index 342ee8d2c476..cc0da4822231 100644
--- a/drivers/spi/spi-uniphier.c
+++ b/drivers/spi/spi-uniphier.c
@@ -726,7 +726,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n",
ret);
- goto out_disable_clk;
+ goto out_release_dma;
}
dma_tx_burst = caps.max_burst;
}
@@ -735,7 +735,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
if (IS_ERR_OR_NULL(master->dma_rx)) {
if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
- goto out_disable_clk;
+ goto out_release_dma;
}
master->dma_rx = NULL;
dma_rx_burst = INT_MAX;
@@ -744,7 +744,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n",
ret);
- goto out_disable_clk;
+ goto out_release_dma;
}
dma_rx_burst = caps.max_burst;
}
@@ -753,10 +753,20 @@ static int uniphier_spi_probe(struct platform_device *pdev)
ret = devm_spi_register_master(&pdev->dev, master);
if (ret)
- goto out_disable_clk;
+ goto out_release_dma;
return 0;
+out_release_dma:
+ if (!IS_ERR_OR_NULL(master->dma_rx)) {
+ dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ }
+ if (!IS_ERR_OR_NULL(master->dma_tx)) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ }
+
out_disable_clk:
clk_disable_unprepare(priv->clk);
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index 2874b6c26028..737802046314 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -34,4 +34,15 @@ config SPMI_MSM_PMIC_ARB
This is required for communicating with Qualcomm PMICs and
other devices that have the SPMI interface.
+config SPMI_MTK_PMIF
+ tristate "Mediatek SPMI Controller (PMIC Arbiter)"
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ help
+ If you say yes to this option, support will be included for the
+ built-in SPMI PMIC Arbiter interface on Mediatek family
+ processors.
+
+ This is required for communicating with Mediatek PMICs and
+ other devices that have the SPMI interface.
+
endif
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index 6e092e6f290c..9d974424c8c1 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_SPMI) += spmi.o
obj-$(CONFIG_SPMI_HISI3670) += hisi-spmi-controller.o
obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o
+obj-$(CONFIG_SPMI_MTK_PMIF) += spmi-mtk-pmif.o
diff --git a/drivers/spmi/spmi-mtk-pmif.c b/drivers/spmi/spmi-mtk-pmif.c
new file mode 100644
index 000000000000..ad511f2c3324
--- /dev/null
+++ b/drivers/spmi/spmi-mtk-pmif.c
@@ -0,0 +1,542 @@
+// SPDX-License-Identifier: GPL-2.0
+//
+// Copyright (c) 2021 MediaTek Inc.
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/spmi.h>
+
+#define SWINF_IDLE 0x00
+#define SWINF_WFVLDCLR 0x06
+
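+/* Extracts the software-interface FSM state from a SWINF STA register
+ * value (bits [3:1]); compared against SWINF_IDLE/SWINF_WFVLDCLR above. */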
+#define GET_SWINF(x) (((x) >> 1) & 0x7)
+
+#define PMIF_CMD_REG_0 0
+#define PMIF_CMD_REG 1
+#define PMIF_CMD_EXT_REG 2
+#define PMIF_CMD_EXT_REG_LONG 3
+
+#define PMIF_DELAY_US 10
+#define PMIF_TIMEOUT_US (10 * 1000)
+
+#define PMIF_CHAN_OFFSET 0x5
+
+#define PMIF_MAX_CLKS 3
+
+#define SPMI_OP_ST_BUSY 1
+
+struct ch_reg {
+ u32 ch_sta;
+ u32 wdata;
+ u32 rdata;
+ u32 ch_send;
+ u32 ch_rdy;
+};
+
+struct pmif_data {
+ const u32 *regs;
+ const u32 *spmimst_regs;
+ u32 soc_chan;
+};
+
+struct pmif {
+ void __iomem *base;
+ void __iomem *spmimst_base;
+ struct ch_reg chan;
+ struct clk_bulk_data clks[PMIF_MAX_CLKS];
+ size_t nclks;
+ const struct pmif_data *data;
+};
+
+static const char * const pmif_clock_names[] = {
+ "pmif_sys_ck", "pmif_tmr_ck", "spmimst_clk_mux",
+};
+
+enum pmif_regs {
+ PMIF_INIT_DONE,
+ PMIF_INF_EN,
+ PMIF_ARB_EN,
+ PMIF_CMDISSUE_EN,
+ PMIF_TIMER_CTRL,
+ PMIF_SPI_MODE_CTRL,
+ PMIF_IRQ_EVENT_EN_0,
+ PMIF_IRQ_FLAG_0,
+ PMIF_IRQ_CLR_0,
+ PMIF_IRQ_EVENT_EN_1,
+ PMIF_IRQ_FLAG_1,
+ PMIF_IRQ_CLR_1,
+ PMIF_IRQ_EVENT_EN_2,
+ PMIF_IRQ_FLAG_2,
+ PMIF_IRQ_CLR_2,
+ PMIF_IRQ_EVENT_EN_3,
+ PMIF_IRQ_FLAG_3,
+ PMIF_IRQ_CLR_3,
+ PMIF_IRQ_EVENT_EN_4,
+ PMIF_IRQ_FLAG_4,
+ PMIF_IRQ_CLR_4,
+ PMIF_WDT_EVENT_EN_0,
+ PMIF_WDT_FLAG_0,
+ PMIF_WDT_EVENT_EN_1,
+ PMIF_WDT_FLAG_1,
+ PMIF_SWINF_0_STA,
+ PMIF_SWINF_0_WDATA_31_0,
+ PMIF_SWINF_0_RDATA_31_0,
+ PMIF_SWINF_0_ACC,
+ PMIF_SWINF_0_VLD_CLR,
+ PMIF_SWINF_1_STA,
+ PMIF_SWINF_1_WDATA_31_0,
+ PMIF_SWINF_1_RDATA_31_0,
+ PMIF_SWINF_1_ACC,
+ PMIF_SWINF_1_VLD_CLR,
+ PMIF_SWINF_2_STA,
+ PMIF_SWINF_2_WDATA_31_0,
+ PMIF_SWINF_2_RDATA_31_0,
+ PMIF_SWINF_2_ACC,
+ PMIF_SWINF_2_VLD_CLR,
+ PMIF_SWINF_3_STA,
+ PMIF_SWINF_3_WDATA_31_0,
+ PMIF_SWINF_3_RDATA_31_0,
+ PMIF_SWINF_3_ACC,
+ PMIF_SWINF_3_VLD_CLR,
+};
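+
+/*
+ * The per-SoC tables below map these indices to MMIO offsets. A minimal
+ * sketch of the accessor pattern they support (assumed helper; the real
+ * accessors live later in this file):
+ *
+ * static u32 pmif_readl(struct pmif *arb, enum pmif_regs reg)
+ * {
+ * return readl(arb->base + arb->data->regs[reg]);
+ * }
+ */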
+
+static const u32 mt6873_regs[] = {
+ [PMIF_INIT_DONE] = 0x0000,
+ [PMIF_INF_EN] = 0x0024,
+ [PMIF_ARB_EN] = 0x0150,
+ [PMIF_CMDISSUE_EN] = 0x03B4,
+ [PMIF_TIMER_CTRL] = 0x03E0,
+ [PMIF_SPI_MODE_CTRL] = 0x0400,
+ [PMIF_IRQ_EVENT_EN_0] = 0x0418,
+ [PMIF_IRQ_FLAG_0] = 0x0420,
+ [PMIF_IRQ_CLR_0] = 0x0424,
+ [PMIF_IRQ_EVENT_EN_1] = 0x0428,
+ [PMIF_IRQ_FLAG_1] = 0x0430,
+ [PMIF_IRQ_CLR_1] = 0x0434,
+ [PMIF_IRQ_EVENT_EN_2] = 0x0438,
+ [PMIF_IRQ_FLAG_2] = 0x0440,
+ [PMIF_IRQ_CLR_2] = 0x0444,
+ [PMIF_IRQ_EVENT_EN_3] = 0x0448,
+ [PMIF_IRQ_FLAG_3] = 0x0450,
+ [PMIF_IRQ_CLR_3] = 0x0454,
+ [PMIF_IRQ_EVENT_EN_4] = 0x0458,
+ [PMIF_IRQ_FLAG_4] = 0x0460,
+ [PMIF_IRQ_CLR_4] = 0x0464,
+ [PMIF_WDT_EVENT_EN_0] = 0x046C,
+ [PMIF_WDT_FLAG_0] = 0x0470,
+ [PMIF_WDT_EVENT_EN_1] = 0x0474,
+ [PMIF_WDT_FLAG_1] = 0x0478,
+ [PMIF_SWINF_0_ACC] = 0x0C00,
+ [PMIF_SWINF_0_WDATA_31_0] = 0x0C04,
+ [PMIF_SWINF_0_RDATA_31_0] = 0x0C14,
+ [PMIF_SWINF_0_VLD_CLR] = 0x0C24,
+ [PMIF_SWINF_0_STA] = 0x0C28,
+ [PMIF_SWINF_1_ACC] = 0x0C40,
+ [PMIF_SWINF_1_WDATA_31_0] = 0x0C44,
+ [PMIF_SWINF_1_RDATA_31_0] = 0x0C54,
+ [PMIF_SWINF_1_VLD_CLR] = 0x0C64,
+ [PMIF_SWINF_1_STA] = 0x0C68,
+ [PMIF_SWINF_2_ACC] = 0x0C80,
+ [PMIF_SWINF_2_WDATA_31_0] = 0x0C84,
+ [PMIF_SWINF_2_RDATA_31_0] = 0x0C94,
+ [PMIF_SWINF_2_VLD_CLR] = 0x0CA4,
+ [PMIF_SWINF_2_STA] = 0x0CA8,
+ [PMIF_SWINF_3_ACC] = 0x0CC0,
+ [PMIF_SWINF_3_WDATA_31_0] = 0x0CC4,
+ [PMIF_SWINF_3_RDATA_31_0] = 0x0CD4,
+ [PMIF_SWINF_3_VLD_CLR] = 0x0CE4,
+ [PMIF_SWINF_3_STA] = 0x0CE8,
+};
+
+static const u32 mt8195_regs[] = {
+ [PMIF_INIT_DONE] = 0x0000,
+ [PMIF_INF_EN] = 0x0024,
+ [PMIF_ARB_EN] = 0x0150,
+ [PMIF_CMDISSUE_EN] = 0x03B8,
+ [PMIF_TIMER_CTRL] = 0x03E4,
+ [PMIF_SPI_MODE_CTRL] = 0x0408,
+ [PMIF_IRQ_EVENT_EN_0] = 0x0420,
+ [PMIF_IRQ_FLAG_0] = 0x0428,
+ [PMIF_IRQ_CLR_0] = 0x042C,
+ [PMIF_IRQ_EVENT_EN_1] = 0x0430,
+ [PMIF_IRQ_FLAG_1] = 0x0438,
+ [PMIF_IRQ_CLR_1] = 0x043C,
+ [PMIF_IRQ_EVENT_EN_2] = 0x0440,
+ [PMIF_IRQ_FLAG_2] = 0x0448,
+ [PMIF_IRQ_CLR_2] = 0x044C,
+ [PMIF_IRQ_EVENT_EN_3] = 0x0450,
+ [PMIF_IRQ_FLAG_3] = 0x0458,
+ [PMIF_IRQ_CLR_3] = 0x045C,
+ [PMIF_IRQ_EVENT_EN_4] = 0x0460,
+ [PMIF_IRQ_FLAG_4] = 0x0468,
+ [PMIF_IRQ_CLR_4] = 0x046C,
+ [PMIF_WDT_EVENT_EN_0] = 0x0474,
+ [PMIF_WDT_FLAG_0] = 0x0478,
+ [PMIF_WDT_EVENT_EN_1] = 0x047C,
+ [PMIF_WDT_FLAG_1] = 0x0480,
+ [PMIF_SWINF_0_ACC] = 0x0800,
+ [PMIF_SWINF_0_WDATA_31_0] = 0x0804,
+ [PMIF_SWINF_0_RDATA_31_0] = 0x0814,
+ [PMIF_SWINF_0_VLD_CLR] = 0x0824,
+ [PMIF_SWINF_0_STA] = 0x0828,
+ [PMIF_SWINF_1_ACC] = 0x0840,
+ [PMIF_SWINF_1_WDATA_31_0] = 0x0844,
+ [PMIF_SWINF_1_RDATA_31_0] = 0x0854,
+ [PMIF_SWINF_1_VLD_CLR] = 0x0864,
+ [PMIF_SWINF_1_STA] = 0x0868,
+ [PMIF_SWINF_2_ACC] = 0x0880,
+ [PMIF_SWINF_2_WDATA_31_0] = 0x0884,
+ [PMIF_SWINF_2_RDATA_31_0] = 0x0894,
+ [PMIF_SWINF_2_VLD_CLR] = 0x08A4,
+ [PMIF_SWINF_2_STA] = 0x08A8,
+ [PMIF_SWINF_3_ACC] = 0x08C0,
+ [PMIF_SWINF_3_WDATA_31_0] = 0x08C4,
+ [PMIF_SWINF_3_RDATA_31_0] = 0x08D4,
+ [PMIF_SWINF_3_VLD_CLR] = 0x08E4,
+ [PMIF_SWINF_3_STA] = 0x08E8,
+};
+
+enum spmi_regs {
+ SPMI_OP_ST_CTRL,
+ SPMI_GRP_ID_EN,
+ SPMI_OP_ST_STA,
+ SPMI_MST_SAMPL,
+ SPMI_MST_REQ_EN,
+ SPMI_REC_CTRL,
+ SPMI_REC0,
+ SPMI_REC1,
+ SPMI_REC2,
+ SPMI_REC3,
+ SPMI_REC4,
+ SPMI_MST_DBG,
+
+ /* MT8195 spmi regs */
+ SPMI_MST_RCS_CTRL,
+ SPMI_SLV_3_0_EINT,
+ SPMI_SLV_7_4_EINT,
+ SPMI_SLV_B_8_EINT,
+ SPMI_SLV_F_C_EINT,
+ SPMI_REC_CMD_DEC,
+ SPMI_DEC_DBG,
+};
+
+static const u32 mt6873_spmi_regs[] = {
+ [SPMI_OP_ST_CTRL] = 0x0000,
+ [SPMI_GRP_ID_EN] = 0x0004,
+ [SPMI_OP_ST_STA] = 0x0008,
+ [SPMI_MST_SAMPL] = 0x000c,
+ [SPMI_MST_REQ_EN] = 0x0010,
+ [SPMI_REC_CTRL] = 0x0040,
+ [SPMI_REC0] = 0x0044,
+ [SPMI_REC1] = 0x0048,
+ [SPMI_REC2] = 0x004c,
+ [SPMI_REC3] = 0x0050,
+ [SPMI_REC4] = 0x0054,
+ [SPMI_MST_DBG] = 0x00fc,
+};
+
+static const u32 mt8195_spmi_regs[] = {
+ [SPMI_OP_ST_CTRL] = 0x0000,
+ [SPMI_GRP_ID_EN] = 0x0004,
+ [SPMI_OP_ST_STA] = 0x0008,
+ [SPMI_MST_SAMPL] = 0x000C,
+ [SPMI_MST_REQ_EN] = 0x0010,
+ [SPMI_MST_RCS_CTRL] = 0x0014,
+ [SPMI_SLV_3_0_EINT] = 0x0020,
+ [SPMI_SLV_7_4_EINT] = 0x0024,
+ [SPMI_SLV_B_8_EINT] = 0x0028,
+ [SPMI_SLV_F_C_EINT] = 0x002C,
+ [SPMI_REC_CTRL] = 0x0040,
+ [SPMI_REC0] = 0x0044,
+ [SPMI_REC1] = 0x0048,
+ [SPMI_REC2] = 0x004C,
+ [SPMI_REC3] = 0x0050,
+ [SPMI_REC4] = 0x0054,
+ [SPMI_REC_CMD_DEC] = 0x005C,
+ [SPMI_DEC_DBG] = 0x00F8,
+ [SPMI_MST_DBG] = 0x00FC,
+};
+
+static u32 pmif_readl(struct pmif *arb, enum pmif_regs reg)
+{
+ return readl(arb->base + arb->data->regs[reg]);
+}
+
+static void pmif_writel(struct pmif *arb, u32 val, enum pmif_regs reg)
+{
+ writel(val, arb->base + arb->data->regs[reg]);
+}
+
+static void mtk_spmi_writel(struct pmif *arb, u32 val, enum spmi_regs reg)
+{
+ writel(val, arb->spmimst_base + arb->data->spmimst_regs[reg]);
+}
+
+static bool pmif_is_fsm_vldclr(struct pmif *arb)
+{
+ u32 reg_rdata;
+
+ reg_rdata = pmif_readl(arb, arb->chan.ch_sta);
+
+ return GET_SWINF(reg_rdata) == SWINF_WFVLDCLR;
+}
+
+static int pmif_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
+{
+ struct pmif *arb = spmi_controller_get_drvdata(ctrl);
+ u32 rdata, cmd;
+ int ret;
+
+ /* Check the opcode */
+ if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
+ return -EINVAL;
+
+ cmd = opc - SPMI_CMD_RESET;
+
+ mtk_spmi_writel(arb, (cmd << 0x4) | sid, SPMI_OP_ST_CTRL);
+ ret = readl_poll_timeout_atomic(arb->spmimst_base + arb->data->spmimst_regs[SPMI_OP_ST_STA],
+ rdata, (rdata & SPMI_OP_ST_BUSY) == SPMI_OP_ST_BUSY,
+ PMIF_DELAY_US, PMIF_TIMEOUT_US);
+ if (ret < 0)
+ dev_err(&ctrl->dev, "timeout, err = %d\n", ret);
+
+ return ret;
+}
+
+static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 addr, u8 *buf, size_t len)
+{
+ struct pmif *arb = spmi_controller_get_drvdata(ctrl);
+ struct ch_reg *inf_reg;
+ int ret;
+ u32 data, cmd;
+
+	/* Validate the arguments. */
+ if (sid & ~0xf) {
+		dev_err(&ctrl->dev, "slave id exceeds the maximum\n");
+ return -EINVAL;
+ }
+
+ if (len > 4) {
+		dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but %zu requested\n", len);
+		return -EINVAL;
+ }
+
+ if (opc >= 0x60 && opc <= 0x7f)
+ opc = PMIF_CMD_REG;
+ else if ((opc >= 0x20 && opc <= 0x2f) || (opc >= 0x38 && opc <= 0x3f))
+ opc = PMIF_CMD_EXT_REG_LONG;
+ else
+ return -EINVAL;
+
+ /* Wait for Software Interface FSM state to be IDLE. */
+ inf_reg = &arb->chan;
+ ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
+ data, GET_SWINF(data) == SWINF_IDLE,
+ PMIF_DELAY_US, PMIF_TIMEOUT_US);
+ if (ret < 0) {
+		/* Set channel ready if the data has been transferred. */
+ if (pmif_is_fsm_vldclr(arb))
+ pmif_writel(arb, 1, inf_reg->ch_rdy);
+ dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
+ return ret;
+ }
+
+ /* Send the command. */
+ cmd = (opc << 30) | (sid << 24) | ((len - 1) << 16) | addr;
+ pmif_writel(arb, cmd, inf_reg->ch_send);
+
+ /*
+ * Wait for Software Interface FSM state to be WFVLDCLR,
+ * read the data and clear the valid flag.
+ */
+ ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
+ data, GET_SWINF(data) == SWINF_WFVLDCLR,
+ PMIF_DELAY_US, PMIF_TIMEOUT_US);
+ if (ret < 0) {
+ dev_err(&ctrl->dev, "failed to wait for SWINF_WFVLDCLR\n");
+ return ret;
+ }
+
+ data = pmif_readl(arb, inf_reg->rdata);
+ memcpy(buf, &data, len);
+ pmif_writel(arb, 1, inf_reg->ch_rdy);
+
+ return 0;
+}
+
+static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 addr, const u8 *buf, size_t len)
+{
+ struct pmif *arb = spmi_controller_get_drvdata(ctrl);
+ struct ch_reg *inf_reg;
+ int ret;
+ u32 data, cmd;
+
+ if (len > 4) {
+		dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but %zu requested\n", len);
+		return -EINVAL;
+ }
+
+ /* Check the opcode */
+ if (opc >= 0x40 && opc <= 0x5F)
+ opc = PMIF_CMD_REG;
+ else if ((opc <= 0xF) || (opc >= 0x30 && opc <= 0x37))
+ opc = PMIF_CMD_EXT_REG_LONG;
+ else if (opc >= 0x80)
+ opc = PMIF_CMD_REG_0;
+ else
+ return -EINVAL;
+
+ /* Wait for Software Interface FSM state to be IDLE. */
+ inf_reg = &arb->chan;
+ ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta],
+ data, GET_SWINF(data) == SWINF_IDLE,
+ PMIF_DELAY_US, PMIF_TIMEOUT_US);
+ if (ret < 0) {
+		/* Set channel ready if the data has been transferred. */
+ if (pmif_is_fsm_vldclr(arb))
+ pmif_writel(arb, 1, inf_reg->ch_rdy);
+ dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n");
+ return ret;
+ }
+
+ /* Set the write data. */
+ memcpy(&data, buf, len);
+ pmif_writel(arb, data, inf_reg->wdata);
+
+ /* Send the command. */
+ cmd = (opc << 30) | BIT(29) | (sid << 24) | ((len - 1) << 16) | addr;
+ pmif_writel(arb, cmd, inf_reg->ch_send);
+
+ return 0;
+}
+
+static const struct pmif_data mt6873_pmif_arb = {
+ .regs = mt6873_regs,
+ .spmimst_regs = mt6873_spmi_regs,
+ .soc_chan = 2,
+};
+
+static const struct pmif_data mt8195_pmif_arb = {
+ .regs = mt8195_regs,
+ .spmimst_regs = mt8195_spmi_regs,
+ .soc_chan = 2,
+};
+
+static int mtk_spmi_probe(struct platform_device *pdev)
+{
+ struct pmif *arb;
+ struct spmi_controller *ctrl;
+ int err, i;
+ u32 chan_offset;
+
+ ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*arb));
+ if (!ctrl)
+ return -ENOMEM;
+
+ arb = spmi_controller_get_drvdata(ctrl);
+ arb->data = device_get_match_data(&pdev->dev);
+ if (!arb->data) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "Cannot get drv_data\n");
+ goto err_put_ctrl;
+ }
+
+ arb->base = devm_platform_ioremap_resource_byname(pdev, "pmif");
+ if (IS_ERR(arb->base)) {
+ err = PTR_ERR(arb->base);
+ goto err_put_ctrl;
+ }
+
+ arb->spmimst_base = devm_platform_ioremap_resource_byname(pdev, "spmimst");
+ if (IS_ERR(arb->spmimst_base)) {
+ err = PTR_ERR(arb->spmimst_base);
+ goto err_put_ctrl;
+ }
+
+ arb->nclks = ARRAY_SIZE(pmif_clock_names);
+ for (i = 0; i < arb->nclks; i++)
+ arb->clks[i].id = pmif_clock_names[i];
+
+ err = devm_clk_bulk_get(&pdev->dev, arb->nclks, arb->clks);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get clocks: %d\n", err);
+ goto err_put_ctrl;
+ }
+
+ err = clk_bulk_prepare_enable(arb->nclks, arb->clks);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable clocks: %d\n", err);
+ goto err_put_ctrl;
+ }
+
+ ctrl->cmd = pmif_arb_cmd;
+ ctrl->read_cmd = pmif_spmi_read_cmd;
+ ctrl->write_cmd = pmif_spmi_write_cmd;
+
+ chan_offset = PMIF_CHAN_OFFSET * arb->data->soc_chan;
+ arb->chan.ch_sta = PMIF_SWINF_0_STA + chan_offset;
+ arb->chan.wdata = PMIF_SWINF_0_WDATA_31_0 + chan_offset;
+ arb->chan.rdata = PMIF_SWINF_0_RDATA_31_0 + chan_offset;
+ arb->chan.ch_send = PMIF_SWINF_0_ACC + chan_offset;
+ arb->chan.ch_rdy = PMIF_SWINF_0_VLD_CLR + chan_offset;
+
+ platform_set_drvdata(pdev, ctrl);
+
+ err = spmi_controller_add(ctrl);
+ if (err)
+		goto err_disable_clks;
+
+ return 0;
+
+err_disable_clks:
+ clk_bulk_disable_unprepare(arb->nclks, arb->clks);
+err_put_ctrl:
+ spmi_controller_put(ctrl);
+ return err;
+}
+
+static int mtk_spmi_remove(struct platform_device *pdev)
+{
+ struct spmi_controller *ctrl = platform_get_drvdata(pdev);
+ struct pmif *arb = spmi_controller_get_drvdata(ctrl);
+
+ clk_bulk_disable_unprepare(arb->nclks, arb->clks);
+ spmi_controller_remove(ctrl);
+ spmi_controller_put(ctrl);
+ return 0;
+}
+
+static const struct of_device_id mtk_spmi_match_table[] = {
+ {
+ .compatible = "mediatek,mt6873-spmi",
+ .data = &mt6873_pmif_arb,
+ }, {
+ .compatible = "mediatek,mt8195-spmi",
+ .data = &mt8195_pmif_arb,
+ }, {
+ /* sentinel */
+ },
+};
+MODULE_DEVICE_TABLE(of, mtk_spmi_match_table);
+
+static struct platform_driver mtk_spmi_driver = {
+ .driver = {
+ .name = "spmi-mtk",
+ .of_match_table = of_match_ptr(mtk_spmi_match_table),
+ },
+ .probe = mtk_spmi_probe,
+ .remove = mtk_spmi_remove,
+};
+module_platform_driver(mtk_spmi_driver);
+
+MODULE_AUTHOR("Hsin-Hsiung Wang <hsin-hsiung.wang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek SPMI Driver");
+MODULE_LICENSE("GPL");
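
For reference, pmif_spmi_read_cmd() and pmif_spmi_write_cmd() above both build
the word written to the SWINF ACC register with the same field packing. A
minimal standalone sketch of that layout follows; the helper name is
illustrative, not part of the driver:

#include <stdint.h>

/*
 * SWINF command-word layout used above:
 * [31:30] PMIF_CMD_* opcode class   [29] write flag (BIT(29))
 * [27:24] 4-bit slave id            [17:16] byte count minus one (len 1..4)
 * [15:0]  register address
 */
static uint32_t pmif_pack_cmd(uint32_t opc, uint32_t sid, uint32_t len,
			      uint32_t addr, int is_write)
{
	uint32_t cmd = (opc << 30) | (sid << 24) | ((len - 1) << 16) | addr;

	if (is_write)
		cmd |= UINT32_C(1) << 29;	/* BIT(29) in the driver */
	return cmd;
}

The driver sets BIT(29) only on writes; reads leave it clear, so the SWINF FSM
advances to WFVLDCLR with read data to be fetched and acknowledged.
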
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index bbbd311eda03..2113be40b5a9 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -261,20 +261,21 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
if (status & PMIC_ARB_STATUS_DONE) {
if (status & PMIC_ARB_STATUS_DENIED) {
- dev_err(&ctrl->dev, "%s: transaction denied (0x%x)\n",
- __func__, status);
+ dev_err(&ctrl->dev, "%s: %#x %#x: transaction denied (%#x)\n",
+ __func__, sid, addr, status);
return -EPERM;
}
if (status & PMIC_ARB_STATUS_FAILURE) {
- dev_err(&ctrl->dev, "%s: transaction failed (0x%x)\n",
- __func__, status);
+ dev_err(&ctrl->dev, "%s: %#x %#x: transaction failed (%#x)\n",
+ __func__, sid, addr, status);
+ WARN_ON(1);
return -EIO;
}
if (status & PMIC_ARB_STATUS_DROPPED) {
- dev_err(&ctrl->dev, "%s: transaction dropped (0x%x)\n",
- __func__, status);
+ dev_err(&ctrl->dev, "%s: %#x %#x: transaction dropped (%#x)\n",
+ __func__, sid, addr, status);
return -EIO;
}
@@ -283,8 +284,8 @@ static int pmic_arb_wait_for_done(struct spmi_controller *ctrl,
udelay(1);
}
- dev_err(&ctrl->dev, "%s: timeout, status 0x%x\n",
- __func__, status);
+ dev_err(&ctrl->dev, "%s: %#x %#x: timeout, status %#x\n",
+ __func__, sid, addr, status);
return -ETIMEDOUT;
}
@@ -333,24 +334,20 @@ static int pmic_arb_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
return pmic_arb->ver_ops->non_data_cmd(ctrl, opc, sid);
}
-static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
- u16 addr, u8 *buf, size_t len)
+static int pmic_arb_fmt_read_cmd(struct spmi_pmic_arb *pmic_arb, u8 opc, u8 sid,
+ u16 addr, size_t len, u32 *cmd, u32 *offset)
{
- struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
- unsigned long flags;
u8 bc = len - 1;
- u32 cmd;
int rc;
- u32 offset;
rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr,
PMIC_ARB_CHANNEL_OBS);
if (rc < 0)
return rc;
- offset = rc;
+ *offset = rc;
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
- dev_err(&ctrl->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
+		dev_err(&pmic_arb->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but %zu requested\n",
PMIC_ARB_MAX_TRANS_BYTES, len);
return -EINVAL;
}
@@ -365,14 +362,24 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
else
return -EINVAL;
- cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+ *cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+
+ return 0;
+}
+
+static int pmic_arb_read_cmd_unlocked(struct spmi_controller *ctrl, u32 cmd,
+ u32 offset, u8 sid, u16 addr, u8 *buf,
+ size_t len)
+{
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ u8 bc = len - 1;
+ int rc;
- raw_spin_lock_irqsave(&pmic_arb->lock, flags);
pmic_arb_set_rd_cmd(pmic_arb, offset + PMIC_ARB_CMD, cmd);
rc = pmic_arb_wait_for_done(ctrl, pmic_arb->rd_base, sid, addr,
PMIC_ARB_CHANNEL_OBS);
if (rc)
- goto done;
+ return rc;
pmic_arb_read_data(pmic_arb, buf, offset + PMIC_ARB_RDATA0,
min_t(u8, bc, 3));
@@ -380,30 +387,44 @@ static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
if (bc > 3)
pmic_arb_read_data(pmic_arb, buf + 4, offset + PMIC_ARB_RDATA1,
bc - 4);
+ return 0;
+}
-done:
+static int pmic_arb_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 addr, u8 *buf, size_t len)
+{
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ unsigned long flags;
+ u32 cmd, offset;
+ int rc;
+
+ rc = pmic_arb_fmt_read_cmd(pmic_arb, opc, sid, addr, len, &cmd,
+ &offset);
+ if (rc)
+ return rc;
+
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ rc = pmic_arb_read_cmd_unlocked(ctrl, cmd, offset, sid, addr, buf, len);
raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+
return rc;
}
-static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
- u16 addr, const u8 *buf, size_t len)
+static int pmic_arb_fmt_write_cmd(struct spmi_pmic_arb *pmic_arb, u8 opc,
+ u8 sid, u16 addr, size_t len, u32 *cmd,
+ u32 *offset)
{
- struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
- unsigned long flags;
u8 bc = len - 1;
- u32 cmd;
int rc;
- u32 offset;
rc = pmic_arb->ver_ops->offset(pmic_arb, sid, addr,
PMIC_ARB_CHANNEL_RW);
if (rc < 0)
return rc;
- offset = rc;
+ *offset = rc;
if (bc >= PMIC_ARB_MAX_TRANS_BYTES) {
- dev_err(&ctrl->dev, "pmic-arb supports 1..%d bytes per trans, but:%zu requested",
+		dev_err(&pmic_arb->spmic->dev, "pmic-arb supports 1..%d bytes per trans, but %zu requested\n",
PMIC_ARB_MAX_TRANS_BYTES, len);
return -EINVAL;
}
@@ -420,10 +441,19 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
else
return -EINVAL;
- cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+ *cmd = pmic_arb->ver_ops->fmt_cmd(opc, sid, addr, bc);
+
+ return 0;
+}
+
+static int pmic_arb_write_cmd_unlocked(struct spmi_controller *ctrl, u32 cmd,
+ u32 offset, u8 sid, u16 addr,
+ const u8 *buf, size_t len)
+{
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ u8 bc = len - 1;
/* Write data to FIFOs */
- raw_spin_lock_irqsave(&pmic_arb->lock, flags);
pmic_arb_write_data(pmic_arb, buf, offset + PMIC_ARB_WDATA0,
min_t(u8, bc, 3));
if (bc > 3)
@@ -432,8 +462,62 @@ static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
/* Start the transaction */
pmic_arb_base_write(pmic_arb, offset + PMIC_ARB_CMD, cmd);
- rc = pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr,
- PMIC_ARB_CHANNEL_RW);
+ return pmic_arb_wait_for_done(ctrl, pmic_arb->wr_base, sid, addr,
+ PMIC_ARB_CHANNEL_RW);
+}
+
+static int pmic_arb_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+ u16 addr, const u8 *buf, size_t len)
+{
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ unsigned long flags;
+ u32 cmd, offset;
+ int rc;
+
+ rc = pmic_arb_fmt_write_cmd(pmic_arb, opc, sid, addr, len, &cmd,
+ &offset);
+ if (rc)
+ return rc;
+
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ rc = pmic_arb_write_cmd_unlocked(ctrl, cmd, offset, sid, addr, buf,
+ len);
+ raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
+
+ return rc;
+}
+
+static int pmic_arb_masked_write(struct spmi_controller *ctrl, u8 sid, u16 addr,
+ const u8 *buf, const u8 *mask, size_t len)
+{
+ struct spmi_pmic_arb *pmic_arb = spmi_controller_get_drvdata(ctrl);
+ u32 read_cmd, read_offset, write_cmd, write_offset;
+ u8 temp[PMIC_ARB_MAX_TRANS_BYTES];
+ unsigned long flags;
+ int rc, i;
+
+ rc = pmic_arb_fmt_read_cmd(pmic_arb, SPMI_CMD_EXT_READL, sid, addr, len,
+ &read_cmd, &read_offset);
+ if (rc)
+ return rc;
+
+ rc = pmic_arb_fmt_write_cmd(pmic_arb, SPMI_CMD_EXT_WRITEL, sid, addr,
+ len, &write_cmd, &write_offset);
+ if (rc)
+ return rc;
+
+ raw_spin_lock_irqsave(&pmic_arb->lock, flags);
+ rc = pmic_arb_read_cmd_unlocked(ctrl, read_cmd, read_offset, sid, addr,
+ temp, len);
+ if (rc)
+ goto done;
+
+ for (i = 0; i < len; i++)
+ temp[i] = (temp[i] & ~mask[i]) | (buf[i] & mask[i]);
+
+ rc = pmic_arb_write_cmd_unlocked(ctrl, write_cmd, write_offset, sid,
+ addr, temp, len);
+done:
raw_spin_unlock_irqrestore(&pmic_arb->lock, flags);
return rc;
@@ -482,6 +566,23 @@ static void qpnpint_spmi_read(struct irq_data *d, u8 reg, void *buf, size_t len)
d->irq);
}
+static int qpnpint_spmi_masked_write(struct irq_data *d, u8 reg,
+ const void *buf, const void *mask,
+ size_t len)
+{
+ struct spmi_pmic_arb *pmic_arb = irq_data_get_irq_chip_data(d);
+ u8 sid = hwirq_to_sid(d->hwirq);
+ u8 per = hwirq_to_per(d->hwirq);
+ int rc;
+
+ rc = pmic_arb_masked_write(pmic_arb->spmic, sid, (per << 8) + reg, buf,
+ mask, len);
+ if (rc)
+ dev_err_ratelimited(&pmic_arb->spmic->dev, "failed irqchip transaction on %x rc=%d\n",
+ d->irq, rc);
+ return rc;
+}
+
static void cleanup_irq(struct spmi_pmic_arb *pmic_arb, u16 apid, int id)
{
u16 ppid = pmic_arb->apid_data[apid].ppid;
@@ -600,18 +701,18 @@ static void qpnpint_irq_unmask(struct irq_data *d)
static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
- struct spmi_pmic_arb_qpnpint_type type;
+ struct spmi_pmic_arb_qpnpint_type type = {0};
+ struct spmi_pmic_arb_qpnpint_type mask;
irq_flow_handler_t flow_handler;
- u8 irq = hwirq_to_irq(d->hwirq);
-
- qpnpint_spmi_read(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+ u8 irq_bit = BIT(hwirq_to_irq(d->hwirq));
+ int rc;
if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
- type.type |= BIT(irq);
+ type.type = irq_bit;
if (flow_type & IRQF_TRIGGER_RISING)
- type.polarity_high |= BIT(irq);
+ type.polarity_high = irq_bit;
if (flow_type & IRQF_TRIGGER_FALLING)
- type.polarity_low |= BIT(irq);
+ type.polarity_low = irq_bit;
flow_handler = handle_edge_irq;
} else {
@@ -619,19 +720,23 @@ static int qpnpint_irq_set_type(struct irq_data *d, unsigned int flow_type)
(flow_type & (IRQF_TRIGGER_LOW)))
return -EINVAL;
- type.type &= ~BIT(irq); /* level trig */
if (flow_type & IRQF_TRIGGER_HIGH)
- type.polarity_high |= BIT(irq);
+ type.polarity_high = irq_bit;
else
- type.polarity_low |= BIT(irq);
+ type.polarity_low = irq_bit;
flow_handler = handle_level_irq;
}
- qpnpint_spmi_write(d, QPNPINT_REG_SET_TYPE, &type, sizeof(type));
+ mask.type = irq_bit;
+ mask.polarity_high = irq_bit;
+ mask.polarity_low = irq_bit;
+
+ rc = qpnpint_spmi_masked_write(d, QPNPINT_REG_SET_TYPE, &type, &mask,
+ sizeof(type));
irq_set_handler_locked(d, flow_handler);
- return 0;
+ return rc;
}
static int qpnpint_irq_set_wake(struct irq_data *d, unsigned int on)
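
pmic_arb_masked_write() above holds pmic_arb->lock across a read, a per-byte
merge, and a write, which is what makes the update atomic with respect to
other users of the arbiter. The merge itself reduces to the following
standalone sketch (illustrative helper name):

#include <stddef.h>
#include <stdint.h>

/* Keep bits cleared in mask[]; take bits set in mask[] from buf[]. */
static void masked_update(uint8_t *cur, const uint8_t *buf,
			  const uint8_t *mask, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		cur[i] = (cur[i] & ~mask[i]) | (buf[i] & mask[i]);
}

This is why qpnpint_irq_set_type() can now update only the type/polarity bits
of one interrupt without clobbering its siblings in the same register.
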
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 7fec86946131..8d41fdd40657 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -84,10 +84,6 @@ source "drivers/staging/vc04_services/Kconfig"
source "drivers/staging/pi433/Kconfig"
-source "drivers/staging/mt7621-dma/Kconfig"
-
-source "drivers/staging/ralink-gdma/Kconfig"
-
source "drivers/staging/mt7621-dts/Kconfig"
source "drivers/staging/axis-fifo/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index e66e19c45425..02b01949b94e 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -32,8 +32,6 @@ obj-$(CONFIG_KS7010) += ks7010/
obj-$(CONFIG_GREYBUS) += greybus/
obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/
obj-$(CONFIG_PI433) += pi433/
-obj-$(CONFIG_SOC_MT7621) += mt7621-dma/
-obj-$(CONFIG_DMA_RALINK) += ralink-gdma/
obj-$(CONFIG_SOC_MT7621) += mt7621-dts/
obj-$(CONFIG_XIL_AXIS_FIFO) += axis-fifo/
obj-$(CONFIG_FIELDBUS_DEV) += fieldbus/
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 632f140dddbc..dfd2b357f484 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -809,7 +809,6 @@ end:
static int axis_fifo_probe(struct platform_device *pdev)
{
- struct resource *r_irq; /* interrupt resources */
struct resource *r_mem; /* IO mem resources */
struct device *dev = &pdev->dev; /* OS device (from device tree) */
struct axis_fifo *fifo = NULL;
@@ -882,16 +881,12 @@ static int axis_fifo_probe(struct platform_device *pdev)
*/
/* get IRQ resource */
- r_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!r_irq) {
- dev_err(fifo->dt_device, "no IRQ found for 0x%pa\n",
- &r_mem->start);
- rc = -EIO;
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0)
goto err_initial;
- }
/* request IRQ */
- fifo->irq = r_irq->start;
+ fifo->irq = rc;
rc = devm_request_irq(fifo->dt_device, fifo->irq, &axis_fifo_irq, 0,
DRIVER_NAME, fifo);
if (rc) {
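
The axis-fifo hunk above converts to platform_get_irq(), which returns a
negative errno (and logs its own error) instead of requiring a struct
resource lookup. A minimal sketch of the resulting pattern, with illustrative
function names:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)		/* platform_get_irq() already logged the error */
		return irq;

	return devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
				dev_name(&pdev->dev), pdev);
}
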
diff --git a/drivers/staging/fbtft/Kconfig b/drivers/staging/fbtft/Kconfig
index dad1ddcd7b0c..4d29e8c1014e 100644
--- a/drivers/staging/fbtft/Kconfig
+++ b/drivers/staging/fbtft/Kconfig
@@ -200,9 +200,3 @@ config FB_TFT_UPD161704
depends on FB_TFT
help
Generic Framebuffer support for uPD161704
-
-config FB_TFT_WATTEROTT
- tristate "FB driver for the WATTEROTT LCD Controller"
- depends on FB_TFT
- help
- Generic Framebuffer support for WATTEROTT
diff --git a/drivers/staging/fbtft/Makefile b/drivers/staging/fbtft/Makefile
index e87193f7df14..e9cdf0f0a7da 100644
--- a/drivers/staging/fbtft/Makefile
+++ b/drivers/staging/fbtft/Makefile
@@ -36,4 +36,3 @@ obj-$(CONFIG_FB_TFT_TLS8204) += fb_tls8204.o
obj-$(CONFIG_FB_TFT_UC1611) += fb_uc1611.o
obj-$(CONFIG_FB_TFT_UC1701) += fb_uc1701.o
obj-$(CONFIG_FB_TFT_UPD161704) += fb_upd161704.o
-obj-$(CONFIG_FB_TFT_WATTEROTT) += fb_watterott.o
diff --git a/drivers/staging/fbtft/fb_sh1106.c b/drivers/staging/fbtft/fb_sh1106.c
index 7b9ab39e1c1a..9685ca516a0e 100644
--- a/drivers/staging/fbtft/fb_sh1106.c
+++ b/drivers/staging/fbtft/fb_sh1106.c
@@ -173,12 +173,7 @@ static struct fbtft_display display = {
},
};
-FBTFT_REGISTER_DRIVER(DRVNAME, "sinowealth,sh1106", &display);
-
-MODULE_ALIAS("spi:" DRVNAME);
-MODULE_ALIAS("platform:" DRVNAME);
-MODULE_ALIAS("spi:sh1106");
-MODULE_ALIAS("platform:sh1106");
+FBTFT_REGISTER_SPI_DRIVER(DRVNAME, "sinowealth", "sh1106", &display);
MODULE_DESCRIPTION("SH1106 OLED Driver");
MODULE_AUTHOR("Heiner Kallweit");
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
deleted file mode 100644
index a57e1f4feef3..000000000000
--- a/drivers/staging/fbtft/fb_watterott.c
+++ /dev/null
@@ -1,302 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * FB driver for the Watterott LCD Controller
- *
- * Copyright (C) 2013 Noralf Tronnes
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-
-#include "fbtft.h"
-
-#define DRVNAME "fb_watterott"
-#define WIDTH 320
-#define HEIGHT 240
-#define FPS 5
-#define TXBUFLEN 1024
-#define DEFAULT_BRIGHTNESS 50
-
-#define CMD_VERSION 0x01
-#define CMD_LCD_LED 0x10
-#define CMD_LCD_RESET 0x11
-#define CMD_LCD_ORIENTATION 0x20
-#define CMD_LCD_DRAWIMAGE 0x27
-#define COLOR_RGB323 8
-#define COLOR_RGB332 9
-#define COLOR_RGB233 10
-#define COLOR_RGB565 16
-
-static short mode = 565;
-module_param(mode, short, 0000);
-MODULE_PARM_DESC(mode, "RGB color transfer mode: 332, 565 (default)");
-
-static void write_reg8_bus8(struct fbtft_par *par, int len, ...)
-{
- va_list args;
- int i, ret;
- u8 *buf = par->buf;
-
- va_start(args, len);
- for (i = 0; i < len; i++)
- *buf++ = (u8)va_arg(args, unsigned int);
- va_end(args);
-
- fbtft_par_dbg_hex(DEBUG_WRITE_REGISTER, par,
- par->info->device, u8, par->buf,
- len, "%s: ", __func__);
-
- ret = par->fbtftops.write(par, par->buf, len);
- if (ret < 0) {
- dev_err(par->info->device,
- "write() failed and returned %d\n", ret);
- return;
- }
-}
-
-static int write_vmem(struct fbtft_par *par, size_t offset, size_t len)
-{
- unsigned int start_line, end_line;
- u16 *vmem16 = (u16 *)(par->info->screen_buffer + offset);
- __be16 *pos = par->txbuf.buf + 1;
- __be16 *buf16 = par->txbuf.buf + 10;
- int i, j;
- int ret = 0;
-
- start_line = offset / par->info->fix.line_length;
- end_line = start_line + (len / par->info->fix.line_length) - 1;
-
- /* Set command header. pos: x, y, w, h */
- ((u8 *)par->txbuf.buf)[0] = CMD_LCD_DRAWIMAGE;
- pos[0] = 0;
- pos[2] = cpu_to_be16(par->info->var.xres);
- pos[3] = cpu_to_be16(1);
- ((u8 *)par->txbuf.buf)[9] = COLOR_RGB565;
-
- for (i = start_line; i <= end_line; i++) {
- pos[1] = cpu_to_be16(i);
- for (j = 0; j < par->info->var.xres; j++)
- buf16[j] = cpu_to_be16(*vmem16++);
- ret = par->fbtftops.write(par,
- par->txbuf.buf, 10 + par->info->fix.line_length);
- if (ret < 0)
- return ret;
- udelay(300);
- }
-
- return 0;
-}
-
-static inline int rgb565_to_rgb332(u16 c)
-{
- return ((c & 0xE000) >> 8) | ((c & 000700) >> 6) | ((c & 0x0018) >> 3);
-}
-
-static int write_vmem_8bit(struct fbtft_par *par, size_t offset, size_t len)
-{
- unsigned int start_line, end_line;
- u16 *vmem16 = (u16 *)(par->info->screen_buffer + offset);
- __be16 *pos = par->txbuf.buf + 1;
- u8 *buf8 = par->txbuf.buf + 10;
- int i, j;
- int ret = 0;
-
- start_line = offset / par->info->fix.line_length;
- end_line = start_line + (len / par->info->fix.line_length) - 1;
-
- /* Set command header. pos: x, y, w, h */
- ((u8 *)par->txbuf.buf)[0] = CMD_LCD_DRAWIMAGE;
- pos[0] = 0;
- pos[2] = cpu_to_be16(par->info->var.xres);
- pos[3] = cpu_to_be16(1);
- ((u8 *)par->txbuf.buf)[9] = COLOR_RGB332;
-
- for (i = start_line; i <= end_line; i++) {
- pos[1] = cpu_to_be16(i);
- for (j = 0; j < par->info->var.xres; j++) {
- buf8[j] = rgb565_to_rgb332(*vmem16);
- vmem16++;
- }
- ret = par->fbtftops.write(par,
- par->txbuf.buf, 10 + par->info->var.xres);
- if (ret < 0)
- return ret;
- udelay(700);
- }
-
- return 0;
-}
-
-static unsigned int firmware_version(struct fbtft_par *par)
-{
- u8 rxbuf[4] = {0, };
-
- write_reg(par, CMD_VERSION);
- par->fbtftops.read(par, rxbuf, 4);
- if (rxbuf[1] != '.')
- return 0;
-
- return (rxbuf[0] - '0') << 8 | (rxbuf[2] - '0') << 4 | (rxbuf[3] - '0');
-}
-
-static int init_display(struct fbtft_par *par)
-{
- int ret;
- unsigned int version;
- u8 save_mode;
-
- /* enable SPI interface by having CS and MOSI low during reset */
- save_mode = par->spi->mode;
- /*
- * Set CS active inverse polarity: just setting SPI_CS_HIGH does not
- * work with GPIO based chip selects that are logically active high
- * but inverted inside the GPIO library, so enforce inverted
- * semantics.
- */
- par->spi->mode ^= SPI_CS_HIGH;
- ret = spi_setup(par->spi);
- if (ret) {
- dev_err(par->info->device,
- "Could not set inverse CS polarity\n");
- return ret;
- }
- write_reg(par, 0x00); /* make sure mode is set */
-
- mdelay(50);
- par->fbtftops.reset(par);
- mdelay(1000);
- par->spi->mode = save_mode;
- ret = spi_setup(par->spi);
- if (ret) {
- dev_err(par->info->device, "Could not restore SPI mode\n");
- return ret;
- }
- write_reg(par, 0x00);
-
- version = firmware_version(par);
- fbtft_par_dbg(DEBUG_INIT_DISPLAY, par, "Firmware version: %x.%02x\n",
- version >> 8, version & 0xFF);
-
- if (mode == 332)
- par->fbtftops.write_vmem = write_vmem_8bit;
- return 0;
-}
-
-static void set_addr_win(struct fbtft_par *par, int xs, int ys, int xe, int ye)
-{
- /* not used on this controller */
-}
-
-static int set_var(struct fbtft_par *par)
-{
- u8 rotate;
-
- /* this controller rotates clock wise */
- switch (par->info->var.rotate) {
- case 90:
- rotate = 27;
- break;
- case 180:
- rotate = 18;
- break;
- case 270:
- rotate = 9;
- break;
- default:
- rotate = 0;
- }
- write_reg(par, CMD_LCD_ORIENTATION, rotate);
-
- return 0;
-}
-
-static int verify_gpios(struct fbtft_par *par)
-{
- if (!par->gpio.reset) {
- dev_err(par->info->device, "Missing 'reset' gpio. Aborting.\n");
- return -EINVAL;
- }
- return 0;
-}
-
-#ifdef CONFIG_FB_BACKLIGHT
-static int backlight_chip_update_status(struct backlight_device *bd)
-{
- struct fbtft_par *par = bl_get_data(bd);
- int brightness = bd->props.brightness;
-
- fbtft_par_dbg(DEBUG_BACKLIGHT, par,
- "%s: brightness=%d, power=%d, fb_blank=%d\n", __func__,
- bd->props.brightness, bd->props.power,
- bd->props.fb_blank);
-
- if (bd->props.power != FB_BLANK_UNBLANK)
- brightness = 0;
-
- if (bd->props.fb_blank != FB_BLANK_UNBLANK)
- brightness = 0;
-
- write_reg(par, CMD_LCD_LED, brightness);
-
- return 0;
-}
-
-static const struct backlight_ops bl_ops = {
- .update_status = backlight_chip_update_status,
-};
-
-static void register_chip_backlight(struct fbtft_par *par)
-{
- struct backlight_device *bd;
- struct backlight_properties bl_props = { 0, };
-
- bl_props.type = BACKLIGHT_RAW;
- bl_props.power = FB_BLANK_POWERDOWN;
- bl_props.max_brightness = 100;
- bl_props.brightness = DEFAULT_BRIGHTNESS;
-
- bd = backlight_device_register(dev_driver_string(par->info->device),
- par->info->device, par, &bl_ops,
- &bl_props);
- if (IS_ERR(bd)) {
- dev_err(par->info->device,
- "cannot register backlight device (%ld)\n",
- PTR_ERR(bd));
- return;
- }
- par->info->bl_dev = bd;
-
- if (!par->fbtftops.unregister_backlight)
- par->fbtftops.unregister_backlight = fbtft_unregister_backlight;
-}
-#else
-#define register_chip_backlight NULL
-#endif
-
-static struct fbtft_display display = {
- .regwidth = 8,
- .buswidth = 8,
- .width = WIDTH,
- .height = HEIGHT,
- .fps = FPS,
- .txbuflen = TXBUFLEN,
- .fbtftops = {
- .write_register = write_reg8_bus8,
- .write_vmem = write_vmem,
- .init_display = init_display,
- .set_addr_win = set_addr_win,
- .set_var = set_var,
- .verify_gpios = verify_gpios,
- .register_backlight = register_chip_backlight,
- },
-};
-
-FBTFT_REGISTER_DRIVER(DRVNAME, "watterott,openlcd", &display);
-
-MODULE_ALIAS("spi:" DRVNAME);
-
-MODULE_DESCRIPTION("FB driver for the Watterott LCD Controller");
-MODULE_AUTHOR("Noralf Tronnes");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 6869f3603b0e..55677efc0138 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -334,7 +334,10 @@ static int __init fbtft_driver_module_init(void) \
ret = spi_register_driver(&fbtft_driver_spi_driver); \
if (ret < 0) \
return ret; \
- return platform_driver_register(&fbtft_driver_platform_driver); \
+ ret = platform_driver_register(&fbtft_driver_platform_driver); \
+ if (ret < 0) \
+ spi_unregister_driver(&fbtft_driver_spi_driver); \
+ return ret; \
} \
\
static void __exit fbtft_driver_module_exit(void) \
@@ -346,6 +349,47 @@ static void __exit fbtft_driver_module_exit(void) \
module_init(fbtft_driver_module_init); \
module_exit(fbtft_driver_module_exit);
+#define FBTFT_REGISTER_SPI_DRIVER(_name, _comp_vend, _comp_dev, _display) \
+ \
+static int fbtft_driver_probe_spi(struct spi_device *spi) \
+{ \
+ return fbtft_probe_common(_display, spi, NULL); \
+} \
+ \
+static int fbtft_driver_remove_spi(struct spi_device *spi) \
+{ \
+ struct fb_info *info = spi_get_drvdata(spi); \
+ \
+ fbtft_remove_common(&spi->dev, info); \
+ return 0; \
+} \
+ \
+static const struct of_device_id dt_ids[] = { \
+ { .compatible = _comp_vend "," _comp_dev }, \
+ {}, \
+}; \
+ \
+MODULE_DEVICE_TABLE(of, dt_ids); \
+ \
+static const struct spi_device_id spi_ids[] = { \
+ { .name = _comp_dev }, \
+ {}, \
+}; \
+ \
+MODULE_DEVICE_TABLE(spi, spi_ids); \
+ \
+static struct spi_driver fbtft_driver_spi_driver = { \
+ .driver = { \
+ .name = _name, \
+ .of_match_table = dt_ids, \
+ }, \
+ .id_table = spi_ids, \
+ .probe = fbtft_driver_probe_spi, \
+ .remove = fbtft_driver_remove_spi, \
+}; \
+ \
+module_spi_driver(fbtft_driver_spi_driver);
+
/* Debug macros */
/* shorthand debug levels */
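
The first fbtft.h hunk above fixes an unwind bug: if
platform_driver_register() fails after spi_register_driver() has succeeded,
the SPI driver must be unregistered before the error is returned. The pattern
in isolation, with placeholder driver names:

static int __init example_module_init(void)
{
	int ret;

	ret = spi_register_driver(&example_spi_driver);
	if (ret < 0)
		return ret;

	ret = platform_driver_register(&example_platform_driver);
	if (ret < 0)
		spi_unregister_driver(&example_spi_driver);	/* unwind */

	return ret;
}
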
diff --git a/drivers/staging/greybus/audio_manager_module.c b/drivers/staging/greybus/audio_manager_module.c
index 525cf8f8394f..0a0f0a394c84 100644
--- a/drivers/staging/greybus/audio_manager_module.c
+++ b/drivers/staging/greybus/audio_manager_module.c
@@ -142,11 +142,12 @@ static struct attribute *gb_audio_module_default_attrs[] = {
&gb_audio_module_op_devices_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
+ATTRIBUTE_GROUPS(gb_audio_module_default);
static struct kobj_type gb_audio_module_type = {
.sysfs_ops = &gb_audio_module_sysfs_ops,
.release = gb_audio_module_release,
- .default_attrs = gb_audio_module_default_attrs,
+ .default_groups = gb_audio_module_default_groups,
};
static void send_add_uevent(struct gb_audio_manager_module *module)
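
This follows the tree-wide move from kobj_type.default_attrs to
.default_groups: ATTRIBUTE_GROUPS(foo) turns a NULL-terminated foo_attrs[]
array into a foo_groups[] array that the kobject core consumes directly. A
sketch with placeholder names (foo_bar_attribute and foo_sysfs_ops stand in
for real definitions):

static struct attribute *foo_attrs[] = {
	&foo_bar_attribute.attr,	/* placeholder attribute */
	NULL,				/* list must be NULL-terminated */
};
ATTRIBUTE_GROUPS(foo);			/* defines foo_group and foo_groups[] */

static struct kobj_type foo_ktype = {
	.sysfs_ops	= &foo_sysfs_ops,	/* placeholder ops */
	.default_groups	= foo_groups,
};
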
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index 1e613d42d823..62d7674852be 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -147,6 +147,9 @@ static const char **gb_generate_enum_strings(struct gbaudio_module_info *gb,
items = le32_to_cpu(gbenum->items);
strings = devm_kcalloc(gb->dev, items, sizeof(char *), GFP_KERNEL);
+ if (!strings)
+ return NULL;
+
data = gbenum->names;
for (i = 0; i < items; i++) {
@@ -655,6 +658,8 @@ static int gbaudio_tplg_create_enum_kctl(struct gbaudio_module_info *gb,
/* since count=1, and reg is dummy */
gbe->items = le32_to_cpu(gb_enum->items);
gbe->texts = gb_generate_enum_strings(gb, gb_enum);
+ if (!gbe->texts)
+ return -ENOMEM;
/* debug enum info */
dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gbe->items,
@@ -862,6 +867,8 @@ static int gbaudio_tplg_create_enum_ctl(struct gbaudio_module_info *gb,
/* since count=1, and reg is dummy */
gbe->items = le32_to_cpu(gb_enum->items);
gbe->texts = gb_generate_enum_strings(gb, gb_enum);
+ if (!gbe->texts)
+ return -ENOMEM;
/* debug enum info */
dev_dbg(gb->dev, "Max:%d, name_length:%d\n", gbe->items,
@@ -974,6 +981,44 @@ static int gbaudio_widget_event(struct snd_soc_dapm_widget *w,
return ret;
}
+static const struct snd_soc_dapm_widget gbaudio_widgets[] = {
+ [snd_soc_dapm_spk] = SND_SOC_DAPM_SPK(NULL, gbcodec_event_spk),
+ [snd_soc_dapm_hp] = SND_SOC_DAPM_HP(NULL, gbcodec_event_hp),
+ [snd_soc_dapm_mic] = SND_SOC_DAPM_MIC(NULL, gbcodec_event_int_mic),
+ [snd_soc_dapm_output] = SND_SOC_DAPM_OUTPUT(NULL),
+ [snd_soc_dapm_input] = SND_SOC_DAPM_INPUT(NULL),
+ [snd_soc_dapm_switch] = SND_SOC_DAPM_SWITCH_E(NULL, SND_SOC_NOPM,
+ 0, 0, NULL,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_pga] = SND_SOC_DAPM_PGA_E(NULL, SND_SOC_NOPM,
+ 0, 0, NULL, 0,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_mixer] = SND_SOC_DAPM_MIXER_E(NULL, SND_SOC_NOPM,
+ 0, 0, NULL, 0,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_mux] = SND_SOC_DAPM_MUX_E(NULL, SND_SOC_NOPM,
+ 0, 0, NULL,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_aif_in] = SND_SOC_DAPM_AIF_IN_E(NULL, NULL, 0,
+ SND_SOC_NOPM, 0, 0,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+ [snd_soc_dapm_aif_out] = SND_SOC_DAPM_AIF_OUT_E(NULL, NULL, 0,
+ SND_SOC_NOPM, 0, 0,
+ gbaudio_widget_event,
+ SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMD),
+};
+
static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
struct snd_soc_dapm_widget *dw,
struct gb_audio_widget *w, int *w_size)
@@ -1034,6 +1079,10 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
csize += le16_to_cpu(gbenum->names_length);
control->texts = (const char * const *)
gb_generate_enum_strings(module, gbenum);
+ if (!control->texts) {
+ ret = -ENOMEM;
+ goto error;
+ }
control->items = le32_to_cpu(gbenum->items);
} else {
csize = sizeof(struct gb_audio_control);
@@ -1052,77 +1101,37 @@ static int gbaudio_tplg_create_widget(struct gbaudio_module_info *module,
switch (w->type) {
case snd_soc_dapm_spk:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_SPK(w->name, gbcodec_event_spk);
+ *dw = gbaudio_widgets[w->type];
module->op_devices |= GBAUDIO_DEVICE_OUT_SPEAKER;
break;
case snd_soc_dapm_hp:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_HP(w->name, gbcodec_event_hp);
+ *dw = gbaudio_widgets[w->type];
module->op_devices |= (GBAUDIO_DEVICE_OUT_WIRED_HEADSET
| GBAUDIO_DEVICE_OUT_WIRED_HEADPHONE);
module->ip_devices |= GBAUDIO_DEVICE_IN_WIRED_HEADSET;
break;
case snd_soc_dapm_mic:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_MIC(w->name, gbcodec_event_int_mic);
+ *dw = gbaudio_widgets[w->type];
module->ip_devices |= GBAUDIO_DEVICE_IN_BUILTIN_MIC;
break;
case snd_soc_dapm_output:
- *dw = (struct snd_soc_dapm_widget)SND_SOC_DAPM_OUTPUT(w->name);
- break;
case snd_soc_dapm_input:
- *dw = (struct snd_soc_dapm_widget)SND_SOC_DAPM_INPUT(w->name);
- break;
case snd_soc_dapm_switch:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_SWITCH_E(w->name, SND_SOC_NOPM, 0, 0,
- widget_kctls,
- gbaudio_widget_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD);
- break;
case snd_soc_dapm_pga:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_PGA_E(w->name, SND_SOC_NOPM, 0, 0, NULL, 0,
- gbaudio_widget_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD);
- break;
case snd_soc_dapm_mixer:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_MIXER_E(w->name, SND_SOC_NOPM, 0, 0, NULL,
- 0, gbaudio_widget_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD);
- break;
case snd_soc_dapm_mux:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_MUX_E(w->name, SND_SOC_NOPM, 0, 0,
- widget_kctls, gbaudio_widget_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD);
+ *dw = gbaudio_widgets[w->type];
break;
case snd_soc_dapm_aif_in:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_AIF_IN_E(w->name, w->sname, 0,
- SND_SOC_NOPM,
- 0, 0, gbaudio_widget_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD);
- break;
case snd_soc_dapm_aif_out:
- *dw = (struct snd_soc_dapm_widget)
- SND_SOC_DAPM_AIF_OUT_E(w->name, w->sname, 0,
- SND_SOC_NOPM,
- 0, 0, gbaudio_widget_event,
- SND_SOC_DAPM_PRE_PMU |
- SND_SOC_DAPM_POST_PMD);
+ *dw = gbaudio_widgets[w->type];
+ dw->sname = w->sname;
break;
default:
ret = -EINVAL;
goto error;
}
+ dw->name = w->name;
dev_dbg(module->dev, "%s: widget of type %d created\n", dw->name,
dw->id);
@@ -1183,6 +1192,10 @@ static int gbaudio_tplg_process_kcontrols(struct gbaudio_module_info *module,
csize += le16_to_cpu(gbenum->names_length);
control->texts = (const char * const *)
gb_generate_enum_strings(module, gbenum);
+ if (!control->texts) {
+ ret = -ENOMEM;
+ goto error;
+ }
control->items = le32_to_cpu(gbenum->items);
} else {
csize = sizeof(struct gb_audio_control);
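
The widget rework above replaces per-case designated initializers with a copy
from the static gbaudio_widgets[] template table, after which only the
per-instance fields are filled in. Condensed, as a restatement of the hunk
rather than literal driver code:

	*dw = gbaudio_widgets[w->type];		/* copy the template for this type */
	if (w->type == snd_soc_dapm_aif_in || w->type == snd_soc_dapm_aif_out)
		dw->sname = w->sname;		/* AIF widgets also carry a stream name */
	dw->name = w->name;			/* every widget gets its instance name */
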
diff --git a/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h b/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
index 6880c9b6aa65..c3ae5014a039 100644
--- a/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
+++ b/drivers/staging/media/atomisp/pci/isp2400_input_system_local.h
@@ -29,8 +29,6 @@
#include "isp_acquisition_defs.h"
#include "input_system_ctrl_defs.h"
-typedef struct input_system_cfg2400_s input_system_cfg2400_t;
-
struct target_cfg2400_s {
input_switch_cfg_channel_t input_switch_channel_cfg;
target_isp_cfg_t target_isp_cfg;
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index bd102329d8c8..29f8ce2a47f5 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -971,7 +971,7 @@ static void fsl_mx6_disable(struct platform_device *pdev)
clk_disable_unprepare(dev->clk);
}
-static int rcar_h2_enable(struct platform_device *pdev)
+static int rcar_gen2_enable(struct platform_device *pdev)
{
struct dim2_hdm *dev = platform_get_drvdata(pdev);
int ret;
@@ -1006,7 +1006,7 @@ static int rcar_h2_enable(struct platform_device *pdev)
return 0;
}
-static void rcar_h2_disable(struct platform_device *pdev)
+static void rcar_gen2_disable(struct platform_device *pdev)
{
struct dim2_hdm *dev = platform_get_drvdata(pdev);
@@ -1016,7 +1016,7 @@ static void rcar_h2_disable(struct platform_device *pdev)
writel(0x0, dev->io_base + 0x600);
}
-static int rcar_m3_enable(struct platform_device *pdev)
+static int rcar_gen3_enable(struct platform_device *pdev)
{
struct dim2_hdm *dev = platform_get_drvdata(pdev);
u32 enable_512fs = dev->clk_speed == CLK_512FS;
@@ -1046,7 +1046,7 @@ static int rcar_m3_enable(struct platform_device *pdev)
return 0;
}
-static void rcar_m3_disable(struct platform_device *pdev)
+static void rcar_gen3_disable(struct platform_device *pdev)
{
struct dim2_hdm *dev = platform_get_drvdata(pdev);
@@ -1058,20 +1058,20 @@ static void rcar_m3_disable(struct platform_device *pdev)
/* ]] platform specific functions */
-enum dim2_platforms { FSL_MX6, RCAR_H2, RCAR_M3 };
+enum dim2_platforms { FSL_MX6, RCAR_GEN2, RCAR_GEN3 };
static struct dim2_platform_data plat_data[] = {
[FSL_MX6] = {
.enable = fsl_mx6_enable,
.disable = fsl_mx6_disable,
},
- [RCAR_H2] = {
- .enable = rcar_h2_enable,
- .disable = rcar_h2_disable,
+ [RCAR_GEN2] = {
+ .enable = rcar_gen2_enable,
+ .disable = rcar_gen2_disable,
},
- [RCAR_M3] = {
- .enable = rcar_m3_enable,
- .disable = rcar_m3_disable,
+ [RCAR_GEN3] = {
+ .enable = rcar_gen3_enable,
+ .disable = rcar_gen3_disable,
.fcnt = 3,
},
};
@@ -1083,11 +1083,11 @@ static const struct of_device_id dim2_of_match[] = {
},
{
.compatible = "renesas,mlp",
- .data = plat_data + RCAR_H2
+ .data = plat_data + RCAR_GEN2
},
{
- .compatible = "rcar,medialb-dim2",
- .data = plat_data + RCAR_M3
+ .compatible = "renesas,rcar-gen3-mlp",
+ .data = plat_data + RCAR_GEN3
},
{
.compatible = "xlnx,axi4-os62420_3pin-1.00.a",
diff --git a/drivers/staging/mt7621-dma/Kconfig b/drivers/staging/mt7621-dma/Kconfig
deleted file mode 100644
index 54a110288f92..000000000000
--- a/drivers/staging/mt7621-dma/Kconfig
+++ /dev/null
@@ -1,7 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config MTK_HSDMA
- tristate "MTK HSDMA support"
- depends on RALINK && SOC_MT7621
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
-
diff --git a/drivers/staging/mt7621-dma/Makefile b/drivers/staging/mt7621-dma/Makefile
deleted file mode 100644
index 23256d1286f3..000000000000
--- a/drivers/staging/mt7621-dma/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_MTK_HSDMA) += hsdma-mt7621.o
-
-ccflags-y += -I$(srctree)/drivers/dma
diff --git a/drivers/staging/mt7621-dma/TODO b/drivers/staging/mt7621-dma/TODO
deleted file mode 100644
index fdbc5002c32a..000000000000
--- a/drivers/staging/mt7621-dma/TODO
+++ /dev/null
@@ -1,5 +0,0 @@
-
-- general code review and clean up
-- ensure device-tree requirements are documented
-
-Cc: NeilBrown <neil@brown.name>
diff --git a/drivers/staging/mt7621-dma/hsdma-mt7621.c b/drivers/staging/mt7621-dma/hsdma-mt7621.c
deleted file mode 100644
index 1424d01d434b..000000000000
--- a/drivers/staging/mt7621-dma/hsdma-mt7621.c
+++ /dev/null
@@ -1,758 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2015, Michael Lee <igvtee@gmail.com>
- * MTK HSDMA support
- */
-
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/irq.h>
-#include <linux/of_dma.h>
-#include <linux/reset.h>
-#include <linux/of_device.h>
-
-#include "virt-dma.h"
-
-#define HSDMA_BASE_OFFSET 0x800
-
-#define HSDMA_REG_TX_BASE 0x00
-#define HSDMA_REG_TX_CNT 0x04
-#define HSDMA_REG_TX_CTX 0x08
-#define HSDMA_REG_TX_DTX 0x0c
-#define HSDMA_REG_RX_BASE 0x100
-#define HSDMA_REG_RX_CNT 0x104
-#define HSDMA_REG_RX_CRX 0x108
-#define HSDMA_REG_RX_DRX 0x10c
-#define HSDMA_REG_INFO 0x200
-#define HSDMA_REG_GLO_CFG 0x204
-#define HSDMA_REG_RST_CFG 0x208
-#define HSDMA_REG_DELAY_INT 0x20c
-#define HSDMA_REG_FREEQ_THRES 0x210
-#define HSDMA_REG_INT_STATUS 0x220
-#define HSDMA_REG_INT_MASK 0x228
-#define HSDMA_REG_SCH_Q01 0x280
-#define HSDMA_REG_SCH_Q23 0x284
-
-#define HSDMA_DESCS_MAX 0xfff
-#define HSDMA_DESCS_NUM 8
-#define HSDMA_DESCS_MASK (HSDMA_DESCS_NUM - 1)
-#define HSDMA_NEXT_DESC(x) (((x) + 1) & HSDMA_DESCS_MASK)
-
-/* HSDMA_REG_INFO */
-#define HSDMA_INFO_INDEX_MASK 0xf
-#define HSDMA_INFO_INDEX_SHIFT 24
-#define HSDMA_INFO_BASE_MASK 0xff
-#define HSDMA_INFO_BASE_SHIFT 16
-#define HSDMA_INFO_RX_MASK 0xff
-#define HSDMA_INFO_RX_SHIFT 8
-#define HSDMA_INFO_TX_MASK 0xff
-#define HSDMA_INFO_TX_SHIFT 0
-
-/* HSDMA_REG_GLO_CFG */
-#define HSDMA_GLO_TX_2B_OFFSET BIT(31)
-#define HSDMA_GLO_CLK_GATE BIT(30)
-#define HSDMA_GLO_BYTE_SWAP BIT(29)
-#define HSDMA_GLO_MULTI_DMA BIT(10)
-#define HSDMA_GLO_TWO_BUF BIT(9)
-#define HSDMA_GLO_32B_DESC BIT(8)
-#define HSDMA_GLO_BIG_ENDIAN BIT(7)
-#define HSDMA_GLO_TX_DONE BIT(6)
-#define HSDMA_GLO_BT_MASK 0x3
-#define HSDMA_GLO_BT_SHIFT 4
-#define HSDMA_GLO_RX_BUSY BIT(3)
-#define HSDMA_GLO_RX_DMA BIT(2)
-#define HSDMA_GLO_TX_BUSY BIT(1)
-#define HSDMA_GLO_TX_DMA BIT(0)
-
-#define HSDMA_BT_SIZE_16BYTES (0 << HSDMA_GLO_BT_SHIFT)
-#define HSDMA_BT_SIZE_32BYTES (1 << HSDMA_GLO_BT_SHIFT)
-#define HSDMA_BT_SIZE_64BYTES (2 << HSDMA_GLO_BT_SHIFT)
-#define HSDMA_BT_SIZE_128BYTES (3 << HSDMA_GLO_BT_SHIFT)
-
-#define HSDMA_GLO_DEFAULT (HSDMA_GLO_MULTI_DMA | \
- HSDMA_GLO_RX_DMA | HSDMA_GLO_TX_DMA | HSDMA_BT_SIZE_32BYTES)
-
-/* HSDMA_REG_RST_CFG */
-#define HSDMA_RST_RX_SHIFT 16
-#define HSDMA_RST_TX_SHIFT 0
-
-/* HSDMA_REG_DELAY_INT */
-#define HSDMA_DELAY_INT_EN BIT(15)
-#define HSDMA_DELAY_PEND_OFFSET 8
-#define HSDMA_DELAY_TIME_OFFSET 0
-#define HSDMA_DELAY_TX_OFFSET 16
-#define HSDMA_DELAY_RX_OFFSET 0
-
-#define HSDMA_DELAY_INIT(x) (HSDMA_DELAY_INT_EN | \
- ((x) << HSDMA_DELAY_PEND_OFFSET))
-#define HSDMA_DELAY(x) ((HSDMA_DELAY_INIT(x) << \
- HSDMA_DELAY_TX_OFFSET) | HSDMA_DELAY_INIT(x))
-
-/* HSDMA_REG_INT_STATUS */
-#define HSDMA_INT_DELAY_RX_COH BIT(31)
-#define HSDMA_INT_DELAY_RX_INT BIT(30)
-#define HSDMA_INT_DELAY_TX_COH BIT(29)
-#define HSDMA_INT_DELAY_TX_INT BIT(28)
-#define HSDMA_INT_RX_MASK 0x3
-#define HSDMA_INT_RX_SHIFT 16
-#define HSDMA_INT_RX_Q0 BIT(16)
-#define HSDMA_INT_TX_MASK 0xf
-#define HSDMA_INT_TX_SHIFT 0
-#define HSDMA_INT_TX_Q0 BIT(0)
-
-/* tx/rx dma desc flags */
-#define HSDMA_PLEN_MASK 0x3fff
-#define HSDMA_DESC_DONE BIT(31)
-#define HSDMA_DESC_LS0 BIT(30)
-#define HSDMA_DESC_PLEN0(_x) (((_x) & HSDMA_PLEN_MASK) << 16)
-#define HSDMA_DESC_TAG BIT(15)
-#define HSDMA_DESC_LS1 BIT(14)
-#define HSDMA_DESC_PLEN1(_x) ((_x) & HSDMA_PLEN_MASK)
-
-/* align 4 bytes */
-#define HSDMA_ALIGN_SIZE 3
-/* align size 128bytes */
-#define HSDMA_MAX_PLEN 0x3f80
-
-struct hsdma_desc {
- u32 addr0;
- u32 flags;
- u32 addr1;
- u32 unused;
-};
-
-struct mtk_hsdma_sg {
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- u32 len;
-};
-
-struct mtk_hsdma_desc {
- struct virt_dma_desc vdesc;
- unsigned int num_sgs;
- struct mtk_hsdma_sg sg[1];
-};
-
-struct mtk_hsdma_chan {
- struct virt_dma_chan vchan;
- unsigned int id;
- dma_addr_t desc_addr;
- int tx_idx;
- int rx_idx;
- struct hsdma_desc *tx_ring;
- struct hsdma_desc *rx_ring;
- struct mtk_hsdma_desc *desc;
- unsigned int next_sg;
-};
-
-struct mtk_hsdam_engine {
- struct dma_device ddev;
- struct device_dma_parameters dma_parms;
- void __iomem *base;
- struct tasklet_struct task;
- volatile unsigned long chan_issued;
-
- struct mtk_hsdma_chan chan[1];
-};
-
-static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(struct mtk_hsdma_chan *chan)
-{
- return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
- ddev);
-}
-
-static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
-{
- return container_of(c, struct mtk_hsdma_chan, vchan.chan);
-}
-
-static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(struct virt_dma_desc *vdesc)
-{
- return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
-}
-
-static inline u32 mtk_hsdma_read(struct mtk_hsdam_engine *hsdma, u32 reg)
-{
- return readl(hsdma->base + reg);
-}
-
-static inline void mtk_hsdma_write(struct mtk_hsdam_engine *hsdma,
- unsigned int reg, u32 val)
-{
- writel(val, hsdma->base + reg);
-}
-
-static void mtk_hsdma_reset_chan(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- chan->tx_idx = 0;
- chan->rx_idx = HSDMA_DESCS_NUM - 1;
-
- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
-
- mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
- 0x1 << (chan->id + HSDMA_RST_TX_SHIFT));
- mtk_hsdma_write(hsdma, HSDMA_REG_RST_CFG,
- 0x1 << (chan->id + HSDMA_RST_RX_SHIFT));
-}
-
-static void hsdma_dump_reg(struct mtk_hsdam_engine *hsdma)
-{
- dev_dbg(hsdma->ddev.dev,
- "tbase %08x, tcnt %08x, tctx %08x, tdtx: %08x, rbase %08x, rcnt %08x, rctx %08x, rdtx %08x\n",
- mtk_hsdma_read(hsdma, HSDMA_REG_TX_BASE),
- mtk_hsdma_read(hsdma, HSDMA_REG_TX_CNT),
- mtk_hsdma_read(hsdma, HSDMA_REG_TX_CTX),
- mtk_hsdma_read(hsdma, HSDMA_REG_TX_DTX),
- mtk_hsdma_read(hsdma, HSDMA_REG_RX_BASE),
- mtk_hsdma_read(hsdma, HSDMA_REG_RX_CNT),
- mtk_hsdma_read(hsdma, HSDMA_REG_RX_CRX),
- mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX));
-
- dev_dbg(hsdma->ddev.dev,
- "info %08x, glo %08x, delay %08x, intr_stat %08x, intr_mask %08x\n",
- mtk_hsdma_read(hsdma, HSDMA_REG_INFO),
- mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG),
- mtk_hsdma_read(hsdma, HSDMA_REG_DELAY_INT),
- mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS),
- mtk_hsdma_read(hsdma, HSDMA_REG_INT_MASK));
-}
-
-static void hsdma_dump_desc(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- struct hsdma_desc *tx_desc;
- struct hsdma_desc *rx_desc;
- int i;
-
- dev_dbg(hsdma->ddev.dev, "tx idx: %d, rx idx: %d\n",
- chan->tx_idx, chan->rx_idx);
-
- for (i = 0; i < HSDMA_DESCS_NUM; i++) {
- tx_desc = &chan->tx_ring[i];
- rx_desc = &chan->rx_ring[i];
-
- dev_dbg(hsdma->ddev.dev,
- "%d tx addr0: %08x, flags %08x, tx addr1: %08x, rx addr0 %08x, flags %08x\n",
- i, tx_desc->addr0, tx_desc->flags,
- tx_desc->addr1, rx_desc->addr0, rx_desc->flags);
- }
-}
-
-static void mtk_hsdma_reset(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- int i;
-
- /* disable dma */
- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
-
- /* disable intr */
- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
-
- /* init desc value */
- for (i = 0; i < HSDMA_DESCS_NUM; i++) {
- chan->tx_ring[i].addr0 = 0;
- chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
- }
- for (i = 0; i < HSDMA_DESCS_NUM; i++) {
- chan->rx_ring[i].addr0 = 0;
- chan->rx_ring[i].flags = 0;
- }
-
- /* reset */
- mtk_hsdma_reset_chan(hsdma, chan);
-
- /* enable intr */
- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
-
- /* enable dma */
- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
-}
-
-static int mtk_hsdma_terminate_all(struct dma_chan *c)
-{
- struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
- struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
- unsigned long timeout;
- LIST_HEAD(head);
-
- spin_lock_bh(&chan->vchan.lock);
- chan->desc = NULL;
- clear_bit(chan->id, &hsdma->chan_issued);
- vchan_get_all_descriptors(&chan->vchan, &head);
- spin_unlock_bh(&chan->vchan.lock);
-
- vchan_dma_desc_free_list(&chan->vchan, &head);
-
- /* wait dma transfer complete */
- timeout = jiffies + msecs_to_jiffies(2000);
- while (mtk_hsdma_read(hsdma, HSDMA_REG_GLO_CFG) &
- (HSDMA_GLO_RX_BUSY | HSDMA_GLO_TX_BUSY)) {
- if (time_after_eq(jiffies, timeout)) {
- hsdma_dump_desc(hsdma, chan);
- mtk_hsdma_reset(hsdma, chan);
- dev_err(hsdma->ddev.dev, "timeout, reset it\n");
- break;
- }
- cpu_relax();
- }
-
- return 0;
-}
-
-static int mtk_hsdma_start_transfer(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- dma_addr_t src, dst;
- size_t len, tlen;
- struct hsdma_desc *tx_desc, *rx_desc;
- struct mtk_hsdma_sg *sg;
- unsigned int i;
- int rx_idx;
-
- sg = &chan->desc->sg[0];
- len = sg->len;
- chan->desc->num_sgs = DIV_ROUND_UP(len, HSDMA_MAX_PLEN);
-
- /* tx desc */
- src = sg->src_addr;
- for (i = 0; i < chan->desc->num_sgs; i++) {
- tx_desc = &chan->tx_ring[chan->tx_idx];
-
- if (len > HSDMA_MAX_PLEN)
- tlen = HSDMA_MAX_PLEN;
- else
- tlen = len;
-
- if (i & 0x1) {
- tx_desc->addr1 = src;
- tx_desc->flags |= HSDMA_DESC_PLEN1(tlen);
- } else {
- tx_desc->addr0 = src;
- tx_desc->flags = HSDMA_DESC_PLEN0(tlen);
-
- /* update index */
- chan->tx_idx = HSDMA_NEXT_DESC(chan->tx_idx);
- }
-
- src += tlen;
- len -= tlen;
- }
- if (i & 0x1)
- tx_desc->flags |= HSDMA_DESC_LS0;
- else
- tx_desc->flags |= HSDMA_DESC_LS1;
-
- /* rx desc */
- rx_idx = HSDMA_NEXT_DESC(chan->rx_idx);
- len = sg->len;
- dst = sg->dst_addr;
- for (i = 0; i < chan->desc->num_sgs; i++) {
- rx_desc = &chan->rx_ring[rx_idx];
- if (len > HSDMA_MAX_PLEN)
- tlen = HSDMA_MAX_PLEN;
- else
- tlen = len;
-
- rx_desc->addr0 = dst;
- rx_desc->flags = HSDMA_DESC_PLEN0(tlen);
-
- dst += tlen;
- len -= tlen;
-
- /* update index */
- rx_idx = HSDMA_NEXT_DESC(rx_idx);
- }
-
- /* make sure desc and index all up to date */
- wmb();
- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CTX, chan->tx_idx);
-
- return 0;
-}
-
-static int gdma_next_desc(struct mtk_hsdma_chan *chan)
-{
- struct virt_dma_desc *vdesc;
-
- vdesc = vchan_next_desc(&chan->vchan);
- if (!vdesc) {
- chan->desc = NULL;
- return 0;
- }
- chan->desc = to_mtk_hsdma_desc(vdesc);
- chan->next_sg = 0;
-
- return 1;
-}
-
-static void mtk_hsdma_chan_done(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- struct mtk_hsdma_desc *desc;
- int chan_issued;
-
- chan_issued = 0;
- spin_lock_bh(&chan->vchan.lock);
- desc = chan->desc;
- if (likely(desc)) {
- if (chan->next_sg == desc->num_sgs) {
- list_del(&desc->vdesc.node);
- vchan_cookie_complete(&desc->vdesc);
- chan_issued = gdma_next_desc(chan);
- }
- } else {
- dev_dbg(hsdma->ddev.dev, "no desc to complete\n");
- }
-
- if (chan_issued)
- set_bit(chan->id, &hsdma->chan_issued);
- spin_unlock_bh(&chan->vchan.lock);
-}
-
-static irqreturn_t mtk_hsdma_irq(int irq, void *devid)
-{
- struct mtk_hsdam_engine *hsdma = devid;
- u32 status;
-
- status = mtk_hsdma_read(hsdma, HSDMA_REG_INT_STATUS);
- if (unlikely(!status))
- return IRQ_NONE;
-
- if (likely(status & HSDMA_INT_RX_Q0))
- tasklet_schedule(&hsdma->task);
- else
- dev_dbg(hsdma->ddev.dev, "unhandle irq status %08x\n", status);
- /* clean intr bits */
- mtk_hsdma_write(hsdma, HSDMA_REG_INT_STATUS, status);
-
- return IRQ_HANDLED;
-}
-
-static void mtk_hsdma_issue_pending(struct dma_chan *c)
-{
- struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
- struct mtk_hsdam_engine *hsdma = mtk_hsdma_chan_get_dev(chan);
-
- spin_lock_bh(&chan->vchan.lock);
- if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
- if (gdma_next_desc(chan)) {
- set_bit(chan->id, &hsdma->chan_issued);
- tasklet_schedule(&hsdma->task);
- } else {
- dev_dbg(hsdma->ddev.dev, "no desc to issue\n");
- }
- }
- spin_unlock_bh(&chan->vchan.lock);
-}
-
-static struct dma_async_tx_descriptor *mtk_hsdma_prep_dma_memcpy(
- struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
- size_t len, unsigned long flags)
-{
- struct mtk_hsdma_chan *chan = to_mtk_hsdma_chan(c);
- struct mtk_hsdma_desc *desc;
-
- if (len <= 0)
- return NULL;
-
- desc = kzalloc(sizeof(*desc), GFP_ATOMIC);
- if (!desc) {
- dev_err(c->device->dev, "alloc memcpy decs error\n");
- return NULL;
- }
-
- desc->sg[0].src_addr = src;
- desc->sg[0].dst_addr = dest;
- desc->sg[0].len = len;
-
- return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-}
-
-static enum dma_status mtk_hsdma_tx_status(struct dma_chan *c,
- dma_cookie_t cookie,
- struct dma_tx_state *state)
-{
- return dma_cookie_status(c, cookie, state);
-}
-
-static void mtk_hsdma_free_chan_resources(struct dma_chan *c)
-{
- vchan_free_chan_resources(to_virt_chan(c));
-}
-
-static void mtk_hsdma_desc_free(struct virt_dma_desc *vdesc)
-{
- kfree(container_of(vdesc, struct mtk_hsdma_desc, vdesc));
-}
-
-static void mtk_hsdma_tx(struct mtk_hsdam_engine *hsdma)
-{
- struct mtk_hsdma_chan *chan;
-
- if (test_and_clear_bit(0, &hsdma->chan_issued)) {
- chan = &hsdma->chan[0];
- if (chan->desc)
- mtk_hsdma_start_transfer(hsdma, chan);
- else
- dev_dbg(hsdma->ddev.dev, "chan 0 no desc to issue\n");
- }
-}
-
-static void mtk_hsdma_rx(struct mtk_hsdam_engine *hsdma)
-{
- struct mtk_hsdma_chan *chan;
- int next_idx, drx_idx, cnt;
-
- chan = &hsdma->chan[0];
- next_idx = HSDMA_NEXT_DESC(chan->rx_idx);
- drx_idx = mtk_hsdma_read(hsdma, HSDMA_REG_RX_DRX);
-
- cnt = (drx_idx - next_idx) & HSDMA_DESCS_MASK;
- if (!cnt)
- return;
-
- chan->next_sg += cnt;
- chan->rx_idx = (chan->rx_idx + cnt) & HSDMA_DESCS_MASK;
-
- /* update rx crx */
- wmb();
- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CRX, chan->rx_idx);
-
- mtk_hsdma_chan_done(hsdma, chan);
-}
-
-static void mtk_hsdma_tasklet(struct tasklet_struct *t)
-{
- struct mtk_hsdam_engine *hsdma = from_tasklet(hsdma, t, task);
-
- mtk_hsdma_rx(hsdma);
- mtk_hsdma_tx(hsdma);
-}
-
-static int mtk_hsdam_alloc_desc(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- int i;
-
- chan->tx_ring = dma_alloc_coherent(hsdma->ddev.dev,
- 2 * HSDMA_DESCS_NUM *
- sizeof(*chan->tx_ring),
- &chan->desc_addr, GFP_ATOMIC | __GFP_ZERO);
- if (!chan->tx_ring)
- goto no_mem;
-
- chan->rx_ring = &chan->tx_ring[HSDMA_DESCS_NUM];
-
- /* init tx ring value */
- for (i = 0; i < HSDMA_DESCS_NUM; i++)
- chan->tx_ring[i].flags = HSDMA_DESC_LS0 | HSDMA_DESC_DONE;
-
- return 0;
-no_mem:
- return -ENOMEM;
-}
-
-static void mtk_hsdam_free_desc(struct mtk_hsdam_engine *hsdma,
- struct mtk_hsdma_chan *chan)
-{
- if (chan->tx_ring) {
- dma_free_coherent(hsdma->ddev.dev,
- 2 * HSDMA_DESCS_NUM * sizeof(*chan->tx_ring),
- chan->tx_ring, chan->desc_addr);
- chan->tx_ring = NULL;
- chan->rx_ring = NULL;
- }
-}
-
-static int mtk_hsdma_init(struct mtk_hsdam_engine *hsdma)
-{
- struct mtk_hsdma_chan *chan;
- int ret;
- u32 reg;
-
- /* init desc */
- chan = &hsdma->chan[0];
- ret = mtk_hsdam_alloc_desc(hsdma, chan);
- if (ret)
- return ret;
-
- /* tx */
- mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, chan->desc_addr);
- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, HSDMA_DESCS_NUM);
- /* rx */
- mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, chan->desc_addr +
- (sizeof(struct hsdma_desc) * HSDMA_DESCS_NUM));
- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, HSDMA_DESCS_NUM);
- /* reset */
- mtk_hsdma_reset_chan(hsdma, chan);
-
- /* enable rx intr */
- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, HSDMA_INT_RX_Q0);
-
- /* enable dma */
- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, HSDMA_GLO_DEFAULT);
-
- /* hardware info */
- reg = mtk_hsdma_read(hsdma, HSDMA_REG_INFO);
- dev_info(hsdma->ddev.dev, "rx: %d, tx: %d\n",
- (reg >> HSDMA_INFO_RX_SHIFT) & HSDMA_INFO_RX_MASK,
- (reg >> HSDMA_INFO_TX_SHIFT) & HSDMA_INFO_TX_MASK);
-
- hsdma_dump_reg(hsdma);
-
- return ret;
-}
-
-static void mtk_hsdma_uninit(struct mtk_hsdam_engine *hsdma)
-{
- struct mtk_hsdma_chan *chan;
-
- /* disable dma */
- mtk_hsdma_write(hsdma, HSDMA_REG_GLO_CFG, 0);
-
- /* disable intr */
- mtk_hsdma_write(hsdma, HSDMA_REG_INT_MASK, 0);
-
- /* free desc */
- chan = &hsdma->chan[0];
- mtk_hsdam_free_desc(hsdma, chan);
-
- /* tx */
- mtk_hsdma_write(hsdma, HSDMA_REG_TX_BASE, 0);
- mtk_hsdma_write(hsdma, HSDMA_REG_TX_CNT, 0);
- /* rx */
- mtk_hsdma_write(hsdma, HSDMA_REG_RX_BASE, 0);
- mtk_hsdma_write(hsdma, HSDMA_REG_RX_CNT, 0);
- /* reset */
- mtk_hsdma_reset_chan(hsdma, chan);
-}
-
-static const struct of_device_id mtk_hsdma_of_match[] = {
- { .compatible = "mediatek,mt7621-hsdma" },
- { },
-};
-
-static int mtk_hsdma_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match;
- struct mtk_hsdma_chan *chan;
- struct mtk_hsdam_engine *hsdma;
- struct dma_device *dd;
- int ret;
- int irq;
- void __iomem *base;
-
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- match = of_match_device(mtk_hsdma_of_match, &pdev->dev);
- if (!match)
- return -EINVAL;
-
- hsdma = devm_kzalloc(&pdev->dev, sizeof(*hsdma), GFP_KERNEL);
- if (!hsdma)
- return -EINVAL;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
- hsdma->base = base + HSDMA_BASE_OFFSET;
- tasklet_setup(&hsdma->task, mtk_hsdma_tasklet);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -EINVAL;
- ret = devm_request_irq(&pdev->dev, irq, mtk_hsdma_irq,
- 0, dev_name(&pdev->dev), hsdma);
- if (ret) {
- dev_err(&pdev->dev, "failed to request irq\n");
- return ret;
- }
-
- device_reset(&pdev->dev);
-
- dd = &hsdma->ddev;
- dma_cap_set(DMA_MEMCPY, dd->cap_mask);
- dd->copy_align = HSDMA_ALIGN_SIZE;
- dd->device_free_chan_resources = mtk_hsdma_free_chan_resources;
- dd->device_prep_dma_memcpy = mtk_hsdma_prep_dma_memcpy;
- dd->device_terminate_all = mtk_hsdma_terminate_all;
- dd->device_tx_status = mtk_hsdma_tx_status;
- dd->device_issue_pending = mtk_hsdma_issue_pending;
- dd->dev = &pdev->dev;
- dd->dev->dma_parms = &hsdma->dma_parms;
- dma_set_max_seg_size(dd->dev, HSDMA_MAX_PLEN);
- INIT_LIST_HEAD(&dd->channels);
-
- chan = &hsdma->chan[0];
- chan->id = 0;
- chan->vchan.desc_free = mtk_hsdma_desc_free;
- vchan_init(&chan->vchan, dd);
-
- /* init hardware */
- ret = mtk_hsdma_init(hsdma);
- if (ret) {
- dev_err(&pdev->dev, "failed to alloc ring descs\n");
- return ret;
- }
-
- ret = dma_async_device_register(dd);
- if (ret) {
- dev_err(&pdev->dev, "failed to register dma device\n");
- goto err_uninit_hsdma;
- }
-
- ret = of_dma_controller_register(pdev->dev.of_node,
- of_dma_xlate_by_chan_id, hsdma);
- if (ret) {
- dev_err(&pdev->dev, "failed to register of dma controller\n");
- goto err_unregister;
- }
-
- platform_set_drvdata(pdev, hsdma);
-
- return 0;
-
-err_unregister:
- dma_async_device_unregister(dd);
-err_uninit_hsdma:
- mtk_hsdma_uninit(hsdma);
- return ret;
-}
-
-static int mtk_hsdma_remove(struct platform_device *pdev)
-{
- struct mtk_hsdam_engine *hsdma = platform_get_drvdata(pdev);
-
- mtk_hsdma_uninit(hsdma);
-
- of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&hsdma->ddev);
-
- return 0;
-}
-
-static struct platform_driver mtk_hsdma_driver = {
- .probe = mtk_hsdma_probe,
- .remove = mtk_hsdma_remove,
- .driver = {
- .name = KBUILD_MODNAME,
- .of_match_table = mtk_hsdma_of_match,
- },
-};
-module_platform_driver(mtk_hsdma_driver);
-
-MODULE_AUTHOR("Michael Lee <igvtee@gmail.com>");
-MODULE_DESCRIPTION("MTK HSDMA driver");
-MODULE_LICENSE("GPL v2");
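
[Editor's note] The core of the transfer path deleted above is mtk_hsdma_start_transfer(), which splits an arbitrary-length memcpy into ring descriptors carrying at most HSDMA_MAX_PLEN bytes each (two payload slots per TX descriptor, via addr0/addr1). A minimal user-space sketch of that chunking arithmetic follows; the HSDMA_MAX_PLEN value is an assumption for illustration, not taken from the driver's headers.

#include <stddef.h>
#include <stdio.h>

#define HSDMA_MAX_PLEN 0x3f80   /* assumed per-descriptor payload limit */

/* Split a transfer of 'len' bytes into HSDMA-sized segments. */
static void split_transfer(size_t len)
{
        size_t off = 0, tlen;
        unsigned int nsegs = (len + HSDMA_MAX_PLEN - 1) / HSDMA_MAX_PLEN;

        printf("len %zu -> %u segment(s)\n", len, nsegs);
        while (len) {
                tlen = len > HSDMA_MAX_PLEN ? HSDMA_MAX_PLEN : len;
                printf("  segment at offset %zu, %zu bytes\n", off, tlen);
                off += tlen;
                len -= tlen;
        }
}

int main(void)
{
        split_transfer(0x10000);        /* 64 KiB -> 5 segments */
        return 0;
}
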
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index 6d158e4f4b8c..644a65d1a6a1 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -146,44 +146,6 @@
pinctrl-names = "default";
pinctrl-0 = <&spi_pins>;
};
-
- gdma: gdma@2800 {
- compatible = "ralink,rt3883-gdma";
- reg = <0x2800 0x800>;
-
- clocks = <&sysc MT7621_CLK_GDMA>;
- clock-names = "gdma";
- resets = <&rstctrl 14>;
- reset-names = "dma";
-
- interrupt-parent = <&gic>;
- interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>;
-
- #dma-cells = <1>;
- #dma-channels = <16>;
- #dma-requests = <16>;
-
- status = "disabled";
- };
-
- hsdma: hsdma@7000 {
- compatible = "mediatek,mt7621-hsdma";
- reg = <0x7000 0x1000>;
-
- clocks = <&sysc MT7621_CLK_HSDMA>;
- clock-names = "hsdma";
- resets = <&rstctrl 5>;
- reset-names = "hsdma";
-
- interrupt-parent = <&gic>;
- interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
-
- #dma-cells = <1>;
- #dma-channels = <1>;
- #dma-requests = <1>;
-
- status = "disabled";
- };
};
pinctrl: pinctrl {
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 29bd37669059..68c09fa016ed 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -92,7 +92,7 @@ struct pi433_device {
u32 rx_bytes_to_drop;
u32 rx_bytes_dropped;
unsigned int rx_position;
- struct mutex rx_lock;
+ struct mutex rx_lock; /* protects rx_* variable accesses */
wait_queue_head_t rx_wait_queue;
/* fifo wait queue */
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index 7d86bb8be245..d64df072d8e8 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -113,7 +113,7 @@ int rf69_set_mode(struct spi_device *spi, enum mode mode)
};
if (unlikely(mode >= ARRAY_SIZE(mode_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal mode %u", mode);
return -EINVAL;
}
@@ -143,7 +143,7 @@ int rf69_set_modulation(struct spi_device *spi, enum modulation modulation)
};
if (unlikely(modulation >= ARRAY_SIZE(modulation_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal modulation %u", modulation);
return -EINVAL;
}
@@ -191,7 +191,7 @@ int rf69_set_modulation_shaping(struct spi_device *spi,
MASK_DATAMODUL_MODULATION_SHAPE,
DATAMODUL_MODULATION_SHAPE_0_3);
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal mod shaping for FSK %u", mod_shaping);
return -EINVAL;
}
case OOK:
@@ -209,7 +209,7 @@ int rf69_set_modulation_shaping(struct spi_device *spi,
MASK_DATAMODUL_MODULATION_SHAPE,
DATAMODUL_MODULATION_SHAPE_2BR);
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal mod shaping for OOK %u", mod_shaping);
return -EINVAL;
}
default:
@@ -255,13 +255,25 @@ int rf69_set_deviation(struct spi_device *spi, u32 deviation)
int retval;
u64 f_reg;
u64 f_step;
+ u32 bit_rate_reg;
+ u32 bit_rate;
u8 msb;
u8 lsb;
u64 factor = 1000000; // to improve precision of calculation
- // TODO: Dependency to bitrate
- if (deviation < 600 || deviation > 500000) {
- dev_dbg(&spi->dev, "set_deviation: illegal input param");
+ // calculate bit rate
+ bit_rate_reg = rf69_read_reg(spi, REG_BITRATE_MSB) << 8;
+ bit_rate_reg |= rf69_read_reg(spi, REG_BITRATE_LSB);
+ bit_rate = F_OSC / bit_rate_reg;
+
+ /*
+ * the frequency deviation must be at least 600 Hz and, with the
+ * bitrate dependency taken into account, must not exceed 500 kHz
+ * to ensure proper modulation
+ */
+ if (deviation < 600 || (deviation + (bit_rate / 2)) > 500000) {
+ dev_dbg(&spi->dev,
+ "set_deviation: illegal input param: %u", deviation);
return -EINVAL;
}
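
[Editor's note] The new bound ties the allowed deviation to the configured bitrate: the bitrate register holds F_OSC divided by the bitrate, and half the bitrate is added to the deviation before comparing against 500 kHz. A hedged stand-alone sketch of that check, assuming the RF69's 32 MHz crystal for F_OSC:

#include <stdbool.h>
#include <stdio.h>

#define F_OSC 32000000u         /* assumed 32 MHz RF69 crystal */

/* Mirrors the driver's new range test: 600 Hz floor, bitrate-aware ceiling. */
static bool deviation_ok(unsigned int deviation, unsigned int bit_rate_reg)
{
        unsigned int bit_rate = F_OSC / bit_rate_reg;

        return deviation >= 600 && deviation + bit_rate / 2 <= 500000;
}

int main(void)
{
        printf("%d\n", deviation_ok(250000, 6667)); /* 4.8 kbit/s: ok (1) */
        printf("%d\n", deviation_ok(490000, 107));  /* ~300 kbit/s: rejected (0) */
        return 0;
}
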
@@ -392,7 +404,7 @@ int rf69_set_output_power_level(struct spi_device *spi, u8 power_level)
return rf69_read_mod_write(spi, REG_PALEVEL, MASK_PALEVEL_OUTPUT_POWER,
power_level);
failed:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal power level %u", power_level);
return -EINVAL;
}
@@ -417,7 +429,7 @@ int rf69_set_pa_ramp(struct spi_device *spi, enum pa_ramp pa_ramp)
};
if (unlikely(pa_ramp >= ARRAY_SIZE(pa_ramp_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal pa_ramp %u", pa_ramp);
return -EINVAL;
}
@@ -433,7 +445,7 @@ int rf69_set_antenna_impedance(struct spi_device *spi,
case two_hundred_ohm:
return rf69_set_bit(spi, REG_LNA, MASK_LNA_ZIN);
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal antenna impedance %u", antenna_impedance);
return -EINVAL;
}
}
@@ -451,7 +463,7 @@ int rf69_set_lna_gain(struct spi_device *spi, enum lna_gain lna_gain)
};
if (unlikely(lna_gain >= ARRAY_SIZE(lna_gain_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal lna gain %u", lna_gain);
return -EINVAL;
}
@@ -466,14 +478,14 @@ static int rf69_set_bandwidth_intern(struct spi_device *spi, u8 reg,
// check value for mantisse and exponent
if (exponent > 7) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal bandwidth exponent %u", exponent);
return -EINVAL;
}
- if ((mantisse != mantisse16) &&
- (mantisse != mantisse20) &&
- (mantisse != mantisse24)) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ if (mantisse != mantisse16 &&
+ mantisse != mantisse20 &&
+ mantisse != mantisse24) {
+ dev_dbg(&spi->dev, "set: illegal bandwidth mantisse %u", mantisse);
return -EINVAL;
}
@@ -531,7 +543,7 @@ int rf69_set_ook_threshold_dec(struct spi_device *spi,
};
if (unlikely(threshold_decrement >= ARRAY_SIZE(td_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal OOK threshold decrement %u", threshold_decrement);
return -EINVAL;
}
@@ -578,7 +590,7 @@ int rf69_set_dio_mapping(struct spi_device *spi, u8 dio_number, u8 value)
dio_addr = REG_DIOMAPPING2;
break;
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal dio number %u", dio_number);
return -EINVAL;
}
@@ -681,7 +693,7 @@ int rf69_set_fifo_fill_condition(struct spi_device *spi,
return rf69_clear_bit(spi, REG_SYNC_CONFIG,
MASK_SYNC_CONFIG_FIFO_FILL_CONDITION);
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal fifo fill condition %u", fifo_fill_condition);
return -EINVAL;
}
}
@@ -690,7 +702,7 @@ int rf69_set_sync_size(struct spi_device *spi, u8 sync_size)
{
// check input value
if (sync_size > 0x07) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal sync size %u", sync_size);
return -EINVAL;
}
@@ -727,7 +739,7 @@ int rf69_set_packet_format(struct spi_device *spi,
return rf69_clear_bit(spi, REG_PACKETCONFIG1,
MASK_PACKETCONFIG1_PACKET_FORMAT_VARIABLE);
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal packet format %u", packet_format);
return -EINVAL;
}
}
@@ -753,7 +765,7 @@ int rf69_set_address_filtering(struct spi_device *spi,
};
if (unlikely(address_filtering >= ARRAY_SIZE(af_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal address filtering %u", address_filtering);
return -EINVAL;
}
@@ -788,7 +800,7 @@ int rf69_set_tx_start_condition(struct spi_device *spi,
return rf69_set_bit(spi, REG_FIFO_THRESH,
MASK_FIFO_THRESH_TXSTART);
default:
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal tx start condition %u", tx_start_condition);
return -EINVAL;
}
}
@@ -799,7 +811,7 @@ int rf69_set_fifo_threshold(struct spi_device *spi, u8 threshold)
/* check input value */
if (threshold & 0x80) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal fifo threshold %u", threshold);
return -EINVAL;
}
@@ -826,7 +838,7 @@ int rf69_set_dagc(struct spi_device *spi, enum dagc dagc)
};
if (unlikely(dagc >= ARRAY_SIZE(dagc_map))) {
- dev_dbg(&spi->dev, "set: illegal input param");
+ dev_dbg(&spi->dev, "set: illegal dagc %u", dagc);
return -EINVAL;
}
diff --git a/drivers/staging/pi433/rf69_enum.h b/drivers/staging/pi433/rf69_enum.h
index fbf56fcf5fe8..b33a33a85d3b 100644
--- a/drivers/staging/pi433/rf69_enum.h
+++ b/drivers/staging/pi433/rf69_enum.h
@@ -110,12 +110,24 @@ enum fifo_fill_condition {
};
enum packet_format {
+ /*
+ * Used when the payload size is fixed and known in advance. This mode
+ * saves one byte of RF overhead per packet, since no length byte
+ * field is required
+ */
packet_length_fix,
+ /*
+ * Used when the payload size isn't known in advance. The transmitter
+ * must send a length byte in each packet so the receiver knows how
+ * many bytes to expect
+ */
packet_length_var
};
enum tx_start_condition {
+ /* the number of bytes in the FIFO exceeds FIFO_THRESHOLD */
fifo_level,
+ /* at least one byte in the FIFO */
fifo_not_empty
};
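
[Editor's note] The distinction the new enum comments draw is purely one of on-air framing. A small illustration of the one-byte overhead difference, buffer layout only; the helper names are hypothetical, not pi433 API:

#include <stdint.h>
#include <string.h>

/* Fixed-length mode: the receiver already knows 'len'. */
static size_t frame_fixed(uint8_t *out, const uint8_t *payload, size_t len)
{
        memcpy(out, payload, len);
        return len;
}

/* Variable-length mode: a length byte leads each packet. */
static size_t frame_variable(uint8_t *out, const uint8_t *payload, size_t len)
{
        out[0] = (uint8_t)len;          /* costs one byte of RF overhead */
        memcpy(out + 1, payload, len);
        return len + 1;
}
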
diff --git a/drivers/staging/r8188eu/Makefile b/drivers/staging/r8188eu/Makefile
index 62933b0f29b5..a7a486cc16dd 100644
--- a/drivers/staging/r8188eu/Makefile
+++ b/drivers/staging/r8188eu/Makefile
@@ -11,7 +11,6 @@ r8188eu-y = \
hal/hal_com.o \
hal/odm.o \
hal/odm_debug.o \
- hal/odm_interface.o \
hal/odm_HWConfig.o \
hal/odm_RegConfig8188E.o \
hal/odm_RTL8188E.o \
@@ -21,9 +20,7 @@ r8188eu-y = \
hal/rtl8188e_phycfg.o \
hal/rtl8188e_rf6052.o \
hal/rtl8188e_rxdesc.o \
- hal/rtl8188e_sreset.o \
hal/rtl8188e_xmit.o \
- hal/rtl8188eu_led.o \
hal/rtl8188eu_recv.o \
hal/rtl8188eu_xmit.o \
hal/usb_halinit.o \
diff --git a/drivers/staging/r8188eu/core/rtw_ap.c b/drivers/staging/r8188eu/core/rtw_ap.c
index c78feeb9c862..1675e2e8439c 100644
--- a/drivers/staging/r8188eu/core/rtw_ap.c
+++ b/drivers/staging/r8188eu/core/rtw_ap.c
@@ -318,7 +318,6 @@ void expire_timeout_chk(struct adapter *padapter)
void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
{
int i;
- u8 rf_type;
u32 init_rate = 0;
unsigned char sta_band = 0, raid, shortGIrate = false;
unsigned char limit;
@@ -342,11 +341,7 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
}
/* n mode ra_bitmap */
if (psta_ht->ht_option) {
- GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
- if (rf_type == RF_2T2R)
- limit = 16;/* 2R */
- else
- limit = 8;/* 1R */
+ limit = 8; /* 1R */
for (i = 0; i < limit; i++) {
if (psta_ht->ht_cap.mcs.rx_mask[i / 8] & BIT(i % 8))
@@ -452,7 +447,7 @@ void update_bmc_sta(struct adapter *padapter)
init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) & 0x3f;
/* ap mode */
- rtl8188e_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, true);
+ rtl8188e_SetHalODMVar(padapter, psta, true);
{
u8 arg = 0;
@@ -504,7 +499,7 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
DBG_88E("%s\n", __func__);
/* ap mode */
- rtl8188e_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, true);
+ rtl8188e_SetHalODMVar(padapter, psta, true);
if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
psta->ieee8021x_blocked = true;
diff --git a/drivers/staging/r8188eu/core/rtw_br_ext.c b/drivers/staging/r8188eu/core/rtw_br_ext.c
index bcd0f9dd64b1..4951f835feaf 100644
--- a/drivers/staging/r8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_br_ext.c
@@ -105,19 +105,10 @@ static int skb_pull_and_merge(struct sk_buff *skb, unsigned char *src, int len)
return 0;
}
-static unsigned long __nat25_timeout(struct adapter *priv)
-{
- unsigned long timeout;
-
- timeout = jiffies - NAT25_AGEING_TIME*HZ;
-
- return timeout;
-}
-
static int __nat25_has_expired(struct adapter *priv,
struct nat25_network_db_entry *fdb)
{
- if (time_before_eq(fdb->ageing_timer, __nat25_timeout(priv)))
+ if (time_before_eq(fdb->ageing_timer, jiffies - NAT25_AGEING_TIME * HZ))
return 1;
return 0;
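
[Editor's note] Inlining the helper keeps the wrap-safe comparison: jiffies overflows, so the expiry test must go through time_before_eq() rather than a plain <=, which would misfire around the wrap point. A minimal hedged sketch of the same test, with the ageing interval as a plain seconds parameter:

#include <linux/jiffies.h>
#include <linux/types.h>

/* True once 'ageing_timer' is at least 'ageing_secs' seconds in the past. */
static bool entry_expired(unsigned long ageing_timer, unsigned int ageing_secs)
{
        return time_before_eq(ageing_timer, jiffies - ageing_secs * HZ);
}
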
diff --git a/drivers/staging/r8188eu/core/rtw_cmd.c b/drivers/staging/r8188eu/core/rtw_cmd.c
index 48869a7056fd..8bfb01c2ebb5 100644
--- a/drivers/staging/r8188eu/core/rtw_cmd.c
+++ b/drivers/staging/r8188eu/core/rtw_cmd.c
@@ -10,7 +10,6 @@
#include "../include/rtw_br_ext.h"
#include "../include/rtw_mlme_ext.h"
#include "../include/rtl8188e_dm.h"
-#include "../include/rtl8188e_sreset.h"
/*
Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
@@ -51,7 +50,6 @@ static int _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((size_t)(pcmdpriv->rsp_allocated_buf) & 3);
- pcmdpriv->cmd_issued_cnt = 0;
pcmdpriv->cmd_done_cnt = 0;
pcmdpriv->rsp_cnt = 0;
exit:
@@ -69,7 +67,7 @@ static int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
atomic_set(&pevtpriv->event_seq, 0);
pevtpriv->evt_done_cnt = 0;
- _init_workitem(&pevtpriv->c2h_wk, c2h_wk_callback, NULL);
+ INIT_WORK(&pevtpriv->c2h_wk, c2h_wk_callback);
pevtpriv->c2h_wk_alive = false;
pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN + 1);
@@ -78,7 +76,7 @@ static int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
void rtw_free_evt_priv(struct evt_priv *pevtpriv)
{
- _cancel_workitem_sync(&pevtpriv->c2h_wk);
+ cancel_work_sync(&pevtpriv->c2h_wk);
while (pevtpriv->c2h_wk_alive)
msleep(10);
@@ -255,8 +253,9 @@ int rtw_cmd_thread(void *context)
_next:
if (padapter->bDriverStopped ||
padapter->bSurpriseRemoved) {
- DBG_88E("%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
- __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__);
+ netdev_dbg(padapter->pnetdev,
+ "DriverStopped(%d) SurpriseRemoved(%d) break\n",
+ padapter->bDriverStopped, padapter->bSurpriseRemoved);
break;
}
@@ -269,8 +268,6 @@ _next:
goto post_process;
}
- pcmdpriv->cmd_issued_cnt++;
-
pcmd->cmdsz = _RND4((pcmd->cmdsz));/* _RND4 */
memcpy(pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
@@ -316,8 +313,6 @@ post_process:
if (!pcmd)
break;
- /* DBG_88E("%s: leaving... drop cmdcode:%u\n", __func__, pcmd->cmdcode); */
-
rtw_free_cmd_obj(pcmd);
} while (1);
@@ -579,7 +574,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
else
padapter->pwrctrlpriv.smart_ps = padapter->registrypriv.smart_ps;
- DBG_88E("%s: smart_ps =%d\n", __func__, padapter->pwrctrlpriv.smart_ps);
+ netdev_dbg(padapter->pnetdev, "smart_ps = %d\n", padapter->pwrctrlpriv.smart_ps);
pcmd->cmdsz = get_wlan_bssid_ex_sz(psecnetwork);/* get cmdsz before endian conversion */
@@ -800,8 +795,6 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm, GEN_CMD_CODE(_AddBAReq));
- /* DBG_88E("rtw_addbareq_cmd, tid =%d\n", tid); */
-
/* rtw_enqueue_cmd(pcmdpriv, ph2c); */
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
@@ -953,7 +946,19 @@ static void traffic_status_watchdog(struct adapter *padapter)
pmlmepriv->LinkDetectInfo.bHigherBusyTxTraffic = bHigherBusyTxTraffic;
}
-static void dynamic_chk_wk_hdl(struct adapter *padapter, u8 *pbuf, int sz)
+static void rtl8188e_sreset_xmit_status_check(struct adapter *padapter)
+{
+ u32 txdma_status;
+
+ txdma_status = rtw_read32(padapter, REG_TXDMA_STATUS);
+ if (txdma_status != 0x00) {
+ DBG_88E("%s REG_TXDMA_STATUS:0x%08x\n", __func__, txdma_status);
+ rtw_write32(padapter, REG_TXDMA_STATUS, txdma_status);
+ }
+ /* total xmit irp = 4 */
+}
+
+static void dynamic_chk_wk_hdl(struct adapter *padapter, u8 *pbuf)
{
struct mlme_priv *pmlmepriv;
@@ -1003,7 +1008,6 @@ static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
SetHwReg8188EU(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
break;
case LPS_CTRL_SPECIAL_PACKET:
- /* DBG_88E("LPS_CTRL_SPECIAL_PACKET\n"); */
pwrpriv->DelayLPSLastTimeStamp = jiffies;
LPS_Leave(padapter);
break;
@@ -1374,7 +1378,7 @@ u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
switch (pdrvextra_cmd->ec_id) {
case DYNAMIC_CHK_WK_CID:
- dynamic_chk_wk_hdl(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type_size);
+ dynamic_chk_wk_hdl(padapter, pdrvextra_cmd->pbuf);
break;
case POWER_SAVING_CTRL_WK_CID:
rtw_ps_processor(padapter);
diff --git a/drivers/staging/r8188eu/core/rtw_efuse.c b/drivers/staging/r8188eu/core/rtw_efuse.c
index 03c8431b2ed3..0e0e60638880 100644
--- a/drivers/staging/r8188eu/core/rtw_efuse.c
+++ b/drivers/staging/r8188eu/core/rtw_efuse.c
@@ -6,70 +6,7 @@
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
#include "../include/rtw_efuse.h"
-
-/*------------------------Define local variable------------------------------*/
-u8 fakeEfuseBank;
-u32 fakeEfuseUsedBytes;
-u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE] = {0};
-u8 fakeEfuseInitMap[EFUSE_MAX_MAP_LEN] = {0};
-u8 fakeEfuseModifiedMap[EFUSE_MAX_MAP_LEN] = {0};
-
-u32 BTEfuseUsedBytes;
-u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
-u8 BTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
-u8 BTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
-
-u32 fakeBTEfuseUsedBytes;
-u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
-u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
-u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
-/*------------------------Define local variable------------------------------*/
-
-#define REG_EFUSE_CTRL 0x0030
-#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
-
-static bool Efuse_Read1ByteFromFakeContent(u16 Offset, u8 *Value)
-{
- if (Offset >= EFUSE_MAX_HW_SIZE)
- return false;
- if (fakeEfuseBank == 0)
- *Value = fakeEfuseContent[Offset];
- else
- *Value = fakeBTEfuseContent[fakeEfuseBank - 1][Offset];
- return true;
-}
-
-static bool
-Efuse_Write1ByteToFakeContent(
- struct adapter *pAdapter,
- u16 Offset,
- u8 Value)
-{
- if (Offset >= EFUSE_MAX_HW_SIZE)
- return false;
- if (fakeEfuseBank == 0) {
- fakeEfuseContent[Offset] = Value;
- } else {
- fakeBTEfuseContent[fakeEfuseBank - 1][Offset] = Value;
- }
- return true;
-}
-
-/* 11/16/2008 MH Add description. Get current efuse area enabled word!!. */
-u8
-Efuse_CalculateWordCnts(u8 word_en)
-{
- u8 word_cnts = 0;
- if (!(word_en & BIT(0)))
- word_cnts++; /* 0 : write enable */
- if (!(word_en & BIT(1)))
- word_cnts++;
- if (!(word_en & BIT(2)))
- word_cnts++;
- if (!(word_en & BIT(3)))
- word_cnts++;
- return word_cnts;
-}
+#include "../include/rtl8188e_hal.h"
/* */
/* Description: */
@@ -86,18 +23,12 @@ void
ReadEFuseByte(
struct adapter *Adapter,
u16 _offset,
- u8 *pbuf,
- bool pseudo)
+ u8 *pbuf)
{
u32 value32;
u8 readbyte;
u16 retry;
- if (pseudo) {
- Efuse_Read1ByteFromFakeContent(_offset, pbuf);
- return;
- }
-
/* Write Address */
rtw_write8(Adapter, EFUSE_CTRL + 1, (_offset & 0xff));
readbyte = rtw_read8(Adapter, EFUSE_CTRL + 2);
@@ -125,134 +56,6 @@ ReadEFuseByte(
*pbuf = (u8)(value32 & 0xff);
}
-/* 11/16/2008 MH Read one byte from real Efuse. */
-u8 efuse_OneByteRead(struct adapter *pAdapter, u16 addr, u8 *data, bool pseudo)
-{
- u8 tmpidx = 0;
- u8 result;
-
- if (pseudo) {
- result = Efuse_Read1ByteFromFakeContent(addr, data);
- return result;
- }
- /* -----------------e-fuse reg ctrl --------------------------------- */
- /* address */
- rtw_write8(pAdapter, EFUSE_CTRL + 1, (u8)(addr & 0xff));
- rtw_write8(pAdapter, EFUSE_CTRL + 2, ((u8)((addr >> 8) & 0x03)) |
- (rtw_read8(pAdapter, EFUSE_CTRL + 2) & 0xFC));
-
- rtw_write8(pAdapter, EFUSE_CTRL + 3, 0x72);/* read cmd */
-
- while (!(0x80 & rtw_read8(pAdapter, EFUSE_CTRL + 3)) && (tmpidx < 100))
- tmpidx++;
- if (tmpidx < 100) {
- *data = rtw_read8(pAdapter, EFUSE_CTRL);
- result = true;
- } else {
- *data = 0xff;
- result = false;
- }
- return result;
-}
-
-/* 11/16/2008 MH Write one byte to reald Efuse. */
-u8 efuse_OneByteWrite(struct adapter *pAdapter, u16 addr, u8 data, bool pseudo)
-{
- u8 tmpidx = 0;
- u8 result;
-
- if (pseudo) {
- result = Efuse_Write1ByteToFakeContent(pAdapter, addr, data);
- return result;
- }
-
- /* -----------------e-fuse reg ctrl --------------------------------- */
- /* address */
- rtw_write8(pAdapter, EFUSE_CTRL + 1, (u8)(addr & 0xff));
- rtw_write8(pAdapter, EFUSE_CTRL + 2,
- (rtw_read8(pAdapter, EFUSE_CTRL + 2) & 0xFC) |
- (u8)((addr >> 8) & 0x03));
- rtw_write8(pAdapter, EFUSE_CTRL, data);/* data */
-
- rtw_write8(pAdapter, EFUSE_CTRL + 3, 0xF2);/* write cmd */
-
- while ((0x80 & rtw_read8(pAdapter, EFUSE_CTRL + 3)) && (tmpidx < 100))
- tmpidx++;
-
- if (tmpidx < 100)
- result = true;
- else
- result = false;
-
- return result;
-}
-
-/*-----------------------------------------------------------------------------
- * Function: efuse_WordEnableDataRead
- *
- * Overview: Read allowed word in current efuse section data.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/16/2008 MHC Create Version 0.
- * 11/21/2008 MHC Fix Write bug when we only enable late word.
- *
- *---------------------------------------------------------------------------*/
-void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata)
-{
- if (!(word_en & BIT(0))) {
- targetdata[0] = sourdata[0];
- targetdata[1] = sourdata[1];
- }
- if (!(word_en & BIT(1))) {
- targetdata[2] = sourdata[2];
- targetdata[3] = sourdata[3];
- }
- if (!(word_en & BIT(2))) {
- targetdata[4] = sourdata[4];
- targetdata[5] = sourdata[5];
- }
- if (!(word_en & BIT(3))) {
- targetdata[6] = sourdata[6];
- targetdata[7] = sourdata[7];
- }
-}
-
-/*-----------------------------------------------------------------------------
- * Function: Efuse_ReadAllMap
- *
- * Overview: Read All Efuse content
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/11/2008 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-static void Efuse_ReadAllMap(struct adapter *pAdapter, u8 efuseType, u8 *Efuse, bool pseudo)
-{
- u16 mapLen = 0;
-
- rtl8188e_EfusePowerSwitch(pAdapter, false, true);
-
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, pseudo);
-
- rtl8188e_ReadEFuse(pAdapter, efuseType, 0, mapLen, Efuse, pseudo);
-
- rtl8188e_EfusePowerSwitch(pAdapter, false, false);
-}
-
/*-----------------------------------------------------------------------------
* Function: EFUSE_ShadowMapUpdate
*
@@ -269,18 +72,16 @@ static void Efuse_ReadAllMap(struct adapter *pAdapter, u8 efuseType, u8 *Efuse,
* 11/13/2008 MHC Create Version 0.
*
*---------------------------------------------------------------------------*/
-void EFUSE_ShadowMapUpdate(
- struct adapter *pAdapter,
- u8 efuseType,
- bool pseudo)
+void EFUSE_ShadowMapUpdate(struct adapter *pAdapter)
{
struct eeprom_priv *pEEPROM = &pAdapter->eeprompriv;
- u16 mapLen = 0;
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, pseudo);
+ if (pEEPROM->bautoload_fail_flag) {
+ memset(pEEPROM->efuse_eeprom_data, 0xFF, EFUSE_MAP_LEN_88E);
+ return;
+ }
- if (pEEPROM->bautoload_fail_flag)
- memset(pEEPROM->efuse_eeprom_data, 0xFF, mapLen);
- else
- Efuse_ReadAllMap(pAdapter, efuseType, pEEPROM->efuse_eeprom_data, pseudo);
-} /* EFUSE_ShadowMapUpdate */
+ rtl8188e_EfusePowerSwitch(pAdapter, true);
+ rtl8188e_ReadEFuse(pAdapter, 0, EFUSE_MAP_LEN_88E, pEEPROM->efuse_eeprom_data);
+ rtl8188e_EfusePowerSwitch(pAdapter, false);
+}
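
[Editor's note] Both the byte-read path kept above and the one-byte helpers deleted here follow the same busy-wait idiom: kick a command register, then poll a ready bit a bounded number of times. A generic hedged sketch of that idiom; the register accessors are stubs standing in for the driver's rtw_read8/rtw_write8, and the constants are illustrative:

#include <stdbool.h>
#include <stdint.h>

static uint8_t regs[0x100];     /* fake register file for the sketch */
static uint8_t reg_read8(uint16_t addr)  { return regs[addr & 0xff]; }
static void reg_write8(uint16_t addr, uint8_t v) { regs[addr & 0xff] = v; }

#define EFUSE_CTRL   0x0030
#define EFUSE_READY  0x80
#define MAX_POLLS    100

/* Returns true and stores the byte once the ready bit comes up. */
static bool efuse_read_byte(uint16_t addr, uint8_t *data)
{
        unsigned int tries = 0;

        reg_write8(EFUSE_CTRL + 1, addr & 0xff);        /* low address */
        reg_write8(EFUSE_CTRL + 3, 0x72);               /* read command */

        while (!(reg_read8(EFUSE_CTRL + 3) & EFUSE_READY) && tries < MAX_POLLS)
                tries++;

        if (tries == MAX_POLLS) {       /* stub never sets READY: timeout path */
                *data = 0xff;
                return false;
        }
        *data = reg_read8(EFUSE_CTRL);
        return true;
}
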
diff --git a/drivers/staging/r8188eu/core/rtw_ieee80211.c b/drivers/staging/r8188eu/core/rtw_ieee80211.c
index 343c2f9a4ce8..ad87954bdeb4 100644
--- a/drivers/staging/r8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/r8188eu/core/rtw_ieee80211.c
@@ -1160,63 +1160,26 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
}
/* show MCS rate, unit: 100Kbps */
-u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsigned char *MCS_rate)
+u16 rtw_mcs_rate(u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsigned char *MCS_rate)
{
u16 max_rate = 0;
- if (rf_type == RF_1T1R) {
- if (MCS_rate[0] & BIT(7))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1500 : 1350) : ((short_GI_20) ? 722 : 650);
- else if (MCS_rate[0] & BIT(6))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1350 : 1215) : ((short_GI_20) ? 650 : 585);
- else if (MCS_rate[0] & BIT(5))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
- else if (MCS_rate[0] & BIT(4))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
- else if (MCS_rate[0] & BIT(3))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
- else if (MCS_rate[0] & BIT(2))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 450 : 405) : ((short_GI_20) ? 217 : 195);
- else if (MCS_rate[0] & BIT(1))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
- else if (MCS_rate[0] & BIT(0))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 150 : 135) : ((short_GI_20) ? 72 : 65);
- } else {
- if (MCS_rate[1]) {
- if (MCS_rate[1] & BIT(7))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 3000 : 2700) : ((short_GI_20) ? 1444 : 1300);
- else if (MCS_rate[1] & BIT(6))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 2700 : 2430) : ((short_GI_20) ? 1300 : 1170);
- else if (MCS_rate[1] & BIT(5))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 2400 : 2160) : ((short_GI_20) ? 1156 : 1040);
- else if (MCS_rate[1] & BIT(4))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1800 : 1620) : ((short_GI_20) ? 867 : 780);
- else if (MCS_rate[1] & BIT(3))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
- else if (MCS_rate[1] & BIT(2))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
- else if (MCS_rate[1] & BIT(1))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
- else if (MCS_rate[1] & BIT(0))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
- } else {
- if (MCS_rate[0] & BIT(7))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1500 : 1350) : ((short_GI_20) ? 722 : 650);
- else if (MCS_rate[0] & BIT(6))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1350 : 1215) : ((short_GI_20) ? 650 : 585);
- else if (MCS_rate[0] & BIT(5))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
- else if (MCS_rate[0] & BIT(4))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
- else if (MCS_rate[0] & BIT(3))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
- else if (MCS_rate[0] & BIT(2))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 450 : 405) : ((short_GI_20) ? 217 : 195);
- else if (MCS_rate[0] & BIT(1))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
- else if (MCS_rate[0] & BIT(0))
- max_rate = (bw_40MHz) ? ((short_GI_40) ? 150 : 135) : ((short_GI_20) ? 72 : 65);
- }
- }
+ if (MCS_rate[0] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1500 : 1350) : ((short_GI_20) ? 722 : 650);
+ else if (MCS_rate[0] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1350 : 1215) : ((short_GI_20) ? 650 : 585);
+ else if (MCS_rate[0] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
+ else if (MCS_rate[0] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
+ else if (MCS_rate[0] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
+ else if (MCS_rate[0] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 450 : 405) : ((short_GI_20) ? 217 : 195);
+ else if (MCS_rate[0] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
+ else if (MCS_rate[0] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 150 : 135) : ((short_GI_20) ? 72 : 65);
+
return max_rate;
}
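
[Editor's note] With the 2T2R branch gone, rtw_mcs_rate() reduces to finding the highest bit set in MCS_rate[0] and mapping it to an HT MCS0-7 rate. The same lookup can be written table-driven; a sketch with the rate values (in 100 kbps units) copied from the branches above:

#include <stdint.h>

/* [mcs][bw40][sgi], rates in 100 kbps, taken from the if/else chain. */
static const uint16_t ht_rate_1r[8][2][2] = {
        { {  65,  72 }, {  135,  150 } },
        { { 130, 144 }, {  270,  300 } },
        { { 195, 217 }, {  405,  450 } },
        { { 260, 289 }, {  540,  600 } },
        { { 390, 433 }, {  810,  900 } },
        { { 520, 578 }, { 1080, 1200 } },
        { { 585, 650 }, { 1215, 1350 } },
        { { 650, 722 }, { 1350, 1500 } },
};

static uint16_t mcs_max_rate(uint8_t mcs_mask, int bw40, int sgi20, int sgi40)
{
        int sgi = bw40 ? sgi40 : sgi20;
        int i;

        for (i = 7; i >= 0; i--)
                if (mcs_mask & (1u << i))
                        return ht_rate_1r[i][bw40][sgi];
        return 0;
}
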
diff --git a/drivers/staging/r8188eu/core/rtw_ioctl_set.c b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
index 411b06e135be..eadfbdb94dd5 100644
--- a/drivers/staging/r8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
@@ -110,7 +110,7 @@ u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
u32 cur_time = 0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- DBG_88E_LEVEL(_drv_info_, "set bssid:%pM\n", bssid);
+ netdev_dbg(padapter->pnetdev, "set bssid:%pM\n", bssid);
if ((bssid[0] == 0x00 && bssid[1] == 0x00 && bssid[2] == 0x00 &&
bssid[3] == 0x00 && bssid[4] == 0x00 && bssid[5] == 0x00) ||
@@ -185,8 +185,8 @@ u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_network *pnetwork = &pmlmepriv->cur_network;
- DBG_88E_LEVEL(_drv_info_, "set ssid [%s] fw_state=0x%08x\n",
- ssid->Ssid, get_fwstate(pmlmepriv));
+ netdev_dbg(padapter->pnetdev, "set ssid [%s] fw_state=0x%08x\n",
+ ssid->Ssid, get_fwstate(pmlmepriv));
if (!padapter->hw_init_completed) {
status = _FAIL;
@@ -458,7 +458,6 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
struct ieee80211_ht_cap *pht_capie;
- u8 rf_type = 0;
u8 bw_40MHz = 0, short_GI_20 = 0, short_GI_40 = 0;
u16 mcs_rate = 0;
u32 ht_ielen = 0;
@@ -480,14 +479,10 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
short_GI_20 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_20) ? 1 : 0;
short_GI_40 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
- GetHwReg8188EU(adapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
- max_rate = rtw_mcs_rate(
- rf_type,
- bw_40MHz & (pregistrypriv->cbw40_enable),
- short_GI_20,
- short_GI_40,
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate
- );
+ max_rate = rtw_mcs_rate(bw_40MHz & (pregistrypriv->cbw40_enable),
+ short_GI_20,
+ short_GI_40,
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate);
}
} else {
while ((pcur_bss->SupportedRates[i] != 0) && (pcur_bss->SupportedRates[i] != 0xFF)) {
diff --git a/drivers/staging/r8188eu/core/rtw_led.c b/drivers/staging/r8188eu/core/rtw_led.c
index 0e3453639a8b..ccd43accb7dc 100644
--- a/drivers/staging/r8188eu/core/rtw_led.c
+++ b/drivers/staging/r8188eu/core/rtw_led.c
@@ -3,25 +3,20 @@
#include "../include/drv_types.h"
#include "../include/rtw_led.h"
+#include "../include/rtl8188e_spec.h"
-void BlinkTimerCallback(struct timer_list *t)
-{
- struct LED_871x *pLed = from_timer(pLed, t, BlinkTimer);
- struct adapter *padapter = pLed->padapter;
+#define LED_BLINK_NO_LINK_INTVL msecs_to_jiffies(1000)
+#define LED_BLINK_LINK_INTVL msecs_to_jiffies(500)
+#define LED_BLINK_SCAN_INTVL msecs_to_jiffies(180)
+#define LED_BLINK_FASTER_INTVL msecs_to_jiffies(50)
+#define LED_BLINK_WPS_SUCESS_INTVL msecs_to_jiffies(5000)
- if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
- return;
+#define IS_LED_WPS_BLINKING(l) \
+ ((l)->CurrLedState == LED_BLINK_WPS || \
+ (l)->CurrLedState == LED_BLINK_WPS_STOP || \
+ (l)->bLedWPSBlinkInProgress)
- _set_workitem(&pLed->BlinkWorkItem);
-}
-
-void BlinkWorkItemCallback(struct work_struct *work)
-{
- struct LED_871x *pLed = container_of(work, struct LED_871x, BlinkWorkItem);
- BlinkHandler(pLed);
-}
-
-void ResetLedStatus(struct LED_871x *pLed)
+static void ResetLedStatus(struct LED_871x *pLed)
{
pLed->CurrLedState = RTW_LED_OFF; /* Current LED state. */
pLed->bLedOn = false; /* true if LED is ON, false if LED is OFF. */
@@ -34,39 +29,48 @@ void ResetLedStatus(struct LED_871x *pLed)
pLed->bLedNoLinkBlinkInProgress = false;
pLed->bLedLinkBlinkInProgress = false;
- pLed->bLedStartToLinkBlinkInProgress = false;
pLed->bLedScanBlinkInProgress = false;
}
-void InitLed871x(struct adapter *padapter, struct LED_871x *pLed, enum LED_PIN_871x LedPin)
+static void SwLedOn(struct adapter *padapter, struct LED_871x *pLed)
{
- pLed->padapter = padapter;
- pLed->LedPin = LedPin;
+ u8 LedCfg;
- ResetLedStatus(pLed);
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ return;
- timer_setup(&pLed->BlinkTimer, BlinkTimerCallback, 0);
- _init_workitem(&pLed->BlinkWorkItem, BlinkWorkItemCallback, pLed);
+ LedCfg = rtw_read8(padapter, REG_LEDCFG2);
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg & 0xf0) | BIT(5) | BIT(6)); /* SW control led0 on. */
+ pLed->bLedOn = true;
}
-void DeInitLed871x(struct LED_871x *pLed)
+static void SwLedOff(struct adapter *padapter, struct LED_871x *pLed)
{
- _cancel_workitem_sync(&pLed->BlinkWorkItem);
- _cancel_timer_ex(&pLed->BlinkTimer);
- ResetLedStatus(pLed);
+ u8 LedCfg;
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ goto exit;
+
+ LedCfg = rtw_read8(padapter, REG_LEDCFG2);/* 0x4E */
+
+ LedCfg &= 0x90; /* Set to software control. */
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
+ LedCfg = rtw_read8(padapter, REG_MAC_PINMUX_CFG);
+ LedCfg &= 0xFE;
+ rtw_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
+exit:
+ pLed->bLedOn = false;
}
-static void SwLedBlink1(struct LED_871x *pLed)
+static void blink_work(struct work_struct *work)
{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct LED_871x *pLed = container_of(dwork, struct LED_871x, blink_work);
struct adapter *padapter = pLed->padapter;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bStopBlinking = false;
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- SwLedOn(padapter, pLed);
- else
- SwLedOff(padapter, pLed);
+ if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
+ return;
if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
SwLedOff(padapter, pLed);
@@ -74,81 +78,67 @@ static void SwLedBlink1(struct LED_871x *pLed)
return;
}
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON)
+ SwLedOn(padapter, pLed);
+ else
+ SwLedOff(padapter, pLed);
+
switch (pLed->CurrLedState) {
case LED_BLINK_SLOWLY:
if (pLed->bLedOn)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
break;
case LED_BLINK_NORMAL:
if (pLed->bLedOn)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
break;
case LED_BLINK_SCAN:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
+ if (pLed->BlinkTimes == 0) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
} else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
}
pLed->bLedScanBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
}
break;
case LED_BLINK_TXRX:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
+ if (pLed->BlinkTimes == 0) {
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
} else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
pLed->bLedNoLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
}
pLed->bLedBlinkInProgress = false;
} else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_FASTER_INTVL);
}
break;
case LED_BLINK_WPS:
@@ -156,27 +146,22 @@ static void SwLedBlink1(struct LED_871x *pLed)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
break;
case LED_BLINK_WPS_STOP: /* WPS success */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- bStopBlinking = false;
- else
- bStopBlinking = true;
-
- if (bStopBlinking) {
+ if (pLed->BlinkingLedState != RTW_LED_ON) {
pLed->bLedLinkBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_NORMAL;
if (pLed->bLedOn)
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
pLed->bLedWPSBlinkInProgress = false;
} else {
pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_WPS_SUCESS_INTVL);
}
break;
default:
@@ -184,25 +169,56 @@ static void SwLedBlink1(struct LED_871x *pLed)
}
}
-static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAction)
+void rtl8188eu_InitSwLeds(struct adapter *padapter)
+{
+ struct led_priv *pledpriv = &padapter->ledpriv;
+ struct LED_871x *pLed = &pledpriv->SwLed0;
+
+ pLed->padapter = padapter;
+ ResetLedStatus(pLed);
+ INIT_DELAYED_WORK(&pLed->blink_work, blink_work);
+}
+
+void rtl8188eu_DeInitSwLeds(struct adapter *padapter)
+{
+ struct led_priv *ledpriv = &padapter->ledpriv;
+ struct LED_871x *pLed = &ledpriv->SwLed0;
+
+ cancel_delayed_work_sync(&pLed->blink_work);
+ ResetLedStatus(pLed);
+ SwLedOff(padapter, pLed);
+}
+
+void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction)
{
struct led_priv *ledpriv = &padapter->ledpriv;
+ struct registry_priv *registry_par;
struct LED_871x *pLed = &ledpriv->SwLed0;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) ||
+ (!padapter->hw_init_completed))
+ return;
+
+ if (!ledpriv->bRegUseLed)
+ return;
+
+ registry_par = &padapter->registrypriv;
+ if (!registry_par->led_enable)
+ return;
+
switch (LedAction) {
- case LED_CTL_POWER_ON:
case LED_CTL_START_TO_LINK:
case LED_CTL_NO_LINK:
if (!pLed->bLedNoLinkBlinkInProgress) {
if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedBlinkInProgress = false;
}
@@ -212,7 +228,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
}
break;
case LED_CTL_LINK:
@@ -220,11 +236,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedLinkBlinkInProgress = true;
@@ -233,7 +249,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_LINK_INTVL);
}
break;
case LED_CTL_SITE_SURVEY:
@@ -243,15 +259,15 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedBlinkInProgress = false;
}
pLed->bLedScanBlinkInProgress = true;
@@ -261,7 +277,7 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
}
break;
case LED_CTL_TX:
@@ -270,11 +286,11 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
return;
if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedLinkBlinkInProgress = false;
}
pLed->bLedBlinkInProgress = true;
@@ -284,26 +300,25 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_FASTER_INTVL);
}
break;
case LED_CTL_START_WPS: /* wait until xinpin finish */
- case LED_CTL_START_WPS_BOTTON:
if (!pLed->bLedWPSBlinkInProgress) {
if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedScanBlinkInProgress = false;
}
pLed->bLedWPSBlinkInProgress = true;
@@ -312,42 +327,42 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_SCAN_INTVL);
}
break;
case LED_CTL_STOP_WPS:
if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedScanBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress)
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
else
pLed->bLedWPSBlinkInProgress = true;
pLed->CurrLedState = LED_BLINK_WPS_STOP;
if (pLed->bLedOn) {
pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_WPS_SUCESS_INTVL);
} else {
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, 0);
+ schedule_delayed_work(&pLed->blink_work, 0);
}
break;
case LED_CTL_STOP_WPS_FAIL:
if (pLed->bLedWPSBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedWPSBlinkInProgress = false;
}
pLed->bLedNoLinkBlinkInProgress = true;
@@ -356,29 +371,29 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
pLed->BlinkingLedState = RTW_LED_OFF;
else
pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ schedule_delayed_work(&pLed->blink_work, LED_BLINK_NO_LINK_INTVL);
break;
case LED_CTL_POWER_OFF:
pLed->CurrLedState = RTW_LED_OFF;
pLed->BlinkingLedState = RTW_LED_OFF;
if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedNoLinkBlinkInProgress = false;
}
if (pLed->bLedLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedLinkBlinkInProgress = false;
}
if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedBlinkInProgress = false;
}
if (pLed->bLedWPSBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedWPSBlinkInProgress = false;
}
if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
+ cancel_delayed_work(&pLed->blink_work);
pLed->bLedScanBlinkInProgress = false;
}
SwLedOff(padapter, pLed);
@@ -387,41 +402,3 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
break;
}
}
-
-void BlinkHandler(struct LED_871x *pLed)
-{
- struct adapter *padapter = pLed->padapter;
-
- if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
- return;
-
- SwLedBlink1(pLed);
-}
-
-void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct registry_priv *registry_par;
-
- if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) ||
- (!padapter->hw_init_completed))
- return;
-
- if (!ledpriv->bRegUseLed)
- return;
-
- registry_par = &padapter->registrypriv;
- if (!registry_par->led_enable)
- return;
-
- if ((padapter->pwrctrlpriv.rf_pwrstate != rf_on &&
- padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) &&
- (LedAction == LED_CTL_TX || LedAction == LED_CTL_RX ||
- LedAction == LED_CTL_SITE_SURVEY ||
- LedAction == LED_CTL_LINK ||
- LedAction == LED_CTL_NO_LINK ||
- LedAction == LED_CTL_POWER_ON))
- return;
-
- SwLedControlMode1(padapter, LedAction);
-}
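
[Editor's note] The mechanical pattern applied throughout this file: the BlinkTimer/BlinkWorkItem pair, where a timer callback did nothing but schedule a work item, collapses into a single delayed_work. The shape of that conversion, as a hedged sketch around a hypothetical foo_dev rather than the driver's structures:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

struct foo_dev {
        struct delayed_work blink_work;
};

static void foo_blink_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct foo_dev *foo = container_of(dwork, struct foo_dev, blink_work);

        /* ... toggle the LED ..., then re-arm in one step */
        schedule_delayed_work(&foo->blink_work, msecs_to_jiffies(500));
}

static void foo_init(struct foo_dev *foo)
{
        INIT_DELAYED_WORK(&foo->blink_work, foo_blink_work);
        schedule_delayed_work(&foo->blink_work, 0);
}

static void foo_teardown(struct foo_dev *foo)
{
        cancel_delayed_work_sync(&foo->blink_work);
}
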
diff --git a/drivers/staging/r8188eu/core/rtw_mlme.c b/drivers/staging/r8188eu/core/rtw_mlme.c
index 8d14aff32f61..394e8a5ce03c 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme.c
@@ -913,7 +913,7 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
psta->aid = pnetwork->join_res;
psta->mac_id = 0;
/* sta mode */
- rtl8188e_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, true);
+ rtl8188e_SetHalODMVar(padapter, psta, true);
/* security related */
if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
padapter->securitypriv.binstallGrpkey = false;
@@ -1198,7 +1198,7 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
psta->mac_id = (uint)pstassoc->cam_id;
DBG_88E("%s\n", __func__);
/* for ad-hoc mode */
- rtl8188e_SetHalODMVar(adapter, HAL_ODM_STA_INFO, psta, true);
+ rtl8188e_SetHalODMVar(adapter, psta, true);
rtw_sta_media_status_rpt(adapter, psta, 1);
if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm;
@@ -1999,17 +1999,11 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
(le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & BIT(1)) &&
(pmlmeinfo->HT_info.infos[0] & BIT(2))) {
int i;
- u8 rf_type;
-
- GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
/* update the MCS rates */
- for (i = 0; i < 16; i++) {
- if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
- else
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R[i];
- }
+ for (i = 0; i < 16; i++)
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
+
/* switch to the 40M Hz mode according to the AP */
pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
switch ((pmlmeinfo->HT_info.infos[0] & 0x3)) {
diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
index b4820ad2cee7..a9141ab1690e 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
@@ -10,7 +10,6 @@
#include "../include/wlan_bssdef.h"
#include "../include/mlme_osdep.h"
#include "../include/recv_osdep.h"
-#include "../include/rtl8188e_sreset.h"
#include "../include/rtl8188e_xmit.h"
#include "../include/rtl8188e_dm.h"
@@ -77,7 +76,7 @@ unsigned char MCS_rate_1R[16] = {0xff, 0x00, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0,
/********************************************************
ChannelPlan definitions
*********************************************************/
-static struct rt_channel_plan_2g RTW_ChannelPlan2G[RT_CHANNEL_DOMAIN_2G_MAX] = {
+static struct rt_channel_plan RTW_ChannelPlan2G[RT_CHANNEL_DOMAIN_2G_MAX] = {
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x00, RT_CHANNEL_DOMAIN_2G_WORLD , Passive scan CH 12, 13 */
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x01, RT_CHANNEL_DOMAIN_2G_ETSI1 */
{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /* 0x02, RT_CHANNEL_DOMAIN_2G_FCC1 */
@@ -252,7 +251,8 @@ static void init_mlme_ext_priv_value(struct adapter *padapter)
static int has_channel(struct rt_channel_info *channel_set,
u8 chanset_size,
- u8 chan) {
+ u8 chan)
+{
int i;
for (i = 0; i < chanset_size; i++) {
@@ -264,7 +264,8 @@ static int has_channel(struct rt_channel_info *channel_set,
static void init_channel_list(struct adapter *padapter, struct rt_channel_info *channel_set,
u8 chanset_size,
- struct p2p_channels *channel_list) {
+ struct p2p_channels *channel_list)
+{
struct p2p_oper_class_map op_class[] = {
{ IEEE80211G, 81, 1, 13, 1, BW20 },
{ IEEE80211G, 82, 14, 14, 1, BW20 },
@@ -655,9 +656,11 @@ unsigned int OnBeacon(struct adapter *padapter, struct recv_frame *precv_frame)
if (psta) {
ret = rtw_check_bcn_info(padapter, pframe, len);
if (!ret) {
- DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
- receive_disconnect(padapter, pmlmeinfo->network.MacAddress, 0);
- return _SUCCESS;
+ netdev_dbg(padapter->pnetdev,
+ "ap has changed, disconnect now\n");
+ receive_disconnect(padapter,
+ pmlmeinfo->network.MacAddress, 0);
+ return _SUCCESS;
}
/* update WMM, ERP in the beacon */
 		/* todo: a timer is used instead of counting the received beacons */
@@ -931,7 +934,7 @@ unsigned int OnAuthClient(struct adapter *padapter, struct recv_frame *precv_fra
}
if (go2asoc) {
- DBG_88E_LEVEL(_drv_info_, "auth success, start assoc\n");
+ netdev_dbg(padapter->pnetdev, "auth success, start assoc\n");
start_clnt_assoc(padapter);
return _SUCCESS;
}
@@ -1503,8 +1506,9 @@ unsigned int OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame)
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
- DBG_88E_LEVEL(_drv_always_, "ap recv deauth reason code(%d) sta:%pM\n",
- reason, GetAddr2Ptr(pframe));
+ netdev_dbg(padapter->pnetdev,
+ "ap recv deauth reason code(%d) sta:%pM\n",
+ reason, GetAddr2Ptr(pframe));
psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
if (psta) {
@@ -1540,8 +1544,9 @@ unsigned int OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame)
}
}
- DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM, ignore = %d\n",
- reason, GetAddr3Ptr(pframe), ignore_received_deauth);
+ netdev_dbg(padapter->pnetdev,
+ "sta recv deauth reason code(%d) sta:%pM, ignore = %d\n",
+ reason, GetAddr3Ptr(pframe), ignore_received_deauth);
if (!ignore_received_deauth)
receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
@@ -1576,8 +1581,9 @@ unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
- DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
- reason, GetAddr2Ptr(pframe));
+ netdev_dbg(padapter->pnetdev,
+ "ap recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr2Ptr(pframe));
psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
if (psta) {
@@ -1596,8 +1602,9 @@ unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame
return _SUCCESS;
} else {
- DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
- reason, GetAddr3Ptr(pframe));
+ netdev_dbg(padapter->pnetdev,
+ "ap recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr3Ptr(pframe));
receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
}
@@ -5062,7 +5069,7 @@ void issue_assocreq(struct adapter *padapter)
__le16 *fctrl;
__le16 le_tmp;
unsigned int i, j, ie_len, index = 0;
- unsigned char rf_type, bssrate[NumRates], sta_bssrate[NumRates];
+ unsigned char bssrate[NumRates], sta_bssrate[NumRates];
struct ndis_802_11_var_ie *pIE;
struct registry_priv *pregpriv = &padapter->registrypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -5187,25 +5194,10 @@ void issue_assocreq(struct adapter *padapter)
/* todo: disable SM power save mode */
pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x000c);
- GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
- switch (rf_type) {
- case RF_1T1R:
- if (pregpriv->rx_stbc)
- pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0100);/* RX STBC One spatial stream */
- memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_1R, 16);
- break;
- case RF_2T2R:
- case RF_1T2R:
- default:
- if ((pregpriv->rx_stbc == 0x3) ||/* enable for 2.4/5 GHz */
- ((pmlmeext->cur_wireless_mode & WIRELESS_11_24N) && (pregpriv->rx_stbc == 0x1)) || /* enable for 2.4GHz */
- (pregpriv->wifi_spec == 1)) {
- DBG_88E("declare supporting RX STBC\n");
- pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0200);/* RX STBC two spatial stream */
- }
- memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_2R, 16);
- break;
- }
+ if (pregpriv->rx_stbc)
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0100);/* RX STBC One spatial stream */
+ memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_1R, 16);
+
pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len, (u8 *)(&pmlmeinfo->HT_caps), &pattrib->pktlen);
}
}
@@ -6493,7 +6485,7 @@ void start_clnt_auth(struct adapter *padapter)
 	/* For the Win8 P2P connection, it will be hard to establish a successful connection if this Wi-Fi device doesn't connect to it. */
issue_deauth(padapter, (&pmlmeinfo->network)->MacAddress, WLAN_REASON_DEAUTH_LEAVING);
- DBG_88E_LEVEL(_drv_info_, "start auth\n");
+ netdev_dbg(padapter->pnetdev, "start auth\n");
issue_auth(padapter, NULL, 0);
set_link_timer(pmlmeext, REAUTH_TO);
@@ -7132,8 +7124,7 @@ void mlmeext_sta_del_event_callback(struct adapter *padapter)
Following are the functions for the timer handlers
*****************************************************************************/
-void _linked_rx_signal_strehgth_display(struct adapter *padapter);
-void _linked_rx_signal_strehgth_display(struct adapter *padapter)
+static void _linked_rx_signal_strength_display(struct adapter *padapter)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -7167,6 +7158,23 @@ static u8 chk_ap_is_alive(struct adapter *padapter, struct sta_info *psta)
return ret;
}
+static void rtl8188e_sreset_linked_status_check(struct adapter *padapter)
+{
+ u32 rx_dma_status = rtw_read32(padapter, REG_RXDMA_STATUS);
+ u8 fw_status;
+
+ if (rx_dma_status != 0x00) {
+ DBG_88E("%s REG_RXDMA_STATUS:0x%08x\n", __func__, rx_dma_status);
+ rtw_write32(padapter, REG_RXDMA_STATUS, rx_dma_status);
+ }
+
+ fw_status = rtw_read8(padapter, REG_FMETHR);
+ if (fw_status == 1)
+ DBG_88E("%s REG_FW_STATUS (0x%02x), Read_Efuse_Fail !!\n", __func__, fw_status);
+ else if (fw_status == 2)
+ DBG_88E("%s REG_FW_STATUS (0x%02x), Condition_No_Match !!\n", __func__, fw_status);
+}
+
void linked_status_chk(struct adapter *padapter)
{
u32 i;
@@ -7177,7 +7185,7 @@ void linked_status_chk(struct adapter *padapter)
struct sta_priv *pstapriv = &padapter->stapriv;
if (padapter->bRxRSSIDisplay)
- _linked_rx_signal_strehgth_display(padapter);
+ _linked_rx_signal_strength_display(padapter);
rtl8188e_sreset_linked_status_check(padapter);
@@ -7238,8 +7246,8 @@ void linked_status_chk(struct adapter *padapter)
if (rx_chk == _FAIL) {
pmlmeext->retry++;
if (pmlmeext->retry > rx_chk_limit) {
- DBG_88E_LEVEL(_drv_always_, FUNC_ADPT_FMT" disconnect or roaming\n",
- FUNC_ADPT_ARG(padapter));
+ netdev_dbg(padapter->pnetdev,
+ "disconnect or roaming\n");
receive_disconnect(padapter, pmlmeinfo->network.MacAddress,
WLAN_REASON_EXPIRATION_CHK);
return;
@@ -7764,8 +7772,9 @@ u8 setkey_hdl(struct adapter *padapter, u8 *pbuf)
/* write cam */
ctrl = BIT(15) | ((pparm->algorithm) << 2) | pparm->keyid;
- DBG_88E_LEVEL(_drv_info_, "set group key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) "
- "keyid:%d\n", pparm->algorithm, pparm->keyid);
+ netdev_dbg(padapter->pnetdev,
+ "set group key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) keyid:%d\n",
+ pparm->algorithm, pparm->keyid);
write_cam(padapter, pparm->keyid, ctrl, null_sta, pparm->key);
return H2C_SUCCESS;
@@ -7794,8 +7803,9 @@ u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
cam_id = 4;
- DBG_88E_LEVEL(_drv_info_, "set pairwise key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) camid:%d\n",
- pparm->algorithm, cam_id);
+ netdev_dbg(padapter->pnetdev,
+ "set pairwise key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) camid:%d\n",
+ pparm->algorithm, cam_id);
if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
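
The logging hunks in this file replace the driver-private DBG_88E_LEVEL() macro with netdev_dbg(), which prefixes each message with the net device name and compiles away unless DEBUG is defined or dynamic debug selects it. A minimal sketch:

	#include <linux/netdevice.h>

	/* silent by default; enabled per-site via dynamic debug */
	netdev_dbg(padapter->pnetdev, "auth success, start assoc\n");

With CONFIG_DYNAMIC_DEBUG the messages can be switched on at run time through /sys/kernel/debug/dynamic_debug/control (for example 'file rtw_mlme_ext.c +p').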
diff --git a/drivers/staging/r8188eu/core/rtw_p2p.c b/drivers/staging/r8188eu/core/rtw_p2p.c
index b265b5e46851..7b30b9b64b41 100644
--- a/drivers/staging/r8188eu/core/rtw_p2p.c
+++ b/drivers/staging/r8188eu/core/rtw_p2p.c
@@ -1806,8 +1806,6 @@ void reset_global_wifidirect_info(struct adapter *padapter)
pwdinfo = &padapter->wdinfo;
pwdinfo->persistent_supported = 0;
pwdinfo->session_available = true;
- pwdinfo->wfd_tdls_enable = 0;
- pwdinfo->wfd_tdls_weaksec = 0;
}
void rtw_init_wifidirect_timers(struct adapter *padapter)
@@ -1912,7 +1910,6 @@ void init_wifidirect_info(struct adapter *padapter, enum P2P_ROLE role)
memset(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, 0x00, 4);
memset(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, '0', 3);
memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
- pwdinfo->wfd_tdls_enable = 0;
memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
memset(pwdinfo->p2p_peer_device_addr, 0x00, ETH_ALEN);
@@ -1944,7 +1941,6 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
/* Enable P2P function */
init_wifidirect_info(padapter, role);
- rtl8188e_SetHalODMVar(padapter, HAL_ODM_P2P_STATE, NULL, true);
} else if (role == P2P_ROLE_DISABLE) {
if (_FAIL == rtw_pwr_wakeup(padapter)) {
ret = _FAIL;
@@ -1963,8 +1959,6 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
memset(&pwdinfo->rx_prov_disc_info, 0x00, sizeof(struct rx_provdisc_req_info));
}
- rtl8188e_SetHalODMVar(padapter, HAL_ODM_P2P_STATE, NULL, false);
-
/* Restore to initial setting. */
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
}
diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
index 5d595cf2a47e..46e44aee587f 100644
--- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
@@ -15,9 +15,12 @@ void ips_enter(struct adapter *padapter)
if (pxmit_priv->free_xmitbuf_cnt != NR_XMITBUFF ||
pxmit_priv->free_xmit_extbuf_cnt != NR_XMIT_EXTBUFF) {
- DBG_88E_LEVEL(_drv_info_, "There are some pkts to transmit\n");
- DBG_88E_LEVEL(_drv_info_, "free_xmitbuf_cnt: %d, free_xmit_extbuf_cnt: %d\n",
- pxmit_priv->free_xmitbuf_cnt, pxmit_priv->free_xmit_extbuf_cnt);
+ netdev_dbg(padapter->pnetdev,
+ "There are some pkts to transmit\n");
+ netdev_dbg(padapter->pnetdev,
+ "free_xmitbuf_cnt: %d, free_xmit_extbuf_cnt: %d\n",
+ pxmit_priv->free_xmitbuf_cnt,
+ pxmit_priv->free_xmit_extbuf_cnt);
return;
}
@@ -32,7 +35,7 @@ void ips_enter(struct adapter *padapter)
DBG_88E("==>ips_enter cnts:%d\n", pwrpriv->ips_enter_cnts);
if (rf_off == pwrpriv->change_rfpwrstate) {
pwrpriv->bpower_saving = true;
- DBG_88E_LEVEL(_drv_info_, "nolinked power save enter\n");
+ netdev_dbg(padapter->pnetdev, "nolinked power save enter\n");
if (pwrpriv->ips_mode == IPS_LEVEL_2)
pwrpriv->bkeepfwalive = true;
@@ -65,7 +68,7 @@ int ips_leave(struct adapter *padapter)
if (result == _SUCCESS) {
pwrpriv->rf_pwrstate = rf_on;
}
- DBG_88E_LEVEL(_drv_info_, "nolinked power save leave\n");
+ netdev_dbg(padapter->pnetdev, "nolinked power save leave\n");
if ((_WEP40_ == psecuritypriv->dot11PrivacyAlgrthm) || (_WEP104_ == psecuritypriv->dot11PrivacyAlgrthm)) {
DBG_88E("==>%s, channel(%d), processing(%x)\n", __func__, padapter->mlmeextpriv.cur_channel, pwrpriv->bips_processing);
@@ -348,7 +351,6 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->pwr_state_check_interval = RTW_PWR_STATE_CHK_INTERVAL;
pwrctrlpriv->pwr_state_check_cnts = 0;
- pwrctrlpriv->bInternalAutoSuspend = false;
pwrctrlpriv->bInSuspend = false;
pwrctrlpriv->bkeepfwalive = false;
@@ -393,7 +395,7 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
}
/* System suspend is not allowed to wakeup */
- if ((!pwrpriv->bInternalAutoSuspend) && pwrpriv->bInSuspend) {
+ if (pwrpriv->bInSuspend) {
while (pwrpriv->bInSuspend &&
(rtw_get_passing_time_ms(start) <= 3000 ||
(rtw_get_passing_time_ms(start) <= 500)))
@@ -404,12 +406,6 @@ int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *cal
DBG_88E("%s wait bInSuspend done\n", __func__);
}
- /* block??? */
- if ((pwrpriv->bInternalAutoSuspend) && (padapter->net_closed)) {
- ret = _FAIL;
- goto exit;
- }
-
/* I think this should be check in IPS, LPS, autosuspend functions... */
if (check_fwstate(pmlmepriv, _FW_LINKED)) {
ret = _SUCCESS;
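
bInternalAutoSuspend is initialized to false above and never set anywhere else, so the two guards it appeared in simplify mechanically:

	(!pwrpriv->bInternalAutoSuspend) && pwrpriv->bInSuspend /* == pwrpriv->bInSuspend */
	pwrpriv->bInternalAutoSuspend && padapter->net_closed   /* == false: dead block, removed */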
diff --git a/drivers/staging/r8188eu/core/rtw_rf.c b/drivers/staging/r8188eu/core/rtw_rf.c
index 2ec56012516e..e704092d31d0 100644
--- a/drivers/staging/r8188eu/core/rtw_rf.c
+++ b/drivers/staging/r8188eu/core/rtw_rf.c
@@ -35,7 +35,7 @@ static struct ch_freq ch_freq_map[] = {
{216, 5080},/* Japan, means J16 */
};
-static int ch_freq_map_num = (sizeof(ch_freq_map) / sizeof(struct ch_freq));
+static int ch_freq_map_num = ARRAY_SIZE(ch_freq_map);
u32 rtw_ch2freq(u32 channel)
{
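
ARRAY_SIZE() replaces the open-coded sizeof division and, unlike the manual form, refuses to compile once its argument has decayed to a pointer. Simplified from the kernel's definition:

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

__must_be_array() evaluates to 0 for a true array and forces a build error for a pointer.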
diff --git a/drivers/staging/r8188eu/core/rtw_security.c b/drivers/staging/r8188eu/core/rtw_security.c
index db35f326bbb1..4e93c720c1b6 100644
--- a/drivers/staging/r8188eu/core/rtw_security.c
+++ b/drivers/staging/r8188eu/core/rtw_security.c
@@ -545,7 +545,8 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
if (is_multicast_ether_addr(prxattrib->ra)) {
if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
- DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
+ netdev_dbg(padapter->pnetdev,
+ "rx bc/mc packets, but didn't install group key!\n");
goto exit;
}
prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
@@ -1145,7 +1146,7 @@ u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
return res;
}
-static int aes_decipher(u8 *key, uint hdrlen,
+static int aes_decipher(struct adapter *padapter, u8 *key, uint hdrlen,
u8 *pframe, uint plen)
{
static u8 message[MAX_MSG_SIZE];
@@ -1329,8 +1330,10 @@ static int aes_decipher(u8 *key, uint hdrlen,
/* compare the mic */
for (i = 0; i < 8; i++) {
if (pframe[hdrlen + 8 + plen - 8 + i] != message[hdrlen + 8 + plen - 8 + i]) {
- DBG_88E("aes_decipher:mic check error mic[%d]: pframe(%x)!=message(%x)\n",
- i, pframe[hdrlen + 8 + plen - 8 + i], message[hdrlen + 8 + plen - 8 + i]);
+ netdev_dbg(padapter->pnetdev,
+ "mic check error mic[%d]: pframe(%x)!=message(%x)\n",
+ i, pframe[hdrlen + 8 + plen - 8 + i],
+ message[hdrlen + 8 + plen - 8 + i]);
res = _FAIL;
}
}
@@ -1358,13 +1361,16 @@ u32 rtw_aes_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
 	/* in concurrent mode we should use sw decrypt for the group key, so we remove this message */
if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
- DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
+ netdev_dbg(padapter->pnetdev,
+ "rx bc/mc packets, but didn't install group key!\n");
goto exit;
}
prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
if (psecuritypriv->dot118021XGrpKeyid != prxattrib->key_index) {
- DBG_88E("not match packet_index=%d, install_index=%d\n",
- prxattrib->key_index, psecuritypriv->dot118021XGrpKeyid);
+ netdev_dbg(padapter->pnetdev,
+ "not match packet_index=%d, install_index=%d\n",
+ prxattrib->key_index,
+ psecuritypriv->dot118021XGrpKeyid);
res = _FAIL;
goto exit;
}
@@ -1372,7 +1378,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
prwskey = &stainfo->dot118021x_UncstKey.skey[0];
}
length = precvframe->len - prxattrib->hdrlen - prxattrib->iv_len;
- res = aes_decipher(prwskey, prxattrib->hdrlen, pframe, length);
+ res = aes_decipher(padapter, prwskey, prxattrib->hdrlen, pframe, length);
} else {
res = _FAIL;
}
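
aes_decipher() gains a struct adapter parameter solely so its MIC-failure path can log through netdev_dbg(); the one caller, rtw_aes_decrypt(), passes its adapter straight through. The updated prototype, as implied by the hunks above:

	static int aes_decipher(struct adapter *padapter, u8 *key, uint hdrlen,
				u8 *pframe, uint plen);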
diff --git a/drivers/staging/r8188eu/core/rtw_sta_mgt.c b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
index a3d4d5d8a785..54561ff239a0 100644
--- a/drivers/staging/r8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
@@ -310,7 +310,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
}
if (!(psta->state & WIFI_AP_STATE))
- rtl8188e_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, false);
+ rtl8188e_SetHalODMVar(padapter, psta, false);
spin_lock_bh(&pstapriv->auth_list_lock);
if (!list_empty(&psta->auth_list)) {
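
With the HAL_ODM_P2P_STATE user gone (see rtw_p2p.c above), HAL_ODM_STA_INFO was the only selector left, so rtl8188e_SetHalODMVar() drops the enum argument. The signature inferred from the updated call sites (the declaration itself is not shown in this diff):

	void rtl8188e_SetHalODMVar(struct adapter *adapter,
				   struct sta_info *psta, bool set);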
diff --git a/drivers/staging/r8188eu/core/rtw_wlan_util.c b/drivers/staging/r8188eu/core/rtw_wlan_util.c
index 6d4e21a16783..d40669c21fc1 100644
--- a/drivers/staging/r8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/r8188eu/core/rtw_wlan_util.c
@@ -695,7 +695,6 @@ static void bwmode_update_check(struct adapter *padapter, struct ndis_802_11_var
void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
{
unsigned int i;
- u8 rf_type;
u8 max_AMPDU_len, min_MPDU_spacing;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -716,29 +715,19 @@ void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
pmlmeinfo->HT_caps.u.HT_cap[i] &= (pIE->data[i]);
} else {
/* modify from fw by Thomas 2010/11/17 */
- if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3) > (pIE->data[i] & 0x3))
- max_AMPDU_len = (pIE->data[i] & 0x3);
- else
- max_AMPDU_len = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3);
+ max_AMPDU_len = min(pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3,
+ pIE->data[i] & 0x3);
- if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) > (pIE->data[i] & 0x1c))
- min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c);
- else
- min_MPDU_spacing = (pIE->data[i] & 0x1c);
+ min_MPDU_spacing = max(pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c,
+ pIE->data[i] & 0x1c);
pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para = max_AMPDU_len | min_MPDU_spacing;
}
}
- GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
-
/* update the MCS rates */
- for (i = 0; i < 16; i++) {
- if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
- else
- pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R[i];
- }
+ for (i = 0; i < 16; i++)
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
}
void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
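
The open-coded comparisons above choose the smaller max-AMPDU length and the larger min-MPDU spacing; the kernel's type-checked min()/max() macros (linux/minmax.h on current kernels) say the same thing directly. A small worked sketch, with own/peer as hypothetical stand-ins for the two A-MPDU parameter bytes:

	u8 own = 0x17, peer = 0x0e;

	max_AMPDU_len    = min(own & 0x3,  peer & 0x3);  /* bits 1:0 -> 0x2  */
	min_MPDU_spacing = max(own & 0x1c, peer & 0x1c); /* bits 4:2 -> 0x14 */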
diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c
index 0c033a077bf9..8503059edc46 100644
--- a/drivers/staging/r8188eu/core/rtw_xmit.c
+++ b/drivers/staging/r8188eu/core/rtw_xmit.c
@@ -461,7 +461,7 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
}
}
} else if (0x888e == pattrib->ether_type) {
- DBG_88E_LEVEL(_drv_info_, "send eapol packet\n");
+ netdev_dbg(padapter->pnetdev, "send eapol packet\n");
}
if ((pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
diff --git a/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c b/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c
index 48ede610cd28..6505e1fcb070 100644
--- a/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c
+++ b/drivers/staging/r8188eu/hal/Hal8188EPwrSeq.c
@@ -4,66 +4,42 @@
#include "../include/Hal8188EPwrSeq.h"
#include "../include/rtl8188e_hal.h"
-/*
- drivers should parse below arrays and do the corresponding actions
-*/
-/* 3 Power on Array */
-struct wl_pwr_cfg rtl8188E_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_CARDEMU_TO_ACT
- RTL8188E_TRANS_END
-};
-
-/* 3Radio off Array */
-struct wl_pwr_cfg rtl8188E_radio_off_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_ACT_TO_CARDEMU
- RTL8188E_TRANS_END
-};
-
-/* 3Card Disable Array */
-struct wl_pwr_cfg rtl8188E_card_disable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_ACT_TO_CARDEMU
- RTL8188E_TRANS_CARDEMU_TO_CARDDIS
- RTL8188E_TRANS_END
-};
-
-/* 3 Card Enable Array */
-struct wl_pwr_cfg rtl8188E_card_enable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_CARDDIS_TO_CARDEMU
- RTL8188E_TRANS_CARDEMU_TO_ACT
- RTL8188E_TRANS_END
-};
-
-/* 3Suspend Array */
-struct wl_pwr_cfg rtl8188E_suspend_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_ACT_TO_CARDEMU
- RTL8188E_TRANS_CARDEMU_TO_SUS
- RTL8188E_TRANS_END
-};
-
-/* 3 Resume Array */
-struct wl_pwr_cfg rtl8188E_resume_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_SUS_TO_CARDEMU
- RTL8188E_TRANS_CARDEMU_TO_ACT
- RTL8188E_TRANS_END
-};
-
-/* 3HWPDN Array */
-struct wl_pwr_cfg rtl8188E_hwpdn_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + RTL8188E_TRANS_END_STEPS] = {
- RTL8188E_TRANS_ACT_TO_CARDEMU
- RTL8188E_TRANS_CARDEMU_TO_PDN
- RTL8188E_TRANS_END
-};
-
-/* 3 Enter LPS */
-struct wl_pwr_cfg rtl8188E_enter_lps_flow[RTL8188E_TRANS_ACT_TO_LPS_STEPS + RTL8188E_TRANS_END_STEPS] = {
- /* FW behavior */
- RTL8188E_TRANS_ACT_TO_LPS
- RTL8188E_TRANS_END
-};
-
-/* 3 Leave LPS */
-struct wl_pwr_cfg rtl8188E_leave_lps_flow[RTL8188E_TRANS_LPS_TO_ACT_STEPS + RTL8188E_TRANS_END_STEPS] = {
- /* FW behavior */
- RTL8188E_TRANS_LPS_TO_ACT
- RTL8188E_TRANS_END
+struct wl_pwr_cfg rtl8188E_power_on_flow[] = {
+ { 0x0006, PWR_CMD_POLLING, BIT(1), BIT(1) },
+ { 0x0002, PWR_CMD_WRITE, BIT(0) | BIT(1), 0 }, /* reset BB */
+ { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */
+	{ 0x0005, PWR_CMD_WRITE, BIT(7), 0 }, /* disable HWPDN (controlled by driver) */
+	{ 0x0005, PWR_CMD_WRITE, BIT(4) | BIT(3), 0 }, /* disable WL suspend */
+ { 0x0005, PWR_CMD_WRITE, BIT(0), BIT(0) },
+ { 0x0005, PWR_CMD_POLLING, BIT(0), 0 },
+ { 0x0023, PWR_CMD_WRITE, BIT(4), 0 },
+ { 0xFFFF, PWR_CMD_END, 0, 0 },
+};
+
+struct wl_pwr_cfg rtl8188E_card_disable_flow[] = {
+ { 0x001F, PWR_CMD_WRITE, 0xFF, 0 }, /* turn off RF */
+ { 0x0023, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* LDO Sleep mode */
+ { 0x0005, PWR_CMD_WRITE, BIT(1), BIT(1) }, /* turn off MAC by HW state machine */
+ { 0x0005, PWR_CMD_POLLING, BIT(1), 0 },
+ { 0x0026, PWR_CMD_WRITE, BIT(7), BIT(7) }, /* schmitt trigger */
+ { 0x0005, PWR_CMD_WRITE, BIT(3) | BIT(4), BIT(3) }, /* enable WL suspend */
+ { 0x0007, PWR_CMD_WRITE, 0xFF, 0 }, /* enable bandgap mbias in suspend */
+ { 0x0041, PWR_CMD_WRITE, BIT(4), 0 }, /* Clear SIC_EN register */
+ { 0xfe10, PWR_CMD_WRITE, BIT(4), BIT(4) }, /* Set USB suspend enable local register */
+ { 0xFFFF, PWR_CMD_END, 0, 0 },
+};
+
+/* This is used by the driver for the LPSRadioOff procedure, not for the FW LPS step */
+struct wl_pwr_cfg rtl8188E_enter_lps_flow[] = {
+ { 0x0522, PWR_CMD_WRITE, 0xFF, 0x7F },/* Tx Pause */
+ { 0x05F8, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
+ { 0x05F9, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
+ { 0x05FA, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
+ { 0x05FB, PWR_CMD_POLLING, 0xFF, 0 }, /* Should be zero if no packet is transmitted */
+ { 0x0002, PWR_CMD_WRITE, BIT(0), 0 }, /* CCK and OFDM are disabled, clocks are gated */
+ { 0x0002, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US },
+ { 0x0100, PWR_CMD_WRITE, 0xFF, 0x3F }, /* Reset MAC TRX */
+ { 0x0101, PWR_CMD_WRITE, BIT(1), 0 }, /* check if removed later */
+ { 0x0553, PWR_CMD_WRITE, BIT(5), BIT(5) }, /* Respond TxOK to scheduler */
+ { 0xFFFF, PWR_CMD_END, 0, 0 },
};
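
The power-sequence macros are expanded here into flat { offset, cmd, mask, value } tables terminated by a PWR_CMD_END row. A sketch of how the parser consumes one row, consistent with the HalPwrSeqCmdParsing() hunk below (cfg and retries are hypothetical locals, and GET_PWR_CFG_MASK/GET_PWR_CFG_VALUE are assumed accessors matching the ones visible in that hunk):

	u8 v;

	switch (GET_PWR_CFG_CMD(cfg)) {
	case PWR_CMD_WRITE: /* read-modify-write of the masked bits */
		v = rtw_read8(padapter, GET_PWR_CFG_OFFSET(cfg));
		v = (v & ~GET_PWR_CFG_MASK(cfg)) |
		    (GET_PWR_CFG_VALUE(cfg) & GET_PWR_CFG_MASK(cfg));
		rtw_write8(padapter, GET_PWR_CFG_OFFSET(cfg), v);
		break;
	case PWR_CMD_POLLING: /* wait until the masked bits reach the value */
		do {
			v = rtw_read8(padapter, GET_PWR_CFG_OFFSET(cfg)) &
			    GET_PWR_CFG_MASK(cfg);
			if (v == (GET_PWR_CFG_VALUE(cfg) & GET_PWR_CFG_MASK(cfg)))
				break;
			udelay(10); /* assumed poll interval */
		} while (--retries);
		break;
	/* PWR_CMD_DELAY and PWR_CMD_END handling omitted in this sketch */
	}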
diff --git a/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
index 2d351f831289..57e8f5573846 100644
--- a/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
@@ -1,20 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) Realtek Semiconductor Corp.
-Module Name:
- RateAdaptive.c
+/* Copyright (c) Realtek Semiconductor Corp. */
-Abstract:
- Implement Rate Adaptive functions for common operations.
-
-Major Change History:
- When Who What
- ---------- --------------- -------------------------------
- 2011-08-12 Page Create.
-
---*/
-#include "../include/odm_precomp.h"
-
-/* Rate adaptive parameters */
+#include "../include/drv_types.h"
static u8 RETRY_PENALTY[PERENTRY][RETRYSIZE + 1] = {
{5, 4, 3, 2, 0, 3}, /* 92 , idx = 0 */
@@ -316,19 +303,19 @@ static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_inf
pRaInfo->RAUseRate = (pRaInfo->RateMask) & 0x0000000d;
break;
case 12:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR0);
+ MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR0);
pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
break;
case 13:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR1);
+ MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR1);
pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
break;
case 14:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR2);
+ MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR2);
pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
break;
case 15:
- MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR3);
+ MaskFromReg = rtw_read32(dm_odm->Adapter, REG_ARFR3);
pRaInfo->RAUseRate = (pRaInfo->RateMask) & MaskFromReg;
break;
default:
@@ -590,7 +577,7 @@ void ODM_RA_SetRSSI_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 Rssi)
void ODM_RA_Set_TxRPT_Time(struct odm_dm_struct *dm_odm, u16 minRptTime)
{
- ODM_Write2Byte(dm_odm, REG_TX_RPT_TIME, minRptTime);
+ rtw_write16(dm_odm->Adapter, REG_TX_RPT_TIME, minRptTime);
}
void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16 TxRPT_Len, u32 macid_entry0, u32 macid_entry1)
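
The ODM_Read*/ODM_Write* calls dropped in this file were thin indirections through dm_odm->Adapter; the conversion calls the rtw_* register accessors directly. Assumed shape of one such wrapper:

	u32 ODM_Read4Byte(struct odm_dm_struct *dm_odm, u32 reg)
	{
		return rtw_read32(dm_odm->Adapter, reg);
	}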
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
index f6e4243e0c7b..e7f834b02567 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
#include "../include/rtw_iol.h"
#define read_next_pair(array, v1, v2, i) \
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
index b4c55863d3fb..20ce1571fc26 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
#include "../include/rtw_iol.h"
static bool Checkcondition(const u32 condition, const u32 hex)
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
index 5e0a96200078..9dc888a66d09 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
#include "../include/rtw_iol.h"
static bool CheckCondition(const u32 Condition, const u32 Hex)
diff --git a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
index 60d4ba275196..21ecc90a558c 100644
--- a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
+++ b/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
+#include "../include/drv_types.h"
/*---------------------------Define Local Constant---------------------------*/
/* 2010/04/25 MH Define the max tx power tracking tx agc power. */
@@ -98,7 +98,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
struct adapter *Adapter
)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u8 ThermalValue = 0, delta, delta_LCK, delta_IQK, offset;
u8 ThermalValue_AVG_count = 0;
u32 ThermalValue_AVG = 0;
@@ -137,7 +137,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
/* <Kordan> RFCalibrateInfo.RegA24 will be initialized when ODM HW configuring, but MP configures with para files. */
dm_odm->RFCalibrateInfo.RegA24 = 0x090e1317;
- ThermalValue = (u8)ODM_GetRFReg(dm_odm, RF_PATH_A, RF_T_METER_88E, 0xfc00); /* 0x42: RF Reg[15:10] 88E */
+ ThermalValue = (u8)rtl8188e_PHY_QueryRFReg(Adapter, RF_PATH_A, RF_T_METER_88E, 0xfc00); /* 0x42: RF Reg[15:10] 88E */
if (is2t)
rf = 2;
@@ -146,7 +146,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
if (ThermalValue) {
/* Query OFDM path A default setting */
- ele_D = ODM_GetBBReg(dm_odm, rOFDM0_XATxIQImbalance, bMaskDWord) & bMaskOFDM_D;
+ ele_D = rtl8188e_PHY_QueryBBReg(Adapter, rOFDM0_XATxIQImbalance, bMaskDWord) & bMaskOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { /* find the index */
if (ele_D == (OFDMSwingTable[i] & bMaskOFDM_D)) {
OFDM_index_old[0] = (u8)i;
@@ -157,7 +157,7 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
/* Query OFDM path B default setting */
if (is2t) {
- ele_D = ODM_GetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord) & bMaskOFDM_D;
+ ele_D = rtl8188e_PHY_QueryBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord) & bMaskOFDM_D;
for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { /* find the index */
if (ele_D == (OFDMSwingTable[i] & bMaskOFDM_D)) {
OFDM_index_old[1] = (u8)i;
@@ -171,13 +171,13 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
for (i = 0; i < CCK_TABLE_SIZE; i++) {
if (dm_odm->RFCalibrateInfo.bCCKinCH14) {
- if (ODM_CompareMemory(dm_odm, (void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4) == 0) {
+ if (memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4)) {
CCK_index_old = (u8)i;
dm_odm->BbSwingIdxCckBase = (u8)i;
break;
}
} else {
- if (ODM_CompareMemory(dm_odm, (void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4) == 0) {
+ if (memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4)) {
CCK_index_old = (u8)i;
dm_odm->BbSwingIdxCckBase = (u8)i;
break;
@@ -329,17 +329,17 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
 	/* write new elements A, C, D to regC88 and regC9C, element B is always 0 */
value32 = (ele_D << 22) | ((ele_C & 0x3F) << 16) | ele_A;
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
value32 = (ele_C & 0x000003C0) >> 6;
- ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, bMaskH4Bits, value32);
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM0_XDTxAFE, bMaskH4Bits, value32);
value32 = ((X * ele_D) >> 7) & 0x01;
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(28), value32);
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT(28), value32);
} else {
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord, OFDMSwingTable[(u8)OFDM_index[1]]);
- ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, bMaskH4Bits, 0x00);
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(28), 0x00);
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM0_XBTxIQImbalance, bMaskDWord, OFDMSwingTable[(u8)OFDM_index[1]]);
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM0_XDTxAFE, bMaskH4Bits, 0x00);
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM0_ECCAThreshold, BIT(28), 0x00);
}
}
}
@@ -361,35 +361,33 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
#define IQK_DELAY_TIME 1 /* ms */
static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
-phy_PathA_IQK_8188E(struct adapter *adapt, bool configPathB)
+phy_PathA_IQK_8188E(struct adapter *adapt)
{
u32 regeac, regE94, regE9C;
u8 result = 0x00;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
/* 1 Tx IQK */
/* path-A IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
- ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x8214032a);
- ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x8214032a);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
/* LO calibration setting */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
/* One shot, path A LOK & IQK */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
/* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
- regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
- regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ regeac = rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regE94 = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE9C = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
if (!(regeac & BIT(28)) &&
(((regE94 & 0x03FF0000) >> 16) != 0x142) &&
@@ -399,51 +397,49 @@ phy_PathA_IQK_8188E(struct adapter *adapt, bool configPathB)
}
static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
-phy_PathA_RxIQK(struct adapter *adapt, bool configPathB)
+phy_PathA_RxIQK(struct adapter *adapt)
{
u32 regeac, regE94, regE9C, regEA4, u4tmp;
u8 result = 0x00;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
/* 1 Get TXIMR setting */
/* modify RXIQK mode table */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf117B);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf117B);
/* PA,PAD off */
- ODM_SetRFReg(dm_odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x980);
- ODM_SetRFReg(dm_odm, RF_PATH_A, 0x56, bRFRegOffsetMask, 0x51000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x980);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, 0x56, bRFRegOffsetMask, 0x51000);
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
/* IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, 0x01007c00);
- ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x81004800);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, 0x01007c00);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x81004800);
/* path-A IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
- ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f);
- ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
/* LO calibration setting */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
/* One shot, path A LOK & IQK */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
- regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
- regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ regeac = rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regE94 = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE9C = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
if (!(regeac & BIT(28)) &&
(((regE94 & 0x03FF0000) >> 16) != 0x142) &&
@@ -453,46 +449,46 @@ phy_PathA_RxIQK(struct adapter *adapt, bool configPathB)
return result;
u4tmp = 0x80007C00 | (regE94 & 0x3FF0000) | ((regE9C & 0x3FF0000) >> 16);
- ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, u4tmp);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, u4tmp);
/* 1 RX IQK */
/* modify RXIQK mode table */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7ffa);
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7ffa);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
/* IQK setting */
- ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x01004800);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x01004800);
/* path-A IQK setting */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
- ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c05);
- ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160c1f);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_PI_A, bMaskDWord, 0x82160c05);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_PI_A, bMaskDWord, 0x28160c1f);
/* LO calibration setting */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
/* One shot, path A LOK & IQK */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ rtl8188e_PHY_SetBBReg(adapt, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
/* delay x ms */
/* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
- ODM_delay_ms(IQK_DELAY_TIME_88E);
+ mdelay(IQK_DELAY_TIME_88E);
/* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
- regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
- regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
- regEA4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord);
+ regeac = rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord);
+ regE94 = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord);
+ regE9C = rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord);
+ regEA4 = rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_A_2, bMaskDWord);
/* reload RF 0xdf */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- ODM_SetRFReg(dm_odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180);
if (!(regeac & BIT(27)) && /* if Tx is OK, check whether Rx is OK */
(((regEA4 & 0x03FF0000) >> 16) != 0x132) &&
@@ -502,95 +498,54 @@ phy_PathA_RxIQK(struct adapter *adapt, bool configPathB)
return result;
}
-static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
-phy_PathB_IQK_8188E(struct adapter *adapt)
-{
- u32 regeac, regeb4, regebc, regec4, regecc;
- u8 result = 0x00;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
-
- /* One shot, path B LOK & IQK */
- ODM_SetBBReg(dm_odm, rIQK_AGC_Cont, bMaskDWord, 0x00000002);
- ODM_SetBBReg(dm_odm, rIQK_AGC_Cont, bMaskDWord, 0x00000000);
-
- /* delay x ms */
- ODM_delay_ms(IQK_DELAY_TIME_88E);
-
- /* Check failed */
- regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
- regeb4 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord);
- regebc = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord);
- regec4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_B_2, bMaskDWord);
- regecc = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_B_2, bMaskDWord);
-
- if (!(regeac & BIT(31)) &&
- (((regeb4 & 0x03FF0000) >> 16) != 0x142) &&
- (((regebc & 0x03FF0000) >> 16) != 0x42))
- result |= 0x01;
- else
- return result;
-
- if (!(regeac & BIT(30)) &&
- (((regec4 & 0x03FF0000) >> 16) != 0x132) &&
- (((regecc & 0x03FF0000) >> 16) != 0x36))
- result |= 0x02;
-
- return result;
-}
-
static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u8 final_candidate, bool txonly)
{
u32 Oldval_0, X, TX0_A, reg;
s32 Y, TX0_C;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
if (final_candidate == 0xFF) {
return;
} else if (iqkok) {
- Oldval_0 = (ODM_GetBBReg(dm_odm, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+ Oldval_0 = (rtl8188e_PHY_QueryBBReg(adapt, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
X = result[final_candidate][0];
if ((X & 0x00000200) != 0)
X = X | 0xFFFFFC00;
TX0_A = (X * Oldval_0) >> 8;
- ODM_SetBBReg(dm_odm, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(31), ((X * Oldval_0 >> 7) & 0x1));
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(31), ((X * Oldval_0 >> 7) & 0x1));
Y = result[final_candidate][1];
if ((Y & 0x00000200) != 0)
Y = Y | 0xFFFFFC00;
TX0_C = (Y * Oldval_0) >> 8;
- ODM_SetBBReg(dm_odm, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C & 0x3C0) >> 6));
- ODM_SetBBReg(dm_odm, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C & 0x3F));
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C & 0x3C0) >> 6));
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C & 0x3F));
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(29), ((Y * Oldval_0 >> 7) & 0x1));
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_ECCAThreshold, BIT(29), ((Y * Oldval_0 >> 7) & 0x1));
if (txonly)
return;
reg = result[final_candidate][2];
- ODM_SetBBReg(dm_odm, rOFDM0_XARxIQImbalance, 0x3FF, reg);
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XARxIQImbalance, 0x3FF, reg);
reg = result[final_candidate][3] & 0x3F;
- ODM_SetBBReg(dm_odm, rOFDM0_XARxIQImbalance, 0xFC00, reg);
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_XARxIQImbalance, 0xFC00, reg);
reg = (result[final_candidate][3] >> 6) & 0xF;
- ODM_SetBBReg(dm_odm, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
}
}
void _PHY_SaveADDARegisters(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegisterNum)
{
u32 i;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
for (i = 0; i < RegisterNum; i++) {
- ADDABackup[i] = ODM_GetBBReg(dm_odm, ADDAReg[i], bMaskDWord);
+ ADDABackup[i] = rtl8188e_PHY_QueryBBReg(adapt, ADDAReg[i], bMaskDWord);
}
}
@@ -601,22 +556,19 @@ static void _PHY_SaveMACRegisters(
)
{
u32 i;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
- MACBackup[i] = ODM_Read1Byte(dm_odm, MACReg[i]);
- }
- MACBackup[i] = ODM_Read4Byte(dm_odm, MACReg[i]);
+
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ MACBackup[i] = rtw_read8(adapt, MACReg[i]);
+
+ MACBackup[i] = rtw_read32(adapt, MACReg[i]);
}
static void reload_adda_reg(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegiesterNum)
{
u32 i;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
for (i = 0; i < RegiesterNum; i++)
- ODM_SetBBReg(dm_odm, ADDAReg[i], bMaskDWord, ADDABackup[i]);
+ rtl8188e_PHY_SetBBReg(adapt, ADDAReg[i], bMaskDWord, ADDABackup[i]);
}
static void
@@ -627,38 +579,24 @@ _PHY_ReloadMACRegisters(
)
{
u32 i;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
- ODM_Write1Byte(dm_odm, MACReg[i], (u8)MACBackup[i]);
- }
- ODM_Write4Byte(dm_odm, MACReg[i], MACBackup[i]);
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtw_write8(adapt, MACReg[i], (u8)MACBackup[i]);
+
+ rtw_write32(adapt, MACReg[i], MACBackup[i]);
}
-void
+static void
_PHY_PathADDAOn(
struct adapter *adapt,
- u32 *ADDAReg,
- bool isPathAOn,
- bool is2t
- )
+ u32 *ADDAReg)
{
- u32 pathOn;
u32 i;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
- pathOn = isPathAOn ? 0x04db25a4 : 0x0b1b25a4;
- if (!is2t) {
- pathOn = 0x0bdb25a0;
- ODM_SetBBReg(dm_odm, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
- } else {
- ODM_SetBBReg(dm_odm, ADDAReg[0], bMaskDWord, pathOn);
- }
+ rtl8188e_PHY_SetBBReg(adapt, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
for (i = 1; i < IQK_ADDA_REG_NUM; i++)
- ODM_SetBBReg(dm_odm, ADDAReg[i], bMaskDWord, pathOn);
+ rtl8188e_PHY_SetBBReg(adapt, ADDAReg[i], bMaskDWord, 0x0bdb25a0);
}
void
@@ -669,28 +607,13 @@ _PHY_MACSettingCalibration(
)
{
u32 i = 0;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
-
- ODM_Write1Byte(dm_odm, MACReg[i], 0x3F);
- for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) {
- ODM_Write1Byte(dm_odm, MACReg[i], (u8)(MACBackup[i] & (~BIT(3))));
- }
- ODM_Write1Byte(dm_odm, MACReg[i], (u8)(MACBackup[i] & (~BIT(5))));
-}
+ rtw_write8(adapt, MACReg[i], 0x3F);
-void
-_PHY_PathAStandBy(
- struct adapter *adapt
- )
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtw_write8(adapt, MACReg[i], (u8)(MACBackup[i] & (~BIT(3))));
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x0);
- ODM_SetBBReg(dm_odm, 0x840, bMaskDWord, 0x00010000);
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ rtw_write8(adapt, MACReg[i], (u8)(MACBackup[i] & (~BIT(5))));
}
static void _PHY_PIModeSwitch(
@@ -699,12 +622,10 @@ static void _PHY_PIModeSwitch(
)
{
u32 mode;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
mode = PIMode ? 0x01000100 : 0x01000000;
- ODM_SetBBReg(dm_odm, rFPGA0_XA_HSSIParameter1, bMaskDWord, mode);
- ODM_SetBBReg(dm_odm, rFPGA0_XB_HSSIParameter1, bMaskDWord, mode);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XA_HSSIParameter1, bMaskDWord, mode);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XB_HSSIParameter1, bMaskDWord, mode);
}
static bool phy_SimularityCompare_8188E(
@@ -786,12 +707,12 @@ static bool phy_SimularityCompare_8188E(
}
}
-static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t, bool is2t)
+static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *pHalData = &adapt->haldata;
struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
u32 i;
- u8 PathAOK, PathBOK;
+ u8 PathAOK;
u32 ADDA_REG[IQK_ADDA_REG_NUM] = {
rFPGA0_XCD_SwitchControl, rBlue_Tooth,
rRx_Wait_CCA, rTx_CCK_RFON,
@@ -823,9 +744,9 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
_PHY_SaveADDARegisters(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
}
- _PHY_PathADDAOn(adapt, ADDA_REG, true, is2t);
+ _PHY_PathADDAOn(adapt, ADDA_REG);
if (t == 0)
- dm_odm->RFCalibrateInfo.bRfPiEnable = (u8)ODM_GetBBReg(dm_odm, rFPGA0_XA_HSSIParameter1, BIT(8));
+ dm_odm->RFCalibrateInfo.bRfPiEnable = (u8)rtl8188e_PHY_QueryBBReg(adapt, rFPGA0_XA_HSSIParameter1, BIT(8));
if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
/* Switch BB to PI mode to do IQ Calibration. */
@@ -833,77 +754,49 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
}
/* BB setting */
- ODM_SetBBReg(dm_odm, rFPGA0_RFMOD, BIT(24), 0x00);
- ODM_SetBBReg(dm_odm, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600);
- ODM_SetBBReg(dm_odm, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4);
- ODM_SetBBReg(dm_odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000);
-
- ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFInterfaceSW, BIT(10), 0x01);
- ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFInterfaceSW, BIT(26), 0x01);
- ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT(10), 0x00);
- ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT(10), 0x00);
-
- if (is2t) {
- ODM_SetBBReg(dm_odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00010000);
- ODM_SetBBReg(dm_odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00010000);
- }
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_RFMOD, BIT(24), 0x00);
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600);
+ rtl8188e_PHY_SetBBReg(adapt, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000);
+
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XAB_RFInterfaceSW, BIT(10), 0x01);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XAB_RFInterfaceSW, BIT(26), 0x01);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XA_RFInterfaceOE, BIT(10), 0x00);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XB_RFInterfaceOE, BIT(10), 0x00);
/* MAC settings */
_PHY_MACSettingCalibration(adapt, IQK_MAC_REG, dm_odm->RFCalibrateInfo.IQK_MAC_backup);
/* Page B init */
/* AP or IQK */
- ODM_SetBBReg(dm_odm, rConfig_AntA, bMaskDWord, 0x0f600000);
+ rtl8188e_PHY_SetBBReg(adapt, rConfig_AntA, bMaskDWord, 0x0f600000);
- if (is2t)
- ODM_SetBBReg(dm_odm, rConfig_AntB, bMaskDWord, 0x0f600000);
/* IQ calibration setting */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
- ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, 0x01007c00);
- ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x81004800);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK, bMaskDWord, 0x01007c00);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK, bMaskDWord, 0x81004800);
for (i = 0; i < retryCount; i++) {
- PathAOK = phy_PathA_IQK_8188E(adapt, is2t);
+ PathAOK = phy_PathA_IQK_8188E(adapt);
if (PathAOK == 0x01) {
- result[t][0] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][1] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord) & 0x3FF0000) >> 16;
+ result[t][0] = (rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_Before_IQK_A, bMaskDWord) & 0x3FF0000) >> 16;
+ result[t][1] = (rtl8188e_PHY_QueryBBReg(adapt, rTx_Power_After_IQK_A, bMaskDWord) & 0x3FF0000) >> 16;
break;
}
}
for (i = 0; i < retryCount; i++) {
- PathAOK = phy_PathA_RxIQK(adapt, is2t);
+ PathAOK = phy_PathA_RxIQK(adapt);
if (PathAOK == 0x03) {
- result[t][2] = (ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][3] = (ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord) & 0x3FF0000) >> 16;
+ result[t][2] = (rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_Before_IQK_A_2, bMaskDWord) & 0x3FF0000) >> 16;
+ result[t][3] = (rtl8188e_PHY_QueryBBReg(adapt, rRx_Power_After_IQK_A_2, bMaskDWord) & 0x3FF0000) >> 16;
break;
}
}
- if (is2t) {
- _PHY_PathAStandBy(adapt);
-
- /* Turn Path B ADDA on */
- _PHY_PathADDAOn(adapt, ADDA_REG, false, is2t);
-
- for (i = 0; i < retryCount; i++) {
- PathBOK = phy_PathB_IQK_8188E(adapt);
- if (PathBOK == 0x03) {
- result[t][4] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][5] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][6] = (ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_B_2, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][7] = (ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_B_2, bMaskDWord) & 0x3FF0000) >> 16;
- break;
- } else if (i == (retryCount - 1) && PathBOK == 0x01) { /* Tx IQK OK */
- result[t][4] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord) & 0x3FF0000) >> 16;
- result[t][5] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord) & 0x3FF0000) >> 16;
- }
- }
- }
-
/* Back to BB mode, load original value */
- ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_IQK, bMaskDWord, 0);
if (t != 0) {
if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
@@ -920,13 +813,11 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
reload_adda_reg(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
/* Restore RX initial gain */
- ODM_SetBBReg(dm_odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00032ed3);
- if (is2t)
- ODM_SetBBReg(dm_odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00032ed3);
+ rtl8188e_PHY_SetBBReg(adapt, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00032ed3);
/* load 0xe30 IQC default value */
- ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
- ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ rtl8188e_PHY_SetBBReg(adapt, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ rtl8188e_PHY_SetBBReg(adapt, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
}
}
@@ -934,62 +825,60 @@ static void phy_LCCalibrate_8188E(struct adapter *adapt, bool is2t)
{
u8 tmpreg;
u32 RF_Amode = 0, RF_Bmode = 0, LC_Cal;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
/* Check continuous TX and Packet TX */
- tmpreg = ODM_Read1Byte(dm_odm, 0xd03);
+ tmpreg = rtw_read8(adapt, 0xd03);
 	if ((tmpreg & 0x70) != 0) /* Deal with continuous TX case */
- ODM_Write1Byte(dm_odm, 0xd03, tmpreg & 0x8F); /* disable all continuous TX */
+ rtw_write8(adapt, 0xd03, tmpreg & 0x8F); /* disable all continuous TX */
else /* Deal with Packet TX case */
- ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0xFF); /* block all queues */
+ rtw_write8(adapt, REG_TXPAUSE, 0xFF); /* block all queues */
if ((tmpreg & 0x70) != 0) {
/* 1. Read original RF mode */
/* Path-A */
- RF_Amode = PHY_QueryRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits);
+ RF_Amode = rtl8188e_PHY_QueryRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits);
/* Path-B */
if (is2t)
- RF_Bmode = PHY_QueryRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits);
+ RF_Bmode = rtl8188e_PHY_QueryRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits);
/* 2. Set RF mode = standby mode */
/* Path-A */
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode & 0x8FFFF) | 0x10000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode & 0x8FFFF) | 0x10000);
/* Path-B */
if (is2t)
- ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode & 0x8FFFF) | 0x10000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode & 0x8FFFF) | 0x10000);
}
/* 3. Read RF reg18 */
- LC_Cal = PHY_QueryRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits);
+ LC_Cal = rtl8188e_PHY_QueryRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits);
/* 4. Set LC calibration begin bit15 */
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal | 0x08000);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal | 0x08000);
- ODM_sleep_ms(100);
+ msleep(100);
/* Restore original situation */
if ((tmpreg & 0x70) != 0) {
/* Deal with continuous TX case */
/* Path-A */
- ODM_Write1Byte(dm_odm, 0xd03, tmpreg);
- ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
+ rtw_write8(adapt, 0xd03, tmpreg);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
/* Path-B */
if (is2t)
- ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
+ rtl8188e_PHY_SetRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
} else {
/* Deal with Packet TX case */
- ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0x00);
+ rtw_write8(adapt, REG_TXPAUSE, 0x00);
}
}
void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *pHalData = &adapt->haldata;
struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
s32 result[4][8]; /* last is final result */
u8 i, final_candidate;
@@ -1032,7 +921,7 @@ void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
is13simular = false;
for (i = 0; i < 3; i++) {
- phy_IQCalibrate_8188E(adapt, result, i, false);
+ phy_IQCalibrate_8188E(adapt, result, i);
if (i == 1) {
is12simular = phy_SimularityCompare_8188E(adapt, result, 0, 1);
@@ -1101,7 +990,7 @@ void PHY_LCCalibrate_8188E(struct adapter *adapt)
{
bool singletone = false, carrier_sup = false;
u32 timeout = 2000, timecount = 0;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *pHalData = &adapt->haldata;
struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
@@ -1111,7 +1000,7 @@ void PHY_LCCalibrate_8188E(struct adapter *adapt)
return;
while (*dm_odm->pbScanInProcess && timecount < timeout) {
- ODM_delay_ms(50);
+ mdelay(50);
timecount += 50;
}
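
The LC calibration above is a save/trigger/restore sequence: TX is paused (or the RF paths are parked in standby mode), bit 15 of RF register 0x18 (RF_CHNLBW) kicks off the calibration, the driver sleeps while the hardware settles, and the original TX state is put back. A minimal sketch of that flow, reusing the accessors this patch introduces; the helper itself is illustrative, not driver code, and the RF-mode save/restore for the continuous-TX case is omitted for brevity:

	/* Sketch of the save/trigger/restore pattern in phy_LCCalibrate_8188E(). */
	static void lc_calibrate_sketch(struct adapter *adapt)
	{
		u8 txctl = rtw_read8(adapt, 0xd03);
		u32 lc_cal;

		if (txctl & 0x70)
			rtw_write8(adapt, 0xd03, txctl & 0x8F);	/* stop continuous TX */
		else
			rtw_write8(adapt, REG_TXPAUSE, 0xFF);	/* block all TX queues */

		/* Trigger: RF reg 0x18 (RF_CHNLBW), bit 15 starts LC calibration. */
		lc_cal = rtl8188e_PHY_QueryRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits);
		rtl8188e_PHY_SetRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits, lc_cal | 0x08000);

		msleep(100);	/* give the hardware time to settle */

		/* Restore the original TX state. */
		if (txctl & 0x70)
			rtw_write8(adapt, 0xd03, txctl);
		else
			rtw_write8(adapt, REG_TXPAUSE, 0x00);
	}
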
diff --git a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c b/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
index 0fd11aca7ac7..47ad4ea273cc 100644
--- a/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
+++ b/drivers/staging/r8188eu/hal/HalPwrSeqCmd.c
@@ -25,8 +25,7 @@ Major Change History:
* Assumption:
* We should follow specific format which was released from HW SD.
*/
-u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
- u8 ifacetype, struct wl_pwr_cfg pwrseqcmd[])
+u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg pwrseqcmd[])
{
struct wl_pwr_cfg pwrcfgcmd = {0};
u8 poll_bit = false;
@@ -39,54 +38,49 @@ u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
do {
pwrcfgcmd = pwrseqcmd[aryidx];
- /* 2 Only Handle the command whose FAB, CUT, and Interface are matched */
- if ((GET_PWR_CFG_FAB_MASK(pwrcfgcmd) & fab_vers) &&
- (GET_PWR_CFG_CUT_MASK(pwrcfgcmd) & cut_vers) &&
- (GET_PWR_CFG_INTF_MASK(pwrcfgcmd) & ifacetype)) {
- switch (GET_PWR_CFG_CMD(pwrcfgcmd)) {
- case PWR_CMD_WRITE:
- offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
+ switch (GET_PWR_CFG_CMD(pwrcfgcmd)) {
+ case PWR_CMD_WRITE:
+ offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
- /* Read the value from system register */
- value = rtw_read8(padapter, offset);
-
- value &= ~(GET_PWR_CFG_MASK(pwrcfgcmd));
- value |= (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd));
+ /* Read the value from system register */
+ value = rtw_read8(padapter, offset);
- /* Write the value back to system register */
- rtw_write8(padapter, offset, value);
- break;
- case PWR_CMD_POLLING:
- poll_bit = false;
- offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
- do {
- value = rtw_read8(padapter, offset);
+ value &= ~(GET_PWR_CFG_MASK(pwrcfgcmd));
+ value |= (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd));
- value &= GET_PWR_CFG_MASK(pwrcfgcmd);
- if (value == (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd)))
- poll_bit = true;
- else
- udelay(10);
+ /* Write the value back to system register */
+ rtw_write8(padapter, offset, value);
+ break;
+ case PWR_CMD_POLLING:
+ poll_bit = false;
+ offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
+ do {
+ value = rtw_read8(padapter, offset);
- if (poll_count++ > max_poll_count) {
- DBG_88E("Fail to polling Offset[%#x]\n", offset);
- return false;
- }
- } while (!poll_bit);
- break;
- case PWR_CMD_DELAY:
- if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US)
- udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd));
+ value &= GET_PWR_CFG_MASK(pwrcfgcmd);
+ if (value == (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd)))
+ poll_bit = true;
else
- udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd) * 1000);
- break;
- case PWR_CMD_END:
- /* When this command is parsed, end the process */
- return true;
- break;
- default:
- break;
- }
+ udelay(10);
+
+ if (poll_count++ > max_poll_count) {
+ DBG_88E("Fail to polling Offset[%#x]\n", offset);
+ return false;
+ }
+ } while (!poll_bit);
+ break;
+ case PWR_CMD_DELAY:
+ if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US)
+ udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd));
+ else
+ udelay(GET_PWR_CFG_OFFSET(pwrcfgcmd) * 1000);
+ break;
+ case PWR_CMD_END:
+ /* When this command is parsed, end the process */
+ return true;
+ break;
+ default:
+ break;
}
aryidx++;/* Add Array Index */
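
HalPwrSeqCmdParsing() is a table-driven interpreter: each wl_pwr_cfg entry carries a command plus an offset/mask/value, and the loop runs until it reaches PWR_CMD_END. Since this driver supports a single chip cut, fab and interface, the FAB/CUT/INTF matching removed above could never reject an entry, so dropping it flattens one indentation level without changing behavior. A condensed sketch of the same interpreter shape; struct pwr_step and its enum are simplified stand-ins for wl_pwr_cfg and the GET_PWR_CFG_* macros, not the real definitions:

	enum pwr_cmd { PWR_WRITE, PWR_POLL, PWR_DELAY_US, PWR_END };

	struct pwr_step {
		enum pwr_cmd cmd;
		u16 offset;	/* register offset, or delay in microseconds */
		u8 mask;
		u8 value;
	};

	static bool run_pwr_seq(struct adapter *adapt, const struct pwr_step *seq)
	{
		for (;; seq++) {
			u8 v;
			int tries = 5000;

			switch (seq->cmd) {
			case PWR_WRITE:		/* read-modify-write under mask */
				v = rtw_read8(adapt, seq->offset);
				v = (v & ~seq->mask) | (seq->value & seq->mask);
				rtw_write8(adapt, seq->offset, v);
				break;
			case PWR_POLL:		/* spin until the masked bits match */
				do {
					v = rtw_read8(adapt, seq->offset) & seq->mask;
					if (v == (seq->value & seq->mask))
						break;
					udelay(10);
				} while (--tries);
				if (!tries)
					return false;	/* poll timeout, as in the DBG_88E path */
				break;
			case PWR_DELAY_US:
				udelay(seq->offset);
				break;
			case PWR_END:
				return true;
			}
		}
	}
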
diff --git a/drivers/staging/r8188eu/hal/odm.c b/drivers/staging/r8188eu/hal/odm.c
index 21f115194df8..d8fa587ff286 100644
--- a/drivers/staging/r8188eu/hal/odm.c
+++ b/drivers/staging/r8188eu/hal/odm.c
@@ -1,9 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-/* include files */
-
-#include "../include/odm_precomp.h"
+#include "../include/drv_types.h"
/* avoid a warning in FreeBSD ==> TODO: modify */
static u32 EDCAParam[HT_IOT_PEER_MAX][3] = {
@@ -142,222 +140,26 @@ u8 CCKSwingTable_Ch14[CCK_TABLE_SIZE][8] = {
#define RxDefaultAnt1 0x65a9
#define RxDefaultAnt2 0x569a
-/* 3 Export Interface */
-
-/* 2011/09/21 MH Add to describe different team necessary resource allocate?? */
-void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
-{
- /* 2012.05.03 Luke: For all IC series */
- odm_CommonInfoSelfInit(pDM_Odm);
- odm_DIGInit(pDM_Odm);
- odm_RateAdaptiveMaskInit(pDM_Odm);
-
- odm_PrimaryCCA_Init(pDM_Odm); /* Gary */
- odm_DynamicBBPowerSavingInit(pDM_Odm);
- odm_TXPowerTrackingInit(pDM_Odm);
- ODM_EdcaTurboInit(pDM_Odm);
- ODM_RAInfo_Init_all(pDM_Odm);
- if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
- odm_InitHybridAntDiv(pDM_Odm);
-}
-
-/* 2011/09/20 MH This is the entry pointer for all team to execute HW out source DM. */
-/* You can not add any dummy function here, be care, you can only use DM structure */
-/* to perform any new ODM_DM. */
-void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
-{
- /* 2012.05.03 Luke: For all IC series */
- odm_CommonInfoSelfUpdate(pDM_Odm);
- odm_FalseAlarmCounterStatistics(pDM_Odm);
- odm_RSSIMonitorCheck(pDM_Odm);
-
- odm_DIG(pDM_Odm);
- odm_CCKPacketDetectionThresh(pDM_Odm);
-
- if (*pDM_Odm->pbPowerSaving)
- return;
-
- odm_RefreshRateAdaptiveMask(pDM_Odm);
-
- if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
- odm_HwAntDiv(pDM_Odm);
-
- ODM_TXPowerTrackingCheck(pDM_Odm);
- odm_EdcaTurboCheck(pDM_Odm);
-}
-
-/* Init /.. Fixed HW value. Only init time. */
-void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, u32 Value)
-{
- /* This section is used for init value */
- switch (CmnInfo) {
- /* Fixed ODM value. */
- case ODM_CMNINFO_ABILITY:
- pDM_Odm->SupportAbility = (u32)Value;
- break;
- case ODM_CMNINFO_MP_TEST_CHIP:
- pDM_Odm->bIsMPChip = (u8)Value;
- break;
- case ODM_CMNINFO_RF_ANTENNA_TYPE:
- pDM_Odm->AntDivType = (u8)Value;
- break;
- /* To remove the compiler warning, must add an empty default statement to handle the other values. */
- default:
- /* do nothing */
- break;
- }
-
- /* Tx power tracking BB swing table. */
- /* The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
- pDM_Odm->BbSwingIdxOfdm = 12; /* Set defalut value as index 12. */
- pDM_Odm->BbSwingIdxOfdmCurrent = 12;
- pDM_Odm->BbSwingFlagOfdm = false;
-}
-
-void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, void *pValue)
-{
- /* */
- /* Hook call by reference pointer. */
- /* */
- switch (CmnInfo) {
- /* Dynamic call by reference pointer. */
- case ODM_CMNINFO_TX_UNI:
- pDM_Odm->pNumTxBytesUnicast = (u64 *)pValue;
- break;
- case ODM_CMNINFO_RX_UNI:
- pDM_Odm->pNumRxBytesUnicast = (u64 *)pValue;
- break;
- case ODM_CMNINFO_WM_MODE:
- pDM_Odm->pWirelessMode = (u8 *)pValue;
- break;
- case ODM_CMNINFO_SEC_CHNL_OFFSET:
- pDM_Odm->pSecChOffset = (u8 *)pValue;
- break;
- case ODM_CMNINFO_SEC_MODE:
- pDM_Odm->pSecurity = (u8 *)pValue;
- break;
- case ODM_CMNINFO_BW:
- pDM_Odm->pBandWidth = (u8 *)pValue;
- break;
- case ODM_CMNINFO_CHNL:
- pDM_Odm->pChannel = (u8 *)pValue;
- break;
- case ODM_CMNINFO_SCAN:
- pDM_Odm->pbScanInProcess = (bool *)pValue;
- break;
- case ODM_CMNINFO_POWER_SAVING:
- pDM_Odm->pbPowerSaving = (bool *)pValue;
- break;
- case ODM_CMNINFO_NET_CLOSED:
- pDM_Odm->pbNet_closed = (bool *)pValue;
- break;
- /* To remove the compiler warning, must add an empty default statement to handle the other values. */
- default:
- /* do nothing */
- break;
- }
-}
-
-/* Update Band/CHannel/.. The values are dynamic but non-per-packet. */
-void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value)
-{
- /* */
- /* This init variable may be changed in run time. */
- /* */
- switch (CmnInfo) {
- case ODM_CMNINFO_ABILITY:
- pDM_Odm->SupportAbility = (u32)Value;
- break;
- case ODM_CMNINFO_WIFI_DIRECT:
- pDM_Odm->bWIFI_Direct = (bool)Value;
- break;
- case ODM_CMNINFO_WIFI_DISPLAY:
- pDM_Odm->bWIFI_Display = (bool)Value;
- break;
- case ODM_CMNINFO_LINK:
- pDM_Odm->bLinked = (bool)Value;
- break;
- case ODM_CMNINFO_RSSI_MIN:
- pDM_Odm->RSSI_Min = (u8)Value;
- break;
- }
-}
-
-void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
-{
- pDM_Odm->bCckHighPower = (bool)ODM_GetBBReg(pDM_Odm, 0x824, BIT(9));
- pDM_Odm->RFPathRxEnable = (u8)ODM_GetBBReg(pDM_Odm, 0xc04, 0x0F);
-}
-
-void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
-{
- u8 EntryCnt = 0;
- u8 i;
- struct sta_info *pEntry;
-
- if (*pDM_Odm->pBandWidth == ODM_BW40M) {
- if (*pDM_Odm->pSecChOffset == 1)
- pDM_Odm->ControlChannel = *pDM_Odm->pChannel - 2;
- else if (*pDM_Odm->pSecChOffset == 2)
- pDM_Odm->ControlChannel = *pDM_Odm->pChannel + 2;
- } else {
- pDM_Odm->ControlChannel = *pDM_Odm->pChannel;
- }
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- pEntry = pDM_Odm->pODM_StaInfo[i];
- if (IS_STA_VALID(pEntry))
- EntryCnt++;
- }
- if (EntryCnt == 1)
- pDM_Odm->bOneEntryOnly = true;
- else
- pDM_Odm->bOneEntryOnly = false;
-}
-
-void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI)
+static void odm_DIGInit(struct odm_dm_struct *pDM_Odm)
{
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+ struct adapter *adapter = pDM_Odm->Adapter;
- if (pDM_DigTable->CurIGValue != CurrentIGI) {
- ODM_SetBBReg(pDM_Odm, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N, CurrentIGI);
- pDM_DigTable->CurIGValue = CurrentIGI;
- }
-}
-
-void odm_DIGInit(struct odm_dm_struct *pDM_Odm)
-{
- struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
-
- pDM_DigTable->CurIGValue = (u8)ODM_GetBBReg(pDM_Odm, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N);
- pDM_DigTable->RssiLowThresh = DM_DIG_THRESH_LOW;
- pDM_DigTable->RssiHighThresh = DM_DIG_THRESH_HIGH;
- pDM_DigTable->FALowThresh = DM_false_ALARM_THRESH_LOW;
- pDM_DigTable->FAHighThresh = DM_false_ALARM_THRESH_HIGH;
+ pDM_DigTable->CurIGValue = (u8)rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N);
pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
- pDM_DigTable->BackoffVal = DM_DIG_BACKOFF_DEFAULT;
- pDM_DigTable->BackoffVal_range_max = DM_DIG_BACKOFF_MAX;
- pDM_DigTable->BackoffVal_range_min = DM_DIG_BACKOFF_MIN;
- pDM_DigTable->PreCCK_CCAThres = 0xFF;
pDM_DigTable->CurCCK_CCAThres = 0x83;
pDM_DigTable->ForbiddenIGI = DM_DIG_MIN_NIC;
pDM_DigTable->LargeFAHit = 0;
pDM_DigTable->Recover_cnt = 0;
pDM_DigTable->DIG_Dynamic_MIN_0 = DM_DIG_MIN_NIC;
- pDM_DigTable->DIG_Dynamic_MIN_1 = DM_DIG_MIN_NIC;
pDM_DigTable->bMediaConnect_0 = false;
- pDM_DigTable->bMediaConnect_1 = false;
/* To Initialize pDM_Odm->bDMInitialGainEnable == false to avoid DIG error */
pDM_Odm->bDMInitialGainEnable = true;
}
-void odm_DIG(struct odm_dm_struct *pDM_Odm)
+static void odm_DIG(struct odm_dm_struct *pDM_Odm)
{
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
struct false_alarm_stats *pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
@@ -367,7 +169,7 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
u8 dm_dig_max, dm_dig_min;
u8 CurrentIGI = pDM_DigTable->CurIGValue;
- if ((!(pDM_Odm->SupportAbility & ODM_BB_DIG)) || (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT)))
+ if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
return;
if (*pDM_Odm->pbScanInProcess)
@@ -457,11 +259,11 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
CurrentIGI = pDM_Odm->RSSI_Min;
} else {
if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
- CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
+ CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1)
- CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
+ CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0)
- CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
+ CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
}
} else {
if (FirstDisConnect) {
@@ -489,52 +291,119 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
pDM_DigTable->DIG_Dynamic_MIN_0 = DIG_Dynamic_MIN;
}
-/* 3============================================================ */
-/* 3 FASLE ALARM CHECK */
-/* 3============================================================ */
+static void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *adapter = pDM_Odm->Adapter;
+
+ pDM_Odm->bCckHighPower = (bool)rtl8188e_PHY_QueryBBReg(adapter, 0x824, BIT(9));
+ pDM_Odm->RFPathRxEnable = (u8)rtl8188e_PHY_QueryBBReg(adapter, 0xc04, 0x0F);
+}
-void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
+static void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
+{
+ u8 EntryCnt = 0;
+ u8 i;
+ struct sta_info *pEntry;
+
+ if (*pDM_Odm->pBandWidth == ODM_BW40M) {
+ if (*pDM_Odm->pSecChOffset == 1)
+ pDM_Odm->ControlChannel = *pDM_Odm->pChannel - 2;
+ else if (*pDM_Odm->pSecChOffset == 2)
+ pDM_Odm->ControlChannel = *pDM_Odm->pChannel + 2;
+ } else {
+ pDM_Odm->ControlChannel = *pDM_Odm->pChannel;
+ }
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ pEntry = pDM_Odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(pEntry))
+ EntryCnt++;
+ }
+ if (EntryCnt == 1)
+ pDM_Odm->bOneEntryOnly = true;
+ else
+ pDM_Odm->bOneEntryOnly = false;
+}
+
+static void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct odm_rate_adapt *pOdmRA = &pDM_Odm->RateAdaptive;
+
+ pOdmRA->RATRState = DM_RATR_STA_INIT;
+ pOdmRA->HighRSSIThresh = 50;
+ pOdmRA->LowRSSIThresh = 20;
+}
+
+static void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm)
+{
+ u8 i;
+ struct adapter *pAdapter = pDM_Odm->Adapter;
+
+ if (pAdapter->bDriverStopped)
+ return;
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ struct sta_info *pstat = pDM_Odm->pODM_StaInfo[i];
+
+ if (IS_STA_VALID(pstat)) {
+ if (ODM_RAStateCheck(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level))
+ rtw_hal_update_ra_mask(pAdapter, i, pstat->rssi_level);
+ }
+ }
+}
+
+static void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
+
+ pDM_PSTable->pre_rf_state = RF_MAX;
+ pDM_PSTable->cur_rf_state = RF_MAX;
+ pDM_PSTable->initialize = 0;
+}
+
+static void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
{
u32 ret_value;
struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
+ struct adapter *adapter = pDM_Odm->Adapter;
if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
return;
/* hold ofdm counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1); /* hold page C counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT(31), 1); /* hold page D counter */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1); /* hold page C counter */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_OFDM_FA_RSTD_11N, BIT(31), 1); /* hold page D counter */
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
FalseAlmCnt->Cnt_Fast_Fsync = (ret_value & 0xffff);
FalseAlmCnt->Cnt_SB_Search_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
FalseAlmCnt->Cnt_OFDM_CCA = (ret_value & 0xffff);
FalseAlmCnt->Cnt_Parity_Fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
FalseAlmCnt->Cnt_Rate_Illegal = (ret_value & 0xffff);
FalseAlmCnt->Cnt_Crc8_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
FalseAlmCnt->Cnt_Mcs_fail = (ret_value & 0xffff);
FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail + FalseAlmCnt->Cnt_Rate_Illegal +
FalseAlmCnt->Cnt_Crc8_fail + FalseAlmCnt->Cnt_Mcs_fail +
FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail;
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_SC_CNT_11N, bMaskDWord);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_SC_CNT_11N, bMaskDWord);
FalseAlmCnt->Cnt_BW_LSC = (ret_value & 0xffff);
FalseAlmCnt->Cnt_BW_USC = ((ret_value & 0xffff0000) >> 16);
/* hold cck counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT(12), 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT(14), 1);
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_FA_RST_11N, BIT(12), 1);
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_FA_RST_11N, BIT(14), 1);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
FalseAlmCnt->Cnt_Cck_fail = ret_value;
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff) << 8;
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
+ ret_value = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
FalseAlmCnt->Cnt_CCK_CCA = ((ret_value & 0xFF) << 8) | ((ret_value & 0xFF00) >> 8);
FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
@@ -548,11 +417,7 @@ void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
}
-/* 3============================================================ */
-/* 3 CCK Packet Detect Threshold */
-/* 3============================================================ */
-
-void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
+static void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
{
u8 CurCCK_CCAThres;
struct false_alarm_stats *FalseAlmCnt = &pDM_Odm->FalseAlmCnt;
@@ -579,235 +444,9 @@ void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
ODM_Write_CCK_CCA_Thres(pDM_Odm, CurCCK_CCAThres);
}
-void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres)
-{
- struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
-
- if (pDM_DigTable->CurCCK_CCAThres != CurCCK_CCAThres) /* modify by Guo.Mingzhi 2012-01-03 */
- ODM_Write1Byte(pDM_Odm, ODM_REG_CCK_CCA_11N, CurCCK_CCAThres);
- pDM_DigTable->PreCCK_CCAThres = pDM_DigTable->CurCCK_CCAThres;
- pDM_DigTable->CurCCK_CCAThres = CurCCK_CCAThres;
-}
-
-/* 3============================================================ */
-/* 3 BB Power Save */
-/* 3============================================================ */
-void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm)
-{
- struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
-
- pDM_PSTable->pre_cca_state = CCA_MAX;
- pDM_PSTable->cur_cca_state = CCA_MAX;
- pDM_PSTable->pre_rf_state = RF_MAX;
- pDM_PSTable->cur_rf_state = RF_MAX;
- pDM_PSTable->rssi_val_min = 0;
- pDM_PSTable->initialize = 0;
-}
-
-void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
-{
- struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
- u8 Rssi_Up_bound = 30;
- u8 Rssi_Low_bound = 25;
-
- if (pDM_PSTable->initialize == 0) {
- pDM_PSTable->reg_874 = (ODM_GetBBReg(pDM_Odm, 0x874, bMaskDWord) & 0x1CC000) >> 14;
- pDM_PSTable->reg_c70 = (ODM_GetBBReg(pDM_Odm, 0xc70, bMaskDWord) & BIT(3)) >> 3;
- pDM_PSTable->reg_85c = (ODM_GetBBReg(pDM_Odm, 0x85c, bMaskDWord) & 0xFF000000) >> 24;
- pDM_PSTable->reg_a74 = (ODM_GetBBReg(pDM_Odm, 0xa74, bMaskDWord) & 0xF000) >> 12;
- pDM_PSTable->initialize = 1;
- }
-
- if (!bForceInNormal) {
- if (pDM_Odm->RSSI_Min != 0xFF) {
- if (pDM_PSTable->pre_rf_state == RF_Normal) {
- if (pDM_Odm->RSSI_Min >= Rssi_Up_bound)
- pDM_PSTable->cur_rf_state = RF_Save;
- else
- pDM_PSTable->cur_rf_state = RF_Normal;
- } else {
- if (pDM_Odm->RSSI_Min <= Rssi_Low_bound)
- pDM_PSTable->cur_rf_state = RF_Normal;
- else
- pDM_PSTable->cur_rf_state = RF_Save;
- }
- } else {
- pDM_PSTable->cur_rf_state = RF_MAX;
- }
- } else {
- pDM_PSTable->cur_rf_state = RF_Normal;
- }
-
- if (pDM_PSTable->pre_rf_state != pDM_PSTable->cur_rf_state) {
- if (pDM_PSTable->cur_rf_state == RF_Save) {
- ODM_SetBBReg(pDM_Odm, 0x874, 0x1C0000, 0x2); /* Reg874[20:18]=3'b010 */
- ODM_SetBBReg(pDM_Odm, 0xc70, BIT(3), 0); /* RegC70[3]=1'b0 */
- ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]=0x63 */
- ODM_SetBBReg(pDM_Odm, 0x874, 0xC000, 0x2); /* Reg874[15:14]=2'b10 */
- ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, 0x3); /* RegA75[7:4]=0x3 */
- ODM_SetBBReg(pDM_Odm, 0x818, BIT(28), 0x0); /* Reg818[28]=1'b0 */
- ODM_SetBBReg(pDM_Odm, 0x818, BIT(28), 0x1); /* Reg818[28]=1'b1 */
- } else {
- ODM_SetBBReg(pDM_Odm, 0x874, 0x1CC000, pDM_PSTable->reg_874);
- ODM_SetBBReg(pDM_Odm, 0xc70, BIT(3), pDM_PSTable->reg_c70);
- ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, pDM_PSTable->reg_85c);
- ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, pDM_PSTable->reg_a74);
- ODM_SetBBReg(pDM_Odm, 0x818, BIT(28), 0x0);
- }
- pDM_PSTable->pre_rf_state = pDM_PSTable->cur_rf_state;
- }
-}
-
-/* 3============================================================ */
-/* 3 RATR MASK */
-/* 3============================================================ */
-/* 3============================================================ */
-/* 3 Rate Adaptive */
-/* 3============================================================ */
-
-void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm)
-{
- struct odm_rate_adapt *pOdmRA = &pDM_Odm->RateAdaptive;
-
- pOdmRA->RATRState = DM_RATR_STA_INIT;
- pOdmRA->HighRSSIThresh = 50;
- pOdmRA->LowRSSIThresh = 20;
-}
-
-u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u8 rssi_level)
-{
- struct sta_info *pEntry;
- u32 rate_bitmap = 0x0fffffff;
- u8 WirelessMode;
-
- pEntry = pDM_Odm->pODM_StaInfo[macid];
- if (!IS_STA_VALID(pEntry))
- return ra_mask;
-
- WirelessMode = pEntry->wireless_mode;
-
- switch (WirelessMode) {
- case ODM_WM_B:
- if (ra_mask & 0x0000000c) /* 11M or 5.5M enable */
- rate_bitmap = 0x0000000d;
- else
- rate_bitmap = 0x0000000f;
- break;
- case (ODM_WM_B | ODM_WM_G):
- if (rssi_level == DM_RATR_STA_HIGH)
- rate_bitmap = 0x00000f00;
- else if (rssi_level == DM_RATR_STA_MIDDLE)
- rate_bitmap = 0x00000ff0;
- else
- rate_bitmap = 0x00000ff5;
- break;
- case (ODM_WM_B | ODM_WM_G | ODM_WM_N24G):
- if (rssi_level == DM_RATR_STA_HIGH) {
- rate_bitmap = 0x000f0000;
- } else if (rssi_level == DM_RATR_STA_MIDDLE) {
- rate_bitmap = 0x000ff000;
- } else {
- if (*pDM_Odm->pBandWidth == ODM_BW40M)
- rate_bitmap = 0x000ff015;
- else
- rate_bitmap = 0x000ff005;
- }
- break;
- default:
- /* case WIRELESS_11_24N: */
- rate_bitmap = 0x0fffffff;
- break;
- }
-
- return rate_bitmap;
-}
-
-/*-----------------------------------------------------------------------------
- * Function: odm_RefreshRateAdaptiveMask()
- *
- * Overview: Update rate table mask according to rssi
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/27/2009 hpfan Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm)
-{
- u8 i;
- struct adapter *pAdapter = pDM_Odm->Adapter;
-
- if (!(pDM_Odm->SupportAbility & ODM_BB_RA_MASK))
- return;
-
- if (pAdapter->bDriverStopped)
- return;
-
- for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
- struct sta_info *pstat = pDM_Odm->pODM_StaInfo[i];
- if (IS_STA_VALID(pstat)) {
- if (ODM_RAStateCheck(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level))
- rtw_hal_update_ra_mask(pAdapter, i, pstat->rssi_level);
- }
- }
-}
-
-/* Return Value: bool */
-/* - true: RATRState is changed. */
-bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate, u8 *pRATRState)
-{
- struct odm_rate_adapt *pRA = &pDM_Odm->RateAdaptive;
- const u8 GoUpGap = 5;
- u8 HighRSSIThreshForRA = pRA->HighRSSIThresh;
- u8 LowRSSIThreshForRA = pRA->LowRSSIThresh;
- u8 RATRState;
-
- /* Threshold Adjustment: */
- /* when RSSI state trends to go up one or two levels, make sure RSSI is high enough. */
- /* Here GoUpGap is added to solve the boundary's level alternation issue. */
- switch (*pRATRState) {
- case DM_RATR_STA_INIT:
- case DM_RATR_STA_HIGH:
- break;
- case DM_RATR_STA_MIDDLE:
- HighRSSIThreshForRA += GoUpGap;
- break;
- case DM_RATR_STA_LOW:
- HighRSSIThreshForRA += GoUpGap;
- LowRSSIThreshForRA += GoUpGap;
- break;
- default:
- break;
- }
-
- /* Decide RATRState by RSSI. */
- if (RSSI > HighRSSIThreshForRA)
- RATRState = DM_RATR_STA_HIGH;
- else if (RSSI > LowRSSIThreshForRA)
- RATRState = DM_RATR_STA_MIDDLE;
- else
- RATRState = DM_RATR_STA_LOW;
-
- if (*pRATRState != RATRState || bForceUpdate) {
- *pRATRState = RATRState;
- return true;
- }
- return false;
-}
-
-/* 3============================================================ */
-/* 3 RSSI Monitor */
-/* 3============================================================ */
-
static void FindMinimumRSSI(struct adapter *pAdapter)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct hal_data_8188e *pHalData = &pAdapter->haldata;
struct dm_priv *pdmpriv = &pHalData->dmpriv;
struct mlme_priv *pmlmepriv = &pAdapter->mlmepriv;
@@ -819,10 +458,10 @@ static void FindMinimumRSSI(struct adapter *pAdapter)
pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
}
-void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
+static void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
{
struct adapter *Adapter = pDM_Odm->Adapter;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
struct dm_priv *pdmpriv = &pHalData->dmpriv;
int i;
int tmpEntryMaxPWDB = 0, tmpEntryMinPWDB = 0xff;
@@ -877,44 +516,15 @@ void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_RSSI_MIN, pdmpriv->MinUndecoratedPWDBForDM);
}
-/* 3============================================================ */
-/* 3 Tx Power Tracking */
-/* 3============================================================ */
-
-void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm)
-{
- odm_TXPowerTrackingThermalMeterInit(pDM_Odm);
-}
-
-void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm)
+static void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm)
{
pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = false;
- MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl);
-
pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
}
-void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
-
- if (!(pDM_Odm->SupportAbility & ODM_RF_TX_PWR_TRACK))
- return;
-
- if (!pDM_Odm->RFCalibrateInfo.TM_Trigger) { /* at least delay 1 sec */
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_T_METER_88E, BIT(17) | BIT(16), 0x03);
-
- pDM_Odm->RFCalibrateInfo.TM_Trigger = 1;
- return;
- } else {
- odm_TXPowerTrackingCallback_ThermalMeter_8188E(Adapter);
- pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
- }
-}
-
-void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm)
+static void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm)
{
if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV))
return;
@@ -922,7 +532,7 @@ void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm)
ODM_AntennaDiversityInit_88E(pDM_Odm);
}
-void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
+static void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
{
if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV))
return;
@@ -930,17 +540,15 @@ void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
ODM_AntennaDiversity_88E(pDM_Odm);
}
-/* EDCA Turbo */
-void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm)
+static void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm)
{
struct adapter *Adapter = pDM_Odm->Adapter;
pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
pDM_Odm->DM_EDCA_Table.bIsCurRDLState = false;
Adapter->recvpriv.bIsAnyNonBEPkts = false;
+}
-} /* ODM_InitEdcaTurbo */
-
-void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
+static void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
{
struct adapter *Adapter = pDM_Odm->Adapter;
u32 trafficIndex;
@@ -948,7 +556,7 @@ void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
u64 cur_tx_bytes = 0;
u64 cur_rx_bytes = 0;
u8 bbtchange = false;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
struct xmit_priv *pxmitpriv = &Adapter->xmitpriv;
struct recv_priv *precvpriv = &Adapter->recvpriv;
struct registry_priv *pregpriv = &Adapter->registrypriv;
@@ -1004,7 +612,7 @@ void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
} else {
/* Turn Off EDCA turbo here. */
/* Restore original EDCA according to the declaration of AP. */
- if (pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA) {
+ if (pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA) {
rtw_write32(Adapter, REG_EDCA_BE_PARAM, pHalData->AcParam_BE);
pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
}
@@ -1016,3 +624,306 @@ dm_CheckEdcaTurbo_EXIT:
pxmitpriv->last_tx_bytes = pxmitpriv->tx_bytes;
precvpriv->last_rx_bytes = precvpriv->rx_bytes;
}
+
+/* 2011/09/21 MH Added to describe the resources the different teams need to allocate. */
+void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
+{
+ /* 2012.05.03 Luke: For all IC series */
+ odm_CommonInfoSelfInit(pDM_Odm);
+ odm_DIGInit(pDM_Odm);
+ odm_RateAdaptiveMaskInit(pDM_Odm);
+
+ odm_DynamicBBPowerSavingInit(pDM_Odm);
+ odm_TXPowerTrackingThermalMeterInit(pDM_Odm);
+ ODM_EdcaTurboInit(pDM_Odm);
+ ODM_RAInfo_Init_all(pDM_Odm);
+ if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
+ odm_InitHybridAntDiv(pDM_Odm);
+}
+
+/* 2011/09/20 MH This is the entry point for all teams to execute HW outsourced DM. */
+/* Do not add any dummy functions here; be careful, you may only use the DM structure */
+/* to perform any new ODM_DM. */
+void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
+{
+ /* 2012.05.03 Luke: For all IC series */
+ odm_CommonInfoSelfUpdate(pDM_Odm);
+ odm_FalseAlarmCounterStatistics(pDM_Odm);
+ odm_RSSIMonitorCheck(pDM_Odm);
+
+ odm_DIG(pDM_Odm);
+ odm_CCKPacketDetectionThresh(pDM_Odm);
+
+ if (*pDM_Odm->pbPowerSaving)
+ return;
+
+ odm_RefreshRateAdaptiveMask(pDM_Odm);
+
+ if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
+ odm_HwAntDiv(pDM_Odm);
+
+ ODM_TXPowerTrackingCheck(pDM_Odm);
+ odm_EdcaTurboCheck(pDM_Odm);
+}
+
+/* Init fixed HW values. Used at init time only. */
+void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, u32 Value)
+{
+ /* This section is used for init value */
+ switch (CmnInfo) {
+ /* Fixed ODM value. */
+ case ODM_CMNINFO_MP_TEST_CHIP:
+ pDM_Odm->bIsMPChip = (u8)Value;
+ break;
+ case ODM_CMNINFO_RF_ANTENNA_TYPE:
+ pDM_Odm->AntDivType = (u8)Value;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+
+ /* Tx power tracking BB swing table. */
+ /* The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
+ pDM_Odm->BbSwingIdxOfdm = 12; /* Set default value as index 12. */
+ pDM_Odm->BbSwingIdxOfdmCurrent = 12;
+ pDM_Odm->BbSwingFlagOfdm = false;
+}
+
+void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, void *pValue)
+{
+ /* */
+ /* Hook call by reference pointer. */
+ /* */
+ switch (CmnInfo) {
+ /* Dynamic call by reference pointer. */
+ case ODM_CMNINFO_WM_MODE:
+ pDM_Odm->pWirelessMode = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_SEC_CHNL_OFFSET:
+ pDM_Odm->pSecChOffset = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_BW:
+ pDM_Odm->pBandWidth = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_CHNL:
+ pDM_Odm->pChannel = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_SCAN:
+ pDM_Odm->pbScanInProcess = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_POWER_SAVING:
+ pDM_Odm->pbPowerSaving = (bool *)pValue;
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+}
+
+/* Update Band/Channel/etc. The values are dynamic but not per-packet. */
+void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value)
+{
+ /* */
+ /* This init variable may be changed at run time. */
+ /* */
+ switch (CmnInfo) {
+ case ODM_CMNINFO_ABILITY:
+ pDM_Odm->SupportAbility = (u32)Value;
+ break;
+ case ODM_CMNINFO_LINK:
+ pDM_Odm->bLinked = (bool)Value;
+ break;
+ case ODM_CMNINFO_RSSI_MIN:
+ pDM_Odm->RSSI_Min = (u8)Value;
+ break;
+ }
+}
+
+void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI)
+{
+ struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+ struct adapter *adapter = pDM_Odm->Adapter;
+
+ if (pDM_DigTable->CurIGValue != CurrentIGI) {
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N, CurrentIGI);
+ pDM_DigTable->CurIGValue = CurrentIGI;
+ }
+}
+
+void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres)
+{
+ struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+ if (pDM_DigTable->CurCCK_CCAThres != CurCCK_CCAThres) /* modify by Guo.Mingzhi 2012-01-03 */
+ rtw_write8(pDM_Odm->Adapter, ODM_REG_CCK_CCA_11N, CurCCK_CCAThres);
+ pDM_DigTable->CurCCK_CCAThres = CurCCK_CCAThres;
+}
+
+void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
+{
+ struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
+ struct adapter *adapter = pDM_Odm->Adapter;
+ u8 Rssi_Up_bound = 30;
+ u8 Rssi_Low_bound = 25;
+
+ if (pDM_PSTable->initialize == 0) {
+ pDM_PSTable->reg_874 = (rtl8188e_PHY_QueryBBReg(adapter, 0x874, bMaskDWord) & 0x1CC000) >> 14;
+ pDM_PSTable->reg_c70 = (rtl8188e_PHY_QueryBBReg(adapter, 0xc70, bMaskDWord) & BIT(3)) >> 3;
+ pDM_PSTable->reg_85c = (rtl8188e_PHY_QueryBBReg(adapter, 0x85c, bMaskDWord) & 0xFF000000) >> 24;
+ pDM_PSTable->reg_a74 = (rtl8188e_PHY_QueryBBReg(adapter, 0xa74, bMaskDWord) & 0xF000) >> 12;
+ pDM_PSTable->initialize = 1;
+ }
+
+ if (!bForceInNormal) {
+ if (pDM_Odm->RSSI_Min != 0xFF) {
+ if (pDM_PSTable->pre_rf_state == RF_Normal) {
+ if (pDM_Odm->RSSI_Min >= Rssi_Up_bound)
+ pDM_PSTable->cur_rf_state = RF_Save;
+ else
+ pDM_PSTable->cur_rf_state = RF_Normal;
+ } else {
+ if (pDM_Odm->RSSI_Min <= Rssi_Low_bound)
+ pDM_PSTable->cur_rf_state = RF_Normal;
+ else
+ pDM_PSTable->cur_rf_state = RF_Save;
+ }
+ } else {
+ pDM_PSTable->cur_rf_state = RF_MAX;
+ }
+ } else {
+ pDM_PSTable->cur_rf_state = RF_Normal;
+ }
+
+ if (pDM_PSTable->pre_rf_state != pDM_PSTable->cur_rf_state) {
+ if (pDM_PSTable->cur_rf_state == RF_Save) {
+ rtl8188e_PHY_SetBBReg(adapter, 0x874, 0x1C0000, 0x2); /* Reg874[20:18]=3'b010 */
+ rtl8188e_PHY_SetBBReg(adapter, 0xc70, BIT(3), 0); /* RegC70[3]=1'b0 */
+ rtl8188e_PHY_SetBBReg(adapter, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]=0x63 */
+ rtl8188e_PHY_SetBBReg(adapter, 0x874, 0xC000, 0x2); /* Reg874[15:14]=2'b10 */
+ rtl8188e_PHY_SetBBReg(adapter, 0xa74, 0xF000, 0x3); /* RegA75[7:4]=0x3 */
+ rtl8188e_PHY_SetBBReg(adapter, 0x818, BIT(28), 0x0); /* Reg818[28]=1'b0 */
+ rtl8188e_PHY_SetBBReg(adapter, 0x818, BIT(28), 0x1); /* Reg818[28]=1'b1 */
+ } else {
+ rtl8188e_PHY_SetBBReg(adapter, 0x874, 0x1CC000, pDM_PSTable->reg_874);
+ rtl8188e_PHY_SetBBReg(adapter, 0xc70, BIT(3), pDM_PSTable->reg_c70);
+ rtl8188e_PHY_SetBBReg(adapter, 0x85c, 0xFF000000, pDM_PSTable->reg_85c);
+ rtl8188e_PHY_SetBBReg(adapter, 0xa74, 0xF000, pDM_PSTable->reg_a74);
+ rtl8188e_PHY_SetBBReg(adapter, 0x818, BIT(28), 0x0);
+ }
+ pDM_PSTable->pre_rf_state = pDM_PSTable->cur_rf_state;
+ }
+}
+
+u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u8 rssi_level)
+{
+ struct sta_info *pEntry;
+ u32 rate_bitmap = 0x0fffffff;
+ u8 WirelessMode;
+
+ pEntry = pDM_Odm->pODM_StaInfo[macid];
+ if (!IS_STA_VALID(pEntry))
+ return ra_mask;
+
+ WirelessMode = pEntry->wireless_mode;
+
+ switch (WirelessMode) {
+ case ODM_WM_B:
+ if (ra_mask & 0x0000000c) /* 11M or 5.5M enable */
+ rate_bitmap = 0x0000000d;
+ else
+ rate_bitmap = 0x0000000f;
+ break;
+ case (ODM_WM_B | ODM_WM_G):
+ if (rssi_level == DM_RATR_STA_HIGH)
+ rate_bitmap = 0x00000f00;
+ else if (rssi_level == DM_RATR_STA_MIDDLE)
+ rate_bitmap = 0x00000ff0;
+ else
+ rate_bitmap = 0x00000ff5;
+ break;
+ case (ODM_WM_B | ODM_WM_G | ODM_WM_N24G):
+ if (rssi_level == DM_RATR_STA_HIGH) {
+ rate_bitmap = 0x000f0000;
+ } else if (rssi_level == DM_RATR_STA_MIDDLE) {
+ rate_bitmap = 0x000ff000;
+ } else {
+ if (*pDM_Odm->pBandWidth == ODM_BW40M)
+ rate_bitmap = 0x000ff015;
+ else
+ rate_bitmap = 0x000ff005;
+ }
+ break;
+ default:
+ /* case WIRELESS_11_24N: */
+ rate_bitmap = 0x0fffffff;
+ break;
+ }
+
+ return rate_bitmap;
+}
+
+/* Return Value: bool */
+/* - true: RATRState is changed. */
+bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate, u8 *pRATRState)
+{
+ struct odm_rate_adapt *pRA = &pDM_Odm->RateAdaptive;
+ const u8 GoUpGap = 5;
+ u8 HighRSSIThreshForRA = pRA->HighRSSIThresh;
+ u8 LowRSSIThreshForRA = pRA->LowRSSIThresh;
+ u8 RATRState;
+
+ /* Threshold Adjustment: */
+ /* when RSSI state trends to go up one or two levels, make sure RSSI is high enough. */
+ /* Here GoUpGap is added to solve the boundary's level alternation issue. */
+ switch (*pRATRState) {
+ case DM_RATR_STA_INIT:
+ case DM_RATR_STA_HIGH:
+ break;
+ case DM_RATR_STA_MIDDLE:
+ HighRSSIThreshForRA += GoUpGap;
+ break;
+ case DM_RATR_STA_LOW:
+ HighRSSIThreshForRA += GoUpGap;
+ LowRSSIThreshForRA += GoUpGap;
+ break;
+ default:
+ break;
+ }
+
+ /* Decide RATRState by RSSI. */
+ if (RSSI > HighRSSIThreshForRA)
+ RATRState = DM_RATR_STA_HIGH;
+ else if (RSSI > LowRSSIThreshForRA)
+ RATRState = DM_RATR_STA_MIDDLE;
+ else
+ RATRState = DM_RATR_STA_LOW;
+
+ if (*pRATRState != RATRState || bForceUpdate) {
+ *pRATRState = RATRState;
+ return true;
+ }
+ return false;
+}
+
+void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+
+ if (!(pDM_Odm->SupportAbility & ODM_RF_TX_PWR_TRACK))
+ return;
+
+ if (!pDM_Odm->RFCalibrateInfo.TM_Trigger) { /* at least delay 1 sec */
+ rtl8188e_PHY_SetRFReg(Adapter, RF_PATH_A, RF_T_METER_88E, BIT(17) | BIT(16), 0x03);
+
+ pDM_Odm->RFCalibrateInfo.TM_Trigger = 1;
+ return;
+ } else {
+ odm_TXPowerTrackingCallback_ThermalMeter_8188E(Adapter);
+ pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
+ }
+}
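
ODM_RAStateCheck() above implements hysteresis for the rate-adaptation level: the thresholds for moving up are raised by GoUpGap, while moving down uses the unmodified bounds, so an RSSI hovering at a boundary does not flip the state on every sample. With the defaults set in odm_RateAdaptiveMaskInit() (HighRSSIThresh = 50, LowRSSIThresh = 20), a station in the LOW state needs RSSI > 25 to reach MIDDLE and > 55 to reach HIGH. A standalone sketch of just that decision logic, not the driver function itself:

	enum ratr_state { RA_HIGH, RA_MIDDLE, RA_LOW };

	/* Hysteresis as in ODM_RAStateCheck(): climbing a level requires the
	 * threshold plus GoUpGap; dropping back does not. */
	static enum ratr_state next_ra_state(enum ratr_state cur, int rssi)
	{
		int high = 50, low = 20;	/* defaults from odm_RateAdaptiveMaskInit() */
		const int go_up_gap = 5;

		if (cur == RA_MIDDLE) {
			high += go_up_gap;	/* need > 55 to climb to HIGH */
		} else if (cur == RA_LOW) {
			high += go_up_gap;
			low += go_up_gap;	/* need > 25 to climb to MIDDLE */
		}

		if (rssi > high)
			return RA_HIGH;
		if (rssi > low)
			return RA_MIDDLE;
		return RA_LOW;
	}
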
diff --git a/drivers/staging/r8188eu/hal/odm_HWConfig.c b/drivers/staging/r8188eu/hal/odm_HWConfig.c
index 3125886e6731..d5212a166dd2 100644
--- a/drivers/staging/r8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/r8188eu/hal/odm_HWConfig.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
+#include "../include/drv_types.h"
#define READ_AND_CONFIG READ_AND_CONFIG_MP
@@ -251,17 +251,7 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
/* Smart Antenna Debug Message------------------ */
- if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV) {
- if (pDM_FatTable->FAT_State == FAT_TRAINING_STATE) {
- if (pPktinfo->bPacketToSelf) {
- antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2 << 2) |
- (pDM_FatTable->antsel_rx_keep_1 << 1) |
- pDM_FatTable->antsel_rx_keep_0;
- pDM_FatTable->antSumRSSI[antsel_tr_mux] += pPhyInfo->RxPWDBAll;
- pDM_FatTable->antRSSIcnt[antsel_tr_mux]++;
- }
- }
- } else if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
+ if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2 << 2) |
(pDM_FatTable->antsel_rx_keep_1 << 1) | pDM_FatTable->antsel_rx_keep_0;
@@ -368,10 +358,8 @@ void ODM_PhyStatusQuery(struct odm_dm_struct *dm_odm,
struct odm_per_pkt_info *pPktinfo,
struct adapter *adapt)
{
- odm_RxPhyStatus92CSeries_Parsing(dm_odm, pPhyInfo, pPhyStatus,
- pPktinfo, adapt);
- if (!dm_odm->RSSI_test)
- odm_Process_RSSIForDM(dm_odm, pPhyInfo, pPktinfo);
+ odm_RxPhyStatus92CSeries_Parsing(dm_odm, pPhyInfo, pPhyStatus, pPktinfo, adapt);
+ odm_Process_RSSIForDM(dm_odm, pPhyInfo, pPktinfo);
}
enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *dm_odm,
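
The diversity bookkeeping kept in odm_Process_RSSIForDM() packs the three latched antsel_rx bits into a 3-bit antenna index and accumulates a per-antenna RSSI sum and sample count; a later pass averages them to pick the better antenna. A minimal sketch of that accounting, with sum[]/cnt[] standing in for pDM_FatTable->antSumRSSI/antRSSIcnt:

	static void account_rssi(u32 sum[8], u32 cnt[8],
				 u8 sel2, u8 sel1, u8 sel0, u8 rx_pwdb)
	{
		u8 idx = (sel2 << 2) | (sel1 << 1) | sel0;	/* 3-bit antenna index */

		sum[idx] += rx_pwdb;	/* averaged later: keep the antenna with the best mean */
		cnt[idx]++;
	}
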
diff --git a/drivers/staging/r8188eu/hal/odm_RTL8188E.c b/drivers/staging/r8188eu/hal/odm_RTL8188E.c
index e7a765f375d6..c8a3c521bd60 100644
--- a/drivers/staging/r8188eu/hal/odm_RTL8188E.c
+++ b/drivers/staging/r8188eu/hal/odm_RTL8188E.c
@@ -1,130 +1,98 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
+#include "../include/drv_types.h"
static void odm_RX_HWAntDivInit(struct odm_dm_struct *dm_odm)
{
+ struct adapter *adapter = dm_odm->Adapter;
u32 value32;
/* MAC Setting */
- value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
- ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
/* Pin Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT(22), 1); /* Regb2c[22]=1'b0 disable CS/CG switch */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT(22), 1); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
/* OFDM Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
/* CCK Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_BB_PWR_SAV4_11N, BIT(7), 1); /* Fix CCK PHY status report issue */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1); /* CCK complete HW AntDiv within 64 samples */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_BB_PWR_SAV4_11N, BIT(7), 1); /* Fix CCK PHY status report issue */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1); /* CCK complete HW AntDiv within 64 samples */
ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
- ODM_SetBBReg(dm_odm, ODM_REG_ANT_MAPPING1_11N, 0xFFFF, 0x0201); /* antenna mapping table */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANT_MAPPING1_11N, 0xFFFF, 0x0201); /* antenna mapping table */
}
static void odm_TRX_HWAntDivInit(struct odm_dm_struct *dm_odm)
{
+ struct adapter *adapter = dm_odm->Adapter;
u32 value32;
/* MAC Setting */
- value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
- ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = rtl8188e_PHY_QueryBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
/* Pin Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT(22), 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_PIN_CTRL_11N, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT(22), 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_LNA_SWITCH_11N, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
/* OFDM Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
/* CCK Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_BB_PWR_SAV4_11N, BIT(7), 1); /* Fix CCK PHY status report issue */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1); /* CCK complete HW AntDiv within 64 samples */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_BB_PWR_SAV4_11N, BIT(7), 1); /* Fix CCK PHY status report issue */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT(4), 1); /* CCK complete HW AntDiv within 64 samples */
/* Tx Settings */
- ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 0); /* Reg80c[21]=1'b0 from TX Reg */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 0); /* Reg80c[21]=1'b0 from TX Reg */
ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
/* antenna mapping table */
if (!dm_odm->bIsMPChip) { /* testchip */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_DEFUALT_A_11N, BIT(10) | BIT(9) | BIT(8), 1); /* Reg858[10:8]=3'b001 */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_DEFUALT_A_11N, BIT(13) | BIT(12) | BIT(11), 2); /* Reg858[13:11]=3'b010 */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_DEFUALT_A_11N, BIT(10) | BIT(9) | BIT(8), 1); /* Reg858[10:8]=3'b001 */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_DEFUALT_A_11N, BIT(13) | BIT(12) | BIT(11), 2); /* Reg858[13:11]=3'b010 */
} else { /* MPchip */
- ODM_SetBBReg(dm_odm, ODM_REG_ANT_MAPPING1_11N, bMaskDWord, 0x0201); /* Reg914=3'b010, Reg915=3'b001 */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANT_MAPPING1_11N, bMaskDWord, 0x0201); /* Reg914=3'b010, Reg915=3'b001 */
}
}
static void odm_FastAntTrainingInit(struct odm_dm_struct *dm_odm)
{
- u32 value32, i;
- struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
- u32 AntCombination = 2;
-
- for (i = 0; i < 6; i++) {
- dm_fat_tbl->Bssid[i] = 0;
- dm_fat_tbl->antSumRSSI[i] = 0;
- dm_fat_tbl->antRSSIcnt[i] = 0;
- dm_fat_tbl->antAveRSSI[i] = 0;
- }
- dm_fat_tbl->TrainIdx = 0;
- dm_fat_tbl->FAT_State = FAT_NORMAL_STATE;
+ struct adapter *adapter = dm_odm->Adapter;
+ u32 value32;
/* MAC Setting */
- value32 = ODM_GetMACReg(dm_odm, 0x4c, bMaskDWord);
- ODM_SetMACReg(dm_odm, 0x4c, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
- value32 = ODM_GetMACReg(dm_odm, 0x7B4, bMaskDWord);
- ODM_SetMACReg(dm_odm, 0x7b4, bMaskDWord, value32 | (BIT(16) | BIT(17))); /* Reg7B4[16]=1 enable antenna training, Reg7B4[17]=1 enable A2 match */
+ value32 = rtl8188e_PHY_QueryBBReg(adapter, 0x4c, bMaskDWord);
+ rtl8188e_PHY_SetBBReg(adapter, 0x4c, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = rtl8188e_PHY_QueryBBReg(adapter, 0x7B4, bMaskDWord);
+ rtl8188e_PHY_SetBBReg(adapter, 0x7b4, bMaskDWord, value32 | (BIT(16) | BIT(17))); /* Reg7B4[16]=1 enable antenna training, Reg7B4[17]=1 enable A2 match */
/* Match MAC ADDR */
- ODM_SetMACReg(dm_odm, 0x7b4, 0xFFFF, 0);
- ODM_SetMACReg(dm_odm, 0x7b0, bMaskDWord, 0);
+ rtl8188e_PHY_SetBBReg(adapter, 0x7b4, 0xFFFF, 0);
+ rtl8188e_PHY_SetBBReg(adapter, 0x7b0, bMaskDWord, 0);
- ODM_SetBBReg(dm_odm, 0x870, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
- ODM_SetBBReg(dm_odm, 0x864, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
- ODM_SetBBReg(dm_odm, 0xb2c, BIT(22), 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
- ODM_SetBBReg(dm_odm, 0xb2c, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
- ODM_SetBBReg(dm_odm, 0xca4, bMaskDWord, 0x000000a0);
+ rtl8188e_PHY_SetBBReg(adapter, 0x870, BIT(9) | BIT(8), 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ rtl8188e_PHY_SetBBReg(adapter, 0x864, BIT(10), 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ rtl8188e_PHY_SetBBReg(adapter, 0xb2c, BIT(22), 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ rtl8188e_PHY_SetBBReg(adapter, 0xb2c, BIT(31), 1); /* Regb2c[31]=1'b1 output at CG only */
+ rtl8188e_PHY_SetBBReg(adapter, 0xca4, bMaskDWord, 0x000000a0);
- /* antenna mapping table */
- if (AntCombination == 2) {
- if (!dm_odm->bIsMPChip) { /* testchip */
- ODM_SetBBReg(dm_odm, 0x858, BIT(10) | BIT(9) | BIT(8), 1); /* Reg858[10:8]=3'b001 */
- ODM_SetBBReg(dm_odm, 0x858, BIT(13) | BIT(12) | BIT(11), 2); /* Reg858[13:11]=3'b010 */
- } else { /* MPchip */
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte0, 1);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte1, 2);
- }
- } else if (AntCombination == 7) {
- if (!dm_odm->bIsMPChip) { /* testchip */
- ODM_SetBBReg(dm_odm, 0x858, BIT(10) | BIT(9) | BIT(8), 0); /* Reg858[10:8]=3'b000 */
- ODM_SetBBReg(dm_odm, 0x858, BIT(13) | BIT(12) | BIT(11), 1); /* Reg858[13:11]=3'b001 */
- ODM_SetBBReg(dm_odm, 0x878, BIT(16), 0);
- ODM_SetBBReg(dm_odm, 0x858, BIT(15) | BIT(14), 2); /* Reg878[0],Reg858[14:15])=3'b010 */
- ODM_SetBBReg(dm_odm, 0x878, BIT(19) | BIT(18) | BIT(17), 3);/* Reg878[3:1]=3b'011 */
- ODM_SetBBReg(dm_odm, 0x878, BIT(22) | BIT(21) | BIT(20), 4);/* Reg878[6:4]=3b'100 */
- ODM_SetBBReg(dm_odm, 0x878, BIT(25) | BIT(24) | BIT(23), 5);/* Reg878[9:7]=3b'101 */
- ODM_SetBBReg(dm_odm, 0x878, BIT(28) | BIT(27) | BIT(26), 6);/* Reg878[12:10]=3b'110 */
- ODM_SetBBReg(dm_odm, 0x878, BIT(31) | BIT(30) | BIT(29), 7);/* Reg878[15:13]=3b'111 */
- } else { /* MPchip */
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte0, 0);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte1, 1);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte2, 2);
- ODM_SetBBReg(dm_odm, 0x914, bMaskByte3, 3);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte0, 4);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte1, 5);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte2, 6);
- ODM_SetBBReg(dm_odm, 0x918, bMaskByte3, 7);
- }
+ if (!dm_odm->bIsMPChip) { /* testchip */
+ rtl8188e_PHY_SetBBReg(adapter, 0x858, BIT(10) | BIT(9) | BIT(8), 1); /* Reg858[10:8]=3'b001 */
+ rtl8188e_PHY_SetBBReg(adapter, 0x858, BIT(13) | BIT(12) | BIT(11), 2); /* Reg858[13:11]=3'b010 */
+ } else { /* MPchip */
+ rtl8188e_PHY_SetBBReg(adapter, 0x914, bMaskByte0, 1);
+ rtl8188e_PHY_SetBBReg(adapter, 0x914, bMaskByte1, 2);
}
/* Default Ant Setting when no fast training */
- ODM_SetBBReg(dm_odm, 0x80c, BIT(21), 1); /* Reg80c[21]=1'b1 from TX Info */
- ODM_SetBBReg(dm_odm, 0x864, BIT(5) | BIT(4) | BIT(3), 0); /* Default RX */
- ODM_SetBBReg(dm_odm, 0x864, BIT(8) | BIT(7) | BIT(6), 1); /* Optional RX */
+ rtl8188e_PHY_SetBBReg(adapter, 0x80c, BIT(21), 1); /* Reg80c[21]=1'b1 from TX Info */
+ rtl8188e_PHY_SetBBReg(adapter, 0x864, BIT(5) | BIT(4) | BIT(3), 0); /* Default RX */
+ rtl8188e_PHY_SetBBReg(adapter, 0x864, BIT(8) | BIT(7) | BIT(6), 1); /* Optional RX */
- /* Enter Traing state */
- ODM_SetBBReg(dm_odm, 0x864, BIT(2) | BIT(1) | BIT(0), (AntCombination - 1)); /* Reg864[2:0]=3'd6 ant combination=reg864[2:0]+1 */
- ODM_SetBBReg(dm_odm, 0xc50, BIT(7), 1); /* RegC50[7]=1'b1 enable HW AntDiv */
+ /* Enter Training state */
+ rtl8188e_PHY_SetBBReg(adapter, 0x864, BIT(2) | BIT(1) | BIT(0), 1);
+ rtl8188e_PHY_SetBBReg(adapter, 0xc50, BIT(7), 1); /* RegC50[7]=1'b1 enable HW AntDiv */
}
void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *dm_odm)
@@ -140,6 +108,7 @@ void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *dm_odm)
void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant)
{
struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ struct adapter *adapter = dm_odm->Adapter;
u32 DefaultAnt, OptionalAnt;
if (dm_fat_tbl->RxIdleAnt != Ant) {
@@ -152,13 +121,13 @@ void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant)
}
if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), DefaultAnt); /* Default RX */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), OptionalAnt); /* Optional RX */
- ODM_SetBBReg(dm_odm, ODM_REG_ANTSEL_CTRL_11N, BIT(14) | BIT(13) | BIT(12), DefaultAnt); /* Default TX */
- ODM_SetMACReg(dm_odm, ODM_REG_RESP_TX_11N, BIT(6) | BIT(7), DefaultAnt); /* Resp Tx */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), DefaultAnt); /* Default RX */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), OptionalAnt); /* Optional RX */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_ANTSEL_CTRL_11N, BIT(14) | BIT(13) | BIT(12), DefaultAnt); /* Default TX */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RESP_TX_11N, BIT(6) | BIT(7), DefaultAnt); /* Resp Tx */
} else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), DefaultAnt); /* Default RX */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), OptionalAnt); /* Optional RX */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), DefaultAnt); /* Default RX */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_RX_ANT_CTRL_11N, BIT(8) | BIT(7) | BIT(6), OptionalAnt); /* Optional RX */
}
}
dm_fat_tbl->RxIdleAnt = Ant;
@@ -267,42 +236,29 @@ static void odm_HWAntDiv(struct odm_dm_struct *dm_odm)
void ODM_AntennaDiversity_88E(struct odm_dm_struct *dm_odm)
{
struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ struct adapter *adapter = dm_odm->Adapter;
+
if (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV))
return;
if (!dm_odm->bLinked) {
if (dm_fat_tbl->bBecomeLinked) {
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT(7), 0); /* RegC50[7]=1'b1 enable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT(15), 0); /* Enable CCK AntDiv */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT(7), 0); /* RegC50[7]=1'b1 enable HW AntDiv */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT(15), 0); /* Enable CCK AntDiv */
if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 0); /* Reg80c[21]=1'b0 from TX Reg */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 0); /* Reg80c[21]=1'b0 from TX Reg */
dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
}
return;
} else {
if (!dm_fat_tbl->bBecomeLinked) {
/* Because HW AntDiv is disabled before Link, we enable HW AntDiv after link */
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT(7), 1); /* RegC50[7]=1'b1 enable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT(15), 1); /* Enable CCK AntDiv */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_IGI_A_11N, BIT(7), 1); /* RegC50[7]=1'b1 enable HW AntDiv */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT(15), 1); /* Enable CCK AntDiv */
if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 1); /* Reg80c[21]=1'b1 from TX Info */
+ rtl8188e_PHY_SetBBReg(adapter, ODM_REG_TX_ANT_CTRL_11N, BIT(21), 1); /* Reg80c[21]=1'b1 from TX Info */
dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
}
}
if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV))
odm_HWAntDiv(dm_odm);
}
-
-/* 3============================================================ */
-/* 3 Dynamic Primary CCA */
-/* 3============================================================ */
-
-void odm_PrimaryCCA_Init(struct odm_dm_struct *dm_odm)
-{
- struct dyn_primary_cca *PrimaryCCA = &dm_odm->DM_PriCCA;
-
- PrimaryCCA->dup_rts_flag = 0;
- PrimaryCCA->intf_flag = 0;
- PrimaryCCA->intf_type = 0;
- PrimaryCCA->monitor_flag = 0;
- PrimaryCCA->pri_cca_flag = 0;
-}
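
A note on the bitmask convention used in the antenna-diversity writes above:
rtl8188e_PHY_SetBBReg() takes a contiguous bit-field mask such as
BIT(8) | BIT(7) | BIT(6) and writes the value into that field, so passing 1
sets reg[8:6] = 001b rather than bit 0. A minimal sketch of such a masked
read-modify-write, assuming hypothetical read_reg32()/write_reg32() accessors
in place of the driver's real USB register I/O:

	static void set_bb_field(struct adapter *adapter, u32 addr, u32 mask, u32 val)
	{
		u32 cur = read_reg32(adapter, addr);
		u32 shift = __ffs(mask);	/* lowest set bit = field position */

		/* clear the field, then insert the new value */
		cur = (cur & ~mask) | ((val << shift) & mask);
		write_reg32(adapter, addr, cur);
	}
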
diff --git a/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c b/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c
index 5f6f0ae5196e..5fb5a88314ed 100644
--- a/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c
+++ b/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c
@@ -1,28 +1,28 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
+#include "../include/drv_types.h"
-void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
- u32 Data, enum rf_radio_path RF_PATH,
- u32 RegAddr)
+static void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Data, enum rf_radio_path RF_PATH,
+ u32 RegAddr)
{
- if (Addr == 0xffe) {
- ODM_sleep_ms(50);
+ if (Addr == 0xffe) {
+ msleep(50);
} else if (Addr == 0xfd) {
- ODM_delay_ms(5);
+ mdelay(5);
} else if (Addr == 0xfc) {
- ODM_delay_ms(1);
+ mdelay(1);
} else if (Addr == 0xfb) {
- ODM_delay_us(50);
+ udelay(50);
} else if (Addr == 0xfa) {
- ODM_delay_us(5);
+ udelay(5);
} else if (Addr == 0xf9) {
- ODM_delay_us(1);
+ udelay(1);
} else {
- ODM_SetRFReg(pDM_Odm, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
+ rtl8188e_PHY_SetRFReg(pDM_Odm->Adapter, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
/* Add 1us delay between BB/RF register setting. */
- ODM_delay_us(1);
+ udelay(1);
}
}
@@ -36,31 +36,31 @@ void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data
void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
{
- ODM_Write1Byte(pDM_Odm, Addr, Data);
+ rtw_write8(pDM_Odm->Adapter, Addr, Data);
}
void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
{
- ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+ rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
/* Add 1us delay between BB/RF register setting. */
- ODM_delay_us(1);
+ udelay(1);
}
void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
u32 Bitmask, u32 Data)
{
if (Addr == 0xfe)
- ODM_sleep_ms(50);
+ msleep(50);
else if (Addr == 0xfd)
- ODM_delay_ms(5);
+ mdelay(5);
else if (Addr == 0xfc)
- ODM_delay_ms(1);
+ mdelay(1);
else if (Addr == 0xfb)
- ODM_delay_us(50);
+ udelay(50);
else if (Addr == 0xfa)
- ODM_delay_us(5);
+ udelay(5);
else if (Addr == 0xf9)
- ODM_delay_us(1);
+ udelay(1);
else
storePwrIndexDiffRateOffset(pDM_Odm->Adapter, Addr, Bitmask, Data);
}
@@ -68,23 +68,23 @@ void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
{
if (Addr == 0xfe) {
- ODM_sleep_ms(50);
+ msleep(50);
} else if (Addr == 0xfd) {
- ODM_delay_ms(5);
+ mdelay(5);
} else if (Addr == 0xfc) {
- ODM_delay_ms(1);
+ mdelay(1);
} else if (Addr == 0xfb) {
- ODM_delay_us(50);
+ udelay(50);
} else if (Addr == 0xfa) {
- ODM_delay_us(5);
+ udelay(5);
} else if (Addr == 0xf9) {
- ODM_delay_us(1);
+ udelay(1);
} else {
if (Addr == 0xa24)
pDM_Odm->RFCalibrateInfo.RegA24 = Data;
- ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+ rtl8188e_PHY_SetBBReg(pDM_Odm->Adapter, Addr, Bitmask, Data);
/* Add 1us delay between BB/RF register setting. */
- ODM_delay_us(1);
+ udelay(1);
}
}
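
The 0xf9..0xfe (and 0xffe) "addresses" handled by the three config helpers
above are delay opcodes embedded in Realtek's register tables, not real
registers; table replay pauses instead of writing. A compact sketch of the
same dispatch, assuming only the standard kernel delay helpers:

	static bool handle_delay_opcode(u32 addr)
	{
		switch (addr) {
		case 0xffe:
		case 0xfe: msleep(50); return true;	/* may sleep/schedule */
		case 0xfd: mdelay(5); return true;	/* busy-wait, milliseconds */
		case 0xfc: mdelay(1); return true;
		case 0xfb: udelay(50); return true;	/* busy-wait, microseconds */
		case 0xfa: udelay(5); return true;
		case 0xf9: udelay(1); return true;
		default: return false;			/* a real register address */
		}
	}
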
diff --git a/drivers/staging/r8188eu/hal/odm_debug.c b/drivers/staging/r8188eu/hal/odm_debug.c
index 7029ec4f771e..7a134229fe39 100644
--- a/drivers/staging/r8188eu/hal/odm_debug.c
+++ b/drivers/staging/r8188eu/hal/odm_debug.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#include "../include/odm_precomp.h"
+#include "../include/rtw_debug.h"
u32 GlobalDebugLevel;
diff --git a/drivers/staging/r8188eu/hal/odm_interface.c b/drivers/staging/r8188eu/hal/odm_interface.c
deleted file mode 100644
index 7ddba39a0f4b..000000000000
--- a/drivers/staging/r8188eu/hal/odm_interface.c
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/odm_precomp.h"
-/* ODM IO Relative API. */
-
-u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return rtw_read8(Adapter, RegAddr);
-}
-
-u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return rtw_read32(Adapter, RegAddr);
-}
-
-void ODM_Write1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u8 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- rtw_write8(Adapter, RegAddr, Data);
-}
-
-void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- rtw_write16(Adapter, RegAddr, Data);
-}
-
-void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- rtw_write32(Adapter, RegAddr, Data);
-}
-
-void ODM_SetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
-}
-
-u32 ODM_GetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
-}
-
-void ODM_SetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
-}
-
-u32 ODM_GetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
-}
-
-void ODM_SetRFReg(struct odm_dm_struct *pDM_Odm, enum rf_radio_path eRFPath, u32 RegAddr, u32 BitMask, u32 Data)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, RegAddr, BitMask, Data);
-}
-
-u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum rf_radio_path eRFPath, u32 RegAddr, u32 BitMask)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return PHY_QueryRFReg(Adapter, (enum rf_radio_path)eRFPath, RegAddr, BitMask);
-}
-
-/* ODM Memory relative API. */
-s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2, u32 length)
-{
- return !memcmp(pBuf1, pBuf2, length);
-}
-
-/* ODM Timer relative API. */
-void ODM_delay_ms(u32 ms)
-{
- mdelay(ms);
-}
-
-void ODM_delay_us(u32 us)
-{
- udelay(us);
-}
-
-void ODM_sleep_ms(u32 ms)
-{
- msleep(ms);
-}
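
The deleted odm_interface.c was a pure indirection layer: every helper only
fetched pDM_Odm->Adapter and forwarded to the corresponding rtw_/PHY_ call.
After this patch the call sites do that step themselves, e.g.:

	/* before: one extra hop through the ODM wrapper */
	ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT(7), 1);

	/* after: callers pass the adapter they already hold */
	rtl8188e_PHY_SetBBReg(dm_odm->Adapter, ODM_REG_IGI_A_11N, BIT(7), 1);
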
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
index e44bcde92cc3..a491c37777df 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
@@ -49,7 +49,7 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
u8 h2c_box_num;
u32 msgbox_addr;
u32 msgbox_ex_addr;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
u8 cmd_idx, ext_cmd_len;
u32 h2c_cmd = 0;
u32 h2c_cmd_ex = 0;
@@ -104,7 +104,7 @@ u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
{
u8 buf[3];
u8 res = _SUCCESS;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
if (haldata->fw_ractrl) {
__le32 lmask;
@@ -128,7 +128,7 @@ u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
/* arg[5] = Short GI */
void rtl8188e_Add_RateATid(struct adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi_level)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(pAdapter);
+ struct hal_data_8188e *haldata = &pAdapter->haldata;
u8 macid, raid, short_gi_rate = false;
@@ -440,7 +440,6 @@ void CheckFwRsvdPageContent(struct adapter *Adapter)
/* 2009.10.15 by tynli. */
static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
{
- struct hal_data_8188e *haldata;
struct xmit_frame *pmgntframe;
struct pkt_attrib *pattrib;
struct xmit_priv *pxmitpriv;
@@ -461,7 +460,6 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
return;
}
- haldata = GET_HAL_DATA(adapt);
pxmitpriv = &adapt->xmitpriv;
pmlmeext = &adapt->mlmeextpriv;
pmlmeinfo = &pmlmeext->mlmext_info;
@@ -480,7 +478,6 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
if (PageNeed == 1)
PageNeed += 1;
PageNum += PageNeed;
- haldata->FwRsvdPageStartOffset = PageNum;
BufIndex += PageNeed * 128;
@@ -547,7 +544,7 @@ exit:
void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
bool bSendBeacon = false;
@@ -642,7 +639,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct wifidirect_info *pwdinfo = &adapt->wdinfo;
struct P2P_PS_Offload_t *p2p_ps_offload = &haldata->p2p_ps_offload;
u8 i;
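
The GET_HAL_DATA() conversions in this file (and below) reflect
struct hal_data_8188e moving from a separately allocated buffer hidden behind
a cast macro to a member embedded directly in struct adapter, which is also
why rtl8188e_free_hal_data() disappears later in the patch. A sketch of the
layout change, with the hypothetical _old/_new names standing in for the two
versions of struct adapter and unrelated members omitted:

	struct adapter_old { void *HalData; };			/* kmalloc'ed, reached via GET_HAL_DATA() cast */
	struct adapter_new { struct hal_data_8188e haldata; };	/* embedded, no allocation or kfree() needed */

	/* access becomes taking the member's address: */
	struct hal_data_8188e *haldata = &adapt->haldata;
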
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_dm.c b/drivers/staging/r8188eu/hal/rtl8188e_dm.c
index 5d76f6ea91c4..bd6eb3878060 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_dm.c
@@ -24,7 +24,7 @@ static void dm_InitGPIOSetting(struct adapter *Adapter)
/* */
static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
struct dm_priv *pdmpriv = &hal_data->dmpriv;
struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
@@ -48,18 +48,14 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
struct dm_priv *pdmpriv = &hal_data->dmpriv;
int i;
- pdmpriv->InitODMFlag = ODM_BB_DIG |
- ODM_BB_RA_MASK |
- ODM_BB_DYNAMIC_TXPWR |
- ODM_BB_FA_CNT |
+ pdmpriv->InitODMFlag = ODM_BB_FA_CNT |
ODM_BB_RSSI_MONITOR |
ODM_BB_CCK_PD |
- ODM_BB_PWR_SAVE |
ODM_MAC_EDCA_TURBO |
ODM_RF_CALIBRATION |
ODM_RF_TX_PWR_TRACK;
@@ -68,14 +64,10 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_ABILITY, pdmpriv->InitODMFlag);
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_TX_UNI, &Adapter->xmitpriv.tx_bytes);
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_RX_UNI, &Adapter->recvpriv.rx_bytes);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_WM_MODE, &pmlmeext->cur_wireless_mode);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SEC_CHNL_OFFSET, &hal_data->nCur40MhzPrimeSC);
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SEC_MODE, &Adapter->securitypriv.dot11PrivacyAlgrthm);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_BW, &hal_data->CurrentChannelBW);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_CHNL, &hal_data->CurrentChannel);
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_NET_CLOSED, &Adapter->net_closed);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SCAN, &pmlmepriv->bScanInProcess);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_POWER_SAVING, &pwrctrlpriv->bpower_saving);
ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_RF_ANTENNA_TYPE, hal_data->TRxAntDivType);
@@ -86,19 +78,18 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
void rtl8188e_InitHalDm(struct adapter *Adapter)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
dm_InitGPIOSetting(Adapter);
Update_ODM_ComInfo_88E(Adapter);
ODM_DMInit(dm_odm);
- Adapter->fix_rate = 0xFF;
}
void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
{
u8 hw_init_completed = Adapter->hw_init_completed;
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
u8 bLinked = false;
@@ -120,7 +111,7 @@ void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
void rtl8188e_init_dm_priv(struct adapter *Adapter)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
struct dm_priv *pdmpriv = &hal_data->dmpriv;
memset(pdmpriv, 0, sizeof(struct dm_priv));
@@ -131,7 +122,7 @@ void rtl8188e_init_dm_priv(struct adapter *Adapter)
/* Compare RSSI for deciding antenna */
void AntDivCompare8188E(struct adapter *Adapter, struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
if (0 != hal_data->AntDivCfg) {
/* select optimum_antenna for before linked =>For antenna diversity */
@@ -145,7 +136,7 @@ void AntDivCompare8188E(struct adapter *Adapter, struct wlan_bssid_ex *dst, stru
/* Add new function to reset the state of antenna diversity before link. */
u8 AntDivBeforeLink8188E(struct adapter *Adapter)
{
- struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *hal_data = &Adapter->haldata;
struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
struct sw_ant_switch *dm_swat_tbl = &dm_odm->DM_SWAT_Table;
struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
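
Dropping ODM_BB_DIG, ODM_BB_RA_MASK, ODM_BB_DYNAMIC_TXPWR and ODM_BB_PWR_SAVE
from InitODMFlag is enough to switch those features off, because the flag word
is copied into dm_odm->SupportAbility via
ODM_CmnInfoUpdate(..., ODM_CMNINFO_ABILITY, ...) and each ODM feature guards
on its bit, as ODM_AntennaDiversity_88E does above. Sketch of the guard
pattern (ODM_SomeFeature is hypothetical):

	void ODM_SomeFeature(struct odm_dm_struct *dm_odm)
	{
		if (!(dm_odm->SupportAbility & ODM_BB_FA_CNT))
			return;	/* compiled in, but disabled by the flag word */
		/* ... */
	}
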
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
index 8c00f2dd67da..b818872e0d19 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
@@ -508,8 +508,6 @@ static s32 _FWFreeToGo(struct adapter *padapter)
return _FAIL;
}
-#define IS_FW_81xxC(padapter) (((GET_HAL_DATA(padapter))->FirmwareSignature & 0xFFF0) == 0x88C0)
-
static int load_firmware(struct rt_firmware *pFirmware, struct device *device)
{
s32 rtStatus = _SUCCESS;
@@ -541,7 +539,7 @@ static int load_firmware(struct rt_firmware *pFirmware, struct device *device)
memcpy(pFirmware->szFwBuffer, fw->data, fw->size);
pFirmware->ulFwLength = fw->size;
release_firmware(fw);
- DBG_88E_LEVEL(_drv_info_, "+%s: !bUsedWoWLANFw, FmrmwareLen:%d+\n", __func__, pFirmware->ulFwLength);
+ dev_dbg(device, "!bUsedWoWLANFw, FirmwareLen:%u\n", pFirmware->ulFwLength);
Exit:
return rtStatus;
@@ -552,7 +550,7 @@ s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
s32 rtStatus = _SUCCESS;
u8 writeFW_retry = 0;
u32 fwdl_start_time;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = &padapter->haldata;
struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
struct device *device = dvobj_to_dev(dvobj);
struct rt_firmware_hdr *pFwHdr = NULL;
@@ -628,7 +626,7 @@ Exit:
void rtl8188e_InitializeFirmwareVars(struct adapter *padapter)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = &padapter->haldata;
/* Init Fw LPS related. */
padapter->pwrctrlpriv.bFwCurrentInPSMode = false;
@@ -637,12 +635,6 @@ void rtl8188e_InitializeFirmwareVars(struct adapter *padapter)
pHalData->LastHMEBoxNum = 0;
}
-void rtl8188e_free_hal_data(struct adapter *padapter)
-{
- kfree(padapter->HalData);
- padapter->HalData = NULL;
-}
-
/* */
/* Efuse related code */
/* */
@@ -651,31 +643,8 @@ enum{
LDOE25_SHIFT = 28,
};
-static bool
-hal_EfusePgPacketWrite2ByteHeader(
- struct adapter *pAdapter,
- u8 efuseType,
- u16 *pAddr,
- struct pgpkt *pTargetPkt,
- bool bPseudoTest);
-static bool
-hal_EfusePgPacketWrite1ByteHeader(
- struct adapter *pAdapter,
- u8 efuseType,
- u16 *pAddr,
- struct pgpkt *pTargetPkt,
- bool bPseudoTest);
-static bool
-hal_EfusePgPacketWriteData(
- struct adapter *pAdapter,
- u8 efuseType,
- u16 *pAddr,
- struct pgpkt *pTargetPkt,
- bool bPseudoTest);
-
-void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 bWrite, u8 PwrState)
+void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 PwrState)
{
- u8 tempval;
u16 tmpV16;
if (PwrState) {
@@ -700,31 +669,15 @@ void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 bWrite, u8 PwrState)
tmpV16 |= (LOADER_CLK_EN | ANA8M);
rtw_write16(pAdapter, REG_SYS_CLKR, tmpV16);
}
-
- if (bWrite) {
- /* Enable LDO 2.5V before read/write action */
- tempval = rtw_read8(pAdapter, EFUSE_TEST + 3);
- tempval &= 0x0F;
- tempval |= (VOLTAGE_V25 << 4);
- rtw_write8(pAdapter, EFUSE_TEST + 3, (tempval | 0x80));
- }
} else {
rtw_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
-
- if (bWrite) {
- /* Disable LDO 2.5V after read/write action */
- tempval = rtw_read8(pAdapter, EFUSE_TEST + 3);
- rtw_write8(pAdapter, EFUSE_TEST + 3, (tempval & 0x7F));
- }
}
}
static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
u16 _offset,
u16 _size_byte,
- u8 *pbuf,
- bool bPseudoTest
- )
+ u8 *pbuf)
{
u8 *efuseTbl = NULL;
u8 rtemp8[1];
@@ -764,7 +717,7 @@ static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
/* 1. Read the first byte to check if efuse is empty!!! */
/* */
/* */
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
if (*rtemp8 != 0xFF) {
efuse_utilized++;
eFuse_Addr++;
@@ -781,11 +734,11 @@ static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
if ((*rtemp8 & 0x1F) == 0x0F) { /* extended header */
u1temp = ((*rtemp8 & 0xE0) >> 5);
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
if ((*rtemp8 & 0x0F) == 0x0F) {
eFuse_Addr++;
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
if (*rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E))
eFuse_Addr++;
@@ -806,13 +759,13 @@ static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
/* Check word enable condition in the section */
if (!(wren & 0x01)) {
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
eFuse_Addr++;
efuse_utilized++;
eFuseWord[offset][i] = (*rtemp8 & 0xff);
if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
break;
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
eFuse_Addr++;
efuse_utilized++;
eFuseWord[offset][i] |= (((u16)*rtemp8 << 8) & 0xff00);
@@ -824,7 +777,7 @@ static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
}
/* Read next PG header */
- ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8);
if (*rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
efuse_utilized++;
@@ -852,804 +805,37 @@ exit:
kfree(eFuseWord);
}
-static void ReadEFuseByIC(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool bPseudoTest)
-{
- if (!bPseudoTest) {
- int ret = _FAIL;
- if (rtw_IOL_applied(Adapter)) {
- rtl8188eu_InitPowerOn(Adapter);
-
- iol_mode_enable(Adapter, 1);
- ret = iol_read_efuse(Adapter, 0, _offset, _size_byte, pbuf);
- iol_mode_enable(Adapter, 0);
-
- if (_SUCCESS == ret)
- goto exit;
- }
- }
- Hal_EfuseReadEFuse88E(Adapter, _offset, _size_byte, pbuf, bPseudoTest);
-
-exit:
- return;
-}
-
-static void ReadEFuse_Pseudo(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool bPseudoTest)
+static void ReadEFuseByIC(struct adapter *Adapter, u16 _offset, u16 _size_byte, u8 *pbuf)
{
- Hal_EfuseReadEFuse88E(Adapter, _offset, _size_byte, pbuf, bPseudoTest);
-}
-
-void rtl8188e_ReadEFuse(struct adapter *Adapter, u8 efuseType,
- u16 _offset, u16 _size_byte, u8 *pbuf,
- bool bPseudoTest)
-{
- if (bPseudoTest)
- ReadEFuse_Pseudo(Adapter, efuseType, _offset, _size_byte, pbuf, bPseudoTest);
- else
- ReadEFuseByIC(Adapter, efuseType, _offset, _size_byte, pbuf, bPseudoTest);
-}
-
-/* Do not support BT */
-static void Hal_EFUSEGetEfuseDefinition88E(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut)
-{
- switch (type) {
- case TYPE_EFUSE_MAX_SECTION:
- {
- u8 *pMax_section;
- pMax_section = (u8 *)pOut;
- *pMax_section = EFUSE_MAX_SECTION_88E;
- }
- break;
- case TYPE_EFUSE_REAL_CONTENT_LEN:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
- }
- break;
- case TYPE_EFUSE_CONTENT_LEN_BANK:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
- }
- break;
- case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E - EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E - EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- case TYPE_EFUSE_MAP_LEN:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = (u16)EFUSE_MAP_LEN_88E;
- }
- break;
- case TYPE_EFUSE_PROTECT_BYTES_BANK:
- {
- u8 *pu1Tmp;
- pu1Tmp = (u8 *)pOut;
- *pu1Tmp = (u8)(EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- default:
- {
- u8 *pu1Tmp;
- pu1Tmp = (u8 *)pOut;
- *pu1Tmp = 0;
- }
- break;
- }
-}
-
-static void Hal_EFUSEGetEfuseDefinition_Pseudo88E(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut)
-{
- switch (type) {
- case TYPE_EFUSE_MAX_SECTION:
- {
- u8 *pMax_section;
- pMax_section = (u8 *)pOut;
- *pMax_section = EFUSE_MAX_SECTION_88E;
- }
- break;
- case TYPE_EFUSE_REAL_CONTENT_LEN:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
- }
- break;
- case TYPE_EFUSE_CONTENT_LEN_BANK:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
- }
- break;
- case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E - EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E - EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- case TYPE_EFUSE_MAP_LEN:
- {
- u16 *pu2Tmp;
- pu2Tmp = (u16 *)pOut;
- *pu2Tmp = (u16)EFUSE_MAP_LEN_88E;
- }
- break;
- case TYPE_EFUSE_PROTECT_BYTES_BANK:
- {
- u8 *pu1Tmp;
- pu1Tmp = (u8 *)pOut;
- *pu1Tmp = (u8)(EFUSE_OOB_PROTECT_BYTES_88E);
- }
- break;
- default:
- {
- u8 *pu1Tmp;
- pu1Tmp = (u8 *)pOut;
- *pu1Tmp = 0;
- }
- break;
- }
-}
-
-void rtl8188e_EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut, bool bPseudoTest)
-{
- if (bPseudoTest)
- Hal_EFUSEGetEfuseDefinition_Pseudo88E(pAdapter, efuseType, type, pOut);
- else
- Hal_EFUSEGetEfuseDefinition88E(pAdapter, efuseType, type, pOut);
-}
-
-static u8 Hal_EfuseWordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest)
-{
- u16 tmpaddr = 0;
- u16 start_addr = efuse_addr;
- u8 badworden = 0x0F;
- u8 tmpdata[8];
-
- memset((void *)tmpdata, 0xff, PGPKT_DATA_SIZE);
-
- if (!(word_en & BIT(0))) {
- tmpaddr = start_addr;
- efuse_OneByteWrite(pAdapter, start_addr++, data[0], bPseudoTest);
- efuse_OneByteWrite(pAdapter, start_addr++, data[1], bPseudoTest);
-
- efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[0], bPseudoTest);
- efuse_OneByteRead(pAdapter, tmpaddr + 1, &tmpdata[1], bPseudoTest);
- if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1]))
- badworden &= (~BIT(0));
- }
- if (!(word_en & BIT(1))) {
- tmpaddr = start_addr;
- efuse_OneByteWrite(pAdapter, start_addr++, data[2], bPseudoTest);
- efuse_OneByteWrite(pAdapter, start_addr++, data[3], bPseudoTest);
-
- efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[2], bPseudoTest);
- efuse_OneByteRead(pAdapter, tmpaddr + 1, &tmpdata[3], bPseudoTest);
- if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3]))
- badworden &= (~BIT(1));
- }
- if (!(word_en & BIT(2))) {
- tmpaddr = start_addr;
- efuse_OneByteWrite(pAdapter, start_addr++, data[4], bPseudoTest);
- efuse_OneByteWrite(pAdapter, start_addr++, data[5], bPseudoTest);
-
- efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[4], bPseudoTest);
- efuse_OneByteRead(pAdapter, tmpaddr + 1, &tmpdata[5], bPseudoTest);
- if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5]))
- badworden &= (~BIT(2));
- }
- if (!(word_en & BIT(3))) {
- tmpaddr = start_addr;
- efuse_OneByteWrite(pAdapter, start_addr++, data[6], bPseudoTest);
- efuse_OneByteWrite(pAdapter, start_addr++, data[7], bPseudoTest);
-
- efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[6], bPseudoTest);
- efuse_OneByteRead(pAdapter, tmpaddr + 1, &tmpdata[7], bPseudoTest);
- if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7]))
- badworden &= (~BIT(3));
- }
- return badworden;
-}
-
-static u8 Hal_EfuseWordEnableDataWrite_Pseudo(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest)
-{
- u8 ret;
-
- ret = Hal_EfuseWordEnableDataWrite(pAdapter, efuse_addr, word_en, data, bPseudoTest);
- return ret;
-}
-
-static u8 rtl8188e_Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest)
-{
- u8 ret = 0;
-
- if (bPseudoTest)
- ret = Hal_EfuseWordEnableDataWrite_Pseudo(pAdapter, efuse_addr, word_en, data, bPseudoTest);
- else
- ret = Hal_EfuseWordEnableDataWrite(pAdapter, efuse_addr, word_en, data, bPseudoTest);
- return ret;
-}
-
-static u16 hal_EfuseGetCurrentSize_8188e(struct adapter *pAdapter, bool bPseudoTest)
-{
- int bContinual = true;
- u16 efuse_addr = 0;
- u8 hworden = 0;
- u8 efuse_data, word_cnts = 0;
-
- if (bPseudoTest)
- efuse_addr = (u16)(fakeEfuseUsedBytes);
- else
- GetHwReg8188EU(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
-
- while (bContinual &&
- efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest) &&
- AVAILABLE_EFUSE_ADDR(efuse_addr)) {
- if (efuse_data != 0xFF) {
- if ((efuse_data & 0x1F) == 0x0F) { /* extended header */
- efuse_addr++;
- efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest);
- if ((efuse_data & 0x0F) == 0x0F) {
- efuse_addr++;
- continue;
- } else {
- hworden = efuse_data & 0x0F;
- }
- } else {
- hworden = efuse_data & 0x0F;
- }
- word_cnts = Efuse_CalculateWordCnts(hworden);
- /* read next header */
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- } else {
- bContinual = false;
- }
- }
-
- if (bPseudoTest)
- fakeEfuseUsedBytes = efuse_addr;
- else
- SetHwReg8188EU(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
-
- return efuse_addr;
-}
-
-static u16 Hal_EfuseGetCurrentSize_Pseudo(struct adapter *pAdapter, bool bPseudoTest)
-{
- u16 ret = 0;
-
- ret = hal_EfuseGetCurrentSize_8188e(pAdapter, bPseudoTest);
- return ret;
-}
-
-u16 rtl8188e_EfuseGetCurrentSize(struct adapter *pAdapter, u8 efuseType, bool bPseudoTest)
-{
- u16 ret = 0;
-
- if (bPseudoTest)
- ret = Hal_EfuseGetCurrentSize_Pseudo(pAdapter, bPseudoTest);
- else
- ret = hal_EfuseGetCurrentSize_8188e(pAdapter, bPseudoTest);
- return ret;
-}
-
-static int hal_EfusePgPacketRead_8188e(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
-{
- u8 ReadState = PG_STATE_HEADER;
- int bContinual = true;
- int bDataEmpty = true;
- u8 efuse_data, word_cnts = 0;
- u16 efuse_addr = 0;
- u8 hoffset = 0, hworden = 0;
- u8 tmpidx = 0;
- u8 tmpdata[8];
- u8 max_section = 0;
- u8 tmp_header = 0;
-
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION, (void *)&max_section, bPseudoTest);
-
- if (!data)
- return false;
- if (offset > max_section)
- return false;
-
- memset((void *)data, 0xff, sizeof(u8) * PGPKT_DATA_SIZE);
- memset((void *)tmpdata, 0xff, sizeof(u8) * PGPKT_DATA_SIZE);
-
- /* <Roger_TODO> Efuse has been pre-programmed dummy 5Bytes at the end of Efuse by CP. */
- /* Skip dummy parts to prevent unexpected data read from Efuse. */
- /* By pass right now. 2009.02.19. */
- while (bContinual && AVAILABLE_EFUSE_ADDR(efuse_addr)) {
- /* Header Read ------------- */
- if (ReadState & PG_STATE_HEADER) {
- if (efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest) && (efuse_data != 0xFF)) {
- if (EXT_HEADER(efuse_data)) {
- tmp_header = efuse_data;
- efuse_addr++;
- efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest);
- if (!ALL_WORDS_DISABLED(efuse_data)) {
- hoffset = ((tmp_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
- hworden = efuse_data & 0x0F;
- } else {
- DBG_88E("Error, All words disabled\n");
- efuse_addr++;
- continue;
- }
- } else {
- hoffset = (efuse_data >> 4) & 0x0F;
- hworden = efuse_data & 0x0F;
- }
- word_cnts = Efuse_CalculateWordCnts(hworden);
- bDataEmpty = true;
-
- if (hoffset == offset) {
- for (tmpidx = 0; tmpidx < word_cnts * 2; tmpidx++) {
- if (efuse_OneByteRead(pAdapter, efuse_addr + 1 + tmpidx, &efuse_data, bPseudoTest)) {
- tmpdata[tmpidx] = efuse_data;
- if (efuse_data != 0xff)
- bDataEmpty = false;
- }
- }
- if (!bDataEmpty) {
- ReadState = PG_STATE_DATA;
- } else {/* read next header */
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- ReadState = PG_STATE_HEADER;
- }
- } else {/* read next header */
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- ReadState = PG_STATE_HEADER;
- }
- } else {
- bContinual = false;
- }
- } else if (ReadState & PG_STATE_DATA) {
- /* Data section Read ------------- */
- efuse_WordEnableDataRead(hworden, tmpdata, data);
- efuse_addr = efuse_addr + (word_cnts * 2) + 1;
- ReadState = PG_STATE_HEADER;
- }
-
- }
-
- if ((data[0] == 0xff) && (data[1] == 0xff) && (data[2] == 0xff) && (data[3] == 0xff) &&
- (data[4] == 0xff) && (data[5] == 0xff) && (data[6] == 0xff) && (data[7] == 0xff))
- return false;
- else
- return true;
-}
-
-static int Hal_EfusePgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
-{
- int ret;
-
- ret = hal_EfusePgPacketRead_8188e(pAdapter, offset, data, bPseudoTest);
- return ret;
-}
-
-static int Hal_EfusePgPacketRead_Pseudo(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
-{
- int ret;
-
- ret = hal_EfusePgPacketRead_8188e(pAdapter, offset, data, bPseudoTest);
- return ret;
-}
-
-int rtl8188e_Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
-{
- int ret;
-
- if (bPseudoTest)
- ret = Hal_EfusePgPacketRead_Pseudo(pAdapter, offset, data, bPseudoTest);
- else
- ret = Hal_EfusePgPacketRead(pAdapter, offset, data, bPseudoTest);
- return ret;
-}
-
-static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, struct pgpkt *pFixPkt, u16 *pAddr, bool bPseudoTest)
-{
- u8 originaldata[8], badworden = 0;
- u16 efuse_addr = *pAddr;
- u32 PgWriteSuccess = 0;
-
- memset((void *)originaldata, 0xff, 8);
-
- if (rtl8188e_Efuse_PgPacketRead(pAdapter, pFixPkt->offset, originaldata, bPseudoTest)) {
- /* check if data exist */
- badworden = rtl8188e_Efuse_WordEnableDataWrite(pAdapter, efuse_addr + 1, pFixPkt->word_en, originaldata, bPseudoTest);
-
- if (badworden != 0xf) { /* write fail */
- PgWriteSuccess = rtl8188e_Efuse_PgPacketWrite(pAdapter, pFixPkt->offset, badworden, originaldata, bPseudoTest);
-
- if (!PgWriteSuccess)
- return false;
- else
- efuse_addr = rtl8188e_EfuseGetCurrentSize(pAdapter, efuseType, bPseudoTest);
- } else {
- efuse_addr = efuse_addr + (pFixPkt->word_cnts * 2) + 1;
- }
- } else {
- efuse_addr = efuse_addr + (pFixPkt->word_cnts * 2) + 1;
- }
- *pAddr = efuse_addr;
- return true;
-}
-
-static bool hal_EfusePgPacketWrite2ByteHeader(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
-{
- bool bRet = false;
- u16 efuse_addr = *pAddr, efuse_max_available_len = 0;
- u8 pg_header = 0, tmp_header = 0, pg_header_temp = 0;
- u8 repeatcnt = 0;
-
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len, bPseudoTest);
-
- while (efuse_addr < efuse_max_available_len) {
- pg_header = ((pTargetPkt->offset & 0x07) << 5) | 0x0F;
- efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
- efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
-
- while (tmp_header == 0xFF) {
- if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_)
- return false;
-
- efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
- efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
- }
-
- /* to write ext_header */
- if (tmp_header == pg_header) {
- efuse_addr++;
- pg_header_temp = pg_header;
- pg_header = ((pTargetPkt->offset & 0x78) << 1) | pTargetPkt->word_en;
-
- efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
- efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
-
- while (tmp_header == 0xFF) {
- if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_)
- return false;
-
- efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
- efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
- }
-
- if ((tmp_header & 0x0F) == 0x0F) { /* word_en PG fail */
- if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_) {
- return false;
- } else {
- efuse_addr++;
- continue;
- }
- } else if (pg_header != tmp_header) { /* offset PG fail */
- struct pgpkt fixPkt;
- fixPkt.offset = ((pg_header_temp & 0xE0) >> 5) | ((tmp_header & 0xF0) >> 1);
- fixPkt.word_en = tmp_header & 0x0F;
- fixPkt.word_cnts = Efuse_CalculateWordCnts(fixPkt.word_en);
- if (!hal_EfuseFixHeaderProcess(pAdapter, efuseType, &fixPkt, &efuse_addr, bPseudoTest))
- return false;
- } else {
- bRet = true;
- break;
- }
- } else if ((tmp_header & 0x1F) == 0x0F) { /* wrong extended header */
- efuse_addr += 2;
- continue;
- }
- }
-
- *pAddr = efuse_addr;
- return bRet;
-}
-
-static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
-{
- bool bRet = false;
- u8 pg_header = 0, tmp_header = 0;
- u16 efuse_addr = *pAddr;
- u8 repeatcnt = 0;
-
- pg_header = ((pTargetPkt->offset << 4) & 0xf0) | pTargetPkt->word_en;
-
- efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
- efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
-
- while (tmp_header == 0xFF) {
- if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_)
- return false;
- efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
- efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
- }
-
- if (pg_header == tmp_header) {
- bRet = true;
- } else {
- struct pgpkt fixPkt;
- fixPkt.offset = (tmp_header >> 4) & 0x0F;
- fixPkt.word_en = tmp_header & 0x0F;
- fixPkt.word_cnts = Efuse_CalculateWordCnts(fixPkt.word_en);
- if (!hal_EfuseFixHeaderProcess(pAdapter, efuseType, &fixPkt, &efuse_addr, bPseudoTest))
- return false;
- }
-
- *pAddr = efuse_addr;
- return bRet;
-}
-
-static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
-{
- u16 efuse_addr = *pAddr;
- u8 badworden;
- u32 PgWriteSuccess = 0;
-
- badworden = rtl8188e_Efuse_WordEnableDataWrite(pAdapter, efuse_addr + 1, pTargetPkt->word_en, pTargetPkt->data, bPseudoTest);
- if (badworden == 0x0F) {
- /* write ok */
- return true;
- } else {
- /* reorganize other pg packet */
- PgWriteSuccess = rtl8188e_Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
- if (!PgWriteSuccess)
- return false;
- else
- return true;
- }
-}
-
-static bool
-hal_EfusePgPacketWriteHeader(
- struct adapter *pAdapter,
- u8 efuseType,
- u16 *pAddr,
- struct pgpkt *pTargetPkt,
- bool bPseudoTest)
-{
- bool bRet = false;
-
- if (pTargetPkt->offset >= EFUSE_MAX_SECTION_BASE)
- bRet = hal_EfusePgPacketWrite2ByteHeader(pAdapter, efuseType, pAddr, pTargetPkt, bPseudoTest);
- else
- bRet = hal_EfusePgPacketWrite1ByteHeader(pAdapter, efuseType, pAddr, pTargetPkt, bPseudoTest);
-
- return bRet;
-}
-
-static bool wordEnMatched(struct pgpkt *pTargetPkt, struct pgpkt *pCurPkt,
- u8 *pWden)
-{
- u8 match_word_en = 0x0F; /* default all words are disabled */
-
- /* check if the same words are enabled both target and current PG packet */
- if (((pTargetPkt->word_en & BIT(0)) == 0) &&
- ((pCurPkt->word_en & BIT(0)) == 0))
- match_word_en &= ~BIT(0); /* enable word 0 */
- if (((pTargetPkt->word_en & BIT(1)) == 0) &&
- ((pCurPkt->word_en & BIT(1)) == 0))
- match_word_en &= ~BIT(1); /* enable word 1 */
- if (((pTargetPkt->word_en & BIT(2)) == 0) &&
- ((pCurPkt->word_en & BIT(2)) == 0))
- match_word_en &= ~BIT(2); /* enable word 2 */
- if (((pTargetPkt->word_en & BIT(3)) == 0) &&
- ((pCurPkt->word_en & BIT(3)) == 0))
- match_word_en &= ~BIT(3); /* enable word 3 */
-
- *pWden = match_word_en;
-
- if (match_word_en != 0xf)
- return true;
- else
- return false;
-}
-
-static bool hal_EfuseCheckIfDatafollowed(struct adapter *pAdapter, u8 word_cnts, u16 startAddr, bool bPseudoTest)
-{
- bool bRet = false;
- u8 i, efuse_data;
-
- for (i = 0; i < (word_cnts * 2); i++) {
- if (efuse_OneByteRead(pAdapter, (startAddr + i), &efuse_data, bPseudoTest) && (efuse_data != 0xFF))
- bRet = true;
- }
- return bRet;
-}
-
-static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
-{
- bool bRet = false;
- u8 i, efuse_data = 0, cur_header = 0;
- u8 matched_wden = 0, badworden = 0;
- u16 startAddr = 0, efuse_max_available_len = 0, efuse_max = 0;
- struct pgpkt curPkt;
-
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len, bPseudoTest);
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&efuse_max, bPseudoTest);
-
- if (efuseType == EFUSE_WIFI) {
- if (bPseudoTest) {
- startAddr = (u16)(fakeEfuseUsedBytes % EFUSE_REAL_CONTENT_LEN);
- } else {
- GetHwReg8188EU(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&startAddr);
- startAddr %= EFUSE_REAL_CONTENT_LEN;
- }
- } else {
- if (bPseudoTest)
- startAddr = (u16)(fakeBTEfuseUsedBytes % EFUSE_REAL_CONTENT_LEN);
- else
- startAddr = (u16)(BTEfuseUsedBytes % EFUSE_REAL_CONTENT_LEN);
- }
+ int ret = _FAIL;
+ if (rtw_IOL_applied(Adapter)) {
+ rtl8188eu_InitPowerOn(Adapter);
- while (1) {
- if (startAddr >= efuse_max_available_len) {
- bRet = false;
- break;
- }
+ iol_mode_enable(Adapter, 1);
+ ret = iol_read_efuse(Adapter, 0, _offset, _size_byte, pbuf);
+ iol_mode_enable(Adapter, 0);
- if (efuse_OneByteRead(pAdapter, startAddr, &efuse_data, bPseudoTest) && (efuse_data != 0xFF)) {
- if (EXT_HEADER(efuse_data)) {
- cur_header = efuse_data;
- startAddr++;
- efuse_OneByteRead(pAdapter, startAddr, &efuse_data, bPseudoTest);
- if (ALL_WORDS_DISABLED(efuse_data)) {
- bRet = false;
- break;
- } else {
- curPkt.offset = ((cur_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
- curPkt.word_en = efuse_data & 0x0F;
- }
- } else {
- cur_header = efuse_data;
- curPkt.offset = (cur_header >> 4) & 0x0F;
- curPkt.word_en = cur_header & 0x0F;
- }
-
- curPkt.word_cnts = Efuse_CalculateWordCnts(curPkt.word_en);
- /* if same header is found but no data followed */
- /* write some part of data followed by the header. */
- if ((curPkt.offset == pTargetPkt->offset) &&
- (!hal_EfuseCheckIfDatafollowed(pAdapter, curPkt.word_cnts, startAddr + 1, bPseudoTest)) &&
- wordEnMatched(pTargetPkt, &curPkt, &matched_wden)) {
- /* Here to write partial data */
- badworden = rtl8188e_Efuse_WordEnableDataWrite(pAdapter, startAddr + 1, matched_wden, pTargetPkt->data, bPseudoTest);
- if (badworden != 0x0F) {
- u32 PgWriteSuccess = 0;
- /* if write fail on some words, write these bad words again */
-
- PgWriteSuccess = rtl8188e_Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
-
- if (!PgWriteSuccess) {
- bRet = false; /* write fail, return */
- break;
- }
- }
- /* partial write ok, update the target packet for later use */
- for (i = 0; i < 4; i++) {
- if ((matched_wden & (0x1 << i)) == 0) /* this word has been written */
- pTargetPkt->word_en |= (0x1 << i); /* disable the word */
- }
- pTargetPkt->word_cnts = Efuse_CalculateWordCnts(pTargetPkt->word_en);
- }
- /* read from next header */
- startAddr = startAddr + (curPkt.word_cnts * 2) + 1;
- } else {
- /* not used header, 0xff */
- *pAddr = startAddr;
- bRet = true;
- break;
- }
+ if (_SUCCESS == ret)
+ return;
}
- return bRet;
-}
-
-static bool
-hal_EfusePgCheckAvailableAddr(
- struct adapter *pAdapter,
- u8 efuseType,
- bool bPseudoTest
- )
-{
- u16 efuse_max_available_len = 0;
- /* Change to check TYPE_EFUSE_MAP_LEN , because 8188E raw 256, logic map over 256. */
- rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&efuse_max_available_len, false);
-
- if (rtl8188e_EfuseGetCurrentSize(pAdapter, efuseType, bPseudoTest) >= efuse_max_available_len)
- return false;
- return true;
-}
-
-static void hal_EfuseConstructPGPkt(u8 offset, u8 word_en, u8 *pData, struct pgpkt *pTargetPkt)
-{
- memset((void *)pTargetPkt->data, 0xFF, sizeof(u8) * 8);
- pTargetPkt->offset = offset;
- pTargetPkt->word_en = word_en;
- efuse_WordEnableDataRead(word_en, pData, pTargetPkt->data);
- pTargetPkt->word_cnts = Efuse_CalculateWordCnts(pTargetPkt->word_en);
+ Hal_EfuseReadEFuse88E(Adapter, _offset, _size_byte, pbuf);
}
-static bool hal_EfusePgPacketWrite_8188e(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *pData, bool bPseudoTest)
+void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _offset, u16 _size_byte, u8 *pbuf)
{
- struct pgpkt targetPkt;
- u16 startAddr = 0;
- u8 efuseType = EFUSE_WIFI;
-
- if (!hal_EfusePgCheckAvailableAddr(pAdapter, efuseType, bPseudoTest))
- return false;
-
- hal_EfuseConstructPGPkt(offset, word_en, pData, &targetPkt);
-
- if (!hal_EfusePartialWriteCheck(pAdapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
- return false;
-
- if (!hal_EfusePgPacketWriteHeader(pAdapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
- return false;
-
- if (!hal_EfusePgPacketWriteData(pAdapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
- return false;
-
- return true;
-}
-
-static int Hal_EfusePgPacketWrite_Pseudo(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
-{
- int ret;
-
- ret = hal_EfusePgPacketWrite_8188e(pAdapter, offset, word_en, data, bPseudoTest);
- return ret;
-}
-
-static int Hal_EfusePgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
-{
- int ret = 0;
- ret = hal_EfusePgPacketWrite_8188e(pAdapter, offset, word_en, data, bPseudoTest);
-
- return ret;
-}
-
-int rtl8188e_Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
-{
- int ret;
-
- if (bPseudoTest)
- ret = Hal_EfusePgPacketWrite_Pseudo(pAdapter, offset, word_en, data, bPseudoTest);
- else
- ret = Hal_EfusePgPacketWrite(pAdapter, offset, word_en, data, bPseudoTest);
- return ret;
+ ReadEFuseByIC(Adapter, _offset, _size_byte, pbuf);
}
void rtl8188e_read_chip_version(struct adapter *padapter)
{
u32 value32;
struct HAL_VERSION ChipVersion;
- struct hal_data_8188e *pHalData;
-
- pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = &padapter->haldata;
value32 = rtw_read32(padapter, REG_SYS_CFG);
ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
- ChipVersion.RFType = RF_TYPE_1T1R;
ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK) >> CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
ChipVersion.ROMVer = 0; /* ROM code version. */
@@ -1657,39 +843,21 @@ void rtl8188e_read_chip_version(struct adapter *padapter)
dump_chip_info(ChipVersion);
pHalData->VersionID = ChipVersion;
-
- pHalData->rf_type = RF_1T1R;
-
- MSG_88E("RF_Type is %x!!\n", pHalData->rf_type);
}
-void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
+void rtl8188e_SetHalODMVar(struct adapter *Adapter, void *pValue1, bool bSet)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
struct odm_dm_struct *podmpriv = &pHalData->odmpriv;
- switch (eVariable) {
- case HAL_ODM_STA_INFO:
- {
- struct sta_info *psta = (struct sta_info *)pValue1;
-
- if (bSet) {
- DBG_88E("### Set STA_(%d) info\n", psta->mac_id);
- podmpriv->pODM_StaInfo[psta->mac_id] = psta;
- ODM_RAInfo_Init(podmpriv, psta->mac_id);
- } else {
- DBG_88E("### Clean STA_(%d) info\n", psta->mac_id);
- podmpriv->pODM_StaInfo[psta->mac_id] = NULL;
- }
- }
- break;
- case HAL_ODM_P2P_STATE:
- ODM_CmnInfoUpdate(podmpriv, ODM_CMNINFO_WIFI_DIRECT, bSet);
- break;
- case HAL_ODM_WIFI_DISPLAY_STATE:
- ODM_CmnInfoUpdate(podmpriv, ODM_CMNINFO_WIFI_DISPLAY, bSet);
- break;
- default:
- break;
+ struct sta_info *psta = (struct sta_info *)pValue1;
+
+ if (bSet) {
+ DBG_88E("### Set STA_(%d) info\n", psta->mac_id);
+ podmpriv->pODM_StaInfo[psta->mac_id] = psta;
+ ODM_RAInfo_Init(podmpriv, psta->mac_id);
+ } else {
+ DBG_88E("### Clean STA_(%d) info\n", psta->mac_id);
+ podmpriv->pODM_StaInfo[psta->mac_id] = NULL;
}
}
@@ -1713,7 +881,8 @@ u8 GetEEPROMSize8188E(struct adapter *padapter)
/* 6: EEPROM used is 93C46, 4: boot from E-Fuse. */
size = (cr & BOOT_FROM_EEPROM) ? 6 : 4;
- MSG_88E("EEPROM type is %s\n", size == 4 ? "E-FUSE" : "93C46");
+ netdev_dbg(padapter->pnetdev, "EEPROM type is %s\n",
+ size == 4 ? "E-FUSE" : "93C46");
return size;
}
@@ -1787,23 +956,6 @@ s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
}
void
-Hal_InitPGData88E(struct adapter *padapter)
-{
- struct eeprom_priv *pEEPROM = &padapter->eeprompriv;
-
- if (!pEEPROM->bautoload_fail_flag) { /* autoload OK. */
- if (!is_boot_from_eeprom(padapter)) {
- /* Read EFUSE real map to shadow. */
- EFUSE_ShadowMapUpdate(padapter, EFUSE_WIFI, false);
- }
- } else {/* autoload fail */
- /* update to default value 0xFF */
- if (!is_boot_from_eeprom(padapter))
- EFUSE_ShadowMapUpdate(padapter, EFUSE_WIFI, false);
- }
-}
-
-void
Hal_EfuseParseIDCode88E(
struct adapter *padapter,
u8 *hwinfo
@@ -1942,22 +1094,16 @@ static void hal_get_chnl_group_88e(u8 chnl, u8 *group)
void Hal_ReadPowerSavingMode88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
{
if (AutoLoadFail) {
- padapter->pwrctrlpriv.bHWPowerdown = false;
padapter->pwrctrlpriv.bSupportRemoteWakeup = false;
} else {
/* hw power down mode selection , 0:rf-off / 1:power down */
- if (padapter->registrypriv.hwpdn_mode == 2)
- padapter->pwrctrlpriv.bHWPowerdown = (hwinfo[EEPROM_RF_FEATURE_OPTION_88E] & BIT(4));
- else
- padapter->pwrctrlpriv.bHWPowerdown = padapter->registrypriv.hwpdn_mode;
-
/* decide hw if support remote wakeup function */
/* if hw supported, 8051 (SIE) will generate WakeUp signal (D+/D- toggle) on autoresume */
padapter->pwrctrlpriv.bSupportRemoteWakeup = (hwinfo[EEPROM_USB_OPTIONAL_FUNCTION0] & BIT(1)) ? true : false;
- DBG_88E("%s...bHWPowerdown(%x) , bSupportRemoteWakeup(%x)\n", __func__,
- padapter->pwrctrlpriv.bHWPowerdown, padapter->pwrctrlpriv.bSupportRemoteWakeup);
+ DBG_88E("%s , bSupportRemoteWakeup(%x)\n", __func__,
+ padapter->pwrctrlpriv.bSupportRemoteWakeup);
DBG_88E("### PS params => power_mgnt(%x), usbss_enable(%x) ###\n", padapter->registrypriv.power_mgnt, padapter->registrypriv.usbss_enable);
}
@@ -1965,40 +1111,32 @@ void Hal_ReadPowerSavingMode88E(struct adapter *padapter, u8 *hwinfo, bool AutoL
void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool AutoLoadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = &padapter->haldata;
struct txpowerinfo24g pwrInfo24G;
- u8 rfPath = 0;
u8 ch, group;
u8 TxCount;
Hal_ReadPowerValueFromPROM_8188E(&pwrInfo24G, PROMContent, AutoLoadFail);
- if (!AutoLoadFail)
- pHalData->bTXPowerDataReadFromEEPORM = true;
-
for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
hal_get_chnl_group_88e(ch, &group);
- pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group];
+ pHalData->Index24G_CCK_Base[ch] = pwrInfo24G.IndexCCK_Base[0][group];
if (ch == 14)
- pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][4];
+ pHalData->Index24G_BW40_Base[ch] = pwrInfo24G.IndexBW40_Base[0][4];
else
- pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group];
+ pHalData->Index24G_BW40_Base[ch] = pwrInfo24G.IndexBW40_Base[0][group];
- DBG_88E("======= Path %d, Channel %d =======\n", rfPath, ch);
- DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", rfPath, ch, pHalData->Index24G_CCK_Base[rfPath][ch]);
- DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", rfPath, ch, pHalData->Index24G_BW40_Base[rfPath][ch]);
+ DBG_88E("======= Path 0, Channel %d =======\n", ch);
+ DBG_88E("Index24G_CCK_Base[%d] = 0x%x\n", ch, pHalData->Index24G_CCK_Base[ch]);
+ DBG_88E("Index24G_BW40_Base[%d] = 0x%x\n", ch, pHalData->Index24G_BW40_Base[ch]);
}
for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
- pHalData->CCK_24G_Diff[rfPath][TxCount] = pwrInfo24G.CCK_Diff[rfPath][TxCount];
- pHalData->OFDM_24G_Diff[rfPath][TxCount] = pwrInfo24G.OFDM_Diff[rfPath][TxCount];
- pHalData->BW20_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW20_Diff[rfPath][TxCount];
- pHalData->BW40_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW40_Diff[rfPath][TxCount];
+ pHalData->OFDM_24G_Diff[TxCount] = pwrInfo24G.OFDM_Diff[0][TxCount];
+ pHalData->BW20_24G_Diff[TxCount] = pwrInfo24G.BW20_Diff[0][TxCount];
DBG_88E("======= TxCount %d =======\n", TxCount);
- DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->CCK_24G_Diff[rfPath][TxCount]);
- DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->OFDM_24G_Diff[rfPath][TxCount]);
- DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW20_24G_Diff[rfPath][TxCount]);
- DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW40_24G_Diff[rfPath][TxCount]);
+ DBG_88E("OFDM_24G_Diff[%d] = %d\n", TxCount, pHalData->OFDM_24G_Diff[TxCount]);
+ DBG_88E("BW20_24G_Diff[%d] = %d\n", TxCount, pHalData->BW20_24G_Diff[TxCount]);
}
/* 2010/10/19 MH Add Regulator recognize for CU. */
@@ -2014,7 +1152,7 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct hal_data_8188e *pHalData = &pAdapter->haldata;
if (!AutoLoadFail) {
pHalData->CrystalCap = hwinfo[EEPROM_XTAL_88E];
@@ -2026,30 +1164,6 @@ void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoa
DBG_88E("CrystalCap: 0x%2x\n", pHalData->CrystalCap);
}
-void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
-
- if (!AutoLoadFail)
- pHalData->BoardType = ((hwinfo[EEPROM_RF_BOARD_OPTION_88E] & 0xE0) >> 5);
- else
- pHalData->BoardType = 0;
- DBG_88E("Board Type: 0x%2x\n", pHalData->BoardType);
-}
-
-void Hal_EfuseParseEEPROMVer88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
-
- if (!AutoLoadFail) {
- pHalData->EEPROMVersion = hwinfo[EEPROM_VERSION_88E];
- if (pHalData->EEPROMVersion == 0xFF)
- pHalData->EEPROMVersion = EEPROM_Default_Version;
- } else {
- pHalData->EEPROMVersion = 1;
- }
-}
-
void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
{
padapter->mlmepriv.ChannelPlan =
@@ -2061,22 +1175,9 @@ void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo, bool Auto
DBG_88E("mlmepriv.ChannelPlan = 0x%02x\n", padapter->mlmepriv.ChannelPlan);
}
-void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
-
- if (!AutoLoadFail) {
- pHalData->EEPROMCustomerID = hwinfo[EEPROM_CUSTOMERID_88E];
- } else {
- pHalData->EEPROMCustomerID = 0;
- pHalData->EEPROMSubCustomerID = 0;
- }
- DBG_88E("EEPROM Customer ID: 0x%2x\n", pHalData->EEPROMCustomerID);
-}
-
void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool AutoLoadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct hal_data_8188e *pHalData = &pAdapter->haldata;
struct registry_priv *registry_par = &pAdapter->registrypriv;
if (!AutoLoadFail) {
@@ -2108,7 +1209,7 @@ void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool
void Hal_ReadThermalMeter_88E(struct adapter *Adapter, u8 *PROMContent, bool AutoloadFail)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
/* ThermalMeter from EEPROM */
if (!AutoloadFail)
@@ -2116,9 +1217,8 @@ void Hal_ReadThermalMeter_88E(struct adapter *Adapter, u8 *PROMContent, bool Aut
else
pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_88E;
- if (pHalData->EEPROMThermalMeter == 0xff || AutoloadFail) {
- pHalData->bAPKThermalMeterIgnore = true;
+ if (pHalData->EEPROMThermalMeter == 0xff || AutoloadFail)
pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_88E;
- }
+
DBG_88E("ThermalMeter = 0x%x\n", pHalData->EEPROMThermalMeter);
}
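
Most of the efuse code deleted above decodes PG (program) packet headers. For
reference, the format the remaining reader still parses: a one-byte header
carries the section offset in its high nibble and word_en in its low nibble; a
two-byte "extended" header is flagged by (b0 & 0x1F) == 0x0F; and each *clear*
bit in word_en enables one two-byte word. A sketch of the decode, matching the
masks and shifts used above (decode_pg_header and the b0/b1 parameters are
illustrative):

	struct pg_header { u8 offset; u8 word_en; };

	static bool decode_pg_header(u8 b0, u8 b1, struct pg_header *h)
	{
		if ((b0 & 0x1F) == 0x0F) {		/* extended two-byte header */
			if ((b1 & 0x0F) == 0x0F)
				return false;		/* all words disabled: skip */
			h->offset = ((b0 & 0xE0) >> 5) | ((b1 & 0xF0) >> 1);
			h->word_en = b1 & 0x0F;
		} else {				/* classic one-byte header */
			h->offset = (b0 >> 4) & 0x0F;
			h->word_en = b0 & 0x0F;
		}
		return true;
	}
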
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
index bb0cda0c16a0..302b15b2874d 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
@@ -143,7 +143,7 @@ phy_RFSerialRead(
)
{
u32 retValue = 0;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
struct bb_reg_def *pPhyReg = &pHalData->PHYRegDef[eRFPath];
u32 NewOffset;
u32 tmplong, tmplong2;
@@ -161,31 +161,31 @@ phy_RFSerialRead(
/* For 92S LSSI Read RFLSSIRead */
/* For RF A/B write 0x824/82c(does not work in the future) */
/* We must use 0x824 for RF A and B to execute read trigger */
- tmplong = PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord);
+ tmplong = rtl8188e_PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord);
if (eRFPath == RF_PATH_A)
tmplong2 = tmplong;
else
- tmplong2 = PHY_QueryBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord);
+ tmplong2 = rtl8188e_PHY_QueryBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord);
tmplong2 = (tmplong2 & (~bLSSIReadAddress)) | (NewOffset << 23) | bLSSIReadEdge; /* T65 RF */
- PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord, tmplong & (~bLSSIReadEdge));
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord, tmplong & (~bLSSIReadEdge));
udelay(10);/* PlatformStallExecution(10); */
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord, tmplong2);
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord, tmplong2);
udelay(100);/* PlatformStallExecution(100); */
udelay(10);/* PlatformStallExecution(10); */
if (eRFPath == RF_PATH_A)
- RfPiEnable = (u8)PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter1, BIT(8));
+ RfPiEnable = (u8)rtl8188e_PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter1, BIT(8));
else if (eRFPath == RF_PATH_B)
- RfPiEnable = (u8)PHY_QueryBBReg(Adapter, rFPGA0_XB_HSSIParameter1, BIT(8));
+ RfPiEnable = (u8)rtl8188e_PHY_QueryBBReg(Adapter, rFPGA0_XB_HSSIParameter1, BIT(8));
if (RfPiEnable) { /* Read from BBreg8b8, 12 bits for 8190, 20bits for T65 RF */
- retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBackPi, bLSSIReadBackData);
+ retValue = rtl8188e_PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBackPi, bLSSIReadBackData);
} else { /* Read from BBreg8a0, 12 bits for 8190, 20 bits for T65 RF */
- retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBack, bLSSIReadBackData);
+ retValue = rtl8188e_PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBack, bLSSIReadBackData);
}
return retValue;
}
@@ -242,7 +242,7 @@ phy_RFSerialWrite(
)
{
u32 DataAndAddr = 0;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
struct bb_reg_def *pPhyReg = &pHalData->PHYRegDef[eRFPath];
u32 NewOffset;
@@ -263,7 +263,7 @@ phy_RFSerialWrite(
/* */
/* Write Operation */
/* */
- PHY_SetBBReg(Adapter, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
}
/**
@@ -355,7 +355,7 @@ rtl8188e_PHY_SetRFReg(
*---------------------------------------------------------------------------*/
s32 PHY_MACConfig8188E(struct adapter *Adapter)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
int rtStatus = _SUCCESS;
/* */
@@ -387,19 +387,15 @@ phy_InitBBRFRegisterDefinition(
struct adapter *Adapter
)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
/* RF Interface Software Control */
pHalData->PHYRegDef[RF_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW; /* 16 LSBs if read 32-bit from 0x870 */
pHalData->PHYRegDef[RF_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW; /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
- pHalData->PHYRegDef[RF_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW;/* 16 LSBs if read 32-bit from 0x874 */
- pHalData->PHYRegDef[RF_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW;/* 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) */
/* RF Interface Readback Value */
pHalData->PHYRegDef[RF_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB; /* 16 LSBs if read 32-bit from 0x8E0 */
pHalData->PHYRegDef[RF_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;/* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */
- pHalData->PHYRegDef[RF_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB;/* 16 LSBs if read 32-bit from 0x8E4 */
- pHalData->PHYRegDef[RF_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB;/* 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) */
/* RF Interface Output (and Enable) */
pHalData->PHYRegDef[RF_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE; /* 16 LSBs if read 32-bit from 0x860 */
@@ -416,14 +412,10 @@ phy_InitBBRFRegisterDefinition(
/* RF parameter */
pHalData->PHYRegDef[RF_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter; /* BB Band Select */
pHalData->PHYRegDef[RF_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter;
- pHalData->PHYRegDef[RF_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter;
- pHalData->PHYRegDef[RF_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter;
/* Tx AGC Gain Stage (same for all path. Should we remove this?) */
pHalData->PHYRegDef[RF_PATH_A].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
pHalData->PHYRegDef[RF_PATH_B].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
- pHalData->PHYRegDef[RF_PATH_C].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
- pHalData->PHYRegDef[RF_PATH_D].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
/* Transceiver A~D HSSI Parameter-1 */
pHalData->PHYRegDef[RF_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; /* wire control parameter1 */
@@ -436,50 +428,34 @@ phy_InitBBRFRegisterDefinition(
/* RF switch Control */
pHalData->PHYRegDef[RF_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl; /* TR/Ant switch control */
pHalData->PHYRegDef[RF_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl;
- pHalData->PHYRegDef[RF_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl;
- pHalData->PHYRegDef[RF_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl;
/* AGC control 1 */
pHalData->PHYRegDef[RF_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1;
pHalData->PHYRegDef[RF_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1;
- pHalData->PHYRegDef[RF_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1;
- pHalData->PHYRegDef[RF_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1;
/* AGC control 2 */
pHalData->PHYRegDef[RF_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2;
pHalData->PHYRegDef[RF_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2;
- pHalData->PHYRegDef[RF_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2;
- pHalData->PHYRegDef[RF_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2;
/* RX AFE control 1 */
pHalData->PHYRegDef[RF_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance;
pHalData->PHYRegDef[RF_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
- pHalData->PHYRegDef[RF_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance;
- pHalData->PHYRegDef[RF_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance;
/* RX AFE control 1 */
pHalData->PHYRegDef[RF_PATH_A].rfRxAFE = rOFDM0_XARxAFE;
pHalData->PHYRegDef[RF_PATH_B].rfRxAFE = rOFDM0_XBRxAFE;
- pHalData->PHYRegDef[RF_PATH_C].rfRxAFE = rOFDM0_XCRxAFE;
- pHalData->PHYRegDef[RF_PATH_D].rfRxAFE = rOFDM0_XDRxAFE;
/* Tx AFE control 1 */
pHalData->PHYRegDef[RF_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance;
pHalData->PHYRegDef[RF_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
- pHalData->PHYRegDef[RF_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance;
- pHalData->PHYRegDef[RF_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance;
/* Tx AFE control 2 */
pHalData->PHYRegDef[RF_PATH_A].rfTxAFE = rOFDM0_XATxAFE;
pHalData->PHYRegDef[RF_PATH_B].rfTxAFE = rOFDM0_XBTxAFE;
- pHalData->PHYRegDef[RF_PATH_C].rfTxAFE = rOFDM0_XCTxAFE;
- pHalData->PHYRegDef[RF_PATH_D].rfTxAFE = rOFDM0_XDTxAFE;
/* Transceiver LSSI Readback SI mode */
pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
pHalData->PHYRegDef[RF_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
- pHalData->PHYRegDef[RF_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack;
- pHalData->PHYRegDef[RF_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack;
/* Transceiver LSSI Readback PI mode */
pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBackPi = TransceiverA_HSPI_Readback;
@@ -488,7 +464,7 @@ phy_InitBBRFRegisterDefinition(
void storePwrIndexDiffRateOffset(struct adapter *Adapter, u32 RegAddr, u32 BitMask, u32 Data)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
if (RegAddr == rTxAGC_A_Rate18_06)
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][0] = Data;
@@ -506,8 +482,7 @@ void storePwrIndexDiffRateOffset(struct adapter *Adapter, u32 RegAddr, u32 BitMa
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][4] = Data;
if (RegAddr == rTxAGC_A_Mcs15_Mcs12) {
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][5] = Data;
- if (pHalData->rf_type == RF_1T1R)
- pHalData->pwrGroupCnt++;
+ pHalData->pwrGroupCnt++;
}
if (RegAddr == rTxAGC_B_Rate18_06)
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][8] = Data;
@@ -523,17 +498,14 @@ void storePwrIndexDiffRateOffset(struct adapter *Adapter, u32 RegAddr, u32 BitMa
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][11] = Data;
if (RegAddr == rTxAGC_B_Mcs11_Mcs08)
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][12] = Data;
- if (RegAddr == rTxAGC_B_Mcs15_Mcs12) {
+ if (RegAddr == rTxAGC_B_Mcs15_Mcs12)
pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][13] = Data;
- if (pHalData->rf_type != RF_1T1R)
- pHalData->pwrGroupCnt++;
- }
}
static int phy_BB8188E_Config_ParaFile(struct adapter *Adapter)
{
struct eeprom_priv *pEEPROM = &Adapter->eeprompriv;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
int rtStatus = _SUCCESS;
/* */
@@ -574,7 +546,7 @@ PHY_BBConfig8188E(
)
{
int rtStatus = _SUCCESS;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u32 RegVal;
u8 CrystalCap;
@@ -595,7 +567,7 @@ PHY_BBConfig8188E(
/* write 0x24[16:11] = 0x24[22:17] = CrystalCap */
CrystalCap = pHalData->CrystalCap & 0x3F;
- PHY_SetBBReg(Adapter, REG_AFE_XTAL_CTRL, 0x7ff800, (CrystalCap | (CrystalCap << 6)));
+ rtl8188e_PHY_SetBBReg(Adapter, REG_AFE_XTAL_CTRL, 0x7ff800, (CrystalCap | (CrystalCap << 6)));
return rtStatus;
}
@@ -613,82 +585,25 @@ static void getTxPowerIndex88E(struct adapter *Adapter, u8 channel, u8 *cckPower
u8 *ofdmPowerLevel, u8 *BW20PowerLevel,
u8 *BW40PowerLevel)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u8 index = (channel - 1);
- u8 TxCount = 0, path_nums;
- if ((RF_1T2R == pHalData->rf_type) || (RF_1T1R == pHalData->rf_type))
- path_nums = 1;
- else
- path_nums = 2;
-
- for (TxCount = 0; TxCount < path_nums; TxCount++) {
- if (TxCount == RF_PATH_A) {
- /* 1. CCK */
- cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
- /* 2. OFDM */
- ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->OFDM_24G_Diff[TxCount][RF_PATH_A];
- /* 1. BW20 */
- BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[TxCount][RF_PATH_A];
- /* 2. BW40 */
- BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
- } else if (TxCount == RF_PATH_B) {
- /* 1. CCK */
- cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
- /* 2. OFDM */
- ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[TxCount][index];
- /* 1. BW20 */
- BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[TxCount][RF_PATH_A] +
- pHalData->BW20_24G_Diff[TxCount][index];
- /* 2. BW40 */
- BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
- } else if (TxCount == RF_PATH_C) {
- /* 1. CCK */
- cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
- /* 2. OFDM */
- ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_B][index] +
- pHalData->BW20_24G_Diff[TxCount][index];
- /* 1. BW20 */
- BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_B][index] +
- pHalData->BW20_24G_Diff[TxCount][index];
- /* 2. BW40 */
- BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
- } else if (TxCount == RF_PATH_D) {
- /* 1. CCK */
- cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
- /* 2. OFDM */
- ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_B][index] +
- pHalData->BW20_24G_Diff[RF_PATH_C][index] +
- pHalData->BW20_24G_Diff[TxCount][index];
-
- /* 1. BW20 */
- BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_A][index] +
- pHalData->BW20_24G_Diff[RF_PATH_B][index] +
- pHalData->BW20_24G_Diff[RF_PATH_C][index] +
- pHalData->BW20_24G_Diff[TxCount][index];
-
- /* 2. BW40 */
- BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
- }
- }
+ /* 1. CCK */
+ cckPowerLevel[RF_PATH_A] = pHalData->Index24G_CCK_Base[index];
+ /* 2. OFDM */
+ ofdmPowerLevel[RF_PATH_A] = pHalData->Index24G_BW40_Base[index] +
+ pHalData->OFDM_24G_Diff[RF_PATH_A];
+ /* 1. BW20 */
+ BW20PowerLevel[RF_PATH_A] = pHalData->Index24G_BW40_Base[index] +
+ pHalData->BW20_24G_Diff[RF_PATH_A];
+ /* 2. BW40 */
+ BW40PowerLevel[RF_PATH_A] = pHalData->Index24G_BW40_Base[index];
}
static void phy_PowerIndexCheck88E(struct adapter *Adapter, u8 channel, u8 *cckPowerLevel,
u8 *ofdmPowerLevel, u8 *BW20PowerLevel, u8 *BW40PowerLevel)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
pHalData->CurrentCckTxPwrIdx = cckPowerLevel[0];
pHalData->CurrentOfdm24GTxPwrIdx = ofdmPowerLevel[0];
@@ -752,17 +667,10 @@ _PHY_SetBWMode92C(
struct adapter *Adapter
)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u8 regBwOpMode;
u8 regRRSR_RSC;
- if (pHalData->rf_chip == RF_PSEUDO_11N)
- return;
-
- /* There is no 40MHz mode in RF_8225. */
- if (pHalData->rf_chip == RF_8225)
- return;
-
if (Adapter->bDriverStopped)
return;
@@ -796,17 +704,17 @@ _PHY_SetBWMode92C(
switch (pHalData->CurrentChannelBW) {
/* 20 MHz channel*/
case HT_CHANNEL_WIDTH_20:
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x0);
- PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x0);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x0);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x0);
break;
/* 40 MHz channel*/
case HT_CHANNEL_WIDTH_40:
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x1);
- PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x1);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x1);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x1);
/* Set Control channel to upper or lower. These settings are required only for 40MHz */
- PHY_SetBBReg(Adapter, rCCK0_System, bCCKSideBand, (pHalData->nCur40MhzPrimeSC >> 1));
- PHY_SetBBReg(Adapter, rOFDM1_LSTF, 0xC00, pHalData->nCur40MhzPrimeSC);
- PHY_SetBBReg(Adapter, 0x818, (BIT(26) | BIT(27)),
+ rtl8188e_PHY_SetBBReg(Adapter, rCCK0_System, bCCKSideBand, (pHalData->nCur40MhzPrimeSC >> 1));
+ rtl8188e_PHY_SetBBReg(Adapter, rOFDM1_LSTF, 0xC00, pHalData->nCur40MhzPrimeSC);
+ rtl8188e_PHY_SetBBReg(Adapter, 0x818, (BIT(26) | BIT(27)),
(pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
break;
default:
@@ -814,21 +722,7 @@ _PHY_SetBWMode92C(
}
/* Skip over setting of J-mode in BB register here. Default value is "None J mode". Emily 20070315 */
- /* 3<3>Set RF related register */
- switch (pHalData->rf_chip) {
- case RF_8225:
- break;
- case RF_8256:
- /* Please implement this function in Hal8190PciPhy8256.c */
- break;
- case RF_PSEUDO_11N:
- break;
- case RF_6052:
- rtl8188e_PHY_RF6052SetBandwidth(Adapter, pHalData->CurrentChannelBW);
- break;
- default:
- break;
- }
+ rtl8188e_PHY_RF6052SetBandwidth(Adapter, pHalData->CurrentChannelBW);
}
/*-----------------------------------------------------------------------------
@@ -848,7 +742,7 @@ _PHY_SetBWMode92C(
void PHY_SetBWMode8188E(struct adapter *Adapter, enum ht_channel_width Bandwidth, /* 20M or 40M */
unsigned char Offset) /* Upper, Lower, or Don't care */
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
enum ht_channel_width tmpBW = pHalData->CurrentChannelBW;
pHalData->CurrentChannelBW = Bandwidth;
@@ -865,7 +759,7 @@ static void _PHY_SwChnl8192C(struct adapter *Adapter, u8 channel)
{
u8 eRFPath = 0;
u32 param1, param2;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
if (Adapter->bNotifyChannelChange)
DBG_88E("[%s] ch = %d\n", __func__, channel);
@@ -877,16 +771,13 @@ static void _PHY_SwChnl8192C(struct adapter *Adapter, u8 channel)
param1 = RF_CHNLBW;
param2 = channel;
pHalData->RfRegChnlVal[eRFPath] = ((pHalData->RfRegChnlVal[eRFPath] & 0xfffffc00) | param2);
- PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, param1, bRFRegOffsetMask, pHalData->RfRegChnlVal[eRFPath]);
+ rtl8188e_PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, param1, bRFRegOffsetMask, pHalData->RfRegChnlVal[eRFPath]);
}
void PHY_SwChnl8188E(struct adapter *Adapter, u8 channel)
{
/* Call after initialization */
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
-
- if (pHalData->rf_chip == RF_PSEUDO_11N)
- return; /* return immediately if it is peudo-phy */
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
if (channel == 0)
channel = 1;
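
The getTxPowerIndex88E() rewrite above drops the per-path loop entirely: the RTL8188EU is a 1T1R device, so only RF path A ever carries a TX power index and the Index24G_* tables lose their path dimension. A compilable sketch of the collapsed lookup, with assumed array sizes and invented diff values:

#include <stdio.h>

#define RF_PATH_A 0
#define NUM_CHANNELS 14

/* assumed table shapes; the driver's Index24G_* arrays are analogous */
static unsigned char cck_base[NUM_CHANNELS]  = { [5] = 32 };
static unsigned char bw40_base[NUM_CHANNELS] = { [5] = 30 };
static signed char ofdm_diff = 2, bw20_diff = 1;

static void get_tx_power_index(unsigned char channel, unsigned char *cck,
			       unsigned char *ofdm, unsigned char *bw20,
			       unsigned char *bw40)
{
	unsigned char i = channel - 1;	/* channels are 1-based */

	cck[RF_PATH_A]  = cck_base[i];			/* 1. CCK */
	ofdm[RF_PATH_A] = bw40_base[i] + ofdm_diff;	/* 2. OFDM */
	bw20[RF_PATH_A] = bw40_base[i] + bw20_diff;	/* 3. BW20 */
	bw40[RF_PATH_A] = bw40_base[i];			/* 4. BW40 */
}

int main(void)
{
	unsigned char cck[1], ofdm[1], bw20[1], bw40[1];

	get_tx_power_index(6, cck, ofdm, bw20, bw40);
	printf("ch6: cck=%u ofdm=%u bw20=%u bw40=%u\n",
	       cck[0], ofdm[0], bw20[0], bw40[0]);
	return 0;
}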
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c b/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
index 946a1b97d96f..6e0231099986 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
@@ -46,16 +46,16 @@
void rtl8188e_PHY_RF6052SetBandwidth(struct adapter *Adapter,
enum ht_channel_width Bandwidth)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
switch (Bandwidth) {
case HT_CHANNEL_WIDTH_20:
pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | BIT(10) | BIT(11));
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ rtl8188e_PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
break;
case HT_CHANNEL_WIDTH_40:
pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | BIT(10));
- PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ rtl8188e_PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
break;
default:
break;
@@ -84,31 +84,24 @@ rtl8188e_PHY_RF6052SetCckTxPower(
struct adapter *Adapter,
u8 *pPowerlevel)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
u32 TxAGC[2] = {0, 0}, tmpval = 0, pwrtrac_value;
- bool TurboScanOff = false;
u8 idx1, idx2;
u8 *ptr;
u8 direction;
- /* FOR CE ,must disable turbo scan */
- TurboScanOff = true;
if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
TxAGC[RF_PATH_A] = 0x3f3f3f3f;
TxAGC[RF_PATH_B] = 0x3f3f3f3f;
- TurboScanOff = true;/* disable turbo scan */
-
- if (TurboScanOff) {
- for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- TxAGC[idx1] =
- pPowerlevel[idx1] | (pPowerlevel[idx1] << 8) |
- (pPowerlevel[idx1] << 16) | (pPowerlevel[idx1] << 24);
- /* 2010/10/18 MH For external PA module. We need to limit power index to be less than 0x20. */
- if (TxAGC[idx1] > 0x20 && pHalData->ExternalPA)
- TxAGC[idx1] = 0x20;
- }
+ for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
+ TxAGC[idx1] =
+ pPowerlevel[idx1] | (pPowerlevel[idx1] << 8) |
+ (pPowerlevel[idx1] << 16) | (pPowerlevel[idx1] << 24);
+ /* 2010/10/18 MH For external PA module. We need to limit power index to be less than 0x20. */
+ if (TxAGC[idx1] > 0x20 && pHalData->ExternalPA)
+ TxAGC[idx1] = 0x20;
}
} else {
for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
@@ -148,15 +141,15 @@ rtl8188e_PHY_RF6052SetCckTxPower(
/* rf-A cck tx power */
tmpval = TxAGC[RF_PATH_A] & 0xff;
- PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval);
+ rtl8188e_PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval);
tmpval = TxAGC[RF_PATH_A] >> 8;
- PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+ rtl8188e_PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
/* rf-B cck tx power */
tmpval = TxAGC[RF_PATH_B] >> 24;
- PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval);
+ rtl8188e_PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval);
tmpval = TxAGC[RF_PATH_B] & 0x00ffffff;
- PHY_SetBBReg(Adapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
+ rtl8188e_PHY_SetBBReg(Adapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
} /* PHY_RF6052SetCckTxPower */
/* */
@@ -166,7 +159,7 @@ rtl8188e_PHY_RF6052SetCckTxPower(
static void getpowerbase88e(struct adapter *Adapter, u8 *pPowerLevelOFDM,
u8 *pPowerLevelBW20, u8 *pPowerLevelBW40, u8 Channel, u32 *OfdmBase, u32 *MCSBase)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u32 powerBase0, powerBase1;
u8 i;
@@ -190,7 +183,7 @@ static void get_rx_power_val_by_reg(struct adapter *Adapter, u8 Channel,
u8 index, u32 *powerBase0, u32 *powerBase1,
u32 *pOutWriteVal)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u8 i, chnlGroup = 0, pwr_diff_limit[4], customer_pwr_limit;
s8 pwr_diff = 0;
u32 writeVal, customer_limit, rf;
@@ -272,7 +265,6 @@ static void get_rx_power_val_by_reg(struct adapter *Adapter, u8 Channel,
}
static void writeOFDMPowerReg88E(struct adapter *Adapter, u8 index, u32 *pValue)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
u16 regoffset_a[6] = {
rTxAGC_A_Rate18_06, rTxAGC_A_Rate54_24,
rTxAGC_A_Mcs03_Mcs00, rTxAGC_A_Mcs07_Mcs04,
@@ -299,13 +291,10 @@ static void writeOFDMPowerReg88E(struct adapter *Adapter, u8 index, u32 *pValue)
else
regoffset = regoffset_b[index];
- PHY_SetBBReg(Adapter, regoffset, bMaskDWord, writeVal);
+ rtl8188e_PHY_SetBBReg(Adapter, regoffset, bMaskDWord, writeVal);
/* 201005115 Joseph: Set Tx Power diff for Tx power training mechanism. */
- if (((pHalData->rf_type == RF_2T2R) &&
- (regoffset == rTxAGC_A_Mcs15_Mcs12 || regoffset == rTxAGC_B_Mcs15_Mcs12)) ||
- ((pHalData->rf_type != RF_2T2R) &&
- (regoffset == rTxAGC_A_Mcs07_Mcs04 || regoffset == rTxAGC_B_Mcs07_Mcs04))) {
+ if (regoffset == rTxAGC_A_Mcs07_Mcs04 || regoffset == rTxAGC_B_Mcs07_Mcs04) {
writeVal = pwr_val[3];
if (regoffset == rTxAGC_A_Mcs15_Mcs12 || regoffset == rTxAGC_A_Mcs07_Mcs04)
regoffset = 0xc90;
@@ -353,7 +342,7 @@ rtl8188e_PHY_RF6052SetOFDMTxPower(
u8 *pPowerLevelBW40,
u8 Channel)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u32 writeVal[2], powerBase0[2], powerBase1[2], pwrtrac_value;
u8 direction;
u8 index = 0;
@@ -383,7 +372,7 @@ rtl8188e_PHY_RF6052SetOFDMTxPower(
static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
{
struct bb_reg_def *pPhyReg;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *pHalData = &Adapter->haldata;
u32 u4RegValue = 0;
u8 eRFPath = 0;
int rtStatus = _SUCCESS;
@@ -393,21 +382,21 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
pPhyReg = &pHalData->PHYRegDef[eRFPath];
/*----Store original RFENV control type----*/
- u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV);
+ u4RegValue = rtl8188e_PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV);
/*----Set RF_ENV enable----*/
- PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
udelay(1);/* PlatformStallExecution(1); */
/*----Set RF_ENV output high----*/
- PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
udelay(1);/* PlatformStallExecution(1); */
/* Set bit number of Address and Data for RF register */
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
udelay(1);/* PlatformStallExecution(1); */
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
udelay(1);/* PlatformStallExecution(1); */
/*----Initialize RF from configuration file----*/
@@ -415,7 +404,7 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
rtStatus = _FAIL;
/*----Restore RFENV control type----*/;
- PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
+ rtl8188e_PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
if (rtStatus != _SUCCESS)
goto phy_RF6052_Config_ParaFile_Fail;
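
phy_RF6052_Config_ParaFile() above brackets the RF programming with a save/enable/restore sequence on the RFENV control bits, now through the rtl8188e_-prefixed accessors. The pattern in isolation, with register access mocked by an array (addresses and masks below are placeholders, not the chip's):

#include <stdio.h>

static unsigned int regs[16];	/* mocked BB register file */

static unsigned int query_reg(unsigned int addr, unsigned int mask)
{
	return regs[addr] & mask;
}

static void set_reg(unsigned int addr, unsigned int mask, unsigned int val)
{
	regs[addr] = (regs[addr] & ~mask) | (val & mask);
}

int main(void)
{
	const unsigned int RFINTFS = 3, RFENV = 0x10;	/* placeholders */
	unsigned int saved;

	saved = query_reg(RFINTFS, RFENV);	/* store original control type */
	set_reg(RFINTFS, RFENV, RFENV);		/* force RF_ENV enable */
	/* ... program the RF chip through the 3-wire bus here ... */
	set_reg(RFINTFS, RFENV, saved);		/* restore control type */

	printf("RFENV after restore: 0x%x\n", query_reg(RFINTFS, RFENV));
	return 0;
}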
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
index 053d9549873d..90d426199f52 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
@@ -126,7 +126,7 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe, struct phy_stat
{
struct adapter *padapter = precvframe->adapter;
struct rx_pkt_attrib *pattrib = &precvframe->attrib;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct hal_data_8188e *pHalData = &padapter->haldata;
struct phy_info *pPHYInfo = &pattrib->phy_info;
u8 *wlanhdr;
struct odm_per_pkt_info pkt_info;
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_sreset.c b/drivers/staging/r8188eu/hal/rtl8188e_sreset.c
deleted file mode 100644
index 7b3ac6e306ce..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188e_sreset.c
+++ /dev/null
@@ -1,37 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTL8188E_SRESET_C_
-
-#include "../include/rtl8188e_sreset.h"
-#include "../include/rtl8188e_hal.h"
-
-void rtl8188e_sreset_xmit_status_check(struct adapter *padapter)
-{
- u32 txdma_status;
-
- txdma_status = rtw_read32(padapter, REG_TXDMA_STATUS);
- if (txdma_status != 0x00) {
- DBG_88E("%s REG_TXDMA_STATUS:0x%08x\n", __func__, txdma_status);
- rtw_write32(padapter, REG_TXDMA_STATUS, txdma_status);
- }
- /* total xmit irp = 4 */
-}
-
-void rtl8188e_sreset_linked_status_check(struct adapter *padapter)
-{
- u32 rx_dma_status = 0;
- u8 fw_status = 0;
- rx_dma_status = rtw_read32(padapter, REG_RXDMA_STATUS);
- if (rx_dma_status != 0x00) {
- DBG_88E("%s REG_RXDMA_STATUS:0x%08x\n", __func__, rx_dma_status);
- rtw_write32(padapter, REG_RXDMA_STATUS, rx_dma_status);
- }
- fw_status = rtw_read8(padapter, REG_FMETHR);
- if (fw_status != 0x00) {
- if (fw_status == 1)
- DBG_88E("%s REG_FW_STATUS (0x%02x), Read_Efuse_Fail !!\n", __func__, fw_status);
- else if (fw_status == 2)
- DBG_88E("%s REG_FW_STATUS (0x%02x), Condition_No_Match !!\n", __func__, fw_status);
- }
-}
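
The deleted sreset helpers read latched DMA status registers and wrote the value back to acknowledge them, the usual write-1-to-clear idiom. A mocked, self-contained sketch of that idiom (register address and status bit are invented):

#include <stdio.h>

static unsigned int txdma_status = 0x04;	/* pretend an error latched */

static unsigned int read32(unsigned int reg)
{
	(void)reg;
	return txdma_status;
}

static void write32(unsigned int reg, unsigned int val)
{
	(void)reg;
	txdma_status &= ~val;	/* writing 1s clears the latched bits */
}

int main(void)
{
	const unsigned int REG_TXDMA_STATUS = 0x3A0;	/* made-up address */
	unsigned int status = read32(REG_TXDMA_STATUS);

	if (status) {
		printf("TXDMA status 0x%08x, acknowledging\n", status);
		write32(REG_TXDMA_STATUS, status);	/* W1C acknowledge */
	}
	printf("after clear: 0x%08x\n", read32(REG_TXDMA_STATUS));
	return 0;
}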
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_led.c b/drivers/staging/r8188eu/hal/rtl8188eu_led.c
deleted file mode 100644
index 452d4bb87aba..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188eu_led.c
+++ /dev/null
@@ -1,94 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtl8188e_hal.h"
-#include "../include/rtl8188e_led.h"
-
-/* LED object. */
-
-/* LED_819xUsb routines. */
-/* Description: */
-/* Turn on LED according to LedPin specified. */
-void SwLedOn(struct adapter *padapter, struct LED_871x *pLed)
-{
- u8 LedCfg;
-
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
- return;
- LedCfg = rtw_read8(padapter, REG_LEDCFG2);
- switch (pLed->LedPin) {
- case LED_PIN_LED0:
- rtw_write8(padapter, REG_LEDCFG2, (LedCfg & 0xf0) | BIT(5) | BIT(6)); /* SW control led0 on. */
- break;
- case LED_PIN_LED1:
- rtw_write8(padapter, REG_LEDCFG2, (LedCfg & 0x0f) | BIT(5)); /* SW control led1 on. */
- break;
- default:
- break;
- }
- pLed->bLedOn = true;
-}
-
-/* Description: */
-/* Turn off LED according to LedPin specified. */
-void SwLedOff(struct adapter *padapter, struct LED_871x *pLed)
-{
- u8 LedCfg;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
-
- if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
- goto exit;
-
- LedCfg = rtw_read8(padapter, REG_LEDCFG2);/* 0x4E */
-
- switch (pLed->LedPin) {
- case LED_PIN_LED0:
- if (pHalData->bLedOpenDrain) {
- /* Open-drain arrangement for controlling the LED) */
- LedCfg &= 0x90; /* Set to software control. */
- rtw_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
- LedCfg = rtw_read8(padapter, REG_MAC_PINMUX_CFG);
- LedCfg &= 0xFE;
- rtw_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
- } else {
- rtw_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3) | BIT(5) | BIT(6)));
- }
- break;
- case LED_PIN_LED1:
- LedCfg &= 0x0f; /* Set to software control. */
- rtw_write8(padapter, REG_LEDCFG2, (LedCfg | BIT(3)));
- break;
- default:
- break;
- }
-exit:
- pLed->bLedOn = false;
-}
-
-/* Interface to manipulate LED objects. */
-/* Default LED behavior. */
-
-/* Description: */
-/* Initialize all LED_871x objects. */
-void rtl8188eu_InitSwLeds(struct adapter *padapter)
-{
- struct led_priv *pledpriv = &padapter->ledpriv;
-
- pledpriv->LedControlHandler = LedControl8188eu;
-
- InitLed871x(padapter, &pledpriv->SwLed0, LED_PIN_LED0);
-
- InitLed871x(padapter, &pledpriv->SwLed1, LED_PIN_LED1);
-}
-
-/* Description: */
-/* DeInitialize all LED_819xUsb objects. */
-void rtl8188eu_DeInitSwLeds(struct adapter *padapter)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
-
- DeInitLed871x(&ledpriv->SwLed0);
- DeInitLed871x(&ledpriv->SwLed1);
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
index b7feb4d8c8aa..293541db597d 100644
--- a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
@@ -154,7 +154,7 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
u8 data_rate, pwr_status, offset;
struct adapter *adapt = pxmitframe->padapter;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
@@ -382,7 +382,7 @@ static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct xmit_frame *pxmitframe = NULL;
struct xmit_frame *pfirstframe = NULL;
diff --git a/drivers/staging/r8188eu/hal/usb_halinit.c b/drivers/staging/r8188eu/hal/usb_halinit.c
index ef1ae95d7db0..96db9a8e7667 100644
--- a/drivers/staging/r8188eu/hal/usb_halinit.c
+++ b/drivers/staging/r8188eu/hal/usb_halinit.c
@@ -8,10 +8,10 @@
#include "../include/rtw_efuse.h"
#include "../include/rtl8188e_hal.h"
-#include "../include/rtl8188e_led.h"
#include "../include/rtw_iol.h"
#include "../include/usb_ops.h"
#include "../include/usb_osintf.h"
+#include "../include/Hal8188EPwrSeq.h"
#define HAL_MAC_ENABLE 1
#define HAL_BB_ENABLE 1
@@ -19,7 +19,7 @@
static void _ConfigNormalChipOutEP_8188E(struct adapter *adapt, u8 NumOutPipe)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
switch (NumOutPipe) {
case 3:
@@ -40,29 +40,16 @@ static void _ConfigNormalChipOutEP_8188E(struct adapter *adapt, u8 NumOutPipe)
DBG_88E("%s OutEpQueueSel(0x%02x), OutEpNumber(%d)\n", __func__, haldata->OutEpQueueSel, haldata->OutEpNumber);
}
-static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumInPipe, u8 NumOutPipe)
+static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumOutPipe)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
- bool result = false;
_ConfigNormalChipOutEP_8188E(adapt, NumOutPipe);
-
- /* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
- if (1 == haldata->OutEpNumber) {
- if (1 != NumInPipe)
- return result;
- }
-
- /* All config other than above support one Bulk IN and one Interrupt IN. */
-
- result = Hal_MappingOutPipe(adapt, NumOutPipe);
-
- return result;
+ return Hal_MappingOutPipe(adapt, NumOutPipe);
}
void rtl8188eu_interface_configure(struct adapter *adapt)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapt);
if (pdvobjpriv->ishighspeed)
@@ -70,8 +57,6 @@ void rtl8188eu_interface_configure(struct adapter *adapt)
else
haldata->UsbBulkOutSize = USB_FULL_SPEED_BULK_SIZE;/* 64 bytes */
- haldata->interfaceIndex = pdvobjpriv->InterfaceNumber;
-
haldata->UsbTxAggMode = 1;
haldata->UsbTxAggDescNum = 0x6; /* only 4 bits */
@@ -81,19 +66,18 @@ void rtl8188eu_interface_configure(struct adapter *adapt)
haldata->UsbRxAggPageCount = 48; /* unit: 128 B; 0x0A = 10 = MAX_RX_DMA_BUFFER_SIZE/2/haldata->UsbBulkOutSize */
haldata->UsbRxAggPageTimeout = 0x4; /* 6, absolute time = 34ms/(2^6) */
- HalUsbSetQueuePipeMapping8188EUsb(adapt,
- pdvobjpriv->RtNumInPipes, pdvobjpriv->RtNumOutPipes);
+ HalUsbSetQueuePipeMapping8188EUsb(adapt, pdvobjpriv->RtNumOutPipes);
}
u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
{
u16 value16;
/* HW Power on sequence */
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
if (haldata->bMacPwrCtrlOn)
return _SUCCESS;
- if (!HalPwrSeqCmdParsing(adapt, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_PWR_ON_FLOW)) {
+ if (!HalPwrSeqCmdParsing(adapt, Rtl8188E_NIC_PWR_ON_FLOW)) {
DBG_88E(KERN_ERR "%s: run power on flow fail\n", __func__);
return _FAIL;
}
@@ -144,7 +128,7 @@ static void _InitInterrupt(struct adapter *Adapter)
static void _InitQueueReservedPage(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
u32 numHQ = 0;
u32 numLQ = 0;
@@ -212,7 +196,7 @@ static void _InitNormalChipRegPriority(struct adapter *Adapter, u16 beQ,
static void _InitNormalChipOneOutEpPriority(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
u16 value = 0;
switch (haldata->OutEpQueueSel) {
@@ -234,7 +218,7 @@ static void _InitNormalChipOneOutEpPriority(struct adapter *Adapter)
static void _InitNormalChipTwoOutEpPriority(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
u16 valueHi = 0;
@@ -300,7 +284,7 @@ static void _InitNormalChipThreeOutEpPriority(struct adapter *Adapter)
static void _InitQueuePriority(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
switch (haldata->OutEpNumber) {
case 1:
@@ -344,7 +328,7 @@ static void _InitDriverInfoSize(struct adapter *Adapter, u8 drvInfoSize)
static void _InitWMACSetting(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
haldata->ReceiveConfig = RCR_AAP | RCR_APM | RCR_AM | RCR_AB |
RCR_CBSSID_DATA | RCR_CBSSID_BCN |
@@ -400,13 +384,6 @@ static void _InitEDCA(struct adapter *Adapter)
rtw_write32(Adapter, REG_EDCA_VO_PARAM, 0x002FA226);
}
-static void _InitRDGSetting(struct adapter *Adapter)
-{
- rtw_write8(Adapter, REG_RD_CTRL, 0xFF);
- rtw_write16(Adapter, REG_RD_NAV_NXT, 0x200);
- rtw_write8(Adapter, REG_RD_RESP_PKT_TH, 0x05);
-}
-
static void _InitRetryFunction(struct adapter *Adapter)
{
u8 value8;
@@ -436,7 +413,7 @@ static void _InitRetryFunction(struct adapter *Adapter)
*---------------------------------------------------------------------------*/
static void usb_AggSettingTxUpdate(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
u32 value32;
if (Adapter->registrypriv.wifi_spec)
@@ -471,7 +448,7 @@ usb_AggSettingRxUpdate(
struct adapter *Adapter
)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
u8 valueDMA;
u8 valueUSB;
@@ -525,16 +502,11 @@ usb_AggSettingRxUpdate(
static void InitUsbAggregationSetting(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
-
/* Tx aggregation setting */
usb_AggSettingTxUpdate(Adapter);
/* Rx aggregation setting */
usb_AggSettingRxUpdate(Adapter);
-
- /* 201/12/10 MH Add for USB agg mode dynamic switch. */
- haldata->UsbRxHighSpeedMode = false;
}
static void _InitOperationMode(struct adapter *Adapter)
@@ -543,7 +515,7 @@ static void _InitOperationMode(struct adapter *Adapter)
static void _InitBeaconParameters(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
rtw_write16(Adapter, REG_BCN_CTRL, 0x1010);
@@ -572,8 +544,8 @@ static void _BeaconFunctionEnable(struct adapter *Adapter,
/* Set CCK and OFDM Block "ON" */
static void _BBTurnOnBlock(struct adapter *Adapter)
{
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bCCKEn, 0x1);
- PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bCCKEn, 0x1);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
}
enum {
@@ -583,16 +555,16 @@ enum {
static void _InitAntenna_Selection(struct adapter *Adapter)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
if (haldata->AntDivCfg == 0)
return;
DBG_88E("==> %s ....\n", __func__);
rtw_write32(Adapter, REG_LEDCFG0, rtw_read32(Adapter, REG_LEDCFG0) | BIT(23));
- PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter, BIT(13), 0x01);
+ rtl8188e_PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter, BIT(13), 0x01);
- if (PHY_QueryBBReg(Adapter, rFPGA0_XA_RFInterfaceOE, 0x300) == Antenna_A)
+ if (rtl8188e_PHY_QueryBBReg(Adapter, rFPGA0_XA_RFInterfaceOE, 0x300) == Antenna_A)
haldata->CurAntenna = Antenna_A;
else
haldata->CurAntenna = Antenna_B;
@@ -605,18 +577,12 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
u16 value16;
u8 txpktbuf_bndy;
u32 status = _SUCCESS;
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
struct registry_priv *pregistrypriv = &Adapter->registrypriv;
u32 init_start_time = jiffies;
- #define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
-
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BEGIN);
-
if (Adapter->pwrctrlpriv.bkeepfwalive) {
- _ps_open_RF(Adapter);
-
if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
PHY_IQCalibrate_8188E(Adapter, true);
} else {
@@ -630,7 +596,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
goto exit;
}
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PW_ON);
status = rtl8188eu_InitPowerOn(Adapter);
if (status == _FAIL)
goto exit;
@@ -653,7 +618,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
txpktbuf_bndy = WMM_NORMAL_TX_PAGE_BOUNDARY_88E;
}
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC01);
_InitQueueReservedPage(Adapter);
_InitQueuePriority(Adapter);
_InitPageBoundary(Adapter);
@@ -661,7 +625,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
_InitTxBufferBoundary(Adapter, 0);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_DOWNLOAD_FW);
status = rtl8188e_FirmwareDownload(Adapter);
if (status != _SUCCESS) {
@@ -675,7 +638,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
}
rtl8188e_InitializeFirmwareVars(Adapter);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MAC);
#if (HAL_MAC_ENABLE == 1)
status = PHY_MACConfig8188E(Adapter);
if (status == _FAIL) {
@@ -687,7 +649,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* */
/* d. Initialize BB related configurations. */
/* */
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BB);
#if (HAL_BB_ENABLE == 1)
status = PHY_BBConfig8188E(Adapter);
if (status == _FAIL) {
@@ -696,7 +657,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
}
#endif
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_RF);
#if (HAL_RF_ENABLE == 1)
status = PHY_RFConfig8188E(Adapter);
if (status == _FAIL) {
@@ -705,7 +665,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
}
#endif
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_EFUSE_PATCH);
status = rtl8188e_iol_efuse_patch(Adapter);
if (status == _FAIL) {
DBG_88E("%s rtl8188e_iol_efuse_patch failed\n", __func__);
@@ -714,12 +673,10 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
_InitTxBufferBoundary(Adapter, txpktbuf_bndy);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_LLTT);
status = InitLLTTable(Adapter, txpktbuf_bndy);
if (status == _FAIL)
goto exit;
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC02);
/* Get Rx PHY status in order to report RSSI and others. */
_InitDriverInfoSize(Adapter, DRVINFO_SZ);
@@ -743,9 +700,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
value16 |= (MACTXEN | MACRXEN);
rtw_write8(Adapter, REG_CR, value16);
- if (haldata->bRDGEnable)
- _InitRDGSetting(Adapter);
-
/* Enable TX Report */
/* Enable Tx Report Timer */
value8 = rtw_read8(Adapter, REG_TX_RPT_CTRL);
@@ -761,16 +715,13 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
rtw_write16(Adapter, REG_PKT_BE_BK_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
/* Keep RfRegChnlVal for later use. */
- haldata->RfRegChnlVal[0] = PHY_QueryRFReg(Adapter, (enum rf_radio_path)0, RF_CHNLBW, bRFRegOffsetMask);
- haldata->RfRegChnlVal[1] = PHY_QueryRFReg(Adapter, (enum rf_radio_path)1, RF_CHNLBW, bRFRegOffsetMask);
+ haldata->RfRegChnlVal[0] = rtl8188e_PHY_QueryRFReg(Adapter, (enum rf_radio_path)0, RF_CHNLBW, bRFRegOffsetMask);
+ haldata->RfRegChnlVal[1] = rtl8188e_PHY_QueryRFReg(Adapter, (enum rf_radio_path)1, RF_CHNLBW, bRFRegOffsetMask);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_TURN_ON_BLOCK);
_BBTurnOnBlock(Adapter);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_SECURITY);
invalidate_cam_all(Adapter);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC11);
/* 2010/12/17 MH We need to set TX power according to EFUSE content at first. */
PHY_SetTxPowerLevel8188E(Adapter, haldata->CurrentChannel);
@@ -795,7 +746,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* Nav limit , suggest by scott */
rtw_write8(Adapter, 0x652, 0x0);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_HAL_DM);
rtl8188e_InitHalDm(Adapter);
/* 2010/08/11 MH Merge from 8192SE for Minicard init. We need to confirm current radio status */
@@ -819,7 +769,6 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* enable tx DMA to drop the redundant data of packet */
rtw_write16(Adapter, REG_TXDMA_OFFSET_CHK, (rtw_read16(Adapter, REG_TXDMA_OFFSET_CHK) | DROP_DATA_EN));
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_IQK);
/* 2010/08/26 MH Merge from 8192CE. */
if (pwrctrlpriv->rf_pwrstate == rf_on) {
if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
@@ -829,15 +778,11 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
}
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_PW_TRACK);
-
ODM_TXPowerTrackingCheck(&haldata->odmpriv);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_LCK);
PHY_LCCalibrate_8188E(Adapter);
}
-/* HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PABIAS); */
/* _InitPABias(Adapter); */
rtw_write8(Adapter, REG_USB_HRPWM, 0);
@@ -845,29 +790,15 @@ u32 rtl8188eu_hal_init(struct adapter *Adapter)
rtw_write32(Adapter, REG_FWHW_TXQ_CTRL, rtw_read32(Adapter, REG_FWHW_TXQ_CTRL) | BIT(12));
exit:
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_END);
-
DBG_88E("%s in %dms\n", __func__, rtw_get_passing_time_ms(init_start_time));
return status;
}
-void _ps_open_RF(struct adapter *adapt)
-{
- /* here call with bRegSSPwrLvl 1, bRegSSPwrLvl 2 needs to be verified */
- /* phy_SsPwrSwitch92CU(adapt, rf_on, 1); */
-}
-
-static void _ps_close_RF(struct adapter *adapt)
-{
- /* here call with bRegSSPwrLvl 1, bRegSSPwrLvl 2 needs to be verified */
- /* phy_SsPwrSwitch92CU(adapt, rf_off, 1); */
-}
-
static void CardDisableRTL8188EU(struct adapter *Adapter)
{
u8 val8;
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
/* Stop Tx Report Timer. 0x4EC[Bit1]=b'0 */
val8 = rtw_read8(Adapter, REG_TX_RPT_CTRL);
@@ -877,7 +808,7 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
rtw_write8(Adapter, REG_CR, 0x0);
/* Run LPS WL RFOFF flow */
- HalPwrSeqCmdParsing(Adapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_LPS_ENTER_FLOW);
+ HalPwrSeqCmdParsing(Adapter, Rtl8188E_NIC_LPS_ENTER_FLOW);
/* 2. 0x1F[7:0] = 0 turn off RF */
@@ -898,7 +829,7 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
rtw_write8(Adapter, REG_32K_CTRL, val8 & (~BIT(0)));
/* Card disable power action flow */
- HalPwrSeqCmdParsing(Adapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_DISABLE_FLOW);
+ HalPwrSeqCmdParsing(Adapter, Rtl8188E_NIC_DISABLE_FLOW);
/* Reset MCU IO Wrapper */
val8 = rtw_read8(Adapter, REG_RSV_CTRL + 1);
@@ -929,9 +860,7 @@ u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
rtw_write32(Adapter, REG_HIMRE_88E, IMR_DISABLED_88E);
DBG_88E("bkeepfwalive(%x)\n", Adapter->pwrctrlpriv.bkeepfwalive);
- if (Adapter->pwrctrlpriv.bkeepfwalive) {
- _ps_close_RF(Adapter);
- } else {
+ if (!Adapter->pwrctrlpriv.bkeepfwalive) {
if (Adapter->hw_init_completed) {
CardDisableRTL8188EU(Adapter);
}
@@ -948,12 +877,10 @@ unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
status = _SUCCESS;
- precvpriv->ff_hwaddr = RECV_BULK_IN_ADDR;
-
/* issue Rx irp to receive data */
precvbuf = (struct recv_buf *)precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
- if (!rtw_read_port(Adapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf)) {
+ if (!rtw_read_port(Adapter, (unsigned char *)precvbuf)) {
status = _FAIL;
goto exit;
}
@@ -971,39 +898,6 @@ exit:
/* EEPROM/EFUSE Content Parsing */
/* */
/* */
-static void _ReadLEDSetting(struct adapter *Adapter, u8 *PROMContent, bool AutoloadFail)
-{
- struct led_priv *pledpriv = &Adapter->ledpriv;
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
-
- pledpriv->bRegUseLed = true;
- haldata->bLedOpenDrain = true;/* Support Open-drain arrangement for controlling the LED. */
-}
-
-static void Hal_EfuseParsePIDVID_8188EU(struct adapter *adapt, u8 *hwinfo, bool AutoLoadFail)
-{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
-
- if (!AutoLoadFail) {
- /* VID, PID */
- haldata->EEPROMVID = EF2BYTE(*(__le16 *)&hwinfo[EEPROM_VID_88EU]);
- haldata->EEPROMPID = EF2BYTE(*(__le16 *)&hwinfo[EEPROM_PID_88EU]);
-
- /* Customer ID, 0x00 and 0xff are reserved for Realtek. */
- haldata->EEPROMCustomerID = *(u8 *)&hwinfo[EEPROM_CUSTOMERID_88E];
- haldata->EEPROMSubCustomerID = EEPROM_Default_SubCustomerID;
- } else {
- haldata->EEPROMVID = EEPROM_Default_VID;
- haldata->EEPROMPID = EEPROM_Default_PID;
-
- /* Customer ID, 0x00 and 0xff are reserved for Realtek. */
- haldata->EEPROMCustomerID = EEPROM_Default_CustomerID;
- haldata->EEPROMSubCustomerID = EEPROM_Default_SubCustomerID;
- }
-
- DBG_88E("VID = 0x%04X, PID = 0x%04X\n", haldata->EEPROMVID, haldata->EEPROMPID);
- DBG_88E("Customer ID: 0x%02X, SubCustomer ID: 0x%02X\n", haldata->EEPROMCustomerID, haldata->EEPROMSubCustomerID);
-}
static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool AutoLoadFail)
{
@@ -1020,76 +914,43 @@ static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool
}
}
-static void
-readAdapterInfo_8188EU(
- struct adapter *adapt
- )
-{
- struct eeprom_priv *eeprom = &adapt->eeprompriv;
-
- /* parse the eeprom/efuse content */
- Hal_EfuseParseIDCode88E(adapt, eeprom->efuse_eeprom_data);
- Hal_EfuseParsePIDVID_8188EU(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_EfuseParseMACAddr_8188EU(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
-
- Hal_ReadPowerSavingMode88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_ReadTxPowerInfo88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_EfuseParseEEPROMVer88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- rtl8188e_EfuseParseChnlPlan(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_EfuseParseXtal_8188E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_EfuseParseCustomerID88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_ReadAntennaDiversity88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_EfuseParseBoardType88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- Hal_ReadThermalMeter_88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
-
- _ReadLEDSetting(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
-}
-
-static void _ReadPROMContent(
- struct adapter *Adapter
- )
+void ReadAdapterInfo8188EU(struct adapter *Adapter)
{
struct eeprom_priv *eeprom = &Adapter->eeprompriv;
+ struct led_priv *ledpriv = &Adapter->ledpriv;
u8 eeValue;
+ /* Read EEPROM size before call any EEPROM function */
+ Adapter->EepromAddressSize = GetEEPROMSize8188E(Adapter);
+
/* check system boot selection */
eeValue = rtw_read8(Adapter, REG_9346CR);
- eeprom->EepromOrEfuse = (eeValue & BOOT_FROM_EEPROM) ? true : false;
- eeprom->bautoload_fail_flag = (eeValue & EEPROM_EN) ? false : true;
+ eeprom->EepromOrEfuse = (eeValue & BOOT_FROM_EEPROM);
+ eeprom->bautoload_fail_flag = !(eeValue & EEPROM_EN);
DBG_88E("Boot from %s, Autoload %s !\n", (eeprom->EepromOrEfuse ? "EEPROM" : "EFUSE"),
(eeprom->bautoload_fail_flag ? "Fail" : "OK"));
- Hal_InitPGData88E(Adapter);
- readAdapterInfo_8188EU(Adapter);
-}
-
-static void _ReadRFType(struct adapter *Adapter)
-{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ if (!is_boot_from_eeprom(Adapter))
+ EFUSE_ShadowMapUpdate(Adapter);
- haldata->rf_chip = RF_6052;
-}
+ /* parse the eeprom/efuse content */
+ Hal_EfuseParseIDCode88E(Adapter, eeprom->efuse_eeprom_data);
+ Hal_EfuseParseMACAddr_8188EU(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
-static int _ReadAdapterInfo8188EU(struct adapter *Adapter)
-{
- _ReadRFType(Adapter);/* rf_chip -> _InitRFType() */
- _ReadPROMContent(Adapter);
+ Hal_ReadPowerSavingMode88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_ReadTxPowerInfo88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ rtl8188e_EfuseParseChnlPlan(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_EfuseParseXtal_8188E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_ReadAntennaDiversity88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_ReadThermalMeter_88E(Adapter, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- return _SUCCESS;
-}
-
-void ReadAdapterInfo8188EU(struct adapter *Adapter)
-{
- /* Read EEPROM size before call any EEPROM function */
- Adapter->EepromAddressSize = GetEEPROMSize8188E(Adapter);
-
- _ReadAdapterInfo8188EU(Adapter);
+ ledpriv->bRegUseLed = true;
}
static void ResumeTxBeacon(struct adapter *adapt)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
/* 2010.03.01. Marked by tynli. No need to call workitem because we record the value */
/* which should be read from register to a global variable. */
@@ -1103,7 +964,7 @@ static void ResumeTxBeacon(struct adapter *adapt)
static void StopTxBeacon(struct adapter *adapt)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
/* 2010.03.01. Marked by tynli. No need to call workitem because we record the value */
/* which should be read from register to a global variable. */
@@ -1210,7 +1071,7 @@ static void hw_var_set_bcn_func(struct adapter *Adapter, u8 variable, u8 *val)
void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
struct dm_priv *pdmpriv = &haldata->dmpriv;
struct odm_dm_struct *podmpriv = &haldata->odmpriv;
@@ -1727,7 +1588,7 @@ void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
struct odm_dm_struct *podmpriv = &haldata->odmpriv;
switch (variable) {
@@ -1744,9 +1605,6 @@ void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
case HW_VAR_DM_FLAG:
val[0] = podmpriv->SupportAbility;
break;
- case HW_VAR_RF_TYPE:
- val[0] = haldata->rf_type;
- break;
case HW_VAR_FWLPS_RF_ON:
{
/* When we halt NIC, we should check if FW LPS is leave. */
@@ -1786,7 +1644,7 @@ void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
/* Query setting of specified variable. */
u8 GetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
u8 bResult = _SUCCESS;
switch (eVariable) {
@@ -1871,7 +1729,7 @@ u8 GetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable
/* Change default setting of specified variable. */
u8 SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
{
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct hal_data_8188e *haldata = &Adapter->haldata;
u8 bResult = _SUCCESS;
switch (eVariable) {
@@ -1925,7 +1783,7 @@ void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
u8 shortGIrate = false;
int supportRateNum = 0;
struct sta_info *psta;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
@@ -2036,11 +1894,10 @@ void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
void rtl8188eu_init_default_value(struct adapter *adapt)
{
- struct hal_data_8188e *haldata;
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct pwrctrl_priv *pwrctrlpriv;
u8 i;
- haldata = GET_HAL_DATA(adapt);
pwrctrlpriv = &adapt->pwrctrlpriv;
/* init default value */
@@ -2057,11 +1914,3 @@ void rtl8188eu_init_default_value(struct adapter *adapt)
for (i = 0; i < HP_THERMAL_NUM; i++)
haldata->odmpriv.RFCalibrateInfo.ThermalValue_HP[i] = 0;
}
-
-void rtl8188eu_alloc_haldata(struct adapter *adapt)
-{
- adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
- if (!adapt->HalData)
- DBG_88E("cant not alloc memory for HAL DATA\n");
- adapt->hal_data_sz = sizeof(struct hal_data_8188e);
-}
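
Besides the haldata embedding, this file's diff shrinks HalPwrSeqCmdParsing() to a single table argument: every caller passed PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK and PWR_INTF_USB_MSK, so the selection masks were dead parameters on a USB-only, single-cut driver. A toy illustration of running a power-sequence table once that static filtering is gone (step values are invented):

#include <stdio.h>

struct pwr_step {
	unsigned short offset;
	unsigned char value;
};

/* after the cleanup: no cut/fab/interface masks -- the table already
 * contains exactly the steps this one chip variant needs */
static void pwr_seq_run(const struct pwr_step *seq, int n)
{
	for (int i = 0; i < n; i++)
		printf("write 0x%04x = 0x%02x\n", seq[i].offset, seq[i].value);
}

int main(void)
{
	const struct pwr_step on_flow[] = {	/* invented example steps */
		{ 0x0005, 0x01 },
		{ 0x0006, 0x80 },
	};

	pwr_seq_run(on_flow, 2);
	return 0;
}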
diff --git a/drivers/staging/r8188eu/hal/usb_ops_linux.c b/drivers/staging/r8188eu/hal/usb_ops_linux.c
index e4a9350376bf..4a0ab4053e90 100644
--- a/drivers/staging/r8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/hal/usb_ops_linux.c
@@ -183,24 +183,6 @@ int rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *data)
return RTW_STATUS_CODE(ret);
}
-static void interrupt_handler_8188eu(struct adapter *adapt, u16 pkt_len, u8 *pbuf)
-{
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
-
- if (pkt_len != INTERRUPT_MSG_FORMAT_LEN) {
- DBG_88E("%s Invalid interrupt content length (%d)!\n", __func__, pkt_len);
- return;
- }
-
- /* HISR */
- memcpy(&haldata->IntArray[0], &pbuf[USB_INTR_CONTENT_HISR_OFFSET], 4);
- memcpy(&haldata->IntArray[1], &pbuf[USB_INTR_CONTENT_HISRE_OFFSET], 4);
-
- /* C2H Event */
- if (pbuf[0] != 0)
- memcpy(&haldata->C2hArray[0], &pbuf[USB_INTR_CONTENT_C2H_OFFSET], 16);
-}
-
static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
{
u8 *pbuf;
@@ -213,7 +195,7 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
struct sk_buff *pkt_copy = NULL;
struct recv_frame *precvframe = NULL;
struct rx_pkt_attrib *pattrib = NULL;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct hal_data_8188e *haldata = &adapt->haldata;
struct recv_priv *precvpriv = &adapt->recvpriv;
struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
@@ -342,8 +324,6 @@ static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
pattrib->MacIDValidEntry[0],
pattrib->MacIDValidEntry[1]
);
- } else if (pattrib->pkt_rpt_type == HIS_REPORT) {
- interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->rx_data);
}
rtw_free_recvframe(precvframe, pfree_recv_queue);
}
@@ -401,7 +381,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
if (purb->status == 0) { /* SUCCESS */
if ((purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)) {
precvbuf->reuse = true;
- rtw_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ rtw_read_port(adapt, (unsigned char *)precvbuf);
DBG_88E("%s()-%d: RX Warning!\n", __func__, __LINE__);
} else {
rtw_reset_continual_urb_error(adapter_to_dvobj(adapt));
@@ -415,7 +395,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
precvbuf->pskb = NULL;
precvbuf->reuse = false;
- rtw_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ rtw_read_port(adapt, (unsigned char *)precvbuf);
}
} else {
DBG_88E("###=> usb_read_port_complete => urb status(%d)\n", purb->status);
@@ -436,7 +416,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
case -EPROTO:
case -EOVERFLOW:
precvbuf->reuse = true;
- rtw_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ rtw_read_port(adapt, (unsigned char *)precvbuf);
break;
case -EINPROGRESS:
DBG_88E("ERROR: URB IS IN PROGRESS!/n");
@@ -447,7 +427,7 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
}
}
-u32 rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem)
+u32 rtw_read_port(struct adapter *adapter, u8 *rmem)
{
struct urb *purb = NULL;
struct recv_buf *precvbuf = (struct recv_buf *)rmem;
@@ -507,7 +487,7 @@ u32 rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem)
purb = precvbuf->purb;
/* translate DMA FIFO addr to pipehandle */
- pipe = ffaddr2pipehdl(pdvobj, addr);
+ pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe);
usb_fill_bulk_urb(purb, pusbd, pipe,
precvbuf->pbuf,
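
rtw_read_port() above no longer takes a FIFO address or length: the device exposes a single bulk IN endpoint, so the ffaddr2pipehdl() lookup reduces to one usb_rcvbulkpipe() call. A userspace mock of that reduction (the pipe encoding below imitates, but is not, the kernel's):

#include <stdio.h>

/* imitation of usb_rcvbulkpipe(): direction bit plus endpoint field */
#define MOCK_RCVBULKPIPE(ep)	(0x80000000u | ((unsigned int)(ep) << 15))

int main(void)
{
	unsigned int in_ep = 1;	/* the device's only bulk IN endpoint */

	/* old: pipe = ffaddr2pipehdl(dvobj, addr) chose among several
	 * FIFO-address-to-pipe mappings; new: one fixed mapping */
	unsigned int pipe = MOCK_RCVBULKPIPE(in_ep);

	printf("bulk IN pipe handle: 0x%08x\n", pipe);
	return 0;
}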
diff --git a/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
index 6f901ce607e8..2517a08bc95a 100644
--- a/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
@@ -4,37 +4,11 @@
#ifndef __INC_HAL8188EPHYCFG_H__
#define __INC_HAL8188EPHYCFG_H__
-/*--------------------------Define Parameters-------------------------------*/
-#define LOOP_LIMIT 5
-#define MAX_STALL_TIME 50 /* us */
-#define AntennaDiversityValue 0x80
-#define MAX_TXPWR_IDX_NMODE_92S 63
-#define Reset_Cnt_Limit 3
-
-#define IQK_MAC_REG_NUM 4
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 9
-#define HP_THERMAL_NUM 8
-
#define MAX_AGGR_NUM 0x07
-/*--------------------------Define Parameters-------------------------------*/
-
-/*------------------------------Define structure----------------------------*/
-
-enum hw90_block {
- HW90_BLOCK_MAC = 0,
- HW90_BLOCK_PHY0 = 1,
- HW90_BLOCK_PHY1 = 2,
- HW90_BLOCK_RF = 3,
- HW90_BLOCK_MAXIMUM = 4, /* Never use this */
-};
-
enum rf_radio_path {
RF_PATH_A = 0, /* Radio Path A */
RF_PATH_B = 1, /* Radio Path B */
- RF_PATH_C = 2, /* Radio Path C */
- RF_PATH_D = 3, /* Radio Path D */
};
#define MAX_PG_GROUP 13
@@ -46,17 +20,6 @@ enum rf_radio_path {
#define MAX_CHNL_GROUP_24G 6 /* ch1~2, ch3~5, ch6~8,
*ch9~11, ch12~13, CH 14
* total three groups */
-#define CHANNEL_GROUP_MAX_88E 6
-
-/* BB/RF related */
-enum RF_TYPE_8190P {
- RF_TYPE_MIN, /* 0 */
- RF_8225 = 1, /* 1 11b/g RF for verification only */
- RF_8256 = 2, /* 2 11b/g/n */
- RF_6052 = 4, /* 4 11b/g/n RF */
- /* TODO: We should remove this psudo PHY RF after we get new RF. */
- RF_PSEUDO_11N = 5, /* 5, It is a temporality RF. */
-};
struct bb_reg_def {
u32 rfintfs; /* set software control: */
@@ -106,18 +69,7 @@ struct bb_reg_def {
* Path A and B */
};
-/*------------------------------Define structure----------------------------*/
-
-/*------------------------Export global variable----------------------------*/
-/*------------------------Export global variable----------------------------*/
-
-/*------------------------Export Marco Definition---------------------------*/
-/*------------------------Export Marco Definition---------------------------*/
-
-/*--------------------------Exported Function prototype---------------------*/
-/* */
/* BB and RF register read/write */
-/* */
u32 rtl8188e_PHY_QueryBBReg(struct adapter *adapter, u32 regaddr, u32 mask);
void rtl8188e_PHY_SetBBReg(struct adapter *Adapter, u32 RegAddr,
u32 mask, u32 data);
@@ -144,15 +96,5 @@ void PHY_SwChnl8188E(struct adapter *adapter, u8 channel);
void storePwrIndexDiffRateOffset(struct adapter *adapter, u32 regaddr,
u32 mask, u32 data);
-/*--------------------------Exported Function prototype---------------------*/
-
-#define PHY_QueryBBReg(adapt, regaddr, mask) \
- rtl8188e_PHY_QueryBBReg((adapt), (regaddr), (mask))
-#define PHY_SetBBReg(adapt, regaddr, bitmask, data) \
- rtl8188e_PHY_SetBBReg((adapt), (regaddr), (bitmask), (data))
-#define PHY_QueryRFReg(adapt, rfpath, regaddr, bitmask) \
- rtl8188e_PHY_QueryRFReg((adapt), (rfpath), (regaddr), (bitmask))
-#define PHY_SetRFReg(adapt, rfpath, regaddr, bitmask, data) \
- rtl8188e_PHY_SetRFReg((adapt), (rfpath), (regaddr), (bitmask), (data))
-#endif /* __INC_HAL8192CPHYCFG_H */
+#endif
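
With the PHY_QueryBBReg()/PHY_SetBBReg() wrapper macros removed, callers invoke the exported rtl8188e_* accessors directly. An illustrative call site; the register address, mask, and bit are made-up values, and the forward declarations only make the fragment self-contained:

#include <linux/bits.h>
#include <linux/types.h>

struct adapter;
u32 rtl8188e_PHY_QueryBBReg(struct adapter *adapter, u32 regaddr, u32 mask);
void rtl8188e_PHY_SetBBReg(struct adapter *adapter, u32 regaddr,
			   u32 mask, u32 data);

static void example_bb_toggle(struct adapter *adapt)
{
	u32 val = rtl8188e_PHY_QueryBBReg(adapt, 0x800, 0xffffffff);

	/* set an arbitrary bit and write the value back */
	rtl8188e_PHY_SetBBReg(adapt, 0x800, 0xffffffff, val | BIT(24));
}
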
diff --git a/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h b/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h
index a73bd1a5d57b..e4c5b5d23cb4 100644
--- a/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h
+++ b/drivers/staging/r8188eu/include/Hal8188EPwrSeq.h
@@ -6,150 +6,8 @@
#include "HalPwrSeqCmd.h"
-/*
- Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd
- There are 6 HW Power States:
- 0: POFF--Power Off
- 1: PDN--Power Down
- 2: CARDEMU--Card Emulation
- 3: ACT--Active Mode
- 4: LPS--Low Power State
- 5: SUS--Suspend
-
- The transision from different states are defined below
- TRANS_CARDEMU_TO_ACT
- TRANS_ACT_TO_CARDEMU
- TRANS_CARDEMU_TO_SUS
- TRANS_SUS_TO_CARDEMU
- TRANS_CARDEMU_TO_PDN
- TRANS_ACT_TO_LPS
- TRANS_LPS_TO_ACT
-
- TRANS_END
-
- PWR SEQ Version: rtl8188E_PwrSeq_V09.h
-*/
-#define RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS 10
-#define RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS 10
-#define RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS 10
-#define RTL8188E_TRANS_SUS_TO_CARDEMU_STEPS 10
-#define RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS 10
-#define RTL8188E_TRANS_PDN_TO_CARDEMU_STEPS 10
-#define RTL8188E_TRANS_ACT_TO_LPS_STEPS 15
-#define RTL8188E_TRANS_LPS_TO_ACT_STEPS 15
-#define RTL8188E_TRANS_END_STEPS 1
-
-#define RTL8188E_TRANS_CARDEMU_TO_ACT \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), BIT(1)},/* wait till 0x04[17] = 1 power ready*/ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0)|BIT(1), 0}, /* 0x02[1:0] = 0 reset BB*/ \
- {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, /*0x24[23] = 2b'01 schmit trigger */ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0}, /* 0x04[15] = 0 disable HWPDN (control by DRV)*/\
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4)|BIT(3), 0}, /*0x04[12:11] = 2b'00 disable WL suspend*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), BIT(0)}, /*0x04[8] = 1 polling until return 0*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(0), 0}, /*wait till 0x04[8] = 0*/ \
- {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*LDO normal mode*/ \
- {0x0074, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*SDIO Driving*/ \
-
-#define RTL8188E_TRANS_ACT_TO_CARDEMU \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},/*0x1F[7:0] = 0 turn off RF*/ \
- {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*LDO Sleep mode*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(1), 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \
-
-#define RTL8188E_TRANS_CARDEMU_TO_SUS \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, /*0x04[12:11] = 2b'01enable WL suspend*/ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)|BIT(4)}, /*0x04[12:11] = 2b'11enable WL suspend for PCIe*/ \
- {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, BIT(7)}, /* 0x04[31:30] = 2b'10 enable enable bandgap mbias in suspend */ \
- {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*Clear SIC_EN register 0x40[12] = 1'b0 */ \
- {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*Set USB suspend enable local register 0xfe10[4]=1 */ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, /*wait power state to suspend*/
-
-#define RTL8188E_TRANS_SUS_TO_CARDEMU \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, /*wait power state to suspend*/\
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, /*0x04[12:11] = 2b'01enable WL suspend*/
-
-#define RTL8188E_TRANS_CARDEMU_TO_CARDDIS \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)}, /*0x24[23] = 2b'01 schmit trigger */ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), BIT(3)}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
- {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /* 0x04[31:30] = 2b'10 enable enable bandgap mbias in suspend */ \
- {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*Clear SIC_EN register 0x40[12] = 1'b0 */ \
- {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), BIT(4)}, /*Set USB suspend enable local register 0xfe10[4]=1 */ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), BIT(0)}, /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), 0}, /*wait power state to suspend*/
-
-#define RTL8188E_TRANS_CARDDIS_TO_CARDEMU \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT(0), 0}, /*Set SDIO suspend local register*/ \
- {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT(1), BIT(1)}, /*wait power state to suspend*/\
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(3)|BIT(4), 0}, /*0x04[12:11] = 2b'01enable WL suspend*/
-
-#define RTL8188E_TRANS_CARDEMU_TO_PDN \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},/* 0x04[16] = 0*/\
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), BIT(7)},/* 0x04[15] = 1*/
-
-#define RTL8188E_TRANS_PDN_TO_CARDEMU \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here */ \
- {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(7), 0},/* 0x04[15] = 0*/
-
-/* This is used by driver for LPSRadioOff Procedure, not for FW LPS Step */
-#define RTL8188E_TRANS_ACT_TO_LPS \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here */ \
- {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \
- {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
- {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
- {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
- {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(0), 0},/*CCK and OFDM are disabled,and clock are gated*/ \
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \
- {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3F},/*Reset MAC TRX*/ \
- {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), 0},/*check if removed later*/ \
- {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(5), BIT(5)},/*Respond TxOK to scheduler*/ \
-
-#define RTL8188E_TRANS_LPS_TO_ACT \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here */ \
- {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/\
- {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/\
- {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/\
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/\
- {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(4), 0}, /*. 0x08[4] = 0 switch TSF to 40M*/\
- {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT(7), 0}, /*Polling 0x109[7]=0 TSF in 40M*/\
- {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(6)|BIT(7), 0}, /*. 0x29[7:6] = 2b'00 enable BB clock*/\
- {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1), BIT(1)}, /*. 0x101[1] = 1*/\
- {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, /*. 0x100[7:0] = 0xFF enable WMAC TRX*/\
- {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT(1)|BIT(0), BIT(1)|BIT(0)}, /*. 0x02[1:0] = 2b'11 enable BB macro*/\
- {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
-
-#define RTL8188E_TRANS_END \
- /* format */ \
- /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
- {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK,0, PWR_CMD_END, 0, 0}, /* */
-
-extern struct wl_pwr_cfg rtl8188E_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_radio_off_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_card_disable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_card_enable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_suspend_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_resume_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_hwpdn_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_enter_lps_flow[RTL8188E_TRANS_ACT_TO_LPS_STEPS+RTL8188E_TRANS_END_STEPS];
-extern struct wl_pwr_cfg rtl8188E_leave_lps_flow[RTL8188E_TRANS_LPS_TO_ACT_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_power_on_flow[];
+extern struct wl_pwr_cfg rtl8188E_card_disable_flow[];
+extern struct wl_pwr_cfg rtl8188E_enter_lps_flow[];
#endif /* __HAL8188EPWRSEQ_H__ */
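
After this trim, struct wl_pwr_cfg (see the HalPwrSeqCmd.h hunk below) carries only an offset, a command, a mask and a value, so a flow table reduces to something like the sketch below. The two non-terminator entries are invented for illustration, and the PWR_CMD_POLLING/PWR_CMD_END values are assumed from the original header rather than shown in this hunk:

#include <linux/bits.h>
#include <linux/types.h>

#define PWR_CMD_WRITE	0x01
#define PWR_CMD_POLLING	0x02	/* assumed value, not shown in this hunk */
#define PWR_CMD_END	0x04	/* assumed value, not shown in this hunk */

struct wl_pwr_cfg {
	u16 offset;
	u8 cmd:4;
	u8 msk;
	u8 value;
};

static const struct wl_pwr_cfg example_flow[] = {
	{ 0x0005, PWR_CMD_WRITE,   BIT(7), 0 },      /* e.g. clear HWPDN */
	{ 0x0006, PWR_CMD_POLLING, BIT(1), BIT(1) }, /* e.g. wait power ready */
	{ 0xFFFF, PWR_CMD_END,     0,      0 },      /* terminator */
};
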
diff --git a/drivers/staging/r8188eu/include/HalPhyRf_8188e.h b/drivers/staging/r8188eu/include/HalPhyRf_8188e.h
index d4a27662309f..b75a5d869c56 100644
--- a/drivers/staging/r8188eu/include/HalPhyRf_8188e.h
+++ b/drivers/staging/r8188eu/include/HalPhyRf_8188e.h
@@ -30,12 +30,7 @@ void PHY_DigitalPredistortion_8188E(struct adapter *pAdapter);
void _PHY_SaveADDARegisters(struct adapter *pAdapter, u32 *ADDAReg,
u32 *ADDABackup, u32 RegisterNum);
-void _PHY_PathADDAOn(struct adapter *pAdapter, u32 *ADDAReg,
- bool isPathAOn, bool is2T);
-
void _PHY_MACSettingCalibration(struct adapter *pAdapter, u32 *MACReg,
u32 *MACBackup);
-void _PHY_PathAStandBy(struct adapter *pAdapter);
-
#endif /* #ifndef __HAL_PHY_RF_8188E_H__ */
diff --git a/drivers/staging/r8188eu/include/HalPwrSeqCmd.h b/drivers/staging/r8188eu/include/HalPwrSeqCmd.h
index fe7ac910beb8..49c02cce569e 100644
--- a/drivers/staging/r8188eu/include/HalPwrSeqCmd.h
+++ b/drivers/staging/r8188eu/include/HalPwrSeqCmd.h
@@ -9,11 +9,6 @@
/*---------------------------------------------*/
/* 3 The value of cmd: 4 bits */
/*---------------------------------------------*/
-#define PWR_CMD_READ 0x00
- /* offset: the read register offset */
- /* msk: the mask of the read value */
- /* value: N/A, left by 0 */
- /* note: dirver shall implement this function by read & msk */
#define PWR_CMD_WRITE 0x01
/* offset: the read register offset */
@@ -41,43 +36,6 @@
/* msk: N/A */
/* value: N/A */
-/*---------------------------------------------*/
-/* 3 The value of base: 4 bits */
-/*---------------------------------------------*/
- /* define the base address of each block */
-#define PWR_BASEADDR_MAC 0x00
-#define PWR_BASEADDR_USB 0x01
-#define PWR_BASEADDR_PCIE 0x02
-#define PWR_BASEADDR_SDIO 0x03
-
-/*---------------------------------------------*/
-/* 3 The value of interface_msk: 4 bits */
-/*---------------------------------------------*/
-#define PWR_INTF_SDIO_MSK BIT(0)
-#define PWR_INTF_USB_MSK BIT(1)
-#define PWR_INTF_PCI_MSK BIT(2)
-#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
-
-/*---------------------------------------------*/
-/* 3 The value of fab_msk: 4 bits */
-/*---------------------------------------------*/
-#define PWR_FAB_TSMC_MSK BIT(0)
-#define PWR_FAB_UMC_MSK BIT(1)
-#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
-
-/*---------------------------------------------*/
-/* 3 The value of cut_msk: 8 bits */
-/*---------------------------------------------*/
-#define PWR_CUT_TESTCHIP_MSK BIT(0)
-#define PWR_CUT_A_MSK BIT(1)
-#define PWR_CUT_B_MSK BIT(2)
-#define PWR_CUT_C_MSK BIT(3)
-#define PWR_CUT_D_MSK BIT(4)
-#define PWR_CUT_E_MSK BIT(5)
-#define PWR_CUT_F_MSK BIT(6)
-#define PWR_CUT_G_MSK BIT(7)
-#define PWR_CUT_ALL_MSK 0xFF
-
enum pwrseq_cmd_delat_unit {
PWRSEQ_DELAY_US,
PWRSEQ_DELAY_MS,
@@ -85,26 +43,17 @@ enum pwrseq_cmd_delat_unit {
struct wl_pwr_cfg {
u16 offset;
- u8 cut_msk;
- u8 fab_msk:4;
- u8 interface_msk:4;
- u8 base:4;
u8 cmd:4;
u8 msk;
u8 value;
};
#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
-#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
-#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
-#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk
-#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base
#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
/* Prototype of protected function. */
-u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 CutVersion, u8 FabVersion,
- u8 InterfaceType, struct wl_pwr_cfg PwrCfgCmd[]);
+u8 HalPwrSeqCmdParsing(struct adapter *padapter, struct wl_pwr_cfg PwrCfgCmd[]);
#endif
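
A sketch of the loop the simplified HalPwrSeqCmdParsing() prototype implies: with the cut/fab/interface masks gone, every table entry applies unconditionally. The loop body is an assumption for illustration, not the patched function; it relies on the GET_PWR_CFG_* macros above and on rtw_read8()/rtw_write8(), the driver's existing register accessors:

static u8 pwr_seq_parse_sketch(struct adapter *adapt, struct wl_pwr_cfg tbl[])
{
	u32 i = 0;
	u8 val;

	for (;;) {
		struct wl_pwr_cfg cfg = tbl[i++];

		switch (GET_PWR_CFG_CMD(cfg)) {
		case PWR_CMD_WRITE:
			/* read-modify-write the masked bits */
			val = rtw_read8(adapt, GET_PWR_CFG_OFFSET(cfg));
			val &= ~GET_PWR_CFG_MASK(cfg);
			val |= GET_PWR_CFG_VALUE(cfg) & GET_PWR_CFG_MASK(cfg);
			rtw_write8(adapt, GET_PWR_CFG_OFFSET(cfg), val);
			break;
		case PWR_CMD_POLLING:
			/* poll until (reg & msk) == (value & msk); elided */
			break;
		case PWR_CMD_DELAY:
			/* udelay()/mdelay() per the delay-unit enum; elided */
			break;
		case PWR_CMD_END:
		default:
			return 0; /* the driver presumably returns _SUCCESS */
		}
	}
}
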
diff --git a/drivers/staging/r8188eu/include/HalVerDef.h b/drivers/staging/r8188eu/include/HalVerDef.h
index 796a44a1e697..62b94c993f0d 100644
--- a/drivers/staging/r8188eu/include/HalVerDef.h
+++ b/drivers/staging/r8188eu/include/HalVerDef.h
@@ -24,22 +24,10 @@ enum HAL_VENDOR {
CHIP_VENDOR_UMC = 1,
};
-enum HAL_RF_TYPE {
- RF_TYPE_1T1R = 0,
- RF_TYPE_1T2R = 1,
- RF_TYPE_2T2R = 2,
- RF_TYPE_2T3R = 3,
- RF_TYPE_2T4R = 4,
- RF_TYPE_3T3R = 5,
- RF_TYPE_3T4R = 6,
- RF_TYPE_4T4R = 7,
-};
-
struct HAL_VERSION {
enum HAL_CHIP_TYPE ChipType;
enum HAL_CUT_VERSION CUTVersion;
enum HAL_VENDOR VendorType;
- enum HAL_RF_TYPE RFType;
u8 ROMVer;
};
@@ -47,7 +35,6 @@ struct HAL_VERSION {
#define GET_CVID_CHIP_TYPE(version) (((version).ChipType))
#define GET_CVID_MANUFACTUER(version) (((version).VendorType))
#define GET_CVID_CUT_VERSION(version) (((version).CUTVersion))
-#define GET_CVID_ROM_VERSION(version) (((version).ROMVer) & ROM_VERSION_MASK)
/* Common Macro. -- */
/* HAL_VERSION VersionID */
diff --git a/drivers/staging/r8188eu/include/drv_types.h b/drivers/staging/r8188eu/include/drv_types.h
index 3e4928320f17..2dd5ebaaa921 100644
--- a/drivers/staging/r8188eu/include/drv_types.h
+++ b/drivers/staging/r8188eu/include/drv_types.h
@@ -34,6 +34,7 @@
#include "rtw_p2p.h"
#include "rtw_ap.h"
#include "rtw_br_ext.h"
+#include "rtl8188e_hal.h"
#define DRIVERVERSION "v4.1.4_6773.20130222"
@@ -85,7 +86,6 @@ struct registry_priv {
u8 ampdu_amsdu;/* A-MPDU Supports A-MSDU is permitted */
u8 lowrate_two_xmit;
- u8 rf_config;
u8 low_power;
u8 wifi_spec;/* !turbo_mode */
@@ -114,12 +114,6 @@ struct registry_priv {
u8 notch_filter;
};
-/* For registry parameters */
-#define RGTRY_OFT(field) ((u32)FIELD_OFFSET(struct registry_priv, field))
-#define RGTRY_SZ(field) sizeof(((struct registry_priv *)0)->field)
-#define BSSID_OFT(field) ((u32)FIELD_OFFSET(struct wlan_bssid_ex, field))
-#define BSSID_SZ(field) sizeof(((struct wlan_bssid_ex *)0)->field)
-
#define MAX_CONTINUAL_URB_ERR 4
struct rt_firmware {
@@ -129,14 +123,13 @@ struct rt_firmware {
struct dvobj_priv {
struct adapter *if1;
- struct adapter *if2;
/* For 92D, DMDP have 2 interface. */
u8 InterfaceNumber;
u8 NumInterfaces;
/* In /Out Pipe information */
- int RtInPipe[2];
+ int RtInPipe;
int RtOutPipe[3];
u8 Queue2Pipe[HW_QUEUE_ENTRY];/* for out pipe mapping */
@@ -146,11 +139,8 @@ struct dvobj_priv {
/*-------- below is for USB INTERFACE --------*/
- u8 nr_endpoint;
u8 ishighspeed;
- u8 RtNumInPipes;
u8 RtNumOutPipes;
- int ep_num[5]; /* endpoint number */
int RegUsbSS;
struct semaphore usb_suspend_sema;
struct mutex usb_vendor_req_mutex;
@@ -210,8 +200,7 @@ struct adapter {
struct hostapd_priv *phostapdpriv;
struct wifidirect_info wdinfo;
- void *HalData;
- u32 hal_data_sz;
+ struct hal_data_8188e haldata;
s32 bDriverStopped;
s32 bSurpriseRemoved;
@@ -275,8 +264,6 @@ struct adapter {
unsigned char br_ip[4];
struct br_ext_info ethBrExtInfo;
- u8 fix_rate;
-
unsigned char in_cta_test;
};
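
The most consequential drv_types.h change is replacing the void *HalData pointer (and its companion hal_data_sz) with an embedded struct hal_data_8188e member. A standalone userspace illustration of why that helps, not driver code: the allocation, the failure path, and the GET_HAL_DATA()-style cast all disappear.

#include <stdlib.h>

struct hal_data { int msr; };

struct adapter_old { void *HalData; };           /* needs alloc + cast */
struct adapter_new { struct hal_data haldata; }; /* allocated with adapter */

static int old_style(struct adapter_old *a)
{
	a->HalData = calloc(1, sizeof(struct hal_data));
	if (!a->HalData)
		return -1;
	((struct hal_data *)a->HalData)->msr = 1;
	free(a->HalData);
	return 0;
}

static void new_style(struct adapter_new *a)
{
	a->haldata.msr = 1; /* no allocation, cast, or failure path */
}
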
diff --git a/drivers/staging/r8188eu/include/hal_intf.h b/drivers/staging/r8188eu/include/hal_intf.h
index d777ad9071e2..b4a7e0ce3116 100644
--- a/drivers/staging/r8188eu/include/hal_intf.h
+++ b/drivers/staging/r8188eu/include/hal_intf.h
@@ -29,7 +29,6 @@ enum hw_variables {
HW_VAR_ACK_PREAMBLE,
HW_VAR_SEC_CFG,
HW_VAR_BCN_VALID,
- HW_VAR_RF_TYPE,
HW_VAR_DM_FLAG,
HW_VAR_DM_FUNC_OP,
HW_VAR_DM_FUNC_SET,
@@ -95,40 +94,17 @@ enum hal_def_variable {
HAL_DEF_DBG_DUMP_TXPKT,
};
-enum hal_odm_variable {
- HAL_ODM_STA_INFO,
- HAL_ODM_P2P_STATE,
- HAL_ODM_WIFI_DISPLAY_STATE,
-};
-
typedef s32 (*c2h_id_filter)(u8 id);
-#define RF_CHANGE_BY_INIT 0
-#define RF_CHANGE_BY_IPS BIT(28)
-#define RF_CHANGE_BY_PS BIT(29)
-#define RF_CHANGE_BY_HW BIT(30)
-#define RF_CHANGE_BY_SW BIT(31)
-
#define is_boot_from_eeprom(adapter) (adapter->eeprompriv.EepromOrEfuse)
-void rtl8188eu_alloc_haldata(struct adapter *adapt);
-
void rtl8188eu_interface_configure(struct adapter *adapt);
void ReadAdapterInfo8188EU(struct adapter *Adapter);
void rtl8188eu_init_default_value(struct adapter *adapt);
-void rtl8188e_SetHalODMVar(struct adapter *Adapter,
- enum hal_odm_variable eVariable, void *pValue1, bool bSet);
+void rtl8188e_SetHalODMVar(struct adapter *Adapter, void *pValue1, bool bSet);
u32 rtl8188eu_InitPowerOn(struct adapter *adapt);
-void rtl8188e_free_hal_data(struct adapter *padapter);
-void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 bWrite, u8 PwrState);
-void rtl8188e_ReadEFuse(struct adapter *Adapter, u8 efuseType,
- u16 _offset, u16 _size_byte, u8 *pbuf,
- bool bPseudoTest);
-void rtl8188e_EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType,
- u8 type, void *pOut, bool bPseudoTest);
-u16 rtl8188e_EfuseGetCurrentSize(struct adapter *pAdapter, u8 efuseType, bool bPseudoTest);
-int rtl8188e_Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest);
-int rtl8188e_Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest);
+void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 PwrState);
+void rtl8188e_ReadEFuse(struct adapter *Adapter, u16 _offset, u16 _size_byte, u8 *pbuf);
void hal_notch_filter_8188e(struct adapter *adapter, bool enable);
diff --git a/drivers/staging/r8188eu/include/ieee80211.h b/drivers/staging/r8188eu/include/ieee80211.h
index 6c8206bd5466..3a23d5299314 100644
--- a/drivers/staging/r8188eu/include/ieee80211.h
+++ b/drivers/staging/r8188eu/include/ieee80211.h
@@ -1149,7 +1149,6 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork);
void rtw_macaddr_cfg(u8 *mac_addr);
-u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40,
- unsigned char *MCS_rate);
+u16 rtw_mcs_rate(u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsigned char *MCS_rate);
#endif /* IEEE80211_H */
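
rtw_mcs_rate() loses its rf_type argument because the 8188E is a 1T1R chip, so only the one-spatial-stream rates (MCS0-7) remain relevant. A hedged sketch of that reduction; the table holds the standard 802.11n 20 MHz long-GI data rates in units of 100 kb/s, and the helper names are placeholders:

#include <linux/bits.h>
#include <linux/types.h>

static const u16 mcs_rate_1s_20mhz_lgi[8] = {
	65, 130, 195, 260, 390, 520, 585, 650, /* MCS0..MCS7 */
};

static u16 max_mcs_rate_1s(const unsigned char *mcs_set)
{
	int i;

	/* first byte of the MCS set covers MCS0-7 (one spatial stream) */
	for (i = 7; i >= 0; i--)
		if (mcs_set[0] & BIT(i))
			return mcs_rate_1s_20mhz_lgi[i];
	return 0; /* no MCS advertised */
}
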
diff --git a/drivers/staging/r8188eu/include/ieee80211_ext.h b/drivers/staging/r8188eu/include/ieee80211_ext.h
deleted file mode 100644
index e7ade835d478..000000000000
--- a/drivers/staging/r8188eu/include/ieee80211_ext.h
+++ /dev/null
@@ -1,271 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __IEEE80211_EXT_H
-#define __IEEE80211_EXT_H
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-#define WMM_OUI_TYPE 2
-#define WMM_OUI_SUBTYPE_INFORMATION_ELEMENT 0
-#define WMM_OUI_SUBTYPE_PARAMETER_ELEMENT 1
-#define WMM_OUI_SUBTYPE_TSPEC_ELEMENT 2
-#define WMM_VERSION 1
-
-#define WPA_PROTO_WPA BIT(0)
-#define WPA_PROTO_RSN BIT(1)
-
-#define WPA_KEY_MGMT_IEEE8021X BIT(0)
-#define WPA_KEY_MGMT_PSK BIT(1)
-#define WPA_KEY_MGMT_NONE BIT(2)
-#define WPA_KEY_MGMT_IEEE8021X_NO_WPA BIT(3)
-#define WPA_KEY_MGMT_WPA_NONE BIT(4)
-
-#define WPA_CAPABILITY_PREAUTH BIT(0)
-#define WPA_CAPABILITY_MGMT_FRAME_PROTECTION BIT(6)
-#define WPA_CAPABILITY_PEERKEY_ENABLED BIT(9)
-
-#define PMKID_LEN 16
-
-struct wpa_ie_hdr {
- u8 elem_id;
- u8 len;
- u8 oui[4]; /* 24-bit OUI followed by 8-bit OUI type */
- u8 version[2]; /* little endian */
-} __packed;
-
-struct rsn_ie_hdr {
- u8 elem_id; /* WLAN_EID_RSN */
- u8 len;
- u8 version[2]; /* little endian */
-} __packed;
-
-struct wme_ac_parameter {
-#if defined(__LITTLE_ENDIAN)
- /* byte 1 */
- u8 aifsn:4,
- acm:1,
- aci:2,
- reserved:1;
-
- /* byte 2 */
- u8 eCWmin:4,
- eCWmax:4;
-#elif defined(__BIG_ENDIAN)
- /* byte 1 */
- u8 reserved:1,
- aci:2,
- acm:1,
- aifsn:4;
-
- /* byte 2 */
- u8 eCWmax:4,
- eCWmin:4;
-#else
-#error "Please fix <endian.h>"
-#endif
-
- /* bytes 3 & 4 */
- u16 txopLimit;
-} __packed;
-
-struct wme_parameter_element {
- /* required fields for WME version 1 */
- u8 oui[3];
- u8 oui_type;
- u8 oui_subtype;
- u8 version;
- u8 acInfo;
- u8 reserved;
- struct wme_ac_parameter ac[4];
-
-} __packed;
-
-#define WPA_PUT_LE16(a, val) \
- do { \
- (a)[1] = ((u16) (val)) >> 8; \
- (a)[0] = ((u16) (val)) & 0xff; \
- } while (0)
-
-#define WPA_PUT_BE32(a, val) \
- do { \
- (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[3] = (u8) (((u32) (val)) & 0xff); \
- } while (0)
-
-#define WPA_PUT_LE32(a, val) \
- do { \
- (a)[3] = (u8) ((((u32) (val)) >> 24) & 0xff); \
- (a)[2] = (u8) ((((u32) (val)) >> 16) & 0xff); \
- (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
- (a)[0] = (u8) (((u32) (val)) & 0xff); \
- } while (0)
-
-#define RSN_SELECTOR_PUT(a, val) WPA_PUT_BE32((u8 *)(a), (val))
-
-/* Action category code */
-enum ieee80211_category {
- WLAN_CATEGORY_SPECTRUM_MGMT = 0,
- WLAN_CATEGORY_QOS = 1,
- WLAN_CATEGORY_DLS = 2,
- WLAN_CATEGORY_BACK = 3,
- WLAN_CATEGORY_HT = 7,
- WLAN_CATEGORY_WMM = 17,
-};
-
-/* SPECTRUM_MGMT action code */
-enum ieee80211_spectrum_mgmt_actioncode {
- WLAN_ACTION_SPCT_MSR_REQ = 0,
- WLAN_ACTION_SPCT_MSR_RPRT = 1,
- WLAN_ACTION_SPCT_TPC_REQ = 2,
- WLAN_ACTION_SPCT_TPC_RPRT = 3,
- WLAN_ACTION_SPCT_CHL_SWITCH = 4,
- WLAN_ACTION_SPCT_EXT_CHL_SWITCH = 5,
-};
-
-/* BACK action code */
-enum ieee80211_back_actioncode {
- WLAN_ACTION_ADDBA_REQ = 0,
- WLAN_ACTION_ADDBA_RESP = 1,
- WLAN_ACTION_DELBA = 2,
-};
-
-/* HT features action code */
-enum ieee80211_ht_actioncode {
- WLAN_ACTION_NOTIFY_CH_WIDTH = 0,
- WLAN_ACTION_SM_PS = 1,
- WLAN_ACTION_PSPM = 2,
- WLAN_ACTION_PCO_PHASE = 3,
- WLAN_ACTION_MIMO_CSI_MX = 4,
- WLAN_ACTION_MIMO_NONCP_BF = 5,
- WLAN_ACTION_MIMP_CP_BF = 6,
- WLAN_ACTION_ASEL_INDICATES_FB = 7,
- WLAN_ACTION_HI_INFO_EXCHG = 8,
-};
-
-/* BACK (block-ack) parties */
-enum ieee80211_back_parties {
- WLAN_BACK_RECIPIENT = 0,
- WLAN_BACK_INITIATOR = 1,
- WLAN_BACK_TIMER = 2,
-};
-
-struct ieee80211_mgmt {
- u16 frame_control;
- u16 duration;
- u8 da[6];
- u8 sa[6];
- u8 bssid[6];
- u16 seq_ctrl;
- union {
- struct {
- u16 auth_alg;
- u16 auth_transaction;
- u16 status_code;
- /* possibly followed by Challenge text */
- u8 variable[0];
- } __packed auth;
- struct {
- u16 reason_code;
- } __packed deauth;
- struct {
- u16 capab_info;
- u16 listen_interval;
- /* followed by SSID and Supported rates */
- u8 variable[0];
- } __packed assoc_req;
- struct {
- u16 capab_info;
- u16 status_code;
- u16 aid;
- /* followed by Supported rates */
- u8 variable[0];
- } __packed assoc_resp, reassoc_resp;
- struct {
- u16 capab_info;
- u16 listen_interval;
- u8 current_ap[6];
- /* followed by SSID and Supported rates */
- u8 variable[0];
- } __packed reassoc_req;
- struct {
- u16 reason_code;
- } __packed disassoc;
- struct {
- __le64 timestamp;
- u16 beacon_int;
- u16 capab_info;
- /* followed by some of SSID, Supported rates,
- * FH Params, DS Params, CF Params, IBSS Params, TIM */
- u8 variable[0];
- } __packed beacon;
- struct {
- /* only variable items: SSID, Supported rates */
- u8 variable[0];
- } __packed probe_req;
- struct {
- __le64 timestamp;
- u16 beacon_int;
- u16 capab_info;
- /* followed by some of SSID, Supported rates,
- * FH Params, DS Params, CF Params, IBSS Params */
- u8 variable[0];
- } __packed probe_resp;
- struct {
- u8 category;
- union {
- struct {
- u8 action_code;
- u8 dialog_token;
- u8 status_code;
- u8 variable[0];
- } __packed wme_action;
- struct {
- u8 action_code;
- u8 dialog_token;
- u16 capab;
- u16 timeout;
- u16 start_seq_num;
- } __packed addba_req;
- struct {
- u8 action_code;
- u8 dialog_token;
- u16 status;
- u16 capab;
- u16 timeout;
- } __packed addba_resp;
- struct {
- u8 action_code;
- u16 params;
- u16 reason_code;
- } __packed delba;
- structi {
- u8 action_code;
- /* capab_info for open and confirm,
- * reason for close
- */
- u16 aux;
- /* Followed in plink_confirm by status
- * code, AID and supported rates,
- * and directly by supported rates in
- * plink_open and plink_close
- */
- u8 variable[0];
- } __packed plink_action;
- struct{
- u8 action_code;
- u8 variable[0];
- } __packed mesh_action;
- } __packed u;
- } __packed action;
- } __packed u;
-} __packed;
-
-/* mgmt header + 1 byte category code */
-#define IEEE80211_MIN_ACTION_SIZE \
- FIELD_OFFSET(struct ieee80211_mgmt, u.action.u)
-
-#endif
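
Everything in the deleted ieee80211_ext.h duplicated definitions that mainline already provides in <linux/ieee80211.h>, which is presumably why the header could sit unused. For reference, the in-kernel equivalents cover the same ground; an illustrative helper (not from the patch) using the kernel's struct ieee80211_mgmt instead of the local copy:

#include <linux/ieee80211.h>

static bool is_addba_req(const struct ieee80211_mgmt *mgmt)
{
	return mgmt->u.action.category == WLAN_CATEGORY_BACK &&
	       mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ;
}
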
diff --git a/drivers/staging/r8188eu/include/odm.h b/drivers/staging/r8188eu/include/odm.h
index f08655208b32..23a151c558dc 100644
--- a/drivers/staging/r8188eu/include/odm.h
+++ b/drivers/staging/r8188eu/include/odm.h
@@ -4,64 +4,30 @@
#ifndef __HALDMOUTSRC_H__
#define __HALDMOUTSRC_H__
-/* Add for AP/ADSLpseudo DM structuer requirement. */
-/* We need to remove to other position??? */
-struct rtl8192cd_priv {
- u8 temp;
-};
-
struct rtw_dig {
- u8 Dig_Enable_Flag;
- u8 Dig_Ext_Port_Stage;
-
- int RssiLowThresh;
- int RssiHighThresh;
-
- u32 FALowThresh;
- u32 FAHighThresh;
-
- u8 CurSTAConnectState;
- u8 PreSTAConnectState;
- u8 CurMultiSTAConnectState;
-
u8 PreIGValue;
u8 CurIGValue;
u8 BackupIGValue;
- s8 BackoffVal;
- s8 BackoffVal_range_max;
- s8 BackoffVal_range_min;
u8 rx_gain_range_max;
u8 rx_gain_range_min;
- u8 Rssi_val_min;
- u8 PreCCK_CCAThres;
u8 CurCCK_CCAThres;
- u8 PreCCKPDState;
- u8 CurCCKPDState;
u8 LargeFAHit;
u8 ForbiddenIGI;
u32 Recover_cnt;
u8 DIG_Dynamic_MIN_0;
- u8 DIG_Dynamic_MIN_1;
bool bMediaConnect_0;
- bool bMediaConnect_1;
u32 AntDiv_RSSI_max;
u32 RSSI_max;
};
struct rtl_ps {
- u8 pre_cca_state;
- u8 cur_cca_state;
-
u8 pre_rf_state;
u8 cur_rf_state;
-
- int rssi_val_min;
-
u8 initialize;
u32 reg_874;
u32 reg_c70;
@@ -87,59 +53,11 @@ struct false_alarm_stats {
u32 Cnt_BW_LSC; /* Gary */
};
-struct dyn_primary_cca {
- u8 pri_cca_flag;
- u8 intf_flag;
- u8 intf_type;
- u8 dup_rts_flag;
- u8 monitor_flag;
-};
-
-struct rx_hpc {
- u8 RXHP_flag;
- u8 PSD_func_trigger;
- u8 PSD_bitmap_RXHP[80];
- u8 Pre_IGI;
- u8 Cur_IGI;
- u8 Pre_pw_th;
- u8 Cur_pw_th;
- bool First_time_enter;
- bool RXHP_enable;
- u8 TP_Mode;
- struct timer_list PSDTimer;
-};
-
#define ODM_ASSOCIATE_ENTRY_NUM 32 /* Max size of AsocEntry[]. */
struct sw_ant_switch {
- u8 try_flag;
- s32 PreRSSI;
u8 CurAntenna;
- u8 PreAntenna;
- u8 RSSI_Trying;
- u8 TestMode;
- u8 bTriggerAntennaSwitch;
- u8 SelectAntennaMap;
- u8 RSSI_target;
-
- /* Before link Antenna Switch check */
- u8 SWAS_NoLink_State;
- u32 SWAS_NoLink_BK_Reg860;
-
- s32 RSSI_sum_A;
- s32 RSSI_sum_B;
- s32 RSSI_cnt_A;
- s32 RSSI_cnt_B;
- u64 lastTxOkCnt;
- u64 lastRxOkCnt;
- u64 TXByteCnt_A;
- u64 TXByteCnt_B;
- u64 RXByteCnt_A;
- u64 RXByteCnt_B;
- u8 TrafficLoad;
- struct timer_list SwAntennaSwitchTimer;
- u8 TxAnt[ODM_ASSOCIATE_ENTRY_NUM];
- u8 TargetSTA;
+ u8 SWAS_NoLink_State; /* Before link Antenna Switch check */
u8 RxIdleAnt;
};
@@ -182,10 +100,6 @@ struct odm_per_pkt_info {
bool bPacketBeacon;
};
-struct odm_mac_status_info {
- u8 test;
-};
-
enum odm_ability {
/* BB Team */
ODM_DIG = 0x00000001,
@@ -202,22 +116,6 @@ enum odm_ability {
ODM_PSD2AFH = 0x00000800
};
-/* 2011/20/20 MH For MP driver RT_WLAN_STA = struct sta_info */
-/* Please declare below ODM relative info in your STA info structure. */
-
-struct odm_sta_info {
- /* Driver Write */
- bool bUsed; /* record the sta status link or not? */
- u8 IOTPeer; /* Enum value. HT_IOT_PEER_E */
-
- /* ODM Write */
- /* 1 PHY_STATUS_INFO */
- u8 RSSI_Path[4]; /* */
- u8 RSSI_Ave;
- u8 RXEVM[4];
- u8 RXSNR[4];
-};
-
/* 2011/10/20 MH Define Common info enum for all team. */
enum odm_common_info_def {
@@ -230,22 +128,16 @@ enum odm_common_info_def {
/* Dynamic value: */
/* POINTER REFERENCE----------- */
- ODM_CMNINFO_TX_UNI,
- ODM_CMNINFO_RX_UNI,
ODM_CMNINFO_WM_MODE, /* ODM_WIRELESS_MODE_E */
ODM_CMNINFO_SEC_CHNL_OFFSET, /* ODM_SEC_CHNL_OFFSET_E */
- ODM_CMNINFO_SEC_MODE, /* ODM_SECURITY_E */
ODM_CMNINFO_BW, /* ODM_BW_E */
ODM_CMNINFO_CHNL,
ODM_CMNINFO_SCAN,
ODM_CMNINFO_POWER_SAVING,
- ODM_CMNINFO_NET_CLOSED,
/* POINTER REFERENCE----------- */
/* CALL BY VALUE------------- */
- ODM_CMNINFO_WIFI_DIRECT,
- ODM_CMNINFO_WIFI_DISPLAY,
ODM_CMNINFO_LINK,
ODM_CMNINFO_RSSI_MIN,
ODM_CMNINFO_RF_ANTENNA_TYPE, /* u8 */
@@ -256,27 +148,17 @@ enum odm_common_info_def {
enum odm_ability_def {
/* BB ODM section BIT 0-15 */
- ODM_BB_DIG = BIT(0),
- ODM_BB_RA_MASK = BIT(1),
- ODM_BB_DYNAMIC_TXPWR = BIT(2),
ODM_BB_FA_CNT = BIT(3),
ODM_BB_RSSI_MONITOR = BIT(4),
ODM_BB_CCK_PD = BIT(5),
ODM_BB_ANT_DIV = BIT(6),
- ODM_BB_PWR_SAVE = BIT(7),
ODM_BB_PWR_TRA = BIT(8),
- ODM_BB_RATE_ADAPTIVE = BIT(9),
- ODM_BB_PATH_DIV = BIT(10),
- ODM_BB_PSD = BIT(11),
- ODM_BB_RXHP = BIT(12),
/* MAC DM section BIT 16-23 */
ODM_MAC_EDCA_TURBO = BIT(16),
- ODM_MAC_EARLY_MODE = BIT(17),
/* RF ODM section BIT 24-31 */
ODM_RF_TX_PWR_TRACK = BIT(24),
- ODM_RF_RX_GAIN_TRACK = BIT(25),
ODM_RF_CALIBRATION = BIT(26),
};
@@ -424,15 +306,9 @@ struct odm_rf_cal {
/* ODM Dynamic common info value definition */
struct fast_ant_train {
- u8 Bssid[6];
u8 antsel_rx_keep_0;
u8 antsel_rx_keep_1;
u8 antsel_rx_keep_2;
- u32 antSumRSSI[7];
- u32 antRSSIcnt[7];
- u32 antAveRSSI[7];
- u8 FAT_State;
- u32 TrainIdx;
u8 antsel_a[ODM_ASSOCIATE_ENTRY_NUM];
u8 antsel_b[ODM_ASSOCIATE_ENTRY_NUM];
u8 antsel_c[ODM_ASSOCIATE_ENTRY_NUM];
@@ -444,11 +320,6 @@ struct fast_ant_train {
bool bBecomeLinked;
};
-enum fat_state {
- FAT_NORMAL_STATE = 0,
- FAT_TRAINING_STATE = 1,
-};
-
enum ant_div_type {
NO_ANTDIV = 0xFF,
CG_TRX_HW_ANTDIV = 0x01,
@@ -459,13 +330,7 @@ enum ant_div_type {
/* Copy from SD4 defined structure. We use to support PHY DM integration. */
struct odm_dm_struct {
- /* Add for different team use temporarily */
struct adapter *Adapter; /* For CE/NIC team */
- struct rtl8192cd_priv *priv; /* For AP/ADSL team */
- /* WHen you use above pointers, they must be initialized. */
- bool odm_ready;
-
- struct rtl8192cd_priv *fake_priv;
/* ODM HANDLE, DRIVER NEEDS NOT TO HOOK------ */
bool bCckHighPower;
@@ -485,55 +350,25 @@ struct odm_dm_struct {
/* Dynamic Value */
/* POINTER REFERENCE----------- */
-
- u8 u8_temp;
- bool bool_temp;
- struct adapter *adapter_temp;
-
- /* TX Unicast byte count */
- u64 *pNumTxBytesUnicast;
- /* RX Unicast byte count */
- u64 *pNumRxBytesUnicast;
/* Wireless mode B/G/A/N = BIT(0)/BIT(1)/BIT(2)/BIT(3) */
u8 *pWirelessMode; /* ODM_WIRELESS_MODE_E */
/* Secondary channel offset don't_care/below/above = 0/1/2 */
u8 *pSecChOffset;
- /* Security mode Open/WEP/AES/TKIP = 0/1/2/3 */
- u8 *pSecurity;
/* BW info 20M/40M/80M = 0/1/2 */
u8 *pBandWidth;
/* Central channel location Ch1/Ch2/.... */
u8 *pChannel; /* central channel number */
- /* Common info for 92D DMSP */
- bool *pbGetValueFromOtherMac;
- struct adapter **pBuddyAdapter;
- bool *pbMasterOfDMSP; /* MAC0: master, MAC1: slave */
/* Common info for Status */
bool *pbScanInProcess;
bool *pbPowerSaving;
- /* CCA Path 2-path/path-A/path-B = 0/1/2; using ODM_CCA_PATH_E. */
- u8 *pOnePathCCA;
- /* pMgntInfo->AntennaTest */
- u8 *pAntennaTest;
- bool *pbNet_closed;
/* POINTER REFERENCE----------- */
/* */
/* CALL BY VALUE------------- */
- bool bWIFI_Direct;
- bool bWIFI_Display;
bool bLinked;
u8 RSSI_Min;
- u8 InterfaceIndex; /* Add for 92D dual MAC: 0--Mac0 1--Mac1 */
bool bIsMPChip;
bool bOneEntryOnly;
- /* Common info for BTDM */
- bool bBtDisabled; /* BT is disabled */
- bool bBtHsOperation; /* BT HS mode is under progress */
- u8 btHsDigVal; /* use BT rssi to decide the DIG value */
- bool bBtDisableEdcaTurbo;/* Under some condition, don't enable the
- * EDCA Turbo */
- bool bBtBusy; /* BT is busy. */
/* CALL BY VALUE------------- */
/* 2 Define STA info. */
@@ -549,38 +384,16 @@ struct odm_dm_struct {
/* Latest packet phy info (ODM write) */
struct odm_phy_dbg_info PhyDbgInfo;
- /* Latest packet phy info (ODM write) */
- struct odm_mac_status_info *pMacInfo;
-
- /* Different Team independt structure?? */
-
/* ODM Structure */
struct fast_ant_train DM_FatTable;
struct rtw_dig DM_DigTable;
struct rtl_ps DM_PSTable;
- struct dyn_primary_cca DM_PriCCA;
- struct rx_hpc DM_RXHP_Table;
struct false_alarm_stats FalseAlmCnt;
- struct false_alarm_stats FlaseAlmCntBuddyAdapter;
struct sw_ant_switch DM_SWAT_Table;
- bool RSSI_test;
struct edca_turbo DM_EDCA_Table;
- u32 WMMEDCA_BE;
- /* Copy from SD4 structure */
- /* */
- /* ================================================== */
- /* */
-
- bool *pbDriverStopped;
- bool *pbDriverIsGoingToPnpSetPowerSleep;
- bool *pinit_adpt_in_progress;
/* PSD */
- bool bUserAssignLevel;
- struct timer_list PSDTimer;
- u8 RSSI_BT; /* come from BT */
- bool bPSDinProcess;
bool bDMInitialGainEnable;
struct odm_rate_adapt RateAdaptive;
@@ -596,14 +409,7 @@ struct odm_dm_struct {
u8 BbSwingIdxCckCurrent;
u8 BbSwingIdxCckBase;
bool BbSwingFlagCck;
- /* ODM system resource. */
-
- /* ODM relative time. */
- struct timer_list PathDivSwitchTimer;
- /* 2011.09.27 add for Path Diversity */
- struct timer_list CCKPathDiversityTimer;
- struct timer_list FastAntTrainingTimer;
-}; /* DM_Dynamic_Mechanism_Structure */
+};
enum odm_bb_config_type {
CONFIG_BB_PHY_REG,
@@ -612,12 +418,6 @@ enum odm_bb_config_type {
CONFIG_BB_PHY_REG_PG,
};
-#define DM_DIG_THRESH_HIGH 40
-#define DM_DIG_THRESH_LOW 35
-
-#define DM_false_ALARM_THRESH_LOW 400
-#define DM_false_ALARM_THRESH_HIGH 1000
-
#define DM_DIG_MAX_NIC 0x4e
#define DM_DIG_MIN_NIC 0x1e /* 0x22/0x1c */
@@ -629,10 +429,6 @@ enum odm_bb_config_type {
#define DM_DIG_FA_TH1 0x300/* 0x100 */
#define DM_DIG_FA_TH2 0x400/* 0x200 */
-#define DM_DIG_BACKOFF_MAX 12
-#define DM_DIG_BACKOFF_MIN -4
-#define DM_DIG_BACKOFF_DEFAULT 10
-
/* 3=========================================================== */
/* 3 Rate Adaptive */
/* 3=========================================================== */
@@ -645,12 +441,6 @@ enum odm_bb_config_type {
/* 3 BB Power Save */
/* 3=========================================================== */
-enum dm_1r_cca {
- CCA_1R = 0,
- CCA_2R = 1,
- CCA_MAX = 2,
-};
-
enum dm_rf {
RF_Save = 0,
RF_Normal = 1,
@@ -680,8 +470,6 @@ extern u8 CCKSwingTable_Ch14 [CCK_TABLE_SIZE][8];
void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI);
void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres);
-void ODM_SetAntenna(struct odm_dm_struct *pDM_Odm, u8 Antenna);
-
void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal);
void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm);
diff --git a/drivers/staging/r8188eu/include/odm_RTL8188E.h b/drivers/staging/r8188eu/include/odm_RTL8188E.h
index 96e50c9224aa..3c6471f1a893 100644
--- a/drivers/staging/r8188eu/include/odm_RTL8188E.h
+++ b/drivers/staging/r8188eu/include/odm_RTL8188E.h
@@ -25,10 +25,4 @@ void ODM_AntselStatistics_88E(struct odm_dm_struct *pDM_Odm, u8 antsel_tr_mux,
void odm_FastAntTraining(struct odm_dm_struct *pDM_Odm);
-void odm_FastAntTrainingCallback(struct odm_dm_struct *pDM_Odm);
-
-void odm_FastAntTrainingWorkItemCallback(struct odm_dm_struct *pDM_Odm);
-
-void odm_PrimaryCCA_Init(struct odm_dm_struct *pDM_Odm);
-
#endif
diff --git a/drivers/staging/r8188eu/include/odm_RegConfig8188E.h b/drivers/staging/r8188eu/include/odm_RegConfig8188E.h
index 634454bffdb6..683fa4a07956 100644
--- a/drivers/staging/r8188eu/include/odm_RegConfig8188E.h
+++ b/drivers/staging/r8188eu/include/odm_RegConfig8188E.h
@@ -4,9 +4,6 @@
#ifndef __INC_ODM_REGCONFIG_H_8188E
#define __INC_ODM_REGCONFIG_H_8188E
-void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data,
- enum rf_radio_path RF_PATH, u32 RegAddr);
-
void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm,
u32 Addr, u32 Data);
diff --git a/drivers/staging/r8188eu/include/odm_interface.h b/drivers/staging/r8188eu/include/odm_interface.h
deleted file mode 100644
index 17a315d19a50..000000000000
--- a/drivers/staging/r8188eu/include/odm_interface.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __ODM_INTERFACE_H__
-#define __ODM_INTERFACE_H__
-
-enum odm_h2c_cmd {
- ODM_H2C_RSSI_REPORT = 0,
- ODM_H2C_PSD_RESULT= 1,
- ODM_H2C_PathDiv = 2,
- ODM_MAX_H2CCMD
-};
-
-/* 2012/02/17 MH For non-MP compile pass only. Linux does not support workitem. */
-/* Suggest HW team to use thread instead of workitem. Windows also support the feature. */
-typedef void (*RT_WORKITEM_CALL_BACK)(void *pContext);
-
-/* =========== Extern Variable ??? It should be forbidden. */
-
-/* =========== EXtern Function Prototype */
-
-u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
-
-u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
-
-void ODM_Write1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u8 Data);
-
-void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data);
-
-void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data);
-
-void ODM_SetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr,
- u32 BitMask, u32 Data);
-
-u32 ODM_GetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask);
-
-void ODM_SetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr,
- u32 BitMask, u32 Data);
-
-u32 ODM_GetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask);
-
-void ODM_SetRFReg(struct odm_dm_struct *pDM_Odm, enum rf_radio_path eRFPath,
- u32 RegAddr, u32 BitMask, u32 Data);
-
-u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum rf_radio_path eRFPath,
- u32 RegAddr, u32 BitMask);
-
-/* Memory Relative Function. */
-s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2,
- u32 length);
-
-/* ODM Timer relative API. */
-void ODM_delay_ms(u32 ms);
-
-void ODM_delay_us(u32 us);
-
-void ODM_sleep_ms(u32 ms);
-
-#endif /* __ODM_INTERFACE_H__ */
diff --git a/drivers/staging/r8188eu/include/odm_precomp.h b/drivers/staging/r8188eu/include/odm_precomp.h
deleted file mode 100644
index 22299f167af8..000000000000
--- a/drivers/staging/r8188eu/include/odm_precomp.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. i*/
-
-#ifndef __ODM_PRECOMP_H__
-#define __ODM_PRECOMP_H__
-
-#include "odm_types.h"
-
-#define TEST_FALG___ 1
-
-/* 2 Config Flags and Structs - defined by each ODM Type */
-
-#include "osdep_service.h"
-#include "drv_types.h"
-#include "hal_intf.h"
-
-/* 2 OutSrc Header Files */
-
-#include "odm.h"
-#include "odm_HWConfig.h"
-#include "odm_RegDefine11N.h"
-
-#include "HalPhyRf_8188e.h"/* for IQK,LCK,Power-tracking */
-#include "Hal8188ERateAdaptive.h"/* for RA,Power training */
-#include "rtl8188e_hal.h"
-
-#include "odm_interface.h"
-
-#include "HalHWImg8188E_MAC.h"
-#include "HalHWImg8188E_RF.h"
-#include "HalHWImg8188E_BB.h"
-
-#include "odm_RegConfig8188E.h"
-#include "odm_RTL8188E.h"
-
-void odm_DIGInit(struct odm_dm_struct *pDM_Odm);
-void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm);
-void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm);
-void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm);
-void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm);
-void odm_SwAntDivInit_NIC(struct odm_dm_struct *pDM_Odm);
-void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm);
-void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm);
-void odm_DIG(struct odm_dm_struct *pDM_Odm);
-void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm);
-void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm);
-void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm);
-void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm);
-void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm);
-void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm);
-void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm);
-void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm);
-
-#endif /* __ODM_PRECOMP_H__ */
diff --git a/drivers/staging/r8188eu/include/osdep_service.h b/drivers/staging/r8188eu/include/osdep_service.h
index f6f5e4581212..6c8241372a06 100644
--- a/drivers/staging/r8188eu/include/osdep_service.h
+++ b/drivers/staging/r8188eu/include/osdep_service.h
@@ -74,38 +74,6 @@ static inline void _cancel_timer(struct timer_list *ptimer,u8 *bcancelled)
#define RTW_TIMER_HDL_NAME(name) rtw_##name##_timer_hdl
#define RTW_DECLARE_TIMER_HDL(name) void RTW_TIMER_HDL_NAME(name)(RTW_TIMER_HDL_ARGS)
-static inline void _init_workitem(struct work_struct *pwork, void *pfunc, void * cntx)
-{
- INIT_WORK(pwork, pfunc);
-}
-
-static inline void _set_workitem(struct work_struct *pwork)
-{
- schedule_work(pwork);
-}
-
-static inline void _cancel_workitem_sync(struct work_struct *pwork)
-{
- cancel_work_sync(pwork);
-}
-/* */
-/* Global Mutex: can only be used at PASSIVE level. */
-/* */
-
-#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter) \
-{ \
- while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1)\
- { \
- atomic_dec((atomic_t *)&(_MutexCounter)); \
- msleep(10); \
- } \
-}
-
-#define RELEASE_GLOBAL_MUTEX(_MutexCounter) \
-{ \
- atomic_dec((atomic_t *)&(_MutexCounter)); \
-}
-
static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
{
return netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
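
The deleted ACQUIRE_GLOBAL_MUTEX()/RELEASE_GLOBAL_MUTEX() pair open-coded a sleeping lock out of an atomic counter and msleep(). For reference, the idiomatic kernel construct such code should use is a plain mutex; an illustrative example, not part of this patch:

#include <linux/mutex.h>

static DEFINE_MUTEX(example_global_lock);

static void protected_section(void)
{
	mutex_lock(&example_global_lock);
	/* critical section; sleeps instead of spin-polling with msleep() */
	mutex_unlock(&example_global_lock);
}
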
diff --git a/drivers/staging/r8188eu/include/rtl8188e_dm.h b/drivers/staging/r8188eu/include/rtl8188e_dm.h
index 208bea050f6f..0b3a9a1a4e5c 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_dm.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_dm.h
@@ -8,12 +8,7 @@ enum{
UP_LINK,
DOWN_LINK,
};
-/* duplicate code,will move to ODM ######### */
-#define IQK_MAC_REG_NUM 4
-#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 9
-#define HP_THERMAL_NUM 8
-/* duplicate code,will move to ODM ######### */
+
struct dm_priv {
u32 InitODMFlag;
diff --git a/drivers/staging/r8188eu/include/rtl8188e_hal.h b/drivers/staging/r8188eu/include/rtl8188e_hal.h
index d7db1dfc39d0..8134a173ea07 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_hal.h
@@ -13,11 +13,18 @@
#include "rtl8188e_recv.h"
#include "rtl8188e_xmit.h"
#include "rtl8188e_cmd.h"
-#include "Hal8188EPwrSeq.h"
-#include "rtl8188e_sreset.h"
#include "rtw_efuse.h"
-
-#include "odm_precomp.h"
+#include "odm_types.h"
+#include "odm.h"
+#include "odm_HWConfig.h"
+#include "odm_RegDefine11N.h"
+#include "HalPhyRf_8188e.h"
+#include "Hal8188ERateAdaptive.h"
+#include "HalHWImg8188E_MAC.h"
+#include "HalHWImg8188E_RF.h"
+#include "HalHWImg8188E_BB.h"
+#include "odm_RegConfig8188E.h"
+#include "odm_RTL8188E.h"
/* RTL8188E Power Configuration CMDs for USB/SDIO interfaces */
#define Rtl8188E_NIC_PWR_ON_FLOW rtl8188E_power_on_flow
@@ -168,42 +175,15 @@ struct hal_data_8188e {
u16 BasicRateSet;
- /* rf_ctrl */
- u8 rf_chip;
- u8 rf_type;
-
- u8 BoardType;
-
- /* EEPROM setting. */
- u16 EEPROMVID;
- u16 EEPROMPID;
- u16 EEPROMSVID;
- u16 EEPROMSDID;
- u8 EEPROMCustomerID;
- u8 EEPROMSubCustomerID;
- u8 EEPROMVersion;
u8 EEPROMRegulatory;
-
- u8 bTXPowerDataReadFromEEPORM;
u8 EEPROMThermalMeter;
- u8 bAPKThermalMeterIgnore;
-
- bool EepromOrEfuse;
- struct efuse_hal EfuseHal;
- u8 Index24G_CCK_Base[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- u8 Index24G_BW40_Base[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ u8 Index24G_CCK_Base[CHANNEL_MAX_NUMBER];
+ u8 Index24G_BW40_Base[CHANNEL_MAX_NUMBER];
/* If only one tx, only BW20 and OFDM are used. */
- s8 CCK_24G_Diff[RF_PATH_MAX][MAX_TX_COUNT];
- s8 OFDM_24G_Diff[RF_PATH_MAX][MAX_TX_COUNT];
- s8 BW20_24G_Diff[RF_PATH_MAX][MAX_TX_COUNT];
- s8 BW40_24G_Diff[RF_PATH_MAX][MAX_TX_COUNT];
-
- u8 TxPwrLevelCck[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- /* For HT 40MHZ pwr */
- u8 TxPwrLevelHT40_1S[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- /* For HT 40MHZ pwr */
- u8 TxPwrLevelHT40_2S[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ s8 OFDM_24G_Diff[MAX_TX_COUNT];
+ s8 BW20_24G_Diff[MAX_TX_COUNT];
+
/* HT 20<->40 Pwr diff */
u8 TxPwrHt20Diff[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
/* For HT<->legacy pwr diff */
@@ -212,7 +192,6 @@ struct hal_data_8188e {
u8 PwrGroupHT20[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
u8 PwrGroupHT40[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
- u8 LegacyHTTxPowerDiff;/* Legacy to HT rate power diff */
/* The current Tx Power Level */
u8 CurrentCckTxPwrIdx;
u8 CurrentOfdm24GTxPwrIdx;
@@ -220,32 +199,18 @@ struct hal_data_8188e {
u8 CurrentBW4024GTxPwrIdx;
/* Read/write are allow for following hardware information variables */
- u8 framesync;
- u32 framesyncC34;
- u8 framesyncMonitor;
- u8 DefaultInitialGain[4];
u8 pwrGroupCnt;
u32 MCSTxPowerLevelOriginalOffset[MAX_PG_GROUP][16];
- u32 CCKTxPowerLevelOriginalOffset;
u8 CrystalCap;
- u32 AntennaTxPath; /* Antenna path Tx */
- u32 AntennaRxPath; /* Antenna path Rx */
u8 ExternalPA;
- u8 bLedOpenDrain; /* Open-drain support for controlling the LED.*/
-
- u8 b1x1RecvCombine; /* for 1T1R receive combining */
-
u32 AcParam_BE; /* Original parameter for BE, use for EDCA turbo. */
- struct bb_reg_def PHYRegDef[4]; /* Radio A/B/C/D */
+ struct bb_reg_def PHYRegDef[2]; /* Radio A/B */
u32 RfRegChnlVal[2];
- /* RDG enable */
- bool bRDGEnable;
-
/* for host message to fw */
u8 LastHMEBoxNum;
@@ -263,26 +228,10 @@ struct hal_data_8188e {
u8 bDumpRxPkt;/* for debug */
u8 bDumpTxPkt;/* for debug */
- u8 FwRsvdPageStartOffset; /* Reserve page start offset except
- * beacon in TxQ. */
-
- /* 2010/08/09 MH Add CU power down mode. */
- bool pwrdown;
-
- /* Add for dual MAC 0--Mac0 1--Mac1 */
- u32 interfaceIndex;
u8 OutEpQueueSel;
u8 OutEpNumber;
- /* Add for USB aggreation mode dynamic shceme. */
- bool UsbRxHighSpeedMode;
-
- /* 2010/11/22 MH Add for slim combo debug mode selective. */
- /* This is used for fix the drawback of CU TSMC-A/UMC-A cut.
- * HW auto suspend ability. Close BT clock. */
- bool SlimComboDbg;
-
u16 EfuseUsedBytes;
struct P2P_PS_Offload_t p2p_ps_offload;
@@ -293,12 +242,8 @@ struct hal_data_8188e {
u32 UsbBulkOutSize;
- /* Interrupt relatd register information. */
- u32 IntArray[3];/* HISR0,HISR1,HSISR */
- u8 C2hArray[16];
u8 UsbTxAggMode;
u8 UsbTxAggDescNum;
- u32 MaxUsbRxAggBlock;
enum usb_rx_agg_mode UsbRxAggMode;
u8 UsbRxAggBlockCount; /* USB Block count. Block size is
@@ -309,9 +254,6 @@ struct hal_data_8188e {
u8 UsbRxAggPageTimeout;
};
-#define GET_HAL_DATA(__pAdapter) \
- ((struct hal_data_8188e *)((__pAdapter)->HalData))
-
/* rtl8188e_hal_init.c */
s32 rtl8188e_FirmwareDownload(struct adapter *padapter);
void _8051Reset88E(struct adapter *padapter);
@@ -321,25 +263,18 @@ s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy);
/* EFuse */
u8 GetEEPROMSize8188E(struct adapter *padapter);
-void Hal_InitPGData88E(struct adapter *padapter);
void Hal_EfuseParseIDCode88E(struct adapter *padapter, u8 *hwinfo);
void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *hwinfo,
bool AutoLoadFail);
-void Hal_EfuseParseEEPROMVer88E(struct adapter *padapter, u8 *hwinfo,
- bool AutoLoadFail);
void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo,
bool AutoLoadFail);
-void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo,
- bool AutoLoadFail);
void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter,u8 *PROMContent,
bool AutoLoadFail);
void Hal_ReadThermalMeter_88E(struct adapter * dapter, u8 *PROMContent,
bool AutoloadFail);
void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo,
bool AutoLoadFail);
-void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo,
- bool AutoLoadFail);
void Hal_ReadPowerSavingMode88E(struct adapter *pAdapter, u8 *hwinfo,
bool AutoLoadFail);
@@ -347,6 +282,5 @@ void rtl8188e_read_chip_version(struct adapter *padapter);
s32 rtl8188e_iol_efuse_patch(struct adapter *padapter);
void rtw_cancel_all_timer(struct adapter *padapter);
-void _ps_open_RF(struct adapter *adapt);
#endif /* __RTL8188E_HAL_H__ */
diff --git a/drivers/staging/r8188eu/include/rtl8188e_led.h b/drivers/staging/r8188eu/include/rtl8188e_led.h
deleted file mode 100644
index 02cdc970bb17..000000000000
--- a/drivers/staging/r8188eu/include/rtl8188e_led.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __RTL8188E_LED_H__
-#define __RTL8188E_LED_H__
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-/* */
-/* Interface to manipulate LED objects. */
-/* */
-void rtl8188eu_InitSwLeds(struct adapter *padapter);
-void rtl8188eu_DeInitSwLeds(struct adapter *padapter);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtl8188e_recv.h b/drivers/staging/r8188eu/include/rtl8188e_recv.h
index 2ab395ef579b..0be9896eaf0f 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_recv.h
@@ -9,8 +9,6 @@
#define RECV_BLK_SZ 512
#define RECV_BLK_CNT 16
#define RECV_BLK_TH RECV_BLK_CNT
-#define RECV_BULK_IN_ADDR 0x80
-#define RECV_INT_IN_ADDR 0x81
#define NR_PREALLOC_RECV_SKB (8)
@@ -39,7 +37,6 @@ enum rx_packet_type {
HIS_REPORT,/* USB HISR RPT */
};
-#define INTERRUPT_MSG_FORMAT_LEN 60
void rtl8188eu_init_recvbuf(struct recv_buf *buf);
s32 rtl8188eu_init_recv_priv(struct adapter *padapter);
void rtl8188eu_free_recv_priv(struct adapter * padapter);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_spec.h b/drivers/staging/r8188eu/include/rtl8188e_spec.h
index 01aeaa4ac605..009222b4a95d 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_spec.h
@@ -475,13 +475,6 @@ Default: 00b.
#define MSR_INFRA 0x02
#define MSR_AP 0x03
-/* 88EU (MSR) Media Status Register (Offset 0x4C, 8 bits) */
-#define USB_INTR_CONTENT_C2H_OFFSET 0
-#define USB_INTR_CONTENT_CPWM1_OFFSET 16
-#define USB_INTR_CONTENT_CPWM2_OFFSET 20
-#define USB_INTR_CONTENT_HISR_OFFSET 48
-#define USB_INTR_CONTENT_HISRE_OFFSET 52
-
/* 88E Driver Initialization Offload REG_FDHM0(Offset 0x88, 8 bits) */
/* IOL config for REG_FDHM0(Reg0x88) */
#define CMD_INIT_LLT BIT(0)
@@ -1283,53 +1276,15 @@ Current IOREG MAP
#define EEPROM_RF_BOARD_OPTION_88E 0xC1
#define EEPROM_RF_FEATURE_OPTION_88E 0xC2
-#define EEPROM_RF_BT_SETTING_88E 0xC3
-#define EEPROM_VERSION_88E 0xC4
-#define EEPROM_CUSTOMERID_88E 0xC5
#define EEPROM_RF_ANTENNA_OPT_88E 0xC9
-/* RTL88EE */
-#define EEPROM_MAC_ADDR_88EE 0xD0
-#define EEPROM_VID_88EE 0xD6
-#define EEPROM_DID_88EE 0xD8
-#define EEPROM_SVID_88EE 0xDA
-#define EEPROM_SMID_88EE 0xDC
-
/* RTL88EU */
#define EEPROM_MAC_ADDR_88EU 0xD7
-#define EEPROM_VID_88EU 0xD0
-#define EEPROM_PID_88EU 0xD2
#define EEPROM_USB_OPTIONAL_FUNCTION0 0xD4
/* RTL88ES */
#define EEPROM_MAC_ADDR_88ES 0x11A
-/* EEPROM/Efuse Value Type */
-#define EETYPE_TX_PWR 0x0
-
-/* Default Value for EEPROM or EFUSE!!! */
-#define EEPROM_Default_TSSI 0x0
-#define EEPROM_Default_TxPowerDiff 0x0
-#define EEPROM_Default_CrystalCap 0x5
-/* Default: 2X2, RTL8192CE(QFPN68) */
-#define EEPROM_Default_BoardType 0x02
-#define EEPROM_Default_TxPower 0x1010
-#define EEPROM_Default_HT2T_TxPwr 0x10
-
-#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
-#define EEPROM_Default_ThermalMeter 0x12
-
-#define EEPROM_Default_AntTxPowerDiff 0x0
-#define EEPROM_Default_TxPwDiff_CrystalCap 0x5
-#define EEPROM_Default_TxPowerLevel 0x2A
-
-#define EEPROM_Default_HT40_2SDiff 0x0
-/* HT20<->40 default Tx Power Index Difference */
-#define EEPROM_Default_HT20_Diff 2
-#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
-#define EEPROM_Default_HT40_PwrMaxOffset 0
-#define EEPROM_Default_HT20_PwrMaxOffset 0
-
#define EEPROM_Default_CrystalCap_88E 0x20
#define EEPROM_Default_ThermalMeter_88E 0x18
@@ -1339,18 +1294,7 @@ Current IOREG MAP
#define EEPROM_DEFAULT_24G_OFDM_DIFF 0X04
#define EEPROM_DEFAULT_DIFF 0XFE
-#define EEPROM_DEFAULT_CHANNEL_PLAN 0x7F
#define EEPROM_DEFAULT_BOARD_OPTION 0x00
-#define EEPROM_DEFAULT_FEATURE_OPTION 0x00
-#define EEPROM_DEFAULT_BT_OPTION 0x10
-
-/* For debug */
-#define EEPROM_Default_PID 0x1234
-#define EEPROM_Default_VID 0x5678
-#define EEPROM_Default_CustomerID 0xAB
-#define EEPROM_Default_CustomerID_8188E 0x00
-#define EEPROM_Default_SubCustomerID 0xCD
-#define EEPROM_Default_Version 0
#define EEPROM_CHANNEL_PLAN_FCC 0x0
#define EEPROM_CHANNEL_PLAN_IC 0x1
@@ -1367,11 +1311,6 @@ Current IOREG MAP
#define EEPROM_USB_OPTIONAL1 0xE
#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
-#define EEPROM_CID_DEFAULT 0x0
-#define EEPROM_CID_TOSHIBA 0x4
-#define EEPROM_CID_CCX 0x10 /* CCX test. */
-#define EEPROM_CID_QMI 0x0D
-#define EEPROM_CID_WHQL 0xFE
#define RTL_EEPROM_ID 0x8129
#endif /* __RTL8188E_SPEC_H__ */
diff --git a/drivers/staging/r8188eu/include/rtl8188e_sreset.h b/drivers/staging/r8188eu/include/rtl8188e_sreset.h
deleted file mode 100644
index bb8b0048fbf9..000000000000
--- a/drivers/staging/r8188eu/include/rtl8188e_sreset.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _RTL8188E_SRESET_H_
-#define _RTL8188E_SRESET_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-void rtl8188e_sreset_xmit_status_check(struct adapter *padapter);
-void rtl8188e_sreset_linked_status_check(struct adapter *padapter);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtw_cmd.h b/drivers/staging/r8188eu/include/rtw_cmd.h
index 47c3c80cc24a..cf0945ae11c1 100644
--- a/drivers/staging/r8188eu/include/rtw_cmd.h
+++ b/drivers/staging/r8188eu/include/rtw_cmd.h
@@ -42,7 +42,6 @@ struct cmd_priv {
u8 *cmd_allocated_buf;
	u8 *rsp_buf; /* shall be non-paged, and 4-byte aligned */
u8 *rsp_allocated_buf;
- u32 cmd_issued_cnt;
u32 cmd_done_cnt;
u32 rsp_cnt;
u8 cmdthd_running;
diff --git a/drivers/staging/r8188eu/include/rtw_debug.h b/drivers/staging/r8188eu/include/rtw_debug.h
index 0a77e3e73a45..311051757715 100644
--- a/drivers/staging/r8188eu/include/rtw_debug.h
+++ b/drivers/staging/r8188eu/include/rtw_debug.h
@@ -54,22 +54,10 @@
extern u32 GlobalDebugLevel;
-#define DBG_88E_LEVEL(_level, fmt, arg...) \
- do { \
- if (_level <= GlobalDebugLevel) \
- pr_info(DRIVER_PREFIX"INFO " fmt, ##arg); \
- } while (0)
-
#define DBG_88E(...) \
do { \
if (_drv_err_ <= GlobalDebugLevel) \
pr_info(DRIVER_PREFIX __VA_ARGS__); \
} while (0)
-#define MSG_88E(...) \
- do { \
- if (_drv_err_ <= GlobalDebugLevel) \
- pr_info(DRIVER_PREFIX __VA_ARGS__); \
- } while (0)
-
#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/r8188eu/include/rtw_eeprom.h b/drivers/staging/r8188eu/include/rtw_eeprom.h
index e517239bd75e..3e8d3bb48903 100644
--- a/drivers/staging/r8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/r8188eu/include/rtw_eeprom.h
@@ -7,38 +7,7 @@
#include "osdep_service.h"
#include "drv_types.h"
-#define RTL8712_EEPROM_ID 0x8712
-
#define HWSET_MAX_SIZE_512 512
-#define EEPROM_MAX_SIZE HWSET_MAX_SIZE_512
-
-#define CLOCK_RATE 50 /* 100us */
-
-/* EEPROM opcodes */
-#define EEPROM_READ_OPCODE 06
-#define EEPROM_WRITE_OPCODE 05
-#define EEPROM_ERASE_OPCODE 07
-#define EEPROM_EWEN_OPCODE 19 /* Erase/write enable */
-#define EEPROM_EWDS_OPCODE 16 /* Erase/write disable */
-
-/* Country codes */
-#define USA 0x555320
-#define EUROPE 0x1 /* temp, should be provided later */
-#define JAPAN 0x2 /* temp, should be provided later */
-
-#define EEPROM_CID_DEFAULT 0x0
-#define EEPROM_CID_ALPHA 0x1
-#define EEPROM_CID_Senao 0x3
-#define EEPROM_CID_NetCore 0x5
-#define EEPROM_CID_CAMEO 0X8
-#define EEPROM_CID_SITECOM 0x9
-#define EEPROM_CID_COREGA 0xB
-#define EEPROM_CID_EDIMAX_BELK 0xC
-#define EEPROM_CID_SERCOMM_BELK 0xE
-#define EEPROM_CID_CAMEO1 0xF
-#define EEPROM_CID_WNC_COREGA 0x12
-#define EEPROM_CID_CLEVO 0x13
-#define EEPROM_CID_WHQL 0xFE
struct eeprom_priv {
u8 bautoload_fail_flag;
diff --git a/drivers/staging/r8188eu/include/rtw_efuse.h b/drivers/staging/r8188eu/include/rtw_efuse.h
index 2e19b7be1075..2daf69f554d5 100644
--- a/drivers/staging/r8188eu/include/rtw_efuse.h
+++ b/drivers/staging/r8188eu/include/rtw_efuse.h
@@ -4,110 +4,10 @@
#ifndef __RTW_EFUSE_H__
#define __RTW_EFUSE_H__
-#include "osdep_service.h"
-
-#define EFUSE_ERROE_HANDLE 1
-
-#define PG_STATE_HEADER 0x01
-#define PG_STATE_WORD_0 0x02
-#define PG_STATE_WORD_1 0x04
-#define PG_STATE_WORD_2 0x08
-#define PG_STATE_WORD_3 0x10
-#define PG_STATE_DATA 0x20
-
-#define PG_SWBYTE_H 0x01
-#define PG_SWBYTE_L 0x02
-
-#define PGPKT_DATA_SIZE 8
-
-#define EFUSE_WIFI 0
-#define EFUSE_BT 1
-
-enum _EFUSE_DEF_TYPE {
- TYPE_EFUSE_MAX_SECTION = 0,
- TYPE_EFUSE_REAL_CONTENT_LEN = 1,
- TYPE_AVAILABLE_EFUSE_BYTES_BANK = 2,
- TYPE_AVAILABLE_EFUSE_BYTES_TOTAL = 3,
- TYPE_EFUSE_MAP_LEN = 4,
- TYPE_EFUSE_PROTECT_BYTES_BANK = 5,
- TYPE_EFUSE_CONTENT_LEN_BANK = 6,
-};
-
-/* E-Fuse */
-#define EFUSE_MAP_SIZE 512
-#define EFUSE_MAX_SIZE 256
-/* end of E-Fuse */
-
-#define EFUSE_MAX_MAP_LEN 512
-#define EFUSE_MAX_HW_SIZE 512
-#define EFUSE_MAX_SECTION_BASE 16
-
-#define EXT_HEADER(header) ((header & 0x1F) == 0x0F)
-#define ALL_WORDS_DISABLED(wde) ((wde & 0x0F) == 0x0F)
-#define GET_HDR_OFFSET_2_0(header) ((header & 0xE0) >> 5)
-
-#define EFUSE_REPEAT_THRESHOLD_ 3
-
-/* The following is for BT Efuse definition */
-#define EFUSE_BT_MAX_MAP_LEN 1024
-#define EFUSE_MAX_BANK 4
-#define EFUSE_MAX_BT_BANK (EFUSE_MAX_BANK-1)
-/*--------------------------Define Parameters-------------------------------*/
#define EFUSE_MAX_WORD_UNIT 4
-/*------------------------------Define structure----------------------------*/
-struct pgpkt {
- u8 offset;
- u8 word_en;
- u8 data[8];
- u8 word_cnts;
-};
-
-/*------------------------------Define structure----------------------------*/
-struct efuse_hal {
- u8 fakeEfuseBank;
- u32 fakeEfuseUsedBytes;
- u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE];
- u8 fakeEfuseInitMap[EFUSE_MAX_MAP_LEN];
- u8 fakeEfuseModifiedMap[EFUSE_MAX_MAP_LEN];
-
- u16 BTEfuseUsedBytes;
- u8 BTEfuseUsedPercentage;
- u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
- u8 BTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN];
- u8 BTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN];
-
- u16 fakeBTEfuseUsedBytes;
- u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
- u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN];
- u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN];
-};
-
-/*------------------------Export global variable----------------------------*/
-extern u8 fakeEfuseBank;
-extern u32 fakeEfuseUsedBytes;
-extern u8 fakeEfuseContent[];
-extern u8 fakeEfuseInitMap[];
-extern u8 fakeEfuseModifiedMap[];
-
-extern u32 BTEfuseUsedBytes;
-extern u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
-extern u8 BTEfuseInitMap[];
-extern u8 BTEfuseModifiedMap[];
-
-extern u32 fakeBTEfuseUsedBytes;
-extern u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
-extern u8 fakeBTEfuseInitMap[];
-extern u8 fakeBTEfuseModifiedMap[];
-/*------------------------Export global variable----------------------------*/
-
-u8 Efuse_CalculateWordCnts(u8 word_en);
-void ReadEFuseByte(struct adapter *adapter, u16 _offset, u8 *pbuf, bool test);
-u8 efuse_OneByteRead(struct adapter *adapter, u16 addr, u8 *data, bool test);
-u8 efuse_OneByteWrite(struct adapter *adapter, u16 addr, u8 data, bool test);
-
-void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata);
+void ReadEFuseByte(struct adapter *adapter, u16 _offset, u8 *pbuf);
-void EFUSE_ShadowMapUpdate(struct adapter *adapter, u8 efusetype, bool test);
+void EFUSE_ShadowMapUpdate(struct adapter *adapter);
#endif
diff --git a/drivers/staging/r8188eu/include/rtw_io.h b/drivers/staging/r8188eu/include/rtw_io.h
index c6a078210eeb..6910e2b430e2 100644
--- a/drivers/staging/r8188eu/include/rtw_io.h
+++ b/drivers/staging/r8188eu/include/rtw_io.h
@@ -224,7 +224,7 @@ u8 rtw_read8(struct adapter *adapter, u32 addr);
u16 rtw_read16(struct adapter *adapter, u32 addr);
u32 rtw_read32(struct adapter *adapter, u32 addr);
void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-u32 rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+u32 rtw_read_port(struct adapter *adapter, u8 *pmem);
void rtw_read_port_cancel(struct adapter *adapter);
int rtw_write8(struct adapter *adapter, u32 addr, u8 val);
diff --git a/drivers/staging/r8188eu/include/rtw_led.h b/drivers/staging/r8188eu/include/rtw_led.h
index c035fe267635..2c14cb23d9ad 100644
--- a/drivers/staging/r8188eu/include/rtw_led.h
+++ b/drivers/staging/r8188eu/include/rtw_led.h
@@ -7,41 +7,7 @@
#include "osdep_service.h"
#include "drv_types.h"
-#define MSECS(t) (HZ * ((t) / 1000) + (HZ * ((t) % 1000)) / 1000)
-
-#define LED_BLINK_NORMAL_INTERVAL 100
-#define LED_BLINK_SLOWLY_INTERVAL 200
-#define LED_BLINK_LONG_INTERVAL 400
-
-#define LED_BLINK_NO_LINK_INTERVAL_ALPHA 1000
-#define LED_BLINK_LINK_INTERVAL_ALPHA 500 /* 500 */
-#define LED_BLINK_SCAN_INTERVAL_ALPHA 180 /* 150 */
-#define LED_BLINK_FASTER_INTERVAL_ALPHA 50
-#define LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA 5000
-
-#define LED_BLINK_NORMAL_INTERVAL_NETTRONIX 100
-#define LED_BLINK_SLOWLY_INTERVAL_NETTRONIX 2000
-
-#define LED_BLINK_SLOWLY_INTERVAL_PORNET 1000
-#define LED_BLINK_NORMAL_INTERVAL_PORNET 100
-
-#define LED_BLINK_FAST_INTERVAL_BITLAND 30
-
-/* 060403, rcnjko: Customized for AzWave. */
-#define LED_CM2_BLINK_ON_INTERVAL 250
-#define LED_CM2_BLINK_OFF_INTERVAL 4750
-
-#define LED_CM8_BLINK_INTERVAL 500 /* for QMI */
-#define LED_CM8_BLINK_OFF_INTERVAL 3750 /* for QMI */
-
-/* 080124, lanhsin: Customized for RunTop */
-#define LED_RunTop_BLINK_INTERVAL 300
-
-/* 060421, rcnjko: Customized for Sercomm Printer Server case. */
-#define LED_CM3_BLINK_INTERVAL 1500
-
enum LED_CTL_MODE {
- LED_CTL_POWER_ON = 1,
LED_CTL_LINK = 2,
LED_CTL_NO_LINK = 3,
LED_CTL_TX = 4,
@@ -51,10 +17,7 @@ enum LED_CTL_MODE {
LED_CTL_START_TO_LINK = 8,
LED_CTL_START_WPS = 9,
LED_CTL_STOP_WPS = 10,
- LED_CTL_START_WPS_BOTTON = 11, /* added for runtop */
- LED_CTL_STOP_WPS_FAIL = 12, /* added for ALPHA */
- LED_CTL_STOP_WPS_FAIL_OVERLAP = 13, /* added for BELKIN */
- LED_CTL_CONNECTION_NO_TRANSFER = 14,
+ LED_CTL_STOP_WPS_FAIL = 12,
};
enum LED_STATE_871x {
@@ -63,99 +26,46 @@ enum LED_STATE_871x {
RTW_LED_OFF = 2,
LED_BLINK_NORMAL = 3,
LED_BLINK_SLOWLY = 4,
- LED_BLINK_POWER_ON = 5,
	LED_BLINK_SCAN = 6, /* LED is blinking during the scanning period;
			     * the # of times to blink depends on the time
			     * spent scanning. */
- LED_BLINK_NO_LINK = 7, /* LED is blinking during no link state. */
	LED_BLINK_StartToBlink = 8, /* Customized for Sercomm Printer
* Server case */
LED_BLINK_TXRX = 9,
	LED_BLINK_WPS = 10, /* LED is blinking during WPS communication */
- LED_BLINK_WPS_STOP = 11, /* for ALPHA */
- LED_BLINK_WPS_STOP_OVERLAP = 12, /* for BELKIN */
+ LED_BLINK_WPS_STOP = 11,
LED_BLINK_RUNTOP = 13, /* Customized for RunTop */
- LED_BLINK_CAMEO = 14,
- LED_BLINK_XAVI = 15,
- LED_BLINK_ALWAYS_ON = 16,
-};
-
-enum LED_PIN_871x {
- LED_PIN_NULL = 0,
- LED_PIN_LED0 = 1,
- LED_PIN_LED1 = 2,
- LED_PIN_LED2 = 3,
- LED_PIN_GPIO0 = 4,
};
struct LED_871x {
struct adapter *padapter;
- enum LED_PIN_871x LedPin; /* Identify how to implement this
- * SW led. */
enum LED_STATE_871x CurrLedState; /* Current LED state. */
enum LED_STATE_871x BlinkingLedState; /* Next state for blinking,
					 * either RTW_LED_ON or RTW_LED_OFF. */
- u8 bLedOn; /* true if LED is ON, false if LED is OFF. */
+ bool bLedOn; /* true if LED is ON, false if LED is OFF. */
- u8 bLedBlinkInProgress; /* true if it is blinking, false o.w.. */
+	bool bLedBlinkInProgress;	/* true if it is blinking, false otherwise. */
- u8 bLedWPSBlinkInProgress;
+ bool bLedWPSBlinkInProgress;
u32 BlinkTimes; /* Number of times to toggle led state for blinking. */
- struct timer_list BlinkTimer; /* Timer object for led blinking. */
-
- /* ALPHA, added by chiyoko, 20090106 */
- u8 bLedNoLinkBlinkInProgress;
- u8 bLedLinkBlinkInProgress;
- u8 bLedStartToLinkBlinkInProgress;
- u8 bLedScanBlinkInProgress;
- struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to
- * manipulate H/W to blink LED. */
+ bool bLedNoLinkBlinkInProgress;
+ bool bLedLinkBlinkInProgress;
+ bool bLedScanBlinkInProgress;
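+	/* work item that drives the LED blink state machine */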
+ struct delayed_work blink_work;
};
-#define IS_LED_WPS_BLINKING(_LED_871x) \
- (((struct LED_871x *)_LED_871x)->CurrLedState == LED_BLINK_WPS || \
- ((struct LED_871x *)_LED_871x)->CurrLedState == LED_BLINK_WPS_STOP || \
- ((struct LED_871x *)_LED_871x)->bLedWPSBlinkInProgress)
-
-#define IS_LED_BLINKING(_LED_871x) \
- (((struct LED_871x *)_LED_871x)->bLedWPSBlinkInProgress || \
- ((struct LED_871x *)_LED_871x)->bLedScanBlinkInProgress)
-
-void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction);
-
struct led_priv {
- /* add for led control */
struct LED_871x SwLed0;
- struct LED_871x SwLed1;
- u8 bRegUseLed;
- void (*LedControlHandler)(struct adapter *padapter,
- enum LED_CTL_MODE LedAction);
- /* add for led control */
+ bool bRegUseLed;
};
-#define rtw_led_control(adapt, action) \
- do { \
- if ((adapt)->ledpriv.LedControlHandler) \
- (adapt)->ledpriv.LedControlHandler((adapt), (action)); \
- } while (0)
-
-void BlinkTimerCallback(struct timer_list *t);
-void BlinkWorkItemCallback(struct work_struct *work);
-
-void ResetLedStatus(struct LED_871x * pLed);
-
-void InitLed871x(struct adapter *padapter, struct LED_871x *pLed,
- enum LED_PIN_871x LedPin);
-
-void DeInitLed871x(struct LED_871x *pLed);
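+/* interface to manipulate the SW LED objects */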
+void rtl8188eu_InitSwLeds(struct adapter *padapter);
+void rtl8188eu_DeInitSwLeds(struct adapter *padapter);
-/* hal... */
-void BlinkHandler(struct LED_871x * pLed);
-void SwLedOn(struct adapter *padapter, struct LED_871x *pLed);
-void SwLedOff(struct adapter *padapter, struct LED_871x *pLed);
+void rtw_led_control(struct adapter *padapter, enum LED_CTL_MODE LedAction);
#endif /* __RTW_LED_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_mlme.h b/drivers/staging/r8188eu/include/rtw_mlme.h
index e8d51f495702..77169c15080a 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme.h
@@ -243,18 +243,6 @@ struct wifidirect_info {
* by using the sta_preset CAPI. */
/* 0: disable */
/* 1: enable */
- u8 wfd_tdls_enable; /* Flag to enable or disable the TDLS by WFD Sigma*/
- /* 0: disable */
- /* 1: enable */
- u8 wfd_tdls_weaksec; /* Flag to enable or disable the weak security
- * function for TDLS by WFD Sigma */
- /* 0: disable */
- /* In this case, the driver can't issue the tdsl
- * setup request frame. */
- /* 1: enable */
- /* In this case, the driver can issue the tdls
- * setup request frame */
- /* even the current security is weak security. */
/* This field will store the WPS value (PIN value or PBC) that UI had
* got from the user. */
diff --git a/drivers/staging/r8188eu/include/rtw_mlme_ext.h b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
index 5b307ad3afa5..26f31f20e428 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
@@ -162,11 +162,6 @@ struct rt_channel_plan {
unsigned char Len;
};
-struct rt_channel_plan_2g {
- unsigned char Channel[MAX_CHANNEL_NUM_2G];
- unsigned char Len;
-};
-
struct rt_channel_plan_map {
unsigned char Index2G;
};
diff --git a/drivers/staging/r8188eu/include/rtw_pwrctrl.h b/drivers/staging/r8188eu/include/rtw_pwrctrl.h
index b19ef796ab54..2d5298373d74 100644
--- a/drivers/staging/r8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/r8188eu/include/rtw_pwrctrl.h
@@ -19,10 +19,6 @@ enum power_mgnt {
PS_MODE_DTIM,
PS_MODE_VOIP,
PS_MODE_UAPSD_WMM,
- PS_MODE_UAPSD,
- PS_MODE_IBSS,
- PS_MODE_WWLAN,
- PM_Radio_Off,
PM_Card_Disable,
PS_MODE_NUM
};
@@ -57,7 +53,6 @@ struct pwrctrl_priv {
u8 reg_rfoff;
u8 reg_pdnmode; /* powerdown mode */
- u32 rfoff_reason;
/* RF OFF Level */
u32 cur_ps_level;
@@ -80,7 +75,6 @@ struct pwrctrl_priv {
s32 pnp_current_pwr_state;
u8 pnp_bstop_trx;
- u8 bInternalAutoSuspend;
u8 bInSuspend;
u8 bSupportRemoteWakeup;
struct timer_list pwr_state_check_timer;
@@ -93,7 +87,6 @@ struct pwrctrl_priv {
enum rt_rf_power_state change_rfpwrstate;
u8 wepkeymask;
- u8 bHWPowerdown;/* if support hw power down */
u8 bkeepfwalive;
};
diff --git a/drivers/staging/r8188eu/include/rtw_recv.h b/drivers/staging/r8188eu/include/rtw_recv.h
index 1e28ec731547..b43a46887343 100644
--- a/drivers/staging/r8188eu/include/rtw_recv.h
+++ b/drivers/staging/r8188eu/include/rtw_recv.h
@@ -177,7 +177,6 @@ struct recv_priv {
uint rx_smallpacket_crcerr;
uint rx_middlepacket_crcerr;
struct semaphore allrxreturnevt;
- uint ff_hwaddr;
u8 rx_pending_cnt;
struct tasklet_struct irq_prepare_beacon_tasklet;
diff --git a/drivers/staging/r8188eu/include/rtw_rf.h b/drivers/staging/r8188eu/include/rtw_rf.h
index 7ec252fec054..b7267e75346c 100644
--- a/drivers/staging/r8188eu/include/rtw_rf.h
+++ b/drivers/staging/r8188eu/include/rtw_rf.h
@@ -6,33 +6,16 @@
#include "rtw_cmd.h"
-#define OFDM_PHY 1
-#define MIXED_PHY 2
-#define CCK_PHY 3
-
#define NumRates (13)
/* slot time for 11g */
#define SHORT_SLOT_TIME 9
#define NON_SHORT_SLOT_TIME 20
-#define RTL8711_RF_MAX_SENS 6
-#define RTL8711_RF_DEF_SENS 4
-
-/* We now define the following channels as the max channels in each
- * channel plan. */
-/* 2G, total 14 chnls */
-/* {1,2,3,4,5,6,7,8,9,10,11,12,13,14} */
-#define MAX_CHANNEL_NUM_2G 14
#define MAX_CHANNEL_NUM 14 /* 2.4 GHz only */
#define NUM_REGULATORYS 1
-/* Country codes */
-#define USA 0x555320
-#define EUROPE 0x1 /* temp, should be provided later */
-#define JAPAN 0x2 /* temp, should be provided later */
-
struct regulatory_class {
u32 starting_freq; /* MHz, */
u8 channel_set[MAX_CHANNEL_NUM];
@@ -69,13 +52,6 @@ enum _REG_PREAMBLE_MODE {
PREAMBLE_SHORT = 3,
};
-enum rf90_radio_path {
- RF90_PATH_A = 0, /* Radio Path A */
- RF90_PATH_B = 1, /* Radio Path B */
- RF90_PATH_C = 2, /* Radio Path C */
- RF90_PATH_D = 3 /* Radio Path D */
-};
-
/* Bandwidth Offset */
#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
#define HAL_PRIME_CHNL_OFFSET_LOWER 1
@@ -99,16 +75,6 @@ enum ht_extchnl_offset {
HT_EXTCHNL_OFFSET_LOWER = 3,
};
-/* 2007/11/15 MH Define different RF type. */
-enum rt_rf_type_def {
- RF_1T2R = 0,
- RF_2T4R = 1,
- RF_2T2R = 2,
- RF_1T1R = 3,
- RF_2T2R_GREEN = 4,
- RF_819X_MAX_TYPE = 5,
-};
-
u32 rtw_ch2freq(u32 ch);
#endif /* _RTL8711_RF_H_ */
diff --git a/drivers/staging/r8188eu/include/usb_osintf.h b/drivers/staging/r8188eu/include/usb_osintf.h
index 624298b4bd0b..3e777ca52745 100644
--- a/drivers/staging/r8188eu/include/usb_osintf.h
+++ b/drivers/staging/r8188eu/include/usb_osintf.h
@@ -16,7 +16,6 @@ extern int rtw_mc2u_disable;
u8 usbvendorrequest(struct dvobj_priv *pdvobjpriv, enum bt_usb_request brequest,
enum rt_usb_wvalue wvalue, u8 windex, void *data,
u8 datalen, u8 isdirectionin);
-int pm_netdev_open(struct net_device *pnetdev, u8 bnormal);
void netdev_br_init(struct net_device *netdev);
void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb);
void *scdb_findEntry(struct adapter *priv, unsigned char *ipAddr);
diff --git a/drivers/staging/r8188eu/include/wifi.h b/drivers/staging/r8188eu/include/wifi.h
index 193a557f0f47..0b0bd39a257f 100644
--- a/drivers/staging/r8188eu/include/wifi.h
+++ b/drivers/staging/r8188eu/include/wifi.h
@@ -13,32 +13,9 @@
#define BIT(x) (1 << (x))
#define WLAN_ETHHDR_LEN 14
-#define WLAN_ETHADDR_LEN 6
-#define WLAN_IEEE_OUI_LEN 3
-#define WLAN_ADDR_LEN 6
-#define WLAN_CRC_LEN 4
-#define WLAN_BSSID_LEN 6
-#define WLAN_BSS_TS_LEN 8
#define WLAN_HDR_A3_LEN 24
-#define WLAN_HDR_A4_LEN 30
#define WLAN_HDR_A3_QOS_LEN 26
-#define WLAN_HDR_A4_QOS_LEN 32
#define WLAN_SSID_MAXLEN 32
-#define WLAN_DATA_MAXLEN 2312
-
-#define WLAN_A3_PN_OFFSET 24
-#define WLAN_A4_PN_OFFSET 30
-
-#define WLAN_MIN_ETHFRM_LEN 60
-#define WLAN_MAX_ETHFRM_LEN 1514
-#define WLAN_ETHHDR_LEN 14
-
-#define P80211CAPTURE_VERSION 0x80211001
-
-/* This value is tested by WiFi 11n Test Plan 5.2.3. */
-/* This test verifies the WLAN NIC can update the NAV through sending
- * the CTS with large duration. */
-#define WiFiNavUpperUs 30000 /* 30 ms */
enum WIFI_FRAME_TYPE {
WIFI_MGT_TYPE = (0),
@@ -487,13 +464,6 @@ static inline int IsFrameTypeCtrl(unsigned char *pframe)
#define _STATUS_CODE_ 2
#define _TIMESTAMP_ 8
-#define AUTH_ODD_TO 0
-#define AUTH_EVEN_TO 1
-
-#define WLAN_ETHCONV_ENCAP 1
-#define WLAN_ETHCONV_RFC1042 2
-#define WLAN_ETHCONV_8021h 3
-
#define cap_ESS BIT(0)
#define cap_IBSS BIT(1)
#define cap_CFPollable BIT(2)
@@ -632,13 +602,6 @@ enum ht_cap_ampdu_factor {
#define HT_INFO_OPERATION_MODE_TRANSMIT_BURST_LIMIT ((u8) BIT(3))
#define HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT ((u8) BIT(4))
-#define HT_INFO_STBC_PARAM_DUAL_BEACON ((u16) BIT(6))
-#define HT_INFO_STBC_PARAM_DUAL_STBC_PROTECT ((u16) BIT(7))
-#define HT_INFO_STBC_PARAM_SECONDARY_BC ((u16) BIT(8))
-#define HT_INFO_STBC_PARAM_LSIG_TXOP_PROTECT_ALLOWED ((u16) BIT(9))
-#define HT_INFO_STBC_PARAM_PCO_ACTIVE ((u16) BIT(10))
-#define HT_INFO_STBC_PARAM_PCO_PHASE ((u16) BIT(11))
-
/* ===============WPS Section=============== */
/* For WPSv1.0 */
#define WPSOUI 0x0050f204
@@ -930,7 +893,7 @@ enum P2P_PROTO_WK_ID {
P2P_PRE_TX_PROVDISC_PROCESS_WK = 2,
P2P_PRE_TX_NEGOREQ_PROCESS_WK = 3,
P2P_PRE_TX_INVITEREQ_PROCESS_WK = 4,
- P2P_AP_P2P_CH_SWITCH_PROCESS_WK =5,
+ P2P_AP_P2P_CH_SWITCH_PROCESS_WK = 5,
P2P_RO_CH_WK = 6,
};
@@ -949,26 +912,6 @@ enum P2P_PS_MODE {
P2P_PS_MIX = 3, /* CTWindow and NoA */
};
-/* =====================WFD Section===================== */
-/* For Wi-Fi Display */
-#define WFD_ATTR_DEVICE_INFO 0x00
-#define WFD_ATTR_ASSOC_BSSID 0x01
-#define WFD_ATTR_COUPLED_SINK_INFO 0x06
-#define WFD_ATTR_LOCAL_IP_ADDR 0x08
-#define WFD_ATTR_SESSION_INFO 0x09
-#define WFD_ATTR_ALTER_MAC 0x0a
-
-/* For WFD Device Information Attribute */
-#define WFD_DEVINFO_SOURCE 0x0000
-#define WFD_DEVINFO_PSINK 0x0001
-#define WFD_DEVINFO_SSINK 0x0002
-#define WFD_DEVINFO_DUAL 0x0003
-
-#define WFD_DEVINFO_SESSION_AVAIL 0x0010
-#define WFD_DEVINFO_WSD 0x0040
-#define WFD_DEVINFO_PC_TDLS 0x0080
-#define WFD_DEVINFO_HDCP_SUPPORT 0x0100
-
#define IP_MCAST_MAC(mac) \
((mac[0] == 0x01) && (mac[1] == 0x00) && (mac[2] == 0x5e))
#define ICMPV6_MCAST_MAC(mac) \
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index 9404355726d0..41b457838a5b 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-#define _IOCTL_LINUX_C_
-
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
#include "../include/wlan_bssdef.h"
@@ -14,7 +12,7 @@
#include "../include/rtw_ioctl_set.h"
#include "../include/usb_ops.h"
#include "../include/rtl8188e_hal.h"
-#include "../include/rtl8188e_led.h"
+#include "../include/rtw_led.h"
#include "../include/rtw_iol.h"
@@ -60,7 +58,7 @@ void rtw_indicate_wx_assoc_event(struct adapter *padapter)
memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress, ETH_ALEN);
- DBG_88E_LEVEL(_drv_always_, "assoc success\n");
+ netdev_dbg(padapter->pnetdev, "assoc success\n");
wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
}
@@ -73,7 +71,7 @@ void rtw_indicate_wx_disassoc_event(struct adapter *padapter)
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
- DBG_88E_LEVEL(_drv_always_, "indicate disassoc\n");
+ netdev_dbg(padapter->pnetdev, "indicate disassoc\n");
wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
}
@@ -2092,18 +2090,6 @@ static int rtw_wx_write_rf(struct net_device *dev,
return 0;
}
-static int rtw_wx_priv_null(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- return -1;
-}
-
-static int dummy(struct net_device *dev, struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- return -1;
-}
-
static int rtw_wx_set_channel_plan(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -2120,37 +2106,6 @@ static int rtw_wx_set_channel_plan(struct net_device *dev,
return 0;
}
-static int rtw_wx_set_mtk_wps_probe_ie(struct net_device *dev,
- struct iw_request_info *a,
- union iwreq_data *wrqu, char *b)
-{
- return 0;
-}
-
-static int rtw_wx_get_sensitivity(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *buf)
-{
- return 0;
-}
-
-static int rtw_wx_set_mtk_wps_ie(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- return 0;
-}
-
-/*
- * For all data larger than 16 octets, we need to use a
- * pointer to memory allocated in user space.
- */
-static int rtw_drvext_hdl(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- return 0;
-}
-
static int rtw_get_ap_info(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -3043,7 +2998,7 @@ static int rtw_p2p_connect(struct net_device *dev,
struct list_head *plist, *phead;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
- uint uintPeerChannel = 0;
+ u32 peer_channel = 0;
/* Commented by Albert 20110304 */
	/* The input data contains two pieces of information. */
@@ -3073,7 +3028,7 @@ static int rtw_p2p_connect(struct net_device *dev,
while (phead != plist) {
pnetwork = container_of(plist, struct wlan_network, list);
if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
- uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ peer_channel = pnetwork->network.Configuration.DSConfig;
break;
}
@@ -3082,11 +3037,11 @@ static int rtw_p2p_connect(struct net_device *dev,
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- if (uintPeerChannel) {
+ if (peer_channel) {
memset(&pwdinfo->nego_req_info, 0x00, sizeof(struct tx_nego_req_info));
memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
- pwdinfo->nego_req_info.peer_channel_num[0] = uintPeerChannel;
+ pwdinfo->nego_req_info.peer_channel_num[0] = peer_channel;
memcpy(pwdinfo->nego_req_info.peerDevAddr, pnetwork->network.MacAddress, ETH_ALEN);
pwdinfo->nego_req_info.benable = true;
@@ -3121,7 +3076,7 @@ static int rtw_p2p_invite_req(struct net_device *dev,
struct list_head *plist, *phead;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
- uint uintPeerChannel = 0;
+ uint peer_channel = 0;
u8 attr_content[50] = {0x00};
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
@@ -3177,13 +3132,13 @@ static int rtw_p2p_invite_req(struct net_device *dev,
if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen)) {
/* Handle the P2P Device ID attribute of Beacon first */
if (!memcmp(attr_content, pinvite_req_info->peer_macaddr, ETH_ALEN)) {
- uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ peer_channel = pnetwork->network.Configuration.DSConfig;
break;
}
} else if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen)) {
/* Handle the P2P Device Info attribute of probe response */
if (!memcmp(attr_content, pinvite_req_info->peer_macaddr, ETH_ALEN)) {
- uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ peer_channel = pnetwork->network.Configuration.DSConfig;
break;
}
}
@@ -3193,7 +3148,7 @@ static int rtw_p2p_invite_req(struct net_device *dev,
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- if (uintPeerChannel) {
+ if (peer_channel) {
/* Store the GO's bssid */
for (jj = 0, kk = 18; jj < ETH_ALEN; jj++, kk += 3)
pinvite_req_info->go_bssid[jj] = key_2char2num(extra[kk], extra[kk + 1]);
@@ -3202,12 +3157,12 @@ static int rtw_p2p_invite_req(struct net_device *dev,
pinvite_req_info->ssidlen = wrqu->data.length - 36;
memcpy(pinvite_req_info->go_ssid, &extra[36], (u32)pinvite_req_info->ssidlen);
pinvite_req_info->benable = true;
- pinvite_req_info->peer_ch = uintPeerChannel;
+ pinvite_req_info->peer_ch = peer_channel;
rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_INVITE_REQ);
- set_channel_bwmode(padapter, uintPeerChannel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ set_channel_bwmode(padapter, peer_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
@@ -3260,7 +3215,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
struct list_head *plist, *phead;
struct __queue *queue = &pmlmepriv->scanned_queue;
struct wlan_network *pnetwork = NULL;
- uint uintPeerChannel = 0;
+ uint peer_channel = 0;
u8 attr_content[100] = {0x00};
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
@@ -3310,7 +3265,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
plist = phead->next;
while (phead != plist) {
- if (uintPeerChannel != 0)
+ if (peer_channel != 0)
break;
pnetwork = container_of(plist, struct wlan_network, list);
@@ -3328,13 +3283,13 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen)) {
/* Handle the P2P Device ID attribute of Beacon first */
if (!memcmp(attr_content, peerMAC, ETH_ALEN)) {
- uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ peer_channel = pnetwork->network.Configuration.DSConfig;
break;
}
} else if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen)) {
/* Handle the P2P Device Info attribute of probe response */
if (!memcmp(attr_content, peerMAC, ETH_ALEN)) {
- uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ peer_channel = pnetwork->network.Configuration.DSConfig;
break;
}
}
@@ -3349,11 +3304,11 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
- if (uintPeerChannel) {
- DBG_88E("[%s] peer channel: %d!\n", __func__, uintPeerChannel);
+ if (peer_channel) {
+ DBG_88E("[%s] peer channel: %d!\n", __func__, peer_channel);
memcpy(pwdinfo->tx_prov_disc_info.peerIFAddr, pnetwork->network.MacAddress, ETH_ALEN);
memcpy(pwdinfo->tx_prov_disc_info.peerDevAddr, peerMAC, ETH_ALEN);
- pwdinfo->tx_prov_disc_info.peer_channel_num[0] = (u16)uintPeerChannel;
+ pwdinfo->tx_prov_disc_info.peer_channel_num[0] = (u16)peer_channel;
pwdinfo->tx_prov_disc_info.benable = true;
rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ);
@@ -3365,7 +3320,7 @@ static int rtw_p2p_prov_disc(struct net_device *dev,
pwdinfo->tx_prov_disc_info.ssid.SsidLength = P2P_WILDCARD_SSID_LEN;
}
- set_channel_bwmode(padapter, uintPeerChannel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ set_channel_bwmode(padapter, peer_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
@@ -3617,27 +3572,18 @@ static void bb_reg_dump(struct adapter *padapter)
static void rf_reg_dump(struct adapter *padapter)
{
- int i, j = 1, path;
+ int i, j = 1, path = 0;
u32 value;
- u8 rf_type, path_nums = 0;
- GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
pr_info("\n ======= RF REG =======\n");
- if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type))
- path_nums = 1;
- else
- path_nums = 2;
-
- for (path = 0; path < path_nums; path++) {
- pr_info("\nRF_Path(%x)\n", path);
- for (i = 0; i < 0x100; i++) {
- value = rtl8188e_PHY_QueryRFReg(padapter, path, i, 0xffffffff);
- if (j % 4 == 1)
- pr_info("0x%02x ", i);
- pr_info(" 0x%08x ", value);
- if ((j++) % 4 == 0)
- pr_info("\n");
- }
+ pr_info("\nRF_Path(%x)\n", path);
+ for (i = 0; i < 0x100; i++) {
+ value = rtl8188e_PHY_QueryRFReg(padapter, path, i, 0xffffffff);
+ if (j % 4 == 1)
+ pr_info("0x%02x ", i);
+ pr_info(" 0x%08x ", value);
+ if ((j++) % 4 == 0)
+ pr_info("\n");
}
}
@@ -4058,12 +4004,6 @@ static int rtw_dbg_port(struct net_device *dev,
DBG_88E("turn %s the bShowGetP2PState Variable\n", (extra_arg == 1) ? "on" : "off");
padapter->bShowGetP2PState = extra_arg;
break;
- case 0xaa:
- if (extra_arg > 0x13)
- extra_arg = 0xFF;
- DBG_88E("chang data rate to :0x%02x\n", extra_arg);
- padapter->fix_rate = extra_arg;
- break;
case 0xdd:/* registers dump, 0 for mac reg, 1 for bb reg, 2 for rf reg */
if (extra_arg == 0)
mac_reg_dump(padapter);
@@ -4230,81 +4170,21 @@ static int rtw_pm_set(struct net_device *dev,
return ret;
}
-extern int wifirate2_ratetbl_inx(unsigned char rate);
-
-static int rtw_tdls(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- return 0;
-}
-
-static int rtw_tdls_get(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- return 0;
-}
-
-static int rtw_test(
- struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- u32 len;
- u8 *pbuf, *pch;
- char *ptmp;
- u8 *delim = ",";
-
- DBG_88E("+%s\n", __func__);
- len = wrqu->data.length;
-
- pbuf = kzalloc(len, GFP_KERNEL);
- if (!pbuf) {
- DBG_88E("%s: no memory!\n", __func__);
- return -ENOMEM;
- }
-
- if (copy_from_user(pbuf, wrqu->data.pointer, len)) {
- kfree(pbuf);
- DBG_88E("%s: copy from user fail!\n", __func__);
- return -EFAULT;
- }
- DBG_88E("%s: string =\"%s\"\n", __func__, pbuf);
-
- ptmp = (char *)pbuf;
- pch = strsep(&ptmp, delim);
- if (!pch || strlen(pch) == 0) {
- kfree(pbuf);
- DBG_88E("%s: parameter error(level 1)!\n", __func__);
- return -EFAULT;
- }
- kfree(pbuf);
- return 0;
-}
-
static iw_handler rtw_handlers[] = {
IW_HANDLER(SIOCGIWNAME, rtw_wx_get_name),
- IW_HANDLER(SIOCSIWNWID, dummy),
- IW_HANDLER(SIOCGIWNWID, dummy),
IW_HANDLER(SIOCGIWFREQ, rtw_wx_get_freq),
IW_HANDLER(SIOCSIWMODE, rtw_wx_set_mode),
IW_HANDLER(SIOCGIWMODE, rtw_wx_get_mode),
- IW_HANDLER(SIOCSIWSENS, dummy),
IW_HANDLER(SIOCGIWSENS, rtw_wx_get_sens),
IW_HANDLER(SIOCGIWRANGE, rtw_wx_get_range),
IW_HANDLER(SIOCSIWPRIV, rtw_wx_set_priv),
- IW_HANDLER(SIOCSIWSPY, dummy),
- IW_HANDLER(SIOCGIWSPY, dummy),
IW_HANDLER(SIOCSIWAP, rtw_wx_set_wap),
IW_HANDLER(SIOCGIWAP, rtw_wx_get_wap),
IW_HANDLER(SIOCSIWMLME, rtw_wx_set_mlme),
- IW_HANDLER(SIOCGIWAPLIST, dummy),
IW_HANDLER(SIOCSIWSCAN, rtw_wx_set_scan),
IW_HANDLER(SIOCGIWSCAN, rtw_wx_get_scan),
IW_HANDLER(SIOCSIWESSID, rtw_wx_set_essid),
IW_HANDLER(SIOCGIWESSID, rtw_wx_get_essid),
- IW_HANDLER(SIOCSIWNICKN, dummy),
IW_HANDLER(SIOCGIWNICKN, rtw_wx_get_nick),
IW_HANDLER(SIOCSIWRATE, rtw_wx_set_rate),
IW_HANDLER(SIOCGIWRATE, rtw_wx_get_rate),
@@ -4312,13 +4192,9 @@ static iw_handler rtw_handlers[] = {
IW_HANDLER(SIOCGIWRTS, rtw_wx_get_rts),
IW_HANDLER(SIOCSIWFRAG, rtw_wx_set_frag),
IW_HANDLER(SIOCGIWFRAG, rtw_wx_get_frag),
- IW_HANDLER(SIOCSIWTXPOW, dummy),
- IW_HANDLER(SIOCGIWTXPOW, dummy),
- IW_HANDLER(SIOCSIWRETRY, dummy),
IW_HANDLER(SIOCGIWRETRY, rtw_wx_get_retry),
IW_HANDLER(SIOCSIWENCODE, rtw_wx_set_enc),
IW_HANDLER(SIOCGIWENCODE, rtw_wx_get_enc),
- IW_HANDLER(SIOCSIWPOWER, dummy),
IW_HANDLER(SIOCGIWPOWER, rtw_wx_get_power),
IW_HANDLER(SIOCSIWGENIE, rtw_wx_set_gen_ie),
IW_HANDLER(SIOCSIWAUTH, rtw_wx_set_auth),
@@ -4340,9 +4216,6 @@ static const struct iw_priv_args rtw_private_args[] = {
SIOCIWFIRSTPRIV + 0x2, 0, 0, "driver_ext"
},
{
- SIOCIWFIRSTPRIV + 0x3, 0, 0, "mp_ioctl"
- },
- {
SIOCIWFIRSTPRIV + 0x4,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "apinfo"
},
@@ -4355,19 +4228,6 @@ static const struct iw_priv_args rtw_private_args[] = {
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_start"
},
{
- SIOCIWFIRSTPRIV + 0x7,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "get_sensitivity"
- },
- {
- SIOCIWFIRSTPRIV + 0x8,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_prob_req_ie"
- },
- {
- SIOCIWFIRSTPRIV + 0x9,
- IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_assoc_req_ie"
- },
-
- {
SIOCIWFIRSTPRIV + 0xA,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "channel_plan"
},
@@ -4396,33 +4256,18 @@ static const struct iw_priv_args rtw_private_args[] = {
SIOCIWFIRSTPRIV + 0x12,
IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, IW_PRIV_TYPE_CHAR | IFNAMSIZ, "p2p_get2"
},
- {SIOCIWFIRSTPRIV + 0x13, IW_PRIV_TYPE_CHAR | 128, 0, "NULL"},
- {
- SIOCIWFIRSTPRIV + 0x14,
- IW_PRIV_TYPE_CHAR | 64, 0, "tdls"
- },
- {
- SIOCIWFIRSTPRIV + 0x15,
- IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | P2P_PRIVATE_IOCTL_SET_LEN, "tdls_get"
- },
{
SIOCIWFIRSTPRIV + 0x16,
IW_PRIV_TYPE_CHAR | 64, 0, "pm_set"
},
{SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_CHAR | IFNAMSIZ, 0, "rereg_nd_name"},
-
- {SIOCIWFIRSTPRIV + 0x1D, IW_PRIV_TYPE_CHAR | 40, IW_PRIV_TYPE_CHAR | 0x7FF, "test"
- },
-
- {SIOCIWFIRSTPRIV + 0x0E, IW_PRIV_TYPE_CHAR | 1024, 0, ""}, /* set */
- {SIOCIWFIRSTPRIV + 0x0F, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, ""},/* get */
};
static iw_handler rtw_private_handler[] = {
rtw_wx_write32, /* 0x00 */
rtw_wx_read32, /* 0x01 */
-rtw_drvext_hdl, /* 0x02 */
+ NULL, /* 0x02 */
NULL, /* 0x03 */
/* for MM DTV platform */
rtw_get_ap_info, /* 0x04 */
@@ -4430,9 +4275,9 @@ NULL, /* 0x03 */
rtw_set_pid, /* 0x05 */
rtw_wps_start, /* 0x06 */
- rtw_wx_get_sensitivity, /* 0x07 */
- rtw_wx_set_mtk_wps_probe_ie, /* 0x08 */
- rtw_wx_set_mtk_wps_ie, /* 0x09 */
+ NULL, /* 0x07 */
+ NULL, /* 0x08 */
+ NULL, /* 0x09 */
/* Set Channel depend on the country code */
rtw_wx_set_channel_plan, /* 0x0A */
@@ -4448,18 +4293,12 @@ NULL, /* 0x03 */
rtw_p2p_get2, /* 0x12 */
NULL, /* 0x13 */
- rtw_tdls, /* 0x14 */
- rtw_tdls_get, /* 0x15 */
+ NULL, /* 0x14 */
+ NULL, /* 0x15 */
rtw_pm_set, /* 0x16 */
- rtw_wx_priv_null, /* 0x17 */
+ NULL, /* 0x17 */
rtw_rereg_nd_name, /* 0x18 */
- rtw_wx_priv_null, /* 0x19 */
-
- NULL, /* 0x1A */
- NULL, /* 0x1B */
- NULL, /* 0x1C is reserved for hostapd */
- rtw_test, /* 0x1D */
};
static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
index 10059240bf54..b65e44f97826 100644
--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/r8188eu/os_dep/os_intfs.c
@@ -11,7 +11,7 @@
#include "../include/rtw_ioctl.h"
#include "../include/usb_osintf.h"
#include "../include/rtw_br_ext.h"
-#include "../include/rtl8188e_led.h"
+#include "../include/rtw_led.h"
#include "../include/rtl8188e_dm.h"
MODULE_LICENSE("GPL");
@@ -75,7 +75,6 @@ static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */
static int rtw_lowrate_two_xmit = 1;/* Use 2 path Tx to transmit MCS0~7 and legacy mode */
-static int rtw_rf_config = RF_819X_MAX_TYPE; /* auto */
static int rtw_low_power;
static int rtw_wifi_spec;
static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;
@@ -123,7 +122,6 @@ module_param(rtw_ampdu_enable, int, 0644);
module_param(rtw_rx_stbc, int, 0644);
module_param(rtw_ampdu_amsdu, int, 0644);
module_param(rtw_lowrate_two_xmit, int, 0644);
-module_param(rtw_rf_config, int, 0644);
module_param(rtw_power_mgnt, int, 0644);
module_param(rtw_smart_ps, int, 0644);
module_param(rtw_low_power, int, 0644);
@@ -153,7 +151,7 @@ MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P")
module_param_named(debug, rtw_debug, int, 0444);
MODULE_PARM_DESC(debug, "Set debug level (1-9) (default 1)");
-static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
+static uint loadparam(struct adapter *padapter)
{
struct registry_priv *registry_par = &padapter->registrypriv;
@@ -205,7 +203,6 @@ static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
registry_par->rx_stbc = (u8)rtw_rx_stbc;
registry_par->ampdu_amsdu = (u8)rtw_ampdu_amsdu;
registry_par->lowrate_two_xmit = (u8)rtw_lowrate_two_xmit;
- registry_par->rf_config = (u8)rtw_rf_config;
registry_par->low_power = (u8)rtw_low_power;
registry_par->wifi_spec = (u8)rtw_wifi_spec;
registry_par->channel_plan = (u8)rtw_channel_plan;
@@ -371,7 +368,7 @@ struct net_device *rtw_init_netdev(struct adapter *old_padapter)
pnetdev->wireless_handlers = (struct iw_handler_def *)&rtw_handlers_def;
/* step 2. */
- loadparam(padapter, pnetdev);
+ loadparam(padapter);
return pnetdev;
}
@@ -399,7 +396,7 @@ void rtw_stop_drv_threads(struct adapter *padapter)
wait_for_completion(&padapter->cmdpriv.stop_cmd_thread);
}
-static u8 rtw_init_default_value(struct adapter *padapter)
+static void rtw_init_default_value(struct adapter *padapter)
{
struct registry_priv *pregistrypriv = &padapter->registrypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
@@ -444,7 +441,6 @@ static u8 rtw_init_default_value(struct adapter *padapter)
padapter->bRxRSSIDisplay = 0;
padapter->bNotifyChannelChange = 0;
padapter->bShowGetP2PState = 1;
- return _SUCCESS;
}
u8 rtw_reset_drv_sw(struct adapter *padapter)
@@ -478,50 +474,37 @@ u8 rtw_reset_drv_sw(struct adapter *padapter)
u8 rtw_init_drv_sw(struct adapter *padapter)
{
- u8 ret8 = _SUCCESS;
-
- if ((rtw_init_cmd_priv(&padapter->cmdpriv)) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if ((rtw_init_cmd_priv(&padapter->cmdpriv)) == _FAIL)
+ return _FAIL;
padapter->cmdpriv.padapter = padapter;
- if ((rtw_init_evt_priv(&padapter->evtpriv)) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if ((rtw_init_evt_priv(&padapter->evtpriv)) == _FAIL)
+ return _FAIL;
- if (rtw_init_mlme_priv(padapter) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (rtw_init_mlme_priv(padapter) == _FAIL)
+ return _FAIL;
rtw_init_wifidirect_timers(padapter);
init_wifidirect_info(padapter, P2P_ROLE_DISABLE);
reset_global_wifidirect_info(padapter);
- if (init_mlme_ext_priv(padapter) == _FAIL) {
- ret8 = _FAIL;
- goto exit;
- }
+ if (init_mlme_ext_priv(padapter) == _FAIL)
+ return _FAIL;
if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL) {
DBG_88E("Can't _rtw_init_xmit_priv\n");
- ret8 = _FAIL;
- goto exit;
+ return _FAIL;
}
if (_rtw_init_recv_priv(&padapter->recvpriv, padapter) == _FAIL) {
DBG_88E("Can't _rtw_init_recv_priv\n");
- ret8 = _FAIL;
- goto exit;
+ return _FAIL;
}
if (_rtw_init_sta_priv(&padapter->stapriv) == _FAIL) {
DBG_88E("Can't _rtw_init_sta_priv\n");
- ret8 = _FAIL;
- goto exit;
+ return _FAIL;
}
padapter->stapriv.padapter = padapter;
@@ -530,15 +513,14 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
rtw_init_pwrctrl_priv(padapter);
- ret8 = rtw_init_default_value(padapter);
+ rtw_init_default_value(padapter);
rtl8188e_init_dm_priv(padapter);
rtl8188eu_InitSwLeds(padapter);
spin_lock_init(&padapter->br_ext_lock);
-exit:
- return ret8;
+ return _SUCCESS;
}
void rtw_cancel_all_timer(struct adapter *padapter)
@@ -585,8 +567,6 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
_rtw_free_recv_priv(&padapter->recvpriv);
- rtl8188e_free_hal_data(padapter);
-
/* free the old_pnetdev */
if (padapter->rereg_nd_name_priv.old_pnetdev) {
free_netdev(padapter->rereg_nd_name_priv.old_pnetdev);
@@ -775,26 +755,11 @@ void rtw_ips_dev_unload(struct adapter *padapter)
rtw_hal_deinit(padapter);
}
-int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
-{
- int status;
-
- if (bnormal)
- status = netdev_open(pnetdev);
- else
- status = (_SUCCESS == ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev))) ? (0) : (-1);
- return status;
-}
-
int netdev_close(struct net_device *pnetdev)
{
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
- if (padapter->pwrctrlpriv.bInternalAutoSuspend) {
- if (padapter->pwrctrlpriv.rf_pwrstate == rf_off)
- padapter->pwrctrlpriv.ps_flag = true;
- }
padapter->net_closed = true;
if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) {
diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
index 5a35d9fe3fc9..91792dfd3bbe 100644
--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
@@ -29,13 +29,12 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
/*=== Realtek demoboard ===*/
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8179)}, /* 8188EUS */
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
- {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xf179)}, /* 8188FU */
/*=== Customer ID ===*/
/****** 8188EUS ********/
{USB_DEVICE(0x07B8, 0x8179)}, /* Abocom - Abocom */
{USB_DEVICE(0x0DF6, 0x0076)}, /* Sitecom N150 v2 */
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
- {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
+ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
{USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
{USB_DEVICE(0x056E, 0x4008)}, /* Elecom WDC-150SU2M */
@@ -70,6 +69,7 @@ static struct rtw_usb_drv *usb_drv = &rtl8188e_usb_drv;
static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
{
int i;
+ u8 rt_num_in_pipes = 0;
struct dvobj_priv *pdvobjpriv;
struct usb_host_config *phost_conf;
struct usb_config_descriptor *pconf_desc;
@@ -80,14 +80,13 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
pdvobjpriv = kzalloc(sizeof(*pdvobjpriv), GFP_KERNEL);
if (!pdvobjpriv)
- goto exit;
+ goto err;
pdvobjpriv->pusbintf = usb_intf;
pusbd = interface_to_usbdev(usb_intf);
pdvobjpriv->pusbdev = pusbd;
usb_set_intfdata(usb_intf, pdvobjpriv);
- pdvobjpriv->RtNumInPipes = 0;
pdvobjpriv->RtNumOutPipes = 0;
phost_conf = pusbd->actconfig;
@@ -98,28 +97,26 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces;
pdvobjpriv->InterfaceNumber = piface_desc->bInterfaceNumber;
- pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
- for (i = 0; i < pdvobjpriv->nr_endpoint; i++) {
+ for (i = 0; i < piface_desc->bNumEndpoints; i++) {
int ep_num;
pendp_desc = &phost_iface->endpoint[i].desc;
ep_num = usb_endpoint_num(pendp_desc);
if (usb_endpoint_is_bulk_in(pendp_desc)) {
- pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = ep_num;
- pdvobjpriv->RtNumInPipes++;
- } else if (usb_endpoint_is_int_in(pendp_desc)) {
- pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = ep_num;
- pdvobjpriv->RtNumInPipes++;
+ pdvobjpriv->RtInPipe = ep_num;
+ rt_num_in_pipes++;
} else if (usb_endpoint_is_bulk_out(pendp_desc)) {
pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] =
ep_num;
pdvobjpriv->RtNumOutPipes++;
}
- pdvobjpriv->ep_num[i] = ep_num;
}
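+	/* the descriptor walk above must have found exactly one bulk IN endpoint */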
+ if (rt_num_in_pipes != 1)
+ goto err;
+
if (pusbd->speed == USB_SPEED_HIGH) {
pdvobjpriv->ishighspeed = true;
DBG_88E("USB_SPEED_HIGH\n");
@@ -133,9 +130,11 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
rtw_reset_continual_urb_error(pdvobjpriv);
usb_get_dev(pusbd);
-
-exit:
return pdvobjpriv;
+
+err:
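+	/* kfree(NULL) is a no-op, so the allocation failure path may jump here too */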
+ kfree(pdvobjpriv);
+ return NULL;
}
static void usb_dvobj_deinit(struct usb_interface *usb_intf)
@@ -193,8 +192,7 @@ static void rtw_dev_unload(struct adapter *padapter)
if (padapter->intf_stop)
padapter->intf_stop(padapter);
/* s4. */
- if (!padapter->pwrctrlpriv.bInternalAutoSuspend)
- rtw_stop_drv_threads(padapter);
+ rtw_stop_drv_threads(padapter);
/* s5. */
if (!padapter->bSurpriseRemoved) {
@@ -298,7 +296,7 @@ static int rtw_resume(struct usb_interface *pusb_intf)
pwrpriv->bkeepfwalive = false;
DBG_88E("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
- if (pm_netdev_open(pnetdev, true) != 0) {
+ if (netdev_open(pnetdev) != 0) {
mutex_unlock(&pwrpriv->lock);
goto exit;
}
@@ -362,9 +360,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj));
padapter = rtw_netdev_priv(pnetdev);
- /* step 2. allocate HalData */
- rtl8188eu_alloc_haldata(padapter);
-
padapter->intf_start = &usb_intf_start;
padapter->intf_stop = &usb_intf_stop;
@@ -386,7 +381,7 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
/* step 5. */
if (rtw_init_drv_sw(padapter) == _FAIL)
- goto free_hal_data;
+ goto handle_dualmac;
#ifdef CONFIG_PM
if (padapter->pwrctrlpriv.bSupportRemoteWakeup) {
@@ -414,7 +409,7 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
/* step 6. Tell the network stack we exist */
if (register_netdev(pnetdev) != 0)
- goto free_hal_data;
+ goto handle_dualmac;
DBG_88E("bDriverStopped:%d, bSurpriseRemoved:%d, bup:%d, hw_init_completed:%d\n"
, padapter->bDriverStopped
@@ -425,9 +420,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
status = _SUCCESS;
-free_hal_data:
- if (status != _SUCCESS)
- kfree(padapter->HalData);
handle_dualmac:
if (status != _SUCCESS)
rtw_handle_dualmac(padapter, 0);
diff --git a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
index ef2ea68ae873..d1cb33d3e6a7 100644
--- a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
@@ -12,11 +12,7 @@ unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr)
unsigned int pipe = 0, ep_num = 0;
struct usb_device *pusbd = pdvobj->pusbdev;
- if (addr == RECV_BULK_IN_ADDR) {
- pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]);
- } else if (addr == RECV_INT_IN_ADDR) {
- pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]);
- } else if (addr < HW_QUEUE_ENTRY) {
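+	/* addr now selects a TX queue only; reads use the single bulk IN pipe directly */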
+ if (addr < HW_QUEUE_ENTRY) {
ep_num = pdvobj->Queue2Pipe[addr];
pipe = usb_sndbulkpipe(pusbd, ep_num);
}
diff --git a/drivers/staging/ralink-gdma/Kconfig b/drivers/staging/ralink-gdma/Kconfig
deleted file mode 100644
index 0017376234e2..000000000000
--- a/drivers/staging/ralink-gdma/Kconfig
+++ /dev/null
@@ -1,8 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config DMA_RALINK
- tristate "RALINK DMA support"
- depends on RALINK && !SOC_RT288X
- depends on DMADEVICES
- select DMA_ENGINE
- select DMA_VIRTUAL_CHANNELS
-
diff --git a/drivers/staging/ralink-gdma/Makefile b/drivers/staging/ralink-gdma/Makefile
deleted file mode 100644
index 5c4566b2e405..000000000000
--- a/drivers/staging/ralink-gdma/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DMA_RALINK) += ralink-gdma.o
-
-ccflags-y += -I$(srctree)/drivers/dma
diff --git a/drivers/staging/ralink-gdma/ralink-gdma.c b/drivers/staging/ralink-gdma/ralink-gdma.c
deleted file mode 100644
index b5229bc6eae5..000000000000
--- a/drivers/staging/ralink-gdma/ralink-gdma.c
+++ /dev/null
@@ -1,917 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * GDMA4740 DMAC support
- */
-
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/irq.h>
-#include <linux/of_dma.h>
-#include <linux/reset.h>
-#include <linux/of_device.h>
-
-#include "virt-dma.h"
-
-#define GDMA_REG_SRC_ADDR(x) (0x00 + (x) * 0x10)
-#define GDMA_REG_DST_ADDR(x) (0x04 + (x) * 0x10)
-
-#define GDMA_REG_CTRL0(x) (0x08 + (x) * 0x10)
-#define GDMA_REG_CTRL0_TX_MASK 0xffff
-#define GDMA_REG_CTRL0_TX_SHIFT 16
-#define GDMA_REG_CTRL0_CURR_MASK 0xff
-#define GDMA_REG_CTRL0_CURR_SHIFT 8
-#define GDMA_REG_CTRL0_SRC_ADDR_FIXED BIT(7)
-#define GDMA_REG_CTRL0_DST_ADDR_FIXED BIT(6)
-#define GDMA_REG_CTRL0_BURST_MASK 0x7
-#define GDMA_REG_CTRL0_BURST_SHIFT 3
-#define GDMA_REG_CTRL0_DONE_INT BIT(2)
-#define GDMA_REG_CTRL0_ENABLE BIT(1)
-#define GDMA_REG_CTRL0_SW_MODE BIT(0)
-
-#define GDMA_REG_CTRL1(x) (0x0c + (x) * 0x10)
-#define GDMA_REG_CTRL1_SEG_MASK 0xf
-#define GDMA_REG_CTRL1_SEG_SHIFT 22
-#define GDMA_REG_CTRL1_REQ_MASK 0x3f
-#define GDMA_REG_CTRL1_SRC_REQ_SHIFT 16
-#define GDMA_REG_CTRL1_DST_REQ_SHIFT 8
-#define GDMA_REG_CTRL1_NEXT_MASK 0x1f
-#define GDMA_REG_CTRL1_NEXT_SHIFT 3
-#define GDMA_REG_CTRL1_COHERENT BIT(2)
-#define GDMA_REG_CTRL1_FAIL BIT(1)
-#define GDMA_REG_CTRL1_MASK BIT(0)
-
-#define GDMA_REG_UNMASK_INT 0x200
-#define GDMA_REG_DONE_INT 0x204
-
-#define GDMA_REG_GCT 0x220
-#define GDMA_REG_GCT_CHAN_MASK 0x3
-#define GDMA_REG_GCT_CHAN_SHIFT 3
-#define GDMA_REG_GCT_VER_MASK 0x3
-#define GDMA_REG_GCT_VER_SHIFT 1
-#define GDMA_REG_GCT_ARBIT_RR BIT(0)
-
-#define GDMA_REG_REQSTS 0x2a0
-#define GDMA_REG_ACKSTS 0x2a4
-#define GDMA_REG_FINSTS 0x2a8
-
-/* for RT305X gdma registers */
-#define GDMA_RT305X_CTRL0_REQ_MASK 0xf
-#define GDMA_RT305X_CTRL0_SRC_REQ_SHIFT 12
-#define GDMA_RT305X_CTRL0_DST_REQ_SHIFT 8
-
-#define GDMA_RT305X_CTRL1_FAIL BIT(4)
-#define GDMA_RT305X_CTRL1_NEXT_MASK 0x7
-#define GDMA_RT305X_CTRL1_NEXT_SHIFT 1
-
-#define GDMA_RT305X_STATUS_INT 0x80
-#define GDMA_RT305X_STATUS_SIGNAL 0x84
-#define GDMA_RT305X_GCT 0x88
-
-/* for MT7621 gdma registers */
-#define GDMA_REG_PERF_START(x) (0x230 + (x) * 0x8)
-#define GDMA_REG_PERF_END(x) (0x234 + (x) * 0x8)
-
-enum gdma_dma_transfer_size {
- GDMA_TRANSFER_SIZE_4BYTE = 0,
- GDMA_TRANSFER_SIZE_8BYTE = 1,
- GDMA_TRANSFER_SIZE_16BYTE = 2,
- GDMA_TRANSFER_SIZE_32BYTE = 3,
- GDMA_TRANSFER_SIZE_64BYTE = 4,
-};
-
-struct gdma_dma_sg {
- dma_addr_t src_addr;
- dma_addr_t dst_addr;
- u32 len;
-};
-
-struct gdma_dma_desc {
- struct virt_dma_desc vdesc;
-
- enum dma_transfer_direction direction;
- bool cyclic;
-
- u32 residue;
- unsigned int num_sgs;
- struct gdma_dma_sg sg[];
-};
-
-struct gdma_dmaengine_chan {
- struct virt_dma_chan vchan;
- unsigned int id;
- unsigned int slave_id;
-
- dma_addr_t fifo_addr;
- enum gdma_dma_transfer_size burst_size;
-
- struct gdma_dma_desc *desc;
- unsigned int next_sg;
-};
-
-struct gdma_dma_dev {
- struct dma_device ddev;
- struct device_dma_parameters dma_parms;
- struct gdma_data *data;
- void __iomem *base;
- struct tasklet_struct task;
- volatile unsigned long chan_issued;
- atomic_t cnt;
-
- struct gdma_dmaengine_chan chan[];
-};
-
-struct gdma_data {
- int chancnt;
- u32 done_int_reg;
- void (*init)(struct gdma_dma_dev *dma_dev);
- int (*start_transfer)(struct gdma_dmaengine_chan *chan);
-};
-
-static struct gdma_dma_dev *gdma_dma_chan_get_dev(
- struct gdma_dmaengine_chan *chan)
-{
- return container_of(chan->vchan.chan.device, struct gdma_dma_dev,
- ddev);
-}
-
-static struct gdma_dmaengine_chan *to_gdma_dma_chan(struct dma_chan *c)
-{
- return container_of(c, struct gdma_dmaengine_chan, vchan.chan);
-}
-
-static struct gdma_dma_desc *to_gdma_dma_desc(struct virt_dma_desc *vdesc)
-{
- return container_of(vdesc, struct gdma_dma_desc, vdesc);
-}
-
-static inline uint32_t gdma_dma_read(struct gdma_dma_dev *dma_dev,
- unsigned int reg)
-{
- return readl(dma_dev->base + reg);
-}
-
-static inline void gdma_dma_write(struct gdma_dma_dev *dma_dev,
- unsigned int reg, uint32_t val)
-{
- writel(val, dma_dev->base + reg);
-}
-
-static enum gdma_dma_transfer_size gdma_dma_maxburst(u32 maxburst)
-{
- if (maxburst < 2)
- return GDMA_TRANSFER_SIZE_4BYTE;
- else if (maxburst < 4)
- return GDMA_TRANSFER_SIZE_8BYTE;
- else if (maxburst < 8)
- return GDMA_TRANSFER_SIZE_16BYTE;
- else if (maxburst < 16)
- return GDMA_TRANSFER_SIZE_32BYTE;
- else
- return GDMA_TRANSFER_SIZE_64BYTE;
-}
-
-static int gdma_dma_config(struct dma_chan *c,
- struct dma_slave_config *config)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
-
- if (config->device_fc) {
- dev_err(dma_dev->ddev.dev, "not support flow controller\n");
- return -EINVAL;
- }
-
- switch (config->direction) {
- case DMA_MEM_TO_DEV:
- if (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
- dev_err(dma_dev->ddev.dev, "only support 4 byte buswidth\n");
- return -EINVAL;
- }
- chan->slave_id = config->slave_id;
- chan->fifo_addr = config->dst_addr;
- chan->burst_size = gdma_dma_maxburst(config->dst_maxburst);
- break;
- case DMA_DEV_TO_MEM:
- if (config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) {
- dev_err(dma_dev->ddev.dev, "only support 4 byte buswidth\n");
- return -EINVAL;
- }
- chan->slave_id = config->slave_id;
- chan->fifo_addr = config->src_addr;
- chan->burst_size = gdma_dma_maxburst(config->src_maxburst);
- break;
- default:
- dev_err(dma_dev->ddev.dev, "direction type %d error\n",
- config->direction);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int gdma_dma_terminate_all(struct dma_chan *c)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
- unsigned long flags, timeout;
- LIST_HEAD(head);
- int i = 0;
-
- spin_lock_irqsave(&chan->vchan.lock, flags);
- chan->desc = NULL;
- clear_bit(chan->id, &dma_dev->chan_issued);
- vchan_get_all_descriptors(&chan->vchan, &head);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
-
- vchan_dma_desc_free_list(&chan->vchan, &head);
-
- /* wait dma transfer complete */
- timeout = jiffies + msecs_to_jiffies(5000);
- while (gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id)) &
- GDMA_REG_CTRL0_ENABLE) {
- if (time_after_eq(jiffies, timeout)) {
- dev_err(dma_dev->ddev.dev, "chan %d wait timeout\n",
- chan->id);
- /* restore to init value */
- gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), 0);
- break;
- }
- cpu_relax();
- i++;
- }
-
- if (i)
- dev_dbg(dma_dev->ddev.dev, "terminate chan %d loops %d\n",
- chan->id, i);
-
- return 0;
-}
-
-static void rt305x_dump_reg(struct gdma_dma_dev *dma_dev, int id)
-{
- dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, intr %08x, signal %08x\n",
- id,
- gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
- gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
- gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
- gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
- gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_INT),
- gdma_dma_read(dma_dev, GDMA_RT305X_STATUS_SIGNAL));
-}
-
-static int rt305x_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
-{
- struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
- dma_addr_t src_addr, dst_addr;
- struct gdma_dma_sg *sg;
- u32 ctrl0, ctrl1;
-
- /* verify chan is already stopped */
- ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
- if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
- dev_err(dma_dev->ddev.dev, "chan %d is start(%08x).\n",
- chan->id, ctrl0);
- rt305x_dump_reg(dma_dev, chan->id);
- return -EINVAL;
- }
-
- sg = &chan->desc->sg[chan->next_sg];
- if (chan->desc->direction == DMA_MEM_TO_DEV) {
- src_addr = sg->src_addr;
- dst_addr = chan->fifo_addr;
- ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED |
- (8 << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
- (chan->slave_id << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
- } else if (chan->desc->direction == DMA_DEV_TO_MEM) {
- src_addr = chan->fifo_addr;
- dst_addr = sg->dst_addr;
- ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED |
- (chan->slave_id << GDMA_RT305X_CTRL0_SRC_REQ_SHIFT) |
- (8 << GDMA_RT305X_CTRL0_DST_REQ_SHIFT);
- } else if (chan->desc->direction == DMA_MEM_TO_MEM) {
- /*
- * TODO: memcpy function have bugs. sometime it will copy
- * more 8 bytes data when using dmatest verify.
- */
- src_addr = sg->src_addr;
- dst_addr = sg->dst_addr;
- ctrl0 = GDMA_REG_CTRL0_SW_MODE |
- (8 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
- (8 << GDMA_REG_CTRL1_DST_REQ_SHIFT);
- } else {
- dev_err(dma_dev->ddev.dev, "direction type %d error\n",
- chan->desc->direction);
- return -EINVAL;
- }
-
- ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
- (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
- GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
- ctrl1 = chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
-
- chan->next_sg++;
- gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
- gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
- gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
-
- /* make sure next_sg is update */
- wmb();
- gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
-
- return 0;
-}
-
-static void rt3883_dump_reg(struct gdma_dma_dev *dma_dev, int id)
-{
- dev_dbg(dma_dev->ddev.dev, "chan %d, src %08x, dst %08x, ctr0 %08x, ctr1 %08x, unmask %08x, done %08x, req %08x, ack %08x, fin %08x\n",
- id,
- gdma_dma_read(dma_dev, GDMA_REG_SRC_ADDR(id)),
- gdma_dma_read(dma_dev, GDMA_REG_DST_ADDR(id)),
- gdma_dma_read(dma_dev, GDMA_REG_CTRL0(id)),
- gdma_dma_read(dma_dev, GDMA_REG_CTRL1(id)),
- gdma_dma_read(dma_dev, GDMA_REG_UNMASK_INT),
- gdma_dma_read(dma_dev, GDMA_REG_DONE_INT),
- gdma_dma_read(dma_dev, GDMA_REG_REQSTS),
- gdma_dma_read(dma_dev, GDMA_REG_ACKSTS),
- gdma_dma_read(dma_dev, GDMA_REG_FINSTS));
-}
-
-static int rt3883_gdma_start_transfer(struct gdma_dmaengine_chan *chan)
-{
- struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
- dma_addr_t src_addr, dst_addr;
- struct gdma_dma_sg *sg;
- u32 ctrl0, ctrl1;
-
- /* verify chan is already stopped */
- ctrl0 = gdma_dma_read(dma_dev, GDMA_REG_CTRL0(chan->id));
- if (unlikely(ctrl0 & GDMA_REG_CTRL0_ENABLE)) {
- dev_err(dma_dev->ddev.dev, "chan %d is start(%08x).\n",
- chan->id, ctrl0);
- rt3883_dump_reg(dma_dev, chan->id);
- return -EINVAL;
- }
-
- sg = &chan->desc->sg[chan->next_sg];
- if (chan->desc->direction == DMA_MEM_TO_DEV) {
- src_addr = sg->src_addr;
- dst_addr = chan->fifo_addr;
- ctrl0 = GDMA_REG_CTRL0_DST_ADDR_FIXED;
- ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
- (chan->slave_id << GDMA_REG_CTRL1_DST_REQ_SHIFT);
- } else if (chan->desc->direction == DMA_DEV_TO_MEM) {
- src_addr = chan->fifo_addr;
- dst_addr = sg->dst_addr;
- ctrl0 = GDMA_REG_CTRL0_SRC_ADDR_FIXED;
- ctrl1 = (chan->slave_id << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
- (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
- GDMA_REG_CTRL1_COHERENT;
- } else if (chan->desc->direction == DMA_MEM_TO_MEM) {
- src_addr = sg->src_addr;
- dst_addr = sg->dst_addr;
- ctrl0 = GDMA_REG_CTRL0_SW_MODE;
- ctrl1 = (32 << GDMA_REG_CTRL1_SRC_REQ_SHIFT) |
- (32 << GDMA_REG_CTRL1_DST_REQ_SHIFT) |
- GDMA_REG_CTRL1_COHERENT;
- } else {
- dev_err(dma_dev->ddev.dev, "direction type %d error\n",
- chan->desc->direction);
- return -EINVAL;
- }
-
- ctrl0 |= (sg->len << GDMA_REG_CTRL0_TX_SHIFT) |
- (chan->burst_size << GDMA_REG_CTRL0_BURST_SHIFT) |
- GDMA_REG_CTRL0_DONE_INT | GDMA_REG_CTRL0_ENABLE;
- ctrl1 |= chan->id << GDMA_REG_CTRL1_NEXT_SHIFT;
-
- chan->next_sg++;
- gdma_dma_write(dma_dev, GDMA_REG_SRC_ADDR(chan->id), src_addr);
- gdma_dma_write(dma_dev, GDMA_REG_DST_ADDR(chan->id), dst_addr);
- gdma_dma_write(dma_dev, GDMA_REG_CTRL1(chan->id), ctrl1);
-
- /* make sure next_sg is update */
- wmb();
- gdma_dma_write(dma_dev, GDMA_REG_CTRL0(chan->id), ctrl0);
-
- return 0;
-}
-
-static inline int gdma_start_transfer(struct gdma_dma_dev *dma_dev,
- struct gdma_dmaengine_chan *chan)
-{
- return dma_dev->data->start_transfer(chan);
-}
-
-static int gdma_next_desc(struct gdma_dmaengine_chan *chan)
-{
- struct virt_dma_desc *vdesc;
-
- vdesc = vchan_next_desc(&chan->vchan);
- if (!vdesc) {
- chan->desc = NULL;
- return 0;
- }
- chan->desc = to_gdma_dma_desc(vdesc);
- chan->next_sg = 0;
-
- return 1;
-}
-
-static void gdma_dma_chan_irq(struct gdma_dma_dev *dma_dev,
- struct gdma_dmaengine_chan *chan)
-{
- struct gdma_dma_desc *desc;
- unsigned long flags;
- int chan_issued;
-
- chan_issued = 0;
- spin_lock_irqsave(&chan->vchan.lock, flags);
- desc = chan->desc;
- if (desc) {
- if (desc->cyclic) {
- vchan_cyclic_callback(&desc->vdesc);
- if (chan->next_sg == desc->num_sgs)
- chan->next_sg = 0;
- chan_issued = 1;
- } else {
- desc->residue -= desc->sg[chan->next_sg - 1].len;
- if (chan->next_sg == desc->num_sgs) {
- list_del(&desc->vdesc.node);
- vchan_cookie_complete(&desc->vdesc);
- chan_issued = gdma_next_desc(chan);
- } else {
- chan_issued = 1;
- }
- }
- } else {
- dev_dbg(dma_dev->ddev.dev, "chan %d no desc to complete\n",
- chan->id);
- }
- if (chan_issued)
- set_bit(chan->id, &dma_dev->chan_issued);
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
-}
-
-static irqreturn_t gdma_dma_irq(int irq, void *devid)
-{
- struct gdma_dma_dev *dma_dev = devid;
- u32 done, done_reg;
- unsigned int i;
-
- done_reg = dma_dev->data->done_int_reg;
- done = gdma_dma_read(dma_dev, done_reg);
- if (unlikely(!done))
- return IRQ_NONE;
-
- /* clean done bits */
- gdma_dma_write(dma_dev, done_reg, done);
-
- i = 0;
- while (done) {
- if (done & 0x1) {
- gdma_dma_chan_irq(dma_dev, &dma_dev->chan[i]);
- atomic_dec(&dma_dev->cnt);
- }
- done >>= 1;
- i++;
- }
-
- /* start only have work to do */
- if (dma_dev->chan_issued)
- tasklet_schedule(&dma_dev->task);
-
- return IRQ_HANDLED;
-}
-
-static void gdma_dma_issue_pending(struct dma_chan *c)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct gdma_dma_dev *dma_dev = gdma_dma_chan_get_dev(chan);
- unsigned long flags;
-
- spin_lock_irqsave(&chan->vchan.lock, flags);
- if (vchan_issue_pending(&chan->vchan) && !chan->desc) {
- if (gdma_next_desc(chan)) {
- set_bit(chan->id, &dma_dev->chan_issued);
- tasklet_schedule(&dma_dev->task);
- } else {
- dev_dbg(dma_dev->ddev.dev, "chan %d no desc to issue\n",
- chan->id);
- }
- }
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
-}
-
-static struct dma_async_tx_descriptor *gdma_dma_prep_slave_sg(
- struct dma_chan *c, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_transfer_direction direction,
- unsigned long flags, void *context)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct gdma_dma_desc *desc;
- struct scatterlist *sg;
- unsigned int i;
-
- desc = kzalloc(struct_size(desc, sg, sg_len), GFP_ATOMIC);
- if (!desc) {
- dev_err(c->device->dev, "alloc sg decs error\n");
- return NULL;
- }
- desc->residue = 0;
-
- for_each_sg(sgl, sg, sg_len, i) {
- if (direction == DMA_MEM_TO_DEV) {
- desc->sg[i].src_addr = sg_dma_address(sg);
- } else if (direction == DMA_DEV_TO_MEM) {
- desc->sg[i].dst_addr = sg_dma_address(sg);
- } else {
- dev_err(c->device->dev, "direction type %d error\n",
- direction);
- goto free_desc;
- }
-
- if (unlikely(sg_dma_len(sg) > GDMA_REG_CTRL0_TX_MASK)) {
- dev_err(c->device->dev, "sg len too large %d\n",
- sg_dma_len(sg));
- goto free_desc;
- }
- desc->sg[i].len = sg_dma_len(sg);
- desc->residue += sg_dma_len(sg);
- }
-
- desc->num_sgs = sg_len;
- desc->direction = direction;
- desc->cyclic = false;
-
- return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-
-free_desc:
- kfree(desc);
- return NULL;
-}
-
-static struct dma_async_tx_descriptor *gdma_dma_prep_dma_memcpy(
- struct dma_chan *c, dma_addr_t dest, dma_addr_t src,
- size_t len, unsigned long flags)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct gdma_dma_desc *desc;
- unsigned int num_periods, i;
- size_t xfer_count;
-
- if (len <= 0)
- return NULL;
-
- chan->burst_size = gdma_dma_maxburst(len >> 2);
-
- xfer_count = GDMA_REG_CTRL0_TX_MASK;
- num_periods = DIV_ROUND_UP(len, xfer_count);
-
- desc = kzalloc(struct_size(desc, sg, num_periods), GFP_ATOMIC);
- if (!desc) {
- dev_err(c->device->dev, "alloc memcpy decs error\n");
- return NULL;
- }
- desc->residue = len;
-
- for (i = 0; i < num_periods; i++) {
- desc->sg[i].src_addr = src;
- desc->sg[i].dst_addr = dest;
- if (len > xfer_count)
- desc->sg[i].len = xfer_count;
- else
- desc->sg[i].len = len;
- src += desc->sg[i].len;
- dest += desc->sg[i].len;
- len -= desc->sg[i].len;
- }
-
- desc->num_sgs = num_periods;
- desc->direction = DMA_MEM_TO_MEM;
- desc->cyclic = false;
-
- return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-}
-
-static struct dma_async_tx_descriptor *gdma_dma_prep_dma_cyclic(
- struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_transfer_direction direction,
- unsigned long flags)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct gdma_dma_desc *desc;
- unsigned int num_periods, i;
-
- if (buf_len % period_len)
- return NULL;
-
- if (period_len > GDMA_REG_CTRL0_TX_MASK) {
- dev_err(c->device->dev, "cyclic len too large %d\n",
- period_len);
- return NULL;
- }
-
- num_periods = buf_len / period_len;
- desc = kzalloc(struct_size(desc, sg, num_periods), GFP_ATOMIC);
- if (!desc) {
- dev_err(c->device->dev, "alloc cyclic decs error\n");
- return NULL;
- }
- desc->residue = buf_len;
-
- for (i = 0; i < num_periods; i++) {
- if (direction == DMA_MEM_TO_DEV) {
- desc->sg[i].src_addr = buf_addr;
- } else if (direction == DMA_DEV_TO_MEM) {
- desc->sg[i].dst_addr = buf_addr;
- } else {
- dev_err(c->device->dev, "direction type %d error\n",
- direction);
- goto free_desc;
- }
- desc->sg[i].len = period_len;
- buf_addr += period_len;
- }
-
- desc->num_sgs = num_periods;
- desc->direction = direction;
- desc->cyclic = true;
-
- return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
-
-free_desc:
- kfree(desc);
- return NULL;
-}
-
-static enum dma_status gdma_dma_tx_status(struct dma_chan *c,
- dma_cookie_t cookie,
- struct dma_tx_state *state)
-{
- struct gdma_dmaengine_chan *chan = to_gdma_dma_chan(c);
- struct virt_dma_desc *vdesc;
- enum dma_status status;
- unsigned long flags;
- struct gdma_dma_desc *desc;
-
- status = dma_cookie_status(c, cookie, state);
- if (status == DMA_COMPLETE || !state)
- return status;
-
- spin_lock_irqsave(&chan->vchan.lock, flags);
- desc = chan->desc;
- if (desc && (cookie == desc->vdesc.tx.cookie)) {
- /*
- * We never update edesc->residue in the cyclic case, so we
- * can tell the remaining room to the end of the circular
- * buffer.
- */
- if (desc->cyclic)
- state->residue = desc->residue -
- ((chan->next_sg - 1) * desc->sg[0].len);
- else
- state->residue = desc->residue;
- } else {
- vdesc = vchan_find_desc(&chan->vchan, cookie);
- if (vdesc)
- state->residue = to_gdma_dma_desc(vdesc)->residue;
- }
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
-
- dev_dbg(c->device->dev, "tx residue %d bytes\n", state->residue);
-
- return status;
-}
-
-static void gdma_dma_free_chan_resources(struct dma_chan *c)
-{
- vchan_free_chan_resources(to_virt_chan(c));
-}
-
-static void gdma_dma_desc_free(struct virt_dma_desc *vdesc)
-{
- kfree(container_of(vdesc, struct gdma_dma_desc, vdesc));
-}
-
-static void gdma_dma_tasklet(struct tasklet_struct *t)
-{
- struct gdma_dma_dev *dma_dev = from_tasklet(dma_dev, t, task);
- struct gdma_dmaengine_chan *chan;
- static unsigned int last_chan;
- unsigned int i, chan_mask;
-
- /* record last chan to round robin all chans */
- i = last_chan;
- chan_mask = dma_dev->data->chancnt - 1;
- do {
- /*
- * on mt7621. when verify with dmatest with all
- * channel is enable. we need to limit only two
- * channel is working at the same time. otherwise the
- * data will have problem.
- */
- if (atomic_read(&dma_dev->cnt) >= 2) {
- last_chan = i;
- break;
- }
-
- if (test_and_clear_bit(i, &dma_dev->chan_issued)) {
- chan = &dma_dev->chan[i];
- if (chan->desc) {
- atomic_inc(&dma_dev->cnt);
- gdma_start_transfer(dma_dev, chan);
- } else {
- dev_dbg(dma_dev->ddev.dev,
- "chan %d no desc to issue\n",
- chan->id);
- }
- if (!dma_dev->chan_issued)
- break;
- }
-
- i = (i + 1) & chan_mask;
- } while (i != last_chan);
-}
-
-static void rt305x_gdma_init(struct gdma_dma_dev *dma_dev)
-{
- u32 gct;
-
- /* all chans round robin */
- gdma_dma_write(dma_dev, GDMA_RT305X_GCT, GDMA_REG_GCT_ARBIT_RR);
-
- gct = gdma_dma_read(dma_dev, GDMA_RT305X_GCT);
- dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
- (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
- 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
- GDMA_REG_GCT_CHAN_MASK));
-}
-
-static void rt3883_gdma_init(struct gdma_dma_dev *dma_dev)
-{
- u32 gct;
-
- /* all chans round robin */
- gdma_dma_write(dma_dev, GDMA_REG_GCT, GDMA_REG_GCT_ARBIT_RR);
-
- gct = gdma_dma_read(dma_dev, GDMA_REG_GCT);
- dev_info(dma_dev->ddev.dev, "revision: %d, channels: %d\n",
- (gct >> GDMA_REG_GCT_VER_SHIFT) & GDMA_REG_GCT_VER_MASK,
- 8 << ((gct >> GDMA_REG_GCT_CHAN_SHIFT) &
- GDMA_REG_GCT_CHAN_MASK));
-}
-
-static struct gdma_data rt305x_gdma_data = {
- .chancnt = 8,
- .done_int_reg = GDMA_RT305X_STATUS_INT,
- .init = rt305x_gdma_init,
- .start_transfer = rt305x_gdma_start_transfer,
-};
-
-static struct gdma_data rt3883_gdma_data = {
- .chancnt = 16,
- .done_int_reg = GDMA_REG_DONE_INT,
- .init = rt3883_gdma_init,
- .start_transfer = rt3883_gdma_start_transfer,
-};
-
-static const struct of_device_id gdma_of_match_table[] = {
- { .compatible = "ralink,rt305x-gdma", .data = &rt305x_gdma_data },
- { .compatible = "ralink,rt3883-gdma", .data = &rt3883_gdma_data },
- { },
-};
-MODULE_DEVICE_TABLE(of, gdma_of_match_table);
-
-static int gdma_dma_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match;
- struct gdma_dmaengine_chan *chan;
- struct gdma_dma_dev *dma_dev;
- struct dma_device *dd;
- unsigned int i;
- int ret;
- int irq;
- void __iomem *base;
- struct gdma_data *data;
-
- ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (ret)
- return ret;
-
- match = of_match_device(gdma_of_match_table, &pdev->dev);
- if (!match)
- return -EINVAL;
- data = (struct gdma_data *)match->data;
-
- dma_dev = devm_kzalloc(&pdev->dev,
- struct_size(dma_dev, chan, data->chancnt),
- GFP_KERNEL);
- if (!dma_dev)
- return -EINVAL;
- dma_dev->data = data;
-
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
- dma_dev->base = base;
- tasklet_setup(&dma_dev->task, gdma_dma_tasklet);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return -EINVAL;
- ret = devm_request_irq(&pdev->dev, irq, gdma_dma_irq,
- 0, dev_name(&pdev->dev), dma_dev);
- if (ret) {
- dev_err(&pdev->dev, "failed to request irq\n");
- return ret;
- }
-
- ret = device_reset(&pdev->dev);
- if (ret)
- dev_err(&pdev->dev, "failed to reset: %d\n", ret);
-
- dd = &dma_dev->ddev;
- dma_cap_set(DMA_MEMCPY, dd->cap_mask);
- dma_cap_set(DMA_SLAVE, dd->cap_mask);
- dma_cap_set(DMA_CYCLIC, dd->cap_mask);
- dd->device_free_chan_resources = gdma_dma_free_chan_resources;
- dd->device_prep_dma_memcpy = gdma_dma_prep_dma_memcpy;
- dd->device_prep_slave_sg = gdma_dma_prep_slave_sg;
- dd->device_prep_dma_cyclic = gdma_dma_prep_dma_cyclic;
- dd->device_config = gdma_dma_config;
- dd->device_terminate_all = gdma_dma_terminate_all;
- dd->device_tx_status = gdma_dma_tx_status;
- dd->device_issue_pending = gdma_dma_issue_pending;
-
- dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
- dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- dd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
-
- dd->dev = &pdev->dev;
- dd->dev->dma_parms = &dma_dev->dma_parms;
- dma_set_max_seg_size(dd->dev, GDMA_REG_CTRL0_TX_MASK);
- INIT_LIST_HEAD(&dd->channels);
-
- for (i = 0; i < data->chancnt; i++) {
- chan = &dma_dev->chan[i];
- chan->id = i;
- chan->vchan.desc_free = gdma_dma_desc_free;
- vchan_init(&chan->vchan, dd);
- }
-
- /* init hardware */
- data->init(dma_dev);
-
- ret = dma_async_device_register(dd);
- if (ret) {
- dev_err(&pdev->dev, "failed to register dma device\n");
- return ret;
- }
-
- ret = of_dma_controller_register(pdev->dev.of_node,
- of_dma_xlate_by_chan_id, dma_dev);
- if (ret) {
- dev_err(&pdev->dev, "failed to register of dma controller\n");
- goto err_unregister;
- }
-
- platform_set_drvdata(pdev, dma_dev);
-
- return 0;
-
-err_unregister:
- dma_async_device_unregister(dd);
- return ret;
-}
-
-static int gdma_dma_remove(struct platform_device *pdev)
-{
- struct gdma_dma_dev *dma_dev = platform_get_drvdata(pdev);
-
- tasklet_kill(&dma_dev->task);
- of_dma_controller_free(pdev->dev.of_node);
- dma_async_device_unregister(&dma_dev->ddev);
-
- return 0;
-}
-
-static struct platform_driver gdma_dma_driver = {
- .probe = gdma_dma_probe,
- .remove = gdma_dma_remove,
- .driver = {
- .name = "gdma-rt2880",
- .of_match_table = gdma_of_match_table,
- },
-};
-module_platform_driver(gdma_dma_driver);
-
-MODULE_DESCRIPTION("Ralink/MTK DMA driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index c6f8b772335c..c985e4ebc545 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -1980,7 +1980,7 @@ void SendDisassociation(struct rtllib_device *ieee, bool deauth, u16 asRsn);
void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee);
void rtllib_start_ibss(struct rtllib_device *ieee);
-void rtllib_softmac_init(struct rtllib_device *ieee);
+int rtllib_softmac_init(struct rtllib_device *ieee);
void rtllib_softmac_free(struct rtllib_device *ieee);
void rtllib_disassociate(struct rtllib_device *ieee);
void rtllib_stop_scan(struct rtllib_device *ieee);
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index 64d9feee1f39..41697ef55dbd 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -88,7 +88,7 @@ struct net_device *alloc_rtllib(int sizeof_priv)
err = rtllib_networks_allocate(ieee);
if (err) {
pr_err("Unable to allocate beacon storage: %d\n", err);
- goto failed;
+ goto free_netdev;
}
rtllib_networks_initialize(ieee);
@@ -121,11 +121,13 @@ struct net_device *alloc_rtllib(int sizeof_priv)
ieee->hwsec_active = 0;
memset(ieee->swcamtable, 0, sizeof(struct sw_cam_table) * 32);
- rtllib_softmac_init(ieee);
+ err = rtllib_softmac_init(ieee);
+ if (err)
+ goto free_crypt_info;
ieee->pHTInfo = kzalloc(sizeof(struct rt_hi_throughput), GFP_KERNEL);
if (!ieee->pHTInfo)
- return NULL;
+ goto free_softmac;
HTUpdateDefaultSetting(ieee);
HTInitializeHTInfo(ieee);
@@ -141,8 +143,14 @@ struct net_device *alloc_rtllib(int sizeof_priv)
return dev;
- failed:
+free_softmac:
+ rtllib_softmac_free(ieee);
+free_crypt_info:
+ lib80211_crypt_info_free(&ieee->crypt_info);
+ rtllib_networks_free(ieee);
+free_netdev:
free_netdev(dev);
+
return NULL;
}
EXPORT_SYMBOL(alloc_rtllib);
@@ -153,7 +161,6 @@ void free_rtllib(struct net_device *dev)
netdev_priv_rsl(dev);
kfree(ieee->pHTInfo);
- ieee->pHTInfo = NULL;
rtllib_softmac_free(ieee);
lib80211_crypt_info_free(&ieee->crypt_info);
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index aabbea48223d..4b6c2295a3cf 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -2952,7 +2952,7 @@ void rtllib_start_protocol(struct rtllib_device *ieee)
}
}
-void rtllib_softmac_init(struct rtllib_device *ieee)
+int rtllib_softmac_init(struct rtllib_device *ieee)
{
int i;
@@ -2963,7 +2963,8 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
ieee->seq_ctrl[i] = 0;
ieee->dot11d_info = kzalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
if (!ieee->dot11d_info)
- netdev_err(ieee->dev, "Can't alloc memory for DOT11D\n");
+ return -ENOMEM;
+
ieee->LinkDetectInfo.SlotIndex = 0;
ieee->LinkDetectInfo.SlotNum = 2;
ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0;
@@ -3029,6 +3030,7 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
tasklet_setup(&ieee->ps_task, rtllib_sta_ps);
+ return 0;
}
void rtllib_softmac_free(struct rtllib_device *ieee)
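
rtllib_softmac_init() previously logged the dot11d_info allocation failure and carried on with a NULL pointer; returning -ENOMEM instead lets alloc_rtllib() unwind before anything dereferences it. A small user-space sketch of the new contract, with hypothetical names:

#include <errno.h>
#include <stdlib.h>

struct dev { void *dot11d; };

/* New contract: report the failure instead of logging and continuing
 * with a NULL pointer that would crash on first use.
 */
static int softmac_init_like(struct dev *d)
{
        d->dot11d = calloc(1, 128);
        if (!d->dot11d)
                return -ENOMEM;
        return 0;
}

static struct dev *alloc_dev(void)
{
        struct dev *d = calloc(1, sizeof(*d));

        if (!d)
                return NULL;
        if (softmac_init_like(d)) {     /* propagate instead of ignore */
                free(d);
                return NULL;
        }
        return d;
}

int main(void)
{
        struct dev *d = alloc_dev();

        if (d) {
                free(d->dot11d);
                free(d);
        }
        return 0;
}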
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 1a193f900779..1a43979939a8 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1346,7 +1346,7 @@ inline void ieee80211_softmac_new_net(struct ieee80211_device *ieee, struct ieee
short apset, ssidset, ssidbroad, apmatch, ssidmatch;
- /* we are interested in new new only if we are not associated
+ /* we are interested in new only if we are not associated
* and we are not associating / authenticating
*/
if (ieee->state != IEEE80211_NOLINK)
@@ -2027,7 +2027,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
* N = MAX_PACKET_SIZE / MIN_FRAG_THRESHOLD.
* In this way you need just one and the 802.11 stack
* will take care of buffering fragments and pass them to
- * to the driver later, when it wakes the queue.
+ * the driver later, when it wakes the queue.
*/
void ieee80211_softmac_xmit(struct ieee80211_txb *txb, struct ieee80211_device *ieee)
{
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 726d7ad9408b..364e1ca94f70 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -2531,13 +2531,13 @@ static short rtl8192_init(struct net_device *dev)
#ifdef PIPE12
{
int i = 0;
- u8 queuetopipe[] = {3, 2, 1, 0, 4, 8, 7, 6, 5};
+ static const u8 queuetopipe[] = {3, 2, 1, 0, 4, 8, 7, 6, 5};
memcpy(priv->txqueue_to_outpipemap, queuetopipe, 9);
}
#else
{
- u8 queuetopipe[] = {3, 2, 1, 0, 4, 4, 0, 4, 4};
+ const u8 queuetopipe[] = {3, 2, 1, 0, 4, 4, 0, 4, 4};
memcpy(priv->txqueue_to_outpipemap, queuetopipe, 9);
}
@@ -2666,14 +2666,7 @@ static bool rtl8192_adapter_start(struct net_device *dev)
/* config CPUReset Register */
/* Firmware Reset or not? */
read_nic_dword(dev, CPU_GEN, &dwRegRead);
- if (priv->pFirmware->firmware_status == FW_STATUS_0_INIT)
- dwRegRead |= CPU_GEN_SYSTEM_RESET; /* do nothing here? */
- else if (priv->pFirmware->firmware_status == FW_STATUS_5_READY)
- dwRegRead |= CPU_GEN_FIRMWARE_RESET;
- else
- RT_TRACE(COMP_ERR,
- "ERROR in %s(): undefined firmware state(%d)\n",
- __func__, priv->pFirmware->firmware_status);
+ dwRegRead |= CPU_GEN_SYSTEM_RESET; /* do nothing here? */
write_nic_dword(dev, CPU_GEN, dwRegRead);
/* config BB. */
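
The queuetopipe change above deserves a note: marking a lookup table "static const" places one copy in read-only data instead of rebuilding the array on the stack every call. A minimal sketch of the difference, with hypothetical names:

#include <string.h>

struct priv { unsigned char txqueue_to_outpipemap[9]; };

static void init_map(struct priv *p)
{
        /* static const: a single read-only copy in .rodata; a plain local
         * array would be re-initialized on the stack on every call.
         */
        static const unsigned char queuetopipe[] = {3, 2, 1, 0, 4, 8, 7, 6, 5};

        memcpy(p->txqueue_to_outpipemap, queuetopipe, sizeof(queuetopipe));
}

int main(void)
{
        struct priv p;

        init_map(&p);
        return p.txqueue_to_outpipemap[0] == 3 ? 0 : 1;
}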
diff --git a/drivers/staging/rtl8712/rtl8712_efuse.c b/drivers/staging/rtl8712/rtl8712_efuse.c
index 4f3b54a7c3be..c9400e40a1d6 100644
--- a/drivers/staging/rtl8712/rtl8712_efuse.c
+++ b/drivers/staging/rtl8712/rtl8712_efuse.c
@@ -298,25 +298,23 @@ static u8 fix_header(struct _adapter *adapter, u8 header, u16 header_addr)
continue;
}
for (i = 0; i < PGPKG_MAX_WORDS; i++) {
- if (BIT(i) & word_en) {
- if (BIT(i) & pkt.word_en) {
- if (efuse_one_byte_read(
- adapter, addr,
+ if (!(BIT(i) & word_en))
+ continue;
+ if (BIT(i) & pkt.word_en) {
+ if (efuse_one_byte_read(adapter,
+ addr,
&value))
- pkt.data[i * 2] = value;
- else
- return false;
- if (efuse_one_byte_read(
- adapter,
+ pkt.data[i * 2] = value;
+ else
+ return false;
+ if (efuse_one_byte_read(adapter,
addr + 1,
&value))
- pkt.data[i * 2 + 1] =
- value;
- else
- return false;
- }
- addr += 2;
+ pkt.data[i * 2 + 1] = value;
+ else
+ return false;
}
+ addr += 2;
}
}
if (addr != header_addr)
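
fix_header() above is flattened by inverting the word_en test into a guard clause: the loop skips unselected words with "continue" rather than nesting the whole body another level deep. A self-contained sketch of the refactor:

#include <stdio.h>

#define BIT(n)          (1u << (n))
#define MAX_WORDS       4

int main(void)
{
        unsigned int word_en = BIT(0) | BIT(2); /* hypothetical mask */
        int i;

        for (i = 0; i < MAX_WORDS; i++) {
                /* guard clause: skip unselected words instead of wrapping
                 * the whole body in "if (BIT(i) & word_en) { ... }"
                 */
                if (!(BIT(i) & word_en))
                        continue;
                printf("processing word %d\n", i);
        }
        return 0;
}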
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index bd24d913b464..b28351a97cd3 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -1498,8 +1498,7 @@ static void rtw_lps_change_dtim_hdl(struct adapter *padapter, u8 dtim)
mutex_lock(&pwrpriv->lock);
- if (pwrpriv->dtim != dtim)
- pwrpriv->dtim = dtim;
+ pwrpriv->dtim = dtim;
if (pwrpriv->fw_current_in_ps_mode && (pwrpriv->pwr_mode > PS_MODE_ACTIVE)) {
u8 ps_mode = pwrpriv->pwr_mode;
diff --git a/drivers/staging/rtl8723bs/core/rtw_efuse.c b/drivers/staging/rtl8723bs/core/rtw_efuse.c
index 430e2d81924c..3d3c77273026 100644
--- a/drivers/staging/rtl8723bs/core/rtw_efuse.c
+++ b/drivers/staging/rtl8723bs/core/rtw_efuse.c
@@ -31,10 +31,7 @@ u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
static bool
-Efuse_Read1ByteFromFakeContent(
- struct adapter *padapter,
- u16 Offset,
- u8 *Value)
+Efuse_Read1ByteFromFakeContent(u16 Offset, u8 *Value)
{
if (Offset >= EFUSE_MAX_HW_SIZE)
return false;
@@ -46,10 +43,7 @@ Efuse_Read1ByteFromFakeContent(
}
static bool
-Efuse_Write1ByteToFakeContent(
- struct adapter *padapter,
- u16 Offset,
- u8 Value)
+Efuse_Write1ByteToFakeContent(u16 Offset, u8 Value)
{
if (Offset >= EFUSE_MAX_HW_SIZE)
return false;
@@ -250,7 +244,7 @@ bool bPseudoTest)
u8 readbyte;
if (bPseudoTest)
- return Efuse_Read1ByteFromFakeContent(padapter, addr, data);
+ return Efuse_Read1ByteFromFakeContent(addr, data);
/* <20130121, Kordan> For SMIC EFUSE specificatoin. */
/* 0x34[11]: SW force PGMEN input of efuse to high. (for the bank selected by 0x34[9:8]) */
@@ -291,7 +285,7 @@ u8 efuse_OneByteWrite(struct adapter *padapter, u16 addr, u8 data, bool bPseudoT
u32 efuseValue = 0;
if (bPseudoTest)
- return Efuse_Write1ByteToFakeContent(padapter, addr, data);
+ return Efuse_Write1ByteToFakeContent(addr, data);
/* -----------------e-fuse reg ctrl --------------------------------- */
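
Efuse_Read1ByteFromFakeContent() and its write counterpart never used their adapter argument, so the parameter is dropped and every call site shrinks. A sketch of the resulting shape, with hypothetical names and a stand-in size limit:

#include <stdbool.h>
#include <stdio.h>

/* Shape of the change: the parameter list now states only the real
 * dependencies of the helper (names here are illustrative only).
 */
static bool read_fake_content(unsigned short offset, unsigned char *value)
{
        if (offset >= 512)              /* stand-in for EFUSE_MAX_HW_SIZE */
                return false;
        *value = 0;
        return true;
}

int main(void)
{
        unsigned char v;

        printf("%d\n", read_fake_content(3, &v));
        return 0;
}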
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index 7fe3df863fe1..b5d5e922231c 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -31,9 +31,6 @@ static u8 rtw_sdio_wait_enough_TxOQT_space(struct adapter *padapter, u8 agg_num)
pHalData->SdioTxOQTFreeSpace -= agg_num;
- /* if (n > 1) */
- /* ++priv->pshare->nr_out_of_txoqt_space; */
-
return true;
}
@@ -310,8 +307,6 @@ static s32 xmit_xmitframes(struct adapter *padapter, struct xmit_priv *pxmitpriv
txlen = txdesc_size + pxmitframe->attrib.last_txcmdsz;
pxmitframe->pg_num = (txlen + 127) / 128;
pxmitbuf->pg_num += (txlen + 127) / 128;
- /* if (k != 1) */
- /* ((struct xmit_frame*)pxmitbuf->priv_data)->pg_num += pxmitframe->pg_num; */
pxmitbuf->ptail += _RND(txlen, 8); /* round to 8 bytes alignment */
pxmitbuf->len = _RND(pxmitbuf->len, 8) + txlen;
}
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 0868f56e2979..5157b5b12597 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -2200,7 +2200,7 @@ static netdev_tx_t rtw_cfg80211_monitor_if_xmit_entry(struct sk_buff *skb, struc
memcpy(src_mac_addr, dot11_hdr->addr2, sizeof(src_mac_addr));
/* Skip the 802.11 header, QoS (if any) and SNAP, but leave spaces for
- * for two MAC addresses
+ * two MAC addresses
*/
skb_pull(skb, dot11_hdr_len + qos_len + snap_len - sizeof(src_mac_addr) * 2);
pdata = (unsigned char *)skb->data;
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 91fcf85e150a..5a58dac76c88 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -450,13 +450,13 @@ skip_for_abort:
* after the down() -- that's necessary for the thread-shutdown
* case.
*
- * complete_and_exit() goes even further than this -- it is safe in
- * the case that the thread of the caller is going away (not just
- * the structure) -- this is necessary for the module-remove case.
- * This is important in preemption kernels, which transfer the flow
- * of execution immediately upon a complete().
+ * kthread_complete_and_exit() goes even further than this --
+ * it is safe in the case that the thread of the caller is going away
+ * (not just the structure) -- this is necessary for the module-remove
+ * case. This is important in preemption kernels, which transfer the
+ * flow of execution immediately upon a complete().
*/
- complete_and_exit(&dev->control_exit, 0);
+ kthread_complete_and_exit(&dev->control_exit, 0);
}
static int rtsx_polling_thread(void *__dev)
@@ -501,7 +501,7 @@ static int rtsx_polling_thread(void *__dev)
mutex_unlock(&dev->dev_mutex);
}
- complete_and_exit(&dev->polling_exit, 0);
+ kthread_complete_and_exit(&dev->polling_exit, 0);
}
/*
@@ -682,7 +682,7 @@ static int rtsx_scan_thread(void *__dev)
/* Should we unbind if no devices were detected? */
}
- complete_and_exit(&dev->scanning_done, 0);
+ kthread_complete_and_exit(&dev->scanning_done, 0);
}
static void rtsx_init_options(struct rtsx_chip *chip)
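
The rts5208 change is a mechanical rename: complete_and_exit() became kthread_complete_and_exit() when it moved into the kthread API in 5.17, with the semantics the comment describes unchanged. A kernel-side sketch of the shape of such a thread (hypothetical names, not the rtsx structures; builds only in a module context):

#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/sched.h>

static DECLARE_COMPLETION(poll_exit);   /* hypothetical, not a driver field */

static int poll_thread(void *data)
{
        while (!kthread_should_stop())
                schedule_timeout_interruptible(HZ);

        /* Signal the waiter and terminate in one step; the thread runs no
         * further instructions afterwards, which is what keeps this safe
         * while the module owning this code is being unloaded.
         */
        kthread_complete_and_exit(&poll_exit, 0);
}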
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index 694644112447..8eee131e834d 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -266,7 +266,7 @@ static int forward_taskmgmt_command(enum task_mgmt_types tasktype,
&notifyevent, &notifyresult);
if (ret) {
dev_dbg(&scsidev->sdev_gendev,
- "visorhba: setup_scsitaskmgmt_handles returned %d\n", ret);
+ "visorhba: setup_scsitaskmgmt_handles returned %d\n", ret);
return FAILED;
}
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 62cd9b783732..643432458105 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1590,7 +1590,7 @@ static void send_rcv_posts_if_needed(struct visornic_devdata *devdata)
netdev = devdata->netdev;
rcv_bufs_allocated = 0;
/* this code is trying to prevent getting stuck here forever,
- * but still retry it if you cant allocate them all this time.
+ * but still retry it if you can't allocate them all this time.
*/
cur_num_rcv_bufs_to_alloc = devdata->num_rcv_bufs_could_not_alloc;
while (cur_num_rcv_bufs_to_alloc > 0) {
@@ -1759,13 +1759,11 @@ static void visornic_channel_interrupt(struct visor_device *dev)
if (!devdata)
return;
- if (!visorchannel_signalempty(
- devdata->dev->visorchannel,
- IOCHAN_FROM_IOPART))
+ if (!visorchannel_signalempty(devdata->dev->visorchannel,
+ IOCHAN_FROM_IOPART))
napi_schedule(&devdata->napi);
atomic_set(&devdata->interrupt_rcvd, 0);
-
}
/* visornic_probe - probe function for visornic devices
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
index c250fbef2fa3..628732d7bf6a 100644
--- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
+++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835.c
@@ -52,20 +52,14 @@ static int bcm2835_devm_add_vchi_ctx(struct device *dev)
return 0;
}
-typedef int (*bcm2835_audio_newpcm_func)(struct bcm2835_chip *chip,
- const char *name,
- enum snd_bcm2835_route route,
- u32 numchannels);
-
-typedef int (*bcm2835_audio_newctl_func)(struct bcm2835_chip *chip);
-
struct bcm2835_audio_driver {
struct device_driver driver;
const char *shortname;
const char *longname;
int minchannels;
- bcm2835_audio_newpcm_func newpcm;
- bcm2835_audio_newctl_func newctl;
+ int (*newpcm)(struct bcm2835_chip *chip, const char *name,
+ enum snd_bcm2835_route route, u32 numchannels);
+ int (*newctl)(struct bcm2835_chip *chip);
enum snd_bcm2835_route route;
};
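
The bcm2835-audio change drops the function-pointer typedefs in favour of spelling the signatures out in the struct, matching kernel style, which discourages typedefs that hide pointer types. A user-space sketch of the preferred shape, with hypothetical names:

#include <stdio.h>

/* Spelling the pointer type out in the struct keeps the full signature
 * visible at the declaration and at every use site.
 */
struct codec_ops {                      /* hypothetical stand-in struct */
        int (*newpcm)(const char *name, unsigned int channels);
};

static int demo_newpcm(const char *name, unsigned int channels)
{
        printf("pcm %s: %u channels\n", name, channels);
        return 0;
}

int main(void)
{
        struct codec_ops ops = { .newpcm = demo_newpcm };

        return ops.newpcm("headphones", 2);
}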
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index 1b184d5c6b82..253d755e547f 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
@@ -64,7 +64,7 @@ MODULE_PARM_DESC(max_video_height, "Threshold for video mode");
static atomic_t camera_instance = ATOMIC_INIT(0);
/* global device data array */
-static struct bm2835_mmal_dev *gdev[MAX_BCM2835_CAMERAS];
+static struct bcm2835_mmal_dev *gdev[MAX_BCM2835_CAMERAS];
#define FPS_MIN 1
#define FPS_MAX 90
@@ -210,7 +210,7 @@ static int queue_setup(struct vb2_queue *vq,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], struct device *alloc_ctxs[])
{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
unsigned long size;
/* refuse queue setup if port is not configured */
@@ -265,7 +265,7 @@ static int queue_setup(struct vb2_queue *vq,
static int buffer_init(struct vb2_buffer *vb)
{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2 = to_vb2_v4l2_buffer(vb);
struct vb2_mmal_buffer *buf =
container_of(vb2, struct vb2_mmal_buffer, vb);
@@ -280,7 +280,7 @@ static int buffer_init(struct vb2_buffer *vb)
static int buffer_prepare(struct vb2_buffer *vb)
{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
unsigned long size;
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p, vb %p\n",
@@ -302,7 +302,7 @@ static int buffer_prepare(struct vb2_buffer *vb)
static void buffer_cleanup(struct vb2_buffer *vb)
{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2 = to_vb2_v4l2_buffer(vb);
struct vb2_mmal_buffer *buf =
container_of(vb2, struct vb2_mmal_buffer, vb);
@@ -313,7 +313,7 @@ static void buffer_cleanup(struct vb2_buffer *vb)
mmal_vchi_buffer_cleanup(&buf->mmal);
}
-static inline bool is_capturing(struct bm2835_mmal_dev *dev)
+static inline bool is_capturing(struct bcm2835_mmal_dev *dev)
{
return dev->capture.camera_port ==
&dev->component[COMP_CAMERA]->output[CAM_PORT_CAPTURE];
@@ -324,7 +324,7 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
int status,
struct mmal_buffer *mmal_buf)
{
- struct bm2835_mmal_dev *dev = port->cb_ctx;
+ struct bcm2835_mmal_dev *dev = port->cb_ctx;
struct vb2_mmal_buffer *buf =
container_of(mmal_buf, struct vb2_mmal_buffer, mmal);
@@ -416,7 +416,7 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
}
}
-static int enable_camera(struct bm2835_mmal_dev *dev)
+static int enable_camera(struct bcm2835_mmal_dev *dev)
{
int ret;
@@ -447,7 +447,7 @@ static int enable_camera(struct bm2835_mmal_dev *dev)
return 0;
}
-static int disable_camera(struct bm2835_mmal_dev *dev)
+static int disable_camera(struct bcm2835_mmal_dev *dev)
{
int ret;
@@ -482,7 +482,7 @@ static int disable_camera(struct bm2835_mmal_dev *dev)
static void buffer_queue(struct vb2_buffer *vb)
{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2 = to_vb2_v4l2_buffer(vb);
struct vb2_mmal_buffer *buf =
container_of(vb2, struct vb2_mmal_buffer, vb);
@@ -501,7 +501,7 @@ static void buffer_queue(struct vb2_buffer *vb)
static int start_streaming(struct vb2_queue *vq, unsigned int count)
{
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
int ret;
u32 parameter_size;
@@ -596,7 +596,7 @@ static void stop_streaming(struct vb2_queue *vq)
{
int ret;
unsigned long timeout;
- struct bm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
+ struct bcm2835_mmal_dev *dev = vb2_get_drv_priv(vq);
struct vchiq_mmal_port *port = dev->capture.port;
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev, "%s: dev:%p\n",
@@ -654,7 +654,7 @@ static void stop_streaming(struct vb2_queue *vq)
v4l2_err(&dev->v4l2_dev, "Failed to disable camera\n");
}
-static const struct vb2_ops bm2835_mmal_video_qops = {
+static const struct vb2_ops bcm2835_mmal_video_qops = {
.queue_setup = queue_setup,
.buf_init = buffer_init,
.buf_prepare = buffer_prepare,
@@ -671,7 +671,7 @@ static const struct vb2_ops bm2835_mmal_video_qops = {
* ------------------------------------------------------------------
*/
-static int set_overlay_params(struct bm2835_mmal_dev *dev,
+static int set_overlay_params(struct bcm2835_mmal_dev *dev,
struct vchiq_mmal_port *port)
{
struct mmal_parameter_displayregion prev_config = {
@@ -713,7 +713,7 @@ static int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
static int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
f->fmt.win = dev->overlay;
@@ -723,7 +723,7 @@ static int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
static int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
f->fmt.win.field = V4L2_FIELD_NONE;
f->fmt.win.chromakey = 0;
@@ -754,7 +754,7 @@ static int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
static int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
vidioc_try_fmt_vid_overlay(file, priv, f);
@@ -770,7 +770,7 @@ static int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
static int vidioc_overlay(struct file *file, void *f, unsigned int on)
{
int ret;
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
struct vchiq_mmal_port *src;
struct vchiq_mmal_port *dst;
@@ -829,7 +829,7 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
/* The video overlay must stay within the framebuffer and can't be
* positioned independently.
*/
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
struct vchiq_mmal_port *preview_port =
&dev->component[COMP_CAMERA]->output[CAM_PORT_PREVIEW];
@@ -878,18 +878,16 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
u32 major;
u32 minor;
vchiq_mmal_version(dev->instance, &major, &minor);
- strscpy(cap->driver, "bm2835 mmal", sizeof(cap->driver));
- snprintf((char *)cap->card, sizeof(cap->card), "mmal service %d.%d",
- major, minor);
+ strscpy(cap->driver, "bcm2835 mmal", sizeof(cap->driver));
+ snprintf((char *)cap->card, sizeof(cap->card), "mmal service %d.%d", major, minor);
- snprintf((char *)cap->bus_info, sizeof(cap->bus_info),
- "platform:%s", dev->v4l2_dev.name);
+ snprintf((char *)cap->bus_info, sizeof(cap->bus_info), "platform:%s", dev->v4l2_dev.name);
return 0;
}
@@ -911,7 +909,7 @@ static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
f->fmt.pix.width = dev->capture.width;
f->fmt.pix.height = dev->capture.height;
@@ -936,7 +934,7 @@ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
struct mmal_fmt *mfmt;
mfmt = get_format(f);
@@ -1010,7 +1008,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
}
-static int mmal_setup_video_component(struct bm2835_mmal_dev *dev,
+static int mmal_setup_video_component(struct bcm2835_mmal_dev *dev,
struct v4l2_format *f)
{
bool overlay_enabled = !!dev->component[COMP_PREVIEW]->enabled;
@@ -1056,7 +1054,7 @@ static int mmal_setup_video_component(struct bm2835_mmal_dev *dev,
return ret;
}
-static int mmal_setup_encode_component(struct bm2835_mmal_dev *dev,
+static int mmal_setup_encode_component(struct bcm2835_mmal_dev *dev,
struct v4l2_format *f,
struct vchiq_mmal_port *port,
struct vchiq_mmal_port *camera_port,
@@ -1144,7 +1142,7 @@ static int mmal_setup_encode_component(struct bm2835_mmal_dev *dev,
return 0;
}
-static int mmal_setup_components(struct bm2835_mmal_dev *dev,
+static int mmal_setup_components(struct bcm2835_mmal_dev *dev,
struct v4l2_format *f)
{
int ret;
@@ -1290,7 +1288,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
int ret;
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
struct mmal_fmt *mfmt;
/* try the format to set valid parameters */
@@ -1333,7 +1331,7 @@ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
static int vidioc_enum_framesizes(struct file *file, void *fh,
struct v4l2_frmsizeenum *fsize)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
static const struct v4l2_frmsize_stepwise sizes = {
MIN_WIDTH, 0, 2,
MIN_HEIGHT, 0, 2
@@ -1358,7 +1356,7 @@ static int vidioc_enum_framesizes(struct file *file, void *fh,
static int vidioc_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *fival)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
int i;
if (fival->index)
@@ -1388,7 +1386,7 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv,
static int vidioc_g_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
@@ -1402,7 +1400,7 @@ static int vidioc_g_parm(struct file *file, void *priv,
static int vidioc_s_parm(struct file *file, void *priv,
struct v4l2_streamparm *parm)
{
- struct bm2835_mmal_dev *dev = video_drvdata(file);
+ struct bcm2835_mmal_dev *dev = video_drvdata(file);
struct v4l2_fract tpf;
if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
@@ -1530,7 +1528,7 @@ static int get_num_cameras(struct vchiq_mmal_instance *instance,
static int set_camera_parameters(struct vchiq_mmal_instance *instance,
struct vchiq_mmal_component *camera,
- struct bm2835_mmal_dev *dev)
+ struct bcm2835_mmal_dev *dev)
{
struct mmal_parameter_camera_config cam_config = {
.max_stills_w = dev->max_width,
@@ -1555,7 +1553,7 @@ static int set_camera_parameters(struct vchiq_mmal_instance *instance,
#define MAX_SUPPORTED_ENCODINGS 20
/* MMAL instance and component init */
-static int mmal_init(struct bm2835_mmal_dev *dev)
+static int mmal_init(struct bcm2835_mmal_dev *dev)
{
int ret;
struct mmal_es_format_local *format;
@@ -1735,7 +1733,7 @@ static int mmal_init(struct bm2835_mmal_dev *dev)
&enable,
sizeof(enable));
}
- ret = bm2835_mmal_set_all_camera_controls(dev);
+ ret = bcm2835_mmal_set_all_camera_controls(dev);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "%s: failed to set all camera controls: %d\n",
__func__, ret);
@@ -1769,8 +1767,7 @@ unreg_mmal:
return ret;
}
-static int bm2835_mmal_init_device(struct bm2835_mmal_dev *dev,
- struct video_device *vfd)
+static int bcm2835_mmal_init_device(struct bcm2835_mmal_dev *dev, struct video_device *vfd)
{
int ret;
@@ -1798,7 +1795,7 @@ static int bm2835_mmal_init_device(struct bm2835_mmal_dev *dev,
return 0;
}
-static void bcm2835_cleanup_instance(struct bm2835_mmal_dev *dev)
+static void bcm2835_cleanup_instance(struct bcm2835_mmal_dev *dev)
{
if (!dev)
return;
@@ -1849,7 +1846,7 @@ static struct v4l2_format default_v4l2_format = {
static int bcm2835_mmal_probe(struct platform_device *pdev)
{
int ret;
- struct bm2835_mmal_dev *dev;
+ struct bcm2835_mmal_dev *dev;
struct vb2_queue *q;
int camera;
unsigned int num_cameras;
@@ -1908,7 +1905,7 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
}
/* setup v4l controls */
- ret = bm2835_mmal_init_controls(dev, &dev->ctrl_handler);
+ ret = bcm2835_mmal_init_controls(dev, &dev->ctrl_handler);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "%s: could not init controls: %d\n",
__func__, ret);
@@ -1931,7 +1928,7 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
q->drv_priv = dev;
q->buf_struct_size = sizeof(struct vb2_mmal_buffer);
- q->ops = &bm2835_mmal_video_qops;
+ q->ops = &bcm2835_mmal_video_qops;
q->mem_ops = &vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->mutex;
@@ -1940,7 +1937,7 @@ static int bcm2835_mmal_probe(struct platform_device *pdev)
goto unreg_dev;
/* initialise video devices */
- ret = bm2835_mmal_init_device(dev, &dev->vdev);
+ ret = bcm2835_mmal_init_device(dev, &dev->vdev);
if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "%s: could not init device: %d\n",
__func__, ret);
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
index 75524adff0f5..0f0c6f7a3764 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
@@ -32,7 +32,7 @@ enum {
extern int bcm2835_v4l2_debug;
-struct bm2835_mmal_dev {
+struct bcm2835_mmal_dev {
/* v4l2 devices */
struct v4l2_device v4l2_dev;
struct video_device vdev;
@@ -110,12 +110,10 @@ struct bm2835_mmal_dev {
unsigned int rgb_bgr_swapped;
};
-int bm2835_mmal_init_controls(
- struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl_handler *hdl);
+int bcm2835_mmal_init_controls(struct bcm2835_mmal_dev *dev, struct v4l2_ctrl_handler *hdl);
-int bm2835_mmal_set_all_camera_controls(struct bm2835_mmal_dev *dev);
-int set_framerate_params(struct bm2835_mmal_dev *dev);
+int bcm2835_mmal_set_all_camera_controls(struct bcm2835_mmal_dev *dev);
+int set_framerate_params(struct bcm2835_mmal_dev *dev);
/* Debug helpers */
diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
index b096a12387f7..eb722f16fb91 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
@@ -58,23 +58,16 @@ static const u32 iso_values[] = {
0, 100, 200, 400, 800,
};
-enum bm2835_mmal_ctrl_type {
+enum bcm2835_mmal_ctrl_type {
MMAL_CONTROL_TYPE_STD,
MMAL_CONTROL_TYPE_STD_MENU,
MMAL_CONTROL_TYPE_INT_MENU,
MMAL_CONTROL_TYPE_CLUSTER, /* special cluster entry */
};
-struct bm2835_mmal_v4l2_ctrl;
-
-typedef int(bm2835_mmal_v4l2_ctrl_cb)(
- struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl);
-
-struct bm2835_mmal_v4l2_ctrl {
+struct bcm2835_mmal_v4l2_ctrl {
u32 id; /* v4l2 control identifier */
- enum bm2835_mmal_ctrl_type type;
+ enum bcm2835_mmal_ctrl_type type;
/* control minimum value or
* mask for MMAL_CONTROL_TYPE_STD_MENU
*/
@@ -84,7 +77,8 @@ struct bm2835_mmal_v4l2_ctrl {
u64 step; /* step size of the control */
const s64 *imenu; /* integer menu array */
u32 mmal_id; /* mmal parameter id */
- bm2835_mmal_v4l2_ctrl_cb *setter;
+ int (*setter)(struct bcm2835_mmal_dev *dev, struct v4l2_ctrl *ctrl,
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl);
};
struct v4l2_to_mmal_effects_setting {
@@ -156,9 +150,9 @@ static const struct v4l2_mmal_scene_config scene_configs[] = {
/* control handlers*/
-static int ctrl_set_rational(struct bm2835_mmal_dev *dev,
+static int ctrl_set_rational(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
struct mmal_parameter_rational rational_value;
struct vchiq_mmal_port *control;
@@ -174,9 +168,9 @@ static int ctrl_set_rational(struct bm2835_mmal_dev *dev,
sizeof(rational_value));
}
-static int ctrl_set_value(struct bm2835_mmal_dev *dev,
+static int ctrl_set_value(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *control;
@@ -190,9 +184,9 @@ static int ctrl_set_value(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_iso(struct bm2835_mmal_dev *dev,
+static int ctrl_set_iso(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *control;
@@ -218,9 +212,9 @@ static int ctrl_set_iso(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_value_ev(struct bm2835_mmal_dev *dev,
+static int ctrl_set_value_ev(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
s32 s32_value;
struct vchiq_mmal_port *control;
@@ -234,9 +228,9 @@ static int ctrl_set_value_ev(struct bm2835_mmal_dev *dev,
&s32_value, sizeof(s32_value));
}
-static int ctrl_set_rotate(struct bm2835_mmal_dev *dev,
+static int ctrl_set_rotate(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
int ret;
u32 u32_value;
@@ -263,9 +257,9 @@ static int ctrl_set_rotate(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_flip(struct bm2835_mmal_dev *dev,
+static int ctrl_set_flip(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
int ret;
u32 u32_value;
@@ -304,9 +298,9 @@ static int ctrl_set_flip(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_exposure(struct bm2835_mmal_dev *dev,
+static int ctrl_set_exposure(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
enum mmal_parameter_exposuremode exp_mode = dev->exposure_mode_user;
u32 shutter_speed = 0;
@@ -360,9 +354,9 @@ static int ctrl_set_exposure(struct bm2835_mmal_dev *dev,
return ret;
}
-static int ctrl_set_metering_mode(struct bm2835_mmal_dev *dev,
+static int ctrl_set_metering_mode(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
switch (ctrl->val) {
case V4L2_EXPOSURE_METERING_AVERAGE:
@@ -396,9 +390,9 @@ static int ctrl_set_metering_mode(struct bm2835_mmal_dev *dev,
}
}
-static int ctrl_set_flicker_avoidance(struct bm2835_mmal_dev *dev,
+static int ctrl_set_flicker_avoidance(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *control;
@@ -425,9 +419,9 @@ static int ctrl_set_flicker_avoidance(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_awb_mode(struct bm2835_mmal_dev *dev,
+static int ctrl_set_awb_mode(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *control;
@@ -481,9 +475,9 @@ static int ctrl_set_awb_mode(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_awb_gains(struct bm2835_mmal_dev *dev,
+static int ctrl_set_awb_gains(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
struct vchiq_mmal_port *control;
struct mmal_parameter_awbgains gains;
@@ -504,9 +498,9 @@ static int ctrl_set_awb_gains(struct bm2835_mmal_dev *dev,
&gains, sizeof(gains));
}
-static int ctrl_set_image_effect(struct bm2835_mmal_dev *dev,
+static int ctrl_set_image_effect(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
int ret = -EINVAL;
int i, j;
@@ -561,9 +555,9 @@ exit:
return (ret == 0 ? 0 : -EINVAL);
}
-static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
+static int ctrl_set_colfx(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
int ret;
struct vchiq_mmal_port *control;
@@ -585,9 +579,9 @@ static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
return (ret == 0 ? 0 : -EINVAL);
}
-static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,
+static int ctrl_set_bitrate(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
int ret;
struct vchiq_mmal_port *encoder_out;
@@ -613,9 +607,9 @@ static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,
return 0;
}
-static int ctrl_set_bitrate_mode(struct bm2835_mmal_dev *dev,
+static int ctrl_set_bitrate_mode(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 bitrate_mode;
struct vchiq_mmal_port *encoder_out;
@@ -640,9 +634,9 @@ static int ctrl_set_bitrate_mode(struct bm2835_mmal_dev *dev,
return 0;
}
-static int ctrl_set_image_encode_output(struct bm2835_mmal_dev *dev,
+static int ctrl_set_image_encode_output(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *jpeg_out;
@@ -656,9 +650,9 @@ static int ctrl_set_image_encode_output(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_video_encode_param_output(struct bm2835_mmal_dev *dev,
+static int ctrl_set_video_encode_param_output(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
u32 u32_value;
struct vchiq_mmal_port *vid_enc_ctl;
@@ -672,9 +666,9 @@ static int ctrl_set_video_encode_param_output(struct bm2835_mmal_dev *dev,
&u32_value, sizeof(u32_value));
}
-static int ctrl_set_video_encode_profile_level(struct bm2835_mmal_dev *dev,
+static int ctrl_set_video_encode_profile_level(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
struct mmal_parameter_video_profile param;
int ret = 0;
@@ -783,9 +777,9 @@ static int ctrl_set_video_encode_profile_level(struct bm2835_mmal_dev *dev,
return ret;
}
-static int ctrl_set_scene_mode(struct bm2835_mmal_dev *dev,
+static int ctrl_set_scene_mode(struct bcm2835_mmal_dev *dev,
struct v4l2_ctrl *ctrl,
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl)
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl)
{
int ret = 0;
int shutter_speed;
@@ -890,12 +884,11 @@ static int ctrl_set_scene_mode(struct bm2835_mmal_dev *dev,
return 0;
}
-static int bm2835_mmal_s_ctrl(struct v4l2_ctrl *ctrl)
+static int bcm2835_mmal_s_ctrl(struct v4l2_ctrl *ctrl)
{
- struct bm2835_mmal_dev *dev =
- container_of(ctrl->handler, struct bm2835_mmal_dev,
- ctrl_handler);
- const struct bm2835_mmal_v4l2_ctrl *mmal_ctrl = ctrl->priv;
+ struct bcm2835_mmal_dev *dev = container_of(ctrl->handler, struct bcm2835_mmal_dev,
+ ctrl_handler);
+ const struct bcm2835_mmal_v4l2_ctrl *mmal_ctrl = ctrl->priv;
int ret;
if (!mmal_ctrl || mmal_ctrl->id != ctrl->id || !mmal_ctrl->setter) {
@@ -910,11 +903,11 @@ static int bm2835_mmal_s_ctrl(struct v4l2_ctrl *ctrl)
return ret;
}
-static const struct v4l2_ctrl_ops bm2835_mmal_ctrl_ops = {
- .s_ctrl = bm2835_mmal_s_ctrl,
+static const struct v4l2_ctrl_ops bcm2835_mmal_ctrl_ops = {
+ .s_ctrl = bcm2835_mmal_s_ctrl,
};
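
The s_ctrl callback only receives the generic struct v4l2_ctrl, so the driver
recovers its private state with container_of(), which works because the control
handler is embedded in struct bcm2835_mmal_dev. A minimal sketch of the idiom
(my_dev, my_s_ctrl and the brightness field are illustrative, not the driver's):

    #include <media/v4l2-ctrls.h>

    struct my_dev {
            int brightness;
            struct v4l2_ctrl_handler ctrl_handler;  /* embedded member */
    };

    static int my_s_ctrl(struct v4l2_ctrl *ctrl)
    {
            /* ctrl->handler points at the embedded member; map it back to
             * the enclosing device structure.
             */
            struct my_dev *dev = container_of(ctrl->handler, struct my_dev,
                                              ctrl_handler);

            dev->brightness = ctrl->val;
            return 0;
    }
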
-static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
+static const struct bcm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
{
.id = V4L2_CID_SATURATION,
.type = MMAL_CONTROL_TYPE_STD,
@@ -1253,7 +1246,7 @@ static const struct bm2835_mmal_v4l2_ctrl v4l2_ctrls[V4L2_CTRL_COUNT] = {
},
};
-int bm2835_mmal_set_all_camera_controls(struct bm2835_mmal_dev *dev)
+int bcm2835_mmal_set_all_camera_controls(struct bcm2835_mmal_dev *dev)
{
int c;
int ret = 0;
@@ -1273,7 +1266,7 @@ int bm2835_mmal_set_all_camera_controls(struct bm2835_mmal_dev *dev)
return ret;
}
-int set_framerate_params(struct bm2835_mmal_dev *dev)
+int set_framerate_params(struct bcm2835_mmal_dev *dev)
{
struct mmal_parameter_fps_range fps_range;
int ret;
@@ -1318,11 +1311,10 @@ int set_framerate_params(struct bm2835_mmal_dev *dev)
return ret;
}
-int bm2835_mmal_init_controls(struct bm2835_mmal_dev *dev,
- struct v4l2_ctrl_handler *hdl)
+int bcm2835_mmal_init_controls(struct bcm2835_mmal_dev *dev, struct v4l2_ctrl_handler *hdl)
{
int c;
- const struct bm2835_mmal_v4l2_ctrl *ctrl;
+ const struct bcm2835_mmal_v4l2_ctrl *ctrl;
v4l2_ctrl_handler_init(hdl, V4L2_CTRL_COUNT);
@@ -1331,12 +1323,9 @@ int bm2835_mmal_init_controls(struct bm2835_mmal_dev *dev,
switch (ctrl->type) {
case MMAL_CONTROL_TYPE_STD:
- dev->ctrls[c] =
- v4l2_ctrl_new_std(hdl,
- &bm2835_mmal_ctrl_ops,
- ctrl->id, ctrl->min,
- ctrl->max, ctrl->step,
- ctrl->def);
+ dev->ctrls[c] = v4l2_ctrl_new_std(hdl, &bcm2835_mmal_ctrl_ops,
+ ctrl->id, ctrl->min, ctrl->max,
+ ctrl->step, ctrl->def);
break;
case MMAL_CONTROL_TYPE_STD_MENU:
@@ -1360,20 +1349,16 @@ int bm2835_mmal_init_controls(struct bm2835_mmal_dev *dev,
mask = ~mask;
}
- dev->ctrls[c] =
- v4l2_ctrl_new_std_menu(hdl,
- &bm2835_mmal_ctrl_ops,
- ctrl->id, ctrl->max,
- mask, ctrl->def);
+ dev->ctrls[c] = v4l2_ctrl_new_std_menu(hdl, &bcm2835_mmal_ctrl_ops,
+ ctrl->id, ctrl->max, mask,
+ ctrl->def);
break;
}
case MMAL_CONTROL_TYPE_INT_MENU:
- dev->ctrls[c] =
- v4l2_ctrl_new_int_menu(hdl,
- &bm2835_mmal_ctrl_ops,
- ctrl->id, ctrl->max,
- ctrl->def, ctrl->imenu);
+ dev->ctrls[c] = v4l2_ctrl_new_int_menu(hdl, &bcm2835_mmal_ctrl_ops,
+ ctrl->id, ctrl->max,
+ ctrl->def, ctrl->imenu);
break;
case MMAL_CONTROL_TYPE_CLUSTER:
diff --git a/drivers/staging/vc04_services/interface/TODO b/drivers/staging/vc04_services/interface/TODO
index 39810ce017cd..241ca004735c 100644
--- a/drivers/staging/vc04_services/interface/TODO
+++ b/drivers/staging/vc04_services/interface/TODO
@@ -80,11 +80,7 @@ vchiq-core.ko and vchiq-dev.ko. This would also ease the upstreaming process.
The code in vchiq_bcm2835_arm.c should fit in the generic platform file.
-12) Get rid of all the struct typedefs
-
-Most structs are typedefd, it's not encouraged in the kernel.
-
-13) Get rid of all non essential global structures and create a proper per
+11) Get rid of all non essential global structures and create a proper per
device structure
The first thing one generally sees in a probe function is a memory allocation
@@ -92,6 +88,6 @@ for all the device specific data. This structure is then passed all over the
driver. This is good practice since it makes the driver work regardless of the
number of devices probed.
-14) Clean up Sparse warnings from __user annotations. See
+12) Clean up Sparse warnings from __user annotations. See
vchiq_irq_queue_bulk_tx_rx(). Ensure that the address of "&waiter->bulk_waiter"
is never disclosed to userspace.
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index c650a32bcedf..3a2e4582db8e 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -1058,15 +1058,27 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ rcu_read_lock();
service = handle_to_service(handle);
- if (WARN_ON(!service))
+ if (WARN_ON(!service)) {
+ rcu_read_unlock();
return VCHIQ_SUCCESS;
+ }
user_service = (struct user_service *)service->base.userdata;
instance = user_service->instance;
- if (!instance || instance->closing)
+ if (!instance || instance->closing) {
+ rcu_read_unlock();
return VCHIQ_SUCCESS;
+ }
+
+ /*
+	 * Since we hop between different synchronization mechanisms,
+	 * taking an extra reference results in a simpler implementation.
+ */
+ vchiq_service_get(service);
+ rcu_read_unlock();
vchiq_log_trace(vchiq_arm_log_level,
"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
@@ -1097,6 +1109,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
bulk_userdata);
if (status != VCHIQ_SUCCESS) {
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
return status;
}
}
@@ -1105,10 +1118,12 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
if (wait_for_completion_interruptible(&user_service->remove_event)) {
vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
return VCHIQ_RETRY;
} else if (instance->closing) {
vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
return VCHIQ_ERROR;
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
@@ -1137,6 +1152,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
header = NULL;
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
+ vchiq_service_put(service);
if (skip_completion)
return VCHIQ_SUCCESS;
@@ -1661,7 +1677,7 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
service_data[i].clientid, service_data[i].use_count,
service_data[i].use_count ? nz : "");
}
- vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count count %d", peer_count);
+ vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count %d", peer_count);
vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
vc_use_count);
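
The callback fix above follows a standard kernel pattern: look the object up
under rcu_read_lock(), take a real reference while still inside the read-side
critical section, then drop the RCU lock so the code may sleep, and put the
reference on every exit path. A hedged sketch of the shape (use_service() is
illustrative; handle_to_service(), vchiq_service_get() and vchiq_service_put()
are the driver's own helpers):

    static int use_service(unsigned int handle)
    {
            struct vchiq_service *service;

            rcu_read_lock();
            service = handle_to_service(handle);    /* RCU-protected lookup */
            if (!service) {
                    rcu_read_unlock();
                    return -ENXIO;
            }
            /* Pin the object before leaving the RCU read-side section. */
            vchiq_service_get(service);
            rcu_read_unlock();

            /* Safe to sleep here; the reference keeps @service alive. */

            vchiq_service_put(service);     /* pair this on every return path */
            return 0;
    }
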
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 53a98949b294..55abaf02a196 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -74,8 +74,6 @@
((fourcc) >> 8) & 0xff, \
(fourcc) & 0xff
-static_assert((sizeof(u32) * 8) == 32);
-
#define BITSET_SIZE(b) ((b + 31) >> 5)
#define BITSET_WORD(b) (b >> 5)
#define BITSET_BIT(b) (1 << (b & 31))
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-common.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-common.h
index 5bd7410a034a..b33129403a30 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-common.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-common.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-encodings.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-encodings.h
index 2be9941a1f30..e15ae7b24f73 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-encodings.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-encodings.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h
index 342c9b670f7e..d77e15f25dda 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-common.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h
index a118efd21d98..1e996d8cd283 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-format.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-port.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-port.h
index 3fa3f2a578f0..6ee4c1ed7f19 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-port.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg-port.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h
index b636e889c8a1..471413248a14 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-msg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
index a1e39b1b1701..2277e05b1e31 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-parameters.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
index 76d3f0399964..cb6cdbfaf6ec 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h
index 1dc81ecf9268..6006e29232b3 100644
--- a/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h
+++ b/drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Broadcom BM2835 V4L2 driver
+ * Broadcom BCM2835 V4L2 driver
*
* Copyright © 2013 Raspberry Pi (Trading) Ltd.
*
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 26e08fec6e6a..ee2d145778ed 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -183,7 +183,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
unsigned char bySlot = 0;
unsigned char bySIFS = 0;
unsigned char byDIFS = 0;
- unsigned char byData;
+ unsigned char data;
int i;
/* Set SIFS, DIFS, EIFS, SlotTime, CwMin */
@@ -194,15 +194,15 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->abyBBVGA[0] = 0x20;
priv->abyBBVGA[2] = 0x10;
priv->abyBBVGA[3] = 0x10;
- bb_read_embedded(priv, 0xE7, &byData);
- if (byData == 0x1C)
+ bb_read_embedded(priv, 0xE7, &data);
+ if (data == 0x1C)
bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
} else if (priv->byRFType == RF_UW2452) {
MACvSetBBType(priv->port_offset, BB_TYPE_11A);
priv->abyBBVGA[0] = 0x18;
- bb_read_embedded(priv, 0xE7, &byData);
- if (byData == 0x14) {
+ bb_read_embedded(priv, 0xE7, &data);
+ if (data == 0x14) {
bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
bb_write_embedded(priv, 0xE1, 0x57);
}
@@ -220,14 +220,14 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->abyBBVGA[0] = 0x1C;
priv->abyBBVGA[2] = 0x00;
priv->abyBBVGA[3] = 0x00;
- bb_read_embedded(priv, 0xE7, &byData);
- if (byData == 0x20)
+ bb_read_embedded(priv, 0xE7, &data);
+ if (data == 0x20)
bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
} else if (priv->byRFType == RF_UW2452) {
priv->abyBBVGA[0] = 0x14;
- bb_read_embedded(priv, 0xE7, &byData);
- if (byData == 0x18) {
+ bb_read_embedded(priv, 0xE7, &data);
+ if (data == 0x18) {
bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
bb_write_embedded(priv, 0xE1, 0xD3);
}
@@ -243,14 +243,14 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
priv->abyBBVGA[0] = 0x1C;
priv->abyBBVGA[2] = 0x00;
priv->abyBBVGA[3] = 0x00;
- bb_read_embedded(priv, 0xE7, &byData);
- if (byData == 0x20)
+ bb_read_embedded(priv, 0xE7, &data);
+ if (data == 0x20)
bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
} else if (priv->byRFType == RF_UW2452) {
priv->abyBBVGA[0] = 0x14;
- bb_read_embedded(priv, 0xE7, &byData);
- if (byData == 0x18) {
+ bb_read_embedded(priv, 0xE7, &data);
+ if (data == 0x18) {
bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
bb_write_embedded(priv, 0xE1, 0xD3);
}
@@ -404,7 +404,7 @@ bool CARDbSetBeaconPeriod(struct vnt_private *priv,
*/
void CARDbRadioPowerOff(struct vnt_private *priv)
{
- if (priv->bRadioOff)
+ if (priv->radio_off)
return;
switch (priv->byRFType) {
@@ -429,7 +429,7 @@ void CARDbRadioPowerOff(struct vnt_private *priv)
bb_set_deep_sleep(priv, priv->local_id);
- priv->bRadioOff = true;
+ priv->radio_off = true;
pr_debug("chester power off\n");
MACvRegBitsOn(priv->port_offset, MAC_REG_GPIOCTL0,
LED_ACTSET); /* LED issue */
@@ -798,12 +798,12 @@ bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF)
{
void __iomem *iobase = priv->port_offset;
unsigned short ww;
- unsigned char byData;
+ unsigned char data;
MACvRegBitsOn(iobase, MAC_REG_TFTCTL, TFTCTL_TSFCNTRRD);
for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
- VNSvInPortB(iobase + MAC_REG_TFTCTL, &byData);
- if (!(byData & TFTCTL_TSFCNTRRD))
+ VNSvInPortB(iobase + MAC_REG_TFTCTL, &data);
+ if (!(data & TFTCTL_TSFCNTRRD))
break;
}
if (ww == W_MAX_TIMEOUT)
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index b550a1a0844e..e37c8e35a45b 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -189,7 +189,7 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
/* Init Synthesizer Table */
if (priv->bEnablePSMode)
- RFvWriteWakeProgSyn(priv, priv->byRFType, ch->hw_value);
+ rf_write_wake_prog_syn(priv, priv->byRFType, ch->hw_value);
bb_software_reset(priv);
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 4706bde1ec1d..c272a4ab2fa0 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -128,8 +128,6 @@ struct vnt_private {
u32 memaddr;
u32 ioaddr;
- unsigned char byRxMode;
-
spinlock_t lock;
volatile int iTDUsed[TYPE_MAXTD];
@@ -157,7 +155,7 @@ struct vnt_private {
unsigned char local_id;
unsigned char byRFType;
- unsigned char byMaxPwrLevel;
+ unsigned char max_pwr_level;
unsigned char byZoneType;
bool bZoneRegExist;
unsigned char byOriginalZonetype;
@@ -165,7 +163,7 @@ struct vnt_private {
unsigned char abyCurrentNetAddr[ETH_ALEN]; __aligned(2)
bool bLinkPass; /* link status: OK or fail */
- unsigned int uCurrRSSI;
+ unsigned int current_rssi;
unsigned char byCurrSQ;
unsigned long dwTxAntennaSel;
@@ -221,7 +219,7 @@ struct vnt_private {
bool bBarkerPreambleMd;
bool bRadioControlOff;
- bool bRadioOff;
+ bool radio_off;
bool bEnablePSMode;
unsigned short wListenInterval;
bool bPWBitOn;
@@ -229,7 +227,7 @@ struct vnt_private {
/* GPIO Radio Control */
unsigned char byRadioCtl;
unsigned char byGPIO;
- bool bHWRadioOff;
+ bool hw_radio_off;
bool bPrvActive4RadioOFF;
bool bGPIOBlockRead;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 212d2a287b2c..897d70cf32b8 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -369,11 +369,11 @@ static void device_init_registers(struct vnt_private *priv)
/* Set Short Slot Time, xIFS, and RSPINF. */
priv->wCurrentRate = RATE_54M;
- priv->bRadioOff = false;
+ priv->radio_off = false;
priv->byRadioCtl = SROMbyReadEmbedded(priv->port_offset,
EEP_OFS_RADIOCTL);
- priv->bHWRadioOff = false;
+ priv->hw_radio_off = false;
if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
/* Get GPIO */
@@ -383,10 +383,10 @@ static void device_init_registers(struct vnt_private *priv)
!(priv->byRadioCtl & EEP_RADIOCTL_INV)) ||
(!(priv->byGPIO & GPIO0_DATA) &&
(priv->byRadioCtl & EEP_RADIOCTL_INV)))
- priv->bHWRadioOff = true;
+ priv->hw_radio_off = true;
}
- if (priv->bHWRadioOff || priv->bRadioControlOff)
+ if (priv->hw_radio_off || priv->bRadioControlOff)
CARDbRadioPowerOff(priv);
/* get Permanent network address */
@@ -980,10 +980,10 @@ static void vnt_check_bb_vga(struct vnt_private *priv)
if (priv->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
return;
- if (!(priv->vif->bss_conf.assoc && priv->uCurrRSSI))
+ if (!(priv->vif->bss_conf.assoc && priv->current_rssi))
return;
- RFvRSSITodBm(priv, (u8)priv->uCurrRSSI, &dbm);
+ RFvRSSITodBm(priv, (u8)priv->current_rssi, &dbm);
for (i = 0; i < BB_VGA_LEVEL; i++) {
if (dbm < priv->dbm_threshold[i]) {
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index a7d1d35de5d4..c6ed3537f439 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -80,7 +80,7 @@ static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
RFvRSSITodBm(priv, *rssi, &rx_dbm);
priv->byBBPreEDRSSI = (u8)rx_dbm + 1;
- priv->uCurrRSSI = *rssi;
+ priv->current_rssi = *rssi;
skb_pull(skb, 4);
skb_trim(skb, frame_size);
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index bc4abe77db7b..ba7056f5a5da 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -609,11 +609,11 @@ bool RFbInit(struct vnt_private *priv)
switch (priv->byRFType) {
case RF_AIROHA:
case RF_AL2230S:
- priv->byMaxPwrLevel = AL2230_PWR_IDX_LEN;
+ priv->max_pwr_level = AL2230_PWR_IDX_LEN;
ret = RFbAL2230Init(priv);
break;
case RF_AIROHA7230:
- priv->byMaxPwrLevel = AL7230_PWR_IDX_LEN;
+ priv->max_pwr_level = AL7230_PWR_IDX_LEN;
ret = s_bAL7230Init(priv);
break;
case RF_NOTHING:
@@ -669,20 +669,22 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
*
* Parameters:
* In:
- * iobase - I/O base address
- * channel - channel number
- * bySleepCnt - SleepProgSyn count
+ * priv - Device Structure
+ * rf_type - RF type
+ * channel - Channel number
*
- * Return Value: None.
+ * Return Value: true on success; false on failure.
*
*/
-bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char rf_type,
- u16 channel)
+bool rf_write_wake_prog_syn(struct vnt_private *priv, unsigned char rf_type,
+ u16 channel)
{
void __iomem *iobase = priv->port_offset;
int i;
unsigned char init_count = 0;
unsigned char sleep_count = 0;
+ unsigned short idx = MISCFIFO_SYNDATA_IDX;
+ const unsigned long *init_table;
VNSvOutPortW(iobase + MAC_REG_MISCFFNDEX, 0);
switch (rf_type) {
@@ -695,15 +697,12 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char rf_type,
/* Init Reg + Channel Reg (2) */
init_count = CB_AL2230_INIT_SEQ + 2;
sleep_count = 0;
- if (init_count > (MISCFIFO_SYNDATASIZE - sleep_count))
- return false;
for (i = 0; i < CB_AL2230_INIT_SEQ; i++)
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al2230_init_table[i]);
+ MACvSetMISCFifo(priv, idx++, al2230_init_table[i]);
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al2230_channel_table0[channel - 1]);
- i++;
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al2230_channel_table1[channel - 1]);
+ MACvSetMISCFifo(priv, idx++, al2230_channel_table0[channel - 1]);
+ MACvSetMISCFifo(priv, idx++, al2230_channel_table1[channel - 1]);
break;
/* Need to check, PLLON need to be low for channel setting */
@@ -711,22 +710,15 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char rf_type,
/* Init Reg + Channel Reg (3) */
init_count = CB_AL7230_INIT_SEQ + 3;
sleep_count = 0;
- if (init_count > (MISCFIFO_SYNDATASIZE - sleep_count))
- return false;
- if (channel <= CB_MAX_CHANNEL_24G) {
- for (i = 0; i < CB_AL7230_INIT_SEQ; i++)
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_init_table[i]);
- } else {
- for (i = 0; i < CB_AL7230_INIT_SEQ; i++)
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_init_table_a_mode[i]);
- }
+ init_table = (channel <= CB_MAX_CHANNEL_24G) ?
+ al7230_init_table : al7230_init_table_a_mode;
+ for (i = 0; i < CB_AL7230_INIT_SEQ; i++)
+ MACvSetMISCFifo(priv, idx++, init_table[i]);
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_channel_table0[channel - 1]);
- i++;
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_channel_table1[channel - 1]);
- i++;
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_channel_table2[channel - 1]);
+ MACvSetMISCFifo(priv, idx++, al7230_channel_table0[channel - 1]);
+ MACvSetMISCFifo(priv, idx++, al7230_channel_table1[channel - 1]);
+ MACvSetMISCFifo(priv, idx++, al7230_channel_table2[channel - 1]);
break;
case RF_NOTHING:
@@ -786,8 +778,8 @@ bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH)
else
byDec = byPwr + 10;
- if (byDec >= priv->byMaxPwrLevel)
- byDec = priv->byMaxPwrLevel - 1;
+ if (byDec >= priv->max_pwr_level)
+ byDec = priv->max_pwr_level - 1;
byPwr = byDec;
break;
@@ -829,7 +821,7 @@ bool RFbRawSetPower(struct vnt_private *priv, unsigned char byPwr,
bool ret = true;
unsigned long dwMax7230Pwr = 0;
- if (byPwr >= priv->byMaxPwrLevel)
+ if (byPwr >= priv->max_pwr_level)
return false;
switch (priv->byRFType) {
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index 0939937d47a8..9fef81846a9f 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -60,7 +60,7 @@
bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData);
bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, u16 byChannel);
bool RFbInit(struct vnt_private *priv);
-bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char rf_type, u16 channel);
+bool rf_write_wake_prog_syn(struct vnt_private *priv, unsigned char rf_type, u16 channel);
bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH);
bool RFbRawSetPower(struct vnt_private *priv, unsigned char byPwr,
unsigned int rate);
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index 24ba10d6bd0b..fcf8313870af 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -679,16 +679,8 @@ static int prism2mib_priv(struct mibrec *mib,
HFA384x_RID_CNFWPADATA,
(u8 *)&wpa,
sizeof(wpa));
- /*
- pstr->len = le16_to_cpu(wpa.datalen);
- memcpy(pstr->data, wpa.data, pstr->len);
- */
pstr->len = 0;
} else {
- /*
- wpa.datalen = cpu_to_le16(pstr->len);
- memcpy(wpa.data, pstr->data, pstr->len);
- */
wpa.datalen = 0;
hfa384x_drvr_setconfig(hw,
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 8075f60fd02c..2d5cf1714ae0 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -443,6 +443,9 @@ static bool iscsit_tpg_check_network_portal(
break;
}
spin_unlock(&tpg->tpg_np_lock);
+
+ if (match)
+ break;
}
spin_unlock(&tiqn->tiqn_tpg_lock);
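
Without the added check, the outer loop over target portal groups kept
iterating even after the inner loop had found a match. A minimal sketch of the
two-level break idiom (all names below are illustrative):

    bool match = false;

    list_for_each_entry(tpg, &tpg_list, entry) {
            spin_lock(&tpg->np_lock);
            list_for_each_entry(np, &tpg->np_list, entry) {
                    if (np_matches(np, addr)) {
                            match = true;
                            break;          /* leaves only the inner loop */
                    }
            }
            spin_unlock(&tpg->np_lock);

            if (match)
                    break;                  /* now leave the outer loop too */
    }
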
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 7fa57fb57bf2..807d06ecadee 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -1005,7 +1005,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
req->timeout = PS_TIMEOUT_OTHER;
scsi_req(req)->retries = PS_RETRY;
- blk_execute_rq_nowait(NULL, req, (cmd->sam_task_attr == TCM_HEAD_TAG),
+ blk_execute_rq_nowait(req, cmd->sam_task_attr == TCM_HEAD_TAG,
pscsi_req_done);
return 0;
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 1ca320885fad..17a6f51d3089 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -158,6 +158,7 @@ void optee_remove_common(struct optee *optee)
optee_unregister_devices();
optee_notif_uninit(optee);
+ teedev_close_context(optee->ctx);
/*
* The two devices have to be unregistered before we can free the
* other resources.
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
index 20a1b1a3d965..f2bf6c61197f 100644
--- a/drivers/tee/optee/ffa_abi.c
+++ b/drivers/tee/optee/ffa_abi.c
@@ -424,6 +424,7 @@ static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void)
*/
static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee *optee,
struct optee_msg_arg *arg)
{
struct tee_shm *shm;
@@ -439,7 +440,7 @@ static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
break;
case OPTEE_RPC_SHM_TYPE_KERNEL:
- shm = tee_shm_alloc(ctx, arg->params[0].u.value.b,
+ shm = tee_shm_alloc(optee->ctx, arg->params[0].u.value.b,
TEE_SHM_MAPPED | TEE_SHM_PRIV);
break;
default:
@@ -493,14 +494,13 @@ err_bad_param:
}
static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
+ struct optee *optee,
struct optee_msg_arg *arg)
{
- struct optee *optee = tee_get_drvdata(ctx->teedev);
-
arg->ret_origin = TEEC_ORIGIN_COMMS;
switch (arg->cmd) {
case OPTEE_RPC_CMD_SHM_ALLOC:
- handle_ffa_rpc_func_cmd_shm_alloc(ctx, arg);
+ handle_ffa_rpc_func_cmd_shm_alloc(ctx, optee, arg);
break;
case OPTEE_RPC_CMD_SHM_FREE:
handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
@@ -510,12 +510,12 @@ static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
}
}
-static void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd,
- struct optee_msg_arg *arg)
+static void optee_handle_ffa_rpc(struct tee_context *ctx, struct optee *optee,
+ u32 cmd, struct optee_msg_arg *arg)
{
switch (cmd) {
case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
- handle_ffa_rpc_func_cmd(ctx, arg);
+ handle_ffa_rpc_func_cmd(ctx, optee, arg);
break;
case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
/* Interrupt delivered by now */
@@ -582,7 +582,7 @@ static int optee_ffa_yielding_call(struct tee_context *ctx,
* above.
*/
cond_resched();
- optee_handle_ffa_rpc(ctx, data->data1, rpc_arg);
+ optee_handle_ffa_rpc(ctx, optee, data->data1, rpc_arg);
cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
data->data0 = cmd;
data->data1 = 0;
@@ -619,9 +619,18 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
.data2 = (u32)(shm->sec_world_id >> 32),
.data3 = shm->offset,
};
- struct optee_msg_arg *arg = tee_shm_get_va(shm, 0);
- unsigned int rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
- struct optee_msg_arg *rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+ struct optee_msg_arg *arg;
+ unsigned int rpc_arg_offs;
+ struct optee_msg_arg *rpc_arg;
+
+ arg = tee_shm_get_va(shm, 0);
+ if (IS_ERR(arg))
+ return PTR_ERR(arg);
+
+ rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
+ rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+ if (IS_ERR(rpc_arg))
+ return PTR_ERR(rpc_arg);
return optee_ffa_yielding_call(ctx, &data, rpc_arg);
}
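
tee_shm_get_va() returns an ERR_PTR() on failure, so the old code could
dereference an error pointer when it read arg->num_params. The fix applies the
usual ERR_PTR idiom, sketched here:

    #include <linux/err.h>

    struct optee_msg_arg *arg;

    arg = tee_shm_get_va(shm, 0);
    if (IS_ERR(arg))                /* a small -errno encoded in the pointer */
            return PTR_ERR(arg);    /* decode it back into an int */
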
@@ -793,7 +802,9 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
{
const struct ffa_dev_ops *ffa_ops;
unsigned int rpc_arg_count;
+ struct tee_shm_pool *pool;
struct tee_device *teedev;
+ struct tee_context *ctx;
struct optee *optee;
int rc;
@@ -813,12 +824,12 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
if (!optee)
return -ENOMEM;
- optee->pool = optee_ffa_config_dyn_shm();
- if (IS_ERR(optee->pool)) {
- rc = PTR_ERR(optee->pool);
- optee->pool = NULL;
- goto err;
+ pool = optee_ffa_config_dyn_shm();
+ if (IS_ERR(pool)) {
+ rc = PTR_ERR(pool);
+ goto err_free_optee;
}
+ optee->pool = pool;
optee->ops = &optee_ffa_ops;
optee->ffa.ffa_dev = ffa_dev;
@@ -829,7 +840,7 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err;
+ goto err_free_pool;
}
optee->teedev = teedev;
@@ -837,50 +848,57 @@ static int optee_ffa_probe(struct ffa_device *ffa_dev)
optee);
if (IS_ERR(teedev)) {
rc = PTR_ERR(teedev);
- goto err;
+ goto err_unreg_teedev;
}
optee->supp_teedev = teedev;
rc = tee_device_register(optee->teedev);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
rc = tee_device_register(optee->supp_teedev);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
if (rc)
- goto err;
+ goto err_unreg_supp_teedev;
mutex_init(&optee->ffa.mutex);
mutex_init(&optee->call_queue.mutex);
INIT_LIST_HEAD(&optee->call_queue.waiters);
optee_supp_init(&optee->supp);
ffa_dev_set_drvdata(ffa_dev, optee);
+ ctx = teedev_open(optee->teedev);
+ if (IS_ERR(ctx))
+ goto err_rhashtable_free;
+ optee->ctx = ctx;
rc = optee_notif_init(optee, OPTEE_DEFAULT_MAX_NOTIF_VALUE);
- if (rc) {
- optee_ffa_remove(ffa_dev);
- return rc;
- }
+ if (rc)
+ goto err_close_ctx;
rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
- if (rc) {
- optee_ffa_remove(ffa_dev);
- return rc;
- }
+ if (rc)
+ goto err_unregister_devices;
pr_info("initialized driver\n");
return 0;
-err:
- /*
- * tee_device_unregister() is safe to call even if the
- * devices hasn't been registered with
- * tee_device_register() yet.
- */
+
+err_unregister_devices:
+ optee_unregister_devices();
+ optee_notif_uninit(optee);
+err_close_ctx:
+ teedev_close_context(ctx);
+err_rhashtable_free:
+ rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+ optee_supp_uninit(&optee->supp);
+ mutex_destroy(&optee->call_queue.mutex);
+err_unreg_supp_teedev:
tee_device_unregister(optee->supp_teedev);
+err_unreg_teedev:
tee_device_unregister(optee->teedev);
- if (optee->pool)
- tee_shm_pool_free(optee->pool);
+err_free_pool:
+ tee_shm_pool_free(pool);
+err_free_optee:
kfree(optee);
return rc;
}
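
The probe path now unwinds with one label per successfully completed step
instead of a single catch-all err: label, so each failure undoes exactly what
was set up before it, in reverse order. A condensed sketch of the idiom with
hypothetical steps:

    static int my_probe(struct device *dev)
    {
            int rc;

            rc = step_a();          /* e.g. allocate a pool */
            if (rc)
                    return rc;

            rc = step_b();          /* e.g. register a device */
            if (rc)
                    goto err_undo_a;

            rc = step_c();          /* e.g. open a context */
            if (rc)
                    goto err_undo_b;

            return 0;

    err_undo_b:
            undo_b();
    err_undo_a:
            undo_a();
            return rc;
    }
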
diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c
index a28fa03dcd0e..05212842b0a5 100644
--- a/drivers/tee/optee/notif.c
+++ b/drivers/tee/optee/notif.c
@@ -121,5 +121,5 @@ int optee_notif_init(struct optee *optee, u_int max_key)
void optee_notif_uninit(struct optee *optee)
{
- kfree(optee->notif.bitmap);
+ bitmap_free(optee->notif.bitmap);
}
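
bitmap_free() is the documented counterpart of bitmap_alloc()/bitmap_zalloc(),
so pairing them keeps the API symmetric even though bitmap_free() is currently
a thin wrapper around kfree(). A minimal sketch of the pairing:

    #include <linux/bitmap.h>

    unsigned long *bitmap = bitmap_zalloc(max_key, GFP_KERNEL);

    if (!bitmap)
            return -ENOMEM;
    /* ... set_bit()/test_and_set_bit() against bitmap ... */
    bitmap_free(bitmap);
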
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index 46f74ab07c7e..92bc47bef95f 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -53,7 +53,6 @@ struct optee_call_queue {
struct optee_notif {
u_int max_key;
- struct tee_context *ctx;
/* Serializes access to the elements below in this struct */
spinlock_t lock;
struct list_head db;
@@ -134,9 +133,10 @@ struct optee_ops {
/**
* struct optee - main service struct
* @supp_teedev: supplicant device
+ * @teedev: client device
* @ops: internal callbacks for different ways to reach secure
* world
- * @teedev: client device
+ * @ctx: driver internal TEE context
* @smc: specific to SMC ABI
* @ffa: specific to FF-A ABI
* @call_queue: queue of threads waiting to call @invoke_fn
@@ -152,6 +152,7 @@ struct optee {
struct tee_device *supp_teedev;
struct tee_device *teedev;
const struct optee_ops *ops;
+ struct tee_context *ctx;
union {
struct optee_smc smc;
struct optee_ffa ffa;
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
index 449d6a72d289..1a55339c7072 100644
--- a/drivers/tee/optee/smc_abi.c
+++ b/drivers/tee/optee/smc_abi.c
@@ -75,16 +75,6 @@ static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
p->u.memref.shm = shm;
- /* Check that the memref is covered by the shm object */
- if (p->u.memref.size) {
- size_t o = p->u.memref.shm_offs +
- p->u.memref.size - 1;
-
- rc = tee_shm_get_pa(shm, o, NULL);
- if (rc)
- return rc;
- }
-
return 0;
}
@@ -622,6 +612,7 @@ static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
}
static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee *optee,
struct optee_msg_arg *arg,
struct optee_call_ctx *call_ctx)
{
@@ -651,7 +642,8 @@ static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
break;
case OPTEE_RPC_SHM_TYPE_KERNEL:
- shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ shm = tee_shm_alloc(optee->ctx, sz,
+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
break;
default:
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
@@ -747,7 +739,7 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
switch (arg->cmd) {
case OPTEE_RPC_CMD_SHM_ALLOC:
free_pages_list(call_ctx);
- handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
+ handle_rpc_func_cmd_shm_alloc(ctx, optee, arg, call_ctx);
break;
case OPTEE_RPC_CMD_SHM_FREE:
handle_rpc_func_cmd_shm_free(ctx, arg);
@@ -776,7 +768,7 @@ static void optee_handle_rpc(struct tee_context *ctx,
switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
case OPTEE_SMC_RPC_FUNC_ALLOC:
- shm = tee_shm_alloc(ctx, param->a1,
+ shm = tee_shm_alloc(optee->ctx, param->a1,
TEE_SHM_MAPPED | TEE_SHM_PRIV);
if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
reg_pair_from_64(&param->a1, &param->a2, pa);
@@ -954,57 +946,34 @@ static irqreturn_t notif_irq_thread_fn(int irq, void *dev_id)
{
struct optee *optee = dev_id;
- optee_smc_do_bottom_half(optee->notif.ctx);
+ optee_smc_do_bottom_half(optee->ctx);
return IRQ_HANDLED;
}
static int optee_smc_notif_init_irq(struct optee *optee, u_int irq)
{
- struct tee_context *ctx;
int rc;
- ctx = teedev_open(optee->teedev);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-
- optee->notif.ctx = ctx;
rc = request_threaded_irq(irq, notif_irq_handler,
notif_irq_thread_fn,
0, "optee_notification", optee);
if (rc)
- goto err_close_ctx;
+ return rc;
optee->smc.notif_irq = irq;
return 0;
-
-err_close_ctx:
- teedev_close_context(optee->notif.ctx);
- optee->notif.ctx = NULL;
-
- return rc;
}
static void optee_smc_notif_uninit_irq(struct optee *optee)
{
- if (optee->notif.ctx) {
- optee_smc_stop_async_notif(optee->notif.ctx);
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
+ optee_smc_stop_async_notif(optee->ctx);
if (optee->smc.notif_irq) {
free_irq(optee->smc.notif_irq, optee);
irq_dispose_mapping(optee->smc.notif_irq);
}
-
- /*
- * The thread normally working with optee->notif.ctx was
- * stopped with free_irq() above.
- *
- * Note we're not using teedev_close_context() or
- * tee_client_close_context() since we have already called
- * tee_device_put() while initializing to avoid a circular
- * reference counting.
- */
- teedev_close_context(optee->notif.ctx);
}
}
@@ -1366,6 +1335,7 @@ static int optee_probe(struct platform_device *pdev)
struct optee *optee = NULL;
void *memremaped_shm = NULL;
struct tee_device *teedev;
+ struct tee_context *ctx;
u32 max_notif_value;
u32 sec_caps;
int rc;
@@ -1446,9 +1416,13 @@ static int optee_probe(struct platform_device *pdev)
optee->pool = pool;
platform_set_drvdata(pdev, optee);
+ ctx = teedev_open(optee->teedev);
+ if (IS_ERR(ctx))
+ goto err_supp_uninit;
+ optee->ctx = ctx;
rc = optee_notif_init(optee, max_notif_value);
if (rc)
- goto err_supp_uninit;
+ goto err_close_ctx;
if (sec_caps & OPTEE_SMC_SEC_CAP_ASYNC_NOTIF) {
unsigned int irq;
@@ -1496,6 +1470,8 @@ err_disable_shm_cache:
optee_unregister_devices();
err_notif_uninit:
optee_notif_uninit(optee);
+err_close_ctx:
+ teedev_close_context(ctx);
err_supp_uninit:
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 8502b7d8df89..72acb1f61849 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -596,6 +596,7 @@ static const struct acpi_device_id int3400_thermal_match[] = {
{"INT3400", 0},
{"INTC1040", 0},
{"INTC1041", 0},
+ {"INTC10A0", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index c3c4c4d34542..07e25321dfe3 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -285,6 +285,7 @@ static const struct acpi_device_id int3403_device_ids[] = {
{"INT3403", 0},
{"INTC1043", 0},
{"INTC1046", 0},
+ {"INTC10A1", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
index 9b2a64ef55d0..49932a68abac 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
@@ -24,6 +24,7 @@
#define PCI_DEVICE_ID_INTEL_HSB_THERMAL 0x0A03
#define PCI_DEVICE_ID_INTEL_ICL_THERMAL 0x8a03
#define PCI_DEVICE_ID_INTEL_JSL_THERMAL 0x4E03
+#define PCI_DEVICE_ID_INTEL_RPL_THERMAL 0xA71D
#define PCI_DEVICE_ID_INTEL_SKL_THERMAL 0x1903
#define PCI_DEVICE_ID_INTEL_TGL_THERMAL 0x9A03
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index b4bcd3fe9eb2..ca40b0967cdd 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -358,6 +358,7 @@ static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend,
static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
+ { PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
{ },
};
diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c
index b67e72d5644b..79b5abf9d042 100644
--- a/drivers/thunderbolt/acpi.c
+++ b/drivers/thunderbolt/acpi.c
@@ -7,6 +7,7 @@
*/
#include <linux/acpi.h>
+#include <linux/pm_runtime.h>
#include "tb.h"
@@ -31,7 +32,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
return AE_OK;
/* It needs to reference this NHI */
- if (nhi->pdev->dev.fwnode != args.fwnode)
+ if (dev_fwnode(&nhi->pdev->dev) != args.fwnode)
goto out_put;
/*
@@ -74,8 +75,18 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) {
const struct device_link *link;
+ /*
+ * Make them both active first to make sure the NHI does
+ * not runtime suspend before the consumer. The
+ * pm_runtime_put() below then allows the consumer to
+ * runtime suspend again (which then allows NHI runtime
+ * suspend too now that the device link is established).
+ */
+ pm_runtime_get_sync(&pdev->dev);
+
link = device_link_add(&pdev->dev, &nhi->pdev->dev,
DL_FLAG_AUTOREMOVE_SUPPLIER |
+ DL_FLAG_RPM_ACTIVE |
DL_FLAG_PM_RUNTIME);
if (link) {
dev_dbg(&nhi->pdev->dev, "created link from %s\n",
@@ -84,6 +95,8 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data,
dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
dev_name(&pdev->dev));
}
+
+ pm_runtime_put(&pdev->dev);
}
out_put:
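
The comment in the hunk above spells out the ordering problem: the consumer
must be runtime-active before the link is created so the NHI (the supplier)
cannot suspend underneath it, DL_FLAG_RPM_ACTIVE tells the device-link core
the supplier is active at creation time, and the final pm_runtime_put()
rebalances the get. A condensed sketch of the pattern (consumer_dev and
supplier_dev are illustrative):

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    pm_runtime_get_sync(consumer_dev);      /* both devices now active */

    link = device_link_add(consumer_dev, supplier_dev,
                           DL_FLAG_AUTOREMOVE_SUPPLIER |
                           DL_FLAG_RPM_ACTIVE |
                           DL_FLAG_PM_RUNTIME);
    if (!link)
            dev_warn(supplier_dev, "device link creation failed\n");

    pm_runtime_put(consumer_dev);   /* consumer may runtime-suspend again */
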
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 6255f1ef9599..fff0c740c8f3 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -1741,8 +1741,13 @@ static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type,
if (!n)
return;
- INIT_WORK(&n->work, icm_handle_notification);
n->pkg = kmemdup(buf, size, GFP_KERNEL);
+ if (!n->pkg) {
+ kfree(n);
+ return;
+ }
+
+ INIT_WORK(&n->work, icm_handle_notification);
n->tb = tb;
queue_work(tb->wq, &n->work);
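
The fix both checks the kmemdup() return value (previously a NULL n->pkg could
reach the work handler) and moves INIT_WORK() after the allocation so a
half-initialized notification is never queued. The safe ordering, sketched
with hypothetical names:

    n = kmalloc(sizeof(*n), GFP_KERNEL);
    if (!n)
            return;

    n->pkg = kmemdup(buf, size, GFP_KERNEL);
    if (!n->pkg) {
            kfree(n);               /* unwind the partial allocation */
            return;
    }

    INIT_WORK(&n->work, handler);   /* only now is @n fully set up */
    queue_work(wq, &n->work);
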
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index c178f0d7beab..53495a38b4eb 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -193,6 +193,30 @@ int tb_lc_start_lane_initialization(struct tb_port *port)
return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}
+/**
+ * tb_lc_is_clx_supported() - Check whether CLx is supported by the lane adapter
+ * @port: Lane adapter
+ *
+ * The TB_LC_LINK_ATTR_CPS bit reflects whether the link supports CLx,
+ * including active cables (if connected on the link).
+ */
+bool tb_lc_is_clx_supported(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+ int cap, ret;
+ u32 val;
+
+ cap = find_port_lc_cap(port);
+ if (cap < 0)
+ return false;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_ATTR, 1);
+ if (ret)
+ return false;
+
+ return !!(val & TB_LC_LINK_ATTR_CPS);
+}
+
static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset,
unsigned int flags)
{
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index 564e2f42cebd..299712accfe9 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -85,11 +85,12 @@ static int tb_path_find_src_hopid(struct tb_port *src,
* @dst_hopid: HopID to the @dst (%-1 if don't care)
* @last: Last port is filled here if not %NULL
* @name: Name of the path
+ * @alloc_hopid: Allocate HopIDs for the ports
*
* Follows a path starting from @src and @src_hopid to the last output
- * port of the path. Allocates HopIDs for the visited ports. Call
- * tb_path_free() to release the path and allocated HopIDs when the path
- * is not needed anymore.
+ * port of the path. Allocates HopIDs for the visited ports (if
+ * @alloc_hopid is true). Call tb_path_free() to release the path and
+ * allocated HopIDs when the path is not needed anymore.
*
* Note function discovers also incomplete paths so caller should check
* that the @dst port is the expected one. If it is not, the path can be
@@ -99,7 +100,8 @@ static int tb_path_find_src_hopid(struct tb_port *src,
*/
struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
struct tb_port *dst, int dst_hopid,
- struct tb_port **last, const char *name)
+ struct tb_port **last, const char *name,
+ bool alloc_hopid)
{
struct tb_port *out_port;
struct tb_regs_hop hop;
@@ -156,6 +158,7 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
path->tb = src->sw->tb;
path->path_length = num_hops;
path->activated = true;
+ path->alloc_hopid = alloc_hopid;
path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL);
if (!path->hops) {
@@ -177,13 +180,14 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
goto err;
}
- if (tb_port_alloc_in_hopid(p, h, h) < 0)
+ if (alloc_hopid && tb_port_alloc_in_hopid(p, h, h) < 0)
goto err;
out_port = &sw->ports[hop.out_port];
next_hop = hop.next_hop;
- if (tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
+ if (alloc_hopid &&
+ tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) {
tb_port_release_in_hopid(p, h);
goto err;
}
@@ -263,6 +267,8 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
return NULL;
}
+ path->alloc_hopid = true;
+
in_hopid = src_hopid;
out_port = NULL;
@@ -345,17 +351,19 @@ err:
*/
void tb_path_free(struct tb_path *path)
{
- int i;
-
- for (i = 0; i < path->path_length; i++) {
- const struct tb_path_hop *hop = &path->hops[i];
-
- if (hop->in_port)
- tb_port_release_in_hopid(hop->in_port,
- hop->in_hop_index);
- if (hop->out_port)
- tb_port_release_out_hopid(hop->out_port,
- hop->next_hop_index);
+ if (path->alloc_hopid) {
+ int i;
+
+ for (i = 0; i < path->path_length; i++) {
+ const struct tb_path_hop *hop = &path->hops[i];
+
+ if (hop->in_port)
+ tb_port_release_in_hopid(hop->in_port,
+ hop->in_hop_index);
+ if (hop->out_port)
+ tb_port_release_out_hopid(hop->out_port,
+ hop->next_hop_index);
+ }
}
kfree(path->hops);
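
With the new @alloc_hopid argument a caller can walk an existing path
read-only, and tb_path_free() then skips the HopID release since nothing was
claimed. A hedged usage sketch (the HopID value and path name are
illustrative):

    struct tb_path *path;

    /* Follow an existing tunnel without claiming its HopIDs. */
    path = tb_path_discover(src, 8, dst, -1, &last, "Video", false);
    if (path) {
            /* ... inspect path->hops[] ... */
            tb_path_free(path);     /* alloc_hopid was false: nothing to release */
    }
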
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index 722694052f4a..8c29bd556ae0 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -324,15 +324,10 @@ struct device_type tb_retimer_type = {
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
- struct usb4_port *usb4;
struct tb_retimer *rt;
u32 vendor, device;
int ret;
- usb4 = port->usb4;
- if (!usb4)
- return -EINVAL;
-
ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
sizeof(vendor));
if (ret) {
@@ -374,7 +369,7 @@ static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
rt->port = port;
rt->tb = port->sw->tb;
- rt->dev.parent = &usb4->dev;
+ rt->dev.parent = &port->usb4->dev;
rt->dev.bus = &tb_bus_type;
rt->dev.type = &tb_retimer_type;
dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
@@ -453,6 +448,13 @@ int tb_retimer_scan(struct tb_port *port, bool add)
{
u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
int ret, i, last_idx = 0;
+ struct usb4_port *usb4;
+
+ usb4 = port->usb4;
+ if (!usb4)
+ return 0;
+
+ pm_runtime_get_sync(&usb4->dev);
/*
* Send broadcast RT to make sure retimer indices facing this
@@ -460,7 +462,7 @@ int tb_retimer_scan(struct tb_port *port, bool add)
*/
ret = usb4_port_enumerate_retimers(port);
if (ret)
- return ret;
+ goto out;
/*
* Enable sideband channel for each retimer. We can do this
@@ -490,8 +492,10 @@ int tb_retimer_scan(struct tb_port *port, bool add)
break;
}
- if (!last_idx)
- return 0;
+ if (!last_idx) {
+ ret = 0;
+ goto out;
+ }
/* Add on-board retimers if they do not exist already */
for (i = 1; i <= last_idx; i++) {
@@ -507,7 +511,11 @@ int tb_retimer_scan(struct tb_port *port, bool add)
}
}
- return 0;
+out:
+ pm_runtime_mark_last_busy(&usb4->dev);
+ pm_runtime_put_autosuspend(&usb4->dev);
+
+ return ret;
}
static int remove_retimer(struct device *dev, void *data)
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index 3014146081c1..d026e305fe5d 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -13,6 +13,7 @@
#include <linux/sched/signal.h>
#include <linux/sizes.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include "tb.h"
@@ -26,6 +27,10 @@ struct nvm_auth_status {
u32 status;
};
+static bool clx_enabled = true;
+module_param_named(clx, clx_enabled, bool, 0444);
+MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
+
/*
* Hold NVM authentication failure status per switch This information
* needs to stay around even when the switch gets power cycled so we
@@ -623,6 +628,9 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits)
return 0;
nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK;
+ if (credits < 0)
+ credits = max_t(int, -nfc_credits, credits);
+
nfc_credits += credits;
tb_port_dbg(port, "adding %d NFC credits to %lu", credits,
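
The added clamp keeps a large negative @credits from driving the credit count
below zero (the hardware field is unsigned, so an underflow would wrap). A
tiny worked check, assuming nfc_credits is currently 2:

    int nfc_credits = 2;
    int credits = -5;       /* caller asks to remove five credits */

    if (credits < 0)
            credits = max_t(int, -nfc_credits, credits);    /* max(-2, -5) == -2 */

    nfc_credits += credits; /* result is 0 instead of wrapping around */
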
@@ -1319,7 +1327,9 @@ int tb_dp_port_hpd_clear(struct tb_port *port)
* @aux_tx: AUX TX Hop ID
* @aux_rx: AUX RX Hop ID
*
- * Programs specified Hop IDs for DP IN/OUT port.
+ * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4
+ * router DP adapters too but does not program the values as the fields
+ * are read-only.
*/
int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
unsigned int aux_tx, unsigned int aux_rx)
@@ -1327,6 +1337,9 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video,
u32 data[2];
int ret;
+ if (tb_switch_is_usb4(port->sw))
+ return 0;
+
ret = tb_port_read(port, data, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data));
if (ret)
@@ -1449,6 +1462,40 @@ int tb_switch_reset(struct tb_switch *sw)
return res.err;
}
+/**
+ * tb_switch_wait_for_bit() - Wait for specified value of bits in offset
+ * @sw: Router to read the offset value from
+ * @offset: Offset in the router config space to read from
+ * @bit: Bit mask in the offset to wait for
+ * @value: Value of the bits to wait for
+ * @timeout_msec: Timeout in milliseconds to wait
+ *
+ * Wait until the specified bits in @offset reach the specified @value.
+ * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached
+ * within the given timeout or a negative errno in case of failure.
+ */
+int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+ u32 value, int timeout_msec)
+{
+ ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
+
+ do {
+ u32 val;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ if ((val & bit) == value)
+ return 0;
+
+ usleep_range(50, 100);
+ } while (ktime_before(ktime_get(), timeout));
+
+ return -ETIMEDOUT;
+}
+
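
tb_switch_wait_for_bit() is a simple poll-with-deadline helper built on ktime.
A hedged usage sketch, waiting up to 100 ms for a hypothetical ready bit:

    /* Wait up to 100 ms for BIT(1) at @offset to read back as set. */
    ret = tb_switch_wait_for_bit(sw, offset, BIT(1), BIT(1), 100);
    if (ret == -ETIMEDOUT)
            tb_sw_warn(sw, "bit did not settle in time\n");
    else if (ret)
            return ret;     /* the config-space read itself failed */
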
/*
* tb_plug_events_active() - enable/disable plug events on a switch
*
@@ -2186,10 +2233,18 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent,
if (ret > 0)
sw->cap_plug_events = ret;
+ ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2);
+ if (ret > 0)
+ sw->cap_vsec_tmu = ret;
+
ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
if (ret > 0)
sw->cap_lc = ret;
+ ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP);
+ if (ret > 0)
+ sw->cap_lp = ret;
+
/* Root switch is always authorized */
if (!route)
sw->authorized = true;
@@ -2996,6 +3051,13 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
tb_sw_dbg(sw, "suspending switch\n");
+ /*
+ * Actually only needed for Titan Ridge but for simplicity can be
+ * done for USB4 device too as CLx is re-enabled at resume.
+ */
+ if (tb_switch_disable_clx(sw, TB_CL0S))
+ tb_sw_warn(sw, "failed to disable CLx on upstream port\n");
+
err = tb_plug_events_active(sw, false);
if (err)
return;
@@ -3048,9 +3110,20 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in)
*/
int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
{
+ int ret;
+
if (tb_switch_is_usb4(sw))
- return usb4_switch_alloc_dp_resource(sw, in);
- return tb_lc_dp_sink_alloc(sw, in);
+ ret = usb4_switch_alloc_dp_resource(sw, in);
+ else
+ ret = tb_lc_dp_sink_alloc(sw, in);
+
+ if (ret)
+ tb_sw_warn(sw, "failed to allocate DP resource for port %d\n",
+ in->port);
+ else
+ tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port);
+
+ return ret;
}
/**
@@ -3073,6 +3146,8 @@ void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in)
if (ret)
tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n",
in->port);
+ else
+ tb_sw_dbg(sw, "released DP resource for port %d\n", in->port);
}
struct tb_sw_lookup {
@@ -3202,3 +3277,415 @@ struct tb_port *tb_switch_find_port(struct tb_switch *sw,
return NULL;
}
+
+static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary)
+{
+ u32 phy;
+ int ret;
+
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (secondary)
+ phy |= LANE_ADP_CS_1_PMS;
+ else
+ phy &= ~LANE_ADP_CS_1_PMS;
+
+ return tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_pm_secondary_enable(struct tb_port *port)
+{
+ return __tb_port_pm_secondary_set(port, true);
+}
+
+static int tb_port_pm_secondary_disable(struct tb_port *port)
+{
+ return __tb_port_pm_secondary_set(port, false);
+}
+
+static int tb_switch_pm_secondary_resolve(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+ int ret;
+
+ if (!tb_route(sw))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+ ret = tb_port_pm_secondary_enable(up);
+ if (ret)
+ return ret;
+
+ return tb_port_pm_secondary_disable(down);
+}
+
+/* Called for USB4 or Titan Ridge routers only */
+static bool tb_port_clx_supported(struct tb_port *port, enum tb_clx clx)
+{
+ u32 mask, val;
+ bool ret;
+
+ /* Don't enable CLx in case of two single-lane links */
+ if (!port->bonded && port->dual_link_port)
+ return false;
+
+ /* Don't enable CLx in case of inter-domain link */
+ if (port->xdomain)
+ return false;
+
+ if (tb_switch_is_usb4(port->sw)) {
+ if (!usb4_port_clx_supported(port))
+ return false;
+ } else if (!tb_lc_is_clx_supported(port)) {
+ return false;
+ }
+
+ switch (clx) {
+ case TB_CL0S:
+ /* CL0s support requires also CL1 support */
+ mask = LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT;
+ break;
+
+	/* For now we support only CL0s, not CL1 or CL2 */
+ case TB_CL1:
+ case TB_CL2:
+ default:
+ return false;
+ }
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_0, 1);
+ if (ret)
+ return false;
+
+ return !!(val & mask);
+}
+
+static inline bool tb_port_cl0s_supported(struct tb_port *port)
+{
+ return tb_port_clx_supported(port, TB_CL0S);
+}
+
+static int __tb_port_cl0s_set(struct tb_port *port, bool enable)
+{
+ u32 phy, mask;
+ int ret;
+
+	/* Enabling CL0s also requires enabling CL1 */
+ mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE;
+ ret = tb_port_read(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+ if (ret)
+ return ret;
+
+ if (enable)
+ phy |= mask;
+ else
+ phy &= ~mask;
+
+ return tb_port_write(port, &phy, TB_CFG_PORT,
+ port->cap_phy + LANE_ADP_CS_1, 1);
+}
+
+static int tb_port_cl0s_disable(struct tb_port *port)
+{
+ return __tb_port_cl0s_set(port, false);
+}
+
+static int tb_port_cl0s_enable(struct tb_port *port)
+{
+ return __tb_port_cl0s_set(port, true);
+}
+
+static int tb_switch_enable_cl0s(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ bool up_cl0s_support, down_cl0s_support;
+ struct tb_port *up, *down;
+ int ret;
+
+ if (!tb_switch_is_clx_supported(sw))
+ return 0;
+
+ /*
+ * Enable CLx for host router's downstream port as part of the
+ * downstream router enabling procedure.
+ */
+ if (!tb_route(sw))
+ return 0;
+
+ /* Enable CLx only for first hop router (depth = 1) */
+ if (tb_route(parent))
+ return 0;
+
+ ret = tb_switch_pm_secondary_resolve(sw);
+ if (ret)
+ return ret;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+
+ up_cl0s_support = tb_port_cl0s_supported(up);
+ down_cl0s_support = tb_port_cl0s_supported(down);
+
+ tb_port_dbg(up, "CL0s %ssupported\n",
+ up_cl0s_support ? "" : "not ");
+ tb_port_dbg(down, "CL0s %ssupported\n",
+ down_cl0s_support ? "" : "not ");
+
+ if (!up_cl0s_support || !down_cl0s_support)
+ return -EOPNOTSUPP;
+
+ ret = tb_port_cl0s_enable(up);
+ if (ret)
+ return ret;
+
+ ret = tb_port_cl0s_enable(down);
+ if (ret) {
+ tb_port_cl0s_disable(up);
+ return ret;
+ }
+
+ ret = tb_switch_mask_clx_objections(sw);
+ if (ret) {
+ tb_port_cl0s_disable(up);
+ tb_port_cl0s_disable(down);
+ return ret;
+ }
+
+ sw->clx = TB_CL0S;
+
+ tb_port_dbg(up, "CL0s enabled\n");
+ return 0;
+}
+
+/**
+ * tb_switch_enable_clx() - Enable CLx on upstream port of specified router
+ * @sw: Router to enable CLx for
+ * @clx: The CLx state to enable
+ *
+ * Enable CLx state only for the first hop router. That is the most
+ * common use-case: it is intended for better thermal management and so
+ * helps to improve performance. CLx is enabled only if both sides of
+ * the link support CLx, the link is not configured as two single-lane
+ * links, and the link is not an inter-domain link. The complete set of
+ * conditions is described in CM Guide 1.0 section 8.1.
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx)
+{
+ struct tb_switch *root_sw = sw->tb->root_switch;
+
+ if (!clx_enabled)
+ return 0;
+
+ /*
+ * CLx is not enabled and validated on Intel USB4 platforms before
+ * Alder Lake.
+ */
+ if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw))
+ return 0;
+
+ switch (clx) {
+ case TB_CL0S:
+ return tb_switch_enable_cl0s(sw);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tb_switch_disable_cl0s(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+ int ret;
+
+ if (!tb_switch_is_clx_supported(sw))
+ return 0;
+
+ /*
+ * Disable CLx for host router's downstream port as part of the
+ * downstream router disabling procedure.
+ */
+ if (!tb_route(sw))
+ return 0;
+
+ /* Disable CLx only for first hop router (depth = 1) */
+ if (tb_route(parent))
+ return 0;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+ ret = tb_port_cl0s_disable(up);
+ if (ret)
+ return ret;
+
+ ret = tb_port_cl0s_disable(down);
+ if (ret)
+ return ret;
+
+ sw->clx = TB_CLX_DISABLE;
+
+ tb_port_dbg(up, "CL0s disabled\n");
+ return 0;
+}
+
+/**
+ * tb_switch_disable_clx() - Disable CLx on upstream port of specified router
+ * @sw: Router to disable CLx for
+ * @clx: The CLx state to disable
+ *
+ * Return: Returns 0 on success or an error code on failure.
+ */
+int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx)
+{
+ if (!clx_enabled)
+ return 0;
+
+ switch (clx) {
+ case TB_CL0S:
+ return tb_switch_disable_cl0s(sw);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * tb_switch_mask_clx_objections() - Mask CLx objections for a router
+ * @sw: Router to mask objections for
+ *
+ * Mask the objections coming from the second depth routers in order to
+ * stop these objections from interfering with the CLx states of the first
+ * depth link.
+ */
+int tb_switch_mask_clx_objections(struct tb_switch *sw)
+{
+ int up_port = sw->config.upstream_port_number;
+ u32 offset, val[2], mask_obj, unmask_obj;
+ int ret, i;
+
+ /* Of the pre-USB4 devices, only Titan Ridge supports CLx states */
+ if (!tb_switch_is_titan_ridge(sw))
+ return 0;
+
+ if (!tb_route(sw))
+ return 0;
+
+ /*
+ * In Titan Ridge there are only 2 dual-lane Thunderbolt ports:
+ * Port A consists of lane adapters 1,2 and
+ * Port B consists of lane adapters 3,4
+ * If upstream port is A, (lanes are 1,2), we mask objections from
+ * port B (lanes 3,4) and unmask objections from Port A and vice-versa.
+ */
+ if (up_port == 1) {
+ mask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
+ unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
+ offset = TB_LOW_PWR_C1_CL1;
+ } else {
+ mask_obj = TB_LOW_PWR_C1_PORT_A_MASK;
+ unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK;
+ offset = TB_LOW_PWR_C3_CL1;
+ }
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lp + offset, ARRAY_SIZE(val));
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(val); i++) {
+ val[i] |= mask_obj;
+ val[i] &= ~unmask_obj;
+ }
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_lp + offset, ARRAY_SIZE(val));
+}
+
+/*
+ * Can be used to read/write a specified PCIe bridge of any Thunderbolt 3
+ * device. For now it is used only for Titan Ridge.
+ */
+static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge,
+ unsigned int pcie_offset, u32 value)
+{
+ u32 offset, command, val;
+ int ret;
+
+ if (sw->generation != 3)
+ return -EOPNOTSUPP;
+
+ offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA;
+ ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
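+ /*
+ * Build the command: register dword offset, bridge select,
+ * write opcode and request for ack.
+ */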
+ command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK;
+ command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT);
+ command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK;
+ command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL
+ << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT;
+ command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK;
+
+ offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD;
+
+ ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_wait_for_bit(sw, offset,
+ TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100);
+ if (ret)
+ return ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
+ if (ret)
+ return ret;
+
+ if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+/**
+ * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state
+ * @sw: Router to enable PCIe L1
+ *
+ * For the Titan Ridge switch to enter the CLx state, its PCIe bridges must
+ * allow entry to the PCIe L1 state. Must be called after the upstream PCIe
+ * tunnel has been configured. Due to an Intel platform limitation, must be
+ * called only for the first hop switch.
+ */
+int tb_switch_pcie_l1_enable(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ int ret;
+
+ if (!tb_route(sw))
+ return 0;
+
+ if (!tb_switch_is_titan_ridge(sw))
+ return 0;
+
+ /* Enable PCIe L1 enable only for first hop router (depth = 1) */
+ if (tb_route(parent))
+ return 0;
+
+ /* Write to downstream PCIe bridge #5 aka Dn4 */
+ ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1);
+ if (ret)
+ return ret;
+
+ /* Write to Upstream PCIe bridge #0 aka Up0 */
+ return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1);
+}
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 2897a77d44c3..cbd0ad85ffb1 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -105,10 +105,11 @@ static void tb_remove_dp_resources(struct tb_switch *sw)
}
}
-static void tb_discover_tunnels(struct tb_switch *sw)
+static void tb_switch_discover_tunnels(struct tb_switch *sw,
+ struct list_head *list,
+ bool alloc_hopids)
{
struct tb *tb = sw->tb;
- struct tb_cm *tcm = tb_priv(tb);
struct tb_port *port;
tb_switch_for_each_port(sw, port) {
@@ -116,24 +117,41 @@ static void tb_discover_tunnels(struct tb_switch *sw)
switch (port->config.type) {
case TB_TYPE_DP_HDMI_IN:
- tunnel = tb_tunnel_discover_dp(tb, port);
+ tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids);
break;
case TB_TYPE_PCIE_DOWN:
- tunnel = tb_tunnel_discover_pci(tb, port);
+ tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids);
break;
case TB_TYPE_USB3_DOWN:
- tunnel = tb_tunnel_discover_usb3(tb, port);
+ tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids);
break;
default:
break;
}
- if (!tunnel)
- continue;
+ if (tunnel)
+ list_add_tail(&tunnel->list, list);
+ }
+
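+ /* Descend into the routers below this one */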
+ tb_switch_for_each_port(sw, port) {
+ if (tb_port_has_remote(port)) {
+ tb_switch_discover_tunnels(port->remote->sw, list,
+ alloc_hopids);
+ }
+ }
+}
+static void tb_discover_tunnels(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
+
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
if (tb_tunnel_is_pci(tunnel)) {
struct tb_switch *parent = tunnel->dst_port->sw;
@@ -146,13 +164,6 @@ static void tb_discover_tunnels(struct tb_switch *sw)
pm_runtime_get_sync(&tunnel->src_port->sw->dev);
pm_runtime_get_sync(&tunnel->dst_port->sw->dev);
}
-
- list_add_tail(&tunnel->list, &tcm->tunnel_list);
- }
-
- tb_switch_for_each_port(sw, port) {
- if (tb_port_has_remote(port))
- tb_discover_tunnels(port->remote->sw);
}
}
@@ -210,7 +221,7 @@ static int tb_enable_tmu(struct tb_switch *sw)
int ret;
/* If it is already enabled in correct mode, don't touch it */
- if (tb_switch_tmu_is_enabled(sw))
+ if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
return 0;
ret = tb_switch_tmu_disable(sw);
@@ -658,6 +669,11 @@ static void tb_scan_port(struct tb_port *port)
tb_switch_lane_bonding_enable(sw);
/* Set the link configured */
tb_switch_configure_link(sw);
+ if (tb_switch_enable_clx(sw, TB_CL0S))
+ tb_sw_warn(sw, "failed to enable CLx on upstream port\n");
+
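+ /* Use uni-directional TMU mode only if CLx was enabled on the link */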
+ tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI,
+ tb_switch_is_clx_enabled(sw));
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to enable TMU\n");
@@ -1076,6 +1092,13 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw)
return -EIO;
}
+ /*
+ * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it
+ * here.
+ */
+ if (tb_switch_pcie_l1_enable(sw))
+ tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n");
+
list_add_tail(&tunnel->list, &tcm->tunnel_list);
return 0;
}
@@ -1364,12 +1387,13 @@ static int tb_start(struct tb *tb)
return ret;
}
+ tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI, false);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
/* Full scan to discover devices added before the driver was loaded. */
tb_scan_switch(tb->root_switch);
/* Find out tunnels created by the boot firmware */
- tb_discover_tunnels(tb->root_switch);
+ tb_discover_tunnels(tb);
/*
* If the boot firmware did not create USB 3.x tunnels create them
* now for the whole topology.
@@ -1407,6 +1431,14 @@ static void tb_restore_children(struct tb_switch *sw)
if (sw->is_unplugged)
return;
+ if (tb_switch_enable_clx(sw, TB_CL0S))
+ tb_sw_warn(sw, "failed to re-enable CLx on upstream port\n");
+
+ /*
+ * tb_switch_tmu_configure() was already called when the switch was
+ * added before entering system sleep or runtime suspend,
+ * so no need to call it again before enabling TMU.
+ */
if (tb_enable_tmu(sw))
tb_sw_warn(sw, "failed to restore TMU configuration\n");
@@ -1429,6 +1461,8 @@ static int tb_resume_noirq(struct tb *tb)
{
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel, *n;
+ unsigned int usb3_delay = 0;
+ LIST_HEAD(tunnels);
tb_dbg(tb, "resuming...\n");
@@ -1439,8 +1473,31 @@ static int tb_resume_noirq(struct tb *tb)
tb_free_invalid_tunnels(tb);
tb_free_unplugged_children(tb->root_switch);
tb_restore_children(tb->root_switch);
- list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list)
+
+ /*
+ * If we get here from suspend to disk the boot firmware or the
+ * restore kernel might have created tunnels of its own. Since
+ * we cannot be sure they are usable for us we find and tear
+ * them down.
+ */
+ tb_switch_discover_tunnels(tb->root_switch, &tunnels, false);
+ list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) {
+ if (tb_tunnel_is_usb3(tunnel))
+ usb3_delay = 500;
+ tb_tunnel_deactivate(tunnel);
+ tb_tunnel_free(tunnel);
+ }
+
+ /* Re-create our tunnels now */
+ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) {
+ /* USB3 requires a delay before it can be re-activated */
+ if (tb_tunnel_is_usb3(tunnel)) {
+ msleep(usb3_delay);
+ /* Only need to do it once */
+ usb3_delay = 0;
+ }
tb_tunnel_restart(tunnel);
+ }
if (!list_empty(&tcm->tunnel_list)) {
/*
* the pcie links need some time to get going.
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 725104c83e3d..74d3b14f004e 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -89,15 +89,31 @@ enum tb_switch_tmu_rate {
* @cap: Offset to the TMU capability (%0 if not found)
* @has_ucap: Does the switch support uni-directional mode
* @rate: TMU refresh rate related to upstream switch. In case of root
- * switch this holds the domain rate.
+ * switch this holds the domain rate. Reflects the HW setting.
* @unidirectional: Is the TMU in uni-directional or bi-directional mode
- * related to upstream switch. Don't case for root switch.
+ * related to upstream switch. Don't care for root switch.
+ * Reflects the HW setting.
+ * @unidirectional_request: New TMU mode (uni-directional or bi-directional)
+ * that is requested to be set. Related to the upstream switch.
+ * Don't care for root switch.
+ * @rate_request: New TMU refresh rate related to the upstream switch that
+ * is requested to be set. In case of root switch, this holds
+ * the new domain rate that is requested to be set.
*/
struct tb_switch_tmu {
int cap;
bool has_ucap;
enum tb_switch_tmu_rate rate;
bool unidirectional;
+ bool unidirectional_request;
+ enum tb_switch_tmu_rate rate_request;
+};
+
+enum tb_clx {
+ TB_CLX_DISABLE,
+ TB_CL0S,
+ TB_CL1,
+ TB_CL2,
};
/**
@@ -122,7 +138,9 @@ struct tb_switch_tmu {
* @link_usb4: Upstream link is USB4
* @generation: Switch Thunderbolt generation
* @cap_plug_events: Offset to the plug events capability (%0 if not found)
+ * @cap_vsec_tmu: Offset to the TMU vendor specific capability (%0 if not found)
* @cap_lc: Offset to the link controller capability (%0 if not found)
+ * @cap_lp: Offset to the low power (CLx for TBT) capability (%0 if not found)
* @is_unplugged: The switch is going away
* @drom: DROM of the switch (%NULL if not found)
* @nvm: Pointer to the NVM if the switch has one (%NULL otherwise)
@@ -148,6 +166,7 @@ struct tb_switch_tmu {
* @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN
* @max_pcie_credits: Router preferred number of buffers for PCIe
* @max_dma_credits: Router preferred number of buffers for DMA/P2P
+ * @clx: CLx state on the upstream link of the router
*
* When the switch is being added or removed to the domain (other
* switches) you need to have domain lock held.
@@ -172,7 +191,9 @@ struct tb_switch {
bool link_usb4;
unsigned int generation;
int cap_plug_events;
+ int cap_vsec_tmu;
int cap_lc;
+ int cap_lp;
bool is_unplugged;
u8 *drom;
struct tb_nvm *nvm;
@@ -196,6 +217,7 @@ struct tb_switch {
unsigned int min_dp_main_credits;
unsigned int max_pcie_credits;
unsigned int max_dma_credits;
+ enum tb_clx clx;
};
/**
@@ -354,6 +376,7 @@ enum tb_path_port {
* when deactivating this path
* @hops: Path hops
* @path_length: How many hops the path uses
+ * @alloc_hopid: Does this path consume port HopID
*
* A path consists of a number of hops (see &struct tb_path_hop). To
* establish a PCIe tunnel two paths have to be created between the two
@@ -374,6 +397,7 @@ struct tb_path {
bool clear_fc;
struct tb_path_hop *hops;
int path_length;
+ bool alloc_hopid;
};
/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
@@ -740,6 +764,8 @@ void tb_switch_remove(struct tb_switch *sw);
void tb_switch_suspend(struct tb_switch *sw, bool runtime);
int tb_switch_resume(struct tb_switch *sw);
int tb_switch_reset(struct tb_switch *sw);
+int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
+ u32 value, int timeout_msec);
void tb_sw_set_unplugged(struct tb_switch *sw);
struct tb_port *tb_switch_find_port(struct tb_switch *sw,
enum tb_port_type type);
@@ -851,6 +877,20 @@ static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw)
return false;
}
+static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw)
+{
+ if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) {
+ switch (sw->config.device_id) {
+ case PCI_DEVICE_ID_INTEL_TGL_NHI0:
+ case PCI_DEVICE_ID_INTEL_TGL_NHI1:
+ case PCI_DEVICE_ID_INTEL_TGL_H_NHI0:
+ case PCI_DEVICE_ID_INTEL_TGL_H_NHI1:
+ return true;
+ }
+ }
+ return false;
+}
+
/**
* tb_switch_is_usb4() - Is the switch USB4 compliant
* @sw: Switch to check
@@ -889,13 +929,64 @@ int tb_switch_tmu_init(struct tb_switch *sw);
int tb_switch_tmu_post_time(struct tb_switch *sw);
int tb_switch_tmu_disable(struct tb_switch *sw);
int tb_switch_tmu_enable(struct tb_switch *sw);
-
-static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw)
+void tb_switch_tmu_configure(struct tb_switch *sw,
+ enum tb_switch_tmu_rate rate,
+ bool unidirectional);
+/**
+ * tb_switch_tmu_hifi_is_enabled() - Checks if the specified TMU mode is enabled
+ * @sw: Router whose TMU mode to check
+ * @unidirectional: If uni-directional (bi-directional otherwise)
+ *
+ * Returns true if the hardware TMU configuration matches the one passed
+ * in as a parameter, that is HiFi rate and either uni-directional or
+ * bi-directional mode.
+ */
+static inline bool tb_switch_tmu_hifi_is_enabled(const struct tb_switch *sw,
+ bool unidirectional)
{
return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI &&
- !sw->tmu.unidirectional;
+ sw->tmu.unidirectional == unidirectional;
}
+int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx);
+int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx);
+
+/**
+ * tb_switch_is_clx_enabled() - Checks if CLx is enabled
+ * @sw: Router to check the CLx state for
+ *
+ * Checks if CLx is enabled on the upstream link of the router.
+ * Not applicable for a host router.
+ */
+static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw)
+{
+ return sw->clx != TB_CLX_DISABLE;
+}
+
+/**
+ * tb_switch_is_cl0s_enabled() - Checks if CL0s is enabled
+ * @sw: Router to check CL0s for
+ *
+ * Checks if CL0s is enabled on the upstream link of the router.
+ * Not applicable for a host router.
+ */
+static inline bool tb_switch_is_cl0s_enabled(const struct tb_switch *sw)
+{
+ return sw->clx == TB_CL0S;
+}
+
+/**
+ * tb_switch_is_clx_supported() - Is CLx supported on this type of router
+ * @sw: The router to check CLx support for
+ */
+static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw)
+{
+ return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw);
+}
+
+int tb_switch_mask_clx_objections(struct tb_switch *sw);
+
+int tb_switch_pcie_l1_enable(struct tb_switch *sw);
+
int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged);
int tb_port_add_nfc_credits(struct tb_port *port, int credits);
int tb_port_clear_counter(struct tb_port *port, int counter);
@@ -957,7 +1048,8 @@ int tb_dp_port_enable(struct tb_port *port, bool enable);
struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid,
struct tb_port *dst, int dst_hopid,
- struct tb_port **last, const char *name);
+ struct tb_port **last, const char *name,
+ bool alloc_hopid);
struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
struct tb_port *dst, int dst_hopid, int link_nr,
const char *name);
@@ -988,6 +1080,7 @@ void tb_lc_unconfigure_port(struct tb_port *port);
int tb_lc_configure_xdomain(struct tb_port *port);
void tb_lc_unconfigure_xdomain(struct tb_port *port);
int tb_lc_start_lane_initialization(struct tb_port *port);
+bool tb_lc_is_clx_supported(struct tb_port *port);
int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags);
int tb_lc_set_sleep(struct tb_switch *sw);
bool tb_lc_lane_bonding_possible(struct tb_switch *sw);
@@ -1074,6 +1167,7 @@ void usb4_port_unconfigure_xdomain(struct tb_port *port);
int usb4_port_router_offline(struct tb_port *port);
int usb4_port_router_online(struct tb_port *port);
int usb4_port_enumerate_retimers(struct tb_port *port);
+bool usb4_port_clx_supported(struct tb_port *port);
int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index);
int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf,
diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h
index bcabfcb2fd03..fe1afa44c56d 100644
--- a/drivers/thunderbolt/tb_msgs.h
+++ b/drivers/thunderbolt/tb_msgs.h
@@ -535,15 +535,25 @@ struct tb_xdp_header {
u32 type;
};
+struct tb_xdp_error_response {
+ struct tb_xdp_header hdr;
+ u32 error;
+};
+
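+/*
+ * A response may carry an error instead of the expected payload, so each
+ * response structure below overlays struct tb_xdp_error_response in a
+ * union (see tb_xdp_handle_error()).
+ */
+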
struct tb_xdp_uuid {
struct tb_xdp_header hdr;
};
struct tb_xdp_uuid_response {
- struct tb_xdp_header hdr;
- uuid_t src_uuid;
- u32 src_route_hi;
- u32 src_route_lo;
+ union {
+ struct tb_xdp_error_response err;
+ struct {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+ u32 src_route_hi;
+ u32 src_route_lo;
+ };
+ };
};
struct tb_xdp_properties {
@@ -555,13 +565,18 @@ struct tb_xdp_properties {
};
struct tb_xdp_properties_response {
- struct tb_xdp_header hdr;
- uuid_t src_uuid;
- uuid_t dst_uuid;
- u16 offset;
- u16 data_length;
- u32 generation;
- u32 data[0];
+ union {
+ struct tb_xdp_error_response err;
+ struct {
+ struct tb_xdp_header hdr;
+ uuid_t src_uuid;
+ uuid_t dst_uuid;
+ u16 offset;
+ u16 data_length;
+ u32 generation;
+ u32 data[];
+ };
+ };
};
/*
@@ -580,7 +595,10 @@ struct tb_xdp_properties_changed {
};
struct tb_xdp_properties_changed_response {
- struct tb_xdp_header hdr;
+ union {
+ struct tb_xdp_error_response err;
+ struct tb_xdp_header hdr;
+ };
};
enum tb_xdp_error {
@@ -591,9 +609,4 @@ enum tb_xdp_error {
ERROR_NOT_READY,
};
-struct tb_xdp_error_response {
- struct tb_xdp_header hdr;
- u32 error;
-};
-
#endif
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 484f25be2849..a74f4878d3e7 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -33,7 +33,7 @@ enum tb_switch_cap {
enum tb_switch_vse_cap {
TB_VSE_CAP_PLUG_EVENTS = 0x01, /* also EEPROM */
TB_VSE_CAP_TIME2 = 0x03,
- TB_VSE_CAP_IECS = 0x04,
+ TB_VSE_CAP_CP_LP = 0x04,
TB_VSE_CAP_LINK_CONTROLLER = 0x06, /* also IECS */
};
@@ -246,6 +246,7 @@ enum usb4_switch_op {
#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16
#define TMU_RTR_CS_22 0x16
#define TMU_RTR_CS_24 0x18
+#define TMU_RTR_CS_25 0x19
enum tb_port_type {
TB_TYPE_INACTIVE = 0x000000,
@@ -305,16 +306,22 @@ struct tb_regs_port_header {
/* TMU adapter registers */
#define TMU_ADP_CS_3 0x03
#define TMU_ADP_CS_3_UDM BIT(29)
+#define TMU_ADP_CS_6 0x06
+#define TMU_ADP_CS_6_DTS BIT(1)
/* Lane adapter registers */
#define LANE_ADP_CS_0 0x00
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20)
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20
+#define LANE_ADP_CS_0_CL0S_SUPPORT BIT(26)
+#define LANE_ADP_CS_0_CL1_SUPPORT BIT(27)
#define LANE_ADP_CS_1 0x01
#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4)
#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4
#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1
#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3
+#define LANE_ADP_CS_1_CL0S_ENABLE BIT(10)
+#define LANE_ADP_CS_1_CL1_ENABLE BIT(11)
#define LANE_ADP_CS_1_LD BIT(14)
#define LANE_ADP_CS_1_LB BIT(15)
#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16)
@@ -323,6 +330,7 @@ struct tb_regs_port_header {
#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4
#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20)
#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20
+#define LANE_ADP_CS_1_PMS BIT(30)
/* USB4 port registers */
#define PORT_CS_1 0x01
@@ -338,6 +346,7 @@ struct tb_regs_port_header {
#define PORT_CS_18 0x12
#define PORT_CS_18_BE BIT(8)
#define PORT_CS_18_TCM BIT(9)
+#define PORT_CS_18_CPS BIT(10)
#define PORT_CS_18_WOU4S BIT(18)
#define PORT_CS_19 0x13
#define PORT_CS_19_PC BIT(3)
@@ -437,39 +446,79 @@ struct tb_regs_hop {
u32 unknown3:3; /* set to zero */
} __packed;
+/* TMU Thunderbolt 3 registers */
+#define TB_TIME_VSEC_3_CS_9 0x9
+#define TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK GENMASK(17, 16)
+#define TB_TIME_VSEC_3_CS_26 0x1a
+#define TB_TIME_VSEC_3_CS_26_TD BIT(22)
+
+/*
+ * Used for Titan Ridge only. The bits are part of the same register,
+ * TMU_ADP_CS_6 (see above), as in the USB4 spec, but these specific bits
+ * are used only for Titan Ridge and are reserved in the USB4 spec.
+ */
+#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK GENMASK(3, 2)
+#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 BIT(2)
+#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2 BIT(3)
+
+/* Plug Events registers */
+#define TB_PLUG_EVENTS_PCIE_WR_DATA 0x1b
+#define TB_PLUG_EVENTS_PCIE_CMD 0x1c
+#define TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK GENMASK(9, 0)
+#define TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT 10
+#define TB_PLUG_EVENTS_PCIE_CMD_BR_MASK GENMASK(17, 10)
+#define TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK BIT(21)
+#define TB_PLUG_EVENTS_PCIE_CMD_WR 0x1
+#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT 22
+#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_MASK GENMASK(24, 22)
+#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL 0x2
+#define TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK BIT(30)
+#define TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK BIT(31)
+#define TB_PLUG_EVENTS_PCIE_CMD_RD_DATA 0x1d
+
+/* CP Low Power registers */
+#define TB_LOW_PWR_C1_CL1 0x1
+#define TB_LOW_PWR_C1_CL1_OBJ_MASK GENMASK(4, 1)
+#define TB_LOW_PWR_C1_CL2_OBJ_MASK GENMASK(4, 1)
+#define TB_LOW_PWR_C1_PORT_A_MASK GENMASK(2, 1)
+#define TB_LOW_PWR_C0_PORT_B_MASK GENMASK(4, 3)
+#define TB_LOW_PWR_C3_CL1 0x3
+
/* Common link controller registers */
-#define TB_LC_DESC 0x02
-#define TB_LC_DESC_NLC_MASK GENMASK(3, 0)
-#define TB_LC_DESC_SIZE_SHIFT 8
-#define TB_LC_DESC_SIZE_MASK GENMASK(15, 8)
-#define TB_LC_DESC_PORT_SIZE_SHIFT 16
-#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16)
-#define TB_LC_FUSE 0x03
-#define TB_LC_SNK_ALLOCATION 0x10
-#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0)
-#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1
-#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4
-#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4)
-#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1
-#define TB_LC_POWER 0x740
+#define TB_LC_DESC 0x02
+#define TB_LC_DESC_NLC_MASK GENMASK(3, 0)
+#define TB_LC_DESC_SIZE_SHIFT 8
+#define TB_LC_DESC_SIZE_MASK GENMASK(15, 8)
+#define TB_LC_DESC_PORT_SIZE_SHIFT 16
+#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16)
+#define TB_LC_FUSE 0x03
+#define TB_LC_SNK_ALLOCATION 0x10
+#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0)
+#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1
+#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4
+#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4)
+#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1
+#define TB_LC_POWER 0x740
/* Link controller registers */
-#define TB_LC_PORT_ATTR 0x8d
-#define TB_LC_PORT_ATTR_BE BIT(12)
-
-#define TB_LC_SX_CTRL 0x96
-#define TB_LC_SX_CTRL_WOC BIT(1)
-#define TB_LC_SX_CTRL_WOD BIT(2)
-#define TB_LC_SX_CTRL_WODPC BIT(3)
-#define TB_LC_SX_CTRL_WODPD BIT(4)
-#define TB_LC_SX_CTRL_WOU4 BIT(5)
-#define TB_LC_SX_CTRL_WOP BIT(6)
-#define TB_LC_SX_CTRL_L1C BIT(16)
-#define TB_LC_SX_CTRL_L1D BIT(17)
-#define TB_LC_SX_CTRL_L2C BIT(20)
-#define TB_LC_SX_CTRL_L2D BIT(21)
-#define TB_LC_SX_CTRL_SLI BIT(29)
-#define TB_LC_SX_CTRL_UPSTREAM BIT(30)
-#define TB_LC_SX_CTRL_SLP BIT(31)
+#define TB_LC_PORT_ATTR 0x8d
+#define TB_LC_PORT_ATTR_BE BIT(12)
+
+#define TB_LC_SX_CTRL 0x96
+#define TB_LC_SX_CTRL_WOC BIT(1)
+#define TB_LC_SX_CTRL_WOD BIT(2)
+#define TB_LC_SX_CTRL_WODPC BIT(3)
+#define TB_LC_SX_CTRL_WODPD BIT(4)
+#define TB_LC_SX_CTRL_WOU4 BIT(5)
+#define TB_LC_SX_CTRL_WOP BIT(6)
+#define TB_LC_SX_CTRL_L1C BIT(16)
+#define TB_LC_SX_CTRL_L1D BIT(17)
+#define TB_LC_SX_CTRL_L2C BIT(20)
+#define TB_LC_SX_CTRL_L2D BIT(21)
+#define TB_LC_SX_CTRL_SLI BIT(29)
+#define TB_LC_SX_CTRL_UPSTREAM BIT(30)
+#define TB_LC_SX_CTRL_SLP BIT(31)
+#define TB_LC_LINK_ATTR 0x97
+#define TB_LC_LINK_ATTR_CPS BIT(18)
#endif
diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c
index 039c42a06000..e4a07a26f693 100644
--- a/drivers/thunderbolt/tmu.c
+++ b/drivers/thunderbolt/tmu.c
@@ -115,6 +115,11 @@ static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port)
return tb_port_tmu_set_unidirectional(port, false);
}
+static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port)
+{
+ return tb_port_tmu_set_unidirectional(port, true);
+}
+
static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
{
int ret;
@@ -128,23 +133,46 @@ static bool tb_port_tmu_is_unidirectional(struct tb_port *port)
return val & TMU_ADP_CS_3_UDM;
}
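+/*
+ * Note the inverted polarity below: TMU_ADP_CS_6_DTS disables time sync,
+ * so _disable() sets the bit and _enable() clears it.
+ */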
+static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync)
+{
+ u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0;
+
+ return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val);
+}
+
+static int tb_port_tmu_time_sync_disable(struct tb_port *port)
+{
+ return tb_port_tmu_time_sync(port, true);
+}
+
+static int tb_port_tmu_time_sync_enable(struct tb_port *port)
+{
+ return tb_port_tmu_time_sync(port, false);
+}
+
static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set)
{
+ u32 val, offset, bit;
int ret;
- u32 val;
- ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
- sw->tmu.cap + TMU_RTR_CS_0, 1);
+ if (tb_switch_is_usb4(sw)) {
+ offset = sw->tmu.cap + TMU_RTR_CS_0;
+ bit = TMU_RTR_CS_0_TD;
+ } else {
+ offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26;
+ bit = TB_TIME_VSEC_3_CS_26_TD;
+ }
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
if (ret)
return ret;
if (set)
- val |= TMU_RTR_CS_0_TD;
+ val |= bit;
else
- val &= ~TMU_RTR_CS_0_TD;
+ val &= ~bit;
- return tb_sw_write(sw, &val, TB_CFG_SWITCH,
- sw->tmu.cap + TMU_RTR_CS_0, 1);
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1);
}
/**
@@ -207,7 +235,8 @@ int tb_switch_tmu_init(struct tb_switch *sw)
*/
int tb_switch_tmu_post_time(struct tb_switch *sw)
{
- unsigned int post_local_time_offset, post_time_offset;
+ unsigned int post_time_high_offset, post_time_high = 0;
+ unsigned int post_local_time_offset, post_time_offset;
struct tb_switch *root_switch = sw->tb->root_switch;
u64 hi, mid, lo, local_time, post_time;
int i, ret, retries = 100;
@@ -247,6 +276,7 @@ int tb_switch_tmu_post_time(struct tb_switch *sw)
post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22;
post_time_offset = sw->tmu.cap + TMU_RTR_CS_24;
+ post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25;
/*
* Write the Grandmaster time to the Post Local Time registers
@@ -258,17 +288,24 @@ int tb_switch_tmu_post_time(struct tb_switch *sw)
goto out;
/*
- * Have the new switch update its local time (by writing 1 to
- * the post_time registers) and wait for the completion of the
- * same (post_time register becomes 0). This means the time has
- * been converged properly.
+ * Have the new switch update its local time by:
+ * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
+ * the Post Time High register.
+ * 2) writing 0 to the Post Time High register and then waiting until
+ * the Post Time register becomes 0. This means the time has been
+ * converged properly.
*/
- post_time = 1;
+ post_time = 0xffffffff00000001ULL;
ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
if (ret)
goto out;
+ ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
+ post_time_high_offset, 1);
+ if (ret)
+ goto out;
+
do {
usleep_range(5, 10);
ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
@@ -297,30 +334,54 @@ out:
*/
int tb_switch_tmu_disable(struct tb_switch *sw)
{
- int ret;
-
- if (!tb_switch_is_usb4(sw))
+ /*
+ * No need to disable TMU on devices that don't support CLx since
+ * on these devices (e.g. Alpine Ridge and earlier) the HiFi
+ * bi-directional TMU mode is enabled by default and we don't change it.
+ */
+ if (!tb_switch_is_clx_supported(sw))
return 0;
/* Already disabled? */
if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
return 0;
- if (sw->tmu.unidirectional) {
+
+ if (tb_route(sw)) {
+ bool unidirectional = tb_switch_tmu_hifi_is_enabled(sw, true);
struct tb_switch *parent = tb_switch_parent(sw);
- struct tb_port *up, *down;
+ struct tb_port *down, *up;
+ int ret;
- up = tb_upstream_port(sw);
down = tb_port_at(tb_route(sw), parent);
-
- /* The switch may be unplugged so ignore any errors */
- tb_port_tmu_unidirectional_disable(up);
- ret = tb_port_tmu_unidirectional_disable(down);
+ up = tb_upstream_port(sw);
+ /*
+ * In case of uni-directional time sync, the TMU handshake is
+ * initiated by the upstream router. In case of bi-directional
+ * time sync, it is initiated by the downstream router.
+ * Therefore, we change the rate to off in the respective
+ * router.
+ */
+ if (unidirectional)
+ tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
+ else
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+ tb_port_tmu_time_sync_disable(up);
+ ret = tb_port_tmu_time_sync_disable(down);
if (ret)
return ret;
- }
- tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+ if (unidirectional) {
+ /* The switch may be unplugged so ignore any errors */
+ tb_port_tmu_unidirectional_disable(up);
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ return ret;
+ }
+ } else {
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+ }
sw->tmu.unidirectional = false;
sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;
@@ -329,55 +390,231 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
return 0;
}
-/**
- * tb_switch_tmu_enable() - Enable TMU on a switch
- * @sw: Switch whose TMU to enable
- *
- * Enables TMU of a switch to be in bi-directional, HiFi mode. In this mode
- * all tunneling should work.
+static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *down, *up;
+
+ down = tb_port_at(tb_route(sw), parent);
+ up = tb_upstream_port(sw);
+ /*
+ * If any of the steps of setting the bi-directional or uni-directional
+ * TMU mode fails, get back to the TMU configuration in off mode.
+ * Additional failures in the functions below are ignored since the
+ * caller already reports a failure.
+ */
+ tb_port_tmu_time_sync_disable(down);
+ tb_port_tmu_time_sync_disable(up);
+ if (unidirectional)
+ tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
+ else
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+ tb_port_tmu_unidirectional_disable(down);
+ tb_port_tmu_unidirectional_disable(up);
+}
+
+/*
+ * This function is called when the previous TMU mode was
+ * TB_SWITCH_TMU_RATE_OFF.
*/
-int tb_switch_tmu_enable(struct tb_switch *sw)
+static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
int ret;
- if (!tb_switch_is_usb4(sw))
- return 0;
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
- if (tb_switch_tmu_is_enabled(sw))
- return 0;
+ ret = tb_port_tmu_unidirectional_disable(up);
+ if (ret)
+ return ret;
- ret = tb_switch_tmu_set_time_disruption(sw, true);
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ goto out;
+
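+ /*
+ * In bi-directional mode the TMU handshake is initiated by the
+ * downstream router, so the rate is written to the router itself.
+ */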
+ ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(up);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(down);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ __tb_switch_tmu_off(sw, false);
+ return ret;
+}
+
+static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
+{
+ u32 val;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
if (ret)
return ret;
- /* Change mode to bi-directional */
- if (tb_route(sw) && sw->tmu.unidirectional) {
- struct tb_switch *parent = tb_switch_parent(sw);
- struct tb_port *up, *down;
+ val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;
- up = tb_upstream_port(sw);
- down = tb_port_at(tb_route(sw), parent);
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
+}
- ret = tb_port_tmu_unidirectional_disable(down);
- if (ret)
- return ret;
+static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw)
+{
+ struct tb_port *up = tb_upstream_port(sw);
- ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ return tb_port_tmu_write(up, TMU_ADP_CS_6,
+ TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
+ TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK);
+}
+
+/*
+ * This function is called when the previous TMU mode was
+ * TB_SWITCH_TMU_RATE_OFF.
+ */
+static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+ int ret;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
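+ /*
+ * In uni-directional mode the TMU handshake is initiated by the
+ * upstream router, so the rate is written to the parent router.
+ */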
+ ret = tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_HIFI);
+ if (ret)
+ return ret;
+
+ ret = tb_port_tmu_unidirectional_enable(up);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(up);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_unidirectional_enable(down);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(down);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ __tb_switch_tmu_off(sw, true);
+ return ret;
+}
+
+static int tb_switch_tmu_hifi_enable(struct tb_switch *sw)
+{
+ bool unidirectional = sw->tmu.unidirectional_request;
+ int ret;
+
+ if (unidirectional && !sw->tmu.has_ucap)
+ return -EOPNOTSUPP;
+
+ /*
+ * No need to enable TMU on devices that don't support CLx since on
+ * these devices (e.g. Alpine Ridge and earlier) the HiFi
+ * bi-directional TMU mode is enabled by default.
+ */
+ if (!tb_switch_is_clx_supported(sw))
+ return 0;
+
+ if (tb_switch_tmu_hifi_is_enabled(sw, sw->tmu.unidirectional_request))
+ return 0;
+
+ if (tb_switch_is_titan_ridge(sw) && unidirectional) {
+ /* Titan Ridge supports only CL0s */
+ if (!tb_switch_is_cl0s_enabled(sw))
+ return -EOPNOTSUPP;
+
+ ret = tb_switch_tmu_objection_mask(sw);
if (ret)
return ret;
- ret = tb_port_tmu_unidirectional_disable(up);
+ ret = tb_switch_tmu_unidirectional_enable(sw);
if (ret)
return ret;
+ }
+
+ ret = tb_switch_tmu_set_time_disruption(sw, true);
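+ /* Flag time disruption while the TMU configuration is changed */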
+ if (ret)
+ return ret;
+
+ if (tb_route(sw)) {
+ /* The only supported mode changes are from OFF to HiFi-Uni/HiFi-BiDir */
+ if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
+ if (unidirectional)
+ ret = __tb_switch_tmu_enable_unidirectional(sw);
+ else
+ ret = __tb_switch_tmu_enable_bidirectional(sw);
+ if (ret)
+ return ret;
+ }
+ sw->tmu.unidirectional = unidirectional;
} else {
+ /*
+ * Host router port configurations are written as part of the
+ * configuration of the parent's downstream port for each child
+ * node - see above.
+ * Here only the host router's rate configuration is written.
+ */
ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
if (ret)
return ret;
}
- sw->tmu.unidirectional = false;
sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;
- tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
+ tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
return tb_switch_tmu_set_time_disruption(sw, false);
}
+
+/**
+ * tb_switch_tmu_enable() - Enable TMU on a router
+ * @sw: Router whose TMU to enable
+ *
+ * Enables TMU of a router to be in uni-directional or bi-directional HiFi mode.
+ * Calling tb_switch_tmu_configure() is required before calling this function,
+ * to select the HiFi rate and the directionality (uni-directional/bi-directional).
+ * In both modes all tunneling should work. Uni-directional mode is required for
+ * CLx (Link Low-Power) to work.
+ */
+int tb_switch_tmu_enable(struct tb_switch *sw)
+{
+ if (sw->tmu.rate_request == TB_SWITCH_TMU_RATE_NORMAL)
+ return -EOPNOTSUPP;
+
+ return tb_switch_tmu_hifi_enable(sw);
+}
+
+/**
+ * tb_switch_tmu_configure() - Configure the TMU rate and directionality
+ * @sw: Router whose mode to change
+ * @rate: Rate to configure Off/LowRes/HiFi
+ * @unidirectional: If uni-directional (bi-directional otherwise)
+ *
+ * Selects the rate of the TMU and directionality (uni-directional or
+ * bi-directional). Must be called before tb_switch_tmu_enable().
+ */
+void tb_switch_tmu_configure(struct tb_switch *sw,
+ enum tb_switch_tmu_rate rate, bool unidirectional)
+{
+ sw->tmu.unidirectional_request = unidirectional;
+ sw->tmu.rate_request = rate;
+}
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index bb5cc480fc9a..a473cc7d9a8d 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -207,12 +207,14 @@ static int tb_pci_init_path(struct tb_path *path)
* tb_tunnel_discover_pci() - Discover existing PCIe tunnels
* @tb: Pointer to the domain structure
* @down: PCIe downstream adapter
+ * @alloc_hopid: Allocate HopIDs from visited ports
*
* If @down adapter is active, follows the tunnel to the PCIe upstream
* adapter and back. Returns the discovered tunnel or %NULL if there was
* no tunnel.
*/
-struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
+ bool alloc_hopid)
{
struct tb_tunnel *tunnel;
struct tb_path *path;
@@ -233,7 +235,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
* case.
*/
path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1,
- &tunnel->dst_port, "PCIe Up");
+ &tunnel->dst_port, "PCIe Up", alloc_hopid);
if (!path) {
/* Just disable the downstream port */
tb_pci_port_enable(down, false);
@@ -244,7 +246,7 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down)
goto err_free;
path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL,
- "PCIe Down");
+ "PCIe Down", alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_PCI_PATH_DOWN] = path;
@@ -761,6 +763,7 @@ static int tb_dp_init_video_path(struct tb_path *path)
* tb_tunnel_discover_dp() - Discover existing Display Port tunnels
* @tb: Pointer to the domain structure
* @in: DP in adapter
+ * @alloc_hopid: Allocate HopIDs from visited ports
*
* If @in adapter is active, follows the tunnel to the DP out adapter
* and back. Returns the discovered tunnel or %NULL if there was no
@@ -768,7 +771,8 @@ static int tb_dp_init_video_path(struct tb_path *path)
*
* Return: DP tunnel or %NULL if no tunnel found.
*/
-struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
+ bool alloc_hopid)
{
struct tb_tunnel *tunnel;
struct tb_port *port;
@@ -787,7 +791,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
tunnel->src_port = in;
path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1,
- &tunnel->dst_port, "Video");
+ &tunnel->dst_port, "Video", alloc_hopid);
if (!path) {
/* Just disable the DP IN port */
tb_dp_port_enable(in, false);
@@ -797,14 +801,15 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in)
if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT]))
goto err_free;
- path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX");
+ path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX",
+ alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_DP_AUX_PATH_OUT] = path;
tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]);
path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID,
- &port, "AUX RX");
+ &port, "AUX RX", alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_DP_AUX_PATH_IN] = path;
@@ -1343,12 +1348,14 @@ static void tb_usb3_init_path(struct tb_path *path)
* tb_tunnel_discover_usb3() - Discover existing USB3 tunnels
* @tb: Pointer to the domain structure
* @down: USB3 downstream adapter
+ * @alloc_hopid: Allocate HopIDs from visited ports
*
* If @down adapter is active, follows the tunnel to the USB3 upstream
* adapter and back. Returns the discovered tunnel or %NULL if there was
* no tunnel.
*/
-struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
+ bool alloc_hopid)
{
struct tb_tunnel *tunnel;
struct tb_path *path;
@@ -1369,7 +1376,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
* case.
*/
path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
- &tunnel->dst_port, "USB3 Down");
+ &tunnel->dst_port, "USB3 Down", alloc_hopid);
if (!path) {
/* Just disable the downstream port */
tb_usb3_port_enable(down, false);
@@ -1379,7 +1386,7 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
- "USB3 Up");
+ "USB3 Up", alloc_hopid);
if (!path)
goto err_deactivate;
tunnel->paths[TB_USB3_PATH_UP] = path;
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index eea14e24f7e0..03e56076b5bc 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -64,10 +64,12 @@ struct tb_tunnel {
int allocated_down;
};
-struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down,
+ bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up,
struct tb_port *down);
-struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in);
+struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in,
+ bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in,
struct tb_port *out, int max_up,
int max_down);
@@ -77,7 +79,8 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
int receive_ring);
bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
int transmit_ring, int receive_path, int receive_ring);
-struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down);
+struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down,
+ bool alloc_hopid);
struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up,
struct tb_port *down, int max_up,
int max_down);
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index ceddbe7e9f93..3a2e7126db9d 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -50,28 +50,6 @@ enum usb4_ba_index {
#define USB4_BA_VALUE_MASK GENMASK(31, 16)
#define USB4_BA_VALUE_SHIFT 16
-static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit,
- u32 value, int timeout_msec)
-{
- ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec);
-
- do {
- u32 val;
- int ret;
-
- ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1);
- if (ret)
- return ret;
-
- if ((val & bit) == value)
- return 0;
-
- usleep_range(50, 100);
- } while (ktime_before(ktime_get(), timeout));
-
- return -ETIMEDOUT;
-}
-
static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
u32 *metadata, u8 *status,
const void *tx_data, size_t tx_dwords,
@@ -97,7 +75,7 @@ static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode,
if (ret)
return ret;
- ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
+ ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500);
if (ret)
return ret;
@@ -303,8 +281,8 @@ int usb4_switch_setup(struct tb_switch *sw)
if (ret)
return ret;
- return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
- ROUTER_CS_6_CR, 50);
+ return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR,
+ ROUTER_CS_6_CR, 50);
}
/**
@@ -480,8 +458,8 @@ int usb4_switch_set_sleep(struct tb_switch *sw)
if (ret)
return ret;
- return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
- ROUTER_CS_6_SLPR, 500);
+ return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR,
+ ROUTER_CS_6_SLPR, 500);
}
/**
@@ -1386,6 +1364,26 @@ int usb4_port_enumerate_retimers(struct tb_port *port)
USB4_SB_OPCODE, &val, sizeof(val));
}
+/**
+ * usb4_port_clx_supported() - Check if CLx is supported by the link
+ * @port: Port to check for CLx support for
+ *
+ * PORT_CS_18_CPS bit reflects if the link supports CLx including
+ * active cables (if connected on the link).
+ */
+bool usb4_port_clx_supported(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_18, 1);
+ if (ret)
+ return false;
+
+ return !!(val & PORT_CS_18_CPS);
+}
+
static inline int usb4_port_retimer_op(struct tb_port *port, u8 index,
enum usb4_sb_opcode opcode,
int timeout_msec)
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index eff32499610f..01d6b724ca51 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -214,16 +214,12 @@ static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid));
}
-static int tb_xdp_handle_error(const struct tb_xdp_header *hdr)
+static int tb_xdp_handle_error(const struct tb_xdp_error_response *res)
{
- const struct tb_xdp_error_response *error;
-
- if (hdr->type != ERROR_RESPONSE)
+ if (res->hdr.type != ERROR_RESPONSE)
return 0;
- error = (const struct tb_xdp_error_response *)hdr;
-
- switch (error->error) {
+ switch (res->error) {
case ERROR_UNKNOWN_PACKET:
case ERROR_UNKNOWN_DOMAIN:
return -EIO;
@@ -257,7 +253,7 @@ static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
if (ret)
return ret;
- ret = tb_xdp_handle_error(&res.hdr);
+ ret = tb_xdp_handle_error(&res.err);
if (ret)
return ret;
@@ -329,7 +325,7 @@ static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
if (ret)
goto err;
- ret = tb_xdp_handle_error(&res->hdr);
+ ret = tb_xdp_handle_error(&res->err);
if (ret)
goto err;
@@ -462,7 +458,7 @@ static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
if (ret)
return ret;
- return tb_xdp_handle_error(&res.hdr);
+ return tb_xdp_handle_error(&res.err);
}
static int
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index d24af649a8bb..5ed19a9857ad 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -151,7 +151,7 @@ static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
address = (unsigned long)(void *)buf;
goldfish_tty_rw(qtty, address, count, 0);
- tty_schedule_flip(&qtty->port);
+ tty_flip_buffer_push(&qtty->port);
return IRQ_HANDLED;
}
@@ -298,7 +298,7 @@ static int goldfish_tty_probe(struct platform_device *pdev)
struct resource *r;
struct device *ttydev;
void __iomem *base;
- u32 irq;
+ int irq;
unsigned int line;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -313,14 +313,12 @@ static int goldfish_tty_probe(struct platform_device *pdev)
return -ENOMEM;
}
- r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!r) {
- pr_err("goldfish_tty: No IRQ resource available!\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
goto err_unmap;
}
- irq = r->start;
-
mutex_lock(&goldfish_tty_lock);
if (pdev->id == PLATFORM_DEVID_NONE)
diff --git a/drivers/tty/mips_ejtag_fdc.c b/drivers/tty/mips_ejtag_fdc.c
index 02c10a968de1..31dceb5039b5 100644
--- a/drivers/tty/mips_ejtag_fdc.c
+++ b/drivers/tty/mips_ejtag_fdc.c
@@ -955,19 +955,18 @@ static int mips_ejtag_fdc_tty_probe(struct mips_cdmm_device *dev)
mips_ejtag_fdc_con.tty_drv = driver;
init_waitqueue_head(&priv->waitqueue);
- priv->thread = kthread_create(mips_ejtag_fdc_put, priv, priv->fdc_name);
- if (IS_ERR(priv->thread)) {
- ret = PTR_ERR(priv->thread);
- dev_err(priv->dev, "Couldn't create kthread (%d)\n", ret);
- goto err_destroy_ports;
- }
/*
* Bind the writer thread to the right CPU so it can't migrate.
* The channels are per-CPU and we want all channel I/O to be on a
* single predictable CPU.
*/
- kthread_bind(priv->thread, dev->cpu);
- wake_up_process(priv->thread);
+ priv->thread = kthread_run_on_cpu(mips_ejtag_fdc_put, priv,
+ dev->cpu, "ttyFDC/%u");
+ if (IS_ERR(priv->thread)) {
+ ret = PTR_ERR(priv->thread);
+ dev_err(priv->dev, "Couldn't create kthread (%d)\n", ret);
+ goto err_destroy_ports;
+ }
/* Look for an FDC IRQ */
priv->irq = get_c0_fdc_int();
@@ -1095,15 +1094,14 @@ static int mips_ejtag_fdc_tty_cpu_up(struct mips_cdmm_device *dev)
}
/* Restart the kthread */
- priv->thread = kthread_create(mips_ejtag_fdc_put, priv, priv->fdc_name);
+ /* Bind it back to the right CPU and set it off */
+ priv->thread = kthread_run_on_cpu(mips_ejtag_fdc_put, priv,
+ dev->cpu, "ttyFDC/%u");
if (IS_ERR(priv->thread)) {
ret = PTR_ERR(priv->thread);
dev_err(priv->dev, "Couldn't re-create kthread (%d)\n", ret);
goto out;
}
- /* Bind it back to the right CPU and set it off */
- kthread_bind(priv->thread, dev->cpu);
- wake_up_process(priv->thread);
out:
return ret;
}
diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
index e37683e25055..f3c72ab1476c 100644
--- a/drivers/tty/moxa.c
+++ b/drivers/tty/moxa.c
@@ -1683,7 +1683,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
if (inited && !tty_throttled(tty) &&
MoxaPortRxQueue(p) > 0) { /* RX */
MoxaPortReadData(p);
- tty_schedule_flip(&p->port);
+ tty_flip_buffer_push(&p->port);
}
} else {
clear_bit(EMPTYWAIT, &p->statusflags);
@@ -1708,7 +1708,7 @@ static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */
tty_insert_flip_char(&p->port, 0, TTY_BREAK);
- tty_schedule_flip(&p->port);
+ tty_flip_buffer_push(&p->port);
}
if (intr & IntrLine)
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 93a95a135a71..c858aff721c4 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -159,14 +159,32 @@
#define MXSER_BAUD_BASE 921600
#define MXSER_CUSTOM_DIVISOR (MXSER_BAUD_BASE * 16)
-#define PCI_DEVICE_ID_POS104UL 0x1044
-#define PCI_DEVICE_ID_CB108 0x1080
-#define PCI_DEVICE_ID_CP102UF 0x1023
-#define PCI_DEVICE_ID_CP112UL 0x1120
-#define PCI_DEVICE_ID_CB114 0x1142
-#define PCI_DEVICE_ID_CP114UL 0x1143
-#define PCI_DEVICE_ID_CB134I 0x1341
-#define PCI_DEVICE_ID_CP138U 0x1380
+#define PCI_DEVICE_ID_MOXA_RC7000 0x0001
+#define PCI_DEVICE_ID_MOXA_CP102 0x1020
+#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021
+#define PCI_DEVICE_ID_MOXA_CP102U 0x1022
+#define PCI_DEVICE_ID_MOXA_CP102UF 0x1023
+#define PCI_DEVICE_ID_MOXA_C104 0x1040
+#define PCI_DEVICE_ID_MOXA_CP104U 0x1041
+#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042
+#define PCI_DEVICE_ID_MOXA_CP104EL 0x1043
+#define PCI_DEVICE_ID_MOXA_POS104UL 0x1044
+#define PCI_DEVICE_ID_MOXA_CB108 0x1080
+#define PCI_DEVICE_ID_MOXA_CP112UL 0x1120
+#define PCI_DEVICE_ID_MOXA_CT114 0x1140
+#define PCI_DEVICE_ID_MOXA_CP114 0x1141
+#define PCI_DEVICE_ID_MOXA_CB114 0x1142
+#define PCI_DEVICE_ID_MOXA_CP114UL 0x1143
+#define PCI_DEVICE_ID_MOXA_CP118U 0x1180
+#define PCI_DEVICE_ID_MOXA_CP118EL 0x1181
+#define PCI_DEVICE_ID_MOXA_CP132 0x1320
+#define PCI_DEVICE_ID_MOXA_CP132U 0x1321
+#define PCI_DEVICE_ID_MOXA_CP134U 0x1340
+#define PCI_DEVICE_ID_MOXA_CB134I 0x1341
+#define PCI_DEVICE_ID_MOXA_CP138U 0x1380
+#define PCI_DEVICE_ID_MOXA_C168 0x1680
+#define PCI_DEVICE_ID_MOXA_CP168U 0x1681
+#define PCI_DEVICE_ID_MOXA_CP168EL 0x1682
#define MXSER_NPORTS(ddata) ((ddata) & 0xffU)
#define MXSER_HIGHBAUD 0x0100
@@ -194,32 +212,32 @@ static const struct {
/* driver_data correspond to the lines in the structure above
see also ISA probe function before you change something */
static const struct pci_device_id mxser_pcibrds[] = {
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C168), .driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C104), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132), .driver_data = 2 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CT114), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102), .driver_data = 2 | MXSER_HIGHBAUD },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104U), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168U), .driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132U), .driver_data = 2 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134U), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104JU),.driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_RC7000), .driver_data = 8 }, /* RC7000 */
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118U), .driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102UL),.driver_data = 2 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102U), .driver_data = 2 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118EL),.driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168EL),.driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104EL),.driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB108), .driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB114), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CB134I), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP138U), .driver_data = 8 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_POS104UL), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP114UL), .driver_data = 4 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP102UF), .driver_data = 2 },
- { PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP112UL), .driver_data = 2 },
+ { PCI_DEVICE_DATA(MOXA, C168, 8) },
+ { PCI_DEVICE_DATA(MOXA, C104, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP132, 2) },
+ { PCI_DEVICE_DATA(MOXA, CP114, 4) },
+ { PCI_DEVICE_DATA(MOXA, CT114, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP102, 2 | MXSER_HIGHBAUD) },
+ { PCI_DEVICE_DATA(MOXA, CP104U, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP168U, 8) },
+ { PCI_DEVICE_DATA(MOXA, CP132U, 2) },
+ { PCI_DEVICE_DATA(MOXA, CP134U, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP104JU, 4) },
+ { PCI_DEVICE_DATA(MOXA, RC7000, 8) }, /* RC7000 */
+ { PCI_DEVICE_DATA(MOXA, CP118U, 8) },
+ { PCI_DEVICE_DATA(MOXA, CP102UL, 2) },
+ { PCI_DEVICE_DATA(MOXA, CP102U, 2) },
+ { PCI_DEVICE_DATA(MOXA, CP118EL, 8) },
+ { PCI_DEVICE_DATA(MOXA, CP168EL, 8) },
+ { PCI_DEVICE_DATA(MOXA, CP104EL, 4) },
+ { PCI_DEVICE_DATA(MOXA, CB108, 8) },
+ { PCI_DEVICE_DATA(MOXA, CB114, 4) },
+ { PCI_DEVICE_DATA(MOXA, CB134I, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP138U, 8) },
+ { PCI_DEVICE_DATA(MOXA, POS104UL, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP114UL, 4) },
+ { PCI_DEVICE_DATA(MOXA, CP102UF, 2) },
+ { PCI_DEVICE_DATA(MOXA, CP112UL, 2) },
{ }
};
MODULE_DEVICE_TABLE(pci, mxser_pcibrds);
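
The table above works because PCI_DEVICE_DATA() pastes the vendor and device
names into the canonical macro names, which is why the IDs added at the top of
this patch follow the PCI_DEVICE_ID_MOXA_<model> pattern. A minimal sketch of
what one entry expands to, per the macro in include/linux/pci.h:

	/* { PCI_DEVICE_DATA(MOXA, C168, 8) } expands to roughly: */
	{ .vendor = PCI_VENDOR_ID_MOXA,
	  .device = PCI_DEVICE_ID_MOXA_C168,
	  .subvendor = PCI_ANY_ID,
	  .subdevice = PCI_ANY_ID,
	  .driver_data = (kernel_ulong_t)8 }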
@@ -251,8 +269,6 @@ struct mxser_port {
u8 MCR; /* Modem control register */
u8 FCR; /* FIFO control register */
- bool ldisc_stop_rx;
-
struct async_icount icount; /* kernel counters for 4 input interrupts */
unsigned int timeout;
@@ -262,7 +278,6 @@ struct mxser_port {
unsigned int xmit_head;
unsigned int xmit_tail;
unsigned int xmit_cnt;
- int closing;
spinlock_t slock;
};
@@ -684,27 +699,34 @@ static void mxser_change_speed(struct tty_struct *tty, struct ktermios *old_term
outb(cval, info->ioaddr + UART_LCR);
}
-static void mxser_check_modem_status(struct tty_struct *tty,
- struct mxser_port *port, int status)
+static u8 mxser_check_modem_status(struct tty_struct *tty,
+ struct mxser_port *port)
{
+ u8 msr = inb(port->ioaddr + UART_MSR);
+
+ if (!(msr & UART_MSR_ANY_DELTA))
+ return msr;
+
/* update input line counters */
- if (status & UART_MSR_TERI)
+ if (msr & UART_MSR_TERI)
port->icount.rng++;
- if (status & UART_MSR_DDSR)
+ if (msr & UART_MSR_DDSR)
port->icount.dsr++;
- if (status & UART_MSR_DDCD)
+ if (msr & UART_MSR_DDCD)
port->icount.dcd++;
- if (status & UART_MSR_DCTS)
+ if (msr & UART_MSR_DCTS)
port->icount.cts++;
wake_up_interruptible(&port->port.delta_msr_wait);
- if (tty_port_check_carrier(&port->port) && (status & UART_MSR_DDCD)) {
- if (status & UART_MSR_DCD)
+ if (tty_port_check_carrier(&port->port) && (msr & UART_MSR_DDCD)) {
+ if (msr & UART_MSR_DCD)
wake_up_interruptible(&port->port.open_wait);
}
if (tty_port_cts_enabled(&port->port))
- mxser_handle_cts(tty, port, status);
+ mxser_handle_cts(tty, port, msr);
+
+ return msr;
}
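
Folding the UART_MSR read and the UART_MSR_ANY_DELTA short-circuit into the
helper lets both call sites shrink to a single call, while the raw MSR value is
still returned to callers that need the current line state. The call-site
pattern becomes, as a sketch (see the tiocmget hunk below):

	spin_lock_irqsave(&info->slock, flags);
	msr = mxser_check_modem_status(tty, info); /* reads MSR, updates icount */
	spin_unlock_irqrestore(&info->slock, flags);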
static void mxser_disable_and_clear_FIFO(struct mxser_port *info)
@@ -802,6 +824,20 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
}
/*
+ * To stop accepting input, we disable the receive line status interrupts, and
+ * tell the interrupt driver to stop checking the data ready bit in the line
+ * status register.
+ */
+static void mxser_stop_rx(struct mxser_port *info)
+{
+ info->IER &= ~UART_IER_RLSI;
+ if (info->board->must_hwid)
+ info->IER &= ~MOXA_MUST_RECV_ISR;
+
+ outb(info->IER, info->ioaddr + UART_IER);
+}
+
+/*
* This routine will shutdown a serial port
*/
static void mxser_shutdown_port(struct tty_port *port)
@@ -811,6 +847,8 @@ static void mxser_shutdown_port(struct tty_port *port)
spin_lock_irqsave(&info->slock, flags);
+ mxser_stop_rx(info);
+
/*
* clear delta_msr_wait queue to avoid mem leaks: we may free the irq
 * here so the queue might never be woken up
@@ -874,64 +912,9 @@ static void mxser_flush_buffer(struct tty_struct *tty)
tty_wakeup(tty);
}
-
-static void mxser_close_port(struct tty_port *port)
-{
- struct mxser_port *info = container_of(port, struct mxser_port, port);
- unsigned long timeout;
- /*
- * At this point we stop accepting input. To do this, we
- * disable the receive line status interrupts, and tell the
- * interrupt driver to stop checking the data ready bit in the
- * line status register.
- */
- info->IER &= ~UART_IER_RLSI;
- if (info->board->must_hwid)
- info->IER &= ~MOXA_MUST_RECV_ISR;
-
- outb(info->IER, info->ioaddr + UART_IER);
- /*
- * Before we drop DTR, make sure the UART transmitter
- * has completely drained; this is especially
- * important if there is a transmit FIFO!
- */
- timeout = jiffies + HZ;
- while (!(inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT)) {
- schedule_timeout_interruptible(5);
- if (time_after(jiffies, timeout))
- break;
- }
-}
-
-/*
- * This routine is called when the serial port gets closed. First, we
- * wait for the last remaining data to be sent. Then, we unlink its
- * async structure from the interrupt chain if necessary, and we free
- * that IRQ if nothing is left in the chain.
- */
static void mxser_close(struct tty_struct *tty, struct file *filp)
{
- struct mxser_port *info = tty->driver_data;
- struct tty_port *port = &info->port;
-
- if (info == NULL)
- return;
- if (tty_port_close_start(port, tty, filp) == 0)
- return;
- info->closing = 1;
- mutex_lock(&port->mutex);
- mxser_close_port(port);
- mxser_flush_buffer(tty);
- if (tty_port_initialized(port) && C_HUPCL(tty))
- tty_port_lower_dtr_rts(port);
- mxser_shutdown_port(port);
- tty_port_set_initialized(port, 0);
- mutex_unlock(&port->mutex);
- info->closing = 0;
- /* Right now the tty_port set is done outside of the close_end helper
- as we don't yet have everyone using refcounts */
- tty_port_close_end(port, tty);
- tty_port_tty_set(port, NULL);
+ tty_port_close(tty->port, tty, filp);
}
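
tty_port_close() performs the same sequence the removed code open-coded, with
the output drain handled generically via the port's closing_wait and the
receiver shutdown moved into mxser_shutdown_port() above. A hedged outline of
the helper, not a verbatim copy (see drivers/tty/tty_port.c for the real body):

	if (tty_port_close_start(port, tty, filp) == 0)
		return;                  /* port still has other users */
	tty_port_shutdown(port, tty);    /* drop DTR/RTS if HUPCL, call ->shutdown() */
	tty_port_close_end(port, tty);
	tty_port_tty_set(port, NULL);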
static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int count)
@@ -940,9 +923,6 @@ static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int cou
struct mxser_port *info = tty->driver_data;
unsigned long flags;
- if (!info->port.xmit_buf)
- return 0;
-
while (1) {
c = min_t(int, count, min(SERIAL_XMIT_SIZE - info->xmit_cnt - 1,
SERIAL_XMIT_SIZE - info->xmit_head));
@@ -973,9 +953,6 @@ static int mxser_put_char(struct tty_struct *tty, unsigned char ch)
struct mxser_port *info = tty->driver_data;
unsigned long flags;
- if (!info->port.xmit_buf)
- return 0;
-
if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1)
return 0;
@@ -993,7 +970,7 @@ static void mxser_flush_chars(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
- if (!info->xmit_cnt || tty->flow.stopped || !info->port.xmit_buf ||
+ if (!info->xmit_cnt || tty->flow.stopped ||
(tty->hw_stopped && !mxser_16550A_or_MUST(info)))
return;
@@ -1153,25 +1130,24 @@ static int mxser_get_lsr_info(struct mxser_port *info,
static int mxser_tiocmget(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
- unsigned char control, status;
+ unsigned char control;
unsigned long flags;
+ u8 msr;
if (tty_io_error(tty))
return -EIO;
spin_lock_irqsave(&info->slock, flags);
control = info->MCR;
- status = inb(info->ioaddr + UART_MSR);
- if (status & UART_MSR_ANY_DELTA)
- mxser_check_modem_status(tty, info, status);
+ msr = mxser_check_modem_status(tty, info);
spin_unlock_irqrestore(&info->slock, flags);
return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) |
((control & UART_MCR_DTR) ? TIOCM_DTR : 0) |
- ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) |
- ((status & UART_MSR_RI) ? TIOCM_RNG : 0) |
- ((status & UART_MSR_DSR) ? TIOCM_DSR : 0) |
- ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
+ ((msr & UART_MSR_DCD) ? TIOCM_CAR : 0) |
+ ((msr & UART_MSR_RI) ? TIOCM_RNG : 0) |
+ ((msr & UART_MSR_DSR) ? TIOCM_DSR : 0) |
+ ((msr & UART_MSR_CTS) ? TIOCM_CTS : 0);
}
static int mxser_tiocmset(struct tty_struct *tty,
@@ -1326,11 +1302,14 @@ static int mxser_get_icount(struct tty_struct *tty,
return 0;
}
-static void mxser_stoprx(struct tty_struct *tty)
+/*
+ * This routine is called by the upper-layer tty layer to signal that
+ * incoming characters should be throttled.
+ */
+static void mxser_throttle(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
- info->ldisc_stop_rx = true;
if (I_IXOFF(tty)) {
if (info->board->must_hwid) {
info->IER &= ~MOXA_MUST_RECV_ISR;
@@ -1349,21 +1328,11 @@ static void mxser_stoprx(struct tty_struct *tty)
}
}
-/*
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
- */
-static void mxser_throttle(struct tty_struct *tty)
-{
- mxser_stoprx(tty);
-}
-
static void mxser_unthrottle(struct tty_struct *tty)
{
struct mxser_port *info = tty->driver_data;
/* startrx */
- info->ldisc_stop_rx = false;
if (I_IXOFF(tty)) {
if (info->x_char)
info->x_char = 0;
@@ -1409,7 +1378,7 @@ static void mxser_start(struct tty_struct *tty)
unsigned long flags;
spin_lock_irqsave(&info->slock, flags);
- if (info->xmit_cnt && info->port.xmit_buf)
+ if (info->xmit_cnt)
__mxser_start_tx(info);
spin_unlock_irqrestore(&info->slock, flags);
}
@@ -1442,15 +1411,25 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi
}
}
+static bool mxser_tx_empty(struct mxser_port *info)
+{
+ unsigned long flags;
+ u8 lsr;
+
+ spin_lock_irqsave(&info->slock, flags);
+ lsr = inb(info->ioaddr + UART_LSR);
+ spin_unlock_irqrestore(&info->slock, flags);
+
+ return !(lsr & UART_LSR_TEMT);
+}
+
/*
* mxser_wait_until_sent() --- wait until the transmitter is empty
*/
static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
{
struct mxser_port *info = tty->driver_data;
- unsigned long orig_jiffies, char_time;
- unsigned long flags;
- int lsr;
+ unsigned long expire, char_time;
if (info->type == PORT_UNKNOWN)
return;
@@ -1458,7 +1437,6 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
if (info->xmit_fifo_size == 0)
return; /* Just in case.... */
- orig_jiffies = jiffies;
/*
* Set the check interval to be 1/5 of the estimated time to
* send a single character, and make it at least 1. The check
@@ -1473,6 +1451,9 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
char_time = 1;
if (timeout && timeout < char_time)
char_time = timeout;
+
+ char_time = jiffies_to_msecs(char_time);
+
/*
* If the transmitter hasn't cleared in twice the approximate
* amount of time to send the entire FIFO, it probably won't
@@ -1485,18 +1466,15 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
if (!timeout || timeout > 2 * info->timeout)
timeout = 2 * info->timeout;
- spin_lock_irqsave(&info->slock, flags);
- while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) {
- spin_unlock_irqrestore(&info->slock, flags);
- schedule_timeout_interruptible(char_time);
- spin_lock_irqsave(&info->slock, flags);
+ expire = jiffies + timeout;
+
+ while (mxser_tx_empty(info)) {
+ msleep_interruptible(char_time);
if (signal_pending(current))
break;
- if (timeout && time_after(jiffies, orig_jiffies + timeout))
+ if (time_after(jiffies, expire))
break;
}
- spin_unlock_irqrestore(&info->slock, flags);
- set_current_state(TASK_RUNNING);
}
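
Two things are going on here. First, msleep_interruptible() takes milliseconds
where schedule_timeout_interruptible() took jiffies, which is what the one-time
jiffies_to_msecs() conversion above is for. Second, note that mxser_tx_empty()
as written returns true while UART_LSR_TEMT is still clear, i.e. while the
transmitter is not yet empty, so despite its name it is exactly the condition
the loop keeps polling on. A worked example of the unit conversion:

	/* With HZ == 250 (a common config), one jiffy is 4 ms, so e.g.: */
	char_time = jiffies_to_msecs(5);  /* == 20, sleep ~20 ms per iteration */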
/*
@@ -1531,8 +1509,7 @@ static int mxser_rs_break(struct tty_struct *tty, int break_state)
return 0;
}
-static bool mxser_receive_chars_new(struct tty_struct *tty,
- struct mxser_port *port, u8 status)
+static bool mxser_receive_chars_new(struct mxser_port *port, u8 status)
{
enum mxser_must_hwid hwid = port->board->must_hwid;
u8 gdl;
@@ -1546,12 +1523,10 @@ static bool mxser_receive_chars_new(struct tty_struct *tty,
if (hwid == MOXA_MUST_MU150_HWID)
gdl &= MOXA_MUST_GDL_MASK;
- if (gdl >= tty->receive_room && !port->ldisc_stop_rx)
- mxser_stoprx(tty);
-
while (gdl--) {
u8 ch = inb(port->ioaddr + UART_RX);
- tty_insert_flip_char(&port->port, ch, 0);
+ if (!tty_insert_flip_char(&port->port, ch, 0))
+ port->icount.buf_overrun++;
}
return true;
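
tty_insert_flip_char() returns the number of characters queued, so 1 on success
and 0 once the flip buffer is full. Checking it means dropped bytes are now
accounted in icount.buf_overrun instead of being silently lost, which is what
makes the old receive_room-based throttling unnecessary. The same check works
with an explicit flag, as a sketch:

	if (!tty_insert_flip_char(&port->port, ch, TTY_NORMAL))
		port->icount.buf_overrun++;  /* flip buffer full, byte dropped */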
@@ -1561,10 +1536,8 @@ static u8 mxser_receive_chars_old(struct tty_struct *tty,
struct mxser_port *port, u8 status)
{
enum mxser_must_hwid hwid = port->board->must_hwid;
- int recv_room = tty->receive_room;
int ignored = 0;
int max = 256;
- int cnt = 0;
u8 ch;
do {
@@ -1599,14 +1572,10 @@ static u8 mxser_receive_chars_old(struct tty_struct *tty,
port->icount.overrun++;
}
}
- tty_insert_flip_char(&port->port, ch, flag);
- cnt++;
- if (cnt >= recv_room) {
- if (!port->ldisc_stop_rx)
- mxser_stoprx(tty);
+ if (!tty_insert_flip_char(&port->port, ch, flag)) {
+ port->icount.buf_overrun++;
break;
}
-
}
if (hwid)
@@ -1621,10 +1590,7 @@ static u8 mxser_receive_chars_old(struct tty_struct *tty,
static u8 mxser_receive_chars(struct tty_struct *tty,
struct mxser_port *port, u8 status)
{
- if (tty->receive_room == 0 && !port->ldisc_stop_rx)
- mxser_stoprx(tty);
-
- if (!mxser_receive_chars_new(tty, port, status))
+ if (!mxser_receive_chars_new(port, status))
status = mxser_receive_chars_old(tty, port, status);
tty_flip_buffer_push(&port->port);
@@ -1634,7 +1600,7 @@ static u8 mxser_receive_chars(struct tty_struct *tty,
static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port)
{
- int count, cnt;
+ int count;
if (port->x_char) {
outb(port->x_char, port->ioaddr + UART_TX);
@@ -1643,27 +1609,22 @@ static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port
return;
}
- if (port->port.xmit_buf == NULL)
- return;
-
if (!port->xmit_cnt || tty->flow.stopped ||
(tty->hw_stopped && !mxser_16550A_or_MUST(port))) {
__mxser_stop_tx(port);
return;
}
- cnt = port->xmit_cnt;
count = port->xmit_fifo_size;
do {
outb(port->port.xmit_buf[port->xmit_tail++],
port->ioaddr + UART_TX);
- port->xmit_tail = port->xmit_tail & (SERIAL_XMIT_SIZE - 1);
+ port->xmit_tail &= SERIAL_XMIT_SIZE - 1;
+ port->icount.tx++;
if (!--port->xmit_cnt)
break;
} while (--count > 0);
- port->icount.tx += (cnt - port->xmit_cnt);
-
if (port->xmit_cnt < WAKEUP_CHARS)
tty_wakeup(tty);
@@ -1674,7 +1635,7 @@ static void mxser_transmit_chars(struct tty_struct *tty, struct mxser_port *port
static bool mxser_port_isr(struct mxser_port *port)
{
struct tty_struct *tty;
- u8 iir, msr, status;
+ u8 iir, status;
bool error = false;
iir = inb(port->ioaddr + UART_IIR);
@@ -1683,7 +1644,7 @@ static bool mxser_port_isr(struct mxser_port *port)
iir &= MOXA_MUST_IIR_MASK;
tty = tty_port_tty_get(&port->port);
- if (!tty || port->closing || !tty_port_initialized(&port->port)) {
+ if (!tty) {
status = inb(port->ioaddr + UART_LSR);
outb(port->FCR | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
port->ioaddr + UART_FCR);
@@ -1707,9 +1668,7 @@ static bool mxser_port_isr(struct mxser_port *port)
status = mxser_receive_chars(tty, port, status);
}
- msr = inb(port->ioaddr + UART_MSR);
- if (msr & UART_MSR_ANY_DELTA)
- mxser_check_modem_status(tty, port, msr);
+ mxser_check_modem_status(tty, port);
if (port->board->must_hwid) {
if (iir == 0x02 && (status & UART_LSR_THRE))
@@ -1836,7 +1795,6 @@ static void mxser_initbrd(struct mxser_board *brd, bool high_baud)
tty_port_init(&info->port);
info->port.ops = &mxser_port_ops;
info->board = brd;
- info->ldisc_stop_rx = false;
/* Enhance mode enabled here */
if (brd->must_hwid != MOXA_OTHER_UART)
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 0b96b14bbfe1..0b1808e3a912 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -322,6 +322,7 @@ static int addr_cnt;
#define GSM1_ESCAPE_BITS 0x20
#define XON 0x11
#define XOFF 0x13
+#define ISO_IEC_646_MASK 0x7F
static const struct tty_port_operations gsm_port_ops;
@@ -531,7 +532,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
int olen = 0;
while (len--) {
if (*input == GSM1_SOF || *input == GSM1_ESCAPE
- || *input == XON || *input == XOFF) {
+ || (*input & ISO_IEC_646_MASK) == XON
+ || (*input & ISO_IEC_646_MASK) == XOFF) {
*output++ = GSM1_ESCAPE;
*output++ = *input++ ^ GSM1_ESCAPE_BITS;
olen++;
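
The byte-stuffing here must treat the flow-control characters as 7-bit values
per ISO/IEC 646, so an XON or XOFF arriving with the top bit set has to be
escaped as well. A worked example of what the old equality test missed:

	/* 0x91 is XON (0x11) with bit 7 set; unmasked, it escaped detection: */
	BUILD_BUG_ON((0x91 & ISO_IEC_646_MASK) != XON); /* masked, it is XON */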
@@ -2074,8 +2076,6 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
/**
* gsm_error - handle tty error
* @gsm: ldisc data
- * @data: byte received (may be invalid)
- * @flag: error received
*
* Handle an error in the receipt of data for a frame. Currently we just
* go back to hunting for a SOF.
@@ -2083,8 +2083,7 @@ static void gsm1_receive(struct gsm_mux *gsm, unsigned char c)
* FIXME: better diagnostics ?
*/
-static void gsm_error(struct gsm_mux *gsm,
- unsigned char data, unsigned char flag)
+static void gsm_error(struct gsm_mux *gsm)
{
gsm->state = GSM_SEARCH;
gsm->io_error++;
@@ -2504,7 +2503,7 @@ static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
case TTY_BREAK:
case TTY_PARITY:
case TTY_FRAME:
- gsm_error(gsm, *cp, flags);
+ gsm_error(gsm);
break;
default:
WARN_ONCE(1, "%s: unknown flag %d\n",
@@ -2690,8 +2689,8 @@ static __poll_t gsmld_poll(struct tty_struct *tty, struct file *file,
return mask;
}
-static int gsmld_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int gsmld_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct gsm_config c;
struct gsm_mux *gsm = tty->disc_data;
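
This hunk follows the tree-wide tty_ldisc_ops change: the .ioctl() hook drops
its unused struct file * argument, and every line discipline is updated to the
new prototype (n_hdlc and n_tty get the same treatment below). The hook now
reads, as a sketch:

	/* struct tty_ldisc_ops, after this series: */
	int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg);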
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index 23ba1fc99df8..94c1ec2dd754 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -593,14 +593,13 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
/**
* n_hdlc_tty_ioctl - process IOCTL system call for the tty device.
* @tty: pointer to tty instance data
- * @file: pointer to open file object for device
* @cmd: IOCTL command code
* @arg: argument for IOCTL call (cmd dependent)
*
* Returns command dependent result.
*/
-static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int n_hdlc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct n_hdlc *n_hdlc = tty->disc_data;
int error = 0;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 5be6d02dc690..efc72104c840 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -186,17 +186,16 @@ static void tty_copy(struct tty_struct *tty, void *to, size_t tail, size_t n)
}
/**
- * n_tty_kick_worker - start input worker (if required)
- * @tty: terminal
+ * n_tty_kick_worker - start input worker (if required)
+ * @tty: terminal
*
- * Re-schedules the flip buffer work if it may have stopped
+ * Re-schedules the flip buffer work if it may have stopped.
*
- * Caller holds exclusive termios_rwsem
- * or
- * n_tty_read()/consumer path:
- * holds non-exclusive termios_rwsem
+ * Locking:
+ * * Caller holds exclusive %termios_rwsem, or
+ * * n_tty_read()/consumer path:
+ * holds non-exclusive %termios_rwsem
*/
-
static void n_tty_kick_worker(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -230,14 +229,12 @@ static ssize_t chars_in_buffer(struct tty_struct *tty)
}
/**
- * n_tty_write_wakeup - asynchronous I/O notifier
- * @tty: tty device
+ * n_tty_write_wakeup - asynchronous I/O notifier
+ * @tty: tty device
*
- * Required for the ptys, serial driver etc. since processes
- * that attach themselves to the master and rely on ASYNC
- * IO must be woken up
+ * Required for the ptys, serial driver etc. since processes that attach
+ * themselves to the master and rely on ASYNC IO must be woken up.
*/
-
static void n_tty_write_wakeup(struct tty_struct *tty)
{
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
@@ -300,16 +297,16 @@ static void n_tty_check_unthrottle(struct tty_struct *tty)
}
/**
- * put_tty_queue - add character to tty
- * @c: character
- * @ldata: n_tty data
+ * put_tty_queue - add character to tty
+ * @c: character
+ * @ldata: n_tty data
*
- * Add a character to the tty read_buf queue.
+ * Add a character to the tty read_buf queue.
*
- * n_tty_receive_buf()/producer path:
- * caller holds non-exclusive termios_rwsem
+ * Locking:
+ * * n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive %termios_rwsem
*/
-
static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
{
*read_buf_addr(ldata, ldata->read_head) = c;
@@ -317,16 +314,16 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
}
/**
- * reset_buffer_flags - reset buffer state
- * @ldata: line disc data to reset
+ * reset_buffer_flags - reset buffer state
+ * @ldata: line disc data to reset
*
- * Reset the read buffer counters and clear the flags.
- * Called from n_tty_open() and n_tty_flush_buffer().
+ * Reset the read buffer counters and clear the flags. Called from
+ * n_tty_open() and n_tty_flush_buffer().
*
- * Locking: caller holds exclusive termios_rwsem
- * (or locking is not required)
+ * Locking:
+ * * caller holds exclusive %termios_rwsem, or
+ *	* locking is not required
*/
-
static void reset_buffer_flags(struct n_tty_data *ldata)
{
ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
@@ -351,19 +348,18 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
}
/**
- * n_tty_flush_buffer - clean input queue
- * @tty: terminal device
+ * n_tty_flush_buffer - clean input queue
+ * @tty: terminal device
*
- * Flush the input buffer. Called when the tty layer wants the
- * buffer flushed (eg at hangup) or when the N_TTY line discipline
- * internally has to clean the pending queue (for example some signals).
+ * Flush the input buffer. Called when the tty layer wants the buffer flushed
+ * (eg at hangup) or when the %N_TTY line discipline internally has to clean
+ * the pending queue (for example some signals).
*
- * Holds termios_rwsem to exclude producer/consumer while
- * buffer indices are reset.
+ * Holds %termios_rwsem to exclude producer/consumer while buffer indices are
+ * reset.
*
- * Locking: ctrl.lock, exclusive termios_rwsem
+ * Locking: %ctrl.lock, exclusive %termios_rwsem
*/
-
static void n_tty_flush_buffer(struct tty_struct *tty)
{
down_write(&tty->termios_rwsem);
@@ -376,55 +372,50 @@ static void n_tty_flush_buffer(struct tty_struct *tty)
}
/**
- * is_utf8_continuation - utf8 multibyte check
- * @c: byte to check
+ * is_utf8_continuation - utf8 multibyte check
+ * @c: byte to check
*
- * Returns true if the utf8 character 'c' is a multibyte continuation
- * character. We use this to correctly compute the on screen size
- * of the character when printing
+ * Returns: true if the utf8 character @c is a multibyte continuation
+ * character. We use this to correctly compute the on-screen size of the
+ * character when printing.
*/
-
static inline int is_utf8_continuation(unsigned char c)
{
return (c & 0xc0) == 0x80;
}
/**
- * is_continuation - multibyte check
- * @c: byte to check
- * @tty: terminal device
+ * is_continuation - multibyte check
+ * @c: byte to check
+ * @tty: terminal device
*
- * Returns true if the utf8 character 'c' is a multibyte continuation
- * character and the terminal is in unicode mode.
+ * Returns: true if the utf8 character @c is a multibyte continuation character
+ * and the terminal is in unicode mode.
*/
-
static inline int is_continuation(unsigned char c, struct tty_struct *tty)
{
return I_IUTF8(tty) && is_utf8_continuation(c);
}
/**
- * do_output_char - output one character
- * @c: character (or partial unicode symbol)
- * @tty: terminal device
- * @space: space available in tty driver write buffer
+ * do_output_char - output one character
+ * @c: character (or partial unicode symbol)
+ * @tty: terminal device
+ * @space: space available in tty driver write buffer
*
- * This is a helper function that handles one output character
- * (including special characters like TAB, CR, LF, etc.),
- * doing OPOST processing and putting the results in the
- * tty driver's write buffer.
+ * This is a helper function that handles one output character (including
+ * special characters like TAB, CR, LF, etc.), doing OPOST processing and
+ * putting the results in the tty driver's write buffer.
*
- * Note that Linux currently ignores TABDLY, CRDLY, VTDLY, FFDLY
- * and NLDLY. They simply aren't relevant in the world today.
- * If you ever need them, add them here.
+ * Note that Linux currently ignores TABDLY, CRDLY, VTDLY, FFDLY and NLDLY.
+ * They simply aren't relevant in the world today. If you ever need them, add
+ * them here.
*
- * Returns the number of bytes of buffer space used or -1 if
- * no space left.
+ * Returns: the number of bytes of buffer space used or -1 if no space left.
*
- * Locking: should be called under the output_lock to protect
- * the column state and space left in the buffer
+ * Locking: should be called under the %output_lock to protect the column state
+ * and space left in the buffer.
*/
-
static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -487,19 +478,18 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
}
/**
- * process_output - output post processor
- * @c: character (or partial unicode symbol)
- * @tty: terminal device
+ * process_output - output post processor
+ * @c: character (or partial unicode symbol)
+ * @tty: terminal device
*
- * Output one character with OPOST processing.
- * Returns -1 when the output device is full and the character
- * must be retried.
+ * Output one character with OPOST processing.
*
- * Locking: output_lock to protect column state and space left
- * (also, this is called from n_tty_write under the
- * tty layer write lock)
+ * Returns: -1 when the output device is full and the character must be
+ * retried.
+ *
+ * Locking: %output_lock to protect column state and space left (also, this is
+ * called from n_tty_write() under the tty layer write lock).
*/
-
static int process_output(unsigned char c, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -518,24 +508,23 @@ static int process_output(unsigned char c, struct tty_struct *tty)
}
/**
- * process_output_block - block post processor
- * @tty: terminal device
- * @buf: character buffer
- * @nr: number of bytes to output
- *
- * Output a block of characters with OPOST processing.
- * Returns the number of characters output.
- *
- * This path is used to speed up block console writes, among other
- * things when processing blocks of output data. It handles only
- * the simple cases normally found and helps to generate blocks of
- * symbols for the console driver and thus improve performance.
- *
- * Locking: output_lock to protect column state and space left
- * (also, this is called from n_tty_write under the
- * tty layer write lock)
+ * process_output_block - block post processor
+ * @tty: terminal device
+ * @buf: character buffer
+ * @nr: number of bytes to output
+ *
+ * Output a block of characters with OPOST processing.
+ *
+ * This path is used to speed up block console writes, among other things when
+ * processing blocks of output data. It handles only the simple cases normally
+ * found and helps to generate blocks of symbols for the console driver and
+ * thus improve performance.
+ *
+ * Returns: the number of characters output.
+ *
+ * Locking: %output_lock to protect column state and space left (also, this is
+ * called from n_tty_write() under the tty layer write lock).
*/
-
static ssize_t process_output_block(struct tty_struct *tty,
const unsigned char *buf, unsigned int nr)
{
@@ -596,30 +585,27 @@ break_out:
}
/**
- * process_echoes - write pending echo characters
- * @tty: terminal device
+ * __process_echoes - write pending echo characters
+ * @tty: terminal device
*
- * Write previously buffered echo (and other ldisc-generated)
- * characters to the tty.
+ * Write previously buffered echo (and other ldisc-generated) characters to the
+ * tty.
*
- * Characters generated by the ldisc (including echoes) need to
- * be buffered because the driver's write buffer can fill during
- * heavy program output. Echoing straight to the driver will
- * often fail under these conditions, causing lost characters and
- * resulting mismatches of ldisc state information.
+ * Characters generated by the ldisc (including echoes) need to be buffered
+ * because the driver's write buffer can fill during heavy program output.
+ * Echoing straight to the driver will often fail under these conditions,
+ * causing lost characters and resulting mismatches of ldisc state information.
*
- * Since the ldisc state must represent the characters actually sent
- * to the driver at the time of the write, operations like certain
- * changes in column state are also saved in the buffer and executed
- * here.
+ * Since the ldisc state must represent the characters actually sent to the
+ * driver at the time of the write, operations like certain changes in column
+ * state are also saved in the buffer and executed here.
*
- * A circular fifo buffer is used so that the most recent characters
- * are prioritized. Also, when control characters are echoed with a
- * prefixed "^", the pair is treated atomically and thus not separated.
+ * A circular fifo buffer is used so that the most recent characters are
+ * prioritized. Also, when control characters are echoed with a prefixed "^",
+ * the pair is treated atomically and thus not separated.
*
- * Locking: callers must hold output_lock
+ * Locking: callers must hold %output_lock.
*/
-
static size_t __process_echoes(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -828,13 +814,12 @@ static void flush_echoes(struct tty_struct *tty)
}
/**
- * add_echo_byte - add a byte to the echo buffer
- * @c: unicode byte to echo
- * @ldata: n_tty data
+ * add_echo_byte - add a byte to the echo buffer
+ * @c: unicode byte to echo
+ * @ldata: n_tty data
*
- * Add a character or operation byte to the echo buffer.
+ * Add a character or operation byte to the echo buffer.
*/
-
static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
{
*echo_buf_addr(ldata, ldata->echo_head) = c;
@@ -843,12 +828,11 @@ static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
}
/**
- * echo_move_back_col - add operation to move back a column
- * @ldata: n_tty data
+ * echo_move_back_col - add operation to move back a column
+ * @ldata: n_tty data
*
- * Add an operation to the echo buffer to move back one column.
+ * Add an operation to the echo buffer to move back one column.
*/
-
static void echo_move_back_col(struct n_tty_data *ldata)
{
add_echo_byte(ECHO_OP_START, ldata);
@@ -856,13 +840,12 @@ static void echo_move_back_col(struct n_tty_data *ldata)
}
/**
- * echo_set_canon_col - add operation to set the canon column
- * @ldata: n_tty data
+ * echo_set_canon_col - add operation to set the canon column
+ * @ldata: n_tty data
*
- * Add an operation to the echo buffer to set the canon column
- * to the current column.
+ * Add an operation to the echo buffer to set the canon column to the current
+ * column.
*/
-
static void echo_set_canon_col(struct n_tty_data *ldata)
{
add_echo_byte(ECHO_OP_START, ldata);
@@ -870,20 +853,18 @@ static void echo_set_canon_col(struct n_tty_data *ldata)
}
/**
- * echo_erase_tab - add operation to erase a tab
- * @num_chars: number of character columns already used
- * @after_tab: true if num_chars starts after a previous tab
- * @ldata: n_tty data
- *
- * Add an operation to the echo buffer to erase a tab.
- *
- * Called by the eraser function, which knows how many character
- * columns have been used since either a previous tab or the start
- * of input. This information will be used later, along with
- * canon column (if applicable), to go back the correct number
- * of columns.
+ * echo_erase_tab - add operation to erase a tab
+ * @num_chars: number of character columns already used
+ * @after_tab: true if num_chars starts after a previous tab
+ * @ldata: n_tty data
+ *
+ * Add an operation to the echo buffer to erase a tab.
+ *
+ * Called by the eraser function, which knows how many character columns have
+ * been used since either a previous tab or the start of input. This
+ * information will be used later, along with canon column (if applicable), to
+ * go back the correct number of columns.
*/
-
static void echo_erase_tab(unsigned int num_chars, int after_tab,
struct n_tty_data *ldata)
{
@@ -901,16 +882,15 @@ static void echo_erase_tab(unsigned int num_chars, int after_tab,
}
/**
- * echo_char_raw - echo a character raw
- * @c: unicode byte to echo
- * @ldata: line disc data
+ * echo_char_raw - echo a character raw
+ * @c: unicode byte to echo
+ * @ldata: line disc data
*
- * Echo user input back onto the screen. This must be called only when
- * L_ECHO(tty) is true. Called from the driver receive_buf path.
+ * Echo user input back onto the screen. This must be called only when
+ * L_ECHO(tty) is true. Called from the &tty_driver.receive_buf() path.
*
- * This variant does not treat control characters specially.
+ * This variant does not treat control characters specially.
*/
-
static void echo_char_raw(unsigned char c, struct n_tty_data *ldata)
{
if (c == ECHO_OP_START) {
@@ -922,17 +902,16 @@ static void echo_char_raw(unsigned char c, struct n_tty_data *ldata)
}
/**
- * echo_char - echo a character
- * @c: unicode byte to echo
- * @tty: terminal device
+ * echo_char - echo a character
+ * @c: unicode byte to echo
+ * @tty: terminal device
*
- * Echo user input back onto the screen. This must be called only when
- * L_ECHO(tty) is true. Called from the driver receive_buf path.
+ * Echo user input back onto the screen. This must be called only when
+ * L_ECHO(tty) is true. Called from the &tty_driver.receive_buf() path.
*
- * This variant tags control characters to be echoed as "^X"
- * (where X is the letter representing the control char).
+ * This variant tags control characters to be echoed as "^X" (where X is the
+ * letter representing the control char).
*/
-
static void echo_char(unsigned char c, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -948,10 +927,9 @@ static void echo_char(unsigned char c, struct tty_struct *tty)
}
/**
- * finish_erasing - complete erase
- * @ldata: n_tty data
+ * finish_erasing - complete erase
+ * @ldata: n_tty data
*/
-
static inline void finish_erasing(struct n_tty_data *ldata)
{
if (ldata->erasing) {
@@ -961,18 +939,17 @@ static inline void finish_erasing(struct n_tty_data *ldata)
}
/**
- * eraser - handle erase function
- * @c: character input
- * @tty: terminal device
+ * eraser - handle erase function
+ * @c: character input
+ * @tty: terminal device
*
- * Perform erase and necessary output when an erase character is
- * present in the stream from the driver layer. Handles the complexities
- * of UTF-8 multibyte symbols.
+ * Perform erase and necessary output when an erase character is present in the
+ * stream from the driver layer. Handles the complexities of UTF-8 multibyte
+ * symbols.
*
- * n_tty_receive_buf()/producer path:
- * caller holds non-exclusive termios_rwsem
+ * Locking: n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive %termios_rwsem
*/
-
static void eraser(unsigned char c, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1091,20 +1068,6 @@ static void eraser(unsigned char c, struct tty_struct *tty)
finish_erasing(ldata);
}
-/**
- * isig - handle the ISIG optio
- * @sig: signal
- * @tty: terminal
- *
- * Called when a signal is being sent due to terminal input.
- * Called from the driver receive_buf path so serialized.
- *
- * Performs input and output flush if !NOFLSH. In this context, the echo
- * buffer is 'output'. The signal is processed first to alert any current
- * readers or writers to discontinue and exit their i/o loops.
- *
- * Locking: ctrl.lock
- */
static void __isig(int sig, struct tty_struct *tty)
{
@@ -1115,6 +1078,20 @@ static void __isig(int sig, struct tty_struct *tty)
}
}
+/**
+ * isig - handle the ISIG option
+ * @sig: signal
+ * @tty: terminal
+ *
+ * Called when a signal is being sent due to terminal input. Called from the
+ * &tty_driver.receive_buf() path, so serialized.
+ *
+ * Performs input and output flush if !NOFLSH. In this context, the echo
+ * buffer is 'output'. The signal is processed first to alert any current
+ * readers or writers to discontinue and exit their i/o loops.
+ *
+ * Locking: %ctrl.lock
+ */
static void isig(int sig, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1151,18 +1128,17 @@ static void isig(int sig, struct tty_struct *tty)
}
/**
- * n_tty_receive_break - handle break
- * @tty: terminal
+ * n_tty_receive_break - handle break
+ * @tty: terminal
*
- * An RS232 break event has been hit in the incoming bitstream. This
- * can cause a variety of events depending upon the termios settings.
+ * An RS232 break event has been hit in the incoming bitstream. This can cause
+ * a variety of events depending upon the termios settings.
*
- * n_tty_receive_buf()/producer path:
- * caller holds non-exclusive termios_rwsem
+ * Locking: n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive termios_rwsem
*
- * Note: may get exclusive termios_rwsem if flushing input buffer
+ * Note: may get exclusive %termios_rwsem if flushing input buffer
*/
-
static void n_tty_receive_break(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1181,18 +1157,15 @@ static void n_tty_receive_break(struct tty_struct *tty)
}
/**
- * n_tty_receive_overrun - handle overrun reporting
- * @tty: terminal
+ * n_tty_receive_overrun - handle overrun reporting
+ * @tty: terminal
*
- * Data arrived faster than we could process it. While the tty
- * driver has flagged this the bits that were missed are gone
- * forever.
+ * Data arrived faster than we could process it. While the tty driver has
+ * flagged this, the bits that were missed are gone forever.
*
- * Called from the receive_buf path so single threaded. Does not
- * need locking as num_overrun and overrun_time are function
- * private.
+ * Called from the receive_buf path, so single threaded. Does not need locking
+ * as num_overrun and overrun_time are function private.
*/
-
static void n_tty_receive_overrun(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1207,15 +1180,15 @@ static void n_tty_receive_overrun(struct tty_struct *tty)
}
/**
- * n_tty_receive_parity_error - error notifier
- * @tty: terminal device
- * @c: character
+ * n_tty_receive_parity_error - error notifier
+ * @tty: terminal device
+ * @c: character
*
- * Process a parity error and queue the right data to indicate
- * the error case if necessary.
+ * Process a parity error and queue the right data to indicate the error case
+ * if necessary.
*
- * n_tty_receive_buf()/producer path:
- * caller holds non-exclusive termios_rwsem
+ * Locking: n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive %termios_rwsem
*/
static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
{
@@ -1247,19 +1220,6 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
process_echoes(tty);
}
-/**
- * n_tty_receive_char - perform processing
- * @tty: terminal device
- * @c: character
- *
- * Process an individual character of input received from the driver.
- * This is serialized with respect to itself by the rules for the
- * driver above.
- *
- * n_tty_receive_buf()/producer path:
- * caller holds non-exclusive termios_rwsem
- * publishes canon_head if canonical mode is active
- */
static void n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1369,7 +1329,7 @@ handle_newline:
put_tty_queue(c, ldata);
smp_store_release(&ldata->canon_head, ldata->read_head);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
+ wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
return;
}
}
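
The wake-up key matters because only waiters whose requested poll mask
intersects the key are woken. n_tty_poll() already reports readability as both
bits, so a reader sleeping in poll() on POLLRDNORM alone was previously never
woken by an EPOLLIN-only key. A sketch of the consumer side this now matches:

	/* in n_tty_poll(), readability is advertised as both bits: */
	if (input_available_p(tty, 1))
		mask |= EPOLLIN | EPOLLRDNORM;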
@@ -1394,6 +1354,18 @@ handle_newline:
put_tty_queue(c, ldata);
}
+/**
+ * n_tty_receive_char - perform processing
+ * @tty: terminal device
+ * @c: character
+ *
+ * Process an individual character of input received from the driver. This is
+ * serialized with respect to itself by the rules for the driver above.
+ *
+ * Locking: n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive %termios_rwsem
+ * publishes canon_head if canonical mode is active
+ */
static void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1589,43 +1561,42 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
if (read_cnt(ldata)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
- wake_up_interruptible_poll(&tty->read_wait, EPOLLIN);
+ wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM);
}
}
/**
- * n_tty_receive_buf_common - process input
- * @tty: device to receive input
- * @cp: input chars
- * @fp: flags for each char (if NULL, all chars are TTY_NORMAL)
- * @count: number of input chars in @cp
- * @flow: enable flow control
- *
- * Called by the terminal driver when a block of characters has
- * been received. This function must be called from soft contexts
- * not from interrupt context. The driver is responsible for making
- * calls one at a time and in order (or using flush_to_ldisc)
- *
- * Returns the # of input chars from @cp which were processed.
- *
- * In canonical mode, the maximum line length is 4096 chars (including
- * the line termination char); lines longer than 4096 chars are
- * truncated. After 4095 chars, input data is still processed but
- * not stored. Overflow processing ensures the tty can always
- * receive more input until at least one line can be read.
- *
- * In non-canonical mode, the read buffer will only accept 4095 chars;
- * this provides the necessary space for a newline char if the input
- * mode is switched to canonical.
- *
- * Note it is possible for the read buffer to _contain_ 4096 chars
- * in non-canonical mode: the read buffer could already contain the
- * maximum canon line of 4096 chars when the mode is switched to
- * non-canonical.
- *
- * n_tty_receive_buf()/producer path:
- * claims non-exclusive termios_rwsem
- * publishes commit_head or canon_head
+ * n_tty_receive_buf_common - process input
+ * @tty: device to receive input
+ * @cp: input chars
+ * @fp: flags for each char (if %NULL, all chars are %TTY_NORMAL)
+ * @count: number of input chars in @cp
+ * @flow: enable flow control
+ *
+ * Called by the terminal driver when a block of characters has been received.
+ * This function must be called from soft contexts, not from interrupt context.
+ * The driver is responsible for making calls one at a time and in order (or
+ * using flush_to_ldisc()).
+ *
+ * Returns: the # of input chars from @cp which were processed.
+ *
+ * In canonical mode, the maximum line length is 4096 chars (including the line
+ * termination char); lines longer than 4096 chars are truncated. After 4095
+ * chars, input data is still processed but not stored. Overflow processing
+ * ensures the tty can always receive more input until at least one line can be
+ * read.
+ *
+ * In non-canonical mode, the read buffer will only accept 4095 chars; this
+ * provides the necessary space for a newline char if the input mode is
+ * switched to canonical.
+ *
+ * Note it is possible for the read buffer to _contain_ 4096 chars in
+ * non-canonical mode: the read buffer could already contain the maximum canon
+ * line of 4096 chars when the mode is switched to non-canonical.
+ *
+ * Locking: n_tty_receive_buf()/producer path:
+ * claims non-exclusive %termios_rwsem
+ * publishes commit_head or canon_head
*/
static int
n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp,
@@ -1710,19 +1681,17 @@ static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
}
/**
- * n_tty_set_termios - termios data changed
- * @tty: terminal
- * @old: previous data
+ * n_tty_set_termios - termios data changed
+ * @tty: terminal
+ * @old: previous data
*
- * Called by the tty layer when the user changes termios flags so
- * that the line discipline can plan ahead. This function cannot sleep
- * and is protected from re-entry by the tty layer. The user is
- * guaranteed that this function will not be re-entered or in progress
- * when the ldisc is closed.
+ * Called by the tty layer when the user changes termios flags so that the line
+ * discipline can plan ahead. This function cannot sleep and is protected from
+ * re-entry by the tty layer. The user is guaranteed that this function will
+ * not be re-entered or in progress when the ldisc is closed.
*
- * Locking: Caller holds tty->termios_rwsem
+ * Locking: Caller holds @tty->termios_rwsem
*/
-
static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1808,15 +1777,13 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
}
/**
- * n_tty_close - close the ldisc for this tty
- * @tty: device
+ * n_tty_close - close the ldisc for this tty
+ * @tty: device
*
- * Called from the terminal layer when this line discipline is
- * being shut down, either because of a close or becsuse of a
- * discipline change. The function will not be called while other
- * ldisc methods are in progress.
+ * Called from the terminal layer when this line discipline is being shut down,
+ * either because of a close or because of a discipline change. The function
+ * will not be called while other ldisc methods are in progress.
*/
-
static void n_tty_close(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1831,15 +1798,13 @@ static void n_tty_close(struct tty_struct *tty)
}
/**
- * n_tty_open - open an ldisc
- * @tty: terminal to open
+ * n_tty_open - open an ldisc
+ * @tty: terminal to open
*
- * Called when this line discipline is being attached to the
- * terminal device. Can sleep. Called serialized so that no
- * other events will occur in parallel. No further open will occur
- * until a close.
+ * Called when this line discipline is being attached to the terminal device.
+ * Can sleep. Called serialized so that no other events will occur in parallel.
+ * No further open will occur until a close.
*/
-
static int n_tty_open(struct tty_struct *tty)
{
struct n_tty_data *ldata;
@@ -1874,24 +1839,23 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
}
/**
- * copy_from_read_buf - copy read data directly
- * @tty: terminal device
- * @kbp: data
- * @nr: size of data
- *
- * Helper function to speed up n_tty_read. It is only called when
- * ICANON is off; it copies characters straight from the tty queue.
+ * copy_from_read_buf - copy read data directly
+ * @tty: terminal device
+ * @kbp: data
+ * @nr: size of data
*
- * Called under the ldata->atomic_read_lock sem
+ * Helper function to speed up n_tty_read(). It is only called when %ICANON is
+ * off; it copies characters straight from the tty queue.
*
- * Returns true if it successfully copied data, but there is still
- * more data to be had.
+ * Returns: true if it successfully copied data, but there is still more data
+ * to be had.
*
- * n_tty_read()/consumer path:
- * caller holds non-exclusive termios_rwsem
+ * Locking:
+ * * called under the @ldata->atomic_read_lock sem
+ * * n_tty_read()/consumer path:
+ * caller holds non-exclusive %termios_rwsem;
* read_tail published
*/
-
static bool copy_from_read_buf(struct tty_struct *tty,
unsigned char **kbp,
size_t *nr)
@@ -1926,28 +1890,27 @@ static bool copy_from_read_buf(struct tty_struct *tty,
}
/**
- * canon_copy_from_read_buf - copy read data in canonical mode
- * @tty: terminal device
- * @kbp: data
- * @nr: size of data
- *
- * Helper function for n_tty_read. It is only called when ICANON is on;
- * it copies one line of input up to and including the line-delimiting
- * character into the result buffer.
- *
- * NB: When termios is changed from non-canonical to canonical mode and
- * the read buffer contains data, n_tty_set_termios() simulates an EOF
- * push (as if C-d were input) _without_ the DISABLED_CHAR in the buffer.
- * This causes data already processed as input to be immediately available
- * as input although a newline has not been received.
- *
- * Called under the atomic_read_lock mutex
- *
- * n_tty_read()/consumer path:
- * caller holds non-exclusive termios_rwsem
- * read_tail published
+ * canon_copy_from_read_buf - copy read data in canonical mode
+ * @tty: terminal device
+ * @kbp: data
+ * @nr: size of data
+ *
+ * Helper function for n_tty_read(). It is only called when %ICANON is on; it
+ * copies one line of input up to and including the line-delimiting character
+ * into the result buffer.
+ *
+ * Note: When termios is changed from non-canonical to canonical mode and the
+ * read buffer contains data, n_tty_set_termios() simulates an EOF push (as if
+ * C-d were input) _without_ the %DISABLED_CHAR in the buffer. This causes data
+ * already processed as input to be immediately available as input although a
+ * newline has not been received.
+ *
+ * Locking:
+ * * called under the %atomic_read_lock mutex
+ * * n_tty_read()/consumer path:
+ * caller holds non-exclusive %termios_rwsem;
+ * read_tail published
*/
-
static bool canon_copy_from_read_buf(struct tty_struct *tty,
unsigned char **kbp,
size_t *nr)
@@ -1963,7 +1926,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
return false;
canon_head = smp_load_acquire(&ldata->canon_head);
- n = min(*nr + 1, canon_head - ldata->read_tail);
+ n = min(*nr, canon_head - ldata->read_tail);
tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
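
Clamping to *nr up front fixes an off-by-one: the old *nr + 1 let the routine
look one byte past the caller's request so the line-delimiting character could
be handled, which then had to be corrected by the c = min(*nr, c) clamp that
the next hunk removes. With concrete numbers:

	/* caller asks for *nr = 4 bytes, 10 canonical bytes are pending: */
	n = min(4 + 1, 10);  /* old: n = 5, one more than requested */
	n = min(4, 10);      /* new: n = 4, never exceeds the request */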
@@ -1975,7 +1938,7 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
more = n - (size - tail);
if (eol == N_TTY_BUF_SIZE && more) {
/* scan wrapped without finding set bit */
- eol = find_next_bit(ldata->read_flags, more, 0);
+ eol = find_first_bit(ldata->read_flags, more);
found = eol != more;
} else
found = eol != size;
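
find_first_bit(addr, size) is the dedicated spelling of
find_next_bit(addr, size, 0); the two are equivalent, but the first-bit variant
skips the start-offset handling and is the conventional form when the search
begins at bit 0:

	/* equivalent searches over the first 'more' bits: */
	eol = find_next_bit(ldata->read_flags, more, 0);
	eol = find_first_bit(ldata->read_flags, more);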
@@ -1985,10 +1948,8 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
n += N_TTY_BUF_SIZE;
c = n + found;
- if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
- c = min(*nr, c);
+ if (!found || read_buf(ldata, eol) != __DISABLED_CHAR)
n = c;
- }
n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu tail:%zu more:%zu\n",
__func__, eol, found, n, c, tail, more);
@@ -2015,19 +1976,19 @@ static bool canon_copy_from_read_buf(struct tty_struct *tty,
}
/**
- * job_control - check job control
- * @tty: tty
- * @file: file handle
- *
- * Perform job control management checks on this file/tty descriptor
- * and if appropriate send any needed signals and return a negative
- * error code if action should be taken.
- *
- * Locking: redirected write test is safe
- * current->signal->tty check is safe
- * ctrl.lock to safely reference tty->ctrl.pgrp
+ * job_control - check job control
+ * @tty: tty
+ * @file: file handle
+ *
+ * Perform job control management checks on this @file/@tty descriptor and if
+ * appropriate send any needed signals and return a negative error code if
+ * action should be taken.
+ *
+ * Locking:
+ * * redirected write test is safe
+ * * current->signal->tty check is safe
+ * * ctrl.lock to safely reference @tty->ctrl.pgrp
*/
-
static int job_control(struct tty_struct *tty, struct file *file)
{
/* Job control check -- must be done at start and after
@@ -2043,24 +2004,25 @@ static int job_control(struct tty_struct *tty, struct file *file)
/**
- * n_tty_read - read function for tty
- * @tty: tty device
- * @file: file object
- * @buf: userspace buffer pointer
- * @nr: size of I/O
- *
- * Perform reads for the line discipline. We are guaranteed that the
- * line discipline will not be closed under us but we may get multiple
- * parallel readers and must handle this ourselves. We may also get
- * a hangup. Always called in user context, may sleep.
- *
- * This code must be sure never to sleep through a hangup.
- *
- * n_tty_read()/consumer path:
- * claims non-exclusive termios_rwsem
- * publishes read_tail
+ * n_tty_read - read function for tty
+ * @tty: tty device
+ * @file: file object
+ * @kbuf: kernelspace buffer pointer
+ * @nr: size of I/O
+ * @cookie: if non-%NULL, this is a continuation read
+ * @offset: where to continue reading from (unused in n_tty)
+ *
+ * Perform reads for the line discipline. We are guaranteed that the line
+ * discipline will not be closed under us but we may get multiple parallel
+ * readers and must handle this ourselves. We may also get a hangup. Always
+ * called in user context, may sleep.
+ *
+ * This code must be sure never to sleep through a hangup.
+ *
+ * Locking: n_tty_read()/consumer path:
+ * claims non-exclusive termios_rwsem;
+ * publishes read_tail
*/
-
static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
unsigned char *kbuf, size_t nr,
void **cookie, unsigned long offset)
@@ -2232,25 +2194,23 @@ more_to_be_read:
}
/**
- * n_tty_write - write function for tty
- * @tty: tty device
- * @file: file object
- * @buf: userspace buffer pointer
- * @nr: size of I/O
- *
- * Write function of the terminal device. This is serialized with
- * respect to other write callers but not to termios changes, reads
- * and other such events. Since the receive code will echo characters,
- * thus calling driver write methods, the output_lock is used in
- * the output processing functions called here as well as in the
- * echo processing function to protect the column state and space
- * left in the buffer.
- *
- * This code must be sure never to sleep through a hangup.
- *
- * Locking: output_lock to protect column state and space left
- * (note that the process_output*() functions take this
- * lock themselves)
+ * n_tty_write - write function for tty
+ * @tty: tty device
+ * @file: file object
+ * @buf: userspace buffer pointer
+ * @nr: size of I/O
+ *
+ * Write function of the terminal device. This is serialized with respect to
+ * other write callers but not to termios changes, reads and other such events.
+ * Since the receive code will echo characters, thus calling driver write
+ * methods, the %output_lock is used in the output processing functions called
+ * here as well as in the echo processing function to protect the column state
+ * and space left in the buffer.
+ *
+ * This code must be sure never to sleep through a hangup.
+ *
+ * Locking: output_lock to protect column state and space left
+ * (note that the process_output*() functions take this lock themselves)
*/
static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
@@ -2341,19 +2301,19 @@ break_out:
}
/**
- * n_tty_poll - poll method for N_TTY
- * @tty: terminal device
- * @file: file accessing it
- * @wait: poll table
+ * n_tty_poll - poll method for N_TTY
+ * @tty: terminal device
+ * @file: file accessing it
+ * @wait: poll table
*
- * Called when the line discipline is asked to poll() for data or
- * for special events. This code is not serialized with respect to
- * other events save open/close.
+ * Called when the line discipline is asked to poll() for data or for special
+ * events. This code is not serialized with respect to other events save
+ * open/close.
*
- * This code must be sure never to sleep through a hangup.
- * Called without the kernel lock held - fine
+ * This code must be sure never to sleep through a hangup.
+ *
+ * Locking: called without the kernel lock held -- fine.
*/
-
static __poll_t n_tty_poll(struct tty_struct *tty, struct file *file,
poll_table *wait)
{
@@ -2400,8 +2360,8 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
return nr;
}
-static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg)
+static int n_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
{
struct n_tty_data *ldata = tty->disc_data;
int retval;
diff --git a/drivers/tty/rpmsg_tty.c b/drivers/tty/rpmsg_tty.c
index dae2a4e44f38..29db413bbc03 100644
--- a/drivers/tty/rpmsg_tty.c
+++ b/drivers/tty/rpmsg_tty.c
@@ -50,10 +50,17 @@ static int rpmsg_tty_cb(struct rpmsg_device *rpdev, void *data, int len, void *p
static int rpmsg_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct rpmsg_tty_port *cport = idr_find(&tty_idr, tty->index);
+ struct tty_port *port;
tty->driver_data = cport;
- return tty_port_install(&cport->port, driver, tty);
+ port = tty_port_get(&cport->port);
+ return tty_port_install(port, driver, tty);
+}
+
+static void rpmsg_tty_cleanup(struct tty_struct *tty)
+{
+ tty_port_put(tty->port);
}
static int rpmsg_tty_open(struct tty_struct *tty, struct file *filp)
@@ -106,12 +113,19 @@ static unsigned int rpmsg_tty_write_room(struct tty_struct *tty)
return size;
}
+static void rpmsg_tty_hangup(struct tty_struct *tty)
+{
+ tty_port_hangup(tty->port);
+}
+
static const struct tty_operations rpmsg_tty_ops = {
.install = rpmsg_tty_install,
.open = rpmsg_tty_open,
.close = rpmsg_tty_close,
.write = rpmsg_tty_write,
.write_room = rpmsg_tty_write_room,
+ .hangup = rpmsg_tty_hangup,
+ .cleanup = rpmsg_tty_cleanup,
};
static struct rpmsg_tty_port *rpmsg_tty_alloc_cport(void)
@@ -137,8 +151,10 @@ static struct rpmsg_tty_port *rpmsg_tty_alloc_cport(void)
return cport;
}
-static void rpmsg_tty_release_cport(struct rpmsg_tty_port *cport)
+static void rpmsg_tty_destruct_port(struct tty_port *port)
{
+ struct rpmsg_tty_port *cport = container_of(port, struct rpmsg_tty_port, port);
+
mutex_lock(&idr_lock);
idr_remove(&tty_idr, cport->id);
mutex_unlock(&idr_lock);
@@ -146,7 +162,10 @@ static void rpmsg_tty_release_cport(struct rpmsg_tty_port *cport)
kfree(cport);
}
-static const struct tty_port_operations rpmsg_tty_port_ops = { };
+static const struct tty_port_operations rpmsg_tty_port_ops = {
+ .destruct = rpmsg_tty_destruct_port,
+};
+
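
With a .destruct() hook in place, the tty_port kref owns the lifetime of the
embedding rpmsg_tty_port, closing the window between unregistering the device
and the last tty user going away. The resulting flow, as a sketch:

	/* refcount lifecycle (sketch):
	 * tty_port_init()  in probe            -> kref == 1
	 * tty_port_get()   in ->install()      -> +1 per attached tty
	 * tty_port_put()   in ->cleanup()      -> -1 when the tty is released
	 * tty_port_put()   in remove/probe err -> drops the initial reference
	 * final put -> rpmsg_tty_destruct_port() removes the idr entry, kfrees
	 */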
static int rpmsg_tty_probe(struct rpmsg_device *rpdev)
{
@@ -166,7 +185,8 @@ static int rpmsg_tty_probe(struct rpmsg_device *rpdev)
cport->id, dev);
if (IS_ERR(tty_dev)) {
ret = dev_err_probe(dev, PTR_ERR(tty_dev), "Failed to register tty port\n");
- goto err_destroy;
+ tty_port_put(&cport->port);
+ return ret;
}
cport->rpdev = rpdev;
@@ -177,12 +197,6 @@ static int rpmsg_tty_probe(struct rpmsg_device *rpdev)
rpdev->src, rpdev->dst, cport->id);
return 0;
-
-err_destroy:
- tty_port_destroy(&cport->port);
- rpmsg_tty_release_cport(cport);
-
- return ret;
}
static void rpmsg_tty_remove(struct rpmsg_device *rpdev)
@@ -192,13 +206,11 @@ static void rpmsg_tty_remove(struct rpmsg_device *rpdev)
dev_dbg(&rpdev->dev, "Removing rpmsg tty device %d\n", cport->id);
/* Hang up the user side so the tty gets released */
- if (tty_port_initialized(&cport->port))
- tty_port_tty_hangup(&cport->port, false);
+ tty_port_tty_hangup(&cport->port, false);
tty_unregister_device(rpmsg_tty_driver, cport->id);
- tty_port_destroy(&cport->port);
- rpmsg_tty_release_cport(cport);
+ tty_port_put(&cport->port);
}
static struct rpmsg_device_id rpmsg_driver_tty_id_table[] = {
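
Note: the conversion above moves the rpmsg port to reference-counted lifetime management: tty_port_get() in .install pairs with tty_port_put() in .cleanup, the probe error path and remove each drop their own reference, and the final put runs the .destruct callback, which frees the structure. A minimal sketch of the same pattern, with illustrative names not taken from this patch:

#include <linux/tty.h>
#include <linux/slab.h>

static void foo_port_destruct(struct tty_port *port)
{
	struct foo_port *fp = container_of(port, struct foo_port, port);

	kfree(fp);			/* runs only on the final tty_port_put() */
}

static const struct tty_port_operations foo_port_ops = {
	.destruct = foo_port_destruct,
};

static int foo_install(struct tty_driver *drv, struct tty_struct *tty)
{
	struct foo_port *fp = foo_lookup(tty->index);	/* hypothetical lookup */

	tty->driver_data = fp;
	return tty_port_install(tty_port_get(&fp->port), drv, tty);
}

static void foo_cleanup(struct tty_struct *tty)
{
	tty_port_put(tty->port);	/* balances the get taken in .install */
}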
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 6473361525d1..db784ace25d8 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -241,16 +241,8 @@ static inline int serial8250_in_MCR(struct uart_8250_port *up)
return mctrl;
}
-#if defined(__alpha__) && !defined(CONFIG_PCI)
-/*
- * Digital did something really horribly wrong with the OUT1 and OUT2
- * lines on at least some ALPHA's. The failure mode is that if either
- * is cleared, the machine locks up with endless interrupts.
- */
-#define ALPHA_KLUDGE_MCR (UART_MCR_OUT2 | UART_MCR_OUT1)
-#else
-#define ALPHA_KLUDGE_MCR 0
-#endif
+bool alpha_jensen(void);
+void alpha_jensen_set_mctrl(struct uart_port *port, unsigned int mctrl);
#ifdef CONFIG_SERIAL_8250_PNP
int serial8250_pnp_init(void);
diff --git a/drivers/tty/serial/8250/8250_alpha.c b/drivers/tty/serial/8250/8250_alpha.c
new file mode 100644
index 000000000000..58e70328aa4d
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_alpha.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <asm/machvec.h>
+#include "8250.h"
+
+bool alpha_jensen(void)
+{
+ return !strcmp(alpha_mv.vector_name, "Jensen");
+}
+
+void alpha_jensen_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /*
+ * Digital did something really horribly wrong with the OUT1 and OUT2
+ * lines on Alpha Jensen. The failure mode is that if either is
+ * cleared, the machine locks up with endless interrupts.
+ */
+ mctrl |= TIOCM_OUT1 | TIOCM_OUT2;
+
+ serial8250_do_set_mctrl(port, mctrl);
+}
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 5163d60756b7..9b878d023dac 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -941,7 +941,7 @@ static int brcmuart_probe(struct platform_device *pdev)
struct brcmuart_priv *priv;
struct clk *baud_mux_clk;
struct uart_8250_port up;
- struct resource *irq;
+ int irq;
void __iomem *membase = NULL;
resource_size_t mapbase = 0;
u32 clk_rate = 0;
@@ -952,11 +952,9 @@ static int brcmuart_probe(struct platform_device *pdev)
"uart", "dma_rx", "dma_tx", "dma_intr2", "dma_arb"
};
- irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq) {
- dev_err(dev, "missing irq\n");
- return -EINVAL;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
priv = devm_kzalloc(dev, sizeof(struct brcmuart_priv),
GFP_KERNEL);
if (!priv)
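
This hunk is the recurring conversion in this series: platform_get_irq() returns the Linux IRQ number directly, or a negative errno (including -EPROBE_DEFER), and it already logs on failure, so both the platform_get_resource(IORESOURCE_IRQ) lookup and the hand-rolled dev_err() go away. A minimal sketch of the idiom in a platform driver probe:

#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* propagates -EPROBE_DEFER and friends */

	/* irq is now usable as port->irq or a request_irq() argument */
	return 0;
}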
@@ -1044,7 +1042,7 @@ static int brcmuart_probe(struct platform_device *pdev)
up.port.dev = dev;
up.port.mapbase = mapbase;
up.port.membase = membase;
- up.port.irq = irq->start;
+ up.port.irq = irq;
up.port.handle_irq = brcmuart_handle_irq;
up.port.regshift = 2;
up.port.iotype = of_device_is_big_endian(np) ?
@@ -1076,14 +1074,18 @@ static int brcmuart_probe(struct platform_device *pdev)
priv->rx_bufs = dma_alloc_coherent(dev,
priv->rx_size,
&priv->rx_addr, GFP_KERNEL);
- if (!priv->rx_bufs)
+ if (!priv->rx_bufs) {
+ ret = -ENOMEM;
goto err;
+ }
priv->tx_size = UART_XMIT_SIZE;
priv->tx_buf = dma_alloc_coherent(dev,
priv->tx_size,
&priv->tx_addr, GFP_KERNEL);
- if (!priv->tx_buf)
+ if (!priv->tx_buf) {
+ ret = -ENOMEM;
goto err;
+ }
}
ret = serial8250_register_8250_port(&up);
@@ -1097,6 +1099,7 @@ static int brcmuart_probe(struct platform_device *pdev)
if (priv->dma_enabled) {
dma_irq = platform_get_irq_byname(pdev, "dma");
if (dma_irq < 0) {
+ ret = dma_irq;
dev_err(dev, "no IRQ resource info\n");
goto err1;
}
@@ -1116,7 +1119,7 @@ err1:
err:
brcmuart_free_bufs(dev, priv);
brcmuart_arbitration(priv, 0);
- return -ENODEV;
+ return ret;
}
static int brcmuart_remove(struct platform_device *pdev)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 1ce193daea7f..01d30f6ed8fb 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -509,11 +509,10 @@ static void __init serial8250_isa_init_ports(void)
up->ops = &univ8250_driver_ops;
- /*
- * ALPHA_KLUDGE_MCR needs to be killed.
- */
- up->mcr_mask = ~ALPHA_KLUDGE_MCR;
- up->mcr_force = ALPHA_KLUDGE_MCR;
+ if (IS_ENABLED(CONFIG_ALPHA_JENSEN) ||
+ (IS_ENABLED(CONFIG_ALPHA_GENERIC) && alpha_jensen()))
+ port->set_mctrl = alpha_jensen_set_mctrl;
+
serial8250_set_defaults(up);
}
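
Worth noting: IS_ENABLED() evaluates to a compile-time constant, so on non-Alpha configurations the whole branch is dead code and the compiler drops the reference to alpha_jensen(); only CONFIG_ALPHA_GENERIC kernels pay the run-time machine-vector check, and CONFIG_ALPHA_JENSEN builds install the hook unconditionally. The shape of the idiom, with illustrative names:

/* Sketch: compile-time gated quirk hook (illustrative names) */
if (IS_ENABLED(CONFIG_FOO_QUIRK) && foo_quirk_present())
	port->set_mctrl = foo_quirk_set_mctrl;	/* core uses its default otherwise */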
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index 673cda3d011d..948d0a1c6ae8 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -26,7 +26,7 @@ static int __init serial_init_chip(struct parisc_device *dev)
unsigned long address;
int err;
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && defined(CONFIG_IOSAPIC)
if (!dev->irq && (dev->id.sversion == 0xad))
dev->irq = iosapic_serial_irq(dev);
#endif
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index bce28729dd7b..be8626234627 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -83,8 +83,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->mapsize = resource_size(&resource);
/* Check for shifted address mapping */
- if (of_property_read_u32(np, "reg-offset", &prop) == 0)
+ if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
+ if (prop >= port->mapsize) {
+ dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n",
+ prop, &port->mapsize);
+ ret = -EINVAL;
+ goto err_unprepare;
+ }
+
port->mapbase += prop;
+ port->mapsize -= prop;
+ }
port->iotype = UPIO_MEM;
if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 60f8fffdfd77..e17e97ea86fa 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1278,7 +1278,7 @@ static int pci_quatech_init(struct pci_dev *dev)
outl(inl(base + 0x38) | 0x00002000, base + 0x38);
tmp = inl(base + 0x3c);
outl(tmp | 0x01000000, base + 0x3c);
- outl(tmp &= ~0x01000000, base + 0x3c);
+ outl(tmp & ~0x01000000, base + 0x3c);
}
}
return 0;
@@ -1318,89 +1318,6 @@ static int pci_default_setup(struct serial_private *priv,
return setup_port(priv, port, bar, offset, board->reg_shift);
}
-static void
-pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
- unsigned int quot, unsigned int quot_frac)
-{
- int scr;
- int lcr;
-
- for (scr = 16; scr > 4; scr--) {
- unsigned int maxrate = port->uartclk / scr;
- unsigned int divisor = max(maxrate / baud, 1U);
- int delta = maxrate / divisor - baud;
-
- if (baud > maxrate + baud / 50)
- continue;
-
- if (delta > baud / 50)
- divisor++;
-
- if (divisor > 0xffff)
- continue;
-
- /* Update delta due to possible divisor change */
- delta = maxrate / divisor - baud;
- if (abs(delta) < baud / 50) {
- lcr = serial_port_in(port, UART_LCR);
- serial_port_out(port, UART_LCR, lcr | 0x80);
- serial_port_out(port, UART_DLL, divisor & 0xff);
- serial_port_out(port, UART_DLM, divisor >> 8 & 0xff);
- serial_port_out(port, 2, 16 - scr);
- serial_port_out(port, UART_LCR, lcr);
- return;
- }
- }
-}
-static int pci_pericom_setup(struct serial_private *priv,
- const struct pciserial_board *board,
- struct uart_8250_port *port, int idx)
-{
- unsigned int bar, offset = board->first_offset, maxnr;
-
- bar = FL_GET_BASE(board->flags);
- if (board->flags & FL_BASE_BARS)
- bar += idx;
- else
- offset += idx * board->uart_offset;
-
-
- maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
- (board->reg_shift + 3);
-
- if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
- return 1;
-
- port->port.set_divisor = pericom_do_set_divisor;
-
- return setup_port(priv, port, bar, offset, board->reg_shift);
-}
-
-static int pci_pericom_setup_four_at_eight(struct serial_private *priv,
- const struct pciserial_board *board,
- struct uart_8250_port *port, int idx)
-{
- unsigned int bar, offset = board->first_offset, maxnr;
-
- bar = FL_GET_BASE(board->flags);
- if (board->flags & FL_BASE_BARS)
- bar += idx;
- else
- offset += idx * board->uart_offset;
-
- if (idx==3)
- offset = 0x38;
-
- maxnr = (pci_resource_len(priv->dev, bar) - board->first_offset) >>
- (board->reg_shift + 3);
-
- if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr)
- return 1;
-
- port->port.set_divisor = pericom_do_set_divisor;
-
- return setup_port(priv, port, bar, offset, board->reg_shift);
-}
static int
ce4100_serial_setup(struct serial_private *priv,
@@ -1886,42 +1803,6 @@ pci_moxa_setup(struct serial_private *priv,
#define PCIE_DEVICE_ID_WCH_CH384_8S 0x3853
#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
-#define PCI_VENDOR_ID_ACCESIO 0x494f
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB 0x1051
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S 0x1053
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB 0x105C
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S 0x105E
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB 0x1091
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2 0x1093
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB 0x1099
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4 0x109B
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB 0x10D1
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM 0x10D3
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB 0x10DA
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM 0x10DC
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1 0x1108
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2 0x1110
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2 0x1111
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4 0x1118
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4 0x1119
-#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S 0x1152
-#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S 0x115A
-#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2 0x1190
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2 0x1191
-#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4 0x1198
-#define PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4 0x1199
-#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM 0x11D0
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4 0x105A
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4 0x105B
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8 0x106A
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8 0x106B
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4 0x1098
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8 0x10A9
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM 0x10D9
-#define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9
-#define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8
-
-
#define PCI_DEVICE_ID_MOXA_CP102E 0x1024
#define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
#define PCI_DEVICE_ID_MOXA_CP104EL_A 0x1045
@@ -2199,16 +2080,6 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.exit = pci_plx9050_exit,
},
/*
- * Pericom (Only 7954 - It have a offset jump for port 4)
- */
- {
- .vendor = PCI_VENDOR_ID_PERICOM,
- .device = PCI_DEVICE_ID_PERICOM_PI7C9X7954,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- /*
* PLX
*/
{
@@ -2238,125 +2109,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = {
.setup = pci_default_setup,
.exit = pci_plx9050_exit,
},
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup_four_at_eight,
- },
- {
- .vendor = PCI_VENDOR_ID_ACCESIO,
- .device = PCI_ANY_ID,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- .setup = pci_pericom_setup,
- }, /*
+ /*
* SBS Technologies, Inc., PMC-OCTALPRO 232
*/
{
@@ -2948,10 +2701,6 @@ enum pci_board_num_t {
pbn_wch382_2,
pbn_wch384_4,
pbn_wch384_8,
- pbn_pericom_PI7C9X7951,
- pbn_pericom_PI7C9X7952,
- pbn_pericom_PI7C9X7954,
- pbn_pericom_PI7C9X7958,
pbn_sunix_pci_1s,
pbn_sunix_pci_2s,
pbn_sunix_pci_4s,
@@ -3696,33 +3445,6 @@ static struct pciserial_board pci_boards[] = {
.uart_offset = 8,
.first_offset = 0x00,
},
- /*
- * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
- */
- [pbn_pericom_PI7C9X7951] = {
- .flags = FL_BASE0,
- .num_ports = 1,
- .base_baud = 921600,
- .uart_offset = 0x8,
- },
- [pbn_pericom_PI7C9X7952] = {
- .flags = FL_BASE0,
- .num_ports = 2,
- .base_baud = 921600,
- .uart_offset = 0x8,
- },
- [pbn_pericom_PI7C9X7954] = {
- .flags = FL_BASE0,
- .num_ports = 4,
- .base_baud = 921600,
- .uart_offset = 0x8,
- },
- [pbn_pericom_PI7C9X7958] = {
- .flags = FL_BASE0,
- .num_ports = 8,
- .base_baud = 921600,
- .uart_offset = 0x8,
- },
[pbn_sunix_pci_1s] = {
.num_ports = 1,
.base_baud = 921600,
@@ -3834,6 +3556,10 @@ static const struct pci_device_id blacklist[] = {
{ PCI_VDEVICE(EXAR, PCI_ANY_ID), },
{ PCI_VDEVICE(COMMTECH, PCI_ANY_ID), },
+ /* Pericom devices */
+ { PCI_VDEVICE(PERICOM, PCI_ANY_ID), },
+ { PCI_VDEVICE(ACCESSIO, PCI_ANY_ID), },
+
/* End of the black list */
{ }
};
@@ -5028,127 +4754,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_b3_8_115200 },
/*
- * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
- */
- { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7951,
- PCI_ANY_ID, PCI_ANY_ID,
- 0,
- 0, pbn_pericom_PI7C9X7951 },
- { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7952,
- PCI_ANY_ID, PCI_ANY_ID,
- 0,
- 0, pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7954,
- PCI_ANY_ID, PCI_ANY_ID,
- 0,
- 0, pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7958,
- PCI_ANY_ID, PCI_ANY_ID,
- 0,
- 0, pbn_pericom_PI7C9X7958 },
- /*
- * ACCES I/O Products quad
- */
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SDB,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2S,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SDB,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4S,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_2DB,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4DB,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM232_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_2SMDB,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_2SM,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SMDB,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_COM_4SM,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7951 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM422_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM485_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2S,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4S,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM232_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_MPCIE_ICM232_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_2SM,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7952 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM422_8,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM485_8,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_4,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM232_8,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_4SM,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7958 },
- { PCI_VENDOR_ID_ACCESIO, PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0,
- pbn_pericom_PI7C9X7954 },
- /*
* Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
*/
{ PCI_VENDOR_ID_TOPIC, PCI_DEVICE_ID_TOPIC_TP560,
@@ -5174,8 +4779,30 @@ static const struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
pbn_b2_4_115200 },
+ /* Brainboxes Devices */
+ /*
+ * Brainboxes UC-101
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0BA1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-235/246
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0AA1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_1_115200 },
+ /*
+ * Brainboxes UC-257
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0861,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
/*
- * BrainBoxes UC-260
+ * Brainboxes UC-260/271/701/756
*/
{ PCI_VENDOR_ID_INTASHIELD, 0x0D21,
PCI_ANY_ID, PCI_ANY_ID,
@@ -5183,7 +4810,81 @@ static const struct pci_device_id serial_pci_tbl[] = {
pbn_b2_4_115200 },
{ PCI_VENDOR_ID_INTASHIELD, 0x0E34,
PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
+ pbn_b2_4_115200 },
+ /*
+ * Brainboxes UC-268
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0841,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+ * Brainboxes UC-275/279
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0881,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_8_115200 },
+ /*
+ * Brainboxes UC-302
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x08E1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-310
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x08C1,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-313
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x08A3,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-320/324
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0A61,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_1_115200 },
+ /*
+ * Brainboxes UC-346
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0B02,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+ * Brainboxes UC-357
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0A81,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ { PCI_VENDOR_ID_INTASHIELD, 0x0A83,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_2_115200 },
+ /*
+ * Brainboxes UC-368
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0C41,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ pbn_b2_4_115200 },
+ /*
+ * Brainboxes UC-420/431
+ */
+ { PCI_VENDOR_ID_INTASHIELD, 0x0921,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
pbn_b2_4_115200 },
/*
* Perle PCI-RAS cards
diff --git a/drivers/tty/serial/8250/8250_pericom.c b/drivers/tty/serial/8250/8250_pericom.c
new file mode 100644
index 000000000000..95ff10f25d58
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_pericom.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Driver for Pericom UART */
+
+#include <linux/bits.h>
+#include <linux/module.h>
+#include <linux/overflow.h>
+#include <linux/pci.h>
+
+#include "8250.h"
+
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_2SDB 0x1051
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_2S 0x1053
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM422_4 0x105a
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM485_4 0x105b
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SDB 0x105c
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_4S 0x105e
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM422_8 0x106a
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM485_8 0x106b
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_2DB 0x1091
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM232_2 0x1093
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_4 0x1098
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_4DB 0x1099
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM232_4 0x109b
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_8 0x10a9
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_2SMDB 0x10d1
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_2SM 0x10d3
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SM 0x10d9
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SMDB 0x10da
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_4SM 0x10dc
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_COM_8SM 0x10e9
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_1 0x1108
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM422_2 0x1110
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_2 0x1111
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM422_4 0x1118
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_4 0x1119
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_2S 0x1152
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_4S 0x115a
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM232_2 0x1190
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM232_2 0x1191
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM232_4 0x1198
+#define PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM232_4 0x1199
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_2SM 0x11d0
+#define PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_4SM 0x11d8
+
+struct pericom8250 {
+ void __iomem *virt;
+ unsigned int nr;
+ int line[];
+};
+
+static void pericom_do_set_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int quot, unsigned int quot_frac)
+{
+ int scr;
+
+ for (scr = 16; scr > 4; scr--) {
+ unsigned int maxrate = port->uartclk / scr;
+ unsigned int divisor = max(maxrate / baud, 1U);
+ int delta = maxrate / divisor - baud;
+
+ if (baud > maxrate + baud / 50)
+ continue;
+
+ if (delta > baud / 50)
+ divisor++;
+
+ if (divisor > 0xffff)
+ continue;
+
+ /* Update delta due to possible divisor change */
+ delta = maxrate / divisor - baud;
+ if (abs(delta) < baud / 50) {
+ struct uart_8250_port *up = up_to_u8250p(port);
+ int lcr = serial_port_in(port, UART_LCR);
+
+ serial_port_out(port, UART_LCR, lcr | 0x80);
+ serial_dl_write(up, divisor);
+ serial_port_out(port, 2, 16 - scr);
+ serial_port_out(port, UART_LCR, lcr);
+ return;
+ }
+ }
+}
+
+static int pericom8250_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ unsigned int nr, i, bar = 0, maxnr;
+ struct pericom8250 *pericom;
+ struct uart_8250_port uart;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ maxnr = pci_resource_len(pdev, bar) >> 3;
+
+ if (pdev->vendor == PCI_VENDOR_ID_PERICOM)
+ nr = pdev->device & 0x0f;
+ else if (pdev->vendor == PCI_VENDOR_ID_ACCESSIO)
+ nr = BIT(((pdev->device & 0x38) >> 3) - 1);
+ else
+ nr = 1;
+
+ pericom = devm_kzalloc(&pdev->dev, struct_size(pericom, line, nr), GFP_KERNEL);
+ if (!pericom)
+ return -ENOMEM;
+
+ pericom->virt = pcim_iomap(pdev, bar, 0);
+ if (!pericom->virt)
+ return -ENOMEM;
+
+ memset(&uart, 0, sizeof(uart));
+
+ uart.port.dev = &pdev->dev;
+ uart.port.irq = pdev->irq;
+ uart.port.private_data = pericom;
+ uart.port.iotype = UPIO_PORT;
+ uart.port.uartclk = 921600 * 16;
+ uart.port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF | UPF_SHARE_IRQ;
+ uart.port.set_divisor = pericom_do_set_divisor;
+ for (i = 0; i < nr && i < maxnr; i++) {
+ unsigned int offset = (i == 3 && nr == 4) ? 0x38 : i * 0x8;
+
+ uart.port.iobase = pci_resource_start(pdev, bar) + offset;
+
+ dev_dbg(&pdev->dev, "Setup PCI port: port %lx, irq %d, type %d\n",
+ uart.port.iobase, uart.port.irq, uart.port.iotype);
+
+ pericom->line[i] = serial8250_register_8250_port(&uart);
+ if (pericom->line[i] < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't register serial port %lx, irq %d, type %d, error %d\n",
+ uart.port.iobase, uart.port.irq,
+ uart.port.iotype, pericom->line[i]);
+ break;
+ }
+ }
+ pericom->nr = i;
+
+ pci_set_drvdata(pdev, pericom);
+ return 0;
+}
+
+static void pericom8250_remove(struct pci_dev *pdev)
+{
+ struct pericom8250 *pericom = pci_get_drvdata(pdev);
+ unsigned int i;
+
+ for (i = 0; i < pericom->nr; i++)
+ serial8250_unregister_port(pericom->line[i]);
+}
+
+static const struct pci_device_id pericom8250_pci_ids[] = {
+ /*
+ * Pericom PI7C9X795[1248] Uno/Dual/Quad/Octal UART
+ * (Only 7954 has an offset jump for port 4)
+ */
+ { PCI_VDEVICE(PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7951) },
+ { PCI_VDEVICE(PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7952) },
+ { PCI_VDEVICE(PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7954) },
+ { PCI_VDEVICE(PERICOM, PCI_DEVICE_ID_PERICOM_PI7C9X7958) },
+
+ /*
+ * ACCES I/O Products quad
+ * (Only 7954 has an offset jump for port 4)
+ */
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_2SDB) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_2S) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM422_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM485_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SDB) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_4S) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM422_8) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM485_8) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_2DB) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM232_2) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_4DB) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM232_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM232_8) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_2SMDB) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_2SM) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SM) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_4SMDB) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_COM_4SM) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_COM_8SM) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_1) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM422_2) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_2) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM422_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM485_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_2S) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_4S) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM232_2) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM232_2) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM232_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_MPCIE_ICM232_4) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_2SM) },
+ { PCI_VDEVICE(ACCESSIO, PCI_DEVICE_ID_ACCESSIO_PCIE_ICM_4SM) },
+ { }
+};
+MODULE_DEVICE_TABLE(pci, pericom8250_pci_ids);
+
+static struct pci_driver pericom8250_pci_driver = {
+ .name = "8250_pericom",
+ .id_table = pericom8250_pci_ids,
+ .probe = pericom8250_probe,
+ .remove = pericom8250_remove,
+};
+module_pci_driver(pericom8250_pci_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Pericom UART driver");
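
The probe logic above packs a lot into a few expressions; the following walkthrough, computed from the IDs and constants in this file, may help:

/*
 * Port count:
 *   Pericom PI7C9X7954:            nr = 0x7954 & 0x0f = 4
 *   ACCES 0x105c (PCIE_COM_4SDB):  nr = BIT(((0x105c & 0x38) >> 3) - 1)
 *                                     = BIT(3 - 1) = 4
 *
 * Register layout: ports sit 8 bytes apart, except the fourth port of a
 * quad, which jumps to offset 0x38 (the "offset jump" in the comments):
 *   i = 0, 1, 2       ->  0x00, 0x08, 0x10
 *   i = 3 && nr == 4  ->  0x38
 *
 * pericom_do_set_divisor() for baud = 1500000, uartclk = 921600 * 16:
 *   scr = 16..11:  maxrate = uartclk / scr is at least 2% below the
 *                  requested baud, so these sample rates are skipped;
 *   scr = 10:      maxrate = 1474560, divisor = 1, delta = -25440,
 *                  |delta| < baud / 50 (= 30000), so DL is set to 1 and
 *                  register 2 gets 16 - 10 = 6 -- the UART samples at
 *                  10x and lands within ~1.7% of the requested rate,
 *                  which a fixed 16x sample clock could not reach.
 */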
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 46e2079ad1aa..3b12bfc1ed67 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2026,7 +2026,7 @@ void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl)
mcr = serial8250_TIOCM_to_MCR(mctrl);
- mcr = (mcr & up->mcr_mask) | up->mcr_force | up->mcr;
+ mcr |= up->mcr;
serial8250_out_MCR(up, mcr);
}
@@ -3092,7 +3092,7 @@ static ssize_t rx_trig_bytes_show(struct device *dev,
if (rxtrig_bytes < 0)
return rxtrig_bytes;
- return snprintf(buf, PAGE_SIZE, "%d\n", rxtrig_bytes);
+ return sysfs_emit(buf, "%d\n", rxtrig_bytes);
}
static int do_set_rxtrig(struct tty_port *port, unsigned char bytes)
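
sysfs_emit() is the preferred helper for sysfs show callbacks: it hard-codes the PAGE_SIZE bound, warns if the buffer is not the page-aligned one sysfs passed in, and returns the emitted length, removing the classic pass-the-wrong-size bug of snprintf(). A minimal sketch, assuming a device attribute:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	int val = 42;			/* stand-in for real driver state */

	return sysfs_emit(buf, "%d\n", val);
}
static DEVICE_ATTR_RO(foo);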
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 8cd11aa63ed5..9d415a38cc71 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -498,6 +498,14 @@ config SERIAL_8250_MID
present on the UART found on Intel Medfield SOC and various other
Intel platforms.
+config SERIAL_8250_PERICOM
+ tristate "Support for Pericom and Acces I/O serial ports"
+ default SERIAL_8250
+ depends on SERIAL_8250 && PCI
+ help
+ Selecting this option will enable handling of the extra features
+ present on the Pericom and ACCES I/O UARTs.
+
config SERIAL_8250_PXA
tristate "PXA serial port support"
depends on SERIAL_8250
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
index b9bcd73c8997..bee908f99ea0 100644
--- a/drivers/tty/serial/8250/Makefile
+++ b/drivers/tty/serial/8250/Makefile
@@ -5,6 +5,8 @@
obj-$(CONFIG_SERIAL_8250) += 8250.o 8250_base.o
8250-y := 8250_core.o
+8250-$(CONFIG_ALPHA_GENERIC) += 8250_alpha.o
+8250-$(CONFIG_ALPHA_JENSEN) += 8250_alpha.o
8250-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
8250_base-y := 8250_port.o
8250_base-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o
@@ -36,6 +38,7 @@ obj-$(CONFIG_SERIAL_8250_UNIPHIER) += 8250_uniphier.o
obj-$(CONFIG_SERIAL_8250_INGENIC) += 8250_ingenic.o
obj-$(CONFIG_SERIAL_8250_LPSS) += 8250_lpss.o
obj-$(CONFIG_SERIAL_8250_MID) += 8250_mid.o
+obj-$(CONFIG_SERIAL_8250_PERICOM) += 8250_pericom.o
obj-$(CONFIG_SERIAL_8250_PXA) += 8250_pxa.o
obj-$(CONFIG_SERIAL_8250_TEGRA) += 8250_tegra.o
obj-$(CONFIG_SERIAL_8250_BCM7271) += 8250_bcm7271.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index fc543ac97c13..0e5ccb25bdb1 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -263,7 +263,7 @@ config SERIAL_SAMSUNG_UARTS
config SERIAL_SAMSUNG_CONSOLE
bool "Support for console on Samsung SoC serial port"
- depends on SERIAL_SAMSUNG=y
+ depends on SERIAL_SAMSUNG
select SERIAL_CORE_CONSOLE
select SERIAL_EARLYCON
help
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index 23c4e0e79694..37bffe406b18 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -418,8 +418,9 @@ static int altera_jtaguart_probe(struct platform_device *pdev)
struct altera_jtaguart_platform_uart *platp =
dev_get_platdata(&pdev->dev);
struct uart_port *port;
- struct resource *res_irq, *res_mem;
+ struct resource *res_mem;
int i = pdev->id;
+ int irq;
/* -1 emphasizes that the platform must have one port, no .N suffix */
if (i == -1)
@@ -438,9 +439,11 @@ static int altera_jtaguart_probe(struct platform_device *pdev)
else
return -ENODEV;
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res_irq)
- port->irq = res_irq->start;
+ irq = platform_get_irq_optional(pdev, 0);
+ if (irq < 0 && irq != -ENXIO)
+ return irq;
+ if (irq > 0)
+ port->irq = irq;
else if (platp)
port->irq = platp->irq;
else
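
The _optional variant matters here: platform_get_irq_optional() stays silent and returns -ENXIO when no interrupt is described, which is the one error this driver wants to tolerate so it can fall back to platform data. The resulting decision table, as a sketch (the fallback shown is illustrative):

irq = platform_get_irq_optional(pdev, 0);
if (irq < 0 && irq != -ENXIO)
	return irq;		/* real failure, e.g. -EPROBE_DEFER */
if (irq > 0)
	port->irq = irq;	/* interrupt described for the device */
else
	port->irq = platp ? platp->irq : 0;	/* -ENXIO: fall back */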
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 7c5f4e966b59..64a352b40197 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -553,7 +553,6 @@ static int altera_uart_probe(struct platform_device *pdev)
struct altera_uart_platform_uart *platp = dev_get_platdata(&pdev->dev);
struct uart_port *port;
struct resource *res_mem;
- struct resource *res_irq;
int i = pdev->id;
int ret;
@@ -577,9 +576,11 @@ static int altera_uart_probe(struct platform_device *pdev)
else
return -EINVAL;
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (res_irq)
- port->irq = res_irq->start;
+ ret = platform_get_irq_optional(pdev, 0);
+ if (ret < 0 && ret != -ENXIO)
+ return ret;
+ if (ret > 0)
+ port->irq = ret;
else if (platp)
port->irq = platp->irq;
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index e744b953ca34..47654073123d 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -446,14 +446,11 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
if ((termios->c_cflag & CREAD) == 0)
uap->port.ignore_status_mask |= UART_DUMMY_RSR_RX;
- /* first, disable everything */
old_cr = readb(uap->port.membase + UART010_CR) & ~UART010_CR_MSIE;
if (UART_ENABLE_MS(port, termios->c_cflag))
old_cr |= UART010_CR_MSIE;
- writel(0, uap->port.membase + UART010_CR);
-
/* Set baud rate */
quot -= 1;
writel((quot & 0xf00) >> 8, uap->port.membase + UART010_LCRM);
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 52518a606c06..ba053a68529f 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -188,38 +188,6 @@ static struct vendor_data vendor_st = {
.get_fifosize = get_fifosize_st,
};
-static const u16 pl011_zte_offsets[REG_ARRAY_SIZE] = {
- [REG_DR] = ZX_UART011_DR,
- [REG_FR] = ZX_UART011_FR,
- [REG_LCRH_RX] = ZX_UART011_LCRH,
- [REG_LCRH_TX] = ZX_UART011_LCRH,
- [REG_IBRD] = ZX_UART011_IBRD,
- [REG_FBRD] = ZX_UART011_FBRD,
- [REG_CR] = ZX_UART011_CR,
- [REG_IFLS] = ZX_UART011_IFLS,
- [REG_IMSC] = ZX_UART011_IMSC,
- [REG_RIS] = ZX_UART011_RIS,
- [REG_MIS] = ZX_UART011_MIS,
- [REG_ICR] = ZX_UART011_ICR,
- [REG_DMACR] = ZX_UART011_DMACR,
-};
-
-static unsigned int get_fifosize_zte(struct amba_device *dev)
-{
- return 16;
-}
-
-static struct vendor_data vendor_zte = {
- .reg_offset = pl011_zte_offsets,
- .access_32b = true,
- .ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
- .fr_busy = ZX_UART01x_FR_BUSY,
- .fr_dsr = ZX_UART01x_FR_DSR,
- .fr_cts = ZX_UART01x_FR_CTS,
- .fr_ri = ZX_UART011_FR_RI,
- .get_fifosize = get_fifosize_zte,
-};
-
/* Deals with DMA transactions */
struct pl011_sgbuf {
@@ -262,7 +230,6 @@ struct uart_amba_port {
unsigned int im; /* interrupt mask */
unsigned int old_status;
unsigned int fifosize; /* vendor-specific */
- unsigned int old_cr; /* state during shutdown */
unsigned int fixed_baud; /* vendor-set fixed baud rate */
char type[12];
bool rs485_tx_started;
@@ -1615,9 +1582,6 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
container_of(port, struct uart_amba_port, port);
unsigned int cr;
- if (port->rs485.flags & SER_RS485_ENABLED)
- mctrl &= ~TIOCM_RTS;
-
cr = pl011_read(uap, REG_CR);
#define TIOCMBIT(tiocmbit, uartbit) \
@@ -1837,18 +1801,12 @@ static int pl011_startup(struct uart_port *port)
spin_lock_irq(&uap->port.lock);
- /* restore RTS and DTR */
- cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
+ cr = pl011_read(uap, REG_CR);
+ cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_RXE;
- if (port->rs485.flags & SER_RS485_ENABLED) {
- if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND)
- cr &= ~UART011_CR_RTS;
- else
- cr |= UART011_CR_RTS;
- } else {
+ if (!(port->rs485.flags & SER_RS485_ENABLED))
cr |= UART011_CR_TXE;
- }
pl011_write(cr, uap, REG_CR);
@@ -1915,7 +1873,6 @@ static void pl011_disable_uart(struct uart_amba_port *uap)
uap->port.status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
spin_lock_irq(&uap->port.lock);
cr = pl011_read(uap, REG_CR);
- uap->old_cr = cr;
cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
pl011_write(cr, uap, REG_CR);
@@ -2105,9 +2062,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
if (port->rs485.flags & SER_RS485_ENABLED)
termios->c_cflag &= ~CRTSCTS;
- /* first, disable everything */
old_cr = pl011_read(uap, REG_CR);
- pl011_write(0, uap, REG_CR);
if (termios->c_cflag & CRTSCTS) {
if (old_cr & UART011_CR_RTS)
@@ -2184,31 +2139,12 @@ static const char *pl011_type(struct uart_port *port)
}
/*
- * Release the memory region(s) being used by 'port'
- */
-static void pl011_release_port(struct uart_port *port)
-{
- release_mem_region(port->mapbase, SZ_4K);
-}
-
-/*
- * Request the memory region(s) being used by 'port'
- */
-static int pl011_request_port(struct uart_port *port)
-{
- return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
- != NULL ? 0 : -EBUSY;
-}
-
-/*
* Configure/autoconfigure the port.
*/
static void pl011_config_port(struct uart_port *port, int flags)
{
- if (flags & UART_CONFIG_TYPE) {
+ if (flags & UART_CONFIG_TYPE)
port->type = PORT_AMBA;
- pl011_request_port(port);
- }
}
/*
@@ -2223,6 +2159,8 @@ static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
ret = -EINVAL;
if (ser->baud_base < 9600)
ret = -EINVAL;
+ if (port->mapbase != (unsigned long) ser->iomem_base)
+ ret = -EINVAL;
return ret;
}
@@ -2275,8 +2213,6 @@ static const struct uart_ops amba_pl011_pops = {
.flush_buffer = pl011_dma_flush_buffer,
.set_termios = pl011_set_termios,
.type = pl011_type,
- .release_port = pl011_release_port,
- .request_port = pl011_request_port,
.config_port = pl011_config_port,
.verify_port = pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
@@ -2306,8 +2242,6 @@ static const struct uart_ops sbsa_uart_pops = {
.shutdown = sbsa_uart_shutdown,
.set_termios = sbsa_uart_set_termios,
.type = pl011_type,
- .release_port = pl011_release_port,
- .request_port = pl011_request_port,
.config_port = pl011_config_port,
.verify_port = pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
@@ -2754,7 +2688,6 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
index = pl011_probe_dt_alias(index, dev);
- uap->old_cr = 0;
uap->port.dev = dev;
uap->port.mapbase = mmiobase->start;
uap->port.membase = base;
@@ -2975,11 +2908,6 @@ static const struct amba_id pl011_ids[] = {
.mask = 0x00ffffff,
.data = &vendor_st,
},
- {
- .id = AMBA_LINUX_ID(0x00, 0x1, 0xffe),
- .mask = 0x00ffffff,
- .data = &vendor_zte,
- },
{ 0, 0 },
};
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 4379ca4842ae..8cabe50c4a33 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -707,11 +707,11 @@ static int ar933x_uart_probe(struct platform_device *pdev)
struct ar933x_uart_port *up;
struct uart_port *port;
struct resource *mem_res;
- struct resource *irq_res;
struct device_node *np;
unsigned int baud;
int id;
int ret;
+ int irq;
np = pdev->dev.of_node;
if (IS_ENABLED(CONFIG_OF) && np) {
@@ -730,11 +730,9 @@ static int ar933x_uart_probe(struct platform_device *pdev)
if (id >= CONFIG_SERIAL_AR933X_NR_UARTS)
return -EINVAL;
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq_res) {
- dev_err(&pdev->dev, "no IRQ resource\n");
- return -EINVAL;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
up = devm_kzalloc(&pdev->dev, sizeof(struct ar933x_uart_port),
GFP_KERNEL);
@@ -766,7 +764,7 @@ static int ar933x_uart_probe(struct platform_device *pdev)
port->mapbase = mem_res->start;
port->line = id;
- port->irq = irq_res->start;
+ port->irq = irq;
port->dev = &pdev->dev;
port->type = PORT_AR933X;
port->iotype = UPIO_MEM32;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 2c99a47a2535..c370eddc651b 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1004,6 +1004,13 @@ static void atmel_tx_dma(struct uart_port *port)
desc->callback = atmel_complete_tx_dma;
desc->callback_param = atmel_port;
atmel_port->cookie_tx = dmaengine_submit(desc);
+ if (dma_submit_error(atmel_port->cookie_tx)) {
+ dev_err(port->dev, "dma_submit_error %d\n",
+ atmel_port->cookie_tx);
+ return;
+ }
+
+ dma_async_issue_pending(chan);
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -1258,6 +1265,13 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
desc->callback_param = port;
atmel_port->desc_rx = desc;
atmel_port->cookie_rx = dmaengine_submit(desc);
+ if (dma_submit_error(atmel_port->cookie_rx)) {
+ dev_err(port->dev, "dma_submit_error %d\n",
+ atmel_port->cookie_rx);
+ goto chan_err;
+ }
+
+ dma_async_issue_pending(atmel_port->chan_rx);
return 0;
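
Both hunks finish the canonical dmaengine submission sequence: prepare a descriptor, submit it, validate the returned cookie with dma_submit_error(), then kick the channel with dma_async_issue_pending() -- previously the driver submitted without checking the cookie or issuing the pending queue, so a transfer could sit queued and never start. A sketch of the full sequence (error handling trimmed):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int foo_start_tx_dma(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int nents)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;

	cookie = dmaengine_submit(desc);	/* queues; does not start */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);		/* actually starts the transfer */
	return 0;
}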
@@ -2479,7 +2493,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
port->fifosize = 1;
port->dev = &pdev->dev;
port->mapbase = mpdev->resource[0].start;
- port->irq = mpdev->resource[1].start;
+ port->irq = platform_get_irq(mpdev, 0);
port->rs485_config = atmel_config_rs485;
port->iso7816_config = atmel_config_iso7816;
port->membase = NULL;
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 5fb0e84f7fd1..6471a54b616b 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -804,7 +804,7 @@ static struct uart_driver bcm_uart_driver = {
*/
static int bcm_uart_probe(struct platform_device *pdev)
{
- struct resource *res_mem, *res_irq;
+ struct resource *res_mem;
struct uart_port *port;
struct clk *clk;
int ret;
@@ -833,9 +833,10 @@ static int bcm_uart_probe(struct platform_device *pdev)
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_irq)
- return -ENODEV;
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ return ret;
+ port->irq = ret;
clk = clk_get(&pdev->dev, "refclk");
if (IS_ERR(clk) && pdev->dev.of_node)
@@ -845,7 +846,6 @@ static int bcm_uart_probe(struct platform_device *pdev)
return -ENODEV;
port->iotype = UPIO_MEM;
- port->irq = res_irq->start;
port->ops = &bcm_uart_ops;
port->flags = UPF_BOOT_AUTOCONF;
port->dev = &pdev->dev;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index ac5112def40d..ce3e26144689 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -247,6 +247,7 @@ enum lpuart_type {
LS1028A_LPUART,
IMX7ULP_LPUART,
IMX8QXP_LPUART,
+ IMXRT1050_LPUART,
};
struct lpuart_port {
@@ -310,6 +311,11 @@ static struct lpuart_soc_data imx8qxp_data = {
.iotype = UPIO_MEM32,
.reg_off = IMX_REG_OFF,
};
+static struct lpuart_soc_data imxrt1050_data = {
+ .devtype = IMXRT1050_LPUART,
+ .iotype = UPIO_MEM32,
+ .reg_off = IMX_REG_OFF,
+};
static const struct of_device_id lpuart_dt_ids[] = {
{ .compatible = "fsl,vf610-lpuart", .data = &vf_data, },
@@ -317,6 +323,7 @@ static const struct of_device_id lpuart_dt_ids[] = {
{ .compatible = "fsl,ls1028a-lpuart", .data = &ls1028a_data, },
{ .compatible = "fsl,imx7ulp-lpuart", .data = &imx7ulp_data, },
{ .compatible = "fsl,imx8qxp-lpuart", .data = &imx8qxp_data, },
+ { .compatible = "fsl,imxrt1050-lpuart", .data = &imxrt1050_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, lpuart_dt_ids);
@@ -1793,8 +1800,8 @@ static void lpuart_dma_shutdown(struct lpuart_port *sport)
}
if (sport->lpuart_dma_tx_use) {
- if (wait_event_interruptible(sport->dma_wait,
- !sport->dma_tx_in_progress) != false) {
+ if (wait_event_interruptible_timeout(sport->dma_wait,
+ !sport->dma_tx_in_progress, msecs_to_jiffies(300)) <= 0) {
sport->dma_tx_in_progress = false;
dmaengine_terminate_all(sport->dma_tx_chan);
}
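
The return convention is what makes the <= 0 test right: wait_event_interruptible_timeout() returns the remaining jiffies (> 0) when the condition comes true, 0 if the 300 ms budget elapses with the condition still false, and -ERESTARTSYS if a signal arrives -- so both failure modes now fall through to forcibly terminating the DMA instead of blocking shutdown indefinitely:

/* wait_event_interruptible_timeout() return values:
 *   > 0  condition became true; value = jiffies that were left
 *   = 0  timed out, condition still false
 *   < 0  -ERESTARTSYS: interrupted by a signal
 */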
@@ -2626,6 +2633,7 @@ OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup);
+OF_EARLYCON_DECLARE(lpuart32, "fsl,imxrt1050-lpuart", lpuart32_imx_early_console_setup);
EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 90f82e6c54e4..df8a0c8b8b29 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -486,18 +486,21 @@ static void imx_uart_stop_tx(struct uart_port *port)
static void imx_uart_stop_rx(struct uart_port *port)
{
struct imx_port *sport = (struct imx_port *)port;
- u32 ucr1, ucr2;
+ u32 ucr1, ucr2, ucr4;
ucr1 = imx_uart_readl(sport, UCR1);
ucr2 = imx_uart_readl(sport, UCR2);
+ ucr4 = imx_uart_readl(sport, UCR4);
if (sport->dma_is_enabled) {
ucr1 &= ~(UCR1_RXDMAEN | UCR1_ATDMAEN);
} else {
ucr1 &= ~UCR1_RRDYEN;
ucr2 &= ~UCR2_ATEN;
+ ucr4 &= ~UCR4_OREN;
}
imx_uart_writel(sport, ucr1, UCR1);
+ imx_uart_writel(sport, ucr4, UCR4);
ucr2 &= ~UCR2_RXEN;
imx_uart_writel(sport, ucr2, UCR2);
@@ -1544,7 +1547,7 @@ static void imx_uart_shutdown(struct uart_port *port)
imx_uart_writel(sport, ucr1, UCR1);
ucr4 = imx_uart_readl(sport, UCR4);
- ucr4 &= ~(UCR4_OREN | UCR4_TCEN);
+ ucr4 &= ~UCR4_TCEN;
imx_uart_writel(sport, ucr4, UCR4);
spin_unlock_irqrestore(&sport->port.lock, flags);
@@ -2482,10 +2485,12 @@ static void imx_uart_enable_wakeup(struct imx_port *sport, bool on)
if (sport->have_rtscts) {
u32 ucr1 = imx_uart_readl(sport, UCR1);
- if (on)
+ if (on) {
+ imx_uart_writel(sport, USR1_RTSD, USR1);
ucr1 |= UCR1_RTSDEN;
- else
+ } else {
ucr1 &= ~UCR1_RTSDEN;
+ }
imx_uart_writel(sport, ucr1, UCR1);
}
}
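
The extra write is the classic "acknowledge, then unmask" ordering for latched status bits: clearing USR1_RTSD (write-one-to-clear) before setting UCR1_RTSDEN guarantees the freshly enabled wake interrupt cannot fire on an RTS edge that happened while wakeup was disabled. The generic shape, with illustrative register names:

/* ack-then-unmask for a w1c status bit (illustrative names) */
u32 ctrl;

writel(FOO_EVT, base + FOO_STAT);	/* clear any stale latched event */
ctrl = readl(base + FOO_CTRL);
ctrl |= FOO_EVT_IRQ_EN;			/* only now unmask the interrupt */
writel(ctrl, base + FOO_CTRL);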
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 497b334bc845..3e324d3f0a6d 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -16,8 +16,6 @@
#include <linux/ioport.h>
#include <linux/lantiq.h>
#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
@@ -728,19 +726,23 @@ static struct uart_driver lqasc_reg = {
static int fetch_irq_lantiq(struct device *dev, struct ltq_uart_port *ltq_port)
{
struct uart_port *port = &ltq_port->port;
- struct resource irqres[3];
- int ret;
-
- ret = of_irq_to_resource_table(dev->of_node, irqres, 3);
- if (ret != 3) {
- dev_err(dev,
- "failed to get IRQs for serial port\n");
- return -ENODEV;
- }
- ltq_port->tx_irq = irqres[0].start;
- ltq_port->rx_irq = irqres[1].start;
- ltq_port->err_irq = irqres[2].start;
- port->irq = irqres[0].start;
+ struct platform_device *pdev = to_platform_device(dev);
+ int irq;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ ltq_port->tx_irq = irq;
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0)
+ return irq;
+ ltq_port->rx_irq = irq;
+ irq = platform_get_irq(pdev, 2);
+ if (irq < 0)
+ return irq;
+ ltq_port->err_irq = irq;
+
+ port->irq = ltq_port->tx_irq;
return 0;
}
@@ -793,7 +795,7 @@ static int fetch_irq_intel(struct device *dev, struct ltq_uart_port *ltq_port)
struct uart_port *port = &ltq_port->port;
int ret;
- ret = of_irq_get(dev->of_node, 0);
+ ret = platform_get_irq(to_platform_device(dev), 0);
if (ret < 0) {
dev_err(dev, "failed to fetch IRQ for serial port\n");
return ret;
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
index 2941659e5274..7f74bf7bdcff 100644
--- a/drivers/tty/serial/liteuart.c
+++ b/drivers/tty/serial/liteuart.c
@@ -436,4 +436,4 @@ module_exit(liteuart_exit);
MODULE_AUTHOR("Antmicro <www.antmicro.com>");
MODULE_DESCRIPTION("LiteUART serial driver");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform: liteuart");
+MODULE_ALIAS("platform:liteuart");
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index b199d7859961..07c4161eb4cc 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -341,7 +341,7 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
LPC32XX_HSUART_IIR(port->membase));
port->icount.overrun++;
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
- tty_schedule_flip(tport);
+ tty_flip_buffer_push(tport);
}
/* Data received? */
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index efee3935917f..45e00d928253 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -622,10 +622,7 @@ meson_serial_early_console_setup(struct earlycon_device *device, const char *opt
device->con->write = meson_serial_early_console_write;
return 0;
}
-/* Legacy bindings, should be removed when no more used */
-OF_EARLYCON_DECLARE(meson, "amlogic,meson-uart",
- meson_serial_early_console_setup);
-/* Stable bindings */
+
OF_EARLYCON_DECLARE(meson, "amlogic,meson-ao-uart",
meson_serial_early_console_setup);
@@ -668,25 +665,6 @@ static inline struct clk *meson_uart_probe_clock(struct device *dev,
return clk;
}
-/*
- * This function gets clocks in the legacy non-stable DT bindings.
- * This code will be remove once all the platforms switch to the
- * new DT bindings.
- */
-static int meson_uart_probe_clocks_legacy(struct platform_device *pdev,
- struct uart_port *port)
-{
- struct clk *clk = NULL;
-
- clk = meson_uart_probe_clock(&pdev->dev, NULL);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
-
- port->uartclk = clk_get_rate(clk);
-
- return 0;
-}
-
static int meson_uart_probe_clocks(struct platform_device *pdev,
struct uart_port *port)
{
@@ -713,10 +691,11 @@ static int meson_uart_probe_clocks(struct platform_device *pdev,
static int meson_uart_probe(struct platform_device *pdev)
{
- struct resource *res_mem, *res_irq;
+ struct resource *res_mem;
struct uart_port *port;
u32 fifosize = 64; /* Default is 64, 128 for EE UART_0 */
int ret = 0;
+ int irq;
if (pdev->dev.of_node)
pdev->id = of_alias_get_id(pdev->dev.of_node, "serial");
@@ -739,9 +718,9 @@ static int meson_uart_probe(struct platform_device *pdev)
if (!res_mem)
return -ENODEV;
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_irq)
- return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
of_property_read_u32(pdev->dev.of_node, "fifo-size", &fifosize);
@@ -754,19 +733,14 @@ static int meson_uart_probe(struct platform_device *pdev)
if (!port)
return -ENOMEM;
- /* Use legacy way until all platforms switch to new bindings */
- if (of_device_is_compatible(pdev->dev.of_node, "amlogic,meson-uart"))
- ret = meson_uart_probe_clocks_legacy(pdev, port);
- else
- ret = meson_uart_probe_clocks(pdev, port);
-
+ ret = meson_uart_probe_clocks(pdev, port);
if (ret)
return ret;
port->iotype = UPIO_MEM;
port->mapbase = res_mem->start;
port->mapsize = resource_size(res_mem);
- port->irq = res_irq->start;
+ port->irq = irq;
port->flags = UPF_BOOT_AUTOCONF | UPF_LOW_LATENCY;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_MESON_CONSOLE);
port->dev = &pdev->dev;
@@ -804,9 +778,6 @@ static int meson_uart_remove(struct platform_device *pdev)
}
static const struct of_device_id meson_uart_dt_match[] = {
- /* Legacy bindings, should be removed when no more used */
- { .compatible = "amlogic,meson-uart" },
- /* Stable bindings */
{ .compatible = "amlogic,meson6-uart" },
{ .compatible = "amlogic,meson8-uart" },
{ .compatible = "amlogic,meson8b-uart" },
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 489d19274f9a..23c94b927776 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/atomic.h>
+#include <linux/dma/qcom_adm.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
@@ -290,6 +291,7 @@ static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base)
{
struct device *dev = msm_port->uart.dev;
struct dma_slave_config conf;
+ struct qcom_adm_peripheral_config periph_conf = {};
struct msm_dma *dma;
u32 crci = 0;
int ret;
@@ -308,7 +310,11 @@ static void msm_request_tx_dma(struct msm_port *msm_port, resource_size_t base)
conf.device_fc = true;
conf.dst_addr = base + UARTDM_TF;
conf.dst_maxburst = UARTDM_BURST_SIZE;
- conf.slave_id = crci;
+ if (crci) {
+ conf.peripheral_config = &periph_conf;
+ conf.peripheral_size = sizeof(periph_conf);
+ periph_conf.crci = crci;
+ }
ret = dmaengine_slave_config(dma->chan, &conf);
if (ret)
@@ -333,6 +339,7 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
{
struct device *dev = msm_port->uart.dev;
struct dma_slave_config conf;
+ struct qcom_adm_peripheral_config periph_conf = {};
struct msm_dma *dma;
u32 crci = 0;
int ret;
@@ -355,7 +362,11 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
conf.device_fc = true;
conf.src_addr = base + UARTDM_RF;
conf.src_maxburst = UARTDM_BURST_SIZE;
- conf.slave_id = crci;
+ if (crci) {
+ conf.peripheral_config = &periph_conf;
+ conf.peripheral_size = sizeof(periph_conf);
+ periph_conf.crci = crci;
+ }
ret = dmaengine_slave_config(dma->chan, &conf);
if (ret)
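
This is the tree-wide retirement of dma_slave_config::slave_id: controller-specific sideband data now rides in the peripheral_config/peripheral_size pair, here a struct qcom_adm_peripheral_config carrying the CRCI number that only the ADM controller consumes. A sketch of the pattern, assuming a slave channel has already been requested:

#include <linux/dmaengine.h>
#include <linux/dma/qcom_adm.h>

static int foo_config_chan(struct dma_chan *chan, dma_addr_t fifo, u32 crci)
{
	struct qcom_adm_peripheral_config pcfg = { .crci = crci };
	struct dma_slave_config conf = {
		.direction    = DMA_MEM_TO_DEV,
		.dst_addr     = fifo,
		.dst_maxburst = 16,
	};

	if (crci) {			/* only the ADM engine needs this */
		conf.peripheral_config = &pcfg;
		conf.peripheral_size = sizeof(pcfg);
	}
	return dmaengine_slave_config(chan, &conf);
}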
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 12ce150b0ad4..5359236b32d6 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1702,17 +1702,21 @@ extern struct platform_device scc_a_pdev, scc_b_pdev;
static int __init pmz_init_port(struct uart_pmac_port *uap)
{
- struct resource *r_ports, *r_irq;
+ struct resource *r_ports;
+ int irq;
r_ports = platform_get_resource(uap->pdev, IORESOURCE_MEM, 0);
- r_irq = platform_get_resource(uap->pdev, IORESOURCE_IRQ, 0);
- if (!r_ports || !r_irq)
+ if (!r_ports)
return -ENODEV;
+ irq = platform_get_irq(uap->pdev, 0);
+ if (irq < 0)
+ return irq;
+
uap->port.mapbase = r_ports->start;
uap->port.membase = (unsigned char __iomem *) r_ports->start;
uap->port.iotype = UPIO_MEM;
- uap->port.irq = r_irq->start;
+ uap->port.irq = irq;
uap->port.uartclk = ZS_CLOCK;
uap->port.fifosize = 1;
uap->port.ops = &pmz_pops;
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 41319ef96fa6..30b099746a75 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -842,14 +842,18 @@ static int serial_pxa_probe_dt(struct platform_device *pdev,
static int serial_pxa_probe(struct platform_device *dev)
{
struct uart_pxa_port *sport;
- struct resource *mmres, *irqres;
+ struct resource *mmres;
int ret;
+ int irq;
mmres = platform_get_resource(dev, IORESOURCE_MEM, 0);
- irqres = platform_get_resource(dev, IORESOURCE_IRQ, 0);
- if (!mmres || !irqres)
+ if (!mmres)
return -ENODEV;
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0)
+ return irq;
+
sport = kzalloc(sizeof(struct uart_pxa_port), GFP_KERNEL);
if (!sport)
return -ENOMEM;
@@ -869,7 +873,7 @@ static int serial_pxa_probe(struct platform_device *dev)
sport->port.type = PORT_PXA;
sport->port.iotype = UPIO_MEM;
sport->port.mapbase = mmres->start;
- sport->port.irq = irqres->start;
+ sport->port.irq = irq;
sport->port.fifosize = 64;
sport->port.ops = &serial_pxa_pops;
sport->port.dev = &dev->dev;
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index ca084c10d0bb..d002a4e48ed9 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -65,7 +65,6 @@ enum s3c24xx_port_type {
struct s3c24xx_uart_info {
char *name;
enum s3c24xx_port_type type;
- bool has_usi;
unsigned int port_type;
unsigned int fifosize;
unsigned long rx_fifomask;
@@ -1357,28 +1356,6 @@ static int apple_s5l_serial_startup(struct uart_port *port)
return ret;
}
-static void exynos_usi_init(struct uart_port *port)
-{
- struct s3c24xx_uart_port *ourport = to_ourport(port);
- struct s3c24xx_uart_info *info = ourport->info;
- unsigned int val;
-
- if (!info->has_usi)
- return;
-
- /* Clear the software reset of USI block (it's set at startup) */
- val = rd_regl(port, USI_CON);
- val &= ~USI_CON_RESET_MASK;
- wr_regl(port, USI_CON, val);
- udelay(1);
-
- /* Continuously provide the clock to USI IP w/o gating (for Rx mode) */
- val = rd_regl(port, USI_OPTION);
- val &= ~USI_OPTION_HWACG_MASK;
- val |= USI_OPTION_HWACG_CLKREQ_ON;
- wr_regl(port, USI_OPTION, val);
-}
-
/* power management control */
static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
@@ -1405,8 +1382,6 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
if (!IS_ERR(ourport->baudclk))
clk_prepare_enable(ourport->baudclk);
-
- exynos_usi_init(port);
break;
default:
dev_err(port->dev, "s3c24xx_serial: unknown pm %d\n", level);
@@ -1740,15 +1715,21 @@ s3c24xx_serial_verify_port(struct uart_port *port, struct serial_struct *ser)
static struct console s3c24xx_serial_console;
-static int __init s3c24xx_serial_console_init(void)
+static void __init s3c24xx_serial_register_console(void)
{
register_console(&s3c24xx_serial_console);
- return 0;
}
-console_initcall(s3c24xx_serial_console_init);
+
+static void s3c24xx_serial_unregister_console(void)
+{
+ if (s3c24xx_serial_console.flags & CON_ENABLED)
+ unregister_console(&s3c24xx_serial_console);
+}
#define S3C24XX_SERIAL_CONSOLE &s3c24xx_serial_console
#else
+static inline void s3c24xx_serial_register_console(void) { }
+static inline void s3c24xx_serial_unregister_console(void) { }
#define S3C24XX_SERIAL_CONSOLE NULL
#endif
@@ -2130,8 +2111,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
if (ret)
pr_warn("uart: failed to enable baudclk\n");
- exynos_usi_init(port);
-
/* Keep all interrupts masked and cleared */
switch (ourport->info->type) {
case TYPE_S3C6400:
@@ -2521,7 +2500,8 @@ s3c24xx_serial_console_write(struct console *co, const char *s,
uart_console_write(cons_uart, s, count, s3c24xx_serial_console_putchar);
}
-static void __init
+/* Shouldn't be __init, as it can be instantiated from another module */
+static void
s3c24xx_serial_get_options(struct uart_port *port, int *baud,
int *parity, int *bits)
{
@@ -2584,7 +2564,8 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
}
}
-static int __init
+/* Shouldn't be __init, as it can be instantiated from another module */
+static int
s3c24xx_serial_console_setup(struct console *co, char *options)
{
struct uart_port *port;
@@ -2780,11 +2761,10 @@ static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
#endif
#if defined(CONFIG_ARCH_EXYNOS)
-#define EXYNOS_COMMON_SERIAL_DRV_DATA(_has_usi) \
+#define EXYNOS_COMMON_SERIAL_DRV_DATA() \
.info = &(struct s3c24xx_uart_info) { \
.name = "Samsung Exynos UART", \
.type = TYPE_S3C6400, \
- .has_usi = _has_usi, \
.port_type = PORT_S3C6400, \
.has_divslot = 1, \
.rx_fifomask = S5PV210_UFSTAT_RXMASK, \
@@ -2805,17 +2785,17 @@ static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
} \
static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(false),
+ EXYNOS_COMMON_SERIAL_DRV_DATA(),
.fifosize = { 256, 64, 16, 16 },
};
static struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(false),
+ EXYNOS_COMMON_SERIAL_DRV_DATA(),
.fifosize = { 64, 256, 16, 256 },
};
static struct s3c24xx_serial_drv_data exynos850_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(true),
+ EXYNOS_COMMON_SERIAL_DRV_DATA(),
.fifosize = { 256, 64, 64, 64 },
};
@@ -2926,7 +2906,29 @@ static struct platform_driver samsung_serial_driver = {
},
};
-module_platform_driver(samsung_serial_driver);
+static int __init samsung_serial_init(void)
+{
+ int ret;
+
+ s3c24xx_serial_register_console();
+
+ ret = platform_driver_register(&samsung_serial_driver);
+ if (ret) {
+ s3c24xx_serial_unregister_console();
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit samsung_serial_exit(void)
+{
+ platform_driver_unregister(&samsung_serial_driver);
+ s3c24xx_serial_unregister_console();
+}
+
+module_init(samsung_serial_init);
+module_exit(samsung_serial_exit);
#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
/*
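The samsung_tty change above replaces console_initcall(), which only runs for built-in code, with explicit module init/exit so the console is registered even when the driver is built as a module, and unregistered again if registering the platform driver fails. A minimal sketch of the same ordering for a hypothetical driver; all foo_ names are illustrative:

#include <linux/console.h>
#include <linux/platform_device.h>

static struct console foo_console;		/* assumed defined elsewhere */
static struct platform_driver foo_driver;	/* assumed defined elsewhere */

static int __init foo_serial_init(void)
{
	int ret;

	/* Register the console first, before any port can bind. */
	register_console(&foo_console);

	ret = platform_driver_register(&foo_driver);
	if (ret && (foo_console.flags & CON_ENABLED))
		unregister_console(&foo_console);

	return ret;
}

static void __exit foo_serial_exit(void)
{
	platform_driver_unregister(&foo_driver);
	/* Only unregister if register_console() actually enabled it. */
	if (foo_console.flags & CON_ENABLED)
		unregister_console(&foo_console);
}

module_init(foo_serial_init);
module_exit(foo_serial_exit);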
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 61e3dd0222af..0db90be4c3bc 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -144,6 +144,11 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
unsigned long flags;
unsigned int old;
+ if (port->rs485.flags & SER_RS485_ENABLED) {
+ set &= ~TIOCM_RTS;
+ clear &= ~TIOCM_RTS;
+ }
+
spin_lock_irqsave(&port->lock, flags);
old = port->mctrl;
port->mctrl = (old & ~clear) | set;
@@ -157,23 +162,10 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
static void uart_port_dtr_rts(struct uart_port *uport, int raise)
{
- int rs485_on = uport->rs485_config &&
- (uport->rs485.flags & SER_RS485_ENABLED);
- int RTS_after_send = !!(uport->rs485.flags & SER_RS485_RTS_AFTER_SEND);
-
- if (raise) {
- if (rs485_on && !RTS_after_send) {
- uart_set_mctrl(uport, TIOCM_DTR);
- uart_clear_mctrl(uport, TIOCM_RTS);
- } else {
- uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
- }
- } else {
- unsigned int clear = TIOCM_DTR;
-
- clear |= (!rs485_on || !RTS_after_send) ? TIOCM_RTS : 0;
- uart_clear_mctrl(uport, clear);
- }
+ if (raise)
+ uart_set_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
+ else
+ uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
}
/*
@@ -1075,11 +1067,6 @@ uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
goto out;
if (!tty_io_error(tty)) {
- if (uport->rs485.flags & SER_RS485_ENABLED) {
- set &= ~TIOCM_RTS;
- clear &= ~TIOCM_RTS;
- }
-
uart_update_mctrl(uport, set, clear);
ret = 0;
}
@@ -1701,17 +1688,13 @@ static void uart_port_shutdown(struct tty_port *port)
*/
wake_up_interruptible(&port->delta_msr_wait);
- /*
- * Free the IRQ and disable the port.
- */
- if (uport)
+ if (uport) {
+ /* Free the IRQ and disable the port. */
uport->ops->shutdown(uport);
- /*
- * Ensure that the IRQ handler isn't running on another CPU.
- */
- if (uport)
+ /* Ensure that the IRQ handler isn't running on another CPU. */
synchronize_irq(uport->irq);
+ }
}
static int uart_carrier_raised(struct tty_port *port)
@@ -2393,7 +2376,11 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
* We probably don't need a spinlock around this, but
*/
spin_lock_irqsave(&port->lock, flags);
- port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR);
+ port->mctrl &= TIOCM_DTR;
+ if (port->rs485.flags & SER_RS485_ENABLED &&
+ !(port->rs485.flags & SER_RS485_RTS_AFTER_SEND))
+ port->mctrl |= TIOCM_RTS;
+ port->ops->set_mctrl(port, port->mctrl);
spin_unlock_irqrestore(&port->lock, flags);
/*
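The serial_core hunks centralize RS485 RTS ownership: uart_update_mctrl() now filters TIOCM_RTS whenever SER_RS485_ENABLED is set (instead of each caller doing it), uart_port_dtr_rts() loses its RS485 special case, and uart_configure_port() derives the idle RTS state from SER_RS485_RTS_AFTER_SEND. A hedged sketch of that last computation; the foo_ helper is illustrative, not part of serial_core:

#include <linux/serial_core.h>

/* Sketch: initial modem-control state at port configuration time. */
static unsigned int foo_initial_mctrl(struct uart_port *port)
{
	unsigned int mctrl = port->mctrl & TIOCM_DTR;

	/* In RS485 mode the core owns RTS; drive it to its idle level. */
	if (port->rs485.flags & SER_RS485_ENABLED &&
	    !(port->rs485.flags & SER_RS485_RTS_AFTER_SEND))
		mctrl |= TIOCM_RTS;

	return mctrl;
}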
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 89ee43061d3a..968967d722d4 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -37,6 +37,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/scatterlist.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
@@ -895,11 +896,9 @@ static void sci_receive_chars(struct uart_port *port)
if (status & SCxSR_FER(port)) {
flag = TTY_FRAME;
port->icount.frame++;
- dev_notice(port->dev, "frame error\n");
} else if (status & SCxSR_PER(port)) {
flag = TTY_PARITY;
port->icount.parity++;
- dev_notice(port->dev, "parity error\n");
} else
flag = TTY_NORMAL;
@@ -939,8 +938,6 @@ static int sci_handle_errors(struct uart_port *port)
/* overrun error */
if (tty_insert_flip_char(tport, 0, TTY_OVERRUN))
copied++;
-
- dev_notice(port->dev, "overrun error\n");
}
if (status & SCxSR_FER(port)) {
@@ -949,8 +946,6 @@ static int sci_handle_errors(struct uart_port *port)
if (tty_insert_flip_char(tport, 0, TTY_FRAME))
copied++;
-
- dev_notice(port->dev, "frame error\n");
}
if (status & SCxSR_PER(port)) {
@@ -959,8 +954,6 @@ static int sci_handle_errors(struct uart_port *port)
if (tty_insert_flip_char(tport, 0, TTY_PARITY))
copied++;
-
- dev_notice(port->dev, "parity error\n");
}
if (copied)
@@ -990,8 +983,6 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
tty_insert_flip_char(tport, 0, TTY_OVERRUN);
tty_flip_buffer_push(tport);
-
- dev_dbg(port->dev, "overrun error\n");
copied++;
}
@@ -1013,8 +1004,6 @@ static int sci_handle_breaks(struct uart_port *port)
/* Notify of BREAK */
if (tty_insert_flip_char(tport, 0, TTY_BREAK))
copied++;
-
- dev_dbg(port->dev, "BREAK detected\n");
}
if (copied)
@@ -2778,44 +2767,29 @@ static int sci_init_clocks(struct sci_port *sci_port, struct device *dev)
clk_names[SCI_SCK] = "hsck";
for (i = 0; i < SCI_NUM_CLKS; i++) {
- clk = devm_clk_get(dev, clk_names[i]);
- if (PTR_ERR(clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- if (IS_ERR(clk) && i == SCI_FCK) {
- /*
- * "fck" used to be called "sci_ick", and we need to
- * maintain DT backward compatibility.
- */
- clk = devm_clk_get(dev, "sci_ick");
- if (PTR_ERR(clk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- if (!IS_ERR(clk))
- goto found;
+ clk = devm_clk_get_optional(dev, clk_names[i]);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ if (!clk && i == SCI_FCK) {
/*
* Not all SH platforms declare a clock lookup entry
* for SCI devices, in which case we need to get the
* global "peripheral_clk" clock.
*/
clk = devm_clk_get(dev, "peripheral_clk");
- if (!IS_ERR(clk))
- goto found;
-
- dev_err(dev, "failed to get %s (%ld)\n", clk_names[i],
- PTR_ERR(clk));
- return PTR_ERR(clk);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk),
+ "failed to get %s\n",
+ clk_names[i]);
}
-found:
- if (IS_ERR(clk))
- dev_dbg(dev, "failed to get %s (%ld)\n", clk_names[i],
- PTR_ERR(clk));
+ if (!clk)
+ dev_dbg(dev, "failed to get %s\n", clk_names[i]);
else
dev_dbg(dev, "clk %s is %pC rate %lu\n", clk_names[i],
clk, clk_get_rate(clk));
- sci_port->clks[i] = IS_ERR(clk) ? NULL : clk;
+ sci_port->clks[i] = clk;
}
return 0;
}
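The sci_init_clocks() rewrite leans on the devm_clk_get_optional() contract: it returns NULL when the clock simply is not described (instead of -ENOENT), and an ERR_PTR, including -EPROBE_DEFER, for real errors, so a single IS_ERR() check replaces the old goto chain. A minimal sketch of that contract; foo_get_fck is illustrative:

#include <linux/clk.h>

static struct clk *foo_get_fck(struct device *dev)
{
	struct clk *clk;

	clk = devm_clk_get_optional(dev, "fck");
	if (IS_ERR(clk))		/* real error, e.g. -EPROBE_DEFER */
		return clk;
	if (!clk)			/* clock not described: fall back */
		clk = devm_clk_get(dev, "peripheral_clk");

	return clk;
}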
@@ -3180,6 +3154,9 @@ static const struct of_device_id of_sci_match[] = {
}, {
.compatible = "renesas,rcar-gen3-scif",
.data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
+ }, {
+ .compatible = "renesas,rcar-gen4-scif",
+ .data = SCI_OF_DATA(PORT_SCIF, SCIx_SH4_SCIF_BRG_REGTYPE),
},
/* Generic types */
{
@@ -3203,23 +3180,47 @@ static const struct of_device_id of_sci_match[] = {
};
MODULE_DEVICE_TABLE(of, of_sci_match);
+static void sci_reset_control_assert(void *data)
+{
+ reset_control_assert(data);
+}
+
static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
unsigned int *dev_id)
{
struct device_node *np = pdev->dev.of_node;
+ struct reset_control *rstc;
struct plat_sci_port *p;
struct sci_port *sp;
const void *data;
- int id;
+ int id, ret;
if (!IS_ENABLED(CONFIG_OF) || !np)
- return NULL;
+ return ERR_PTR(-EINVAL);
data = of_device_get_match_data(&pdev->dev);
+ rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(rstc))
+ return ERR_PTR(dev_err_probe(&pdev->dev, PTR_ERR(rstc),
+ "failed to get reset ctrl\n"));
+
+ ret = reset_control_deassert(rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to deassert reset %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev, sci_reset_control_assert, rstc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register assert devm action, %d\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
p = devm_kzalloc(&pdev->dev, sizeof(struct plat_sci_port), GFP_KERNEL);
if (!p)
- return NULL;
+ return ERR_PTR(-ENOMEM);
/* Get the line number from the aliases node. */
id = of_alias_get_id(np, "serial");
@@ -3227,11 +3228,11 @@ static struct plat_sci_port *sci_parse_dt(struct platform_device *pdev,
id = ffz(sci_ports_in_use);
if (id < 0) {
dev_err(&pdev->dev, "failed to get alias id (%d)\n", id);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
if (id >= ARRAY_SIZE(sci_ports)) {
dev_err(&pdev->dev, "serial%d out of range\n", id);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
sp = &sci_ports[id];
@@ -3318,8 +3319,8 @@ static int sci_probe(struct platform_device *dev)
if (dev->dev.of_node) {
p = sci_parse_dt(dev, &dev_id);
- if (p == NULL)
- return -EINVAL;
+ if (IS_ERR(p))
+ return PTR_ERR(p);
} else {
p = dev->dev.platform_data;
if (p == NULL) {
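The new reset handling in sci_parse_dt() follows the usual optional-reset idiom: get the (possibly absent) reset line, deassert it, and register a devm action so the reset is asserted again automatically on probe failure or unbind. A sketch under the same assumptions; the foo_ names are illustrative:

#include <linux/reset.h>

static void foo_reset_assert(void *data)
{
	reset_control_assert(data);
}

static int foo_deassert_reset(struct device *dev)
{
	struct reset_control *rstc;
	int ret;

	/* Optional: returns NULL when the DT has no resets property. */
	rstc = devm_reset_control_get_optional_exclusive(dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	ret = reset_control_deassert(rstc);
	if (ret)
		return ret;

	/* Re-assert the reset automatically when the device goes away. */
	return devm_add_action_or_reset(dev, foo_reset_assert, rstc);
}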
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 3244e7f6818c..9570002d07e7 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -365,6 +365,31 @@ static unsigned int stm32_usart_receive_chars(struct uart_port *port, bool force
return size;
}
+static void stm32_usart_tx_dma_terminate(struct stm32_port *stm32_port)
+{
+ dmaengine_terminate_async(stm32_port->tx_ch);
+ stm32_port->tx_dma_busy = false;
+}
+
+static bool stm32_usart_tx_dma_started(struct stm32_port *stm32_port)
+{
+ /*
+	 * We cannot use dmaengine_tx_status() to learn the status of the
+	 * DMA transfer: it does not show whether the "DMA complete"
+	 * callback of the transaction has been called yet. So we rely on
+	 * the "tx_dma_busy" flag instead to prevent two DMA transactions
+	 * from running at the same time.
+ */
+ return stm32_port->tx_dma_busy;
+}
+
+static bool stm32_usart_tx_dma_enabled(struct stm32_port *stm32_port)
+{
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+
+ return !!(readl_relaxed(stm32_port->port.membase + ofs->cr3) & USART_CR3_DMAT);
+}
+
static void stm32_usart_tx_dma_complete(void *arg)
{
struct uart_port *port = arg;
@@ -372,9 +397,8 @@ static void stm32_usart_tx_dma_complete(void *arg)
const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
unsigned long flags;
- dmaengine_terminate_async(stm32port->tx_ch);
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
- stm32port->tx_dma_busy = false;
+ stm32_usart_tx_dma_terminate(stm32port);
/* Let's see if we have pending data to send */
spin_lock_irqsave(&port->lock, flags);
@@ -428,10 +452,8 @@ static void stm32_usart_transmit_chars_pio(struct uart_port *port)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
- if (stm32_port->tx_dma_busy) {
+ if (stm32_usart_tx_dma_enabled(stm32_port))
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
- stm32_port->tx_dma_busy = false;
- }
while (!uart_circ_empty(xmit)) {
/* Check that TDR is empty before filling FIFO */
@@ -455,12 +477,13 @@ static void stm32_usart_transmit_chars_dma(struct uart_port *port)
const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
struct dma_async_tx_descriptor *desc = NULL;
- unsigned int count, i;
+ unsigned int count;
- if (stm32port->tx_dma_busy)
+ if (stm32_usart_tx_dma_started(stm32port)) {
+ if (!stm32_usart_tx_dma_enabled(stm32port))
+ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
return;
-
- stm32port->tx_dma_busy = true;
+ }
count = uart_circ_chars_pending(xmit);
@@ -491,13 +514,21 @@ static void stm32_usart_transmit_chars_dma(struct uart_port *port)
if (!desc)
goto fallback_err;
+ /*
+	 * Set the "tx_dma_busy" flag. It is cleared when
+	 * dmaengine_terminate_async() is called, and it keeps
+	 * transmit_chars_dma() from starting another DMA transaction
+	 * while the callback of the previous one has not yet run.
+ */
+ stm32port->tx_dma_busy = true;
+
desc->callback = stm32_usart_tx_dma_complete;
desc->callback_param = port;
/* Push current DMA TX transaction in the pending queue */
if (dma_submit_error(dmaengine_submit(desc))) {
/* DMA not yet started, safe to free resources */
- dmaengine_terminate_async(stm32port->tx_ch);
+ stm32_usart_tx_dma_terminate(stm32port);
goto fallback_err;
}
@@ -511,8 +542,7 @@ static void stm32_usart_transmit_chars_dma(struct uart_port *port)
return;
fallback_err:
- for (i = count; i > 0; i--)
- stm32_usart_transmit_chars_pio(port);
+ stm32_usart_transmit_chars_pio(port);
}
static void stm32_usart_transmit_chars(struct uart_port *port)
@@ -520,14 +550,27 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
struct stm32_port *stm32_port = to_stm32_port(port);
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
struct circ_buf *xmit = &port->state->xmit;
+ u32 isr;
+ int ret;
if (port->x_char) {
- if (stm32_port->tx_dma_busy)
+ if (stm32_usart_tx_dma_started(stm32_port) &&
+ stm32_usart_tx_dma_enabled(stm32_port))
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+
+ /* Check that TDR is empty before filling FIFO */
+ ret =
+ readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
+ isr,
+ (isr & USART_SR_TXE),
+ 10, 1000);
+ if (ret)
+ dev_warn(port->dev, "1 character may be erased\n");
+
writel_relaxed(port->x_char, port->membase + ofs->tdr);
port->x_char = 0;
port->icount.tx++;
- if (stm32_port->tx_dma_busy)
+ if (stm32_usart_tx_dma_started(stm32_port))
stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
return;
}
@@ -675,8 +718,11 @@ static void stm32_usart_stop_tx(struct uart_port *port)
{
struct stm32_port *stm32_port = to_stm32_port(port);
struct serial_rs485 *rs485conf = &port->rs485;
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
stm32_usart_tx_interrupt_disable(port);
+ if (stm32_usart_tx_dma_started(stm32_port) && stm32_usart_tx_dma_enabled(stm32_port))
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
if (rs485conf->flags & SER_RS485_ENABLED) {
if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
@@ -696,7 +742,7 @@ static void stm32_usart_start_tx(struct uart_port *port)
struct serial_rs485 *rs485conf = &port->rs485;
struct circ_buf *xmit = &port->state->xmit;
- if (uart_circ_empty(xmit))
+ if (uart_circ_empty(xmit) && !port->x_char)
return;
if (rs485conf->flags & SER_RS485_ENABLED) {
@@ -719,9 +765,8 @@ static void stm32_usart_flush_buffer(struct uart_port *port)
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
if (stm32_port->tx_ch) {
- dmaengine_terminate_async(stm32_port->tx_ch);
+ stm32_usart_tx_dma_terminate(stm32_port);
stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
- stm32_port->tx_dma_busy = false;
}
}
@@ -883,6 +928,12 @@ static void stm32_usart_shutdown(struct uart_port *port)
u32 val, isr;
int ret;
+ if (stm32_usart_tx_dma_enabled(stm32_port))
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+
+ if (stm32_usart_tx_dma_started(stm32_port))
+ stm32_usart_tx_dma_terminate(stm32_port);
+
/* Disable modem control interrupts */
stm32_usart_disable_ms(port);
@@ -1419,8 +1470,6 @@ static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
struct dma_slave_config config;
int ret;
- stm32port->tx_dma_busy = false;
-
stm32port->tx_buf = dma_alloc_coherent(dev, TX_BUF_L,
&stm32port->tx_dma_buf,
GFP_KERNEL);
@@ -1570,7 +1619,6 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
writel_relaxed(cr3, port->membase + ofs->cr3);
if (stm32_port->tx_ch) {
- dmaengine_terminate_async(stm32_port->tx_ch);
stm32_usart_of_dma_tx_remove(stm32_port, pdev);
dma_release_channel(stm32_port->tx_ch);
}
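Beyond the tx_dma_busy/DMAT split, the stm32 x_char path now waits for the transmit data register to drain before writing the out-of-band character, using the atomic-context poll helper. A minimal sketch of that idiom; foo_wait_txe is illustrative and USART_SR_TXE comes from the driver's stm32-usart.h:

#include <linux/iopoll.h>

static int foo_wait_txe(void __iomem *isr_reg)
{
	u32 isr;

	/* Poll every 10 us, give up after 1 ms; safe in atomic context. */
	return readl_relaxed_poll_timeout_atomic(isr_reg, isr,
						 isr & USART_SR_TXE,
						 10, 1000);
}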
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index e23916bfbb60..feab952aec16 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -264,7 +264,7 @@ struct stm32_port {
u32 cr1_irq; /* USART_CR1_RXNEIE or RTOIE */
u32 cr3_irq; /* USART_CR3_RXFTIE */
int last_res;
- bool tx_dma_busy; /* dma tx busy */
+ bool tx_dma_busy; /* dma tx transaction in progress */
bool throttled; /* port throttled */
bool hw_flow_control;
bool swap; /* swap RX & TX pins */
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index 425a016f9db7..98b2f4fb9a99 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -127,7 +127,8 @@ static void serial_out(struct uart_sunsu_port *up, int offset, int value)
* gate outputs a logical one. Since we use level triggered interrupts
* we have lockup and watchdog reset. We cannot mask IRQ because
* keyboard shares IRQ with us (Word has it as Bob Smelik's design).
- * This problem is similar to what Alpha people suffer, see serial.c.
+ * This problem is similar to what Alpha people suffer, see
+ * 8250_alpha.c.
*/
if (offset == UART_MCR)
value |= UART_MCR_OUT2;
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index d3d9566e5dbd..e1fa52d31474 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -626,7 +626,7 @@ static struct uart_driver ulite_uart_driver = {
*
* Returns: 0 on success, <0 otherwise
*/
-static int ulite_assign(struct device *dev, int id, u32 base, int irq,
+static int ulite_assign(struct device *dev, int id, phys_addr_t base, int irq,
struct uartlite_data *pdata)
{
struct uart_port *port;
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index e15b2bf69904..9adfe3dc970f 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -621,21 +621,25 @@ static const struct of_device_id wmt_dt_ids[] = {
static int vt8500_serial_probe(struct platform_device *pdev)
{
struct vt8500_port *vt8500_port;
- struct resource *mmres, *irqres;
+ struct resource *mmres;
struct device_node *np = pdev->dev.of_node;
const unsigned int *flags;
int ret;
int port;
+ int irq;
flags = of_device_get_match_data(&pdev->dev);
if (!flags)
return -EINVAL;
mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!mmres || !irqres)
+ if (!mmres)
return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
if (np) {
port = of_alias_get_id(np, "serial");
if (port >= VT8500_MAX_PORTS)
@@ -688,7 +692,7 @@ static int vt8500_serial_probe(struct platform_device *pdev)
vt8500_port->uart.type = PORT_VT8500;
vt8500_port->uart.iotype = UPIO_MEM;
vt8500_port->uart.mapbase = mmres->start;
- vt8500_port->uart.irq = irqres->start;
+ vt8500_port->uart.irq = irq;
vt8500_port->uart.fifosize = 16;
vt8500_port->uart.ops = &vt8500_uart_pops;
vt8500_port->uart.line = port;
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 6c7e65b1d9a1..646510476c30 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -39,20 +39,15 @@
#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
/**
- * tty_buffer_lock_exclusive - gain exclusive access to buffer
- * tty_buffer_unlock_exclusive - release exclusive access
+ * tty_buffer_lock_exclusive - gain exclusive access to buffer
+ * @port: tty port owning the flip buffer
*
- * @port: tty port owning the flip buffer
+ * Guarantees safe use of the &tty_ldisc_ops.receive_buf() method by excluding
+ * the buffer work and any pending flush from using the flip buffer. Data can
+ * continue to be added concurrently to the flip buffer from the driver side.
*
- * Guarantees safe use of the line discipline's receive_buf() method by
- * excluding the buffer work and any pending flush from using the flip
- * buffer. Data can continue to be added concurrently to the flip buffer
- * from the driver side.
- *
- * On release, the buffer work is restarted if there is data in the
- * flip buffer
+ * See also tty_buffer_unlock_exclusive().
*/
-
void tty_buffer_lock_exclusive(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
@@ -62,6 +57,14 @@ void tty_buffer_lock_exclusive(struct tty_port *port)
}
EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);
+/**
+ * tty_buffer_unlock_exclusive - release exclusive access
+ * @port: tty port owning the flip buffer
+ *
+ * The buffer work is restarted if there is data in the flip buffer.
+ *
+ * See also tty_buffer_lock_exclusive().
+ */
void tty_buffer_unlock_exclusive(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
@@ -77,17 +80,16 @@ void tty_buffer_unlock_exclusive(struct tty_port *port)
EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);
/**
- * tty_buffer_space_avail - return unused buffer space
- * @port: tty port owning the flip buffer
+ * tty_buffer_space_avail - return unused buffer space
+ * @port: tty port owning the flip buffer
*
- * Returns the # of bytes which can be written by the driver without
- * reaching the buffer limit.
+ * Returns: the # of bytes which can be written by the driver without reaching
+ * the buffer limit.
*
- * Note: this does not guarantee that memory is available to write
- * the returned # of bytes (use tty_prepare_flip_string_xxx() to
- * pre-allocate if memory guarantee is required).
+ * Note: this does not guarantee that memory is available to write the returned
+ * # of bytes (use tty_prepare_flip_string() to pre-allocate if memory
+ * guarantee is required).
*/
-
unsigned int tty_buffer_space_avail(struct tty_port *port)
{
int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);
@@ -107,13 +109,12 @@ static void tty_buffer_reset(struct tty_buffer *p, size_t size)
}
/**
- * tty_buffer_free_all - free buffers used by a tty
- * @port: tty port to free from
+ * tty_buffer_free_all - free buffers used by a tty
+ * @port: tty port to free from
*
- * Remove all the buffers pending on a tty whether queued with data
- * or in the free ring. Must be called when the tty is no longer in use
+ * Remove all the buffers pending on a tty whether queued with data or in the
+ * free ring. Must be called when the tty is no longer in use.
*/
-
void tty_buffer_free_all(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
@@ -142,17 +143,17 @@ void tty_buffer_free_all(struct tty_port *port)
}
/**
- * tty_buffer_alloc - allocate a tty buffer
- * @port: tty port
- * @size: desired size (characters)
- *
- * Allocate a new tty buffer to hold the desired number of characters.
- * We round our buffers off in 256 character chunks to get better
- * allocation behaviour.
- * Return NULL if out of memory or the allocation would exceed the
- * per device queue
+ * tty_buffer_alloc - allocate a tty buffer
+ * @port: tty port
+ * @size: desired size (characters)
+ *
+ * Allocate a new tty buffer to hold the desired number of characters. We
+ * round our buffers off in 256 character chunks to get better allocation
+ * behaviour.
+ *
+ * Returns: %NULL if out of memory or the allocation would exceed the per
+ * device queue.
*/
-
static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
struct llist_node *free;
@@ -185,14 +186,13 @@ found:
}
/**
- * tty_buffer_free - free a tty buffer
- * @port: tty port owning the buffer
- * @b: the buffer to free
+ * tty_buffer_free - free a tty buffer
+ * @port: tty port owning the buffer
+ * @b: the buffer to free
*
- * Free a tty buffer, or add it to the free list according to our
- * internal strategy
+ * Free a tty buffer, or add it to the free list according to our internal
+ * strategy.
*/
-
static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
{
struct tty_bufhead *buf = &port->buf;
@@ -207,17 +207,15 @@ static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
}
/**
- * tty_buffer_flush - flush full tty buffers
- * @tty: tty to flush
- * @ld: optional ldisc ptr (must be referenced)
+ * tty_buffer_flush - flush full tty buffers
+ * @tty: tty to flush
+ * @ld: optional ldisc ptr (must be referenced)
*
- * flush all the buffers containing receive data. If ld != NULL,
- * flush the ldisc input buffer.
+ * Flush all the buffers containing receive data. If @ld != %NULL, flush the
+ * ldisc input buffer.
*
- * Locking: takes buffer lock to ensure single-threaded flip buffer
- * 'consumer'
+ * Locking: takes buffer lock to ensure single-threaded flip buffer 'consumer'.
*/
-
void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
{
struct tty_port *port = tty->port;
@@ -244,17 +242,18 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
}
/**
- * __tty_buffer_request_room - grow tty buffer if needed
- * @port: tty port
- * @size: size desired
- * @flags: buffer flags if new buffer allocated (default = 0)
+ * __tty_buffer_request_room - grow tty buffer if needed
+ * @port: tty port
+ * @size: size desired
+ * @flags: buffer flags if new buffer allocated (default = 0)
+ *
+ * Make at least @size bytes of linear space available for the tty buffer.
*
- * Make at least size bytes of linear space available for the tty
- * buffer. If we fail return the size we managed to find.
+ * Will change over to a new buffer if the current buffer is encoded as
+ * %TTY_NORMAL (so has no flags buffer) and the new buffer requires a flags
+ * buffer.
*
- * Will change over to a new buffer if the current buffer is encoded as
- * TTY_NORMAL (so has no flags buffer) and the new buffer requires
- * a flags buffer.
+ * Returns: the size we managed to find.
*/
static int __tty_buffer_request_room(struct tty_port *port, size_t size,
int flags)
@@ -300,16 +299,17 @@ int tty_buffer_request_room(struct tty_port *port, size_t size)
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
/**
- * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
- * @port: tty port
- * @chars: characters
- * @flag: flag value for each character
- * @size: size
- *
- * Queue a series of bytes to the tty buffering. All the characters
- * passed are marked with the supplied flag. Returns the number added.
+ * tty_insert_flip_string_fixed_flag - add characters to the tty buffer
+ * @port: tty port
+ * @chars: characters
+ * @flag: flag value for each character
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. All the characters passed are
+ * marked with the supplied flag.
+ *
+ * Returns: the number added.
*/
-
int tty_insert_flip_string_fixed_flag(struct tty_port *port,
const unsigned char *chars, char flag, size_t size)
{
@@ -338,17 +338,17 @@ int tty_insert_flip_string_fixed_flag(struct tty_port *port,
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
/**
- * tty_insert_flip_string_flags - Add characters to the tty buffer
- * @port: tty port
- * @chars: characters
- * @flags: flag bytes
- * @size: size
- *
- * Queue a series of bytes to the tty buffering. For each character
- * the flags array indicates the status of the character. Returns the
- * number added.
+ * tty_insert_flip_string_flags - add characters to the tty buffer
+ * @port: tty port
+ * @chars: characters
+ * @flags: flag bytes
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. For each character the flags
+ * array indicates the status of the character.
+ *
+ * Returns: the number added.
*/
-
int tty_insert_flip_string_flags(struct tty_port *port,
const unsigned char *chars, const char *flags, size_t size)
{
@@ -376,13 +376,13 @@ int tty_insert_flip_string_flags(struct tty_port *port,
EXPORT_SYMBOL(tty_insert_flip_string_flags);
/**
- * __tty_insert_flip_char - Add one character to the tty buffer
- * @port: tty port
- * @ch: character
- * @flag: flag byte
+ * __tty_insert_flip_char - add one character to the tty buffer
+ * @port: tty port
+ * @ch: character
+ * @flag: flag byte
*
- * Queue a single byte to the tty buffering, with an optional flag.
- * This is the slow path of tty_insert_flip_char.
+ * Queue a single byte @ch to the tty buffering, with an optional flag. This is
+ * the slow path of tty_insert_flip_char().
*/
int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
{
@@ -402,39 +402,19 @@ int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag)
EXPORT_SYMBOL(__tty_insert_flip_char);
/**
- * tty_schedule_flip - push characters to ldisc
- * @port: tty port to push from
+ * tty_prepare_flip_string - make room for characters
+ * @port: tty port
+ * @chars: return pointer for character write area
+ * @size: desired size
*
- * Takes any pending buffers and transfers their ownership to the
- * ldisc side of the queue. It then schedules those characters for
- * processing by the line discipline.
- */
-
-void tty_schedule_flip(struct tty_port *port)
-{
- struct tty_bufhead *buf = &port->buf;
-
- /* paired w/ acquire in flush_to_ldisc(); ensures
- * flush_to_ldisc() sees buffer data.
- */
- smp_store_release(&buf->tail->commit, buf->tail->used);
- queue_work(system_unbound_wq, &buf->work);
-}
-EXPORT_SYMBOL(tty_schedule_flip);
-
-/**
- * tty_prepare_flip_string - make room for characters
- * @port: tty port
- * @chars: return pointer for character write area
- * @size: desired size
- *
- * Prepare a block of space in the buffer for data. Returns the length
- * available and buffer pointer to the space which is now allocated and
- * accounted for as ready for normal characters. This is used for drivers
- * that need their own block copy routines into the buffer. There is no
- * guarantee the buffer is a DMA target!
+ * Prepare a block of space in the buffer for data.
+ *
+ * This is used for drivers that need their own block copy routines into the
+ * buffer. There is no guarantee the buffer is a DMA target!
+ *
+ * Returns: the length available and buffer pointer (@chars) to the space which
+ * is now allocated and accounted for as ready for normal characters.
*/
-
int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
size_t size)
{
@@ -453,16 +433,16 @@ int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
/**
- * tty_ldisc_receive_buf - forward data to line discipline
- * @ld: line discipline to process input
- * @p: char buffer
- * @f: TTY_* flags buffer
- * @count: number of bytes to process
+ * tty_ldisc_receive_buf - forward data to line discipline
+ * @ld: line discipline to process input
+ * @p: char buffer
+ * @f: %TTY_NORMAL, %TTY_BREAK, etc. flags buffer
+ * @count: number of bytes to process
*
- * Callers other than flush_to_ldisc() need to exclude the kworker
- * from concurrent use of the line discipline, see paste_selection().
+ * Callers other than flush_to_ldisc() need to exclude the kworker from
+ * concurrent use of the line discipline, see paste_selection().
*
- * Returns the number of bytes processed
+ * Returns: the number of bytes processed.
*/
int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
const char *f, int count)
@@ -495,18 +475,16 @@ receive_buf(struct tty_port *port, struct tty_buffer *head, int count)
}
/**
- * flush_to_ldisc
- * @work: tty structure passed from work queue.
+ * flush_to_ldisc - flush data from buffer to ldisc
+ * @work: tty structure passed from work queue.
*
- * This routine is called out of the software interrupt to flush data
- * from the buffer chain to the line discipline.
+ * This routine is called out of the software interrupt to flush data from the
+ * buffer chain to the line discipline.
*
- * The receive_buf method is single threaded for each tty instance.
+ * The receive_buf() method is single threaded for each tty instance.
*
- * Locking: takes buffer lock to ensure single-threaded flip buffer
- * 'consumer'
+ * Locking: takes buffer lock to ensure single-threaded flip buffer 'consumer'.
*/
-
static void flush_to_ldisc(struct work_struct *work)
{
struct tty_port *port = container_of(work, struct tty_port, buf.work);
@@ -554,30 +532,35 @@ static void flush_to_ldisc(struct work_struct *work)
}
/**
- * tty_flip_buffer_push - terminal
- * @port: tty port to push
+ * tty_flip_buffer_push - push terminal buffers
+ * @port: tty port to push
*
- * Queue a push of the terminal flip buffers to the line discipline.
- * Can be called from IRQ/atomic context.
+ * Queue a push of the terminal flip buffers to the line discipline. Can be
+ * called from IRQ/atomic context.
*
- * In the event of the queue being busy for flipping the work will be
- * held off and retried later.
+ * In the event of the queue being busy for flipping the work will be held off
+ * and retried later.
*/
-
void tty_flip_buffer_push(struct tty_port *port)
{
- tty_schedule_flip(port);
+ struct tty_bufhead *buf = &port->buf;
+
+ /*
+ * Paired w/ acquire in flush_to_ldisc(); ensures flush_to_ldisc() sees
+ * buffer data.
+ */
+ smp_store_release(&buf->tail->commit, buf->tail->used);
+ queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL(tty_flip_buffer_push);
/**
- * tty_buffer_init - prepare a tty buffer structure
- * @port: tty port to initialise
+ * tty_buffer_init - prepare a tty buffer structure
+ * @port: tty port to initialise
*
- * Set up the initial state of the buffer management for a tty device.
- * Must be called before the other tty buffer functions are used.
+ * Set up the initial state of the buffer management for a tty device. Must be
+ * called before the other tty buffer functions are used.
*/
-
void tty_buffer_init(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
@@ -594,14 +577,14 @@ void tty_buffer_init(struct tty_port *port)
}
/**
- * tty_buffer_set_limit - change the tty buffer memory limit
- * @port: tty port to change
- * @limit: memory limit to set
+ * tty_buffer_set_limit - change the tty buffer memory limit
+ * @port: tty port to change
+ * @limit: memory limit to set
*
- * Change the tty buffer memory limit.
- * Must be called before the other tty buffer functions are used.
+ * Change the tty buffer memory limit.
+ *
+ * Must be called before the other tty buffer functions are used.
*/
-
int tty_buffer_set_limit(struct tty_port *port, int limit)
{
if (limit < MIN_TTYB_SIZE)
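With tty_schedule_flip() folded into tty_flip_buffer_push(), the commit store is the publication point between the driver-side producer and flush_to_ldisc(). A hedged sketch of the release/acquire pairing; the helper names are illustrative, the fields are those of struct tty_buffer:

#include <linux/tty_buffer.h>

/* Producer side: publish how many bytes of the tail buffer are valid. */
static void foo_commit(struct tty_buffer *tail)
{
	/* Orders all earlier data stores before the commit store. */
	smp_store_release(&tail->commit, tail->used);
}

/* Consumer side (flush_to_ldisc()): read the commit point first. */
static int foo_committed(struct tty_buffer *head)
{
	/*
	 * Pairs with the release above, so the data loads that follow
	 * observe everything written before the commit.
	 */
	return smp_load_acquire(&head->commit);
}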
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 6616d4a0d41d..7e8b3bd59c7b 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -158,19 +158,18 @@ static int tty_fasync(int fd, struct file *filp, int on);
static void release_tty(struct tty_struct *tty, int idx);
/**
- * free_tty_struct - free a disused tty
- * @tty: tty struct to free
+ * free_tty_struct - free a disused tty
+ * @tty: tty struct to free
*
- * Free the write buffers, tty queue and tty memory itself.
+ * Free the write buffers, tty queue and tty memory itself.
*
- * Locking: none. Must be called after tty is definitely unused
+ * Locking: none. Must be called after tty is definitely unused
*/
-
static void free_tty_struct(struct tty_struct *tty)
{
tty_ldisc_deinit(tty);
put_device(tty->dev);
- kfree(tty->write_buf);
+ kvfree(tty->write_buf);
tty->magic = 0xDEADDEAD;
kfree(tty);
}
@@ -206,8 +205,9 @@ void tty_add_file(struct tty_struct *tty, struct file *file)
spin_unlock(&tty->files_lock);
}
-/*
+/**
* tty_free_file - free file->private_data
+ * @file: to free private_data of
*
* This shall be used only for fail path handling when tty_add_file was not
* called yet.
@@ -233,15 +233,14 @@ static void tty_del_file(struct file *file)
}
/**
- * tty_name - return tty naming
- * @tty: tty structure
+ * tty_name - return tty naming
+ * @tty: tty structure
*
- * Convert a tty structure into a name. The name reflects the kernel
- * naming policy and if udev is in use may not reflect user space
+ * Convert a tty structure into a name. The name reflects the kernel naming
+ * policy and, if udev is in use, may not reflect user space.
*
- * Locking: none
+ * Locking: none
*/
-
const char *tty_name(const struct tty_struct *tty)
{
if (!tty) /* Hmm. NULL pointer. That's fun. */
@@ -303,16 +302,15 @@ static int check_tty_count(struct tty_struct *tty, const char *routine)
}
/**
- * get_tty_driver - find device of a tty
- * @device: device identifier
- * @index: returns the index of the tty
+ * get_tty_driver - find device of a tty
+ * @device: device identifier
+ * @index: returns the index of the tty
*
- * This routine returns a tty driver structure, given a device number
- * and also passes back the index number.
+ * This routine returns a tty driver structure, given a device number and also
+ * passes back the index number.
*
- * Locking: caller must hold tty_mutex
+ * Locking: caller must hold tty_mutex
*/
-
static struct tty_driver *get_tty_driver(dev_t device, int *index)
{
struct tty_driver *p;
@@ -329,17 +327,17 @@ static struct tty_driver *get_tty_driver(dev_t device, int *index)
}
/**
- * tty_dev_name_to_number - return dev_t for device name
- * @name: user space name of device under /dev
- * @number: pointer to dev_t that this function will populate
+ * tty_dev_name_to_number - return dev_t for device name
+ * @name: user space name of device under /dev
+ * @number: pointer to dev_t that this function will populate
*
- * This function converts device names like ttyS0 or ttyUSB1 into dev_t
- * like (4, 64) or (188, 1). If no corresponding driver is registered then
- * the function returns -ENODEV.
+ * This function converts device names like ttyS0 or ttyUSB1 into dev_t like
+ * (4, 64) or (188, 1). If no corresponding driver is registered then the
+ * function returns -%ENODEV.
*
- * Locking: this acquires tty_mutex to protect the tty_drivers list from
- * being modified while we are traversing it, and makes sure to
- * release it before exiting.
+ * Locking: this acquires tty_mutex to protect the tty_drivers list from
+ * being modified while we are traversing it, and makes sure to
+ * release it before exiting.
*/
int tty_dev_name_to_number(const char *name, dev_t *number)
{
@@ -381,13 +379,12 @@ EXPORT_SYMBOL_GPL(tty_dev_name_to_number);
#ifdef CONFIG_CONSOLE_POLL
/**
- * tty_find_polling_driver - find device of a polled tty
- * @name: name string to match
- * @line: pointer to resulting tty line nr
+ * tty_find_polling_driver - find device of a polled tty
+ * @name: name string to match
+ * @line: pointer to resulting tty line nr
*
- * This routine returns a tty driver structure, given a name
- * and the condition that the tty driver is capable of polled
- * operation.
+ * This routine returns a tty driver structure, given a name and the condition
+ * that the tty driver is capable of polled operation.
*/
struct tty_driver *tty_find_polling_driver(char *name, int *line)
{
@@ -515,14 +512,13 @@ static DEFINE_SPINLOCK(redirect_lock);
static struct file *redirect;
/**
- * tty_wakeup - request more data
- * @tty: terminal
+ * tty_wakeup - request more data
+ * @tty: terminal
*
- * Internal and external helper for wakeups of tty. This function
- * informs the line discipline if present that the driver is ready
- * to receive more output data.
+ * Internal and external helper for wakeups of tty. This function informs the
+ * line discipline if present that the driver is ready to receive more output
+ * data.
*/
-
void tty_wakeup(struct tty_struct *tty)
{
struct tty_ldisc *ld;
@@ -540,11 +536,11 @@ void tty_wakeup(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_wakeup);
/**
- * tty_release_redirect - Release a redirect on a pty if present
- * @tty: tty device
+ * tty_release_redirect - Release a redirect on a pty if present
+ * @tty: tty device
*
- * This is available to the pty code so if the master closes, if the
- * slave is a redirect it can release the redirect.
+ * This is available to the pty code so if the master closes, if the slave is a
+ * redirect it can release the redirect.
*/
static struct file *tty_release_redirect(struct tty_struct *tty)
{
@@ -561,27 +557,29 @@ static struct file *tty_release_redirect(struct tty_struct *tty)
}
/**
- * __tty_hangup - actual handler for hangup events
- * @tty: tty device
- * @exit_session: if non-zero, signal all foreground group processes
+ * __tty_hangup - actual handler for hangup events
+ * @tty: tty device
+ * @exit_session: if non-zero, signal all foreground group processes
+ *
+ * This can be called by a "kworker" kernel thread. That is process synchronous
+ * but doesn't hold any locks, so we need to make sure we have the appropriate
+ * locks for what we're doing.
+ *
+ * The hangup event clears any pending redirections onto the hung up device. It
+ * ensures future writes will error and it does the needed line discipline
+ * hangup and signal delivery. The tty object itself remains intact.
+ *
+ * Locking:
+ * * BTM
*
- * This can be called by a "kworker" kernel thread. That is process
- * synchronous but doesn't hold any locks, so we need to make sure we
- * have the appropriate locks for what we're doing.
+ * * redirect lock for undoing redirection
+ * * file list lock for manipulating list of ttys
+ * * tty_ldiscs_lock from called functions
+ * * termios_rwsem resetting termios data
+ * * tasklist_lock to walk task list for hangup event
*
- * The hangup event clears any pending redirections onto the hung up
- * device. It ensures future writes will error and it does the needed
- * line discipline hangup and signal delivery. The tty object itself
- * remains intact.
+ * * ->siglock to protect ->signal/->sighand
*
- * Locking:
- * BTM
- * redirect lock for undoing redirection
- * file list lock for manipulating list of ttys
- * tty_ldiscs_lock from called functions
- * termios_rwsem resetting termios data
- * tasklist_lock to walk task list for hangup event
- * ->siglock to protect ->signal/->sighand
*/
static void __tty_hangup(struct tty_struct *tty, int exit_session)
{
@@ -682,13 +680,12 @@ static void do_tty_hangup(struct work_struct *work)
}
/**
- * tty_hangup - trigger a hangup event
- * @tty: tty to hangup
+ * tty_hangup - trigger a hangup event
+ * @tty: tty to hangup
*
- * A carrier loss (virtual or otherwise) has occurred on this like
- * schedule a hangup sequence to run after this event.
+ * A carrier loss (virtual or otherwise) has occurred on @tty. Schedule a
+ * hangup sequence to run after this event.
*/
-
void tty_hangup(struct tty_struct *tty)
{
tty_debug_hangup(tty, "hangup\n");
@@ -697,14 +694,13 @@ void tty_hangup(struct tty_struct *tty)
EXPORT_SYMBOL(tty_hangup);
/**
- * tty_vhangup - process vhangup
- * @tty: tty to hangup
+ * tty_vhangup - process vhangup
+ * @tty: tty to hangup
*
- * The user has asked via system call for the terminal to be hung up.
- * We do this synchronously so that when the syscall returns the process
- * is complete. That guarantee is necessary for security reasons.
+ * The user has asked via system call for the terminal to be hung up. We do
+ * this synchronously so that when the syscall returns the process is complete.
+ * That guarantee is necessary for security reasons.
*/
-
void tty_vhangup(struct tty_struct *tty)
{
tty_debug_hangup(tty, "vhangup\n");
@@ -714,11 +710,10 @@ EXPORT_SYMBOL(tty_vhangup);
/**
- * tty_vhangup_self - process vhangup for own ctty
+ * tty_vhangup_self - process vhangup for own ctty
*
- * Perform a vhangup on the current controlling tty
+ * Perform a vhangup on the current controlling tty
*/
-
void tty_vhangup_self(void)
{
struct tty_struct *tty;
@@ -731,16 +726,15 @@ void tty_vhangup_self(void)
}
/**
- * tty_vhangup_session - hangup session leader exit
- * @tty: tty to hangup
+ * tty_vhangup_session - hangup session leader exit
+ * @tty: tty to hangup
*
- * The session leader is exiting and hanging up its controlling terminal.
- * Every process in the foreground process group is signalled SIGHUP.
+ * The session leader is exiting and hanging up its controlling terminal.
+ * Every process in the foreground process group is signalled %SIGHUP.
*
- * We do this synchronously so that when the syscall returns the process
- * is complete. That guarantee is necessary for security reasons.
+ * We do this synchronously so that when the syscall returns the process is
+ * complete. That guarantee is necessary for security reasons.
*/
-
void tty_vhangup_session(struct tty_struct *tty)
{
tty_debug_hangup(tty, "session hangup\n");
@@ -748,13 +742,11 @@ void tty_vhangup_session(struct tty_struct *tty)
}
/**
- * tty_hung_up_p - was tty hung up
- * @filp: file pointer of tty
+ * tty_hung_up_p - was tty hung up
+ * @filp: file pointer of tty
*
- * Return true if the tty has been subject to a vhangup or a carrier
- * loss
+ * Return: true if the tty has been subject to a vhangup or a carrier loss
*/
-
int tty_hung_up_p(struct file *filp)
{
return (filp && filp->f_op == &hung_up_tty_fops);
@@ -771,20 +763,18 @@ void __stop_tty(struct tty_struct *tty)
}
/**
- * stop_tty - propagate flow control
- * @tty: tty to stop
+ * stop_tty - propagate flow control
+ * @tty: tty to stop
*
- * Perform flow control to the driver. May be called
- * on an already stopped device and will not re-call the driver
- * method.
+ * Perform flow control to the driver. May be called on an already stopped
+ * device and will not re-call the &tty_driver->stop() method.
*
- * This functionality is used by both the line disciplines for
- * halting incoming flow and by the driver. It may therefore be
- * called from any context, may be under the tty atomic_write_lock
- * but not always.
+ * This functionality is used by both the line disciplines for halting incoming
+ * flow and by the driver. It may therefore be called from any context, may be
+ * under the tty %atomic_write_lock but not always.
*
- * Locking:
- * flow.lock
+ * Locking:
+ * flow.lock
*/
void stop_tty(struct tty_struct *tty)
{
@@ -807,15 +797,15 @@ void __start_tty(struct tty_struct *tty)
}
/**
- * start_tty - propagate flow control
- * @tty: tty to start
+ * start_tty - propagate flow control
+ * @tty: tty to start
*
- * Start a tty that has been stopped if at all possible. If this
- * tty was previous stopped and is now being started, the driver
- * start method is invoked and the line discipline woken.
+ * Start a tty that has been stopped if at all possible. If @tty was previously
+ * stopped and is now being started, the &tty_driver->start() method is invoked
+ * and the line discipline woken.
*
- * Locking:
- * flow.lock
+ * Locking:
+ * flow.lock
*/
void start_tty(struct tty_struct *tty)
{
@@ -908,18 +898,17 @@ static int iterate_tty_read(struct tty_ldisc *ld, struct tty_struct *tty,
/**
- * tty_read - read method for tty device files
- * @iocb: kernel I/O control block
- * @to: destination for the data read
+ * tty_read - read method for tty device files
+ * @iocb: kernel I/O control block
+ * @to: destination for the data read
*
- * Perform the read system call function on this terminal device. Checks
- * for hung up devices before calling the line discipline method.
+ * Perform the read system call function on this terminal device. Checks
+ * for hung up devices before calling the line discipline method.
*
- * Locking:
- * Locks the line discipline internally while needed. Multiple
- * read calls may be outstanding in parallel.
+ * Locking:
+ * Locks the line discipline internally while needed. Multiple read calls
+ * may be outstanding in parallel.
*/
-
static ssize_t tty_read(struct kiocb *iocb, struct iov_iter *to)
{
int i;
@@ -997,9 +986,6 @@ static inline ssize_t do_tty_write(
* layer has problems with bigger chunks. It will
* claim to be able to handle more characters than
* it actually does.
- *
- * FIXME: This can probably go away now except that 64K chunks
- * are too likely to fail unless switched to vmalloc...
*/
chunk = 2048;
if (test_bit(TTY_NO_WRITE_SPLIT, &tty->flags))
@@ -1014,12 +1000,12 @@ static inline ssize_t do_tty_write(
if (chunk < 1024)
chunk = 1024;
- buf_chunk = kmalloc(chunk, GFP_KERNEL);
+ buf_chunk = kvmalloc(chunk, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
if (!buf_chunk) {
ret = -ENOMEM;
goto out;
}
- kfree(tty->write_buf);
+ kvfree(tty->write_buf);
tty->write_cnt = chunk;
tty->write_buf = buf_chunk;
}
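Switching the tty write buffer from kmalloc() to kvmalloc() is what lets the FIXME above go away: kvmalloc() attempts a physically contiguous allocation first and transparently falls back to vmalloc(), so a 64K chunk no longer fails on fragmented memory, while __GFP_RETRY_MAYFAIL keeps the attempt best-effort rather than invoking the OOM killer; kvfree() releases either kind. A one-function sketch, foo_ name illustrative:

#include <linux/mm.h>

static unsigned char *foo_alloc_write_buf(size_t chunk)
{
	/* kmalloc() first, vmalloc() fallback; fail instead of OOM-killing. */
	return kvmalloc(chunk, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
}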
@@ -1069,13 +1055,12 @@ out:
* @tty: the destination tty_struct
* @msg: the message to write
*
- * This is used for messages that need to be redirected to a specific tty.
- * We don't put it into the syslog queue right now maybe in the future if
- * really needed.
+ * This is used for messages that need to be redirected to a specific tty. We
+ * don't put it into the syslog queue right now; maybe in the future if really
+ * needed.
*
* We must still hold the BTM and test the CLOSING flag for the moment.
*/
-
void tty_write_message(struct tty_struct *tty, char *msg)
{
if (tty) {
@@ -1113,18 +1098,18 @@ static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_
}
/**
- * tty_write - write method for tty device file
- * @iocb: kernel I/O control block
- * @from: iov_iter with data to write
+ * tty_write - write method for tty device file
+ * @iocb: kernel I/O control block
+ * @from: iov_iter with data to write
*
- * Write data to a tty device via the line discipline.
+ * Write data to a tty device via the line discipline.
*
- * Locking:
- * Locks the line discipline as required
- * Writes to the tty driver are serialized by the atomic_write_lock
- * and are then processed in chunks to the device. The line
- * discipline write method will not be invoked in parallel for
- * each device.
+ * Locking:
+ * Locks the line discipline as required
+ * Writes to the tty driver are serialized by the atomic_write_lock
+ * and are then processed in chunks to the device. The line
+ * discipline write method will not be invoked in parallel for
+ * each device.
*/
static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from)
{
@@ -1154,14 +1139,15 @@ ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter)
return tty_write(iocb, iter);
}
-/*
- * tty_send_xchar - send priority character
+/**
+ * tty_send_xchar - send priority character
+ * @tty: the tty to send to
+ * @ch: xchar to send
*
- * Send a high priority character to the tty even if stopped
+ * Send a high priority character to the tty even if stopped.
*
- * Locking: none for xchar method, write ordering for write method.
+ * Locking: none for xchar method, write ordering for write method.
*/
-
int tty_send_xchar(struct tty_struct *tty, char ch)
{
bool was_stopped = tty->flow.stopped;
@@ -1188,15 +1174,15 @@ int tty_send_xchar(struct tty_struct *tty, char ch)
}
/**
- * pty_line_name - generate name for a pty
- * @driver: the tty driver in use
- * @index: the minor number
- * @p: output buffer of at least 6 bytes
+ * pty_line_name - generate name for a pty
+ * @driver: the tty driver in use
+ * @index: the minor number
+ * @p: output buffer of at least 6 bytes
*
- * Generate a name from a driver reference and write it to the output
- * buffer.
+ * Generate a name from a @driver reference and write it to the output buffer
+ * @p.
*
- * Locking: None
+ * Locking: None
*/
static void pty_line_name(struct tty_driver *driver, int index, char *p)
{
@@ -1209,15 +1195,15 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p)
}
/**
- * tty_line_name - generate name for a tty
- * @driver: the tty driver in use
- * @index: the minor number
- * @p: output buffer of at least 7 bytes
+ * tty_line_name - generate name for a tty
+ * @driver: the tty driver in use
+ * @index: the minor number
+ * @p: output buffer of at least 7 bytes
*
- * Generate a name from a driver reference and write it to the output
- * buffer.
+ * Generate a name from a @driver reference and write it to the output buffer
+ * @p.
*
- * Locking: None
+ * Locking: None
*/
static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
{
@@ -1229,15 +1215,15 @@ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
}
/**
- * tty_driver_lookup_tty() - find an existing tty, if any
- * @driver: the driver for the tty
- * @file: file object
- * @idx: the minor number
+ * tty_driver_lookup_tty() - find an existing tty, if any
+ * @driver: the driver for the tty
+ * @file: file object
+ * @idx: the minor number
*
- * Return the tty, if found. If not found, return NULL or ERR_PTR() if the
- * driver lookup() method returns an error.
+ * Return: the tty, if found. If not found, return %NULL or ERR_PTR() if the
+ * driver lookup() method returns an error.
*
- * Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
+ * Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
*/
static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
struct file *file, int idx)
@@ -1258,13 +1244,12 @@ static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
}
/**
- * tty_init_termios - helper for termios setup
- * @tty: the tty to set up
+ * tty_init_termios - helper for termios setup
+ * @tty: the tty to set up
*
- * Initialise the termios structure for this tty. This runs under
- * the tty_mutex currently so we can be relaxed about ordering.
+ * Initialise the termios structure for this tty. This runs under the
+ * %tty_mutex currently so we can be relaxed about ordering.
*/
-
void tty_init_termios(struct tty_struct *tty)
{
struct ktermios *tp;
@@ -1287,6 +1272,14 @@ void tty_init_termios(struct tty_struct *tty)
}
EXPORT_SYMBOL_GPL(tty_init_termios);
+/**
+ * tty_standard_install - usual tty->ops->install
+ * @driver: the driver for the tty
+ * @tty: the tty
+ *
+ * If the @driver overrides @tty->ops->install, it still can call this function
+ * to perform the standard install operations.
+ */
int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty)
{
tty_init_termios(tty);
@@ -1298,16 +1291,15 @@ int tty_standard_install(struct tty_driver *driver, struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_standard_install);
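A usage sketch of tty_standard_install() as documented above -- not part of
this patch, and all foo_* names and the per-tty structure are hypothetical. A
driver that needs private per-tty state can still reuse the standard install
from its own ->install() hook:

    #include <linux/slab.h>
    #include <linux/tty.h>

    struct foo_state { int opens; };        /* hypothetical per-tty state */

    static int foo_install(struct tty_driver *driver, struct tty_struct *tty)
    {
            struct foo_state *st;
            int ret;

            st = kzalloc(sizeof(*st), GFP_KERNEL);
            if (!st)
                    return -ENOMEM;

            /* reuse the standard termios setup and driver-table insertion */
            ret = tty_standard_install(driver, tty);
            if (ret) {
                    kfree(st);
                    return ret;
            }

            tty->driver_data = st;
            return 0;
    }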
/**
- * tty_driver_install_tty() - install a tty entry in the driver
- * @driver: the driver for the tty
- * @tty: the tty
+ * tty_driver_install_tty() - install a tty entry in the driver
+ * @driver: the driver for the tty
+ * @tty: the tty
*
- * Install a tty object into the driver tables. The tty->index field
- * will be set by the time this is called. This method is responsible
- * for ensuring any need additional structures are allocated and
- * configured.
+ * Install a tty object into the driver tables. The @tty->index field will be
+ * set by the time this is called. This method is responsible for ensuring any
+ * needed additional structures are allocated and configured.
*
- * Locking: tty_mutex for now
+ * Locking: tty_mutex for now
*/
static int tty_driver_install_tty(struct tty_driver *driver,
struct tty_struct *tty)
@@ -1317,14 +1309,14 @@ static int tty_driver_install_tty(struct tty_driver *driver,
}
/**
- * tty_driver_remove_tty() - remove a tty from the driver tables
- * @driver: the driver for the tty
- * @tty: tty to remove
+ * tty_driver_remove_tty() - remove a tty from the driver tables
+ * @driver: the driver for the tty
+ * @tty: tty to remove
*
- * Remvoe a tty object from the driver tables. The tty->index field
- * will be set by the time this is called.
+ * Remove a tty object from the driver tables. The tty->index field will be set
+ * by the time this is called.
*
- * Locking: tty_mutex for now
+ * Locking: tty_mutex for now
*/
static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *tty)
{
@@ -1335,13 +1327,13 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
}
/**
- * tty_reopen() - fast re-open of an open tty
- * @tty: the tty to open
+ * tty_reopen() - fast re-open of an open tty
+ * @tty: the tty to open
*
- * Return 0 on success, -errno on error.
- * Re-opens on master ptys are not allowed and return -EIO.
+ * Re-opens on master ptys are not allowed and return -%EIO.
*
- * Locking: Caller must hold tty_lock
+ * Locking: Caller must hold tty_lock
+ * Return: 0 on success, -errno on error.
*/
static int tty_reopen(struct tty_struct *tty)
{
@@ -1379,30 +1371,28 @@ static int tty_reopen(struct tty_struct *tty)
}
/**
- * tty_init_dev - initialise a tty device
- * @driver: tty driver we are opening a device on
- * @idx: device index
+ * tty_init_dev - initialise a tty device
+ * @driver: tty driver we are opening a device on
+ * @idx: device index
*
- * Prepare a tty device. This may not be a "new" clean device but
- * could also be an active device. The pty drivers require special
- * handling because of this.
+ * Prepare a tty device. This may not be a "new" clean device but could also be
+ * an active device. The pty drivers require special handling because of this.
*
- * Locking:
- * The function is called under the tty_mutex, which
- * protects us from the tty struct or driver itself going away.
+ * Locking:
+ * The function is called under the tty_mutex, which protects us from the
+ * tty struct or driver itself going away.
*
- * On exit the tty device has the line discipline attached and
- * a reference count of 1. If a pair was created for pty/tty use
- * and the other was a pty master then it too has a reference count of 1.
+ * On exit the tty device has the line discipline attached and a reference
+ * count of 1. If a pair was created for pty/tty use and the other was a pty
+ * master then it too has a reference count of 1.
*
- * WSH 06/09/97: Rewritten to remove races and properly clean up after a
- * failed open. The new code protects the open with a mutex, so it's
- * really quite straightforward. The mutex locking can probably be
- * relaxed for the (most common) case of reopening a tty.
+ * WSH 06/09/97: Rewritten to remove races and properly clean up after a failed
+ * open. The new code protects the open with a mutex, so it's really quite
+ * straightforward. The mutex locking can probably be relaxed for the (most
+ * common) case of reopening a tty.
*
- * Return: returned tty structure
+ * Return: new tty structure
*/
-
struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
{
struct tty_struct *tty;
@@ -1503,10 +1493,10 @@ void tty_save_termios(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_save_termios);
/**
- * tty_flush_works - flush all works of a tty/pty pair
- * @tty: tty device to flush works for (or either end of a pty pair)
+ * tty_flush_works - flush all works of a tty/pty pair
+ * @tty: tty device to flush works for (or either end of a pty pair)
*
- * Sync flush all works belonging to @tty (and the 'other' tty).
+ * Sync flush all works belonging to @tty (and the 'other' tty).
*/
static void tty_flush_works(struct tty_struct *tty)
{
@@ -1519,19 +1509,19 @@ static void tty_flush_works(struct tty_struct *tty)
}
/**
- * release_one_tty - release tty structure memory
- * @work: work of tty we are obliterating
+ * release_one_tty - release tty structure memory
+ * @work: work of tty we are obliterating
*
- * Releases memory associated with a tty structure, and clears out the
- * driver table slots. This function is called when a device is no longer
- * in use. It also gets called when setup of a device fails.
+ * Releases memory associated with a tty structure, and clears out the
+ * driver table slots. This function is called when a device is no longer
+ * in use. It also gets called when setup of a device fails.
*
- * Locking:
- * takes the file list lock internally when working on the list
- * of ttys that the driver keeps.
+ * Locking:
+ * takes the file list lock internally when working on the list of ttys
+ * that the driver keeps.
*
- * This method gets called from a work queue so that the driver private
- * cleanup ops can sleep (needed for USB at least)
+ * This method gets called from a work queue so that the driver private
+ * cleanup ops can sleep (needed for USB at least)
*/
static void release_one_tty(struct work_struct *work)
{
@@ -1568,13 +1558,12 @@ static void queue_release_one_tty(struct kref *kref)
}
/**
- * tty_kref_put - release a tty kref
- * @tty: tty device
+ * tty_kref_put - release a tty kref
+ * @tty: tty device
*
- * Release a reference to a tty device and if need be let the kref
- * layer destruct the object for us
+ * Release a reference to the @tty device and if need be let the kref layer
+ * destruct the object for us.
*/
-
void tty_kref_put(struct tty_struct *tty)
{
if (tty)
@@ -1583,18 +1572,17 @@ void tty_kref_put(struct tty_struct *tty)
EXPORT_SYMBOL(tty_kref_put);
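The kref pairing the above implies, as a minimal sketch (foo_use_tty() is
hypothetical; tty_kref_get() in <linux/tty.h> is the taking side):

    #include <linux/tty.h>

    static void foo_use_tty(struct tty_struct *tty)
    {
            /* pin the tty; tty_kref_get() passes a NULL tty through */
            struct tty_struct *held = tty_kref_get(tty);

            if (!held)
                    return;

            /* ... work with the pinned tty, e.g. after dropping other locks ... */

            tty_kref_put(held);     /* may be the final put that frees it */
    }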
/**
- * release_tty - release tty structure memory
- * @tty: tty device release
- * @idx: index of the tty device release
+ * release_tty - release tty structure memory
+ * @tty: tty device release
+ * @idx: index of the tty device release
*
- * Release both @tty and a possible linked partner (think pty pair),
- * and decrement the refcount of the backing module.
- *
- * Locking:
- * tty_mutex
- * takes the file list lock internally when working on the list
- * of ttys that the driver keeps.
+ * Release both @tty and a possible linked partner (think pty pair),
+ * and decrement the refcount of the backing module.
*
+ * Locking:
+ * tty_mutex
+ * takes the file list lock internally when working on the list of ttys
+ * that the driver keeps.
*/
static void release_tty(struct tty_struct *tty, int idx)
{
@@ -1619,12 +1607,12 @@ static void release_tty(struct tty_struct *tty, int idx)
}
/**
- * tty_release_checks - check a tty before real release
- * @tty: tty to check
- * @idx: index of the tty
+ * tty_release_checks - check a tty before real release
+ * @tty: tty to check
+ * @idx: index of the tty
*
- * Performs some paranoid checking before true release of the @tty.
- * This is a no-op unless TTY_PARANOIA_CHECK is defined.
+ * Performs some paranoid checking before true release of the @tty. This is a
+ * no-op unless %TTY_PARANOIA_CHECK is defined.
*/
static int tty_release_checks(struct tty_struct *tty, int idx)
{
@@ -1661,12 +1649,12 @@ static int tty_release_checks(struct tty_struct *tty, int idx)
}
/**
- * tty_kclose - closes tty opened by tty_kopen
- * @tty: tty device
+ * tty_kclose - closes tty opened by tty_kopen
+ * @tty: tty device
*
- * Performs the final steps to release and free a tty device. It is the
- * same as tty_release_struct except that it also resets TTY_PORT_KOPENED
- * flag on tty->port.
+ * Performs the final steps to release and free a tty device. It is the same as
+ * tty_release_struct() except that it also resets %TTY_PORT_KOPENED flag on
+ * @tty->port.
*/
void tty_kclose(struct tty_struct *tty)
{
@@ -1691,12 +1679,12 @@ void tty_kclose(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_kclose);
/**
- * tty_release_struct - release a tty struct
- * @tty: tty device
- * @idx: index of the tty
+ * tty_release_struct - release a tty struct
+ * @tty: tty device
+ * @idx: index of the tty
*
- * Performs the final steps to release and free a tty device. It is
- * roughly the reverse of tty_init_dev.
+ * Performs the final steps to release and free a tty device. It is roughly the
+ * reverse of tty_init_dev().
*/
void tty_release_struct(struct tty_struct *tty, int idx)
{
@@ -1720,24 +1708,23 @@ void tty_release_struct(struct tty_struct *tty, int idx)
EXPORT_SYMBOL_GPL(tty_release_struct);
/**
- * tty_release - vfs callback for close
- * @inode: inode of tty
- * @filp: file pointer for handle to tty
+ * tty_release - vfs callback for close
+ * @inode: inode of tty
+ * @filp: file pointer for handle to tty
*
- * Called the last time each file handle is closed that references
- * this tty. There may however be several such references.
+ * Called the last time each file handle is closed that references this tty.
+ * There may however be several such references.
*
- * Locking:
- * Takes bkl. See tty_release_dev
+ * Locking:
+ * Takes BKL. See tty_release_dev().
*
- * Even releasing the tty structures is a tricky business.. We have
- * to be very careful that the structures are all released at the
- * same time, as interrupts might otherwise get the wrong pointers.
+ * Even releasing the tty structures is a tricky business. We have to be very
+ * careful that the structures are all released at the same time, as interrupts
+ * might otherwise get the wrong pointers.
*
* WSH 09/09/97: rewritten to avoid some nasty race conditions that could
* lead to double frees or releasing memory still in use.
*/
-
int tty_release(struct inode *inode, struct file *filp)
{
struct tty_struct *tty = file_tty(filp);
@@ -1880,15 +1867,15 @@ int tty_release(struct inode *inode, struct file *filp)
}
/**
- * tty_open_current_tty - get locked tty of current task
- * @device: device number
- * @filp: file pointer to tty
- * @return: locked tty of the current task iff @device is /dev/tty
+ * tty_open_current_tty - get locked tty of current task
+ * @device: device number
+ * @filp: file pointer to tty
+ * @return: locked tty of the current task iff @device is /dev/tty
*
- * Performs a re-open of the current task's controlling tty.
+ * Performs a re-open of the current task's controlling tty.
*
- * We cannot return driver and index like for the other nodes because
- * devpts will not work then. It expects inodes to be from devpts FS.
+ * We cannot return driver and index like for the other nodes because devpts
+ * will not work then. It expects inodes to be from devpts FS.
*/
static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
{
@@ -1916,16 +1903,17 @@ static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
}
/**
- * tty_lookup_driver - lookup a tty driver for a given device file
- * @device: device number
- * @filp: file pointer to tty
- * @index: index for the device in the @return driver
- * @return: driver for this inode (with increased refcount)
+ * tty_lookup_driver - lookup a tty driver for a given device file
+ * @device: device number
+ * @filp: file pointer to tty
+ * @index: index for the device in the @return driver
+ *
+ * If the returned value is not erroneous, the caller is responsible for
+ * decrementing the refcount by tty_driver_kref_put().
*
- * If @return is not erroneous, the caller is responsible to decrement the
- * refcount by tty_driver_kref_put.
+ * Locking: %tty_mutex protects get_tty_driver()
*
- * Locking: tty_mutex protects get_tty_driver
+ * Return: driver for this inode (with increased refcount)
*/
static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
int *index)
@@ -2001,19 +1989,18 @@ out:
}
/**
- * tty_kopen_exclusive - open a tty device for kernel
- * @device: dev_t of device to open
+ * tty_kopen_exclusive - open a tty device for kernel
+ * @device: dev_t of device to open
*
- * Opens tty exclusively for kernel. Performs the driver lookup,
- * makes sure it's not already opened and performs the first-time
- * tty initialization.
+ * Opens tty exclusively for kernel. Performs the driver lookup, makes sure
+ * it's not already opened and performs the first-time tty initialization.
*
- * Returns the locked initialized &tty_struct
+ * Claims the global %tty_mutex to serialize:
+ * * concurrent first-time tty initialization
+ * * concurrent tty driver removal w/ lookup
+ * * concurrent tty removal from driver table
*
- * Claims the global tty_mutex to serialize:
- * - concurrent first-time tty initialization
- * - concurrent tty driver removal w/ lookup
- * - concurrent tty removal from driver table
+ * Return: the locked initialized &tty_struct
*/
struct tty_struct *tty_kopen_exclusive(dev_t device)
{
@@ -2022,13 +2009,13 @@ struct tty_struct *tty_kopen_exclusive(dev_t device)
EXPORT_SYMBOL_GPL(tty_kopen_exclusive);
/**
- * tty_kopen_shared - open a tty device for shared in-kernel use
- * @device: dev_t of device to open
+ * tty_kopen_shared - open a tty device for shared in-kernel use
+ * @device: dev_t of device to open
*
- * Opens an already existing tty for in-kernel use. Compared to
- * tty_kopen_exclusive() above it doesn't ensure to be the only user.
+ * Opens an already existing tty for in-kernel use. Compared to
+ * tty_kopen_exclusive() above it doesn't ensure to be the only user.
*
- * Locking is identical to tty_kopen() above.
+ * Locking: identical to tty_kopen() above.
*/
struct tty_struct *tty_kopen_shared(dev_t device)
{
@@ -2037,19 +2024,20 @@ struct tty_struct *tty_kopen_shared(dev_t device)
EXPORT_SYMBOL_GPL(tty_kopen_shared);
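A sketch of the in-kernel open/close pairing these helpers form (error
handling trimmed; foo_kernel_use() is hypothetical). tty_kopen_exclusive()
hands back the tty locked, so the lock must be dropped before tty_kclose():

    #include <linux/err.h>
    #include <linux/tty.h>

    static int foo_kernel_use(dev_t dev)
    {
            struct tty_struct *tty;

            tty = tty_kopen_exclusive(dev);
            if (IS_ERR(tty))
                    return PTR_ERR(tty);

            /* ... first-time setup under the tty lock ... */

            tty_unlock(tty);
            tty_kclose(tty);        /* also clears TTY_PORT_KOPENED */
            return 0;
    }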
/**
- * tty_open_by_driver - open a tty device
- * @device: dev_t of device to open
- * @filp: file pointer to tty
+ * tty_open_by_driver - open a tty device
+ * @device: dev_t of device to open
+ * @filp: file pointer to tty
*
- * Performs the driver lookup, checks for a reopen, or otherwise
- * performs the first-time tty initialization.
+ * Performs the driver lookup, checks for a reopen, or otherwise performs the
+ * first-time tty initialization.
*
- * Returns the locked initialized or re-opened &tty_struct
- *
- * Claims the global tty_mutex to serialize:
- * - concurrent first-time tty initialization
- * - concurrent tty driver removal w/ lookup
- * - concurrent tty removal from driver table
+ * Claims the global tty_mutex to serialize:
+ * * concurrent first-time tty initialization
+ * * concurrent tty driver removal w/ lookup
+ * * concurrent tty removal from driver table
+ *
+ * Return: the locked initialized or re-opened &tty_struct
*/
static struct tty_struct *tty_open_by_driver(dev_t device,
struct file *filp)
@@ -2104,29 +2092,28 @@ out:
}
/**
- * tty_open - open a tty device
- * @inode: inode of device file
- * @filp: file pointer to tty
+ * tty_open - open a tty device
+ * @inode: inode of device file
+ * @filp: file pointer to tty
*
- * tty_open and tty_release keep up the tty count that contains the
- * number of opens done on a tty. We cannot use the inode-count, as
- * different inodes might point to the same tty.
+ * tty_open() and tty_release() keep up the tty count that contains the number
+ * of opens done on a tty. We cannot use the inode-count, as different inodes
+ * might point to the same tty.
*
- * Open-counting is needed for pty masters, as well as for keeping
- * track of serial lines: DTR is dropped when the last close happens.
- * (This is not done solely through tty->count, now. - Ted 1/27/92)
+ * Open-counting is needed for pty masters, as well as for keeping track of
+ * serial lines: DTR is dropped when the last close happens.
+ * (This is not done solely through tty->count, now. - Ted 1/27/92)
*
- * The termios state of a pty is reset on first open so that
- * settings don't persist across reuse.
+ * The termios state of a pty is reset on the first open so that settings don't
+ * persist across reuse.
*
- * Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
- * tty->count should protect the rest.
- * ->siglock protects ->signal/->sighand
+ * Locking:
+ * * %tty_mutex protects tty, tty_lookup_driver() and tty_init_dev().
+ * * @tty->count should protect the rest.
+ * * ->siglock protects ->signal/->sighand
*
- * Note: the tty_unlock/lock cases without a ref are only safe due to
- * tty_mutex
+ * Note: the tty_unlock/lock cases without a ref are only safe due to %tty_mutex
*/
-
static int tty_open(struct inode *inode, struct file *filp)
{
struct tty_struct *tty;
@@ -2198,19 +2185,17 @@ retry_open:
}
-
/**
- * tty_poll - check tty status
- * @filp: file being polled
- * @wait: poll wait structures to update
+ * tty_poll - check tty status
+ * @filp: file being polled
+ * @wait: poll wait structures to update
*
- * Call the line discipline polling method to obtain the poll
- * status of the device.
+ * Call the line discipline polling method to obtain the poll status of the
+ * device.
*
- * Locking: locks called line discipline but ldisc poll method
- * may be re-entered freely by other callers.
+ * Locking: locks called line discipline but ldisc poll method may be
+ * re-entered freely by other callers.
*/
-
static __poll_t tty_poll(struct file *filp, poll_table *wait)
{
struct tty_struct *tty = file_tty(filp);
@@ -2278,20 +2263,18 @@ static int tty_fasync(int fd, struct file *filp, int on)
}
/**
- * tiocsti - fake input character
- * @tty: tty to fake input into
- * @p: pointer to character
+ * tiocsti - fake input character
+ * @tty: tty to fake input into
+ * @p: pointer to character
*
- * Fake input to a tty device. Does the necessary locking and
- * input management.
+ * Fake input to a tty device. Does the necessary locking and input management.
*
- * FIXME: does not honour flow control ??
+ * FIXME: does not honour flow control ??
*
- * Locking:
- * Called functions take tty_ldiscs_lock
- * current->signal->tty check is safe without locks
+ * Locking:
+ * * Called functions take tty_ldiscs_lock
+ * * current->signal->tty check is safe without locks
*/
-
static int tiocsti(struct tty_struct *tty, char __user *p)
{
char ch, mbz = 0;
@@ -2314,16 +2297,15 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
}
/**
- * tiocgwinsz - implement window query ioctl
- * @tty: tty
- * @arg: user buffer for result
+ * tiocgwinsz - implement window query ioctl
+ * @tty: tty
+ * @arg: user buffer for result
*
- * Copies the kernel idea of the window size into the user buffer.
+ * Copies the kernel idea of the window size into the user buffer.
*
- * Locking: tty->winsize_mutex is taken to ensure the winsize data
- * is consistent.
+ * Locking: @tty->winsize_mutex is taken to ensure the winsize data is
+ * consistent.
*/
-
static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
{
int err;
@@ -2336,14 +2318,13 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
}
/**
- * tty_do_resize - resize event
- * @tty: tty being resized
- * @ws: new dimensions
+ * tty_do_resize - resize event
+ * @tty: tty being resized
+ * @ws: new dimensions
*
- * Update the termios variables and send the necessary signals to
- * peform a terminal resize correctly
+ * Update the termios variables and send the necessary signals to perform a
+ * terminal resize correctly.
*/
-
int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
{
struct pid *pgrp;
@@ -2367,20 +2348,19 @@ done:
EXPORT_SYMBOL(tty_do_resize);
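For illustration, a driver-side caller of the helper above (the wrapper is
hypothetical; tty_do_resize() updates @tty->winsize under the winsize mutex
and signals the foreground process group):

    #include <linux/tty.h>

    static int foo_resize(struct tty_struct *tty, unsigned short rows,
                          unsigned short cols)
    {
            struct winsize ws = {
                    .ws_row = rows,
                    .ws_col = cols,
            };

            /* updates tty->winsize and sends SIGWINCH to the pgrp */
            return tty_do_resize(tty, &ws);
    }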
/**
- * tiocswinsz - implement window size set ioctl
- * @tty: tty side of tty
- * @arg: user buffer for result
+ * tiocswinsz - implement window size set ioctl
+ * @tty: tty side of tty
+ * @arg: user buffer for result
*
- * Copies the user idea of the window size to the kernel. Traditionally
- * this is just advisory information but for the Linux console it
- * actually has driver level meaning and triggers a VC resize.
+ * Copies the user idea of the window size to the kernel. Traditionally this is
+ * just advisory information but for the Linux console it actually has driver
+ * level meaning and triggers a VC resize.
*
- * Locking:
- * Driver dependent. The default do_resize method takes the
- * tty termios mutex and ctrl.lock. The console takes its own lock
- * then calls into the default method.
+ * Locking:
+ * Driver dependent. The default do_resize method takes the tty termios
+ * mutex and ctrl.lock. The console takes its own lock then calls into the
+ * default method.
*/
-
static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg)
{
struct winsize tmp_ws;
@@ -2395,14 +2375,13 @@ static int tiocswinsz(struct tty_struct *tty, struct winsize __user *arg)
}
/**
- * tioccons - allow admin to move logical console
- * @file: the file to become console
+ * tioccons - allow admin to move logical console
+ * @file: the file to become console
*
- * Allow the administrator to move the redirected console device
+ * Allow the administrator to move the redirected console device.
*
- * Locking: uses redirect_lock to guard the redirect information
+ * Locking: uses redirect_lock to guard the redirect information
*/
-
static int tioccons(struct file *file)
{
if (!capable(CAP_SYS_ADMIN))
@@ -2435,15 +2414,14 @@ static int tioccons(struct file *file)
}
/**
- * tiocsetd - set line discipline
- * @tty: tty device
- * @p: pointer to user data
+ * tiocsetd - set line discipline
+ * @tty: tty device
+ * @p: pointer to user data
*
- * Set the line discipline according to user request.
+ * Set the line discipline according to user request.
*
- * Locking: see tty_set_ldisc, this function is just a helper
+ * Locking: see tty_set_ldisc(), this function is just a helper
*/
-
static int tiocsetd(struct tty_struct *tty, int __user *p)
{
int disc;
@@ -2458,16 +2436,15 @@ static int tiocsetd(struct tty_struct *tty, int __user *p)
}
/**
- * tiocgetd - get line discipline
- * @tty: tty device
- * @p: pointer to user data
+ * tiocgetd - get line discipline
+ * @tty: tty device
+ * @p: pointer to user data
*
- * Retrieves the line discipline id directly from the ldisc.
+ * Retrieves the line discipline id directly from the ldisc.
*
- * Locking: waits for ldisc reference (in case the line discipline
- * is changing or the tty is being hungup)
+ * Locking: waits for ldisc reference (in case the line discipline is changing
+ * or the @tty is being hungup)
*/
-
static int tiocgetd(struct tty_struct *tty, int __user *p)
{
struct tty_ldisc *ld;
@@ -2482,18 +2459,16 @@ static int tiocgetd(struct tty_struct *tty, int __user *p)
}
/**
- * send_break - performed time break
- * @tty: device to break on
- * @duration: timeout in mS
- *
- * Perform a timed break on hardware that lacks its own driver level
- * timed break functionality.
+ * send_break - perform a timed break
+ * @tty: device to break on
+ * @duration: timeout in ms
*
- * Locking:
- * atomic_write_lock serializes
+ * Perform a timed break on hardware that lacks its own driver level timed
+ * break functionality.
*
+ * Locking:
+ * @tty->atomic_write_lock serializes
*/
-
static int send_break(struct tty_struct *tty, unsigned int duration)
{
int retval;
@@ -2522,16 +2497,15 @@ out:
}
/**
- * tty_tiocmget - get modem status
- * @tty: tty device
- * @p: pointer to result
+ * tty_tiocmget - get modem status
+ * @tty: tty device
+ * @p: pointer to result
*
- * Obtain the modem status bits from the tty driver if the feature
- * is supported. Return -ENOTTY if it is not available.
+ * Obtain the modem status bits from the tty driver if the feature is
+ * supported. Return -%ENOTTY if it is not available.
*
- * Locking: none (up to the driver)
+ * Locking: none (up to the driver)
*/
-
static int tty_tiocmget(struct tty_struct *tty, int __user *p)
{
int retval = -ENOTTY;
@@ -2546,17 +2520,16 @@ static int tty_tiocmget(struct tty_struct *tty, int __user *p)
}
/**
- * tty_tiocmset - set modem status
- * @tty: tty device
- * @cmd: command - clear bits, set bits or set all
- * @p: pointer to desired bits
+ * tty_tiocmset - set modem status
+ * @tty: tty device
+ * @cmd: command - clear bits, set bits or set all
+ * @p: pointer to desired bits
*
- * Set the modem status bits from the tty driver if the feature
- * is supported. Return -ENOTTY if it is not available.
+ * Set the modem status bits from the tty driver if the feature
+ * is supported. Return -%ENOTTY if it is not available.
*
- * Locking: none (up to the driver)
+ * Locking: none (up to the driver)
*/
-
static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
unsigned __user *p)
{
@@ -2588,13 +2561,13 @@ static int tty_tiocmset(struct tty_struct *tty, unsigned int cmd,
}
/**
- * tty_get_icount - get tty statistics
- * @tty: tty device
- * @icount: output parameter
+ * tty_get_icount - get tty statistics
+ * @tty: tty device
+ * @icount: output parameter
*
- * Gets a copy of the tty's icount statistics.
+ * Gets a copy of the @tty's icount statistics.
*
- * Locking: none (up to the driver)
+ * Locking: none (up to the driver)
*/
int tty_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
@@ -2811,7 +2784,7 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return hung_up_tty_ioctl(file, cmd, arg);
retval = -EINVAL;
if (ld->ops->ioctl) {
- retval = ld->ops->ioctl(tty, file, cmd, arg);
+ retval = ld->ops->ioctl(tty, cmd, arg);
if (retval == -ENOIOCTLCMD)
retval = -ENOTTY;
}
@@ -2990,10 +2963,10 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
if (!ld)
return hung_up_tty_compat_ioctl(file, cmd, arg);
if (ld->ops->compat_ioctl)
- retval = ld->ops->compat_ioctl(tty, file, cmd, arg);
+ retval = ld->ops->compat_ioctl(tty, cmd, arg);
if (retval == -ENOIOCTLCMD && ld->ops->ioctl)
- retval = ld->ops->ioctl(tty, file,
- (unsigned long)compat_ptr(cmd), arg);
+ retval = ld->ops->ioctl(tty, (unsigned long)compat_ptr(cmd),
+ arg);
tty_ldisc_deref(ld);
return retval;
@@ -3028,17 +3001,11 @@ static int this_tty(const void *t, struct file *file, unsigned fd)
*/
void __do_SAK(struct tty_struct *tty)
{
-#ifdef TTY_SOFT_SAK
- tty_hangup(tty);
-#else
struct task_struct *g, *p;
struct pid *session;
- int i;
+ int i;
unsigned long flags;
- if (!tty)
- return;
-
spin_lock_irqsave(&tty->ctrl.lock, flags);
session = get_pid(tty->ctrl.session);
spin_unlock_irqrestore(&tty->ctrl.lock, flags);
@@ -3060,7 +3027,8 @@ void __do_SAK(struct tty_struct *tty)
if (p->signal->tty == tty) {
tty_notice(tty, "SAK: killed process %d (%s): by controlling tty\n",
task_pid_nr(p), p->comm);
- group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
+ group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p,
+ PIDTYPE_SID);
continue;
}
task_lock(p);
@@ -3068,13 +3036,13 @@ void __do_SAK(struct tty_struct *tty)
if (i != 0) {
tty_notice(tty, "SAK: killed process %d (%s): by fd#%d\n",
task_pid_nr(p), p->comm, i - 1);
- group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p, PIDTYPE_SID);
+ group_send_sig_info(SIGKILL, SEND_SIG_PRIV, p,
+ PIDTYPE_SID);
}
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
put_pid(session);
-#endif
}
static void do_SAK_work(struct work_struct *work)
@@ -3107,14 +3075,15 @@ static struct device *tty_get_device(struct tty_struct *tty)
}
-/*
- * alloc_tty_struct
+/**
+ * alloc_tty_struct - allocate a new tty
+ * @driver: driver which will handle the returned tty
+ * @idx: minor of the tty
*
- * This subroutine allocates and initializes a tty structure.
+ * This subroutine allocates and initializes a tty structure.
*
- * Locking: none - tty in question is not exposed at this point
+ * Locking: none - @tty in question is not exposed at this point
*/
-
struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
{
struct tty_struct *tty;
@@ -3156,17 +3125,18 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
}
/**
- * tty_put_char - write one character to a tty
- * @tty: tty
- * @ch: character
+ * tty_put_char - write one character to a tty
+ * @tty: tty
+ * @ch: character to write
+ *
+ * Write one byte to the @tty using the provided @tty->ops->put_char() method
+ * if present.
*
- * Write one byte to the tty using the provided put_char method
- * if present. Returns the number of characters successfully output.
+ * Note: the specific put_char operation in the driver layer may go
+ * away soon. Don't call it directly; use this method.
*
- * Note: the specific put_char operation in the driver layer may go
- * away soon. Don't call it directly, use this method
+ * Return: the number of characters successfully output.
*/
-
int tty_put_char(struct tty_struct *tty, unsigned char ch)
{
if (tty->ops->put_char)
@@ -3195,24 +3165,23 @@ static int tty_cdev_add(struct tty_driver *driver, dev_t dev,
}
/**
- * tty_register_device - register a tty device
- * @driver: the tty driver that describes the tty device
- * @index: the index in the tty driver for this tty device
- * @device: a struct device that is associated with this tty device.
- * This field is optional, if there is no known struct device
- * for this tty device it can be set to NULL safely.
+ * tty_register_device - register a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ * @device: a struct device that is associated with this tty device.
+ *	This field is optional; if there is no known struct device
+ *	for this tty device it can be set to %NULL safely.
*
- * Returns a pointer to the struct device for this tty device
- * (or ERR_PTR(-EFOO) on error).
+ * This call is required to be made to register an individual tty device
+ * if the tty driver's flags have the %TTY_DRIVER_DYNAMIC_DEV bit set. If
+ * that bit is not set, this function should not be called by a tty
+ * driver.
*
- * This call is required to be made to register an individual tty device
- * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If
- * that bit is not set, this function should not be called by a tty
- * driver.
+ * Locking: ??
*
- * Locking: ??
+ * Return: A pointer to the struct device for this tty device (or
+ * ERR_PTR(-EFOO) on error).
*/
-
struct device *tty_register_device(struct tty_driver *driver, unsigned index,
struct device *device)
{
@@ -3227,24 +3196,23 @@ static void tty_device_create_release(struct device *dev)
}
/**
- * tty_register_device_attr - register a tty device
- * @driver: the tty driver that describes the tty device
- * @index: the index in the tty driver for this tty device
- * @device: a struct device that is associated with this tty device.
- * This field is optional, if there is no known struct device
- * for this tty device it can be set to NULL safely.
- * @drvdata: Driver data to be set to device.
- * @attr_grp: Attribute group to be set on device.
+ * tty_register_device_attr - register a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
+ * @device: a struct device that is associated with this tty device.
+ *	This field is optional; if there is no known struct device
+ *	for this tty device it can be set to %NULL safely.
+ * @drvdata: Driver data to be set to device.
+ * @attr_grp: Attribute group to be set on device.
*
- * Returns a pointer to the struct device for this tty device
- * (or ERR_PTR(-EFOO) on error).
+ * This call is required to be made to register an individual tty device if the
+ * tty driver's flags have the %TTY_DRIVER_DYNAMIC_DEV bit set. If that bit is
+ * not set, this function should not be called by a tty driver.
*
- * This call is required to be made to register an individual tty device
- * if the tty driver's flags have the TTY_DRIVER_DYNAMIC_DEV bit set. If
- * that bit is not set, this function should not be called by a tty
- * driver.
+ * Locking: ??
*
- * Locking: ??
+ * Return: A pointer to the struct device for this tty device (or
+ * ERR_PTR(-EFOO) on error).
*/
struct device *tty_register_device_attr(struct tty_driver *driver,
unsigned index, struct device *device,
@@ -3317,16 +3285,15 @@ err_put:
EXPORT_SYMBOL_GPL(tty_register_device_attr);
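A sketch of the %TTY_DRIVER_DYNAMIC_DEV flow these registration helpers and
the tty_unregister_device() documented just below describe (foo_* names are
hypothetical; the driver must have been allocated with that flag set):

    #include <linux/err.h>
    #include <linux/tty.h>

    static int foo_attach_line(struct tty_driver *drv, unsigned int index,
                               struct device *parent)
    {
            struct device *dev = tty_register_device(drv, index, parent);

            return IS_ERR(dev) ? PTR_ERR(dev) : 0;
    }

    static void foo_detach_line(struct tty_driver *drv, unsigned int index)
    {
            tty_unregister_device(drv, index);      /* required on removal */
    }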
/**
- * tty_unregister_device - unregister a tty device
- * @driver: the tty driver that describes the tty device
- * @index: the index in the tty driver for this tty device
+ * tty_unregister_device - unregister a tty device
+ * @driver: the tty driver that describes the tty device
+ * @index: the index in the tty driver for this tty device
*
- * If a tty device is registered with a call to tty_register_device() then
- * this function must be called when the tty device is gone.
+ * If a tty device is registered with a call to tty_register_device() then
+ * this function must be called when the tty device is gone.
*
- * Locking: ??
+ * Locking: ??
*/
-
void tty_unregister_device(struct tty_driver *driver, unsigned index)
{
device_destroy(tty_class,
@@ -3342,10 +3309,10 @@ EXPORT_SYMBOL(tty_unregister_device);
* __tty_alloc_driver -- allocate tty driver
* @lines: count of lines this driver can handle at most
* @owner: module which is responsible for this driver
- * @flags: some of TTY_DRIVER_* flags, will be set in driver->flags
+ * @flags: some of %TTY_DRIVER_ flags, will be set in driver->flags
*
* This should not be called directly, some of the provided macros should be
- * used instead. Use IS_ERR and friends on @retval.
+ * used instead. Use IS_ERR() and friends on @retval.
*/
struct tty_driver *__tty_alloc_driver(unsigned int lines, struct module *owner,
unsigned long flags)
@@ -3432,13 +3399,22 @@ static void destruct_tty_driver(struct kref *kref)
kfree(driver);
}
+/**
+ * tty_driver_kref_put -- drop a reference to a tty driver
+ * @driver: driver of which to drop the reference
+ *
+ * The final put will destroy and free up the driver.
+ */
void tty_driver_kref_put(struct tty_driver *driver)
{
kref_put(&driver->kref, destruct_tty_driver);
}
EXPORT_SYMBOL(tty_driver_kref_put);
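Taken together with __tty_alloc_driver() above, the usual bring-up looks
roughly like this sketch (foo_* names are hypothetical; tty_alloc_driver() is
the macro front-end that supplies THIS_MODULE):

    #include <linux/err.h>
    #include <linux/tty.h>
    #include <linux/tty_driver.h>

    static const struct tty_operations foo_ops; /* real drivers fill this in */
    static struct tty_driver *foo_driver;

    static int __init foo_init(void)    /* wired up via module_init() */
    {
            int ret;

            foo_driver = tty_alloc_driver(4, TTY_DRIVER_REAL_RAW |
                                             TTY_DRIVER_DYNAMIC_DEV);
            if (IS_ERR(foo_driver))
                    return PTR_ERR(foo_driver);

            foo_driver->driver_name = "foo";
            foo_driver->name = "ttyFOO";
            foo_driver->type = TTY_DRIVER_TYPE_SERIAL;
            foo_driver->init_termios = tty_std_termios;
            tty_set_operations(foo_driver, &foo_ops);

            ret = tty_register_driver(foo_driver);
            if (ret)
                    tty_driver_kref_put(foo_driver);  /* final put frees it */
            return ret;
    }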
-/*
+/**
+ * tty_register_driver -- register a tty driver
+ * @driver: driver to register
+ *
* Called by a tty driver to register itself.
*/
int tty_register_driver(struct tty_driver *driver)
@@ -3500,7 +3476,10 @@ err:
}
EXPORT_SYMBOL(tty_register_driver);
-/*
+/**
+ * tty_unregister_driver -- unregister a tty driver
+ * @driver: driver to unregister
+ *
* Called by a tty driver to unregister itself.
*/
void tty_unregister_driver(struct tty_driver *driver)
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 3e4e0b20b4bb..776d8a62f77c 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -47,17 +47,14 @@ static DEFINE_RAW_SPINLOCK(tty_ldiscs_lock);
static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
/**
- * tty_register_ldisc - install a line discipline
- * @new_ldisc: pointer to the ldisc object
+ * tty_register_ldisc - install a line discipline
+ * @new_ldisc: pointer to the ldisc object
*
- * Installs a new line discipline into the kernel. The discipline
- * is set up as unreferenced and then made available to the kernel
- * from this point onwards.
+ * Installs a new line discipline into the kernel. The discipline is set up as
+ * unreferenced and then made available to the kernel from this point onwards.
*
- * Locking:
- * takes tty_ldiscs_lock to guard against ldisc races
+ * Locking: takes %tty_ldiscs_lock to guard against ldisc races
*/
-
int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc)
{
unsigned long flags;
@@ -75,14 +72,13 @@ int tty_register_ldisc(struct tty_ldisc_ops *new_ldisc)
EXPORT_SYMBOL(tty_register_ldisc);
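A minimal registration sketch under the interface where the ops carry their
own number. Everything foo_* is hypothetical, and the .num value is a
stand-in for a genuinely reserved entry below NR_LDISCS:

    #include <linux/module.h>
    #include <linux/tty.h>
    #include <linux/tty_ldisc.h>

    static int foo_ldisc_open(struct tty_struct *tty)
    {
            return 0;       /* per-tty setup would go here */
    }

    static struct tty_ldisc_ops foo_ldisc_ops = {
            .owner  = THIS_MODULE,
            .num    = 29,           /* stand-in for a reserved N_FOO slot */
            .name   = "foo",
            .open   = foo_ldisc_open,
    };

    static int __init foo_ldisc_init(void)
    {
            /* pair with tty_unregister_ldisc(&foo_ldisc_ops) on exit */
            return tty_register_ldisc(&foo_ldisc_ops);
    }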
/**
- * tty_unregister_ldisc - unload a line discipline
- * @ldisc: ldisc number
+ * tty_unregister_ldisc - unload a line discipline
+ * @ldisc: ldisc number
*
- * Remove a line discipline from the kernel providing it is not
- * currently in use.
+ * Remove a line discipline from the kernel providing it is not currently in
+ * use.
*
- * Locking:
- * takes tty_ldiscs_lock to guard against ldisc races
+ * Locking: takes %tty_ldiscs_lock to guard against ldisc races
*/
void tty_unregister_ldisc(struct tty_ldisc_ops *ldisc)
@@ -122,27 +118,25 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
}
static int tty_ldisc_autoload = IS_BUILTIN(CONFIG_LDISC_AUTOLOAD);
+
/**
- * tty_ldisc_get - take a reference to an ldisc
- * @tty: tty device
- * @disc: ldisc number
- *
- * Takes a reference to a line discipline. Deals with refcounts and
- * module locking counts.
- *
- * Returns: -EINVAL if the discipline index is not [N_TTY..NR_LDISCS] or
- * if the discipline is not registered
- * -EAGAIN if request_module() failed to load or register the
- * discipline
- * -ENOMEM if allocation failure
- *
- * Otherwise, returns a pointer to the discipline and bumps the
- * ref count
- *
- * Locking:
- * takes tty_ldiscs_lock to guard against ldisc races
+ * tty_ldisc_get - take a reference to an ldisc
+ * @tty: tty device
+ * @disc: ldisc number
+ *
+ * Takes a reference to a line discipline. Deals with refcounts and module
+ * locking counts. If the discipline is not available, its module is loaded,
+ * if possible.
+ *
+ * Returns:
+ * * -%EINVAL if the discipline index is not [%N_TTY .. %NR_LDISCS] or if the
+ * discipline is not registered
+ * * -%EAGAIN if request_module() failed to load or register the discipline
+ * * -%ENOMEM on allocation failure
+ * * Otherwise, returns a pointer to the discipline and bumps the ref count
+ *
+ * Locking: takes %tty_ldiscs_lock to guard against ldisc races
*/
-
static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
{
struct tty_ldisc *ld;
@@ -176,10 +170,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
return ld;
}
-/*
- * tty_ldisc_put - release the ldisc
+/**
+ * tty_ldisc_put - release the ldisc
+ * @ld: ldisc to release
*
- * Complement of tty_ldisc_get().
+ * Complement of tty_ldisc_get().
*/
static void tty_ldisc_put(struct tty_ldisc *ld)
{
@@ -226,25 +221,22 @@ const struct seq_operations tty_ldiscs_seq_ops = {
};
/**
- * tty_ldisc_ref_wait - wait for the tty ldisc
- * @tty: tty device
+ * tty_ldisc_ref_wait - wait for the tty ldisc
+ * @tty: tty device
*
- * Dereference the line discipline for the terminal and take a
- * reference to it. If the line discipline is in flux then
- * wait patiently until it changes.
+ * Dereference the line discipline for the terminal and take a reference to it.
+ * If the line discipline is in flux then wait patiently until it changes.
*
- * Returns: NULL if the tty has been hungup and not re-opened with
- * a new file descriptor, otherwise valid ldisc reference
+ * Returns: %NULL if the tty has been hungup and not re-opened with a new file
+ * descriptor, otherwise valid ldisc reference
*
- * Note 1: Must not be called from an IRQ/timer context. The caller
- * must also be careful not to hold other locks that will deadlock
- * against a discipline change, such as an existing ldisc reference
- * (which we check for)
+ * Note 1: Must not be called from an IRQ/timer context. The caller must also
+ * be careful not to hold other locks that will deadlock against a discipline
+ * change, such as an existing ldisc reference (which we check for).
*
- * Note 2: a file_operations routine (read/poll/write) should use this
- * function to wait for any ldisc lifetime events to finish.
+ * Note 2: a file_operations routine (read/poll/write) should use this function
+ * to wait for any ldisc lifetime events to finish.
*/
-
struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
{
struct tty_ldisc *ld;
@@ -258,14 +250,13 @@ struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
/**
- * tty_ldisc_ref - get the tty ldisc
- * @tty: tty device
+ * tty_ldisc_ref - get the tty ldisc
+ * @tty: tty device
*
- * Dereference the line discipline for the terminal and take a
- * reference to it. If the line discipline is in flux then
- * return NULL. Can be called from IRQ and timer functions.
+ * Dereference the line discipline for the terminal and take a reference to it.
+ * If the line discipline is in flux then return %NULL. Can be called from IRQ
+ * and timer functions.
*/
-
struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
{
struct tty_ldisc *ld = NULL;
@@ -280,13 +271,12 @@ struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_ldisc_ref);
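The canonical pairing with tty_ldisc_deref() documented below, sketched for
this IRQ-safe variant (foo_poke_ldisc() is hypothetical):

    #include <linux/tty.h>

    static void foo_poke_ldisc(struct tty_struct *tty)
    {
            struct tty_ldisc *ld = tty_ldisc_ref(tty);

            if (!ld)
                    return;         /* discipline in flux; caller retries */

            if (ld->ops->write_wakeup)
                    ld->ops->write_wakeup(tty);

            tty_ldisc_deref(ld);    /* drop the reference when done */
    }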
/**
- * tty_ldisc_deref - free a tty ldisc reference
- * @ld: reference to free up
+ * tty_ldisc_deref - free a tty ldisc reference
+ * @ld: reference to free up
*
- * Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May
- * be called in IRQ context.
+ * Undoes the effect of tty_ldisc_ref() or tty_ldisc_ref_wait(). May be called
+ * in IRQ context.
*/
-
void tty_ldisc_deref(struct tty_ldisc *ld)
{
ldsem_up_read(&ld->tty->ldisc_sem);
@@ -386,13 +376,12 @@ static void tty_ldisc_unlock_pair(struct tty_struct *tty,
}
/**
- * tty_ldisc_flush - flush line discipline queue
- * @tty: tty
+ * tty_ldisc_flush - flush line discipline queue
+ * @tty: tty to flush ldisc for
*
- * Flush the line discipline queue (if any) and the tty flip buffers
- * for this tty.
+ * Flush the line discipline queue (if any) and the tty flip buffers for this
+ * @tty.
*/
-
void tty_ldisc_flush(struct tty_struct *tty)
{
struct tty_ldisc *ld = tty_ldisc_ref(tty);
@@ -404,21 +393,18 @@ void tty_ldisc_flush(struct tty_struct *tty)
EXPORT_SYMBOL_GPL(tty_ldisc_flush);
/**
- * tty_set_termios_ldisc - set ldisc field
- * @tty: tty structure
- * @disc: line discipline number
+ * tty_set_termios_ldisc - set ldisc field
+ * @tty: tty structure
+ * @disc: line discipline number
*
- * This is probably overkill for real world processors but
- * they are not on hot paths so a little discipline won't do
- * any harm.
+ * This is probably overkill for real world processors but they are not on hot
+ * paths so a little discipline won't do any harm.
*
- * The line discipline-related tty_struct fields are reset to
- * prevent the ldisc driver from re-using stale information for
- * the new ldisc instance.
+ * The line discipline-related tty_struct fields are reset to prevent the ldisc
+ * driver from re-using stale information for the new ldisc instance.
*
- * Locking: takes termios_rwsem
+ * Locking: takes termios_rwsem
*/
-
static void tty_set_termios_ldisc(struct tty_struct *tty, int disc)
{
down_write(&tty->termios_rwsem);
@@ -430,16 +416,14 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int disc)
}
/**
- * tty_ldisc_open - open a line discipline
- * @tty: tty we are opening the ldisc on
- * @ld: discipline to open
+ * tty_ldisc_open - open a line discipline
+ * @tty: tty we are opening the ldisc on
+ * @ld: discipline to open
*
- * A helper opening method. Also a convenient debugging and check
- * point.
+ * A helper opening method. Also a convenient debugging and check point.
*
- * Locking: always called with BTM already held.
+ * Locking: always called with BTM already held.
*/
-
static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
{
WARN_ON(test_and_set_bit(TTY_LDISC_OPEN, &tty->flags));
@@ -457,14 +441,12 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
}
/**
- * tty_ldisc_close - close a line discipline
- * @tty: tty we are opening the ldisc on
- * @ld: discipline to close
+ * tty_ldisc_close - close a line discipline
+ * @tty: tty we are opening the ldisc on
+ * @ld: discipline to close
*
- * A helper close method. Also a convenient debugging and check
- * point.
+ * A helper close method. Also a convenient debugging and check point.
*/
-
static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
{
lockdep_assert_held_write(&tty->ldisc_sem);
@@ -476,14 +458,13 @@ static void tty_ldisc_close(struct tty_struct *tty, struct tty_ldisc *ld)
}
/**
- * tty_ldisc_failto - helper for ldisc failback
- * @tty: tty to open the ldisc on
- * @ld: ldisc we are trying to fail back to
+ * tty_ldisc_failto - helper for ldisc failback
+ * @tty: tty to open the ldisc on
+ * @ld: ldisc we are trying to fail back to
*
- * Helper to try and recover a tty when switching back to the old
- * ldisc fails and we need something attached.
+ * Helper to try and recover a tty when switching back to the old ldisc fails
+ * and we need something attached.
*/
-
static int tty_ldisc_failto(struct tty_struct *tty, int ld)
{
struct tty_ldisc *disc = tty_ldisc_get(tty, ld);
@@ -501,14 +482,13 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld)
}
/**
- * tty_ldisc_restore - helper for tty ldisc change
- * @tty: tty to recover
- * @old: previous ldisc
+ * tty_ldisc_restore - helper for tty ldisc change
+ * @tty: tty to recover
+ * @old: previous ldisc
*
- * Restore the previous line discipline or N_TTY when a line discipline
- * change fails due to an open error
+ * Restore the previous line discipline or %N_TTY when a line discipline change
+ * fails due to an open error
*/
-
static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
{
/* There is an outstanding reference here so this is safe */
@@ -528,16 +508,15 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
}
/**
- * tty_set_ldisc - set line discipline
- * @tty: the terminal to set
- * @disc: the line discipline number
- *
- * Set the discipline of a tty line. Must be called from a process
- * context. The ldisc change logic has to protect itself against any
- * overlapping ldisc change (including on the other end of pty pairs),
- * the close of one side of a tty/pty pair, and eventually hangup.
+ * tty_set_ldisc - set line discipline
+ * @tty: the terminal to set
+ * @disc: the line discipline number
+ *
+ * Set the discipline of a tty line. Must be called from a process context. The
+ * ldisc change logic has to protect itself against any overlapping ldisc
+ * change (including on the other end of pty pairs), the close of one side of a
+ * tty/pty pair, and eventually hangup.
*/
-
int tty_set_ldisc(struct tty_struct *tty, int disc)
{
int retval;
@@ -613,10 +592,10 @@ err:
EXPORT_SYMBOL_GPL(tty_set_ldisc);
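From user space this path is reached through the TIOCSETD ioctl handled by
tiocsetd() earlier in this file; as a sketch of the caller side (ldisc
numbers come from the uapi <linux/tty.h>):

    /* user-space counterpart, not kernel code */
    #include <stdio.h>
    #include <sys/ioctl.h>

    int set_line_discipline(int fd, int ldisc)
    {
            /* ends up in tty_set_ldisc() above */
            if (ioctl(fd, TIOCSETD, &ldisc) < 0) {
                    perror("TIOCSETD");
                    return -1;
            }
            return 0;
    }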
/**
- * tty_ldisc_kill - teardown ldisc
- * @tty: tty being released
+ * tty_ldisc_kill - teardown ldisc
+ * @tty: tty being released
*
- * Perform final close of the ldisc and reset tty->ldisc
+ * Perform final close of the ldisc and reset @tty->ldisc
*/
static void tty_ldisc_kill(struct tty_struct *tty)
{
@@ -633,12 +612,11 @@ static void tty_ldisc_kill(struct tty_struct *tty)
}
/**
- * tty_reset_termios - reset terminal state
- * @tty: tty to reset
+ * tty_reset_termios - reset terminal state
+ * @tty: tty to reset
*
- * Restore a terminal to the driver default state.
+ * Restore a terminal to the driver default state.
*/
-
static void tty_reset_termios(struct tty_struct *tty)
{
down_write(&tty->termios_rwsem);
@@ -650,19 +628,17 @@ static void tty_reset_termios(struct tty_struct *tty)
/**
- * tty_ldisc_reinit - reinitialise the tty ldisc
- * @tty: tty to reinit
- * @disc: line discipline to reinitialize
+ * tty_ldisc_reinit - reinitialise the tty ldisc
+ * @tty: tty to reinit
+ * @disc: line discipline to reinitialize
*
- * Completely reinitialize the line discipline state, by closing the
- * current instance, if there is one, and opening a new instance. If
- * an error occurs opening the new non-N_TTY instance, the instance
- * is dropped and tty->ldisc reset to NULL. The caller can then retry
- * with N_TTY instead.
+ * Completely reinitialize the line discipline state, by closing the current
+ * instance, if there is one, and opening a new instance. If an error occurs
+ * opening the new non-%N_TTY instance, the instance is dropped and @tty->ldisc
+ * reset to %NULL. The caller can then retry with %N_TTY instead.
*
- * Returns 0 if successful, otherwise error code < 0
+ * Returns: 0 if successful, otherwise error code < 0
*/
-
int tty_ldisc_reinit(struct tty_struct *tty, int disc)
{
struct tty_ldisc *ld;
@@ -692,21 +668,20 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc)
}
/**
- * tty_ldisc_hangup - hangup ldisc reset
- * @tty: tty being hung up
- * @reinit: whether to re-initialise the tty
+ * tty_ldisc_hangup - hangup ldisc reset
+ * @tty: tty being hung up
+ * @reinit: whether to re-initialise the tty
*
- * Some tty devices reset their termios when they receive a hangup
- * event. In that situation we must also switch back to N_TTY properly
- * before we reset the termios data.
+ * Some tty devices reset their termios when they receive a hangup event. In
+ * that situation we must also switch back to %N_TTY properly before we reset
+ * the termios data.
*
- * Locking: We can take the ldisc mutex as the rest of the code is
- * careful to allow for this.
+ * Locking: We can take the ldisc mutex as the rest of the code is careful to
+ * allow for this.
*
- * In the pty pair case this occurs in the close() path of the
- * tty itself so we must be careful about locking rules.
+ * In the pty pair case this occurs in the close() path of the tty itself so we
+ * must be careful about locking rules.
*/
-
void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
{
struct tty_ldisc *ld;
@@ -752,15 +727,14 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit)
}
/**
- * tty_ldisc_setup - open line discipline
- * @tty: tty being shut down
- * @o_tty: pair tty for pty/tty pairs
+ * tty_ldisc_setup - open line discipline
+ * @tty: tty being shut down
+ * @o_tty: pair tty for pty/tty pairs
*
- * Called during the initial open of a tty/pty pair in order to set up the
- * line disciplines and bind them to the tty. This has no locking issues
- * as the device isn't yet active.
+ * Called during the initial open of a tty/pty pair in order to set up the line
+ * disciplines and bind them to the @tty. This has no locking issues as the
+ * device isn't yet active.
*/
-
int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
{
int retval = tty_ldisc_open(tty, tty->ldisc);
@@ -783,13 +757,12 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
}
/**
- * tty_ldisc_release - release line discipline
- * @tty: tty being shut down (or one end of pty pair)
+ * tty_ldisc_release - release line discipline
+ * @tty: tty being shut down (or one end of pty pair)
*
- * Called during the final close of a tty or a pty pair in order to shut
- * down the line discpline layer. On exit, each tty's ldisc is NULL.
+ * Called during the final close of a tty or a pty pair in order to shut down
+ * the line discipline layer. On exit, each tty's ldisc is %NULL.
*/
-
void tty_ldisc_release(struct tty_struct *tty)
{
struct tty_struct *o_tty = tty->link;
@@ -814,13 +787,12 @@ void tty_ldisc_release(struct tty_struct *tty)
}
/**
- * tty_ldisc_init - ldisc setup for new tty
- * @tty: tty being allocated
+ * tty_ldisc_init - ldisc setup for new tty
+ * @tty: tty being allocated
*
- * Set up the line discipline objects for a newly allocated tty. Note that
- * the tty structure is not completely set up when this call is made.
+ * Set up the line discipline objects for a newly allocated tty. Note that the
+ * tty structure is not completely set up when this call is made.
*/
-
int tty_ldisc_init(struct tty_struct *tty)
{
struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
@@ -832,11 +804,11 @@ int tty_ldisc_init(struct tty_struct *tty)
}
/**
- * tty_ldisc_deinit - ldisc cleanup for new tty
- * @tty: tty that was allocated recently
+ * tty_ldisc_deinit - ldisc cleanup for new tty
+ * @tty: tty that was allocated recently
*
- * The tty structure must not becompletely set up (tty_ldisc_setup) when
- * this call is made.
+ * The tty structure must not be completely set up (tty_ldisc_setup()) when
+ * this call is made.
*/
void tty_ldisc_deinit(struct tty_struct *tty)
{
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index ce8291053af3..3be428c16260 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -163,7 +163,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
/*
* Try to reverse the lock attempt but if the count has changed
- * so that reversing fails, check if there are are no waiters,
+ * so that reversing fails, check if there are no waiters,
* and early-out if not
*/
do {
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 2f1061a9d926..7709ce655f44 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -59,6 +59,15 @@ const struct tty_port_client_operations tty_port_default_client_ops = {
};
EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
+/**
+ * tty_port_init -- initialize tty_port
+ * @port: tty_port to initialize
+ *
+ * Initializes the state of struct tty_port. When a port was initialized using
+ * this function, one has to destroy the port by tty_port_destroy(). Either
+ * indirectly by using &tty_port refcounting (tty_port_put()) or directly if
+ * refcounting is not used.
+ */
void tty_port_init(struct tty_port *port)
{
memset(port, 0, sizeof(*port));
@@ -82,9 +91,9 @@ EXPORT_SYMBOL(tty_port_init);
* @index: index of the tty
*
* Provide the tty layer with a link from a tty (specified by @index) to a
- * tty_port (@port). Use this only if neither tty_port_register_device nor
- * tty_port_install is used in the driver. If used, this has to be called before
- * tty_register_driver.
+ * tty_port (@port). Use this only if neither tty_port_register_device() nor
+ * tty_port_install() is used in the driver. If used, this has to be called
+ * before tty_register_driver().
*/
void tty_port_link_device(struct tty_port *port,
struct tty_driver *driver, unsigned index)
@@ -102,9 +111,9 @@ EXPORT_SYMBOL_GPL(tty_port_link_device);
* @index: index of the tty
* @device: parent if exists, otherwise NULL
*
- * It is the same as tty_register_device except the provided @port is linked to
- * a concrete tty specified by @index. Use this or tty_port_install (or both).
- * Call tty_port_link_device as a last resort.
+ * It is the same as tty_register_device() except the provided @port is linked
+ * to a concrete tty specified by @index. Use this or tty_port_install() (or
+ * both). Call tty_port_link_device() as a last resort.
*/
struct device *tty_port_register_device(struct tty_port *port,
struct tty_driver *driver, unsigned index,
@@ -123,9 +132,9 @@ EXPORT_SYMBOL_GPL(tty_port_register_device);
* @drvdata: Driver data to be set to device.
* @attr_grp: Attribute group to be set on device.
*
- * It is the same as tty_register_device_attr except the provided @port is
- * linked to a concrete tty specified by @index. Use this or tty_port_install
- * (or both). Call tty_port_link_device as a last resort.
+ * It is the same as tty_register_device_attr() except the provided @port is
+ * linked to a concrete tty specified by @index. Use this or tty_port_install()
+ * (or both). Call tty_port_link_device() as a last resort.
*/
struct device *tty_port_register_device_attr(struct tty_port *port,
struct tty_driver *driver, unsigned index,
@@ -240,9 +249,9 @@ EXPORT_SYMBOL(tty_port_free_xmit_buf);
* tty_port_destroy -- destroy inited port
* @port: tty port to be destroyed
*
- * When a port was initialized using tty_port_init, one has to destroy the
- * port by this function. Either indirectly by using tty_port refcounting
- * (tty_port_put) or directly if refcounting is not used.
+ * When a port was initialized using tty_port_init(), one has to destroy the
+ * port by this function, either indirectly by using &tty_port refcounting
+ * (tty_port_put()) or directly if refcounting is not used.
*/
void tty_port_destroy(struct tty_port *port)
{
@@ -267,6 +276,13 @@ static void tty_port_destructor(struct kref *kref)
kfree(port);
}
+/**
+ * tty_port_put -- drop a reference to tty_port
+ * @port: port to drop a reference of (can be NULL)
+ *
+ * The final put will destroy and free up the @port using
+ * @port->ops->destruct() hook, or using kfree() if not provided.
+ */
void tty_port_put(struct tty_port *port)
{
if (port)
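The init/destroy/put kernel-doc above pairs into one simple lifecycle. A
minimal sketch of a driver using &tty_port refcounting follows; the my_serial
structure and its names are hypothetical, only the tty_port_*() calls are the
API documented here:

        struct my_serial {
                struct tty_port port;
                /* driver-specific state */
        };

        static void my_serial_destruct(struct tty_port *port)
        {
                struct my_serial *ser = container_of(port, struct my_serial, port);

                kfree(ser);                     /* the final tty_port_put() ends up here */
        }

        static const struct tty_port_operations my_serial_port_ops = {
                .destruct       = my_serial_destruct,
        };

        static struct my_serial *my_serial_alloc(void)
        {
                struct my_serial *ser = kzalloc(sizeof(*ser), GFP_KERNEL);

                if (!ser)
                        return NULL;
                tty_port_init(&ser->port);      /* refcount starts at 1 */
                ser->port.ops = &my_serial_port_ops;
                return ser;
        }

        static void my_serial_free(struct my_serial *ser)
        {
                tty_port_put(&ser->port);       /* never kfree() an inited port directly */
        }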
@@ -275,11 +291,11 @@ void tty_port_put(struct tty_port *port)
EXPORT_SYMBOL(tty_port_put);
/**
- * tty_port_tty_get - get a tty reference
- * @port: tty port
+ * tty_port_tty_get - get a tty reference
+ * @port: tty port
*
- * Return a refcount protected tty instance or NULL if the port is not
- * associated with a tty (eg due to close or hangup)
+ * Return a refcount protected tty instance or %NULL if the port is not
+ * associated with a tty (e.g. due to close or hangup).
*/
struct tty_struct *tty_port_tty_get(struct tty_port *port)
{
@@ -294,12 +310,12 @@ struct tty_struct *tty_port_tty_get(struct tty_port *port)
EXPORT_SYMBOL(tty_port_tty_get);
/**
- * tty_port_tty_set - set the tty of a port
- * @port: tty port
- * @tty: the tty
+ * tty_port_tty_set - set the tty of a port
+ * @port: tty port
+ * @tty: the tty
*
- * Associate the port and tty pair. Manages any internal refcounts.
- * Pass NULL to deassociate a port
+ * Associate the port and tty pair. Manages any internal refcounts. Pass %NULL
+ * to deassociate a port.
*/
void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty)
{
@@ -312,6 +328,16 @@ void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty)
}
EXPORT_SYMBOL(tty_port_tty_set);
+/**
+ * tty_port_shutdown - internal helper to shutdown the device
+ * @port: tty port to be shut down
+ * @tty: the associated tty
+ *
+ * It is used by tty_port_hangup() and tty_port_close(). Its task is to shut
+ * down the device if it was initialized (note that consoles remain
+ * functional). It lowers DTR/RTS (if @tty has HUPCL set) and invokes
+ * @port->ops->shutdown().
+ */
static void tty_port_shutdown(struct tty_port *port, struct tty_struct *tty)
{
mutex_lock(&port->mutex);
@@ -335,13 +361,13 @@ out:
}
/**
- * tty_port_hangup - hangup helper
- * @port: tty port
+ * tty_port_hangup - hangup helper
+ * @port: tty port
*
- * Perform port level tty hangup flag and count changes. Drop the tty
- * reference.
+ * Perform port level tty hangup flag and count changes. Drop the tty
+ * reference.
*
- * Caller holds tty lock.
+ * Caller holds tty lock.
*/
void tty_port_hangup(struct tty_port *port)
{
@@ -365,9 +391,8 @@ EXPORT_SYMBOL(tty_port_hangup);
/**
* tty_port_tty_hangup - helper to hang up a tty
- *
* @port: tty port
- * @check_clocal: hang only ttys with CLOCAL unset?
+ * @check_clocal: hang only ttys with %CLOCAL unset?
*/
void tty_port_tty_hangup(struct tty_port *port, bool check_clocal)
{
@@ -381,7 +406,6 @@ EXPORT_SYMBOL_GPL(tty_port_tty_hangup);
/**
* tty_port_tty_wakeup - helper to wake up a tty
- *
* @port: tty port
*/
void tty_port_tty_wakeup(struct tty_port *port)
@@ -391,12 +415,12 @@ void tty_port_tty_wakeup(struct tty_port *port)
EXPORT_SYMBOL_GPL(tty_port_tty_wakeup);
/**
- * tty_port_carrier_raised - carrier raised check
- * @port: tty port
+ * tty_port_carrier_raised - carrier raised check
+ * @port: tty port
*
- * Wrapper for the carrier detect logic. For the moment this is used
- * to hide some internal details. This will eventually become entirely
- * internal to the tty port.
+ * Wrapper for the carrier detect logic. For the moment this is used
+ * to hide some internal details. This will eventually become entirely
+ * internal to the tty port.
*/
int tty_port_carrier_raised(struct tty_port *port)
{
@@ -407,12 +431,12 @@ int tty_port_carrier_raised(struct tty_port *port)
EXPORT_SYMBOL(tty_port_carrier_raised);
/**
- * tty_port_raise_dtr_rts - Raise DTR/RTS
- * @port: tty port
+ * tty_port_raise_dtr_rts - Raise DTR/RTS
+ * @port: tty port
*
- * Wrapper for the DTR/RTS raise logic. For the moment this is used
- * to hide some internal details. This will eventually become entirely
- * internal to the tty port.
+ * Wrapper for the DTR/RTS raise logic. For the moment this is used to hide
+ * some internal details. This will eventually become entirely internal to the
+ * tty port.
*/
void tty_port_raise_dtr_rts(struct tty_port *port)
{
@@ -422,12 +446,12 @@ void tty_port_raise_dtr_rts(struct tty_port *port)
EXPORT_SYMBOL(tty_port_raise_dtr_rts);
/**
- * tty_port_lower_dtr_rts - Lower DTR/RTS
- * @port: tty port
+ * tty_port_lower_dtr_rts - Lower DTR/RTS
+ * @port: tty port
*
- * Wrapper for the DTR/RTS raise logic. For the moment this is used
- * to hide some internal details. This will eventually become entirely
- * internal to the tty port.
+ * Wrapper for the DTR/RTS lower logic. For the moment this is used to hide
+ * some internal details. This will eventually become entirely internal to the
+ * tty port.
*/
void tty_port_lower_dtr_rts(struct tty_port *port)
{
@@ -437,28 +461,29 @@ void tty_port_lower_dtr_rts(struct tty_port *port)
EXPORT_SYMBOL(tty_port_lower_dtr_rts);
/**
- * tty_port_block_til_ready - Waiting logic for tty open
- * @port: the tty port being opened
- * @tty: the tty device being bound
- * @filp: the file pointer of the opener or NULL
- *
- * Implement the core POSIX/SuS tty behaviour when opening a tty device.
- * Handles:
- * - hangup (both before and during)
- * - non blocking open
- * - rts/dtr/dcd
- * - signals
- * - port flags and counts
- *
- * The passed tty_port must implement the carrier_raised method if it can
- * do carrier detect and the dtr_rts method if it supports software
- * management of these lines. Note that the dtr/rts raise is done each
- * iteration as a hangup may have previously dropped them while we wait.
- *
- * Caller holds tty lock.
- *
- * NB: May drop and reacquire tty lock when blocking, so tty and tty_port
- * may have changed state (eg., may have been hung up).
+ * tty_port_block_til_ready - Waiting logic for tty open
+ * @port: the tty port being opened
+ * @tty: the tty device being bound
+ * @filp: the file pointer of the opener or %NULL
+ *
+ * Implement the core POSIX/SuS tty behaviour when opening a tty device.
+ * Handles:
+ *
+ * - hangup (both before and during)
+ * - non blocking open
+ * - rts/dtr/dcd
+ * - signals
+ * - port flags and counts
+ *
+ * The passed @port must implement the @port->ops->carrier_raised method if it
+ * can do carrier detect and the @port->ops->dtr_rts method if it supports
+ * software management of these lines. Note that the dtr/rts raise is done each
+ * iteration as a hangup may have previously dropped them while we wait.
+ *
+ * Caller holds tty lock.
+ *
+ * Note: May drop and reacquire tty lock when blocking, so @tty and @port may
+ * have changed state (e.g., may have been hung up).
*/
int tty_port_block_til_ready(struct tty_port *port,
struct tty_struct *tty, struct file *filp)
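The @port->ops->carrier_raised and @port->ops->dtr_rts hooks named above would
look roughly like this sketch; the my_hw_*() register helpers and MY_* bit
names are hypothetical stand-ins for real hardware access:

        static int my_serial_carrier_raised(struct tty_port *port)
        {
                struct my_serial *ser = container_of(port, struct my_serial, port);

                /* non-zero means DCD is high, so the blocking open may proceed */
                return my_hw_read_msr(ser) & MY_MSR_DCD;
        }

        static void my_serial_dtr_rts(struct tty_port *port, int raise)
        {
                struct my_serial *ser = container_of(port, struct my_serial, port);

                if (raise)
                        my_hw_set_mctrl(ser, MY_MCR_DTR | MY_MCR_RTS);
                else
                        my_hw_clear_mctrl(ser, MY_MCR_DTR | MY_MCR_RTS);
        }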
@@ -560,7 +585,21 @@ static void tty_port_drain_delay(struct tty_port *port, struct tty_struct *tty)
schedule_timeout_interruptible(timeout);
}
-/* Caller holds tty lock. */
+/**
+ * tty_port_close_start - helper for tty->ops->close, part 1/2
+ * @port: tty_port of the device
+ * @tty: tty being closed
+ * @filp: passed file pointer
+ *
+ * Decrements and checks open count. Flushes the port if this is the last
+ * close. That means dropping the data from the output buffer on the device and
+ * waiting for the sending logic to finish. The rest of close handling is performed
+ * in tty_port_close_end().
+ *
+ * Locking: Caller holds tty lock.
+ *
+ * Return: 1 if this is the last close, otherwise 0
+ */
int tty_port_close_start(struct tty_port *port,
struct tty_struct *tty, struct file *filp)
{
@@ -606,7 +645,17 @@ int tty_port_close_start(struct tty_port *port,
}
EXPORT_SYMBOL(tty_port_close_start);
-/* Caller holds tty lock */
+/**
+ * tty_port_close_end - helper for tty->ops->close, part 2/2
+ * @port: tty_port of the device
+ * @tty: tty being closed
+ *
+ * This is a continuation of the first part: tty_port_close_start(). This
+ * should be called after turning off the device. It flushes the data from the
+ * line discipline and delays the close by @port->close_delay.
+ *
+ * Locking: Caller holds tty lock.
+ */
void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
{
unsigned long flags;
@@ -628,10 +677,18 @@ void tty_port_close_end(struct tty_port *port, struct tty_struct *tty)
}
EXPORT_SYMBOL(tty_port_close_end);
-/*
- * tty_port_close
+/**
+ * tty_port_close - generic tty->ops->close handler
+ * @port: tty_port of the device
+ * @tty: tty being closed
+ * @filp: passed file pointer
+ *
+ * It is a generic helper to be used in a driver's @tty->ops->close. It wraps a
+ * sequence of tty_port_close_start(), tty_port_shutdown(), and
+ * tty_port_close_end(). The latter two are called only if this is the last
+ * close. See the respective functions for the details.
*
- * Caller holds tty lock
+ * Locking: Caller holds tty lock
*/
void tty_port_close(struct tty_port *port, struct tty_struct *tty,
struct file *filp)
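Drivers that cannot use tty_port_close() directly (for example because an
extra step belongs between the two halves) open-code the same sequence. A
hedged sketch, with my_hw_shutdown() as a hypothetical device-quiesce step:

        static void my_serial_close(struct tty_struct *tty, struct file *filp)
        {
                struct my_serial *ser = tty->driver_data;

                if (tty_port_close_start(&ser->port, tty, filp) == 0)
                        return;                 /* not the last close */

                my_hw_shutdown(ser);            /* hypothetical: stop the hardware */

                tty_port_close_end(&ser->port, tty);
                tty_port_tty_set(&ser->port, NULL);
        }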
@@ -652,9 +709,9 @@ EXPORT_SYMBOL(tty_port_close);
* @driver: tty_driver for this device
* @tty: tty to be installed
*
- * It is the same as tty_standard_install except the provided @port is linked
- * to a concrete tty specified by @tty. Use this or tty_port_register_device
- * (or both). Call tty_port_link_device as a last resort.
+ * It is the same as tty_standard_install() except the provided @port is linked
+ * to a concrete tty specified by @tty. Use this or tty_port_register_device()
+ * (or both). Call tty_port_link_device() as a last resort.
*/
int tty_port_install(struct tty_port *port, struct tty_driver *driver,
struct tty_struct *tty)
@@ -664,13 +721,21 @@ int tty_port_install(struct tty_port *port, struct tty_driver *driver,
}
EXPORT_SYMBOL_GPL(tty_port_install);
-/*
- * tty_port_open
+/**
+ * tty_port_open - generic tty->ops->open handler
+ * @port: tty_port of the device
+ * @tty: tty to be opened
+ * @filp: passed file pointer
*
- * Caller holds tty lock.
+ * It is a generic helper to be used in a driver's @tty->ops->open. It activates
+ * the device using @port->ops->activate() if not active already, and waits for
+ * the device to be ready using tty_port_block_til_ready() (e.g. raises
+ * DTR/RTS and waits for carrier).
+ *
+ * Locking: Caller holds tty lock.
*
- * NB: may drop and reacquire tty lock (in tty_port_block_til_ready()) so
- * tty and tty_port may have changed state (eg., may be hung up now)
+ * Note: may drop and reacquire tty lock (in tty_port_block_til_ready()) so
+ * @tty and @port may have changed state (e.g., may be hung up now).
*/
int tty_port_open(struct tty_port *port, struct tty_struct *tty,
struct file *filp)
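Putting the pieces together, a driver built on these helpers typically only
supplies the activate hook and forwards open/close. A minimal sketch, with
my_hw_startup() hypothetical:

        static int my_serial_activate(struct tty_port *port, struct tty_struct *tty)
        {
                struct my_serial *ser = container_of(port, struct my_serial, port);

                return my_hw_startup(ser);      /* hypothetical: power up, enable irqs */
        }
        /* .activate goes into my_serial_port_ops, next to .destruct above */

        static int my_serial_open(struct tty_struct *tty, struct file *filp)
        {
                struct my_serial *ser = tty->driver_data;

                return tty_port_open(&ser->port, tty, filp);
        }

        static const struct tty_operations my_serial_ops = {
                .open   = my_serial_open,
                .close  = my_serial_close,      /* from the sketch above */
        };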
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index c7fbbcdcc346..be8313cdbac3 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -153,6 +153,7 @@ static int shift_state = 0;
static unsigned int ledstate = -1U; /* undefined */
static unsigned char ledioctl;
+static bool vt_switch;
/*
* Notifier list for console keyboard events
@@ -324,13 +325,13 @@ int kbd_rate(struct kbd_repeat *rpt)
static void put_queue(struct vc_data *vc, int ch)
{
tty_insert_flip_char(&vc->port, ch, 0);
- tty_schedule_flip(&vc->port);
+ tty_flip_buffer_push(&vc->port);
}
static void puts_queue(struct vc_data *vc, const char *cp)
{
tty_insert_flip_string(&vc->port, cp, strlen(cp));
- tty_schedule_flip(&vc->port);
+ tty_flip_buffer_push(&vc->port);
}
static void applkey(struct vc_data *vc, int key, char mode)
@@ -414,6 +415,12 @@ void vt_set_leds_compute_shiftstate(void)
{
unsigned long flags;
+ /*
+ * When the VT is switched, the keyboard LEDs need to be set once.
+ * Ensure that after the switch is completed, the state of the
+ * keyboard LEDs is consistent with the keyboard lock state.
+ */
+ vt_switch = true;
set_leds();
spin_lock_irqsave(&kbd_event_lock, flags);
@@ -584,7 +591,7 @@ static void fn_inc_console(struct vc_data *vc)
static void fn_send_intr(struct vc_data *vc)
{
tty_insert_flip_char(&vc->port, 0, TTY_BREAK);
- tty_schedule_flip(&vc->port);
+ tty_flip_buffer_push(&vc->port);
}
static void fn_scroll_forw(struct vc_data *vc)
@@ -1255,6 +1262,11 @@ static void kbd_bh(struct tasklet_struct *unused)
leds |= (unsigned int)kbd->lockstate << 8;
spin_unlock_irqrestore(&led_lock, flags);
+ if (vt_switch) {
+ ledstate = ~leds;
+ vt_switch = false;
+ }
+
if (leds != ledstate) {
kbd_propagate_led_state(ledstate, leds);
ledstate = leds;
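The ledstate = ~leds assignment is only there to defeat the equality check
that follows: after a console switch the cached state is made bitwise-unequal
to the freshly computed one, so kbd_propagate_led_state() runs exactly once
even when the LED value itself did not change. In condensed form:

        if (vt_switch) {
                ledstate = ~leds;               /* guarantee ledstate != leds below */
                vt_switch = false;
        }
        if (leds != ledstate) {                 /* always true right after a VT switch */
                kbd_propagate_led_state(ledstate, leds);
                ledstate = leds;
        }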
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 7359c3e80d63..f8c87c4d7399 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1833,7 +1833,7 @@ static void csi_m(struct vc_data *vc)
static void respond_string(const char *p, size_t len, struct tty_port *port)
{
tty_insert_flip_string(port, p, len);
- tty_schedule_flip(port);
+ tty_flip_buffer_push(port);
}
static void cursor_report(struct vc_data *vc, struct tty_struct *tty)
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 3639bb6dc372..58013698635f 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -599,8 +599,8 @@ static int vt_setactivate(struct vt_setactivate __user *sa)
if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
return -ENXIO;
- vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES + 1);
vsa.console--;
+ vsa.console = array_index_nospec(vsa.console, MAX_NR_CONSOLES);
console_lock();
ret = vc_allocate(vsa.console);
if (ret) {
@@ -845,6 +845,7 @@ int vt_ioctl(struct tty_struct *tty,
return -ENXIO;
arg--;
+ arg = array_index_nospec(arg, MAX_NR_CONSOLES);
console_lock();
ret = vc_allocate(arg);
console_unlock();
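Both hunks follow the same Spectre-v1 rule: clamp the exact value that indexes
the array, against the exact size of the array, after any arithmetic on it. A
generic sketch of the pattern (idx, table, and MAX are hypothetical names):

        #include <linux/nospec.h>

        /* idx arrives 1-based from userspace; table[] has MAX entries */
        if (idx == 0 || idx > MAX)
                return -ENXIO;
        idx--;                                  /* convert to 0-based first */
        idx = array_index_nospec(idx, MAX);     /* then clamp under speculation */
        use(table[idx]);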
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index ea96e319c8a0..43afbb7c5ab9 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -83,13 +83,14 @@ static struct map_sysfs_entry size_attribute =
static struct map_sysfs_entry offset_attribute =
__ATTR(offset, S_IRUGO, map_offset_show, NULL);
-static struct attribute *attrs[] = {
+static struct attribute *map_attrs[] = {
&name_attribute.attr,
&addr_attribute.attr,
&size_attribute.attr,
&offset_attribute.attr,
NULL, /* need to NULL terminate the list of attributes */
};
+ATTRIBUTE_GROUPS(map);
static void map_release(struct kobject *kobj)
{
@@ -119,7 +120,7 @@ static const struct sysfs_ops map_sysfs_ops = {
static struct kobj_type map_attr_type = {
.release = map_release,
.sysfs_ops = &map_sysfs_ops,
- .default_attrs = attrs,
+ .default_groups = map_groups,
};
struct uio_portio {
@@ -178,6 +179,7 @@ static struct attribute *portio_attrs[] = {
&portio_porttype_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(portio);
static void portio_release(struct kobject *kobj)
{
@@ -207,7 +209,7 @@ static const struct sysfs_ops portio_sysfs_ops = {
static struct kobj_type portio_attr_type = {
.release = portio_release,
.sysfs_ops = &portio_sysfs_ops,
- .default_attrs = portio_attrs,
+ .default_groups = portio_groups,
};
static ssize_t name_show(struct device *dev,
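For reference, ATTRIBUTE_GROUPS(foo) takes a foo_attrs[] array like the ones
above and emits a foo_group plus a NULL-terminated foo_groups[] suitable for
->default_groups. A minimal sketch with hypothetical foo/bar names:

        static struct attribute *foo_attrs[] = {        /* name must end in _attrs */
                &bar_attribute.attr,
                NULL,
        };
        ATTRIBUTE_GROUPS(foo);                  /* defines foo_group, foo_groups */

        static struct kobj_type foo_ktype = {
                .release        = foo_release,
                .sysfs_ops      = &foo_sysfs_ops,
                .default_groups = foo_groups,   /* was .default_attrs = foo_attrs */
        };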
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 6b5cfa5b0673..1106f3376404 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -188,7 +188,11 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
return -ENOMEM;
}
- dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev, "DMA enable failed\n");
+ return ret;
+ }
priv->uioinfo = uioinfo;
spin_lock_init(&priv->lock);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index da17be1ef64e..e3a49d837609 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -969,7 +969,7 @@ static int usbatm_do_heavy_init(void *arg)
instance->thread = NULL;
mutex_unlock(&instance->serialize);
- complete_and_exit(&instance->thread_exited, ret);
+ kthread_complete_and_exit(&instance->thread_exited, ret);
}
static int usbatm_heavy_init(struct usbatm_data *instance)
diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
index 4d0f027e5bd3..dc068e940ed5 100644
--- a/drivers/usb/cdns3/cdns3-plat.c
+++ b/drivers/usb/cdns3/cdns3-plat.c
@@ -13,6 +13,7 @@
*/
#include <linux/module.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -65,13 +66,14 @@ static int cdns3_plat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, cdns);
- res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "host");
- if (!res) {
- dev_err(dev, "missing host IRQ\n");
- return -ENODEV;
- }
+ ret = platform_get_irq_byname(pdev, "host");
+ if (ret < 0)
+ return ret;
- cdns->xhci_res[0] = *res;
+ cdns->xhci_res[0].start = ret;
+ cdns->xhci_res[0].end = ret;
+ cdns->xhci_res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(ret);
+ cdns->xhci_res[0].name = "host";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci");
if (!res) {
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
index e85bf768c66d..5c9d07cc5410 100644
--- a/drivers/usb/cdns3/cdnsp-gadget.c
+++ b/drivers/usb/cdns3/cdnsp-gadget.c
@@ -81,7 +81,7 @@ int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
offset = HCC_EXT_CAPS(val) << 2;
if (!offset)
return 0;
- };
+ }
do {
val = readl(base + offset);
diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h
index ab0cb68acd23..2d332a788871 100644
--- a/drivers/usb/cdns3/core.h
+++ b/drivers/usb/cdns3/core.h
@@ -8,12 +8,12 @@
* Authors: Peter Chen <peter.chen@nxp.com>
* Pawel Laszczak <pawell@cadence.com>
*/
-#include <linux/usb/otg.h>
-#include <linux/usb/role.h>
-
#ifndef __LINUX_CDNS3_CORE_H
#define __LINUX_CDNS3_CORE_H
+#include <linux/usb/otg.h>
+#include <linux/usb/role.h>
+
struct cdns;
/**
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index 55c73b1d8704..d00ff98dffab 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -483,11 +483,11 @@ int cdns_drd_exit(struct cdns *cdns)
/* Indicate whether the cdns3 core lost power before */
bool cdns_power_is_lost(struct cdns *cdns)
{
- if (cdns->version == CDNS3_CONTROLLER_V1) {
- if (!(readl(&cdns->otg_v1_regs->simulate) & BIT(0)))
+ if (cdns->version == CDNS3_CONTROLLER_V0) {
+ if (!(readl(&cdns->otg_v0_regs->simulate) & BIT(0)))
return true;
} else {
- if (!(readl(&cdns->otg_v0_regs->simulate) & BIT(0)))
+ if (!(readl(&cdns->otg_v1_regs->simulate) & BIT(0)))
return true;
}
return false;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index a56f06368d14..5359b2a2e4d2 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -864,6 +864,7 @@ struct platform_device *ci_hdrc_add_device(struct device *dev,
}
pdev->dev.parent = dev;
+ device_set_of_node_from_dev(&pdev->dev, dev);
ret = platform_device_add_resources(pdev, res, nres);
if (ret)
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 8dd59282827b..7b53274ef966 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -255,10 +255,9 @@ int ci_hdrc_otg_init(struct ci_hdrc *ci)
*/
void ci_hdrc_otg_destroy(struct ci_hdrc *ci)
{
- if (ci->wq) {
- flush_workqueue(ci->wq);
+ if (ci->wq)
destroy_workqueue(ci->wq);
- }
+
/* Disable all OTG irq and clear status */
hw_write_otgsc(ci, OTGSC_INT_EN_BITS | OTGSC_INT_STATUS_BITS,
OTGSC_INT_STATUS_BITS);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index b3ce7338cb6b..9b9aea24d58c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -685,10 +685,6 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
if (retval)
goto error_get_interface;
- /*
- * FIXME: Why do we need this? Allocating 64K of physically contiguous
- * memory is really nasty...
- */
set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
acm->control->needs_remote_wakeup = 1;
diff --git a/drivers/usb/common/debug.c b/drivers/usb/common/debug.c
index a76a086b9c54..075f6b1b2a1a 100644
--- a/drivers/usb/common/debug.c
+++ b/drivers/usb/common/debug.c
@@ -8,6 +8,7 @@
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
+#include <linux/kernel.h>
#include <linux/usb/ch9.h>
static void usb_decode_get_status(__u8 bRequestType, __u16 wIndex,
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 4169cf40a03b..5509d3847af4 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -39,8 +39,11 @@ static int ulpi_match(struct device *dev, struct device_driver *driver)
struct ulpi *ulpi = to_ulpi_dev(dev);
const struct ulpi_device_id *id;
- /* Some ULPI devices don't have a vendor id so rely on OF match */
- if (ulpi->id.vendor == 0)
+ /*
+ * Some ULPI devices don't have a vendor id, or their driver
+ * doesn't provide an id_table, so rely on OF match.
+ */
+ if (ulpi->id.vendor == 0 || !drv->id_table)
return of_driver_match_device(dev, driver);
for (id = drv->id_table; id->vendor; id++)
@@ -127,6 +130,7 @@ static const struct attribute_group *ulpi_dev_attr_groups[] = {
static void ulpi_dev_release(struct device *dev)
{
+ of_node_put(dev->of_node);
kfree(to_ulpi_dev(dev));
}
@@ -244,12 +248,16 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi)
return ret;
ret = ulpi_read_id(ulpi);
- if (ret)
+ if (ret) {
+ of_node_put(ulpi->dev.of_node);
return ret;
+ }
ret = device_register(&ulpi->dev);
- if (ret)
+ if (ret) {
+ put_device(&ulpi->dev);
return ret;
+ }
dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n",
ulpi->id.vendor, ulpi->id.product);
@@ -296,7 +304,6 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface);
*/
void ulpi_unregister_interface(struct ulpi *ulpi)
{
- of_node_put(ulpi->dev.of_node);
device_unregister(&ulpi->dev);
}
EXPORT_SYMBOL_GPL(ulpi_unregister_interface);
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 072968c40ade..355ed33a2179 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/usb/driver.c - most of the driver model stuff for usb
+ * drivers/usb/core/driver.c - most of the driver model stuff for usb
*
* (C) Copyright 2005 Greg Kroah-Hartman <gregkh@suse.de>
*
@@ -834,6 +834,7 @@ const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
return NULL;
}
+EXPORT_SYMBOL_GPL(usb_device_match_id);
bool usb_driver_applicable(struct usb_device *udev,
struct usb_device_driver *udrv)
diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
index 26f9fb9f67ca..740342a2812a 100644
--- a/drivers/usb/core/generic.c
+++ b/drivers/usb/core/generic.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/usb/generic.c - generic driver for USB devices (not interfaces)
+ * drivers/usb/core/generic.c - generic driver for USB devices (not interfaces)
*
* (C) Copyright 2005 Greg Kroah-Hartman <gregkh@suse.de>
*
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 4d326ee12c36..d9712c2602af 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -753,6 +753,7 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
{
struct urb *urb;
int length;
+ int status;
unsigned long flags;
char buffer[6]; /* Any root hubs with > 31 ports? */
@@ -770,11 +771,17 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
if (urb) {
clear_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
hcd->status_urb = NULL;
+ if (urb->transfer_buffer_length >= length) {
+ status = 0;
+ } else {
+ status = -EOVERFLOW;
+ length = urb->transfer_buffer_length;
+ }
urb->actual_length = length;
memcpy(urb->transfer_buffer, buffer, length);
usb_hcd_unlink_urb_from_ep(hcd, urb);
- usb_hcd_giveback_urb(hcd, urb, 0);
+ usb_hcd_giveback_urb(hcd, urb, status);
} else {
length = 0;
set_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
@@ -1281,7 +1288,7 @@ static int hcd_alloc_coherent(struct usb_bus *bus,
return -EFAULT;
}
- vaddr = hcd_buffer_alloc(bus, size + sizeof(vaddr),
+ vaddr = hcd_buffer_alloc(bus, size + sizeof(unsigned long),
mem_flags, dma_handle);
if (!vaddr)
return -ENOMEM;
@@ -1556,6 +1563,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
urb->hcpriv = NULL;
INIT_LIST_HEAD(&urb->urb_list);
atomic_dec(&urb->use_count);
+ /*
+ * Order the write of urb->use_count above before the read
+ * of urb->reject below. Pairs with the memory barriers in
+ * usb_kill_urb() and usb_poison_urb().
+ */
+ smp_mb__after_atomic();
+
atomic_dec(&urb->dev->urbnum);
if (atomic_read(&urb->reject))
wake_up(&usb_kill_urb_queue);
@@ -1658,6 +1672,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
+ /*
+ * Order the write of urb->use_count above before the read
+ * of urb->reject below. Pairs with the memory barriers in
+ * usb_kill_urb() and usb_poison_urb().
+ */
+ smp_mb__after_atomic();
+
if (unlikely(atomic_read(&urb->reject)))
wake_up(&usb_kill_urb_queue);
usb_put_urb(urb);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 00070a8a6507..47a1c8bddf86 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1110,7 +1110,10 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
} else {
hub_power_on(hub, true);
}
- }
+ /* Give some time on remote wakeup to let links transition to U0 */
+ } else if (hub_is_superspeed(hub->hdev))
+ msleep(20);
+
init2:
/*
@@ -1225,7 +1228,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
*/
if (portchange || (hub_is_superspeed(hub->hdev) &&
port_resumed))
- set_bit(port1, hub->change_bits);
+ set_bit(port1, hub->event_bits);
} else if (udev->persist_enabled) {
#ifdef CONFIG_PM
@@ -2777,6 +2780,8 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
#define PORT_INIT_TRIES 4
#endif /* CONFIG_USB_FEW_INIT_RETRIES */
+#define DETECT_DISCONNECT_TRIES 5
+
#define HUB_ROOT_RESET_TIME 60 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
#define HUB_BH_RESET_TIME 50
@@ -3570,7 +3575,7 @@ static int finish_port_resume(struct usb_device *udev)
* This routine should only be called when persist is enabled.
*/
static int wait_for_connected(struct usb_device *udev,
- struct usb_hub *hub, int *port1,
+ struct usb_hub *hub, int port1,
u16 *portchange, u16 *portstatus)
{
int status = 0, delay_ms = 0;
@@ -3584,7 +3589,7 @@ static int wait_for_connected(struct usb_device *udev,
}
msleep(20);
delay_ms += 20;
- status = hub_port_status(hub, *port1, portstatus, portchange);
+ status = hub_port_status(hub, port1, portstatus, portchange);
}
dev_dbg(&udev->dev, "Waited %dms for CONNECT\n", delay_ms);
return status;
@@ -3690,7 +3695,7 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
}
if (udev->persist_enabled)
- status = wait_for_connected(udev, hub, &port1, &portchange,
+ status = wait_for_connected(udev, hub, port1, &portchange,
&portstatus);
status = check_port_resume_type(udev,
@@ -5543,6 +5548,7 @@ static void port_event(struct usb_hub *hub, int port1)
struct usb_device *udev = port_dev->child;
struct usb_device *hdev = hub->hdev;
u16 portstatus, portchange;
+ int i = 0;
connect_change = test_bit(port1, hub->change_bits);
clear_bit(port1, hub->event_bits);
@@ -5619,17 +5625,27 @@ static void port_event(struct usb_hub *hub, int port1)
connect_change = 1;
/*
- * Warm reset a USB3 protocol port if it's in
- * SS.Inactive state.
+ * Avoid trying to recover a USB3 SS.Inactive port with a warm reset if
+ * the device was disconnected. A 12ms disconnect detect timer in
+ * SS.Inactive state transitions the port to RxDetect automatically.
+ * SS.Inactive link error state is common during device disconnect.
*/
- if (hub_port_warm_reset_required(hub, port1, portstatus)) {
- dev_dbg(&port_dev->dev, "do warm reset\n");
- if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION)
+ while (hub_port_warm_reset_required(hub, port1, portstatus)) {
+ if ((i++ < DETECT_DISCONNECT_TRIES) && udev) {
+ u16 unused;
+
+ msleep(20);
+ hub_port_status(hub, port1, &portstatus, &unused);
+ dev_dbg(&port_dev->dev, "Wait for inactive link disconnect detect\n");
+ continue;
+ } else if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION)
|| udev->state == USB_STATE_NOTATTACHED) {
+ dev_dbg(&port_dev->dev, "do warm reset, port only\n");
if (hub_port_reset(hub, port1, NULL,
HUB_BH_RESET_TIME, true) < 0)
hub_port_disable(hub, port1, 1);
} else {
+ dev_dbg(&port_dev->dev, "do warm reset, full device\n");
usb_unlock_port(port_dev);
usb_lock_device(udev);
usb_reset_device(udev);
@@ -5637,6 +5653,7 @@ static void port_event(struct usb_hub *hub, int port1)
usb_lock_port(port_dev);
connect_change = 0;
}
+ break;
}
if (connect_change)
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index dfcca9c876c7..d5bc36ca5b1f 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -9,6 +9,7 @@
#include <linux/slab.h>
#include <linux/pm_qos.h>
+#include <linux/component.h>
#include "hub.h"
@@ -528,6 +529,32 @@ static void find_and_link_peer(struct usb_hub *hub, int port1)
link_peers_report(port_dev, peer);
}
+static int connector_bind(struct device *dev, struct device *connector, void *data)
+{
+ int ret;
+
+ ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector");
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_link(&connector->kobj, &dev->kobj, dev_name(dev));
+ if (ret)
+ sysfs_remove_link(&dev->kobj, "connector");
+
+ return ret;
+}
+
+static void connector_unbind(struct device *dev, struct device *connector, void *data)
+{
+ sysfs_remove_link(&connector->kobj, dev_name(dev));
+ sysfs_remove_link(&dev->kobj, "connector");
+}
+
+static const struct component_ops connector_ops = {
+ .bind = connector_bind,
+ .unbind = connector_unbind,
+};
+
int usb_hub_create_port_device(struct usb_hub *hub, int port1)
{
struct usb_port *port_dev;
@@ -575,6 +602,13 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
return retval;
}
+ retval = component_add(&port_dev->dev, &connector_ops);
+ if (retval) {
+ dev_warn(&port_dev->dev, "failed to add component\n");
+ device_unregister(&port_dev->dev);
+ return retval;
+ }
+
find_and_link_peer(hub, port1);
/*
@@ -619,5 +653,6 @@ void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
peer = port_dev->peer;
if (peer)
unlink_peers(port_dev, peer);
+ component_del(&port_dev->dev, &connector_ops);
device_unregister(&port_dev->dev);
}
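connector_bind()/connector_unbind() are the leaf half of the component
framework; the connector driver is expected to act as the aggregate and
collect matching ports. A hedged sketch of that side (the match callback and
the typec_aggregate naming are hypothetical):

        static int typec_aggregate_bind(struct device *dev)
        {
                /* calls connector_bind() for every matched port device */
                return component_bind_all(dev, NULL);
        }

        static void typec_aggregate_unbind(struct device *dev)
        {
                component_unbind_all(dev, NULL);
        }

        static const struct component_master_ops typec_aggregate_ops = {
                .bind   = typec_aggregate_bind,
                .unbind = typec_aggregate_unbind,
        };

        /* in the connector driver's probe: */
        component_match_add(dev, &match, my_port_match, NULL);
        ret = component_master_add_with_match(dev, &typec_aggregate_ops, match);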
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 30727729a44c..33d62d7e3929 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -715,6 +715,12 @@ void usb_kill_urb(struct urb *urb)
if (!(urb && urb->dev && urb->ep))
return;
atomic_inc(&urb->reject);
+ /*
+ * Order the write of urb->reject above before the read
+ * of urb->use_count below. Pairs with the barriers in
+ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
+ */
+ smp_mb__after_atomic();
usb_hcd_unlink_urb(urb, -ENOENT);
wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
@@ -756,6 +762,12 @@ void usb_poison_urb(struct urb *urb)
if (!urb)
return;
atomic_inc(&urb->reject);
+ /*
+ * Order the write of urb->reject above before the read
+ * of urb->use_count below. Pairs with the barriers in
+ * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
+ */
+ smp_mb__after_atomic();
if (!urb->dev || !urb->ep)
return;
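Spelled out, the pairing these comments describe is a store-then-load
handshake between the killing and completing sides. With a full barrier after
each atomic, at least one side must observe the other's write, so the killer
can never sleep waiting for a wakeup that was skipped:

        /* killer: usb_kill_urb() / usb_poison_urb() */
        atomic_inc(&urb->reject);                       /* W1: block resubmission */
        smp_mb__after_atomic();                         /* order W1 before R1 */
        wait_event(usb_kill_urb_queue,
                   atomic_read(&urb->use_count) == 0);  /* R1 */

        /* completer: __usb_hcd_giveback_urb() or a failed submit */
        atomic_dec(&urb->use_count);                    /* W2: URB not in flight */
        smp_mb__after_atomic();                         /* order W2 before R2 */
        if (atomic_read(&urb->reject))                  /* R2 */
                wake_up(&usb_kill_urb_queue);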
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 62368c4ed37a..2ce3667ec6fa 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -398,52 +398,6 @@ int usb_for_each_dev(void *data, int (*fn)(struct usb_device *, void *))
}
EXPORT_SYMBOL_GPL(usb_for_each_dev);
-struct each_hub_arg {
- void *data;
- int (*fn)(struct device *, void *);
-};
-
-static int __each_hub(struct usb_device *hdev, void *data)
-{
- struct each_hub_arg *arg = (struct each_hub_arg *)data;
- struct usb_hub *hub;
- int ret = 0;
- int i;
-
- hub = usb_hub_to_struct_hub(hdev);
- if (!hub)
- return 0;
-
- mutex_lock(&usb_port_peer_mutex);
-
- for (i = 0; i < hdev->maxchild; i++) {
- ret = arg->fn(&hub->ports[i]->dev, arg->data);
- if (ret)
- break;
- }
-
- mutex_unlock(&usb_port_peer_mutex);
-
- return ret;
-}
-
-/**
- * usb_for_each_port - interate over all USB ports in the system
- * @data: data pointer that will be handed to the callback function
- * @fn: callback function to be called for each USB port
- *
- * Iterate over all USB ports and call @fn for each, passing it @data. If it
- * returns anything other than 0, we break the iteration prematurely and return
- * that value.
- */
-int usb_for_each_port(void *data, int (*fn)(struct device *, void *))
-{
- struct each_hub_arg arg = {data, fn};
-
- return usb_for_each_dev(&arg, __each_hub);
-}
-EXPORT_SYMBOL_GPL(usb_for_each_port);
-
/**
* usb_release_dev - free a usb device structure when all users of it are finished.
* @dev: device that's been disconnected
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 37185eb66ae4..8a63da3ab39d 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -869,6 +869,8 @@ struct dwc2_hregs_backup {
* - USB_DR_MODE_HOST
* - USB_DR_MODE_OTG
* @role_sw: usb_role_switch handle
+ * @role_sw_default_mode: default operation mode of controller while usb role
+ * is USB_ROLE_NONE
* @hcd_enabled: Host mode sub-driver initialization indicator.
* @gadget_enabled: Peripheral mode sub-driver initialization indicator.
* @ll_hw_enabled: Status of low-level hardware resources.
@@ -1065,6 +1067,7 @@ struct dwc2_hsotg {
enum usb_otg_state op_state;
enum usb_dr_mode dr_mode;
struct usb_role_switch *role_sw;
+ enum usb_dr_mode role_sw_default_mode;
unsigned int hcd_enabled:1;
unsigned int gadget_enabled:1;
unsigned int ll_hw_enabled:1;
@@ -1151,8 +1154,7 @@ struct dwc2_hsotg {
struct list_head periodic_sched_queued;
struct list_head split_order;
u16 periodic_usecs;
- unsigned long hs_periodic_bitmap[
- DIV_ROUND_UP(DWC2_HS_SCHEDULE_US, BITS_PER_LONG)];
+ DECLARE_BITMAP(hs_periodic_bitmap, DWC2_HS_SCHEDULE_US);
u16 periodic_qh_count;
bool new_connection;
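The conversion is purely cosmetic; DECLARE_BITMAP() expands to the same array
the old code spelled out by hand:

        /* DECLARE_BITMAP(name, bits) expands to: */
        unsigned long name[BITS_TO_LONGS(bits)];
        /* where BITS_TO_LONGS(bits) == DIV_ROUND_UP(bits, BITS_PER_LONG) */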
diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
index aa6eb76f64dd..1b39c4776369 100644
--- a/drivers/usb/dwc2/drd.c
+++ b/drivers/usb/dwc2/drd.c
@@ -13,6 +13,10 @@
#include <linux/usb/role.h>
#include "core.h"
+#define dwc2_ovr_gotgctl(gotgctl) \
+ ((gotgctl) |= GOTGCTL_BVALOEN | GOTGCTL_AVALOEN | GOTGCTL_VBVALOEN | \
+ GOTGCTL_DBNCE_FLTR_BYPASS)
+
static void dwc2_ovr_init(struct dwc2_hsotg *hsotg)
{
unsigned long flags;
@@ -21,9 +25,12 @@ static void dwc2_ovr_init(struct dwc2_hsotg *hsotg)
spin_lock_irqsave(&hsotg->lock, flags);
gotgctl = dwc2_readl(hsotg, GOTGCTL);
- gotgctl |= GOTGCTL_BVALOEN | GOTGCTL_AVALOEN | GOTGCTL_VBVALOEN;
- gotgctl |= GOTGCTL_DBNCE_FLTR_BYPASS;
+ dwc2_ovr_gotgctl(gotgctl);
gotgctl &= ~(GOTGCTL_BVALOVAL | GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL);
+ if (hsotg->role_sw_default_mode == USB_DR_MODE_HOST)
+ gotgctl |= GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL;
+ else if (hsotg->role_sw_default_mode == USB_DR_MODE_PERIPHERAL)
+ gotgctl |= GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL;
dwc2_writel(hsotg, gotgctl, GOTGCTL);
spin_unlock_irqrestore(&hsotg->lock, flags);
@@ -40,6 +47,9 @@ static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid)
(!valid && !(gotgctl & GOTGCTL_ASESVLD)))
return -EALREADY;
+ /* Always enable overrides to handle the resume case */
+ dwc2_ovr_gotgctl(gotgctl);
+
gotgctl &= ~GOTGCTL_BVALOVAL;
if (valid)
gotgctl |= GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL;
@@ -59,6 +69,9 @@ static int dwc2_ovr_bvalid(struct dwc2_hsotg *hsotg, bool valid)
(!valid && !(gotgctl & GOTGCTL_BSESVLD)))
return -EALREADY;
+ /* Always enable overrides to handle the resume case */
+ dwc2_ovr_gotgctl(gotgctl);
+
gotgctl &= ~GOTGCTL_AVALOVAL;
if (valid)
gotgctl |= GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL;
@@ -105,6 +118,14 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
spin_lock_irqsave(&hsotg->lock, flags);
+ if (role == USB_ROLE_NONE) {
+ /* default operation mode when usb role is USB_ROLE_NONE */
+ if (hsotg->role_sw_default_mode == USB_DR_MODE_HOST)
+ role = USB_ROLE_HOST;
+ else if (hsotg->role_sw_default_mode == USB_DR_MODE_PERIPHERAL)
+ role = USB_ROLE_DEVICE;
+ }
+
if (role == USB_ROLE_HOST) {
already = dwc2_ovr_avalid(hsotg, true);
} else if (role == USB_ROLE_DEVICE) {
@@ -146,6 +167,7 @@ int dwc2_drd_init(struct dwc2_hsotg *hsotg)
if (!device_property_read_bool(hsotg->dev, "usb-role-switch"))
return 0;
+ hsotg->role_sw_default_mode = usb_get_role_switch_default_mode(hsotg->dev);
role_sw_desc.driver_data = hsotg;
role_sw_desc.fwnode = dev_fwnode(hsotg->dev);
role_sw_desc.set = dwc2_drd_role_sw_set;
@@ -183,6 +205,31 @@ void dwc2_drd_suspend(struct dwc2_hsotg *hsotg)
void dwc2_drd_resume(struct dwc2_hsotg *hsotg)
{
u32 gintsts, gintmsk;
+ enum usb_role role;
+
+ if (hsotg->role_sw) {
+ /* get last known role (as the get ops isn't implemented by this driver) */
+ role = usb_role_switch_get_role(hsotg->role_sw);
+
+ if (role == USB_ROLE_NONE) {
+ if (hsotg->role_sw_default_mode == USB_DR_MODE_HOST)
+ role = USB_ROLE_HOST;
+ else if (hsotg->role_sw_default_mode == USB_DR_MODE_PERIPHERAL)
+ role = USB_ROLE_DEVICE;
+ }
+
+ /* restore last role that may have been lost */
+ if (role == USB_ROLE_HOST)
+ dwc2_ovr_avalid(hsotg, true);
+ else if (role == USB_ROLE_DEVICE)
+ dwc2_ovr_bvalid(hsotg, true);
+
+ dwc2_force_mode(hsotg, role == USB_ROLE_HOST);
+
+ dev_dbg(hsotg->dev, "resuming %s-session valid\n",
+ role == USB_ROLE_NONE ? "No" :
+ role == USB_ROLE_HOST ? "A" : "B");
+ }
if (hsotg->role_sw && !hsotg->params.external_id_pin_ctl) {
gintsts = dwc2_readl(hsotg, GINTSTS);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index ab8d7dad9f56..eee3504397e6 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -4974,7 +4974,18 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
hsotg->params.g_np_tx_fifo_size);
dev_dbg(dev, "RXFIFO size: %d\n", hsotg->params.g_rx_fifo_size);
- hsotg->gadget.max_speed = USB_SPEED_HIGH;
+ switch (hsotg->params.speed) {
+ case DWC2_SPEED_PARAM_LOW:
+ hsotg->gadget.max_speed = USB_SPEED_LOW;
+ break;
+ case DWC2_SPEED_PARAM_FULL:
+ hsotg->gadget.max_speed = USB_SPEED_FULL;
+ break;
+ default:
+ hsotg->gadget.max_speed = USB_SPEED_HIGH;
+ break;
+ }
+
hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
hsotg->gadget.name = dev_name(dev);
hsotg->gadget.otg_caps = &hsotg->params.otg_caps;
@@ -5086,7 +5097,7 @@ int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
spin_unlock_irqrestore(&hsotg->lock, flags);
- for (ep = 0; ep < hsotg->num_of_eps; ep++) {
+ for (ep = 1; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
if (hsotg->eps_out[ep])
@@ -5217,7 +5228,7 @@ int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
* as result BNA interrupt asserted on hibernation exit
* by restoring from saved area.
*/
- if (hsotg->params.g_dma_desc &&
+ if (using_desc_dma(hsotg) &&
(dr->diepctl[i] & DXEPCTL_EPENA))
dr->diepdma[i] = hsotg->eps_in[i]->desc_list_dma;
dwc2_writel(hsotg, dr->dtxfsiz[i], DPTXFSIZN(i));
@@ -5229,7 +5240,7 @@ int dwc2_restore_device_registers(struct dwc2_hsotg *hsotg, int remote_wakeup)
* as result BNA interrupt asserted on hibernation exit
* by restoring from saved area.
*/
- if (hsotg->params.g_dma_desc &&
+ if (using_desc_dma(hsotg) &&
(dr->doepctl[i] & DXEPCTL_EPENA))
dr->doepdma[i] = hsotg->eps_out[i]->desc_list_dma;
dwc2_writel(hsotg, dr->doepdma[i], DOEPDMA(i));
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 13c779a28e94..f63a27d11fac 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4399,11 +4399,12 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
* If not hibernation nor partial power down are supported,
* clock gating is used to save power.
*/
- if (!hsotg->params.no_clock_gating)
+ if (!hsotg->params.no_clock_gating) {
dwc2_host_enter_clock_gating(hsotg);
- /* After entering suspend, hardware is not accessible */
- clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ /* After entering suspend, hardware is not accessible */
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ }
break;
default:
goto skip_power_saving;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index c331a5128c2c..c8ba87df7abe 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -222,20 +222,16 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
int i, ret;
hsotg->reset = devm_reset_control_get_optional(hsotg->dev, "dwc2");
- if (IS_ERR(hsotg->reset)) {
- ret = PTR_ERR(hsotg->reset);
- dev_err(hsotg->dev, "error getting reset control %d\n", ret);
- return ret;
- }
+ if (IS_ERR(hsotg->reset))
+ return dev_err_probe(hsotg->dev, PTR_ERR(hsotg->reset),
+ "error getting reset control\n");
reset_control_deassert(hsotg->reset);
hsotg->reset_ecc = devm_reset_control_get_optional(hsotg->dev, "dwc2-ecc");
- if (IS_ERR(hsotg->reset_ecc)) {
- ret = PTR_ERR(hsotg->reset_ecc);
- dev_err(hsotg->dev, "error getting reset control for ecc %d\n", ret);
- return ret;
- }
+ if (IS_ERR(hsotg->reset_ecc))
+ return dev_err_probe(hsotg->dev, PTR_ERR(hsotg->reset_ecc),
+ "error getting reset control for ecc\n");
reset_control_deassert(hsotg->reset_ecc);
@@ -251,11 +247,8 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
case -ENOSYS:
hsotg->phy = NULL;
break;
- case -EPROBE_DEFER:
- return ret;
default:
- dev_err(hsotg->dev, "error getting phy %d\n", ret);
- return ret;
+ return dev_err_probe(hsotg->dev, ret, "error getting phy\n");
}
}
@@ -268,12 +261,8 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
case -ENXIO:
hsotg->uphy = NULL;
break;
- case -EPROBE_DEFER:
- return ret;
default:
- dev_err(hsotg->dev, "error getting usb phy %d\n",
- ret);
- return ret;
+ return dev_err_probe(hsotg->dev, ret, "error getting usb phy\n");
}
}
}
@@ -282,10 +271,8 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
/* Clock */
hsotg->clk = devm_clk_get_optional(hsotg->dev, "otg");
- if (IS_ERR(hsotg->clk)) {
- dev_err(hsotg->dev, "cannot get otg clock\n");
- return PTR_ERR(hsotg->clk);
- }
+ if (IS_ERR(hsotg->clk))
+ return dev_err_probe(hsotg->dev, PTR_ERR(hsotg->clk), "cannot get otg clock\n");
/* Regulators */
for (i = 0; i < ARRAY_SIZE(hsotg->supplies); i++)
@@ -293,12 +280,9 @@ static int dwc2_lowlevel_hw_init(struct dwc2_hsotg *hsotg)
ret = devm_regulator_bulk_get(hsotg->dev, ARRAY_SIZE(hsotg->supplies),
hsotg->supplies);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(hsotg->dev, "failed to request supplies: %d\n",
- ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(hsotg->dev, ret, "failed to request supplies\n");
+
return 0;
}
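The conversions in this hunk all rely on the same property of dev_err_probe():
it returns the error passed in, stays silent for -EPROBE_DEFER (recording the
defer reason for debugfs instead), and logs for everything else, so the
open-coded pattern collapses to a single statement. Schematically (the
"widget" resource name is a placeholder):

        /* before */
        if (ret) {
                if (ret != -EPROBE_DEFER)
                        dev_err(dev, "failed to get widget: %d\n", ret);
                return ret;
        }

        /* after */
        if (ret)
                return dev_err_probe(dev, ret, "failed to get widget\n");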
@@ -558,16 +542,12 @@ static int dwc2_driver_probe(struct platform_device *dev)
hsotg->usb33d = devm_regulator_get(hsotg->dev, "usb33d");
if (IS_ERR(hsotg->usb33d)) {
retval = PTR_ERR(hsotg->usb33d);
- if (retval != -EPROBE_DEFER)
- dev_err(hsotg->dev,
- "failed to request usb33d supply: %d\n",
- retval);
+ dev_err_probe(hsotg->dev, retval, "failed to request usb33d supply\n");
goto error;
}
retval = regulator_enable(hsotg->usb33d);
if (retval) {
- dev_err(hsotg->dev,
- "failed to enable usb33d supply: %d\n", retval);
+ dev_err_probe(hsotg->dev, retval, "failed to enable usb33d supply\n");
goto error;
}
@@ -582,8 +562,7 @@ static int dwc2_driver_probe(struct platform_device *dev)
retval = dwc2_drd_init(hsotg);
if (retval) {
- if (retval != -EPROBE_DEFER)
- dev_err(hsotg->dev, "failed to initialize dual-role\n");
+ dev_err_probe(hsotg->dev, retval, "failed to initialize dual-role\n");
goto error_init;
}
@@ -751,10 +730,12 @@ static int __maybe_unused dwc2_resume(struct device *dev)
spin_unlock_irqrestore(&dwc2->lock, flags);
}
- /* Need to restore FORCEDEVMODE/FORCEHOSTMODE */
- dwc2_force_dr_mode(dwc2);
-
- dwc2_drd_resume(dwc2);
+ if (!dwc2->role_sw) {
+ /* Need to restore FORCEDEVMODE/FORCEHOSTMODE */
+ dwc2_force_dr_mode(dwc2);
+ } else {
+ dwc2_drd_resume(dwc2);
+ }
if (dwc2_is_device_mode(dwc2))
ret = dwc2_hsotg_resume(dwc2);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 5c491d0a19d7..e1cc3f7398fb 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -153,6 +153,7 @@
#define DWC3_DGCMDPAR 0xc710
#define DWC3_DGCMD 0xc714
#define DWC3_DALEPENA 0xc720
+#define DWC3_DCFG1 0xc740 /* DWC_usb32 only */
#define DWC3_DEP_BASE(n) (0xc800 + ((n) * 0x10))
#define DWC3_DEPCMDPAR2 0x00
@@ -382,6 +383,7 @@
/* Global HWPARAMS9 Register */
#define DWC3_GHWPARAMS9_DEV_TXF_FLUSH_BYPASS BIT(0)
+#define DWC3_GHWPARAMS9_DEV_MST BIT(1)
/* Global Frame Length Adjustment Register */
#define DWC3_GFLADJ_30MHZ_SDBND_SEL BIT(7)
@@ -558,6 +560,9 @@
/* The EP number goes 0..31 so ep0 is always out and ep1 is always in */
#define DWC3_DALEPENA_EP(n) BIT(n)
+/* DWC_usb32 DCFG1 config */
+#define DWC3_DCFG1_DIS_MST_ENH BIT(1)
+
#define DWC3_DEPCMD_TYPE_CONTROL 0
#define DWC3_DEPCMD_TYPE_ISOC 1
#define DWC3_DEPCMD_TYPE_BULK 2
@@ -888,6 +893,10 @@ struct dwc3_hwparams {
/* HWPARAMS7 */
#define DWC3_RAM1_DEPTH(n) ((n) & 0xffff)
+/* HWPARAMS9 */
+#define DWC3_MST_CAPABLE(p) (!!((p)->hwparams9 & \
+ DWC3_GHWPARAMS9_DEV_MST))
+
/**
* struct dwc3_request - representation of a transfer request
* @request: struct usb_request to be transferred
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index d0f9b7c296b0..bd814df3bf8b 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -755,16 +755,16 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = dwc3_meson_g12a_get_phys(priv);
if (ret)
- goto err_disable_clks;
+ goto err_rearm;
ret = priv->drvdata->setup_regmaps(priv, base);
if (ret)
- goto err_disable_clks;
+ goto err_rearm;
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
if (ret)
- goto err_disable_clks;
+ goto err_rearm;
}
/* Get dr_mode */
@@ -825,6 +825,9 @@ err_disable_regulator:
if (priv->vbus)
regulator_disable(priv->vbus);
+err_rearm:
+ reset_control_rearm(priv->reset);
+
err_disable_clks:
clk_bulk_disable_unprepare(priv->drvdata->num_clks,
priv->drvdata->clks);
@@ -852,6 +855,8 @@ static int dwc3_meson_g12a_remove(struct platform_device *pdev)
pm_runtime_put_noidle(dev);
pm_runtime_set_suspended(dev);
+ reset_control_rearm(priv->reset);
+
clk_bulk_disable_unprepare(priv->drvdata->num_clks,
priv->drvdata->clks);
@@ -892,7 +897,7 @@ static int __maybe_unused dwc3_meson_g12a_suspend(struct device *dev)
phy_exit(priv->phys[i]);
}
- reset_control_assert(priv->reset);
+ reset_control_rearm(priv->reset);
return 0;
}
@@ -902,7 +907,9 @@ static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev)
struct dwc3_meson_g12a *priv = dev_get_drvdata(dev);
int i, ret;
- reset_control_deassert(priv->reset);
+ ret = reset_control_reset(priv->reset);
+ if (ret)
+ return ret;
ret = priv->drvdata->usb_init(priv);
if (ret)
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index 3cb01cdd02c2..6cba990da32e 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -598,8 +598,10 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
qcom->dwc3->dev.coherent_dma_mask = dev->coherent_dma_mask;
child_res = kcalloc(2, sizeof(*child_res), GFP_KERNEL);
- if (!child_res)
+ if (!child_res) {
+ platform_device_put(qcom->dwc3);
return -ENOMEM;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -637,9 +639,13 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to add device\n");
device_remove_software_node(&qcom->dwc3->dev);
+ goto out;
}
+ kfree(child_res);
+ return 0;
out:
+ platform_device_put(qcom->dwc3);
kfree(child_res);
return ret;
}
@@ -769,9 +775,12 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (qcom->acpi_pdata->is_urs) {
qcom->urs_usb = dwc3_qcom_create_urs_usb_platdev(dev);
- if (!qcom->urs_usb) {
+ if (IS_ERR_OR_NULL(qcom->urs_usb)) {
dev_err(dev, "failed to create URS USB platdev\n");
- return -ENODEV;
+ if (!qcom->urs_usb)
+ return -ENODEV;
+ else
+ return PTR_ERR(qcom->urs_usb);
}
}
}
diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c
index 9cc3ad701a29..a6f3a9b38789 100644
--- a/drivers/usb/dwc3/dwc3-xilinx.c
+++ b/drivers/usb/dwc3/dwc3-xilinx.c
@@ -99,17 +99,29 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
struct device *dev = priv_data->dev;
struct reset_control *crst, *hibrst, *apbrst;
struct phy *usb3_phy;
- int ret;
+ int ret = 0;
u32 reg;
- usb3_phy = devm_phy_get(dev, "usb3-phy");
- if (PTR_ERR(usb3_phy) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
+ usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
+ if (IS_ERR(usb3_phy)) {
+ ret = PTR_ERR(usb3_phy);
+ dev_err_probe(dev, ret,
+ "failed to get USB3 PHY\n");
goto err;
- } else if (IS_ERR(usb3_phy)) {
- usb3_phy = NULL;
}
+ /*
+ * The following core resets are not required unless a USB3 PHY
+ * is used, and the subsequent register settings are not required
+ * unless a core reset is performed (they should be set properly
+ * by the first-stage boot loader, but may be reverted by a core
+ * reset). They may also break the configuration if USB3 is actually
+ * in use but the usb3-phy entry is missing from the device tree.
+ * Therefore, skip these operations in this case.
+ */
+ if (!usb3_phy)
+ goto skip_usb3_phy;
+
crst = devm_reset_control_get_exclusive(dev, "usb_crst");
if (IS_ERR(crst)) {
ret = PTR_ERR(crst);
@@ -188,6 +200,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data)
goto err;
}
+skip_usb3_phy:
/*
* This routes the USB DMA traffic to go through FPD path instead
* of reaching DDR directly. This traffic routing is needed to
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7e3db00e9759..183b90923f51 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -331,9 +331,17 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
}
}
- dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
- dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
- dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
+ /*
+ * For some commands such as Update Transfer command, DEPCMDPARn
+ * registers are reserved. Since the driver often sends Update Transfer
+ * command, don't write to DEPCMDPARn to avoid register write delays and
+ * improve performance.
+ */
+ if (DWC3_DEPCMD_CMD(cmd) != DWC3_DEPCMD_UPDATETRANSFER) {
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR0, params->param0);
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR1, params->param1);
+ dwc3_writel(dep->regs, DWC3_DEPCMDPAR2, params->param2);
+ }
/*
* Synopsys Databook 2.60a states in section 6.3.2.5.6 of that if we're
@@ -357,6 +365,12 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
cmd |= DWC3_DEPCMD_CMDACT;
dwc3_writel(dep->regs, DWC3_DEPCMD, cmd);
+
+ if (!(cmd & DWC3_DEPCMD_CMDACT)) {
+ ret = 0;
+ goto skip_status;
+ }
+
do {
reg = dwc3_readl(dep->regs, DWC3_DEPCMD);
if (!(reg & DWC3_DEPCMD_CMDACT)) {
@@ -398,6 +412,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd,
cmd_status = -ETIMEDOUT;
}
+skip_status:
trace_dwc3_gadget_ep_cmd(dep, cmd, params, cmd_status);
if (DWC3_DEPCMD_CMD(cmd) == DWC3_DEPCMD_STARTTRANSFER) {
@@ -1260,17 +1275,35 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
}
+ /* All TRBs setup for MST must set CSP=1 when LST=0 */
+ if (dep->stream_capable && DWC3_MST_CAPABLE(&dwc->hwparams))
+ trb->ctrl |= DWC3_TRB_CTRL_CSP;
+
if ((!no_interrupt && !chain) || must_interrupt)
trb->ctrl |= DWC3_TRB_CTRL_IOC;
if (chain)
trb->ctrl |= DWC3_TRB_CTRL_CHN;
- else if (dep->stream_capable && is_last)
+ else if (dep->stream_capable && is_last &&
+ !DWC3_MST_CAPABLE(&dwc->hwparams))
trb->ctrl |= DWC3_TRB_CTRL_LST;
if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id);
+ /*
+ * As per data book 4.2.3.2 TRB Control Bit Rules section
+ *
+ * The controller autonomously checks the HWO field of a TRB to determine if the
+ * entire TRB is valid. Therefore, software must ensure that the rest of the TRB
+ * is valid before setting the HWO field to '1'. In most systems, this means that
+ * software must update the fourth DWORD of a TRB last.
+ *
+ * However there is a possibility of CPU re-ordering here which can cause
+ * the controller to observe the HWO bit set prematurely.
+ * Add a write memory barrier to prevent CPU re-ordering.
+ */
+ wmb();
trb->ctrl |= DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_enq(dep);
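Condensed, the ordering requirement the comment quotes is: every other field
of the TRB must be globally visible before DWORD 3 gains the HWO bit. Using
the struct dwc3_trb field names, the required sequence is:

        trb->bpl  = lower_32_bits(dma);                 /* DWORD 0 */
        trb->bph  = upper_32_bits(dma);                 /* DWORD 1 */
        trb->size = DWC3_TRB_SIZE_LENGTH(length);       /* DWORD 2 */
        wmb();          /* DWORDs 0-2 must be visible before HWO is set */
        trb->ctrl |= DWC3_TRB_CTRL_HWO;                 /* DWORD 3: hand TRB to HW */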
@@ -1513,7 +1546,8 @@ static int dwc3_prepare_trbs(struct dwc3_ep *dep)
* burst capability may try to read and use TRBs beyond the
* active transfer instead of stopping.
*/
- if (dep->stream_capable && req->request.is_last)
+ if (dep->stream_capable && req->request.is_last &&
+ !DWC3_MST_CAPABLE(&dep->dwc->hwparams))
return ret;
}
@@ -1546,7 +1580,8 @@ static int dwc3_prepare_trbs(struct dwc3_ep *dep)
* burst capability may try to read and use TRBs beyond the
* active transfer instead of stopping.
*/
- if (dep->stream_capable && req->request.is_last)
+ if (dep->stream_capable && req->request.is_last &&
+ !DWC3_MST_CAPABLE(&dwc->hwparams))
return ret;
}
@@ -1623,7 +1658,8 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep)
return ret;
}
- if (dep->stream_capable && req->request.is_last)
+ if (dep->stream_capable && req->request.is_last &&
+ !DWC3_MST_CAPABLE(&dep->dwc->hwparams))
dep->flags |= DWC3_EP_WAIT_TRANSFER_COMPLETE;
return 0;
@@ -2638,6 +2674,13 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
reg |= DWC3_DCFG_IGNSTRMPP;
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+ /* Enable MST by default if the device is capable of MST */
+ if (DWC3_MST_CAPABLE(&dwc->hwparams)) {
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG1);
+ reg &= ~DWC3_DCFG1_DIS_MST_ENH;
+ dwc3_writel(dwc->regs, DWC3_DCFG1, reg);
+ }
+
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -3437,7 +3480,8 @@ static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep,
case DEPEVT_STREAM_NOSTREAM:
if ((dep->flags & DWC3_EP_IGNORE_NEXT_NOSTREAM) ||
!(dep->flags & DWC3_EP_FORCE_RESTART_STREAM) ||
- !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE))
+ (!DWC3_MST_CAPABLE(&dwc->hwparams) &&
+ !(dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE)))
break;
/*
@@ -4067,7 +4111,6 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
struct dwc3 *dwc = evt->dwc;
irqreturn_t ret = IRQ_NONE;
int left;
- u32 reg;
left = evt->count;
@@ -4099,9 +4142,8 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
ret = IRQ_HANDLED;
/* Unmask interrupt */
- reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
- reg &= ~DWC3_GEVNTSIZ_INTMASK;
- dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
+ DWC3_GEVNTSIZ_SIZE(evt->length));
if (dwc->imod_interval) {
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
@@ -4130,7 +4172,6 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
struct dwc3 *dwc = evt->dwc;
u32 amount;
u32 count;
- u32 reg;
if (pm_runtime_suspended(dwc->dev)) {
pm_runtime_get(dwc->dev);
@@ -4157,9 +4198,8 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
evt->flags |= DWC3_EVENT_PENDING;
/* Mask interrupt */
- reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(0));
- reg |= DWC3_GEVNTSIZ_INTMASK;
- dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0), reg);
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
+ DWC3_GEVNTSIZ_INTMASK | DWC3_GEVNTSIZ_SIZE(evt->length));
amount = min(count, evt->length - evt->lpos);
memcpy(evt->cache + evt->lpos, evt->buf + evt->lpos, amount);
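/*
 * Note on the two GEVNTSIZ hunks above: the event buffer size
 * programmed into GEVNTSIZ never changes at runtime, so the mask and
 * unmask paths can write the register's full, known value (the size,
 * with or without DWC3_GEVNTSIZ_INTMASK) instead of doing a
 * read-modify-write. That removes an MMIO read from the hot interrupt
 * path, which is why the 'reg' locals become unused and are dropped.
 */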
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index f29a264635aa..eda871973d6c 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -8,32 +8,55 @@
*/
#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include "core.h"
+static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
+ int irq, char *name)
+{
+ struct platform_device *pdev = to_platform_device(dwc->dev);
+ struct device_node *np = dev_of_node(&pdev->dev);
+
+ dwc->xhci_resources[1].start = irq;
+ dwc->xhci_resources[1].end = irq;
+ dwc->xhci_resources[1].flags = IORESOURCE_IRQ | irq_get_trigger_type(irq);
+ if (!name && np)
+ dwc->xhci_resources[1].name = of_node_full_name(pdev->dev.of_node);
+ else
+ dwc->xhci_resources[1].name = name;
+}
+
static int dwc3_host_get_irq(struct dwc3 *dwc)
{
struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int irq;
irq = platform_get_irq_byname_optional(dwc3_pdev, "host");
- if (irq > 0)
+ if (irq > 0) {
+ dwc3_host_fill_xhci_irq_res(dwc, irq, "host");
goto out;
+ }
if (irq == -EPROBE_DEFER)
goto out;
irq = platform_get_irq_byname_optional(dwc3_pdev, "dwc_usb3");
- if (irq > 0)
+ if (irq > 0) {
+ dwc3_host_fill_xhci_irq_res(dwc, irq, "dwc_usb3");
goto out;
+ }
if (irq == -EPROBE_DEFER)
goto out;
irq = platform_get_irq(dwc3_pdev, 0);
- if (irq > 0)
+ if (irq > 0) {
+ dwc3_host_fill_xhci_irq_res(dwc, irq, NULL);
goto out;
+ }
if (!irq)
irq = -EINVAL;
@@ -47,28 +70,12 @@ int dwc3_host_init(struct dwc3 *dwc)
struct property_entry props[4];
struct platform_device *xhci;
int ret, irq;
- struct resource *res;
- struct platform_device *dwc3_pdev = to_platform_device(dwc->dev);
int prop_idx = 0;
irq = dwc3_host_get_irq(dwc);
if (irq < 0)
return irq;
- res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ, "host");
- if (!res)
- res = platform_get_resource_byname(dwc3_pdev, IORESOURCE_IRQ,
- "dwc_usb3");
- if (!res)
- res = platform_get_resource(dwc3_pdev, IORESOURCE_IRQ, 0);
- if (!res)
- return -ENOMEM;
-
- dwc->xhci_resources[1].start = irq;
- dwc->xhci_resources[1].end = irq;
- dwc->xhci_resources[1].flags = res->flags;
- dwc->xhci_resources[1].name = res->name;
-
xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
if (!xhci) {
dev_err(dwc->dev, "couldn't allocate xHCI device\n");
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 3789c329183c..9315313108c9 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -159,6 +159,8 @@ int config_ep_by_speed_and_alt(struct usb_gadget *g,
int want_comp_desc = 0;
struct usb_descriptor_header **d_spd; /* cursor for speed desc */
+ struct usb_composite_dev *cdev;
+ bool incomplete_desc = false;
if (!g || !f || !_ep)
return -EIO;
@@ -167,28 +169,43 @@ int config_ep_by_speed_and_alt(struct usb_gadget *g,
switch (g->speed) {
case USB_SPEED_SUPER_PLUS:
if (gadget_is_superspeed_plus(g)) {
- speed_desc = f->ssp_descriptors;
- want_comp_desc = 1;
- break;
+ if (f->ssp_descriptors) {
+ speed_desc = f->ssp_descriptors;
+ want_comp_desc = 1;
+ break;
+ }
+ incomplete_desc = true;
}
fallthrough;
case USB_SPEED_SUPER:
if (gadget_is_superspeed(g)) {
- speed_desc = f->ss_descriptors;
- want_comp_desc = 1;
- break;
+ if (f->ss_descriptors) {
+ speed_desc = f->ss_descriptors;
+ want_comp_desc = 1;
+ break;
+ }
+ incomplete_desc = true;
}
fallthrough;
case USB_SPEED_HIGH:
if (gadget_is_dualspeed(g)) {
- speed_desc = f->hs_descriptors;
- break;
+ if (f->hs_descriptors) {
+ speed_desc = f->hs_descriptors;
+ break;
+ }
+ incomplete_desc = true;
}
fallthrough;
default:
speed_desc = f->fs_descriptors;
}
+ cdev = get_gadget_data(g);
+ if (incomplete_desc)
+ WARNING(cdev,
+ "%s doesn't hold the descriptors for current speed\n",
+ f->name);
+
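/*
 * Sketch (not part of the patch) of the selection logic above, reduced
 * to its core; the gadget_is_*() capability checks are omitted, and the
 * function name and 'fellback' flag are hypothetical:
 */
static struct usb_descriptor_header **select_descs(struct usb_function *f,
						   enum usb_device_speed speed,
						   bool *fellback)
{
	*fellback = false;
	switch (speed) {
	case USB_SPEED_SUPER_PLUS:
		if (f->ssp_descriptors)
			return f->ssp_descriptors;
		*fellback = true;	/* no SSP set: drop to SS */
		fallthrough;
	case USB_SPEED_SUPER:
		if (f->ss_descriptors)
			return f->ss_descriptors;
		*fellback = true;	/* no SS set: drop to HS */
		fallthrough;
	case USB_SPEED_HIGH:
		if (f->hs_descriptors)
			return f->hs_descriptors;
		*fellback = true;	/* no HS set: drop to FS */
		fallthrough;
	default:
		return f->fs_descriptors;
	}
}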
/* find correct alternate setting descriptor */
for_each_desc(speed_desc, d_spd, USB_DT_INTERFACE) {
int_desc = (struct usb_interface_descriptor *)*d_spd;
@@ -244,12 +261,8 @@ ep_found:
_ep->maxburst = comp_desc->bMaxBurst + 1;
break;
default:
- if (comp_desc->bMaxBurst != 0) {
- struct usb_composite_dev *cdev;
-
- cdev = get_gadget_data(g);
+ if (comp_desc->bMaxBurst != 0)
ERROR(cdev, "ep0 bMaxBurst must be 0\n");
- }
_ep->maxburst = 1;
break;
}
@@ -1975,6 +1988,9 @@ unknown:
if (w_index != 0x5 || (w_value >> 8))
break;
interface = w_value & 0xFF;
+ if (interface >= MAX_CONFIG_INTERFACES ||
+ !os_desc_cfg->interface[interface])
+ break;
buf[6] = w_index;
count = count_ext_prop(os_desc_cfg,
interface);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 36c611d1d8d0..d4a678c0806e 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -89,10 +89,6 @@ struct gadget_strings {
struct list_head list;
};
-struct os_desc {
- struct config_group group;
-};
-
struct gadget_config_name {
struct usb_gadget_strings stringtab_dev;
struct usb_string strings;
@@ -420,9 +416,8 @@ static int config_usb_cfg_link(
struct config_usb_cfg *cfg = to_config_usb_cfg(usb_cfg_ci);
struct gadget_info *gi = cfg_to_gadget_info(cfg);
- struct config_group *group = to_config_group(usb_func_ci);
- struct usb_function_instance *fi = container_of(group,
- struct usb_function_instance, group);
+ struct usb_function_instance *fi =
+ to_usb_function_instance(usb_func_ci);
struct usb_function_instance *a_fi;
struct usb_function *f;
int ret;
@@ -470,9 +465,8 @@ static void config_usb_cfg_unlink(
struct config_usb_cfg *cfg = to_config_usb_cfg(usb_cfg_ci);
struct gadget_info *gi = cfg_to_gadget_info(cfg);
- struct config_group *group = to_config_group(usb_func_ci);
- struct usb_function_instance *fi = container_of(group,
- struct usb_function_instance, group);
+ struct usb_function_instance *fi =
+ to_usb_function_instance(usb_func_ci);
struct usb_function *f;
/*
@@ -783,15 +777,11 @@ static void gadget_strings_attr_release(struct config_item *item)
USB_CONFIG_STRING_RW_OPS(gadget_strings);
USB_CONFIG_STRINGS_LANG(gadget_strings, gadget_info);
-static inline struct os_desc *to_os_desc(struct config_item *item)
-{
- return container_of(to_config_group(item), struct os_desc, group);
-}
-
static inline struct gadget_info *os_desc_item_to_gadget_info(
struct config_item *item)
{
- return to_gadget_info(to_os_desc(item)->group.cg_item.ci_parent);
+ return container_of(to_config_group(item),
+ struct gadget_info, os_desc_group);
}
static ssize_t os_desc_use_show(struct config_item *item, char *page)
@@ -886,21 +876,12 @@ static struct configfs_attribute *os_desc_attrs[] = {
NULL,
};
-static void os_desc_attr_release(struct config_item *item)
-{
- struct os_desc *os_desc = to_os_desc(item);
- kfree(os_desc);
-}
-
static int os_desc_link(struct config_item *os_desc_ci,
struct config_item *usb_cfg_ci)
{
- struct gadget_info *gi = container_of(to_config_group(os_desc_ci),
- struct gadget_info, os_desc_group);
+ struct gadget_info *gi = os_desc_item_to_gadget_info(os_desc_ci);
struct usb_composite_dev *cdev = &gi->cdev;
- struct config_usb_cfg *c_target =
- container_of(to_config_group(usb_cfg_ci),
- struct config_usb_cfg, group);
+ struct config_usb_cfg *c_target = to_config_usb_cfg(usb_cfg_ci);
struct usb_configuration *c;
int ret;
@@ -930,8 +911,7 @@ out:
static void os_desc_unlink(struct config_item *os_desc_ci,
struct config_item *usb_cfg_ci)
{
- struct gadget_info *gi = container_of(to_config_group(os_desc_ci),
- struct gadget_info, os_desc_group);
+ struct gadget_info *gi = os_desc_item_to_gadget_info(os_desc_ci);
struct usb_composite_dev *cdev = &gi->cdev;
mutex_lock(&gi->lock);
@@ -943,7 +923,6 @@ static void os_desc_unlink(struct config_item *os_desc_ci,
}
static struct configfs_item_operations os_desc_ops = {
- .release = os_desc_attr_release,
.allow_link = os_desc_link,
.drop_link = os_desc_unlink,
};
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index a7e069b18544..1922fd02043c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -614,7 +614,7 @@ static int ffs_ep0_open(struct inode *inode, struct file *file)
file->private_data = ffs;
ffs_data_opened(ffs);
- return 0;
+ return stream_open(inode, file);
}
static int ffs_ep0_release(struct inode *inode, struct file *file)
@@ -1154,7 +1154,7 @@ ffs_epfile_open(struct inode *inode, struct file *file)
file->private_data = epfile;
ffs_data_opened(epfile->ffs);
- return 0;
+ return stream_open(inode, file);
}
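/*
 * Note: stream_open() marks the file as stream-like (FMODE_STREAM), so
 * the VFS stops serializing reads and writes on f_pos and lseek()
 * returns -ESPIPE. That matches these ep0/epfile nodes, where each
 * read/write is an independent message transfer rather than positioned
 * file I/O.
 */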
static int ffs_aio_cancel(struct kiocb *kiocb)
@@ -1711,16 +1711,24 @@ static void ffs_data_put(struct ffs_data *ffs)
static void ffs_data_closed(struct ffs_data *ffs)
{
+ struct ffs_epfile *epfiles;
+ unsigned long flags;
+
ENTER();
if (atomic_dec_and_test(&ffs->opened)) {
if (ffs->no_disconnect) {
ffs->state = FFS_DEACTIVATED;
- if (ffs->epfiles) {
- ffs_epfiles_destroy(ffs->epfiles,
- ffs->eps_count);
- ffs->epfiles = NULL;
- }
+ spin_lock_irqsave(&ffs->eps_lock, flags);
+ epfiles = ffs->epfiles;
+ ffs->epfiles = NULL;
+ spin_unlock_irqrestore(&ffs->eps_lock,
+ flags);
+
+ if (epfiles)
+ ffs_epfiles_destroy(epfiles,
+ ffs->eps_count);
+
if (ffs->setup_state == FFS_SETUP_PENDING)
__ffs_ep0_stall(ffs);
} else {
@@ -1767,14 +1775,27 @@ static struct ffs_data *ffs_data_new(const char *dev_name)
static void ffs_data_clear(struct ffs_data *ffs)
{
+ struct ffs_epfile *epfiles;
+ unsigned long flags;
+
ENTER();
ffs_closed(ffs);
BUG_ON(ffs->gadget);
- if (ffs->epfiles) {
- ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count);
+ spin_lock_irqsave(&ffs->eps_lock, flags);
+ epfiles = ffs->epfiles;
+ ffs->epfiles = NULL;
+ spin_unlock_irqrestore(&ffs->eps_lock, flags);
+
+ /*
+ * A race is possible between ffs_func_eps_disable() and
+ * ffs_epfile_release(), so keep a local copy of epfiles and
+ * destroy it outside the lock to avoid a use-after-free.
+ */
+ if (epfiles) {
+ ffs_epfiles_destroy(epfiles, ffs->eps_count);
ffs->epfiles = NULL;
}
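/*
 * Sketch (not part of the patch) of the detach-then-free pattern used
 * in both hunks above; the helper name is hypothetical. The lock is
 * held only long enough to unhook the pointer, and destruction happens
 * outside it, so concurrent users either see the old pointer while it
 * is still valid or see NULL.
 */
static struct ffs_epfile *detach_epfiles(struct ffs_data *ffs)
{
	struct ffs_epfile *epfiles;
	unsigned long flags;

	spin_lock_irqsave(&ffs->eps_lock, flags);
	epfiles = ffs->epfiles;
	ffs->epfiles = NULL;
	spin_unlock_irqrestore(&ffs->eps_lock, flags);

	return epfiles;	/* caller destroys this outside the lock */
}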
@@ -1922,12 +1943,15 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
static void ffs_func_eps_disable(struct ffs_function *func)
{
- struct ffs_ep *ep = func->eps;
- struct ffs_epfile *epfile = func->ffs->epfiles;
- unsigned count = func->ffs->eps_count;
+ struct ffs_ep *ep;
+ struct ffs_epfile *epfile;
+ unsigned short count;
unsigned long flags;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
+ count = func->ffs->eps_count;
+ epfile = func->ffs->epfiles;
+ ep = func->eps;
while (count--) {
/* pending requests get nuked */
if (ep->ep)
@@ -1945,14 +1969,18 @@ static void ffs_func_eps_disable(struct ffs_function *func)
static int ffs_func_eps_enable(struct ffs_function *func)
{
- struct ffs_data *ffs = func->ffs;
- struct ffs_ep *ep = func->eps;
- struct ffs_epfile *epfile = ffs->epfiles;
- unsigned count = ffs->eps_count;
+ struct ffs_data *ffs;
+ struct ffs_ep *ep;
+ struct ffs_epfile *epfile;
+ unsigned short count;
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
+ ffs = func->ffs;
+ ep = func->eps;
+ epfile = ffs->epfiles;
+ count = ffs->eps_count;
while(count--) {
ep->ep->driver_data = ep;
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 752439690fda..46dd11dcb3a8 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2547,7 +2547,7 @@ static int fsg_main_thread(void *common_)
up_write(&common->filesem);
/* Let fsg_unbind() know the thread has exited */
- complete_and_exit(&common->thread_notifier, 0);
+ kthread_complete_and_exit(&common->thread_notifier, 0);
}
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 71a1a26e85c7..fddf539008a9 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1097,7 +1097,7 @@ static ssize_t f_midi_opts_##name##_show(struct config_item *item, char *page) \
int result; \
\
mutex_lock(&opts->lock); \
- result = sprintf(page, "%d\n", opts->name); \
+ result = sprintf(page, "%u\n", opts->name); \
mutex_unlock(&opts->lock); \
\
return result; \
@@ -1134,7 +1134,51 @@ end: \
\
CONFIGFS_ATTR(f_midi_opts_, name);
-F_MIDI_OPT(index, true, SNDRV_CARDS);
+#define F_MIDI_OPT_SIGNED(name, test_limit, limit) \
+static ssize_t f_midi_opts_##name##_show(struct config_item *item, char *page) \
+{ \
+ struct f_midi_opts *opts = to_f_midi_opts(item); \
+ int result; \
+ \
+ mutex_lock(&opts->lock); \
+ result = sprintf(page, "%d\n", opts->name); \
+ mutex_unlock(&opts->lock); \
+ \
+ return result; \
+} \
+ \
+static ssize_t f_midi_opts_##name##_store(struct config_item *item, \
+ const char *page, size_t len) \
+{ \
+ struct f_midi_opts *opts = to_f_midi_opts(item); \
+ int ret; \
+ s32 num; \
+ \
+ mutex_lock(&opts->lock); \
+ if (opts->refcnt > 1) { \
+ ret = -EBUSY; \
+ goto end; \
+ } \
+ \
+ ret = kstrtos32(page, 0, &num); \
+ if (ret) \
+ goto end; \
+ \
+ if (test_limit && num > limit) { \
+ ret = -EINVAL; \
+ goto end; \
+ } \
+ opts->name = num; \
+ ret = len; \
+ \
+end: \
+ mutex_unlock(&opts->lock); \
+ return ret; \
+} \
+ \
+CONFIGFS_ATTR(f_midi_opts_, name);
+
+F_MIDI_OPT_SIGNED(index, true, SNDRV_CARDS);
F_MIDI_OPT(buflen, false, 0);
F_MIDI_OPT(qlen, false, 0);
F_MIDI_OPT(in_ports, true, MAX_PORTS);
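/*
 * Note: a signed store is needed for 'index' because it may
 * legitimately be negative (-1 asks ALSA to pick a free card slot),
 * which the unsigned parse in the plain F_MIDI_OPT store would reject.
 * Illustrative configfs usage (example path):
 *
 *   echo -1 > functions/midi.usb0/index
 */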
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 1abf08e5164a..6803cd60cc6d 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -584,6 +584,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
if (is_iso) {
switch (speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
size = ss->isoc_maxpacket *
(ss->isoc_mult + 1) *
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 36fa6ef0581b..097a709549d6 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -203,7 +203,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = {
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
- .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED),
+ .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE),
.bAssocTerminal = 0,
/* .bCSourceID = DYNAMIC */
.iChannelNames = 0,
@@ -231,7 +231,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = {
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
/* .bTerminalID = DYNAMIC */
- .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED),
+ .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER),
.bAssocTerminal = 0,
/* .bSourceID = DYNAMIC */
/* .bCSourceID = DYNAMIC */
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index 64de9f1b874c..b7ccf1803656 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -637,14 +637,17 @@ static int rndis_set_response(struct rndis_params *params,
rndis_set_cmplt_type *resp;
rndis_resp_t *r;
+ BufLength = le32_to_cpu(buf->InformationBufferLength);
+ BufOffset = le32_to_cpu(buf->InformationBufferOffset);
+ if ((BufLength > RNDIS_MAX_TOTAL_SIZE) ||
+ (BufOffset + 8 >= RNDIS_MAX_TOTAL_SIZE))
+ return -EINVAL;
+
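/*
 * The host-controlled BufLength/BufOffset are now validated before
 * rndis_add_response() allocates anything. A sketch of an explicitly
 * overflow-safe variant of the same check, using check_add_overflow()
 * from <linux/overflow.h> (an alternative, not what the patch does):
 */
u32 end;

if (BufLength > RNDIS_MAX_TOTAL_SIZE ||
    check_add_overflow(BufOffset, 8U, &end) ||
    end >= RNDIS_MAX_TOTAL_SIZE)
	return -EINVAL;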
r = rndis_add_response(params, sizeof(rndis_set_cmplt_type));
if (!r)
return -ENOMEM;
resp = (rndis_set_cmplt_type *)r->buf;
- BufLength = le32_to_cpu(buf->InformationBufferLength);
- BufOffset = le32_to_cpu(buf->InformationBufferOffset);
-
#ifdef VERBOSE_DEBUG
pr_debug("%s: Length: %d\n", __func__, BufLength);
pr_debug("%s: Offset: %d\n", __func__, BufOffset);
@@ -1117,7 +1120,7 @@ static int rndis_proc_show(struct seq_file *m, void *v)
static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- rndis_params *p = PDE_DATA(file_inode(file));
+ rndis_params *p = pde_data(file_inode(file));
u32 speed = 0;
int i, fl_speed = 0;
@@ -1161,7 +1164,7 @@ static ssize_t rndis_proc_write(struct file *file, const char __user *buffer,
static int rndis_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, rndis_proc_show, PDE_DATA(inode));
+ return single_open(file, rndis_proc_show, pde_data(inode));
}
static const struct proc_ops rndis_proc_ops = {
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index c46400be5464..4561d7a183ff 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -76,8 +76,8 @@ struct snd_uac_chip {
struct snd_pcm *pcm;
/* pre-calculated values for playback iso completion */
- unsigned long long p_interval_mil;
unsigned long long p_residue_mil;
+ unsigned int p_interval;
unsigned int p_framesize;
};
@@ -194,21 +194,24 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
* If there is a residue from this division, add it to the
* residue accumulator.
*/
+ unsigned long long p_interval_mil = uac->p_interval * 1000000ULL;
+
pitched_rate_mil = (unsigned long long)
params->p_srate * prm->pitch;
div_result = pitched_rate_mil;
- do_div(div_result, uac->p_interval_mil);
+ do_div(div_result, uac->p_interval);
+ do_div(div_result, 1000000);
frames = (unsigned int) div_result;
pr_debug("p_srate %d, pitch %d, interval_mil %llu, frames %d\n",
- params->p_srate, prm->pitch, uac->p_interval_mil, frames);
+ params->p_srate, prm->pitch, p_interval_mil, frames);
p_pktsize = min_t(unsigned int,
uac->p_framesize * frames,
ep->maxpacket);
if (p_pktsize < ep->maxpacket) {
- residue_frames_mil = pitched_rate_mil - frames * uac->p_interval_mil;
+ residue_frames_mil = pitched_rate_mil - frames * p_interval_mil;
p_pktsize_residue_mil = uac->p_framesize * residue_frames_mil;
} else
p_pktsize_residue_mil = 0;
@@ -222,11 +225,11 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
* size and decrease the accumulator.
*/
div_result = uac->p_residue_mil;
- do_div(div_result, uac->p_interval_mil);
+ do_div(div_result, uac->p_interval);
+ do_div(div_result, 1000000);
if ((unsigned int) div_result >= uac->p_framesize) {
req->length += uac->p_framesize;
- uac->p_residue_mil -= uac->p_framesize *
- uac->p_interval_mil;
+ uac->p_residue_mil -= uac->p_framesize * p_interval_mil;
pr_debug("increased req length to %d\n", req->length);
}
pr_debug("remains uac->p_residue_mil %llu\n", uac->p_residue_mil);
@@ -591,7 +594,7 @@ int u_audio_start_playback(struct g_audio *audio_dev)
unsigned int factor;
const struct usb_endpoint_descriptor *ep_desc;
int req_len, i;
- unsigned int p_interval, p_pktsize;
+ unsigned int p_pktsize;
ep = audio_dev->in_ep;
prm = &uac->p_prm;
@@ -612,11 +615,10 @@ int u_audio_start_playback(struct g_audio *audio_dev)
/* pre-compute some values for iso_complete() */
uac->p_framesize = params->p_ssize *
num_channels(params->p_chmask);
- p_interval = factor / (1 << (ep_desc->bInterval - 1));
- uac->p_interval_mil = (unsigned long long) p_interval * 1000000;
+ uac->p_interval = factor / (1 << (ep_desc->bInterval - 1));
p_pktsize = min_t(unsigned int,
uac->p_framesize *
- (params->p_srate / p_interval),
+ (params->p_srate / uac->p_interval),
ep->maxpacket);
req_len = p_pktsize;
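/*
 * Note on the split division above: do_div(n, base) divides the 64-bit
 * n in place by a 32-bit base, and p_interval * 1000000 can exceed
 * 32 bits (e.g. 8000 * 1000000), so dividing by p_interval and then by
 * 1000000 stays within do_div()'s contract. Worked example with
 * assumed numbers (p_srate = 48000, pitch = 1000000, p_interval = 8000):
 */
u64 div_result = 48000ULL * 1000000;	/* pitched rate, milli-units */

do_div(div_result, 8000);	/* / p_interval -> 6000000 */
do_div(div_result, 1000000);	/* / 1000000   -> 6 frames */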
@@ -1145,7 +1147,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
}
kctl->id.device = pcm->device;
- kctl->id.subdevice = i;
+ kctl->id.subdevice = 0;
err = snd_ctl_add(card, kctl);
if (err < 0)
@@ -1168,7 +1170,7 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
}
kctl->id.device = pcm->device;
- kctl->id.subdevice = i;
+ kctl->id.subdevice = 0;
kctl->tlv.c = u_audio_volume_tlv;
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 3b58f4fc0a80..51f9d96827b1 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1242,7 +1242,7 @@ out:
return mask;
}
-static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
+static long gadget_dev_ioctl (struct file *fd, unsigned code, unsigned long value)
{
struct dev_data *dev = fd->private_data;
struct usb_gadget *gadget = dev->gadget;
@@ -1826,8 +1826,9 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
spin_lock_irq (&dev->lock);
value = -EINVAL;
if (dev->buf) {
+ spin_unlock_irq(&dev->lock);
kfree(kbuf);
- goto fail;
+ return value;
}
dev->buf = kbuf;
@@ -1874,8 +1875,8 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
value = usb_gadget_probe_driver(&gadgetfs_driver);
if (value != 0) {
- kfree (dev->buf);
- dev->buf = NULL;
+ spin_lock_irq(&dev->lock);
+ goto fail;
} else {
/* at this point "good" hardware has for the first time
* let the USB the host see us. alternatively, if users
@@ -1892,6 +1893,9 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
return value;
fail:
+ dev->config = NULL;
+ dev->hs_config = NULL;
+ dev->dev = NULL;
spin_unlock_irq (&dev->lock);
pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
kfree (dev->buf);
@@ -1900,7 +1904,7 @@ fail:
}
static int
-dev_open (struct inode *inode, struct file *fd)
+gadget_dev_open (struct inode *inode, struct file *fd)
{
struct dev_data *dev = inode->i_private;
int value = -EBUSY;
@@ -1920,12 +1924,12 @@ dev_open (struct inode *inode, struct file *fd)
static const struct file_operations ep0_operations = {
.llseek = no_llseek,
- .open = dev_open,
+ .open = gadget_dev_open,
.read = ep0_read,
.write = dev_config,
.fasync = ep0_fasync,
.poll = ep0_poll,
- .unlocked_ioctl = dev_ioctl,
+ .unlocked_ioctl = gadget_dev_ioctl,
.release = dev_release,
};
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index c5a2c734234a..d86c3a36441e 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -1004,7 +1004,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io,
ret = -EBUSY;
goto out_unlock;
}
- if ((in && !ep->ep->caps.dir_in) || (!in && ep->ep->caps.dir_in)) {
+ if (in != usb_endpoint_dir_in(ep->ep->desc)) {
dev_dbg(&dev->gadget->dev, "fail, wrong direction\n");
ret = -EINVAL;
goto out_unlock;
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
index d918e8b2af3c..b0dfca43fbdc 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c
@@ -110,15 +110,26 @@ static int ast_vhub_dev_feature(struct ast_vhub_dev *d,
u16 wIndex, u16 wValue,
bool is_set)
{
+ u32 val;
+
DDBG(d, "%s_FEATURE(dev val=%02x)\n",
is_set ? "SET" : "CLEAR", wValue);
- if (wValue != USB_DEVICE_REMOTE_WAKEUP)
- return std_req_driver;
+ if (wValue == USB_DEVICE_REMOTE_WAKEUP) {
+ d->wakeup_en = is_set;
+ return std_req_complete;
+ }
- d->wakeup_en = is_set;
+ if (wValue == USB_DEVICE_TEST_MODE) {
+ val = readl(d->vhub->regs + AST_VHUB_CTRL);
+ val &= ~GENMASK(10, 8);
+ val |= VHUB_CTRL_SET_TEST_MODE((wIndex >> 8) & 0x7);
+ writel(val, d->vhub->regs + AST_VHUB_CTRL);
- return std_req_complete;
+ return std_req_complete;
+ }
+
+ return std_req_driver;
}
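/*
 * Note: per USB 2.0 section 9.4.9, SET_FEATURE(TEST_MODE) carries the
 * test selector in the high byte of wIndex, which is why the code
 * above writes (wIndex >> 8) & 0x7 into the vhub control register's
 * test-mode field.
 */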
static int ast_vhub_ep_feature(struct ast_vhub_dev *d,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
index 74ea36c19b1e..b4cf46249fea 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
@@ -251,6 +251,13 @@ static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req
len = remain;
rc = -EOVERFLOW;
}
+
+ /* The hardware may return a wrong data length */
+ if (len < ep->ep.maxpacket && len != remain) {
+ EPDBG(ep, "using expected data len instead\n");
+ len = remain;
+ }
+
if (len && req->req.buf)
memcpy(req->req.buf + req->req.actual, ep->buf, len);
req->req.actual += len;
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index b9960fdd8a51..65cd4e46f031 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -68,6 +68,18 @@ static const struct usb_device_descriptor ast_vhub_dev_desc = {
.bNumConfigurations = 1,
};
+static const struct usb_qualifier_descriptor ast_vhub_qual_desc = {
+ .bLength = 0xA,
+ .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
+ .bcdUSB = cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_HUB,
+ .bDeviceSubClass = 0,
+ .bDeviceProtocol = 0,
+ .bMaxPacketSize0 = 64,
+ .bNumConfigurations = 1,
+ .bRESERVED = 0,
+};
+
/*
* Configuration descriptor: same comments as above
* regarding handling USB1 mode.
@@ -200,17 +212,28 @@ static int ast_vhub_hub_dev_feature(struct ast_vhub_ep *ep,
u16 wIndex, u16 wValue,
bool is_set)
{
+ u32 val;
+
EPDBG(ep, "%s_FEATURE(dev val=%02x)\n",
is_set ? "SET" : "CLEAR", wValue);
- if (wValue != USB_DEVICE_REMOTE_WAKEUP)
- return std_req_stall;
+ if (wValue == USB_DEVICE_REMOTE_WAKEUP) {
+ ep->vhub->wakeup_en = is_set;
+ EPDBG(ep, "Hub remote wakeup %s\n",
+ is_set ? "enabled" : "disabled");
+ return std_req_complete;
+ }
- ep->vhub->wakeup_en = is_set;
- EPDBG(ep, "Hub remote wakeup %s\n",
- is_set ? "enabled" : "disabled");
+ if (wValue == USB_DEVICE_TEST_MODE) {
+ val = readl(ep->vhub->regs + AST_VHUB_CTRL);
+ val &= ~GENMASK(10, 8);
+ val |= VHUB_CTRL_SET_TEST_MODE((wIndex >> 8) & 0x7);
+ writel(val, ep->vhub->regs + AST_VHUB_CTRL);
- return std_req_complete;
+ return std_req_complete;
+ }
+
+ return std_req_stall;
}
static int ast_vhub_hub_ep_feature(struct ast_vhub_ep *ep,
@@ -271,9 +294,11 @@ static int ast_vhub_rep_desc(struct ast_vhub_ep *ep,
BUILD_BUG_ON(dsize > sizeof(vhub->vhub_dev_desc));
BUILD_BUG_ON(USB_DT_DEVICE_SIZE >= AST_VHUB_EP0_MAX_PACKET);
break;
+ case USB_DT_OTHER_SPEED_CONFIG:
case USB_DT_CONFIG:
dsize = AST_VHUB_CONF_DESC_SIZE;
memcpy(ep->buf, &vhub->vhub_conf_desc, dsize);
+ ((u8 *)ep->buf)[1] = desc_type;
BUILD_BUG_ON(dsize > sizeof(vhub->vhub_conf_desc));
BUILD_BUG_ON(AST_VHUB_CONF_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
break;
@@ -283,6 +308,10 @@ static int ast_vhub_rep_desc(struct ast_vhub_ep *ep,
BUILD_BUG_ON(dsize > sizeof(vhub->vhub_hub_desc));
BUILD_BUG_ON(AST_VHUB_HUB_DESC_SIZE >= AST_VHUB_EP0_MAX_PACKET);
break;
+ case USB_DT_DEVICE_QUALIFIER:
+ dsize = sizeof(vhub->vhub_qual_desc);
+ memcpy(ep->buf, &vhub->vhub_qual_desc, dsize);
+ break;
default:
return std_req_stall;
}
@@ -428,6 +457,8 @@ enum std_req_rc ast_vhub_std_hub_request(struct ast_vhub_ep *ep,
switch (wValue >> 8) {
case USB_DT_DEVICE:
case USB_DT_CONFIG:
+ case USB_DT_DEVICE_QUALIFIER:
+ case USB_DT_OTHER_SPEED_CONFIG:
return ast_vhub_rep_desc(ep, wValue >> 8,
wLength);
case USB_DT_STRING:
@@ -1033,6 +1064,10 @@ static int ast_vhub_init_desc(struct ast_vhub *vhub)
else
ret = ast_vhub_str_alloc_add(vhub, &ast_vhub_strings);
+ /* Initialize vhub Qualifier Descriptor. */
+ memcpy(&vhub->vhub_qual_desc, &ast_vhub_qual_desc,
+ sizeof(vhub->vhub_qual_desc));
+
return ret;
}
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
index 87a5dea12d3c..6b9dfa6e10eb 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -425,6 +425,7 @@ struct ast_vhub {
struct ast_vhub_full_cdesc vhub_conf_desc;
struct usb_hub_descriptor vhub_hub_desc;
struct list_head vhub_str_desc;
+ struct usb_qualifier_descriptor vhub_qual_desc;
};
/* Standard request handlers result codes */
diff --git a/drivers/usb/gadget/udc/at91_udc.c b/drivers/usb/gadget/udc/at91_udc.c
index d9ad9adf7348..9040a0561466 100644
--- a/drivers/usb/gadget/udc/at91_udc.c
+++ b/drivers/usb/gadget/udc/at91_udc.c
@@ -25,7 +25,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_data/atmel.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
@@ -1510,7 +1510,6 @@ static irqreturn_t at91_udc_irq (int irq, void *_udc)
static void at91_vbus_update(struct at91_udc *udc, unsigned value)
{
- value ^= udc->board.vbus_active_low;
if (value != udc->vbus)
at91_vbus_session(&udc->gadget, value);
}
@@ -1521,7 +1520,7 @@ static irqreturn_t at91_vbus_irq(int irq, void *_udc)
/* vbus needs at least brief debouncing */
udelay(10);
- at91_vbus_update(udc, gpio_get_value(udc->board.vbus_pin));
+ at91_vbus_update(udc, gpiod_get_value(udc->board.vbus_pin));
return IRQ_HANDLED;
}
@@ -1531,7 +1530,7 @@ static void at91_vbus_timer_work(struct work_struct *work)
struct at91_udc *udc = container_of(work, struct at91_udc,
vbus_timer_work);
- at91_vbus_update(udc, gpio_get_value_cansleep(udc->board.vbus_pin));
+ at91_vbus_update(udc, gpiod_get_value_cansleep(udc->board.vbus_pin));
if (!timer_pending(&udc->vbus_timer))
mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT);
@@ -1595,7 +1594,6 @@ static void at91udc_shutdown(struct platform_device *dev)
static int at91rm9200_udc_init(struct at91_udc *udc)
{
struct at91_ep *ep;
- int ret;
int i;
for (i = 0; i < NUM_ENDPOINTS; i++) {
@@ -1615,32 +1613,23 @@ static int at91rm9200_udc_init(struct at91_udc *udc)
}
}
- if (!gpio_is_valid(udc->board.pullup_pin)) {
+ if (!udc->board.pullup_pin) {
DBG("no D+ pullup?\n");
return -ENODEV;
}
- ret = devm_gpio_request(&udc->pdev->dev, udc->board.pullup_pin,
- "udc_pullup");
- if (ret) {
- DBG("D+ pullup is busy\n");
- return ret;
- }
-
- gpio_direction_output(udc->board.pullup_pin,
- udc->board.pullup_active_low);
+ gpiod_direction_output(udc->board.pullup_pin,
+ gpiod_is_active_low(udc->board.pullup_pin));
return 0;
}
static void at91rm9200_udc_pullup(struct at91_udc *udc, int is_on)
{
- int active = !udc->board.pullup_active_low;
-
if (is_on)
- gpio_set_value(udc->board.pullup_pin, active);
+ gpiod_set_value(udc->board.pullup_pin, 1);
else
- gpio_set_value(udc->board.pullup_pin, !active);
+ gpiod_set_value(udc->board.pullup_pin, 0);
}
static const struct at91_udc_caps at91rm9200_udc_caps = {
@@ -1783,20 +1772,20 @@ static void at91udc_of_init(struct at91_udc *udc, struct device_node *np)
{
struct at91_udc_data *board = &udc->board;
const struct of_device_id *match;
- enum of_gpio_flags flags;
u32 val;
if (of_property_read_u32(np, "atmel,vbus-polled", &val) == 0)
board->vbus_polled = 1;
- board->vbus_pin = of_get_named_gpio_flags(np, "atmel,vbus-gpio", 0,
- &flags);
- board->vbus_active_low = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0;
+ board->vbus_pin = gpiod_get_from_of_node(np, "atmel,vbus-gpio", 0,
+ GPIOD_IN, "udc_vbus");
+ if (IS_ERR(board->vbus_pin))
+ board->vbus_pin = NULL;
- board->pullup_pin = of_get_named_gpio_flags(np, "atmel,pullup-gpio", 0,
- &flags);
-
- board->pullup_active_low = (flags & OF_GPIO_ACTIVE_LOW) ? 1 : 0;
+ board->pullup_pin = gpiod_get_from_of_node(np, "atmel,pullup-gpio", 0,
+ GPIOD_ASIS, "udc_pullup");
+ if (IS_ERR(board->pullup_pin))
+ board->pullup_pin = NULL;
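/*
 * Sketch (not part of the patch) of the descriptor-based GPIO consumer
 * API this conversion adopts: polarity now lives inside the gpio_desc,
 * so drivers no longer carry *_active_low flags. Hypothetical consumer
 * reading a "my-gpios" DT property:
 */
struct gpio_desc *gpiod;
int value;

gpiod = gpiod_get(dev, "my", GPIOD_IN);
if (IS_ERR(gpiod))
	return PTR_ERR(gpiod);

value = gpiod_get_value(gpiod);	/* already polarity-corrected */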
match = of_match_node(at91_udc_dt_ids, np);
if (match)
@@ -1886,22 +1875,14 @@ static int at91udc_probe(struct platform_device *pdev)
goto err_unprepare_iclk;
}
- if (gpio_is_valid(udc->board.vbus_pin)) {
- retval = devm_gpio_request(dev, udc->board.vbus_pin,
- "udc_vbus");
- if (retval) {
- DBG("request vbus pin failed\n");
- goto err_unprepare_iclk;
- }
-
- gpio_direction_input(udc->board.vbus_pin);
+ if (udc->board.vbus_pin) {
+ gpiod_direction_input(udc->board.vbus_pin);
/*
* Get the initial state of VBUS - we cannot expect
* a pending interrupt.
*/
- udc->vbus = gpio_get_value_cansleep(udc->board.vbus_pin) ^
- udc->board.vbus_active_low;
+ udc->vbus = gpiod_get_value_cansleep(udc->board.vbus_pin);
if (udc->board.vbus_polled) {
INIT_WORK(&udc->vbus_timer_work, at91_vbus_timer_work);
@@ -1910,11 +1891,11 @@ static int at91udc_probe(struct platform_device *pdev)
jiffies + VBUS_POLL_TIMEOUT);
} else {
retval = devm_request_irq(dev,
- gpio_to_irq(udc->board.vbus_pin),
+ gpiod_to_irq(udc->board.vbus_pin),
at91_vbus_irq, 0, driver_name, udc);
if (retval) {
DBG("request vbus irq %d failed\n",
- udc->board.vbus_pin);
+ desc_to_gpio(udc->board.vbus_pin));
goto err_unprepare_iclk;
}
}
@@ -1988,8 +1969,8 @@ static int at91udc_suspend(struct platform_device *pdev, pm_message_t mesg)
enable_irq_wake(udc->udp_irq);
udc->active_suspend = wake;
- if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled && wake)
- enable_irq_wake(udc->board.vbus_pin);
+ if (udc->board.vbus_pin && !udc->board.vbus_polled && wake)
+ enable_irq_wake(gpiod_to_irq(udc->board.vbus_pin));
return 0;
}
@@ -1998,9 +1979,9 @@ static int at91udc_resume(struct platform_device *pdev)
struct at91_udc *udc = platform_get_drvdata(pdev);
unsigned long flags;
- if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled &&
+ if (udc->board.vbus_pin && !udc->board.vbus_polled &&
udc->active_suspend)
- disable_irq_wake(udc->board.vbus_pin);
+ disable_irq_wake(gpiod_to_irq(udc->board.vbus_pin));
/* maybe reconnect to host; if so, clocks on */
if (udc->active_suspend)
diff --git a/drivers/usb/gadget/udc/at91_udc.h b/drivers/usb/gadget/udc/at91_udc.h
index fd58c5b81826..28c1042f8623 100644
--- a/drivers/usb/gadget/udc/at91_udc.h
+++ b/drivers/usb/gadget/udc/at91_udc.h
@@ -109,11 +109,9 @@ struct at91_udc_caps {
};
struct at91_udc_data {
- int vbus_pin; /* high == host powering us */
- u8 vbus_active_low; /* vbus polarity */
- u8 vbus_polled; /* Use polling, not interrupt */
- int pullup_pin; /* active == D+ pulled up */
- u8 pullup_active_low; /* true == pullup_pin is active low */
+ struct gpio_desc *vbus_pin; /* high == host powering us */
+ u8 vbus_polled; /* Use polling, not interrupt */
+ struct gpio_desc *pullup_pin; /* active == D+ pulled up */
};
/*
diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c
index a9f07c59fc37..2cdb07905bde 100644
--- a/drivers/usb/gadget/udc/bcm63xx_udc.c
+++ b/drivers/usb/gadget/udc/bcm63xx_udc.c
@@ -2321,8 +2321,10 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
/* IRQ resource #0: control interrupt (VBUS, speed, etc.) */
irq = platform_get_irq(pdev, 0);
- if (irq < 0)
+ if (irq < 0) {
+ rc = irq;
goto out_uninit;
+ }
if (devm_request_irq(dev, irq, &bcm63xx_udc_ctrl_isr, 0,
dev_name(dev), udc) < 0)
goto report_request_failure;
@@ -2330,8 +2332,10 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
/* IRQ resources #1-6: data interrupts for IUDMA channels 0-5 */
for (i = 0; i < BCM63XX_NUM_IUDMA; i++) {
irq = platform_get_irq(pdev, i + 1);
- if (irq < 0)
+ if (irq < 0) {
+ rc = irq;
goto out_uninit;
+ }
if (devm_request_irq(dev, irq, &bcm63xx_udc_data_isr, 0,
dev_name(dev), &udc->iudma[i]) < 0)
goto report_request_failure;
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index fa1a3908ec3b..9849e0c86e23 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -623,6 +623,7 @@ static int bdc_resume(struct device *dev)
ret = bdc_reinit(bdc);
if (ret) {
dev_err(bdc->dev, "err in bdc reinit\n");
+ clk_disable_unprepare(bdc->clk);
return ret;
}
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 7f24ce400b59..b6d34dda028b 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2084,10 +2084,8 @@ static int mv_udc_remove(struct platform_device *pdev)
usb_del_gadget_udc(&udc->gadget);
- if (udc->qwork) {
- flush_workqueue(udc->qwork);
+ if (udc->qwork)
destroy_workqueue(udc->qwork);
- }
/* free memory allocated in probe */
dma_pool_destroy(udc->dtd_pool);
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index 52cdfd8212d6..b38747fd3bb0 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -2364,7 +2364,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -ENODEV;
+ return irq;
dev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs))
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 57d417a7c3e0..601829a6b4ba 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2378,6 +2378,8 @@ static void handle_ext_role_switch_states(struct device *dev,
switch (role) {
case USB_ROLE_NONE:
usb3->connection_state = USB_ROLE_NONE;
+ if (cur_role == USB_ROLE_HOST)
+ device_release_driver(host);
if (usb3->driver)
usb3_disconnect(usb3);
usb3_vbus_out(usb3, false);
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 857159dd5ae0..6ce886fb7bfe 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -2179,6 +2179,61 @@ static int xudc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int xudc_suspend(struct device *dev)
+{
+ struct xusb_udc *udc;
+ u32 crtlreg;
+ unsigned long flags;
+
+ udc = dev_get_drvdata(dev);
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ crtlreg = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+ crtlreg &= ~XUSB_CONTROL_USB_READY_MASK;
+
+ udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, crtlreg);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+ if (udc->driver && udc->driver->suspend)
+ udc->driver->suspend(&udc->gadget);
+
+ clk_disable(udc->clk);
+
+ return 0;
+}
+
+static int xudc_resume(struct device *dev)
+{
+ struct xusb_udc *udc;
+ u32 crtlreg;
+ unsigned long flags;
+ int ret;
+
+ udc = dev_get_drvdata(dev);
+
+ ret = clk_enable(udc->clk);
+ if (ret < 0)
+ return ret;
+
+ spin_lock_irqsave(&udc->lock, flags);
+
+ crtlreg = udc->read_fn(udc->addr + XUSB_CONTROL_OFFSET);
+ crtlreg |= XUSB_CONTROL_USB_READY_MASK;
+
+ udc->write_fn(udc->addr, XUSB_CONTROL_OFFSET, crtlreg);
+
+ spin_unlock_irqrestore(&udc->lock, flags);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops xudc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xudc_suspend, xudc_resume)
+};
+
/* Match table for of_platform binding */
static const struct of_device_id usb_of_match[] = {
{ .compatible = "xlnx,usb2-device-4.00.a", },
@@ -2190,6 +2245,7 @@ static struct platform_driver xudc_driver = {
.driver = {
.name = driver_name,
.of_match_table = usb_of_match,
+ .pm = &xudc_pm_ops,
},
.probe = xudc_probe,
.remove = xudc_remove,
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index d1d926f8f9c2..57ca5f97a3dc 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -772,3 +772,14 @@ config USB_HCD_TEST_MODE
This option is of interest only to developers who need to validate
their USB hardware designs. It is not needed for normal use. If
unsure, say N.
+
+config USB_XEN_HCD
+ tristate "Xen usb virtual host driver"
+ depends on XEN
+ select XEN_XENBUS_FRONTEND
+ help
+ The Xen USB virtual host driver serves as a frontend driver enabling
+ a Xen guest system to access USB devices passed through to the guest
+ by the Xen host (usually Dom0).
+ Only needed if the kernel is running in a Xen guest and generic
+ access to a USB device is needed.
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 171de4df50bd..2948983618fb 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -85,3 +85,4 @@ obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
obj-$(CONFIG_USB_MAX3421_HCD) += max3421-hcd.o
+obj-$(CONFIG_USB_XEN_HCD) += xen-hcd.o
diff --git a/drivers/usb/host/ehci-brcm.c b/drivers/usb/host/ehci-brcm.c
index d3626bfa966b..6a0f64c9e5e8 100644
--- a/drivers/usb/host/ehci-brcm.c
+++ b/drivers/usb/host/ehci-brcm.c
@@ -62,8 +62,12 @@ static int ehci_brcm_hub_control(
u32 __iomem *status_reg;
unsigned long flags;
int retval, irq_disabled = 0;
+ u32 temp;
- status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
+ temp = (wIndex & 0xff) - 1;
+ if (temp >= HCS_N_PORTS_MAX) /* Avoid index-out-of-bounds warning */
+ temp = 0;
+ status_reg = &ehci->regs->port_status[temp];
/*
* RESUME is cleared when GetPortStatus() is called 20ms after start
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index b590995a6b3e..7af17c8e069b 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -5576,14 +5576,9 @@ static int fotg210_hcd_probe(struct platform_device *pdev)
pdev->dev.power.power_state = PMSG_ON;
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res) {
- dev_err(dev, "Found HC with no IRQ. Check %s setup!\n",
- dev_name(dev));
- return -ENODEV;
- }
-
- irq = res->start;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
dev_name(dev));
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index ded9738392e4..45dcf8292072 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -306,7 +306,7 @@ static int ohci_hcd_omap_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- retval = -ENXIO;
+ retval = irq;
goto err3;
}
retval = usb_add_hcd(hcd, irq, 0);
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index 1bec9b585e2d..12264c048601 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -356,7 +356,7 @@ static int ohci_hcd_s3c2410_probe(struct platform_device *dev)
{
struct usb_hcd *hcd = NULL;
struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
- int retval;
+ int retval, irq;
s3c2410_usb_set_power(info, 1, 1);
s3c2410_usb_set_power(info, 2, 1);
@@ -388,9 +388,15 @@ static int ohci_hcd_s3c2410_probe(struct platform_device *dev)
goto err_put;
}
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
+ retval = irq;
+ goto err_put;
+ }
+
s3c2410_start_hc(dev, hcd);
- retval = usb_add_hcd(hcd, dev->resource[1].start, 0);
+ retval = usb_add_hcd(hcd, irq, 0);
if (retval != 0)
goto err_ioremap;
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index b4cd9e6c72fd..9b81f420656d 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -76,7 +76,7 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
goto err_put_hcd;
}
- hcd->rsrc_start = pdev->resource[0].start;
+ hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
sohci_p = to_spear_ohci(hcd);
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 3f3d62dc0674..49539b9f0e94 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -21,11 +21,6 @@
* usb-ohci-tc6393.c(C) Copyright 2004 Lineo Solutions, Inc.
*/
-/*#include <linux/fs.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/namei.h>
-#include <linux/sched.h>*/
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index ae882d76612b..d879d6af5710 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -3211,7 +3211,6 @@ static void __exit u132_hcd_exit(void)
platform_driver_unregister(&u132_platform_driver);
printk(KERN_INFO "u132-hcd driver deregistered\n");
wait_event(u132_hcd_wait, u132_instances == 0);
- flush_workqueue(workqueue);
destroy_workqueue(workqueue);
}
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 70dbd95c3f06..b2049b47a08d 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -113,7 +113,8 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
num_ports);
}
if (of_device_is_compatible(np, "aspeed,ast2400-uhci") ||
- of_device_is_compatible(np, "aspeed,ast2500-uhci")) {
+ of_device_is_compatible(np, "aspeed,ast2500-uhci") ||
+ of_device_is_compatible(np, "aspeed,ast2600-uhci")) {
uhci->is_aspeed = 1;
dev_info(&pdev->dev,
"Enabled Aspeed implementation workarounds\n");
@@ -132,7 +133,11 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
goto err_rmr;
}
- ret = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+ goto err_clk;
+
+ ret = usb_add_hcd(hcd, ret, IRQF_SHARED);
if (ret)
goto err_clk;
diff --git a/drivers/usb/host/xen-hcd.c b/drivers/usb/host/xen-hcd.c
new file mode 100644
index 000000000000..be09fd9bac58
--- /dev/null
+++ b/drivers/usb/host/xen-hcd.c
@@ -0,0 +1,1609 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * xen-hcd.c
+ *
+ * Xen USB Virtual Host Controller driver
+ *
+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
+ * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
+ */
+
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/list.h>
+#include <linux/usb/hcd.h>
+#include <linux/io.h>
+
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+#include <xen/grant_table.h>
+#include <xen/events.h>
+#include <xen/page.h>
+
+#include <xen/interface/io/usbif.h>
+
+/* Private per-URB data */
+struct urb_priv {
+ struct list_head list;
+ struct urb *urb;
+ int req_id; /* RING_REQUEST id for submitting */
+ int unlink_req_id; /* RING_REQUEST id for unlinking */
+ int status;
+ bool unlinked; /* dequeued marker */
+};
+
+/* virtual roothub port status */
+struct rhport_status {
+ __u32 status;
+ bool resuming; /* in resuming */
+ bool c_connection; /* connection changed */
+ unsigned long timeout;
+};
+
+/* status of attached device */
+struct vdevice_status {
+ int devnum;
+ enum usb_device_state status;
+ enum usb_device_speed speed;
+};
+
+/* RING request shadow */
+struct usb_shadow {
+ struct xenusb_urb_request req;
+ struct urb *urb;
+};
+
+struct xenhcd_info {
+ /* Virtual Host Controller has 4 urb queues */
+ struct list_head pending_submit_list;
+ struct list_head pending_unlink_list;
+ struct list_head in_progress_list;
+ struct list_head giveback_waiting_list;
+
+ spinlock_t lock;
+
+ /* timer that kicks pending and giveback-waiting urbs */
+ struct timer_list watchdog;
+ unsigned long actions;
+
+ /* virtual root hub */
+ int rh_numports;
+ struct rhport_status ports[XENUSB_MAX_PORTNR];
+ struct vdevice_status devices[XENUSB_MAX_PORTNR];
+
+ /* Xen related stuff */
+ struct xenbus_device *xbdev;
+ int urb_ring_ref;
+ int conn_ring_ref;
+ struct xenusb_urb_front_ring urb_ring;
+ struct xenusb_conn_front_ring conn_ring;
+
+ unsigned int evtchn;
+ unsigned int irq;
+ struct usb_shadow shadow[XENUSB_URB_RING_SIZE];
+ unsigned int shadow_free;
+
+ bool error;
+};
+
+#define GRANT_INVALID_REF 0
+
+#define XENHCD_RING_JIFFIES (HZ/200)
+#define XENHCD_SCAN_JIFFIES 1
+
+enum xenhcd_timer_action {
+ TIMER_RING_WATCHDOG,
+ TIMER_SCAN_PENDING_URBS,
+};
+
+static struct kmem_cache *xenhcd_urbp_cachep;
+
+static inline struct xenhcd_info *xenhcd_hcd_to_info(struct usb_hcd *hcd)
+{
+ return (struct xenhcd_info *)hcd->hcd_priv;
+}
+
+static inline struct usb_hcd *xenhcd_info_to_hcd(struct xenhcd_info *info)
+{
+ return container_of((void *)info, struct usb_hcd, hcd_priv);
+}
+
+static void xenhcd_set_error(struct xenhcd_info *info, const char *msg)
+{
+ info->error = true;
+
+ pr_alert("xen-hcd: protocol error: %s!\n", msg);
+}
+
+static inline void xenhcd_timer_action_done(struct xenhcd_info *info,
+ enum xenhcd_timer_action action)
+{
+ clear_bit(action, &info->actions);
+}
+
+static void xenhcd_timer_action(struct xenhcd_info *info,
+ enum xenhcd_timer_action action)
+{
+ if (timer_pending(&info->watchdog) &&
+ test_bit(TIMER_SCAN_PENDING_URBS, &info->actions))
+ return;
+
+ if (!test_and_set_bit(action, &info->actions)) {
+ unsigned long t;
+
+ switch (action) {
+ case TIMER_RING_WATCHDOG:
+ t = XENHCD_RING_JIFFIES;
+ break;
+ default:
+ t = XENHCD_SCAN_JIFFIES;
+ break;
+ }
+ mod_timer(&info->watchdog, t + jiffies);
+ }
+}
+
+/*
+ * set virtual port connection status
+ */
+static void xenhcd_set_connect_state(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ if (info->ports[port].status & USB_PORT_STAT_POWER) {
+ switch (info->devices[port].speed) {
+ case XENUSB_SPEED_NONE:
+ info->ports[port].status &=
+ ~(USB_PORT_STAT_CONNECTION |
+ USB_PORT_STAT_ENABLE |
+ USB_PORT_STAT_LOW_SPEED |
+ USB_PORT_STAT_HIGH_SPEED |
+ USB_PORT_STAT_SUSPEND);
+ break;
+ case XENUSB_SPEED_LOW:
+ info->ports[port].status |= USB_PORT_STAT_CONNECTION;
+ info->ports[port].status |= USB_PORT_STAT_LOW_SPEED;
+ break;
+ case XENUSB_SPEED_FULL:
+ info->ports[port].status |= USB_PORT_STAT_CONNECTION;
+ break;
+ case XENUSB_SPEED_HIGH:
+ info->ports[port].status |= USB_PORT_STAT_CONNECTION;
+ info->ports[port].status |= USB_PORT_STAT_HIGH_SPEED;
+ break;
+ default: /* error */
+ return;
+ }
+ info->ports[port].status |= (USB_PORT_STAT_C_CONNECTION << 16);
+ }
+}
+
+/*
+ * set virtual device connection status
+ */
+static int xenhcd_rhport_connect(struct xenhcd_info *info, __u8 portnum,
+ __u8 speed)
+{
+ int port;
+
+ if (portnum < 1 || portnum > info->rh_numports)
+ return -EINVAL; /* invalid port number */
+
+ port = portnum - 1;
+ if (info->devices[port].speed != speed) {
+ switch (speed) {
+ case XENUSB_SPEED_NONE: /* disconnect */
+ info->devices[port].status = USB_STATE_NOTATTACHED;
+ break;
+ case XENUSB_SPEED_LOW:
+ case XENUSB_SPEED_FULL:
+ case XENUSB_SPEED_HIGH:
+ info->devices[port].status = USB_STATE_ATTACHED;
+ break;
+ default: /* error */
+ return -EINVAL;
+ }
+ info->devices[port].speed = speed;
+ info->ports[port].c_connection = true;
+
+ xenhcd_set_connect_state(info, portnum);
+ }
+
+ return 0;
+}
+
+/*
+ * SetPortFeature(PORT_SUSPENDED)
+ */
+static void xenhcd_rhport_suspend(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ info->ports[port].status |= USB_PORT_STAT_SUSPEND;
+ info->devices[port].status = USB_STATE_SUSPENDED;
+}
+
+/*
+ * ClearPortFeature(PORT_SUSPENDED)
+ */
+static void xenhcd_rhport_resume(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ if (info->ports[port].status & USB_PORT_STAT_SUSPEND) {
+ info->ports[port].resuming = true;
+ info->ports[port].timeout = jiffies + msecs_to_jiffies(20);
+ }
+}
+
+/*
+ * SetPortFeature(PORT_POWER)
+ */
+static void xenhcd_rhport_power_on(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ if ((info->ports[port].status & USB_PORT_STAT_POWER) == 0) {
+ info->ports[port].status |= USB_PORT_STAT_POWER;
+ if (info->devices[port].status != USB_STATE_NOTATTACHED)
+ info->devices[port].status = USB_STATE_POWERED;
+ if (info->ports[port].c_connection)
+ xenhcd_set_connect_state(info, portnum);
+ }
+}
+
+/*
+ * ClearPortFeature(PORT_POWER)
+ * SetConfiguration(non-zero)
+ * Power_Source_Off
+ * Over-current
+ */
+static void xenhcd_rhport_power_off(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ if (info->ports[port].status & USB_PORT_STAT_POWER) {
+ info->ports[port].status = 0;
+ if (info->devices[port].status != USB_STATE_NOTATTACHED)
+ info->devices[port].status = USB_STATE_ATTACHED;
+ }
+}
+
+/*
+ * ClearPortFeature(PORT_ENABLE)
+ */
+static void xenhcd_rhport_disable(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ info->ports[port].status &= ~USB_PORT_STAT_ENABLE;
+ info->ports[port].status &= ~USB_PORT_STAT_SUSPEND;
+ info->ports[port].resuming = false;
+ if (info->devices[port].status != USB_STATE_NOTATTACHED)
+ info->devices[port].status = USB_STATE_POWERED;
+}
+
+/*
+ * SetPortFeature(PORT_RESET)
+ */
+static void xenhcd_rhport_reset(struct xenhcd_info *info, int portnum)
+{
+ int port;
+
+ port = portnum - 1;
+ info->ports[port].status &= ~(USB_PORT_STAT_ENABLE |
+ USB_PORT_STAT_LOW_SPEED |
+ USB_PORT_STAT_HIGH_SPEED);
+ info->ports[port].status |= USB_PORT_STAT_RESET;
+
+ if (info->devices[port].status != USB_STATE_NOTATTACHED)
+ info->devices[port].status = USB_STATE_ATTACHED;
+
+ /* 10msec reset signaling */
+ info->ports[port].timeout = jiffies + msecs_to_jiffies(10);
+}
+
+#ifdef CONFIG_PM
+static int xenhcd_bus_suspend(struct usb_hcd *hcd)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+ int ret = 0;
+ int i, ports;
+
+ ports = info->rh_numports;
+
+ spin_lock_irq(&info->lock);
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ ret = -ESHUTDOWN;
+ } else {
+ /* suspend any active ports */
+ for (i = 1; i <= ports; i++)
+ xenhcd_rhport_suspend(info, i);
+ }
+ spin_unlock_irq(&info->lock);
+
+ del_timer_sync(&info->watchdog);
+
+ return ret;
+}
+
+static int xenhcd_bus_resume(struct usb_hcd *hcd)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+ int ret = 0;
+ int i, ports;
+
+ ports = info->rh_numports;
+
+ spin_lock_irq(&info->lock);
+ if (!test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags)) {
+ ret = -ESHUTDOWN;
+ } else {
+ /* resume any suspended ports */
+ for (i = 1; i <= ports; i++)
+ xenhcd_rhport_resume(info, i);
+ }
+ spin_unlock_irq(&info->lock);
+
+ return ret;
+}
+#endif
+
+static void xenhcd_hub_descriptor(struct xenhcd_info *info,
+ struct usb_hub_descriptor *desc)
+{
+ __u16 temp;
+ int ports = info->rh_numports;
+
+ desc->bDescriptorType = 0x29; /* USB_DT_HUB */
+ desc->bPwrOn2PwrGood = 10; /* EHCI says 20ms max */
+ desc->bHubContrCurrent = 0;
+ desc->bNbrPorts = ports;
+
+ /* size of DeviceRemovable and PortPwrCtrlMask fields */
+ temp = 1 + (ports / 8);
+ desc->bDescLength = 7 + 2 * temp;
+
+ /* bitmaps for DeviceRemovable and PortPwrCtrlMask */
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
+
+ /* per-port over current reporting and no power switching */
+ temp = 0x000a;
+ desc->wHubCharacteristics = cpu_to_le16(temp);
+}
+
+/* port status change mask for hub_status_data */
+#define PORT_C_MASK ((USB_PORT_STAT_C_CONNECTION | \
+ USB_PORT_STAT_C_ENABLE | \
+ USB_PORT_STAT_C_SUSPEND | \
+ USB_PORT_STAT_C_OVERCURRENT | \
+ USB_PORT_STAT_C_RESET) << 16)
+
+/*
+ * See USB 2.0 Spec, 11.12.4 Hub and Port Status Change Bitmap.
+ * If any port status changed, write the bitmap to buf and return
+ * its length (in bytes).
+ * If nothing changed, return 0.
+ */
+static int xenhcd_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+ int ports;
+ int i;
+ unsigned long flags;
+ int ret;
+ int changed = 0;
+
+ /* initialize the status to no-changes */
+ ports = info->rh_numports;
+ ret = 1 + (ports / 8);
+ memset(buf, 0, ret);
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ for (i = 0; i < ports; i++) {
+ /* check status for each port */
+ if (info->ports[i].status & PORT_C_MASK) {
+ buf[(i + 1) / 8] |= 1 << (i + 1) % 8;
+ changed = 1;
+ }
+ }
+
+ if ((hcd->state == HC_STATE_SUSPENDED) && (changed == 1))
+ usb_hcd_resume_root_hub(hcd);
+
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ return changed ? ret : 0;
+}
+
+static int xenhcd_hub_control(struct usb_hcd *hcd, __u16 typeReq, __u16 wValue,
+ __u16 wIndex, char *buf, __u16 wLength)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+ int ports = info->rh_numports;
+ unsigned long flags;
+ int ret = 0;
+ int i;
+ int changed = 0;
+
+ spin_lock_irqsave(&info->lock, flags);
+ switch (typeReq) {
+ case ClearHubFeature:
+ /* ignore this request */
+ break;
+ case ClearPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ xenhcd_rhport_resume(info, wIndex);
+ break;
+ case USB_PORT_FEAT_POWER:
+ xenhcd_rhport_power_off(info, wIndex);
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ xenhcd_rhport_disable(info, wIndex);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ info->ports[wIndex - 1].c_connection = false;
+ fallthrough;
+ default:
+ info->ports[wIndex - 1].status &= ~(1 << wValue);
+ break;
+ }
+ break;
+ case GetHubDescriptor:
+ xenhcd_hub_descriptor(info, (struct usb_hub_descriptor *)buf);
+ break;
+ case GetHubStatus:
+ /* the local power supply is always good and no over-current exists */
+ *(__le32 *)buf = cpu_to_le32(0);
+ break;
+ case GetPortStatus:
+ if (!wIndex || wIndex > ports)
+ goto error;
+
+ wIndex--;
+
+ /* resume completion */
+ if (info->ports[wIndex].resuming &&
+ time_after_eq(jiffies, info->ports[wIndex].timeout)) {
+ info->ports[wIndex].status |=
+ USB_PORT_STAT_C_SUSPEND << 16;
+ info->ports[wIndex].status &= ~USB_PORT_STAT_SUSPEND;
+ }
+
+ /* reset completion */
+ if ((info->ports[wIndex].status & USB_PORT_STAT_RESET) != 0 &&
+ time_after_eq(jiffies, info->ports[wIndex].timeout)) {
+ info->ports[wIndex].status |=
+ USB_PORT_STAT_C_RESET << 16;
+ info->ports[wIndex].status &= ~USB_PORT_STAT_RESET;
+
+ if (info->devices[wIndex].status !=
+ USB_STATE_NOTATTACHED) {
+ info->ports[wIndex].status |=
+ USB_PORT_STAT_ENABLE;
+ info->devices[wIndex].status =
+ USB_STATE_DEFAULT;
+ }
+
+ switch (info->devices[wIndex].speed) {
+ case XENUSB_SPEED_LOW:
+ info->ports[wIndex].status |=
+ USB_PORT_STAT_LOW_SPEED;
+ break;
+ case XENUSB_SPEED_HIGH:
+ info->ports[wIndex].status |=
+ USB_PORT_STAT_HIGH_SPEED;
+ break;
+ default:
+ break;
+ }
+ }
+
+ *(__le32 *)buf = cpu_to_le32(info->ports[wIndex].status);
+ break;
+ case SetPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ xenhcd_rhport_power_on(info, wIndex);
+ break;
+ case USB_PORT_FEAT_RESET:
+ xenhcd_rhport_reset(info, wIndex);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ xenhcd_rhport_suspend(info, wIndex);
+ break;
+ default:
+ if (info->ports[wIndex-1].status & USB_PORT_STAT_POWER)
+ info->ports[wIndex-1].status |= (1 << wValue);
+ }
+ break;
+
+ case SetHubFeature:
+ /* not supported */
+ default:
+error:
+ ret = -EPIPE;
+ }
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ /* check status for each port */
+ for (i = 0; i < ports; i++) {
+ if (info->ports[i].status & PORT_C_MASK)
+ changed = 1;
+ }
+ if (changed)
+ usb_hcd_poll_rh_status(hcd);
+
+ return ret;
+}
+
+static void xenhcd_free_urb_priv(struct urb_priv *urbp)
+{
+ urbp->urb->hcpriv = NULL;
+ kmem_cache_free(xenhcd_urbp_cachep, urbp);
+}
+
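+/* the free list is threaded through the otherwise unused shadow req.id fields */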
+static inline unsigned int xenhcd_get_id_from_freelist(struct xenhcd_info *info)
+{
+ unsigned int free;
+
+ free = info->shadow_free;
+ info->shadow_free = info->shadow[free].req.id;
+ info->shadow[free].req.id = 0x0fff; /* debug */
+ return free;
+}
+
+static inline void xenhcd_add_id_to_freelist(struct xenhcd_info *info,
+ unsigned int id)
+{
+ info->shadow[id].req.id = info->shadow_free;
+ info->shadow[id].urb = NULL;
+ info->shadow_free = id;
+}
+
+static inline int xenhcd_count_pages(void *addr, int length)
+{
+ unsigned long vaddr = (unsigned long)addr;
+
+ return PFN_UP(vaddr + length) - PFN_DOWN(vaddr);
+}
+
+static void xenhcd_gnttab_map(struct xenhcd_info *info, void *addr, int length,
+ grant_ref_t *gref_head,
+ struct xenusb_request_segment *seg,
+ int nr_pages, int flags)
+{
+ grant_ref_t ref;
+ unsigned long buffer_mfn;
+ unsigned int offset;
+ unsigned int len = length;
+ unsigned int bytes;
+ int i;
+
+ for (i = 0; i < nr_pages; i++) {
+ buffer_mfn = PFN_DOWN(arbitrary_virt_to_machine(addr).maddr);
+ offset = offset_in_page(addr);
+
+ bytes = PAGE_SIZE - offset;
+ if (bytes > len)
+ bytes = len;
+
+ ref = gnttab_claim_grant_reference(gref_head);
+ gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
+ buffer_mfn, flags);
+ seg[i].gref = ref;
+ seg[i].offset = (__u16)offset;
+ seg[i].length = (__u16)bytes;
+
+ addr += bytes;
+ len -= bytes;
+ }
+}
+
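+/* translate a Linux URB pipe value into the xenusb pipe encoding, with port number */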
+static __u32 xenhcd_pipe_urb_to_xenusb(__u32 urb_pipe, __u8 port)
+{
+ __u32 pipe;
+
+ pipe = usb_pipedevice(urb_pipe) << XENUSB_PIPE_DEV_SHIFT;
+ pipe |= usb_pipeendpoint(urb_pipe) << XENUSB_PIPE_EP_SHIFT;
+ if (usb_pipein(urb_pipe))
+ pipe |= XENUSB_PIPE_DIR;
+ switch (usb_pipetype(urb_pipe)) {
+ case PIPE_ISOCHRONOUS:
+ pipe |= XENUSB_PIPE_TYPE_ISOC << XENUSB_PIPE_TYPE_SHIFT;
+ break;
+ case PIPE_INTERRUPT:
+ pipe |= XENUSB_PIPE_TYPE_INT << XENUSB_PIPE_TYPE_SHIFT;
+ break;
+ case PIPE_CONTROL:
+ pipe |= XENUSB_PIPE_TYPE_CTRL << XENUSB_PIPE_TYPE_SHIFT;
+ break;
+ case PIPE_BULK:
+ pipe |= XENUSB_PIPE_TYPE_BULK << XENUSB_PIPE_TYPE_SHIFT;
+ break;
+ }
+ pipe = xenusb_setportnum_pipe(pipe, port);
+
+ return pipe;
+}
+
+static int xenhcd_map_urb_for_request(struct xenhcd_info *info, struct urb *urb,
+ struct xenusb_urb_request *req)
+{
+ grant_ref_t gref_head;
+ int nr_buff_pages = 0;
+ int nr_isodesc_pages = 0;
+ int nr_grants = 0;
+
+ if (urb->transfer_buffer_length) {
+ nr_buff_pages = xenhcd_count_pages(urb->transfer_buffer,
+ urb->transfer_buffer_length);
+
+ if (usb_pipeisoc(urb->pipe))
+ nr_isodesc_pages = xenhcd_count_pages(
+ &urb->iso_frame_desc[0],
+ sizeof(struct usb_iso_packet_descriptor) *
+ urb->number_of_packets);
+
+ nr_grants = nr_buff_pages + nr_isodesc_pages;
+ if (nr_grants > XENUSB_MAX_SEGMENTS_PER_REQUEST) {
+ pr_err("xenhcd: error: %d grants\n", nr_grants);
+ return -E2BIG;
+ }
+
+ if (gnttab_alloc_grant_references(nr_grants, &gref_head)) {
+ pr_err("xenhcd: gnttab_alloc_grant_references() error\n");
+ return -ENOMEM;
+ }
+
+ xenhcd_gnttab_map(info, urb->transfer_buffer,
+ urb->transfer_buffer_length, &gref_head,
+ &req->seg[0], nr_buff_pages,
+ usb_pipein(urb->pipe) ? 0 : GTF_readonly);
+ }
+
+ req->pipe = xenhcd_pipe_urb_to_xenusb(urb->pipe, urb->dev->portnum);
+ req->transfer_flags = 0;
+ if (urb->transfer_flags & URB_SHORT_NOT_OK)
+ req->transfer_flags |= XENUSB_SHORT_NOT_OK;
+ req->buffer_length = urb->transfer_buffer_length;
+ req->nr_buffer_segs = nr_buff_pages;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_ISOCHRONOUS:
+ req->u.isoc.interval = urb->interval;
+ req->u.isoc.start_frame = urb->start_frame;
+ req->u.isoc.number_of_packets = urb->number_of_packets;
+ req->u.isoc.nr_frame_desc_segs = nr_isodesc_pages;
+
+ xenhcd_gnttab_map(info, &urb->iso_frame_desc[0],
+ sizeof(struct usb_iso_packet_descriptor) *
+ urb->number_of_packets,
+ &gref_head, &req->seg[nr_buff_pages],
+ nr_isodesc_pages, 0);
+ break;
+ case PIPE_INTERRUPT:
+ req->u.intr.interval = urb->interval;
+ break;
+ case PIPE_CONTROL:
+ if (urb->setup_packet)
+ memcpy(req->u.ctrl, urb->setup_packet, 8);
+ break;
+ case PIPE_BULK:
+ break;
+ default:
+ break;
+ }
+
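+ /* release any grant references left unclaimed on the list head */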
+ if (nr_grants)
+ gnttab_free_grant_references(gref_head);
+
+ return 0;
+}
+
+static void xenhcd_gnttab_done(struct usb_shadow *shadow)
+{
+ int nr_segs = 0;
+ int i;
+
+ nr_segs = shadow->req.nr_buffer_segs;
+
+ if (xenusb_pipeisoc(shadow->req.pipe))
+ nr_segs += shadow->req.u.isoc.nr_frame_desc_segs;
+
+ for (i = 0; i < nr_segs; i++)
+ gnttab_end_foreign_access(shadow->req.seg[i].gref, 0, 0UL);
+
+ shadow->req.nr_buffer_segs = 0;
+ shadow->req.u.isoc.nr_frame_desc_segs = 0;
+}
+
+static int xenhcd_translate_status(int status)
+{
+ switch (status) {
+ case XENUSB_STATUS_OK:
+ return 0;
+ case XENUSB_STATUS_NODEV:
+ return -ENODEV;
+ case XENUSB_STATUS_INVAL:
+ return -EINVAL;
+ case XENUSB_STATUS_STALL:
+ return -EPIPE;
+ case XENUSB_STATUS_IOERROR:
+ return -EPROTO;
+ case XENUSB_STATUS_BABBLE:
+ return -EOVERFLOW;
+ default:
+ return -ESHUTDOWN;
+ }
+}
+
+static void xenhcd_giveback_urb(struct xenhcd_info *info, struct urb *urb,
+ int status)
+{
+ struct urb_priv *urbp = (struct urb_priv *)urb->hcpriv;
+ int priv_status = urbp->status;
+
+ list_del_init(&urbp->list);
+ xenhcd_free_urb_priv(urbp);
+
+ if (urb->status == -EINPROGRESS)
+ urb->status = xenhcd_translate_status(status);
+
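+ /* drop the lock: the completion handler may re-enter enqueue/dequeue */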
+ spin_unlock(&info->lock);
+ usb_hcd_giveback_urb(xenhcd_info_to_hcd(info), urb,
+ priv_status <= 0 ? priv_status : urb->status);
+ spin_lock(&info->lock);
+}
+
+static int xenhcd_do_request(struct xenhcd_info *info, struct urb_priv *urbp)
+{
+ struct xenusb_urb_request *req;
+ struct urb *urb = urbp->urb;
+ unsigned int id;
+ int notify;
+ int ret;
+
+ id = xenhcd_get_id_from_freelist(info);
+ req = &info->shadow[id].req;
+ req->id = id;
+
+ if (unlikely(urbp->unlinked)) {
+ req->u.unlink.unlink_id = urbp->req_id;
+ req->pipe = xenusb_setunlink_pipe(xenhcd_pipe_urb_to_xenusb(
+ urb->pipe, urb->dev->portnum));
+ urbp->unlink_req_id = id;
+ } else {
+ ret = xenhcd_map_urb_for_request(info, urb, req);
+ if (ret) {
+ xenhcd_add_id_to_freelist(info, id);
+ return ret;
+ }
+ urbp->req_id = id;
+ }
+
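+ /* place a copy of the shadow request in the shared ring slot */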
+ req = RING_GET_REQUEST(&info->urb_ring, info->urb_ring.req_prod_pvt);
+ *req = info->shadow[id].req;
+
+ info->urb_ring.req_prod_pvt++;
+ info->shadow[id].urb = urb;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->urb_ring, notify);
+ if (notify)
+ notify_remote_via_irq(info->irq);
+
+ return 0;
+}
+
+static void xenhcd_kick_pending_urbs(struct xenhcd_info *info)
+{
+ struct urb_priv *urbp;
+
+ while (!list_empty(&info->pending_submit_list)) {
+ if (RING_FULL(&info->urb_ring)) {
+ xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
+ return;
+ }
+
+ urbp = list_entry(info->pending_submit_list.next,
+ struct urb_priv, list);
+ if (!xenhcd_do_request(info, urbp))
+ list_move_tail(&urbp->list, &info->in_progress_list);
+ else
+ xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
+ }
+ xenhcd_timer_action_done(info, TIMER_SCAN_PENDING_URBS);
+}
+
+/*
+ * caller must hold info->lock
+ */
+static void xenhcd_cancel_all_enqueued_urbs(struct xenhcd_info *info)
+{
+ struct urb_priv *urbp, *tmp;
+ int req_id;
+
+ list_for_each_entry_safe(urbp, tmp, &info->in_progress_list, list) {
+ req_id = urbp->req_id;
+ if (!urbp->unlinked) {
+ xenhcd_gnttab_done(&info->shadow[req_id]);
+ if (urbp->urb->status == -EINPROGRESS)
+ /* not dequeued */
+ xenhcd_giveback_urb(info, urbp->urb,
+ -ESHUTDOWN);
+ else /* dequeued */
+ xenhcd_giveback_urb(info, urbp->urb,
+ urbp->urb->status);
+ }
+ info->shadow[req_id].urb = NULL;
+ }
+
+ list_for_each_entry_safe(urbp, tmp, &info->pending_submit_list, list)
+ xenhcd_giveback_urb(info, urbp->urb, -ESHUTDOWN);
+}
+
+/*
+ * caller must hold info->lock
+ */
+static void xenhcd_giveback_unlinked_urbs(struct xenhcd_info *info)
+{
+ struct urb_priv *urbp, *tmp;
+
+ list_for_each_entry_safe(urbp, tmp, &info->giveback_waiting_list, list)
+ xenhcd_giveback_urb(info, urbp->urb, urbp->urb->status);
+}
+
+static int xenhcd_submit_urb(struct xenhcd_info *info, struct urb_priv *urbp)
+{
+ int ret;
+
+ if (RING_FULL(&info->urb_ring)) {
+ list_add_tail(&urbp->list, &info->pending_submit_list);
+ xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
+ return 0;
+ }
+
+ if (!list_empty(&info->pending_submit_list)) {
+ list_add_tail(&urbp->list, &info->pending_submit_list);
+ xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
+ return 0;
+ }
+
+ ret = xenhcd_do_request(info, urbp);
+ if (ret == 0)
+ list_add_tail(&urbp->list, &info->in_progress_list);
+
+ return ret;
+}
+
+static int xenhcd_unlink_urb(struct xenhcd_info *info, struct urb_priv *urbp)
+{
+ int ret;
+
+ /* already unlinked? */
+ if (urbp->unlinked)
+ return -EBUSY;
+
+ urbp->unlinked = true;
+
+ /* the urb is still in pending_submit queue */
+ if (urbp->req_id == ~0) {
+ list_move_tail(&urbp->list, &info->giveback_waiting_list);
+ xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
+ return 0;
+ }
+
+ /* send unlink request to backend */
+ if (RING_FULL(&info->urb_ring)) {
+ list_move_tail(&urbp->list, &info->pending_unlink_list);
+ xenhcd_timer_action(info, TIMER_RING_WATCHDOG);
+ return 0;
+ }
+
+ if (!list_empty(&info->pending_unlink_list)) {
+ list_move_tail(&urbp->list, &info->pending_unlink_list);
+ xenhcd_timer_action(info, TIMER_SCAN_PENDING_URBS);
+ return 0;
+ }
+
+ ret = xenhcd_do_request(info, urbp);
+ if (ret == 0)
+ list_move_tail(&urbp->list, &info->in_progress_list);
+
+ return ret;
+}
+
+static int xenhcd_urb_request_done(struct xenhcd_info *info)
+{
+ struct xenusb_urb_response res;
+ struct urb *urb;
+ RING_IDX i, rp;
+ __u16 id;
+ int more_to_do = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ rp = info->urb_ring.sring->rsp_prod;
+ if (RING_RESPONSE_PROD_OVERFLOW(&info->urb_ring, rp)) {
+ xenhcd_set_error(info, "Illegal index on urb-ring");
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+ }
+ rmb(); /* ensure we see queued responses up to "rp" */
+
+ for (i = info->urb_ring.rsp_cons; i != rp; i++) {
+ RING_COPY_RESPONSE(&info->urb_ring, i, &res);
+ id = res.id;
+ if (id >= XENUSB_URB_RING_SIZE) {
+ xenhcd_set_error(info, "Illegal data on urb-ring");
+ continue;
+ }
+
+ if (likely(xenusb_pipesubmit(info->shadow[id].req.pipe))) {
+ xenhcd_gnttab_done(&info->shadow[id]);
+ urb = info->shadow[id].urb;
+ if (likely(urb)) {
+ urb->actual_length = res.actual_length;
+ urb->error_count = res.error_count;
+ urb->start_frame = res.start_frame;
+ xenhcd_giveback_urb(info, urb, res.status);
+ }
+ }
+
+ xenhcd_add_id_to_freelist(info, id);
+ }
+ info->urb_ring.rsp_cons = i;
+
+ if (i != info->urb_ring.req_prod_pvt)
+ RING_FINAL_CHECK_FOR_RESPONSES(&info->urb_ring, more_to_do);
+ else
+ info->urb_ring.sring->rsp_event = i + 1;
+
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ return more_to_do;
+}
+
+static int xenhcd_conn_notify(struct xenhcd_info *info)
+{
+ struct xenusb_conn_response res;
+ struct xenusb_conn_request *req;
+ RING_IDX rc, rp;
+ __u16 id;
+ __u8 portnum, speed;
+ int more_to_do = 0;
+ int notify;
+ int port_changed = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ rc = info->conn_ring.rsp_cons;
+ rp = info->conn_ring.sring->rsp_prod;
+ if (RING_RESPONSE_PROD_OVERFLOW(&info->conn_ring, rp)) {
+ xenhcd_set_error(info, "Illegal index on conn-ring");
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+ }
+ rmb(); /* ensure we see queued responses up to "rp" */
+
+ while (rc != rp) {
+ RING_COPY_RESPONSE(&info->conn_ring, rc, &res);
+ id = res.id;
+ portnum = res.portnum;
+ speed = res.speed;
+ info->conn_ring.rsp_cons = ++rc;
+
+ if (xenhcd_rhport_connect(info, portnum, speed)) {
+ xenhcd_set_error(info, "Illegal data on conn-ring");
+ spin_unlock_irqrestore(&info->lock, flags);
+ return 0;
+ }
+
+ if (info->ports[portnum - 1].c_connection)
+ port_changed = 1;
+
+ barrier();
+
+ req = RING_GET_REQUEST(&info->conn_ring,
+ info->conn_ring.req_prod_pvt);
+ req->id = id;
+ info->conn_ring.req_prod_pvt++;
+ }
+
+ if (rc != info->conn_ring.req_prod_pvt)
+ RING_FINAL_CHECK_FOR_RESPONSES(&info->conn_ring, more_to_do);
+ else
+ info->conn_ring.sring->rsp_event = rc + 1;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
+ if (notify)
+ notify_remote_via_irq(info->irq);
+
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ if (port_changed)
+ usb_hcd_poll_rh_status(xenhcd_info_to_hcd(info));
+
+ return more_to_do;
+}
+
+static irqreturn_t xenhcd_int(int irq, void *dev_id)
+{
+ struct xenhcd_info *info = (struct xenhcd_info *)dev_id;
+
+ if (unlikely(info->error))
+ return IRQ_HANDLED;
+
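+ /* bitwise OR is intentional: always poll both rings in each pass */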
+ while (xenhcd_urb_request_done(info) | xenhcd_conn_notify(info))
+ /* Yield point for this unbounded loop. */
+ cond_resched();
+
+ return IRQ_HANDLED;
+}
+
+static void xenhcd_destroy_rings(struct xenhcd_info *info)
+{
+ if (info->irq)
+ unbind_from_irqhandler(info->irq, info);
+ info->irq = 0;
+
+ if (info->urb_ring_ref != GRANT_INVALID_REF) {
+ gnttab_end_foreign_access(info->urb_ring_ref, 0,
+ (unsigned long)info->urb_ring.sring);
+ info->urb_ring_ref = GRANT_INVALID_REF;
+ }
+ info->urb_ring.sring = NULL;
+
+ if (info->conn_ring_ref != GRANT_INVALID_REF) {
+ gnttab_end_foreign_access(info->conn_ring_ref, 0,
+ (unsigned long)info->conn_ring.sring);
+ info->conn_ring_ref = GRANT_INVALID_REF;
+ }
+ info->conn_ring.sring = NULL;
+}
+
+static int xenhcd_setup_rings(struct xenbus_device *dev,
+ struct xenhcd_info *info)
+{
+ struct xenusb_urb_sring *urb_sring;
+ struct xenusb_conn_sring *conn_sring;
+ grant_ref_t gref;
+ int err;
+
+ info->urb_ring_ref = GRANT_INVALID_REF;
+ info->conn_ring_ref = GRANT_INVALID_REF;
+
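+ /* GFP_NOIO | __GFP_HIGH: avoid recursing into I/O while setting up the rings */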
+ urb_sring = (struct xenusb_urb_sring *)get_zeroed_page(
+ GFP_NOIO | __GFP_HIGH);
+ if (!urb_sring) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating urb ring");
+ return -ENOMEM;
+ }
+ SHARED_RING_INIT(urb_sring);
+ FRONT_RING_INIT(&info->urb_ring, urb_sring, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, urb_sring, 1, &gref);
+ if (err < 0) {
+ free_page((unsigned long)urb_sring);
+ info->urb_ring.sring = NULL;
+ goto fail;
+ }
+ info->urb_ring_ref = gref;
+
+ conn_sring = (struct xenusb_conn_sring *)get_zeroed_page(
+ GFP_NOIO | __GFP_HIGH);
+ if (!conn_sring) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating conn ring");
+ err = -ENOMEM;
+ goto fail;
+ }
+ SHARED_RING_INIT(conn_sring);
+ FRONT_RING_INIT(&info->conn_ring, conn_sring, PAGE_SIZE);
+
+ err = xenbus_grant_ring(dev, conn_sring, 1, &gref);
+ if (err < 0) {
+ free_page((unsigned long)conn_sring);
+ info->conn_ring.sring = NULL;
+ goto fail;
+ }
+ info->conn_ring_ref = gref;
+
+ err = xenbus_alloc_evtchn(dev, &info->evtchn);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
+ goto fail;
+ }
+
+ err = bind_evtchn_to_irq(info->evtchn);
+ if (err <= 0) {
+ xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
+ goto fail;
+ }
+
+ info->irq = err;
+
+ err = request_threaded_irq(info->irq, NULL, xenhcd_int,
+ IRQF_ONESHOT, "xenhcd", info);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "request_threaded_irq");
+ goto free_irq;
+ }
+
+ return 0;
+
+free_irq:
+ unbind_from_irqhandler(info->irq, info);
+fail:
+ xenhcd_destroy_rings(info);
+ return err;
+}
+
+static int xenhcd_talk_to_backend(struct xenbus_device *dev,
+ struct xenhcd_info *info)
+{
+ const char *message;
+ struct xenbus_transaction xbt;
+ int err;
+
+ err = xenhcd_setup_rings(dev, info);
+ if (err)
+ return err;
+
+again:
+ err = xenbus_transaction_start(&xbt);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "starting transaction");
+ goto destroy_ring;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "urb-ring-ref", "%u",
+ info->urb_ring_ref);
+ if (err) {
+ message = "writing urb-ring-ref";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "conn-ring-ref", "%u",
+ info->conn_ring_ref);
+ if (err) {
+ message = "writing conn-ring-ref";
+ goto abort_transaction;
+ }
+
+ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+ info->evtchn);
+ if (err) {
+ message = "writing event-channel";
+ goto abort_transaction;
+ }
+
+ err = xenbus_transaction_end(xbt, 0);
+ if (err) {
+ if (err == -EAGAIN)
+ goto again;
+ xenbus_dev_fatal(dev, err, "completing transaction");
+ goto destroy_ring;
+ }
+
+ return 0;
+
+abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ xenbus_dev_fatal(dev, err, "%s", message);
+
+destroy_ring:
+ xenhcd_destroy_rings(info);
+
+ return err;
+}
+
+static int xenhcd_connect(struct xenbus_device *dev)
+{
+ struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
+ struct xenusb_conn_request *req;
+ int idx, err;
+ int notify;
+ char name[TASK_COMM_LEN];
+ struct usb_hcd *hcd;
+
+ hcd = xenhcd_info_to_hcd(info);
+ snprintf(name, TASK_COMM_LEN, "xenhcd.%d", hcd->self.busnum);
+
+ err = xenhcd_talk_to_backend(dev, info);
+ if (err)
+ return err;
+
+ /* prepare ring for hotplug notification */
+ for (idx = 0; idx < XENUSB_CONN_RING_SIZE; idx++) {
+ req = RING_GET_REQUEST(&info->conn_ring, idx);
+ req->id = idx;
+ }
+ info->conn_ring.req_prod_pvt = idx;
+
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->conn_ring, notify);
+ if (notify)
+ notify_remote_via_irq(info->irq);
+
+ return 0;
+}
+
+static void xenhcd_disconnect(struct xenbus_device *dev)
+{
+ struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
+ struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
+
+ usb_remove_hcd(hcd);
+ xenbus_frontend_closed(dev);
+}
+
+static void xenhcd_watchdog(struct timer_list *timer)
+{
+ struct xenhcd_info *info = from_timer(info, timer, watchdog);
+ unsigned long flags;
+
+ spin_lock_irqsave(&info->lock, flags);
+ if (likely(HC_IS_RUNNING(xenhcd_info_to_hcd(info)->state))) {
+ xenhcd_timer_action_done(info, TIMER_RING_WATCHDOG);
+ xenhcd_giveback_unlinked_urbs(info);
+ xenhcd_kick_pending_urbs(info);
+ }
+ spin_unlock_irqrestore(&info->lock, flags);
+}
+
+/*
+ * one-time HC init
+ */
+static int xenhcd_setup(struct usb_hcd *hcd)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+
+ spin_lock_init(&info->lock);
+ INIT_LIST_HEAD(&info->pending_submit_list);
+ INIT_LIST_HEAD(&info->pending_unlink_list);
+ INIT_LIST_HEAD(&info->in_progress_list);
+ INIT_LIST_HEAD(&info->giveback_waiting_list);
+ timer_setup(&info->watchdog, xenhcd_watchdog, 0);
+
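+ /* a USB 2.0 host controller needs a TT for low-/full-speed devices */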
+ hcd->has_tt = (hcd->driver->flags & HCD_MASK) != HCD_USB11;
+
+ return 0;
+}
+
+/*
+ * start HC running
+ */
+static int xenhcd_run(struct usb_hcd *hcd)
+{
+ hcd->uses_new_polling = 1;
+ clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ hcd->state = HC_STATE_RUNNING;
+ return 0;
+}
+
+/*
+ * stop running HC
+ */
+static void xenhcd_stop(struct usb_hcd *hcd)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+
+ del_timer_sync(&info->watchdog);
+ spin_lock_irq(&info->lock);
+ /* cancel all urbs */
+ hcd->state = HC_STATE_HALT;
+ xenhcd_cancel_all_enqueued_urbs(info);
+ xenhcd_giveback_unlinked_urbs(info);
+ spin_unlock_irq(&info->lock);
+}
+
+/*
+ * called as .urb_enqueue()
+ * a non-error return is a promise to give back the urb later
+ */
+static int xenhcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+ struct urb_priv *urbp;
+ unsigned long flags;
+ int ret;
+
+ if (unlikely(info->error))
+ return -ESHUTDOWN;
+
+ urbp = kmem_cache_zalloc(xenhcd_urbp_cachep, mem_flags);
+ if (!urbp)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ urbp->urb = urb;
+ urb->hcpriv = urbp;
+ urbp->req_id = ~0;
+ urbp->unlink_req_id = ~0;
+ INIT_LIST_HEAD(&urbp->list);
+ urbp->status = 1;
+ urb->unlinked = false;
+
+ ret = xenhcd_submit_urb(info, urbp);
+
+ if (ret)
+ xenhcd_free_urb_priv(urbp);
+
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ return ret;
+}
+
+/*
+ * called as .urb_dequeue()
+ */
+static int xenhcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct xenhcd_info *info = xenhcd_hcd_to_info(hcd);
+ struct urb_priv *urbp;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&info->lock, flags);
+
+ urbp = urb->hcpriv;
+ if (urbp) {
+ urbp->status = status;
+ ret = xenhcd_unlink_urb(info, urbp);
+ }
+
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ return ret;
+}
+
+/*
+ * called from usb_get_current_frame_number(),
+ * but almost no drivers use this function.
+ */
+static int xenhcd_get_frame(struct usb_hcd *hcd)
+{
+ /* strictly an error value, but harmless in practice :-) */
+ return 0;
+}
+
+static struct hc_driver xenhcd_usb20_hc_driver = {
+ .description = "xen-hcd",
+ .product_desc = "Xen USB2.0 Virtual Host Controller",
+ .hcd_priv_size = sizeof(struct xenhcd_info),
+ .flags = HCD_USB2,
+
+ /* basic HC lifecycle operations */
+ .reset = xenhcd_setup,
+ .start = xenhcd_run,
+ .stop = xenhcd_stop,
+
+ /* managing urb I/O */
+ .urb_enqueue = xenhcd_urb_enqueue,
+ .urb_dequeue = xenhcd_urb_dequeue,
+ .get_frame_number = xenhcd_get_frame,
+
+ /* root hub operations */
+ .hub_status_data = xenhcd_hub_status_data,
+ .hub_control = xenhcd_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = xenhcd_bus_suspend,
+ .bus_resume = xenhcd_bus_resume,
+#endif
+};
+
+static struct hc_driver xenhcd_usb11_hc_driver = {
+ .description = "xen-hcd",
+ .product_desc = "Xen USB1.1 Virtual Host Controller",
+ .hcd_priv_size = sizeof(struct xenhcd_info),
+ .flags = HCD_USB11,
+
+ /* basic HC lifecycle operations */
+ .reset = xenhcd_setup,
+ .start = xenhcd_run,
+ .stop = xenhcd_stop,
+
+ /* managing urb I/O */
+ .urb_enqueue = xenhcd_urb_enqueue,
+ .urb_dequeue = xenhcd_urb_dequeue,
+ .get_frame_number = xenhcd_get_frame,
+
+ /* root hub operations */
+ .hub_status_data = xenhcd_hub_status_data,
+ .hub_control = xenhcd_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = xenhcd_bus_suspend,
+ .bus_resume = xenhcd_bus_resume,
+#endif
+};
+
+static struct usb_hcd *xenhcd_create_hcd(struct xenbus_device *dev)
+{
+ int i;
+ int err = 0;
+ int num_ports;
+ int usb_ver;
+ struct usb_hcd *hcd = NULL;
+ struct xenhcd_info *info;
+
+ err = xenbus_scanf(XBT_NIL, dev->otherend, "num-ports", "%d",
+ &num_ports);
+ if (err != 1) {
+ xenbus_dev_fatal(dev, err, "reading num-ports");
+ return ERR_PTR(-EINVAL);
+ }
+ if (num_ports < 1 || num_ports > XENUSB_MAX_PORTNR) {
+ xenbus_dev_fatal(dev, err, "invalid num-ports");
+ return ERR_PTR(-EINVAL);
+ }
+
+ err = xenbus_scanf(XBT_NIL, dev->otherend, "usb-ver", "%d", &usb_ver);
+ if (err != 1) {
+ xenbus_dev_fatal(dev, err, "reading usb-ver");
+ return ERR_PTR(-EINVAL);
+ }
+ switch (usb_ver) {
+ case XENUSB_VER_USB11:
+ hcd = usb_create_hcd(&xenhcd_usb11_hc_driver, &dev->dev,
+ dev_name(&dev->dev));
+ break;
+ case XENUSB_VER_USB20:
+ hcd = usb_create_hcd(&xenhcd_usb20_hc_driver, &dev->dev,
+ dev_name(&dev->dev));
+ break;
+ default:
+ xenbus_dev_fatal(dev, err, "invalid usb-ver");
+ return ERR_PTR(-EINVAL);
+ }
+ if (!hcd) {
+ xenbus_dev_fatal(dev, err,
+ "fail to allocate USB host controller");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ info = xenhcd_hcd_to_info(hcd);
+ info->xbdev = dev;
+ info->rh_numports = num_ports;
+
+ for (i = 0; i < XENUSB_URB_RING_SIZE; i++) {
+ info->shadow[i].req.id = i + 1;
+ info->shadow[i].urb = NULL;
+ }
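+ /* mark the end of the free list with an out-of-range id */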
+ info->shadow[XENUSB_URB_RING_SIZE - 1].req.id = 0x0fff;
+
+ return hcd;
+}
+
+static void xenhcd_backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ switch (backend_state) {
+ case XenbusStateInitialising:
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
+ case XenbusStateUnknown:
+ break;
+
+ case XenbusStateInitWait:
+ case XenbusStateInitialised:
+ case XenbusStateConnected:
+ if (dev->state != XenbusStateInitialising)
+ break;
+ if (!xenhcd_connect(dev))
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ fallthrough; /* Missed the backend's Closing state. */
+ case XenbusStateClosing:
+ xenhcd_disconnect(dev);
+ break;
+
+ default:
+ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
+ backend_state);
+ break;
+ }
+}
+
+static int xenhcd_remove(struct xenbus_device *dev)
+{
+ struct xenhcd_info *info = dev_get_drvdata(&dev->dev);
+ struct usb_hcd *hcd = xenhcd_info_to_hcd(info);
+
+ xenhcd_destroy_rings(info);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static int xenhcd_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ int err;
+ struct usb_hcd *hcd;
+ struct xenhcd_info *info;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ hcd = xenhcd_create_hcd(dev);
+ if (IS_ERR(hcd)) {
+ err = PTR_ERR(hcd);
+ xenbus_dev_fatal(dev, err,
+ "fail to create usb host controller");
+ return err;
+ }
+
+ info = xenhcd_hcd_to_info(hcd);
+ dev_set_drvdata(&dev->dev, info);
+
+ err = usb_add_hcd(hcd, 0, 0);
+ if (err) {
+ xenbus_dev_fatal(dev, err, "fail to add USB host controller");
+ usb_put_hcd(hcd);
+ dev_set_drvdata(&dev->dev, NULL);
+ }
+
+ return err;
+}
+
+static const struct xenbus_device_id xenhcd_ids[] = {
+ { "vusb" },
+ { "" },
+};
+
+static struct xenbus_driver xenhcd_driver = {
+ .ids = xenhcd_ids,
+ .probe = xenhcd_probe,
+ .otherend_changed = xenhcd_backend_changed,
+ .remove = xenhcd_remove,
+};
+
+static int __init xenhcd_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ xenhcd_urbp_cachep = kmem_cache_create("xenhcd_urb_priv",
+ sizeof(struct urb_priv), 0, 0, NULL);
+ if (!xenhcd_urbp_cachep) {
+ pr_err("xenhcd failed to create kmem cache\n");
+ return -ENOMEM;
+ }
+
+ return xenbus_register_frontend(&xenhcd_driver);
+}
+module_init(xenhcd_init);
+
+static void __exit xenhcd_exit(void)
+{
+ kmem_cache_destroy(xenhcd_urbp_cachep);
+ xenbus_unregister_driver(&xenhcd_driver);
+}
+module_exit(xenhcd_exit);
+
+MODULE_ALIAS("xen:vusb");
+MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");
+MODULE_DESCRIPTION("Xen USB Virtual Host Controller driver (xen-hcd)");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 58a0eae4f41b..91738af0ab14 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -245,11 +245,12 @@ static int xhci_mtk_host_disable(struct xhci_hcd_mtk *mtk)
/* wait for host ip to sleep */
ret = readl_poll_timeout(&ippc->ip_pw_sts1, value,
(value & STS1_IP_SLEEP_STS), 100, 100000);
- if (ret) {
+ if (ret)
dev_err(mtk->dev, "ip sleep failed!!!\n");
- return ret;
- }
- return 0;
+ else /* workaround for platforms using a low-level latch */
+ usleep_range(100, 200);
+
+ return ret;
}
static int xhci_mtk_ssusb_config(struct xhci_hcd_mtk *mtk)
@@ -300,7 +301,7 @@ static void usb_wakeup_ip_sleep_set(struct xhci_hcd_mtk *mtk, bool enable)
case SSUSB_UWK_V1_1:
reg = mtk->uwk_reg_base + PERI_WK_CTRL0;
msk = WC0_IS_EN | WC0_IS_C(0xf) | WC0_IS_P;
- val = enable ? (WC0_IS_EN | WC0_IS_C(0x8)) : 0;
+ val = enable ? (WC0_IS_EN | WC0_IS_C(0x1)) : 0;
break;
case SSUSB_UWK_V1_2:
reg = mtk->uwk_reg_base + PERI_WK_CTRL0;
@@ -437,11 +438,8 @@ static int xhci_mtk_setup(struct usb_hcd *hcd)
if (ret)
return ret;
- if (usb_hcd_is_primary_hcd(hcd)) {
+ if (usb_hcd_is_primary_hcd(hcd))
ret = xhci_mtk_sch_init(mtk);
- if (ret)
- return ret;
- }
return ret;
}
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index c1edcc9b13ce..dc570ce4e831 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -437,6 +437,9 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int ret;
+ if (pm_runtime_suspended(dev))
+ pm_runtime_resume(dev);
+
ret = xhci_priv_suspend_quirk(hcd);
if (ret)
return ret;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index f5b1bcc875de..dc357cabb265 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4998,10 +4998,8 @@ static int calculate_max_exit_latency(struct usb_device *udev,
enabling_u2)
u2_mel_us = DIV_ROUND_UP(udev->u2_params.mel, 1000);
- if (u1_mel_us > u2_mel_us)
- mel_us = u1_mel_us;
- else
- mel_us = u2_mel_us;
+ mel_us = max(u1_mel_us, u2_mel_us);
+
/* xHCI host controller max exit latency field is only 16 bits wide. */
if (mel_us > MAX_EXIT) {
dev_warn(&udev->dev, "Link PM max exit latency of %lluus "
diff --git a/drivers/usb/isp1760/isp1760-if.c b/drivers/usb/isp1760/isp1760-if.c
index 7cc349c0b2ad..65ba5aca2a4f 100644
--- a/drivers/usb/isp1760/isp1760-if.c
+++ b/drivers/usb/isp1760/isp1760-if.c
@@ -13,6 +13,7 @@
#include <linux/usb.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
@@ -191,17 +192,15 @@ static int isp1760_plat_probe(struct platform_device *pdev)
unsigned long irqflags;
unsigned int devflags = 0;
struct resource *mem_res;
- struct resource *irq_res;
+ int irq;
int ret;
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq_res) {
- pr_warn("isp1760: IRQ resource not available\n");
- return -ENODEV;
- }
- irqflags = irq_res->flags & IRQF_TRIGGER_MASK;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+ irqflags = irq_get_trigger_type(irq);
if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
struct device_node *dp = pdev->dev.of_node;
@@ -239,8 +238,7 @@ static int isp1760_plat_probe(struct platform_device *pdev)
return -ENXIO;
}
- ret = isp1760_register(mem_res, irq_res->start, irqflags, &pdev->dev,
- devflags);
+ ret = isp1760_register(mem_res, irq, irqflags, &pdev->dev, devflags);
if (ret < 0)
return ret;
diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c
index f87890f9cd26..986d6589f053 100644
--- a/drivers/usb/misc/ehset.c
+++ b/drivers/usb/misc/ehset.c
@@ -18,6 +18,52 @@
#define TEST_SINGLE_STEP_GET_DEV_DESC 0x0107
#define TEST_SINGLE_STEP_SET_FEATURE 0x0108
+extern const struct usb_device_id *usb_device_match_id(struct usb_device *udev,
+ const struct usb_device_id *id);
+
+/*
+ * A list of USB hubs that require the port power to be disabled
+ * before starting the testing procedures.
+ */
+static const struct usb_device_id ehset_hub_list[] = {
+ { USB_DEVICE(0x0424, 0x4502) },
+ { USB_DEVICE(0x0424, 0x4913) },
+ { USB_DEVICE(0x0451, 0x8027) },
+ { }
+};
+
+static int ehset_prepare_port_for_testing(struct usb_device *hub_udev, u16 portnum)
+{
+ int ret = 0;
+
+ /*
+ * The USB 2.0 spec, chapter 11.24.2.13, says that the USB port under
+ * test needs to be put in suspend before sending the test command.
+ * Most hubs don't enforce this precondition, but some hubs need the
+ * port power to be disabled before starting the test.
+ */
+ if (usb_device_match_id(hub_udev, ehset_hub_list)) {
+ ret = usb_control_msg_send(hub_udev, 0, USB_REQ_CLEAR_FEATURE,
+ USB_RT_PORT, USB_PORT_FEAT_ENABLE,
+ portnum, NULL, 0, 1000, GFP_KERNEL);
+ /*
+ * Wait for the port to be disabled. The delay is arbitrary,
+ * but it has proven sufficient in practice.
+ */
+ msleep(100);
+ } else {
+ /*
+ * For hubs that are compliant with the spec,
+ * put the port in SUSPEND.
+ */
+ ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
+ USB_RT_PORT, USB_PORT_FEAT_SUSPEND,
+ portnum, NULL, 0, 1000, GFP_KERNEL);
+ }
+ return ret;
+}
+
static int ehset_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -30,24 +76,36 @@ static int ehset_probe(struct usb_interface *intf,
switch (test_pid) {
case TEST_SE0_NAK_PID:
+ ret = ehset_prepare_port_for_testing(hub_udev, portnum);
+ if (ret < 0)
+ break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_SE0_NAK << 8) | portnum,
NULL, 0, 1000, GFP_KERNEL);
break;
case TEST_J_PID:
+ ret = ehset_prepare_port_for_testing(hub_udev, portnum);
+ if (ret < 0)
+ break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_J << 8) | portnum, NULL, 0,
1000, GFP_KERNEL);
break;
case TEST_K_PID:
+ ret = ehset_prepare_port_for_testing(hub_udev, portnum);
+ if (ret < 0)
+ break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_K << 8) | portnum, NULL, 0,
1000, GFP_KERNEL);
break;
case TEST_PACKET_PID:
+ ret = ehset_prepare_port_for_testing(hub_udev, portnum);
+ if (ret < 0)
+ break;
ret = usb_control_msg_send(hub_udev, 0, USB_REQ_SET_FEATURE,
USB_RT_PORT, USB_PORT_FEAT_TEST,
(USB_TEST_PACKET << 8) | portnum,
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index e5a8fcdbb78e..6c38c62d29b2 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -202,6 +202,7 @@ static void ftdi_elan_delete(struct kref *kref)
mutex_unlock(&ftdi_module_lock);
kfree(ftdi->bulk_in_buffer);
ftdi->bulk_in_buffer = NULL;
+ kfree(ftdi);
}
static void ftdi_elan_put_kref(struct usb_ftdi *ftdi)
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 507deef1f709..04c4e3fed094 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -543,6 +543,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1))
hub->lang_id = USB251XB_DEF_LANGUAGE_ID;
+ if (of_property_read_u8(np, "boost-up", &hub->boost_up))
+ hub->boost_up = USB251XB_DEF_BOOST_UP;
+
cproperty_char = of_get_property(np, "manufacturer", NULL);
strlcpy(str, cproperty_char ? : USB251XB_DEF_MANUFACTURER_STRING,
sizeof(str));
@@ -584,7 +587,6 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
* may be as soon as needed.
*/
hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE;
- hub->boost_up = USB251XB_DEF_BOOST_UP;
hub->boost_57 = USB251XB_DEF_BOOST_57;
hub->boost_14 = USB251XB_DEF_BOOST_14;
hub->port_map12 = USB251XB_DEF_PORT_MAP_12;
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index 660641ab1545..bf2c0fa6cb32 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -500,6 +500,8 @@ static int am35x_probe(struct platform_device *pdev)
pinfo.num_res = pdev->num_resources;
pinfo.data = pdata;
pinfo.size_data = sizeof(*pdata);
+ pinfo.fwnode = of_fwnode_handle(pdev->dev.of_node);
+ pinfo.of_node_reused = true;
glue->musb = musb = platform_device_register_full(&pinfo);
if (IS_ERR(musb)) {
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 1c023c0091c4..fd4ae2dd24e5 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -505,7 +505,6 @@ static struct of_dev_auxdata da8xx_auxdata_lookup[] = {
static int da8xx_probe(struct platform_device *pdev)
{
- struct resource musb_resources[2];
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct da8xx_glue *glue;
struct platform_device_info pinfo;
@@ -558,25 +557,14 @@ static int da8xx_probe(struct platform_device *pdev)
if (ret)
return ret;
- memset(musb_resources, 0x00, sizeof(*musb_resources) *
- ARRAY_SIZE(musb_resources));
-
- musb_resources[0].name = pdev->resource[0].name;
- musb_resources[0].start = pdev->resource[0].start;
- musb_resources[0].end = pdev->resource[0].end;
- musb_resources[0].flags = pdev->resource[0].flags;
-
- musb_resources[1].name = pdev->resource[1].name;
- musb_resources[1].start = pdev->resource[1].start;
- musb_resources[1].end = pdev->resource[1].end;
- musb_resources[1].flags = pdev->resource[1].flags;
-
pinfo = da8xx_dev_info;
pinfo.parent = &pdev->dev;
- pinfo.res = musb_resources;
- pinfo.num_res = ARRAY_SIZE(musb_resources);
+ pinfo.res = pdev->resource;
+ pinfo.num_res = pdev->num_resources;
pinfo.data = pdata;
pinfo.size_data = sizeof(*pdata);
+ pinfo.fwnode = of_fwnode_handle(np);
+ pinfo.of_node_reused = true;
glue->musb = platform_device_register_full(&pinfo);
ret = PTR_ERR_OR_ZERO(glue->musb);
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index 5b7d576bf6ee..417c30bff9ca 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -231,6 +231,7 @@ static int jz4740_probe(struct platform_device *pdev)
musb->dev.parent = dev;
musb->dev.dma_mask = &musb->dev.coherent_dma_mask;
musb->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ device_set_of_node_from_dev(&musb->dev, dev);
glue->pdev = musb;
glue->clk = clk;
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index f5d97eb84cb5..1aeb34dbe24f 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -538,6 +538,8 @@ static int mtk_musb_probe(struct platform_device *pdev)
pinfo.num_res = pdev->num_resources;
pinfo.data = pdata;
pinfo.size_data = sizeof(*pdata);
+ pinfo.fwnode = of_fwnode_handle(np);
+ pinfo.of_node_reused = true;
glue->musb_pdev = platform_device_register_full(&pinfo);
if (IS_ERR(glue->musb_pdev)) {
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index b5935834f9d2..f75cde0f2b43 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -15,6 +15,7 @@
*/
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
@@ -739,12 +740,14 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue,
}
resources[0] = *res;
- res = platform_get_resource_byname(parent, IORESOURCE_IRQ, "mc");
- if (!res) {
- dev_err(dev, "failed to get irq.\n");
- return -EINVAL;
- }
- resources[1] = *res;
+ ret = platform_get_irq_byname(parent, "mc");
+ if (ret < 0)
+ return ret;
+
+ resources[1].start = ret;
+ resources[1].end = ret;
+ resources[1].flags = IORESOURCE_IRQ | irq_get_trigger_type(ret);
+ resources[1].name = "mc";
/* allocate the child platform device */
musb = platform_device_alloc("musb-hdrc",
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index f086960fe2b5..7d4d0713f4f0 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -301,7 +301,6 @@ static u64 omap2430_dmamask = DMA_BIT_MASK(32);
static int omap2430_probe(struct platform_device *pdev)
{
- struct resource musb_resources[3];
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct omap_musb_board_data *data;
struct platform_device *musb;
@@ -328,6 +327,7 @@ static int omap2430_probe(struct platform_device *pdev)
musb->dev.parent = &pdev->dev;
musb->dev.dma_mask = &omap2430_dmamask;
musb->dev.coherent_dma_mask = omap2430_dmamask;
+ device_set_of_node_from_dev(&musb->dev, &pdev->dev);
glue->dev = &pdev->dev;
glue->musb = musb;
@@ -383,26 +383,7 @@ static int omap2430_probe(struct platform_device *pdev)
INIT_WORK(&glue->omap_musb_mailbox_work, omap_musb_mailbox_work);
- memset(musb_resources, 0x00, sizeof(*musb_resources) *
- ARRAY_SIZE(musb_resources));
-
- musb_resources[0].name = pdev->resource[0].name;
- musb_resources[0].start = pdev->resource[0].start;
- musb_resources[0].end = pdev->resource[0].end;
- musb_resources[0].flags = pdev->resource[0].flags;
-
- musb_resources[1].name = pdev->resource[1].name;
- musb_resources[1].start = pdev->resource[1].start;
- musb_resources[1].end = pdev->resource[1].end;
- musb_resources[1].flags = pdev->resource[1].flags;
-
- musb_resources[2].name = pdev->resource[2].name;
- musb_resources[2].start = pdev->resource[2].start;
- musb_resources[2].end = pdev->resource[2].end;
- musb_resources[2].flags = pdev->resource[2].flags;
-
- ret = platform_device_add_resources(musb, musb_resources,
- ARRAY_SIZE(musb_resources));
+ ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources);
if (ret) {
dev_err(&pdev->dev, "failed to add resources\n");
goto err2;
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index 73538d1d0524..8ea62c344328 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -216,7 +216,6 @@ ux500_of_probe(struct platform_device *pdev, struct device_node *np)
static int ux500_probe(struct platform_device *pdev)
{
- struct resource musb_resources[2];
struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
struct platform_device *musb;
@@ -263,6 +262,7 @@ static int ux500_probe(struct platform_device *pdev)
musb->dev.parent = &pdev->dev;
musb->dev.dma_mask = &pdev->dev.coherent_dma_mask;
musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
+ device_set_of_node_from_dev(&musb->dev, &pdev->dev);
glue->dev = &pdev->dev;
glue->musb = musb;
@@ -273,21 +273,7 @@ static int ux500_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, glue);
- memset(musb_resources, 0x00, sizeof(*musb_resources) *
- ARRAY_SIZE(musb_resources));
-
- musb_resources[0].name = pdev->resource[0].name;
- musb_resources[0].start = pdev->resource[0].start;
- musb_resources[0].end = pdev->resource[0].end;
- musb_resources[0].flags = pdev->resource[0].flags;
-
- musb_resources[1].name = pdev->resource[1].name;
- musb_resources[1].start = pdev->resource[1].start;
- musb_resources[1].end = pdev->resource[1].end;
- musb_resources[1].flags = pdev->resource[1].flags;
-
- ret = platform_device_add_resources(musb, musb_resources,
- ARRAY_SIZE(musb_resources));
+ ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources);
if (ret) {
dev_err(&pdev->dev, "failed to add resources\n");
goto err2;
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index 576d925af77c..86503b7d695c 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -648,10 +648,8 @@ static int mv_otg_remove(struct platform_device *pdev)
{
struct mv_otg *mvotg = platform_get_drvdata(pdev);
- if (mvotg->qwork) {
- flush_workqueue(mvotg->qwork);
+ if (mvotg->qwork)
destroy_workqueue(mvotg->qwork);
- }
mv_otg_disable(mvotg);
@@ -825,7 +823,6 @@ static int mv_otg_probe(struct platform_device *pdev)
err_disable_clk:
mv_otg_disable_internal(mvotg);
err_destroy_workqueue:
- flush_workqueue(mvotg->qwork);
destroy_workqueue(mvotg->qwork);
return retval;
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 3af91b2b8f76..96f3939a65e2 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -589,11 +589,11 @@ static int usbhs_probe(struct platform_device *pdev)
{
const struct renesas_usbhs_platform_info *info;
struct usbhs_priv *priv;
- struct resource *irq_res;
struct device *dev = &pdev->dev;
struct gpio_desc *gpiod;
int ret;
u32 tmp;
+ int irq;
/* check device node */
if (dev_of_node(dev))
@@ -608,11 +608,9 @@ static int usbhs_probe(struct platform_device *pdev)
}
/* platform data */
- irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!irq_res) {
- dev_err(dev, "Not enough Renesas USB platform resources.\n");
- return -ENODEV;
- }
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
/* usb private data */
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -669,9 +667,7 @@ static int usbhs_probe(struct platform_device *pdev)
/*
* priv settings
*/
- priv->irq = irq_res->start;
- if (irq_res->flags & IORESOURCE_IRQ_SHAREABLE)
- priv->irqflags = IRQF_SHARED;
+ priv->irq = irq;
priv->pdev = pdev;
INIT_DELAYED_WORK(&priv->notify_hotplug_work, usbhsc_notify_hotplug);
spin_lock_init(usbhs_priv_to_lock(priv));
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index eb34d762a63d..3fb5bc94dc0d 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -252,7 +252,6 @@ struct usbhs_priv {
void __iomem *base;
unsigned int irq;
- unsigned long irqflags;
const struct renesas_usbhs_platform_callback *pfunc;
struct renesas_usbhs_driver_param dparam;
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index b98112cefaa4..f2ea3e1412d2 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -142,7 +142,7 @@ int usbhs_mod_probe(struct usbhs_priv *priv)
/* irq settings */
ret = devm_request_irq(dev, priv->irq, usbhs_interrupt,
- priv->irqflags, dev_name(dev), priv);
+ 0, dev_name(dev), priv);
if (ret) {
dev_err(dev, "irq request err\n");
goto mod_init_gadget_err;
@@ -219,18 +219,6 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv,
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
- /*
- * Check whether the irq enable registers and the irq status are set
- * when IRQF_SHARED is set.
- */
- if (priv->irqflags & IRQF_SHARED) {
- if (!(intenb0 & state->intsts0) &&
- !(intenb1 & state->intsts1) &&
- !(state->bempsts) &&
- !(state->brdysts))
- return -EIO;
- }
-
return 0;
}
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 29f4b87a9e74..58cba8ee0277 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -85,6 +85,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1a86, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7522) },
{ USB_DEVICE(0x1a86, 0x7523) },
+ { USB_DEVICE(0x2184, 0x0057) },
{ USB_DEVICE(0x4348, 0x5523) },
{ USB_DEVICE(0x9986, 0x7523) },
{ },
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 8a60c0d56863..a27f7efcec6a 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -51,6 +51,7 @@ static void cp210x_enable_event_mode(struct usb_serial_port *port);
static void cp210x_disable_event_mode(struct usb_serial_port *port);
static const struct usb_device_id id_table[] = {
+ { USB_DEVICE(0x0404, 0x034C) }, /* NCR Retail IO Box */
{ USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
@@ -68,6 +69,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */
{ USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */
{ USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */
+ { USB_DEVICE(0x106F, 0x0003) }, /* CPI / Money Controls Bulk Coin Recycler */
{ USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */
{ USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */
{ USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 4edebd14ef29..49c08f07c969 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -969,6 +969,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_159_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) },
@@ -977,12 +978,14 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_235_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) },
+ { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_320_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) },
{ USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 755858ca20ba..d1a9564697a4 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1506,6 +1506,9 @@
#define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */
#define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */
#define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */
+#define BRAINBOXES_US_159_PID 0x1021 /* US-159 1xRS232 */
+#define BRAINBOXES_US_235_PID 0x1017 /* US-235 1xRS232 */
+#define BRAINBOXES_US_320_PID 0x1019 /* US-320 1xRS422/485 */
#define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */
#define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */
#define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 42420bfc983c..962e9943fc20 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1649,6 +1649,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff), /* ZTE MF286D */
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index b9f78ef3edc3..0774ba22fb66 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -130,8 +130,6 @@ int sierra_ms_init(struct us_data *us)
struct swoc_info *swocInfo;
struct usb_device *udev;
- retries = 3;
- result = 0;
udev = us->pusb_dev;
/* Force Modem mode */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 4c5a0a49035f..1928b3918242 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -551,7 +551,7 @@ static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
/* Did this command access the last sector? */
sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
(srb->cmnd[4] << 8) | (srb->cmnd[5]);
- disk = scsi_cmd_to_rq(srb)->rq_disk;
+ disk = scsi_cmd_to_rq(srb)->q->disk;
if (!disk)
goto done;
sdkp = scsi_disk(disk);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 29191d33c0e3..1a05e3dcfec8 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2301,6 +2301,16 @@ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
+/*
+ * Reported by DocMAX <mail@vacharakis.de>
+ * and Thomas Weißschuh <linux@weissschuh.net>
+ */
+UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999,
+ "VIA Labs, Inc.",
+ "VL817 SATA Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
"ST",
"2A",
diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile
index a0adb8947a30..57870a2bd787 100644
--- a/drivers/usb/typec/Makefile
+++ b/drivers/usb/typec/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_TYPEC) += typec.o
-typec-y := class.o mux.o bus.o port-mapper.o
+typec-y := class.o mux.o bus.o
+typec-$(CONFIG_ACPI) += port-mapper.o
obj-$(CONFIG_TYPEC) += altmodes/
obj-$(CONFIG_TYPEC_TCPM) += tcpm/
obj-$(CONFIG_TYPEC_UCSI) += ucsi/
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index aeef453aa658..45a6f0c807cb 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -2039,8 +2039,6 @@ struct typec_port *typec_register_port(struct device *parent,
ida_init(&port->mode_ids);
mutex_init(&port->port_type_lock);
- mutex_init(&port->port_list_lock);
- INIT_LIST_HEAD(&port->port_list);
port->id = id;
port->ops = cap->ops;
diff --git a/drivers/usb/typec/class.h b/drivers/usb/typec/class.h
index aef03eb7e152..0f1bd6d19d67 100644
--- a/drivers/usb/typec/class.h
+++ b/drivers/usb/typec/class.h
@@ -54,11 +54,6 @@ struct typec_port {
const struct typec_capability *cap;
const struct typec_operations *ops;
-
- struct list_head port_list;
- struct mutex port_list_lock; /* Port list lock */
-
- void *pld;
};
#define to_typec_port(_dev_) container_of(_dev_, struct typec_port, dev)
@@ -79,7 +74,12 @@ extern const struct device_type typec_port_dev_type;
extern struct class typec_mux_class;
extern struct class typec_class;
+#if defined(CONFIG_ACPI)
int typec_link_ports(struct typec_port *connector);
void typec_unlink_ports(struct typec_port *connector);
+#else
+static inline int typec_link_ports(struct typec_port *connector) { return 0; }
+static inline void typec_unlink_ports(struct typec_port *connector) { }
+#endif
#endif /* __USB_TYPEC_CLASS__ */
diff --git a/drivers/usb/typec/port-mapper.c b/drivers/usb/typec/port-mapper.c
index 9b0991bdf391..a7d507802509 100644
--- a/drivers/usb/typec/port-mapper.c
+++ b/drivers/usb/typec/port-mapper.c
@@ -7,273 +7,78 @@
*/
#include <linux/acpi.h>
-#include <linux/usb.h>
-#include <linux/usb/typec.h>
+#include <linux/component.h>
#include "class.h"
-struct port_node {
- struct list_head list;
- struct device *dev;
- void *pld;
-};
-
-static int acpi_pld_match(const struct acpi_pld_info *pld1,
- const struct acpi_pld_info *pld2)
-{
- if (!pld1 || !pld2)
- return 0;
-
- /*
- * To speed things up, first checking only the group_position. It seems
- * to often have the first unique value in the _PLD.
- */
- if (pld1->group_position == pld2->group_position)
- return !memcmp(pld1, pld2, sizeof(struct acpi_pld_info));
-
- return 0;
-}
-
-static void *get_pld(struct device *dev)
-{
-#ifdef CONFIG_ACPI
- struct acpi_pld_info *pld;
- acpi_status status;
-
- if (!has_acpi_companion(dev))
- return NULL;
-
- status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
- if (ACPI_FAILURE(status))
- return NULL;
-
- return pld;
-#else
- return NULL;
-#endif
-}
-
-static void free_pld(void *pld)
-{
-#ifdef CONFIG_ACPI
- ACPI_FREE(pld);
-#endif
-}
-
-static int __link_port(struct typec_port *con, struct port_node *node)
-{
- int ret;
-
- ret = sysfs_create_link(&node->dev->kobj, &con->dev.kobj, "connector");
- if (ret)
- return ret;
-
- ret = sysfs_create_link(&con->dev.kobj, &node->dev->kobj,
- dev_name(node->dev));
- if (ret) {
- sysfs_remove_link(&node->dev->kobj, "connector");
- return ret;
- }
-
- list_add_tail(&node->list, &con->port_list);
-
- return 0;
-}
-
-static int link_port(struct typec_port *con, struct port_node *node)
-{
- int ret;
-
- mutex_lock(&con->port_list_lock);
- ret = __link_port(con, node);
- mutex_unlock(&con->port_list_lock);
-
- return ret;
-}
-
-static void __unlink_port(struct typec_port *con, struct port_node *node)
-{
- sysfs_remove_link(&con->dev.kobj, dev_name(node->dev));
- sysfs_remove_link(&node->dev->kobj, "connector");
- list_del(&node->list);
-}
-
-static void unlink_port(struct typec_port *con, struct port_node *node)
-{
- mutex_lock(&con->port_list_lock);
- __unlink_port(con, node);
- mutex_unlock(&con->port_list_lock);
-}
-
-static struct port_node *create_port_node(struct device *port)
-{
- struct port_node *node;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return ERR_PTR(-ENOMEM);
-
- node->dev = get_device(port);
- node->pld = get_pld(port);
-
- return node;
-}
-
-static void remove_port_node(struct port_node *node)
+static int typec_aggregate_bind(struct device *dev)
{
- put_device(node->dev);
- free_pld(node->pld);
- kfree(node);
+ return component_bind_all(dev, NULL);
}
-static int connector_match(struct device *dev, const void *data)
+static void typec_aggregate_unbind(struct device *dev)
{
- const struct port_node *node = data;
-
- if (!is_typec_port(dev))
- return 0;
-
- return acpi_pld_match(to_typec_port(dev)->pld, node->pld);
-}
-
-static struct device *find_connector(struct port_node *node)
-{
- if (!node->pld)
- return NULL;
-
- return class_find_device(&typec_class, NULL, node, connector_match);
-}
-
-/**
- * typec_link_port - Link a port to its connector
- * @port: The port device
- *
- * Find the connector of @port and create symlink named "connector" for it.
- * Returns 0 on success, or errno in case of a failure.
- *
- * NOTE. The function increments the reference count of @port on success.
- */
-int typec_link_port(struct device *port)
-{
- struct device *connector;
- struct port_node *node;
- int ret;
-
- node = create_port_node(port);
- if (IS_ERR(node))
- return PTR_ERR(node);
-
- connector = find_connector(node);
- if (!connector) {
- ret = 0;
- goto remove_node;
- }
-
- ret = link_port(to_typec_port(connector), node);
- if (ret)
- goto put_connector;
-
- return 0;
-
-put_connector:
- put_device(connector);
-remove_node:
- remove_port_node(node);
-
- return ret;
+ component_unbind_all(dev, NULL);
}
-EXPORT_SYMBOL_GPL(typec_link_port);
-
-static int port_match_and_unlink(struct device *connector, void *port)
-{
- struct port_node *node;
- struct port_node *tmp;
- int ret = 0;
-
- if (!is_typec_port(connector))
- return 0;
- mutex_lock(&to_typec_port(connector)->port_list_lock);
- list_for_each_entry_safe(node, tmp, &to_typec_port(connector)->port_list, list) {
- ret = node->dev == port;
- if (ret) {
- unlink_port(to_typec_port(connector), node);
- remove_port_node(node);
- put_device(connector);
- break;
- }
- }
- mutex_unlock(&to_typec_port(connector)->port_list_lock);
+static const struct component_master_ops typec_aggregate_ops = {
+ .bind = typec_aggregate_bind,
+ .unbind = typec_aggregate_unbind,
+};
- return ret;
-}
+struct each_port_arg {
+ struct typec_port *port;
+ struct component_match *match;
+};
-/**
- * typec_unlink_port - Unlink port from its connector
- * @port: The port device
- *
- * Removes the symlink "connector" and decrements the reference count of @port.
- */
-void typec_unlink_port(struct device *port)
+static int typec_port_compare(struct device *dev, void *fwnode)
{
- class_for_each_device(&typec_class, NULL, port, port_match_and_unlink);
+ return device_match_fwnode(dev, fwnode);
}
-EXPORT_SYMBOL_GPL(typec_unlink_port);
-static int each_port(struct device *port, void *connector)
+static int typec_port_match(struct device *dev, void *data)
{
- struct port_node *node;
- int ret;
+ struct acpi_device *adev = to_acpi_device(dev);
+ struct each_port_arg *arg = data;
+ struct acpi_device *con_adev;
- node = create_port_node(port);
- if (IS_ERR(node))
- return PTR_ERR(node);
-
- if (!connector_match(connector, node)) {
- remove_port_node(node);
+ con_adev = ACPI_COMPANION(&arg->port->dev);
+ if (con_adev == adev)
return 0;
- }
-
- ret = link_port(to_typec_port(connector), node);
- if (ret) {
- remove_port_node(node->pld);
- return ret;
- }
-
- get_device(connector);
+ if (con_adev->pld_crc == adev->pld_crc)
+ component_match_add(&arg->port->dev, &arg->match, typec_port_compare,
+ acpi_fwnode_handle(adev));
return 0;
}
int typec_link_ports(struct typec_port *con)
{
- int ret = 0;
+ struct each_port_arg arg = { .port = con, .match = NULL };
- con->pld = get_pld(&con->dev);
- if (!con->pld)
+ if (!has_acpi_companion(&con->dev))
return 0;
- ret = usb_for_each_port(&con->dev, each_port);
- if (ret)
- typec_unlink_ports(con);
+ bus_for_each_dev(&acpi_bus_type, NULL, &arg, typec_port_match);
+ if (!arg.match)
+ return 0;
- return ret;
+ /*
+ * REVISIT: Each connector can currently have only a single component
+ * master. So far only the USB ports connected to the USB Type-C connector
+ * share the _PLD with it, but if something else one day also shares the
+ * _PLD with the connector (maybe the DisplayPort ACPI device object),
+ * every one of those will need its own component master, because each
+ * type of component needs to be bound to the connector independently of
+ * the others. That requires improvements to the component framework:
+ * right now a device can have only one master.
+ */
+ return component_master_add_with_match(&con->dev, &typec_aggregate_ops, arg.match);
}
void typec_unlink_ports(struct typec_port *con)
{
- struct port_node *node;
- struct port_node *tmp;
-
- mutex_lock(&con->port_list_lock);
-
- list_for_each_entry_safe(node, tmp, &con->port_list, list) {
- __unlink_port(con, node);
- remove_port_node(node);
- put_device(&con->dev);
- }
-
- mutex_unlock(&con->port_list_lock);
-
- free_pld(con->pld);
+ if (has_acpi_companion(&con->dev))
+ component_master_del(&con->dev, &typec_aggregate_ops);
}
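
The connector is now the aggregate device (component master): typec_port_match() collects the fwnodes of every ACPI device that shares the connector's _PLD CRC, and the actual linking happens through the component framework rather than through hand-rolled sysfs symlinks. For orientation, a minimal sketch of the port side, assuming a hypothetical port driver (component_add()/component_del() and struct component_ops are the real API; all other names are illustrative):

#include <linux/component.h>
#include <linux/device.h>

static int example_port_bind(struct device *dev, struct device *connector,
			     void *data)
{
	/* called from typec_aggregate_bind(); link @dev to @connector here */
	return 0;
}

static void example_port_unbind(struct device *dev, struct device *connector,
				void *data)
{
	/* undo whatever bind() set up */
}

static const struct component_ops example_port_ops = {
	.bind	= example_port_bind,
	.unbind	= example_port_unbind,
};

static int example_port_probe(struct device *dev)
{
	/* make @dev available for the connector's component match */
	return component_add(dev, &example_port_ops);
}

static void example_port_remove(struct device *dev)
{
	component_del(dev, &example_port_ops);
}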
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 35a1307349a2..e07d26a3cd8e 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -75,9 +75,25 @@ static int tcpci_write16(struct tcpci *tcpci, unsigned int reg, u16 val)
static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+ bool vconn_pres;
+ enum typec_cc_polarity polarity = TYPEC_POLARITY_CC1;
unsigned int reg;
int ret;
+ ret = regmap_read(tcpci->regmap, TCPC_POWER_STATUS, &reg);
+ if (ret < 0)
+ return ret;
+
+ vconn_pres = !!(reg & TCPC_POWER_STATUS_VCONN_PRES);
+ if (vconn_pres) {
+ ret = regmap_read(tcpci->regmap, TCPC_TCPC_CTRL, &reg);
+ if (ret < 0)
+ return ret;
+
+ if (reg & TCPC_TCPC_CTRL_ORIENTATION)
+ polarity = TYPEC_POLARITY_CC2;
+ }
+
switch (cc) {
case TYPEC_CC_RA:
reg = (TCPC_ROLE_CTRL_CC_RA << TCPC_ROLE_CTRL_CC1_SHIFT) |
@@ -112,6 +128,16 @@ static int tcpci_set_cc(struct tcpc_dev *tcpc, enum typec_cc_status cc)
break;
}
+ if (vconn_pres) {
+ if (polarity == TYPEC_POLARITY_CC2) {
+ reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
+ reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT);
+ } else {
+ reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
+ reg |= (TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT);
+ }
+ }
+
ret = regmap_write(tcpci->regmap, TCPC_ROLE_CTRL, reg);
if (ret < 0)
return ret;
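
The intent of the new tcpci_set_cc() logic: while VCONN is being sourced, the CC pin that carries VCONN must not be driven or sensed, so its ROLE_CONTROL field is forced open. Condensed into a helper for clarity (the function itself is illustrative; the macros are the ones from tcpci.h):

/* If CC2 is the active line (ORIENTATION set), CC1 carries VCONN. */
static unsigned int mask_vconn_pin(unsigned int reg,
				   enum typec_cc_polarity polarity)
{
	if (polarity == TYPEC_POLARITY_CC2) {
		reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT);
		reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT;
	} else {
		reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT);
		reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC2_SHIFT;
	}
	return reg;
}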
diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h
index 2be7a77d400e..b2edd45f13c6 100644
--- a/drivers/usb/typec/tcpm/tcpci.h
+++ b/drivers/usb/typec/tcpm/tcpci.h
@@ -98,6 +98,7 @@
#define TCPC_POWER_STATUS_SOURCING_VBUS BIT(4)
#define TCPC_POWER_STATUS_VBUS_DET BIT(3)
#define TCPC_POWER_STATUS_VBUS_PRES BIT(2)
+#define TCPC_POWER_STATUS_VCONN_PRES BIT(1)
#define TCPC_POWER_STATUS_SINKING_VBUS BIT(0)
#define TCPC_FAULT_STATUS 0x1f
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 59d4fa2443f2..5fce795b69c7 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -5156,7 +5156,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
case SNK_TRYWAIT_DEBOUNCE:
break;
case SNK_ATTACH_WAIT:
- tcpm_set_state(port, SNK_UNATTACHED, 0);
+ case SNK_DEBOUNCED:
+ /* Do nothing, as TCPM is still waiting for vbus to reach VSAFE5V to connect */
break;
case SNK_NEGOTIATE_CAPABILITIES:
@@ -5263,6 +5264,10 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port)
case PR_SWAP_SNK_SRC_SOURCE_ON:
/* Do nothing, vsafe0v is expected during transition */
break;
+ case SNK_ATTACH_WAIT:
+ case SNK_DEBOUNCED:
+ /* Do nothing, still waiting for VSAFE5V to connect */
+ break;
default:
if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled)
tcpm_set_state(port, SNK_UNATTACHED, 0);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 08561bf7c40c..f0c2fa19f3e0 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -303,6 +303,17 @@ static int ucsi_next_altmode(struct typec_altmode **alt)
return -ENOENT;
}
+static int ucsi_get_num_altmode(struct typec_altmode **alt)
+{
+ int i;
+
+ for (i = 0; i < UCSI_MAX_ALTMODES; i++)
+ if (!alt[i])
+ break;
+
+ return i;
+}
+
static int ucsi_register_altmode(struct ucsi_connector *con,
struct typec_altmode_desc *desc,
u8 recipient)
@@ -607,7 +618,7 @@ static int ucsi_get_src_pdos(struct ucsi_connector *con)
static int ucsi_check_altmodes(struct ucsi_connector *con)
{
- int ret;
+ int ret, num_partner_am;
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
if (ret && ret != -ETIMEDOUT)
@@ -617,6 +628,9 @@ static int ucsi_check_altmodes(struct ucsi_connector *con)
/* Ignoring the errors in this case. */
if (con->partner_altmode[0]) {
+ num_partner_am = ucsi_get_num_altmode(con->partner_altmode);
+ if (num_partner_am > 0)
+ typec_partner_set_num_altmodes(con->partner, num_partner_am);
ucsi_altmode_update_active(con);
return 0;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index bff96d64dddf..6db7c8ddd51c 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -325,7 +325,7 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
if (status < 0)
return status;
- if (!data)
+ if (!(data & DEV_INT))
return 0;
status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
index 086ca76dd053..26513540bcdb 100644
--- a/drivers/usb/usbip/usbip_event.c
+++ b/drivers/usb/usbip/usbip_event.c
@@ -137,7 +137,6 @@ int usbip_init_eh(void)
void usbip_finish_eh(void)
{
- flush_workqueue(usbip_queue);
destroy_workqueue(usbip_queue);
usbip_queue = NULL;
}
diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c
index 3f788794571a..f480d54f308c 100644
--- a/drivers/vdpa/alibaba/eni_vdpa.c
+++ b/drivers/vdpa/alibaba/eni_vdpa.c
@@ -58,7 +58,7 @@ static struct virtio_pci_legacy_device *vdpa_to_ldev(struct vdpa_device *vdpa)
return &eni_vdpa->ldev;
}
-static u64 eni_vdpa_get_features(struct vdpa_device *vdpa)
+static u64 eni_vdpa_get_device_features(struct vdpa_device *vdpa)
{
struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
u64 features = vp_legacy_get_features(ldev);
@@ -69,7 +69,7 @@ static u64 eni_vdpa_get_features(struct vdpa_device *vdpa)
return features;
}
-static int eni_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+static int eni_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
@@ -84,6 +84,13 @@ static int eni_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
+static u64 eni_vdpa_get_driver_features(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+ return vp_legacy_get_driver_features(ldev);
+}
+
static u8 eni_vdpa_get_status(struct vdpa_device *vdpa)
{
struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
@@ -401,8 +408,9 @@ static void eni_vdpa_set_config_cb(struct vdpa_device *vdpa,
}
static const struct vdpa_config_ops eni_vdpa_ops = {
- .get_features = eni_vdpa_get_features,
- .set_features = eni_vdpa_set_features,
+ .get_device_features = eni_vdpa_get_device_features,
+ .set_driver_features = eni_vdpa_set_driver_features,
+ .get_driver_features = eni_vdpa_get_driver_features,
.get_status = eni_vdpa_get_status,
.set_status = eni_vdpa_set_status,
.reset = eni_vdpa_reset,
@@ -450,11 +458,6 @@ static u16 eni_vdpa_get_num_queues(struct eni_vdpa *eni_vdpa)
return num;
}
-static void eni_vdpa_free_irq_vectors(void *data)
-{
- pci_free_irq_vectors(data);
-}
-
static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct device *dev = &pdev->dev;
@@ -488,13 +491,6 @@ static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
eni_vdpa->vdpa.dma_dev = &pdev->dev;
eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);
- ret = devm_add_action_or_reset(dev, eni_vdpa_free_irq_vectors, pdev);
- if (ret) {
- ENI_ERR(pdev,
- "failed for adding devres for freeing irq vectors\n");
- goto err;
- }
-
eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
sizeof(*eni_vdpa->vring),
GFP_KERNEL);
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 2808f1ba9f7b..7d41dfe48ade 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -143,8 +143,8 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
break;
case VIRTIO_PCI_CAP_DEVICE_CFG:
- hw->net_cfg = get_cap_addr(hw, &cap);
- IFCVF_DBG(pdev, "hw->net_cfg = %p\n", hw->net_cfg);
+ hw->dev_cfg = get_cap_addr(hw, &cap);
+ IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
break;
}
@@ -153,7 +153,7 @@ next:
}
if (hw->common_cfg == NULL || hw->notify_base == NULL ||
- hw->isr == NULL || hw->net_cfg == NULL) {
+ hw->isr == NULL || hw->dev_cfg == NULL) {
IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
return -EIO;
}
@@ -174,7 +174,7 @@ next:
IFCVF_DBG(pdev,
"PCI capability mapping: common cfg: %p, notify base: %p\n, isr cfg: %p, device cfg: %p, multiplier: %u\n",
hw->common_cfg, hw->notify_base, hw->isr,
- hw->net_cfg, hw->notify_off_multiplier);
+ hw->dev_cfg, hw->notify_off_multiplier);
return 0;
}
@@ -242,33 +242,54 @@ int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
return 0;
}
-void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
+u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
+{
+ struct ifcvf_adapter *adapter;
+ u32 config_size;
+
+ adapter = vf_to_adapter(hw);
+ switch (hw->dev_type) {
+ case VIRTIO_ID_NET:
+ config_size = sizeof(struct virtio_net_config);
+ break;
+ case VIRTIO_ID_BLOCK:
+ config_size = sizeof(struct virtio_blk_config);
+ break;
+ default:
+ config_size = 0;
+ IFCVF_ERR(adapter->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
+ }
+
+ return config_size;
+}
+
+void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
void *dst, int length)
{
u8 old_gen, new_gen, *p;
int i;
- WARN_ON(offset + length > sizeof(struct virtio_net_config));
+ WARN_ON(offset + length > hw->config_size);
do {
old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
p = dst;
for (i = 0; i < length; i++)
- *p++ = ifc_ioread8(hw->net_cfg + offset + i);
+ *p++ = ifc_ioread8(hw->dev_cfg + offset + i);
new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
} while (old_gen != new_gen);
}
-void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
+void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
const void *src, int length)
{
const u8 *p;
int i;
p = src;
- WARN_ON(offset + length > sizeof(struct virtio_net_config));
+ WARN_ON(offset + length > hw->config_size);
for (i = 0; i < length; i++)
- ifc_iowrite8(*p++, hw->net_cfg + offset + i);
+ ifc_iowrite8(*p++, hw->dev_cfg + offset + i);
}
static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
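
The read path above uses the standard virtio config-generation retry loop: sample the generation counter, copy the bytes, and retry if the counter changed underneath, which guarantees a consistent snapshot even while the device updates the config space. A self-contained user-space model of the same pattern (all names illustrative):

#include <stdint.h>
#include <string.h>

struct model_dev {
	volatile uint8_t generation;	/* bumped by the "device" on update */
	uint8_t cfg[64];
};

static void model_read_config(struct model_dev *d, void *dst,
			      size_t offset, size_t len)
{
	uint8_t before, after;

	do {
		before = d->generation;
		memcpy(dst, &d->cfg[offset], len);
		after = d->generation;
	} while (before != after);	/* torn read: try again */
}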
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index 09918af3ecf8..c486873f370a 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -71,12 +71,14 @@ struct ifcvf_hw {
u64 hw_features;
u32 dev_type;
struct virtio_pci_common_cfg __iomem *common_cfg;
- void __iomem *net_cfg;
+ void __iomem *dev_cfg;
struct vring_info vring[IFCVF_MAX_QUEUES];
void __iomem * const *base;
char config_msix_name[256];
struct vdpa_callback config_cb;
unsigned int config_irq;
+ /* virtio-net or virtio-blk device config size */
+ u32 config_size;
};
struct ifcvf_adapter {
@@ -105,9 +107,9 @@ int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *dev);
int ifcvf_start_hw(struct ifcvf_hw *hw);
void ifcvf_stop_hw(struct ifcvf_hw *hw);
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
-void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
+void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
void *dst, int length);
-void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
+void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
const void *src, int length);
u8 ifcvf_get_status(struct ifcvf_hw *hw);
void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
@@ -120,4 +122,5 @@ u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num);
struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
int ifcvf_probed_virtio_net(struct ifcvf_hw *hw);
+u32 ifcvf_get_config_size(struct ifcvf_hw *hw);
#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 6dc75ca70b37..d1a6b5ab543c 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -169,7 +169,7 @@ static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
return &adapter->vf;
}
-static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
+static u64 ifcvf_vdpa_get_device_features(struct vdpa_device *vdpa_dev)
{
struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
@@ -187,7 +187,7 @@ static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
return features;
}
-static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
+static int ifcvf_vdpa_set_driver_features(struct vdpa_device *vdpa_dev, u64 features)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
int ret;
@@ -201,6 +201,13 @@ static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
return 0;
}
+static u64 ifcvf_vdpa_get_driver_features(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return vf->req_features;
+}
+
static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
@@ -366,24 +373,9 @@ static u32 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
static size_t ifcvf_vdpa_get_config_size(struct vdpa_device *vdpa_dev)
{
- struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- struct pci_dev *pdev = adapter->pdev;
- size_t size;
-
- switch (vf->dev_type) {
- case VIRTIO_ID_NET:
- size = sizeof(struct virtio_net_config);
- break;
- case VIRTIO_ID_BLOCK:
- size = sizeof(struct virtio_blk_config);
- break;
- default:
- size = 0;
- IFCVF_ERR(pdev, "VIRTIO ID %u not supported\n", vf->dev_type);
- }
- return size;
+ return vf->config_size;
}
static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
@@ -392,8 +384,7 @@ static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- WARN_ON(offset + len > sizeof(struct virtio_net_config));
- ifcvf_read_net_config(vf, offset, buf, len);
+ ifcvf_read_dev_config(vf, offset, buf, len);
}
static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
@@ -402,8 +393,7 @@ static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
{
struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
- WARN_ON(offset + len > sizeof(struct virtio_net_config));
- ifcvf_write_net_config(vf, offset, buf, len);
+ ifcvf_write_dev_config(vf, offset, buf, len);
}
static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
@@ -443,8 +433,9 @@ static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_devic
* implemented set_map()/dma_map()/dma_unmap()
*/
static const struct vdpa_config_ops ifc_vdpa_ops = {
- .get_features = ifcvf_vdpa_get_features,
- .set_features = ifcvf_vdpa_set_features,
+ .get_device_features = ifcvf_vdpa_get_device_features,
+ .set_driver_features = ifcvf_vdpa_set_driver_features,
+ .get_driver_features = ifcvf_vdpa_get_driver_features,
.get_status = ifcvf_vdpa_get_status,
.set_status = ifcvf_vdpa_set_status,
.reset = ifcvf_vdpa_reset,
@@ -542,6 +533,7 @@ static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
vf->vring[i].irq = -EINVAL;
vf->hw_features = ifcvf_get_hw_features(vf);
+ vf->config_size = ifcvf_get_config_size(vf);
adapter->vdpa.mdev = &ifcvf_mgmt_dev->mdev;
ret = _vdpa_register_device(&adapter->vdpa, vf->nr_vring);
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 63813fbb5f62..f648f1c54a0f 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -131,25 +131,24 @@ struct mlx5_vdpa_virtqueue {
struct mlx5_vq_restore_info ri;
};
-/* We will remove this limitation once mlx5_vdpa_alloc_resources()
- * provides for driver space allocation
- */
-#define MLX5_MAX_SUPPORTED_VQS 16
-
static bool is_index_valid(struct mlx5_vdpa_dev *mvdev, u16 idx)
{
- if (unlikely(idx > mvdev->max_idx))
- return false;
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) {
+ if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
+ return idx < 2;
+ else
+ return idx < 3;
+ }
- return true;
+ return idx <= mvdev->max_idx;
}
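
The reworked is_index_valid() encodes the virtio-net queue layout: without VIRTIO_NET_F_MQ there is exactly one rx/tx pair (indices 0 and 1), plus index 2 when a control virtqueue was negotiated; with MQ the bound is the negotiated max_idx. A small user-space model of the same check (illustrative):

#include <stdbool.h>

static bool model_index_valid(bool mq, bool ctrl_vq, int max_idx, int idx)
{
	if (!mq)
		return ctrl_vq ? idx < 3 : idx < 2;
	return idx <= max_idx;
}
/* model_index_valid(false, true, 0, 2) == true: the control vq at index 2 */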
struct mlx5_vdpa_net {
struct mlx5_vdpa_dev mvdev;
struct mlx5_vdpa_net_resources res;
struct virtio_net_config config;
- struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS];
- struct vdpa_callback event_cbs[MLX5_MAX_SUPPORTED_VQS + 1];
+ struct mlx5_vdpa_virtqueue *vqs;
+ struct vdpa_callback *event_cbs;
/* Serialize vq resources creation and destruction. This is required
* since memory map might change and we need to destroy and create
@@ -876,8 +875,6 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
- if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
- MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);
err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
if (err)
@@ -1218,7 +1215,7 @@ static void suspend_vqs(struct mlx5_vdpa_net *ndev)
{
int i;
- for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++)
+ for (i = 0; i < ndev->mvdev.max_vqs; i++)
suspend_vq(ndev, &ndev->vqs[i]);
}
@@ -1244,8 +1241,14 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
void *in;
int i, j;
int err;
+ int num;
- max_rqt = min_t(int, MLX5_MAX_SUPPORTED_VQS / 2,
+ if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
+ num = 1;
+ else
+ num = ndev->cur_num_vqs / 2;
+
+ max_rqt = min_t(int, roundup_pow_of_two(num),
1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
if (max_rqt < 1)
return -EOPNOTSUPP;
@@ -1261,17 +1264,10 @@ static int create_rqt(struct mlx5_vdpa_net *ndev)
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
MLX5_SET(rqtc, rqtc, rqt_max_size, max_rqt);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; j < max_rqt; j++) {
- if (!ndev->vqs[j].initialized)
- continue;
-
- if (!vq_is_tx(ndev->vqs[j].index)) {
- list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
- i++;
- }
- }
- MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
+ for (i = 0, j = 0; i < max_rqt; i++, j += 2)
+ list[i] = cpu_to_be32(ndev->vqs[j % (2 * num)].virtq_id);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn);
kfree(in);
if (err)
@@ -1292,7 +1288,7 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
int i, j;
int err;
- max_rqt = min_t(int, ndev->cur_num_vqs / 2,
+ max_rqt = min_t(int, roundup_pow_of_two(ndev->cur_num_vqs / 2),
1 << MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size));
if (max_rqt < 1)
return -EOPNOTSUPP;
@@ -1308,16 +1304,10 @@ static int modify_rqt(struct mlx5_vdpa_net *ndev, int num)
MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q);
list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]);
- for (i = 0, j = 0; j < num; j++) {
- if (!ndev->vqs[j].initialized)
- continue;
+ for (i = 0, j = 0; i < max_rqt; i++, j += 2)
+ list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id);
- if (!vq_is_tx(ndev->vqs[j].index)) {
- list[i] = cpu_to_be32(ndev->vqs[j].virtq_id);
- i++;
- }
- }
- MLX5_SET(rqtc, rqtc, rqt_actual_size, i);
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, max_rqt);
err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn);
kfree(in);
if (err)
@@ -1554,9 +1544,11 @@ static int change_num_qps(struct mlx5_vdpa_dev *mvdev, int newqps)
return 0;
clean_added:
- for (--i; i >= cur_qps; --i)
+ for (--i; i >= 2 * cur_qps; --i)
teardown_vq(ndev, &ndev->vqs[i]);
+ ndev->cur_num_vqs = 2 * cur_qps;
+
return err;
}
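
The RQT population above in create_rqt()/modify_rqt() leans on the virtio-net queue layout, where receive queues sit at the even indices (rx0, tx0, rx1, tx1, ...), so stepping the source index by two collects only RX queues; the modulo wrap repeats entries when the power-of-two table is larger than the active RX count. A runnable model of that walk (values illustrative):

#include <stdio.h>

int main(void)
{
	int num = 3, max_rqt = 4;	/* 3 RX queues, table rounded up to 4 */
	int i, j;

	for (i = 0, j = 0; i < max_rqt; i++, j += 2)
		printf("rqt[%d] <- vq %d\n", i, j % (2 * num));
	/* fills with vq 0, 2, 4, then wraps back to 0 */
	return 0;
}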
@@ -1581,9 +1573,6 @@ static virtio_net_ctrl_ack handle_ctrl_mq(struct mlx5_vdpa_dev *mvdev, u8 cmd)
break;
}
- if (newqps & (newqps - 1))
- break;
-
if (!change_num_qps(mvdev, newqps))
status = VIRTIO_NET_OK;
@@ -1880,21 +1869,29 @@ static u64 mlx_to_vritio_features(u16 dev_features)
return result;
}
-static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
+static u64 get_supported_features(struct mlx5_core_dev *mdev)
{
- struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
- struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ u64 mlx_vdpa_features = 0;
u16 dev_features;
- dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask);
- ndev->mvdev.mlx_features |= mlx_to_vritio_features(dev_features);
- if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0))
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MQ);
- ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
+ dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mdev, device_features_bits_mask);
+ mlx_vdpa_features |= mlx_to_vritio_features(dev_features);
+ if (MLX5_CAP_DEV_VDPA_EMULATION(mdev, virtio_version_1_0))
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_F_VERSION_1);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MQ);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
+ mlx_vdpa_features |= BIT_ULL(VIRTIO_NET_F_MTU);
+
+ return mlx_vdpa_features;
+}
+
+static u64 mlx5_vdpa_get_device_features(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
print_features(mvdev, ndev->mvdev.mlx_features, false);
return ndev->mvdev.mlx_features;
@@ -1972,7 +1969,7 @@ static void update_cvq_info(struct mlx5_vdpa_dev *mvdev)
}
}
-static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
+static int mlx5_vdpa_set_driver_features(struct vdpa_device *vdev, u64 features)
{
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
@@ -1985,6 +1982,11 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
return err;
ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
+ if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ))
+ ndev->cur_num_vqs = 2 * mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs);
+ else
+ ndev->cur_num_vqs = 2;
+
update_cvq_info(mvdev);
return err;
}
@@ -2235,7 +2237,8 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
clear_vqs_ready(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
- memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
+ ndev->cur_num_vqs = 0;
+ memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1));
ndev->mvdev.actual_features = 0;
++mvdev->generation;
if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
@@ -2308,6 +2311,8 @@ static void mlx5_vdpa_free(struct vdpa_device *vdev)
}
mlx5_vdpa_free_resources(&ndev->mvdev);
mutex_destroy(&ndev->reslock);
+ kfree(ndev->event_cbs);
+ kfree(ndev->vqs);
}
static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx)
@@ -2339,6 +2344,13 @@ static int mlx5_get_vq_irq(struct vdpa_device *vdv, u16 idx)
return -EOPNOTSUPP;
}
+static u64 mlx5_vdpa_get_driver_features(struct vdpa_device *vdev)
+{
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+
+ return mvdev->actual_features;
+}
+
static const struct vdpa_config_ops mlx5_vdpa_ops = {
.set_vq_address = mlx5_vdpa_set_vq_address,
.set_vq_num = mlx5_vdpa_set_vq_num,
@@ -2351,8 +2363,9 @@ static const struct vdpa_config_ops mlx5_vdpa_ops = {
.get_vq_notification = mlx5_get_vq_notification,
.get_vq_irq = mlx5_get_vq_irq,
.get_vq_align = mlx5_vdpa_get_vq_align,
- .get_features = mlx5_vdpa_get_features,
- .set_features = mlx5_vdpa_set_features,
+ .get_device_features = mlx5_vdpa_get_device_features,
+ .set_driver_features = mlx5_vdpa_set_driver_features,
+ .get_driver_features = mlx5_vdpa_get_driver_features,
.set_config_cb = mlx5_vdpa_set_config_cb,
.get_vq_num_max = mlx5_vdpa_get_vq_num_max,
.get_device_id = mlx5_vdpa_get_device_id,
@@ -2545,18 +2558,39 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
return -EOPNOTSUPP;
}
- /* we save one virtqueue for control virtqueue should we require it */
max_vqs = MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues);
- max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS);
+ if (max_vqs < 2) {
+ dev_warn(mdev->device,
+ "%d virtqueues are supported. At least 2 are required\n",
+ max_vqs);
+ return -EAGAIN;
+ }
+
+ if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) {
+ if (add_config->net.max_vq_pairs > max_vqs / 2)
+ return -EINVAL;
+ max_vqs = min_t(u32, max_vqs, 2 * add_config->net.max_vq_pairs);
+ } else {
+ max_vqs = 2;
+ }
ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops,
name, false);
if (IS_ERR(ndev))
return PTR_ERR(ndev);
+ ndev->mvdev.mlx_features = mgtdev->mgtdev.supported_features;
ndev->mvdev.max_vqs = max_vqs;
mvdev = &ndev->mvdev;
mvdev->mdev = mdev;
+
+ ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL);
+ ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL);
+ if (!ndev->vqs || !ndev->event_cbs) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
init_mvqs(ndev);
mutex_init(&ndev->reslock);
config = &ndev->config;
@@ -2612,9 +2646,8 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
ndev->nb.notifier_call = event_handler;
mlx5_notifier_register(mdev, &ndev->nb);
- ndev->cur_num_vqs = 2 * mlx5_vdpa_max_qps(max_vqs);
mvdev->vdev.mdev = &mgtdev->mgtdev;
- err = _vdpa_register_device(&mvdev->vdev, ndev->cur_num_vqs + 1);
+ err = _vdpa_register_device(&mvdev->vdev, 2 * mlx5_vdpa_max_qps(max_vqs) + 1);
if (err)
goto err_reg;
@@ -2634,6 +2667,7 @@ err_mpfs:
mlx5_mpfs_del_mac(pfmdev, config->mac);
err_mtu:
mutex_destroy(&ndev->reslock);
+err_alloc:
put_device(&mvdev->vdev.dev);
return err;
}
@@ -2676,14 +2710,18 @@ static int mlx5v_probe(struct auxiliary_device *adev,
mgtdev->mgtdev.ops = &mdev_ops;
mgtdev->mgtdev.device = mdev->device;
mgtdev->mgtdev.id_table = id_table;
- mgtdev->mgtdev.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR);
+ mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) |
+ BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
+ mgtdev->mgtdev.max_supported_vqs =
+ MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues) + 1;
+ mgtdev->mgtdev.supported_features = get_supported_features(mdev);
mgtdev->madev = madev;
err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
if (err)
goto reg_err;
- dev_set_drvdata(&adev->dev, mgtdev);
+ auxiliary_set_drvdata(adev, mgtdev);
return 0;
@@ -2696,7 +2734,7 @@ static void mlx5v_remove(struct auxiliary_device *adev)
{
struct mlx5_vdpa_mgmtdev *mgtdev;
- mgtdev = dev_get_drvdata(&adev->dev);
+ mgtdev = auxiliary_get_drvdata(adev);
vdpa_mgmtdev_unregister(&mgtdev->mgtdev);
kfree(mgtdev);
}
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 09bbe53c3ac4..9846c9de4bfa 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -21,6 +21,14 @@ static LIST_HEAD(mdev_head);
static DEFINE_MUTEX(vdpa_dev_mutex);
static DEFINE_IDA(vdpa_index_ida);
+void vdpa_set_status(struct vdpa_device *vdev, u8 status)
+{
+ mutex_lock(&vdev->cf_mutex);
+ vdev->config->set_status(vdev, status);
+ mutex_unlock(&vdev->cf_mutex);
+}
+EXPORT_SYMBOL(vdpa_set_status);
+
static struct genl_family vdpa_nl_family;
static int vdpa_dev_probe(struct device *d)
@@ -52,8 +60,81 @@ static void vdpa_dev_remove(struct device *d)
drv->remove(vdev);
}
+static int vdpa_dev_match(struct device *dev, struct device_driver *drv)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(dev);
+
+ /* Check override first, and if set, only use the named driver */
+ if (vdev->driver_override)
+ return strcmp(vdev->driver_override, drv->name) == 0;
+
+ /* Currently devices must be supported by all vDPA bus drivers */
+ return 1;
+}
+
+static ssize_t driver_override_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(dev);
+ const char *driver_override, *old;
+ char *cp;
+
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
+ return -EINVAL;
+
+ driver_override = kstrndup(buf, count, GFP_KERNEL);
+ if (!driver_override)
+ return -ENOMEM;
+
+ cp = strchr(driver_override, '\n');
+ if (cp)
+ *cp = '\0';
+
+ device_lock(dev);
+ old = vdev->driver_override;
+ if (strlen(driver_override)) {
+ vdev->driver_override = driver_override;
+ } else {
+ kfree(driver_override);
+ vdev->driver_override = NULL;
+ }
+ device_unlock(dev);
+
+ kfree(old);
+
+ return count;
+}
+
+static ssize_t driver_override_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(dev);
+ ssize_t len;
+
+ device_lock(dev);
+ len = snprintf(buf, PAGE_SIZE, "%s\n", vdev->driver_override);
+ device_unlock(dev);
+
+ return len;
+}
+static DEVICE_ATTR_RW(driver_override);
+
+static struct attribute *vdpa_dev_attrs[] = {
+ &dev_attr_driver_override.attr,
+ NULL,
+};
+
+static const struct attribute_group vdpa_dev_group = {
+ .attrs = vdpa_dev_attrs,
+};
+__ATTRIBUTE_GROUPS(vdpa_dev);
+
static struct bus_type vdpa_bus = {
.name = "vdpa",
+ .dev_groups = vdpa_dev_groups,
+ .match = vdpa_dev_match,
.probe = vdpa_dev_probe,
.remove = vdpa_dev_remove,
};
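
driver_override follows the same convention as the PCI and platform buses: when set, only the named driver may bind; when clear, any vDPA bus driver matches. A compact model of the match rule (illustrative, user-space):

#include <stdio.h>
#include <string.h>

static int model_match(const char *override, const char *drv_name)
{
	if (override)			/* override set: exact name only */
		return strcmp(override, drv_name) == 0;
	return 1;			/* no override: any vDPA driver */
}

int main(void)
{
	printf("%d\n", model_match(NULL, "virtio_vdpa"));	   /* 1 */
	printf("%d\n", model_match("vhost_vdpa", "virtio_vdpa")); /* 0 */
	return 0;
}

With this in place, user space can steer binding by writing a driver name such as vhost_vdpa into the device's driver_override attribute before (re)binding, just as with the equivalent PCI attribute.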
@@ -68,6 +149,7 @@ static void vdpa_release_dev(struct device *d)
ida_simple_remove(&vdpa_index_ida, vdev->index);
mutex_destroy(&vdev->cf_mutex);
+ kfree(vdev->driver_override);
kfree(vdev);
}
@@ -300,6 +382,21 @@ void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
+static void vdpa_get_config_unlocked(struct vdpa_device *vdev,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ const struct vdpa_config_ops *ops = vdev->config;
+
+ /*
+ * Config accesses aren't supposed to trigger before features are set.
+ * If it does happen we assume a legacy guest.
+ */
+ if (!vdev->features_valid)
+ vdpa_set_features(vdev, 0, true);
+ ops->get_config(vdev, offset, buf, len);
+}
+
/**
* vdpa_get_config - Get one or more device configuration fields.
* @vdev: vdpa device to operate on
@@ -310,16 +407,8 @@ EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len)
{
- const struct vdpa_config_ops *ops = vdev->config;
-
mutex_lock(&vdev->cf_mutex);
- /*
- * Config accesses aren't supposed to trigger before features are set.
- * If it does happen we assume a legacy guest.
- */
- if (!vdev->features_valid)
- vdpa_set_features(vdev, 0);
- ops->get_config(vdev, offset, buf, len);
+ vdpa_get_config_unlocked(vdev, offset, buf, len);
mutex_unlock(&vdev->cf_mutex);
}
EXPORT_SYMBOL_GPL(vdpa_get_config);
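
Splitting out vdpa_get_config_unlocked() is the usual locked/unlocked pairing: paths that already hold cf_mutex (such as the netlink config fill below) call the unlocked helper, while the exported entry point takes the lock itself. The shape of the pattern, reduced to a user-space sketch (a pthread mutex standing in for cf_mutex):

#include <pthread.h>

static pthread_mutex_t cf_mutex = PTHREAD_MUTEX_INITIALIZER;
static int config_blob;

static int get_config_unlocked(void)
{
	/* caller must hold cf_mutex */
	return config_blob;
}

int get_config(void)
{
	int v;

	pthread_mutex_lock(&cf_mutex);
	v = get_config_unlocked();
	pthread_mutex_unlock(&cf_mutex);
	return v;
}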
@@ -414,6 +503,16 @@ static int vdpa_mgmtdev_fill(const struct vdpa_mgmt_dev *mdev, struct sk_buff *m
err = -EMSGSIZE;
goto msg_err;
}
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_MGMTDEV_MAX_VQS,
+ mdev->max_supported_vqs)) {
+ err = -EMSGSIZE;
+ goto msg_err;
+ }
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_SUPPORTED_FEATURES,
+ mdev->supported_features, VDPA_ATTR_PAD)) {
+ err = -EMSGSIZE;
+ goto msg_err;
+ }
genlmsg_end(msg, hdr);
return 0;
@@ -480,8 +579,9 @@ out:
return msg->len;
}
-#define VDPA_DEV_NET_ATTRS_MASK ((1 << VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
- (1 << VDPA_ATTR_DEV_NET_CFG_MTU))
+#define VDPA_DEV_NET_ATTRS_MASK (BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
+ BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU) | \
+ BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP))
static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
@@ -500,12 +600,22 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
- config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR);
+ config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR);
}
if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
config.net.mtu =
nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
- config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MTU);
+ config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU);
+ }
+ if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]) {
+ config.net.max_vq_pairs =
+ nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MAX_VQP]);
+ if (!config.net.max_vq_pairs) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "At least one pair of VQs is required");
+ return -EINVAL;
+ }
+ config.mask |= BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP);
}
/* Skip checking capability if user didn't prefer to configure any
@@ -707,7 +817,7 @@ static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
{
u16 val_u16;
- if ((features & (1ULL << VIRTIO_NET_F_MQ)) == 0)
+ if ((features & BIT_ULL(VIRTIO_NET_F_MQ)) == 0)
return 0;
val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
@@ -720,7 +830,7 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
u64 features;
u16 val_u16;
- vdpa_get_config(vdev, 0, &config, sizeof(config));
+ vdpa_get_config_unlocked(vdev, 0, &config, sizeof(config));
if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
config.mac))
@@ -734,7 +844,10 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
return -EMSGSIZE;
- features = vdev->config->get_features(vdev);
+ features = vdev->config->get_driver_features(vdev);
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_NEGOTIATED_FEATURES, features,
+ VDPA_ATTR_PAD))
+ return -EMSGSIZE;
return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
}
@@ -745,12 +858,23 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
{
u32 device_id;
void *hdr;
+ u8 status;
int err;
+ mutex_lock(&vdev->cf_mutex);
+ status = vdev->config->get_status(vdev);
+ if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ NL_SET_ERR_MSG_MOD(extack, "Features negotiation not completed");
+ err = -EAGAIN;
+ goto out;
+ }
+
hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
VDPA_CMD_DEV_CONFIG_GET);
- if (!hdr)
- return -EMSGSIZE;
+ if (!hdr) {
+ err = -EMSGSIZE;
+ goto out;
+ }
if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
err = -EMSGSIZE;
@@ -774,11 +898,14 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
if (err)
goto msg_err;
+ mutex_unlock(&vdev->cf_mutex);
genlmsg_end(msg, hdr);
return 0;
msg_err:
genlmsg_cancel(msg, hdr);
+out:
+ mutex_unlock(&vdev->cf_mutex);
return err;
}
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 41b0cd17fcba..ddbe142af09a 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -399,14 +399,14 @@ static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
return VDPASIM_QUEUE_ALIGN;
}
-static u64 vdpasim_get_features(struct vdpa_device *vdpa)
+static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
return vdpasim->dev_attr.supported_features;
}
-static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
+static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -419,6 +419,13 @@ static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
+static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ return vdpasim->features;
+}
+
static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
struct vdpa_callback *cb)
{
@@ -613,8 +620,9 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
- .get_features = vdpasim_get_features,
- .set_features = vdpasim_set_features,
+ .get_device_features = vdpasim_get_device_features,
+ .set_driver_features = vdpasim_set_driver_features,
+ .get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
.get_vq_num_max = vdpasim_get_vq_num_max,
.get_device_id = vdpasim_get_device_id,
@@ -642,8 +650,9 @@ static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.set_vq_state = vdpasim_set_vq_state,
.get_vq_state = vdpasim_get_vq_state,
.get_vq_align = vdpasim_get_vq_align,
- .get_features = vdpasim_get_features,
- .set_features = vdpasim_set_features,
+ .get_device_features = vdpasim_get_device_features,
+ .set_driver_features = vdpasim_set_driver_features,
+ .get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
.get_vq_num_max = vdpasim_get_vq_num_max,
.get_device_id = vdpasim_get_device_id,
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index 76dd24abc791..d5324f6fd8c7 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -191,6 +191,8 @@ static struct vdpa_mgmt_dev mgmt_dev = {
.ops = &vdpasim_net_mgmtdev_ops,
.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR |
1 << VDPA_ATTR_DEV_NET_CFG_MTU),
+ .max_supported_vqs = VDPASIM_NET_VQ_NUM,
+ .supported_features = VDPASIM_NET_FEATURES,
};
static int __init vdpasim_net_init(void)
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 1daae2608860..2b1143f11d8f 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -292,14 +292,6 @@ vduse_domain_alloc_iova(struct iova_domain *iovad,
unsigned long iova_len = iova_align(iovad, size) >> shift;
unsigned long iova_pfn;
- /*
- * Freeing non-power-of-two-sized allocations back into the IOVA caches
- * will come back to bite us badly, so we have to waste a bit of space
- * rounding up anything cacheable to make sure that can't happen. The
- * order of the unadjusted size will still match upon freeing.
- */
- if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
- iova_len = roundup_pow_of_two(iova_len);
iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);
return iova_pfn << shift;
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index eddcb64a910a..f85d1a08ed87 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -573,14 +573,14 @@ static u32 vduse_vdpa_get_vq_align(struct vdpa_device *vdpa)
return dev->vq_align;
}
-static u64 vduse_vdpa_get_features(struct vdpa_device *vdpa)
+static u64 vduse_vdpa_get_device_features(struct vdpa_device *vdpa)
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
return dev->device_features;
}
-static int vduse_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+static int vduse_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct vduse_dev *dev = vdpa_to_vduse(vdpa);
@@ -588,6 +588,13 @@ static int vduse_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
+static u64 vduse_vdpa_get_driver_features(struct vdpa_device *vdpa)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+
+ return dev->driver_features;
+}
+
static void vduse_vdpa_set_config_cb(struct vdpa_device *vdpa,
struct vdpa_callback *cb)
{
@@ -721,8 +728,9 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
.set_vq_state = vduse_vdpa_set_vq_state,
.get_vq_state = vduse_vdpa_get_vq_state,
.get_vq_align = vduse_vdpa_get_vq_align,
- .get_features = vduse_vdpa_get_features,
- .set_features = vduse_vdpa_set_features,
+ .get_device_features = vduse_vdpa_get_device_features,
+ .set_driver_features = vduse_vdpa_set_driver_features,
+ .get_driver_features = vduse_vdpa_get_driver_features,
.set_config_cb = vduse_vdpa_set_config_cb,
.get_vq_num_max = vduse_vdpa_get_vq_num_max,
.get_device_id = vduse_vdpa_get_device_id,
@@ -1357,7 +1365,6 @@ err_domain:
err_str:
vduse_dev_destroy(dev);
err:
- kvfree(config_buf);
return ret;
}
@@ -1408,6 +1415,8 @@ static long vduse_ioctl(struct file *file, unsigned int cmd,
}
config.name[VDUSE_NAME_MAX - 1] = '\0';
ret = vduse_create_dev(&config, buf, control->api_version);
+ if (ret)
+ kvfree(buf);
break;
}
case VDUSE_DESTROY_DEV: {
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index e3ff7875e123..a57e381e830b 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -53,14 +53,14 @@ static struct virtio_pci_modern_device *vdpa_to_mdev(struct vdpa_device *vdpa)
return &vp_vdpa->mdev;
}
-static u64 vp_vdpa_get_features(struct vdpa_device *vdpa)
+static u64 vp_vdpa_get_device_features(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
return vp_modern_get_features(mdev);
}
-static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+static int vp_vdpa_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
@@ -69,6 +69,13 @@ static int vp_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
return 0;
}
+static u64 vp_vdpa_get_driver_features(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_driver_features(mdev);
+}
+
static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
{
struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
@@ -415,8 +422,9 @@ vp_vdpa_get_vq_notification(struct vdpa_device *vdpa, u16 qid)
}
static const struct vdpa_config_ops vp_vdpa_ops = {
- .get_features = vp_vdpa_get_features,
- .set_features = vp_vdpa_set_features,
+ .get_device_features = vp_vdpa_get_device_features,
+ .set_driver_features = vp_vdpa_set_driver_features,
+ .get_driver_features = vp_vdpa_get_driver_features,
.get_status = vp_vdpa_get_status,
.set_status = vp_vdpa_set_status,
.reset = vp_vdpa_reset,
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
index 77e584093a23..7b428eac3d3e 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c
@@ -67,7 +67,7 @@ static int vfio_set_trigger(struct vfio_fsl_mc_device *vdev,
int hwirq;
int ret;
- hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
+ hwirq = vdev->mc_dev->irqs[index]->virq;
if (irq->trigger) {
free_irq(hwirq, irq);
kfree(irq->name);
@@ -137,7 +137,7 @@ static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev,
return vfio_set_trigger(vdev, index, fd);
}
- hwirq = vdev->mc_dev->irqs[index]->msi_desc->irq;
+ hwirq = vdev->mc_dev->irqs[index]->virq;
irq = &vdev->mc_irqs[index];
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
index 362f91ec8845..352c725ccf18 100644
--- a/drivers/vfio/pci/vfio_pci_igd.c
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -309,13 +309,14 @@ static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
if ((pos & 3) && size > 2) {
u16 val;
+ __le16 lval;
ret = pci_user_read_config_word(pdev, pos, &val);
if (ret)
return ret;
- val = cpu_to_le16(val);
- if (copy_to_user(buf + count - size, &val, 2))
+ lval = cpu_to_le16(val);
+ if (copy_to_user(buf + count - size, &lval, 2))
return -EFAULT;
pos += 2;
@@ -324,13 +325,14 @@ static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
while (size > 3) {
u32 val;
+ __le32 lval;
ret = pci_user_read_config_dword(pdev, pos, &val);
if (ret)
return ret;
- val = cpu_to_le32(val);
- if (copy_to_user(buf + count - size, &val, 4))
+ lval = cpu_to_le32(val);
+ if (copy_to_user(buf + count - size, &lval, 4))
return -EFAULT;
pos += 4;
@@ -339,13 +341,14 @@ static ssize_t vfio_pci_igd_cfg_rw(struct vfio_pci_core_device *vdev,
while (size >= 2) {
u16 val;
+ __le16 lval;
ret = pci_user_read_config_word(pdev, pos, &val);
if (ret)
return ret;
- val = cpu_to_le16(val);
- if (copy_to_user(buf + count - size, &val, 2))
+ lval = cpu_to_le16(val);
+ if (copy_to_user(buf + count - size, &lval, 2))
return -EFAULT;
pos += 2;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index f17490ab238f..9394aa9444c1 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -256,7 +256,7 @@ static int vfio_dma_bitmap_alloc(struct vfio_dma *dma, size_t pgsize)
static void vfio_dma_bitmap_free(struct vfio_dma *dma)
{
- kfree(dma->bitmap);
+ kvfree(dma->bitmap);
dma->bitmap = NULL;
}
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index a09dedc79f68..05740cba1cd8 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -166,6 +166,7 @@ static int vhost_test_release(struct inode *inode, struct file *f)
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_test_flush(n);
+ kfree(n->dev.vqs);
kfree(n);
return 0;
}
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index e3c4f059b21a..851539807bc9 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -170,7 +170,7 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
* Userspace shouldn't remove status bits unless reset the
* status to 0.
*/
- if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
+ if (status != 0 && (status_old & ~status) != 0)
return -EINVAL;
if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
@@ -178,11 +178,11 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
vhost_vdpa_unsetup_vq_irq(v, i);
if (status == 0) {
- ret = ops->reset(vdpa);
+ ret = vdpa_reset(vdpa);
if (ret)
return ret;
} else
- ops->set_status(vdpa, status);
+ vdpa_set_status(vdpa, status);
if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
for (i = 0; i < nvqs; i++)
@@ -195,7 +195,7 @@ static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
struct vhost_vdpa_config *c)
{
struct vdpa_device *vdpa = v->vdpa;
- long size = vdpa->config->get_config_size(vdpa);
+ size_t size = vdpa->config->get_config_size(vdpa);
if (c->len == 0 || c->off > size)
return -EINVAL;
@@ -262,7 +262,7 @@ static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
const struct vdpa_config_ops *ops = vdpa->config;
u64 features;
- features = ops->get_features(vdpa);
+ features = ops->get_device_features(vdpa);
if (copy_to_user(featurep, &features, sizeof(features)))
return -EFAULT;
@@ -286,7 +286,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
if (copy_from_user(&features, featurep, sizeof(features)))
return -EFAULT;
- if (vdpa_set_features(vdpa, features))
+ if (vdpa_set_features(vdpa, features, false))
return -EINVAL;
return 0;
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 840d9813b0bc..fcc46380e7c9 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -78,6 +78,26 @@ config FRAMEBUFFER_CONSOLE
help
Low-level framebuffer-based console driver.
+config FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ bool "Enable legacy fbcon hardware acceleration code"
+ depends on FRAMEBUFFER_CONSOLE
+ default y if PARISC
+ default n
+ help
+ This option enables the fbcon (framebuffer text-based) hardware
+ acceleration for graphics drivers which were written for the fbdev
+ graphics interface.
+
+ On modern, mainstream machines (like x86-64) or when running a
+ modern Linux distribution, those fbdev drivers usually aren't used,
+ so enabling this option has no effect and it can safely be
+ disabled on such machines.
+
+ If you compile this kernel for older machines which still require
+ the fbdev drivers, you may want to say Y.
+
+ If unsure, select N.
+
config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
bool "Map the console to the primary display device"
depends on FRAMEBUFFER_CONSOLE
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index 01fae2c96965..f98e8f298bc1 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -43,6 +43,21 @@ static void update_attr(u8 *dst, u8 *src, int attribute,
}
}
+static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fb_copyarea area;
+
+ area.sx = sx * vc->vc_font.width;
+ area.sy = sy * vc->vc_font.height;
+ area.dx = dx * vc->vc_font.width;
+ area.dy = dy * vc->vc_font.height;
+ area.height = height * vc->vc_font.height;
+ area.width = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
@@ -378,6 +393,7 @@ static int bit_update_start(struct fb_info *info)
void fbcon_set_bitops(struct fbcon_ops *ops)
{
+ ops->bmove = bit_bmove;
ops->clear = bit_clear;
ops->putcs = bit_putcs;
ops->clear_margins = bit_clear_margins;
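
The restored bit_bmove() converts a move expressed in character cells into the pixel rectangle that fb_copyarea() understands by scaling every coordinate by the console font size. A runnable model of the conversion (values illustrative):

#include <stdio.h>

int main(void)
{
	int fw = 8, fh = 16;			/* font cell size in pixels */
	int sy = 5, sx = 0, dy = 3, dx = 0;	/* source/dest in text cells */
	int rows = 2, cols = 10;

	printf("copy %dx%d px from (%d,%d) to (%d,%d)\n",
	       cols * fw, rows * fh,		/* width, height */
	       sx * fw, sy * fh,		/* source corner */
	       dx * fw, dy * fh);		/* destination corner */
	return 0;
}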
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 99ecd9a6d844..2fc1b80a26ad 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -173,6 +173,8 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
int count, int ypos, int xpos);
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only);
static void fbcon_cursor(struct vc_data *vc, int mode);
+static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
+ int height, int width);
static int fbcon_switch(struct vc_data *vc);
static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch);
static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
@@ -180,8 +182,16 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table);
/*
* Internal routines
*/
+static __inline__ void ywrap_up(struct vc_data *vc, int count);
+static __inline__ void ywrap_down(struct vc_data *vc, int count);
+static __inline__ void ypan_up(struct vc_data *vc, int count);
+static __inline__ void ypan_down(struct vc_data *vc, int count);
+static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
+ int dy, int dx, int height, int width, u_int y_break);
static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
int unit);
+static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
+ int line, int count, int dy);
static void fbcon_modechanged(struct fb_info *info);
static void fbcon_set_all_vcs(struct fb_info *info);
static void fbcon_start(void);
@@ -1125,6 +1135,14 @@ static void fbcon_init(struct vc_data *vc, int init)
ops->graphics = 0;
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ if ((info->flags & FBINFO_HWACCEL_COPYAREA) &&
+ !(info->flags & FBINFO_HWACCEL_DISABLED))
+ p->scrollmode = SCROLL_MOVE;
+ else /* default to something safe */
+ p->scrollmode = SCROLL_REDRAW;
+#endif
+
/*
* ++guenther: console.c:vc_allocate() relies on initializing
* vc_{cols,rows}, but we must not set those if we are only
@@ -1211,13 +1229,14 @@ finished:
* This system is now divided into two levels because of complications
* caused by hardware scrolling. Top level functions:
*
- * fbcon_clear(), fbcon_putc(), fbcon_clear_margins()
+ * fbcon_bmove(), fbcon_clear(), fbcon_putc(), fbcon_clear_margins()
*
* handles y values in range [0, scr_height-1] that correspond to real
* screen positions. y_wrap shift means that first line of bitmap may be
* anywhere on this display. These functions convert lineoffsets to
* bitmap offsets and deal with the wrap-around case by splitting blits.
*
+ * fbcon_bmove_physical_8() -- These functions are fast implementations
* fbcon_clear_physical_8() -- of original fbcon_XXX fns.
* fbcon_putc_physical_8() -- (font width != 8) may be added later
*
@@ -1390,6 +1409,224 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var,
}
}
+static __inline__ void ywrap_up(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll += count;
+ if (p->yscroll >= p->vrows) /* Deal with wrap */
+ p->yscroll -= p->vrows;
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode |= FB_VMODE_YWRAP;
+ ops->update_start(info);
+ scrollback_max += count;
+ if (scrollback_max > scrollback_phys_max)
+ scrollback_max = scrollback_phys_max;
+ scrollback_current = 0;
+}
+
+static __inline__ void ywrap_down(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll -= count;
+ if (p->yscroll < 0) /* Deal with wrap */
+ p->yscroll += p->vrows;
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode |= FB_VMODE_YWRAP;
+ ops->update_start(info);
+ scrollback_max -= count;
+ if (scrollback_max < 0)
+ scrollback_max = 0;
+ scrollback_current = 0;
+}
+
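The two ywrap helpers above never move pixel data: they only step p->yscroll modulo p->vrows and reprogram the panning offset, relying on the hardware's ability to wrap the display. A minimal userspace sketch of that wrap arithmetic (VROWS and FONT_HEIGHT are made-up values, not taken from any driver):

    #include <stdio.h>

    #define VROWS       64   /* virtual character rows in the framebuffer */
    #define FONT_HEIGHT 16   /* pixels per character row */

    static int ywrap(int yscroll, int count)
    {
        yscroll += count;        /* count > 0 scrolls up, < 0 scrolls down */
        if (yscroll >= VROWS)    /* deal with wrap, as ywrap_up() does */
            yscroll -= VROWS;
        if (yscroll < 0)         /* ...and as ywrap_down() does */
            yscroll += VROWS;
        return yscroll;
    }

    int main(void)
    {
        int yscroll = ywrap(62, 3);  /* wraps past VROWS back to row 1 */

        printf("yscroll=%d yoffset=%d\n", yscroll, yscroll * FONT_HEIGHT);
        return 0;
    }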
+static __inline__ void ypan_up(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ p->yscroll += count;
+ if (p->yscroll > p->vrows - vc->vc_rows) {
+ ops->bmove(vc, info, p->vrows - vc->vc_rows,
+ 0, 0, 0, vc->vc_rows, vc->vc_cols);
+ p->yscroll -= p->vrows - vc->vc_rows;
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max += count;
+ if (scrollback_max > scrollback_phys_max)
+ scrollback_max = scrollback_phys_max;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll += count;
+
+ if (p->yscroll > p->vrows - vc->vc_rows) {
+ p->yscroll -= p->vrows - vc->vc_rows;
+ fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t);
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max += count;
+ if (scrollback_max > scrollback_phys_max)
+ scrollback_max = scrollback_phys_max;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_down(struct vc_data *vc, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ p->yscroll -= count;
+ if (p->yscroll < 0) {
+ ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows,
+ 0, vc->vc_rows, vc->vc_cols);
+ p->yscroll += p->vrows - vc->vc_rows;
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max -= count;
+ if (scrollback_max < 0)
+ scrollback_max = 0;
+ scrollback_current = 0;
+}
+
+static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ p->yscroll -= count;
+
+ if (p->yscroll < 0) {
+ p->yscroll += p->vrows - vc->vc_rows;
+ fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count);
+ }
+
+ ops->var.xoffset = 0;
+ ops->var.yoffset = p->yscroll * vc->vc_font.height;
+ ops->var.vmode &= ~FB_VMODE_YWRAP;
+ ops->update_start(info);
+ fbcon_clear_margins(vc, 1);
+ scrollback_max -= count;
+ if (scrollback_max < 0)
+ scrollback_max = 0;
+ scrollback_current = 0;
+}
+
+static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
+ int line, int count, int dy)
+{
+ unsigned short *s = (unsigned short *)
+ (vc->vc_origin + vc->vc_size_row * line);
+
+ while (count--) {
+ unsigned short *start = s;
+ unsigned short *le = advance_row(s, 1);
+ unsigned short c;
+ int x = 0;
+ unsigned short attr = 1;
+
+ do {
+ c = scr_readw(s);
+ if (attr != (c & 0xff00)) {
+ attr = c & 0xff00;
+ if (s > start) {
+ fbcon_putcs(vc, start, s - start,
+ dy, x);
+ x += s - start;
+ start = s;
+ }
+ }
+ console_conditional_schedule();
+ s++;
+ } while (s < le);
+ if (s > start)
+ fbcon_putcs(vc, start, s - start, dy, x);
+ console_conditional_schedule();
+ dy++;
+ }
+}
+
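fbcon_redraw_move() above avoids per-character blits by batching runs of cells that share the same attribute byte into one fbcon_putcs() call. A rough standalone model of that run splitting (the 16-bit cell layout with the attribute in the high byte matches the loop above; putcs_stub() is a stand-in, not a real fbcon call):

    #include <stdio.h>

    /* Stand-in for fbcon_putcs(): report each run it would draw. */
    static void putcs_stub(const unsigned short *s, long n, int x)
    {
        printf("draw %ld cell(s) at column %d, attr 0x%02x\n", n, x, s[0] >> 8);
    }

    int main(void)
    {
        /* One row: attribute in the high byte, glyph in the low byte. */
        unsigned short row[] = { 0x0741, 0x0742, 0x7043, 0x7044, 0x0745 };
        const unsigned short *s = row, *start = row;
        unsigned short attr = row[0] & 0xff00;
        int len = sizeof(row) / sizeof(row[0]), x = 0;

        for (int i = 0; i < len; i++, s++) {
            if (attr != (*s & 0xff00)) {   /* attribute changed: flush run */
                attr = *s & 0xff00;
                putcs_stub(start, s - start, x);
                x += s - start;
                start = s;
            }
        }
        if (s > start)                     /* flush the trailing run */
            putcs_stub(start, s - start, x);
        return 0;
    }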
+static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info,
+ struct fbcon_display *p, int line, int count, int ycount)
+{
+ int offset = ycount * vc->vc_cols;
+ unsigned short *d = (unsigned short *)
+ (vc->vc_origin + vc->vc_size_row * line);
+ unsigned short *s = d + offset;
+ struct fbcon_ops *ops = info->fbcon_par;
+
+ while (count--) {
+ unsigned short *start = s;
+ unsigned short *le = advance_row(s, 1);
+ unsigned short c;
+ int x = 0;
+
+ do {
+ c = scr_readw(s);
+
+ if (c == scr_readw(d)) {
+ if (s > start) {
+ ops->bmove(vc, info, line + ycount, x,
+ line, x, 1, s-start);
+ x += s - start + 1;
+ start = s + 1;
+ } else {
+ x++;
+ start++;
+ }
+ }
+
+ scr_writew(c, d);
+ console_conditional_schedule();
+ s++;
+ d++;
+ } while (s < le);
+ if (s > start)
+ ops->bmove(vc, info, line + ycount, x, line, x, 1,
+ s-start);
+ console_conditional_schedule();
+ if (ycount > 0)
+ line++;
+ else {
+ line--;
+ /* NOTE: vc_size_row is in bytes, so on these u16 pointers
+ * this steps back two lines: one to undo the loop's
+ * advance and one to reach the previous row */
+ s -= vc->vc_size_row;
+ d -= vc->vc_size_row;
+ }
+ }
+}
+
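Where fbcon_redraw_move() unconditionally redraws, fbcon_redraw_blit() above diffs each destination cell against the shifted source cell and only issues ops->bmove() for spans that actually changed, which is what keeps SCROLL_MOVE cheap when most of the scrolled region is identical. A simplified userland model of that skip logic (move_stub() is a placeholder for ops->bmove()):

    #include <stdio.h>

    static void move_stub(int x, int n)
    {
        printf("bmove %d cell(s) starting at column %d\n", n, x);
    }

    int main(void)
    {
        /* New row contents vs. what is currently on screen. */
        unsigned short src[] = { 1, 2, 3, 4, 5, 6 };
        unsigned short dst[] = { 1, 9, 9, 4, 5, 9 };
        int len = 6, x = 0, start = 0;

        for (int i = 0; i < len; i++) {
            if (src[i] == dst[i]) {        /* cell already correct */
                if (i > start)             /* flush the pending span */
                    move_stub(x, i - start);
                x = i + 1;                 /* skip the matching cell */
                start = i + 1;
            }
            dst[i] = src[i];               /* keep the shadow copy current */
        }
        if (len > start)                   /* flush the trailing span */
            move_stub(x, len - start);
        return 0;
    }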
static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p,
int line, int count, int offset)
{
@@ -1450,6 +1687,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
{
struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
struct fbcon_display *p = &fb_display[vc->vc_num];
+ int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK;
if (fbcon_is_inactive(vc, info))
return true;
@@ -1466,32 +1704,291 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
case SM_UP:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- fbcon_redraw(vc, p, t, b - t - count,
- count * vc->vc_cols);
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
- scr_memsetw((unsigned short *) (vc->vc_origin +
- vc->vc_size_row *
- (b - count)),
- vc->vc_video_erase_char,
- vc->vc_size_row * count);
- return true;
+ if (logo_shown >= 0)
+ goto redraw_up;
+ switch (fb_scrollmode(p)) {
+ case SCROLL_MOVE:
+ fbcon_redraw_blit(vc, info, p, t, b - t - count,
+ count);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ (b - count)),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+
+ case SCROLL_WRAP_MOVE:
+ if (b - t - count > 3 * vc->vc_rows >> 2) {
+ if (t > 0)
+ fbcon_bmove(vc, 0, 0, count, 0, t,
+ vc->vc_cols);
+ ywrap_up(vc, count);
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b - count, 0, b, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t + count, 0, t, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_up;
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_REDRAW:
+ if ((p->yscroll + count <=
+ 2 * (p->vrows - vc->vc_rows))
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (t > 0)
+ fbcon_redraw_move(vc, p, 0, t, count);
+ ypan_up_redraw(vc, t, count);
+ if (vc->vc_rows - b > 0)
+ fbcon_redraw_move(vc, p, b,
+ vc->vc_rows - b, b);
+ } else
+ fbcon_redraw_move(vc, p, t + count, b - t - count, t);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_MOVE:
+ if ((p->yscroll + count <=
+ 2 * (p->vrows - vc->vc_rows))
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (t > 0)
+ fbcon_bmove(vc, 0, 0, count, 0, t,
+ vc->vc_cols);
+ ypan_up(vc, count);
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b - count, 0, b, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t + count, 0, t, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_up;
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_REDRAW:
+ redraw_up:
+ fbcon_redraw(vc, p, t, b - t - count,
+ count * vc->vc_cols);
+ fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ (b - count)),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+ }
+ break;
case SM_DOWN:
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
- fbcon_redraw(vc, p, b - 1, b - t - count,
- -count * vc->vc_cols);
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
- scr_memsetw((unsigned short *) (vc->vc_origin +
- vc->vc_size_row *
- t),
- vc->vc_video_erase_char,
- vc->vc_size_row * count);
- return true;
+ if (logo_shown >= 0)
+ goto redraw_down;
+ switch (fb_scrollmode(p)) {
+ case SCROLL_MOVE:
+ fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
+ -count);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ t),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+
+ case SCROLL_WRAP_MOVE:
+ if (b - t - count > 3 * vc->vc_rows >> 2) {
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b, 0, b - count, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ ywrap_down(vc, count);
+ if (t > 0)
+ fbcon_bmove(vc, count, 0, 0, 0, t,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t, 0, t + count, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_down;
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_MOVE:
+ if ((count - p->yscroll <= p->vrows - vc->vc_rows)
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (vc->vc_rows - b > 0)
+ fbcon_bmove(vc, b, 0, b - count, 0,
+ vc->vc_rows - b,
+ vc->vc_cols);
+ ypan_down(vc, count);
+ if (t > 0)
+ fbcon_bmove(vc, count, 0, 0, 0, t,
+ vc->vc_cols);
+ } else if (info->flags & FBINFO_READS_FAST)
+ fbcon_bmove(vc, t, 0, t + count, 0,
+ b - t - count, vc->vc_cols);
+ else
+ goto redraw_down;
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_PAN_REDRAW:
+ if ((count - p->yscroll <= p->vrows - vc->vc_rows)
+ && ((!scroll_partial && (b - t == vc->vc_rows))
+ || (scroll_partial
+ && (b - t - count >
+ 3 * vc->vc_rows >> 2)))) {
+ if (vc->vc_rows - b > 0)
+ fbcon_redraw_move(vc, p, b, vc->vc_rows - b,
+ b - count);
+ ypan_down_redraw(vc, t, count);
+ if (t > 0)
+ fbcon_redraw_move(vc, p, count, t, 0);
+ } else
+ fbcon_redraw_move(vc, p, t, b - t - count, t + count);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ break;
+
+ case SCROLL_REDRAW:
+ redraw_down:
+ fbcon_redraw(vc, p, b - 1, b - t - count,
+ -count * vc->vc_cols);
+ fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ scr_memsetw((unsigned short *) (vc->vc_origin +
+ vc->vc_size_row *
+ t),
+ vc->vc_video_erase_char,
+ vc->vc_size_row * count);
+ return true;
+ }
}
return false;
}
+
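Several branches above share the heuristic b - t - count > 3 * vc->vc_rows >> 2 (the shift binds after the multiply, so this is three quarters of the screen): panning or wrapping the whole display only pays off when more than 3/4 of it would otherwise have to be copied. For a 60-row console with t = 0, b = 60 and count = 3, b - t - count = 57 > 45, so the pan/wrap path wins; scrolling only a 10-row region of the same console leaves just 7 rows to preserve, fails the test, and falls back to a plain bmove or a redraw.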
+static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx,
+ int height, int width)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_display *p = &fb_display[vc->vc_num];
+
+ if (fbcon_is_inactive(vc, info))
+ return;
+
+ if (!width || !height)
+ return;
+
+ /* Split blits that cross the physical y_wrap boundary.
+ * The pathological case involves 4 blits; recursive
+ * code is cleaner than an unrolled version.
+ *
+ * Recursive invocations don't need to erase the cursor over and
+ * over again, so we use fbcon_bmove_rec()
+ */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width,
+ p->vrows - p->yscroll);
+}
+
+static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx,
+ int dy, int dx, int height, int width, u_int y_break)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ u_int b;
+
+ if (sy < y_break && sy + height > y_break) {
+ b = y_break - sy;
+ if (dy < sy) { /* Avoid trashing self */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ } else {
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ }
+ return;
+ }
+
+ if (dy < y_break && dy + height > y_break) {
+ b = y_break - dy;
+ if (dy < sy) { /* Avoid trashing self */
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ } else {
+ fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx,
+ height - b, width, y_break);
+ fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width,
+ y_break);
+ }
+ return;
+ }
+ ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx,
+ height, width);
+}
+
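As a concrete instance of the split above: with y_break = 20, moving a 10-row block from sy = 15 to dy = 25 straddles the break on the source side, so it is cut at b = y_break - sy = 5; because dy > sy, the lower piece (source rows 20..24) is moved first and the upper piece (rows 15..19) second, so no destination row can be clobbered before it has been read. Each piece is then re-checked against y_break on the destination side, so a single logical bmove can end up as several hardware blits.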
+static void updatescrollmode_accel(struct fbcon_display *p,
+ struct fb_info *info,
+ struct vc_data *vc)
+{
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ struct fbcon_ops *ops = info->fbcon_par;
+ int cap = info->flags;
+ u16 t = 0;
+ int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep,
+ info->fix.xpanstep);
+ int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t);
+ int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres);
+ int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual,
+ info->var.xres_virtual);
+ int good_pan = (cap & FBINFO_HWACCEL_YPAN) &&
+ divides(ypan, vc->vc_font.height) && vyres > yres;
+ int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) &&
+ divides(ywrap, vc->vc_font.height) &&
+ divides(vc->vc_font.height, vyres) &&
+ divides(vc->vc_font.height, yres);
+ int reading_fast = cap & FBINFO_READS_FAST;
+ int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) &&
+ !(cap & FBINFO_HWACCEL_DISABLED);
+ int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) &&
+ !(cap & FBINFO_HWACCEL_DISABLED);
+
+ if (good_wrap || good_pan) {
+ if (reading_fast || fast_copyarea)
+ p->scrollmode = good_wrap ?
+ SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE;
+ else
+ p->scrollmode = good_wrap ? SCROLL_REDRAW :
+ SCROLL_PAN_REDRAW;
+ } else {
+ if (reading_fast || (fast_copyarea && !fast_imageblit))
+ p->scrollmode = SCROLL_MOVE;
+ else
+ p->scrollmode = SCROLL_REDRAW;
+ }
+#endif
+}
+
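The selection above reduces to a small decision table: with a usable pan or wrap step, the MOVE variants are chosen when reads or copyarea are fast and the REDRAW variants otherwise; without pan/wrap, SCROLL_MOVE is picked only when copyarea is accelerated but imageblit is not, matching the ordering documented in fbcon.h. A compact restatement of that policy as a standalone function (the flag names are local stand-ins, not the FBINFO_* bits):

    #include <stdio.h>

    enum mode { MOVE, PAN_MOVE, WRAP_MOVE, REDRAW, PAN_REDRAW };

    static enum mode pick(int good_wrap, int good_pan, int reads_fast,
                          int fast_copy, int fast_blit)
    {
        if (good_wrap || good_pan) {
            if (reads_fast || fast_copy)
                return good_wrap ? WRAP_MOVE : PAN_MOVE;
            return good_wrap ? REDRAW : PAN_REDRAW;
        }
        if (reads_fast || (fast_copy && !fast_blit))
            return MOVE;
        return REDRAW;
    }

    int main(void)
    {
        /* Accelerated copyarea and imageblit, no pan/wrap: prints 3 (REDRAW). */
        printf("%d\n", pick(0, 0, 0, 1, 1));
        return 0;
    }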
static void updatescrollmode(struct fbcon_display *p,
struct fb_info *info,
struct vc_data *vc)
@@ -1507,6 +2004,9 @@ static void updatescrollmode(struct fbcon_display *p,
p->vrows -= (yres - (fh * vc->vc_rows)) / fh;
if ((yres % fh) && (vyres % fh < yres % fh))
p->vrows--;
+
+ /* update scrollmode in case hardware acceleration is used */
+ updatescrollmode_accel(p, info, vc);
}
#define PITCH(w) (((w) + 7) >> 3)
@@ -1664,7 +2164,21 @@ static int fbcon_switch(struct vc_data *vc)
updatescrollmode(p, info, vc);
- scrollback_phys_max = 0;
+ switch (fb_scrollmode(p)) {
+ case SCROLL_WRAP_MOVE:
+ scrollback_phys_max = p->vrows - vc->vc_rows;
+ break;
+ case SCROLL_PAN_MOVE:
+ case SCROLL_PAN_REDRAW:
+ scrollback_phys_max = p->vrows - 2 * vc->vc_rows;
+ if (scrollback_phys_max < 0)
+ scrollback_phys_max = 0;
+ break;
+ default:
+ scrollback_phys_max = 0;
+ break;
+ }
+
scrollback_max = 0;
scrollback_current = 0;
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index a00603b4451a..969d41ecede5 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -29,6 +29,9 @@ struct fbcon_display {
/* Filled in by the low-level console driver */
const u_char *fontdata;
int userfont; /* != 0 if fontdata kmalloc()ed */
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ u_short scrollmode; /* Scroll Method, use fb_scrollmode() */
+#endif
u_short inverse; /* != 0 text black on white as default */
short yscroll; /* Hardware scrolling */
int vrows; /* number of virtual rows */
@@ -51,6 +54,8 @@ struct fbcon_display {
};
struct fbcon_ops {
+ void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width);
void (*clear)(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width);
void (*putcs)(struct vc_data *vc, struct fb_info *info,
@@ -149,6 +154,73 @@ static inline int attr_col_ec(int shift, struct vc_data *vc,
#define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0)
#define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1)
+ /*
+ * Scroll Method
+ */
+
+/* There are several methods fbcon can use to move text around the screen:
+ *
+ * Operation Pan Wrap
+ *---------------------------------------------
+ * SCROLL_MOVE copyarea No No
+ * SCROLL_PAN_MOVE copyarea Yes No
+ * SCROLL_WRAP_MOVE copyarea No Yes
+ * SCROLL_REDRAW imageblit No No
+ * SCROLL_PAN_REDRAW imageblit Yes No
+ * SCROLL_WRAP_REDRAW imageblit No Yes
+ *
+ * (SCROLL_WRAP_REDRAW is not implemented yet)
+ *
+ * In general, fbcon will choose the best scrolling
+ * method based on the rule below:
+ *
+ * Pan/Wrap > accel imageblit > accel copyarea >
+ * soft imageblit > (soft copyarea)
+ *
+ * Exception to the rule: Pan + accel copyarea is
+ * preferred over Pan + accel imageblit.
+ *
+ * The above is typical for PCI/AGP cards. Unless
+ * overridden, fbcon will never use soft copyarea.
+ *
+ * If you need to override the above rule, set the
+ * appropriate flags in fb_info->flags. For example,
+ * to prefer copyarea over imageblit, set
+ * FBINFO_READS_FAST.
+ *
+ * Other notes:
+ * + use the hardware engine to move the text
+ * (hw-accelerated copyarea() and fillrect())
+ * + use hardware-supported panning on a large virtual screen
+ * + amifb can not only pan, but also wrap the display by N lines
+ * (i.e. visible line i = physical line (i+N) % yres).
+ * + read what's already rendered on the screen and
+ * write it in a different place (this is cfb_copyarea())
+ * + re-render the text to the screen
+ *
+ * Whether to use wrapping or panning can only be figured out at
+ * runtime (when we know whether our font height is a multiple
+ * of the pan/wrap step)
+ *
+ */
+
+#define SCROLL_MOVE 0x001
+#define SCROLL_PAN_MOVE 0x002
+#define SCROLL_WRAP_MOVE 0x003
+#define SCROLL_REDRAW 0x004
+#define SCROLL_PAN_REDRAW 0x005
+
+static inline u_short fb_scrollmode(struct fbcon_display *fb)
+{
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION
+ return fb->scrollmode;
+#else
+ /* hardcoded to SCROLL_REDRAW if acceleration was disabled. */
+ return SCROLL_REDRAW;
+#endif
+}
+
#ifdef CONFIG_FB_TILEBLITTING
extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
#endif
diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
index ffa78936eaab..2789ace79634 100644
--- a/drivers/video/fbdev/core/fbcon_ccw.c
+++ b/drivers/video/fbdev/core/fbcon_ccw.c
@@ -59,12 +59,31 @@ static void ccw_update_attr(u8 *dst, u8 *src, int attribute,
}
}
+
+static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fb_copyarea area;
+ u32 vyres = GETVYRES(ops->p, info);
+
+ area.sx = sy * vc->vc_font.height;
+ area.sy = vyres - ((sx + width) * vc->vc_font.width);
+ area.dx = dy * vc->vc_font.height;
+ area.dy = vyres - ((dx + width) * vc->vc_font.width);
+ area.width = height * vc->vc_font.height;
+ area.height = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
+ struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = sy * vc->vc_font.height;
@@ -121,7 +140,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info,
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -210,7 +229,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -368,7 +387,7 @@ static int ccw_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
u32 yoffset;
- u32 vyres = info->var.yres;
+ u32 vyres = GETVYRES(ops->p, info);
int err;
yoffset = (vyres - info->var.yres) - ops->var.xoffset;
@@ -383,6 +402,7 @@ static int ccw_update_start(struct fb_info *info)
void fbcon_rotate_ccw(struct fbcon_ops *ops)
{
+ ops->bmove = ccw_bmove;
ops->clear = ccw_clear;
ops->putcs = ccw_putcs;
ops->clear_margins = ccw_clear_margins;
diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
index 92e5b7fb51ee..86a254c1b2b7 100644
--- a/drivers/video/fbdev/core/fbcon_cw.c
+++ b/drivers/video/fbdev/core/fbcon_cw.c
@@ -44,12 +44,31 @@ static void cw_update_attr(u8 *dst, u8 *src, int attribute,
}
}
+
+static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fb_copyarea area;
+ u32 vxres = GETVXRES(ops->p, info);
+
+ area.sx = vxres - ((sy + height) * vc->vc_font.height);
+ area.sy = sx * vc->vc_font.width;
+ area.dx = vxres - ((dy + height) * vc->vc_font.height);
+ area.dy = dx * vc->vc_font.width;
+ area.width = height * vc->vc_font.height;
+ area.height = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
+ struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dx = vxres - ((sy + height) * vc->vc_font.height);
@@ -106,7 +125,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info,
u32 cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -193,7 +212,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -350,7 +369,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
static int cw_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
- u32 vxres = info->var.xres;
+ u32 vxres = GETVXRES(ops->p, info);
u32 xoffset;
int err;
@@ -366,6 +385,7 @@ static int cw_update_start(struct fb_info *info)
void fbcon_rotate_cw(struct fbcon_ops *ops)
{
+ ops->bmove = cw_bmove;
ops->clear = cw_clear;
ops->putcs = cw_putcs;
ops->clear_margins = cw_clear_margins;
diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h
index b528b2e54283..01cbe303b8a2 100644
--- a/drivers/video/fbdev/core/fbcon_rotate.h
+++ b/drivers/video/fbdev/core/fbcon_rotate.h
@@ -11,6 +11,15 @@
#ifndef _FBCON_ROTATE_H
#define _FBCON_ROTATE_H
+#define GETVYRES(s,i) ({ \
+ (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE) ? \
+ (i)->var.yres : (i)->var.yres_virtual; })
+
+#define GETVXRES(s,i) ({ \
+ (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE || !(i)->fix.xpanstep) ? \
+ (i)->var.xres : (i)->var.xres_virtual; })
+
+
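The effect of the two macros is easiest to see with numbers: on a 1024x768 visible mode with a 1024x2048 virtual framebuffer, SCROLL_REDRAW and SCROLL_MOVE never pan, so the rotation code must clip against var.yres = 768, while the panning and wrapping modes blit in coordinates of the whole virtual screen, so GETVYRES() must yield var.yres_virtual = 2048; using the visible height there would put the rotated copy and clear rectangles on the wrong scanlines.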
static inline int pattern_test_bit(u32 x, u32 y, u32 pitch, const char *pat)
{
u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8;
diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
index 09619bd8e021..23bc045769d0 100644
--- a/drivers/video/fbdev/core/fbcon_ud.c
+++ b/drivers/video/fbdev/core/fbcon_ud.c
@@ -44,13 +44,33 @@ static void ud_update_attr(u8 *dst, u8 *src, int attribute,
}
}
+
+static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct fb_copyarea area;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
+
+ area.sy = vyres - ((sy + height) * vc->vc_font.height);
+ area.sx = vxres - ((sx + width) * vc->vc_font.width);
+ area.dy = vyres - ((dy + height) * vc->vc_font.height);
+ area.dx = vxres - ((dx + width) * vc->vc_font.width);
+ area.height = height * vc->vc_font.height;
+ area.width = width * vc->vc_font.width;
+
+ info->fbops->fb_copyarea(info, &area);
+}
+
static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
+ struct fbcon_ops *ops = info->fbcon_par;
struct fb_fillrect region;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
region.color = attr_bgcol_ec(bgshift,vc,info);
region.dy = vyres - ((sy + height) * vc->vc_font.height);
@@ -142,8 +162,8 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info,
u32 mod = vc->vc_font.width % 8, cnt, pitch, size;
u32 attribute = get_attribute(info, scr_readw(s));
u8 *dst, *buf = NULL;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -239,8 +259,8 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
int attribute, use_sw = vc->vc_cursor_type & CUR_SW;
int err = 1, dx, dy;
char *src;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
if (!ops->fontbuffer)
return;
@@ -390,8 +410,8 @@ static int ud_update_start(struct fb_info *info)
{
struct fbcon_ops *ops = info->fbcon_par;
int xoffset, yoffset;
- u32 vyres = info->var.yres;
- u32 vxres = info->var.xres;
+ u32 vyres = GETVYRES(ops->p, info);
+ u32 vxres = GETVXRES(ops->p, info);
int err;
xoffset = vxres - info->var.xres - ops->var.xoffset;
@@ -409,6 +429,7 @@ static int ud_update_start(struct fb_info *info)
void fbcon_rotate_ud(struct fbcon_ops *ops)
{
+ ops->bmove = ud_bmove;
ops->clear = ud_clear;
ops->putcs = ud_putcs;
ops->clear_margins = ud_clear_margins;
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 0fa7ede94fa6..13083ad8d751 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -1160,6 +1160,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
ret = fbcon_set_con2fb_map_ioctl(argp);
break;
case FBIOBLANK:
+ if (arg > FB_BLANK_POWERDOWN)
+ return -EINVAL;
console_lock();
lock_fb_info(info);
ret = fb_blank(info, arg);
diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
index 72af95053bcb..2768eff247ba 100644
--- a/drivers/video/fbdev/core/tileblit.c
+++ b/drivers/video/fbdev/core/tileblit.c
@@ -16,6 +16,21 @@
#include <asm/types.h>
#include "fbcon.h"
+static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy,
+ int sx, int dy, int dx, int height, int width)
+{
+ struct fb_tilearea area;
+
+ area.sx = sx;
+ area.sy = sy;
+ area.dx = dx;
+ area.dy = dy;
+ area.height = height;
+ area.width = width;
+
+ info->tileops->fb_tilecopy(info, &area);
+}
+
static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy,
int sx, int height, int width)
{
@@ -118,6 +133,7 @@ void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info)
struct fb_tilemap map;
struct fbcon_ops *ops = info->fbcon_par;
+ ops->bmove = tile_bmove;
ops->clear = tile_clear;
ops->putcs = tile_putcs;
ops->clear_margins = tile_clear_margins;
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index 23999df52739..c8e0ea27caf1 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -287,8 +287,6 @@ struct hvfb_par {
static uint screen_width = HVFB_WIDTH;
static uint screen_height = HVFB_HEIGHT;
-static uint screen_width_max = HVFB_WIDTH;
-static uint screen_height_max = HVFB_HEIGHT;
static uint screen_depth;
static uint screen_fb_size;
static uint dio_fb_size; /* FB size for deferred IO */
@@ -582,7 +580,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
int ret = 0;
unsigned long t;
u8 index;
- int i;
memset(msg, 0, sizeof(struct synthvid_msg));
msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
@@ -613,13 +610,6 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
goto out;
}
- for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
- screen_width_max = max_t(unsigned int, screen_width_max,
- msg->resolution_resp.supported_resolution[i].width);
- screen_height_max = max_t(unsigned int, screen_height_max,
- msg->resolution_resp.supported_resolution[i].height);
- }
-
screen_width =
msg->resolution_resp.supported_resolution[index].width;
screen_height =
@@ -941,7 +931,7 @@ static void hvfb_get_option(struct fb_info *info)
if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
(synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
- (x > screen_width_max || y > screen_height_max)) ||
+ (x * y * screen_depth / 8 > screen_fb_size)) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) ||
(par->synthvid_version == SYNTHVID_VERSION_WIN7 &&
@@ -1194,8 +1184,8 @@ static int hvfb_probe(struct hv_device *hdev,
}
hvfb_get_option(info);
- pr_info("Screen resolution: %dx%d, Color depth: %d\n",
- screen_width, screen_height, screen_depth);
+ pr_info("Screen resolution: %dx%d, Color depth: %d, Frame buffer size: %d\n",
+ screen_width, screen_height, screen_depth, screen_fb_size);
ret = hvfb_getmem(hdev, info);
if (ret) {
diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c
index 0fe922f726e9..bcacfb6934fa 100644
--- a/drivers/video/fbdev/skeletonfb.c
+++ b/drivers/video/fbdev/skeletonfb.c
@@ -505,15 +505,15 @@ void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region)
}
/**
- * xxxfb_copyarea - OBSOLETE function.
+ * xxxfb_copyarea - REQUIRED function. Can use generic routines if
+ * the hardware is non-accelerated and packed-pixel based.
* Copies one area of the screen to another area.
- * Will be deleted in a future version
*
* @info: frame buffer structure that represents a single frame buffer
* @area: Structure providing the data to copy the framebuffer contents
* from one region to another.
*
- * This drawing operation copied a rectangular area from one area of the
+ * This drawing operation copies a rectangular area from one area of the
* screen to another area.
*/
void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
@@ -645,9 +645,9 @@ static const struct fb_ops xxxfb_ops = {
.fb_setcolreg = xxxfb_setcolreg,
.fb_blank = xxxfb_blank,
.fb_pan_display = xxxfb_pan_display,
- .fb_fillrect = xxxfb_fillrect, /* Needed !!! */
- .fb_copyarea = xxxfb_copyarea, /* Obsolete */
- .fb_imageblit = xxxfb_imageblit, /* Needed !!! */
+ .fb_fillrect = xxxfb_fillrect, /* Needed !!! */
+ .fb_copyarea = xxxfb_copyarea, /* Needed !!! */
+ .fb_imageblit = xxxfb_imageblit, /* Needed !!! */
.fb_cursor = xxxfb_cursor, /* Optional !!! */
.fb_sync = xxxfb_sync,
.fb_ioctl = xxxfb_ioctl,
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index e2757ff1c23d..96e312a3eac7 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -184,6 +184,25 @@ static inline void setindex(int index)
vga_io_w(VGA_GFX_I, index);
}
+/* Check if the video mode is supported by the driver */
+static inline int check_mode_supported(void)
+{
+ /* non-x86 architectures treat orig_video_isVGA as a boolean flag */
+#if defined(CONFIG_X86)
+ /* only EGA and VGA in 16 color graphic mode are supported */
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EGAC &&
+ screen_info.orig_video_isVGA != VIDEO_TYPE_VGAC)
+ return -ENODEV;
+
+ if (screen_info.orig_video_mode != 0x0D && /* 320x200/4 (EGA) */
+ screen_info.orig_video_mode != 0x0E && /* 640x200/4 (EGA) */
+ screen_info.orig_video_mode != 0x10 && /* 640x350/4 (EGA) */
+ screen_info.orig_video_mode != 0x12) /* 640x480/4 (VGA) */
+ return -ENODEV;
+#endif
+ return 0;
+}
+
static void vga16fb_pan_var(struct fb_info *info,
struct fb_var_screeninfo *var)
{
@@ -1422,6 +1441,11 @@ static int __init vga16fb_init(void)
vga16fb_setup(option);
#endif
+
+ ret = check_mode_supported();
+ if (ret)
+ return ret;
+
ret = platform_driver_register(&vga16fb_driver);
if (!ret) {
diff --git a/drivers/virt/acrn/ioreq.c b/drivers/virt/acrn/ioreq.c
index 80b2e3f0e276..5ff1c53740c0 100644
--- a/drivers/virt/acrn/ioreq.c
+++ b/drivers/virt/acrn/ioreq.c
@@ -246,8 +246,7 @@ void acrn_ioreq_request_clear(struct acrn_vm *vm)
spin_lock_bh(&vm->ioreq_clients_lock);
client = vm->default_client;
if (client) {
- vcpu = find_next_bit(client->ioreqs_map,
- ACRN_IO_REQUEST_MAX, 0);
+ vcpu = find_first_bit(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
while (vcpu < ACRN_IO_REQUEST_MAX) {
acrn_ioreq_complete_request(client, vcpu, NULL);
vcpu = find_next_bit(client->ioreqs_map,
diff --git a/drivers/virt/nitro_enclaves/Kconfig b/drivers/virt/nitro_enclaves/Kconfig
index f53740b941c0..2d3d98158121 100644
--- a/drivers/virt/nitro_enclaves/Kconfig
+++ b/drivers/virt/nitro_enclaves/Kconfig
@@ -14,3 +14,12 @@ config NITRO_ENCLAVES
To compile this driver as a module, choose M here.
The module will be called nitro_enclaves.
+
+config NITRO_ENCLAVES_MISC_DEV_TEST
+ bool "Tests for the misc device functionality of the Nitro Enclaves"
+ depends on NITRO_ENCLAVES && KUNIT=y
+ help
+ Enable KUnit tests for the misc device functionality of the Nitro
+ Enclaves. Select this option only if you will boot the kernel for
+ the purpose of running unit tests (e.g. under UML or qemu). If
+ unsure, say N.
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c
index 6894ccb868a6..20c881b6a4b6 100644
--- a/drivers/virt/nitro_enclaves/ne_misc_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c
@@ -24,6 +24,7 @@
#include <linux/nitro_enclaves.h>
#include <linux/pci.h>
#include <linux/poll.h>
+#include <linux/range.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <uapi/linux/vm_sockets.h>
@@ -126,6 +127,16 @@ struct ne_cpu_pool {
static struct ne_cpu_pool ne_cpu_pool;
/**
+ * struct ne_phys_contig_mem_regions - Contiguous physical memory regions.
+ * @num: The number of regions currently in the array.
+ * @regions: The array of physical memory regions.
+ */
+struct ne_phys_contig_mem_regions {
+ unsigned long num;
+ struct range *regions;
+};
+
+/**
* ne_check_enclaves_created() - Verify if at least one enclave has been created.
* @void: No parameters provided.
*
@@ -825,6 +836,72 @@ static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave,
}
/**
+ * ne_sanity_check_phys_mem_region() - Sanity check the start address and the size
+ * of a physical memory region.
+ * @phys_mem_region_paddr: Physical start address of the region to be sanity checked.
+ * @phys_mem_region_size: Length of the region to be sanity checked.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int ne_sanity_check_phys_mem_region(u64 phys_mem_region_paddr,
+ u64 phys_mem_region_size)
+{
+ if (phys_mem_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Physical mem region size is not multiple of 2 MiB\n");
+
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(phys_mem_region_paddr, NE_MIN_MEM_REGION_SIZE)) {
+ dev_err_ratelimited(ne_misc_dev.this_device,
+ "Physical mem region address is not 2 MiB aligned\n");
+
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ne_merge_phys_contig_memory_regions() - Add a memory region and merge the adjacent
+ * regions if they are physically contiguous.
+ * @phys_contig_regions: Private data associated with the contiguous physical memory regions.
+ * @page_paddr: Physical start address of the region to be added.
+ * @page_size: Length of the region to be added.
+ *
+ * Context: Process context. This function is called with the ne_enclave mutex held.
+ * Return:
+ * * 0 on success.
+ * * Negative return value on failure.
+ */
+static int
+ne_merge_phys_contig_memory_regions(struct ne_phys_contig_mem_regions *phys_contig_regions,
+ u64 page_paddr, u64 page_size)
+{
+ unsigned long num = phys_contig_regions->num;
+ int rc = 0;
+
+ rc = ne_sanity_check_phys_mem_region(page_paddr, page_size);
+ if (rc < 0)
+ return rc;
+
+ /* Physically contiguous, just merge */
+ if (num && (phys_contig_regions->regions[num - 1].end + 1) == page_paddr) {
+ phys_contig_regions->regions[num - 1].end += page_size;
+ } else {
+ phys_contig_regions->regions[num].start = page_paddr;
+ phys_contig_regions->regions[num].end = page_paddr + page_size - 1;
+ phys_contig_regions->num++;
+ }
+
+ return 0;
+}
+
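A quick sketch of how the merge above behaves, mirrored in plain userspace C without the sanity checks (the addresses are arbitrary 2 MiB-aligned examples):

    #include <stdio.h>

    struct range { unsigned long long start, end; };

    struct regions {
        unsigned long num;
        struct range r[16];
    };

    static void merge(struct regions *rs, unsigned long long paddr,
                      unsigned long long size)
    {
        unsigned long n = rs->num;

        if (n && rs->r[n - 1].end + 1 == paddr) {  /* physically contiguous */
            rs->r[n - 1].end += size;
        } else {                                   /* start a new region */
            rs->r[n].start = paddr;
            rs->r[n].end = paddr + size - 1;
            rs->num++;
        }
    }

    int main(void)
    {
        struct regions rs = { 0 };

        merge(&rs, 0x600000, 0x400000);  /* new region 0x600000..0x9fffff  */
        merge(&rs, 0xa00000, 0x400000);  /* contiguous: grows to 0xdfffff  */
        printf("num=%lu [0x%llx..0x%llx]\n", rs.num, rs.r[0].start, rs.r[0].end);
        return 0;
    }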
+/**
* ne_set_user_memory_region_ioctl() - Add user space memory region to the slot
* associated with the current enclave.
* @ne_enclave : Private data associated with the current enclave.
@@ -843,9 +920,8 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
unsigned long max_nr_pages = 0;
unsigned long memory_size = 0;
struct ne_mem_region *ne_mem_region = NULL;
- unsigned long nr_phys_contig_mem_regions = 0;
struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev;
- struct page **phys_contig_mem_regions = NULL;
+ struct ne_phys_contig_mem_regions phys_contig_mem_regions = {};
int rc = -EINVAL;
rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region);
@@ -866,9 +942,10 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
goto free_mem_region;
}
- phys_contig_mem_regions = kcalloc(max_nr_pages, sizeof(*phys_contig_mem_regions),
- GFP_KERNEL);
- if (!phys_contig_mem_regions) {
+ phys_contig_mem_regions.regions = kcalloc(max_nr_pages,
+ sizeof(*phys_contig_mem_regions.regions),
+ GFP_KERNEL);
+ if (!phys_contig_mem_regions.regions) {
rc = -ENOMEM;
goto free_mem_region;
@@ -902,26 +979,18 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
if (rc < 0)
goto put_pages;
- /*
- * TODO: Update once handled non-contiguous memory regions
- * received from user space or contiguous physical memory regions
- * larger than 2 MiB e.g. 8 MiB.
- */
- phys_contig_mem_regions[i] = ne_mem_region->pages[i];
+ rc = ne_merge_phys_contig_memory_regions(&phys_contig_mem_regions,
+ page_to_phys(ne_mem_region->pages[i]),
+ page_size(ne_mem_region->pages[i]));
+ if (rc < 0)
+ goto put_pages;
memory_size += page_size(ne_mem_region->pages[i]);
ne_mem_region->nr_pages++;
} while (memory_size < mem_region.memory_size);
- /*
- * TODO: Update once handled non-contiguous memory regions received
- * from user space or contiguous physical memory regions larger than
- * 2 MiB e.g. 8 MiB.
- */
- nr_phys_contig_mem_regions = ne_mem_region->nr_pages;
-
- if ((ne_enclave->nr_mem_regions + nr_phys_contig_mem_regions) >
+ if ((ne_enclave->nr_mem_regions + phys_contig_mem_regions.num) >
ne_enclave->max_mem_regions) {
dev_err_ratelimited(ne_misc_dev.this_device,
"Reached max memory regions %lld\n",
@@ -932,27 +1001,13 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
goto put_pages;
}
- for (i = 0; i < nr_phys_contig_mem_regions; i++) {
- u64 phys_region_addr = page_to_phys(phys_contig_mem_regions[i]);
- u64 phys_region_size = page_size(phys_contig_mem_regions[i]);
-
- if (phys_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) {
- dev_err_ratelimited(ne_misc_dev.this_device,
- "Physical mem region size is not multiple of 2 MiB\n");
-
- rc = -EINVAL;
-
- goto put_pages;
- }
-
- if (!IS_ALIGNED(phys_region_addr, NE_MIN_MEM_REGION_SIZE)) {
- dev_err_ratelimited(ne_misc_dev.this_device,
- "Physical mem region address is not 2 MiB aligned\n");
-
- rc = -EINVAL;
+ for (i = 0; i < phys_contig_mem_regions.num; i++) {
+ u64 phys_region_addr = phys_contig_mem_regions.regions[i].start;
+ u64 phys_region_size = range_len(&phys_contig_mem_regions.regions[i]);
+ rc = ne_sanity_check_phys_mem_region(phys_region_addr, phys_region_size);
+ if (rc < 0)
goto put_pages;
- }
}
ne_mem_region->memory_size = mem_region.memory_size;
@@ -960,13 +1015,13 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
list_add(&ne_mem_region->mem_region_list_entry, &ne_enclave->mem_regions_list);
- for (i = 0; i < nr_phys_contig_mem_regions; i++) {
+ for (i = 0; i < phys_contig_mem_regions.num; i++) {
struct ne_pci_dev_cmd_reply cmd_reply = {};
struct slot_add_mem_req slot_add_mem_req = {};
slot_add_mem_req.slot_uid = ne_enclave->slot_uid;
- slot_add_mem_req.paddr = page_to_phys(phys_contig_mem_regions[i]);
- slot_add_mem_req.size = page_size(phys_contig_mem_regions[i]);
+ slot_add_mem_req.paddr = phys_contig_mem_regions.regions[i].start;
+ slot_add_mem_req.size = range_len(&phys_contig_mem_regions.regions[i]);
rc = ne_do_request(pdev, SLOT_ADD_MEM,
&slot_add_mem_req, sizeof(slot_add_mem_req),
@@ -975,7 +1030,7 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
dev_err_ratelimited(ne_misc_dev.this_device,
"Error in slot add mem [rc=%d]\n", rc);
- kfree(phys_contig_mem_regions);
+ kfree(phys_contig_mem_regions.regions);
/*
* Exit here without put pages as memory regions may
@@ -988,7 +1043,7 @@ static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave,
ne_enclave->nr_mem_regions++;
}
- kfree(phys_contig_mem_regions);
+ kfree(phys_contig_mem_regions.regions);
return 0;
@@ -996,7 +1051,7 @@ put_pages:
for (i = 0; i < ne_mem_region->nr_pages; i++)
put_page(ne_mem_region->pages[i]);
free_mem_region:
- kfree(phys_contig_mem_regions);
+ kfree(phys_contig_mem_regions.regions);
kfree(ne_mem_region->pages);
kfree(ne_mem_region);
@@ -1702,8 +1757,37 @@ static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return 0;
}
+#if defined(CONFIG_NITRO_ENCLAVES_MISC_DEV_TEST)
+#include "ne_misc_dev_test.c"
+
+static inline int ne_misc_dev_test_init(void)
+{
+ return __kunit_test_suites_init(ne_misc_dev_test_suites);
+}
+
+static inline void ne_misc_dev_test_exit(void)
+{
+ __kunit_test_suites_exit(ne_misc_dev_test_suites);
+}
+#else
+static inline int ne_misc_dev_test_init(void)
+{
+ return 0;
+}
+
+static inline void ne_misc_dev_test_exit(void)
+{
+}
+#endif
+
static int __init ne_init(void)
{
+ int rc = 0;
+
+ rc = ne_misc_dev_test_init();
+ if (rc < 0)
+ return rc;
+
mutex_init(&ne_cpu_pool.mutex);
return pci_register_driver(&ne_pci_driver);
@@ -1714,6 +1798,8 @@ static void __exit ne_exit(void)
pci_unregister_driver(&ne_pci_driver);
ne_teardown_cpu_pool();
+
+ ne_misc_dev_test_exit();
}
module_init(ne_init);
diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev_test.c b/drivers/virt/nitro_enclaves/ne_misc_dev_test.c
new file mode 100644
index 000000000000..265797bed0ea
--- /dev/null
+++ b/drivers/virt/nitro_enclaves/ne_misc_dev_test.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <kunit/test.h>
+
+#define MAX_PHYS_REGIONS 16
+#define INVALID_VALUE (~0ull)
+
+struct ne_phys_regions_test {
+ u64 paddr;
+ u64 size;
+ int expect_rc;
+ unsigned long expect_num;
+ u64 expect_last_paddr;
+ u64 expect_last_size;
+} phys_regions_test_cases[] = {
+ /*
+ * Add the region from 0x1000 to (0x1000 + 0x200000 - 1):
+ * Expected result:
+ * Failed, start address is not 2M-aligned
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 0
+ * regions = {}
+ */
+ {0x1000, 0x200000, -EINVAL, 0, INVALID_VALUE, INVALID_VALUE},
+
+ /*
+ * Add the region from 0x200000 to (0x200000 + 0x1000 - 1):
+ * Expected result:
+ * Failed, size is not 2M-aligned
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 0
+ * regions = {}
+ */
+ {0x200000, 0x1000, -EINVAL, 0, INVALID_VALUE, INVALID_VALUE},
+
+ /*
+ * Add the region from 0x200000 to (0x200000 + 0x200000 - 1):
+ * Expected result:
+ * Successful
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 1
+ * regions = {
+ * {start=0x200000, end=0x3fffff}, // len=0x200000
+ * }
+ */
+ {0x200000, 0x200000, 0, 1, 0x200000, 0x200000},
+
+ /*
+ * Add the region from 0x0 to (0x0 + 0x200000 - 1):
+ * Expected result:
+ * Successful
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 2
+ * regions = {
+ * {start=0x200000, end=0x3fffff}, // len=0x200000
+ * {start=0x0, end=0x1fffff}, // len=0x200000
+ * }
+ */
+ {0x0, 0x200000, 0, 2, 0x0, 0x200000},
+
+ /*
+ * Add the region from 0x600000 to (0x600000 + 0x400000 - 1):
+ * Expected result:
+ * Successful
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 3
+ * regions = {
+ * {start=0x200000, end=0x3fffff}, // len=0x200000
+ * {start=0x0, end=0x1fffff}, // len=0x200000
+ * {start=0x600000, end=0x9fffff}, // len=0x400000
+ * }
+ */
+ {0x600000, 0x400000, 0, 3, 0x600000, 0x400000},
+
+ /*
+ * Add the region from 0xa00000 to (0xa00000 + 0x400000 - 1):
+ * Expected result:
+ * Successful, merging case!
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 3
+ * regions = {
+ * {start=0x200000, end=0x3fffff}, // len=0x200000
+ * {start=0x0, end=0x1fffff}, // len=0x200000
+ * {start=0x600000, end=0xdfffff}, // len=0x800000
+ * }
+ */
+ {0xa00000, 0x400000, 0, 3, 0x600000, 0x800000},
+
+ /*
+ * Add the region from 0x1000 to (0x1000 + 0x200000 - 1):
+ * Expected result:
+ * Failed, start address is not 2M-aligned
+ *
+ * Now the instance of struct ne_phys_contig_mem_regions is:
+ * num = 3
+ * regions = {
+ * {start=0x200000, end=0x3fffff}, // len=0x200000
+ * {start=0x0, end=0x1fffff}, // len=0x200000
+ * {start=0x600000, end=0xdfffff}, // len=0x800000
+ * }
+ */
+ {0x1000, 0x200000, -EINVAL, 3, 0x600000, 0x800000},
+};
+
+static void ne_misc_dev_test_merge_phys_contig_memory_regions(struct kunit *test)
+{
+ struct ne_phys_contig_mem_regions phys_contig_mem_regions = {};
+ int rc = 0;
+ int i = 0;
+
+ phys_contig_mem_regions.regions = kunit_kcalloc(test, MAX_PHYS_REGIONS,
+ sizeof(*phys_contig_mem_regions.regions),
+ GFP_KERNEL);
+ KUNIT_ASSERT_TRUE(test, phys_contig_mem_regions.regions);
+
+ for (i = 0; i < ARRAY_SIZE(phys_regions_test_cases); i++) {
+ struct ne_phys_regions_test *test_case = &phys_regions_test_cases[i];
+ unsigned long num = 0;
+
+ rc = ne_merge_phys_contig_memory_regions(&phys_contig_mem_regions,
+ test_case->paddr, test_case->size);
+ KUNIT_EXPECT_EQ(test, rc, test_case->expect_rc);
+ KUNIT_EXPECT_EQ(test, phys_contig_mem_regions.num, test_case->expect_num);
+
+ if (test_case->expect_last_paddr == INVALID_VALUE)
+ continue;
+
+ num = phys_contig_mem_regions.num;
+ KUNIT_EXPECT_EQ(test, phys_contig_mem_regions.regions[num - 1].start,
+ test_case->expect_last_paddr);
+ KUNIT_EXPECT_EQ(test, range_len(&phys_contig_mem_regions.regions[num - 1]),
+ test_case->expect_last_size);
+ }
+
+ kunit_kfree(test, phys_contig_mem_regions.regions);
+}
+
+static struct kunit_case ne_misc_dev_test_cases[] = {
+ KUNIT_CASE(ne_misc_dev_test_merge_phys_contig_memory_regions),
+ {}
+};
+
+static struct kunit_suite ne_misc_dev_test_suite = {
+ .name = "ne_misc_dev_test",
+ .test_cases = ne_misc_dev_test_cases,
+};
+
+static struct kunit_suite *ne_misc_dev_test_suites[] = {
+ &ne_misc_dev_test_suite,
+ NULL
+};
diff --git a/drivers/virt/nitro_enclaves/ne_pci_dev.c b/drivers/virt/nitro_enclaves/ne_pci_dev.c
index 40b49ec8e30b..6b81e8f3a5dc 100644
--- a/drivers/virt/nitro_enclaves/ne_pci_dev.c
+++ b/drivers/virt/nitro_enclaves/ne_pci_dev.c
@@ -376,7 +376,6 @@ static void ne_teardown_msix(struct pci_dev *pdev)
free_irq(pci_irq_vector(pdev, NE_VEC_EVENT), ne_pci_dev);
flush_work(&ne_pci_dev->notify_work);
- flush_workqueue(ne_pci_dev->event_wq);
destroy_workqueue(ne_pci_dev->event_wq);
free_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_pci_dev);
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 236081afe9a2..00ac9db792a4 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -204,6 +204,12 @@ int virtio_finalize_features(struct virtio_device *dev)
}
EXPORT_SYMBOL_GPL(virtio_finalize_features);
+void virtio_reset_device(struct virtio_device *dev)
+{
+ dev->config->reset(dev);
+}
+EXPORT_SYMBOL_GPL(virtio_reset_device);
+
static int virtio_dev_probe(struct device *_d)
{
int err, i;
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c22ff0117b46..f4c34a2a6b8e 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -1056,7 +1056,7 @@ static void remove_common(struct virtio_balloon *vb)
return_free_pages_to_mm(vb, ULONG_MAX);
/* Now we reset the device so we can clean up the queues. */
- vb->vdev->config->reset(vb->vdev);
+ virtio_reset_device(vb->vdev);
vb->vdev->config->del_vqs(vb->vdev);
}
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index ce51ae165943..3aa46703872d 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -347,7 +347,7 @@ static void virtinput_remove(struct virtio_device *vdev)
spin_unlock_irqrestore(&vi->lock, flags);
input_unregister_device(vi->idev);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
kfree(buf);
vdev->config->del_vqs(vdev);
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 96e5a8782769..38becd8d578c 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -20,6 +20,7 @@
#include <linux/mutex.h>
#include <linux/bitmap.h>
#include <linux/lockdep.h>
+#include <linux/log2.h>
#include <acpi/acpi_numa.h>
@@ -592,7 +593,7 @@ static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
return -ENOMEM;
mutex_lock(&vm->hotplug_mutex);
- if (new_bitmap)
+ if (vm->sbm.sb_states)
memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);
old_bitmap = vm->sbm.sb_states;
@@ -1120,15 +1121,18 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
*/
static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{
- const unsigned long max_nr_pages = MAX_ORDER_NR_PAGES;
+ unsigned long order = MAX_ORDER - 1;
unsigned long i;
/*
- * We are always called at least with MAX_ORDER_NR_PAGES
- * granularity/alignment (e.g., the way subblocks work). All pages
- * inside such a block are alike.
+ * We might get called for ranges that don't cover properly aligned
+ * MAX_ORDER - 1 pages; however, we can only online properly aligned
+ * pages with an order of MAX_ORDER - 1 at maximum.
*/
- for (i = 0; i < nr_pages; i += max_nr_pages) {
+ while (!IS_ALIGNED(pfn | nr_pages, 1 << order))
+ order--;
+
+ for (i = 0; i < nr_pages; i += 1 << order) {
struct page *page = pfn_to_page(pfn + i);
/*
@@ -1138,14 +1142,12 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
* alike.
*/
if (PageDirty(page)) {
- virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
- false);
- generic_online_page(page, MAX_ORDER - 1);
+ virtio_mem_clear_fake_offline(pfn + i, 1 << order, false);
+ generic_online_page(page, order);
} else {
- virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
- true);
- free_contig_range(pfn + i, max_nr_pages);
- adjust_managed_page_count(page, max_nr_pages);
+ virtio_mem_clear_fake_offline(pfn + i, 1 << order, true);
+ free_contig_range(pfn + i, 1 << order);
+ adjust_managed_page_count(page, 1 << order);
}
}
}
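The alignment walk added above picks the largest power-of-two order that both the start pfn and the length are multiples of, since only such blocks can be handed to generic_online_page() or free_contig_range(). A tiny demonstration (MAX_ORDER = 11 as on common configs; the pfn/nr_pages values are examples):

    #include <stdio.h>

    #define MAX_ORDER 11   /* typical kernel default */

    static unsigned int pick_order(unsigned long pfn, unsigned long nr_pages)
    {
        unsigned int order = MAX_ORDER - 1;

        /* Same test as the kernel loop: the start and the length must
         * both be multiples of 1 << order. */
        while ((pfn | nr_pages) & ((1UL << order) - 1))
            order--;
        return order;
    }

    int main(void)
    {
        /* 512-page-aligned start, 512 pages -> order 9 (a pageblock). */
        printf("order=%u\n", pick_order(0x1200, 0x200));
        return 0;
    }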
@@ -1228,28 +1230,46 @@ static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
page_ref_inc(pfn_to_page(pfn + i));
}
-static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
+static void virtio_mem_online_page(struct virtio_mem *vm,
+ struct page *page, unsigned int order)
{
- const unsigned long addr = page_to_phys(page);
- unsigned long id, sb_id;
- struct virtio_mem *vm;
+ const unsigned long start = page_to_phys(page);
+ const unsigned long end = start + PFN_PHYS(1 << order);
+ unsigned long addr, next, id, sb_id, count;
bool do_online;
- rcu_read_lock();
- list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
- if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
- continue;
+ /*
+ * We can get called with any order up to MAX_ORDER - 1. If our
+ * subblock size is smaller than that and we have a mixture of plugged
+ * and unplugged subblocks within such a page, we have to process in
+ * smaller granularity. In that case we'll adjust the order exactly once
+ * within the loop.
+ */
+ for (addr = start; addr < end; ) {
+ next = addr + PFN_PHYS(1 << order);
if (vm->in_sbm) {
- /*
- * We exploit here that subblocks have at least
- * MAX_ORDER_NR_PAGES size/alignment - so we cannot
- * cross subblocks within one call.
- */
id = virtio_mem_phys_to_mb_id(addr);
sb_id = virtio_mem_phys_to_sb_id(vm, addr);
- do_online = virtio_mem_sbm_test_sb_plugged(vm, id,
- sb_id, 1);
+ count = virtio_mem_phys_to_sb_id(vm, next - 1) - sb_id + 1;
+
+ if (virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, count)) {
+ /* Fully plugged. */
+ do_online = true;
+ } else if (count == 1 ||
+ virtio_mem_sbm_test_sb_unplugged(vm, id, sb_id, count)) {
+ /* Fully unplugged. */
+ do_online = false;
+ } else {
+ /*
+ * Mixture, process sub-blocks instead. This
+ * will be at least the size of a pageblock.
+ * We'll run into this case exactly once.
+ */
+ order = ilog2(vm->sbm.sb_size) - PAGE_SHIFT;
+ do_online = virtio_mem_sbm_test_sb_plugged(vm, id, sb_id, 1);
+ continue;
+ }
} else {
/*
* If the whole block is marked fake offline, keep
@@ -1260,18 +1280,38 @@ static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
}
+ if (do_online)
+ generic_online_page(pfn_to_page(PFN_DOWN(addr)), order);
+ else
+ virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
+ false);
+ addr = next;
+ }
+}
+
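Worked example for the mixture branch: with a 2 MiB subblock size and an order-10 (4 MiB on x86-64) page covering two subblocks of which only one is plugged, neither the fully-plugged nor the fully-unplugged test fires, so the order is reduced once to ilog2(sb_size) - PAGE_SHIFT = 9 and the loop continues; every remaining 2 MiB chunk is then tested and either onlined or marked fake-offline on its own.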
+static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
+{
+ const unsigned long addr = page_to_phys(page);
+ struct virtio_mem *vm;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
+ /*
+ * Pages we're onlining will never cross memory blocks and,
+ * therefore, not virtio-mem devices.
+ */
+ if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
+ continue;
+
/*
- * virtio_mem_set_fake_offline() might sleep, we don't need
- * the device anymore. See virtio_mem_remove() how races
+ * virtio_mem_set_fake_offline() might sleep. We can safely
+ * drop the RCU lock at this point because the device
+ * cannot go away. See virtio_mem_remove() how races
* between memory onlining and device removal are handled.
*/
rcu_read_unlock();
- if (do_online)
- generic_online_page(page, order);
- else
- virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
- false);
+ virtio_mem_online_page(vm, page, order);
return;
}
rcu_read_unlock();
@@ -2438,8 +2478,6 @@ static int virtio_mem_init_hotplug(struct virtio_mem *vm)
/*
* We want subblocks to span at least MAX_ORDER_NR_PAGES and
* pageblock_nr_pages pages. This:
- * - Simplifies our page onlining code (virtio_mem_online_page_cb)
- * and fake page onlining code (virtio_mem_fake_online).
* - Is required for now for alloc_contig_range() to work reliably -
* it doesn't properly handle smaller granularity on ZONE_NORMAL.
*/
@@ -2850,7 +2888,7 @@ static void virtio_mem_remove(struct virtio_device *vdev)
virtio_mem_deinit_hotplug(vm);
/* reset the device and cleanup the queues */
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
vdev->config->del_vqs(vdev);
kfree(vm);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index b3f8128b7983..34141b9abe27 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -138,7 +138,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
if (q_pfn >> 32) {
dev_err(&vp_dev->pci_dev->dev,
- "platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
+ "platform bug: legacy virtio-pci must not be used with RAM above 0x%llxGB\n",
0x1ULL << (32 + PAGE_SHIFT - 30));
err = -E2BIG;
goto out_del_vq;
diff --git a/drivers/virtio/virtio_pci_legacy_dev.c b/drivers/virtio/virtio_pci_legacy_dev.c
index 9b97680dd02b..677d1f68bc9b 100644
--- a/drivers/virtio/virtio_pci_legacy_dev.c
+++ b/drivers/virtio/virtio_pci_legacy_dev.c
@@ -45,8 +45,10 @@ int vp_legacy_probe(struct virtio_pci_legacy_device *ldev)
return rc;
ldev->ioaddr = pci_iomap(pci_dev, 0, 0);
- if (!ldev->ioaddr)
+ if (!ldev->ioaddr) {
+ rc = -EIO;
goto err_iomap;
+ }
ldev->isr = ldev->ioaddr + VIRTIO_PCI_ISR;
diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c
index e11ed748e661..e8b3ff2b9fbc 100644
--- a/drivers/virtio/virtio_pci_modern_dev.c
+++ b/drivers/virtio/virtio_pci_modern_dev.c
@@ -345,7 +345,7 @@ err_map_common:
EXPORT_SYMBOL_GPL(vp_modern_probe);
/*
- * vp_modern_probe: remove and cleanup the modern virtio pci device
+ * vp_modern_remove: remove and cleanup the modern virtio pci device
* @mdev: the modern virtio-pci device
*/
void vp_modern_remove(struct virtio_pci_modern_device *mdev)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 028b05d44546..962f1477b1fa 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1197,8 +1197,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
if (virtqueue_use_indirect(_vq, total_sg)) {
err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
in_sgs, data, gfp);
- if (err != -ENOMEM)
+ if (err != -ENOMEM) {
+ END_USE(vq);
return err;
+ }
/* fall back on direct */
}
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index f85f860bc10b..7767a7f0119b 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -91,9 +91,8 @@ static u8 virtio_vdpa_get_status(struct virtio_device *vdev)
static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- const struct vdpa_config_ops *ops = vdpa->config;
- return ops->set_status(vdpa, status);
+ return vdpa_set_status(vdpa, status);
}
static void virtio_vdpa_reset(struct virtio_device *vdev)
@@ -308,7 +307,7 @@ static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
const struct vdpa_config_ops *ops = vdpa->config;
- return ops->get_features(vdpa);
+ return ops->get_device_features(vdpa);
}
static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
@@ -318,7 +317,7 @@ static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
/* Give virtio_ring a chance to accept features. */
vring_transport_features(vdev);
- return vdpa_set_features(vdpa, vdev->features);
+ return vdpa_set_features(vdpa, vdev->features, false);
}
static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c
index e4f336111edc..6cef6e2edb89 100644
--- a/drivers/w1/slaves/w1_ds28e04.c
+++ b/drivers/w1/slaves/w1_ds28e04.c
@@ -32,7 +32,7 @@ static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);
/* enable/disable CRC checking on DS28E04-100 memory accesses */
-static char w1_enable_crccheck = 1;
+static bool w1_enable_crccheck = true;
#define W1_EEPROM_SIZE 512
#define W1_PAGE_COUNT 16
@@ -339,32 +339,18 @@ static BIN_ATTR_RW(pio, 1);
static ssize_t crccheck_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- if (put_user(w1_enable_crccheck + 0x30, buf))
- return -EFAULT;
-
- return sizeof(w1_enable_crccheck);
+ return sysfs_emit(buf, "%d\n", w1_enable_crccheck);
}
static ssize_t crccheck_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- char val;
-
- if (count != 1 || !buf)
- return -EINVAL;
+ int err = kstrtobool(buf, &w1_enable_crccheck);
- if (get_user(val, buf))
- return -EFAULT;
+ if (err)
+ return err;
- /* convert to decimal */
- val = val - 0x30;
- if (val != 0 && val != 1)
- return -EINVAL;
-
- /* set the new value */
- w1_enable_crccheck = val;
-
- return sizeof(w1_enable_crccheck);
+ return count;
}
static DEVICE_ATTR_RW(crccheck);
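The rewritten attribute pair follows the standard sysfs idiom: sysfs_emit() for show, kstrtobool() for store. A self-contained sketch of that idiom ('my_flag' is an illustrative attribute, not from this driver):

static bool my_flag = true;		/* hypothetical module state */

static ssize_t my_flag_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", my_flag);
}

static ssize_t my_flag_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int err = kstrtobool(buf, &my_flag);

	return err ? err : count;	/* accepts "0/1", "y/n", "on/off" */
}
static DEVICE_ATTR_RW(my_flag);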
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index ca70c5f03206..565578002d79 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -1785,7 +1785,7 @@ static ssize_t alarms_store(struct device *device,
u8 new_config_register[3]; /* array of data to be written */
int temp, ret;
char *token = NULL;
- s8 tl, th, tt; /* 1 byte per value + temp ring order */
+ s8 tl, th; /* 1 byte per value + temp ring order */
char *p_args, *orig;
p_args = orig = kmalloc(size, GFP_KERNEL);
@@ -1836,9 +1836,8 @@ static ssize_t alarms_store(struct device *device,
th = int_to_short(temp);
/* Reorder if required th and tl */
- if (tl > th) {
- tt = tl; tl = th; th = tt;
- }
+ if (tl > th)
+ swap(tl, th);
/*
* Read the scratchpad to change only the required bits
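The kernel's swap() helper used in the hunk above exchanges two lvalues of the same type through a hidden temporary, replacing the open-coded three-assignment version; a tiny illustration:

s8 tl = 20, th = 10;

if (tl > th)
	swap(tl, th);	/* tl == 10, th == 20 afterwards */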
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 1dc86eb1361a..c8fa79da23b3 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -207,6 +207,7 @@ config DA9055_WATCHDOG
config DA9063_WATCHDOG
tristate "Dialog DA9063 Watchdog"
depends on MFD_DA9063 || COMPILE_TEST
+ depends on I2C
select WATCHDOG_CORE
help
Support for the watchdog in the DA9063 PMIC.
@@ -680,10 +681,10 @@ config MAX77620_WATCHDOG
depends on MFD_MAX77620 || COMPILE_TEST
select WATCHDOG_CORE
help
- This is the driver for the Max77620 watchdog timer.
- Say 'Y' here to enable the watchdog timer support for
- MAX77620 chips. To compile this driver as a module,
- choose M here: the module will be called max77620_wdt.
+ This is the driver for the Max77620 watchdog timer.
+ Say 'Y' here to enable the watchdog timer support for
+ MAX77620 chips. To compile this driver as a module,
+ choose M here: the module will be called max77620_wdt.
config IMX2_WDT
tristate "IMX2+ Watchdog"
@@ -822,6 +823,7 @@ config MESON_WATCHDOG
config MEDIATEK_WATCHDOG
tristate "Mediatek SoCs watchdog support"
depends on ARCH_MEDIATEK || COMPILE_TEST
+ default ARCH_MEDIATEK
select WATCHDOG_CORE
select RESET_CONTROLLER
help
@@ -881,6 +883,14 @@ config RENESAS_RZAWDT
This driver adds watchdog support for the integrated watchdogs in the
Renesas RZ/A SoCs. These watchdogs can be used to reset a system.
+config RENESAS_RZG2LWDT
+ tristate "Renesas RZ/G2L WDT Watchdog"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ This driver adds watchdog support for the integrated watchdogs in the
+ Renesas RZ/G2L SoCs. These watchdogs can be used to reset a system.
+
config ASPEED_WATCHDOG
tristate "Aspeed BMC watchdog support"
depends on ARCH_ASPEED || COMPILE_TEST
@@ -940,6 +950,19 @@ config RTD119X_WATCHDOG
Say Y here to include support for the watchdog timer in
Realtek RTD1295 SoCs.
+config REALTEK_OTTO_WDT
+ tristate "Realtek Otto MIPS watchdog support"
+ depends on MACH_REALTEK_RTL || COMPILE_TEST
+ depends on COMMON_CLK
+ select WATCHDOG_CORE
+ default MACH_REALTEK_RTL
+ help
+ Say Y here to include support for the watchdog timer on Realtek
+ RTL838x, RTL839x, RTL930x SoCs. This watchdog has pretimeout
+ notifications and system reset on timeout.
+
+ When built as a module this will be called realtek_otto_wdt.
+
config SPRD_WATCHDOG
tristate "Spreadtrum watchdog support"
depends on ARCH_SPRD || COMPILE_TEST
@@ -976,6 +999,18 @@ config MSC313E_WATCHDOG
To compile this driver as a module, choose M here: the
module will be called msc313e_wdt.
+config APPLE_WATCHDOG
+ tristate "Apple SoC watchdog"
+ depends on ARCH_APPLE || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+	  Say Y here to include support for the watchdog found in Apple
+	  SoCs such as the M1. In addition to the common watchdog features,
+	  this driver is also required in order to reboot these SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called apple_wdt.
+
# X86 (i386 + ia64 + x86_64) Architecture
config ACQUIRE_WDT
@@ -1440,26 +1475,26 @@ config TQMX86_WDT
depends on X86
select WATCHDOG_CORE
help
- This is the driver for the hardware watchdog timer in the TQMX86 IO
- controller found on some of their ComExpress Modules.
+ This is the driver for the hardware watchdog timer in the TQMX86 IO
+ controller found on some of their ComExpress Modules.
- To compile this driver as a module, choose M here; the module
- will be called tqmx86_wdt.
+ To compile this driver as a module, choose M here; the module
+ will be called tqmx86_wdt.
- Most people will say N.
+ Most people will say N.
config VIA_WDT
tristate "VIA Watchdog Timer"
depends on X86 && PCI
select WATCHDOG_CORE
help
- This is the driver for the hardware watchdog timer on VIA
- southbridge chipset CX700, VX800/VX820 or VX855/VX875.
+ This is the driver for the hardware watchdog timer on VIA
+ southbridge chipset CX700, VX800/VX820 or VX855/VX875.
- To compile this driver as a module, choose M here; the module
- will be called via_wdt.
+ To compile this driver as a module, choose M here; the module
+ will be called via_wdt.
- Most people will say N.
+ Most people will say N.
config W83627HF_WDT
tristate "Watchdog timer for W83627HF/W83627DHG and compatibles"
@@ -1707,16 +1742,6 @@ config OCTEON_WDT
from the first interrupt, it is then only poked when the
device is written.
-config BCM63XX_WDT
- tristate "Broadcom BCM63xx hardware watchdog"
- depends on BCM63XX
- help
- Watchdog driver for the built in watchdog hardware in Broadcom
- BCM63xx SoC.
-
- To compile this driver as a loadable module, choose M here.
- The module will be called bcm63xx_wdt.
-
config BCM2835_WDT
tristate "Broadcom BCM2835 hardware watchdog"
depends on ARCH_BCM2835 || (OF && COMPILE_TEST)
@@ -1751,15 +1776,16 @@ config BCM_KONA_WDT_DEBUG
If in doubt, say 'N'.
config BCM7038_WDT
- tristate "BCM7038 Watchdog"
+ tristate "BCM63xx/BCM7038 Watchdog"
select WATCHDOG_CORE
depends on HAS_IOMEM
- depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || BCM63XX || COMPILE_TEST
help
- Watchdog driver for the built-in hardware in Broadcom 7038 and
- later SoCs used in set-top boxes. BCM7038 was made public
- during the 2004 CES, and since then, many Broadcom chips use this
- watchdog block, including some cable modem chips.
+ Watchdog driver for the built-in hardware in Broadcom 7038 and
+ later SoCs used in set-top boxes. BCM7038 was made public
+ during the 2004 CES, and since then, many Broadcom chips use this
+ watchdog block, including some cable modem chips and DSL (63xx)
+ chips.
config IMGPDC_WDT
tristate "Imagination Technologies PDC Watchdog Timer"
@@ -2120,12 +2146,12 @@ config KEEMBAY_WATCHDOG
depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST)
select WATCHDOG_CORE
help
- This option enable support for an In-secure watchdog timer driver for
- Intel Keem Bay SoC. This WDT has a 32 bit timer and decrements in every
- count unit. An interrupt will be triggered, when the count crosses
- the threshold configured in the register.
+	  This option enables support for the non-secure watchdog timer
+	  driver for the Intel Keem Bay SoC. This WDT has a 32-bit timer
+	  that decrements on every count unit. An interrupt is triggered
+	  when the count crosses the threshold configured in the register.
- To compile this driver as a module, choose M here: the
- module will be called keembay_wdt.
+ To compile this driver as a module, choose M here: the
+ module will be called keembay_wdt.
endif # WATCHDOG
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 31b931846e32..f7da867e8782 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -84,6 +84,7 @@ obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o
obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o
obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o
obj-$(CONFIG_RENESAS_RZAWDT) += rza_wdt.o
+obj-$(CONFIG_RENESAS_RZG2LWDT) += rzg2l_wdt.o
obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o
obj-$(CONFIG_STM32_WATCHDOG) += stm32_iwdg.o
obj-$(CONFIG_UNIPHIER_WATCHDOG) += uniphier_wdt.o
@@ -93,6 +94,7 @@ obj-$(CONFIG_PM8916_WATCHDOG) += pm8916_wdt.o
obj-$(CONFIG_ARM_SMC_WATCHDOG) += arm_smc_wdt.o
obj-$(CONFIG_VISCONTI_WATCHDOG) += visconti_wdt.o
obj-$(CONFIG_MSC313E_WATCHDOG) += msc313e_wdt.o
+obj-$(CONFIG_APPLE_WATCHDOG) += apple_wdt.o
# X86 (i386 + ia64 + x86_64) Architecture
obj-$(CONFIG_ACQUIRE_WDT) += acquirewdt.o
@@ -154,7 +156,6 @@ obj-$(CONFIG_XILINX_WATCHDOG) += of_xilinx_wdt.o
# MIPS Architecture
obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
-obj-$(CONFIG_BCM63XX_WDT) += bcm63xx_wdt.o
obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
obj-$(CONFIG_INDYDOG) += indydog.o
obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o
@@ -171,6 +172,7 @@ obj-$(CONFIG_IMGPDC_WDT) += imgpdc_wdt.o
obj-$(CONFIG_MT7621_WDT) += mt7621_wdt.o
obj-$(CONFIG_PIC32_WDT) += pic32-wdt.o
obj-$(CONFIG_PIC32_DMT) += pic32-dmt.o
+obj-$(CONFIG_REALTEK_OTTO_WDT) += realtek_otto_wdt.o
# PARISC Architecture
diff --git a/drivers/watchdog/apple_wdt.c b/drivers/watchdog/apple_wdt.c
new file mode 100644
index 000000000000..16aca21f13d6
--- /dev/null
+++ b/drivers/watchdog/apple_wdt.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple SoC Watchdog driver
+ *
+ * Copyright (C) The Asahi Linux Contributors
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/limits.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+/*
+ * Apple Watchdog MMIO registers
+ *
+ * This HW block has three separate watchdogs. WD0 resets the machine
+ * to recovery mode and is not very useful for us. WD1 and WD2 trigger a normal
+ * machine reset. WD0 additionally supports a configurable interrupt.
+ * This information can be used to implement pretimeout support at a later time.
+ *
+ * APPLE_WDT_WDx_CUR_TIME is a simple counter incremented for each tick of the
+ * reference clock. It can also be overwritten to any value.
+ * Whenever APPLE_WDT_CTRL_RESET_EN is set in APPLE_WDT_WDx_CTRL and
+ * APPLE_WDT_WDx_CUR_TIME >= APPLE_WDT_WDx_BITE_TIME the entire machine is
+ * reset.
+ * Whenever APPLE_WDT_CTRL_IRQ_EN is set and APPLE_WDT_WDx_CUR_TIME >=
+ * APPLE_WDT_WDx_BARK_TIME an interrupt is triggered and
+ * APPLE_WDT_CTRL_IRQ_STATUS is set. The interrupt can be cleared by writing
+ * 1 to APPLE_WDT_CTRL_IRQ_STATUS.
+ */
+#define APPLE_WDT_WD0_CUR_TIME 0x00
+#define APPLE_WDT_WD0_BITE_TIME 0x04
+#define APPLE_WDT_WD0_BARK_TIME 0x08
+#define APPLE_WDT_WD0_CTRL 0x0c
+
+#define APPLE_WDT_WD1_CUR_TIME 0x10
+#define APPLE_WDT_WD1_BITE_TIME 0x14
+#define APPLE_WDT_WD1_CTRL 0x1c
+
+#define APPLE_WDT_WD2_CUR_TIME 0x20
+#define APPLE_WDT_WD2_BITE_TIME 0x24
+#define APPLE_WDT_WD2_CTRL 0x2c
+
+#define APPLE_WDT_CTRL_IRQ_EN BIT(0)
+#define APPLE_WDT_CTRL_IRQ_STATUS BIT(1)
+#define APPLE_WDT_CTRL_RESET_EN BIT(2)
+
+#define APPLE_WDT_TIMEOUT_DEFAULT 30
+
+struct apple_wdt {
+ struct watchdog_device wdd;
+ void __iomem *regs;
+ unsigned long clk_rate;
+};
+
+static struct apple_wdt *to_apple_wdt(struct watchdog_device *wdd)
+{
+ return container_of(wdd, struct apple_wdt, wdd);
+}
+
+static int apple_wdt_start(struct watchdog_device *wdd)
+{
+ struct apple_wdt *wdt = to_apple_wdt(wdd);
+
+ writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+ writel_relaxed(APPLE_WDT_CTRL_RESET_EN, wdt->regs + APPLE_WDT_WD1_CTRL);
+
+ return 0;
+}
+
+static int apple_wdt_stop(struct watchdog_device *wdd)
+{
+ struct apple_wdt *wdt = to_apple_wdt(wdd);
+
+ writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CTRL);
+
+ return 0;
+}
+
+static int apple_wdt_ping(struct watchdog_device *wdd)
+{
+ struct apple_wdt *wdt = to_apple_wdt(wdd);
+
+ writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+
+ return 0;
+}
+
+static int apple_wdt_set_timeout(struct watchdog_device *wdd, unsigned int s)
+{
+ struct apple_wdt *wdt = to_apple_wdt(wdd);
+
+ writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+ writel_relaxed(wdt->clk_rate * s, wdt->regs + APPLE_WDT_WD1_BITE_TIME);
+
+ wdd->timeout = s;
+
+ return 0;
+}
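The timeout programming above is a straight clock multiplication; a worked example, assuming an illustrative 24 MHz reference clock (the real rate is read from the clock obtained at probe time):

unsigned long clk_rate = 24000000;		/* assumed for illustration */
u32 bite_time = clk_rate * 30;			/* 30 s -> 720000000 ticks */
unsigned int max_s = U32_MAX / clk_rate;	/* ~178 s before BITE_TIME wraps */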
+
+static unsigned int apple_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ struct apple_wdt *wdt = to_apple_wdt(wdd);
+ u32 cur_time, reset_time;
+
+ cur_time = readl_relaxed(wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+ reset_time = readl_relaxed(wdt->regs + APPLE_WDT_WD1_BITE_TIME);
+
+ return (reset_time - cur_time) / wdt->clk_rate;
+}
+
+static int apple_wdt_restart(struct watchdog_device *wdd, unsigned long mode,
+ void *cmd)
+{
+ struct apple_wdt *wdt = to_apple_wdt(wdd);
+
+ writel_relaxed(APPLE_WDT_CTRL_RESET_EN, wdt->regs + APPLE_WDT_WD1_CTRL);
+ writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_BITE_TIME);
+ writel_relaxed(0, wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+
+ /*
+ * Flush writes and then wait for the SoC to reset. Even though the
+ * reset is queued almost immediately experiments have shown that it
+	 * reset is queued almost immediately, experiments have shown that it
+ * 50ms here to be safe.
+ */
+ (void)readl_relaxed(wdt->regs + APPLE_WDT_WD1_CUR_TIME);
+ mdelay(50);
+
+ return 0;
+}
+
+static void apple_wdt_clk_disable_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static struct watchdog_ops apple_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = apple_wdt_start,
+ .stop = apple_wdt_stop,
+ .ping = apple_wdt_ping,
+ .set_timeout = apple_wdt_set_timeout,
+ .get_timeleft = apple_wdt_get_timeleft,
+ .restart = apple_wdt_restart,
+};
+
+static struct watchdog_info apple_wdt_info = {
+ .identity = "Apple SoC Watchdog",
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+};
+
+static int apple_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct apple_wdt *wdt;
+ struct clk *clk;
+ u32 wdt_ctrl;
+ int ret;
+
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ wdt->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(wdt->regs))
+ return PTR_ERR(wdt->regs);
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, apple_wdt_clk_disable_unprepare,
+ clk);
+ if (ret)
+ return ret;
+
+ wdt->clk_rate = clk_get_rate(clk);
+ if (!wdt->clk_rate)
+ return -EINVAL;
+
+ wdt->wdd.ops = &apple_wdt_ops;
+ wdt->wdd.info = &apple_wdt_info;
+ wdt->wdd.max_timeout = U32_MAX / wdt->clk_rate;
+ wdt->wdd.timeout = APPLE_WDT_TIMEOUT_DEFAULT;
+
+ wdt_ctrl = readl_relaxed(wdt->regs + APPLE_WDT_WD1_CTRL);
+ if (wdt_ctrl & APPLE_WDT_CTRL_RESET_EN)
+ set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
+
+ watchdog_init_timeout(&wdt->wdd, 0, dev);
+ apple_wdt_set_timeout(&wdt->wdd, wdt->wdd.timeout);
+ watchdog_stop_on_unregister(&wdt->wdd);
+ watchdog_set_restart_priority(&wdt->wdd, 128);
+
+ return devm_watchdog_register_device(dev, &wdt->wdd);
+}
+
+static const struct of_device_id apple_wdt_of_match[] = {
+ { .compatible = "apple,wdt" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, apple_wdt_of_match);
+
+static struct platform_driver apple_wdt_driver = {
+ .driver = {
+ .name = "apple-watchdog",
+ .of_match_table = apple_wdt_of_match,
+ },
+ .probe = apple_wdt_probe,
+};
+module_platform_driver(apple_wdt_driver);
+
+MODULE_DESCRIPTION("Apple SoC watchdog driver");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
deleted file mode 100644
index 56cc262571a5..000000000000
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ /dev/null
@@ -1,317 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Broadcom BCM63xx SoC watchdog driver
- *
- * Copyright (C) 2007, Miguel Gaio <miguel.gaio@efixo.com>
- * Copyright (C) 2008, Florian Fainelli <florian@openwrt.org>
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/bitops.h>
-#include <linux/errno.h>
-#include <linux/fs.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/uaccess.h>
-#include <linux/watchdog.h>
-#include <linux/timer.h>
-#include <linux/jiffies.h>
-#include <linux/interrupt.h>
-#include <linux/ptrace.h>
-#include <linux/resource.h>
-#include <linux/platform_device.h>
-
-#include <bcm63xx_cpu.h>
-#include <bcm63xx_io.h>
-#include <bcm63xx_regs.h>
-#include <bcm63xx_timer.h>
-
-#define PFX KBUILD_MODNAME
-
-#define WDT_HZ 50000000 /* Fclk */
-#define WDT_DEFAULT_TIME 30 /* seconds */
-#define WDT_MAX_TIME 256 /* seconds */
-
-static struct {
- void __iomem *regs;
- struct timer_list timer;
- unsigned long inuse;
- atomic_t ticks;
-} bcm63xx_wdt_device;
-
-static int expect_close;
-
-static int wdt_time = WDT_DEFAULT_TIME;
-static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
- __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-/* HW functions */
-static void bcm63xx_wdt_hw_start(void)
-{
- bcm_writel(0xfffffffe, bcm63xx_wdt_device.regs + WDT_DEFVAL_REG);
- bcm_writel(WDT_START_1, bcm63xx_wdt_device.regs + WDT_CTL_REG);
- bcm_writel(WDT_START_2, bcm63xx_wdt_device.regs + WDT_CTL_REG);
-}
-
-static void bcm63xx_wdt_hw_stop(void)
-{
- bcm_writel(WDT_STOP_1, bcm63xx_wdt_device.regs + WDT_CTL_REG);
- bcm_writel(WDT_STOP_2, bcm63xx_wdt_device.regs + WDT_CTL_REG);
-}
-
-static void bcm63xx_wdt_isr(void *data)
-{
- struct pt_regs *regs = get_irq_regs();
-
- die(PFX " fire", regs);
-}
-
-static void bcm63xx_timer_tick(struct timer_list *unused)
-{
- if (!atomic_dec_and_test(&bcm63xx_wdt_device.ticks)) {
- bcm63xx_wdt_hw_start();
- mod_timer(&bcm63xx_wdt_device.timer, jiffies + HZ);
- } else
- pr_crit("watchdog will restart system\n");
-}
-
-static void bcm63xx_wdt_pet(void)
-{
- atomic_set(&bcm63xx_wdt_device.ticks, wdt_time);
-}
-
-static void bcm63xx_wdt_start(void)
-{
- bcm63xx_wdt_pet();
- bcm63xx_timer_tick(0);
-}
-
-static void bcm63xx_wdt_pause(void)
-{
- del_timer_sync(&bcm63xx_wdt_device.timer);
- bcm63xx_wdt_hw_stop();
-}
-
-static int bcm63xx_wdt_settimeout(int new_time)
-{
- if ((new_time <= 0) || (new_time > WDT_MAX_TIME))
- return -EINVAL;
-
- wdt_time = new_time;
-
- return 0;
-}
-
-static int bcm63xx_wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(0, &bcm63xx_wdt_device.inuse))
- return -EBUSY;
-
- bcm63xx_wdt_start();
- return stream_open(inode, file);
-}
-
-static int bcm63xx_wdt_release(struct inode *inode, struct file *file)
-{
- if (expect_close == 42)
- bcm63xx_wdt_pause();
- else {
- pr_crit("Unexpected close, not stopping watchdog!\n");
- bcm63xx_wdt_start();
- }
- clear_bit(0, &bcm63xx_wdt_device.inuse);
- expect_close = 0;
- return 0;
-}
-
-static ssize_t bcm63xx_wdt_write(struct file *file, const char *data,
- size_t len, loff_t *ppos)
-{
- if (len) {
- if (!nowayout) {
- size_t i;
-
- /* In case it was set long ago */
- expect_close = 0;
-
- for (i = 0; i != len; i++) {
- char c;
- if (get_user(c, data + i))
- return -EFAULT;
- if (c == 'V')
- expect_close = 42;
- }
- }
- bcm63xx_wdt_pet();
- }
- return len;
-}
-
-static struct watchdog_info bcm63xx_wdt_info = {
- .identity = PFX,
- .options = WDIOF_SETTIMEOUT |
- WDIOF_KEEPALIVEPING |
- WDIOF_MAGICCLOSE,
-};
-
-
-static long bcm63xx_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- void __user *argp = (void __user *)arg;
- int __user *p = argp;
- int new_value, retval = -EINVAL;
-
- switch (cmd) {
- case WDIOC_GETSUPPORT:
- return copy_to_user(argp, &bcm63xx_wdt_info,
- sizeof(bcm63xx_wdt_info)) ? -EFAULT : 0;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, p);
-
- case WDIOC_SETOPTIONS:
- if (get_user(new_value, p))
- return -EFAULT;
-
- if (new_value & WDIOS_DISABLECARD) {
- bcm63xx_wdt_pause();
- retval = 0;
- }
- if (new_value & WDIOS_ENABLECARD) {
- bcm63xx_wdt_start();
- retval = 0;
- }
-
- return retval;
-
- case WDIOC_KEEPALIVE:
- bcm63xx_wdt_pet();
- return 0;
-
- case WDIOC_SETTIMEOUT:
- if (get_user(new_value, p))
- return -EFAULT;
-
- if (bcm63xx_wdt_settimeout(new_value))
- return -EINVAL;
-
- bcm63xx_wdt_pet();
-
- fallthrough;
-
- case WDIOC_GETTIMEOUT:
- return put_user(wdt_time, p);
-
- default:
- return -ENOTTY;
-
- }
-}
-
-static const struct file_operations bcm63xx_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = bcm63xx_wdt_write,
- .unlocked_ioctl = bcm63xx_wdt_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
- .open = bcm63xx_wdt_open,
- .release = bcm63xx_wdt_release,
-};
-
-static struct miscdevice bcm63xx_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &bcm63xx_wdt_fops,
-};
-
-
-static int bcm63xx_wdt_probe(struct platform_device *pdev)
-{
- int ret;
- struct resource *r;
-
- timer_setup(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0);
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "failed to get resources\n");
- return -ENODEV;
- }
-
- bcm63xx_wdt_device.regs = devm_ioremap(&pdev->dev, r->start,
- resource_size(r));
- if (!bcm63xx_wdt_device.regs) {
- dev_err(&pdev->dev, "failed to remap I/O resources\n");
- return -ENXIO;
- }
-
- ret = bcm63xx_timer_register(TIMER_WDT_ID, bcm63xx_wdt_isr, NULL);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to register wdt timer isr\n");
- return ret;
- }
-
- if (bcm63xx_wdt_settimeout(wdt_time)) {
- bcm63xx_wdt_settimeout(WDT_DEFAULT_TIME);
- dev_info(&pdev->dev,
- ": wdt_time value must be 1 <= wdt_time <= 256, using %d\n",
- wdt_time);
- }
-
- ret = misc_register(&bcm63xx_wdt_miscdev);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to register watchdog device\n");
- goto unregister_timer;
- }
-
- dev_info(&pdev->dev, " started, timer margin: %d sec\n",
- WDT_DEFAULT_TIME);
-
- return 0;
-
-unregister_timer:
- bcm63xx_timer_unregister(TIMER_WDT_ID);
- return ret;
-}
-
-static int bcm63xx_wdt_remove(struct platform_device *pdev)
-{
- if (!nowayout)
- bcm63xx_wdt_pause();
-
- misc_deregister(&bcm63xx_wdt_miscdev);
- bcm63xx_timer_unregister(TIMER_WDT_ID);
- return 0;
-}
-
-static void bcm63xx_wdt_shutdown(struct platform_device *pdev)
-{
- bcm63xx_wdt_pause();
-}
-
-static struct platform_driver bcm63xx_wdt_driver = {
- .probe = bcm63xx_wdt_probe,
- .remove = bcm63xx_wdt_remove,
- .shutdown = bcm63xx_wdt_shutdown,
- .driver = {
- .name = "bcm63xx-wdt",
- }
-};
-
-module_platform_driver(bcm63xx_wdt_driver);
-
-MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_DESCRIPTION("Driver for the Broadcom BCM63xx SoC watchdog");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:bcm63xx-wdt");
diff --git a/drivers/watchdog/bcm7038_wdt.c b/drivers/watchdog/bcm7038_wdt.c
index acaaa0005d5b..8656a137e9a4 100644
--- a/drivers/watchdog/bcm7038_wdt.c
+++ b/drivers/watchdog/bcm7038_wdt.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/platform_data/bcm7038_wdt.h>
#include <linux/pm.h>
#include <linux/watchdog.h>
@@ -133,8 +134,10 @@ static void bcm7038_clk_disable_unprepare(void *data)
static int bcm7038_wdt_probe(struct platform_device *pdev)
{
+ struct bcm7038_wdt_platform_data *pdata = pdev->dev.platform_data;
struct device *dev = &pdev->dev;
struct bcm7038_watchdog *wdt;
+ const char *clk_name = NULL;
int err;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
@@ -147,7 +150,10 @@ static int bcm7038_wdt_probe(struct platform_device *pdev)
if (IS_ERR(wdt->base))
return PTR_ERR(wdt->base);
- wdt->clk = devm_clk_get(dev, NULL);
+ if (pdata && pdata->clk_name)
+ clk_name = pdata->clk_name;
+
+ wdt->clk = devm_clk_get(dev, clk_name);
/* If unable to get clock, use default frequency */
if (!IS_ERR(wdt->clk)) {
err = clk_prepare_enable(wdt->clk);
@@ -217,8 +223,15 @@ static const struct of_device_id bcm7038_wdt_match[] = {
};
MODULE_DEVICE_TABLE(of, bcm7038_wdt_match);
+static const struct platform_device_id bcm7038_wdt_devtype[] = {
+ { .name = "bcm63xx-wdt" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, bcm7038_wdt_devtype);
+
static struct platform_driver bcm7038_wdt_driver = {
.probe = bcm7038_wdt_probe,
+ .id_table = bcm7038_wdt_devtype,
.driver = {
.name = "bcm7038-wdt",
.of_match_table = bcm7038_wdt_match,
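With the id_table entry above, legacy BCM63xx board code can bind to this driver and hand it a clock name through the new platform data (the clk_name member is the one dereferenced in the probe hunk). A hedged sketch of such a registration; the clock name "periph" is illustrative:

#include <linux/platform_data/bcm7038_wdt.h>
#include <linux/platform_device.h>

static const struct bcm7038_wdt_platform_data bcm63xx_wdt_pdata = {
	.clk_name = "periph",		/* illustrative clock name */
};

static int __init bcm63xx_wdt_dev_init(void)
{
	struct platform_device *pdev;

	pdev = platform_device_register_data(NULL, "bcm63xx-wdt", -1,
					     &bcm63xx_wdt_pdata,
					     sizeof(bcm63xx_wdt_pdata));
	return PTR_ERR_OR_ZERO(pdev);
}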
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
index d79ce64e26a9..9adad1862bbd 100644
--- a/drivers/watchdog/da9063_wdt.c
+++ b/drivers/watchdog/da9063_wdt.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/mfd/da9063/registers.h>
#include <linux/mfd/da9063/core.h>
@@ -169,14 +170,19 @@ static int da9063_wdt_restart(struct watchdog_device *wdd, unsigned long action,
void *data)
{
struct da9063 *da9063 = watchdog_get_drvdata(wdd);
+ struct i2c_client *client = to_i2c_client(da9063->dev);
int ret;
- ret = regmap_write(da9063->regmap, DA9063_REG_CONTROL_F,
- DA9063_SHUTDOWN);
- if (ret)
+	/* Don't use regmap because it is not safe in atomic context */
+ ret = i2c_smbus_write_byte_data(client, DA9063_REG_CONTROL_F,
+ DA9063_SHUTDOWN);
+ if (ret < 0)
dev_alert(da9063->dev, "Failed to shutdown (err = %d)\n",
ret);
+ /* wait for reset to assert... */
+ mdelay(500);
+
return ret;
}
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index e6eaba6bae5b..584a56893b81 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -134,7 +134,7 @@ static unsigned int davinci_wdt_get_timeleft(struct watchdog_device *wdd)
timer_counter = ioread32(davinci_wdt->base + TIM12);
timer_counter |= ((u64)ioread32(davinci_wdt->base + TIM34) << 32);
- do_div(timer_counter, freq);
+ timer_counter = div64_ul(timer_counter, freq);
return wdd->timeout - timer_counter;
}
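For context on this change: do_div() divides its first argument in place and returns the remainder, while div64_ul() returns the quotient, which is why the result must now be assigned explicitly. A small illustration:

u64 ticks = 123456789ULL;
u32 freq = 24000000;
u32 rem;

rem = do_div(ticks, freq);		/* ticks is now the quotient: 5 */
ticks = div64_ul(123456789ULL, freq);	/* same quotient; remainder dropped */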
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index ee90c5f943f9..7f59c680de25 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -49,6 +49,7 @@
#define SIO_F81803_ID 0x1210 /* Chipset ID */
#define SIO_F81865_ID 0x0704 /* Chipset ID */
#define SIO_F81866_ID 0x1010 /* Chipset ID */
+#define SIO_F81966_ID 0x1502 /* F81804 chipset ID, same for f81966 */
#define F71808FG_REG_WDO_CONF 0xf0
#define F71808FG_REG_WDT_CONF 0xf5
@@ -105,7 +106,7 @@ MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with"
" given initial timeout. Zero (default) disables this feature.");
enum chips { f71808fg, f71858fg, f71862fg, f71868, f71869, f71882fg, f71889fg,
- f81803, f81865, f81866};
+ f81803, f81865, f81866, f81966};
static const char * const fintek_wdt_names[] = {
"f71808fg",
@@ -118,6 +119,7 @@ static const char * const fintek_wdt_names[] = {
"f81803",
"f81865",
"f81866",
+ "f81966"
};
/* Super-I/O Function prototypes */
@@ -347,6 +349,7 @@ static int fintek_wdt_start(struct watchdog_device *wdd)
break;
case f81866:
+ case f81966:
/*
* GPIO1 Control Register when 27h BIT3:2 = 01 & BIT0 = 0.
* The PIN 70(GPIO15/WDTRST) is controlled by 2Ch:
@@ -373,7 +376,7 @@ static int fintek_wdt_start(struct watchdog_device *wdd)
superio_select(wd->sioaddr, SIO_F71808FG_LD_WDT);
superio_set_bit(wd->sioaddr, SIO_REG_ENABLE, 0);
- if (wd->type == f81865 || wd->type == f81866)
+ if (wd->type == f81865 || wd->type == f81866 || wd->type == f81966)
superio_set_bit(wd->sioaddr, F81865_REG_WDO_CONF,
F81865_FLAG_WDOUT_EN);
else
@@ -580,6 +583,9 @@ static int __init fintek_wdt_find(int sioaddr)
case SIO_F81866_ID:
type = f81866;
break;
+ case SIO_F81966_ID:
+ type = f81966;
+ break;
default:
pr_info("Unrecognized Fintek device: %04x\n",
(unsigned int)devid);
diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c
index 945f5e65db57..d3c9e2f6e63b 100644
--- a/drivers/watchdog/meson_gxbb_wdt.c
+++ b/drivers/watchdog/meson_gxbb_wdt.c
@@ -198,7 +198,6 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev)
meson_gxbb_wdt_set_timeout(&data->wdt_dev, data->wdt_dev.timeout);
- watchdog_stop_on_reboot(&data->wdt_dev);
return devm_watchdog_register_device(dev, &data->wdt_dev);
}
diff --git a/drivers/watchdog/msc313e_wdt.c b/drivers/watchdog/msc313e_wdt.c
index 0d497aa0fb7d..90171431fc59 100644
--- a/drivers/watchdog/msc313e_wdt.c
+++ b/drivers/watchdog/msc313e_wdt.c
@@ -120,6 +120,10 @@ static int msc313e_wdt_probe(struct platform_device *pdev)
priv->wdev.max_timeout = U32_MAX / clk_get_rate(priv->clk);
priv->wdev.timeout = MSC313E_WDT_DEFAULT_TIMEOUT;
+ /* If the period is non-zero the WDT is running */
+ if (readw(priv->base + REG_WDT_MAX_PRD_L) | (readw(priv->base + REG_WDT_MAX_PRD_H) << 16))
+ set_bit(WDOG_HW_RUNNING, &priv->wdev.status);
+
watchdog_set_drvdata(&priv->wdev, priv);
watchdog_init_timeout(&priv->wdev, timeout, dev);
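The added check follows the usual detect-already-running idiom: when the hardware is found counting at probe, WDOG_HW_RUNNING tells the watchdog core to keep pinging it until userspace opens the device. Schematically (hw_is_counting() is an illustrative helper):

if (hw_is_counting(priv))
	set_bit(WDOG_HW_RUNNING, &priv->wdev.status);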
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 543cf38bd04e..4577a76dd464 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -339,7 +339,7 @@ static int mtk_wdt_probe(struct platform_device *pdev)
if (IS_ERR(mtk_wdt->wdt_base))
return PTR_ERR(mtk_wdt->wdt_base);
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq > 0) {
err = devm_request_irq(&pdev->dev, irq, mtk_wdt_isr, 0, "wdt_bark",
&mtk_wdt->wdt_dev);
diff --git a/drivers/watchdog/realtek_otto_wdt.c b/drivers/watchdog/realtek_otto_wdt.c
new file mode 100644
index 000000000000..60058a0c3ec4
--- /dev/null
+++ b/drivers/watchdog/realtek_otto_wdt.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+/*
+ * Realtek Otto MIPS platform watchdog
+ *
+ * Watchdog timer that will reset the system after timeout, using the selected
+ * reset mode.
+ *
+ * Counter scaling and timeouts:
+ * - Base prescale of (2 << 25), providing tick duration T_0: 168ms @ 200MHz
+ * - PRESCALE: logarithmic prescaler adding a factor of {1, 2, 4, 8}
+ * - Phase 1: Times out after (PHASE1 + 1) × PRESCALE × T_0
+ * Generates an interrupt, WDT cannot be stopped after phase 1
+ * - Phase 2: starts after phase 1, times out after (PHASE2 + 1) × PRESCALE × T_0
+ * Resets the system according to RST_MODE
+ */
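Worked numbers for the scaling described above, using the formula from otto_wdt_tick_ms() further down and the 200 MHz bus clock the comment mentions (illustrative):

unsigned int clk_rate_khz = 200000;
unsigned int tick_ms = DIV_ROUND_CLOSEST(1 << 25, clk_rate_khz);	/* 168 ms */
/* PRESCALE = 3 stretches this to 1 << 28 cycles, i.e. ~1342 ms per tick */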
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/math.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/reboot.h>
+#include <linux/watchdog.h>
+
+#define OTTO_WDT_REG_CNTR 0x0
+#define OTTO_WDT_CNTR_PING BIT(31)
+
+#define OTTO_WDT_REG_INTR 0x4
+#define OTTO_WDT_INTR_PHASE_1 BIT(31)
+#define OTTO_WDT_INTR_PHASE_2 BIT(30)
+
+#define OTTO_WDT_REG_CTRL 0x8
+#define OTTO_WDT_CTRL_ENABLE BIT(31)
+#define OTTO_WDT_CTRL_PRESCALE GENMASK(30, 29)
+#define OTTO_WDT_CTRL_PHASE1 GENMASK(26, 22)
+#define OTTO_WDT_CTRL_PHASE2 GENMASK(19, 15)
+#define OTTO_WDT_CTRL_RST_MODE GENMASK(1, 0)
+#define OTTO_WDT_MODE_SOC 0
+#define OTTO_WDT_MODE_CPU 1
+#define OTTO_WDT_MODE_SOFTWARE 2
+#define OTTO_WDT_CTRL_DEFAULT OTTO_WDT_MODE_CPU
+
+#define OTTO_WDT_PRESCALE_MAX 3
+
+/*
+ * One higher than the max values contained in PHASE{1,2}, since a value of 0
+ * corresponds to one tick.
+ */
+#define OTTO_WDT_PHASE_TICKS_MAX 32
+
+/*
+ * The maximum reset delay is actually 2×32 ticks, but that would require large
+ * pretimeout values for timeouts longer than 32 ticks. Limit the maximum timeout
+ * to 32 + 1 to ensure small pretimeout values can be configured as expected.
+ */
+#define OTTO_WDT_TIMEOUT_TICKS_MAX (OTTO_WDT_PHASE_TICKS_MAX + 1)
+
+struct otto_wdt_ctrl {
+ struct watchdog_device wdev;
+ struct device *dev;
+ void __iomem *base;
+ unsigned int clk_rate_khz;
+ int irq_phase1;
+};
+
+static int otto_wdt_start(struct watchdog_device *wdev)
+{
+ struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev);
+ u32 v;
+
+ v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL);
+ v |= OTTO_WDT_CTRL_ENABLE;
+ iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL);
+
+ return 0;
+}
+
+static int otto_wdt_stop(struct watchdog_device *wdev)
+{
+ struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev);
+ u32 v;
+
+ v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL);
+ v &= ~OTTO_WDT_CTRL_ENABLE;
+ iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL);
+
+ return 0;
+}
+
+static int otto_wdt_ping(struct watchdog_device *wdev)
+{
+ struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev);
+
+ iowrite32(OTTO_WDT_CNTR_PING, ctrl->base + OTTO_WDT_REG_CNTR);
+
+ return 0;
+}
+
+static int otto_wdt_tick_ms(struct otto_wdt_ctrl *ctrl, int prescale)
+{
+ return DIV_ROUND_CLOSEST(1 << (25 + prescale), ctrl->clk_rate_khz);
+}
+
+/*
+ * The timer asserts the PHASE1/PHASE2 IRQs when the number of ticks exceeds
+ * the value stored in those fields. This means each phase will run for at least
+ * one tick, so small values need to be clamped to correctly reflect the timeout.
+ */
+static inline unsigned int div_round_ticks(unsigned int val, unsigned int tick_duration,
+ unsigned int min_ticks)
+{
+ return max(min_ticks, DIV_ROUND_UP(val, tick_duration));
+}
+
+static int otto_wdt_determine_timeouts(struct watchdog_device *wdev, unsigned int timeout,
+ unsigned int pretimeout)
+{
+ struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev);
+ unsigned int pretimeout_ms = pretimeout * 1000;
+ unsigned int timeout_ms = timeout * 1000;
+ unsigned int prescale_next = 0;
+ unsigned int phase1_ticks;
+ unsigned int phase2_ticks;
+ unsigned int total_ticks;
+ unsigned int prescale;
+ unsigned int tick_ms;
+ u32 v;
+
+ do {
+ prescale = prescale_next;
+ if (prescale > OTTO_WDT_PRESCALE_MAX)
+ return -EINVAL;
+
+ tick_ms = otto_wdt_tick_ms(ctrl, prescale);
+ total_ticks = div_round_ticks(timeout_ms, tick_ms, 2);
+ phase1_ticks = div_round_ticks(timeout_ms - pretimeout_ms, tick_ms, 1);
+ phase2_ticks = total_ticks - phase1_ticks;
+
+ prescale_next++;
+ } while (phase1_ticks > OTTO_WDT_PHASE_TICKS_MAX
+ || phase2_ticks > OTTO_WDT_PHASE_TICKS_MAX);
+
+ v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL);
+
+ v &= ~(OTTO_WDT_CTRL_PRESCALE | OTTO_WDT_CTRL_PHASE1 | OTTO_WDT_CTRL_PHASE2);
+ v |= FIELD_PREP(OTTO_WDT_CTRL_PHASE1, phase1_ticks - 1);
+ v |= FIELD_PREP(OTTO_WDT_CTRL_PHASE2, phase2_ticks - 1);
+ v |= FIELD_PREP(OTTO_WDT_CTRL_PRESCALE, prescale);
+
+ iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL);
+
+ timeout_ms = total_ticks * tick_ms;
+ ctrl->wdev.timeout = timeout_ms / 1000;
+
+ pretimeout_ms = phase2_ticks * tick_ms;
+ ctrl->wdev.pretimeout = pretimeout_ms / 1000;
+
+ return 0;
+}
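A walkthrough of the prescale loop above for illustrative inputs (200 MHz clock, timeout = 30 s, pretimeout = 1 s):

/*
 * prescale 0-2: ticks of 168/336/671 ms put phase 1 at 173/87/44
 *               ticks - above the 32-tick per-phase limit
 * prescale 3:   1342 ms ticks give 23 total ticks, phase1 = 22 and
 *               phase2 = 1, so timeout_ms = 30866 and pretimeout_ms =
 *               1342 are written back as 30 s and 1 s
 */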
+
+static int otto_wdt_set_timeout(struct watchdog_device *wdev, unsigned int val)
+{
+ return otto_wdt_determine_timeouts(wdev, val, min(wdev->pretimeout, val - 1));
+}
+
+static int otto_wdt_set_pretimeout(struct watchdog_device *wdev, unsigned int val)
+{
+ return otto_wdt_determine_timeouts(wdev, wdev->timeout, val);
+}
+
+static int otto_wdt_restart(struct watchdog_device *wdev, unsigned long reboot_mode,
+ void *data)
+{
+ struct otto_wdt_ctrl *ctrl = watchdog_get_drvdata(wdev);
+ u32 reset_mode;
+ u32 v;
+
+ disable_irq(ctrl->irq_phase1);
+
+ switch (reboot_mode) {
+ case REBOOT_SOFT:
+ reset_mode = OTTO_WDT_MODE_SOFTWARE;
+ break;
+ case REBOOT_WARM:
+ reset_mode = OTTO_WDT_MODE_CPU;
+ break;
+ default:
+ reset_mode = OTTO_WDT_MODE_SOC;
+ break;
+ }
+
+ /* Configure for shortest timeout and wait for reset to occur */
+ v = FIELD_PREP(OTTO_WDT_CTRL_RST_MODE, reset_mode) | OTTO_WDT_CTRL_ENABLE;
+ iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL);
+
+ mdelay(3 * otto_wdt_tick_ms(ctrl, 0));
+
+ return 0;
+}
+
+static irqreturn_t otto_wdt_phase1_isr(int irq, void *dev_id)
+{
+ struct otto_wdt_ctrl *ctrl = dev_id;
+
+ iowrite32(OTTO_WDT_INTR_PHASE_1, ctrl->base + OTTO_WDT_REG_INTR);
+ dev_crit(ctrl->dev, "phase 1 timeout\n");
+ watchdog_notify_pretimeout(&ctrl->wdev);
+
+ return IRQ_HANDLED;
+}
+
+static const struct watchdog_ops otto_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = otto_wdt_start,
+ .stop = otto_wdt_stop,
+ .ping = otto_wdt_ping,
+ .set_timeout = otto_wdt_set_timeout,
+ .set_pretimeout = otto_wdt_set_pretimeout,
+ .restart = otto_wdt_restart,
+};
+
+static const struct watchdog_info otto_wdt_info = {
+ .identity = "Realtek Otto watchdog timer",
+ .options = WDIOF_KEEPALIVEPING |
+ WDIOF_MAGICCLOSE |
+ WDIOF_SETTIMEOUT |
+ WDIOF_PRETIMEOUT,
+};
+
+static void otto_wdt_clock_action(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int otto_wdt_probe_clk(struct otto_wdt_ctrl *ctrl)
+{
+ struct clk *clk = devm_clk_get(ctrl->dev, NULL);
+ int ret;
+
+ if (IS_ERR(clk))
+ return dev_err_probe(ctrl->dev, PTR_ERR(clk), "Failed to get clock\n");
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return dev_err_probe(ctrl->dev, ret, "Failed to enable clock\n");
+
+ ret = devm_add_action_or_reset(ctrl->dev, otto_wdt_clock_action, clk);
+ if (ret)
+ return ret;
+
+ ctrl->clk_rate_khz = clk_get_rate(clk) / 1000;
+ if (ctrl->clk_rate_khz == 0)
+ return dev_err_probe(ctrl->dev, -ENXIO, "Failed to get clock rate\n");
+
+ return 0;
+}
+
+static int otto_wdt_probe_reset_mode(struct otto_wdt_ctrl *ctrl)
+{
+ static const char *mode_property = "realtek,reset-mode";
+ const struct fwnode_handle *node = ctrl->dev->fwnode;
+ int mode_count;
+ u32 mode;
+ u32 v;
+
+ if (!node)
+ return -ENXIO;
+
+ mode_count = fwnode_property_string_array_count(node, mode_property);
+ if (mode_count < 0)
+ return mode_count;
+ else if (mode_count == 0)
+ return 0;
+ else if (mode_count != 1)
+ return -EINVAL;
+
+ if (fwnode_property_match_string(node, mode_property, "soc") == 0)
+ mode = OTTO_WDT_MODE_SOC;
+ else if (fwnode_property_match_string(node, mode_property, "cpu") == 0)
+ mode = OTTO_WDT_MODE_CPU;
+ else if (fwnode_property_match_string(node, mode_property, "software") == 0)
+ mode = OTTO_WDT_MODE_SOFTWARE;
+ else
+ return -EINVAL;
+
+ v = ioread32(ctrl->base + OTTO_WDT_REG_CTRL);
+ v &= ~OTTO_WDT_CTRL_RST_MODE;
+ v |= FIELD_PREP(OTTO_WDT_CTRL_RST_MODE, mode);
+ iowrite32(v, ctrl->base + OTTO_WDT_REG_CTRL);
+
+ return 0;
+}
+
+static int otto_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct otto_wdt_ctrl *ctrl;
+ unsigned int max_tick_ms;
+ int ret;
+
+ ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+ if (!ctrl)
+ return -ENOMEM;
+
+ ctrl->dev = dev;
+ ctrl->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ctrl->base))
+ return PTR_ERR(ctrl->base);
+
+ /* Clear any old interrupts and reset initial state */
+ iowrite32(OTTO_WDT_INTR_PHASE_1 | OTTO_WDT_INTR_PHASE_2,
+ ctrl->base + OTTO_WDT_REG_INTR);
+ iowrite32(OTTO_WDT_CTRL_DEFAULT, ctrl->base + OTTO_WDT_REG_CTRL);
+
+ ret = otto_wdt_probe_clk(ctrl);
+ if (ret)
+ return ret;
+
+ ctrl->irq_phase1 = platform_get_irq_byname(pdev, "phase1");
+ if (ctrl->irq_phase1 < 0)
+ return ctrl->irq_phase1;
+
+ ret = devm_request_irq(dev, ctrl->irq_phase1, otto_wdt_phase1_isr, 0,
+ "realtek-otto-wdt", ctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get IRQ for phase1\n");
+
+ ret = otto_wdt_probe_reset_mode(ctrl);
+ if (ret)
+ return dev_err_probe(dev, ret, "Invalid reset mode specified\n");
+
+ ctrl->wdev.parent = dev;
+ ctrl->wdev.info = &otto_wdt_info;
+ ctrl->wdev.ops = &otto_wdt_ops;
+
+ /*
+ * Since pretimeout cannot be disabled, min. timeout is twice the
+ * subsystem resolution. Max. timeout is ca. 43s at a bus clock of 200MHz.
+ */
+ ctrl->wdev.min_timeout = 2;
+ max_tick_ms = otto_wdt_tick_ms(ctrl, OTTO_WDT_PRESCALE_MAX);
+ ctrl->wdev.max_hw_heartbeat_ms = max_tick_ms * OTTO_WDT_TIMEOUT_TICKS_MAX;
+ ctrl->wdev.timeout = min(30U, ctrl->wdev.max_hw_heartbeat_ms / 1000);
+
+ watchdog_set_drvdata(&ctrl->wdev, ctrl);
+ watchdog_init_timeout(&ctrl->wdev, 0, dev);
+ watchdog_stop_on_reboot(&ctrl->wdev);
+ watchdog_set_restart_priority(&ctrl->wdev, 128);
+
+ ret = otto_wdt_determine_timeouts(&ctrl->wdev, ctrl->wdev.timeout, 1);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set timeout\n");
+
+ return devm_watchdog_register_device(dev, &ctrl->wdev);
+}
+
+static const struct of_device_id otto_wdt_ids[] = {
+ { .compatible = "realtek,rtl8380-wdt" },
+ { .compatible = "realtek,rtl8390-wdt" },
+ { .compatible = "realtek,rtl9300-wdt" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, otto_wdt_ids);
+
+static struct platform_driver otto_wdt_driver = {
+ .probe = otto_wdt_probe,
+ .driver = {
+ .name = "realtek-otto-watchdog",
+ .of_match_table = otto_wdt_ids,
+ },
+};
+module_platform_driver(otto_wdt_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sander Vanheule <sander@svanheule.net>");
+MODULE_DESCRIPTION("Realtek Otto watchdog timer driver");
diff --git a/drivers/watchdog/rzg2l_wdt.c b/drivers/watchdog/rzg2l_wdt.c
new file mode 100644
index 000000000000..6b426df34fd6
--- /dev/null
+++ b/drivers/watchdog/rzg2l_wdt.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Renesas RZ/G2L WDT Watchdog Driver
+ *
+ * Copyright (C) 2021 Renesas Electronics Corporation
+ */
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/units.h>
+#include <linux/watchdog.h>
+
+#define WDTCNT 0x00
+#define WDTSET 0x04
+#define WDTTIM 0x08
+#define WDTINT 0x0C
+#define WDTCNT_WDTEN BIT(0)
+#define WDTINT_INTDISP BIT(0)
+
+#define WDT_DEFAULT_TIMEOUT 60U
+
+/* The timeout period is set via the 12-bit field WDTSET[31:20] */
+#define WDTSET_COUNTER_MASK (0xFFF00000)
+#define WDTSET_COUNTER_VAL(f) ((f) << 20)
+
+#define F2CYCLE_NSEC(f) (1000000000 / (f))
+
+static bool nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, bool, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+struct rzg2l_wdt_priv {
+ void __iomem *base;
+ struct watchdog_device wdev;
+ struct reset_control *rstc;
+ unsigned long osc_clk_rate;
+ unsigned long delay;
+};
+
+static void rzg2l_wdt_wait_delay(struct rzg2l_wdt_priv *priv)
+{
+	/* Delay required after writing to a setting register */
+ ndelay(priv->delay);
+}
+
+static u32 rzg2l_wdt_get_cycle_usec(unsigned long cycle, u32 wdttime)
+{
+ u64 timer_cycle_us = 1024 * 1024 * (wdttime + 1) * MICRO;
+
+ return div64_ul(timer_cycle_us, cycle);
+}
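A worked example of this helper, assuming an illustrative 24 MHz oscclk:

u32 cycle_us = rzg2l_wdt_get_cycle_usec(24000000, 0);	/* ~43690 us per cycle */
u32 max_s = rzg2l_wdt_get_cycle_usec(24000000, 0xfff) / USEC_PER_SEC;
/* max_s == 178, matching the max_timeout computation in probe below */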
+
+static void rzg2l_wdt_write(struct rzg2l_wdt_priv *priv, u32 val, unsigned int reg)
+{
+ if (reg == WDTSET)
+ val &= WDTSET_COUNTER_MASK;
+
+ writel_relaxed(val, priv->base + reg);
+	/* Registers other than WDTINT are always synchronized with WDT_CLK */
+ if (reg != WDTINT)
+ rzg2l_wdt_wait_delay(priv);
+}
+
+static void rzg2l_wdt_init_timeout(struct watchdog_device *wdev)
+{
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+ u32 time_out;
+
+ /* Clear Lapsed Time Register and clear Interrupt */
+ rzg2l_wdt_write(priv, WDTINT_INTDISP, WDTINT);
+	/* Two consecutive overflow cycles are needed to trigger a reset */
+ time_out = (wdev->timeout * (MICRO / 2)) /
+ rzg2l_wdt_get_cycle_usec(priv->osc_clk_rate, 0);
+ rzg2l_wdt_write(priv, WDTSET_COUNTER_VAL(time_out), WDTSET);
+}
+
+static int rzg2l_wdt_start(struct watchdog_device *wdev)
+{
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ reset_control_deassert(priv->rstc);
+ pm_runtime_get_sync(wdev->parent);
+
+ /* Initialize time out */
+ rzg2l_wdt_init_timeout(wdev);
+
+ /* Initialize watchdog counter register */
+ rzg2l_wdt_write(priv, 0, WDTTIM);
+
+	/* Enable watchdog timer */
+ rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT);
+
+ return 0;
+}
+
+static int rzg2l_wdt_stop(struct watchdog_device *wdev)
+{
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ pm_runtime_put(wdev->parent);
+ reset_control_assert(priv->rstc);
+
+ return 0;
+}
+
+static int rzg2l_wdt_restart(struct watchdog_device *wdev,
+ unsigned long action, void *data)
+{
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ /* Reset the module before we modify any register */
+ reset_control_reset(priv->rstc);
+ pm_runtime_get_sync(wdev->parent);
+
+ /* smallest counter value to reboot soon */
+ rzg2l_wdt_write(priv, WDTSET_COUNTER_VAL(1), WDTSET);
+
+	/* Enable watchdog timer */
+ rzg2l_wdt_write(priv, WDTCNT_WDTEN, WDTCNT);
+
+ return 0;
+}
+
+static const struct watchdog_info rzg2l_wdt_ident = {
+ .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
+ .identity = "Renesas RZ/G2L WDT Watchdog",
+};
+
+static int rzg2l_wdt_ping(struct watchdog_device *wdev)
+{
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ rzg2l_wdt_write(priv, WDTINT_INTDISP, WDTINT);
+
+ return 0;
+}
+
+static const struct watchdog_ops rzg2l_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rzg2l_wdt_start,
+ .stop = rzg2l_wdt_stop,
+ .ping = rzg2l_wdt_ping,
+ .restart = rzg2l_wdt_restart,
+};
+
+static void rzg2l_wdt_reset_assert_pm_disable_put(void *data)
+{
+ struct watchdog_device *wdev = data;
+ struct rzg2l_wdt_priv *priv = watchdog_get_drvdata(wdev);
+
+ pm_runtime_put(wdev->parent);
+ pm_runtime_disable(wdev->parent);
+ reset_control_assert(priv->rstc);
+}
+
+static int rzg2l_wdt_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rzg2l_wdt_priv *priv;
+ unsigned long pclk_rate;
+ struct clk *wdt_clk;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ /* Get watchdog main clock */
+ wdt_clk = clk_get(&pdev->dev, "oscclk");
+ if (IS_ERR(wdt_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(wdt_clk), "no oscclk");
+
+ priv->osc_clk_rate = clk_get_rate(wdt_clk);
+ clk_put(wdt_clk);
+ if (!priv->osc_clk_rate)
+ return dev_err_probe(&pdev->dev, -EINVAL, "oscclk rate is 0");
+
+ /* Get Peripheral clock */
+ wdt_clk = clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(wdt_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(wdt_clk), "no pclk");
+
+ pclk_rate = clk_get_rate(wdt_clk);
+ clk_put(wdt_clk);
+ if (!pclk_rate)
+ return dev_err_probe(&pdev->dev, -EINVAL, "pclk rate is 0");
+
+ priv->delay = F2CYCLE_NSEC(priv->osc_clk_rate) * 6 + F2CYCLE_NSEC(pclk_rate) * 9;
+
+ priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (IS_ERR(priv->rstc))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->rstc),
+ "failed to get cpg reset");
+
+ reset_control_deassert(priv->rstc);
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0) {
+ dev_err(dev, "pm_runtime_resume_and_get failed ret=%pe", ERR_PTR(ret));
+ goto out_pm_get;
+ }
+
+ priv->wdev.info = &rzg2l_wdt_ident;
+ priv->wdev.ops = &rzg2l_wdt_ops;
+ priv->wdev.parent = dev;
+ priv->wdev.min_timeout = 1;
+ priv->wdev.max_timeout = rzg2l_wdt_get_cycle_usec(priv->osc_clk_rate, 0xfff) /
+ USEC_PER_SEC;
+ priv->wdev.timeout = WDT_DEFAULT_TIMEOUT;
+
+ watchdog_set_drvdata(&priv->wdev, priv);
+ ret = devm_add_action_or_reset(&pdev->dev,
+ rzg2l_wdt_reset_assert_pm_disable_put,
+ &priv->wdev);
+ if (ret < 0)
+ return ret;
+
+ watchdog_set_nowayout(&priv->wdev, nowayout);
+ watchdog_stop_on_unregister(&priv->wdev);
+
+ ret = watchdog_init_timeout(&priv->wdev, 0, dev);
+ if (ret)
+ dev_warn(dev, "Specified timeout invalid, using default");
+
+ return devm_watchdog_register_device(&pdev->dev, &priv->wdev);
+
+out_pm_get:
+ pm_runtime_disable(dev);
+ reset_control_assert(priv->rstc);
+
+ return ret;
+}
+
+static const struct of_device_id rzg2l_wdt_ids[] = {
+ { .compatible = "renesas,rzg2l-wdt", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rzg2l_wdt_ids);
+
+static struct platform_driver rzg2l_wdt_driver = {
+ .driver = {
+ .name = "rzg2l_wdt",
+ .of_match_table = rzg2l_wdt_ids,
+ },
+ .probe = rzg2l_wdt_probe,
+};
+module_platform_driver(rzg2l_wdt_driver);
+
+MODULE_DESCRIPTION("Renesas RZ/G2L WDT Watchdog Driver");
+MODULE_AUTHOR("Biju Das <biju.das.jz@bp.renesas.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 2395f353e52d..6db22f2e3a4f 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -56,13 +56,58 @@
#define EXYNOS5_RST_STAT_REG_OFFSET 0x0404
#define EXYNOS5_WDT_DISABLE_REG_OFFSET 0x0408
#define EXYNOS5_WDT_MASK_RESET_REG_OFFSET 0x040c
-#define QUIRK_HAS_PMU_CONFIG (1 << 0)
-#define QUIRK_HAS_RST_STAT (1 << 1)
-#define QUIRK_HAS_WTCLRINT_REG (1 << 2)
+#define EXYNOS850_CLUSTER0_NONCPU_OUT 0x1220
+#define EXYNOS850_CLUSTER0_NONCPU_INT_EN 0x1244
+#define EXYNOS850_CLUSTER1_NONCPU_OUT 0x1620
+#define EXYNOS850_CLUSTER1_NONCPU_INT_EN 0x1644
+
+#define EXYNOS850_CLUSTER0_WDTRESET_BIT 24
+#define EXYNOS850_CLUSTER1_WDTRESET_BIT 23
+
+/**
+ * DOC: Quirk flags for different Samsung watchdog IP-cores
+ *
+ * This driver supports multiple Samsung SoCs, each of which might have
+ * different set of registers and features supported. As watchdog block
+ * sometimes requires modifying PMU registers for proper functioning, register
+ * differences in both watchdog and PMU IP-cores should be accounted for. Quirk
+ * flags described below serve the purpose of telling the driver about mentioned
+ * SoC traits, and can be specified in driver data for each particular supported
+ * device.
+ *
+ * %QUIRK_HAS_WTCLRINT_REG: Watchdog block has WTCLRINT register. It's used to
+ * clear the interrupt once the interrupt service routine is complete. It's
+ * write-only, writing any values to this register clears the interrupt, but
+ * reading is not permitted.
+ *
+ * %QUIRK_HAS_PMU_MASK_RESET: PMU block has the register for disabling/enabling
+ * WDT reset request. On old SoCs it's usually called MASK_WDT_RESET_REQUEST,
+ * new SoCs have a CLUSTERx_NONCPU_INT_EN register, whose 'mask_bit' value
+ * is inverted compared to the former one.
+ *
+ * %QUIRK_HAS_PMU_RST_STAT: PMU block has RST_STAT (reset status) register,
+ * which contains bits indicating the reason for most recent CPU reset. If
+ * present, driver will use this register to check if previous reboot was due to
+ * watchdog timer reset.
+ *
+ * %QUIRK_HAS_PMU_AUTO_DISABLE: PMU block has AUTOMATIC_WDT_RESET_DISABLE
+ * register. If 'mask_bit' bit is set, PMU will disable WDT reset when
+ * corresponding processor is in reset state.
+ *
+ * %QUIRK_HAS_PMU_CNT_EN: PMU block has some register (e.g. CLUSTERx_NONCPU_OUT)
+ * with a "watchdog counter enable" bit. That bit must be set to keep the
+ * watchdog counter running.
+ */
+#define QUIRK_HAS_WTCLRINT_REG (1 << 0)
+#define QUIRK_HAS_PMU_MASK_RESET (1 << 1)
+#define QUIRK_HAS_PMU_RST_STAT (1 << 2)
+#define QUIRK_HAS_PMU_AUTO_DISABLE (1 << 3)
+#define QUIRK_HAS_PMU_CNT_EN (1 << 4)
/* These quirks require that we have a PMU register map */
-#define QUIRKS_HAVE_PMUREG (QUIRK_HAS_PMU_CONFIG | \
- QUIRK_HAS_RST_STAT)
+#define QUIRKS_HAVE_PMUREG \
+ (QUIRK_HAS_PMU_MASK_RESET | QUIRK_HAS_PMU_RST_STAT | \
+ QUIRK_HAS_PMU_AUTO_DISABLE | QUIRK_HAS_PMU_CNT_EN)
static bool nowayout = WATCHDOG_NOWAYOUT;
static int tmr_margin;
@@ -90,26 +135,33 @@ MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, 0 to
* timer reset functionality.
* @mask_reset_reg: Offset in pmureg for the register that masks the watchdog
* timer reset functionality.
+ * @mask_reset_inv: If set, mask_reset_reg value will have inverted meaning.
* @mask_bit: Bit number for the watchdog timer in the disable register and the
* mask reset register.
* @rst_stat_reg: Offset in pmureg for the register that has the reset status.
* @rst_stat_bit: Bit number in the rst_stat register indicating a watchdog
* reset.
+ * @cnt_en_reg: Offset in pmureg for the register that enables WDT counter.
+ * @cnt_en_bit: Bit number for "watchdog counter enable" in cnt_en register.
* @quirks: A bitfield of quirks.
*/
struct s3c2410_wdt_variant {
int disable_reg;
int mask_reset_reg;
+ bool mask_reset_inv;
int mask_bit;
int rst_stat_reg;
int rst_stat_bit;
+ int cnt_en_reg;
+ int cnt_en_bit;
u32 quirks;
};
struct s3c2410_wdt {
struct device *dev;
- struct clk *clock;
+ struct clk *bus_clk; /* for register interface (PCLK) */
+ struct clk *src_clk; /* for WDT counter */
void __iomem *reg_base;
unsigned int count;
spinlock_t lock;
@@ -136,8 +188,8 @@ static const struct s3c2410_wdt_variant drv_data_exynos5250 = {
.mask_bit = 20,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
.rst_stat_bit = 20,
- .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \
- | QUIRK_HAS_WTCLRINT_REG,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_AUTO_DISABLE,
};
static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
@@ -146,8 +198,8 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
.mask_bit = 0,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
.rst_stat_bit = 9,
- .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \
- | QUIRK_HAS_WTCLRINT_REG,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_AUTO_DISABLE,
};
static const struct s3c2410_wdt_variant drv_data_exynos7 = {
@@ -156,8 +208,32 @@ static const struct s3c2410_wdt_variant drv_data_exynos7 = {
.mask_bit = 23,
.rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
.rst_stat_bit = 23, /* A57 WDTRESET */
- .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT \
- | QUIRK_HAS_WTCLRINT_REG,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_AUTO_DISABLE,
+};
+
+static const struct s3c2410_wdt_variant drv_data_exynos850_cl0 = {
+ .mask_reset_reg = EXYNOS850_CLUSTER0_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOS850_CLUSTER0_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOS850_CLUSTER0_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
+};
+
+static const struct s3c2410_wdt_variant drv_data_exynos850_cl1 = {
+ .mask_reset_reg = EXYNOS850_CLUSTER1_NONCPU_INT_EN,
+ .mask_bit = 2,
+ .mask_reset_inv = true,
+ .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
+ .rst_stat_bit = EXYNOS850_CLUSTER1_WDTRESET_BIT,
+ .cnt_en_reg = EXYNOS850_CLUSTER1_NONCPU_OUT,
+ .cnt_en_bit = 7,
+ .quirks = QUIRK_HAS_WTCLRINT_REG | QUIRK_HAS_PMU_MASK_RESET | \
+ QUIRK_HAS_PMU_RST_STAT | QUIRK_HAS_PMU_CNT_EN,
};
static const struct of_device_id s3c2410_wdt_match[] = {
@@ -171,6 +247,8 @@ static const struct of_device_id s3c2410_wdt_match[] = {
.data = &drv_data_exynos5420 },
{ .compatible = "samsung,exynos7-wdt",
.data = &drv_data_exynos7 },
+ { .compatible = "samsung,exynos850-wdt",
+ .data = &drv_data_exynos850_cl0 },
{},
};
MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
@@ -187,9 +265,14 @@ MODULE_DEVICE_TABLE(platform, s3c2410_wdt_ids);
/* functions */
-static inline unsigned int s3c2410wdt_max_timeout(struct clk *clock)
+static inline unsigned long s3c2410wdt_get_freq(struct s3c2410_wdt *wdt)
{
- unsigned long freq = clk_get_rate(clock);
+ return clk_get_rate(wdt->src_clk ? wdt->src_clk : wdt->bus_clk);
+}
+
+static inline unsigned int s3c2410wdt_max_timeout(struct s3c2410_wdt *wdt)
+{
+ const unsigned long freq = s3c2410wdt_get_freq(wdt);
return S3C2410_WTCNT_MAXCNT / (freq / (S3C2410_WTCON_PRESCALE_MAX + 1)
/ S3C2410_WTCON_MAXDIV);
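As a sanity check on the formula above, here is a minimal userspace sketch of the worst-case timeout, assuming the driver's usual constants (a 16-bit down counter, an 8-bit prescaler field, and a maximum divider of 128); treat the exact values and the 26 MHz source clock as illustrative:

#include <stdio.h>

/* Assumed counterparts of the driver constants; values for illustration. */
#define WTCNT_MAXCNT   0xffffUL /* 16-bit down counter */
#define PRESCALE_MAX   0xffUL   /* 8-bit prescaler field */
#define MAXDIV         128UL    /* largest divider setting */

int main(void)
{
	unsigned long freq = 26000000UL; /* hypothetical 26 MHz source clock */
	unsigned long slowest_tick = freq / (PRESCALE_MAX + 1) / MAXDIV; /* ~793 Hz */

	/* Mirrors s3c2410wdt_max_timeout(): roughly 82 s at 26 MHz. */
	printf("max timeout = %lu s\n", WTCNT_MAXCNT / slowest_tick);
	return 0;
}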
@@ -200,35 +283,74 @@ static inline struct s3c2410_wdt *freq_to_wdt(struct notifier_block *nb)
return container_of(nb, struct s3c2410_wdt, freq_transition);
}
-static int s3c2410wdt_mask_and_disable_reset(struct s3c2410_wdt *wdt, bool mask)
+static int s3c2410wdt_disable_wdt_reset(struct s3c2410_wdt *wdt, bool mask)
{
+ const u32 mask_val = BIT(wdt->drv_data->mask_bit);
+ const u32 val = mask ? mask_val : 0;
int ret;
- u32 mask_val = 1 << wdt->drv_data->mask_bit;
- u32 val = 0;
- /* No need to do anything if no PMU CONFIG needed */
- if (!(wdt->drv_data->quirks & QUIRK_HAS_PMU_CONFIG))
- return 0;
+ ret = regmap_update_bits(wdt->pmureg, wdt->drv_data->disable_reg,
+ mask_val, val);
+ if (ret < 0)
+ dev_err(wdt->dev, "failed to update reg(%d)\n", ret);
- if (mask)
- val = mask_val;
+ return ret;
+}
+
+static int s3c2410wdt_mask_wdt_reset(struct s3c2410_wdt *wdt, bool mask)
+{
+ const u32 mask_val = BIT(wdt->drv_data->mask_bit);
+ const bool val_inv = wdt->drv_data->mask_reset_inv;
+ const u32 val = (mask ^ val_inv) ? mask_val : 0;
+ int ret;
- ret = regmap_update_bits(wdt->pmureg,
- wdt->drv_data->disable_reg,
- mask_val, val);
+ ret = regmap_update_bits(wdt->pmureg, wdt->drv_data->mask_reset_reg,
+ mask_val, val);
if (ret < 0)
- goto error;
+ dev_err(wdt->dev, "failed to update reg(%d)\n", ret);
+
+ return ret;
+}
- ret = regmap_update_bits(wdt->pmureg,
- wdt->drv_data->mask_reset_reg,
- mask_val, val);
- error:
+static int s3c2410wdt_enable_counter(struct s3c2410_wdt *wdt, bool en)
+{
+ const u32 mask_val = BIT(wdt->drv_data->cnt_en_bit);
+ const u32 val = en ? mask_val : 0;
+ int ret;
+
+ ret = regmap_update_bits(wdt->pmureg, wdt->drv_data->cnt_en_reg,
+ mask_val, val);
if (ret < 0)
dev_err(wdt->dev, "failed to update reg(%d)\n", ret);
return ret;
}
+static int s3c2410wdt_enable(struct s3c2410_wdt *wdt, bool en)
+{
+ int ret;
+
+ if (wdt->drv_data->quirks & QUIRK_HAS_PMU_AUTO_DISABLE) {
+ ret = s3c2410wdt_disable_wdt_reset(wdt, !en);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (wdt->drv_data->quirks & QUIRK_HAS_PMU_MASK_RESET) {
+ ret = s3c2410wdt_mask_wdt_reset(wdt, !en);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (wdt->drv_data->quirks & QUIRK_HAS_PMU_CNT_EN) {
+ ret = s3c2410wdt_enable_counter(wdt, en);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
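A note on the `mask ^ val_inv` selection in s3c2410wdt_mask_wdt_reset() above: it lets one helper drive both a classic mask register (set the bit to mask the reset) and the Exynos850-style inverted interrupt-enable register (clear the bit to mask it). A minimal standalone sketch, with the bit position assumed to be 2 as in the Exynos850 variant data:

#include <stdbool.h>
#include <stdio.h>

#define BIT(nr) (1U << (nr))

/* Mirrors the value selection in s3c2410wdt_mask_wdt_reset(). */
static unsigned int mask_reg_val(bool mask, bool inv, unsigned int bit)
{
	return (mask ^ inv) ? BIT(bit) : 0;
}

int main(void)
{
	/* Plain mask register: write 1 to mask the watchdog reset. */
	printf("mask, normal:   %#x\n", mask_reg_val(true, false, 2)); /* 0x4 */
	/* Inverted (interrupt-enable) register: write 0 to mask it. */
	printf("mask, inverted: %#x\n", mask_reg_val(true, true, 2));  /* 0 */
	return 0;
}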
static int s3c2410wdt_keepalive(struct watchdog_device *wdd)
{
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
@@ -300,7 +422,7 @@ static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd,
unsigned int timeout)
{
struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
- unsigned long freq = clk_get_rate(wdt->clock);
+ unsigned long freq = s3c2410wdt_get_freq(wdt);
unsigned int count;
unsigned int divisor = 1;
unsigned long wtcon;
@@ -482,7 +604,7 @@ static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt)
unsigned int rst_stat;
int ret;
- if (!(wdt->drv_data->quirks & QUIRK_HAS_RST_STAT))
+ if (!(wdt->drv_data->quirks & QUIRK_HAS_PMU_RST_STAT))
return 0;
ret = regmap_read(wdt->pmureg, wdt->drv_data->rst_stat_reg, &rst_stat);
@@ -498,14 +620,40 @@ static inline const struct s3c2410_wdt_variant *
s3c2410_get_wdt_drv_data(struct platform_device *pdev)
{
const struct s3c2410_wdt_variant *variant;
+ struct device *dev = &pdev->dev;
- variant = of_device_get_match_data(&pdev->dev);
+ variant = of_device_get_match_data(dev);
if (!variant) {
/* Device matched by platform_device_id */
variant = (struct s3c2410_wdt_variant *)
platform_get_device_id(pdev)->driver_data;
}
+#ifdef CONFIG_OF
+ /* Choose Exynos850 driver data w.r.t. cluster index */
+ if (variant == &drv_data_exynos850_cl0) {
+ u32 index;
+ int err;
+
+ err = of_property_read_u32(dev->of_node,
+ "samsung,cluster-index", &index);
+ if (err) {
+ dev_err(dev, "failed to get cluster index\n");
+ return NULL;
+ }
+
+ switch (index) {
+ case 0:
+ return &drv_data_exynos850_cl0;
+ case 1:
+ return &drv_data_exynos850_cl1;
+ default:
+ dev_err(dev, "wrong cluster index: %u\n", index);
+ return NULL;
+ }
+ }
+#endif
+
return variant;
}
@@ -513,9 +661,8 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct s3c2410_wdt *wdt;
- struct resource *wdt_irq;
unsigned int wtcon;
- int started = 0;
+ int wdt_irq;
int ret;
wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
@@ -527,6 +674,9 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
wdt->wdt_device = s3c2410_wdd;
wdt->drv_data = s3c2410_get_wdt_drv_data(pdev);
+ if (!wdt->drv_data)
+ return -EINVAL;
+
if (wdt->drv_data->quirks & QUIRKS_HAVE_PMUREG) {
wdt->pmureg = syscon_regmap_lookup_by_phandle(dev->of_node,
"samsung,syscon-phandle");
@@ -536,40 +686,52 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
}
}
- wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (wdt_irq == NULL) {
- dev_err(dev, "no irq resource specified\n");
- ret = -ENOENT;
- goto err;
- }
+ wdt_irq = platform_get_irq(pdev, 0);
+ if (wdt_irq < 0)
+ return wdt_irq;
/* get the memory region for the watchdog timer */
wdt->reg_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(wdt->reg_base)) {
- ret = PTR_ERR(wdt->reg_base);
- goto err;
- }
+ if (IS_ERR(wdt->reg_base))
+ return PTR_ERR(wdt->reg_base);
- wdt->clock = devm_clk_get(dev, "watchdog");
- if (IS_ERR(wdt->clock)) {
- dev_err(dev, "failed to find watchdog clock source\n");
- ret = PTR_ERR(wdt->clock);
- goto err;
+ wdt->bus_clk = devm_clk_get(dev, "watchdog");
+ if (IS_ERR(wdt->bus_clk)) {
+ dev_err(dev, "failed to find bus clock\n");
+ return PTR_ERR(wdt->bus_clk);
}
- ret = clk_prepare_enable(wdt->clock);
+ ret = clk_prepare_enable(wdt->bus_clk);
if (ret < 0) {
- dev_err(dev, "failed to enable clock\n");
+ dev_err(dev, "failed to enable bus clock\n");
return ret;
}
+ /*
+ * The "watchdog_src" clock is optional; if it's not present, just skip
+ * it and use the "watchdog" clock as both the bus and the source clock.
+ */
+ wdt->src_clk = devm_clk_get_optional(dev, "watchdog_src");
+ if (IS_ERR(wdt->src_clk)) {
+ dev_err_probe(dev, PTR_ERR(wdt->src_clk),
+ "failed to get source clock\n");
+ ret = PTR_ERR(wdt->src_clk);
+ goto err_bus_clk;
+ }
+
+ ret = clk_prepare_enable(wdt->src_clk);
+ if (ret) {
+ dev_err(dev, "failed to enable source clock\n");
+ goto err_bus_clk;
+ }
+
wdt->wdt_device.min_timeout = 1;
- wdt->wdt_device.max_timeout = s3c2410wdt_max_timeout(wdt->clock);
+ wdt->wdt_device.max_timeout = s3c2410wdt_max_timeout(wdt);
ret = s3c2410wdt_cpufreq_register(wdt);
if (ret < 0) {
dev_err(dev, "failed to register cpufreq\n");
- goto err_clk;
+ goto err_src_clk;
}
watchdog_set_drvdata(&wdt->wdt_device, wdt);
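The optional "watchdog_src" handling above leans on the clk API's treatment of a NULL clock as a valid no-op: devm_clk_get_optional() returns NULL (not an error) when the clock is absent, and s3c2410wdt_get_freq() then falls back to the bus clock. A condensed sketch of the pattern; the helper name is hypothetical:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical helper condensing the fallback used by the driver. */
static unsigned long wdt_source_rate(struct device *dev, struct clk *bus_clk)
{
	struct clk *src = devm_clk_get_optional(dev, "watchdog_src");

	if (IS_ERR(src))
		return 0; /* real error (e.g. probe deferral); handle upstream */

	/* NULL src means "not wired up in DT": use the bus clock's rate. */
	return clk_get_rate(src ? src : bus_clk);
}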
@@ -581,19 +743,19 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
ret = s3c2410wdt_set_heartbeat(&wdt->wdt_device,
wdt->wdt_device.timeout);
if (ret) {
- started = s3c2410wdt_set_heartbeat(&wdt->wdt_device,
- S3C2410_WATCHDOG_DEFAULT_TIME);
-
- if (started == 0)
- dev_info(dev,
- "tmr_margin value out of range, default %d used\n",
+ ret = s3c2410wdt_set_heartbeat(&wdt->wdt_device,
+ S3C2410_WATCHDOG_DEFAULT_TIME);
+ if (ret == 0) {
+ dev_warn(dev, "tmr_margin value out of range, default %d used\n",
S3C2410_WATCHDOG_DEFAULT_TIME);
- else
- dev_info(dev, "default timer value is out of range, cannot start\n");
+ } else {
+ dev_err(dev, "failed to use default timeout\n");
+ goto err_cpufreq;
+ }
}
- ret = devm_request_irq(dev, wdt_irq->start, s3c2410wdt_irq, 0,
- pdev->name, pdev);
+ ret = devm_request_irq(dev, wdt_irq, s3c2410wdt_irq, 0,
+ pdev->name, pdev);
if (ret != 0) {
dev_err(dev, "failed to install irq (%d)\n", ret);
goto err_cpufreq;
@@ -605,25 +767,29 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
wdt->wdt_device.bootstatus = s3c2410wdt_get_bootstatus(wdt);
wdt->wdt_device.parent = dev;
+ /*
+ * If the "tmr_atboot" param is non-zero, start the watchdog right now
+ * and set the WDOG_HW_RUNNING bit so that the watchdog core can keep
+ * kicking it.
+ *
+ * If we're not enabling the watchdog, then ensure it is disabled if it
+ * has been left running by the bootloader or another source.
+ */
+ if (tmr_atboot) {
+ dev_info(dev, "starting watchdog timer\n");
+ s3c2410wdt_start(&wdt->wdt_device);
+ set_bit(WDOG_HW_RUNNING, &wdt->wdt_device.status);
+ } else {
+ s3c2410wdt_stop(&wdt->wdt_device);
+ }
+
ret = watchdog_register_device(&wdt->wdt_device);
if (ret)
goto err_cpufreq;
- ret = s3c2410wdt_mask_and_disable_reset(wdt, false);
+ ret = s3c2410wdt_enable(wdt, true);
if (ret < 0)
goto err_unregister;
- if (tmr_atboot && started == 0) {
- dev_info(dev, "starting watchdog timer\n");
- s3c2410wdt_start(&wdt->wdt_device);
- } else if (!tmr_atboot) {
- /* if we're not enabling the watchdog, then ensure it is
- * disabled if it has been left running from the bootloader
- * or other source */
-
- s3c2410wdt_stop(&wdt->wdt_device);
- }
-
platform_set_drvdata(pdev, wdt);
/* print out a statement of readiness */
@@ -643,10 +809,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
err_cpufreq:
s3c2410wdt_cpufreq_deregister(wdt);
- err_clk:
- clk_disable_unprepare(wdt->clock);
+ err_src_clk:
+ clk_disable_unprepare(wdt->src_clk);
+
+ err_bus_clk:
+ clk_disable_unprepare(wdt->bus_clk);
- err:
return ret;
}
@@ -655,7 +823,7 @@ static int s3c2410wdt_remove(struct platform_device *dev)
int ret;
struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
- ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
+ ret = s3c2410wdt_enable(wdt, false);
if (ret < 0)
return ret;
@@ -663,7 +831,8 @@ static int s3c2410wdt_remove(struct platform_device *dev)
s3c2410wdt_cpufreq_deregister(wdt);
- clk_disable_unprepare(wdt->clock);
+ clk_disable_unprepare(wdt->src_clk);
+ clk_disable_unprepare(wdt->bus_clk);
return 0;
}
@@ -672,8 +841,7 @@ static void s3c2410wdt_shutdown(struct platform_device *dev)
{
struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
- s3c2410wdt_mask_and_disable_reset(wdt, true);
-
+ s3c2410wdt_enable(wdt, false);
s3c2410wdt_stop(&wdt->wdt_device);
}
@@ -688,7 +856,7 @@ static int s3c2410wdt_suspend(struct device *dev)
wdt->wtcon_save = readl(wdt->reg_base + S3C2410_WTCON);
wdt->wtdat_save = readl(wdt->reg_base + S3C2410_WTDAT);
- ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
+ ret = s3c2410wdt_enable(wdt, false);
if (ret < 0)
return ret;
@@ -708,7 +876,7 @@ static int s3c2410wdt_resume(struct device *dev)
writel(wdt->wtdat_save, wdt->reg_base + S3C2410_WTCNT);/* Reset count */
writel(wdt->wtcon_save, wdt->reg_base + S3C2410_WTCON);
- ret = s3c2410wdt_mask_and_disable_reset(wdt, false);
+ ret = s3c2410wdt_enable(wdt, true);
if (ret < 0)
return ret;
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 33e941e40082..120d32f164ac 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -327,7 +327,7 @@ config XEN_FRONT_PGDIR_SHBUF
config XEN_UNPOPULATED_ALLOC
bool "Use unpopulated memory ranges for guest mappings"
- depends on X86 && ZONE_DEVICE
+ depends on ZONE_DEVICE
default XEN_BACKEND || XEN_GNTDEV || XEN_DOM0
help
Use unpopulated memory ranges in order to create mappings for guest
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index ba2ea11e0d3d..a2c4fc49c483 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -581,7 +581,6 @@ void balloon_set_new_target(unsigned long target)
}
EXPORT_SYMBOL_GPL(balloon_set_new_target);
-#ifndef CONFIG_XEN_UNPOPULATED_ALLOC
static int add_ballooned_pages(unsigned int nr_pages)
{
enum bp_state st;
@@ -610,12 +609,12 @@ static int add_ballooned_pages(unsigned int nr_pages)
}
/**
- * xen_alloc_unpopulated_pages - get pages that have been ballooned out
+ * xen_alloc_ballooned_pages - get pages that have been ballooned out
* @nr_pages: Number of pages to get
* @pages: pages returned
* @return 0 on success, error otherwise
*/
-int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
unsigned int pgno = 0;
struct page *page;
@@ -652,23 +651,23 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
return 0;
out_undo:
mutex_unlock(&balloon_mutex);
- xen_free_unpopulated_pages(pgno, pages);
+ xen_free_ballooned_pages(pgno, pages);
/*
- * NB: free_xenballooned_pages will only subtract pgno pages, but since
+ * NB: xen_free_ballooned_pages will only subtract pgno pages, but since
* target_unpopulated is incremented with nr_pages at the start we need
* to remove the remaining ones also, or accounting will be screwed.
*/
balloon_stats.target_unpopulated -= nr_pages - pgno;
return ret;
}
-EXPORT_SYMBOL(xen_alloc_unpopulated_pages);
+EXPORT_SYMBOL(xen_alloc_ballooned_pages);
/**
- * xen_free_unpopulated_pages - return pages retrieved with get_ballooned_pages
+ * xen_free_ballooned_pages - return pages retrieved with get_ballooned_pages
* @nr_pages: Number of pages
* @pages: pages to return
*/
-void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
+void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages)
{
unsigned int i;
@@ -687,9 +686,9 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
mutex_unlock(&balloon_mutex);
}
-EXPORT_SYMBOL(xen_free_unpopulated_pages);
+EXPORT_SYMBOL(xen_free_ballooned_pages);
-#if defined(CONFIG_XEN_PV)
+#if defined(CONFIG_XEN_PV) && !defined(CONFIG_XEN_UNPOPULATED_ALLOC)
static void __init balloon_add_region(unsigned long start_pfn,
unsigned long pages)
{
@@ -712,7 +711,6 @@ static void __init balloon_add_region(unsigned long start_pfn,
balloon_stats.total_pages += extra_pfn_end - start_pfn;
}
#endif
-#endif
static int __init balloon_init(void)
{
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index fec1b6537166..59ffea800079 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -250,13 +250,13 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map)
if (!refcount_dec_and_test(&map->users))
return;
+ if (map->pages && !use_ptemod)
+ unmap_grant_pages(map, 0, map->count);
+
if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
evtchn_put(map->notify.event);
}
-
- if (map->pages && !use_ptemod)
- unmap_grant_pages(map, 0, map->count);
gntdev_free_map(map);
}
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index 2c890f4f2cbc..72d4e3f193af 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -264,7 +264,7 @@ struct xen_device_domain_owner {
};
static DEFINE_SPINLOCK(dev_domain_list_spinlock);
-static struct list_head dev_domain_list = LIST_HEAD_INIT(dev_domain_list);
+static LIST_HEAD(dev_domain_list);
static struct xen_device_domain_owner *find_device(struct pci_dev *dev)
{
diff --git a/drivers/xen/unpopulated-alloc.c b/drivers/xen/unpopulated-alloc.c
index 87e6b7db892f..a8b41057c382 100644
--- a/drivers/xen/unpopulated-alloc.c
+++ b/drivers/xen/unpopulated-alloc.c
@@ -8,6 +8,7 @@
#include <asm/page.h>
+#include <xen/balloon.h>
#include <xen/page.h>
#include <xen/xen.h>
@@ -15,13 +16,29 @@ static DEFINE_MUTEX(list_lock);
static struct page *page_list;
static unsigned int list_count;
+static struct resource *target_resource;
+
+/*
+ * If the arch is not happy with the system "iomem_resource" being used
+ * for the region allocation, it can provide its own view by creating a
+ * specific Xen resource with unused regions of guest physical address
+ * space provided by the hypervisor.
+ */
+int __weak __init arch_xen_unpopulated_init(struct resource **res)
+{
+ *res = &iomem_resource;
+
+ return 0;
+}
+
static int fill_list(unsigned int nr_pages)
{
struct dev_pagemap *pgmap;
- struct resource *res;
+ struct resource *res, *tmp_res = NULL;
void *vaddr;
unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION);
- int ret = -ENOMEM;
+ struct range mhp_range;
+ int ret;
res = kzalloc(sizeof(*res), GFP_KERNEL);
if (!res)
@@ -30,14 +47,40 @@ static int fill_list(unsigned int nr_pages)
res->name = "Xen scratch";
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- ret = allocate_resource(&iomem_resource, res,
- alloc_pages * PAGE_SIZE, 0, -1,
+ mhp_range = mhp_get_pluggable_range(true);
+
+ ret = allocate_resource(target_resource, res,
+ alloc_pages * PAGE_SIZE, mhp_range.start, mhp_range.end,
PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL);
if (ret < 0) {
pr_err("Cannot allocate new IOMEM resource\n");
goto err_resource;
}
+ /*
+ * Reserve the region previously allocated from the Xen resource so
+ * that it cannot be reused by someone else.
+ */
+ if (target_resource != &iomem_resource) {
+ tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
+ if (!tmp_res) {
+ ret = -ENOMEM;
+ goto err_insert;
+ }
+
+ tmp_res->name = res->name;
+ tmp_res->start = res->start;
+ tmp_res->end = res->end;
+ tmp_res->flags = res->flags;
+
+ ret = request_resource(&iomem_resource, tmp_res);
+ if (ret < 0) {
+ pr_err("Cannot request resource %pR (%d)\n", tmp_res, ret);
+ kfree(tmp_res);
+ goto err_insert;
+ }
+ }
+
pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL);
if (!pgmap) {
ret = -ENOMEM;
@@ -85,7 +128,6 @@ static int fill_list(unsigned int nr_pages)
for (i = 0; i < alloc_pages; i++) {
struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i);
- BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i));
pg->zone_device_data = page_list;
page_list = pg;
list_count++;
@@ -96,6 +138,11 @@ static int fill_list(unsigned int nr_pages)
err_memremap:
kfree(pgmap);
err_pgmap:
+ if (tmp_res) {
+ release_resource(tmp_res);
+ kfree(tmp_res);
+ }
+err_insert:
release_resource(res);
err_resource:
kfree(res);
@@ -113,6 +160,14 @@ int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages)
unsigned int i;
int ret = 0;
+ /*
+ * Fall back to the default behavior if we do not have any suitable
+ * resource to allocate the required region from, and as a result we
+ * won't be able to construct pages.
+ */
+ if (!target_resource)
+ return xen_alloc_ballooned_pages(nr_pages, pages);
+
mutex_lock(&list_lock);
if (list_count < nr_pages) {
ret = fill_list(nr_pages - list_count);
@@ -160,6 +215,11 @@ void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages)
{
unsigned int i;
+ if (!target_resource) {
+ xen_free_ballooned_pages(nr_pages, pages);
+ return;
+ }
+
mutex_lock(&list_lock);
for (i = 0; i < nr_pages; i++) {
pages[i]->zone_device_data = page_list;
@@ -202,3 +262,20 @@ static int __init init(void)
}
subsys_initcall(init);
#endif
+
+static int __init unpopulated_init(void)
+{
+ int ret;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ ret = arch_xen_unpopulated_init(&target_resource);
+ if (ret) {
+ pr_err("xen:unpopulated: Cannot initialize target resource\n");
+ target_resource = NULL;
+ }
+
+ return ret;
+}
+early_initcall(unpopulated_init);
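For context on the __weak arch_xen_unpopulated_init() hook added above: an architecture that does not want iomem_resource used for scratch allocations can publish its own resource describing unused guest physical address space. A hypothetical arch-side override might look like this; the address range is invented purely for illustration:

#include <linux/init.h>
#include <linux/ioport.h>

/*
 * Assumed hole in the guest physical address map, as reported by the
 * hypervisor; the range below is illustrative only.
 */
static struct resource xen_unused_gpa_space = {
	.name  = "Xen unused space",
	.start = 0x100000000ULL,
	.end   = 0x1ffffffffULL,
	.flags = IORESOURCE_MEM,
};

int __init arch_xen_unpopulated_init(struct resource **res)
{
	*res = &xen_unused_gpa_space;
	return 0;
}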
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c
index 1c9ae08225d8..f916bf60b312 100644
--- a/drivers/zorro/proc.c
+++ b/drivers/zorro/proc.c
@@ -30,7 +30,7 @@ proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
static ssize_t
proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
- struct zorro_dev *z = PDE_DATA(file_inode(file));
+ struct zorro_dev *z = pde_data(file_inode(file));
struct ConfigDev cd;
loff_t pos = *ppos;
diff --git a/fs/9p/cache.c b/fs/9p/cache.c
index f2ba131cede1..55e108e5e133 100644
--- a/fs/9p/cache.c
+++ b/fs/9p/cache.c
@@ -16,186 +16,61 @@
#include "v9fs.h"
#include "cache.h"
-#define CACHETAG_LEN 11
-
-struct fscache_netfs v9fs_cache_netfs = {
- .name = "9p",
- .version = 0,
-};
-
-/*
- * v9fs_random_cachetag - Generate a random tag to be associated
- * with a new cache session.
- *
- * The value of jiffies is used for a fairly randomly cache tag.
- */
-
-static
-int v9fs_random_cachetag(struct v9fs_session_info *v9ses)
+int v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses,
+ const char *dev_name)
{
- v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL);
- if (!v9ses->cachetag)
- return -ENOMEM;
+ struct fscache_volume *vcookie;
+ char *name, *p;
- return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies);
-}
-
-const struct fscache_cookie_def v9fs_cache_session_index_def = {
- .name = "9P.session",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
+ name = kasprintf(GFP_KERNEL, "9p,%s,%s",
+ dev_name, v9ses->cachetag ?: v9ses->aname);
+ if (!name)
+ return -ENOMEM;
-void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses)
-{
- /* If no cache session tag was specified, we generate a random one. */
- if (!v9ses->cachetag) {
- if (v9fs_random_cachetag(v9ses) < 0) {
- v9ses->fscache = NULL;
- kfree(v9ses->cachetag);
- v9ses->cachetag = NULL;
- return;
+ for (p = name; *p; p++)
+ if (*p == '/')
+ *p = ';';
+
+ vcookie = fscache_acquire_volume(name, NULL, NULL, 0);
+ p9_debug(P9_DEBUG_FSC, "session %p get volume %p (%s)\n",
+ v9ses, vcookie, name);
+ if (IS_ERR(vcookie)) {
+ if (vcookie != ERR_PTR(-EBUSY)) {
+ kfree(name);
+ return PTR_ERR(vcookie);
}
+ pr_err("Cache volume key already in use (%s)\n", name);
+ vcookie = NULL;
}
-
- v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index,
- &v9fs_cache_session_index_def,
- v9ses->cachetag,
- strlen(v9ses->cachetag),
- NULL, 0,
- v9ses, 0, true);
- p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n",
- v9ses, v9ses->fscache);
-}
-
-void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses)
-{
- p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n",
- v9ses, v9ses->fscache);
- fscache_relinquish_cookie(v9ses->fscache, NULL, false);
- v9ses->fscache = NULL;
-}
-
-static enum
-fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data,
- const void *buffer,
- uint16_t buflen,
- loff_t object_size)
-{
- const struct v9fs_inode *v9inode = cookie_netfs_data;
-
- if (buflen != sizeof(v9inode->qid.version))
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- if (memcmp(buffer, &v9inode->qid.version,
- sizeof(v9inode->qid.version)))
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- return FSCACHE_CHECKAUX_OKAY;
+ v9ses->fscache = vcookie;
+ kfree(name);
+ return 0;
}
-const struct fscache_cookie_def v9fs_cache_inode_index_def = {
- .name = "9p.inode",
- .type = FSCACHE_COOKIE_TYPE_DATAFILE,
- .check_aux = v9fs_cache_inode_check_aux,
-};
-
void v9fs_cache_inode_get_cookie(struct inode *inode)
{
struct v9fs_inode *v9inode;
struct v9fs_session_info *v9ses;
+ __le32 version;
+ __le64 path;
if (!S_ISREG(inode->i_mode))
return;
v9inode = V9FS_I(inode);
- if (v9inode->fscache)
+ if (WARN_ON(v9inode->fscache))
return;
+ version = cpu_to_le32(v9inode->qid.version);
+ path = cpu_to_le64(v9inode->qid.path);
v9ses = v9fs_inode2v9ses(inode);
- v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
- &v9fs_cache_inode_index_def,
- &v9inode->qid.path,
- sizeof(v9inode->qid.path),
- &v9inode->qid.version,
- sizeof(v9inode->qid.version),
- v9inode,
- i_size_read(&v9inode->vfs_inode),
- true);
+ v9inode->fscache =
+ fscache_acquire_cookie(v9fs_session_cache(v9ses),
+ 0,
+ &path, sizeof(path),
+ &version, sizeof(version),
+ i_size_read(&v9inode->vfs_inode));
p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n",
inode, v9inode->fscache);
}
-
-void v9fs_cache_inode_put_cookie(struct inode *inode)
-{
- struct v9fs_inode *v9inode = V9FS_I(inode);
-
- if (!v9inode->fscache)
- return;
- p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n",
- inode, v9inode->fscache);
-
- fscache_relinquish_cookie(v9inode->fscache, &v9inode->qid.version,
- false);
- v9inode->fscache = NULL;
-}
-
-void v9fs_cache_inode_flush_cookie(struct inode *inode)
-{
- struct v9fs_inode *v9inode = V9FS_I(inode);
-
- if (!v9inode->fscache)
- return;
- p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n",
- inode, v9inode->fscache);
-
- fscache_relinquish_cookie(v9inode->fscache, NULL, true);
- v9inode->fscache = NULL;
-}
-
-void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp)
-{
- struct v9fs_inode *v9inode = V9FS_I(inode);
-
- if (!v9inode->fscache)
- return;
-
- mutex_lock(&v9inode->fscache_lock);
-
- if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
- v9fs_cache_inode_flush_cookie(inode);
- else
- v9fs_cache_inode_get_cookie(inode);
-
- mutex_unlock(&v9inode->fscache_lock);
-}
-
-void v9fs_cache_inode_reset_cookie(struct inode *inode)
-{
- struct v9fs_inode *v9inode = V9FS_I(inode);
- struct v9fs_session_info *v9ses;
- struct fscache_cookie *old;
-
- if (!v9inode->fscache)
- return;
-
- old = v9inode->fscache;
-
- mutex_lock(&v9inode->fscache_lock);
- fscache_relinquish_cookie(v9inode->fscache, NULL, true);
-
- v9ses = v9fs_inode2v9ses(inode);
- v9inode->fscache = fscache_acquire_cookie(v9ses->fscache,
- &v9fs_cache_inode_index_def,
- &v9inode->qid.path,
- sizeof(v9inode->qid.path),
- &v9inode->qid.version,
- sizeof(v9inode->qid.version),
- v9inode,
- i_size_read(&v9inode->vfs_inode),
- true);
- p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n",
- inode, old, v9inode->fscache);
-
- mutex_unlock(&v9inode->fscache_lock);
-}
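The volume-cookie path above builds the fscache volume key as "9p,<dev_name>,<cachetag-or-aname>" and rewrites '/' to ';', since slashes would be problematic in a volume key (cache backends may use it to name on-disk objects). A standalone illustration of the resulting string; the mount names are invented:

#include <stdio.h>

int main(void)
{
	char name[64];
	char *p;

	/* e.g. a mount of server path "srv/export" with cache tag "tag0" */
	snprintf(name, sizeof(name), "9p,%s,%s", "srv/export", "tag0");
	for (p = name; *p; p++)
		if (*p == '/')
			*p = ';';
	puts(name); /* prints: 9p,srv;export,tag0 */
	return 0;
}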
diff --git a/fs/9p/cache.h b/fs/9p/cache.h
index 7480b4b49fea..1923affcdc62 100644
--- a/fs/9p/cache.h
+++ b/fs/9p/cache.h
@@ -7,26 +7,15 @@
#ifndef _9P_CACHE_H
#define _9P_CACHE_H
-#define FSCACHE_USE_NEW_IO_API
+
#include <linux/fscache.h>
#ifdef CONFIG_9P_FSCACHE
-extern struct fscache_netfs v9fs_cache_netfs;
-extern const struct fscache_cookie_def v9fs_cache_session_index_def;
-extern const struct fscache_cookie_def v9fs_cache_inode_index_def;
-
-extern void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses);
-extern void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses);
+extern int v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses,
+ const char *dev_name);
extern void v9fs_cache_inode_get_cookie(struct inode *inode);
-extern void v9fs_cache_inode_put_cookie(struct inode *inode);
-extern void v9fs_cache_inode_flush_cookie(struct inode *inode);
-extern void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp);
-extern void v9fs_cache_inode_reset_cookie(struct inode *inode);
-
-extern int __v9fs_cache_register(void);
-extern void __v9fs_cache_unregister(void);
#else /* CONFIG_9P_FSCACHE */
@@ -34,13 +23,5 @@ static inline void v9fs_cache_inode_get_cookie(struct inode *inode)
{
}
-static inline void v9fs_cache_inode_put_cookie(struct inode *inode)
-{
-}
-
-static inline void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *file)
-{
-}
-
#endif /* CONFIG_9P_FSCACHE */
#endif /* _9P_CACHE_H */
diff --git a/fs/9p/fid.c b/fs/9p/fid.c
index 6aab046c98e2..79df61fe0e59 100644
--- a/fs/9p/fid.c
+++ b/fs/9p/fid.c
@@ -96,12 +96,8 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any)
dentry, dentry, from_kuid(&init_user_ns, uid),
any);
ret = NULL;
-
- if (d_inode(dentry))
- ret = v9fs_fid_find_inode(d_inode(dentry), uid);
-
/* we'll recheck under lock if there's anything to look in */
- if (!ret && dentry->d_fsdata) {
+ if (dentry->d_fsdata) {
struct hlist_head *h = (struct hlist_head *)&dentry->d_fsdata;
spin_lock(&dentry->d_lock);
@@ -113,6 +109,9 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any)
}
}
spin_unlock(&dentry->d_lock);
+ } else {
+ if (dentry->d_inode)
+ ret = v9fs_fid_find_inode(dentry->d_inode, uid);
}
return ret;
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index e32dd5f7721b..08f65c40af4f 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -469,7 +469,11 @@ struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
#ifdef CONFIG_9P_FSCACHE
/* register the session for caching */
- v9fs_cache_session_get_cookie(v9ses);
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {
+ rc = v9fs_cache_session_get_cookie(v9ses, dev_name);
+ if (rc < 0)
+ goto err_clnt;
+ }
#endif
spin_lock(&v9fs_sessionlist_lock);
list_add(&v9ses->slist, &v9fs_sessionlist);
@@ -502,8 +506,7 @@ void v9fs_session_close(struct v9fs_session_info *v9ses)
}
#ifdef CONFIG_9P_FSCACHE
- if (v9ses->fscache)
- v9fs_cache_session_put_cookie(v9ses);
+ fscache_relinquish_volume(v9fs_session_cache(v9ses), NULL, false);
kfree(v9ses->cachetag);
#endif
kfree(v9ses->uname);
@@ -665,20 +668,12 @@ static int v9fs_cache_register(void)
ret = v9fs_init_inode_cache();
if (ret < 0)
return ret;
-#ifdef CONFIG_9P_FSCACHE
- ret = fscache_register_netfs(&v9fs_cache_netfs);
- if (ret < 0)
- v9fs_destroy_inode_cache();
-#endif
return ret;
}
static void v9fs_cache_unregister(void)
{
v9fs_destroy_inode_cache();
-#ifdef CONFIG_9P_FSCACHE
- fscache_unregister_netfs(&v9fs_cache_netfs);
-#endif
}
/**
diff --git a/fs/9p/v9fs.h b/fs/9p/v9fs.h
index 1647a8e63671..bc8b30205d36 100644
--- a/fs/9p/v9fs.h
+++ b/fs/9p/v9fs.h
@@ -89,7 +89,7 @@ struct v9fs_session_info {
unsigned int cache;
#ifdef CONFIG_9P_FSCACHE
char *cachetag;
- struct fscache_cookie *fscache;
+ struct fscache_volume *fscache;
#endif
char *uname; /* user name to mount as */
@@ -109,7 +109,6 @@ struct v9fs_session_info {
struct v9fs_inode {
#ifdef CONFIG_9P_FSCACHE
- struct mutex fscache_lock;
struct fscache_cookie *fscache;
#endif
struct p9_qid qid;
@@ -133,6 +132,16 @@ static inline struct fscache_cookie *v9fs_inode_cookie(struct v9fs_inode *v9inod
#endif
}
+static inline struct fscache_volume *v9fs_session_cache(struct v9fs_session_info *v9ses)
+{
+#ifdef CONFIG_9P_FSCACHE
+ return v9ses->fscache;
+#else
+ return NULL;
+#endif
+}
+
+
extern int v9fs_show_options(struct seq_file *m, struct dentry *root);
struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index fac918ccb305..9a10e68c5f30 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -16,6 +16,7 @@
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/sched.h>
+#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/netfs.h>
#include <net/9p/9p.h>
@@ -42,6 +43,11 @@ static void v9fs_req_issue_op(struct netfs_read_subrequest *subreq)
iov_iter_xarray(&to, READ, &rreq->mapping->i_pages, pos, len);
total = p9_client_read(fid, pos, &to, &err);
+
+ /* if we just extended the file size, any portion not in
+ * the cache won't be on the server and reads as zeroes */
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+
netfs_subreq_terminated(subreq, err ?: total, false);
}
@@ -78,7 +84,7 @@ static bool v9fs_is_cache_enabled(struct inode *inode)
{
struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(inode));
- return fscache_cookie_enabled(cookie) && !hlist_empty(&cookie->backing_objects);
+ return fscache_cookie_enabled(cookie) && cookie->cache_priv;
}
/**
@@ -87,9 +93,13 @@ static bool v9fs_is_cache_enabled(struct inode *inode)
*/
static int v9fs_begin_cache_operation(struct netfs_read_request *rreq)
{
+#ifdef CONFIG_9P_FSCACHE
struct fscache_cookie *cookie = v9fs_inode_cookie(V9FS_I(rreq->inode));
- return fscache_begin_read_operation(rreq, cookie);
+ return fscache_begin_read_operation(&rreq->cache_resources, cookie);
+#else
+ return -ENOBUFS;
+#endif
}
static const struct netfs_read_request_ops v9fs_req_ops = {
@@ -133,16 +143,18 @@ static void v9fs_vfs_readahead(struct readahead_control *ractl)
static int v9fs_release_page(struct page *page, gfp_t gfp)
{
struct folio *folio = page_folio(page);
+ struct inode *inode = folio_inode(folio);
if (folio_test_private(folio))
return 0;
#ifdef CONFIG_9P_FSCACHE
if (folio_test_fscache(folio)) {
- if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
+ if (current_is_kswapd() || !(gfp & __GFP_FS))
return 0;
folio_wait_fscache(folio);
}
#endif
+ fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
return 1;
}
@@ -161,10 +173,25 @@ static void v9fs_invalidate_page(struct page *page, unsigned int offset,
folio_wait_fscache(folio);
}
+static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
+ bool was_async)
+{
+ struct v9fs_inode *v9inode = priv;
+ __le32 version;
+
+ if (IS_ERR_VALUE(transferred_or_error) &&
+ transferred_or_error != -ENOBUFS) {
+ version = cpu_to_le32(v9inode->qid.version);
+ fscache_invalidate(v9fs_inode_cookie(v9inode), &version,
+ i_size_read(&v9inode->vfs_inode), 0);
+ }
+}
+
static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
struct inode *inode = folio_inode(folio);
struct v9fs_inode *v9inode = V9FS_I(inode);
+ struct fscache_cookie *cookie = v9fs_inode_cookie(v9inode);
loff_t start = folio_pos(folio);
loff_t i_size = i_size_read(inode);
struct iov_iter from;
@@ -181,10 +208,21 @@ static int v9fs_vfs_write_folio_locked(struct folio *folio)
/* We should have writeback_fid always set */
BUG_ON(!v9inode->writeback_fid);
+ folio_wait_fscache(folio);
folio_start_writeback(folio);
p9_client_write(v9inode->writeback_fid, start, &from, &err);
+ if (err == 0 &&
+ fscache_cookie_enabled(cookie) &&
+ test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags)) {
+ folio_start_fscache(folio);
+ fscache_write_to_cache(v9fs_inode_cookie(v9inode),
+ folio_mapping(folio), start, len, i_size,
+ v9fs_write_to_cache_done, v9inode,
+ true);
+ }
+
folio_end_writeback(folio);
return err;
}
@@ -303,6 +341,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
loff_t last_pos = pos + copied;
struct folio *folio = page_folio(subpage);
struct inode *inode = mapping->host;
+ struct v9fs_inode *v9inode = V9FS_I(inode);
p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
@@ -322,6 +361,7 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
if (last_pos > inode->i_size) {
inode_add_bytes(inode, last_pos - inode->i_size);
i_size_write(inode, last_pos);
+ fscache_update_cookie(v9fs_inode_cookie(v9inode), NULL, &last_pos);
}
folio_mark_dirty(folio);
out:
@@ -331,11 +371,25 @@ out:
return copied;
}
+#ifdef CONFIG_9P_FSCACHE
+/*
+ * Mark a page as having been made dirty and thus needing writeback. We also
+ * need to pin the cache object to write back to.
+ */
+static int v9fs_set_page_dirty(struct page *page)
+{
+ struct v9fs_inode *v9inode = V9FS_I(page->mapping->host);
+
+ return fscache_set_page_dirty(page, v9fs_inode_cookie(v9inode));
+}
+#else
+#define v9fs_set_page_dirty __set_page_dirty_nobuffers
+#endif
const struct address_space_operations v9fs_addr_operations = {
.readpage = v9fs_vfs_readpage,
.readahead = v9fs_vfs_readahead,
- .set_page_dirty = __set_page_dirty_nobuffers,
+ .set_page_dirty = v9fs_set_page_dirty,
.writepage = v9fs_vfs_writepage,
.write_begin = v9fs_write_begin,
.write_end = v9fs_write_end,
diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c
index 8c854d8cb0cd..958680f7f23e 100644
--- a/fs/9p/vfs_dir.c
+++ b/fs/9p/vfs_dir.c
@@ -17,6 +17,7 @@
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/uio.h>
+#include <linux/fscache.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -205,7 +206,10 @@ static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx)
int v9fs_dir_release(struct inode *inode, struct file *filp)
{
+ struct v9fs_inode *v9inode = V9FS_I(inode);
struct p9_fid *fid;
+ __le32 version;
+ loff_t i_size;
fid = filp->private_data;
p9_debug(P9_DEBUG_VFS, "inode: %p filp: %p fid: %d\n",
@@ -216,6 +220,15 @@ int v9fs_dir_release(struct inode *inode, struct file *filp)
spin_unlock(&inode->i_lock);
p9_client_clunk(fid);
}
+
+ if ((filp->f_mode & FMODE_WRITE)) {
+ version = cpu_to_le32(v9inode->qid.version);
+ i_size = i_size_read(inode);
+ fscache_unuse_cookie(v9fs_inode_cookie(v9inode),
+ &version, &i_size);
+ } else {
+ fscache_unuse_cookie(v9fs_inode_cookie(v9inode), NULL, NULL);
+ }
return 0;
}
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 612e297f3763..2573c08f335c 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -93,7 +93,8 @@ int v9fs_file_open(struct inode *inode, struct file *file)
}
mutex_unlock(&v9inode->v_mutex);
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- v9fs_cache_inode_set_cookie(inode, file);
+ fscache_use_cookie(v9fs_inode_cookie(v9inode),
+ file->f_mode & FMODE_WRITE);
v9fs_open_fid_add(inode, fid);
return 0;
out_error:
@@ -114,7 +115,6 @@ out_error:
static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
{
- int res = 0;
struct inode *inode = file_inode(filp);
p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
@@ -124,7 +124,7 @@ static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
invalidate_mapping_pages(&inode->i_data, 0, -1);
}
- return res;
+ return 0;
}
static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
@@ -139,8 +139,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
fid = filp->private_data;
BUG_ON(fid == NULL);
- if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
- BUG();
+ BUG_ON((fl->fl_flags & FL_POSIX) != FL_POSIX);
res = locks_lock_file_wait(filp, fl);
if (res < 0)
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 328c338ff304..2a10242c79c7 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -233,7 +233,6 @@ struct inode *v9fs_alloc_inode(struct super_block *sb)
return NULL;
#ifdef CONFIG_9P_FSCACHE
v9inode->fscache = NULL;
- mutex_init(&v9inode->fscache_lock);
#endif
v9inode->writeback_fid = NULL;
v9inode->cache_validity = 0;
@@ -381,12 +380,16 @@ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev)
void v9fs_evict_inode(struct inode *inode)
{
struct v9fs_inode *v9inode = V9FS_I(inode);
+ __le32 version;
truncate_inode_pages_final(&inode->i_data);
+ version = cpu_to_le32(v9inode->qid.version);
+ fscache_clear_inode_writeback(v9fs_inode_cookie(v9inode), inode,
+ &version);
clear_inode(inode);
filemap_fdatawrite(&inode->i_data);
- v9fs_cache_inode_put_cookie(inode);
+ fscache_relinquish_cookie(v9fs_inode_cookie(v9inode), false);
/* clunk the fid stashed in writeback_fid */
if (v9inode->writeback_fid) {
p9_client_clunk(v9inode->writeback_fid);
@@ -869,7 +872,8 @@ v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry,
file->private_data = fid;
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- v9fs_cache_inode_set_cookie(d_inode(dentry), file);
+ fscache_use_cookie(v9fs_inode_cookie(v9inode),
+ file->f_mode & FMODE_WRITE);
v9fs_open_fid_add(inode, fid);
file->f_mode |= FMODE_CREATED;
@@ -1072,6 +1076,8 @@ static int v9fs_vfs_setattr(struct user_namespace *mnt_userns,
struct dentry *dentry, struct iattr *iattr)
{
int retval, use_dentry = 0;
+ struct inode *inode = d_inode(dentry);
+ struct v9fs_inode *v9inode = V9FS_I(inode);
struct v9fs_session_info *v9ses;
struct p9_fid *fid = NULL;
struct p9_wstat wstat;
@@ -1117,7 +1123,7 @@ static int v9fs_vfs_setattr(struct user_namespace *mnt_userns,
/* Write all dirty data */
if (d_is_reg(dentry))
- filemap_write_and_wait(d_inode(dentry)->i_mapping);
+ filemap_write_and_wait(inode->i_mapping);
retval = p9_client_wstat(fid, &wstat);
@@ -1128,13 +1134,15 @@ static int v9fs_vfs_setattr(struct user_namespace *mnt_userns,
return retval;
if ((iattr->ia_valid & ATTR_SIZE) &&
- iattr->ia_size != i_size_read(d_inode(dentry)))
- truncate_setsize(d_inode(dentry), iattr->ia_size);
+ iattr->ia_size != i_size_read(inode)) {
+ truncate_setsize(inode, iattr->ia_size);
+ fscache_resize_cookie(v9fs_inode_cookie(v9inode), iattr->ia_size);
+ }
- v9fs_invalidate_inode_attr(d_inode(dentry));
+ v9fs_invalidate_inode_attr(inode);
- setattr_copy(&init_user_ns, d_inode(dentry), iattr);
- mark_inode_dirty(d_inode(dentry));
+ setattr_copy(&init_user_ns, inode, iattr);
+ mark_inode_dirty(inode);
return 0;
}
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index 7dee89ba32e7..d17502a738a9 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -344,7 +344,8 @@ v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry,
goto err_clunk_old_fid;
file->private_data = ofid;
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
- v9fs_cache_inode_set_cookie(inode, file);
+ fscache_use_cookie(v9fs_inode_cookie(v9inode),
+ file->f_mode & FMODE_WRITE);
v9fs_open_fid_add(inode, ofid);
file->f_mode |= FMODE_CREATED;
out:
@@ -551,7 +552,10 @@ int v9fs_vfs_setattr_dotl(struct user_namespace *mnt_userns,
{
int retval, use_dentry = 0;
struct p9_fid *fid = NULL;
- struct p9_iattr_dotl p9attr;
+ struct p9_iattr_dotl p9attr = {
+ .uid = INVALID_UID,
+ .gid = INVALID_GID,
+ };
struct inode *inode = d_inode(dentry);
p9_debug(P9_DEBUG_VFS, "\n");
@@ -561,14 +565,22 @@ int v9fs_vfs_setattr_dotl(struct user_namespace *mnt_userns,
return retval;
p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid);
- p9attr.mode = iattr->ia_mode;
- p9attr.uid = iattr->ia_uid;
- p9attr.gid = iattr->ia_gid;
- p9attr.size = iattr->ia_size;
- p9attr.atime_sec = iattr->ia_atime.tv_sec;
- p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
- p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
- p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
+ if (iattr->ia_valid & ATTR_MODE)
+ p9attr.mode = iattr->ia_mode;
+ if (iattr->ia_valid & ATTR_UID)
+ p9attr.uid = iattr->ia_uid;
+ if (iattr->ia_valid & ATTR_GID)
+ p9attr.gid = iattr->ia_gid;
+ if (iattr->ia_valid & ATTR_SIZE)
+ p9attr.size = iattr->ia_size;
+ if (iattr->ia_valid & ATTR_ATIME_SET) {
+ p9attr.atime_sec = iattr->ia_atime.tv_sec;
+ p9attr.atime_nsec = iattr->ia_atime.tv_nsec;
+ }
+ if (iattr->ia_valid & ATTR_MTIME_SET) {
+ p9attr.mtime_sec = iattr->ia_mtime.tv_sec;
+ p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec;
+ }
if (iattr->ia_valid & ATTR_FILE) {
fid = iattr->ia_file->private_data;
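The rewritten p9attr setup above pre-fills uid/gid with INVALID_UID/INVALID_GID and copies each field only when its ATTR_* bit is set, so unset attributes are no longer sent as uninitialized stack contents. A toy userspace model of the pattern; the flag values and message struct are invented for the demo:

#include <stdint.h>
#include <stdio.h>

#define ATTR_MODE (1u << 0)
#define ATTR_UID  (1u << 1)

struct attr_msg { uint32_t mode; uint32_t uid; };

static struct attr_msg build_msg(unsigned int valid, uint32_t mode, uint32_t uid)
{
	/* Sentinel defaults replace "whatever was on the stack". */
	struct attr_msg m = { .mode = 0, .uid = UINT32_MAX };

	if (valid & ATTR_MODE)
		m.mode = mode;
	if (valid & ATTR_UID)
		m.uid = uid;
	return m;
}

int main(void)
{
	struct attr_msg m = build_msg(ATTR_MODE, 0644, 1000);

	printf("mode=%o uid=%u\n", m.mode, m.uid); /* uid stays the sentinel */
	return 0;
}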
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index b739e02f5ef7..97e23b4e6982 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/magic.h>
+#include <linux/fscache.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
@@ -309,6 +310,7 @@ static int v9fs_write_inode(struct inode *inode,
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
return ret;
}
+ fscache_unpin_writeback(wbc, v9fs_inode_cookie(v9inode));
return 0;
}
@@ -332,6 +334,7 @@ static int v9fs_write_inode_dotl(struct inode *inode,
__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
return ret;
}
+ fscache_unpin_writeback(wbc, v9fs_inode_cookie(v9inode));
return 0;
}
diff --git a/fs/Kconfig b/fs/Kconfig
index a6313a969bc5..6c7dc1387beb 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -15,11 +15,11 @@ config VALIDATE_FS_PARSER
Enable this to perform validation of the parameter description for a
filesystem when it is registered.
-if BLOCK
-
config FS_IOMAP
bool
+if BLOCK
+
source "fs/ext2/Kconfig"
source "fs/ext4/Kconfig"
source "fs/jbd2/Kconfig"
@@ -42,6 +42,8 @@ source "fs/nilfs2/Kconfig"
source "fs/f2fs/Kconfig"
source "fs/zonefs/Kconfig"
+endif # BLOCK
+
config FS_DAX
bool "File system based Direct Access (DAX) support"
depends on MMU
@@ -89,8 +91,6 @@ config FS_DAX_PMD
config FS_DAX_LIMITED
bool
-endif # BLOCK
-
# Posix ACL utility routines
#
# Note: Posix ACLs can be implemented without these helpers. Never use
@@ -369,8 +369,8 @@ source "fs/ksmbd/Kconfig"
config SMBFS_COMMON
tristate
- default y if CIFS=y
- default m if CIFS=m
+ default y if CIFS=y || SMB_SERVER=y
+ default m if CIFS=m || SMB_SERVER=m
source "fs/coda/Kconfig"
source "fs/afs/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index 84c5e4cdfee5..208a74e0b00e 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -6,6 +6,8 @@
# Rewritten to use lists instead of if-statements.
#
+obj-$(CONFIG_SYSCTL) += sysctls.o
+
obj-y := open.o read_write.o file_table.o super.o \
char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
ioctl.o readdir.o select.o dcache.o inode.o \
@@ -94,7 +96,7 @@ obj-$(CONFIG_EXPORTFS) += exportfs/
obj-$(CONFIG_NFSD) += nfsd/
obj-$(CONFIG_LOCKD) += lockd/
obj-$(CONFIG_NLS) += nls/
-obj-$(CONFIG_UNICODE) += unicode/
+obj-y += unicode/
obj-$(CONFIG_SYSV_FS) += sysv/
obj-$(CONFIG_SMBFS_COMMON) += smbfs_common/
obj-$(CONFIG_CIFS) += cifs/
diff --git a/fs/adfs/inode.c b/fs/adfs/inode.c
index adbb3a1edcbf..5156821bfe6a 100644
--- a/fs/adfs/inode.c
+++ b/fs/adfs/inode.c
@@ -355,7 +355,6 @@ int adfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct super_block *sb = inode->i_sb;
struct object_info obj;
- int ret;
obj.indaddr = ADFS_I(inode)->indaddr;
obj.name_len = 0;
@@ -365,6 +364,5 @@ int adfs_write_inode(struct inode *inode, struct writeback_control *wbc)
obj.attr = ADFS_I(inode)->attr;
obj.size = inode->i_size;
- ret = adfs_dir_update(sb, &obj, wbc->sync_mode == WB_SYNC_ALL);
- return ret;
+ return adfs_dir_update(sb, &obj, wbc->sync_mode == WB_SYNC_ALL);
}
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index 75c4e4043d1d..e8956b65d7ff 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -3,10 +3,7 @@
# Makefile for Red Hat Linux AFS client.
#
-afs-cache-$(CONFIG_AFS_FSCACHE) := cache.o
-
kafs-y := \
- $(afs-cache-y) \
addr_list.o \
callback.o \
cell.o \
diff --git a/fs/afs/cache.c b/fs/afs/cache.c
deleted file mode 100644
index 037af93e3aba..000000000000
--- a/fs/afs/cache.c
+++ /dev/null
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* AFS caching stuff
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/sched.h>
-#include "internal.h"
-
-static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
- const void *buffer,
- uint16_t buflen,
- loff_t object_size);
-
-struct fscache_netfs afs_cache_netfs = {
- .name = "afs",
- .version = 2,
-};
-
-struct fscache_cookie_def afs_cell_cache_index_def = {
- .name = "AFS.cell",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-struct fscache_cookie_def afs_volume_cache_index_def = {
- .name = "AFS.volume",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-struct fscache_cookie_def afs_vnode_cache_index_def = {
- .name = "AFS.vnode",
- .type = FSCACHE_COOKIE_TYPE_DATAFILE,
- .check_aux = afs_vnode_cache_check_aux,
-};
-
-/*
- * check that the auxiliary data indicates that the entry is still valid
- */
-static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
- const void *buffer,
- uint16_t buflen,
- loff_t object_size)
-{
- struct afs_vnode *vnode = cookie_netfs_data;
- struct afs_vnode_cache_aux aux;
-
- _enter("{%llx,%x,%llx},%p,%u",
- vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version,
- buffer, buflen);
-
- memcpy(&aux, buffer, sizeof(aux));
-
- /* check the size of the data is what we're expecting */
- if (buflen != sizeof(aux)) {
- _leave(" = OBSOLETE [len %hx != %zx]", buflen, sizeof(aux));
- return FSCACHE_CHECKAUX_OBSOLETE;
- }
-
- if (vnode->status.data_version != aux.data_version) {
- _leave(" = OBSOLETE [vers %llx != %llx]",
- aux.data_version, vnode->status.data_version);
- return FSCACHE_CHECKAUX_OBSOLETE;
- }
-
- _leave(" = SUCCESS");
- return FSCACHE_CHECKAUX_OKAY;
-}
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index d88407fb9bc0..07ad744eef77 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -680,13 +680,6 @@ static int afs_activate_cell(struct afs_net *net, struct afs_cell *cell)
return ret;
}
-#ifdef CONFIG_AFS_FSCACHE
- cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
- &afs_cell_cache_index_def,
- cell->name, strlen(cell->name),
- NULL, 0,
- cell, 0, true);
-#endif
ret = afs_proc_cell_setup(cell);
if (ret < 0)
return ret;
@@ -723,11 +716,6 @@ static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
afs_dynroot_rmdir(net, cell);
mutex_unlock(&net->proc_cells_lock);
-#ifdef CONFIG_AFS_FSCACHE
- fscache_relinquish_cookie(cell->cache, NULL, false);
- cell->cache = NULL;
-#endif
-
_leave("");
}
diff --git a/fs/afs/file.c b/fs/afs/file.c
index afe4b803f84b..720818a7c166 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -14,6 +14,7 @@
#include <linux/gfp.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/mm.h>
+#include <linux/swap.h>
#include <linux/netfs.h>
#include "internal.h"
@@ -158,7 +159,9 @@ int afs_open(struct inode *inode, struct file *file)
if (file->f_flags & O_TRUNC)
set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
-
+
+ fscache_use_cookie(afs_vnode_cache(vnode), file->f_mode & FMODE_WRITE);
+
file->private_data = af;
_leave(" = 0");
return 0;
@@ -177,8 +180,10 @@ error:
*/
int afs_release(struct inode *inode, struct file *file)
{
+ struct afs_vnode_cache_aux aux;
struct afs_vnode *vnode = AFS_FS_I(inode);
struct afs_file *af = file->private_data;
+ loff_t i_size;
int ret = 0;
_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);
@@ -189,6 +194,15 @@ int afs_release(struct inode *inode, struct file *file)
file->private_data = NULL;
if (af->wb)
afs_put_wb_key(af->wb);
+
+ if ((file->f_mode & FMODE_WRITE)) {
+ i_size = i_size_read(&vnode->vfs_inode);
+ afs_set_cache_aux(vnode, &aux);
+ fscache_unuse_cookie(afs_vnode_cache(vnode), &aux, &i_size);
+ } else {
+ fscache_unuse_cookie(afs_vnode_cache(vnode), NULL, NULL);
+ }
+
key_put(af->key);
kfree(af);
afs_prune_wb_keys(vnode);
@@ -354,14 +368,19 @@ static bool afs_is_cache_enabled(struct inode *inode)
{
struct fscache_cookie *cookie = afs_vnode_cache(AFS_FS_I(inode));
- return fscache_cookie_enabled(cookie) && !hlist_empty(&cookie->backing_objects);
+ return fscache_cookie_enabled(cookie) && cookie->cache_priv;
}
static int afs_begin_cache_operation(struct netfs_read_request *rreq)
{
+#ifdef CONFIG_AFS_FSCACHE
struct afs_vnode *vnode = AFS_FS_I(rreq->inode);
- return fscache_begin_read_operation(rreq, afs_vnode_cache(vnode));
+ return fscache_begin_read_operation(&rreq->cache_resources,
+ afs_vnode_cache(vnode));
+#else
+ return -ENOBUFS;
+#endif
}
static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
@@ -398,6 +417,12 @@ static void afs_readahead(struct readahead_control *ractl)
netfs_readahead(ractl, &afs_req_ops, NULL);
}
+int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ fscache_unpin_writeback(wbc, afs_vnode_cache(AFS_FS_I(inode)));
+ return 0;
+}
+
/*
* Adjust the dirty region of the page on truncation or full invalidation,
* getting rid of the markers altogether if the region is entirely invalidated.
@@ -480,23 +505,24 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
* release a page and clean up its private state if it's not busy
* - return true if the page can now be released, false if not
*/
-static int afs_releasepage(struct page *page, gfp_t gfp_flags)
+static int afs_releasepage(struct page *page, gfp_t gfp)
{
struct folio *folio = page_folio(page);
struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
_enter("{{%llx:%llu}[%lu],%lx},%x",
vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
- gfp_flags);
+ gfp);
/* deny if page is being written to the cache and the caller hasn't
* elected to wait */
#ifdef CONFIG_AFS_FSCACHE
if (folio_test_fscache(folio)) {
- if (!(gfp_flags & __GFP_DIRECT_RECLAIM) || !(gfp_flags & __GFP_FS))
+ if (current_is_kswapd() || !(gfp & __GFP_FS))
return false;
folio_wait_fscache(folio);
}
+ fscache_note_page_release(afs_vnode_cache(vnode));
#endif
if (folio_test_private(folio)) {
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 16906eb592d9..5964f8aee090 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -413,9 +413,9 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
{
#ifdef CONFIG_AFS_FSCACHE
struct {
- u32 vnode_id;
- u32 unique;
- u32 vnode_id_ext[2]; /* Allow for a 96-bit key */
+ __be32 vnode_id;
+ __be32 unique;
+ __be32 vnode_id_ext[2]; /* Allow for a 96-bit key */
} __packed key;
struct afs_vnode_cache_aux aux;
@@ -424,17 +424,18 @@ static void afs_get_inode_cache(struct afs_vnode *vnode)
return;
}
- key.vnode_id = vnode->fid.vnode;
- key.unique = vnode->fid.unique;
- key.vnode_id_ext[0] = vnode->fid.vnode >> 32;
- key.vnode_id_ext[1] = vnode->fid.vnode_hi;
- aux.data_version = vnode->status.data_version;
-
- vnode->cache = fscache_acquire_cookie(vnode->volume->cache,
- &afs_vnode_cache_index_def,
- &key, sizeof(key),
- &aux, sizeof(aux),
- vnode, vnode->status.size, true);
+ key.vnode_id = htonl(vnode->fid.vnode);
+ key.unique = htonl(vnode->fid.unique);
+ key.vnode_id_ext[0] = htonl(vnode->fid.vnode >> 32);
+ key.vnode_id_ext[1] = htonl(vnode->fid.vnode_hi);
+ afs_set_cache_aux(vnode, &aux);
+
+ vnode->cache = fscache_acquire_cookie(
+ vnode->volume->cache,
+ vnode->status.type == AFS_FTYPE_FILE ? 0 : FSCACHE_ADV_SINGLE_CHUNK,
+ &key, sizeof(key),
+ &aux, sizeof(aux),
+ vnode->status.size);
#endif
}
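The switch to __be32 key fields above (filled via htonl()) pins the binary cache key to one byte order, so an on-disk cache written on a little-endian host still matches on a big-endian one. A quick demonstration that the stored bytes become host-independent; the vnode id is an arbitrary example value:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vnode_id = 0x12345678; /* arbitrary example id */
	uint32_t key = htonl(vnode_id); /* fixed big-endian representation */
	const unsigned char *b = (const unsigned char *)&key;

	/* Prints "12 34 56 78" on both little- and big-endian hosts. */
	printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
	return 0;
}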
@@ -563,9 +564,7 @@ static void afs_zap_data(struct afs_vnode *vnode)
{
_enter("{%llx:%llu}", vnode->fid.vid, vnode->fid.vnode);
-#ifdef CONFIG_AFS_FSCACHE
- fscache_invalidate(vnode->cache);
-#endif
+ afs_invalidate_cache(vnode, 0);
/* nuke all the non-dirty pages that aren't locked, mapped or being
* written back in a regular file and completely discard the pages in a
@@ -762,9 +761,8 @@ int afs_drop_inode(struct inode *inode)
*/
void afs_evict_inode(struct inode *inode)
{
- struct afs_vnode *vnode;
-
- vnode = AFS_FS_I(inode);
+ struct afs_vnode_cache_aux aux;
+ struct afs_vnode *vnode = AFS_FS_I(inode);
_enter("{%llx:%llu.%d}",
vnode->fid.vid,
@@ -776,6 +774,9 @@ void afs_evict_inode(struct inode *inode)
ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
truncate_inode_pages_final(&inode->i_data);
+
+ afs_set_cache_aux(vnode, &aux);
+ fscache_clear_inode_writeback(afs_vnode_cache(vnode), inode, &aux);
clear_inode(inode);
while (!list_empty(&vnode->wb_keys)) {
@@ -786,14 +787,9 @@ void afs_evict_inode(struct inode *inode)
}
#ifdef CONFIG_AFS_FSCACHE
- {
- struct afs_vnode_cache_aux aux;
-
- aux.data_version = vnode->status.data_version;
- fscache_relinquish_cookie(vnode->cache, &aux,
- test_bit(AFS_VNODE_DELETED, &vnode->flags));
- vnode->cache = NULL;
- }
+ fscache_relinquish_cookie(vnode->cache,
+ test_bit(AFS_VNODE_DELETED, &vnode->flags));
+ vnode->cache = NULL;
#endif
afs_prune_wb_keys(vnode);
@@ -833,6 +829,9 @@ static void afs_setattr_edit_file(struct afs_operation *op)
if (size < i_size)
truncate_pagecache(inode, size);
+ if (size != i_size)
+ fscache_resize_cookie(afs_vnode_cache(vp->vnode),
+ vp->scb.status.size);
}
}
@@ -849,40 +848,67 @@ static const struct afs_operation_ops afs_setattr_operation = {
int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
struct iattr *attr)
{
+ const unsigned int supported =
+ ATTR_SIZE | ATTR_MODE | ATTR_UID | ATTR_GID |
+ ATTR_MTIME | ATTR_MTIME_SET | ATTR_TIMES_SET | ATTR_TOUCH;
struct afs_operation *op;
struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
+ struct inode *inode = &vnode->vfs_inode;
+ loff_t i_size;
int ret;
_enter("{%llx:%llu},{n=%pd},%x",
vnode->fid.vid, vnode->fid.vnode, dentry,
attr->ia_valid);
- if (!(attr->ia_valid & (ATTR_SIZE | ATTR_MODE | ATTR_UID | ATTR_GID |
- ATTR_MTIME | ATTR_MTIME_SET | ATTR_TIMES_SET |
- ATTR_TOUCH))) {
+ if (!(attr->ia_valid & supported)) {
_leave(" = 0 [unsupported]");
return 0;
}
+ i_size = i_size_read(inode);
if (attr->ia_valid & ATTR_SIZE) {
- if (!S_ISREG(vnode->vfs_inode.i_mode))
+ if (!S_ISREG(inode->i_mode))
return -EISDIR;
- ret = inode_newsize_ok(&vnode->vfs_inode, attr->ia_size);
+ ret = inode_newsize_ok(inode, attr->ia_size);
if (ret)
return ret;
- if (attr->ia_size == i_size_read(&vnode->vfs_inode))
+ if (attr->ia_size == i_size)
attr->ia_valid &= ~ATTR_SIZE;
}
- /* flush any dirty data outstanding on a regular file */
- if (S_ISREG(vnode->vfs_inode.i_mode))
- filemap_write_and_wait(vnode->vfs_inode.i_mapping);
+ fscache_use_cookie(afs_vnode_cache(vnode), true);
/* Prevent any new writebacks from starting whilst we do this. */
down_write(&vnode->validate_lock);
+ if ((attr->ia_valid & ATTR_SIZE) && S_ISREG(inode->i_mode)) {
+ loff_t size = attr->ia_size;
+
+ /* Wait for any outstanding writes to the server to complete */
+ loff_t from = min(size, i_size);
+ loff_t to = max(size, i_size);
+ ret = filemap_fdatawait_range(inode->i_mapping, from, to);
+ if (ret < 0)
+ goto out_unlock;
+
+ /* Don't talk to the server if we're just shortening in-memory
+ * writes that haven't gone to the server yet.
+ */
+ if (!(attr->ia_valid & (supported & ~ATTR_SIZE & ~ATTR_MTIME)) &&
+ attr->ia_size < i_size &&
+ attr->ia_size > vnode->status.size) {
+ truncate_pagecache(inode, attr->ia_size);
+ fscache_resize_cookie(afs_vnode_cache(vnode),
+ attr->ia_size);
+ i_size_write(inode, attr->ia_size);
+ ret = 0;
+ goto out_unlock;
+ }
+ }
+
op = afs_alloc_operation(((attr->ia_valid & ATTR_FILE) ?
afs_file_key(attr->ia_file) : NULL),
vnode->volume);
@@ -907,6 +933,7 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
out_unlock:
up_write(&vnode->validate_lock);
+ fscache_unuse_cookie(afs_vnode_cache(vnode), NULL, NULL);
_leave(" = %d", ret);
return ret;
}
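The shortcut in afs_setattr() above fires only when the new size lies strictly between the size the server already knows (vnode->status.size) and the current in-memory i_size, i.e. only pagecache data that never reached the server is being cut. A minimal userspace sketch of just that predicate (function name hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* A truncate can stay purely in the page cache when it shrinks the file
 * below i_size but not below the size the server already holds: only
 * data that never reached the server is discarded.
 */
static bool truncate_is_local_only(long long new_size, long long i_size,
				   long long server_size)
{
	return new_size < i_size && new_size > server_size;
}

int main(void)
{
	/* 4 KiB on the server, 16 KiB dirty in memory, truncate to 8 KiB */
	printf("%d\n", truncate_is_local_only(8192, 16384, 4096)); /* 1 */
	/* Truncating to 2 KiB would cut server data: must tell the server */
	printf("%d\n", truncate_is_local_only(2048, 16384, 4096)); /* 0 */
	return 0;
}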
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index aa4c0d6c9780..b6f02321fc09 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -14,7 +14,6 @@
#include <linux/key.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
-#define FSCACHE_USE_NEW_IO_API
#include <linux/fscache.h>
#include <linux/backing-dev.h>
#include <linux/uuid.h>
@@ -364,9 +363,6 @@ struct afs_cell {
struct key *anonymous_key; /* anonymous user key for this cell */
struct work_struct manager; /* Manager for init/deinit/dns */
struct hlist_node proc_link; /* /proc cell list link */
-#ifdef CONFIG_AFS_FSCACHE
- struct fscache_cookie *cache; /* caching cookie */
-#endif
time64_t dns_expiry; /* Time AFSDB/SRV record expires */
time64_t last_inactive; /* Time of last drop of usage count */
atomic_t ref; /* Struct refcount */
@@ -590,7 +586,7 @@ struct afs_volume {
#define AFS_VOLUME_BUSY 5 /* - T if volume busy notice given */
#define AFS_VOLUME_MAYBE_NO_IBULK 6 /* - T if some servers don't have InlineBulkStatus */
#ifdef CONFIG_AFS_FSCACHE
- struct fscache_cookie *cache; /* caching cookie */
+ struct fscache_volume *cache; /* Caching cookie */
#endif
struct afs_server_list __rcu *servers; /* List of servers on which volume resides */
rwlock_t servers_lock; /* Lock for ->servers */
@@ -872,9 +868,24 @@ struct afs_operation {
* Cache auxiliary data.
*/
struct afs_vnode_cache_aux {
- u64 data_version;
+ __be64 data_version;
} __packed;
+static inline void afs_set_cache_aux(struct afs_vnode *vnode,
+ struct afs_vnode_cache_aux *aux)
+{
+ aux->data_version = cpu_to_be64(vnode->status.data_version);
+}
+
+static inline void afs_invalidate_cache(struct afs_vnode *vnode, unsigned int flags)
+{
+ struct afs_vnode_cache_aux aux;
+
+ afs_set_cache_aux(vnode, &aux);
+ fscache_invalidate(afs_vnode_cache(vnode), &aux,
+ i_size_read(&vnode->vfs_inode), flags);
+}
+
/*
* We use folio->private to hold the amount of the folio that we've written to,
* splitting the field into two parts. However, we need to represent a range
@@ -962,13 +973,6 @@ extern void afs_merge_fs_addr6(struct afs_addr_list *, __be32 *, u16);
*/
#ifdef CONFIG_AFS_FSCACHE
extern struct fscache_netfs afs_cache_netfs;
-extern struct fscache_cookie_def afs_cell_cache_index_def;
-extern struct fscache_cookie_def afs_volume_cache_index_def;
-extern struct fscache_cookie_def afs_vnode_cache_index_def;
-#else
-#define afs_cell_cache_index_def (*(struct fscache_cookie_def *) NULL)
-#define afs_volume_cache_index_def (*(struct fscache_cookie_def *) NULL)
-#define afs_vnode_cache_index_def (*(struct fscache_cookie_def *) NULL)
#endif
/*
@@ -1068,6 +1072,7 @@ extern int afs_release(struct inode *, struct file *);
extern int afs_fetch_data(struct afs_vnode *, struct afs_read *);
extern struct afs_read *afs_alloc_read(gfp_t);
extern void afs_put_read(struct afs_read *);
+extern int afs_write_inode(struct inode *, struct writeback_control *);
static inline struct afs_read *afs_get_read(struct afs_read *req)
{
@@ -1506,7 +1511,7 @@ extern struct afs_vlserver_list *afs_extract_vlserver_list(struct afs_cell *,
* volume.c
*/
extern struct afs_volume *afs_create_volume(struct afs_fs_context *);
-extern void afs_activate_volume(struct afs_volume *);
+extern int afs_activate_volume(struct afs_volume *);
extern void afs_deactivate_volume(struct afs_volume *);
extern struct afs_volume *afs_get_volume(struct afs_volume *, enum afs_volume_trace);
extern void afs_put_volume(struct afs_net *, struct afs_volume *, enum afs_volume_trace);
@@ -1515,7 +1520,11 @@ extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
/*
* write.c
*/
+#ifdef CONFIG_AFS_FSCACHE
extern int afs_set_page_dirty(struct page *);
+#else
+#define afs_set_page_dirty __set_page_dirty_nobuffers
+#endif
extern int afs_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata);
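Switching afs_vnode_cache_aux to __be64 makes the on-disk aux blob byte-order independent, so a cache written on one architecture still validates on another. A rough userspace sketch of the encode-and-compare round trip, with htobe64 standing in for the kernel's cpu_to_be64:

#include <endian.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct vnode_cache_aux {
	uint64_t data_version;	/* stored big-endian on disk */
} __attribute__((packed));

int main(void)
{
	struct vnode_cache_aux stored, current_aux;
	uint64_t version = 42;

	stored.data_version = htobe64(version);      /* written with cookie */
	current_aux.data_version = htobe64(version); /* rebuilt at validation */

	/* The cache entry is valid only if the aux blobs match byte-for-byte */
	printf("match=%d\n",
	       memcmp(&stored, &current_aux, sizeof(stored)) == 0);
	return 0;
}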
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 179004b15566..eae288c8d40a 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -186,13 +186,6 @@ static int __init afs_init(void)
if (!afs_lock_manager)
goto error_lockmgr;
-#ifdef CONFIG_AFS_FSCACHE
- /* we want to be able to cache */
- ret = fscache_register_netfs(&afs_cache_netfs);
- if (ret < 0)
- goto error_cache;
-#endif
-
ret = register_pernet_device(&afs_net_ops);
if (ret < 0)
goto error_net;
@@ -215,10 +208,6 @@ error_proc:
error_fs:
unregister_pernet_device(&afs_net_ops);
error_net:
-#ifdef CONFIG_AFS_FSCACHE
- fscache_unregister_netfs(&afs_cache_netfs);
-error_cache:
-#endif
destroy_workqueue(afs_lock_manager);
error_lockmgr:
destroy_workqueue(afs_async_calls);
@@ -245,9 +234,6 @@ static void __exit afs_exit(void)
proc_remove(afs_proc_symlink);
afs_fs_exit();
unregister_pernet_device(&afs_net_ops);
-#ifdef CONFIG_AFS_FSCACHE
- fscache_unregister_netfs(&afs_cache_netfs);
-#endif
destroy_workqueue(afs_lock_manager);
destroy_workqueue(afs_async_calls);
destroy_workqueue(afs_wq);
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 065a28bfa3f1..e1b863449296 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -227,7 +227,7 @@ static int afs_proc_cell_volumes_show(struct seq_file *m, void *v)
static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
__acquires(cell->proc_lock)
{
- struct afs_cell *cell = PDE_DATA(file_inode(m->file));
+ struct afs_cell *cell = pde_data(file_inode(m->file));
rcu_read_lock();
return seq_hlist_start_head_rcu(&cell->proc_volumes, *_pos);
@@ -236,7 +236,7 @@ static void *afs_proc_cell_volumes_start(struct seq_file *m, loff_t *_pos)
static void *afs_proc_cell_volumes_next(struct seq_file *m, void *v,
loff_t *_pos)
{
- struct afs_cell *cell = PDE_DATA(file_inode(m->file));
+ struct afs_cell *cell = pde_data(file_inode(m->file));
return seq_hlist_next_rcu(v, &cell->proc_volumes, _pos);
}
@@ -322,7 +322,7 @@ static void *afs_proc_cell_vlservers_start(struct seq_file *m, loff_t *_pos)
{
struct afs_vl_seq_net_private *priv = m->private;
struct afs_vlserver_list *vllist;
- struct afs_cell *cell = PDE_DATA(file_inode(m->file));
+ struct afs_cell *cell = pde_data(file_inode(m->file));
loff_t pos = *_pos;
rcu_read_lock();
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 34c68724c98b..5ec9fd97eccc 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -55,6 +55,7 @@ int afs_net_id;
static const struct super_operations afs_super_ops = {
.statfs = afs_statfs,
.alloc_inode = afs_alloc_inode,
+ .write_inode = afs_write_inode,
.drop_inode = afs_drop_inode,
.destroy_inode = afs_destroy_inode,
.free_inode = afs_free_inode,
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index f84194b791d3..94a3d247924b 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -268,15 +268,30 @@ void afs_put_volume(struct afs_net *net, struct afs_volume *volume,
/*
* Activate a volume.
*/
-void afs_activate_volume(struct afs_volume *volume)
+int afs_activate_volume(struct afs_volume *volume)
{
#ifdef CONFIG_AFS_FSCACHE
- volume->cache = fscache_acquire_cookie(volume->cell->cache,
- &afs_volume_cache_index_def,
- &volume->vid, sizeof(volume->vid),
- NULL, 0,
- volume, 0, true);
+ struct fscache_volume *vcookie;
+ char *name;
+
+ name = kasprintf(GFP_KERNEL, "afs,%s,%llx",
+ volume->cell->name, volume->vid);
+ if (!name)
+ return -ENOMEM;
+
+ vcookie = fscache_acquire_volume(name, NULL, NULL, 0);
+ if (IS_ERR(vcookie)) {
+ if (vcookie != ERR_PTR(-EBUSY)) {
+ kfree(name);
+ return PTR_ERR(vcookie);
+ }
+ pr_err("AFS: Cache volume key already in use (%s)\n", name);
+ vcookie = NULL;
+ }
+ volume->cache = vcookie;
+ kfree(name);
#endif
+ return 0;
}
/*
@@ -287,7 +302,7 @@ void afs_deactivate_volume(struct afs_volume *volume)
_enter("%s", volume->name);
#ifdef CONFIG_AFS_FSCACHE
- fscache_relinquish_cookie(volume->cache, NULL,
+ fscache_relinquish_volume(volume->cache, NULL,
test_bit(AFS_VOLUME_DELETED, &volume->flags));
volume->cache = NULL;
#endif
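With fscache volumes, the per-cell cookie disappears and each volume is keyed by a single flat string, "afs,<cell>,<vid>". A userspace approximation of the key construction and the EBUSY collision handling above (helper name hypothetical):

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Build the cache volume key used above: "afs,<cell>,<vid-in-hex>". */
static char *make_volume_key(const char *cell, unsigned long long vid)
{
	char *name;

	if (asprintf(&name, "afs,%s,%llx", cell, vid) < 0)
		return NULL;	/* -ENOMEM in the kernel version */
	return name;
}

int main(void)
{
	char *key = make_volume_key("example.com", 0x20000aaULL);

	if (!key)
		return ENOMEM;
	/* An EBUSY from acquisition means this key is already bound to a
	 * live volume; the code above logs that and runs uncached instead.
	 */
	printf("%s\n", key);	/* afs,example.com,20000aa */
	free(key);
	return 0;
}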
diff --git a/fs/afs/write.c b/fs/afs/write.c
index ca4909baf5e6..5e9157d0da29 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -12,17 +12,30 @@
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
-#include <linux/fscache.h>
#include "internal.h"
+static void afs_write_to_cache(struct afs_vnode *vnode, loff_t start, size_t len,
+ loff_t i_size, bool caching);
+
+#ifdef CONFIG_AFS_FSCACHE
/*
- * mark a page as having been made dirty and thus needing writeback
+ * Mark a page as having been made dirty and thus needing writeback. We also
+ * need to pin the cache object to write back to.
*/
int afs_set_page_dirty(struct page *page)
{
- _enter("");
- return __set_page_dirty_nobuffers(page);
+ return fscache_set_page_dirty(page, afs_vnode_cache(AFS_FS_I(page->mapping->host)));
+}
+static void afs_folio_start_fscache(bool caching, struct folio *folio)
+{
+ if (caching)
+ folio_start_fscache(folio);
+}
+#else
+static void afs_folio_start_fscache(bool caching, struct folio *folio)
+{
}
+#endif
/*
* prepare to perform part of a write to a page
@@ -114,7 +127,7 @@ int afs_write_end(struct file *file, struct address_space *mapping,
unsigned long priv;
unsigned int f, from = offset_in_folio(folio, pos);
unsigned int t, to = from + copied;
- loff_t i_size, maybe_i_size;
+ loff_t i_size, write_end_pos;
_enter("{%llx:%llu},{%lx}",
vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
@@ -131,15 +144,16 @@ int afs_write_end(struct file *file, struct address_space *mapping,
if (copied == 0)
goto out;
- maybe_i_size = pos + copied;
+ write_end_pos = pos + copied;
i_size = i_size_read(&vnode->vfs_inode);
- if (maybe_i_size > i_size) {
+ if (write_end_pos > i_size) {
write_seqlock(&vnode->cb_lock);
i_size = i_size_read(&vnode->vfs_inode);
- if (maybe_i_size > i_size)
- afs_set_i_size(vnode, maybe_i_size);
+ if (write_end_pos > i_size)
+ afs_set_i_size(vnode, write_end_pos);
write_sequnlock(&vnode->cb_lock);
+ fscache_update_cookie(afs_vnode_cache(vnode), NULL, &write_end_pos);
}
if (folio_test_private(folio)) {
@@ -418,6 +432,7 @@ static void afs_extend_writeback(struct address_space *mapping,
loff_t start,
loff_t max_len,
bool new_content,
+ bool caching,
unsigned int *_len)
{
struct pagevec pvec;
@@ -464,7 +479,9 @@ static void afs_extend_writeback(struct address_space *mapping,
folio_put(folio);
break;
}
- if (!folio_test_dirty(folio) || folio_test_writeback(folio)) {
+ if (!folio_test_dirty(folio) ||
+ folio_test_writeback(folio) ||
+ folio_test_fscache(folio)) {
folio_unlock(folio);
folio_put(folio);
break;
@@ -512,6 +529,7 @@ static void afs_extend_writeback(struct address_space *mapping,
BUG();
if (folio_start_writeback(folio))
BUG();
+ afs_folio_start_fscache(caching, folio);
*_count -= folio_nr_pages(folio);
folio_unlock(folio);
@@ -539,6 +557,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
unsigned int offset, to, len, max_len;
loff_t i_size = i_size_read(&vnode->vfs_inode);
bool new_content = test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);
+ bool caching = fscache_cookie_enabled(afs_vnode_cache(vnode));
long count = wbc->nr_to_write;
int ret;
@@ -546,6 +565,7 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
if (folio_start_writeback(folio))
BUG();
+ afs_folio_start_fscache(caching, folio);
count -= folio_nr_pages(folio);
@@ -572,7 +592,8 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
if (len < max_len &&
(to == folio_size(folio) || new_content))
afs_extend_writeback(mapping, vnode, &count,
- start, max_len, new_content, &len);
+ start, max_len, new_content,
+ caching, &len);
len = min_t(loff_t, len, max_len);
}
@@ -585,12 +606,19 @@ static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
if (start < i_size) {
_debug("write back %x @%llx [%llx]", len, start, i_size);
+ /* Speculatively write to the cache. We have to fix this up
+ * later if the store fails.
+ */
+ afs_write_to_cache(vnode, start, len, i_size, caching);
+
iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
ret = afs_store_data(vnode, &iter, start, false);
} else {
_debug("write discard %x @%llx [%llx]", len, start, i_size);
/* The dirty region was entirely beyond the EOF. */
+ fscache_clear_page_bits(afs_vnode_cache(vnode),
+ mapping, start, len, caching);
afs_pages_written_back(vnode, start, len);
ret = 0;
}
@@ -649,6 +677,10 @@ int afs_writepage(struct page *subpage, struct writeback_control *wbc)
_enter("{%lx},", folio_index(folio));
+#ifdef CONFIG_AFS_FSCACHE
+ folio_wait_fscache(folio);
+#endif
+
start = folio_index(folio) * PAGE_SIZE;
ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
folio, start, LLONG_MAX - start);
@@ -714,10 +746,15 @@ static int afs_writepages_region(struct address_space *mapping,
continue;
}
- if (folio_test_writeback(folio)) {
+ if (folio_test_writeback(folio) ||
+ folio_test_fscache(folio)) {
folio_unlock(folio);
- if (wbc->sync_mode != WB_SYNC_NONE)
+ if (wbc->sync_mode != WB_SYNC_NONE) {
folio_wait_writeback(folio);
+#ifdef CONFIG_AFS_FSCACHE
+ folio_wait_fscache(folio);
+#endif
+ }
folio_put(folio);
continue;
}
@@ -970,3 +1007,28 @@ int afs_launder_page(struct page *subpage)
folio_wait_fscache(folio);
return ret;
}
+
+/*
+ * Deal with the completion of writing the data to the cache.
+ */
+static void afs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
+ bool was_async)
+{
+ struct afs_vnode *vnode = priv;
+
+ if (IS_ERR_VALUE(transferred_or_error) &&
+ transferred_or_error != -ENOBUFS)
+ afs_invalidate_cache(vnode, 0);
+}
+
+/*
+ * Save the write to the cache also.
+ */
+static void afs_write_to_cache(struct afs_vnode *vnode,
+ loff_t start, size_t len, loff_t i_size,
+ bool caching)
+{
+ fscache_write_to_cache(afs_vnode_cache(vnode),
+ vnode->vfs_inode.i_mapping, start, len, i_size,
+ afs_write_to_cache_done, vnode, caching);
+}
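afs_write_to_cache_done() treats -ENOBUFS (the cache simply had no room) as harmless, but any other error leaves the cache possibly holding a partial write, so it must be invalidated. The policy in isolation, as a userspace sketch:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Negative results other than -ENOBUFS mean the cache may hold a partial
 * write and can no longer be trusted for this inode.
 */
static bool cache_needs_invalidate(long transferred_or_error)
{
	return transferred_or_error < 0 && transferred_or_error != -ENOBUFS;
}

int main(void)
{
	printf("%d\n", cache_needs_invalidate(-EIO));     /* 1: invalidate */
	printf("%d\n", cache_needs_invalidate(-ENOBUFS)); /* 0: just skipped */
	printf("%d\n", cache_needs_invalidate(4096));     /* 0: wrote 4 KiB */
	return 0;
}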
diff --git a/fs/aio.c b/fs/aio.c
index f6f1cbffef9e..4ceba13a7db0 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -220,9 +220,35 @@ struct aio_kiocb {
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
-unsigned long aio_nr; /* current system wide number of aio requests */
-unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
+static unsigned long aio_nr; /* current system wide number of aio requests */
+static unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
+#ifdef CONFIG_SYSCTL
+static struct ctl_table aio_sysctls[] = {
+ {
+ .procname = "aio-nr",
+ .data = &aio_nr,
+ .maxlen = sizeof(aio_nr),
+ .mode = 0444,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {
+ .procname = "aio-max-nr",
+ .data = &aio_max_nr,
+ .maxlen = sizeof(aio_max_nr),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {}
+};
+
+static void __init aio_sysctl_init(void)
+{
+ register_sysctl_init("fs", aio_sysctls);
+}
+#else
+#define aio_sysctl_init() do { } while (0)
+#endif
static struct kmem_cache *kiocb_cachep;
static struct kmem_cache *kioctx_cachep;
@@ -275,6 +301,7 @@ static int __init aio_setup(void)
kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+ aio_sysctl_init();
return 0;
}
__initcall(aio_setup);
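Although the counters become static, the table above re-registers them at the historical /proc/sys/fs paths, so existing tooling keeps working. A quick userspace probe of the two knobs:

#include <stdio.h>

int main(void)
{
	unsigned long nr = 0, max_nr = 0;
	FILE *f;

	if ((f = fopen("/proc/sys/fs/aio-nr", "r"))) {
		if (fscanf(f, "%lu", &nr) != 1)	/* read-only: mode 0444 */
			nr = 0;
		fclose(f);
	}
	if ((f = fopen("/proc/sys/fs/aio-max-nr", "r"))) {
		if (fscanf(f, "%lu", &max_nr) != 1) /* root-writable: 0644 */
			max_nr = 0;
		fclose(f);
	}
	printf("aio-nr=%lu aio-max-nr=%lu\n", nr, max_nr);
	return 0;
}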
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index f8c7f26f1fbb..9e11e6f13e83 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1116,11 +1116,11 @@ out_free_interp:
* independently randomized mmap region (0 load_bias
* without MAP_FIXED nor MAP_FIXED_NOREPLACE).
*/
- if (interpreter) {
+ alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
+ if (interpreter || alignment > ELF_MIN_ALIGN) {
load_bias = ELF_ET_DYN_BASE;
if (current->flags & PF_RANDOMIZE)
load_bias += arch_mmap_rnd();
- alignment = maximum_alignment(elf_phdata, elf_ex->e_phnum);
if (alignment)
load_bias &= ~(alignment - 1);
elf_flags |= MAP_FIXED_NOREPLACE;
@@ -1585,7 +1585,7 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
rcu_read_unlock();
- strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));
+ get_task_comm(psinfo->pr_fname, p);
return 0;
}
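The binfmt_elf change computes the segment alignment first, so a PIE whose alignment exceeds ELF_MIN_ALIGN also gets the fixed ET_DYN base; the randomized base is then masked down to that alignment. A sketch of the masking arithmetic with illustrative constants:

#include <stdint.h>
#include <stdio.h>

/* Align a randomized base down to the largest segment alignment, as the
 * load_bias &= ~(alignment - 1) step above does (alignment a power of two).
 */
static uint64_t align_load_bias(uint64_t base, uint64_t alignment)
{
	if (alignment)
		base &= ~(alignment - 1);
	return base;
}

int main(void)
{
	/* A 2 MiB-aligned executable with a random 4 KiB-granular base */
	uint64_t base = 0x555555dcb000ULL;

	printf("%#llx\n",
	       (unsigned long long)align_load_bias(base, 0x200000));
	return 0;
}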
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 520a0f6a7d9e..183e5c4aed34 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -18,8 +18,7 @@ config BTRFS_FS
select RAID6_PQ
select XOR_BLOCKS
select SRCU
- depends on !PPC_256K_PAGES # powerpc
- depends on !PAGE_SIZE_256KB # hexagon
+ depends on PAGE_SIZE_LESS_THAN_256KB
help
Btrfs is a general purpose copy-on-write filesystem with extents,
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 1db24e6d6d90..8202ad6aa131 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -124,7 +124,16 @@ void btrfs_put_block_group(struct btrfs_block_group *cache)
{
if (refcount_dec_and_test(&cache->refs)) {
WARN_ON(cache->pinned > 0);
- WARN_ON(cache->reserved > 0);
+ /*
+ * If there was a failure to clean up a log tree, very likely due
+ * to an IO failure on a writeback attempt of one or more of its
+ * extent buffers, we could not do proper (and cheap) unaccounting
+ * of their reserved space, so don't warn on reserved > 0 in that
+ * case.
+ */
+ if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) ||
+ !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info))
+ WARN_ON(cache->reserved > 0);
/*
* A block_group shouldn't be on the discard_list anymore.
@@ -2544,6 +2553,19 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
int ret;
bool dirty_bg_running;
+ /*
+ * This can only happen when we are doing a read-only scrub on a
+ * read-only mount.
+ * In that case we should not start a new transaction on read-only fs.
+ * Thus here we skip all chunk allocations.
+ */
+ if (sb_rdonly(fs_info->sb)) {
+ mutex_lock(&fs_info->ro_block_group_mutex);
+ ret = inc_block_group_ro(cache, 0);
+ mutex_unlock(&fs_info->ro_block_group_mutex);
+ return ret;
+ }
+
do {
trans = btrfs_join_transaction(root);
if (IS_ERR(trans))
@@ -3974,9 +3996,22 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
* important and indicates a real bug if this happens.
*/
if (WARN_ON(space_info->bytes_pinned > 0 ||
- space_info->bytes_reserved > 0 ||
space_info->bytes_may_use > 0))
btrfs_dump_space_info(info, space_info, 0, 0);
+
+ /*
+ * If there was a failure to clean up a log tree, very likely due
+ * to an IO failure on a writeback attempt of one or more of its
+ * extent buffers, we could not do proper (and cheap) unaccounting
+ * of their reserved space, so don't warn on bytes_reserved > 0 in
+ * that case.
+ */
+ if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) ||
+ !BTRFS_FS_LOG_CLEANUP_ERROR(info)) {
+ if (WARN_ON(space_info->bytes_reserved > 0))
+ btrfs_dump_space_info(info, space_info, 0, 0);
+ }
+
WARN_ON(space_info->reclaim_size > 0);
list_del(&space_info->list);
btrfs_sysfs_remove_space_info(space_info);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index b4a9b1c58d22..8992e0096163 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -145,6 +145,9 @@ enum {
BTRFS_FS_STATE_DUMMY_FS_INFO,
BTRFS_FS_STATE_NO_CSUMS,
+
+ /* Indicates there was an error cleaning up a log tree. */
+ BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
};
#define BTRFS_BACKREF_REV_MAX 256
@@ -3593,6 +3596,9 @@ do { \
#define BTRFS_FS_ERROR(fs_info) (unlikely(test_bit(BTRFS_FS_STATE_ERROR, \
&(fs_info)->fs_state)))
+#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info) \
+ (unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, \
+ &(fs_info)->fs_state)))
__printf(5, 6)
__cold
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index d6d48ecf823c..409bad3928db 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -12,7 +12,6 @@
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
-#include <linux/cleancache.h>
#include <linux/fsverity.h>
#include "misc.h"
#include "extent_io.h"
@@ -3578,15 +3577,6 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
goto out;
}
- if (!PageUptodate(page)) {
- if (cleancache_get_page(page) == 0) {
- BUG_ON(blocksize != PAGE_SIZE);
- unlock_extent(tree, start, end);
- unlock_page(page);
- goto out;
- }
- }
-
if (page->index == last_byte >> PAGE_SHIFT) {
size_t zero_offset = offset_in_page(last_byte);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a5bd6926f7ff..927771d1853f 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -805,10 +805,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
goto fail;
}
- spin_lock(&fs_info->trans_lock);
- list_add(&pending_snapshot->list,
- &trans->transaction->pending_snapshots);
- spin_unlock(&fs_info->trans_lock);
+ trans->pending_snapshot = pending_snapshot;
ret = btrfs_commit_transaction(trans);
if (ret)
@@ -1213,6 +1210,39 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
if (em->generation < newer_than)
goto next;
+ /* This em is under writeback, no need to defrag */
+ if (em->generation == (u64)-1)
+ goto next;
+
+ /*
+ * Our start offset might be in the middle of an existing extent
+ * map, so take that into account.
+ */
+ range_len = em->len - (cur - em->start);
+ /*
+ * If this range of the extent map is already flagged for delalloc,
+ * skip it, because:
+ *
+ * 1) We could deadlock later, when trying to reserve space for
+ * delalloc, because in case we can't immediately reserve space
+ * the flusher can start delalloc and wait for the respective
+ * ordered extents to complete. The deadlock would happen
+ * because we do the space reservation while holding the range
+ * locked, and starting writeback, or finishing an ordered
+ * extent, requires locking the range;
+ *
+ * 2) If there's delalloc there, it means there are dirty pages for
+ * which writeback has not started yet (we clean the delalloc
+ * flag when starting writeback and after creating an ordered
+ * extent). If we mark pages in an adjacent range for defrag,
+ * then we will have a larger contiguous range for delalloc,
+ * very likely resulting in a larger extent after writeback is
+ * triggered (except in the case of free space fragmentation).
+ */
+ if (test_range_bit(&inode->io_tree, cur, cur + range_len - 1,
+ EXTENT_DELALLOC, 0, NULL))
+ goto next;
+
/*
* For do_compress case, we want to compress all valid file
* extents, thus no @extent_thresh or mergeable check.
@@ -1221,7 +1251,7 @@ static int defrag_collect_targets(struct btrfs_inode *inode,
goto add;
/* Skip too large extent */
- if (em->len >= extent_thresh)
+ if (range_len >= extent_thresh)
goto next;
next_mergeable = defrag_check_next_extent(&inode->vfs_inode, em,
@@ -1442,9 +1472,11 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
list_for_each_entry(entry, &target_list, list) {
u32 range_len = entry->len;
- /* Reached the limit */
- if (max_sectors && max_sectors == *sectors_defragged)
+ /* Reached or beyond the limit */
+ if (max_sectors && *sectors_defragged >= max_sectors) {
+ ret = 1;
break;
+ }
if (max_sectors)
range_len = min_t(u32, range_len,
@@ -1465,7 +1497,8 @@ static int defrag_one_cluster(struct btrfs_inode *inode,
extent_thresh, newer_than, do_compress);
if (ret < 0)
break;
- *sectors_defragged += range_len;
+ *sectors_defragged += range_len >>
+ inode->root->fs_info->sectorsize_bits;
}
out:
list_for_each_entry_safe(entry, tmp, &target_list, list) {
@@ -1484,6 +1517,12 @@ out:
* @newer_than: minimum transid to defrag
* @max_to_defrag: max number of sectors to be defragged; if 0, the whole inode
* will be defragged.
+ *
+ * Return <0 for error.
+ * Return >=0 for the number of sectors defragged, and range->start will be updated
+ * to indicate the file offset where the next defrag should start.
+ * (Mostly for autodefrag, which sets @max_to_defrag and may thus exit early
+ *  without defragging the whole range.)
*/
int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
struct btrfs_ioctl_defrag_range_args *range,
@@ -1499,6 +1538,7 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
int compress_type = BTRFS_COMPRESS_ZLIB;
int ret = 0;
u32 extent_thresh = range->extent_thresh;
+ pgoff_t start_index;
if (isize == 0)
return 0;
@@ -1518,12 +1558,16 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
if (range->start + range->len > range->start) {
/* Got a specific range */
- last_byte = min(isize, range->start + range->len) - 1;
+ last_byte = min(isize, range->start + range->len);
} else {
/* Defrag until file end */
- last_byte = isize - 1;
+ last_byte = isize;
}
+ /* Align the range */
+ cur = round_down(range->start, fs_info->sectorsize);
+ last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
+
/*
* If we were not given a ra, allocate a readahead context. As
* readahead is just an optimization, defrag will work without it so
@@ -1536,16 +1580,26 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
file_ra_state_init(ra, inode->i_mapping);
}
- /* Align the range */
- cur = round_down(range->start, fs_info->sectorsize);
- last_byte = round_up(last_byte, fs_info->sectorsize) - 1;
+ /*
+ * Make writeback start from the beginning of the range, so that the
+ * defrag range can be written sequentially.
+ */
+ start_index = cur >> PAGE_SHIFT;
+ if (start_index < inode->i_mapping->writeback_index)
+ inode->i_mapping->writeback_index = start_index;
while (cur < last_byte) {
+ const unsigned long prev_sectors_defragged = sectors_defragged;
u64 cluster_end;
/* The cluster size 256K should always be page aligned */
BUILD_BUG_ON(!IS_ALIGNED(CLUSTER_SIZE, PAGE_SIZE));
+ if (btrfs_defrag_cancelled(fs_info)) {
+ ret = -EAGAIN;
+ break;
+ }
+
/* We want the cluster end at page boundary when possible */
cluster_end = (((cur >> PAGE_SHIFT) +
(SZ_256K >> PAGE_SHIFT)) << PAGE_SHIFT) - 1;
@@ -1567,14 +1621,28 @@ int btrfs_defrag_file(struct inode *inode, struct file_ra_state *ra,
cluster_end + 1 - cur, extent_thresh,
newer_than, do_compress,
&sectors_defragged, max_to_defrag);
+
+ if (sectors_defragged > prev_sectors_defragged)
+ balance_dirty_pages_ratelimited(inode->i_mapping);
+
btrfs_inode_unlock(inode, 0);
if (ret < 0)
break;
cur = cluster_end + 1;
+ if (ret > 0) {
+ ret = 0;
+ break;
+ }
+ cond_resched();
}
if (ra_allocated)
kfree(ra);
+ /*
+ * Update range.start for autodefrag, to indicate where the next run
+ * should start.
+ */
+ range->start = cur;
if (sectors_defragged) {
/*
* We have defragged some sectors, for compression case they
@@ -3086,10 +3154,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
btrfs_inode_lock(inode, 0);
err = btrfs_delete_subvolume(dir, dentry);
btrfs_inode_unlock(inode, 0);
- if (!err) {
- fsnotify_rmdir(dir, dentry);
- d_delete(dentry);
- }
+ if (!err)
+ d_delete_notify(dir, dentry);
out_dput:
dput(dentry);
@@ -3290,7 +3356,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg)
struct block_device *bdev = NULL;
fmode_t mode;
int ret;
- bool cancel;
+ bool cancel = false;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
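The defrag accounting fix converts the byte length of each range into sectors before adding it to *sectors_defragged; previously raw byte counts were accumulated against a sector-based limit. The unit conversion by itself (4 KiB sectors assumed):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sectorsize = 4096;
	uint32_t sectorsize_bits = 12;		/* ilog2(4096) */
	uint32_t range_len = 256 * 1024;	/* one defrag cluster, in bytes */
	unsigned long sectors_defragged = 0;

	/* Adding range_len (bytes) directly overcounts by the sector size;
	 * shifting by sectorsize_bits yields the sector count instead.
	 */
	sectors_defragged += range_len >> sectorsize_bits;
	printf("%lu sectors of %u bytes\n", sectors_defragged,
	       sectorsize);	/* 64 sectors */
	return 0;
}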
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 8928275823a1..f12dc687350c 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1185,9 +1185,24 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
struct btrfs_trans_handle *trans = NULL;
int ret = 0;
+ /*
+ * We need to have subvol_sem write locked, to prevent races between
+ * concurrent tasks trying to disable quotas, because we will unlock
+ * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes.
+ */
+ lockdep_assert_held_write(&fs_info->subvol_sem);
+
mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!fs_info->quota_root)
goto out;
+
+ /*
+ * Request qgroup rescan worker to complete and wait for it. This wait
+ * must be done before transaction start for quota disable since it may
+ * deadlock with transaction by the qgroup rescan worker.
+ */
+ clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
+ btrfs_qgroup_wait_for_completion(fs_info, false);
mutex_unlock(&fs_info->qgroup_ioctl_lock);
/*
@@ -1205,14 +1220,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info)
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
trans = NULL;
+ set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
goto out;
}
if (!fs_info->quota_root)
goto out;
- clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags);
- btrfs_qgroup_wait_for_completion(fs_info, false);
spin_lock(&fs_info->qgroup_lock);
quota_root = fs_info->quota_root;
fs_info->quota_root = NULL;
@@ -3383,6 +3397,9 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
btrfs_warn(fs_info,
"qgroup rescan init failed, qgroup is not enabled");
ret = -EINVAL;
+ } else if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) {
+ /* Quota disable is in progress */
+ ret = -EBUSY;
}
if (ret) {
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index d8ccb62aa7d2..201eb2628aea 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4999,6 +4999,10 @@ static int put_file_data(struct send_ctx *sctx, u64 offset, u32 len)
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
+ btrfs_err(fs_info,
+ "send: IO error at offset %llu for inode %llu root %llu",
+ page_offset(page), sctx->cur_ino,
+ sctx->send_root->root_key.objectid);
put_page(page);
ret = -EIO;
break;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 0ec09fe01be6..4d947ba32da9 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -23,7 +23,6 @@
#include <linux/miscdevice.h>
#include <linux/magic.h>
#include <linux/slab.h>
-#include <linux/cleancache.h>
#include <linux/ratelimit.h>
#include <linux/crc32c.h>
#include <linux/btrfs.h>
@@ -1374,7 +1373,6 @@ static int btrfs_fill_super(struct super_block *sb,
goto fail_close;
}
- cleancache_init_fs(sb);
sb->s_flags |= SB_ACTIVE;
return 0;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 03de89b45f27..c3cfdfd8de9b 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1981,16 +1981,24 @@ static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans)
static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
/*
- * We use writeback_inodes_sb here because if we used
+ * We use try_to_writeback_inodes_sb() here because if we used
* btrfs_start_delalloc_roots we would deadlock with fs freeze.
* We are currently holding the fs freeze lock; if we do an async flush
* we'll do btrfs_join_transaction() and deadlock because we need to
* wait for the fs freeze lock. Using the direct flushing we benefit
* from already being in a transaction and our join_transaction doesn't
* have to re-take the fs freeze lock.
+ *
+ * Note that try_to_writeback_inodes_sb() will only trigger writeback
+ * if it can read lock sb->s_umount. It will always be able to lock it,
+ * except when the filesystem is being unmounted or being frozen, but in
+ * those cases sync_filesystem() is called, which results in calling
+ * writeback_inodes_sb() while holding a write lock on sb->s_umount.
+ * Note that we don't call writeback_inodes_sb() directly, because it
+ * will emit a warning if sb->s_umount is not locked.
*/
if (btrfs_test_opt(fs_info, FLUSHONCOMMIT))
- writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
+ try_to_writeback_inodes_sb(fs_info->sb, WB_REASON_SYNC);
return 0;
}
@@ -2000,6 +2008,27 @@ static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1);
}
+/*
+ * Add a pending snapshot associated with the given transaction handle to its
+ * transaction. This must be called after the transaction commit has started
+ * and while holding fs_info->trans_lock.
+ * This serves to guarantee a caller of btrfs_commit_transaction() that it can
+ * safely free the pending snapshot pointer in case btrfs_commit_transaction()
+ * returns an error.
+ */
+static void add_pending_snapshot(struct btrfs_trans_handle *trans)
+{
+ struct btrfs_transaction *cur_trans = trans->transaction;
+
+ if (!trans->pending_snapshot)
+ return;
+
+ lockdep_assert_held(&trans->fs_info->trans_lock);
+ ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START);
+
+ list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots);
+}
+
int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
{
struct btrfs_fs_info *fs_info = trans->fs_info;
@@ -2073,6 +2102,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED;
+ add_pending_snapshot(trans);
+
spin_unlock(&fs_info->trans_lock);
refcount_inc(&cur_trans->use_count);
@@ -2163,6 +2194,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
* COMMIT_DOING so make sure to wait for num_writers to == 1 again.
*/
spin_lock(&fs_info->trans_lock);
+ add_pending_snapshot(trans);
cur_trans->state = TRANS_STATE_COMMIT_DOING;
spin_unlock(&fs_info->trans_lock);
wait_event(cur_trans->writer_wait,
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 1852ed9de7fd..9402d8d94484 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -123,6 +123,8 @@ struct btrfs_trans_handle {
struct btrfs_transaction *transaction;
struct btrfs_block_rsv *block_rsv;
struct btrfs_block_rsv *orig_rsv;
+ /* Set by a task that wants to create a snapshot. */
+ struct btrfs_pending_snapshot *pending_snapshot;
refcount_t use_count;
unsigned int type;
/*
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 72e1c942197d..9fd145f1c4bc 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -965,6 +965,7 @@ static int check_dev_item(struct extent_buffer *leaf,
struct btrfs_key *key, int slot)
{
struct btrfs_dev_item *ditem;
+ const u32 item_size = btrfs_item_size(leaf, slot);
if (unlikely(key->objectid != BTRFS_DEV_ITEMS_OBJECTID)) {
dev_item_err(leaf, slot,
@@ -972,6 +973,13 @@ static int check_dev_item(struct extent_buffer *leaf,
key->objectid, BTRFS_DEV_ITEMS_OBJECTID);
return -EUCLEAN;
}
+
+ if (unlikely(item_size != sizeof(*ditem))) {
+ dev_item_err(leaf, slot, "invalid item size: has %u expect %zu",
+ item_size, sizeof(*ditem));
+ return -EUCLEAN;
+ }
+
ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item);
if (unlikely(btrfs_device_id(leaf, ditem) != key->offset)) {
dev_item_err(leaf, slot,
@@ -1007,6 +1015,7 @@ static int check_inode_item(struct extent_buffer *leaf,
struct btrfs_inode_item *iitem;
u64 super_gen = btrfs_super_generation(fs_info->super_copy);
u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777);
+ const u32 item_size = btrfs_item_size(leaf, slot);
u32 mode;
int ret;
u32 flags;
@@ -1016,6 +1025,12 @@ static int check_inode_item(struct extent_buffer *leaf,
if (unlikely(ret < 0))
return ret;
+ if (unlikely(item_size != sizeof(*iitem))) {
+ generic_err(leaf, slot, "invalid item size: has %u expect %zu",
+ item_size, sizeof(*iitem));
+ return -EUCLEAN;
+ }
+
iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item);
/* Here we use super block generation + 1 to handle log tree */
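Both new tree-checker tests reduce to the same rule: an item whose recorded size differs from the structure about to be read is corrupt and must be rejected before any field access. Schematically (the struct layout here is a stand-in, not the real btrfs_dev_item):

#include <stdint.h>
#include <stdio.h>

struct dev_item_example { uint64_t devid; uint64_t total_bytes; };

/* Reject items whose on-disk size disagrees with the in-memory layout,
 * so the item-pointer cast can never read past the item's end.
 */
static int check_item_size(uint32_t item_size, size_t expect)
{
	if (item_size != expect) {
		fprintf(stderr, "invalid item size: has %u expect %zu\n",
			item_size, expect);
		return -1;	/* -EUCLEAN in the kernel */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", check_item_size(16, sizeof(struct dev_item_example)));
	printf("%d\n", check_item_size(8, sizeof(struct dev_item_example)));
	return 0;
}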
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index c1ddbe800897..3ee014c06b82 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3414,6 +3414,29 @@ static void free_log_tree(struct btrfs_trans_handle *trans,
if (log->node) {
ret = walk_log_tree(trans, log, &wc);
if (ret) {
+ /*
+ * We weren't able to traverse the entire log tree; the
+ * typical scenario is getting an -EIO when reading an
+ * extent buffer of the tree, due to a previous writeback
+ * failure of that buffer.
+ */
+ set_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR,
+ &log->fs_info->fs_state);
+
+ /*
+ * Some extent buffers of the log tree may still be dirty
+ * and not yet written back to storage, because we may
+ * have updates to a log tree without syncing a log tree,
+ * such as during rename and link operations. So flush
+ * them out and wait for their writeback to complete, so
+ * that we properly cleanup their state and pages.
+ */
+ btrfs_write_marked_extents(log->fs_info,
+ &log->dirty_log_pages,
+ EXTENT_DIRTY | EXTENT_NEW);
+ btrfs_wait_tree_log_extents(log,
+ EXTENT_DIRTY | EXTENT_NEW);
+
if (trans)
btrfs_abort_transaction(trans, ret);
else
diff --git a/fs/buffer.c b/fs/buffer.c
index 46bc589b7a03..8e112b6bd371 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1969,34 +1969,34 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh,
}
}
-int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block, const struct iomap *iomap)
{
unsigned from = pos & (PAGE_SIZE - 1);
unsigned to = from + len;
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
unsigned block_start, block_end;
sector_t block;
int err = 0;
unsigned blocksize, bbits;
struct buffer_head *bh, *head, *wait[2], **wait_bh=wait;
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
BUG_ON(from > PAGE_SIZE);
BUG_ON(to > PAGE_SIZE);
BUG_ON(from > to);
- head = create_page_buffers(page, inode, 0);
+ head = create_page_buffers(&folio->page, inode, 0);
blocksize = head->b_size;
bbits = block_size_bits(blocksize);
- block = (sector_t)page->index << (PAGE_SHIFT - bbits);
+ block = (sector_t)folio->index << (PAGE_SHIFT - bbits);
for(bh = head, block_start = 0; bh != head || !block_start;
block++, block_start=block_end, bh = bh->b_this_page) {
block_end = block_start + blocksize;
if (block_end <= from || block_start >= to) {
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
}
@@ -2016,20 +2016,20 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
if (buffer_new(bh)) {
clean_bdev_bh_alias(bh);
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
clear_buffer_new(bh);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
continue;
}
if (block_end > to || block_start < from)
- zero_user_segments(page,
+ folio_zero_segments(folio,
to, block_end,
block_start, from);
continue;
}
}
- if (PageUptodate(page)) {
+ if (folio_test_uptodate(folio)) {
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
continue;
@@ -2050,14 +2050,15 @@ int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
err = -EIO;
}
if (unlikely(err))
- page_zero_new_buffers(page, from, to);
+ page_zero_new_buffers(&folio->page, from, to);
return err;
}
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block)
{
- return __block_write_begin_int(page, pos, len, get_block, NULL);
+ return __block_write_begin_int(page_folio(page), pos, len, get_block,
+ NULL);
}
EXPORT_SYMBOL(__block_write_begin);
diff --git a/fs/cachefiles/Kconfig b/fs/cachefiles/Kconfig
index 6827b40f7ddc..719faeeda168 100644
--- a/fs/cachefiles/Kconfig
+++ b/fs/cachefiles/Kconfig
@@ -19,3 +19,10 @@ config CACHEFILES_DEBUG
caching on files module. If this is set, the debugging output may be
enabled by setting bits in /sys/modules/cachefiles/parameter/debug or
by including a debugging specifier in /etc/cachefilesd.conf.
+
+config CACHEFILES_ERROR_INJECTION
+ bool "Provide error injection for cachefiles"
+ depends on CACHEFILES && SYSCTL
+ help
+ This permits error injection to be enabled in cachefiles whilst a
+ cache is in service.
diff --git a/fs/cachefiles/Makefile b/fs/cachefiles/Makefile
index 02fd17731769..16d811f1a2fa 100644
--- a/fs/cachefiles/Makefile
+++ b/fs/cachefiles/Makefile
@@ -4,15 +4,17 @@
#
cachefiles-y := \
- bind.o \
+ cache.o \
daemon.o \
interface.o \
io.o \
key.o \
main.o \
namei.o \
- rdwr.o \
security.o \
+ volume.o \
xattr.o
+cachefiles-$(CONFIG_CACHEFILES_ERROR_INJECTION) += error_inject.o
+
obj-$(CONFIG_CACHEFILES) := cachefiles.o
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
deleted file mode 100644
index 146291be6263..000000000000
--- a/fs/cachefiles/bind.c
+++ /dev/null
@@ -1,278 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Bind and unbind a cache from the filesystem backing it
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/slab.h>
-#include <linux/fs.h>
-#include <linux/file.h>
-#include <linux/namei.h>
-#include <linux/mount.h>
-#include <linux/statfs.h>
-#include <linux/ctype.h>
-#include <linux/xattr.h>
-#include "internal.h"
-
-static int cachefiles_daemon_add_cache(struct cachefiles_cache *caches);
-
-/*
- * bind a directory as a cache
- */
-int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
-{
- _enter("{%u,%u,%u,%u,%u,%u},%s",
- cache->frun_percent,
- cache->fcull_percent,
- cache->fstop_percent,
- cache->brun_percent,
- cache->bcull_percent,
- cache->bstop_percent,
- args);
-
- /* start by checking things over */
- ASSERT(cache->fstop_percent >= 0 &&
- cache->fstop_percent < cache->fcull_percent &&
- cache->fcull_percent < cache->frun_percent &&
- cache->frun_percent < 100);
-
- ASSERT(cache->bstop_percent >= 0 &&
- cache->bstop_percent < cache->bcull_percent &&
- cache->bcull_percent < cache->brun_percent &&
- cache->brun_percent < 100);
-
- if (*args) {
- pr_err("'bind' command doesn't take an argument\n");
- return -EINVAL;
- }
-
- if (!cache->rootdirname) {
- pr_err("No cache directory specified\n");
- return -EINVAL;
- }
-
- /* don't permit already bound caches to be re-bound */
- if (test_bit(CACHEFILES_READY, &cache->flags)) {
- pr_err("Cache already bound\n");
- return -EBUSY;
- }
-
- /* make sure we have copies of the tag and dirname strings */
- if (!cache->tag) {
- /* the tag string is released by the fops->release()
- * function, so we don't release it on error here */
- cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
- if (!cache->tag)
- return -ENOMEM;
- }
-
- /* add the cache */
- return cachefiles_daemon_add_cache(cache);
-}
-
-/*
- * add a cache
- */
-static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
-{
- struct cachefiles_object *fsdef;
- struct path path;
- struct kstatfs stats;
- struct dentry *graveyard, *cachedir, *root;
- const struct cred *saved_cred;
- int ret;
-
- _enter("");
-
- /* we want to work under the module's security ID */
- ret = cachefiles_get_security_ID(cache);
- if (ret < 0)
- return ret;
-
- cachefiles_begin_secure(cache, &saved_cred);
-
- /* allocate the root index object */
- ret = -ENOMEM;
-
- fsdef = kmem_cache_alloc(cachefiles_object_jar, GFP_KERNEL);
- if (!fsdef)
- goto error_root_object;
-
- ASSERTCMP(fsdef->backer, ==, NULL);
-
- atomic_set(&fsdef->usage, 1);
- fsdef->type = FSCACHE_COOKIE_TYPE_INDEX;
-
- /* look up the directory at the root of the cache */
- ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path);
- if (ret < 0)
- goto error_open_root;
-
- cache->mnt = path.mnt;
- root = path.dentry;
-
- ret = -EINVAL;
- if (is_idmapped_mnt(path.mnt)) {
- pr_warn("File cache on idmapped mounts not supported");
- goto error_unsupported;
- }
-
- /* check parameters */
- ret = -EOPNOTSUPP;
- if (d_is_negative(root) ||
- !d_backing_inode(root)->i_op->lookup ||
- !d_backing_inode(root)->i_op->mkdir ||
- !(d_backing_inode(root)->i_opflags & IOP_XATTR) ||
- !root->d_sb->s_op->statfs ||
- !root->d_sb->s_op->sync_fs)
- goto error_unsupported;
-
- ret = -EROFS;
- if (sb_rdonly(root->d_sb))
- goto error_unsupported;
-
- /* determine the security of the on-disk cache as this governs
- * security ID of files we create */
- ret = cachefiles_determine_cache_security(cache, root, &saved_cred);
- if (ret < 0)
- goto error_unsupported;
-
- /* get the cache size and blocksize */
- ret = vfs_statfs(&path, &stats);
- if (ret < 0)
- goto error_unsupported;
-
- ret = -ERANGE;
- if (stats.f_bsize <= 0)
- goto error_unsupported;
-
- ret = -EOPNOTSUPP;
- if (stats.f_bsize > PAGE_SIZE)
- goto error_unsupported;
-
- cache->bsize = stats.f_bsize;
- cache->bshift = 0;
- if (stats.f_bsize < PAGE_SIZE)
- cache->bshift = PAGE_SHIFT - ilog2(stats.f_bsize);
-
- _debug("blksize %u (shift %u)",
- cache->bsize, cache->bshift);
-
- _debug("size %llu, avail %llu",
- (unsigned long long) stats.f_blocks,
- (unsigned long long) stats.f_bavail);
-
- /* set up caching limits */
- do_div(stats.f_files, 100);
- cache->fstop = stats.f_files * cache->fstop_percent;
- cache->fcull = stats.f_files * cache->fcull_percent;
- cache->frun = stats.f_files * cache->frun_percent;
-
- _debug("limits {%llu,%llu,%llu} files",
- (unsigned long long) cache->frun,
- (unsigned long long) cache->fcull,
- (unsigned long long) cache->fstop);
-
- stats.f_blocks >>= cache->bshift;
- do_div(stats.f_blocks, 100);
- cache->bstop = stats.f_blocks * cache->bstop_percent;
- cache->bcull = stats.f_blocks * cache->bcull_percent;
- cache->brun = stats.f_blocks * cache->brun_percent;
-
- _debug("limits {%llu,%llu,%llu} blocks",
- (unsigned long long) cache->brun,
- (unsigned long long) cache->bcull,
- (unsigned long long) cache->bstop);
-
- /* get the cache directory and check its type */
- cachedir = cachefiles_get_directory(cache, root, "cache");
- if (IS_ERR(cachedir)) {
- ret = PTR_ERR(cachedir);
- goto error_unsupported;
- }
-
- fsdef->dentry = cachedir;
- fsdef->fscache.cookie = NULL;
-
- ret = cachefiles_check_object_type(fsdef);
- if (ret < 0)
- goto error_unsupported;
-
- /* get the graveyard directory */
- graveyard = cachefiles_get_directory(cache, root, "graveyard");
- if (IS_ERR(graveyard)) {
- ret = PTR_ERR(graveyard);
- goto error_unsupported;
- }
-
- cache->graveyard = graveyard;
-
- /* publish the cache */
- fscache_init_cache(&cache->cache,
- &cachefiles_cache_ops,
- "%s",
- fsdef->dentry->d_sb->s_id);
-
- fscache_object_init(&fsdef->fscache, &fscache_fsdef_index,
- &cache->cache);
-
- ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag);
- if (ret < 0)
- goto error_add_cache;
-
- /* done */
- set_bit(CACHEFILES_READY, &cache->flags);
- dput(root);
-
- pr_info("File cache on %s registered\n", cache->cache.identifier);
-
- /* check how much space the cache has */
- cachefiles_has_space(cache, 0, 0);
- cachefiles_end_secure(cache, saved_cred);
- return 0;
-
-error_add_cache:
- dput(cache->graveyard);
- cache->graveyard = NULL;
-error_unsupported:
- mntput(cache->mnt);
- cache->mnt = NULL;
- dput(fsdef->dentry);
- fsdef->dentry = NULL;
- dput(root);
-error_open_root:
- kmem_cache_free(cachefiles_object_jar, fsdef);
-error_root_object:
- cachefiles_end_secure(cache, saved_cred);
- pr_err("Failed to register: %d\n", ret);
- return ret;
-}
-
-/*
- * unbind a cache on fd release
- */
-void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
-{
- _enter("");
-
- if (test_bit(CACHEFILES_READY, &cache->flags)) {
- pr_info("File cache on %s unregistering\n",
- cache->cache.identifier);
-
- fscache_withdraw_cache(&cache->cache);
- }
-
- dput(cache->graveyard);
- mntput(cache->mnt);
-
- kfree(cache->rootdirname);
- kfree(cache->secctx);
- kfree(cache->tag);
-
- _leave("");
-}
diff --git a/fs/cachefiles/cache.c b/fs/cachefiles/cache.c
new file mode 100644
index 000000000000..7077f72e6f47
--- /dev/null
+++ b/fs/cachefiles/cache.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Manage high-level VFS aspects of a cache.
+ *
+ * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/slab.h>
+#include <linux/statfs.h>
+#include <linux/namei.h>
+#include "internal.h"
+
+/*
+ * Bring a cache online.
+ */
+int cachefiles_add_cache(struct cachefiles_cache *cache)
+{
+ struct fscache_cache *cache_cookie;
+ struct path path;
+ struct kstatfs stats;
+ struct dentry *graveyard, *cachedir, *root;
+ const struct cred *saved_cred;
+ int ret;
+
+ _enter("");
+
+ cache_cookie = fscache_acquire_cache(cache->tag);
+ if (IS_ERR(cache_cookie))
+ return PTR_ERR(cache_cookie);
+
+ /* we want to work under the module's security ID */
+ ret = cachefiles_get_security_ID(cache);
+ if (ret < 0)
+ goto error_getsec;
+
+ cachefiles_begin_secure(cache, &saved_cred);
+
+ /* look up the directory at the root of the cache */
+ ret = kern_path(cache->rootdirname, LOOKUP_DIRECTORY, &path);
+ if (ret < 0)
+ goto error_open_root;
+
+ cache->mnt = path.mnt;
+ root = path.dentry;
+
+ ret = -EINVAL;
+ if (is_idmapped_mnt(path.mnt)) {
+ pr_warn("File cache on idmapped mounts not supported");
+ goto error_unsupported;
+ }
+
+ /* Check features of the backing filesystem:
+ * - Directories must support looking up and directory creation
+ * - We create tmpfiles to handle invalidation
+ * - We use xattrs to store metadata
+ * - We need to be able to query the amount of space available
+ * - We want to be able to sync the filesystem when stopping the cache
+ * - We use DIO to/from pages, so the blocksize mustn't be too big.
+ */
+ ret = -EOPNOTSUPP;
+ if (d_is_negative(root) ||
+ !d_backing_inode(root)->i_op->lookup ||
+ !d_backing_inode(root)->i_op->mkdir ||
+ !d_backing_inode(root)->i_op->tmpfile ||
+ !(d_backing_inode(root)->i_opflags & IOP_XATTR) ||
+ !root->d_sb->s_op->statfs ||
+ !root->d_sb->s_op->sync_fs ||
+ root->d_sb->s_blocksize > PAGE_SIZE)
+ goto error_unsupported;
+
+ ret = -EROFS;
+ if (sb_rdonly(root->d_sb))
+ goto error_unsupported;
+
+ /* determine the security of the on-disk cache as this governs
+ * security ID of files we create */
+ ret = cachefiles_determine_cache_security(cache, root, &saved_cred);
+ if (ret < 0)
+ goto error_unsupported;
+
+ /* get the cache size and blocksize */
+ ret = vfs_statfs(&path, &stats);
+ if (ret < 0)
+ goto error_unsupported;
+
+ ret = -ERANGE;
+ if (stats.f_bsize <= 0)
+ goto error_unsupported;
+
+ ret = -EOPNOTSUPP;
+ if (stats.f_bsize > PAGE_SIZE)
+ goto error_unsupported;
+
+ cache->bsize = stats.f_bsize;
+ cache->bshift = ilog2(stats.f_bsize);
+
+ _debug("blksize %u (shift %u)",
+ cache->bsize, cache->bshift);
+
+ _debug("size %llu, avail %llu",
+ (unsigned long long) stats.f_blocks,
+ (unsigned long long) stats.f_bavail);
+
+ /* set up caching limits */
+ do_div(stats.f_files, 100);
+ cache->fstop = stats.f_files * cache->fstop_percent;
+ cache->fcull = stats.f_files * cache->fcull_percent;
+ cache->frun = stats.f_files * cache->frun_percent;
+
+ _debug("limits {%llu,%llu,%llu} files",
+ (unsigned long long) cache->frun,
+ (unsigned long long) cache->fcull,
+ (unsigned long long) cache->fstop);
+
+ do_div(stats.f_blocks, 100);
+ cache->bstop = stats.f_blocks * cache->bstop_percent;
+ cache->bcull = stats.f_blocks * cache->bcull_percent;
+ cache->brun = stats.f_blocks * cache->brun_percent;
+
+ _debug("limits {%llu,%llu,%llu} blocks",
+ (unsigned long long) cache->brun,
+ (unsigned long long) cache->bcull,
+ (unsigned long long) cache->bstop);
+
+ /* get the cache directory and check its type */
+ cachedir = cachefiles_get_directory(cache, root, "cache", NULL);
+ if (IS_ERR(cachedir)) {
+ ret = PTR_ERR(cachedir);
+ goto error_unsupported;
+ }
+
+ cache->store = cachedir;
+
+ /* get the graveyard directory */
+ graveyard = cachefiles_get_directory(cache, root, "graveyard", NULL);
+ if (IS_ERR(graveyard)) {
+ ret = PTR_ERR(graveyard);
+ goto error_unsupported;
+ }
+
+ cache->graveyard = graveyard;
+ cache->cache = cache_cookie;
+
+ ret = fscache_add_cache(cache_cookie, &cachefiles_cache_ops, cache);
+ if (ret < 0)
+ goto error_add_cache;
+
+ /* done */
+ set_bit(CACHEFILES_READY, &cache->flags);
+ dput(root);
+
+ pr_info("File cache on %s registered\n", cache_cookie->name);
+
+ /* check how much space the cache has */
+ cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);
+ cachefiles_end_secure(cache, saved_cred);
+ _leave(" = 0 [%px]", cache->cache);
+ return 0;
+
+error_add_cache:
+ cachefiles_put_directory(cache->graveyard);
+ cache->graveyard = NULL;
+error_unsupported:
+ cachefiles_put_directory(cache->store);
+ cache->store = NULL;
+ mntput(cache->mnt);
+ cache->mnt = NULL;
+ dput(root);
+error_open_root:
+ cachefiles_end_secure(cache, saved_cred);
+error_getsec:
+ fscache_relinquish_cache(cache_cookie);
+ cache->cache = NULL;
+ pr_err("Failed to register: %d\n", ret);
+ return ret;
+}
+
+/*
+ * See if we have space for a number of pages and/or a number of files in the
+ * cache
+ */
+int cachefiles_has_space(struct cachefiles_cache *cache,
+ unsigned fnr, unsigned bnr,
+ enum cachefiles_has_space_for reason)
+{
+ struct kstatfs stats;
+ u64 b_avail, b_writing;
+ int ret;
+
+ struct path path = {
+ .mnt = cache->mnt,
+ .dentry = cache->mnt->mnt_root,
+ };
+
+ //_enter("{%llu,%llu,%llu,%llu,%llu,%llu},%u,%u",
+ // (unsigned long long) cache->frun,
+ // (unsigned long long) cache->fcull,
+ // (unsigned long long) cache->fstop,
+ // (unsigned long long) cache->brun,
+ // (unsigned long long) cache->bcull,
+ // (unsigned long long) cache->bstop,
+ // fnr, bnr);
+
+ /* find out how many pages of blockdev are available */
+ memset(&stats, 0, sizeof(stats));
+
+ ret = vfs_statfs(&path, &stats);
+ if (ret < 0) {
+ trace_cachefiles_vfs_error(NULL, d_inode(path.dentry), ret,
+ cachefiles_trace_statfs_error);
+ if (ret == -EIO)
+ cachefiles_io_error(cache, "statfs failed");
+ _leave(" = %d", ret);
+ return ret;
+ }
+
+ b_avail = stats.f_bavail;
+ b_writing = atomic_long_read(&cache->b_writing);
+ if (b_avail > b_writing)
+ b_avail -= b_writing;
+ else
+ b_avail = 0;
+
+ //_debug("avail %llu,%llu",
+ // (unsigned long long)stats.f_ffree,
+ // (unsigned long long)b_avail);
+
+ /* see if there is sufficient space */
+ if (stats.f_ffree > fnr)
+ stats.f_ffree -= fnr;
+ else
+ stats.f_ffree = 0;
+
+ if (b_avail > bnr)
+ b_avail -= bnr;
+ else
+ b_avail = 0;
+
+ ret = -ENOBUFS;
+ if (stats.f_ffree < cache->fstop ||
+ b_avail < cache->bstop)
+ goto stop_and_begin_cull;
+
+ ret = 0;
+ if (stats.f_ffree < cache->fcull ||
+ b_avail < cache->bcull)
+ goto begin_cull;
+
+ if (test_bit(CACHEFILES_CULLING, &cache->flags) &&
+ stats.f_ffree >= cache->frun &&
+ b_avail >= cache->brun &&
+ test_and_clear_bit(CACHEFILES_CULLING, &cache->flags)
+ ) {
+ _debug("cease culling");
+ cachefiles_state_changed(cache);
+ }
+
+ //_leave(" = 0");
+ return 0;
+
+stop_and_begin_cull:
+ switch (reason) {
+ case cachefiles_has_space_for_write:
+ fscache_count_no_write_space();
+ break;
+ case cachefiles_has_space_for_create:
+ fscache_count_no_create_space();
+ break;
+ default:
+ break;
+ }
+begin_cull:
+ if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) {
+ _debug("### CULL CACHE ###");
+ cachefiles_state_changed(cache);
+ }
+
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * Mark all the objects as being out of service and queue them all for cleanup.
+ */
+static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
+{
+ struct cachefiles_object *object;
+ unsigned int count = 0;
+
+ _enter("");
+
+ spin_lock(&cache->object_list_lock);
+
+ while (!list_empty(&cache->object_list)) {
+ object = list_first_entry(&cache->object_list,
+ struct cachefiles_object, cache_link);
+ cachefiles_see_object(object, cachefiles_obj_see_withdrawal);
+ list_del_init(&object->cache_link);
+ fscache_withdraw_cookie(object->cookie);
+ count++;
+ if ((count & 63) == 0) {
+ spin_unlock(&cache->object_list_lock);
+ cond_resched();
+ spin_lock(&cache->object_list_lock);
+ }
+ }
+
+ spin_unlock(&cache->object_list_lock);
+ _leave(" [%u objs]", count);
+}
+
+/*
+ * Withdraw volumes.
+ */
+static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
+{
+ _enter("");
+
+ for (;;) {
+ struct cachefiles_volume *volume = NULL;
+
+ spin_lock(&cache->object_list_lock);
+ if (!list_empty(&cache->volumes)) {
+ volume = list_first_entry(&cache->volumes,
+ struct cachefiles_volume, cache_link);
+ list_del_init(&volume->cache_link);
+ }
+ spin_unlock(&cache->object_list_lock);
+ if (!volume)
+ break;
+
+ cachefiles_withdraw_volume(volume);
+ }
+
+ _leave("");
+}
+
+/*
+ * Sync a cache to backing disk.
+ */
+static void cachefiles_sync_cache(struct cachefiles_cache *cache)
+{
+ const struct cred *saved_cred;
+ int ret;
+
+ _enter("%s", cache->cache->name);
+
+ /* make sure all pages pinned by operations on behalf of the netfs are
+ * written to disc */
+ cachefiles_begin_secure(cache, &saved_cred);
+ down_read(&cache->mnt->mnt_sb->s_umount);
+ ret = sync_filesystem(cache->mnt->mnt_sb);
+ up_read(&cache->mnt->mnt_sb->s_umount);
+ cachefiles_end_secure(cache, saved_cred);
+
+ if (ret == -EIO)
+ cachefiles_io_error(cache,
+ "Attempt to sync backing fs superblock returned error %d",
+ ret);
+}
+
+/*
+ * Withdraw cache objects.
+ */
+void cachefiles_withdraw_cache(struct cachefiles_cache *cache)
+{
+ struct fscache_cache *fscache = cache->cache;
+
+ pr_info("File cache on %s unregistering\n", fscache->name);
+
+ fscache_withdraw_cache(fscache);
+
+ /* we now have to destroy all the active objects pertaining to this
+ * cache - which we do by passing them off to thread pool to be
+ * disposed of */
+ cachefiles_withdraw_objects(cache);
+ fscache_wait_for_objects(fscache);
+
+ cachefiles_withdraw_volumes(cache);
+ cachefiles_sync_cache(cache);
+ cache->cache = NULL;
+ fscache_relinquish_cache(fscache);
+}
diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
index 752c1e43416f..7ac04ee2c0a0 100644
--- a/fs/cachefiles/daemon.c
+++ b/fs/cachefiles/daemon.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Daemon interface
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
@@ -41,6 +41,8 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *, char *);
static int cachefiles_daemon_inuse(struct cachefiles_cache *, char *);
static int cachefiles_daemon_secctx(struct cachefiles_cache *, char *);
static int cachefiles_daemon_tag(struct cachefiles_cache *, char *);
+static int cachefiles_daemon_bind(struct cachefiles_cache *, char *);
+static void cachefiles_daemon_unbind(struct cachefiles_cache *);
static unsigned long cachefiles_open;
@@ -78,7 +80,7 @@ static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
/*
- * do various checks
+ * Prepare a cache for caching.
*/
static int cachefiles_daemon_open(struct inode *inode, struct file *file)
{
@@ -102,9 +104,10 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
}
mutex_init(&cache->daemon_mutex);
- cache->active_nodes = RB_ROOT;
- rwlock_init(&cache->active_lock);
init_waitqueue_head(&cache->daemon_pollwq);
+ INIT_LIST_HEAD(&cache->volumes);
+ INIT_LIST_HEAD(&cache->object_list);
+ spin_lock_init(&cache->object_list_lock);
/* set default caching limits
* - limit at 1% free space and/or free files
@@ -124,7 +127,7 @@ static int cachefiles_daemon_open(struct inode *inode, struct file *file)
}
/*
- * release a cache
+ * Release a cache.
*/
static int cachefiles_daemon_release(struct inode *inode, struct file *file)
{
@@ -138,8 +141,6 @@ static int cachefiles_daemon_release(struct inode *inode, struct file *file)
cachefiles_daemon_unbind(cache);
- ASSERT(!cache->active_nodes.rb_node);
-
/* clean up the control file interface */
cache->cachefilesd = NULL;
file->private_data = NULL;
@@ -152,7 +153,7 @@ static int cachefiles_daemon_release(struct inode *inode, struct file *file)
}
/*
- * read the cache state
+ * Read the cache state.
*/
static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
size_t buflen, loff_t *pos)
@@ -169,7 +170,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
return 0;
/* check how much space the cache has */
- cachefiles_has_space(cache, 0, 0);
+ cachefiles_has_space(cache, 0, 0, cachefiles_has_space_check);
/* summarise */
f_released = atomic_xchg(&cache->f_released, 0);
@@ -206,7 +207,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
}
/*
- * command the cache
+ * Take a command from cachefilesd, parse it and act on it.
*/
static ssize_t cachefiles_daemon_write(struct file *file,
const char __user *_data,
@@ -225,7 +226,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
if (test_bit(CACHEFILES_DEAD, &cache->flags))
return -EIO;
- if (datalen < 0 || datalen > PAGE_SIZE - 1)
+ if (datalen > PAGE_SIZE - 1)
return -EOPNOTSUPP;
/* drag the command string into the kernel so we can parse it */
@@ -284,7 +285,7 @@ found_command:
}
/*
- * poll for culling state
+ * Poll for culling state
* - use EPOLLOUT to indicate culling state
*/
static __poll_t cachefiles_daemon_poll(struct file *file,
@@ -306,7 +307,7 @@ static __poll_t cachefiles_daemon_poll(struct file *file,
}
/*
- * give a range error for cache space constraints
+ * Give a range error for cache space constraints
* - can be tail-called
*/
static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
@@ -318,7 +319,7 @@ static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
}
/*
- * set the percentage of files at which to stop culling
+ * Set the percentage of files at which to stop culling
* - command: "frun <N>%"
*/
static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
@@ -342,7 +343,7 @@ static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
}
/*
- * set the percentage of files at which to start culling
+ * Set the percentage of files at which to start culling
* - command: "fcull <N>%"
*/
static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
@@ -366,7 +367,7 @@ static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
}
/*
- * set the percentage of files at which to stop allocating
+ * Set the percentage of files at which to stop allocating
* - command: "fstop <N>%"
*/
static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
@@ -382,7 +383,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
if (args[0] != '%' || args[1] != '\0')
return -EINVAL;
- if (fstop < 0 || fstop >= cache->fcull_percent)
+ if (fstop >= cache->fcull_percent)
return cachefiles_daemon_range_error(cache, args);
cache->fstop_percent = fstop;
@@ -390,7 +391,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
}
/*
- * set the percentage of blocks at which to stop culling
+ * Set the percentage of blocks at which to stop culling
* - command: "brun <N>%"
*/
static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
@@ -414,7 +415,7 @@ static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
}
/*
- * set the percentage of blocks at which to start culling
+ * Set the percentage of blocks at which to start culling
* - command: "bcull <N>%"
*/
static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
@@ -438,7 +439,7 @@ static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
}
/*
- * set the percentage of blocks at which to stop allocating
+ * Set the percentage of blocks at which to stop allocating
* - command: "bstop <N>%"
*/
static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
@@ -454,7 +455,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
if (args[0] != '%' || args[1] != '\0')
return -EINVAL;
- if (bstop < 0 || bstop >= cache->bcull_percent)
+ if (bstop >= cache->bcull_percent)
return cachefiles_daemon_range_error(cache, args);
cache->bstop_percent = bstop;
@@ -462,7 +463,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
}
/*
- * set the cache directory
+ * Set the cache directory
* - command: "dir <name>"
*/
static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
@@ -490,7 +491,7 @@ static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
}
/*
- * set the cache security context
+ * Set the cache security context
* - command: "secctx <ctx>"
*/
static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
@@ -518,7 +519,7 @@ static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
}
/*
- * set the cache tag
+ * Set the cache tag
* - command: "tag <name>"
*/
static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
@@ -544,7 +545,7 @@ static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
}
/*
- * request a node in the cache be culled from the current working directory
+ * Request a node in the cache be culled from the current working directory
* - command: "cull <name>"
*/
static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
@@ -568,7 +569,6 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
return -EIO;
}
- /* extract the directory dentry from the cwd */
get_fs_pwd(current->fs, &path);
if (!d_can_lookup(path.dentry))
@@ -593,7 +593,7 @@ inval:
}
/*
- * set debugging mode
+ * Set debugging mode
* - command: "debug <mask>"
*/
static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
@@ -616,7 +616,7 @@ inval:
}
/*
- * find out whether an object in the current working directory is in use or not
+ * Find out whether an object in the current working directory is in use or not
* - command: "inuse <name>"
*/
static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
@@ -640,7 +640,6 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
return -EIO;
}
- /* extract the directory dentry from the cwd */
get_fs_pwd(current->fs, &path);
if (!d_can_lookup(path.dentry))
@@ -665,84 +664,76 @@ inval:
}
/*
- * see if we have space for a number of pages and/or a number of files in the
- * cache
+ * Bind a directory as a cache
*/
-int cachefiles_has_space(struct cachefiles_cache *cache,
- unsigned fnr, unsigned bnr)
+static int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
{
- struct kstatfs stats;
- struct path path = {
- .mnt = cache->mnt,
- .dentry = cache->mnt->mnt_root,
- };
- int ret;
+ _enter("{%u,%u,%u,%u,%u,%u},%s",
+ cache->frun_percent,
+ cache->fcull_percent,
+ cache->fstop_percent,
+ cache->brun_percent,
+ cache->bcull_percent,
+ cache->bstop_percent,
+ args);
+
+ if (cache->fstop_percent >= cache->fcull_percent ||
+ cache->fcull_percent >= cache->frun_percent ||
+ cache->frun_percent >= 100)
+ return -ERANGE;
+
+ if (cache->bstop_percent >= cache->bcull_percent ||
+ cache->bcull_percent >= cache->brun_percent ||
+ cache->brun_percent >= 100)
+ return -ERANGE;
- //_enter("{%llu,%llu,%llu,%llu,%llu,%llu},%u,%u",
- // (unsigned long long) cache->frun,
- // (unsigned long long) cache->fcull,
- // (unsigned long long) cache->fstop,
- // (unsigned long long) cache->brun,
- // (unsigned long long) cache->bcull,
- // (unsigned long long) cache->bstop,
- // fnr, bnr);
-
- /* find out how many pages of blockdev are available */
- memset(&stats, 0, sizeof(stats));
-
- ret = vfs_statfs(&path, &stats);
- if (ret < 0) {
- if (ret == -EIO)
- cachefiles_io_error(cache, "statfs failed");
- _leave(" = %d", ret);
- return ret;
+ if (*args) {
+ pr_err("'bind' command doesn't take an argument\n");
+ return -EINVAL;
}
- stats.f_bavail >>= cache->bshift;
-
- //_debug("avail %llu,%llu",
- // (unsigned long long) stats.f_ffree,
- // (unsigned long long) stats.f_bavail);
-
- /* see if there is sufficient space */
- if (stats.f_ffree > fnr)
- stats.f_ffree -= fnr;
- else
- stats.f_ffree = 0;
-
- if (stats.f_bavail > bnr)
- stats.f_bavail -= bnr;
- else
- stats.f_bavail = 0;
-
- ret = -ENOBUFS;
- if (stats.f_ffree < cache->fstop ||
- stats.f_bavail < cache->bstop)
- goto begin_cull;
-
- ret = 0;
- if (stats.f_ffree < cache->fcull ||
- stats.f_bavail < cache->bcull)
- goto begin_cull;
-
- if (test_bit(CACHEFILES_CULLING, &cache->flags) &&
- stats.f_ffree >= cache->frun &&
- stats.f_bavail >= cache->brun &&
- test_and_clear_bit(CACHEFILES_CULLING, &cache->flags)
- ) {
- _debug("cease culling");
- cachefiles_state_changed(cache);
+ if (!cache->rootdirname) {
+ pr_err("No cache directory specified\n");
+ return -EINVAL;
}
- //_leave(" = 0");
- return 0;
+ /* Don't permit already bound caches to be re-bound */
+ if (test_bit(CACHEFILES_READY, &cache->flags)) {
+ pr_err("Cache already bound\n");
+ return -EBUSY;
+ }
-begin_cull:
- if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) {
- _debug("### CULL CACHE ###");
- cachefiles_state_changed(cache);
+	/* Make sure we have a copy of the tag string */
+ if (!cache->tag) {
+ /*
+ * The tag string is released by the fops->release()
+ * function, so we don't release it on error here
+ */
+ cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
+ if (!cache->tag)
+ return -ENOMEM;
}
- _leave(" = %d", ret);
- return ret;
+ return cachefiles_add_cache(cache);
+}
+
+/*
+ * Unbind a cache.
+ */
+static void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
+{
+ _enter("");
+
+ if (test_bit(CACHEFILES_READY, &cache->flags))
+ cachefiles_withdraw_cache(cache);
+
+ cachefiles_put_directory(cache->graveyard);
+ cachefiles_put_directory(cache->store);
+ mntput(cache->mnt);
+
+ kfree(cache->rootdirname);
+ kfree(cache->secctx);
+ kfree(cache->tag);
+
+ _leave("");
}
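
Taken together with the command table earlier in this file, binding a cache from user space is just a sequence of writes to the daemon fd: set "dir" (and optionally "tag" and the percentage limits), then issue "bind" last. A hedged sketch of a minimal driver follows; the /dev/cachefiles device node and the cache directory path are assumptions for illustration, and note that closing the fd runs cachefiles_daemon_release() and therefore unbinds:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int send_cmd(int fd, const char *cmd)
{
	if (write(fd, cmd, strlen(cmd)) < 0) {
		perror(cmd);
		return -1;
	}
	return 0;
}

int main(void)
{
	int fd = open("/dev/cachefiles", O_RDWR);

	if (fd < 0) {
		perror("open /dev/cachefiles");
		return 1;
	}
	/* One command per write(); "bind" takes no argument and must
	 * come after the cache directory has been configured. */
	if (send_cmd(fd, "dir /var/cache/fscache") ||
	    send_cmd(fd, "tag mycache") ||
	    send_cmd(fd, "bind"))
		return 1;
	pause();	/* keep the fd open: releasing it unbinds the cache */
	return 0;
}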
diff --git a/fs/cachefiles/error_inject.c b/fs/cachefiles/error_inject.c
new file mode 100644
index 000000000000..58f8aec964e4
--- /dev/null
+++ b/fs/cachefiles/error_inject.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Error injection handling.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/sysctl.h>
+#include "internal.h"
+
+unsigned int cachefiles_error_injection_state;
+
+static struct ctl_table_header *cachefiles_sysctl;
+static struct ctl_table cachefiles_sysctls[] = {
+ {
+ .procname = "error_injection",
+ .data = &cachefiles_error_injection_state,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_douintvec,
+ },
+ {}
+};
+
+static struct ctl_table cachefiles_sysctls_root[] = {
+ {
+ .procname = "cachefiles",
+ .mode = 0555,
+ .child = cachefiles_sysctls,
+ },
+ {}
+};
+
+int __init cachefiles_register_error_injection(void)
+{
+ cachefiles_sysctl = register_sysctl_table(cachefiles_sysctls_root);
+ if (!cachefiles_sysctl)
+ return -ENOMEM;
+ return 0;
+}
+
+void cachefiles_unregister_error_injection(void)
+{
+ unregister_sysctl_table(cachefiles_sysctl);
+}
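
With the table root above, the knob surfaces as /proc/sys/cachefiles/error_injection (assuming the usual /proc/sys mount). Per the inject helpers added to internal.h later in this patch, bit 1 (value 2) forces -EIO on the read, write and remove paths, while bit 0 (value 1) forces -ENOSPC on writes. A small sketch to arm it from a test harness:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/cachefiles/error_injection", "w");

	if (!f) {
		perror("error_injection");
		return 1;
	}
	fputs("2\n", f);	/* bit 1: inject -EIO into cache I/O paths */
	return fclose(f) ? 1 : 0;
}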
diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c
index da28ac1fa225..51c968cd00a6 100644
--- a/fs/cachefiles/interface.c
+++ b/fs/cachefiles/interface.c
@@ -1,572 +1,445 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache interface to CacheFiles
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#include <linux/slab.h>
#include <linux/mount.h>
+#include <linux/xattr.h>
+#include <linux/file.h>
+#include <linux/falloc.h>
+#include <trace/events/fscache.h>
#include "internal.h"
-struct cachefiles_lookup_data {
- struct cachefiles_xattr *auxdata; /* auxiliary data */
- char *key; /* key path */
-};
-
-static int cachefiles_attr_changed(struct fscache_object *_object);
+static atomic_t cachefiles_object_debug_id;
/*
- * allocate an object record for a cookie lookup and prepare the lookup data
+ * Allocate a cache object record.
*/
-static struct fscache_object *cachefiles_alloc_object(
- struct fscache_cache *_cache,
- struct fscache_cookie *cookie)
+static
+struct cachefiles_object *cachefiles_alloc_object(struct fscache_cookie *cookie)
{
- struct cachefiles_lookup_data *lookup_data;
+ struct fscache_volume *vcookie = cookie->volume;
+ struct cachefiles_volume *volume = vcookie->cache_priv;
struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- struct cachefiles_xattr *auxdata;
- unsigned keylen, auxlen;
- void *buffer, *p;
- char *key;
- cache = container_of(_cache, struct cachefiles_cache, cache);
+ _enter("{%s},%x,", vcookie->key, cookie->debug_id);
- _enter("{%s},%x,", cache->cache.identifier, cookie->debug_id);
-
- lookup_data = kmalloc(sizeof(*lookup_data), cachefiles_gfp);
- if (!lookup_data)
- goto nomem_lookup_data;
-
- /* create a new object record and a temporary leaf image */
- object = kmem_cache_alloc(cachefiles_object_jar, cachefiles_gfp);
+ object = kmem_cache_zalloc(cachefiles_object_jar, GFP_KERNEL);
if (!object)
- goto nomem_object;
-
- ASSERTCMP(object->backer, ==, NULL);
+ return NULL;
- BUG_ON(test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags));
- atomic_set(&object->usage, 1);
+ refcount_set(&object->ref, 1);
- fscache_object_init(&object->fscache, cookie, &cache->cache);
+ spin_lock_init(&object->lock);
+ INIT_LIST_HEAD(&object->cache_link);
+ object->volume = volume;
+ object->debug_id = atomic_inc_return(&cachefiles_object_debug_id);
+ object->cookie = fscache_get_cookie(cookie, fscache_cookie_get_attach_object);
- object->type = cookie->def->type;
-
- /* get hold of the raw key
- * - stick the length on the front and leave space on the back for the
- * encoder
- */
- buffer = kmalloc((2 + 512) + 3, cachefiles_gfp);
- if (!buffer)
- goto nomem_buffer;
-
- keylen = cookie->key_len;
- if (keylen <= sizeof(cookie->inline_key))
- p = cookie->inline_key;
- else
- p = cookie->key;
- memcpy(buffer + 2, p, keylen);
-
- *(uint16_t *)buffer = keylen;
- ((char *)buffer)[keylen + 2] = 0;
- ((char *)buffer)[keylen + 3] = 0;
- ((char *)buffer)[keylen + 4] = 0;
-
- /* turn the raw key into something that can work with as a filename */
- key = cachefiles_cook_key(buffer, keylen + 2, object->type);
- if (!key)
- goto nomem_key;
-
- /* get hold of the auxiliary data and prepend the object type */
- auxdata = buffer;
- auxlen = cookie->aux_len;
- if (auxlen) {
- if (auxlen <= sizeof(cookie->inline_aux))
- p = cookie->inline_aux;
- else
- p = cookie->aux;
- memcpy(auxdata->data, p, auxlen);
- }
-
- auxdata->len = auxlen + 1;
- auxdata->type = cookie->type;
-
- lookup_data->auxdata = auxdata;
- lookup_data->key = key;
- object->lookup_data = lookup_data;
-
- _leave(" = %x [%p]", object->fscache.debug_id, lookup_data);
- return &object->fscache;
-
-nomem_key:
- kfree(buffer);
-nomem_buffer:
- BUG_ON(test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags));
- kmem_cache_free(cachefiles_object_jar, object);
- fscache_object_destroyed(&cache->cache);
-nomem_object:
- kfree(lookup_data);
-nomem_lookup_data:
- _leave(" = -ENOMEM");
- return ERR_PTR(-ENOMEM);
+ fscache_count_object(vcookie->cache);
+ trace_cachefiles_ref(object->debug_id, cookie->debug_id, 1,
+ cachefiles_obj_new);
+ return object;
}
/*
- * attempt to look up the nominated node in this cache
- * - return -ETIMEDOUT to be scheduled again
+ * Note that an object has been seen.
*/
-static int cachefiles_lookup_object(struct fscache_object *_object)
+void cachefiles_see_object(struct cachefiles_object *object,
+ enum cachefiles_obj_ref_trace why)
{
- struct cachefiles_lookup_data *lookup_data;
- struct cachefiles_object *parent, *object;
- struct cachefiles_cache *cache;
- const struct cred *saved_cred;
- int ret;
-
- _enter("{OBJ%x}", _object->debug_id);
-
- cache = container_of(_object->cache, struct cachefiles_cache, cache);
- parent = container_of(_object->parent,
- struct cachefiles_object, fscache);
- object = container_of(_object, struct cachefiles_object, fscache);
- lookup_data = object->lookup_data;
-
- ASSERTCMP(lookup_data, !=, NULL);
-
- /* look up the key, creating any missing bits */
- cachefiles_begin_secure(cache, &saved_cred);
- ret = cachefiles_walk_to_object(parent, object,
- lookup_data->key,
- lookup_data->auxdata);
- cachefiles_end_secure(cache, saved_cred);
-
- /* polish off by setting the attributes of non-index files */
- if (ret == 0 &&
- object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
- cachefiles_attr_changed(&object->fscache);
-
- if (ret < 0 && ret != -ETIMEDOUT) {
- if (ret != -ENOBUFS)
- pr_warn("Lookup failed error %d\n", ret);
- fscache_object_lookup_error(&object->fscache);
- }
-
- _leave(" [%d]", ret);
- return ret;
+ trace_cachefiles_ref(object->debug_id, object->cookie->debug_id,
+ refcount_read(&object->ref), why);
}
/*
- * indication of lookup completion
+ * Increment the usage count on an object.
*/
-static void cachefiles_lookup_complete(struct fscache_object *_object)
+struct cachefiles_object *cachefiles_grab_object(struct cachefiles_object *object,
+ enum cachefiles_obj_ref_trace why)
{
- struct cachefiles_object *object;
-
- object = container_of(_object, struct cachefiles_object, fscache);
-
- _enter("{OBJ%x,%p}", object->fscache.debug_id, object->lookup_data);
+ int r;
- if (object->lookup_data) {
- kfree(object->lookup_data->key);
- kfree(object->lookup_data->auxdata);
- kfree(object->lookup_data);
- object->lookup_data = NULL;
- }
+ __refcount_inc(&object->ref, &r);
+ trace_cachefiles_ref(object->debug_id, object->cookie->debug_id, r, why);
+ return object;
}
/*
- * increment the usage count on an inode object (may fail if unmounting)
+ * Dispose of a reference to an object.
*/
-static
-struct fscache_object *cachefiles_grab_object(struct fscache_object *_object,
- enum fscache_obj_ref_trace why)
+void cachefiles_put_object(struct cachefiles_object *object,
+ enum cachefiles_obj_ref_trace why)
{
- struct cachefiles_object *object =
- container_of(_object, struct cachefiles_object, fscache);
- int u;
+ unsigned int object_debug_id = object->debug_id;
+ unsigned int cookie_debug_id = object->cookie->debug_id;
+ struct fscache_cache *cache;
+ bool done;
+ int r;
+
+ done = __refcount_dec_and_test(&object->ref, &r);
+ trace_cachefiles_ref(object_debug_id, cookie_debug_id, r, why);
+ if (done) {
+ _debug("- kill object OBJ%x", object_debug_id);
+
+ ASSERTCMP(object->file, ==, NULL);
- _enter("{OBJ%x,%d}", _object->debug_id, atomic_read(&object->usage));
+ kfree(object->d_name);
-#ifdef CACHEFILES_DEBUG_SLAB
- ASSERT((atomic_read(&object->usage) & 0xffff0000) != 0x6b6b0000);
-#endif
+ cache = object->volume->cache->cache;
+ fscache_put_cookie(object->cookie, fscache_cookie_put_object);
+ object->cookie = NULL;
+ kmem_cache_free(cachefiles_object_jar, object);
+ fscache_uncount_object(cache);
+ }
- u = atomic_inc_return(&object->usage);
- trace_cachefiles_ref(object, _object->cookie,
- (enum cachefiles_obj_ref_trace)why, u);
- return &object->fscache;
+ _leave("");
}
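
Both the grab and put paths use the __refcount_*() variants so that the count observed by the atomic operation itself can be handed to the tracepoint; re-reading the counter afterwards could race with other references coming and going. An illustrative C11-atomics analogue of the dec-and-test half:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Analogue of __refcount_dec_and_test(): perform the atomic op once
 * and report the value it saw, so any logging is race-free. */
static bool ref_dec_and_test(atomic_int *ref, int *oldp)
{
	*oldp = atomic_fetch_sub(ref, 1);
	return *oldp == 1;	/* we dropped the last reference */
}

int main(void)
{
	atomic_int ref = 2;
	int old;

	while (!ref_dec_and_test(&ref, &old))
		printf("put: ref was %d\n", old);
	printf("last put: ref was %d, freeing\n", old);
	return 0;
}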
/*
- * update the auxiliary data for an object object on disk
+ * Adjust the size of a cache file if necessary to match the DIO size. We keep
+ * the EOF marker a multiple of DIO blocks so that we don't fall back to doing
+ * non-DIO for a partial block straddling the EOF, but we also have to be
+ * careful of someone expanding the file and accidentally accreting the
+ * padding.
*/
-static void cachefiles_update_object(struct fscache_object *_object)
+static int cachefiles_adjust_size(struct cachefiles_object *object)
{
- struct cachefiles_object *object;
- struct cachefiles_xattr *auxdata;
- struct cachefiles_cache *cache;
- struct fscache_cookie *cookie;
- const struct cred *saved_cred;
- const void *aux;
- unsigned auxlen;
+ struct iattr newattrs;
+ struct file *file = object->file;
+ uint64_t ni_size;
+ loff_t oi_size;
+ int ret;
- _enter("{OBJ%x}", _object->debug_id);
+ ni_size = object->cookie->object_size;
+ ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);
- object = container_of(_object, struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache, struct cachefiles_cache,
- cache);
+ _enter("{OBJ%x},[%llu]",
+ object->debug_id, (unsigned long long) ni_size);
- if (!fscache_use_cookie(_object)) {
- _leave(" [relinq]");
- return;
- }
+ if (!file)
+ return -ENOBUFS;
- cookie = object->fscache.cookie;
- auxlen = cookie->aux_len;
+ oi_size = i_size_read(file_inode(file));
+ if (oi_size == ni_size)
+ return 0;
- if (!auxlen) {
- fscache_unuse_cookie(_object);
- _leave(" [no aux]");
- return;
- }
+ inode_lock(file_inode(file));
- auxdata = kmalloc(2 + auxlen + 3, cachefiles_gfp);
- if (!auxdata) {
- fscache_unuse_cookie(_object);
- _leave(" [nomem]");
- return;
+ /* if there's an extension to a partial page at the end of the backing
+ * file, we need to discard the partial page so that we pick up new
+ * data after it */
+ if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
+ _debug("discard tail %llx", oi_size);
+ newattrs.ia_valid = ATTR_SIZE;
+ newattrs.ia_size = oi_size & PAGE_MASK;
+ ret = cachefiles_inject_remove_error();
+ if (ret == 0)
+ ret = notify_change(&init_user_ns, file->f_path.dentry,
+ &newattrs, NULL);
+ if (ret < 0)
+ goto truncate_failed;
}
- aux = (auxlen <= sizeof(cookie->inline_aux)) ?
- cookie->inline_aux : cookie->aux;
+ newattrs.ia_valid = ATTR_SIZE;
+ newattrs.ia_size = ni_size;
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
+ ret = notify_change(&init_user_ns, file->f_path.dentry,
+ &newattrs, NULL);
- memcpy(auxdata->data, aux, auxlen);
- fscache_unuse_cookie(_object);
+truncate_failed:
+ inode_unlock(file_inode(file));
- auxdata->len = auxlen + 1;
- auxdata->type = cookie->type;
+ if (ret < 0)
+ trace_cachefiles_io_error(NULL, file_inode(file), ret,
+ cachefiles_trace_notify_change_error);
+ if (ret == -EIO) {
+ cachefiles_io_error_obj(object, "Size set failed");
+ ret = -ENOBUFS;
+ }
- cachefiles_begin_secure(cache, &saved_cred);
- cachefiles_update_object_xattr(object, auxdata);
- cachefiles_end_secure(cache, saved_cred);
- kfree(auxdata);
- _leave("");
+ _leave(" = %d", ret);
+ return ret;
}
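
The size adjustment is simple arithmetic: the backing file's EOF is rounded up to a multiple of CACHEFILES_DIO_BLOCK_SIZE (4096, defined in internal.h later in this patch) so that a read of the final block never straddles EOF and drops back to buffered I/O. A minimal worked sketch of the rounding:

#include <stdint.h>
#include <stdio.h>

#define DIO_BLOCK_SIZE 4096	/* mirrors CACHEFILES_DIO_BLOCK_SIZE */

/* round_up() as the kernel macro behaves for power-of-two sizes */
static uint64_t round_up_pow2(uint64_t x, uint64_t size)
{
	return (x + size - 1) & ~(size - 1);
}

int main(void)
{
	uint64_t object_size = 10000;	/* hypothetical cookie->object_size */

	/* 10000 is padded out to 12288, three whole DIO blocks */
	printf("ni_size = %llu\n",
	       (unsigned long long)round_up_pow2(object_size, DIO_BLOCK_SIZE));
	return 0;
}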
/*
- * discard the resources pinned by an object and effect retirement if
- * requested
+ * Attempt to look up the nominated node in this cache
*/
-static void cachefiles_drop_object(struct fscache_object *_object)
+static bool cachefiles_lookup_cookie(struct fscache_cookie *cookie)
{
struct cachefiles_object *object;
- struct cachefiles_cache *cache;
+ struct cachefiles_cache *cache = cookie->volume->cache->cache_priv;
const struct cred *saved_cred;
- struct inode *inode;
- blkcnt_t i_blocks = 0;
+ bool success;
- ASSERT(_object);
+ object = cachefiles_alloc_object(cookie);
+ if (!object)
+ goto fail;
- object = container_of(_object, struct cachefiles_object, fscache);
+ _enter("{OBJ%x}", object->debug_id);
- _enter("{OBJ%x,%d}",
- object->fscache.debug_id, atomic_read(&object->usage));
+ if (!cachefiles_cook_key(object))
+ goto fail_put;
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
+ cookie->cache_priv = object;
-#ifdef CACHEFILES_DEBUG_SLAB
- ASSERT((atomic_read(&object->usage) & 0xffff0000) != 0x6b6b0000);
-#endif
+ cachefiles_begin_secure(cache, &saved_cred);
- /* We need to tidy the object up if we did in fact manage to open it.
- * It's possible for us to get here before the object is fully
- * initialised if the parent goes away or the object gets retired
- * before we set it up.
- */
- if (object->dentry) {
- /* delete retired objects */
- if (test_bit(FSCACHE_OBJECT_RETIRED, &object->fscache.flags) &&
- _object != cache->cache.fsdef
- ) {
- _debug("- retire object OBJ%x", object->fscache.debug_id);
- inode = d_backing_inode(object->dentry);
- if (inode)
- i_blocks = inode->i_blocks;
-
- cachefiles_begin_secure(cache, &saved_cred);
- cachefiles_delete_object(cache, object);
- cachefiles_end_secure(cache, saved_cred);
- }
+ success = cachefiles_look_up_object(object);
+ if (!success)
+ goto fail_withdraw;
- /* close the filesystem stuff attached to the object */
- if (object->backer != object->dentry)
- dput(object->backer);
- object->backer = NULL;
- }
+ cachefiles_see_object(object, cachefiles_obj_see_lookup_cookie);
+
+ spin_lock(&cache->object_list_lock);
+ list_add(&object->cache_link, &cache->object_list);
+ spin_unlock(&cache->object_list_lock);
+ cachefiles_adjust_size(object);
- /* note that the object is now inactive */
- if (test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags))
- cachefiles_mark_object_inactive(cache, object, i_blocks);
+ cachefiles_end_secure(cache, saved_cred);
+ _leave(" = t");
+ return true;
- dput(object->dentry);
- object->dentry = NULL;
+fail_withdraw:
+ cachefiles_end_secure(cache, saved_cred);
+ cachefiles_see_object(object, cachefiles_obj_see_lookup_failed);
+ fscache_caching_failed(cookie);
+ _debug("failed c=%08x o=%08x", cookie->debug_id, object->debug_id);
+ /* The caller holds an access count on the cookie, so we need them to
+ * drop it before we can withdraw the object.
+ */
+ return false;
- _leave("");
+fail_put:
+ cachefiles_put_object(object, cachefiles_obj_put_alloc_fail);
+fail:
+ return false;
}
/*
- * dispose of a reference to an object
+ * Shorten the backing object to discard any dirty data and free up
+ * any unused granules.
*/
-void cachefiles_put_object(struct fscache_object *_object,
- enum fscache_obj_ref_trace why)
+static bool cachefiles_shorten_object(struct cachefiles_object *object,
+ struct file *file, loff_t new_size)
{
- struct cachefiles_object *object;
- struct fscache_cache *cache;
- int u;
-
- ASSERT(_object);
-
- object = container_of(_object, struct cachefiles_object, fscache);
-
- _enter("{OBJ%x,%d}",
- object->fscache.debug_id, atomic_read(&object->usage));
-
-#ifdef CACHEFILES_DEBUG_SLAB
- ASSERT((atomic_read(&object->usage) & 0xffff0000) != 0x6b6b0000);
-#endif
-
- ASSERTIFCMP(object->fscache.parent,
- object->fscache.parent->n_children, >, 0);
-
- u = atomic_dec_return(&object->usage);
- trace_cachefiles_ref(object, _object->cookie,
- (enum cachefiles_obj_ref_trace)why, u);
- ASSERTCMP(u, !=, -1);
- if (u == 0) {
- _debug("- kill object OBJ%x", object->fscache.debug_id);
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct inode *inode = file_inode(file);
+ loff_t i_size, dio_size;
+ int ret;
- ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags));
- ASSERTCMP(object->fscache.parent, ==, NULL);
- ASSERTCMP(object->backer, ==, NULL);
- ASSERTCMP(object->dentry, ==, NULL);
- ASSERTCMP(object->fscache.n_ops, ==, 0);
- ASSERTCMP(object->fscache.n_children, ==, 0);
+ dio_size = round_up(new_size, CACHEFILES_DIO_BLOCK_SIZE);
+ i_size = i_size_read(inode);
+
+ trace_cachefiles_trunc(object, inode, i_size, dio_size,
+ cachefiles_trunc_shrink);
+ ret = cachefiles_inject_remove_error();
+ if (ret == 0)
+ ret = vfs_truncate(&file->f_path, dio_size);
+ if (ret < 0) {
+ trace_cachefiles_io_error(object, file_inode(file), ret,
+ cachefiles_trace_trunc_error);
+ cachefiles_io_error_obj(object, "Trunc-to-size failed %d", ret);
+ cachefiles_remove_object_xattr(cache, object, file->f_path.dentry);
+ return false;
+ }
- if (object->lookup_data) {
- kfree(object->lookup_data->key);
- kfree(object->lookup_data->auxdata);
- kfree(object->lookup_data);
- object->lookup_data = NULL;
+ if (new_size < dio_size) {
+ trace_cachefiles_trunc(object, inode, dio_size, new_size,
+ cachefiles_trunc_dio_adjust);
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
+ ret = vfs_fallocate(file, FALLOC_FL_ZERO_RANGE,
+ new_size, dio_size);
+ if (ret < 0) {
+ trace_cachefiles_io_error(object, file_inode(file), ret,
+ cachefiles_trace_fallocate_error);
+ cachefiles_io_error_obj(object, "Trunc-to-dio-size failed %d", ret);
+ cachefiles_remove_object_xattr(cache, object, file->f_path.dentry);
+ return false;
}
-
- cache = object->fscache.cache;
- fscache_object_destroy(&object->fscache);
- kmem_cache_free(cachefiles_object_jar, object);
- fscache_object_destroyed(cache);
}
- _leave("");
+ return true;
}
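
When the requested size is not itself DIO-aligned, the function truncates to the rounded-up size and then zero-fills the tail padding with FALLOC_FL_ZERO_RANGE so stale data cannot leak back in through the padding. The same flag exists in user-space fallocate(); a sketch with a made-up path and sizes:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	off_t new_size = 10000, dio_size = 12288;	/* hypothetical */
	int fd = open("/tmp/cache-demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0 || ftruncate(fd, dio_size) < 0)
		return 1;
	/* Zero the span between the logical EOF and the DIO-rounded EOF
	 * without changing the file size, as vfs_fallocate() does above. */
	if (fallocate(fd, FALLOC_FL_ZERO_RANGE, new_size,
		      dio_size - new_size) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}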
/*
- * sync a cache
+ * Resize the backing object.
*/
-static void cachefiles_sync_cache(struct fscache_cache *_cache)
+static void cachefiles_resize_cookie(struct netfs_cache_resources *cres,
+ loff_t new_size)
{
- struct cachefiles_cache *cache;
+ struct cachefiles_object *object = cachefiles_cres_object(cres);
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct fscache_cookie *cookie = object->cookie;
const struct cred *saved_cred;
- int ret;
+ struct file *file = cachefiles_cres_file(cres);
+ loff_t old_size = cookie->object_size;
- _enter("%s", _cache->tag->name);
+ _enter("%llu->%llu", old_size, new_size);
- cache = container_of(_cache, struct cachefiles_cache, cache);
-
- /* make sure all pages pinned by operations on behalf of the netfs are
- * written to disc */
- cachefiles_begin_secure(cache, &saved_cred);
- down_read(&cache->mnt->mnt_sb->s_umount);
- ret = sync_filesystem(cache->mnt->mnt_sb);
- up_read(&cache->mnt->mnt_sb->s_umount);
- cachefiles_end_secure(cache, saved_cred);
+ if (new_size < old_size) {
+ cachefiles_begin_secure(cache, &saved_cred);
+ cachefiles_shorten_object(object, file, new_size);
+ cachefiles_end_secure(cache, saved_cred);
+ object->cookie->object_size = new_size;
+ return;
+ }
- if (ret == -EIO)
- cachefiles_io_error(cache,
- "Attempt to sync backing fs superblock"
- " returned error %d",
- ret);
+	/* The file is being expanded. We don't need to do anything in
+	 * particular. cookie->initial_size doesn't change and so the point
+	 * before which we have to download doesn't change.
+ */
+ cookie->object_size = new_size;
}
/*
- * check if the backing cache is updated to FS-Cache
- * - called by FS-Cache when evaluates if need to invalidate the cache
+ * Commit changes to the object as we drop it.
*/
-static int cachefiles_check_consistency(struct fscache_operation *op)
+static void cachefiles_commit_object(struct cachefiles_object *object,
+ struct cachefiles_cache *cache)
{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- const struct cred *saved_cred;
- int ret;
+ bool update = false;
- _enter("{OBJ%x}", op->object->debug_id);
+ if (test_and_clear_bit(FSCACHE_COOKIE_LOCAL_WRITE, &object->cookie->flags))
+ update = true;
+ if (test_and_clear_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags))
+ update = true;
+ if (update)
+ cachefiles_set_object_xattr(object);
- object = container_of(op->object, struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
+ if (test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags))
+ cachefiles_commit_tmpfile(cache, object);
+}
- cachefiles_begin_secure(cache, &saved_cred);
- ret = cachefiles_check_auxdata(object);
- cachefiles_end_secure(cache, saved_cred);
+/*
+ * Finalise an object and close the VFS structs that we have.
+ */
+static void cachefiles_clean_up_object(struct cachefiles_object *object,
+ struct cachefiles_cache *cache)
+{
+ if (test_bit(FSCACHE_COOKIE_RETIRED, &object->cookie->flags)) {
+ if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
+ cachefiles_see_object(object, cachefiles_obj_see_clean_delete);
+ _debug("- inval object OBJ%x", object->debug_id);
+ cachefiles_delete_object(object, FSCACHE_OBJECT_WAS_RETIRED);
+ } else {
+ cachefiles_see_object(object, cachefiles_obj_see_clean_drop_tmp);
+ _debug("- inval object OBJ%x tmpfile", object->debug_id);
+ }
+ } else {
+ cachefiles_see_object(object, cachefiles_obj_see_clean_commit);
+ cachefiles_commit_object(object, cache);
+ }
- _leave(" = %d", ret);
- return ret;
+ cachefiles_unmark_inode_in_use(object, object->file);
+ if (object->file) {
+ fput(object->file);
+ object->file = NULL;
+ }
}
/*
- * notification the attributes on an object have changed
- * - called with reads/writes excluded by FS-Cache
+ * Withdraw caching for a cookie.
*/
-static int cachefiles_attr_changed(struct fscache_object *_object)
+static void cachefiles_withdraw_cookie(struct fscache_cookie *cookie)
{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
+ struct cachefiles_object *object = cookie->cache_priv;
+ struct cachefiles_cache *cache = object->volume->cache;
const struct cred *saved_cred;
- struct iattr newattrs;
- uint64_t ni_size;
- loff_t oi_size;
- int ret;
-
- ni_size = _object->store_limit_l;
-
- _enter("{OBJ%x},[%llu]",
- _object->debug_id, (unsigned long long) ni_size);
-
- object = container_of(_object, struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- if (ni_size == object->i_size)
- return 0;
-
- if (!object->backer)
- return -ENOBUFS;
- ASSERT(d_is_reg(object->backer));
+ _enter("o=%x", object->debug_id);
+ cachefiles_see_object(object, cachefiles_obj_see_withdraw_cookie);
- fscache_set_store_limit(&object->fscache, ni_size);
-
- oi_size = i_size_read(d_backing_inode(object->backer));
- if (oi_size == ni_size)
- return 0;
-
- cachefiles_begin_secure(cache, &saved_cred);
- inode_lock(d_inode(object->backer));
-
- /* if there's an extension to a partial page at the end of the backing
- * file, we need to discard the partial page so that we pick up new
- * data after it */
- if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
- _debug("discard tail %llx", oi_size);
- newattrs.ia_valid = ATTR_SIZE;
- newattrs.ia_size = oi_size & PAGE_MASK;
- ret = notify_change(&init_user_ns, object->backer, &newattrs, NULL);
- if (ret < 0)
- goto truncate_failed;
+ if (!list_empty(&object->cache_link)) {
+ spin_lock(&cache->object_list_lock);
+ cachefiles_see_object(object, cachefiles_obj_see_withdrawal);
+ list_del_init(&object->cache_link);
+ spin_unlock(&cache->object_list_lock);
}
- newattrs.ia_valid = ATTR_SIZE;
- newattrs.ia_size = ni_size;
- ret = notify_change(&init_user_ns, object->backer, &newattrs, NULL);
-
-truncate_failed:
- inode_unlock(d_inode(object->backer));
- cachefiles_end_secure(cache, saved_cred);
-
- if (ret == -EIO) {
- fscache_set_store_limit(&object->fscache, 0);
- cachefiles_io_error_obj(object, "Size set failed");
- ret = -ENOBUFS;
+ if (object->file) {
+ cachefiles_begin_secure(cache, &saved_cred);
+ cachefiles_clean_up_object(object, cache);
+ cachefiles_end_secure(cache, saved_cred);
}
- _leave(" = %d", ret);
- return ret;
+ cookie->cache_priv = NULL;
+ cachefiles_put_object(object, cachefiles_obj_put_detach);
}
/*
- * Invalidate an object
+ * Invalidate the storage associated with a cookie.
*/
-static void cachefiles_invalidate_object(struct fscache_operation *op)
+static bool cachefiles_invalidate_cookie(struct fscache_cookie *cookie)
{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- const struct cred *saved_cred;
- struct path path;
- uint64_t ni_size;
- int ret;
+ struct cachefiles_object *object = cookie->cache_priv;
+ struct file *new_file, *old_file;
+ bool old_tmpfile;
- object = container_of(op->object, struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
+ _enter("o=%x,[%llu]", object->debug_id, object->cookie->object_size);
- ni_size = op->object->store_limit_l;
+ old_tmpfile = test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
- _enter("{OBJ%x},[%llu]",
- op->object->debug_id, (unsigned long long)ni_size);
+ if (!object->file) {
+ fscache_resume_after_invalidation(cookie);
+ _leave(" = t [light]");
+ return true;
+ }
- if (object->backer) {
- ASSERT(d_is_reg(object->backer));
+ new_file = cachefiles_create_tmpfile(object);
+ if (IS_ERR(new_file))
+ goto failed;
- fscache_set_store_limit(&object->fscache, ni_size);
+ /* Substitute the VFS target */
+ _debug("sub");
+ spin_lock(&object->lock);
- path.dentry = object->backer;
- path.mnt = cache->mnt;
+ old_file = object->file;
+ object->file = new_file;
+ object->content_info = CACHEFILES_CONTENT_NO_DATA;
+ set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
+ set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
- cachefiles_begin_secure(cache, &saved_cred);
- ret = vfs_truncate(&path, 0);
- if (ret == 0)
- ret = vfs_truncate(&path, ni_size);
- cachefiles_end_secure(cache, saved_cred);
+ spin_unlock(&object->lock);
+ _debug("subbed");
+
+ /* Allow I/O to take place again */
+ fscache_resume_after_invalidation(cookie);
+
+ if (old_file) {
+ if (!old_tmpfile) {
+ struct cachefiles_volume *volume = object->volume;
+ struct dentry *fan = volume->fanout[(u8)cookie->key_hash];
- if (ret != 0) {
- fscache_set_store_limit(&object->fscache, 0);
- if (ret == -EIO)
- cachefiles_io_error_obj(object,
- "Invalidate failed");
+ inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
+ cachefiles_bury_object(volume->cache, object, fan,
+ old_file->f_path.dentry,
+ FSCACHE_OBJECT_INVALIDATED);
}
+ fput(old_file);
}
- fscache_op_complete(op, true);
- _leave("");
-}
+ _leave(" = t");
+ return true;
-/*
- * dissociate a cache from all the pages it was backing
- */
-static void cachefiles_dissociate_pages(struct fscache_cache *cache)
-{
- _enter("");
+failed:
+ _leave(" = f");
+ return false;
}
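
The invalidation strategy deserves a note: rather than truncating in place, a fresh anonymous tmpfile is swapped in under object->lock, so I/O already in flight against the old file stays coherent while new I/O sees empty content. The user-space analogue of that anonymous file is O_TMPFILE; a hedged sketch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Create an unlinked file inside a (hypothetical) cache dir -
	 * the user-space cousin of the tmpfile swapped in on
	 * invalidation; it has no name unless it is later linked in. */
	int fd = openat(AT_FDCWD, "/tmp", O_TMPFILE | O_RDWR, 0600);

	if (fd < 0) {
		perror("O_TMPFILE");
		return 1;
	}
	dprintf(fd, "fresh, empty cache contents\n");
	close(fd);	/* last reference gone: the file simply vanishes */
	return 0;
}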
const struct fscache_cache_ops cachefiles_cache_ops = {
.name = "cachefiles",
- .alloc_object = cachefiles_alloc_object,
- .lookup_object = cachefiles_lookup_object,
- .lookup_complete = cachefiles_lookup_complete,
- .grab_object = cachefiles_grab_object,
- .update_object = cachefiles_update_object,
- .invalidate_object = cachefiles_invalidate_object,
- .drop_object = cachefiles_drop_object,
- .put_object = cachefiles_put_object,
- .sync_cache = cachefiles_sync_cache,
- .attr_changed = cachefiles_attr_changed,
- .read_or_alloc_page = cachefiles_read_or_alloc_page,
- .read_or_alloc_pages = cachefiles_read_or_alloc_pages,
- .allocate_page = cachefiles_allocate_page,
- .allocate_pages = cachefiles_allocate_pages,
- .write_page = cachefiles_write_page,
- .uncache_page = cachefiles_uncache_page,
- .dissociate_pages = cachefiles_dissociate_pages,
- .check_consistency = cachefiles_check_consistency,
- .begin_read_operation = cachefiles_begin_read_operation,
+ .acquire_volume = cachefiles_acquire_volume,
+ .free_volume = cachefiles_free_volume,
+ .lookup_cookie = cachefiles_lookup_cookie,
+ .withdraw_cookie = cachefiles_withdraw_cookie,
+ .invalidate_cookie = cachefiles_invalidate_cookie,
+ .begin_operation = cachefiles_begin_operation,
+ .resize_cookie = cachefiles_resize_cookie,
+ .prepare_to_write = cachefiles_prepare_to_write,
};
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 0a511c36dab8..c793d33b0224 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General netfs cache on cache files internal defs
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
@@ -13,58 +13,72 @@
#include <linux/fscache-cache.h>
-#include <linux/timer.h>
-#include <linux/wait_bit.h>
#include <linux/cred.h>
-#include <linux/workqueue.h>
#include <linux/security.h>
+#define CACHEFILES_DIO_BLOCK_SIZE 4096
+
struct cachefiles_cache;
struct cachefiles_object;
-extern unsigned cachefiles_debug;
-#define CACHEFILES_DEBUG_KENTER 1
-#define CACHEFILES_DEBUG_KLEAVE 2
-#define CACHEFILES_DEBUG_KDEBUG 4
+enum cachefiles_content {
+ /* These values are saved on disk */
+ CACHEFILES_CONTENT_NO_DATA = 0, /* No content stored */
+ CACHEFILES_CONTENT_SINGLE = 1, /* Content is monolithic, all is present */
+ CACHEFILES_CONTENT_ALL = 2, /* Content is all present, no map */
+ CACHEFILES_CONTENT_BACKFS_MAP = 3, /* Content is piecemeal, mapped through backing fs */
+ CACHEFILES_CONTENT_DIRTY = 4, /* Content is dirty (only seen on disk) */
+ nr__cachefiles_content
+};
-#define cachefiles_gfp (__GFP_RECLAIM | __GFP_NORETRY | __GFP_NOMEMALLOC)
+/*
+ * Cached volume representation.
+ */
+struct cachefiles_volume {
+ struct cachefiles_cache *cache;
+ struct list_head cache_link; /* Link in cache->volumes */
+ struct fscache_volume *vcookie; /* The netfs's representation */
+ struct dentry *dentry; /* The volume dentry */
+ struct dentry *fanout[256]; /* Fanout subdirs */
+};
/*
- * node records
+ * Backing file state.
*/
struct cachefiles_object {
- struct fscache_object fscache; /* fscache handle */
- struct cachefiles_lookup_data *lookup_data; /* cached lookup data */
- struct dentry *dentry; /* the file/dir representing this object */
- struct dentry *backer; /* backing file */
- loff_t i_size; /* object size */
+ struct fscache_cookie *cookie; /* Netfs data storage object cookie */
+ struct cachefiles_volume *volume; /* Cache volume that holds this object */
+ struct list_head cache_link; /* Link in cache->*_list */
+ struct file *file; /* The file representing this object */
+ char *d_name; /* Backing file name */
+ int debug_id;
+ spinlock_t lock;
+ refcount_t ref;
+ u8 d_name_len; /* Length of filename */
+ enum cachefiles_content content_info:8; /* Info about content presence */
unsigned long flags;
-#define CACHEFILES_OBJECT_ACTIVE 0 /* T if marked active */
- atomic_t usage; /* object usage count */
- uint8_t type; /* object type */
- uint8_t new; /* T if object new */
- spinlock_t work_lock;
- struct rb_node active_node; /* link in active tree (dentry is key) */
+#define CACHEFILES_OBJECT_USING_TMPFILE 0 /* Have an unlinked tmpfile */
};
-extern struct kmem_cache *cachefiles_object_jar;
-
/*
* Cache files cache definition
*/
struct cachefiles_cache {
- struct fscache_cache cache; /* FS-Cache record */
+ struct fscache_cache *cache; /* Cache cookie */
struct vfsmount *mnt; /* mountpoint holding the cache */
+ struct dentry *store; /* Directory into which live objects go */
struct dentry *graveyard; /* directory into which dead objects go */
struct file *cachefilesd; /* manager daemon handle */
+ struct list_head volumes; /* List of volume objects */
+ struct list_head object_list; /* List of active objects */
+ spinlock_t object_list_lock; /* Lock for volumes and object_list */
const struct cred *cache_cred; /* security override for accessing cache */
struct mutex daemon_mutex; /* command serialisation mutex */
wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
- struct rb_root active_nodes; /* active nodes (can't be culled) */
- rwlock_t active_lock; /* lock for active_nodes */
atomic_t gravecounter; /* graveyard uniquifier */
atomic_t f_released; /* number of objects released lately */
atomic_long_t b_released; /* number of blocks released lately */
+ atomic_long_t b_writing; /* Number of blocks being written */
unsigned frun_percent; /* when to stop culling (% files) */
unsigned fcull_percent; /* when to start culling (% files) */
unsigned fstop_percent; /* when to stop allocating (% files) */
@@ -72,7 +86,7 @@ struct cachefiles_cache {
unsigned bcull_percent; /* when to start culling (% blocks) */
unsigned bstop_percent; /* when to stop allocating (% blocks) */
unsigned bsize; /* cache's block size */
- unsigned bshift; /* min(ilog2(PAGE_SIZE / bsize), 0) */
+ unsigned bshift; /* ilog2(bsize) */
uint64_t frun; /* when to stop culling */
uint64_t fcull; /* when to start culling */
uint64_t fstop; /* when to stop allocating */
@@ -89,38 +103,19 @@ struct cachefiles_cache {
char *tag; /* cache binding tag */
};
-/*
- * backing file read tracking
- */
-struct cachefiles_one_read {
- wait_queue_entry_t monitor; /* link into monitored waitqueue */
- struct page *back_page; /* backing file page we're waiting for */
- struct page *netfs_page; /* netfs page we're going to fill */
- struct fscache_retrieval *op; /* retrieval op covering this */
- struct list_head op_link; /* link in op's todo list */
-};
-
-/*
- * backing file write tracking
- */
-struct cachefiles_one_write {
- struct page *netfs_page; /* netfs page to copy */
- struct cachefiles_object *object;
- struct list_head obj_link; /* link in object's lists */
- fscache_rw_complete_t end_io_func;
- void *context;
-};
+#include <trace/events/cachefiles.h>
-/*
- * auxiliary data xattr buffer
- */
-struct cachefiles_xattr {
- uint16_t len;
- uint8_t type;
- uint8_t data[];
-};
+static inline
+struct file *cachefiles_cres_file(struct netfs_cache_resources *cres)
+{
+ return cres->cache_priv2;
+}
-#include <trace/events/cachefiles.h>
+static inline
+struct cachefiles_object *cachefiles_cres_object(struct netfs_cache_resources *cres)
+{
+ return fscache_cres_cookie(cres)->cache_priv;
+}
/*
* note change of state for daemon
@@ -132,74 +127,118 @@ static inline void cachefiles_state_changed(struct cachefiles_cache *cache)
}
/*
- * bind.c
+ * cache.c
*/
-extern int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args);
-extern void cachefiles_daemon_unbind(struct cachefiles_cache *cache);
+extern int cachefiles_add_cache(struct cachefiles_cache *cache);
+extern void cachefiles_withdraw_cache(struct cachefiles_cache *cache);
+
+enum cachefiles_has_space_for {
+ cachefiles_has_space_check,
+ cachefiles_has_space_for_write,
+ cachefiles_has_space_for_create,
+};
+extern int cachefiles_has_space(struct cachefiles_cache *cache,
+ unsigned fnr, unsigned bnr,
+ enum cachefiles_has_space_for reason);
/*
* daemon.c
*/
extern const struct file_operations cachefiles_daemon_fops;
-extern int cachefiles_has_space(struct cachefiles_cache *cache,
- unsigned fnr, unsigned bnr);
+/*
+ * error_inject.c
+ */
+#ifdef CONFIG_CACHEFILES_ERROR_INJECTION
+extern unsigned int cachefiles_error_injection_state;
+extern int cachefiles_register_error_injection(void);
+extern void cachefiles_unregister_error_injection(void);
+
+#else
+#define cachefiles_error_injection_state 0
+
+static inline int cachefiles_register_error_injection(void)
+{
+ return 0;
+}
+
+static inline void cachefiles_unregister_error_injection(void)
+{
+}
+#endif
+
+static inline int cachefiles_inject_read_error(void)
+{
+ return cachefiles_error_injection_state & 2 ? -EIO : 0;
+}
+
+static inline int cachefiles_inject_write_error(void)
+{
+ return cachefiles_error_injection_state & 2 ? -EIO :
+ cachefiles_error_injection_state & 1 ? -ENOSPC :
+ 0;
+}
+
+static inline int cachefiles_inject_remove_error(void)
+{
+ return cachefiles_error_injection_state & 2 ? -EIO : 0;
+}
/*
* interface.c
*/
extern const struct fscache_cache_ops cachefiles_cache_ops;
+extern void cachefiles_see_object(struct cachefiles_object *object,
+ enum cachefiles_obj_ref_trace why);
+extern struct cachefiles_object *cachefiles_grab_object(struct cachefiles_object *object,
+ enum cachefiles_obj_ref_trace why);
+extern void cachefiles_put_object(struct cachefiles_object *object,
+ enum cachefiles_obj_ref_trace why);
-void cachefiles_put_object(struct fscache_object *_object,
- enum fscache_obj_ref_trace why);
+/*
+ * io.c
+ */
+extern bool cachefiles_begin_operation(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state);
/*
* key.c
*/
-extern char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type);
+extern bool cachefiles_cook_key(struct cachefiles_object *object);
+
+/*
+ * main.c
+ */
+extern struct kmem_cache *cachefiles_object_jar;
/*
* namei.c
*/
-extern void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
- struct cachefiles_object *object,
- blkcnt_t i_blocks);
-extern int cachefiles_delete_object(struct cachefiles_cache *cache,
- struct cachefiles_object *object);
-extern int cachefiles_walk_to_object(struct cachefiles_object *parent,
- struct cachefiles_object *object,
- const char *key,
- struct cachefiles_xattr *auxdata);
+extern void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
+ struct file *file);
+extern int cachefiles_bury_object(struct cachefiles_cache *cache,
+ struct cachefiles_object *object,
+ struct dentry *dir,
+ struct dentry *rep,
+ enum fscache_why_object_killed why);
+extern int cachefiles_delete_object(struct cachefiles_object *object,
+ enum fscache_why_object_killed why);
+extern bool cachefiles_look_up_object(struct cachefiles_object *object);
extern struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
struct dentry *dir,
- const char *name);
+ const char *name,
+ bool *_is_new);
+extern void cachefiles_put_directory(struct dentry *dir);
extern int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
char *filename);
extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
struct dentry *dir, char *filename);
-
-/*
- * rdwr.c
- */
-extern int cachefiles_read_or_alloc_page(struct fscache_retrieval *,
- struct page *, gfp_t);
-extern int cachefiles_read_or_alloc_pages(struct fscache_retrieval *,
- struct list_head *, unsigned *,
- gfp_t);
-extern int cachefiles_allocate_page(struct fscache_retrieval *, struct page *,
- gfp_t);
-extern int cachefiles_allocate_pages(struct fscache_retrieval *,
- struct list_head *, unsigned *, gfp_t);
-extern int cachefiles_write_page(struct fscache_storage *, struct page *);
-extern void cachefiles_uncache_page(struct fscache_object *, struct page *);
-
-/*
- * rdwr2.c
- */
-extern int cachefiles_begin_read_operation(struct netfs_read_request *,
- struct fscache_retrieval *);
+extern struct file *cachefiles_create_tmpfile(struct cachefiles_object *object);
+extern bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
+ struct cachefiles_object *object);
/*
* security.c
@@ -222,28 +261,32 @@ static inline void cachefiles_end_secure(struct cachefiles_cache *cache,
}
/*
+ * volume.c
+ */
+void cachefiles_acquire_volume(struct fscache_volume *volume);
+void cachefiles_free_volume(struct fscache_volume *volume);
+void cachefiles_withdraw_volume(struct cachefiles_volume *volume);
+
+/*
* xattr.c
*/
-extern int cachefiles_check_object_type(struct cachefiles_object *object);
-extern int cachefiles_set_object_xattr(struct cachefiles_object *object,
- struct cachefiles_xattr *auxdata);
-extern int cachefiles_update_object_xattr(struct cachefiles_object *object,
- struct cachefiles_xattr *auxdata);
-extern int cachefiles_check_auxdata(struct cachefiles_object *object);
-extern int cachefiles_check_object_xattr(struct cachefiles_object *object,
- struct cachefiles_xattr *auxdata);
+extern int cachefiles_set_object_xattr(struct cachefiles_object *object);
+extern int cachefiles_check_auxdata(struct cachefiles_object *object,
+ struct file *file);
extern int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
+ struct cachefiles_object *object,
struct dentry *dentry);
-
+extern void cachefiles_prepare_to_write(struct fscache_cookie *cookie);
+extern bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume);
+extern int cachefiles_check_volume_xattr(struct cachefiles_volume *volume);
/*
- * error handling
+ * Error handling
*/
-
#define cachefiles_io_error(___cache, FMT, ...) \
do { \
pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__); \
- fscache_io_error(&(___cache)->cache); \
+ fscache_io_error((___cache)->cache); \
set_bit(CACHEFILES_DEAD, &(___cache)->flags); \
} while (0)
@@ -251,15 +294,20 @@ do { \
do { \
struct cachefiles_cache *___cache; \
\
- ___cache = container_of((object)->fscache.cache, \
- struct cachefiles_cache, cache); \
- cachefiles_io_error(___cache, FMT, ##__VA_ARGS__); \
+ ___cache = (object)->volume->cache; \
+ cachefiles_io_error(___cache, FMT " [o=%08x]", ##__VA_ARGS__, \
+ (object)->debug_id); \
} while (0)
/*
- * debug tracing
+ * Debug tracing
*/
+extern unsigned cachefiles_debug;
+#define CACHEFILES_DEBUG_KENTER 1
+#define CACHEFILES_DEBUG_KLEAVE 2
+#define CACHEFILES_DEBUG_KDEBUG 4
+
#define dbgprintk(FMT, ...) \
printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c
index effe37ef8629..753986ea1583 100644
--- a/fs/cachefiles/io.c
+++ b/fs/cachefiles/io.c
@@ -9,8 +9,9 @@
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/uio.h>
+#include <linux/falloc.h>
#include <linux/sched/mm.h>
-#include <linux/netfs.h>
+#include <trace/events/fscache.h>
#include "internal.h"
struct cachefiles_kiocb {
@@ -21,14 +22,18 @@ struct cachefiles_kiocb {
size_t skipped;
size_t len;
};
+ struct cachefiles_object *object;
netfs_io_terminated_t term_func;
void *term_func_priv;
bool was_async;
+ unsigned int inval_counter; /* Copy of cookie->inval_counter */
+ u64 b_writing;
};
static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki)
{
if (refcount_dec_and_test(&ki->ki_refcnt)) {
+ cachefiles_put_object(ki->object, cachefiles_obj_put_ioreq);
fput(ki->iocb.ki_filp);
kfree(ki);
}
@@ -40,12 +45,22 @@ static inline void cachefiles_put_kiocb(struct cachefiles_kiocb *ki)
static void cachefiles_read_complete(struct kiocb *iocb, long ret)
{
struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
+ struct inode *inode = file_inode(ki->iocb.ki_filp);
_enter("%ld", ret);
+ if (ret < 0)
+ trace_cachefiles_io_error(ki->object, inode, ret,
+ cachefiles_trace_read_error);
+
if (ki->term_func) {
- if (ret >= 0)
- ret += ki->skipped;
+ if (ret >= 0) {
+ if (ki->object->cookie->inval_counter == ki->inval_counter)
+ ki->skipped += ret;
+ else
+ ret = -ESTALE;
+ }
+
ki->term_func(ki->term_func_priv, ret, ki->was_async);
}
@@ -58,16 +73,24 @@ static void cachefiles_read_complete(struct kiocb *iocb, long ret)
static int cachefiles_read(struct netfs_cache_resources *cres,
loff_t start_pos,
struct iov_iter *iter,
- bool seek_data,
+ enum netfs_read_from_hole read_hole,
netfs_io_terminated_t term_func,
void *term_func_priv)
{
+ struct cachefiles_object *object;
struct cachefiles_kiocb *ki;
- struct file *file = cres->cache_priv2;
+ struct file *file;
unsigned int old_nofs;
ssize_t ret = -ENOBUFS;
size_t len = iov_iter_count(iter), skipped = 0;
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
+ goto presubmission_error;
+
+ fscache_count_read();
+ object = cachefiles_cres_object(cres);
+ file = cachefiles_cres_file(cres);
+
_enter("%pD,%li,%llx,%zx/%llx",
file, file_inode(file)->i_ino, start_pos, len,
i_size_read(file_inode(file)));
@@ -75,10 +98,12 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
/* If the caller asked us to seek for data before doing the read, then
* we should do that now. If we find a gap, we fill it with zeros.
*/
- if (seek_data) {
+ if (read_hole != NETFS_READ_HOLE_IGNORE) {
loff_t off = start_pos, off2;
- off2 = vfs_llseek(file, off, SEEK_DATA);
+ off2 = cachefiles_inject_read_error();
+ if (off2 == 0)
+ off2 = vfs_llseek(file, off, SEEK_DATA);
if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO && off2 != -ENXIO) {
skipped = 0;
ret = off2;
@@ -90,6 +115,10 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
* in the region, so clear the rest of the buffer and
* return success.
*/
+ ret = -ENODATA;
+ if (read_hole == NETFS_READ_HOLE_FAIL)
+ goto presubmission_error;
+
iov_iter_zero(len, iter);
skipped = len;
ret = 0;
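The read_hole argument replaces the old seek_data boolean with a three-way policy: NETFS_READ_HOLE_IGNORE reads straight through, NETFS_READ_HOLE_FAIL converts a hole into an error, and the remaining case zero-fills the gap that SEEK_DATA reveals. A simplified, runnable userspace analogue (for brevity it zero-fills the whole buffer rather than just the leading gap, unlike the kernel code above):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

enum read_hole { HOLE_IGNORE, HOLE_ZERO, HOLE_FAIL };

/* Read @len bytes at @pos, applying the given hole policy. */
static ssize_t probe_read(int fd, off_t pos, char *buf, size_t len,
			  enum read_hole policy)
{
	off_t data = lseek(fd, pos, SEEK_DATA);

	if (policy != HOLE_IGNORE && data != pos) {
		/* @pos sits in a hole (or beyond EOF, where ENXIO is set) */
		if (policy == HOLE_FAIL)
			return -1;
		memset(buf, 0, len);	/* zero-fill the gap */
		return len;
	}
	return pread(fd, buf, len, pos);
}

int main(void)
{
	char buf[16];
	int fd = open("sparse.tmp", O_RDWR | O_CREAT | O_TRUNC, 0600);

	if (pwrite(fd, "data", 4, 1 << 16) != 4)	/* data after a 64KiB hole */
		perror("pwrite");
	printf("got %zd bytes\n", probe_read(fd, 0, buf, sizeof(buf), HOLE_ZERO));
	close(fd);
	unlink("sparse.tmp");
	return 0;
}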
@@ -100,7 +129,7 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
iov_iter_zero(skipped, iter);
}
- ret = -ENOBUFS;
+ ret = -ENOMEM;
ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
if (!ki)
goto presubmission_error;
@@ -112,6 +141,8 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
ki->iocb.ki_hint = ki_hint_validate(file_write_hint(file));
ki->iocb.ki_ioprio = get_current_ioprio();
ki->skipped = skipped;
+ ki->object = object;
+ ki->inval_counter = cres->inval_counter;
ki->term_func = term_func;
ki->term_func_priv = term_func_priv;
ki->was_async = true;
@@ -120,9 +151,13 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
ki->iocb.ki_complete = cachefiles_read_complete;
get_file(ki->iocb.ki_filp);
+ cachefiles_grab_object(object, cachefiles_obj_get_ioreq);
+ trace_cachefiles_read(object, file_inode(file), ki->iocb.ki_pos, len - skipped);
old_nofs = memalloc_nofs_save();
- ret = vfs_iocb_iter_read(file, &ki->iocb, iter);
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ ret = vfs_iocb_iter_read(file, &ki->iocb, iter);
memalloc_nofs_restore(old_nofs);
switch (ret) {
case -EIOCBQUEUED:
@@ -157,11 +192,70 @@ presubmission_error:
}
/*
+ * Query the occupancy of the cache in a region, returning where the next chunk
+ * of data starts and how long it is.
+ */
+static int cachefiles_query_occupancy(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len)
+{
+ struct cachefiles_object *object;
+ struct file *file;
+ loff_t off, off2;
+
+ *_data_start = -1;
+ *_data_len = 0;
+
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
+ return -ENOBUFS;
+
+ object = cachefiles_cres_object(cres);
+ file = cachefiles_cres_file(cres);
+ granularity = max_t(size_t, object->volume->cache->bsize, granularity);
+
+ _enter("%pD,%li,%llx,%zx/%llx",
+ file, file_inode(file)->i_ino, start, len,
+ i_size_read(file_inode(file)));
+
+ off = cachefiles_inject_read_error();
+ if (off == 0)
+ off = vfs_llseek(file, start, SEEK_DATA);
+ if (off == -ENXIO)
+ return -ENODATA; /* Beyond EOF */
+ if (off < 0 && off >= (loff_t)-MAX_ERRNO)
+ return -ENOBUFS; /* Error. */
+ if (round_up(off, granularity) >= start + len)
+ return -ENODATA; /* No data in range */
+
+ off2 = cachefiles_inject_read_error();
+ if (off2 == 0)
+ off2 = vfs_llseek(file, off, SEEK_HOLE);
+ if (off2 == -ENXIO)
+ return -ENODATA; /* Beyond EOF */
+ if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO)
+ return -ENOBUFS; /* Error. */
+
+ /* Round away partial blocks */
+ off = round_up(off, granularity);
+ off2 = round_down(off2, granularity);
+ if (off2 <= off)
+ return -ENODATA;
+
+ *_data_start = off;
+ if (off2 > start + len)
+ *_data_len = len;
+ else
+ *_data_len = off2 - off;
+ return 0;
+}
+
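cachefiles_query_occupancy pairs SEEK_DATA with SEEK_HOLE and then rounds partial blocks away, so only whole cache-granule extents are reported back to netfs. A compilable userspace sketch of the same probe, assuming an fd onto a sparse file (one simplification: where the kernel version reports *_data_len = len when the extent runs past the window, this sketch clamps to the window end):

#define _GNU_SOURCE
#include <errno.h>
#include <unistd.h>

#define ROUND_UP(x, g)   ((((x) + (g) - 1) / (g)) * (g))
#define ROUND_DOWN(x, g) (((x) / (g)) * (g))

/* Report the first whole @gran-aligned run of data in [start, start + len). */
static int query_occupancy(int fd, off_t start, off_t len, off_t gran,
			   off_t *data_start, off_t *data_len)
{
	off_t off = lseek(fd, start, SEEK_DATA), off2;

	if (off < 0)
		return errno == ENXIO ? -ENODATA : -EIO;  /* ENXIO: beyond EOF */
	off2 = lseek(fd, off, SEEK_HOLE);
	if (off2 < 0)
		return -EIO;

	off = ROUND_UP(off, gran);		/* round partial blocks away */
	off2 = ROUND_DOWN(off2, gran);
	if (off2 <= off || off >= start + len)
		return -ENODATA;		/* no whole granule in range */

	*data_start = off;
	*data_len = (off2 < start + len ? off2 : start + len) - off;
	return 0;
}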
+/*
* Handle completion of a write to the cache.
*/
static void cachefiles_write_complete(struct kiocb *iocb, long ret)
{
struct cachefiles_kiocb *ki = container_of(iocb, struct cachefiles_kiocb, iocb);
+ struct cachefiles_object *object = ki->object;
struct inode *inode = file_inode(ki->iocb.ki_filp);
_enter("%ld", ret);
@@ -170,9 +264,14 @@ static void cachefiles_write_complete(struct kiocb *iocb, long ret)
__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
__sb_end_write(inode->i_sb, SB_FREEZE_WRITE);
+ if (ret < 0)
+ trace_cachefiles_io_error(object, inode, ret,
+ cachefiles_trace_write_error);
+
+ atomic_long_sub(ki->b_writing, &object->volume->cache->b_writing);
+ set_bit(FSCACHE_COOKIE_HAVE_DATA, &object->cookie->flags);
if (ki->term_func)
ki->term_func(ki->term_func_priv, ret, ki->was_async);
-
cachefiles_put_kiocb(ki);
}
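b_writing is a running count of cache blocks with writes in flight: the per-request block count is computed with a round-up divide by the cache's block shift, added at submission, and subtracted in the completion handler above. A small C11 sketch of that accounting:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long b_writing;		/* blocks currently being written */

/* Blocks covered by a write of @len bytes, given a block shift. */
static long blocks_for(size_t len, unsigned int bshift)
{
	return (len + (1UL << bshift) - 1) >> bshift;
}

static void write_submit(size_t len, unsigned int bshift, long *b)
{
	*b = blocks_for(len, bshift);
	atomic_fetch_add(&b_writing, *b);
}

static void write_complete(long b)
{
	atomic_fetch_sub(&b_writing, b);
}

int main(void)
{
	long b;

	write_submit(5000, 12, &b);	/* 4KiB blocks -> 2 blocks in flight */
	printf("in flight: %ld\n", atomic_load(&b_writing));
	write_complete(b);
	printf("in flight: %ld\n", atomic_load(&b_writing));
	return 0;
}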
@@ -185,17 +284,27 @@ static int cachefiles_write(struct netfs_cache_resources *cres,
netfs_io_terminated_t term_func,
void *term_func_priv)
{
+ struct cachefiles_object *object;
+ struct cachefiles_cache *cache;
struct cachefiles_kiocb *ki;
struct inode *inode;
- struct file *file = cres->cache_priv2;
+ struct file *file;
unsigned int old_nofs;
ssize_t ret = -ENOBUFS;
size_t len = iov_iter_count(iter);
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
+ goto presubmission_error;
+ fscache_count_write();
+ object = cachefiles_cres_object(cres);
+ cache = object->volume->cache;
+ file = cachefiles_cres_file(cres);
+
_enter("%pD,%li,%llx,%zx/%llx",
file, file_inode(file)->i_ino, start_pos, len,
i_size_read(file_inode(file)));
+ ret = -ENOMEM;
ki = kzalloc(sizeof(struct cachefiles_kiocb), GFP_KERNEL);
if (!ki)
goto presubmission_error;
@@ -206,14 +315,18 @@ static int cachefiles_write(struct netfs_cache_resources *cres,
ki->iocb.ki_flags = IOCB_DIRECT | IOCB_WRITE;
ki->iocb.ki_hint = ki_hint_validate(file_write_hint(file));
ki->iocb.ki_ioprio = get_current_ioprio();
+ ki->object = object;
+ ki->inval_counter = cres->inval_counter;
ki->start = start_pos;
ki->len = len;
ki->term_func = term_func;
ki->term_func_priv = term_func_priv;
ki->was_async = true;
+ ki->b_writing = (len + (1 << cache->bshift) - 1) >> cache->bshift;
if (ki->term_func)
ki->iocb.ki_complete = cachefiles_write_complete;
+ atomic_long_add(ki->b_writing, &cache->b_writing);
/* Open-code file_start_write here to grab freeze protection, which
* will be released by another thread in aio_complete_rw(). Fool
@@ -225,9 +338,13 @@ static int cachefiles_write(struct netfs_cache_resources *cres,
__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
get_file(ki->iocb.ki_filp);
+ cachefiles_grab_object(object, cachefiles_obj_get_ioreq);
+ trace_cachefiles_write(object, inode, ki->iocb.ki_pos, len);
old_nofs = memalloc_nofs_save();
- ret = vfs_iocb_iter_write(file, &ki->iocb, iter);
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
+ ret = vfs_iocb_iter_write(file, &ki->iocb, iter);
memalloc_nofs_restore(old_nofs);
switch (ret) {
case -EIOCBQUEUED:
@@ -257,8 +374,8 @@ in_progress:
presubmission_error:
if (term_func)
- term_func(term_func_priv, -ENOMEM, false);
- return -ENOMEM;
+ term_func(term_func_priv, ret, false);
+ return ret;
}
/*
@@ -268,47 +385,82 @@ presubmission_error:
static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subrequest *subreq,
loff_t i_size)
{
- struct fscache_retrieval *op = subreq->rreq->cache_resources.cache_priv;
+ enum cachefiles_prepare_read_trace why;
+ struct netfs_read_request *rreq = subreq->rreq;
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
struct cachefiles_object *object;
struct cachefiles_cache *cache;
+ struct fscache_cookie *cookie = fscache_cres_cookie(cres);
const struct cred *saved_cred;
- struct file *file = subreq->rreq->cache_resources.cache_priv2;
+ struct file *file = cachefiles_cres_file(cres);
+ enum netfs_read_source ret = NETFS_DOWNLOAD_FROM_SERVER;
loff_t off, to;
+ ino_t ino = file ? file_inode(file)->i_ino : 0;
_enter("%zx @%llx/%llx", subreq->len, subreq->start, i_size);
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
+ if (subreq->start >= i_size) {
+ ret = NETFS_FILL_WITH_ZEROES;
+ why = cachefiles_trace_read_after_eof;
+ goto out_no_object;
+ }
- if (!file)
- goto cache_fail_nosec;
+ if (test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags)) {
+ __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
+ why = cachefiles_trace_read_no_data;
+ goto out_no_object;
+ }
- if (subreq->start >= i_size)
- return NETFS_FILL_WITH_ZEROES;
+ /* The object and the file may be being created in the background. */
+ if (!file) {
+ why = cachefiles_trace_read_no_file;
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
+ goto out_no_object;
+ file = cachefiles_cres_file(cres);
+ if (!file)
+ goto out_no_object;
+ ino = file_inode(file)->i_ino;
+ }
+ object = cachefiles_cres_object(cres);
+ cache = object->volume->cache;
cachefiles_begin_secure(cache, &saved_cred);
- off = vfs_llseek(file, subreq->start, SEEK_DATA);
+ off = cachefiles_inject_read_error();
+ if (off == 0)
+ off = vfs_llseek(file, subreq->start, SEEK_DATA);
if (off < 0 && off >= (loff_t)-MAX_ERRNO) {
- if (off == (loff_t)-ENXIO)
+ if (off == (loff_t)-ENXIO) {
+ why = cachefiles_trace_read_seek_nxio;
goto download_and_store;
- goto cache_fail;
+ }
+ trace_cachefiles_io_error(object, file_inode(file), off,
+ cachefiles_trace_seek_error);
+ why = cachefiles_trace_read_seek_error;
+ goto out;
}
- if (off >= subreq->start + subreq->len)
+ if (off >= subreq->start + subreq->len) {
+ why = cachefiles_trace_read_found_hole;
goto download_and_store;
+ }
if (off > subreq->start) {
off = round_up(off, cache->bsize);
subreq->len = off - subreq->start;
+ why = cachefiles_trace_read_found_part;
goto download_and_store;
}
- to = vfs_llseek(file, subreq->start, SEEK_HOLE);
- if (to < 0 && to >= (loff_t)-MAX_ERRNO)
- goto cache_fail;
+ to = cachefiles_inject_read_error();
+ if (to == 0)
+ to = vfs_llseek(file, subreq->start, SEEK_HOLE);
+ if (to < 0 && to >= (loff_t)-MAX_ERRNO) {
+ trace_cachefiles_io_error(object, file_inode(file), to,
+ cachefiles_trace_seek_error);
+ why = cachefiles_trace_read_seek_error;
+ goto out;
+ }
if (to < subreq->start + subreq->len) {
if (subreq->start + subreq->len >= i_size)
@@ -318,32 +470,119 @@ static enum netfs_read_source cachefiles_prepare_read(struct netfs_read_subreque
subreq->len = to - subreq->start;
}
- cachefiles_end_secure(cache, saved_cred);
- return NETFS_READ_FROM_CACHE;
+ why = cachefiles_trace_read_have_data;
+ ret = NETFS_READ_FROM_CACHE;
+ goto out;
download_and_store:
- if (cachefiles_has_space(cache, 0, (subreq->len + PAGE_SIZE - 1) / PAGE_SIZE) == 0)
- __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
-cache_fail:
+ __set_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags);
+out:
cachefiles_end_secure(cache, saved_cred);
-cache_fail_nosec:
- return NETFS_DOWNLOAD_FROM_SERVER;
+out_no_object:
+ trace_cachefiles_prep_read(subreq, ret, why, ino);
+ return ret;
}
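The rewritten prepare_read is a decision ladder: a subrequest wholly beyond EOF is zero-filled, a hole covering the whole subrequest is downloaded (and flagged for write-back to the cache), a leading hole shrinks the subrequest before downloading, and data at the start is read from the cache, trimmed to the extent found. A compact sketch of the ladder with the seek results passed in as parameters; the types are illustrative, not the netfs API:

#include <stdio.h>

enum source { FILL_WITH_ZEROES, DOWNLOAD_FROM_SERVER, READ_FROM_CACHE };

/* Decide how to serve [start, start + *len) given the file size and the
 * data extent [data_start, data_end) found by seeking; *len may shrink. */
static enum source prepare_read(long long start, long long *len,
				long long i_size, long long data_start,
				long long data_end, long long bsize)
{
	if (start >= i_size)
		return FILL_WITH_ZEROES;	/* wholly beyond EOF */
	if (data_start >= start + *len)
		return DOWNLOAD_FROM_SERVER;	/* all hole: fetch and cache */
	if (data_start > start) {		/* leading hole: shrink, fetch */
		data_start = (data_start + bsize - 1) / bsize * bsize;
		*len = data_start - start;
		return DOWNLOAD_FROM_SERVER;
	}
	if (data_end < start + *len)		/* data first: trim to extent */
		*len = data_end - start;
	return READ_FROM_CACHE;
}

int main(void)
{
	long long len = 32768;
	/* Hole over [0, 16384) then data: expect a shrunken download. */
	enum source s = prepare_read(0, &len, 1048576, 16384, 65536, 4096);

	printf("source=%d len=%lld\n", s, len);	/* source=1 len=16384 */
	return 0;
}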
/*
* Prepare for a write to occur.
*/
-static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
- loff_t *_start, size_t *_len, loff_t i_size)
+static int __cachefiles_prepare_write(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, loff_t i_size,
+ bool no_space_allocated_yet)
{
- loff_t start = *_start;
+ struct cachefiles_object *object = cachefiles_cres_object(cres);
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct file *file = cachefiles_cres_file(cres);
+ loff_t start = *_start, pos;
size_t len = *_len, down;
+ int ret;
/* Round to DIO size */
down = start - round_down(start, PAGE_SIZE);
*_start = start - down;
*_len = round_up(down + len, PAGE_SIZE);
- return 0;
+
+ /* We need to work out whether there's sufficient disk space to perform
+ * the write - but we can skip that check if we have space already
+ * allocated.
+ */
+ if (no_space_allocated_yet)
+ goto check_space;
+
+ pos = cachefiles_inject_read_error();
+ if (pos == 0)
+ pos = vfs_llseek(file, *_start, SEEK_DATA);
+ if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
+ if (pos == -ENXIO)
+ goto check_space; /* Unallocated tail */
+ trace_cachefiles_io_error(object, file_inode(file), pos,
+ cachefiles_trace_seek_error);
+ return pos;
+ }
+ if ((u64)pos >= (u64)*_start + *_len)
+ goto check_space; /* Unallocated region */
+
+ /* We have a block that's at least partially filled - if we're low on
+ * space, we need to see if it's fully allocated. If it's not, we may
+ * want to cull it.
+ */
+ if (cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
+ cachefiles_has_space_check) == 0)
+ return 0; /* Enough space to simply overwrite the whole block */
+
+ pos = cachefiles_inject_read_error();
+ if (pos == 0)
+ pos = vfs_llseek(file, *_start, SEEK_HOLE);
+ if (pos < 0 && pos >= (loff_t)-MAX_ERRNO) {
+ trace_cachefiles_io_error(object, file_inode(file), pos,
+ cachefiles_trace_seek_error);
+ return pos;
+ }
+ if ((u64)pos >= (u64)*_start + *_len)
+ return 0; /* Fully allocated */
+
+ /* Partially allocated, but insufficient space: cull. */
+ fscache_count_no_write_space();
+ ret = cachefiles_inject_remove_error();
+ if (ret == 0)
+ ret = vfs_fallocate(file, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ *_start, *_len);
+ if (ret < 0) {
+ trace_cachefiles_io_error(object, file_inode(file), ret,
+ cachefiles_trace_fallocate_error);
+ cachefiles_io_error_obj(object,
+ "CacheFiles: fallocate failed (%d)\n", ret);
+ ret = -EIO;
+ }
+
+ return ret;
+
+check_space:
+ return cachefiles_has_space(cache, 0, *_len / PAGE_SIZE,
+ cachefiles_has_space_for_write);
+}
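When the target region is only partially allocated and the cache is short of space, __cachefiles_prepare_write culls the old blocks by punching them out while keeping the file size, so the subsequent DIO write lands in freshly allocated space. The userspace equivalent of that cull, as a runnable sketch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[8192];
	int fd = open("cull.tmp", O_RDWR | O_CREAT | O_TRUNC, 0600);

	memset(buf, 'x', sizeof(buf));
	if (pwrite(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf))
		perror("pwrite");

	/* Deallocate the first 8KiB but keep the file size unchanged,
	 * mirroring the cull in __cachefiles_prepare_write(). */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      0, sizeof(buf)) < 0)
		perror("fallocate");

	close(fd);
	unlink("cull.tmp");
	return 0;
}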
+
+static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
+ loff_t *_start, size_t *_len, loff_t i_size,
+ bool no_space_allocated_yet)
+{
+ struct cachefiles_object *object = cachefiles_cres_object(cres);
+ struct cachefiles_cache *cache = object->volume->cache;
+ const struct cred *saved_cred;
+ int ret;
+
+ if (!cachefiles_cres_file(cres)) {
+ if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
+ return -ENOBUFS;
+ if (!cachefiles_cres_file(cres))
+ return -ENOBUFS;
+ }
+
+ cachefiles_begin_secure(cache, &saved_cred);
+ ret = __cachefiles_prepare_write(cres, _start, _len, i_size,
+ no_space_allocated_yet);
+ cachefiles_end_secure(cache, saved_cred);
+ return ret;
}
/*
@@ -351,19 +590,11 @@ static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
*/
static void cachefiles_end_operation(struct netfs_cache_resources *cres)
{
- struct fscache_retrieval *op = cres->cache_priv;
- struct file *file = cres->cache_priv2;
-
- _enter("");
+ struct file *file = cachefiles_cres_file(cres);
if (file)
fput(file);
- if (op) {
- fscache_op_complete(&op->op, false);
- fscache_put_retrieval(op);
- }
-
- _leave("");
+ fscache_end_cookie_access(fscache_cres_cookie(cres), fscache_access_io_end);
}
static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
@@ -372,49 +603,31 @@ static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
.write = cachefiles_write,
.prepare_read = cachefiles_prepare_read,
.prepare_write = cachefiles_prepare_write,
+ .query_occupancy = cachefiles_query_occupancy,
};
/*
* Open the cache file when beginning a cache operation.
*/
-int cachefiles_begin_read_operation(struct netfs_read_request *rreq,
- struct fscache_retrieval *op)
+bool cachefiles_begin_operation(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state)
{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- struct path path;
- struct file *file;
-
- _enter("");
-
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- path.mnt = cache->mnt;
- path.dentry = object->backer;
- file = open_with_fake_path(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
- d_inode(object->backer), cache->cache_cred);
- if (IS_ERR(file))
- return PTR_ERR(file);
- if (!S_ISREG(file_inode(file)->i_mode))
- goto error_file;
- if (unlikely(!file->f_op->read_iter) ||
- unlikely(!file->f_op->write_iter)) {
- pr_notice("Cache does not support read_iter and write_iter\n");
- goto error_file;
+ struct cachefiles_object *object = cachefiles_cres_object(cres);
+
+ if (!cachefiles_cres_file(cres)) {
+ cres->ops = &cachefiles_netfs_cache_ops;
+ if (object->file) {
+ spin_lock(&object->lock);
+ if (!cres->cache_priv2 && object->file)
+ cres->cache_priv2 = get_file(object->file);
+ spin_unlock(&object->lock);
+ }
}
- fscache_get_retrieval(op);
- rreq->cache_resources.cache_priv = op;
- rreq->cache_resources.cache_priv2 = file;
- rreq->cache_resources.ops = &cachefiles_netfs_cache_ops;
- rreq->cache_resources.debug_id = object->fscache.debug_id;
- _leave("");
- return 0;
+ if (!cachefiles_cres_file(cres) && want_state != FSCACHE_WANT_PARAMS) {
+ pr_err("failed to get cres->file\n");
+ return false;
+ }
-error_file:
- fput(file);
- return -EIO;
+ return true;
}
diff --git a/fs/cachefiles/key.c b/fs/cachefiles/key.c
index 7f94efc97e23..bf935e25bdbe 100644
--- a/fs/cachefiles/key.c
+++ b/fs/cachefiles/key.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* Key to pathname encoder
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
@@ -22,134 +22,117 @@ static const char cachefiles_filecharmap[256] = {
[48 ... 127] = 1, /* '0' -> '~' */
};
+static inline unsigned int how_many_hex_digits(unsigned int x)
+{
+ return x ? round_up(ilog2(x) + 1, 4) / 4 : 0;
+}
+
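how_many_hex_digits rounds the bit length of a word up to whole nibbles; the sums accumulated in nbe and nle further down cost out big- and little-endian hex renderings of the key against base64, and the shortest form wins. A userspace replica of the helper, assuming __builtin_clz in place of the kernel's ilog2:

#include <stdio.h>

/* Nibbles needed to print x in hex; the kernel form is
 * round_up(ilog2(x) + 1, 4) / 4, which this reproduces. */
static unsigned int how_many_hex_digits(unsigned int x)
{
	unsigned int bits;

	if (!x)
		return 0;
	bits = 32 - __builtin_clz(x);	/* bit length of x */
	return (bits + 3) / 4;		/* round up to whole nibbles */
}

int main(void)
{
	printf("%u %u %u\n",
	       how_many_hex_digits(0),		/* 0 */
	       how_many_hex_digits(0xf),	/* 1 */
	       how_many_hex_digits(0x1234));	/* 4 */
	return 0;
}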
/*
* turn the raw key into something cooked
- * - the raw key should include the length in the two bytes at the front
- * - the key may be up to 514 bytes in length (including the length word)
+ * - the key may be up to NAME_MAX - 3 bytes in length
* - "base64" encode the strange keys, mapping 3 bytes of raw to four of
* cooked
* - need to cut the cooked key into 252 char lengths (189 raw bytes)
*/
-char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type)
+bool cachefiles_cook_key(struct cachefiles_object *object)
{
- unsigned char csum, ch;
- unsigned int acc;
- char *key;
- int loop, len, max, seg, mark, print;
+ const u8 *key = fscache_get_key(object->cookie), *kend;
+ unsigned char ch;
+ unsigned int acc, i, n, nle, nbe, keylen = object->cookie->key_len;
+ unsigned int b64len, len, print, pad;
+ char *name, sep;
- _enter(",%d", keylen);
+ _enter(",%u,%*phN", keylen, keylen, key);
- BUG_ON(keylen < 2 || keylen > 514);
+ BUG_ON(keylen > NAME_MAX - 3);
- csum = raw[0] + raw[1];
print = 1;
- for (loop = 2; loop < keylen; loop++) {
- ch = raw[loop];
- csum += ch;
+ for (i = 0; i < keylen; i++) {
+ ch = key[i];
print &= cachefiles_filecharmap[ch];
}
+ /* If the path is usable ASCII, then we render it directly */
if (print) {
- /* if the path is usable ASCII, then we render it directly */
- max = keylen - 2;
- max += 2; /* two base64'd length chars on the front */
- max += 5; /* @checksum/M */
- max += 3 * 2; /* maximum number of segment dividers (".../M")
- * is ((514 + 251) / 252) = 3
- */
- max += 1; /* NUL on end */
- } else {
- /* calculate the maximum length of the cooked key */
- keylen = (keylen + 2) / 3;
-
- max = keylen * 4;
- max += 5; /* @checksum/M */
- max += 3 * 2; /* maximum number of segment dividers (".../M")
- * is ((514 + 188) / 189) = 3
- */
- max += 1; /* NUL on end */
+ len = 1 + keylen;
+ name = kmalloc(len + 1, GFP_KERNEL);
+ if (!name)
+ return false;
+
+ name[0] = 'D'; /* Data object type, string encoding */
+ memcpy(name + 1, key, keylen);
+ goto success;
}
- max += 1; /* 2nd NUL on end */
-
- _debug("max: %d", max);
-
- key = kmalloc(max, cachefiles_gfp);
- if (!key)
- return NULL;
-
- len = 0;
-
- /* build the cooked key */
- sprintf(key, "@%02x%c+", (unsigned) csum, 0);
- len = 5;
- mark = len - 1;
-
- if (print) {
- acc = *(uint16_t *) raw;
- raw += 2;
-
- key[len + 1] = cachefiles_charmap[acc & 63];
- acc >>= 6;
- key[len] = cachefiles_charmap[acc & 63];
- len += 2;
-
- seg = 250;
- for (loop = keylen; loop > 0; loop--) {
- if (seg <= 0) {
- key[len++] = '\0';
- mark = len;
- key[len++] = '+';
- seg = 252;
- }
-
- key[len++] = *raw++;
- ASSERT(len < max);
- }
-
- switch (type) {
- case FSCACHE_COOKIE_TYPE_INDEX: type = 'I'; break;
- case FSCACHE_COOKIE_TYPE_DATAFILE: type = 'D'; break;
- default: type = 'S'; break;
- }
- } else {
- seg = 252;
- for (loop = keylen; loop > 0; loop--) {
- if (seg <= 0) {
- key[len++] = '\0';
- mark = len;
- key[len++] = '+';
- seg = 252;
- }
-
- acc = *raw++;
- acc |= *raw++ << 8;
- acc |= *raw++ << 16;
-
- _debug("acc: %06x", acc);
-
- key[len++] = cachefiles_charmap[acc & 63];
- acc >>= 6;
- key[len++] = cachefiles_charmap[acc & 63];
- acc >>= 6;
- key[len++] = cachefiles_charmap[acc & 63];
- acc >>= 6;
- key[len++] = cachefiles_charmap[acc & 63];
-
- ASSERT(len < max);
- }
+ /* See if it makes sense to encode it as "hex,hex,hex" for each 32-bit
+ * chunk. We rely on the key having been padded out to a whole number
+ * of 32-bit words.
+ */
+ n = round_up(keylen, 4);
+ nbe = nle = 0;
+ for (i = 0; i < n; i += 4) {
+ u32 be = be32_to_cpu(*(__be32 *)(key + i));
+ u32 le = le32_to_cpu(*(__le32 *)(key + i));
+
+ nbe += 1 + how_many_hex_digits(be);
+ nle += 1 + how_many_hex_digits(le);
+ }
- switch (type) {
- case FSCACHE_COOKIE_TYPE_INDEX: type = 'J'; break;
- case FSCACHE_COOKIE_TYPE_DATAFILE: type = 'E'; break;
- default: type = 'T'; break;
+ b64len = DIV_ROUND_UP(keylen, 3);
+ pad = b64len * 3 - keylen;
+ b64len = 2 + b64len * 4; /* Length if we base64-encode it */
+ _debug("len=%u nbe=%u nle=%u b64=%u", keylen, nbe, nle, b64len);
+ if (nbe < b64len || nle < b64len) {
+ unsigned int nlen = min(nbe, nle) + 1;
+ name = kmalloc(nlen, GFP_KERNEL);
+ if (!name)
+ return false;
+ sep = (nbe <= nle) ? 'S' : 'T'; /* Encoding indicator */
+ len = 0;
+ for (i = 0; i < n; i += 4) {
+ u32 x;
+ if (nbe <= nle)
+ x = be32_to_cpu(*(__be32 *)(key + i));
+ else
+ x = le32_to_cpu(*(__le32 *)(key + i));
+ name[len++] = sep;
+ if (x != 0)
+ len += snprintf(name + len, nlen - len, "%x", x);
+ sep = ',';
}
+ goto success;
}
- key[mark] = type;
- key[len++] = 0;
- key[len] = 0;
+ /* We need to base64-encode it */
+ name = kmalloc(b64len + 1, GFP_KERNEL);
+ if (!name)
+ return false;
+
+ name[0] = 'E';
+ name[1] = '0' + pad;
+ len = 2;
+ kend = key + keylen;
+ do {
+ acc = *key++;
+ if (key < kend) {
+ acc |= *key++ << 8;
+ if (key < kend)
+ acc |= *key++ << 16;
+ }
- _leave(" = %s %d", key, len);
- return key;
+ name[len++] = cachefiles_charmap[acc & 63];
+ acc >>= 6;
+ name[len++] = cachefiles_charmap[acc & 63];
+ acc >>= 6;
+ name[len++] = cachefiles_charmap[acc & 63];
+ acc >>= 6;
+ name[len++] = cachefiles_charmap[acc & 63];
+ } while (key < kend);
+
+success:
+ name[len] = 0;
+ object->d_name = name;
+ object->d_name_len = len;
+ _leave(" = %s", object->d_name);
+ return true;
}
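The 'E' encoding packs three key bytes at a time into a 24-bit accumulator and emits four characters from a 64-entry map, with the pad count recorded after the type byte. A self-contained sketch of the loop; the charmap below is illustrative, not the kernel's cachefiles_charmap:

#include <stdio.h>

/* Illustrative 64-character map; the real table lives in key.c. */
static const char charmap[64] =
	"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_";

static void cook_key(const unsigned char *key, unsigned int keylen, char *name)
{
	const unsigned char *kend = key + keylen;
	unsigned int b64len = (keylen + 2) / 3;
	unsigned int len = 2, acc;

	name[0] = 'E';				/* type: base64-ish encoding */
	name[1] = '0' + (b64len * 3 - keylen);	/* pad count */
	do {
		acc = *key++;
		if (key < kend) {
			acc |= *key++ << 8;
			if (key < kend)
				acc |= *key++ << 16;
		}
		name[len++] = charmap[acc & 63]; acc >>= 6;
		name[len++] = charmap[acc & 63]; acc >>= 6;
		name[len++] = charmap[acc & 63]; acc >>= 6;
		name[len++] = charmap[acc & 63];
	} while (key < kend);
	name[len] = 0;
}

int main(void)
{
	char name[64];

	cook_key((const unsigned char *)"\x01\x02\x03\x04", 4, name);
	printf("%s\n", name);	/* 'E', pad digit '2', then 8 map chars */
	return 0;
}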
diff --git a/fs/cachefiles/main.c b/fs/cachefiles/main.c
index 9c8d34c49b12..3f369c6f816d 100644
--- a/fs/cachefiles/main.c
+++ b/fs/cachefiles/main.c
@@ -2,7 +2,7 @@
/* Network filesystem caching backend to use cache files on a premounted
* filesystem
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
@@ -18,6 +18,8 @@
#include <linux/statfs.h>
#include <linux/sysctl.h>
#include <linux/miscdevice.h>
+#include <linux/netfs.h>
+#include <trace/events/netfs.h>
#define CREATE_TRACE_POINTS
#include "internal.h"
@@ -37,14 +39,6 @@ static struct miscdevice cachefiles_dev = {
.fops = &cachefiles_daemon_fops,
};
-static void cachefiles_object_init_once(void *_object)
-{
- struct cachefiles_object *object = _object;
-
- memset(object, 0, sizeof(*object));
- spin_lock_init(&object->work_lock);
-}
-
/*
* initialise the fs caching module
*/
@@ -52,6 +46,9 @@ static int __init cachefiles_init(void)
{
int ret;
+ ret = cachefiles_register_error_injection();
+ if (ret < 0)
+ goto error_einj;
ret = misc_register(&cachefiles_dev);
if (ret < 0)
goto error_dev;
@@ -61,9 +58,7 @@ static int __init cachefiles_init(void)
cachefiles_object_jar =
kmem_cache_create("cachefiles_object_jar",
sizeof(struct cachefiles_object),
- 0,
- SLAB_HWCACHE_ALIGN,
- cachefiles_object_init_once);
+ 0, SLAB_HWCACHE_ALIGN, NULL);
if (!cachefiles_object_jar) {
pr_notice("Failed to allocate an object jar\n");
goto error_object_jar;
@@ -75,6 +70,8 @@ static int __init cachefiles_init(void)
error_object_jar:
misc_deregister(&cachefiles_dev);
error_dev:
+ cachefiles_unregister_error_injection();
+error_einj:
pr_err("failed to register: %d\n", ret);
return ret;
}
@@ -90,6 +87,7 @@ static void __exit cachefiles_exit(void)
kmem_cache_destroy(cachefiles_object_jar);
misc_deregister(&cachefiles_dev);
+ cachefiles_unregister_error_injection();
}
module_exit(cachefiles_exit);
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index a9aca5ab5970..f256c8aff7bb 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -1,295 +1,272 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles path walking and related routines
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/file.h>
#include <linux/fs.h>
-#include <linux/fsnotify.h>
-#include <linux/quotaops.h>
-#include <linux/xattr.h>
-#include <linux/mount.h>
#include <linux/namei.h>
-#include <linux/security.h>
-#include <linux/slab.h>
#include "internal.h"
-#define CACHEFILES_KEYBUF_SIZE 512
-
/*
- * dump debugging info about an object
+ * Mark the backing file as being a cache file if it's not already in use. The
+ * mark tells the culling request command that it's not allowed to cull the
+ * file or directory. The caller must hold the inode lock.
*/
-static noinline
-void __cachefiles_printk_object(struct cachefiles_object *object,
- const char *prefix)
+static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object,
+ struct dentry *dentry)
{
- struct fscache_cookie *cookie;
- const u8 *k;
- unsigned loop;
-
- pr_err("%sobject: OBJ%x\n", prefix, object->fscache.debug_id);
- pr_err("%sobjstate=%s fl=%lx wbusy=%x ev=%lx[%lx]\n",
- prefix, object->fscache.state->name,
- object->fscache.flags, work_busy(&object->fscache.work),
- object->fscache.events, object->fscache.event_mask);
- pr_err("%sops=%u inp=%u exc=%u\n",
- prefix, object->fscache.n_ops, object->fscache.n_in_progress,
- object->fscache.n_exclusive);
- pr_err("%sparent=%x\n",
- prefix, object->fscache.parent ? object->fscache.parent->debug_id : 0);
-
- spin_lock(&object->fscache.lock);
- cookie = object->fscache.cookie;
- if (cookie) {
- pr_err("%scookie=%x [pr=%x nd=%p fl=%lx]\n",
- prefix,
- cookie->debug_id,
- cookie->parent ? cookie->parent->debug_id : 0,
- cookie->netfs_data,
- cookie->flags);
- pr_err("%skey=[%u] '", prefix, cookie->key_len);
- k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
- cookie->inline_key : cookie->key;
- for (loop = 0; loop < cookie->key_len; loop++)
- pr_cont("%02x", k[loop]);
- pr_cont("'\n");
+ struct inode *inode = d_backing_inode(dentry);
+ bool can_use = false;
+
+ if (!(inode->i_flags & S_KERNEL_FILE)) {
+ inode->i_flags |= S_KERNEL_FILE;
+ trace_cachefiles_mark_active(object, inode);
+ can_use = true;
} else {
- pr_err("%scookie=NULL\n", prefix);
+ trace_cachefiles_mark_failed(object, inode);
+ pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
+ dentry, inode->i_ino);
}
- spin_unlock(&object->fscache.lock);
+
+ return can_use;
}
-/*
- * dump debugging info about a pair of objects
- */
-static noinline void cachefiles_printk_object(struct cachefiles_object *object,
- struct cachefiles_object *xobject)
+static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object,
+ struct dentry *dentry)
{
- if (object)
- __cachefiles_printk_object(object, "");
- if (xobject)
- __cachefiles_printk_object(xobject, "x");
+ struct inode *inode = d_backing_inode(dentry);
+ bool can_use;
+
+ inode_lock(inode);
+ can_use = __cachefiles_mark_inode_in_use(object, dentry);
+ inode_unlock(inode);
+ return can_use;
}
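The S_KERNEL_FILE mark is a first-claimant-wins scheme taken under the inode lock: the first caller claims the backing inode, and any later claimant is refused and logged. The same pattern reduced to userspace, with the flag and lock as stand-ins:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define S_KERNEL_FILE 0x1	/* illustrative stand-in for the inode flag */

struct fake_inode {
	pthread_mutex_t lock;
	unsigned int i_flags;
};

/* First caller claims the inode; later callers are refused, as with
 * __cachefiles_mark_inode_in_use(). */
static bool mark_in_use(struct fake_inode *inode)
{
	bool can_use = false;

	pthread_mutex_lock(&inode->lock);
	if (!(inode->i_flags & S_KERNEL_FILE)) {
		inode->i_flags |= S_KERNEL_FILE;
		can_use = true;
	}
	pthread_mutex_unlock(&inode->lock);
	return can_use;
}

int main(void)
{
	struct fake_inode i = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("%d %d\n", mark_in_use(&i), mark_in_use(&i));	/* 1 0 */
	return 0;
}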
/*
- * mark the owner of a dentry, if there is one, to indicate that that dentry
- * has been preemptively deleted
- * - the caller must hold the i_mutex on the dentry's parent as required to
- * call vfs_unlink(), vfs_rmdir() or vfs_rename()
+ * Unmark a backing inode. The caller must hold the inode lock.
*/
-static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,
- struct dentry *dentry,
- enum fscache_why_object_killed why)
+static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
+ struct dentry *dentry)
{
- struct cachefiles_object *object;
- struct rb_node *p;
-
- _enter(",'%pd'", dentry);
+ struct inode *inode = d_backing_inode(dentry);
- write_lock(&cache->active_lock);
+ inode->i_flags &= ~S_KERNEL_FILE;
+ trace_cachefiles_mark_inactive(object, inode);
+}
- p = cache->active_nodes.rb_node;
- while (p) {
- object = rb_entry(p, struct cachefiles_object, active_node);
- if (object->dentry > dentry)
- p = p->rb_left;
- else if (object->dentry < dentry)
- p = p->rb_right;
- else
- goto found_dentry;
+/*
+ * Unmark a backing inode and tell cachefilesd that there's something that can
+ * be culled.
+ */
+void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
+ struct file *file)
+{
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct inode *inode = file_inode(file);
+
+ if (inode) {
+ inode_lock(inode);
+ __cachefiles_unmark_inode_in_use(object, file->f_path.dentry);
+ inode_unlock(inode);
+
+ if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
+ atomic_long_add(inode->i_blocks, &cache->b_released);
+ if (atomic_inc_return(&cache->f_released))
+ cachefiles_state_changed(cache);
+ }
}
+}
- write_unlock(&cache->active_lock);
- trace_cachefiles_mark_buried(NULL, dentry, why);
- _leave(" [no owner]");
- return;
+/*
+ * get a subdirectory
+ */
+struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
+ struct dentry *dir,
+ const char *dirname,
+ bool *_is_new)
+{
+ struct dentry *subdir;
+ struct path path;
+ int ret;
- /* found the dentry for */
-found_dentry:
- kdebug("preemptive burial: OBJ%x [%s] %pd",
- object->fscache.debug_id,
- object->fscache.state->name,
- dentry);
+ _enter(",,%s", dirname);
- trace_cachefiles_mark_buried(object, dentry, why);
+ /* search the current directory for the element name */
+ inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
- if (fscache_object_is_live(&object->fscache)) {
- pr_err("\n");
- pr_err("Error: Can't preemptively bury live object\n");
- cachefiles_printk_object(object, NULL);
- } else {
- if (why != FSCACHE_OBJECT_IS_STALE)
- fscache_object_mark_killed(&object->fscache, why);
+retry:
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ subdir = lookup_one_len(dirname, dir, strlen(dirname));
+ else
+ subdir = ERR_PTR(ret);
+ trace_cachefiles_lookup(NULL, dir, subdir);
+ if (IS_ERR(subdir)) {
+ trace_cachefiles_vfs_error(NULL, d_backing_inode(dir),
+ PTR_ERR(subdir),
+ cachefiles_trace_lookup_error);
+ if (PTR_ERR(subdir) == -ENOMEM)
+ goto nomem_d_alloc;
+ goto lookup_error;
}
- write_unlock(&cache->active_lock);
- _leave(" [owner marked]");
-}
+ _debug("subdir -> %pd %s",
+ subdir, d_backing_inode(subdir) ? "positive" : "negative");
-/*
- * record the fact that an object is now active
- */
-static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
- struct cachefiles_object *object)
-{
- struct cachefiles_object *xobject;
- struct rb_node **_p, *_parent = NULL;
- struct dentry *dentry;
+ /* we need to create the subdir if it doesn't exist yet */
+ if (d_is_negative(subdir)) {
+ ret = cachefiles_has_space(cache, 1, 0,
+ cachefiles_has_space_for_create);
+ if (ret < 0)
+ goto mkdir_error;
- _enter(",%x", object->fscache.debug_id);
+ _debug("attempt mkdir");
-try_again:
- write_lock(&cache->active_lock);
+ path.mnt = cache->mnt;
+ path.dentry = dir;
+ ret = security_path_mkdir(&path, subdir, 0700);
+ if (ret < 0)
+ goto mkdir_error;
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
+ ret = vfs_mkdir(&init_user_ns, d_inode(dir), subdir, 0700);
+ if (ret < 0) {
+ trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
+ cachefiles_trace_mkdir_error);
+ goto mkdir_error;
+ }
+ trace_cachefiles_mkdir(dir, subdir);
- dentry = object->dentry;
- trace_cachefiles_mark_active(object, dentry);
+ if (unlikely(d_unhashed(subdir))) {
+ cachefiles_put_directory(subdir);
+ goto retry;
+ }
+ ASSERT(d_backing_inode(subdir));
- if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
- pr_err("Error: Object already active\n");
- cachefiles_printk_object(object, NULL);
- BUG();
+ _debug("mkdir -> %pd{ino=%lu}",
+ subdir, d_backing_inode(subdir)->i_ino);
+ if (_is_new)
+ *_is_new = true;
}
- _p = &cache->active_nodes.rb_node;
- while (*_p) {
- _parent = *_p;
- xobject = rb_entry(_parent,
- struct cachefiles_object, active_node);
+ /* Tell rmdir() it's not allowed to delete the subdir */
+ inode_lock(d_inode(subdir));
+ inode_unlock(d_inode(dir));
- ASSERT(xobject != object);
+ if (!__cachefiles_mark_inode_in_use(NULL, subdir))
+ goto mark_error;
- if (xobject->dentry > dentry)
- _p = &(*_p)->rb_left;
- else if (xobject->dentry < dentry)
- _p = &(*_p)->rb_right;
- else
- goto wait_for_old_object;
- }
+ inode_unlock(d_inode(subdir));
- rb_link_node(&object->active_node, _parent, _p);
- rb_insert_color(&object->active_node, &cache->active_nodes);
+ /* we need to make sure the subdir is a directory */
+ ASSERT(d_backing_inode(subdir));
- write_unlock(&cache->active_lock);
- _leave(" = 0");
- return 0;
+ if (!d_can_lookup(subdir)) {
+ pr_err("%s is not a directory\n", dirname);
+ ret = -EIO;
+ goto check_error;
+ }
- /* an old object from a previous incarnation is hogging the slot - we
- * need to wait for it to be destroyed */
-wait_for_old_object:
- trace_cachefiles_wait_active(object, dentry, xobject);
- clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
-
- if (fscache_object_is_live(&xobject->fscache)) {
- pr_err("\n");
- pr_err("Error: Unexpected object collision\n");
- cachefiles_printk_object(object, xobject);
- }
- atomic_inc(&xobject->usage);
- write_unlock(&cache->active_lock);
-
- if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
- wait_queue_head_t *wq;
-
- signed long timeout = 60 * HZ;
- wait_queue_entry_t wait;
- bool requeue;
-
- /* if the object we're waiting for is queued for processing,
- * then just put ourselves on the queue behind it */
- if (work_pending(&xobject->fscache.work)) {
- _debug("queue OBJ%x behind OBJ%x immediately",
- object->fscache.debug_id,
- xobject->fscache.debug_id);
- goto requeue;
- }
+ ret = -EPERM;
+ if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
+ !d_backing_inode(subdir)->i_op->lookup ||
+ !d_backing_inode(subdir)->i_op->mkdir ||
+ !d_backing_inode(subdir)->i_op->rename ||
+ !d_backing_inode(subdir)->i_op->rmdir ||
+ !d_backing_inode(subdir)->i_op->unlink)
+ goto check_error;
- /* otherwise we sleep until either the object we're waiting for
- * is done, or the fscache_object is congested */
- wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
- init_wait(&wait);
- requeue = false;
- do {
- prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
- if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
- break;
-
- requeue = fscache_object_sleep_till_congested(&timeout);
- } while (timeout > 0 && !requeue);
- finish_wait(wq, &wait);
-
- if (requeue &&
- test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
- _debug("queue OBJ%x behind OBJ%x after wait",
- object->fscache.debug_id,
- xobject->fscache.debug_id);
- goto requeue;
- }
+ _leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
+ return subdir;
- if (timeout <= 0) {
- pr_err("\n");
- pr_err("Error: Overlong wait for old active object to go away\n");
- cachefiles_printk_object(object, xobject);
- goto requeue;
- }
- }
+check_error:
+ cachefiles_put_directory(subdir);
+ _leave(" = %d [check]", ret);
+ return ERR_PTR(ret);
- ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
+mark_error:
+ inode_unlock(d_inode(subdir));
+ dput(subdir);
+ return ERR_PTR(-EBUSY);
- cache->cache.ops->put_object(&xobject->fscache,
- (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_retry);
- goto try_again;
+mkdir_error:
+ inode_unlock(d_inode(dir));
+ dput(subdir);
+ pr_err("mkdir %s failed with error %d\n", dirname, ret);
+ return ERR_PTR(ret);
+
+lookup_error:
+ inode_unlock(d_inode(dir));
+ ret = PTR_ERR(subdir);
+ pr_err("Lookup %s failed with error %d\n", dirname, ret);
+ return ERR_PTR(ret);
-requeue:
- cache->cache.ops->put_object(&xobject->fscache,
- (enum fscache_obj_ref_trace)cachefiles_obj_put_wait_timeo);
- _leave(" = -ETIMEDOUT");
- return -ETIMEDOUT;
+nomem_d_alloc:
+ inode_unlock(d_inode(dir));
+ _leave(" = -ENOMEM");
+ return ERR_PTR(-ENOMEM);
}
/*
- * Mark an object as being inactive.
+ * Put a subdirectory.
*/
-void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
- struct cachefiles_object *object,
- blkcnt_t i_blocks)
+void cachefiles_put_directory(struct dentry *dir)
{
- struct dentry *dentry = object->dentry;
- struct inode *inode = d_backing_inode(dentry);
-
- trace_cachefiles_mark_inactive(object, dentry, inode);
+ if (dir) {
+ inode_lock(dir->d_inode);
+ __cachefiles_unmark_inode_in_use(NULL, dir);
+ inode_unlock(dir->d_inode);
+ dput(dir);
+ }
+}
- write_lock(&cache->active_lock);
- rb_erase(&object->active_node, &cache->active_nodes);
- clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
- write_unlock(&cache->active_lock);
+/*
+ * Remove a regular file from the cache.
+ */
+static int cachefiles_unlink(struct cachefiles_cache *cache,
+ struct cachefiles_object *object,
+ struct dentry *dir, struct dentry *dentry,
+ enum fscache_why_object_killed why)
+{
+ struct path path = {
+ .mnt = cache->mnt,
+ .dentry = dir,
+ };
+ int ret;
- wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
+ trace_cachefiles_unlink(object, d_inode(dentry)->i_ino, why);
+ ret = security_path_unlink(&path, dentry);
+ if (ret < 0) {
+ cachefiles_io_error(cache, "Unlink security error");
+ return ret;
+ }
- /* This object can now be culled, so we need to let the daemon know
- * that there is something it can remove if it needs to.
- */
- atomic_long_add(i_blocks, &cache->b_released);
- if (atomic_inc_return(&cache->f_released))
- cachefiles_state_changed(cache);
+ ret = cachefiles_inject_remove_error();
+ if (ret == 0) {
+ ret = vfs_unlink(&init_user_ns, d_backing_inode(dir), dentry, NULL);
+ if (ret == -EIO)
+ cachefiles_io_error(cache, "Unlink failed");
+ }
+ if (ret != 0)
+ trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
+ cachefiles_trace_unlink_error);
+ return ret;
}
/*
- * delete an object representation from the cache
- * - file backed objects are unlinked
- * - directory backed objects are stuffed into the graveyard for userspace to
+ * Delete an object representation from the cache
+ * - File backed objects are unlinked
+ * - Directory backed objects are stuffed into the graveyard for userspace to
* delete
- * - unlocks the directory mutex
*/
-static int cachefiles_bury_object(struct cachefiles_cache *cache,
- struct cachefiles_object *object,
- struct dentry *dir,
- struct dentry *rep,
- bool preemptive,
- enum fscache_why_object_killed why)
+int cachefiles_bury_object(struct cachefiles_cache *cache,
+ struct cachefiles_object *object,
+ struct dentry *dir,
+ struct dentry *rep,
+ enum fscache_why_object_killed why)
{
struct dentry *grave, *trap;
struct path path, path_to_graveyard;
@@ -298,29 +275,21 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
_enter(",'%pd','%pd'", dir, rep);
+ if (rep->d_parent != dir) {
+ inode_unlock(d_inode(dir));
+ _leave(" = -ESTALE");
+ return -ESTALE;
+ }
+
/* non-directories can just be unlinked */
if (!d_is_dir(rep)) {
- _debug("unlink stale object");
-
- path.mnt = cache->mnt;
- path.dentry = dir;
- ret = security_path_unlink(&path, rep);
- if (ret < 0) {
- cachefiles_io_error(cache, "Unlink security error");
- } else {
- trace_cachefiles_unlink(object, rep, why);
- ret = vfs_unlink(&init_user_ns, d_inode(dir), rep,
- NULL);
-
- if (preemptive)
- cachefiles_mark_object_buried(cache, rep, why);
- }
+ dget(rep); /* Stop the dentry being negated if it's only pinned
+ * by a file struct.
+ */
+ ret = cachefiles_unlink(cache, object, dir, rep, why);
+ dput(rep);
inode_unlock(d_inode(dir));
-
- if (ret == -EIO)
- cachefiles_io_error(cache, "Unlink failed");
-
_leave(" = %d", ret);
return ret;
}
@@ -368,14 +337,16 @@ try_again:
grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
if (IS_ERR(grave)) {
unlock_rename(cache->graveyard, dir);
+ trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
+ PTR_ERR(grave),
+ cachefiles_trace_lookup_error);
if (PTR_ERR(grave) == -ENOMEM) {
_leave(" = -ENOMEM");
return -ENOMEM;
}
- cachefiles_io_error(cache, "Lookup error %ld",
- PTR_ERR(grave));
+ cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave));
return -EIO;
}
@@ -419,16 +390,19 @@ try_again:
.new_dir = d_inode(cache->graveyard),
.new_dentry = grave,
};
- trace_cachefiles_rename(object, rep, grave, why);
- ret = vfs_rename(&rd);
+ trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ ret = vfs_rename(&rd);
+ if (ret != 0)
+ trace_cachefiles_vfs_error(object, d_inode(dir), ret,
+ cachefiles_trace_rename_error);
if (ret != 0 && ret != -ENOMEM)
cachefiles_io_error(cache,
"Rename failed with error %d", ret);
-
- if (preemptive)
- cachefiles_mark_object_buried(cache, rep, why);
}
+ __cachefiles_unmark_inode_in_use(object, rep);
unlock_rename(cache->graveyard, dir);
dput(grave);
_leave(" = 0");
@@ -436,493 +410,358 @@ try_again:
}
/*
- * delete an object representation from the cache
+ * Delete a cache file.
*/
-int cachefiles_delete_object(struct cachefiles_cache *cache,
- struct cachefiles_object *object)
+int cachefiles_delete_object(struct cachefiles_object *object,
+ enum fscache_why_object_killed why)
{
- struct dentry *dir;
+ struct cachefiles_volume *volume = object->volume;
+ struct dentry *dentry = object->file->f_path.dentry;
+ struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
int ret;
- _enter(",OBJ%x{%pd}", object->fscache.debug_id, object->dentry);
-
- ASSERT(object->dentry);
- ASSERT(d_backing_inode(object->dentry));
- ASSERT(object->dentry->d_parent);
+ _enter(",OBJ%x{%pD}", object->debug_id, object->file);
- dir = dget_parent(object->dentry);
+ /* Stop the dentry being negated if it's only pinned by a file struct. */
+ dget(dentry);
- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
-
- if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) {
- /* object allocation for the same key preemptively deleted this
- * object's file so that it could create its own file */
- _debug("object preemptively buried");
- inode_unlock(d_inode(dir));
- ret = 0;
- } else {
- /* we need to check that our parent is _still_ our parent - it
- * may have been renamed */
- if (dir == object->dentry->d_parent) {
- ret = cachefiles_bury_object(cache, object, dir,
- object->dentry, false,
- FSCACHE_OBJECT_WAS_RETIRED);
- } else {
- /* it got moved, presumably by cachefilesd culling it,
- * so it's no longer in the key path and we can ignore
- * it */
- inode_unlock(d_inode(dir));
- ret = 0;
- }
- }
-
- dput(dir);
- _leave(" = %d", ret);
+ inode_lock_nested(d_backing_inode(fan), I_MUTEX_PARENT);
+ ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
+ inode_unlock(d_backing_inode(fan));
+ dput(dentry);
return ret;
}
/*
- * walk from the parent object to the child object through the backing
- * filesystem, creating directories as we go
+ * Create a temporary file and leave it unattached and un-xattr'd until the
+ * time comes to discard the object from memory.
*/
-int cachefiles_walk_to_object(struct cachefiles_object *parent,
- struct cachefiles_object *object,
- const char *key,
- struct cachefiles_xattr *auxdata)
+struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
{
- struct cachefiles_cache *cache;
- struct dentry *dir, *next = NULL;
- struct inode *inode;
+ struct cachefiles_volume *volume = object->volume;
+ struct cachefiles_cache *cache = volume->cache;
+ const struct cred *saved_cred;
+ struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
+ struct file *file;
struct path path;
- const char *name;
- int ret, nlen;
-
- _enter("OBJ%x{%pd},OBJ%x,%s,",
- parent->fscache.debug_id, parent->dentry,
- object->fscache.debug_id, key);
-
- cache = container_of(parent->fscache.cache,
- struct cachefiles_cache, cache);
- path.mnt = cache->mnt;
-
- ASSERT(parent->dentry);
- ASSERT(d_backing_inode(parent->dentry));
-
- if (!(d_is_dir(parent->dentry))) {
- // TODO: convert file to dir
- _leave("looking up in none directory");
- return -ENOBUFS;
- }
-
- dir = dget(parent->dentry);
-
-advance:
- /* attempt to transit the first directory component */
- name = key;
- nlen = strlen(key);
-
- /* key ends in a double NUL */
- key = key + nlen + 1;
- if (!*key)
- key = NULL;
-
-lookup_again:
- /* search the current directory for the element name */
- _debug("lookup '%s'", name);
-
- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
+ uint64_t ni_size = object->cookie->object_size;
+ long ret;
- next = lookup_one_len(name, dir, nlen);
- if (IS_ERR(next)) {
- trace_cachefiles_lookup(object, next, NULL);
- goto lookup_error;
- }
+ ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);
- inode = d_backing_inode(next);
- trace_cachefiles_lookup(object, next, inode);
- _debug("next -> %pd %s", next, inode ? "positive" : "negative");
-
- if (!key)
- object->new = !inode;
-
- /* if this element of the path doesn't exist, then the lookup phase
- * failed, and we can release any readers in the certain knowledge that
- * there's nothing for them to actually read */
- if (d_is_negative(next))
- fscache_object_lookup_negative(&object->fscache);
-
- /* we need to create the object if it's negative */
- if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
- /* index objects and intervening tree levels must be subdirs */
- if (d_is_negative(next)) {
- ret = cachefiles_has_space(cache, 1, 0);
- if (ret < 0)
- goto no_space_error;
-
- path.dentry = dir;
- ret = security_path_mkdir(&path, next, 0);
- if (ret < 0)
- goto create_error;
- ret = vfs_mkdir(&init_user_ns, d_inode(dir), next, 0);
- if (!key)
- trace_cachefiles_mkdir(object, next, ret);
- if (ret < 0)
- goto create_error;
-
- if (unlikely(d_unhashed(next))) {
- dput(next);
- inode_unlock(d_inode(dir));
- goto lookup_again;
- }
- ASSERT(d_backing_inode(next));
-
- _debug("mkdir -> %pd{ino=%lu}",
- next, d_backing_inode(next)->i_ino);
-
- } else if (!d_can_lookup(next)) {
- pr_err("inode %lu is not a directory\n",
- d_backing_inode(next)->i_ino);
- ret = -ENOBUFS;
- goto error;
- }
+ cachefiles_begin_secure(cache, &saved_cred);
- } else {
- /* non-index objects start out life as files */
- if (d_is_negative(next)) {
- ret = cachefiles_has_space(cache, 1, 0);
- if (ret < 0)
- goto no_space_error;
-
- path.dentry = dir;
- ret = security_path_mknod(&path, next, S_IFREG, 0);
- if (ret < 0)
- goto create_error;
- ret = vfs_create(&init_user_ns, d_inode(dir), next,
- S_IFREG, true);
- trace_cachefiles_create(object, next, ret);
- if (ret < 0)
- goto create_error;
-
- ASSERT(d_backing_inode(next));
-
- _debug("create -> %pd{ino=%lu}",
- next, d_backing_inode(next)->i_ino);
-
- } else if (!d_can_lookup(next) &&
- !d_is_reg(next)
- ) {
- pr_err("inode %lu is not a file or directory\n",
- d_backing_inode(next)->i_ino);
- ret = -ENOBUFS;
- goto error;
+ path.mnt = cache->mnt;
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
+ path.dentry = vfs_tmpfile(&init_user_ns, fan, S_IFREG, O_RDWR);
+ else
+ path.dentry = ERR_PTR(ret);
+ if (IS_ERR(path.dentry)) {
+ trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(path.dentry),
+ cachefiles_trace_tmpfile_error);
+ if (PTR_ERR(path.dentry) == -EIO)
+ cachefiles_io_error_obj(object, "Failed to create tmpfile");
+ file = ERR_CAST(path.dentry);
+ goto out;
+ }
+
+ trace_cachefiles_tmpfile(object, d_backing_inode(path.dentry));
+
+ if (!cachefiles_mark_inode_in_use(object, path.dentry)) {
+ file = ERR_PTR(-EBUSY);
+ goto out_dput;
+ }
+
+ if (ni_size > 0) {
+ trace_cachefiles_trunc(object, d_backing_inode(path.dentry), 0, ni_size,
+ cachefiles_trunc_expand_tmpfile);
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
+ ret = vfs_truncate(&path, ni_size);
+ if (ret < 0) {
+ trace_cachefiles_vfs_error(
+ object, d_backing_inode(path.dentry), ret,
+ cachefiles_trace_trunc_error);
+ file = ERR_PTR(ret);
+ goto out_dput;
}
}
- /* process the next component */
- if (key) {
- _debug("advance");
- inode_unlock(d_inode(dir));
- dput(dir);
- dir = next;
- next = NULL;
- goto advance;
+ file = open_with_fake_path(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
+ d_backing_inode(path.dentry), cache->cache_cred);
+ if (IS_ERR(file)) {
+ trace_cachefiles_vfs_error(object, d_backing_inode(path.dentry),
+ PTR_ERR(file),
+ cachefiles_trace_open_error);
+ goto out_dput;
+ }
+ if (unlikely(!file->f_op->read_iter) ||
+ unlikely(!file->f_op->write_iter)) {
+ fput(file);
+ pr_notice("Cache does not support read_iter and write_iter\n");
+ file = ERR_PTR(-EINVAL);
}
- /* we've found the object we were looking for */
- object->dentry = next;
-
- /* if we've found that the terminal object exists, then we need to
- * check its attributes and delete it if it's out of date */
- if (!object->new) {
- _debug("validate '%pd'", next);
-
- ret = cachefiles_check_object_xattr(object, auxdata);
- if (ret == -ESTALE) {
- /* delete the object (the deleter drops the directory
- * mutex) */
- object->dentry = NULL;
+out_dput:
+ dput(path.dentry);
+out:
+ cachefiles_end_secure(cache, saved_cred);
+ return file;
+}
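vfs_tmpfile gives the object an anonymous backing file that only gains a name when cachefiles_commit_tmpfile later links it into the fanout directory. The userspace version of that create-then-commit pattern, assuming a filesystem that supports O_TMPFILE:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	int fd = open("/tmp", O_TMPFILE | O_RDWR, 0600);

	if (fd < 0) {
		perror("O_TMPFILE");
		return 1;
	}
	if (write(fd, "cached data", 11) != 11)
		perror("write");

	/* Give the anonymous file a name only once it is worth keeping,
	 * like cachefiles_commit_tmpfile() linking into the fanout dir. */
	snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
	if (linkat(AT_FDCWD, path, AT_FDCWD, "/tmp/committed",
		   AT_SYMLINK_FOLLOW) < 0)
		perror("linkat");

	close(fd);
	unlink("/tmp/committed");
	return 0;
}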
- ret = cachefiles_bury_object(cache, object, dir, next,
- true,
- FSCACHE_OBJECT_IS_STALE);
- dput(next);
- next = NULL;
+/*
+ * Create a new file.
+ */
+static bool cachefiles_create_file(struct cachefiles_object *object)
+{
+ struct file *file;
+ int ret;
- if (ret < 0)
- goto delete_error;
+ ret = cachefiles_has_space(object->volume->cache, 1, 0,
+ cachefiles_has_space_for_create);
+ if (ret < 0)
+ return false;
- _debug("redo lookup");
- fscache_object_retrying_stale(&object->fscache);
- goto lookup_again;
- }
- }
+ file = cachefiles_create_tmpfile(object);
+ if (IS_ERR(file))
+ return false;
- /* note that we're now using this object */
- ret = cachefiles_mark_object_active(cache, object);
+ set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
+ set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
+ _debug("create -> %pD{ino=%lu}", file, file_inode(file)->i_ino);
+ object->file = file;
+ return true;
+}
- inode_unlock(d_inode(dir));
- dput(dir);
- dir = NULL;
+/*
+ * Open an existing file, checking its attributes and replacing it if it is
+ * stale.
+ */
+static bool cachefiles_open_file(struct cachefiles_object *object,
+ struct dentry *dentry)
+{
+ struct cachefiles_cache *cache = object->volume->cache;
+ struct file *file;
+ struct path path;
+ int ret;
- if (ret == -ETIMEDOUT)
- goto mark_active_timed_out;
+ _enter("%pd", dentry);
- _debug("=== OBTAINED_OBJECT ===");
+ if (!cachefiles_mark_inode_in_use(object, dentry))
+ return false;
- if (object->new) {
- /* attach data to a newly constructed terminal object */
- ret = cachefiles_set_object_xattr(object, auxdata);
- if (ret < 0)
- goto check_error;
- } else {
- /* always update the atime on an object we've just looked up
- * (this is used to keep track of culling, and atimes are only
- * updated by read, write and readdir but not lookup or
- * open) */
- path.dentry = next;
- touch_atime(&path);
- }
-
- /* open a file interface onto a data file */
- if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
- if (d_is_reg(object->dentry)) {
- const struct address_space_operations *aops;
-
- ret = -EPERM;
- aops = d_backing_inode(object->dentry)->i_mapping->a_ops;
- if (!aops->bmap)
- goto check_error;
- if (object->dentry->d_sb->s_blocksize > PAGE_SIZE)
- goto check_error;
-
- object->backer = object->dentry;
- } else {
- BUG(); // TODO: open file in data-class subdir
- }
+ /* We need to open a file interface onto a data file now as we can't do
+ * it on demand because writeback called from do_exit() sees
+ * current->fs == NULL - which breaks d_path() called from ext4 open.
+ */
+ path.mnt = cache->mnt;
+ path.dentry = dentry;
+ file = open_with_fake_path(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
+ d_backing_inode(dentry), cache->cache_cred);
+ if (IS_ERR(file)) {
+ trace_cachefiles_vfs_error(object, d_backing_inode(dentry),
+ PTR_ERR(file),
+ cachefiles_trace_open_error);
+ goto error;
}
- object->new = 0;
- fscache_obtained_object(&object->fscache);
-
- _leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino);
- return 0;
-
-no_space_error:
- fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE);
-create_error:
- _debug("create error %d", ret);
- if (ret == -EIO)
- cachefiles_io_error(cache, "Create/mkdir failed");
- goto error;
+ if (unlikely(!file->f_op->read_iter) ||
+ unlikely(!file->f_op->write_iter)) {
+ pr_notice("Cache does not support read_iter and write_iter\n");
+ goto error_fput;
+ }
+ _debug("file -> %pd positive", dentry);
-mark_active_timed_out:
- _debug("mark active timed out");
- goto release_dentry;
+ ret = cachefiles_check_auxdata(object, file);
+ if (ret < 0)
+ goto check_failed;
-check_error:
- _debug("check error %d", ret);
- cachefiles_mark_object_inactive(
- cache, object, d_backing_inode(object->dentry)->i_blocks);
-release_dentry:
- dput(object->dentry);
- object->dentry = NULL;
- goto error_out;
-
-delete_error:
- _debug("delete error %d", ret);
- goto error_out2;
+ object->file = file;
-lookup_error:
- _debug("lookup error %ld", PTR_ERR(next));
- ret = PTR_ERR(next);
- if (ret == -EIO)
- cachefiles_io_error(cache, "Lookup failed");
- next = NULL;
+ /* Always update the atime on an object we've just looked up (this is
+ * used to keep track of culling, and atimes are only updated by read,
+ * write and readdir but not lookup or open).
+ */
+ touch_atime(&file->f_path);
+ dput(dentry);
+ return true;
+
+check_failed:
+ fscache_cookie_lookup_negative(object->cookie);
+ cachefiles_unmark_inode_in_use(object, file);
+ if (ret == -ESTALE) {
+ fput(file);
+ dput(dentry);
+ return cachefiles_create_file(object);
+ }
+error_fput:
+ fput(file);
error:
- inode_unlock(d_inode(dir));
- dput(next);
-error_out2:
- dput(dir);
-error_out:
- _leave(" = error %d", -ret);
- return ret;
+ dput(dentry);
+ return false;
}
/*
- * get a subdirectory
+ * Look up the backing file for an object in the volume's fanout directory,
+ * creating a new tmpfile to back it if it doesn't exist or is stale
*/
-struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
- struct dentry *dir,
- const char *dirname)
+bool cachefiles_look_up_object(struct cachefiles_object *object)
{
- struct dentry *subdir;
- struct path path;
+ struct cachefiles_volume *volume = object->volume;
+ struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
int ret;
- _enter(",,%s", dirname);
-
- /* search the current directory for the element name */
- inode_lock(d_inode(dir));
-
-retry:
- subdir = lookup_one_len(dirname, dir, strlen(dirname));
- if (IS_ERR(subdir)) {
- if (PTR_ERR(subdir) == -ENOMEM)
- goto nomem_d_alloc;
- goto lookup_error;
+ _enter("OBJ%x,%s,", object->debug_id, object->d_name);
+
+ /* Look up path "cache/vol/fanout/file". */
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ dentry = lookup_positive_unlocked(object->d_name, fan,
+ object->d_name_len);
+ else
+ dentry = ERR_PTR(ret);
+ trace_cachefiles_lookup(object, fan, dentry);
+ if (IS_ERR(dentry)) {
+ if (dentry == ERR_PTR(-ENOENT))
+ goto new_file;
+ if (dentry == ERR_PTR(-EIO))
+ cachefiles_io_error_obj(object, "Lookup failed");
+ return false;
+ }
+
+ if (!d_is_reg(dentry)) {
+ pr_err("%pd is not a file\n", dentry);
+ inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
+ ret = cachefiles_bury_object(volume->cache, object, fan, dentry,
+ FSCACHE_OBJECT_IS_WEIRD);
+ dput(dentry);
+ if (ret < 0)
+ return false;
+ goto new_file;
}
- _debug("subdir -> %pd %s",
- subdir, d_backing_inode(subdir) ? "positive" : "negative");
+ if (!cachefiles_open_file(object, dentry))
+ return false;
- /* we need to create the subdir if it doesn't exist yet */
- if (d_is_negative(subdir)) {
- ret = cachefiles_has_space(cache, 1, 0);
- if (ret < 0)
- goto mkdir_error;
+ _leave(" = t [%lu]", file_inode(object->file)->i_ino);
+ return true;
- _debug("attempt mkdir");
+new_file:
+ fscache_cookie_lookup_negative(object->cookie);
+ return cachefiles_create_file(object);
+}
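/*
 * Editorial notes on two patterns used above (not part of the patch):
 *
 * 1. Fanout: objects are spread over 256 subdirectories of the volume, and
 *    the bucket is simply the low byte of the cookie's key hash, i.e.
 *    volume->fanout[(u8)object->cookie->key_hash].
 *
 * 2. Error injection: cachefiles_inject_read_error() returns 0 in normal
 *    operation, so the idiom
 *
 *        ret = cachefiles_inject_read_error();
 *        if (ret == 0)
 *                dentry = lookup_positive_unlocked(...);
 *        else
 *                dentry = ERR_PTR(ret);
 *
 *    lets a debugging knob substitute a fault for any backing-fs call
 *    without complicating the success path.
 */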
- path.mnt = cache->mnt;
- path.dentry = dir;
- ret = security_path_mkdir(&path, subdir, 0700);
- if (ret < 0)
- goto mkdir_error;
- ret = vfs_mkdir(&init_user_ns, d_inode(dir), subdir, 0700);
- if (ret < 0)
- goto mkdir_error;
+/*
+ * Attempt to link a temporary file into its rightful place in the cache.
+ */
+bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
+ struct cachefiles_object *object)
+{
+ struct cachefiles_volume *volume = object->volume;
+ struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
+ bool success = false;
+ int ret;
- if (unlikely(d_unhashed(subdir))) {
- dput(subdir);
- goto retry;
+ _enter(",%pD", object->file);
+
+ inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
+ else
+ dentry = ERR_PTR(ret);
+ if (IS_ERR(dentry)) {
+ trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
+ cachefiles_trace_lookup_error);
+ _debug("lookup fail %ld", PTR_ERR(dentry));
+ goto out_unlock;
+ }
+
+ if (!d_is_negative(dentry)) {
+ if (d_backing_inode(dentry) == file_inode(object->file)) {
+ success = true;
+ goto out_dput;
}
- ASSERT(d_backing_inode(subdir));
- _debug("mkdir -> %pd{ino=%lu}",
- subdir, d_backing_inode(subdir)->i_ino);
- }
-
- inode_unlock(d_inode(dir));
-
- /* we need to make sure the subdir is a directory */
- ASSERT(d_backing_inode(subdir));
+ ret = cachefiles_unlink(volume->cache, object, fan, dentry,
+ FSCACHE_OBJECT_IS_STALE);
+ if (ret < 0)
+ goto out_dput;
- if (!d_can_lookup(subdir)) {
- pr_err("%s is not a directory\n", dirname);
- ret = -EIO;
- goto check_error;
+ dput(dentry);
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
+ else
+ dentry = ERR_PTR(ret);
+ if (IS_ERR(dentry)) {
+ trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
+ cachefiles_trace_lookup_error);
+ _debug("lookup fail %ld", PTR_ERR(dentry));
+ goto out_unlock;
+ }
}
- ret = -EPERM;
- if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
- !d_backing_inode(subdir)->i_op->lookup ||
- !d_backing_inode(subdir)->i_op->mkdir ||
- !d_backing_inode(subdir)->i_op->create ||
- !d_backing_inode(subdir)->i_op->rename ||
- !d_backing_inode(subdir)->i_op->rmdir ||
- !d_backing_inode(subdir)->i_op->unlink)
- goto check_error;
-
- _leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
- return subdir;
-
-check_error:
- dput(subdir);
- _leave(" = %d [check]", ret);
- return ERR_PTR(ret);
-
-mkdir_error:
- inode_unlock(d_inode(dir));
- dput(subdir);
- pr_err("mkdir %s failed with error %d\n", dirname, ret);
- return ERR_PTR(ret);
-
-lookup_error:
- inode_unlock(d_inode(dir));
- ret = PTR_ERR(subdir);
- pr_err("Lookup %s failed with error %d\n", dirname, ret);
- return ERR_PTR(ret);
-
-nomem_d_alloc:
- inode_unlock(d_inode(dir));
- _leave(" = -ENOMEM");
- return ERR_PTR(-ENOMEM);
+ ret = cachefiles_inject_read_error();
+ if (ret == 0)
+ ret = vfs_link(object->file->f_path.dentry, &init_user_ns,
+ d_inode(fan), dentry, NULL);
+ if (ret < 0) {
+ trace_cachefiles_vfs_error(object, d_inode(fan), ret,
+ cachefiles_trace_link_error);
+ _debug("link fail %d", ret);
+ } else {
+ trace_cachefiles_link(object, file_inode(object->file));
+ spin_lock(&object->lock);
+ /* TODO: Do we want to switch the file pointer to the new dentry? */
+ clear_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
+ spin_unlock(&object->lock);
+ success = true;
+ }
+
+out_dput:
+ dput(dentry);
+out_unlock:
+ inode_unlock(d_inode(fan));
+ _leave(" = %u", success);
+ return success;
}
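/*
 * Editorial sketch (not part of the patch): the userspace analogue of the
 * tmpfile-then-link trick used above.  An O_TMPFILE file has no name until
 * linkat() gives it one, so a half-populated cache file can never be seen
 * at its final path.  Paths and names are illustrative; an unprivileged
 * caller uses the /proc/self/fd indirection shown rather than AT_EMPTY_PATH.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int commit_tmpfile(const char *dir, const char *final_path)
{
	char proc[64];
	int fd = open(dir, O_TMPFILE | O_RDWR, 0600);

	if (fd < 0)
		return -1;
	/* ... write the object's data to fd ... */
	snprintf(proc, sizeof(proc), "/proc/self/fd/%d", fd);
	/* Atomically give the anonymous file its permanent name */
	if (linkat(AT_FDCWD, proc, AT_FDCWD, final_path, AT_SYMLINK_FOLLOW) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}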
/*
- * find out if an object is in use or not
- * - if finds object and it's not in use:
- * - returns a pointer to the object and a reference on it
- * - returns with the directory locked
+ * Look up an inode to be checked or culled. Return -EBUSY if the inode is
+ * marked in use.
*/
-static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
- struct dentry *dir,
- char *filename)
+static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
+ struct dentry *dir,
+ char *filename)
{
- struct cachefiles_object *object;
- struct rb_node *_n;
struct dentry *victim;
- int ret;
-
- //_enter(",%pd/,%s",
- // dir, filename);
+ int ret = -ENOENT;
- /* look up the victim */
inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
victim = lookup_one_len(filename, dir, strlen(filename));
if (IS_ERR(victim))
goto lookup_error;
-
- //_debug("victim -> %pd %s",
- // victim, d_backing_inode(victim) ? "positive" : "negative");
-
- /* if the object is no longer there then we probably retired the object
- * at the netfs's request whilst the cull was in progress
- */
- if (d_is_negative(victim)) {
- inode_unlock(d_inode(dir));
- dput(victim);
- _leave(" = -ENOENT [absent]");
- return ERR_PTR(-ENOENT);
- }
-
- /* check to see if we're using this object */
- read_lock(&cache->active_lock);
-
- _n = cache->active_nodes.rb_node;
-
- while (_n) {
- object = rb_entry(_n, struct cachefiles_object, active_node);
-
- if (object->dentry > victim)
- _n = _n->rb_left;
- else if (object->dentry < victim)
- _n = _n->rb_right;
- else
- goto object_in_use;
- }
-
- read_unlock(&cache->active_lock);
-
- //_leave(" = %pd", victim);
+ if (d_is_negative(victim))
+ goto lookup_put;
+ if (d_inode(victim)->i_flags & S_KERNEL_FILE)
+ goto lookup_busy;
return victim;
-object_in_use:
- read_unlock(&cache->active_lock);
+lookup_busy:
+ ret = -EBUSY;
+lookup_put:
inode_unlock(d_inode(dir));
dput(victim);
- //_leave(" = -EBUSY [in use]");
- return ERR_PTR(-EBUSY);
+ return ERR_PTR(ret);
lookup_error:
inode_unlock(d_inode(dir));
ret = PTR_ERR(victim);
- if (ret == -ENOENT) {
- /* file or dir now absent - probably retired by netfs */
- _leave(" = -ESTALE [absent]");
- return ERR_PTR(-ESTALE);
- }
+ if (ret == -ENOENT)
+ return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */
if (ret == -EIO) {
cachefiles_io_error(cache, "Lookup failed");
@@ -931,46 +770,46 @@ lookup_error:
ret = -EIO;
}
- _leave(" = %d", ret);
return ERR_PTR(ret);
}
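/*
 * Editorial note (not part of the patch): the deleted code above answered
 * "is this object in use?" by searching cache->active_nodes, an rbtree of
 * active dentries maintained alongside the cache.  The rewrite marks the
 * backing inode itself instead: an open cache object sets S_KERNEL_FILE on
 * its inode, cachefiles_cull() below sets the same flag under inode_lock()
 * to claim a victim, and the lookup here only has to test one bit.
 */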
/*
- * cull an object if it's not in use
+ * Cull an object if it's not in use
* - called only by cache manager daemon
*/
int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
char *filename)
{
struct dentry *victim;
+ struct inode *inode;
int ret;
_enter(",%pd/,%s", dir, filename);
- victim = cachefiles_check_active(cache, dir, filename);
+ victim = cachefiles_lookup_for_cull(cache, dir, filename);
if (IS_ERR(victim))
return PTR_ERR(victim);
- _debug("victim -> %pd %s",
- victim, d_backing_inode(victim) ? "positive" : "negative");
-
- /* okay... the victim is not being used so we can cull it
- * - start by marking it as stale
- */
- _debug("victim is cullable");
-
- ret = cachefiles_remove_object_xattr(cache, victim);
+ /* check to see if someone is using this object */
+ inode = d_inode(victim);
+ inode_lock(inode);
+ if (inode->i_flags & S_KERNEL_FILE) {
+ ret = -EBUSY;
+ } else {
+ /* Stop the cache from picking it back up */
+ inode->i_flags |= S_KERNEL_FILE;
+ ret = 0;
+ }
+ inode_unlock(inode);
if (ret < 0)
goto error_unlock;
- /* actually remove the victim (drops the dir mutex) */
- _debug("bury");
-
- ret = cachefiles_bury_object(cache, NULL, dir, victim, false,
+ ret = cachefiles_bury_object(cache, NULL, dir, victim,
FSCACHE_OBJECT_WAS_CULLED);
if (ret < 0)
goto error;
+ fscache_count_culled();
dput(victim);
_leave(" = 0");
return 0;
@@ -979,11 +818,8 @@ error_unlock:
inode_unlock(d_inode(dir));
error:
dput(victim);
- if (ret == -ENOENT) {
- /* file or dir now absent - probably retired by netfs */
- _leave(" = -ESTALE [absent]");
- return -ESTALE;
- }
+ if (ret == -ENOENT)
+ return -ESTALE; /* Probably got retired by the netfs */
if (ret != -ENOMEM) {
pr_err("Internal error: %d\n", ret);
@@ -995,7 +831,7 @@ error:
}
/*
- * find out if an object is in use or not
+ * Find out if an object is in use or not
* - called only by cache manager daemon
* - returns -EBUSY or 0 to indicate whether an object is in use or not
*/
@@ -1003,16 +839,13 @@ int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
char *filename)
{
struct dentry *victim;
+ int ret = 0;
- //_enter(",%pd/,%s",
- // dir, filename);
-
- victim = cachefiles_check_active(cache, dir, filename);
+ victim = cachefiles_lookup_for_cull(cache, dir, filename);
if (IS_ERR(victim))
return PTR_ERR(victim);
inode_unlock(d_inode(dir));
dput(victim);
- //_leave(" = 0");
- return 0;
+ return ret;
}
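/*
 * Editorial sketch (not part of the patch): how cachefiles_cull() above is
 * typically reached.  The userspace cache manager (cachefilesd) opens
 * /dev/cachefiles, chdir()s into the directory holding a culling candidate
 * and writes a "cull" command naming the file; -EBUSY from the write means
 * the object is pinned (S_KERNEL_FILE) and should be retried later.  The
 * daemon interface details here are from my reading of the cachefiles
 * documentation - treat them as illustrative.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int cull_candidate(int ctlfd, const char *dir, const char *name)
{
	char cmd[1024];
	int len;

	if (chdir(dir) < 0)
		return -1;
	len = snprintf(cmd, sizeof(cmd), "cull %s", name);
	return write(ctlfd, cmd, len) == len ? 0 : -1;
}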
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
deleted file mode 100644
index fcf4f3b72923..000000000000
--- a/fs/cachefiles/rdwr.c
+++ /dev/null
@@ -1,972 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Storage object read/write
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/mount.h>
-#include <linux/slab.h>
-#include <linux/file.h>
-#include <linux/swap.h>
-#include "internal.h"
-
-/*
- * detect wake up events generated by the unlocking of pages in which we're
- * interested
- * - we use this to detect read completion of backing pages
- * - the caller holds the waitqueue lock
- */
-static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
- int sync, void *_key)
-{
- struct cachefiles_one_read *monitor =
- container_of(wait, struct cachefiles_one_read, monitor);
- struct cachefiles_object *object;
- struct fscache_retrieval *op = monitor->op;
- struct wait_page_key *key = _key;
- struct folio *folio = wait->private;
-
- ASSERT(key);
-
- _enter("{%lu},%u,%d,{%p,%u}",
- monitor->netfs_page->index, mode, sync,
- key->folio, key->bit_nr);
-
- if (key->folio != folio || key->bit_nr != PG_locked)
- return 0;
-
- _debug("--- monitor %p %lx ---", folio, folio->flags);
-
- if (!folio_test_uptodate(folio) && !folio_test_error(folio)) {
- /* unlocked, not uptodate and not erronous? */
- _debug("page probably truncated");
- }
-
- /* remove from the waitqueue */
- list_del(&wait->entry);
-
- /* move onto the action list and queue for FS-Cache thread pool */
- ASSERT(op);
-
- /* We need to temporarily bump the usage count as we don't own a ref
- * here otherwise cachefiles_read_copier() may free the op between the
- * monitor being enqueued on the op->to_do list and the op getting
- * enqueued on the work queue.
- */
- fscache_get_retrieval(op);
-
- object = container_of(op->op.object, struct cachefiles_object, fscache);
- spin_lock(&object->work_lock);
- list_add_tail(&monitor->op_link, &op->to_do);
- fscache_enqueue_retrieval(op);
- spin_unlock(&object->work_lock);
-
- fscache_put_retrieval(op);
- return 0;
-}
-
-/*
- * handle a probably truncated page
- * - check to see if the page is still relevant and reissue the read if
- * possible
- * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
- * must wait again and 0 if successful
- */
-static int cachefiles_read_reissue(struct cachefiles_object *object,
- struct cachefiles_one_read *monitor)
-{
- struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
- struct page *backpage = monitor->back_page, *backpage2;
- int ret;
-
- _enter("{ino=%lx},{%lx,%lx}",
- d_backing_inode(object->backer)->i_ino,
- backpage->index, backpage->flags);
-
- /* skip if the page was truncated away completely */
- if (backpage->mapping != bmapping) {
- _leave(" = -ENODATA [mapping]");
- return -ENODATA;
- }
-
- backpage2 = find_get_page(bmapping, backpage->index);
- if (!backpage2) {
- _leave(" = -ENODATA [gone]");
- return -ENODATA;
- }
-
- if (backpage != backpage2) {
- put_page(backpage2);
- _leave(" = -ENODATA [different]");
- return -ENODATA;
- }
-
- /* the page is still there and we already have a ref on it, so we don't
- * need a second */
- put_page(backpage2);
-
- INIT_LIST_HEAD(&monitor->op_link);
- folio_add_wait_queue(page_folio(backpage), &monitor->monitor);
-
- if (trylock_page(backpage)) {
- ret = -EIO;
- if (PageError(backpage))
- goto unlock_discard;
- ret = 0;
- if (PageUptodate(backpage))
- goto unlock_discard;
-
- _debug("reissue read");
- ret = bmapping->a_ops->readpage(NULL, backpage);
- if (ret < 0)
- goto discard;
- }
-
- /* but the page may have been read before the monitor was installed, so
- * the monitor may miss the event - so we have to ensure that we do get
- * one in such a case */
- if (trylock_page(backpage)) {
- _debug("jumpstart %p {%lx}", backpage, backpage->flags);
- unlock_page(backpage);
- }
-
- /* it'll reappear on the todo list */
- _leave(" = -EINPROGRESS");
- return -EINPROGRESS;
-
-unlock_discard:
- unlock_page(backpage);
-discard:
- spin_lock_irq(&object->work_lock);
- list_del(&monitor->op_link);
- spin_unlock_irq(&object->work_lock);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * copy data from backing pages to netfs pages to complete a read operation
- * - driven by FS-Cache's thread pool
- */
-static void cachefiles_read_copier(struct fscache_operation *_op)
-{
- struct cachefiles_one_read *monitor;
- struct cachefiles_object *object;
- struct fscache_retrieval *op;
- int error, max;
-
- op = container_of(_op, struct fscache_retrieval, op);
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
-
- _enter("{ino=%lu}", d_backing_inode(object->backer)->i_ino);
-
- max = 8;
- spin_lock_irq(&object->work_lock);
-
- while (!list_empty(&op->to_do)) {
- monitor = list_entry(op->to_do.next,
- struct cachefiles_one_read, op_link);
- list_del(&monitor->op_link);
-
- spin_unlock_irq(&object->work_lock);
-
- _debug("- copy {%lu}", monitor->back_page->index);
-
- recheck:
- if (test_bit(FSCACHE_COOKIE_INVALIDATING,
- &object->fscache.cookie->flags)) {
- error = -ESTALE;
- } else if (PageUptodate(monitor->back_page)) {
- copy_highpage(monitor->netfs_page, monitor->back_page);
- fscache_mark_page_cached(monitor->op,
- monitor->netfs_page);
- error = 0;
- } else if (!PageError(monitor->back_page)) {
- /* the page has probably been truncated */
- error = cachefiles_read_reissue(object, monitor);
- if (error == -EINPROGRESS)
- goto next;
- goto recheck;
- } else {
- cachefiles_io_error_obj(
- object,
- "Readpage failed on backing file %lx",
- (unsigned long) monitor->back_page->flags);
- error = -EIO;
- }
-
- put_page(monitor->back_page);
-
- fscache_end_io(op, monitor->netfs_page, error);
- put_page(monitor->netfs_page);
- fscache_retrieval_complete(op, 1);
- fscache_put_retrieval(op);
- kfree(monitor);
-
- next:
- /* let the thread pool have some air occasionally */
- max--;
- if (max < 0 || need_resched()) {
- if (!list_empty(&op->to_do))
- fscache_enqueue_retrieval(op);
- _leave(" [maxed out]");
- return;
- }
-
- spin_lock_irq(&object->work_lock);
- }
-
- spin_unlock_irq(&object->work_lock);
- _leave("");
-}
-
-/*
- * read the corresponding page to the given set from the backing file
- * - an uncertain page is simply discarded, to be tried again another time
- */
-static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
- struct fscache_retrieval *op,
- struct page *netpage)
-{
- struct cachefiles_one_read *monitor;
- struct address_space *bmapping;
- struct page *newpage, *backpage;
- int ret;
-
- _enter("");
-
- _debug("read back %p{%lu,%d}",
- netpage, netpage->index, page_count(netpage));
-
- monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
- if (!monitor)
- goto nomem;
-
- monitor->netfs_page = netpage;
- monitor->op = fscache_get_retrieval(op);
-
- init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);
-
- /* attempt to get hold of the backing page */
- bmapping = d_backing_inode(object->backer)->i_mapping;
- newpage = NULL;
-
- for (;;) {
- backpage = find_get_page(bmapping, netpage->index);
- if (backpage)
- goto backing_page_already_present;
-
- if (!newpage) {
- newpage = __page_cache_alloc(cachefiles_gfp);
- if (!newpage)
- goto nomem_monitor;
- }
-
- ret = add_to_page_cache_lru(newpage, bmapping,
- netpage->index, cachefiles_gfp);
- if (ret == 0)
- goto installed_new_backing_page;
- if (ret != -EEXIST)
- goto nomem_page;
- }
-
- /* we've installed a new backing page, so now we need to start
- * it reading */
-installed_new_backing_page:
- _debug("- new %p", newpage);
-
- backpage = newpage;
- newpage = NULL;
-
-read_backing_page:
- ret = bmapping->a_ops->readpage(NULL, backpage);
- if (ret < 0)
- goto read_error;
-
- /* set the monitor to transfer the data across */
-monitor_backing_page:
- _debug("- monitor add");
-
- /* install the monitor */
- get_page(monitor->netfs_page);
- get_page(backpage);
- monitor->back_page = backpage;
- monitor->monitor.private = backpage;
- folio_add_wait_queue(page_folio(backpage), &monitor->monitor);
- monitor = NULL;
-
- /* but the page may have been read before the monitor was installed, so
- * the monitor may miss the event - so we have to ensure that we do get
- * one in such a case */
- if (trylock_page(backpage)) {
- _debug("jumpstart %p {%lx}", backpage, backpage->flags);
- unlock_page(backpage);
- }
- goto success;
-
- /* if the backing page is already present, it can be in one of
- * three states: read in progress, read failed or read okay */
-backing_page_already_present:
- _debug("- present");
-
- if (newpage) {
- put_page(newpage);
- newpage = NULL;
- }
-
- if (PageError(backpage))
- goto io_error;
-
- if (PageUptodate(backpage))
- goto backing_page_already_uptodate;
-
- if (!trylock_page(backpage))
- goto monitor_backing_page;
- _debug("read %p {%lx}", backpage, backpage->flags);
- goto read_backing_page;
-
- /* the backing page is already up to date, attach the netfs
- * page to the pagecache and LRU and copy the data across */
-backing_page_already_uptodate:
- _debug("- uptodate");
-
- fscache_mark_page_cached(op, netpage);
-
- copy_highpage(netpage, backpage);
- fscache_end_io(op, netpage, 0);
- fscache_retrieval_complete(op, 1);
-
-success:
- _debug("success");
- ret = 0;
-
-out:
- if (backpage)
- put_page(backpage);
- if (monitor) {
- fscache_put_retrieval(monitor->op);
- kfree(monitor);
- }
- _leave(" = %d", ret);
- return ret;
-
-read_error:
- _debug("read error %d", ret);
- if (ret == -ENOMEM) {
- fscache_retrieval_complete(op, 1);
- goto out;
- }
-io_error:
- cachefiles_io_error_obj(object, "Page read error on backing file");
- fscache_retrieval_complete(op, 1);
- ret = -ENOBUFS;
- goto out;
-
-nomem_page:
- put_page(newpage);
-nomem_monitor:
- fscache_put_retrieval(monitor->op);
- kfree(monitor);
-nomem:
- fscache_retrieval_complete(op, 1);
- _leave(" = -ENOMEM");
- return -ENOMEM;
-}
-
-/*
- * read a page from the cache or allocate a block in which to store it
- * - cache withdrawal is prevented by the caller
- * - returns -EINTR if interrupted
- * - returns -ENOMEM if ran out of memory
- * - returns -ENOBUFS if no buffers can be made available
- * - returns -ENOBUFS if page is beyond EOF
- * - if the page is backed by a block in the cache:
- * - a read will be started which will call the callback on completion
- * - 0 will be returned
- * - else if the page is unbacked:
- * - the metadata will be retained
- * - -ENODATA will be returned
- */
-int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
- struct page *page,
- gfp_t gfp)
-{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- struct inode *inode;
- sector_t block;
- unsigned shift;
- int ret, ret2;
-
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- _enter("{%p},{%lx},,,", object, page->index);
-
- if (!object->backer)
- goto enobufs;
-
- inode = d_backing_inode(object->backer);
- ASSERT(S_ISREG(inode->i_mode));
-
- /* calculate the shift required to use bmap */
- shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
-
- op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
- op->op.flags |= FSCACHE_OP_ASYNC;
- op->op.processor = cachefiles_read_copier;
-
- /* we assume the absence or presence of the first block is a good
- * enough indication for the page as a whole
- * - TODO: don't use bmap() for this as it is _not_ actually good
- * enough for this as it doesn't indicate errors, but it's all we've
- * got for the moment
- */
- block = page->index;
- block <<= shift;
-
- ret2 = bmap(inode, &block);
- ASSERT(ret2 == 0);
-
- _debug("%llx -> %llx",
- (unsigned long long) (page->index << shift),
- (unsigned long long) block);
-
- if (block) {
- /* submit the apparently valid page to the backing fs to be
- * read from disk */
- ret = cachefiles_read_backing_file_one(object, op, page);
- } else if (cachefiles_has_space(cache, 0, 1) == 0) {
- /* there's space in the cache we can use */
- fscache_mark_page_cached(op, page);
- fscache_retrieval_complete(op, 1);
- ret = -ENODATA;
- } else {
- goto enobufs;
- }
-
- _leave(" = %d", ret);
- return ret;
-
-enobufs:
- fscache_retrieval_complete(op, 1);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-}
-
-/*
- * read the corresponding pages to the given set from the backing file
- * - any uncertain pages are simply discarded, to be tried again another time
- */
-static int cachefiles_read_backing_file(struct cachefiles_object *object,
- struct fscache_retrieval *op,
- struct list_head *list)
-{
- struct cachefiles_one_read *monitor = NULL;
- struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping;
- struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
- int ret = 0;
-
- _enter("");
-
- list_for_each_entry_safe(netpage, _n, list, lru) {
- list_del(&netpage->lru);
-
- _debug("read back %p{%lu,%d}",
- netpage, netpage->index, page_count(netpage));
-
- if (!monitor) {
- monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
- if (!monitor)
- goto nomem;
-
- monitor->op = fscache_get_retrieval(op);
- init_waitqueue_func_entry(&monitor->monitor,
- cachefiles_read_waiter);
- }
-
- for (;;) {
- backpage = find_get_page(bmapping, netpage->index);
- if (backpage)
- goto backing_page_already_present;
-
- if (!newpage) {
- newpage = __page_cache_alloc(cachefiles_gfp);
- if (!newpage)
- goto nomem;
- }
-
- ret = add_to_page_cache_lru(newpage, bmapping,
- netpage->index,
- cachefiles_gfp);
- if (ret == 0)
- goto installed_new_backing_page;
- if (ret != -EEXIST)
- goto nomem;
- }
-
- /* we've installed a new backing page, so now we need
- * to start it reading */
- installed_new_backing_page:
- _debug("- new %p", newpage);
-
- backpage = newpage;
- newpage = NULL;
-
- reread_backing_page:
- ret = bmapping->a_ops->readpage(NULL, backpage);
- if (ret < 0)
- goto read_error;
-
- /* add the netfs page to the pagecache and LRU, and set the
- * monitor to transfer the data across */
- monitor_backing_page:
- _debug("- monitor add");
-
- ret = add_to_page_cache_lru(netpage, op->mapping,
- netpage->index, cachefiles_gfp);
- if (ret < 0) {
- if (ret == -EEXIST) {
- put_page(backpage);
- backpage = NULL;
- put_page(netpage);
- netpage = NULL;
- fscache_retrieval_complete(op, 1);
- continue;
- }
- goto nomem;
- }
-
- /* install a monitor */
- get_page(netpage);
- monitor->netfs_page = netpage;
-
- get_page(backpage);
- monitor->back_page = backpage;
- monitor->monitor.private = backpage;
- folio_add_wait_queue(page_folio(backpage), &monitor->monitor);
- monitor = NULL;
-
- /* but the page may have been read before the monitor was
- * installed, so the monitor may miss the event - so we have to
- * ensure that we do get one in such a case */
- if (trylock_page(backpage)) {
- _debug("2unlock %p {%lx}", backpage, backpage->flags);
- unlock_page(backpage);
- }
-
- put_page(backpage);
- backpage = NULL;
-
- put_page(netpage);
- netpage = NULL;
- continue;
-
- /* if the backing page is already present, it can be in one of
- * three states: read in progress, read failed or read okay */
- backing_page_already_present:
- _debug("- present %p", backpage);
-
- if (PageError(backpage))
- goto io_error;
-
- if (PageUptodate(backpage))
- goto backing_page_already_uptodate;
-
- _debug("- not ready %p{%lx}", backpage, backpage->flags);
-
- if (!trylock_page(backpage))
- goto monitor_backing_page;
-
- if (PageError(backpage)) {
- _debug("error %lx", backpage->flags);
- unlock_page(backpage);
- goto io_error;
- }
-
- if (PageUptodate(backpage))
- goto backing_page_already_uptodate_unlock;
-
- /* we've locked a page that's neither up to date nor erroneous,
- * so we need to attempt to read it again */
- goto reread_backing_page;
-
- /* the backing page is already up to date, attach the netfs
- * page to the pagecache and LRU and copy the data across */
- backing_page_already_uptodate_unlock:
- _debug("uptodate %lx", backpage->flags);
- unlock_page(backpage);
- backing_page_already_uptodate:
- _debug("- uptodate");
-
- ret = add_to_page_cache_lru(netpage, op->mapping,
- netpage->index, cachefiles_gfp);
- if (ret < 0) {
- if (ret == -EEXIST) {
- put_page(backpage);
- backpage = NULL;
- put_page(netpage);
- netpage = NULL;
- fscache_retrieval_complete(op, 1);
- continue;
- }
- goto nomem;
- }
-
- copy_highpage(netpage, backpage);
-
- put_page(backpage);
- backpage = NULL;
-
- fscache_mark_page_cached(op, netpage);
-
- /* the netpage is unlocked and marked up to date here */
- fscache_end_io(op, netpage, 0);
- put_page(netpage);
- netpage = NULL;
- fscache_retrieval_complete(op, 1);
- continue;
- }
-
- netpage = NULL;
-
- _debug("out");
-
-out:
- /* tidy up */
- if (newpage)
- put_page(newpage);
- if (netpage)
- put_page(netpage);
- if (backpage)
- put_page(backpage);
- if (monitor) {
- fscache_put_retrieval(op);
- kfree(monitor);
- }
-
- list_for_each_entry_safe(netpage, _n, list, lru) {
- list_del(&netpage->lru);
- put_page(netpage);
- fscache_retrieval_complete(op, 1);
- }
-
- _leave(" = %d", ret);
- return ret;
-
-nomem:
- _debug("nomem");
- ret = -ENOMEM;
- goto record_page_complete;
-
-read_error:
- _debug("read error %d", ret);
- if (ret == -ENOMEM)
- goto record_page_complete;
-io_error:
- cachefiles_io_error_obj(object, "Page read error on backing file");
- ret = -ENOBUFS;
-record_page_complete:
- fscache_retrieval_complete(op, 1);
- goto out;
-}
-
-/*
- * read a list of pages from the cache or allocate blocks in which to store
- * them
- */
-int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
- struct list_head *pages,
- unsigned *nr_pages,
- gfp_t gfp)
-{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- struct list_head backpages;
- struct pagevec pagevec;
- struct inode *inode;
- struct page *page, *_n;
- unsigned shift, nrbackpages;
- int ret, ret2, space;
-
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- _enter("{OBJ%x,%d},,%d,,",
- object->fscache.debug_id, atomic_read(&op->op.usage),
- *nr_pages);
-
- if (!object->backer)
- goto all_enobufs;
-
- space = 1;
- if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
- space = 0;
-
- inode = d_backing_inode(object->backer);
- ASSERT(S_ISREG(inode->i_mode));
-
- /* calculate the shift required to use bmap */
- shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
-
- pagevec_init(&pagevec);
-
- op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
- op->op.flags |= FSCACHE_OP_ASYNC;
- op->op.processor = cachefiles_read_copier;
-
- INIT_LIST_HEAD(&backpages);
- nrbackpages = 0;
-
- ret = space ? -ENODATA : -ENOBUFS;
- list_for_each_entry_safe(page, _n, pages, lru) {
- sector_t block;
-
- /* we assume the absence or presence of the first block is a
- * good enough indication for the page as a whole
- * - TODO: don't use bmap() for this as it is _not_ actually
- * good enough for this as it doesn't indicate errors, but
- * it's all we've got for the moment
- */
- block = page->index;
- block <<= shift;
-
- ret2 = bmap(inode, &block);
- ASSERT(ret2 == 0);
-
- _debug("%llx -> %llx",
- (unsigned long long) (page->index << shift),
- (unsigned long long) block);
-
- if (block) {
- /* we have data - add it to the list to give to the
- * backing fs */
- list_move(&page->lru, &backpages);
- (*nr_pages)--;
- nrbackpages++;
- } else if (space && pagevec_add(&pagevec, page) == 0) {
- fscache_mark_pages_cached(op, &pagevec);
- fscache_retrieval_complete(op, 1);
- ret = -ENODATA;
- } else {
- fscache_retrieval_complete(op, 1);
- }
- }
-
- if (pagevec_count(&pagevec) > 0)
- fscache_mark_pages_cached(op, &pagevec);
-
- if (list_empty(pages))
- ret = 0;
-
- /* submit the apparently valid pages to the backing fs to be read from
- * disk */
- if (nrbackpages > 0) {
- ret2 = cachefiles_read_backing_file(object, op, &backpages);
- if (ret2 == -ENOMEM || ret2 == -EINTR)
- ret = ret2;
- }
-
- _leave(" = %d [nr=%u%s]",
- ret, *nr_pages, list_empty(pages) ? " empty" : "");
- return ret;
-
-all_enobufs:
- fscache_retrieval_complete(op, *nr_pages);
- return -ENOBUFS;
-}
-
-/*
- * allocate a block in the cache in which to store a page
- * - cache withdrawal is prevented by the caller
- * - returns -EINTR if interrupted
- * - returns -ENOMEM if ran out of memory
- * - returns -ENOBUFS if no buffers can be made available
- * - returns -ENOBUFS if page is beyond EOF
- * - otherwise:
- * - the metadata will be retained
- * - 0 will be returned
- */
-int cachefiles_allocate_page(struct fscache_retrieval *op,
- struct page *page,
- gfp_t gfp)
-{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- int ret;
-
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- _enter("%p,{%lx},", object, page->index);
-
- ret = cachefiles_has_space(cache, 0, 1);
- if (ret == 0)
- fscache_mark_page_cached(op, page);
- else
- ret = -ENOBUFS;
-
- fscache_retrieval_complete(op, 1);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * allocate blocks in the cache in which to store a set of pages
- * - cache withdrawal is prevented by the caller
- * - returns -EINTR if interrupted
- * - returns -ENOMEM if ran out of memory
- * - returns -ENOBUFS if some buffers couldn't be made available
- * - returns -ENOBUFS if some pages are beyond EOF
- * - otherwise:
- * - -ENODATA will be returned
- * - metadata will be retained for any page marked
- */
-int cachefiles_allocate_pages(struct fscache_retrieval *op,
- struct list_head *pages,
- unsigned *nr_pages,
- gfp_t gfp)
-{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- struct pagevec pagevec;
- struct page *page;
- int ret;
-
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- _enter("%p,,,%d,", object, *nr_pages);
-
- ret = cachefiles_has_space(cache, 0, *nr_pages);
- if (ret == 0) {
- pagevec_init(&pagevec);
-
- list_for_each_entry(page, pages, lru) {
- if (pagevec_add(&pagevec, page) == 0)
- fscache_mark_pages_cached(op, &pagevec);
- }
-
- if (pagevec_count(&pagevec) > 0)
- fscache_mark_pages_cached(op, &pagevec);
- ret = -ENODATA;
- } else {
- ret = -ENOBUFS;
- }
-
- fscache_retrieval_complete(op, *nr_pages);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * request a page be stored in the cache
- * - cache withdrawal is prevented by the caller
- * - this request may be ignored if there's no cache block available, in which
- * case -ENOBUFS will be returned
- * - if the op is in progress, 0 will be returned
- */
-int cachefiles_write_page(struct fscache_storage *op, struct page *page)
-{
- struct cachefiles_object *object;
- struct cachefiles_cache *cache;
- struct file *file;
- struct path path;
- loff_t pos, eof;
- size_t len;
- void *data;
- int ret = -ENOBUFS;
-
- ASSERT(op != NULL);
- ASSERT(page != NULL);
-
- object = container_of(op->op.object,
- struct cachefiles_object, fscache);
-
- _enter("%p,%p{%lx},,,", object, page, page->index);
-
- if (!object->backer) {
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
- }
-
- ASSERT(d_is_reg(object->backer));
-
- cache = container_of(object->fscache.cache,
- struct cachefiles_cache, cache);
-
- pos = (loff_t)page->index << PAGE_SHIFT;
-
- /* We mustn't write more data than we have, so we have to beware of a
- * partial page at EOF.
- */
- eof = object->fscache.store_limit_l;
- if (pos >= eof)
- goto error;
-
- /* write the page to the backing filesystem and let it store it in its
- * own time */
- path.mnt = cache->mnt;
- path.dentry = object->backer;
- file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
- goto error_2;
- }
-
- len = PAGE_SIZE;
- if (eof & ~PAGE_MASK) {
- if (eof - pos < PAGE_SIZE) {
- _debug("cut short %llx to %llx",
- pos, eof);
- len = eof - pos;
- ASSERTCMP(pos + len, ==, eof);
- }
- }
-
- data = kmap(page);
- ret = kernel_write(file, data, len, &pos);
- kunmap(page);
- fput(file);
- if (ret != len)
- goto error_eio;
-
- _leave(" = 0");
- return 0;
-
-error_eio:
- ret = -EIO;
-error_2:
- if (ret == -EIO)
- cachefiles_io_error_obj(object,
- "Write page to backing file failed");
-error:
- _leave(" = -ENOBUFS [%d]", ret);
- return -ENOBUFS;
-}
-
-/*
- * detach a backing block from a page
- * - cache withdrawal is prevented by the caller
- */
-void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
- __releases(&object->fscache.cookie->lock)
-{
- struct cachefiles_object *object;
-
- object = container_of(_object, struct cachefiles_object, fscache);
-
- _enter("%p,{%lu}", object, page->index);
-
- spin_unlock(&object->fscache.cookie->lock);
-}
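/*
 * Editorial note (not part of the patch): everything deleted above - the
 * monitor/waitqueue machinery that copied backing pages into netfs pages,
 * the bmap()-based block probing and the per-page write path - is
 * superseded by the netfs helper library.  A network filesystem now hands
 * whole reads to netfs_readahead()/netfs_readpage() with a
 * netfs_read_request_ops table (see the ceph conversion further down), and
 * the cache is driven with iov_iter-based direct I/O on the backing file
 * instead of page-by-page copying.
 */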
diff --git a/fs/cachefiles/security.c b/fs/cachefiles/security.c
index aec13fd94692..fe777164f1d8 100644
--- a/fs/cachefiles/security.c
+++ b/fs/cachefiles/security.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles security management
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
diff --git a/fs/cachefiles/volume.c b/fs/cachefiles/volume.c
new file mode 100644
index 000000000000..89df0ba8ba5e
--- /dev/null
+++ b/fs/cachefiles/volume.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Volume handling.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include "internal.h"
+#include <trace/events/fscache.h>
+
+/*
+ * Allocate and set up a volume representation. We make sure all the fanout
+ * directories are created and pinned.
+ */
+void cachefiles_acquire_volume(struct fscache_volume *vcookie)
+{
+ struct cachefiles_volume *volume;
+ struct cachefiles_cache *cache = vcookie->cache->cache_priv;
+ const struct cred *saved_cred;
+ struct dentry *vdentry, *fan;
+ size_t len;
+ char *name;
+ bool is_new = false;
+ int ret, n_accesses, i;
+
+ _enter("");
+
+ volume = kzalloc(sizeof(struct cachefiles_volume), GFP_KERNEL);
+ if (!volume)
+ return;
+ volume->vcookie = vcookie;
+ volume->cache = cache;
+ INIT_LIST_HEAD(&volume->cache_link);
+
+ cachefiles_begin_secure(cache, &saved_cred);
+
+ len = vcookie->key[0];
+ name = kmalloc(len + 3, GFP_NOFS);
+ if (!name)
+ goto error_vol;
+ name[0] = 'I';
+ memcpy(name + 1, vcookie->key + 1, len);
+ name[len + 1] = 0;
+
+retry:
+ vdentry = cachefiles_get_directory(cache, cache->store, name, &is_new);
+ if (IS_ERR(vdentry))
+ goto error_name;
+ volume->dentry = vdentry;
+
+ if (is_new) {
+ if (!cachefiles_set_volume_xattr(volume))
+ goto error_dir;
+ } else {
+ ret = cachefiles_check_volume_xattr(volume);
+ if (ret < 0) {
+ if (ret != -ESTALE)
+ goto error_dir;
+ inode_lock_nested(d_inode(cache->store), I_MUTEX_PARENT);
+ cachefiles_bury_object(cache, NULL, cache->store, vdentry,
+ FSCACHE_VOLUME_IS_WEIRD);
+ cachefiles_put_directory(volume->dentry);
+ cond_resched();
+ goto retry;
+ }
+ }
+
+ for (i = 0; i < 256; i++) {
+ sprintf(name, "@%02x", i);
+ fan = cachefiles_get_directory(cache, vdentry, name, NULL);
+ if (IS_ERR(fan))
+ goto error_fan;
+ volume->fanout[i] = fan;
+ }
+
+ cachefiles_end_secure(cache, saved_cred);
+
+ vcookie->cache_priv = volume;
+ n_accesses = atomic_inc_return(&vcookie->n_accesses); /* Stop wakeups on dec-to-0 */
+ trace_fscache_access_volume(vcookie->debug_id, 0,
+ refcount_read(&vcookie->ref),
+ n_accesses, fscache_access_cache_pin);
+
+ spin_lock(&cache->object_list_lock);
+ list_add(&volume->cache_link, &volume->cache->volumes);
+ spin_unlock(&cache->object_list_lock);
+
+ kfree(name);
+ return;
+
+error_fan:
+ for (i = 0; i < 256; i++)
+ cachefiles_put_directory(volume->fanout[i]);
+error_dir:
+ cachefiles_put_directory(volume->dentry);
+error_name:
+ kfree(name);
+error_vol:
+ kfree(volume);
+ cachefiles_end_secure(cache, saved_cred);
+}
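/*
 * Editorial note (not part of the patch): the on-disk shape built above,
 * with an illustrative cache root and volume key:
 *
 *   <root>/Ierofs,example.com       <- volume dir: "I" + volume key
 *   <root>/Ierofs,example.com/@00   <- fanout dirs "@00" .. "@ff"
 *   ...
 *   <root>/Ierofs,example.com/@ff
 *
 * Data files then land in the fanout bucket picked by the low byte of
 * their cookie's key hash.
 */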
+
+/*
+ * Release a volume representation.
+ */
+static void __cachefiles_free_volume(struct cachefiles_volume *volume)
+{
+ int i;
+
+ _enter("");
+
+ volume->vcookie->cache_priv = NULL;
+
+ for (i = 0; i < 256; i++)
+ cachefiles_put_directory(volume->fanout[i]);
+ cachefiles_put_directory(volume->dentry);
+ kfree(volume);
+}
+
+void cachefiles_free_volume(struct fscache_volume *vcookie)
+{
+ struct cachefiles_volume *volume = vcookie->cache_priv;
+
+ if (volume) {
+ spin_lock(&volume->cache->object_list_lock);
+ list_del_init(&volume->cache_link);
+ spin_unlock(&volume->cache->object_list_lock);
+ __cachefiles_free_volume(volume);
+ }
+}
+
+void cachefiles_withdraw_volume(struct cachefiles_volume *volume)
+{
+ fscache_withdraw_volume(volume->vcookie);
+ cachefiles_set_volume_xattr(volume);
+ __cachefiles_free_volume(volume);
+}
diff --git a/fs/cachefiles/xattr.c b/fs/cachefiles/xattr.c
index 9e82de668595..83f41bd0c3a9 100644
--- a/fs/cachefiles/xattr.c
+++ b/fs/cachefiles/xattr.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* CacheFiles extended attribute management
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
@@ -15,310 +15,245 @@
#include <linux/slab.h>
#include "internal.h"
+#define CACHEFILES_COOKIE_TYPE_DATA 1
+
+struct cachefiles_xattr {
+ __be64 object_size; /* Actual size of the object */
+ __be64 zero_point; /* Size after which server has no data not written by us */
+ __u8 type; /* Type of object */
+ __u8 content; /* Content presence (enum cachefiles_content) */
+ __u8 data[]; /* netfs coherency data */
+} __packed;
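/*
 * Editorial note (not part of the patch): with __packed this header is
 * 8 + 8 + 1 + 1 = 18 bytes on disk, immediately followed by aux_len bytes
 * of the netfs's opaque coherency blob, which is why the set/check code
 * below sizes its buffer as
 * sizeof(struct cachefiles_xattr) + object->cookie->aux_len.
 */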
+
static const char cachefiles_xattr_cache[] =
XATTR_USER_PREFIX "CacheFiles.cache";
/*
- * check the type label on an object
- * - done using xattrs
+ * set the state xattr on a cache file
*/
-int cachefiles_check_object_type(struct cachefiles_object *object)
+int cachefiles_set_object_xattr(struct cachefiles_object *object)
{
- struct dentry *dentry = object->dentry;
- char type[3], xtype[3];
+ struct cachefiles_xattr *buf;
+ struct dentry *dentry;
+ struct file *file = object->file;
+ unsigned int len = object->cookie->aux_len;
int ret;
- ASSERT(dentry);
- ASSERT(d_backing_inode(dentry));
-
- if (!object->fscache.cookie)
- strcpy(type, "C3");
- else
- snprintf(type, 3, "%02x", object->fscache.cookie->def->type);
-
- _enter("%x{%s}", object->fscache.debug_id, type);
+ if (!file)
+ return -ESTALE;
+ dentry = file->f_path.dentry;
- /* attempt to install a type label directly */
- ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache, type,
- 2, XATTR_CREATE);
- if (ret == 0) {
- _debug("SET"); /* we succeeded */
- goto error;
- }
+ _enter("%x,#%d", object->debug_id, len);
- if (ret != -EEXIST) {
- pr_err("Can't set xattr on %pd [%lu] (err %d)\n",
- dentry, d_backing_inode(dentry)->i_ino,
- -ret);
- goto error;
- }
+ buf = kmalloc(sizeof(struct cachefiles_xattr) + len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- /* read the current type label */
- ret = vfs_getxattr(&init_user_ns, dentry, cachefiles_xattr_cache, xtype,
- 3);
+ buf->object_size = cpu_to_be64(object->cookie->object_size);
+ buf->zero_point = 0;
+ buf->type = CACHEFILES_COOKIE_TYPE_DATA;
+ buf->content = object->content_info;
+ if (test_bit(FSCACHE_COOKIE_LOCAL_WRITE, &object->cookie->flags))
+ buf->content = CACHEFILES_CONTENT_DIRTY;
+ if (len > 0)
+ memcpy(buf->data, fscache_get_aux(object->cookie), len);
+
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
+ ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
+ buf, sizeof(struct cachefiles_xattr) + len, 0);
if (ret < 0) {
- if (ret == -ERANGE)
- goto bad_type_length;
-
- pr_err("Can't read xattr on %pd [%lu] (err %d)\n",
- dentry, d_backing_inode(dentry)->i_ino,
- -ret);
- goto error;
+ trace_cachefiles_vfs_error(object, file_inode(file), ret,
+ cachefiles_trace_setxattr_error);
+ trace_cachefiles_coherency(object, file_inode(file)->i_ino,
+ buf->content,
+ cachefiles_coherency_set_fail);
+ if (ret != -ENOMEM)
+ cachefiles_io_error_obj(
+ object,
+ "Failed to set xattr with error %d", ret);
+ } else {
+ trace_cachefiles_coherency(object, file_inode(file)->i_ino,
+ buf->content,
+ cachefiles_coherency_set_ok);
}
- /* check the type is what we're expecting */
- if (ret != 2)
- goto bad_type_length;
-
- if (xtype[0] != type[0] || xtype[1] != type[1])
- goto bad_type;
-
- ret = 0;
-
-error:
+ kfree(buf);
_leave(" = %d", ret);
return ret;
-
-bad_type_length:
- pr_err("Cache object %lu type xattr length incorrect\n",
- d_backing_inode(dentry)->i_ino);
- ret = -EIO;
- goto error;
-
-bad_type:
- xtype[2] = 0;
- pr_err("Cache object %pd [%lu] type %s not %s\n",
- dentry, d_backing_inode(dentry)->i_ino,
- xtype, type);
- ret = -EIO;
- goto error;
}
/*
- * set the state xattr on a cache file
+ * check the consistency between the backing cache and the FS-Cache cookie
*/
-int cachefiles_set_object_xattr(struct cachefiles_object *object,
- struct cachefiles_xattr *auxdata)
+int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file)
{
- struct dentry *dentry = object->dentry;
- int ret;
-
- ASSERT(dentry);
-
- _enter("%p,#%d", object, auxdata->len);
+ struct cachefiles_xattr *buf;
+ struct dentry *dentry = file->f_path.dentry;
+ unsigned int len = object->cookie->aux_len, tlen;
+ const void *p = fscache_get_aux(object->cookie);
+ enum cachefiles_coherency_trace why;
+ ssize_t xlen;
+ int ret = -ESTALE;
- /* attempt to install the cache metadata directly */
- _debug("SET #%u", auxdata->len);
+ tlen = sizeof(struct cachefiles_xattr) + len;
+ buf = kmalloc(tlen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- clear_bit(FSCACHE_COOKIE_AUX_UPDATED, &object->fscache.cookie->flags);
- ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
- &auxdata->type, auxdata->len, XATTR_CREATE);
- if (ret < 0 && ret != -ENOMEM)
- cachefiles_io_error_obj(
- object,
- "Failed to set xattr with error %d", ret);
+ xlen = cachefiles_inject_read_error();
+ if (xlen == 0)
+ xlen = vfs_getxattr(&init_user_ns, dentry, cachefiles_xattr_cache, buf, tlen);
+ if (xlen != tlen) {
+ if (xlen < 0)
+ trace_cachefiles_vfs_error(object, file_inode(file), xlen,
+ cachefiles_trace_getxattr_error);
+ if (xlen == -EIO)
+ cachefiles_io_error_obj(
+ object,
+ "Failed to read aux with error %zd", xlen);
+ why = cachefiles_coherency_check_xattr;
+ } else if (buf->type != CACHEFILES_COOKIE_TYPE_DATA) {
+ why = cachefiles_coherency_check_type;
+ } else if (memcmp(buf->data, p, len) != 0) {
+ why = cachefiles_coherency_check_aux;
+ } else if (be64_to_cpu(buf->object_size) != object->cookie->object_size) {
+ why = cachefiles_coherency_check_objsize;
+ } else if (buf->content == CACHEFILES_CONTENT_DIRTY) {
+ // TODO: Begin conflict resolution
+ pr_warn("Dirty object in cache\n");
+ why = cachefiles_coherency_check_dirty;
+ } else {
+ why = cachefiles_coherency_check_ok;
+ ret = 0;
+ }
- _leave(" = %d", ret);
+ trace_cachefiles_coherency(object, file_inode(file)->i_ino,
+ buf->content, why);
+ kfree(buf);
return ret;
}
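/*
 * Editorial sketch (not part of the patch): where the aux blob compared
 * above comes from.  It is whatever the netfs supplied when it bound its
 * cookie - for instance a file's data-version number - roughly as below
 * (the helper and argument choices are illustrative):
 */
static struct fscache_cookie *example_bind_cookie(struct fscache_volume *volume,
						  u64 vnode_id, u64 data_version,
						  loff_t i_size)
{
	return fscache_acquire_cookie(volume, 0,
				      &vnode_id, sizeof(vnode_id),	/* index key */
				      &data_version, sizeof(data_version), /* aux data */
				      i_size);				/* object size */
}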
/*
- * update the state xattr on a cache file
+ * remove the object's xattr to mark it stale
*/
-int cachefiles_update_object_xattr(struct cachefiles_object *object,
- struct cachefiles_xattr *auxdata)
+int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
+ struct cachefiles_object *object,
+ struct dentry *dentry)
{
- struct dentry *dentry = object->dentry;
int ret;
- if (!dentry)
- return -ESTALE;
-
- _enter("%x,#%d", object->fscache.debug_id, auxdata->len);
-
- /* attempt to install the cache metadata directly */
- _debug("SET #%u", auxdata->len);
-
- clear_bit(FSCACHE_COOKIE_AUX_UPDATED, &object->fscache.cookie->flags);
- ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
- &auxdata->type, auxdata->len, XATTR_REPLACE);
- if (ret < 0 && ret != -ENOMEM)
- cachefiles_io_error_obj(
- object,
- "Failed to update xattr with error %d", ret);
+ ret = cachefiles_inject_remove_error();
+ if (ret == 0)
+ ret = vfs_removexattr(&init_user_ns, dentry, cachefiles_xattr_cache);
+ if (ret < 0) {
+ trace_cachefiles_vfs_error(object, d_inode(dentry), ret,
+ cachefiles_trace_remxattr_error);
+ if (ret == -ENOENT || ret == -ENODATA)
+ ret = 0;
+ else if (ret != -ENOMEM)
+ cachefiles_io_error(cache,
+ "Can't remove xattr from %lu"
+ " (error %d)",
+ d_backing_inode(dentry)->i_ino, -ret);
+ }
_leave(" = %d", ret);
return ret;
}
/*
- * check the consistency between the backing cache and the FS-Cache cookie
+ * Stick a marker on the cache object to indicate that it's dirty.
*/
-int cachefiles_check_auxdata(struct cachefiles_object *object)
+void cachefiles_prepare_to_write(struct fscache_cookie *cookie)
{
- struct cachefiles_xattr *auxbuf;
- enum fscache_checkaux validity;
- struct dentry *dentry = object->dentry;
- ssize_t xlen;
- int ret;
-
- ASSERT(dentry);
- ASSERT(d_backing_inode(dentry));
- ASSERT(object->fscache.cookie->def->check_aux);
-
- auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, GFP_KERNEL);
- if (!auxbuf)
- return -ENOMEM;
+ const struct cred *saved_cred;
+ struct cachefiles_object *object = cookie->cache_priv;
+ struct cachefiles_cache *cache = object->volume->cache;
- xlen = vfs_getxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
- &auxbuf->type, 512 + 1);
- ret = -ESTALE;
- if (xlen < 1 ||
- auxbuf->type != object->fscache.cookie->def->type)
- goto error;
+ _enter("c=%08x", object->cookie->debug_id);
- xlen--;
- validity = fscache_check_aux(&object->fscache, &auxbuf->data, xlen,
- i_size_read(d_backing_inode(dentry)));
- if (validity != FSCACHE_CHECKAUX_OKAY)
- goto error;
-
- ret = 0;
-error:
- kfree(auxbuf);
- return ret;
+ if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
+ cachefiles_begin_secure(cache, &saved_cred);
+ cachefiles_set_object_xattr(object);
+ cachefiles_end_secure(cache, saved_cred);
+ }
}
/*
- * check the state xattr on a cache file
- * - return -ESTALE if the object should be deleted
+ * Set the state xattr on a volume directory.
*/
-int cachefiles_check_object_xattr(struct cachefiles_object *object,
- struct cachefiles_xattr *auxdata)
+bool cachefiles_set_volume_xattr(struct cachefiles_volume *volume)
{
- struct cachefiles_xattr *auxbuf;
- struct dentry *dentry = object->dentry;
+ unsigned int len = volume->vcookie->coherency_len;
+ const void *p = volume->vcookie->coherency;
+ struct dentry *dentry = volume->dentry;
int ret;
- _enter("%p,#%d", object, auxdata->len);
-
- ASSERT(dentry);
- ASSERT(d_backing_inode(dentry));
-
- auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, cachefiles_gfp);
- if (!auxbuf) {
- _leave(" = -ENOMEM");
- return -ENOMEM;
- }
+ _enter("%x,#%d", volume->vcookie->debug_id, len);
- /* read the current type label */
- ret = vfs_getxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
- &auxbuf->type, 512 + 1);
+ ret = cachefiles_inject_write_error();
+ if (ret == 0)
+ ret = vfs_setxattr(&init_user_ns, dentry, cachefiles_xattr_cache,
+ p, len, 0);
if (ret < 0) {
- if (ret == -ENODATA)
- goto stale; /* no attribute - power went off
- * mid-cull? */
-
- if (ret == -ERANGE)
- goto bad_type_length;
-
- cachefiles_io_error_obj(object,
- "Can't read xattr on %lu (err %d)",
- d_backing_inode(dentry)->i_ino, -ret);
- goto error;
+ trace_cachefiles_vfs_error(NULL, d_inode(dentry), ret,
+ cachefiles_trace_setxattr_error);
+ trace_cachefiles_vol_coherency(volume, d_inode(dentry)->i_ino,
+ cachefiles_coherency_vol_set_fail);
+ if (ret != -ENOMEM)
+ cachefiles_io_error(
+ volume->cache, "Failed to set xattr with error %d", ret);
+ } else {
+ trace_cachefiles_vol_coherency(volume, d_inode(dentry)->i_ino,
+ cachefiles_coherency_vol_set_ok);
}
- /* check the on-disk object */
- if (ret < 1)
- goto bad_type_length;
-
- if (auxbuf->type != auxdata->type)
- goto stale;
-
- auxbuf->len = ret;
-
- /* consult the netfs */
- if (object->fscache.cookie->def->check_aux) {
- enum fscache_checkaux result;
- unsigned int dlen;
-
- dlen = auxbuf->len - 1;
-
- _debug("checkaux %s #%u",
- object->fscache.cookie->def->name, dlen);
-
- result = fscache_check_aux(&object->fscache,
- &auxbuf->data, dlen,
- i_size_read(d_backing_inode(dentry)));
-
- switch (result) {
- /* entry okay as is */
- case FSCACHE_CHECKAUX_OKAY:
- goto okay;
-
- /* entry requires update */
- case FSCACHE_CHECKAUX_NEEDS_UPDATE:
- break;
-
- /* entry requires deletion */
- case FSCACHE_CHECKAUX_OBSOLETE:
- goto stale;
-
- default:
- BUG();
- }
-
- /* update the current label */
- ret = vfs_setxattr(&init_user_ns, dentry,
- cachefiles_xattr_cache, &auxdata->type,
- auxdata->len, XATTR_REPLACE);
- if (ret < 0) {
- cachefiles_io_error_obj(object,
- "Can't update xattr on %lu"
- " (error %d)",
- d_backing_inode(dentry)->i_ino, -ret);
- goto error;
- }
- }
-
-okay:
- ret = 0;
-
-error:
- kfree(auxbuf);
_leave(" = %d", ret);
- return ret;
-
-bad_type_length:
- pr_err("Cache object %lu xattr length incorrect\n",
- d_backing_inode(dentry)->i_ino);
- ret = -EIO;
- goto error;
-
-stale:
- ret = -ESTALE;
- goto error;
+ return ret == 0;
}
/*
- * remove the object's xattr to mark it stale
+ * Check the consistency between the backing cache and the volume cookie.
*/
-int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
- struct dentry *dentry)
+int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
{
- int ret;
+ struct cachefiles_xattr *buf;
+ struct dentry *dentry = volume->dentry;
+ unsigned int len = volume->vcookie->coherency_len;
+ const void *p = volume->vcookie->coherency;
+ enum cachefiles_coherency_trace why;
+ ssize_t xlen;
+ int ret = -ESTALE;
- ret = vfs_removexattr(&init_user_ns, dentry, cachefiles_xattr_cache);
- if (ret < 0) {
- if (ret == -ENOENT || ret == -ENODATA)
- ret = 0;
- else if (ret != -ENOMEM)
- cachefiles_io_error(cache,
- "Can't remove xattr from %lu"
- " (error %d)",
- d_backing_inode(dentry)->i_ino, -ret);
+ _enter("");
+
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ xlen = cachefiles_inject_read_error();
+ if (xlen == 0)
+ xlen = vfs_getxattr(&init_user_ns, dentry, cachefiles_xattr_cache, buf, len);
+ if (xlen != len) {
+ if (xlen < 0) {
+ trace_cachefiles_vfs_error(NULL, d_inode(dentry), xlen,
+ cachefiles_trace_getxattr_error);
+ if (xlen == -EIO)
+ cachefiles_io_error(
+ volume->cache,
+ "Failed to read xattr with error %zd", xlen);
+ }
+ why = cachefiles_coherency_vol_check_xattr;
+	} else if (memcmp(buf, p, len) != 0) {
+ why = cachefiles_coherency_vol_check_cmp;
+ } else {
+ why = cachefiles_coherency_vol_check_ok;
+ ret = 0;
}
+ trace_cachefiles_vol_coherency(volume, d_inode(dentry)->i_ino, why);
+ kfree(buf);
_leave(" = %d", ret);
return ret;
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index e53c8541f5b2..c98e5238a1b6 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -4,8 +4,8 @@
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/swap.h>
#include <linux/pagemap.h>
-#include <linux/writeback.h> /* generic_writepages */
#include <linux/slab.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
@@ -126,7 +126,7 @@ static int ceph_set_page_dirty(struct page *page)
BUG_ON(PagePrivate(page));
attach_page_private(page, snapc);
- return __set_page_dirty_nobuffers(page);
+ return ceph_fscache_set_page_dirty(page);
}
/*
@@ -141,8 +141,6 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset,
struct ceph_inode_info *ci;
struct ceph_snap_context *snapc;
- wait_on_page_fscache(page);
-
inode = page->mapping->host;
ci = ceph_inode(inode);
@@ -153,28 +151,36 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset,
}
WARN_ON(!PageLocked(page));
- if (!PagePrivate(page))
- return;
+ if (PagePrivate(page)) {
+ dout("%p invalidatepage %p idx %lu full dirty page\n",
+ inode, page, page->index);
- dout("%p invalidatepage %p idx %lu full dirty page\n",
- inode, page, page->index);
+ snapc = detach_page_private(page);
+ ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
+ ceph_put_snap_context(snapc);
+ }
- snapc = detach_page_private(page);
- ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
- ceph_put_snap_context(snapc);
+ wait_on_page_fscache(page);
}
static int ceph_releasepage(struct page *page, gfp_t gfp)
{
- dout("%p releasepage %p idx %lu (%sdirty)\n", page->mapping->host,
- page, page->index, PageDirty(page) ? "" : "not ");
+ struct inode *inode = page->mapping->host;
+
+ dout("%llx:%llx releasepage %p idx %lu (%sdirty)\n",
+ ceph_vinop(inode), page,
+ page->index, PageDirty(page) ? "" : "not ");
+
+ if (PagePrivate(page))
+ return 0;
if (PageFsCache(page)) {
- if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
+ if (current_is_kswapd() || !(gfp & __GFP_FS))
return 0;
wait_on_page_fscache(page);
}
- return !PagePrivate(page);
+ ceph_fscache_note_page_release(inode);
+ return 1;
}
static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
@@ -291,10 +297,6 @@ out:
dout("%s: result %d\n", __func__, err);
}
-static void ceph_init_rreq(struct netfs_read_request *rreq, struct file *file)
-{
-}
-
static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
{
struct inode *inode = mapping->host;
@@ -306,7 +308,6 @@ static void ceph_readahead_cleanup(struct address_space *mapping, void *priv)
}
static const struct netfs_read_request_ops ceph_netfs_read_ops = {
- .init_rreq = ceph_init_rreq,
.is_cache_enabled = ceph_is_cache_enabled,
.begin_cache_operation = ceph_begin_cache_operation,
.issue_op = ceph_netfs_issue_op,
@@ -378,6 +379,38 @@ static void ceph_readahead(struct readahead_control *ractl)
netfs_readahead(ractl, &ceph_netfs_read_ops, (void *)(uintptr_t)got);
}
+#ifdef CONFIG_CEPH_FSCACHE
+static void ceph_set_page_fscache(struct page *page)
+{
+ set_page_fscache(page);
+}
+
+static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
+{
+ struct inode *inode = priv;
+
+ if (IS_ERR_VALUE(error) && error != -ENOBUFS)
+ ceph_fscache_invalidate(inode, false);
+}
+
+static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct fscache_cookie *cookie = ceph_fscache_cookie(ci);
+
+ fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
+ ceph_fscache_write_terminated, inode, caching);
+}
+#else
+static inline void ceph_set_page_fscache(struct page *page)
+{
+}
+
+static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
+{
+}
+#endif /* CONFIG_CEPH_FSCACHE */
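/*
 * Editorial note (not part of the patch): the stubs above follow the usual
 * kernel pattern of compiling to no-ops when CONFIG_CEPH_FSCACHE is off, so
 * the writeback paths below can call ceph_set_page_fscache() and
 * ceph_fscache_write_to_cache() unconditionally.  With caching enabled, a
 * page being written to the cache carries PG_fscache (an alias of
 * PG_private_2), and anyone wanting to touch the page first calls
 * wait_on_page_fscache(), mirroring how PG_writeback is handled.
 */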
+
struct ceph_writeback_ctl
{
loff_t i_size;
@@ -493,6 +526,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
struct ceph_writeback_ctl ceph_wbc;
struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_osd_request *req;
+ bool caching = ceph_is_cache_enabled(inode);
dout("writepage %p idx %lu\n", page, page->index);
@@ -531,16 +565,17 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
- set_page_writeback(page);
req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), page_off, &len, 0, 1,
CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE, snapc,
ceph_wbc.truncate_seq, ceph_wbc.truncate_size,
true);
- if (IS_ERR(req)) {
- redirty_page_for_writepage(wbc, page);
- end_page_writeback(page);
+ if (IS_ERR(req))
return PTR_ERR(req);
- }
+
+ set_page_writeback(page);
+ if (caching)
+ ceph_set_page_fscache(page);
+ ceph_fscache_write_to_cache(inode, page_off, len, caching);
/* it may be a short write due to an object boundary */
WARN_ON_ONCE(len > thp_size(page));
@@ -599,6 +634,9 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
struct inode *inode = page->mapping->host;
BUG_ON(!inode);
ihold(inode);
+
+ wait_on_page_fscache(page);
+
err = writepage_nounlock(page, wbc);
if (err == -ERESTARTSYS) {
/* direct memory reclaimer was killed by SIGKILL. return 0
@@ -720,6 +758,7 @@ static int ceph_writepages_start(struct address_space *mapping,
struct ceph_writeback_ctl ceph_wbc;
bool should_loop, range_whole = false;
bool done = false;
+ bool caching = ceph_is_cache_enabled(inode);
dout("writepages_start %p (mode=%s)\n", inode,
wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
@@ -843,7 +882,7 @@ get_more_pages:
unlock_page(page);
break;
}
- if (PageWriteback(page)) {
+ if (PageWriteback(page) || PageFsCache(page)) {
if (wbc->sync_mode == WB_SYNC_NONE) {
dout("%p under writeback\n", page);
unlock_page(page);
@@ -851,6 +890,7 @@ get_more_pages:
}
dout("waiting on writeback %p\n", page);
wait_on_page_writeback(page);
+ wait_on_page_fscache(page);
}
if (!clear_page_dirty_for_io(page)) {
@@ -983,9 +1023,19 @@ new_request:
op_idx = 0;
for (i = 0; i < locked_pages; i++) {
u64 cur_offset = page_offset(pages[i]);
+ /*
+ * Discontinuity in page range? Ceph can handle that by just passing
+ * multiple extents in the write op.
+ */
if (offset + len != cur_offset) {
+ /* If it's full, stop here */
if (op_idx + 1 == req->r_num_ops)
break;
+
+ /* Kick off an fscache write with what we have so far. */
+ ceph_fscache_write_to_cache(inode, offset, len, caching);
+
+ /* Start a new extent */
osd_req_op_extent_dup_last(req, op_idx,
cur_offset - offset);
dout("writepages got pages at %llu~%llu\n",
@@ -996,14 +1046,17 @@ new_request:
osd_req_op_extent_update(req, op_idx, len);
len = 0;
- offset = cur_offset;
+ offset = cur_offset;
data_pages = pages + i;
op_idx++;
}
set_page_writeback(pages[i]);
+ if (caching)
+ ceph_set_page_fscache(pages[i]);
len += thp_size(page);
}
+ ceph_fscache_write_to_cache(inode, offset, len, caching);
if (ceph_wbc.size_stable) {
len = min(len, ceph_wbc.i_size - offset);
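/*
 * [editor's sketch, not part of the patch] The extent batching performed in
 * the ceph_writepages_start() loop above, modelled in userspace: whenever the
 * next page is not contiguous with the extent accumulated so far, the extent
 * is flushed to the cache and a new one starts; a final flush covers the
 * tail. cache_write() is a hypothetical stand-in for
 * ceph_fscache_write_to_cache().
 */
#include <stdio.h>

#define PAGE_SZ 4096ULL

static void cache_write(unsigned long long off, unsigned long long len)
{
	printf("fscache write %llu~%llu\n", off, len);
}

int main(void)
{
	/* page offsets with a hole between the 2nd and 3rd page */
	unsigned long long pgs[] = { 0, 4096, 16384, 20480 };
	unsigned long long offset = pgs[0], len = 0;

	for (int i = 0; i < 4; i++) {
		if (offset + len != pgs[i]) {		/* discontinuity */
			cache_write(offset, len);	/* flush what we have */
			offset = pgs[i];		/* start a new extent */
			len = 0;
		}
		len += PAGE_SZ;
	}
	cache_write(offset, len);	/* prints 0~8192, then 16384~8192 */
	return 0;
}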
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 457afda5498a..7d22850623ef 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -12,199 +12,99 @@
#include "super.h"
#include "cache.h"
-struct fscache_netfs ceph_cache_netfs = {
- .name = "ceph",
- .version = 0,
-};
-
-static DEFINE_MUTEX(ceph_fscache_lock);
-static LIST_HEAD(ceph_fscache_list);
-
-struct ceph_fscache_entry {
- struct list_head list;
- struct fscache_cookie *fscache;
- size_t uniq_len;
- /* The following members must be last */
- struct ceph_fsid fsid;
- char uniquifier[];
-};
-
-static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
- .name = "CEPH.fsid",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-int __init ceph_fscache_register(void)
-{
- return fscache_register_netfs(&ceph_cache_netfs);
-}
-
-void ceph_fscache_unregister(void)
-{
- fscache_unregister_netfs(&ceph_cache_netfs);
-}
-
-int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc)
+void ceph_fscache_register_inode_cookie(struct inode *inode)
{
- const struct ceph_fsid *fsid = &fsc->client->fsid;
- const char *fscache_uniq = fsc->mount_options->fscache_uniq;
- size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
- struct ceph_fscache_entry *ent;
- int err = 0;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- mutex_lock(&ceph_fscache_lock);
- list_for_each_entry(ent, &ceph_fscache_list, list) {
- if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
- continue;
- if (ent->uniq_len != uniq_len)
- continue;
- if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
- continue;
-
- errorfc(fc, "fscache cookie already registered for fsid %pU, use fsc=<uniquifier> option",
- fsid);
- err = -EBUSY;
- goto out_unlock;
- }
+ /* No caching for filesystem? */
+ if (!fsc->fscache)
+ return;
- ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
- if (!ent) {
- err = -ENOMEM;
- goto out_unlock;
- }
+ /* Regular files only */
+ if (!S_ISREG(inode->i_mode))
+ return;
- memcpy(&ent->fsid, fsid, sizeof(*fsid));
- if (uniq_len > 0) {
- memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
- ent->uniq_len = uniq_len;
- }
+ /* Only new inodes! */
+ if (!(inode->i_state & I_NEW))
+ return;
- fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
- &ceph_fscache_fsid_object_def,
- &ent->fsid, sizeof(ent->fsid) + uniq_len,
- NULL, 0,
- fsc, 0, true);
+ WARN_ON_ONCE(ci->fscache);
- if (fsc->fscache) {
- ent->fscache = fsc->fscache;
- list_add_tail(&ent->list, &ceph_fscache_list);
- } else {
- kfree(ent);
- errorfc(fc, "unable to register fscache cookie for fsid %pU",
- fsid);
- /* all other fs ignore this error */
- }
-out_unlock:
- mutex_unlock(&ceph_fscache_lock);
- return err;
+ ci->fscache = fscache_acquire_cookie(fsc->fscache, 0,
+ &ci->i_vino, sizeof(ci->i_vino),
+ &ci->i_version, sizeof(ci->i_version),
+ i_size_read(inode));
}
-static enum fscache_checkaux ceph_fscache_inode_check_aux(
- void *cookie_netfs_data, const void *data, uint16_t dlen,
- loff_t object_size)
+void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
{
- struct ceph_inode_info* ci = cookie_netfs_data;
- struct inode* inode = &ci->vfs_inode;
+ struct fscache_cookie *cookie = ci->fscache;
- if (dlen != sizeof(ci->i_version) ||
- i_size_read(inode) != object_size)
- return FSCACHE_CHECKAUX_OBSOLETE;
+ fscache_relinquish_cookie(cookie, false);
+}
- if (*(u64 *)data != ci->i_version)
- return FSCACHE_CHECKAUX_OBSOLETE;
+void ceph_fscache_use_cookie(struct inode *inode, bool will_modify)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
- dout("ceph inode 0x%p cached okay\n", ci);
- return FSCACHE_CHECKAUX_OKAY;
+ fscache_use_cookie(ci->fscache, will_modify);
}
-static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
- .name = "CEPH.inode",
- .type = FSCACHE_COOKIE_TYPE_DATAFILE,
- .check_aux = ceph_fscache_inode_check_aux,
-};
-
-void ceph_fscache_register_inode_cookie(struct inode *inode)
+void ceph_fscache_unuse_cookie(struct inode *inode, bool update)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
-
- /* No caching for filesystem */
- if (!fsc->fscache)
- return;
- /* Only cache for regular files that are read only */
- if (!S_ISREG(inode->i_mode))
- return;
+ if (update) {
+ loff_t i_size = i_size_read(inode);
- inode_lock_nested(inode, I_MUTEX_CHILD);
- if (!ci->fscache) {
- ci->fscache = fscache_acquire_cookie(fsc->fscache,
- &ceph_fscache_inode_object_def,
- &ci->i_vino, sizeof(ci->i_vino),
- &ci->i_version, sizeof(ci->i_version),
- ci, i_size_read(inode), false);
+ fscache_unuse_cookie(ci->fscache, &ci->i_version, &i_size);
+ } else {
+ fscache_unuse_cookie(ci->fscache, NULL, NULL);
}
- inode_unlock(inode);
}
-void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+void ceph_fscache_update(struct inode *inode)
{
- struct fscache_cookie* cookie;
-
- if ((cookie = ci->fscache) == NULL)
- return;
-
- ci->fscache = NULL;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ loff_t i_size = i_size_read(inode);
- fscache_relinquish_cookie(cookie, &ci->i_vino, false);
+ fscache_update_cookie(ci->fscache, &ci->i_version, &i_size);
}
-static bool ceph_fscache_can_enable(void *data)
+void ceph_fscache_invalidate(struct inode *inode, bool dio_write)
{
- struct inode *inode = data;
- return !inode_is_open_for_write(inode);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+
+ fscache_invalidate(ceph_inode(inode)->fscache,
+ &ci->i_version, i_size_read(inode),
+ dio_write ? FSCACHE_INVAL_DIO_WRITE : 0);
}
-void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
+int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc)
{
- struct ceph_inode_info *ci = ceph_inode(inode);
+ const struct ceph_fsid *fsid = &fsc->client->fsid;
+ const char *fscache_uniq = fsc->mount_options->fscache_uniq;
+ size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
+ char *name;
+ int err = 0;
- if (!fscache_cookie_valid(ci->fscache))
- return;
+ name = kasprintf(GFP_KERNEL, "ceph,%pU%s%s", fsid, uniq_len ? "," : "",
+ uniq_len ? fscache_uniq : "");
+ if (!name)
+ return -ENOMEM;
- if (inode_is_open_for_write(inode)) {
- dout("fscache_file_set_cookie %p %p disabling cache\n",
- inode, filp);
- fscache_disable_cookie(ci->fscache, &ci->i_vino, false);
- } else {
- fscache_enable_cookie(ci->fscache, &ci->i_vino, i_size_read(inode),
- ceph_fscache_can_enable, inode);
- if (fscache_cookie_enabled(ci->fscache)) {
- dout("fscache_file_set_cookie %p %p enabling cache\n",
- inode, filp);
- }
+ fsc->fscache = fscache_acquire_volume(name, NULL, NULL, 0);
+ if (IS_ERR_OR_NULL(fsc->fscache)) {
+ errorfc(fc, "Unable to register fscache cookie for %s", name);
+ err = fsc->fscache ? PTR_ERR(fsc->fscache) : -EOPNOTSUPP;
+ fsc->fscache = NULL;
}
+ kfree(name);
+ return err;
}
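/*
 * [editor's sketch, not part of the patch] The volume-name construction used
 * by ceph_fscache_register_fs() above, "ceph,<fsid>[,<uniquifier>]". The
 * kernel builds it with kasprintf() and %pU; this userspace stand-in takes a
 * preformatted fsid string instead.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *fscache_volume_name(const char *fsid, const char *uniq)
{
	size_t uniq_len = uniq ? strlen(uniq) : 0;
	char *name;

	if (asprintf(&name, "ceph,%s%s%s", fsid,
		     uniq_len ? "," : "", uniq_len ? uniq : "") < 0)
		return NULL;	/* kernel path returns -ENOMEM here */
	return name;
}

int main(void)
{
	char *n = fscache_volume_name("00000000-1111-2222-3333-444444444444",
				      "backup");

	if (n) {
		puts(n);	/* ceph,00000000-...-444444444444,backup */
		free(n);
	}
	return 0;
}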
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
- if (fscache_cookie_valid(fsc->fscache)) {
- struct ceph_fscache_entry *ent;
- bool found = false;
-
- mutex_lock(&ceph_fscache_lock);
- list_for_each_entry(ent, &ceph_fscache_list, list) {
- if (ent->fscache == fsc->fscache) {
- list_del(&ent->list);
- kfree(ent);
- found = true;
- break;
- }
- }
- WARN_ON_ONCE(!found);
- mutex_unlock(&ceph_fscache_lock);
-
- __fscache_relinquish_cookie(fsc->fscache, NULL, false);
- }
- fsc->fscache = NULL;
+ fscache_relinquish_volume(fsc->fscache, NULL, false);
}
diff --git a/fs/ceph/cache.h b/fs/ceph/cache.h
index 058ea2a04376..09164389fa66 100644
--- a/fs/ceph/cache.h
+++ b/fs/ceph/cache.h
@@ -12,19 +12,19 @@
#include <linux/netfs.h>
#ifdef CONFIG_CEPH_FSCACHE
-
-extern struct fscache_netfs ceph_cache_netfs;
-
-int ceph_fscache_register(void);
-void ceph_fscache_unregister(void);
+#include <linux/fscache.h>
int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc);
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc);
void ceph_fscache_register_inode_cookie(struct inode *inode);
void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci);
-void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp);
-void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci);
+
+void ceph_fscache_use_cookie(struct inode *inode, bool will_modify);
+void ceph_fscache_unuse_cookie(struct inode *inode, bool update);
+
+void ceph_fscache_update(struct inode *inode);
+void ceph_fscache_invalidate(struct inode *inode, bool dio_write);
static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
{
@@ -36,37 +36,51 @@ static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info
return ci->fscache;
}
-static inline void ceph_fscache_invalidate(struct inode *inode)
+static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
{
- fscache_invalidate(ceph_inode(inode)->fscache);
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct fscache_cookie *cookie = ceph_fscache_cookie(ci);
+
+ if (cookie) {
+ ceph_fscache_use_cookie(inode, true);
+ fscache_resize_cookie(cookie, to);
+ ceph_fscache_unuse_cookie(inode, true);
+ }
}
-static inline bool ceph_is_cache_enabled(struct inode *inode)
+static inline void ceph_fscache_unpin_writeback(struct inode *inode,
+ struct writeback_control *wbc)
{
- struct fscache_cookie *cookie = ceph_fscache_cookie(ceph_inode(inode));
+ fscache_unpin_writeback(wbc, ceph_fscache_cookie(ceph_inode(inode)));
+}
+
+static inline int ceph_fscache_set_page_dirty(struct page *page)
+{
+ struct inode *inode = page->mapping->host;
+ struct ceph_inode_info *ci = ceph_inode(inode);
- if (!cookie)
- return false;
- return fscache_cookie_enabled(cookie);
+ return fscache_set_page_dirty(page, ceph_fscache_cookie(ci));
}
static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
{
struct fscache_cookie *cookie = ceph_fscache_cookie(ceph_inode(rreq->inode));
- return fscache_begin_read_operation(rreq, cookie);
+ return fscache_begin_read_operation(&rreq->cache_resources, cookie);
}
-#else
-static inline int ceph_fscache_register(void)
+static inline bool ceph_is_cache_enabled(struct inode *inode)
{
- return 0;
+ return fscache_cookie_enabled(ceph_fscache_cookie(ceph_inode(inode)));
}
-static inline void ceph_fscache_unregister(void)
+static inline void ceph_fscache_note_page_release(struct inode *inode)
{
-}
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ fscache_note_page_release(ceph_fscache_cookie(ci));
+}
+#else /* CONFIG_CEPH_FSCACHE */
static inline int ceph_fscache_register_fs(struct ceph_fs_client* fsc,
struct fs_context *fc)
{
@@ -81,28 +95,49 @@ static inline void ceph_fscache_inode_init(struct ceph_inode_info *ci)
{
}
-static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci)
+static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
{
- return NULL;
}
-static inline void ceph_fscache_register_inode_cookie(struct inode *inode)
+static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
{
}
-static inline void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
+static inline void ceph_fscache_use_cookie(struct inode *inode, bool will_modify)
{
}
-static inline void ceph_fscache_file_set_cookie(struct inode *inode,
- struct file *filp)
+static inline void ceph_fscache_unuse_cookie(struct inode *inode, bool update)
{
}
-static inline void ceph_fscache_invalidate(struct inode *inode)
+static inline void ceph_fscache_update(struct inode *inode)
{
}
+static inline void ceph_fscache_invalidate(struct inode *inode, bool dio_write)
+{
+}
+
+static inline struct fscache_cookie *ceph_fscache_cookie(struct ceph_inode_info *ci)
+{
+ return NULL;
+}
+
+static inline void ceph_fscache_resize(struct inode *inode, loff_t to)
+{
+}
+
+static inline void ceph_fscache_unpin_writeback(struct inode *inode,
+ struct writeback_control *wbc)
+{
+}
+
+static inline int ceph_fscache_set_page_dirty(struct page *page)
+{
+ return __set_page_dirty_nobuffers(page);
+}
+
static inline bool ceph_is_cache_enabled(struct inode *inode)
{
return false;
@@ -112,6 +147,10 @@ static inline int ceph_begin_cache_operation(struct netfs_read_request *rreq)
{
return -ENOBUFS;
}
-#endif
-#endif /* _CEPH_CACHE_H */
+static inline void ceph_fscache_note_page_release(struct inode *inode)
+{
+}
+#endif /* CONFIG_CEPH_FSCACHE */
+
+#endif
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index c447fa2e2d1f..b472cd066d1c 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1856,7 +1856,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
u32 invalidating_gen = ci->i_rdcache_gen;
spin_unlock(&ci->i_ceph_lock);
- ceph_fscache_invalidate(inode);
+ ceph_fscache_invalidate(inode, false);
invalidate_mapping_pages(&inode->i_data, 0, -1);
spin_lock(&ci->i_ceph_lock);
@@ -2218,6 +2218,7 @@ static int unsafe_request_wait(struct inode *inode)
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req1 = NULL, *req2 = NULL;
+ unsigned int max_sessions;
int ret, err = 0;
spin_lock(&ci->i_unsafe_lock);
@@ -2236,36 +2237,44 @@ static int unsafe_request_wait(struct inode *inode)
spin_unlock(&ci->i_unsafe_lock);
/*
+	 * mdsc->max_sessions is unlikely to change, but it can
+	 * grow. Rather than hold mdsc->mutex across this pass,
+	 * snapshot it here and retry with a reallocated sessions
+	 * array if a larger session index shows up.
+ */
+retry:
+ max_sessions = mdsc->max_sessions;
+
+ /*
* Trigger to flush the journal logs in all the relevant MDSes
* manually, or in the worst case we must wait at most 5 seconds
	 * for the journal logs to be flushed by the MDSes periodically.
*/
- if (req1 || req2) {
+ if ((req1 || req2) && likely(max_sessions)) {
struct ceph_mds_session **sessions = NULL;
struct ceph_mds_session *s;
struct ceph_mds_request *req;
- unsigned int max;
int i;
- /*
- * The mdsc->max_sessions is unlikely to be changed
- * mostly, here we will retry it by reallocating the
- * sessions arrary memory to get rid of the mdsc->mutex
- * lock.
- */
-retry:
- max = mdsc->max_sessions;
- sessions = krealloc(sessions, max * sizeof(s), __GFP_ZERO);
- if (!sessions)
- return -ENOMEM;
+ sessions = kzalloc(max_sessions * sizeof(s), GFP_KERNEL);
+ if (!sessions) {
+ err = -ENOMEM;
+ goto out;
+ }
spin_lock(&ci->i_unsafe_lock);
if (req1) {
list_for_each_entry(req, &ci->i_unsafe_dirops,
r_unsafe_dir_item) {
s = req->r_session;
- if (unlikely(s->s_mds >= max)) {
+ if (unlikely(s->s_mds >= max_sessions)) {
spin_unlock(&ci->i_unsafe_lock);
+ for (i = 0; i < max_sessions; i++) {
+ s = sessions[i];
+ if (s)
+ ceph_put_mds_session(s);
+ }
+ kfree(sessions);
goto retry;
}
if (!sessions[s->s_mds]) {
@@ -2278,8 +2287,14 @@ retry:
list_for_each_entry(req, &ci->i_unsafe_iops,
r_unsafe_target_item) {
s = req->r_session;
- if (unlikely(s->s_mds >= max)) {
+ if (unlikely(s->s_mds >= max_sessions)) {
spin_unlock(&ci->i_unsafe_lock);
+ for (i = 0; i < max_sessions; i++) {
+ s = sessions[i];
+ if (s)
+ ceph_put_mds_session(s);
+ }
+ kfree(sessions);
goto retry;
}
if (!sessions[s->s_mds]) {
@@ -2300,7 +2315,7 @@ retry:
spin_unlock(&ci->i_ceph_lock);
/* send flush mdlog request to MDSes */
- for (i = 0; i < max; i++) {
+ for (i = 0; i < max_sessions; i++) {
s = sessions[i];
if (s) {
send_flush_mdlog(s);
@@ -2317,15 +2332,19 @@ retry:
ceph_timeout_jiffies(req1->r_timeout));
if (ret)
err = -EIO;
- ceph_mdsc_put_request(req1);
}
if (req2) {
ret = !wait_for_completion_timeout(&req2->r_safe_completion,
ceph_timeout_jiffies(req2->r_timeout));
if (ret)
err = -EIO;
- ceph_mdsc_put_request(req2);
}
+
+out:
+ if (req1)
+ ceph_mdsc_put_request(req1);
+ if (req2)
+ ceph_mdsc_put_request(req2);
return err;
}
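/*
 * [editor's sketch, not part of the patch] The snapshot-and-retry allocation
 * pattern adopted above: read max_sessions once without mdsc->mutex, size the
 * array from that snapshot, and restart the pass if a larger session index
 * appears. The concurrent grower is simulated; the real code also drops its
 * session references before retrying.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned int max_sessions = 4;	/* grows concurrently in the kernel */

int main(void)
{
	unsigned int snap;
	int *sessions;

retry:
	snap = max_sessions;
	sessions = calloc(snap, sizeof(*sessions));
	if (!sessions)
		return 1;	/* kernel path: err = -ENOMEM */

	if (snap < 8) {		/* pretend an MDS index >= snap showed up */
		free(sessions);
		max_sessions = 8;	/* simulate concurrent growth */
		goto retry;
	}

	printf("allocated for %u sessions\n", snap);
	free(sessions);
	return 0;
}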
@@ -2388,6 +2407,7 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
int wait = (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync);
dout("write_inode %p wait=%d\n", inode, wait);
+ ceph_fscache_unpin_writeback(inode, wbc);
if (wait) {
dirty = try_flush_caps(inode, &flush_tid);
if (dirty)
@@ -3375,8 +3395,7 @@ static void handle_cap_grant(struct inode *inode,
if ((newcaps & CEPH_CAP_LINK_SHARED) &&
(extra_info->issued & CEPH_CAP_LINK_EXCL) == 0) {
set_nlink(inode, le32_to_cpu(grant->nlink));
- if (inode->i_nlink == 0 &&
- (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL)))
+ if (inode->i_nlink == 0)
deleted_inode = true;
}
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index c138e8126286..bbed3224ad68 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -204,6 +204,8 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
int fmode, bool isdir)
{
struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_mount_options *opt =
+ ceph_inode_to_client(&ci->vfs_inode)->mount_options;
struct ceph_file_info *fi;
dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
@@ -225,6 +227,9 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
if (!fi)
return -ENOMEM;
+ if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
+ fi->flags |= CEPH_F_SYNC;
+
file->private_data = fi;
}
@@ -248,8 +253,7 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
switch (inode->i_mode & S_IFMT) {
case S_IFREG:
- ceph_fscache_register_inode_cookie(inode);
- ceph_fscache_file_set_cookie(inode, file);
+ ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
fallthrough;
case S_IFDIR:
ret = ceph_init_file_info(inode, file, fmode,
@@ -579,6 +583,7 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
struct ceph_inode_info *ci = ceph_inode(dir);
struct inode *inode;
struct timespec64 now;
+ struct ceph_string *pool_ns;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_vino vino = { .ino = req->r_deleg_ino,
.snap = CEPH_NOSNAP };
@@ -628,6 +633,12 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
in.max_size = cpu_to_le64(lo->stripe_unit);
ceph_file_layout_to_legacy(lo, &in.layout);
+ /* lo is private, so pool_ns can't change */
+ pool_ns = rcu_dereference_raw(lo->pool_ns);
+ if (pool_ns) {
+ iinfo.pool_ns_len = pool_ns->len;
+ iinfo.pool_ns_data = pool_ns->str;
+ }
down_read(&mdsc->snap_rwsem);
ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
@@ -746,8 +757,10 @@ retry:
restore_deleg_ino(dir, req->r_deleg_ino);
ceph_mdsc_put_request(req);
try_async = false;
+ ceph_put_string(rcu_dereference_raw(lo.pool_ns));
goto retry;
}
+ ceph_put_string(rcu_dereference_raw(lo.pool_ns));
goto out_req;
}
}
@@ -822,6 +835,7 @@ int ceph_release(struct inode *inode, struct file *file)
dout("release inode %p regular file %p\n", inode, file);
WARN_ON(!list_empty(&fi->rw_contexts));
+ ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
ceph_put_fmode(ci, fi->fmode, 1);
kmem_cache_free(ceph_file_cachep, fi);
@@ -1218,7 +1232,11 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
snapc, snapc ? snapc->seq : 0);
if (write) {
- int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
+ int ret2;
+
+ ceph_fscache_invalidate(inode, true);
+
+ ret2 = invalidate_inode_pages2_range(inode->i_mapping,
pos >> PAGE_SHIFT,
(pos + count - 1) >> PAGE_SHIFT);
if (ret2 < 0)
@@ -1429,6 +1447,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
if (ret < 0)
return ret;
+ ceph_fscache_invalidate(inode, false);
ret = invalidate_inode_pages2_range(inode->i_mapping,
pos >> PAGE_SHIFT,
(pos + count - 1) >> PAGE_SHIFT);
@@ -1536,7 +1555,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct ceph_inode_info *ci = ceph_inode(inode);
bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
ssize_t ret;
- int want, got = 0;
+ int want = 0, got = 0;
int retry_op = 0, read = 0;
again:
@@ -1551,13 +1570,14 @@ again:
else
ceph_start_io_read(inode);
+ if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
+ want |= CEPH_CAP_FILE_CACHE;
if (fi->fmode & CEPH_FILE_MODE_LAZY)
- want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
- else
- want = CEPH_CAP_FILE_CACHE;
+ want |= CEPH_CAP_FILE_LAZYIO;
+
ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
if (ret < 0) {
- if (iocb->ki_flags & IOCB_DIRECT)
+ if (direct_lock)
ceph_end_io_direct(inode);
else
ceph_end_io_read(inode);
@@ -1691,7 +1711,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_cap_flush *prealloc_cf;
ssize_t count, written = 0;
- int err, want, got;
+ int err, want = 0, got;
bool direct_lock = false;
u32 map_flags;
u64 pool_flags;
@@ -1766,10 +1786,10 @@ retry_snap:
dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
inode, ceph_vinop(inode), pos, count, i_size_read(inode));
+ if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
+ want |= CEPH_CAP_FILE_BUFFER;
if (fi->fmode & CEPH_FILE_MODE_LAZY)
- want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
- else
- want = CEPH_CAP_FILE_BUFFER;
+ want |= CEPH_CAP_FILE_LAZYIO;
got = 0;
err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
if (err < 0)
@@ -2113,6 +2133,7 @@ static long ceph_fallocate(struct file *file, int mode,
goto unlock;
filemap_invalidate_lock(inode->i_mapping);
+ ceph_fscache_invalidate(inode, false);
ceph_zero_pagecache_range(inode, offset, length);
ret = ceph_zero_objects(inode, offset, length);
@@ -2437,6 +2458,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
goto out_caps;
/* Drop dst file cached pages */
+ ceph_fscache_invalidate(dst_inode, false);
ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
dst_off >> PAGE_SHIFT,
(dst_off + len) >> PAGE_SHIFT);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index e3322fcb2e8d..ef4a980a7bf3 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -564,6 +564,8 @@ void ceph_evict_inode(struct inode *inode)
percpu_counter_dec(&mdsc->metric.total_inodes);
truncate_inode_pages_final(&inode->i_data);
+ if (inode->i_state & I_PINNING_FSCACHE_WB)
+ ceph_fscache_unuse_cookie(inode, true);
clear_inode(inode);
ceph_fscache_unregister_inode_cookie(ci);
@@ -634,6 +636,12 @@ int ceph_fill_file_size(struct inode *inode, int issued,
}
i_size_write(inode, size);
inode->i_blocks = calc_inode_blocks(size);
+ /*
+ * If we're expanding, then we should be able to just update
+ * the existing cookie.
+ */
+ if (size > isize)
+ ceph_fscache_update(inode);
ci->i_reported_size = size;
if (truncate_seq != ci->i_truncate_seq) {
dout("truncate_seq %u -> %u\n",
@@ -666,10 +674,6 @@ int ceph_fill_file_size(struct inode *inode, int issued,
truncate_size);
ci->i_truncate_size = truncate_size;
}
-
- if (queue_trunc)
- ceph_fscache_invalidate(inode);
-
return queue_trunc;
}
@@ -1053,6 +1057,8 @@ int ceph_fill_inode(struct inode *inode, struct page *locked_page,
spin_unlock(&ci->i_ceph_lock);
+ ceph_fscache_register_inode_cookie(inode);
+
if (fill_inline)
ceph_fill_inline_data(inode, locked_page,
iinfo->inline_data, iinfo->inline_len);
@@ -1814,11 +1820,13 @@ bool ceph_inode_set_size(struct inode *inode, loff_t size)
spin_lock(&ci->i_ceph_lock);
dout("set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
i_size_write(inode, size);
+ ceph_fscache_update(inode);
inode->i_blocks = calc_inode_blocks(size);
ret = __ceph_should_report_size(ci);
spin_unlock(&ci->i_ceph_lock);
+
return ret;
}
@@ -1844,6 +1852,8 @@ static void ceph_do_invalidate_pages(struct inode *inode)
u32 orig_gen;
int check = 0;
+ ceph_fscache_invalidate(inode, false);
+
mutex_lock(&ci->i_truncate_mutex);
if (ceph_inode_is_shutdown(inode)) {
@@ -1868,7 +1878,7 @@ static void ceph_do_invalidate_pages(struct inode *inode)
orig_gen = ci->i_rdcache_gen;
spin_unlock(&ci->i_ceph_lock);
- ceph_fscache_invalidate(inode);
+ ceph_fscache_invalidate(inode, false);
if (invalidate_inode_pages2(inode->i_mapping) < 0) {
pr_err("invalidate_inode_pages2 %llx.%llx failed\n",
ceph_vinop(inode));
@@ -1937,6 +1947,7 @@ retry:
ci->i_truncate_pending, to);
spin_unlock(&ci->i_ceph_lock);
+ ceph_fscache_resize(inode, to);
truncate_pagecache(inode, to);
spin_lock(&ci->i_ceph_lock);
@@ -2184,7 +2195,6 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
if (inode_dirty_flags)
__mark_inode_dirty(inode, inode_dirty_flags);
-
if (mask) {
req->r_inode = inode;
ihold(inode);
diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
index c57699d8408d..0fcba68f9a99 100644
--- a/fs/ceph/metric.c
+++ b/fs/ceph/metric.c
@@ -160,8 +160,6 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
msg->hdr.version = cpu_to_le16(1);
msg->hdr.compat_version = cpu_to_le16(1);
msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
- dout("client%llu send metrics to mds%d\n",
- ceph_client_gid(mdsc->fsc->client), s->s_mds);
ceph_con_send(&s->s_con, msg);
return true;
diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c
index 620c691af40e..a338a3ec0dc4 100644
--- a/fs/ceph/quota.c
+++ b/fs/ceph/quota.c
@@ -30,6 +30,9 @@ static inline bool ceph_has_realms_with_quotas(struct inode *inode)
/* if root is the real CephFS root, we don't have quota realms */
if (root && ceph_ino(root) == CEPH_INO_ROOT)
return false;
+ /* MDS stray dirs have no quota realms */
+ if (ceph_vino_is_reserved(ceph_inode(inode)->i_vino))
+ return false;
/* otherwise, we can't know for sure */
return true;
}
@@ -494,10 +497,24 @@ bool ceph_quota_update_statfs(struct ceph_fs_client *fsc, struct kstatfs *buf)
if (ci->i_max_bytes) {
total = ci->i_max_bytes >> CEPH_BLOCK_SHIFT;
used = ci->i_rbytes >> CEPH_BLOCK_SHIFT;
+ /* For quota size less than 4MB, use 4KB block size */
+ if (!total) {
+ total = ci->i_max_bytes >> CEPH_4K_BLOCK_SHIFT;
+ used = ci->i_rbytes >> CEPH_4K_BLOCK_SHIFT;
+ buf->f_frsize = 1 << CEPH_4K_BLOCK_SHIFT;
+ }
/* It is possible for a quota to be exceeded.
* Report 'zero' in that case
*/
free = total > used ? total - used : 0;
+		/* For a quota smaller than 4KB, report a single
+		 * 4KB block: free=0 when the quota is exhausted
+		 * and free=1 otherwise. */
+ if (!total) {
+ total = 1;
+ free = ci->i_max_bytes > ci->i_rbytes ? 1 : 0;
+ buf->f_frsize = 1 << CEPH_4K_BLOCK_SHIFT;
+ }
}
spin_unlock(&ci->i_ceph_lock);
if (total) {
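/*
 * [editor's sketch, not part of the patch] Worked example of the small-quota
 * statfs math in the hunk above: counts default to 4MB blocks, a quota under
 * 4MB is reported in 4KB blocks, and a quota under 4KB as a single 4KB block.
 * Plain userspace arithmetic using the same shift values as super.h.
 */
#include <stdio.h>

#define BLOCK_SHIFT	22	/* 4 MB */
#define BLOCK_4K_SHIFT	12	/* 4 KB */

int main(void)
{
	unsigned long long max_bytes = 1ULL << 20;	/* 1 MB quota */
	unsigned long long rbytes = 300ULL * 1024;	/* 300 KB used */
	unsigned long long total, used, avail, frsize = 1ULL << BLOCK_SHIFT;

	total = max_bytes >> BLOCK_SHIFT;
	used = rbytes >> BLOCK_SHIFT;
	if (!total) {			/* quota < 4MB: switch to 4KB blocks */
		total = max_bytes >> BLOCK_4K_SHIFT;
		used = rbytes >> BLOCK_4K_SHIFT;
		frsize = 1ULL << BLOCK_4K_SHIFT;
	}
	avail = total > used ? total - used : 0;
	if (!total) {			/* quota < 4KB: report one block */
		total = 1;
		avail = max_bytes > rbytes ? 1 : 0;
		frsize = 1ULL << BLOCK_4K_SHIFT;
	}
	/* prints total=256 used=75 free=181 frsize=4096 */
	printf("total=%llu used=%llu free=%llu frsize=%llu\n",
	       total, used, avail, frsize);
	return 0;
}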
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index bab61232dc5a..bf79f369aec6 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -27,6 +27,8 @@
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
+#include <uapi/linux/magic.h>
+
static DEFINE_SPINLOCK(ceph_fsc_lock);
static LIST_HEAD(ceph_fsc_list);
@@ -146,6 +148,7 @@ enum {
Opt_mds_namespace,
Opt_recover_session,
Opt_source,
+ Opt_mon_addr,
/* string args above */
Opt_dirstat,
Opt_rbytes,
@@ -159,6 +162,7 @@ enum {
Opt_quotadf,
Opt_copyfrom,
Opt_wsync,
+ Opt_pagecache,
};
enum ceph_recover_session_mode {
@@ -197,8 +201,10 @@ static const struct fs_parameter_spec ceph_mount_parameters[] = {
fsparam_u32 ("rsize", Opt_rsize),
fsparam_string ("snapdirname", Opt_snapdirname),
fsparam_string ("source", Opt_source),
+ fsparam_string ("mon_addr", Opt_mon_addr),
fsparam_u32 ("wsize", Opt_wsize),
fsparam_flag_no ("wsync", Opt_wsync),
+ fsparam_flag_no ("pagecache", Opt_pagecache),
{}
};
@@ -228,9 +234,92 @@ static void canonicalize_path(char *path)
}
/*
- * Parse the source parameter. Distinguish the server list from the path.
+ * Check if the mds namespace in ceph_mount_options matches
+ * the passed in namespace string. First time match (when
+ * ->mds_namespace is NULL) is treated specially, since
+ * ->mds_namespace needs to be initialized by the caller.
+ */
+static int namespace_equals(struct ceph_mount_options *fsopt,
+ const char *namespace, size_t len)
+{
+ return !(fsopt->mds_namespace &&
+ (strlen(fsopt->mds_namespace) != len ||
+ strncmp(fsopt->mds_namespace, namespace, len)));
+}
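/*
 * [editor's sketch, not part of the patch] The tri-state behaviour of
 * namespace_equals() above: an unset (NULL) stored namespace matches anything
 * so the caller can initialize it, otherwise both length and content must
 * match. Userspace stand-in for the kernel helper.
 */
#include <assert.h>
#include <stddef.h>
#include <string.h>

static int ns_equals(const char *stored, const char *ns, size_t len)
{
	return !(stored &&
		 (strlen(stored) != len || strncmp(stored, ns, len)));
}

int main(void)
{
	assert(ns_equals(NULL, "cephfs", 6));	/* first time: always matches */
	assert(ns_equals("cephfs", "cephfs", 6));
	assert(!ns_equals("cephfs", "other", 5));
	assert(!ns_equals("cephfs", "cephfs2", 7));	/* length mismatch */
	return 0;
}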
+
+static int ceph_parse_old_source(const char *dev_name, const char *dev_name_end,
+ struct fs_context *fc)
+{
+ int r;
+ struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+ struct ceph_mount_options *fsopt = pctx->opts;
+
+ if (*dev_name_end != ':')
+ return invalfc(fc, "separator ':' missing in source");
+
+ r = ceph_parse_mon_ips(dev_name, dev_name_end - dev_name,
+ pctx->copts, fc->log.log, ',');
+ if (r)
+ return r;
+
+ fsopt->new_dev_syntax = false;
+ return 0;
+}
+
+static int ceph_parse_new_source(const char *dev_name, const char *dev_name_end,
+ struct fs_context *fc)
+{
+ size_t len;
+ struct ceph_fsid fsid;
+ struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+ struct ceph_mount_options *fsopt = pctx->opts;
+ char *fsid_start, *fs_name_start;
+
+ if (*dev_name_end != '=') {
+		dout("separator '=' missing in source\n");
+ return -EINVAL;
+ }
+
+ fsid_start = strchr(dev_name, '@');
+ if (!fsid_start)
+ return invalfc(fc, "missing cluster fsid");
+ ++fsid_start; /* start of cluster fsid */
+
+ fs_name_start = strchr(fsid_start, '.');
+ if (!fs_name_start)
+ return invalfc(fc, "missing file system name");
+
+ if (ceph_parse_fsid(fsid_start, &fsid))
+ return invalfc(fc, "Invalid FSID");
+
+ ++fs_name_start; /* start of file system name */
+ len = dev_name_end - fs_name_start;
+
+ if (!namespace_equals(fsopt, fs_name_start, len))
+ return invalfc(fc, "Mismatching mds_namespace");
+ kfree(fsopt->mds_namespace);
+ fsopt->mds_namespace = kstrndup(fs_name_start, len, GFP_KERNEL);
+ if (!fsopt->mds_namespace)
+ return -ENOMEM;
+ dout("file system (mds namespace) '%s'\n", fsopt->mds_namespace);
+
+ fsopt->new_dev_syntax = true;
+ return 0;
+}
+
+/*
+ * Parse the source parameter. Distinguish the device spec from the path.
+ * Try the new device format first and fall back to the old format if
+ * needed.
*
- * The source will look like:
+ * The new device syntax looks like:
+ * <device_spec>=/<path>
+ * where
+ * <device_spec> is name@fsid.fsname
+ * <path> is optional, but if present must begin with '/'
+ * (monitor addresses are passed via mount option)
+ *
+ * Old device syntax is:
* <server_spec>[,<server_spec>...]:[<path>]
* where
* <server_spec> is <ip>[:<port>]
@@ -263,24 +352,44 @@ static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
dev_name_end = dev_name + strlen(dev_name);
}
- dev_name_end--; /* back up to ':' separator */
- if (dev_name_end < dev_name || *dev_name_end != ':')
- return invalfc(fc, "No path or : separator in source");
+ dev_name_end--; /* back up to separator */
+ if (dev_name_end < dev_name)
+ return invalfc(fc, "Path missing in source");
dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
if (fsopt->server_path)
dout("server path '%s'\n", fsopt->server_path);
- ret = ceph_parse_mon_ips(param->string, dev_name_end - dev_name,
- pctx->copts, fc->log.log);
- if (ret)
- return ret;
+	dout("trying new device syntax\n");
+ ret = ceph_parse_new_source(dev_name, dev_name_end, fc);
+ if (ret) {
+ if (ret != -EINVAL)
+ return ret;
+		dout("trying old device syntax\n");
+ ret = ceph_parse_old_source(dev_name, dev_name_end, fc);
+ if (ret)
+ return ret;
+ }
fc->source = param->string;
param->string = NULL;
return 0;
}
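/*
 * [editor's sketch, not part of the patch] How the new source syntax is
 * split, for a device spec like "client@<fsid>.myfs=/dir": '=' terminates the
 * spec, '@' introduces the cluster fsid, and the next '.' starts the fs name.
 * Simplified userspace demo; the kernel splits the path off separately and
 * validates the fsid with ceph_parse_fsid().
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *src =
		"client@00000000-1111-2222-3333-444444444444.myfs=/dir";
	const char *end = strchr(src, '=');	/* end of device spec */
	const char *fsid = strchr(src, '@');
	const char *fsname;

	if (!end || !fsid)
		return 1;		/* would fall back to the old syntax */
	fsid++;				/* start of cluster fsid */
	fsname = strchr(fsid, '.');
	if (!fsname)
		return 1;		/* missing file system name */
	fsname++;			/* start of file system name */

	printf("fsid='%.*s' fsname='%.*s' path='%s'\n",
	       (int)(fsname - 1 - fsid), fsid,
	       (int)(end - fsname), fsname, end + 1);
	return 0;
}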
+static int ceph_parse_mon_addr(struct fs_parameter *param,
+ struct fs_context *fc)
+{
+ struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+ struct ceph_mount_options *fsopt = pctx->opts;
+
+ kfree(fsopt->mon_addr);
+ fsopt->mon_addr = param->string;
+ param->string = NULL;
+
+ return ceph_parse_mon_ips(fsopt->mon_addr, strlen(fsopt->mon_addr),
+ pctx->copts, fc->log.log, '/');
+}
+
static int ceph_parse_mount_param(struct fs_context *fc,
struct fs_parameter *param)
{
@@ -306,6 +415,8 @@ static int ceph_parse_mount_param(struct fs_context *fc,
param->string = NULL;
break;
case Opt_mds_namespace:
+ if (!namespace_equals(fsopt, param->string, strlen(param->string)))
+ return invalfc(fc, "Mismatching mds_namespace");
kfree(fsopt->mds_namespace);
fsopt->mds_namespace = param->string;
param->string = NULL;
@@ -323,6 +434,8 @@ static int ceph_parse_mount_param(struct fs_context *fc,
if (fc->source)
return invalfc(fc, "Multiple sources specified");
return ceph_parse_source(param, fc);
+ case Opt_mon_addr:
+ return ceph_parse_mon_addr(param, fc);
case Opt_wsize:
if (result.uint_32 < PAGE_SIZE ||
result.uint_32 > CEPH_MAX_WRITE_SIZE)
@@ -455,6 +568,12 @@ static int ceph_parse_mount_param(struct fs_context *fc,
else
fsopt->flags |= CEPH_MOUNT_OPT_ASYNC_DIROPS;
break;
+ case Opt_pagecache:
+ if (result.negated)
+ fsopt->flags |= CEPH_MOUNT_OPT_NOPAGECACHE;
+ else
+ fsopt->flags &= ~CEPH_MOUNT_OPT_NOPAGECACHE;
+ break;
default:
BUG();
}
@@ -474,6 +593,7 @@ static void destroy_mount_options(struct ceph_mount_options *args)
kfree(args->mds_namespace);
kfree(args->server_path);
kfree(args->fscache_uniq);
+ kfree(args->mon_addr);
kfree(args);
}
@@ -517,6 +637,10 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
if (ret)
return ret;
+ ret = strcmp_null(fsopt1->mon_addr, fsopt2->mon_addr);
+ if (ret)
+ return ret;
+
return ceph_compare_options(new_opt, fsc->client);
}
@@ -572,15 +696,22 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
if ((fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) == 0)
seq_puts(m, ",copyfrom");
- if (fsopt->mds_namespace)
+ /* dump mds_namespace when old device syntax is in use */
+ if (fsopt->mds_namespace && !fsopt->new_dev_syntax)
seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
+ if (fsopt->mon_addr)
+ seq_printf(m, ",mon_addr=%s", fsopt->mon_addr);
+
if (fsopt->flags & CEPH_MOUNT_OPT_CLEANRECOVER)
seq_show_option(m, "recover_session", "clean");
if (!(fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS))
seq_puts(m, ",wsync");
+ if (fsopt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
+ seq_puts(m, ",nopagecache");
+
if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
seq_printf(m, ",wsize=%u", fsopt->wsize);
if (fsopt->rsize != CEPH_MAX_READ_SIZE)
@@ -787,16 +918,10 @@ static int __init init_caches(void)
if (!ceph_wb_pagevec_pool)
goto bad_pagevec_pool;
- error = ceph_fscache_register();
- if (error)
- goto bad_fscache;
-
return 0;
-bad_fscache:
- kmem_cache_destroy(ceph_mds_request_cachep);
bad_pagevec_pool:
- mempool_destroy(ceph_wb_pagevec_pool);
+ kmem_cache_destroy(ceph_mds_request_cachep);
bad_mds_req:
kmem_cache_destroy(ceph_dir_file_cachep);
bad_dir_file:
@@ -828,8 +953,6 @@ static void destroy_caches(void)
kmem_cache_destroy(ceph_dir_file_cachep);
kmem_cache_destroy(ceph_mds_request_cachep);
mempool_destroy(ceph_wb_pagevec_pool);
-
- ceph_fscache_unregister();
}
static void __ceph_umount_begin(struct ceph_fs_client *fsc)
@@ -1060,6 +1183,7 @@ static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
static int ceph_get_tree(struct fs_context *fc)
{
struct ceph_parse_opts_ctx *pctx = fc->fs_private;
+ struct ceph_mount_options *fsopt = pctx->opts;
struct super_block *sb;
struct ceph_fs_client *fsc;
struct dentry *res;
@@ -1071,6 +1195,8 @@ static int ceph_get_tree(struct fs_context *fc)
if (!fc->source)
return invalfc(fc, "No source");
+ if (fsopt->new_dev_syntax && !fsopt->mon_addr)
+ return invalfc(fc, "No monitor address");
/* create client (which we may/may not use) */
fsc = create_fs_client(pctx->opts, pctx->copts);
@@ -1156,6 +1282,13 @@ static int ceph_reconfigure_fc(struct fs_context *fc)
else
ceph_clear_mount_opt(fsc, ASYNC_DIROPS);
+ if (strcmp_null(fsc->mount_options->mon_addr, fsopt->mon_addr)) {
+ kfree(fsc->mount_options->mon_addr);
+ fsc->mount_options->mon_addr = fsopt->mon_addr;
+ fsopt->mon_addr = NULL;
+ pr_notice("ceph: monitor addresses recorded, but not used for reconnection");
+ }
+
sync_filesystem(fc->root->d_sb);
return 0;
}
@@ -1333,6 +1466,14 @@ bool disable_send_metrics = false;
module_param_cb(disable_send_metrics, &param_ops_metrics, &disable_send_metrics, 0644);
MODULE_PARM_DESC(disable_send_metrics, "Enable sending perf metrics to ceph cluster (default: on)");
+/* for both v1 and v2 syntax */
+static bool mount_support = true;
+static const struct kernel_param_ops param_ops_mount_syntax = {
+ .get = param_get_bool,
+};
+module_param_cb(mount_syntax_v1, &param_ops_mount_syntax, &mount_support, 0444);
+module_param_cb(mount_syntax_v2, &param_ops_mount_syntax, &mount_support, 0444);
+
module_init(init_ceph);
module_exit(exit_ceph);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index ac331aa07cfa..67f145e1ae7a 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -21,17 +21,14 @@
#include <linux/ceph/libceph.h>
#ifdef CONFIG_CEPH_FSCACHE
-#define FSCACHE_USE_NEW_IO_API
#include <linux/fscache.h>
#endif
-/* f_type in struct statfs */
-#define CEPH_SUPER_MAGIC 0x00c36400
-
/* large granularity for statfs utilization stats to facilitate
* large volume sizes on 32-bit machines. */
#define CEPH_BLOCK_SHIFT 22 /* 4 MB */
#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
+#define CEPH_4K_BLOCK_SHIFT 12 /* 4 KB */
#define CEPH_MOUNT_OPT_CLEANRECOVER (1<<1) /* auto reonnect (clean mode) after blocklisted */
#define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
@@ -45,6 +42,7 @@
#define CEPH_MOUNT_OPT_NOQUOTADF (1<<13) /* no root dir quota in statfs */
#define CEPH_MOUNT_OPT_NOCOPYFROM (1<<14) /* don't use RADOS 'copy-from' op */
#define CEPH_MOUNT_OPT_ASYNC_DIROPS (1<<15) /* allow async directory ops */
+#define CEPH_MOUNT_OPT_NOPAGECACHE (1<<16) /* bypass pagecache altogether */
#define CEPH_MOUNT_OPT_DEFAULT \
(CEPH_MOUNT_OPT_DCACHE | \
@@ -89,6 +87,8 @@ struct ceph_mount_options {
unsigned int max_readdir; /* max readdir result (entries) */
unsigned int max_readdir_bytes; /* max readdir result (bytes) */
+ bool new_dev_syntax;
+
/*
* everything above this point can be memcmp'd; everything below
* is handled in compare_mount_options()
@@ -98,6 +98,7 @@ struct ceph_mount_options {
char *mds_namespace; /* default NULL */
char *server_path; /* default NULL (means "/") */
char *fscache_uniq; /* default NULL */
+ char *mon_addr;
};
struct ceph_fs_client {
@@ -135,7 +136,7 @@ struct ceph_fs_client {
#endif
#ifdef CONFIG_CEPH_FSCACHE
- struct fscache_cookie *fscache;
+ struct fscache_volume *fscache;
#endif
};
@@ -535,19 +536,23 @@ static inline int ceph_ino_compare(struct inode *inode, void *data)
*
* These come from src/mds/mdstypes.h in the ceph sources.
*/
-#define CEPH_MAX_MDS 0x100
-#define CEPH_NUM_STRAY 10
+#define CEPH_MAX_MDS 0x100
+#define CEPH_NUM_STRAY 10
#define CEPH_MDS_INO_MDSDIR_OFFSET (1 * CEPH_MAX_MDS)
+#define CEPH_MDS_INO_LOG_OFFSET (2 * CEPH_MAX_MDS)
#define CEPH_INO_SYSTEM_BASE ((6*CEPH_MAX_MDS) + (CEPH_MAX_MDS * CEPH_NUM_STRAY))
static inline bool ceph_vino_is_reserved(const struct ceph_vino vino)
{
- if (vino.ino < CEPH_INO_SYSTEM_BASE &&
- vino.ino >= CEPH_MDS_INO_MDSDIR_OFFSET) {
- WARN_RATELIMIT(1, "Attempt to access reserved inode number 0x%llx", vino.ino);
- return true;
- }
- return false;
+ if (vino.ino >= CEPH_INO_SYSTEM_BASE ||
+ vino.ino < CEPH_MDS_INO_MDSDIR_OFFSET)
+ return false;
+
+ /* Don't warn on mdsdirs */
+ WARN_RATELIMIT(vino.ino >= CEPH_MDS_INO_LOG_OFFSET,
+ "Attempt to access reserved inode number 0x%llx",
+ vino.ino);
+ return true;
}
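/*
 * [editor's sketch, not part of the patch] The reserved-range arithmetic from
 * the rewritten ceph_vino_is_reserved() above: with CEPH_MAX_MDS = 0x100 and
 * CEPH_NUM_STRAY = 10, inodes 0x100..0xfff are reserved, and only those at or
 * beyond the log offset (0x200) trigger the warning, so mdsdirs stay quiet.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_MDS		0x100
#define NUM_STRAY	10
#define MDSDIR_OFFSET	(1 * MAX_MDS)				/* 0x100 */
#define LOG_OFFSET	(2 * MAX_MDS)				/* 0x200 */
#define SYSTEM_BASE	((6 * MAX_MDS) + (MAX_MDS * NUM_STRAY))	/* 0x1000 */

static bool vino_is_reserved(unsigned long long ino, bool *warn)
{
	*warn = false;
	if (ino >= SYSTEM_BASE || ino < MDSDIR_OFFSET)
		return false;
	*warn = ino >= LOG_OFFSET;	/* don't warn on mdsdirs */
	return true;
}

int main(void)
{
	bool warn, reserved;

	reserved = vino_is_reserved(0x180, &warn);
	printf("0x180: reserved=%d warn=%d\n", reserved, warn);	/* 1 0 */
	reserved = vino_is_reserved(0x300, &warn);
	printf("0x300: reserved=%d warn=%d\n", reserved, warn);	/* 1 1 */
	return 0;
}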
static inline struct inode *ceph_find_inode(struct super_block *sb,
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 87fcacdf3de7..cc8fdcb35b71 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -25,7 +25,7 @@ cifs-$(CONFIG_CIFS_DFS_UPCALL) += cifs_dfs_ref.o dfs_cache.o
cifs-$(CONFIG_CIFS_SWN_UPCALL) += netlink.o cifs_swn.o
-cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o
+cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o
cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
deleted file mode 100644
index 8be57aaedab6..000000000000
--- a/fs/cifs/cache.c
+++ /dev/null
@@ -1,105 +0,0 @@
-// SPDX-License-Identifier: LGPL-2.1
-/*
- * CIFS filesystem cache index structure definitions
- *
- * Copyright (c) 2010 Novell, Inc.
- * Authors(s): Suresh Jayaraman (sjayaraman@suse.de>
- *
- */
-#include "fscache.h"
-#include "cifs_debug.h"
-
-/*
- * CIFS filesystem definition for FS-Cache
- */
-struct fscache_netfs cifs_fscache_netfs = {
- .name = "cifs",
- .version = 0,
-};
-
-/*
- * Register CIFS for caching with FS-Cache
- */
-int cifs_fscache_register(void)
-{
- return fscache_register_netfs(&cifs_fscache_netfs);
-}
-
-/*
- * Unregister CIFS for caching
- */
-void cifs_fscache_unregister(void)
-{
- fscache_unregister_netfs(&cifs_fscache_netfs);
-}
-
-/*
- * Server object for FS-Cache
- */
-const struct fscache_cookie_def cifs_fscache_server_index_def = {
- .name = "CIFS.server",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-static enum
-fscache_checkaux cifs_fscache_super_check_aux(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size)
-{
- struct cifs_fscache_super_auxdata auxdata;
- const struct cifs_tcon *tcon = cookie_netfs_data;
-
- if (datalen != sizeof(auxdata))
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.resource_id = tcon->resource_id;
- auxdata.vol_create_time = tcon->vol_create_time;
- auxdata.vol_serial_number = tcon->vol_serial_number;
-
- if (memcmp(data, &auxdata, datalen) != 0)
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- return FSCACHE_CHECKAUX_OKAY;
-}
-
-/*
- * Superblock object for FS-Cache
- */
-const struct fscache_cookie_def cifs_fscache_super_index_def = {
- .name = "CIFS.super",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
- .check_aux = cifs_fscache_super_check_aux,
-};
-
-static enum
-fscache_checkaux cifs_fscache_inode_check_aux(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size)
-{
- struct cifs_fscache_inode_auxdata auxdata;
- struct cifsInodeInfo *cifsi = cookie_netfs_data;
-
- if (datalen != sizeof(auxdata))
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
- auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
- auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
- auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
-
- if (memcmp(data, &auxdata, datalen) != 0)
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- return FSCACHE_CHECKAUX_OKAY;
-}
-
-const struct fscache_cookie_def cifs_fscache_inode_object_def = {
- .name = "CIFS.uniqueid",
- .type = FSCACHE_COOKIE_TYPE_DATAFILE,
- .check_aux = cifs_fscache_inode_check_aux,
-};
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index d282caf9f037..ea00e1a91250 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -416,11 +416,17 @@ skip_rdma:
from_kuid(&init_user_ns, ses->cred_uid));
spin_lock(&ses->chan_lock);
+ if (CIFS_CHAN_NEEDS_RECONNECT(ses, 0))
+ seq_puts(m, "\tPrimary channel: DISCONNECTED ");
+
if (ses->chan_count > 1) {
seq_printf(m, "\n\n\tExtra Channels: %zu ",
ses->chan_count-1);
- for (j = 1; j < ses->chan_count; j++)
+ for (j = 1; j < ses->chan_count; j++) {
cifs_dump_channel(m, j, &ses->chans[j]);
+ if (CIFS_CHAN_NEEDS_RECONNECT(ses, j))
+ seq_puts(m, "\tDISCONNECTED ");
+ }
}
spin_unlock(&ses->chan_lock);
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 353bd0dd7026..342717bf1dc2 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -84,9 +84,9 @@ struct key_type cifs_spnego_key_type = {
/* get a key struct with a SPNEGO security blob, suitable for session setup */
struct key *
-cifs_get_spnego_key(struct cifs_ses *sesInfo)
+cifs_get_spnego_key(struct cifs_ses *sesInfo,
+ struct TCP_Server_Info *server)
{
- struct TCP_Server_Info *server = cifs_ses_server(sesInfo);
struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
char *description, *dp;
diff --git a/fs/cifs/cifs_spnego.h b/fs/cifs/cifs_spnego.h
index e6a0451877d4..7f102ffeb675 100644
--- a/fs/cifs/cifs_spnego.h
+++ b/fs/cifs/cifs_spnego.h
@@ -29,7 +29,8 @@ struct cifs_spnego_msg {
#ifdef __KERNEL__
extern struct key_type cifs_spnego_key_type;
-extern struct key *cifs_get_spnego_key(struct cifs_ses *sesInfo);
+extern struct key *cifs_get_spnego_key(struct cifs_ses *sesInfo,
+ struct TCP_Server_Info *server);
#endif /* KERNEL */
#endif /* _CIFS_SPNEGO_H */
diff --git a/fs/cifs/cifs_swn.c b/fs/cifs/cifs_swn.c
index 23a1ed2fb769..cdce1609c5c2 100644
--- a/fs/cifs/cifs_swn.c
+++ b/fs/cifs/cifs_swn.c
@@ -396,11 +396,11 @@ static int cifs_swn_resource_state_changed(struct cifs_swn_reg *swnreg, const ch
switch (state) {
case CIFS_SWN_RESOURCE_STATE_UNAVAILABLE:
cifs_dbg(FYI, "%s: resource name '%s' become unavailable\n", __func__, name);
- cifs_ses_mark_for_reconnect(swnreg->tcon->ses);
+ cifs_mark_tcp_ses_conns_for_reconnect(swnreg->tcon->ses->server, true);
break;
case CIFS_SWN_RESOURCE_STATE_AVAILABLE:
cifs_dbg(FYI, "%s: resource name '%s' become available\n", __func__, name);
- cifs_ses_mark_for_reconnect(swnreg->tcon->ses);
+ cifs_mark_tcp_ses_conns_for_reconnect(swnreg->tcon->ses->server, true);
break;
case CIFS_SWN_RESOURCE_STATE_UNKNOWN:
cifs_dbg(FYI, "%s: resource name '%s' changed to unknown state\n", __func__, name);
@@ -498,10 +498,7 @@ static int cifs_swn_reconnect(struct cifs_tcon *tcon, struct sockaddr_storage *a
goto unlock;
}
- spin_lock(&GlobalMid_Lock);
- if (tcon->ses->server->tcpStatus != CifsExiting)
- tcon->ses->server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&GlobalMid_Lock);
+ cifs_mark_tcp_ses_conns_for_reconnect(tcon->ses->server, false);
unlock:
mutex_unlock(&tcon->ses->server->srv_mutex);
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index ee3aab3dd4ac..bf861fef2f0c 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -949,6 +949,9 @@ static void populate_new_aces(char *nacl_base,
pnntace = (struct cifs_ace *) (nacl_base + nsize);
nsize += setup_special_mode_ACE(pnntace, nmode);
num_aces++;
+ pnntace = (struct cifs_ace *) (nacl_base + nsize);
+ nsize += setup_authusers_ACE(pnntace);
+ num_aces++;
goto set_size;
}
@@ -1297,7 +1300,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
if (uid_valid(uid)) { /* chown */
uid_t id;
- nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
+ nowner_sid_ptr = kzalloc(sizeof(struct cifs_sid),
GFP_KERNEL);
if (!nowner_sid_ptr) {
rc = -ENOMEM;
@@ -1326,7 +1329,7 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
}
if (gid_valid(gid)) { /* chgrp */
gid_t id;
- ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
+ ngroup_sid_ptr = kzalloc(sizeof(struct cifs_sid),
GFP_KERNEL);
if (!ngroup_sid_ptr) {
rc = -ENOMEM;
@@ -1613,7 +1616,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 *pnmode,
nsecdesclen = secdesclen;
if (pnmode && *pnmode != NO_CHANGE_64) { /* chmod */
if (mode_from_sid)
- nsecdesclen += sizeof(struct cifs_ace);
+ nsecdesclen += 2 * sizeof(struct cifs_ace);
else /* cifsacl */
nsecdesclen += 5 * sizeof(struct cifs_ace);
} else { /* chown */
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index d118282071b3..0912d8bbbac1 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -141,9 +141,13 @@ int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server,
if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
+ spin_lock(&cifs_tcp_ses_lock);
if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
- server->tcpStatus == CifsNeedNegotiate)
+ server->tcpStatus == CifsNeedNegotiate) {
+ spin_unlock(&cifs_tcp_ses_lock);
return rc;
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
if (!server->session_estab) {
memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index dca42aa87d30..082c21478686 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -26,6 +26,7 @@
#include <linux/random.h>
#include <linux/uuid.h>
#include <linux/xattr.h>
+#include <uapi/linux/magic.h>
#include <net/ipv6.h>
#include "cifsfs.h"
#include "cifspdu.h"
@@ -202,7 +203,7 @@ cifs_read_super(struct super_block *sb)
sb->s_time_max = ts.tv_sec;
}
- sb->s_magic = CIFS_MAGIC_NUMBER;
+ sb->s_magic = CIFS_SUPER_MAGIC;
sb->s_op = &cifs_super_ops;
sb->s_xattr = cifs_xattr_handlers;
rc = super_setup_bdi(sb);
@@ -396,6 +397,9 @@ static void
cifs_evict_inode(struct inode *inode)
{
truncate_inode_pages_final(&inode->i_data);
+ if (inode->i_state & I_PINNING_FSCACHE_WB)
+ cifs_fscache_unuse_inode_cookie(inode, true);
+ cifs_fscache_release_inode_cookie(inode);
clear_inode(inode);
}
@@ -720,6 +724,12 @@ static int cifs_show_stats(struct seq_file *s, struct dentry *root)
}
#endif
+static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+ fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
+ return 0;
+}
+
static int cifs_drop_inode(struct inode *inode)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
@@ -732,6 +742,7 @@ static int cifs_drop_inode(struct inode *inode)
static const struct super_operations cifs_super_ops = {
.statfs = cifs_statfs,
.alloc_inode = cifs_alloc_inode,
+ .write_inode = cifs_write_inode,
.free_inode = cifs_free_inode,
.drop_inode = cifs_drop_inode,
.evict_inode = cifs_evict_inode,
@@ -773,7 +784,7 @@ cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
sep = CIFS_DIR_SEP(cifs_sb);
dentry = dget(sb->s_root);
- p = s = full_path;
+ s = full_path;
do {
struct inode *dir = d_inode(dentry);
@@ -908,6 +919,7 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
out_super:
deactivate_locked_super(sb);
+ return root;
out:
if (cifs_sb) {
kfree(cifs_sb->prepath);
@@ -1624,13 +1636,9 @@ init_cifs(void)
goto out_destroy_cifsoplockd_wq;
}
- rc = cifs_fscache_register();
- if (rc)
- goto out_destroy_deferredclose_wq;
-
rc = cifs_init_inodecache();
if (rc)
- goto out_unreg_fscache;
+ goto out_destroy_deferredclose_wq;
rc = cifs_init_mids();
if (rc)
@@ -1692,8 +1700,6 @@ out_destroy_mids:
cifs_destroy_mids();
out_destroy_inodecache:
cifs_destroy_inodecache();
-out_unreg_fscache:
- cifs_fscache_unregister();
out_destroy_deferredclose_wq:
destroy_workqueue(deferredclose_wq);
out_destroy_cifsoplockd_wq:
@@ -1729,7 +1735,6 @@ exit_cifs(void)
cifs_destroy_request_bufs();
cifs_destroy_mids();
cifs_destroy_inodecache();
- cifs_fscache_unregister();
destroy_workqueue(deferredclose_wq);
destroy_workqueue(cifsoplockd_wq);
destroy_workqueue(decrypt_wq);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 9e5d9e192ef0..15a5c5db038b 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -152,5 +152,6 @@ extern struct dentry *cifs_smb3_do_mount(struct file_system_type *fs_type,
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.34"
+#define SMB3_PRODUCT_BUILD 35
+#define CIFS_VERSION "2.35"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index be74606724c7..48b343d03430 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -24,8 +24,6 @@
#include "../smbfs_common/smb2pdu.h"
#include "smb2pdu.h"
-#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
-
#define SMB_PATH_MAX 260
#define CIFS_PORT 445
#define RFC1001_PORT 139
@@ -113,7 +111,14 @@ enum statusEnum {
CifsGood,
CifsExiting,
CifsNeedReconnect,
- CifsNeedNegotiate
+ CifsNeedNegotiate,
+ CifsInNegotiate,
+ CifsNeedSessSetup,
+ CifsInSessSetup,
+ CifsNeedTcon,
+ CifsInTcon,
+ CifsNeedFilesInvalidate,
+ CifsInFilesInvalidate
};
enum securityEnum {
@@ -263,13 +268,16 @@ struct smb_version_operations {
/* check if we need to negotiate */
bool (*need_neg)(struct TCP_Server_Info *);
/* negotiate to the server */
- int (*negotiate)(const unsigned int, struct cifs_ses *);
+ int (*negotiate)(const unsigned int xid,
+ struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
/* set negotiated write size */
unsigned int (*negotiate_wsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx);
/* set negotiated read size */
unsigned int (*negotiate_rsize)(struct cifs_tcon *tcon, struct smb3_fs_context *ctx);
 	/* setup smb session */
int (*sess_setup)(const unsigned int, struct cifs_ses *,
+ struct TCP_Server_Info *server,
const struct nls_table *);
/* close smb session */
int (*logoff)(const unsigned int, struct cifs_ses *);
@@ -414,7 +422,8 @@ struct smb_version_operations {
void (*set_lease_key)(struct inode *, struct cifs_fid *);
/* generate new lease key */
void (*new_lease_key)(struct cifs_fid *);
- int (*generate_signingkey)(struct cifs_ses *);
+ int (*generate_signingkey)(struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
int (*calc_signature)(struct smb_rqst *, struct TCP_Server_Info *,
bool allocate_crypto);
int (*set_integrity)(const unsigned int, struct cifs_tcon *tcon,
@@ -582,7 +591,7 @@ struct TCP_Server_Info {
char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
struct smb_version_operations *ops;
struct smb_version_values *vals;
- /* updates to tcpStatus protected by GlobalMid_Lock */
+ /* updates to tcpStatus protected by cifs_tcp_ses_lock */
enum statusEnum tcpStatus; /* what we think the status is */
char *hostname; /* hostname portion of UNC string */
struct socket *ssocket;
@@ -659,9 +668,6 @@ struct TCP_Server_Info {
unsigned int total_read; /* total amount of data read in this pass */
atomic_t in_send; /* requests trying to send */
atomic_t num_waiters; /* blocked waiting to get in sendrecv */
-#ifdef CONFIG_CIFS_FSCACHE
- struct fscache_cookie *fscache; /* client index cache cookie */
-#endif
#ifdef CONFIG_CIFS_STATS2
atomic_t num_cmds[NUMBER_OF_SMB2_COMMANDS]; /* total requests by cmd */
atomic_t smb2slowcmd[NUMBER_OF_SMB2_COMMANDS]; /* count resps > 1 sec */
@@ -915,12 +921,13 @@ struct cifs_chan {
*/
struct cifs_ses {
struct list_head smb_ses_list;
+ struct list_head rlist; /* reconnect list */
struct list_head tcon_list;
struct cifs_tcon *tcon_ipc;
struct mutex session_mutex;
struct TCP_Server_Info *server; /* pointer to server info */
int ses_count; /* reference counter */
- enum statusEnum status; /* updates protected by GlobalMid_Lock */
+ enum statusEnum status; /* updates protected by cifs_tcp_ses_lock */
unsigned overrideSecFlg; /* if non-zero override global sec flags */
char *serverOS; /* name of operating system underlying server */
char *serverNOS; /* name of network operating system of server */
@@ -939,17 +946,13 @@ struct cifs_ses {
struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
enum securityEnum sectype; /* what security flavor was specified? */
bool sign; /* is signing required? */
- bool need_reconnect:1; /* connection reset, uid now invalid */
bool domainAuto:1;
- bool binding:1; /* are we binding the session? */
__u16 session_flags;
__u8 smb3signingkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3encryptionkey[SMB3_ENC_DEC_KEY_SIZE];
__u8 smb3decryptionkey[SMB3_ENC_DEC_KEY_SIZE];
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
- __u8 binding_preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
-
/*
* Network interfaces available on the server this session is
* connected to.
@@ -969,45 +972,34 @@ struct cifs_ses {
spinlock_t chan_lock;
/* ========= begin: protected by chan_lock ======== */
#define CIFS_MAX_CHANNELS 16
+#define CIFS_ALL_CHANNELS_SET(ses) \
+ ((1UL << (ses)->chan_count) - 1)
+#define CIFS_ALL_CHANS_NEED_RECONNECT(ses) \
+ ((ses)->chans_need_reconnect == CIFS_ALL_CHANNELS_SET(ses))
+#define CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses) \
+ ((ses)->chans_need_reconnect = CIFS_ALL_CHANNELS_SET(ses))
+#define CIFS_CHAN_NEEDS_RECONNECT(ses, index) \
+ test_bit((index), &(ses)->chans_need_reconnect)
+
struct cifs_chan chans[CIFS_MAX_CHANNELS];
- struct cifs_chan *binding_chan;
size_t chan_count;
size_t chan_max;
atomic_t chan_seq; /* round robin state */
+
+ /*
+ * chans_need_reconnect is a bitmap indicating which of the channels
+	 * under this smb session need to be reconnected.
+	 * If this is not a multichannel session, only one bit will be used.
+	 *
+	 * We ask for sess and tcon reconnection only if all the
+	 * channels are marked as needing reconnect. This lets the
+	 * sessions on top stay alive as long as any of the channels
+	 * below is active.
+ */
+ unsigned long chans_need_reconnect;
/* ========= end: protected by chan_lock ======== */
};
-/*
- * When binding a new channel, we need to access the channel which isn't fully
- * established yet.
- */
-
-static inline
-struct cifs_chan *cifs_ses_binding_channel(struct cifs_ses *ses)
-{
- if (ses->binding)
- return ses->binding_chan;
- else
- return NULL;
-}
-
-/*
- * Returns the server pointer of the session. When binding a new
- * channel this returns the last channel which isn't fully established
- * yet.
- *
- * This function should be use for negprot/sess.setup codepaths. For
- * the other requests see cifs_pick_channel().
- */
-static inline
-struct TCP_Server_Info *cifs_ses_server(struct cifs_ses *ses)
-{
- if (ses->binding)
- return ses->binding_chan->server;
- else
- return ses->server;
-}
-
static inline bool
cap_unix(struct cifs_ses *ses)
{
@@ -1117,7 +1109,7 @@ struct cifs_tcon {
__u32 max_bytes_copy;
#ifdef CONFIG_CIFS_FSCACHE
u64 resource_id; /* server resource id */
- struct fscache_cookie *fscache; /* cookie for share */
+ struct fscache_volume *fscache; /* cookie for share */
#endif
struct list_head pending_opens; /* list of incomplete opens */
struct cached_fid crfid; /* Cached root fid */
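
The chans_need_reconnect bitmap introduced above is the core of this change. As a rough userspace model of the new macros (the struct and helper names below are stand-ins, not kernel code), the whole scheme reduces to plain bit arithmetic on a single unsigned long:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the relevant fields of struct cifs_ses. */
struct mock_ses {
	unsigned long chans_need_reconnect;	/* one bit per channel */
	unsigned int chan_count;
};

/* Mirrors CIFS_ALL_CHANNELS_SET(ses). */
static unsigned long all_channels_set(const struct mock_ses *ses)
{
	return (1UL << ses->chan_count) - 1;
}

/* Mirrors CIFS_ALL_CHANS_NEED_RECONNECT(ses). */
static bool all_chans_need_reconnect(const struct mock_ses *ses)
{
	return ses->chans_need_reconnect == all_channels_set(ses);
}

int main(void)
{
	struct mock_ses ses = { .chan_count = 3 };

	/* One channel drops: session and tcons stay alive. */
	ses.chans_need_reconnect |= 1UL << 1;
	assert(!all_chans_need_reconnect(&ses));

	/* Every channel drops: now sess/tcon reconnect is requested. */
	ses.chans_need_reconnect = all_channels_set(&ses);
	assert(all_chans_need_reconnect(&ses));
	printf("bitmap model ok\n");
	return 0;
}

This is also why the old per-session binding flag and binding_chan pointer can go away below: whether a given channel is being bound is now just a bit test.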
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index d2ff438fd31f..68b9a436af4b 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -2560,7 +2560,7 @@ typedef struct {
__le32 EaSize; /* length of the xattrs */
__u8 ShortNameLength;
__u8 Reserved;
- __u8 ShortName[12];
+ __u8 ShortName[24];
char FileName[1];
} __attribute__((packed)) FILE_BOTH_DIRECTORY_INFO; /* level 0x104 FFrsp data */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 4f5a3e857df4..d3701295402d 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -131,7 +131,11 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
struct smb_hdr *in_buf ,
struct smb_hdr *out_buf,
int *bytes_returned);
-extern int cifs_reconnect(struct TCP_Server_Info *server);
+void
+cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
+ bool mark_smb_session);
+extern int cifs_reconnect(struct TCP_Server_Info *server,
+ bool mark_smb_session);
extern int checkSMB(char *buf, unsigned int len, struct TCP_Server_Info *srvr);
extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
extern bool backup_cred(struct cifs_sb_info *);
@@ -164,6 +168,7 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
extern enum securityEnum select_sectype(struct TCP_Server_Info *server,
enum securityEnum requested);
extern int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
const struct nls_table *nls_cp);
extern struct timespec64 cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
extern u64 cifs_UnixTimeToNT(struct timespec64);
@@ -293,11 +298,15 @@ extern int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon,
const struct nls_table *nlsc);
extern int cifs_negotiate_protocol(const unsigned int xid,
- struct cifs_ses *ses);
+ struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
extern int cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
struct nls_table *nls_info);
extern int cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required);
-extern int CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses);
+extern int CIFSSMBNegotiate(const unsigned int xid,
+ struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
extern int CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
const char *tree, struct cifs_tcon *tcon,
@@ -504,8 +513,10 @@ extern int cifs_verify_signature(struct smb_rqst *rqst,
extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *);
extern void cifs_crypto_secmech_release(struct TCP_Server_Info *server);
extern int calc_seckey(struct cifs_ses *);
-extern int generate_smb30signingkey(struct cifs_ses *);
-extern int generate_smb311signingkey(struct cifs_ses *);
+extern int generate_smb30signingkey(struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
+extern int generate_smb311signingkey(struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
extern int CIFSSMBCopy(unsigned int xid,
struct cifs_tcon *source_tcon,
@@ -601,6 +612,19 @@ bool is_server_using_iface(struct TCP_Server_Info *server,
bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface);
void cifs_ses_mark_for_reconnect(struct cifs_ses *ses);
+unsigned int
+cifs_ses_get_chan_index(struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
+void
+cifs_chan_set_need_reconnect(struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
+void
+cifs_chan_clear_need_reconnect(struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
+bool
+cifs_chan_needs_reconnect(struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
+
void extract_unc_hostname(const char *unc, const char **h, size_t *len);
int copy_path_name(char *dst, const char *src);
int smb2_parse_query_directory(struct cifs_tcon *tcon, struct kvec *rsp_iov,
@@ -626,6 +650,11 @@ static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
int match_target_ip(struct TCP_Server_Info *server,
const char *share, size_t share_len,
bool *result);
+
+int cifs_dfs_query_info_nonascii_quirk(const unsigned int xid,
+ struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+ const char *dfs_link_path);
#endif
static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
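
The new mark_smb_session flag threaded through cifs_reconnect() above decides the blast radius of a reconnect. A hedged summary of how the call sites in this patch appear to classify failures (the enum and helper below are illustrative only, not kernel code):

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative classification of the cifs_reconnect() call sites in this
 * patch: transport-level trouble reconnects only the channel, while
 * session/protocol-level errors also mark the smb session and tcons.
 */
enum reconnect_cause {
	CAUSE_RECV_ERROR,	 /* socket read failed		-> false */
	CAUSE_ECHO_TIMEOUT,	 /* server unresponsive		-> false */
	CAUSE_STATUS_IO_TIMEOUT, /* repeated STATUS_IO_TIMEOUT	-> false */
	CAUSE_SESSION_EXPIRED,	 /* server invalidated the uid	-> true  */
	CAUSE_BAD_PDU,		 /* malformed/oversized frame	-> true  */
};

static bool mark_smb_session(enum reconnect_cause c)
{
	return c == CAUSE_SESSION_EXPIRED || c == CAUSE_BAD_PDU;
}

int main(void)
{
	printf("recv error      -> %d\n", mark_smb_session(CAUSE_RECV_ERROR));
	printf("session expired -> %d\n", mark_smb_session(CAUSE_SESSION_EXPIRED));
	return 0;
}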
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 243d17696f06..071e2f21a7db 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -73,6 +73,16 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
struct list_head *tmp;
struct list_head *tmp1;
+ /* only send once per connect */
+ spin_lock(&cifs_tcp_ses_lock);
+ if (tcon->ses->status != CifsGood ||
+ tcon->tidStatus != CifsNeedReconnect) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return;
+ }
+ tcon->tidStatus = CifsInFilesInvalidate;
+ spin_unlock(&cifs_tcp_ses_lock);
+
/* list all files open on tree connection and mark them invalid */
spin_lock(&tcon->open_file_lock);
list_for_each_safe(tmp, tmp1, &tcon->openFileList) {
@@ -89,6 +99,11 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
mutex_unlock(&tcon->crfid.fid_mutex);
+ spin_lock(&cifs_tcp_ses_lock);
+ if (tcon->tidStatus == CifsInFilesInvalidate)
+ tcon->tidStatus = CifsNeedTcon;
+ spin_unlock(&cifs_tcp_ses_lock);
+
/*
* BB Add call to invalidate_inodes(sb) for all superblocks mounted
* to this tcon.
@@ -120,15 +135,18 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
* only tree disconnect, open, and write, (and ulogoff which does not
* have tcon) are allowed as we start force umount
*/
+ spin_lock(&cifs_tcp_ses_lock);
if (tcon->tidStatus == CifsExiting) {
if (smb_command != SMB_COM_WRITE_ANDX &&
smb_command != SMB_COM_OPEN_ANDX &&
smb_command != SMB_COM_TREE_DISCONNECT) {
+ spin_unlock(&cifs_tcp_ses_lock);
cifs_dbg(FYI, "can not send cmd %d while umounting\n",
smb_command);
return -ENODEV;
}
}
+ spin_unlock(&cifs_tcp_ses_lock);
retries = server->nr_targets;
@@ -148,8 +166,12 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
}
/* are we still trying to reconnect? */
- if (server->tcpStatus != CifsNeedReconnect)
+ spin_lock(&cifs_tcp_ses_lock);
+ if (server->tcpStatus != CifsNeedReconnect) {
+ spin_unlock(&cifs_tcp_ses_lock);
break;
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
if (retries && --retries)
continue;
@@ -166,31 +188,49 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
retries = server->nr_targets;
}
- if (!ses->need_reconnect && !tcon->need_reconnect)
+ spin_lock(&ses->chan_lock);
+ if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
+ spin_unlock(&ses->chan_lock);
return 0;
+ }
+ spin_unlock(&ses->chan_lock);
nls_codepage = load_nls_default();
/*
- * need to prevent multiple threads trying to simultaneously
- * reconnect the same SMB session
- */
- mutex_lock(&ses->session_mutex);
-
- /*
	 * Recheck after acquiring the mutex. If another thread is negotiating
* and the server never sends an answer the socket will be closed
* and tcpStatus set to reconnect.
*/
+ spin_lock(&cifs_tcp_ses_lock);
if (server->tcpStatus == CifsNeedReconnect) {
+ spin_unlock(&cifs_tcp_ses_lock);
rc = -EHOSTDOWN;
- mutex_unlock(&ses->session_mutex);
goto out;
}
+ spin_unlock(&cifs_tcp_ses_lock);
- rc = cifs_negotiate_protocol(0, ses);
- if (rc == 0 && ses->need_reconnect)
- rc = cifs_setup_session(0, ses, nls_codepage);
+ /*
+ * need to prevent multiple threads trying to simultaneously
+ * reconnect the same SMB session
+ */
+ spin_lock(&ses->chan_lock);
+ if (!cifs_chan_needs_reconnect(ses, server)) {
+ spin_unlock(&ses->chan_lock);
+
+ /* this means that we only need to tree connect */
+ if (tcon->need_reconnect)
+ goto skip_sess_setup;
+
+ rc = -EHOSTDOWN;
+ goto out;
+ }
+ spin_unlock(&ses->chan_lock);
+
+ mutex_lock(&ses->session_mutex);
+ rc = cifs_negotiate_protocol(0, ses, server);
+ if (!rc)
+ rc = cifs_setup_session(0, ses, server, nls_codepage);
/* do we need to reconnect tcon? */
if (rc || !tcon->need_reconnect) {
@@ -198,6 +238,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
goto out;
}
+skip_sess_setup:
cifs_mark_open_files_invalid(tcon);
rc = cifs_tree_connect(0, tcon, nls_codepage);
mutex_unlock(&ses->session_mutex);
@@ -337,8 +378,13 @@ static int
smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon,
void **request_buf, void **response_buf)
{
- if (tcon->ses->need_reconnect || tcon->need_reconnect)
+ spin_lock(&tcon->ses->chan_lock);
+ if (cifs_chan_needs_reconnect(tcon->ses, tcon->ses->server) ||
+ tcon->need_reconnect) {
+ spin_unlock(&tcon->ses->chan_lock);
return -EHOSTDOWN;
+ }
+ spin_unlock(&tcon->ses->chan_lock);
return __smb_init(smb_command, wct, tcon, request_buf, response_buf);
}
@@ -476,14 +522,15 @@ should_set_ext_sec_flag(enum securityEnum sectype)
}
int
-CIFSSMBNegotiate(const unsigned int xid, struct cifs_ses *ses)
+CIFSSMBNegotiate(const unsigned int xid,
+ struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
{
NEGOTIATE_REQ *pSMB;
NEGOTIATE_RSP *pSMBr;
int rc = 0;
int bytes_returned;
int i;
- struct TCP_Server_Info *server = ses->server;
u16 count;
if (!server) {
@@ -600,8 +647,12 @@ CIFSSMBTDis(const unsigned int xid, struct cifs_tcon *tcon)
* the tcon is no longer on the list, so no need to take lock before
* checking this.
*/
- if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
- return 0;
+ spin_lock(&tcon->ses->chan_lock);
+ if ((tcon->need_reconnect) || CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses)) {
+ spin_unlock(&tcon->ses->chan_lock);
+ return -EIO;
+ }
+ spin_unlock(&tcon->ses->chan_lock);
rc = small_smb_init(SMB_COM_TREE_DISCONNECT, 0, tcon,
(void **)&smb_buffer);
@@ -696,9 +747,14 @@ CIFSSMBLogoff(const unsigned int xid, struct cifs_ses *ses)
return -EIO;
mutex_lock(&ses->session_mutex);
- if (ses->need_reconnect)
+ spin_lock(&ses->chan_lock);
+ if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
+ spin_unlock(&ses->chan_lock);
goto session_already_dead; /* no need to send SMBlogoff if uid
already closed due to reconnect */
+ }
+ spin_unlock(&ses->chan_lock);
+
rc = small_smb_init(SMB_COM_LOGOFF_ANDX, 2, NULL, (void **)&pSMB);
if (rc) {
mutex_unlock(&ses->session_mutex);
@@ -1401,7 +1457,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
if (server->ops->is_session_expired &&
server->ops->is_session_expired(buf)) {
- cifs_reconnect(server);
+ cifs_reconnect(server, true);
return -1;
}
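
Most hunks in cifssmb.c follow the same shape: tcpStatus and the session/tcon status fields are now read under cifs_tcp_ses_lock instead of GlobalMid_Lock, and the lock is dropped before any slow work. A minimal userspace model of that check-then-act pattern, with a pthread mutex standing in for the spinlock (illustrative only):

#include <pthread.h>
#include <stdio.h>

enum status { GOOD, NEED_RECONNECT, EXITING };

static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
static enum status tcp_status = NEED_RECONNECT;

static int check_reconnect(void)
{
	/* Take the lock only to sample the status... */
	pthread_mutex_lock(&status_lock);
	if (tcp_status != NEED_RECONNECT) {
		pthread_mutex_unlock(&status_lock);
		return 0;		/* nothing to do */
	}
	pthread_mutex_unlock(&status_lock);

	/* ...and run the slow path (teardown, negotiate, ...) unlocked. */
	printf("reconnecting\n");
	return 1;
}

int main(void)
{
	return !check_reconnect();
}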
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 1060164b984a..053cb449eb16 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -162,42 +162,68 @@ static void cifs_resolve_server(struct work_struct *work)
mutex_unlock(&server->srv_mutex);
}
-/**
+/*
* Mark all sessions and tcons for reconnect.
*
* @server needs to be previously set to CifsNeedReconnect.
*/
-static void cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server)
+void
+cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server,
+ bool mark_smb_session)
{
+ struct TCP_Server_Info *pserver;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
- struct mid_q_entry *mid, *nmid;
- struct list_head retry_list;
- struct TCP_Server_Info *pserver;
-
- server->maxBuf = 0;
- server->max_read = 0;
- cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
- trace_smb3_reconnect(server->CurrentMid, server->conn_id, server->hostname);
/*
* before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
* are not used until reconnected.
*/
- cifs_dbg(FYI, "%s: marking sessions and tcons for reconnect\n", __func__);
+ cifs_dbg(FYI, "%s: marking necessary sessions and tcons for reconnect\n", __func__);
/* If server is a channel, select the primary channel */
pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
+
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
- ses->need_reconnect = true;
- list_for_each_entry(tcon, &ses->tcon_list, tcon_list)
+ spin_lock(&ses->chan_lock);
+ if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server))
+ goto next_session;
+
+ if (mark_smb_session)
+ CIFS_SET_ALL_CHANS_NEED_RECONNECT(ses);
+ else
+ cifs_chan_set_need_reconnect(ses, server);
+
+ /* If all channels need reconnect, then tcon needs reconnect */
+ if (!mark_smb_session && !CIFS_ALL_CHANS_NEED_RECONNECT(ses))
+ goto next_session;
+
+ ses->status = CifsNeedReconnect;
+
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
tcon->need_reconnect = true;
+ tcon->tidStatus = CifsNeedReconnect;
+ }
if (ses->tcon_ipc)
ses->tcon_ipc->need_reconnect = true;
+
+next_session:
+ spin_unlock(&ses->chan_lock);
}
spin_unlock(&cifs_tcp_ses_lock);
+}
+
+static void
+cifs_abort_connection(struct TCP_Server_Info *server)
+{
+ struct mid_q_entry *mid, *nmid;
+ struct list_head retry_list;
+
+ server->maxBuf = 0;
+ server->max_read = 0;
/* do not want to be sending data on a socket we are freeing */
cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
@@ -248,16 +274,21 @@ static void cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server
static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
{
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&cifs_tcp_ses_lock);
server->nr_targets = num_targets;
if (server->tcpStatus == CifsExiting) {
/* the demux thread will exit normally next time through the loop */
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&cifs_tcp_ses_lock);
wake_up(&server->response_q);
return false;
}
+
+ cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
+ trace_smb3_reconnect(server->CurrentMid, server->conn_id,
+ server->hostname);
server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&GlobalMid_Lock);
+
+ spin_unlock(&cifs_tcp_ses_lock);
return true;
}
@@ -268,15 +299,23 @@ static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num
* mark all smb sessions as reconnecting for tcp session
* reconnect tcp session
* wake up waiters on reconnection? - (not needed currently)
+ *
 + * If mark_smb_session is passed as true, unconditionally mark
 + * the smb session (and tcon) for reconnect as well. This value
 + * doesn't really matter for the non-multichannel scenario.
+ *
*/
-static int __cifs_reconnect(struct TCP_Server_Info *server)
+static int __cifs_reconnect(struct TCP_Server_Info *server,
+ bool mark_smb_session)
{
int rc = 0;
if (!cifs_tcp_ses_needs_reconnect(server, 1))
return 0;
- cifs_mark_tcp_ses_conns_for_reconnect(server);
+ cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
+
+ cifs_abort_connection(server);
do {
try_to_freeze();
@@ -299,17 +338,20 @@ static int __cifs_reconnect(struct TCP_Server_Info *server)
} else {
atomic_inc(&tcpSesReconnectCount);
set_credits(server, 1);
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&cifs_tcp_ses_lock);
if (server->tcpStatus != CifsExiting)
server->tcpStatus = CifsNeedNegotiate;
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&cifs_tcp_ses_lock);
cifs_swn_reset_server_dstaddr(server);
mutex_unlock(&server->srv_mutex);
+ mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
}
} while (server->tcpStatus == CifsNeedReconnect);
+ spin_lock(&cifs_tcp_ses_lock);
if (server->tcpStatus == CifsNeedNegotiate)
mod_delayed_work(cifsiod_wq, &server->echo, 0);
+ spin_unlock(&cifs_tcp_ses_lock);
wake_up(&server->response_q);
return rc;
@@ -371,7 +413,9 @@ static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_
return rc;
}
-static int reconnect_dfs_server(struct TCP_Server_Info *server)
+static int
+reconnect_dfs_server(struct TCP_Server_Info *server,
+ bool mark_smb_session)
{
int rc = 0;
const char *refpath = server->current_fullpath + 1;
@@ -395,7 +439,9 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
return 0;
- cifs_mark_tcp_ses_conns_for_reconnect(server);
+ cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
+
+ cifs_abort_connection(server);
do {
try_to_freeze();
@@ -416,12 +462,13 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
*/
atomic_inc(&tcpSesReconnectCount);
set_credits(server, 1);
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&cifs_tcp_ses_lock);
if (server->tcpStatus != CifsExiting)
server->tcpStatus = CifsNeedNegotiate;
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&cifs_tcp_ses_lock);
cifs_swn_reset_server_dstaddr(server);
mutex_unlock(&server->srv_mutex);
+ mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
} while (server->tcpStatus == CifsNeedReconnect);
if (target_hint)
@@ -430,29 +477,32 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
dfs_cache_free_tgts(&tl);
/* Need to set up echo worker again once connection has been established */
+ spin_lock(&cifs_tcp_ses_lock);
if (server->tcpStatus == CifsNeedNegotiate)
mod_delayed_work(cifsiod_wq, &server->echo, 0);
+ spin_unlock(&cifs_tcp_ses_lock);
+
wake_up(&server->response_q);
return rc;
}
-int cifs_reconnect(struct TCP_Server_Info *server)
+int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
	/* If tcp session is not a dfs connection, then reconnect to the last target server */
spin_lock(&cifs_tcp_ses_lock);
if (!server->is_dfs_conn || !server->origin_fullpath || !server->leaf_fullpath) {
spin_unlock(&cifs_tcp_ses_lock);
- return __cifs_reconnect(server);
+ return __cifs_reconnect(server, mark_smb_session);
}
spin_unlock(&cifs_tcp_ses_lock);
- return reconnect_dfs_server(server);
+ return reconnect_dfs_server(server, mark_smb_session);
}
#else
-int cifs_reconnect(struct TCP_Server_Info *server)
+int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session)
{
- return __cifs_reconnect(server);
+ return __cifs_reconnect(server, mark_smb_session);
}
#endif
@@ -534,15 +584,18 @@ server_unresponsive(struct TCP_Server_Info *server)
* 65s kernel_recvmsg times out, and we see that we haven't gotten
* a response in >60s.
*/
+ spin_lock(&cifs_tcp_ses_lock);
if ((server->tcpStatus == CifsGood ||
server->tcpStatus == CifsNeedNegotiate) &&
(!server->ops->can_echo || server->ops->can_echo(server)) &&
time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
+ spin_unlock(&cifs_tcp_ses_lock);
cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n",
(3 * server->echo_interval) / HZ);
- cifs_reconnect(server);
+ cifs_reconnect(server, false);
return true;
}
+ spin_unlock(&cifs_tcp_ses_lock);
return false;
}
@@ -576,7 +629,7 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
/* reconnect if no credits and no requests in flight */
if (zero_credits(server)) {
- cifs_reconnect(server);
+ cifs_reconnect(server, false);
return -ECONNABORTED;
}
@@ -587,13 +640,18 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
else
length = sock_recvmsg(server->ssocket, smb_msg, 0);
- if (server->tcpStatus == CifsExiting)
+ spin_lock(&cifs_tcp_ses_lock);
+ if (server->tcpStatus == CifsExiting) {
+ spin_unlock(&cifs_tcp_ses_lock);
return -ESHUTDOWN;
+ }
if (server->tcpStatus == CifsNeedReconnect) {
- cifs_reconnect(server);
+ spin_unlock(&cifs_tcp_ses_lock);
+ cifs_reconnect(server, false);
return -ECONNABORTED;
}
+ spin_unlock(&cifs_tcp_ses_lock);
if (length == -ERESTARTSYS ||
length == -EAGAIN ||
@@ -610,7 +668,7 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg)
if (length <= 0) {
cifs_dbg(FYI, "Received no data or error: %d\n", length);
- cifs_reconnect(server);
+ cifs_reconnect(server, false);
return -ECONNABORTED;
}
}
@@ -689,11 +747,11 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
* initialize frame).
*/
cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
- cifs_reconnect(server);
+ cifs_reconnect(server, true);
break;
default:
cifs_server_dbg(VFS, "RFC 1002 unknown response type 0x%x\n", type);
- cifs_reconnect(server);
+ cifs_reconnect(server, true);
}
return false;
@@ -771,9 +829,9 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
cancel_delayed_work_sync(&server->echo);
cancel_delayed_work_sync(&server->resolve);
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&cifs_tcp_ses_lock);
server->tcpStatus = CifsExiting;
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&cifs_tcp_ses_lock);
wake_up_all(&server->response_q);
/* check if we have blocked requests that need to free */
@@ -866,7 +924,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) -
server->vals->header_preamble_size) {
cifs_server_dbg(VFS, "SMB response too long (%u bytes)\n", pdu_length);
- cifs_reconnect(server);
+ cifs_reconnect(server, true);
return -ECONNABORTED;
}
@@ -913,7 +971,7 @@ cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
if (server->ops->is_session_expired &&
server->ops->is_session_expired(buf)) {
- cifs_reconnect(server);
+ cifs_reconnect(server, true);
return -1;
}
@@ -1017,7 +1075,7 @@ next_pdu:
server->vals->header_preamble_size) {
cifs_server_dbg(VFS, "SMB response too short (%u bytes)\n",
server->pdu_size);
- cifs_reconnect(server);
+ cifs_reconnect(server, true);
continue;
}
@@ -1069,7 +1127,7 @@ next_pdu:
server->ops->is_status_io_timeout(buf)) {
num_io_timeout++;
if (num_io_timeout > NUM_STATUS_IO_TIMEOUT) {
- cifs_reconnect(server);
+ cifs_reconnect(server, false);
num_io_timeout = 0;
continue;
}
@@ -1139,7 +1197,7 @@ next_pdu:
}
memalloc_noreclaim_restore(noreclaim_flag);
- module_put_and_exit(0);
+ module_put_and_kthread_exit(0);
}
/*
@@ -1390,16 +1448,12 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
else
cancel_delayed_work_sync(&server->reconnect);
- spin_lock(&GlobalMid_Lock);
+ spin_lock(&cifs_tcp_ses_lock);
server->tcpStatus = CifsExiting;
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&cifs_tcp_ses_lock);
cifs_crypto_secmech_release(server);
- /* fscache server cookies are based on primary channel only */
- if (!CIFS_SERVER_IS_CHAN(server))
- cifs_fscache_release_client_cookie(server);
-
kfree(server->session_key.response);
server->session_key.response = NULL;
server->session_key.len = 0;
@@ -1545,7 +1599,9 @@ smbd_connected:
* to the struct since the kernel thread not created yet
* no need to spinlock this update of tcpStatus
*/
+ spin_lock(&cifs_tcp_ses_lock);
tcp_ses->tcpStatus = CifsNeedNegotiate;
+ spin_unlock(&cifs_tcp_ses_lock);
if ((ctx->max_credits < 20) || (ctx->max_credits > 60000))
tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE;
@@ -1559,14 +1615,6 @@ smbd_connected:
list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
- /* fscache server cookies are based on primary channel only */
- if (!CIFS_SERVER_IS_CHAN(tcp_ses))
- cifs_fscache_get_client_cookie(tcp_ses);
-#ifdef CONFIG_CIFS_FSCACHE
- else
- tcp_ses->fscache = tcp_ses->primary_server->fscache;
-#endif /* CONFIG_CIFS_FSCACHE */
-
/* queue echo request delayed work */
queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
@@ -1762,15 +1810,13 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
spin_unlock(&cifs_tcp_ses_lock);
return;
}
- spin_unlock(&cifs_tcp_ses_lock);
/* ses_count can never go negative */
WARN_ON(ses->ses_count < 0);
- spin_lock(&GlobalMid_Lock);
if (ses->status == CifsGood)
ses->status = CifsExiting;
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&cifs_tcp_ses_lock);
cifs_free_ipc(ses);
@@ -1789,23 +1835,19 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
spin_lock(&ses->chan_lock);
chan_count = ses->chan_count;
- spin_unlock(&ses->chan_lock);
/* close any extra channels */
if (chan_count > 1) {
int i;
for (i = 1; i < chan_count; i++) {
- /*
- * note: for now, we're okay accessing ses->chans
- * without chan_lock. But when chans can go away, we'll
- * need to introduce ref counting to make sure that chan
- * is not freed from under us.
- */
+ spin_unlock(&ses->chan_lock);
cifs_put_tcp_session(ses->chans[i].server, 0);
+ spin_lock(&ses->chan_lock);
ses->chans[i].server = NULL;
}
}
+ spin_unlock(&ses->chan_lock);
sesInfoFree(ses);
cifs_put_tcp_session(server, 0);
@@ -1945,6 +1987,19 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
}
}
+ ctx->workstation_name = kstrdup(ses->workstation_name, GFP_KERNEL);
+ if (!ctx->workstation_name) {
+ cifs_dbg(FYI, "Unable to allocate memory for workstation_name\n");
+ rc = -ENOMEM;
+ kfree(ctx->username);
+ ctx->username = NULL;
+ kfree_sensitive(ctx->password);
+ ctx->password = NULL;
+ kfree(ctx->domainname);
+ ctx->domainname = NULL;
+ goto out_key_put;
+ }
+
out_key_put:
up_read(&key->sem);
key_put(key);
@@ -1987,11 +2042,13 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
cifs_dbg(FYI, "Existing smb sess found (status=%d)\n",
ses->status);
- mutex_lock(&ses->session_mutex);
- if (ses->need_reconnect) {
+ spin_lock(&ses->chan_lock);
+ if (cifs_chan_needs_reconnect(ses, server)) {
+ spin_unlock(&ses->chan_lock);
cifs_dbg(FYI, "Session needs reconnect\n");
- rc = cifs_negotiate_protocol(xid, ses);
+ mutex_lock(&ses->session_mutex);
+ rc = cifs_negotiate_protocol(xid, ses, server);
if (rc) {
mutex_unlock(&ses->session_mutex);
/* problem -- put our ses reference */
@@ -2000,7 +2057,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
return ERR_PTR(rc);
}
- rc = cifs_setup_session(xid, ses,
+ rc = cifs_setup_session(xid, ses, server,
ctx->local_nls);
if (rc) {
mutex_unlock(&ses->session_mutex);
@@ -2009,8 +2066,11 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
free_xid(xid);
return ERR_PTR(rc);
}
+ mutex_unlock(&ses->session_mutex);
+
+ spin_lock(&ses->chan_lock);
}
- mutex_unlock(&ses->session_mutex);
+ spin_unlock(&ses->chan_lock);
/* existing SMB ses has a server reference already */
cifs_put_tcp_session(server, 0);
@@ -2060,28 +2120,35 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
ses->sectype = ctx->sectype;
ses->sign = ctx->sign;
- mutex_lock(&ses->session_mutex);
/* add server as first channel */
spin_lock(&ses->chan_lock);
ses->chans[0].server = server;
ses->chan_count = 1;
ses->chan_max = ctx->multichannel ? ctx->max_channels:1;
+ ses->chans_need_reconnect = 1;
spin_unlock(&ses->chan_lock);
- rc = cifs_negotiate_protocol(xid, ses);
+ mutex_lock(&ses->session_mutex);
+ rc = cifs_negotiate_protocol(xid, ses, server);
if (!rc)
- rc = cifs_setup_session(xid, ses, ctx->local_nls);
+ rc = cifs_setup_session(xid, ses, server, ctx->local_nls);
+ mutex_unlock(&ses->session_mutex);
/* each channel uses a different signing key */
+ spin_lock(&ses->chan_lock);
memcpy(ses->chans[0].signkey, ses->smb3signingkey,
sizeof(ses->smb3signingkey));
+ spin_unlock(&ses->chan_lock);
- mutex_unlock(&ses->session_mutex);
if (rc)
goto get_ses_fail;
- /* success, put it on the list and add it as first channel */
+ /*
+ * success, put it on the list and add it as first channel
+	 * note: the session becomes active soon after this, so take the
+	 * lock before changing anything in the session.
+ */
spin_lock(&cifs_tcp_ses_lock);
list_add(&ses->smb_ses_list, &server->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
@@ -2161,6 +2228,9 @@ cifs_put_tcon(struct cifs_tcon *tcon)
/* tc_count can never go negative */
WARN_ON(tcon->tc_count < 0);
+ list_del_init(&tcon->tcon_list);
+ spin_unlock(&cifs_tcp_ses_lock);
+
if (tcon->use_witness) {
int rc;
@@ -2171,9 +2241,6 @@ cifs_put_tcon(struct cifs_tcon *tcon)
}
}
- list_del_init(&tcon->tcon_list);
- spin_unlock(&cifs_tcp_ses_lock);
-
xid = get_xid();
if (ses->server->ops->tree_disconnect)
ses->server->ops->tree_disconnect(xid, tcon);
@@ -2283,17 +2350,22 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
if (ses->server->posix_ext_supported) {
tcon->posix_extensions = true;
pr_warn_once("SMB3.11 POSIX Extensions are experimental\n");
- } else {
+ } else if ((ses->server->vals->protocol_id == SMB311_PROT_ID) ||
+ (strcmp(ses->server->vals->version_string,
+ SMB3ANY_VERSION_STRING) == 0) ||
+ (strcmp(ses->server->vals->version_string,
+ SMBDEFAULT_VERSION_STRING) == 0)) {
cifs_dbg(VFS, "Server does not support mounting with posix SMB3.11 extensions\n");
rc = -EOPNOTSUPP;
goto out_fail;
+ } else {
+ cifs_dbg(VFS, "Check vers= mount option. SMB3.11 "
+ "disabled but required for POSIX extensions\n");
+ rc = -EOPNOTSUPP;
+ goto out_fail;
}
}
- /*
- * BB Do we need to wrap session_mutex around this TCon call and Unix
- * SetFS as we do on SessSetup and reconnect?
- */
xid = get_xid();
rc = ses->server->ops->tree_connect(xid, ses, ctx->UNC, tcon,
ctx->local_nls);
@@ -3029,12 +3101,15 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
* for just this mount.
*/
reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx);
+ spin_lock(&cifs_tcp_ses_lock);
if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
(le64_to_cpu(tcon->fsUnixInfo.Capability) &
CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
+ spin_unlock(&cifs_tcp_ses_lock);
rc = -EACCES;
goto out;
}
+ spin_unlock(&cifs_tcp_ses_lock);
} else
tcon->unix_ext = 0; /* server does not support them */
@@ -3069,7 +3144,8 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx)
* Inside cifs_fscache_get_super_cookie it checks
	 * that we do not get the super cookie twice.
*/
- cifs_fscache_get_super_cookie(tcon);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
+ cifs_fscache_get_super_cookie(tcon);
out:
mnt_ctx->server = server;
@@ -3322,6 +3398,11 @@ static int is_path_remote(struct mount_ctx *mnt_ctx)
rc = server->ops->is_path_accessible(xid, tcon, cifs_sb,
full_path);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ if (rc == -ENOENT && is_tcon_dfs(tcon))
+ rc = cifs_dfs_query_info_nonascii_quirk(xid, tcon, cifs_sb,
+ full_path);
+#endif
if (rc != 0 && rc != -EREMOTE) {
kfree(full_path);
return rc;
@@ -3709,8 +3790,6 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses,
if (rc == 0) {
bool is_unicode;
- tcon->tidStatus = CifsGood;
- tcon->need_reconnect = false;
tcon->tid = smb_buffer_response->Tid;
bcc_ptr = pByteArea(smb_buffer_response);
bytes_left = get_bcc(smb_buffer_response);
@@ -3799,26 +3878,37 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
}
int
-cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses)
+cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
{
int rc = 0;
- struct TCP_Server_Info *server = cifs_ses_server(ses);
if (!server->ops->need_neg || !server->ops->negotiate)
return -ENOSYS;
/* only send once per connect */
- if (!server->ops->need_neg(server))
+ spin_lock(&cifs_tcp_ses_lock);
+ if (!server->ops->need_neg(server) ||
+ server->tcpStatus != CifsNeedNegotiate) {
+ spin_unlock(&cifs_tcp_ses_lock);
return 0;
+ }
+ server->tcpStatus = CifsInNegotiate;
+ spin_unlock(&cifs_tcp_ses_lock);
- rc = server->ops->negotiate(xid, ses);
+ rc = server->ops->negotiate(xid, ses, server);
if (rc == 0) {
- spin_lock(&GlobalMid_Lock);
- if (server->tcpStatus == CifsNeedNegotiate)
- server->tcpStatus = CifsGood;
+ spin_lock(&cifs_tcp_ses_lock);
+ if (server->tcpStatus == CifsInNegotiate)
+ server->tcpStatus = CifsNeedSessSetup;
else
rc = -EHOSTDOWN;
- spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+ } else {
+ spin_lock(&cifs_tcp_ses_lock);
+ if (server->tcpStatus == CifsInNegotiate)
+ server->tcpStatus = CifsNeedNegotiate;
+ spin_unlock(&cifs_tcp_ses_lock);
}
return rc;
@@ -3826,12 +3916,26 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses)
int
cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
struct nls_table *nls_info)
{
int rc = -ENOSYS;
- struct TCP_Server_Info *server = cifs_ses_server(ses);
+ bool is_binding = false;
- if (!ses->binding) {
+ /* only send once per connect */
+ spin_lock(&cifs_tcp_ses_lock);
+ if (server->tcpStatus != CifsNeedSessSetup) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return 0;
+ }
+ server->tcpStatus = CifsInSessSetup;
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ spin_lock(&ses->chan_lock);
+ is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ spin_unlock(&ses->chan_lock);
+
+ if (!is_binding) {
ses->capabilities = server->capabilities;
if (!linuxExtEnabled)
ses->capabilities &= (~server->vals->cap_unix);
@@ -3849,10 +3953,26 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
server->sec_mode, server->capabilities, server->timeAdj);
if (server->ops->sess_setup)
- rc = server->ops->sess_setup(xid, ses, nls_info);
+ rc = server->ops->sess_setup(xid, ses, server, nls_info);
- if (rc)
+ if (rc) {
cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
+ spin_lock(&cifs_tcp_ses_lock);
+ if (server->tcpStatus == CifsInSessSetup)
+ server->tcpStatus = CifsNeedSessSetup;
+ spin_unlock(&cifs_tcp_ses_lock);
+ } else {
+ spin_lock(&cifs_tcp_ses_lock);
+ if (server->tcpStatus == CifsInSessSetup)
+ server->tcpStatus = CifsGood;
+		/* Even if only one channel is active, the session is in good state */
+ ses->status = CifsGood;
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ spin_lock(&ses->chan_lock);
+ cifs_chan_clear_need_reconnect(ses, server);
+ spin_unlock(&ses->chan_lock);
+ }
return rc;
}
@@ -4296,7 +4416,7 @@ static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tco
*/
if (rc && server->current_fullpath != server->origin_fullpath) {
server->current_fullpath = server->origin_fullpath;
- cifs_ses_mark_for_reconnect(tcon->ses);
+ cifs_reconnect(tcon->ses->server, true);
}
dfs_cache_free_tgts(tl);
@@ -4314,9 +4434,22 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru
char *tree;
struct dfs_info3_param ref = {0};
+ /* only send once per connect */
+ spin_lock(&cifs_tcp_ses_lock);
+ if (tcon->ses->status != CifsGood ||
+ (tcon->tidStatus != CifsNew &&
+ tcon->tidStatus != CifsNeedTcon)) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return 0;
+ }
+ tcon->tidStatus = CifsInTcon;
+ spin_unlock(&cifs_tcp_ses_lock);
+
tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
- if (!tree)
- return -ENOMEM;
+ if (!tree) {
+ rc = -ENOMEM;
+ goto out;
+ }
if (tcon->ipc) {
scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
@@ -4348,13 +4481,52 @@ out:
kfree(tree);
cifs_put_tcp_super(sb);
+ if (rc) {
+ spin_lock(&cifs_tcp_ses_lock);
+ if (tcon->tidStatus == CifsInTcon)
+ tcon->tidStatus = CifsNeedTcon;
+ spin_unlock(&cifs_tcp_ses_lock);
+ } else {
+ spin_lock(&cifs_tcp_ses_lock);
+ if (tcon->tidStatus == CifsInTcon)
+ tcon->tidStatus = CifsGood;
+ spin_unlock(&cifs_tcp_ses_lock);
+ tcon->need_reconnect = false;
+ }
+
return rc;
}
#else
int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
{
+ int rc;
const struct smb_version_operations *ops = tcon->ses->server->ops;
- return ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
+ /* only send once per connect */
+ spin_lock(&cifs_tcp_ses_lock);
+ if (tcon->ses->status != CifsGood ||
+ (tcon->tidStatus != CifsNew &&
+ tcon->tidStatus != CifsNeedTcon)) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return 0;
+ }
+ tcon->tidStatus = CifsInTcon;
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
+ if (rc) {
+ spin_lock(&cifs_tcp_ses_lock);
+ if (tcon->tidStatus == CifsInTcon)
+ tcon->tidStatus = CifsNeedTcon;
+ spin_unlock(&cifs_tcp_ses_lock);
+ } else {
+ spin_lock(&cifs_tcp_ses_lock);
+ if (tcon->tidStatus == CifsInTcon)
+ tcon->tidStatus = CifsGood;
+ spin_unlock(&cifs_tcp_ses_lock);
+ tcon->need_reconnect = false;
+ }
+
+ return rc;
}
#endif
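
Both variants of cifs_tree_connect() now drive tidStatus through an explicit little state machine so only one thread issues the tree connect per reconnect. A simplified, single-threaded model of the transitions (the real code additionally requires ses->status == CifsGood and holds cifs_tcp_ses_lock around each transition; names here are illustrative):

#include <stdio.h>

enum tid_status { NEW, NEED_TCON, IN_TCON, GOOD };

static enum tid_status tid = NEW;

static int tree_connect(int rc_from_server)
{
	/* Only one caller wins the New/NeedTcon -> InTcon transition. */
	if (tid != NEW && tid != NEED_TCON)
		return 0;		/* someone else owns it */
	tid = IN_TCON;

	/* ... send the SMB tree connect; rc_from_server stands in ... */

	tid = rc_from_server ? NEED_TCON : GOOD;
	return rc_from_server;
}

int main(void)
{
	printf("first attempt rc=%d, status=%d\n", tree_connect(-5), tid);
	printf("retry rc=%d, status=%d\n", tree_connect(0), tid);
	return 0;
}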
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index e9b0fa2a9614..831f42458bf6 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -1355,7 +1355,7 @@ static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cach
}
cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
- cifs_ses_mark_for_reconnect(tcon->ses);
+ cifs_mark_tcp_ses_conns_for_reconnect(tcon->ses->server, true);
}
/* Refresh dfs referral of tcon and mark it for reconnect if needed */
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 6e8e7cc26ae2..ce9b22aecfba 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -22,6 +22,7 @@
#include "cifs_unicode.h"
#include "fs_context.h"
#include "cifs_ioctl.h"
+#include "fscache.h"
static void
renew_parental_timestamps(struct dentry *direntry)
@@ -507,8 +508,12 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
server->ops->close(xid, tcon, &fid);
cifs_del_pending_open(&open);
rc = -ENOMEM;
+ goto out;
}
+ fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
+ file->f_mode & FMODE_WRITE);
+
out:
cifs_put_tlink(tlink);
out_free_xid:
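
With the fscache rewrite, the inode cookie is explicitly pinned for use on open (here, and in cifs_open() below) and unpinned on close, instead of being attached per-file. A toy model of the use/unuse pairing, where the refcount is a stand-in for what fscache tracks internally:

#include <assert.h>
#include <stdio.h>

static int cookie_users;

static void use_cookie(int will_write)
{
	cookie_users++;
	(void)will_write;	/* the kernel also records write intent */
}

static void unuse_cookie(void)
{
	assert(cookie_users > 0);
	cookie_users--;
}

int main(void)
{
	use_cookie(1);		/* open(O_RDWR)   */
	use_cookie(0);		/* open(O_RDONLY) */
	unuse_cookie();		/* close */
	unuse_cookie();		/* close */
	printf("users now %d\n", cookie_users);
	return 0;
}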
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 9fee3af83a73..e7af802dcfa6 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -376,8 +376,6 @@ static void cifsFileInfo_put_final(struct cifsFileInfo *cifs_file)
struct cifsLockInfo *li, *tmp;
struct super_block *sb = inode->i_sb;
- cifs_fscache_release_inode_cookie(inode);
-
/*
* Delete any outstanding lock records. We'll lose them when the file
* is closed anyway.
@@ -570,7 +568,7 @@ int cifs_open(struct inode *inode, struct file *file)
spin_lock(&CIFS_I(inode)->deferred_lock);
cifs_del_deferred_close(cfile);
spin_unlock(&CIFS_I(inode)->deferred_lock);
- goto out;
+ goto use_cache;
} else {
_cifsFileInfo_put(cfile, true, false);
}
@@ -632,8 +630,6 @@ int cifs_open(struct inode *inode, struct file *file)
goto out;
}
- cifs_fscache_set_inode_cookie(inode, file);
-
if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
/*
* Time to set mode which we can not set earlier due to
@@ -652,6 +648,15 @@ int cifs_open(struct inode *inode, struct file *file)
cfile->pid);
}
+use_cache:
+ fscache_use_cookie(cifs_inode_cookie(file_inode(file)),
+ file->f_mode & FMODE_WRITE);
+ if (file->f_flags & O_DIRECT &&
+ (!((file->f_flags & O_ACCMODE) != O_RDONLY) ||
+ file->f_flags & O_APPEND))
+ cifs_invalidate_cache(file_inode(file),
+ FSCACHE_INVAL_DIO_WRITE);
+
out:
free_dentry_path(page);
free_xid(xid);
@@ -876,6 +881,8 @@ int cifs_close(struct inode *inode, struct file *file)
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifs_deferred_close *dclose;
+ cifs_fscache_unuse_inode_cookie(inode, file->f_mode & FMODE_WRITE);
+
if (file->private_data != NULL) {
cfile = file->private_data;
file->private_data = NULL;
@@ -886,7 +893,6 @@ int cifs_close(struct inode *inode, struct file *file)
dclose) {
if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) {
inode->i_ctime = inode->i_mtime = current_time(inode);
- cifs_fscache_update_inode_cookie(inode);
}
spin_lock(&cinode->deferred_lock);
cifs_add_deferred_close(cfile, dclose);
@@ -4198,10 +4204,12 @@ static vm_fault_t
cifs_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
- struct file *file = vmf->vma->vm_file;
- struct inode *inode = file_inode(file);
- cifs_fscache_wait_on_page_write(inode, page);
+#ifdef CONFIG_CIFS_FSCACHE
+ if (PageFsCache(page) &&
+ wait_on_page_fscache_killable(page) < 0)
+ return VM_FAULT_RETRY;
+#endif
lock_page(page);
return VM_FAULT_LOCKED;
@@ -4261,8 +4269,6 @@ cifs_readv_complete(struct work_struct *work)
for (i = 0; i < rdata->nr_pages; i++) {
struct page *page = rdata->pages[i];
- lru_cache_add(page);
-
if (rdata->result == 0 ||
(rdata->result == -EAGAIN && got_bytes)) {
flush_dcache_page(page);
@@ -4270,13 +4276,11 @@ cifs_readv_complete(struct work_struct *work)
} else
SetPageError(page);
- unlock_page(page);
-
if (rdata->result == 0 ||
(rdata->result == -EAGAIN && got_bytes))
cifs_readpage_to_fscache(rdata->mapping->host, page);
- else
- cifs_fscache_uncache_page(rdata->mapping->host, page);
+
+ unlock_page(page);
got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
@@ -4334,7 +4338,6 @@ readpages_fill_pages(struct TCP_Server_Info *server,
* fill them until the writes are flushed.
*/
zero_user(page, 0, PAGE_SIZE);
- lru_cache_add(page);
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
@@ -4344,7 +4347,6 @@ readpages_fill_pages(struct TCP_Server_Info *server,
continue;
} else {
/* no need to hold page hostage */
- lru_cache_add(page);
unlock_page(page);
put_page(page);
rdata->pages[i] = NULL;
@@ -4387,92 +4389,20 @@ cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
return readpages_fill_pages(server, rdata, iter, iter->count);
}
-static int
-readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
- unsigned int rsize, struct list_head *tmplist,
- unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
+static void cifs_readahead(struct readahead_control *ractl)
{
- struct page *page, *tpage;
- unsigned int expected_index;
int rc;
- gfp_t gfp = readahead_gfp_mask(mapping);
-
- INIT_LIST_HEAD(tmplist);
-
- page = lru_to_page(page_list);
-
- /*
- * Lock the page and put it in the cache. Since no one else
- * should have access to this page, we're safe to simply set
- * PG_locked without checking it first.
- */
- __SetPageLocked(page);
- rc = add_to_page_cache_locked(page, mapping,
- page->index, gfp);
-
- /* give up if we can't stick it in the cache */
- if (rc) {
- __ClearPageLocked(page);
- return rc;
- }
-
- /* move first page to the tmplist */
- *offset = (loff_t)page->index << PAGE_SHIFT;
- *bytes = PAGE_SIZE;
- *nr_pages = 1;
- list_move_tail(&page->lru, tmplist);
-
- /* now try and add more pages onto the request */
- expected_index = page->index + 1;
- list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
- /* discontinuity ? */
- if (page->index != expected_index)
- break;
-
- /* would this page push the read over the rsize? */
- if (*bytes + PAGE_SIZE > rsize)
- break;
-
- __SetPageLocked(page);
- rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
- if (rc) {
- __ClearPageLocked(page);
- break;
- }
- list_move_tail(&page->lru, tmplist);
- (*bytes) += PAGE_SIZE;
- expected_index++;
- (*nr_pages)++;
- }
- return rc;
-}
-
-static int cifs_readpages(struct file *file, struct address_space *mapping,
- struct list_head *page_list, unsigned num_pages)
-{
- int rc;
- int err = 0;
- struct list_head tmplist;
- struct cifsFileInfo *open_file = file->private_data;
- struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+ struct cifsFileInfo *open_file = ractl->file->private_data;
+ struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
struct TCP_Server_Info *server;
pid_t pid;
- unsigned int xid;
+ unsigned int xid, nr_pages, last_batch_size = 0, cache_nr_pages = 0;
+ pgoff_t next_cached = ULONG_MAX;
+ bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
+ cifs_inode_cookie(ractl->mapping->host)->cache_priv;
+ bool check_cache = caching;
xid = get_xid();
- /*
- * Reads as many pages as possible from fscache. Returns -ENOBUFS
- * immediately if the cookie is negative
- *
- * After this point, every page in the list might have PG_fscache set,
- * so we will need to clean that up off of every page we don't use.
- */
- rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
- &num_pages);
- if (rc == 0) {
- free_xid(xid);
- return rc;
- }
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid;
@@ -4483,39 +4413,73 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
- __func__, file, mapping, num_pages);
+ __func__, ractl->file, ractl->mapping, readahead_count(ractl));
/*
- * Start with the page at end of list and move it to private
- * list. Do the same with any following pages until we hit
- * the rsize limit, hit an index discontinuity, or run out of
- * pages. Issue the async read and then start the loop again
- * until the list is empty.
- *
- * Note that list order is important. The page_list is in
- * the order of declining indexes. When we put the pages in
- * the rdata->pages, then we want them in increasing order.
+ * Chop the readahead request up into rsize-sized read requests.
*/
- while (!list_empty(page_list) && !err) {
- unsigned int i, nr_pages, bytes, rsize;
- loff_t offset;
- struct page *page, *tpage;
+ while ((nr_pages = readahead_count(ractl) - last_batch_size)) {
+ unsigned int i, got, rsize;
+ struct page *page;
struct cifs_readdata *rdata;
struct cifs_credits credits_on_stack;
struct cifs_credits *credits = &credits_on_stack;
+ pgoff_t index = readahead_index(ractl) + last_batch_size;
+
+ /*
+ * Find out if we have anything cached in the range of
+ * interest, and if so, where the next chunk of cached data is.
+ */
+ if (caching) {
+ if (check_cache) {
+ rc = cifs_fscache_query_occupancy(
+ ractl->mapping->host, index, nr_pages,
+ &next_cached, &cache_nr_pages);
+ if (rc < 0)
+ caching = false;
+ check_cache = false;
+ }
+
+ if (index == next_cached) {
+ /*
+ * TODO: Send a whole batch of pages to be read
+ * by the cache.
+ */
+ page = readahead_page(ractl);
+ last_batch_size = 1 << thp_order(page);
+ if (cifs_readpage_from_fscache(ractl->mapping->host,
+ page) < 0) {
+ /*
+ * TODO: Deal with cache read failure
+ * here, but for the moment, delegate
+ * that to readpage.
+ */
+ caching = false;
+ }
+ unlock_page(page);
+ next_cached++;
+ cache_nr_pages--;
+ if (cache_nr_pages == 0)
+ check_cache = true;
+ continue;
+ }
+ }
if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, true);
- if (rc == -EAGAIN)
- continue;
- else if (rc)
+ if (rc) {
+ if (rc == -EAGAIN)
+ continue;
break;
+ }
}
rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
&rsize, credits);
if (rc)
break;
+ nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl));
+ nr_pages = min_t(size_t, nr_pages, next_cached - index);
/*
* Give up immediately if rsize is too small to read an entire
@@ -4523,16 +4487,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
* reach this point however since we set ra_pages to 0 when the
* rsize is smaller than a cache page.
*/
- if (unlikely(rsize < PAGE_SIZE)) {
- add_credits_and_wake_if(server, credits, 0);
- free_xid(xid);
- return 0;
- }
-
- nr_pages = 0;
- err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
- &nr_pages, &offset, &bytes);
- if (!nr_pages) {
+ if (unlikely(!nr_pages)) {
add_credits_and_wake_if(server, credits, 0);
break;
}
@@ -4540,36 +4495,31 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
if (!rdata) {
/* best to give up if we're out of mem */
- list_for_each_entry_safe(page, tpage, &tmplist, lru) {
- list_del(&page->lru);
- lru_cache_add(page);
- unlock_page(page);
- put_page(page);
- }
- rc = -ENOMEM;
add_credits_and_wake_if(server, credits, 0);
break;
}
- rdata->cfile = cifsFileInfo_get(open_file);
- rdata->server = server;
- rdata->mapping = mapping;
- rdata->offset = offset;
- rdata->bytes = bytes;
- rdata->pid = pid;
- rdata->pagesz = PAGE_SIZE;
- rdata->tailsz = PAGE_SIZE;
+ got = __readahead_batch(ractl, rdata->pages, nr_pages);
+ if (got != nr_pages) {
+ pr_warn("__readahead_batch() returned %u/%u\n",
+ got, nr_pages);
+ nr_pages = got;
+ }
+
+ rdata->nr_pages = nr_pages;
+ rdata->bytes = readahead_batch_length(ractl);
+ rdata->cfile = cifsFileInfo_get(open_file);
+ rdata->server = server;
+ rdata->mapping = ractl->mapping;
+ rdata->offset = readahead_pos(ractl);
+ rdata->pid = pid;
+ rdata->pagesz = PAGE_SIZE;
+ rdata->tailsz = PAGE_SIZE;
rdata->read_into_pages = cifs_readpages_read_into_pages;
rdata->copy_into_pages = cifs_readpages_copy_into_pages;
- rdata->credits = credits_on_stack;
-
- list_for_each_entry_safe(page, tpage, &tmplist, lru) {
- list_del(&page->lru);
- rdata->pages[rdata->nr_pages++] = page;
- }
+ rdata->credits = credits_on_stack;
rc = adjust_credits(server, &rdata->credits, rdata->bytes);
-
if (!rc) {
if (rdata->cfile->invalidHandle)
rc = -EAGAIN;
@@ -4581,7 +4531,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
add_credits_and_wake_if(server, &rdata->credits, 0);
for (i = 0; i < rdata->nr_pages; i++) {
page = rdata->pages[i];
- lru_cache_add(page);
unlock_page(page);
put_page(page);
}
@@ -4591,15 +4540,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
}
kref_put(&rdata->refcount, cifs_readdata_release);
+ last_batch_size = nr_pages;
}
- /* Any pages that have been shown to fscache but didn't get added to
- * the pagecache must be uncached before they get returned to the
- * allocator.
- */
- cifs_fscache_readpages_cancel(mapping->host, page_list);
free_xid(xid);
- return rc;
}
/*
@@ -4801,17 +4745,19 @@ static int cifs_release_page(struct page *page, gfp_t gfp)
{
if (PagePrivate(page))
return 0;
-
- return cifs_fscache_release_page(page, gfp);
+ if (PageFsCache(page)) {
+ if (current_is_kswapd() || !(gfp & __GFP_FS))
+ return false;
+ wait_on_page_fscache(page);
+ }
+ fscache_note_page_release(cifs_inode_cookie(page->mapping->host));
+ return true;
}
static void cifs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
{
- struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
-
- if (offset == 0 && length == PAGE_SIZE)
- cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
+ wait_on_page_fscache(page);
}
static int cifs_launder_page(struct page *page)
@@ -4831,7 +4777,7 @@ static int cifs_launder_page(struct page *page)
if (clear_page_dirty_for_io(page))
rc = cifs_writepage_locked(page, &wbc);
- cifs_fscache_invalidate_page(page, page->mapping->host);
+ wait_on_page_fscache(page);
return rc;
}
@@ -4921,7 +4867,7 @@ oplock_break_done:
* In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
* so this method should never be called.
*
- * Direct IO is not yet supported in the cached mode.
+ * Direct IO is not yet supported in the cached mode.
*/
static ssize_t
cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
@@ -4988,14 +4934,27 @@ static void cifs_swap_deactivate(struct file *file)
/* do we need to unpin (or unlock) the file */
}
+/*
+ * Mark a page as having been made dirty and thus needing writeback. We also
+ * need to pin the cache object to write back to.
+ */
+#ifdef CONFIG_CIFS_FSCACHE
+static int cifs_set_page_dirty(struct page *page)
+{
+ return fscache_set_page_dirty(page, cifs_inode_cookie(page->mapping->host));
+}
+#else
+#define cifs_set_page_dirty __set_page_dirty_nobuffers
+#endif
+
const struct address_space_operations cifs_addr_ops = {
.readpage = cifs_readpage,
- .readpages = cifs_readpages,
+ .readahead = cifs_readahead,
.writepage = cifs_writepage,
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
- .set_page_dirty = __set_page_dirty_nobuffers,
+ .set_page_dirty = cifs_set_page_dirty,
.releasepage = cifs_release_page,
.direct_IO = cifs_direct_io,
.invalidatepage = cifs_invalidate_page,
@@ -5020,7 +4979,7 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
.writepages = cifs_writepages,
.write_begin = cifs_write_begin,
.write_end = cifs_write_end,
- .set_page_dirty = __set_page_dirty_nobuffers,
+ .set_page_dirty = cifs_set_page_dirty,
.releasepage = cifs_release_page,
.invalidatepage = cifs_invalidate_page,
.launder_page = cifs_launder_page,
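
The readpages-to-readahead conversion above drops the manual page-list juggling: the readahead_control window is simply chopped into rsize-sized async reads, with cached ranges punted to fscache. A stripped-down model of the batching loop (cache probing, credits, and error handling omitted; constants illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long rsize = 65536;	/* negotiated read size */
	unsigned long total = 50;	/* pages in the readahead window */
	unsigned long done = 0;

	while (done < total) {
		unsigned long batch = rsize / PAGE_SIZE;	/* 16 pages */

		if (batch > total - done)
			batch = total - done;
		printf("issue async read: pages %lu..%lu\n",
		       done, done + batch - 1);
		done += batch;
	}
	return 0;
}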
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index e3ed25dc6f3f..a92e9eec521f 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -37,6 +37,8 @@
#include "rfc1002pdu.h"
#include "fs_context.h"
+static DEFINE_MUTEX(cifs_mount_mutex);
+
static const match_table_t cifs_smb_version_tokens = {
{ Smb_1, SMB1_VERSION_STRING },
{ Smb_20, SMB20_VERSION_STRING},
@@ -147,7 +149,7 @@ const struct fs_parameter_spec smb3_fs_parameters[] = {
fsparam_u32("echo_interval", Opt_echo_interval),
fsparam_u32("max_credits", Opt_max_credits),
fsparam_u32("handletimeout", Opt_handletimeout),
- fsparam_u32("snapshot", Opt_snapshot),
+ fsparam_u64("snapshot", Opt_snapshot),
fsparam_u32("max_channels", Opt_max_channels),
/* Mount options which take string value */
@@ -707,10 +709,14 @@ static int smb3_get_tree_common(struct fs_context *fc)
static int smb3_get_tree(struct fs_context *fc)
{
int err = smb3_fs_context_validate(fc);
+ int ret;
if (err)
return err;
- return smb3_get_tree_common(fc);
+ mutex_lock(&cifs_mount_mutex);
+ ret = smb3_get_tree_common(fc);
+ mutex_unlock(&cifs_mount_mutex);
+ return ret;
}
static void smb3_fs_context_free(struct fs_context *fc)
@@ -1072,7 +1078,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->echo_interval = result.uint_32;
break;
case Opt_snapshot:
- ctx->snapshot_time = result.uint_32;
+ ctx->snapshot_time = result.uint_64;
break;
case Opt_max_credits:
if (result.uint_32 < 20 || result.uint_32 > 60000) {
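
The snapshot option switch from fsparam_u32 to fsparam_u64 matters because snapshot times are 64-bit timestamps (assumed here to be NT time, 100ns units since 1601) that overflow a u32. A quick check of that assumption:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	/* Roughly year 2021-2022 expressed in 100ns units since 1601. */
	uint64_t nt_now = 132800000000000000ULL;

	printf("value: %" PRIu64 "\n", nt_now);
	printf("fits in u32? %s\n", nt_now <= UINT32_MAX ? "yes" : "no");
	return 0;
}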
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 003c5f1f4dfb..33af72e0ac0c 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -12,332 +12,249 @@
#include "cifs_fs_sb.h"
#include "cifsproto.h"
-/*
- * Key layout of CIFS server cache index object
- */
-struct cifs_server_key {
- __u64 conn_id;
-} __packed;
-
-/*
- * Get a cookie for a server object keyed by {IPaddress,port,family} tuple
- */
-void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
-{
- struct cifs_server_key key;
-
- /*
- * Check if cookie was already initialized so don't reinitialize it.
- * In the future, as we integrate with newer fscache features,
- * we may want to instead add a check if cookie has changed
- */
- if (server->fscache)
- return;
-
- memset(&key, 0, sizeof(key));
- key.conn_id = server->conn_id;
-
- server->fscache =
- fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
- &cifs_fscache_server_index_def,
- &key, sizeof(key),
- NULL, 0,
- server, 0, true);
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
- __func__, server, server->fscache);
-}
-
-void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server)
+static void cifs_fscache_fill_volume_coherency(
+ struct cifs_tcon *tcon,
+ struct cifs_fscache_volume_coherency_data *cd)
{
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
- __func__, server, server->fscache);
- fscache_relinquish_cookie(server->fscache, NULL, false);
- server->fscache = NULL;
+ memset(cd, 0, sizeof(*cd));
+ cd->resource_id = cpu_to_le64(tcon->resource_id);
+ cd->vol_create_time = tcon->vol_create_time;
+ cd->vol_serial_number = cpu_to_le32(tcon->vol_serial_number);
}
-void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
+int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
{
+ struct cifs_fscache_volume_coherency_data cd;
struct TCP_Server_Info *server = tcon->ses->server;
+ struct fscache_volume *vcookie;
+ const struct sockaddr *sa = (struct sockaddr *)&server->dstaddr;
+ size_t slen, i;
char *sharename;
- struct cifs_fscache_super_auxdata auxdata;
+ char *key;
+ int ret = -ENOMEM;
+
+ tcon->fscache = NULL;
+ switch (sa->sa_family) {
+ case AF_INET:
+ case AF_INET6:
+ break;
+ default:
+ cifs_dbg(VFS, "Unknown network family '%d'\n", sa->sa_family);
+ return -EINVAL;
+ }
- /*
- * Check if cookie was already initialized so don't reinitialize it.
- * In the future, as we integrate with newer fscache features,
- * we may want to instead add a check if cookie has changed
- */
- if (tcon->fscache)
- return;
+ memset(&key, 0, sizeof(key));
sharename = extract_sharename(tcon->treeName);
if (IS_ERR(sharename)) {
cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
- tcon->fscache = NULL;
- return;
+ return -EINVAL;
+ }
+
+ slen = strlen(sharename);
+ for (i = 0; i < slen; i++)
+ if (sharename[i] == '/')
+ sharename[i] = ';';
+
+ key = kasprintf(GFP_KERNEL, "cifs,%pISpc,%s", sa, sharename);
+ if (!key)
+ goto out;
+
+ cifs_fscache_fill_volume_coherency(tcon, &cd);
+ vcookie = fscache_acquire_volume(key,
+ NULL, /* preferred_cache */
+ &cd, sizeof(cd));
+ cifs_dbg(FYI, "%s: (%s/0x%p)\n", __func__, key, vcookie);
+ if (IS_ERR(vcookie)) {
+ if (vcookie != ERR_PTR(-EBUSY)) {
+ ret = PTR_ERR(vcookie);
+ goto out_2;
+ }
+ pr_err("Cache volume key already in use (%s)\n", key);
+ vcookie = NULL;
}
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.resource_id = tcon->resource_id;
- auxdata.vol_create_time = tcon->vol_create_time;
- auxdata.vol_serial_number = tcon->vol_serial_number;
-
- tcon->fscache =
- fscache_acquire_cookie(server->fscache,
- &cifs_fscache_super_index_def,
- sharename, strlen(sharename),
- &auxdata, sizeof(auxdata),
- tcon, 0, true);
+ tcon->fscache = vcookie;
+ ret = 0;
+out_2:
+ kfree(key);
+out:
kfree(sharename);
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
- __func__, server->fscache, tcon->fscache);
+ return ret;
}
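The volume cookie key is composed as "cifs,<endpoint>,<share>" with any '/' in the share rewritten to ';', since '/' is not allowed in a cache volume key. A userspace sketch of the same key construction (make_volume_key() is hypothetical; the kernel uses kasprintf with %pISpc instead):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *make_volume_key(const char *endpoint, const char *share)
    {
        char *key, *p;
        size_t len = strlen("cifs,") + strlen(endpoint) + 1 + strlen(share) + 1;

        key = malloc(len);
        if (!key)
            return NULL;
        snprintf(key, len, "cifs,%s,%s", endpoint, share);
        for (p = key; *p; p++)          /* '/' may not appear in the key */
            if (*p == '/')
                *p = ';';
        return key;
    }

    int main(void)
    {
        char *key = make_volume_key("192.168.1.1:445", "srv/share");

        if (key) {
            printf("%s\n", key);        /* cifs,192.168.1.1:445,srv;share */
            free(key);
        }
        return 0;
    }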
void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
{
- struct cifs_fscache_super_auxdata auxdata;
-
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.resource_id = tcon->resource_id;
- auxdata.vol_create_time = tcon->vol_create_time;
- auxdata.vol_serial_number = tcon->vol_serial_number;
+ struct cifs_fscache_volume_coherency_data cd;
cifs_dbg(FYI, "%s: (0x%p)\n", __func__, tcon->fscache);
- fscache_relinquish_cookie(tcon->fscache, &auxdata, false);
- tcon->fscache = NULL;
-}
-
-static void cifs_fscache_acquire_inode_cookie(struct cifsInodeInfo *cifsi,
- struct cifs_tcon *tcon)
-{
- struct cifs_fscache_inode_auxdata auxdata;
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
- auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
- auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
- auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
-
- cifsi->fscache =
- fscache_acquire_cookie(tcon->fscache,
- &cifs_fscache_inode_object_def,
- &cifsi->uniqueid, sizeof(cifsi->uniqueid),
- &auxdata, sizeof(auxdata),
- cifsi, cifsi->vfs_inode.i_size, true);
+ cifs_fscache_fill_volume_coherency(tcon, &cd);
+ fscache_relinquish_volume(tcon->fscache, &cd, false);
+ tcon->fscache = NULL;
}
-static void cifs_fscache_enable_inode_cookie(struct inode *inode)
+void cifs_fscache_get_inode_cookie(struct inode *inode)
{
+ struct cifs_fscache_inode_coherency_data cd;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
- if (cifsi->fscache)
- return;
-
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE))
- return;
-
- cifs_fscache_acquire_inode_cookie(cifsi, tcon);
+ cifs_fscache_fill_coherency(&cifsi->vfs_inode, &cd);
- cifs_dbg(FYI, "%s: got FH cookie (0x%p/0x%p)\n",
- __func__, tcon->fscache, cifsi->fscache);
+ cifsi->fscache =
+ fscache_acquire_cookie(tcon->fscache, 0,
+ &cifsi->uniqueid, sizeof(cifsi->uniqueid),
+ &cd, sizeof(cd),
+ i_size_read(&cifsi->vfs_inode));
}
-void cifs_fscache_release_inode_cookie(struct inode *inode)
+void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update)
{
- struct cifs_fscache_inode_auxdata auxdata;
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
-
- if (cifsi->fscache) {
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
- auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
- auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
- auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
-
- cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache);
- /* fscache_relinquish_cookie does not seem to update auxdata */
- fscache_update_cookie(cifsi->fscache, &auxdata);
- fscache_relinquish_cookie(cifsi->fscache, &auxdata, false);
- cifsi->fscache = NULL;
+ if (update) {
+ struct cifs_fscache_inode_coherency_data cd;
+ loff_t i_size = i_size_read(inode);
+
+ cifs_fscache_fill_coherency(inode, &cd);
+ fscache_unuse_cookie(cifs_inode_cookie(inode), &cd, &i_size);
+ } else {
+ fscache_unuse_cookie(cifs_inode_cookie(inode), NULL, NULL);
}
}
-void cifs_fscache_update_inode_cookie(struct inode *inode)
+void cifs_fscache_release_inode_cookie(struct inode *inode)
{
- struct cifs_fscache_inode_auxdata auxdata;
struct cifsInodeInfo *cifsi = CIFS_I(inode);
if (cifsi->fscache) {
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.eof = cifsi->server_eof;
- auxdata.last_write_time_sec = cifsi->vfs_inode.i_mtime.tv_sec;
- auxdata.last_change_time_sec = cifsi->vfs_inode.i_ctime.tv_sec;
- auxdata.last_write_time_nsec = cifsi->vfs_inode.i_mtime.tv_nsec;
- auxdata.last_change_time_nsec = cifsi->vfs_inode.i_ctime.tv_nsec;
-
cifs_dbg(FYI, "%s: (0x%p)\n", __func__, cifsi->fscache);
- fscache_update_cookie(cifsi->fscache, &auxdata);
- }
-}
-
-void cifs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
-{
- cifs_fscache_enable_inode_cookie(inode);
-}
-
-void cifs_fscache_reset_inode_cookie(struct inode *inode)
-{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
- struct fscache_cookie *old = cifsi->fscache;
-
- if (cifsi->fscache) {
- /* retire the current fscache cache and get a new one */
- fscache_relinquish_cookie(cifsi->fscache, NULL, true);
-
- cifs_fscache_acquire_inode_cookie(cifsi, tcon);
- cifs_dbg(FYI, "%s: new cookie 0x%p oldcookie 0x%p\n",
- __func__, cifsi->fscache, old);
+ fscache_relinquish_cookie(cifsi->fscache, false);
+ cifsi->fscache = NULL;
}
}
-int cifs_fscache_release_page(struct page *page, gfp_t gfp)
+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
{
- if (PageFsCache(page)) {
- struct inode *inode = page->mapping->host;
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
-
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n",
- __func__, page, cifsi->fscache);
- if (!fscache_maybe_release_page(cifsi->fscache, page, gfp))
- return 0;
- }
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
- return 1;
-}
-
-static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx,
- int error)
-{
- cifs_dbg(FYI, "%s: (0x%p/%d)\n", __func__, page, error);
- if (!error)
- SetPageUptodate(page);
- unlock_page(page);
+ if (ops)
+ ops->end_operation(cres);
}
/*
- * Retrieve a page from FS-Cache
+ * Fallback page reading interface.
*/
-int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
+static int fscache_fallback_read_page(struct inode *inode, struct page *page)
{
+ struct netfs_cache_resources cres;
+ struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+ struct iov_iter iter;
+ struct bio_vec bvec[1];
int ret;
- cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
- __func__, CIFS_I(inode)->fscache, page, inode);
- ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page,
- cifs_readpage_from_fscache_complete,
- NULL,
- GFP_KERNEL);
- switch (ret) {
-
- case 0: /* page found in fscache, read submitted */
- cifs_dbg(FYI, "%s: submitted\n", __func__);
+ memset(&cres, 0, sizeof(cres));
+ bvec[0].bv_page = page;
+ bvec[0].bv_offset = 0;
+ bvec[0].bv_len = PAGE_SIZE;
+ iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+
+ ret = fscache_begin_read_operation(&cres, cookie);
+ if (ret < 0)
return ret;
- case -ENOBUFS: /* page won't be cached */
- case -ENODATA: /* page not in cache */
- cifs_dbg(FYI, "%s: %d\n", __func__, ret);
- return 1;
- default:
- cifs_dbg(VFS, "unknown error ret = %d\n", ret);
- }
+ ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
+ NULL, NULL);
+ fscache_end_operation(&cres);
return ret;
}
/*
- * Retrieve a set of pages from FS-Cache
+ * Fallback page writing interface.
*/
-int __cifs_readpages_from_fscache(struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
+static int fscache_fallback_write_page(struct inode *inode, struct page *page,
+ bool no_space_allocated_yet)
{
+ struct netfs_cache_resources cres;
+ struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+ struct iov_iter iter;
+ struct bio_vec bvec[1];
+ loff_t start = page_offset(page);
+ size_t len = PAGE_SIZE;
int ret;
- cifs_dbg(FYI, "%s: (0x%p/%u/0x%p)\n",
- __func__, CIFS_I(inode)->fscache, *nr_pages, inode);
- ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping,
- pages, nr_pages,
- cifs_readpage_from_fscache_complete,
- NULL,
- mapping_gfp_mask(mapping));
- switch (ret) {
- case 0: /* read submitted to the cache for all pages */
- cifs_dbg(FYI, "%s: submitted\n", __func__);
- return ret;
-
- case -ENOBUFS: /* some pages are not cached and can't be */
- case -ENODATA: /* some pages are not cached */
- cifs_dbg(FYI, "%s: no page\n", __func__);
- return 1;
+ memset(&cres, 0, sizeof(cres));
+ bvec[0].bv_page = page;
+ bvec[0].bv_offset = 0;
+ bvec[0].bv_len = PAGE_SIZE;
+ iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
- default:
- cifs_dbg(FYI, "unknown error ret = %d\n", ret);
- }
+ ret = fscache_begin_write_operation(&cres, cookie);
+ if (ret < 0)
+ return ret;
+ ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
+ no_space_allocated_yet);
+ if (ret == 0)
+ ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
+ fscache_end_operation(&cres);
return ret;
}
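Both fallback helpers wrap a single page in a one-element bio_vec and hand it to the cache as a vectored I/O request. A rough userspace analogue of that shape, using preadv() on an ordinary file (illustrative only, not the kernel API):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    #define PAGE_SIZE 4096

    int main(void)
    {
        char page[PAGE_SIZE];
        struct iovec iov = { .iov_base = page, .iov_len = sizeof(page) };
        int fd = open("/etc/hostname", O_RDONLY);
        ssize_t n;

        if (fd < 0)
            return 1;
        n = preadv(fd, &iov, 1, 0);     /* offset 0, one vector element */
        if (n > 0)
            fwrite(page, 1, (size_t)n, stdout);
        close(fd);
        return 0;
    }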
-void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
+/*
+ * Retrieve a page from FS-Cache
+ */
+int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
int ret;
- WARN_ON(!cifsi->fscache);
+ cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p)\n",
+ __func__, cifs_inode_cookie(inode), page, inode);
- cifs_dbg(FYI, "%s: (fsc: %p, p: %p, i: %p)\n",
- __func__, cifsi->fscache, page, inode);
- ret = fscache_write_page(cifsi->fscache, page,
- cifsi->vfs_inode.i_size, GFP_KERNEL);
- if (ret != 0)
- fscache_uncache_page(cifsi->fscache, page);
-}
+ ret = fscache_fallback_read_page(inode, page);
+ if (ret < 0)
+ return ret;
-void __cifs_fscache_readpages_cancel(struct inode *inode, struct list_head *pages)
-{
- cifs_dbg(FYI, "%s: (fsc: %p, i: %p)\n",
- __func__, CIFS_I(inode)->fscache, inode);
- fscache_readpages_cancel(CIFS_I(inode)->fscache, pages);
+ /* Read completed synchronously */
+ SetPageUptodate(page);
+ return 0;
}
-void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode)
+void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- struct fscache_cookie *cookie = cifsi->fscache;
+ cifs_dbg(FYI, "%s: (fsc: %p, p: %p, i: %p)\n",
+ __func__, cifs_inode_cookie(inode), page, inode);
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, page, cookie);
- fscache_wait_on_page_write(cookie, page);
- fscache_uncache_page(cookie, page);
+ fscache_fallback_write_page(inode, page, true);
}
-void __cifs_fscache_wait_on_page_write(struct inode *inode, struct page *page)
+/*
+ * Query the cache occupancy.
+ */
+int __cifs_fscache_query_occupancy(struct inode *inode,
+ pgoff_t first, unsigned int nr_pages,
+ pgoff_t *_data_first,
+ unsigned int *_data_nr_pages)
{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- struct fscache_cookie *cookie = cifsi->fscache;
+ struct netfs_cache_resources cres;
+ struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+ loff_t start, data_start;
+ size_t len, data_len;
+ int ret;
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, page, cookie);
- fscache_wait_on_page_write(cookie, page);
-}
+ ret = fscache_begin_read_operation(&cres, cookie);
+ if (ret < 0)
+ return ret;
-void __cifs_fscache_uncache_page(struct inode *inode, struct page *page)
-{
- struct cifsInodeInfo *cifsi = CIFS_I(inode);
- struct fscache_cookie *cookie = cifsi->fscache;
+ start = first * PAGE_SIZE;
+ len = nr_pages * PAGE_SIZE;
+ ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE,
+ &data_start, &data_len);
+ if (ret == 0) {
+ *_data_first = data_start / PAGE_SIZE;
+ *_data_nr_pages = len / PAGE_SIZE;
+ }
- cifs_dbg(FYI, "%s: (0x%p/0x%p)\n", __func__, page, cookie);
- fscache_uncache_page(cookie, page);
+ fscache_end_operation(&cres);
+ return ret;
}
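The occupancy helper works in bytes internally and converts to and from page indexes at the edges. A worked sketch of just that arithmetic, assuming a 4 KiB page:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long first = 3, nr_pages = 8;
        unsigned long long start = first * PAGE_SIZE;   /* byte offset */
        unsigned long long len = nr_pages * PAGE_SIZE;  /* byte length */

        /* pretend the cache reports data starting one page in */
        unsigned long long data_start = start + PAGE_SIZE;
        unsigned long long data_len = len - PAGE_SIZE;

        printf("query bytes  [%llu, +%llu)\n", start, len);
        printf("data pages   first=%llu nr=%llu\n",
               data_start / PAGE_SIZE, data_len / PAGE_SIZE);
        return 0;
    }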
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index 9baa1d0f22bd..55129908e2c1 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -9,173 +9,154 @@
#ifndef _CIFS_FSCACHE_H
#define _CIFS_FSCACHE_H
+#include <linux/swap.h>
#include <linux/fscache.h>
#include "cifsglob.h"
-#ifdef CONFIG_CIFS_FSCACHE
-
/*
- * Auxiliary data attached to CIFS superblock within the cache
+ * Coherency data attached to CIFS volume within the cache
*/
-struct cifs_fscache_super_auxdata {
- u64 resource_id; /* unique server resource id */
+struct cifs_fscache_volume_coherency_data {
+ __le64 resource_id; /* unique server resource id */
__le64 vol_create_time;
- u32 vol_serial_number;
+ __le32 vol_serial_number;
} __packed;
/*
- * Auxiliary data attached to CIFS inode within the cache
+ * Coherency data attached to CIFS inode within the cache.
*/
-struct cifs_fscache_inode_auxdata {
- u64 last_write_time_sec;
- u64 last_change_time_sec;
- u32 last_write_time_nsec;
- u32 last_change_time_nsec;
- u64 eof;
+struct cifs_fscache_inode_coherency_data {
+ __le64 last_write_time_sec;
+ __le64 last_change_time_sec;
+ __le32 last_write_time_nsec;
+ __le32 last_change_time_nsec;
};
-/*
- * cache.c
- */
-extern struct fscache_netfs cifs_fscache_netfs;
-extern const struct fscache_cookie_def cifs_fscache_server_index_def;
-extern const struct fscache_cookie_def cifs_fscache_super_index_def;
-extern const struct fscache_cookie_def cifs_fscache_inode_object_def;
-
-extern int cifs_fscache_register(void);
-extern void cifs_fscache_unregister(void);
+#ifdef CONFIG_CIFS_FSCACHE
/*
* fscache.c
*/
-extern void cifs_fscache_get_client_cookie(struct TCP_Server_Info *);
-extern void cifs_fscache_release_client_cookie(struct TCP_Server_Info *);
-extern void cifs_fscache_get_super_cookie(struct cifs_tcon *);
+extern int cifs_fscache_get_super_cookie(struct cifs_tcon *);
extern void cifs_fscache_release_super_cookie(struct cifs_tcon *);
+extern void cifs_fscache_get_inode_cookie(struct inode *inode);
extern void cifs_fscache_release_inode_cookie(struct inode *);
-extern void cifs_fscache_update_inode_cookie(struct inode *inode);
-extern void cifs_fscache_set_inode_cookie(struct inode *, struct file *);
-extern void cifs_fscache_reset_inode_cookie(struct inode *);
-
-extern void __cifs_fscache_invalidate_page(struct page *, struct inode *);
-extern void __cifs_fscache_wait_on_page_write(struct inode *inode, struct page *page);
-extern void __cifs_fscache_uncache_page(struct inode *inode, struct page *page);
-extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
-extern int __cifs_readpage_from_fscache(struct inode *, struct page *);
-extern int __cifs_readpages_from_fscache(struct inode *,
- struct address_space *,
- struct list_head *,
- unsigned *);
-extern void __cifs_fscache_readpages_cancel(struct inode *, struct list_head *);
-
-extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
-
-static inline void cifs_fscache_invalidate_page(struct page *page,
- struct inode *inode)
+extern void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update);
+
+static inline
+void cifs_fscache_fill_coherency(struct inode *inode,
+ struct cifs_fscache_inode_coherency_data *cd)
{
- if (PageFsCache(page))
- __cifs_fscache_invalidate_page(page, inode);
+ struct cifsInodeInfo *cifsi = CIFS_I(inode);
+
+ memset(cd, 0, sizeof(*cd));
+ cd->last_write_time_sec = cpu_to_le64(cifsi->vfs_inode.i_mtime.tv_sec);
+ cd->last_write_time_nsec = cpu_to_le32(cifsi->vfs_inode.i_mtime.tv_nsec);
+ cd->last_change_time_sec = cpu_to_le64(cifsi->vfs_inode.i_ctime.tv_sec);
+ cd->last_change_time_nsec = cpu_to_le32(cifsi->vfs_inode.i_ctime.tv_nsec);
}
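The coherency structs now store fixed-endian (__le64/__le32) timestamps, so a cache blob written on one architecture still validates on another. A userspace mirror of the fill step, assuming glibc's endian.h; the struct and helper names are illustrative:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    struct coherency_data {
        uint64_t last_write_time_sec;   /* stored as __le64 */
        uint64_t last_change_time_sec;  /* stored as __le64 */
        uint32_t last_write_time_nsec;  /* stored as __le32 */
        uint32_t last_change_time_nsec; /* stored as __le32 */
    };

    static void fill_coherency(struct coherency_data *cd,
                               const struct timespec *mtime,
                               const struct timespec *chtime)
    {
        memset(cd, 0, sizeof(*cd));
        cd->last_write_time_sec   = htole64((uint64_t)mtime->tv_sec);
        cd->last_write_time_nsec  = htole32((uint32_t)mtime->tv_nsec);
        cd->last_change_time_sec  = htole64((uint64_t)chtime->tv_sec);
        cd->last_change_time_nsec = htole32((uint32_t)chtime->tv_nsec);
    }

    int main(void)
    {
        struct timespec now;
        struct coherency_data a, b;

        clock_gettime(CLOCK_REALTIME, &now);
        fill_coherency(&a, &now, &now);
        b = a;
        /* cache hit iff the stored blob still matches byte-for-byte */
        printf("coherent: %d\n", memcmp(&a, &b, sizeof(a)) == 0);
        return 0;
    }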
-static inline void cifs_fscache_wait_on_page_write(struct inode *inode,
- struct page *page)
+
+static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode)
{
- if (PageFsCache(page))
- __cifs_fscache_wait_on_page_write(inode, page);
+ return CIFS_I(inode)->fscache;
}
-static inline void cifs_fscache_uncache_page(struct inode *inode,
- struct page *page)
+static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags)
{
- if (PageFsCache(page))
- __cifs_fscache_uncache_page(inode, page);
+ struct cifs_fscache_inode_coherency_data cd;
+
+ cifs_fscache_fill_coherency(inode, &cd);
+ fscache_invalidate(cifs_inode_cookie(inode), &cd,
+ i_size_read(inode), flags);
}
-static inline int cifs_readpage_from_fscache(struct inode *inode,
- struct page *page)
-{
- if (CIFS_I(inode)->fscache)
- return __cifs_readpage_from_fscache(inode, page);
+extern int __cifs_fscache_query_occupancy(struct inode *inode,
+ pgoff_t first, unsigned int nr_pages,
+ pgoff_t *_data_first,
+ unsigned int *_data_nr_pages);
- return -ENOBUFS;
+static inline int cifs_fscache_query_occupancy(struct inode *inode,
+ pgoff_t first, unsigned int nr_pages,
+ pgoff_t *_data_first,
+ unsigned int *_data_nr_pages)
+{
+ if (!cifs_inode_cookie(inode))
+ return -ENOBUFS;
+ return __cifs_fscache_query_occupancy(inode, first, nr_pages,
+ _data_first, _data_nr_pages);
}
-static inline int cifs_readpages_from_fscache(struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
+extern int __cifs_readpage_from_fscache(struct inode *pinode, struct page *ppage);
+extern void __cifs_readpage_to_fscache(struct inode *pinode, struct page *ppage);
+
+
+static inline int cifs_readpage_from_fscache(struct inode *inode,
+ struct page *page)
{
- if (CIFS_I(inode)->fscache)
- return __cifs_readpages_from_fscache(inode, mapping, pages,
- nr_pages);
+ if (cifs_inode_cookie(inode))
+ return __cifs_readpage_from_fscache(inode, page);
return -ENOBUFS;
}
static inline void cifs_readpage_to_fscache(struct inode *inode,
struct page *page)
{
- if (PageFsCache(page))
+ if (cifs_inode_cookie(inode))
__cifs_readpage_to_fscache(inode, page);
}
-static inline void cifs_fscache_readpages_cancel(struct inode *inode,
- struct list_head *pages)
+static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
{
- if (CIFS_I(inode)->fscache)
- return __cifs_fscache_readpages_cancel(inode, pages);
+ if (PageFsCache(page)) {
+ if (current_is_kswapd() || !(gfp & __GFP_FS))
+ return false;
+ wait_on_page_fscache(page);
+ fscache_note_page_release(cifs_inode_cookie(page->mapping->host));
+ }
+ return true;
}
#else /* CONFIG_CIFS_FSCACHE */
-static inline int cifs_fscache_register(void) { return 0; }
-static inline void cifs_fscache_unregister(void) {}
-
-static inline void
-cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) {}
-static inline void
-cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) {}
-static inline void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) {}
-static inline void
-cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) {}
-
-static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
-static inline void cifs_fscache_update_inode_cookie(struct inode *inode) {}
-static inline void cifs_fscache_set_inode_cookie(struct inode *inode,
- struct file *filp) {}
-static inline void cifs_fscache_reset_inode_cookie(struct inode *inode) {}
-static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
+static inline
+void cifs_fscache_fill_coherency(struct inode *inode,
+ struct cifs_fscache_inode_coherency_data *cd)
{
- return 1; /* May release page */
}
-static inline void cifs_fscache_invalidate_page(struct page *page,
- struct inode *inode) {}
-static inline void cifs_fscache_wait_on_page_write(struct inode *inode,
- struct page *page) {}
-static inline void cifs_fscache_uncache_page(struct inode *inode,
- struct page *page) {}
+static inline int cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) { return 0; }
+static inline void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) {}
-static inline int
-cifs_readpage_from_fscache(struct inode *inode, struct page *page)
+static inline void cifs_fscache_get_inode_cookie(struct inode *inode) {}
+static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {}
+static inline void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool update) {}
+static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; }
+static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {}
+
+static inline int cifs_fscache_query_occupancy(struct inode *inode,
+ pgoff_t first, unsigned int nr_pages,
+ pgoff_t *_data_first,
+ unsigned int *_data_nr_pages)
{
+ *_data_first = ULONG_MAX;
+ *_data_nr_pages = 0;
return -ENOBUFS;
}
-static inline int cifs_readpages_from_fscache(struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
+static inline int
+cifs_readpage_from_fscache(struct inode *inode, struct page *page)
{
return -ENOBUFS;
}
-static inline void cifs_readpage_to_fscache(struct inode *inode,
- struct page *page) {}
+static inline
+void cifs_readpage_to_fscache(struct inode *inode, struct page *page) {}
-static inline void cifs_fscache_readpages_cancel(struct inode *inode,
- struct list_head *pages)
+static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
{
+ return true; /* May release page */
}
#endif /* CONFIG_CIFS_FSCACHE */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 279622e4eb1c..60d853c92f6a 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -83,6 +83,7 @@ static void cifs_set_ops(struct inode *inode)
static void
cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
{
+ struct cifs_fscache_inode_coherency_data cd;
struct cifsInodeInfo *cifs_i = CIFS_I(inode);
cifs_dbg(FYI, "%s: revalidating inode %llu\n",
@@ -113,6 +114,9 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
cifs_dbg(FYI, "%s: invalidating inode %llu mapping\n",
__func__, cifs_i->uniqueid);
set_bit(CIFS_INO_INVALID_MAPPING, &cifs_i->flags);
+ /* Invalidate fscache cookie */
+ cifs_fscache_fill_coherency(&cifs_i->vfs_inode, &cd);
+ fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0);
}
/*
@@ -952,6 +956,12 @@ cifs_get_inode_info(struct inode **inode,
rc = server->ops->query_path_info(xid, tcon, cifs_sb,
full_path, tmp_data,
&adjust_tz, &is_reparse_point);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ if (rc == -ENOENT && is_tcon_dfs(tcon))
+ rc = cifs_dfs_query_info_nonascii_quirk(xid, tcon,
+ cifs_sb,
+ full_path);
+#endif
data = tmp_data;
}
@@ -1298,10 +1308,7 @@ retry_iget5_locked:
inode->i_flags |= S_NOATIME | S_NOCMTIME;
if (inode->i_state & I_NEW) {
inode->i_ino = hash;
-#ifdef CONFIG_CIFS_FSCACHE
- /* initialize per-inode cache cookie pointer */
- CIFS_I(inode)->fscache = NULL;
-#endif
+ cifs_fscache_get_inode_cookie(inode);
unlock_new_inode(inode);
}
}
@@ -1370,6 +1377,7 @@ iget_no_retry:
iget_failed(inode);
inode = ERR_PTR(rc);
}
+
out:
kfree(path);
free_xid(xid);
@@ -2266,7 +2274,6 @@ cifs_invalidate_mapping(struct inode *inode)
__func__, inode);
}
- cifs_fscache_reset_inode_cookie(inode);
return rc;
}
@@ -2771,8 +2778,10 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
goto out;
if ((attrs->ia_valid & ATTR_SIZE) &&
- attrs->ia_size != i_size_read(inode))
+ attrs->ia_size != i_size_read(inode)) {
truncate_setsize(inode, attrs->ia_size);
+ fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size);
+ }
setattr_copy(&init_user_ns, inode, attrs);
mark_inode_dirty(inode);
@@ -2967,8 +2976,10 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
goto cifs_setattr_exit;
if ((attrs->ia_valid & ATTR_SIZE) &&
- attrs->ia_size != i_size_read(inode))
+ attrs->ia_size != i_size_read(inode)) {
truncate_setsize(inode, attrs->ia_size);
+ fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size);
+ }
setattr_copy(&init_user_ns, inode, attrs);
mark_inode_dirty(inode);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 5148d48d6a35..56598f7dbe00 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1302,4 +1302,53 @@ int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
return 0;
}
+
+/** cifs_dfs_query_info_nonascii_quirk
+ * Handle odd Windows SMB server behaviour: it responds with a
+ * STATUS_OBJECT_NAME_INVALID code to an SMB2 QUERY_INFO request
+ * for a "\<server>\<dfsname>\<linkpath>" DFS referral
+ * whose <dfsname> contains non-ASCII unicode symbols.
+ *
+ * Check for such a DFS referral and emulate -ENOENT if it is absent.
+ */
+int cifs_dfs_query_info_nonascii_quirk(const unsigned int xid,
+ struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+ const char *linkpath)
+{
+ char *treename, *dfspath, sep;
+ int treenamelen, linkpathlen, rc;
+
+ treename = tcon->treeName;
+ /* MS-DFSC: All paths in REQ_GET_DFS_REFERRAL and RESP_GET_DFS_REFERRAL
+ * messages MUST be encoded with exactly one leading backslash, not two
+ * leading backslashes.
+ */
+ sep = CIFS_DIR_SEP(cifs_sb);
+ if (treename[0] == sep && treename[1] == sep)
+ treename++;
+ linkpathlen = strlen(linkpath);
+ treenamelen = strnlen(treename, MAX_TREE_SIZE + 1);
+ dfspath = kzalloc(treenamelen + linkpathlen + 1, GFP_KERNEL);
+ if (!dfspath)
+ return -ENOMEM;
+ if (treenamelen)
+ memcpy(dfspath, treename, treenamelen);
+ memcpy(dfspath + treenamelen, linkpath, linkpathlen);
+ rc = dfs_cache_find(xid, tcon->ses, cifs_sb->local_nls,
+ cifs_remap(cifs_sb), dfspath, NULL, NULL);
+ if (rc == 0) {
+ cifs_dbg(FYI, "DFS ref '%s' is found, emulate -EREMOTE\n",
+ dfspath);
+ rc = -EREMOTE;
+ } else if (rc == -EEXIST) {
+ cifs_dbg(FYI, "DFS ref '%s' is not found, emulate -ENOENT\n",
+ dfspath);
+ rc = -ENOENT;
+ } else {
+ cifs_dbg(FYI, "%s: dfs_cache_find returned %d\n", __func__, rc);
+ }
+ kfree(dfspath);
+ return rc;
+}
#endif
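The quirk helper concatenates the tree name and link path after trimming the doubled leading backslash, per the MS-DFSC note in the comment above. A standalone sketch of that composition (build_dfspath() is a hypothetical stand-in for the kzalloc/memcpy sequence):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char *build_dfspath(const char *treename, const char *linkpath)
    {
        size_t tlen, llen;
        char *dfspath;

        /* MS-DFSC wants one leading backslash, never two */
        if (treename[0] == '\\' && treename[1] == '\\')
            treename++;
        tlen = strlen(treename);
        llen = strlen(linkpath);
        dfspath = calloc(1, tlen + llen + 1);
        if (!dfspath)
            return NULL;
        memcpy(dfspath, treename, tlen);
        memcpy(dfspath + tlen, linkpath, llen);
        return dfspath;
    }

    int main(void)
    {
        char *p = build_dfspath("\\\\srv\\dfsroot", "\\dir\\file");

        if (p) {
            printf("%s\n", p);          /* \srv\dfsroot\dir\file */
            free(p);
        }
        return 0;
    }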
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index fa9fbd6a819c..ebe236b9d9f5 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -896,10 +896,7 @@ map_and_check_smb_error(struct mid_q_entry *mid, bool logErr)
if (class == ERRSRV && code == ERRbaduid) {
cifs_dbg(FYI, "Server returned 0x%x, reconnecting session...\n",
code);
- spin_lock(&GlobalMid_Lock);
- if (mid->server->tcpStatus != CifsExiting)
- mid->server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&GlobalMid_Lock);
+ cifs_reconnect(mid->server, false);
}
}
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index fe707f45da89..298458404252 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -40,7 +40,7 @@
#define NTLMSSP_REQUEST_NON_NT_KEY 0x400000
#define NTLMSSP_NEGOTIATE_TARGET_INFO 0x800000
/* #define reserved4 0x1000000 */
-#define NTLMSSP_NEGOTIATE_VERSION 0x2000000 /* we do not set */
+#define NTLMSSP_NEGOTIATE_VERSION 0x2000000 /* we only set for SMB2+ */
/* #define reserved3 0x4000000 */
/* #define reserved2 0x8000000 */
/* #define reserved1 0x10000000 */
@@ -87,6 +87,30 @@ typedef struct _NEGOTIATE_MESSAGE {
/* followed by WorkstationString */
} __attribute__((packed)) NEGOTIATE_MESSAGE, *PNEGOTIATE_MESSAGE;
+#define NTLMSSP_REVISION_W2K3 0x0F
+
+/* See MS-NLMP section 2.2.2.10 */
+struct ntlmssp_version {
+ __u8 ProductMajorVersion;
+ __u8 ProductMinorVersion;
+ __le16 ProductBuild; /* we send the cifs.ko module version here */
+ __u8 Reserved[3];
+ __u8 NTLMRevisionCurrent; /* currently 0x0F */
+} __packed;
+
+/* see MS-NLMP section 2.2.1.1 */
+struct negotiate_message {
+ __u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
+ __le32 MessageType; /* NtLmNegotiate = 1 */
+ __le32 NegotiateFlags;
+ SECURITY_BUFFER DomainName; /* RFC 1001 style and ASCII */
+ SECURITY_BUFFER WorkstationName; /* RFC 1001 and ASCII */
+ struct ntlmssp_version Version;
+ /* SECURITY_BUFFER */
+ char DomainString[0];
+ /* followed by WorkstationString */
+} __packed;
+
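MS-NLMP section 2.2.2.10 defines VERSION as exactly eight bytes on the wire. A userspace mirror of struct ntlmssp_version that checks the layout at compile time (uint16_t stands in for __le16 and is only equivalent on a little-endian build):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ntlmssp_version {
        uint8_t  ProductMajorVersion;
        uint8_t  ProductMinorVersion;
        uint16_t ProductBuild;          /* module version in the patch */
        uint8_t  Reserved[3];
        uint8_t  NTLMRevisionCurrent;   /* 0x0F = NTLMSSP_REVISION_W2K3 */
    } __attribute__((packed));

    static_assert(sizeof(struct ntlmssp_version) == 8,
                  "VERSION must be 8 bytes on the wire");

    int main(void)
    {
        struct ntlmssp_version v = {
            .ProductMajorVersion = 5,
            .ProductMinorVersion = 16,
            .ProductBuild        = 0,   /* SMB3_PRODUCT_BUILD stand-in */
            .NTLMRevisionCurrent = 0x0F,
        };

        printf("wire size: %zu\n", sizeof(v));
        return (v.NTLMRevisionCurrent == 0x0F) ? 0 : 1;
    }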
typedef struct _CHALLENGE_MESSAGE {
__u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
__le32 MessageType; /* NtLmChallenge = 2 */
@@ -121,7 +145,13 @@ typedef struct _AUTHENTICATE_MESSAGE {
int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
int build_ntlmssp_negotiate_blob(unsigned char **pbuffer, u16 *buflen,
struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
+ const struct nls_table *nls_cp);
+int build_ntlmssp_smb3_negotiate_blob(unsigned char **pbuffer, u16 *buflen,
+ struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
const struct nls_table *nls_cp);
int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
const struct nls_table *nls_cp);
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 035dc3e245dc..32f478c7a66d 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -17,6 +17,8 @@
#include "nterr.h"
#include <linux/utsname.h>
#include <linux/slab.h>
+#include <linux/version.h>
+#include "cifsfs.h"
#include "cifs_spnego.h"
#include "smb2proto.h"
#include "fs_context.h"
@@ -65,6 +67,55 @@ bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface)
return false;
}
+/* channel helper functions. assumed that chan_lock is held by caller. */
+
+unsigned int
+cifs_ses_get_chan_index(struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
+{
+ unsigned int i;
+
+ for (i = 0; i < ses->chan_count; i++) {
+ if (ses->chans[i].server == server)
+ return i;
+ }
+
+ /* If we didn't find the channel, it is likely a bug */
+ WARN_ON(1);
+ return 0;
+}
+
+void
+cifs_chan_set_need_reconnect(struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
+{
+ unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+
+ set_bit(chan_index, &ses->chans_need_reconnect);
+ cifs_dbg(FYI, "Set reconnect bitmask for chan %u; now 0x%lx\n",
+ chan_index, ses->chans_need_reconnect);
+}
+
+void
+cifs_chan_clear_need_reconnect(struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
+{
+ unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+
+ clear_bit(chan_index, &ses->chans_need_reconnect);
+ cifs_dbg(FYI, "Cleared reconnect bitmask for chan %u; now 0x%lx\n",
+ chan_index, ses->chans_need_reconnect);
+}
+
+bool
+cifs_chan_needs_reconnect(struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
+{
+ unsigned int chan_index = cifs_ses_get_chan_index(ses, server);
+
+ return CIFS_CHAN_NEEDS_RECONNECT(ses, chan_index);
+}
+
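Each channel now maps to one bit of ses->chans_need_reconnect, so marking, clearing, and testing a channel's reconnect state are single bit operations under chan_lock. A plain-C sketch of the same bookkeeping with ordinary bit arithmetic (all names here are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct session {
        int num_chans;
        unsigned long chans_need_reconnect;
    };

    static void chan_set_need_reconnect(struct session *s, int idx)
    {
        s->chans_need_reconnect |= 1UL << idx;
    }

    static void chan_clear_need_reconnect(struct session *s, int idx)
    {
        s->chans_need_reconnect &= ~(1UL << idx);
    }

    static bool chan_needs_reconnect(const struct session *s, int idx)
    {
        return s->chans_need_reconnect & (1UL << idx);
    }

    int main(void)
    {
        struct session s = { .num_chans = 3 };

        chan_set_need_reconnect(&s, 1);
        printf("mask=0x%lx chan1=%d chan2=%d\n", s.chans_need_reconnect,
               chan_needs_reconnect(&s, 1), chan_needs_reconnect(&s, 2));
        chan_clear_need_reconnect(&s, 1);
        printf("mask=0x%lx\n", s.chans_need_reconnect);
        return 0;
    }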
/* returns number of channels added */
int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
{
@@ -76,21 +127,22 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
struct cifs_server_iface *ifaces = NULL;
size_t iface_count;
- if (ses->server->dialect < SMB30_PROT_ID) {
- cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
- return 0;
- }
-
spin_lock(&ses->chan_lock);
new_chan_count = old_chan_count = ses->chan_count;
left = ses->chan_max - ses->chan_count;
if (left <= 0) {
+ spin_unlock(&ses->chan_lock);
cifs_dbg(FYI,
"ses already at max_channels (%zu), nothing to open\n",
ses->chan_max);
+ return 0;
+ }
+
+ if (ses->server->dialect < SMB30_PROT_ID) {
spin_unlock(&ses->chan_lock);
+ cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
return 0;
}
@@ -261,9 +313,8 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
chan_server = cifs_get_tcp_session(&ctx, ses->server);
- mutex_lock(&ses->session_mutex);
spin_lock(&ses->chan_lock);
- chan = ses->binding_chan = &ses->chans[ses->chan_count];
+ chan = &ses->chans[ses->chan_count];
chan->server = chan_server;
if (IS_ERR(chan->server)) {
rc = PTR_ERR(chan->server);
@@ -271,8 +322,15 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
spin_unlock(&ses->chan_lock);
goto out;
}
+ ses->chan_count++;
+ atomic_set(&ses->chan_seq, 0);
+
+ /* Mark this channel as needing connect/setup */
+ cifs_chan_set_need_reconnect(ses, chan->server);
+
spin_unlock(&ses->chan_lock);
+ mutex_lock(&ses->session_mutex);
/*
* We need to allocate the server crypto now as we will need
* to sign packets before we generate the channel signing key
@@ -281,37 +339,29 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
rc = smb311_crypto_shash_allocate(chan->server);
if (rc) {
cifs_dbg(VFS, "%s: crypto alloc failed\n", __func__);
+ mutex_unlock(&ses->session_mutex);
goto out;
}
- ses->binding = true;
- rc = cifs_negotiate_protocol(xid, ses);
- if (rc)
- goto out;
-
- rc = cifs_setup_session(xid, ses, cifs_sb->local_nls);
- if (rc)
- goto out;
+ rc = cifs_negotiate_protocol(xid, ses, chan->server);
+ if (!rc)
+ rc = cifs_setup_session(xid, ses, chan->server, cifs_sb->local_nls);
- /* success, put it on the list
- * XXX: sharing ses between 2 tcp servers is not possible, the
- * way "internal" linked lists works in linux makes element
- * only able to belong to one list
- *
- * the binding session is already established so the rest of
- * the code should be able to look it up, no need to add the
- * ses to the new server.
- */
-
- spin_lock(&ses->chan_lock);
- ses->chan_count++;
- atomic_set(&ses->chan_seq, 0);
- spin_unlock(&ses->chan_lock);
+ mutex_unlock(&ses->session_mutex);
out:
- ses->binding = false;
- ses->binding_chan = NULL;
- mutex_unlock(&ses->session_mutex);
+ if (rc && chan->server) {
+ spin_lock(&ses->chan_lock);
+ /* we rely on all bits beyond chan_count to be clear */
+ cifs_chan_clear_need_reconnect(ses, chan->server);
+ ses->chan_count--;
+ /*
+ * chan_count should never reach 0 as at least the primary
+ * channel is always allocated
+ */
+ WARN_ON(ses->chan_count < 1);
+ spin_unlock(&ses->chan_lock);
+ }
if (rc && chan->server)
cifs_put_tcp_session(chan->server, 0);
@@ -319,20 +369,9 @@ out:
return rc;
}
-/* Mark all session channels for reconnect */
-void cifs_ses_mark_for_reconnect(struct cifs_ses *ses)
-{
- int i;
-
- for (i = 0; i < ses->chan_count; i++) {
- spin_lock(&GlobalMid_Lock);
- if (ses->chans[i].server->tcpStatus != CifsExiting)
- ses->chans[i].server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&GlobalMid_Lock);
- }
-}
-
-static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
+static __u32 cifs_ssetup_hdr(struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
+ SESSION_SETUP_ANDX *pSMB)
{
__u32 capabilities = 0;
@@ -345,7 +384,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
pSMB->req.MaxBufferSize = cpu_to_le16(min_t(u32,
CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4,
USHRT_MAX));
- pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
+ pSMB->req.MaxMpxCount = cpu_to_le16(server->maxReq);
pSMB->req.VcNumber = cpu_to_le16(1);
/* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
@@ -356,7 +395,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
- if (ses->server->sign)
+ if (server->sign)
pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
if (ses->capabilities & CAP_UNICODE) {
@@ -675,7 +714,11 @@ static int size_of_ntlmssp_blob(struct cifs_ses *ses, int base_size)
else
sz += sizeof(__le16);
- sz += sizeof(__le16) * strnlen(ses->workstation_name, CIFS_MAX_WORKSTATION_LEN);
+ if (ses->workstation_name)
+ sz += sizeof(__le16) * strnlen(ses->workstation_name,
+ CIFS_MAX_WORKSTATION_LEN);
+ else
+ sz += sizeof(__le16);
return sz;
}
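The sizing fix accounts for a NULL workstation_name by reserving room for a single UTF-16 null instead of dereferencing the missing string. A minimal sketch of that null-safe sizing rule (utf16_field_size() is hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_WORKSTATION_LEN 240

    static size_t utf16_field_size(const char *name)
    {
        if (name)
            return sizeof(uint16_t) * strnlen(name, MAX_WORKSTATION_LEN);
        return sizeof(uint16_t);        /* just a null terminator */
    }

    int main(void)
    {
        printf("%zu %zu\n", utf16_field_size("HOST1"), utf16_field_size(NULL));
        return 0;
    }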
@@ -719,10 +762,10 @@ static inline void cifs_security_buffer_from_str(SECURITY_BUFFER *pbuf,
int build_ntlmssp_negotiate_blob(unsigned char **pbuffer,
u16 *buflen,
struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
const struct nls_table *nls_cp)
{
int rc = 0;
- struct TCP_Server_Info *server = cifs_ses_server(ses);
NEGOTIATE_MESSAGE *sec_blob;
__u32 flags;
unsigned char *tmp;
@@ -773,9 +816,78 @@ setup_ntlm_neg_ret:
return rc;
}
+/*
+ * Build an ntlmssp blob with additional fields, such as the version,
+ * that modern servers support. For safety, limit this to SMB3 or later.
+ * See e.g. the notes in MS-NLMP section 2.2.2.1.
+ */
+int build_ntlmssp_smb3_negotiate_blob(unsigned char **pbuffer,
+ u16 *buflen,
+ struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
+ const struct nls_table *nls_cp)
+{
+ int rc = 0;
+ struct negotiate_message *sec_blob;
+ __u32 flags;
+ unsigned char *tmp;
+ int len;
+
+ len = size_of_ntlmssp_blob(ses, sizeof(struct negotiate_message));
+ *pbuffer = kmalloc(len, GFP_KERNEL);
+ if (!*pbuffer) {
+ rc = -ENOMEM;
+ cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
+ *buflen = 0;
+ goto setup_ntlm_smb3_neg_ret;
+ }
+ sec_blob = (struct negotiate_message *)*pbuffer;
+
+ memset(*pbuffer, 0, sizeof(struct negotiate_message));
+ memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
+ sec_blob->MessageType = NtLmNegotiate;
+
+ /* BB is NTLMV2 session security format easier to use here? */
+ flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET |
+ NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
+ NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
+ NTLMSSP_NEGOTIATE_ALWAYS_SIGN | NTLMSSP_NEGOTIATE_SEAL |
+ NTLMSSP_NEGOTIATE_SIGN | NTLMSSP_NEGOTIATE_VERSION;
+ if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
+ flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+
+ sec_blob->Version.ProductMajorVersion = LINUX_VERSION_MAJOR;
+ sec_blob->Version.ProductMinorVersion = LINUX_VERSION_PATCHLEVEL;
+ sec_blob->Version.ProductBuild = cpu_to_le16(SMB3_PRODUCT_BUILD);
+ sec_blob->Version.NTLMRevisionCurrent = NTLMSSP_REVISION_W2K3;
+
+ tmp = *pbuffer + sizeof(struct negotiate_message);
+ ses->ntlmssp->client_flags = flags;
+ sec_blob->NegotiateFlags = cpu_to_le32(flags);
+
+ /* these fields should be null in negotiate phase MS-NLMP 3.1.5.1.1 */
+ cifs_security_buffer_from_str(&sec_blob->DomainName,
+ NULL,
+ CIFS_MAX_DOMAINNAME_LEN,
+ *pbuffer, &tmp,
+ nls_cp);
+
+ cifs_security_buffer_from_str(&sec_blob->WorkstationName,
+ NULL,
+ CIFS_MAX_WORKSTATION_LEN,
+ *pbuffer, &tmp,
+ nls_cp);
+
+ *buflen = tmp - *pbuffer;
+setup_ntlm_smb3_neg_ret:
+ return rc;
+}
+
+
int build_ntlmssp_auth_blob(unsigned char **pbuffer,
u16 *buflen,
struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
const struct nls_table *nls_cp)
{
int rc;
@@ -912,6 +1024,7 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
struct sess_data {
unsigned int xid;
struct cifs_ses *ses;
+ struct TCP_Server_Info *server;
struct nls_table *nls_cp;
void (*func)(struct sess_data *);
int result;
@@ -978,31 +1091,27 @@ static int
sess_establish_session(struct sess_data *sess_data)
{
struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = sess_data->server;
- mutex_lock(&ses->server->srv_mutex);
- if (!ses->server->session_estab) {
- if (ses->server->sign) {
- ses->server->session_key.response =
+ mutex_lock(&server->srv_mutex);
+ if (!server->session_estab) {
+ if (server->sign) {
+ server->session_key.response =
kmemdup(ses->auth_key.response,
ses->auth_key.len, GFP_KERNEL);
- if (!ses->server->session_key.response) {
- mutex_unlock(&ses->server->srv_mutex);
+ if (!server->session_key.response) {
+ mutex_unlock(&server->srv_mutex);
return -ENOMEM;
}
- ses->server->session_key.len =
+ server->session_key.len =
ses->auth_key.len;
}
- ses->server->sequence_number = 0x2;
- ses->server->session_estab = true;
+ server->sequence_number = 0x2;
+ server->session_estab = true;
}
- mutex_unlock(&ses->server->srv_mutex);
+ mutex_unlock(&server->srv_mutex);
cifs_dbg(FYI, "CIFS session established successfully\n");
- spin_lock(&GlobalMid_Lock);
- ses->status = CifsGood;
- ses->need_reconnect = false;
- spin_unlock(&GlobalMid_Lock);
-
return 0;
}
@@ -1036,6 +1145,7 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
SESSION_SETUP_ANDX *pSMB;
char *bcc_ptr;
struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = sess_data->server;
__u32 capabilities;
__u16 bytes_remaining;
@@ -1047,7 +1157,7 @@ sess_auth_ntlmv2(struct sess_data *sess_data)
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
bcc_ptr = sess_data->iov[2].iov_base;
- capabilities = cifs_ssetup_hdr(ses, pSMB);
+ capabilities = cifs_ssetup_hdr(ses, server, pSMB);
pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
@@ -1145,6 +1255,7 @@ sess_auth_kerberos(struct sess_data *sess_data)
SESSION_SETUP_ANDX *pSMB;
char *bcc_ptr;
struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = sess_data->server;
__u32 capabilities;
__u16 bytes_remaining;
struct key *spnego_key = NULL;
@@ -1159,9 +1270,9 @@ sess_auth_kerberos(struct sess_data *sess_data)
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
bcc_ptr = sess_data->iov[2].iov_base;
- capabilities = cifs_ssetup_hdr(ses, pSMB);
+ capabilities = cifs_ssetup_hdr(ses, server, pSMB);
- spnego_key = cifs_get_spnego_key(ses);
+ spnego_key = cifs_get_spnego_key(ses, server);
if (IS_ERR(spnego_key)) {
rc = PTR_ERR(spnego_key);
spnego_key = NULL;
@@ -1285,12 +1396,13 @@ _sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data)
{
SESSION_SETUP_ANDX *pSMB;
struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = sess_data->server;
__u32 capabilities;
char *bcc_ptr;
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
- capabilities = cifs_ssetup_hdr(ses, pSMB);
+ capabilities = cifs_ssetup_hdr(ses, server, pSMB);
if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
cifs_dbg(VFS, "NTLMSSP requires Unicode support\n");
return -ENOSYS;
@@ -1324,6 +1436,7 @@ sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
struct smb_hdr *smb_buf;
SESSION_SETUP_ANDX *pSMB;
struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = sess_data->server;
__u16 bytes_remaining;
char *bcc_ptr;
unsigned char *ntlmsspblob = NULL;
@@ -1351,10 +1464,10 @@ sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
/* Build security blob before we assemble the request */
rc = build_ntlmssp_negotiate_blob(&ntlmsspblob,
- &blob_len, ses,
+ &blob_len, ses, server,
sess_data->nls_cp);
if (rc)
- goto out;
+ goto out_free_ntlmsspblob;
sess_data->iov[1].iov_len = blob_len;
sess_data->iov[1].iov_base = ntlmsspblob;
@@ -1362,7 +1475,7 @@ sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
rc = _sess_auth_rawntlmssp_assemble_req(sess_data);
if (rc)
- goto out;
+ goto out_free_ntlmsspblob;
rc = sess_sendreceive(sess_data);
@@ -1376,14 +1489,14 @@ sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
rc = 0;
if (rc)
- goto out;
+ goto out_free_ntlmsspblob;
cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
if (smb_buf->WordCount != 4) {
rc = -EIO;
cifs_dbg(VFS, "bad word count %d\n", smb_buf->WordCount);
- goto out;
+ goto out_free_ntlmsspblob;
}
ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */
@@ -1397,10 +1510,13 @@ sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
cifs_dbg(VFS, "bad security blob length %d\n",
blob_len);
rc = -EINVAL;
- goto out;
+ goto out_free_ntlmsspblob;
}
rc = decode_ntlmssp_challenge(bcc_ptr, blob_len, ses);
+
+out_free_ntlmsspblob:
+ kfree(ntlmsspblob);
out:
sess_free_buffer(sess_data);
@@ -1426,6 +1542,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
struct smb_hdr *smb_buf;
SESSION_SETUP_ANDX *pSMB;
struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = sess_data->server;
__u16 bytes_remaining;
char *bcc_ptr;
unsigned char *ntlmsspblob = NULL;
@@ -1442,7 +1559,8 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
smb_buf = (struct smb_hdr *)pSMB;
rc = build_ntlmssp_auth_blob(&ntlmsspblob,
- &blob_len, ses, sess_data->nls_cp);
+ &blob_len, ses, server,
+ sess_data->nls_cp);
if (rc)
goto out_free_ntlmsspblob;
sess_data->iov[1].iov_len = blob_len;
@@ -1513,7 +1631,7 @@ out_free_ntlmsspblob:
out:
sess_free_buffer(sess_data);
- if (!rc)
+ if (!rc)
rc = sess_establish_session(sess_data);
/* Cleanup */
@@ -1526,11 +1644,13 @@ out:
sess_data->result = rc;
}
-static int select_sec(struct cifs_ses *ses, struct sess_data *sess_data)
+static int select_sec(struct sess_data *sess_data)
{
int type;
+ struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = sess_data->server;
- type = cifs_select_sectype(ses->server, ses->sectype);
+ type = cifs_select_sectype(server, ses->sectype);
cifs_dbg(FYI, "sess setup type %d\n", type);
if (type == Unspecified) {
cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
@@ -1561,7 +1681,8 @@ static int select_sec(struct cifs_ses *ses, struct sess_data *sess_data)
}
int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
- const struct nls_table *nls_cp)
+ struct TCP_Server_Info *server,
+ const struct nls_table *nls_cp)
{
int rc = 0;
struct sess_data *sess_data;
@@ -1575,15 +1696,16 @@ int CIFS_SessSetup(const unsigned int xid, struct cifs_ses *ses,
if (!sess_data)
return -ENOMEM;
- rc = select_sec(ses, sess_data);
- if (rc)
- goto out;
-
sess_data->xid = xid;
sess_data->ses = ses;
+ sess_data->server = server;
sess_data->buf0_type = CIFS_NO_BUFFER;
sess_data->nls_cp = (struct nls_table *) nls_cp;
+ rc = select_sec(sess_data);
+ if (rc)
+ goto out;
+
while (sess_data->func)
sess_data->func(sess_data);
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 3b83839fc2c2..b2fb7bd11936 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -7,6 +7,7 @@
#include <linux/pagemap.h>
#include <linux/vfs.h>
+#include <uapi/linux/magic.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
@@ -163,7 +164,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
{
__u64 mid = 0;
__u16 last_mid, cur_mid;
- bool collision;
+ bool collision, reconnect = false;
spin_lock(&GlobalMid_Lock);
@@ -215,7 +216,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
* an eventual reconnect to clean out the pending_mid_q.
*/
if (num_mids > 32768)
- server->tcpStatus = CifsNeedReconnect;
+ reconnect = true;
if (!collision) {
mid = (__u64)cur_mid;
@@ -225,6 +226,11 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
cur_mid++;
}
spin_unlock(&GlobalMid_Lock);
+
+ if (reconnect) {
+ cifs_mark_tcp_ses_conns_for_reconnect(server, false);
+ }
+
return mid;
}
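cifs_get_next_mid() now only records that a reconnect is needed while GlobalMid_Lock is held and calls cifs_mark_tcp_ses_conns_for_reconnect() after dropping it, so the reconnect path is never entered from inside the mid allocator's lock. A small sketch of that decide-under-lock, act-after-unlock pattern (userspace mutex standing in for the spinlock):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int num_mids;

    static void mark_for_reconnect(void)
    {
        /* would take other locks; must run without 'lock' held */
        puts("reconnect scheduled");
    }

    static unsigned long get_next_mid(void)
    {
        static unsigned long mid;
        bool reconnect = false;

        pthread_mutex_lock(&lock);
        if (++num_mids > 32768)         /* too many outstanding mids */
            reconnect = true;
        mid++;
        pthread_mutex_unlock(&lock);

        if (reconnect)                  /* act only after unlocking */
            mark_for_reconnect();
        return mid;
    }

    int main(void)
    {
        printf("mid=%lu\n", get_next_mid());
        return 0;
    }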
@@ -414,14 +420,16 @@ cifs_need_neg(struct TCP_Server_Info *server)
}
static int
-cifs_negotiate(const unsigned int xid, struct cifs_ses *ses)
+cifs_negotiate(const unsigned int xid,
+ struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
{
int rc;
- rc = CIFSSMBNegotiate(xid, ses);
+ rc = CIFSSMBNegotiate(xid, ses, server);
if (rc == -EAGAIN) {
/* retry only once on 1st time connection */
- set_credits(ses->server, 1);
- rc = CIFSSMBNegotiate(xid, ses);
+ set_credits(server, 1);
+ rc = CIFSSMBNegotiate(xid, ses, server);
if (rc == -EAGAIN)
rc = -EHOSTDOWN;
}
@@ -878,7 +886,7 @@ cifs_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
{
int rc = -EOPNOTSUPP;
- buf->f_type = CIFS_MAGIC_NUMBER;
+ buf->f_type = CIFS_SUPER_MAGIC;
/*
* We could add a second check for a QFS Unix capability bit
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index ca692b2283cd..4125fd113cfb 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -13,8 +13,6 @@
#ifndef _SMB2_GLOB_H
#define _SMB2_GLOB_H
-#define SMB2_MAGIC_NUMBER 0xFE534D42
-
/*
*****************************************************************
* Constants go here
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index cdcdef32759e..b25623e3fe3d 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -847,16 +847,17 @@ smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *serve
* SMB2 header.
*
* @ses: server session structure
+ * @server: pointer to server info
* @iov: array containing the SMB request we will send to the server
* @nvec: number of array entries for the iov
*/
int
-smb311_update_preauth_hash(struct cifs_ses *ses, struct kvec *iov, int nvec)
+smb311_update_preauth_hash(struct cifs_ses *ses, struct TCP_Server_Info *server,
+ struct kvec *iov, int nvec)
{
int i, rc;
struct sdesc *d;
struct smb2_hdr *hdr;
- struct TCP_Server_Info *server = cifs_ses_server(ses);
hdr = (struct smb2_hdr *)iov[0].iov_base;
/* neg prot are always taken */
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index c5b1dea54ebc..af5d0830bc8a 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -13,6 +13,7 @@
#include <linux/sort.h>
#include <crypto/aead.h>
#include <linux/fiemap.h>
+#include <uapi/linux/magic.h>
#include "cifsfs.h"
#include "cifsglob.h"
#include "smb2pdu.h"
@@ -121,9 +122,13 @@ smb2_add_credits(struct TCP_Server_Info *server,
optype, scredits, add);
}
+ spin_lock(&cifs_tcp_ses_lock);
if (server->tcpStatus == CifsNeedReconnect
- || server->tcpStatus == CifsExiting)
+ || server->tcpStatus == CifsExiting) {
+ spin_unlock(&cifs_tcp_ses_lock);
return;
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
switch (rc) {
case -1:
@@ -208,11 +213,15 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
return rc;
spin_lock(&server->req_lock);
} else {
+ spin_unlock(&server->req_lock);
+ spin_lock(&cifs_tcp_ses_lock);
if (server->tcpStatus == CifsExiting) {
- spin_unlock(&server->req_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return -ENOENT;
}
+ spin_unlock(&cifs_tcp_ses_lock);
+ spin_lock(&server->req_lock);
scredits = server->credits;
/* can deadlock with reopen */
if (scredits <= 8) {
@@ -384,14 +393,16 @@ smb2_need_neg(struct TCP_Server_Info *server)
}
static int
-smb2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+smb2_negotiate(const unsigned int xid,
+ struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
{
int rc;
spin_lock(&GlobalMid_Lock);
- cifs_ses_server(ses)->CurrentMid = 0;
+ server->CurrentMid = 0;
spin_unlock(&GlobalMid_Lock);
- rc = SMB2_negotiate(xid, ses);
+ rc = SMB2_negotiate(xid, ses, server);
/* BB we probably don't need to retry with modern servers */
if (rc == -EAGAIN)
rc = -EHOSTDOWN;
@@ -2747,7 +2758,7 @@ smb2_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
goto qfs_exit;
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
- buf->f_type = SMB2_MAGIC_NUMBER;
+ buf->f_type = SMB2_SUPER_MAGIC;
info = (struct smb2_fs_full_size_info *)(
le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
@@ -2789,7 +2800,7 @@ smb311_queryfs(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB311_posix_qfs_info(xid, tcon, fid.persistent_fid,
fid.volatile_fid, buf);
- buf->f_type = SMB2_MAGIC_NUMBER;
+ buf->f_type = SMB2_SUPER_MAGIC;
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
return rc;
}
@@ -4808,7 +4819,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
if (server->ops->is_session_expired &&
server->ops->is_session_expired(buf)) {
if (!is_offloaded)
- cifs_reconnect(server);
+ cifs_reconnect(server, true);
return -1;
}
@@ -4981,10 +4992,12 @@ static void smb2_decrypt_offload(struct work_struct *work)
mid->callback(mid);
} else {
+ spin_lock(&cifs_tcp_ses_lock);
spin_lock(&GlobalMid_Lock);
if (dw->server->tcpStatus == CifsNeedReconnect) {
mid->mid_state = MID_RETRY_NEEDED;
spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&cifs_tcp_ses_lock);
mid->callback(mid);
} else {
mid->mid_state = MID_REQUEST_SUBMITTED;
@@ -4992,6 +5005,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
list_add_tail(&mid->qhead,
&dw->server->pending_mid_q);
spin_unlock(&GlobalMid_Lock);
+ spin_unlock(&cifs_tcp_ses_lock);
}
}
cifs_mid_q_entry_release(mid);
@@ -5221,13 +5235,13 @@ smb3_receive_transform(struct TCP_Server_Info *server,
sizeof(struct smb2_hdr)) {
cifs_server_dbg(VFS, "Transform message is too small (%u)\n",
pdu_length);
- cifs_reconnect(server);
+ cifs_reconnect(server, true);
return -ECONNABORTED;
}
if (pdu_length < orig_len + sizeof(struct smb2_transform_hdr)) {
cifs_server_dbg(VFS, "Transform message is broken\n");
- cifs_reconnect(server);
+ cifs_reconnect(server, true);
return -ECONNABORTED;
}
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 8b3670388cda..7e7909b1ae11 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -162,6 +162,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
return 0;
+ spin_lock(&cifs_tcp_ses_lock);
if (tcon->tidStatus == CifsExiting) {
/*
* only tree disconnect, open, and write,
@@ -171,11 +172,13 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if ((smb2_command != SMB2_WRITE) &&
(smb2_command != SMB2_CREATE) &&
(smb2_command != SMB2_TREE_DISCONNECT)) {
+ spin_unlock(&cifs_tcp_ses_lock);
cifs_dbg(FYI, "can not send cmd %d while umounting\n",
smb2_command);
return -ENODEV;
}
}
+ spin_unlock(&cifs_tcp_ses_lock);
if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
(!tcon->ses->server) || !server)
return -EIO;
@@ -214,8 +217,12 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
}
/* are we still trying to reconnect? */
- if (server->tcpStatus != CifsNeedReconnect)
+ spin_lock(&cifs_tcp_ses_lock);
+ if (server->tcpStatus != CifsNeedReconnect) {
+ spin_unlock(&cifs_tcp_ses_lock);
break;
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
if (retries && --retries)
continue;
@@ -232,64 +239,74 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
retries = server->nr_targets;
}
- if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
+ spin_lock(&ses->chan_lock);
+ if (!cifs_chan_needs_reconnect(ses, server) && !tcon->need_reconnect) {
+ spin_unlock(&ses->chan_lock);
return 0;
+ }
+ spin_unlock(&ses->chan_lock);
+ cifs_dbg(FYI, "sess reconnect mask: 0x%lx, tcon reconnect: %d",
+ tcon->ses->chans_need_reconnect,
+ tcon->need_reconnect);
nls_codepage = load_nls_default();
/*
- * need to prevent multiple threads trying to simultaneously reconnect
- * the same SMB session
- */
- mutex_lock(&tcon->ses->session_mutex);
-
- /*
* Recheck after acquire mutex. If another thread is negotiating
* and the server never sends an answer the socket will be closed
* and tcpStatus set to reconnect.
*/
+ spin_lock(&cifs_tcp_ses_lock);
if (server->tcpStatus == CifsNeedReconnect) {
+ spin_unlock(&cifs_tcp_ses_lock);
rc = -EHOSTDOWN;
- mutex_unlock(&tcon->ses->session_mutex);
goto out;
}
+ spin_unlock(&cifs_tcp_ses_lock);
/*
- * If we are reconnecting an extra channel, bind
+ * need to prevent multiple threads trying to simultaneously
+ * reconnect the same SMB session
*/
- if (CIFS_SERVER_IS_CHAN(server)) {
- ses->binding = true;
- ses->binding_chan = cifs_ses_find_chan(ses, server);
+ spin_lock(&ses->chan_lock);
+ if (!cifs_chan_needs_reconnect(ses, server)) {
+ spin_unlock(&ses->chan_lock);
+
+ /* this means that we only need to tree connect */
+ if (tcon->need_reconnect)
+ goto skip_sess_setup;
+
+ goto out;
}
+ spin_unlock(&ses->chan_lock);
- rc = cifs_negotiate_protocol(0, tcon->ses);
- if (!rc && tcon->ses->need_reconnect) {
- rc = cifs_setup_session(0, tcon->ses, nls_codepage);
+ mutex_lock(&ses->session_mutex);
+ rc = cifs_negotiate_protocol(0, ses, server);
+ if (!rc) {
+ rc = cifs_setup_session(0, ses, server, nls_codepage);
if ((rc == -EACCES) && !tcon->retry) {
+ mutex_unlock(&ses->session_mutex);
rc = -EHOSTDOWN;
- ses->binding = false;
- ses->binding_chan = NULL;
- mutex_unlock(&tcon->ses->session_mutex);
goto failed;
}
+ } else {
+ mutex_unlock(&ses->session_mutex);
+ goto out;
}
- /*
- * End of channel binding
- */
- ses->binding = false;
- ses->binding_chan = NULL;
+ mutex_unlock(&ses->session_mutex);
- if (rc || !tcon->need_reconnect) {
- mutex_unlock(&tcon->ses->session_mutex);
+skip_sess_setup:
+ mutex_lock(&ses->session_mutex);
+ if (!tcon->need_reconnect) {
+ mutex_unlock(&ses->session_mutex);
goto out;
}
-
cifs_mark_open_files_invalid(tcon);
if (tcon->use_persistent)
tcon->need_reopen_files = true;
rc = cifs_tree_connect(0, tcon, nls_codepage);
- mutex_unlock(&tcon->ses->session_mutex);
+ mutex_unlock(&ses->session_mutex);
cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
if (rc) {
@@ -833,7 +850,9 @@ add_posix_context(struct kvec *iov, unsigned int *num_iovec, umode_t mode)
*/
int
-SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
+SMB2_negotiate(const unsigned int xid,
+ struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
{
struct smb_rqst rqst;
struct smb2_negotiate_req *req;
@@ -842,7 +861,6 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
struct kvec rsp_iov;
int rc = 0;
int resp_buftype;
- struct TCP_Server_Info *server = cifs_ses_server(ses);
int blob_offset, blob_length;
char *security_blob;
int flags = CIFS_NEG_OP;
@@ -1221,6 +1239,7 @@ smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested)
struct SMB2_sess_data {
unsigned int xid;
struct cifs_ses *ses;
+ struct TCP_Server_Info *server;
struct nls_table *nls_cp;
void (*func)(struct SMB2_sess_data *);
int result;
@@ -1242,9 +1261,10 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
{
int rc;
struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = sess_data->server;
struct smb2_sess_setup_req *req;
- struct TCP_Server_Info *server = cifs_ses_server(ses);
unsigned int total_len;
+ bool is_binding = false;
rc = smb2_plain_req_init(SMB2_SESSION_SETUP, NULL, server,
(void **) &req,
@@ -1252,11 +1272,16 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
if (rc)
return rc;
- if (sess_data->ses->binding) {
- req->hdr.SessionId = cpu_to_le64(sess_data->ses->Suid);
+ spin_lock(&ses->chan_lock);
+ is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ spin_unlock(&ses->chan_lock);
+
+ if (is_binding) {
+ req->hdr.SessionId = cpu_to_le64(ses->Suid);
req->hdr.Flags |= SMB2_FLAGS_SIGNED;
req->PreviousSessionId = 0;
req->Flags = SMB2_SESSION_REQ_FLAG_BINDING;
+ cifs_dbg(FYI, "Binding to sess id: %llx\n", ses->Suid);
} else {
/* First session, not a reauthenticate */
req->hdr.SessionId = 0;
@@ -1266,6 +1291,8 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
*/
req->PreviousSessionId = cpu_to_le64(sess_data->previous_session);
req->Flags = 0; /* MBZ */
+ cifs_dbg(FYI, "Fresh session. Previous: %llx\n",
+ sess_data->previous_session);
}
/* enough to enable echos and oplocks and one max size write */
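
[Annotation] Here the per-session ses->binding flag is replaced by deriving "is this a channel bind?" from the bitmap of channels needing reconnect: a session-setup request is a bind exactly when at least one channel is still established, i.e. when not all channels need reconnect. A hedged userspace model follows; the macro mirrors the shape of the CIFS helper, not its exact kernel definition, and assumes fewer than 64 channels.

    #include <stdbool.h>
    #include <stdio.h>

    /* Model: bit i set in the mask means channel i must be re-established. */
    #define ALL_CHANS_NEED_RECONNECT(mask, nchans) \
            ((mask) == ((1UL << (nchans)) - 1))

    static bool is_binding(unsigned long mask, int nchans)
    {
            /* binding == at least one channel is still up */
            return !ALL_CHANS_NEED_RECONNECT(mask, nchans);
    }

    int main(void)
    {
            /* 3 channels, only channel 2 lost: a bind, not a fresh setup */
            printf("%d\n", is_binding(0x4UL, 3));  /* prints 1 */
            /* all 3 channels lost: full session setup */
            printf("%d\n", is_binding(0x7UL, 3));  /* prints 0 */
            return 0;
    }
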
@@ -1325,7 +1352,7 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
/* BB add code to build os and lm fields */
rc = cifs_send_recv(sess_data->xid, sess_data->ses,
- cifs_ses_server(sess_data->ses),
+ sess_data->server,
&rqst,
&sess_data->buf0_type,
CIFS_LOG_ERROR | CIFS_SESS_OP, &rsp_iov);
@@ -1340,11 +1367,11 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
{
int rc = 0;
struct cifs_ses *ses = sess_data->ses;
- struct TCP_Server_Info *server = cifs_ses_server(ses);
+ struct TCP_Server_Info *server = sess_data->server;
mutex_lock(&server->srv_mutex);
if (server->ops->generate_signingkey) {
- rc = server->ops->generate_signingkey(ses);
+ rc = server->ops->generate_signingkey(ses, server);
if (rc) {
cifs_dbg(FYI,
"SMB3 session key generation failed\n");
@@ -1359,14 +1386,6 @@ SMB2_sess_establish_session(struct SMB2_sess_data *sess_data)
mutex_unlock(&server->srv_mutex);
cifs_dbg(FYI, "SMB2/3 session established successfully\n");
- /* keep existing ses state if binding */
- if (!ses->binding) {
- spin_lock(&GlobalMid_Lock);
- ses->status = CifsGood;
- ses->need_reconnect = false;
- spin_unlock(&GlobalMid_Lock);
- }
-
return rc;
}
@@ -1376,15 +1395,17 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
{
int rc;
struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = sess_data->server;
struct cifs_spnego_msg *msg;
struct key *spnego_key = NULL;
struct smb2_sess_setup_rsp *rsp = NULL;
+ bool is_binding = false;
rc = SMB2_sess_alloc_buffer(sess_data);
if (rc)
goto out;
- spnego_key = cifs_get_spnego_key(ses);
+ spnego_key = cifs_get_spnego_key(ses, server);
if (IS_ERR(spnego_key)) {
rc = PTR_ERR(spnego_key);
if (rc == -ENOKEY)
@@ -1405,8 +1426,12 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
goto out_put_spnego_key;
}
+ spin_lock(&ses->chan_lock);
+ is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ spin_unlock(&ses->chan_lock);
+
/* keep session key if binding */
- if (!ses->binding) {
+ if (!is_binding) {
ses->auth_key.response = kmemdup(msg->data, msg->sesskey_len,
GFP_KERNEL);
if (!ses->auth_key.response) {
@@ -1427,7 +1452,7 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
/* keep session id and flags if binding */
- if (!ses->binding) {
+ if (!is_binding) {
ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
ses->session_flags = le16_to_cpu(rsp->SessionFlags);
}
@@ -1459,10 +1484,12 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
{
int rc;
struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = sess_data->server;
struct smb2_sess_setup_rsp *rsp = NULL;
unsigned char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */
u16 blob_length = 0;
+ bool is_binding = false;
/*
* If memory allocation is successful, caller of this function
@@ -1479,8 +1506,8 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
if (rc)
goto out_err;
- rc = build_ntlmssp_negotiate_blob(&ntlmssp_blob,
- &blob_length, ses,
+ rc = build_ntlmssp_smb3_negotiate_blob(&ntlmssp_blob,
+ &blob_length, ses, server,
sess_data->nls_cp);
if (rc)
goto out_err;
@@ -1519,8 +1546,12 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
cifs_dbg(FYI, "rawntlmssp session setup challenge phase\n");
+ spin_lock(&ses->chan_lock);
+ is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ spin_unlock(&ses->chan_lock);
+
/* keep existing ses id and flags if binding */
- if (!ses->binding) {
+ if (!is_binding) {
ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
ses->session_flags = le16_to_cpu(rsp->SessionFlags);
}
@@ -1545,11 +1576,13 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
{
int rc;
struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = sess_data->server;
struct smb2_sess_setup_req *req;
struct smb2_sess_setup_rsp *rsp = NULL;
unsigned char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */
u16 blob_length = 0;
+ bool is_binding = false;
rc = SMB2_sess_alloc_buffer(sess_data);
if (rc)
@@ -1558,8 +1591,9 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
req = (struct smb2_sess_setup_req *) sess_data->iov[0].iov_base;
req->hdr.SessionId = cpu_to_le64(ses->Suid);
- rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
- sess_data->nls_cp);
+ rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length,
+ ses, server,
+ sess_data->nls_cp);
if (rc) {
cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n", rc);
goto out;
@@ -1580,8 +1614,12 @@ SMB2_sess_auth_rawntlmssp_authenticate(struct SMB2_sess_data *sess_data)
rsp = (struct smb2_sess_setup_rsp *)sess_data->iov[0].iov_base;
+ spin_lock(&ses->chan_lock);
+ is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ spin_unlock(&ses->chan_lock);
+
/* keep existing ses id and flags if binding */
- if (!ses->binding) {
+ if (!is_binding) {
ses->Suid = le64_to_cpu(rsp->hdr.SessionId);
ses->session_flags = le16_to_cpu(rsp->SessionFlags);
}
@@ -1612,11 +1650,13 @@ out:
}
static int
-SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
+SMB2_select_sec(struct SMB2_sess_data *sess_data)
{
int type;
+ struct cifs_ses *ses = sess_data->ses;
+ struct TCP_Server_Info *server = sess_data->server;
- type = smb2_select_sectype(cifs_ses_server(ses), ses->sectype);
+ type = smb2_select_sectype(server, ses->sectype);
cifs_dbg(FYI, "sess setup type %d\n", type);
if (type == Unspecified) {
cifs_dbg(VFS, "Unable to select appropriate authentication method!\n");
@@ -1640,10 +1680,10 @@ SMB2_select_sec(struct cifs_ses *ses, struct SMB2_sess_data *sess_data)
int
SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
const struct nls_table *nls_cp)
{
int rc = 0;
- struct TCP_Server_Info *server = cifs_ses_server(ses);
struct SMB2_sess_data *sess_data;
cifs_dbg(FYI, "Session Setup\n");
@@ -1657,15 +1697,17 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
if (!sess_data)
return -ENOMEM;
- rc = SMB2_select_sec(ses, sess_data);
- if (rc)
- goto out;
sess_data->xid = xid;
sess_data->ses = ses;
+ sess_data->server = server;
sess_data->buf0_type = CIFS_NO_BUFFER;
sess_data->nls_cp = (struct nls_table *) nls_cp;
sess_data->previous_session = ses->Suid;
+ rc = SMB2_select_sec(sess_data);
+ if (rc)
+ goto out;
+
/*
* Initialize the session hash with the server one.
*/
@@ -1704,8 +1746,12 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
return -EIO;
/* no need to send SMB logoff if uid already closed due to reconnect */
- if (ses->need_reconnect)
+ spin_lock(&ses->chan_lock);
+ if (CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
+ spin_unlock(&ses->chan_lock);
goto smb2_session_already_dead;
+ }
+ spin_unlock(&ses->chan_lock);
rc = smb2_plain_req_init(SMB2_LOGOFF, NULL, ses->server,
(void **) &req, &total_len);
@@ -1867,8 +1913,6 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
- tcon->tidStatus = CifsGood;
- tcon->need_reconnect = false;
tcon->tid = le32_to_cpu(rsp->hdr.Id.SyncId.TreeId);
strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
@@ -1913,8 +1957,13 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
if (!ses || !(ses->server))
return -EIO;
- if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
+ spin_lock(&ses->chan_lock);
+ if ((tcon->need_reconnect) ||
+ (CIFS_ALL_CHANS_NEED_RECONNECT(tcon->ses))) {
+ spin_unlock(&ses->chan_lock);
return 0;
+ }
+ spin_unlock(&ses->chan_lock);
close_cached_dir_lease(&tcon->crfid);
@@ -2527,8 +2576,13 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
cp = load_nls_default();
cifs_strtoUTF16(*out_path, treename, treename_len, cp);
- UniStrcat(*out_path, sep);
- UniStrcat(*out_path, path);
+
+ /* Do not append the separator if the path is empty */
+ if (path[0] != cpu_to_le16(0x0000)) {
+ UniStrcat(*out_path, sep);
+ UniStrcat(*out_path, path);
+ }
+
unload_nls(cp);
return 0;
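
[Annotation] The hunk above stops emitting a trailing separator when the remaining path is empty, by testing the first UTF-16 code unit before appending. A small userspace model of the check (UniStrcat simplified to a toy copy loop, endianness handling omitted):

    #include <stdio.h>

    typedef unsigned short u16le;   /* stands in for __le16 */

    static void u16cat(u16le *dst, const u16le *src)
    {
            while (*dst)
                    dst++;
            while ((*dst++ = *src++))
                    ;
    }

    /* Append sep + path only when path is non-empty, mirroring the
     * alloc_path_with_tree_prefix() fix. */
    static void build_path(u16le *out, const u16le *sep, const u16le *path)
    {
            if (path[0] != 0x0000) {
                    u16cat(out, sep);
                    u16cat(out, path);
            }
    }

    int main(void)
    {
            u16le out[16] = { 's', 'h', 'a', 'r', 'e', 0 };
            u16le sep[] = { '\\', 0 }, empty[] = { 0 };

            build_path(out, sep, empty);    /* stays "share", no trailing '\' */
            for (u16le *p = out; *p; p++)
                    putchar((char)*p);
            putchar('\n');
            return 0;
    }
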
@@ -3722,27 +3776,35 @@ void smb2_reconnect_server(struct work_struct *work)
{
struct TCP_Server_Info *server = container_of(work,
struct TCP_Server_Info, reconnect.work);
- struct cifs_ses *ses;
+ struct TCP_Server_Info *pserver;
+ struct cifs_ses *ses, *ses2;
struct cifs_tcon *tcon, *tcon2;
- struct list_head tmp_list;
- int tcon_exist = false;
+ struct list_head tmp_list, tmp_ses_list;
+ bool tcon_exist = false, ses_exist = false;
+ bool tcon_selected = false;
int rc;
- int resched = false;
+ bool resched = false;
+ /* If server is a channel, select the primary channel */
+ pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
/* Prevent simultaneous reconnects that can corrupt tcon->rlist list */
- mutex_lock(&server->reconnect_mutex);
+ mutex_lock(&pserver->reconnect_mutex);
INIT_LIST_HEAD(&tmp_list);
- cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
+ INIT_LIST_HEAD(&tmp_ses_list);
+ cifs_dbg(FYI, "Reconnecting tcons and channels\n");
spin_lock(&cifs_tcp_ses_lock);
- list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+
+ tcon_selected = false;
+
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
if (tcon->need_reconnect || tcon->need_reopen_files) {
tcon->tc_count++;
list_add_tail(&tcon->rlist, &tmp_list);
- tcon_exist = true;
+ tcon_selected = tcon_exist = true;
}
}
/*
@@ -3751,15 +3813,27 @@ void smb2_reconnect_server(struct work_struct *work)
*/
if (ses->tcon_ipc && ses->tcon_ipc->need_reconnect) {
list_add_tail(&ses->tcon_ipc->rlist, &tmp_list);
- tcon_exist = true;
+ tcon_selected = tcon_exist = true;
ses->ses_count++;
}
+ /*
+ * handle the case where channel needs to reconnect
+ * binding session, but tcon is healthy (some other channel
+ * is active)
+ */
+ spin_lock(&ses->chan_lock);
+ if (!tcon_selected && cifs_chan_needs_reconnect(ses, server)) {
+ list_add_tail(&ses->rlist, &tmp_ses_list);
+ ses_exist = true;
+ ses->ses_count++;
+ }
+ spin_unlock(&ses->chan_lock);
}
/*
* Get the reference to server struct to be sure that the last call of
* cifs_put_tcon() in the loop below won't release the server pointer.
*/
- if (tcon_exist)
+ if (tcon_exist || ses_exist)
server->srv_count++;
spin_unlock(&cifs_tcp_ses_lock);
@@ -3777,13 +3851,41 @@ void smb2_reconnect_server(struct work_struct *work)
cifs_put_tcon(tcon);
}
- cifs_dbg(FYI, "Reconnecting tcons finished\n");
+ if (!ses_exist)
+ goto done;
+
+ /* allocate a dummy tcon struct used for reconnect */
+ tcon = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL);
+ if (!tcon) {
+ resched = true;
+ list_del_init(&ses->rlist);
+ cifs_put_smb_ses(ses);
+ goto done;
+ }
+
+ tcon->tidStatus = CifsGood;
+ tcon->retry = false;
+ tcon->need_reconnect = false;
+
+ /* now reconnect sessions for necessary channels */
+ list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
+ tcon->ses = ses;
+ rc = smb2_reconnect(SMB2_INTERNAL_CMD, tcon, server);
+ if (rc)
+ resched = true;
+ list_del_init(&ses->rlist);
+ cifs_put_smb_ses(ses);
+ }
+ kfree(tcon);
+
+done:
+ cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
if (resched)
queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
- mutex_unlock(&server->reconnect_mutex);
+ mutex_unlock(&pserver->reconnect_mutex);
/* now we can safely release srv struct */
- if (tcon_exist)
+ if (tcon_exist || ses_exist)
cifs_put_tcp_session(server, 1);
}
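
[Annotation] smb2_reconnect_server() now also collects sessions whose channels need rebinding onto tmp_ses_list under the global lock, then reconnects them outside it via a throwaway tcon, dropping each entry with list_del_init() plus a put. A loose userspace model of that collect-then-process shape, using a singly linked list (structure names invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct ses {
            struct ses *next;   /* models the rlist linkage */
            int id;
            int refcount;
    };

    static void put_ses(struct ses *s)
    {
            if (--s->refcount == 0)
                    free(s);
    }

    /* Process and unlink every entry, caching ->next first so freeing the
     * current node is safe -- the same reason the kernel loop uses
     * list_for_each_entry_safe(). */
    static void reconnect_all(struct ses **head)
    {
            struct ses *s, *n;

            for (s = *head; s; s = n) {
                    n = s->next;
                    printf("reconnecting session %d\n", s->id);
                    s->next = NULL;   /* models list_del_init() */
                    put_ses(s);       /* drop the ref taken at collect time */
            }
            *head = NULL;
    }

    int main(void)
    {
            struct ses *b = calloc(1, sizeof(*b));
            struct ses *a = calloc(1, sizeof(*a));
            struct ses *head;

            a->id = 1; a->refcount = 1; a->next = b;
            b->id = 2; b->refcount = 1;
            head = a;
            reconnect_all(&head);
            return 0;
    }
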
@@ -3797,13 +3899,16 @@ SMB2_echo(struct TCP_Server_Info *server)
.rq_nvec = 1 };
unsigned int total_len;
- cifs_dbg(FYI, "In echo request\n");
+ cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id);
+ spin_lock(&cifs_tcp_ses_lock);
if (server->tcpStatus == CifsNeedNegotiate) {
+ spin_unlock(&cifs_tcp_ses_lock);
/* No need to send echo on newly established connections */
mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
return rc;
}
+ spin_unlock(&cifs_tcp_ses_lock);
rc = smb2_plain_req_init(SMB2_ECHO, NULL, server,
(void **)&req, &total_len);
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 096fada16ebd..4a7062fd1c26 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -123,8 +123,11 @@ extern void smb2_set_related(struct smb_rqst *rqst);
* SMB2 Worker functions - most of protocol specific implementation details
* are contained within these calls.
*/
-extern int SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses);
+extern int SMB2_negotiate(const unsigned int xid,
+ struct cifs_ses *ses,
+ struct TCP_Server_Info *server);
extern int SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
const struct nls_table *nls_cp);
extern int SMB2_logoff(const unsigned int xid, struct cifs_ses *ses);
extern int SMB2_tcon(const unsigned int xid, struct cifs_ses *ses,
@@ -276,6 +279,7 @@ extern void smb2_copy_fs_info_to_kstatfs(
struct kstatfs *kst);
extern int smb311_crypto_shash_allocate(struct TCP_Server_Info *server);
extern int smb311_update_preauth_hash(struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
struct kvec *iov, int nvec);
extern int smb2_query_info_compound(const unsigned int xid,
struct cifs_tcon *tcon,
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 2bf047b390a9..2af79093b78b 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -100,13 +100,16 @@ int smb2_get_sign_key(__u64 ses_id, struct TCP_Server_Info *server, u8 *key)
goto out;
found:
- if (ses->binding) {
+ spin_lock(&ses->chan_lock);
+ if (cifs_chan_needs_reconnect(ses, server) &&
+ !CIFS_ALL_CHANS_NEED_RECONNECT(ses)) {
/*
* If we are in the process of binding a new channel
* to an existing session, use the master connection
* session key
*/
memcpy(key, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE);
+ spin_unlock(&ses->chan_lock);
goto out;
}
@@ -118,9 +121,11 @@ found:
chan = ses->chans + i;
if (chan->server == server) {
memcpy(key, chan->signkey, SMB3_SIGN_KEY_SIZE);
+ spin_unlock(&ses->chan_lock);
goto out;
}
}
+ spin_unlock(&ses->chan_lock);
cifs_dbg(VFS,
"%s: Could not find channel signing key for session 0x%llx\n",
@@ -390,12 +395,18 @@ struct derivation_triplet {
static int
generate_smb3signingkey(struct cifs_ses *ses,
+ struct TCP_Server_Info *server,
const struct derivation_triplet *ptriplet)
{
int rc;
-#ifdef CONFIG_CIFS_DEBUG_DUMP_KEYS
- struct TCP_Server_Info *server = ses->server;
-#endif
+ bool is_binding = false;
+ int chan_index = 0;
+
+ spin_lock(&ses->chan_lock);
+ is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses);
+ chan_index = cifs_ses_get_chan_index(ses, server);
+ /* TODO: introduce ref counting for channels when they can be freed */
+ spin_unlock(&ses->chan_lock);
/*
* All channels use the same encryption/decryption keys but
@@ -407,10 +418,10 @@ generate_smb3signingkey(struct cifs_ses *ses,
* master connection signing key stored in the session
*/
- if (ses->binding) {
+ if (is_binding) {
rc = generate_key(ses, ptriplet->signing.label,
ptriplet->signing.context,
- cifs_ses_binding_channel(ses)->signkey,
+ ses->chans[chan_index].signkey,
SMB3_SIGN_KEY_SIZE);
if (rc)
return rc;
@@ -422,8 +433,11 @@ generate_smb3signingkey(struct cifs_ses *ses,
if (rc)
return rc;
+ /* safe to access primary channel, since it will never go away */
+ spin_lock(&ses->chan_lock);
memcpy(ses->chans[0].signkey, ses->smb3signingkey,
SMB3_SIGN_KEY_SIZE);
+ spin_unlock(&ses->chan_lock);
rc = generate_key(ses, ptriplet->encryption.label,
ptriplet->encryption.context,
@@ -470,7 +484,8 @@ generate_smb3signingkey(struct cifs_ses *ses,
}
int
-generate_smb30signingkey(struct cifs_ses *ses)
+generate_smb30signingkey(struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
{
struct derivation_triplet triplet;
@@ -494,11 +509,12 @@ generate_smb30signingkey(struct cifs_ses *ses)
d->context.iov_base = "ServerOut";
d->context.iov_len = 10;
- return generate_smb3signingkey(ses, &triplet);
+ return generate_smb3signingkey(ses, server, &triplet);
}
int
-generate_smb311signingkey(struct cifs_ses *ses)
+generate_smb311signingkey(struct cifs_ses *ses,
+ struct TCP_Server_Info *server)
{
struct derivation_triplet triplet;
@@ -522,7 +538,7 @@ generate_smb311signingkey(struct cifs_ses *ses)
d->context.iov_base = ses->preauth_sha_hash;
d->context.iov_len = 64;
- return generate_smb3signingkey(ses, &triplet);
+ return generate_smb3signingkey(ses, server, &triplet);
}
int
@@ -624,8 +640,12 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
if (!is_signed)
return 0;
- if (server->tcpStatus == CifsNeedNegotiate)
+ spin_lock(&cifs_tcp_ses_lock);
+ if (server->tcpStatus == CifsNeedNegotiate) {
+ spin_unlock(&cifs_tcp_ses_lock);
return 0;
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
if (!is_binding && !server->session_estab) {
strncpy(shdr->Signature, "BSRSPYL", 8);
return 0;
@@ -741,30 +761,41 @@ static int
smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
struct smb2_hdr *shdr, struct mid_q_entry **mid)
{
- if (server->tcpStatus == CifsExiting)
+ spin_lock(&cifs_tcp_ses_lock);
+ if (server->tcpStatus == CifsExiting) {
+ spin_unlock(&cifs_tcp_ses_lock);
return -ENOENT;
+ }
if (server->tcpStatus == CifsNeedReconnect) {
+ spin_unlock(&cifs_tcp_ses_lock);
cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
return -EAGAIN;
}
if (server->tcpStatus == CifsNeedNegotiate &&
- shdr->Command != SMB2_NEGOTIATE)
+ shdr->Command != SMB2_NEGOTIATE) {
+ spin_unlock(&cifs_tcp_ses_lock);
return -EAGAIN;
+ }
if (ses->status == CifsNew) {
if ((shdr->Command != SMB2_SESSION_SETUP) &&
- (shdr->Command != SMB2_NEGOTIATE))
+ (shdr->Command != SMB2_NEGOTIATE)) {
+ spin_unlock(&cifs_tcp_ses_lock);
return -EAGAIN;
+ }
/* else ok - we are setting up session */
}
if (ses->status == CifsExiting) {
- if (shdr->Command != SMB2_LOGOFF)
+ if (shdr->Command != SMB2_LOGOFF) {
+ spin_unlock(&cifs_tcp_ses_lock);
return -EAGAIN;
+ }
/* else ok - we are shutting down the session */
}
+ spin_unlock(&cifs_tcp_ses_lock);
*mid = smb2_mid_entry_alloc(shdr, server);
if (*mid == NULL)
@@ -837,9 +868,13 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
(struct smb2_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
+ spin_lock(&cifs_tcp_ses_lock);
if (server->tcpStatus == CifsNeedNegotiate &&
- shdr->Command != SMB2_NEGOTIATE)
+ shdr->Command != SMB2_NEGOTIATE) {
+ spin_unlock(&cifs_tcp_ses_lock);
return ERR_PTR(-EAGAIN);
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
smb2_seq_num_into_buf(server, shdr);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 61ea3d3f95b4..a4c3e027cca2 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -430,9 +430,7 @@ unmask:
* be taken as the remainder of this one. We need to kill the
* socket so the server throws away the partial SMB
*/
- spin_lock(&GlobalMid_Lock);
- server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&GlobalMid_Lock);
+ cifs_mark_tcp_ses_conns_for_reconnect(server, false);
trace_smb3_partial_send_reconnect(server->CurrentMid,
server->conn_id, server->hostname);
}
@@ -578,10 +576,14 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
return -ERESTARTSYS;
spin_lock(&server->req_lock);
} else {
+ spin_unlock(&server->req_lock);
+
+ spin_lock(&cifs_tcp_ses_lock);
if (server->tcpStatus == CifsExiting) {
- spin_unlock(&server->req_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
return -ENOENT;
}
+ spin_unlock(&cifs_tcp_ses_lock);
/*
* For normal commands, reserve the last MAX_COMPOUND
@@ -596,6 +598,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
* for servers that are slow to hand out credits on
* new sessions.
*/
+ spin_lock(&server->req_lock);
if (!optype && num_credits == 1 &&
server->in_flight > 2 * MAX_COMPOUND &&
*credits <= MAX_COMPOUND) {
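
[Annotation] In this hunk, server->req_lock is released before cifs_tcp_ses_lock is taken and re-acquired afterwards, so the two locks are never held in an order that could invert elsewhere. A userspace model of the drop-then-retake discipline (names invented):

    #include <pthread.h>
    #include <stdbool.h>

    static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t ses_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool exiting;

    /* Called with req_lock held; returns with req_lock held again.
     * ses_lock is only ever taken with req_lock dropped, so no thread
     * can hold the pair in the opposite order. */
    static bool server_exiting_check(void)
    {
            bool dead;

            pthread_mutex_unlock(&req_lock);

            pthread_mutex_lock(&ses_lock);
            dead = exiting;
            pthread_mutex_unlock(&ses_lock);

            pthread_mutex_lock(&req_lock);
            return dead;
    }

    int main(void)
    {
            pthread_mutex_lock(&req_lock);
            bool dead = server_exiting_check();  /* req_lock held again here */
            pthread_mutex_unlock(&req_lock);
            return dead;
    }
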
@@ -723,28 +726,25 @@ cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
struct mid_q_entry **ppmidQ)
{
- if (ses->server->tcpStatus == CifsExiting) {
- return -ENOENT;
- }
-
- if (ses->server->tcpStatus == CifsNeedReconnect) {
- cifs_dbg(FYI, "tcp session dead - return to caller to retry\n");
- return -EAGAIN;
- }
-
+ spin_lock(&cifs_tcp_ses_lock);
if (ses->status == CifsNew) {
if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
- (in_buf->Command != SMB_COM_NEGOTIATE))
+ (in_buf->Command != SMB_COM_NEGOTIATE)) {
+ spin_unlock(&cifs_tcp_ses_lock);
return -EAGAIN;
+ }
/* else ok - we are setting up session */
}
if (ses->status == CifsExiting) {
/* check if SMB session is bad because we are setting it up */
- if (in_buf->Command != SMB_COM_LOGOFF_ANDX)
+ if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
+ spin_unlock(&cifs_tcp_ses_lock);
return -EAGAIN;
+ }
/* else ok - we are shutting down session */
}
+ spin_unlock(&cifs_tcp_ses_lock);
*ppmidQ = AllocMidQEntry(in_buf, ses->server);
if (*ppmidQ == NULL)
@@ -1044,19 +1044,14 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
if (!ses)
return NULL;
+ /* round robin */
+ index = (uint)atomic_inc_return(&ses->chan_seq);
+
spin_lock(&ses->chan_lock);
- if (!ses->binding) {
- /* round robin */
- if (ses->chan_count > 1) {
- index = (uint)atomic_inc_return(&ses->chan_seq);
- index %= ses->chan_count;
- }
- spin_unlock(&ses->chan_lock);
- return ses->chans[index].server;
- } else {
- spin_unlock(&ses->chan_lock);
- return cifs_ses_server(ses);
- }
+ index %= ses->chan_count;
+ spin_unlock(&ses->chan_lock);
+
+ return ses->chans[index].server;
}
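
[Annotation] cifs_pick_channel() now unconditionally bumps the atomic sequence counter and reduces it modulo chan_count under chan_lock, removing the binding special case. A minimal sketch of the same round-robin pattern with C11 atomics (field layout invented; assumes chan_count >= 1):

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    #define MAX_CHANS 8

    struct session {
            pthread_mutex_t chan_lock;
            atomic_uint chan_seq;        /* models ses->chan_seq */
            int chan_count;              /* protected by chan_lock */
            int chans[MAX_CHANS];        /* stand-ins for channel servers */
    };

    /* The counter is advanced without the lock; only the modulo against
     * chan_count (which chan_lock protects) happens locked. */
    static int pick_channel(struct session *ses)
    {
            unsigned int index = atomic_fetch_add(&ses->chan_seq, 1) + 1;
            int chan;

            pthread_mutex_lock(&ses->chan_lock);
            index %= ses->chan_count;
            chan = ses->chans[index];
            pthread_mutex_unlock(&ses->chan_lock);
            return chan;
    }

    int main(void)
    {
            struct session ses = { .chan_lock = PTHREAD_MUTEX_INITIALIZER,
                                   .chan_count = 3,
                                   .chans = { 10, 11, 12 } };

            for (int i = 0; i < 5; i++)
                    printf("%d\n", pick_channel(&ses)); /* 11 12 10 11 12 */
            return 0;
    }
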
int
@@ -1084,8 +1079,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
return -EIO;
}
- if (server->tcpStatus == CifsExiting)
+ spin_lock(&cifs_tcp_ses_lock);
+ if (server->tcpStatus == CifsExiting) {
+ spin_unlock(&cifs_tcp_ses_lock);
return -ENOENT;
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
/*
* Wait for all the requests to become available.
@@ -1188,12 +1187,17 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
/*
* Compounding is never used during session establish.
*/
+ spin_lock(&cifs_tcp_ses_lock);
if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
+ spin_unlock(&cifs_tcp_ses_lock);
+
mutex_lock(&server->srv_mutex);
- smb311_update_preauth_hash(ses, rqst[0].rq_iov,
- rqst[0].rq_nvec);
+ smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec);
mutex_unlock(&server->srv_mutex);
+
+ spin_lock(&cifs_tcp_ses_lock);
}
+ spin_unlock(&cifs_tcp_ses_lock);
for (i = 0; i < num_rqst; i++) {
rc = wait_for_response(server, midQ[i]);
@@ -1256,15 +1260,19 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
/*
* Compounding is never used during session establish.
*/
+ spin_lock(&cifs_tcp_ses_lock);
if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) {
struct kvec iov = {
.iov_base = resp_iov[0].iov_base,
.iov_len = resp_iov[0].iov_len
};
+ spin_unlock(&cifs_tcp_ses_lock);
mutex_lock(&server->srv_mutex);
- smb311_update_preauth_hash(ses, &iov, 1);
+ smb311_update_preauth_hash(ses, server, &iov, 1);
mutex_unlock(&server->srv_mutex);
+ spin_lock(&cifs_tcp_ses_lock);
}
+ spin_unlock(&cifs_tcp_ses_lock);
out:
/*
@@ -1353,8 +1361,12 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
return -EIO;
}
- if (server->tcpStatus == CifsExiting)
+ spin_lock(&cifs_tcp_ses_lock);
+ if (server->tcpStatus == CifsExiting) {
+ spin_unlock(&cifs_tcp_ses_lock);
return -ENOENT;
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
@@ -1494,8 +1506,12 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
return -EIO;
}
- if (server->tcpStatus == CifsExiting)
+ spin_lock(&cifs_tcp_ses_lock);
+ if (server->tcpStatus == CifsExiting) {
+ spin_unlock(&cifs_tcp_ses_lock);
return -ENOENT;
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
@@ -1553,10 +1569,12 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
(server->tcpStatus != CifsNew)));
/* Were we interrupted by a signal ? */
+ spin_lock(&cifs_tcp_ses_lock);
if ((rc == -ERESTARTSYS) &&
(midQ->mid_state == MID_REQUEST_SUBMITTED) &&
((server->tcpStatus == CifsGood) ||
(server->tcpStatus == CifsNew))) {
+ spin_unlock(&cifs_tcp_ses_lock);
if (in_buf->Command == SMB_COM_TRANSACTION2) {
/* POSIX lock. We send a NT_CANCEL SMB to cause the
@@ -1595,7 +1613,9 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
/* We got the response - restart system call. */
rstart = 1;
+ spin_lock(&cifs_tcp_ses_lock);
}
+ spin_unlock(&cifs_tcp_ses_lock);
rc = cifs_sync_mid_result(midQ, server);
if (rc != 0)
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 7d8b72d67c80..9d486fbbfbbd 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -175,11 +175,13 @@ static int cifs_xattr_set(const struct xattr_handler *handler,
switch (handler->flags) {
case XATTR_CIFS_NTSD_FULL:
aclflags = (CIFS_ACL_OWNER |
+ CIFS_ACL_GROUP |
CIFS_ACL_DACL |
CIFS_ACL_SACL);
break;
case XATTR_CIFS_NTSD:
aclflags = (CIFS_ACL_OWNER |
+ CIFS_ACL_GROUP |
CIFS_ACL_DACL);
break;
case XATTR_CIFS_ACL:
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 1466b5d01cbb..d3cd2a94d1e8 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -1780,8 +1780,8 @@ void configfs_unregister_group(struct config_group *group)
configfs_detach_group(&group->cg_item);
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
+ d_drop(dentry);
fsnotify_rmdir(d_inode(parent), dentry);
- d_delete(dentry);
inode_unlock(d_inode(parent));
dput(dentry);
@@ -1922,10 +1922,10 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
configfs_detach_group(&group->cg_item);
d_inode(dentry)->i_flags |= S_DEAD;
dont_mount(dentry);
- fsnotify_rmdir(d_inode(root), dentry);
inode_unlock(d_inode(dentry));
- d_delete(dentry);
+ d_drop(dentry);
+ fsnotify_rmdir(d_inode(root), dentry);
inode_unlock(d_inode(root));
diff --git a/fs/coredump.c b/fs/coredump.c
index a6b3c196cdef..1c060c0a2d72 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -41,6 +41,7 @@
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>
+#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -52,9 +53,9 @@
#include <trace/events/sched.h>
-int core_uses_pid;
-unsigned int core_pipe_limit;
-char core_pattern[CORENAME_MAX_SIZE] = "core";
+static int core_uses_pid;
+static unsigned int core_pipe_limit;
+static char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;
struct core_name {
@@ -62,8 +63,6 @@ struct core_name {
int used, size;
};
-/* The maximal length of core_pattern is also specified in sysctl.c */
-
static int expand_corename(struct core_name *cn, int size)
{
char *corename = krealloc(cn->corename, size, GFP_KERNEL);
@@ -347,13 +346,13 @@ out:
return ispipe;
}
-static int zap_process(struct task_struct *start, int exit_code, int flags)
+static int zap_process(struct task_struct *start, int exit_code)
{
struct task_struct *t;
int nr = 0;
/* ignore all signals except SIGKILL, see prepare_signal() */
- start->signal->flags = SIGNAL_GROUP_COREDUMP | flags;
+ start->signal->flags = SIGNAL_GROUP_EXIT;
start->signal->group_exit_code = exit_code;
start->signal->group_stop_count = 0;
@@ -372,13 +371,13 @@ static int zap_process(struct task_struct *start, int exit_code, int flags)
static int zap_threads(struct task_struct *tsk,
struct core_state *core_state, int exit_code)
{
+ struct signal_struct *signal = tsk->signal;
int nr = -EAGAIN;
spin_lock_irq(&tsk->sighand->siglock);
- if (!signal_group_exit(tsk->signal)) {
- tsk->signal->core_state = core_state;
- tsk->signal->group_exit_task = tsk;
- nr = zap_process(tsk, exit_code, 0);
+ if (!(signal->flags & SIGNAL_GROUP_EXIT) && !signal->group_exec_task) {
+ signal->core_state = core_state;
+ nr = zap_process(tsk, exit_code);
clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
tsk->flags |= PF_DUMPCORE;
atomic_set(&core_state->nr_threads, nr);
@@ -426,8 +425,6 @@ static void coredump_finish(bool core_dumped)
spin_lock_irq(&current->sighand->siglock);
if (core_dumped && !__fatal_signal_pending(current))
current->signal->group_exit_code |= 0x80;
- current->signal->group_exit_task = NULL;
- current->signal->flags = SIGNAL_GROUP_EXIT;
next = current->signal->core_state->dumper.next;
current->signal->core_state = NULL;
spin_unlock_irq(&current->sighand->siglock);
@@ -895,6 +892,63 @@ int dump_align(struct coredump_params *cprm, int align)
}
EXPORT_SYMBOL(dump_align);
+#ifdef CONFIG_SYSCTL
+
+void validate_coredump_safety(void)
+{
+ if (suid_dumpable == SUID_DUMP_ROOT &&
+ core_pattern[0] != '/' && core_pattern[0] != '|') {
+ pr_warn(
+"Unsafe core_pattern used with fs.suid_dumpable=2.\n"
+"Pipe handler or fully qualified core dump path required.\n"
+"Set kernel.core_pattern before fs.suid_dumpable.\n"
+ );
+ }
+}
+
+static int proc_dostring_coredump(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int error = proc_dostring(table, write, buffer, lenp, ppos);
+
+ if (!error)
+ validate_coredump_safety();
+ return error;
+}
+
+static struct ctl_table coredump_sysctls[] = {
+ {
+ .procname = "core_uses_pid",
+ .data = &core_uses_pid,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "core_pattern",
+ .data = core_pattern,
+ .maxlen = CORENAME_MAX_SIZE,
+ .mode = 0644,
+ .proc_handler = proc_dostring_coredump,
+ },
+ {
+ .procname = "core_pipe_limit",
+ .data = &core_pipe_limit,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ { }
+};
+
+static int __init init_fs_coredump_sysctls(void)
+{
+ register_sysctl_init("kernel", coredump_sysctls);
+ return 0;
+}
+fs_initcall(init_fs_coredump_sysctls);
+#endif /* CONFIG_SYSCTL */
+
/*
* The purpose of always_dump_vma() is to make sure that special kernel mappings
* that are useful for post-mortem analysis are included in every core dump.
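
[Annotation] With core_uses_pid, core_pipe_limit and core_pattern now static to fs/coredump.c, the supported interface is procfs; the paths below are stable ABI even after the move. A small userspace reader for the three knobs:

    #include <stdio.h>

    static void show(const char *path)
    {
            char buf[256];
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror(path);
                    return;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("%s: %s", path, buf);
            fclose(f);
    }

    int main(void)
    {
            show("/proc/sys/kernel/core_pattern");
            show("/proc/sys/kernel/core_uses_pid");
            show("/proc/sys/kernel/core_pipe_limit");
            return 0;
    }
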
diff --git a/fs/dax.c b/fs/dax.c
index 4e3e5a283a91..cd03485867a7 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -709,26 +709,26 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
return __dax_invalidate_entry(mapping, index, false);
}
-static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev,
- sector_t sector, struct page *to, unsigned long vaddr)
+static pgoff_t dax_iomap_pgoff(const struct iomap *iomap, loff_t pos)
{
+ return PHYS_PFN(iomap->addr + (pos & PAGE_MASK) - iomap->offset);
+}
+
+static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)
+{
+ pgoff_t pgoff = dax_iomap_pgoff(&iter->iomap, iter->pos);
void *vto, *kaddr;
- pgoff_t pgoff;
long rc;
int id;
- rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
- if (rc)
- return rc;
-
id = dax_read_lock();
- rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
+ rc = dax_direct_access(iter->iomap.dax_dev, pgoff, 1, &kaddr, NULL);
if (rc < 0) {
dax_read_unlock(id);
return rc;
}
- vto = kmap_atomic(to);
- copy_user_page(vto, (void __force *)kaddr, vaddr, to);
+ vto = kmap_atomic(vmf->cow_page);
+ copy_user_page(vto, kaddr, vmf->address, vmf->cow_page);
kunmap_atomic(vto);
dax_read_unlock(id);
return 0;
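
[Annotation] dax_iomap_pgoff() turns a file position into a page frame offset within the DAX device: take the page-aligned position, rebase it from the extent's file offset (iomap->offset) to its device address (iomap->addr), then convert bytes to a PFN. A worked example with 4 KiB pages; the extent values are invented for illustration:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PAGE_MASK  (~(PAGE_SIZE - 1))
    #define PHYS_PFN(x) ((unsigned long)((x) >> PAGE_SHIFT))

    /* Same arithmetic as dax_iomap_pgoff(): device address of the page
     * containing pos, expressed as a page frame number. */
    static unsigned long dax_pgoff(uint64_t addr, uint64_t offset, uint64_t pos)
    {
            return PHYS_PFN(addr + (pos & PAGE_MASK) - offset);
    }

    int main(void)
    {
            /* extent: file offset 0x200000 maps to device address 0x10000000 */
            uint64_t addr = 0x10000000, offset = 0x200000;

            /* pos 0x201234 lies in the page at 0x201000 -> device 0x10001000 */
            printf("pgoff = 0x%lx\n", dax_pgoff(addr, offset, 0x201234)); /* 0x10001 */
            return 0;
    }
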
@@ -1005,22 +1005,13 @@ int dax_writeback_mapping_range(struct address_space *mapping,
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
-static sector_t dax_iomap_sector(const struct iomap *iomap, loff_t pos)
-{
- return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
-}
-
static int dax_iomap_pfn(const struct iomap *iomap, loff_t pos, size_t size,
pfn_t *pfnp)
{
- const sector_t sector = dax_iomap_sector(iomap, pos);
- pgoff_t pgoff;
+ pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
int id, rc;
long length;
- rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
- if (rc)
- return rc;
id = dax_read_lock();
length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
NULL, pfnp);
@@ -1126,42 +1117,87 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
}
#endif /* CONFIG_FS_DAX_PMD */
-s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
+static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
+ unsigned int offset, size_t size)
{
- sector_t sector = iomap_sector(iomap, pos & PAGE_MASK);
- pgoff_t pgoff;
- long rc, id;
void *kaddr;
- bool page_aligned = false;
- unsigned offset = offset_in_page(pos);
- unsigned size = min_t(u64, PAGE_SIZE - offset, length);
+ long ret;
- if (IS_ALIGNED(sector << SECTOR_SHIFT, PAGE_SIZE) &&
- (size == PAGE_SIZE))
- page_aligned = true;
+ ret = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
+ if (ret > 0) {
+ memset(kaddr + offset, 0, size);
+ dax_flush(dax_dev, kaddr + offset, size);
+ }
+ return ret;
+}
- rc = bdev_dax_pgoff(iomap->bdev, sector, PAGE_SIZE, &pgoff);
- if (rc)
- return rc;
+static s64 dax_zero_iter(struct iomap_iter *iter, bool *did_zero)
+{
+ const struct iomap *iomap = &iter->iomap;
+ const struct iomap *srcmap = iomap_iter_srcmap(iter);
+ loff_t pos = iter->pos;
+ u64 length = iomap_length(iter);
+ s64 written = 0;
+
+ /* already zeroed? we're done. */
+ if (srcmap->type == IOMAP_HOLE || srcmap->type == IOMAP_UNWRITTEN)
+ return length;
+
+ do {
+ unsigned offset = offset_in_page(pos);
+ unsigned size = min_t(u64, PAGE_SIZE - offset, length);
+ pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
+ long rc;
+ int id;
+
+ id = dax_read_lock();
+ if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
+ rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
+ else
+ rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
+ dax_read_unlock(id);
- id = dax_read_lock();
+ if (rc < 0)
+ return rc;
+ pos += size;
+ length -= size;
+ written += size;
+ if (did_zero)
+ *did_zero = true;
+ } while (length > 0);
- if (page_aligned)
- rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
- else
- rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
- if (rc < 0) {
- dax_read_unlock(id);
- return rc;
- }
+ return written;
+}
- if (!page_aligned) {
- memset(kaddr + offset, 0, size);
- dax_flush(iomap->dax_dev, kaddr + offset, size);
- }
- dax_read_unlock(id);
- return size;
+int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+ const struct iomap_ops *ops)
+{
+ struct iomap_iter iter = {
+ .inode = inode,
+ .pos = pos,
+ .len = len,
+ .flags = IOMAP_DAX | IOMAP_ZERO,
+ };
+ int ret;
+
+ while ((ret = iomap_iter(&iter, ops)) > 0)
+ iter.processed = dax_zero_iter(&iter, did_zero);
+ return ret;
}
+EXPORT_SYMBOL_GPL(dax_zero_range);
+
+int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+ const struct iomap_ops *ops)
+{
+ unsigned int blocksize = i_blocksize(inode);
+ unsigned int off = pos & (blocksize - 1);
+
+ /* Block boundary? Nothing to do */
+ if (!off)
+ return 0;
+ return dax_zero_range(inode, pos, blocksize - off, did_zero, ops);
+}
+EXPORT_SYMBOL_GPL(dax_truncate_page);
static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
struct iov_iter *iter)
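
[Annotation] dax_truncate_page() above only zeroes the tail of a partially used block: it computes the in-block offset and hands dax_zero_range() the remaining blocksize - off bytes, doing nothing on a block boundary. The length computation, demonstrated standalone:

    #include <stdio.h>

    /* Mirrors the dax_truncate_page() math: bytes to zero from pos up to
     * the next block boundary (0 when pos is already aligned). Assumes
     * blocksize is a power of two, as i_blocksize() guarantees. */
    static unsigned int tail_zero_len(unsigned int blocksize,
                                      unsigned long long pos)
    {
            unsigned int off = pos & (blocksize - 1);

            return off ? blocksize - off : 0;
    }

    int main(void)
    {
            printf("%u\n", tail_zero_len(4096, 10000)); /* off 1808 -> 2288 */
            printf("%u\n", tail_zero_len(4096, 8192));  /* aligned -> 0 */
            return 0;
    }
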
@@ -1169,7 +1205,6 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
const struct iomap *iomap = &iomi->iomap;
loff_t length = iomap_length(iomi);
loff_t pos = iomi->pos;
- struct block_device *bdev = iomap->bdev;
struct dax_device *dax_dev = iomap->dax_dev;
loff_t end = pos + length, done = 0;
ssize_t ret = 0;
@@ -1203,9 +1238,8 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
while (pos < end) {
unsigned offset = pos & (PAGE_SIZE - 1);
const size_t size = ALIGN(length + offset, PAGE_SIZE);
- const sector_t sector = dax_iomap_sector(iomap, pos);
+ pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
ssize_t map_len;
- pgoff_t pgoff;
void *kaddr;
if (fatal_signal_pending(current)) {
@@ -1213,10 +1247,6 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
break;
}
- ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
- if (ret)
- break;
-
map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
&kaddr, NULL);
if (map_len < 0) {
@@ -1230,11 +1260,6 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
if (map_len > end - pos)
map_len = end - pos;
- /*
- * The userspace address for the memory copy has already been
- * validated via access_ok() in either vfs_read() or
- * vfs_write(), depending on which operation we are doing.
- */
if (iov_iter_rw(iter) == WRITE)
xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
map_len, iter);
@@ -1274,6 +1299,7 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
.inode = iocb->ki_filp->f_mapping->host,
.pos = iocb->ki_pos,
.len = iov_iter_count(iter),
+ .flags = IOMAP_DAX,
};
loff_t done = 0;
int ret;
@@ -1332,19 +1358,16 @@ static vm_fault_t dax_fault_synchronous_pfnp(pfn_t *pfnp, pfn_t pfn)
static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,
const struct iomap_iter *iter)
{
- sector_t sector = dax_iomap_sector(&iter->iomap, iter->pos);
- unsigned long vaddr = vmf->address;
vm_fault_t ret;
int error = 0;
switch (iter->iomap.type) {
case IOMAP_HOLE:
case IOMAP_UNWRITTEN:
- clear_user_highpage(vmf->cow_page, vaddr);
+ clear_user_highpage(vmf->cow_page, vmf->address);
break;
case IOMAP_MAPPED:
- error = copy_cow_page_dax(iter->iomap.bdev, iter->iomap.dax_dev,
- sector, vmf->cow_page, vaddr);
+ error = copy_cow_page_dax(vmf, iter);
break;
default:
WARN_ON_ONCE(1);
@@ -1430,7 +1453,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
.inode = mapping->host,
.pos = (loff_t)vmf->pgoff << PAGE_SHIFT,
.len = PAGE_SIZE,
- .flags = IOMAP_FAULT,
+ .flags = IOMAP_DAX | IOMAP_FAULT,
};
vm_fault_t ret = 0;
void *entry;
@@ -1539,7 +1562,7 @@ static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
struct iomap_iter iter = {
.inode = mapping->host,
.len = PMD_SIZE,
- .flags = IOMAP_FAULT,
+ .flags = IOMAP_DAX | IOMAP_FAULT,
};
vm_fault_t ret = VM_FAULT_FALLBACK;
pgoff_t max_pgoff;
diff --git a/fs/dcache.c b/fs/dcache.c
index cf871a81f4fd..c84269c6e8bf 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -115,10 +115,13 @@ static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}
-
-/* Statistics gathering. */
-struct dentry_stat_t dentry_stat = {
- .age_limit = 45,
+struct dentry_stat_t {
+ long nr_dentry;
+ long nr_unused;
+ long age_limit; /* age in seconds */
+ long want_pages; /* pages requested by system */
+ long nr_negative; /* # of unused negative dentries */
+ long dummy; /* Reserved for future use */
};
static DEFINE_PER_CPU(long, nr_dentry);
@@ -126,6 +129,10 @@ static DEFINE_PER_CPU(long, nr_dentry_unused);
static DEFINE_PER_CPU(long, nr_dentry_negative);
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+/* Statistics gathering. */
+static struct dentry_stat_t dentry_stat = {
+ .age_limit = 45,
+};
/*
* Here we resort to our own counters instead of using generic per-cpu counters
@@ -167,14 +174,32 @@ static long get_nr_dentry_negative(void)
return sum < 0 ? 0 : sum;
}
-int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
- size_t *lenp, loff_t *ppos)
+static int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
dentry_stat.nr_dentry = get_nr_dentry();
dentry_stat.nr_unused = get_nr_dentry_unused();
dentry_stat.nr_negative = get_nr_dentry_negative();
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
+
+static struct ctl_table fs_dcache_sysctls[] = {
+ {
+ .procname = "dentry-state",
+ .data = &dentry_stat,
+ .maxlen = 6*sizeof(long),
+ .mode = 0444,
+ .proc_handler = proc_nr_dentry,
+ },
+ { }
+};
+
+static int __init init_fs_dcache_sysctls(void)
+{
+ register_sysctl_init("fs", fs_dcache_sysctls);
+ return 0;
+}
+fs_initcall(init_fs_dcache_sysctls);
#endif
/*
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 7d162b0efbf0..950c63fa4d0b 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -147,7 +147,7 @@ static int debugfs_locked_down(struct inode *inode,
struct file *filp,
const struct file_operations *real_fops)
{
- if ((inode->i_mode & 07777) == 0444 &&
+ if ((inode->i_mode & 07777 & ~0444) == 0 &&
!(filp->f_mode & FMODE_WRITE) &&
!real_fops->unlocked_ioctl &&
!real_fops->compat_ioctl &&
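
[Annotation] The old debugfs test only matched files whose mode was exactly 0444, so stricter read-only modes such as 0400 or 0440 escaped the lockdown exemption; the new mask test accepts any mode whose permission bits are a subset of 0444. A quick comparison of the two predicates:

    #include <stdio.h>

    static int old_check(unsigned int mode) { return (mode & 07777) == 0444; }
    static int new_check(unsigned int mode) { return (mode & 07777 & ~0444) == 0; }

    int main(void)
    {
            unsigned int modes[] = { 0444, 0400, 0440, 0644 };

            for (unsigned int i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
                    printf("%04o: old=%d new=%d\n", modes[i],
                           old_check(modes[i]), new_check(modes[i]));
            /* 0400 and 0440 are read-only but only the new check matches;
             * 0644 has a write bit and fails both. */
            return 0;
    }
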
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 42e5a766d33c..4f25015aa534 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -621,8 +621,8 @@ void devpts_pty_kill(struct dentry *dentry)
dentry->d_fsdata = NULL;
drop_nlink(dentry->d_inode);
- fsnotify_unlink(d_inode(dentry->d_parent), dentry);
d_drop(dentry);
+ fsnotify_unlink(d_inode(dentry->d_parent), dentry);
dput(dentry); /* d_alloc_name() in devpts_pty_new() */
}
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index 31384e7d6f90..0d3833a124a3 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -216,8 +216,7 @@ static int do_uevent(struct dlm_ls *ls, int in)
return ls->ls_uevent_result;
}
-static int dlm_uevent(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env)
+static int dlm_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
{
struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
index e18476c85fa2..226a57c57ee6 100644
--- a/fs/erofs/data.c
+++ b/fs/erofs/data.c
@@ -192,6 +192,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
/* primary device by default */
map->m_bdev = sb->s_bdev;
map->m_daxdev = EROFS_SB(sb)->dax_dev;
+ map->m_dax_part_off = EROFS_SB(sb)->dax_part_off;
if (map->m_deviceid) {
down_read(&devs->rwsem);
@@ -202,6 +203,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
}
map->m_bdev = dif->bdev;
map->m_daxdev = dif->dax_dev;
+ map->m_dax_part_off = dif->dax_part_off;
up_read(&devs->rwsem);
} else if (devs->extra_devices) {
down_read(&devs->rwsem);
@@ -218,6 +220,7 @@ int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *map)
map->m_pa -= startoff;
map->m_bdev = dif->bdev;
map->m_daxdev = dif->dax_dev;
+ map->m_dax_part_off = dif->dax_part_off;
break;
}
}
@@ -248,9 +251,11 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
if (ret)
return ret;
- iomap->bdev = mdev.m_bdev;
- iomap->dax_dev = mdev.m_daxdev;
iomap->offset = map.m_la;
+ if (flags & IOMAP_DAX)
+ iomap->dax_dev = mdev.m_daxdev;
+ else
+ iomap->bdev = mdev.m_bdev;
iomap->length = map.m_llen;
iomap->flags = 0;
iomap->private = NULL;
@@ -277,6 +282,8 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
} else {
iomap->type = IOMAP_MAPPED;
iomap->addr = mdev.m_pa;
+ if (flags & IOMAP_DAX)
+ iomap->addr += mdev.m_dax_part_off;
}
return 0;
}
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
index 3db494a398b2..b8272fb95fd6 100644
--- a/fs/erofs/internal.h
+++ b/fs/erofs/internal.h
@@ -51,6 +51,7 @@ struct erofs_device_info {
char *path;
struct block_device *bdev;
struct dax_device *dax_dev;
+ u64 dax_part_off;
u32 blocks;
u32 mapped_blkaddr;
@@ -115,6 +116,7 @@ struct erofs_sb_info {
#endif /* CONFIG_EROFS_FS_ZIP */
struct erofs_dev_context *devs;
struct dax_device *dax_dev;
+ u64 dax_part_off;
u64 total_blocks;
u32 primarydevice_blocks;
@@ -467,6 +469,7 @@ static inline int z_erofs_map_blocks_iter(struct inode *inode,
struct erofs_map_dev {
struct block_device *m_bdev;
struct dax_device *m_daxdev;
+ u64 m_dax_part_off;
erofs_off_t m_pa;
unsigned int m_deviceid;
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
index 5c137647fa8a..915eefe0d7e2 100644
--- a/fs/erofs/super.c
+++ b/fs/erofs/super.c
@@ -267,7 +267,7 @@ static int erofs_init_devices(struct super_block *sb,
break;
}
dif->bdev = bdev;
- dif->dax_dev = fs_dax_get_by_bdev(bdev);
+ dif->dax_dev = fs_dax_get_by_bdev(bdev, &dif->dax_part_off);
dif->blocks = le32_to_cpu(dis->blocks);
dif->mapped_blkaddr = le32_to_cpu(dis->mapped_blkaddr);
sbi->total_blocks += dif->blocks;
@@ -597,7 +597,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
sb->s_fs_info = sbi;
sbi->opt = ctx->opt;
- sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
+ sbi->dax_dev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->dax_part_off);
sbi->devs = ctx->devs;
ctx->devs = NULL;
@@ -605,10 +605,13 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
return err;
- if (test_opt(&sbi->opt, DAX_ALWAYS) &&
- !dax_supported(sbi->dax_dev, sb->s_bdev, EROFS_BLKSIZ, 0, bdev_nr_sectors(sb->s_bdev))) {
- errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
- clear_opt(&sbi->opt, DAX_ALWAYS);
+ if (test_opt(&sbi->opt, DAX_ALWAYS)) {
+ BUILD_BUG_ON(EROFS_BLKSIZ != PAGE_SIZE);
+
+ if (!sbi->dax_dev) {
+ errorfc(fc, "DAX unsupported by block device. Turning off DAX.");
+ clear_opt(&sbi->opt, DAX_ALWAYS);
+ }
}
sb->s_flags |= SB_RDONLY | SB_NOATIME;
sb->s_maxbytes = MAX_LFS_FILESIZE;
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index 498b7666efe8..423bc1a61da5 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -810,68 +810,11 @@ static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi,
return false;
}
-static void z_erofs_decompressqueue_work(struct work_struct *work);
-static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
- bool sync, int bios)
-{
- struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
-
- /* wake up the caller thread for sync decompression */
- if (sync) {
- unsigned long flags;
-
- spin_lock_irqsave(&io->u.wait.lock, flags);
- if (!atomic_add_return(bios, &io->pending_bios))
- wake_up_locked(&io->u.wait);
- spin_unlock_irqrestore(&io->u.wait.lock, flags);
- return;
- }
-
- if (atomic_add_return(bios, &io->pending_bios))
- return;
- /* Use workqueue and sync decompression for atomic contexts only */
- if (in_atomic() || irqs_disabled()) {
- queue_work(z_erofs_workqueue, &io->u.work);
- /* enable sync decompression for readahead */
- if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
- sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
- return;
- }
- z_erofs_decompressqueue_work(&io->u.work);
-}
-
static bool z_erofs_page_is_invalidated(struct page *page)
{
return !page->mapping && !z_erofs_is_shortlived_page(page);
}
-static void z_erofs_decompressqueue_endio(struct bio *bio)
-{
- tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
- struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
- blk_status_t err = bio->bi_status;
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
-
- bio_for_each_segment_all(bvec, bio, iter_all) {
- struct page *page = bvec->bv_page;
-
- DBG_BUGON(PageUptodate(page));
- DBG_BUGON(z_erofs_page_is_invalidated(page));
-
- if (err)
- SetPageError(page);
-
- if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
- if (!err)
- SetPageUptodate(page);
- unlock_page(page);
- }
- }
- z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
- bio_put(bio);
-}
-
static int z_erofs_decompress_pcluster(struct super_block *sb,
struct z_erofs_pcluster *pcl,
struct page **pagepool)
@@ -1123,6 +1066,35 @@ static void z_erofs_decompressqueue_work(struct work_struct *work)
kvfree(bgq);
}
+static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
+ bool sync, int bios)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(io->sb);
+
+ /* wake up the caller thread for sync decompression */
+ if (sync) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&io->u.wait.lock, flags);
+ if (!atomic_add_return(bios, &io->pending_bios))
+ wake_up_locked(&io->u.wait);
+ spin_unlock_irqrestore(&io->u.wait.lock, flags);
+ return;
+ }
+
+ if (atomic_add_return(bios, &io->pending_bios))
+ return;
+ /* Use workqueue and sync decompression for atomic contexts only */
+ if (in_atomic() || irqs_disabled()) {
+ queue_work(z_erofs_workqueue, &io->u.work);
+ /* enable sync decompression for readahead */
+ if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO)
+ sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON;
+ return;
+ }
+ z_erofs_decompressqueue_work(&io->u.work);
+}
+
static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
unsigned int nr,
struct page **pagepool,
@@ -1300,6 +1272,33 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
qtail[JQ_BYPASS] = &pcl->next;
}
+static void z_erofs_decompressqueue_endio(struct bio *bio)
+{
+ tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private);
+ struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t);
+ blk_status_t err = bio->bi_status;
+ struct bio_vec *bvec;
+ struct bvec_iter_all iter_all;
+
+ bio_for_each_segment_all(bvec, bio, iter_all) {
+ struct page *page = bvec->bv_page;
+
+ DBG_BUGON(PageUptodate(page));
+ DBG_BUGON(z_erofs_page_is_invalidated(page));
+
+ if (err)
+ SetPageError(page);
+
+ if (erofs_page_is_managed(EROFS_SB(q->sb), page)) {
+ if (!err)
+ SetPageUptodate(page);
+ unlock_page(page);
+ }
+ }
+ z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1);
+ bio_put(bio);
+}
+
static void z_erofs_submit_queue(struct super_block *sb,
struct z_erofs_decompress_frontend *f,
struct page **pagepool,
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
index 18d7fd1a5064..361b1d6e4bf9 100644
--- a/fs/erofs/zmap.c
+++ b/fs/erofs/zmap.c
@@ -630,6 +630,13 @@ static int z_erofs_do_map_blocks(struct inode *inode,
if (endoff >= m.clusterofs) {
m.headtype = m.type;
map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
+ /*
+ * For ztailpacking files, in order to inline data more
+ * effectively, special EOF lclusters are now supported
+ * which can have three parts at most.
+ */
+ if (ztailpacking && end > inode->i_size)
+ end = inode->i_size;
break;
}
/* m.lcn should be >= 1 if endoff < m.clusterofs */
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 06f4c5ae1451..e2daa940ebce 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -307,7 +307,7 @@ static void unlist_file(struct epitems_head *head)
static long long_zero;
static long long_max = LONG_MAX;
-struct ctl_table epoll_table[] = {
+static struct ctl_table epoll_table[] = {
{
.procname = "max_user_watches",
.data = &max_user_watches,
@@ -319,6 +319,13 @@ struct ctl_table epoll_table[] = {
},
{ }
};
+
+static void __init epoll_sysctls_init(void)
+{
+ register_sysctl("fs/epoll", epoll_table);
+}
+#else
+#define epoll_sysctls_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
static const struct file_operations eventpoll_fops;
@@ -2378,6 +2385,7 @@ static int __init eventpoll_init(void)
/* Allocates slab cache used to allocate "struct eppoll_entry" */
pwq_cache = kmem_cache_create("eventpoll_pwq",
sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
+ epoll_sysctls_init();
ephead_cache = kmem_cache_create("ep_head",
sizeof(struct epitems_head), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);
diff --git a/fs/exec.c b/fs/exec.c
index 537d92c41105..79f2c9483302 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -65,6 +65,7 @@
#include <linux/vmalloc.h>
#include <linux/io_uring.h>
#include <linux/syscall_user_dispatch.h>
+#include <linux/coredump.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
@@ -1045,7 +1046,7 @@ static int de_thread(struct task_struct *tsk)
* Kill all other threads in the thread group.
*/
spin_lock_irq(lock);
- if (signal_group_exit(sig)) {
+ if ((sig->flags & SIGNAL_GROUP_EXIT) || sig->group_exec_task) {
/*
* Another group action in progress, just
* return so that the signal is processed.
@@ -1054,7 +1055,7 @@ static int de_thread(struct task_struct *tsk)
return -EAGAIN;
}
- sig->group_exit_task = tsk;
+ sig->group_exec_task = tsk;
sig->notify_count = zap_other_threads(tsk);
if (!thread_group_leader(tsk))
sig->notify_count--;
@@ -1082,7 +1083,7 @@ static int de_thread(struct task_struct *tsk)
write_lock_irq(&tasklist_lock);
/*
* Do this under tasklist_lock to ensure that
- * exit_notify() can't miss ->group_exit_task
+ * exit_notify() can't miss ->group_exec_task
*/
sig->notify_count = -1;
if (likely(leader->exit_state))
@@ -1149,7 +1150,7 @@ static int de_thread(struct task_struct *tsk)
release_task(leader);
}
- sig->group_exit_task = NULL;
+ sig->group_exec_task = NULL;
sig->notify_count = 0;
no_thread_group:
@@ -1162,7 +1163,7 @@ no_thread_group:
killed:
/* protects against exit_notify() and __exit_signal() */
read_lock(&tasklist_lock);
- sig->group_exit_task = NULL;
+ sig->group_exec_task = NULL;
sig->notify_count = 0;
read_unlock(&tasklist_lock);
return -EAGAIN;
@@ -1207,7 +1208,8 @@ static int unshare_sighand(struct task_struct *me)
char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
task_lock(tsk);
- strncpy(buf, tsk->comm, buf_size);
+ /* Always NUL terminated and zero-padded */
+ strscpy_pad(buf, tsk->comm, buf_size);
task_unlock(tsk);
return buf;
}
@@ -1222,7 +1224,7 @@ void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
task_lock(tsk);
trace_task_rename(tsk, buf);
- strlcpy(tsk->comm, buf, sizeof(tsk->comm));
+ strscpy_pad(tsk->comm, buf, sizeof(tsk->comm));
task_unlock(tsk);
perf_event_comm(tsk, exec);
}
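
[Annotation] strncpy() neither guarantees NUL termination nor zero-fills in a way that documents intent; the kernel's strscpy_pad() always terminates and pads the remainder of the buffer. A userspace model of its contract (not the kernel implementation; -1 stands in for -E2BIG):

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    static ptrdiff_t strscpy_pad_model(char *dst, const char *src, size_t size)
    {
            size_t len;

            if (size == 0)
                    return -1;
            len = strnlen(src, size - 1);
            memcpy(dst, src, len);
            memset(dst + len, 0, size - len);  /* terminator + padding */
            return (strnlen(src, size) > len) ? -1 : (ptrdiff_t)len;
    }

    int main(void)
    {
            char comm[16];

            strscpy_pad_model(comm, "a-rather-long-task-name", sizeof(comm));
            printf("%s\n", comm);   /* truncated but always NUL terminated */
            return 0;
    }
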
@@ -1307,6 +1309,8 @@ int begin_new_exec(struct linux_binprm * bprm)
*/
force_uaccess_begin();
+ if (me->flags & PF_KTHREAD)
+ free_kthread_struct(me);
me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
PF_NOFREEZE | PF_NO_SETAFFINITY);
flush_thread();
@@ -2096,3 +2100,37 @@ COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
argv, envp, flags);
}
#endif
+
+#ifdef CONFIG_SYSCTL
+
+static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+ if (!error)
+ validate_coredump_safety();
+ return error;
+}
+
+static struct ctl_table fs_exec_sysctls[] = {
+ {
+ .procname = "suid_dumpable",
+ .data = &suid_dumpable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax_coredump,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+ { }
+};
+
+static int __init init_fs_exec_sysctls(void)
+{
+ register_sysctl_init("fs", fs_exec_sysctls);
+ return 0;
+}
+
+fs_initcall(init_fs_exec_sysctls);
+#endif /* CONFIG_SYSCTL */
diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index cc5cffc4a769..03f142307174 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -105,7 +105,7 @@ int exfat_load_bitmap(struct super_block *sb)
struct exfat_dentry *ep;
struct buffer_head *bh;
- ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
if (!ep)
return -EIO;
diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c
index cb1c0d8c1714..a27b55ec060a 100644
--- a/fs/exfat/dir.c
+++ b/fs/exfat/dir.c
@@ -64,7 +64,6 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
{
int i, dentries_per_clu, dentries_per_clu_bits = 0, num_ext;
unsigned int type, clu_offset, max_dentries;
- sector_t sector;
struct exfat_chain dir, clu;
struct exfat_uni_name uni_name;
struct exfat_dentry *ep;
@@ -115,7 +114,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
i = dentry & (dentries_per_clu - 1);
for ( ; i < dentries_per_clu; i++, dentry++) {
- ep = exfat_get_dentry(sb, &clu, i, &bh, &sector);
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
if (!ep)
return -EIO;
@@ -156,7 +155,7 @@ static int exfat_readdir(struct inode *inode, loff_t *cpos, struct exfat_dir_ent
dir_entry->namebuf.lfnbuf_len);
brelse(bh);
- ep = exfat_get_dentry(sb, &clu, i + 1, &bh, NULL);
+ ep = exfat_get_dentry(sb, &clu, i + 1, &bh);
if (!ep)
return -EIO;
dir_entry->size =
@@ -445,7 +444,6 @@ int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct timespec64 ts = current_time(inode);
- sector_t sector;
struct exfat_dentry *ep;
struct buffer_head *bh;
@@ -453,7 +451,7 @@ int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
* We cannot use exfat_get_dentry_set here because file ep is not
* initialized yet.
*/
- ep = exfat_get_dentry(sb, p_dir, entry, &bh, &sector);
+ ep = exfat_get_dentry(sb, p_dir, entry, &bh);
if (!ep)
return -EIO;
@@ -477,7 +475,7 @@ int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir,
exfat_update_bh(bh, IS_DIRSYNC(inode));
brelse(bh);
- ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh, &sector);
+ ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh);
if (!ep)
return -EIO;
@@ -496,12 +494,11 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
struct super_block *sb = inode->i_sb;
int ret = 0;
int i, num_entries;
- sector_t sector;
u16 chksum;
struct exfat_dentry *ep, *fep;
struct buffer_head *fbh, *bh;
- fep = exfat_get_dentry(sb, p_dir, entry, &fbh, &sector);
+ fep = exfat_get_dentry(sb, p_dir, entry, &fbh);
if (!fep)
return -EIO;
@@ -509,7 +506,7 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir,
chksum = exfat_calc_chksum16(fep, DENTRY_SIZE, 0, CS_DIR_ENTRY);
for (i = 1; i < num_entries; i++) {
- ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, NULL);
+ ep = exfat_get_dentry(sb, p_dir, entry + i, &bh);
if (!ep) {
ret = -EIO;
goto release_fbh;
@@ -531,13 +528,12 @@ int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir,
{
struct super_block *sb = inode->i_sb;
int i;
- sector_t sector;
unsigned short *uniname = p_uniname->name;
struct exfat_dentry *ep;
struct buffer_head *bh;
int sync = IS_DIRSYNC(inode);
- ep = exfat_get_dentry(sb, p_dir, entry, &bh, &sector);
+ ep = exfat_get_dentry(sb, p_dir, entry, &bh);
if (!ep)
return -EIO;
@@ -545,7 +541,7 @@ int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir,
exfat_update_bh(bh, sync);
brelse(bh);
- ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh, &sector);
+ ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh);
if (!ep)
return -EIO;
@@ -555,7 +551,7 @@ int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir,
brelse(bh);
for (i = EXFAT_FIRST_CLUSTER; i < num_entries; i++) {
- ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, &sector);
+ ep = exfat_get_dentry(sb, p_dir, entry + i, &bh);
if (!ep)
return -EIO;
@@ -574,12 +570,11 @@ int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir,
{
struct super_block *sb = inode->i_sb;
int i;
- sector_t sector;
struct exfat_dentry *ep;
struct buffer_head *bh;
for (i = order; i < num_entries; i++) {
- ep = exfat_get_dentry(sb, p_dir, entry + i, &bh, &sector);
+ ep = exfat_get_dentry(sb, p_dir, entry + i, &bh);
if (!ep)
return -EIO;
@@ -656,8 +651,8 @@ static int exfat_walk_fat_chain(struct super_block *sb,
return 0;
}
-int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir,
- int entry, sector_t *sector, int *offset)
+static int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir,
+ int entry, sector_t *sector, int *offset)
{
int ret;
unsigned int off, clu = 0;
@@ -717,8 +712,7 @@ static int exfat_dir_readahead(struct super_block *sb, sector_t sec)
}
struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
- struct exfat_chain *p_dir, int entry, struct buffer_head **bh,
- sector_t *sector)
+ struct exfat_chain *p_dir, int entry, struct buffer_head **bh)
{
unsigned int dentries_per_page = EXFAT_B_TO_DEN(PAGE_SIZE);
int off;
@@ -740,8 +734,6 @@ struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
if (!*bh)
return NULL;
- if (sector)
- *sector = sec;
return (struct exfat_dentry *)((*bh)->b_data + off);
}
@@ -892,7 +884,7 @@ struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
es->bh[es->num_bh++] = bh;
}
- /* validiate cached dentries */
+ /* validate cached dentries */
for (i = 1; i < num_entries; i++) {
ep = exfat_get_dentry_cached(es, i);
if (!exfat_validate_entry(exfat_get_entry_type(ep), &mode))
@@ -960,7 +952,7 @@ rewind:
if (rewind && dentry == end_eidx)
goto not_found;
- ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
if (!ep)
return -EIO;
@@ -1145,7 +1137,7 @@ int exfat_count_ext_entries(struct super_block *sb, struct exfat_chain *p_dir,
struct buffer_head *bh;
for (i = 0, entry++; i < ep->dentry.file.num_ext; i++, entry++) {
- ext_ep = exfat_get_dentry(sb, p_dir, entry, &bh, NULL);
+ ext_ep = exfat_get_dentry(sb, p_dir, entry, &bh);
if (!ext_ep)
return -EIO;
@@ -1175,7 +1167,7 @@ int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir)
while (clu.dir != EXFAT_EOF_CLUSTER) {
for (i = 0; i < dentries_per_clu; i++) {
- ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
if (!ep)
return -EIO;
entry_type = exfat_get_entry_type(ep);
diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h
index 1d6da61157c9..619e5b4bed10 100644
--- a/fs/exfat/exfat_fs.h
+++ b/fs/exfat/exfat_fs.h
@@ -10,7 +10,6 @@
#include <linux/ratelimit.h>
#include <linux/nls.h>
-#define EXFAT_SUPER_MAGIC 0x2011BAB0UL
#define EXFAT_ROOT_INO 1
#define EXFAT_CLUSTERS_UNTRACKED (~0u)
@@ -459,11 +458,8 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei,
struct exfat_chain *p_dir, struct exfat_uni_name *p_uniname,
int num_entries, unsigned int type, struct exfat_hint *hint_opt);
int exfat_alloc_new_dir(struct inode *inode, struct exfat_chain *clu);
-int exfat_find_location(struct super_block *sb, struct exfat_chain *p_dir,
- int entry, sector_t *sector, int *offset);
struct exfat_dentry *exfat_get_dentry(struct super_block *sb,
- struct exfat_chain *p_dir, int entry, struct buffer_head **bh,
- sector_t *sector);
+ struct exfat_chain *p_dir, int entry, struct buffer_head **bh);
struct exfat_dentry *exfat_get_dentry_cached(struct exfat_entry_set_cache *es,
int num);
struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb,
diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c
index e949e563443c..a3464e56a7e1 100644
--- a/fs/exfat/fatent.c
+++ b/fs/exfat/fatent.c
@@ -84,9 +84,7 @@ int exfat_ent_set(struct super_block *sb, unsigned int loc,
static inline bool is_valid_cluster(struct exfat_sb_info *sbi,
unsigned int clus)
{
- if (clus < EXFAT_FIRST_CLUSTER || sbi->num_clusters <= clus)
- return false;
- return true;
+ return clus >= EXFAT_FIRST_CLUSTER && clus < sbi->num_clusters;
}
int exfat_ent_get(struct super_block *sb, unsigned int loc,
diff --git a/fs/exfat/file.c b/fs/exfat/file.c
index 6af0191b648f..d890fd34bb2d 100644
--- a/fs/exfat/file.c
+++ b/fs/exfat/file.c
@@ -110,8 +110,7 @@ int __exfat_truncate(struct inode *inode, loff_t new_size)
exfat_set_volume_dirty(sb);
num_clusters_new = EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi);
- num_clusters_phys =
- EXFAT_B_TO_CLU_ROUND_UP(EXFAT_I(inode)->i_size_ondisk, sbi);
+ num_clusters_phys = EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi);
exfat_chain_set(&clu, ei->start_clu, num_clusters_phys, ei->flags);
@@ -228,12 +227,13 @@ void exfat_truncate(struct inode *inode, loff_t size)
{
struct super_block *sb = inode->i_sb;
struct exfat_sb_info *sbi = EXFAT_SB(sb);
+ struct exfat_inode_info *ei = EXFAT_I(inode);
unsigned int blocksize = i_blocksize(inode);
loff_t aligned_size;
int err;
mutex_lock(&sbi->s_lock);
- if (EXFAT_I(inode)->start_clu == 0) {
+ if (ei->start_clu == 0) {
/*
* Empty start_clu != ~0 (not allocated)
*/
@@ -251,8 +251,8 @@ void exfat_truncate(struct inode *inode, loff_t size)
else
mark_inode_dirty(inode);
- inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1)) &
- ~(sbi->cluster_size - 1)) >> inode->i_blkbits;
+ inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >>
+ inode->i_blkbits;
write_size:
aligned_size = i_size_read(inode);
if (aligned_size & (blocksize - 1)) {
@@ -260,11 +260,11 @@ write_size:
aligned_size++;
}
- if (EXFAT_I(inode)->i_size_ondisk > i_size_read(inode))
- EXFAT_I(inode)->i_size_ondisk = aligned_size;
+ if (ei->i_size_ondisk > i_size_read(inode))
+ ei->i_size_ondisk = aligned_size;
- if (EXFAT_I(inode)->i_size_aligned > i_size_read(inode))
- EXFAT_I(inode)->i_size_aligned = aligned_size;
+ if (ei->i_size_aligned > i_size_read(inode))
+ ei->i_size_aligned = aligned_size;
mutex_unlock(&sbi->s_lock);
}
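
Editorial note: the i_blocks computations above swap open-coded mask arithmetic for round_up(). For a power-of-two cluster_size the two forms compute the same value; a small sketch under that assumption (the kernel defines round_up() differently, but equivalently, for power-of-two alignments):

#include <assert.h>
#include <stdio.h>

/* round_up() equivalent for a power-of-two alignment; the kernel's macro
 * is written differently but yields the same result. */
#define ROUND_UP_POW2(x, n)	(((x) + (n) - 1) & ~((n) - 1))

int main(void)
{
	unsigned long long size = 70000;		/* i_size in bytes */
	unsigned long long cluster_size = 32768;	/* power of two */
	unsigned int blkbits = 9;			/* 512-byte blocks */

	/* Old form and new form agree for power-of-two cluster sizes. */
	unsigned long long old_form = (size + (cluster_size - 1)) &
				      ~(cluster_size - 1);
	unsigned long long new_form = ROUND_UP_POW2(size, cluster_size);

	assert(old_form == new_form);
	printf("i_blocks = %llu\n", new_form >> blkbits);	/* 192 */
	return 0;
}
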
diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c
index 1c7aa1ea4724..df805bd05508 100644
--- a/fs/exfat/inode.c
+++ b/fs/exfat/inode.c
@@ -31,7 +31,7 @@ static int __exfat_write_inode(struct inode *inode, int sync)
return 0;
/*
- * If the indode is already unlinked, there is no need for updating it.
+ * If the inode is already unlinked, there is no need for updating it.
*/
if (ei->dir.dir == DIR_DELETED)
return 0;
@@ -114,10 +114,9 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset,
unsigned int local_clu_offset = clu_offset;
unsigned int num_to_be_allocated = 0, num_clusters = 0;
- if (EXFAT_I(inode)->i_size_ondisk > 0)
+ if (ei->i_size_ondisk > 0)
num_clusters =
- EXFAT_B_TO_CLU_ROUND_UP(EXFAT_I(inode)->i_size_ondisk,
- sbi);
+ EXFAT_B_TO_CLU_ROUND_UP(ei->i_size_ondisk, sbi);
if (clu_offset >= num_clusters)
num_to_be_allocated = clu_offset - num_clusters + 1;
@@ -416,10 +415,10 @@ static int exfat_write_end(struct file *file, struct address_space *mapping,
err = generic_write_end(file, mapping, pos, len, copied, pagep, fsdata);
- if (EXFAT_I(inode)->i_size_aligned < i_size_read(inode)) {
+ if (ei->i_size_aligned < i_size_read(inode)) {
exfat_fs_error(inode->i_sb,
"invalid size(size(%llu) > aligned(%llu)\n",
- i_size_read(inode), EXFAT_I(inode)->i_size_aligned);
+ i_size_read(inode), ei->i_size_aligned);
return -EIO;
}
@@ -603,8 +602,8 @@ static int exfat_fill_inode(struct inode *inode, struct exfat_dir_entry *info)
exfat_save_attr(inode, info->attr);
- inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1)) &
- ~((loff_t)sbi->cluster_size - 1)) >> inode->i_blkbits;
+ inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >>
+ inode->i_blkbits;
inode->i_mtime = info->mtime;
inode->i_ctime = info->mtime;
ei->i_crtime = info->crtime;
diff --git a/fs/exfat/misc.c b/fs/exfat/misc.c
index d34e6193258d..d5bd8e6d9741 100644
--- a/fs/exfat/misc.c
+++ b/fs/exfat/misc.c
@@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
+#include <linux/blk_types.h>
#include "exfat_raw.h"
#include "exfat_fs.h"
@@ -180,7 +181,7 @@ int exfat_update_bhs(struct buffer_head **bhs, int nr_bhs, int sync)
set_buffer_uptodate(bhs[i]);
mark_buffer_dirty(bhs[i]);
if (sync)
- write_dirty_buffer(bhs[i], 0);
+ write_dirty_buffer(bhs[i], REQ_SYNC);
}
for (i = 0; i < nr_bhs && sync; i++) {
diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c
index 24b41103d1cc..af4eb39cc0c3 100644
--- a/fs/exfat/namei.c
+++ b/fs/exfat/namei.c
@@ -229,7 +229,7 @@ static int exfat_search_empty_slot(struct super_block *sb,
i = dentry & (dentries_per_clu - 1);
for (; i < dentries_per_clu; i++, dentry++) {
- ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
if (!ep)
return -EIO;
type = exfat_get_entry_type(ep);
@@ -306,7 +306,6 @@ static int exfat_find_empty_entry(struct inode *inode,
{
int dentry;
unsigned int ret, last_clu;
- sector_t sector;
loff_t size = 0;
struct exfat_chain clu;
struct exfat_dentry *ep = NULL;
@@ -379,7 +378,7 @@ static int exfat_find_empty_entry(struct inode *inode,
struct buffer_head *bh;
ep = exfat_get_dentry(sb,
- &(ei->dir), ei->entry + 1, &bh, &sector);
+ &(ei->dir), ei->entry + 1, &bh);
if (!ep)
return -EIO;
@@ -395,9 +394,9 @@ static int exfat_find_empty_entry(struct inode *inode,
/* directory inode should be updated in here */
i_size_write(inode, size);
- EXFAT_I(inode)->i_size_ondisk += sbi->cluster_size;
- EXFAT_I(inode)->i_size_aligned += sbi->cluster_size;
- EXFAT_I(inode)->flags = p_dir->flags;
+ ei->i_size_ondisk += sbi->cluster_size;
+ ei->i_size_aligned += sbi->cluster_size;
+ ei->flags = p_dir->flags;
inode->i_blocks += 1 << sbi->sect_per_clus_bits;
}
@@ -779,7 +778,6 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
struct inode *inode = dentry->d_inode;
struct exfat_inode_info *ei = EXFAT_I(inode);
struct buffer_head *bh;
- sector_t sector;
int num_entries, entry, err = 0;
mutex_lock(&EXFAT_SB(sb)->s_lock);
@@ -791,7 +789,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry)
goto unlock;
}
- ep = exfat_get_dentry(sb, &cdir, entry, &bh, &sector);
+ ep = exfat_get_dentry(sb, &cdir, entry, &bh);
if (!ep) {
err = -EIO;
goto unlock;
@@ -895,7 +893,7 @@ static int exfat_check_dir_empty(struct super_block *sb,
while (clu.dir != EXFAT_EOF_CLUSTER) {
for (i = 0; i < dentries_per_clu; i++) {
- ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
if (!ep)
return -EIO;
type = exfat_get_entry_type(ep);
@@ -932,7 +930,6 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
struct exfat_sb_info *sbi = EXFAT_SB(sb);
struct exfat_inode_info *ei = EXFAT_I(inode);
struct buffer_head *bh;
- sector_t sector;
int num_entries, entry, err;
mutex_lock(&EXFAT_SB(inode->i_sb)->s_lock);
@@ -957,7 +954,7 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry)
goto unlock;
}
- ep = exfat_get_dentry(sb, &cdir, entry, &bh, &sector);
+ ep = exfat_get_dentry(sb, &cdir, entry, &bh);
if (!ep) {
err = -EIO;
goto unlock;
@@ -1005,13 +1002,12 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
struct exfat_inode_info *ei)
{
int ret, num_old_entries, num_new_entries;
- sector_t sector_old, sector_new;
struct exfat_dentry *epold, *epnew;
struct super_block *sb = inode->i_sb;
struct buffer_head *new_bh, *old_bh;
int sync = IS_DIRSYNC(inode);
- epold = exfat_get_dentry(sb, p_dir, oldentry, &old_bh, &sector_old);
+ epold = exfat_get_dentry(sb, p_dir, oldentry, &old_bh);
if (!epold)
return -EIO;
@@ -1032,8 +1028,7 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
if (newentry < 0)
return newentry; /* -EIO or -ENOSPC */
- epnew = exfat_get_dentry(sb, p_dir, newentry, &new_bh,
- &sector_new);
+ epnew = exfat_get_dentry(sb, p_dir, newentry, &new_bh);
if (!epnew)
return -EIO;
@@ -1046,12 +1041,10 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
brelse(old_bh);
brelse(new_bh);
- epold = exfat_get_dentry(sb, p_dir, oldentry + 1, &old_bh,
- &sector_old);
+ epold = exfat_get_dentry(sb, p_dir, oldentry + 1, &old_bh);
if (!epold)
return -EIO;
- epnew = exfat_get_dentry(sb, p_dir, newentry + 1, &new_bh,
- &sector_new);
+ epnew = exfat_get_dentry(sb, p_dir, newentry + 1, &new_bh);
if (!epnew) {
brelse(old_bh);
return -EIO;
@@ -1093,12 +1086,11 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
struct exfat_uni_name *p_uniname, struct exfat_inode_info *ei)
{
int ret, newentry, num_new_entries, num_old_entries;
- sector_t sector_mov, sector_new;
struct exfat_dentry *epmov, *epnew;
struct super_block *sb = inode->i_sb;
struct buffer_head *mov_bh, *new_bh;
- epmov = exfat_get_dentry(sb, p_olddir, oldentry, &mov_bh, &sector_mov);
+ epmov = exfat_get_dentry(sb, p_olddir, oldentry, &mov_bh);
if (!epmov)
return -EIO;
@@ -1116,7 +1108,7 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
if (newentry < 0)
return newentry; /* -EIO or -ENOSPC */
- epnew = exfat_get_dentry(sb, p_newdir, newentry, &new_bh, &sector_new);
+ epnew = exfat_get_dentry(sb, p_newdir, newentry, &new_bh);
if (!epnew)
return -EIO;
@@ -1129,12 +1121,10 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir,
brelse(mov_bh);
brelse(new_bh);
- epmov = exfat_get_dentry(sb, p_olddir, oldentry + 1, &mov_bh,
- &sector_mov);
+ epmov = exfat_get_dentry(sb, p_olddir, oldentry + 1, &mov_bh);
if (!epmov)
return -EIO;
- epnew = exfat_get_dentry(sb, p_newdir, newentry + 1, &new_bh,
- &sector_new);
+ epnew = exfat_get_dentry(sb, p_newdir, newentry + 1, &new_bh);
if (!epnew) {
brelse(mov_bh);
return -EIO;
@@ -1216,7 +1206,7 @@ static int __exfat_rename(struct inode *old_parent_inode,
exfat_chain_dup(&olddir, &ei->dir);
dentry = ei->entry;
- ep = exfat_get_dentry(sb, &olddir, dentry, &old_bh, NULL);
+ ep = exfat_get_dentry(sb, &olddir, dentry, &old_bh);
if (!ep) {
ret = -EIO;
goto out;
@@ -1237,7 +1227,7 @@ static int __exfat_rename(struct inode *old_parent_inode,
p_dir = &(new_ei->dir);
new_entry = new_ei->entry;
- ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh, NULL);
+ ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh);
if (!ep)
goto out;
@@ -1277,7 +1267,7 @@ static int __exfat_rename(struct inode *old_parent_inode,
if (!ret && new_inode) {
/* delete entries of new_dir */
- ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh, NULL);
+ ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh);
if (!ep) {
ret = -EIO;
goto del_out;
diff --git a/fs/exfat/nls.c b/fs/exfat/nls.c
index 314d5407a1be..ef115e673406 100644
--- a/fs/exfat/nls.c
+++ b/fs/exfat/nls.c
@@ -761,7 +761,7 @@ int exfat_create_upcase_table(struct super_block *sb)
while (clu.dir != EXFAT_EOF_CLUSTER) {
for (i = 0; i < sbi->dentries_per_clu; i++) {
- ep = exfat_get_dentry(sb, &clu, i, &bh, NULL);
+ ep = exfat_get_dentry(sb, &clu, i, &bh);
if (!ep)
return -EIO;
diff --git a/fs/exfat/super.c b/fs/exfat/super.c
index 5539ffc20d16..8c9fb7dcec16 100644
--- a/fs/exfat/super.c
+++ b/fs/exfat/super.c
@@ -17,6 +17,7 @@
#include <linux/iversion.h>
#include <linux/nls.h>
#include <linux/buffer_head.h>
+#include <linux/magic.h>
#include "exfat_raw.h"
#include "exfat_fs.h"
@@ -364,11 +365,11 @@ static int exfat_read_root(struct inode *inode)
inode->i_op = &exfat_dir_inode_operations;
inode->i_fop = &exfat_dir_operations;
- inode->i_blocks = ((i_size_read(inode) + (sbi->cluster_size - 1))
- & ~(sbi->cluster_size - 1)) >> inode->i_blkbits;
- EXFAT_I(inode)->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff;
- EXFAT_I(inode)->i_size_aligned = i_size_read(inode);
- EXFAT_I(inode)->i_size_ondisk = i_size_read(inode);
+ inode->i_blocks = round_up(i_size_read(inode), sbi->cluster_size) >>
+ inode->i_blkbits;
+ ei->i_pos = ((loff_t)sbi->root_dir << 32) | 0xffffffff;
+ ei->i_size_aligned = i_size_read(inode);
+ ei->i_size_ondisk = i_size_read(inode);
exfat_save_attr(inode, ATTR_SUBDIR);
inode->i_mtime = inode->i_atime = inode->i_ctime = ei->i_crtime =
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index 3be9dd6412b7..d4f306aa5ace 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -118,6 +118,7 @@ struct ext2_sb_info {
spinlock_t s_lock;
struct mb_cache *s_ea_block_cache;
struct dax_device *s_daxdev;
+ u64 s_dax_part_off;
};
static inline spinlock_t *
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 333fa62661d5..602578b72d8c 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -36,6 +36,7 @@
#include <linux/iomap.h>
#include <linux/namei.h>
#include <linux/uio.h>
+#include <linux/dax.h>
#include "ext2.h"
#include "acl.h"
#include "xattr.h"
@@ -816,9 +817,11 @@ static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
return ret;
iomap->flags = 0;
- iomap->bdev = inode->i_sb->s_bdev;
iomap->offset = (u64)first_block << blkbits;
- iomap->dax_dev = sbi->s_daxdev;
+ if (flags & IOMAP_DAX)
+ iomap->dax_dev = sbi->s_daxdev;
+ else
+ iomap->bdev = inode->i_sb->s_bdev;
if (ret == 0) {
iomap->type = IOMAP_HOLE;
@@ -827,6 +830,8 @@ static int ext2_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
} else {
iomap->type = IOMAP_MAPPED;
iomap->addr = (u64)bno << blkbits;
+ if (flags & IOMAP_DAX)
+ iomap->addr += sbi->s_dax_part_off;
iomap->length = (u64)ret << blkbits;
iomap->flags |= IOMAP_F_MERGED;
}
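
Editorial note: the iomap_begin() change encodes a rule worth spelling out: with IOMAP_DAX the mapping targets the dax_device, whose addresses are relative to the whole DAX device, so the partition start (s_dax_part_off) is added; block I/O stays partition-relative through the bdev. A toy model of that branch; the names are illustrative, not the kernel's iomap structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the iomap target selection above. */
struct sketch_iomap {
	uint64_t addr;		/* disk address in bytes */
	const char *target;	/* "daxdev" or "bdev" */
};

static void map_block(struct sketch_iomap *iomap, uint64_t bno,
		      unsigned int blkbits, uint64_t dax_part_off, bool dax)
{
	iomap->addr = bno << blkbits;
	if (dax) {
		iomap->addr += dax_part_off;	/* whole-device offset */
		iomap->target = "daxdev";
	} else {
		iomap->target = "bdev";		/* partition-relative */
	}
}

int main(void)
{
	struct sketch_iomap im;

	map_block(&im, 1024, 12, 1 << 20, true);
	printf("%s addr=%llu\n", im.target, (unsigned long long)im.addr);
	return 0;
}
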
@@ -1297,9 +1302,9 @@ static int ext2_setsize(struct inode *inode, loff_t newsize)
inode_dio_wait(inode);
if (IS_DAX(inode)) {
- error = iomap_zero_range(inode, newsize,
- PAGE_ALIGN(newsize) - newsize, NULL,
- &ext2_iomap_ops);
+ error = dax_zero_range(inode, newsize,
+ PAGE_ALIGN(newsize) - newsize, NULL,
+ &ext2_iomap_ops);
} else if (test_opt(inode->i_sb, NOBH))
error = nobh_truncate_page(inode->i_mapping,
newsize, ext2_get_block);
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index d8d580b609ba..94f1fbd7d3ac 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -802,7 +802,6 @@ static unsigned long descriptor_loc(struct super_block *sb,
static int ext2_fill_super(struct super_block *sb, void *data, int silent)
{
- struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
struct buffer_head * bh;
struct ext2_sb_info * sbi;
struct ext2_super_block * es;
@@ -822,17 +821,17 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
- goto failed;
+ return -ENOMEM;
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
if (!sbi->s_blockgroup_lock) {
kfree(sbi);
- goto failed;
+ return -ENOMEM;
}
sb->s_fs_info = sbi;
sbi->s_sb_block = sb_block;
- sbi->s_daxdev = dax_dev;
+ sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off);
spin_lock_init(&sbi->s_lock);
ret = -EINVAL;
@@ -946,11 +945,13 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
blocksize = BLOCK_SIZE << le32_to_cpu(sbi->s_es->s_log_block_size);
if (test_opt(sb, DAX)) {
- if (!dax_supported(dax_dev, sb->s_bdev, blocksize, 0,
- bdev_nr_sectors(sb->s_bdev))) {
+ if (!sbi->s_daxdev) {
ext2_msg(sb, KERN_ERR,
"DAX unsupported by block device. Turning off DAX.");
clear_opt(sbi->s_mount_opt, DAX);
+ } else if (blocksize != PAGE_SIZE) {
+ ext2_msg(sb, KERN_ERR, "unsupported blocksize for DAX\n");
+ clear_opt(sbi->s_mount_opt, DAX);
}
}
@@ -1199,11 +1200,10 @@ failed_mount_group_desc:
failed_mount:
brelse(bh);
failed_sbi:
+ fs_put_dax(sbi->s_daxdev);
sb->s_fs_info = NULL;
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
-failed:
- fs_put_dax(dax_dev);
return ret;
}
diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c
index 5a35768d6149..57e82e25f8e2 100644
--- a/fs/ext4/acl.c
+++ b/fs/ext4/acl.c
@@ -139,7 +139,7 @@ fail:
/*
* Inode operation get_posix_acl().
*
- * inode->i_mutex: don't care
+ * inode->i_rwsem: don't care
*/
struct posix_acl *
ext4_get_acl(struct inode *inode, int type, bool rcu)
@@ -183,7 +183,7 @@ ext4_get_acl(struct inode *inode, int type, bool rcu)
/*
* Set the access or default ACL of an inode.
*
- * inode->i_mutex: down unless called from ext4_new_inode
+ * inode->i_rwsem: down unless called from ext4_new_inode
*/
static int
__ext4_set_acl(handle_t *handle, struct inode *inode, int type,
@@ -271,8 +271,8 @@ out_stop:
/*
* Initialize the ACLs of a new inode. Called from ext4_new_inode.
*
- * dir->i_mutex: down
- * inode->i_mutex: up (access to inode is still exclusive)
+ * dir->i_rwsem: down
+ * inode->i_rwsem: up (access to inode is still exclusive)
*/
int
ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 715ee206dfe1..bcd3b9bf8069 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1028,7 +1028,7 @@ struct ext4_inode_info {
/*
* Extended attributes can be read independently of the main file
- * data. Taking i_mutex even when reading would cause contention
+ * data. Taking i_rwsem even when reading would cause contention
* between readers of EAs and writers of regular file data, so
* instead we synchronize on xattr_sem when reading or changing
* EAs.
@@ -1699,6 +1699,7 @@ struct ext4_sb_info {
*/
struct percpu_rw_semaphore s_writepages_rwsem;
struct dax_device *s_daxdev;
+ u64 s_dax_part_off;
#ifdef CONFIG_EXT4_DEBUG
unsigned long s_simulate_fail;
#endif
@@ -1749,6 +1750,7 @@ struct ext4_sb_info {
spinlock_t s_fc_lock;
struct buffer_head *s_fc_bh;
struct ext4_fc_stats s_fc_stats;
+ tid_t s_fc_ineligible_tid;
#ifdef CONFIG_EXT4_DEBUG
int s_fc_debug_max_replay;
#endif
@@ -1794,10 +1796,7 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
enum {
EXT4_MF_MNTDIR_SAMPLED,
EXT4_MF_FS_ABORTED, /* Fatal error detected */
- EXT4_MF_FC_INELIGIBLE, /* Fast commit ineligible */
- EXT4_MF_FC_COMMITTING /* File system underoing a fast
- * commit.
- */
+ EXT4_MF_FC_INELIGIBLE /* Fast commit ineligible */
};
static inline void ext4_set_mount_flag(struct super_block *sb, int bit)
@@ -2484,7 +2483,7 @@ struct ext4_filename {
#ifdef CONFIG_FS_ENCRYPTION
struct fscrypt_str crypto_buf;
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
struct fscrypt_str cf_name;
#endif
};
@@ -2720,7 +2719,7 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb,
struct ext4_group_desc *gdp);
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
extern int ext4_fname_setup_ci_filename(struct inode *dir,
const struct qstr *iname,
struct ext4_filename *fname);
@@ -2753,7 +2752,7 @@ static inline int ext4_fname_setup_filename(struct inode *dir,
ext4_fname_from_fscrypt_name(fname, &name);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
err = ext4_fname_setup_ci_filename(dir, iname, fname);
#endif
return err;
@@ -2772,7 +2771,7 @@ static inline int ext4_fname_prepare_lookup(struct inode *dir,
ext4_fname_from_fscrypt_name(fname, &name);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname);
#endif
return err;
@@ -2789,7 +2788,7 @@ static inline void ext4_fname_free_filename(struct ext4_filename *fname)
fname->usr_fname = NULL;
fname->disk_name.name = NULL;
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
kfree(fname->cf_name.name);
fname->cf_name.name = NULL;
#endif
@@ -2805,7 +2804,7 @@ static inline int ext4_fname_setup_filename(struct inode *dir,
fname->disk_name.name = (unsigned char *) iname->name;
fname->disk_name.len = iname->len;
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
err = ext4_fname_setup_ci_filename(dir, iname, fname);
#endif
@@ -2821,7 +2820,7 @@ static inline int ext4_fname_prepare_lookup(struct inode *dir,
static inline void ext4_fname_free_filename(struct ext4_filename *fname)
{
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
kfree(fname->cf_name.name);
fname->cf_name.name = NULL;
#endif
@@ -2925,7 +2924,7 @@ void __ext4_fc_track_create(handle_t *handle, struct inode *inode,
struct dentry *dentry);
void ext4_fc_track_create(handle_t *handle, struct dentry *dentry);
void ext4_fc_track_inode(handle_t *handle, struct inode *inode);
-void ext4_fc_mark_ineligible(struct super_block *sb, int reason);
+void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle);
void ext4_fc_start_update(struct inode *inode);
void ext4_fc_stop_update(struct inode *inode);
void ext4_fc_del(struct inode *inode);
@@ -2934,6 +2933,9 @@ void ext4_fc_replay_cleanup(struct super_block *sb);
int ext4_fc_commit(journal_t *journal, tid_t commit_tid);
int __init ext4_fc_init_dentry_cache(void);
void ext4_fc_destroy_dentry_cache(void);
+int ext4_fc_record_regions(struct super_block *sb, int ino,
+ ext4_lblk_t lblk, ext4_fsblk_t pblk,
+ int len, int replay);
/* mballoc.c */
extern const struct seq_operations ext4_mb_seq_groups_ops;
@@ -3406,7 +3408,7 @@ do { \
#define EXT4_FREECLUSTERS_WATERMARK 0
#endif
-/* Update i_disksize. Requires i_mutex to avoid races with truncate */
+/* Update i_disksize. Requires i_rwsem to avoid races with truncate */
static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
{
WARN_ON_ONCE(S_ISREG(inode->i_mode) &&
@@ -3417,7 +3419,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
up_write(&EXT4_I(inode)->i_data_sem);
}
-/* Update i_size, i_disksize. Requires i_mutex to avoid races with truncate */
+/* Update i_size, i_disksize. Requires i_rwsem to avoid races with truncate */
static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
{
int changed = 0;
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 0e4fa644df01..db2ae4a2b38d 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -491,7 +491,7 @@ static inline int ext4_free_data_revoke_credits(struct inode *inode, int blocks)
/*
* This function controls whether or not we should try to go down the
* dioread_nolock code paths, which makes it safe to avoid taking
- * i_mutex for direct I/O reads. This only works for extent-based
+ * i_rwsem for direct I/O reads. This only works for extent-based
* files, and it doesn't work if data journaling is enabled, since the
* dioread_nolock code uses b_private to pass information back to the
* I/O completion handler, and this conflicts with the jbd's use of
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 1077ce7e189f..c0f3f83e0c1b 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -27,8 +27,8 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/fiemap.h>
-#include <linux/backing-dev.h>
#include <linux/iomap.h>
+#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"
@@ -97,7 +97,7 @@ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped)
* Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
* moment, get_block can be called only for blocks inside i_size since
* page cache has been already dropped and writes are blocked by
- * i_mutex. So we can safely drop the i_data_sem here.
+ * i_rwsem. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
ext4_discard_preallocations(inode, 0);
@@ -4404,8 +4404,7 @@ retry:
err = ext4_es_remove_extent(inode, last_block,
EXT_MAX_BLOCKS - last_block);
if (err == -ENOMEM) {
- cond_resched();
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ memalloc_retry_wait(GFP_ATOMIC);
goto retry;
}
if (err)
@@ -4413,8 +4412,7 @@ retry:
retry_remove_space:
err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
if (err == -ENOMEM) {
- cond_resched();
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ memalloc_retry_wait(GFP_ATOMIC);
goto retry_remove_space;
}
return err;
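
Editorial note: the two retry loops above replace an open-coded cond_resched() plus congestion_wait() pair with memalloc_retry_wait(), which backs off briefly before the failed allocation path is retried. A userspace sketch of the overall retry shape, with a fixed 50 ms nap standing in for the kernel helper's backoff:

#include <errno.h>
#include <stdio.h>
#include <time.h>

/* Pretend operation that fails with -ENOMEM twice before succeeding. */
static int try_operation(void)
{
	static int attempts;

	return ++attempts < 3 ? -ENOMEM : 0;
}

int main(void)
{
	int err;

retry:
	err = try_operation();
	if (err == -ENOMEM) {
		struct timespec ts = { 0, 50 * 1000 * 1000 };

		nanosleep(&ts, NULL);	/* brief backoff, then retry */
		goto retry;
	}
	printf("operation returned %d\n", err);
	return 0;
}
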
@@ -4574,7 +4572,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
- /* Wait all existing dio workers, newcomers will block on i_mutex */
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
inode_dio_wait(inode);
/* Preallocate the range including the unaligned edges */
@@ -4740,7 +4738,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
goto out;
}
- /* Wait all existing dio workers, newcomers will block on i_mutex */
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
inode_dio_wait(inode);
ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, flags);
@@ -5336,7 +5334,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
ret = PTR_ERR(handle);
goto out_mmap;
}
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
down_write(&EXT4_I(inode)->i_data_sem);
ext4_discard_preallocations(inode, 0);
@@ -5476,7 +5474,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
ret = PTR_ERR(handle);
goto out_mmap;
}
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_FALLOC_RANGE, handle);
/* Expand file to avoid data loss if there is error while shifting */
inode->i_size += len;
@@ -5573,7 +5571,7 @@ out_mutex:
* stuff such as page-cache locking consistency, bh mapping consistency or
* extent's data copying must be performed by caller.
* Locking:
- * i_mutex is held for both inodes
+ * i_rwsem is held for both inodes
* i_data_sem is locked for write for both inodes
* Assumptions:
* All pages from requested range are locked for both inodes
@@ -6093,11 +6091,15 @@ int ext4_ext_clear_bb(struct inode *inode)
ext4_mb_mark_bb(inode->i_sb,
path[j].p_block, 1, 0);
+ ext4_fc_record_regions(inode->i_sb, inode->i_ino,
+ 0, path[j].p_block, 1, 1);
}
ext4_ext_drop_refs(path);
kfree(path);
}
ext4_mb_mark_bb(inode->i_sb, map.m_pblk, map.m_len, 0);
+ ext4_fc_record_regions(inode->i_sb, inode->i_ino,
+ map.m_lblk, map.m_pblk, map.m_len, 1);
}
cur = cur + map.m_len;
}
diff --git a/fs/ext4/fast_commit.c b/fs/ext4/fast_commit.c
index 5ae8026a0c56..7964ee34e322 100644
--- a/fs/ext4/fast_commit.c
+++ b/fs/ext4/fast_commit.c
@@ -300,18 +300,32 @@ restart:
}
/*
- * Mark file system as fast commit ineligible. This means that next commit
- * operation would result in a full jbd2 commit.
+ * Mark the file system as fast commit ineligible, and record the latest
+ * ineligible transaction tid. This means that, until the recorded
+ * transaction commits, any commit operation will result in a full jbd2 commit.
*/
-void ext4_fc_mark_ineligible(struct super_block *sb, int reason)
+void ext4_fc_mark_ineligible(struct super_block *sb, int reason, handle_t *handle)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ tid_t tid;
if (!test_opt2(sb, JOURNAL_FAST_COMMIT) ||
(EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY))
return;
ext4_set_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ if (handle && !IS_ERR(handle))
+ tid = handle->h_transaction->t_tid;
+ else {
+ read_lock(&sbi->s_journal->j_state_lock);
+ tid = sbi->s_journal->j_running_transaction ?
+ sbi->s_journal->j_running_transaction->t_tid : 0;
+ read_unlock(&sbi->s_journal->j_state_lock);
+ }
+ spin_lock(&sbi->s_fc_lock);
+ if (sbi->s_fc_ineligible_tid < tid)
+ sbi->s_fc_ineligible_tid = tid;
+ spin_unlock(&sbi->s_fc_lock);
WARN_ON(reason >= EXT4_FC_REASON_MAX);
sbi->s_fc_stats.fc_ineligible_reason_count[reason]++;
}
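
Editorial note: the reworked ext4_fc_mark_ineligible() above records the highest transaction tid marked ineligible (taken from the handle when one is available, otherwise from the running transaction), so cleanup can later tell when a finished commit covers every ineligible transaction. A compact userspace sketch of that bookkeeping, with a pthread mutex standing in for s_fc_lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fc_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int fc_ineligible_tid;

/* Remember the highest transaction id marked ineligible so far. */
static void mark_ineligible(unsigned int tid)
{
	pthread_mutex_lock(&fc_lock);
	if (fc_ineligible_tid < tid)	/* keep the maximum tid seen */
		fc_ineligible_tid = tid;
	pthread_mutex_unlock(&fc_lock);
}

/* Mirrors the cleanup test: a commit of `tid` clears the ineligible
 * state only once it covers the recorded transaction. */
static int covers_all_ineligible(unsigned int committed_tid)
{
	return committed_tid >= fc_ineligible_tid;
}

int main(void)
{
	mark_ineligible(42);
	mark_ineligible(17);	/* ignored: older than the recorded tid */
	printf("covered by 41? %d, by 42? %d\n",
	       covers_all_ineligible(41), covers_all_ineligible(42));
	return 0;
}
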
@@ -361,7 +375,8 @@ static int ext4_fc_track_template(
spin_lock(&sbi->s_fc_lock);
if (list_empty(&EXT4_I(inode)->i_fc_list))
list_add_tail(&EXT4_I(inode)->i_fc_list,
- (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_COMMITTING)) ?
+ (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
+ sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING) ?
&sbi->s_fc_q[FC_Q_STAGING] :
&sbi->s_fc_q[FC_Q_MAIN]);
spin_unlock(&sbi->s_fc_lock);
@@ -387,7 +402,7 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update)
mutex_unlock(&ei->i_fc_lock);
node = kmem_cache_alloc(ext4_fc_dentry_cachep, GFP_NOFS);
if (!node) {
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
mutex_lock(&ei->i_fc_lock);
return -ENOMEM;
}
@@ -400,7 +415,7 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update)
if (!node->fcd_name.name) {
kmem_cache_free(ext4_fc_dentry_cachep, node);
ext4_fc_mark_ineligible(inode->i_sb,
- EXT4_FC_REASON_NOMEM);
+ EXT4_FC_REASON_NOMEM, NULL);
mutex_lock(&ei->i_fc_lock);
return -ENOMEM;
}
@@ -414,7 +429,8 @@ static int __track_dentry_update(struct inode *inode, void *arg, bool update)
node->fcd_name.len = dentry->d_name.len;
spin_lock(&sbi->s_fc_lock);
- if (ext4_test_mount_flag(inode->i_sb, EXT4_MF_FC_COMMITTING))
+ if (sbi->s_journal->j_flags & JBD2_FULL_COMMIT_ONGOING ||
+ sbi->s_journal->j_flags & JBD2_FAST_COMMIT_ONGOING)
list_add_tail(&node->fcd_list,
&sbi->s_fc_dentry_q[FC_Q_STAGING]);
else
@@ -502,7 +518,7 @@ void ext4_fc_track_inode(handle_t *handle, struct inode *inode)
if (ext4_should_journal_data(inode)) {
ext4_fc_mark_ineligible(inode->i_sb,
- EXT4_FC_REASON_INODE_JOURNAL_DATA);
+ EXT4_FC_REASON_INODE_JOURNAL_DATA, handle);
return;
}
@@ -879,7 +895,6 @@ static int ext4_fc_submit_inode_data_all(journal_t *journal)
int ret = 0;
spin_lock(&sbi->s_fc_lock);
- ext4_set_mount_flag(sb, EXT4_MF_FC_COMMITTING);
list_for_each_entry(ei, &sbi->s_fc_q[FC_Q_MAIN], i_fc_list) {
ext4_set_inode_state(&ei->vfs_inode, EXT4_STATE_FC_COMMITTING);
while (atomic_read(&ei->i_fc_updates)) {
@@ -1179,7 +1194,7 @@ fallback:
* Fast commit cleanup routine. This is called after every fast commit and
* full commit. full is true if we are called after a full commit.
*/
-static void ext4_fc_cleanup(journal_t *journal, int full)
+static void ext4_fc_cleanup(journal_t *journal, int full, tid_t tid)
{
struct super_block *sb = journal->j_private;
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -1197,7 +1212,8 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
list_del_init(&iter->i_fc_list);
ext4_clear_inode_state(&iter->vfs_inode,
EXT4_STATE_FC_COMMITTING);
- ext4_fc_reset_inode(&iter->vfs_inode);
+ if (iter->i_sync_tid <= tid)
+ ext4_fc_reset_inode(&iter->vfs_inode);
/* Make sure EXT4_STATE_FC_COMMITTING bit is clear */
smp_mb();
#if (BITS_PER_LONG < 64)
@@ -1226,8 +1242,10 @@ static void ext4_fc_cleanup(journal_t *journal, int full)
list_splice_init(&sbi->s_fc_q[FC_Q_STAGING],
&sbi->s_fc_q[FC_Q_MAIN]);
- ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
- ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ if (tid >= sbi->s_fc_ineligible_tid) {
+ sbi->s_fc_ineligible_tid = 0;
+ ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
+ }
if (full)
sbi->s_fc_bytes = 0;
@@ -1392,14 +1410,15 @@ static int ext4_fc_record_modified_inode(struct super_block *sb, int ino)
if (state->fc_modified_inodes[i] == ino)
return 0;
if (state->fc_modified_inodes_used == state->fc_modified_inodes_size) {
- state->fc_modified_inodes_size +=
- EXT4_FC_REPLAY_REALLOC_INCREMENT;
state->fc_modified_inodes = krealloc(
- state->fc_modified_inodes, sizeof(int) *
- state->fc_modified_inodes_size,
- GFP_KERNEL);
+ state->fc_modified_inodes,
+ sizeof(int) * (state->fc_modified_inodes_size +
+ EXT4_FC_REPLAY_REALLOC_INCREMENT),
+ GFP_KERNEL);
if (!state->fc_modified_inodes)
return -ENOMEM;
+ state->fc_modified_inodes_size +=
+ EXT4_FC_REPLAY_REALLOC_INCREMENT;
}
state->fc_modified_inodes[state->fc_modified_inodes_used++] = ino;
return 0;
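
Editorial note: the krealloc() reordering above illustrates a general pattern: compute the new size locally and bump the recorded capacity only after the allocation succeeds, so a failure cannot leave the bookkeeping claiming room the array does not have. A userspace sketch of the pattern (the temporary pointer additionally avoids losing the old array on failure, a detail beyond this hunk):

#include <stdio.h>
#include <stdlib.h>

#define REALLOC_INCREMENT 4

struct int_list {
	int *items;
	size_t used, capacity;
};

static int list_add(struct int_list *l, int val)
{
	if (l->used == l->capacity) {
		size_t new_cap = l->capacity + REALLOC_INCREMENT;
		int *tmp = realloc(l->items, new_cap * sizeof(*tmp));

		if (!tmp)
			return -1;	/* capacity is still accurate */
		l->items = tmp;
		l->capacity = new_cap;	/* update only after success */
	}
	l->items[l->used++] = val;
	return 0;
}

int main(void)
{
	struct int_list l = { 0 };

	for (int i = 0; i < 10; i++)
		list_add(&l, i);
	printf("used=%zu capacity=%zu\n", l.used, l.capacity);
	free(l.items);
	return 0;
}
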
@@ -1431,7 +1450,9 @@ static int ext4_fc_replay_inode(struct super_block *sb, struct ext4_fc_tl *tl,
}
inode = NULL;
- ext4_fc_record_modified_inode(sb, ino);
+ ret = ext4_fc_record_modified_inode(sb, ino);
+ if (ret)
+ goto out;
raw_fc_inode = (struct ext4_inode *)
(val + offsetof(struct ext4_fc_inode, fc_raw_inode));
@@ -1563,16 +1584,23 @@ out:
}
/*
- * Record physical disk regions which are in use as per fast commit area. Our
- * simple replay phase allocator excludes these regions from allocation.
+ * Record physical disk regions which are in use, as reported by the fast
+ * commit area and as used by inodes during the replay phase. Our simple
+ * replay-phase allocator excludes these regions from allocation.
*/
-static int ext4_fc_record_regions(struct super_block *sb, int ino,
- ext4_lblk_t lblk, ext4_fsblk_t pblk, int len)
+int ext4_fc_record_regions(struct super_block *sb, int ino,
+ ext4_lblk_t lblk, ext4_fsblk_t pblk, int len, int replay)
{
struct ext4_fc_replay_state *state;
struct ext4_fc_alloc_region *region;
state = &EXT4_SB(sb)->s_fc_replay_state;
+ /*
+	 * During the replay phase, fc_regions_valid may not be the same as
+	 * fc_regions_used; bring it up to date when recording new regions.
+ */
+ if (replay && state->fc_regions_used != state->fc_regions_valid)
+ state->fc_regions_used = state->fc_regions_valid;
if (state->fc_regions_used == state->fc_regions_size) {
state->fc_regions_size +=
EXT4_FC_REPLAY_REALLOC_INCREMENT;
@@ -1590,6 +1618,9 @@ static int ext4_fc_record_regions(struct super_block *sb, int ino,
region->pblk = pblk;
region->len = len;
+ if (replay)
+ state->fc_regions_valid++;
+
return 0;
}
@@ -1621,6 +1652,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
}
ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
+ if (ret)
+ goto out;
start = le32_to_cpu(ex->ee_block);
start_pblk = ext4_ext_pblock(ex);
@@ -1638,18 +1671,14 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
map.m_pblk = 0;
ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret < 0) {
- iput(inode);
- return 0;
- }
+ if (ret < 0)
+ goto out;
if (ret == 0) {
/* Range is not mapped */
path = ext4_find_extent(inode, cur, NULL, 0);
- if (IS_ERR(path)) {
- iput(inode);
- return 0;
- }
+ if (IS_ERR(path))
+ goto out;
memset(&newex, 0, sizeof(newex));
newex.ee_block = cpu_to_le32(cur);
ext4_ext_store_pblock(
@@ -1663,10 +1692,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
up_write((&EXT4_I(inode)->i_data_sem));
ext4_ext_drop_refs(path);
kfree(path);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
goto next;
}
@@ -1679,10 +1706,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
ext4_ext_is_unwritten(ex),
start_pblk + cur - start);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
/*
* Mark the old blocks as free since they aren't used
* anymore. We maintain an array of all the modified
@@ -1702,10 +1727,8 @@ static int ext4_fc_replay_add_range(struct super_block *sb,
ext4_ext_is_unwritten(ex), map.m_pblk);
ret = ext4_ext_replay_update_ex(inode, cur, map.m_len,
ext4_ext_is_unwritten(ex), map.m_pblk);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
/*
* We may have split the extent tree while toggling the state.
* Try to shrink the extent tree now.
@@ -1717,6 +1740,7 @@ next:
}
ext4_ext_replay_shrink_inode(inode, i_size_read(inode) >>
sb->s_blocksize_bits);
+out:
iput(inode);
return 0;
}
@@ -1746,6 +1770,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
}
ret = ext4_fc_record_modified_inode(sb, inode->i_ino);
+ if (ret)
+ goto out;
jbd_debug(1, "DEL_RANGE, inode %ld, lblk %d, len %d\n",
inode->i_ino, le32_to_cpu(lrange.fc_lblk),
@@ -1755,10 +1781,8 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
map.m_len = remaining;
ret = ext4_map_blocks(NULL, inode, &map, 0);
- if (ret < 0) {
- iput(inode);
- return 0;
- }
+ if (ret < 0)
+ goto out;
if (ret > 0) {
remaining -= ret;
cur += ret;
@@ -1770,18 +1794,17 @@ ext4_fc_replay_del_range(struct super_block *sb, struct ext4_fc_tl *tl,
}
down_write(&EXT4_I(inode)->i_data_sem);
- ret = ext4_ext_remove_space(inode, lrange.fc_lblk,
- lrange.fc_lblk + lrange.fc_len - 1);
+ ret = ext4_ext_remove_space(inode, le32_to_cpu(lrange.fc_lblk),
+ le32_to_cpu(lrange.fc_lblk) +
+ le32_to_cpu(lrange.fc_len) - 1);
up_write(&EXT4_I(inode)->i_data_sem);
- if (ret) {
- iput(inode);
- return 0;
- }
+ if (ret)
+ goto out;
ext4_ext_replay_shrink_inode(inode,
i_size_read(inode) >> sb->s_blocksize_bits);
ext4_mark_inode_dirty(NULL, inode);
+out:
iput(inode);
-
return 0;
}
@@ -1937,7 +1960,7 @@ static int ext4_fc_replay_scan(journal_t *journal,
ret = ext4_fc_record_regions(sb,
le32_to_cpu(ext.fc_ino),
le32_to_cpu(ex->ee_block), ext4_ext_pblock(ex),
- ext4_ext_get_actual_len(ex));
+ ext4_ext_get_actual_len(ex), 0);
if (ret < 0)
break;
ret = JBD2_FC_REPLAY_CONTINUE;
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index f34f4176c1e7..147b5241dd94 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -290,7 +290,7 @@ static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len,
int ext4fs_dirhash(const struct inode *dir, const char *name, int len,
struct dx_hash_info *hinfo)
{
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
const struct unicode_map *um = dir->i_sb->s_encoding;
int r, dlen;
unsigned char *buff;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 89efa78ed4b2..07a8c75b65ed 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -696,7 +696,7 @@ static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode,
* Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
* moment, get_block can be called only for blocks inside i_size since
* page cache has been already dropped and writes are blocked by
- * i_mutex. So we can safely drop the i_data_sem here.
+ * i_rwsem. So we can safely drop the i_data_sem here.
*/
BUG_ON(EXT4_JOURNAL(inode) == NULL);
ext4_discard_preallocations(inode, 0);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 39a1ab129fdc..e42941803605 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -7,7 +7,7 @@
#include <linux/iomap.h>
#include <linux/fiemap.h>
#include <linux/iversion.h>
-#include <linux/backing-dev.h>
+#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "ext4.h"
@@ -911,7 +911,7 @@ int ext4_da_write_inline_data_begin(struct address_space *mapping,
struct page **pagep,
void **fsdata)
{
- int ret, inline_size;
+ int ret;
handle_t *handle;
struct page *page;
struct ext4_iloc iloc;
@@ -928,14 +928,9 @@ retry_journal:
goto out;
}
- inline_size = ext4_get_max_inline_size(inode);
-
- ret = -ENOSPC;
- if (inline_size >= pos + len) {
- ret = ext4_prepare_inline_data(handle, inode, pos + len);
- if (ret && ret != -ENOSPC)
- goto out_journal;
- }
+ ret = ext4_prepare_inline_data(handle, inode, pos + len);
+ if (ret && ret != -ENOSPC)
+ goto out_journal;
/*
* We cannot recurse into the filesystem as the transaction
@@ -1133,7 +1128,15 @@ static void ext4_restore_inline_data(handle_t *handle, struct inode *inode,
struct ext4_iloc *iloc,
void *buf, int inline_size)
{
- ext4_create_inline_data(handle, inode, inline_size);
+ int ret;
+
+ ret = ext4_create_inline_data(handle, inode, inline_size);
+ if (ret) {
+ ext4_msg(inode->i_sb, KERN_EMERG,
+ "error restoring inline_data for inode -- potential data loss! (inode %lu, error %d)",
+ inode->i_ino, ret);
+ return;
+ }
ext4_write_inline_data(inode, iloc, buf, 0, inline_size);
ext4_set_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
}
@@ -1929,8 +1932,7 @@ int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
retry:
err = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
if (err == -ENOMEM) {
- cond_resched();
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ memalloc_retry_wait(GFP_ATOMIC);
goto retry;
}
if (err)
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9dbeb772de60..01c9e4f743ba 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -41,6 +41,7 @@
#include <linux/bitops.h>
#include <linux/iomap.h>
#include <linux/iversion.h>
+#include <linux/dax.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -337,7 +338,7 @@ stop_handle:
return;
no_delete:
if (!list_empty(&EXT4_I(inode)->i_fc_list))
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_NOMEM, NULL);
ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
}
@@ -1223,7 +1224,7 @@ retry_journal:
/*
* __block_write_begin may have instantiated a few blocks
* outside i_size. Trim these off again. Don't need
- * i_size_read because we hold i_mutex.
+ * i_size_read because we hold i_rwsem.
*
* Add inode to orphan list in case we crash before
* truncate finishes
@@ -3253,7 +3254,7 @@ static bool ext4_inode_datasync_dirty(struct inode *inode)
static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
struct ext4_map_blocks *map, loff_t offset,
- loff_t length)
+ loff_t length, unsigned int flags)
{
u8 blkbits = inode->i_blkbits;
@@ -3270,8 +3271,10 @@ static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
if (map->m_flags & EXT4_MAP_NEW)
iomap->flags |= IOMAP_F_NEW;
- iomap->bdev = inode->i_sb->s_bdev;
- iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
+ if (flags & IOMAP_DAX)
+ iomap->dax_dev = EXT4_SB(inode->i_sb)->s_daxdev;
+ else
+ iomap->bdev = inode->i_sb->s_bdev;
iomap->offset = (u64) map->m_lblk << blkbits;
iomap->length = (u64) map->m_len << blkbits;
@@ -3291,9 +3294,13 @@ static void ext4_set_iomap(struct inode *inode, struct iomap *iomap,
if (map->m_flags & EXT4_MAP_UNWRITTEN) {
iomap->type = IOMAP_UNWRITTEN;
iomap->addr = (u64) map->m_pblk << blkbits;
+ if (flags & IOMAP_DAX)
+ iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
} else if (map->m_flags & EXT4_MAP_MAPPED) {
iomap->type = IOMAP_MAPPED;
iomap->addr = (u64) map->m_pblk << blkbits;
+ if (flags & IOMAP_DAX)
+ iomap->addr += EXT4_SB(inode->i_sb)->s_dax_part_off;
} else {
iomap->type = IOMAP_HOLE;
iomap->addr = IOMAP_NULL_ADDR;
@@ -3330,8 +3337,8 @@ retry:
* DAX and direct I/O are the only two operations that are currently
* supported with IOMAP_WRITE.
*/
- WARN_ON(!IS_DAX(inode) && !(flags & IOMAP_DIRECT));
- if (IS_DAX(inode))
+ WARN_ON(!(flags & (IOMAP_DAX | IOMAP_DIRECT)));
+ if (flags & IOMAP_DAX)
m_flags = EXT4_GET_BLOCKS_CREATE_ZERO;
/*
* We use i_size instead of i_disksize here because delalloc writeback
@@ -3402,7 +3409,7 @@ static int ext4_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
if (ret < 0)
return ret;
out:
- ext4_set_iomap(inode, iomap, &map, offset, length);
+ ext4_set_iomap(inode, iomap, &map, offset, length, flags);
return 0;
}
@@ -3522,7 +3529,7 @@ static int ext4_iomap_begin_report(struct inode *inode, loff_t offset,
delalloc = ext4_iomap_is_delalloc(inode, &map);
set_iomap:
- ext4_set_iomap(inode, iomap, &map, offset, length);
+ ext4_set_iomap(inode, iomap, &map, offset, length, flags);
if (delalloc && iomap->type == IOMAP_HOLE)
iomap->type = IOMAP_DELALLOC;
@@ -3762,8 +3769,8 @@ static int ext4_block_zero_page_range(handle_t *handle,
length = max;
if (IS_DAX(inode)) {
- return iomap_zero_range(inode, from, length, NULL,
- &ext4_iomap_ops);
+ return dax_zero_range(inode, from, length, NULL,
+ &ext4_iomap_ops);
}
return __ext4_block_zero_page_range(handle, mapping, from, length);
}
@@ -3972,7 +3979,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
}
- /* Wait all existing dio workers, newcomers will block on i_mutex */
+ /* Wait all existing dio workers, newcomers will block on i_rwsem */
inode_dio_wait(inode);
/*
@@ -4122,7 +4129,7 @@ int ext4_truncate(struct inode *inode)
/*
* There is a possibility that we're either freeing the inode
* or it's a completely new inode. In those cases we might not
- * have i_mutex locked because it's not necessary.
+ * have i_rwsem locked because it's not necessary.
*/
if (!(inode->i_state & (I_NEW|I_FREEING)))
WARN_ON(!inode_is_locked(inode));
@@ -5264,7 +5271,7 @@ static void ext4_wait_for_tail_page_commit(struct inode *inode)
* transaction are already on disk (truncate waits for pages under
* writeback).
*
- * Called with inode->i_mutex down.
+ * Called with inode->i_rwsem down.
*/
int ext4_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
struct iattr *attr)
@@ -5976,7 +5983,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
return PTR_ERR(handle);
ext4_fc_mark_ineligible(inode->i_sb,
- EXT4_FC_REASON_JOURNAL_FLAG_CHANGE);
+ EXT4_FC_REASON_JOURNAL_FLAG_CHANGE, handle);
err = ext4_mark_inode_dirty(handle, inode);
ext4_handle_sync(handle);
ext4_journal_stop(handle);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index bbbedf27b71c..a8022c2c6a58 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -411,7 +411,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
err = -EINVAL;
goto err_out;
}
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_SWAP_BOOT, handle);
/* Protect extent tree against block allocations via delalloc */
ext4_double_down_write_data_sem(inode, inode_bl);
@@ -1373,7 +1373,7 @@ mext_out:
err = ext4_resize_fs(sb, n_blocks_count);
if (EXT4_SB(sb)->s_journal) {
- ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE);
+ ext4_fc_mark_ineligible(sb, EXT4_FC_REASON_RESIZE, NULL);
jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
err2 = jbd2_journal_flush(EXT4_SB(sb)->s_journal, 0);
jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index cf2fd9fc7d98..67ac95c4cd9b 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2834,7 +2834,7 @@ out:
static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
+ struct super_block *sb = pde_data(file_inode(seq->file));
ext4_group_t group;
if (*pos < 0 || *pos >= ext4_get_groups_count(sb))
@@ -2845,7 +2845,7 @@ static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
+ struct super_block *sb = pde_data(file_inode(seq->file));
ext4_group_t group;
++*pos;
@@ -2857,7 +2857,7 @@ static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
+ struct super_block *sb = pde_data(file_inode(seq->file));
ext4_group_t group = (ext4_group_t) ((unsigned long) v);
int i;
int err, buddy_loaded = 0;
@@ -2985,7 +2985,7 @@ int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset)
static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos)
__acquires(&EXT4_SB(sb)->s_mb_rb_lock)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
+ struct super_block *sb = pde_data(file_inode(seq->file));
unsigned long position;
read_lock(&EXT4_SB(sb)->s_mb_rb_lock);
@@ -2998,7 +2998,7 @@ __acquires(&EXT4_SB(sb)->s_mb_rb_lock)
static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
+ struct super_block *sb = pde_data(file_inode(seq->file));
unsigned long position;
++*pos;
@@ -3010,7 +3010,7 @@ static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, lof
static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
+ struct super_block *sb = pde_data(file_inode(seq->file));
struct ext4_sb_info *sbi = EXT4_SB(sb);
unsigned long position = ((unsigned long) v);
struct ext4_group_info *grp;
@@ -3058,7 +3058,7 @@ static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v)
static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v)
__releases(&EXT4_SB(sb)->s_mb_rb_lock)
{
- struct super_block *sb = PDE_DATA(file_inode(seq->file));
+ struct super_block *sb = pde_data(file_inode(seq->file));
read_unlock(&EXT4_SB(sb)->s_mb_rb_lock);
}
@@ -5753,7 +5753,8 @@ static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
struct super_block *sb = ar->inode->i_sb;
ext4_group_t group;
ext4_grpblk_t blkoff;
- int i = sb->s_blocksize;
+ ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
+ ext4_grpblk_t i = 0;
ext4_fsblk_t goal, block;
struct ext4_super_block *es = EXT4_SB(sb)->s_es;
@@ -5775,19 +5776,26 @@ static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle,
ext4_get_group_no_and_offset(sb,
max(ext4_group_first_block_no(sb, group), goal),
NULL, &blkoff);
- i = mb_find_next_zero_bit(bitmap_bh->b_data, sb->s_blocksize,
+ while (1) {
+ i = mb_find_next_zero_bit(bitmap_bh->b_data, max,
blkoff);
+ if (i >= max)
+ break;
+ if (ext4_fc_replay_check_excluded(sb,
+ ext4_group_first_block_no(sb, group) + i)) {
+ blkoff = i + 1;
+ } else
+ break;
+ }
brelse(bitmap_bh);
- if (i >= sb->s_blocksize)
- continue;
- if (ext4_fc_replay_check_excluded(sb,
- ext4_group_first_block_no(sb, group) + i))
- continue;
- break;
+ if (i < max)
+ break;
}
- if (group >= ext4_get_groups_count(sb) && i >= sb->s_blocksize)
+ if (group >= ext4_get_groups_count(sb) || i >= max) {
+ *errp = -ENOSPC;
return 0;
+ }
block = ext4_group_first_block_no(sb, group) + i;
ext4_mb_mark_bb(sb, block, 1, 1);
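
Editorial note: the rewritten search loop above fixes two things: the bitmap scan is bounded by clusters-per-group rather than the block size, and hitting a replay-excluded block now advances past it instead of abandoning the whole group; only exhausting the group (i >= max) moves on, and running out of groups reports -ENOSPC. A userspace sketch of that loop shape; all helpers here are illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Linear stand-in for mb_find_next_zero_bit(): returns max if no zero
 * bit exists in [start, max), matching the kernel convention. */
static int find_next_zero_bit(const unsigned char *map, int max, int start)
{
	for (int i = start; i < max; i++)
		if (!(map[i / 8] & (1 << (i % 8))))
			return i;
	return max;
}

static bool is_excluded(int bit)
{
	return bit == 2;	/* pretend block 2 is reserved by replay */
}

int main(void)
{
	unsigned char bitmap[2] = { 0x03, 0x00 };	/* bits 0,1 in use */
	int max = 16, blkoff = 0, i;

	while (1) {
		i = find_next_zero_bit(bitmap, max, blkoff);
		if (i >= max)
			break;		/* group exhausted */
		if (is_excluded(i))
			blkoff = i + 1;	/* skip past it, keep scanning */
		else
			break;		/* usable bit found */
	}
	if (i < max)
		printf("allocated bit %d\n", i);	/* prints 3 */
	return 0;
}
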
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index ff8916e1d38e..7a5353a8cfd7 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -485,7 +485,7 @@ int ext4_ext_migrate(struct inode *inode)
* when we add extents we extent the journal
*/
/*
- * Even though we take i_mutex we can still cause block
+ * Even though we take i_rwsem we can still cause block
* allocation via mmap write to holes. If we have allocated
* new blocks we fail migrate. New block allocation will
* clear EXT4_STATE_EXT_MIGRATE flag. The flag is updated
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 52c9bd154122..8cf0a924a49b 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1317,7 +1317,7 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block)
dx_set_count(entries, count + 1);
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/*
* Test whether a case-insensitive directory entry matches the filename
* being searched for. If quick is set, assume the name being looked up
@@ -1428,7 +1428,7 @@ static bool ext4_match(struct inode *parent,
f.crypto_buf = fname->crypto_buf;
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent) &&
(!IS_ENCRYPTED(parent) || fscrypt_has_encryption_key(parent))) {
if (fname->cf_name.name) {
@@ -1800,7 +1800,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
}
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (!inode && IS_CASEFOLDED(dir)) {
/* Eventually we want to call d_add_ci(dentry, NULL)
* for negative dentries in the encoding case as
@@ -2308,7 +2308,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
if (fscrypt_is_nokey_name(dentry))
return -ENOKEY;
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (sb_has_strict_encoding(sb) && IS_CASEFOLDED(dir) &&
sb->s_encoding && utf8_validate(sb->s_encoding, &dentry->d_name))
return -EINVAL;
@@ -3126,7 +3126,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry)
ext4_fc_track_unlink(handle, dentry);
retval = ext4_mark_inode_dirty(handle, dir);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want to avoid
* invalidating the dentries here, along with returning the
@@ -3231,7 +3231,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
retval = __ext4_unlink(handle, dir, &dentry->d_name, d_inode(dentry));
if (!retval)
ext4_fc_track_unlink(handle, dentry);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/* VFS negative dentries are incompatible with Encoding and
* Case-insensitiveness. Eventually we'll want to avoid
* invalidating the dentries here, along with returning the
@@ -3889,7 +3889,7 @@ static int ext4_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
* dirents in directories.
*/
ext4_fc_mark_ineligible(old.inode->i_sb,
- EXT4_FC_REASON_RENAME_DIR);
+ EXT4_FC_REASON_RENAME_DIR, handle);
} else {
if (new.inode)
ext4_fc_track_unlink(handle, new.dentry);
@@ -4049,7 +4049,7 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
if (unlikely(retval))
goto end_rename;
ext4_fc_mark_ineligible(new.inode->i_sb,
- EXT4_FC_REASON_CROSS_RENAME);
+ EXT4_FC_REASON_CROSS_RENAME, handle);
if (old.dir_bh) {
retval = ext4_rename_dir_finish(handle, &old, new.dir->i_ino);
if (retval)
diff --git a/fs/ext4/orphan.c b/fs/ext4/orphan.c
index 53adc8f570a3..7de0612eb42d 100644
--- a/fs/ext4/orphan.c
+++ b/fs/ext4/orphan.c
@@ -93,7 +93,7 @@ static int ext4_orphan_file_add(handle_t *handle, struct inode *inode)
* At filesystem recovery time, we walk this list deleting unlinked
* inodes and truncating linked inodes in ext4_orphan_cleanup().
*
- * Orphan list manipulation functions must be called under i_mutex unless
+ * Orphan list manipulation functions must be called under i_rwsem unless
* we are just creating the inode or deleting it.
*/
int ext4_orphan_add(handle_t *handle, struct inode *inode)
@@ -119,7 +119,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
/*
* Orphan handling is only valid for files with data blocks
* being truncated, or files being unlinked. Note that we either
- * hold i_mutex, or the inode can not be referenced from outside,
+ * hold i_rwsem, or the inode can not be referenced from outside,
* so i_nlink should not be bumped due to race
*/
ASSERT((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 9cb261714991..1d370364230e 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -24,7 +24,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/backing-dev.h>
+#include <linux/sched/mm.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@@ -523,12 +523,13 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
ret = PTR_ERR(bounce_page);
if (ret == -ENOMEM &&
(io->io_bio || wbc->sync_mode == WB_SYNC_ALL)) {
- gfp_flags = GFP_NOFS;
+ gfp_t new_gfp_flags = GFP_NOFS;
if (io->io_bio)
ext4_io_submit(io);
else
- gfp_flags |= __GFP_NOFAIL;
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ new_gfp_flags |= __GFP_NOFAIL;
+ memalloc_retry_wait(gfp_flags);
+ gfp_flags = new_gfp_flags;
goto retry_encrypt;
}
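memalloc_retry_wait() from <linux/sched/mm.h> replaces congestion_wait() as the backoff between failed allocations; it sleeps a short, reclaim-aware interval appropriate for the given gfp mask. The retry shape above, reduced to a standalone sketch (alloc_with_retry is an illustrative name):

#include <linux/sched/mm.h>
#include <linux/slab.h>

/* First attempt uses plain GFP_NOFS; on failure, wait for reclaim to
 * make progress and retry with __GFP_NOFAIL so the second attempt
 * cannot fail.
 */
static void *alloc_with_retry(size_t size)
{
	gfp_t gfp_flags = GFP_NOFS;
	void *p;

retry:
	p = kmalloc(size, gfp_flags);
	if (!p) {
		memalloc_retry_wait(gfp_flags);
		gfp_flags |= __GFP_NOFAIL;
		goto retry;
	}
	return p;
}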
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index 3db923403505..4cd62f1d848c 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -43,7 +43,6 @@
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
-#include <linux/cleancache.h>
#include "ext4.h"
@@ -350,11 +349,6 @@ int ext4_mpage_readpages(struct inode *inode,
} else if (fully_mapped) {
SetPageMappedToDisk(page);
}
- if (fully_mapped && blocks_per_page == 1 &&
- !PageUptodate(page) && cleancache_get_page(page) == 0) {
- SetPageUptodate(page);
- goto confused;
- }
/*
* This page will go to BIO. Do we need to send this
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9a936ecbaa3b..c5021ca0a28a 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -39,7 +39,6 @@
#include <linux/log2.h>
#include <linux/crc16.h>
#include <linux/dax.h>
-#include <linux/cleancache.h>
#include <linux/uaccess.h>
#include <linux/iversion.h>
#include <linux/unicode.h>
@@ -1302,7 +1301,7 @@ static void ext4_put_super(struct super_block *sb)
kfree(sbi->s_blockgroup_lock);
fs_put_dax(sbi->s_daxdev);
fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
#endif
kfree(sbi);
@@ -1962,33 +1961,26 @@ static const struct mount_opts {
{Opt_err, 0, 0}
};
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
static const struct ext4_sb_encodings {
__u16 magic;
char *name;
- char *version;
+ unsigned int version;
} ext4_sb_encoding_map[] = {
- {EXT4_ENC_UTF8_12_1, "utf8", "12.1.0"},
+ {EXT4_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
};
-static int ext4_sb_read_encoding(const struct ext4_super_block *es,
- const struct ext4_sb_encodings **encoding,
- __u16 *flags)
+static const struct ext4_sb_encodings *
+ext4_sb_read_encoding(const struct ext4_super_block *es)
{
__u16 magic = le16_to_cpu(es->s_encoding);
int i;
for (i = 0; i < ARRAY_SIZE(ext4_sb_encoding_map); i++)
if (magic == ext4_sb_encoding_map[i].magic)
- break;
-
- if (i >= ARRAY_SIZE(ext4_sb_encoding_map))
- return -EINVAL;
+ return &ext4_sb_encoding_map[i];
- *encoding = &ext4_sb_encoding_map[i];
- *flags = le16_to_cpu(es->s_encoding_flags);
-
- return 0;
+ return NULL;
}
#endif
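Returning the table entry (or NULL) replaces the old out-parameter-plus-errno interface, and reading s_encoding_flags moves to the caller. Call sites after this change look like the following sketch (error handling abbreviated; in __ext4_fill_super the failure path is a goto, not a return):

const struct ext4_sb_encodings *encoding_info;
__u16 encoding_flags = le16_to_cpu(es->s_encoding_flags);

encoding_info = ext4_sb_read_encoding(es);
if (!encoding_info)
	return -EINVAL;	/* superblock names an unknown encoding */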
@@ -3156,8 +3148,6 @@ done:
EXT4_BLOCKS_PER_GROUP(sb),
EXT4_INODES_PER_GROUP(sb),
sbi->s_mount_opt, sbi->s_mount_opt2);
-
- cleancache_init_fs(sb);
return err;
}
@@ -3616,7 +3606,7 @@ int ext4_feature_set_ok(struct super_block *sb, int readonly)
return 0;
}
-#ifndef CONFIG_UNICODE
+#if !IS_ENABLED(CONFIG_UNICODE)
if (ext4_has_feature_casefold(sb)) {
ext4_msg(sb, KERN_ERR,
"Filesystem with casefold feature cannot be "
@@ -4338,7 +4328,7 @@ static struct ext4_sb_info *ext4_alloc_sbi(struct super_block *sb)
if (!sbi)
return NULL;
- sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev);
+ sbi->s_daxdev = fs_dax_get_by_bdev(sb->s_bdev, &sbi->s_dax_part_off);
sbi->s_blockgroup_lock =
kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
@@ -4620,14 +4610,14 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
if (err < 0)
goto failed_mount;
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (ext4_has_feature_casefold(sb) && !sb->s_encoding) {
const struct ext4_sb_encodings *encoding_info;
struct unicode_map *encoding;
- __u16 encoding_flags;
+ __u16 encoding_flags = le16_to_cpu(es->s_encoding_flags);
- if (ext4_sb_read_encoding(es, &encoding_info,
- &encoding_flags)) {
+ encoding_info = ext4_sb_read_encoding(es);
+ if (!encoding_info) {
ext4_msg(sb, KERN_ERR,
"Encoding requested by superblock is unknown");
goto failed_mount;
@@ -4636,15 +4626,21 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
encoding = utf8_load(encoding_info->version);
if (IS_ERR(encoding)) {
ext4_msg(sb, KERN_ERR,
- "can't mount with superblock charset: %s-%s "
+ "can't mount with superblock charset: %s-%u.%u.%u "
"not supported by the kernel. flags: 0x%x.",
- encoding_info->name, encoding_info->version,
+ encoding_info->name,
+ unicode_major(encoding_info->version),
+ unicode_minor(encoding_info->version),
+ unicode_rev(encoding_info->version),
encoding_flags);
goto failed_mount;
}
ext4_msg(sb, KERN_INFO,"Using encoding defined by superblock: "
- "%s-%s with flags 0x%hx", encoding_info->name,
- encoding_info->version?:"\b", encoding_flags);
+ "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
+ unicode_major(encoding_info->version),
+ unicode_minor(encoding_info->version),
+ unicode_rev(encoding_info->version),
+ encoding_flags);
sb->s_encoding = encoding;
sb->s_encoding_flags = encoding_flags;
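The encoding table now stores the Unicode version as a packed integer, which both feeds utf8_load() and is unpacked for logging. A sketch of the packing scheme as this series uses it; the byte layout (major, minor, revision in consecutive bytes) is an assumption about <linux/unicode.h>, so the names below are local:

/* EX_* names are local to this sketch, not the kernel's. */
#define EX_UNICODE_AGE(maj, min, rev) \
	(((unsigned int)(maj) << 16) | \
	 ((unsigned int)(min) << 8)  | \
	  (unsigned int)(rev))

static inline unsigned int ex_major(unsigned int age) { return (age >> 16) & 0xff; }
static inline unsigned int ex_minor(unsigned int age) { return (age >> 8) & 0xff; }
static inline unsigned int ex_rev(unsigned int age)   { return age & 0xff; }

/* EX_UNICODE_AGE(12, 1, 0) == 0x000c0100, logged back as "utf8-12.1.0". */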
@@ -4756,9 +4752,12 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
goto failed_mount;
}
- if (dax_supported(sbi->s_daxdev, sb->s_bdev, blocksize, 0,
- bdev_nr_sectors(sb->s_bdev)))
- set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);
+ if (sbi->s_daxdev) {
+ if (blocksize == PAGE_SIZE)
+ set_bit(EXT4_FLAGS_BDEV_IS_DAX, &sbi->s_ext4_flags);
+ else
+ ext4_msg(sb, KERN_ERR, "unsupported blocksize for DAX\n");
+ }
if (sbi->s_mount_opt & EXT4_MOUNT_DAX_ALWAYS) {
if (ext4_has_feature_inline_data(sb)) {
@@ -5083,7 +5082,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb)
INIT_LIST_HEAD(&sbi->s_fc_dentry_q[FC_Q_STAGING]);
sbi->s_fc_bytes = 0;
ext4_clear_mount_flag(sb, EXT4_MF_FC_INELIGIBLE);
- ext4_clear_mount_flag(sb, EXT4_MF_FC_COMMITTING);
+ sbi->s_fc_ineligible_tid = 0;
spin_lock_init(&sbi->s_fc_lock);
memset(&sbi->s_fc_stats, 0, sizeof(sbi->s_fc_stats));
sbi->s_fc_replay_state.fc_regions = NULL;
@@ -5515,7 +5514,7 @@ failed_mount:
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
#endif
@@ -5541,7 +5540,7 @@ static int ext4_fill_super(struct super_block *sb, struct fs_context *fc)
sbi = ext4_alloc_sbi(sb);
if (!sbi)
- ret = -ENOMEM;
+ return -ENOMEM;
fc->s_fs_info = sbi;
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index f61e65ae27d8..d233c24ea342 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -309,7 +309,7 @@ EXT4_ATTR_FEATURE(meta_bg_resize);
EXT4_ATTR_FEATURE(encryption);
EXT4_ATTR_FEATURE(test_dummy_encryption_v2);
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
EXT4_ATTR_FEATURE(casefold);
#endif
#ifdef CONFIG_FS_VERITY
@@ -317,7 +317,7 @@ EXT4_ATTR_FEATURE(verity);
#endif
EXT4_ATTR_FEATURE(metadata_csum_seed);
EXT4_ATTR_FEATURE(fast_commit);
-#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
+#if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
EXT4_ATTR_FEATURE(encrypted_casefold);
#endif
@@ -329,7 +329,7 @@ static struct attribute *ext4_feat_attrs[] = {
ATTR_LIST(encryption),
ATTR_LIST(test_dummy_encryption_v2),
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
ATTR_LIST(casefold),
#endif
#ifdef CONFIG_FS_VERITY
@@ -337,7 +337,7 @@ static struct attribute *ext4_feat_attrs[] = {
#endif
ATTR_LIST(metadata_csum_seed),
ATTR_LIST(fast_commit),
-#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
+#if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION)
ATTR_LIST(encrypted_casefold),
#endif
NULL,
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index 1e0fc1ed845b..042325349098 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -2408,7 +2408,7 @@ retry_inode:
if (IS_SYNC(inode))
ext4_handle_sync(handle);
}
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle);
cleanup:
brelse(is.iloc.bh);
@@ -2486,7 +2486,7 @@ retry:
if (error == 0)
error = error2;
}
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, NULL);
return error;
}
@@ -2920,7 +2920,7 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
error);
goto cleanup;
}
- ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR);
+ ext4_fc_mark_ineligible(inode->i_sb, EXT4_FC_REASON_XATTR, handle);
}
error = 0;
cleanup:
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig
index 7eea3cfd894d..f46a7339d6cf 100644
--- a/fs/f2fs/Kconfig
+++ b/fs/f2fs/Kconfig
@@ -7,6 +7,7 @@ config F2FS_FS
select CRYPTO_CRC32
select F2FS_FS_XATTR if FS_ENCRYPTION
select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
+ select FS_IOMAP
select LZ4_COMPRESS if F2FS_FS_LZ4
select LZ4_DECOMPRESS if F2FS_FS_LZ4
select LZ4HC_COMPRESS if F2FS_FS_LZ4HC
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index f1693d45bb78..982f0170639f 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -664,7 +664,7 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
/* truncate all the data during iput */
iput(inode);
- err = f2fs_get_node_info(sbi, ino, &ni);
+ err = f2fs_get_node_info(sbi, ino, &ni, false);
if (err)
goto err_out;
@@ -1302,8 +1302,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
unsigned long flags;
if (cpc->reason & CP_UMOUNT) {
- if (le32_to_cpu(ckpt->cp_pack_total_block_count) >
- sbi->blocks_per_seg - NM_I(sbi)->nat_bits_blocks) {
+ if (le32_to_cpu(ckpt->cp_pack_total_block_count) +
+ NM_I(sbi)->nat_bits_blocks > sbi->blocks_per_seg) {
clear_ckpt_flags(sbi, CP_NAT_BITS_FLAG);
f2fs_notice(sbi, "Disable nat_bits due to no space");
} else if (!is_set_ckpt_flags(sbi, CP_NAT_BITS_FLAG) &&
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 49121a21f749..d0c3aeba5945 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -154,6 +154,7 @@ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
cc->rpages = NULL;
cc->nr_rpages = 0;
cc->nr_cpages = 0;
+ cc->valid_nr_cpages = 0;
if (!reuse)
cc->cluster_idx = NULL_CLUSTER;
}
@@ -620,7 +621,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm];
unsigned int max_len, new_nr_cpages;
- struct page **new_cpages;
u32 chksum = 0;
int i, ret;
@@ -635,6 +635,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
max_len = COMPRESS_HEADER_SIZE + cc->clen;
cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
+ cc->valid_nr_cpages = cc->nr_cpages;
cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
if (!cc->cpages) {
@@ -685,13 +686,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);
- /* Now we're going to cut unnecessary tail pages */
- new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
- if (!new_cpages) {
- ret = -ENOMEM;
- goto out_vunmap_cbuf;
- }
-
/* zero out any unused part of the last page */
memset(&cc->cbuf->cdata[cc->clen], 0,
(new_nr_cpages * PAGE_SIZE) -
@@ -701,10 +695,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
vm_unmap_ram(cc->rbuf, cc->cluster_size);
for (i = 0; i < cc->nr_cpages; i++) {
- if (i < new_nr_cpages) {
- new_cpages[i] = cc->cpages[i];
+ if (i < new_nr_cpages)
continue;
- }
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
@@ -712,9 +704,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
if (cops->destroy_compress_ctx)
cops->destroy_compress_ctx(cc);
- page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
- cc->cpages = new_cpages;
- cc->nr_cpages = new_nr_cpages;
+ cc->valid_nr_cpages = new_nr_cpages;
trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
cc->clen, ret);
@@ -1296,7 +1286,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT;
- err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
if (err)
goto out_put_dnode;
@@ -1308,14 +1298,14 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
cic->inode = inode;
- atomic_set(&cic->pending_pages, cc->nr_cpages);
+ atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
if (!cic->rpages)
goto out_put_cic;
cic->nr_rpages = cc->cluster_size;
- for (i = 0; i < cc->nr_cpages; i++) {
+ for (i = 0; i < cc->valid_nr_cpages; i++) {
f2fs_set_compressed_page(cc->cpages[i], inode,
cc->rpages[i + 1]->index, cic);
fio.compressed_page = cc->cpages[i];
@@ -1360,7 +1350,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
fio.compr_blocks++;
- if (i > cc->nr_cpages) {
+ if (i > cc->valid_nr_cpages) {
if (__is_valid_data_blkaddr(blkaddr)) {
f2fs_invalidate_blocks(sbi, blkaddr);
f2fs_update_data_blkaddr(&dn, NEW_ADDR);
@@ -1385,8 +1375,8 @@ unlock_continue:
if (fio.compr_blocks)
f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
- f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
- add_compr_block_stat(inode, cc->nr_cpages);
+ f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
+ add_compr_block_stat(inode, cc->valid_nr_cpages);
set_inode_flag(cc->inode, FI_APPEND_WRITE);
if (cc->cluster_idx == 0)
@@ -1424,9 +1414,7 @@ out_unlock_op:
else
f2fs_unlock_op(sbi);
out_free:
- for (i = 0; i < cc->nr_cpages; i++) {
- if (!cc->cpages[i])
- continue;
+ for (i = 0; i < cc->valid_nr_cpages; i++) {
f2fs_compress_free_page(cc->cpages[i]);
cc->cpages[i] = NULL;
}
@@ -1468,25 +1456,38 @@ static int f2fs_write_raw_pages(struct compress_ctx *cc,
enum iostat_type io_type)
{
struct address_space *mapping = cc->inode->i_mapping;
- int _submitted, compr_blocks, ret;
- int i = -1, err = 0;
+ int _submitted, compr_blocks, ret, i;
compr_blocks = f2fs_compressed_blocks(cc);
- if (compr_blocks < 0) {
- err = compr_blocks;
- goto out_err;
+
+ for (i = 0; i < cc->cluster_size; i++) {
+ if (!cc->rpages[i])
+ continue;
+
+ redirty_page_for_writepage(wbc, cc->rpages[i]);
+ unlock_page(cc->rpages[i]);
}
+ if (compr_blocks < 0)
+ return compr_blocks;
+
for (i = 0; i < cc->cluster_size; i++) {
if (!cc->rpages[i])
continue;
retry_write:
+ lock_page(cc->rpages[i]);
+
if (cc->rpages[i]->mapping != mapping) {
+continue_unlock:
unlock_page(cc->rpages[i]);
continue;
}
- BUG_ON(!PageLocked(cc->rpages[i]));
+ if (!PageDirty(cc->rpages[i]))
+ goto continue_unlock;
+
+ if (!clear_page_dirty_for_io(cc->rpages[i]))
+ goto continue_unlock;
ret = f2fs_write_single_data_page(cc->rpages[i], &_submitted,
NULL, NULL, wbc, io_type,
@@ -1501,26 +1502,15 @@ retry_write:
* avoid deadlock caused by cluster update race
* from foreground operation.
*/
- if (IS_NOQUOTA(cc->inode)) {
- err = 0;
- goto out_err;
- }
+ if (IS_NOQUOTA(cc->inode))
+ return 0;
ret = 0;
cond_resched();
congestion_wait(BLK_RW_ASYNC,
DEFAULT_IO_TIMEOUT);
- lock_page(cc->rpages[i]);
-
- if (!PageDirty(cc->rpages[i])) {
- unlock_page(cc->rpages[i]);
- continue;
- }
-
- clear_page_dirty_for_io(cc->rpages[i]);
goto retry_write;
}
- err = ret;
- goto out_err;
+ return ret;
}
*submitted += _submitted;
@@ -1529,14 +1519,6 @@ retry_write:
f2fs_balance_fs(F2FS_M_SB(mapping), true);
return 0;
-out_err:
- for (++i; i < cc->cluster_size; i++) {
- if (!cc->rpages[i])
- continue;
- redirty_page_for_writepage(wbc, cc->rpages[i]);
- unlock_page(cc->rpages[i]);
- }
- return err;
}
int f2fs_write_multi_pages(struct compress_ctx *cc,
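The rewritten f2fs_write_raw_pages() redirties and unlocks every cluster page up front, then re-takes each page lock before issuing I/O; because a page may have been truncated or cleaned in the unlocked window, both conditions are re-verified before clear_page_dirty_for_io(). The per-page gate as a standalone sketch (page_ready_for_writeback is an illustrative name):

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Returns true with the page locked and its dirty bit handed to the
 * writeback path; returns false, page unlocked, when the page no
 * longer needs writing.
 */
static bool page_ready_for_writeback(struct page *page,
				     struct address_space *mapping)
{
	lock_page(page);
	if (page->mapping != mapping ||		/* truncated or migrated */
	    !PageDirty(page) ||			/* cleaned while unlocked */
	    !clear_page_dirty_for_io(page)) {	/* raced with another writer */
		unlock_page(page);
		return false;
	}
	return true;
}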
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9f754aaef558..8c417864c66a 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -8,9 +8,9 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
+#include <linux/sched/mm.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
-#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
@@ -18,9 +18,9 @@
#include <linux/swap.h>
#include <linux/prefetch.h>
#include <linux/uio.h>
-#include <linux/cleancache.h>
#include <linux/sched/signal.h>
#include <linux/fiemap.h>
+#include <linux/iomap.h>
#include "f2fs.h"
#include "node.h"
@@ -1354,7 +1354,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
- err = f2fs_get_node_info(sbi, dn->nid, &ni);
+ err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
if (err)
return err;
@@ -1376,61 +1376,9 @@ alloc:
f2fs_invalidate_compress_page(sbi, old_blkaddr);
}
f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
-
- /*
- * i_size will be updated by direct_IO. Otherwise, we'll get stale
- * data from unwritten block via dio_read.
- */
return 0;
}
-int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from)
-{
- struct inode *inode = file_inode(iocb->ki_filp);
- struct f2fs_map_blocks map;
- int flag;
- int err = 0;
- bool direct_io = iocb->ki_flags & IOCB_DIRECT;
-
- map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos);
- map.m_len = F2FS_BYTES_TO_BLK(iocb->ki_pos + iov_iter_count(from));
- if (map.m_len > map.m_lblk)
- map.m_len -= map.m_lblk;
- else
- map.m_len = 0;
-
- map.m_next_pgofs = NULL;
- map.m_next_extent = NULL;
- map.m_seg_type = NO_CHECK_TYPE;
- map.m_may_create = true;
-
- if (direct_io) {
- map.m_seg_type = f2fs_rw_hint_to_seg_type(iocb->ki_hint);
- flag = f2fs_force_buffered_io(inode, iocb, from) ?
- F2FS_GET_BLOCK_PRE_AIO :
- F2FS_GET_BLOCK_PRE_DIO;
- goto map_blocks;
- }
- if (iocb->ki_pos + iov_iter_count(from) > MAX_INLINE_DATA(inode)) {
- err = f2fs_convert_inline_inode(inode);
- if (err)
- return err;
- }
- if (f2fs_has_inline_data(inode))
- return err;
-
- flag = F2FS_GET_BLOCK_PRE_AIO;
-
-map_blocks:
- err = f2fs_map_blocks(inode, &map, 1, flag);
- if (map.m_len > 0 && err == -ENOSPC) {
- if (!direct_io)
- set_inode_flag(inode, FI_NO_PREALLOC);
- err = 0;
- }
- return err;
-}
-
void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock)
{
if (flag == F2FS_GET_BLOCK_PRE_AIO) {
@@ -1590,8 +1538,11 @@ next_block:
flag != F2FS_GET_BLOCK_DIO);
err = __allocate_data_block(&dn,
map->m_seg_type);
- if (!err)
+ if (!err) {
+ if (flag == F2FS_GET_BLOCK_PRE_DIO)
+ file_need_truncate(inode);
set_inode_flag(inode, FI_APPEND_WRITE);
+ }
}
if (err)
goto sync_out;
@@ -1786,50 +1737,6 @@ static inline u64 blks_to_bytes(struct inode *inode, u64 blks)
return (blks << inode->i_blkbits);
}
-static int __get_data_block(struct inode *inode, sector_t iblock,
- struct buffer_head *bh, int create, int flag,
- pgoff_t *next_pgofs, int seg_type, bool may_write)
-{
- struct f2fs_map_blocks map;
- int err;
-
- map.m_lblk = iblock;
- map.m_len = bytes_to_blks(inode, bh->b_size);
- map.m_next_pgofs = next_pgofs;
- map.m_next_extent = NULL;
- map.m_seg_type = seg_type;
- map.m_may_create = may_write;
-
- err = f2fs_map_blocks(inode, &map, create, flag);
- if (!err) {
- map_bh(bh, inode->i_sb, map.m_pblk);
- bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
- bh->b_size = blks_to_bytes(inode, map.m_len);
-
- if (map.m_multidev_dio)
- bh->b_bdev = map.m_bdev;
- }
- return err;
-}
-
-static int get_data_block_dio_write(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DIO, NULL,
- f2fs_rw_hint_to_seg_type(inode->i_write_hint),
- true);
-}
-
-static int get_data_block_dio(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
-{
- return __get_data_block(inode, iblock, bh_result, create,
- F2FS_GET_BLOCK_DIO, NULL,
- f2fs_rw_hint_to_seg_type(inode->i_write_hint),
- false);
-}
-
static int f2fs_xattr_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo)
{
@@ -1849,7 +1756,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
if (!page)
return -ENOMEM;
- err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
if (err) {
f2fs_put_page(page, 1);
return err;
@@ -1881,7 +1788,7 @@ static int f2fs_xattr_fiemap(struct inode *inode,
if (!page)
return -ENOMEM;
- err = f2fs_get_node_info(sbi, xnid, &ni);
+ err = f2fs_get_node_info(sbi, xnid, &ni, false);
if (err) {
f2fs_put_page(page, 1);
return err;
@@ -2127,12 +2034,6 @@ got_it:
block_nr = map->m_pblk + block_in_file - map->m_lblk;
SetPageMappedToDisk(page);
- if (!PageUptodate(page) && (!PageSwapCache(page) &&
- !cleancache_get_page(page))) {
- SetPageUptodate(page);
- goto confused;
- }
-
if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
DATA_GENERIC_ENHANCE_READ)) {
ret = -EFSCORRUPTED;
@@ -2188,12 +2089,6 @@ submit_and_realloc:
ClearPageError(page);
*last_block_in_bio = block_nr;
goto out;
-confused:
- if (bio) {
- __submit_bio(F2FS_I_SB(inode), bio, DATA);
- bio = NULL;
- }
- unlock_page(page);
out:
*bio_ret = bio;
return ret;
@@ -2542,7 +2437,7 @@ retry_encrypt:
/* flush pending IOs and wait for a while in the ENOMEM case */
if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
f2fs_flush_merged_writes(fio->sbi);
- congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
gfp_flags |= __GFP_NOFAIL;
goto retry_encrypt;
}
@@ -2617,6 +2512,11 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ /* The below cases were checked when setting it. */
+ if (f2fs_is_pinned_file(inode))
+ return false;
+ if (fio && is_sbi_flag_set(sbi, SBI_NEED_FSCK))
+ return true;
if (f2fs_lfs_mode(sbi))
return true;
if (S_ISDIR(inode->i_mode))
@@ -2625,8 +2525,6 @@ bool f2fs_should_update_outplace(struct inode *inode, struct f2fs_io_info *fio)
return true;
if (f2fs_is_atomic_file(inode))
return true;
- if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
- return true;
/* swap file is migrating in aligned write mode */
if (is_inode_flag_set(inode, FI_ALIGNED_WRITE))
@@ -2738,7 +2636,7 @@ got_it:
fio->need_lock = LOCK_REQ;
}
- err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false);
if (err)
goto out_writepage;
@@ -2987,6 +2885,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
.rpages = NULL,
.nr_rpages = 0,
.cpages = NULL,
+ .valid_nr_cpages = 0,
.rbuf = NULL,
.cbuf = NULL,
.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
@@ -3305,7 +3204,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
FS_CP_DATA_IO : FS_DATA_IO);
}
-static void f2fs_write_failed(struct inode *inode, loff_t to)
+void f2fs_write_failed(struct inode *inode, loff_t to)
{
loff_t i_size = i_size_read(inode);
@@ -3339,12 +3238,10 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi,
int flag;
/*
- * we already allocated all the blocks, so we don't need to get
- * the block addresses when there is no need to fill the page.
+ * If a whole page is being written and we already preallocated all the
+ * blocks, then there is no need to get a block address now.
*/
- if (!f2fs_has_inline_data(inode) && len == PAGE_SIZE &&
- !is_inode_flag_set(inode, FI_NO_PREALLOC) &&
- !f2fs_verity_in_progress(inode))
+ if (len == PAGE_SIZE && is_inode_flag_set(inode, FI_PREALLOCATED_ALL))
return 0;
/* f2fs_lock_op avoids race between write CP and convert_inline_page */
@@ -3595,158 +3492,6 @@ unlock_out:
return copied;
}
-static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
- loff_t offset)
-{
- unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
- unsigned blkbits = i_blkbits;
- unsigned blocksize_mask = (1 << blkbits) - 1;
- unsigned long align = offset | iov_iter_alignment(iter);
- struct block_device *bdev = inode->i_sb->s_bdev;
-
- if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode))
- return 1;
-
- if (align & blocksize_mask) {
- if (bdev)
- blkbits = blksize_bits(bdev_logical_block_size(bdev));
- blocksize_mask = (1 << blkbits) - 1;
- if (align & blocksize_mask)
- return -EINVAL;
- return 1;
- }
- return 0;
-}
-
-static void f2fs_dio_end_io(struct bio *bio)
-{
- struct f2fs_private_dio *dio = bio->bi_private;
-
- dec_page_count(F2FS_I_SB(dio->inode),
- dio->write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
-
- bio->bi_private = dio->orig_private;
- bio->bi_end_io = dio->orig_end_io;
-
- kfree(dio);
-
- bio_endio(bio);
-}
-
-static void f2fs_dio_submit_bio(struct bio *bio, struct inode *inode,
- loff_t file_offset)
-{
- struct f2fs_private_dio *dio;
- bool write = (bio_op(bio) == REQ_OP_WRITE);
-
- dio = f2fs_kzalloc(F2FS_I_SB(inode),
- sizeof(struct f2fs_private_dio), GFP_NOFS);
- if (!dio)
- goto out;
-
- dio->inode = inode;
- dio->orig_end_io = bio->bi_end_io;
- dio->orig_private = bio->bi_private;
- dio->write = write;
-
- bio->bi_end_io = f2fs_dio_end_io;
- bio->bi_private = dio;
-
- inc_page_count(F2FS_I_SB(inode),
- write ? F2FS_DIO_WRITE : F2FS_DIO_READ);
-
- submit_bio(bio);
- return;
-out:
- bio->bi_status = BLK_STS_IOERR;
- bio_endio(bio);
-}
-
-static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
-{
- struct address_space *mapping = iocb->ki_filp->f_mapping;
- struct inode *inode = mapping->host;
- struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
- struct f2fs_inode_info *fi = F2FS_I(inode);
- size_t count = iov_iter_count(iter);
- loff_t offset = iocb->ki_pos;
- int rw = iov_iter_rw(iter);
- int err;
- enum rw_hint hint = iocb->ki_hint;
- int whint_mode = F2FS_OPTION(sbi).whint_mode;
- bool do_opu;
-
- err = check_direct_IO(inode, iter, offset);
- if (err)
- return err < 0 ? err : 0;
-
- if (f2fs_force_buffered_io(inode, iocb, iter))
- return 0;
-
- do_opu = rw == WRITE && f2fs_lfs_mode(sbi);
-
- trace_f2fs_direct_IO_enter(inode, offset, count, rw);
-
- if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
- iocb->ki_hint = WRITE_LIFE_NOT_SET;
-
- if (iocb->ki_flags & IOCB_NOWAIT) {
- if (!down_read_trylock(&fi->i_gc_rwsem[rw])) {
- iocb->ki_hint = hint;
- err = -EAGAIN;
- goto out;
- }
- if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
- up_read(&fi->i_gc_rwsem[rw]);
- iocb->ki_hint = hint;
- err = -EAGAIN;
- goto out;
- }
- } else {
- down_read(&fi->i_gc_rwsem[rw]);
- if (do_opu)
- down_read(&fi->i_gc_rwsem[READ]);
- }
-
- err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev,
- iter, rw == WRITE ? get_data_block_dio_write :
- get_data_block_dio, NULL, f2fs_dio_submit_bio,
- rw == WRITE ? DIO_LOCKING | DIO_SKIP_HOLES :
- DIO_SKIP_HOLES);
-
- if (do_opu)
- up_read(&fi->i_gc_rwsem[READ]);
-
- up_read(&fi->i_gc_rwsem[rw]);
-
- if (rw == WRITE) {
- if (whint_mode == WHINT_MODE_OFF)
- iocb->ki_hint = hint;
- if (err > 0) {
- f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
- err);
- if (!do_opu)
- set_inode_flag(inode, FI_UPDATE_WRITE);
- } else if (err == -EIOCBQUEUED) {
- f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_IO,
- count - iov_iter_count(iter));
- } else if (err < 0) {
- f2fs_write_failed(inode, offset + count);
- }
- } else {
- if (err > 0)
- f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, err);
- else if (err == -EIOCBQUEUED)
- f2fs_update_iostat(F2FS_I_SB(inode), APP_DIRECT_READ_IO,
- count - iov_iter_count(iter));
- }
-
-out:
- trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
-
- return err;
-}
-
void f2fs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
{
@@ -3770,12 +3515,9 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset,
clear_page_private_gcing(page);
- if (test_opt(sbi, COMPRESS_CACHE)) {
- if (f2fs_compressed_file(inode))
- f2fs_invalidate_compress_pages(sbi, inode->i_ino);
- if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
- clear_page_private_data(page);
- }
+ if (test_opt(sbi, COMPRESS_CACHE) &&
+ inode->i_ino == F2FS_COMPRESS_INO(sbi))
+ clear_page_private_data(page);
if (page_private_atomic(page))
return f2fs_drop_inmem_page(inode, page);
@@ -3795,12 +3537,9 @@ int f2fs_release_page(struct page *page, gfp_t wait)
return 0;
if (test_opt(F2FS_P_SB(page), COMPRESS_CACHE)) {
- struct f2fs_sb_info *sbi = F2FS_P_SB(page);
struct inode *inode = page->mapping->host;
- if (f2fs_compressed_file(inode))
- f2fs_invalidate_compress_pages(sbi, inode->i_ino);
- if (inode->i_ino == F2FS_COMPRESS_INO(sbi))
+ if (inode->i_ino == F2FS_COMPRESS_INO(F2FS_I_SB(inode)))
clear_page_private_data(page);
}
@@ -4202,7 +3941,7 @@ const struct address_space_operations f2fs_dblock_aops = {
.set_page_dirty = f2fs_set_data_page_dirty,
.invalidatepage = f2fs_invalidate_page,
.releasepage = f2fs_release_page,
- .direct_IO = f2fs_direct_IO,
+ .direct_IO = noop_direct_IO,
.bmap = f2fs_bmap,
.swap_activate = f2fs_swap_activate,
.swap_deactivate = f2fs_swap_deactivate,
@@ -4282,3 +4021,58 @@ void f2fs_destroy_bio_entry_cache(void)
{
kmem_cache_destroy(bio_entry_slab);
}
+
+static int f2fs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ unsigned int flags, struct iomap *iomap,
+ struct iomap *srcmap)
+{
+ struct f2fs_map_blocks map = {};
+ pgoff_t next_pgofs = 0;
+ int err;
+
+ map.m_lblk = bytes_to_blks(inode, offset);
+ map.m_len = bytes_to_blks(inode, offset + length - 1) - map.m_lblk + 1;
+ map.m_next_pgofs = &next_pgofs;
+ map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
+ if (flags & IOMAP_WRITE)
+ map.m_may_create = true;
+
+ err = f2fs_map_blocks(inode, &map, flags & IOMAP_WRITE,
+ F2FS_GET_BLOCK_DIO);
+ if (err)
+ return err;
+
+ iomap->offset = blks_to_bytes(inode, map.m_lblk);
+
+ if (map.m_flags & (F2FS_MAP_MAPPED | F2FS_MAP_UNWRITTEN)) {
+ iomap->length = blks_to_bytes(inode, map.m_len);
+ if (map.m_flags & F2FS_MAP_MAPPED) {
+ iomap->type = IOMAP_MAPPED;
+ iomap->flags |= IOMAP_F_MERGED;
+ } else {
+ iomap->type = IOMAP_UNWRITTEN;
+ }
+ if (WARN_ON_ONCE(!__is_valid_data_blkaddr(map.m_pblk)))
+ return -EINVAL;
+
+ iomap->bdev = map.m_bdev;
+ iomap->addr = blks_to_bytes(inode, map.m_pblk);
+ } else {
+ iomap->length = blks_to_bytes(inode, next_pgofs) -
+ iomap->offset;
+ iomap->type = IOMAP_HOLE;
+ iomap->addr = IOMAP_NULL_ADDR;
+ }
+
+ if (map.m_flags & F2FS_MAP_NEW)
+ iomap->flags |= IOMAP_F_NEW;
+ if ((inode->i_state & I_DIRTY_DATASYNC) ||
+ offset + length > i_size_read(inode))
+ iomap->flags |= IOMAP_F_DIRTY;
+
+ return 0;
+}
+
+const struct iomap_ops f2fs_iomap_ops = {
+ .iomap_begin = f2fs_iomap_begin,
+};
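With an iomap_begin callback in place, the generic iomap engine can drive direct I/O. The read and write paths added to file.c below use the lower-level __iomap_dio_rw() so they can keep their in-flight page counters exact, but the minimal hookup would be a sketch like this (example_dio_read is an illustrative name; locking and inode checks elided):

#include <linux/iomap.h>

/* Simplest possible consumer: no private dio_ops, no dio flags, and a
 * done_before of zero.
 */
static ssize_t example_dio_read(struct kiocb *iocb, struct iov_iter *to)
{
	return iomap_dio_rw(iocb, to, &f2fs_iomap_ops, NULL, 0, 0);
}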
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 1820e9c106f7..166f08623362 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -16,7 +16,7 @@
#include "xattr.h"
#include <trace/events/f2fs.h>
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
extern struct kmem_cache *f2fs_cf_name_slab;
#endif
@@ -79,7 +79,7 @@ unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de)
int f2fs_init_casefolded_name(const struct inode *dir,
struct f2fs_filename *fname)
{
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
struct super_block *sb = dir->i_sb;
if (IS_CASEFOLDED(dir)) {
@@ -174,7 +174,7 @@ void f2fs_free_filename(struct f2fs_filename *fname)
kfree(fname->crypto_buf.name);
fname->crypto_buf.name = NULL;
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (fname->cf_name.name) {
kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
fname->cf_name.name = NULL;
@@ -208,7 +208,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir,
return f2fs_find_target_dentry(&d, fname, max_slots);
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/*
* Test whether a case-insensitive directory entry matches the filename
* being searched for.
@@ -266,7 +266,7 @@ static inline int f2fs_match_name(const struct inode *dir,
{
struct fscrypt_name f;
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (fname->cf_name.name) {
struct qstr cf = FSTR_TO_QSTR(&fname->cf_name);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index ce9fc9f13000..68b44015514f 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -28,6 +28,8 @@
#include <linux/fscrypt.h>
#include <linux/fsverity.h>
+struct pagevec;
+
#ifdef CONFIG_F2FS_CHECK_FS
#define f2fs_bug_on(sbi, condition) BUG_ON(condition)
#else
@@ -56,6 +58,7 @@ enum {
FAULT_WRITE_IO,
FAULT_SLAB_ALLOC,
FAULT_DQUOT_INIT,
+ FAULT_LOCK_OP,
FAULT_MAX,
};
@@ -485,7 +488,7 @@ struct f2fs_filename {
*/
struct fscrypt_str crypto_buf;
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/*
* For casefolded directories: the casefolded name, but it's left NULL
* if the original name is not valid Unicode, if the directory is both
@@ -654,6 +657,7 @@ enum {
#define FADVISE_KEEP_SIZE_BIT 0x10
#define FADVISE_HOT_BIT 0x20
#define FADVISE_VERITY_BIT 0x40
+#define FADVISE_TRUNC_BIT 0x80
#define FADVISE_MODIFIABLE_BITS (FADVISE_COLD_BIT | FADVISE_HOT_BIT)
@@ -681,6 +685,10 @@ enum {
#define file_is_verity(inode) is_file(inode, FADVISE_VERITY_BIT)
#define file_set_verity(inode) set_file(inode, FADVISE_VERITY_BIT)
+#define file_should_truncate(inode) is_file(inode, FADVISE_TRUNC_BIT)
+#define file_need_truncate(inode) set_file(inode, FADVISE_TRUNC_BIT)
+#define file_dont_truncate(inode) clear_file(inode, FADVISE_TRUNC_BIT)
+
#define DEF_DIR_LEVEL 0
enum {
@@ -715,7 +723,7 @@ enum {
FI_INLINE_DOTS, /* indicate inline dot dentries */
FI_DO_DEFRAG, /* indicate defragment is running */
FI_DIRTY_FILE, /* indicate regular/symlink has dirty pages */
- FI_NO_PREALLOC, /* indicate skipped preallocated blocks */
+ FI_PREALLOCATED_ALL, /* all blocks for write were preallocated */
FI_HOT_DATA, /* indicate file is hot */
FI_EXTRA_ATTR, /* indicate file has extra attribute */
FI_PROJ_INHERIT, /* indicate file inherits projectid */
@@ -1018,6 +1026,7 @@ struct f2fs_sm_info {
unsigned int segment_count; /* total # of segments */
unsigned int main_segments; /* # of segments in main area */
unsigned int reserved_segments; /* # of reserved segments */
+ unsigned int additional_reserved_segments; /* reserved segs for IO align feature */
unsigned int ovp_segments; /* # of overprovision segments */
/* a threshold to reclaim prefree segments */
@@ -1486,6 +1495,7 @@ struct compress_ctx {
unsigned int nr_rpages; /* total page number in rpages */
struct page **cpages; /* pages store compressed data in cluster */
unsigned int nr_cpages; /* total page number in cpages */
+ unsigned int valid_nr_cpages; /* valid page number in cpages */
void *rbuf; /* virtual mapped address on rpages */
struct compress_data *cbuf; /* virtual mapped address on cpages */
size_t rlen; /* valid data length in rbuf */
@@ -1677,6 +1687,9 @@ struct f2fs_sb_info {
unsigned int cur_victim_sec; /* current victim section num */
unsigned int gc_mode; /* current GC state */
unsigned int next_victim_seg[2]; /* next segment in victim section */
+ spinlock_t gc_urgent_high_lock;
+ bool gc_urgent_high_limited; /* indicates having limited trial count */
+ unsigned int gc_urgent_high_remaining; /* remaining trial count for GC_URGENT_HIGH */
/* for skip statistic */
unsigned int atomic_files; /* # of opened atomic file */
@@ -1801,13 +1814,6 @@ struct f2fs_sb_info {
#endif
};
-struct f2fs_private_dio {
- struct inode *inode;
- void *orig_private;
- bio_end_io_t *orig_end_io;
- bool write;
-};
-
#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(sbi, type) \
printk_ratelimited("%sF2FS-fs (%s) : inject %s in %s of %pS\n", \
@@ -2093,6 +2099,10 @@ static inline void f2fs_lock_op(struct f2fs_sb_info *sbi)
static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi)
{
+ if (time_to_inject(sbi, FAULT_LOCK_OP)) {
+ f2fs_show_injection_info(sbi, FAULT_LOCK_OP);
+ return 0;
+ }
return down_read_trylock(&sbi->cp_rwsem);
}
@@ -2198,6 +2208,11 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
if (!__allow_reserved_blocks(sbi, inode, true))
avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
+
+ if (F2FS_IO_ALIGNED(sbi))
+ avail_user_block_count -= sbi->blocks_per_seg *
+ SM_I(sbi)->additional_reserved_segments;
+
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
if (avail_user_block_count > sbi->unusable_block_count)
avail_user_block_count -= sbi->unusable_block_count;
@@ -2444,6 +2459,11 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
if (!__allow_reserved_blocks(sbi, inode, false))
valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
+
+ if (F2FS_IO_ALIGNED(sbi))
+ valid_block_count += sbi->blocks_per_seg *
+ SM_I(sbi)->additional_reserved_segments;
+
user_block_count = sbi->user_block_count;
if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
user_block_count -= sbi->unusable_block_count;
@@ -3116,12 +3136,16 @@ static inline int is_file(struct inode *inode, int type)
static inline void set_file(struct inode *inode, int type)
{
+ if (is_file(inode, type))
+ return;
F2FS_I(inode)->i_advise |= type;
f2fs_mark_inode_dirty_sync(inode, true);
}
static inline void clear_file(struct inode *inode, int type)
{
+ if (!is_file(inode, type))
+ return;
F2FS_I(inode)->i_advise &= ~type;
f2fs_mark_inode_dirty_sync(inode, true);
}
@@ -3406,7 +3430,7 @@ int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
- struct node_info *ni);
+ struct node_info *ni, bool checkpoint_context);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
@@ -3614,7 +3638,6 @@ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr);
int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count);
int f2fs_reserve_new_block(struct dnode_of_data *dn);
int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
-int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
int op_flags, bool for_write);
@@ -3637,6 +3660,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
struct writeback_control *wbc,
enum iostat_type io_type,
int compr_blocks, bool allow_balance);
+void f2fs_write_failed(struct inode *inode, loff_t to);
void f2fs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length);
int f2fs_release_page(struct page *page, gfp_t wait);
@@ -3650,6 +3674,7 @@ int f2fs_init_post_read_processing(void);
void f2fs_destroy_post_read_processing(void);
int f2fs_init_post_read_wq(struct f2fs_sb_info *sbi);
void f2fs_destroy_post_read_wq(struct f2fs_sb_info *sbi);
+extern const struct iomap_ops f2fs_iomap_ops;
/*
* gc.c
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 92ec2699bc85..3c98ef6af97d 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -24,6 +24,7 @@
#include <linux/sched/signal.h>
#include <linux/fileattr.h>
#include <linux/fadvise.h>
+#include <linux/iomap.h>
#include "f2fs.h"
#include "node.h"
@@ -1232,7 +1233,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode,
if (ret)
return ret;
- ret = f2fs_get_node_info(sbi, dn.nid, &ni);
+ ret = f2fs_get_node_info(sbi, dn.nid, &ni, false);
if (ret) {
f2fs_put_dnode(&dn);
return ret;
@@ -1687,6 +1688,7 @@ next_alloc:
map.m_seg_type = CURSEG_COLD_DATA_PINNED;
err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO);
+ file_dont_truncate(inode);
up_write(&sbi->pin_sem);
@@ -1748,7 +1750,11 @@ static long f2fs_fallocate(struct file *file, int mode,
(mode & (FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_INSERT_RANGE)))
return -EOPNOTSUPP;
- if (f2fs_compressed_file(inode) &&
+ /*
+ * Pinned file should not support partial truncation since the block
+ * can be used by applications.
+ */
+ if ((f2fs_compressed_file(inode) || f2fs_is_pinned_file(inode)) &&
(mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_COLLAPSE_RANGE |
FALLOC_FL_ZERO_RANGE | FALLOC_FL_INSERT_RANGE)))
return -EOPNOTSUPP;
@@ -3143,17 +3149,17 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
inode_lock(inode);
- if (f2fs_should_update_outplace(inode, NULL)) {
- ret = -EINVAL;
- goto out;
- }
-
if (!pin) {
clear_inode_flag(inode, FI_PIN_FILE);
f2fs_i_gc_failures_write(inode, 0);
goto done;
}
+ if (f2fs_should_update_outplace(inode, NULL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (f2fs_pin_file_control(inode, false)) {
ret = -EAGAIN;
goto out;
@@ -4218,27 +4224,385 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return __f2fs_ioctl(filp, cmd, arg);
}
-static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+/*
+ * Return %true if the given read or write request should use direct I/O, or
+ * %false if it should use buffered I/O.
+ */
+static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
+ struct iov_iter *iter)
+{
+ unsigned int align;
+
+ if (!(iocb->ki_flags & IOCB_DIRECT))
+ return false;
+
+ if (f2fs_force_buffered_io(inode, iocb, iter))
+ return false;
+
+ /*
+ * Direct I/O not aligned to the disk's logical_block_size will be
+ * attempted, but will fail with -EINVAL.
+ *
+ * f2fs additionally requires that direct I/O be aligned to the
+ * filesystem block size, which is often a stricter requirement.
+ * However, f2fs traditionally falls back to buffered I/O on requests
+ * that are logical_block_size-aligned but not fs-block aligned.
+ *
+ * The below logic implements this behavior.
+ */
+ align = iocb->ki_pos | iov_iter_alignment(iter);
+ if (!IS_ALIGNED(align, i_blocksize(inode)) &&
+ IS_ALIGNED(align, bdev_logical_block_size(inode->i_sb->s_bdev)))
+ return false;
+
+ return true;
+}
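Concretely, with a 4096-byte filesystem block and a 512-byte logical block, the gate behaves as follows (a hypothetical walk-through of the values, not kernel code):

/*
 *   align = 4096 -> IS_ALIGNED(4096, 4096)
 *                -> direct I/O proceeds
 *   align = 512  -> !IS_ALIGNED(512, 4096) but IS_ALIGNED(512, 512)
 *                -> fall back to buffered I/O
 *   align = 100  -> aligned to neither size
 *                -> direct I/O is attempted and fails with -EINVAL
 */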
+
+static int f2fs_dio_read_end_io(struct kiocb *iocb, ssize_t size, int error,
+ unsigned int flags)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
+
+ dec_page_count(sbi, F2FS_DIO_READ);
+ if (error)
+ return error;
+ f2fs_update_iostat(sbi, APP_DIRECT_READ_IO, size);
+ return 0;
+}
+
+static const struct iomap_dio_ops f2fs_iomap_dio_read_ops = {
+ .end_io = f2fs_dio_read_end_io,
+};
+
+static ssize_t f2fs_dio_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
- int ret;
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ const loff_t pos = iocb->ki_pos;
+ const size_t count = iov_iter_count(to);
+ struct iomap_dio *dio;
+ ssize_t ret;
+
+ if (count == 0)
+ return 0; /* skip atime update */
+
+ trace_f2fs_direct_IO_enter(inode, iocb, count, READ);
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ if (!down_read_trylock(&fi->i_gc_rwsem[READ])) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ } else {
+ down_read(&fi->i_gc_rwsem[READ]);
+ }
+
+ /*
+ * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
+ * the higher-level function iomap_dio_rw() in order to ensure that the
+ * F2FS_DIO_READ counter will be decremented correctly in all cases.
+ */
+ inc_page_count(sbi, F2FS_DIO_READ);
+ dio = __iomap_dio_rw(iocb, to, &f2fs_iomap_ops,
+ &f2fs_iomap_dio_read_ops, 0, 0);
+ if (IS_ERR_OR_NULL(dio)) {
+ ret = PTR_ERR_OR_ZERO(dio);
+ if (ret != -EIOCBQUEUED)
+ dec_page_count(sbi, F2FS_DIO_READ);
+ } else {
+ ret = iomap_dio_complete(dio);
+ }
+
+ up_read(&fi->i_gc_rwsem[READ]);
+
+ file_accessed(file);
+out:
+ trace_f2fs_direct_IO_exit(inode, pos, count, READ, ret);
+ return ret;
+}
+
+static ssize_t f2fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ ssize_t ret;
if (!f2fs_is_compress_backend_ready(inode))
return -EOPNOTSUPP;
- ret = generic_file_read_iter(iocb, iter);
+ if (f2fs_should_use_dio(inode, iocb, to))
+ return f2fs_dio_read_iter(iocb, to);
+ ret = filemap_read(iocb, to, 0);
if (ret > 0)
- f2fs_update_iostat(F2FS_I_SB(inode), APP_READ_IO, ret);
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_READ_IO, ret);
+ return ret;
+}
+
+static ssize_t f2fs_write_checks(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ ssize_t count;
+ int err;
+
+ if (IS_IMMUTABLE(inode))
+ return -EPERM;
+
+ if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED))
+ return -EPERM;
+
+ count = generic_write_checks(iocb, from);
+ if (count <= 0)
+ return count;
+
+ err = file_modified(file);
+ if (err)
+ return err;
+ return count;
+}
+
+/*
+ * Preallocate blocks for a write request, if it is possible and helpful to do
+ * so. Returns a positive number if blocks may have been preallocated, 0 if no
+ * blocks were preallocated, or a negative errno value if something went
+ * seriously wrong. Also sets FI_PREALLOCATED_ALL on the inode if *all* the
+ * requested blocks (not just some of them) have been allocated.
+ */
+static int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *iter,
+ bool dio)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ const loff_t pos = iocb->ki_pos;
+ const size_t count = iov_iter_count(iter);
+ struct f2fs_map_blocks map = {};
+ int flag;
+ int ret;
+
+ /* If it will be an out-of-place direct write, don't bother. */
+ if (dio && f2fs_lfs_mode(sbi))
+ return 0;
+ /*
+ * Don't preallocate in the region below i_size where a direct write
+ * may meet holes: with DIO_SKIP_HOLES such a write falls back to
+ * buffered I/O, so the preallocation would go unused.
+ */
+ if (dio && i_size_read(inode) &&
+ (F2FS_BYTES_TO_BLK(pos) < F2FS_BLK_ALIGN(i_size_read(inode))))
+ return 0;
+
+ /* No-wait I/O can't allocate blocks. */
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return 0;
+
+ /* If it will be a short write, don't bother. */
+ if (fault_in_iov_iter_readable(iter, count))
+ return 0;
+
+ if (f2fs_has_inline_data(inode)) {
+ /* If the data will fit inline, don't bother. */
+ if (pos + count <= MAX_INLINE_DATA(inode))
+ return 0;
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ return ret;
+ }
+
+ /* Do not preallocate blocks that the write only partially covers. */
+ map.m_lblk = F2FS_BLK_ALIGN(pos);
+ map.m_len = F2FS_BYTES_TO_BLK(pos + count);
+ if (map.m_len > map.m_lblk)
+ map.m_len -= map.m_lblk;
+ else
+ map.m_len = 0;
+ map.m_may_create = true;
+ if (dio) {
+ map.m_seg_type = f2fs_rw_hint_to_seg_type(inode->i_write_hint);
+ flag = F2FS_GET_BLOCK_PRE_DIO;
+ } else {
+ map.m_seg_type = NO_CHECK_TYPE;
+ flag = F2FS_GET_BLOCK_PRE_AIO;
+ }
+
+ ret = f2fs_map_blocks(inode, &map, 1, flag);
+ /* On -ENOSPC or -EDQUOT, reporting the number of allocated blocks is still fine. */
+ if (ret < 0 && !((ret == -ENOSPC || ret == -EDQUOT) && map.m_len > 0))
+ return ret;
+ if (ret == 0)
+ set_inode_flag(inode, FI_PREALLOCATED_ALL);
+ return map.m_len;
+}
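The m_lblk/m_len computation clamps preallocation to blocks the write fully covers. A worked example assuming 4096-byte blocks, where F2FS_BLK_ALIGN() rounds bytes up to a block count and F2FS_BYTES_TO_BLK() rounds down:

/*
 * A write of 9000 bytes at pos 5000 covers bytes [5000, 14000):
 *
 *   m_lblk = F2FS_BLK_ALIGN(5000)     = DIV_ROUND_UP(5000, 4096) = 2
 *   end    = F2FS_BYTES_TO_BLK(14000) = 14000 >> 12              = 3
 *   m_len  = end - m_lblk                                        = 1
 *
 * Only block 2 (bytes 8192..12287) is fully overwritten, so only it is
 * preallocated; the partially covered head and tail blocks are left to
 * the write path.
 */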
+
+static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct inode *inode = file_inode(file);
+ ssize_t ret;
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ return -EOPNOTSUPP;
+
+ current->backing_dev_info = inode_to_bdi(inode);
+ ret = generic_perform_write(file, from, iocb->ki_pos);
+ current->backing_dev_info = NULL;
+
+ if (ret > 0) {
+ iocb->ki_pos += ret;
+ f2fs_update_iostat(F2FS_I_SB(inode), APP_BUFFERED_IO, ret);
+ }
return ret;
}
-static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+static int f2fs_dio_write_end_io(struct kiocb *iocb, ssize_t size, int error,
+ unsigned int flags)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(iocb->ki_filp));
+
+ dec_page_count(sbi, F2FS_DIO_WRITE);
+ if (error)
+ return error;
+ f2fs_update_iostat(sbi, APP_DIRECT_IO, size);
+ return 0;
+}
+
+static const struct iomap_dio_ops f2fs_iomap_dio_write_ops = {
+ .end_io = f2fs_dio_write_end_io,
+};
+
+static ssize_t f2fs_dio_write_iter(struct kiocb *iocb, struct iov_iter *from,
+ bool *may_need_sync)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ const bool do_opu = f2fs_lfs_mode(sbi);
+ const int whint_mode = F2FS_OPTION(sbi).whint_mode;
+ const loff_t pos = iocb->ki_pos;
+ const ssize_t count = iov_iter_count(from);
+ const enum rw_hint hint = iocb->ki_hint;
+ unsigned int dio_flags;
+ struct iomap_dio *dio;
+ ssize_t ret;
+
+ trace_f2fs_direct_IO_enter(inode, iocb, count, WRITE);
+
+ if (iocb->ki_flags & IOCB_NOWAIT) {
+ /* f2fs_convert_inline_inode() and block allocation can block */
+ if (f2fs_has_inline_data(inode) ||
+ !f2fs_overwrite_io(inode, pos, count)) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (!down_read_trylock(&fi->i_gc_rwsem[WRITE])) {
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) {
+ up_read(&fi->i_gc_rwsem[WRITE]);
+ ret = -EAGAIN;
+ goto out;
+ }
+ } else {
+ ret = f2fs_convert_inline_inode(inode);
+ if (ret)
+ goto out;
+
+ down_read(&fi->i_gc_rwsem[WRITE]);
+ if (do_opu)
+ down_read(&fi->i_gc_rwsem[READ]);
+ }
+ if (whint_mode == WHINT_MODE_OFF)
+ iocb->ki_hint = WRITE_LIFE_NOT_SET;
+
+ /*
+ * We have to use __iomap_dio_rw() and iomap_dio_complete() instead of
+ * the higher-level function iomap_dio_rw() in order to ensure that the
+ * F2FS_DIO_WRITE counter will be decremented correctly in all cases.
+ */
+ inc_page_count(sbi, F2FS_DIO_WRITE);
+ dio_flags = 0;
+ if (pos + count > inode->i_size)
+ dio_flags |= IOMAP_DIO_FORCE_WAIT;
+ dio = __iomap_dio_rw(iocb, from, &f2fs_iomap_ops,
+ &f2fs_iomap_dio_write_ops, dio_flags, 0);
+ if (IS_ERR_OR_NULL(dio)) {
+ ret = PTR_ERR_OR_ZERO(dio);
+ if (ret == -ENOTBLK)
+ ret = 0;
+ if (ret != -EIOCBQUEUED)
+ dec_page_count(sbi, F2FS_DIO_WRITE);
+ } else {
+ ret = iomap_dio_complete(dio);
+ }
+
+ if (whint_mode == WHINT_MODE_OFF)
+ iocb->ki_hint = hint;
+ if (do_opu)
+ up_read(&fi->i_gc_rwsem[READ]);
+ up_read(&fi->i_gc_rwsem[WRITE]);
+
+ if (ret < 0)
+ goto out;
+ if (pos + ret > inode->i_size)
+ f2fs_i_size_write(inode, pos + ret);
+ if (!do_opu)
+ set_inode_flag(inode, FI_UPDATE_WRITE);
+
+ if (iov_iter_count(from)) {
+ ssize_t ret2;
+ loff_t bufio_start_pos = iocb->ki_pos;
+
+ /*
+ * The direct write was partial, so we need to fall back to a
+ * buffered write for the remainder.
+ */
+
+ ret2 = f2fs_buffered_write_iter(iocb, from);
+ if (iov_iter_count(from))
+ f2fs_write_failed(inode, iocb->ki_pos);
+ if (ret2 < 0)
+ goto out;
+
+ /*
+ * Ensure that the pagecache pages are written to disk and
+ * invalidated to preserve the expected O_DIRECT semantics.
+ */
+ if (ret2 > 0) {
+ loff_t bufio_end_pos = bufio_start_pos + ret2 - 1;
+
+ ret += ret2;
+
+ ret2 = filemap_write_and_wait_range(file->f_mapping,
+ bufio_start_pos,
+ bufio_end_pos);
+ if (ret2 < 0)
+ goto out;
+ invalidate_mapping_pages(file->f_mapping,
+ bufio_start_pos >> PAGE_SHIFT,
+ bufio_end_pos >> PAGE_SHIFT);
+ }
+ } else {
+ /* iomap_dio_rw() already handled the generic_write_sync(). */
+ *may_need_sync = false;
+ }
+out:
+ trace_f2fs_direct_IO_exit(inode, pos, count, WRITE, ret);
+ return ret;
+}
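The fallback epilogue above preserves O_DIRECT semantics by pushing the buffered copy to disk and dropping it from the pagecache. Reduced to a helper-shaped sketch (sync_and_drop_range is an illustrative name):

#include <linux/pagemap.h>

/* Write back the byte range [start, end] (end inclusive) and drop the
 * now-clean pages, so a later direct read hits the media rather than a
 * stale cache.
 */
static int sync_and_drop_range(struct address_space *mapping,
			       loff_t start, loff_t end)
{
	int err = filemap_write_and_wait_range(mapping, start, end);

	if (err)
		return err;
	invalidate_mapping_pages(mapping, start >> PAGE_SHIFT,
				 end >> PAGE_SHIFT);
	return 0;
}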
+
+static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct inode *inode = file_inode(iocb->ki_filp);
+ const loff_t orig_pos = iocb->ki_pos;
+ const size_t orig_count = iov_iter_count(from);
+ loff_t target_size;
+ bool dio;
+ bool may_need_sync = true;
+ int preallocated;
ssize_t ret;
if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
@@ -4260,91 +4624,42 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
inode_lock(inode);
}
- if (unlikely(IS_IMMUTABLE(inode))) {
- ret = -EPERM;
- goto unlock;
- }
-
- if (is_inode_flag_set(inode, FI_COMPRESS_RELEASED)) {
- ret = -EPERM;
- goto unlock;
- }
-
- ret = generic_write_checks(iocb, from);
- if (ret > 0) {
- bool preallocated = false;
- size_t target_size = 0;
- int err;
-
- if (fault_in_iov_iter_readable(from, iov_iter_count(from)))
- set_inode_flag(inode, FI_NO_PREALLOC);
-
- if ((iocb->ki_flags & IOCB_NOWAIT)) {
- if (!f2fs_overwrite_io(inode, iocb->ki_pos,
- iov_iter_count(from)) ||
- f2fs_has_inline_data(inode) ||
- f2fs_force_buffered_io(inode, iocb, from)) {
- clear_inode_flag(inode, FI_NO_PREALLOC);
- inode_unlock(inode);
- ret = -EAGAIN;
- goto out;
- }
- goto write;
- }
-
- if (is_inode_flag_set(inode, FI_NO_PREALLOC))
- goto write;
-
- if (iocb->ki_flags & IOCB_DIRECT) {
- /*
- * Convert inline data for Direct I/O before entering
- * f2fs_direct_IO().
- */
- err = f2fs_convert_inline_inode(inode);
- if (err)
- goto out_err;
- /*
- * If force_buffere_io() is true, we have to allocate
- * blocks all the time, since f2fs_direct_IO will fall
- * back to buffered IO.
- */
- if (!f2fs_force_buffered_io(inode, iocb, from) &&
- f2fs_lfs_mode(F2FS_I_SB(inode)))
- goto write;
- }
- preallocated = true;
- target_size = iocb->ki_pos + iov_iter_count(from);
+ ret = f2fs_write_checks(iocb, from);
+ if (ret <= 0)
+ goto out_unlock;
- err = f2fs_preallocate_blocks(iocb, from);
- if (err) {
-out_err:
- clear_inode_flag(inode, FI_NO_PREALLOC);
- inode_unlock(inode);
- ret = err;
- goto out;
- }
-write:
- ret = __generic_file_write_iter(iocb, from);
- clear_inode_flag(inode, FI_NO_PREALLOC);
+ /* Determine whether we will do a direct write or a buffered write. */
+ dio = f2fs_should_use_dio(inode, iocb, from);
- /* if we couldn't write data, we should deallocate blocks. */
- if (preallocated && i_size_read(inode) < target_size) {
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- filemap_invalidate_lock(inode->i_mapping);
- f2fs_truncate(inode);
- filemap_invalidate_unlock(inode->i_mapping);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
- }
+ /* Possibly preallocate the blocks for the write. */
+ target_size = iocb->ki_pos + iov_iter_count(from);
+ preallocated = f2fs_preallocate_blocks(iocb, from, dio);
+ if (preallocated < 0)
+ ret = preallocated;
+ else
+ /* Do the actual write. */
+ ret = dio ?
+			f2fs_dio_write_iter(iocb, from, &may_need_sync) :
+ f2fs_buffered_write_iter(iocb, from);
- if (ret > 0)
- f2fs_update_iostat(F2FS_I_SB(inode), APP_WRITE_IO, ret);
+ /* Don't leave any preallocated blocks around past i_size. */
+ if (preallocated && i_size_read(inode) < target_size) {
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ filemap_invalidate_lock(inode->i_mapping);
+ if (!f2fs_truncate(inode))
+ file_dont_truncate(inode);
+ filemap_invalidate_unlock(inode->i_mapping);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ } else {
+ file_dont_truncate(inode);
}
-unlock:
+
+ clear_inode_flag(inode, FI_PREALLOCATED_ALL);
+out_unlock:
inode_unlock(inode);
out:
- trace_f2fs_file_write_iter(inode, iocb->ki_pos,
- iov_iter_count(from), ret);
- if (ret > 0)
+ trace_f2fs_file_write_iter(inode, orig_pos, orig_count, ret);
+ if (ret > 0 && may_need_sync)
ret = generic_write_sync(iocb, ret);
return ret;
}
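
Condensed, the rewritten write path is a straight-line sequence; the outline below paraphrases the new f2fs_file_write_iter() rather than quoting it.

/*
 *	inode_lock(inode);
 *	ret = f2fs_write_checks(iocb, from);	// permission/limit checks
 *	dio = f2fs_should_use_dio(inode, iocb, from);
 *	preallocated = f2fs_preallocate_blocks(iocb, from, dio);
 *	ret = dio ? f2fs_dio_write_iter(...) : f2fs_buffered_write_iter(...);
 *	if (preallocated && i_size_read(inode) < target_size)
 *		f2fs_truncate(inode);		// drop unused preallocation
 *	inode_unlock(inode);
 *	if (ret > 0 && may_need_sync)
 *		ret = generic_write_sync(iocb, ret);
 */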
@@ -4352,12 +4667,12 @@ out:
static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
int advice)
{
- struct inode *inode;
struct address_space *mapping;
struct backing_dev_info *bdi;
+ struct inode *inode = file_inode(filp);
+ int err;
if (advice == POSIX_FADV_SEQUENTIAL) {
- inode = file_inode(filp);
if (S_ISFIFO(inode->i_mode))
return -ESPIPE;
@@ -4374,7 +4689,13 @@ static int f2fs_file_fadvise(struct file *filp, loff_t offset, loff_t len,
return 0;
}
- return generic_fadvise(filp, offset, len, advice);
+ err = generic_fadvise(filp, offset, len, advice);
+ if (!err && advice == POSIX_FADV_DONTNEED &&
+ test_opt(F2FS_I_SB(inode), COMPRESS_CACHE) &&
+ f2fs_compressed_file(inode))
+ f2fs_invalidate_compress_pages(F2FS_I_SB(inode), inode->i_ino);
+
+ return err;
}
#ifdef CONFIG_COMPAT
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index a946ce0ead34..ee308a8de432 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -7,7 +7,6 @@
*/
#include <linux/fs.h>
#include <linux/module.h>
-#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/f2fs_fs.h>
#include <linux/kthread.h>
@@ -15,6 +14,7 @@
#include <linux/freezer.h>
#include <linux/sched/signal.h>
#include <linux/random.h>
+#include <linux/sched/mm.h>
#include "f2fs.h"
#include "node.h"
@@ -92,6 +92,18 @@ static int gc_thread_func(void *data)
* So, I'd like to wait some time to collect dirty segments.
*/
if (sbi->gc_mode == GC_URGENT_HIGH) {
+ spin_lock(&sbi->gc_urgent_high_lock);
+ if (sbi->gc_urgent_high_limited) {
+ if (!sbi->gc_urgent_high_remaining) {
+ sbi->gc_urgent_high_limited = false;
+ spin_unlock(&sbi->gc_urgent_high_lock);
+ sbi->gc_mode = GC_NORMAL;
+ continue;
+ }
+ sbi->gc_urgent_high_remaining--;
+ }
+ spin_unlock(&sbi->gc_urgent_high_lock);
+
wait_ms = gc_th->urgent_sleep_time;
down_write(&sbi->gc_lock);
goto do_gc;
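
The new GC_URGENT_HIGH handling amounts to a spinlock-protected budget: each GC pass consumes one unit, and when the budget runs dry the mode demotes itself back to GC_NORMAL. A sketch of that pattern with an illustrative struct (the real fields live in f2fs_sb_info):

#include <linux/spinlock.h>
#include <linux/types.h>

struct gc_budget {
	spinlock_t	lock;
	bool		limited;	/* set when sysfs writes a nonzero budget */
	unsigned int	remaining;
};

/* Returns false when the budget is exhausted and urgent mode should end. */
static bool gc_budget_consume(struct gc_budget *b)
{
	bool ok = true;

	spin_lock(&b->lock);
	if (b->limited) {
		if (!b->remaining) {
			b->limited = false;
			ok = false;
		} else {
			b->remaining--;
		}
	}
	spin_unlock(&b->lock);
	return ok;
}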
@@ -947,7 +959,7 @@ next_step:
continue;
}
- if (f2fs_get_node_info(sbi, nid, &ni)) {
+ if (f2fs_get_node_info(sbi, nid, &ni, false)) {
f2fs_put_page(node_page, 1);
continue;
}
@@ -1015,7 +1027,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
if (IS_ERR(node_page))
return false;
- if (f2fs_get_node_info(sbi, nid, dni)) {
+ if (f2fs_get_node_info(sbi, nid, dni, false)) {
f2fs_put_page(node_page, 1);
return false;
}
@@ -1026,6 +1038,9 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
set_sbi_flag(sbi, SBI_NEED_FSCK);
}
+ if (f2fs_check_nid_range(sbi, dni->ino))
+ return false;
+
*nofs = ofs_of_node(node_page);
source_blkaddr = data_blkaddr(NULL, node_page, ofs_in_node);
f2fs_put_page(node_page, 1);
@@ -1039,7 +1054,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
if (!test_and_set_bit(segno, SIT_I(sbi)->invalid_segmap)) {
f2fs_err(sbi, "mismatched blkaddr %u (source_blkaddr %u) in seg %u",
blkaddr, source_blkaddr, segno);
- f2fs_bug_on(sbi, 1);
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
}
}
#endif
@@ -1206,7 +1221,7 @@ static int move_data_block(struct inode *inode, block_t bidx,
f2fs_wait_on_block_writeback(inode, dn.data_blkaddr);
- err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false);
if (err)
goto put_out;
@@ -1375,8 +1390,7 @@ retry:
if (err) {
clear_page_private_gcing(page);
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
goto retry;
}
if (is_dirty)
@@ -1457,7 +1471,8 @@ next_step:
if (phase == 3) {
inode = f2fs_iget(sb, dni.ino);
- if (IS_ERR(inode) || is_bad_inode(inode))
+ if (IS_ERR(inode) || is_bad_inode(inode) ||
+ special_file(inode->i_mode))
continue;
if (!down_write_trylock(
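
This hunk is one instance of a conversion repeated throughout the series: the open-coded congestion_wait() backoff on -ENOMEM becomes memalloc_retry_wait(), which sleeps for an interval suited to the allocation context. The generic retry loop, with a hypothetical some_nofs_operation() callee:

#include <linux/sched/mm.h>

static int some_nofs_operation(void);	/* hypothetical */

static int do_with_enomem_retry(void)
{
	int err;

retry:
	err = some_nofs_operation();
	if (err == -ENOMEM) {
		memalloc_retry_wait(GFP_NOFS);	/* brief, context-aware sleep */
		goto retry;
	}
	return err;
}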
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index e3beac546c63..3cb1e7a24740 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -105,7 +105,7 @@ void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname)
return;
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (IS_CASEFOLDED(dir)) {
/*
* If the casefolded name is provided, hash it instead of the
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index ea08f0dfa1bd..4b5cefa3f90c 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -131,7 +131,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
if (err)
return err;
- err = f2fs_get_node_info(fio.sbi, dn->nid, &ni);
+ err = f2fs_get_node_info(fio.sbi, dn->nid, &ni, false);
if (err) {
f2fs_truncate_data_blocks_range(dn, 1);
f2fs_put_dnode(dn);
@@ -786,7 +786,7 @@ int f2fs_inline_data_fiemap(struct inode *inode,
ilen = start + len;
ilen -= start;
- err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+ err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni, false);
if (err)
goto out;
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 0f8b2df3e1e0..0ec8e32a00b4 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -8,8 +8,8 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
-#include <linux/backing-dev.h>
#include <linux/writeback.h>
+#include <linux/sched/mm.h>
#include "f2fs.h"
#include "node.h"
@@ -516,6 +516,11 @@ make_now:
} else if (ino == F2FS_COMPRESS_INO(sbi)) {
#ifdef CONFIG_F2FS_FS_COMPRESSION
inode->i_mapping->a_ops = &f2fs_compress_aops;
+ /*
+		 * generic_error_remove_page only truncates pages of a regular
+		 * inode.
+ */
+ inode->i_mode |= S_IFREG;
#endif
mapping_set_gfp_mask(inode->i_mapping,
GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
@@ -544,6 +549,14 @@ make_now:
goto bad_inode;
}
f2fs_set_inode_flags(inode);
+
+ if (file_should_truncate(inode)) {
+ ret = f2fs_truncate(inode);
+ if (ret)
+ goto bad_inode;
+ file_dont_truncate(inode);
+ }
+
unlock_new_inode(inode);
trace_f2fs_iget(inode);
return inode;
@@ -562,7 +575,7 @@ retry:
inode = f2fs_iget(sb, ino);
if (IS_ERR(inode)) {
if (PTR_ERR(inode) == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
goto retry;
}
}
@@ -738,7 +751,8 @@ void f2fs_evict_inode(struct inode *inode)
trace_f2fs_evict_inode(inode);
truncate_inode_pages_final(&inode->i_data);
- if (test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
+ if ((inode->i_nlink || is_bad_inode(inode)) &&
+ test_opt(sbi, COMPRESS_CACHE) && f2fs_compressed_file(inode))
f2fs_invalidate_compress_pages(sbi, inode->i_ino);
if (inode->i_ino == F2FS_NODE_INO(sbi) ||
@@ -868,7 +882,7 @@ void f2fs_handle_failed_inode(struct inode *inode)
	 * so we can prevent losing this orphan when encountering a checkpoint
	 * followed by a sudden power-off.
*/
- err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false);
if (err) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_warn(sbi, "May lose orphan inode, run fsck to fix.");
diff --git a/fs/f2fs/iostat.c b/fs/f2fs/iostat.c
index cdcf54ae0db8..be599f31d3c4 100644
--- a/fs/f2fs/iostat.c
+++ b/fs/f2fs/iostat.c
@@ -92,7 +92,7 @@ static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
struct f2fs_iostat_latency iostat_lat[MAX_IO_TYPE][NR_PAGE_TYPE];
struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
- spin_lock_irq(&sbi->iostat_lat_lock);
+ spin_lock_bh(&sbi->iostat_lat_lock);
for (idx = 0; idx < MAX_IO_TYPE; idx++) {
for (io = 0; io < NR_PAGE_TYPE; io++) {
cnt = io_lat->bio_cnt[idx][io];
@@ -106,7 +106,7 @@ static inline void __record_iostat_latency(struct f2fs_sb_info *sbi)
io_lat->bio_cnt[idx][io] = 0;
}
}
- spin_unlock_irq(&sbi->iostat_lat_lock);
+ spin_unlock_bh(&sbi->iostat_lat_lock);
trace_f2fs_iostat_latency(sbi, iostat_lat);
}
@@ -120,9 +120,9 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
return;
/* Need double check under the lock */
- spin_lock(&sbi->iostat_lock);
+ spin_lock_bh(&sbi->iostat_lock);
if (time_is_after_jiffies(sbi->iostat_next_period)) {
- spin_unlock(&sbi->iostat_lock);
+ spin_unlock_bh(&sbi->iostat_lock);
return;
}
sbi->iostat_next_period = jiffies +
@@ -133,7 +133,7 @@ static inline void f2fs_record_iostat(struct f2fs_sb_info *sbi)
sbi->prev_rw_iostat[i];
sbi->prev_rw_iostat[i] = sbi->rw_iostat[i];
}
- spin_unlock(&sbi->iostat_lock);
+ spin_unlock_bh(&sbi->iostat_lock);
trace_f2fs_iostat(sbi, iostat_diff);
@@ -145,16 +145,16 @@ void f2fs_reset_iostat(struct f2fs_sb_info *sbi)
struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
int i;
- spin_lock(&sbi->iostat_lock);
+ spin_lock_bh(&sbi->iostat_lock);
for (i = 0; i < NR_IO_TYPE; i++) {
sbi->rw_iostat[i] = 0;
sbi->prev_rw_iostat[i] = 0;
}
- spin_unlock(&sbi->iostat_lock);
+ spin_unlock_bh(&sbi->iostat_lock);
- spin_lock_irq(&sbi->iostat_lat_lock);
+ spin_lock_bh(&sbi->iostat_lat_lock);
memset(io_lat, 0, sizeof(struct iostat_lat_info));
- spin_unlock_irq(&sbi->iostat_lat_lock);
+ spin_unlock_bh(&sbi->iostat_lat_lock);
}
void f2fs_update_iostat(struct f2fs_sb_info *sbi,
@@ -163,19 +163,16 @@ void f2fs_update_iostat(struct f2fs_sb_info *sbi,
if (!sbi->iostat_enable)
return;
- spin_lock(&sbi->iostat_lock);
+ spin_lock_bh(&sbi->iostat_lock);
sbi->rw_iostat[type] += io_bytes;
- if (type == APP_WRITE_IO || type == APP_DIRECT_IO)
- sbi->rw_iostat[APP_BUFFERED_IO] =
- sbi->rw_iostat[APP_WRITE_IO] -
- sbi->rw_iostat[APP_DIRECT_IO];
+ if (type == APP_BUFFERED_IO || type == APP_DIRECT_IO)
+ sbi->rw_iostat[APP_WRITE_IO] += io_bytes;
- if (type == APP_READ_IO || type == APP_DIRECT_READ_IO)
- sbi->rw_iostat[APP_BUFFERED_READ_IO] =
- sbi->rw_iostat[APP_READ_IO] -
- sbi->rw_iostat[APP_DIRECT_READ_IO];
- spin_unlock(&sbi->iostat_lock);
+ if (type == APP_BUFFERED_READ_IO || type == APP_DIRECT_READ_IO)
+ sbi->rw_iostat[APP_READ_IO] += io_bytes;
+
+ spin_unlock_bh(&sbi->iostat_lock);
f2fs_record_iostat(sbi);
}
@@ -185,7 +182,6 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
{
unsigned long ts_diff;
unsigned int iotype = iostat_ctx->type;
- unsigned long flags;
struct f2fs_sb_info *sbi = iostat_ctx->sbi;
struct iostat_lat_info *io_lat = sbi->iostat_io_lat;
int idx;
@@ -206,12 +202,12 @@ static inline void __update_iostat_latency(struct bio_iostat_ctx *iostat_ctx,
idx = WRITE_ASYNC_IO;
}
- spin_lock_irqsave(&sbi->iostat_lat_lock, flags);
+ spin_lock_bh(&sbi->iostat_lat_lock);
io_lat->sum_lat[idx][iotype] += ts_diff;
io_lat->bio_cnt[idx][iotype]++;
if (ts_diff > io_lat->peak_lat[idx][iotype])
io_lat->peak_lat[idx][iotype] = ts_diff;
- spin_unlock_irqrestore(&sbi->iostat_lat_lock, flags);
+ spin_unlock_bh(&sbi->iostat_lat_lock);
}
void iostat_update_and_unbind_ctx(struct bio *bio, int rw)
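
The lock-flavor change above rests on one observation: the iostat counters are touched only from process context and from bio-completion softirq context, so masking bottom halves is sufficient and cheaper than disabling hard IRQs. A sketch of the process-side update under that assumption:

#include <linux/spinlock.h>
#include <linux/types.h>

static void iostat_process_side(spinlock_t *lock, u64 *counter, u64 delta)
{
	spin_lock_bh(lock);	/* keeps softirq completion off this CPU's lock */
	*counter += delta;
	spin_unlock_bh(lock);
}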
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index a728a0af9ce0..5f213f05556d 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -561,7 +561,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
goto out_iput;
}
out_splice:
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (!inode && IS_CASEFOLDED(dir)) {
/* Eventually we want to call d_add_ci(dentry, NULL)
* for negative dentries in the encoding case as
@@ -622,7 +622,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
goto fail;
}
f2fs_delete_entry(de, page, dir, inode);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/* VFS negative dentries are incompatible with Encoding and
	 * Case-insensitiveness. Eventually we'll want to avoid
	 * invalidating the dentries here, along with returning the
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 556fcd8457f3..50b2874e758c 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -8,7 +8,7 @@
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/mpage.h>
-#include <linux/backing-dev.h>
+#include <linux/sched/mm.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/swap.h>
@@ -430,6 +430,10 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid,
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct nat_entry *new, *e;
+ /* Let's mitigate lock contention of nat_tree_lock during checkpoint */
+ if (rwsem_is_locked(&sbi->cp_global_sem))
+ return;
+
new = __alloc_nat_entry(sbi, nid, false);
if (!new)
return;
@@ -539,7 +543,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
}
int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
- struct node_info *ni)
+ struct node_info *ni, bool checkpoint_context)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
@@ -572,9 +576,10 @@ retry:
* nat_tree_lock. Therefore, we should retry, if we failed to grab here
* while not bothering checkpoint.
*/
- if (!rwsem_is_locked(&sbi->cp_global_sem)) {
+ if (!rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
down_read(&curseg->journal_rwsem);
- } else if (!down_read_trylock(&curseg->journal_rwsem)) {
+ } else if (rwsem_is_contended(&nm_i->nat_tree_lock) ||
+ !down_read_trylock(&curseg->journal_rwsem)) {
up_read(&nm_i->nat_tree_lock);
goto retry;
}
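
The retry path now also backs off when nat_tree_lock itself is contended, not only when the journal trylock fails, so a waiting checkpoint is never starved behind readers. A sketch of the lock-ordering dance, with generic rwsem names standing in for nat_tree_lock, the journal rwsem and cp_global_sem:

#include <linux/rwsem.h>

static void lookup_checkpoint_aware(struct rw_semaphore *outer,
				    struct rw_semaphore *inner,
				    struct rw_semaphore *cp_sem,
				    bool checkpoint_context)
{
retry:
	down_read(outer);
	if (!rwsem_is_locked(cp_sem) || checkpoint_context) {
		down_read(inner);		/* free to sleep here */
	} else if (rwsem_is_contended(outer) ||
		   !down_read_trylock(inner)) {
		up_read(outer);			/* yield to checkpoint */
		goto retry;
	}
	/* ... perform the lookup under both locks ... */
	up_read(inner);
	up_read(outer);
}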
@@ -887,7 +892,7 @@ static int truncate_node(struct dnode_of_data *dn)
int err;
pgoff_t index;
- err = f2fs_get_node_info(sbi, dn->nid, &ni);
+ err = f2fs_get_node_info(sbi, dn->nid, &ni, false);
if (err)
return err;
@@ -1286,7 +1291,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs)
goto fail;
#ifdef CONFIG_F2FS_CHECK_FS
- err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
+ err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false);
if (err) {
dec_valid_node_count(sbi, dn->inode, !ofs);
goto fail;
@@ -1348,7 +1353,7 @@ static int read_node_page(struct page *page, int op_flags)
return LOCKED_PAGE;
}
- err = f2fs_get_node_info(sbi, page->index, &ni);
+ err = f2fs_get_node_info(sbi, page->index, &ni, false);
if (err)
return err;
@@ -1600,7 +1605,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted,
nid = nid_of_node(page);
f2fs_bug_on(sbi, page->index != nid);
- if (f2fs_get_node_info(sbi, nid, &ni))
+ if (f2fs_get_node_info(sbi, nid, &ni, !do_balance))
goto redirty_out;
if (wbc->for_reclaim) {
@@ -2701,7 +2706,7 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
goto recover_xnid;
/* 1: invalidate the previous xattr nid */
- err = f2fs_get_node_info(sbi, prev_xnid, &ni);
+ err = f2fs_get_node_info(sbi, prev_xnid, &ni, false);
if (err)
return err;
@@ -2741,7 +2746,7 @@ int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
struct page *ipage;
int err;
- err = f2fs_get_node_info(sbi, ino, &old_ni);
+ err = f2fs_get_node_info(sbi, ino, &old_ni, false);
if (err)
return err;
@@ -2750,7 +2755,7 @@ int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
retry:
ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false);
if (!ipage) {
- congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
goto retry;
}
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 6a1b4668d933..79773d322c47 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -8,6 +8,7 @@
#include <asm/unaligned.h>
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
+#include <linux/sched/mm.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
@@ -45,7 +46,7 @@
static struct kmem_cache *fsync_entry_slab;
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
extern struct kmem_cache *f2fs_cf_name_slab;
#endif
@@ -148,7 +149,7 @@ static int init_recovered_filename(const struct inode *dir,
if (err)
return err;
f2fs_hash_filename(dir, fname);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/* Case-sensitive match is fine for recovery */
kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name);
fname->cf_name.name = NULL;
@@ -587,7 +588,7 @@ retry_dn:
err = f2fs_get_dnode_of_data(&dn, start, ALLOC_NODE);
if (err) {
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
goto retry_dn;
}
goto out;
@@ -595,7 +596,7 @@ retry_dn:
f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true);
- err = f2fs_get_node_info(sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
if (err)
goto err;
@@ -670,8 +671,7 @@ retry_prev:
err = check_index_in_prev_nodes(sbi, dest, &dn);
if (err) {
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
goto retry_prev;
}
goto err;
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index df9ed75f0b7a..1dabc8244083 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -9,6 +9,7 @@
#include <linux/f2fs_fs.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/sched/mm.h>
#include <linux/prefetch.h>
#include <linux/kthread.h>
#include <linux/swap.h>
@@ -245,16 +246,14 @@ retry:
LOOKUP_NODE);
if (err) {
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
- cond_resched();
+ memalloc_retry_wait(GFP_NOFS);
goto retry;
}
err = -EAGAIN;
goto next;
}
- err = f2fs_get_node_info(sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(sbi, dn.nid, &ni, false);
if (err) {
f2fs_put_dnode(&dn);
return err;
@@ -424,9 +423,7 @@ retry:
err = f2fs_do_write_data_page(&fio);
if (err) {
if (err == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
- cond_resched();
+ memalloc_retry_wait(GFP_NOFS);
goto retry;
}
unlock_page(page);
@@ -2558,8 +2555,8 @@ find_other_zone:
secno = find_next_zero_bit(free_i->free_secmap, MAIN_SECS(sbi), hint);
if (secno >= MAIN_SECS(sbi)) {
if (dir == ALLOC_RIGHT) {
- secno = find_next_zero_bit(free_i->free_secmap,
- MAIN_SECS(sbi), 0);
+ secno = find_first_zero_bit(free_i->free_secmap,
+ MAIN_SECS(sbi));
f2fs_bug_on(sbi, secno >= MAIN_SECS(sbi));
} else {
go_left = 1;
@@ -2574,8 +2571,8 @@ find_other_zone:
left_start--;
continue;
}
- left_start = find_next_zero_bit(free_i->free_secmap,
- MAIN_SECS(sbi), 0);
+ left_start = find_first_zero_bit(free_i->free_secmap,
+ MAIN_SECS(sbi));
f2fs_bug_on(sbi, left_start >= MAIN_SECS(sbi));
break;
}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 46fde9f3f28e..0291cd55cf09 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -538,7 +538,8 @@ static inline unsigned int free_segments(struct f2fs_sb_info *sbi)
static inline unsigned int reserved_segments(struct f2fs_sb_info *sbi)
{
- return SM_I(sbi)->reserved_segments;
+ return SM_I(sbi)->reserved_segments +
+ SM_I(sbi)->additional_reserved_segments;
}
static inline unsigned int free_sections(struct f2fs_sb_info *sbi)
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 040b6d02e1d8..baefd398ec1a 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -8,9 +8,9 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/sched/mm.h>
#include <linux/statfs.h>
#include <linux/buffer_head.h>
-#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/parser.h>
#include <linux/mount.h>
@@ -59,6 +59,7 @@ const char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_WRITE_IO] = "write IO error",
[FAULT_SLAB_ALLOC] = "slab alloc",
[FAULT_DQUOT_INIT] = "dquot initialize",
+ [FAULT_LOCK_OP] = "lock_op",
};
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
@@ -256,33 +257,26 @@ void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
va_end(args);
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
static const struct f2fs_sb_encodings {
__u16 magic;
char *name;
- char *version;
+ unsigned int version;
} f2fs_sb_encoding_map[] = {
- {F2FS_ENC_UTF8_12_1, "utf8", "12.1.0"},
+ {F2FS_ENC_UTF8_12_1, "utf8", UNICODE_AGE(12, 1, 0)},
};
-static int f2fs_sb_read_encoding(const struct f2fs_super_block *sb,
- const struct f2fs_sb_encodings **encoding,
- __u16 *flags)
+static const struct f2fs_sb_encodings *
+f2fs_sb_read_encoding(const struct f2fs_super_block *sb)
{
__u16 magic = le16_to_cpu(sb->s_encoding);
int i;
for (i = 0; i < ARRAY_SIZE(f2fs_sb_encoding_map); i++)
if (magic == f2fs_sb_encoding_map[i].magic)
- break;
-
- if (i >= ARRAY_SIZE(f2fs_sb_encoding_map))
- return -EINVAL;
-
- *encoding = &f2fs_sb_encoding_map[i];
- *flags = le16_to_cpu(sb->s_encoding_flags);
+ return &f2fs_sb_encoding_map[i];
- return 0;
+ return NULL;
}
struct kmem_cache *f2fs_cf_name_slab;
@@ -328,6 +322,46 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
F2FS_OPTION(sbi).s_resgid));
}
+static inline int adjust_reserved_segment(struct f2fs_sb_info *sbi)
+{
+ unsigned int sec_blks = sbi->blocks_per_seg * sbi->segs_per_sec;
+ unsigned int avg_vblocks;
+ unsigned int wanted_reserved_segments;
+ block_t avail_user_block_count;
+
+ if (!F2FS_IO_ALIGNED(sbi))
+ return 0;
+
+ /* average valid block count in section in worst case */
+ avg_vblocks = sec_blks / F2FS_IO_SIZE(sbi);
+
+ /*
+	 * We need enough free space when migrating one section in the worst case.
+ */
+ wanted_reserved_segments = (F2FS_IO_SIZE(sbi) / avg_vblocks) *
+ reserved_segments(sbi);
+ wanted_reserved_segments -= reserved_segments(sbi);
+
+ avail_user_block_count = sbi->user_block_count -
+ sbi->current_reserved_blocks -
+ F2FS_OPTION(sbi).root_reserved_blocks;
+
+ if (wanted_reserved_segments * sbi->blocks_per_seg >
+ avail_user_block_count) {
+ f2fs_err(sbi, "IO align feature can't grab additional reserved segment: %u, available segments: %u",
+ wanted_reserved_segments,
+ avail_user_block_count >> sbi->log_blocks_per_seg);
+ return -ENOSPC;
+ }
+
+ SM_I(sbi)->additional_reserved_segments = wanted_reserved_segments;
+
+ f2fs_info(sbi, "IO align feature needs additional reserved segment: %u",
+ wanted_reserved_segments);
+
+ return 0;
+}
+
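A worked example with illustrative numbers (not taken from the patch) may help:

/*
 *	blocks_per_seg = 512, segs_per_sec = 2   ->  sec_blks = 1024
 *	F2FS_IO_SIZE   = 64 blocks, reserved_segments = 100
 *
 *	avg_vblocks              = 1024 / 64             = 16
 *	wanted_reserved_segments = (64 / 16) * 100 - 100 = 300
 *
 * i.e. IO alignment inflates the worst-case cost of migrating a
 * section four-fold here, so three extra segments are held back for
 * every segment previously reserved.
 */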
static inline void adjust_unusable_cap_perc(struct f2fs_sb_info *sbi)
{
if (!F2FS_OPTION(sbi).unusable_cap_perc)
@@ -1225,7 +1259,7 @@ default_check:
return -EINVAL;
}
#endif
-#ifndef CONFIG_UNICODE
+#if !IS_ENABLED(CONFIG_UNICODE)
if (f2fs_sb_has_casefold(sbi)) {
f2fs_err(sbi,
"Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE");
@@ -1585,7 +1619,7 @@ static void f2fs_put_super(struct super_block *sb)
f2fs_destroy_iostat(sbi);
for (i = 0; i < NR_PAGE_TYPE; i++)
kvfree(sbi->write_io[i]);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
#endif
kfree(sbi);
@@ -2415,8 +2449,7 @@ repeat:
page = read_cache_page_gfp(mapping, blkidx, GFP_NOFS);
if (IS_ERR(page)) {
if (PTR_ERR(page) == -ENOMEM) {
- congestion_wait(BLK_RW_ASYNC,
- DEFAULT_IO_TIMEOUT);
+ memalloc_retry_wait(GFP_NOFS);
goto repeat;
}
set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
@@ -3548,6 +3581,7 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->seq_file_ra_mul = MIN_RA_MUL;
sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
+ spin_lock_init(&sbi->gc_urgent_high_lock);
sbi->dir_level = DEF_DIR_LEVEL;
sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
@@ -3869,31 +3903,38 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
static int f2fs_setup_casefold(struct f2fs_sb_info *sbi)
{
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) {
const struct f2fs_sb_encodings *encoding_info;
struct unicode_map *encoding;
__u16 encoding_flags;
- if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
- &encoding_flags)) {
+ encoding_info = f2fs_sb_read_encoding(sbi->raw_super);
+ if (!encoding_info) {
f2fs_err(sbi,
"Encoding requested by superblock is unknown");
return -EINVAL;
}
+ encoding_flags = le16_to_cpu(sbi->raw_super->s_encoding_flags);
encoding = utf8_load(encoding_info->version);
if (IS_ERR(encoding)) {
f2fs_err(sbi,
- "can't mount with superblock charset: %s-%s "
+ "can't mount with superblock charset: %s-%u.%u.%u "
"not supported by the kernel. flags: 0x%x.",
- encoding_info->name, encoding_info->version,
+ encoding_info->name,
+ unicode_major(encoding_info->version),
+ unicode_minor(encoding_info->version),
+ unicode_rev(encoding_info->version),
encoding_flags);
return PTR_ERR(encoding);
}
f2fs_info(sbi, "Using encoding defined by superblock: "
- "%s-%s with flags 0x%hx", encoding_info->name,
- encoding_info->version?:"\b", encoding_flags);
+ "%s-%u.%u.%u with flags 0x%hx", encoding_info->name,
+ unicode_major(encoding_info->version),
+ unicode_minor(encoding_info->version),
+ unicode_rev(encoding_info->version),
+ encoding_flags);
sbi->sb->s_encoding = encoding;
sbi->sb->s_encoding_flags = encoding_flags;
@@ -4180,6 +4221,10 @@ try_onemore:
goto free_nm;
}
+ err = adjust_reserved_segment(sbi);
+ if (err)
+ goto free_nm;
+
/* For write statistics */
sbi->sectors_written_start = f2fs_get_sectors_written(sbi);
@@ -4413,7 +4458,7 @@ free_bio_info:
for (i = 0; i < NR_PAGE_TYPE; i++)
kvfree(sbi->write_io[i]);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(sb->s_encoding);
sb->s_encoding = NULL;
#endif
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 7d289249cd7e..8ac506671245 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -118,6 +118,15 @@ static ssize_t sb_status_show(struct f2fs_attr *a,
return sprintf(buf, "%lx\n", sbi->s_flag);
}
+static ssize_t pending_discard_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ if (!SM_I(sbi)->dcc_info)
+ return -EINVAL;
+ return sprintf(buf, "%llu\n", (unsigned long long)atomic_read(
+ &SM_I(sbi)->dcc_info->discard_cmd_cnt));
+}
+
static ssize_t features_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
@@ -192,12 +201,11 @@ static ssize_t unusable_show(struct f2fs_attr *a,
static ssize_t encoding_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
struct super_block *sb = sbi->sb;
if (f2fs_sb_has_casefold(sbi))
- return sysfs_emit(buf, "%s (%d.%d.%d)\n",
- sb->s_encoding->charset,
+ return sysfs_emit(buf, "UTF-8 (%d.%d.%d)\n",
(sb->s_encoding->version >> 16) & 0xff,
(sb->s_encoding->version >> 8) & 0xff,
sb->s_encoding->version & 0xff);
@@ -415,7 +423,9 @@ out:
if (a->struct_type == RESERVED_BLOCKS) {
spin_lock(&sbi->stat_lock);
if (t > (unsigned long)(sbi->user_block_count -
- F2FS_OPTION(sbi).root_reserved_blocks)) {
+ F2FS_OPTION(sbi).root_reserved_blocks -
+ sbi->blocks_per_seg *
+ SM_I(sbi)->additional_reserved_segments)) {
spin_unlock(&sbi->stat_lock);
return -EINVAL;
}
@@ -478,6 +488,15 @@ out:
return count;
}
+ if (!strcmp(a->attr.name, "gc_urgent_high_remaining")) {
+ spin_lock(&sbi->gc_urgent_high_lock);
+ sbi->gc_urgent_high_limited = t != 0;
+ sbi->gc_urgent_high_remaining = t;
+ spin_unlock(&sbi->gc_urgent_high_lock);
+
+ return count;
+ }
+
#ifdef CONFIG_F2FS_IOSTAT
if (!strcmp(a->attr.name, "iostat_enable")) {
sbi->iostat_enable = !!t;
@@ -733,6 +752,7 @@ F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
#endif
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, data_io_flag, data_io_flag);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, node_io_flag, node_io_flag);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_urgent_high_remaining, gc_urgent_high_remaining);
F2FS_RW_ATTR(CPRC_INFO, ckpt_req_control, ckpt_thread_ioprio, ckpt_thread_ioprio);
F2FS_GENERAL_RO_ATTR(dirty_segments);
F2FS_GENERAL_RO_ATTR(free_segments);
@@ -744,6 +764,7 @@ F2FS_GENERAL_RO_ATTR(unusable);
F2FS_GENERAL_RO_ATTR(encoding);
F2FS_GENERAL_RO_ATTR(mounted_time_sec);
F2FS_GENERAL_RO_ATTR(main_blkaddr);
+F2FS_GENERAL_RO_ATTR(pending_discard);
#ifdef CONFIG_F2FS_STAT_FS
F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_foreground_calls, cp_count);
F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_background_calls, bg_cp_count);
@@ -757,7 +778,7 @@ F2FS_GENERAL_RO_ATTR(avg_vblocks);
#ifdef CONFIG_FS_ENCRYPTION
F2FS_FEATURE_RO_ATTR(encryption);
F2FS_FEATURE_RO_ATTR(test_dummy_encryption_v2);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
F2FS_FEATURE_RO_ATTR(encrypted_casefold);
#endif
#endif /* CONFIG_FS_ENCRYPTION */
@@ -776,7 +797,7 @@ F2FS_FEATURE_RO_ATTR(lost_found);
F2FS_FEATURE_RO_ATTR(verity);
#endif
F2FS_FEATURE_RO_ATTR(sb_checksum);
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
F2FS_FEATURE_RO_ATTR(casefold);
#endif
F2FS_FEATURE_RO_ATTR(readonly);
@@ -812,6 +833,7 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(main_blkaddr),
ATTR_LIST(max_small_discards),
ATTR_LIST(discard_granularity),
+ ATTR_LIST(pending_discard),
ATTR_LIST(batched_trim_sections),
ATTR_LIST(ipu_policy),
ATTR_LIST(min_ipu_util),
@@ -844,6 +866,7 @@ static struct attribute *f2fs_attrs[] = {
#endif
ATTR_LIST(data_io_flag),
ATTR_LIST(node_io_flag),
+ ATTR_LIST(gc_urgent_high_remaining),
ATTR_LIST(ckpt_thread_ioprio),
ATTR_LIST(dirty_segments),
ATTR_LIST(free_segments),
@@ -887,7 +910,7 @@ static struct attribute *f2fs_feat_attrs[] = {
#ifdef CONFIG_FS_ENCRYPTION
ATTR_LIST(encryption),
ATTR_LIST(test_dummy_encryption_v2),
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
ATTR_LIST(encrypted_casefold),
#endif
#endif /* CONFIG_FS_ENCRYPTION */
@@ -906,7 +929,7 @@ static struct attribute *f2fs_feat_attrs[] = {
ATTR_LIST(verity),
#endif
ATTR_LIST(sb_checksum),
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
ATTR_LIST(casefold),
#endif
ATTR_LIST(readonly),
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index e348f33bcb2b..8e5cd9c916ff 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -226,15 +226,18 @@ static inline const struct xattr_handler *f2fs_xattr_handler(int index)
}
static struct f2fs_xattr_entry *__find_xattr(void *base_addr,
- void *last_base_addr, int index,
- size_t len, const char *name)
+ void *last_base_addr, void **last_addr,
+ int index, size_t len, const char *name)
{
struct f2fs_xattr_entry *entry;
list_for_each_xattr(entry, base_addr) {
if ((void *)(entry) + sizeof(__u32) > last_base_addr ||
- (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr)
+ (void *)XATTR_NEXT_ENTRY(entry) > last_base_addr) {
+ if (last_addr)
+ *last_addr = entry;
return NULL;
+ }
if (entry->e_name_index != index)
continue;
@@ -254,19 +257,9 @@ static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
unsigned int inline_size = inline_xattr_size(inode);
void *max_addr = base_addr + inline_size;
- list_for_each_xattr(entry, base_addr) {
- if ((void *)entry + sizeof(__u32) > max_addr ||
- (void *)XATTR_NEXT_ENTRY(entry) > max_addr) {
- *last_addr = entry;
- return NULL;
- }
- if (entry->e_name_index != index)
- continue;
- if (entry->e_name_len != len)
- continue;
- if (!memcmp(entry->e_name, name, len))
- break;
- }
+ entry = __find_xattr(base_addr, max_addr, last_addr, index, len, name);
+ if (!entry)
+ return NULL;
/* inline xattr header or entry across max inline xattr size */
if (IS_XATTR_LAST_ENTRY(entry) &&
@@ -368,7 +361,7 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
else
cur_addr = txattr_addr;
- *xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
+ *xe = __find_xattr(cur_addr, last_txattr_addr, NULL, index, len, name);
if (!*xe) {
f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
inode->i_ino);
@@ -659,7 +652,7 @@ static int __f2fs_setxattr(struct inode *inode, int index,
last_base_addr = (void *)base_addr + XATTR_SIZE(inode);
/* find entry with wanted name. */
- here = __find_xattr(base_addr, last_base_addr, index, len, name);
+ here = __find_xattr(base_addr, last_base_addr, NULL, index, len, name);
if (!here) {
f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
inode->i_ino);
@@ -684,8 +677,17 @@ static int __f2fs_setxattr(struct inode *inode, int index,
}
last = here;
- while (!IS_XATTR_LAST_ENTRY(last))
+ while (!IS_XATTR_LAST_ENTRY(last)) {
+ if ((void *)(last) + sizeof(__u32) > last_base_addr ||
+ (void *)XATTR_NEXT_ENTRY(last) > last_base_addr) {
+ f2fs_err(F2FS_I_SB(inode), "inode (%lu) has invalid last xattr entry, entry_size: %zu",
+ inode->i_ino, ENTRY_SIZE(last));
+ set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
+ error = -EFSCORRUPTED;
+ goto exit;
+ }
last = XATTR_NEXT_ENTRY(last);
+ }
newsize = XATTR_ALIGN(sizeof(struct f2fs_xattr_entry) + len + size);
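
Both the refactored lookup and the last-entry walk in __f2fs_setxattr() now apply the same bound: bail out before either the next entry's header word or its full body would cross the end of the on-disk region. The generic shape, with hypothetical is_last()/next_entry() standing in for IS_XATTR_LAST_ENTRY()/XATTR_NEXT_ENTRY():

#include <linux/fs.h>

struct entry;					/* illustrative on-disk record */
static bool is_last(struct entry *e);		/* hypothetical */
static struct entry *next_entry(struct entry *e); /* hypothetical */

static int walk_entries_bounded(struct entry *e, void *end)
{
	while (!is_last(e)) {
		if ((void *)e + sizeof(__u32) > end ||
		    (void *)next_entry(e) > end)
			return -EFSCORRUPTED;	/* chain runs off the region */
		e = next_entry(e);
	}
	return 0;
}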
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 13855ba49cd9..a5a309fcc7fa 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -175,9 +175,10 @@ long fat_generic_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
static int fat_file_release(struct inode *inode, struct file *filp)
{
if ((filp->f_mode & FMODE_WRITE) &&
- MSDOS_SB(inode->i_sb)->options.flush) {
+ MSDOS_SB(inode->i_sb)->options.flush) {
fat_flush_inodes(inode->i_sb, inode, NULL);
- congestion_wait(BLK_RW_ASYNC, HZ/10);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ io_schedule_timeout(HZ/10);
}
return 0;
}
diff --git a/fs/file_table.c b/fs/file_table.c
index 45437f8e1003..7d2e692b66a9 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -27,13 +27,14 @@
#include <linux/task_work.h>
#include <linux/ima.h>
#include <linux/swap.h>
+#include <linux/kmemleak.h>
#include <linux/atomic.h>
#include "internal.h"
/* sysctl tunables... */
-struct files_stat_struct files_stat = {
+static struct files_stat_struct files_stat = {
.max_files = NR_FILE
};
@@ -75,22 +76,58 @@ unsigned long get_max_files(void)
}
EXPORT_SYMBOL_GPL(get_max_files);
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+
/*
* Handle nr_files sysctl
*/
-#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
-int proc_nr_files(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_nr_files(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
files_stat.nr_files = get_nr_files();
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
-#else
-int proc_nr_files(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+
+static struct ctl_table fs_stat_sysctls[] = {
+ {
+ .procname = "file-nr",
+ .data = &files_stat,
+ .maxlen = sizeof(files_stat),
+ .mode = 0444,
+ .proc_handler = proc_nr_files,
+ },
+ {
+ .procname = "file-max",
+ .data = &files_stat.max_files,
+ .maxlen = sizeof(files_stat.max_files),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ .extra1 = SYSCTL_LONG_ZERO,
+ .extra2 = SYSCTL_LONG_MAX,
+ },
+ {
+ .procname = "nr_open",
+ .data = &sysctl_nr_open,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &sysctl_nr_open_min,
+ .extra2 = &sysctl_nr_open_max,
+ },
+ { }
+};
+
+static int __init init_fs_stat_sysctls(void)
{
- return -ENOSYS;
+ register_sysctl_init("fs", fs_stat_sysctls);
+ if (IS_ENABLED(CONFIG_BINFMT_MISC)) {
+ struct ctl_table_header *hdr;
+ hdr = register_sysctl_mount_point("fs/binfmt_misc");
+ kmemleak_not_leak(hdr);
+ }
+ return 0;
}
+fs_initcall(init_fs_stat_sysctls);
#endif
static struct file *__alloc_file(int flags, const struct cred *cred)
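
The pattern here is part of a tree-wide effort this cycle to move sysctls out of the central kernel/sysctl.c table: declare a static ctl_table next to the data it controls and register it from an initcall. A minimal sketch with made-up names:

#include <linux/init.h>
#include <linux/sysctl.h>

static unsigned long example_max = 1024;	/* illustrative tunable */

static struct ctl_table example_sysctls[] = {
	{
		.procname	= "example-max",
		.data		= &example_max,
		.maxlen		= sizeof(example_max),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{ }	/* table terminator */
};

static int __init init_example_sysctls(void)
{
	register_sysctl_init("fs", example_sysctls);
	return 0;
}
fs_initcall(init_example_sysctls);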
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 67f0e88eed01..f8d7fe6db989 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -372,7 +372,7 @@ static bool inode_do_switch_wbs(struct inode *inode,
{
struct address_space *mapping = inode->i_mapping;
XA_STATE(xas, &mapping->i_pages, 0);
- struct page *page;
+ struct folio *folio;
bool switched = false;
spin_lock(&inode->i_lock);
@@ -389,21 +389,23 @@ static bool inode_do_switch_wbs(struct inode *inode,
/*
* Count and transfer stats. Note that PAGECACHE_TAG_DIRTY points
- * to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
- * pages actually under writeback.
+ * to possibly dirty folios while PAGECACHE_TAG_WRITEBACK points to
+ * folios actually under writeback.
*/
- xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
- if (PageDirty(page)) {
- dec_wb_stat(old_wb, WB_RECLAIMABLE);
- inc_wb_stat(new_wb, WB_RECLAIMABLE);
+ xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
+ if (folio_test_dirty(folio)) {
+ long nr = folio_nr_pages(folio);
+ wb_stat_mod(old_wb, WB_RECLAIMABLE, -nr);
+ wb_stat_mod(new_wb, WB_RECLAIMABLE, nr);
}
}
xas_set(&xas, 0);
- xas_for_each_marked(&xas, page, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
- WARN_ON_ONCE(!PageWriteback(page));
- dec_wb_stat(old_wb, WB_WRITEBACK);
- inc_wb_stat(new_wb, WB_WRITEBACK);
+ xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
+ long nr = folio_nr_pages(folio);
+ WARN_ON_ONCE(!folio_test_writeback(folio));
+ wb_stat_mod(old_wb, WB_WRITEBACK, -nr);
+ wb_stat_mod(new_wb, WB_WRITEBACK, nr);
}
if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
@@ -1666,6 +1668,13 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
inode->i_state |= I_DIRTY_PAGES;
+ else if (unlikely(inode->i_state & I_PINNING_FSCACHE_WB)) {
+ if (!(inode->i_state & I_DIRTY_PAGES)) {
+ inode->i_state &= ~I_PINNING_FSCACHE_WB;
+ wbc->unpinned_fscache_wb = true;
+ dirty |= I_PINNING_FSCACHE_WB; /* Cause write_inode */
+ }
+ }
spin_unlock(&inode->i_lock);
@@ -1675,6 +1684,7 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
if (ret == 0)
ret = err;
}
+ wbc->unpinned_fscache_wb = false;
trace_writeback_single_inode(inode, wbc, nr_to_write);
return ret;
}
diff --git a/fs/fs_context.c b/fs/fs_context.c
index b7e43a780a62..24ce12f0db32 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -548,7 +548,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param)
param->key);
}
- if (len > PAGE_SIZE - 2 - size)
+ if (size + len + 2 > PAGE_SIZE)
return invalf(fc, "VFS: Legacy: Cumulative options too large");
if (strchr(param->key, ',') ||
(param->type == fs_value_is_string &&
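
The one-line rewrite closes an unsigned-underflow hole: once size exceeds PAGE_SIZE - 2, the old right-hand side wraps to a huge size_t value and the guard never fires. A self-contained illustration (PAGE_SZ stands in for PAGE_SIZE):

#include <stdbool.h>
#include <stddef.h>

#define PAGE_SZ 4096u

/* Old form: the subtraction wraps when size > PAGE_SZ - 2. */
static bool old_guard(size_t size, size_t len)
{
	return len > PAGE_SZ - 2 - size;
}

/* New form: additions on the left cannot underflow. */
static bool new_guard(size_t size, size_t len)
{
	return size + len + 2 > PAGE_SZ;
}

/*
 * old_guard(4095, 100) == false  (overflow missed)
 * new_guard(4095, 100) == true   (correctly rejected)
 */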
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
index b313a978ae0a..76316c4a3fb7 100644
--- a/fs/fscache/Kconfig
+++ b/fs/fscache/Kconfig
@@ -38,3 +38,6 @@ config FSCACHE_DEBUG
enabled by setting bits in /sys/modules/fscache/parameter/debug.
See Documentation/filesystems/caching/fscache.rst for more information.
+
+config FSCACHE_OLD_API
+ bool
diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile
index 03a871d689bb..afb090ea16c4 100644
--- a/fs/fscache/Makefile
+++ b/fs/fscache/Makefile
@@ -6,13 +6,9 @@
fscache-y := \
cache.o \
cookie.o \
- fsdef.o \
io.o \
main.o \
- netfs.o \
- object.o \
- operation.o \
- page.o
+ volume.o
fscache-$(CONFIG_PROC_FS) += proc.o
fscache-$(CONFIG_FSCACHE_STATS) += stats.o
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
index bd4f44c1cce0..2749933852a9 100644
--- a/fs/fscache/cache.c
+++ b/fs/fscache/cache.c
@@ -1,209 +1,229 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache cache handling
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#define FSCACHE_DEBUG_LEVEL CACHE
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/slab.h>
#include "internal.h"
-LIST_HEAD(fscache_cache_list);
+static LIST_HEAD(fscache_caches);
DECLARE_RWSEM(fscache_addremove_sem);
-DECLARE_WAIT_QUEUE_HEAD(fscache_cache_cleared_wq);
-EXPORT_SYMBOL(fscache_cache_cleared_wq);
+EXPORT_SYMBOL(fscache_addremove_sem);
+DECLARE_WAIT_QUEUE_HEAD(fscache_clearance_waiters);
+EXPORT_SYMBOL(fscache_clearance_waiters);
-static LIST_HEAD(fscache_cache_tag_list);
+static atomic_t fscache_cache_debug_id;
/*
- * look up a cache tag
+ * Allocate a cache cookie.
*/
-struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *name)
+static struct fscache_cache *fscache_alloc_cache(const char *name)
{
- struct fscache_cache_tag *tag, *xtag;
-
- /* firstly check for the existence of the tag under read lock */
- down_read(&fscache_addremove_sem);
-
- list_for_each_entry(tag, &fscache_cache_tag_list, link) {
- if (strcmp(tag->name, name) == 0) {
- atomic_inc(&tag->usage);
- up_read(&fscache_addremove_sem);
- return tag;
- }
- }
-
- up_read(&fscache_addremove_sem);
-
- /* the tag does not exist - create a candidate */
- xtag = kzalloc(sizeof(*xtag) + strlen(name) + 1, GFP_KERNEL);
- if (!xtag)
- /* return a dummy tag if out of memory */
- return ERR_PTR(-ENOMEM);
-
- atomic_set(&xtag->usage, 1);
- strcpy(xtag->name, name);
-
- /* write lock, search again and add if still not present */
- down_write(&fscache_addremove_sem);
+ struct fscache_cache *cache;
- list_for_each_entry(tag, &fscache_cache_tag_list, link) {
- if (strcmp(tag->name, name) == 0) {
- atomic_inc(&tag->usage);
- up_write(&fscache_addremove_sem);
- kfree(xtag);
- return tag;
+ cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+ if (cache) {
+ if (name) {
+ cache->name = kstrdup(name, GFP_KERNEL);
+ if (!cache->name) {
+ kfree(cache);
+ return NULL;
+ }
}
+ refcount_set(&cache->ref, 1);
+ INIT_LIST_HEAD(&cache->cache_link);
+ cache->debug_id = atomic_inc_return(&fscache_cache_debug_id);
}
-
- list_add_tail(&xtag->link, &fscache_cache_tag_list);
- up_write(&fscache_addremove_sem);
- return xtag;
+ return cache;
}
-/*
- * release a reference to a cache tag
- */
-void __fscache_release_cache_tag(struct fscache_cache_tag *tag)
+static bool fscache_get_cache_maybe(struct fscache_cache *cache,
+ enum fscache_cache_trace where)
{
- if (tag != ERR_PTR(-ENOMEM)) {
- down_write(&fscache_addremove_sem);
+ bool success;
+ int ref;
- if (atomic_dec_and_test(&tag->usage))
- list_del_init(&tag->link);
- else
- tag = NULL;
-
- up_write(&fscache_addremove_sem);
-
- kfree(tag);
- }
+ success = __refcount_inc_not_zero(&cache->ref, &ref);
+ if (success)
+ trace_fscache_cache(cache->debug_id, ref + 1, where);
+ return success;
}
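
fscache_get_cache_maybe() is the usual conditional-get idiom: take a reference only while the count is still non-zero, so an object whose final put has already raced ahead cannot be resurrected. Reduced to its core, with an illustrative struct:

#include <linux/refcount.h>

struct obj {
	refcount_t ref;
};

static bool get_obj_maybe(struct obj *o)
{
	int old;

	/* Fails (returns false) once the refcount has already hit zero. */
	return __refcount_inc_not_zero(&o->ref, &old);
}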
/*
- * select a cache in which to store an object
- * - the cache addremove semaphore must be at least read-locked by the caller
- * - the object will never be an index
+ * Look up a cache cookie.
*/
-struct fscache_cache *fscache_select_cache_for_object(
- struct fscache_cookie *cookie)
+struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache)
{
- struct fscache_cache_tag *tag;
- struct fscache_object *object;
- struct fscache_cache *cache;
+ struct fscache_cache *candidate, *cache, *unnamed = NULL;
- _enter("");
+ /* firstly check for the existence of the cache under read lock */
+ down_read(&fscache_addremove_sem);
- if (list_empty(&fscache_cache_list)) {
- _leave(" = NULL [no cache]");
- return NULL;
+ list_for_each_entry(cache, &fscache_caches, cache_link) {
+ if (cache->name && name && strcmp(cache->name, name) == 0 &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_r;
+ if (!cache->name && !name &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_r;
}
- /* we check the parent to determine the cache to use */
- spin_lock(&cookie->lock);
+ if (!name) {
+ list_for_each_entry(cache, &fscache_caches, cache_link) {
+ if (cache->name &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_r;
+ }
+ }
- /* the first in the parent's backing list should be the preferred
- * cache */
- if (!hlist_empty(&cookie->backing_objects)) {
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
+ up_read(&fscache_addremove_sem);
- cache = object->cache;
- if (fscache_object_is_dying(object) ||
- test_bit(FSCACHE_IOERROR, &cache->flags))
- cache = NULL;
+ /* the cache does not exist - create a candidate */
+ candidate = fscache_alloc_cache(name);
+ if (!candidate)
+ return ERR_PTR(-ENOMEM);
- spin_unlock(&cookie->lock);
- _leave(" = %s [parent]", cache ? cache->tag->name : "NULL");
- return cache;
- }
+ /* write lock, search again and add if still not present */
+ down_write(&fscache_addremove_sem);
- /* the parent is unbacked */
- if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX) {
- /* cookie not an index and is unbacked */
- spin_unlock(&cookie->lock);
- _leave(" = NULL [cookie ub,ni]");
- return NULL;
+ list_for_each_entry(cache, &fscache_caches, cache_link) {
+ if (cache->name && name && strcmp(cache->name, name) == 0 &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_w;
+ if (!cache->name) {
+ unnamed = cache;
+ if (!name &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_w;
+ }
}
- spin_unlock(&cookie->lock);
+ if (unnamed && is_cache &&
+ fscache_get_cache_maybe(unnamed, fscache_cache_get_acquire))
+ goto use_unnamed_cache;
- if (!cookie->def->select_cache)
- goto no_preference;
+ if (!name) {
+ list_for_each_entry(cache, &fscache_caches, cache_link) {
+ if (cache->name &&
+ fscache_get_cache_maybe(cache, fscache_cache_get_acquire))
+ goto got_cache_w;
+ }
+ }
- /* ask the netfs for its preference */
- tag = cookie->def->select_cache(cookie->parent->netfs_data,
- cookie->netfs_data);
- if (!tag)
- goto no_preference;
+ list_add_tail(&candidate->cache_link, &fscache_caches);
+ trace_fscache_cache(candidate->debug_id,
+ refcount_read(&candidate->ref),
+ fscache_cache_new_acquire);
+ up_write(&fscache_addremove_sem);
+ return candidate;
- if (tag == ERR_PTR(-ENOMEM)) {
- _leave(" = NULL [nomem tag]");
- return NULL;
- }
+got_cache_r:
+ up_read(&fscache_addremove_sem);
+ return cache;
+use_unnamed_cache:
+ cache = unnamed;
+ cache->name = candidate->name;
+ candidate->name = NULL;
+got_cache_w:
+ up_write(&fscache_addremove_sem);
+ kfree(candidate->name);
+ kfree(candidate);
+ return cache;
+}
- if (!tag->cache) {
- _leave(" = NULL [unbacked tag]");
- return NULL;
- }
+/**
+ * fscache_acquire_cache - Acquire a cache-level cookie.
+ * @name: The name of the cache.
+ *
+ * Get a cookie to represent an actual cache. If a name is given and there is
+ * a nameless cache record available, this will acquire that and set its name,
+ * directing all the volumes using it to this cache.
+ *
+ * The cache will be switched over to the preparing state if not currently in
+ * use, otherwise -EBUSY will be returned.
+ */
+struct fscache_cache *fscache_acquire_cache(const char *name)
+{
+ struct fscache_cache *cache;
- if (test_bit(FSCACHE_IOERROR, &tag->cache->flags))
- return NULL;
+ ASSERT(name);
+ cache = fscache_lookup_cache(name, true);
+ if (IS_ERR(cache))
+ return cache;
- _leave(" = %s [specific]", tag->name);
- return tag->cache;
+ if (!fscache_set_cache_state_maybe(cache,
+ FSCACHE_CACHE_IS_NOT_PRESENT,
+ FSCACHE_CACHE_IS_PREPARING)) {
+ pr_warn("Cache tag %s in use\n", name);
+ fscache_put_cache(cache, fscache_cache_put_cache);
+ return ERR_PTR(-EBUSY);
+ }
-no_preference:
- /* netfs has no preference - just select first cache */
- cache = list_entry(fscache_cache_list.next,
- struct fscache_cache, link);
- _leave(" = %s [first]", cache->tag->name);
return cache;
}
+EXPORT_SYMBOL(fscache_acquire_cache);
/**
- * fscache_init_cache - Initialise a cache record
- * @cache: The cache record to be initialised
- * @ops: The cache operations to be installed in that record
- * @idfmt: Format string to define identifier
- * @...: sprintf-style arguments
+ * fscache_put_cache - Release a cache-level cookie.
+ * @cache: The cache cookie to be released
+ * @where: An indication of where the release happened
*
- * Initialise a record of a cache and fill in the name.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
+ * Release the caller's reference on a cache-level cookie. The @where
+ * indication should give information about the circumstances in which the call
+ * occurs and will be logged through a tracepoint.
*/
-void fscache_init_cache(struct fscache_cache *cache,
- const struct fscache_cache_ops *ops,
- const char *idfmt,
- ...)
+void fscache_put_cache(struct fscache_cache *cache,
+ enum fscache_cache_trace where)
{
- va_list va;
+ unsigned int debug_id = cache->debug_id;
+ bool zero;
+ int ref;
- memset(cache, 0, sizeof(*cache));
+ if (IS_ERR_OR_NULL(cache))
+ return;
- cache->ops = ops;
+ zero = __refcount_dec_and_test(&cache->ref, &ref);
+ trace_fscache_cache(debug_id, ref - 1, where);
- va_start(va, idfmt);
- vsnprintf(cache->identifier, sizeof(cache->identifier), idfmt, va);
- va_end(va);
+ if (zero) {
+ down_write(&fscache_addremove_sem);
+ list_del_init(&cache->cache_link);
+ up_write(&fscache_addremove_sem);
+ kfree(cache->name);
+ kfree(cache);
+ }
+}
- INIT_WORK(&cache->op_gc, fscache_operation_gc);
- INIT_LIST_HEAD(&cache->link);
- INIT_LIST_HEAD(&cache->object_list);
- INIT_LIST_HEAD(&cache->op_gc_list);
- spin_lock_init(&cache->object_list_lock);
- spin_lock_init(&cache->op_gc_list_lock);
+/**
+ * fscache_relinquish_cache - Reset cache state and release cookie
+ * @cache: The cache cookie to be released
+ *
+ * Reset the state of a cache and release the caller's reference on a cache
+ * cookie.
+ */
+void fscache_relinquish_cache(struct fscache_cache *cache)
+{
+ enum fscache_cache_trace where =
+ (cache->state == FSCACHE_CACHE_IS_PREPARING) ?
+ fscache_cache_put_prep_failed :
+ fscache_cache_put_relinquish;
+
+ cache->ops = NULL;
+ cache->cache_priv = NULL;
+ smp_store_release(&cache->state, FSCACHE_CACHE_IS_NOT_PRESENT);
+ fscache_put_cache(cache, where);
}
-EXPORT_SYMBOL(fscache_init_cache);
+EXPORT_SYMBOL(fscache_relinquish_cache);
/**
* fscache_add_cache - Declare a cache as being open for business
- * @cache: The record describing the cache
- * @ifsdef: The record of the cache object describing the top-level index
- * @tagname: The tag describing this cache
+ * @cache: The cache-level cookie representing the cache
+ * @ops: Table of cache operations to use
+ * @cache_priv: Private data for the cache record
*
* Add a cache to the system, making it available for netfs's to use.
*
@@ -211,93 +231,97 @@ EXPORT_SYMBOL(fscache_init_cache);
* description.
*/
int fscache_add_cache(struct fscache_cache *cache,
- struct fscache_object *ifsdef,
- const char *tagname)
+ const struct fscache_cache_ops *ops,
+ void *cache_priv)
{
- struct fscache_cache_tag *tag;
-
- ASSERTCMP(ifsdef->cookie, ==, &fscache_fsdef_index);
- BUG_ON(!cache->ops);
- BUG_ON(!ifsdef);
+ int n_accesses;
- cache->flags = 0;
- ifsdef->event_mask =
- ((1 << NR_FSCACHE_OBJECT_EVENTS) - 1) &
- ~(1 << FSCACHE_OBJECT_EV_CLEARED);
- __set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &ifsdef->flags);
+ _enter("{%s,%s}", ops->name, cache->name);
- if (!tagname)
- tagname = cache->identifier;
+ BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);
- BUG_ON(!tagname[0]);
-
- _enter("{%s.%s},,%s", cache->ops->name, cache->identifier, tagname);
-
- /* we use the cache tag to uniquely identify caches */
- tag = __fscache_lookup_cache_tag(tagname);
- if (IS_ERR(tag))
- goto nomem;
-
- if (test_and_set_bit(FSCACHE_TAG_RESERVED, &tag->flags))
- goto tag_in_use;
-
- cache->kobj = kobject_create_and_add(tagname, fscache_root);
- if (!cache->kobj)
- goto error;
-
- ifsdef->cache = cache;
- cache->fsdef = ifsdef;
+ /* Get a ref on the cache cookie and keep its n_accesses counter raised
+ * by 1 to prevent wakeups from transitioning it to 0 until we're
+ * withdrawing caching services from it.
+ */
+ n_accesses = atomic_inc_return(&cache->n_accesses);
+ trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+ n_accesses, fscache_access_cache_pin);
down_write(&fscache_addremove_sem);
- tag->cache = cache;
- cache->tag = tag;
-
- /* add the cache to the list */
- list_add(&cache->link, &fscache_cache_list);
-
- /* add the cache's netfs definition index object to the cache's
- * list */
- spin_lock(&cache->object_list_lock);
- list_add_tail(&ifsdef->cache_link, &cache->object_list);
- spin_unlock(&cache->object_list_lock);
-
- /* add the cache's netfs definition index object to the top level index
- * cookie as a known backing object */
- spin_lock(&fscache_fsdef_index.lock);
-
- hlist_add_head(&ifsdef->cookie_link,
- &fscache_fsdef_index.backing_objects);
-
- refcount_inc(&fscache_fsdef_index.ref);
+ cache->ops = ops;
+ cache->cache_priv = cache_priv;
+ fscache_set_cache_state(cache, FSCACHE_CACHE_IS_ACTIVE);
- /* done */
- spin_unlock(&fscache_fsdef_index.lock);
up_write(&fscache_addremove_sem);
-
- pr_notice("Cache \"%s\" added (type %s)\n",
- cache->tag->name, cache->ops->name);
- kobject_uevent(cache->kobj, KOBJ_ADD);
-
- _leave(" = 0 [%s]", cache->identifier);
+ pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
+ _leave(" = 0 [%s]", cache->name);
return 0;
+}
+EXPORT_SYMBOL(fscache_add_cache);
-tag_in_use:
- pr_err("Cache tag '%s' already in use\n", tagname);
- __fscache_release_cache_tag(tag);
- _leave(" = -EXIST");
- return -EEXIST;
-
-error:
- __fscache_release_cache_tag(tag);
- _leave(" = -EINVAL");
- return -EINVAL;
+/**
+ * fscache_begin_cache_access - Pin a cache so it can be accessed
+ * @cache: The cache-level cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Attempt to pin the cache to prevent it from going away whilst we're
+ * accessing it, returning true if successful. This works as follows:
+ *
+ * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
+ * then we return false to indicate access was not permitted.
+ *
+ * (2) If the cache tests as live, then we increment the n_accesses count and
+ * then recheck the liveness, ending the access if it ceased to be live.
+ *
+ * (3) When we end the access, we decrement n_accesses and wake up any
+ * waiters if it reaches 0.
+ *
+ * (4) Whilst the cache is caching, n_accesses is kept artificially
+ * incremented to prevent wakeups from happening.
+ *
+ * (5) When the cache is taken offline, the state is changed to prevent new
+ * accesses, n_accesses is decremented and we wait for n_accesses to
+ * become 0.
+ */
+bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ if (!fscache_cache_is_live(cache))
+ return false;
+
+ n_accesses = atomic_inc_return(&cache->n_accesses);
+ smp_mb__after_atomic(); /* Reread live flag after n_accesses */
+ trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+ n_accesses, why);
+ if (!fscache_cache_is_live(cache)) {
+ fscache_end_cache_access(cache, fscache_access_unlive);
+ return false;
+ }
+ return true;
+}
-nomem:
- _leave(" = -ENOMEM");
- return -ENOMEM;
+/**
+ * fscache_end_cache_access - Unpin a cache at the end of an access.
+ * @cache: The cache-level cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Unpin a cache after we've accessed it. The @why indicator is merely
+ * provided for tracing purposes.
+ */
+void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ smp_mb__before_atomic();
+ n_accesses = atomic_dec_return(&cache->n_accesses);
+ trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+ n_accesses, why);
+ if (n_accesses == 0)
+ wake_up_var(&cache->n_accesses);
}
-EXPORT_SYMBOL(fscache_add_cache);
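
The access-counting protocol documented above reduces to "increment, barrier, re-check liveness, back out on failure", with teardown flipping the live state first and then waiting for the counter to drain. A sketch under that reading, with a hypothetical cache_is_live() in place of fscache_cache_is_live():

#include <linux/atomic.h>
#include <linux/wait_bit.h>

static bool cache_is_live(void);	/* hypothetical */

static bool begin_access(atomic_t *n_accesses)
{
	if (!cache_is_live())
		return false;
	atomic_inc(n_accesses);
	smp_mb__after_atomic();		/* re-read liveness after the inc */
	if (!cache_is_live()) {
		if (atomic_dec_return(n_accesses) == 0)
			wake_up_var(n_accesses);	/* unblock teardown */
		return false;
	}
	return true;
}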
/**
* fscache_io_error - Note a cache I/O error
@@ -311,106 +335,94 @@ EXPORT_SYMBOL(fscache_add_cache);
*/
void fscache_io_error(struct fscache_cache *cache)
{
- if (!test_and_set_bit(FSCACHE_IOERROR, &cache->flags))
+ if (fscache_set_cache_state_maybe(cache,
+ FSCACHE_CACHE_IS_ACTIVE,
+ FSCACHE_CACHE_GOT_IOERROR))
pr_err("Cache '%s' stopped due to I/O error\n",
- cache->ops->name);
+ cache->name);
}
EXPORT_SYMBOL(fscache_io_error);
-/*
- * request withdrawal of all the objects in a cache
- * - all the objects being withdrawn are moved onto the supplied list
+/**
+ * fscache_withdraw_cache - Withdraw a cache from the active service
+ * @cache: The cache cookie
+ *
+ * Begin the process of withdrawing a cache from service. This stops new
+ * cache-level and volume-level accesses from taking place and waits for
+ * currently ongoing cache-level accesses to end.
*/
-static void fscache_withdraw_all_objects(struct fscache_cache *cache,
- struct list_head *dying_objects)
+void fscache_withdraw_cache(struct fscache_cache *cache)
{
- struct fscache_object *object;
+ int n_accesses;
- while (!list_empty(&cache->object_list)) {
- spin_lock(&cache->object_list_lock);
+ pr_notice("Withdrawing cache \"%s\" (%u objs)\n",
+ cache->name, atomic_read(&cache->object_count));
- if (!list_empty(&cache->object_list)) {
- object = list_entry(cache->object_list.next,
- struct fscache_object, cache_link);
- list_move_tail(&object->cache_link, dying_objects);
+ fscache_set_cache_state(cache, FSCACHE_CACHE_IS_WITHDRAWN);
- _debug("withdraw %x", object->cookie->debug_id);
+ /* Allow wakeups on dec-to-0 */
+ n_accesses = atomic_dec_return(&cache->n_accesses);
+ trace_fscache_access_cache(cache->debug_id, refcount_read(&cache->ref),
+ n_accesses, fscache_access_cache_unpin);
- /* This must be done under object_list_lock to prevent
- * a race with fscache_drop_object().
- */
- fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
- }
-
- spin_unlock(&cache->object_list_lock);
- cond_resched();
- }
+ wait_var_event(&cache->n_accesses,
+ atomic_read(&cache->n_accesses) == 0);
}
+EXPORT_SYMBOL(fscache_withdraw_cache);
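A cache backend would typically call this from its teardown path before handing the cache cookie back. A hedged sketch, in which struct my_cache and the final fscache_relinquish_cache() step are assumptions not shown in this hunk:

	/* Sketch: backend unbind path. */
	static void my_cache_unbind(struct my_cache *mc)
	{
		fscache_withdraw_cache(mc->cache);	/* gate new accesses, drain old ones */
		/* ... withdraw volumes and objects here ... */
		fscache_relinquish_cache(mc->cache);	/* assumed final hand-back */
	}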
-/**
- * fscache_withdraw_cache - Withdraw a cache from the active service
- * @cache: The record describing the cache
- *
- * Withdraw a cache from service, unbinding all its cache objects from the
- * netfs cookies they're currently representing.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
+#ifdef CONFIG_PROC_FS
+static const char fscache_cache_states[NR__FSCACHE_CACHE_STATE] = "-PAEW";
+
+/*
+ * Generate a list of caches in /proc/fs/fscache/caches
*/
-void fscache_withdraw_cache(struct fscache_cache *cache)
+static int fscache_caches_seq_show(struct seq_file *m, void *v)
{
- LIST_HEAD(dying_objects);
+ struct fscache_cache *cache;
- _enter("");
+ if (v == &fscache_caches) {
+ seq_puts(m,
+ "CACHE REF VOLS OBJS ACCES S NAME\n"
+ "======== ===== ===== ===== ===== = ===============\n"
+ );
+ return 0;
+ }
- pr_notice("Withdrawing cache \"%s\"\n",
- cache->tag->name);
+ cache = list_entry(v, struct fscache_cache, cache_link);
+ seq_printf(m,
+ "%08x %5d %5d %5d %5d %c %s\n",
+ cache->debug_id,
+ refcount_read(&cache->ref),
+ atomic_read(&cache->n_volumes),
+ atomic_read(&cache->object_count),
+ atomic_read(&cache->n_accesses),
+ fscache_cache_states[cache->state],
+ cache->name ?: "-");
+ return 0;
+}
- /* make the cache unavailable for cookie acquisition */
- if (test_and_set_bit(FSCACHE_CACHE_WITHDRAWN, &cache->flags))
- BUG();
+static void *fscache_caches_seq_start(struct seq_file *m, loff_t *_pos)
+ __acquires(fscache_addremove_sem)
+{
+ down_read(&fscache_addremove_sem);
+ return seq_list_start_head(&fscache_caches, *_pos);
+}
- down_write(&fscache_addremove_sem);
- list_del_init(&cache->link);
- cache->tag->cache = NULL;
- up_write(&fscache_addremove_sem);
+static void *fscache_caches_seq_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ return seq_list_next(v, &fscache_caches, _pos);
+}
- /* make sure all pages pinned by operations on behalf of the netfs are
- * written to disk */
- fscache_stat(&fscache_n_cop_sync_cache);
- cache->ops->sync_cache(cache);
- fscache_stat_d(&fscache_n_cop_sync_cache);
-
- /* dissociate all the netfs pages backed by this cache from the block
- * mappings in the cache */
- fscache_stat(&fscache_n_cop_dissociate_pages);
- cache->ops->dissociate_pages(cache);
- fscache_stat_d(&fscache_n_cop_dissociate_pages);
-
- /* we now have to destroy all the active objects pertaining to this
- * cache - which we do by passing them off to thread pool to be
- * disposed of */
- _debug("destroy");
-
- fscache_withdraw_all_objects(cache, &dying_objects);
-
- /* wait for all extant objects to finish their outstanding operations
- * and go away */
- _debug("wait for finish");
- wait_event(fscache_cache_cleared_wq,
- atomic_read(&cache->object_count) == 0);
- _debug("wait for clearance");
- wait_event(fscache_cache_cleared_wq,
- list_empty(&cache->object_list));
- _debug("cleared");
- ASSERT(list_empty(&dying_objects));
-
- kobject_put(cache->kobj);
-
- clear_bit(FSCACHE_TAG_RESERVED, &cache->tag->flags);
- fscache_release_cache_tag(cache->tag);
- cache->tag = NULL;
-
- _leave("");
+static void fscache_caches_seq_stop(struct seq_file *m, void *v)
+ __releases(fscache_addremove_sem)
+{
+ up_read(&fscache_addremove_sem);
}
-EXPORT_SYMBOL(fscache_withdraw_cache);
+
+const struct seq_operations fscache_caches_seq_ops = {
+ .start = fscache_caches_seq_start,
+ .next = fscache_caches_seq_next,
+ .stop = fscache_caches_seq_stop,
+ .show = fscache_caches_seq_show,
+};
+#endif /* CONFIG_PROC_FS */
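For illustration, the resulting /proc/fs/fscache/caches listing would look something like this (values invented; the S column is the state letter from the "-PAEW" table above):

	CACHE    REF   VOLS  OBJS  ACCES S NAME
	======== ===== ===== ===== ===== = ===============
	00000001     3     1    14     1 A mycache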
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index cd42be646ed3..9bb1ab5fe5ed 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* netfs cookie management
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* See Documentation/filesystems/caching/netfs-api.rst for more information on
@@ -15,70 +15,258 @@
struct kmem_cache *fscache_cookie_jar;
-static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
+static void fscache_cookie_lru_timed_out(struct timer_list *timer);
+static void fscache_cookie_lru_worker(struct work_struct *work);
+static void fscache_cookie_worker(struct work_struct *work);
+static void fscache_unhash_cookie(struct fscache_cookie *cookie);
+static void fscache_perform_invalidation(struct fscache_cookie *cookie);
#define fscache_cookie_hash_shift 15
static struct hlist_bl_head fscache_cookie_hash[1 << fscache_cookie_hash_shift];
static LIST_HEAD(fscache_cookies);
static DEFINE_RWLOCK(fscache_cookies_lock);
-
-static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie,
- loff_t object_size);
-static int fscache_alloc_object(struct fscache_cache *cache,
- struct fscache_cookie *cookie);
-static int fscache_attach_object(struct fscache_cookie *cookie,
- struct fscache_object *object);
-
-static void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
+static LIST_HEAD(fscache_cookie_lru);
+static DEFINE_SPINLOCK(fscache_cookie_lru_lock);
+DEFINE_TIMER(fscache_cookie_lru_timer, fscache_cookie_lru_timed_out);
+static DECLARE_WORK(fscache_cookie_lru_work, fscache_cookie_lru_worker);
+static const char fscache_cookie_states[FSCACHE_COOKIE_STATE__NR] = "-LCAIFUWRD";
+unsigned int fscache_lru_cookie_timeout = 10 * HZ;
+
+void fscache_print_cookie(struct fscache_cookie *cookie, char prefix)
{
- struct fscache_object *object;
- struct hlist_node *o;
const u8 *k;
- unsigned loop;
- pr_err("%c-cookie c=%08x [p=%08x fl=%lx nc=%u na=%u]\n",
+ pr_err("%c-cookie c=%08x [fl=%lx na=%u nA=%u s=%c]\n",
prefix,
cookie->debug_id,
- cookie->parent ? cookie->parent->debug_id : 0,
cookie->flags,
- atomic_read(&cookie->n_children),
- atomic_read(&cookie->n_active));
- pr_err("%c-cookie d=%p{%s} n=%p\n",
+ atomic_read(&cookie->n_active),
+ atomic_read(&cookie->n_accesses),
+ fscache_cookie_states[cookie->state]);
+ pr_err("%c-cookie V=%08x [%s]\n",
prefix,
- cookie->def,
- cookie->def ? cookie->def->name : "?",
- cookie->netfs_data);
-
- o = READ_ONCE(cookie->backing_objects.first);
- if (o) {
- object = hlist_entry(o, struct fscache_object, cookie_link);
- pr_err("%c-cookie o=%u\n", prefix, object->debug_id);
- }
+ cookie->volume->debug_id,
+ cookie->volume->key);
- pr_err("%c-key=[%u] '", prefix, cookie->key_len);
k = (cookie->key_len <= sizeof(cookie->inline_key)) ?
cookie->inline_key : cookie->key;
- for (loop = 0; loop < cookie->key_len; loop++)
- pr_cont("%02x", k[loop]);
- pr_cont("'\n");
+ pr_err("%c-key=[%u] '%*phN'\n", prefix, cookie->key_len, cookie->key_len, k);
}
-void fscache_free_cookie(struct fscache_cookie *cookie)
+static void fscache_free_cookie(struct fscache_cookie *cookie)
{
- if (cookie) {
- BUG_ON(!hlist_empty(&cookie->backing_objects));
- write_lock(&fscache_cookies_lock);
- list_del(&cookie->proc_link);
- write_unlock(&fscache_cookies_lock);
- if (cookie->aux_len > sizeof(cookie->inline_aux))
- kfree(cookie->aux);
- if (cookie->key_len > sizeof(cookie->inline_key))
- kfree(cookie->key);
- kmem_cache_free(fscache_cookie_jar, cookie);
+ if (WARN_ON_ONCE(!list_empty(&cookie->commit_link))) {
+ spin_lock(&fscache_cookie_lru_lock);
+ list_del_init(&cookie->commit_link);
+ spin_unlock(&fscache_cookie_lru_lock);
+ fscache_stat_d(&fscache_n_cookies_lru);
+ fscache_stat(&fscache_n_cookies_lru_removed);
+ }
+
+ if (WARN_ON_ONCE(test_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags))) {
+ fscache_print_cookie(cookie, 'F');
+ return;
}
+
+ write_lock(&fscache_cookies_lock);
+ list_del(&cookie->proc_link);
+ write_unlock(&fscache_cookies_lock);
+ if (cookie->aux_len > sizeof(cookie->inline_aux))
+ kfree(cookie->aux);
+ if (cookie->key_len > sizeof(cookie->inline_key))
+ kfree(cookie->key);
+ fscache_stat_d(&fscache_n_cookies);
+ kmem_cache_free(fscache_cookie_jar, cookie);
+}
+
+static void __fscache_queue_cookie(struct fscache_cookie *cookie)
+{
+ if (!queue_work(fscache_wq, &cookie->work))
+ fscache_put_cookie(cookie, fscache_cookie_put_over_queued);
+}
+
+static void fscache_queue_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where)
+{
+ fscache_get_cookie(cookie, where);
+ __fscache_queue_cookie(cookie);
}
/*
+ * Initialise the access gate on a cookie by setting a flag to prevent the
+ * state machine from being queued when the access counter transitions to 0.
+ * We're only interested in this when we withdraw caching services from the
+ * cookie.
+ */
+static void fscache_init_access_gate(struct fscache_cookie *cookie)
+{
+ int n_accesses;
+
+ n_accesses = atomic_read(&cookie->n_accesses);
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ n_accesses, fscache_access_cache_pin);
+ set_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags);
+}
+
+/**
+ * fscache_end_cookie_access - Unpin a cache at the end of an access.
+ * @cookie: A data file cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Unpin a cache cookie after we've accessed it and bring a deferred
+ * relinquishment or withdrawal state into effect.
+ *
+ * The @why indicator is provided for tracing purposes.
+ */
+void fscache_end_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ smp_mb__before_atomic();
+ n_accesses = atomic_dec_return(&cookie->n_accesses);
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ n_accesses, why);
+ if (n_accesses == 0 &&
+ !test_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags))
+ fscache_queue_cookie(cookie, fscache_cookie_get_end_access);
+}
+EXPORT_SYMBOL(fscache_end_cookie_access);
+
+/*
+ * Pin the cache behind a cookie so that we can access it.
+ */
+static void __fscache_begin_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ n_accesses = atomic_inc_return(&cookie->n_accesses);
+ smp_mb__after_atomic(); /* (Future) read state after is-caching.
+ * Reread n_accesses after is-caching
+ */
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ n_accesses, why);
+}
+
+/**
+ * fscache_begin_cookie_access - Pin a cache so data can be accessed
+ * @cookie: A data file cookie
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Attempt to pin the cache to prevent it from going away whilst we're
+ * accessing data, returning true if successful. This works as follows:
+ *
+ * (1) If the cookie is not being cached (ie. FSCACHE_COOKIE_IS_CACHING is not
+ * set), we return false to indicate access was not permitted.
+ *
+ * (2) If the cookie is being cached, we increment its n_accesses count and
+ * then recheck the IS_CACHING flag, ending the access if it got cleared.
+ *
+ * (3) When we end the access, we decrement the cookie's n_accesses and wake
+ * up any waiters if it reaches 0.
+ *
+ * (4) Whilst the cookie is actively being cached, its n_accesses is kept
+ * artificially incremented to prevent wakeups from happening.
+ *
+ * (5) When the cache is taken offline or if the cookie is culled, the flag is
+ * cleared to prevent new accesses, the cookie's n_accesses is decremented
+ * and we wait for it to become 0.
+ *
+ * The @why indicator is merely provided for tracing purposes.
+ */
+bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags))
+ return false;
+ __fscache_begin_cookie_access(cookie, why);
+ if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags) ||
+ !fscache_cache_is_live(cookie->volume->cache)) {
+ fscache_end_cookie_access(cookie, fscache_access_unlive);
+ return false;
+ }
+ return true;
+}
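A hedged usage sketch for this cookie-level gate (do_read_from_cache() is a hypothetical stand-in and the trace tags are illustrative):

	/* Sketch: attempt a cached read, falling back if the gate is shut. */
	static int example_cached_read(struct fscache_cookie *cookie)
	{
		int ret;

		if (!fscache_begin_cookie_access(cookie, fscache_access_io_read))
			return -ENOBUFS;	/* read from the server instead */
		ret = do_read_from_cache(cookie);	/* hypothetical */
		fscache_end_cookie_access(cookie, fscache_access_io_end);
		return ret;
	}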
+
+static inline void wake_up_cookie_state(struct fscache_cookie *cookie)
+{
+ /* Use a barrier to ensure that waiters see the state variable
+ * change, as spin_unlock doesn't guarantee a barrier.
+ *
+ * See comments over wake_up_bit() and waitqueue_active().
+ */
+ smp_mb();
+ wake_up_var(&cookie->state);
+}
+
+/*
+ * Change the state a cookie is in and wake up anyone waiting for that. Impose
+ * an ordering between the stuff stored in the cookie and the state member.
+ * Paired with fscache_cookie_state().
+ */
+static void __fscache_set_cookie_state(struct fscache_cookie *cookie,
+ enum fscache_cookie_state state)
+{
+ smp_store_release(&cookie->state, state);
+}
+
+static void fscache_set_cookie_state(struct fscache_cookie *cookie,
+ enum fscache_cookie_state state)
+{
+ spin_lock(&cookie->lock);
+ __fscache_set_cookie_state(cookie, state);
+ spin_unlock(&cookie->lock);
+ wake_up_cookie_state(cookie);
+}
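The release store above pairs with an acquire load in fscache_cookie_state(); that helper isn't in this diff, but presumably reads as follows (a sketch under that assumption):

	/* Sketch: the paired reader - an acquire load so that everything
	 * stored in the cookie before the state change is visible after it.
	 */
	static inline enum fscache_cookie_state fscache_cookie_state(struct fscache_cookie *cookie)
	{
		return smp_load_acquire(&cookie->state);
	}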
+
+/**
+ * fscache_cookie_lookup_negative - Note negative lookup
+ * @cookie: The cookie that was being looked up
+ *
+ * Note that some part of the metadata path in the cache doesn't exist and so
+ * we can release any waiting readers in the certain knowledge that there's
+ * nothing for them to actually read.
+ *
+ * This function uses no locking and must only be called from the state machine.
+ */
+void fscache_cookie_lookup_negative(struct fscache_cookie *cookie)
+{
+ set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_CREATING);
+}
+EXPORT_SYMBOL(fscache_cookie_lookup_negative);
+
+/**
+ * fscache_resume_after_invalidation - Allow I/O to resume after invalidation
+ * @cookie: The cookie that was invalidated
+ *
+ * Tell fscache that invalidation is sufficiently complete that I/O can be
+ * allowed again.
+ */
+void fscache_resume_after_invalidation(struct fscache_cookie *cookie)
+{
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
+}
+EXPORT_SYMBOL(fscache_resume_after_invalidation);
+
+/**
+ * fscache_caching_failed - Report that a failure stopped caching on a cookie
+ * @cookie: The cookie that was affected
+ *
+ * Tell fscache that caching on a cookie needs to be stopped due to some sort
+ * of failure.
+ *
+ * This function uses no locking and must only be called from the state machine.
+ */
+void fscache_caching_failed(struct fscache_cookie *cookie)
+{
+ clear_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_FAILED);
+}
+EXPORT_SYMBOL(fscache_caching_failed);
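The three calls above are hooks for the cache backend's lookup/creation path. A hedged sketch of a backend ->lookup_cookie() using the negative-lookup call (the my_* helpers are hypothetical):

	/* Sketch: no backing file yet, so release waiting readers early. */
	static bool my_lookup_cookie(struct fscache_cookie *cookie)
	{
		if (!my_backing_file_exists(cookie)) {		/* hypothetical */
			fscache_cookie_lookup_negative(cookie);	/* nothing to read */
			return my_create_backing_file(cookie);	/* hypothetical */
		}
		return true;
	}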
+
+/*
* Set the index key in a cookie. The cookie struct has space for a 16-byte
* key plus length and hash, but if that's not big enough, it's instead a
* pointer to a buffer containing 3 bytes of hash, 1 byte of length and then
@@ -87,38 +275,35 @@ void fscache_free_cookie(struct fscache_cookie *cookie)
static int fscache_set_key(struct fscache_cookie *cookie,
const void *index_key, size_t index_key_len)
{
- u32 *buf;
- int bufs;
+ void *buf;
+ size_t buf_size;
- bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf));
+ buf_size = round_up(index_key_len, sizeof(__le32));
if (index_key_len > sizeof(cookie->inline_key)) {
- buf = kcalloc(bufs, sizeof(*buf), GFP_KERNEL);
+ buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
cookie->key = buf;
} else {
- buf = (u32 *)cookie->inline_key;
+ buf = cookie->inline_key;
}
memcpy(buf, index_key, index_key_len);
- cookie->key_hash = fscache_hash(0, buf, bufs);
+ cookie->key_hash = fscache_hash(cookie->volume->key_hash,
+ buf, buf_size);
return 0;
}
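To make the padding concrete: the key is rounded up to whole 32-bit words and the hash is seeded from the volume, so the same file key in two different volumes hashes differently. An illustrative fragment (not in the patch):

	size_t buf_size = round_up(11, sizeof(__le32));	/* an 11-byte key pads to 12, i.e. three words */
	u32 hash = fscache_hash(volume->key_hash, buf, buf_size);	/* volume-seeded fold */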
-static long fscache_compare_cookie(const struct fscache_cookie *a,
- const struct fscache_cookie *b)
+static bool fscache_cookie_same(const struct fscache_cookie *a,
+ const struct fscache_cookie *b)
{
const void *ka, *kb;
- if (a->key_hash != b->key_hash)
- return (long)a->key_hash - (long)b->key_hash;
- if (a->parent != b->parent)
- return (long)a->parent - (long)b->parent;
- if (a->key_len != b->key_len)
- return (long)a->key_len - (long)b->key_len;
- if (a->type != b->type)
- return (long)a->type - (long)b->type;
+ if (a->key_hash != b->key_hash ||
+ a->volume != b->volume ||
+ a->key_len != b->key_len)
+ return false;
if (a->key_len <= sizeof(a->inline_key)) {
ka = &a->inline_key;
@@ -127,7 +312,7 @@ static long fscache_compare_cookie(const struct fscache_cookie *a,
ka = a->key;
kb = b->key;
}
- return memcmp(ka, kb, a->key_len);
+ return memcmp(ka, kb, a->key_len) == 0;
}
static atomic_t fscache_cookie_debug_id = ATOMIC_INIT(1);
@@ -135,12 +320,11 @@ static atomic_t fscache_cookie_debug_id = ATOMIC_INIT(1);
/*
* Allocate a cookie.
*/
-struct fscache_cookie *fscache_alloc_cookie(
- struct fscache_cookie *parent,
- const struct fscache_cookie_def *def,
+static struct fscache_cookie *fscache_alloc_cookie(
+ struct fscache_volume *volume,
+ u8 advice,
const void *index_key, size_t index_key_len,
const void *aux_data, size_t aux_data_len,
- void *netfs_data,
loff_t object_size)
{
struct fscache_cookie *cookie;
@@ -149,9 +333,15 @@ struct fscache_cookie *fscache_alloc_cookie(
cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
if (!cookie)
return NULL;
+ fscache_stat(&fscache_n_cookies);
- cookie->key_len = index_key_len;
- cookie->aux_len = aux_data_len;
+ cookie->volume = volume;
+ cookie->advice = advice;
+ cookie->key_len = index_key_len;
+ cookie->aux_len = aux_data_len;
+ cookie->object_size = object_size;
+ if (object_size == 0)
+ __set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
if (fscache_set_key(cookie, index_key, index_key_len) < 0)
goto nomem;
@@ -165,30 +355,16 @@ struct fscache_cookie *fscache_alloc_cookie(
}
refcount_set(&cookie->ref, 1);
- atomic_set(&cookie->n_children, 0);
cookie->debug_id = atomic_inc_return(&fscache_cookie_debug_id);
-
- /* We keep the active count elevated until relinquishment to prevent an
- * attempt to wake up every time the object operations queue quiesces.
- */
- atomic_set(&cookie->n_active, 1);
-
- cookie->def = def;
- cookie->parent = parent;
- cookie->netfs_data = netfs_data;
- cookie->flags = (1 << FSCACHE_COOKIE_NO_DATA_YET);
- cookie->type = def->type;
spin_lock_init(&cookie->lock);
- spin_lock_init(&cookie->stores_lock);
- INIT_HLIST_HEAD(&cookie->backing_objects);
-
- /* radix tree insertion won't use the preallocation pool unless it's
- * told it may not wait */
- INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
+ INIT_LIST_HEAD(&cookie->commit_link);
+ INIT_WORK(&cookie->work, fscache_cookie_worker);
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
write_lock(&fscache_cookies_lock);
list_add_tail(&cookie->proc_link, &fscache_cookies);
write_unlock(&fscache_cookies_lock);
+ fscache_see_cookie(cookie, fscache_cookie_new_acquire);
return cookie;
nomem:
@@ -196,13 +372,28 @@ nomem:
return NULL;
}
+static void fscache_wait_on_collision(struct fscache_cookie *candidate,
+ struct fscache_cookie *wait_for)
+{
+ enum fscache_cookie_state *statep = &wait_for->state;
+
+ wait_var_event_timeout(statep, READ_ONCE(*statep) == FSCACHE_COOKIE_STATE_DROPPED,
+ 20 * HZ);
+ if (READ_ONCE(*statep) != FSCACHE_COOKIE_STATE_DROPPED) {
+ pr_notice("Potential collision c=%08x old: c=%08x",
+ candidate->debug_id, wait_for->debug_id);
+ wait_var_event(statep, READ_ONCE(*statep) == FSCACHE_COOKIE_STATE_DROPPED);
+ }
+}
+
/*
* Attempt to insert the new cookie into the hash. If there's a collision, we
- * return the old cookie if it's not in use and an error otherwise.
+ * wait for the old cookie to complete if it's being relinquished, and return
+ * an error otherwise.
*/
-struct fscache_cookie *fscache_hash_cookie(struct fscache_cookie *candidate)
+static bool fscache_hash_cookie(struct fscache_cookie *candidate)
{
- struct fscache_cookie *cursor;
+ struct fscache_cookie *cursor, *wait_for = NULL;
struct hlist_bl_head *h;
struct hlist_bl_node *p;
unsigned int bucket;
@@ -212,64 +403,53 @@ struct fscache_cookie *fscache_hash_cookie(struct fscache_cookie *candidate)
hlist_bl_lock(h);
hlist_bl_for_each_entry(cursor, p, h, hash_link) {
- if (fscache_compare_cookie(candidate, cursor) == 0)
- goto collision;
+ if (fscache_cookie_same(candidate, cursor)) {
+ if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cursor->flags))
+ goto collision;
+ wait_for = fscache_get_cookie(cursor,
+ fscache_cookie_get_hash_collision);
+ break;
+ }
}
- __set_bit(FSCACHE_COOKIE_ACQUIRED, &candidate->flags);
- fscache_cookie_get(candidate->parent, fscache_cookie_get_acquire_parent);
- atomic_inc(&candidate->parent->n_children);
+ fscache_get_volume(candidate->volume, fscache_volume_get_cookie);
+ atomic_inc(&candidate->volume->n_cookies);
hlist_bl_add_head(&candidate->hash_link, h);
+ set_bit(FSCACHE_COOKIE_IS_HASHED, &candidate->flags);
hlist_bl_unlock(h);
- return candidate;
-collision:
- if (test_and_set_bit(FSCACHE_COOKIE_ACQUIRED, &cursor->flags)) {
- trace_fscache_cookie(cursor->debug_id, refcount_read(&cursor->ref),
- fscache_cookie_collision);
- pr_err("Duplicate cookie detected\n");
- fscache_print_cookie(cursor, 'O');
- fscache_print_cookie(candidate, 'N');
- hlist_bl_unlock(h);
- return NULL;
+ if (wait_for) {
+ fscache_wait_on_collision(candidate, wait_for);
+ fscache_put_cookie(wait_for, fscache_cookie_put_hash_collision);
}
+ return true;
- fscache_cookie_get(cursor, fscache_cookie_get_reacquire);
+collision:
+ trace_fscache_cookie(cursor->debug_id, refcount_read(&cursor->ref),
+ fscache_cookie_collision);
+ pr_err("Duplicate cookie detected\n");
+ fscache_print_cookie(cursor, 'O');
+ fscache_print_cookie(candidate, 'N');
hlist_bl_unlock(h);
- return cursor;
+ return false;
}
/*
- * request a cookie to represent an object (index, datafile, xattr, etc)
- * - parent specifies the parent object
- * - the top level index cookie for each netfs is stored in the fscache_netfs
- * struct upon registration
- * - def points to the definition
- * - the netfs_data will be passed to the functions pointed to in *def
- * - all attached caches will be searched to see if they contain this object
- * - index objects aren't stored on disk until there's a dependent file that
- * needs storing
- * - other objects are stored in a selected cache immediately, and all the
- * indices forming the path to it are instantiated if necessary
- * - we never let on to the netfs about errors
- * - we may set a negative cookie pointer, but that's okay
+ * Request a cookie to represent a data storage object within a volume.
+ *
+ * We never let on to the netfs about errors. We may set a negative cookie
+ * pointer, but that's okay.
*/
struct fscache_cookie *__fscache_acquire_cookie(
- struct fscache_cookie *parent,
- const struct fscache_cookie_def *def,
+ struct fscache_volume *volume,
+ u8 advice,
const void *index_key, size_t index_key_len,
const void *aux_data, size_t aux_data_len,
- void *netfs_data,
- loff_t object_size,
- bool enable)
+ loff_t object_size)
{
- struct fscache_cookie *candidate, *cookie;
-
- BUG_ON(!def);
+ struct fscache_cookie *cookie;
- _enter("{%s},{%s},%p,%u",
- parent ? (char *) parent->def->name : "<no-parent>",
- def->name, netfs_data, enable);
+ _enter("V=%x", volume->debug_id);
if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255)
return NULL;
@@ -280,563 +460,440 @@ struct fscache_cookie *__fscache_acquire_cookie(
fscache_stat(&fscache_n_acquires);
- /* if there's no parent cookie, then we don't create one here either */
- if (!parent) {
- fscache_stat(&fscache_n_acquires_null);
- _leave(" [no parent]");
- return NULL;
- }
-
- /* validate the definition */
- BUG_ON(!def->name[0]);
-
- BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX &&
- parent->type != FSCACHE_COOKIE_TYPE_INDEX);
-
- candidate = fscache_alloc_cookie(parent, def,
- index_key, index_key_len,
- aux_data, aux_data_len,
- netfs_data, object_size);
- if (!candidate) {
+ cookie = fscache_alloc_cookie(volume, advice,
+ index_key, index_key_len,
+ aux_data, aux_data_len,
+ object_size);
+ if (!cookie) {
fscache_stat(&fscache_n_acquires_oom);
- _leave(" [ENOMEM]");
return NULL;
}
- cookie = fscache_hash_cookie(candidate);
- if (!cookie) {
- trace_fscache_cookie(candidate->debug_id, 1,
- fscache_cookie_discard);
- goto out;
- }
-
- if (cookie == candidate)
- candidate = NULL;
-
- switch (cookie->type) {
- case FSCACHE_COOKIE_TYPE_INDEX:
- fscache_stat(&fscache_n_cookie_index);
- break;
- case FSCACHE_COOKIE_TYPE_DATAFILE:
- fscache_stat(&fscache_n_cookie_data);
- break;
- default:
- fscache_stat(&fscache_n_cookie_special);
- break;
+ if (!fscache_hash_cookie(cookie)) {
+ fscache_see_cookie(cookie, fscache_cookie_discard);
+ fscache_free_cookie(cookie);
+ return NULL;
}
trace_fscache_acquire(cookie);
-
- if (enable) {
- /* if the object is an index then we need do nothing more here
- * - we create indices on disk when we need them as an index
- * may exist in multiple caches */
- if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX) {
- if (fscache_acquire_non_index_cookie(cookie, object_size) == 0) {
- set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
- } else {
- atomic_dec(&parent->n_children);
- fscache_cookie_put(cookie,
- fscache_cookie_put_acquire_nobufs);
- fscache_stat(&fscache_n_acquires_nobufs);
- _leave(" = NULL");
- return NULL;
- }
- } else {
- set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
- }
- }
-
fscache_stat(&fscache_n_acquires_ok);
-
-out:
- fscache_free_cookie(candidate);
+ _leave(" = c=%08x", cookie->debug_id);
return cookie;
}
EXPORT_SYMBOL(__fscache_acquire_cookie);
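A netfs reaches this through the fscache_acquire_cookie() wrapper. A hedged sketch of acquiring a data-file cookie, where struct my_fid, struct my_aux and struct my_inode stand in for the netfs's real state:

	/* Sketch: acquire a cookie keyed on a file identifier. */
	static void example_acquire(struct fscache_volume *volume,
				    struct my_fid *fid, struct my_aux *aux,
				    loff_t i_size, struct my_inode *mi)
	{
		mi->cookie = fscache_acquire_cookie(volume, 0 /* advice */,
						    fid, sizeof(*fid),	/* index key */
						    aux, sizeof(*aux),	/* coherency data */
						    i_size);
		/* A NULL return just means this file won't be cached. */
	}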
/*
- * Enable a cookie to permit it to accept new operations.
+ * Prepare a cache object to be written to.
*/
-void __fscache_enable_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- loff_t object_size,
- bool (*can_enable)(void *data),
- void *data)
+static void fscache_prepare_to_write(struct fscache_cookie *cookie)
{
- _enter("%x", cookie->debug_id);
-
- trace_fscache_enable(cookie);
-
- wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
- TASK_UNINTERRUPTIBLE);
-
- fscache_update_aux(cookie, aux_data);
-
- if (test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
- goto out_unlock;
-
- if (can_enable && !can_enable(data)) {
- /* The netfs decided it didn't want to enable after all */
- } else if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX) {
- /* Wait for outstanding disablement to complete */
- __fscache_wait_on_invalidate(cookie);
-
- if (fscache_acquire_non_index_cookie(cookie, object_size) == 0)
- set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
- } else {
- set_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
- }
-
-out_unlock:
- clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
+ cookie->volume->cache->ops->prepare_to_write(cookie);
}
-EXPORT_SYMBOL(__fscache_enable_cookie);
/*
- * acquire a non-index cookie
- * - this must make sure the index chain is instantiated and instantiate the
- * object representation too
+ * Look up a cookie in the cache.
*/
-static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie,
- loff_t object_size)
+static void fscache_perform_lookup(struct fscache_cookie *cookie)
{
- struct fscache_object *object;
- struct fscache_cache *cache;
- int ret;
+ enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed;
+ bool need_withdraw = false;
_enter("");
- set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
-
- /* now we need to see whether the backing objects for this cookie yet
- * exist, if not there'll be nothing to search */
- down_read(&fscache_addremove_sem);
-
- if (list_empty(&fscache_cache_list)) {
- up_read(&fscache_addremove_sem);
- _leave(" = 0 [no caches]");
- return 0;
- }
-
- /* select a cache in which to store the object */
- cache = fscache_select_cache_for_object(cookie->parent);
- if (!cache) {
- up_read(&fscache_addremove_sem);
- fscache_stat(&fscache_n_acquires_no_cache);
- _leave(" = -ENOMEDIUM [no cache]");
- return -ENOMEDIUM;
- }
-
- _debug("cache %s", cache->tag->name);
-
- set_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
-
- /* ask the cache to allocate objects for this cookie and its parent
- * chain */
- ret = fscache_alloc_object(cache, cookie);
- if (ret < 0) {
- up_read(&fscache_addremove_sem);
- _leave(" = %d", ret);
- return ret;
- }
-
- spin_lock(&cookie->lock);
- if (hlist_empty(&cookie->backing_objects)) {
- spin_unlock(&cookie->lock);
- goto unavailable;
+ if (!cookie->volume->cache_priv) {
+ fscache_create_volume(cookie->volume, true);
+ if (!cookie->volume->cache_priv) {
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
+ goto out;
+ }
}
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- fscache_set_store_limit(object, object_size);
-
- /* initiate the process of looking up all the objects in the chain
- * (done by fscache_initialise_object()) */
- fscache_raise_event(object, FSCACHE_OBJECT_EV_NEW_CHILD);
-
- spin_unlock(&cookie->lock);
-
- /* we may be required to wait for lookup to complete at this point */
- if (!fscache_defer_lookup) {
- wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
- TASK_UNINTERRUPTIBLE);
- if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
- goto unavailable;
+ if (!cookie->volume->cache->ops->lookup_cookie(cookie)) {
+ if (cookie->state != FSCACHE_COOKIE_STATE_FAILED)
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
+ need_withdraw = true;
+ _leave(" [fail]");
+ goto out;
}
- up_read(&fscache_addremove_sem);
- _leave(" = 0 [deferred]");
- return 0;
+ fscache_see_cookie(cookie, fscache_cookie_see_active);
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_ACTIVE);
+ trace = fscache_access_lookup_cookie_end;
-unavailable:
- up_read(&fscache_addremove_sem);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
+out:
+ fscache_end_cookie_access(cookie, trace);
+ if (need_withdraw)
+ fscache_withdraw_cookie(cookie);
+ fscache_end_volume_access(cookie->volume, cookie, trace);
}
/*
- * recursively allocate cache object records for a cookie/cache combination
- * - caller must be holding the addremove sem
+ * Begin the process of looking up a cookie. We offload the actual work to
+ * a worker thread.
*/
-static int fscache_alloc_object(struct fscache_cache *cache,
- struct fscache_cookie *cookie)
+static bool fscache_begin_lookup(struct fscache_cookie *cookie, bool will_modify)
{
- struct fscache_object *object;
- int ret;
-
- _enter("%s,%x{%s}", cache->tag->name, cookie->debug_id, cookie->def->name);
-
- spin_lock(&cookie->lock);
- hlist_for_each_entry(object, &cookie->backing_objects,
- cookie_link) {
- if (object->cache == cache)
- goto object_already_extant;
+ if (will_modify) {
+ set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags);
+ set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
}
- spin_unlock(&cookie->lock);
-
- /* ask the cache to allocate an object (we may end up with duplicate
- * objects at this stage, but we sort that out later) */
- fscache_stat(&fscache_n_cop_alloc_object);
- object = cache->ops->alloc_object(cache, cookie);
- fscache_stat_d(&fscache_n_cop_alloc_object);
- if (IS_ERR(object)) {
- fscache_stat(&fscache_n_object_no_alloc);
- ret = PTR_ERR(object);
- goto error;
- }
-
- ASSERTCMP(object->cookie, ==, cookie);
- fscache_stat(&fscache_n_object_alloc);
-
- object->debug_id = atomic_inc_return(&fscache_object_debug_id);
-
- _debug("ALLOC OBJ%x: %s {%lx}",
- object->debug_id, cookie->def->name, object->events);
-
- ret = fscache_alloc_object(cache, cookie->parent);
- if (ret < 0)
- goto error_put;
-
- /* only attach if we managed to allocate all we needed, otherwise
- * discard the object we just allocated and instead use the one
- * attached to the cookie */
- if (fscache_attach_object(cookie, object) < 0) {
- fscache_stat(&fscache_n_cop_put_object);
- cache->ops->put_object(object, fscache_obj_put_attach_fail);
- fscache_stat_d(&fscache_n_cop_put_object);
- }
-
- _leave(" = 0");
- return 0;
-
-object_already_extant:
- ret = -ENOBUFS;
- if (fscache_object_is_dying(object) ||
- fscache_cache_is_broken(object)) {
- spin_unlock(&cookie->lock);
- goto error;
- }
- spin_unlock(&cookie->lock);
- _leave(" = 0 [found]");
- return 0;
-
-error_put:
- fscache_stat(&fscache_n_cop_put_object);
- cache->ops->put_object(object, fscache_obj_put_alloc_fail);
- fscache_stat_d(&fscache_n_cop_put_object);
-error:
- _leave(" = %d", ret);
- return ret;
+ if (!fscache_begin_volume_access(cookie->volume, cookie,
+ fscache_access_lookup_cookie))
+ return false;
+
+ __fscache_begin_cookie_access(cookie, fscache_access_lookup_cookie);
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_LOOKING_UP);
+ set_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);
+ set_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags);
+ return true;
}
/*
- * attach a cache object to a cookie
+ * Start using the cookie for I/O. This prevents the backing object from being
+ * reaped by VM pressure.
*/
-static int fscache_attach_object(struct fscache_cookie *cookie,
- struct fscache_object *object)
+void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify)
{
- struct fscache_object *p;
- struct fscache_cache *cache = object->cache;
- int ret;
+ enum fscache_cookie_state state;
+ bool queue = false;
+ int n_active;
- _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
+ _enter("c=%08x", cookie->debug_id);
- ASSERTCMP(object->cookie, ==, cookie);
+ if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
+ "Trying to use relinquished cookie\n"))
+ return;
spin_lock(&cookie->lock);
- /* there may be multiple initial creations of this object, but we only
- * want one */
- ret = -EEXIST;
- hlist_for_each_entry(p, &cookie->backing_objects, cookie_link) {
- if (p->cache == object->cache) {
- if (fscache_object_is_dying(p))
- ret = -ENOBUFS;
- goto cant_attach_object;
- }
- }
+ n_active = atomic_inc_return(&cookie->n_active);
+ trace_fscache_active(cookie->debug_id, refcount_read(&cookie->ref),
+ n_active, atomic_read(&cookie->n_accesses),
+ will_modify ?
+ fscache_active_use_modify : fscache_active_use);
+
+again:
+ state = fscache_cookie_state(cookie);
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_QUIESCENT:
+ queue = fscache_begin_lookup(cookie, will_modify);
+ break;
- /* pin the parent object */
- spin_lock_nested(&cookie->parent->lock, 1);
- hlist_for_each_entry(p, &cookie->parent->backing_objects,
- cookie_link) {
- if (p->cache == object->cache) {
- if (fscache_object_is_dying(p)) {
- ret = -ENOBUFS;
- spin_unlock(&cookie->parent->lock);
- goto cant_attach_object;
- }
- object->parent = p;
- spin_lock(&p->lock);
- p->n_children++;
- spin_unlock(&p->lock);
- break;
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ case FSCACHE_COOKIE_STATE_CREATING:
+ if (will_modify)
+ set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags);
+ break;
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ case FSCACHE_COOKIE_STATE_INVALIDATING:
+ if (will_modify &&
+ !test_and_set_bit(FSCACHE_COOKIE_LOCAL_WRITE, &cookie->flags)) {
+ set_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
+ queue = true;
}
- }
- spin_unlock(&cookie->parent->lock);
-
- /* attach to the cache's object list */
- if (list_empty(&object->cache_link)) {
- spin_lock(&cache->object_list_lock);
- list_add(&object->cache_link, &cache->object_list);
- spin_unlock(&cache->object_list_lock);
- }
-
- /* Attach to the cookie. The object already has a ref on it. */
- hlist_add_head(&object->cookie_link, &cookie->backing_objects);
- ret = 0;
-
-cant_attach_object:
- spin_unlock(&cookie->lock);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * Invalidate an object. Callable with spinlocks held.
- */
-void __fscache_invalidate(struct fscache_cookie *cookie)
-{
- struct fscache_object *object;
-
- _enter("{%s}", cookie->def->name);
-
- fscache_stat(&fscache_n_invalidates);
+ break;
- /* Only permit invalidation of data files. Invalidating an index will
- * require the caller to release all its attachments to the tree rooted
- * there, and if it's doing that, it may as well just retire the
- * cookie.
- */
- ASSERTCMP(cookie->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);
+ case FSCACHE_COOKIE_STATE_FAILED:
+ case FSCACHE_COOKIE_STATE_WITHDRAWING:
+ break;
- /* If there's an object, we tell the object state machine to handle the
- * invalidation on our behalf, otherwise there's nothing to do.
- */
- if (!hlist_empty(&cookie->backing_objects)) {
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ spin_unlock(&cookie->lock);
+ wait_var_event(&cookie->state,
+ fscache_cookie_state(cookie) !=
+ FSCACHE_COOKIE_STATE_LRU_DISCARDING);
spin_lock(&cookie->lock);
+ goto again;
- if (fscache_cookie_enabled(cookie) &&
- !hlist_empty(&cookie->backing_objects) &&
- !test_and_set_bit(FSCACHE_COOKIE_INVALIDATING,
- &cookie->flags)) {
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object,
- cookie_link);
- if (fscache_object_is_live(object))
- fscache_raise_event(
- object, FSCACHE_OBJECT_EV_INVALIDATE);
- }
-
- spin_unlock(&cookie->lock);
+ case FSCACHE_COOKIE_STATE_DROPPED:
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ WARN(1, "Can't use cookie in state %u\n", state);
+ break;
}
+ spin_unlock(&cookie->lock);
+ if (queue)
+ fscache_queue_cookie(cookie, fscache_cookie_get_use_work);
_leave("");
}
-EXPORT_SYMBOL(__fscache_invalidate);
+EXPORT_SYMBOL(__fscache_use_cookie);
-/*
- * Wait for object invalidation to complete.
- */
-void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
+static void fscache_unuse_cookie_locked(struct fscache_cookie *cookie)
{
- _enter("%x", cookie->debug_id);
+ clear_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags);
+ if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags))
+ return;
- wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
- TASK_UNINTERRUPTIBLE);
+ cookie->unused_at = jiffies;
+ spin_lock(&fscache_cookie_lru_lock);
+ if (list_empty(&cookie->commit_link)) {
+ fscache_get_cookie(cookie, fscache_cookie_get_lru);
+ fscache_stat(&fscache_n_cookies_lru);
+ }
+ list_move_tail(&cookie->commit_link, &fscache_cookie_lru);
- _leave("");
+ spin_unlock(&fscache_cookie_lru_lock);
+ timer_reduce(&fscache_cookie_lru_timer,
+ jiffies + fscache_lru_cookie_timeout);
}
-EXPORT_SYMBOL(__fscache_wait_on_invalidate);
/*
- * update the index entries backing a cookie
+ * Stop using the cookie for I/O.
*/
-void __fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data)
+void __fscache_unuse_cookie(struct fscache_cookie *cookie,
+ const void *aux_data, const loff_t *object_size)
{
- struct fscache_object *object;
-
- fscache_stat(&fscache_n_updates);
-
- if (!cookie) {
- fscache_stat(&fscache_n_updates_null);
- _leave(" [no cookie]");
+ unsigned int debug_id = cookie->debug_id;
+ unsigned int r = refcount_read(&cookie->ref);
+ unsigned int a = atomic_read(&cookie->n_accesses);
+ unsigned int c;
+
+ if (aux_data || object_size)
+ __fscache_update_cookie(cookie, aux_data, object_size);
+
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ c = atomic_fetch_add_unless(&cookie->n_active, -1, 1);
+ if (c != 1) {
+ trace_fscache_active(debug_id, r, c - 1, a, fscache_active_unuse);
return;
}
- _enter("{%s}", cookie->def->name);
-
spin_lock(&cookie->lock);
-
- fscache_update_aux(cookie, aux_data);
-
- if (fscache_cookie_enabled(cookie)) {
- /* update the index entry on disk in each cache backing this
- * cookie.
- */
- hlist_for_each_entry(object,
- &cookie->backing_objects, cookie_link) {
- fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
- }
- }
-
+ r = refcount_read(&cookie->ref);
+ a = atomic_read(&cookie->n_accesses);
+ c = atomic_dec_return(&cookie->n_active);
+ trace_fscache_active(debug_id, r, c, a, fscache_active_unuse);
+ if (c == 0)
+ fscache_unuse_cookie_locked(cookie);
spin_unlock(&cookie->lock);
- _leave("");
}
-EXPORT_SYMBOL(__fscache_update_cookie);
+EXPORT_SYMBOL(__fscache_unuse_cookie);
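In the new model the netfs brackets periods of possible I/O with use/unuse, typically at file open and release. A hedged sketch, with example_i_cookie() as a hypothetical accessor for the inode's cookie:

	/* Sketch: pin the cookie while the file is open. */
	static int example_open(struct inode *inode, struct file *file)
	{
		fscache_use_cookie(example_i_cookie(inode),
				   file->f_mode & FMODE_WRITE);
		return 0;
	}

	static int example_release(struct inode *inode, struct file *file)
	{
		loff_t i_size = i_size_read(inode);

		/* Hand back the final size so the cache can resize the object. */
		fscache_unuse_cookie(example_i_cookie(inode), NULL, &i_size);
		return 0;
	}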
/*
- * Disable a cookie to stop it from accepting new requests from the netfs.
+ * Perform work upon the cookie, such as committing its cache state,
+ * relinquishing it or withdrawing the backing cache. We're protected from the
+ * cache going away under us as object withdrawal must come through this
+ * non-reentrant work item.
*/
-void __fscache_disable_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool invalidate)
+static void fscache_cookie_state_machine(struct fscache_cookie *cookie)
{
- struct fscache_object *object;
- bool awaken = false;
+ enum fscache_cookie_state state;
+ bool wake = false;
- _enter("%x,%u", cookie->debug_id, invalidate);
+ _enter("c=%x", cookie->debug_id);
- trace_fscache_disable(cookie);
-
- ASSERTCMP(atomic_read(&cookie->n_active), >, 0);
-
- if (atomic_read(&cookie->n_children) != 0) {
- pr_err("Cookie '%s' still has children\n",
- cookie->def->name);
- BUG();
- }
+again:
+ spin_lock(&cookie->lock);
+again_locked:
+ state = cookie->state;
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_QUIESCENT:
+ /* A cookie is moved from the QUIESCENT state to the LOOKING_UP state by
+ * fscache_use_cookie().
+ */
- wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
- TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&cookie->n_accesses) == 0 &&
+ test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_RELINQUISHING);
+ wake = true;
+ goto again_locked;
+ }
+ break;
- fscache_update_aux(cookie, aux_data);
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ spin_unlock(&cookie->lock);
+ fscache_init_access_gate(cookie);
+ fscache_perform_lookup(cookie);
+ goto again;
- if (!test_and_clear_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
- goto out_unlock_enable;
+ case FSCACHE_COOKIE_STATE_INVALIDATING:
+ spin_unlock(&cookie->lock);
+ fscache_perform_invalidation(cookie);
+ goto again;
+
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ if (test_and_clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags)) {
+ spin_unlock(&cookie->lock);
+ fscache_prepare_to_write(cookie);
+ spin_lock(&cookie->lock);
+ }
+ if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) {
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_LRU_DISCARDING);
+ wake = true;
+ goto again_locked;
+ }
+ fallthrough;
- /* If the cookie is being invalidated, wait for that to complete first
- * so that we can reuse the flag.
- */
- __fscache_wait_on_invalidate(cookie);
+ case FSCACHE_COOKIE_STATE_FAILED:
+ if (atomic_read(&cookie->n_accesses) != 0)
+ break;
+ if (test_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags)) {
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_RELINQUISHING);
+ wake = true;
+ goto again_locked;
+ }
+ if (test_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags)) {
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_WITHDRAWING);
+ wake = true;
+ goto again_locked;
+ }
+ break;
- /* Dispose of the backing objects */
- set_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags);
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ case FSCACHE_COOKIE_STATE_WITHDRAWING:
+ if (cookie->cache_priv) {
+ spin_unlock(&cookie->lock);
+ cookie->volume->cache->ops->withdraw_cookie(cookie);
+ spin_lock(&cookie->lock);
+ }
- spin_lock(&cookie->lock);
- if (!hlist_empty(&cookie->backing_objects)) {
- hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
- if (invalidate)
- set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
- clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
- fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ fscache_see_cookie(cookie, fscache_cookie_see_relinquish);
+ fscache_unhash_cookie(cookie);
+ __fscache_set_cookie_state(cookie,
+ FSCACHE_COOKIE_STATE_DROPPED);
+ wake = true;
+ goto out;
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ fscache_see_cookie(cookie, fscache_cookie_see_lru_discard);
+ break;
+ case FSCACHE_COOKIE_STATE_WITHDRAWING:
+ fscache_see_cookie(cookie, fscache_cookie_see_withdraw);
+ break;
+ default:
+ BUG();
}
- } else {
- if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
- awaken = true;
- }
- spin_unlock(&cookie->lock);
- if (awaken)
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
- /* Wait for cessation of activity requiring access to the netfs (when
- * n_active reaches 0). This makes sure outstanding reads and writes
- * have completed.
- */
- if (!atomic_dec_and_test(&cookie->n_active)) {
- wait_var_event(&cookie->n_active,
- !atomic_read(&cookie->n_active));
- }
+ clear_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
+ clear_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
+ clear_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
+ clear_bit(FSCACHE_COOKIE_DO_PREP_TO_WRITE, &cookie->flags);
+ set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
+ wake = true;
+ goto again_locked;
- /* Make sure any pending writes are cancelled. */
- if (cookie->type != FSCACHE_COOKIE_TYPE_INDEX)
- fscache_invalidate_writes(cookie);
+ case FSCACHE_COOKIE_STATE_DROPPED:
+ break;
- /* Reset the cookie state if it wasn't relinquished */
- if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
- atomic_inc(&cookie->n_active);
- set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+ default:
+ WARN_ONCE(1, "Cookie %x in unexpected state %u\n",
+ cookie->debug_id, state);
+ break;
}
-out_unlock_enable:
- clear_bit_unlock(FSCACHE_COOKIE_ENABLEMENT_LOCK, &cookie->flags);
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK);
+out:
+ spin_unlock(&cookie->lock);
+ if (wake)
+ wake_up_cookie_state(cookie);
_leave("");
}
-EXPORT_SYMBOL(__fscache_disable_cookie);
+
+static void fscache_cookie_worker(struct work_struct *work)
+{
+ struct fscache_cookie *cookie = container_of(work, struct fscache_cookie, work);
+
+ fscache_see_cookie(cookie, fscache_cookie_see_work);
+ fscache_cookie_state_machine(cookie);
+ fscache_put_cookie(cookie, fscache_cookie_put_work);
+}
/*
- * release a cookie back to the cache
- * - the object will be marked as recyclable on disk if retire is true
- * - all dependents of this cookie must have already been unregistered
- * (indices/files/pages)
+ * Unpin the cookie so that the object can become inactive. The cookie's work
+ * item will be scheduled when someone transitions n_accesses to 0 - but if
+ * someone's already done that, schedule it anyway.
*/
-void __fscache_relinquish_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool retire)
+static void __fscache_withdraw_cookie(struct fscache_cookie *cookie)
{
- fscache_stat(&fscache_n_relinquishes);
- if (retire)
- fscache_stat(&fscache_n_relinquishes_retire);
+ int n_accesses;
+ bool unpinned;
+
+ unpinned = test_and_clear_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags);
+
+ /* Need to read the access count after unpinning */
+ n_accesses = atomic_read(&cookie->n_accesses);
+ if (unpinned)
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ n_accesses, fscache_access_cache_unpin);
+ if (n_accesses == 0)
+ fscache_queue_cookie(cookie, fscache_cookie_get_end_access);
+}
- if (!cookie) {
- fscache_stat(&fscache_n_relinquishes_null);
- _leave(" [no cookie]");
- return;
- }
+static void fscache_cookie_lru_do_one(struct fscache_cookie *cookie)
+{
+ fscache_see_cookie(cookie, fscache_cookie_see_lru_do_one);
- _enter("%x{%s,%d},%d",
- cookie->debug_id, cookie->def->name,
- atomic_read(&cookie->n_active), retire);
+ spin_lock(&cookie->lock);
+ if (cookie->state != FSCACHE_COOKIE_STATE_ACTIVE ||
+ time_before(jiffies, cookie->unused_at + fscache_lru_cookie_timeout) ||
+ atomic_read(&cookie->n_active) > 0) {
+ spin_unlock(&cookie->lock);
+ fscache_stat(&fscache_n_cookies_lru_removed);
+ } else {
+ set_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
+ spin_unlock(&cookie->lock);
+ fscache_stat(&fscache_n_cookies_lru_expired);
+ _debug("lru c=%x", cookie->debug_id);
+ __fscache_withdraw_cookie(cookie);
+ }
- trace_fscache_relinquish(cookie, retire);
+ fscache_put_cookie(cookie, fscache_cookie_put_lru);
+}
- /* No further netfs-accessing operations on this cookie permitted */
- if (test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags))
- BUG();
+static void fscache_cookie_lru_worker(struct work_struct *work)
+{
+ struct fscache_cookie *cookie;
+ unsigned long unused_at;
- __fscache_disable_cookie(cookie, aux_data, retire);
+ spin_lock(&fscache_cookie_lru_lock);
- /* Clear pointers back to the netfs */
- cookie->netfs_data = NULL;
- cookie->def = NULL;
- BUG_ON(!radix_tree_empty(&cookie->stores));
+ while (!list_empty(&fscache_cookie_lru)) {
+ cookie = list_first_entry(&fscache_cookie_lru,
+ struct fscache_cookie, commit_link);
+ unused_at = cookie->unused_at + fscache_lru_cookie_timeout;
+ if (time_before(jiffies, unused_at)) {
+ timer_reduce(&fscache_cookie_lru_timer, unused_at);
+ break;
+ }
- if (cookie->parent) {
- ASSERTCMP(refcount_read(&cookie->parent->ref), >, 0);
- ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0);
- atomic_dec(&cookie->parent->n_children);
+ list_del_init(&cookie->commit_link);
+ fscache_stat_d(&fscache_n_cookies_lru);
+ spin_unlock(&fscache_cookie_lru_lock);
+ fscache_cookie_lru_do_one(cookie);
+ spin_lock(&fscache_cookie_lru_lock);
}
- /* Dispose of the netfs's link to the cookie */
- fscache_cookie_put(cookie, fscache_cookie_put_relinquish);
+ spin_unlock(&fscache_cookie_lru_lock);
+}
- _leave("");
+static void fscache_cookie_lru_timed_out(struct timer_list *timer)
+{
+ queue_work(fscache_wq, &fscache_cookie_lru_work);
+}
+
+static void fscache_cookie_drop_from_lru(struct fscache_cookie *cookie)
+{
+ bool need_put = false;
+
+ if (!list_empty(&cookie->commit_link)) {
+ spin_lock(&fscache_cookie_lru_lock);
+ if (!list_empty(&cookie->commit_link)) {
+ list_del_init(&cookie->commit_link);
+ fscache_stat_d(&fscache_n_cookies_lru);
+ fscache_stat(&fscache_n_cookies_lru_dropped);
+ need_put = true;
+ }
+ spin_unlock(&fscache_cookie_lru_lock);
+ if (need_put)
+ fscache_put_cookie(cookie, fscache_cookie_put_lru);
+ }
}
-EXPORT_SYMBOL(__fscache_relinquish_cookie);
/*
* Remove a cookie from the hash table.
@@ -851,43 +908,91 @@ static void fscache_unhash_cookie(struct fscache_cookie *cookie)
hlist_bl_lock(h);
hlist_bl_del(&cookie->hash_link);
+ clear_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags);
hlist_bl_unlock(h);
+ fscache_stat(&fscache_n_relinquishes_dropped);
}
+static void fscache_drop_withdraw_cookie(struct fscache_cookie *cookie)
+{
+ fscache_cookie_drop_from_lru(cookie);
+ __fscache_withdraw_cookie(cookie);
+}
+
+/**
+ * fscache_withdraw_cookie - Mark a cookie for withdrawal
+ * @cookie: The cookie to be withdrawn.
+ *
+ * Allow the cache backend to withdraw the backing for a cookie for its own
+ * reasons, even if that cookie is in active use.
+ */
+void fscache_withdraw_cookie(struct fscache_cookie *cookie)
+{
+ set_bit(FSCACHE_COOKIE_DO_WITHDRAW, &cookie->flags);
+ fscache_drop_withdraw_cookie(cookie);
+}
+EXPORT_SYMBOL(fscache_withdraw_cookie);
+
/*
- * Drop a reference to a cookie.
+ * Allow the netfs to release a cookie back to the cache.
+ * - the object will be marked as recyclable on disk if retire is true
*/
-void fscache_cookie_put(struct fscache_cookie *cookie,
- enum fscache_cookie_trace where)
+void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
- struct fscache_cookie *parent;
- int ref;
+ fscache_stat(&fscache_n_relinquishes);
+ if (retire)
+ fscache_stat(&fscache_n_relinquishes_retire);
+
+ _enter("c=%08x{%d},%d",
+ cookie->debug_id, atomic_read(&cookie->n_active), retire);
- _enter("%x", cookie->debug_id);
+ if (WARN(test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
+ "Cookie c=%x already relinquished\n", cookie->debug_id))
+ return;
- do {
- unsigned int cookie_debug_id = cookie->debug_id;
- bool zero = __refcount_dec_and_test(&cookie->ref, &ref);
+ if (retire)
+ set_bit(FSCACHE_COOKIE_RETIRED, &cookie->flags);
+ trace_fscache_relinquish(cookie, retire);
- trace_fscache_cookie(cookie_debug_id, ref - 1, where);
- if (!zero)
- return;
+ ASSERTCMP(atomic_read(&cookie->n_active), ==, 0);
+ ASSERTCMP(atomic_read(&cookie->volume->n_cookies), >, 0);
+ atomic_dec(&cookie->volume->n_cookies);
- parent = cookie->parent;
+ if (test_bit(FSCACHE_COOKIE_HAS_BEEN_CACHED, &cookie->flags)) {
+ set_bit(FSCACHE_COOKIE_DO_RELINQUISH, &cookie->flags);
+ fscache_drop_withdraw_cookie(cookie);
+ } else {
+ fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_DROPPED);
fscache_unhash_cookie(cookie);
- fscache_free_cookie(cookie);
+ }
+ fscache_put_cookie(cookie, fscache_cookie_put_relinquish);
+}
+EXPORT_SYMBOL(__fscache_relinquish_cookie);
- cookie = parent;
- where = fscache_cookie_put_parent;
- } while (cookie);
+/*
+ * Drop a reference to a cookie.
+ */
+void fscache_put_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where)
+{
+ struct fscache_volume *volume = cookie->volume;
+ unsigned int cookie_debug_id = cookie->debug_id;
+ bool zero;
+ int ref;
- _leave("");
+ zero = __refcount_dec_and_test(&cookie->ref, &ref);
+ trace_fscache_cookie(cookie_debug_id, ref - 1, where);
+ if (zero) {
+ fscache_free_cookie(cookie);
+ fscache_put_volume(volume, fscache_volume_put_cookie);
+ }
}
+EXPORT_SYMBOL(fscache_put_cookie);
/*
* Get a reference to a cookie.
*/
-struct fscache_cookie *fscache_cookie_get(struct fscache_cookie *cookie,
+struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie,
enum fscache_cookie_trace where)
{
int ref;
@@ -896,85 +1001,73 @@ struct fscache_cookie *fscache_cookie_get(struct fscache_cookie *cookie,
trace_fscache_cookie(cookie->debug_id, ref + 1, where);
return cookie;
}
+EXPORT_SYMBOL(fscache_get_cookie);
/*
- * check the consistency between the netfs inode and the backing cache
- *
- * NOTE: it only serves no-index type
+ * Ask the cache to effect invalidation of a cookie.
*/
-int __fscache_check_consistency(struct fscache_cookie *cookie,
- const void *aux_data)
+static void fscache_perform_invalidation(struct fscache_cookie *cookie)
{
- struct fscache_operation *op;
- struct fscache_object *object;
- bool wake_cookie = false;
- int ret;
-
- _enter("%p,", cookie);
+ if (!cookie->volume->cache->ops->invalidate_cookie(cookie))
+ fscache_caching_failed(cookie);
+ fscache_end_cookie_access(cookie, fscache_access_invalidate_cookie_end);
+}
- ASSERTCMP(cookie->type, ==, FSCACHE_COOKIE_TYPE_DATAFILE);
+/*
+ * Invalidate an object.
+ */
+void __fscache_invalidate(struct fscache_cookie *cookie,
+ const void *aux_data, loff_t new_size,
+ unsigned int flags)
+{
+ bool is_caching;
- if (fscache_wait_for_deferred_lookup(cookie) < 0)
- return -ERESTARTSYS;
+ _enter("c=%x", cookie->debug_id);
- if (hlist_empty(&cookie->backing_objects))
- return 0;
+ fscache_stat(&fscache_n_invalidates);
- op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
- if (!op)
- return -ENOMEM;
+ if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
+ "Trying to invalidate relinquished cookie\n"))
+ return;
- fscache_operation_init(cookie, op, NULL, NULL, NULL);
- op->flags = FSCACHE_OP_MYTHREAD |
- (1 << FSCACHE_OP_WAITING) |
- (1 << FSCACHE_OP_UNUSE_COOKIE);
- trace_fscache_page_op(cookie, NULL, op, fscache_page_op_check_consistency);
+ if ((flags & FSCACHE_INVAL_DIO_WRITE) &&
+ test_and_set_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags))
+ return;
spin_lock(&cookie->lock);
+ set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
+ fscache_update_aux(cookie, aux_data, &new_size);
+ cookie->inval_counter++;
+ trace_fscache_invalidate(cookie, new_size);
- fscache_update_aux(cookie, aux_data);
-
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto inconsistent;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
- if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
- goto inconsistent;
-
- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+ switch (cookie->state) {
+ case FSCACHE_COOKIE_STATE_INVALIDATING: /* is_still_valid will catch it */
+ default:
+ spin_unlock(&cookie->lock);
+ _leave(" [no %u]", cookie->state);
+ return;
- __fscache_use_cookie(cookie);
- if (fscache_submit_op(object, op) < 0)
- goto submit_failed;
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ case FSCACHE_COOKIE_STATE_CREATING:
+ spin_unlock(&cookie->lock);
+ _leave(" [look %x]", cookie->inval_counter);
+ return;
- /* the work queue now carries its own ref on the object */
- spin_unlock(&cookie->lock);
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ is_caching = fscache_begin_cookie_access(
+ cookie, fscache_access_invalidate_cookie);
+ if (is_caching)
+ __fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_INVALIDATING);
+ spin_unlock(&cookie->lock);
+ wake_up_cookie_state(cookie);
- ret = fscache_wait_for_operation_activation(object, op, NULL, NULL);
- if (ret == 0) {
- /* ask the cache to honour the operation */
- ret = object->cache->ops->check_consistency(op);
- fscache_op_complete(op, false);
- } else if (ret == -ENOBUFS) {
- ret = 0;
+ if (is_caching)
+ fscache_queue_cookie(cookie, fscache_cookie_get_inval_work);
+ _leave(" [inv]");
+ return;
}
-
- fscache_put_operation(op);
- _leave(" = %d", ret);
- return ret;
-
-submit_failed:
- wake_cookie = __fscache_unuse_cookie(cookie);
-inconsistent:
- spin_unlock(&cookie->lock);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
- kfree(op);
- _leave(" = -ESTALE");
- return -ESTALE;
}
-EXPORT_SYMBOL(__fscache_check_consistency);
+EXPORT_SYMBOL(__fscache_invalidate);
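
For illustration, a netfs normally reaches __fscache_invalidate() through the
fscache_invalidate() wrapper in include/linux/fscache.h. A minimal sketch,
assuming that wrapper and the FSCACHE_INVAL_DIO_WRITE flag used above; the
"my_" types and fields are hypothetical:

/* Sketch: drop the cache copy after a third-party change is detected,
 * or ahead of a DIO write.  fscache_invalidate() checks cookie validity
 * and then calls __fscache_invalidate() above.
 */
struct my_aux { __le32 version; };

static void my_netfs_invalidate_cache(struct my_inode *mi, loff_t new_size,
				      bool dio_write)
{
	struct my_aux aux = { .version = cpu_to_le32(mi->version) };

	fscache_invalidate(mi->cache_cookie, &aux, new_size,
			   dio_write ? FSCACHE_INVAL_DIO_WRITE : 0);
}
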
/*
* Generate a list of extant cookies in /proc/fs/fscache/cookies
@@ -983,44 +1076,27 @@ static int fscache_cookies_seq_show(struct seq_file *m, void *v)
{
struct fscache_cookie *cookie;
unsigned int keylen = 0, auxlen = 0;
- char _type[3], *type;
u8 *p;
if (v == &fscache_cookies) {
seq_puts(m,
- "COOKIE PARENT USAGE CHILD ACT TY FL DEF NETFS_DATA\n"
- "======== ======== ===== ===== === == === ================ ==========\n"
+ "COOKIE VOLUME REF ACT ACC S FL DEF \n"
+ "======== ======== === === === = == ================\n"
);
return 0;
}
cookie = list_entry(v, struct fscache_cookie, proc_link);
- switch (cookie->type) {
- case 0:
- type = "IX";
- break;
- case 1:
- type = "DT";
- break;
- default:
- snprintf(_type, sizeof(_type), "%02u",
- cookie->type);
- type = _type;
- break;
- }
-
seq_printf(m,
- "%08x %08x %5u %5u %3u %s %03lx %-16s %px",
+ "%08x %08x %3d %3d %3d %c %02lx",
cookie->debug_id,
- cookie->parent ? cookie->parent->debug_id : 0,
+ cookie->volume->debug_id,
refcount_read(&cookie->ref),
- atomic_read(&cookie->n_children),
atomic_read(&cookie->n_active),
- type,
- cookie->flags,
- cookie->def->name,
- cookie->netfs_data);
+ atomic_read(&cookie->n_accesses),
+ fscache_cookie_states[cookie->state],
+ cookie->flags);
keylen = cookie->key_len;
auxlen = cookie->aux_len;
diff --git a/fs/fscache/fsdef.c b/fs/fscache/fsdef.c
deleted file mode 100644
index 0402673c680e..000000000000
--- a/fs/fscache/fsdef.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Filesystem index definition
- *
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL CACHE
-#include <linux/module.h>
-#include "internal.h"
-
-static
-enum fscache_checkaux fscache_fsdef_netfs_check_aux(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size);
-
-/*
- * The root index is owned by FS-Cache itself.
- *
- * When a netfs requests caching facilities, FS-Cache will, if one doesn't
- * already exist, create an entry in the root index with the key being the name
- * of the netfs ("AFS" for example), and the auxiliary data holding the index
- * structure version supplied by the netfs:
- *
- * FSDEF
- * |
- * +-----------+
- * | |
- * NFS AFS
- * [v=1] [v=1]
- *
- * If an entry with the appropriate name does already exist, the version is
- * compared. If the version is different, the entire subtree from that entry
- * will be discarded and a new entry created.
- *
- * The new entry will be an index, and a cookie referring to it will be passed
- * to the netfs. This is then the root handle by which the netfs accesses the
- * cache. It can create whatever objects it likes in that index, including
- * further indices.
- */
-static struct fscache_cookie_def fscache_fsdef_index_def = {
- .name = ".FS-Cache",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-struct fscache_cookie fscache_fsdef_index = {
- .debug_id = 1,
- .ref = REFCOUNT_INIT(1),
- .n_active = ATOMIC_INIT(1),
- .lock = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock),
- .backing_objects = HLIST_HEAD_INIT,
- .def = &fscache_fsdef_index_def,
- .flags = 1 << FSCACHE_COOKIE_ENABLED,
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-EXPORT_SYMBOL(fscache_fsdef_index);
-
-/*
- * Definition of an entry in the root index. Each entry is an index, keyed to
- * a specific netfs and only applicable to a particular version of the index
- * structure used by that netfs.
- */
-struct fscache_cookie_def fscache_fsdef_netfs_def = {
- .name = "FSDEF.netfs",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
- .check_aux = fscache_fsdef_netfs_check_aux,
-};
-
-/*
- * check that the index structure version number stored in the auxiliary data
- * matches the one the netfs gave us
- */
-static enum fscache_checkaux fscache_fsdef_netfs_check_aux(
- void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size)
-{
- struct fscache_netfs *netfs = cookie_netfs_data;
- uint32_t version;
-
- _enter("{%s},,%hu", netfs->name, datalen);
-
- if (datalen != sizeof(version)) {
- _leave(" = OBSOLETE [dl=%d v=%zu]", datalen, sizeof(version));
- return FSCACHE_CHECKAUX_OBSOLETE;
- }
-
- memcpy(&version, data, sizeof(version));
- if (version != netfs->version) {
- _leave(" = OBSOLETE [ver=%x net=%x]", version, netfs->version);
- return FSCACHE_CHECKAUX_OBSOLETE;
- }
-
- _leave(" = OKAY");
- return FSCACHE_CHECKAUX_OKAY;
-}
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
index c3e4804b8fcb..f121c21590dc 100644
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -1,65 +1,69 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Internal definitions for FS-Cache
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
-/*
- * Lock order, in the order in which multiple locks should be obtained:
- * - fscache_addremove_sem
- * - cookie->lock
- * - cookie->parent->lock
- * - cache->object_list_lock
- * - object->lock
- * - object->parent->lock
- * - cookie->stores_lock
- * - fscache_thread_lock
- *
- */
-
#ifdef pr_fmt
#undef pr_fmt
#endif
#define pr_fmt(fmt) "FS-Cache: " fmt
+#include <linux/slab.h>
#include <linux/fscache-cache.h>
#include <trace/events/fscache.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#define FSCACHE_MIN_THREADS 4
-#define FSCACHE_MAX_THREADS 32
-
/*
* cache.c
*/
-extern struct list_head fscache_cache_list;
-extern struct rw_semaphore fscache_addremove_sem;
+#ifdef CONFIG_PROC_FS
+extern const struct seq_operations fscache_caches_seq_ops;
+#endif
+bool fscache_begin_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
+void fscache_end_cache_access(struct fscache_cache *cache, enum fscache_access_trace why);
+struct fscache_cache *fscache_lookup_cache(const char *name, bool is_cache);
+void fscache_put_cache(struct fscache_cache *cache, enum fscache_cache_trace where);
+
+static inline enum fscache_cache_state fscache_cache_state(const struct fscache_cache *cache)
+{
+ return smp_load_acquire(&cache->state);
+}
+
+static inline bool fscache_cache_is_live(const struct fscache_cache *cache)
+{
+ return fscache_cache_state(cache) == FSCACHE_CACHE_IS_ACTIVE;
+}
-extern struct fscache_cache *fscache_select_cache_for_object(
- struct fscache_cookie *);
+static inline void fscache_set_cache_state(struct fscache_cache *cache,
+ enum fscache_cache_state new_state)
+{
+ smp_store_release(&cache->state, new_state);
+}
+
+static inline bool fscache_set_cache_state_maybe(struct fscache_cache *cache,
+ enum fscache_cache_state old_state,
+ enum fscache_cache_state new_state)
+{
+ return try_cmpxchg_release(&cache->state, &old_state, new_state);
+}
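
These accessors pair smp_store_release() on the writer side with
smp_load_acquire() on readers, so a cache observed as live also has all of its
prior setup visible. A minimal sketch of a guarded transition, using only the
helpers above (FSCACHE_CACHE_IS_PREPARING is assumed from the
fscache_cache_state enum):

/* Sketch: move a cache from preparing to active exactly once; a
 * concurrent state change makes the cmpxchg fail and we back off.
 */
static bool my_mark_cache_live(struct fscache_cache *cache)
{
	return fscache_set_cache_state_maybe(cache,
					     FSCACHE_CACHE_IS_PREPARING,
					     FSCACHE_CACHE_IS_ACTIVE);
}
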
/*
* cookie.c
*/
extern struct kmem_cache *fscache_cookie_jar;
extern const struct seq_operations fscache_cookies_seq_ops;
+extern struct timer_list fscache_cookie_lru_timer;
+
+extern void fscache_print_cookie(struct fscache_cookie *cookie, char prefix);
+extern bool fscache_begin_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
-extern void fscache_free_cookie(struct fscache_cookie *);
-extern struct fscache_cookie *fscache_alloc_cookie(struct fscache_cookie *,
- const struct fscache_cookie_def *,
- const void *, size_t,
- const void *, size_t,
- void *, loff_t);
-extern struct fscache_cookie *fscache_hash_cookie(struct fscache_cookie *);
-extern struct fscache_cookie *fscache_cookie_get(struct fscache_cookie *,
- enum fscache_cookie_trace);
-extern void fscache_cookie_put(struct fscache_cookie *,
- enum fscache_cookie_trace);
-
-static inline void fscache_cookie_see(struct fscache_cookie *cookie,
+static inline void fscache_see_cookie(struct fscache_cookie *cookie,
enum fscache_cookie_trace where)
{
trace_fscache_cookie(cookie->debug_id, refcount_read(&cookie->ref),
@@ -67,60 +71,22 @@ static inline void fscache_cookie_see(struct fscache_cookie *cookie,
}
/*
- * fsdef.c
+ * io.c
*/
-extern struct fscache_cookie fscache_fsdef_index;
-extern struct fscache_cookie_def fscache_fsdef_netfs_def;
-
-/*
- * main.c
- */
-extern unsigned fscache_defer_lookup;
-extern unsigned fscache_defer_create;
-extern unsigned fscache_debug;
-extern struct kobject *fscache_root;
-extern struct workqueue_struct *fscache_object_wq;
-extern struct workqueue_struct *fscache_op_wq;
-DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
-
-extern unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n);
-
-static inline bool fscache_object_congested(void)
+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
{
- return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq);
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+
+ if (ops)
+ ops->end_operation(cres);
}
/*
- * object.c
+ * main.c
*/
-extern void fscache_enqueue_object(struct fscache_object *);
+extern unsigned fscache_debug;
-/*
- * operation.c
- */
-extern int fscache_submit_exclusive_op(struct fscache_object *,
- struct fscache_operation *);
-extern int fscache_submit_op(struct fscache_object *,
- struct fscache_operation *);
-extern int fscache_cancel_op(struct fscache_operation *, bool);
-extern void fscache_cancel_all_ops(struct fscache_object *);
-extern void fscache_abort_object(struct fscache_object *);
-extern void fscache_start_operations(struct fscache_object *);
-extern void fscache_operation_gc(struct work_struct *);
-
-/*
- * page.c
- */
-extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
-extern int fscache_wait_for_operation_activation(struct fscache_object *,
- struct fscache_operation *,
- atomic_t *,
- atomic_t *);
-extern void fscache_invalidate_writes(struct fscache_cookie *);
-struct fscache_retrieval *fscache_alloc_retrieval(struct fscache_cookie *cookie,
- struct address_space *mapping,
- fscache_rw_complete_t end_io_func,
- void *context);
+extern unsigned int fscache_hash(unsigned int salt, const void *data, size_t len);
/*
* proc.c
@@ -137,125 +103,27 @@ extern void fscache_proc_cleanup(void);
* stats.c
*/
#ifdef CONFIG_FSCACHE_STATS
-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
-
-extern atomic_t fscache_n_op_pend;
-extern atomic_t fscache_n_op_run;
-extern atomic_t fscache_n_op_enqueue;
-extern atomic_t fscache_n_op_deferred_release;
-extern atomic_t fscache_n_op_initialised;
-extern atomic_t fscache_n_op_release;
-extern atomic_t fscache_n_op_gc;
-extern atomic_t fscache_n_op_cancelled;
-extern atomic_t fscache_n_op_rejected;
-
-extern atomic_t fscache_n_attr_changed;
-extern atomic_t fscache_n_attr_changed_ok;
-extern atomic_t fscache_n_attr_changed_nobufs;
-extern atomic_t fscache_n_attr_changed_nomem;
-extern atomic_t fscache_n_attr_changed_calls;
-
-extern atomic_t fscache_n_allocs;
-extern atomic_t fscache_n_allocs_ok;
-extern atomic_t fscache_n_allocs_wait;
-extern atomic_t fscache_n_allocs_nobufs;
-extern atomic_t fscache_n_allocs_intr;
-extern atomic_t fscache_n_allocs_object_dead;
-extern atomic_t fscache_n_alloc_ops;
-extern atomic_t fscache_n_alloc_op_waits;
-
-extern atomic_t fscache_n_retrievals;
-extern atomic_t fscache_n_retrievals_ok;
-extern atomic_t fscache_n_retrievals_wait;
-extern atomic_t fscache_n_retrievals_nodata;
-extern atomic_t fscache_n_retrievals_nobufs;
-extern atomic_t fscache_n_retrievals_intr;
-extern atomic_t fscache_n_retrievals_nomem;
-extern atomic_t fscache_n_retrievals_object_dead;
-extern atomic_t fscache_n_retrieval_ops;
-extern atomic_t fscache_n_retrieval_op_waits;
-
-extern atomic_t fscache_n_stores;
-extern atomic_t fscache_n_stores_ok;
-extern atomic_t fscache_n_stores_again;
-extern atomic_t fscache_n_stores_nobufs;
-extern atomic_t fscache_n_stores_oom;
-extern atomic_t fscache_n_store_ops;
-extern atomic_t fscache_n_store_calls;
-extern atomic_t fscache_n_store_pages;
-extern atomic_t fscache_n_store_radix_deletes;
-extern atomic_t fscache_n_store_pages_over_limit;
-
-extern atomic_t fscache_n_store_vmscan_not_storing;
-extern atomic_t fscache_n_store_vmscan_gone;
-extern atomic_t fscache_n_store_vmscan_busy;
-extern atomic_t fscache_n_store_vmscan_cancelled;
-extern atomic_t fscache_n_store_vmscan_wait;
-
-extern atomic_t fscache_n_marks;
-extern atomic_t fscache_n_uncaches;
+extern atomic_t fscache_n_volumes;
+extern atomic_t fscache_n_volumes_collision;
+extern atomic_t fscache_n_volumes_nomem;
+extern atomic_t fscache_n_cookies;
+extern atomic_t fscache_n_cookies_lru;
+extern atomic_t fscache_n_cookies_lru_expired;
+extern atomic_t fscache_n_cookies_lru_removed;
+extern atomic_t fscache_n_cookies_lru_dropped;
extern atomic_t fscache_n_acquires;
-extern atomic_t fscache_n_acquires_null;
-extern atomic_t fscache_n_acquires_no_cache;
extern atomic_t fscache_n_acquires_ok;
-extern atomic_t fscache_n_acquires_nobufs;
extern atomic_t fscache_n_acquires_oom;
extern atomic_t fscache_n_invalidates;
-extern atomic_t fscache_n_invalidates_run;
-
-extern atomic_t fscache_n_updates;
-extern atomic_t fscache_n_updates_null;
-extern atomic_t fscache_n_updates_run;
extern atomic_t fscache_n_relinquishes;
-extern atomic_t fscache_n_relinquishes_null;
-extern atomic_t fscache_n_relinquishes_waitcrt;
extern atomic_t fscache_n_relinquishes_retire;
+extern atomic_t fscache_n_relinquishes_dropped;
-extern atomic_t fscache_n_cookie_index;
-extern atomic_t fscache_n_cookie_data;
-extern atomic_t fscache_n_cookie_special;
-
-extern atomic_t fscache_n_object_alloc;
-extern atomic_t fscache_n_object_no_alloc;
-extern atomic_t fscache_n_object_lookups;
-extern atomic_t fscache_n_object_lookups_negative;
-extern atomic_t fscache_n_object_lookups_positive;
-extern atomic_t fscache_n_object_lookups_timed_out;
-extern atomic_t fscache_n_object_created;
-extern atomic_t fscache_n_object_avail;
-extern atomic_t fscache_n_object_dead;
-
-extern atomic_t fscache_n_checkaux_none;
-extern atomic_t fscache_n_checkaux_okay;
-extern atomic_t fscache_n_checkaux_update;
-extern atomic_t fscache_n_checkaux_obsolete;
-
-extern atomic_t fscache_n_cop_alloc_object;
-extern atomic_t fscache_n_cop_lookup_object;
-extern atomic_t fscache_n_cop_lookup_complete;
-extern atomic_t fscache_n_cop_grab_object;
-extern atomic_t fscache_n_cop_invalidate_object;
-extern atomic_t fscache_n_cop_update_object;
-extern atomic_t fscache_n_cop_drop_object;
-extern atomic_t fscache_n_cop_put_object;
-extern atomic_t fscache_n_cop_sync_cache;
-extern atomic_t fscache_n_cop_attr_changed;
-extern atomic_t fscache_n_cop_read_or_alloc_page;
-extern atomic_t fscache_n_cop_read_or_alloc_pages;
-extern atomic_t fscache_n_cop_allocate_page;
-extern atomic_t fscache_n_cop_allocate_pages;
-extern atomic_t fscache_n_cop_write_page;
-extern atomic_t fscache_n_cop_uncache_page;
-extern atomic_t fscache_n_cop_dissociate_pages;
-
-extern atomic_t fscache_n_cache_no_space_reject;
-extern atomic_t fscache_n_cache_stale_objects;
-extern atomic_t fscache_n_cache_retired_objects;
-extern atomic_t fscache_n_cache_culled_objects;
+extern atomic_t fscache_n_resizes;
+extern atomic_t fscache_n_resizes_null;
static inline void fscache_stat(atomic_t *stat)
{
@@ -278,71 +146,26 @@ int fscache_stats_show(struct seq_file *m, void *v);
#endif
/*
- * raise an event on an object
- * - if the event is not masked for that object, then the object is
- * queued for attention by the thread pool.
- */
-static inline void fscache_raise_event(struct fscache_object *object,
- unsigned event)
-{
- BUG_ON(event >= NR_FSCACHE_OBJECT_EVENTS);
-#if 0
- printk("*** fscache_raise_event(OBJ%d{%lx},%x)\n",
- object->debug_id, object->event_mask, (1 << event));
-#endif
- if (!test_and_set_bit(event, &object->events) &&
- test_bit(event, &object->event_mask))
- fscache_enqueue_object(object);
-}
-
-/*
- * get an extra reference to a netfs retrieval context
+ * volume.c
*/
-static inline
-void *fscache_get_context(struct fscache_cookie *cookie, void *context)
-{
- if (cookie->def->get_context)
- cookie->def->get_context(cookie->netfs_data, context);
- return context;
-}
+extern const struct seq_operations fscache_volumes_seq_ops;
-/*
- * release a reference to a netfs retrieval context
- */
-static inline
-void fscache_put_context(struct fscache_cookie *cookie, void *context)
-{
- if (cookie->def->put_context)
- cookie->def->put_context(cookie->netfs_data, context);
-}
+struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+void fscache_put_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where);
+bool fscache_begin_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+void fscache_create_volume(struct fscache_volume *volume, bool wait);
-/*
- * Update the auxiliary data on a cookie.
- */
-static inline
-void fscache_update_aux(struct fscache_cookie *cookie, const void *aux_data)
-{
- void *p;
-
- if (!aux_data)
- return;
- if (cookie->aux_len <= sizeof(cookie->inline_aux))
- p = cookie->inline_aux;
- else
- p = cookie->aux;
-
- if (memcmp(p, aux_data, cookie->aux_len) != 0) {
- memcpy(p, aux_data, cookie->aux_len);
- set_bit(FSCACHE_COOKIE_AUX_UPDATED, &cookie->flags);
- }
-}
/*****************************************************************************/
/*
* debug tracing
*/
#define dbgprintk(FMT, ...) \
- printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
+ printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
@@ -395,7 +218,7 @@ do { \
#define FSCACHE_DEBUG_CACHE 0
#define FSCACHE_DEBUG_COOKIE 1
-#define FSCACHE_DEBUG_PAGE 2
+#define FSCACHE_DEBUG_OBJECT 2
#define FSCACHE_DEBUG_OPERATION 3
#define FSCACHE_POINT_ENTER 1
diff --git a/fs/fscache/io.c b/fs/fscache/io.c
index 8ecc1141802f..7a769ea57720 100644
--- a/fs/fscache/io.c
+++ b/fs/fscache/io.c
@@ -4,113 +4,323 @@
* Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
-
-#define FSCACHE_DEBUG_LEVEL PAGE
-#include <linux/module.h>
-#define FSCACHE_USE_NEW_IO_API
+#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/fscache-cache.h>
+#include <linux/uio.h>
+#include <linux/bvec.h>
#include <linux/slab.h>
-#include <linux/netfs.h>
+#include <linux/uio.h>
#include "internal.h"
-/*
- * Start a cache read operation.
- * - we return:
- * -ENOMEM - out of memory, some pages may be being read
- * -ERESTARTSYS - interrupted, some pages may be being read
- * -ENOBUFS - no backing object or space available in which to cache any
- * pages not being read
- * -ENODATA - no data available in the backing object for some or all of
- * the pages
- * 0 - dispatched a read on all pages
+/**
+ * fscache_wait_for_operation - Wait for an object to become accessible
+ * @cres: The cache resources for the operation being performed
+ * @want_state: The minimum state the object must be at
+ *
+ * See if the target cache object is at the specified minimum state of
+ * accessibility yet, and if not, wait for it.
*/
-int __fscache_begin_read_operation(struct netfs_read_request *rreq,
- struct fscache_cookie *cookie)
+bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state)
{
- struct fscache_retrieval *op;
- struct fscache_object *object;
- bool wake_cookie = false;
- int ret;
+ struct fscache_cookie *cookie = fscache_cres_cookie(cres);
+ enum fscache_cookie_state state;
- _enter("rr=%08x", rreq->debug_id);
+again:
+ if (!fscache_cache_is_live(cookie->volume->cache)) {
+ _leave(" [broken]");
+ return false;
+ }
- fscache_stat(&fscache_n_retrievals);
+ state = fscache_cookie_state(cookie);
+ _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
- if (hlist_empty(&cookie->backing_objects))
- goto nobufs;
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_CREATING:
+ case FSCACHE_COOKIE_STATE_INVALIDATING:
+ if (want_state == FSCACHE_WANT_PARAMS)
+ goto ready; /* There can be no content */
+ fallthrough;
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ wait_var_event(&cookie->state,
+ fscache_cookie_state(cookie) != state);
+ goto again;
- if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
- _leave(" = -ENOBUFS [invalidating]");
- return -ENOBUFS;
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ goto ready;
+ case FSCACHE_COOKIE_STATE_DROPPED:
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ default:
+ _leave(" [not live]");
+ return false;
}
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ready:
+ if (!cres->cache_priv2)
+ return cookie->volume->cache->ops->begin_operation(cres, want_state);
+ return true;
+}
+EXPORT_SYMBOL(fscache_wait_for_operation);
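
A cache backend's I/O paths are expected to gate on this before touching the
backing store. A hedged sketch (FSCACHE_WANT_READ is assumed from the
fscache_want_state enum; the backend function names are hypothetical):

/* Sketch: wait for the cookie to become usable for reading, then hand
 * off to a hypothetical backing-file reader.
 */
static int my_backend_read(struct netfs_cache_resources *cres,
			   loff_t pos, struct iov_iter *iter)
{
	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
		return -ENOBUFS;	/* cookie never became usable */
	return my_backing_file_read(cres, pos, iter);	/* hypothetical */
}
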
+
+/*
+ * Begin an I/O operation on the cache, waiting till we reach the right state.
+ *
+ * Attaches the resources required to the operation resources record.
+ */
+static int fscache_begin_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie,
+ enum fscache_want_state want_state,
+ enum fscache_access_trace why)
+{
+ enum fscache_cookie_state state;
+ long timeo;
+ bool once_only = false;
- if (fscache_wait_for_deferred_lookup(cookie) < 0)
- return -ERESTARTSYS;
+ cres->ops = NULL;
+ cres->cache_priv = cookie;
+ cres->cache_priv2 = NULL;
+ cres->debug_id = cookie->debug_id;
+ cres->inval_counter = cookie->inval_counter;
- op = fscache_alloc_retrieval(cookie, NULL, NULL, NULL);
- if (!op)
- return -ENOMEM;
- trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi);
+ if (!fscache_begin_cookie_access(cookie, why))
+ return -ENOBUFS;
+again:
spin_lock(&cookie->lock);
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto nobufs_unlock;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
+ state = fscache_cookie_state(cookie);
+ _enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
- __fscache_use_cookie(cookie);
- atomic_inc(&object->n_reads);
- __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
+ switch (state) {
+ case FSCACHE_COOKIE_STATE_LOOKING_UP:
+ case FSCACHE_COOKIE_STATE_LRU_DISCARDING:
+ case FSCACHE_COOKIE_STATE_INVALIDATING:
+ goto wait_for_file_wrangling;
+ case FSCACHE_COOKIE_STATE_CREATING:
+ if (want_state == FSCACHE_WANT_PARAMS)
+ goto ready; /* There can be no content */
+ goto wait_for_file_wrangling;
+ case FSCACHE_COOKIE_STATE_ACTIVE:
+ goto ready;
+ case FSCACHE_COOKIE_STATE_DROPPED:
+ case FSCACHE_COOKIE_STATE_RELINQUISHING:
+ WARN(1, "Can't use cookie in state %u\n", cookie->state);
+ goto not_live;
+ default:
+ goto not_live;
+ }
- if (fscache_submit_op(object, &op->op) < 0)
- goto nobufs_unlock_dec;
+ready:
spin_unlock(&cookie->lock);
+ if (!cookie->volume->cache->ops->begin_operation(cres, want_state))
+ goto failed;
+ return 0;
- fscache_stat(&fscache_n_retrieval_ops);
+wait_for_file_wrangling:
+ spin_unlock(&cookie->lock);
+ trace_fscache_access(cookie->debug_id, refcount_read(&cookie->ref),
+ atomic_read(&cookie->n_accesses),
+ fscache_access_io_wait);
+ timeo = wait_var_event_timeout(&cookie->state,
+ fscache_cookie_state(cookie) != state, 20 * HZ);
+ if (timeo <= 1 && !once_only) {
+ pr_warn("%s: cookie state change wait timed out: cookie->state=%u state=%u",
+ __func__, fscache_cookie_state(cookie), state);
+ fscache_print_cookie(cookie, 'O');
+ once_only = true;
+ }
+ goto again;
- /* we wait for the operation to become active, and then process it
- * *here*, in this thread, and not in the thread pool */
- ret = fscache_wait_for_operation_activation(
- object, &op->op,
- __fscache_stat(&fscache_n_retrieval_op_waits),
- __fscache_stat(&fscache_n_retrievals_object_dead));
- if (ret < 0)
- goto error;
-
- /* ask the cache to honour the operation */
- ret = object->cache->ops->begin_read_operation(rreq, op);
-
-error:
- if (ret == -ENOMEM)
- fscache_stat(&fscache_n_retrievals_nomem);
- else if (ret == -ERESTARTSYS)
- fscache_stat(&fscache_n_retrievals_intr);
- else if (ret == -ENODATA)
- fscache_stat(&fscache_n_retrievals_nodata);
- else if (ret < 0)
- fscache_stat(&fscache_n_retrievals_nobufs);
- else
- fscache_stat(&fscache_n_retrievals_ok);
-
- fscache_put_retrieval(op);
- _leave(" = %d", ret);
- return ret;
-
-nobufs_unlock_dec:
- atomic_dec(&object->n_reads);
- wake_cookie = __fscache_unuse_cookie(cookie);
-nobufs_unlock:
+not_live:
spin_unlock(&cookie->lock);
- fscache_put_retrieval(op);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
-nobufs:
- fscache_stat(&fscache_n_retrievals_nobufs);
+failed:
+ cres->cache_priv = NULL;
+ cres->ops = NULL;
+ fscache_end_cookie_access(cookie, fscache_access_io_not_live);
_leave(" = -ENOBUFS");
return -ENOBUFS;
}
+
+int __fscache_begin_read_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
+{
+ return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
+ fscache_access_io_read);
+}
EXPORT_SYMBOL(__fscache_begin_read_operation);
+
+int __fscache_begin_write_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
+{
+ return fscache_begin_operation(cres, cookie, FSCACHE_WANT_PARAMS,
+ fscache_access_io_write);
+}
+EXPORT_SYMBOL(__fscache_begin_write_operation);
+
+/**
+ * fscache_set_page_dirty - Mark page dirty and pin a cache object for writeback
+ * @page: The page being dirtied
+ * @cookie: The cookie referring to the cache object
+ *
+ * Set the dirty flag on a page and pin an in-use cache object in memory when
+ * dirtying a page so that writeback can later write to it. This is intended
+ * to be called from the filesystem's ->set_page_dirty() method.
+ *
+ * Returns 1 if PG_dirty was set on the page, 0 otherwise.
+ */
+int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie)
+{
+ struct inode *inode = page->mapping->host;
+ bool need_use = false;
+
+ _enter("");
+
+ if (!__set_page_dirty_nobuffers(page))
+ return 0;
+ if (!fscache_cookie_valid(cookie))
+ return 1;
+
+ if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
+ spin_lock(&inode->i_lock);
+ if (!(inode->i_state & I_PINNING_FSCACHE_WB)) {
+ inode->i_state |= I_PINNING_FSCACHE_WB;
+ need_use = true;
+ }
+ spin_unlock(&inode->i_lock);
+
+ if (need_use)
+ fscache_use_cookie(cookie, true);
+ }
+ return 1;
+}
+EXPORT_SYMBOL(fscache_set_page_dirty);
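
A filesystem would typically wire this in as its ->set_page_dirty()
address_space operation. A minimal sketch with hypothetical netfs types:

/* Sketch: dirtying a page pins the cache object (via the
 * I_PINNING_FSCACHE_WB logic above) until writeback unuses it.
 */
static int my_set_page_dirty(struct page *page)
{
	struct my_inode *mi = MY_I(page->mapping->host);

	return fscache_set_page_dirty(page, mi->cache_cookie);
}

static const struct address_space_operations my_aops = {
	.set_page_dirty	= my_set_page_dirty,
	/* ... */
};
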
+
+struct fscache_write_request {
+ struct netfs_cache_resources cache_resources;
+ struct address_space *mapping;
+ loff_t start;
+ size_t len;
+ bool set_bits;
+ netfs_io_terminated_t term_func;
+ void *term_func_priv;
+};
+
+void __fscache_clear_page_bits(struct address_space *mapping,
+ loff_t start, size_t len)
+{
+ pgoff_t first = start / PAGE_SIZE;
+ pgoff_t last = (start + len - 1) / PAGE_SIZE;
+ struct page *page;
+
+ if (len) {
+ XA_STATE(xas, &mapping->i_pages, first);
+
+ rcu_read_lock();
+ xas_for_each(&xas, page, last) {
+ end_page_fscache(page);
+ }
+ rcu_read_unlock();
+ }
+}
+EXPORT_SYMBOL(__fscache_clear_page_bits);
+
+/*
+ * Deal with the completion of writing the data to the cache.
+ */
+static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
+ bool was_async)
+{
+ struct fscache_write_request *wreq = priv;
+
+ fscache_clear_page_bits(fscache_cres_cookie(&wreq->cache_resources),
+ wreq->mapping, wreq->start, wreq->len,
+ wreq->set_bits);
+
+ if (wreq->term_func)
+ wreq->term_func(wreq->term_func_priv, transferred_or_error,
+ was_async);
+ fscache_end_operation(&wreq->cache_resources);
+ kfree(wreq);
+}
+
+void __fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool cond)
+{
+ struct fscache_write_request *wreq;
+ struct netfs_cache_resources *cres;
+ struct iov_iter iter;
+ int ret = -ENOBUFS;
+
+ if (len == 0)
+ goto abandon;
+
+ _enter("%llx,%zx", start, len);
+
+ wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
+ if (!wreq)
+ goto abandon;
+ wreq->mapping = mapping;
+ wreq->start = start;
+ wreq->len = len;
+ wreq->set_bits = cond;
+ wreq->term_func = term_func;
+ wreq->term_func_priv = term_func_priv;
+
+ cres = &wreq->cache_resources;
+ if (fscache_begin_operation(cres, cookie, FSCACHE_WANT_WRITE,
+ fscache_access_io_write) < 0)
+ goto abandon_free;
+
+ ret = cres->ops->prepare_write(cres, &start, &len, i_size, false);
+ if (ret < 0)
+ goto abandon_end;
+
+ /* TODO: Consider clearing page bits now for space the write isn't
+ * covering. This is more complicated than it appears when THPs are
+ * taken into account.
+ */
+
+ iov_iter_xarray(&iter, WRITE, &mapping->i_pages, start, len);
+ fscache_write(cres, start, &iter, fscache_wreq_done, wreq);
+ return;
+
+abandon_end:
+ return fscache_wreq_done(wreq, ret, false);
+abandon_free:
+ kfree(wreq);
+abandon:
+ fscache_clear_page_bits(cookie, mapping, start, len, cond);
+ if (term_func)
+ term_func(term_func_priv, ret, false);
+}
+EXPORT_SYMBOL(__fscache_write_to_cache);
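
Callers normally go through the fscache_write_to_cache() wrapper, which
invokes term_func with -ENOBUFS instead when caching is not in effect. A
sketch of a writeback path, assuming that wrapper; the "my_" names are
hypothetical:

/* Sketch: after writing a span back to the server, copy it into the
 * cache.  Passing a NULL term_func still gets the PG_fscache bits
 * cleared over the range on completion.
 */
static void my_write_to_cache(struct my_inode *mi, loff_t start, size_t len)
{
	fscache_write_to_cache(mi->cache_cookie, mi->vfs_inode.i_mapping,
			       start, len, i_size_read(&mi->vfs_inode),
			       NULL, NULL, my_is_caching(mi));
}
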
+
+/*
+ * Change the size of a backing object.
+ */
+void __fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
+{
+ struct netfs_cache_resources cres;
+
+ trace_fscache_resize(cookie, new_size);
+ if (fscache_begin_operation(&cres, cookie, FSCACHE_WANT_WRITE,
+ fscache_access_io_resize) == 0) {
+ fscache_stat(&fscache_n_resizes);
+ set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
+
+ /* We cannot defer a resize as we need to do it inside the
+ * netfs's inode lock so that we're serialised with respect to
+ * writes.
+ */
+ cookie->volume->cache->ops->resize_cookie(&cres, new_size);
+ fscache_end_operation(&cres);
+ } else {
+ fscache_stat(&fscache_n_resizes_null);
+ }
+}
+EXPORT_SYMBOL(__fscache_resize_cookie);
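
The obvious caller is truncation. A minimal sketch from a netfs setattr path,
assuming the fscache_resize_cookie() wrapper; note the serialisation
requirement in the comment above:

/* Sketch: keep the cache object's size in step with a truncate.  Must
 * be called with the inode lock held so resizes serialise with writes.
 */
static void my_truncate(struct my_inode *mi, loff_t new_size)
{
	truncate_setsize(&mi->vfs_inode, new_size);
	fscache_resize_cookie(mi->cache_cookie, new_size);
}
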
diff --git a/fs/fscache/main.c b/fs/fscache/main.c
index 4207f98e405f..dad85fd84f6f 100644
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -1,17 +1,13 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* General filesystem local caching manager
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/completion.h>
-#include <linux/slab.h>
-#include <linux/seq_file.h>
#define CREATE_TRACE_POINTS
#include "internal.h"
@@ -19,79 +15,18 @@ MODULE_DESCRIPTION("FS Cache Manager");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
-unsigned fscache_defer_lookup = 1;
-module_param_named(defer_lookup, fscache_defer_lookup, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(fscache_defer_lookup,
- "Defer cookie lookup to background thread");
-
-unsigned fscache_defer_create = 1;
-module_param_named(defer_create, fscache_defer_create, uint,
- S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(fscache_defer_create,
- "Defer cookie creation to background thread");
-
unsigned fscache_debug;
module_param_named(debug, fscache_debug, uint,
S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(fscache_debug,
"FS-Cache debugging mask");
-struct kobject *fscache_root;
-struct workqueue_struct *fscache_object_wq;
-struct workqueue_struct *fscache_op_wq;
-
-DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
+EXPORT_TRACEPOINT_SYMBOL(fscache_access_cache);
+EXPORT_TRACEPOINT_SYMBOL(fscache_access_volume);
+EXPORT_TRACEPOINT_SYMBOL(fscache_access);
-/* these values serve as lower bounds, will be adjusted in fscache_init() */
-static unsigned fscache_object_max_active = 4;
-static unsigned fscache_op_max_active = 2;
-
-#ifdef CONFIG_SYSCTL
-static struct ctl_table_header *fscache_sysctl_header;
-
-static int fscache_max_active_sysctl(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- struct workqueue_struct **wqp = table->extra1;
- unsigned int *datap = table->data;
- int ret;
-
- ret = proc_dointvec(table, write, buffer, lenp, ppos);
- if (ret == 0)
- workqueue_set_max_active(*wqp, *datap);
- return ret;
-}
-
-static struct ctl_table fscache_sysctls[] = {
- {
- .procname = "object_max_active",
- .data = &fscache_object_max_active,
- .maxlen = sizeof(unsigned),
- .mode = 0644,
- .proc_handler = fscache_max_active_sysctl,
- .extra1 = &fscache_object_wq,
- },
- {
- .procname = "operation_max_active",
- .data = &fscache_op_max_active,
- .maxlen = sizeof(unsigned),
- .mode = 0644,
- .proc_handler = fscache_max_active_sysctl,
- .extra1 = &fscache_op_wq,
- },
- {}
-};
-
-static struct ctl_table fscache_sysctls_root[] = {
- {
- .procname = "fscache",
- .mode = 0555,
- .child = fscache_sysctls,
- },
- {}
-};
-#endif
+struct workqueue_struct *fscache_wq;
+EXPORT_SYMBOL(fscache_wq);
/*
* Mixing scores (in bits) for (7,20):
@@ -118,15 +53,16 @@ static inline unsigned int fold_hash(unsigned long x, unsigned long y)
/*
* Generate a hash. This is derived from full_name_hash(), but we want to be
* sure it is arch independent and that it doesn't change as bits of the
- * computed hash value might appear on disk. The caller also guarantees that
- * the hashed data will be a series of aligned 32-bit words.
+ * computed hash value might appear on disk. The caller must guarantee that
+ * the source data is a multiple of four bytes in size.
*/
-unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n)
+unsigned int fscache_hash(unsigned int salt, const void *data, size_t len)
{
- unsigned int a, x = 0, y = salt;
+ const __le32 *p = data;
+ unsigned int a, x = 0, y = salt, n = len / sizeof(__le32);
for (; n; n--) {
- a = *data++;
+ a = le32_to_cpu(*p++);
HASH_MIX(x, y, a);
}
return fold_hash(x, y);
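
Since the folded words are read as little-endian, the hash is stable across
architectures and can safely land on disk. A sketch of hashing a short key
padded to a four-byte multiple, much as the cookie code pads its keys; the
helper is hypothetical:

/* Sketch: zero-pad an arbitrary-length key to a whole number of 32-bit
 * words before hashing.  Assumes keylen <= 32 for brevity.
 */
static unsigned int my_hash_key(unsigned int salt,
				const u8 *key, size_t keylen)
{
	__le32 buf[8] = {};

	memcpy(buf, key, keylen);
	return fscache_hash(salt, buf, round_up(keylen, sizeof(__le32)));
}
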
@@ -137,44 +73,16 @@ unsigned int fscache_hash(unsigned int salt, unsigned int *data, unsigned int n)
*/
static int __init fscache_init(void)
{
- unsigned int nr_cpus = num_possible_cpus();
- unsigned int cpu;
- int ret;
-
- fscache_object_max_active =
- clamp_val(nr_cpus,
- fscache_object_max_active, WQ_UNBOUND_MAX_ACTIVE);
-
- ret = -ENOMEM;
- fscache_object_wq = alloc_workqueue("fscache_object", WQ_UNBOUND,
- fscache_object_max_active);
- if (!fscache_object_wq)
- goto error_object_wq;
-
- fscache_op_max_active =
- clamp_val(fscache_object_max_active / 2,
- fscache_op_max_active, WQ_UNBOUND_MAX_ACTIVE);
+ int ret = -ENOMEM;
- ret = -ENOMEM;
- fscache_op_wq = alloc_workqueue("fscache_operation", WQ_UNBOUND,
- fscache_op_max_active);
- if (!fscache_op_wq)
- goto error_op_wq;
-
- for_each_possible_cpu(cpu)
- init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu));
+ fscache_wq = alloc_workqueue("fscache", WQ_UNBOUND | WQ_FREEZABLE, 0);
+ if (!fscache_wq)
+ goto error_wq;
ret = fscache_proc_init();
if (ret < 0)
goto error_proc;
-#ifdef CONFIG_SYSCTL
- ret = -ENOMEM;
- fscache_sysctl_header = register_sysctl_table(fscache_sysctls_root);
- if (!fscache_sysctl_header)
- goto error_sysctl;
-#endif
-
fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
sizeof(struct fscache_cookie),
0, 0, NULL);
@@ -184,26 +92,14 @@ static int __init fscache_init(void)
goto error_cookie_jar;
}
- fscache_root = kobject_create_and_add("fscache", kernel_kobj);
- if (!fscache_root)
- goto error_kobj;
-
pr_notice("Loaded\n");
return 0;
-error_kobj:
- kmem_cache_destroy(fscache_cookie_jar);
error_cookie_jar:
-#ifdef CONFIG_SYSCTL
- unregister_sysctl_table(fscache_sysctl_header);
-error_sysctl:
-#endif
fscache_proc_cleanup();
error_proc:
- destroy_workqueue(fscache_op_wq);
-error_op_wq:
- destroy_workqueue(fscache_object_wq);
-error_object_wq:
+ destroy_workqueue(fscache_wq);
+error_wq:
return ret;
}
@@ -216,14 +112,9 @@ static void __exit fscache_exit(void)
{
_enter("");
- kobject_put(fscache_root);
kmem_cache_destroy(fscache_cookie_jar);
-#ifdef CONFIG_SYSCTL
- unregister_sysctl_table(fscache_sysctl_header);
-#endif
fscache_proc_cleanup();
- destroy_workqueue(fscache_op_wq);
- destroy_workqueue(fscache_object_wq);
+ destroy_workqueue(fscache_wq);
pr_notice("Unloaded\n");
}
diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c
deleted file mode 100644
index d6bdb7b5e723..000000000000
--- a/fs/fscache/netfs.c
+++ /dev/null
@@ -1,74 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* FS-Cache netfs (client) registration
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL COOKIE
-#include <linux/module.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-/*
- * register a network filesystem for caching
- */
-int __fscache_register_netfs(struct fscache_netfs *netfs)
-{
- struct fscache_cookie *candidate, *cookie;
-
- _enter("{%s}", netfs->name);
-
- /* allocate a cookie for the primary index */
- candidate = fscache_alloc_cookie(&fscache_fsdef_index,
- &fscache_fsdef_netfs_def,
- netfs->name, strlen(netfs->name),
- &netfs->version, sizeof(netfs->version),
- netfs, 0);
- if (!candidate) {
- _leave(" = -ENOMEM");
- return -ENOMEM;
- }
-
- candidate->flags = 1 << FSCACHE_COOKIE_ENABLED;
-
- /* check the netfs type is not already present */
- cookie = fscache_hash_cookie(candidate);
- if (!cookie)
- goto already_registered;
- if (cookie != candidate) {
- trace_fscache_cookie(candidate->debug_id, 1, fscache_cookie_discard);
- fscache_free_cookie(candidate);
- }
-
- fscache_cookie_get(cookie->parent, fscache_cookie_get_register_netfs);
- atomic_inc(&cookie->parent->n_children);
-
- netfs->primary_index = cookie;
-
- pr_notice("Netfs '%s' registered for caching\n", netfs->name);
- trace_fscache_netfs(netfs);
- _leave(" = 0");
- return 0;
-
-already_registered:
- fscache_cookie_put(candidate, fscache_cookie_put_dup_netfs);
- _leave(" = -EEXIST");
- return -EEXIST;
-}
-EXPORT_SYMBOL(__fscache_register_netfs);
-
-/*
- * unregister a network filesystem from the cache
- * - all cookies must have been released first
- */
-void __fscache_unregister_netfs(struct fscache_netfs *netfs)
-{
- _enter("{%s.%u}", netfs->name, netfs->version);
-
- fscache_relinquish_cookie(netfs->primary_index, NULL, false);
- pr_notice("Netfs '%s' unregistered from caching\n", netfs->name);
-
- _leave("");
-}
-EXPORT_SYMBOL(__fscache_unregister_netfs);
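
For reference, the interface deleted here was consumed roughly as in this
sketch of the pre-rewrite registration pattern (the "myfs" values are
illustrative):

/* Sketch of the removed API: a netfs declared a struct fscache_netfs
 * and registered it at module init, getting back a primary index
 * cookie under which all its other cookies were created.
 */
static struct fscache_netfs myfs_cache_netfs = {
	.name		= "myfs",
	.version	= 1,
};

static int __init myfs_init(void)
{
	return fscache_register_netfs(&myfs_cache_netfs);
}

static void __exit myfs_exit(void)
{
	fscache_unregister_netfs(&myfs_cache_netfs);
}
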
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
deleted file mode 100644
index 6a675652129b..000000000000
--- a/fs/fscache/object.c
+++ /dev/null
@@ -1,1125 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* FS-Cache object state machine handler
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * See Documentation/filesystems/caching/object.rst for a description of the
- * object state machine and the in-kernel representations.
- */
-
-#define FSCACHE_DEBUG_LEVEL COOKIE
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/prefetch.h>
-#include "internal.h"
-
-static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *, int);
-static const struct fscache_state *fscache_kill_dependents(struct fscache_object *, int);
-static const struct fscache_state *fscache_drop_object(struct fscache_object *, int);
-static const struct fscache_state *fscache_initialise_object(struct fscache_object *, int);
-static const struct fscache_state *fscache_invalidate_object(struct fscache_object *, int);
-static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *, int);
-static const struct fscache_state *fscache_kill_object(struct fscache_object *, int);
-static const struct fscache_state *fscache_lookup_failure(struct fscache_object *, int);
-static const struct fscache_state *fscache_look_up_object(struct fscache_object *, int);
-static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
-static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
-static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
-static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
-
-#define __STATE_NAME(n) fscache_osm_##n
-#define STATE(n) (&__STATE_NAME(n))
-
-/*
- * Define a work state. Work states are execution states. No event processing
- * is performed by them. The function attached to a work state returns a
- * pointer indicating the next state to which the state machine should
- * transition. Returning NO_TRANSIT repeats the current state, but goes back
- * to the scheduler first.
- */
-#define WORK_STATE(n, sn, f) \
- const struct fscache_state __STATE_NAME(n) = { \
- .name = #n, \
- .short_name = sn, \
- .work = f \
- }
-
-/*
- * Returns from work states.
- */
-#define transit_to(state) ({ prefetch(&STATE(state)->work); STATE(state); })
-
-#define NO_TRANSIT ((struct fscache_state *)NULL)
-
-/*
- * Define a wait state. Wait states are event processing states. No execution
- * is performed by them. Wait states are just tables of "if event X occurs,
- * clear it and transition to state Y". The dispatcher returns to the
- * scheduler if none of the events in which the wait state has an interest are
- * currently pending.
- */
-#define WAIT_STATE(n, sn, ...) \
- const struct fscache_state __STATE_NAME(n) = { \
- .name = #n, \
- .short_name = sn, \
- .work = NULL, \
- .transitions = { __VA_ARGS__, { 0, NULL } } \
- }
-
-#define TRANSIT_TO(state, emask) \
- { .events = (emask), .transit_to = STATE(state) }
-
-/*
- * The object state machine.
- */
-static WORK_STATE(INIT_OBJECT, "INIT", fscache_initialise_object);
-static WORK_STATE(PARENT_READY, "PRDY", fscache_parent_ready);
-static WORK_STATE(ABORT_INIT, "ABRT", fscache_abort_initialisation);
-static WORK_STATE(LOOK_UP_OBJECT, "LOOK", fscache_look_up_object);
-static WORK_STATE(OBJECT_AVAILABLE, "AVBL", fscache_object_available);
-static WORK_STATE(JUMPSTART_DEPS, "JUMP", fscache_jumpstart_dependents);
-
-static WORK_STATE(INVALIDATE_OBJECT, "INVL", fscache_invalidate_object);
-static WORK_STATE(UPDATE_OBJECT, "UPDT", fscache_update_object);
-
-static WORK_STATE(LOOKUP_FAILURE, "LCFL", fscache_lookup_failure);
-static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
-static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
-static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);
-
-static WAIT_STATE(WAIT_FOR_INIT, "?INI",
- TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
-
-static WAIT_STATE(WAIT_FOR_PARENT, "?PRN",
- TRANSIT_TO(PARENT_READY, 1 << FSCACHE_OBJECT_EV_PARENT_READY));
-
-static WAIT_STATE(WAIT_FOR_CMD, "?CMD",
- TRANSIT_TO(INVALIDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_INVALIDATE),
- TRANSIT_TO(UPDATE_OBJECT, 1 << FSCACHE_OBJECT_EV_UPDATE),
- TRANSIT_TO(JUMPSTART_DEPS, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
-
-static WAIT_STATE(WAIT_FOR_CLEARANCE, "?CLR",
- TRANSIT_TO(KILL_OBJECT, 1 << FSCACHE_OBJECT_EV_CLEARED));
-
-/*
- * Out-of-band event transition tables. These are for handling unexpected
- * events, such as an I/O error. If an OOB event occurs, the state machine
- * clears and disables the event and forces a transition to the nominated work
- * state (a currently executing work state will complete first).
- *
- * In such a situation, object->state remembers the state the machine should
- * have been in/gone to and returning NO_TRANSIT returns to that.
- */
-static const struct fscache_transition fscache_osm_init_oob[] = {
- TRANSIT_TO(ABORT_INIT,
- (1 << FSCACHE_OBJECT_EV_ERROR) |
- (1 << FSCACHE_OBJECT_EV_KILL)),
- { 0, NULL }
-};
-
-static const struct fscache_transition fscache_osm_lookup_oob[] = {
- TRANSIT_TO(LOOKUP_FAILURE,
- (1 << FSCACHE_OBJECT_EV_ERROR) |
- (1 << FSCACHE_OBJECT_EV_KILL)),
- { 0, NULL }
-};
-
-static const struct fscache_transition fscache_osm_run_oob[] = {
- TRANSIT_TO(KILL_OBJECT,
- (1 << FSCACHE_OBJECT_EV_ERROR) |
- (1 << FSCACHE_OBJECT_EV_KILL)),
- { 0, NULL }
-};
-
-static int fscache_get_object(struct fscache_object *,
- enum fscache_obj_ref_trace);
-static void fscache_put_object(struct fscache_object *,
- enum fscache_obj_ref_trace);
-static bool fscache_enqueue_dependents(struct fscache_object *, int);
-static void fscache_dequeue_object(struct fscache_object *);
-static void fscache_update_aux_data(struct fscache_object *);
-
-/*
- * we need to notify the parent when an op completes that we had outstanding
- * upon it
- */
-static inline void fscache_done_parent_op(struct fscache_object *object)
-{
- struct fscache_object *parent = object->parent;
-
- _enter("OBJ%x {OBJ%x,%x}",
- object->debug_id, parent->debug_id, parent->n_ops);
-
- spin_lock_nested(&parent->lock, 1);
- parent->n_obj_ops--;
- parent->n_ops--;
- if (parent->n_ops == 0)
- fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
- spin_unlock(&parent->lock);
-}
-
-/*
- * Object state machine dispatcher.
- */
-static void fscache_object_sm_dispatcher(struct fscache_object *object)
-{
- const struct fscache_transition *t;
- const struct fscache_state *state, *new_state;
- unsigned long events, event_mask;
- bool oob;
- int event = -1;
-
- ASSERT(object != NULL);
-
- _enter("{OBJ%x,%s,%lx}",
- object->debug_id, object->state->name, object->events);
-
- event_mask = object->event_mask;
-restart:
- object->event_mask = 0; /* Mask normal event handling */
- state = object->state;
-restart_masked:
- events = object->events;
-
- /* Handle any out-of-band events (typically an error) */
- if (events & object->oob_event_mask) {
- _debug("{OBJ%x} oob %lx",
- object->debug_id, events & object->oob_event_mask);
- oob = true;
- for (t = object->oob_table; t->events; t++) {
- if (events & t->events) {
- state = t->transit_to;
- ASSERT(state->work != NULL);
- event = fls(events & t->events) - 1;
- __clear_bit(event, &object->oob_event_mask);
- clear_bit(event, &object->events);
- goto execute_work_state;
- }
- }
- }
- oob = false;
-
- /* Wait states are just transition tables */
- if (!state->work) {
- if (events & event_mask) {
- for (t = state->transitions; t->events; t++) {
- if (events & t->events) {
- new_state = t->transit_to;
- event = fls(events & t->events) - 1;
- trace_fscache_osm(object, state,
- true, false, event);
- clear_bit(event, &object->events);
- _debug("{OBJ%x} ev %d: %s -> %s",
- object->debug_id, event,
- state->name, new_state->name);
- object->state = state = new_state;
- goto execute_work_state;
- }
- }
-
- /* The event mask didn't include all the tabled bits */
- BUG();
- }
- /* Randomly woke up */
- goto unmask_events;
- }
-
-execute_work_state:
- _debug("{OBJ%x} exec %s", object->debug_id, state->name);
-
- trace_fscache_osm(object, state, false, oob, event);
- new_state = state->work(object, event);
- event = -1;
- if (new_state == NO_TRANSIT) {
- _debug("{OBJ%x} %s notrans", object->debug_id, state->name);
- if (unlikely(state == STATE(OBJECT_DEAD))) {
- _leave(" [dead]");
- return;
- }
- fscache_enqueue_object(object);
- event_mask = object->oob_event_mask;
- goto unmask_events;
- }
-
- _debug("{OBJ%x} %s -> %s",
- object->debug_id, state->name, new_state->name);
- object->state = state = new_state;
-
- if (state->work) {
- if (unlikely(state == STATE(OBJECT_DEAD))) {
- _leave(" [dead]");
- return;
- }
- goto restart_masked;
- }
-
- /* Transited to wait state */
- event_mask = object->oob_event_mask;
- for (t = state->transitions; t->events; t++)
- event_mask |= t->events;
-
-unmask_events:
- object->event_mask = event_mask;
- smp_mb();
- events = object->events;
- if (events & event_mask)
- goto restart;
- _leave(" [msk %lx]", event_mask);
-}
-
-/*
- * execute an object
- */
-static void fscache_object_work_func(struct work_struct *work)
-{
- struct fscache_object *object =
- container_of(work, struct fscache_object, work);
-
- _enter("{OBJ%x}", object->debug_id);
-
- fscache_object_sm_dispatcher(object);
- fscache_put_object(object, fscache_obj_put_work);
-}
-
-/**
- * fscache_object_init - Initialise a cache object description
- * @object: Object description
- * @cookie: Cookie object will be attached to
- * @cache: Cache in which backing object will be found
- *
- * Initialise a cache object description to its basic values.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
- */
-void fscache_object_init(struct fscache_object *object,
- struct fscache_cookie *cookie,
- struct fscache_cache *cache)
-{
- const struct fscache_transition *t;
-
- atomic_inc(&cache->object_count);
-
- object->state = STATE(WAIT_FOR_INIT);
- object->oob_table = fscache_osm_init_oob;
- object->flags = 1 << FSCACHE_OBJECT_IS_LIVE;
- spin_lock_init(&object->lock);
- INIT_LIST_HEAD(&object->cache_link);
- INIT_HLIST_NODE(&object->cookie_link);
- INIT_WORK(&object->work, fscache_object_work_func);
- INIT_LIST_HEAD(&object->dependents);
- INIT_LIST_HEAD(&object->dep_link);
- INIT_LIST_HEAD(&object->pending_ops);
- object->n_children = 0;
- object->n_ops = object->n_in_progress = object->n_exclusive = 0;
- object->events = 0;
- object->store_limit = 0;
- object->store_limit_l = 0;
- object->cache = cache;
- object->cookie = cookie;
- fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
- object->parent = NULL;
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
- RB_CLEAR_NODE(&object->objlist_link);
-#endif
-
- object->oob_event_mask = 0;
- for (t = object->oob_table; t->events; t++)
- object->oob_event_mask |= t->events;
- object->event_mask = object->oob_event_mask;
- for (t = object->state->transitions; t->events; t++)
- object->event_mask |= t->events;
-}
-EXPORT_SYMBOL(fscache_object_init);
-
-/*
- * Mark the object as no longer being live, making sure that we synchronise
- * against op submission.
- */
-static inline void fscache_mark_object_dead(struct fscache_object *object)
-{
- spin_lock(&object->lock);
- clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
- spin_unlock(&object->lock);
-}
-
-/*
- * Abort object initialisation before we start it.
- */
-static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- object->oob_event_mask = 0;
- fscache_dequeue_object(object);
- return transit_to(KILL_OBJECT);
-}
-
-/*
- * initialise an object
- * - check the specified object's parent to see if we can make use of it
- * immediately to do a creation
- * - we may need to start the process of creating a parent and we need to wait
- * for the parent's lookup and creation to complete if it's not there yet
- */
-static const struct fscache_state *fscache_initialise_object(struct fscache_object *object,
- int event)
-{
- struct fscache_object *parent;
- bool success;
-
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- ASSERT(list_empty(&object->dep_link));
-
- parent = object->parent;
- if (!parent) {
- _leave(" [no parent]");
- return transit_to(DROP_OBJECT);
- }
-
- _debug("parent: %s of:%lx", parent->state->name, parent->flags);
-
- if (fscache_object_is_dying(parent)) {
- _leave(" [bad parent]");
- return transit_to(DROP_OBJECT);
- }
-
- if (fscache_object_is_available(parent)) {
- _leave(" [ready]");
- return transit_to(PARENT_READY);
- }
-
- _debug("wait");
-
- spin_lock(&parent->lock);
- fscache_stat(&fscache_n_cop_grab_object);
- success = false;
- if (fscache_object_is_live(parent) &&
- object->cache->ops->grab_object(object, fscache_obj_get_add_to_deps)) {
- list_add(&object->dep_link, &parent->dependents);
- success = true;
- }
- fscache_stat_d(&fscache_n_cop_grab_object);
- spin_unlock(&parent->lock);
- if (!success) {
- _leave(" [grab failed]");
- return transit_to(DROP_OBJECT);
- }
-
- /* fscache_acquire_non_index_cookie() uses this
- * to wake the chain up */
- fscache_raise_event(parent, FSCACHE_OBJECT_EV_NEW_CHILD);
- _leave(" [wait]");
- return transit_to(WAIT_FOR_PARENT);
-}
-
-/*
- * Once the parent object is ready, we should kick off our lookup op.
- */
-static const struct fscache_state *fscache_parent_ready(struct fscache_object *object,
- int event)
-{
- struct fscache_object *parent = object->parent;
-
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- ASSERT(parent != NULL);
-
- spin_lock(&parent->lock);
- parent->n_ops++;
- parent->n_obj_ops++;
- spin_unlock(&parent->lock);
-
- _leave("");
- return transit_to(LOOK_UP_OBJECT);
-}
-
-/*
- * look an object up in the cache from which it was allocated
- * - we hold an "access lock" on the parent object, so the parent object cannot
- * be withdrawn by either party till we've finished
- */
-static const struct fscache_state *fscache_look_up_object(struct fscache_object *object,
- int event)
-{
- struct fscache_cookie *cookie = object->cookie;
- struct fscache_object *parent = object->parent;
- int ret;
-
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- object->oob_table = fscache_osm_lookup_oob;
-
- ASSERT(parent != NULL);
- ASSERTCMP(parent->n_ops, >, 0);
- ASSERTCMP(parent->n_obj_ops, >, 0);
-
- /* make sure the parent is still available */
- ASSERT(fscache_object_is_available(parent));
-
- if (fscache_object_is_dying(parent) ||
- test_bit(FSCACHE_IOERROR, &object->cache->flags) ||
- !fscache_use_cookie(object)) {
- _leave(" [unavailable]");
- return transit_to(LOOKUP_FAILURE);
- }
-
- _debug("LOOKUP \"%s\" in \"%s\"",
- cookie->def->name, object->cache->tag->name);
-
- fscache_stat(&fscache_n_object_lookups);
- fscache_stat(&fscache_n_cop_lookup_object);
- ret = object->cache->ops->lookup_object(object);
- fscache_stat_d(&fscache_n_cop_lookup_object);
-
- fscache_unuse_cookie(object);
-
- if (ret == -ETIMEDOUT) {
- /* probably stuck behind another object, so move this one to
- * the back of the queue */
- fscache_stat(&fscache_n_object_lookups_timed_out);
- _leave(" [timeout]");
- return NO_TRANSIT;
- }
-
- if (ret < 0) {
- _leave(" [error]");
- return transit_to(LOOKUP_FAILURE);
- }
-
- _leave(" [ok]");
- return transit_to(OBJECT_AVAILABLE);
-}
-
-/**
- * fscache_object_lookup_negative - Note negative cookie lookup
- * @object: Object pointing to cookie to mark
- *
- * Note negative lookup, permitting those waiting to read data from an already
- * existing backing object to continue as there's no data for them to read.
- */
-void fscache_object_lookup_negative(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
-
- _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
-
- if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
- fscache_stat(&fscache_n_object_lookups_negative);
-
- /* Allow write requests to begin stacking up and read requests to begin
- * returning ENODATA.
- */
- set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
- clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
-
- clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
- }
- _leave("");
-}
-EXPORT_SYMBOL(fscache_object_lookup_negative);
-
-/**
- * fscache_obtained_object - Note successful object lookup or creation
- * @object: Object pointing to cookie to mark
- *
- * Note successful lookup and/or creation, permitting those waiting to write
- * data to a backing object to continue.
- *
- * Note that after calling this, an object's cookie may be relinquished by the
- * netfs, and so must be accessed with object lock held.
- */
-void fscache_obtained_object(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
-
- _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
-
- /* if we were still looking up, then we must have a positive lookup
- * result, in which case there may be data available */
- if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
- fscache_stat(&fscache_n_object_lookups_positive);
-
- /* We do (presumably) have data */
- clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
- clear_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
-
- /* Allow write requests to begin stacking up and read requests
- * to begin shovelling data.
- */
- clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
- } else {
- fscache_stat(&fscache_n_object_created);
- }
-
- set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
- _leave("");
-}
-EXPORT_SYMBOL(fscache_obtained_object);
-
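The two exported helpers above define the contract a cache backend's ->lookup_object() method honours: report a negative lookup as soon as that is known, then mark the object obtained once a backing record exists. A minimal sketch of that call pattern, assuming a hypothetical myfs backend (all myfs_* names are invented for illustration):

	/* Hypothetical backend lookup; myfs_* names are illustrative only. */
	static int myfs_lookup_object(struct fscache_object *_object)
	{
		struct myfs_object *object =
			container_of(_object, struct myfs_object, fscache);

		if (!myfs_find_backing_record(object)) {
			/* Nothing on disk yet: let waiting readers see ENODATA. */
			fscache_object_lookup_negative(&object->fscache);
			if (myfs_create_backing_record(object) < 0)
				return -ENOBUFS;
		}

		/* The backing record now exists: release waiting writers. */
		fscache_obtained_object(&object->fscache);
		return 0;
	}
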
-/*
- * handle an object that has just become available
- */
-static const struct fscache_state *fscache_object_available(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- object->oob_table = fscache_osm_run_oob;
-
- spin_lock(&object->lock);
-
- fscache_done_parent_op(object);
- if (object->n_in_progress == 0) {
- if (object->n_ops > 0) {
- ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
- fscache_start_operations(object);
- } else {
- ASSERT(list_empty(&object->pending_ops));
- }
- }
- spin_unlock(&object->lock);
-
- fscache_stat(&fscache_n_cop_lookup_complete);
- object->cache->ops->lookup_complete(object);
- fscache_stat_d(&fscache_n_cop_lookup_complete);
-
- fscache_stat(&fscache_n_object_avail);
-
- _leave("");
- return transit_to(JUMPSTART_DEPS);
-}
-
-/*
- * Wake up this object's dependent objects now that we've become available.
- */
-static const struct fscache_state *fscache_jumpstart_dependents(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_PARENT_READY))
- return NO_TRANSIT; /* Not finished; requeue */
- return transit_to(WAIT_FOR_CMD);
-}
-
-/*
- * Handle lookup or creation failure.
- */
-static const struct fscache_state *fscache_lookup_failure(struct fscache_object *object,
- int event)
-{
- struct fscache_cookie *cookie;
-
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- object->oob_event_mask = 0;
-
- fscache_stat(&fscache_n_cop_lookup_complete);
- object->cache->ops->lookup_complete(object);
- fscache_stat_d(&fscache_n_cop_lookup_complete);
-
- set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags);
-
- cookie = object->cookie;
- set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
- if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
-
- fscache_done_parent_op(object);
- return transit_to(KILL_OBJECT);
-}
-
-/*
- * Wait for completion of all active operations on this object and the death of
- * all child objects of this object.
- */
-static const struct fscache_state *fscache_kill_object(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x,%d,%d},%d",
- object->debug_id, object->n_ops, object->n_children, event);
-
- fscache_mark_object_dead(object);
- object->oob_event_mask = 0;
-
- if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
- /* Reject any new read/write ops and abort any that are pending. */
- clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
- fscache_cancel_all_ops(object);
- }
-
- if (list_empty(&object->dependents) &&
- object->n_ops == 0 &&
- object->n_children == 0)
- return transit_to(DROP_OBJECT);
-
- if (object->n_in_progress == 0) {
- spin_lock(&object->lock);
- if (object->n_ops > 0 && object->n_in_progress == 0)
- fscache_start_operations(object);
- spin_unlock(&object->lock);
- }
-
- if (!list_empty(&object->dependents))
- return transit_to(KILL_DEPENDENTS);
-
- return transit_to(WAIT_FOR_CLEARANCE);
-}
-
-/*
- * Kill dependent objects.
- */
-static const struct fscache_state *fscache_kill_dependents(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- if (!fscache_enqueue_dependents(object, FSCACHE_OBJECT_EV_KILL))
- return NO_TRANSIT; /* Not finished */
- return transit_to(WAIT_FOR_CLEARANCE);
-}
-
-/*
- * Drop an object's attachments
- */
-static const struct fscache_state *fscache_drop_object(struct fscache_object *object,
- int event)
-{
- struct fscache_object *parent = object->parent;
- struct fscache_cookie *cookie = object->cookie;
- struct fscache_cache *cache = object->cache;
- bool awaken = false;
-
- _enter("{OBJ%x,%d},%d", object->debug_id, object->n_children, event);
-
- ASSERT(cookie != NULL);
- ASSERT(!hlist_unhashed(&object->cookie_link));
-
- if (test_bit(FSCACHE_COOKIE_AUX_UPDATED, &cookie->flags)) {
- _debug("final update");
- fscache_update_aux_data(object);
- }
-
- /* Make sure the cookie no longer points here and that the netfs isn't
- * waiting for us.
- */
- spin_lock(&cookie->lock);
- hlist_del_init(&object->cookie_link);
- if (hlist_empty(&cookie->backing_objects) &&
- test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
- awaken = true;
- spin_unlock(&cookie->lock);
-
- if (awaken)
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
- if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags))
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
-
- /* Prevent a race with our last child, which has to signal EV_CLEARED
- * before dropping our spinlock.
- */
- spin_lock(&object->lock);
- spin_unlock(&object->lock);
-
- /* Discard from the cache's collection of objects */
- spin_lock(&cache->object_list_lock);
- list_del_init(&object->cache_link);
- spin_unlock(&cache->object_list_lock);
-
- fscache_stat(&fscache_n_cop_drop_object);
- cache->ops->drop_object(object);
- fscache_stat_d(&fscache_n_cop_drop_object);
-
-	/* The parent object wants to know when all its dependents have gone */
- if (parent) {
- _debug("release parent OBJ%x {%d}",
- parent->debug_id, parent->n_children);
-
- spin_lock(&parent->lock);
- parent->n_children--;
- if (parent->n_children == 0)
- fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
- spin_unlock(&parent->lock);
- object->parent = NULL;
- }
-
- /* this just shifts the object release to the work processor */
- fscache_put_object(object, fscache_obj_put_drop_obj);
- fscache_stat(&fscache_n_object_dead);
-
- _leave("");
- return transit_to(OBJECT_DEAD);
-}
-
-/*
- * get a ref on an object
- */
-static int fscache_get_object(struct fscache_object *object,
- enum fscache_obj_ref_trace why)
-{
- int ret;
-
- fscache_stat(&fscache_n_cop_grab_object);
- ret = object->cache->ops->grab_object(object, why) ? 0 : -EAGAIN;
- fscache_stat_d(&fscache_n_cop_grab_object);
- return ret;
-}
-
-/*
- * Discard a ref on an object
- */
-static void fscache_put_object(struct fscache_object *object,
- enum fscache_obj_ref_trace why)
-{
- fscache_stat(&fscache_n_cop_put_object);
- object->cache->ops->put_object(object, why);
- fscache_stat_d(&fscache_n_cop_put_object);
-}
-
-/**
- * fscache_object_destroy - Note that a cache object is about to be destroyed
- * @object: The object to be destroyed
- *
- * Note the imminent destruction and deallocation of a cache object record.
- */
-void fscache_object_destroy(struct fscache_object *object)
-{
- /* We can get rid of the cookie now */
- fscache_cookie_put(object->cookie, fscache_cookie_put_object);
- object->cookie = NULL;
-}
-EXPORT_SYMBOL(fscache_object_destroy);
-
-/*
- * enqueue an object for metadata-type processing
- */
-void fscache_enqueue_object(struct fscache_object *object)
-{
- _enter("{OBJ%x}", object->debug_id);
-
- if (fscache_get_object(object, fscache_obj_get_queue) >= 0) {
- wait_queue_head_t *cong_wq =
- &get_cpu_var(fscache_object_cong_wait);
-
- if (queue_work(fscache_object_wq, &object->work)) {
- if (fscache_object_congested())
- wake_up(cong_wq);
- } else
- fscache_put_object(object, fscache_obj_put_queue);
-
- put_cpu_var(fscache_object_cong_wait);
- }
-}
-
-/**
- * fscache_object_sleep_till_congested - Sleep until object wq is congested
- * @timeoutp: Scheduler sleep timeout
- *
- * Allow an object handler to sleep until the object workqueue is congested.
- *
- * The caller must set up a wake up event before calling this and must have set
- * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
- * condition before calling this function as no test is made here.
- *
- * %true is returned if the object wq is congested, %false otherwise.
- */
-bool fscache_object_sleep_till_congested(signed long *timeoutp)
-{
- wait_queue_head_t *cong_wq = this_cpu_ptr(&fscache_object_cong_wait);
- DEFINE_WAIT(wait);
-
- if (fscache_object_congested())
- return true;
-
- add_wait_queue_exclusive(cong_wq, &wait);
- if (!fscache_object_congested())
- *timeoutp = schedule_timeout(*timeoutp);
- finish_wait(cong_wq, &wait);
-
- return fscache_object_congested();
-}
-EXPORT_SYMBOL_GPL(fscache_object_sleep_till_congested);
-
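The protocol described in the kernel-doc above (wake-up source first, then sleep state, then the caller's own condition test) looks roughly like this in use; the wait queue, condition and timeout below are assumptions for illustration, not part of the API:

	/* Illustrative call pattern; my_wq, my_condition_met() and the
	 * timeout are assumptions. */
	signed long timeout = 60 * HZ;
	DECLARE_WAITQUEUE(monitor, current);

	add_wait_queue(&my_wq, &monitor);	/* wake-up source set up first */
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (my_condition_met())		/* caller tests its own condition */
			break;
		if (fscache_object_sleep_till_congested(&timeout))
			break;			/* object wq congested: back off */
		if (timeout <= 0)
			break;
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&my_wq, &monitor);
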
-/*
- * Enqueue the dependents of an object for metadata-type processing.
- *
- * If we don't manage to finish the list before the scheduler wants to run
- * again then return false immediately. We return true if the list was
- * cleared.
- */
-static bool fscache_enqueue_dependents(struct fscache_object *object, int event)
-{
- struct fscache_object *dep;
- bool ret = true;
-
- _enter("{OBJ%x}", object->debug_id);
-
- if (list_empty(&object->dependents))
- return true;
-
- spin_lock(&object->lock);
-
- while (!list_empty(&object->dependents)) {
- dep = list_entry(object->dependents.next,
- struct fscache_object, dep_link);
- list_del_init(&dep->dep_link);
-
- fscache_raise_event(dep, event);
- fscache_put_object(dep, fscache_obj_put_enq_dep);
-
- if (!list_empty(&object->dependents) && need_resched()) {
- ret = false;
- break;
- }
- }
-
- spin_unlock(&object->lock);
- return ret;
-}
-
-/*
- * remove an object from whatever queue it's waiting on
- */
-static void fscache_dequeue_object(struct fscache_object *object)
-{
- _enter("{OBJ%x}", object->debug_id);
-
- if (!list_empty(&object->dep_link)) {
- spin_lock(&object->parent->lock);
- list_del_init(&object->dep_link);
- spin_unlock(&object->parent->lock);
- }
-
- _leave("");
-}
-
-/**
- * fscache_check_aux - Ask the netfs whether an object on disk is still valid
- * @object: The object to ask about
- * @data: The auxiliary data for the object
- * @datalen: The size of the auxiliary data
- * @object_size: The size of the object according to the server.
- *
- * This function consults the netfs about the coherency state of an object.
- * The caller must be holding a ref on cookie->n_active (held by
- * fscache_look_up_object() on behalf of the cache backend during object lookup
- * and creation).
- */
-enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
- const void *data, uint16_t datalen,
- loff_t object_size)
-{
- enum fscache_checkaux result;
-
- if (!object->cookie->def->check_aux) {
- fscache_stat(&fscache_n_checkaux_none);
- return FSCACHE_CHECKAUX_OKAY;
- }
-
- result = object->cookie->def->check_aux(object->cookie->netfs_data,
- data, datalen, object_size);
- switch (result) {
- /* entry okay as is */
- case FSCACHE_CHECKAUX_OKAY:
- fscache_stat(&fscache_n_checkaux_okay);
- break;
-
- /* entry requires update */
- case FSCACHE_CHECKAUX_NEEDS_UPDATE:
- fscache_stat(&fscache_n_checkaux_update);
- break;
-
- /* entry requires deletion */
- case FSCACHE_CHECKAUX_OBSOLETE:
- fscache_stat(&fscache_n_checkaux_obsolete);
- break;
-
- default:
- BUG();
- }
-
- return result;
-}
-EXPORT_SYMBOL(fscache_check_aux);
-
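The consultation above calls the cookie definition's ->check_aux() with the netfs's private data, the auxiliary blob stored on disk and the object size. A sketch of what such a callback might look like on the netfs side (the myfs_* types and fields are assumptions):

	/* Hypothetical netfs coherency callback; myfs_* names are invented. */
	static enum fscache_checkaux myfs_check_aux(void *cookie_netfs_data,
						    const void *data,
						    uint16_t datalen,
						    loff_t object_size)
	{
		struct myfs_inode *vnode = cookie_netfs_data;
		struct myfs_aux aux;

		if (datalen != sizeof(aux))
			return FSCACHE_CHECKAUX_OBSOLETE;

		memcpy(&aux, data, sizeof(aux));
		if (aux.data_version != vnode->data_version)
			return FSCACHE_CHECKAUX_OBSOLETE;	/* delete and retry */
		if (aux.mtime != vnode->mtime)
			return FSCACHE_CHECKAUX_NEEDS_UPDATE;	/* rewrite aux blob */
		return FSCACHE_CHECKAUX_OKAY;
	}
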
-/*
- * Asynchronously invalidate an object.
- */
-static const struct fscache_state *_fscache_invalidate_object(struct fscache_object *object,
- int event)
-{
- struct fscache_operation *op;
- struct fscache_cookie *cookie = object->cookie;
-
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- /* We're going to need the cookie. If the cookie is not available then
- * retire the object instead.
- */
- if (!fscache_use_cookie(object)) {
- ASSERT(radix_tree_empty(&object->cookie->stores));
- set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
- _leave(" [no cookie]");
- return transit_to(KILL_OBJECT);
- }
-
- /* Reject any new read/write ops and abort any that are pending. */
- fscache_invalidate_writes(cookie);
- clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
- fscache_cancel_all_ops(object);
-
- /* Now we have to wait for in-progress reads and writes */
- op = kzalloc(sizeof(*op), GFP_KERNEL);
- if (!op)
- goto nomem;
-
- fscache_operation_init(cookie, op, object->cache->ops->invalidate_object,
- NULL, NULL);
- op->flags = FSCACHE_OP_ASYNC |
- (1 << FSCACHE_OP_EXCLUSIVE) |
- (1 << FSCACHE_OP_UNUSE_COOKIE);
- trace_fscache_page_op(cookie, NULL, op, fscache_page_op_invalidate);
-
- spin_lock(&cookie->lock);
- if (fscache_submit_exclusive_op(object, op) < 0)
- goto submit_op_failed;
- spin_unlock(&cookie->lock);
- fscache_put_operation(op);
-
- /* Once we've completed the invalidation, we know there will be no data
- * stored in the cache and thus we can reinstate the data-check-skip
- * optimisation.
- */
- set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
-
- /* We can allow read and write requests to come in once again. They'll
- * queue up behind our exclusive invalidation operation.
- */
- if (test_and_clear_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags))
- wake_up_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING);
- _leave(" [ok]");
- return transit_to(UPDATE_OBJECT);
-
-nomem:
- fscache_mark_object_dead(object);
- fscache_unuse_cookie(object);
- _leave(" [ENOMEM]");
- return transit_to(KILL_OBJECT);
-
-submit_op_failed:
- fscache_mark_object_dead(object);
- spin_unlock(&cookie->lock);
- fscache_unuse_cookie(object);
- kfree(op);
- _leave(" [EIO]");
- return transit_to(KILL_OBJECT);
-}
-
-static const struct fscache_state *fscache_invalidate_object(struct fscache_object *object,
- int event)
-{
- const struct fscache_state *s;
-
- fscache_stat(&fscache_n_invalidates_run);
- fscache_stat(&fscache_n_cop_invalidate_object);
- s = _fscache_invalidate_object(object, event);
- fscache_stat_d(&fscache_n_cop_invalidate_object);
- return s;
-}
-
-/*
- * Update auxiliary data.
- */
-static void fscache_update_aux_data(struct fscache_object *object)
-{
- fscache_stat(&fscache_n_updates_run);
- fscache_stat(&fscache_n_cop_update_object);
- object->cache->ops->update_object(object);
- fscache_stat_d(&fscache_n_cop_update_object);
-}
-
-/*
- * Asynchronously update an object.
- */
-static const struct fscache_state *fscache_update_object(struct fscache_object *object,
- int event)
-{
- _enter("{OBJ%x},%d", object->debug_id, event);
-
- fscache_update_aux_data(object);
-
- _leave("");
- return transit_to(WAIT_FOR_CMD);
-}
-
-/**
- * fscache_object_retrying_stale - Note retrying stale object
- * @object: The object that will be retried
- *
- * Note that an object lookup found an on-disk object that was adjudged to be
- * stale and has been deleted. The lookup will be retried.
- */
-void fscache_object_retrying_stale(struct fscache_object *object)
-{
- fscache_stat(&fscache_n_cache_no_space_reject);
-}
-EXPORT_SYMBOL(fscache_object_retrying_stale);
-
-/**
- * fscache_object_mark_killed - Note that an object was killed
- * @object: The object that was culled
- * @why: The reason the object was killed.
- *
- * Note that an object was killed and record the reason.  Logs an error
- * if the object had already been marked as killed by the cache.
- */
-void fscache_object_mark_killed(struct fscache_object *object,
- enum fscache_why_object_killed why)
-{
- if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) {
- pr_err("Error: Object already killed by cache [%s]\n",
- object->cache->identifier);
- return;
- }
-
- switch (why) {
- case FSCACHE_OBJECT_NO_SPACE:
- fscache_stat(&fscache_n_cache_no_space_reject);
- break;
- case FSCACHE_OBJECT_IS_STALE:
- fscache_stat(&fscache_n_cache_stale_objects);
- break;
- case FSCACHE_OBJECT_WAS_RETIRED:
- fscache_stat(&fscache_n_cache_retired_objects);
- break;
- case FSCACHE_OBJECT_WAS_CULLED:
- fscache_stat(&fscache_n_cache_culled_objects);
- break;
- }
-}
-EXPORT_SYMBOL(fscache_object_mark_killed);
-
-/*
- * The object is dead. We can get here if an object gets queued by an event
- * that would lead to its death (such as EV_KILL) when the dispatcher is
- * already running (and so can be requeued) but hasn't yet cleared the event
- * mask.
- */
-static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
- int event)
-{
- if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
- &object->flags))
- return NO_TRANSIT;
-
- WARN(true, "FS-Cache object redispatched after death");
- return NO_TRANSIT;
-}
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
deleted file mode 100644
index e002cdfaf3cc..000000000000
--- a/fs/fscache/operation.c
+++ /dev/null
@@ -1,633 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* FS-Cache worker operation management routines
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * See Documentation/filesystems/caching/operations.rst
- */
-
-#define FSCACHE_DEBUG_LEVEL OPERATION
-#include <linux/module.h>
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-atomic_t fscache_op_debug_id;
-EXPORT_SYMBOL(fscache_op_debug_id);
-
-static void fscache_operation_dummy_cancel(struct fscache_operation *op)
-{
-}
-
-/**
- * fscache_operation_init - Do basic initialisation of an operation
- * @cookie: The cookie to operate on
- * @op: The operation to initialise
- * @processor: The function to perform the operation
- * @cancel: A function to handle operation cancellation
- * @release: The release function to assign
- *
- * Do basic initialisation of an operation. The caller must still set flags,
- * object and processor if needed.
- */
-void fscache_operation_init(struct fscache_cookie *cookie,
- struct fscache_operation *op,
- fscache_operation_processor_t processor,
- fscache_operation_cancel_t cancel,
- fscache_operation_release_t release)
-{
- INIT_WORK(&op->work, fscache_op_work_func);
- atomic_set(&op->usage, 1);
- op->state = FSCACHE_OP_ST_INITIALISED;
- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
- op->processor = processor;
- op->cancel = cancel ?: fscache_operation_dummy_cancel;
- op->release = release;
- INIT_LIST_HEAD(&op->pend_link);
- fscache_stat(&fscache_n_op_initialised);
- trace_fscache_op(cookie, op, fscache_op_init);
-}
-EXPORT_SYMBOL(fscache_operation_init);
-
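The initialise-then-submit pattern that this function anchors recurs throughout this removal (see the invalidation and attr-changed paths below); condensed, it runs roughly as follows, with my_op_processor standing in for a real callback and cookie/object taken from the surrounding context:

	/* Condensed async-op setup; my_op_processor is illustrative. */
	struct fscache_operation *op;

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	fscache_operation_init(cookie, op, my_op_processor, NULL, NULL);
	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_UNUSE_COOKIE);

	spin_lock(&cookie->lock);
	if (fscache_submit_exclusive_op(object, op) < 0) {
		spin_unlock(&cookie->lock);
		kfree(op);
		return -EIO;
	}
	spin_unlock(&cookie->lock);
	fscache_put_operation(op);	/* drop the setup ref; the queue holds its own */
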
-/**
- * fscache_enqueue_operation - Enqueue an operation for processing
- * @op: The operation to enqueue
- *
- * Enqueue an operation for processing by the FS-Cache thread pool.
- *
- * This will get its own ref on the object.
- */
-void fscache_enqueue_operation(struct fscache_operation *op)
-{
- struct fscache_cookie *cookie = op->object->cookie;
-
- _enter("{OBJ%x OP%x,%u}",
- op->object->debug_id, op->debug_id, atomic_read(&op->usage));
-
- ASSERT(list_empty(&op->pend_link));
- ASSERT(op->processor != NULL);
- ASSERT(fscache_object_is_available(op->object));
- ASSERTCMP(atomic_read(&op->usage), >, 0);
- ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
- op->state, ==, FSCACHE_OP_ST_CANCELLED);
-
- fscache_stat(&fscache_n_op_enqueue);
- switch (op->flags & FSCACHE_OP_TYPE) {
- case FSCACHE_OP_ASYNC:
- trace_fscache_op(cookie, op, fscache_op_enqueue_async);
- _debug("queue async");
- atomic_inc(&op->usage);
- if (!queue_work(fscache_op_wq, &op->work))
- fscache_put_operation(op);
- break;
- case FSCACHE_OP_MYTHREAD:
- trace_fscache_op(cookie, op, fscache_op_enqueue_mythread);
- _debug("queue for caller's attention");
- break;
- default:
-		pr_err("Unexpected op type %lx\n", op->flags);
- BUG();
- break;
- }
-}
-EXPORT_SYMBOL(fscache_enqueue_operation);
-
-/*
- * start an op running
- */
-static void fscache_run_op(struct fscache_object *object,
- struct fscache_operation *op)
-{
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
-
- op->state = FSCACHE_OP_ST_IN_PROGRESS;
- object->n_in_progress++;
- if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
- wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
- if (op->processor)
- fscache_enqueue_operation(op);
- else
- trace_fscache_op(object->cookie, op, fscache_op_run);
- fscache_stat(&fscache_n_op_run);
-}
-
-/*
- * report an unexpected submission
- */
-static void fscache_report_unexpected_submission(struct fscache_object *object,
- struct fscache_operation *op,
- const struct fscache_state *ostate)
-{
- static bool once_only;
- struct fscache_operation *p;
- unsigned n;
-
- if (once_only)
- return;
- once_only = true;
-
- kdebug("unexpected submission OP%x [OBJ%x %s]",
- op->debug_id, object->debug_id, object->state->name);
- kdebug("objstate=%s [%s]", object->state->name, ostate->name);
- kdebug("objflags=%lx", object->flags);
- kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
- kdebug("ops=%u inp=%u exc=%u",
- object->n_ops, object->n_in_progress, object->n_exclusive);
-
- if (!list_empty(&object->pending_ops)) {
- n = 0;
- list_for_each_entry(p, &object->pending_ops, pend_link) {
- ASSERTCMP(p->object, ==, object);
-			kdebug("%p %p", p->processor, p->release);
- n++;
- }
-
- kdebug("n=%u", n);
- }
-
- dump_stack();
-}
-
-/*
- * submit an exclusive operation for an object
- * - other ops are excluded from running simultaneously with this one
- * - this gets any extra refs it needs on an op
- */
-int fscache_submit_exclusive_op(struct fscache_object *object,
- struct fscache_operation *op)
-{
- const struct fscache_state *ostate;
- unsigned long flags;
- int ret;
-
- _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
-
- trace_fscache_op(object->cookie, op, fscache_op_submit_ex);
-
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
- ASSERTCMP(atomic_read(&op->usage), >, 0);
-
- spin_lock(&object->lock);
- ASSERTCMP(object->n_ops, >=, object->n_in_progress);
- ASSERTCMP(object->n_ops, >=, object->n_exclusive);
- ASSERT(list_empty(&op->pend_link));
-
- ostate = object->state;
- smp_rmb();
-
- op->state = FSCACHE_OP_ST_PENDING;
- flags = READ_ONCE(object->flags);
- if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
- fscache_stat(&fscache_n_op_rejected);
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -ENOBUFS;
- } else if (unlikely(fscache_cache_is_broken(object))) {
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -EIO;
- } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
- op->object = object;
- object->n_ops++;
- object->n_exclusive++; /* reads and writes must wait */
-
- if (object->n_in_progress > 0) {
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
- fscache_stat(&fscache_n_op_pend);
- } else if (!list_empty(&object->pending_ops)) {
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
- fscache_stat(&fscache_n_op_pend);
- fscache_start_operations(object);
- } else {
- ASSERTCMP(object->n_in_progress, ==, 0);
- fscache_run_op(object, op);
- }
-
- /* need to issue a new write op after this */
- clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
- ret = 0;
- } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
- op->object = object;
- object->n_ops++;
- object->n_exclusive++; /* reads and writes must wait */
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
- fscache_stat(&fscache_n_op_pend);
- ret = 0;
- } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -ENOBUFS;
- } else {
- fscache_report_unexpected_submission(object, op, ostate);
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -ENOBUFS;
- }
-
- spin_unlock(&object->lock);
- return ret;
-}
-
-/*
- * submit an operation for an object
- * - ops may be submitted only while the object is in the following states:
- * - during object creation (write ops may be submitted)
- * - whilst the object is active
- * - after an I/O error incurred in one of the two above states (op rejected)
- * - this gets any extra refs it needs on an op
- */
-int fscache_submit_op(struct fscache_object *object,
- struct fscache_operation *op)
-{
- const struct fscache_state *ostate;
- unsigned long flags;
- int ret;
-
- _enter("{OBJ%x OP%x},{%u}",
- object->debug_id, op->debug_id, atomic_read(&op->usage));
-
- trace_fscache_op(object->cookie, op, fscache_op_submit);
-
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_INITIALISED);
- ASSERTCMP(atomic_read(&op->usage), >, 0);
-
- spin_lock(&object->lock);
- ASSERTCMP(object->n_ops, >=, object->n_in_progress);
- ASSERTCMP(object->n_ops, >=, object->n_exclusive);
- ASSERT(list_empty(&op->pend_link));
-
- ostate = object->state;
- smp_rmb();
-
- op->state = FSCACHE_OP_ST_PENDING;
- flags = READ_ONCE(object->flags);
- if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) {
- fscache_stat(&fscache_n_op_rejected);
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -ENOBUFS;
- } else if (unlikely(fscache_cache_is_broken(object))) {
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -EIO;
- } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) {
- op->object = object;
- object->n_ops++;
-
- if (object->n_exclusive > 0) {
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
- fscache_stat(&fscache_n_op_pend);
- } else if (!list_empty(&object->pending_ops)) {
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
- fscache_stat(&fscache_n_op_pend);
- fscache_start_operations(object);
- } else {
- ASSERTCMP(object->n_exclusive, ==, 0);
- fscache_run_op(object, op);
- }
- ret = 0;
- } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) {
- op->object = object;
- object->n_ops++;
- atomic_inc(&op->usage);
- list_add_tail(&op->pend_link, &object->pending_ops);
- fscache_stat(&fscache_n_op_pend);
- ret = 0;
- } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) {
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -ENOBUFS;
- } else {
- fscache_report_unexpected_submission(object, op, ostate);
- ASSERT(!fscache_object_is_active(object));
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- ret = -ENOBUFS;
- }
-
- spin_unlock(&object->lock);
- return ret;
-}
-
-/*
- * queue an object for withdrawal on error, aborting all following asynchronous
- * operations
- */
-void fscache_abort_object(struct fscache_object *object)
-{
- _enter("{OBJ%x}", object->debug_id);
-
- fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
-}
-
-/*
- * Jump start the operation processing on an object. The caller must hold
- * object->lock.
- */
-void fscache_start_operations(struct fscache_object *object)
-{
- struct fscache_operation *op;
- bool stop = false;
-
- while (!list_empty(&object->pending_ops) && !stop) {
- op = list_entry(object->pending_ops.next,
- struct fscache_operation, pend_link);
-
- if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
- if (object->n_in_progress > 0)
- break;
- stop = true;
- }
- list_del_init(&op->pend_link);
- fscache_run_op(object, op);
-
- /* the pending queue was holding a ref on the object */
- fscache_put_operation(op);
- }
-
- ASSERTCMP(object->n_in_progress, <=, object->n_ops);
-
- _debug("woke %d ops on OBJ%x",
- object->n_in_progress, object->debug_id);
-}
-
-/*
- * cancel an operation that's pending on an object
- */
-int fscache_cancel_op(struct fscache_operation *op,
- bool cancel_in_progress_op)
-{
- struct fscache_object *object = op->object;
- bool put = false;
- int ret;
-
-	_enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);
-
- trace_fscache_op(object->cookie, op, fscache_op_cancel);
-
- ASSERTCMP(op->state, >=, FSCACHE_OP_ST_PENDING);
- ASSERTCMP(op->state, !=, FSCACHE_OP_ST_CANCELLED);
- ASSERTCMP(atomic_read(&op->usage), >, 0);
-
- spin_lock(&object->lock);
-
- ret = -EBUSY;
- if (op->state == FSCACHE_OP_ST_PENDING) {
- ASSERT(!list_empty(&op->pend_link));
- list_del_init(&op->pend_link);
- put = true;
-
- fscache_stat(&fscache_n_op_cancelled);
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
- if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
- object->n_exclusive--;
- if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
- wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
- ret = 0;
-	} else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) {
-		ASSERTCMP(object->n_in_progress, >, 0);
-		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
-			object->n_exclusive--;
-		object->n_in_progress--;
-		if (object->n_in_progress == 0)
-			fscache_start_operations(object);
-
-		fscache_stat(&fscache_n_op_cancelled);
-		op->cancel(op);
-		op->state = FSCACHE_OP_ST_CANCELLED;
-		/* n_exclusive was already decremented above for exclusive ops */
-		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
-			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
-		ret = 0;
- }
-
- if (put)
- fscache_put_operation(op);
- spin_unlock(&object->lock);
- _leave(" = %d", ret);
- return ret;
-}
-
-/*
- * Cancel all pending operations on an object
- */
-void fscache_cancel_all_ops(struct fscache_object *object)
-{
- struct fscache_operation *op;
-
- _enter("OBJ%x", object->debug_id);
-
- spin_lock(&object->lock);
-
- while (!list_empty(&object->pending_ops)) {
- op = list_entry(object->pending_ops.next,
- struct fscache_operation, pend_link);
- fscache_stat(&fscache_n_op_cancelled);
- list_del_init(&op->pend_link);
-
- trace_fscache_op(object->cookie, op, fscache_op_cancel_all);
-
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
- op->cancel(op);
- op->state = FSCACHE_OP_ST_CANCELLED;
-
- if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
- object->n_exclusive--;
- if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
- wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
- fscache_put_operation(op);
- cond_resched_lock(&object->lock);
- }
-
- spin_unlock(&object->lock);
- _leave("");
-}
-
-/*
- * Record the completion or cancellation of an in-progress operation.
- */
-void fscache_op_complete(struct fscache_operation *op, bool cancelled)
-{
- struct fscache_object *object = op->object;
-
- _enter("OBJ%x", object->debug_id);
-
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
- ASSERTCMP(object->n_in_progress, >, 0);
- ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
- object->n_exclusive, >, 0);
- ASSERTIFCMP(test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags),
- object->n_in_progress, ==, 1);
-
- spin_lock(&object->lock);
-
- if (!cancelled) {
- trace_fscache_op(object->cookie, op, fscache_op_completed);
- op->state = FSCACHE_OP_ST_COMPLETE;
- } else {
- op->cancel(op);
- trace_fscache_op(object->cookie, op, fscache_op_cancelled);
- op->state = FSCACHE_OP_ST_CANCELLED;
- }
-
- if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
- object->n_exclusive--;
- object->n_in_progress--;
- if (object->n_in_progress == 0)
- fscache_start_operations(object);
-
- spin_unlock(&object->lock);
- _leave("");
-}
-EXPORT_SYMBOL(fscache_op_complete);
-
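A processor routine is expected to call the function above exactly once for its in-progress op; schematically (my_do_work() is an assumption):

	/* Sketch of a processor honouring the completion contract. */
	static void my_op_processor(struct fscache_operation *op)
	{
		int ret = my_do_work(op);	/* hypothetical cache I/O */

		fscache_op_complete(op, ret < 0);	/* cancelled if it failed */
	}
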
-/*
- * release an operation
- * - queues pending ops if this is the last in-progress op
- */
-void fscache_put_operation(struct fscache_operation *op)
-{
- struct fscache_object *object;
- struct fscache_cache *cache;
-
- _enter("{OBJ%x OP%x,%d}",
- op->object ? op->object->debug_id : 0,
- op->debug_id, atomic_read(&op->usage));
-
- ASSERTCMP(atomic_read(&op->usage), >, 0);
-
- if (!atomic_dec_and_test(&op->usage))
- return;
-
- trace_fscache_op(op->object ? op->object->cookie : NULL, op, fscache_op_put);
-
- _debug("PUT OP");
- ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED &&
- op->state != FSCACHE_OP_ST_COMPLETE,
- op->state, ==, FSCACHE_OP_ST_CANCELLED);
-
- fscache_stat(&fscache_n_op_release);
-
- if (op->release) {
- op->release(op);
- op->release = NULL;
- }
- op->state = FSCACHE_OP_ST_DEAD;
-
- object = op->object;
- if (likely(object)) {
- if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
- atomic_dec(&object->n_reads);
- if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags))
- fscache_unuse_cookie(object);
-
- /* now... we may get called with the object spinlock held, so we
- * complete the cleanup here only if we can immediately acquire the
- * lock, and defer it otherwise */
- if (!spin_trylock(&object->lock)) {
- _debug("defer put");
- fscache_stat(&fscache_n_op_deferred_release);
-
- cache = object->cache;
- spin_lock(&cache->op_gc_list_lock);
- list_add_tail(&op->pend_link, &cache->op_gc_list);
- spin_unlock(&cache->op_gc_list_lock);
- schedule_work(&cache->op_gc);
- _leave(" [defer]");
- return;
- }
-
- ASSERTCMP(object->n_ops, >, 0);
- object->n_ops--;
- if (object->n_ops == 0)
- fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
-
- spin_unlock(&object->lock);
- }
-
- kfree(op);
- _leave(" [done]");
-}
-EXPORT_SYMBOL(fscache_put_operation);
-
-/*
- * garbage collect operations that have had their release deferred
- */
-void fscache_operation_gc(struct work_struct *work)
-{
- struct fscache_operation *op;
- struct fscache_object *object;
- struct fscache_cache *cache =
- container_of(work, struct fscache_cache, op_gc);
- int count = 0;
-
- _enter("");
-
- do {
- spin_lock(&cache->op_gc_list_lock);
- if (list_empty(&cache->op_gc_list)) {
- spin_unlock(&cache->op_gc_list_lock);
- break;
- }
-
- op = list_entry(cache->op_gc_list.next,
- struct fscache_operation, pend_link);
- list_del(&op->pend_link);
- spin_unlock(&cache->op_gc_list_lock);
-
- object = op->object;
- trace_fscache_op(object->cookie, op, fscache_op_gc);
-
- spin_lock(&object->lock);
-
- _debug("GC DEFERRED REL OBJ%x OP%x",
- object->debug_id, op->debug_id);
- fscache_stat(&fscache_n_op_gc);
-
- ASSERTCMP(atomic_read(&op->usage), ==, 0);
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
-
- ASSERTCMP(object->n_ops, >, 0);
- object->n_ops--;
- if (object->n_ops == 0)
- fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
-
- spin_unlock(&object->lock);
- kfree(op);
-
- } while (count++ < 20);
-
- if (!list_empty(&cache->op_gc_list))
- schedule_work(&cache->op_gc);
-
- _leave("");
-}
-
-/*
- * execute an operation using fscache_op_wq to provide processing context -
- * the caller holds a ref to this object, so we don't need to hold one
- */
-void fscache_op_work_func(struct work_struct *work)
-{
- struct fscache_operation *op =
- container_of(work, struct fscache_operation, work);
-
- _enter("{OBJ%x OP%x,%d}",
- op->object->debug_id, op->debug_id, atomic_read(&op->usage));
-
- trace_fscache_op(op->object->cookie, op, fscache_op_work);
-
- ASSERT(op->processor != NULL);
- op->processor(op);
- fscache_put_operation(op);
-
- _leave("");
-}
diff --git a/fs/fscache/page.c b/fs/fscache/page.c
deleted file mode 100644
index 27df94ef0e0b..000000000000
--- a/fs/fscache/page.c
+++ /dev/null
@@ -1,1242 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* Cache page management and data I/O routines
- *
- * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#define FSCACHE_DEBUG_LEVEL PAGE
-#include <linux/module.h>
-#include <linux/fscache-cache.h>
-#include <linux/buffer_head.h>
-#include <linux/pagevec.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-/*
- * check to see if a page is being written to the cache
- */
-bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
-{
- void *val;
-
- rcu_read_lock();
- val = radix_tree_lookup(&cookie->stores, page->index);
- rcu_read_unlock();
- trace_fscache_check_page(cookie, page, val, 0);
-
- return val != NULL;
-}
-EXPORT_SYMBOL(__fscache_check_page_write);
-
-/*
- * wait for a page to finish being written to the cache
- */
-void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
-{
- wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
-
- trace_fscache_page(cookie, page, fscache_page_write_wait);
-
- wait_event(*wq, !__fscache_check_page_write(cookie, page));
-}
-EXPORT_SYMBOL(__fscache_wait_on_page_write);
-
-/*
- * wait for a page to finish being written to the cache. Put a timeout here
- * since we might be called recursively via parent fs.
- */
-static
-bool release_page_wait_timeout(struct fscache_cookie *cookie, struct page *page)
-{
- wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);
-
- return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page),
- HZ);
-}
-
-/*
- * decide whether a page can be released, possibly by cancelling a store to it
- * - we're allowed to sleep if __GFP_DIRECT_RECLAIM is flagged
- */
-bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- struct page *xpage;
- void *val;
-
- _enter("%p,%p,%x", cookie, page, gfp);
-
- trace_fscache_page(cookie, page, fscache_page_maybe_release);
-
-try_again:
- rcu_read_lock();
- val = radix_tree_lookup(&cookie->stores, page->index);
- if (!val) {
- rcu_read_unlock();
- fscache_stat(&fscache_n_store_vmscan_not_storing);
- __fscache_uncache_page(cookie, page);
- return true;
- }
-
- /* see if the page is actually undergoing storage - if so we can't get
- * rid of it till the cache has finished with it */
- if (radix_tree_tag_get(&cookie->stores, page->index,
- FSCACHE_COOKIE_STORING_TAG)) {
- rcu_read_unlock();
- goto page_busy;
- }
-
- /* the page is pending storage, so we attempt to cancel the store and
- * discard the store request so that the page can be reclaimed */
- spin_lock(&cookie->stores_lock);
- rcu_read_unlock();
-
- if (radix_tree_tag_get(&cookie->stores, page->index,
- FSCACHE_COOKIE_STORING_TAG)) {
- /* the page started to undergo storage whilst we were looking,
- * so now we can only wait or return */
- spin_unlock(&cookie->stores_lock);
- goto page_busy;
- }
-
- xpage = radix_tree_delete(&cookie->stores, page->index);
- trace_fscache_page(cookie, page, fscache_page_radix_delete);
- spin_unlock(&cookie->stores_lock);
-
- if (xpage) {
- fscache_stat(&fscache_n_store_vmscan_cancelled);
- fscache_stat(&fscache_n_store_radix_deletes);
- ASSERTCMP(xpage, ==, page);
- } else {
- fscache_stat(&fscache_n_store_vmscan_gone);
- }
-
- wake_up_bit(&cookie->flags, 0);
- trace_fscache_wake_cookie(cookie);
- if (xpage)
- put_page(xpage);
- __fscache_uncache_page(cookie, page);
- return true;
-
-page_busy:
- /* We will wait here if we're allowed to, but that could deadlock the
- * allocator as the work threads writing to the cache may all end up
- * sleeping on memory allocation, so we may need to impose a timeout
- * too. */
- if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) {
- fscache_stat(&fscache_n_store_vmscan_busy);
- return false;
- }
-
- fscache_stat(&fscache_n_store_vmscan_wait);
- if (!release_page_wait_timeout(cookie, page))
- _debug("fscache writeout timeout page: %p{%lx}",
- page, page->index);
-
- gfp &= ~__GFP_DIRECT_RECLAIM;
- goto try_again;
-}
-EXPORT_SYMBOL(__fscache_maybe_release_page);
-
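On the netfs side this is normally reached through the fscache_maybe_release_page() wrapper from a ->releasepage() handler; a sketch, with myfs_i() assumed to be an accessor that yields the inode's cookie:

	/* Hypothetical ->releasepage(); myfs_i() is an assumed accessor. */
	static int myfs_releasepage(struct page *page, gfp_t gfp)
	{
		struct fscache_cookie *cookie = myfs_i(page->mapping->host)->fscache;

		/* Refuse release while the cache is still writing the page,
		 * unless the pending store can be cancelled or waited out. */
		if (!fscache_maybe_release_page(cookie, page, gfp))
			return 0;

		return 1;	/* page may be released */
	}
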
-/*
- * note that a page has finished being written to the cache
- */
-static void fscache_end_page_write(struct fscache_object *object,
- struct page *page)
-{
- struct fscache_cookie *cookie;
- struct page *xpage = NULL, *val;
-
- spin_lock(&object->lock);
- cookie = object->cookie;
- if (cookie) {
- /* delete the page from the tree if it is now no longer
- * pending */
- spin_lock(&cookie->stores_lock);
- radix_tree_tag_clear(&cookie->stores, page->index,
- FSCACHE_COOKIE_STORING_TAG);
- trace_fscache_page(cookie, page, fscache_page_radix_clear_store);
- if (!radix_tree_tag_get(&cookie->stores, page->index,
- FSCACHE_COOKIE_PENDING_TAG)) {
- fscache_stat(&fscache_n_store_radix_deletes);
- xpage = radix_tree_delete(&cookie->stores, page->index);
- trace_fscache_page(cookie, page, fscache_page_radix_delete);
- trace_fscache_page(cookie, page, fscache_page_write_end);
-
- val = radix_tree_lookup(&cookie->stores, page->index);
- trace_fscache_check_page(cookie, page, val, 1);
- } else {
- trace_fscache_page(cookie, page, fscache_page_write_end_pend);
- }
- spin_unlock(&cookie->stores_lock);
- wake_up_bit(&cookie->flags, 0);
- trace_fscache_wake_cookie(cookie);
- } else {
- trace_fscache_page(cookie, page, fscache_page_write_end_noc);
- }
- spin_unlock(&object->lock);
- if (xpage)
- put_page(xpage);
-}
-
-/*
- * actually apply the changed attributes to a cache object
- */
-static void fscache_attr_changed_op(struct fscache_operation *op)
-{
- struct fscache_object *object = op->object;
- int ret;
-
- _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
-
- fscache_stat(&fscache_n_attr_changed_calls);
-
- if (fscache_object_is_active(object)) {
- fscache_stat(&fscache_n_cop_attr_changed);
- ret = object->cache->ops->attr_changed(object);
- fscache_stat_d(&fscache_n_cop_attr_changed);
- if (ret < 0)
- fscache_abort_object(object);
- fscache_op_complete(op, ret < 0);
- } else {
- fscache_op_complete(op, true);
- }
-
- _leave("");
-}
-
-/*
- * notification that the attributes on an object have changed
- */
-int __fscache_attr_changed(struct fscache_cookie *cookie)
-{
- struct fscache_operation *op;
- struct fscache_object *object;
- bool wake_cookie = false;
-
- _enter("%p", cookie);
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
-
- fscache_stat(&fscache_n_attr_changed);
-
- op = kzalloc(sizeof(*op), GFP_KERNEL);
- if (!op) {
- fscache_stat(&fscache_n_attr_changed_nomem);
- _leave(" = -ENOMEM");
- return -ENOMEM;
- }
-
- fscache_operation_init(cookie, op, fscache_attr_changed_op, NULL, NULL);
- trace_fscache_page_op(cookie, NULL, op, fscache_page_op_attr_changed);
- op->flags = FSCACHE_OP_ASYNC |
- (1 << FSCACHE_OP_EXCLUSIVE) |
- (1 << FSCACHE_OP_UNUSE_COOKIE);
-
- spin_lock(&cookie->lock);
-
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto nobufs;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- __fscache_use_cookie(cookie);
- if (fscache_submit_exclusive_op(object, op) < 0)
- goto nobufs_dec;
- spin_unlock(&cookie->lock);
- fscache_stat(&fscache_n_attr_changed_ok);
- fscache_put_operation(op);
- _leave(" = 0");
- return 0;
-
-nobufs_dec:
- wake_cookie = __fscache_unuse_cookie(cookie);
-nobufs:
- spin_unlock(&cookie->lock);
- fscache_put_operation(op);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
- fscache_stat(&fscache_n_attr_changed_nobufs);
- _leave(" = %d", -ENOBUFS);
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(__fscache_attr_changed);
-
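Netfs code reaches this through the fscache_attr_changed() wrapper, typically after an operation that changes the object's attributes; for instance (myfs_i() assumed as above):

	/* Hypothetical caller: tell the cache the file size changed. */
	static void myfs_post_truncate(struct inode *inode)
	{
		fscache_attr_changed(myfs_i(inode)->fscache);
	}
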
-/*
- * Handle cancellation of a pending retrieval op
- */
-static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
-{
- struct fscache_retrieval *op =
- container_of(_op, struct fscache_retrieval, op);
-
- atomic_set(&op->n_pages, 0);
-}
-
-/*
- * release a retrieval op reference
- */
-static void fscache_release_retrieval_op(struct fscache_operation *_op)
-{
- struct fscache_retrieval *op =
- container_of(_op, struct fscache_retrieval, op);
-
- _enter("{OP%x}", op->op.debug_id);
-
- ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED,
- atomic_read(&op->n_pages), ==, 0);
-
- if (op->context)
- fscache_put_context(op->cookie, op->context);
-
- _leave("");
-}
-
-/*
- * allocate a retrieval op
- */
-struct fscache_retrieval *fscache_alloc_retrieval(
- struct fscache_cookie *cookie,
- struct address_space *mapping,
- fscache_rw_complete_t end_io_func,
- void *context)
-{
- struct fscache_retrieval *op;
-
- /* allocate a retrieval operation and attempt to submit it */
- op = kzalloc(sizeof(*op), GFP_NOIO);
- if (!op) {
- fscache_stat(&fscache_n_retrievals_nomem);
- return NULL;
- }
-
- fscache_operation_init(cookie, &op->op, NULL,
- fscache_do_cancel_retrieval,
- fscache_release_retrieval_op);
- op->op.flags = FSCACHE_OP_MYTHREAD |
- (1UL << FSCACHE_OP_WAITING) |
- (1UL << FSCACHE_OP_UNUSE_COOKIE);
- op->cookie = cookie;
- op->mapping = mapping;
- op->end_io_func = end_io_func;
- op->context = context;
- INIT_LIST_HEAD(&op->to_do);
-
- /* Pin the netfs read context in case we need to do the actual netfs
- * read because we've encountered a cache read failure.
- */
- if (context)
- fscache_get_context(op->cookie, context);
- return op;
-}
-
-/*
- * wait for a deferred lookup to complete
- */
-int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
-{
- _enter("");
-
- if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
- _leave(" = 0 [imm]");
- return 0;
- }
-
- fscache_stat(&fscache_n_retrievals_wait);
-
- if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
- TASK_INTERRUPTIBLE) != 0) {
- fscache_stat(&fscache_n_retrievals_intr);
- _leave(" = -ERESTARTSYS");
- return -ERESTARTSYS;
- }
-
- ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));
-
- smp_rmb();
- _leave(" = 0 [dly]");
- return 0;
-}
-
-/*
- * wait for an operation to become active (or for the object to die)
- */
-int fscache_wait_for_operation_activation(struct fscache_object *object,
- struct fscache_operation *op,
- atomic_t *stat_op_waits,
- atomic_t *stat_object_dead)
-{
- int ret;
-
- if (!test_bit(FSCACHE_OP_WAITING, &op->flags))
- goto check_if_dead;
-
- _debug(">>> WT");
- if (stat_op_waits)
- fscache_stat(stat_op_waits);
- if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
- TASK_INTERRUPTIBLE) != 0) {
- trace_fscache_op(object->cookie, op, fscache_op_signal);
- ret = fscache_cancel_op(op, false);
- if (ret == 0)
- return -ERESTARTSYS;
-
- /* it's been removed from the pending queue by another party,
- * so we should get to run shortly */
- wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
- TASK_UNINTERRUPTIBLE);
- }
- _debug("<<< GO");
-
-check_if_dead:
- if (op->state == FSCACHE_OP_ST_CANCELLED) {
- if (stat_object_dead)
- fscache_stat(stat_object_dead);
- _leave(" = -ENOBUFS [cancelled]");
- return -ENOBUFS;
- }
- if (unlikely(fscache_object_is_dying(object) ||
- fscache_cache_is_broken(object))) {
- enum fscache_operation_state state = op->state;
- trace_fscache_op(object->cookie, op, fscache_op_signal);
- fscache_cancel_op(op, true);
- if (stat_object_dead)
- fscache_stat(stat_object_dead);
- _leave(" = -ENOBUFS [obj dead %d]", state);
- return -ENOBUFS;
- }
- return 0;
-}
-
-/*
- * read a page from the cache or allocate a block in which to store it
- * - we return:
- * -ENOMEM - out of memory, nothing done
- * -ERESTARTSYS - interrupted
- * -ENOBUFS - no backing object available in which to cache the block
- * -ENODATA - no data available in the backing object for this block
- * 0 - dispatched a read - it'll call end_io_func() when finished
- */
-int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
-{
- struct fscache_retrieval *op;
- struct fscache_object *object;
- bool wake_cookie = false;
- int ret;
-
- _enter("%p,%p,,,", cookie, page);
-
- fscache_stat(&fscache_n_retrievals);
-
- if (hlist_empty(&cookie->backing_objects))
- goto nobufs;
-
- if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
- _leave(" = -ENOBUFS [invalidating]");
- return -ENOBUFS;
- }
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
- ASSERTCMP(page, !=, NULL);
-
- if (fscache_wait_for_deferred_lookup(cookie) < 0)
- return -ERESTARTSYS;
-
- op = fscache_alloc_retrieval(cookie, page->mapping,
- end_io_func, context);
- if (!op) {
- _leave(" = -ENOMEM");
- return -ENOMEM;
- }
- atomic_set(&op->n_pages, 1);
- trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_retr_one);
-
- spin_lock(&cookie->lock);
-
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto nobufs_unlock;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));
-
- __fscache_use_cookie(cookie);
- atomic_inc(&object->n_reads);
- __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
-
- if (fscache_submit_op(object, &op->op) < 0)
- goto nobufs_unlock_dec;
- spin_unlock(&cookie->lock);
-
- fscache_stat(&fscache_n_retrieval_ops);
-
- /* we wait for the operation to become active, and then process it
- * *here*, in this thread, and not in the thread pool */
- ret = fscache_wait_for_operation_activation(
- object, &op->op,
- __fscache_stat(&fscache_n_retrieval_op_waits),
- __fscache_stat(&fscache_n_retrievals_object_dead));
- if (ret < 0)
- goto error;
-
- /* ask the cache to honour the operation */
- if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
- fscache_stat(&fscache_n_cop_allocate_page);
- ret = object->cache->ops->allocate_page(op, page, gfp);
- fscache_stat_d(&fscache_n_cop_allocate_page);
- if (ret == 0)
- ret = -ENODATA;
- } else {
- fscache_stat(&fscache_n_cop_read_or_alloc_page);
- ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
- fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
- }
-
-error:
- if (ret == -ENOMEM)
- fscache_stat(&fscache_n_retrievals_nomem);
- else if (ret == -ERESTARTSYS)
- fscache_stat(&fscache_n_retrievals_intr);
- else if (ret == -ENODATA)
- fscache_stat(&fscache_n_retrievals_nodata);
- else if (ret < 0)
- fscache_stat(&fscache_n_retrievals_nobufs);
- else
- fscache_stat(&fscache_n_retrievals_ok);
-
- fscache_put_retrieval(op);
- _leave(" = %d", ret);
- return ret;
-
-nobufs_unlock_dec:
- atomic_dec(&object->n_reads);
- wake_cookie = __fscache_unuse_cookie(cookie);
-nobufs_unlock:
- spin_unlock(&cookie->lock);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
- fscache_put_retrieval(op);
-nobufs:
- fscache_stat(&fscache_n_retrievals_nobufs);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(__fscache_read_or_alloc_page);
-
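Seen from a netfs ->readpage(), the return codes documented above map onto a dispatch-or-fallback pattern along these lines (the myfs_* names and the server fallback are assumptions):

	/* Hypothetical ->readpage() built on fscache_read_or_alloc_page(). */
	static void myfs_read_complete(struct page *page, void *ctx, int error)
	{
		if (!error)
			SetPageUptodate(page);
		unlock_page(page);
	}

	static int myfs_readpage(struct file *file, struct page *page)
	{
		struct fscache_cookie *cookie = myfs_i(page->mapping->host)->fscache;
		int ret;

		ret = fscache_read_or_alloc_page(cookie, page, myfs_read_complete,
						 NULL, GFP_KERNEL);
		switch (ret) {
		case 0:			/* read dispatched; callback finishes it */
			return 0;
		case -ENOBUFS:		/* no backing object available */
		case -ENODATA:		/* nothing cached for this block */
			return myfs_readpage_from_server(file, page);
		default:
			return ret;
		}
	}
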
-/*
- * read a list of pages from the cache or allocate blocks in which to store
- * them
- * - we return:
- * -ENOMEM - out of memory, some pages may be being read
- * -ERESTARTSYS - interrupted, some pages may be being read
- * -ENOBUFS - no backing object or space available in which to cache any
- * pages not being read
- * -ENODATA - no data available in the backing object for some or all of
- * the pages
- * 0 - dispatched a read on all pages
- *
- * end_io_func() will be called for each page read from the cache as it
- * finishes being read
- *
- * any pages for which a read is dispatched will be removed from pages and
- * nr_pages
- */
-int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
-{
- struct fscache_retrieval *op;
- struct fscache_object *object;
- bool wake_cookie = false;
- int ret;
-
- _enter("%p,,%d,,,", cookie, *nr_pages);
-
- fscache_stat(&fscache_n_retrievals);
-
- if (hlist_empty(&cookie->backing_objects))
- goto nobufs;
-
- if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
- _leave(" = -ENOBUFS [invalidating]");
- return -ENOBUFS;
- }
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
- ASSERTCMP(*nr_pages, >, 0);
- ASSERT(!list_empty(pages));
-
- if (fscache_wait_for_deferred_lookup(cookie) < 0)
- return -ERESTARTSYS;
-
- op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
- if (!op)
- return -ENOMEM;
- atomic_set(&op->n_pages, *nr_pages);
- trace_fscache_page_op(cookie, NULL, &op->op, fscache_page_op_retr_multi);
-
- spin_lock(&cookie->lock);
-
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto nobufs_unlock;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- __fscache_use_cookie(cookie);
- atomic_inc(&object->n_reads);
- __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
-
- if (fscache_submit_op(object, &op->op) < 0)
- goto nobufs_unlock_dec;
- spin_unlock(&cookie->lock);
-
- fscache_stat(&fscache_n_retrieval_ops);
-
- /* we wait for the operation to become active, and then process it
- * *here*, in this thread, and not in the thread pool */
- ret = fscache_wait_for_operation_activation(
- object, &op->op,
- __fscache_stat(&fscache_n_retrieval_op_waits),
- __fscache_stat(&fscache_n_retrievals_object_dead));
- if (ret < 0)
- goto error;
-
- /* ask the cache to honour the operation */
- if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
- fscache_stat(&fscache_n_cop_allocate_pages);
- ret = object->cache->ops->allocate_pages(
- op, pages, nr_pages, gfp);
- fscache_stat_d(&fscache_n_cop_allocate_pages);
- } else {
- fscache_stat(&fscache_n_cop_read_or_alloc_pages);
- ret = object->cache->ops->read_or_alloc_pages(
- op, pages, nr_pages, gfp);
- fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
- }
-
-error:
- if (ret == -ENOMEM)
- fscache_stat(&fscache_n_retrievals_nomem);
- else if (ret == -ERESTARTSYS)
- fscache_stat(&fscache_n_retrievals_intr);
- else if (ret == -ENODATA)
- fscache_stat(&fscache_n_retrievals_nodata);
- else if (ret < 0)
- fscache_stat(&fscache_n_retrievals_nobufs);
- else
- fscache_stat(&fscache_n_retrievals_ok);
-
- fscache_put_retrieval(op);
- _leave(" = %d", ret);
- return ret;
-
-nobufs_unlock_dec:
- atomic_dec(&object->n_reads);
- wake_cookie = __fscache_unuse_cookie(cookie);
-nobufs_unlock:
- spin_unlock(&cookie->lock);
- fscache_put_retrieval(op);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
-nobufs:
- fscache_stat(&fscache_n_retrievals_nobufs);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
-
-/*
- * allocate a block in the cache on which to store a page
- * - we return:
- * -ENOMEM - out of memory, nothing done
- * -ERESTARTSYS - interrupted
- * -ENOBUFS - no backing object available in which to cache the block
- * 0 - block allocated
- */
-int __fscache_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- struct fscache_retrieval *op;
- struct fscache_object *object;
- bool wake_cookie = false;
- int ret;
-
- _enter("%p,%p,,,", cookie, page);
-
- fscache_stat(&fscache_n_allocs);
-
- if (hlist_empty(&cookie->backing_objects))
- goto nobufs;
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
- ASSERTCMP(page, !=, NULL);
-
- if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
- _leave(" = -ENOBUFS [invalidating]");
- return -ENOBUFS;
- }
-
- if (fscache_wait_for_deferred_lookup(cookie) < 0)
- return -ERESTARTSYS;
-
- op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
- if (!op)
- return -ENOMEM;
- atomic_set(&op->n_pages, 1);
- trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_alloc_one);
-
- spin_lock(&cookie->lock);
-
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto nobufs_unlock;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- __fscache_use_cookie(cookie);
- if (fscache_submit_op(object, &op->op) < 0)
- goto nobufs_unlock_dec;
- spin_unlock(&cookie->lock);
-
- fscache_stat(&fscache_n_alloc_ops);
-
- ret = fscache_wait_for_operation_activation(
- object, &op->op,
- __fscache_stat(&fscache_n_alloc_op_waits),
- __fscache_stat(&fscache_n_allocs_object_dead));
- if (ret < 0)
- goto error;
-
- /* ask the cache to honour the operation */
- fscache_stat(&fscache_n_cop_allocate_page);
- ret = object->cache->ops->allocate_page(op, page, gfp);
- fscache_stat_d(&fscache_n_cop_allocate_page);
-
-error:
- if (ret == -ERESTARTSYS)
- fscache_stat(&fscache_n_allocs_intr);
- else if (ret < 0)
- fscache_stat(&fscache_n_allocs_nobufs);
- else
- fscache_stat(&fscache_n_allocs_ok);
-
- fscache_put_retrieval(op);
- _leave(" = %d", ret);
- return ret;
-
-nobufs_unlock_dec:
- wake_cookie = __fscache_unuse_cookie(cookie);
-nobufs_unlock:
- spin_unlock(&cookie->lock);
- fscache_put_retrieval(op);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
-nobufs:
- fscache_stat(&fscache_n_allocs_nobufs);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-}
-EXPORT_SYMBOL(__fscache_alloc_page);
-
-/*
- * Unmark pages allocated in the readahead code path (via:
- * fscache_readpages_or_alloc) after delegating to the base filesystem
- */
-void __fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages)
-{
- struct page *page;
-
- list_for_each_entry(page, pages, lru) {
- if (PageFsCache(page))
- __fscache_uncache_page(cookie, page);
- }
-}
-EXPORT_SYMBOL(__fscache_readpages_cancel);
-
-/*
- * release a write op reference
- */
-static void fscache_release_write_op(struct fscache_operation *_op)
-{
- _enter("{OP%x}", _op->debug_id);
-}
-
-/*
- * perform the background storage of a page into the cache
- */
-static void fscache_write_op(struct fscache_operation *_op)
-{
- struct fscache_storage *op =
- container_of(_op, struct fscache_storage, op);
- struct fscache_object *object = op->op.object;
- struct fscache_cookie *cookie;
- struct page *page;
- unsigned n;
- void *results[1];
- int ret;
-
- _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
-
-again:
- spin_lock(&object->lock);
- cookie = object->cookie;
-
- if (!fscache_object_is_active(object)) {
- /* If we get here, then the on-disk cache object likely no
- * longer exists, so we should just cancel this write
- * operation.
- */
- spin_unlock(&object->lock);
- fscache_op_complete(&op->op, true);
- _leave(" [inactive]");
- return;
- }
-
- if (!cookie) {
- /* If we get here, then the cookie belonging to the object was
- * detached, probably by the cookie being withdrawn due to
-		 * memory pressure, which means that the pages from which we
-		 * might write to the cache no longer exist - therefore, we can
-		 * just cancel this write operation.
- */
- spin_unlock(&object->lock);
- fscache_op_complete(&op->op, true);
- _leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
- _op->flags, _op->state, object->state->short_name,
- object->flags);
- return;
- }
-
- spin_lock(&cookie->stores_lock);
-
- fscache_stat(&fscache_n_store_calls);
-
- /* find a page to store */
- results[0] = NULL;
- page = NULL;
- n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
- FSCACHE_COOKIE_PENDING_TAG);
- trace_fscache_gang_lookup(cookie, &op->op, results, n, op->store_limit);
- if (n != 1)
- goto superseded;
- page = results[0];
- _debug("gang %d [%lx]", n, page->index);
-
- radix_tree_tag_set(&cookie->stores, page->index,
- FSCACHE_COOKIE_STORING_TAG);
- radix_tree_tag_clear(&cookie->stores, page->index,
- FSCACHE_COOKIE_PENDING_TAG);
- trace_fscache_page(cookie, page, fscache_page_radix_pend2store);
-
- spin_unlock(&cookie->stores_lock);
- spin_unlock(&object->lock);
-
- if (page->index >= op->store_limit)
- goto discard_page;
-
- fscache_stat(&fscache_n_store_pages);
- fscache_stat(&fscache_n_cop_write_page);
- ret = object->cache->ops->write_page(op, page);
- fscache_stat_d(&fscache_n_cop_write_page);
- trace_fscache_wrote_page(cookie, page, &op->op, ret);
- fscache_end_page_write(object, page);
- if (ret < 0) {
- fscache_abort_object(object);
- fscache_op_complete(&op->op, true);
- } else {
- fscache_enqueue_operation(&op->op);
- }
-
- _leave("");
- return;
-
-discard_page:
- fscache_stat(&fscache_n_store_pages_over_limit);
- trace_fscache_wrote_page(cookie, page, &op->op, -ENOBUFS);
- fscache_end_page_write(object, page);
- goto again;
-
-superseded:
- /* this writer is going away and there aren't any more things to
- * write */
- _debug("cease");
- spin_unlock(&cookie->stores_lock);
- clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
- spin_unlock(&object->lock);
- fscache_op_complete(&op->op, false);
- _leave("");
-}
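
For orientation, a sketch of the store state machine this (now removed) worker drove; the tag names are the radix-tree tags used above:

	/*
	 * __fscache_write_page() inserts the page and sets PENDING
	 *   -> fscache_write_op() picks one page, retags PENDING -> STORING
	 *   -> ops->write_page() runs; fscache_end_page_write() clears STORING
	 *   -> more PENDING pages? re-enqueue the op, else "superseded" and
	 *      the op completes
	 */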
-
-/*
- * Clear the pages pending writing for invalidation
- */
-void fscache_invalidate_writes(struct fscache_cookie *cookie)
-{
- struct page *page;
- void *results[16];
- int n, i;
-
- _enter("");
-
- for (;;) {
- spin_lock(&cookie->stores_lock);
- n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
- ARRAY_SIZE(results),
- FSCACHE_COOKIE_PENDING_TAG);
- if (n == 0) {
- spin_unlock(&cookie->stores_lock);
- break;
- }
-
- for (i = n - 1; i >= 0; i--) {
- page = results[i];
- radix_tree_delete(&cookie->stores, page->index);
- trace_fscache_page(cookie, page, fscache_page_radix_delete);
- trace_fscache_page(cookie, page, fscache_page_inval);
- }
-
- spin_unlock(&cookie->stores_lock);
-
- for (i = n - 1; i >= 0; i--)
- put_page(results[i]);
- }
-
- wake_up_bit(&cookie->flags, 0);
- trace_fscache_wake_cookie(cookie);
-
- _leave("");
-}
-
-/*
- * request a page be stored in the cache
- * - returns:
- * -ENOMEM - out of memory, nothing done
- * -ENOBUFS - no backing object available in which to cache the page
- * 0 - dispatched a write - it'll call end_io_func() when finished
- *
- * if the cookie still has a backing object at this point, that object can be
- * in one of a few states with respect to storage processing:
- *
- * (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
- * set)
- *
- * (a) no writes yet
- *
- * (b) writes deferred till post-creation (mark page for writing and
- * return immediately)
- *
- * (2) negative lookup, object created, initial fill being made from netfs
- *
- * (a) fill point not yet reached this page (mark page for writing and
- * return)
- *
- * (b) fill point passed this page (queue op to store this page)
- *
- * (3) object extant (queue op to store this page)
- *
- * any other state is invalid
- */
-int __fscache_write_page(struct fscache_cookie *cookie,
- struct page *page,
- loff_t object_size,
- gfp_t gfp)
-{
- struct fscache_storage *op;
- struct fscache_object *object;
- bool wake_cookie = false;
- int ret;
-
- _enter("%p,%x,", cookie, (u32) page->flags);
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
- ASSERT(PageFsCache(page));
-
- fscache_stat(&fscache_n_stores);
-
- if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
- _leave(" = -ENOBUFS [invalidating]");
- return -ENOBUFS;
- }
-
- op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
- if (!op)
- goto nomem;
-
- fscache_operation_init(cookie, &op->op, fscache_write_op, NULL,
- fscache_release_write_op);
- op->op.flags = FSCACHE_OP_ASYNC |
- (1 << FSCACHE_OP_WAITING) |
- (1 << FSCACHE_OP_UNUSE_COOKIE);
-
- ret = radix_tree_maybe_preload(gfp & ~__GFP_HIGHMEM);
- if (ret < 0)
- goto nomem_free;
-
- trace_fscache_page_op(cookie, page, &op->op, fscache_page_op_write_one);
-
- ret = -ENOBUFS;
- spin_lock(&cookie->lock);
-
- if (!fscache_cookie_enabled(cookie) ||
- hlist_empty(&cookie->backing_objects))
- goto nobufs;
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
- if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
- goto nobufs;
-
- trace_fscache_page(cookie, page, fscache_page_write);
-
- /* add the page to the pending-storage radix tree on the backing
- * object */
- spin_lock(&object->lock);
-
- if (object->store_limit_l != object_size)
- fscache_set_store_limit(object, object_size);
-
- spin_lock(&cookie->stores_lock);
-
- _debug("store limit %llx", (unsigned long long) object->store_limit);
-
- ret = radix_tree_insert(&cookie->stores, page->index, page);
- if (ret < 0) {
- if (ret == -EEXIST)
- goto already_queued;
- _debug("insert failed %d", ret);
- goto nobufs_unlock_obj;
- }
-
- trace_fscache_page(cookie, page, fscache_page_radix_insert);
- radix_tree_tag_set(&cookie->stores, page->index,
- FSCACHE_COOKIE_PENDING_TAG);
- trace_fscache_page(cookie, page, fscache_page_radix_set_pend);
- get_page(page);
-
- /* we only want one writer at a time, but we do need to queue new
- * writers after exclusive ops */
- if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
- goto already_pending;
-
- spin_unlock(&cookie->stores_lock);
- spin_unlock(&object->lock);
-
- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
- op->store_limit = object->store_limit;
-
- __fscache_use_cookie(cookie);
- if (fscache_submit_op(object, &op->op) < 0)
- goto submit_failed;
-
- spin_unlock(&cookie->lock);
- radix_tree_preload_end();
- fscache_stat(&fscache_n_store_ops);
- fscache_stat(&fscache_n_stores_ok);
-
- /* the work queue now carries its own ref on the object */
- fscache_put_operation(&op->op);
- _leave(" = 0");
- return 0;
-
-already_queued:
- fscache_stat(&fscache_n_stores_again);
-already_pending:
- spin_unlock(&cookie->stores_lock);
- spin_unlock(&object->lock);
- spin_unlock(&cookie->lock);
- radix_tree_preload_end();
- fscache_put_operation(&op->op);
- fscache_stat(&fscache_n_stores_ok);
- _leave(" = 0");
- return 0;
-
-submit_failed:
- spin_lock(&cookie->stores_lock);
- radix_tree_delete(&cookie->stores, page->index);
- trace_fscache_page(cookie, page, fscache_page_radix_delete);
- spin_unlock(&cookie->stores_lock);
- wake_cookie = __fscache_unuse_cookie(cookie);
- put_page(page);
- ret = -ENOBUFS;
- goto nobufs;
-
-nobufs_unlock_obj:
- spin_unlock(&cookie->stores_lock);
- spin_unlock(&object->lock);
-nobufs:
- spin_unlock(&cookie->lock);
- radix_tree_preload_end();
- fscache_put_operation(&op->op);
- if (wake_cookie)
- __fscache_wake_unused_cookie(cookie);
- fscache_stat(&fscache_n_stores_nobufs);
- _leave(" = -ENOBUFS");
- return -ENOBUFS;
-
-nomem_free:
- fscache_put_operation(&op->op);
-nomem:
- fscache_stat(&fscache_n_stores_oom);
- _leave(" = -ENOMEM");
- return -ENOMEM;
-}
-EXPORT_SYMBOL(__fscache_write_page);
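
A minimal sketch of how a netfs drove this removed API; fscache_write_page() and fscache_uncache_page() are assumed to be the inline wrappers around the __fscache_* functions above:

	static void netfs_example_store(struct fscache_cookie *cookie,
					struct page *page, loff_t i_size)
	{
		/* Queue a background store; on 0, fscache_write_op() will
		 * write the page out and drop the radix-tree reference. */
		int ret = fscache_write_page(cookie, page, i_size, GFP_KERNEL);

		if (ret == -ENOBUFS)
			fscache_uncache_page(cookie, page); /* no backing object */
	}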
-
-/*
- * remove a page from the cache
- */
-void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
-{
- struct fscache_object *object;
-
- _enter(",%p", page);
-
- ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
- ASSERTCMP(page, !=, NULL);
-
- fscache_stat(&fscache_n_uncaches);
-
- /* cache withdrawal may beat us to it */
- if (!PageFsCache(page))
- goto done;
-
- trace_fscache_page(cookie, page, fscache_page_uncache);
-
- /* get the object */
- spin_lock(&cookie->lock);
-
- if (hlist_empty(&cookie->backing_objects)) {
- ClearPageFsCache(page);
- goto done_unlock;
- }
-
- object = hlist_entry(cookie->backing_objects.first,
- struct fscache_object, cookie_link);
-
- /* there might now be stuff on disk we could read */
- clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
-
- /* only invoke the cache backend if we managed to mark the page
- * uncached here; this deals with synchronisation vs withdrawal */
- if (TestClearPageFsCache(page) &&
- object->cache->ops->uncache_page) {
- /* the cache backend releases the cookie lock */
- fscache_stat(&fscache_n_cop_uncache_page);
- object->cache->ops->uncache_page(object, page);
- fscache_stat_d(&fscache_n_cop_uncache_page);
- goto done;
- }
-
-done_unlock:
- spin_unlock(&cookie->lock);
-done:
- _leave("");
-}
-EXPORT_SYMBOL(__fscache_uncache_page);
-
-/**
- * fscache_mark_page_cached - Mark a page as being cached
- * @op: The retrieval op pages are being marked for
- * @page: The page to be marked
- *
- * Mark a netfs page as being cached. After this is called, the netfs
- * must call fscache_uncache_page() to remove the mark.
- */
-void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
-{
- struct fscache_cookie *cookie = op->op.object->cookie;
-
-#ifdef CONFIG_FSCACHE_STATS
- atomic_inc(&fscache_n_marks);
-#endif
-
- trace_fscache_page(cookie, page, fscache_page_cached);
-
- _debug("- mark %p{%lx}", page, page->index);
- if (TestSetPageFsCache(page)) {
- static bool once_only;
- if (!once_only) {
- once_only = true;
- pr_warn("Cookie type %s marked page %lx multiple times\n",
- cookie->def->name, page->index);
- }
- }
-
- if (cookie->def->mark_page_cached)
- cookie->def->mark_page_cached(cookie->netfs_data,
- op->mapping, page);
-}
-EXPORT_SYMBOL(fscache_mark_page_cached);
-
-/**
- * fscache_mark_pages_cached - Mark pages as being cached
- * @op: The retrieval op pages are being marked for
- * @pagevec: The pages to be marked
- *
- * Mark a bunch of netfs pages as being cached. After this is called,
- * the netfs must call fscache_uncache_page() to remove the mark.
- */
-void fscache_mark_pages_cached(struct fscache_retrieval *op,
- struct pagevec *pagevec)
-{
- unsigned long loop;
-
- for (loop = 0; loop < pagevec->nr; loop++)
- fscache_mark_page_cached(op, pagevec->pages[loop]);
-
- pagevec_reinit(pagevec);
-}
-EXPORT_SYMBOL(fscache_mark_pages_cached);
-
-/*
- * Uncache all the pages in an inode that are marked PG_fscache, assuming them
- * to be associated with the given cookie.
- */
-void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
- struct inode *inode)
-{
- struct address_space *mapping = inode->i_mapping;
- struct pagevec pvec;
- pgoff_t next;
- int i;
-
- _enter("%p,%p", cookie, inode);
-
- if (!mapping || mapping->nrpages == 0) {
- _leave(" [no pages]");
- return;
- }
-
- pagevec_init(&pvec);
- next = 0;
- do {
- if (!pagevec_lookup(&pvec, mapping, &next))
- break;
- for (i = 0; i < pagevec_count(&pvec); i++) {
- struct page *page = pvec.pages[i];
- if (PageFsCache(page)) {
- __fscache_wait_on_page_write(cookie, page);
- __fscache_uncache_page(cookie, page);
- }
- }
- pagevec_release(&pvec);
- cond_resched();
- } while (next);
-
- _leave("");
-}
-EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
diff --git a/fs/fscache/proc.c b/fs/fscache/proc.c
index 061df8f61ffc..dc3b0e9c8cce 100644
--- a/fs/fscache/proc.c
+++ b/fs/fscache/proc.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache statistics viewing interface
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
-#define FSCACHE_DEBUG_LEVEL OPERATION
+#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -16,42 +16,32 @@
*/
int __init fscache_proc_init(void)
{
- _enter("");
-
if (!proc_mkdir("fs/fscache", NULL))
goto error_dir;
+ if (!proc_create_seq("fs/fscache/caches", S_IFREG | 0444, NULL,
+ &fscache_caches_seq_ops))
+ goto error;
+
+ if (!proc_create_seq("fs/fscache/volumes", S_IFREG | 0444, NULL,
+ &fscache_volumes_seq_ops))
+ goto error;
+
if (!proc_create_seq("fs/fscache/cookies", S_IFREG | 0444, NULL,
&fscache_cookies_seq_ops))
- goto error_cookies;
+ goto error;
#ifdef CONFIG_FSCACHE_STATS
if (!proc_create_single("fs/fscache/stats", S_IFREG | 0444, NULL,
- fscache_stats_show))
- goto error_stats;
+ fscache_stats_show))
+ goto error;
#endif
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
- if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
- &fscache_objlist_proc_ops))
- goto error_objects;
-#endif
-
- _leave(" = 0");
return 0;
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
-error_objects:
-#endif
-#ifdef CONFIG_FSCACHE_STATS
- remove_proc_entry("fs/fscache/stats", NULL);
-error_stats:
-#endif
- remove_proc_entry("fs/fscache/cookies", NULL);
-error_cookies:
+error:
remove_proc_entry("fs/fscache", NULL);
error_dir:
- _leave(" = -ENOMEM");
return -ENOMEM;
}
@@ -60,12 +50,5 @@ error_dir:
*/
void fscache_proc_cleanup(void)
{
-#ifdef CONFIG_FSCACHE_OBJECT_LIST
- remove_proc_entry("fs/fscache/objects", NULL);
-#endif
-#ifdef CONFIG_FSCACHE_STATS
- remove_proc_entry("fs/fscache/stats", NULL);
-#endif
- remove_proc_entry("fs/fscache/cookies", NULL);
- remove_proc_entry("fs/fscache", NULL);
+ remove_proc_subtree("fs/fscache", NULL);
}
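
A note on the simplified teardown above:

	/* remove_proc_subtree("fs/fscache", NULL) recursively removes the
	 * directory and every entry registered beneath it, so the per-file
	 * remove_proc_entry() calls and their config #ifdefs are no longer
	 * needed.
	 */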
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
index a7c3ed89a3e0..fc94e5e79f1c 100644
--- a/fs/fscache/stats.c
+++ b/fs/fscache/stats.c
@@ -1,12 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/* FS-Cache statistics
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
-#define FSCACHE_DEBUG_LEVEL THREAD
-#include <linux/module.h>
+#define FSCACHE_DEBUG_LEVEL CACHE
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"
@@ -14,122 +13,41 @@
/*
* operation counters
*/
-atomic_t fscache_n_op_pend;
-atomic_t fscache_n_op_run;
-atomic_t fscache_n_op_enqueue;
-atomic_t fscache_n_op_deferred_release;
-atomic_t fscache_n_op_initialised;
-atomic_t fscache_n_op_release;
-atomic_t fscache_n_op_gc;
-atomic_t fscache_n_op_cancelled;
-atomic_t fscache_n_op_rejected;
-
-atomic_t fscache_n_attr_changed;
-atomic_t fscache_n_attr_changed_ok;
-atomic_t fscache_n_attr_changed_nobufs;
-atomic_t fscache_n_attr_changed_nomem;
-atomic_t fscache_n_attr_changed_calls;
-
-atomic_t fscache_n_allocs;
-atomic_t fscache_n_allocs_ok;
-atomic_t fscache_n_allocs_wait;
-atomic_t fscache_n_allocs_nobufs;
-atomic_t fscache_n_allocs_intr;
-atomic_t fscache_n_allocs_object_dead;
-atomic_t fscache_n_alloc_ops;
-atomic_t fscache_n_alloc_op_waits;
-
-atomic_t fscache_n_retrievals;
-atomic_t fscache_n_retrievals_ok;
-atomic_t fscache_n_retrievals_wait;
-atomic_t fscache_n_retrievals_nodata;
-atomic_t fscache_n_retrievals_nobufs;
-atomic_t fscache_n_retrievals_intr;
-atomic_t fscache_n_retrievals_nomem;
-atomic_t fscache_n_retrievals_object_dead;
-atomic_t fscache_n_retrieval_ops;
-atomic_t fscache_n_retrieval_op_waits;
-
-atomic_t fscache_n_stores;
-atomic_t fscache_n_stores_ok;
-atomic_t fscache_n_stores_again;
-atomic_t fscache_n_stores_nobufs;
-atomic_t fscache_n_stores_oom;
-atomic_t fscache_n_store_ops;
-atomic_t fscache_n_store_calls;
-atomic_t fscache_n_store_pages;
-atomic_t fscache_n_store_radix_deletes;
-atomic_t fscache_n_store_pages_over_limit;
-
-atomic_t fscache_n_store_vmscan_not_storing;
-atomic_t fscache_n_store_vmscan_gone;
-atomic_t fscache_n_store_vmscan_busy;
-atomic_t fscache_n_store_vmscan_cancelled;
-atomic_t fscache_n_store_vmscan_wait;
-
-atomic_t fscache_n_marks;
-atomic_t fscache_n_uncaches;
+atomic_t fscache_n_volumes;
+atomic_t fscache_n_volumes_collision;
+atomic_t fscache_n_volumes_nomem;
+atomic_t fscache_n_cookies;
+atomic_t fscache_n_cookies_lru;
+atomic_t fscache_n_cookies_lru_expired;
+atomic_t fscache_n_cookies_lru_removed;
+atomic_t fscache_n_cookies_lru_dropped;
atomic_t fscache_n_acquires;
-atomic_t fscache_n_acquires_null;
-atomic_t fscache_n_acquires_no_cache;
atomic_t fscache_n_acquires_ok;
-atomic_t fscache_n_acquires_nobufs;
atomic_t fscache_n_acquires_oom;
atomic_t fscache_n_invalidates;
-atomic_t fscache_n_invalidates_run;
atomic_t fscache_n_updates;
-atomic_t fscache_n_updates_null;
-atomic_t fscache_n_updates_run;
+EXPORT_SYMBOL(fscache_n_updates);
atomic_t fscache_n_relinquishes;
-atomic_t fscache_n_relinquishes_null;
-atomic_t fscache_n_relinquishes_waitcrt;
atomic_t fscache_n_relinquishes_retire;
-
-atomic_t fscache_n_cookie_index;
-atomic_t fscache_n_cookie_data;
-atomic_t fscache_n_cookie_special;
-
-atomic_t fscache_n_object_alloc;
-atomic_t fscache_n_object_no_alloc;
-atomic_t fscache_n_object_lookups;
-atomic_t fscache_n_object_lookups_negative;
-atomic_t fscache_n_object_lookups_positive;
-atomic_t fscache_n_object_lookups_timed_out;
-atomic_t fscache_n_object_created;
-atomic_t fscache_n_object_avail;
-atomic_t fscache_n_object_dead;
-
-atomic_t fscache_n_checkaux_none;
-atomic_t fscache_n_checkaux_okay;
-atomic_t fscache_n_checkaux_update;
-atomic_t fscache_n_checkaux_obsolete;
-
-atomic_t fscache_n_cop_alloc_object;
-atomic_t fscache_n_cop_lookup_object;
-atomic_t fscache_n_cop_lookup_complete;
-atomic_t fscache_n_cop_grab_object;
-atomic_t fscache_n_cop_invalidate_object;
-atomic_t fscache_n_cop_update_object;
-atomic_t fscache_n_cop_drop_object;
-atomic_t fscache_n_cop_put_object;
-atomic_t fscache_n_cop_sync_cache;
-atomic_t fscache_n_cop_attr_changed;
-atomic_t fscache_n_cop_read_or_alloc_page;
-atomic_t fscache_n_cop_read_or_alloc_pages;
-atomic_t fscache_n_cop_allocate_page;
-atomic_t fscache_n_cop_allocate_pages;
-atomic_t fscache_n_cop_write_page;
-atomic_t fscache_n_cop_uncache_page;
-atomic_t fscache_n_cop_dissociate_pages;
-
-atomic_t fscache_n_cache_no_space_reject;
-atomic_t fscache_n_cache_stale_objects;
-atomic_t fscache_n_cache_retired_objects;
-atomic_t fscache_n_cache_culled_objects;
+atomic_t fscache_n_relinquishes_dropped;
+
+atomic_t fscache_n_resizes;
+atomic_t fscache_n_resizes_null;
+
+atomic_t fscache_n_read;
+EXPORT_SYMBOL(fscache_n_read);
+atomic_t fscache_n_write;
+EXPORT_SYMBOL(fscache_n_write);
+atomic_t fscache_n_no_write_space;
+EXPORT_SYMBOL(fscache_n_no_write_space);
+atomic_t fscache_n_no_create_space;
+EXPORT_SYMBOL(fscache_n_no_create_space);
+atomic_t fscache_n_culled;
+EXPORT_SYMBOL(fscache_n_culled);
/*
* display the general statistics
@@ -137,147 +55,48 @@ atomic_t fscache_n_cache_culled_objects;
int fscache_stats_show(struct seq_file *m, void *v)
{
seq_puts(m, "FS-Cache statistics\n");
-
- seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
- atomic_read(&fscache_n_cookie_index),
- atomic_read(&fscache_n_cookie_data),
- atomic_read(&fscache_n_cookie_special));
-
- seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
- atomic_read(&fscache_n_object_alloc),
- atomic_read(&fscache_n_object_no_alloc),
- atomic_read(&fscache_n_object_avail),
- atomic_read(&fscache_n_object_dead));
- seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
- atomic_read(&fscache_n_checkaux_none),
- atomic_read(&fscache_n_checkaux_okay),
- atomic_read(&fscache_n_checkaux_update),
- atomic_read(&fscache_n_checkaux_obsolete));
-
- seq_printf(m, "Pages : mrk=%u unc=%u\n",
- atomic_read(&fscache_n_marks),
- atomic_read(&fscache_n_uncaches));
-
- seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
- " oom=%u\n",
+ seq_printf(m, "Cookies: n=%d v=%d vcol=%u voom=%u\n",
+ atomic_read(&fscache_n_cookies),
+ atomic_read(&fscache_n_volumes),
+ atomic_read(&fscache_n_volumes_collision),
+ atomic_read(&fscache_n_volumes_nomem)
+ );
+
+ seq_printf(m, "Acquire: n=%u ok=%u oom=%u\n",
atomic_read(&fscache_n_acquires),
- atomic_read(&fscache_n_acquires_null),
- atomic_read(&fscache_n_acquires_no_cache),
atomic_read(&fscache_n_acquires_ok),
- atomic_read(&fscache_n_acquires_nobufs),
atomic_read(&fscache_n_acquires_oom));
- seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
- atomic_read(&fscache_n_object_lookups),
- atomic_read(&fscache_n_object_lookups_negative),
- atomic_read(&fscache_n_object_lookups_positive),
- atomic_read(&fscache_n_object_created),
- atomic_read(&fscache_n_object_lookups_timed_out));
+	seq_printf(m, "LRU    : n=%u exp=%u rmv=%u drp=%u at=%ld\n",
+ atomic_read(&fscache_n_cookies_lru),
+ atomic_read(&fscache_n_cookies_lru_expired),
+ atomic_read(&fscache_n_cookies_lru_removed),
+ atomic_read(&fscache_n_cookies_lru_dropped),
+ timer_pending(&fscache_cookie_lru_timer) ?
+ fscache_cookie_lru_timer.expires - jiffies : 0);
- seq_printf(m, "Invals : n=%u run=%u\n",
- atomic_read(&fscache_n_invalidates),
- atomic_read(&fscache_n_invalidates_run));
+ seq_printf(m, "Invals : n=%u\n",
+ atomic_read(&fscache_n_invalidates));
- seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
+ seq_printf(m, "Updates: n=%u rsz=%u rsn=%u\n",
atomic_read(&fscache_n_updates),
- atomic_read(&fscache_n_updates_null),
- atomic_read(&fscache_n_updates_run));
+ atomic_read(&fscache_n_resizes),
+ atomic_read(&fscache_n_resizes_null));
- seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
+ seq_printf(m, "Relinqs: n=%u rtr=%u drop=%u\n",
atomic_read(&fscache_n_relinquishes),
- atomic_read(&fscache_n_relinquishes_null),
- atomic_read(&fscache_n_relinquishes_waitcrt),
- atomic_read(&fscache_n_relinquishes_retire));
-
- seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
- atomic_read(&fscache_n_attr_changed),
- atomic_read(&fscache_n_attr_changed_ok),
- atomic_read(&fscache_n_attr_changed_nobufs),
- atomic_read(&fscache_n_attr_changed_nomem),
- atomic_read(&fscache_n_attr_changed_calls));
-
- seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
- atomic_read(&fscache_n_allocs),
- atomic_read(&fscache_n_allocs_ok),
- atomic_read(&fscache_n_allocs_wait),
- atomic_read(&fscache_n_allocs_nobufs),
- atomic_read(&fscache_n_allocs_intr));
- seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
- atomic_read(&fscache_n_alloc_ops),
- atomic_read(&fscache_n_alloc_op_waits),
- atomic_read(&fscache_n_allocs_object_dead));
-
- seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
- " int=%u oom=%u\n",
- atomic_read(&fscache_n_retrievals),
- atomic_read(&fscache_n_retrievals_ok),
- atomic_read(&fscache_n_retrievals_wait),
- atomic_read(&fscache_n_retrievals_nodata),
- atomic_read(&fscache_n_retrievals_nobufs),
- atomic_read(&fscache_n_retrievals_intr),
- atomic_read(&fscache_n_retrievals_nomem));
- seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
- atomic_read(&fscache_n_retrieval_ops),
- atomic_read(&fscache_n_retrieval_op_waits),
- atomic_read(&fscache_n_retrievals_object_dead));
-
- seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
- atomic_read(&fscache_n_stores),
- atomic_read(&fscache_n_stores_ok),
- atomic_read(&fscache_n_stores_again),
- atomic_read(&fscache_n_stores_nobufs),
- atomic_read(&fscache_n_stores_oom));
- seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
- atomic_read(&fscache_n_store_ops),
- atomic_read(&fscache_n_store_calls),
- atomic_read(&fscache_n_store_pages),
- atomic_read(&fscache_n_store_radix_deletes),
- atomic_read(&fscache_n_store_pages_over_limit));
+ atomic_read(&fscache_n_relinquishes_retire),
+ atomic_read(&fscache_n_relinquishes_dropped));
- seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
- atomic_read(&fscache_n_store_vmscan_not_storing),
- atomic_read(&fscache_n_store_vmscan_gone),
- atomic_read(&fscache_n_store_vmscan_busy),
- atomic_read(&fscache_n_store_vmscan_cancelled),
- atomic_read(&fscache_n_store_vmscan_wait));
+ seq_printf(m, "NoSpace: nwr=%u ncr=%u cull=%u\n",
+ atomic_read(&fscache_n_no_write_space),
+ atomic_read(&fscache_n_no_create_space),
+ atomic_read(&fscache_n_culled));
- seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
- atomic_read(&fscache_n_op_pend),
- atomic_read(&fscache_n_op_run),
- atomic_read(&fscache_n_op_enqueue),
- atomic_read(&fscache_n_op_cancelled),
- atomic_read(&fscache_n_op_rejected));
- seq_printf(m, "Ops : ini=%u dfr=%u rel=%u gc=%u\n",
- atomic_read(&fscache_n_op_initialised),
- atomic_read(&fscache_n_op_deferred_release),
- atomic_read(&fscache_n_op_release),
- atomic_read(&fscache_n_op_gc));
+	seq_printf(m, "IO     : rd=%u wr=%u\n",
+ atomic_read(&fscache_n_read),
+ atomic_read(&fscache_n_write));
- seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
- atomic_read(&fscache_n_cop_alloc_object),
- atomic_read(&fscache_n_cop_lookup_object),
- atomic_read(&fscache_n_cop_lookup_complete),
- atomic_read(&fscache_n_cop_grab_object));
- seq_printf(m, "CacheOp: inv=%d upo=%d dro=%d pto=%d atc=%d syn=%d\n",
- atomic_read(&fscache_n_cop_invalidate_object),
- atomic_read(&fscache_n_cop_update_object),
- atomic_read(&fscache_n_cop_drop_object),
- atomic_read(&fscache_n_cop_put_object),
- atomic_read(&fscache_n_cop_attr_changed),
- atomic_read(&fscache_n_cop_sync_cache));
- seq_printf(m, "CacheOp: rap=%d ras=%d alp=%d als=%d wrp=%d ucp=%d dsp=%d\n",
- atomic_read(&fscache_n_cop_read_or_alloc_page),
- atomic_read(&fscache_n_cop_read_or_alloc_pages),
- atomic_read(&fscache_n_cop_allocate_page),
- atomic_read(&fscache_n_cop_allocate_pages),
- atomic_read(&fscache_n_cop_write_page),
- atomic_read(&fscache_n_cop_uncache_page),
- atomic_read(&fscache_n_cop_dissociate_pages));
- seq_printf(m, "CacheEv: nsp=%d stl=%d rtr=%d cul=%d\n",
- atomic_read(&fscache_n_cache_no_space_reject),
- atomic_read(&fscache_n_cache_stale_objects),
- atomic_read(&fscache_n_cache_retired_objects),
- atomic_read(&fscache_n_cache_culled_objects));
netfs_stats_show(m);
return 0;
}
diff --git a/fs/fscache/volume.c b/fs/fscache/volume.c
new file mode 100644
index 000000000000..f2aa7dbad766
--- /dev/null
+++ b/fs/fscache/volume.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Volume-level cache cookie handling.
+ *
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#define FSCACHE_DEBUG_LEVEL COOKIE
+#include <linux/export.h>
+#include <linux/slab.h>
+#include "internal.h"
+
+#define fscache_volume_hash_shift 10
+static struct hlist_bl_head fscache_volume_hash[1 << fscache_volume_hash_shift];
+static atomic_t fscache_volume_debug_id;
+static LIST_HEAD(fscache_volumes);
+
+static void fscache_create_volume_work(struct work_struct *work);
+
+struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where)
+{
+ int ref;
+
+ __refcount_inc(&volume->ref, &ref);
+ trace_fscache_volume(volume->debug_id, ref + 1, where);
+ return volume;
+}
+
+static void fscache_see_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where)
+{
+ int ref = refcount_read(&volume->ref);
+
+ trace_fscache_volume(volume->debug_id, ref, where);
+}
+
+/*
+ * Pin the cache behind a volume so that we can access it.
+ */
+static void __fscache_begin_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ n_accesses = atomic_inc_return(&volume->n_accesses);
+ smp_mb__after_atomic();
+ trace_fscache_access_volume(volume->debug_id, cookie ? cookie->debug_id : 0,
+ refcount_read(&volume->ref),
+ n_accesses, why);
+}
+
+/**
+ * fscache_begin_volume_access - Pin a cache so a volume can be accessed
+ * @volume: The volume cookie
+ * @cookie: A datafile cookie for a tracing reference (or NULL)
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Attempt to pin the cache to prevent it from going away whilst we're
+ * accessing a volume; returns true if successful.  This works as follows:
+ *
+ * (1) If the cache tests as not live (state is not FSCACHE_CACHE_IS_ACTIVE),
+ * then we return false to indicate access was not permitted.
+ *
+ * (2) If the cache tests as live, then we increment the volume's n_accesses
+ * count and then recheck the cache liveness, ending the access if it
+ * ceased to be live.
+ *
+ * (3) When we end the access, we decrement the volume's n_accesses and wake
+ * up any waiters if it reaches 0.
+ *
+ * (4) Whilst the cache is caching, the volume's n_accesses is kept
+ * artificially incremented to prevent wakeups from happening.
+ *
+ * (5) When the cache is taken offline, the state is changed to prevent new
+ * accesses, the volume's n_accesses is decremented and we wait for it to
+ * become 0.
+ *
+ * The datafile @cookie and the @why indicator are merely provided for tracing
+ * purposes.
+ */
+bool fscache_begin_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ if (!fscache_cache_is_live(volume->cache))
+ return false;
+ __fscache_begin_volume_access(volume, cookie, why);
+ if (!fscache_cache_is_live(volume->cache)) {
+ fscache_end_volume_access(volume, cookie, fscache_access_unlive);
+ return false;
+ }
+ return true;
+}
+
+/**
+ * fscache_end_volume_access - Unpin a cache at the end of an access.
+ * @volume: The volume cookie
+ * @cookie: A datafile cookie for a tracing reference (or NULL)
+ * @why: An indication of the circumstances of the access for tracing
+ *
+ * Unpin a cache volume after we've accessed it. The datafile @cookie and the
+ * @why indicator are merely provided for tracing purposes.
+ */
+void fscache_end_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why)
+{
+ int n_accesses;
+
+ smp_mb__before_atomic();
+ n_accesses = atomic_dec_return(&volume->n_accesses);
+ trace_fscache_access_volume(volume->debug_id, cookie ? cookie->debug_id : 0,
+ refcount_read(&volume->ref),
+ n_accesses, why);
+ if (n_accesses == 0)
+ wake_up_var(&volume->n_accesses);
+}
+EXPORT_SYMBOL(fscache_end_volume_access);
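
A minimal sketch of the intended begin/end pairing; example_volume_io() is hypothetical and the fscache_access_io_read trace label is illustrative:

	static int example_volume_io(struct fscache_volume *volume,
				     struct fscache_cookie *cookie)
	{
		if (!fscache_begin_volume_access(volume, cookie,
						 fscache_access_io_read))
			return -ENOBUFS;	/* cache not live */

		/* ... safe to use volume->cache here ... */

		fscache_end_volume_access(volume, cookie,
					  fscache_access_io_read);
		return 0;
	}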
+
+static bool fscache_volume_same(const struct fscache_volume *a,
+ const struct fscache_volume *b)
+{
+ size_t klen;
+
+ if (a->key_hash != b->key_hash ||
+ a->cache != b->cache ||
+ a->key[0] != b->key[0])
+ return false;
+
+ klen = round_up(a->key[0] + 1, sizeof(__le32));
+ return memcmp(a->key, b->key, klen) == 0;
+}
+
+static bool fscache_is_acquire_pending(struct fscache_volume *volume)
+{
+ return test_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &volume->flags);
+}
+
+static void fscache_wait_on_volume_collision(struct fscache_volume *candidate,
+ unsigned int collidee_debug_id)
+{
+ wait_var_event_timeout(&candidate->flags,
+ !fscache_is_acquire_pending(candidate), 20 * HZ);
+	if (fscache_is_acquire_pending(candidate)) {
+		pr_notice("Potential volume collision new=%08x old=%08x\n",
+ candidate->debug_id, collidee_debug_id);
+ fscache_stat(&fscache_n_volumes_collision);
+ wait_var_event(&candidate->flags, !fscache_is_acquire_pending(candidate));
+ }
+}
+
+/*
+ * Attempt to insert the new volume into the hash. If there's a collision, we
+ * wait for the old volume to complete if it's being relinquished, and return
+ * an error otherwise.
+ */
+static bool fscache_hash_volume(struct fscache_volume *candidate)
+{
+ struct fscache_volume *cursor;
+ struct hlist_bl_head *h;
+ struct hlist_bl_node *p;
+ unsigned int bucket, collidee_debug_id = 0;
+
+ bucket = candidate->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1);
+ h = &fscache_volume_hash[bucket];
+
+ hlist_bl_lock(h);
+ hlist_bl_for_each_entry(cursor, p, h, hash_link) {
+ if (fscache_volume_same(candidate, cursor)) {
+ if (!test_bit(FSCACHE_VOLUME_RELINQUISHED, &cursor->flags))
+ goto collision;
+ fscache_see_volume(cursor, fscache_volume_get_hash_collision);
+ set_bit(FSCACHE_VOLUME_COLLIDED_WITH, &cursor->flags);
+ set_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &candidate->flags);
+ collidee_debug_id = cursor->debug_id;
+ break;
+ }
+ }
+
+ hlist_bl_add_head(&candidate->hash_link, h);
+ hlist_bl_unlock(h);
+
+ if (test_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &candidate->flags))
+ fscache_wait_on_volume_collision(candidate, collidee_debug_id);
+ return true;
+
+collision:
+ fscache_see_volume(cursor, fscache_volume_collision);
+ hlist_bl_unlock(h);
+ return false;
+}
+
+/*
+ * Allocate and initialise a volume representation cookie.
+ */
+static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
+ const char *cache_name,
+ const void *coherency_data,
+ size_t coherency_len)
+{
+ struct fscache_volume *volume;
+ struct fscache_cache *cache;
+ size_t klen, hlen;
+ char *key;
+
+ if (!coherency_data)
+ coherency_len = 0;
+
+ cache = fscache_lookup_cache(cache_name, false);
+ if (IS_ERR(cache))
+ return NULL;
+
+ volume = kzalloc(struct_size(volume, coherency, coherency_len),
+ GFP_KERNEL);
+ if (!volume)
+ goto err_cache;
+
+ volume->cache = cache;
+ volume->coherency_len = coherency_len;
+ if (coherency_data)
+ memcpy(volume->coherency, coherency_data, coherency_len);
+ INIT_LIST_HEAD(&volume->proc_link);
+ INIT_WORK(&volume->work, fscache_create_volume_work);
+ refcount_set(&volume->ref, 1);
+ spin_lock_init(&volume->lock);
+
+ /* Stick the length on the front of the key and pad it out to make
+ * hashing easier.
+ */
+ klen = strlen(volume_key);
+ hlen = round_up(1 + klen + 1, sizeof(__le32));
+ key = kzalloc(hlen, GFP_KERNEL);
+ if (!key)
+ goto err_vol;
+ key[0] = klen;
+ memcpy(key + 1, volume_key, klen);
+
+ volume->key = key;
+ volume->key_hash = fscache_hash(0, key, hlen);
+
+ volume->debug_id = atomic_inc_return(&fscache_volume_debug_id);
+ down_write(&fscache_addremove_sem);
+ atomic_inc(&cache->n_volumes);
+ list_add_tail(&volume->proc_link, &fscache_volumes);
+ fscache_see_volume(volume, fscache_volume_new_acquire);
+ fscache_stat(&fscache_n_volumes);
+ up_write(&fscache_addremove_sem);
+ _leave(" = v=%x", volume->debug_id);
+ return volume;
+
+err_vol:
+ kfree(volume);
+err_cache:
+ fscache_put_cache(cache, fscache_cache_put_alloc_volume);
+ fscache_stat(&fscache_n_volumes_nomem);
+ return NULL;
+}
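
A worked example of the padded key layout built above, for an illustrative volume_key of "afs":

	/*
	 *   klen = 3, hlen = round_up(1 + 3 + 1, 4) = 8
	 *   key  = { 0x03, 'a', 'f', 's', 0x00, 0x00, 0x00, 0x00 }
	 *
	 * fscache_hash() covers all hlen bytes, while fscache_volume_same()
	 * compares round_up(key[0] + 1, 4) = 4 bytes with a single memcmp().
	 */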
+
+/*
+ * Create a volume's representation on disk.  We hold a volume ref and a
+ * cache access that must be released once the work is done.
+ */
+static void fscache_create_volume_work(struct work_struct *work)
+{
+ const struct fscache_cache_ops *ops;
+ struct fscache_volume *volume =
+ container_of(work, struct fscache_volume, work);
+
+ fscache_see_volume(volume, fscache_volume_see_create_work);
+
+ ops = volume->cache->ops;
+ if (ops->acquire_volume)
+ ops->acquire_volume(volume);
+ fscache_end_cache_access(volume->cache,
+ fscache_access_acquire_volume_end);
+
+ clear_bit_unlock(FSCACHE_VOLUME_CREATING, &volume->flags);
+ wake_up_bit(&volume->flags, FSCACHE_VOLUME_CREATING);
+ fscache_put_volume(volume, fscache_volume_put_create_work);
+}
+
+/*
+ * Dispatch a worker thread to create a volume's representation on disk.
+ */
+void fscache_create_volume(struct fscache_volume *volume, bool wait)
+{
+ if (test_and_set_bit(FSCACHE_VOLUME_CREATING, &volume->flags))
+ goto maybe_wait;
+ if (volume->cache_priv)
+ goto no_wait; /* We raced */
+ if (!fscache_begin_cache_access(volume->cache,
+ fscache_access_acquire_volume))
+ goto no_wait;
+
+ fscache_get_volume(volume, fscache_volume_get_create_work);
+ if (!schedule_work(&volume->work))
+ fscache_put_volume(volume, fscache_volume_put_create_work);
+
+maybe_wait:
+ if (wait) {
+ fscache_see_volume(volume, fscache_volume_wait_create_work);
+ wait_on_bit(&volume->flags, FSCACHE_VOLUME_CREATING,
+ TASK_UNINTERRUPTIBLE);
+ }
+ return;
+no_wait:
+ clear_bit_unlock(FSCACHE_VOLUME_CREATING, &volume->flags);
+ wake_up_bit(&volume->flags, FSCACHE_VOLUME_CREATING);
+}
+
+/*
+ * Acquire a volume representation cookie and link it to a (proposed) cache.
+ */
+struct fscache_volume *__fscache_acquire_volume(const char *volume_key,
+ const char *cache_name,
+ const void *coherency_data,
+ size_t coherency_len)
+{
+ struct fscache_volume *volume;
+
+ volume = fscache_alloc_volume(volume_key, cache_name,
+ coherency_data, coherency_len);
+ if (!volume)
+ return ERR_PTR(-ENOMEM);
+
+ if (!fscache_hash_volume(volume)) {
+ fscache_put_volume(volume, fscache_volume_put_hash_collision);
+ return ERR_PTR(-EBUSY);
+ }
+
+ fscache_create_volume(volume, false);
+ return volume;
+}
+EXPORT_SYMBOL(__fscache_acquire_volume);
+
+static void fscache_wake_pending_volume(struct fscache_volume *volume,
+ struct hlist_bl_head *h)
+{
+ struct fscache_volume *cursor;
+ struct hlist_bl_node *p;
+
+ hlist_bl_for_each_entry(cursor, p, h, hash_link) {
+ if (fscache_volume_same(cursor, volume)) {
+ fscache_see_volume(cursor, fscache_volume_see_hash_wake);
+ clear_bit(FSCACHE_VOLUME_ACQUIRE_PENDING, &cursor->flags);
+ wake_up_bit(&cursor->flags, FSCACHE_VOLUME_ACQUIRE_PENDING);
+ return;
+ }
+ }
+}
+
+/*
+ * Remove a volume cookie from the hash table.
+ */
+static void fscache_unhash_volume(struct fscache_volume *volume)
+{
+ struct hlist_bl_head *h;
+ unsigned int bucket;
+
+ bucket = volume->key_hash & (ARRAY_SIZE(fscache_volume_hash) - 1);
+ h = &fscache_volume_hash[bucket];
+
+ hlist_bl_lock(h);
+ hlist_bl_del(&volume->hash_link);
+ if (test_bit(FSCACHE_VOLUME_COLLIDED_WITH, &volume->flags))
+ fscache_wake_pending_volume(volume, h);
+ hlist_bl_unlock(h);
+}
+
+/*
+ * Free a volume cookie, detaching it from its cache.
+ */
+static void fscache_free_volume(struct fscache_volume *volume)
+{
+ struct fscache_cache *cache = volume->cache;
+
+ if (volume->cache_priv) {
+ __fscache_begin_volume_access(volume, NULL,
+ fscache_access_relinquish_volume);
+ if (volume->cache_priv)
+ cache->ops->free_volume(volume);
+ fscache_end_volume_access(volume, NULL,
+ fscache_access_relinquish_volume_end);
+ }
+
+ down_write(&fscache_addremove_sem);
+ list_del_init(&volume->proc_link);
+ atomic_dec(&volume->cache->n_volumes);
+ up_write(&fscache_addremove_sem);
+
+ if (!hlist_bl_unhashed(&volume->hash_link))
+ fscache_unhash_volume(volume);
+
+ trace_fscache_volume(volume->debug_id, 0, fscache_volume_free);
+ kfree(volume->key);
+ kfree(volume);
+ fscache_stat_d(&fscache_n_volumes);
+ fscache_put_cache(cache, fscache_cache_put_volume);
+}
+
+/*
+ * Drop a reference to a volume cookie.
+ */
+void fscache_put_volume(struct fscache_volume *volume,
+ enum fscache_volume_trace where)
+{
+ if (volume) {
+ unsigned int debug_id = volume->debug_id;
+ bool zero;
+ int ref;
+
+ zero = __refcount_dec_and_test(&volume->ref, &ref);
+ trace_fscache_volume(debug_id, ref - 1, where);
+ if (zero)
+ fscache_free_volume(volume);
+ }
+}
+
+/*
+ * Relinquish a volume representation cookie.
+ */
+void __fscache_relinquish_volume(struct fscache_volume *volume,
+ const void *coherency_data,
+ bool invalidate)
+{
+ if (WARN_ON(test_and_set_bit(FSCACHE_VOLUME_RELINQUISHED, &volume->flags)))
+ return;
+
+ if (invalidate) {
+ set_bit(FSCACHE_VOLUME_INVALIDATE, &volume->flags);
+ } else if (coherency_data) {
+ memcpy(volume->coherency, coherency_data, volume->coherency_len);
+ }
+
+ fscache_put_volume(volume, fscache_volume_put_relinquish);
+}
+EXPORT_SYMBOL(__fscache_relinquish_volume);
+
+/**
+ * fscache_withdraw_volume - Withdraw a volume from being cached
+ * @volume: Volume cookie
+ *
+ * Withdraw a cache volume from service, waiting for all accesses to complete
+ * before returning.
+ */
+void fscache_withdraw_volume(struct fscache_volume *volume)
+{
+ int n_accesses;
+
+ _debug("withdraw V=%x", volume->debug_id);
+
+ /* Allow wakeups on dec-to-0 */
+ n_accesses = atomic_dec_return(&volume->n_accesses);
+ trace_fscache_access_volume(volume->debug_id, 0,
+ refcount_read(&volume->ref),
+ n_accesses, fscache_access_cache_unpin);
+
+ wait_var_event(&volume->n_accesses,
+ atomic_read(&volume->n_accesses) == 0);
+}
+EXPORT_SYMBOL(fscache_withdraw_volume);
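
How this ties into the scheme documented at fscache_begin_volume_access():

	/* While the cache is live, the volume's n_accesses carries an extra
	 * +1 bias (step 4 above); the atomic_dec_return() here drops that
	 * bias (step 5), then wait_var_event() sleeps until the in-flight
	 * accesses drain to zero.
	 */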
+
+#ifdef CONFIG_PROC_FS
+/*
+ * Generate a list of volumes in /proc/fs/fscache/volumes
+ */
+static int fscache_volumes_seq_show(struct seq_file *m, void *v)
+{
+ struct fscache_volume *volume;
+
+ if (v == &fscache_volumes) {
+ seq_puts(m,
+ "VOLUME REF nCOOK ACC FL CACHE KEY\n"
+ "======== ===== ===== === == =============== ================\n");
+ return 0;
+ }
+
+ volume = list_entry(v, struct fscache_volume, proc_link);
+ seq_printf(m,
+ "%08x %5d %5d %3d %02lx %-15.15s %s\n",
+ volume->debug_id,
+ refcount_read(&volume->ref),
+ atomic_read(&volume->n_cookies),
+ atomic_read(&volume->n_accesses),
+ volume->flags,
+ volume->cache->name ?: "-",
+ volume->key + 1);
+ return 0;
+}
+
+static void *fscache_volumes_seq_start(struct seq_file *m, loff_t *_pos)
+ __acquires(&fscache_addremove_sem)
+{
+ down_read(&fscache_addremove_sem);
+ return seq_list_start_head(&fscache_volumes, *_pos);
+}
+
+static void *fscache_volumes_seq_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+ return seq_list_next(v, &fscache_volumes, _pos);
+}
+
+static void fscache_volumes_seq_stop(struct seq_file *m, void *v)
+ __releases(&fscache_addremove_sem)
+{
+ up_read(&fscache_addremove_sem);
+}
+
+const struct seq_operations fscache_volumes_seq_ops = {
+ .start = fscache_volumes_seq_start,
+ .next = fscache_volumes_seq_next,
+ .stop = fscache_volumes_seq_stop,
+ .show = fscache_volumes_seq_show,
+};
+#endif /* CONFIG_PROC_FS */
diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig
index 40ce9a1c12e5..038ed0b9aaa5 100644
--- a/fs/fuse/Kconfig
+++ b/fs/fuse/Kconfig
@@ -45,7 +45,7 @@ config FUSE_DAX
select INTERVAL_TREE
depends on VIRTIO_FS
depends on FS_DAX
- depends on DAX_DRIVER
+ depends on DAX
help
This allows bypassing guest page cache and allows mapping host page
cache directly in guest address space.
diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
index 713818d74de6..182b24a14804 100644
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -1279,11 +1279,14 @@ out_err:
return ret;
}
-int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev)
+int fuse_dax_conn_alloc(struct fuse_conn *fc, enum fuse_dax_mode dax_mode,
+ struct dax_device *dax_dev)
{
struct fuse_conn_dax *fcd;
int err;
+ fc->dax_mode = dax_mode;
+
if (!dax_dev)
return 0;
@@ -1327,17 +1330,46 @@ static const struct address_space_operations fuse_dax_file_aops = {
.invalidatepage = noop_invalidatepage,
};
-void fuse_dax_inode_init(struct inode *inode)
+static bool fuse_should_enable_dax(struct inode *inode, unsigned int flags)
{
struct fuse_conn *fc = get_fuse_conn(inode);
+ enum fuse_dax_mode dax_mode = fc->dax_mode;
+
+ if (dax_mode == FUSE_DAX_NEVER)
+ return false;
+ /*
+	 * fc->dax may be NULL in 'inode' mode when the filesystem device
+	 * doesn't support DAX, in which case it silently falls back to
+	 * 'never' mode.
+ */
if (!fc->dax)
+ return false;
+
+ if (dax_mode == FUSE_DAX_ALWAYS)
+ return true;
+
+ /* dax_mode is FUSE_DAX_INODE* */
+ return fc->inode_dax && (flags & FUSE_ATTR_DAX);
+}
+
+void fuse_dax_inode_init(struct inode *inode, unsigned int flags)
+{
+ if (!fuse_should_enable_dax(inode, flags))
return;
inode->i_flags |= S_DAX;
inode->i_data.a_ops = &fuse_dax_file_aops;
}
+void fuse_dax_dontcache(struct inode *inode, unsigned int flags)
+{
+ struct fuse_conn *fc = get_fuse_conn(inode);
+
+ if (fuse_is_inode_dax_mode(fc->dax_mode) &&
+ ((bool) IS_DAX(inode) != (bool) (flags & FUSE_ATTR_DAX)))
+ d_mark_dontcache(inode);
+}
+
bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment)
{
if (fc->dax && (map_alignment > FUSE_DAX_SHIFT)) {
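
A summary of the per-inode DAX decision implemented by fuse_should_enable_dax() above:

	/*
	 *   dax_mode           fc->dax  FUSE_ATTR_DAX   S_DAX set?
	 *   FUSE_DAX_NEVER     any      any             no
	 *   any mode           NULL     any             no ('never' fallback)
	 *   FUSE_DAX_ALWAYS    set      any             yes
	 *   FUSE_DAX_INODE_*   set      set             yes, if fc->inode_dax
	 */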
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 0654bfedcbb0..656e921f3506 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -17,6 +17,9 @@
#include <linux/xattr.h>
#include <linux/iversion.h>
#include <linux/posix_acl.h>
+#include <linux/security.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
static void fuse_advise_use_readdirplus(struct inode *dir)
{
@@ -456,6 +459,62 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry,
return ERR_PTR(err);
}
+static int get_security_context(struct dentry *entry, umode_t mode,
+ void **security_ctx, u32 *security_ctxlen)
+{
+ struct fuse_secctx *fctx;
+ struct fuse_secctx_header *header;
+ void *ctx = NULL, *ptr;
+ u32 ctxlen, total_len = sizeof(*header);
+ int err, nr_ctx = 0;
+ const char *name;
+ size_t namelen;
+
+ err = security_dentry_init_security(entry, mode, &entry->d_name,
+ &name, &ctx, &ctxlen);
+ if (err) {
+ if (err != -EOPNOTSUPP)
+ goto out_err;
+		/* No LSM supports this security hook; ignore the error */
+ ctxlen = 0;
+ ctx = NULL;
+ }
+
+ if (ctxlen) {
+ nr_ctx = 1;
+ namelen = strlen(name) + 1;
+ err = -EIO;
+ if (WARN_ON(namelen > XATTR_NAME_MAX + 1 || ctxlen > S32_MAX))
+ goto out_err;
+ total_len += FUSE_REC_ALIGN(sizeof(*fctx) + namelen + ctxlen);
+ }
+
+ err = -ENOMEM;
+ header = ptr = kzalloc(total_len, GFP_KERNEL);
+ if (!ptr)
+ goto out_err;
+
+ header->nr_secctx = nr_ctx;
+ header->size = total_len;
+ ptr += sizeof(*header);
+ if (nr_ctx) {
+ fctx = ptr;
+ fctx->size = ctxlen;
+ ptr += sizeof(*fctx);
+
+ strcpy(ptr, name);
+ ptr += namelen;
+
+ memcpy(ptr, ctx, ctxlen);
+ }
+ *security_ctxlen = total_len;
+ *security_ctx = header;
+ err = 0;
+out_err:
+ kfree(ctx);
+ return err;
+}
+
/*
* Atomic create+open operation
*
@@ -476,6 +535,8 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
struct fuse_entry_out outentry;
struct fuse_inode *fi;
struct fuse_file *ff;
+ void *security_ctx = NULL;
+ u32 security_ctxlen;
/* Userspace expects S_IFREG in create mode */
BUG_ON((mode & S_IFMT) != S_IFREG);
@@ -517,7 +578,20 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
args.out_args[0].value = &outentry;
args.out_args[1].size = sizeof(outopen);
args.out_args[1].value = &outopen;
+
+ if (fm->fc->init_security) {
+ err = get_security_context(entry, mode, &security_ctx,
+ &security_ctxlen);
+ if (err)
+ goto out_put_forget_req;
+
+ args.in_numargs = 3;
+ args.in_args[2].size = security_ctxlen;
+ args.in_args[2].value = security_ctx;
+ }
+
err = fuse_simple_request(fm, &args);
+ kfree(security_ctx);
if (err)
goto out_free_ff;
@@ -620,6 +694,8 @@ static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
struct dentry *d;
int err;
struct fuse_forget_link *forget;
+ void *security_ctx = NULL;
+ u32 security_ctxlen;
if (fuse_is_bad(dir))
return -EIO;
@@ -633,7 +709,22 @@ static int create_new_entry(struct fuse_mount *fm, struct fuse_args *args,
args->out_numargs = 1;
args->out_args[0].size = sizeof(outarg);
args->out_args[0].value = &outarg;
+
+ if (fm->fc->init_security && args->opcode != FUSE_LINK) {
+ err = get_security_context(entry, mode, &security_ctx,
+ &security_ctxlen);
+ if (err)
+ goto out_put_forget_req;
+
+ BUG_ON(args->in_numargs != 2);
+
+ args->in_numargs = 3;
+ args->in_args[2].size = security_ctxlen;
+ args->in_args[2].value = security_ctx;
+ }
+
err = fuse_simple_request(fm, args);
+ kfree(security_ctx);
if (err)
goto out_put_forget_req;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 9d6c5f6361f7..829094451774 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2910,7 +2910,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end)
{
- int err = filemap_write_and_wait_range(inode->i_mapping, start, -1);
+ int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX);
if (!err)
fuse_sync_writes(inode);
@@ -3169,7 +3169,7 @@ static const struct address_space_operations fuse_file_aops = {
.write_end = fuse_write_end,
};
-void fuse_init_file_inode(struct inode *inode)
+void fuse_init_file_inode(struct inode *inode, unsigned int flags)
{
struct fuse_inode *fi = get_fuse_inode(inode);
@@ -3183,5 +3183,5 @@ void fuse_init_file_inode(struct inode *inode)
fi->writepages = RB_ROOT;
if (IS_ENABLED(CONFIG_FUSE_DAX))
- fuse_dax_inode_init(inode);
+ fuse_dax_inode_init(inode, flags);
}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 198637b41e19..e8e59fbdefeb 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -480,6 +480,18 @@ struct fuse_dev {
struct list_head entry;
};
+enum fuse_dax_mode {
+ FUSE_DAX_INODE_DEFAULT, /* default */
+ FUSE_DAX_ALWAYS, /* "-o dax=always" */
+ FUSE_DAX_NEVER, /* "-o dax=never" */
+ FUSE_DAX_INODE_USER, /* "-o dax=inode" */
+};
+
+static inline bool fuse_is_inode_dax_mode(enum fuse_dax_mode mode)
+{
+ return mode == FUSE_DAX_INODE_DEFAULT || mode == FUSE_DAX_INODE_USER;
+}
+
struct fuse_fs_context {
int fd;
struct file *file;
@@ -497,7 +509,7 @@ struct fuse_fs_context {
bool no_control:1;
bool no_force_umount:1;
bool legacy_opts_show:1;
- bool dax:1;
+ enum fuse_dax_mode dax_mode;
unsigned int max_read;
unsigned int blksize;
const char *subtype;
@@ -765,6 +777,12 @@ struct fuse_conn {
/* Propagate syncfs() to server */
unsigned int sync_fs:1;
+ /* Initialize security xattrs when creating a new inode */
+ unsigned int init_security:1;
+
+ /* Does the filesystem support per inode DAX? */
+ unsigned int inode_dax:1;
+
/** The number of requests waiting for completion */
atomic_t num_waiting;
@@ -802,6 +820,9 @@ struct fuse_conn {
struct list_head devices;
#ifdef CONFIG_FUSE_DAX
+ /* Dax mode */
+ enum fuse_dax_mode dax_mode;
+
/* Dax specific conn data, non-NULL if DAX is enabled */
struct fuse_conn_dax *dax;
#endif
@@ -1007,7 +1028,7 @@ int fuse_notify_poll_wakeup(struct fuse_conn *fc,
/**
* Initialize file operations on a regular file
*/
-void fuse_init_file_inode(struct inode *inode);
+void fuse_init_file_inode(struct inode *inode, unsigned int flags);
/**
* Initialize inode operations on regular files and special files
@@ -1269,11 +1290,13 @@ ssize_t fuse_dax_read_iter(struct kiocb *iocb, struct iov_iter *to);
ssize_t fuse_dax_write_iter(struct kiocb *iocb, struct iov_iter *from);
int fuse_dax_mmap(struct file *file, struct vm_area_struct *vma);
int fuse_dax_break_layouts(struct inode *inode, u64 dmap_start, u64 dmap_end);
-int fuse_dax_conn_alloc(struct fuse_conn *fc, struct dax_device *dax_dev);
+int fuse_dax_conn_alloc(struct fuse_conn *fc, enum fuse_dax_mode mode,
+ struct dax_device *dax_dev);
void fuse_dax_conn_free(struct fuse_conn *fc);
bool fuse_dax_inode_alloc(struct super_block *sb, struct fuse_inode *fi);
-void fuse_dax_inode_init(struct inode *inode);
+void fuse_dax_inode_init(struct inode *inode, unsigned int flags);
void fuse_dax_inode_cleanup(struct inode *inode);
+void fuse_dax_dontcache(struct inode *inode, unsigned int flags);
bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment);
void fuse_dax_cancel_work(struct fuse_conn *fc);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 8b89e3ba7df3..ee846ce371d8 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -301,6 +301,9 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
if (inval)
invalidate_inode_pages2(inode->i_mapping);
}
+
+ if (IS_ENABLED(CONFIG_FUSE_DAX))
+ fuse_dax_dontcache(inode, attr->flags);
}
static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
@@ -313,7 +316,7 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
inode->i_ctime.tv_nsec = attr->ctimensec;
if (S_ISREG(inode->i_mode)) {
fuse_init_common(inode);
- fuse_init_file_inode(inode);
+ fuse_init_file_inode(inode, attr->flags);
} else if (S_ISDIR(inode->i_mode))
fuse_init_dir(inode);
else if (S_ISLNK(inode->i_mode))
@@ -767,8 +770,12 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
seq_printf(m, ",blksize=%lu", sb->s_blocksize);
}
#ifdef CONFIG_FUSE_DAX
- if (fc->dax)
- seq_puts(m, ",dax");
+ if (fc->dax_mode == FUSE_DAX_ALWAYS)
+ seq_puts(m, ",dax=always");
+ else if (fc->dax_mode == FUSE_DAX_NEVER)
+ seq_puts(m, ",dax=never");
+ else if (fc->dax_mode == FUSE_DAX_INODE_USER)
+ seq_puts(m, ",dax=inode");
#endif
return 0;
@@ -1109,73 +1116,80 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
process_init_limits(fc, arg);
if (arg->minor >= 6) {
+ u64 flags = arg->flags | (u64) arg->flags2 << 32;
+
ra_pages = arg->max_readahead / PAGE_SIZE;
- if (arg->flags & FUSE_ASYNC_READ)
+ if (flags & FUSE_ASYNC_READ)
fc->async_read = 1;
- if (!(arg->flags & FUSE_POSIX_LOCKS))
+ if (!(flags & FUSE_POSIX_LOCKS))
fc->no_lock = 1;
if (arg->minor >= 17) {
- if (!(arg->flags & FUSE_FLOCK_LOCKS))
+ if (!(flags & FUSE_FLOCK_LOCKS))
fc->no_flock = 1;
} else {
- if (!(arg->flags & FUSE_POSIX_LOCKS))
+ if (!(flags & FUSE_POSIX_LOCKS))
fc->no_flock = 1;
}
- if (arg->flags & FUSE_ATOMIC_O_TRUNC)
+ if (flags & FUSE_ATOMIC_O_TRUNC)
fc->atomic_o_trunc = 1;
if (arg->minor >= 9) {
/* LOOKUP has dependency on proto version */
- if (arg->flags & FUSE_EXPORT_SUPPORT)
+ if (flags & FUSE_EXPORT_SUPPORT)
fc->export_support = 1;
}
- if (arg->flags & FUSE_BIG_WRITES)
+ if (flags & FUSE_BIG_WRITES)
fc->big_writes = 1;
- if (arg->flags & FUSE_DONT_MASK)
+ if (flags & FUSE_DONT_MASK)
fc->dont_mask = 1;
- if (arg->flags & FUSE_AUTO_INVAL_DATA)
+ if (flags & FUSE_AUTO_INVAL_DATA)
fc->auto_inval_data = 1;
- else if (arg->flags & FUSE_EXPLICIT_INVAL_DATA)
+ else if (flags & FUSE_EXPLICIT_INVAL_DATA)
fc->explicit_inval_data = 1;
- if (arg->flags & FUSE_DO_READDIRPLUS) {
+ if (flags & FUSE_DO_READDIRPLUS) {
fc->do_readdirplus = 1;
- if (arg->flags & FUSE_READDIRPLUS_AUTO)
+ if (flags & FUSE_READDIRPLUS_AUTO)
fc->readdirplus_auto = 1;
}
- if (arg->flags & FUSE_ASYNC_DIO)
+ if (flags & FUSE_ASYNC_DIO)
fc->async_dio = 1;
- if (arg->flags & FUSE_WRITEBACK_CACHE)
+ if (flags & FUSE_WRITEBACK_CACHE)
fc->writeback_cache = 1;
- if (arg->flags & FUSE_PARALLEL_DIROPS)
+ if (flags & FUSE_PARALLEL_DIROPS)
fc->parallel_dirops = 1;
- if (arg->flags & FUSE_HANDLE_KILLPRIV)
+ if (flags & FUSE_HANDLE_KILLPRIV)
fc->handle_killpriv = 1;
if (arg->time_gran && arg->time_gran <= 1000000000)
fm->sb->s_time_gran = arg->time_gran;
- if ((arg->flags & FUSE_POSIX_ACL)) {
+ if ((flags & FUSE_POSIX_ACL)) {
fc->default_permissions = 1;
fc->posix_acl = 1;
fm->sb->s_xattr = fuse_acl_xattr_handlers;
}
- if (arg->flags & FUSE_CACHE_SYMLINKS)
+ if (flags & FUSE_CACHE_SYMLINKS)
fc->cache_symlinks = 1;
- if (arg->flags & FUSE_ABORT_ERROR)
+ if (flags & FUSE_ABORT_ERROR)
fc->abort_err = 1;
- if (arg->flags & FUSE_MAX_PAGES) {
+ if (flags & FUSE_MAX_PAGES) {
fc->max_pages =
min_t(unsigned int, fc->max_pages_limit,
max_t(unsigned int, arg->max_pages, 1));
}
- if (IS_ENABLED(CONFIG_FUSE_DAX) &&
- arg->flags & FUSE_MAP_ALIGNMENT &&
- !fuse_dax_check_alignment(fc, arg->map_alignment)) {
- ok = false;
+ if (IS_ENABLED(CONFIG_FUSE_DAX)) {
+ if (flags & FUSE_MAP_ALIGNMENT &&
+ !fuse_dax_check_alignment(fc, arg->map_alignment)) {
+ ok = false;
+ }
+ if (flags & FUSE_HAS_INODE_DAX)
+ fc->inode_dax = 1;
}
- if (arg->flags & FUSE_HANDLE_KILLPRIV_V2) {
+ if (flags & FUSE_HANDLE_KILLPRIV_V2) {
fc->handle_killpriv_v2 = 1;
fm->sb->s_flags |= SB_NOSEC;
}
- if (arg->flags & FUSE_SETXATTR_EXT)
+ if (flags & FUSE_SETXATTR_EXT)
fc->setxattr_ext = 1;
+ if (flags & FUSE_SECURITY_CTX)
+ fc->init_security = 1;
} else {
ra_pages = fc->max_read / PAGE_SIZE;
fc->no_lock = 1;
@@ -1203,13 +1217,14 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
void fuse_send_init(struct fuse_mount *fm)
{
struct fuse_init_args *ia;
+ u64 flags;
ia = kzalloc(sizeof(*ia), GFP_KERNEL | __GFP_NOFAIL);
ia->in.major = FUSE_KERNEL_VERSION;
ia->in.minor = FUSE_KERNEL_MINOR_VERSION;
ia->in.max_readahead = fm->sb->s_bdi->ra_pages * PAGE_SIZE;
- ia->in.flags |=
+ flags =
FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK |
FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
@@ -1219,13 +1234,19 @@ void fuse_send_init(struct fuse_mount *fm)
FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
- FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT;
+ FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT |
+ FUSE_SECURITY_CTX;
#ifdef CONFIG_FUSE_DAX
if (fm->fc->dax)
- ia->in.flags |= FUSE_MAP_ALIGNMENT;
+ flags |= FUSE_MAP_ALIGNMENT;
+ if (fuse_is_inode_dax_mode(fm->fc->dax_mode))
+ flags |= FUSE_HAS_INODE_DAX;
#endif
if (fm->fc->auto_submounts)
- ia->in.flags |= FUSE_SUBMOUNTS;
+ flags |= FUSE_SUBMOUNTS;
+
+ ia->in.flags = flags;
+ ia->in.flags2 = flags >> 32;
ia->args.opcode = FUSE_INIT;
ia->args.in_numargs = 1;
@@ -1514,7 +1535,7 @@ int fuse_fill_super_common(struct super_block *sb, struct fuse_fs_context *ctx)
sb->s_subtype = ctx->subtype;
ctx->subtype = NULL;
if (IS_ENABLED(CONFIG_FUSE_DAX)) {
- err = fuse_dax_conn_alloc(fc, ctx->dax_dev);
+ err = fuse_dax_conn_alloc(fc, ctx->dax_mode, ctx->dax_dev);
if (err)
goto err;
}
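
A sketch of the FUSE_INIT_EXT flag packing used on both sides above; fuse_init_unpack_flags() is a hypothetical helper, not kernel API:

	static inline u64 fuse_init_unpack_flags(const struct fuse_init_out *arg)
	{
		/* low 32 bits travel in ->flags, high 32 bits in ->flags2 */
		return arg->flags | (u64)arg->flags2 << 32;
	}

	/* the request side packs the same way:
	 *	ia->in.flags  = flags;
	 *	ia->in.flags2 = flags >> 32;
	 */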
diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c
index 4cfa4bc1f579..9d737904d07c 100644
--- a/fs/fuse/virtio_fs.c
+++ b/fs/fuse/virtio_fs.c
@@ -88,12 +88,21 @@ struct virtio_fs_req_work {
static int virtio_fs_enqueue_req(struct virtio_fs_vq *fsvq,
struct fuse_req *req, bool in_flight);
+static const struct constant_table dax_param_enums[] = {
+ {"always", FUSE_DAX_ALWAYS },
+ {"never", FUSE_DAX_NEVER },
+ {"inode", FUSE_DAX_INODE_USER },
+ {}
+};
+
enum {
OPT_DAX,
+ OPT_DAX_ENUM,
};
static const struct fs_parameter_spec virtio_fs_parameters[] = {
fsparam_flag("dax", OPT_DAX),
+ fsparam_enum("dax", OPT_DAX_ENUM, dax_param_enums),
{}
};
@@ -110,7 +119,10 @@ static int virtio_fs_parse_param(struct fs_context *fsc,
switch (opt) {
case OPT_DAX:
- ctx->dax = 1;
+ ctx->dax_mode = FUSE_DAX_ALWAYS;
+ break;
+ case OPT_DAX_ENUM:
+ ctx->dax_mode = result.uint_32;
break;
default:
return -EINVAL;
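
The two parameter specs above let a bare "-o dax" keep working as a flag while "-o dax=always|never|inode" selects a mode explicitly. A hedged userspace analogue of that tri-state parsing (option strings and enum names here are illustrative):

    #include <string.h>
    #include <stdio.h>

    enum dax_mode { DAX_NEVER, DAX_ALWAYS, DAX_INODE };

    static enum dax_mode parse_dax(const char *opt)
    {
        /* bare "dax" behaves like "dax=always", mirroring OPT_DAX */
        if (!strcmp(opt, "dax") || !strcmp(opt, "dax=always"))
            return DAX_ALWAYS;
        if (!strcmp(opt, "dax=inode"))
            return DAX_INODE;
        return DAX_NEVER; /* covers "dax=never" and option absent */
    }

    int main(void)
    {
        printf("%d %d %d\n", parse_dax("dax"), parse_dax("dax=inode"),
               parse_dax("dax=never"));   /* prints: 1 2 0 */
        return 0;
    }
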
@@ -753,20 +765,6 @@ static long virtio_fs_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
return nr_pages > max_nr_pages ? max_nr_pages : nr_pages;
}
-static size_t virtio_fs_copy_from_iter(struct dax_device *dax_dev,
- pgoff_t pgoff, void *addr,
- size_t bytes, struct iov_iter *i)
-{
- return copy_from_iter(addr, bytes, i);
-}
-
-static size_t virtio_fs_copy_to_iter(struct dax_device *dax_dev,
- pgoff_t pgoff, void *addr,
- size_t bytes, struct iov_iter *i)
-{
- return copy_to_iter(addr, bytes, i);
-}
-
static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
pgoff_t pgoff, size_t nr_pages)
{
@@ -783,8 +781,6 @@ static int virtio_fs_zero_page_range(struct dax_device *dax_dev,
static const struct dax_operations virtio_fs_dax_ops = {
.direct_access = virtio_fs_direct_access,
- .copy_from_iter = virtio_fs_copy_from_iter,
- .copy_to_iter = virtio_fs_copy_to_iter,
.zero_page_range = virtio_fs_zero_page_range,
};
@@ -850,7 +846,7 @@ static int virtio_fs_setup_dax(struct virtio_device *vdev, struct virtio_fs *fs)
dev_dbg(&vdev->dev, "%s: window kaddr 0x%px phys_addr 0x%llx len 0x%llx\n",
__func__, fs->window_kaddr, cache_reg.addr, cache_reg.len);
- fs->dax_dev = alloc_dax(fs, NULL, &virtio_fs_dax_ops, 0);
+ fs->dax_dev = alloc_dax(fs, &virtio_fs_dax_ops);
if (IS_ERR(fs->dax_dev))
return PTR_ERR(fs->dax_dev);
@@ -895,7 +891,7 @@ static int virtio_fs_probe(struct virtio_device *vdev)
return 0;
out_vqs:
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
virtio_fs_cleanup_vqs(vdev, fs);
kfree(fs->vqs);
@@ -927,7 +923,7 @@ static void virtio_fs_remove(struct virtio_device *vdev)
list_del_init(&fs->list);
virtio_fs_stop_all_queues(fs);
virtio_fs_drain_all_queues_locked(fs);
- vdev->config->reset(vdev);
+ virtio_reset_device(vdev);
virtio_fs_cleanup_vqs(vdev, fs);
vdev->priv = NULL;
@@ -1326,8 +1322,8 @@ static int virtio_fs_fill_super(struct super_block *sb, struct fs_context *fsc)
/* virtiofs allocates and installs its own fuse devices */
ctx->fudptr = NULL;
- if (ctx->dax) {
- if (!fs->dax_dev) {
+ if (ctx->dax_mode != FUSE_DAX_NEVER) {
+ if (ctx->dax_mode == FUSE_DAX_ALWAYS && !fs->dax_dev) {
err = -EINVAL;
pr_err("virtio-fs: dax can't be enabled as filesystem"
" device does not support it.\n");
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 3e718cfc19a7..8c39a8571b1f 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -704,10 +704,11 @@ static int gfs2_release(struct inode *inode, struct file *file)
kfree(file->private_data);
file->private_data = NULL;
- if (gfs2_rs_active(&ip->i_res))
- gfs2_rs_delete(ip, &inode->i_writecount);
- if (file->f_mode & FMODE_WRITE)
+ if (file->f_mode & FMODE_WRITE) {
+ if (gfs2_rs_active(&ip->i_res))
+ gfs2_rs_delete(ip, &inode->i_writecount);
gfs2_qa_put(ip);
+ }
return 0;
}
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index b7ab8430333c..6b23399eaee0 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -301,9 +301,6 @@ void gfs2_glock_queue_put(struct gfs2_glock *gl)
void gfs2_glock_put(struct gfs2_glock *gl)
{
- /* last put could call sleepable dlm api */
- might_sleep();
-
if (lockref_put_or_lock(&gl->gl_lockref))
return;
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index c0a34d9ddee4..a6002b2d146d 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -767,8 +767,7 @@ void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
wait_for_completion(&sdp->sd_kobj_unregister);
}
-static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env)
+static int gfs2_uevent(struct kobject *kobj, struct kobj_uevent_env *env)
{
struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
struct super_block *s = sdp->sd_vfs;
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index 456e87aec7fd..68b4240c6191 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -260,8 +260,10 @@ struct hfsplus_cat_folder {
__be32 access_date;
__be32 backup_date;
struct hfsplus_perm permissions;
- struct DInfo user_info;
- struct DXInfo finder_info;
+ struct_group_attr(info, __packed,
+ struct DInfo user_info;
+ struct DXInfo finder_info;
+ );
__be32 text_encoding;
__be32 subfolders; /* Subfolder count in HFSX. Reserved in HFS+. */
} __packed;
@@ -294,8 +296,10 @@ struct hfsplus_cat_file {
__be32 access_date;
__be32 backup_date;
struct hfsplus_perm permissions;
- struct FInfo user_info;
- struct FXInfo finder_info;
+ struct_group_attr(info, __packed,
+ struct FInfo user_info;
+ struct FXInfo finder_info;
+ );
__be32 text_encoding;
u32 reserved2;
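
struct_group_attr() wraps the two members in an anonymous union so that &entry.info spans both of them and one bounds-checked memcpy() covers the pair. A rough userspace sketch of what that expands to, using illustrative stand-in types rather than the real HFS+ ones:

    #include <string.h>
    #include <stdio.h>

    struct dinfo  { char bytes[16]; };
    struct dxinfo { char bytes[16]; };

    struct folder {
        union {
            struct {                    /* members stay addressable    */
                struct dinfo  user_info;
                struct dxinfo finder_info;
            };
            struct {                    /* mirror, reachable as .info  */
                struct dinfo  user_info;
                struct dxinfo finder_info;
            } info;
        };
    };

    int main(void)
    {
        struct folder f;
        char blob[32] = "finderinfo";

        memcpy(&f.info, blob, sizeof(f.info)); /* one copy, both members */
        printf("%zu\n", sizeof(f.info));       /* prints: 32             */
        printf("%s\n", f.user_info.bytes);     /* prints: finderinfo     */
        return 0;
    }
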
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index e2855ceefd39..49891b12c415 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -296,7 +296,7 @@ int __hfsplus_setxattr(struct inode *inode, const char *name,
sizeof(hfsplus_cat_entry));
if (be16_to_cpu(entry.type) == HFSPLUS_FOLDER) {
if (size == folder_finderinfo_len) {
- memcpy(&entry.folder.user_info, value,
+ memcpy(&entry.folder.info, value,
folder_finderinfo_len);
hfs_bnode_write(cat_fd.bnode, &entry,
cat_fd.entryoffset,
@@ -309,7 +309,7 @@ int __hfsplus_setxattr(struct inode *inode, const char *name,
}
} else if (be16_to_cpu(entry.type) == HFSPLUS_FILE) {
if (size == file_finderinfo_len) {
- memcpy(&entry.file.user_info, value,
+ memcpy(&entry.file.info, value,
file_finderinfo_len);
hfs_bnode_write(cat_fd.bnode, &entry,
cat_fd.entryoffset,
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 49d2e686be74..a7c6c7498be0 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -409,10 +409,11 @@ hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
struct vm_area_struct *vma;
/*
- * end == 0 indicates that the entire range after
- * start should be unmapped.
+ * end == 0 indicates that the entire range after start should be
+ * unmapped. Note, end is exclusive, whereas the interval tree takes
+ * an inclusive "last".
*/
- vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
+ vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
unsigned long v_offset;
unsigned long v_end;
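
The off-by-one fixed above comes from mixing conventions: the VFS range [start, end) is end-exclusive, while the interval tree takes an inclusive "last". A minimal sketch of the corrected conversion, assuming end == 0 still means "to the end of the file":

    #include <limits.h>
    #include <stdio.h>

    static unsigned long range_last(unsigned long end)
    {
        /* exclusive end -> inclusive last; 0 means "everything after start" */
        return end ? end - 1 : ULONG_MAX;
    }

    int main(void)
    {
        /* unmap pages [5, 8): pages 5, 6, 7 -> inclusive last is 7 */
        printf("%lu\n", range_last(8));   /* prints: 7 */
        printf("%lu\n", range_last(0));   /* prints: ULONG_MAX */
        return 0;
    }
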
diff --git a/fs/inode.c b/fs/inode.c
index 6b80a51129d5..63324df6fa27 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -67,11 +67,6 @@ const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);
-/*
- * Statistics gathering..
- */
-struct inodes_stat_t inodes_stat;
-
static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);
@@ -106,13 +101,43 @@ long get_nr_dirty_inodes(void)
* Handle nr_inode sysctl
*/
#ifdef CONFIG_SYSCTL
-int proc_nr_inodes(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+/*
+ * Statistics gathering.
+ */
+static struct inodes_stat_t inodes_stat;
+
+static int proc_nr_inodes(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
{
inodes_stat.nr_inodes = get_nr_inodes();
inodes_stat.nr_unused = get_nr_inodes_unused();
return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
+
+static struct ctl_table inodes_sysctls[] = {
+ {
+ .procname = "inode-nr",
+ .data = &inodes_stat,
+ .maxlen = 2*sizeof(long),
+ .mode = 0444,
+ .proc_handler = proc_nr_inodes,
+ },
+ {
+ .procname = "inode-state",
+ .data = &inodes_stat,
+ .maxlen = 7*sizeof(long),
+ .mode = 0444,
+ .proc_handler = proc_nr_inodes,
+ },
+ { }
+};
+
+static int __init init_fs_inode_sysctls(void)
+{
+ register_sysctl_init("fs", inodes_sysctls);
+ return 0;
+}
+early_initcall(init_fs_inode_sysctls);
#endif
static int no_open(struct inode *inode, struct file *file)
@@ -526,6 +551,55 @@ void __remove_inode_hash(struct inode *inode)
}
EXPORT_SYMBOL(__remove_inode_hash);
+void dump_mapping(const struct address_space *mapping)
+{
+ struct inode *host;
+ const struct address_space_operations *a_ops;
+ struct hlist_node *dentry_first;
+ struct dentry *dentry_ptr;
+ struct dentry dentry;
+ unsigned long ino;
+
+ /*
+ * If mapping is an invalid pointer, we don't want to crash
+ * accessing it, so probe everything depending on it carefully.
+ */
+ if (get_kernel_nofault(host, &mapping->host) ||
+ get_kernel_nofault(a_ops, &mapping->a_ops)) {
+ pr_warn("invalid mapping:%px\n", mapping);
+ return;
+ }
+
+ if (!host) {
+ pr_warn("aops:%ps\n", a_ops);
+ return;
+ }
+
+ if (get_kernel_nofault(dentry_first, &host->i_dentry.first) ||
+ get_kernel_nofault(ino, &host->i_ino)) {
+ pr_warn("aops:%ps invalid inode:%px\n", a_ops, host);
+ return;
+ }
+
+ if (!dentry_first) {
+ pr_warn("aops:%ps ino:%lx\n", a_ops, ino);
+ return;
+ }
+
+ dentry_ptr = container_of(dentry_first, struct dentry, d_u.d_alias);
+ if (get_kernel_nofault(dentry, dentry_ptr)) {
+ pr_warn("aops:%ps ino:%lx invalid dentry:%px\n",
+ a_ops, ino, dentry_ptr);
+ return;
+ }
+
+ /*
+ * if dentry is corrupted, the %pd handler may still crash,
+ * but it's unlikely that we reach here with a corrupt mapping
+ */
+ pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n", a_ops, ino, &dentry);
+}
+
void clear_inode(struct inode *inode)
{
/*
diff --git a/fs/internal.h b/fs/internal.h
index 7979ff8d168c..8590c973c2f4 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -37,7 +37,7 @@ static inline int emergency_thaw_bdev(struct super_block *sb)
/*
* buffer.c
*/
-int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
+int __block_write_begin_int(struct folio *folio, loff_t pos, unsigned len,
get_block_t *get_block, const struct iomap *iomap);
/*
diff --git a/fs/io-wq.c b/fs/io-wq.c
index 5c4f582d6549..bb7f161bb19c 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -48,7 +48,8 @@ struct io_worker {
struct io_wqe *wqe;
struct io_wq_work *cur_work;
- spinlock_t lock;
+ struct io_wq_work *next_work;
+ raw_spinlock_t lock;
struct completion ref_done;
@@ -405,8 +406,7 @@ static void io_wqe_dec_running(struct io_worker *worker)
* Worker will start processing some work. Move it to the busy list, if
* it's currently on the freelist
*/
-static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
- struct io_wq_work *work)
+static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker)
__must_hold(wqe->lock)
{
if (worker->flags & IO_WORKER_F_FREE) {
@@ -529,9 +529,10 @@ static void io_assign_current_work(struct io_worker *worker,
cond_resched();
}
- spin_lock(&worker->lock);
+ raw_spin_lock(&worker->lock);
worker->cur_work = work;
- spin_unlock(&worker->lock);
+ worker->next_work = NULL;
+ raw_spin_unlock(&worker->lock);
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
@@ -546,7 +547,7 @@ static void io_worker_handle_work(struct io_worker *worker)
do {
struct io_wq_work *work;
-get_next:
+
/*
* If we got some work, mark us as busy. If we didn't, but
* the list isn't empty, it means we stalled on hashed work.
@@ -555,9 +556,20 @@ get_next:
* clear the stalled flag.
*/
work = io_get_next_work(acct, worker);
- if (work)
- __io_worker_busy(wqe, worker, work);
-
+ if (work) {
+ __io_worker_busy(wqe, worker);
+
+ /*
+ * Make sure cancelation can find this, even before
+ * it becomes the active work. That avoids a window
+ * where the work has been removed from our general
+ * work list, but isn't yet discoverable as the
+ * current work item for this worker.
+ */
+ raw_spin_lock(&worker->lock);
+ worker->next_work = work;
+ raw_spin_unlock(&worker->lock);
+ }
raw_spin_unlock(&wqe->lock);
if (!work)
break;
@@ -594,11 +606,6 @@ get_next:
spin_unlock_irq(&wq->hash->wait.lock);
if (wq_has_sleeper(&wq->hash->wait))
wake_up(&wq->hash->wait);
- raw_spin_lock(&wqe->lock);
- /* skip unnecessary unlock-lock wqe->lock */
- if (!work)
- goto get_next;
- raw_spin_unlock(&wqe->lock);
}
} while (work);
@@ -670,7 +677,7 @@ loop:
*/
void io_wq_worker_running(struct task_struct *tsk)
{
- struct io_worker *worker = tsk->pf_io_worker;
+ struct io_worker *worker = tsk->worker_private;
if (!worker)
return;
@@ -688,7 +695,7 @@ void io_wq_worker_running(struct task_struct *tsk)
*/
void io_wq_worker_sleeping(struct task_struct *tsk)
{
- struct io_worker *worker = tsk->pf_io_worker;
+ struct io_worker *worker = tsk->worker_private;
if (!worker)
return;
@@ -707,7 +714,7 @@ void io_wq_worker_sleeping(struct task_struct *tsk)
static void io_init_new_worker(struct io_wqe *wqe, struct io_worker *worker,
struct task_struct *tsk)
{
- tsk->pf_io_worker = worker;
+ tsk->worker_private = worker;
worker->task = tsk;
set_cpus_allowed_ptr(tsk, wqe->cpu_mask);
tsk->flags |= PF_NO_SETAFFINITY;
@@ -815,7 +822,7 @@ fail:
refcount_set(&worker->ref, 1);
worker->wqe = wqe;
- spin_lock_init(&worker->lock);
+ raw_spin_lock_init(&worker->lock);
init_completion(&worker->ref_done);
if (index == IO_WQ_ACCT_BOUND)
@@ -973,6 +980,19 @@ void io_wq_hash_work(struct io_wq_work *work, void *val)
work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
+static bool __io_wq_worker_cancel(struct io_worker *worker,
+ struct io_cb_cancel_data *match,
+ struct io_wq_work *work)
+{
+ if (work && match->fn(work, match->data)) {
+ work->flags |= IO_WQ_WORK_CANCEL;
+ set_notify_signal(worker->task);
+ return true;
+ }
+
+ return false;
+}
+
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
struct io_cb_cancel_data *match = data;
@@ -981,13 +1001,11 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
* Hold the lock to avoid ->cur_work going out of scope, caller
* may dereference the passed in work.
*/
- spin_lock(&worker->lock);
- if (worker->cur_work &&
- match->fn(worker->cur_work, match->data)) {
- set_notify_signal(worker->task);
+ raw_spin_lock(&worker->lock);
+ if (__io_wq_worker_cancel(worker, match, worker->cur_work) ||
+ __io_wq_worker_cancel(worker, match, worker->next_work))
match->nr_running++;
- }
- spin_unlock(&worker->lock);
+ raw_spin_unlock(&worker->lock);
return match->nr_running && !match->cancel_all;
}
@@ -1039,17 +1057,16 @@ static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
{
int i;
retry:
- raw_spin_lock(&wqe->lock);
for (i = 0; i < IO_WQ_ACCT_NR; i++) {
struct io_wqe_acct *acct = io_get_acct(wqe, i == 0);
if (io_acct_cancel_pending_work(wqe, acct, match)) {
+ raw_spin_lock(&wqe->lock);
if (match->cancel_all)
goto retry;
- return;
+ break;
}
}
- raw_spin_unlock(&wqe->lock);
}
static void io_wqe_cancel_running_work(struct io_wqe *wqe,
@@ -1074,25 +1091,27 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
* First check pending list, if we're lucky we can just remove it
* from there. CANCEL_OK means that the work is returned as-new,
* no completion will be posted for it.
- */
- for_each_node(node) {
- struct io_wqe *wqe = wq->wqes[node];
-
- io_wqe_cancel_pending_work(wqe, &match);
- if (match.nr_pending && !match.cancel_all)
- return IO_WQ_CANCEL_OK;
- }
-
- /*
- * Now check if a free (going busy) or busy worker has the work
+ *
+ * Then check if a free (going busy) or busy worker has the work
* currently running. If we find it there, we'll return CANCEL_RUNNING
* as an indication that we attempt to signal cancellation. The
* completion will run normally in this case.
+ *
+ * Do both of these while holding the wqe->lock, to ensure that
+ * we'll find a work item regardless of state.
*/
for_each_node(node) {
struct io_wqe *wqe = wq->wqes[node];
+ raw_spin_lock(&wqe->lock);
+ io_wqe_cancel_pending_work(wqe, &match);
+ if (match.nr_pending && !match.cancel_all) {
+ raw_spin_unlock(&wqe->lock);
+ return IO_WQ_CANCEL_OK;
+ }
+
io_wqe_cancel_running_work(wqe, &match);
+ raw_spin_unlock(&wqe->lock);
if (match.nr_running && !match.cancel_all)
return IO_WQ_CANCEL_RUNNING;
}
@@ -1263,7 +1282,9 @@ static void io_wq_destroy(struct io_wq *wq)
.fn = io_wq_work_match_all,
.cancel_all = true,
};
+ raw_spin_lock(&wqe->lock);
io_wqe_cancel_pending_work(wqe, &match);
+ raw_spin_unlock(&wqe->lock);
free_cpumask_var(wqe->cpu_mask);
kfree(wqe);
}
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 41bf37674a49..dbecd27656c7 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -52,6 +52,28 @@ static inline void wq_list_add_after(struct io_wq_work_node *node,
list->last = node;
}
+/**
+ * wq_list_merge - merge the second list into the first one.
+ * @list0: the first list
+ * @list1: the second list
+ * Return the first node after the merge.
+ */
+static inline struct io_wq_work_node *wq_list_merge(struct io_wq_work_list *list0,
+ struct io_wq_work_list *list1)
+{
+ struct io_wq_work_node *ret;
+
+ if (!list0->first) {
+ ret = list1->first;
+ } else {
+ ret = list0->first;
+ list0->last->next = list1->first;
+ }
+ INIT_WQ_LIST(list0);
+ INIT_WQ_LIST(list1);
+ return ret;
+}
+
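
A small userspace demo of the wq_list_merge() semantics above — both inputs are reinitialised and the combined chain is returned via its head (simplified node types, not the io-wq ones):

    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *next; int val; };
    struct list { struct node *first, *last; };

    #define INIT_LIST(l) do { (l)->first = NULL; (l)->last = NULL; } while (0)

    static struct node *list_merge(struct list *l0, struct list *l1)
    {
        struct node *ret;

        if (!l0->first) {
            ret = l1->first;
        } else {
            ret = l0->first;
            l0->last->next = l1->first;  /* splice l1 behind l0 */
        }
        INIT_LIST(l0);
        INIT_LIST(l1);
        return ret;
    }

    int main(void)
    {
        struct node a = { NULL, 1 }, b = { NULL, 2 };
        struct list l0 = { &a, &a }, l1 = { &b, &b };
        struct node *n = list_merge(&l0, &l1);

        for (; n; n = n->next)
            printf("%d ", n->val);       /* prints: 1 2 */
        printf("\n");
        return 0;
    }
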
static inline void wq_list_add_tail(struct io_wq_work_node *node,
struct io_wq_work_list *list)
{
@@ -200,6 +222,6 @@ static inline void io_wq_worker_running(struct task_struct *tsk)
static inline bool io_wq_current_is_worker(void)
{
return in_task() && (current->flags & PF_IO_WORKER) &&
- current->pf_io_worker;
+ current->worker_private;
}
#endif
diff --git a/fs/io_uring.c b/fs/io_uring.c
index fb2a0cb4aaf8..77b9c7e4793b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -57,7 +57,7 @@
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
@@ -108,7 +108,8 @@
#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
IOSQE_IO_HARDLINK | IOSQE_ASYNC)
-#define SQE_VALID_FLAGS (SQE_COMMON_FLAGS|IOSQE_BUFFER_SELECT|IOSQE_IO_DRAIN)
+#define SQE_VALID_FLAGS (SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
+ IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
@@ -320,6 +321,7 @@ struct io_submit_state {
bool plug_started;
bool need_plug;
+ bool flush_cqes;
unsigned short submit_nr;
struct blk_plug plug;
};
@@ -337,6 +339,7 @@ struct io_ring_ctx {
unsigned int restricted: 1;
unsigned int off_timeout_used: 1;
unsigned int drain_active: 1;
+ unsigned int drain_disabled: 1;
} ____cacheline_aligned_in_smp;
/* submission data */
@@ -471,6 +474,7 @@ struct io_uring_task {
spinlock_t task_lock;
struct io_wq_work_list task_list;
+ struct io_wq_work_list prior_task_list;
struct callback_head task_work;
bool task_running;
};
@@ -483,8 +487,6 @@ struct io_poll_iocb {
struct file *file;
struct wait_queue_head *head;
__poll_t events;
- bool done;
- bool canceled;
struct wait_queue_entry wait;
};
@@ -721,6 +723,7 @@ enum {
REQ_F_HARDLINK_BIT = IOSQE_IO_HARDLINK_BIT,
REQ_F_FORCE_ASYNC_BIT = IOSQE_ASYNC_BIT,
REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT,
+ REQ_F_CQE_SKIP_BIT = IOSQE_CQE_SKIP_SUCCESS_BIT,
/* first byte is taken by user flags, shift it to not overlap */
REQ_F_FAIL_BIT = 8,
@@ -737,6 +740,7 @@ enum {
REQ_F_REFCOUNT_BIT,
REQ_F_ARM_LTIMEOUT_BIT,
REQ_F_ASYNC_DATA_BIT,
+ REQ_F_SKIP_LINK_CQES_BIT,
/* keep async read/write and isreg together and in order */
REQ_F_SUPPORT_NOWAIT_BIT,
REQ_F_ISREG_BIT,
@@ -758,6 +762,8 @@ enum {
REQ_F_FORCE_ASYNC = BIT(REQ_F_FORCE_ASYNC_BIT),
/* IOSQE_BUFFER_SELECT */
REQ_F_BUFFER_SELECT = BIT(REQ_F_BUFFER_SELECT_BIT),
+ /* IOSQE_CQE_SKIP_SUCCESS */
+ REQ_F_CQE_SKIP = BIT(REQ_F_CQE_SKIP_BIT),
/* fail rest of links */
REQ_F_FAIL = BIT(REQ_F_FAIL_BIT),
@@ -791,6 +797,8 @@ enum {
REQ_F_ARM_LTIMEOUT = BIT(REQ_F_ARM_LTIMEOUT_BIT),
/* ->async_data allocated */
REQ_F_ASYNC_DATA = BIT(REQ_F_ASYNC_DATA_BIT),
+ /* don't post CQEs while failing linked requests */
+ REQ_F_SKIP_LINK_CQES = BIT(REQ_F_SKIP_LINK_CQES_BIT),
};
struct async_poll {
@@ -882,6 +890,7 @@ struct io_kiocb {
const struct cred *creds;
/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
struct io_buffer *kbuf;
+ atomic_t poll_refs;
};
struct io_tctx_node {
@@ -1108,8 +1117,8 @@ static void io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
bool cancel_all);
static void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
-static bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
- s32 res, u32 cflags);
+static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);
+
static void io_put_req(struct io_kiocb *req);
static void io_put_req_deferred(struct io_kiocb *req);
static void io_dismantle_req(struct io_kiocb *req);
@@ -1183,12 +1192,6 @@ static inline bool req_ref_put_and_test(struct io_kiocb *req)
return atomic_dec_and_test(&req->refs);
}
-static inline void req_ref_put(struct io_kiocb *req)
-{
- WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
- WARN_ON_ONCE(req_ref_put_and_test(req));
-}
-
static inline void req_ref_get(struct io_kiocb *req)
{
WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
@@ -1264,6 +1267,26 @@ static inline void io_req_set_rsrc_node(struct io_kiocb *req,
}
}
+static unsigned int __io_put_kbuf(struct io_kiocb *req)
+{
+ struct io_buffer *kbuf = req->kbuf;
+ unsigned int cflags;
+
+ cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
+ cflags |= IORING_CQE_F_BUFFER;
+ req->flags &= ~REQ_F_BUFFER_SELECTED;
+ kfree(kbuf);
+ req->kbuf = NULL;
+ return cflags;
+}
+
+static inline unsigned int io_put_kbuf(struct io_kiocb *req)
+{
+ if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
+ return 0;
+ return __io_put_kbuf(req);
+}
+
static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
{
bool got = percpu_ref_tryget(ref);
@@ -1340,6 +1363,10 @@ static inline bool req_has_async_data(struct io_kiocb *req)
static inline void req_set_fail(struct io_kiocb *req)
{
req->flags |= REQ_F_FAIL;
+ if (req->flags & REQ_F_CQE_SKIP) {
+ req->flags &= ~REQ_F_CQE_SKIP;
+ req->flags |= REQ_F_SKIP_LINK_CQES;
+ }
}
static inline void req_fail_link_node(struct io_kiocb *req, int res)
@@ -1553,8 +1580,11 @@ static void io_prep_async_link(struct io_kiocb *req)
static inline void io_req_add_compl_list(struct io_kiocb *req)
{
- struct io_submit_state *state = &req->ctx->submit_state;
+ struct io_ring_ctx *ctx = req->ctx;
+ struct io_submit_state *state = &ctx->submit_state;
+ if (!(req->flags & REQ_F_CQE_SKIP))
+ ctx->submit_state.flush_cqes = true;
wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}
@@ -1599,7 +1629,7 @@ static void io_kill_timeout(struct io_kiocb *req, int status)
atomic_set(&req->ctx->cq_timeouts,
atomic_read(&req->ctx->cq_timeouts) + 1);
list_del_init(&req->timeout.list);
- io_cqring_fill_event(req->ctx, req->user_data, status, 0);
+ io_fill_cqe_req(req, status, 0);
io_put_req_deferred(req);
}
}
@@ -1830,6 +1860,18 @@ static inline void io_get_task_refs(int nr)
io_task_refs_refill(tctx);
}
+static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
+{
+ struct io_uring_task *tctx = task->io_uring;
+ unsigned int refs = tctx->cached_refs;
+
+ if (refs) {
+ tctx->cached_refs = 0;
+ percpu_counter_sub(&tctx->inflight, refs);
+ put_task_struct_many(task, refs);
+ }
+}
+
static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
s32 res, u32 cflags)
{
@@ -1858,8 +1900,8 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
return true;
}
-static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
- s32 res, u32 cflags)
+static inline bool __io_fill_cqe(struct io_ring_ctx *ctx, u64 user_data,
+ s32 res, u32 cflags)
{
struct io_uring_cqe *cqe;
@@ -1880,20 +1922,26 @@ static inline bool __io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data
return io_cqring_event_overflow(ctx, user_data, res, cflags);
}
-/* not as hot to bloat with inlining */
-static noinline bool io_cqring_fill_event(struct io_ring_ctx *ctx, u64 user_data,
- s32 res, u32 cflags)
+static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags)
{
- return __io_cqring_fill_event(ctx, user_data, res, cflags);
+ if (!(req->flags & REQ_F_CQE_SKIP))
+ __io_fill_cqe(req->ctx, req->user_data, res, cflags);
}
-static void io_req_complete_post(struct io_kiocb *req, s32 res,
- u32 cflags)
+static noinline bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data,
+ s32 res, u32 cflags)
+{
+ ctx->cq_extra++;
+ return __io_fill_cqe(ctx, user_data, res, cflags);
+}
+
+static void __io_req_complete_post(struct io_kiocb *req, s32 res,
+ u32 cflags)
{
struct io_ring_ctx *ctx = req->ctx;
- spin_lock(&ctx->completion_lock);
- __io_cqring_fill_event(ctx, req->user_data, res, cflags);
+ if (!(req->flags & REQ_F_CQE_SKIP))
+ __io_fill_cqe(ctx, req->user_data, res, cflags);
/*
* If we're the last reference to this request, add to our locked
* free_list cache.
@@ -1913,6 +1961,15 @@ static void io_req_complete_post(struct io_kiocb *req, s32 res,
wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
ctx->locked_free_nr++;
}
+}
+
+static void io_req_complete_post(struct io_kiocb *req, s32 res,
+ u32 cflags)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+
+ spin_lock(&ctx->completion_lock);
+ __io_req_complete_post(req, res, cflags);
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
@@ -2101,8 +2158,8 @@ static bool io_kill_linked_timeout(struct io_kiocb *req)
link->timeout.head = NULL;
if (hrtimer_try_to_cancel(&io->timer) != -1) {
list_del(&link->timeout.list);
- io_cqring_fill_event(link->ctx, link->user_data,
- -ECANCELED, 0);
+ /* leave REQ_F_CQE_SKIP to io_fill_cqe_req */
+ io_fill_cqe_req(link, -ECANCELED, 0);
io_put_req_deferred(link);
return true;
}
@@ -2114,6 +2171,7 @@ static void io_fail_links(struct io_kiocb *req)
__must_hold(&req->ctx->completion_lock)
{
struct io_kiocb *nxt, *link = req->link;
+ bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;
req->link = NULL;
while (link) {
@@ -2126,7 +2184,10 @@ static void io_fail_links(struct io_kiocb *req)
link->link = NULL;
trace_io_uring_fail_link(req, link);
- io_cqring_fill_event(link->ctx, link->user_data, res, 0);
+ if (!ignore_cqes) {
+ link->flags &= ~REQ_F_CQE_SKIP;
+ io_fill_cqe_req(link, res, 0);
+ }
io_put_req_deferred(link);
link = nxt;
}
@@ -2143,8 +2204,8 @@ static bool io_disarm_next(struct io_kiocb *req)
req->flags &= ~REQ_F_ARM_LTIMEOUT;
if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
io_remove_next_linked(req);
- io_cqring_fill_event(link->ctx, link->user_data,
- -ECANCELED, 0);
+ /* leave REQ_F_CQE_SKIP to io_fill_cqe_req */
+ io_fill_cqe_req(link, -ECANCELED, 0);
io_put_req_deferred(link);
posted = true;
}
@@ -2171,7 +2232,7 @@ static void __io_req_find_next_prep(struct io_kiocb *req)
spin_lock(&ctx->completion_lock);
posted = io_disarm_next(req);
if (posted)
- io_commit_cqring(req->ctx);
+ io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
if (posted)
io_cqring_ev_posted(ctx);
@@ -2208,51 +2269,108 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
percpu_ref_put(&ctx->refs);
}
+static inline void ctx_commit_and_unlock(struct io_ring_ctx *ctx)
+{
+ io_commit_cqring(ctx);
+ spin_unlock(&ctx->completion_lock);
+ io_cqring_ev_posted(ctx);
+}
+
+static void handle_prev_tw_list(struct io_wq_work_node *node,
+ struct io_ring_ctx **ctx, bool *uring_locked)
+{
+ if (*ctx && !*uring_locked)
+ spin_lock(&(*ctx)->completion_lock);
+
+ do {
+ struct io_wq_work_node *next = node->next;
+ struct io_kiocb *req = container_of(node, struct io_kiocb,
+ io_task_work.node);
+
+ if (req->ctx != *ctx) {
+ if (unlikely(!*uring_locked && *ctx))
+ ctx_commit_and_unlock(*ctx);
+
+ ctx_flush_and_put(*ctx, uring_locked);
+ *ctx = req->ctx;
+ /* if not contended, grab and improve batching */
+ *uring_locked = mutex_trylock(&(*ctx)->uring_lock);
+ percpu_ref_get(&(*ctx)->refs);
+ if (unlikely(!*uring_locked))
+ spin_lock(&(*ctx)->completion_lock);
+ }
+ if (likely(*uring_locked))
+ req->io_task_work.func(req, uring_locked);
+ else
+ __io_req_complete_post(req, req->result, io_put_kbuf(req));
+ node = next;
+ } while (node);
+
+ if (unlikely(!*uring_locked))
+ ctx_commit_and_unlock(*ctx);
+}
+
+static void handle_tw_list(struct io_wq_work_node *node,
+ struct io_ring_ctx **ctx, bool *locked)
+{
+ do {
+ struct io_wq_work_node *next = node->next;
+ struct io_kiocb *req = container_of(node, struct io_kiocb,
+ io_task_work.node);
+
+ if (req->ctx != *ctx) {
+ ctx_flush_and_put(*ctx, locked);
+ *ctx = req->ctx;
+ /* if not contended, grab and improve batching */
+ *locked = mutex_trylock(&(*ctx)->uring_lock);
+ percpu_ref_get(&(*ctx)->refs);
+ }
+ req->io_task_work.func(req, locked);
+ node = next;
+ } while (node);
+}
+
static void tctx_task_work(struct callback_head *cb)
{
- bool locked = false;
+ bool uring_locked = false;
struct io_ring_ctx *ctx = NULL;
struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
task_work);
while (1) {
- struct io_wq_work_node *node;
+ struct io_wq_work_node *node1, *node2;
- if (!tctx->task_list.first && locked)
+ if (!tctx->task_list.first &&
+ !tctx->prior_task_list.first && uring_locked)
io_submit_flush_completions(ctx);
spin_lock_irq(&tctx->task_lock);
- node = tctx->task_list.first;
+ node1 = tctx->prior_task_list.first;
+ node2 = tctx->task_list.first;
INIT_WQ_LIST(&tctx->task_list);
- if (!node)
+ INIT_WQ_LIST(&tctx->prior_task_list);
+ if (!node2 && !node1)
tctx->task_running = false;
spin_unlock_irq(&tctx->task_lock);
- if (!node)
+ if (!node2 && !node1)
break;
- do {
- struct io_wq_work_node *next = node->next;
- struct io_kiocb *req = container_of(node, struct io_kiocb,
- io_task_work.node);
-
- if (req->ctx != ctx) {
- ctx_flush_and_put(ctx, &locked);
- ctx = req->ctx;
- /* if not contended, grab and improve batching */
- locked = mutex_trylock(&ctx->uring_lock);
- percpu_ref_get(&ctx->refs);
- }
- req->io_task_work.func(req, &locked);
- node = next;
- } while (node);
+ if (node1)
+ handle_prev_tw_list(node1, &ctx, &uring_locked);
+ if (node2)
+ handle_tw_list(node2, &ctx, &uring_locked);
cond_resched();
}
- ctx_flush_and_put(ctx, &locked);
+ ctx_flush_and_put(ctx, &uring_locked);
+
+ /* relaxed read is enough as only the task itself sets ->in_idle */
+ if (unlikely(atomic_read(&tctx->in_idle)))
+ io_uring_drop_tctx_refs(current);
}
-static void io_req_task_work_add(struct io_kiocb *req)
+static void io_req_task_work_add(struct io_kiocb *req, bool priority)
{
struct task_struct *tsk = req->task;
struct io_uring_task *tctx = tsk->io_uring;
@@ -2264,7 +2382,10 @@ static void io_req_task_work_add(struct io_kiocb *req)
WARN_ON_ONCE(!tctx);
spin_lock_irqsave(&tctx->task_lock, flags);
- wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
+ if (priority)
+ wq_list_add_tail(&req->io_task_work.node, &tctx->prior_task_list);
+ else
+ wq_list_add_tail(&req->io_task_work.node, &tctx->task_list);
running = tctx->task_running;
if (!running)
tctx->task_running = true;
@@ -2289,8 +2410,7 @@ static void io_req_task_work_add(struct io_kiocb *req)
spin_lock_irqsave(&tctx->task_lock, flags);
tctx->task_running = false;
- node = tctx->task_list.first;
- INIT_WQ_LIST(&tctx->task_list);
+ node = wq_list_merge(&tctx->prior_task_list, &tctx->task_list);
spin_unlock_irqrestore(&tctx->task_lock, flags);
while (node) {
@@ -2327,19 +2447,19 @@ static void io_req_task_queue_fail(struct io_kiocb *req, int ret)
{
req->result = ret;
req->io_task_work.func = io_req_task_cancel;
- io_req_task_work_add(req);
+ io_req_task_work_add(req, false);
}
static void io_req_task_queue(struct io_kiocb *req)
{
req->io_task_work.func = io_req_task_submit;
- io_req_task_work_add(req);
+ io_req_task_work_add(req, false);
}
static void io_req_task_queue_reissue(struct io_kiocb *req)
{
req->io_task_work.func = io_queue_async_work;
- io_req_task_work_add(req);
+ io_req_task_work_add(req, false);
}
static inline void io_queue_next(struct io_kiocb *req)
@@ -2403,17 +2523,22 @@ static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
struct io_wq_work_node *node, *prev;
struct io_submit_state *state = &ctx->submit_state;
- spin_lock(&ctx->completion_lock);
- wq_list_for_each(node, prev, &state->compl_reqs) {
- struct io_kiocb *req = container_of(node, struct io_kiocb,
+ if (state->flush_cqes) {
+ spin_lock(&ctx->completion_lock);
+ wq_list_for_each(node, prev, &state->compl_reqs) {
+ struct io_kiocb *req = container_of(node, struct io_kiocb,
comp_list);
- __io_cqring_fill_event(ctx, req->user_data, req->result,
- req->cflags);
+ if (!(req->flags & REQ_F_CQE_SKIP))
+ __io_fill_cqe(ctx, req->user_data, req->result,
+ req->cflags);
+ }
+
+ io_commit_cqring(ctx);
+ spin_unlock(&ctx->completion_lock);
+ io_cqring_ev_posted(ctx);
+ state->flush_cqes = false;
}
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
io_free_batch_list(ctx, state->compl_reqs.first);
INIT_WQ_LIST(&state->compl_reqs);
@@ -2444,7 +2569,7 @@ static inline void io_put_req_deferred(struct io_kiocb *req)
{
if (req_ref_put_and_test(req)) {
req->io_task_work.func = io_free_req_work;
- io_req_task_work_add(req);
+ io_req_task_work_add(req, false);
}
}
@@ -2463,24 +2588,6 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}
-static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
-{
- unsigned int cflags;
-
- cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
- cflags |= IORING_CQE_F_BUFFER;
- req->flags &= ~REQ_F_BUFFER_SELECTED;
- kfree(kbuf);
- return cflags;
-}
-
-static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
-{
- if (likely(!(req->flags & REQ_F_BUFFER_SELECTED)))
- return 0;
- return io_put_kbuf(req, req->kbuf);
-}
-
static inline bool io_run_task_work(void)
{
if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
@@ -2543,8 +2650,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
/* order with io_complete_rw_iopoll(), e.g. ->result updates */
if (!smp_load_acquire(&req->iopoll_completed))
break;
- __io_cqring_fill_event(ctx, req->user_data, req->result,
- io_put_rw_kbuf(req));
+ if (unlikely(req->flags & REQ_F_CQE_SKIP))
+ continue;
+
+ __io_fill_cqe(ctx, req->user_data, req->result, io_put_kbuf(req));
nr_events++;
}
@@ -2718,9 +2827,9 @@ static bool __io_complete_rw_common(struct io_kiocb *req, long res)
return false;
}
-static void io_req_task_complete(struct io_kiocb *req, bool *locked)
+static inline void io_req_task_complete(struct io_kiocb *req, bool *locked)
{
- unsigned int cflags = io_put_rw_kbuf(req);
+ unsigned int cflags = io_put_kbuf(req);
int res = req->result;
if (*locked) {
@@ -2731,12 +2840,12 @@ static void io_req_task_complete(struct io_kiocb *req, bool *locked)
}
}
-static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
+static void __io_complete_rw(struct io_kiocb *req, long res,
unsigned int issue_flags)
{
if (__io_complete_rw_common(req, res))
return;
- __io_req_complete(req, issue_flags, req->result, io_put_rw_kbuf(req));
+ __io_req_complete(req, issue_flags, req->result, io_put_kbuf(req));
}
static void io_complete_rw(struct kiocb *kiocb, long res)
@@ -2747,7 +2856,7 @@ static void io_complete_rw(struct kiocb *kiocb, long res)
return;
req->result = res;
req->io_task_work.func = io_req_task_complete;
- io_req_task_work_add(req);
+ io_req_task_work_add(req, !!(req->ctx->flags & IORING_SETUP_SQPOLL));
}
static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
@@ -2965,10 +3074,9 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
}
}
-static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
+static void kiocb_done(struct io_kiocb *req, ssize_t ret,
unsigned int issue_flags)
{
- struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
struct io_async_rw *io = req->async_data;
/* add previously done IO, if any */
@@ -2980,28 +3088,21 @@ static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
}
if (req->flags & REQ_F_CUR_POS)
- req->file->f_pos = kiocb->ki_pos;
- if (ret >= 0 && (kiocb->ki_complete == io_complete_rw))
- __io_complete_rw(req, ret, 0, issue_flags);
+ req->file->f_pos = req->rw.kiocb.ki_pos;
+ if (ret >= 0 && (req->rw.kiocb.ki_complete == io_complete_rw))
+ __io_complete_rw(req, ret, issue_flags);
else
- io_rw_done(kiocb, ret);
+ io_rw_done(&req->rw.kiocb, ret);
if (req->flags & REQ_F_REISSUE) {
req->flags &= ~REQ_F_REISSUE;
if (io_resubmit_prep(req)) {
io_req_task_queue_reissue(req);
} else {
- unsigned int cflags = io_put_rw_kbuf(req);
- struct io_ring_ctx *ctx = req->ctx;
-
req_set_fail(req);
- if (issue_flags & IO_URING_F_UNLOCKED) {
- mutex_lock(&ctx->uring_lock);
- __io_req_complete(req, issue_flags, ret, cflags);
- mutex_unlock(&ctx->uring_lock);
- } else {
- __io_req_complete(req, issue_flags, ret, cflags);
- }
+ req->result = ret;
+ req->io_task_work.func = io_req_task_complete;
+ io_req_task_work_add(req, false);
}
}
}
@@ -3229,10 +3330,12 @@ static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
size_t sqe_len;
ssize_t ret;
- BUILD_BUG_ON(ERR_PTR(0) != NULL);
-
- if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED)
- return ERR_PTR(io_import_fixed(req, rw, iter));
+ if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
+ ret = io_import_fixed(req, rw, iter);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
+ }
/* buffer index only valid with fixed read/write, or buffer select */
if (unlikely(req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)))
@@ -3250,15 +3353,18 @@ static struct iovec *__io_import_iovec(int rw, struct io_kiocb *req,
}
ret = import_single_range(rw, buf, sqe_len, s->fast_iov, iter);
- return ERR_PTR(ret);
+ if (ret)
+ return ERR_PTR(ret);
+ return NULL;
}
iovec = s->fast_iov;
if (req->flags & REQ_F_BUFFER_SELECT) {
ret = io_iov_buffer_select(req, iovec, issue_flags);
- if (!ret)
- iov_iter_init(iter, rw, iovec, 1, iovec->iov_len);
- return ERR_PTR(ret);
+ if (ret)
+ return ERR_PTR(ret);
+ iov_iter_init(iter, rw, iovec, 1, iovec->iov_len);
+ return NULL;
}
ret = __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
@@ -3629,7 +3735,7 @@ static int io_read(struct io_kiocb *req, unsigned int issue_flags)
iov_iter_restore(&s->iter, &s->iter_state);
} while (ret > 0);
done:
- kiocb_done(kiocb, ret, issue_flags);
+ kiocb_done(req, ret, issue_flags);
out_free:
/* it's faster to check here then delegate to kfree */
if (iovec)
@@ -3726,7 +3832,7 @@ static int io_write(struct io_kiocb *req, unsigned int issue_flags)
if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
goto copy_iov;
done:
- kiocb_done(kiocb, ret2, issue_flags);
+ kiocb_done(req, ret2, issue_flags);
} else {
copy_iov:
iov_iter_restore(&s->iter, &s->iter_state);
@@ -4839,17 +4945,18 @@ static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
min_ret = iov_iter_count(&kmsg->msg.msg_iter);
ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
- if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
- return io_setup_async_msg(req, kmsg);
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
+ if (ret < min_ret) {
+ if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
+ return io_setup_async_msg(req, kmsg);
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ req_set_fail(req);
+ }
/* fast path, check for non-NULL to avoid function call */
if (kmsg->free_iov)
kfree(kmsg->free_iov);
req->flags &= ~REQ_F_NEED_CLEANUP;
- if (ret < min_ret)
- req_set_fail(req);
__io_req_complete(req, issue_flags, ret, 0);
return 0;
}
@@ -4885,13 +4992,13 @@ static int io_send(struct io_kiocb *req, unsigned int issue_flags)
msg.msg_flags = flags;
ret = sock_sendmsg(sock, &msg);
- if ((issue_flags & IO_URING_F_NONBLOCK) && ret == -EAGAIN)
- return -EAGAIN;
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
-
- if (ret < min_ret)
+ if (ret < min_ret) {
+ if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
+ return -EAGAIN;
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
req_set_fail(req);
+ }
__io_req_complete(req, issue_flags, ret, 0);
return 0;
}
@@ -4991,11 +5098,6 @@ static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
return io_buffer_select(req, &sr->len, sr->bgid, issue_flags);
}
-static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
-{
- return io_put_kbuf(req, req->kbuf);
-}
-
static int io_recvmsg_prep_async(struct io_kiocb *req)
{
int ret;
@@ -5033,8 +5135,7 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
struct socket *sock;
struct io_buffer *kbuf;
unsigned flags;
- int min_ret = 0;
- int ret, cflags = 0;
+ int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
sock = sock_from_file(req->file);
@@ -5068,20 +5169,21 @@ static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
kmsg->uaddr, flags);
- if (force_nonblock && ret == -EAGAIN)
- return io_setup_async_msg(req, kmsg);
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
+ if (ret < min_ret) {
+ if (ret == -EAGAIN && force_nonblock)
+ return io_setup_async_msg(req, kmsg);
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ req_set_fail(req);
+ } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
+ req_set_fail(req);
+ }
- if (req->flags & REQ_F_BUFFER_SELECTED)
- cflags = io_put_recv_kbuf(req);
/* fast path, check for non-NULL to avoid function call */
if (kmsg->free_iov)
kfree(kmsg->free_iov);
req->flags &= ~REQ_F_NEED_CLEANUP;
- if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
- req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, cflags);
+ __io_req_complete(req, issue_flags, ret, io_put_kbuf(req));
return 0;
}
@@ -5094,8 +5196,7 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
struct socket *sock;
struct iovec iov;
unsigned flags;
- int min_ret = 0;
- int ret, cflags = 0;
+ int ret, min_ret = 0;
bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
sock = sock_from_file(req->file);
@@ -5127,16 +5228,17 @@ static int io_recv(struct io_kiocb *req, unsigned int issue_flags)
min_ret = iov_iter_count(&msg.msg_iter);
ret = sock_recvmsg(sock, &msg, flags);
- if (force_nonblock && ret == -EAGAIN)
- return -EAGAIN;
- if (ret == -ERESTARTSYS)
- ret = -EINTR;
+ if (ret < min_ret) {
+ if (ret == -EAGAIN && force_nonblock)
+ return -EAGAIN;
+ if (ret == -ERESTARTSYS)
+ ret = -EINTR;
+ req_set_fail(req);
+ } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
- if (req->flags & REQ_F_BUFFER_SELECTED)
- cflags = io_put_recv_kbuf(req);
- if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
req_set_fail(req);
- __io_req_complete(req, issue_flags, ret, cflags);
+ }
+ __io_req_complete(req, issue_flags, ret, io_put_kbuf(req));
return 0;
}
@@ -5303,52 +5405,23 @@ struct io_poll_table {
int error;
};
-static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
- __poll_t mask, io_req_tw_func_t func)
-{
- /* for instances that support it check for an event match first: */
- if (mask && !(mask & poll->events))
- return 0;
-
- trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
-
- list_del_init(&poll->wait.entry);
+#define IO_POLL_CANCEL_FLAG BIT(31)
+#define IO_POLL_REF_MASK ((1u << 20) - 1)
- req->result = mask;
- req->io_task_work.func = func;
-
- /*
- * If this fails, then the task is exiting. When a task exits, the
- * work gets canceled, so just cancel this request as well instead
- * of executing it. We can't safely execute it anyway, as we may not
- * have the needed state needed for it anyway.
- */
- io_req_task_work_add(req);
- return 1;
+/*
+ * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, the request is
+ * free and we can bump the count to acquire ownership. Modifying a request
+ * while not owning it is disallowed, which prevents races both when enqueueing
+ * task_works and between arming poll and wakeups.
+ */
+static inline bool io_poll_get_ownership(struct io_kiocb *req)
+{
+ return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}
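
A userspace sketch of the ownership rule described above: the caller whose increment takes the refcount from zero owns the request, later wakeups only add references, and the owner subtracts everything it observed — a nonzero remainder would force another loop. REF_MASK here mirrors IO_POLL_REF_MASK in spirit only:

    #include <stdatomic.h>
    #include <stdio.h>

    #define REF_MASK ((1u << 20) - 1)

    static atomic_uint poll_refs;

    static int get_ownership(void)
    {
        /* first caller to bump the refs from zero becomes the owner */
        return !(atomic_fetch_add(&poll_refs, 1) & REF_MASK);
    }

    int main(void)
    {
        unsigned int v;

        printf("%d\n", get_ownership());  /* prints: 1 (we own it)     */
        printf("%d\n", get_ownership());  /* prints: 0 (already owned) */

        /* owner drops every reference it observed; a nonzero remainder
         * would mean another wakeup raced in and the owner must retry */
        v = atomic_load(&poll_refs);
        printf("%u\n", atomic_fetch_sub(&poll_refs, v & REF_MASK)
                       - (v & REF_MASK)); /* prints: 0 */
        return 0;
    }
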
-static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
- __acquires(&req->ctx->completion_lock)
+static void io_poll_mark_cancelled(struct io_kiocb *req)
{
- struct io_ring_ctx *ctx = req->ctx;
-
- /* req->task == current here, checking PF_EXITING is safe */
- if (unlikely(req->task->flags & PF_EXITING))
- WRITE_ONCE(poll->canceled, true);
-
- if (!req->result && !READ_ONCE(poll->canceled)) {
- struct poll_table_struct pt = { ._key = poll->events };
-
- req->result = vfs_poll(req->file, &pt) & poll->events;
- }
-
- spin_lock(&ctx->completion_lock);
- if (!req->result && !READ_ONCE(poll->canceled)) {
- add_wait_queue(poll->head, &poll->wait);
- return true;
- }
-
- return false;
+ atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}
static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
@@ -5366,133 +5439,241 @@ static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
return &req->apoll->poll;
}
-static void io_poll_remove_double(struct io_kiocb *req)
- __must_hold(&req->ctx->completion_lock)
+static void io_poll_req_insert(struct io_kiocb *req)
{
- struct io_poll_iocb *poll = io_poll_get_double(req);
+ struct io_ring_ctx *ctx = req->ctx;
+ struct hlist_head *list;
- lockdep_assert_held(&req->ctx->completion_lock);
+ list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
+ hlist_add_head(&req->hash_node, list);
+}
- if (poll && poll->head) {
- struct wait_queue_head *head = poll->head;
+static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
+ wait_queue_func_t wake_func)
+{
+ poll->head = NULL;
+#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
+ /* mask in events that we always want/need */
+ poll->events = events | IO_POLL_UNMASK;
+ INIT_LIST_HEAD(&poll->wait.entry);
+ init_waitqueue_func_entry(&poll->wait, wake_func);
+}
+static inline void io_poll_remove_entry(struct io_poll_iocb *poll)
+{
+ struct wait_queue_head *head = smp_load_acquire(&poll->head);
+
+ if (head) {
spin_lock_irq(&head->lock);
list_del_init(&poll->wait.entry);
- if (poll->wait.private)
- req_ref_put(req);
poll->head = NULL;
spin_unlock_irq(&head->lock);
}
}
-static bool __io_poll_complete(struct io_kiocb *req, __poll_t mask)
- __must_hold(&req->ctx->completion_lock)
+static void io_poll_remove_entries(struct io_kiocb *req)
+{
+ struct io_poll_iocb *poll = io_poll_get_single(req);
+ struct io_poll_iocb *poll_double = io_poll_get_double(req);
+
+ /*
+ * While we hold the waitqueue lock and the waitqueue is nonempty,
+ * wake_up_pollfree() will wait for us. However, taking the waitqueue
+ * lock in the first place can race with the waitqueue being freed.
+ *
+ * We solve this as eventpoll does: by taking advantage of the fact that
+ * all users of wake_up_pollfree() will RCU-delay the actual free. If
+ * we enter rcu_read_lock() and see that the pointer to the queue is
+ * non-NULL, we can then lock it without the memory being freed out from
+ * under us.
+ *
+ * Keep holding rcu_read_lock() as long as we hold the queue lock, in
+ * case the caller deletes the entry from the queue, leaving it empty.
+ * In that case, only RCU prevents the queue memory from being freed.
+ */
+ rcu_read_lock();
+ io_poll_remove_entry(poll);
+ if (poll_double)
+ io_poll_remove_entry(poll_double);
+ rcu_read_unlock();
+}
+
+/*
+ * All poll tw should go through this. Checks for poll events, manages
+ * references, does rewait, etc.
+ *
+ * Returns a negative error on failure. >0 when no action is required, which
+ * means either a spurious wakeup or a multishot CQE was served. 0 when it's
+ * done with the request, in which case the mask is stored in req->result.
+ */
+static int io_poll_check_events(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
- unsigned flags = IORING_CQE_F_MORE;
- int error;
+ struct io_poll_iocb *poll = io_poll_get_single(req);
+ int v;
- if (READ_ONCE(req->poll.canceled)) {
- error = -ECANCELED;
- req->poll.events |= EPOLLONESHOT;
- } else {
- error = mangle_poll(mask);
- }
- if (req->poll.events & EPOLLONESHOT)
- flags = 0;
- if (!io_cqring_fill_event(ctx, req->user_data, error, flags)) {
- req->poll.events |= EPOLLONESHOT;
- flags = 0;
- }
- if (flags & IORING_CQE_F_MORE)
- ctx->cq_extra++;
+ /* req->task == current here, checking PF_EXITING is safe */
+ if (unlikely(req->task->flags & PF_EXITING))
+ io_poll_mark_cancelled(req);
+
+ do {
+ v = atomic_read(&req->poll_refs);
+
+ /* tw handler should be the owner, and so have some references */
+ if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
+ return 0;
+ if (v & IO_POLL_CANCEL_FLAG)
+ return -ECANCELED;
+
+ if (!req->result) {
+ struct poll_table_struct pt = { ._key = poll->events };
+
+ req->result = vfs_poll(req->file, &pt) & poll->events;
+ }
+
+ /* multishot, just fill a CQE and proceed */
+ if (req->result && !(poll->events & EPOLLONESHOT)) {
+ __poll_t mask = mangle_poll(req->result & poll->events);
+ bool filled;
+
+ spin_lock(&ctx->completion_lock);
+ filled = io_fill_cqe_aux(ctx, req->user_data, mask,
+ IORING_CQE_F_MORE);
+ io_commit_cqring(ctx);
+ spin_unlock(&ctx->completion_lock);
+ if (unlikely(!filled))
+ return -ECANCELED;
+ io_cqring_ev_posted(ctx);
+ } else if (req->result) {
+ return 0;
+ }
- return !(flags & IORING_CQE_F_MORE);
+ /*
+ * Release all references; retry if someone tried to restart
+ * task_work while we were executing it.
+ */
+ } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs));
+
+ return 1;
}
static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
struct io_ring_ctx *ctx = req->ctx;
- struct io_kiocb *nxt;
+ int ret;
- if (io_poll_rewait(req, &req->poll)) {
- spin_unlock(&ctx->completion_lock);
+ ret = io_poll_check_events(req);
+ if (ret > 0)
+ return;
+
+ if (!ret) {
+ req->result = mangle_poll(req->result & req->poll.events);
} else {
- bool done;
+ req->result = ret;
+ req_set_fail(req);
+ }
- if (req->poll.done) {
- spin_unlock(&ctx->completion_lock);
- return;
- }
- done = __io_poll_complete(req, req->result);
- if (done) {
- io_poll_remove_double(req);
- hash_del(&req->hash_node);
- req->poll.done = true;
- } else {
- req->result = 0;
- add_wait_queue(req->poll.head, &req->poll.wait);
- }
- io_commit_cqring(ctx);
- spin_unlock(&ctx->completion_lock);
- io_cqring_ev_posted(ctx);
+ io_poll_remove_entries(req);
+ spin_lock(&ctx->completion_lock);
+ hash_del(&req->hash_node);
+ __io_req_complete_post(req, req->result, 0);
+ io_commit_cqring(ctx);
+ spin_unlock(&ctx->completion_lock);
+ io_cqring_ev_posted(ctx);
+}
- if (done) {
- nxt = io_put_req_find_next(req);
- if (nxt)
- io_req_task_submit(nxt, locked);
- }
- }
+static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
+{
+ struct io_ring_ctx *ctx = req->ctx;
+ int ret;
+
+ ret = io_poll_check_events(req);
+ if (ret > 0)
+ return;
+
+ io_poll_remove_entries(req);
+ spin_lock(&ctx->completion_lock);
+ hash_del(&req->hash_node);
+ spin_unlock(&ctx->completion_lock);
+
+ if (!ret)
+ io_req_task_submit(req, locked);
+ else
+ io_req_complete_failed(req, ret);
+}
+
+static void __io_poll_execute(struct io_kiocb *req, int mask)
+{
+ req->result = mask;
+ if (req->opcode == IORING_OP_POLL_ADD)
+ req->io_task_work.func = io_poll_task_func;
+ else
+ req->io_task_work.func = io_apoll_task_func;
+
+ trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
+ io_req_task_work_add(req, false);
+}
+
+static inline void io_poll_execute(struct io_kiocb *req, int res)
+{
+ if (io_poll_get_ownership(req))
+ __io_poll_execute(req, res);
}
-static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
- int sync, void *key)
+static void io_poll_cancel_req(struct io_kiocb *req)
+{
+ io_poll_mark_cancelled(req);
+ /* kick tw, which should complete the request */
+ io_poll_execute(req, 0);
+}
+
+static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ void *key)
{
struct io_kiocb *req = wait->private;
- struct io_poll_iocb *poll = io_poll_get_single(req);
+ struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
+ wait);
__poll_t mask = key_to_poll(key);
- unsigned long flags;
- /* for instances that support it check for an event match first: */
- if (mask && !(mask & poll->events))
- return 0;
- if (!(poll->events & EPOLLONESHOT))
- return poll->wait.func(&poll->wait, mode, sync, key);
+ if (unlikely(mask & POLLFREE)) {
+ io_poll_mark_cancelled(req);
+ /* we have to kick tw in case it's not already */
+ io_poll_execute(req, 0);
- list_del_init(&wait->entry);
+ /*
+ * If the waitqueue is being freed early but someone already
+ * holds ownership over it, we have to tear down the request as
+ * best we can. That means immediately removing the request from
+ * its waitqueue and preventing all further accesses to the
+ * waitqueue via the request.
+ */
+ list_del_init(&poll->wait.entry);
+
+ /*
+ * Careful: this *must* be the last step, since as soon
+ * as req->head is NULL'ed out, the request can be
+ * completed and freed, since aio_poll_complete_work()
+ * will no longer need to take the waitqueue lock.
+ */
+ smp_store_release(&poll->head, NULL);
+ return 1;
+ }
- if (poll->head) {
- bool done;
+ /* for instances that support it check for an event match first */
+ if (mask && !(mask & poll->events))
+ return 0;
- spin_lock_irqsave(&poll->head->lock, flags);
- done = list_empty(&poll->wait.entry);
- if (!done)
+ if (io_poll_get_ownership(req)) {
+ /* optional, saves extra locking for removal in tw handler */
+ if (mask && poll->events & EPOLLONESHOT) {
list_del_init(&poll->wait.entry);
- /* make sure double remove sees this as being gone */
- wait->private = NULL;
- spin_unlock_irqrestore(&poll->head->lock, flags);
- if (!done) {
- /* use wait func handler, so it matches the rq type */
- poll->wait.func(&poll->wait, mode, sync, key);
+ poll->head = NULL;
}
+ __io_poll_execute(req, mask);
}
- req_ref_put(req);
return 1;
}
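
The "last step" comment above relies on release/acquire pairing: the POLLFREE path publishes head = NULL with a release store only after unlinking, and io_poll_remove_entry() reads head with an acquire load before touching the waitqueue. A hedged C11 analogue of that handshake, with stand-in names:

    #include <stdatomic.h>
    #include <stdio.h>

    static int waitqueue = 42;             /* stand-in for the wait head */
    static _Atomic(int *) head = &waitqueue;

    static void pollfree_side(void)
    {
        /* publish "head is gone" last, with release semantics */
        atomic_store_explicit(&head, NULL, memory_order_release);
    }

    static int remove_side(void)
    {
        /* pairs with the release store: either we observe a live head
         * and may lock it, or we see NULL and must not touch the
         * possibly-freed memory */
        int *h = atomic_load_explicit(&head, memory_order_acquire);

        return h != NULL;
    }

    int main(void)
    {
        printf("%d\n", remove_side());  /* prints: 1 */
        pollfree_side();
        printf("%d\n", remove_side());  /* prints: 0 */
        return 0;
    }
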
-static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
- wait_queue_func_t wake_func)
-{
- poll->head = NULL;
- poll->done = false;
- poll->canceled = false;
-#define IO_POLL_UNMASK (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
- /* mask in events that we always want/need */
- poll->events = events | IO_POLL_UNMASK;
- INIT_LIST_HEAD(&poll->wait.entry);
- init_waitqueue_func_entry(&poll->wait, wake_func);
-}
-
static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
struct wait_queue_head *head,
struct io_poll_iocb **poll_ptr)
@@ -5505,10 +5686,10 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
* if this happens.
*/
if (unlikely(pt->nr_entries)) {
- struct io_poll_iocb *poll_one = poll;
+ struct io_poll_iocb *first = poll;
/* double add on the same waitqueue head, ignore */
- if (poll_one->head == head)
+ if (first->head == head)
return;
/* already have a 2nd entry, fail a third attempt */
if (*poll_ptr) {
@@ -5517,21 +5698,13 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
pt->error = -EINVAL;
return;
}
- /*
- * Can't handle multishot for double wait for now, turn it
- * into one-shot mode.
- */
- if (!(poll_one->events & EPOLLONESHOT))
- poll_one->events |= EPOLLONESHOT;
+
poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
if (!poll) {
pt->error = -ENOMEM;
return;
}
- io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
- req_ref_get(req);
- poll->wait.private = req;
-
+ io_init_poll_iocb(poll, first->events, first->wait.func);
*poll_ptr = poll;
if (req->opcode == IORING_OP_POLL_ADD)
req->flags |= REQ_F_ASYNC_DATA;
@@ -5539,6 +5712,7 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
pt->nr_entries++;
poll->head = head;
+ poll->wait.private = req;
if (poll->events & EPOLLEXCLUSIVE)
add_wait_queue_exclusive(head, &poll->wait);
@@ -5546,70 +5720,24 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
add_wait_queue(head, &poll->wait);
}
-static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
+static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
struct poll_table_struct *p)
{
struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
- struct async_poll *apoll = pt->req->apoll;
-
- __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
-}
-
-static void io_async_task_func(struct io_kiocb *req, bool *locked)
-{
- struct async_poll *apoll = req->apoll;
- struct io_ring_ctx *ctx = req->ctx;
-
- trace_io_uring_task_run(req->ctx, req, req->opcode, req->user_data);
-
- if (io_poll_rewait(req, &apoll->poll)) {
- spin_unlock(&ctx->completion_lock);
- return;
- }
-
- hash_del(&req->hash_node);
- io_poll_remove_double(req);
- apoll->poll.done = true;
- spin_unlock(&ctx->completion_lock);
-
- if (!READ_ONCE(apoll->poll.canceled))
- io_req_task_submit(req, locked);
- else
- io_req_complete_failed(req, -ECANCELED);
-}
-
-static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
- void *key)
-{
- struct io_kiocb *req = wait->private;
- struct io_poll_iocb *poll = &req->apoll->poll;
-
- trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
- key_to_poll(key));
-
- return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
-}
-
-static void io_poll_req_insert(struct io_kiocb *req)
-{
- struct io_ring_ctx *ctx = req->ctx;
- struct hlist_head *list;
- list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
- hlist_add_head(&req->hash_node, list);
+ __io_queue_proc(&pt->req->poll, pt, head,
+ (struct io_poll_iocb **) &pt->req->async_data);
}
-static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
- struct io_poll_iocb *poll,
- struct io_poll_table *ipt, __poll_t mask,
- wait_queue_func_t wake_func)
- __acquires(&ctx->completion_lock)
+static int __io_arm_poll_handler(struct io_kiocb *req,
+ struct io_poll_iocb *poll,
+ struct io_poll_table *ipt, __poll_t mask)
{
struct io_ring_ctx *ctx = req->ctx;
- bool cancel = false;
+ int v;
INIT_HLIST_NODE(&req->hash_node);
- io_init_poll_iocb(poll, mask, wake_func);
+ io_init_poll_iocb(poll, mask, io_poll_wake);
poll->file = req->file;
poll->wait.private = req;
@@ -5618,31 +5746,54 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
ipt->error = 0;
ipt->nr_entries = 0;
+ /*
+	 * Take ownership to delay any tw execution until we're done with
+	 * poll arming; see io_poll_get_ownership().
+ */
+ atomic_set(&req->poll_refs, 1);
mask = vfs_poll(req->file, &ipt->pt) & poll->events;
- if (unlikely(!ipt->nr_entries) && !ipt->error)
- ipt->error = -EINVAL;
+
+ if (mask && (poll->events & EPOLLONESHOT)) {
+ io_poll_remove_entries(req);
+ /* no one else has access to the req, forget about the ref */
+ return mask;
+ }
+ if (!mask && unlikely(ipt->error || !ipt->nr_entries)) {
+ io_poll_remove_entries(req);
+ if (!ipt->error)
+ ipt->error = -EINVAL;
+ return 0;
+ }
spin_lock(&ctx->completion_lock);
- if (ipt->error || (mask && (poll->events & EPOLLONESHOT)))
- io_poll_remove_double(req);
- if (likely(poll->head)) {
- spin_lock_irq(&poll->head->lock);
- if (unlikely(list_empty(&poll->wait.entry))) {
- if (ipt->error)
- cancel = true;
- ipt->error = 0;
- mask = 0;
- }
- if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error)
- list_del_init(&poll->wait.entry);
- else if (cancel)
- WRITE_ONCE(poll->canceled, true);
- else if (!poll->done) /* actually waiting for an event */
- io_poll_req_insert(req);
- spin_unlock_irq(&poll->head->lock);
+ io_poll_req_insert(req);
+ spin_unlock(&ctx->completion_lock);
+
+ if (mask) {
+ /* can't multishot if failed, just queue the event we've got */
+ if (unlikely(ipt->error || !ipt->nr_entries))
+ poll->events |= EPOLLONESHOT;
+ __io_poll_execute(req, mask);
+ return 0;
}
- return mask;
+ /*
+ * Release ownership. If someone tried to queue a tw while it was
+ * locked, kick it off for them.
+ */
+ v = atomic_dec_return(&req->poll_refs);
+ if (unlikely(v & IO_POLL_REF_MASK))
+ __io_poll_execute(req, 0);
+ return 0;
+}
+
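
__io_arm_poll_handler() pins ownership up front with atomic_set(&req->poll_refs, 1) so that wakeups firing during vfs_poll() cannot run the handler concurrently; the final decrement then checks whether anyone queued behind it. A hedged standalone model of that release step (illustrative names, not kernel API):

#include <stdatomic.h>
#include <stdio.h>

#define REF_MASK 0x7fffffff

static atomic_int poll_refs = 1;	/* ref held across arming */

static void wakeup_during_arming(void)
{
	atomic_fetch_add(&poll_refs, 1);	/* loses; leaves a mark */
}

int main(void)
{
	int v;

	wakeup_during_arming();
	/* mirrors: v = atomic_dec_return(&req->poll_refs);
	 *          if (v & IO_POLL_REF_MASK) __io_poll_execute(req, 0); */
	v = atomic_fetch_sub(&poll_refs, 1) - 1;
	if (v & REF_MASK)
		puts("a wakeup raced with arming: run the handler now");
	return 0;
}
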
+static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
+ struct poll_table_struct *p)
+{
+ struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
+ struct async_poll *apoll = pt->req->apoll;
+
+ __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}
enum {
@@ -5657,7 +5808,8 @@ static int io_arm_poll_handler(struct io_kiocb *req)
struct io_ring_ctx *ctx = req->ctx;
struct async_poll *apoll;
struct io_poll_table ipt;
- __poll_t ret, mask = EPOLLONESHOT | POLLERR | POLLPRI;
+ __poll_t mask = EPOLLONESHOT | POLLERR | POLLPRI;
+ int ret;
if (!def->pollin && !def->pollout)
return IO_APOLL_ABORTED;
@@ -5682,11 +5834,8 @@ static int io_arm_poll_handler(struct io_kiocb *req)
req->apoll = apoll;
req->flags |= REQ_F_POLLED;
ipt.pt._qproc = io_async_queue_proc;
- io_req_set_refcount(req);
- ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
- io_async_wake);
- spin_unlock(&ctx->completion_lock);
+ ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask);
if (ret || ipt.error)
return ret ? IO_APOLL_READY : IO_APOLL_ABORTED;
@@ -5695,43 +5844,6 @@ static int io_arm_poll_handler(struct io_kiocb *req)
return IO_APOLL_OK;
}
-static bool __io_poll_remove_one(struct io_kiocb *req,
- struct io_poll_iocb *poll, bool do_cancel)
- __must_hold(&req->ctx->completion_lock)
-{
- bool do_complete = false;
-
- if (!poll->head)
- return false;
- spin_lock_irq(&poll->head->lock);
- if (do_cancel)
- WRITE_ONCE(poll->canceled, true);
- if (!list_empty(&poll->wait.entry)) {
- list_del_init(&poll->wait.entry);
- do_complete = true;
- }
- spin_unlock_irq(&poll->head->lock);
- hash_del(&req->hash_node);
- return do_complete;
-}
-
-static bool io_poll_remove_one(struct io_kiocb *req)
- __must_hold(&req->ctx->completion_lock)
-{
- bool do_complete;
-
- io_poll_remove_double(req);
- do_complete = __io_poll_remove_one(req, io_poll_get_single(req), true);
-
- if (do_complete) {
- io_cqring_fill_event(req->ctx, req->user_data, -ECANCELED, 0);
- io_commit_cqring(req->ctx);
- req_set_fail(req);
- io_put_req_deferred(req);
- }
- return do_complete;
-}
-
/*
* Returns true if we found and killed one or more poll requests
*/
@@ -5740,7 +5852,8 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
{
struct hlist_node *tmp;
struct io_kiocb *req;
- int posted = 0, i;
+ bool found = false;
+ int i;
spin_lock(&ctx->completion_lock);
for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
@@ -5748,16 +5861,14 @@ static __cold bool io_poll_remove_all(struct io_ring_ctx *ctx,
list = &ctx->cancel_hash[i];
hlist_for_each_entry_safe(req, tmp, list, hash_node) {
- if (io_match_task_safe(req, tsk, cancel_all))
- posted += io_poll_remove_one(req);
+ if (io_match_task_safe(req, tsk, cancel_all)) {
+ io_poll_cancel_req(req);
+ found = true;
+ }
}
}
spin_unlock(&ctx->completion_lock);
-
- if (posted)
- io_cqring_ev_posted(ctx);
-
- return posted != 0;
+ return found;
}
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
@@ -5778,19 +5889,26 @@ static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, __u64 sqe_addr,
return NULL;
}
+static bool io_poll_disarm(struct io_kiocb *req)
+ __must_hold(&ctx->completion_lock)
+{
+ if (!io_poll_get_ownership(req))
+ return false;
+ io_poll_remove_entries(req);
+ hash_del(&req->hash_node);
+ return true;
+}
+
static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr,
bool poll_only)
__must_hold(&ctx->completion_lock)
{
- struct io_kiocb *req;
+ struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only);
- req = io_poll_find(ctx, sqe_addr, poll_only);
if (!req)
return -ENOENT;
- if (io_poll_remove_one(req))
- return 0;
-
- return -EALREADY;
+ io_poll_cancel_req(req);
+ return 0;
}
static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
@@ -5840,23 +5958,6 @@ static int io_poll_update_prep(struct io_kiocb *req,
return 0;
}
-static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
- void *key)
-{
- struct io_kiocb *req = wait->private;
- struct io_poll_iocb *poll = &req->poll;
-
- return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
-}
-
-static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
- struct poll_table_struct *p)
-{
- struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
-
- __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
-}
-
static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_poll_iocb *poll = &req->poll;
@@ -5869,6 +5970,8 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
flags = READ_ONCE(sqe->len);
if (flags & ~IORING_POLL_ADD_MULTI)
return -EINVAL;
+ if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
+ return -EINVAL;
io_req_set_refcount(req);
poll->events = io_poll_parse_events(sqe, flags);
@@ -5878,100 +5981,60 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe
static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_poll_iocb *poll = &req->poll;
- struct io_ring_ctx *ctx = req->ctx;
struct io_poll_table ipt;
- __poll_t mask;
- bool done;
+ int ret;
ipt.pt._qproc = io_poll_queue_proc;
- mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
- io_poll_wake);
-
- if (mask) { /* no async, we'd stolen it */
- ipt.error = 0;
- done = __io_poll_complete(req, mask);
- io_commit_cqring(req->ctx);
- }
- spin_unlock(&ctx->completion_lock);
-
- if (mask) {
- io_cqring_ev_posted(ctx);
- if (done)
- io_put_req(req);
- }
- return ipt.error;
+ ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
+ ret = ret ?: ipt.error;
+ if (ret)
+ __io_req_complete(req, issue_flags, ret, 0);
+ return 0;
}
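
For context on the multishot rule enforced in io_poll_add_prep() above (IORING_POLL_ADD_MULTI cannot be combined with CQE skipping), here is a sketch of the userspace side, assuming liburing >= 2.1 and its io_uring_prep_poll_multishot() helper:

#include <liburing.h>
#include <poll.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_multishot(sqe, 0 /* stdin */, POLLIN);
	/* Don't set IOSQE_CQE_SKIP_SUCCESS here: multishot poll needs
	 * its CQEs, and the kernel now rejects the combination. */
	io_uring_submit(&ring);

	/* each readiness event posts a CQE with IORING_CQE_F_MORE set
	 * while the poll request stays armed */
	if (io_uring_wait_cqe(&ring, &cqe) == 0) {
		printf("poll fired, res=0x%x more=%d\n", cqe->res,
		       !!(cqe->flags & IORING_CQE_F_MORE));
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
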
static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_ring_ctx *ctx = req->ctx;
struct io_kiocb *preq;
- bool completing;
- int ret;
+ int ret2, ret = 0;
+ bool locked;
spin_lock(&ctx->completion_lock);
preq = io_poll_find(ctx, req->poll_update.old_user_data, true);
- if (!preq) {
- ret = -ENOENT;
- goto err;
- }
-
- if (!req->poll_update.update_events && !req->poll_update.update_user_data) {
- completing = true;
- ret = io_poll_remove_one(preq) ? 0 : -EALREADY;
- goto err;
- }
-
- /*
- * Don't allow racy completion with singleshot, as we cannot safely
- * update those. For multishot, if we're racing with completion, just
- * let completion re-add it.
- */
- completing = !__io_poll_remove_one(preq, &preq->poll, false);
- if (completing && (preq->poll.events & EPOLLONESHOT)) {
- ret = -EALREADY;
- goto err;
- }
- /* we now have a detached poll request. reissue. */
- ret = 0;
-err:
- if (ret < 0) {
+ if (!preq || !io_poll_disarm(preq)) {
spin_unlock(&ctx->completion_lock);
- req_set_fail(req);
- io_req_complete(req, ret);
- return 0;
- }
- /* only mask one event flags, keep behavior flags */
- if (req->poll_update.update_events) {
- preq->poll.events &= ~0xffff;
- preq->poll.events |= req->poll_update.events & 0xffff;
- preq->poll.events |= IO_POLL_UNMASK;
+ ret = preq ? -EALREADY : -ENOENT;
+ goto out;
}
- if (req->poll_update.update_user_data)
- preq->user_data = req->poll_update.new_user_data;
spin_unlock(&ctx->completion_lock);
- /* complete update request, we're done with it */
- io_req_complete(req, ret);
-
- if (!completing) {
- ret = io_poll_add(preq, issue_flags);
- if (ret < 0) {
- req_set_fail(preq);
- io_req_complete(preq, ret);
+ if (req->poll_update.update_events || req->poll_update.update_user_data) {
+		/* only replace the event mask bits, keep behavior flags */
+ if (req->poll_update.update_events) {
+ preq->poll.events &= ~0xffff;
+ preq->poll.events |= req->poll_update.events & 0xffff;
+ preq->poll.events |= IO_POLL_UNMASK;
}
- }
- return 0;
-}
+ if (req->poll_update.update_user_data)
+ preq->user_data = req->poll_update.new_user_data;
-static void io_req_task_timeout(struct io_kiocb *req, bool *locked)
-{
- struct io_timeout_data *data = req->async_data;
+ ret2 = io_poll_add(preq, issue_flags);
+ /* successfully updated, don't complete poll request */
+ if (!ret2)
+ goto out;
+ }
- if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
+ req_set_fail(preq);
+ preq->result = -ECANCELED;
+ locked = !(issue_flags & IO_URING_F_UNLOCKED);
+ io_req_task_complete(preq, &locked);
+out:
+ if (ret < 0)
req_set_fail(req);
- io_req_complete_post(req, -ETIME, 0);
+ /* complete update request, we're done with it */
+ __io_req_complete(req, issue_flags, ret, 0);
+ return 0;
}
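
The rewritten io_poll_update() disarms the old request, patches its events and/or user_data, and re-arms it through io_poll_add(); only if re-arming fails is the old poll completed with -ECANCELED. From userspace this is driven by a poll-update SQE; a sketch assuming the __u64-based io_uring_prep_poll_update() signature of recent liburing (the user_data tags are hypothetical):

#include <liburing.h>
#include <poll.h>

/* Retarget an armed poll: switch interest from POLLIN to POLLOUT and
 * give the request new user_data in one operation. 0x1234/0x5678 are
 * hypothetical tags the application would have assigned. */
static void repoint_poll(struct io_uring *ring)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	io_uring_prep_poll_update(sqe, 0x1234, 0x5678, POLLOUT,
				  IORING_POLL_UPDATE_EVENTS |
				  IORING_POLL_UPDATE_USER_DATA);
	io_uring_submit(ring);
}
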
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
@@ -5988,8 +6051,12 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
atomic_read(&req->ctx->cq_timeouts) + 1);
spin_unlock_irqrestore(&ctx->timeout_lock, flags);
- req->io_task_work.func = io_req_task_timeout;
- io_req_task_work_add(req);
+ if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
+ req_set_fail(req);
+
+ req->result = -ETIME;
+ req->io_task_work.func = io_req_task_complete;
+ io_req_task_work_add(req, false);
return HRTIMER_NORESTART;
}
@@ -6026,7 +6093,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
return PTR_ERR(req);
req_set_fail(req);
- io_cqring_fill_event(ctx, req->user_data, -ECANCELED, 0);
+ io_fill_cqe_req(req, -ECANCELED, 0);
io_put_req_deferred(req);
return 0;
}
@@ -6115,6 +6182,8 @@ static int io_timeout_remove_prep(struct io_kiocb *req,
return -EINVAL;
if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
return -EFAULT;
+ if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
+ return -EINVAL;
} else if (tr->flags) {
/* timeout removal doesn't support flags */
return -EINVAL;
@@ -6316,16 +6385,21 @@ static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr)
WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current);
ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx);
- if (ret != -ENOENT)
- return ret;
+ /*
+	 * Fall through even for -EALREADY, as we may have a poll armed
+	 * that needs unarming.
+ */
+ if (!ret)
+ return 0;
spin_lock(&ctx->completion_lock);
+ ret = io_poll_cancel(ctx, sqe_addr, false);
+ if (ret != -ENOENT)
+ goto out;
+
spin_lock_irq(&ctx->timeout_lock);
ret = io_timeout_cancel(ctx, sqe_addr);
spin_unlock_irq(&ctx->timeout_lock);
- if (ret != -ENOENT)
- goto out;
- ret = io_poll_cancel(ctx, sqe_addr, false);
out:
spin_unlock(&ctx->completion_lock);
return ret;
@@ -6544,12 +6618,15 @@ static __cold void io_drain_req(struct io_kiocb *req)
u32 seq = io_get_sequence(req);
	/* Still need to defer if there are pending reqs in the defer list. */
+ spin_lock(&ctx->completion_lock);
if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
+ spin_unlock(&ctx->completion_lock);
queue:
ctx->drain_active = false;
io_req_task_queue(req);
return;
}
+ spin_unlock(&ctx->completion_lock);
ret = io_req_prep_async(req);
if (ret) {
@@ -6580,10 +6657,8 @@ fail:
static void io_clean_op(struct io_kiocb *req)
{
- if (req->flags & REQ_F_BUFFER_SELECTED) {
- kfree(req->kbuf);
- req->kbuf = NULL;
- }
+ if (req->flags & REQ_F_BUFFER_SELECTED)
+ io_put_kbuf(req);
if (req->flags & REQ_F_NEED_CLEANUP) {
switch (req->opcode) {
@@ -6965,7 +7040,7 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
spin_unlock_irqrestore(&ctx->timeout_lock, flags);
req->io_task_work.func = io_req_task_link_timeout;
- io_req_task_work_add(req);
+ io_req_task_work_add(req, false);
return HRTIMER_NORESTART;
}
@@ -7100,10 +7175,10 @@ static void io_init_req_drain(struct io_kiocb *req)
* If we need to drain a request in the middle of a link, drain
* the head request and the next request/link after the current
* link. Considering sequential execution of links,
- * IOSQE_IO_DRAIN will be maintained for every request of our
+ * REQ_F_IO_DRAIN will be maintained for every request of our
* link.
*/
- head->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
+ head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
ctx->drain_next = true;
}
}
@@ -7136,8 +7211,13 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
!io_op_defs[opcode].buffer_select)
return -EOPNOTSUPP;
- if (sqe_flags & IOSQE_IO_DRAIN)
+ if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
+ ctx->drain_disabled = true;
+ if (sqe_flags & IOSQE_IO_DRAIN) {
+ if (ctx->drain_disabled)
+ return -EOPNOTSUPP;
io_init_req_drain(req);
+ }
}
if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
@@ -7149,7 +7229,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
ctx->drain_next = false;
ctx->drain_active = true;
- req->flags |= IOSQE_IO_DRAIN | REQ_F_FORCE_ASYNC;
+ req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
}
}
@@ -7741,10 +7821,15 @@ static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
struct io_ring_ctx *ctx = node->rsrc_data->ctx;
unsigned long flags;
bool first_add = false;
+ unsigned long delay = HZ;
spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
node->done = true;
+ /* if we are mid-quiesce then do not delay */
+ if (node->rsrc_data->quiesce)
+ delay = 0;
+
while (!list_empty(&ctx->rsrc_ref_list)) {
node = list_first_entry(&ctx->rsrc_ref_list,
struct io_rsrc_node, node);
@@ -7757,10 +7842,10 @@ static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
if (first_add)
- mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
+ mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
}
-static struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
+static struct io_rsrc_node *io_rsrc_node_alloc(void)
{
struct io_rsrc_node *ref_node;
@@ -7811,7 +7896,7 @@ static int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
if (ctx->rsrc_backup_node)
return 0;
- ctx->rsrc_backup_node = io_rsrc_node_alloc(ctx);
+ ctx->rsrc_backup_node = io_rsrc_node_alloc();
return ctx->rsrc_backup_node ? 0 : -ENOMEM;
}
@@ -8263,8 +8348,7 @@ static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
io_ring_submit_lock(ctx, lock_ring);
spin_lock(&ctx->completion_lock);
- io_cqring_fill_event(ctx, prsrc->tag, 0, 0);
- ctx->cq_extra++;
+ io_fill_cqe_aux(ctx, prsrc->tag, 0, 0);
io_commit_cqring(ctx);
spin_unlock(&ctx->completion_lock);
io_cqring_ev_posted(ctx);
@@ -8676,6 +8760,7 @@ static __cold int io_uring_alloc_task_context(struct task_struct *task,
task->io_uring = tctx;
spin_lock_init(&tctx->task_lock);
INIT_WQ_LIST(&tctx->task_list);
+ INIT_WQ_LIST(&tctx->prior_task_list);
init_task_work(&tctx->task_work, tctx_task_work);
return 0;
}
@@ -8847,10 +8932,9 @@ static void io_mem_free(void *ptr)
static void *io_mem_alloc(size_t size)
{
- gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
- __GFP_NORETRY | __GFP_ACCOUNT;
+ gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
- return (void *) __get_free_pages(gfp_flags, get_order(size));
+ return (void *) __get_free_pages(gfp, get_order(size));
}
static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
@@ -9814,18 +9898,6 @@ static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
return percpu_counter_sum(&tctx->inflight);
}
-static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
-{
- struct io_uring_task *tctx = task->io_uring;
- unsigned int refs = tctx->cached_refs;
-
- if (refs) {
- tctx->cached_refs = 0;
- percpu_counter_sub(&tctx->inflight, refs);
- put_task_struct_many(task, refs);
- }
-}
-
/*
* Find any io_uring ctx that this task has registered or done IO on, and cancel
* requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
@@ -9883,10 +9955,14 @@ static __cold void io_uring_cancel_generic(bool cancel_all,
schedule();
finish_wait(&tctx->wait, &wait);
} while (1);
- atomic_dec(&tctx->in_idle);
io_uring_clean_tctx(tctx);
if (cancel_all) {
+ /*
+ * We shouldn't run task_works after cancel, so just leave
+ * ->in_idle set for normal exit.
+ */
+ atomic_dec(&tctx->in_idle);
/* for exec all current's requests should be gone, kill tctx */
__io_uring_free(current);
}
@@ -10164,7 +10240,7 @@ static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
* and sq_tail and cq_head are changed by userspace. But it's ok since
	 * we usually only use this info when the ring is stuck.
*/
- seq_printf(m, "SqMask:\t\t0x%x\n", sq_mask);
+ seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
seq_printf(m, "SqHead:\t%u\n", sq_head);
seq_printf(m, "SqTail:\t%u\n", sq_tail);
seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
@@ -10473,7 +10549,7 @@ static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
- IORING_FEAT_RSRC_TAGS;
+ IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP;
if (copy_to_user(params, p, sizeof(*p))) {
ret = -EFAULT;
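
Since the series advertises IORING_FEAT_CQE_SKIP, userspace should gate IOSQE_CQE_SKIP_SUCCESS on the feature bit — and remember the new rule above that CQE-skipping requests disable IOSQE_IO_DRAIN. A hedged detection sketch using liburing:

#include <liburing.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_params p;

	memset(&p, 0, sizeof(p));
	if (io_uring_queue_init_params(8, &ring, &p) < 0)
		return 1;

	if (p.features & IORING_FEAT_CQE_SKIP)
		puts("IOSQE_CQE_SKIP_SUCCESS available");
	else
		puts("kernel too old: don't set IOSQE_CQE_SKIP_SUCCESS");

	io_uring_queue_exit(&ring);
	return 0;
}
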
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 504e69578112..1ed097e94af2 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -430,7 +430,7 @@ static int ioctl_file_dedupe_range(struct file *file,
goto out;
}
- size = offsetof(struct file_dedupe_range __user, info[count]);
+ size = offsetof(struct file_dedupe_range, info[count]);
if (size > PAGE_SIZE) {
ret = -ENOMEM;
goto out;
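
The ioctl fix only drops a stray __user qualifier; the offsetof(struct, info[count]) idiom itself is the standard way to size an allocation that ends in a flexible array (GCC and Clang accept a runtime index here, as the kernel relies on). A self-contained illustration with a made-up struct:

#include <stddef.h>
#include <stdio.h>

struct dedupe_like {
	long reserved[3];
	int count;
	struct { long off, len; } info[];	/* flexible array */
};

int main(void)
{
	int count = 4;
	/* bytes needed for the header plus 'count' trailing entries */
	size_t size = offsetof(struct dedupe_like, info[count]);

	printf("%zu bytes for %d entries\n", size, count);
	return 0;
}
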
diff --git a/fs/iomap/Makefile b/fs/iomap/Makefile
index 4143a3ff89db..fc070184b7fa 100644
--- a/fs/iomap/Makefile
+++ b/fs/iomap/Makefile
@@ -9,9 +9,9 @@ ccflags-y += -I $(srctree)/$(src) # needed for trace events
obj-$(CONFIG_FS_IOMAP) += iomap.o
iomap-y += trace.o \
- buffered-io.o \
+ iter.o
+iomap-$(CONFIG_BLOCK) += buffered-io.o \
direct-io.o \
fiemap.o \
- iter.o \
seek.o
iomap-$(CONFIG_SWAP) += swapfile.o
diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 71a36ae120ee..6c51a75d0be6 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -21,9 +21,11 @@
#include "../internal.h"
+#define IOEND_BATCH_SIZE 4096
+
/*
- * Structure allocated for each page or THP when block size < page size
- * to track sub-page uptodate status and I/O completions.
+ * Structure allocated for each folio when block size < folio size
+ * to track sub-folio uptodate status and I/O completions.
*/
struct iomap_page {
atomic_t read_bytes_pending;
@@ -32,27 +34,20 @@ struct iomap_page {
unsigned long uptodate[];
};
-static inline struct iomap_page *to_iomap_page(struct page *page)
+static inline struct iomap_page *to_iomap_page(struct folio *folio)
{
- /*
- * per-block data is stored in the head page. Callers should
- * not be dealing with tail pages, and if they are, they can
- * call thp_head() first.
- */
- VM_BUG_ON_PGFLAGS(PageTail(page), page);
-
- if (page_has_private(page))
- return (struct iomap_page *)page_private(page);
+ if (folio_test_private(folio))
+ return folio_get_private(folio);
return NULL;
}
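
struct iomap_page hangs off folio->private and tracks per-block uptodate state whenever block size < folio size. A userspace model of that bookkeeping (all names and sizes here are illustrative, not the kernel's):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))

struct iop_model {
	unsigned nr_blocks;
	unsigned long uptodate[];	/* one bit per block */
};

static struct iop_model *iop_create(size_t folio_size, unsigned block_bits)
{
	unsigned nr = folio_size >> block_bits;
	size_t longs = (nr + BITS_PER_LONG - 1) / BITS_PER_LONG;
	struct iop_model *iop = calloc(1, sizeof(*iop) + longs * sizeof(long));

	if (iop)
		iop->nr_blocks = nr;
	return iop;
}

static void set_uptodate(struct iop_model *iop, unsigned block)
{
	iop->uptodate[block / BITS_PER_LONG] |= 1UL << (block % BITS_PER_LONG);
}

int main(void)
{
	/* 16KiB folio, 4KiB blocks -> 4 tracked blocks */
	struct iop_model *iop = iop_create(16384, 12);

	set_uptodate(iop, 0);
	printf("blocks=%u, block 0 now uptodate\n", iop->nr_blocks);
	free(iop);
	return 0;
}
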
static struct bio_set iomap_ioend_bioset;
static struct iomap_page *
-iomap_page_create(struct inode *inode, struct page *page)
+iomap_page_create(struct inode *inode, struct folio *folio)
{
- struct iomap_page *iop = to_iomap_page(page);
- unsigned int nr_blocks = i_blocks_per_page(inode, page);
+ struct iomap_page *iop = to_iomap_page(folio);
+ unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
if (iop || nr_blocks <= 1)
return iop;
@@ -60,40 +55,40 @@ iomap_page_create(struct inode *inode, struct page *page)
iop = kzalloc(struct_size(iop, uptodate, BITS_TO_LONGS(nr_blocks)),
GFP_NOFS | __GFP_NOFAIL);
spin_lock_init(&iop->uptodate_lock);
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
bitmap_fill(iop->uptodate, nr_blocks);
- attach_page_private(page, iop);
+ folio_attach_private(folio, iop);
return iop;
}
-static void
-iomap_page_release(struct page *page)
+static void iomap_page_release(struct folio *folio)
{
- struct iomap_page *iop = detach_page_private(page);
- unsigned int nr_blocks = i_blocks_per_page(page->mapping->host, page);
+ struct iomap_page *iop = folio_detach_private(folio);
+ struct inode *inode = folio->mapping->host;
+ unsigned int nr_blocks = i_blocks_per_folio(inode, folio);
if (!iop)
return;
WARN_ON_ONCE(atomic_read(&iop->read_bytes_pending));
WARN_ON_ONCE(atomic_read(&iop->write_bytes_pending));
WARN_ON_ONCE(bitmap_full(iop->uptodate, nr_blocks) !=
- PageUptodate(page));
+ folio_test_uptodate(folio));
kfree(iop);
}
/*
- * Calculate the range inside the page that we actually need to read.
+ * Calculate the range inside the folio that we actually need to read.
*/
-static void
-iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
- loff_t *pos, loff_t length, unsigned *offp, unsigned *lenp)
+static void iomap_adjust_read_range(struct inode *inode, struct folio *folio,
+ loff_t *pos, loff_t length, size_t *offp, size_t *lenp)
{
+ struct iomap_page *iop = to_iomap_page(folio);
loff_t orig_pos = *pos;
loff_t isize = i_size_read(inode);
unsigned block_bits = inode->i_blkbits;
unsigned block_size = (1 << block_bits);
- unsigned poff = offset_in_page(*pos);
- unsigned plen = min_t(loff_t, PAGE_SIZE - poff, length);
+ size_t poff = offset_in_folio(folio, *pos);
+ size_t plen = min_t(loff_t, folio_size(folio) - poff, length);
unsigned first = poff >> block_bits;
unsigned last = (poff + plen - 1) >> block_bits;
@@ -131,7 +126,7 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
* page cache for blocks that are entirely outside of i_size.
*/
if (orig_pos <= isize && orig_pos + length > isize) {
- unsigned end = offset_in_page(isize - 1) >> block_bits;
+ unsigned end = offset_in_folio(folio, isize - 1) >> block_bits;
if (first <= end && last > end)
plen -= (last - end) * block_size;
@@ -141,66 +136,62 @@ iomap_adjust_read_range(struct inode *inode, struct iomap_page *iop,
*lenp = plen;
}
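
Worked example of the EOF trim above (numbers hypothetical): with 1KiB blocks and a 4KiB folio at pos 4096, suppose isize = 5120. Then end = offset_in_folio(folio, 5119) >> 10 = 1023 >> 10 = 0, while a full-folio read has first = 0 and last = 3; since first <= end and last > end, plen is reduced by (3 - 0) * 1024 = 3072, leaving a single 1KiB block to read. The post-EOF remainder of the folio is zeroed rather than read from disk.
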
-static void
-iomap_iop_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+static void iomap_iop_set_range_uptodate(struct folio *folio,
+ struct iomap_page *iop, size_t off, size_t len)
{
- struct iomap_page *iop = to_iomap_page(page);
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
unsigned first = off >> inode->i_blkbits;
unsigned last = (off + len - 1) >> inode->i_blkbits;
unsigned long flags;
spin_lock_irqsave(&iop->uptodate_lock, flags);
bitmap_set(iop->uptodate, first, last - first + 1);
- if (bitmap_full(iop->uptodate, i_blocks_per_page(inode, page)))
- SetPageUptodate(page);
+ if (bitmap_full(iop->uptodate, i_blocks_per_folio(inode, folio)))
+ folio_mark_uptodate(folio);
spin_unlock_irqrestore(&iop->uptodate_lock, flags);
}
-static void
-iomap_set_range_uptodate(struct page *page, unsigned off, unsigned len)
+static void iomap_set_range_uptodate(struct folio *folio,
+ struct iomap_page *iop, size_t off, size_t len)
{
- if (PageError(page))
+ if (folio_test_error(folio))
return;
- if (page_has_private(page))
- iomap_iop_set_range_uptodate(page, off, len);
+ if (iop)
+ iomap_iop_set_range_uptodate(folio, iop, off, len);
else
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
-static void
-iomap_read_page_end_io(struct bio_vec *bvec, int error)
+static void iomap_finish_folio_read(struct folio *folio, size_t offset,
+ size_t len, int error)
{
- struct page *page = bvec->bv_page;
- struct iomap_page *iop = to_iomap_page(page);
+ struct iomap_page *iop = to_iomap_page(folio);
if (unlikely(error)) {
- ClearPageUptodate(page);
- SetPageError(page);
+ folio_clear_uptodate(folio);
+ folio_set_error(folio);
} else {
- iomap_set_range_uptodate(page, bvec->bv_offset, bvec->bv_len);
+ iomap_set_range_uptodate(folio, iop, offset, len);
}
- if (!iop || atomic_sub_and_test(bvec->bv_len, &iop->read_bytes_pending))
- unlock_page(page);
+ if (!iop || atomic_sub_and_test(len, &iop->read_bytes_pending))
+ folio_unlock(folio);
}
-static void
-iomap_read_end_io(struct bio *bio)
+static void iomap_read_end_io(struct bio *bio)
{
int error = blk_status_to_errno(bio->bi_status);
- struct bio_vec *bvec;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
- bio_for_each_segment_all(bvec, bio, iter_all)
- iomap_read_page_end_io(bvec, error);
+ bio_for_each_folio_all(fi, bio)
+ iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
bio_put(bio);
}
struct iomap_readpage_ctx {
- struct page *cur_page;
- bool cur_page_in_bio;
+ struct folio *cur_folio;
+ bool cur_folio_in_bio;
struct bio *bio;
struct readahead_control *rac;
};
@@ -208,21 +199,23 @@ struct iomap_readpage_ctx {
/**
* iomap_read_inline_data - copy inline data into the page cache
* @iter: iteration structure
- * @page: page to copy to
+ * @folio: folio to copy to
*
- * Copy the inline data in @iter into @page and zero out the rest of the page.
+ * Copy the inline data in @iter into @folio and zero out the rest of the folio.
* Only a single IOMAP_INLINE extent is allowed at the end of each file.
* Returns zero for success to complete the read, or the usual negative errno.
*/
static int iomap_read_inline_data(const struct iomap_iter *iter,
- struct page *page)
+ struct folio *folio)
{
+ struct iomap_page *iop;
const struct iomap *iomap = iomap_iter_srcmap(iter);
size_t size = i_size_read(iter->inode) - iomap->offset;
size_t poff = offset_in_page(iomap->offset);
+ size_t offset = offset_in_folio(folio, iomap->offset);
void *addr;
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
return 0;
if (WARN_ON_ONCE(size > PAGE_SIZE - poff))
@@ -232,14 +225,16 @@ static int iomap_read_inline_data(const struct iomap_iter *iter,
return -EIO;
if (WARN_ON_ONCE(size > iomap->length))
return -EIO;
- if (poff > 0)
- iomap_page_create(iter->inode, page);
+ if (offset > 0)
+ iop = iomap_page_create(iter->inode, folio);
+ else
+ iop = to_iomap_page(folio);
- addr = kmap_local_page(page) + poff;
+ addr = kmap_local_folio(folio, offset);
memcpy(addr, iomap->inline_data, size);
memset(addr + size, 0, PAGE_SIZE - poff - size);
kunmap_local(addr);
- iomap_set_range_uptodate(page, poff, PAGE_SIZE - poff);
+ iomap_set_range_uptodate(folio, iop, offset, PAGE_SIZE - poff);
return 0;
}
@@ -259,36 +254,36 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
const struct iomap *iomap = &iter->iomap;
loff_t pos = iter->pos + offset;
loff_t length = iomap_length(iter) - offset;
- struct page *page = ctx->cur_page;
+ struct folio *folio = ctx->cur_folio;
struct iomap_page *iop;
loff_t orig_pos = pos;
- unsigned poff, plen;
+ size_t poff, plen;
sector_t sector;
if (iomap->type == IOMAP_INLINE)
- return iomap_read_inline_data(iter, page);
+ return iomap_read_inline_data(iter, folio);
/* zero post-eof blocks as the page may be mapped */
- iop = iomap_page_create(iter->inode, page);
- iomap_adjust_read_range(iter->inode, iop, &pos, length, &poff, &plen);
+ iop = iomap_page_create(iter->inode, folio);
+ iomap_adjust_read_range(iter->inode, folio, &pos, length, &poff, &plen);
if (plen == 0)
goto done;
if (iomap_block_needs_zeroing(iter, pos)) {
- zero_user(page, poff, plen);
- iomap_set_range_uptodate(page, poff, plen);
+ folio_zero_range(folio, poff, plen);
+ iomap_set_range_uptodate(folio, iop, poff, plen);
goto done;
}
- ctx->cur_page_in_bio = true;
+ ctx->cur_folio_in_bio = true;
if (iop)
atomic_add(plen, &iop->read_bytes_pending);
sector = iomap_sector(iomap, pos);
if (!ctx->bio ||
bio_end_sector(ctx->bio) != sector ||
- bio_add_page(ctx->bio, page, plen, poff) != plen) {
- gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL);
+ !bio_add_folio(ctx->bio, folio, plen, poff)) {
+ gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
gfp_t orig_gfp = gfp;
unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);
@@ -311,8 +306,9 @@ static loff_t iomap_readpage_iter(const struct iomap_iter *iter,
ctx->bio->bi_iter.bi_sector = sector;
bio_set_dev(ctx->bio, iomap->bdev);
ctx->bio->bi_end_io = iomap_read_end_io;
- __bio_add_page(ctx->bio, page, plen, poff);
+ bio_add_folio(ctx->bio, folio, plen, poff);
}
+
done:
/*
* Move the caller beyond our range so that it keeps making progress.
@@ -326,30 +322,31 @@ done:
int
iomap_readpage(struct page *page, const struct iomap_ops *ops)
{
+ struct folio *folio = page_folio(page);
struct iomap_iter iter = {
- .inode = page->mapping->host,
- .pos = page_offset(page),
- .len = PAGE_SIZE,
+ .inode = folio->mapping->host,
+ .pos = folio_pos(folio),
+ .len = folio_size(folio),
};
struct iomap_readpage_ctx ctx = {
- .cur_page = page,
+ .cur_folio = folio,
};
int ret;
- trace_iomap_readpage(page->mapping->host, 1);
+ trace_iomap_readpage(iter.inode, 1);
while ((ret = iomap_iter(&iter, ops)) > 0)
iter.processed = iomap_readpage_iter(&iter, &ctx, 0);
if (ret < 0)
- SetPageError(page);
+ folio_set_error(folio);
if (ctx.bio) {
submit_bio(ctx.bio);
- WARN_ON_ONCE(!ctx.cur_page_in_bio);
+ WARN_ON_ONCE(!ctx.cur_folio_in_bio);
} else {
- WARN_ON_ONCE(ctx.cur_page_in_bio);
- unlock_page(page);
+ WARN_ON_ONCE(ctx.cur_folio_in_bio);
+ folio_unlock(folio);
}
/*
@@ -368,15 +365,15 @@ static loff_t iomap_readahead_iter(const struct iomap_iter *iter,
loff_t done, ret;
for (done = 0; done < length; done += ret) {
- if (ctx->cur_page && offset_in_page(iter->pos + done) == 0) {
- if (!ctx->cur_page_in_bio)
- unlock_page(ctx->cur_page);
- put_page(ctx->cur_page);
- ctx->cur_page = NULL;
+ if (ctx->cur_folio &&
+ offset_in_folio(ctx->cur_folio, iter->pos + done) == 0) {
+ if (!ctx->cur_folio_in_bio)
+ folio_unlock(ctx->cur_folio);
+ ctx->cur_folio = NULL;
}
- if (!ctx->cur_page) {
- ctx->cur_page = readahead_page(ctx->rac);
- ctx->cur_page_in_bio = false;
+ if (!ctx->cur_folio) {
+ ctx->cur_folio = readahead_folio(ctx->rac);
+ ctx->cur_folio_in_bio = false;
}
ret = iomap_readpage_iter(iter, ctx, done);
if (ret <= 0)
@@ -419,10 +416,9 @@ void iomap_readahead(struct readahead_control *rac, const struct iomap_ops *ops)
if (ctx.bio)
submit_bio(ctx.bio);
- if (ctx.cur_page) {
- if (!ctx.cur_page_in_bio)
- unlock_page(ctx.cur_page);
- put_page(ctx.cur_page);
+ if (ctx.cur_folio) {
+ if (!ctx.cur_folio_in_bio)
+ folio_unlock(ctx.cur_folio);
}
}
EXPORT_SYMBOL_GPL(iomap_readahead);
@@ -438,7 +434,8 @@ int
iomap_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count)
{
- struct iomap_page *iop = to_iomap_page(page);
+ struct folio *folio = page_folio(page);
+ struct iomap_page *iop = to_iomap_page(folio);
struct inode *inode = page->mapping->host;
unsigned len, first, last;
unsigned i;
@@ -464,36 +461,49 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
int
iomap_releasepage(struct page *page, gfp_t gfp_mask)
{
- trace_iomap_releasepage(page->mapping->host, page_offset(page),
- PAGE_SIZE);
+ struct folio *folio = page_folio(page);
+
+ trace_iomap_releasepage(folio->mapping->host, folio_pos(folio),
+ folio_size(folio));
/*
* mm accommodates an old ext3 case where clean pages might not have had
* the dirty bit cleared. Thus, it can send actual dirty pages to
* ->releasepage() via shrink_active_list(); skip those here.
*/
- if (PageDirty(page) || PageWriteback(page))
+ if (folio_test_dirty(folio) || folio_test_writeback(folio))
return 0;
- iomap_page_release(page);
+ iomap_page_release(folio);
return 1;
}
EXPORT_SYMBOL_GPL(iomap_releasepage);
-void
-iomap_invalidatepage(struct page *page, unsigned int offset, unsigned int len)
+void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len)
{
- trace_iomap_invalidatepage(page->mapping->host, offset, len);
+ trace_iomap_invalidatepage(folio->mapping->host, offset, len);
/*
- * If we're invalidating the entire page, clear the dirty state from it
- * and release it to avoid unnecessary buildup of the LRU.
+ * If we're invalidating the entire folio, clear the dirty state
+ * from it and release it to avoid unnecessary buildup of the LRU.
*/
- if (offset == 0 && len == PAGE_SIZE) {
- WARN_ON_ONCE(PageWriteback(page));
- cancel_dirty_page(page);
- iomap_page_release(page);
+ if (offset == 0 && len == folio_size(folio)) {
+ WARN_ON_ONCE(folio_test_writeback(folio));
+ folio_cancel_dirty(folio);
+ iomap_page_release(folio);
+ } else if (folio_test_large(folio)) {
+ /* Must release the iop so the page can be split */
+ WARN_ON_ONCE(!folio_test_uptodate(folio) &&
+ folio_test_dirty(folio));
+ iomap_page_release(folio);
}
}
+EXPORT_SYMBOL_GPL(iomap_invalidate_folio);
+
+void iomap_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int len)
+{
+ iomap_invalidate_folio(page_folio(page), offset, len);
+}
EXPORT_SYMBOL_GPL(iomap_invalidatepage);
#ifdef CONFIG_MIGRATION
@@ -501,19 +511,21 @@ int
iomap_migrate_page(struct address_space *mapping, struct page *newpage,
struct page *page, enum migrate_mode mode)
{
+ struct folio *folio = page_folio(page);
+ struct folio *newfolio = page_folio(newpage);
int ret;
- ret = migrate_page_move_mapping(mapping, newpage, page, 0);
+ ret = folio_migrate_mapping(mapping, newfolio, folio, 0);
if (ret != MIGRATEPAGE_SUCCESS)
return ret;
- if (page_has_private(page))
- attach_page_private(newpage, detach_page_private(page));
+ if (folio_test_private(folio))
+ folio_attach_private(newfolio, folio_detach_private(folio));
if (mode != MIGRATE_SYNC_NO_COPY)
- migrate_page_copy(newpage, page);
+ folio_migrate_copy(newfolio, folio);
else
- migrate_page_states(newpage, page);
+ folio_migrate_flags(newfolio, folio);
return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL_GPL(iomap_migrate_page);
@@ -532,9 +544,8 @@ iomap_write_failed(struct inode *inode, loff_t pos, unsigned len)
truncate_pagecache_range(inode, max(pos, i_size), pos + len);
}
-static int
-iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
- unsigned plen, const struct iomap *iomap)
+static int iomap_read_folio_sync(loff_t block_start, struct folio *folio,
+ size_t poff, size_t plen, const struct iomap *iomap)
{
struct bio_vec bvec;
struct bio bio;
@@ -543,26 +554,27 @@ iomap_read_page_sync(loff_t block_start, struct page *page, unsigned poff,
bio.bi_opf = REQ_OP_READ;
bio.bi_iter.bi_sector = iomap_sector(iomap, block_start);
bio_set_dev(&bio, iomap->bdev);
- __bio_add_page(&bio, page, plen, poff);
+ bio_add_folio(&bio, folio, plen, poff);
return submit_bio_wait(&bio);
}
static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
- unsigned len, struct page *page)
+ size_t len, struct folio *folio)
{
const struct iomap *srcmap = iomap_iter_srcmap(iter);
- struct iomap_page *iop = iomap_page_create(iter->inode, page);
+ struct iomap_page *iop = iomap_page_create(iter->inode, folio);
loff_t block_size = i_blocksize(iter->inode);
loff_t block_start = round_down(pos, block_size);
loff_t block_end = round_up(pos + len, block_size);
- unsigned from = offset_in_page(pos), to = from + len, poff, plen;
+ size_t from = offset_in_folio(folio, pos), to = from + len;
+ size_t poff, plen;
- if (PageUptodate(page))
+ if (folio_test_uptodate(folio))
return 0;
- ClearPageError(page);
+ folio_clear_error(folio);
do {
- iomap_adjust_read_range(iter->inode, iop, &block_start,
+ iomap_adjust_read_range(iter->inode, folio, &block_start,
block_end - block_start, &poff, &plen);
if (plen == 0)
break;
@@ -575,34 +587,35 @@ static int __iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
if (iomap_block_needs_zeroing(iter, block_start)) {
if (WARN_ON_ONCE(iter->flags & IOMAP_UNSHARE))
return -EIO;
- zero_user_segments(page, poff, from, to, poff + plen);
+ folio_zero_segments(folio, poff, from, to, poff + plen);
} else {
- int status = iomap_read_page_sync(block_start, page,
+ int status = iomap_read_folio_sync(block_start, folio,
poff, plen, srcmap);
if (status)
return status;
}
- iomap_set_range_uptodate(page, poff, plen);
+ iomap_set_range_uptodate(folio, iop, poff, plen);
} while ((block_start += plen) < block_end);
return 0;
}
static int iomap_write_begin_inline(const struct iomap_iter *iter,
- struct page *page)
+ struct folio *folio)
{
/* needs more work for the tailpacking case; disable for now */
if (WARN_ON_ONCE(iomap_iter_srcmap(iter)->offset != 0))
return -EIO;
- return iomap_read_inline_data(iter, page);
+ return iomap_read_inline_data(iter, folio);
}
static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
- unsigned len, struct page **pagep)
+ size_t len, struct folio **foliop)
{
const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
- struct page *page;
+ struct folio *folio;
+ unsigned fgp = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE | FGP_NOFS;
int status = 0;
BUG_ON(pos + len > iter->iomap.offset + iter->iomap.length);
@@ -612,35 +625,40 @@ static int iomap_write_begin(const struct iomap_iter *iter, loff_t pos,
if (fatal_signal_pending(current))
return -EINTR;
+ if (!mapping_large_folio_support(iter->inode->i_mapping))
+ len = min_t(size_t, len, PAGE_SIZE - offset_in_page(pos));
+
if (page_ops && page_ops->page_prepare) {
status = page_ops->page_prepare(iter->inode, pos, len);
if (status)
return status;
}
- page = grab_cache_page_write_begin(iter->inode->i_mapping,
- pos >> PAGE_SHIFT, AOP_FLAG_NOFS);
- if (!page) {
+ folio = __filemap_get_folio(iter->inode->i_mapping, pos >> PAGE_SHIFT,
+ fgp, mapping_gfp_mask(iter->inode->i_mapping));
+ if (!folio) {
status = -ENOMEM;
goto out_no_page;
}
+ if (pos + len > folio_pos(folio) + folio_size(folio))
+ len = folio_pos(folio) + folio_size(folio) - pos;
if (srcmap->type == IOMAP_INLINE)
- status = iomap_write_begin_inline(iter, page);
+ status = iomap_write_begin_inline(iter, folio);
else if (srcmap->flags & IOMAP_F_BUFFER_HEAD)
- status = __block_write_begin_int(page, pos, len, NULL, srcmap);
+ status = __block_write_begin_int(folio, pos, len, NULL, srcmap);
else
- status = __iomap_write_begin(iter, pos, len, page);
+ status = __iomap_write_begin(iter, pos, len, folio);
if (unlikely(status))
goto out_unlock;
- *pagep = page;
+ *foliop = folio;
return 0;
out_unlock:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
iomap_write_failed(iter->inode, pos, len);
out_no_page:
@@ -650,9 +668,10 @@ out_no_page:
}
static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
- size_t copied, struct page *page)
+ size_t copied, struct folio *folio)
{
- flush_dcache_page(page);
+ struct iomap_page *iop = to_iomap_page(folio);
+ flush_dcache_folio(folio);
/*
* The blocks that were entirely written will now be uptodate, so we
@@ -665,24 +684,24 @@ static size_t __iomap_write_end(struct inode *inode, loff_t pos, size_t len,
* non-uptodate page as a zero-length write, and force the caller to
* redo the whole thing.
*/
- if (unlikely(copied < len && !PageUptodate(page)))
+ if (unlikely(copied < len && !folio_test_uptodate(folio)))
return 0;
- iomap_set_range_uptodate(page, offset_in_page(pos), len);
- __set_page_dirty_nobuffers(page);
+ iomap_set_range_uptodate(folio, iop, offset_in_folio(folio, pos), len);
+ filemap_dirty_folio(inode->i_mapping, folio);
return copied;
}
static size_t iomap_write_end_inline(const struct iomap_iter *iter,
- struct page *page, loff_t pos, size_t copied)
+ struct folio *folio, loff_t pos, size_t copied)
{
const struct iomap *iomap = &iter->iomap;
void *addr;
- WARN_ON_ONCE(!PageUptodate(page));
+ WARN_ON_ONCE(!folio_test_uptodate(folio));
BUG_ON(!iomap_inline_data_valid(iomap));
- flush_dcache_page(page);
- addr = kmap_local_page(page) + pos;
+ flush_dcache_folio(folio);
+ addr = kmap_local_folio(folio, pos);
memcpy(iomap_inline_data(iomap, pos), addr, copied);
kunmap_local(addr);
@@ -692,7 +711,7 @@ static size_t iomap_write_end_inline(const struct iomap_iter *iter,
/* Returns the number of bytes copied. May be 0. Cannot be an errno. */
static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
- size_t copied, struct page *page)
+ size_t copied, struct folio *folio)
{
const struct iomap_page_ops *page_ops = iter->iomap.page_ops;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
@@ -700,12 +719,12 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
size_t ret;
if (srcmap->type == IOMAP_INLINE) {
- ret = iomap_write_end_inline(iter, page, pos, copied);
+ ret = iomap_write_end_inline(iter, folio, pos, copied);
} else if (srcmap->flags & IOMAP_F_BUFFER_HEAD) {
ret = block_write_end(NULL, iter->inode->i_mapping, pos, len,
- copied, page, NULL);
+ copied, &folio->page, NULL);
} else {
- ret = __iomap_write_end(iter->inode, pos, len, copied, page);
+ ret = __iomap_write_end(iter->inode, pos, len, copied, folio);
}
/*
@@ -717,13 +736,13 @@ static size_t iomap_write_end(struct iomap_iter *iter, loff_t pos, size_t len,
i_size_write(iter->inode, pos + ret);
iter->iomap.flags |= IOMAP_F_SIZE_CHANGED;
}
- unlock_page(page);
+ folio_unlock(folio);
if (old_size < pos)
pagecache_isize_extended(iter->inode, old_size, pos);
if (page_ops && page_ops->page_done)
- page_ops->page_done(iter->inode, pos, ret, page);
- put_page(page);
+ page_ops->page_done(iter->inode, pos, ret, &folio->page);
+ folio_put(folio);
if (ret < len)
iomap_write_failed(iter->inode, pos, len);
@@ -738,6 +757,7 @@ static loff_t iomap_write_iter(struct iomap_iter *iter, struct iov_iter *i)
long status = 0;
do {
+ struct folio *folio;
struct page *page;
unsigned long offset; /* Offset into pagecache page */
unsigned long bytes; /* Bytes to write to page */
@@ -761,16 +781,17 @@ again:
break;
}
- status = iomap_write_begin(iter, pos, bytes, &page);
+ status = iomap_write_begin(iter, pos, bytes, &folio);
if (unlikely(status))
break;
+ page = folio_file_page(folio, pos >> PAGE_SHIFT);
if (mapping_writably_mapped(iter->inode->i_mapping))
flush_dcache_page(page);
copied = copy_page_from_iter_atomic(page, offset, bytes, i);
- status = iomap_write_end(iter, pos, bytes, copied, page);
+ status = iomap_write_end(iter, pos, bytes, copied, folio);
if (unlikely(copied != status))
iov_iter_revert(i, copied - status);
@@ -836,13 +857,13 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
do {
unsigned long offset = offset_in_page(pos);
unsigned long bytes = min_t(loff_t, PAGE_SIZE - offset, length);
- struct page *page;
+ struct folio *folio;
- status = iomap_write_begin(iter, pos, bytes, &page);
+ status = iomap_write_begin(iter, pos, bytes, &folio);
if (unlikely(status))
return status;
- status = iomap_write_end(iter, pos, bytes, bytes, page);
+ status = iomap_write_end(iter, pos, bytes, bytes, folio);
if (WARN_ON_ONCE(status == 0))
return -EIO;
@@ -876,26 +897,8 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
}
EXPORT_SYMBOL_GPL(iomap_file_unshare);
-static s64 __iomap_zero_iter(struct iomap_iter *iter, loff_t pos, u64 length)
-{
- struct page *page;
- int status;
- unsigned offset = offset_in_page(pos);
- unsigned bytes = min_t(u64, PAGE_SIZE - offset, length);
-
- status = iomap_write_begin(iter, pos, bytes, &page);
- if (status)
- return status;
-
- zero_user(page, offset, bytes);
- mark_page_accessed(page);
-
- return iomap_write_end(iter, pos, bytes, bytes, page);
-}
-
static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
{
- struct iomap *iomap = &iter->iomap;
const struct iomap *srcmap = iomap_iter_srcmap(iter);
loff_t pos = iter->pos;
loff_t length = iomap_length(iter);
@@ -906,14 +909,25 @@ static loff_t iomap_zero_iter(struct iomap_iter *iter, bool *did_zero)
return length;
do {
- s64 bytes;
+ struct folio *folio;
+ int status;
+ size_t offset;
+ size_t bytes = min_t(u64, SIZE_MAX, length);
- if (IS_DAX(iter->inode))
- bytes = dax_iomap_zero(pos, length, iomap);
- else
- bytes = __iomap_zero_iter(iter, pos, length);
- if (bytes < 0)
- return bytes;
+ status = iomap_write_begin(iter, pos, bytes, &folio);
+ if (status)
+ return status;
+
+ offset = offset_in_folio(folio, pos);
+ if (bytes > folio_size(folio) - offset)
+ bytes = folio_size(folio) - offset;
+
+ folio_zero_range(folio, offset, bytes);
+ folio_mark_accessed(folio);
+
+ bytes = iomap_write_end(iter, pos, bytes, bytes, folio);
+ if (WARN_ON_ONCE(bytes == 0))
+ return -EIO;
pos += bytes;
length -= bytes;
@@ -957,21 +971,21 @@ iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
}
EXPORT_SYMBOL_GPL(iomap_truncate_page);
-static loff_t iomap_page_mkwrite_iter(struct iomap_iter *iter,
- struct page *page)
+static loff_t iomap_folio_mkwrite_iter(struct iomap_iter *iter,
+ struct folio *folio)
{
loff_t length = iomap_length(iter);
int ret;
if (iter->iomap.flags & IOMAP_F_BUFFER_HEAD) {
- ret = __block_write_begin_int(page, iter->pos, length, NULL,
+ ret = __block_write_begin_int(folio, iter->pos, length, NULL,
&iter->iomap);
if (ret)
return ret;
- block_commit_write(page, 0, length);
+ block_commit_write(&folio->page, 0, length);
} else {
- WARN_ON_ONCE(!PageUptodate(page));
- set_page_dirty(page);
+ WARN_ON_ONCE(!folio_test_uptodate(folio));
+ folio_mark_dirty(folio);
}
return length;
@@ -983,44 +997,43 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops)
.inode = file_inode(vmf->vma->vm_file),
.flags = IOMAP_WRITE | IOMAP_FAULT,
};
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
ssize_t ret;
- lock_page(page);
- ret = page_mkwrite_check_truncate(page, iter.inode);
+ folio_lock(folio);
+ ret = folio_mkwrite_check_truncate(folio, iter.inode);
if (ret < 0)
goto out_unlock;
- iter.pos = page_offset(page);
+ iter.pos = folio_pos(folio);
iter.len = ret;
while ((ret = iomap_iter(&iter, ops)) > 0)
- iter.processed = iomap_page_mkwrite_iter(&iter, page);
+ iter.processed = iomap_folio_mkwrite_iter(&iter, folio);
if (ret < 0)
goto out_unlock;
- wait_for_stable_page(page);
+ folio_wait_stable(folio);
return VM_FAULT_LOCKED;
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
return block_page_mkwrite_return(ret);
}
EXPORT_SYMBOL_GPL(iomap_page_mkwrite);
-static void
-iomap_finish_page_writeback(struct inode *inode, struct page *page,
- int error, unsigned int len)
+static void iomap_finish_folio_write(struct inode *inode, struct folio *folio,
+ size_t len, int error)
{
- struct iomap_page *iop = to_iomap_page(page);
+ struct iomap_page *iop = to_iomap_page(folio);
if (error) {
- SetPageError(page);
+ folio_set_error(folio);
mapping_set_error(inode->i_mapping, error);
}
- WARN_ON_ONCE(i_blocks_per_page(inode, page) > 1 && !iop);
+ WARN_ON_ONCE(i_blocks_per_folio(inode, folio) > 1 && !iop);
WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) <= 0);
if (!iop || atomic_sub_and_test(len, &iop->write_bytes_pending))
- end_page_writeback(page);
+ folio_end_writeback(folio);
}
/*
@@ -1028,7 +1041,7 @@ iomap_finish_page_writeback(struct inode *inode, struct page *page,
* state, release holds on bios, and finally free up memory. Do not use the
* ioend after this.
*/
-static void
+static u32
iomap_finish_ioend(struct iomap_ioend *ioend, int error)
{
struct inode *inode = ioend->io_inode;
@@ -1037,10 +1050,10 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
u64 start = bio->bi_iter.bi_sector;
loff_t offset = ioend->io_offset;
bool quiet = bio_flagged(bio, BIO_QUIET);
+ u32 folio_count = 0;
for (bio = &ioend->io_inline_bio; bio; bio = next) {
- struct bio_vec *bv;
- struct bvec_iter_all iter_all;
+ struct folio_iter fi;
/*
* For the last bio, bi_private points to the ioend, so we
@@ -1051,10 +1064,12 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
else
next = bio->bi_private;
- /* walk each page on bio, ending page IO on them */
- bio_for_each_segment_all(bv, bio, iter_all)
- iomap_finish_page_writeback(inode, bv->bv_page, error,
- bv->bv_len);
+ /* walk all folios in bio, ending page IO on them */
+ bio_for_each_folio_all(fi, bio) {
+ iomap_finish_folio_write(inode, fi.folio, fi.length,
+ error);
+ folio_count++;
+ }
bio_put(bio);
}
/* The ioend has been freed by bio_put() */
@@ -1064,20 +1079,36 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error)
"%s: writeback error on inode %lu, offset %lld, sector %llu",
inode->i_sb->s_id, inode->i_ino, offset, start);
}
+ return folio_count;
}
+/*
+ * Ioend completion routine for merged bios. This can only be called from task
+ * contexts as merged ioends can be of unbounded length. Hence we have to break up
+ * the writeback completions into manageable chunks to avoid long scheduler
+ * holdoffs. We aim to keep scheduler holdoffs below 10ms so that we get
+ * good batch processing throughput without creating adverse scheduler latency
+ * conditions.
+ */
void
iomap_finish_ioends(struct iomap_ioend *ioend, int error)
{
struct list_head tmp;
+ u32 completions;
+
+ might_sleep();
list_replace_init(&ioend->io_list, &tmp);
- iomap_finish_ioend(ioend, error);
+ completions = iomap_finish_ioend(ioend, error);
while (!list_empty(&tmp)) {
+ if (completions > IOEND_BATCH_SIZE * 8) {
+ cond_resched();
+ completions = 0;
+ }
ioend = list_first_entry(&tmp, struct iomap_ioend, io_list);
list_del_init(&ioend->io_list);
- iomap_finish_ioend(ioend, error);
+ completions += iomap_finish_ioend(ioend, error);
}
}
EXPORT_SYMBOL_GPL(iomap_finish_ioends);
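
A userspace caricature of the batching above: process completions in fixed-size chunks and yield the CPU between chunks, so one giant merged ioend cannot monopolize the scheduler. IOEND_BATCH_SIZE and the x8 threshold mirror the patch; sched_yield() stands in for cond_resched():

#include <sched.h>
#include <stdio.h>

#define BATCH 4096

int main(void)
{
	unsigned long total = 100000, done = 0, since_yield = 0;

	while (done < total) {
		/* "complete" one ioend's worth of folios */
		unsigned long n = (total - done < BATCH) ? total - done : BATCH;

		done += n;
		since_yield += n;
		if (since_yield > BATCH * 8) {	/* bound the holdoff */
			sched_yield();
			since_yield = 0;
		}
	}
	printf("completed %lu folios\n", done);
	return 0;
}
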
@@ -1098,6 +1129,18 @@ iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next)
return false;
if (ioend->io_offset + ioend->io_size != next->io_offset)
return false;
+ /*
+ * Do not merge physically discontiguous ioends. The filesystem
+ * completion functions will have to iterate the physical
+ * discontiguities even if we merge the ioends at a logical level, so
+ * we don't gain anything by merging physical discontiguities here.
+ *
+ * We cannot use bio->bi_iter.bi_sector here as it is modified during
+ * submission so does not point to the start sector of the bio at
+ * completion.
+ */
+ if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector)
+ return false;
return true;
}
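
Worked example for the new physical-contiguity check: with 512-byte sectors, an ioend at io_sector 1000 with io_size 8192 bytes spans 8192 >> 9 = 16 sectors and ends at sector 1016, so a candidate ioend is merged only if its io_sector is exactly 1016 — even when the two ranges are logically adjacent in the file.
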
@@ -1199,8 +1242,10 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc,
ioend->io_flags = wpc->iomap.flags;
ioend->io_inode = inode;
ioend->io_size = 0;
+ ioend->io_folios = 0;
ioend->io_offset = offset;
ioend->io_bio = bio;
+ ioend->io_sector = sector;
return ioend;
}
@@ -1241,6 +1286,13 @@ iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
return false;
if (sector != bio_end_sector(wpc->ioend->io_bio))
return false;
+ /*
+ * Limit ioend bio chain lengths to minimise IO completion latency. This
+ * also prevents long tight loops ending page writeback on all the
+ * folios in the ioend.
+ */
+ if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE)
+ return false;
return true;
}
@@ -1249,29 +1301,29 @@ iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset,
* first; otherwise finish off the current ioend and start another.
*/
static void
-iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
+iomap_add_to_ioend(struct inode *inode, loff_t pos, struct folio *folio,
struct iomap_page *iop, struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct list_head *iolist)
{
- sector_t sector = iomap_sector(&wpc->iomap, offset);
+ sector_t sector = iomap_sector(&wpc->iomap, pos);
unsigned len = i_blocksize(inode);
- unsigned poff = offset & (PAGE_SIZE - 1);
+ size_t poff = offset_in_folio(folio, pos);
- if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, offset, sector)) {
+ if (!wpc->ioend || !iomap_can_add_to_ioend(wpc, pos, sector)) {
if (wpc->ioend)
list_add(&wpc->ioend->io_list, iolist);
- wpc->ioend = iomap_alloc_ioend(inode, wpc, offset, sector, wbc);
+ wpc->ioend = iomap_alloc_ioend(inode, wpc, pos, sector, wbc);
}
- if (bio_add_page(wpc->ioend->io_bio, page, len, poff) != len) {
+ if (!bio_add_folio(wpc->ioend->io_bio, folio, len, poff)) {
wpc->ioend->io_bio = iomap_chain_bio(wpc->ioend->io_bio);
- __bio_add_page(wpc->ioend->io_bio, page, len, poff);
+ bio_add_folio(wpc->ioend->io_bio, folio, len, poff);
}
if (iop)
atomic_add(len, &iop->write_bytes_pending);
wpc->ioend->io_size += len;
- wbc_account_cgroup_owner(wbc, page, len);
+ wbc_account_cgroup_owner(wbc, &folio->page, len);
}
/*
@@ -1293,44 +1345,45 @@ iomap_add_to_ioend(struct inode *inode, loff_t offset, struct page *page,
static int
iomap_writepage_map(struct iomap_writepage_ctx *wpc,
struct writeback_control *wbc, struct inode *inode,
- struct page *page, u64 end_offset)
+ struct folio *folio, u64 end_pos)
{
- struct iomap_page *iop = iomap_page_create(inode, page);
+ struct iomap_page *iop = iomap_page_create(inode, folio);
struct iomap_ioend *ioend, *next;
unsigned len = i_blocksize(inode);
- u64 file_offset; /* file offset of page */
+ unsigned nblocks = i_blocks_per_folio(inode, folio);
+ u64 pos = folio_pos(folio);
int error = 0, count = 0, i;
LIST_HEAD(submit_list);
WARN_ON_ONCE(iop && atomic_read(&iop->write_bytes_pending) != 0);
/*
- * Walk through the page to find areas to write back. If we run off the
- * end of the current map or find the current map invalid, grab a new
- * one.
+ * Walk through the folio to find areas to write back. If we
+ * run off the end of the current map or find the current map
+ * invalid, grab a new one.
*/
- for (i = 0, file_offset = page_offset(page);
- i < (PAGE_SIZE >> inode->i_blkbits) && file_offset < end_offset;
- i++, file_offset += len) {
+ for (i = 0; i < nblocks && pos < end_pos; i++, pos += len) {
if (iop && !test_bit(i, iop->uptodate))
continue;
- error = wpc->ops->map_blocks(wpc, inode, file_offset);
+ error = wpc->ops->map_blocks(wpc, inode, pos);
if (error)
break;
if (WARN_ON_ONCE(wpc->iomap.type == IOMAP_INLINE))
continue;
if (wpc->iomap.type == IOMAP_HOLE)
continue;
- iomap_add_to_ioend(inode, file_offset, page, iop, wpc, wbc,
+ iomap_add_to_ioend(inode, pos, folio, iop, wpc, wbc,
&submit_list);
count++;
}
+ if (count)
+ wpc->ioend->io_folios++;
WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list));
- WARN_ON_ONCE(!PageLocked(page));
- WARN_ON_ONCE(PageWriteback(page));
- WARN_ON_ONCE(PageDirty(page));
+ WARN_ON_ONCE(!folio_test_locked(folio));
+ WARN_ON_ONCE(folio_test_writeback(folio));
+ WARN_ON_ONCE(folio_test_dirty(folio));
/*
* We cannot cancel the ioend directly here on error. We may have
@@ -1345,17 +1398,17 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
* won't be affected by I/O completion and we must unlock it
* now.
*/
- if (wpc->ops->discard_page)
- wpc->ops->discard_page(page, file_offset);
+ if (wpc->ops->discard_folio)
+ wpc->ops->discard_folio(folio, pos);
if (!count) {
- ClearPageUptodate(page);
- unlock_page(page);
+ folio_clear_uptodate(folio);
+ folio_unlock(folio);
goto done;
}
}
- set_page_writeback(page);
- unlock_page(page);
+ folio_start_writeback(folio);
+ folio_unlock(folio);
/*
* Preserve the original error if there was one; catch
@@ -1376,9 +1429,9 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc,
* with a partial page truncate on a sub-page block sized filesystem.
*/
if (!count)
- end_page_writeback(page);
+ folio_end_writeback(folio);
done:
- mapping_set_error(page->mapping, error);
+ mapping_set_error(folio->mapping, error);
return error;
}
@@ -1392,16 +1445,15 @@ done:
static int
iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
{
+ struct folio *folio = page_folio(page);
struct iomap_writepage_ctx *wpc = data;
- struct inode *inode = page->mapping->host;
- pgoff_t end_index;
- u64 end_offset;
- loff_t offset;
+ struct inode *inode = folio->mapping->host;
+ u64 end_pos, isize;
- trace_iomap_writepage(inode, page_offset(page), PAGE_SIZE);
+ trace_iomap_writepage(inode, folio_pos(folio), folio_size(folio));
/*
- * Refuse to write the page out if we're called from reclaim context.
+ * Refuse to write the folio out if we're called from reclaim context.
*
* This avoids stack overflows when called from deeply used stacks in
* random callers for direct reclaim or memcg reclaim. We explicitly
@@ -1415,10 +1467,10 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
goto redirty;
/*
- * Is this page beyond the end of the file?
+ * Is this folio beyond the end of the file?
*
- * The page index is less than the end_index, adjust the end_offset
- * to the highest offset that this page should represent.
+ * The folio index is less than the end_index; adjust the end_pos
+ * to the highest offset that this folio should represent.
* -----------------------------------------------------
* | file mapping | <EOF> |
* -----------------------------------------------------
@@ -1427,11 +1479,9 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
* | desired writeback range | see else |
* ---------------------------------^------------------|
*/
- offset = i_size_read(inode);
- end_index = offset >> PAGE_SHIFT;
- if (page->index < end_index)
- end_offset = (loff_t)(page->index + 1) << PAGE_SHIFT;
- else {
+ isize = i_size_read(inode);
+ end_pos = folio_pos(folio) + folio_size(folio);
+ if (end_pos > isize) {
/*
* Check whether the page to write out is beyond or straddles
* i_size.
@@ -1443,7 +1493,8 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
* | | Straddles |
* ---------------------------------^-----------|--------|
*/
- unsigned offset_into_page = offset & (PAGE_SIZE - 1);
+ size_t poff = offset_in_folio(folio, isize);
+ pgoff_t end_index = isize >> PAGE_SHIFT;
/*
* Skip the page if it's fully outside i_size, e.g. due to a
@@ -1462,8 +1513,8 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
* checking if the page is totally beyond i_size or if its
* offset is just equal to the EOF.
*/
- if (page->index > end_index ||
- (page->index == end_index && offset_into_page == 0))
+ if (folio->index > end_index ||
+ (folio->index == end_index && poff == 0))
goto redirty;
/*
@@ -1474,17 +1525,15 @@ iomap_do_writepage(struct page *page, struct writeback_control *wbc, void *data)
* memory is zeroed when mapped, and writes to that region are
* not written out to the file."
*/
- zero_user_segment(page, offset_into_page, PAGE_SIZE);
-
- /* Adjust the end_offset to the end of file */
- end_offset = offset;
+ folio_zero_segment(folio, poff, folio_size(folio));
+ end_pos = isize;
}
- return iomap_writepage_map(wpc, wbc, inode, page, end_offset);
+ return iomap_writepage_map(wpc, wbc, inode, folio, end_pos);
redirty:
- redirty_page_for_writepage(wbc, page);
- unlock_page(page);
+ folio_redirty_for_writepage(wbc, folio);
+ folio_unlock(folio);
return 0;
}
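
The hunks above replace page-based offset arithmetic with folio helpers. A minimal
sketch of that arithmetic, assuming only the generic helpers from <linux/pagemap.h>
(folio_pos(), folio_size(), offset_in_folio(), i_blocks_per_folio()); the function
itself is hypothetical:

	#include <linux/pagemap.h>

	/* Hypothetical helper: derive the writeback window for one folio. */
	static void folio_writeback_window(struct inode *inode, struct folio *folio,
					   u64 *start, u64 *end, unsigned int *nblocks)
	{
		*start = folio_pos(folio);			/* byte offset of folio start */
		*end = folio_pos(folio) + folio_size(folio);	/* one past the last byte */
		*nblocks = i_blocks_per_folio(inode, folio);	/* fs blocks spanned */
		/*
		 * offset_in_folio(folio, pos) yields the byte offset of pos
		 * within the folio, replacing "pos & (PAGE_SIZE - 1)" which
		 * assumed order-0 pages.
		 */
	}
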
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index b4dc51063d36..03ea367df19a 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -6,6 +6,7 @@
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <linux/iomap.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 3cc4ab2ba7f4..5b9408e3b370 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -484,22 +484,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
stats.run.rs_locked);
- spin_lock(&commit_transaction->t_handle_lock);
- while (atomic_read(&commit_transaction->t_updates)) {
- DEFINE_WAIT(wait);
+ /* waits for any t_updates to finish */
+ jbd2_journal_wait_updates(journal);
- prepare_to_wait(&journal->j_wait_updates, &wait,
- TASK_UNINTERRUPTIBLE);
- if (atomic_read(&commit_transaction->t_updates)) {
- spin_unlock(&commit_transaction->t_handle_lock);
- write_unlock(&journal->j_state_lock);
- schedule();
- write_lock(&journal->j_state_lock);
- spin_lock(&commit_transaction->t_handle_lock);
- }
- finish_wait(&journal->j_wait_updates, &wait);
- }
- spin_unlock(&commit_transaction->t_handle_lock);
commit_transaction->t_state = T_SWITCH;
write_unlock(&journal->j_state_lock);
@@ -817,7 +804,7 @@ start_journal_io:
commit_transaction->t_state = T_COMMIT_DFLUSH;
write_unlock(&journal->j_state_lock);
- /*
+ /*
* If the journal is not located on the file system device,
* then we must flush the file system device before we issue
* the commit record
@@ -1170,7 +1157,7 @@ restart_loop:
if (journal->j_commit_callback)
journal->j_commit_callback(journal, commit_transaction);
if (journal->j_fc_cleanup_callback)
- journal->j_fc_cleanup_callback(journal, 1);
+ journal->j_fc_cleanup_callback(journal, 1, commit_transaction->t_tid);
trace_jbd2_end_commit(journal, commit_transaction);
jbd_debug(1, "JBD2: commit %d complete, head %d\n",
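
The fast-commit cleanup callback gains a tid argument in the call sites above.
Assuming the callback type in include/linux/jbd2.h was extended to match, a
filesystem would now implement it along these lines (illustrative name and body):

	/* Illustrative only: tid identifies the committing transaction. */
	static void demo_fc_cleanup(journal_t *journal, int full, tid_t tid)
	{
		if (full)
			pr_debug("full commit of tid %u done\n", tid);
		else
			pr_debug("fast commit for tid %u done\n", tid);
	}
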
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 0b86a4365b66..c2cf74b01ddb 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -771,7 +771,7 @@ static int __jbd2_fc_end_commit(journal_t *journal, tid_t tid, bool fallback)
{
jbd2_journal_unlock_updates(journal);
if (journal->j_fc_cleanup_callback)
- journal->j_fc_cleanup_callback(journal, 0);
+ journal->j_fc_cleanup_callback(journal, 0, tid);
write_lock(&journal->j_state_lock);
journal->j_flags &= ~JBD2_FAST_COMMIT_ONGOING;
if (fallback)
@@ -1212,7 +1212,7 @@ static const struct seq_operations jbd2_seq_info_ops = {
static int jbd2_seq_info_open(struct inode *inode, struct file *file)
{
- journal_t *journal = PDE_DATA(inode);
+ journal_t *journal = pde_data(inode);
struct jbd2_stats_proc_session *s;
int rc, size;
@@ -1287,6 +1287,8 @@ static int jbd2_min_tag_size(void)
/**
* jbd2_journal_shrink_scan()
+ * @shrink: shrinker to work on
+ * @sc: reclaim request to process
*
* Scan the checkpointed buffers on the checkpoint list and release their
* journal_heads.
@@ -1312,6 +1314,8 @@ static unsigned long jbd2_journal_shrink_scan(struct shrinker *shrink,
/**
* jbd2_journal_shrink_count()
+ * @shrink: shrinker to work on
+ * @sc: reclaim request to process
*
* Count the number of checkpoint buffers on the checkpoint list.
*/
@@ -2972,6 +2976,7 @@ struct journal_head *jbd2_journal_grab_journal_head(struct buffer_head *bh)
jbd_unlock_bh_journal_head(bh);
return jh;
}
+EXPORT_SYMBOL(jbd2_journal_grab_journal_head);
static void __journal_remove_journal_head(struct buffer_head *bh)
{
@@ -3024,6 +3029,7 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
jbd_unlock_bh_journal_head(bh);
}
}
+EXPORT_SYMBOL(jbd2_journal_put_journal_head);
/*
* Initialize jbd inode head
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 6a3caedd2285..8e2f8275a253 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -449,7 +449,7 @@ repeat:
}
/* OK, account for the buffers that this operation expects to
- * use and add the handle to the running transaction.
+ * use and add the handle to the running transaction.
*/
update_t_max_wait(transaction, ts);
handle->h_transaction = transaction;
@@ -836,6 +836,35 @@ int jbd2_journal_restart(handle_t *handle, int nblocks)
}
EXPORT_SYMBOL(jbd2_journal_restart);
+/*
+ * Waits for any outstanding t_updates to finish.
+ * This is called with write j_state_lock held.
+ */
+void jbd2_journal_wait_updates(journal_t *journal)
+{
+ transaction_t *commit_transaction = journal->j_running_transaction;
+
+ if (!commit_transaction)
+ return;
+
+ spin_lock(&commit_transaction->t_handle_lock);
+ while (atomic_read(&commit_transaction->t_updates)) {
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&journal->j_wait_updates, &wait,
+ TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&commit_transaction->t_updates)) {
+ spin_unlock(&commit_transaction->t_handle_lock);
+ write_unlock(&journal->j_state_lock);
+ schedule();
+ write_lock(&journal->j_state_lock);
+ spin_lock(&commit_transaction->t_handle_lock);
+ }
+ finish_wait(&journal->j_wait_updates, &wait);
+ }
+ spin_unlock(&commit_transaction->t_handle_lock);
+}
+
/**
* jbd2_journal_lock_updates () - establish a transaction barrier.
* @journal: Journal to establish a barrier on.
@@ -863,27 +892,9 @@ void jbd2_journal_lock_updates(journal_t *journal)
write_lock(&journal->j_state_lock);
}
- /* Wait until there are no running updates */
- while (1) {
- transaction_t *transaction = journal->j_running_transaction;
-
- if (!transaction)
- break;
+ /* Wait until there are no running t_updates */
+ jbd2_journal_wait_updates(journal);
- spin_lock(&transaction->t_handle_lock);
- prepare_to_wait(&journal->j_wait_updates, &wait,
- TASK_UNINTERRUPTIBLE);
- if (!atomic_read(&transaction->t_updates)) {
- spin_unlock(&transaction->t_handle_lock);
- finish_wait(&journal->j_wait_updates, &wait);
- break;
- }
- spin_unlock(&transaction->t_handle_lock);
- write_unlock(&journal->j_state_lock);
- schedule();
- finish_wait(&journal->j_wait_updates, &wait);
- write_lock(&journal->j_state_lock);
- }
write_unlock(&journal->j_state_lock);
/*
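
jbd2_journal_wait_updates() above centralizes the classic prepare_to_wait()/
schedule() loop. Stripped of the j_state_lock and t_handle_lock juggling the
real helper performs, the underlying idiom is (generic sketch, not a jbd2 API):

	#include <linux/wait.h>
	#include <linux/sched.h>

	/* Sleep until *count reaches zero; wakers call wake_up(wq) after decrementing. */
	static void wait_for_zero(wait_queue_head_t *wq, atomic_t *count)
	{
		DEFINE_WAIT(wait);

		while (atomic_read(count)) {
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (atomic_read(count))		/* re-check to avoid lost wakeups */
				schedule();
			finish_wait(wq, &wait);
		}
	}
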
diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
index 2b4d5013dc5d..6da92ecaf66d 100644
--- a/fs/jffs2/background.c
+++ b/fs/jffs2/background.c
@@ -161,5 +161,5 @@ static int jffs2_garbage_collect_thread(void *_c)
spin_lock(&c->erase_completion_lock);
c->gc_task = NULL;
spin_unlock(&c->erase_completion_lock);
- complete_and_exit(&c->gc_thread_exit, 0);
+ kthread_complete_and_exit(&c->gc_thread_exit, 0);
}
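
complete_and_exit() becomes kthread_complete_and_exit() here, part of a tree-wide
rename; the semantics are unchanged. A minimal sketch of the pattern (hypothetical
thread function):

	#include <linux/kthread.h>
	#include <linux/completion.h>
	#include <linux/sched.h>

	static int demo_gc_thread(void *data)
	{
		struct completion *exited = data;

		while (!kthread_should_stop())
			cond_resched();		/* ... do the actual work ... */
		/* signal the waiter and terminate; this call never returns */
		kthread_complete_and_exit(exited, 0);
	}
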
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 8e0a1378a4b1..e6d9772ddb4c 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -17,7 +17,6 @@
#include "kernfs-internal.h"
-DECLARE_RWSEM(kernfs_rwsem);
static DEFINE_SPINLOCK(kernfs_rename_lock); /* kn->parent and ->name */
static char kernfs_pr_cont_buf[PATH_MAX]; /* protected by rename_lock */
static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */
@@ -26,7 +25,7 @@ static DEFINE_SPINLOCK(kernfs_idr_lock); /* root->ino_idr */
static bool kernfs_active(struct kernfs_node *kn)
{
- lockdep_assert_held(&kernfs_rwsem);
+ lockdep_assert_held(&kernfs_root(kn)->kernfs_rwsem);
return atomic_read(&kn->active) >= 0;
}
@@ -457,14 +456,15 @@ void kernfs_put_active(struct kernfs_node *kn)
* return after draining is complete.
*/
static void kernfs_drain(struct kernfs_node *kn)
- __releases(&kernfs_rwsem) __acquires(&kernfs_rwsem)
+ __releases(&kernfs_root(kn)->kernfs_rwsem)
+ __acquires(&kernfs_root(kn)->kernfs_rwsem)
{
struct kernfs_root *root = kernfs_root(kn);
- lockdep_assert_held_write(&kernfs_rwsem);
+ lockdep_assert_held_write(&root->kernfs_rwsem);
WARN_ON_ONCE(kernfs_active(kn));
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
if (kernfs_lockdep(kn)) {
rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
@@ -483,7 +483,7 @@ static void kernfs_drain(struct kernfs_node *kn)
kernfs_drain_open_files(kn);
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
}
/**
@@ -718,11 +718,12 @@ err_unlock:
int kernfs_add_one(struct kernfs_node *kn)
{
struct kernfs_node *parent = kn->parent;
+ struct kernfs_root *root = kernfs_root(parent);
struct kernfs_iattrs *ps_iattr;
bool has_ns;
int ret;
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
ret = -EINVAL;
has_ns = kernfs_ns_enabled(parent);
@@ -753,7 +754,7 @@ int kernfs_add_one(struct kernfs_node *kn)
ps_iattr->ia_mtime = ps_iattr->ia_ctime;
}
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
/*
* Activate the new node unless CREATE_DEACTIVATED is requested.
@@ -767,7 +768,7 @@ int kernfs_add_one(struct kernfs_node *kn)
return 0;
out_unlock:
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
return ret;
}
@@ -788,7 +789,7 @@ static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
bool has_ns = kernfs_ns_enabled(parent);
unsigned int hash;
- lockdep_assert_held(&kernfs_rwsem);
+ lockdep_assert_held(&kernfs_root(parent)->kernfs_rwsem);
if (has_ns != (bool)ns) {
WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
@@ -820,7 +821,7 @@ static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
size_t len;
char *p, *name;
- lockdep_assert_held_read(&kernfs_rwsem);
+ lockdep_assert_held_read(&kernfs_root(parent)->kernfs_rwsem);
/* grab kernfs_rename_lock to piggy back on kernfs_pr_cont_buf */
spin_lock_irq(&kernfs_rename_lock);
@@ -859,11 +860,12 @@ struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
const char *name, const void *ns)
{
struct kernfs_node *kn;
+ struct kernfs_root *root = kernfs_root(parent);
- down_read(&kernfs_rwsem);
+ down_read(&root->kernfs_rwsem);
kn = kernfs_find_ns(parent, name, ns);
kernfs_get(kn);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return kn;
}
@@ -883,11 +885,12 @@ struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent,
const char *path, const void *ns)
{
struct kernfs_node *kn;
+ struct kernfs_root *root = kernfs_root(parent);
- down_read(&kernfs_rwsem);
+ down_read(&root->kernfs_rwsem);
kn = kernfs_walk_ns(parent, path, ns);
kernfs_get(kn);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return kn;
}
@@ -912,6 +915,7 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
return ERR_PTR(-ENOMEM);
idr_init(&root->ino_idr);
+ init_rwsem(&root->kernfs_rwsem);
INIT_LIST_HEAD(&root->supers);
/*
@@ -957,7 +961,13 @@ struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
*/
void kernfs_destroy_root(struct kernfs_root *root)
{
- kernfs_remove(root->kn); /* will also free @root */
+ /*
+ * kernfs_remove() holds the root's kernfs_rwsem, so the root
+ * shouldn't be freed during the operation.
+ */
+ kernfs_get(root->kn);
+ kernfs_remove(root->kn);
+ kernfs_put(root->kn); /* will also free @root */
}
/**
@@ -1035,6 +1045,7 @@ struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
{
struct kernfs_node *kn;
+ struct kernfs_root *root;
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -1046,18 +1057,19 @@ static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
/* If the kernfs parent node has changed discard and
* proceed to ->lookup.
*/
- down_read(&kernfs_rwsem);
spin_lock(&dentry->d_lock);
parent = kernfs_dentry_node(dentry->d_parent);
if (parent) {
+ spin_unlock(&dentry->d_lock);
+ root = kernfs_root(parent);
+ down_read(&root->kernfs_rwsem);
if (kernfs_dir_changed(parent, dentry)) {
- spin_unlock(&dentry->d_lock);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return 0;
}
- }
- spin_unlock(&dentry->d_lock);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
+ } else
+ spin_unlock(&dentry->d_lock);
/* The kernfs parent node hasn't changed, leave the
* dentry negative and return success.
@@ -1066,7 +1078,8 @@ static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
}
kn = kernfs_dentry_node(dentry);
- down_read(&kernfs_rwsem);
+ root = kernfs_root(kn);
+ down_read(&root->kernfs_rwsem);
/* The kernfs node has been deactivated */
if (!kernfs_active(kn))
@@ -1085,10 +1098,10 @@ static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
kernfs_info(dentry->d_sb)->ns != kn->ns)
goto out_bad;
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return 1;
out_bad:
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return 0;
}
@@ -1102,10 +1115,12 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
{
struct kernfs_node *parent = dir->i_private;
struct kernfs_node *kn;
+ struct kernfs_root *root;
struct inode *inode = NULL;
const void *ns = NULL;
- down_read(&kernfs_rwsem);
+ root = kernfs_root(parent);
+ down_read(&root->kernfs_rwsem);
if (kernfs_ns_enabled(parent))
ns = kernfs_info(dir->i_sb)->ns;
@@ -1116,7 +1131,7 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
* create a negative.
*/
if (!kernfs_active(kn)) {
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return NULL;
}
inode = kernfs_get_inode(dir->i_sb, kn);
@@ -1131,7 +1146,7 @@ static struct dentry *kernfs_iop_lookup(struct inode *dir,
*/
if (!IS_ERR(inode))
kernfs_set_rev(parent, dentry);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
/* instantiate and hash (possibly negative) dentry */
return d_splice_alias(inode, dentry);
@@ -1254,7 +1269,7 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
{
struct rb_node *rbn;
- lockdep_assert_held_write(&kernfs_rwsem);
+ lockdep_assert_held_write(&kernfs_root(root)->kernfs_rwsem);
/* if first iteration, visit leftmost descendant which may be root */
if (!pos)
@@ -1289,8 +1304,9 @@ static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
void kernfs_activate(struct kernfs_node *kn)
{
struct kernfs_node *pos;
+ struct kernfs_root *root = kernfs_root(kn);
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
pos = NULL;
while ((pos = kernfs_next_descendant_post(pos, kn))) {
@@ -1304,14 +1320,14 @@ void kernfs_activate(struct kernfs_node *kn)
pos->flags |= KERNFS_ACTIVATED;
}
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
}
static void __kernfs_remove(struct kernfs_node *kn)
{
struct kernfs_node *pos;
- lockdep_assert_held_write(&kernfs_rwsem);
+ lockdep_assert_held_write(&kernfs_root(kn)->kernfs_rwsem);
/*
* Short-circuit if non-root @kn has already finished removal.
@@ -1381,9 +1397,11 @@ static void __kernfs_remove(struct kernfs_node *kn)
*/
void kernfs_remove(struct kernfs_node *kn)
{
- down_write(&kernfs_rwsem);
+ struct kernfs_root *root = kernfs_root(kn);
+
+ down_write(&root->kernfs_rwsem);
__kernfs_remove(kn);
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
}
/**
@@ -1469,8 +1487,9 @@ void kernfs_unbreak_active_protection(struct kernfs_node *kn)
bool kernfs_remove_self(struct kernfs_node *kn)
{
bool ret;
+ struct kernfs_root *root = kernfs_root(kn);
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
kernfs_break_active_protection(kn);
/*
@@ -1498,9 +1517,9 @@ bool kernfs_remove_self(struct kernfs_node *kn)
atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
break;
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
schedule();
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
}
finish_wait(waitq, &wait);
WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
@@ -1513,7 +1532,7 @@ bool kernfs_remove_self(struct kernfs_node *kn)
*/
kernfs_unbreak_active_protection(kn);
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
return ret;
}
@@ -1530,6 +1549,7 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
const void *ns)
{
struct kernfs_node *kn;
+ struct kernfs_root *root;
if (!parent) {
WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
@@ -1537,13 +1557,14 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
return -ENOENT;
}
- down_write(&kernfs_rwsem);
+ root = kernfs_root(parent);
+ down_write(&root->kernfs_rwsem);
kn = kernfs_find_ns(parent, name, ns);
if (kn)
__kernfs_remove(kn);
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
if (kn)
return 0;
@@ -1562,6 +1583,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
const char *new_name, const void *new_ns)
{
struct kernfs_node *old_parent;
+ struct kernfs_root *root;
const char *old_name = NULL;
int error;
@@ -1569,7 +1591,8 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
if (!kn->parent)
return -EINVAL;
- down_write(&kernfs_rwsem);
+ root = kernfs_root(kn);
+ down_write(&root->kernfs_rwsem);
error = -ENOENT;
if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
@@ -1623,7 +1646,7 @@ int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
error = 0;
out:
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
return error;
}
@@ -1694,11 +1717,14 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
struct dentry *dentry = file->f_path.dentry;
struct kernfs_node *parent = kernfs_dentry_node(dentry);
struct kernfs_node *pos = file->private_data;
+ struct kernfs_root *root;
const void *ns = NULL;
if (!dir_emit_dots(file, ctx))
return 0;
- down_read(&kernfs_rwsem);
+
+ root = kernfs_root(parent);
+ down_read(&root->kernfs_rwsem);
if (kernfs_ns_enabled(parent))
ns = kernfs_info(dentry->d_sb)->ns;
@@ -1715,12 +1741,12 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
file->private_data = pos;
kernfs_get(pos);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
if (!dir_emit(ctx, name, len, ino, type))
return 0;
- down_read(&kernfs_rwsem);
+ down_read(&root->kernfs_rwsem);
}
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
file->private_data = NULL;
ctx->pos = INT_MAX;
return 0;
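
With the global kernfs_rwsem gone, every kernfs path must resolve the node's root
before taking the lock. The pattern, mirroring kernfs_find_and_get_ns() above
(sketch; kernfs_find_ns() is internal to dir.c):

	static struct kernfs_node *demo_lookup(struct kernfs_node *parent,
					       const char *name)
	{
		struct kernfs_root *root = kernfs_root(parent);
		struct kernfs_node *kn;

		down_read(&root->kernfs_rwsem);		/* per-root, not global */
		kn = kernfs_find_ns(parent, name, NULL);
		kernfs_get(kn);				/* tolerates NULL */
		up_read(&root->kernfs_rwsem);
		return kn;
	}
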
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index 60e2a86c535e..9414a7a60a9f 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -847,6 +847,7 @@ static void kernfs_notify_workfn(struct work_struct *work)
{
struct kernfs_node *kn;
struct kernfs_super_info *info;
+ struct kernfs_root *root;
repeat:
/* pop one off the notify_list */
spin_lock_irq(&kernfs_notify_lock);
@@ -859,8 +860,9 @@ repeat:
kn->attr.notify_next = NULL;
spin_unlock_irq(&kernfs_notify_lock);
+ root = kernfs_root(kn);
/* kick fsnotify */
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
struct kernfs_node *parent;
@@ -898,7 +900,7 @@ repeat:
iput(inode);
}
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
kernfs_put(kn);
goto repeat;
}
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index c0eae1725435..3d783d80f5da 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -99,10 +99,11 @@ int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
{
int ret;
+ struct kernfs_root *root = kernfs_root(kn);
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
ret = __kernfs_setattr(kn, iattr);
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
return ret;
}
@@ -111,12 +112,14 @@ int kernfs_iop_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
{
struct inode *inode = d_inode(dentry);
struct kernfs_node *kn = inode->i_private;
+ struct kernfs_root *root;
int error;
if (!kn)
return -EINVAL;
- down_write(&kernfs_rwsem);
+ root = kernfs_root(kn);
+ down_write(&root->kernfs_rwsem);
error = setattr_prepare(&init_user_ns, dentry, iattr);
if (error)
goto out;
@@ -129,7 +132,7 @@ int kernfs_iop_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
setattr_copy(&init_user_ns, inode, iattr);
out:
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
return error;
}
@@ -184,13 +187,14 @@ int kernfs_iop_getattr(struct user_namespace *mnt_userns,
{
struct inode *inode = d_inode(path->dentry);
struct kernfs_node *kn = inode->i_private;
+ struct kernfs_root *root = kernfs_root(kn);
- down_read(&kernfs_rwsem);
+ down_read(&root->kernfs_rwsem);
spin_lock(&inode->i_lock);
kernfs_refresh_inode(kn, inode);
generic_fillattr(&init_user_ns, inode, stat);
spin_unlock(&inode->i_lock);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return 0;
}
@@ -274,19 +278,21 @@ int kernfs_iop_permission(struct user_namespace *mnt_userns,
struct inode *inode, int mask)
{
struct kernfs_node *kn;
+ struct kernfs_root *root;
int ret;
if (mask & MAY_NOT_BLOCK)
return -ECHILD;
kn = inode->i_private;
+ root = kernfs_root(kn);
- down_read(&kernfs_rwsem);
+ down_read(&root->kernfs_rwsem);
spin_lock(&inode->i_lock);
kernfs_refresh_inode(kn, inode);
ret = generic_permission(&init_user_ns, inode, mask);
spin_unlock(&inode->i_lock);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return ret;
}
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index f2f909d09f52..cfa79715fc1a 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -236,6 +236,7 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *kfc)
{
struct kernfs_super_info *info = kernfs_info(sb);
+ struct kernfs_root *kf_root = kfc->root;
struct inode *inode;
struct dentry *root;
@@ -255,9 +256,9 @@ static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *k
sb->s_shrink.seeks = 0;
/* get root inode, initialize and unlock it */
- down_read(&kernfs_rwsem);
+ down_read(&kf_root->kernfs_rwsem);
inode = kernfs_get_inode(sb, info->root->kn);
- up_read(&kernfs_rwsem);
+ up_read(&kf_root->kernfs_rwsem);
if (!inode) {
pr_debug("kernfs: could not get root inode\n");
return -ENOMEM;
@@ -334,6 +335,7 @@ int kernfs_get_tree(struct fs_context *fc)
if (!sb->s_root) {
struct kernfs_super_info *info = kernfs_info(sb);
+ struct kernfs_root *root = kfc->root;
kfc->new_sb_created = true;
@@ -344,9 +346,9 @@ int kernfs_get_tree(struct fs_context *fc)
}
sb->s_flags |= SB_ACTIVE;
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
list_add(&info->node, &info->root->supers);
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
}
fc->root = dget(sb->s_root);
@@ -371,10 +373,11 @@ void kernfs_free_fs_context(struct fs_context *fc)
void kernfs_kill_sb(struct super_block *sb)
{
struct kernfs_super_info *info = kernfs_info(sb);
+ struct kernfs_root *root = info->root;
- down_write(&kernfs_rwsem);
+ down_write(&root->kernfs_rwsem);
list_del(&info->node);
- up_write(&kernfs_rwsem);
+ up_write(&root->kernfs_rwsem);
/*
* Remove the superblock from fs_supers/s_instances
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c
index 19a6c71c6ff5..0ab13824822f 100644
--- a/fs/kernfs/symlink.c
+++ b/fs/kernfs/symlink.c
@@ -113,11 +113,12 @@ static int kernfs_getlink(struct inode *inode, char *path)
struct kernfs_node *kn = inode->i_private;
struct kernfs_node *parent = kn->parent;
struct kernfs_node *target = kn->symlink.target_kn;
+ struct kernfs_root *root = kernfs_root(parent);
int error;
- down_read(&kernfs_rwsem);
+ down_read(&root->kernfs_rwsem);
error = kernfs_get_target_path(parent, target, path);
- up_read(&kernfs_rwsem);
+ up_read(&root->kernfs_rwsem);
return error;
}
diff --git a/fs/ksmbd/asn1.c b/fs/ksmbd/asn1.c
index b014f4638610..c03eba090368 100644
--- a/fs/ksmbd/asn1.c
+++ b/fs/ksmbd/asn1.c
@@ -21,101 +21,11 @@
#include "ksmbd_spnego_negtokeninit.asn1.h"
#include "ksmbd_spnego_negtokentarg.asn1.h"
-#define SPNEGO_OID_LEN 7
#define NTLMSSP_OID_LEN 10
-#define KRB5_OID_LEN 7
-#define KRB5U2U_OID_LEN 8
-#define MSKRB5_OID_LEN 7
-static unsigned long SPNEGO_OID[7] = { 1, 3, 6, 1, 5, 5, 2 };
-static unsigned long NTLMSSP_OID[10] = { 1, 3, 6, 1, 4, 1, 311, 2, 2, 10 };
-static unsigned long KRB5_OID[7] = { 1, 2, 840, 113554, 1, 2, 2 };
-static unsigned long KRB5U2U_OID[8] = { 1, 2, 840, 113554, 1, 2, 2, 3 };
-static unsigned long MSKRB5_OID[7] = { 1, 2, 840, 48018, 1, 2, 2 };
static char NTLMSSP_OID_STR[NTLMSSP_OID_LEN] = { 0x2b, 0x06, 0x01, 0x04, 0x01,
0x82, 0x37, 0x02, 0x02, 0x0a };
-static bool
-asn1_subid_decode(const unsigned char **begin, const unsigned char *end,
- unsigned long *subid)
-{
- const unsigned char *ptr = *begin;
- unsigned char ch;
-
- *subid = 0;
-
- do {
- if (ptr >= end)
- return false;
-
- ch = *ptr++;
- *subid <<= 7;
- *subid |= ch & 0x7F;
- } while ((ch & 0x80) == 0x80);
-
- *begin = ptr;
- return true;
-}
-
-static bool asn1_oid_decode(const unsigned char *value, size_t vlen,
- unsigned long **oid, size_t *oidlen)
-{
- const unsigned char *iptr = value, *end = value + vlen;
- unsigned long *optr;
- unsigned long subid;
-
- vlen += 1;
- if (vlen < 2 || vlen > UINT_MAX / sizeof(unsigned long))
- goto fail_nullify;
-
- *oid = kmalloc(vlen * sizeof(unsigned long), GFP_KERNEL);
- if (!*oid)
- return false;
-
- optr = *oid;
-
- if (!asn1_subid_decode(&iptr, end, &subid))
- goto fail;
-
- if (subid < 40) {
- optr[0] = 0;
- optr[1] = subid;
- } else if (subid < 80) {
- optr[0] = 1;
- optr[1] = subid - 40;
- } else {
- optr[0] = 2;
- optr[1] = subid - 80;
- }
-
- *oidlen = 2;
- optr += 2;
-
- while (iptr < end) {
- if (++(*oidlen) > vlen)
- goto fail;
-
- if (!asn1_subid_decode(&iptr, end, optr++))
- goto fail;
- }
- return true;
-
-fail:
- kfree(*oid);
-fail_nullify:
- *oid = NULL;
- return false;
-}
-
-static bool oid_eq(unsigned long *oid1, unsigned int oid1len,
- unsigned long *oid2, unsigned int oid2len)
-{
- if (oid1len != oid2len)
- return false;
-
- return memcmp(oid1, oid2, oid1len) == 0;
-}
-
int
ksmbd_decode_negTokenInit(unsigned char *security_blob, int length,
struct ksmbd_conn *conn)
@@ -252,26 +162,18 @@ int build_spnego_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
int ksmbd_gssapi_this_mech(void *context, size_t hdrlen, unsigned char tag,
const void *value, size_t vlen)
{
- unsigned long *oid;
- size_t oidlen;
- int err = 0;
-
- if (!asn1_oid_decode(value, vlen, &oid, &oidlen)) {
- err = -EBADMSG;
- goto out;
- }
+ enum OID oid;
- if (!oid_eq(oid, oidlen, SPNEGO_OID, SPNEGO_OID_LEN))
- err = -EBADMSG;
- kfree(oid);
-out:
- if (err) {
+ oid = look_up_OID(value, vlen);
+ if (oid != OID_spnego) {
char buf[50];
sprint_oid(value, vlen, buf, sizeof(buf));
ksmbd_debug(AUTH, "Unexpected OID: %s\n", buf);
+ return -EBADMSG;
}
- return err;
+
+ return 0;
}
int ksmbd_neg_token_init_mech_type(void *context, size_t hdrlen,
@@ -279,37 +181,31 @@ int ksmbd_neg_token_init_mech_type(void *context, size_t hdrlen,
size_t vlen)
{
struct ksmbd_conn *conn = context;
- unsigned long *oid;
- size_t oidlen;
+ enum OID oid;
int mech_type;
- char buf[50];
- if (!asn1_oid_decode(value, vlen, &oid, &oidlen))
- goto fail;
-
- if (oid_eq(oid, oidlen, NTLMSSP_OID, NTLMSSP_OID_LEN))
+ oid = look_up_OID(value, vlen);
+ if (oid == OID_ntlmssp) {
mech_type = KSMBD_AUTH_NTLMSSP;
- else if (oid_eq(oid, oidlen, MSKRB5_OID, MSKRB5_OID_LEN))
+ } else if (oid == OID_mskrb5) {
mech_type = KSMBD_AUTH_MSKRB5;
- else if (oid_eq(oid, oidlen, KRB5_OID, KRB5_OID_LEN))
+ } else if (oid == OID_krb5) {
mech_type = KSMBD_AUTH_KRB5;
- else if (oid_eq(oid, oidlen, KRB5U2U_OID, KRB5U2U_OID_LEN))
+ } else if (oid == OID_krb5u2u) {
mech_type = KSMBD_AUTH_KRB5U2U;
- else
- goto fail;
+ } else {
+ char buf[50];
+
+ sprint_oid(value, vlen, buf, sizeof(buf));
+ ksmbd_debug(AUTH, "Unexpected OID: %s\n", buf);
+ return -EBADMSG;
+ }
conn->auth_mechs |= mech_type;
if (conn->preferred_auth_mech == 0)
conn->preferred_auth_mech = mech_type;
- kfree(oid);
return 0;
-
-fail:
- kfree(oid);
- sprint_oid(value, vlen, buf, sizeof(buf));
- ksmbd_debug(AUTH, "Unexpected OID: %s\n", buf);
- return -EBADMSG;
}
int ksmbd_neg_token_init_mech_token(void *context, size_t hdrlen,
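
The hand-rolled ASN.1 OID decoder above is dropped in favor of the kernel's OID
registry. Assuming the OID_* values used in this patch exist in
<linux/oid_registry.h> in this tree, the mech-type dispatch reduces to the
following (sketch; KSMBD_AUTH_* come from ksmbd's own headers):

	#include <linux/oid_registry.h>

	static int demo_mech_type(const void *value, size_t vlen)
	{
		enum OID oid = look_up_OID(value, vlen);	/* DER bytes -> enum */

		switch (oid) {
		case OID_ntlmssp:	return KSMBD_AUTH_NTLMSSP;
		case OID_mskrb5:	return KSMBD_AUTH_MSKRB5;
		case OID_krb5:		return KSMBD_AUTH_KRB5;
		case OID_krb5u2u:	return KSMBD_AUTH_KRB5U2U;
		default:		return -EBADMSG;
		}
	}
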
diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
index 3503b1c48cb4..911444d21267 100644
--- a/fs/ksmbd/auth.c
+++ b/fs/ksmbd/auth.c
@@ -29,6 +29,7 @@
#include "mgmt/user_config.h"
#include "crypto_ctx.h"
#include "transport_ipc.h"
+#include "../smbfs_common/arc4.h"
/*
* Fixed format data defining GSS header and fixed string
@@ -215,7 +216,7 @@ out:
* Return: 0 on success, error number on error
*/
int ksmbd_auth_ntlmv2(struct ksmbd_session *sess, struct ntlmv2_resp *ntlmv2,
- int blen, char *domain_name)
+ int blen, char *domain_name, char *cryptkey)
{
char ntlmv2_hash[CIFS_ENCPWD_SIZE];
char ntlmv2_rsp[CIFS_HMAC_MD5_HASH_SIZE];
@@ -256,7 +257,7 @@ int ksmbd_auth_ntlmv2(struct ksmbd_session *sess, struct ntlmv2_resp *ntlmv2,
goto out;
}
- memcpy(construct, sess->ntlmssp.cryptkey, CIFS_CRYPTO_KEY_SIZE);
+ memcpy(construct, cryptkey, CIFS_CRYPTO_KEY_SIZE);
memcpy(construct + CIFS_CRYPTO_KEY_SIZE, &ntlmv2->blob_signature, blen);
rc = crypto_shash_update(CRYPTO_HMACMD5(ctx), construct, len);
@@ -295,7 +296,8 @@ out:
* Return: 0 on success, error number on error
*/
int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
- int blob_len, struct ksmbd_session *sess)
+ int blob_len, struct ksmbd_conn *conn,
+ struct ksmbd_session *sess)
{
char *domain_name;
unsigned int nt_off, dn_off;
@@ -324,7 +326,7 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
/* TODO : use domain name that imported from configuration file */
domain_name = smb_strndup_from_utf16((const char *)authblob + dn_off,
- dn_len, true, sess->conn->local_nls);
+ dn_len, true, conn->local_nls);
if (IS_ERR(domain_name))
return PTR_ERR(domain_name);
@@ -333,8 +335,31 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
domain_name);
ret = ksmbd_auth_ntlmv2(sess, (struct ntlmv2_resp *)((char *)authblob + nt_off),
nt_len - CIFS_ENCPWD_SIZE,
- domain_name);
+ domain_name, conn->ntlmssp.cryptkey);
kfree(domain_name);
+
+ /* The recovered secondary session key */
+ if (conn->ntlmssp.client_flags & NTLMSSP_NEGOTIATE_KEY_XCH) {
+ struct arc4_ctx *ctx_arc4;
+ unsigned int sess_key_off, sess_key_len;
+
+ sess_key_off = le32_to_cpu(authblob->SessionKey.BufferOffset);
+ sess_key_len = le16_to_cpu(authblob->SessionKey.Length);
+
+ if (blob_len < (u64)sess_key_off + sess_key_len)
+ return -EINVAL;
+
+ ctx_arc4 = kmalloc(sizeof(*ctx_arc4), GFP_KERNEL);
+ if (!ctx_arc4)
+ return -ENOMEM;
+
+ cifs_arc4_setkey(ctx_arc4, sess->sess_key,
+ SMB2_NTLMV2_SESSKEY_SIZE);
+ cifs_arc4_crypt(ctx_arc4, sess->sess_key,
+ (char *)authblob + sess_key_off, sess_key_len);
+ kfree_sensitive(ctx_arc4);
+ }
+
return ret;
}
@@ -347,7 +372,7 @@ int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
*
*/
int ksmbd_decode_ntlmssp_neg_blob(struct negotiate_message *negblob,
- int blob_len, struct ksmbd_session *sess)
+ int blob_len, struct ksmbd_conn *conn)
{
if (blob_len < sizeof(struct negotiate_message)) {
ksmbd_debug(AUTH, "negotiate blob len %d too small\n",
@@ -361,7 +386,7 @@ int ksmbd_decode_ntlmssp_neg_blob(struct negotiate_message *negblob,
return -EINVAL;
}
- sess->ntlmssp.client_flags = le32_to_cpu(negblob->NegotiateFlags);
+ conn->ntlmssp.client_flags = le32_to_cpu(negblob->NegotiateFlags);
return 0;
}
@@ -375,14 +400,14 @@ int ksmbd_decode_ntlmssp_neg_blob(struct negotiate_message *negblob,
*/
unsigned int
ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
- struct ksmbd_session *sess)
+ struct ksmbd_conn *conn)
{
struct target_info *tinfo;
wchar_t *name;
__u8 *target_name;
unsigned int flags, blob_off, blob_len, type, target_info_len = 0;
int len, uni_len, conv_len;
- int cflags = sess->ntlmssp.client_flags;
+ int cflags = conn->ntlmssp.client_flags;
memcpy(chgblob->Signature, NTLMSSP_SIGNATURE, 8);
chgblob->MessageType = NtLmChallenge;
@@ -403,10 +428,13 @@ ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
if (cflags & NTLMSSP_REQUEST_TARGET)
flags |= NTLMSSP_REQUEST_TARGET;
- if (sess->conn->use_spnego &&
+ if (conn->use_spnego &&
(cflags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
flags |= NTLMSSP_NEGOTIATE_EXTENDED_SEC;
+ if (cflags & NTLMSSP_NEGOTIATE_KEY_XCH)
+ flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+
chgblob->NegotiateFlags = cpu_to_le32(flags);
len = strlen(ksmbd_netbios_name());
name = kmalloc(2 + UNICODE_LEN(len), GFP_KERNEL);
@@ -414,7 +442,7 @@ ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
return -ENOMEM;
conv_len = smb_strtoUTF16((__le16 *)name, ksmbd_netbios_name(), len,
- sess->conn->local_nls);
+ conn->local_nls);
if (conv_len < 0 || conv_len > len) {
kfree(name);
return -EINVAL;
@@ -430,8 +458,8 @@ ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
chgblob->TargetName.BufferOffset = cpu_to_le32(blob_off);
/* Initialize random conn challenge */
- get_random_bytes(sess->ntlmssp.cryptkey, sizeof(__u64));
- memcpy(chgblob->Challenge, sess->ntlmssp.cryptkey,
+ get_random_bytes(conn->ntlmssp.cryptkey, sizeof(__u64));
+ memcpy(chgblob->Challenge, conn->ntlmssp.cryptkey,
CIFS_CRYPTO_KEY_SIZE);
/* Add Target Information to security buffer */
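
With NTLMSSP_NEGOTIATE_KEY_XCH the client returns the session key RC4-encrypted
under the key derived so far, and the auth-blob hunk above decrypts it in place.
The shared arc4 helpers used for that, condensed into a sketch (mirrors the code
above; not a new API):

	#include <linux/slab.h>
	#include "../smbfs_common/arc4.h"

	/* Decrypt the exchanged session key in place; enc is the client's blob. */
	static int demo_recover_sess_key(u8 *sess_key, const u8 *enc, size_t len)
	{
		struct arc4_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return -ENOMEM;
		cifs_arc4_setkey(ctx, sess_key, SMB2_NTLMV2_SESSKEY_SIZE);
		cifs_arc4_crypt(ctx, sess_key, enc, len);
		kfree_sensitive(ctx);	/* key material: scrub on free */
		return 0;
	}
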
diff --git a/fs/ksmbd/auth.h b/fs/ksmbd/auth.h
index 9c2d4badd05d..95629651cf26 100644
--- a/fs/ksmbd/auth.h
+++ b/fs/ksmbd/auth.h
@@ -38,16 +38,16 @@ struct kvec;
int ksmbd_crypt_message(struct ksmbd_conn *conn, struct kvec *iov,
unsigned int nvec, int enc);
void ksmbd_copy_gss_neg_header(void *buf);
-int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf);
int ksmbd_auth_ntlmv2(struct ksmbd_session *sess, struct ntlmv2_resp *ntlmv2,
- int blen, char *domain_name);
+ int blen, char *domain_name, char *cryptkey);
int ksmbd_decode_ntlmssp_auth_blob(struct authenticate_message *authblob,
- int blob_len, struct ksmbd_session *sess);
+ int blob_len, struct ksmbd_conn *conn,
+ struct ksmbd_session *sess);
int ksmbd_decode_ntlmssp_neg_blob(struct negotiate_message *negblob,
- int blob_len, struct ksmbd_session *sess);
+ int blob_len, struct ksmbd_conn *conn);
unsigned int
ksmbd_build_ntlmssp_challenge_blob(struct challenge_message *chgblob,
- struct ksmbd_session *sess);
+ struct ksmbd_conn *conn);
int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
int in_len, char *out_blob, int *out_len);
int ksmbd_sign_smb2_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index 83a94d0bb480..208d2cff7bd3 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -62,6 +62,7 @@ struct ksmbd_conn *ksmbd_conn_alloc(void)
atomic_set(&conn->req_running, 0);
atomic_set(&conn->r_count, 0);
conn->total_credits = 1;
+ conn->outstanding_credits = 1;
init_waitqueue_head(&conn->req_running_q);
INIT_LIST_HEAD(&conn->conns_list);
@@ -386,17 +387,24 @@ out:
static void stop_sessions(void)
{
struct ksmbd_conn *conn;
+ struct ksmbd_transport *t;
again:
read_lock(&conn_list_lock);
list_for_each_entry(conn, &conn_list, conns_list) {
struct task_struct *task;
- task = conn->transport->handler;
+ t = conn->transport;
+ task = t->handler;
if (task)
ksmbd_debug(CONN, "Stop session handler %s/%d\n",
task->comm, task_pid_nr(task));
conn->status = KSMBD_SESS_EXITING;
+ if (t->ops->shutdown) {
+ read_unlock(&conn_list_lock);
+ t->ops->shutdown(t);
+ read_lock(&conn_list_lock);
+ }
}
read_unlock(&conn_list_lock);
diff --git a/fs/ksmbd/connection.h b/fs/ksmbd/connection.h
index e5403c587a58..7a59aacb5daa 100644
--- a/fs/ksmbd/connection.h
+++ b/fs/ksmbd/connection.h
@@ -61,8 +61,8 @@ struct ksmbd_conn {
atomic_t req_running;
/* References which are made for this Server object*/
atomic_t r_count;
- unsigned short total_credits;
- unsigned short max_credits;
+ unsigned int total_credits;
+ unsigned int outstanding_credits;
spinlock_t credits_lock;
wait_queue_head_t req_running_q;
/* Lock to protect requests list*/
@@ -72,12 +72,7 @@ struct ksmbd_conn {
int connection_type;
struct ksmbd_stats stats;
char ClientGUID[SMB2_CLIENT_GUID_SIZE];
- union {
- /* pending trans request table */
- struct trans_state *recent_trans;
- /* Used by ntlmssp */
- char *ntlmssp_cryptkey;
- };
+ struct ntlmssp_auth ntlmssp;
spinlock_t llist_lock;
struct list_head lock_list;
@@ -122,6 +117,7 @@ struct ksmbd_conn_ops {
struct ksmbd_transport_ops {
int (*prepare)(struct ksmbd_transport *t);
void (*disconnect)(struct ksmbd_transport *t);
+ void (*shutdown)(struct ksmbd_transport *t);
int (*read)(struct ksmbd_transport *t, char *buf, unsigned int size);
int (*writev)(struct ksmbd_transport *t, struct kvec *iovs, int niov,
int size, bool need_invalidate_rkey,
diff --git a/fs/ksmbd/ksmbd_netlink.h b/fs/ksmbd/ksmbd_netlink.h
index c6718a05d347..71bfb7de4472 100644
--- a/fs/ksmbd/ksmbd_netlink.h
+++ b/fs/ksmbd/ksmbd_netlink.h
@@ -103,6 +103,8 @@ struct ksmbd_startup_request {
* we set the SPARSE_FILES bit (0x40).
*/
__u32 sub_auth[3]; /* Subauth value for Security ID */
+ __u32 smb2_max_credits; /* MAX credits */
+ __u32 reserved[128]; /* Reserved room */
__u32 ifc_list_sz; /* interfaces list size */
__s8 ____payload[];
};
@@ -113,7 +115,7 @@ struct ksmbd_startup_request {
* IPC request to shutdown ksmbd server.
*/
struct ksmbd_shutdown_request {
- __s32 reserved;
+ __s32 reserved[16];
};
/*
@@ -122,6 +124,7 @@ struct ksmbd_shutdown_request {
struct ksmbd_login_request {
__u32 handle;
__s8 account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
+ __u32 reserved[16]; /* Reserved room */
};
/*
@@ -135,6 +138,7 @@ struct ksmbd_login_response {
__u16 status;
__u16 hash_sz; /* hash size */
__s8 hash[KSMBD_REQ_MAX_HASH_SZ]; /* password hash */
+ __u32 reserved[16]; /* Reserved room */
};
/*
@@ -143,6 +147,7 @@ struct ksmbd_login_response {
struct ksmbd_share_config_request {
__u32 handle;
__s8 share_name[KSMBD_REQ_MAX_SHARE_NAME]; /* share name */
+ __u32 reserved[16]; /* Reserved room */
};
/*
@@ -157,6 +162,7 @@ struct ksmbd_share_config_response {
__u16 force_directory_mode;
__u16 force_uid;
__u16 force_gid;
+ __u32 reserved[128]; /* Reserved room */
__u32 veto_list_sz;
__s8 ____payload[];
};
@@ -187,6 +193,7 @@ struct ksmbd_tree_connect_request {
__s8 account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ];
__s8 share[KSMBD_REQ_MAX_SHARE_NAME];
__s8 peer_addr[64];
+ __u32 reserved[16]; /* Reserved room */
};
/*
@@ -196,6 +203,7 @@ struct ksmbd_tree_connect_response {
__u32 handle;
__u16 status;
__u16 connection_flags;
+ __u32 reserved[16]; /* Reserved room */
};
/*
@@ -204,6 +212,7 @@ struct ksmbd_tree_connect_response {
struct ksmbd_tree_disconnect_request {
__u64 session_id; /* session id */
__u64 connect_id; /* tree connection id */
+ __u32 reserved[16]; /* Reserved room */
};
/*
@@ -212,6 +221,7 @@ struct ksmbd_tree_disconnect_request {
struct ksmbd_logout_request {
__s8 account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ]; /* user account name */
__u32 account_flags;
+ __u32 reserved[16]; /* Reserved room */
};
/*
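
The reserved arrays added throughout these IPC structs keep the struct sizes
stable so the ksmbd netlink ABI can grow without breaking older user-space
daemons. Illustration of how a later field would be carved out (hypothetical
future revision, not part of this patch):

	struct ksmbd_logout_request_v2 {
		__s8 account[KSMBD_REQ_MAX_ACCOUNT_NAME_SZ];
		__u32 account_flags;
		__u32 new_field;	/* takes one slot of the reserved room */
		__u32 reserved[15];	/* shrunk from 16: struct size unchanged */
	};
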
diff --git a/fs/ksmbd/mgmt/user_config.c b/fs/ksmbd/mgmt/user_config.c
index 1019d3677d55..279d00feff21 100644
--- a/fs/ksmbd/mgmt/user_config.c
+++ b/fs/ksmbd/mgmt/user_config.c
@@ -67,3 +67,13 @@ int ksmbd_anonymous_user(struct ksmbd_user *user)
return 1;
return 0;
}
+
+bool ksmbd_compare_user(struct ksmbd_user *u1, struct ksmbd_user *u2)
+{
+ if (strcmp(u1->name, u2->name))
+ return false;
+ if (memcmp(u1->passkey, u2->passkey, u1->passkey_sz))
+ return false;
+
+ return true;
+}
diff --git a/fs/ksmbd/mgmt/user_config.h b/fs/ksmbd/mgmt/user_config.h
index aff80b029579..6a44109617f1 100644
--- a/fs/ksmbd/mgmt/user_config.h
+++ b/fs/ksmbd/mgmt/user_config.h
@@ -64,4 +64,5 @@ struct ksmbd_user *ksmbd_login_user(const char *account);
struct ksmbd_user *ksmbd_alloc_user(struct ksmbd_login_response *resp);
void ksmbd_free_user(struct ksmbd_user *user);
int ksmbd_anonymous_user(struct ksmbd_user *user);
+bool ksmbd_compare_user(struct ksmbd_user *u1, struct ksmbd_user *u2);
#endif /* __USER_CONFIG_MANAGEMENT_H__ */
diff --git a/fs/ksmbd/mgmt/user_session.h b/fs/ksmbd/mgmt/user_session.h
index 82289c3cbd2b..e241f16a3851 100644
--- a/fs/ksmbd/mgmt/user_session.h
+++ b/fs/ksmbd/mgmt/user_session.h
@@ -45,7 +45,6 @@ struct ksmbd_session {
int state;
__u8 *Preauth_HashValue;
- struct ntlmssp_auth ntlmssp;
char sess_key[CIFS_KEY_SIZE];
struct hlist_node hlist;
diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
index 50d0b1022289..4a9460153b59 100644
--- a/fs/ksmbd/smb2misc.c
+++ b/fs/ksmbd/smb2misc.c
@@ -289,7 +289,7 @@ static int smb2_validate_credit_charge(struct ksmbd_conn *conn,
unsigned int req_len = 0, expect_resp_len = 0, calc_credit_num, max_len;
unsigned short credit_charge = le16_to_cpu(hdr->CreditCharge);
void *__hdr = hdr;
- int ret;
+ int ret = 0;
switch (hdr->Command) {
case SMB2_QUERY_INFO:
@@ -326,21 +326,27 @@ static int smb2_validate_credit_charge(struct ksmbd_conn *conn,
ksmbd_debug(SMB, "Insufficient credit charge, given: %d, needed: %d\n",
credit_charge, calc_credit_num);
return 1;
- } else if (credit_charge > conn->max_credits) {
+ } else if (credit_charge > conn->vals->max_credits) {
ksmbd_debug(SMB, "Too large credit charge: %d\n", credit_charge);
return 1;
}
spin_lock(&conn->credits_lock);
- if (credit_charge <= conn->total_credits) {
- conn->total_credits -= credit_charge;
- ret = 0;
- } else {
+ if (credit_charge > conn->total_credits) {
ksmbd_debug(SMB, "Insufficient credits granted, given: %u, granted: %u\n",
credit_charge, conn->total_credits);
ret = 1;
}
+
+ if ((u64)conn->outstanding_credits + credit_charge > conn->vals->max_credits) {
+ ksmbd_debug(SMB, "Limits exceeding the maximum allowable outstanding requests, given : %u, pending : %u\n",
+ credit_charge, conn->outstanding_credits);
+ ret = 1;
+ } else
+ conn->outstanding_credits += credit_charge;
+
spin_unlock(&conn->credits_lock);
+
return ret;
}
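
The new outstanding_credits counter bounds the total charge of in-flight requests
separately from the grant window tracked in total_credits. The admission check
above amounts to the following (condensed sketch; the real code logs each failure
case separately):

	static bool demo_admit(struct ksmbd_conn *conn, unsigned int charge)
	{
		bool ok;

		spin_lock(&conn->credits_lock);
		ok = charge <= conn->total_credits &&
		     (u64)conn->outstanding_credits + charge <= conn->vals->max_credits;
		if (ok)
			conn->outstanding_credits += charge;	/* released on response */
		spin_unlock(&conn->credits_lock);
		return ok;
	}
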
diff --git a/fs/ksmbd/smb2ops.c b/fs/ksmbd/smb2ops.c
index 02a44d28bdaf..ab23da2120b9 100644
--- a/fs/ksmbd/smb2ops.c
+++ b/fs/ksmbd/smb2ops.c
@@ -19,6 +19,7 @@ static struct smb_version_values smb21_server_values = {
.max_read_size = SMB21_DEFAULT_IOSIZE,
.max_write_size = SMB21_DEFAULT_IOSIZE,
.max_trans_size = SMB21_DEFAULT_IOSIZE,
+ .max_credits = SMB2_MAX_CREDITS,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
.shared_lock_type = SMB2_LOCKFLAG_SHARED,
@@ -44,6 +45,7 @@ static struct smb_version_values smb30_server_values = {
.max_read_size = SMB3_DEFAULT_IOSIZE,
.max_write_size = SMB3_DEFAULT_IOSIZE,
.max_trans_size = SMB3_DEFAULT_TRANS_SIZE,
+ .max_credits = SMB2_MAX_CREDITS,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
.shared_lock_type = SMB2_LOCKFLAG_SHARED,
@@ -70,6 +72,7 @@ static struct smb_version_values smb302_server_values = {
.max_read_size = SMB3_DEFAULT_IOSIZE,
.max_write_size = SMB3_DEFAULT_IOSIZE,
.max_trans_size = SMB3_DEFAULT_TRANS_SIZE,
+ .max_credits = SMB2_MAX_CREDITS,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
.shared_lock_type = SMB2_LOCKFLAG_SHARED,
@@ -96,6 +99,7 @@ static struct smb_version_values smb311_server_values = {
.max_read_size = SMB3_DEFAULT_IOSIZE,
.max_write_size = SMB3_DEFAULT_IOSIZE,
.max_trans_size = SMB3_DEFAULT_TRANS_SIZE,
+ .max_credits = SMB2_MAX_CREDITS,
.large_lock_type = 0,
.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
.shared_lock_type = SMB2_LOCKFLAG_SHARED,
@@ -197,7 +201,6 @@ void init_smb2_1_server(struct ksmbd_conn *conn)
conn->ops = &smb2_0_server_ops;
conn->cmds = smb2_0_server_cmds;
conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
- conn->max_credits = SMB2_MAX_CREDITS;
conn->signing_algorithm = SIGNING_ALG_HMAC_SHA256_LE;
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
@@ -215,7 +218,6 @@ void init_smb3_0_server(struct ksmbd_conn *conn)
conn->ops = &smb3_0_server_ops;
conn->cmds = smb2_0_server_cmds;
conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
- conn->max_credits = SMB2_MAX_CREDITS;
conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
@@ -240,7 +242,6 @@ void init_smb3_02_server(struct ksmbd_conn *conn)
conn->ops = &smb3_0_server_ops;
conn->cmds = smb2_0_server_cmds;
conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
- conn->max_credits = SMB2_MAX_CREDITS;
conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
@@ -265,7 +266,6 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
conn->ops = &smb3_11_server_ops;
conn->cmds = smb2_0_server_cmds;
conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
- conn->max_credits = SMB2_MAX_CREDITS;
conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
@@ -304,3 +304,11 @@ void init_smb2_max_trans_size(unsigned int sz)
smb302_server_values.max_trans_size = sz;
smb311_server_values.max_trans_size = sz;
}
+
+void init_smb2_max_credits(unsigned int sz)
+{
+ smb21_server_values.max_credits = sz;
+ smb30_server_values.max_credits = sz;
+ smb302_server_values.max_credits = sz;
+ smb311_server_values.max_credits = sz;
+}
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index b8b3a4c28b74..67e8e28e3fc3 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -299,16 +299,15 @@ int smb2_set_rsp_credits(struct ksmbd_work *work)
struct smb2_hdr *req_hdr = ksmbd_req_buf_next(work);
struct smb2_hdr *hdr = ksmbd_resp_buf_next(work);
struct ksmbd_conn *conn = work->conn;
- unsigned short credits_requested;
+ unsigned short credits_requested, aux_max;
unsigned short credit_charge, credits_granted = 0;
- unsigned short aux_max, aux_credits;
if (work->send_no_response)
return 0;
hdr->CreditCharge = req_hdr->CreditCharge;
- if (conn->total_credits > conn->max_credits) {
+ if (conn->total_credits > conn->vals->max_credits) {
hdr->CreditRequest = 0;
pr_err("Total credits overflow: %d\n", conn->total_credits);
return -EINVAL;
@@ -316,6 +315,14 @@ int smb2_set_rsp_credits(struct ksmbd_work *work)
credit_charge = max_t(unsigned short,
le16_to_cpu(req_hdr->CreditCharge), 1);
+ if (credit_charge > conn->total_credits) {
+ ksmbd_debug(SMB, "Insufficient credits granted, given: %u, granted: %u\n",
+ credit_charge, conn->total_credits);
+ return -EINVAL;
+ }
+
+ conn->total_credits -= credit_charge;
+ conn->outstanding_credits -= credit_charge;
credits_requested = max_t(unsigned short,
le16_to_cpu(req_hdr->CreditRequest), 1);
@@ -325,16 +332,14 @@ int smb2_set_rsp_credits(struct ksmbd_work *work)
* TODO: Need to adjust CreditRequest value according to
* current cpu load
*/
- aux_credits = credits_requested - 1;
if (hdr->Command == SMB2_NEGOTIATE)
- aux_max = 0;
+ aux_max = 1;
else
- aux_max = conn->max_credits - credit_charge;
- aux_credits = min_t(unsigned short, aux_credits, aux_max);
- credits_granted = credit_charge + aux_credits;
+ aux_max = conn->vals->max_credits - credit_charge;
+ credits_granted = min_t(unsigned short, credits_requested, aux_max);
- if (conn->max_credits - conn->total_credits < credits_granted)
- credits_granted = conn->max_credits -
+ if (conn->vals->max_credits - conn->total_credits < credits_granted)
+ credits_granted = conn->vals->max_credits -
conn->total_credits;
conn->total_credits += credits_granted;
@@ -610,16 +615,14 @@ static void destroy_previous_session(struct ksmbd_user *user, u64 id)
/**
* smb2_get_name() - get filename string from on the wire smb format
- * @share: ksmbd_share_config pointer
* @src: source buffer
* @maxlen: maxlen of source string
- * @nls_table: nls_table pointer
+ * @local_nls: nls_table pointer
*
* Return: matching converted filename on success, otherwise error ptr
*/
static char *
-smb2_get_name(struct ksmbd_share_config *share, const char *src,
- const int maxlen, struct nls_table *local_nls)
+smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
{
char *name;
@@ -1303,7 +1306,7 @@ static int ntlm_negotiate(struct ksmbd_work *work,
int sz, rc;
ksmbd_debug(SMB, "negotiate phase\n");
- rc = ksmbd_decode_ntlmssp_neg_blob(negblob, negblob_len, work->sess);
+ rc = ksmbd_decode_ntlmssp_neg_blob(negblob, negblob_len, work->conn);
if (rc)
return rc;
@@ -1313,7 +1316,7 @@ static int ntlm_negotiate(struct ksmbd_work *work,
memset(chgblob, 0, sizeof(struct challenge_message));
if (!work->conn->use_spnego) {
- sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->sess);
+ sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->conn);
if (sz < 0)
return -ENOMEM;
@@ -1329,7 +1332,7 @@ static int ntlm_negotiate(struct ksmbd_work *work,
return -ENOMEM;
chgblob = (struct challenge_message *)neg_blob;
- sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->sess);
+ sz = ksmbd_build_ntlmssp_challenge_blob(chgblob, work->conn);
if (sz < 0) {
rc = -ENOMEM;
goto out;
@@ -1450,60 +1453,62 @@ static int ntlm_authenticate(struct ksmbd_work *work)
ksmbd_free_user(user);
return 0;
}
- ksmbd_free_user(sess->user);
- }
- sess->user = user;
- if (user_guest(sess->user)) {
- if (conn->sign) {
- ksmbd_debug(SMB, "Guest login not allowed when signing enabled\n");
+ if (!ksmbd_compare_user(sess->user, user)) {
+ ksmbd_free_user(user);
return -EPERM;
}
+ ksmbd_free_user(user);
+ } else {
+ sess->user = user;
+ }
+ if (user_guest(sess->user)) {
rsp->SessionFlags = SMB2_SESSION_FLAG_IS_GUEST_LE;
} else {
struct authenticate_message *authblob;
authblob = user_authblob(conn, req);
sz = le16_to_cpu(req->SecurityBufferLength);
- rc = ksmbd_decode_ntlmssp_auth_blob(authblob, sz, sess);
+ rc = ksmbd_decode_ntlmssp_auth_blob(authblob, sz, conn, sess);
if (rc) {
set_user_flag(sess->user, KSMBD_USER_FLAG_BAD_PASSWORD);
ksmbd_debug(SMB, "authentication failed\n");
return -EPERM;
}
+ }
- /*
- * If session state is SMB2_SESSION_VALID, We can assume
- * that it is reauthentication. And the user/password
- * has been verified, so return it here.
- */
- if (sess->state == SMB2_SESSION_VALID) {
- if (conn->binding)
- goto binding_session;
- return 0;
- }
+ /*
+ * If session state is SMB2_SESSION_VALID, we can assume
+ * that it is reauthentication. And the user/password
+ * has been verified, so return it here.
+ */
+ if (sess->state == SMB2_SESSION_VALID) {
+ if (conn->binding)
+ goto binding_session;
+ return 0;
+ }
- if ((conn->sign || server_conf.enforced_signing) ||
- (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
- sess->sign = true;
+ if ((rsp->SessionFlags != SMB2_SESSION_FLAG_IS_GUEST_LE &&
+ (conn->sign || server_conf.enforced_signing)) ||
+ (req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
+ sess->sign = true;
- if (smb3_encryption_negotiated(conn) &&
- !(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
- rc = conn->ops->generate_encryptionkey(sess);
- if (rc) {
- ksmbd_debug(SMB,
- "SMB3 encryption key generation failed\n");
- return -EINVAL;
- }
- sess->enc = true;
- rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE;
- /*
- * signing is disable if encryption is enable
- * on this session
- */
- sess->sign = false;
+ if (smb3_encryption_negotiated(conn) &&
+ !(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
+ rc = conn->ops->generate_encryptionkey(sess);
+ if (rc) {
+ ksmbd_debug(SMB,
+ "SMB3 encryption key generation failed\n");
+ return -EINVAL;
}
+ sess->enc = true;
+ rsp->SessionFlags = SMB2_SESSION_FLAG_ENCRYPT_DATA_LE;
+ /*
+ * signing is disabled if encryption is enabled
+ * on this session
+ */
+ sess->sign = false;
}
binding_session:
@@ -2057,9 +2062,6 @@ int smb2_session_logoff(struct ksmbd_work *work)
ksmbd_debug(SMB, "request\n");
- /* Got a valid session, set connection state */
- WARN_ON(sess->conn != conn);
-
/* setting CifsExiting here may race with start_tcp_sess */
ksmbd_conn_set_need_reconnect(work);
ksmbd_close_session_fds(work);
@@ -2530,8 +2532,7 @@ int smb2_open(struct ksmbd_work *work)
goto err_out1;
}
- name = smb2_get_name(share,
- req->Buffer,
+ name = smb2_get_name(req->Buffer,
le16_to_cpu(req->NameLength),
work->conn->local_nls);
if (IS_ERR(name)) {
@@ -2687,7 +2688,7 @@ int smb2_open(struct ksmbd_work *work)
(struct create_posix *)context;
if (le16_to_cpu(context->DataOffset) +
le32_to_cpu(context->DataLength) <
- sizeof(struct create_posix)) {
+ sizeof(struct create_posix) - 4) {
rc = -EINVAL;
goto err_out1;
}
@@ -3392,7 +3393,6 @@ static int dentry_name(struct ksmbd_dir_info *d_info, int info_level)
* @conn: connection instance
* @info_level: smb information level
* @d_info: structure included variables for query dir
- * @user_ns: user namespace
* @ksmbd_kstat: ksmbd wrapper of dirent stat information
*
* if directory has many entries, find first can't read it fully.
@@ -3422,9 +3422,9 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
goto free_conv_name;
}
- struct_sz = readdir_info_level_struct_sz(info_level);
- next_entry_offset = ALIGN(struct_sz - 1 + conv_len,
- KSMBD_DIR_INFO_ALIGNMENT);
+ struct_sz = readdir_info_level_struct_sz(info_level) - 1 + conv_len;
+ next_entry_offset = ALIGN(struct_sz, KSMBD_DIR_INFO_ALIGNMENT);
+ d_info->last_entry_off_align = next_entry_offset - struct_sz;
if (next_entry_offset > d_info->out_buf_len) {
d_info->out_buf_len = 0;
@@ -3976,6 +3976,7 @@ int smb2_query_dir(struct ksmbd_work *work)
((struct file_directory_info *)
((char *)rsp->Buffer + d_info.last_entry_offset))
->NextEntryOffset = 0;
+ d_info.data_count -= d_info.last_entry_off_align;
rsp->StructureSize = cpu_to_le16(9);
rsp->OutputBufferOffset = cpu_to_le16(72);
@@ -4018,6 +4019,7 @@ err_out2:
* buffer_check_err() - helper function to check buffer errors
* @reqOutputBufferLength: max buffer length expected in command response
* @rsp: query info response buffer contains output buffer length
+ * @rsp_org: base response buffer pointer in case of chained response
* @infoclass_size: query info class response buffer size
*
* Return: 0 on success, otherwise error
@@ -5398,8 +5400,7 @@ static int smb2_rename(struct ksmbd_work *work,
goto out;
}
- new_name = smb2_get_name(share,
- file_info->FileName,
+ new_name = smb2_get_name(file_info->FileName,
le32_to_cpu(file_info->FileNameLength),
local_nls);
if (IS_ERR(new_name)) {
@@ -5510,8 +5511,7 @@ static int smb2_create_link(struct ksmbd_work *work,
if (!pathname)
return -ENOMEM;
- link_name = smb2_get_name(share,
- file_info->FileName,
+ link_name = smb2_get_name(file_info->FileName,
le32_to_cpu(file_info->FileNameLength),
local_nls);
if (IS_ERR(link_name) || S_ISDIR(file_inode(filp)->i_mode)) {
@@ -5849,7 +5849,7 @@ static int set_file_mode_info(struct ksmbd_file *fp,
* smb2_set_info_file() - handler for smb2 set info command
* @work: smb work containing set info command buffer
* @fp: ksmbd_file pointer
- * @info_class: smb2 set info class
+ * @req: request buffer pointer
* @share: ksmbd_share_config pointer
*
* Return: 0 on success, otherwise error
@@ -6121,25 +6121,46 @@ out:
return err;
}
-static ssize_t smb2_read_rdma_channel(struct ksmbd_work *work,
- struct smb2_read_req *req, void *data_buf,
- size_t length)
+static int smb2_set_remote_key_for_rdma(struct ksmbd_work *work,
+ struct smb2_buffer_desc_v1 *desc,
+ __le32 Channel,
+ __le16 ChannelInfoOffset,
+ __le16 ChannelInfoLength)
{
- struct smb2_buffer_desc_v1 *desc =
- (struct smb2_buffer_desc_v1 *)&req->Buffer[0];
- int err;
+ unsigned int i, ch_count;
if (work->conn->dialect == SMB30_PROT_ID &&
- req->Channel != SMB2_CHANNEL_RDMA_V1)
+ Channel != SMB2_CHANNEL_RDMA_V1)
return -EINVAL;
- if (req->ReadChannelInfoOffset == 0 ||
- le16_to_cpu(req->ReadChannelInfoLength) < sizeof(*desc))
+ ch_count = le16_to_cpu(ChannelInfoLength) / sizeof(*desc);
+ if (ksmbd_debug_types & KSMBD_DEBUG_RDMA) {
+ for (i = 0; i < ch_count; i++) {
+ pr_info("RDMA r/w request %#x: token %#x, length %#x\n",
+ i,
+ le32_to_cpu(desc[i].token),
+ le32_to_cpu(desc[i].length));
+ }
+ }
+ if (ch_count != 1) {
+ ksmbd_debug(RDMA, "multiple RDMA buffer descriptors (%d) are not supported yet\n",
+ ch_count);
return -EINVAL;
+ }
work->need_invalidate_rkey =
- (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE);
+ (Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE);
work->remote_key = le32_to_cpu(desc->token);
+ return 0;
+}
+
+static ssize_t smb2_read_rdma_channel(struct ksmbd_work *work,
+ struct smb2_read_req *req, void *data_buf,
+ size_t length)
+{
+ struct smb2_buffer_desc_v1 *desc =
+ (struct smb2_buffer_desc_v1 *)&req->Buffer[0];
+ int err;
err = ksmbd_conn_rdma_write(work->conn, data_buf, length,
le32_to_cpu(desc->token),
@@ -6162,7 +6183,7 @@ int smb2_read(struct ksmbd_work *work)
struct ksmbd_conn *conn = work->conn;
struct smb2_read_req *req;
struct smb2_read_rsp *rsp;
- struct ksmbd_file *fp;
+ struct ksmbd_file *fp = NULL;
loff_t offset;
size_t length, mincount;
ssize_t nbytes = 0, remain_bytes = 0;
@@ -6176,6 +6197,24 @@ int smb2_read(struct ksmbd_work *work)
return smb2_read_pipe(work);
}
+ if (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE ||
+ req->Channel == SMB2_CHANNEL_RDMA_V1) {
+ unsigned int ch_offset = le16_to_cpu(req->ReadChannelInfoOffset);
+
+ if (ch_offset < offsetof(struct smb2_read_req, Buffer)) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = smb2_set_remote_key_for_rdma(work,
+ (struct smb2_buffer_desc_v1 *)
+ ((char *)req + ch_offset),
+ req->Channel,
+ req->ReadChannelInfoOffset,
+ req->ReadChannelInfoLength);
+ if (err)
+ goto out;
+ }
+
fp = ksmbd_lookup_fd_slow(work, le64_to_cpu(req->VolatileFileId),
le64_to_cpu(req->PersistentFileId));
if (!fp) {
@@ -6361,21 +6400,6 @@ static ssize_t smb2_write_rdma_channel(struct ksmbd_work *work,
desc = (struct smb2_buffer_desc_v1 *)&req->Buffer[0];
- if (work->conn->dialect == SMB30_PROT_ID &&
- req->Channel != SMB2_CHANNEL_RDMA_V1)
- return -EINVAL;
-
- if (req->Length != 0 || req->DataOffset != 0)
- return -EINVAL;
-
- if (req->WriteChannelInfoOffset == 0 ||
- le16_to_cpu(req->WriteChannelInfoLength) < sizeof(*desc))
- return -EINVAL;
-
- work->need_invalidate_rkey =
- (req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE);
- work->remote_key = le32_to_cpu(desc->token);
-
data_buf = kvmalloc(length, GFP_KERNEL | __GFP_ZERO);
if (!data_buf)
return -ENOMEM;
@@ -6422,6 +6446,25 @@ int smb2_write(struct ksmbd_work *work)
return smb2_write_pipe(work);
}
+ if (req->Channel == SMB2_CHANNEL_RDMA_V1 ||
+ req->Channel == SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
+ unsigned int ch_offset = le16_to_cpu(req->WriteChannelInfoOffset);
+
+ if (req->Length != 0 || req->DataOffset != 0 ||
+ ch_offset < offsetof(struct smb2_write_req, Buffer)) {
+ err = -EINVAL;
+ goto out;
+ }
+ err = smb2_set_remote_key_for_rdma(work,
+ (struct smb2_buffer_desc_v1 *)
+ ((char *)req + ch_offset),
+ req->Channel,
+ req->WriteChannelInfoOffset,
+ req->WriteChannelInfoLength);
+ if (err)
+ goto out;
+ }
+
if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
ksmbd_debug(SMB, "User does not have write permission\n");
err = -EACCES;
@@ -7243,15 +7286,10 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
struct sockaddr_storage_rsp *sockaddr_storage;
unsigned int flags;
unsigned long long speed;
- struct sockaddr_in6 *csin6 = (struct sockaddr_in6 *)&conn->peer_addr;
rtnl_lock();
for_each_netdev(&init_net, netdev) {
- if (out_buf_len <
- nbytes + sizeof(struct network_interface_info_ioctl_rsp)) {
- rtnl_unlock();
- return -ENOSPC;
- }
+ bool ipv4_set = false;
if (netdev->type == ARPHRD_LOOPBACK)
continue;
@@ -7259,12 +7297,20 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
flags = dev_get_flags(netdev);
if (!(flags & IFF_RUNNING))
continue;
+ipv6_retry:
+ if (out_buf_len <
+ nbytes + sizeof(struct network_interface_info_ioctl_rsp)) {
+ rtnl_unlock();
+ return -ENOSPC;
+ }
nii_rsp = (struct network_interface_info_ioctl_rsp *)
&rsp->Buffer[nbytes];
nii_rsp->IfIndex = cpu_to_le32(netdev->ifindex);
nii_rsp->Capability = 0;
+ if (netdev->real_num_tx_queues > 1)
+ nii_rsp->Capability |= cpu_to_le32(RSS_CAPABLE);
if (ksmbd_rdma_capable_netdev(netdev))
nii_rsp->Capability |= cpu_to_le32(RDMA_CAPABLE);
@@ -7289,8 +7335,7 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
nii_rsp->SockAddr_Storage;
memset(sockaddr_storage, 0, 128);
- if (conn->peer_addr.ss_family == PF_INET ||
- ipv6_addr_v4mapped(&csin6->sin6_addr)) {
+ if (!ipv4_set) {
struct in_device *idev;
sockaddr_storage->Family = cpu_to_le16(INTERNETWORK);
@@ -7301,6 +7346,9 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
continue;
sockaddr_storage->addr4.IPv4address =
idev_ipv4_address(idev);
+ nbytes += sizeof(struct network_interface_info_ioctl_rsp);
+ ipv4_set = true;
+ goto ipv6_retry;
} else {
struct inet6_dev *idev6;
struct inet6_ifaddr *ifa;
@@ -7322,9 +7370,8 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
break;
}
sockaddr_storage->addr6.ScopeId = 0;
+ nbytes += sizeof(struct network_interface_info_ioctl_rsp);
}
-
- nbytes += sizeof(struct network_interface_info_ioctl_rsp);
}
rtnl_unlock();
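
A note on the read/write RDMA hunks above: the server now refuses any
client-supplied channel-info offset that would place the buffer
descriptor in front of the PDU's variable part, and only a single
descriptor is accepted. A minimal sketch of that validation, assuming
the smb2_read_req layout shown above (everything else illustrative):

	struct smb2_buffer_desc_v1 *desc;
	unsigned int ch_offset = le16_to_cpu(req->ReadChannelInfoOffset);

	/* The descriptor must start at or after the Buffer field,
	 * otherwise it would alias the fixed PDU header. */
	if (ch_offset < offsetof(struct smb2_read_req, Buffer))
		return -EINVAL;
	desc = (struct smb2_buffer_desc_v1 *)((char *)req + ch_offset);

	/* Only one descriptor per request is handled so far. */
	if (le16_to_cpu(req->ReadChannelInfoLength) / sizeof(*desc) != 1)
		return -EINVAL;
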
diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
index 4a3e4339d4c4..725b800c29c8 100644
--- a/fs/ksmbd/smb2pdu.h
+++ b/fs/ksmbd/smb2pdu.h
@@ -980,6 +980,7 @@ int init_smb3_11_server(struct ksmbd_conn *conn);
void init_smb2_max_read_size(unsigned int sz);
void init_smb2_max_write_size(unsigned int sz);
void init_smb2_max_trans_size(unsigned int sz);
+void init_smb2_max_credits(unsigned int sz);
bool is_smb2_neg_cmd(struct ksmbd_work *work);
bool is_smb2_rsp(struct ksmbd_work *work);
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
index ef7f42b0290a..9a7e211dbf4f 100644
--- a/fs/ksmbd/smb_common.c
+++ b/fs/ksmbd/smb_common.c
@@ -308,14 +308,17 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
for (i = 0; i < 2; i++) {
struct kstat kstat;
struct ksmbd_kstat ksmbd_kstat;
+ struct dentry *dentry;
if (!dir->dot_dotdot[i]) { /* fill dot entry info */
if (i == 0) {
d_info->name = ".";
d_info->name_len = 1;
+ dentry = dir->filp->f_path.dentry;
} else {
d_info->name = "..";
d_info->name_len = 2;
+ dentry = dir->filp->f_path.dentry->d_parent;
}
if (!match_pattern(d_info->name, d_info->name_len,
@@ -327,7 +330,7 @@ int ksmbd_populate_dot_dotdot_entries(struct ksmbd_work *work, int info_level,
ksmbd_kstat.kstat = &kstat;
ksmbd_vfs_fill_dentry_attrs(work,
user_ns,
- dir->filp->f_path.dentry->d_parent,
+ dentry,
&ksmbd_kstat);
rc = fn(conn, info_level, d_info, &ksmbd_kstat);
if (rc)
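
The ksmbd_populate_dot_dotdot_entries() hunk above fixes which dentry is
stat'ed for the two synthetic entries; previously both used d_parent.
Reduced to its core (names as in the hunk):

	/* "." must describe the directory itself, ".." its parent. */
	struct dentry *dentry = (i == 0)
		? dir->filp->f_path.dentry
		: dir->filp->f_path.dentry->d_parent;
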
diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
index 50590842b651..e1369b4345a9 100644
--- a/fs/ksmbd/smb_common.h
+++ b/fs/ksmbd/smb_common.h
@@ -365,6 +365,7 @@ struct smb_version_values {
__u32 max_read_size;
__u32 max_write_size;
__u32 max_trans_size;
+ __u32 max_credits;
__u32 large_lock_type;
__u32 exclusive_lock_type;
__u32 shared_lock_type;
diff --git a/fs/ksmbd/transport_ipc.c b/fs/ksmbd/transport_ipc.c
index 1acf1892a466..3ad6881e0f7e 100644
--- a/fs/ksmbd/transport_ipc.c
+++ b/fs/ksmbd/transport_ipc.c
@@ -301,6 +301,8 @@ static int ipc_server_config_on_startup(struct ksmbd_startup_request *req)
init_smb2_max_write_size(req->smb2_max_write);
if (req->smb2_max_trans)
init_smb2_max_trans_size(req->smb2_max_trans);
+ if (req->smb2_max_credits)
+ init_smb2_max_credits(req->smb2_max_credits);
ret = ksmbd_set_netbios_name(req->netbios_name);
ret |= ksmbd_set_server_string(req->server_string);
diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
index 7e57cbb0bb35..ba5a22bc2e6d 100644
--- a/fs/ksmbd/transport_rdma.c
+++ b/fs/ksmbd/transport_rdma.c
@@ -34,7 +34,8 @@
#include "smbstatus.h"
#include "transport_rdma.h"
-#define SMB_DIRECT_PORT 5445
+#define SMB_DIRECT_PORT_IWARP 5445
+#define SMB_DIRECT_PORT_INFINIBAND 445
#define SMB_DIRECT_VERSION_LE cpu_to_le16(0x0100)
@@ -60,6 +61,10 @@
* as defined in [MS-SMBD] 3.1.1.1
* Those may change after a SMB_DIRECT negotiation
*/
+
+/* Use port 445 as the SMB Direct port by default */
+static int smb_direct_port = SMB_DIRECT_PORT_INFINIBAND;
+
/* The local peer's maximum number of credits to grant to the peer */
static int smb_direct_receive_credit_max = 255;
@@ -75,10 +80,18 @@ static int smb_direct_max_fragmented_recv_size = 1024 * 1024;
/* The maximum single-message size which can be received */
static int smb_direct_max_receive_size = 8192;
-static int smb_direct_max_read_write_size = 1024 * 1024;
+static int smb_direct_max_read_write_size = 524224;
static int smb_direct_max_outstanding_rw_ops = 8;
+static LIST_HEAD(smb_direct_device_list);
+static DEFINE_RWLOCK(smb_direct_device_lock);
+
+struct smb_direct_device {
+ struct ib_device *ib_dev;
+ struct list_head list;
+};
+
static struct smb_direct_listener {
struct rdma_cm_id *cm_id;
} smb_direct_listener;
@@ -415,6 +428,7 @@ static void free_transport(struct smb_direct_transport *t)
if (t->qp) {
ib_drain_qp(t->qp);
+ ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs);
ib_destroy_qp(t->qp);
}
@@ -555,6 +569,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
}
t->negotiation_requested = true;
t->full_packet_received = true;
+ enqueue_reassembly(t, recvmsg, 0);
wake_up_interruptible(&t->wait_status);
break;
case SMB_DIRECT_MSG_DATA_TRANSFER: {
@@ -1438,6 +1453,15 @@ static void smb_direct_disconnect(struct ksmbd_transport *t)
free_transport(st);
}
+static void smb_direct_shutdown(struct ksmbd_transport *t)
+{
+ struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+
+ ksmbd_debug(RDMA, "smb-direct shutdown cm_id=%p\n", st->cm_id);
+
+ smb_direct_disconnect_rdma_work(&st->disconnect_work);
+}
+
static int smb_direct_cm_handler(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
@@ -1581,19 +1605,13 @@ static int smb_direct_accept_client(struct smb_direct_transport *t)
pr_err("error at rdma_accept: %d\n", ret);
return ret;
}
-
- wait_event_interruptible(t->wait_status,
- t->status != SMB_DIRECT_CS_NEW);
- if (t->status != SMB_DIRECT_CS_CONNECTED)
- return -ENOTCONN;
return 0;
}
-static int smb_direct_negotiate(struct smb_direct_transport *t)
+static int smb_direct_prepare_negotiation(struct smb_direct_transport *t)
{
int ret;
struct smb_direct_recvmsg *recvmsg;
- struct smb_direct_negotiate_req *req;
recvmsg = get_free_recvmsg(t);
if (!recvmsg)
@@ -1603,44 +1621,20 @@ static int smb_direct_negotiate(struct smb_direct_transport *t)
ret = smb_direct_post_recv(t, recvmsg);
if (ret) {
pr_err("Can't post recv: %d\n", ret);
- goto out;
+ goto out_err;
}
t->negotiation_requested = false;
ret = smb_direct_accept_client(t);
if (ret) {
pr_err("Can't accept client\n");
- goto out;
+ goto out_err;
}
smb_direct_post_recv_credits(&t->post_recv_credits_work.work);
-
- ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
- ret = wait_event_interruptible_timeout(t->wait_status,
- t->negotiation_requested ||
- t->status == SMB_DIRECT_CS_DISCONNECTED,
- SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
- if (ret <= 0 || t->status == SMB_DIRECT_CS_DISCONNECTED) {
- ret = ret < 0 ? ret : -ETIMEDOUT;
- goto out;
- }
-
- ret = smb_direct_check_recvmsg(recvmsg);
- if (ret == -ECONNABORTED)
- goto out;
-
- req = (struct smb_direct_negotiate_req *)recvmsg->packet;
- t->max_recv_size = min_t(int, t->max_recv_size,
- le32_to_cpu(req->preferred_send_size));
- t->max_send_size = min_t(int, t->max_send_size,
- le32_to_cpu(req->max_receive_size));
- t->max_fragmented_send_size =
- le32_to_cpu(req->max_fragmented_size);
-
- ret = smb_direct_send_negotiate_response(t, ret);
-out:
- if (recvmsg)
- put_recvmsg(t, recvmsg);
+ return 0;
+out_err:
+ put_recvmsg(t, recvmsg);
return ret;
}
@@ -1724,7 +1718,9 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
cap->max_inline_data = 0;
- cap->max_rdma_ctxs = 0;
+ cap->max_rdma_ctxs =
+ rdma_rw_mr_factor(device, t->cm_id->port_num, max_pages) *
+ smb_direct_max_outstanding_rw_ops;
return 0;
}
@@ -1806,6 +1802,7 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
{
int ret;
struct ib_qp_init_attr qp_attr;
+ int pages_per_rw;
t->pd = ib_alloc_pd(t->cm_id->device, 0);
if (IS_ERR(t->pd)) {
@@ -1853,6 +1850,23 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
t->qp = t->cm_id->qp;
t->cm_id->event_handler = smb_direct_cm_handler;
+ pages_per_rw = DIV_ROUND_UP(t->max_rdma_rw_size, PAGE_SIZE) + 1;
+ if (pages_per_rw > t->cm_id->device->attrs.max_sgl_rd) {
+ int pages_per_mr, mr_count;
+
+ pages_per_mr = min_t(int, pages_per_rw,
+ t->cm_id->device->attrs.max_fast_reg_page_list_len);
+ mr_count = DIV_ROUND_UP(pages_per_rw, pages_per_mr) *
+ atomic_read(&t->rw_avail_ops);
+ ret = ib_mr_pool_init(t->qp, &t->qp->rdma_mrs, mr_count,
+ IB_MR_TYPE_MEM_REG, pages_per_mr, 0);
+ if (ret) {
+ pr_err("failed to init mr pool count %d pages %d\n",
+ mr_count, pages_per_mr);
+ goto err;
+ }
+ }
+
return 0;
err:
if (t->qp) {
@@ -1877,6 +1891,49 @@ err:
static int smb_direct_prepare(struct ksmbd_transport *t)
{
struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+ struct smb_direct_recvmsg *recvmsg;
+ struct smb_direct_negotiate_req *req;
+ int ret;
+
+ ksmbd_debug(RDMA, "Waiting for SMB_DIRECT negotiate request\n");
+ ret = wait_event_interruptible_timeout(st->wait_status,
+ st->negotiation_requested ||
+ st->status == SMB_DIRECT_CS_DISCONNECTED,
+ SMB_DIRECT_NEGOTIATE_TIMEOUT * HZ);
+ if (ret <= 0 || st->status == SMB_DIRECT_CS_DISCONNECTED)
+ return ret < 0 ? ret : -ETIMEDOUT;
+
+ recvmsg = get_first_reassembly(st);
+ if (!recvmsg)
+ return -ECONNABORTED;
+
+ ret = smb_direct_check_recvmsg(recvmsg);
+ if (ret == -ECONNABORTED)
+ goto out;
+
+ req = (struct smb_direct_negotiate_req *)recvmsg->packet;
+ st->max_recv_size = min_t(int, st->max_recv_size,
+ le32_to_cpu(req->preferred_send_size));
+ st->max_send_size = min_t(int, st->max_send_size,
+ le32_to_cpu(req->max_receive_size));
+ st->max_fragmented_send_size =
+ le32_to_cpu(req->max_fragmented_size);
+ st->max_fragmented_recv_size =
+ (st->recv_credit_max * st->max_recv_size) / 2;
+
+ ret = smb_direct_send_negotiate_response(st, ret);
+out:
+ spin_lock_irq(&st->reassembly_queue_lock);
+ st->reassembly_queue_length--;
+ list_del(&recvmsg->list);
+ spin_unlock_irq(&st->reassembly_queue_lock);
+ put_recvmsg(st, recvmsg);
+
+ return ret;
+}
+
+static int smb_direct_connect(struct smb_direct_transport *st)
+{
int ret;
struct ib_qp_cap qp_cap;
@@ -1898,13 +1955,11 @@ static int smb_direct_prepare(struct ksmbd_transport *t)
return ret;
}
- ret = smb_direct_negotiate(st);
+ ret = smb_direct_prepare_negotiation(st);
if (ret) {
pr_err("Can't negotiate: %d\n", ret);
return ret;
}
-
- st->status = SMB_DIRECT_CS_CONNECTED;
return 0;
}
@@ -1920,6 +1975,7 @@ static bool rdma_frwr_is_supported(struct ib_device_attr *attrs)
static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
{
struct smb_direct_transport *t;
+ int ret;
if (!rdma_frwr_is_supported(&new_cm_id->device->attrs)) {
ksmbd_debug(RDMA,
@@ -1932,18 +1988,23 @@ static int smb_direct_handle_connect_request(struct rdma_cm_id *new_cm_id)
if (!t)
return -ENOMEM;
+ ret = smb_direct_connect(t);
+ if (ret)
+ goto out_err;
+
KSMBD_TRANS(t)->handler = kthread_run(ksmbd_conn_handler_loop,
KSMBD_TRANS(t)->conn, "ksmbd:r%u",
- SMB_DIRECT_PORT);
+ smb_direct_port);
if (IS_ERR(KSMBD_TRANS(t)->handler)) {
- int ret = PTR_ERR(KSMBD_TRANS(t)->handler);
-
+ ret = PTR_ERR(KSMBD_TRANS(t)->handler);
pr_err("Can't start thread\n");
- free_transport(t);
- return ret;
+ goto out_err;
}
return 0;
+out_err:
+ free_transport(t);
+ return ret;
}
static int smb_direct_listen_handler(struct rdma_cm_id *cm_id,
@@ -2007,12 +2068,65 @@ err:
return ret;
}
+static int smb_direct_ib_client_add(struct ib_device *ib_dev)
+{
+ struct smb_direct_device *smb_dev;
+
+ /* Use port 5445 if the device type is iWARP (not IB) */
+ if (ib_dev->node_type != RDMA_NODE_IB_CA)
+ smb_direct_port = SMB_DIRECT_PORT_IWARP;
+
+ if (!ib_dev->ops.get_netdev ||
+ !rdma_frwr_is_supported(&ib_dev->attrs))
+ return 0;
+
+ smb_dev = kzalloc(sizeof(*smb_dev), GFP_KERNEL);
+ if (!smb_dev)
+ return -ENOMEM;
+ smb_dev->ib_dev = ib_dev;
+
+ write_lock(&smb_direct_device_lock);
+ list_add(&smb_dev->list, &smb_direct_device_list);
+ write_unlock(&smb_direct_device_lock);
+
+ ksmbd_debug(RDMA, "ib device added: name %s\n", ib_dev->name);
+ return 0;
+}
+
+static void smb_direct_ib_client_remove(struct ib_device *ib_dev,
+ void *client_data)
+{
+ struct smb_direct_device *smb_dev, *tmp;
+
+ write_lock(&smb_direct_device_lock);
+ list_for_each_entry_safe(smb_dev, tmp, &smb_direct_device_list, list) {
+ if (smb_dev->ib_dev == ib_dev) {
+ list_del(&smb_dev->list);
+ kfree(smb_dev);
+ break;
+ }
+ }
+ write_unlock(&smb_direct_device_lock);
+}
+
+static struct ib_client smb_direct_ib_client = {
+ .name = "ksmbd_smb_direct_ib",
+ .add = smb_direct_ib_client_add,
+ .remove = smb_direct_ib_client_remove,
+};
+
int ksmbd_rdma_init(void)
{
int ret;
smb_direct_listener.cm_id = NULL;
+ ret = ib_register_client(&smb_direct_ib_client);
+ if (ret) {
+ pr_err("failed to ib_register_client\n");
+ return ret;
+ }
+
/* When a client is running out of send credits, the credits are
* granted by the server sending a packet using this queue.
* This avoids the situation where a client cannot send packets
@@ -2023,7 +2137,7 @@ int ksmbd_rdma_init(void)
if (!smb_direct_wq)
return -ENOMEM;
- ret = smb_direct_listen(SMB_DIRECT_PORT);
+ ret = smb_direct_listen(smb_direct_port);
if (ret) {
destroy_workqueue(smb_direct_wq);
smb_direct_wq = NULL;
@@ -2036,36 +2150,67 @@ int ksmbd_rdma_init(void)
return 0;
}
-int ksmbd_rdma_destroy(void)
+void ksmbd_rdma_destroy(void)
{
- if (smb_direct_listener.cm_id)
- rdma_destroy_id(smb_direct_listener.cm_id);
+ if (!smb_direct_listener.cm_id)
+ return;
+
+ ib_unregister_client(&smb_direct_ib_client);
+ rdma_destroy_id(smb_direct_listener.cm_id);
+
smb_direct_listener.cm_id = NULL;
if (smb_direct_wq) {
destroy_workqueue(smb_direct_wq);
smb_direct_wq = NULL;
}
- return 0;
}
bool ksmbd_rdma_capable_netdev(struct net_device *netdev)
{
- struct ib_device *ibdev;
+ struct smb_direct_device *smb_dev;
+ int i;
bool rdma_capable = false;
- ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
- if (ibdev) {
- if (rdma_frwr_is_supported(&ibdev->attrs))
- rdma_capable = true;
- ib_device_put(ibdev);
+ read_lock(&smb_direct_device_lock);
+ list_for_each_entry(smb_dev, &smb_direct_device_list, list) {
+ for (i = 0; i < smb_dev->ib_dev->phys_port_cnt; i++) {
+ struct net_device *ndev;
+
+ ndev = smb_dev->ib_dev->ops.get_netdev(smb_dev->ib_dev,
+ i + 1);
+ if (!ndev)
+ continue;
+
+ if (ndev == netdev) {
+ dev_put(ndev);
+ rdma_capable = true;
+ goto out;
+ }
+ dev_put(ndev);
+ }
+ }
+out:
+ read_unlock(&smb_direct_device_lock);
+
+ if (!rdma_capable) {
+ struct ib_device *ibdev;
+
+ ibdev = ib_device_get_by_netdev(netdev, RDMA_DRIVER_UNKNOWN);
+ if (ibdev) {
+ if (rdma_frwr_is_supported(&ibdev->attrs))
+ rdma_capable = true;
+ ib_device_put(ibdev);
+ }
}
+
return rdma_capable;
}
static struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
.prepare = smb_direct_prepare,
.disconnect = smb_direct_disconnect,
+ .shutdown = smb_direct_shutdown,
.writev = smb_direct_writev,
.read = smb_direct_read,
.rdma_read = smb_direct_rdma_read,
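
On the MR pool sizing introduced in smb_direct_create_qpair() above: the
pool is only built when one RDMA read/write can span more pages than the
device reads by SGL, and it must cover the case where a single
fast-registration MR can't map a whole operation. Worked through with
the new defaults (a sketch, assuming PAGE_SIZE is 4096 and a device
limit of 256 pages per MR):

	pages_per_rw = DIV_ROUND_UP(524224, 4096) + 1;	/* = 129 */
	pages_per_mr = min(129, 256);			/* = 129 */
	mr_count     = DIV_ROUND_UP(129, 129) * 8;	/* = 8, one MR per
							   outstanding op */
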
diff --git a/fs/ksmbd/transport_rdma.h b/fs/ksmbd/transport_rdma.h
index 0fa8adc0776f..5567d93a6f96 100644
--- a/fs/ksmbd/transport_rdma.h
+++ b/fs/ksmbd/transport_rdma.h
@@ -7,8 +7,6 @@
#ifndef __KSMBD_TRANSPORT_RDMA_H__
#define __KSMBD_TRANSPORT_RDMA_H__
-#define SMB_DIRECT_PORT 5445
-
/* SMB DIRECT negotiation request packet [MS-SMBD] 2.2.1 */
struct smb_direct_negotiate_req {
__le16 min_version;
@@ -52,7 +50,7 @@ struct smb_direct_data_transfer {
#ifdef CONFIG_SMB_SERVER_SMBDIRECT
int ksmbd_rdma_init(void);
-int ksmbd_rdma_destroy(void);
+void ksmbd_rdma_destroy(void);
bool ksmbd_rdma_capable_netdev(struct net_device *netdev);
#else
static inline int ksmbd_rdma_init(void) { return 0; }
diff --git a/fs/ksmbd/transport_tcp.c b/fs/ksmbd/transport_tcp.c
index c14320e03b69..82a1429bbe12 100644
--- a/fs/ksmbd/transport_tcp.c
+++ b/fs/ksmbd/transport_tcp.c
@@ -404,7 +404,7 @@ static int create_socket(struct interface *iface)
&ksmbd_socket);
if (ret) {
pr_err("Can't create socket for ipv4: %d\n", ret);
- goto out_error;
+ goto out_clear;
}
sin.sin_family = PF_INET;
@@ -462,6 +462,7 @@ static int create_socket(struct interface *iface)
out_error:
tcp_destroy_socket(ksmbd_socket);
+out_clear:
iface->ksmbd_socket = NULL;
return ret;
}
diff --git a/fs/ksmbd/vfs.h b/fs/ksmbd/vfs.h
index adf94a4f22fa..8c37aaf936ab 100644
--- a/fs/ksmbd/vfs.h
+++ b/fs/ksmbd/vfs.h
@@ -47,6 +47,7 @@ struct ksmbd_dir_info {
int last_entry_offset;
bool hide_dot_file;
int flags;
+ int last_entry_off_align;
};
struct ksmbd_readdir_data {
diff --git a/fs/ksmbd/vfs_cache.h b/fs/ksmbd/vfs_cache.h
index 448576fbe4b7..36239ce31afd 100644
--- a/fs/ksmbd/vfs_cache.h
+++ b/fs/ksmbd/vfs_cache.h
@@ -96,16 +96,6 @@ struct ksmbd_file {
int durable_timeout;
- /* for SMB1 */
- int pid;
-
- /* conflict lock fail count for SMB1 */
- unsigned int cflock_cnt;
- /* last lock failure start offset for SMB1 */
- unsigned long long llock_fstart;
-
- int dirent_offset;
-
/* if ls is happening on the directory, below is valid */
struct ksmbd_readdir_data readdir_data;
int dot_dotdot[2];
diff --git a/fs/libfs.c b/fs/libfs.c
index ba7438ab9371..974125270a42 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -1379,7 +1379,7 @@ bool is_empty_dir_inode(struct inode *inode)
(inode->i_op == &empty_dir_inode_operations);
}
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
/*
* Determine if the name of a dentry should be casefolded.
*
@@ -1473,7 +1473,7 @@ static const struct dentry_operations generic_encrypted_dentry_ops = {
};
#endif
-#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE)
+#if defined(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_UNICODE)
static const struct dentry_operations generic_encrypted_ci_dentry_ops = {
.d_hash = generic_ci_d_hash,
.d_compare = generic_ci_d_compare,
@@ -1508,10 +1508,10 @@ void generic_set_encrypted_ci_d_ops(struct dentry *dentry)
#ifdef CONFIG_FS_ENCRYPTION
bool needs_encrypt_ops = dentry->d_flags & DCACHE_NOKEY_NAME;
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
bool needs_ci_ops = dentry->d_sb->s_encoding;
#endif
-#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE)
+#if defined(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_UNICODE)
if (needs_encrypt_ops && needs_ci_ops) {
d_set_d_op(dentry, &generic_encrypted_ci_dentry_ops);
return;
@@ -1523,7 +1523,7 @@ void generic_set_encrypted_ci_d_ops(struct dentry *dentry)
return;
}
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
if (needs_ci_ops) {
d_set_d_op(dentry, &generic_ci_dentry_ops);
return;
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index b220e1b91726..0475c5a5d061 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -54,13 +54,9 @@ EXPORT_SYMBOL_GPL(nlmsvc_ops);
static DEFINE_MUTEX(nlmsvc_mutex);
static unsigned int nlmsvc_users;
-static struct task_struct *nlmsvc_task;
-static struct svc_rqst *nlmsvc_rqst;
+static struct svc_serv *nlmsvc_serv;
unsigned long nlmsvc_timeout;
-static atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0);
-static DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq);
-
unsigned int lockd_net_id;
/*
@@ -184,7 +180,12 @@ lockd(void *vrqstp)
nlm_shutdown_hosts();
cancel_delayed_work_sync(&ln->grace_period_end);
locks_end_grace(&ln->lockd_manager);
- return 0;
+
+ dprintk("lockd_down: service stopped\n");
+
+ svc_exit_thread(rqstp);
+
+ module_put_and_kthread_exit(0);
}
static int create_lockd_listener(struct svc_serv *serv, const char *name,
@@ -290,8 +291,8 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
__func__, net->ns.inum);
}
} else {
- pr_err("%s: no users! task=%p, net=%x\n",
- __func__, nlmsvc_task, net->ns.inum);
+ pr_err("%s: no users! net=%x\n",
+ __func__, net->ns.inum);
BUG();
}
}
@@ -302,20 +303,16 @@ static int lockd_inetaddr_event(struct notifier_block *this,
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct sockaddr_in sin;
- if ((event != NETDEV_DOWN) ||
- !atomic_inc_not_zero(&nlm_ntf_refcnt))
+ if (event != NETDEV_DOWN)
goto out;
- if (nlmsvc_rqst) {
+ if (nlmsvc_serv) {
dprintk("lockd_inetaddr_event: removed %pI4\n",
&ifa->ifa_local);
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = ifa->ifa_local;
- svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
- (struct sockaddr *)&sin);
+ svc_age_temp_xprts_now(nlmsvc_serv, (struct sockaddr *)&sin);
}
- atomic_dec(&nlm_ntf_refcnt);
- wake_up(&nlm_ntf_wq);
out:
return NOTIFY_DONE;
@@ -332,21 +329,17 @@ static int lockd_inet6addr_event(struct notifier_block *this,
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
struct sockaddr_in6 sin6;
- if ((event != NETDEV_DOWN) ||
- !atomic_inc_not_zero(&nlm_ntf_refcnt))
+ if (event != NETDEV_DOWN)
goto out;
- if (nlmsvc_rqst) {
+ if (nlmsvc_serv) {
dprintk("lockd_inet6addr_event: removed %pI6\n", &ifa->addr);
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = ifa->addr;
if (ipv6_addr_type(&sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin6.sin6_scope_id = ifa->idev->dev->ifindex;
- svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
- (struct sockaddr *)&sin6);
+ svc_age_temp_xprts_now(nlmsvc_serv, (struct sockaddr *)&sin6);
}
- atomic_dec(&nlm_ntf_refcnt);
- wake_up(&nlm_ntf_wq);
out:
return NOTIFY_DONE;
@@ -357,86 +350,22 @@ static struct notifier_block lockd_inet6addr_notifier = {
};
#endif
-static void lockd_unregister_notifiers(void)
-{
- unregister_inetaddr_notifier(&lockd_inetaddr_notifier);
-#if IS_ENABLED(CONFIG_IPV6)
- unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
-#endif
- wait_event(nlm_ntf_wq, atomic_read(&nlm_ntf_refcnt) == 0);
-}
-
-static void lockd_svc_exit_thread(void)
-{
- atomic_dec(&nlm_ntf_refcnt);
- lockd_unregister_notifiers();
- svc_exit_thread(nlmsvc_rqst);
-}
-
-static int lockd_start_svc(struct svc_serv *serv)
-{
- int error;
-
- if (nlmsvc_rqst)
- return 0;
-
- /*
- * Create the kernel thread and wait for it to start.
- */
- nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
- if (IS_ERR(nlmsvc_rqst)) {
- error = PTR_ERR(nlmsvc_rqst);
- printk(KERN_WARNING
- "lockd_up: svc_rqst allocation failed, error=%d\n",
- error);
- lockd_unregister_notifiers();
- goto out_rqst;
- }
-
- atomic_inc(&nlm_ntf_refcnt);
- svc_sock_update_bufs(serv);
- serv->sv_maxconn = nlm_max_connections;
-
- nlmsvc_task = kthread_create(lockd, nlmsvc_rqst, "%s", serv->sv_name);
- if (IS_ERR(nlmsvc_task)) {
- error = PTR_ERR(nlmsvc_task);
- printk(KERN_WARNING
- "lockd_up: kthread_run failed, error=%d\n", error);
- goto out_task;
- }
- nlmsvc_rqst->rq_task = nlmsvc_task;
- wake_up_process(nlmsvc_task);
-
- dprintk("lockd_up: service started\n");
- return 0;
-
-out_task:
- lockd_svc_exit_thread();
- nlmsvc_task = NULL;
-out_rqst:
- nlmsvc_rqst = NULL;
- return error;
-}
-
static const struct svc_serv_ops lockd_sv_ops = {
.svo_shutdown = svc_rpcb_cleanup,
+ .svo_function = lockd,
.svo_enqueue_xprt = svc_xprt_do_enqueue,
+ .svo_module = THIS_MODULE,
};
-static struct svc_serv *lockd_create_svc(void)
+static int lockd_get(void)
{
struct svc_serv *serv;
+ int error;
- /*
- * Check whether we're already up and running.
- */
- if (nlmsvc_rqst) {
- /*
- * Note: increase service usage, because later in case of error
- * svc_destroy() will be called.
- */
- svc_get(nlmsvc_rqst->rq_server);
- return nlmsvc_rqst->rq_server;
+ if (nlmsvc_serv) {
+ svc_get(nlmsvc_serv);
+ nlmsvc_users++;
+ return 0;
}
/*
@@ -454,14 +383,41 @@ static struct svc_serv *lockd_create_svc(void)
serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, &lockd_sv_ops);
if (!serv) {
printk(KERN_WARNING "lockd_up: create service failed\n");
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
}
+
+ serv->sv_maxconn = nlm_max_connections;
+ error = svc_set_num_threads(serv, NULL, 1);
+ /* The thread now holds the only reference */
+ svc_put(serv);
+ if (error < 0)
+ return error;
+
+ nlmsvc_serv = serv;
register_inetaddr_notifier(&lockd_inetaddr_notifier);
#if IS_ENABLED(CONFIG_IPV6)
register_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
dprintk("lockd_up: service created\n");
- return serv;
+ nlmsvc_users++;
+ return 0;
+}
+
+static void lockd_put(void)
+{
+ if (WARN(nlmsvc_users <= 0, "lockd_down: no users!\n"))
+ return;
+ if (--nlmsvc_users)
+ return;
+
+ unregister_inetaddr_notifier(&lockd_inetaddr_notifier);
+#if IS_ENABLED(CONFIG_IPV6)
+ unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
+#endif
+
+ svc_set_num_threads(nlmsvc_serv, NULL, 0);
+ nlmsvc_serv = NULL;
+ dprintk("lockd_down: service destroyed\n");
}
/*
@@ -469,36 +425,21 @@ static struct svc_serv *lockd_create_svc(void)
*/
int lockd_up(struct net *net, const struct cred *cred)
{
- struct svc_serv *serv;
int error;
mutex_lock(&nlmsvc_mutex);
- serv = lockd_create_svc();
- if (IS_ERR(serv)) {
- error = PTR_ERR(serv);
- goto err_create;
- }
+ error = lockd_get();
+ if (error)
+ goto err;
- error = lockd_up_net(serv, net, cred);
+ error = lockd_up_net(nlmsvc_serv, net, cred);
if (error < 0) {
- lockd_unregister_notifiers();
- goto err_put;
+ lockd_put();
+ goto err;
}
- error = lockd_start_svc(serv);
- if (error < 0) {
- lockd_down_net(serv, net);
- goto err_put;
- }
- nlmsvc_users++;
- /*
- * Note: svc_serv structures have an initial use count of 1,
- * so we exit through here on both success and failure.
- */
-err_put:
- svc_destroy(serv);
-err_create:
+err:
mutex_unlock(&nlmsvc_mutex);
return error;
}
@@ -511,27 +452,8 @@ void
lockd_down(struct net *net)
{
mutex_lock(&nlmsvc_mutex);
- lockd_down_net(nlmsvc_rqst->rq_server, net);
- if (nlmsvc_users) {
- if (--nlmsvc_users)
- goto out;
- } else {
- printk(KERN_ERR "lockd_down: no users! task=%p\n",
- nlmsvc_task);
- BUG();
- }
-
- if (!nlmsvc_task) {
- printk(KERN_ERR "lockd_down: no lockd running.\n");
- BUG();
- }
- kthread_stop(nlmsvc_task);
- dprintk("lockd_down: service stopped\n");
- lockd_svc_exit_thread();
- dprintk("lockd_down: service destroyed\n");
- nlmsvc_task = NULL;
- nlmsvc_rqst = NULL;
-out:
+ lockd_down_net(nlmsvc_serv, net);
+ lockd_put();
mutex_unlock(&nlmsvc_mutex);
}
EXPORT_SYMBOL_GPL(lockd_down);
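
The net effect of the lockd_get()/lockd_put() refactor above is that the
svc_serv lifetime now follows a plain usage count guarded by
nlmsvc_mutex: the first user creates nlmsvc_serv and spins up its single
thread, and once svc_set_num_threads() succeeds the thread holds the
only long-lived reference, so the creator drops its own with svc_put().
Usage shape (a sketch mirroring the lockd_up() hunk; error handling
trimmed):

	mutex_lock(&nlmsvc_mutex);
	error = lockd_get();		/* create serv + thread, or ++users */
	if (!error) {
		error = lockd_up_net(nlmsvc_serv, net, cred);
		if (error)
			lockd_put();	/* --users; last user stops thread */
	}
	mutex_unlock(&nlmsvc_mutex);
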
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index e9b85d8fd5fe..cb3658ab9b7a 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -470,8 +470,10 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
struct nlm_host *host, struct nlm_lock *lock, int wait,
struct nlm_cookie *cookie, int reclaim)
{
- struct nlm_block *block = NULL;
+#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
struct inode *inode = nlmsvc_file_inode(file);
+#endif
+ struct nlm_block *block = NULL;
int error;
int mode;
int async_block = 0;
@@ -484,7 +486,7 @@ nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file,
(long long)lock->fl.fl_end,
wait);
- if (inode->i_sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS) {
+ if (nlmsvc_file_file(file)->f_op->lock) {
async_block = wait;
wait = 0;
}
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c
index cb3a7512c33e..0a22a2faf552 100644
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -179,19 +179,21 @@ nlm_delete_file(struct nlm_file *file)
static int nlm_unlock_files(struct nlm_file *file)
{
struct file_lock lock;
- struct file *f;
+ locks_init_lock(&lock);
lock.fl_type = F_UNLCK;
lock.fl_start = 0;
lock.fl_end = OFFSET_MAX;
- for (f = file->f_file[0]; f <= file->f_file[1]; f++) {
- if (f && vfs_lock_file(f, F_SETLK, &lock, NULL) < 0) {
- pr_warn("lockd: unlock failure in %s:%d\n",
- __FILE__, __LINE__);
- return 1;
- }
- }
+ if (file->f_file[O_RDONLY] &&
+ vfs_lock_file(file->f_file[O_RDONLY], F_SETLK, &lock, NULL))
+ goto out_err;
+ if (file->f_file[O_WRONLY] &&
+ vfs_lock_file(file->f_file[O_WRONLY], F_SETLK, &lock, NULL))
+ goto out_err;
return 0;
+out_err:
+ pr_warn("lockd: unlock failure in %s:%d\n", __FILE__, __LINE__);
+ return 1;
}
/*
diff --git a/fs/locks.c b/fs/locks.c
index 0fca9d680978..8c6df10cd9ed 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -62,6 +62,7 @@
#include <linux/pid_namespace.h>
#include <linux/hashtable.h>
#include <linux/percpu.h>
+#include <linux/sysctl.h>
#define CREATE_TRACE_POINTS
#include <trace/events/filelock.h>
@@ -88,8 +89,37 @@ static int target_leasetype(struct file_lock *fl)
return fl->fl_type;
}
-int leases_enable = 1;
-int lease_break_time = 45;
+static int leases_enable = 1;
+static int lease_break_time = 45;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table locks_sysctls[] = {
+ {
+ .procname = "leases-enable",
+ .data = &leases_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#ifdef CONFIG_MMU
+ {
+ .procname = "lease-break-time",
+ .data = &lease_break_time,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+#endif /* CONFIG_MMU */
+ {}
+};
+
+static int __init init_fs_locks_sysctls(void)
+{
+ register_sysctl_init("fs", locks_sysctls);
+ return 0;
+}
+early_initcall(init_fs_locks_sysctls);
+#endif /* CONFIG_SYSCTL */
/*
* The global file_lock_list is only used for displaying /proc/locks, so we
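
The fs/locks.c change is one instance of a tree-wide pattern: the knobs
become static and each subsystem registers its own table with
register_sysctl_init() instead of extending the central one. The same
shape with a hypothetical knob (name and initcall invented for
illustration):

	#include <linux/sysctl.h>

	static int example_knob = 1;

	static struct ctl_table example_sysctls[] = {
		{
			.procname	= "example-knob",
			.data		= &example_knob,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }	/* terminator */
	};

	static int __init init_example_sysctls(void)
	{
		/* shows up as /proc/sys/fs/example-knob */
		register_sysctl_init("fs", example_sysctls);
		return 0;
	}
	early_initcall(init_example_sysctls);
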
diff --git a/fs/mpage.c b/fs/mpage.c
index 334e7d09aa65..87f5cfef6caa 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -29,7 +29,6 @@
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
-#include <linux/cleancache.h>
#include "internal.h"
/*
@@ -284,12 +283,6 @@ static struct bio *do_mpage_readpage(struct mpage_readpage_args *args)
SetPageMappedToDisk(page);
}
- if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
- cleancache_get_page(page) == 0) {
- SetPageUptodate(page);
- goto confused;
- }
-
/*
* This page will go to BIO. Do we need to send this BIO off first?
*/
diff --git a/fs/namei.c b/fs/namei.c
index 1f9d2187c765..3f1829b3ab5b 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1020,10 +1020,60 @@ static inline void put_link(struct nameidata *nd)
path_put(&last->link);
}
-int sysctl_protected_symlinks __read_mostly = 0;
-int sysctl_protected_hardlinks __read_mostly = 0;
-int sysctl_protected_fifos __read_mostly;
-int sysctl_protected_regular __read_mostly;
+static int sysctl_protected_symlinks __read_mostly;
+static int sysctl_protected_hardlinks __read_mostly;
+static int sysctl_protected_fifos __read_mostly;
+static int sysctl_protected_regular __read_mostly;
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table namei_sysctls[] = {
+ {
+ .procname = "protected_symlinks",
+ .data = &sysctl_protected_symlinks,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "protected_hardlinks",
+ .data = &sysctl_protected_hardlinks,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "protected_fifos",
+ .data = &sysctl_protected_fifos,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+ {
+ .procname = "protected_regular",
+ .data = &sysctl_protected_regular,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+ { }
+};
+
+static int __init init_fs_namei_sysctls(void)
+{
+ register_sysctl_init("fs", namei_sysctls);
+ return 0;
+}
+fs_initcall(init_fs_namei_sysctls);
+
+#endif /* CONFIG_SYSCTL */
/**
* may_follow_link - Check symlink following for unsafe situations
@@ -3958,7 +4008,8 @@ int vfs_rmdir(struct user_namespace *mnt_userns, struct inode *dir,
inode_lock(dentry->d_inode);
error = -EBUSY;
- if (is_local_mountpoint(dentry))
+ if (is_local_mountpoint(dentry) ||
+ (dentry->d_inode->i_flags & S_KERNEL_FILE))
goto out;
error = security_inode_rmdir(dir, dentry);
@@ -3973,13 +4024,12 @@ int vfs_rmdir(struct user_namespace *mnt_userns, struct inode *dir,
dentry->d_inode->i_flags |= S_DEAD;
dont_mount(dentry);
detach_mounts(dentry);
- fsnotify_rmdir(dir, dentry);
out:
inode_unlock(dentry->d_inode);
dput(dentry);
if (!error)
- d_delete(dentry);
+ d_delete_notify(dir, dentry);
return error;
}
EXPORT_SYMBOL(vfs_rmdir);
@@ -4101,7 +4151,6 @@ int vfs_unlink(struct user_namespace *mnt_userns, struct inode *dir,
if (!error) {
dont_mount(dentry);
detach_mounts(dentry);
- fsnotify_unlink(dir, dentry);
}
}
}
@@ -4109,9 +4158,11 @@ out:
inode_unlock(target);
/* We don't d_delete() NFS sillyrenamed files--they still exist. */
- if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
+ if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) {
+ fsnotify_unlink(dir, dentry);
+ } else if (!error) {
fsnotify_link_count(target);
- d_delete(dentry);
+ d_delete_notify(dir, dentry);
}
return error;
diff --git a/fs/namespace.c b/fs/namespace.c
index dc31ad6b370f..de6fae84f1a1 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -37,7 +37,7 @@
#include "internal.h"
/* Maximum number of mounts in a mount namespace */
-unsigned int sysctl_mount_max __read_mostly = 100000;
+static unsigned int sysctl_mount_max __read_mostly = 100000;
static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
@@ -469,6 +469,24 @@ void mnt_drop_write_file(struct file *file)
}
EXPORT_SYMBOL(mnt_drop_write_file);
+/**
+ * mnt_hold_writers - prevent write access to the given mount
+ * @mnt: mnt to prevent write access to
+ *
+ * Prevents write access to @mnt if there are no active writers for @mnt.
+ * This function needs to be called and return successfully before changing
+ * properties of @mnt that need to remain stable for callers with write access
+ * to @mnt.
+ *
+ * After this function has been called successfully, callers must pair it with
+ * a call to mnt_unhold_writers() in order to stop preventing write access to
+ * @mnt.
+ *
+ * Context: This function expects lock_mount_hash() to be held serializing
+ * setting MNT_WRITE_HOLD.
+ * Return: On success 0 is returned.
+ * On error, -EBUSY is returned.
+ */
static inline int mnt_hold_writers(struct mount *mnt)
{
mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
@@ -500,6 +518,18 @@ static inline int mnt_hold_writers(struct mount *mnt)
return 0;
}
+/**
+ * mnt_unhold_writers - stop preventing write access to the given mount
+ * @mnt: mnt to stop preventing write access to
+ *
+ * Stop preventing write access to @mnt allowing callers to gain write access
+ * to @mnt again.
+ *
+ * This function can only be called after a successful call to
+ * mnt_hold_writers().
+ *
+ * Context: This function expects lock_mount_hash() to be held.
+ */
static inline void mnt_unhold_writers(struct mount *mnt)
{
/*
@@ -4620,3 +4650,25 @@ const struct proc_ns_operations mntns_operations = {
.install = mntns_install,
.owner = mntns_owner,
};
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table fs_namespace_sysctls[] = {
+ {
+ .procname = "mount-max",
+ .data = &sysctl_mount_max,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ONE,
+ },
+ { }
+};
+
+static int __init init_fs_namespace_sysctls(void)
+{
+ register_sysctl_init("fs", fs_namespace_sysctls);
+ return 0;
+}
+fs_initcall(init_fs_namespace_sysctls);
+
+#endif /* CONFIG_SYSCTL */
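
The new kernel-doc above pins down the calling convention for the pair;
in code form (a sketch, error handling trimmed, with unlock_mount_hash()
assumed as the counterpart of the documented lock_mount_hash() context):

	lock_mount_hash();
	err = mnt_hold_writers(mnt);	/* -EBUSY if writers are active */
	if (!err) {
		/* mutate properties that must stay stable for writers */
		mnt_unhold_writers(mnt);
	}
	unlock_mount_hash();
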
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 75c76cbb27cc..501da990c259 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -55,7 +55,8 @@ static struct netfs_read_request *netfs_alloc_read_request(
INIT_WORK(&rreq->work, netfs_rreq_work);
refcount_set(&rreq->usage, 1);
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
- ops->init_rreq(rreq, file);
+ if (ops->init_rreq)
+ ops->init_rreq(rreq, file);
netfs_stat(&netfs_n_rh_rreq);
}
@@ -170,7 +171,7 @@ static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error
*/
static void netfs_read_from_cache(struct netfs_read_request *rreq,
struct netfs_read_subrequest *subreq,
- bool seek_data)
+ enum netfs_read_from_hole read_hole)
{
struct netfs_cache_resources *cres = &rreq->cache_resources;
struct iov_iter iter;
@@ -180,7 +181,7 @@ static void netfs_read_from_cache(struct netfs_read_request *rreq,
subreq->start + subreq->transferred,
subreq->len - subreq->transferred);
- cres->ops->read(cres, subreq->start, &iter, seek_data,
+ cres->ops->read(cres, subreq->start, &iter, read_hole,
netfs_cache_read_terminated, subreq);
}
@@ -323,7 +324,7 @@ static void netfs_rreq_do_write_to_cache(struct netfs_read_request *rreq)
}
ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
- rreq->i_size);
+ rreq->i_size, true);
if (ret < 0) {
trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
@@ -461,7 +462,7 @@ static void netfs_rreq_short_read(struct netfs_read_request *rreq,
netfs_get_read_subrequest(subreq);
atomic_inc(&rreq->nr_rd_ops);
if (subreq->source == NETFS_READ_FROM_CACHE)
- netfs_read_from_cache(rreq, subreq, true);
+ netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);
else
netfs_read_from_server(rreq, subreq);
}
@@ -789,7 +790,7 @@ static bool netfs_rreq_submit_slice(struct netfs_read_request *rreq,
netfs_read_from_server(rreq, subreq);
break;
case NETFS_READ_FROM_CACHE:
- netfs_read_from_cache(rreq, subreq, false);
+ netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE);
break;
default:
BUG();
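
For the netfs hunks above, the former bool seek_data becomes the
tri-state enum netfs_read_from_hole, which keeps the cache-read call
sites self-describing; the two values used in this patch are:

	netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_IGNORE); /* normal slice */
	netfs_read_from_cache(rreq, subreq, NETFS_READ_HOLE_CLEAR);  /* short-read retry */
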
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index 22d11fdc6deb..5f6db37f461e 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -12,7 +12,7 @@ nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
export.o sysfs.o fs_context.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
-nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
+nfs-$(CONFIG_NFS_FSCACHE) += fscache.o
obj-$(CONFIG_NFS_V2) += nfsv2.o
nfsv2-y := nfs2super.o proc.o nfs2xdr.o
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 86d856de1389..054cc1255fac 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -93,7 +93,7 @@ nfs4_callback_svc(void *vrqstp)
svc_process(rqstp);
}
svc_exit_thread(rqstp);
- module_put_and_exit(0);
+ module_put_and_kthread_exit(0);
return 0;
}
@@ -137,7 +137,7 @@ nfs41_callback_svc(void *vrqstp)
}
}
svc_exit_thread(rqstp);
- module_put_and_exit(0);
+ module_put_and_kthread_exit(0);
return 0;
}
@@ -169,12 +169,12 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
if (nrservs < NFS4_MIN_NR_CALLBACK_THREADS)
nrservs = NFS4_MIN_NR_CALLBACK_THREADS;
- if (serv->sv_nrthreads-1 == nrservs)
+ if (serv->sv_nrthreads == nrservs)
return 0;
- ret = serv->sv_ops->svo_setup(serv, NULL, nrservs);
+ ret = svc_set_num_threads(serv, NULL, nrservs);
if (ret) {
- serv->sv_ops->svo_setup(serv, NULL, 0);
+ svc_set_num_threads(serv, NULL, 0);
return ret;
}
dprintk("nfs_callback_up: service started\n");
@@ -235,14 +235,12 @@ err_bind:
static const struct svc_serv_ops nfs40_cb_sv_ops = {
.svo_function = nfs4_callback_svc,
.svo_enqueue_xprt = svc_xprt_do_enqueue,
- .svo_setup = svc_set_num_threads_sync,
.svo_module = THIS_MODULE,
};
#if defined(CONFIG_NFS_V4_1)
static const struct svc_serv_ops nfs41_cb_sv_ops = {
.svo_function = nfs41_callback_svc,
.svo_enqueue_xprt = svc_xprt_do_enqueue,
- .svo_setup = svc_set_num_threads_sync,
.svo_module = THIS_MODULE,
};
@@ -266,14 +264,8 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
/*
* Check whether we're already up and running.
*/
- if (cb_info->serv) {
- /*
- * Note: increase service usage, because later in case of error
- * svc_destroy() will be called.
- */
- svc_get(cb_info->serv);
- return cb_info->serv;
- }
+ if (cb_info->serv)
+ return svc_get(cb_info->serv);
switch (minorversion) {
case 0:
@@ -294,7 +286,7 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n",
cb_info->users);
- serv = svc_create_pooled(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops);
+ serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops);
if (!serv) {
printk(KERN_ERR "nfs_callback_create_svc: create service failed\n");
return ERR_PTR(-ENOMEM);
@@ -335,16 +327,10 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
goto err_start;
cb_info->users++;
- /*
- * svc_create creates the svc_serv with sv_nrthreads == 1, and then
- * svc_prepare_thread increments that. So we need to call svc_destroy
- * on both success and failure so that the refcount is 1 when the
- * thread exits.
- */
err_net:
if (!cb_info->users)
cb_info->serv = NULL;
- svc_destroy(serv);
+ svc_put(serv);
err_create:
mutex_unlock(&nfs_callback_mutex);
return ret;
@@ -369,8 +355,8 @@ void nfs_callback_down(int minorversion, struct net *net)
cb_info->users--;
if (cb_info->users == 0) {
svc_get(serv);
- serv->sv_ops->svo_setup(serv, NULL, 0);
- svc_destroy(serv);
+ svc_set_num_threads(serv, NULL, 0);
+ svc_put(serv);
dprintk("nfs_callback_down: service destroyed\n");
cb_info->serv = NULL;
}
diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h
index 6a2033131c06..ccd4f245cae2 100644
--- a/fs/nfs/callback.h
+++ b/fs/nfs/callback.h
@@ -170,7 +170,7 @@ struct cb_devicenotifyitem {
};
struct cb_devicenotifyargs {
- int ndevs;
+ uint32_t ndevs;
struct cb_devicenotifyitem *devs;
};
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 09c5b1cb3e07..c343666d9a42 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -358,7 +358,7 @@ __be32 nfs4_callback_devicenotify(void *argp, void *resp,
struct cb_process_state *cps)
{
struct cb_devicenotifyargs *args = argp;
- int i;
+ uint32_t i;
__be32 res = 0;
struct nfs_client *clp = cps->clp;
struct nfs_server *server = NULL;
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index a67c41ec545f..f90de8043b0f 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -258,11 +258,9 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
void *argp)
{
struct cb_devicenotifyargs *args = argp;
+ uint32_t tmp, n, i;
__be32 *p;
__be32 status = 0;
- u32 tmp;
- int n, i;
- args->ndevs = 0;
/* Num of device notifications */
p = xdr_inline_decode(xdr, sizeof(uint32_t));
@@ -271,7 +269,7 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
goto out;
}
n = ntohl(*p++);
- if (n <= 0)
+ if (n == 0)
goto out;
if (n > ULONG_MAX / sizeof(*args->devs)) {
status = htonl(NFS4ERR_BADXDR);
@@ -330,19 +328,21 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
dev->cbd_immediate = 0;
}
- args->ndevs++;
-
dprintk("%s: type %d layout 0x%x immediate %d\n",
__func__, dev->cbd_notify_type, dev->cbd_layout_type,
dev->cbd_immediate);
}
+ args->ndevs = n;
+ dprintk("%s: ndevs %d\n", __func__, args->ndevs);
+ return 0;
+err:
+ kfree(args->devs);
out:
+ args->devs = NULL;
+ args->ndevs = 0;
dprintk("%s: status %d ndevs %d\n",
__func__, ntohl(status), args->ndevs);
return status;
-err:
- kfree(args->devs);
- goto out;
}
static __be32 decode_sessionid(struct xdr_stream *xdr,
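
The decode_devicenotify_args() rework also fixes a type mismatch: the
on-the-wire count is a 32-bit unsigned value, so storing it in an int
lets counts above INT_MAX turn negative and confuse signed checks and
loop bounds. The safe shape (a sketch; raw stands for the __be32 word
pulled from the XDR stream):

	uint32_t n = ntohl(raw);

	if (n == 0)				/* nothing to decode */
		goto out;
	if (n > ULONG_MAX / sizeof(*args->devs)) {
		status = htonl(NFS4ERR_BADXDR);	/* would overflow kmalloc */
		goto out;
	}
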
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 1e4dc1ab9312..d1f34229e11a 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -177,14 +177,13 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
INIT_LIST_HEAD(&clp->cl_superblocks);
clp->cl_rpcclient = ERR_PTR(-EINVAL);
+ clp->cl_flags = cl_init->init_flags;
clp->cl_proto = cl_init->proto;
clp->cl_nconnect = cl_init->nconnect;
clp->cl_max_connect = cl_init->max_connect ? cl_init->max_connect : 1;
clp->cl_net = get_net(cl_init->net);
clp->cl_principal = "*";
- nfs_fscache_get_client_cookie(clp);
-
return clp;
error_cleanup:
@@ -238,8 +237,6 @@ static void pnfs_init_server(struct nfs_server *server)
*/
void nfs_free_client(struct nfs_client *clp)
{
- nfs_fscache_release_client_cookie(clp);
-
/* -EIO all pending I/O */
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
@@ -427,7 +424,6 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
list_add_tail(&new->cl_share_link,
&nn->nfs_client_list);
spin_unlock(&nn->nfs_client_lock);
- new->cl_flags = cl_init->init_flags;
return rpc_ops->init_client(new, cl_init);
}
@@ -860,6 +856,13 @@ static int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, str
server->namelen = pathinfo.max_namelen;
}
+ if (clp->rpc_ops->discover_trunking != NULL &&
+ (server->caps & NFS_CAP_FS_LOCATIONS)) {
+ error = clp->rpc_ops->discover_trunking(server, mntfh);
+ if (error < 0)
+ return error;
+ }
+
return 0;
}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 347793626f19..75cb1cbe4cde 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -80,6 +80,7 @@ static struct nfs_open_dir_context *alloc_nfs_open_dir_context(struct inode *dir
ctx->dir_cookie = 0;
ctx->dup_cookie = 0;
ctx->page_index = 0;
+ ctx->eof = false;
spin_lock(&dir->i_lock);
if (list_empty(&nfsi->open_files) &&
(nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
@@ -168,6 +169,7 @@ struct nfs_readdir_descriptor {
unsigned int cache_entry_index;
signed char duped;
bool plus;
+ bool eob;
bool eof;
};
@@ -867,7 +869,8 @@ static int nfs_readdir_xdr_to_array(struct nfs_readdir_descriptor *desc,
status = nfs_readdir_page_filler(desc, entry, pages, pglen,
arrays, narrays);
- } while (!status && nfs_readdir_page_needs_filling(page));
+ } while (!status && nfs_readdir_page_needs_filling(page) &&
+ page_mapping(page));
nfs_readdir_free_pages(pages, array_size);
out:
@@ -988,7 +991,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
ent = &array->array[i];
if (!dir_emit(desc->ctx, ent->name, ent->name_len,
nfs_compat_user_ino64(ent->ino), ent->d_type)) {
- desc->eof = true;
+ desc->eob = true;
break;
}
memcpy(desc->verf, verf, sizeof(desc->verf));
@@ -1004,7 +1007,7 @@ static void nfs_do_filldir(struct nfs_readdir_descriptor *desc,
desc->duped = 1;
}
if (array->page_is_eof)
- desc->eof = true;
+ desc->eof = !desc->eob;
kunmap(desc->page);
dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %llu\n",
@@ -1041,12 +1044,13 @@ static int uncached_readdir(struct nfs_readdir_descriptor *desc)
goto out;
desc->page_index = 0;
+ desc->cache_entry_index = 0;
desc->last_cookie = desc->dir_cookie;
desc->duped = 0;
status = nfs_readdir_xdr_to_array(desc, desc->verf, verf, arrays, sz);
- for (i = 0; !desc->eof && i < sz && arrays[i]; i++) {
+ for (i = 0; !desc->eob && i < sz && arrays[i]; i++) {
desc->page = arrays[i];
nfs_do_filldir(desc, verf);
}
@@ -1105,9 +1109,15 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
desc->duped = dir_ctx->duped;
page_index = dir_ctx->page_index;
desc->attr_gencount = dir_ctx->attr_gencount;
+ desc->eof = dir_ctx->eof;
memcpy(desc->verf, dir_ctx->verf, sizeof(desc->verf));
spin_unlock(&file->f_lock);
+ if (desc->eof) {
+ res = 0;
+ goto out_free;
+ }
+
if (test_and_clear_bit(NFS_INO_FORCE_READDIR, &nfsi->flags) &&
list_is_singular(&nfsi->open_files))
invalidate_mapping_pages(inode->i_mapping, page_index + 1, -1);
@@ -1141,7 +1151,7 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
nfs_do_filldir(desc, nfsi->cookieverf);
nfs_readdir_page_unlock_and_put_cached(desc);
- } while (!desc->eof);
+ } while (!desc->eob && !desc->eof);
spin_lock(&file->f_lock);
dir_ctx->dir_cookie = desc->dir_cookie;
@@ -1149,9 +1159,10 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx)
dir_ctx->duped = desc->duped;
dir_ctx->attr_gencount = desc->attr_gencount;
dir_ctx->page_index = desc->page_index;
+ dir_ctx->eof = desc->eof;
memcpy(dir_ctx->verf, desc->verf, sizeof(dir_ctx->verf));
spin_unlock(&file->f_lock);
-
+out_free:
kfree(desc);
out:
@@ -1193,6 +1204,7 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence)
if (offset == 0)
memset(dir_ctx->verf, 0, sizeof(dir_ctx->verf));
dir_ctx->duped = 0;
+ dir_ctx->eof = false;
}
spin_unlock(&filp->f_lock);
return offset;
@@ -1325,6 +1337,14 @@ void nfs_clear_verifier_delegated(struct inode *inode)
EXPORT_SYMBOL_GPL(nfs_clear_verifier_delegated);
#endif /* IS_ENABLED(CONFIG_NFS_V4) */
+static int nfs_dentry_verify_change(struct inode *dir, struct dentry *dentry)
+{
+ if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE) &&
+ d_really_is_negative(dentry))
+ return dentry->d_time == inode_peek_iversion_raw(dir);
+ return nfs_verify_change_attribute(dir, dentry->d_time);
+}
+
/*
* A check for whether or not the parent directory has changed.
* In the case it has, we assume that the dentries are untrustworthy
@@ -1338,7 +1358,7 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
return 1;
if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONE)
return 0;
- if (!nfs_verify_change_attribute(dir, dentry->d_time))
+ if (!nfs_dentry_verify_change(dir, dentry))
return 0;
/* Revalidate nfsi->cache_change_attribute before we declare a match */
if (nfs_mapping_need_revalidate_inode(dir)) {
@@ -1347,7 +1367,7 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry,
if (__nfs_revalidate_inode(NFS_SERVER(dir), dir) < 0)
return 0;
}
- if (!nfs_verify_change_attribute(dir, dentry->d_time))
+ if (!nfs_dentry_verify_change(dir, dentry))
return 0;
return 1;
}
@@ -1437,6 +1457,9 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
return 0;
if (NFS_SERVER(dir)->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG)
return 1;
+ /* Case insensitive server? Revalidate negative dentries */
+ if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
+ return 1;
return !nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU);
}
@@ -1537,7 +1560,7 @@ out:
* If the lookup failed despite the dentry change attribute being
* a match, then we should revalidate the directory cache.
*/
- if (!ret && nfs_verify_change_attribute(dir, dentry->d_time))
+ if (!ret && nfs_dentry_verify_change(dir, dentry))
nfs_mark_dir_for_revalidate(dir);
return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
}
@@ -1776,8 +1799,11 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
dir_verifier = nfs_save_change_attribute(dir);
trace_nfs_lookup_enter(dir, dentry, flags);
error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr);
- if (error == -ENOENT)
+ if (error == -ENOENT) {
+ if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
+ dir_verifier = inode_peek_iversion_raw(dir);
goto no_entry;
+ }
if (error < 0) {
res = ERR_PTR(error);
goto out;
@@ -1806,6 +1832,14 @@ out:
}
EXPORT_SYMBOL_GPL(nfs_lookup);
+void nfs_d_prune_case_insensitive_aliases(struct inode *inode)
+{
+ /* Case insensitive server? Revalidate dentries */
+ if (inode && nfs_server_capable(inode, NFS_CAP_CASE_INSENSITIVE))
+ d_prune_aliases(inode);
+}
+EXPORT_SYMBOL_GPL(nfs_d_prune_case_insensitive_aliases);
+
#if IS_ENABLED(CONFIG_NFS_V4)
static int nfs4_lookup_revalidate(struct dentry *, unsigned int);
@@ -1867,6 +1901,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
struct iattr attr = { .ia_valid = ATTR_OPEN };
struct inode *inode;
unsigned int lookup_flags = 0;
+ unsigned long dir_verifier;
bool switched = false;
int created = 0;
int err;
@@ -1940,7 +1975,11 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
switch (err) {
case -ENOENT:
d_splice_alias(NULL, dentry);
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE))
+ dir_verifier = inode_peek_iversion_raw(dir);
+ else
+ dir_verifier = nfs_save_change_attribute(dir);
+ nfs_set_verifier(dentry, dir_verifier);
break;
case -EISDIR:
case -ENOTDIR:
@@ -1968,6 +2007,24 @@ out:
no_open:
res = nfs_lookup(dir, dentry, lookup_flags);
+ if (!res) {
+ inode = d_inode(dentry);
+ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
+ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)))
+ res = ERR_PTR(-ENOTDIR);
+ else if (inode && S_ISREG(inode->i_mode))
+ res = ERR_PTR(-EOPENSTALE);
+ } else if (!IS_ERR(res)) {
+ inode = d_inode(res);
+ if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
+ !(S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))) {
+ dput(res);
+ res = ERR_PTR(-ENOTDIR);
+ } else if (inode && S_ISREG(inode->i_mode)) {
+ dput(res);
+ res = ERR_PTR(-EOPENSTALE);
+ }
+ }
if (switched) {
d_lookup_done(dentry);
if (!res)
@@ -2186,8 +2243,10 @@ static void nfs_dentry_remove_handle_error(struct inode *dir,
switch (error) {
case -ENOENT:
d_delete(dentry);
- fallthrough;
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ break;
case 0:
+ nfs_d_prune_case_insensitive_aliases(d_inode(dentry));
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
}
}
@@ -2380,6 +2439,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
trace_nfs_link_enter(inode, dir, dentry);
d_drop(dentry);
+ if (S_ISREG(inode->i_mode))
+ nfs_sync_inode(inode);
error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
if (error == 0) {
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
@@ -2469,6 +2530,8 @@ int nfs_rename(struct user_namespace *mnt_userns, struct inode *old_dir,
}
}
+ if (S_ISREG(old_inode->i_mode))
+ nfs_sync_inode(old_inode);
task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
if (IS_ERR(task)) {
error = PTR_ERR(task);
@@ -2529,7 +2592,7 @@ MODULE_PARM_DESC(nfs_access_max_cachesize, "NFS access maximum total cache lengt
static void nfs_access_free_entry(struct nfs_access_entry *entry)
{
- put_cred(entry->cred);
+ put_group_info(entry->group_info);
kfree_rcu(entry, rcu_head);
smp_mb__before_atomic();
atomic_long_dec(&nfs_access_nr_entries);
@@ -2655,6 +2718,43 @@ void nfs_access_zap_cache(struct inode *inode)
}
EXPORT_SYMBOL_GPL(nfs_access_zap_cache);
+static int access_cmp(const struct cred *a, const struct nfs_access_entry *b)
+{
+ struct group_info *ga, *gb;
+ int g;
+
+ if (uid_lt(a->fsuid, b->fsuid))
+ return -1;
+ if (uid_gt(a->fsuid, b->fsuid))
+ return 1;
+
+ if (gid_lt(a->fsgid, b->fsgid))
+ return -1;
+ if (gid_gt(a->fsgid, b->fsgid))
+ return 1;
+
+ ga = a->group_info;
+ gb = b->group_info;
+ if (ga == gb)
+ return 0;
+ if (ga == NULL)
+ return -1;
+ if (gb == NULL)
+ return 1;
+ if (ga->ngroups < gb->ngroups)
+ return -1;
+ if (ga->ngroups > gb->ngroups)
+ return 1;
+
+ for (g = 0; g < ga->ngroups; g++) {
+ if (gid_lt(ga->gid[g], gb->gid[g]))
+ return -1;
+ if (gid_gt(ga->gid[g], gb->gid[g]))
+ return 1;
+ }
+ return 0;
+}
+
static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, const struct cred *cred)
{
struct rb_node *n = NFS_I(inode)->access_cache.rb_node;
@@ -2662,7 +2762,7 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, co
while (n != NULL) {
struct nfs_access_entry *entry =
rb_entry(n, struct nfs_access_entry, rb_node);
- int cmp = cred_fscmp(cred, entry->cred);
+ int cmp = access_cmp(cred, entry);
if (cmp < 0)
n = n->rb_left;
@@ -2674,7 +2774,7 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, co
return NULL;
}
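
The switch from cred_fscmp() to access_cmp() keys the access cache on the credential's (fsuid, fsgid, supplementary groups) tuple rather than on the cred pointer itself, so two creds carrying the same identity hit the same cache entry. Because access_cmp() defines a total order, it can drive an ordinary binary-search-tree descent. A minimal, runnable userspace sketch of the same pattern, using hypothetical xcred/xentry types in place of the kernel structures:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's cred/access-entry types. */
struct xcred {
	unsigned uid, gid;
	int ngroups;
	unsigned groups[4];
};

struct xentry {
	struct xcred key;
	unsigned mask;
	struct xentry *left, *right;
};

/* Three-way compare: uid, then gid, then group count, then each group. */
static int xcred_cmp(const struct xcred *a, const struct xcred *b)
{
	int g;

	if (a->uid != b->uid)
		return a->uid < b->uid ? -1 : 1;
	if (a->gid != b->gid)
		return a->gid < b->gid ? -1 : 1;
	if (a->ngroups != b->ngroups)
		return a->ngroups < b->ngroups ? -1 : 1;
	for (g = 0; g < a->ngroups; g++)
		if (a->groups[g] != b->groups[g])
			return a->groups[g] < b->groups[g] ? -1 : 1;
	return 0;
}

/* Ordinary BST descent driven by the comparator, as in the rbtree walk. */
static struct xentry *xsearch(struct xentry *n, const struct xcred *c)
{
	while (n) {
		int cmp = xcred_cmp(c, &n->key);

		if (cmp < 0)
			n = n->left;
		else if (cmp > 0)
			n = n->right;
		else
			return n;
	}
	return NULL;
}

int main(void)
{
	struct xentry a = { .key = { 1000, 1000, 1, { 1000 } }, .mask = 7 };
	struct xentry b = { .key = { 1001, 1000, 1, { 1000 } }, .mask = 5,
			    .left = &a };
	struct xentry *hit = xsearch(&b, &a.key);

	printf("mask=%u\n", hit ? hit->mask : 0);
	return 0;
}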
-static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block)
+static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, u32 *mask, bool may_block)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_access_entry *cache;
@@ -2704,8 +2804,7 @@ static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *
spin_lock(&inode->i_lock);
retry = false;
}
- res->cred = cache->cred;
- res->mask = cache->mask;
+ *mask = cache->mask;
list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru);
err = 0;
out:
@@ -2717,7 +2816,7 @@ out_zap:
return -ENOENT;
}
-static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res)
+static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cred, u32 *mask)
{
/* Only check the most recently returned cache entry,
* but do it without locking.
@@ -2733,35 +2832,36 @@ static int nfs_access_get_cached_rcu(struct inode *inode, const struct cred *cre
lh = rcu_dereference(list_tail_rcu(&nfsi->access_cache_entry_lru));
cache = list_entry(lh, struct nfs_access_entry, lru);
if (lh == &nfsi->access_cache_entry_lru ||
- cred_fscmp(cred, cache->cred) != 0)
+ access_cmp(cred, cache) != 0)
cache = NULL;
if (cache == NULL)
goto out;
if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
goto out;
- res->cred = cache->cred;
- res->mask = cache->mask;
+ *mask = cache->mask;
err = 0;
out:
rcu_read_unlock();
return err;
}
-int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct
-nfs_access_entry *res, bool may_block)
+int nfs_access_get_cached(struct inode *inode, const struct cred *cred,
+ u32 *mask, bool may_block)
{
int status;
- status = nfs_access_get_cached_rcu(inode, cred, res);
+ status = nfs_access_get_cached_rcu(inode, cred, mask);
if (status != 0)
- status = nfs_access_get_cached_locked(inode, cred, res,
+ status = nfs_access_get_cached_locked(inode, cred, mask,
may_block);
return status;
}
EXPORT_SYMBOL_GPL(nfs_access_get_cached);
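
nfs_access_get_cached() is a classic two-phase lookup: a lock-free peek at the most recently used entry, falling back to a search under the lock only on a miss. A minimal userspace sketch of the shape, assuming hypothetical centry/cache_get names; note the sketch never frees entries, which is what keeps the lockless peek safe here, whereas the kernel gets the same guarantee from RCU:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct centry { int key; unsigned mask; struct centry *next; };

static struct centry *head;                 /* protected by the lock */
static _Atomic(struct centry *) recent;     /* lock-free fast path */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Fast path: peek at the last entry returned, without taking the lock. */
static int cache_get_fast(int key, unsigned *mask)
{
	struct centry *e = atomic_load_explicit(&recent, memory_order_acquire);

	if (!e || e->key != key)
		return -1;
	*mask = e->mask;
	return 0;
}

/* Slow path: take the lock, search, and publish the hit as most recent. */
static int cache_get_locked(int key, unsigned *mask)
{
	struct centry *e;
	int ret = -1;

	pthread_mutex_lock(&lock);
	for (e = head; e; e = e->next) {
		if (e->key == key) {
			*mask = e->mask;
			atomic_store_explicit(&recent, e, memory_order_release);
			ret = 0;
			break;
		}
	}
	pthread_mutex_unlock(&lock);
	return ret;
}

static int cache_get(int key, unsigned *mask)
{
	if (cache_get_fast(key, mask) == 0)
		return 0;
	return cache_get_locked(key, mask);
}

int main(void)
{
	static struct centry e = { .key = 42, .mask = 5 };
	unsigned mask = 0;

	head = &e;
	if (cache_get(42, &mask) == 0)
		printf("mask=%u\n", mask);
	return 0;
}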
-static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set)
+static void nfs_access_add_rbtree(struct inode *inode,
+ struct nfs_access_entry *set,
+ const struct cred *cred)
{
struct nfs_inode *nfsi = NFS_I(inode);
struct rb_root *root_node = &nfsi->access_cache;
@@ -2774,7 +2874,7 @@ static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *
while (*p != NULL) {
parent = *p;
entry = rb_entry(parent, struct nfs_access_entry, rb_node);
- cmp = cred_fscmp(set->cred, entry->cred);
+ cmp = access_cmp(cred, entry);
if (cmp < 0)
p = &parent->rb_left;
@@ -2796,13 +2896,16 @@ found:
nfs_access_free_entry(entry);
}
-void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
+void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set,
+ const struct cred *cred)
{
struct nfs_access_entry *cache = kmalloc(sizeof(*cache), GFP_KERNEL);
if (cache == NULL)
return;
RB_CLEAR_NODE(&cache->rb_node);
- cache->cred = get_cred(set->cred);
+ cache->fsuid = cred->fsuid;
+ cache->fsgid = cred->fsgid;
+ cache->group_info = get_group_info(cred->group_info);
cache->mask = set->mask;
/* The above field assignments must be visible
@@ -2810,7 +2913,7 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
* use rcu_assign_pointer, so just force the memory barrier.
*/
smp_wmb();
- nfs_access_add_rbtree(inode, cache);
+ nfs_access_add_rbtree(inode, cache, cred);
/* Update accounting */
smp_mb__before_atomic();
@@ -2875,7 +2978,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
trace_nfs_access_enter(inode);
- status = nfs_access_get_cached(inode, cred, &cache, may_block);
+ status = nfs_access_get_cached(inode, cred, &cache.mask, may_block);
if (status == 0)
goto out_cached;
@@ -2895,8 +2998,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
cache.mask |= NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP;
else
cache.mask |= NFS_ACCESS_EXECUTE;
- cache.cred = cred;
- status = NFS_PROTO(inode)->access(inode, &cache);
+ status = NFS_PROTO(inode)->access(inode, &cache, cred);
if (status != 0) {
if (status == -ESTALE) {
if (!S_ISDIR(inode->i_mode))
@@ -2906,7 +3008,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask)
}
goto out;
}
- nfs_access_add_cache(inode, &cache);
+ nfs_access_add_cache(inode, &cache, cred);
out_cached:
cache_mask = nfs_access_calc_mask(cache.mask, inode->i_mode);
if ((mask & ~cache_mask & (MAY_READ | MAY_WRITE | MAY_EXEC)) != 0)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 9cff8709c80a..eabfdab543c8 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -59,6 +59,7 @@
#include "internal.h"
#include "iostat.h"
#include "pnfs.h"
+#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -959,6 +960,7 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
} else {
result = requested;
}
+ nfs_fscache_invalidate(inode, FSCACHE_INVAL_DIO_WRITE);
out_release:
nfs_direct_req_release(dreq);
out:
diff --git a/fs/nfs/export.c b/fs/nfs/export.c
index 171c424cb6d5..01596f2d0a1e 100644
--- a/fs/nfs/export.c
+++ b/fs/nfs/export.c
@@ -158,5 +158,5 @@ const struct export_operations nfs_export_ops = {
.fetch_iversion = nfs_fetch_iversion,
.flags = EXPORT_OP_NOWCC|EXPORT_OP_NOSUBTREECHK|
EXPORT_OP_CLOSE_BEFORE_UNLINK|EXPORT_OP_REMOTE_FS|
- EXPORT_OP_NOATOMIC_ATTR|EXPORT_OP_SYNC_LOCKS,
+ EXPORT_OP_NOATOMIC_ATTR,
};
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 24e7dccce355..76d76acbc594 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -84,6 +84,7 @@ nfs_file_release(struct inode *inode, struct file *filp)
nfs_inc_stats(inode, NFSIOS_VFSRELEASE);
nfs_file_clear_open_context(filp);
+ nfs_fscache_release_file(inode, filp);
return 0;
}
EXPORT_SYMBOL_GPL(nfs_file_release);
@@ -415,8 +416,7 @@ static void nfs_invalidate_page(struct page *page, unsigned int offset,
return;
/* Cancel any unstarted writes on this page */
nfs_wb_page_cancel(page_file_mapping(page)->host, page);
-
- nfs_fscache_invalidate_page(page, page->mapping->host);
+ wait_on_page_fscache(page);
}
/*
@@ -475,12 +475,11 @@ static void nfs_check_dirty_writeback(struct page *page,
static int nfs_launder_page(struct page *page)
{
struct inode *inode = page_file_mapping(page)->host;
- struct nfs_inode *nfsi = NFS_I(inode);
dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
inode->i_ino, (long long)page_offset(page));
- nfs_fscache_wait_on_page_write(nfsi, page);
+ wait_on_page_fscache(page);
return nfs_wb_page(inode, page);
}
@@ -555,7 +554,11 @@ static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
sb_start_pagefault(inode->i_sb);
/* make sure the cache has finished storing the page */
- nfs_fscache_wait_on_page_write(NFS_I(inode), page);
+ if (PageFsCache(page) &&
+ wait_on_page_fscache_killable(vmf->page) < 0) {
+ ret = VM_FAULT_RETRY;
+ goto out;
+ }
wait_on_bit_action(&NFS_I(inode)->flags, NFS_INO_INVALIDATING,
nfs_wait_bit_killable, TASK_KILLABLE);
diff --git a/fs/nfs/filelayout/filelayout.h b/fs/nfs/filelayout/filelayout.h
index 79323b5dab0c..aed0748fd6ec 100644
--- a/fs/nfs/filelayout/filelayout.h
+++ b/fs/nfs/filelayout/filelayout.h
@@ -51,7 +51,7 @@ struct nfs4_file_layout_dsaddr {
u32 stripe_count;
u8 *stripe_indices;
u32 ds_num;
- struct nfs4_pnfs_ds *ds_list[1];
+ struct nfs4_pnfs_ds *ds_list[];
};
struct nfs4_filelayout_segment {
diff --git a/fs/nfs/filelayout/filelayoutdev.c b/fs/nfs/filelayout/filelayoutdev.c
index 86c3f7e69ec4..acf4b88889dc 100644
--- a/fs/nfs/filelayout/filelayoutdev.c
+++ b/fs/nfs/filelayout/filelayoutdev.c
@@ -136,9 +136,7 @@ nfs4_fl_alloc_deviceid_node(struct nfs_server *server, struct pnfs_device *pdev,
goto out_err_free_stripe_indices;
}
- dsaddr = kzalloc(sizeof(*dsaddr) +
- (sizeof(struct nfs4_pnfs_ds *) * (num - 1)),
- gfp_flags);
+ dsaddr = kzalloc(struct_size(dsaddr, ds_list, num), gfp_flags);
if (!dsaddr)
goto out_err_free_stripe_indices;
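
The filelayout change above swaps the old one-element-array idiom (`ds_list[1]` allocated with `num - 1` extra slots) for a proper C99 flexible array member sized with struct_size(), which also checks for multiplication overflow. A small runnable userspace sketch of the equivalent layout and size computation (dsaddr_size() stands in for the kernel macro and omits the overflow check):

#include <stdio.h>
#include <stdlib.h>

/* Flexible array member replaces the old one-element-array idiom. */
struct dsaddr {
	unsigned ds_num;
	void *ds_list[];	/* was: void *ds_list[1]; */
};

/* Size of the struct header plus n trailing elements. */
static size_t dsaddr_size(size_t n)
{
	return sizeof(struct dsaddr) + n * sizeof(void *);
}

int main(void)
{
	size_t num = 4;
	struct dsaddr *d = calloc(1, dsaddr_size(num));

	if (!d)
		return 1;
	d->ds_num = num;
	printf("allocated %zu bytes for %u slots\n", dsaddr_size(num),
	       d->ds_num);
	free(d);
	return 0;
}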
diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c
deleted file mode 100644
index 573b1da9342c..000000000000
--- a/fs/nfs/fscache-index.c
+++ /dev/null
@@ -1,140 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/* NFS FS-Cache index structure definition
- *
- * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/nfs_fs.h>
-#include <linux/nfs_fs_sb.h>
-#include <linux/in6.h>
-#include <linux/iversion.h>
-
-#include "internal.h"
-#include "fscache.h"
-
-#define NFSDBG_FACILITY NFSDBG_FSCACHE
-
-/*
- * Define the NFS filesystem for FS-Cache. Upon registration FS-Cache sticks
- * the cookie for the top-level index object for NFS into here. The top-level
- * index can then have other cache objects inserted into it.
- */
-struct fscache_netfs nfs_fscache_netfs = {
- .name = "nfs",
- .version = 0,
-};
-
-/*
- * Register NFS for caching
- */
-int nfs_fscache_register(void)
-{
- return fscache_register_netfs(&nfs_fscache_netfs);
-}
-
-/*
- * Unregister NFS for caching
- */
-void nfs_fscache_unregister(void)
-{
- fscache_unregister_netfs(&nfs_fscache_netfs);
-}
-
-/*
- * Define the server object for FS-Cache. This is used to describe a server
- * object to fscache_acquire_cookie(). It is keyed by the NFS protocol and
- * server address parameters.
- */
-const struct fscache_cookie_def nfs_fscache_server_index_def = {
- .name = "NFS.server",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-/*
- * Define the superblock object for FS-Cache. This is used to describe a
- * superblock object to fscache_acquire_cookie(). It is keyed by all the NFS
- * parameters that might cause a separate superblock.
- */
-const struct fscache_cookie_def nfs_fscache_super_index_def = {
- .name = "NFS.super",
- .type = FSCACHE_COOKIE_TYPE_INDEX,
-};
-
-/*
- * Consult the netfs about the state of an object
- * - This function can be absent if the index carries no state data
- * - The netfs data from the cookie being used as the target is
- * presented, as is the auxiliary data
- */
-static
-enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size)
-{
- struct nfs_fscache_inode_auxdata auxdata;
- struct nfs_inode *nfsi = cookie_netfs_data;
-
- if (datalen != sizeof(auxdata))
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- memset(&auxdata, 0, sizeof(auxdata));
- auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
- auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
- auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
- auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
-
- if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
- auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
-
- if (memcmp(data, &auxdata, datalen) != 0)
- return FSCACHE_CHECKAUX_OBSOLETE;
-
- return FSCACHE_CHECKAUX_OKAY;
-}
-
-/*
- * Get an extra reference on a read context.
- * - This function can be absent if the completion function doesn't require a
- * context.
- * - The read context is passed back to NFS in the event that a data read on the
- * cache fails with EIO - in which case the server must be contacted to
- * retrieve the data, which requires the read context for security.
- */
-static void nfs_fh_get_context(void *cookie_netfs_data, void *context)
-{
- get_nfs_open_context(context);
-}
-
-/*
- * Release an extra reference on a read context.
- * - This function can be absent if the completion function doesn't require a
- * context.
- */
-static void nfs_fh_put_context(void *cookie_netfs_data, void *context)
-{
- if (context)
- put_nfs_open_context(context);
-}
-
-/*
- * Define the inode object for FS-Cache. This is used to describe an inode
- * object to fscache_acquire_cookie(). It is keyed by the NFS file handle for
- * an inode.
- *
- * Coherency is managed by comparing the copies of i_size, i_mtime and i_ctime
- * held in the cache auxiliary data for the data storage object with those in
- * the inode struct in memory.
- */
-const struct fscache_cookie_def nfs_fscache_inode_object_def = {
- .name = "NFS.fh",
- .type = FSCACHE_COOKIE_TYPE_DATAFILE,
- .check_aux = nfs_fscache_inode_check_aux,
- .get_context = nfs_fh_get_context,
- .put_context = nfs_fh_put_context,
-};
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index d743629e05e1..cfe901650ab0 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -22,24 +22,18 @@
#define NFSDBG_FACILITY NFSDBG_FSCACHE
-static struct rb_root nfs_fscache_keys = RB_ROOT;
-static DEFINE_SPINLOCK(nfs_fscache_keys_lock);
+#define NFS_MAX_KEY_LEN 1000
-/*
- * Layout of the key for an NFS server cache object.
- */
-struct nfs_server_key {
- struct {
- uint16_t nfsversion; /* NFS protocol version */
- uint32_t minorversion; /* NFSv4 minor version */
- uint16_t family; /* address family */
- __be16 port; /* IP port */
- } hdr;
- union {
- struct in_addr ipv4_addr; /* IPv4 address */
- struct in6_addr ipv6_addr; /* IPv6 address */
- };
-} __packed;
+static bool nfs_append_int(char *key, int *_len, unsigned long long x)
+{
+ if (*_len > NFS_MAX_KEY_LEN)
+ return false;
+ if (x == 0)
+ key[(*_len)++] = ',';
+ else
+ *_len += sprintf(key + *_len, ",%llx", x);
+ return true;
+}
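
The packed binary nfs_server_key struct gives way to a printable, comma-separated hex string, with zero shortened to a bare comma to keep keys compact. The length check rejects appends once the logical limit is passed, and the caller allocates NFS_MAX_KEY_LEN + 24 bytes so one final field started just under the limit still fits. A runnable userspace sketch of the same helper and its output:

#include <stdbool.h>
#include <stdio.h>

#define MAX_KEY_LEN 1000

/* Each field is ",<hex>", with zero shortened to a bare ",". The buffer
 * carries extra slack (the kernel allocates MAX_KEY_LEN + 24) so a field
 * begun just under the limit cannot overrun. */
static bool append_int(char *key, int *len, unsigned long long x)
{
	if (*len > MAX_KEY_LEN)
		return false;
	if (x == 0)
		key[(*len)++] = ',';
	else
		*len += sprintf(key + *len, ",%llx", x);
	return true;
}

int main(void)
{
	char key[MAX_KEY_LEN + 24] = "nfs";
	int len = 3;

	if (append_int(key, &len, 4) &&		/* NFS version */
	    append_int(key, &len, 0) &&		/* minor version: bare comma */
	    append_int(key, &len, 0x7f000001)) {	/* address */
		key[len] = '\0';
		printf("%s\n", key);		/* "nfs,4,,7f000001" */
	}
	return 0;
}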
/*
* Get the per-client index cookie for an NFS client if the appropriate mount
@@ -47,160 +41,108 @@ struct nfs_server_key {
* - We always try and get an index cookie for the client, but get filehandle
* cookies on a per-superblock basis, depending on the mount flags
*/
-void nfs_fscache_get_client_cookie(struct nfs_client *clp)
+static bool nfs_fscache_get_client_key(struct nfs_client *clp,
+ char *key, int *_len)
{
const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;
- struct nfs_server_key key;
- uint16_t len = sizeof(key.hdr);
- memset(&key, 0, sizeof(key));
- key.hdr.nfsversion = clp->rpc_ops->version;
- key.hdr.minorversion = clp->cl_minorversion;
- key.hdr.family = clp->cl_addr.ss_family;
+ *_len += snprintf(key + *_len, NFS_MAX_KEY_LEN - *_len,
+ ",%u.%u,%x",
+ clp->rpc_ops->version,
+ clp->cl_minorversion,
+ clp->cl_addr.ss_family);
switch (clp->cl_addr.ss_family) {
case AF_INET:
- key.hdr.port = sin->sin_port;
- key.ipv4_addr = sin->sin_addr;
- len += sizeof(key.ipv4_addr);
- break;
+ if (!nfs_append_int(key, _len, sin->sin_port) ||
+ !nfs_append_int(key, _len, sin->sin_addr.s_addr))
+ return false;
+ return true;
case AF_INET6:
- key.hdr.port = sin6->sin6_port;
- key.ipv6_addr = sin6->sin6_addr;
- len += sizeof(key.ipv6_addr);
- break;
+ if (!nfs_append_int(key, _len, sin6->sin6_port) ||
+ !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[0]) ||
+ !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[1]) ||
+ !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[2]) ||
+ !nfs_append_int(key, _len, sin6->sin6_addr.s6_addr32[3]))
+ return false;
+ return true;
default:
printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
clp->cl_addr.ss_family);
- clp->fscache = NULL;
- return;
+ return false;
}
-
- /* create a cache index for looking up filehandles */
- clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
- &nfs_fscache_server_index_def,
- &key, len,
- NULL, 0,
- clp, 0, true);
- dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
- clp, clp->fscache);
-}
-
-/*
- * Dispose of a per-client cookie
- */
-void nfs_fscache_release_client_cookie(struct nfs_client *clp)
-{
- dfprintk(FSCACHE, "NFS: releasing client cookie (0x%p/0x%p)\n",
- clp, clp->fscache);
-
- fscache_relinquish_cookie(clp->fscache, NULL, false);
- clp->fscache = NULL;
}
/*
- * Get the cache cookie for an NFS superblock. We have to handle
- * uniquification here because the cache doesn't do it for us.
+ * Get the cache cookie for an NFS superblock.
*
* The default uniquifier is just an empty string, but it may be overridden
* either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
* superblock across an automount point of some nature.
*/
-void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
+int nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
{
- struct nfs_fscache_key *key, *xkey;
+ struct fscache_volume *vcookie;
struct nfs_server *nfss = NFS_SB(sb);
- struct rb_node **p, *parent;
- int diff;
+ unsigned int len = 3;
+ char *key;
- nfss->fscache_key = NULL;
- nfss->fscache = NULL;
- if (!uniq) {
- uniq = "";
- ulen = 1;
+ if (uniq) {
+ nfss->fscache_uniq = kmemdup_nul(uniq, ulen, GFP_KERNEL);
+ if (!nfss->fscache_uniq)
+ return -ENOMEM;
}
- key = kzalloc(sizeof(*key) + ulen, GFP_KERNEL);
+ key = kmalloc(NFS_MAX_KEY_LEN + 24, GFP_KERNEL);
if (!key)
- return;
-
- key->nfs_client = nfss->nfs_client;
- key->key.super.s_flags = sb->s_flags & NFS_SB_MASK;
- key->key.nfs_server.flags = nfss->flags;
- key->key.nfs_server.rsize = nfss->rsize;
- key->key.nfs_server.wsize = nfss->wsize;
- key->key.nfs_server.acregmin = nfss->acregmin;
- key->key.nfs_server.acregmax = nfss->acregmax;
- key->key.nfs_server.acdirmin = nfss->acdirmin;
- key->key.nfs_server.acdirmax = nfss->acdirmax;
- key->key.nfs_server.fsid = nfss->fsid;
- key->key.rpc_auth.au_flavor = nfss->client->cl_auth->au_flavor;
-
- key->key.uniq_len = ulen;
- memcpy(key->key.uniquifier, uniq, ulen);
-
- spin_lock(&nfs_fscache_keys_lock);
- p = &nfs_fscache_keys.rb_node;
- parent = NULL;
- while (*p) {
- parent = *p;
- xkey = rb_entry(parent, struct nfs_fscache_key, node);
-
- if (key->nfs_client < xkey->nfs_client)
- goto go_left;
- if (key->nfs_client > xkey->nfs_client)
- goto go_right;
-
- diff = memcmp(&key->key, &xkey->key, sizeof(key->key));
- if (diff < 0)
- goto go_left;
- if (diff > 0)
- goto go_right;
-
- if (key->key.uniq_len == 0)
- goto non_unique;
- diff = memcmp(key->key.uniquifier,
- xkey->key.uniquifier,
- key->key.uniq_len);
- if (diff < 0)
- goto go_left;
- if (diff > 0)
- goto go_right;
- goto non_unique;
-
- go_left:
- p = &(*p)->rb_left;
- continue;
- go_right:
- p = &(*p)->rb_right;
+ return -ENOMEM;
+
+ memcpy(key, "nfs", 3);
+ if (!nfs_fscache_get_client_key(nfss->nfs_client, key, &len) ||
+ !nfs_append_int(key, &len, nfss->fsid.major) ||
+ !nfs_append_int(key, &len, nfss->fsid.minor) ||
+ !nfs_append_int(key, &len, sb->s_flags & NFS_SB_MASK) ||
+ !nfs_append_int(key, &len, nfss->flags) ||
+ !nfs_append_int(key, &len, nfss->rsize) ||
+ !nfs_append_int(key, &len, nfss->wsize) ||
+ !nfs_append_int(key, &len, nfss->acregmin) ||
+ !nfs_append_int(key, &len, nfss->acregmax) ||
+ !nfs_append_int(key, &len, nfss->acdirmin) ||
+ !nfs_append_int(key, &len, nfss->acdirmax) ||
+ !nfs_append_int(key, &len, nfss->client->cl_auth->au_flavor))
+ goto out;
+
+ if (ulen > 0) {
+ if (ulen > NFS_MAX_KEY_LEN - len)
+ goto out;
+ key[len++] = ',';
+ memcpy(key + len, uniq, ulen);
+ len += ulen;
}
-
- rb_link_node(&key->node, parent, p);
- rb_insert_color(&key->node, &nfs_fscache_keys);
- spin_unlock(&nfs_fscache_keys_lock);
- nfss->fscache_key = key;
+ key[len] = 0;
/* create a cache index for looking up filehandles */
- nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
- &nfs_fscache_super_index_def,
- &key->key,
- sizeof(key->key) + ulen,
- NULL, 0,
- nfss, 0, true);
+ vcookie = fscache_acquire_volume(key,
+ NULL, /* preferred_cache */
+ NULL, 0 /* coherency_data */);
dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
- nfss, nfss->fscache);
- return;
+ nfss, vcookie);
+ if (IS_ERR(vcookie)) {
+ if (vcookie != ERR_PTR(-EBUSY)) {
+ kfree(key);
+ return PTR_ERR(vcookie);
+ }
+ pr_err("NFS: Cache volume key already in use (%s)\n", key);
+ vcookie = NULL;
+ }
+ nfss->fscache = vcookie;
-non_unique:
- spin_unlock(&nfs_fscache_keys_lock);
+out:
kfree(key);
- nfss->fscache_key = NULL;
- nfss->fscache = NULL;
- printk(KERN_WARNING "NFS:"
- " Cache request denied due to non-unique superblock keys\n");
+ return 0;
}
/*
@@ -213,29 +155,9 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
dfprintk(FSCACHE, "NFS: releasing superblock cookie (0x%p/0x%p)\n",
nfss, nfss->fscache);
- fscache_relinquish_cookie(nfss->fscache, NULL, false);
+ fscache_relinquish_volume(nfss->fscache, NULL, false);
nfss->fscache = NULL;
-
- if (nfss->fscache_key) {
- spin_lock(&nfs_fscache_keys_lock);
- rb_erase(&nfss->fscache_key->node, &nfs_fscache_keys);
- spin_unlock(&nfs_fscache_keys_lock);
- kfree(nfss->fscache_key);
- nfss->fscache_key = NULL;
- }
-}
-
-static void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
- struct nfs_inode *nfsi)
-{
- memset(auxdata, 0, sizeof(*auxdata));
- auxdata->mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
- auxdata->mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
- auxdata->ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
- auxdata->ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
-
- if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
- auxdata->change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
+ kfree(nfss->fscache_uniq);
}
/*
@@ -254,10 +176,12 @@ void nfs_fscache_init_inode(struct inode *inode)
nfs_fscache_update_auxdata(&auxdata, nfsi);
nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
- &nfs_fscache_inode_object_def,
- nfsi->fh.data, nfsi->fh.size,
- &auxdata, sizeof(auxdata),
- nfsi, nfsi->vfs_inode.i_size, false);
+ 0,
+ nfsi->fh.data, /* index_key */
+ nfsi->fh.size,
+ &auxdata, /* aux_data */
+ sizeof(auxdata),
+ i_size_read(&nfsi->vfs_inode));
}
/*
@@ -265,24 +189,15 @@ void nfs_fscache_init_inode(struct inode *inode)
*/
void nfs_fscache_clear_inode(struct inode *inode)
{
- struct nfs_fscache_inode_auxdata auxdata;
struct nfs_inode *nfsi = NFS_I(inode);
struct fscache_cookie *cookie = nfs_i_fscache(inode);
dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);
- nfs_fscache_update_auxdata(&auxdata, nfsi);
- fscache_relinquish_cookie(cookie, &auxdata, false);
+ fscache_relinquish_cookie(cookie, false);
nfsi->fscache = NULL;
}
-static bool nfs_fscache_can_enable(void *data)
-{
- struct inode *inode = data;
-
- return !inode_is_open_for_write(inode);
-}
-
/*
* Enable or disable caching for a file that is being opened as appropriate.
* The cookie is allocated when the inode is initialised, but is not enabled at
@@ -307,100 +222,104 @@ void nfs_fscache_open_file(struct inode *inode, struct file *filp)
struct nfs_fscache_inode_auxdata auxdata;
struct nfs_inode *nfsi = NFS_I(inode);
struct fscache_cookie *cookie = nfs_i_fscache(inode);
+ bool open_for_write = inode_is_open_for_write(inode);
if (!fscache_cookie_valid(cookie))
return;
- nfs_fscache_update_auxdata(&auxdata, nfsi);
-
- if (inode_is_open_for_write(inode)) {
+ fscache_use_cookie(cookie, open_for_write);
+ if (open_for_write) {
dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
- clear_bit(NFS_INO_FSCACHE, &nfsi->flags);
- fscache_disable_cookie(cookie, &auxdata, true);
- fscache_uncache_all_inode_pages(cookie, inode);
- } else {
- dfprintk(FSCACHE, "NFS: nfsi 0x%p enabling cache\n", nfsi);
- fscache_enable_cookie(cookie, &auxdata, nfsi->vfs_inode.i_size,
- nfs_fscache_can_enable, inode);
- if (fscache_cookie_enabled(cookie))
- set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
+ nfs_fscache_update_auxdata(&auxdata, nfsi);
+ fscache_invalidate(cookie, &auxdata, i_size_read(inode),
+ FSCACHE_INVAL_DIO_WRITE);
}
}
EXPORT_SYMBOL_GPL(nfs_fscache_open_file);
-/*
- * Release the caching state associated with a page, if the page isn't busy
- * interacting with the cache.
- * - Returns true (can release page) or false (page busy).
- */
-int nfs_fscache_release_page(struct page *page, gfp_t gfp)
+void nfs_fscache_release_file(struct inode *inode, struct file *filp)
{
- if (PageFsCache(page)) {
- struct fscache_cookie *cookie = nfs_i_fscache(page->mapping->host);
-
- BUG_ON(!cookie);
- dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
- cookie, page, NFS_I(page->mapping->host));
-
- if (!fscache_maybe_release_page(cookie, page, gfp))
- return 0;
+ struct nfs_fscache_inode_auxdata auxdata;
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct fscache_cookie *cookie = nfs_i_fscache(inode);
- nfs_inc_fscache_stats(page->mapping->host,
- NFSIOS_FSCACHE_PAGES_UNCACHED);
+ if (fscache_cookie_valid(cookie)) {
+ nfs_fscache_update_auxdata(&auxdata, nfsi);
+ fscache_unuse_cookie(cookie, &auxdata, NULL);
}
+}
- return 1;
+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
+{
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+
+ if (ops)
+ ops->end_operation(cres);
}
/*
- * Release the caching state associated with a page if undergoing complete page
- * invalidation.
+ * Fallback page reading interface.
*/
-void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
+static int fscache_fallback_read_page(struct inode *inode, struct page *page)
{
+ struct netfs_cache_resources cres;
struct fscache_cookie *cookie = nfs_i_fscache(inode);
+ struct iov_iter iter;
+ struct bio_vec bvec[1];
+ int ret;
- BUG_ON(!cookie);
-
- dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
- cookie, page, NFS_I(inode));
+ memset(&cres, 0, sizeof(cres));
+ bvec[0].bv_page = page;
+ bvec[0].bv_offset = 0;
+ bvec[0].bv_len = PAGE_SIZE;
+ iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
- fscache_wait_on_page_write(cookie, page);
+ ret = fscache_begin_read_operation(&cres, cookie);
+ if (ret < 0)
+ return ret;
- BUG_ON(!PageLocked(page));
- fscache_uncache_page(cookie, page);
- nfs_inc_fscache_stats(page->mapping->host,
- NFSIOS_FSCACHE_PAGES_UNCACHED);
+ ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
+ NULL, NULL);
+ fscache_end_operation(&cres);
+ return ret;
}
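
The fallback read describes the target page as a one-element bio_vec, wraps it in an iov_iter, and hands it to the generic cache read routine at the page's file offset. The same "one buffer wrapped in a vector, read at an offset" pattern has a direct userspace analogue in preadv(); a minimal runnable sketch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

#define PAGE_SIZE 4096

/* Userspace analogue of the one-page bvec read: wrap a single page-sized
 * buffer in a one-element iovec and read it at the page's file offset. */
static ssize_t read_one_page(int fd, off_t page_offset, char *buf)
{
	struct iovec iov[1] = {
		{ .iov_base = buf, .iov_len = PAGE_SIZE },
	};

	return preadv(fd, iov, 1, page_offset);
}

int main(int argc, char **argv)
{
	char buf[PAGE_SIZE];
	ssize_t n;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	n = read_one_page(fd, 0, buf);
	printf("read %zd bytes\n", n);
	close(fd);
	return 0;
}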
/*
- * Handle completion of a page being read from the cache.
- * - Called in process (keventd) context.
+ * Fallback page writing interface.
*/
-static void nfs_readpage_from_fscache_complete(struct page *page,
- void *context,
- int error)
+static int fscache_fallback_write_page(struct inode *inode, struct page *page,
+ bool no_space_allocated_yet)
{
- dfprintk(FSCACHE,
- "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
- page, context, error);
-
- /*
- * If the read completes with an error, mark the page with PG_checked,
- * unlock the page, and let the VM reissue the readpage.
- */
- if (!error)
- SetPageUptodate(page);
- else
- SetPageChecked(page);
- unlock_page(page);
+ struct netfs_cache_resources cres;
+ struct fscache_cookie *cookie = nfs_i_fscache(inode);
+ struct iov_iter iter;
+ struct bio_vec bvec[1];
+ loff_t start = page_offset(page);
+ size_t len = PAGE_SIZE;
+ int ret;
+
+ memset(&cres, 0, sizeof(cres));
+ bvec[0].bv_page = page;
+ bvec[0].bv_offset = 0;
+ bvec[0].bv_len = PAGE_SIZE;
+ iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+
+ ret = fscache_begin_write_operation(&cres, cookie);
+ if (ret < 0)
+ return ret;
+
+ ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
+ no_space_allocated_yet);
+ if (ret == 0)
+ ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
+ fscache_end_operation(&cres);
+ return ret;
}
/*
* Retrieve a page from fscache
*/
-int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
- struct inode *inode, struct page *page)
+int __nfs_readpage_from_fscache(struct inode *inode, struct page *page)
{
int ret;
@@ -409,112 +328,49 @@ int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
nfs_i_fscache(inode), page, page->index, page->flags, inode);
if (PageChecked(page)) {
+ dfprintk(FSCACHE, "NFS: readpage_from_fscache: PageChecked\n");
ClearPageChecked(page);
return 1;
}
- ret = fscache_read_or_alloc_page(nfs_i_fscache(inode),
- page,
- nfs_readpage_from_fscache_complete,
- ctx,
- GFP_KERNEL);
-
- switch (ret) {
- case 0: /* read BIO submitted (page in fscache) */
- dfprintk(FSCACHE,
- "NFS: readpage_from_fscache: BIO submitted\n");
- nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
- return ret;
-
- case -ENOBUFS: /* inode not in cache */
- case -ENODATA: /* page not in cache */
+ ret = fscache_fallback_read_page(inode, page);
+ if (ret < 0) {
nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
dfprintk(FSCACHE,
- "NFS: readpage_from_fscache %d\n", ret);
- return 1;
-
- default:
- dfprintk(FSCACHE, "NFS: readpage_from_fscache %d\n", ret);
- nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
- }
- return ret;
-}
-
-/*
- * Retrieve a set of pages from fscache
- */
-int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
- struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
-{
- unsigned npages = *nr_pages;
- int ret;
-
- dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
- nfs_i_fscache(inode), npages, inode);
-
- ret = fscache_read_or_alloc_pages(nfs_i_fscache(inode),
- mapping, pages, nr_pages,
- nfs_readpage_from_fscache_complete,
- ctx,
- mapping_gfp_mask(mapping));
- if (*nr_pages < npages)
- nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK,
- npages);
- if (*nr_pages > 0)
- nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL,
- *nr_pages);
-
- switch (ret) {
- case 0: /* read submitted to the cache for all pages */
- BUG_ON(!list_empty(pages));
- BUG_ON(*nr_pages != 0);
- dfprintk(FSCACHE,
- "NFS: nfs_getpages_from_fscache: submitted\n");
-
+ "NFS: readpage_from_fscache failed %d\n", ret);
+ SetPageChecked(page);
return ret;
-
- case -ENOBUFS: /* some pages aren't cached and can't be */
- case -ENODATA: /* some pages aren't cached */
- dfprintk(FSCACHE,
- "NFS: nfs_getpages_from_fscache: no page: %d\n", ret);
- return 1;
-
- default:
- dfprintk(FSCACHE,
- "NFS: nfs_getpages_from_fscache: ret %d\n", ret);
}
- return ret;
+ /* Read completed synchronously */
+ dfprintk(FSCACHE, "NFS: readpage_from_fscache: read successful\n");
+ nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
+ SetPageUptodate(page);
+ return 0;
}
/*
- * Store a newly fetched page in fscache
- * - PG_fscache must be set on the page
+ * Store a newly fetched page in fscache. We can be certain there's no page
+ * stored in the cache as yet, otherwise we would've read it from there.
*/
-void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
+void __nfs_readpage_to_fscache(struct inode *inode, struct page *page)
{
int ret;
dfprintk(FSCACHE,
- "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
- nfs_i_fscache(inode), page, page->index, page->flags, sync);
+ "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx))\n",
+ nfs_i_fscache(inode), page, page->index, page->flags);
+
+ ret = fscache_fallback_write_page(inode, page, true);
- ret = fscache_write_page(nfs_i_fscache(inode), page,
- inode->i_size, GFP_KERNEL);
dfprintk(FSCACHE,
"NFS: readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
page, page->index, page->flags, ret);
if (ret != 0) {
- fscache_uncache_page(nfs_i_fscache(inode), page);
- nfs_inc_fscache_stats(inode,
- NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
+ nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED);
} else {
- nfs_inc_fscache_stats(inode,
- NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
+ nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
}
}
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 6754c8607230..25a5c0f82392 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -8,51 +8,16 @@
#ifndef _NFS_FSCACHE_H
#define _NFS_FSCACHE_H
+#include <linux/swap.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs4_mount.h>
#include <linux/fscache.h>
+#include <linux/iversion.h>
#ifdef CONFIG_NFS_FSCACHE
/*
- * set of NFS FS-Cache objects that form a superblock key
- */
-struct nfs_fscache_key {
- struct rb_node node;
- struct nfs_client *nfs_client; /* the server */
-
- /* the elements of the unique key - as used by nfs_compare_super() and
- * nfs_compare_mount_options() to distinguish superblocks */
- struct {
- struct {
- unsigned long s_flags; /* various flags
- * (& NFS_MS_MASK) */
- } super;
-
- struct {
- struct nfs_fsid fsid;
- int flags;
- unsigned int rsize; /* read size */
- unsigned int wsize; /* write size */
- unsigned int acregmin; /* attr cache timeouts */
- unsigned int acregmax;
- unsigned int acdirmin;
- unsigned int acdirmax;
- } nfs_server;
-
- struct {
- rpc_authflavor_t au_flavor;
- } rpc_auth;
-
- /* uniquifier - can be used if nfs_server.flags includes
- * NFS_MOUNT_UNSHARED */
- u8 uniq_len;
- char uniquifier[0];
- } key;
-};
-
-/*
* Definition of the auxiliary data attached to NFS inode storage objects
* within the cache.
*
@@ -70,84 +35,42 @@ struct nfs_fscache_inode_auxdata {
};
/*
- * fscache-index.c
- */
-extern struct fscache_netfs nfs_fscache_netfs;
-extern const struct fscache_cookie_def nfs_fscache_server_index_def;
-extern const struct fscache_cookie_def nfs_fscache_super_index_def;
-extern const struct fscache_cookie_def nfs_fscache_inode_object_def;
-
-extern int nfs_fscache_register(void);
-extern void nfs_fscache_unregister(void);
-
-/*
* fscache.c
*/
-extern void nfs_fscache_get_client_cookie(struct nfs_client *);
-extern void nfs_fscache_release_client_cookie(struct nfs_client *);
-
-extern void nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
+extern int nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
extern void nfs_fscache_release_super_cookie(struct super_block *);
extern void nfs_fscache_init_inode(struct inode *);
extern void nfs_fscache_clear_inode(struct inode *);
extern void nfs_fscache_open_file(struct inode *, struct file *);
+extern void nfs_fscache_release_file(struct inode *, struct file *);
-extern void __nfs_fscache_invalidate_page(struct page *, struct inode *);
-extern int nfs_fscache_release_page(struct page *, gfp_t);
+extern int __nfs_readpage_from_fscache(struct inode *, struct page *);
+extern void __nfs_read_completion_to_fscache(struct nfs_pgio_header *hdr,
+ unsigned long bytes);
+extern void __nfs_readpage_to_fscache(struct inode *, struct page *);
-extern int __nfs_readpage_from_fscache(struct nfs_open_context *,
- struct inode *, struct page *);
-extern int __nfs_readpages_from_fscache(struct nfs_open_context *,
- struct inode *, struct address_space *,
- struct list_head *, unsigned *);
-extern void __nfs_readpage_to_fscache(struct inode *, struct page *, int);
-
-/*
- * wait for a page to complete writing to the cache
- */
-static inline void nfs_fscache_wait_on_page_write(struct nfs_inode *nfsi,
- struct page *page)
-{
- if (PageFsCache(page))
- fscache_wait_on_page_write(nfsi->fscache, page);
-}
-
-/*
- * release the caching state associated with a page if undergoing complete page
- * invalidation
- */
-static inline void nfs_fscache_invalidate_page(struct page *page,
- struct inode *inode)
+static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
- if (PageFsCache(page))
- __nfs_fscache_invalidate_page(page, inode);
+ if (PageFsCache(page)) {
+ if (current_is_kswapd() || !(gfp & __GFP_FS))
+ return false;
+ wait_on_page_fscache(page);
+ fscache_note_page_release(nfs_i_fscache(page->mapping->host));
+ nfs_inc_fscache_stats(page->mapping->host,
+ NFSIOS_FSCACHE_PAGES_UNCACHED);
+ }
+ return true;
}
/*
* Retrieve a page from an inode data storage object.
*/
-static inline int nfs_readpage_from_fscache(struct nfs_open_context *ctx,
- struct inode *inode,
+static inline int nfs_readpage_from_fscache(struct inode *inode,
struct page *page)
{
if (NFS_I(inode)->fscache)
- return __nfs_readpage_from_fscache(ctx, inode, page);
- return -ENOBUFS;
-}
-
-/*
- * Retrieve a set of pages from an inode data storage object.
- */
-static inline int nfs_readpages_from_fscache(struct nfs_open_context *ctx,
- struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
-{
- if (NFS_I(inode)->fscache)
- return __nfs_readpages_from_fscache(ctx, inode, mapping, pages,
- nr_pages);
+ return __nfs_readpage_from_fscache(inode, page);
return -ENOBUFS;
}
@@ -156,27 +79,38 @@ static inline int nfs_readpages_from_fscache(struct nfs_open_context *ctx,
* in the cache.
*/
static inline void nfs_readpage_to_fscache(struct inode *inode,
- struct page *page,
- int sync)
+ struct page *page)
{
- if (PageFsCache(page))
- __nfs_readpage_to_fscache(inode, page, sync);
+ if (NFS_I(inode)->fscache)
+ __nfs_readpage_to_fscache(inode, page);
}
-/*
- * Invalidate the contents of fscache for this inode. This will not sleep.
- */
-static inline void nfs_fscache_invalidate(struct inode *inode)
+static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
+ struct nfs_inode *nfsi)
{
- fscache_invalidate(NFS_I(inode)->fscache);
+ memset(auxdata, 0, sizeof(*auxdata));
+ auxdata->mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
+ auxdata->mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+ auxdata->ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
+ auxdata->ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
+
+ if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
+ auxdata->change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
}
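
Coherency here rests on snapshotting mtime/ctime (and, for v4, the raw change attribute) into a fixed-layout aux-data blob; when the cookie is later reused, a byte-wise comparison against a freshly built snapshot decides whether the cached object is stale. A small runnable sketch of the idea, with a hypothetical auxdata layout mirroring the fields above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical aux-data snapshot mirroring the fields above. */
struct auxdata {
	uint64_t mtime_sec, mtime_nsec;
	uint64_t ctime_sec, ctime_nsec;
	uint64_t change_attr;
};

static void aux_fill(struct auxdata *aux, uint64_t mtime, uint64_t ctime,
		     uint64_t change)
{
	memset(aux, 0, sizeof(*aux));	/* zero padding so memcmp is valid */
	aux->mtime_sec = mtime;
	aux->ctime_sec = ctime;
	aux->change_attr = change;
}

/* The cached object is reusable only if the stored snapshot still matches. */
static int aux_valid(const struct auxdata *stored, const struct auxdata *now)
{
	return memcmp(stored, now, sizeof(*stored)) == 0;
}

int main(void)
{
	struct auxdata stored, now;

	aux_fill(&stored, 100, 100, 7);
	aux_fill(&now, 100, 100, 8);	/* change attribute moved on */
	printf("valid=%d\n", aux_valid(&stored, &now));	/* 0: stale */
	return 0;
}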
/*
- * Wait for an object to finish being invalidated.
+ * Invalidate the contents of fscache for this inode. This will not sleep.
*/
-static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
+static inline void nfs_fscache_invalidate(struct inode *inode, int flags)
{
- fscache_wait_on_invalidate(NFS_I(inode)->fscache);
+ struct nfs_fscache_inode_auxdata auxdata;
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ if (nfsi->fscache) {
+ nfs_fscache_update_auxdata(&auxdata, nfsi);
+ fscache_invalidate(nfsi->fscache, &auxdata,
+ i_size_read(&nfsi->vfs_inode), flags);
+ }
}
/*
@@ -190,48 +124,28 @@ static inline const char *nfs_server_fscache_state(struct nfs_server *server)
}
#else /* CONFIG_NFS_FSCACHE */
-static inline int nfs_fscache_register(void) { return 0; }
-static inline void nfs_fscache_unregister(void) {}
-
-static inline void nfs_fscache_get_client_cookie(struct nfs_client *clp) {}
-static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {}
-
static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
static inline void nfs_fscache_init_inode(struct inode *inode) {}
static inline void nfs_fscache_clear_inode(struct inode *inode) {}
static inline void nfs_fscache_open_file(struct inode *inode,
struct file *filp) {}
+static inline void nfs_fscache_release_file(struct inode *inode, struct file *file) {}
static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
return 1; /* True: may release page */
}
-static inline void nfs_fscache_invalidate_page(struct page *page,
- struct inode *inode) {}
-static inline void nfs_fscache_wait_on_page_write(struct nfs_inode *nfsi,
- struct page *page) {}
-
-static inline int nfs_readpage_from_fscache(struct nfs_open_context *ctx,
- struct inode *inode,
+static inline int nfs_readpage_from_fscache(struct inode *inode,
struct page *page)
{
return -ENOBUFS;
}
-static inline int nfs_readpages_from_fscache(struct nfs_open_context *ctx,
- struct inode *inode,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages)
-{
- return -ENOBUFS;
-}
static inline void nfs_readpage_to_fscache(struct inode *inode,
- struct page *page, int sync) {}
+ struct page *page) {}
-static inline void nfs_fscache_invalidate(struct inode *inode) {}
-static inline void nfs_fscache_wait_on_invalidate(struct inode *inode) {}
+static inline void nfs_fscache_invalidate(struct inode *inode, int flags) {}
static inline const char *nfs_server_fscache_state(struct nfs_server *server)
{
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index fda530d5e764..d96baa4450e3 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -209,7 +209,7 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
if (!nfs_has_xattr_cache(nfsi))
flags &= ~NFS_INO_INVALID_XATTR;
if (flags & NFS_INO_INVALID_DATA)
- nfs_fscache_invalidate(inode);
+ nfs_fscache_invalidate(inode, 0);
flags &= ~(NFS_INO_REVAL_PAGECACHE | NFS_INO_REVAL_FORCED);
nfsi->cache_validity |= flags;
@@ -853,12 +853,9 @@ int nfs_getattr(struct user_namespace *mnt_userns, const struct path *path,
}
/* Flush out writes to the server in order to update c/mtime. */
- if ((request_mask & (STATX_CTIME|STATX_MTIME)) &&
- S_ISREG(inode->i_mode)) {
- err = filemap_write_and_wait(inode->i_mapping);
- if (err)
- goto out;
- }
+ if ((request_mask & (STATX_CTIME | STATX_MTIME)) &&
+ S_ISREG(inode->i_mode))
+ filemap_write_and_wait(inode->i_mapping);
/*
* We may force a getattr if the user cares about atime.
@@ -1289,6 +1286,7 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
{
int ret;
+ nfs_fscache_invalidate(inode, 0);
if (mapping->nrpages != 0) {
if (S_ISREG(inode->i_mode)) {
ret = nfs_sync_mapping(mapping);
@@ -1300,7 +1298,6 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
return ret;
}
nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
- nfs_fscache_wait_on_invalidate(inode);
dfprintk(PAGECACHE, "NFS: (%s/%Lu) data cache invalidated\n",
inode->i_sb->s_id,
@@ -2374,10 +2371,6 @@ static int __init init_nfs_fs(void)
if (err < 0)
goto out9;
- err = nfs_fscache_register();
- if (err < 0)
- goto out8;
-
err = nfsiod_start();
if (err)
goto out7;
@@ -2429,8 +2422,6 @@ out5:
out6:
nfsiod_stop();
out7:
- nfs_fscache_unregister();
-out8:
unregister_pernet_subsys(&nfs_net_ops);
out9:
nfs_sysfs_exit();
@@ -2445,7 +2436,6 @@ static void __exit exit_nfs_fs(void)
nfs_destroy_readpagecache();
nfs_destroy_inodecache();
nfs_destroy_nfspagecache();
- nfs_fscache_unregister();
unregister_pernet_subsys(&nfs_net_ops);
rpc_proc_unregister(&init_net, "nfs");
unregister_nfs_fs();
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 12f6acb483bb..2de7c56a1fbe 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -373,6 +373,7 @@ extern unsigned long nfs_access_cache_count(struct shrinker *shrink,
extern unsigned long nfs_access_cache_scan(struct shrinker *shrink,
struct shrink_control *sc);
struct dentry *nfs_lookup(struct inode *, struct dentry *, unsigned int);
+void nfs_d_prune_case_insensitive_aliases(struct inode *inode);
int nfs_create(struct user_namespace *, struct inode *, struct dentry *,
umode_t, bool);
int nfs_mkdir(struct user_namespace *, struct inode *, struct dentry *,
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 7100514d306b..1597eef40d54 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -220,7 +220,8 @@ static int nfs3_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
task_flags);
}
-static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
+static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry,
+ const struct cred *cred)
{
struct nfs3_accessargs arg = {
.fh = NFS_FH(inode),
@@ -231,7 +232,7 @@ static int nfs3_proc_access(struct inode *inode, struct nfs_access_entry *entry)
.rpc_proc = &nfs3_procedures[NFS3PROC_ACCESS],
.rpc_argp = &arg,
.rpc_resp = &res,
- .rpc_cred = entry->cred,
+ .rpc_cred = cred,
};
int status = -ENOMEM;
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index 8b21ff1be717..32129446beca 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -46,7 +46,7 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
{
struct inode *inode = file_inode(filep);
struct nfs_server *server = NFS_SERVER(inode);
- u32 bitmask[3];
+ u32 bitmask[NFS_BITMASK_SZ];
struct nfs42_falloc_args args = {
.falloc_fh = NFS_FH(inode),
.falloc_offset = offset,
@@ -69,9 +69,8 @@ static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
return status;
}
- memcpy(bitmask, server->cache_consistency_bitmask, sizeof(bitmask));
- if (server->attr_bitmask[1] & FATTR4_WORD1_SPACE_USED)
- bitmask[1] |= FATTR4_WORD1_SPACE_USED;
+ nfs4_bitmask_set(bitmask, server->cache_consistency_bitmask, inode,
+ NFS_INO_INVALID_BLOCKS);
res.falloc_fattr = nfs_alloc_fattr();
if (!res.falloc_fattr)
@@ -1044,13 +1043,14 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
struct inode *src_inode = file_inode(src_f);
struct inode *dst_inode = file_inode(dst_f);
struct nfs_server *server = NFS_SERVER(dst_inode);
+ __u32 dst_bitmask[NFS_BITMASK_SZ];
struct nfs42_clone_args args = {
.src_fh = NFS_FH(src_inode),
.dst_fh = NFS_FH(dst_inode),
.src_offset = src_offset,
.dst_offset = dst_offset,
.count = count,
- .dst_bitmask = server->cache_consistency_bitmask,
+ .dst_bitmask = dst_bitmask,
};
struct nfs42_clone_res res = {
.server = server,
@@ -1079,6 +1079,9 @@ static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
if (!res.dst_fattr)
return -ENOMEM;
+ nfs4_bitmask_set(dst_bitmask, server->cache_consistency_bitmask,
+ dst_inode, NFS_INO_INVALID_BLOCKS);
+
status = nfs4_call_sync(server->client, server, msg,
&args.seq_args, &res.seq_res, 0);
trace_nfs4_clone(src_inode, dst_inode, &args, status);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index ed5eaca6801e..84f39b6f1b1e 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -260,8 +260,8 @@ struct nfs4_state_maintenance_ops {
};
struct nfs4_mig_recovery_ops {
- int (*get_locations)(struct inode *, struct nfs4_fs_locations *,
- struct page *, const struct cred *);
+ int (*get_locations)(struct nfs_server *, struct nfs_fh *,
+ struct nfs4_fs_locations *, struct page *, const struct cred *);
int (*fsid_present)(struct inode *, const struct cred *);
};
@@ -280,7 +280,8 @@ struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *,
int nfs4_submount(struct fs_context *, struct nfs_server *);
int nfs4_replace_transport(struct nfs_server *server,
const struct nfs4_fs_locations *locations);
-
+size_t nfs_parse_server_name(char *string, size_t len, struct sockaddr *sa,
+ size_t salen, struct net *net, int port);
/* nfs4proc.c */
extern int nfs4_handle_exception(struct nfs_server *, int, struct nfs4_exception *);
extern int nfs4_async_handle_error(struct rpc_task *task,
@@ -302,8 +303,9 @@ extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait);
extern int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle);
extern int nfs4_proc_fs_locations(struct rpc_clnt *, struct inode *, const struct qstr *,
struct nfs4_fs_locations *, struct page *);
-extern int nfs4_proc_get_locations(struct inode *, struct nfs4_fs_locations *,
- struct page *page, const struct cred *);
+extern int nfs4_proc_get_locations(struct nfs_server *, struct nfs_fh *,
+ struct nfs4_fs_locations *,
+ struct page *page, const struct cred *);
extern int nfs4_proc_fsid_present(struct inode *, const struct cred *);
extern struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *,
struct dentry *,
@@ -315,6 +317,8 @@ extern int nfs4_set_rw_stateid(nfs4_stateid *stateid,
const struct nfs_open_context *ctx,
const struct nfs_lock_context *l_ctx,
fmode_t fmode);
+extern void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[],
+ struct inode *inode, unsigned long cache_validity);
extern int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fattr *fattr, struct inode *inode);
extern int update_open_stateid(struct nfs4_state *state,
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index d8b5a250ca05..47a6cf892c95 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -1343,8 +1343,11 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
}
nfs_put_client(clp);
- if (server->nfs_client->cl_hostname == NULL)
+ if (server->nfs_client->cl_hostname == NULL) {
server->nfs_client->cl_hostname = kstrdup(hostname, GFP_KERNEL);
+ if (server->nfs_client->cl_hostname == NULL)
+ return -ENOMEM;
+ }
nfs_server_insert_lists(server);
return nfs_probe_server(server, NFS_FH(d_inode(server->super->s_root)));
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 873342308dc0..3680c8da510c 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -164,16 +164,21 @@ static int nfs4_validate_fspath(struct dentry *dentry,
return 0;
}
-static size_t nfs_parse_server_name(char *string, size_t len,
- struct sockaddr *sa, size_t salen, struct net *net)
+size_t nfs_parse_server_name(char *string, size_t len, struct sockaddr *sa,
+ size_t salen, struct net *net, int port)
{
ssize_t ret;
ret = rpc_pton(net, string, len, sa, salen);
if (ret == 0) {
- ret = nfs_dns_resolve_name(net, string, len, sa, salen);
- if (ret < 0)
- ret = 0;
+ ret = rpc_uaddr2sockaddr(net, string, len, sa, salen);
+ if (ret == 0) {
+ ret = nfs_dns_resolve_name(net, string, len, sa, salen);
+ if (ret < 0)
+ ret = 0;
+ }
+ } else if (port) {
+ rpc_set_port(sa, port);
}
return ret;
}
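
The reworked helper tries a chain of interpretations: a presentation address via rpc_pton(), then an RPC universal address (which already encodes its port), then DNS resolution, applying the caller's port override only when the literal parse succeeded. A runnable userspace analogue of the literal-then-resolve fallback, simplified to always apply the port and limited to IPv4:

#include <arpa/inet.h>
#include <netdb.h>
#include <stdio.h>
#include <string.h>

/* Try a literal address first, fall back to name resolution, then apply
 * the caller's port. */
static int parse_server(const char *name, int port, struct sockaddr_in *out)
{
	struct addrinfo hints = { .ai_family = AF_INET }, *res;

	memset(out, 0, sizeof(*out));
	out->sin_family = AF_INET;
	if (inet_pton(AF_INET, name, &out->sin_addr) != 1) {
		if (getaddrinfo(name, NULL, &hints, &res) != 0)
			return -1;	/* neither literal nor resolvable */
		memcpy(out, res->ai_addr, sizeof(*out));
		freeaddrinfo(res);
	}
	out->sin_port = htons(port);
	return 0;
}

int main(void)
{
	struct sockaddr_in sa;
	char buf[INET_ADDRSTRLEN];

	if (parse_server("127.0.0.1", 2049, &sa) == 0)
		printf("%s:%d\n",
		       inet_ntop(AF_INET, &sa.sin_addr, buf, sizeof(buf)),
		       ntohs(sa.sin_port));
	return 0;
}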
@@ -328,7 +333,7 @@ static int try_location(struct fs_context *fc,
nfs_parse_server_name(buf->data, buf->len,
&ctx->nfs_server.address,
sizeof(ctx->nfs_server._address),
- fc->net_ns);
+ fc->net_ns, 0);
if (ctx->nfs_server.addrlen == 0)
continue;
@@ -496,7 +501,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
continue;
salen = nfs_parse_server_name(buf->data, buf->len,
- sap, addr_bufsize, net);
+ sap, addr_bufsize, net, 0);
if (salen == 0)
continue;
rpc_set_port(sap, NFS_PORT);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ee3bc79f6ca3..0e0db6c27619 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -108,10 +108,6 @@ static int nfs41_test_stateid(struct nfs_server *, nfs4_stateid *,
static int nfs41_free_stateid(struct nfs_server *, const nfs4_stateid *,
const struct cred *, bool);
#endif
-static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ],
- const __u32 *src, struct inode *inode,
- struct nfs_server *server,
- struct nfs4_label *label);
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
static inline struct nfs4_label *
@@ -1233,8 +1229,7 @@ nfs4_update_changeattr_locked(struct inode *inode,
NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL |
NFS_INO_INVALID_SIZE | NFS_INO_INVALID_OTHER |
NFS_INO_INVALID_BLOCKS | NFS_INO_INVALID_NLINK |
- NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR |
- NFS_INO_REVAL_PAGECACHE;
+ NFS_INO_INVALID_MODE | NFS_INO_INVALID_XATTR;
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
}
nfsi->attrtimeo_timestamp = jiffies;
@@ -2653,9 +2648,8 @@ static int nfs4_opendata_access(const struct cred *cred,
} else if ((fmode & FMODE_READ) && !opendata->file_created)
mask = NFS4_ACCESS_READ;
- cache.cred = cred;
nfs_access_set_mask(&cache, opendata->o_res.access_result);
- nfs_access_add_cache(state->inode, &cache);
+ nfs_access_add_cache(state->inode, &cache, cred);
flags = NFS4_ACCESS_READ | NFS4_ACCESS_EXECUTE | NFS4_ACCESS_LOOKUP;
if ((mask & ~cache.mask & flags) == 0)
@@ -3670,7 +3664,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
if (!nfs4_have_delegation(inode, FMODE_READ)) {
nfs4_bitmask_set(calldata->arg.bitmask_store,
server->cache_consistency_bitmask,
- inode, server, NULL);
+ inode, 0);
calldata->arg.bitmask = calldata->arg.bitmask_store;
} else
calldata->arg.bitmask = NULL;
@@ -3841,7 +3835,9 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
FATTR4_WORD0_FH_EXPIRE_TYPE |
FATTR4_WORD0_LINK_SUPPORT |
FATTR4_WORD0_SYMLINK_SUPPORT |
- FATTR4_WORD0_ACLSUPPORT;
+ FATTR4_WORD0_ACLSUPPORT |
+ FATTR4_WORD0_CASE_INSENSITIVE |
+ FATTR4_WORD0_CASE_PRESERVING;
if (minorversion)
bitmask[2] = FATTR4_WORD2_SUPPATTR_EXCLCREAT;
@@ -3870,10 +3866,16 @@ static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *f
server->caps |= NFS_CAP_HARDLINKS;
if (res.has_symlinks != 0)
server->caps |= NFS_CAP_SYMLINKS;
+ if (res.case_insensitive)
+ server->caps |= NFS_CAP_CASE_INSENSITIVE;
+ if (res.case_preserving)
+ server->caps |= NFS_CAP_CASE_PRESERVING;
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
if (res.attr_bitmask[2] & FATTR4_WORD2_SECURITY_LABEL)
server->caps |= NFS_CAP_SECURITY_LABEL;
#endif
+ if (res.attr_bitmask[0] & FATTR4_WORD0_FS_LOCATIONS)
+ server->caps |= NFS_CAP_FS_LOCATIONS;
if (!(res.attr_bitmask[0] & FATTR4_WORD0_FILEID))
server->fattr_valid &= ~NFS_ATTR_FATTR_FILEID;
if (!(res.attr_bitmask[1] & FATTR4_WORD1_MODE))
@@ -3932,6 +3934,114 @@ int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle)
return err;
}
+static void test_fs_location_for_trunking(struct nfs4_fs_location *location,
+ struct nfs_client *clp,
+ struct nfs_server *server)
+{
+ int i;
+
+ for (i = 0; i < location->nservers; i++) {
+ struct nfs4_string *srv_loc = &location->servers[i];
+ struct sockaddr addr;
+ size_t addrlen;
+ struct xprt_create xprt_args = {
+ .ident = 0,
+ .net = clp->cl_net,
+ };
+ struct nfs4_add_xprt_data xprtdata = {
+ .clp = clp,
+ };
+ struct rpc_add_xprt_test rpcdata = {
+ .add_xprt_test = clp->cl_mvops->session_trunk,
+ .data = &xprtdata,
+ };
+ char *servername = NULL;
+
+ if (!srv_loc->len)
+ continue;
+
+ addrlen = nfs_parse_server_name(srv_loc->data, srv_loc->len,
+ &addr, sizeof(addr),
+ clp->cl_net, server->port);
+ if (!addrlen)
+ return;
+ xprt_args.dstaddr = &addr;
+ xprt_args.addrlen = addrlen;
+ servername = kmalloc(srv_loc->len + 1, GFP_KERNEL);
+ if (!servername)
+ return;
+ memcpy(servername, srv_loc->data, srv_loc->len);
+ servername[srv_loc->len] = '\0';
+ xprt_args.servername = servername;
+
+ xprtdata.cred = nfs4_get_clid_cred(clp);
+ rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
+ rpc_clnt_setup_test_and_add_xprt,
+ &rpcdata);
+ if (xprtdata.cred)
+ put_cred(xprtdata.cred);
+ kfree(servername);
+ }
+}
+
+static int _nfs4_discover_trunking(struct nfs_server *server,
+ struct nfs_fh *fhandle)
+{
+ struct nfs4_fs_locations *locations = NULL;
+ struct page *page;
+ const struct cred *cred;
+ struct nfs_client *clp = server->nfs_client;
+ const struct nfs4_state_maintenance_ops *ops =
+ clp->cl_mvops->state_renewal_ops;
+ int status = -ENOMEM, i;
+
+ cred = ops->get_state_renewal_cred(clp);
+ if (cred == NULL) {
+ cred = nfs4_get_clid_cred(clp);
+ if (cred == NULL)
+ return -ENOKEY;
+ }
+
+ page = alloc_page(GFP_KERNEL);
+ locations = kmalloc(sizeof(struct nfs4_fs_locations), GFP_KERNEL);
+ if (page == NULL || locations == NULL)
+ goto out;
+
+ status = nfs4_proc_get_locations(server, fhandle, locations, page,
+ cred);
+ if (status)
+ goto out;
+
+ for (i = 0; i < locations->nlocations; i++)
+ test_fs_location_for_trunking(&locations->locations[i], clp,
+ server);
+out:
+ if (page)
+ __free_page(page);
+ kfree(locations);
+ return status;
+}
+
+static int nfs4_discover_trunking(struct nfs_server *server,
+ struct nfs_fh *fhandle)
+{
+ struct nfs4_exception exception = {
+ .interruptible = true,
+ };
+ struct nfs_client *clp = server->nfs_client;
+ int err = 0;
+
+ if (!nfs4_has_session(clp))
+ goto out;
+ do {
+ err = nfs4_handle_exception(server,
+ _nfs4_discover_trunking(server, fhandle),
+ &exception);
+ } while (exception.retry);
+out:
+ return err;
+}
+
static int _nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
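[Note: _nfs4_discover_trunking() above allocates a scratch page and a locations buffer up front and unwinds through a single out label; since kfree(NULL) is a no-op and the page free is NULL-guarded, every failure takes the same path. A standalone sketch of the idiom, assuming plain malloc()/free() stand in for the kernel allocators:]

#include <stdlib.h>

struct locations { int nlocations; };

static int discover(void)
{
	void *page = malloc(4096);
	struct locations *locs = malloc(sizeof(*locs));
	int status = -12;		/* stands in for -ENOMEM */

	if (!page || !locs)
		goto out;
	/* ... issue GETATTR(fs_locations), probe each location ... */
	status = 0;
out:
	free(page);			/* free(NULL) is a no-op */
	free(locs);
	return status;
}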
@@ -4441,7 +4551,8 @@ static int nfs4_proc_lookupp(struct inode *inode, struct nfs_fh *fhandle,
return err;
}
-static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
+static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
+ const struct cred *cred)
{
struct nfs_server *server = NFS_SERVER(inode);
struct nfs4_accessargs args = {
@@ -4455,7 +4566,7 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_ACCESS],
.rpc_argp = &args,
.rpc_resp = &res,
- .rpc_cred = entry->cred,
+ .rpc_cred = cred,
};
int status = 0;
@@ -4475,14 +4586,15 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry
return status;
}
-static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
+static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry,
+ const struct cred *cred)
{
struct nfs4_exception exception = {
.interruptible = true,
};
int err;
do {
- err = _nfs4_proc_access(inode, entry);
+ err = _nfs4_proc_access(inode, entry, cred);
trace_nfs4_access(inode, err);
err = nfs4_handle_exception(NFS_SERVER(inode), err,
&exception);
@@ -4663,8 +4775,10 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg,
nfs_fattr_init(res->dir_attr);
- if (inode)
+ if (inode) {
nfs4_inode_return_delegation(inode);
+ nfs_d_prune_case_insensitive_aliases(inode);
+ }
}
static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data)
@@ -4730,6 +4844,7 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
return 0;
if (task->tk_status == 0) {
+ nfs_d_prune_case_insensitive_aliases(d_inode(data->old_dentry));
if (new_dir != old_dir) {
/* Note: If we moved a directory, nlink will change */
nfs4_update_changeattr(old_dir, &res->old_cinfo,
@@ -5422,14 +5537,14 @@ bool nfs4_write_need_cache_consistency_data(struct nfs_pgio_header *hdr)
return nfs4_have_delegation(hdr->inode, FMODE_READ) == 0;
}
-static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src,
- struct inode *inode, struct nfs_server *server,
- struct nfs4_label *label)
+void nfs4_bitmask_set(__u32 bitmask[], const __u32 src[],
+ struct inode *inode, unsigned long cache_validity)
{
- unsigned long cache_validity = READ_ONCE(NFS_I(inode)->cache_validity);
+ struct nfs_server *server = NFS_SERVER(inode);
unsigned int i;
memcpy(bitmask, src, sizeof(*bitmask) * NFS4_BITMASK_SZ);
+ cache_validity |= READ_ONCE(NFS_I(inode)->cache_validity);
if (cache_validity & NFS_INO_INVALID_CHANGE)
bitmask[0] |= FATTR4_WORD0_CHANGE;
@@ -5441,8 +5556,6 @@ static void nfs4_bitmask_set(__u32 bitmask[NFS4_BITMASK_SZ], const __u32 *src,
bitmask[1] |= FATTR4_WORD1_OWNER | FATTR4_WORD1_OWNER_GROUP;
if (cache_validity & NFS_INO_INVALID_NLINK)
bitmask[1] |= FATTR4_WORD1_NUMLINKS;
- if (label && label->len && cache_validity & NFS_INO_INVALID_LABEL)
- bitmask[2] |= FATTR4_WORD2_SECURITY_LABEL;
if (cache_validity & NFS_INO_INVALID_CTIME)
bitmask[1] |= FATTR4_WORD1_TIME_METADATA;
if (cache_validity & NFS_INO_INVALID_MTIME)
@@ -5469,7 +5582,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
} else {
nfs4_bitmask_set(hdr->args.bitmask_store,
server->cache_consistency_bitmask,
- hdr->inode, server, NULL);
+ hdr->inode, NFS_INO_INVALID_BLOCKS);
hdr->args.bitmask = hdr->args.bitmask_store;
}
@@ -6507,8 +6620,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
data->args.fhandle = &data->fh;
data->args.stateid = &data->stateid;
nfs4_bitmask_set(data->args.bitmask_store,
- server->cache_consistency_bitmask, inode, server,
- NULL);
+ server->cache_consistency_bitmask, inode, 0);
data->args.bitmask = data->args.bitmask_store;
nfs_copy_fh(&data->fh, NFS_FH(inode));
nfs4_stateid_copy(&data->stateid, stateid);
@@ -7611,7 +7723,7 @@ static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
const char *key, const void *buf,
size_t buflen, int flags)
{
- struct nfs_access_entry cache;
+ u32 mask;
int ret;
if (!nfs_server_capable(inode, NFS_CAP_XATTR))
@@ -7626,8 +7738,8 @@ static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler,
* do a cached access check for the XA* flags to possibly avoid
* doing an RPC and getting EACCES back.
*/
- if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
- if (!(cache.mask & NFS_ACCESS_XAWRITE))
+ if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
+ if (!(mask & NFS_ACCESS_XAWRITE))
return -EACCES;
}
@@ -7648,14 +7760,14 @@ static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
struct dentry *unused, struct inode *inode,
const char *key, void *buf, size_t buflen)
{
- struct nfs_access_entry cache;
+ u32 mask;
ssize_t ret;
if (!nfs_server_capable(inode, NFS_CAP_XATTR))
return -EOPNOTSUPP;
- if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
- if (!(cache.mask & NFS_ACCESS_XAREAD))
+ if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
+ if (!(mask & NFS_ACCESS_XAREAD))
return -EACCES;
}
@@ -7680,13 +7792,13 @@ nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len)
ssize_t ret, size;
char *buf;
size_t buflen;
- struct nfs_access_entry cache;
+ u32 mask;
if (!nfs_server_capable(inode, NFS_CAP_XATTR))
return 0;
- if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) {
- if (!(cache.mask & NFS_ACCESS_XALIST))
+ if (!nfs_access_get_cached(inode, current_cred(), &mask, true)) {
+ if (!(mask & NFS_ACCESS_XALIST))
return 0;
}
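[Note: these xattr entry points only ever consult the access mask, so nfs_access_get_cached() now hands back a bare u32 instead of filling a whole nfs_access_entry. A minimal sketch of the mask test; the bit values here are illustrative, not copied from the kernel headers:]

#include <stdint.h>

#define ACCESS_XAREAD  0x0040	/* illustrative bit assignments */
#define ACCESS_XAWRITE 0x0080
#define ACCESS_XALIST  0x0100

/* Nonzero when the cached mask already proves the right exists,
 * letting the caller skip an ACCESS round trip to the server. */
static int cached_allows(uint32_t mask, uint32_t want)
{
	return (mask & want) == want;
}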
@@ -7818,18 +7930,18 @@ int nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
* appended to this compound to identify the client ID which is
* performing recovery.
*/
-static int _nfs40_proc_get_locations(struct inode *inode,
+static int _nfs40_proc_get_locations(struct nfs_server *server,
+ struct nfs_fh *fhandle,
struct nfs4_fs_locations *locations,
struct page *page, const struct cred *cred)
{
- struct nfs_server *server = NFS_SERVER(inode);
struct rpc_clnt *clnt = server->client;
u32 bitmask[2] = {
[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
};
struct nfs4_fs_locations_arg args = {
.clientid = server->nfs_client->cl_clientid,
- .fh = NFS_FH(inode),
+ .fh = fhandle,
.page = page,
.bitmask = bitmask,
.migration = 1, /* skip LOOKUP */
@@ -7875,17 +7987,17 @@ static int _nfs40_proc_get_locations(struct inode *inode,
* When the client supports GETATTR(fs_locations_info), it can
* be plumbed in here.
*/
-static int _nfs41_proc_get_locations(struct inode *inode,
+static int _nfs41_proc_get_locations(struct nfs_server *server,
+ struct nfs_fh *fhandle,
struct nfs4_fs_locations *locations,
struct page *page, const struct cred *cred)
{
- struct nfs_server *server = NFS_SERVER(inode);
struct rpc_clnt *clnt = server->client;
u32 bitmask[2] = {
[0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
};
struct nfs4_fs_locations_arg args = {
- .fh = NFS_FH(inode),
+ .fh = fhandle,
.page = page,
.bitmask = bitmask,
.migration = 1, /* skip LOOKUP */
@@ -7919,7 +8031,8 @@ static int _nfs41_proc_get_locations(struct inode *inode,
/**
* nfs4_proc_get_locations - discover locations for a migrated FSID
- * @inode: inode on FSID that is migrating
+ * @server: pointer to nfs_server to process
+ * @fhandle: pointer to the kernel NFS client file handle
* @locations: result of query
* @page: buffer
* @cred: credential to use for this operation
@@ -7934,11 +8047,11 @@ static int _nfs41_proc_get_locations(struct inode *inode,
* -NFS4ERR_LEASE_MOVED is returned if the server still has leases
* from this client that require migration recovery.
*/
-int nfs4_proc_get_locations(struct inode *inode,
+int nfs4_proc_get_locations(struct nfs_server *server,
+ struct nfs_fh *fhandle,
struct nfs4_fs_locations *locations,
struct page *page, const struct cred *cred)
{
- struct nfs_server *server = NFS_SERVER(inode);
struct nfs_client *clp = server->nfs_client;
const struct nfs4_mig_recovery_ops *ops =
clp->cl_mvops->mig_recovery_ops;
@@ -7951,10 +8064,11 @@ int nfs4_proc_get_locations(struct inode *inode,
(unsigned long long)server->fsid.major,
(unsigned long long)server->fsid.minor,
clp->cl_hostname);
- nfs_display_fhandle(NFS_FH(inode), __func__);
+ nfs_display_fhandle(fhandle, __func__);
do {
- status = ops->get_locations(inode, locations, page, cred);
+ status = ops->get_locations(server, fhandle, locations, page,
+ cred);
if (status != -NFS4ERR_DELAY)
break;
nfs4_handle_exception(server, status, &exception);
@@ -10423,6 +10537,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
.free_client = nfs4_free_client,
.create_server = nfs4_create_server,
.clone_server = nfs_clone_server,
+ .discover_trunking = nfs4_discover_trunking,
};
static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index f63dfa01001c..f5a62c0d999b 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -2098,7 +2098,8 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
}
inode = d_inode(server->super->s_root);
- result = nfs4_proc_get_locations(inode, locations, page, cred);
+ result = nfs4_proc_get_locations(server, NFS_FH(inode), locations,
+ page, cred);
if (result) {
dprintk("<-- %s: failed to retrieve fs_locations: %d\n",
__func__, result);
@@ -2106,6 +2107,9 @@ static int nfs4_try_migration(struct nfs_server *server, const struct cred *cred
}
result = -NFS4ERR_NXIO;
+ if (!locations->nlocations)
+ goto out;
+
if (!(locations->fattr.valid & NFS_ATTR_FATTR_V4_LOCATIONS)) {
dprintk("<-- %s: No fs_locations data, migration skipped\n",
__func__);
@@ -2693,6 +2697,6 @@ static int nfs4_run_state_manager(void *ptr)
allow_signal(SIGKILL);
nfs4_state_manager(clp);
nfs_put_client(clp);
- module_put_and_exit(0);
+ module_put_and_kthread_exit(0);
return 0;
}
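[Note: because decode_attr_fs_locations() later in this series stops treating an empty locations array as a protocol error, the migration path above gains an explicit nlocations check and fails with -NFS4ERR_NXIO itself. In sketch form, the validation moves from decode time to use time:]

struct fs_locations_sketch { int nlocations; };

static int try_migration(const struct fs_locations_sketch *locs)
{
	if (!locs->nlocations)
		return -6;	/* stands in for -NFS4ERR_NXIO */
	/* ... pick a replacement location and reconnect ... */
	return 0;
}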
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 69862bf6db00..8e70b92df4cc 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3533,6 +3533,42 @@ static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint
return 0;
}
+static int decode_attr_case_insensitive(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+ __be32 *p;
+
+ *res = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_CASE_INSENSITIVE - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_CASE_INSENSITIVE)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -EIO;
+ *res = be32_to_cpup(p);
+ bitmap[0] &= ~FATTR4_WORD0_CASE_INSENSITIVE;
+ }
+ dprintk("%s: case_insensitive=%s\n", __func__, *res == 0 ? "false" : "true");
+ return 0;
+}
+
+static int decode_attr_case_preserving(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *res)
+{
+ __be32 *p;
+
+ *res = 0;
+ if (unlikely(bitmap[0] & (FATTR4_WORD0_CASE_PRESERVING - 1U)))
+ return -EIO;
+ if (likely(bitmap[0] & FATTR4_WORD0_CASE_PRESERVING)) {
+ p = xdr_inline_decode(xdr, 4);
+ if (unlikely(!p))
+ return -EIO;
+ *res = be32_to_cpup(p);
+ bitmap[0] &= ~FATTR4_WORD0_CASE_PRESERVING;
+ }
+ dprintk("%s: case_preserving=%s\n", __func__, *res == 0 ? "false" : "true");
+ return 0;
+}
+
static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *fileid)
{
__be32 *p;
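[Note: both new decoders above follow the standard NFSv4 attribute pattern: attributes must be consumed in ascending bit order, a set bit implies exactly four more octets of payload, and the bit is cleared once consumed so later decoders see a clean bitmap. A self-contained sketch of that shape, with plain pointers standing in for the kernel's xdr_stream:]

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Decode one optional boolean attribute (bit must be a power of two).
 * Returns -1 on malformed input, 0 otherwise; *res becomes 0 or 1. */
static int decode_bool_attr(const uint8_t **p, const uint8_t *end,
			    uint32_t *bitmap, uint32_t bit, uint32_t *res)
{
	*res = 0;
	if (*bitmap & (bit - 1))	/* a lower-order attribute was skipped */
		return -1;
	if (*bitmap & bit) {
		uint32_t raw;

		if (end - *p < 4)	/* xdr_inline_decode() failure */
			return -1;
		memcpy(&raw, *p, 4);
		*p += 4;
		*res = ntohl(raw) != 0;
		*bitmap &= ~bit;	/* mark the attribute consumed */
	}
	return 0;
}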
@@ -3696,8 +3732,6 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
if (unlikely(!p))
goto out_eio;
n = be32_to_cpup(p);
- if (n <= 0)
- goto out_eio;
for (res->nlocations = 0; res->nlocations < n; res->nlocations++) {
u32 m;
struct nfs4_fs_location *loc;
@@ -4200,10 +4234,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
} else
printk(KERN_WARNING "%s: label too long (%u)!\n",
__func__, len);
+ if (label && label->label)
+ dprintk("%s: label=%.*s, len=%d, PI=%d, LFS=%d\n",
+ __func__, label->len, (char *)label->label,
+ label->len, label->pi, label->lfs);
}
- if (label && label->label)
- dprintk("%s: label=%s, len=%d, PI=%d, LFS=%d\n", __func__,
- (char *)label->label, label->len, label->pi, label->lfs);
return status;
}
@@ -4412,6 +4447,10 @@ static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_re
goto xdr_error;
if ((status = decode_attr_aclsupport(xdr, bitmap, &res->acl_bitmask)) != 0)
goto xdr_error;
+ if ((status = decode_attr_case_insensitive(xdr, bitmap, &res->case_insensitive)) != 0)
+ goto xdr_error;
+ if ((status = decode_attr_case_preserving(xdr, bitmap, &res->case_preserving)) != 0)
+ goto xdr_error;
if ((status = decode_attr_exclcreat_supported(xdr, bitmap,
res->exclcreat_bitmask)) != 0)
goto xdr_error;
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index b3aee261801e..317ce27bdc4b 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -42,7 +42,6 @@
{ BIT(NFS_INO_ACL_LRU_SET), "ACL_LRU_SET" }, \
{ BIT(NFS_INO_INVALIDATING), "INVALIDATING" }, \
{ BIT(NFS_INO_FSCACHE), "FSCACHE" }, \
- { BIT(NFS_INO_FSCACHE_LOCK), "FSCACHE_LOCK" }, \
{ BIT(NFS_INO_LAYOUTCOMMIT), "NEED_LAYOUTCOMMIT" }, \
{ BIT(NFS_INO_LAYOUTCOMMITTING), "LAYOUTCOMMIT" }, \
{ BIT(NFS_INO_LAYOUTSTATS), "LAYOUTSTATS" }, \
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index d11af2a9299c..eb00229c1a50 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -123,7 +123,7 @@ static void nfs_readpage_release(struct nfs_page *req, int error)
struct address_space *mapping = page_file_mapping(page);
if (PageUptodate(page))
- nfs_readpage_to_fscache(inode, page, 0);
+ nfs_readpage_to_fscache(inode, page);
else if (!PageError(page) && !PagePrivate(page))
generic_error_remove_page(mapping, page);
unlock_page(page);
@@ -305,6 +305,12 @@ readpage_async_filler(void *data, struct page *page)
aligned_len = min_t(unsigned int, ALIGN(len, rsize), PAGE_SIZE);
+ if (!IS_SYNC(page->mapping->host)) {
+ error = nfs_readpage_from_fscache(page->mapping->host, page);
+ if (error == 0)
+ goto out_unlock;
+ }
+
new = nfs_create_request(desc->ctx, page, 0, aligned_len);
if (IS_ERR(new))
goto out_error;
@@ -320,6 +326,7 @@ readpage_async_filler(void *data, struct page *page)
return 0;
out_error:
error = PTR_ERR(new);
+out_unlock:
unlock_page(page);
out:
return error;
@@ -366,12 +373,6 @@ int nfs_readpage(struct file *file, struct page *page)
desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
xchg(&desc.ctx->error, 0);
- if (!IS_SYNC(inode)) {
- ret = nfs_readpage_from_fscache(desc.ctx, inode, page);
- if (ret == 0)
- goto out_wait;
- }
-
nfs_pageio_init_read(&desc.pgio, inode, false,
&nfs_async_read_completion_ops);
@@ -381,7 +382,6 @@ int nfs_readpage(struct file *file, struct page *page)
nfs_pageio_complete_read(&desc.pgio);
ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
-out_wait:
if (!ret) {
ret = wait_on_page_locked_killable(page);
if (!PageUptodate(page) && !ret)
@@ -419,14 +419,6 @@ int nfs_readpages(struct file *file, struct address_space *mapping,
} else
desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
- /* attempt to read as many of the pages as possible from the cache
- * - this returns -ENOBUFS immediately if the cookie is negative
- */
- ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
- pages, &nr_pages);
- if (ret == 0)
- goto read_complete; /* all pages were read */
-
nfs_pageio_init_read(&desc.pgio, inode, false,
&nfs_async_read_completion_ops);
@@ -434,7 +426,6 @@ int nfs_readpages(struct file *file, struct address_space *mapping,
nfs_pageio_complete_read(&desc.pgio);
-read_complete:
put_nfs_open_context(desc.ctx);
out:
trace_nfs_aop_readahead_done(inode, nr_pages, ret);
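[Note: the fscache read attempt moves from nfs_readpage()/nfs_readpages() into readpage_async_filler(), so every page, whether from readpage or readahead, tries the cache before a request is created. The shape of that per-page decision, sketched with function pointers standing in for the fscache and network paths:]

/* Per-page decision, as now made inside readpage_async_filler(). */
static int fill_page(int sync_io,
		     int (*from_cache)(void), int (*from_net)(void))
{
	if (!sync_io && from_cache() == 0)
		return 0;	/* cache satisfied the page */
	return from_net();	/* fall through to an NFS READ */
}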
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 3aced401735c..6ab5eeb000dc 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1204,42 +1204,42 @@ static int nfs_compare_super(struct super_block *sb, struct fs_context *fc)
}
#ifdef CONFIG_NFS_FSCACHE
-static void nfs_get_cache_cookie(struct super_block *sb,
- struct nfs_fs_context *ctx)
+static int nfs_get_cache_cookie(struct super_block *sb,
+ struct nfs_fs_context *ctx)
{
struct nfs_server *nfss = NFS_SB(sb);
char *uniq = NULL;
int ulen = 0;
- nfss->fscache_key = NULL;
nfss->fscache = NULL;
if (!ctx)
- return;
+ return 0;
if (ctx->clone_data.sb) {
struct nfs_server *mnt_s = NFS_SB(ctx->clone_data.sb);
if (!(mnt_s->options & NFS_OPTION_FSCACHE))
- return;
- if (mnt_s->fscache_key) {
- uniq = mnt_s->fscache_key->key.uniquifier;
- ulen = mnt_s->fscache_key->key.uniq_len;
+ return 0;
+ if (mnt_s->fscache_uniq) {
+ uniq = mnt_s->fscache_uniq;
+ ulen = strlen(uniq);
}
} else {
if (!(ctx->options & NFS_OPTION_FSCACHE))
- return;
+ return 0;
if (ctx->fscache_uniq) {
uniq = ctx->fscache_uniq;
ulen = strlen(ctx->fscache_uniq);
}
}
- nfs_fscache_get_super_cookie(sb, uniq, ulen);
+ return nfs_fscache_get_super_cookie(sb, uniq, ulen);
}
#else
-static void nfs_get_cache_cookie(struct super_block *sb,
- struct nfs_fs_context *ctx)
+static int nfs_get_cache_cookie(struct super_block *sb,
+ struct nfs_fs_context *ctx)
{
+ return 0;
}
#endif
@@ -1299,7 +1299,9 @@ int nfs_get_tree_common(struct fs_context *fc)
s->s_blocksize_bits = bsize;
s->s_blocksize = 1U << bsize;
}
- nfs_get_cache_cookie(s, ctx);
+ error = nfs_get_cache_cookie(s, ctx);
+ if (error < 0)
+ goto error_splat_super;
}
error = nfs_get_root(s, fc);
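[Note: nfs_get_cache_cookie() changes from void to int so a cookie acquisition failure (for example a duplicate uniquifier) can abort the mount via error_splat_super rather than being silently ignored. A sketch of the propagation, using hypothetical stub helpers:]

static int get_cache_cookie(void) { return 0; }	/* 0 or a negative errno */
static int get_root(void) { return 0; }

static int get_tree(void)
{
	int error = get_cache_cookie();	/* was a void call before this change */

	if (error < 0)
		return error;		/* unwinds via error_splat_super */
	return get_root();
}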
diff --git a/fs/nfs/sysfs.c b/fs/nfs/sysfs.c
index 8cb70755e3c9..a6f740366963 100644
--- a/fs/nfs/sysfs.c
+++ b/fs/nfs/sysfs.c
@@ -142,10 +142,11 @@ static struct attribute *nfs_netns_client_attrs[] = {
&nfs_netns_client_id.attr,
NULL,
};
+ATTRIBUTE_GROUPS(nfs_netns_client);
static struct kobj_type nfs_netns_client_type = {
.release = nfs_netns_client_release,
- .default_attrs = nfs_netns_client_attrs,
+ .default_groups = nfs_netns_client_groups,
.sysfs_ops = &kobj_sysfs_ops,
.namespace = nfs_netns_client_namespace,
};
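[Note: default_attrs was removed from struct kobj_type upstream in favour of default_groups, and ATTRIBUTE_GROUPS(nfs_netns_client) generates the glue from the existing attribute array. The macro's expansion is roughly the following; this is a sketch, not the exact preprocessor output:]

static const struct attribute_group nfs_netns_client_group = {
	.attrs = nfs_netns_client_attrs,
};

static const struct attribute_group *nfs_netns_client_groups[] = {
	&nfs_netns_client_group,
	NULL,	/* ->default_groups expects a NULL-terminated list */
};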
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 9b7619ce17a7..987a187bd39a 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -294,6 +294,7 @@ static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int c
nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
spin_unlock(&inode->i_lock);
+ nfs_fscache_invalidate(inode, 0);
}
/* A writeback failed: mark the page as bad, and invalidate the page cache */
@@ -2125,8 +2126,11 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
if (PagePrivate(page))
return -EBUSY;
- if (!nfs_fscache_release_page(page, GFP_KERNEL))
- return -EBUSY;
+ if (PageFsCache(page)) {
+ if (mode == MIGRATE_ASYNC)
+ return -EBUSY;
+ wait_on_page_fscache(page);
+ }
return migrate_page(mapping, newpage, page, mode);
}
diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c
index fdf89fcf1a0c..8bc807c5fea4 100644
--- a/fs/nfsd/filecache.c
+++ b/fs/nfsd/filecache.c
@@ -44,12 +44,9 @@ struct nfsd_fcache_bucket {
static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);
struct nfsd_fcache_disposal {
- struct list_head list;
struct work_struct work;
- struct net *net;
spinlock_t lock;
struct list_head freeme;
- struct rcu_head rcu;
};
static struct workqueue_struct *nfsd_filecache_wq __read_mostly;
@@ -62,8 +59,6 @@ static long nfsd_file_lru_flags;
static struct fsnotify_group *nfsd_file_fsnotify_group;
static atomic_long_t nfsd_filecache_count;
static struct delayed_work nfsd_filecache_laundrette;
-static DEFINE_SPINLOCK(laundrette_lock);
-static LIST_HEAD(laundrettes);
static void nfsd_file_gc(void);
@@ -194,7 +189,6 @@ nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
__set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
}
nf->nf_mark = NULL;
- init_rwsem(&nf->nf_rwsem);
trace_nfsd_file_alloc(nf);
}
return nf;
@@ -249,7 +243,7 @@ nfsd_file_do_unhash(struct nfsd_file *nf)
trace_nfsd_file_unhash(nf);
if (nfsd_file_check_write_error(nf))
- nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id));
+ nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
--nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
hlist_del_rcu(&nf->nf_node);
atomic_long_dec(&nfsd_filecache_count);
@@ -367,19 +361,13 @@ nfsd_file_list_remove_disposal(struct list_head *dst,
static void
nfsd_file_list_add_disposal(struct list_head *files, struct net *net)
{
- struct nfsd_fcache_disposal *l;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct nfsd_fcache_disposal *l = nn->fcache_disposal;
- rcu_read_lock();
- list_for_each_entry_rcu(l, &laundrettes, list) {
- if (l->net == net) {
- spin_lock(&l->lock);
- list_splice_tail_init(files, &l->freeme);
- spin_unlock(&l->lock);
- queue_work(nfsd_filecache_wq, &l->work);
- break;
- }
- }
- rcu_read_unlock();
+ spin_lock(&l->lock);
+ list_splice_tail_init(files, &l->freeme);
+ spin_unlock(&l->lock);
+ queue_work(nfsd_filecache_wq, &l->work);
}
static void
@@ -755,7 +743,7 @@ nfsd_file_cache_purge(struct net *net)
}
static struct nfsd_fcache_disposal *
-nfsd_alloc_fcache_disposal(struct net *net)
+nfsd_alloc_fcache_disposal(void)
{
struct nfsd_fcache_disposal *l;
@@ -763,7 +751,6 @@ nfsd_alloc_fcache_disposal(struct net *net)
if (!l)
return NULL;
INIT_WORK(&l->work, nfsd_file_delayed_close);
- l->net = net;
spin_lock_init(&l->lock);
INIT_LIST_HEAD(&l->freeme);
return l;
@@ -772,61 +759,27 @@ nfsd_alloc_fcache_disposal(struct net *net)
static void
nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l)
{
- rcu_assign_pointer(l->net, NULL);
cancel_work_sync(&l->work);
nfsd_file_dispose_list(&l->freeme);
- kfree_rcu(l, rcu);
-}
-
-static void
-nfsd_add_fcache_disposal(struct nfsd_fcache_disposal *l)
-{
- spin_lock(&laundrette_lock);
- list_add_tail_rcu(&l->list, &laundrettes);
- spin_unlock(&laundrette_lock);
-}
-
-static void
-nfsd_del_fcache_disposal(struct nfsd_fcache_disposal *l)
-{
- spin_lock(&laundrette_lock);
- list_del_rcu(&l->list);
- spin_unlock(&laundrette_lock);
-}
-
-static int
-nfsd_alloc_fcache_disposal_net(struct net *net)
-{
- struct nfsd_fcache_disposal *l;
-
- l = nfsd_alloc_fcache_disposal(net);
- if (!l)
- return -ENOMEM;
- nfsd_add_fcache_disposal(l);
- return 0;
+ kfree(l);
}
static void
nfsd_free_fcache_disposal_net(struct net *net)
{
- struct nfsd_fcache_disposal *l;
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct nfsd_fcache_disposal *l = nn->fcache_disposal;
- rcu_read_lock();
- list_for_each_entry_rcu(l, &laundrettes, list) {
- if (l->net != net)
- continue;
- nfsd_del_fcache_disposal(l);
- rcu_read_unlock();
- nfsd_free_fcache_disposal(l);
- return;
- }
- rcu_read_unlock();
+ nfsd_free_fcache_disposal(l);
}
int
nfsd_file_cache_start_net(struct net *net)
{
- return nfsd_alloc_fcache_disposal_net(net);
+ struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+
+ nn->fcache_disposal = nfsd_alloc_fcache_disposal();
+ return nn->fcache_disposal ? 0 : -ENOMEM;
}
void
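[Note: the laundrette bookkeeping collapses from a global RCU list, searched by ->net on every disposal, to a direct pointer in struct nfsd_net. A toy comparison of the two lookups; all names here are illustrative:]

#include <stddef.h>

struct disposal {
	struct disposal *next;	/* old scheme: entry on a global list */
	void *net;
};

/* Old lookup (sketch): walk the global laundrette list under RCU
 * until the entry whose ->net matches is found. */
static struct disposal *lookup_old(struct disposal *head, void *net)
{
	for (; head; head = head->next)
		if (head->net == net)
			return head;
	return NULL;
}

/* New lookup: the per-namespace struct carries the pointer directly. */
struct nfsd_netns_sketch { struct disposal *fcache_disposal; };

static struct disposal *lookup_new(struct nfsd_netns_sketch *nn)
{
	return nn->fcache_disposal;
}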
diff --git a/fs/nfsd/filecache.h b/fs/nfsd/filecache.h
index 7872df5a0fe3..435ceab27897 100644
--- a/fs/nfsd/filecache.h
+++ b/fs/nfsd/filecache.h
@@ -46,7 +46,6 @@ struct nfsd_file {
refcount_t nf_ref;
unsigned char nf_may;
struct nfsd_file_mark *nf_mark;
- struct rw_semaphore nf_rwsem;
};
int nfsd_file_cache_init(void);
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 935c1028c217..1b1a962a1804 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -11,6 +11,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/percpu_counter.h>
+#include <linux/siphash.h>
/* Hash tables for nfs4_clientid state */
#define CLIENT_HASH_BITS 4
@@ -108,9 +109,8 @@ struct nfsd_net {
bool nfsd_net_up;
bool lockd_up;
- /* Time of server startup */
- struct timespec64 nfssvc_boot;
- seqlock_t boot_lock;
+ seqlock_t writeverf_lock;
+ unsigned char writeverf[8];
/*
* Max number of connections this nfsd container will allow. Defaults
@@ -123,12 +123,13 @@ struct nfsd_net {
u32 clverifier_counter;
struct svc_serv *nfsd_serv;
-
- wait_queue_head_t ntf_wq;
- atomic_t ntf_refcnt;
-
- /* Allow umount to wait for nfsd state cleanup */
- struct completion nfsd_shutdown_complete;
+ /* When a listening socket is added to nfsd, keep_active is set
+ * and this justifies a reference on nfsd_serv. This stops
+ * nfsd_serv from being freed. When the number of threads is
+ * set, keep_active is cleared and the reference is dropped. So
+ * when the last thread exits, the service will be destroyed.
+ */
+ int keep_active;
/*
* clientid and stateid data for construction of net unique COPY
@@ -184,6 +185,10 @@ struct nfsd_net {
/* utsname taken from the process that starts the server */
char nfsd_name[UNX_MAXNODENAME+1];
+
+ struct nfsd_fcache_disposal *fcache_disposal;
+
+ siphash_key_t siphash_key;
};
/* Simple check to find out if a given net was properly initialized */
@@ -193,6 +198,6 @@ extern void nfsd_netns_free_versions(struct nfsd_net *nn);
extern unsigned int nfsd_net_id;
-void nfsd_copy_boot_verifier(__be32 verf[2], struct nfsd_net *nn);
-void nfsd_reset_boot_verifier(struct nfsd_net *nn);
+void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn);
+void nfsd_reset_write_verifier(struct nfsd_net *nn);
#endif /* __NFSD_NETNS_H__ */
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 15dac36ca852..936eebd4c56d 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -150,13 +150,17 @@ nfsd3_proc_read(struct svc_rqst *rqstp)
unsigned int len;
int v;
- argp->count = min_t(u32, argp->count, max_blocksize);
-
dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n",
SVCFH_fmt(&argp->fh),
(unsigned long) argp->count,
(unsigned long long) argp->offset);
+ argp->count = min_t(u32, argp->count, max_blocksize);
+ if (argp->offset > (u64)OFFSET_MAX)
+ argp->offset = (u64)OFFSET_MAX;
+ if (argp->offset + argp->count > (u64)OFFSET_MAX)
+ argp->count = (u64)OFFSET_MAX - argp->offset;
+
v = 0;
len = argp->count;
resp->pages = rqstp->rq_next_page;
@@ -199,18 +203,19 @@ nfsd3_proc_write(struct svc_rqst *rqstp)
(unsigned long long) argp->offset,
argp->stable? " stable" : "");
+ resp->status = nfserr_fbig;
+ if (argp->offset > (u64)OFFSET_MAX ||
+ argp->offset + argp->len > (u64)OFFSET_MAX)
+ return rpc_success;
+
fh_copy(&resp->fh, &argp->fh);
resp->committed = argp->stable;
nvecs = svc_fill_write_vector(rqstp, &argp->payload);
- if (!nvecs) {
- resp->status = nfserr_io;
- goto out;
- }
+
resp->status = nfsd_write(rqstp, &resp->fh, argp->offset,
rqstp->rq_vec, nvecs, &cnt,
resp->committed, resp->verf);
resp->count = cnt;
-out:
return rpc_success;
}
@@ -655,15 +660,9 @@ nfsd3_proc_commit(struct svc_rqst *rqstp)
argp->count,
(unsigned long long) argp->offset);
- if (argp->offset > NFS_OFFSET_MAX) {
- resp->status = nfserr_inval;
- goto out;
- }
-
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_commit(rqstp, &resp->fh, argp->offset,
argp->count, resp->verf);
-out:
return rpc_success;
}
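[Note: instead of rejecting reads and writes beyond the maximum file offset, the v3 (and, further down, v4) procedures now clamp offset and count so their sum stays within the signed loff_t range. A userspace sketch of the clamp, with LLONG_MAX approximating OFFSET_MAX:]

#include <stdint.h>
#include <limits.h>

/* count is at most 32 bits, so once offset is clamped the addition
 * below cannot wrap a u64. */
static void clamp_io_args(uint64_t *offset, uint32_t *count)
{
	const uint64_t max = (uint64_t)LLONG_MAX;

	if (*offset > max)
		*offset = max;
	if (*offset + *count > max)
		*count = max - *offset;
}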
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index c3ac1b6aa3aa..0293b8d65f10 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -254,7 +254,7 @@ svcxdr_decode_sattr3(struct svc_rqst *rqstp, struct xdr_stream *xdr,
if (xdr_stream_decode_u64(xdr, &newsize) < 0)
return false;
iap->ia_valid |= ATTR_SIZE;
- iap->ia_size = min_t(u64, newsize, NFS_OFFSET_MAX);
+ iap->ia_size = newsize;
}
if (xdr_stream_decode_u32(xdr, &set_it) < 0)
return false;
@@ -487,71 +487,6 @@ neither:
return true;
}
-static bool fs_supports_change_attribute(struct super_block *sb)
-{
- return sb->s_flags & SB_I_VERSION || sb->s_export_op->fetch_iversion;
-}
-
-/*
- * Fill in the pre_op attr for the wcc data
- */
-void fill_pre_wcc(struct svc_fh *fhp)
-{
- struct inode *inode;
- struct kstat stat;
- bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
-
- if (fhp->fh_no_wcc || fhp->fh_pre_saved)
- return;
- inode = d_inode(fhp->fh_dentry);
- if (fs_supports_change_attribute(inode->i_sb) || !v4) {
- __be32 err = fh_getattr(fhp, &stat);
-
- if (err) {
- /* Grab the times from inode anyway */
- stat.mtime = inode->i_mtime;
- stat.ctime = inode->i_ctime;
- stat.size = inode->i_size;
- }
- fhp->fh_pre_mtime = stat.mtime;
- fhp->fh_pre_ctime = stat.ctime;
- fhp->fh_pre_size = stat.size;
- }
- if (v4)
- fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode);
-
- fhp->fh_pre_saved = true;
-}
-
-/*
- * Fill in the post_op attr for the wcc data
- */
-void fill_post_wcc(struct svc_fh *fhp)
-{
- bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
- struct inode *inode = d_inode(fhp->fh_dentry);
-
- if (fhp->fh_no_wcc)
- return;
-
- if (fhp->fh_post_saved)
- printk("nfsd: inode locked twice during operation.\n");
-
- fhp->fh_post_saved = true;
-
- if (fs_supports_change_attribute(inode->i_sb) || !v4) {
- __be32 err = fh_getattr(fhp, &fhp->fh_post_attr);
-
- if (err) {
- fhp->fh_post_saved = false;
- fhp->fh_post_attr.ctime = inode->i_ctime;
- }
- }
- if (v4)
- fhp->fh_post_change =
- nfsd4_change_attribute(&fhp->fh_post_attr, inode);
-}
-
/*
* XDR decode functions
*/
@@ -1125,7 +1060,7 @@ svcxdr_encode_entry3_common(struct nfsd3_readdirres *resp, const char *name,
return false;
/* cookie */
resp->cookie_offset = dirlist->len;
- if (xdr_stream_encode_u64(xdr, NFS_OFFSET_MAX) < 0)
+ if (xdr_stream_encode_u64(xdr, OFFSET_MAX) < 0)
return false;
return true;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index a36261f89bdf..b207c76a873f 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -598,7 +598,7 @@ static void gen_boot_verifier(nfs4_verifier *verifier, struct net *net)
BUILD_BUG_ON(2*sizeof(*verf) != sizeof(verifier->data));
- nfsd_copy_boot_verifier(verf, net_generic(net, nfsd_net_id));
+ nfsd_copy_write_verifier(verf, net_generic(net, nfsd_net_id));
}
static __be32
@@ -782,12 +782,16 @@ nfsd4_read(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
__be32 status;
read->rd_nf = NULL;
- if (read->rd_offset >= OFFSET_MAX)
- return nfserr_inval;
trace_nfsd_read_start(rqstp, &cstate->current_fh,
read->rd_offset, read->rd_length);
+ read->rd_length = min_t(u32, read->rd_length, svc_max_payload(rqstp));
+ if (read->rd_offset > (u64)OFFSET_MAX)
+ read->rd_offset = (u64)OFFSET_MAX;
+ if (read->rd_offset + read->rd_length > (u64)OFFSET_MAX)
+ read->rd_length = (u64)OFFSET_MAX - read->rd_offset;
+
/*
* If we do a zero copy read, then a client will see read data
* that reflects the state of the file *after* performing the
@@ -1018,8 +1022,9 @@ nfsd4_write(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
unsigned long cnt;
int nvecs;
- if (write->wr_offset >= OFFSET_MAX)
- return nfserr_inval;
+ if (write->wr_offset > (u64)OFFSET_MAX ||
+ write->wr_offset + write->wr_buflen > (u64)OFFSET_MAX)
+ return nfserr_fbig;
cnt = write->wr_buflen;
trace_nfsd_write_start(rqstp, &cstate->current_fh,
@@ -1101,7 +1106,7 @@ nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status)
goto out;
- status = nfsd4_clone_file_range(src, clone->cl_src_pos,
+ status = nfsd4_clone_file_range(rqstp, src, clone->cl_src_pos,
dst, clone->cl_dst_pos, clone->cl_count,
EX_ISSYNC(cstate->current_fh.fh_export));
@@ -1510,11 +1515,14 @@ static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
{
+ struct file *dst = copy->nf_dst->nf_file;
+ struct file *src = copy->nf_src->nf_file;
+ errseq_t since;
ssize_t bytes_copied = 0;
u64 bytes_total = copy->cp_count;
u64 src_pos = copy->cp_src_pos;
u64 dst_pos = copy->cp_dst_pos;
- __be32 status;
+ int status;
/* See RFC 7862 p.67: */
if (bytes_total == 0)
@@ -1522,9 +1530,8 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
do {
if (kthread_should_stop())
break;
- bytes_copied = nfsd_copy_file_range(copy->nf_src->nf_file,
- src_pos, copy->nf_dst->nf_file, dst_pos,
- bytes_total);
+ bytes_copied = nfsd_copy_file_range(src, src_pos, dst, dst_pos,
+ bytes_total);
if (bytes_copied <= 0)
break;
bytes_total -= bytes_copied;
@@ -1534,11 +1541,11 @@ static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
} while (bytes_total > 0 && !copy->cp_synchronous);
/* for a non-zero asynchronous copy do a commit of data */
if (!copy->cp_synchronous && copy->cp_res.wr_bytes_written > 0) {
- down_write(&copy->nf_dst->nf_rwsem);
- status = vfs_fsync_range(copy->nf_dst->nf_file,
- copy->cp_dst_pos,
+ since = READ_ONCE(dst->f_wb_err);
+ status = vfs_fsync_range(dst, copy->cp_dst_pos,
copy->cp_res.wr_bytes_written, 0);
- up_write(&copy->nf_dst->nf_rwsem);
+ if (!status)
+ status = filemap_check_wb_err(dst->f_mapping, since);
if (!status)
copy->committed = true;
}
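[Note: the per-file rw_semaphore around the async-copy commit is replaced by errseq_t sampling: record the mapping's error cursor before vfs_fsync_range() and compare it afterwards, so writeback errors raised in that window are still reported without serializing concurrent copies. A loose userspace analogue; errseq_t itself is kernel-only, so a plain counter stands in:]

#include <stdint.h>

struct mapping_sketch { uint32_t wb_err; };

static int commit_copy(struct mapping_sketch *m, int (*fsync_range)(void))
{
	uint32_t since = m->wb_err;	/* READ_ONCE(dst->f_wb_err) */
	int status = fsync_range();	/* vfs_fsync_range(...) */

	if (!status && m->wb_err != since)
		status = -5;		/* filemap_check_wb_err() -> -EIO */
	return status;
}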
@@ -2528,7 +2535,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp)
goto encode_op;
}
- fh_clear_wcc(current_fh);
+ fh_clear_pre_post_attrs(current_fh);
/* If op is non-idempotent */
if (op->opdesc->op_flags & OP_MODIFIES_SOMETHING) {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 1956d377d1a6..32063733443d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -246,6 +246,7 @@ find_blocked_lock(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
list_for_each_entry(cur, &lo->lo_blocked, nbl_list) {
if (fh_match(fh, &cur->nbl_fh)) {
list_del_init(&cur->nbl_list);
+ WARN_ON(list_empty(&cur->nbl_lru));
list_del_init(&cur->nbl_lru);
found = cur;
break;
@@ -271,6 +272,7 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
INIT_LIST_HEAD(&nbl->nbl_lru);
fh_copy_shallow(&nbl->nbl_fh, fh);
locks_init_lock(&nbl->nbl_lock);
+ kref_init(&nbl->nbl_kref);
nfsd4_init_cb(&nbl->nbl_cb, lo->lo_owner.so_client,
&nfsd4_cb_notify_lock_ops,
NFSPROC4_CLNT_CB_NOTIFY_LOCK);
@@ -280,11 +282,20 @@ find_or_allocate_block(struct nfs4_lockowner *lo, struct knfsd_fh *fh,
}
static void
+free_nbl(struct kref *kref)
+{
+ struct nfsd4_blocked_lock *nbl;
+
+ nbl = container_of(kref, struct nfsd4_blocked_lock, nbl_kref);
+ kfree(nbl);
+}
+
+static void
free_blocked_lock(struct nfsd4_blocked_lock *nbl)
{
locks_delete_block(&nbl->nbl_lock);
locks_release_private(&nbl->nbl_lock);
- kfree(nbl);
+ kref_put(&nbl->nbl_kref, free_nbl);
}
static void
@@ -302,6 +313,7 @@ remove_blocked_locks(struct nfs4_lockowner *lo)
struct nfsd4_blocked_lock,
nbl_list);
list_del_init(&nbl->nbl_list);
+ WARN_ON(list_empty(&nbl->nbl_lru));
list_move(&nbl->nbl_lru, &reaplist);
}
spin_unlock(&nn->blocked_locks_lock);
@@ -360,11 +372,13 @@ static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
* st_{access,deny}_bmap field of the stateid, in order to track not
* only what share bits are currently in force, but also what
* combinations of share bits previous opens have used. This allows us
- * to enforce the recommendation of rfc 3530 14.2.19 that the server
- * return an error if the client attempt to downgrade to a combination
- * of share bits not explicable by closing some of its previous opens.
+ * to enforce the recommendation in
+ * https://datatracker.ietf.org/doc/html/rfc7530#section-16.19.4 that
+ * the server return an error if the client attempts to downgrade to a
+ * combination of share bits not explicable by closing some of its
+ * previous opens.
*
- * XXX: This enforcement is actually incomplete, since we don't keep
+ * This enforcement is arguably incomplete, since we don't keep
* track of access/deny bit combinations; so, e.g., we allow:
*
* OPEN allow read, deny write
@@ -372,6 +386,10 @@ static const struct nfsd4_callback_ops nfsd4_cb_notify_lock_ops = {
* DOWNGRADE allow read, deny none
*
* which we should reject.
+ *
+ * But you could also argue that our current code is already overkill,
+ * since it only exists to return NFS4ERR_INVAL on incorrect client
+ * behavior.
*/
static unsigned int
bmap_to_share_mode(unsigned long bmap)
@@ -4112,8 +4130,10 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
status = nfserr_clid_inuse;
if (client_has_state(old)
&& !same_creds(&unconf->cl_cred,
- &old->cl_cred))
+ &old->cl_cred)) {
+ old = NULL;
goto out;
+ }
status = mark_client_expired_locked(old);
if (status) {
old = NULL;
@@ -6040,7 +6060,11 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
*nfp = NULL;
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid)) {
- status = check_special_stateids(net, fhp, stateid, flags);
+ if (cstid)
+ status = nfserr_bad_stateid;
+ else
+ status = check_special_stateids(net, fhp, stateid,
+ flags);
goto done;
}
@@ -6836,7 +6860,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd4_blocked_lock *nbl = NULL;
struct file_lock *file_lock = NULL;
struct file_lock *conflock = NULL;
- struct super_block *sb;
__be32 status = 0;
int lkflg;
int err;
@@ -6858,7 +6881,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
dprintk("NFSD: nfsd4_lock: permission denied!\n");
return status;
}
- sb = cstate->current_fh.fh_dentry->d_sb;
if (lock->lk_is_new) {
if (nfsd4_has_session(cstate))
@@ -6910,8 +6932,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
fp = lock_stp->st_stid.sc_file;
switch (lock->lk_type) {
case NFS4_READW_LT:
- if (nfsd4_has_session(cstate) &&
- !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
+ if (nfsd4_has_session(cstate))
fl_flags |= FL_SLEEP;
fallthrough;
case NFS4_READ_LT:
@@ -6923,8 +6944,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
fl_type = F_RDLCK;
break;
case NFS4_WRITEW_LT:
- if (nfsd4_has_session(cstate) &&
- !(sb->s_export_op->flags & EXPORT_OP_SYNC_LOCKS))
+ if (nfsd4_has_session(cstate))
fl_flags |= FL_SLEEP;
fallthrough;
case NFS4_WRITE_LT:
@@ -6945,6 +6965,16 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
}
+ /*
+ * Most filesystems with their own ->lock operations will block
+ * the nfsd thread waiting to acquire the lock. That leads to
+ * deadlocks (we don't want every nfsd thread tied up waiting
+ * for file locks), so don't attempt blocking lock notifications
+ * on those filesystems:
+ */
+ if (nf->nf_file->f_op->lock)
+ fl_flags &= ~FL_SLEEP;
+
nbl = find_or_allocate_block(lock_sop, &fp->fi_fhandle, nn);
if (!nbl) {
dprintk("NFSD: %s: unable to allocate block!\n", __func__);
@@ -6975,6 +7005,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
spin_lock(&nn->blocked_locks_lock);
list_add_tail(&nbl->nbl_list, &lock_sop->lo_blocked);
list_add_tail(&nbl->nbl_lru, &nn->blocked_locks_lru);
+ kref_get(&nbl->nbl_kref);
spin_unlock(&nn->blocked_locks_lock);
}
@@ -6987,6 +7018,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nn->somebody_reclaimed = true;
break;
case FILE_LOCK_DEFERRED:
+ kref_put(&nbl->nbl_kref, free_nbl);
nbl = NULL;
fallthrough;
case -EAGAIN: /* conflock holds conflicting lock */
@@ -7007,8 +7039,13 @@ out:
/* dequeue it if we queued it before */
if (fl_flags & FL_SLEEP) {
spin_lock(&nn->blocked_locks_lock);
- list_del_init(&nbl->nbl_list);
- list_del_init(&nbl->nbl_lru);
+ if (!list_empty(&nbl->nbl_list) &&
+ !list_empty(&nbl->nbl_lru)) {
+ list_del_init(&nbl->nbl_list);
+ list_del_init(&nbl->nbl_lru);
+ kref_put(&nbl->nbl_kref, free_nbl);
+ }
+ /* otherwise nbl is still linked into the reaplist via one of these lists */
spin_unlock(&nn->blocked_locks_lock);
}
free_blocked_lock(nbl);
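[Note: blocked locks become reference counted above: one reference for the lockowner's list, an extra one while the lock also sits on the LRU, with the kfree() deferred to the final kref_put(). A standalone toy of the same lifetime rule using C11 atomics:]

#include <stdatomic.h>
#include <stdlib.h>

struct blocked_sketch { atomic_int refs; };

static struct blocked_sketch *blocked_alloc(void)
{
	struct blocked_sketch *b = calloc(1, sizeof(*b));

	if (b)
		atomic_init(&b->refs, 1);	/* kref_init(): owner's reference */
	return b;
}

static void blocked_get(struct blocked_sketch *b)
{
	atomic_fetch_add(&b->refs, 1);		/* kref_get(): e.g. on LRU insert */
}

static void blocked_put(struct blocked_sketch *b)
{
	if (atomic_fetch_sub(&b->refs, 1) == 1)	/* last reference dropped */
		free(b);			/* free_nbl() */
}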
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 5a93a5db4fb0..714a3a3bd50c 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -277,21 +277,10 @@ nfsd4_decode_verifier4(struct nfsd4_compoundargs *argp, nfs4_verifier *verf)
static __be32
nfsd4_decode_bitmap4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen)
{
- u32 i, count;
- __be32 *p;
-
- if (xdr_stream_decode_u32(argp->xdr, &count) < 0)
- return nfserr_bad_xdr;
- /* request sanity */
- if (count > 1000)
- return nfserr_bad_xdr;
- p = xdr_inline_decode(argp->xdr, count << 2);
- if (!p)
- return nfserr_bad_xdr;
- for (i = 0; i < bmlen; i++)
- bmval[i] = (i < count) ? be32_to_cpup(p++) : 0;
+ ssize_t status;
- return nfs_ok;
+ status = xdr_stream_decode_uint32_array(argp->xdr, bmval, bmlen);
+ return status == -EBADMSG ? nfserr_bad_xdr : nfs_ok;
}
static __be32
@@ -3506,7 +3495,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
p = xdr_reserve_space(xdr, 3*4 + namlen);
if (!p)
goto fail;
- p = xdr_encode_hyper(p, NFS_OFFSET_MAX); /* offset of next entry */
+ p = xdr_encode_hyper(p, OFFSET_MAX); /* offset of next entry */
p = xdr_encode_array(p, name, namlen); /* name length & name */
nfserr = nfsd4_encode_dirent_fattr(xdr, cd, name, namlen);
@@ -3997,10 +3986,8 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
}
xdr_commit_encode(xdr);
- maxcount = svc_max_payload(resp->rqstp);
- maxcount = min_t(unsigned long, maxcount,
+ maxcount = min_t(unsigned long, read->rd_length,
(xdr->buf->buflen - xdr->buf->len));
- maxcount = min_t(unsigned long, maxcount, read->rd_length);
if (file->f_op->splice_read &&
test_bit(RQ_SPLICE_OK, &resp->rqstp->rq_flags))
@@ -4804,8 +4791,8 @@ nfsd4_encode_read_plus_hole(struct nfsd4_compoundres *resp,
return nfserr_resource;
*p++ = htonl(NFS4_CONTENT_HOLE);
- p = xdr_encode_hyper(p, read->rd_offset);
- p = xdr_encode_hyper(p, count);
+ p = xdr_encode_hyper(p, read->rd_offset);
+ p = xdr_encode_hyper(p, count);
*eof = (read->rd_offset + count) >= f_size;
*maxcount = min_t(unsigned long, count, *maxcount);
@@ -4837,10 +4824,8 @@ nfsd4_encode_read_plus(struct nfsd4_compoundres *resp, __be32 nfserr,
return nfserr_resource;
xdr_commit_encode(xdr);
- maxcount = svc_max_payload(resp->rqstp);
- maxcount = min_t(unsigned long, maxcount,
+ maxcount = min_t(unsigned long, read->rd_length,
(xdr->buf->buflen - xdr->buf->len));
- maxcount = min_t(unsigned long, maxcount, read->rd_length);
count = maxcount;
eof = read->rd_offset >= i_size_read(file_inode(file));
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 6e0b6f3148dc..a4a69ab6ab28 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -87,7 +87,7 @@ nfsd_hashsize(unsigned int limit)
static u32
nfsd_cache_hash(__be32 xid, struct nfsd_net *nn)
{
- return hash_32(be32_to_cpu(xid), nn->maskbits);
+ return hash_32((__force u32)xid, nn->maskbits);
}
static struct svc_cacherep *
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 51a49e0cfe37..68b020f2002b 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -742,13 +742,12 @@ static ssize_t __write_ports_addfd(char *buf, struct net *net, const struct cred
return err;
err = svc_addsock(nn->nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT, cred);
- if (err < 0) {
- nfsd_destroy(net);
- return err;
- }
- /* Decrease the count, but don't shut down the service */
- nn->nfsd_serv->sv_nrthreads--;
+ if (err >= 0 &&
+ !nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+ svc_get(nn->nfsd_serv);
+
+ nfsd_put(net);
return err;
}
@@ -783,8 +782,10 @@ static ssize_t __write_ports_addxprt(char *buf, struct net *net, const struct cr
if (err < 0 && err != -EAFNOSUPPORT)
goto out_close;
- /* Decrease the count, but don't shut down the service */
- nn->nfsd_serv->sv_nrthreads--;
+ if (!nn->nfsd_serv->sv_nrthreads && !xchg(&nn->keep_active, 1))
+ svc_get(nn->nfsd_serv);
+
+ nfsd_put(net);
return 0;
out_close:
xprt = svc_find_xprt(nn->nfsd_serv, transport, net, PF_INET, port);
@@ -793,10 +794,7 @@ out_close:
svc_xprt_put(xprt);
}
out_err:
- if (!list_empty(&nn->nfsd_serv->sv_permsocks))
- nn->nfsd_serv->sv_nrthreads--;
- else
- nfsd_destroy(net);
+ nfsd_put(net);
return err;
}
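[Note: adding a socket while no threads are running now pins the service with one extra reference, and xchg(&nn->keep_active, 1) guarantees only the first such caller takes it. A sketch of that once-only handshake with C11 atomics:]

#include <stdatomic.h>

static atomic_int keep_active;

/* Exactly one winner of the exchange pins the service. */
static void maybe_pin_serv(int nrthreads, void (*svc_get)(void))
{
	if (!nrthreads && !atomic_exchange(&keep_active, 1))
		svc_get();
}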
@@ -1249,7 +1247,8 @@ static void nfsdfs_remove_file(struct inode *dir, struct dentry *dentry)
clear_ncl(d_inode(dentry));
dget(dentry);
ret = simple_unlink(dir, dentry);
- d_delete(dentry);
+ d_drop(dentry);
+ fsnotify_unlink(dir, dentry);
dput(dentry);
WARN_ON_ONCE(ret);
}
@@ -1340,8 +1339,8 @@ void nfsd_client_rmdir(struct dentry *dentry)
dget(dentry);
ret = simple_rmdir(dir, dentry);
WARN_ON_ONCE(ret);
+ d_drop(dentry);
fsnotify_rmdir(dir, dentry);
- d_delete(dentry);
dput(dentry);
inode_unlock(dir);
}
@@ -1485,9 +1484,8 @@ static __net_init int nfsd_init_net(struct net *net)
nn->clientid_counter = nn->clientid_base + 1;
nn->s2s_cp_cl_id = nn->clientid_counter++;
- atomic_set(&nn->ntf_refcnt, 0);
- init_waitqueue_head(&nn->ntf_wq);
- seqlock_init(&nn->boot_lock);
+ get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
+ seqlock_init(&nn->writeverf_lock);
return 0;
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 498e5a489826..3e5008b475ff 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -97,7 +97,7 @@ int nfsd_pool_stats_open(struct inode *, struct file *);
int nfsd_pool_stats_release(struct inode *, struct file *);
void nfsd_shutdown_threads(struct net *net);
-void nfsd_destroy(struct net *net);
+void nfsd_put(struct net *net);
bool i_am_nfsd(void);
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index f3779fa72c89..145208bcb9bd 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -611,6 +611,70 @@ out_negative:
return nfserr_serverfault;
}
+#ifdef CONFIG_NFSD_V3
+
+/**
+ * fh_fill_pre_attrs - Fill in pre-op attributes
+ * @fhp: file handle to be updated
+ *
+ */
+void fh_fill_pre_attrs(struct svc_fh *fhp)
+{
+ bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
+ struct inode *inode;
+ struct kstat stat;
+ __be32 err;
+
+ if (fhp->fh_no_wcc || fhp->fh_pre_saved)
+ return;
+
+ inode = d_inode(fhp->fh_dentry);
+ err = fh_getattr(fhp, &stat);
+ if (err) {
+ /* Grab the times from inode anyway */
+ stat.mtime = inode->i_mtime;
+ stat.ctime = inode->i_ctime;
+ stat.size = inode->i_size;
+ }
+ if (v4)
+ fhp->fh_pre_change = nfsd4_change_attribute(&stat, inode);
+
+ fhp->fh_pre_mtime = stat.mtime;
+ fhp->fh_pre_ctime = stat.ctime;
+ fhp->fh_pre_size = stat.size;
+ fhp->fh_pre_saved = true;
+}
+
+/**
+ * fh_fill_post_attrs - Fill in post-op attributes
+ * @fhp: file handle to be updated
+ *
+ */
+void fh_fill_post_attrs(struct svc_fh *fhp)
+{
+ bool v4 = (fhp->fh_maxsize == NFS4_FHSIZE);
+ struct inode *inode = d_inode(fhp->fh_dentry);
+ __be32 err;
+
+ if (fhp->fh_no_wcc)
+ return;
+
+ if (fhp->fh_post_saved)
+ printk("nfsd: inode locked twice during operation.\n");
+
+ err = fh_getattr(fhp, &fhp->fh_post_attr);
+ if (err) {
+ fhp->fh_post_saved = false;
+ fhp->fh_post_attr.ctime = inode->i_ctime;
+ } else
+ fhp->fh_post_saved = true;
+ if (v4)
+ fhp->fh_post_change =
+ nfsd4_change_attribute(&fhp->fh_post_attr, inode);
+}
+
+#endif /* CONFIG_NFSD_V3 */
+
/*
* Release a file handle.
*/
@@ -623,7 +687,7 @@ fh_put(struct svc_fh *fhp)
fh_unlock(fhp);
fhp->fh_dentry = NULL;
dput(dentry);
- fh_clear_wcc(fhp);
+ fh_clear_pre_post_attrs(fhp);
}
fh_drop_write(fhp);
if (exp) {
diff --git a/fs/nfsd/nfsfh.h b/fs/nfsd/nfsfh.h
index d11e4b6870d6..434930d8a946 100644
--- a/fs/nfsd/nfsfh.h
+++ b/fs/nfsd/nfsfh.h
@@ -284,12 +284,13 @@ static inline u32 knfsd_fh_hash(const struct knfsd_fh *fh)
#endif
#ifdef CONFIG_NFSD_V3
-/*
- * The wcc data stored in current_fh should be cleared
- * between compound ops.
+
+/**
+ * fh_clear_pre_post_attrs - Reset pre/post attributes
+ * @fhp: file handle to be updated
+ *
*/
-static inline void
-fh_clear_wcc(struct svc_fh *fhp)
+static inline void fh_clear_pre_post_attrs(struct svc_fh *fhp)
{
fhp->fh_post_saved = false;
fhp->fh_pre_saved = false;
@@ -323,13 +324,24 @@ static inline u64 nfsd4_change_attribute(struct kstat *stat,
return time_to_chattr(&stat->ctime);
}
-extern void fill_pre_wcc(struct svc_fh *fhp);
-extern void fill_post_wcc(struct svc_fh *fhp);
-#else
-#define fh_clear_wcc(ignored)
-#define fill_pre_wcc(ignored)
-#define fill_post_wcc(notused)
-#endif /* CONFIG_NFSD_V3 */
+extern void fh_fill_pre_attrs(struct svc_fh *fhp);
+extern void fh_fill_post_attrs(struct svc_fh *fhp);
+
+#else /* !CONFIG_NFSD_V3 */
+
+static inline void fh_clear_pre_post_attrs(struct svc_fh *fhp)
+{
+}
+
+static inline void fh_fill_pre_attrs(struct svc_fh *fhp)
+{
+}
+
+static inline void fh_fill_post_attrs(struct svc_fh *fhp)
+{
+}
+
+#endif /* !CONFIG_NFSD_V3 */
/*
@@ -355,7 +367,7 @@ fh_lock_nested(struct svc_fh *fhp, unsigned int subclass)
inode = d_inode(dentry);
inode_lock_nested(inode, subclass);
- fill_pre_wcc(fhp);
+ fh_fill_pre_attrs(fhp);
fhp->fh_locked = true;
}
@@ -372,7 +384,7 @@ static inline void
fh_unlock(struct svc_fh *fhp)
{
if (fhp->fh_locked) {
- fill_post_wcc(fhp);
+ fh_fill_post_attrs(fhp);
inode_unlock(d_inode(fhp->fh_dentry));
fhp->fh_locked = false;
}
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index de282f3273c5..18b8eb43a19b 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -235,10 +235,6 @@ nfsd_proc_write(struct svc_rqst *rqstp)
argp->len, argp->offset);
nvecs = svc_fill_write_vector(rqstp, &argp->payload);
- if (!nvecs) {
- resp->status = nfserr_io;
- goto out;
- }
resp->status = nfsd_write(rqstp, fh_copy(&resp->fh, &argp->fh),
argp->offset, rqstp->rq_vec, nvecs,
@@ -247,7 +243,6 @@ nfsd_proc_write(struct svc_rqst *rqstp)
resp->status = fh_getattr(&resp->fh, &resp->stat);
else if (resp->status == nfserr_jukebox)
return rpc_drop_reply;
-out:
return rpc_success;
}
@@ -850,6 +845,7 @@ nfserrno (int errno)
{ nfserr_io, -EIO },
{ nfserr_nxio, -ENXIO },
{ nfserr_fbig, -E2BIG },
+ { nfserr_stale, -EBADF },
{ nfserr_acces, -EACCES },
{ nfserr_exist, -EEXIST },
{ nfserr_xdev, -EXDEV },
@@ -878,6 +874,8 @@ nfserrno (int errno)
{ nfserr_toosmall, -ETOOSMALL },
{ nfserr_serverfault, -ESERVERFAULT },
{ nfserr_serverfault, -ENFILE },
+ { nfserr_io, -EREMOTEIO },
+ { nfserr_stale, -EOPENSTALE },
{ nfserr_io, -EUCLEAN },
{ nfserr_perm, -ENOKEY },
{ nfserr_no_grace, -ENOGRACE},
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 80431921e5d7..b8c682b62d29 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -12,6 +12,7 @@
#include <linux/module.h>
#include <linux/fs_struct.h>
#include <linux/swap.h>
+#include <linux/siphash.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/svcsock.h>
@@ -55,18 +56,17 @@ static __be32 nfsd_init_request(struct svc_rqst *,
struct svc_process_info *);
/*
- * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and the members
- * of the svc_serv struct. In particular, ->sv_nrthreads but also to some
- * extent ->sv_temp_socks and ->sv_permsocks. It also protects nfsdstats.th_cnt
+ * nfsd_mutex protects nn->nfsd_serv -- both the pointer itself and some members
+ * of the svc_serv struct such as ->sv_temp_socks and ->sv_permsocks.
*
* If (out side the lock) nn->nfsd_serv is non-NULL, then it must point to a
- * properly initialised 'struct svc_serv' with ->sv_nrthreads > 0. That number
- * of nfsd threads must exist and each must listed in ->sp_all_threads in each
- * entry of ->sv_pools[].
+ * properly initialised 'struct svc_serv' with ->sv_nrthreads > 0 (unless
+ * nn->keep_active is set). That number of nfsd threads must
+ * exist and each must be listed in ->sp_all_threads in some entry of
+ * ->sv_pools[].
*
- * Transitions of the thread count between zero and non-zero are of particular
- * interest since the svc_serv needs to be created and initialized at that
- * point, or freed.
+ * Each active thread holds a counted reference on nn->nfsd_serv, as does
+ * the nn->keep_active flag and various transient calls to svc_get().
*
* Finally, the nfsd_mutex also protects some of the global variables that are
* accessed when nfsd starts and that are settable via the write_* routines in
@@ -345,33 +345,57 @@ static bool nfsd_needs_lockd(struct nfsd_net *nn)
return nfsd_vers(nn, 2, NFSD_TEST) || nfsd_vers(nn, 3, NFSD_TEST);
}
-void nfsd_copy_boot_verifier(__be32 verf[2], struct nfsd_net *nn)
+/**
+ * nfsd_copy_write_verifier - Atomically copy a write verifier
+ * @verf: buffer in which to receive the verifier cookie
+ * @nn: NFS net namespace
+ *
+ * This function provides a wait-free mechanism for copying the
+ * namespace's write verifier without tearing it.
+ */
+void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn)
{
int seq = 0;
do {
- read_seqbegin_or_lock(&nn->boot_lock, &seq);
- /*
- * This is opaque to client, so no need to byte-swap. Use
- * __force to keep sparse happy. y2038 time_t overflow is
- * irrelevant in this usage
- */
- verf[0] = (__force __be32)nn->nfssvc_boot.tv_sec;
- verf[1] = (__force __be32)nn->nfssvc_boot.tv_nsec;
- } while (need_seqretry(&nn->boot_lock, seq));
- done_seqretry(&nn->boot_lock, seq);
+ read_seqbegin_or_lock(&nn->writeverf_lock, &seq);
+ memcpy(verf, nn->writeverf, sizeof(*verf));
+ } while (need_seqretry(&nn->writeverf_lock, seq));
+ done_seqretry(&nn->writeverf_lock, seq);
}
-static void nfsd_reset_boot_verifier_locked(struct nfsd_net *nn)
+static void nfsd_reset_write_verifier_locked(struct nfsd_net *nn)
{
- ktime_get_real_ts64(&nn->nfssvc_boot);
+ struct timespec64 now;
+ u64 verf;
+
+ /*
+ * Because the time value is hashed, y2038 time_t overflow
+ * is irrelevant in this usage.
+ */
+ ktime_get_raw_ts64(&now);
+ verf = siphash_2u64(now.tv_sec, now.tv_nsec, &nn->siphash_key);
+ memcpy(nn->writeverf, &verf, sizeof(nn->writeverf));
}
-void nfsd_reset_boot_verifier(struct nfsd_net *nn)
+/**
+ * nfsd_reset_write_verifier - Generate a new write verifier
+ * @nn: NFS net namespace
+ *
+ * This function updates the ->writeverf field of @nn. This field
+ * contains an opaque cookie that, according to Section 18.32.3 of
+ * RFC 8881, "the client can use to determine whether a server has
+ * changed instance state (e.g., server restart) between a call to
+ * WRITE and a subsequent call to either WRITE or COMMIT. This
+ * cookie MUST be unchanged during a single instance of the NFSv4.1
+ * server and MUST be unique between instances of the NFSv4.1
+ * server."
+ */
+void nfsd_reset_write_verifier(struct nfsd_net *nn)
{
- write_seqlock(&nn->boot_lock);
- nfsd_reset_boot_verifier_locked(nn);
- write_sequnlock(&nn->boot_lock);
+ write_seqlock(&nn->writeverf_lock);
+ nfsd_reset_write_verifier_locked(nn);
+ write_sequnlock(&nn->writeverf_lock);
}
static int nfsd_startup_net(struct net *net, const struct cred *cred)
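[Note: the write verifier is now an opaque 8-byte siphash of a raw timestamp, published under a seqlock so readers can copy it without tearing. A toy userspace seqlock showing the retry discipline; memory-ordering details are glossed over here, the kernel's seqlock_t handles them properly:]

#include <stdatomic.h>
#include <string.h>

static atomic_uint seq;
static unsigned char verf[8];

static void write_verifier(const unsigned char next[8])
{
	atomic_fetch_add(&seq, 1);	/* odd: update in progress */
	memcpy(verf, next, 8);
	atomic_fetch_add(&seq, 1);	/* even: stable again */
}

static void read_verifier(unsigned char out[8])
{
	unsigned int start;

	do {
		while ((start = atomic_load(&seq)) & 1)
			;		/* writer active: wait */
		memcpy(out, verf, 8);
	} while (atomic_load(&seq) != start);	/* torn read: retry */
}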
@@ -435,6 +459,7 @@ static void nfsd_shutdown_net(struct net *net)
nfsd_shutdown_generic();
}
+static DEFINE_SPINLOCK(nfsd_notifier_lock);
static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
@@ -444,18 +469,17 @@ static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct sockaddr_in sin;
- if ((event != NETDEV_DOWN) ||
- !atomic_inc_not_zero(&nn->ntf_refcnt))
+ if (event != NETDEV_DOWN || !nn->nfsd_serv)
goto out;
+ spin_lock(&nfsd_notifier_lock);
if (nn->nfsd_serv) {
dprintk("nfsd_inetaddr_event: removed %pI4\n", &ifa->ifa_local);
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = ifa->ifa_local;
svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
}
- atomic_dec(&nn->ntf_refcnt);
- wake_up(&nn->ntf_wq);
+ spin_unlock(&nfsd_notifier_lock);
out:
return NOTIFY_DONE;
@@ -475,10 +499,10 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct sockaddr_in6 sin6;
- if ((event != NETDEV_DOWN) ||
- !atomic_inc_not_zero(&nn->ntf_refcnt))
+ if (event != NETDEV_DOWN || !nn->nfsd_serv)
goto out;
+ spin_lock(&nfsd_notifier_lock);
if (nn->nfsd_serv) {
dprintk("nfsd_inet6addr_event: removed %pI6\n", &ifa->addr);
sin6.sin6_family = AF_INET6;
@@ -487,8 +511,8 @@ static int nfsd_inet6addr_event(struct notifier_block *this,
sin6.sin6_scope_id = ifa->idev->dev->ifindex;
svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
}
- atomic_dec(&nn->ntf_refcnt);
- wake_up(&nn->ntf_wq);
+ spin_unlock(&nfsd_notifier_lock);
+
out:
return NOTIFY_DONE;
}
@@ -505,7 +529,6 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- atomic_dec(&nn->ntf_refcnt);
/* check if the notifier still has clients */
if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
@@ -513,7 +536,6 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
}
- wait_event(nn->ntf_wq, atomic_read(&nn->ntf_refcnt) == 0);
/*
* write_ports can create the server without actually starting
@@ -594,20 +616,9 @@ static const struct svc_serv_ops nfsd_thread_sv_ops = {
.svo_shutdown = nfsd_last_thread,
.svo_function = nfsd,
.svo_enqueue_xprt = svc_xprt_do_enqueue,
- .svo_setup = svc_set_num_threads,
.svo_module = THIS_MODULE,
};
-static void nfsd_complete_shutdown(struct net *net)
-{
- struct nfsd_net *nn = net_generic(net, nfsd_net_id);
-
- WARN_ON(!mutex_is_locked(&nfsd_mutex));
-
- nn->nfsd_serv = NULL;
- complete(&nn->nfsd_shutdown_complete);
-}
-
void nfsd_shutdown_threads(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
@@ -622,11 +633,9 @@ void nfsd_shutdown_threads(struct net *net)
svc_get(serv);
/* Kill outstanding nfsd threads */
- serv->sv_ops->svo_setup(serv, NULL, 0);
- nfsd_destroy(net);
+ svc_set_num_threads(serv, NULL, 0);
+ nfsd_put(net);
mutex_unlock(&nfsd_mutex);
- /* Wait for shutdown of nfsd_serv to complete */
- wait_for_completion(&nn->nfsd_shutdown_complete);
}
bool i_am_nfsd(void)
@@ -638,6 +647,7 @@ int nfsd_create_serv(struct net *net)
{
int error;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ struct svc_serv *serv;
WARN_ON(!mutex_is_locked(&nfsd_mutex));
if (nn->nfsd_serv) {
@@ -647,19 +657,23 @@ int nfsd_create_serv(struct net *net)
if (nfsd_max_blksize == 0)
nfsd_max_blksize = nfsd_get_default_max_blksize();
nfsd_reset_versions(nn);
- nn->nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
- &nfsd_thread_sv_ops);
- if (nn->nfsd_serv == NULL)
+ serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
+ &nfsd_thread_sv_ops);
+ if (serv == NULL)
return -ENOMEM;
- init_completion(&nn->nfsd_shutdown_complete);
- nn->nfsd_serv->sv_maxconn = nn->max_connections;
- error = svc_bind(nn->nfsd_serv, net);
+ serv->sv_maxconn = nn->max_connections;
+ error = svc_bind(serv, net);
if (error < 0) {
- svc_destroy(nn->nfsd_serv);
- nfsd_complete_shutdown(net);
+ /* NOT nfsd_put() as notifiers (see below) haven't
+ * been set up yet.
+ */
+ svc_put(serv);
return error;
}
+ spin_lock(&nfsd_notifier_lock);
+ nn->nfsd_serv = serv;
+ spin_unlock(&nfsd_notifier_lock);
set_max_drc();
/* check if the notifier is already set */
@@ -669,8 +683,7 @@ int nfsd_create_serv(struct net *net)
register_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
}
- atomic_inc(&nn->ntf_refcnt);
- nfsd_reset_boot_verifier(nn);
+ nfsd_reset_write_verifier(nn);
return 0;
}
@@ -697,16 +710,26 @@ int nfsd_get_nrthreads(int n, int *nthreads, struct net *net)
return 0;
}
-void nfsd_destroy(struct net *net)
+/* This is the callback for kref_put() below.
+ * It is empty because the first thing we would need to do is
+ * call svc_shutdown_net(), but the 'net' cannot be obtained from
+ * the kref. So all the work is done after kref_put() returns true.
+ */
+static void nfsd_noop(struct kref *ref)
+{
+}
+
+void nfsd_put(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- int destroy = (nn->nfsd_serv->sv_nrthreads == 1);
- if (destroy)
+ if (kref_put(&nn->nfsd_serv->sv_refcnt, nfsd_noop)) {
svc_shutdown_net(nn->nfsd_serv, net);
- svc_destroy(nn->nfsd_serv);
- if (destroy)
- nfsd_complete_shutdown(net);
+ svc_destroy(&nn->nfsd_serv->sv_refcnt);
+ spin_lock(&nfsd_notifier_lock);
+ nn->nfsd_serv = NULL;
+ spin_unlock(&nfsd_notifier_lock);
+ }
}
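The empty-release trick generalizes: kref_put() insists on a release callback, but when teardown needs context that cannot be recovered from the embedded kref, the caller can pass a no-op and act on kref_put()'s return value instead. A sketch under assumed names (my_ctx, my_teardown are hypothetical):

	static void my_noop_release(struct kref *ref)
	{
		/* Intentionally empty: teardown needs 'ctx', which
		 * cannot be derived from 'ref'.
		 */
	}

	void my_put(struct my_ctx *ctx)
	{
		if (kref_put(&ctx->obj->refcnt, my_noop_release)) {
			/* Refcount hit zero: tear down with full context */
			my_teardown(ctx, ctx->obj);
			ctx->obj = NULL;
		}
	}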
int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
@@ -733,7 +756,7 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
if (tot > NFSD_MAXSERVS) {
/* total too large: scale down requested numbers */
for (i = 0; i < n && tot > 0; i++) {
- int new = nthreads[i] * NFSD_MAXSERVS / tot;
+ int new = nthreads[i] * NFSD_MAXSERVS / tot;
tot -= (nthreads[i] - new);
nthreads[i] = new;
}
@@ -753,12 +776,13 @@ int nfsd_set_nrthreads(int n, int *nthreads, struct net *net)
/* apply the new numbers */
svc_get(nn->nfsd_serv);
for (i = 0; i < n; i++) {
- err = nn->nfsd_serv->sv_ops->svo_setup(nn->nfsd_serv,
- &nn->nfsd_serv->sv_pools[i], nthreads[i]);
+ err = svc_set_num_threads(nn->nfsd_serv,
+ &nn->nfsd_serv->sv_pools[i],
+ nthreads[i]);
if (err)
break;
}
- nfsd_destroy(net);
+ nfsd_put(net);
return err;
}
@@ -795,21 +819,19 @@ nfsd_svc(int nrservs, struct net *net, const struct cred *cred)
error = nfsd_startup_net(net, cred);
if (error)
- goto out_destroy;
- error = nn->nfsd_serv->sv_ops->svo_setup(nn->nfsd_serv,
- NULL, nrservs);
+ goto out_put;
+ error = svc_set_num_threads(nn->nfsd_serv, NULL, nrservs);
if (error)
goto out_shutdown;
- /* We are holding a reference to nn->nfsd_serv which
- * we don't want to count in the return value,
- * so subtract 1
- */
- error = nn->nfsd_serv->sv_nrthreads - 1;
+ error = nn->nfsd_serv->sv_nrthreads;
out_shutdown:
if (error < 0 && !nfsd_up_before)
nfsd_shutdown_net(net);
-out_destroy:
- nfsd_destroy(net); /* Release server */
+out_put:
+ /* Threads now hold service active */
+ if (xchg(&nn->keep_active, 0))
+ nfsd_put(net);
+ nfsd_put(net);
out:
mutex_unlock(&nfsd_mutex);
return error;
@@ -923,9 +945,6 @@ nfsd(void *vrqstp)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
int err;
- /* Lock module and set up kernel thread */
- mutex_lock(&nfsd_mutex);
-
/* At this point, the thread shares current->fs
* with the init process. We need to create files with the
* umask as defined by the client instead of init's umask. */
@@ -945,8 +964,7 @@ nfsd(void *vrqstp)
allow_signal(SIGINT);
allow_signal(SIGQUIT);
- nfsdstats.th_cnt++;
- mutex_unlock(&nfsd_mutex);
+ atomic_inc(&nfsdstats.th_cnt);
set_freezable();
@@ -973,20 +991,36 @@ nfsd(void *vrqstp)
/* Clear signals before calling svc_exit_thread() */
flush_signals(current);
- mutex_lock(&nfsd_mutex);
- nfsdstats.th_cnt --;
+ atomic_dec(&nfsdstats.th_cnt);
out:
- rqstp->rq_server = NULL;
+ /* Take an extra ref so that the svc_put() in svc_exit_thread()
+ * doesn't call svc_destroy()
+ */
+ svc_get(nn->nfsd_serv);
/* Release the thread */
svc_exit_thread(rqstp);
- nfsd_destroy(net);
+ /* We need to drop a ref, but may not drop the last reference
+ * without holding nfsd_mutex, and we cannot wait for nfsd_mutex as that
+ * could deadlock with nfsd_shutdown_threads() waiting for us.
+ * So there are three options:
+ * - drop a non-final reference,
+ * - get the mutex without waiting,
+ * - sleep briefly and try the above again.
+ */
+ while (!svc_put_not_last(nn->nfsd_serv)) {
+ if (mutex_trylock(&nfsd_mutex)) {
+ nfsd_put(net);
+ mutex_unlock(&nfsd_mutex);
+ break;
+ }
+ msleep(20);
+ }
/* Release module */
- mutex_unlock(&nfsd_mutex);
- module_put_and_exit(0);
+ module_put_and_kthread_exit(0);
return 0;
}
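svc_put_not_last() comes from the SUNRPC side of this series; it drops a reference only when it is not the final one, which is exactly the test the loop above needs. It is presumably built on refcount_dec_not_one(), roughly:

	/* Sketch, assuming struct svc_serv embeds struct kref sv_refcnt */
	static inline bool svc_put_not_last(struct svc_serv *serv)
	{
		/* Decrements and returns true unless the count is 1 */
		return refcount_dec_not_one(&serv->sv_refcnt.refcount);
	}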
@@ -1096,7 +1130,6 @@ int nfsd_pool_stats_open(struct inode *inode, struct file *file)
mutex_unlock(&nfsd_mutex);
return -ENODEV;
}
- /* bump up the psudo refcount while traversing */
svc_get(nn->nfsd_serv);
ret = svc_pool_stats_open(nn->nfsd_serv, file);
mutex_unlock(&nfsd_mutex);
@@ -1109,8 +1142,7 @@ int nfsd_pool_stats_release(struct inode *inode, struct file *file)
struct net *net = inode->i_sb->s_fs_info;
mutex_lock(&nfsd_mutex);
- /* this function really, really should have been called svc_put() */
- nfsd_destroy(net);
+ nfsd_put(net);
mutex_unlock(&nfsd_mutex);
return ret;
}
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index e73bdbb1634a..95457cfd37fc 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -568,6 +568,10 @@ struct nfs4_ol_stateid {
struct list_head st_locks;
struct nfs4_stateowner *st_stateowner;
struct nfs4_clnt_odstate *st_clnt_odstate;
+/*
+ * These bitmasks use 3 separate bits for READ, WRITE, and BOTH; see the
+ * comment above bmap_to_share_mode() for explanation:
+ */
unsigned char st_access_bmap;
unsigned char st_deny_bmap;
struct nfs4_ol_stateid *st_openstp;
@@ -629,6 +633,7 @@ struct nfsd4_blocked_lock {
struct file_lock nbl_lock;
struct knfsd_fh nbl_fh;
struct nfsd4_callback nbl_cb;
+ struct kref nbl_kref;
};
struct nfsd4_compound_state;
diff --git a/fs/nfsd/stats.c b/fs/nfsd/stats.c
index 1d3b881e7382..a8c5a02a84f0 100644
--- a/fs/nfsd/stats.c
+++ b/fs/nfsd/stats.c
@@ -45,7 +45,7 @@ static int nfsd_proc_show(struct seq_file *seq, void *v)
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_WRITE]));
/* thread usage: */
- seq_printf(seq, "th %u 0", nfsdstats.th_cnt);
+ seq_printf(seq, "th %u 0", atomic_read(&nfsdstats.th_cnt));
/* deprecated thread usage histogram stats */
for (i = 0; i < 10; i++)
diff --git a/fs/nfsd/stats.h b/fs/nfsd/stats.h
index 51ecda852e23..9b43dc3d9991 100644
--- a/fs/nfsd/stats.h
+++ b/fs/nfsd/stats.h
@@ -29,11 +29,9 @@ enum {
struct nfsd_stats {
struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM];
- /* Protected by nfsd_mutex */
- unsigned int th_cnt; /* number of available threads */
+ atomic_t th_cnt; /* number of available threads */
};
-
extern struct nfsd_stats nfsdstats;
extern struct svc_stat nfsd_svcstats;
diff --git a/fs/nfsd/trace.h b/fs/nfsd/trace.h
index f1e0d3c51bc2..5889db66409d 100644
--- a/fs/nfsd/trace.h
+++ b/fs/nfsd/trace.h
@@ -47,7 +47,7 @@
rqstp->rq_xprt->xpt_remotelen); \
} while (0);
-TRACE_EVENT(nfsd_garbage_args_err,
+DECLARE_EVENT_CLASS(nfsd_xdr_err_class,
TP_PROTO(
const struct svc_rqst *rqstp
),
@@ -69,27 +69,13 @@ TRACE_EVENT(nfsd_garbage_args_err,
)
);
-TRACE_EVENT(nfsd_cant_encode_err,
- TP_PROTO(
- const struct svc_rqst *rqstp
- ),
- TP_ARGS(rqstp),
- TP_STRUCT__entry(
- NFSD_TRACE_PROC_ARG_FIELDS
+#define DEFINE_NFSD_XDR_ERR_EVENT(name) \
+DEFINE_EVENT(nfsd_xdr_err_class, nfsd_##name##_err, \
+ TP_PROTO(const struct svc_rqst *rqstp), \
+ TP_ARGS(rqstp))
- __field(u32, vers)
- __field(u32, proc)
- ),
- TP_fast_assign(
- NFSD_TRACE_PROC_ARG_ASSIGNMENTS
-
- __entry->vers = rqstp->rq_vers;
- __entry->proc = rqstp->rq_proc;
- ),
- TP_printk("xid=0x%08x vers=%u proc=%u",
- __entry->xid, __entry->vers, __entry->proc
- )
-);
+DEFINE_NFSD_XDR_ERR_EVENT(garbage_args);
+DEFINE_NFSD_XDR_ERR_EVENT(cant_encode);
#define show_nfsd_may_flags(x) \
__print_flags(x, "|", \
@@ -320,14 +306,14 @@ TRACE_EVENT(nfsd_export_update,
DECLARE_EVENT_CLASS(nfsd_io_class,
TP_PROTO(struct svc_rqst *rqstp,
struct svc_fh *fhp,
- loff_t offset,
- unsigned long len),
+ u64 offset,
+ u32 len),
TP_ARGS(rqstp, fhp, offset, len),
TP_STRUCT__entry(
__field(u32, xid)
__field(u32, fh_hash)
- __field(loff_t, offset)
- __field(unsigned long, len)
+ __field(u64, offset)
+ __field(u32, len)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rqstp->rq_xid);
@@ -335,7 +321,7 @@ DECLARE_EVENT_CLASS(nfsd_io_class,
__entry->offset = offset;
__entry->len = len;
),
- TP_printk("xid=0x%08x fh_hash=0x%08x offset=%lld len=%lu",
+ TP_printk("xid=0x%08x fh_hash=0x%08x offset=%llu len=%u",
__entry->xid, __entry->fh_hash,
__entry->offset, __entry->len)
)
@@ -344,8 +330,8 @@ DECLARE_EVENT_CLASS(nfsd_io_class,
DEFINE_EVENT(nfsd_io_class, nfsd_##name, \
TP_PROTO(struct svc_rqst *rqstp, \
struct svc_fh *fhp, \
- loff_t offset, \
- unsigned long len), \
+ u64 offset, \
+ u32 len), \
TP_ARGS(rqstp, fhp, offset, len))
DEFINE_NFSD_IO_EVENT(read_start);
@@ -413,6 +399,56 @@ TRACE_EVENT(nfsd_dirent,
)
)
+DECLARE_EVENT_CLASS(nfsd_copy_err_class,
+ TP_PROTO(struct svc_rqst *rqstp,
+ struct svc_fh *src_fhp,
+ loff_t src_offset,
+ struct svc_fh *dst_fhp,
+ loff_t dst_offset,
+ u64 count,
+ int status),
+ TP_ARGS(rqstp, src_fhp, src_offset, dst_fhp, dst_offset, count, status),
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, src_fh_hash)
+ __field(loff_t, src_offset)
+ __field(u32, dst_fh_hash)
+ __field(loff_t, dst_offset)
+ __field(u64, count)
+ __field(int, status)
+ ),
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->src_fh_hash = knfsd_fh_hash(&src_fhp->fh_handle);
+ __entry->src_offset = src_offset;
+ __entry->dst_fh_hash = knfsd_fh_hash(&dst_fhp->fh_handle);
+ __entry->dst_offset = dst_offset;
+ __entry->count = count;
+ __entry->status = status;
+ ),
+ TP_printk("xid=0x%08x src_fh_hash=0x%08x src_offset=%lld "
+ "dst_fh_hash=0x%08x dst_offset=%lld "
+ "count=%llu status=%d",
+ __entry->xid, __entry->src_fh_hash, __entry->src_offset,
+ __entry->dst_fh_hash, __entry->dst_offset,
+ (unsigned long long)__entry->count,
+ __entry->status)
+)
+
+#define DEFINE_NFSD_COPY_ERR_EVENT(name) \
+DEFINE_EVENT(nfsd_copy_err_class, nfsd_##name, \
+ TP_PROTO(struct svc_rqst *rqstp, \
+ struct svc_fh *src_fhp, \
+ loff_t src_offset, \
+ struct svc_fh *dst_fhp, \
+ loff_t dst_offset, \
+ u64 count, \
+ int status), \
+ TP_ARGS(rqstp, src_fhp, src_offset, dst_fhp, dst_offset, \
+ count, status))
+
+DEFINE_NFSD_COPY_ERR_EVENT(clone_file_range_err);
+
#include "state.h"
#include "filecache.h"
#include "vfs.h"
@@ -538,6 +574,34 @@ DEFINE_EVENT(nfsd_net_class, nfsd_##name, \
DEFINE_NET_EVENT(grace_start);
DEFINE_NET_EVENT(grace_complete);
+TRACE_EVENT(nfsd_writeverf_reset,
+ TP_PROTO(
+ const struct nfsd_net *nn,
+ const struct svc_rqst *rqstp,
+ int error
+ ),
+ TP_ARGS(nn, rqstp, error),
+ TP_STRUCT__entry(
+ __field(unsigned long long, boot_time)
+ __field(u32, xid)
+ __field(int, error)
+ __array(unsigned char, verifier, NFS4_VERIFIER_SIZE)
+ ),
+ TP_fast_assign(
+ __entry->boot_time = nn->boot_time;
+ __entry->xid = be32_to_cpu(rqstp->rq_xid);
+ __entry->error = error;
+
+ /* avoid taking the seqlock inside TP_fast_assign */
+ memcpy(__entry->verifier, nn->writeverf,
+ NFS4_VERIFIER_SIZE);
+ ),
+ TP_printk("boot_time=%16llx xid=0x%08x error=%d new verifier=0x%s",
+ __entry->boot_time, __entry->xid, __entry->error,
+ __print_hex_str(__entry->verifier, NFS4_VERIFIER_SIZE)
+ )
+);
+
TRACE_EVENT(nfsd_clid_cred_mismatch,
TP_PROTO(
const struct nfs4_client *clp,
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index c99857689e2c..91600e71be19 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -40,6 +40,7 @@
#include "../internal.h"
#include "acl.h"
#include "idmap.h"
+#include "xdr4.h"
#endif /* CONFIG_NFSD_V4 */
#include "nfsd.h"
@@ -434,6 +435,10 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
.ia_size = iap->ia_size,
};
+ host_err = -EFBIG;
+ if (iap->ia_size < 0)
+ goto out_unlock;
+
host_err = notify_change(&init_user_ns, dentry, &size_attr, NULL);
if (host_err)
goto out_unlock;
@@ -517,15 +522,23 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
}
#endif
-__be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos,
- struct nfsd_file *nf_dst, u64 dst_pos, u64 count, bool sync)
+static struct nfsd4_compound_state *nfsd4_get_cstate(struct svc_rqst *rqstp)
+{
+ return &((struct nfsd4_compoundres *)rqstp->rq_resp)->cstate;
+}
+
+__be32 nfsd4_clone_file_range(struct svc_rqst *rqstp,
+ struct nfsd_file *nf_src, u64 src_pos,
+ struct nfsd_file *nf_dst, u64 dst_pos,
+ u64 count, bool sync)
{
struct file *src = nf_src->nf_file;
struct file *dst = nf_dst->nf_file;
+ errseq_t since;
loff_t cloned;
__be32 ret = 0;
- down_write(&nf_dst->nf_rwsem);
+ since = READ_ONCE(dst->f_wb_err);
cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0);
if (cloned < 0) {
ret = nfserrno(cloned);
@@ -540,15 +553,25 @@ __be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos,
int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);
if (!status)
+ status = filemap_check_wb_err(dst->f_mapping, since);
+ if (!status)
status = commit_inode_metadata(file_inode(src));
if (status < 0) {
- nfsd_reset_boot_verifier(net_generic(nf_dst->nf_net,
- nfsd_net_id));
+ struct nfsd_net *nn = net_generic(nf_dst->nf_net,
+ nfsd_net_id);
+
+ trace_nfsd_clone_file_range_err(rqstp,
+ &nfsd4_get_cstate(rqstp)->save_fh,
+ src_pos,
+ &nfsd4_get_cstate(rqstp)->current_fh,
+ dst_pos,
+ count, status);
+ nfsd_reset_write_verifier(nn);
+ trace_nfsd_writeverf_reset(nn, rqstp, status);
ret = nfserrno(status);
}
}
out_err:
- up_write(&nf_dst->nf_rwsem);
return ret;
}
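This is the errseq_t sample-and-check protocol that replaces the nf_rwsem serialization throughout this series: sample the file's writeback error cursor before issuing I/O, then ask the mapping whether an error was recorded since the sample. In outline:

	errseq_t since;
	int err;

	since = READ_ONCE(file->f_wb_err);	/* sample before I/O */
	/* ... write and/or fsync ... */
	err = filemap_check_wb_err(file->f_mapping, since);
	if (err < 0) {
		/* Writeback failed after the sample: the data may not
		 * be durable, so reset the write verifier to make
		 * clients resend their unstable writes.
		 */
	}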
@@ -777,6 +800,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
int may_flags, struct file **filp)
{
__be32 err;
+ bool retried = false;
validate_process_creds();
/*
@@ -792,9 +816,16 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
*/
if (type == S_IFREG)
may_flags |= NFSD_MAY_OWNER_OVERRIDE;
+retry:
err = fh_verify(rqstp, fhp, type, may_flags);
- if (!err)
+ if (!err) {
err = __nfsd_open(rqstp, fhp, type, may_flags, filp);
+ if (err == nfserr_stale && !retried) {
+ retried = true;
+ fh_put(fhp);
+ goto retry;
+ }
+ }
validate_process_creds();
return err;
}
@@ -944,10 +975,12 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
unsigned long *cnt, int stable,
__be32 *verf)
{
+ struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
struct file *file = nf->nf_file;
struct super_block *sb = file_inode(file)->i_sb;
struct svc_export *exp;
struct iov_iter iter;
+ errseq_t since;
__be32 nfserr;
int host_err;
int use_wgather;
@@ -985,36 +1018,28 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
flags |= RWF_SYNC;
iov_iter_kvec(&iter, WRITE, vec, vlen, *cnt);
- if (flags & RWF_SYNC) {
- down_write(&nf->nf_rwsem);
- host_err = vfs_iter_write(file, &iter, &pos, flags);
- if (host_err < 0)
- nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp),
- nfsd_net_id));
- up_write(&nf->nf_rwsem);
- } else {
- down_read(&nf->nf_rwsem);
- if (verf)
- nfsd_copy_boot_verifier(verf,
- net_generic(SVC_NET(rqstp),
- nfsd_net_id));
- host_err = vfs_iter_write(file, &iter, &pos, flags);
- up_read(&nf->nf_rwsem);
- }
+ since = READ_ONCE(file->f_wb_err);
+ if (verf)
+ nfsd_copy_write_verifier(verf, nn);
+ host_err = vfs_iter_write(file, &iter, &pos, flags);
if (host_err < 0) {
- nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp),
- nfsd_net_id));
+ nfsd_reset_write_verifier(nn);
+ trace_nfsd_writeverf_reset(nn, rqstp, host_err);
goto out_nfserr;
}
*cnt = host_err;
nfsd_stats_io_write_add(exp, *cnt);
fsnotify_modify(file);
+ host_err = filemap_check_wb_err(file->f_mapping, since);
+ if (host_err < 0)
+ goto out_nfserr;
if (stable && use_wgather) {
host_err = wait_for_concurrent_writes(file);
- if (host_err < 0)
- nfsd_reset_boot_verifier(net_generic(SVC_NET(rqstp),
- nfsd_net_id));
+ if (host_err < 0) {
+ nfsd_reset_write_verifier(nn);
+ trace_nfsd_writeverf_reset(nn, rqstp, host_err);
+ }
}
out_nfserr:
@@ -1089,71 +1114,77 @@ out:
}
#ifdef CONFIG_NFSD_V3
-static int
-nfsd_filemap_write_and_wait_range(struct nfsd_file *nf, loff_t offset,
- loff_t end)
-{
- struct address_space *mapping = nf->nf_file->f_mapping;
- int ret = filemap_fdatawrite_range(mapping, offset, end);
-
- if (ret)
- return ret;
- filemap_fdatawait_range_keep_errors(mapping, offset, end);
- return 0;
-}
-
-/*
- * Commit all pending writes to stable storage.
+/**
+ * nfsd_commit - Commit pending writes to stable storage
+ * @rqstp: RPC request being processed
+ * @fhp: NFS filehandle
+ * @offset: raw offset from beginning of file
+ * @count: raw count of bytes to sync
+ * @verf: filled in with the server's current write verifier
*
- * Note: we only guarantee that data that lies within the range specified
- * by the 'offset' and 'count' parameters will be synced.
+ * Note: we guarantee that data that lies within the range specified
+ * by the 'offset' and 'count' parameters will be synced. The server
+ * is permitted to sync data that lies outside this range at the
+ * same time.
*
* Unfortunately we cannot lock the file to make sure we return full WCC
* data to the client, as locking happens lower down in the filesystem.
+ *
+ * Return values:
+ * An nfsstat value in network byte order.
*/
__be32
-nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
- loff_t offset, unsigned long count, __be32 *verf)
+nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, u64 offset,
+ u32 count, __be32 *verf)
{
+ u64 maxbytes;
+ loff_t start, end;
+ struct nfsd_net *nn;
struct nfsd_file *nf;
- loff_t end = LLONG_MAX;
- __be32 err = nfserr_inval;
-
- if (offset < 0)
- goto out;
- if (count != 0) {
- end = offset + (loff_t)count - 1;
- if (end < offset)
- goto out;
- }
+ __be32 err;
err = nfsd_file_acquire(rqstp, fhp,
NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &nf);
if (err)
goto out;
+
+ /*
+ * Convert the client-provided (offset, count) range to a
+ * (start, end) range. If the client-provided range falls
+ * outside the maximum file size of the underlying FS,
+ * clamp the sync range appropriately.
+ */
+ start = 0;
+ end = LLONG_MAX;
+ maxbytes = (u64)fhp->fh_dentry->d_sb->s_maxbytes;
+ if (offset < maxbytes) {
+ start = offset;
+ if (count && (offset + count - 1 < maxbytes))
+ end = offset + count - 1;
+ }
+
+ nn = net_generic(nf->nf_net, nfsd_net_id);
if (EX_ISSYNC(fhp->fh_export)) {
- int err2 = nfsd_filemap_write_and_wait_range(nf, offset, end);
+ errseq_t since = READ_ONCE(nf->nf_file->f_wb_err);
+ int err2;
- down_write(&nf->nf_rwsem);
- if (!err2)
- err2 = vfs_fsync_range(nf->nf_file, offset, end, 0);
+ err2 = vfs_fsync_range(nf->nf_file, start, end, 0);
switch (err2) {
case 0:
- nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net,
- nfsd_net_id));
+ nfsd_copy_write_verifier(verf, nn);
+ err2 = filemap_check_wb_err(nf->nf_file->f_mapping,
+ since);
break;
case -EINVAL:
err = nfserr_notsupp;
break;
default:
- err = nfserrno(err2);
- nfsd_reset_boot_verifier(net_generic(nf->nf_net,
- nfsd_net_id));
+ nfsd_reset_write_verifier(nn);
+ trace_nfsd_writeverf_reset(nn, rqstp, err2);
}
- up_write(&nf->nf_rwsem);
+ err = nfserrno(err2);
} else
- nfsd_copy_boot_verifier(verf, net_generic(nf->nf_net,
- nfsd_net_id));
+ nfsd_copy_write_verifier(verf, nn);
nfsd_file_put(nf);
out:
@@ -1747,8 +1778,8 @@ retry:
* so do it by hand */
trap = lock_rename(tdentry, fdentry);
ffhp->fh_locked = tfhp->fh_locked = true;
- fill_pre_wcc(ffhp);
- fill_pre_wcc(tfhp);
+ fh_fill_pre_attrs(ffhp);
+ fh_fill_pre_attrs(tfhp);
odentry = lookup_one_len(fname, fdentry, flen);
host_err = PTR_ERR(odentry);
@@ -1808,8 +1839,8 @@ retry:
* were the same, so again we do it by hand.
*/
if (!close_cached) {
- fill_post_wcc(ffhp);
- fill_post_wcc(tfhp);
+ fh_fill_post_attrs(ffhp);
+ fh_fill_post_attrs(tfhp);
}
unlock_rename(tdentry, fdentry);
ffhp->fh_locked = tfhp->fh_locked = false;
diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
index b21b76e6b9a8..2c43d10e3cab 100644
--- a/fs/nfsd/vfs.h
+++ b/fs/nfsd/vfs.h
@@ -57,7 +57,8 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *, struct svc_fh *,
struct xdr_netobj *);
__be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *,
struct file *, loff_t, loff_t, int);
-__be32 nfsd4_clone_file_range(struct nfsd_file *nf_src, u64 src_pos,
+__be32 nfsd4_clone_file_range(struct svc_rqst *rqstp,
+ struct nfsd_file *nf_src, u64 src_pos,
struct nfsd_file *nf_dst, u64 dst_pos,
u64 count, bool sync);
#endif /* CONFIG_NFSD_V4 */
@@ -73,8 +74,8 @@ __be32 do_nfsd_create(struct svc_rqst *, struct svc_fh *,
char *name, int len, struct iattr *attrs,
struct svc_fh *res, int createmode,
u32 *verifier, bool *truncp, bool *created);
-__be32 nfsd_commit(struct svc_rqst *, struct svc_fh *,
- loff_t, unsigned long, __be32 *verf);
+__be32 nfsd_commit(struct svc_rqst *rqst, struct svc_fh *fhp,
+ u64 offset, u32 count, __be32 *verf);
#endif /* CONFIG_NFSD_V3 */
#ifdef CONFIG_NFSD_V4
__be32 nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp,
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index bc3e2cd4117f..063dd16d75b5 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -195,12 +195,12 @@ void nilfs_page_bug(struct page *page)
*/
static void nilfs_copy_page(struct page *dst, struct page *src, int copy_dirty)
{
- struct buffer_head *dbh, *dbufs, *sbh, *sbufs;
+ struct buffer_head *dbh, *dbufs, *sbh;
unsigned long mask = NILFS_BUFFER_INHERENT_BITS;
BUG_ON(PageWriteback(dst));
- sbh = sbufs = page_buffers(src);
+ sbh = page_buffers(src);
if (!page_has_buffers(dst))
create_empty_buffers(dst, sbh->b_size, 0);
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
index 81f35c5b5a40..379d22e28ed6 100644
--- a/fs/nilfs2/sysfs.c
+++ b/fs/nilfs2/sysfs.c
@@ -57,7 +57,7 @@ static void nilfs_##name##_attr_release(struct kobject *kobj) \
complete(&subgroups->sg_##name##_kobj_unregister); \
} \
static struct kobj_type nilfs_##name##_ktype = { \
- .default_attrs = nilfs_##name##_attrs, \
+ .default_groups = nilfs_##name##_groups, \
.sysfs_ops = &nilfs_##name##_attr_ops, \
.release = nilfs_##name##_attr_release, \
}
@@ -129,6 +129,7 @@ static struct attribute *nilfs_snapshot_attrs[] = {
NILFS_SNAPSHOT_ATTR_LIST(README),
NULL,
};
+ATTRIBUTE_GROUPS(nilfs_snapshot);
static ssize_t nilfs_snapshot_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
@@ -166,7 +167,7 @@ static const struct sysfs_ops nilfs_snapshot_attr_ops = {
};
static struct kobj_type nilfs_snapshot_ktype = {
- .default_attrs = nilfs_snapshot_attrs,
+ .default_groups = nilfs_snapshot_groups,
.sysfs_ops = &nilfs_snapshot_attr_ops,
.release = nilfs_snapshot_attr_release,
};
@@ -226,6 +227,7 @@ static struct attribute *nilfs_mounted_snapshots_attrs[] = {
NILFS_MOUNTED_SNAPSHOTS_ATTR_LIST(README),
NULL,
};
+ATTRIBUTE_GROUPS(nilfs_mounted_snapshots);
NILFS_DEV_INT_GROUP_OPS(mounted_snapshots, dev);
NILFS_DEV_INT_GROUP_TYPE(mounted_snapshots, dev);
@@ -339,6 +341,7 @@ static struct attribute *nilfs_checkpoints_attrs[] = {
NILFS_CHECKPOINTS_ATTR_LIST(README),
NULL,
};
+ATTRIBUTE_GROUPS(nilfs_checkpoints);
NILFS_DEV_INT_GROUP_OPS(checkpoints, dev);
NILFS_DEV_INT_GROUP_TYPE(checkpoints, dev);
@@ -428,6 +431,7 @@ static struct attribute *nilfs_segments_attrs[] = {
NILFS_SEGMENTS_ATTR_LIST(README),
NULL,
};
+ATTRIBUTE_GROUPS(nilfs_segments);
NILFS_DEV_INT_GROUP_OPS(segments, dev);
NILFS_DEV_INT_GROUP_TYPE(segments, dev);
@@ -689,6 +693,7 @@ static struct attribute *nilfs_segctor_attrs[] = {
NILFS_SEGCTOR_ATTR_LIST(README),
NULL,
};
+ATTRIBUTE_GROUPS(nilfs_segctor);
NILFS_DEV_INT_GROUP_OPS(segctor, dev);
NILFS_DEV_INT_GROUP_TYPE(segctor, dev);
@@ -816,6 +821,7 @@ static struct attribute *nilfs_superblock_attrs[] = {
NILFS_SUPERBLOCK_ATTR_LIST(README),
NULL,
};
+ATTRIBUTE_GROUPS(nilfs_superblock);
NILFS_DEV_INT_GROUP_OPS(superblock, dev);
NILFS_DEV_INT_GROUP_TYPE(superblock, dev);
@@ -924,6 +930,7 @@ static struct attribute *nilfs_dev_attrs[] = {
NILFS_DEV_ATTR_LIST(README),
NULL,
};
+ATTRIBUTE_GROUPS(nilfs_dev);
static ssize_t nilfs_dev_attr_show(struct kobject *kobj,
struct attribute *attr, char *buf)
@@ -961,7 +968,7 @@ static const struct sysfs_ops nilfs_dev_attr_ops = {
};
static struct kobj_type nilfs_dev_ktype = {
- .default_attrs = nilfs_dev_attrs,
+ .default_groups = nilfs_dev_groups,
.sysfs_ops = &nilfs_dev_attr_ops,
.release = nilfs_dev_attr_release,
};
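These nilfs2 hunks are instances of the tree-wide conversion away from kobj_type::default_attrs: the ATTRIBUTE_GROUPS() macro derives a <name>_groups array (via an intermediate <name>_group) from an existing NULL-terminated <name>_attrs array, so each conversion is mechanical. In sketch form (the foo_* names are illustrative):

	static struct attribute *foo_attrs[] = {
		&foo_bar_attr.attr,
		NULL,				/* must stay NULL-terminated */
	};
	ATTRIBUTE_GROUPS(foo);			/* generates foo_groups */

	static struct kobj_type foo_ktype = {
		.default_groups = foo_groups,	/* was: .default_attrs = foo_attrs */
	};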
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index e85e13c50d6d..829dd4a61b66 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -19,7 +19,25 @@
#include <linux/fdtable.h>
#include <linux/fsnotify_backend.h>
-int dir_notify_enable __read_mostly = 1;
+static int dir_notify_enable __read_mostly = 1;
+#ifdef CONFIG_SYSCTL
+static struct ctl_table dnotify_sysctls[] = {
+ {
+ .procname = "dir-notify-enable",
+ .data = &dir_notify_enable,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {}
+};
+static void __init dnotify_sysctl_init(void)
+{
+ register_sysctl_init("fs", dnotify_sysctls);
+}
+#else
+#define dnotify_sysctl_init() do { } while (0)
+#endif
static struct kmem_cache *dnotify_struct_cache __read_mostly;
static struct kmem_cache *dnotify_mark_cache __read_mostly;
@@ -196,7 +214,7 @@ static __u32 convert_arg(unsigned long arg)
if (arg & DN_ATTRIB)
new_mask |= FS_ATTRIB;
if (arg & DN_RENAME)
- new_mask |= FS_DN_RENAME;
+ new_mask |= FS_RENAME;
if (arg & DN_CREATE)
new_mask |= (FS_CREATE | FS_MOVED_TO);
@@ -386,6 +404,7 @@ static int __init dnotify_init(void)
dnotify_group = fsnotify_alloc_group(&dnotify_fsnotify_ops);
if (IS_ERR(dnotify_group))
panic("unable to allocate fsnotify group for dnotify\n");
+ dnotify_sysctl_init();
return 0;
}
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index b6091775aa6e..985e995d2a39 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -76,8 +76,10 @@ static bool fanotify_info_equal(struct fanotify_info *info1,
struct fanotify_info *info2)
{
if (info1->dir_fh_totlen != info2->dir_fh_totlen ||
+ info1->dir2_fh_totlen != info2->dir2_fh_totlen ||
info1->file_fh_totlen != info2->file_fh_totlen ||
- info1->name_len != info2->name_len)
+ info1->name_len != info2->name_len ||
+ info1->name2_len != info2->name2_len)
return false;
if (info1->dir_fh_totlen &&
@@ -85,14 +87,24 @@ static bool fanotify_info_equal(struct fanotify_info *info1,
fanotify_info_dir_fh(info2)))
return false;
+ if (info1->dir2_fh_totlen &&
+ !fanotify_fh_equal(fanotify_info_dir2_fh(info1),
+ fanotify_info_dir2_fh(info2)))
+ return false;
+
if (info1->file_fh_totlen &&
!fanotify_fh_equal(fanotify_info_file_fh(info1),
fanotify_info_file_fh(info2)))
return false;
- return !info1->name_len ||
- !memcmp(fanotify_info_name(info1), fanotify_info_name(info2),
- info1->name_len);
+ if (info1->name_len &&
+ memcmp(fanotify_info_name(info1), fanotify_info_name(info2),
+ info1->name_len))
+ return false;
+
+ return !info1->name2_len ||
+ !memcmp(fanotify_info_name2(info1), fanotify_info_name2(info2),
+ info1->name2_len);
}
static bool fanotify_name_event_equal(struct fanotify_name_event *fne1,
@@ -141,6 +153,13 @@ static bool fanotify_should_merge(struct fanotify_event *old,
if ((old->mask & FS_ISDIR) != (new->mask & FS_ISDIR))
return false;
+ /*
+ * FAN_RENAME event is reported with special info record types,
+ * so we cannot merge it with other events.
+ */
+ if ((old->mask & FAN_RENAME) != (new->mask & FAN_RENAME))
+ return false;
+
switch (old->type) {
case FANOTIFY_EVENT_TYPE_PATH:
return fanotify_path_equal(fanotify_event_path(old),
@@ -272,8 +291,9 @@ out:
*/
static u32 fanotify_group_event_mask(struct fsnotify_group *group,
struct fsnotify_iter_info *iter_info,
- u32 event_mask, const void *data,
- int data_type, struct inode *dir)
+ u32 *match_mask, u32 event_mask,
+ const void *data, int data_type,
+ struct inode *dir)
{
__u32 marks_mask = 0, marks_ignored_mask = 0;
__u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS |
@@ -299,7 +319,7 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
return 0;
}
- fsnotify_foreach_obj_type(type) {
+ fsnotify_foreach_iter_type(type) {
if (!fsnotify_iter_should_report_type(iter_info, type))
continue;
mark = iter_info->marks[type];
@@ -318,11 +338,14 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group,
* If the event is on a child and this mark is on a parent not
* watching children, don't send it!
*/
- if (type == FSNOTIFY_OBJ_TYPE_PARENT &&
+ if (type == FSNOTIFY_ITER_TYPE_PARENT &&
!(mark->mask & FS_EVENT_ON_CHILD))
continue;
marks_mask |= mark->mask;
+
+ /* Record the mark types of this group that matched the event */
+ *match_mask |= 1U << type;
}
test_mask = event_mask & marks_mask & ~marks_ignored_mask;
@@ -411,7 +434,7 @@ static int fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode,
* be zero in that case if encoding fh len failed.
*/
err = -ENOENT;
- if (fh_len < 4 || WARN_ON_ONCE(fh_len % 4))
+ if (fh_len < 4 || WARN_ON_ONCE(fh_len % 4) || fh_len > MAX_HANDLE_SZ)
goto out_err;
/* No external buffer in a variable size allocated fh */
@@ -458,17 +481,41 @@ out_err:
}
/*
- * The inode to use as identifier when reporting fid depends on the event.
- * Report the modified directory inode on dirent modification events.
- * Report the "victim" inode otherwise.
+ * FAN_REPORT_FID is ambiguous in that it reports the fid of the child for
+ * some events and the fid of the parent for create/delete/move events.
+ *
+ * With the FAN_REPORT_TARGET_FID flag, the fid of the child is also
+ * reported in create/delete/move events, in addition to the fid of the
+ * parent and the name of the child.
+ */
+static inline bool fanotify_report_child_fid(unsigned int fid_mode, u32 mask)
+{
+ if (mask & ALL_FSNOTIFY_DIRENT_EVENTS)
+ return (fid_mode & FAN_REPORT_TARGET_FID);
+
+ return (fid_mode & FAN_REPORT_FID) && !(mask & FAN_ONDIR);
+}
+
+/*
+ * The inode to use as identifier when reporting fid depends on the event
+ * and the group flags.
+ *
+ * With the group flag FAN_REPORT_TARGET_FID, always report the child fid.
+ *
+ * Without the group flag FAN_REPORT_TARGET_FID, report the modified directory
+ * fid on dirent events and the child fid otherwise.
+ *
* For example:
- * FS_ATTRIB reports the child inode even if reported on a watched parent.
- * FS_CREATE reports the modified dir inode and not the created inode.
+ * FS_ATTRIB reports the child fid even if reported on a watched parent.
+ * FS_CREATE reports the modified dir fid without FAN_REPORT_TARGET_FID,
+ * and reports the created child fid with FAN_REPORT_TARGET_FID.
*/
static struct inode *fanotify_fid_inode(u32 event_mask, const void *data,
- int data_type, struct inode *dir)
+ int data_type, struct inode *dir,
+ unsigned int fid_mode)
{
- if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS)
+ if ((event_mask & ALL_FSNOTIFY_DIRENT_EVENTS) &&
+ !(fid_mode & FAN_REPORT_TARGET_FID))
return dir;
return fsnotify_data_inode(data, data_type);
@@ -552,25 +599,34 @@ static struct fanotify_event *fanotify_alloc_fid_event(struct inode *id,
return &ffe->fae;
}
-static struct fanotify_event *fanotify_alloc_name_event(struct inode *id,
+static struct fanotify_event *fanotify_alloc_name_event(struct inode *dir,
__kernel_fsid_t *fsid,
const struct qstr *name,
struct inode *child,
+ struct dentry *moved,
unsigned int *hash,
gfp_t gfp)
{
struct fanotify_name_event *fne;
struct fanotify_info *info;
struct fanotify_fh *dfh, *ffh;
- unsigned int dir_fh_len = fanotify_encode_fh_len(id);
+ struct inode *dir2 = moved ? d_inode(moved->d_parent) : NULL;
+ const struct qstr *name2 = moved ? &moved->d_name : NULL;
+ unsigned int dir_fh_len = fanotify_encode_fh_len(dir);
+ unsigned int dir2_fh_len = fanotify_encode_fh_len(dir2);
unsigned int child_fh_len = fanotify_encode_fh_len(child);
- unsigned int size;
-
- size = sizeof(*fne) + FANOTIFY_FH_HDR_LEN + dir_fh_len;
+ unsigned long name_len = name ? name->len : 0;
+ unsigned long name2_len = name2 ? name2->len : 0;
+ unsigned int len, size;
+
+ /* Reserve terminating null byte even for empty name */
+ size = sizeof(*fne) + name_len + name2_len + 2;
+ if (dir_fh_len)
+ size += FANOTIFY_FH_HDR_LEN + dir_fh_len;
+ if (dir2_fh_len)
+ size += FANOTIFY_FH_HDR_LEN + dir2_fh_len;
if (child_fh_len)
size += FANOTIFY_FH_HDR_LEN + child_fh_len;
- if (name)
- size += name->len + 1;
fne = kmalloc(size, gfp);
if (!fne)
return NULL;
@@ -580,24 +636,41 @@ static struct fanotify_event *fanotify_alloc_name_event(struct inode *id,
*hash ^= fanotify_hash_fsid(fsid);
info = &fne->info;
fanotify_info_init(info);
- dfh = fanotify_info_dir_fh(info);
- info->dir_fh_totlen = fanotify_encode_fh(dfh, id, dir_fh_len, hash, 0);
+ if (dir_fh_len) {
+ dfh = fanotify_info_dir_fh(info);
+ len = fanotify_encode_fh(dfh, dir, dir_fh_len, hash, 0);
+ fanotify_info_set_dir_fh(info, len);
+ }
+ if (dir2_fh_len) {
+ dfh = fanotify_info_dir2_fh(info);
+ len = fanotify_encode_fh(dfh, dir2, dir2_fh_len, hash, 0);
+ fanotify_info_set_dir2_fh(info, len);
+ }
if (child_fh_len) {
ffh = fanotify_info_file_fh(info);
- info->file_fh_totlen = fanotify_encode_fh(ffh, child,
- child_fh_len, hash, 0);
+ len = fanotify_encode_fh(ffh, child, child_fh_len, hash, 0);
+ fanotify_info_set_file_fh(info, len);
}
- if (name) {
- long salt = name->len;
-
+ if (name_len) {
fanotify_info_copy_name(info, name);
- *hash ^= full_name_hash((void *)salt, name->name, name->len);
+ *hash ^= full_name_hash((void *)name_len, name->name, name_len);
+ }
+ if (name2_len) {
+ fanotify_info_copy_name2(info, name2);
+ *hash ^= full_name_hash((void *)name2_len, name2->name,
+ name2_len);
}
- pr_debug("%s: ino=%lu size=%u dir_fh_len=%u child_fh_len=%u name_len=%u name='%.*s'\n",
- __func__, id->i_ino, size, dir_fh_len, child_fh_len,
+ pr_debug("%s: size=%u dir_fh_len=%u child_fh_len=%u name_len=%u name='%.*s'\n",
+ __func__, size, dir_fh_len, child_fh_len,
info->name_len, info->name_len, fanotify_info_name(info));
+ if (dir2_fh_len) {
+ pr_debug("%s: dir2_fh_len=%u name2_len=%u name2='%.*s'\n",
+ __func__, dir2_fh_len, info->name2_len,
+ info->name2_len, fanotify_info_name2(info));
+ }
+
return &fne->fae;
}
@@ -639,19 +712,21 @@ static struct fanotify_event *fanotify_alloc_error_event(
return &fee->fae;
}
-static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
- u32 mask, const void *data,
- int data_type, struct inode *dir,
- const struct qstr *file_name,
- __kernel_fsid_t *fsid)
+static struct fanotify_event *fanotify_alloc_event(
+ struct fsnotify_group *group,
+ u32 mask, const void *data, int data_type,
+ struct inode *dir, const struct qstr *file_name,
+ __kernel_fsid_t *fsid, u32 match_mask)
{
struct fanotify_event *event = NULL;
gfp_t gfp = GFP_KERNEL_ACCOUNT;
- struct inode *id = fanotify_fid_inode(mask, data, data_type, dir);
+ unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
+ struct inode *id = fanotify_fid_inode(mask, data, data_type, dir,
+ fid_mode);
struct inode *dirid = fanotify_dfid_inode(mask, data, data_type, dir);
const struct path *path = fsnotify_data_path(data, data_type);
- unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS);
struct mem_cgroup *old_memcg;
+ struct dentry *moved = NULL;
struct inode *child = NULL;
bool name_event = false;
unsigned int hash = 0;
@@ -660,11 +735,10 @@ static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
if ((fid_mode & FAN_REPORT_DIR_FID) && dirid) {
/*
- * With both flags FAN_REPORT_DIR_FID and FAN_REPORT_FID, we
- * report the child fid for events reported on a non-dir child
+ * For certain events and group flags, report the child fid
* in addition to reporting the parent fid and maybe child name.
*/
- if ((fid_mode & FAN_REPORT_FID) && id != dirid && !ondir)
+ if (fanotify_report_child_fid(fid_mode, mask) && id != dirid)
child = id;
id = dirid;
@@ -688,6 +762,38 @@ static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
} else if ((mask & ALL_FSNOTIFY_DIRENT_EVENTS) || !ondir) {
name_event = true;
}
+
+ /*
+ * In the special case of FAN_RENAME event, use the match_mask
+ * to determine if we need to report only the old parent+name,
+ * only the new parent+name or both.
+ * 'dirid' and 'file_name' are the old parent+name and
+ * 'moved' has the new parent+name.
+ */
+ if (mask & FAN_RENAME) {
+ bool report_old, report_new;
+
+ if (WARN_ON_ONCE(!match_mask))
+ return NULL;
+
+ /* Report both old and new parent+name if sb watching */
+ report_old = report_new =
+ match_mask & (1U << FSNOTIFY_ITER_TYPE_SB);
+ report_old |=
+ match_mask & (1U << FSNOTIFY_ITER_TYPE_INODE);
+ report_new |=
+ match_mask & (1U << FSNOTIFY_ITER_TYPE_INODE2);
+
+ if (!report_old) {
+ /* Do not report old parent+name */
+ dirid = NULL;
+ file_name = NULL;
+ }
+ if (report_new) {
+ /* Report new parent+name */
+ moved = fsnotify_data_dentry(data, data_type);
+ }
+ }
}
/*
@@ -709,9 +815,9 @@ static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group,
} else if (fanotify_is_error_event(mask)) {
event = fanotify_alloc_error_event(group, fsid, data,
data_type, &hash);
- } else if (name_event && (file_name || child)) {
- event = fanotify_alloc_name_event(id, fsid, file_name, child,
- &hash, gfp);
+ } else if (name_event && (file_name || moved || child)) {
+ event = fanotify_alloc_name_event(dirid, fsid, file_name, child,
+ moved, &hash, gfp);
} else if (fid_mode) {
event = fanotify_alloc_fid_event(id, fsid, &hash, gfp);
} else {
@@ -746,7 +852,7 @@ static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info)
int type;
__kernel_fsid_t fsid = {};
- fsnotify_foreach_obj_type(type) {
+ fsnotify_foreach_iter_type(type) {
struct fsnotify_mark_connector *conn;
if (!fsnotify_iter_should_report_type(iter_info, type))
@@ -800,6 +906,7 @@ static int fanotify_handle_event(struct fsnotify_group *group, u32 mask,
struct fanotify_event *event;
struct fsnotify_event *fsn_event;
__kernel_fsid_t fsid = {};
+ u32 match_mask = 0;
BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS);
BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY);
@@ -821,15 +928,17 @@ static int fanotify_handle_event(struct fsnotify_group *group, u32 mask,
BUILD_BUG_ON(FAN_OPEN_EXEC != FS_OPEN_EXEC);
BUILD_BUG_ON(FAN_OPEN_EXEC_PERM != FS_OPEN_EXEC_PERM);
BUILD_BUG_ON(FAN_FS_ERROR != FS_ERROR);
+ BUILD_BUG_ON(FAN_RENAME != FS_RENAME);
- BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 20);
+ BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 21);
- mask = fanotify_group_event_mask(group, iter_info, mask, data,
- data_type, dir);
+ mask = fanotify_group_event_mask(group, iter_info, &match_mask,
+ mask, data, data_type, dir);
if (!mask)
return 0;
- pr_debug("%s: group=%p mask=%x\n", __func__, group, mask);
+ pr_debug("%s: group=%p mask=%x report_mask=%x\n", __func__,
+ group, mask, match_mask);
if (fanotify_is_perm_event(mask)) {
/*
@@ -848,7 +957,7 @@ static int fanotify_handle_event(struct fsnotify_group *group, u32 mask,
}
event = fanotify_alloc_event(group, mask, data, data_type, dir,
- file_name, &fsid);
+ file_name, &fsid, match_mask);
ret = -ENOMEM;
if (unlikely(!event)) {
/*
diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h
index d25f500bf7e7..a3d5b751cac5 100644
--- a/fs/notify/fanotify/fanotify.h
+++ b/fs/notify/fanotify/fanotify.h
@@ -40,15 +40,45 @@ struct fanotify_fh {
struct fanotify_info {
/* size of dir_fh/file_fh including fanotify_fh hdr size */
u8 dir_fh_totlen;
+ u8 dir2_fh_totlen;
u8 file_fh_totlen;
u8 name_len;
- u8 pad;
+ u8 name2_len;
+ u8 pad[3];
unsigned char buf[];
/*
* (struct fanotify_fh) dir_fh starts at buf[0]
- * (optional) file_fh starts at buf[dir_fh_totlen]
- * name starts at buf[dir_fh_totlen + file_fh_totlen]
+ * (optional) dir2_fh starts at buf[dir_fh_totlen]
+ * (optional) file_fh starts at buf[dir_fh_totlen + dir2_fh_totlen]
+ * name starts at buf[dir_fh_totlen + dir2_fh_totlen + file_fh_totlen]
+ * ...
*/
+#define FANOTIFY_DIR_FH_SIZE(info) ((info)->dir_fh_totlen)
+#define FANOTIFY_DIR2_FH_SIZE(info) ((info)->dir2_fh_totlen)
+#define FANOTIFY_FILE_FH_SIZE(info) ((info)->file_fh_totlen)
+#define FANOTIFY_NAME_SIZE(info) ((info)->name_len + 1)
+#define FANOTIFY_NAME2_SIZE(info) ((info)->name2_len + 1)
+
+#define FANOTIFY_DIR_FH_OFFSET(info) 0
+#define FANOTIFY_DIR2_FH_OFFSET(info) \
+ (FANOTIFY_DIR_FH_OFFSET(info) + FANOTIFY_DIR_FH_SIZE(info))
+#define FANOTIFY_FILE_FH_OFFSET(info) \
+ (FANOTIFY_DIR2_FH_OFFSET(info) + FANOTIFY_DIR2_FH_SIZE(info))
+#define FANOTIFY_NAME_OFFSET(info) \
+ (FANOTIFY_FILE_FH_OFFSET(info) + FANOTIFY_FILE_FH_SIZE(info))
+#define FANOTIFY_NAME2_OFFSET(info) \
+ (FANOTIFY_NAME_OFFSET(info) + FANOTIFY_NAME_SIZE(info))
+
+#define FANOTIFY_DIR_FH_BUF(info) \
+ ((info)->buf + FANOTIFY_DIR_FH_OFFSET(info))
+#define FANOTIFY_DIR2_FH_BUF(info) \
+ ((info)->buf + FANOTIFY_DIR2_FH_OFFSET(info))
+#define FANOTIFY_FILE_FH_BUF(info) \
+ ((info)->buf + FANOTIFY_FILE_FH_OFFSET(info))
+#define FANOTIFY_NAME_BUF(info) \
+ ((info)->buf + FANOTIFY_NAME_OFFSET(info))
+#define FANOTIFY_NAME2_BUF(info) \
+ ((info)->buf + FANOTIFY_NAME2_OFFSET(info))
} __aligned(4);
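A worked example of how the offset macros compose (sizes assumed for illustration): an event carrying only a dir fh of total length 12 and the 3-byte name "abc", with no dir2 or file fh:

	struct fanotify_info info = { .dir_fh_totlen = 12, .name_len = 3 };

	FANOTIFY_DIR_FH_OFFSET(&info)	/* == 0			  */
	FANOTIFY_DIR2_FH_OFFSET(&info)	/* == 12 (record absent)  */
	FANOTIFY_FILE_FH_OFFSET(&info)	/* == 12 (record absent)  */
	FANOTIFY_NAME_OFFSET(&info)	/* == 12, holds "abc\0"	  */
	FANOTIFY_NAME2_OFFSET(&info)	/* == 16, past the '\0'	  */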
static inline bool fanotify_fh_has_ext_buf(struct fanotify_fh *fh)
@@ -87,7 +117,21 @@ static inline struct fanotify_fh *fanotify_info_dir_fh(struct fanotify_info *inf
{
BUILD_BUG_ON(offsetof(struct fanotify_info, buf) % 4);
- return (struct fanotify_fh *)info->buf;
+ return (struct fanotify_fh *)FANOTIFY_DIR_FH_BUF(info);
+}
+
+static inline int fanotify_info_dir2_fh_len(struct fanotify_info *info)
+{
+ if (!info->dir2_fh_totlen ||
+ WARN_ON_ONCE(info->dir2_fh_totlen < FANOTIFY_FH_HDR_LEN))
+ return 0;
+
+ return info->dir2_fh_totlen - FANOTIFY_FH_HDR_LEN;
+}
+
+static inline struct fanotify_fh *fanotify_info_dir2_fh(struct fanotify_info *info)
+{
+ return (struct fanotify_fh *)FANOTIFY_DIR2_FH_BUF(info);
}
static inline int fanotify_info_file_fh_len(struct fanotify_info *info)
@@ -101,32 +145,90 @@ static inline int fanotify_info_file_fh_len(struct fanotify_info *info)
static inline struct fanotify_fh *fanotify_info_file_fh(struct fanotify_info *info)
{
- return (struct fanotify_fh *)(info->buf + info->dir_fh_totlen);
+ return (struct fanotify_fh *)FANOTIFY_FILE_FH_BUF(info);
}
-static inline const char *fanotify_info_name(struct fanotify_info *info)
+static inline char *fanotify_info_name(struct fanotify_info *info)
{
- return info->buf + info->dir_fh_totlen + info->file_fh_totlen;
+ if (!info->name_len)
+ return NULL;
+
+ return FANOTIFY_NAME_BUF(info);
+}
+
+static inline char *fanotify_info_name2(struct fanotify_info *info)
+{
+ if (!info->name2_len)
+ return NULL;
+
+ return FANOTIFY_NAME2_BUF(info);
}
static inline void fanotify_info_init(struct fanotify_info *info)
{
+ BUILD_BUG_ON(FANOTIFY_FH_HDR_LEN + MAX_HANDLE_SZ > U8_MAX);
+ BUILD_BUG_ON(NAME_MAX > U8_MAX);
+
info->dir_fh_totlen = 0;
+ info->dir2_fh_totlen = 0;
info->file_fh_totlen = 0;
info->name_len = 0;
+ info->name2_len = 0;
+}
+
+/* These set/copy helpers MUST be called in order */
+static inline void fanotify_info_set_dir_fh(struct fanotify_info *info,
+ unsigned int totlen)
+{
+ if (WARN_ON_ONCE(info->dir2_fh_totlen > 0) ||
+ WARN_ON_ONCE(info->file_fh_totlen > 0) ||
+ WARN_ON_ONCE(info->name_len > 0) ||
+ WARN_ON_ONCE(info->name2_len > 0))
+ return;
+
+ info->dir_fh_totlen = totlen;
}
-static inline unsigned int fanotify_info_len(struct fanotify_info *info)
+static inline void fanotify_info_set_dir2_fh(struct fanotify_info *info,
+ unsigned int totlen)
{
- return info->dir_fh_totlen + info->file_fh_totlen + info->name_len;
+ if (WARN_ON_ONCE(info->file_fh_totlen > 0) ||
+ WARN_ON_ONCE(info->name_len > 0) ||
+ WARN_ON_ONCE(info->name2_len > 0))
+ return;
+
+ info->dir2_fh_totlen = totlen;
+}
+
+static inline void fanotify_info_set_file_fh(struct fanotify_info *info,
+ unsigned int totlen)
+{
+ if (WARN_ON_ONCE(info->name_len > 0) ||
+ WARN_ON_ONCE(info->name2_len > 0))
+ return;
+
+ info->file_fh_totlen = totlen;
}
static inline void fanotify_info_copy_name(struct fanotify_info *info,
const struct qstr *name)
{
+ if (WARN_ON_ONCE(name->len > NAME_MAX) ||
+ WARN_ON_ONCE(info->name2_len > 0))
+ return;
+
info->name_len = name->len;
- strcpy(info->buf + info->dir_fh_totlen + info->file_fh_totlen,
- name->name);
+ strcpy(fanotify_info_name(info), name->name);
+}
+
+static inline void fanotify_info_copy_name2(struct fanotify_info *info,
+ const struct qstr *name)
+{
+ if (WARN_ON_ONCE(name->len > NAME_MAX))
+ return;
+
+ info->name2_len = name->len;
+ strcpy(fanotify_info_name2(info), name->name);
}
/*
@@ -271,6 +373,13 @@ static inline int fanotify_event_dir_fh_len(struct fanotify_event *event)
return info ? fanotify_info_dir_fh_len(info) : 0;
}
+static inline int fanotify_event_dir2_fh_len(struct fanotify_event *event)
+{
+ struct fanotify_info *info = fanotify_event_info(event);
+
+ return info ? fanotify_info_dir2_fh_len(info) : 0;
+}
+
static inline bool fanotify_event_has_object_fh(struct fanotify_event *event)
{
/* For error events, even zeroed fh are reported. */
@@ -284,6 +393,17 @@ static inline bool fanotify_event_has_dir_fh(struct fanotify_event *event)
return fanotify_event_dir_fh_len(event) > 0;
}
+static inline bool fanotify_event_has_dir2_fh(struct fanotify_event *event)
+{
+ return fanotify_event_dir2_fh_len(event) > 0;
+}
+
+static inline bool fanotify_event_has_any_dir_fh(struct fanotify_event *event)
+{
+ return fanotify_event_has_dir_fh(event) ||
+ fanotify_event_has_dir2_fh(event);
+}
+
struct fanotify_path_event {
struct fanotify_event fae;
struct path path;
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 559bc1e9926d..2ff6bd85ba8f 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -59,7 +59,7 @@ static int fanotify_max_queued_events __read_mostly;
static long ft_zero = 0;
static long ft_int_max = INT_MAX;
-struct ctl_table fanotify_table[] = {
+static struct ctl_table fanotify_table[] = {
{
.procname = "max_user_groups",
.data = &init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS],
@@ -88,6 +88,13 @@ struct ctl_table fanotify_table[] = {
},
{ }
};
+
+static void __init fanotify_sysctls_init(void)
+{
+ register_sysctl("fs/fanotify", fanotify_table);
+}
+#else
+#define fanotify_sysctls_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
/*
@@ -129,12 +136,28 @@ static int fanotify_fid_info_len(int fh_len, int name_len)
FANOTIFY_EVENT_ALIGN);
}
+/* FAN_RENAME may have one or two dir+name info records */
+static int fanotify_dir_name_info_len(struct fanotify_event *event)
+{
+ struct fanotify_info *info = fanotify_event_info(event);
+ int dir_fh_len = fanotify_event_dir_fh_len(event);
+ int dir2_fh_len = fanotify_event_dir2_fh_len(event);
+ int info_len = 0;
+
+ if (dir_fh_len)
+ info_len += fanotify_fid_info_len(dir_fh_len,
+ info->name_len);
+ if (dir2_fh_len)
+ info_len += fanotify_fid_info_len(dir2_fh_len,
+ info->name2_len);
+
+ return info_len;
+}
+
static size_t fanotify_event_len(unsigned int info_mode,
struct fanotify_event *event)
{
size_t event_len = FAN_EVENT_METADATA_LEN;
- struct fanotify_info *info;
- int dir_fh_len;
int fh_len;
int dot_len = 0;
@@ -144,11 +167,8 @@ static size_t fanotify_event_len(unsigned int info_mode,
if (fanotify_is_error_event(event->mask))
event_len += FANOTIFY_ERROR_INFO_LEN;
- info = fanotify_event_info(event);
-
- if (fanotify_event_has_dir_fh(event)) {
- dir_fh_len = fanotify_event_dir_fh_len(event);
- event_len += fanotify_fid_info_len(dir_fh_len, info->name_len);
+ if (fanotify_event_has_any_dir_fh(event)) {
+ event_len += fanotify_dir_name_info_len(event);
} else if ((info_mode & FAN_REPORT_NAME) &&
(event->mask & FAN_ONDIR)) {
/*
@@ -332,11 +352,10 @@ static int process_access_response(struct fsnotify_group *group,
static size_t copy_error_info_to_user(struct fanotify_event *event,
char __user *buf, int count)
{
- struct fanotify_event_info_error info;
+ struct fanotify_event_info_error info = { };
struct fanotify_error_event *fee = FANOTIFY_EE(event);
info.hdr.info_type = FAN_EVENT_INFO_TYPE_ERROR;
- info.hdr.pad = 0;
info.hdr.len = FANOTIFY_ERROR_INFO_LEN;
if (WARN_ON(count < info.hdr.len))
@@ -380,6 +399,8 @@ static int copy_fid_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh,
return -EFAULT;
break;
case FAN_EVENT_INFO_TYPE_DFID_NAME:
+ case FAN_EVENT_INFO_TYPE_OLD_DFID_NAME:
+ case FAN_EVENT_INFO_TYPE_NEW_DFID_NAME:
if (WARN_ON_ONCE(!name || !name_len))
return -EFAULT;
break;
@@ -479,11 +500,19 @@ static int copy_info_records_to_user(struct fanotify_event *event,
unsigned int pidfd_mode = info_mode & FAN_REPORT_PIDFD;
/*
- * Event info records order is as follows: dir fid + name, child fid.
+ * Event info records order is as follows:
+ * 1. dir fid + name
+ * 2. (optional) new dir fid + new name
+ * 3. (optional) child fid
*/
if (fanotify_event_has_dir_fh(event)) {
info_type = info->name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME :
FAN_EVENT_INFO_TYPE_DFID;
+
+ /* FAN_RENAME uses special info types */
+ if (event->mask & FAN_RENAME)
+ info_type = FAN_EVENT_INFO_TYPE_OLD_DFID_NAME;
+
ret = copy_fid_info_to_user(fanotify_event_fsid(event),
fanotify_info_dir_fh(info),
info_type,
@@ -497,6 +526,22 @@ static int copy_info_records_to_user(struct fanotify_event *event,
total_bytes += ret;
}
+ /* New dir fid+name may be reported in addition to old dir fid+name */
+ if (fanotify_event_has_dir2_fh(event)) {
+ info_type = FAN_EVENT_INFO_TYPE_NEW_DFID_NAME;
+ ret = copy_fid_info_to_user(fanotify_event_fsid(event),
+ fanotify_info_dir2_fh(info),
+ info_type,
+ fanotify_info_name2(info),
+ info->name2_len, buf, count);
+ if (ret < 0)
+ return ret;
+
+ buf += ret;
+ count -= ret;
+ total_bytes += ret;
+ }
+
if (fanotify_event_has_object_fh(event)) {
const char *dot = NULL;
int dot_len = 0;
@@ -656,9 +701,6 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
if (fanotify_is_perm_event(event->mask))
FANOTIFY_PERM(event)->fd = fd;
- if (f)
- fd_install(fd, f);
-
if (info_mode) {
ret = copy_info_records_to_user(event, info, info_mode, pidfd,
buf, count);
@@ -666,6 +708,9 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
goto out_close_fd;
}
+ if (f)
+ fd_install(fd, f);
+
return metadata.event_len;
out_close_fd:
@@ -1057,7 +1102,7 @@ static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
fsnotify_connp_t *connp,
- unsigned int type,
+ unsigned int obj_type,
__kernel_fsid_t *fsid)
{
struct ucounts *ucounts = group->fanotify_data.ucounts;
@@ -1080,7 +1125,7 @@ static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
}
fsnotify_init_mark(mark, group);
- ret = fsnotify_add_mark_locked(mark, connp, type, 0, fsid);
+ ret = fsnotify_add_mark_locked(mark, connp, obj_type, 0, fsid);
if (ret) {
fsnotify_put_mark(mark);
goto out_dec_ucounts;
@@ -1105,7 +1150,7 @@ static int fanotify_group_init_error_pool(struct fsnotify_group *group)
}
static int fanotify_add_mark(struct fsnotify_group *group,
- fsnotify_connp_t *connp, unsigned int type,
+ fsnotify_connp_t *connp, unsigned int obj_type,
__u32 mask, unsigned int flags,
__kernel_fsid_t *fsid)
{
@@ -1116,7 +1161,7 @@ static int fanotify_add_mark(struct fsnotify_group *group,
mutex_lock(&group->mark_mutex);
fsn_mark = fsnotify_find_mark(connp, group);
if (!fsn_mark) {
- fsn_mark = fanotify_add_new_mark(group, connp, type, fsid);
+ fsn_mark = fanotify_add_new_mark(group, connp, obj_type, fsid);
if (IS_ERR(fsn_mark)) {
mutex_unlock(&group->mark_mutex);
return PTR_ERR(fsn_mark);
@@ -1275,6 +1320,15 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
if ((fid_mode & FAN_REPORT_NAME) && !(fid_mode & FAN_REPORT_DIR_FID))
return -EINVAL;
+ /*
+ * FAN_REPORT_TARGET_FID requires FAN_REPORT_NAME and FAN_REPORT_FID
+ * and is used as an indication to report both dir and child fid on all
+ * dirent events.
+ */
+ if ((fid_mode & FAN_REPORT_TARGET_FID) &&
+ (!(fid_mode & FAN_REPORT_NAME) || !(fid_mode & FAN_REPORT_FID)))
+ return -EINVAL;
+
f_flags = O_RDWR | FMODE_NONOTIFY;
if (flags & FAN_CLOEXEC)
f_flags |= O_CLOEXEC;
@@ -1536,6 +1590,14 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask,
(!fid_mode || mark_type == FAN_MARK_MOUNT))
goto fput_and_out;
+ /*
+ * FAN_RENAME uses special info type records to report the old and
+ * new parent+name. Reporting only the old and new parent ids, without
+ * the names, is less useful and was not implemented.
+ */
+ if (mask & FAN_RENAME && !(fid_mode & FAN_REPORT_NAME))
+ goto fput_and_out;
+
if (flags & FAN_MARK_FLUSH) {
ret = 0;
if (mark_type == FAN_MARK_MOUNT)
@@ -1667,7 +1729,7 @@ static int __init fanotify_user_setup(void)
FANOTIFY_DEFAULT_MAX_USER_MARKS);
BUILD_BUG_ON(FANOTIFY_INIT_FLAGS & FANOTIFY_INTERNAL_GROUP_FLAGS);
- BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 11);
+ BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 12);
BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9);
fanotify_mark_cache = KMEM_CACHE(fsnotify_mark,
@@ -1685,6 +1747,7 @@ static int __init fanotify_user_setup(void)
init_user_ns.ucount_max[UCOUNT_FANOTIFY_GROUPS] =
FANOTIFY_DEFAULT_MAX_GROUPS;
init_user_ns.ucount_max[UCOUNT_FANOTIFY_MARKS] = max_marks;
+ fanotify_sysctls_init();
return 0;
}
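Putting the uAPI pieces together, a userspace listener for the new event looks roughly like this; a hedged sketch assuming a kernel with this series applied, with error handling omitted (FAN_RENAME, FAN_REPORT_DFID_NAME, and the OLD/NEW_DFID_NAME info types come from <linux/fanotify.h>; FAN_MARK_FILESYSTEM requires CAP_SYS_ADMIN):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/fanotify.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[8192];
		/* FAN_RENAME requires a fid mode that includes FAN_REPORT_NAME */
		int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_DFID_NAME,
				       O_RDONLY);

		fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM,
			      FAN_RENAME | FAN_ONDIR, AT_FDCWD, "/tmp");

		for (;;) {
			ssize_t len = read(fd, buf, sizeof(buf));
			struct fanotify_event_metadata *ev;

			for (ev = (void *)buf; FAN_EVENT_OK(ev, len);
			     ev = FAN_EVENT_NEXT(ev, len)) {
				/* Info records follow the metadata header */
				char *p = (char *)(ev + 1);

				while (p < (char *)ev + ev->event_len) {
					struct fanotify_event_info_header *hdr =
						(void *)p;

					if (hdr->info_type ==
					    FAN_EVENT_INFO_TYPE_OLD_DFID_NAME)
						puts("old dir+name record");
					else if (hdr->info_type ==
						 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME)
						puts("new dir+name record");
					p += hdr->len;
				}
			}
		}
	}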
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 4034ca566f95..ab81a0776ece 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -279,6 +279,18 @@ static int fsnotify_handle_event(struct fsnotify_group *group, __u32 mask,
WARN_ON_ONCE(fsnotify_iter_vfsmount_mark(iter_info)))
return 0;
+ /*
+ * For FS_RENAME, 'dir' is the old dir and 'data' is the new dentry.
+ * The only ->handle_inode_event() backend that supports FS_RENAME is
+ * dnotify, where it means the file was renamed within the same parent.
+ */
+ if (mask & FS_RENAME) {
+ struct dentry *moved = fsnotify_data_dentry(data, data_type);
+
+ if (dir != moved->d_parent->d_inode)
+ return 0;
+ }
+
if (parent_mark) {
/*
* parent_mark indicates that the parent inode is watching
@@ -330,7 +342,7 @@ static int send_to_group(__u32 mask, const void *data, int data_type,
/* clear ignored on inode modification */
if (mask & FS_MODIFY) {
- fsnotify_foreach_obj_type(type) {
+ fsnotify_foreach_iter_type(type) {
if (!fsnotify_iter_should_report_type(iter_info, type))
continue;
mark = iter_info->marks[type];
@@ -340,7 +352,7 @@ static int send_to_group(__u32 mask, const void *data, int data_type,
}
}
- fsnotify_foreach_obj_type(type) {
+ fsnotify_foreach_iter_type(type) {
if (!fsnotify_iter_should_report_type(iter_info, type))
continue;
mark = iter_info->marks[type];
@@ -405,7 +417,7 @@ static unsigned int fsnotify_iter_select_report_types(
int type;
/* Choose max prio group among groups of all queue heads */
- fsnotify_foreach_obj_type(type) {
+ fsnotify_foreach_iter_type(type) {
mark = iter_info->marks[type];
if (mark &&
fsnotify_compare_groups(max_prio_group, mark->group) > 0)
@@ -417,7 +429,7 @@ static unsigned int fsnotify_iter_select_report_types(
/* Set the report mask for marks from same group as max prio group */
iter_info->report_mask = 0;
- fsnotify_foreach_obj_type(type) {
+ fsnotify_foreach_iter_type(type) {
mark = iter_info->marks[type];
if (mark &&
fsnotify_compare_groups(max_prio_group, mark->group) == 0)
@@ -435,7 +447,7 @@ static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info)
{
int type;
- fsnotify_foreach_obj_type(type) {
+ fsnotify_foreach_iter_type(type) {
if (fsnotify_iter_should_report_type(iter_info, type))
iter_info->marks[type] =
fsnotify_next_mark(iter_info->marks[type]);
@@ -469,7 +481,9 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
struct super_block *sb = fsnotify_data_sb(data, data_type);
struct fsnotify_iter_info iter_info = {};
struct mount *mnt = NULL;
- struct inode *parent = NULL;
+ struct inode *inode2 = NULL;
+ struct dentry *moved;
+ int inode2_type;
int ret = 0;
__u32 test_mask, marks_mask;
@@ -479,12 +493,19 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
if (!inode) {
/* Dirent event - report on TYPE_INODE to dir */
inode = dir;
+ /* For FS_RENAME, inode is old_dir and inode2 is new_dir */
+ if (mask & FS_RENAME) {
+ moved = fsnotify_data_dentry(data, data_type);
+ inode2 = moved->d_parent->d_inode;
+ inode2_type = FSNOTIFY_ITER_TYPE_INODE2;
+ }
} else if (mask & FS_EVENT_ON_CHILD) {
/*
* Event on child - report on TYPE_PARENT to dir if it is
* watching children and on TYPE_INODE to child.
*/
- parent = dir;
+ inode2 = dir;
+ inode2_type = FSNOTIFY_ITER_TYPE_PARENT;
}
/*
@@ -497,7 +518,7 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
if (!sb->s_fsnotify_marks &&
(!mnt || !mnt->mnt_fsnotify_marks) &&
(!inode || !inode->i_fsnotify_marks) &&
- (!parent || !parent->i_fsnotify_marks))
+ (!inode2 || !inode2->i_fsnotify_marks))
return 0;
marks_mask = sb->s_fsnotify_mask;
@@ -505,8 +526,8 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
marks_mask |= mnt->mnt_fsnotify_mask;
if (inode)
marks_mask |= inode->i_fsnotify_mask;
- if (parent)
- marks_mask |= parent->i_fsnotify_mask;
+ if (inode2)
+ marks_mask |= inode2->i_fsnotify_mask;
/*
@@ -519,19 +540,19 @@ int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir,
iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
- iter_info.marks[FSNOTIFY_OBJ_TYPE_SB] =
+ iter_info.marks[FSNOTIFY_ITER_TYPE_SB] =
fsnotify_first_mark(&sb->s_fsnotify_marks);
if (mnt) {
- iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] =
+ iter_info.marks[FSNOTIFY_ITER_TYPE_VFSMOUNT] =
fsnotify_first_mark(&mnt->mnt_fsnotify_marks);
}
if (inode) {
- iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] =
+ iter_info.marks[FSNOTIFY_ITER_TYPE_INODE] =
fsnotify_first_mark(&inode->i_fsnotify_marks);
}
- if (parent) {
- iter_info.marks[FSNOTIFY_OBJ_TYPE_PARENT] =
- fsnotify_first_mark(&parent->i_fsnotify_marks);
+ if (inode2) {
+ iter_info.marks[inode2_type] =
+ fsnotify_first_mark(&inode2->i_fsnotify_marks);
}
/*
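The blanket rename of fsnotify_foreach_obj_type() to fsnotify_foreach_iter_type() in this file (and in mark.c below) reflects a deliberate split: object types describe what a mark connector attaches to, while iterator types name the slots walked when delivering one event, including pseudo-slots (PARENT, INODE2) that have no object of their own. A rough sketch of the two enums after the split (illustrative, not the exact kernel definitions):

    /* what a mark connector can attach to */
    enum fsnotify_obj_type {
            FSNOTIFY_OBJ_TYPE_ANY = -1,
            FSNOTIFY_OBJ_TYPE_INODE,
            FSNOTIFY_OBJ_TYPE_VFSMOUNT,
            FSNOTIFY_OBJ_TYPE_SB,
            FSNOTIFY_OBJ_TYPE_COUNT,
    };

    /* slots visited while delivering a single event */
    enum fsnotify_iter_type {
            FSNOTIFY_ITER_TYPE_INODE,
            FSNOTIFY_ITER_TYPE_VFSMOUNT,
            FSNOTIFY_ITER_TYPE_SB,
            FSNOTIFY_ITER_TYPE_PARENT,
            FSNOTIFY_ITER_TYPE_INODE2,
            FSNOTIFY_ITER_TYPE_COUNT,
    };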
diff --git a/fs/notify/group.c b/fs/notify/group.c
index 6a297efc4788..b7d4d64f87c2 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -58,7 +58,7 @@ void fsnotify_destroy_group(struct fsnotify_group *group)
fsnotify_group_stop_queueing(group);
/* Clear all marks for this group and queue them for destruction */
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_ALL_TYPES_MASK);
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_ANY);
/*
* Some marks can still be pinned when waiting for response from
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 29fca3284bb5..54583f62dc44 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -58,7 +58,7 @@ struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
static long it_zero = 0;
static long it_int_max = INT_MAX;
-struct ctl_table inotify_table[] = {
+static struct ctl_table inotify_table[] = {
{
.procname = "max_user_instances",
.data = &init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES],
@@ -87,6 +87,14 @@ struct ctl_table inotify_table[] = {
},
{ }
};
+
+static void __init inotify_sysctls_init(void)
+{
+ register_sysctl("fs/inotify", inotify_table);
+}
+
+#else
+#define inotify_sysctls_init() do { } while (0)
#endif /* CONFIG_SYSCTL */
static inline __u32 inotify_arg_to_mask(struct inode *inode, u32 arg)
@@ -849,6 +857,7 @@ static int __init inotify_user_setup(void)
inotify_max_queued_events = 16384;
init_user_ns.ucount_max[UCOUNT_INOTIFY_INSTANCES] = 128;
init_user_ns.ucount_max[UCOUNT_INOTIFY_WATCHES] = watches_max;
+ inotify_sysctls_init();
return 0;
}
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index fa1d99101f89..9007d6affff3 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -353,7 +353,7 @@ bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info)
{
int type;
- fsnotify_foreach_obj_type(type) {
+ fsnotify_foreach_iter_type(type) {
/* This can fail if mark is being removed */
if (!fsnotify_get_mark_safe(iter_info->marks[type])) {
__release(&fsnotify_mark_srcu);
@@ -382,7 +382,7 @@ void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info)
int type;
iter_info->srcu_idx = srcu_read_lock(&fsnotify_mark_srcu);
- fsnotify_foreach_obj_type(type)
+ fsnotify_foreach_iter_type(type)
fsnotify_put_mark_wake(iter_info->marks[type]);
}
@@ -496,7 +496,7 @@ int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
}
static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
- unsigned int type,
+ unsigned int obj_type,
__kernel_fsid_t *fsid)
{
struct inode *inode = NULL;
@@ -507,7 +507,7 @@ static int fsnotify_attach_connector_to_object(fsnotify_connp_t *connp,
return -ENOMEM;
spin_lock_init(&conn->lock);
INIT_HLIST_HEAD(&conn->list);
- conn->type = type;
+ conn->type = obj_type;
conn->obj = connp;
/* Cache fsid of filesystem containing the object */
if (fsid) {
@@ -572,7 +572,8 @@ out:
* priority, highest number first, and then by the group's location in memory.
*/
static int fsnotify_add_mark_list(struct fsnotify_mark *mark,
- fsnotify_connp_t *connp, unsigned int type,
+ fsnotify_connp_t *connp,
+ unsigned int obj_type,
int allow_dups, __kernel_fsid_t *fsid)
{
struct fsnotify_mark *lmark, *last = NULL;
@@ -580,7 +581,7 @@ static int fsnotify_add_mark_list(struct fsnotify_mark *mark,
int cmp;
int err = 0;
- if (WARN_ON(!fsnotify_valid_obj_type(type)))
+ if (WARN_ON(!fsnotify_valid_obj_type(obj_type)))
return -EINVAL;
/* Backend is expected to check for zero fsid (e.g. tmpfs) */
@@ -592,7 +593,8 @@ restart:
conn = fsnotify_grab_connector(connp);
if (!conn) {
spin_unlock(&mark->lock);
- err = fsnotify_attach_connector_to_object(connp, type, fsid);
+ err = fsnotify_attach_connector_to_object(connp, obj_type,
+ fsid);
if (err)
return err;
goto restart;
@@ -665,7 +667,7 @@ out_err:
* event types should be delivered to which group.
*/
int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
- fsnotify_connp_t *connp, unsigned int type,
+ fsnotify_connp_t *connp, unsigned int obj_type,
int allow_dups, __kernel_fsid_t *fsid)
{
struct fsnotify_group *group = mark->group;
@@ -686,7 +688,7 @@ int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
fsnotify_get_mark(mark); /* for g_list */
spin_unlock(&mark->lock);
- ret = fsnotify_add_mark_list(mark, connp, type, allow_dups, fsid);
+ ret = fsnotify_add_mark_list(mark, connp, obj_type, allow_dups, fsid);
if (ret)
goto err;
@@ -706,13 +708,14 @@ err:
}
int fsnotify_add_mark(struct fsnotify_mark *mark, fsnotify_connp_t *connp,
- unsigned int type, int allow_dups, __kernel_fsid_t *fsid)
+ unsigned int obj_type, int allow_dups,
+ __kernel_fsid_t *fsid)
{
int ret;
struct fsnotify_group *group = mark->group;
mutex_lock(&group->mark_mutex);
- ret = fsnotify_add_mark_locked(mark, connp, type, allow_dups, fsid);
+ ret = fsnotify_add_mark_locked(mark, connp, obj_type, allow_dups, fsid);
mutex_unlock(&group->mark_mutex);
return ret;
}
@@ -747,14 +750,14 @@ EXPORT_SYMBOL_GPL(fsnotify_find_mark);
/* Clear any marks in a group with given type mask */
void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
- unsigned int type_mask)
+ unsigned int obj_type)
{
struct fsnotify_mark *lmark, *mark;
LIST_HEAD(to_free);
struct list_head *head = &to_free;
/* Skip selection step if we want to clear all marks. */
- if (type_mask == FSNOTIFY_OBJ_ALL_TYPES_MASK) {
+ if (obj_type == FSNOTIFY_OBJ_TYPE_ANY) {
head = &group->marks_list;
goto clear;
}
@@ -769,7 +772,7 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
*/
mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
- if ((1U << mark->connector->type) & type_mask)
+ if (mark->connector->type == obj_type)
list_move(&mark->g_list, &to_free);
}
mutex_unlock(&group->mark_mutex);
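The same obj_type discipline reaches fsnotify_clear_marks_by_group() at the end of this file: callers now pass one object type, with FSNOTIFY_OBJ_TYPE_ANY standing in for "all of them", so the selection test shrinks from a mask test to an equality test. In outline:

    /* before: a bit mask of object types */
    if ((1U << mark->connector->type) & type_mask)
            list_move(&mark->g_list, &to_free);

    /* after: one obj_type, or FSNOTIFY_OBJ_TYPE_ANY to clear everything */
    if (mark->connector->type == obj_type)
            list_move(&mark->g_list, &to_free);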
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index d563abc3e136..2911c04a33e0 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-or-later
-/**
+/*
* attrib.c - NTFS attribute operations. Part of the Linux-NTFS project.
*
* Copyright (c) 2001-2012 Anton Altaparmakov and Tuxera Inc.
diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h
index 8aaec7e0804e..fb825059d488 100644
--- a/fs/ntfs3/ntfs_fs.h
+++ b/fs/ntfs3/ntfs_fs.h
@@ -11,7 +11,6 @@
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
-#include <linux/cleancache.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index bb247bc349e4..bf9357123bc5 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -2040,7 +2040,7 @@ static void ocfs2_complete_edge_insert(handle_t *handle,
int i, idx;
struct ocfs2_extent_list *el, *left_el, *right_el;
struct ocfs2_extent_rec *left_rec, *right_rec;
- struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
+ struct buffer_head *root_bh;
/*
* Update the counts and position values within all the
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 68d11c295dd3..498da317580a 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1799,20 +1799,20 @@ try_again:
*/
ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
cluster_of_pages, mmap_page);
- if (ret && ret != -EAGAIN) {
- mlog_errno(ret);
- goto out_quota;
- }
+ if (ret) {
+ /*
+ * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
+ * the target page. In this case, we exit with no error and no target
+ * page. This will trigger the caller, page_mkwrite(), to re-try
+ * the operation.
+ */
+ if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) {
+ BUG_ON(wc->w_target_page);
+ ret = 0;
+ goto out_quota;
+ }
- /*
- * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
- * the target page. In this case, we exit with no error and no target
- * page. This will trigger the caller, page_mkwrite(), to re-try
- * the operation.
- */
- if (ret == -EAGAIN) {
- BUG_ON(wc->w_target_page);
- ret = 0;
+ mlog_errno(ret);
goto out_quota;
}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index f89ffcbd585f..a17be1618bf7 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -379,7 +379,7 @@ static void o2hb_nego_timeout(struct work_struct *work)
o2hb_fill_node_map(live_node_bitmap, sizeof(live_node_bitmap));
/* lowest node as master node to make negotiate decision. */
- master_node = find_next_bit(live_node_bitmap, O2NM_MAX_NODES, 0);
+ master_node = find_first_bit(live_node_bitmap, O2NM_MAX_NODES);
if (master_node == o2nm_this_node()) {
if (!test_bit(master_node, reg->hr_nego_node_bitmap)) {
diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
index 810d32815593..563881ddbf00 100644
--- a/fs/ocfs2/cluster/masklog.c
+++ b/fs/ocfs2/cluster/masklog.c
@@ -120,7 +120,8 @@ static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
define_mask(KTHREAD),
};
-static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
+static struct attribute *mlog_default_attrs[MLOG_MAX_BITS] = {NULL, };
+ATTRIBUTE_GROUPS(mlog_default);
static ssize_t mlog_show(struct kobject *obj, struct attribute *attr,
char *buf)
@@ -144,8 +145,8 @@ static const struct sysfs_ops mlog_attr_ops = {
};
static struct kobj_type mlog_ktype = {
- .default_attrs = mlog_attr_ptrs,
- .sysfs_ops = &mlog_attr_ops,
+ .default_groups = mlog_default_groups,
+ .sysfs_ops = &mlog_attr_ops,
};
static struct kset mlog_kset = {
@@ -157,10 +158,10 @@ int mlog_sys_init(struct kset *o2cb_kset)
int i = 0;
while (mlog_attrs[i].attr.mode) {
- mlog_attr_ptrs[i] = &mlog_attrs[i].attr;
+ mlog_default_attrs[i] = &mlog_attrs[i].attr;
i++;
}
- mlog_attr_ptrs[i] = NULL;
+ mlog_default_attrs[i] = NULL;
kobject_set_name(&mlog_kset.kobj, "logmask");
mlog_kset.kobj.kset = o2cb_kset;
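The default_attrs to default_groups conversions in this series (here, in ocfs2/filecheck.c, and throughout orangefs-sysfs.c below) are all the same mechanical step: keep the NULL-terminated attribute array and let ATTRIBUTE_GROUPS() generate the group wrapper that struct kobj_type now expects. Roughly, ATTRIBUTE_GROUPS(mlog_default) expands to:

    static const struct attribute_group mlog_default_group = {
            .attrs = mlog_default_attrs,
    };
    static const struct attribute_group *mlog_default_groups[] = {
            &mlog_default_group,
            NULL,
    };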
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index bd8d534f11cb..f2cc1ff29e6d 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -3343,7 +3343,7 @@ static int ocfs2_find_dir_space_id(struct inode *dir, struct buffer_head *di_bh,
struct ocfs2_dir_entry *de, *last_de = NULL;
char *de_buf, *limit;
unsigned long offset = 0;
- unsigned int rec_len, new_rec_len, free_space = dir->i_sb->s_blocksize;
+ unsigned int rec_len, new_rec_len, free_space;
/*
* This calculates how many free bytes we'd have in block zero, should
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 9f90fc9551e1..c4eccd499db8 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -1045,7 +1045,7 @@ static int dlm_send_regions(struct dlm_ctxt *dlm, unsigned long *node_map)
int status, ret = 0, i;
char *p;
- if (find_next_bit(node_map, O2NM_MAX_NODES, 0) >= O2NM_MAX_NODES)
+ if (find_first_bit(node_map, O2NM_MAX_NODES) >= O2NM_MAX_NODES)
goto bail;
qr = kzalloc(sizeof(struct dlm_query_region), GFP_KERNEL);
@@ -1217,7 +1217,7 @@ static int dlm_send_nodeinfo(struct dlm_ctxt *dlm, unsigned long *node_map)
struct o2nm_node *node;
int ret = 0, status, count, i;
- if (find_next_bit(node_map, O2NM_MAX_NODES, 0) >= O2NM_MAX_NODES)
+ if (find_first_bit(node_map, O2NM_MAX_NODES) >= O2NM_MAX_NODES)
goto bail;
qn = kzalloc(sizeof(struct dlm_query_nodeinfo), GFP_KERNEL);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 9b88219febb5..227da5b1b6ab 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -861,7 +861,7 @@ lookup:
* to see if there are any nodes that still need to be
* considered. these will not appear in the mle nodemap
* but they might own this lockres. wait on them. */
- bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
+ bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
if (bit < O2NM_MAX_NODES) {
mlog(0, "%s: res %.*s, At least one node (%d) "
"to recover before lock mastery can begin\n",
@@ -912,7 +912,7 @@ redo_request:
dlm_wait_for_recovery(dlm);
spin_lock(&dlm->spinlock);
- bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
+ bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
if (bit < O2NM_MAX_NODES) {
mlog(0, "%s: res %.*s, At least one node (%d) "
"to recover before lock mastery can begin\n",
@@ -1079,7 +1079,7 @@ recheck:
sleep = 1;
/* have all nodes responded? */
if (voting_done && !*blocked) {
- bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
+ bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
if (dlm->node_num <= bit) {
/* my node number is lowest.
* now tell other nodes that I am
@@ -1234,8 +1234,8 @@ static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
} else {
mlog(ML_ERROR, "node down! %d\n", node);
if (blocked) {
- int lowest = find_next_bit(mle->maybe_map,
- O2NM_MAX_NODES, 0);
+ int lowest = find_first_bit(mle->maybe_map,
+ O2NM_MAX_NODES);
/* act like it was never there */
clear_bit(node, mle->maybe_map);
@@ -1795,7 +1795,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
"MLE for it! (%.*s)\n", assert->node_idx,
namelen, name);
} else {
- int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0);
+ int bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
if (bit >= O2NM_MAX_NODES) {
/* not necessarily an error, though less likely.
* could be master just re-asserting. */
@@ -2521,7 +2521,7 @@ static int dlm_is_lockres_migratable(struct dlm_ctxt *dlm,
}
if (!nonlocal) {
- node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+ node_ref = find_first_bit(res->refmap, O2NM_MAX_NODES);
if (node_ref >= O2NM_MAX_NODES)
return 0;
}
@@ -3303,7 +3303,7 @@ static void dlm_clean_block_mle(struct dlm_ctxt *dlm,
BUG_ON(mle->type != DLM_MLE_BLOCK);
spin_lock(&mle->spinlock);
- bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
+ bit = find_first_bit(mle->maybe_map, O2NM_MAX_NODES);
if (bit != dead_node) {
mlog(0, "mle found, but dead node %u would not have been "
"master\n", dead_node);
@@ -3542,7 +3542,7 @@ void dlm_force_free_mles(struct dlm_ctxt *dlm)
spin_lock(&dlm->master_lock);
BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING);
- BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES));
+ BUG_ON((find_first_bit(dlm->domain_map, O2NM_MAX_NODES) < O2NM_MAX_NODES));
for (i = 0; i < DLM_HASH_BUCKETS; i++) {
bucket = dlm_master_hash(dlm, i);
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 5cd5f7511dac..52ad342fec3e 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -451,7 +451,7 @@ static int dlm_do_recovery(struct dlm_ctxt *dlm)
if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
int bit;
- bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
+ bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
if (bit >= O2NM_MAX_NODES || bit < 0)
dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
else
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index c350bd4df770..eedf07ca23ca 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -92,7 +92,7 @@ int __dlm_lockres_unused(struct dlm_lock_resource *res)
return 0;
/* Another node has this resource with this node as the master */
- bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0);
+ bit = find_first_bit(res->refmap, O2NM_MAX_NODES);
if (bit < O2NM_MAX_NODES)
return 0;
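The find_next_bit() hunks across heartbeat.c, dlmdomain.c, dlmmaster.c, dlmrecovery.c and dlmthread.c are one mechanical substitution: a search starting at bit 0 is exactly find_first_bit(), which reads better and lets the bitops layer provide a specialised first-bit implementation. The equivalence, for reference:

    /* identical results; both return O2NM_MAX_NODES if no bit is set */
    bit = find_next_bit(map, O2NM_MAX_NODES, 0);
    bit = find_first_bit(map, O2NM_MAX_NODES);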
diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c
index de56e6231af8..1ad7106741f8 100644
--- a/fs/ocfs2/filecheck.c
+++ b/fs/ocfs2/filecheck.c
@@ -94,6 +94,7 @@ static struct attribute *ocfs2_filecheck_attrs[] = {
&ocfs2_filecheck_attr_set.attr,
NULL
};
+ATTRIBUTE_GROUPS(ocfs2_filecheck);
static void ocfs2_filecheck_release(struct kobject *kobj)
{
@@ -138,7 +139,7 @@ static const struct sysfs_ops ocfs2_filecheck_ops = {
};
static struct kobj_type ocfs2_ktype_filecheck = {
- .default_attrs = ocfs2_filecheck_attrs,
+ .default_groups = ocfs2_filecheck_groups,
.sysfs_ops = &ocfs2_filecheck_ops,
.release = ocfs2_filecheck_release,
};
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index dbf9b9e97d74..1887a2708709 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -1669,8 +1669,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
status = jbd2_journal_load(journal);
if (status < 0) {
mlog_errno(status);
- if (!igrab(inode))
- BUG();
+ BUG_ON(!igrab(inode));
jbd2_journal_destroy(journal);
goto done;
}
@@ -1699,8 +1698,7 @@ static int ocfs2_replay_journal(struct ocfs2_super *osb,
if (status < 0)
mlog_errno(status);
- if (!igrab(inode))
- BUG();
+ BUG_ON(!igrab(inode));
jbd2_journal_destroy(journal);
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 16f1bfc407f2..dd77b7aaabf5 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -661,42 +661,8 @@ static struct ctl_table ocfs2_nm_table[] = {
{ }
};
-static struct ctl_table ocfs2_mod_table[] = {
- {
- .procname = "nm",
- .data = NULL,
- .maxlen = 0,
- .mode = 0555,
- .child = ocfs2_nm_table
- },
- { }
-};
-
-static struct ctl_table ocfs2_kern_table[] = {
- {
- .procname = "ocfs2",
- .data = NULL,
- .maxlen = 0,
- .mode = 0555,
- .child = ocfs2_mod_table
- },
- { }
-};
-
-static struct ctl_table ocfs2_root_table[] = {
- {
- .procname = "fs",
- .data = NULL,
- .maxlen = 0,
- .mode = 0555,
- .child = ocfs2_kern_table
- },
- { }
-};
-
static struct ctl_table_header *ocfs2_table_header;
-
/*
* Initialization
*/
@@ -705,7 +671,7 @@ static int __init ocfs2_stack_glue_init(void)
{
strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB);
- ocfs2_table_header = register_sysctl_table(ocfs2_root_table);
+ ocfs2_table_header = register_sysctl("fs/ocfs2/nm", ocfs2_nm_table);
if (!ocfs2_table_header) {
printk(KERN_ERR
"ocfs2 stack glue: unable to register sysctl\n");
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 481017e1dac5..166c8918c825 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -1251,26 +1251,23 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
{
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
struct journal_head *jh;
- int ret = 1;
+ int ret;
if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
return 0;
- if (!buffer_jbd(bg_bh))
+ jh = jbd2_journal_grab_journal_head(bg_bh);
+ if (!jh)
return 1;
- jbd_lock_bh_journal_head(bg_bh);
- if (buffer_jbd(bg_bh)) {
- jh = bh2jh(bg_bh);
- spin_lock(&jh->b_state_lock);
- bg = (struct ocfs2_group_desc *) jh->b_committed_data;
- if (bg)
- ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
- else
- ret = 1;
- spin_unlock(&jh->b_state_lock);
- }
- jbd_unlock_bh_journal_head(bg_bh);
+ spin_lock(&jh->b_state_lock);
+ bg = (struct ocfs2_group_desc *) jh->b_committed_data;
+ if (bg)
+ ret = !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
+ else
+ ret = 1;
+ spin_unlock(&jh->b_state_lock);
+ jbd2_journal_put_journal_head(jh);
return ret;
}
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 1286b88b6fa1..2772dec9dcea 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -25,7 +25,6 @@
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/quotaops.h>
-#include <linux/cleancache.h>
#include <linux/signal.h>
#define CREATE_TRACE_POINTS
@@ -2283,7 +2282,6 @@ static int ocfs2_initialize_super(struct super_block *sb,
mlog_errno(status);
goto bail;
}
- cleancache_init_shared_fs(sb);
osb->ocfs2_wq = alloc_ordered_workqueue("ocfs2_wq", WQ_MEM_RECLAIM);
if (!osb->ocfs2_wq) {
diff --git a/fs/orangefs/orangefs-bufmap.c b/fs/orangefs/orangefs-bufmap.c
index 538e839590ef..b501dc07f922 100644
--- a/fs/orangefs/orangefs-bufmap.c
+++ b/fs/orangefs/orangefs-bufmap.c
@@ -176,7 +176,7 @@ orangefs_bufmap_free(struct orangefs_bufmap *bufmap)
{
kfree(bufmap->page_array);
kfree(bufmap->desc_array);
- kfree(bufmap->buffer_index_array);
+ bitmap_free(bufmap->buffer_index_array);
kfree(bufmap);
}
@@ -226,8 +226,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
bufmap->desc_size = user_desc->size;
bufmap->desc_shift = ilog2(bufmap->desc_size);
- bufmap->buffer_index_array =
- kzalloc(DIV_ROUND_UP(bufmap->desc_count, BITS_PER_LONG), GFP_KERNEL);
+ bufmap->buffer_index_array = bitmap_zalloc(bufmap->desc_count, GFP_KERNEL);
if (!bufmap->buffer_index_array)
goto out_free_bufmap;
@@ -250,7 +249,7 @@ orangefs_bufmap_alloc(struct ORANGEFS_dev_map_desc *user_desc)
out_free_desc_array:
kfree(bufmap->desc_array);
out_free_index_array:
- kfree(bufmap->buffer_index_array);
+ bitmap_free(bufmap->buffer_index_array);
out_free_bufmap:
kfree(bufmap);
out:
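Beyond readability, the bitmap_zalloc() switch appears to fix an undersized allocation: kzalloc(DIV_ROUND_UP(count, BITS_PER_LONG), ...) computes the number of longs the bitmap needs but passes it as a byte count. bitmap_zalloc() takes the bit count and sizes the buffer itself, and bitmap_free() is its matching release helper:

    /* allocate a zeroed bitmap able to hold desc_count bits */
    unsigned long *map = bitmap_zalloc(desc_count, GFP_KERNEL);
    ...
    bitmap_free(map);   /* not kfree() */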
diff --git a/fs/orangefs/orangefs-sysfs.c b/fs/orangefs/orangefs-sysfs.c
index 3627ea946402..de80b62553bb 100644
--- a/fs/orangefs/orangefs-sysfs.c
+++ b/fs/orangefs/orangefs-sysfs.c
@@ -894,10 +894,11 @@ static struct attribute *orangefs_default_attrs[] = {
&perf_time_interval_secs_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(orangefs_default);
static struct kobj_type orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
- .default_attrs = orangefs_default_attrs,
+ .default_groups = orangefs_default_groups,
};
static struct orangefs_attribute acache_hard_limit_attribute =
@@ -931,10 +932,11 @@ static struct attribute *acache_orangefs_default_attrs[] = {
&acache_timeout_msecs_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(acache_orangefs_default);
static struct kobj_type acache_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
- .default_attrs = acache_orangefs_default_attrs,
+ .default_groups = acache_orangefs_default_groups,
};
static struct orangefs_attribute capcache_hard_limit_attribute =
@@ -968,10 +970,11 @@ static struct attribute *capcache_orangefs_default_attrs[] = {
&capcache_timeout_secs_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(capcache_orangefs_default);
static struct kobj_type capcache_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
- .default_attrs = capcache_orangefs_default_attrs,
+ .default_groups = capcache_orangefs_default_groups,
};
static struct orangefs_attribute ccache_hard_limit_attribute =
@@ -1005,10 +1008,11 @@ static struct attribute *ccache_orangefs_default_attrs[] = {
&ccache_timeout_secs_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(ccache_orangefs_default);
static struct kobj_type ccache_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
- .default_attrs = ccache_orangefs_default_attrs,
+ .default_groups = ccache_orangefs_default_groups,
};
static struct orangefs_attribute ncache_hard_limit_attribute =
@@ -1042,10 +1046,11 @@ static struct attribute *ncache_orangefs_default_attrs[] = {
&ncache_timeout_msecs_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(ncache_orangefs_default);
static struct kobj_type ncache_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
- .default_attrs = ncache_orangefs_default_attrs,
+ .default_groups = ncache_orangefs_default_groups,
};
static struct orangefs_attribute pc_acache_attribute =
@@ -1072,10 +1077,11 @@ static struct attribute *pc_orangefs_default_attrs[] = {
&pc_ncache_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(pc_orangefs_default);
static struct kobj_type pc_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
- .default_attrs = pc_orangefs_default_attrs,
+ .default_groups = pc_orangefs_default_groups,
};
static struct orangefs_attribute stats_reads_attribute =
@@ -1095,10 +1101,11 @@ static struct attribute *stats_orangefs_default_attrs[] = {
&stats_writes_attribute.attr,
NULL,
};
+ATTRIBUTE_GROUPS(stats_orangefs_default);
static struct kobj_type stats_orangefs_ktype = {
.sysfs_ops = &orangefs_sysfs_ops,
- .default_attrs = stats_orangefs_default_attrs,
+ .default_groups = stats_orangefs_default_groups,
};
static struct kobject *orangefs_obj;
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index b193d08a3dc3..e040970408d4 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -145,7 +145,7 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old,
if (err == -ENOTTY || err == -EINVAL)
return 0;
pr_warn("failed to retrieve lower fileattr (%pd2, err=%i)\n",
- old, err);
+ old->dentry, err);
return err;
}
@@ -157,7 +157,9 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old,
*/
if (oldfa.flags & OVL_PROT_FS_FLAGS_MASK) {
err = ovl_set_protattr(inode, new->dentry, &oldfa);
- if (err)
+ if (err == -EPERM)
+ pr_warn_once("copying fileattr: no xattr on upper\n");
+ else if (err)
return err;
}
@@ -167,8 +169,16 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old,
err = ovl_real_fileattr_get(new, &newfa);
if (err) {
+ /*
+ * Returning an error if upper doesn't support fileattr will
+ * result in a regression, so revert to the old behavior.
+ */
+ if (err == -ENOTTY || err == -EINVAL) {
+ pr_warn_once("copying fileattr: no support on upper\n");
+ return 0;
+ }
pr_warn("failed to retrieve upper fileattr (%pd2, err=%i)\n",
- new, err);
+ new->dentry, err);
return err;
}
diff --git a/fs/pipe.c b/fs/pipe.c
index 6d4342bad9f1..cc28623a67b6 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -25,6 +25,7 @@
#include <linux/fcntl.h>
#include <linux/memcontrol.h>
#include <linux/watch_queue.h>
+#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
@@ -50,13 +51,13 @@
* The max size that a non-root user is allowed to grow the pipe. Can
* be set by root in /proc/sys/fs/pipe-max-size
*/
-unsigned int pipe_max_size = 1048576;
+static unsigned int pipe_max_size = 1048576;
/* Maximum allocatable pages per user. Hard limit is unset by default, soft
* matches default values.
*/
-unsigned long pipe_user_pages_hard;
-unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
+static unsigned long pipe_user_pages_hard;
+static unsigned long pipe_user_pages_soft = PIPE_DEF_BUFFERS * INR_OPEN_CUR;
/*
* We use head and tail indices that aren't masked off, except at the point of
@@ -1428,6 +1429,60 @@ static struct file_system_type pipe_fs_type = {
.kill_sb = kill_anon_super,
};
+#ifdef CONFIG_SYSCTL
+static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
+ unsigned int *valp,
+ int write, void *data)
+{
+ if (write) {
+ unsigned int val;
+
+ val = round_pipe_size(*lvalp);
+ if (val == 0)
+ return -EINVAL;
+
+ *valp = val;
+ } else {
+ unsigned int val = *valp;
+ *lvalp = (unsigned long) val;
+ }
+
+ return 0;
+}
+
+static int proc_dopipe_max_size(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ return do_proc_douintvec(table, write, buffer, lenp, ppos,
+ do_proc_dopipe_max_size_conv, NULL);
+}
+
+static struct ctl_table fs_pipe_sysctls[] = {
+ {
+ .procname = "pipe-max-size",
+ .data = &pipe_max_size,
+ .maxlen = sizeof(pipe_max_size),
+ .mode = 0644,
+ .proc_handler = proc_dopipe_max_size,
+ },
+ {
+ .procname = "pipe-user-pages-hard",
+ .data = &pipe_user_pages_hard,
+ .maxlen = sizeof(pipe_user_pages_hard),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {
+ .procname = "pipe-user-pages-soft",
+ .data = &pipe_user_pages_soft,
+ .maxlen = sizeof(pipe_user_pages_soft),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ { }
+};
+#endif
+
static int __init init_pipe_fs(void)
{
int err = register_filesystem(&pipe_fs_type);
@@ -1439,6 +1494,9 @@ static int __init init_pipe_fs(void)
unregister_filesystem(&pipe_fs_type);
}
}
+#ifdef CONFIG_SYSCTL
+ register_sysctl_init("fs", fs_pipe_sysctls);
+#endif
return err;
}
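Nothing user-visible changes from moving these definitions out of kernel/sysctl.c: the knobs keep their paths under /proc/sys/fs/. The pipe-max-size limit is the one an unprivileged F_SETPIPE_SZ runs into; a quick check (illustrative):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            int p[2];

            if (pipe(p) < 0)
                    return 1;
            /* unprivileged growth is capped by /proc/sys/fs/pipe-max-size */
            int sz = fcntl(p[1], F_SETPIPE_SZ, 1 << 20);
            if (sz < 0)
                    perror("F_SETPIPE_SZ");
            else
                    printf("pipe buffer is now %d bytes\n", sz);
            return 0;
    }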
diff --git a/fs/proc/array.c b/fs/proc/array.c
index ff869a66b34e..fd8b0c12b2cb 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -92,6 +92,7 @@
#include <linux/string_helpers.h>
#include <linux/user_namespace.h>
#include <linux/fs_struct.h>
+#include <linux/kthread.h>
#include <asm/processor.h>
#include "internal.h"
@@ -102,6 +103,8 @@ void proc_task_name(struct seq_file *m, struct task_struct *p, bool escape)
if (p->flags & PF_WQ_WORKER)
wq_worker_comm(tcomm, sizeof(tcomm), p);
+ else if (p->flags & PF_KTHREAD)
+ get_kthread_comm(tcomm, sizeof(tcomm), p);
else
__get_task_comm(tcomm, sizeof(tcomm), p);
@@ -468,6 +471,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
u64 cgtime, gtime;
unsigned long rsslim = 0;
unsigned long flags;
+ int exit_code = task->exit_code;
state = *get_task_state(task);
vsize = eip = esp = 0;
@@ -531,6 +535,9 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
maj_flt += sig->maj_flt;
thread_group_cputime_adjusted(task, &utime, &stime);
gtime += sig->gtime;
+
+ if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED))
+ exit_code = sig->group_exit_code;
}
sid = task_session_nr_ns(task, ns);
@@ -630,7 +637,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
seq_puts(m, " 0 0 0 0 0 0 0");
if (permitted)
- seq_put_decimal_ll(m, " ", task->exit_code);
+ seq_put_decimal_ll(m, " ", exit_code);
else
seq_puts(m, " 0");
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 13eda8de2998..d654ce7150fd 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -670,10 +670,10 @@ static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
/************************************************************************/
/* permission checks */
-static int proc_fd_access_allowed(struct inode *inode)
+static bool proc_fd_access_allowed(struct inode *inode)
{
struct task_struct *task;
- int allowed = 0;
+ bool allowed = false;
/* Allow access to a task's file descriptors if it is us or we
* may use ptrace attach to the process and find out that
* information.
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 5b78739e60e4..f2132407e133 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -791,12 +791,6 @@ void proc_remove(struct proc_dir_entry *de)
}
EXPORT_SYMBOL(proc_remove);
-void *PDE_DATA(const struct inode *inode)
-{
- return __PDE_DATA(inode);
-}
-EXPORT_SYMBOL(PDE_DATA);
-
/*
* Pull a user buffer into memory and pass it to the file's write handler if
* one is supplied. The ->write() method is permitted to modify the
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 599eb724ff2d..f84355c5a36d 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -650,6 +650,7 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
return NULL;
}
+ inode->i_private = de->data;
inode->i_ino = de->low_ino;
inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
PROC_I(inode)->pde = de;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 03415f3fb3a8..06a80f78433d 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -115,11 +115,6 @@ static inline struct proc_dir_entry *PDE(const struct inode *inode)
return PROC_I(inode)->pde;
}
-static inline void *__PDE_DATA(const struct inode *inode)
-{
- return PDE(inode)->data;
-}
-
static inline struct pid *proc_pid(const struct inode *inode)
{
return PROC_I(inode)->pid;
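With proc_get_inode() now copying de->data into inode->i_private, the exported PDE_DATA() function and its __PDE_DATA() helper can both go; the replacement pde_data() is a static inline that reads i_private directly. A typical consumer looks like this (sketch; foo_show and the stats structure are hypothetical):

    static int foo_show(struct seq_file *m, void *v)
    {
            struct foo_stats *stats = m->private;

            seq_printf(m, "count=%lu\n", stats->count);
            return 0;
    }

    static int foo_open(struct inode *inode, struct file *file)
    {
            /* pde_data() replaces the old out-of-line PDE_DATA() */
            return single_open(file, foo_show, pde_data(inode));
    }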
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 39b823ab2564..e1cfeda397f3 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -138,7 +138,7 @@ EXPORT_SYMBOL_GPL(proc_create_net_data);
* @parent: The parent directory in which to create.
* @ops: The seq_file ops with which to read the file.
* @write: The write method with which to 'modify' the file.
- * @data: Data for retrieval by PDE_DATA().
+ * @data: Data for retrieval by pde_data().
*
* Create a network namespaced proc file in the @parent directory with the
* specified @name and @mode that allows reading of a file that displays a
@@ -153,7 +153,7 @@ EXPORT_SYMBOL_GPL(proc_create_net_data);
* modified by the @write function. @write should return 0 on success.
*
* The @data value is accessible from the @show and @write functions by calling
- * PDE_DATA() on the file inode. The network namespace must be accessed by
+ * pde_data() on the file inode. The network namespace must be accessed by
* calling seq_file_net() on the seq_file struct.
*/
struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode,
@@ -230,7 +230,7 @@ EXPORT_SYMBOL_GPL(proc_create_net_single);
* @parent: The parent directory in which to create.
* @show: The seqfile show method with which to read the file.
* @write: The write method with which to 'modify' the file.
- * @data: Data for retrieval by PDE_DATA().
+ * @data: Data for retrieval by pde_data().
*
* Create a network-namespaced proc file in the @parent directory with the
* specified @name and @mode that allows reading of a file that displays a
@@ -245,7 +245,7 @@ EXPORT_SYMBOL_GPL(proc_create_net_single);
* modified by the @write function. @write should return 0 on success.
*
* The @data value is accessible from the @show and @write functions by calling
- * PDE_DATA() on the file inode. The network namespace must be accessed by
+ * pde_data() on the file inode. The network namespace must be accessed by
* calling seq_file_single_net() on the seq_file struct.
*/
struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mode,
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 5d66faecd4ef..7d9cfc730bd4 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/bpf-cgroup.h>
#include <linux/mount.h>
+#include <linux/kmemleak.h>
#include "internal.h"
static const struct dentry_operations proc_sys_dentry_operations;
@@ -25,15 +26,32 @@ static const struct file_operations proc_sys_dir_file_operations;
static const struct inode_operations proc_sys_dir_operations;
/* shared constants to be used in various sysctls */
-const int sysctl_vals[] = { 0, 1, INT_MAX };
+const int sysctl_vals[] = { -1, 0, 1, 2, 4, 100, 200, 1000, 3000, INT_MAX, 65535 };
EXPORT_SYMBOL(sysctl_vals);
+const unsigned long sysctl_long_vals[] = { 0, 1, LONG_MAX };
+EXPORT_SYMBOL_GPL(sysctl_long_vals);
+
/* Support for permanently empty directories */
struct ctl_table sysctl_mount_point[] = {
{ }
};
+/**
+ * register_sysctl_mount_point() - registers a sysctl mount point
+ * @path: path for the mount point
+ *
+ * Used to create a permanently empty directory to serve as a mount point.
+ * There are some subtle but important permission checks this allows in the
+ * case of unprivileged mounts.
+ */
+struct ctl_table_header *register_sysctl_mount_point(const char *path)
+{
+ return register_sysctl(path, sysctl_mount_point);
+}
+EXPORT_SYMBOL(register_sysctl_mount_point);
+
static bool is_empty_dir(struct ctl_table_header *head)
{
return head->ctl_table[0].child == sysctl_mount_point;
@@ -163,7 +181,7 @@ static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry)
else {
pr_err("sysctl duplicate entry: ");
sysctl_print_dir(head->parent);
- pr_cont("/%s\n", entry->procname);
+ pr_cont("%s\n", entry->procname);
return -EEXIST;
}
}
@@ -1020,8 +1038,8 @@ failed:
if (IS_ERR(subdir)) {
pr_err("sysctl could not get directory: ");
sysctl_print_dir(dir);
- pr_cont("/%*.*s %ld\n",
- namelen, namelen, name, PTR_ERR(subdir));
+ pr_cont("%*.*s %ld\n", namelen, namelen, name,
+ PTR_ERR(subdir));
}
drop_sysctl_table(&dir->header);
if (new)
@@ -1053,7 +1071,6 @@ static int sysctl_follow_link(struct ctl_table_header **phead,
struct ctl_dir *dir;
int ret;
- ret = 0;
spin_lock(&sysctl_lock);
root = (*pentry)->data;
set = lookup_header_set(root);
@@ -1384,6 +1401,38 @@ struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *tab
}
EXPORT_SYMBOL(register_sysctl);
+/**
+ * __register_sysctl_init() - register sysctl table to path
+ * @path: path name for sysctl base
+ * @table: This is the sysctl table that needs to be registered to the path
+ * @table_name: The name of sysctl table, only used for log printing when
+ * registration fails
+ *
+ * The sysctl interface is used by userspace to query or modify at runtime
+ * a predefined value set on a variable. These variables however have default
+ * values pre-set. Code which depends on these variables will always work even
+ * if register_sysctl() fails. If register_sysctl() fails you'd just lose the
+ * ability to query or modify the sysctls dynamically at run time. Chances of
+ * register_sysctl() failing on init are extremely low, and so for both reasons
+ * this function does not return any error as it is used by initialization code.
+ *
+ * Context: Can only be called after your respective sysctl base path has been
+ * registered. For instance, most base directories are registered early in
+ * boot, before initcall levels are processed, through proc_sys_init() and
+ * sysctl_init_bases().
+ */
+void __init __register_sysctl_init(const char *path, struct ctl_table *table,
+ const char *table_name)
+{
+ struct ctl_table_header *hdr = register_sysctl(path, table);
+
+ if (unlikely(!hdr)) {
+ pr_err("failed when register_sysctl %s to %s\n", table_name, path);
+ return;
+ }
+ kmemleak_not_leak(hdr);
+}
+
static char *append_path(const char *path, char *pos, const char *name)
{
int namelen;
@@ -1597,6 +1646,15 @@ struct ctl_table_header *register_sysctl_table(struct ctl_table *table)
}
EXPORT_SYMBOL(register_sysctl_table);
+int __register_sysctl_base(struct ctl_table *base_table)
+{
+ struct ctl_table_header *hdr;
+
+ hdr = register_sysctl_table(base_table);
+ kmemleak_not_leak(hdr);
+ return 0;
+}
+
static void put_links(struct ctl_table_header *header)
{
struct ctl_table_set *root_set = &sysctl_table_root.default_set;
@@ -1626,7 +1684,7 @@ static void put_links(struct ctl_table_header *header)
else {
pr_err("sysctl link missing during unregister: ");
sysctl_print_dir(parent);
- pr_cont("/%s\n", name);
+ pr_cont("%s\n", name);
}
}
}
@@ -1710,7 +1768,7 @@ int __init proc_sys_init(void)
proc_sys_root->proc_dir_ops = &proc_sys_dir_file_operations;
proc_sys_root->nlink = 0;
- return sysctl_init();
+ return sysctl_init_bases();
}
struct sysctl_alias {
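The widened sysctl_vals[] array backs the SYSCTL_* pointer constants that ctl_table entries use as shared, read-only clamping bounds, so individual tables no longer need to declare their own static min/max integers. For example (sketch; the knob itself is hypothetical):

    static int sample_knob;

    static struct ctl_table sample_table[] = {
            {
                    .procname     = "sample-knob",
                    .data         = &sample_knob,
                    .maxlen       = sizeof(sample_knob),
                    .mode         = 0644,
                    .proc_handler = proc_dointvec_minmax,
                    .extra1       = SYSCTL_ZERO,    /* &sysctl_vals[...] */
                    .extra2       = SYSCTL_INT_MAX,
            },
            { }
    };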
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ad667dbc96f5..6e97ed775074 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/pagewalk.h>
#include <linux/vmacache.h>
+#include <linux/mm_inline.h>
#include <linux/hugetlb.h>
#include <linux/huge_mm.h>
#include <linux/mount.h>
@@ -308,6 +309,8 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
name = arch_vma_name(vma);
if (!name) {
+ const char *anon_name;
+
if (!mm) {
name = "[vdso]";
goto done;
@@ -319,8 +322,16 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
goto done;
}
- if (is_stack(vma))
+ if (is_stack(vma)) {
name = "[stack]";
+ goto done;
+ }
+
+ anon_name = vma_anon_name(vma);
+ if (anon_name) {
+ seq_pad(m, ' ');
+ seq_printf(m, "[anon:%s]", anon_name);
+ }
}
done:
@@ -429,7 +440,8 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
}
static void smaps_account(struct mem_size_stats *mss, struct page *page,
- bool compound, bool young, bool dirty, bool locked)
+ bool compound, bool young, bool dirty, bool locked,
+ bool migration)
{
int i, nr = compound ? compound_nr(page) : 1;
unsigned long size = nr * PAGE_SIZE;
@@ -456,8 +468,15 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
* page_count(page) == 1 guarantees the page is mapped exactly once.
* If any subpage of the compound page mapped with PTE it would elevate
* page_count().
+ *
+ * page_mapcount() is called to get a snapshot of the mapcount.
+ * Without holding the page lock this snapshot can be slightly wrong as
+ * we cannot always read the mapcount atomically. It is not safe to
+ * call page_mapcount() even with PTL held if the page is not mapped,
+ * especially for migration entries. Treat regular migration entries
+ * as mapcount == 1.
*/
- if (page_count(page) == 1) {
+ if ((page_count(page) == 1) || migration) {
smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
locked, true);
return;
@@ -506,6 +525,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
struct vm_area_struct *vma = walk->vma;
bool locked = !!(vma->vm_flags & VM_LOCKED);
struct page *page = NULL;
+ bool migration = false;
if (pte_present(*pte)) {
page = vm_normal_page(vma, addr, *pte);
@@ -525,8 +545,11 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
} else {
mss->swap_pss += (u64)PAGE_SIZE << PSS_SHIFT;
}
- } else if (is_pfn_swap_entry(swpent))
+ } else if (is_pfn_swap_entry(swpent)) {
+ if (is_migration_entry(swpent))
+ migration = true;
page = pfn_swap_entry_to_page(swpent);
+ }
} else {
smaps_pte_hole_lookup(addr, walk);
return;
@@ -535,7 +558,8 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
if (!page)
return;
- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
+ smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte),
+ locked, migration);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -546,6 +570,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
struct vm_area_struct *vma = walk->vma;
bool locked = !!(vma->vm_flags & VM_LOCKED);
struct page *page = NULL;
+ bool migration = false;
if (pmd_present(*pmd)) {
/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -553,8 +578,10 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
} else if (unlikely(thp_migration_supported() && is_swap_pmd(*pmd))) {
swp_entry_t entry = pmd_to_swp_entry(*pmd);
- if (is_migration_entry(entry))
+ if (is_migration_entry(entry)) {
+ migration = true;
page = pfn_swap_entry_to_page(entry);
+ }
}
if (IS_ERR_OR_NULL(page))
return;
@@ -566,7 +593,9 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
/* pass */;
else
mss->file_thp += HPAGE_PMD_SIZE;
- smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
+
+ smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd),
+ locked, migration);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -1367,6 +1396,7 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
{
u64 frame = 0, flags = 0;
struct page *page = NULL;
+ bool migration = false;
if (pte_present(pte)) {
if (pm->show_pfn)
@@ -1388,13 +1418,14 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
frame = swp_type(entry) |
(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
flags |= PM_SWAP;
+ migration = is_migration_entry(entry);
if (is_pfn_swap_entry(entry))
page = pfn_swap_entry_to_page(entry);
}
if (page && !PageAnon(page))
flags |= PM_FILE;
- if (page && page_mapcount(page) == 1)
+ if (page && !migration && page_mapcount(page) == 1)
flags |= PM_MMAP_EXCLUSIVE;
if (vma->vm_flags & VM_SOFTDIRTY)
flags |= PM_SOFT_DIRTY;
@@ -1410,8 +1441,9 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
spinlock_t *ptl;
pte_t *pte, *orig_pte;
int err = 0;
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ bool migration = false;
+
ptl = pmd_trans_huge_lock(pmdp, vma);
if (ptl) {
u64 flags = 0, frame = 0;
@@ -1450,11 +1482,12 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
if (pmd_swp_uffd_wp(pmd))
flags |= PM_UFFD_WP;
VM_BUG_ON(!is_pmd_migration_entry(pmd));
+ migration = is_migration_entry(entry);
page = pfn_swap_entry_to_page(entry);
}
#endif
- if (page && page_mapcount(page) == 1)
+ if (page && !migration && page_mapcount(page) == 1)
flags |= PM_MMAP_EXCLUSIVE;
for (; addr != end; addr += PAGE_SIZE) {
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 509f85148fee..702754dd1daf 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -65,8 +65,6 @@ static size_t vmcoredd_orig_sz;
static DECLARE_RWSEM(vmcore_cb_rwsem);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
-/* Whether we had a surprise unregistration of a callback. */
-static bool vmcore_cb_unstable;
/* Whether the vmcore has been opened once. */
static bool vmcore_opened;
@@ -94,10 +92,8 @@ void unregister_vmcore_cb(struct vmcore_cb *cb)
* very unusual (e.g., forced driver removal), but we cannot stop
* unregistering.
*/
- if (vmcore_opened) {
+ if (vmcore_opened)
pr_warn_once("Unexpected vmcore callback unregistration\n");
- vmcore_cb_unstable = true;
- }
up_write(&vmcore_cb_rwsem);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);
@@ -108,8 +104,6 @@ static bool pfn_is_ram(unsigned long pfn)
bool ret = true;
lockdep_assert_held_read(&vmcore_cb_rwsem);
- if (unlikely(vmcore_cb_unstable))
- return false;
list_for_each_entry(cb, &vmcore_cb_list, next) {
if (unlikely(!cb->pfn_is_ram))
@@ -581,7 +575,7 @@ static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
* looping over all pages without a reason.
*/
down_read(&vmcore_cb_rwsem);
- if (!list_empty(&vmcore_cb_list) || vmcore_cb_unstable)
+ if (!list_empty(&vmcore_cb_list))
ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
else
ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 22d904bde6ab..a74aef99bd3d 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -690,9 +690,14 @@ int dquot_quota_sync(struct super_block *sb, int type)
/* This is not very clever (and fast) but currently I don't know about
* any other simple way of getting quota data to disk and we must get
* them there for userspace to be visible... */
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 1);
- sync_blockdev(sb->s_bdev);
+ if (sb->s_op->sync_fs) {
+ ret = sb->s_op->sync_fs(sb, 1);
+ if (ret)
+ return ret;
+ }
+ ret = sync_blockdev(sb->s_bdev);
+ if (ret)
+ return ret;
/*
* Now when everything is written we can discard the pagecache so
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 0834b101c316..a3e21160b634 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -951,7 +951,9 @@ static int reiserfs_async_progress_wait(struct super_block *s)
int depth;
depth = reiserfs_write_unlock_nested(s);
- congestion_wait(BLK_RW_ASYNC, HZ / 10);
+ wait_var_event_timeout(&j->j_async_throttle,
+ atomic_read(&j->j_async_throttle) == 0,
+ HZ / 10);
reiserfs_write_lock_nested(s, depth);
}
@@ -1058,7 +1060,8 @@ static int flush_commit_list(struct super_block *s,
put_bh(tbh) ;
}
}
- atomic_dec(&journal->j_async_throttle);
+ if (atomic_dec_and_test(&journal->j_async_throttle))
+ wake_up_var(&journal->j_async_throttle);
for (i = 0; i < (jl->j_len + 1); i++) {
bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
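Instead of napping in congestion_wait() and hoping the throttle has drained, waiters now sleep on the counter itself and the last decrementer wakes them; the HZ/10 timeout is kept as a backstop. The underlying wait-var pattern, reduced to its core (fragment, not compilable standalone):

    /* waiter: sleep until the counter drains or the timeout fires */
    wait_var_event_timeout(&j->j_async_throttle,
                           atomic_read(&j->j_async_throttle) == 0, HZ / 10);

    /* last user: drop the count and wake anyone sleeping on it */
    if (atomic_dec_and_test(&j->j_async_throttle))
            wake_up_var(&j->j_async_throttle);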
diff --git a/fs/remap_range.c b/fs/remap_range.c
index 6d4a9beaa097..231159682907 100644
--- a/fs/remap_range.c
+++ b/fs/remap_range.c
@@ -146,41 +146,41 @@ static int generic_remap_check_len(struct inode *inode_in,
}
/* Read a page's worth of file data into the page cache. */
-static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
+static struct folio *vfs_dedupe_get_folio(struct inode *inode, loff_t pos)
{
- struct page *page;
+ struct folio *folio;
- page = read_mapping_page(inode->i_mapping, offset >> PAGE_SHIFT, NULL);
- if (IS_ERR(page))
- return page;
- if (!PageUptodate(page)) {
- put_page(page);
+ folio = read_mapping_folio(inode->i_mapping, pos >> PAGE_SHIFT, NULL);
+ if (IS_ERR(folio))
+ return folio;
+ if (!folio_test_uptodate(folio)) {
+ folio_put(folio);
return ERR_PTR(-EIO);
}
- return page;
+ return folio;
}
/*
- * Lock two pages, ensuring that we lock in offset order if the pages are from
- * the same file.
+ * Lock two folios, ensuring that we lock in offset order if the folios
+ * are from the same file.
*/
-static void vfs_lock_two_pages(struct page *page1, struct page *page2)
+static void vfs_lock_two_folios(struct folio *folio1, struct folio *folio2)
{
/* Always lock in order of increasing index. */
- if (page1->index > page2->index)
- swap(page1, page2);
+ if (folio1->index > folio2->index)
+ swap(folio1, folio2);
- lock_page(page1);
- if (page1 != page2)
- lock_page(page2);
+ folio_lock(folio1);
+ if (folio1 != folio2)
+ folio_lock(folio2);
}
-/* Unlock two pages, being careful not to unlock the same page twice. */
-static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
+/* Unlock two folios, being careful not to unlock the same folio twice. */
+static void vfs_unlock_two_folios(struct folio *folio1, struct folio *folio2)
{
- unlock_page(page1);
- if (page1 != page2)
- unlock_page(page2);
+ folio_unlock(folio1);
+ if (folio1 != folio2)
+ folio_unlock(folio2);
}
/*
@@ -188,77 +188,71 @@ static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
* Caller must have locked both inodes to prevent write races.
*/
static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
- struct inode *dest, loff_t destoff,
+ struct inode *dest, loff_t dstoff,
loff_t len, bool *is_same)
{
- loff_t src_poff;
- loff_t dest_poff;
- void *src_addr;
- void *dest_addr;
- struct page *src_page;
- struct page *dest_page;
- loff_t cmp_len;
- bool same;
- int error;
-
- error = -EINVAL;
- same = true;
+ bool same = true;
+ int error = -EINVAL;
+
while (len) {
- src_poff = srcoff & (PAGE_SIZE - 1);
- dest_poff = destoff & (PAGE_SIZE - 1);
- cmp_len = min(PAGE_SIZE - src_poff,
- PAGE_SIZE - dest_poff);
+ struct folio *src_folio, *dst_folio;
+ void *src_addr, *dst_addr;
+ loff_t cmp_len = min(PAGE_SIZE - offset_in_page(srcoff),
+ PAGE_SIZE - offset_in_page(dstoff));
+
cmp_len = min(cmp_len, len);
if (cmp_len <= 0)
goto out_error;
- src_page = vfs_dedupe_get_page(src, srcoff);
- if (IS_ERR(src_page)) {
- error = PTR_ERR(src_page);
+ src_folio = vfs_dedupe_get_folio(src, srcoff);
+ if (IS_ERR(src_folio)) {
+ error = PTR_ERR(src_folio);
goto out_error;
}
- dest_page = vfs_dedupe_get_page(dest, destoff);
- if (IS_ERR(dest_page)) {
- error = PTR_ERR(dest_page);
- put_page(src_page);
+ dst_folio = vfs_dedupe_get_folio(dest, dstoff);
+ if (IS_ERR(dst_folio)) {
+ error = PTR_ERR(dst_folio);
+ folio_put(src_folio);
goto out_error;
}
- vfs_lock_two_pages(src_page, dest_page);
+ vfs_lock_two_folios(src_folio, dst_folio);
/*
- * Now that we've locked both pages, make sure they're still
+ * Now that we've locked both folios, make sure they're still
* mapped to the file data we're interested in. If not,
* someone is invalidating pages on us and we lose.
*/
- if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
- src_page->mapping != src->i_mapping ||
- dest_page->mapping != dest->i_mapping) {
+ if (!folio_test_uptodate(src_folio) || !folio_test_uptodate(dst_folio) ||
+ src_folio->mapping != src->i_mapping ||
+ dst_folio->mapping != dest->i_mapping) {
same = false;
goto unlock;
}
- src_addr = kmap_atomic(src_page);
- dest_addr = kmap_atomic(dest_page);
+ src_addr = kmap_local_folio(src_folio,
+ offset_in_folio(src_folio, srcoff));
+ dst_addr = kmap_local_folio(dst_folio,
+ offset_in_folio(dst_folio, dstoff));
- flush_dcache_page(src_page);
- flush_dcache_page(dest_page);
+ flush_dcache_folio(src_folio);
+ flush_dcache_folio(dst_folio);
- if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len))
+ if (memcmp(src_addr, dst_addr, cmp_len))
same = false;
- kunmap_atomic(dest_addr);
- kunmap_atomic(src_addr);
+ kunmap_local(dst_addr);
+ kunmap_local(src_addr);
unlock:
- vfs_unlock_two_pages(src_page, dest_page);
- put_page(dest_page);
- put_page(src_page);
+ vfs_unlock_two_folios(src_folio, dst_folio);
+ folio_put(dst_folio);
+ folio_put(src_folio);
if (!same)
break;
srcoff += cmp_len;
- destoff += cmp_len;
+ dstoff += cmp_len;
len -= cmp_len;
}
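A side benefit of the folio conversion is visible in the compare loop: kmap_local_folio() maps at a byte offset inside the folio, so the separate src_poff/dest_poff bookkeeping disappears. The read-and-map pattern, reduced to its core (sketch):

    struct folio *folio = read_mapping_folio(inode->i_mapping,
                                             pos >> PAGE_SHIFT, NULL);
    if (!IS_ERR(folio)) {
            /* map directly at the byte of interest */
            void *addr = kmap_local_folio(folio, offset_in_folio(folio, pos));
            /* ... compare up to the page boundary ... */
            kunmap_local(addr);
            folio_put(folio);
    }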
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 65ce0e72e7b9..e20d1484c663 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -155,11 +155,12 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info,
int nonblock)
{
+ enum pid_type type;
ssize_t ret;
DECLARE_WAITQUEUE(wait, current);
spin_lock_irq(&current->sighand->siglock);
- ret = dequeue_signal(current, &ctx->sigmask, info);
+ ret = dequeue_signal(current, &ctx->sigmask, info, &type);
switch (ret) {
case 0:
if (!nonblock)
@@ -174,7 +175,7 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info
add_wait_queue(&current->sighand->signalfd_wqh, &wait);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- ret = dequeue_signal(current, &ctx->sigmask, info);
+ ret = dequeue_signal(current, &ctx->sigmask, info, &type);
if (ret != 0)
break;
if (signal_pending(current)) {
diff --git a/fs/smbfs_common/smb2pdu.h b/fs/smbfs_common/smb2pdu.h
index 7ccadcbe684b..38b8fc514860 100644
--- a/fs/smbfs_common/smb2pdu.h
+++ b/fs/smbfs_common/smb2pdu.h
@@ -449,7 +449,7 @@ struct smb2_netname_neg_context {
*/
/* Flags */
-#define SMB2_ACCEPT_TRANSFORM_LEVEL_SECURITY 0x00000001
+#define SMB2_ACCEPT_TRANSPORT_LEVEL_SECURITY 0x00000001
struct smb2_transport_capabilities_context {
__le16 ContextType; /* 6 */
diff --git a/fs/smbfs_common/smbfsctl.h b/fs/smbfs_common/smbfsctl.h
index 926f87cd6af0..d51939c43ad7 100644
--- a/fs/smbfs_common/smbfsctl.h
+++ b/fs/smbfs_common/smbfsctl.h
@@ -95,8 +95,10 @@
#define FSCTL_SET_SHORT_NAME_BEHAVIOR 0x000901B4 /* BB add struct */
#define FSCTL_GET_INTEGRITY_INFORMATION 0x0009027C
#define FSCTL_GET_REFS_VOLUME_DATA 0x000902D8 /* See MS-FSCC 2.3.24 */
+#define FSCTL_SET_INTEGRITY_INFORMATION_EXT 0x00090380
#define FSCTL_GET_RETRIEVAL_POINTERS_AND_REFCOUNT 0x000903d3
#define FSCTL_GET_RETRIEVAL_POINTER_COUNT 0x0009042b
+#define FSCTL_REFS_STREAM_SNAPSHOT_MANAGEMENT 0x00090440
#define FSCTL_QUERY_ALLOCATED_RANGES 0x000940CF
#define FSCTL_SET_DEFECT_MANAGEMENT 0x00098134 /* BB add struct */
#define FSCTL_FILE_LEVEL_TRIM 0x00098208 /* BB add struct */
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
index bb44ff4c5cc6..b1b556dbce12 100644
--- a/fs/squashfs/super.c
+++ b/fs/squashfs/super.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/xattr.h>
+#include <linux/backing-dev.h>
#include "squashfs_fs.h"
#include "squashfs_fs_sb.h"
@@ -112,6 +113,24 @@ static const struct squashfs_decompressor *supported_squashfs_filesystem(
return decompressor;
}
+static int squashfs_bdi_init(struct super_block *sb)
+{
+ int err;
+ unsigned int major = MAJOR(sb->s_dev);
+ unsigned int minor = MINOR(sb->s_dev);
+
+ bdi_put(sb->s_bdi);
+ sb->s_bdi = &noop_backing_dev_info;
+
+ err = super_setup_bdi_name(sb, "squashfs_%u_%u", major, minor);
+ if (err)
+ return err;
+
+ sb->s_bdi->ra_pages = 0;
+ sb->s_bdi->io_pages = 0;
+
+ return 0;
+}
static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
@@ -127,6 +146,20 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
TRACE("Entered squashfs_fill_superblock\n");
+ /*
+	 * squashfs provides 'backing_dev_info' in order to disable readahead. For
+	 * squashfs, I/O is not deferred; it is done immediately in readpage,
+	 * which means the user always has to wait for their own I/O. So the
+	 * effect of readahead is very weak for squashfs. squashfs_bdi_init
+	 * sets sb->s_bdi->ra_pages and sb->s_bdi->io_pages to 0 to disable
+	 * readahead for squashfs.
+ */
+ err = squashfs_bdi_init(sb);
+ if (err) {
+ errorf(fc, "squashfs init bdi failed");
+ return err;
+ }
+
sb->s_fs_info = kzalloc(sizeof(*msblk), GFP_KERNEL);
if (sb->s_fs_info == NULL) {
ERROR("Failed to allocate squashfs_sb_info\n");
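
The technique generalizes beyond squashfs: any filesystem whose reads complete synchronously can opt out of readahead by giving the superblock a private BDI and zeroing its readahead window. A minimal sketch under that assumption (error handling elided, examplefs is a placeholder name):

	static int examplefs_disable_readahead(struct super_block *sb)
	{
		int err = super_setup_bdi_name(sb, "examplefs_%u_%u",
					       MAJOR(sb->s_dev),
					       MINOR(sb->s_dev));
		if (err)
			return err;
		sb->s_bdi->ra_pages = 0;	/* no readahead window */
		sb->s_bdi->io_pages = 0;	/* no oversized requests */
		return 0;
	}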
diff --git a/fs/super.c b/fs/super.c
index 3bfc0f8fbd5b..f1d4a193602d 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -31,7 +31,6 @@
#include <linux/mutex.h>
#include <linux/backing-dev.h>
#include <linux/rculist_bl.h>
-#include <linux/cleancache.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/lockdep.h>
@@ -260,7 +259,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags,
s->s_time_gran = 1000000000;
s->s_time_min = TIME64_MIN;
s->s_time_max = TIME64_MAX;
- s->cleancache_poolid = CLEANCACHE_NO_POOL;
s->s_shrink.seeks = DEFAULT_SEEKS;
s->s_shrink.scan_objects = super_cache_scan;
@@ -330,7 +328,6 @@ void deactivate_locked_super(struct super_block *s)
{
struct file_system_type *fs = s->s_type;
if (atomic_dec_and_test(&s->s_active)) {
- cleancache_invalidate_fs(s);
unregister_shrinker(&s->s_shrink);
fs->kill_sb(s);
@@ -1423,8 +1420,8 @@ struct dentry *mount_nodev(struct file_system_type *fs_type,
}
EXPORT_SYMBOL(mount_nodev);
-static int reconfigure_single(struct super_block *s,
- int flags, void *data)
+int reconfigure_single(struct super_block *s,
+ int flags, void *data)
{
struct fs_context *fc;
int ret;
@@ -1619,11 +1616,9 @@ static void lockdep_sb_freeze_acquire(struct super_block *sb)
percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_);
}
-static void sb_freeze_unlock(struct super_block *sb)
+static void sb_freeze_unlock(struct super_block *sb, int level)
{
- int level;
-
- for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--)
+ for (level--; level >= 0; level--)
percpu_up_write(sb->s_writers.rw_sem + level);
}
@@ -1694,7 +1689,14 @@ int freeze_super(struct super_block *sb)
sb_wait_write(sb, SB_FREEZE_PAGEFAULT);
/* All writers are done so after syncing there won't be dirty data */
- sync_filesystem(sb);
+ ret = sync_filesystem(sb);
+ if (ret) {
+ sb->s_writers.frozen = SB_UNFROZEN;
+ sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT);
+ wake_up(&sb->s_writers.wait_unfrozen);
+ deactivate_locked_super(sb);
+ return ret;
+ }
/* Now wait for internal filesystem counter */
sb->s_writers.frozen = SB_FREEZE_FS;
@@ -1706,7 +1708,7 @@ int freeze_super(struct super_block *sb)
printk(KERN_ERR
"VFS:Filesystem freeze failed\n");
sb->s_writers.frozen = SB_UNFROZEN;
- sb_freeze_unlock(sb);
+ sb_freeze_unlock(sb, SB_FREEZE_FS);
wake_up(&sb->s_writers.wait_unfrozen);
deactivate_locked_super(sb);
return ret;
@@ -1751,7 +1753,7 @@ static int thaw_super_locked(struct super_block *sb)
}
sb->s_writers.frozen = SB_UNFROZEN;
- sb_freeze_unlock(sb);
+ sb_freeze_unlock(sb, SB_FREEZE_FS);
out:
wake_up(&sb->s_writers.wait_unfrozen);
deactivate_locked_super(sb);
diff --git a/fs/sync.c b/fs/sync.c
index 3ce8e2137f31..c7690016453e 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -29,7 +29,7 @@
*/
int sync_filesystem(struct super_block *sb)
{
- int ret;
+ int ret = 0;
/*
* We need to be protected against the filesystem going from
@@ -52,15 +52,21 @@ int sync_filesystem(struct super_block *sb)
* at a time.
*/
writeback_inodes_sb(sb, WB_REASON_SYNC);
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 0);
+ if (sb->s_op->sync_fs) {
+ ret = sb->s_op->sync_fs(sb, 0);
+ if (ret)
+ return ret;
+ }
ret = sync_blockdev_nowait(sb->s_bdev);
- if (ret < 0)
+ if (ret)
return ret;
sync_inodes_sb(sb);
- if (sb->s_op->sync_fs)
- sb->s_op->sync_fs(sb, 1);
+ if (sb->s_op->sync_fs) {
+ ret = sb->s_op->sync_fs(sb, 1);
+ if (ret)
+ return ret;
+ }
return sync_blockdev(sb->s_bdev);
}
EXPORT_SYMBOL(sync_filesystem);
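
With sync_filesystem() now propagating ->sync_fs errors, a filesystem that detects a failed metadata flush can surface it to syncfs(2) callers instead of having it silently dropped. A hypothetical implementation sketch (examplefs_* names are illustrative):

	static int examplefs_sync_fs(struct super_block *sb, int wait)
	{
		struct examplefs_sb_info *sbi = sb->s_fs_info;

		if (!wait)	/* opportunistic pass: nothing to report */
			return 0;
		/* Returned errno now reaches userspace via syncfs(2). */
		return examplefs_flush_metadata(sbi);
	}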
diff --git a/fs/sysctls.c b/fs/sysctls.c
new file mode 100644
index 000000000000..c701273c9432
--- /dev/null
+++ b/fs/sysctls.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * /proc/sys/fs shared sysctls
+ *
+ * These sysctls are shared between different filesystems.
+ */
+#include <linux/init.h>
+#include <linux/sysctl.h>
+
+static struct ctl_table fs_shared_sysctls[] = {
+ {
+ .procname = "overflowuid",
+ .data = &fs_overflowuid,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_MAXOLDUID,
+ },
+ {
+ .procname = "overflowgid",
+ .data = &fs_overflowgid,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_MAXOLDUID,
+ },
+ { }
+};
+
+DECLARE_SYSCTL_BASE(fs, fs_shared_sysctls);
+
+static int __init init_fs_sysctls(void)
+{
+ return register_sysctl_base(fs);
+}
+
+early_initcall(init_fs_sysctls);
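
The new file follows the pattern this sysctl rework uses throughout fs/: declare a ctl_table, wrap it with DECLARE_SYSCTL_BASE(), and register it from an early initcall. A sketch for a hypothetical subsystem, mirroring the code above (example_knob is a placeholder):

	static struct ctl_table example_sysctls[] = {
		{
			.procname	= "example_knob",
			.data		= &example_knob,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }	/* empty sentinel terminates the table */
	};

	DECLARE_SYSCTL_BASE(example, example_sysctls);

	static int __init init_example_sysctls(void)
	{
		return register_sysctl_base(example);
	}
	early_initcall(init_example_sysctls);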
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 3616839c5c4b..bafc02bf8220 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -109,12 +109,12 @@ static int tracefs_syscall_rmdir(struct inode *inode, struct dentry *dentry)
* also the directory that is being deleted.
*/
inode_unlock(inode);
- inode_unlock(dentry->d_inode);
+ inode_unlock(d_inode(dentry));
ret = tracefs_ops.rmdir(name);
inode_lock_nested(inode, I_MUTEX_PARENT);
- inode_lock(dentry->d_inode);
+ inode_lock(d_inode(dentry));
kfree(name);
@@ -284,7 +284,7 @@ static int tracefs_parse_options(char *data, struct tracefs_mount_opts *opts)
static int tracefs_apply_options(struct super_block *sb)
{
struct tracefs_fs_info *fsi = sb->s_fs_info;
- struct inode *inode = sb->s_root->d_inode;
+ struct inode *inode = d_inode(sb->s_root);
struct tracefs_mount_opts *opts = &fsi->mount_opts;
inode->i_mode &= ~S_IALLUGO;
@@ -403,18 +403,18 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
if (!parent)
parent = tracefs_mount->mnt_root;
- inode_lock(parent->d_inode);
- if (unlikely(IS_DEADDIR(parent->d_inode)))
+ inode_lock(d_inode(parent));
+ if (unlikely(IS_DEADDIR(d_inode(parent))))
dentry = ERR_PTR(-ENOENT);
else
dentry = lookup_one_len(name, parent, strlen(name));
- if (!IS_ERR(dentry) && dentry->d_inode) {
+ if (!IS_ERR(dentry) && d_inode(dentry)) {
dput(dentry);
dentry = ERR_PTR(-EEXIST);
}
if (IS_ERR(dentry)) {
- inode_unlock(parent->d_inode);
+ inode_unlock(d_inode(parent));
simple_release_fs(&tracefs_mount, &tracefs_mount_count);
}
@@ -423,7 +423,7 @@ static struct dentry *start_creating(const char *name, struct dentry *parent)
static struct dentry *failed_creating(struct dentry *dentry)
{
- inode_unlock(dentry->d_parent->d_inode);
+ inode_unlock(d_inode(dentry->d_parent));
dput(dentry);
simple_release_fs(&tracefs_mount, &tracefs_mount_count);
return NULL;
@@ -431,7 +431,7 @@ static struct dentry *failed_creating(struct dentry *dentry)
static struct dentry *end_creating(struct dentry *dentry)
{
- inode_unlock(dentry->d_parent->d_inode);
+ inode_unlock(d_inode(dentry->d_parent));
return dentry;
}
@@ -489,7 +489,7 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
inode->i_uid = d_inode(dentry->d_parent)->i_uid;
inode->i_gid = d_inode(dentry->d_parent)->i_gid;
d_instantiate(dentry, inode);
- fsnotify_create(dentry->d_parent->d_inode, dentry);
+ fsnotify_create(d_inode(dentry->d_parent), dentry);
return end_creating(dentry);
}
@@ -516,8 +516,8 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent,
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
d_instantiate(dentry, inode);
- inc_nlink(dentry->d_parent->d_inode);
- fsnotify_mkdir(dentry->d_parent->d_inode, dentry);
+ inc_nlink(d_inode(dentry->d_parent));
+ fsnotify_mkdir(d_inode(dentry->d_parent), dentry);
return end_creating(dentry);
}
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 2ecf0e87660e..b5d611cee749 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -77,6 +77,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode)
GFP_KERNEL);
}
if (!iinfo->i_data) {
+ make_bad_inode(inode);
iput(inode);
return ERR_PTR(-ENOMEM);
}
@@ -86,6 +87,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode)
dinfo->i_location.partitionReferenceNum,
start, &err);
if (err) {
+ make_bad_inode(inode);
iput(inode);
return ERR_PTR(err);
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 1d6b7a50736b..ea8f6cd01f50 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -258,10 +258,6 @@ int udf_expand_file_adinicb(struct inode *inode)
char *kaddr;
struct udf_inode_info *iinfo = UDF_I(inode);
int err;
- struct writeback_control udf_wbc = {
- .sync_mode = WB_SYNC_NONE,
- .nr_to_write = 1,
- };
WARN_ON_ONCE(!inode_is_locked(inode));
if (!iinfo->i_lenAlloc) {
@@ -305,8 +301,10 @@ int udf_expand_file_adinicb(struct inode *inode)
iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
/* from now on we have normal address_space methods */
inode->i_data.a_ops = &udf_aops;
+ set_page_dirty(page);
+ unlock_page(page);
up_write(&iinfo->i_data_sem);
- err = inode->i_data.a_ops->writepage(page, &udf_wbc);
+ err = filemap_fdatawrite(inode->i_mapping);
if (err) {
/* Restore everything back so that we don't lose data... */
lock_page(page);
@@ -317,6 +315,7 @@ int udf_expand_file_adinicb(struct inode *inode)
unlock_page(page);
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
inode->i_data.a_ops = &udf_adinicb_aops;
+ iinfo->i_lenAlloc = inode->i_size;
up_write(&iinfo->i_data_sem);
}
put_page(page);
diff --git a/fs/unicode/.gitignore b/fs/unicode/.gitignore
index 361294571ab0..51cdf3fb4dd4 100644
--- a/fs/unicode/.gitignore
+++ b/fs/unicode/.gitignore
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
/mkutf8data
-/utf8data.h
+/utf8data.c
diff --git a/fs/unicode/Kconfig b/fs/unicode/Kconfig
index 2c27b9a5cd6c..da786a687fdc 100644
--- a/fs/unicode/Kconfig
+++ b/fs/unicode/Kconfig
@@ -3,12 +3,13 @@
# UTF-8 normalization
#
config UNICODE
- bool "UTF-8 normalization and casefolding support"
+ tristate "UTF-8 normalization and casefolding support"
help
Say Y here to enable UTF-8 NFD normalization and NFD+CF casefolding
- support.
+	  support. If you say M here, the large table of case foldings will
+	  be a separate loadable module that gets requested only when a file
+	  system actually uses it.
config UNICODE_NORMALIZATION_SELFTEST
tristate "Test UTF-8 normalization support"
depends on UNICODE
- default n
diff --git a/fs/unicode/Makefile b/fs/unicode/Makefile
index b88aecc86550..0cc87423de82 100644
--- a/fs/unicode/Makefile
+++ b/fs/unicode/Makefile
@@ -1,15 +1,18 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_UNICODE) += unicode.o
+ifneq ($(CONFIG_UNICODE),)
+obj-y += unicode.o
+endif
+obj-$(CONFIG_UNICODE) += utf8data.o
obj-$(CONFIG_UNICODE_NORMALIZATION_SELFTEST) += utf8-selftest.o
unicode-y := utf8-norm.o utf8-core.o
-$(obj)/utf8-norm.o: $(obj)/utf8data.h
+$(obj)/utf8data.o: $(obj)/utf8data.c
-# In the normal build, the checked-in utf8data.h is just shipped.
+# In the normal build, the checked-in utf8data.c is just shipped.
#
-# To generate utf8data.h from UCD, put *.txt files in this directory
+# To generate utf8data.c from UCD, put *.txt files in this directory
# and pass REGENERATE_UTF8DATA=1 from the command line.
ifdef REGENERATE_UTF8DATA
@@ -24,15 +27,15 @@ quiet_cmd_utf8data = GEN $@
-t $(srctree)/$(src)/NormalizationTest.txt \
-o $@
-$(obj)/utf8data.h: $(obj)/mkutf8data $(filter %.txt, $(cmd_utf8data)) FORCE
+$(obj)/utf8data.c: $(obj)/mkutf8data $(filter %.txt, $(cmd_utf8data)) FORCE
$(call if_changed,utf8data)
else
-$(obj)/utf8data.h: $(src)/utf8data.h_shipped FORCE
+$(obj)/utf8data.c: $(src)/utf8data.c_shipped FORCE
$(call if_changed,shipped)
endif
-targets += utf8data.h
+targets += utf8data.c
hostprogs += mkutf8data
diff --git a/fs/unicode/mkutf8data.c b/fs/unicode/mkutf8data.c
index ff2025ac5a32..bc1a7c8b5c8d 100644
--- a/fs/unicode/mkutf8data.c
+++ b/fs/unicode/mkutf8data.c
@@ -3287,12 +3287,10 @@ static void write_file(void)
open_fail(utf8_name, errno);
fprintf(file, "/* This file is generated code, do not edit. */\n");
- fprintf(file, "#ifndef __INCLUDED_FROM_UTF8NORM_C__\n");
- fprintf(file, "#error Only nls_utf8-norm.c should include this file.\n");
- fprintf(file, "#endif\n");
fprintf(file, "\n");
- fprintf(file, "static const unsigned int utf8vers = %#x;\n",
- unicode_maxage);
+ fprintf(file, "#include <linux/module.h>\n");
+ fprintf(file, "#include <linux/kernel.h>\n");
+ fprintf(file, "#include \"utf8n.h\"\n");
fprintf(file, "\n");
fprintf(file, "static const unsigned int utf8agetab[] = {\n");
for (i = 0; i != ages_count; i++)
@@ -3339,6 +3337,22 @@ static void write_file(void)
fprintf(file, "\n");
}
fprintf(file, "};\n");
+ fprintf(file, "\n");
+ fprintf(file, "struct utf8data_table utf8_data_table = {\n");
+ fprintf(file, "\t.utf8agetab = utf8agetab,\n");
+ fprintf(file, "\t.utf8agetab_size = ARRAY_SIZE(utf8agetab),\n");
+ fprintf(file, "\n");
+ fprintf(file, "\t.utf8nfdicfdata = utf8nfdicfdata,\n");
+ fprintf(file, "\t.utf8nfdicfdata_size = ARRAY_SIZE(utf8nfdicfdata),\n");
+ fprintf(file, "\n");
+ fprintf(file, "\t.utf8nfdidata = utf8nfdidata,\n");
+ fprintf(file, "\t.utf8nfdidata_size = ARRAY_SIZE(utf8nfdidata),\n");
+ fprintf(file, "\n");
+ fprintf(file, "\t.utf8data = utf8data,\n");
+ fprintf(file, "};\n");
+ fprintf(file, "EXPORT_SYMBOL_GPL(utf8_data_table);");
+ fprintf(file, "\n");
+ fprintf(file, "MODULE_LICENSE(\"GPL v2\");\n");
fclose(file);
}
diff --git a/fs/unicode/utf8-core.c b/fs/unicode/utf8-core.c
index dc25823bfed9..67aaadc3ab07 100644
--- a/fs/unicode/utf8-core.c
+++ b/fs/unicode/utf8-core.c
@@ -5,16 +5,13 @@
#include <linux/slab.h>
#include <linux/parser.h>
#include <linux/errno.h>
-#include <linux/unicode.h>
#include <linux/stringhash.h>
#include "utf8n.h"
int utf8_validate(const struct unicode_map *um, const struct qstr *str)
{
- const struct utf8data *data = utf8nfdi(um->version);
-
- if (utf8nlen(data, str->name, str->len) < 0)
+ if (utf8nlen(um, UTF8_NFDI, str->name, str->len) < 0)
return -1;
return 0;
}
@@ -23,14 +20,13 @@ EXPORT_SYMBOL(utf8_validate);
int utf8_strncmp(const struct unicode_map *um,
const struct qstr *s1, const struct qstr *s2)
{
- const struct utf8data *data = utf8nfdi(um->version);
struct utf8cursor cur1, cur2;
int c1, c2;
- if (utf8ncursor(&cur1, data, s1->name, s1->len) < 0)
+ if (utf8ncursor(&cur1, um, UTF8_NFDI, s1->name, s1->len) < 0)
return -EINVAL;
- if (utf8ncursor(&cur2, data, s2->name, s2->len) < 0)
+ if (utf8ncursor(&cur2, um, UTF8_NFDI, s2->name, s2->len) < 0)
return -EINVAL;
do {
@@ -50,14 +46,13 @@ EXPORT_SYMBOL(utf8_strncmp);
int utf8_strncasecmp(const struct unicode_map *um,
const struct qstr *s1, const struct qstr *s2)
{
- const struct utf8data *data = utf8nfdicf(um->version);
struct utf8cursor cur1, cur2;
int c1, c2;
- if (utf8ncursor(&cur1, data, s1->name, s1->len) < 0)
+ if (utf8ncursor(&cur1, um, UTF8_NFDICF, s1->name, s1->len) < 0)
return -EINVAL;
- if (utf8ncursor(&cur2, data, s2->name, s2->len) < 0)
+ if (utf8ncursor(&cur2, um, UTF8_NFDICF, s2->name, s2->len) < 0)
return -EINVAL;
do {
@@ -81,12 +76,11 @@ int utf8_strncasecmp_folded(const struct unicode_map *um,
const struct qstr *cf,
const struct qstr *s1)
{
- const struct utf8data *data = utf8nfdicf(um->version);
struct utf8cursor cur1;
int c1, c2;
int i = 0;
- if (utf8ncursor(&cur1, data, s1->name, s1->len) < 0)
+ if (utf8ncursor(&cur1, um, UTF8_NFDICF, s1->name, s1->len) < 0)
return -EINVAL;
do {
@@ -105,11 +99,10 @@ EXPORT_SYMBOL(utf8_strncasecmp_folded);
int utf8_casefold(const struct unicode_map *um, const struct qstr *str,
unsigned char *dest, size_t dlen)
{
- const struct utf8data *data = utf8nfdicf(um->version);
struct utf8cursor cur;
size_t nlen = 0;
- if (utf8ncursor(&cur, data, str->name, str->len) < 0)
+ if (utf8ncursor(&cur, um, UTF8_NFDICF, str->name, str->len) < 0)
return -EINVAL;
for (nlen = 0; nlen < dlen; nlen++) {
@@ -128,12 +121,11 @@ EXPORT_SYMBOL(utf8_casefold);
int utf8_casefold_hash(const struct unicode_map *um, const void *salt,
struct qstr *str)
{
- const struct utf8data *data = utf8nfdicf(um->version);
struct utf8cursor cur;
int c;
unsigned long hash = init_name_hash(salt);
- if (utf8ncursor(&cur, data, str->name, str->len) < 0)
+ if (utf8ncursor(&cur, um, UTF8_NFDICF, str->name, str->len) < 0)
return -EINVAL;
while ((c = utf8byte(&cur))) {
@@ -149,11 +141,10 @@ EXPORT_SYMBOL(utf8_casefold_hash);
int utf8_normalize(const struct unicode_map *um, const struct qstr *str,
unsigned char *dest, size_t dlen)
{
- const struct utf8data *data = utf8nfdi(um->version);
struct utf8cursor cur;
ssize_t nlen = 0;
- if (utf8ncursor(&cur, data, str->name, str->len) < 0)
+ if (utf8ncursor(&cur, um, UTF8_NFDI, str->name, str->len) < 0)
return -EINVAL;
for (nlen = 0; nlen < dlen; nlen++) {
@@ -167,69 +158,59 @@ int utf8_normalize(const struct unicode_map *um, const struct qstr *str,
}
return -EINVAL;
}
-
EXPORT_SYMBOL(utf8_normalize);
-static int utf8_parse_version(const char *version, unsigned int *maj,
- unsigned int *min, unsigned int *rev)
+static const struct utf8data *find_table_version(const struct utf8data *table,
+ size_t nr_entries, unsigned int version)
{
- substring_t args[3];
- char version_string[12];
- static const struct match_token token[] = {
- {1, "%d.%d.%d"},
- {0, NULL}
- };
-
- strncpy(version_string, version, sizeof(version_string));
-
- if (match_token(version_string, token, args) != 1)
- return -EINVAL;
-
- if (match_int(&args[0], maj) || match_int(&args[1], min) ||
- match_int(&args[2], rev))
- return -EINVAL;
+ size_t i = nr_entries - 1;
- return 0;
+ while (version < table[i].maxage)
+ i--;
+ if (version > table[i].maxage)
+ return NULL;
+ return &table[i];
}
-struct unicode_map *utf8_load(const char *version)
+struct unicode_map *utf8_load(unsigned int version)
{
- struct unicode_map *um = NULL;
- int unicode_version;
-
- if (version) {
- unsigned int maj, min, rev;
-
- if (utf8_parse_version(version, &maj, &min, &rev) < 0)
- return ERR_PTR(-EINVAL);
-
- if (!utf8version_is_supported(maj, min, rev))
- return ERR_PTR(-EINVAL);
-
- unicode_version = UNICODE_AGE(maj, min, rev);
- } else {
- unicode_version = utf8version_latest();
- printk(KERN_WARNING"UTF-8 version not specified. "
- "Assuming latest supported version (%d.%d.%d).",
- (unicode_version >> 16) & 0xff,
- (unicode_version >> 8) & 0xff,
- (unicode_version & 0xff));
- }
+ struct unicode_map *um;
um = kzalloc(sizeof(struct unicode_map), GFP_KERNEL);
if (!um)
return ERR_PTR(-ENOMEM);
-
- um->charset = "UTF-8";
- um->version = unicode_version;
-
+ um->version = version;
+
+ um->tables = symbol_request(utf8_data_table);
+ if (!um->tables)
+ goto out_free_um;
+
+ if (!utf8version_is_supported(um, version))
+ goto out_symbol_put;
+ um->ntab[UTF8_NFDI] = find_table_version(um->tables->utf8nfdidata,
+ um->tables->utf8nfdidata_size, um->version);
+ if (!um->ntab[UTF8_NFDI])
+ goto out_symbol_put;
+ um->ntab[UTF8_NFDICF] = find_table_version(um->tables->utf8nfdicfdata,
+ um->tables->utf8nfdicfdata_size, um->version);
+ if (!um->ntab[UTF8_NFDICF])
+ goto out_symbol_put;
return um;
+
+out_symbol_put:
+ symbol_put(um->tables);
+out_free_um:
+ kfree(um);
+ return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL(utf8_load);
void utf8_unload(struct unicode_map *um)
{
- kfree(um);
+ if (um) {
+ symbol_put(utf8_data_table);
+ kfree(um);
+ }
}
EXPORT_SYMBOL(utf8_unload);
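
The net effect of the utf8-core rework is a simpler consumer API: versions are passed as packed UNICODE_AGE values, the normalization form is selected per call, and utf8_load() now pins the table module. A sketch of filesystem-side usage under the new interface (name1/name2 are assumed struct qstr values):

	struct unicode_map *um;
	int cmp;

	um = utf8_load(UNICODE_AGE(12, 1, 0));
	if (IS_ERR(um))
		return PTR_ERR(um);

	/* Case-insensitive comparison; NFD + casefold chosen internally. */
	cmp = utf8_strncasecmp(um, &name1, &name2);

	utf8_unload(um);	/* drops the symbol_request() reference */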
diff --git a/fs/unicode/utf8-norm.c b/fs/unicode/utf8-norm.c
index 1d2d2e5b906a..768f8ab448b8 100644
--- a/fs/unicode/utf8-norm.c
+++ b/fs/unicode/utf8-norm.c
@@ -6,34 +6,17 @@
#include "utf8n.h"
-struct utf8data {
- unsigned int maxage;
- unsigned int offset;
-};
-
-#define __INCLUDED_FROM_UTF8NORM_C__
-#include "utf8data.h"
-#undef __INCLUDED_FROM_UTF8NORM_C__
-
-int utf8version_is_supported(u8 maj, u8 min, u8 rev)
+int utf8version_is_supported(const struct unicode_map *um, unsigned int version)
{
- int i = ARRAY_SIZE(utf8agetab) - 1;
- unsigned int sb_utf8version = UNICODE_AGE(maj, min, rev);
+ int i = um->tables->utf8agetab_size - 1;
- while (i >= 0 && utf8agetab[i] != 0) {
- if (sb_utf8version == utf8agetab[i])
+ while (i >= 0 && um->tables->utf8agetab[i] != 0) {
+ if (version == um->tables->utf8agetab[i])
return 1;
i--;
}
return 0;
}
-EXPORT_SYMBOL(utf8version_is_supported);
-
-int utf8version_latest(void)
-{
- return utf8vers;
-}
-EXPORT_SYMBOL(utf8version_latest);
/*
* UTF-8 valid ranges.
@@ -168,7 +151,7 @@ typedef const unsigned char utf8trie_t;
* underlying datatype: unsigned char.
*
* leaf[0]: The unicode version, stored as a generation number that is
- * an index into utf8agetab[]. With this we can filter code
+ * an index into ->utf8agetab[]. With this we can filter code
* points based on the unicode version in which they were
* defined. The CCC of a non-defined code point is 0.
* leaf[1]: Canonical Combining Class. During normalization, we need
@@ -316,21 +299,19 @@ utf8hangul(const char *str, unsigned char *hangul)
* is well-formed and corresponds to a known unicode code point. The
* shorthand for this will be "is valid UTF-8 unicode".
*/
-static utf8leaf_t *utf8nlookup(const struct utf8data *data,
- unsigned char *hangul, const char *s, size_t len)
+static utf8leaf_t *utf8nlookup(const struct unicode_map *um,
+ enum utf8_normalization n, unsigned char *hangul, const char *s,
+ size_t len)
{
- utf8trie_t *trie = NULL;
+ utf8trie_t *trie = um->tables->utf8data + um->ntab[n]->offset;
int offlen;
int offset;
int mask;
int node;
- if (!data)
- return NULL;
if (len == 0)
return NULL;
- trie = utf8data + data->offset;
node = 1;
while (node) {
offlen = (*trie & OFFLEN) >> OFFLEN_SHIFT;
@@ -392,172 +373,29 @@ static utf8leaf_t *utf8nlookup(const struct utf8data *data,
*
* Forwards to utf8nlookup().
*/
-static utf8leaf_t *utf8lookup(const struct utf8data *data,
- unsigned char *hangul, const char *s)
+static utf8leaf_t *utf8lookup(const struct unicode_map *um,
+ enum utf8_normalization n, unsigned char *hangul, const char *s)
{
- return utf8nlookup(data, hangul, s, (size_t)-1);
-}
-
-/*
- * Maximum age of any character in s.
- * Return -1 if s is not valid UTF-8 unicode.
- * Return 0 if only non-assigned code points are used.
- */
-int utf8agemax(const struct utf8data *data, const char *s)
-{
- utf8leaf_t *leaf;
- int age = 0;
- int leaf_age;
- unsigned char hangul[UTF8HANGULLEAF];
-
- if (!data)
- return -1;
-
- while (*s) {
- leaf = utf8lookup(data, hangul, s);
- if (!leaf)
- return -1;
-
- leaf_age = utf8agetab[LEAF_GEN(leaf)];
- if (leaf_age <= data->maxage && leaf_age > age)
- age = leaf_age;
- s += utf8clen(s);
- }
- return age;
+ return utf8nlookup(um, n, hangul, s, (size_t)-1);
}
-EXPORT_SYMBOL(utf8agemax);
-
-/*
- * Minimum age of any character in s.
- * Return -1 if s is not valid UTF-8 unicode.
- * Return 0 if non-assigned code points are used.
- */
-int utf8agemin(const struct utf8data *data, const char *s)
-{
- utf8leaf_t *leaf;
- int age;
- int leaf_age;
- unsigned char hangul[UTF8HANGULLEAF];
-
- if (!data)
- return -1;
- age = data->maxage;
- while (*s) {
- leaf = utf8lookup(data, hangul, s);
- if (!leaf)
- return -1;
- leaf_age = utf8agetab[LEAF_GEN(leaf)];
- if (leaf_age <= data->maxage && leaf_age < age)
- age = leaf_age;
- s += utf8clen(s);
- }
- return age;
-}
-EXPORT_SYMBOL(utf8agemin);
-
-/*
- * Maximum age of any character in s, touch at most len bytes.
- * Return -1 if s is not valid UTF-8 unicode.
- */
-int utf8nagemax(const struct utf8data *data, const char *s, size_t len)
-{
- utf8leaf_t *leaf;
- int age = 0;
- int leaf_age;
- unsigned char hangul[UTF8HANGULLEAF];
-
- if (!data)
- return -1;
-
- while (len && *s) {
- leaf = utf8nlookup(data, hangul, s, len);
- if (!leaf)
- return -1;
- leaf_age = utf8agetab[LEAF_GEN(leaf)];
- if (leaf_age <= data->maxage && leaf_age > age)
- age = leaf_age;
- len -= utf8clen(s);
- s += utf8clen(s);
- }
- return age;
-}
-EXPORT_SYMBOL(utf8nagemax);
-
-/*
- * Maximum age of any character in s, touch at most len bytes.
- * Return -1 if s is not valid UTF-8 unicode.
- */
-int utf8nagemin(const struct utf8data *data, const char *s, size_t len)
-{
- utf8leaf_t *leaf;
- int leaf_age;
- int age;
- unsigned char hangul[UTF8HANGULLEAF];
-
- if (!data)
- return -1;
- age = data->maxage;
- while (len && *s) {
- leaf = utf8nlookup(data, hangul, s, len);
- if (!leaf)
- return -1;
- leaf_age = utf8agetab[LEAF_GEN(leaf)];
- if (leaf_age <= data->maxage && leaf_age < age)
- age = leaf_age;
- len -= utf8clen(s);
- s += utf8clen(s);
- }
- return age;
-}
-EXPORT_SYMBOL(utf8nagemin);
-
-/*
- * Length of the normalization of s.
- * Return -1 if s is not valid UTF-8 unicode.
- *
- * A string of Default_Ignorable_Code_Point has length 0.
- */
-ssize_t utf8len(const struct utf8data *data, const char *s)
-{
- utf8leaf_t *leaf;
- size_t ret = 0;
- unsigned char hangul[UTF8HANGULLEAF];
-
- if (!data)
- return -1;
- while (*s) {
- leaf = utf8lookup(data, hangul, s);
- if (!leaf)
- return -1;
- if (utf8agetab[LEAF_GEN(leaf)] > data->maxage)
- ret += utf8clen(s);
- else if (LEAF_CCC(leaf) == DECOMPOSE)
- ret += strlen(LEAF_STR(leaf));
- else
- ret += utf8clen(s);
- s += utf8clen(s);
- }
- return ret;
-}
-EXPORT_SYMBOL(utf8len);
/*
* Length of the normalization of s, touch at most len bytes.
* Return -1 if s is not valid UTF-8 unicode.
*/
-ssize_t utf8nlen(const struct utf8data *data, const char *s, size_t len)
+ssize_t utf8nlen(const struct unicode_map *um, enum utf8_normalization n,
+ const char *s, size_t len)
{
utf8leaf_t *leaf;
size_t ret = 0;
unsigned char hangul[UTF8HANGULLEAF];
- if (!data)
- return -1;
while (len && *s) {
- leaf = utf8nlookup(data, hangul, s, len);
+ leaf = utf8nlookup(um, n, hangul, s, len);
if (!leaf)
return -1;
- if (utf8agetab[LEAF_GEN(leaf)] > data->maxage)
+ if (um->tables->utf8agetab[LEAF_GEN(leaf)] >
+ um->ntab[n]->maxage)
ret += utf8clen(s);
else if (LEAF_CCC(leaf) == DECOMPOSE)
ret += strlen(LEAF_STR(leaf));
@@ -568,7 +406,6 @@ ssize_t utf8nlen(const struct utf8data *data, const char *s, size_t len)
}
return ret;
}
-EXPORT_SYMBOL(utf8nlen);
/*
* Set up an utf8cursor for use by utf8byte().
@@ -580,14 +417,13 @@ EXPORT_SYMBOL(utf8nlen);
*
* Returns -1 on error, 0 on success.
*/
-int utf8ncursor(struct utf8cursor *u8c, const struct utf8data *data,
- const char *s, size_t len)
+int utf8ncursor(struct utf8cursor *u8c, const struct unicode_map *um,
+ enum utf8_normalization n, const char *s, size_t len)
{
- if (!data)
- return -1;
if (!s)
return -1;
- u8c->data = data;
+ u8c->um = um;
+ u8c->n = n;
u8c->s = s;
u8c->p = NULL;
u8c->ss = NULL;
@@ -604,23 +440,6 @@ int utf8ncursor(struct utf8cursor *u8c, const struct utf8data *data,
return -1;
return 0;
}
-EXPORT_SYMBOL(utf8ncursor);
-
-/*
- * Set up an utf8cursor for use by utf8byte().
- *
- * u8c : pointer to cursor.
- * data : const struct utf8data to use for normalization.
- * s : NUL-terminated string.
- *
- * Returns -1 on error, 0 on success.
- */
-int utf8cursor(struct utf8cursor *u8c, const struct utf8data *data,
- const char *s)
-{
- return utf8ncursor(u8c, data, s, (unsigned int)-1);
-}
-EXPORT_SYMBOL(utf8cursor);
/*
* Get one byte from the normalized form of the string described by u8c.
@@ -678,9 +497,9 @@ int utf8byte(struct utf8cursor *u8c)
/* Look up the data for the current character. */
if (u8c->p) {
- leaf = utf8lookup(u8c->data, u8c->hangul, u8c->s);
+ leaf = utf8lookup(u8c->um, u8c->n, u8c->hangul, u8c->s);
} else {
- leaf = utf8nlookup(u8c->data, u8c->hangul,
+ leaf = utf8nlookup(u8c->um, u8c->n, u8c->hangul,
u8c->s, u8c->len);
}
@@ -690,7 +509,8 @@ int utf8byte(struct utf8cursor *u8c)
ccc = LEAF_CCC(leaf);
/* Characters that are too new have CCC 0. */
- if (utf8agetab[LEAF_GEN(leaf)] > u8c->data->maxage) {
+ if (u8c->um->tables->utf8agetab[LEAF_GEN(leaf)] >
+ u8c->um->ntab[u8c->n]->maxage) {
ccc = STOPPER;
} else if (ccc == DECOMPOSE) {
u8c->len -= utf8clen(u8c->s);
@@ -704,7 +524,7 @@ int utf8byte(struct utf8cursor *u8c)
goto ccc_mismatch;
}
- leaf = utf8lookup(u8c->data, u8c->hangul, u8c->s);
+ leaf = utf8lookup(u8c->um, u8c->n, u8c->hangul, u8c->s);
if (!leaf)
return -1;
ccc = LEAF_CCC(leaf);
@@ -765,28 +585,10 @@ ccc_mismatch:
}
}
}
-EXPORT_SYMBOL(utf8byte);
-
-const struct utf8data *utf8nfdi(unsigned int maxage)
-{
- int i = ARRAY_SIZE(utf8nfdidata) - 1;
-
- while (maxage < utf8nfdidata[i].maxage)
- i--;
- if (maxage > utf8nfdidata[i].maxage)
- return NULL;
- return &utf8nfdidata[i];
-}
-EXPORT_SYMBOL(utf8nfdi);
-
-const struct utf8data *utf8nfdicf(unsigned int maxage)
-{
- int i = ARRAY_SIZE(utf8nfdicfdata) - 1;
- while (maxage < utf8nfdicfdata[i].maxage)
- i--;
- if (maxage > utf8nfdicfdata[i].maxage)
- return NULL;
- return &utf8nfdicfdata[i];
-}
-EXPORT_SYMBOL(utf8nfdicf);
+#ifdef CONFIG_UNICODE_NORMALIZATION_SELFTEST_MODULE
+EXPORT_SYMBOL_GPL(utf8version_is_supported);
+EXPORT_SYMBOL_GPL(utf8nlen);
+EXPORT_SYMBOL_GPL(utf8ncursor);
+EXPORT_SYMBOL_GPL(utf8byte);
+#endif
diff --git a/fs/unicode/utf8-selftest.c b/fs/unicode/utf8-selftest.c
index 6fe8af7edccb..eb2bbdd688d7 100644
--- a/fs/unicode/utf8-selftest.c
+++ b/fs/unicode/utf8-selftest.c
@@ -18,9 +18,7 @@ unsigned int failed_tests;
unsigned int total_tests;
/* Tests will be based on this version. */
-#define latest_maj 12
-#define latest_min 1
-#define latest_rev 0
+#define UTF8_LATEST UNICODE_AGE(12, 1, 0)
#define _test(cond, func, line, fmt, ...) do { \
total_tests++; \
@@ -160,18 +158,22 @@ static const struct {
}
};
-static void check_utf8_nfdi(void)
+static ssize_t utf8len(const struct unicode_map *um, enum utf8_normalization n,
+ const char *s)
+{
+ return utf8nlen(um, n, s, (size_t)-1);
+}
+
+static int utf8cursor(struct utf8cursor *u8c, const struct unicode_map *um,
+ enum utf8_normalization n, const char *s)
+{
+ return utf8ncursor(u8c, um, n, s, (unsigned int)-1);
+}
+
+static void check_utf8_nfdi(struct unicode_map *um)
{
int i;
struct utf8cursor u8c;
- const struct utf8data *data;
-
- data = utf8nfdi(UNICODE_AGE(latest_maj, latest_min, latest_rev));
- if (!data) {
- pr_err("%s: Unable to load utf8-%d.%d.%d. Skipping.\n",
- __func__, latest_maj, latest_min, latest_rev);
- return;
- }
for (i = 0; i < ARRAY_SIZE(nfdi_test_data); i++) {
int len = strlen(nfdi_test_data[i].str);
@@ -179,10 +181,11 @@ static void check_utf8_nfdi(void)
int j = 0;
unsigned char c;
- test((utf8len(data, nfdi_test_data[i].str) == nlen));
- test((utf8nlen(data, nfdi_test_data[i].str, len) == nlen));
+ test((utf8len(um, UTF8_NFDI, nfdi_test_data[i].str) == nlen));
+ test((utf8nlen(um, UTF8_NFDI, nfdi_test_data[i].str, len) ==
+ nlen));
- if (utf8cursor(&u8c, data, nfdi_test_data[i].str) < 0)
+ if (utf8cursor(&u8c, um, UTF8_NFDI, nfdi_test_data[i].str) < 0)
pr_err("can't create cursor\n");
while ((c = utf8byte(&u8c)) > 0) {
@@ -196,18 +199,10 @@ static void check_utf8_nfdi(void)
}
}
-static void check_utf8_nfdicf(void)
+static void check_utf8_nfdicf(struct unicode_map *um)
{
int i;
struct utf8cursor u8c;
- const struct utf8data *data;
-
- data = utf8nfdicf(UNICODE_AGE(latest_maj, latest_min, latest_rev));
- if (!data) {
- pr_err("%s: Unable to load utf8-%d.%d.%d. Skipping.\n",
- __func__, latest_maj, latest_min, latest_rev);
- return;
- }
for (i = 0; i < ARRAY_SIZE(nfdicf_test_data); i++) {
int len = strlen(nfdicf_test_data[i].str);
@@ -215,10 +210,13 @@ static void check_utf8_nfdicf(void)
int j = 0;
unsigned char c;
- test((utf8len(data, nfdicf_test_data[i].str) == nlen));
- test((utf8nlen(data, nfdicf_test_data[i].str, len) == nlen));
+ test((utf8len(um, UTF8_NFDICF, nfdicf_test_data[i].str) ==
+ nlen));
+ test((utf8nlen(um, UTF8_NFDICF, nfdicf_test_data[i].str, len) ==
+ nlen));
- if (utf8cursor(&u8c, data, nfdicf_test_data[i].str) < 0)
+ if (utf8cursor(&u8c, um, UTF8_NFDICF,
+ nfdicf_test_data[i].str) < 0)
pr_err("can't create cursor\n");
while ((c = utf8byte(&u8c)) > 0) {
@@ -232,16 +230,9 @@ static void check_utf8_nfdicf(void)
}
}
-static void check_utf8_comparisons(void)
+static void check_utf8_comparisons(struct unicode_map *table)
{
int i;
- struct unicode_map *table = utf8_load("12.1.0");
-
- if (IS_ERR(table)) {
- pr_err("%s: Unable to load utf8 %d.%d.%d. Skipping.\n",
- __func__, latest_maj, latest_min, latest_rev);
- return;
- }
for (i = 0; i < ARRAY_SIZE(nfdi_test_data); i++) {
const struct qstr s1 = {.name = nfdi_test_data[i].str,
@@ -262,42 +253,49 @@ static void check_utf8_comparisons(void)
test_f(!utf8_strncasecmp(table, &s1, &s2),
"%s %s comparison mismatch\n", s1.name, s2.name);
}
-
- utf8_unload(table);
}
-static void check_supported_versions(void)
+static void check_supported_versions(struct unicode_map *um)
{
/* Unicode 7.0.0 should be supported. */
- test(utf8version_is_supported(7, 0, 0));
+ test(utf8version_is_supported(um, UNICODE_AGE(7, 0, 0)));
/* Unicode 9.0.0 should be supported. */
- test(utf8version_is_supported(9, 0, 0));
+ test(utf8version_is_supported(um, UNICODE_AGE(9, 0, 0)));
/* Unicode 1x.0.0 (the latest version) should be supported. */
- test(utf8version_is_supported(latest_maj, latest_min, latest_rev));
+ test(utf8version_is_supported(um, UTF8_LATEST));
/* Next versions don't exist. */
- test(!utf8version_is_supported(13, 0, 0));
- test(!utf8version_is_supported(0, 0, 0));
- test(!utf8version_is_supported(-1, -1, -1));
+ test(!utf8version_is_supported(um, UNICODE_AGE(13, 0, 0)));
+ test(!utf8version_is_supported(um, UNICODE_AGE(0, 0, 0)));
+ test(!utf8version_is_supported(um, UNICODE_AGE(-1, -1, -1)));
}
static int __init init_test_ucd(void)
{
+ struct unicode_map *um;
+
failed_tests = 0;
total_tests = 0;
- check_supported_versions();
- check_utf8_nfdi();
- check_utf8_nfdicf();
- check_utf8_comparisons();
+ um = utf8_load(UTF8_LATEST);
+ if (IS_ERR(um)) {
+ pr_err("%s: Unable to load utf8 table.\n", __func__);
+ return PTR_ERR(um);
+ }
+
+ check_supported_versions(um);
+ check_utf8_nfdi(um);
+ check_utf8_nfdicf(um);
+ check_utf8_comparisons(um);
if (!failed_tests)
pr_info("All %u tests passed\n", total_tests);
else
pr_err("%u out of %u tests failed\n", failed_tests,
total_tests);
+ utf8_unload(um);
return 0;
}
diff --git a/fs/unicode/utf8data.h_shipped b/fs/unicode/utf8data.c_shipped
index 76e4f0e1b089..d9b62901aa96 100644
--- a/fs/unicode/utf8data.h_shipped
+++ b/fs/unicode/utf8data.c_shipped
@@ -1,9 +1,8 @@
/* This file is generated code, do not edit. */
-#ifndef __INCLUDED_FROM_UTF8NORM_C__
-#error Only nls_utf8-norm.c should include this file.
-#endif
-static const unsigned int utf8vers = 0xc0100;
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include "utf8n.h"
static const unsigned int utf8agetab[] = {
0,
@@ -4107,3 +4106,18 @@ static const unsigned char utf8data[64256] = {
0x52,0x04,0x00,0x00,0x11,0x04,0x00,0x00,0x02,0x00,0xcf,0x86,0xcf,0x06,0x02,0x00,
0x81,0x80,0xcf,0x86,0x85,0x84,0xcf,0x86,0xcf,0x06,0x02,0x00,0x00,0x00,0x00,0x00
};
+
+struct utf8data_table utf8_data_table = {
+ .utf8agetab = utf8agetab,
+ .utf8agetab_size = ARRAY_SIZE(utf8agetab),
+
+ .utf8nfdicfdata = utf8nfdicfdata,
+ .utf8nfdicfdata_size = ARRAY_SIZE(utf8nfdicfdata),
+
+ .utf8nfdidata = utf8nfdidata,
+ .utf8nfdidata_size = ARRAY_SIZE(utf8nfdidata),
+
+ .utf8data = utf8data,
+};
+EXPORT_SYMBOL_GPL(utf8_data_table);
+MODULE_LICENSE("GPL v2");
diff --git a/fs/unicode/utf8n.h b/fs/unicode/utf8n.h
index 0acd530c2c79..bd00d587747a 100644
--- a/fs/unicode/utf8n.h
+++ b/fs/unicode/utf8n.h
@@ -11,53 +11,9 @@
#include <linux/export.h>
#include <linux/string.h>
#include <linux/module.h>
+#include <linux/unicode.h>
-/* Encoding a unicode version number as a single unsigned int. */
-#define UNICODE_MAJ_SHIFT (16)
-#define UNICODE_MIN_SHIFT (8)
-
-#define UNICODE_AGE(MAJ, MIN, REV) \
- (((unsigned int)(MAJ) << UNICODE_MAJ_SHIFT) | \
- ((unsigned int)(MIN) << UNICODE_MIN_SHIFT) | \
- ((unsigned int)(REV)))
-
-/* Highest unicode version supported by the data tables. */
-extern int utf8version_is_supported(u8 maj, u8 min, u8 rev);
-extern int utf8version_latest(void);
-
-/*
- * Look for the correct const struct utf8data for a unicode version.
- * Returns NULL if the version requested is too new.
- *
- * Two normalization forms are supported: nfdi and nfdicf.
- *
- * nfdi:
- * - Apply unicode normalization form NFD.
- * - Remove any Default_Ignorable_Code_Point.
- *
- * nfdicf:
- * - Apply unicode normalization form NFD.
- * - Remove any Default_Ignorable_Code_Point.
- * - Apply a full casefold (C + F).
- */
-extern const struct utf8data *utf8nfdi(unsigned int maxage);
-extern const struct utf8data *utf8nfdicf(unsigned int maxage);
-
-/*
- * Determine the maximum age of any unicode character in the string.
- * Returns 0 if only unassigned code points are present.
- * Returns -1 if the input is not valid UTF-8.
- */
-extern int utf8agemax(const struct utf8data *data, const char *s);
-extern int utf8nagemax(const struct utf8data *data, const char *s, size_t len);
-
-/*
- * Determine the minimum age of any unicode character in the string.
- * Returns 0 if any unassigned code points are present.
- * Returns -1 if the input is not valid UTF-8.
- */
-extern int utf8agemin(const struct utf8data *data, const char *s);
-extern int utf8nagemin(const struct utf8data *data, const char *s, size_t len);
+int utf8version_is_supported(const struct unicode_map *um, unsigned int version);
/*
* Determine the length of the normalized form of the string,
@@ -65,8 +21,8 @@ extern int utf8nagemin(const struct utf8data *data, const char *s, size_t len);
* Returns 0 if only ignorable code points are present.
* Returns -1 if the input is not valid UTF-8.
*/
-extern ssize_t utf8len(const struct utf8data *data, const char *s);
-extern ssize_t utf8nlen(const struct utf8data *data, const char *s, size_t len);
+ssize_t utf8nlen(const struct unicode_map *um, enum utf8_normalization n,
+ const char *s, size_t len);
/* Needed in struct utf8cursor below. */
#define UTF8HANGULLEAF (12)
@@ -75,7 +31,8 @@ extern ssize_t utf8nlen(const struct utf8data *data, const char *s, size_t len);
* Cursor structure used by the normalizer.
*/
struct utf8cursor {
- const struct utf8data *data;
+ const struct unicode_map *um;
+ enum utf8_normalization n;
const char *s;
const char *p;
const char *ss;
@@ -92,10 +49,8 @@ struct utf8cursor {
* Returns 0 on success.
* Returns -1 on failure.
*/
-extern int utf8cursor(struct utf8cursor *u8c, const struct utf8data *data,
- const char *s);
-extern int utf8ncursor(struct utf8cursor *u8c, const struct utf8data *data,
- const char *s, size_t len);
+int utf8ncursor(struct utf8cursor *u8c, const struct unicode_map *um,
+ enum utf8_normalization n, const char *s, size_t len);
/*
* Get the next byte in the normalization.
@@ -105,4 +60,24 @@ extern int utf8ncursor(struct utf8cursor *u8c, const struct utf8data *data,
*/
extern int utf8byte(struct utf8cursor *u8c);
+struct utf8data {
+ unsigned int maxage;
+ unsigned int offset;
+};
+
+struct utf8data_table {
+ const unsigned int *utf8agetab;
+ int utf8agetab_size;
+
+ const struct utf8data *utf8nfdicfdata;
+ int utf8nfdicfdata_size;
+
+ const struct utf8data *utf8nfdidata;
+ int utf8nfdidata_size;
+
+ const unsigned char *utf8data;
+};
+
+extern struct utf8data_table utf8_data_table;
+
#endif /* UTF8NORM_H */
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 22bf14ab2d16..e26b10132d47 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -15,6 +15,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
+#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/poll.h>
#include <linux/slab.h>
@@ -877,7 +878,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
new_flags, vma->anon_vma,
vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
- NULL_VM_UFFD_CTX);
+ NULL_VM_UFFD_CTX, vma_anon_name(vma));
if (prev)
vma = prev;
else
@@ -1436,7 +1437,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
prev = vma_merge(mm, prev, start, vma_end, new_flags,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
- ((struct vm_userfaultfd_ctx){ ctx }));
+ ((struct vm_userfaultfd_ctx){ ctx }),
+ vma_anon_name(vma));
if (prev) {
vma = prev;
goto next;
@@ -1613,7 +1615,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
prev = vma_merge(mm, prev, start, vma_end, new_flags,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma_policy(vma),
- NULL_VM_UFFD_CTX);
+ NULL_VM_UFFD_CTX, vma_anon_name(vma));
if (prev) {
vma = prev;
goto next;
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index 6f49bf39183c..c557a030acfe 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -4,7 +4,6 @@
* All Rights Reserved.
*/
#include "xfs.h"
-#include <linux/backing-dev.h>
#include "xfs_message.h"
#include "xfs_trace.h"
@@ -26,6 +25,6 @@ kmem_alloc(size_t size, xfs_km_flags_t flags)
"%s(%u) possible memory allocation deadlock size %u in %s (mode:0x%x)",
current->comm, current->pid,
(unsigned int)size, __func__, lflags);
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ memalloc_retry_wait(lflags);
} while (1);
}
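
memalloc_retry_wait() replaces the old congestion_wait() heuristic with a backoff keyed to the allocation's gfp flags. The surrounding retry idiom that the two XFS hunks converge on, sketched with assumed size/gfp variables:

	do {
		ptr = kmalloc(size, gfp | __GFP_NOWARN);
		if (ptr)
			return ptr;
		if (!(gfp & __GFP_NOFAIL))
			return NULL;
		/* gfp-aware backoff instead of congestion polling */
		memalloc_retry_wait(gfp);
	} while (1);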
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 4dccd4d90622..74198dd82b03 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -4551,7 +4551,7 @@ xfs_bmapi_convert_delalloc(
* the extent. Just return the real extent at this offset.
*/
if (!isnullstartblock(bma.got.br_startblock)) {
- xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
+ xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags);
*seq = READ_ONCE(ifp->if_seq);
goto out_trans_cancel;
}
@@ -4598,7 +4598,7 @@ xfs_bmapi_convert_delalloc(
XFS_STATS_INC(mp, xs_xstrat_quick);
ASSERT(!isnullstartblock(bma.got.br_startblock));
- xfs_bmbt_to_iomap(ip, iomap, &bma.got, flags);
+ xfs_bmbt_to_iomap(ip, iomap, &bma.got, 0, flags);
*seq = READ_ONCE(ifp->if_seq);
if (whichfork == XFS_COW_FORK)
diff --git a/fs/xfs/libxfs/xfs_fs.h b/fs/xfs/libxfs/xfs_fs.h
index c43877c8a279..505533c43a92 100644
--- a/fs/xfs/libxfs/xfs_fs.h
+++ b/fs/xfs/libxfs/xfs_fs.h
@@ -93,21 +93,6 @@ struct getbmapx {
#define XFS_FMR_OWN_DEFECTIVE FMR_OWNER('X', 8) /* bad blocks */
/*
- * Structure for XFS_IOC_FSSETDM.
- * For use by backup and restore programs to set the XFS on-disk inode
- * fields di_dmevmask and di_dmstate. These must be set to exactly and
- * only values previously obtained via xfs_bulkstat! (Specifically the
- * struct xfs_bstat fields bs_dmevmask and bs_dmstate.)
- */
-#ifndef HAVE_FSDMIDATA
-struct fsdmidata {
- __u32 fsd_dmevmask; /* corresponds to di_dmevmask */
- __u16 fsd_padding;
- __u16 fsd_dmstate; /* corresponds to di_dmstate */
-};
-#endif
-
-/*
* File segment locking set data type for 64 bit access.
* Also used for all the RESV/FREE interfaces.
*/
@@ -562,16 +547,10 @@ typedef struct xfs_fsop_handlereq {
/*
* Compound structures for passing args through Handle Request interfaces
- * xfs_fssetdm_by_handle, xfs_attrlist_by_handle, xfs_attrmulti_by_handle
- * - ioctls: XFS_IOC_FSSETDM_BY_HANDLE, XFS_IOC_ATTRLIST_BY_HANDLE, and
- * XFS_IOC_ATTRMULTI_BY_HANDLE
+ * xfs_attrlist_by_handle, xfs_attrmulti_by_handle
+ * - ioctls: XFS_IOC_ATTRLIST_BY_HANDLE and XFS_IOC_ATTRMULTI_BY_HANDLE
*/
-typedef struct xfs_fsop_setdm_handlereq {
- struct xfs_fsop_handlereq hreq; /* handle information */
- struct fsdmidata __user *data; /* DMAPI data */
-} xfs_fsop_setdm_handlereq_t;
-
/*
* Flags passed in xfs_attr_multiop.am_flags for the attr ioctl interface.
*
@@ -781,15 +760,15 @@ struct xfs_scrub_metadata {
* For 'documentation' purposes more than anything else,
* the "cmd #" field reflects the IRIX fcntl number.
*/
-#define XFS_IOC_ALLOCSP _IOW ('X', 10, struct xfs_flock64)
-#define XFS_IOC_FREESP _IOW ('X', 11, struct xfs_flock64)
+/* XFS_IOC_ALLOCSP ------- deprecated 10 */
+/* XFS_IOC_FREESP -------- deprecated 11 */
#define XFS_IOC_DIOINFO _IOR ('X', 30, struct dioattr)
#define XFS_IOC_FSGETXATTR FS_IOC_FSGETXATTR
#define XFS_IOC_FSSETXATTR FS_IOC_FSSETXATTR
-#define XFS_IOC_ALLOCSP64 _IOW ('X', 36, struct xfs_flock64)
-#define XFS_IOC_FREESP64 _IOW ('X', 37, struct xfs_flock64)
+/* XFS_IOC_ALLOCSP64 ----- deprecated 36 */
+/* XFS_IOC_FREESP64 ------ deprecated 37 */
#define XFS_IOC_GETBMAP _IOWR('X', 38, struct getbmap)
-#define XFS_IOC_FSSETDM _IOW ('X', 39, struct fsdmidata)
+/* XFS_IOC_FSSETDM ------- deprecated 39 */
#define XFS_IOC_RESVSP _IOW ('X', 40, struct xfs_flock64)
#define XFS_IOC_UNRESVSP _IOW ('X', 41, struct xfs_flock64)
#define XFS_IOC_RESVSP64 _IOW ('X', 42, struct xfs_flock64)
@@ -831,7 +810,7 @@ struct xfs_scrub_metadata {
#define XFS_IOC_FREEZE _IOWR('X', 119, int) /* aka FIFREEZE */
#define XFS_IOC_THAW _IOWR('X', 120, int) /* aka FITHAW */
-#define XFS_IOC_FSSETDM_BY_HANDLE _IOW ('X', 121, struct xfs_fsop_setdm_handlereq)
+/* XFS_IOC_FSSETDM_BY_HANDLE -- deprecated 121 */
#define XFS_IOC_ATTRLIST_BY_HANDLE _IOW ('X', 122, struct xfs_fsop_attrlist_handlereq)
#define XFS_IOC_ATTRMULTI_BY_HANDLE _IOW ('X', 123, struct xfs_fsop_attrmulti_handlereq)
#define XFS_IOC_FSGEOMETRY_V4 _IOR ('X', 124, struct xfs_fsop_geom_v4)
diff --git a/fs/xfs/scrub/agheader.c b/fs/xfs/scrub/agheader.c
index bed798792226..90aebfe9dc5f 100644
--- a/fs/xfs/scrub/agheader.c
+++ b/fs/xfs/scrub/agheader.c
@@ -281,7 +281,7 @@ xchk_superblock(
features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
if ((sb->sb_features2 & features_mask) !=
(cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
- xchk_block_set_corrupt(sc, bp);
+ xchk_block_set_preen(sc, bp);
if (!xfs_has_crc(mp)) {
/* all v5 fields must be zero */
@@ -290,39 +290,38 @@ xchk_superblock(
offsetof(struct xfs_dsb, sb_features_compat)))
xchk_block_set_corrupt(sc, bp);
} else {
- /* Check compat flags; all are set at mkfs time. */
- features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
- if ((sb->sb_features_compat & features_mask) !=
- (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
+ /* compat features must match */
+ if (sb->sb_features_compat !=
+ cpu_to_be32(mp->m_sb.sb_features_compat))
xchk_block_set_corrupt(sc, bp);
- /* Check ro compat flags; all are set at mkfs time. */
- features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
- XFS_SB_FEAT_RO_COMPAT_FINOBT |
- XFS_SB_FEAT_RO_COMPAT_RMAPBT |
- XFS_SB_FEAT_RO_COMPAT_REFLINK);
- if ((sb->sb_features_ro_compat & features_mask) !=
- (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
- features_mask))
+ /* ro compat features must match */
+ if (sb->sb_features_ro_compat !=
+ cpu_to_be32(mp->m_sb.sb_features_ro_compat))
xchk_block_set_corrupt(sc, bp);
- /* Check incompat flags; all are set at mkfs time. */
- features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
- XFS_SB_FEAT_INCOMPAT_FTYPE |
- XFS_SB_FEAT_INCOMPAT_SPINODES |
- XFS_SB_FEAT_INCOMPAT_META_UUID);
- if ((sb->sb_features_incompat & features_mask) !=
- (cpu_to_be32(mp->m_sb.sb_features_incompat) &
- features_mask))
- xchk_block_set_corrupt(sc, bp);
+ /*
+ * NEEDSREPAIR is ignored on a secondary super, so we should
+ * clear it when we find it, though it's not a corruption.
+ */
+ features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR);
+ if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^
+ sb->sb_features_incompat) & features_mask)
+ xchk_block_set_preen(sc, bp);
- /* Check log incompat flags; all are set at mkfs time. */
- features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
- if ((sb->sb_features_log_incompat & features_mask) !=
- (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
- features_mask))
+ /* all other incompat features must match */
+ if ((cpu_to_be32(mp->m_sb.sb_features_incompat) ^
+ sb->sb_features_incompat) & ~features_mask)
xchk_block_set_corrupt(sc, bp);
+ /*
+ * log incompat features protect newer log record types from
+ * older log recovery code. Log recovery doesn't check the
+ * secondary supers, so we can clear these if needed.
+ */
+ if (sb->sb_features_log_incompat)
+ xchk_block_set_preen(sc, bp);
+
/* Don't care about sb_crc */
if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index d7bfed52f4cd..6da7f2ca77de 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -52,6 +52,18 @@ xrep_superblock(
xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
xfs_sb_to_disk(bp->b_addr, &mp->m_sb);
+ /*
+ * Don't write out a secondary super with NEEDSREPAIR or log incompat
+ * features set, since both are ignored when set on a secondary.
+ */
+ if (xfs_has_crc(mp)) {
+ struct xfs_dsb *sb = bp->b_addr;
+
+ sb->sb_features_incompat &=
+ ~cpu_to_be32(XFS_SB_FEAT_INCOMPAT_NEEDSREPAIR);
+ sb->sb_features_log_incompat = 0;
+ }
+
/* Write this to disk. */
xfs_trans_buf_set_type(sc->tp, bp, XFS_BLFT_SB_BUF);
xfs_trans_log_buf(sc->tp, bp, 0, BBTOB(bp->b_length) - 1);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index c8c15c3c3147..9d6a67c7d227 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -136,7 +136,20 @@ done:
memalloc_nofs_restore(nofs_flag);
}
-/* Finish all pending io completions. */
+/*
+ * Finish all pending IO completions that require transactional modifications.
+ *
+ * We try to merge physically and logically contiguous ioends before completion to
+ * minimise the number of transactions we need to perform during IO completion.
+ * Both unwritten extent conversion and COW remapping need to iterate and modify
+ * one physical extent at a time, so we gain nothing by merging physically
+ * discontiguous extents here.
+ *
+ * The ioend chain we can be processing here is largely unbounded in
+ * length, and we may have to perform significant amounts of work on each ioend
+ * to complete it. Hence we have to be careful about holding the CPU for too
+ * long in this loop.
+ */
void
xfs_end_io(
struct work_struct *work)
@@ -157,6 +170,7 @@ xfs_end_io(
list_del_init(&ioend->io_list);
iomap_ioend_try_merge(ioend, &tmp);
xfs_end_ioend(ioend);
+ cond_resched();
}
}
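
The cond_resched() addition bounds scheduler latency when the ioend list is long; the general shape is a drain loop that yields between potentially expensive items. A sketch of the pattern (process_one is a placeholder for the per-ioend completion work):

	while ((ioend = list_first_entry_or_null(&tmp,
				struct iomap_ioend, io_list))) {
		list_del_init(&ioend->io_list);
		process_one(ioend);	/* may run transactions, I/O */
		cond_resched();		/* don't monopolize the CPU */
	}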
@@ -359,7 +373,7 @@ retry:
isnullstartblock(imap.br_startblock))
goto allocate_blocks;
- xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0);
+ xfs_bmbt_to_iomap(ip, &wpc->iomap, &imap, 0, 0);
trace_xfs_map_blocks_found(ip, offset, count, whichfork, &imap);
return 0;
allocate_blocks:
@@ -437,37 +451,37 @@ xfs_prepare_ioend(
* see a ENOSPC in writeback).
*/
static void
-xfs_discard_page(
- struct page *page,
- loff_t fileoff)
+xfs_discard_folio(
+ struct folio *folio,
+ loff_t pos)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio->mapping->host;
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- unsigned int pageoff = offset_in_page(fileoff);
- xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, fileoff);
- xfs_fileoff_t pageoff_fsb = XFS_B_TO_FSBT(mp, pageoff);
+ size_t offset = offset_in_folio(folio, pos);
+ xfs_fileoff_t start_fsb = XFS_B_TO_FSBT(mp, pos);
+ xfs_fileoff_t pageoff_fsb = XFS_B_TO_FSBT(mp, offset);
int error;
if (xfs_is_shutdown(mp))
goto out_invalidate;
xfs_alert_ratelimited(mp,
- "page discard on page "PTR_FMT", inode 0x%llx, offset %llu.",
- page, ip->i_ino, fileoff);
+ "page discard on page "PTR_FMT", inode 0x%llx, pos %llu.",
+ folio, ip->i_ino, pos);
error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
- i_blocks_per_page(inode, page) - pageoff_fsb);
+ i_blocks_per_folio(inode, folio) - pageoff_fsb);
if (error && !xfs_is_shutdown(mp))
xfs_alert(mp, "page discard unable to remove delalloc mapping.");
out_invalidate:
- iomap_invalidatepage(page, pageoff, PAGE_SIZE - pageoff);
+ iomap_invalidate_folio(folio, offset, folio_size(folio) - offset);
}
static const struct iomap_writeback_ops xfs_writeback_ops = {
.map_blocks = xfs_map_blocks,
.prepare_ioend = xfs_prepare_ioend,
- .discard_page = xfs_discard_page,
+ .discard_folio = xfs_discard_folio,
};
STATIC int
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 73a36b7be3bd..eb2e387ba528 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -771,8 +771,7 @@ int
xfs_alloc_file_space(
struct xfs_inode *ip,
xfs_off_t offset,
- xfs_off_t len,
- int alloc_type)
+ xfs_off_t len)
{
xfs_mount_t *mp = ip->i_mount;
xfs_off_t count;
@@ -851,9 +850,6 @@ xfs_alloc_file_space(
rblocks = 0;
}
- /*
- * Allocate and setup the transaction.
- */
error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
dblocks, rblocks, false, &tp);
if (error)
@@ -865,14 +861,14 @@ xfs_alloc_file_space(
goto error;
error = xfs_bmapi_write(tp, ip, startoffset_fsb,
- allocatesize_fsb, alloc_type, 0, imapp,
- &nimaps);
+ allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
+ &nimaps);
if (error)
goto error;
- /*
- * Complete the transaction
- */
+ ip->i_diflags |= XFS_DIFLAG_PREALLOC;
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
error = xfs_trans_commit(tp);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error)
@@ -1001,7 +997,7 @@ xfs_free_file_space(
/*
* Now that we've unmapped all full blocks, we'll have to zero out any
- * partial block at the beginning and/or end. iomap_zero_range is smart
+ * partial block at the beginning and/or end. xfs_zero_range is smart
* enough to skip any holes, including those we just created, but we
* must take care not to zero beyond EOF and enlarge i_size.
*/
@@ -1009,15 +1005,14 @@ xfs_free_file_space(
return 0;
if (offset + len > XFS_ISIZE(ip))
len = XFS_ISIZE(ip) - offset;
- error = iomap_zero_range(VFS_I(ip), offset, len, NULL,
- &xfs_buffered_write_iomap_ops);
+ error = xfs_zero_range(ip, offset, len, NULL);
if (error)
return error;
/*
* If we zeroed right up to EOF and EOF straddles a page boundary we
* must make sure that the post-EOF area is also zeroed because the
- * page could be mmap'd and iomap_zero_range doesn't do that for us.
+ * page could be mmap'd and xfs_zero_range doesn't do that for us.
* Writeback of the eof page will do this, albeit clumsily.
*/
if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 9f993168b55b..24b37d211f1d 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -54,7 +54,7 @@ int xfs_bmap_last_extent(struct xfs_trans *tp, struct xfs_inode *ip,
/* preallocation and hole punch interface */
int xfs_alloc_file_space(struct xfs_inode *ip, xfs_off_t offset,
- xfs_off_t len, int alloc_type);
+ xfs_off_t len);
int xfs_free_file_space(struct xfs_inode *ip, xfs_off_t offset,
xfs_off_t len);
int xfs_collapse_file_space(struct xfs_inode *, xfs_off_t offset,
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 631c5a61d89b..b45e0d50a405 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -394,7 +394,7 @@ xfs_buf_alloc_pages(
}
XFS_STATS_INC(bp->b_mount, xb_page_retries);
- congestion_wait(BLK_RW_ASYNC, HZ / 50);
+ memalloc_retry_wait(gfp_mask);
}
return 0;
}
@@ -1892,6 +1892,7 @@ xfs_free_buftarg(
list_lru_destroy(&btp->bt_lru);
blkdev_issue_flush(btp->bt_bdev);
+ fs_put_dax(btp->bt_daxdev);
kmem_free(btp);
}
@@ -1932,11 +1933,10 @@ xfs_setsize_buftarg_early(
return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
}
-xfs_buftarg_t *
+struct xfs_buftarg *
xfs_alloc_buftarg(
struct xfs_mount *mp,
- struct block_device *bdev,
- struct dax_device *dax_dev)
+ struct block_device *bdev)
{
xfs_buftarg_t *btp;
@@ -1945,7 +1945,7 @@ xfs_alloc_buftarg(
btp->bt_mount = mp;
btp->bt_dev = bdev->bd_dev;
btp->bt_bdev = bdev;
- btp->bt_daxdev = dax_dev;
+ btp->bt_daxdev = fs_dax_get_by_bdev(bdev, &btp->bt_dax_part_off);
/*
* Buffer IO error rate limiting. Limit it to no more than 10 messages
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 6b0200b8007d..edcb6254fa6a 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -89,6 +89,7 @@ typedef struct xfs_buftarg {
dev_t bt_dev;
struct block_device *bt_bdev;
struct dax_device *bt_daxdev;
+ u64 bt_dax_part_off;
struct xfs_mount *bt_mount;
unsigned int bt_meta_sectorsize;
size_t bt_meta_sectormask;
@@ -338,8 +339,8 @@ xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
/*
* Handling of buftargs.
*/
-extern struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *,
- struct block_device *, struct dax_device *);
+struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
+ struct block_device *bdev);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 8310005af00f..a7174a5b3203 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -138,7 +138,8 @@ xfs_dir2_sf_getdents(
STATIC int
xfs_dir2_block_getdents(
struct xfs_da_args *args,
- struct dir_context *ctx)
+ struct dir_context *ctx,
+ unsigned int *lock_mode)
{
struct xfs_inode *dp = args->dp; /* incore directory inode */
struct xfs_buf *bp; /* buffer for block */
@@ -146,7 +147,6 @@ xfs_dir2_block_getdents(
int wantoff; /* starting block offset */
xfs_off_t cook;
struct xfs_da_geometry *geo = args->geo;
- int lock_mode;
unsigned int offset, next_offset;
unsigned int end;
@@ -156,12 +156,13 @@ xfs_dir2_block_getdents(
if (xfs_dir2_dataptr_to_db(geo, ctx->pos) > geo->datablk)
return 0;
- lock_mode = xfs_ilock_data_map_shared(dp);
error = xfs_dir3_block_read(args->trans, dp, &bp);
- xfs_iunlock(dp, lock_mode);
if (error)
return error;
+ xfs_iunlock(dp, *lock_mode);
+ *lock_mode = 0;
+
/*
* Extract the byte offset we start at from the seek pointer.
* We'll skip entries before this.
@@ -344,7 +345,8 @@ STATIC int
xfs_dir2_leaf_getdents(
struct xfs_da_args *args,
struct dir_context *ctx,
- size_t bufsize)
+ size_t bufsize,
+ unsigned int *lock_mode)
{
struct xfs_inode *dp = args->dp;
struct xfs_mount *mp = dp->i_mount;
@@ -356,7 +358,6 @@ xfs_dir2_leaf_getdents(
xfs_dir2_off_t curoff; /* current overall offset */
int length; /* temporary length value */
int byteoff; /* offset in current block */
- int lock_mode;
unsigned int offset = 0;
int error = 0; /* error return value */
@@ -390,13 +391,16 @@ xfs_dir2_leaf_getdents(
bp = NULL;
}
- lock_mode = xfs_ilock_data_map_shared(dp);
+ if (*lock_mode == 0)
+ *lock_mode = xfs_ilock_data_map_shared(dp);
error = xfs_dir2_leaf_readbuf(args, bufsize, &curoff,
&rablk, &bp);
- xfs_iunlock(dp, lock_mode);
if (error || !bp)
break;
+ xfs_iunlock(dp, *lock_mode);
+ *lock_mode = 0;
+
xfs_dir3_data_check(dp, bp);
/*
* Find our position in the block.
@@ -496,7 +500,7 @@ xfs_dir2_leaf_getdents(
*
* If supplied, the transaction collects locked dir buffers to avoid
* nested buffer deadlocks. This function does not dirty the
- * transaction. The caller should ensure that the inode is locked
+ * transaction. The caller must hold the IOLOCK (shared or exclusive)
* before calling this function.
*/
int
@@ -507,8 +511,9 @@ xfs_readdir(
size_t bufsize)
{
struct xfs_da_args args = { NULL };
- int rval;
- int v;
+ unsigned int lock_mode;
+ int isblock;
+ int error;
trace_xfs_readdir(dp);
@@ -516,6 +521,7 @@ xfs_readdir(
return -EIO;
ASSERT(S_ISDIR(VFS_I(dp)->i_mode));
+ ASSERT(xfs_isilocked(dp, XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
XFS_STATS_INC(dp->i_mount, xs_dir_getdents);
args.dp = dp;
@@ -523,13 +529,22 @@ xfs_readdir(
args.trans = tp;
if (dp->i_df.if_format == XFS_DINODE_FMT_LOCAL)
- rval = xfs_dir2_sf_getdents(&args, ctx);
- else if ((rval = xfs_dir2_isblock(&args, &v)))
- ;
- else if (v)
- rval = xfs_dir2_block_getdents(&args, ctx);
- else
- rval = xfs_dir2_leaf_getdents(&args, ctx, bufsize);
+ return xfs_dir2_sf_getdents(&args, ctx);
- return rval;
+ lock_mode = xfs_ilock_data_map_shared(dp);
+ error = xfs_dir2_isblock(&args, &isblock);
+ if (error)
+ goto out_unlock;
+
+ if (isblock) {
+ error = xfs_dir2_block_getdents(&args, ctx, &lock_mode);
+ goto out_unlock;
+ }
+
+ error = xfs_dir2_leaf_getdents(&args, ctx, bufsize, &lock_mode);
+
+out_unlock:
+ if (lock_mode)
+ xfs_iunlock(dp, lock_mode);
+ return error;
}
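
The readdir rework above stops taking and dropping the ILOCK inside every helper; instead xfs_readdir() takes it once and threads the lock state down through a pointer, so the block and leaf helpers can release it as soon as the buffer read completes. A standalone sketch of that handoff pattern follows, with hypothetical stand-ins for the XFS locking and buffer-read calls:

#include <stdio.h>

/* Hypothetical stand-ins for the XFS inode lock and buffer-read calls. */
struct dir { int locked; };

static unsigned int lock_shared(struct dir *dp) { dp->locked = 1; return 1; }
static void unlock(struct dir *dp, unsigned int mode) { (void)mode; dp->locked = 0; }
static int read_buffer(struct dir *dp) { (void)dp; return 0; }

/*
 * The callee drops the lock as soon as the buffer read no longer needs it
 * and records the handoff by zeroing *lock_mode, mirroring the reworked
 * xfs_dir2_block_getdents()/xfs_dir2_leaf_getdents().
 */
static int read_block(struct dir *dp, unsigned int *lock_mode)
{
	int error = read_buffer(dp);	/* must run with the lock held */

	if (error)
		return error;		/* caller still owns the lock */
	unlock(dp, *lock_mode);
	*lock_mode = 0;
	return 0;
}

int main(void)
{
	struct dir d = { 0 };
	unsigned int lock_mode = lock_shared(&d);
	int error = read_block(&d, &lock_mode);

	if (lock_mode)			/* unlock only if the callee left it held */
		unlock(&d, lock_mode);
	printf("error=%d locked=%d\n", error, d.locked);
	return 0;
}
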
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 27594738b0d1..5bddb1e9e0b3 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -66,40 +66,6 @@ xfs_is_falloc_aligned(
return !((pos | len) & mask);
}
-int
-xfs_update_prealloc_flags(
- struct xfs_inode *ip,
- enum xfs_prealloc_flags flags)
-{
- struct xfs_trans *tp;
- int error;
-
- error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
- 0, 0, 0, &tp);
- if (error)
- return error;
-
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-
- if (!(flags & XFS_PREALLOC_INVISIBLE)) {
- VFS_I(ip)->i_mode &= ~S_ISUID;
- if (VFS_I(ip)->i_mode & S_IXGRP)
- VFS_I(ip)->i_mode &= ~S_ISGID;
- xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
- }
-
- if (flags & XFS_PREALLOC_SET)
- ip->i_diflags |= XFS_DIFLAG_PREALLOC;
- if (flags & XFS_PREALLOC_CLEAR)
- ip->i_diflags &= ~XFS_DIFLAG_PREALLOC;
-
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- if (flags & XFS_PREALLOC_SYNC)
- xfs_trans_set_sync(tp);
- return xfs_trans_commit(tp);
-}
-
/*
* Fsync operations on directories are much simpler than on regular files,
* as there is no file data to flush, and thus also no need for explicit
@@ -437,8 +403,7 @@ restart:
}
trace_xfs_zero_eof(ip, isize, iocb->ki_pos - isize);
- error = iomap_zero_range(inode, isize, iocb->ki_pos - isize,
- NULL, &xfs_buffered_write_iomap_ops);
+ error = xfs_zero_range(ip, isize, iocb->ki_pos - isize, NULL);
if (error)
return error;
} else
@@ -896,6 +861,21 @@ xfs_break_layouts(
return error;
}
+/* Does this file, inode, or mount want synchronous writes? */
+static inline bool xfs_file_sync_writes(struct file *filp)
+{
+ struct xfs_inode *ip = XFS_I(file_inode(filp));
+
+ if (xfs_has_wsync(ip->i_mount))
+ return true;
+ if (filp->f_flags & (__O_SYNC | O_DSYNC))
+ return true;
+ if (IS_SYNC(file_inode(filp)))
+ return true;
+
+ return false;
+}
+
#define XFS_FALLOC_FL_SUPPORTED \
(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \
FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | \
@@ -911,7 +891,6 @@ xfs_file_fallocate(
struct inode *inode = file_inode(file);
struct xfs_inode *ip = XFS_I(inode);
long error;
- enum xfs_prealloc_flags flags = 0;
uint iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
loff_t new_size = 0;
bool do_file_insert = false;
@@ -956,6 +935,10 @@ xfs_file_fallocate(
goto out_unlock;
}
+ error = file_modified(file);
+ if (error)
+ goto out_unlock;
+
if (mode & FALLOC_FL_PUNCH_HOLE) {
error = xfs_free_file_space(ip, offset, len);
if (error)
@@ -1005,8 +988,6 @@ xfs_file_fallocate(
}
do_file_insert = true;
} else {
- flags |= XFS_PREALLOC_SET;
-
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) {
new_size = offset + len;
@@ -1052,20 +1033,12 @@ xfs_file_fallocate(
}
if (!xfs_is_always_cow_inode(ip)) {
- error = xfs_alloc_file_space(ip, offset, len,
- XFS_BMAPI_PREALLOC);
+ error = xfs_alloc_file_space(ip, offset, len);
if (error)
goto out_unlock;
}
}
- if (file->f_flags & O_DSYNC)
- flags |= XFS_PREALLOC_SYNC;
-
- error = xfs_update_prealloc_flags(ip, flags);
- if (error)
- goto out_unlock;
-
/* Change file size if needed */
if (new_size) {
struct iattr iattr;
@@ -1084,8 +1057,14 @@ xfs_file_fallocate(
* leave shifted extents past EOF and hence losing access to
* the data that is contained within them.
*/
- if (do_file_insert)
+ if (do_file_insert) {
error = xfs_insert_file_space(ip, offset, len);
+ if (error)
+ goto out_unlock;
+ }
+
+ if (xfs_file_sync_writes(file))
+ error = xfs_log_force_inode(ip);
out_unlock:
xfs_iunlock(ip, iolock);
@@ -1117,21 +1096,6 @@ xfs_file_fadvise(
return ret;
}
-/* Does this file, inode, or mount want synchronous writes? */
-static inline bool xfs_file_sync_writes(struct file *filp)
-{
- struct xfs_inode *ip = XFS_I(file_inode(filp));
-
- if (xfs_has_wsync(ip->i_mount))
- return true;
- if (filp->f_flags & (__O_SYNC | O_DSYNC))
- return true;
- if (IS_SYNC(file_inode(filp)))
- return true;
-
- return false;
-}
-
STATIC loff_t
xfs_file_remap_range(
struct file *file_in,
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index d019c98eb839..9644f938990c 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -87,6 +87,7 @@ xfs_inode_alloc(
/* VFS doesn't initialise i_mode or i_state! */
VFS_I(ip)->i_mode = 0;
VFS_I(ip)->i_state = 0;
+ mapping_set_large_folios(VFS_I(ip)->i_mapping);
XFS_STATS_INC(mp, vn_active);
ASSERT(atomic_read(&ip->i_pincount) == 0);
@@ -320,6 +321,7 @@ xfs_reinit_inode(
inode->i_rdev = dev;
inode->i_uid = uid;
inode->i_gid = gid;
+ mapping_set_large_folios(inode->i_mapping);
return error;
}
@@ -1852,28 +1854,20 @@ xfs_inodegc_worker(
}
/*
- * Force all currently queued inode inactivation work to run immediately, and
- * wait for the work to finish. Two pass - queue all the work first pass, wait
- * for it in a second pass.
+ * Force all currently queued inode inactivation work to run immediately and
+ * wait for the work to finish.
*/
void
xfs_inodegc_flush(
struct xfs_mount *mp)
{
- struct xfs_inodegc *gc;
- int cpu;
-
if (!xfs_is_inodegc_enabled(mp))
return;
trace_xfs_inodegc_flush(mp, __return_address);
xfs_inodegc_queue_all(mp);
-
- for_each_online_cpu(cpu) {
- gc = per_cpu_ptr(mp->m_inodegc, cpu);
- flush_work(&gc->work);
- }
+ flush_workqueue(mp->m_inodegc_wq);
}
/*
@@ -1884,18 +1878,12 @@ void
xfs_inodegc_stop(
struct xfs_mount *mp)
{
- struct xfs_inodegc *gc;
- int cpu;
-
if (!xfs_clear_inodegc_enabled(mp))
return;
xfs_inodegc_queue_all(mp);
+ drain_workqueue(mp->m_inodegc_wq);
- for_each_online_cpu(cpu) {
- gc = per_cpu_ptr(mp->m_inodegc, cpu);
- cancel_work_sync(&gc->work);
- }
trace_xfs_inodegc_stop(mp, __return_address);
}
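
The inodegc change above swaps hand-rolled per-cpu flushing for the workqueue core's own synchronization: flush_workqueue() waits for everything already queued, while drain_workqueue() also waits out work items that requeue themselves, which is what the stop path needs. A minimal module-style sketch of the two calls, assuming only the standard workqueue APIs:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;
static struct work_struct work;

static void work_fn(struct work_struct *w)
{
	(void)w;
	pr_info("work ran\n");
}

static int __init demo_init(void)
{
	wq = alloc_workqueue("demo", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;
	INIT_WORK(&work, work_fn);
	queue_work(wq, &work);
	flush_workqueue(wq);	/* like xfs_inodegc_flush(): wait for queued work */
	drain_workqueue(wq);	/* like xfs_inodegc_stop(): also waits out requeues */
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
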
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index c447bf04205a..b7e8f14d9fca 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -462,15 +462,6 @@ xfs_itruncate_extents(
}
/* from xfs_file.c */
-enum xfs_prealloc_flags {
- XFS_PREALLOC_SET = (1 << 1),
- XFS_PREALLOC_CLEAR = (1 << 2),
- XFS_PREALLOC_SYNC = (1 << 3),
- XFS_PREALLOC_INVISIBLE = (1 << 4),
-};
-
-int xfs_update_prealloc_flags(struct xfs_inode *ip,
- enum xfs_prealloc_flags flags);
int xfs_break_layouts(struct inode *inode, uint *iolock,
enum layout_break_reason reason);
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 8ea47a9d5aad..2515fe8299e1 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -627,87 +627,6 @@ xfs_attrmulti_by_handle(
return error;
}
-int
-xfs_ioc_space(
- struct file *filp,
- xfs_flock64_t *bf)
-{
- struct inode *inode = file_inode(filp);
- struct xfs_inode *ip = XFS_I(inode);
- struct iattr iattr;
- enum xfs_prealloc_flags flags = XFS_PREALLOC_CLEAR;
- uint iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
- int error;
-
- if (inode->i_flags & (S_IMMUTABLE|S_APPEND))
- return -EPERM;
-
- if (!(filp->f_mode & FMODE_WRITE))
- return -EBADF;
-
- if (!S_ISREG(inode->i_mode))
- return -EINVAL;
-
- if (xfs_is_always_cow_inode(ip))
- return -EOPNOTSUPP;
-
- if (filp->f_flags & O_DSYNC)
- flags |= XFS_PREALLOC_SYNC;
- if (filp->f_mode & FMODE_NOCMTIME)
- flags |= XFS_PREALLOC_INVISIBLE;
-
- error = mnt_want_write_file(filp);
- if (error)
- return error;
-
- xfs_ilock(ip, iolock);
- error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
- if (error)
- goto out_unlock;
- inode_dio_wait(inode);
-
- switch (bf->l_whence) {
- case 0: /*SEEK_SET*/
- break;
- case 1: /*SEEK_CUR*/
- bf->l_start += filp->f_pos;
- break;
- case 2: /*SEEK_END*/
- bf->l_start += XFS_ISIZE(ip);
- break;
- default:
- error = -EINVAL;
- goto out_unlock;
- }
-
- if (bf->l_start < 0 || bf->l_start > inode->i_sb->s_maxbytes) {
- error = -EINVAL;
- goto out_unlock;
- }
-
- if (bf->l_start > XFS_ISIZE(ip)) {
- error = xfs_alloc_file_space(ip, XFS_ISIZE(ip),
- bf->l_start - XFS_ISIZE(ip),
- XFS_BMAPI_PREALLOC);
- if (error)
- goto out_unlock;
- }
-
- iattr.ia_valid = ATTR_SIZE;
- iattr.ia_size = bf->l_start;
- error = xfs_vn_setattr_size(file_mnt_user_ns(filp), file_dentry(filp),
- &iattr);
- if (error)
- goto out_unlock;
-
- error = xfs_update_prealloc_flags(ip, flags);
-
-out_unlock:
- xfs_iunlock(ip, iolock);
- mnt_drop_write_file(filp);
- return error;
-}
-
/* Return 0 on success or positive error */
int
xfs_fsbulkstat_one_fmt(
@@ -1545,7 +1464,7 @@ xfs_ioc_getbmap(
if (bmx.bmv_count < 2)
return -EINVAL;
- if (bmx.bmv_count > ULONG_MAX / recsize)
+ if (bmx.bmv_count >= INT_MAX / recsize)
return -ENOMEM;
buf = kvcalloc(bmx.bmv_count, sizeof(*buf), GFP_KERNEL);
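
The getbmap change above replaces a bound that could never trip on 64-bit (bmv_count is 32-bit, so it is always far below ULONG_MAX / recsize) with one that rejects any count whose array size would reach INT_MAX. A standalone sketch of the overflow-safe bound check, using a hypothetical record type:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical record standing in for the getbmap output entries. */
struct rec { uint64_t off, len; };

/*
 * Check the element count against INT_MAX / recsize *before* the
 * count * recsize multiplication can overflow or exceed what the
 * allocator will accept.
 */
static void *alloc_recs(long long count)
{
	const long long recsize = (long long)sizeof(struct rec);

	if (count < 2)
		return NULL;			/* -EINVAL upstream */
	if (count >= INT_MAX / recsize)
		return NULL;			/* -ENOMEM upstream */
	return calloc((size_t)count, (size_t)recsize);
}

int main(void)
{
	void *ok = alloc_recs(4);		/* small count: allocated */
	void *bad = alloc_recs(1LL << 40);	/* huge count: rejected */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}
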
@@ -1936,6 +1855,15 @@ xfs_fs_eofblocks_from_user(
}
/*
+ * These long-unused ioctls were removed from the official ioctl API in 5.17,
+ * but we retain the definitions so that we can log warnings about them.
+ */
+#define XFS_IOC_ALLOCSP _IOW ('X', 10, struct xfs_flock64)
+#define XFS_IOC_FREESP _IOW ('X', 11, struct xfs_flock64)
+#define XFS_IOC_ALLOCSP64 _IOW ('X', 36, struct xfs_flock64)
+#define XFS_IOC_FREESP64 _IOW ('X', 37, struct xfs_flock64)
+
+/*
* Note: some of the ioctls return positive numbers as a
* byte count indicating success, such as readlink_by_handle.
* So we don't "sign flip" like most other routines. This means
@@ -1965,13 +1893,11 @@ xfs_file_ioctl(
case XFS_IOC_ALLOCSP:
case XFS_IOC_FREESP:
case XFS_IOC_ALLOCSP64:
- case XFS_IOC_FREESP64: {
- xfs_flock64_t bf;
-
- if (copy_from_user(&bf, arg, sizeof(bf)))
- return -EFAULT;
- return xfs_ioc_space(filp, &bf);
- }
+ case XFS_IOC_FREESP64:
+ xfs_warn_once(mp,
+ "%s should use fallocate; XFS_IOC_{ALLOC,FREE}SP ioctl unsupported",
+ current->comm);
+ return -ENOTTY;
case XFS_IOC_DIOINFO: {
struct xfs_buftarg *target = xfs_inode_buftarg(ip);
struct dioattr da;
diff --git a/fs/xfs/xfs_ioctl.h b/fs/xfs/xfs_ioctl.h
index 845d3bcab74b..d4abba2c13c1 100644
--- a/fs/xfs/xfs_ioctl.h
+++ b/fs/xfs/xfs_ioctl.h
@@ -10,12 +10,6 @@ struct xfs_bstat;
struct xfs_ibulk;
struct xfs_inogrp;
-
-extern int
-xfs_ioc_space(
- struct file *filp,
- xfs_flock64_t *bf);
-
int
xfs_ioc_swapext(
xfs_swapext_t *sxp);
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 8783af203cfc..004ed2a251e8 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -28,22 +28,6 @@
#ifdef BROKEN_X86_ALIGNMENT
STATIC int
-xfs_compat_flock64_copyin(
- xfs_flock64_t *bf,
- compat_xfs_flock64_t __user *arg32)
-{
- if (get_user(bf->l_type, &arg32->l_type) ||
- get_user(bf->l_whence, &arg32->l_whence) ||
- get_user(bf->l_start, &arg32->l_start) ||
- get_user(bf->l_len, &arg32->l_len) ||
- get_user(bf->l_sysid, &arg32->l_sysid) ||
- get_user(bf->l_pid, &arg32->l_pid) ||
- copy_from_user(bf->l_pad, &arg32->l_pad, 4*sizeof(u32)))
- return -EFAULT;
- return 0;
-}
-
-STATIC int
xfs_compat_ioc_fsgeometry_v1(
struct xfs_mount *mp,
compat_xfs_fsop_geom_v1_t __user *arg32)
@@ -445,17 +429,6 @@ xfs_file_compat_ioctl(
switch (cmd) {
#if defined(BROKEN_X86_ALIGNMENT)
- case XFS_IOC_ALLOCSP_32:
- case XFS_IOC_FREESP_32:
- case XFS_IOC_ALLOCSP64_32:
- case XFS_IOC_FREESP64_32: {
- struct xfs_flock64 bf;
-
- if (xfs_compat_flock64_copyin(&bf, arg))
- return -EFAULT;
- cmd = _NATIVE_IOC(cmd, struct xfs_flock64);
- return xfs_ioc_space(filp, &bf);
- }
case XFS_IOC_FSGEOMETRY_V1_32:
return xfs_compat_ioc_fsgeometry_v1(ip->i_mount, arg);
case XFS_IOC_FSGROWFSDATA_32: {
diff --git a/fs/xfs/xfs_ioctl32.h b/fs/xfs/xfs_ioctl32.h
index 9929482bf358..c14852362fce 100644
--- a/fs/xfs/xfs_ioctl32.h
+++ b/fs/xfs/xfs_ioctl32.h
@@ -142,28 +142,6 @@ typedef struct compat_xfs_fsop_attrmulti_handlereq {
_IOW('X', 123, struct compat_xfs_fsop_attrmulti_handlereq)
#ifdef BROKEN_X86_ALIGNMENT
-/* on ia32 l_start is on a 32-bit boundary */
-typedef struct compat_xfs_flock64 {
- __s16 l_type;
- __s16 l_whence;
- __s64 l_start __attribute__((packed));
- /* len == 0 means until end of file */
- __s64 l_len __attribute__((packed));
- __s32 l_sysid;
- __u32 l_pid;
- __s32 l_pad[4]; /* reserve area */
-} compat_xfs_flock64_t;
-
-#define XFS_IOC_ALLOCSP_32 _IOW('X', 10, struct compat_xfs_flock64)
-#define XFS_IOC_FREESP_32 _IOW('X', 11, struct compat_xfs_flock64)
-#define XFS_IOC_ALLOCSP64_32 _IOW('X', 36, struct compat_xfs_flock64)
-#define XFS_IOC_FREESP64_32 _IOW('X', 37, struct compat_xfs_flock64)
-#define XFS_IOC_RESVSP_32 _IOW('X', 40, struct compat_xfs_flock64)
-#define XFS_IOC_UNRESVSP_32 _IOW('X', 41, struct compat_xfs_flock64)
-#define XFS_IOC_RESVSP64_32 _IOW('X', 42, struct compat_xfs_flock64)
-#define XFS_IOC_UNRESVSP64_32 _IOW('X', 43, struct compat_xfs_flock64)
-#define XFS_IOC_ZERO_RANGE_32 _IOW('X', 57, struct compat_xfs_flock64)
-
typedef struct compat_xfs_fsop_geom_v1 {
__u32 blocksize; /* filesystem (data) block size */
__u32 rtextsize; /* realtime extent size */
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 093758440ad5..e552ce541ec2 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -28,7 +28,6 @@
#include "xfs_dquot.h"
#include "xfs_reflink.h"
-
#define XFS_ALLOC_ALIGN(mp, off) \
(((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)
@@ -54,7 +53,8 @@ xfs_bmbt_to_iomap(
struct xfs_inode *ip,
struct iomap *iomap,
struct xfs_bmbt_irec *imap,
- u16 flags)
+ unsigned int mapping_flags,
+ u16 iomap_flags)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_buftarg *target = xfs_inode_buftarg(ip);
@@ -71,16 +71,22 @@ xfs_bmbt_to_iomap(
iomap->type = IOMAP_DELALLOC;
} else {
iomap->addr = BBTOB(xfs_fsb_to_db(ip, imap->br_startblock));
+ if (mapping_flags & IOMAP_DAX)
+ iomap->addr += target->bt_dax_part_off;
+
if (imap->br_state == XFS_EXT_UNWRITTEN)
iomap->type = IOMAP_UNWRITTEN;
else
iomap->type = IOMAP_MAPPED;
+
}
iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
- iomap->bdev = target->bt_bdev;
- iomap->dax_dev = target->bt_daxdev;
- iomap->flags = flags;
+ if (mapping_flags & IOMAP_DAX)
+ iomap->dax_dev = target->bt_daxdev;
+ else
+ iomap->bdev = target->bt_bdev;
+ iomap->flags = iomap_flags;
if (xfs_ipincount(ip) &&
(ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
@@ -188,6 +194,7 @@ xfs_iomap_write_direct(
struct xfs_inode *ip,
xfs_fileoff_t offset_fsb,
xfs_fileoff_t count_fsb,
+ unsigned int flags,
struct xfs_bmbt_irec *imap)
{
struct xfs_mount *mp = ip->i_mount;
@@ -229,7 +236,7 @@ xfs_iomap_write_direct(
* the reserve block pool for bmbt block allocation if there is no space
* left but we need to do unwritten extent conversion.
*/
- if (IS_DAX(VFS_I(ip))) {
+ if (flags & IOMAP_DAX) {
bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
if (imap->br_state == XFS_EXT_UNWRITTEN) {
force = true;
@@ -620,7 +627,7 @@ imap_needs_alloc(
imap->br_startblock == DELAYSTARTBLOCK)
return true;
/* we convert unwritten extents before copying the data for DAX */
- if (IS_DAX(inode) && imap->br_state == XFS_EXT_UNWRITTEN)
+ if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)
return true;
return false;
}
@@ -800,7 +807,7 @@ xfs_direct_write_iomap_begin(
xfs_iunlock(ip, lockmode);
trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
- return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags);
allocate_blocks:
error = -EAGAIN;
@@ -826,23 +833,24 @@ allocate_blocks:
xfs_iunlock(ip, lockmode);
error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
- &imap);
+ flags, &imap);
if (error)
return error;
trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
- return xfs_bmbt_to_iomap(ip, iomap, &imap, iomap_flags | IOMAP_F_NEW);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
+ iomap_flags | IOMAP_F_NEW);
out_found_cow:
xfs_iunlock(ip, lockmode);
length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
if (imap.br_startblock != HOLESTARTBLOCK) {
- error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
+ error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
if (error)
return error;
}
- return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
+ return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED);
out_unlock:
if (lockmode)
@@ -1052,23 +1060,24 @@ retry:
*/
xfs_iunlock(ip, XFS_ILOCK_EXCL);
trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
- return xfs_bmbt_to_iomap(ip, iomap, &imap, IOMAP_F_NEW);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_NEW);
found_imap:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
- return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
found_cow:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (imap.br_startoff <= offset_fsb) {
- error = xfs_bmbt_to_iomap(ip, srcmap, &imap, 0);
+ error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0);
if (error)
return error;
- return xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
+ return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
+ IOMAP_F_SHARED);
}
xfs_trim_extent(&cmap, offset_fsb, imap.br_startoff - offset_fsb);
- return xfs_bmbt_to_iomap(ip, iomap, &cmap, 0);
+ return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, 0);
out_unlock:
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1177,7 +1186,8 @@ xfs_read_iomap_begin(
if (error)
return error;
trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
- return xfs_bmbt_to_iomap(ip, iomap, &imap, shared ? IOMAP_F_SHARED : 0);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
+ shared ? IOMAP_F_SHARED : 0);
}
const struct iomap_ops xfs_read_iomap_ops = {
@@ -1236,7 +1246,8 @@ xfs_seek_iomap_begin(
if (data_fsb < cow_fsb + cmap.br_blockcount)
end_fsb = min(end_fsb, data_fsb);
xfs_trim_extent(&cmap, offset_fsb, end_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &cmap, IOMAP_F_SHARED);
+ error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
+ IOMAP_F_SHARED);
/*
* This is a COW extent, so we must probe the page cache
* because there could be dirty page cache being backed
@@ -1258,7 +1269,7 @@ xfs_seek_iomap_begin(
imap.br_state = XFS_EXT_NORM;
done:
xfs_trim_extent(&imap, offset_fsb, end_fsb);
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
out_unlock:
xfs_iunlock(ip, lockmode);
return error;
@@ -1305,9 +1316,40 @@ out_unlock:
if (error)
return error;
ASSERT(nimaps);
- return xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
+ return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0);
}
const struct iomap_ops xfs_xattr_iomap_ops = {
.iomap_begin = xfs_xattr_iomap_begin,
};
+
+int
+xfs_zero_range(
+ struct xfs_inode *ip,
+ loff_t pos,
+ loff_t len,
+ bool *did_zero)
+{
+ struct inode *inode = VFS_I(ip);
+
+ if (IS_DAX(inode))
+ return dax_zero_range(inode, pos, len, did_zero,
+ &xfs_direct_write_iomap_ops);
+ return iomap_zero_range(inode, pos, len, did_zero,
+ &xfs_buffered_write_iomap_ops);
+}
+
+int
+xfs_truncate_page(
+ struct xfs_inode *ip,
+ loff_t pos,
+ bool *did_zero)
+{
+ struct inode *inode = VFS_I(ip);
+
+ if (IS_DAX(inode))
+ return dax_truncate_page(inode, pos, did_zero,
+ &xfs_direct_write_iomap_ops);
+ return iomap_truncate_page(inode, pos, did_zero,
+ &xfs_buffered_write_iomap_ops);
+}
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h
index 7d3703556d0e..e88dc162c785 100644
--- a/fs/xfs/xfs_iomap.h
+++ b/fs/xfs/xfs_iomap.h
@@ -12,13 +12,19 @@ struct xfs_inode;
struct xfs_bmbt_irec;
int xfs_iomap_write_direct(struct xfs_inode *ip, xfs_fileoff_t offset_fsb,
- xfs_fileoff_t count_fsb, struct xfs_bmbt_irec *imap);
+ xfs_fileoff_t count_fsb, unsigned int flags,
+ struct xfs_bmbt_irec *imap);
int xfs_iomap_write_unwritten(struct xfs_inode *, xfs_off_t, xfs_off_t, bool);
xfs_fileoff_t xfs_iomap_eof_align_last_fsb(struct xfs_inode *ip,
xfs_fileoff_t end_fsb);
-int xfs_bmbt_to_iomap(struct xfs_inode *, struct iomap *,
- struct xfs_bmbt_irec *, u16);
+int xfs_bmbt_to_iomap(struct xfs_inode *ip, struct iomap *iomap,
+ struct xfs_bmbt_irec *imap, unsigned int mapping_flags,
+ u16 iomap_flags);
+
+int xfs_zero_range(struct xfs_inode *ip, loff_t pos, loff_t len,
+ bool *did_zero);
+int xfs_truncate_page(struct xfs_inode *ip, loff_t pos, bool *did_zero);
static inline xfs_filblks_t
xfs_aligned_fsb_count(
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 3447c19e99da..b79b3846e71b 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -890,8 +890,8 @@ xfs_setattr_size(
*/
if (newsize > oldsize) {
trace_xfs_zero_eof(ip, oldsize, newsize - oldsize);
- error = iomap_zero_range(inode, oldsize, newsize - oldsize,
- &did_zeroing, &xfs_buffered_write_iomap_ops);
+ error = xfs_zero_range(ip, oldsize, newsize - oldsize,
+ &did_zeroing);
} else {
/*
* iomap won't detect a dirty page over an unwritten block (or a
@@ -903,8 +903,7 @@ xfs_setattr_size(
newsize);
if (error)
return error;
- error = iomap_truncate_page(inode, newsize, &did_zeroing,
- &xfs_buffered_write_iomap_ops);
+ error = xfs_truncate_page(ip, newsize, &did_zeroing);
}
if (error)
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 5e1d29d8b2e7..4abe17312c2b 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -71,6 +71,40 @@ xfs_fs_get_uuid(
}
/*
+ * We cannot use file based VFS helpers such as file_modified() to update
+ * inode state as we modify the data/metadata in the inode here. Hence we have
+ * to open code the timestamp updates and SUID/SGID stripping. We also need
+ * to set the inode prealloc flag to ensure that the extents we allocate are not
+ * removed if the inode is reclaimed from memory before xfs_fs_commit_blocks()
+ * is called from the client to indicate that data has been written and the file size
+ * can be extended.
+ */
+static int
+xfs_fs_map_update_inode(
+ struct xfs_inode *ip)
+{
+ struct xfs_trans *tp;
+ int error;
+
+ error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid,
+ 0, 0, 0, &tp);
+ if (error)
+ return error;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+
+ VFS_I(ip)->i_mode &= ~S_ISUID;
+ if (VFS_I(ip)->i_mode & S_IXGRP)
+ VFS_I(ip)->i_mode &= ~S_ISGID;
+ xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+ ip->i_diflags |= XFS_DIFLAG_PREALLOC;
+
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+ return xfs_trans_commit(tp);
+}
+
+/*
* Get a layout for the pNFS client.
*/
int
@@ -155,7 +189,7 @@ xfs_fs_map_blocks(
xfs_iunlock(ip, lock_flags);
error = xfs_iomap_write_direct(ip, offset_fsb,
- end_fsb - offset_fsb, &imap);
+ end_fsb - offset_fsb, 0, &imap);
if (error)
goto out_unlock;
@@ -164,16 +198,18 @@ xfs_fs_map_blocks(
* that the blocks allocated and handed out to the client are
* guaranteed to be present even after a server crash.
*/
- error = xfs_update_prealloc_flags(ip,
- XFS_PREALLOC_SET | XFS_PREALLOC_SYNC);
+ error = xfs_fs_map_update_inode(ip);
+ if (!error)
+ error = xfs_log_force_inode(ip);
if (error)
goto out_unlock;
+
} else {
xfs_iunlock(ip, lock_flags);
}
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0);
+ error = xfs_bmbt_to_iomap(ip, iomap, &imap, 0, 0);
*device_generation = mp->m_generation;
return error;
out_unlock:
@@ -255,7 +291,7 @@ xfs_fs_commit_blocks(
length = end - start;
if (!length)
continue;
-
+
/*
* Make sure reads through the pagecache see the new data.
*/
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 8b6c7163f684..db70060e7bf6 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1272,8 +1272,7 @@ xfs_reflink_zero_posteof(
return 0;
trace_xfs_zero_eof(ip, isize, pos - isize);
- return iomap_zero_range(VFS_I(ip), isize, pos - isize, NULL,
- &xfs_buffered_write_iomap_ops);
+ return xfs_zero_range(ip, isize, pos - isize, NULL);
}
/*
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index c7ac486ca5d3..4c0dee78b2f8 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -331,13 +331,34 @@ xfs_set_inode_alloc(
return xfs_is_inode32(mp) ? maxagi : agcount;
}
-static bool
-xfs_buftarg_is_dax(
- struct super_block *sb,
- struct xfs_buftarg *bt)
+static int
+xfs_setup_dax_always(
+ struct xfs_mount *mp)
{
- return dax_supported(bt->bt_daxdev, bt->bt_bdev, sb->s_blocksize, 0,
- bdev_nr_sectors(bt->bt_bdev));
+ if (!mp->m_ddev_targp->bt_daxdev &&
+ (!mp->m_rtdev_targp || !mp->m_rtdev_targp->bt_daxdev)) {
+ xfs_alert(mp,
+ "DAX unsupported by block device. Turning off DAX.");
+ goto disable_dax;
+ }
+
+ if (mp->m_super->s_blocksize != PAGE_SIZE) {
+ xfs_alert(mp,
+ "DAX not supported for blocksize. Turning off DAX.");
+ goto disable_dax;
+ }
+
+ if (xfs_has_reflink(mp)) {
+ xfs_alert(mp, "DAX and reflink cannot be used together!");
+ return -EINVAL;
+ }
+
+ xfs_warn(mp, "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
+ return 0;
+
+disable_dax:
+ xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
+ return 0;
}
STATIC int
@@ -370,26 +391,19 @@ STATIC void
xfs_close_devices(
struct xfs_mount *mp)
{
- struct dax_device *dax_ddev = mp->m_ddev_targp->bt_daxdev;
-
if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
struct block_device *logdev = mp->m_logdev_targp->bt_bdev;
- struct dax_device *dax_logdev = mp->m_logdev_targp->bt_daxdev;
xfs_free_buftarg(mp->m_logdev_targp);
xfs_blkdev_put(logdev);
- fs_put_dax(dax_logdev);
}
if (mp->m_rtdev_targp) {
struct block_device *rtdev = mp->m_rtdev_targp->bt_bdev;
- struct dax_device *dax_rtdev = mp->m_rtdev_targp->bt_daxdev;
xfs_free_buftarg(mp->m_rtdev_targp);
xfs_blkdev_put(rtdev);
- fs_put_dax(dax_rtdev);
}
xfs_free_buftarg(mp->m_ddev_targp);
- fs_put_dax(dax_ddev);
}
/*
@@ -407,8 +421,6 @@ xfs_open_devices(
struct xfs_mount *mp)
{
struct block_device *ddev = mp->m_super->s_bdev;
- struct dax_device *dax_ddev = fs_dax_get_by_bdev(ddev);
- struct dax_device *dax_logdev = NULL, *dax_rtdev = NULL;
struct block_device *logdev = NULL, *rtdev = NULL;
int error;
@@ -418,8 +430,7 @@ xfs_open_devices(
if (mp->m_logname) {
error = xfs_blkdev_get(mp, mp->m_logname, &logdev);
if (error)
- goto out;
- dax_logdev = fs_dax_get_by_bdev(logdev);
+ return error;
}
if (mp->m_rtname) {
@@ -433,25 +444,24 @@ xfs_open_devices(
error = -EINVAL;
goto out_close_rtdev;
}
- dax_rtdev = fs_dax_get_by_bdev(rtdev);
}
/*
* Setup xfs_mount buffer target pointers
*/
error = -ENOMEM;
- mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev, dax_ddev);
+ mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
if (!mp->m_ddev_targp)
goto out_close_rtdev;
if (rtdev) {
- mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev, dax_rtdev);
+ mp->m_rtdev_targp = xfs_alloc_buftarg(mp, rtdev);
if (!mp->m_rtdev_targp)
goto out_free_ddev_targ;
}
if (logdev && logdev != ddev) {
- mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev, dax_logdev);
+ mp->m_logdev_targp = xfs_alloc_buftarg(mp, logdev);
if (!mp->m_logdev_targp)
goto out_free_rtdev_targ;
} else {
@@ -467,14 +477,9 @@ xfs_open_devices(
xfs_free_buftarg(mp->m_ddev_targp);
out_close_rtdev:
xfs_blkdev_put(rtdev);
- fs_put_dax(dax_rtdev);
out_close_logdev:
- if (logdev && logdev != ddev) {
+ if (logdev && logdev != ddev)
xfs_blkdev_put(logdev);
- fs_put_dax(dax_logdev);
- }
- out:
- fs_put_dax(dax_ddev);
return error;
}
@@ -730,6 +735,7 @@ xfs_fs_sync_fs(
int wait)
{
struct xfs_mount *mp = XFS_M(sb);
+ int error;
trace_xfs_fs_sync_fs(mp, __return_address);
@@ -739,7 +745,10 @@ xfs_fs_sync_fs(
if (!wait)
return 0;
- xfs_log_force(mp, XFS_LOG_SYNC);
+ error = xfs_log_force(mp, XFS_LOG_SYNC);
+ if (error)
+ return error;
+
if (laptop_mode) {
/*
* The disk must be active because we're syncing.
@@ -1593,26 +1602,9 @@ xfs_fs_fill_super(
sb->s_flags |= SB_I_VERSION;
if (xfs_has_dax_always(mp)) {
- bool rtdev_is_dax = false, datadev_is_dax;
-
- xfs_warn(mp,
- "DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
-
- datadev_is_dax = xfs_buftarg_is_dax(sb, mp->m_ddev_targp);
- if (mp->m_rtdev_targp)
- rtdev_is_dax = xfs_buftarg_is_dax(sb,
- mp->m_rtdev_targp);
- if (!rtdev_is_dax && !datadev_is_dax) {
- xfs_alert(mp,
- "DAX unsupported by block device. Turning off DAX.");
- xfs_mount_set_dax_mode(mp, XFS_DAX_NEVER);
- }
- if (xfs_has_reflink(mp)) {
- xfs_alert(mp,
- "DAX and reflink cannot be used together!");
- error = -EINVAL;
+ error = xfs_setup_dax_always(mp);
+ if (error)
goto out_filestream_unmount;
- }
}
if (xfs_has_discard(mp)) {
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 44a3082a92a4..ca88c4706f2b 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -360,6 +360,7 @@ struct acpi_gpio_mapping;
/* Device */
struct acpi_device {
+ u32 pld_crc;
int device_type;
acpi_handle handle; /* no handle for fixed hardware */
struct fwnode_handle fwnode;
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
index 3d503e74037f..fd7e8fbaeef1 100644
--- a/include/asm-generic/barrier.h
+++ b/include/asm-generic/barrier.h
@@ -285,7 +285,7 @@ do { \
* write-combining memory accesses before this macro with those after it.
*/
#ifndef io_stop_wc
-#define io_stop_wc do { } while (0)
+#define io_stop_wc() do { } while (0)
#endif
#endif /* !__ASSEMBLY__ */
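
The barrier.h fix above turns the fallback into a function-like macro; without the parentheses, a call site spelled io_stop_wc() would expand to "do { } while (0)();", which is a syntax error. A standalone illustration:

#include <stdio.h>

/* Function-like no-op, matching the fixed definition. */
#define io_stop_wc() do { } while (0)

/*
 * With the broken "#define io_stop_wc do { } while (0)", the call below
 * would expand to "do { } while (0)();" and fail to compile.
 */
int main(void)
{
	io_stop_wc();
	puts("ok");
	return 0;
}
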
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index df9b5bc3d282..a47b8a71d6fe 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -20,7 +20,6 @@
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
-#include <asm-generic/bitops/find.h>
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 5a28629cbf4d..d51beff60375 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -2,83 +2,19 @@
#ifndef _ASM_GENERIC_BITOPS_LE_H_
#define _ASM_GENERIC_BITOPS_LE_H_
-#include <asm-generic/bitops/find.h>
#include <asm/types.h>
#include <asm/byteorder.h>
-#include <linux/swab.h>
#if defined(__LITTLE_ENDIAN)
#define BITOP_LE_SWIZZLE 0
-static inline unsigned long find_next_zero_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- return find_next_zero_bit(addr, size, offset);
-}
-
-static inline unsigned long find_next_bit_le(const void *addr,
- unsigned long size, unsigned long offset)
-{
- return find_next_bit(addr, size, offset);
-}
-
-static inline unsigned long find_first_zero_bit_le(const void *addr,
- unsigned long size)
-{
- return find_first_zero_bit(addr, size);
-}
-
#elif defined(__BIG_ENDIAN)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-#ifndef find_next_zero_bit_le
-static inline
-unsigned long find_next_zero_bit_le(const void *addr, unsigned
- long size, unsigned long offset)
-{
- if (small_const_nbits(size)) {
- unsigned long val = *(const unsigned long *)addr;
-
- if (unlikely(offset >= size))
- return size;
-
- val = swab(val) | ~GENMASK(size - 1, offset);
- return val == ~0UL ? size : ffz(val);
- }
-
- return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
-}
-#endif
-
-#ifndef find_next_bit_le
-static inline
-unsigned long find_next_bit_le(const void *addr, unsigned
- long size, unsigned long offset)
-{
- if (small_const_nbits(size)) {
- unsigned long val = *(const unsigned long *)addr;
-
- if (unlikely(offset >= size))
- return size;
-
- val = swab(val) & GENMASK(size - 1, offset);
- return val ? __ffs(val) : size;
- }
-
- return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
-}
#endif
-#ifndef find_first_zero_bit_le
-#define find_first_zero_bit_le(addr, size) \
- find_next_zero_bit_le((addr), (size), 0)
-#endif
-
-#else
-#error "Please fix <asm/byteorder.h>"
-#endif
static inline int test_bit_le(int nr, const void *addr)
{
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index 8ed6733d5146..8f97c2927bee 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -540,39 +540,6 @@ enum hv_interrupt_source {
HV_INTERRUPT_SOURCE_IOAPIC,
};
-union hv_msi_address_register {
- u32 as_uint32;
- struct {
- u32 reserved1:2;
- u32 destination_mode:1;
- u32 redirection_hint:1;
- u32 reserved2:8;
- u32 destination_id:8;
- u32 msi_base:12;
- };
-} __packed;
-
-union hv_msi_data_register {
- u32 as_uint32;
- struct {
- u32 vector:8;
- u32 delivery_mode:3;
- u32 reserved1:3;
- u32 level_assert:1;
- u32 trigger_mode:1;
- u32 reserved2:16;
- };
-} __packed;
-
-/* HvRetargetDeviceInterrupt hypercall */
-union hv_msi_entry {
- u64 as_uint64;
- struct {
- union hv_msi_address_register address;
- union hv_msi_data_register data;
- } __packed;
-};
-
union hv_ioapic_rte {
u64 as_uint64;
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 3e2248ac328e..c08758b6b364 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -49,8 +49,8 @@ struct ms_hyperv_info {
};
extern struct ms_hyperv_info ms_hyperv;
-extern void __percpu **hyperv_pcpu_input_arg;
-extern void __percpu **hyperv_pcpu_output_arg;
+extern void * __percpu *hyperv_pcpu_input_arg;
+extern void * __percpu *hyperv_pcpu_output_arg;
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
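
The mshyperv.h change only moves the __percpu qualifier, but that moves it to a different level of indirection: sparse reads the qualifier as binding to the type on its left, and alloc_percpu(void *) hands back a "void * __percpu *". A short kernel-style sketch of the distinction, with sparse semantics assumed and local names:

#include <linux/errno.h>
#include <linux/percpu.h>

static void __percpu **a;	/* plain pointer to a __percpu pointer-to-void */
static void * __percpu *b;	/* __percpu handle; each CPU's slot is a void * */

static int demo(void)
{
	(void)a;			/* the old, mis-levelled annotation */
	b = alloc_percpu(void *);	/* returns void * __percpu *, matching b */
	if (!b)
		return -ENOMEM;
	free_percpu(b);
	return 0;
}
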
@@ -269,6 +269,8 @@ bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
+void *hv_map_memory(void *addr, unsigned long size);
+void hv_unmap_memory(void *addr);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index 02932efad3ab..977bea16cf1b 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -147,6 +147,15 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
#if CONFIG_PGTABLE_LEVELS > 3
+static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ gfp_t gfp = GFP_PGTABLE_USER;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ return (pud_t *)get_zeroed_page(gfp);
+}
+
#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
* pud_alloc_one - allocate a page for PUD-level page table
@@ -159,20 +168,23 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
*/
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- gfp_t gfp = GFP_PGTABLE_USER;
-
- if (mm == &init_mm)
- gfp = GFP_PGTABLE_KERNEL;
- return (pud_t *)get_zeroed_page(gfp);
+ return __pud_alloc_one(mm, addr);
}
#endif
-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
{
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
free_page((unsigned long)pud);
}
+#ifndef __HAVE_ARCH_PUD_FREE
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ __pud_free(mm, pud);
+}
+#endif
+
#endif /* CONFIG_PGTABLE_LEVELS > 3 */
#ifndef __HAVE_ARCH_PGD_FREE
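
The pgalloc.h refactor above follows the usual asm-generic override convention: the generic body moves into a double-underscored helper that always exists, and the public name is guarded by #ifndef __HAVE_ARCH_PUD_FREE so an architecture can define its own pud_free() while still reusing __pud_free(). A compilable toy version of the convention, using hypothetical names:

#include <stdio.h>

struct widget { int id; };

/* "arch" header: opt out of the generic default, wrap the helper. */
#define __HAVE_ARCH_WIDGET_FREE
static inline void __widget_free(struct widget *w);	/* forward decl */
static inline void widget_free(struct widget *w)
{
	printf("arch hook for widget %d\n", w->id);	/* arch-specific step */
	__widget_free(w);				/* reuse generic body */
}

/* "asm-generic" header: helper always exists, default only if not overridden. */
static inline void __widget_free(struct widget *w)
{
	printf("generic free of widget %d\n", w->id);
}

#ifndef __HAVE_ARCH_WIDGET_FREE
static inline void widget_free(struct widget *w) { __widget_free(w); }
#endif

int main(void)
{
	struct widget w = { 1 };
	widget_free(&w);
	return 0;
}
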
diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h
index df3c6c2f9553..f9ffd39194eb 100644
--- a/include/crypto/blake2s.h
+++ b/include/crypto/blake2s.h
@@ -101,7 +101,4 @@ static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
blake2s_final(&state, out);
}
-void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
- const size_t keylen);
-
#endif /* _CRYPTO_BLAKE2S_H */
diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h
index d39cfa0d333e..52363eee2b20 100644
--- a/include/crypto/internal/blake2s.h
+++ b/include/crypto/internal/blake2s.h
@@ -24,14 +24,11 @@ static inline void blake2s_set_lastblock(struct blake2s_state *state)
state->f[0] = -1;
}
-typedef void (*blake2s_compress_t)(struct blake2s_state *state,
- const u8 *block, size_t nblocks, u32 inc);
-
/* Helper functions for BLAKE2s shared by the library and shash APIs */
-static inline void __blake2s_update(struct blake2s_state *state,
- const u8 *in, size_t inlen,
- blake2s_compress_t compress)
+static __always_inline void
+__blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen,
+ bool force_generic)
{
const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
@@ -39,7 +36,12 @@ static inline void __blake2s_update(struct blake2s_state *state,
return;
if (inlen > fill) {
memcpy(state->buf + state->buflen, in, fill);
- (*compress)(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
+ if (force_generic)
+ blake2s_compress_generic(state, state->buf, 1,
+ BLAKE2S_BLOCK_SIZE);
+ else
+ blake2s_compress(state, state->buf, 1,
+ BLAKE2S_BLOCK_SIZE);
state->buflen = 0;
in += fill;
inlen -= fill;
@@ -47,7 +49,12 @@ static inline void __blake2s_update(struct blake2s_state *state,
if (inlen > BLAKE2S_BLOCK_SIZE) {
const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
/* Hash one less (full) block than strictly possible */
- (*compress)(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
+ if (force_generic)
+ blake2s_compress_generic(state, in, nblocks - 1,
+ BLAKE2S_BLOCK_SIZE);
+ else
+ blake2s_compress(state, in, nblocks - 1,
+ BLAKE2S_BLOCK_SIZE);
in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
}
@@ -55,13 +62,16 @@ static inline void __blake2s_update(struct blake2s_state *state,
state->buflen += inlen;
}
-static inline void __blake2s_final(struct blake2s_state *state, u8 *out,
- blake2s_compress_t compress)
+static __always_inline void
+__blake2s_final(struct blake2s_state *state, u8 *out, bool force_generic)
{
blake2s_set_lastblock(state);
memset(state->buf + state->buflen, 0,
BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
- (*compress)(state, state->buf, 1, state->buflen);
+ if (force_generic)
+ blake2s_compress_generic(state, state->buf, 1, state->buflen);
+ else
+ blake2s_compress(state, state->buf, 1, state->buflen);
cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
memcpy(out, state->h, state->outlen);
}
@@ -99,20 +109,20 @@ static inline int crypto_blake2s_init(struct shash_desc *desc)
static inline int crypto_blake2s_update(struct shash_desc *desc,
const u8 *in, unsigned int inlen,
- blake2s_compress_t compress)
+ bool force_generic)
{
struct blake2s_state *state = shash_desc_ctx(desc);
- __blake2s_update(state, in, inlen, compress);
+ __blake2s_update(state, in, inlen, force_generic);
return 0;
}
static inline int crypto_blake2s_final(struct shash_desc *desc, u8 *out,
- blake2s_compress_t compress)
+ bool force_generic)
{
struct blake2s_state *state = shash_desc_ctx(desc);
- __blake2s_final(state, out, compress);
+ __blake2s_final(state, out, force_generic);
return 0;
}
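
Replacing the blake2s_compress_t function pointer with a constant bool force_generic plus __always_inline means every caller resolves to a direct call at compile time, which is what indirect-call-averse configurations such as Clang CFI want. Roughly how the two kinds of shash callers then look; the wrapper names here are illustrative, only the helpers are from the diff:

/*
 * The bool is a compile-time constant at each call site, so after inlining
 * there is a direct call to exactly one compression routine and no
 * indirect branch.
 */
static int blake2s_update_generic(struct shash_desc *desc,
				  const u8 *in, unsigned int inlen)
{
	return crypto_blake2s_update(desc, in, inlen, true);	/* lib fallback */
}

static int blake2s_update_arch(struct shash_desc *desc,
			       const u8 *in, unsigned int inlen)
{
	return crypto_blake2s_update(desc, in, inlen, false);	/* SIMD path */
}
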
diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h
index 7d57063b8a65..29ff6b895848 100644
--- a/include/dt-bindings/clock/dra7.h
+++ b/include/dt-bindings/clock/dra7.h
@@ -84,17 +84,10 @@
#define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-/* iva clocks */
-#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
-
/* dss clocks */
#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
-/* gpu clocks */
-#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
-
/* l3init clocks */
#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
@@ -267,10 +260,17 @@
#define DRA7_L3INSTR_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_L3INSTR_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+/* iva clocks */
+#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+
/* dss clocks */
#define DRA7_DSS_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_DSS_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+/* gpu clocks */
+#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
/* l3init clocks */
#define DRA7_L3INIT_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
#define DRA7_L3INIT_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index 88ec3968b90a..acbfbab875ec 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -209,6 +209,7 @@
#define CLK_ACLK400_MCUISP 395 /* Exynos4x12 only */
#define CLK_MOUT_HDMI 396
#define CLK_MOUT_MIXER 397
+#define CLK_MOUT_VPLLSRC 398
/* gate clocks - ppmu */
#define CLK_PPMULEFT 400
@@ -236,9 +237,10 @@
#define CLK_DIV_C2C 458 /* Exynos4x12 only */
#define CLK_DIV_GDL 459
#define CLK_DIV_GDR 460
+#define CLK_DIV_CORE2 461
/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 461
+#define CLK_NR_CLKS 462
/* Exynos4x12 ISP clocks */
#define CLK_ISP_FIMC_ISP 1
diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h
index e259cc01f22f..4680da7357d3 100644
--- a/include/dt-bindings/clock/exynos5250.h
+++ b/include/dt-bindings/clock/exynos5250.h
@@ -19,6 +19,7 @@
#define CLK_FOUT_EPLL 7
#define CLK_FOUT_VPLL 8
#define CLK_ARM_CLK 9
+#define CLK_DIV_ARM2 10
/* gate for special clocks (sclk) */
#define CLK_SCLK_CAM_BAYER 128
@@ -174,8 +175,9 @@
#define CLK_MOUT_ACLK300_DISP1_SUB 1027
#define CLK_MOUT_APLL 1028
#define CLK_MOUT_MPLL 1029
+#define CLK_MOUT_VPLLSRC 1030
/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 1030
+#define CLK_NR_CLKS 1031
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5250_H */
diff --git a/include/dt-bindings/clock/exynos7885.h b/include/dt-bindings/clock/exynos7885.h
new file mode 100644
index 000000000000..1f8701691d62
--- /dev/null
+++ b/include/dt-bindings/clock/exynos7885.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Dávid Virág
+ *
+ * Device Tree binding constants for Exynos7885 clock controller.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS_7885_H
+#define _DT_BINDINGS_CLOCK_EXYNOS_7885_H
+
+/* CMU_TOP */
+#define CLK_FOUT_SHARED0_PLL 1
+#define CLK_FOUT_SHARED1_PLL 2
+#define CLK_DOUT_SHARED0_DIV2 3
+#define CLK_DOUT_SHARED0_DIV3 4
+#define CLK_DOUT_SHARED0_DIV4 5
+#define CLK_DOUT_SHARED0_DIV5 6
+#define CLK_DOUT_SHARED1_DIV2 7
+#define CLK_DOUT_SHARED1_DIV3 8
+#define CLK_DOUT_SHARED1_DIV4 9
+#define CLK_MOUT_CORE_BUS 10
+#define CLK_MOUT_CORE_CCI 11
+#define CLK_MOUT_CORE_G3D 12
+#define CLK_DOUT_CORE_BUS 13
+#define CLK_DOUT_CORE_CCI 14
+#define CLK_DOUT_CORE_G3D 15
+#define CLK_GOUT_CORE_BUS 16
+#define CLK_GOUT_CORE_CCI 17
+#define CLK_GOUT_CORE_G3D 18
+#define CLK_MOUT_PERI_BUS 19
+#define CLK_MOUT_PERI_SPI0 20
+#define CLK_MOUT_PERI_SPI1 21
+#define CLK_MOUT_PERI_UART0 22
+#define CLK_MOUT_PERI_UART1 23
+#define CLK_MOUT_PERI_UART2 24
+#define CLK_MOUT_PERI_USI0 25
+#define CLK_MOUT_PERI_USI1 26
+#define CLK_MOUT_PERI_USI2 27
+#define CLK_DOUT_PERI_BUS 28
+#define CLK_DOUT_PERI_SPI0 29
+#define CLK_DOUT_PERI_SPI1 30
+#define CLK_DOUT_PERI_UART0 31
+#define CLK_DOUT_PERI_UART1 32
+#define CLK_DOUT_PERI_UART2 33
+#define CLK_DOUT_PERI_USI0 34
+#define CLK_DOUT_PERI_USI1 35
+#define CLK_DOUT_PERI_USI2 36
+#define CLK_GOUT_PERI_BUS 37
+#define CLK_GOUT_PERI_SPI0 38
+#define CLK_GOUT_PERI_SPI1 39
+#define CLK_GOUT_PERI_UART0 40
+#define CLK_GOUT_PERI_UART1 41
+#define CLK_GOUT_PERI_UART2 42
+#define CLK_GOUT_PERI_USI0 43
+#define CLK_GOUT_PERI_USI1 44
+#define CLK_GOUT_PERI_USI2 45
+#define TOP_NR_CLK 46
+
+/* CMU_CORE */
+#define CLK_MOUT_CORE_BUS_USER 1
+#define CLK_MOUT_CORE_CCI_USER 2
+#define CLK_MOUT_CORE_G3D_USER 3
+#define CLK_MOUT_CORE_GIC 4
+#define CLK_DOUT_CORE_BUSP 5
+#define CLK_GOUT_CCI_ACLK 6
+#define CLK_GOUT_GIC400_CLK 7
+#define CORE_NR_CLK 8
+
+/* CMU_PERI */
+#define CLK_MOUT_PERI_BUS_USER 1
+#define CLK_MOUT_PERI_SPI0_USER 2
+#define CLK_MOUT_PERI_SPI1_USER 3
+#define CLK_MOUT_PERI_UART0_USER 4
+#define CLK_MOUT_PERI_UART1_USER 5
+#define CLK_MOUT_PERI_UART2_USER 6
+#define CLK_MOUT_PERI_USI0_USER 7
+#define CLK_MOUT_PERI_USI1_USER 8
+#define CLK_MOUT_PERI_USI2_USER 9
+#define CLK_GOUT_GPIO_TOP_PCLK 10
+#define CLK_GOUT_HSI2C0_PCLK 11
+#define CLK_GOUT_HSI2C1_PCLK 12
+#define CLK_GOUT_HSI2C2_PCLK 13
+#define CLK_GOUT_HSI2C3_PCLK 14
+#define CLK_GOUT_I2C0_PCLK 15
+#define CLK_GOUT_I2C1_PCLK 16
+#define CLK_GOUT_I2C2_PCLK 17
+#define CLK_GOUT_I2C3_PCLK 18
+#define CLK_GOUT_I2C4_PCLK 19
+#define CLK_GOUT_I2C5_PCLK 20
+#define CLK_GOUT_I2C6_PCLK 21
+#define CLK_GOUT_I2C7_PCLK 22
+#define CLK_GOUT_PWM_MOTOR_PCLK 23
+#define CLK_GOUT_SPI0_PCLK 24
+#define CLK_GOUT_SPI0_EXT_CLK 25
+#define CLK_GOUT_SPI1_PCLK 26
+#define CLK_GOUT_SPI1_EXT_CLK 27
+#define CLK_GOUT_UART0_EXT_UCLK 28
+#define CLK_GOUT_UART0_PCLK 29
+#define CLK_GOUT_UART1_EXT_UCLK 30
+#define CLK_GOUT_UART1_PCLK 31
+#define CLK_GOUT_UART2_EXT_UCLK 32
+#define CLK_GOUT_UART2_PCLK 33
+#define CLK_GOUT_USI0_PCLK 34
+#define CLK_GOUT_USI0_SCLK 35
+#define CLK_GOUT_USI1_PCLK 36
+#define CLK_GOUT_USI1_SCLK 37
+#define CLK_GOUT_USI2_PCLK 38
+#define CLK_GOUT_USI2_SCLK 39
+#define CLK_GOUT_MCT_PCLK 40
+#define CLK_GOUT_SYSREG_PERI_PCLK 41
+#define CLK_GOUT_WDT0_PCLK 42
+#define CLK_GOUT_WDT1_PCLK 43
+#define PERI_NR_CLK 44
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS_7885_H */
diff --git a/include/dt-bindings/clock/exynos850.h b/include/dt-bindings/clock/exynos850.h
index 8999184f94a2..0b6a3c6a7c90 100644
--- a/include/dt-bindings/clock/exynos850.h
+++ b/include/dt-bindings/clock/exynos850.h
@@ -55,7 +55,55 @@
#define CLK_GOUT_PERI_BUS 43
#define CLK_GOUT_PERI_UART 44
#define CLK_GOUT_PERI_IP 45
-#define TOP_NR_CLK 46
+#define CLK_MOUT_CLKCMU_APM_BUS 46
+#define CLK_DOUT_CLKCMU_APM_BUS 47
+#define CLK_GOUT_CLKCMU_APM_BUS 48
+#define TOP_NR_CLK 49
+
+/* CMU_APM */
+#define CLK_RCO_I3C_PMIC 1
+#define OSCCLK_RCO_APM 2
+#define CLK_RCO_APM__ALV 3
+#define CLK_DLL_DCO 4
+#define CLK_MOUT_APM_BUS_USER 5
+#define CLK_MOUT_RCO_APM_I3C_USER 6
+#define CLK_MOUT_RCO_APM_USER 7
+#define CLK_MOUT_DLL_USER 8
+#define CLK_MOUT_CLKCMU_CHUB_BUS 9
+#define CLK_MOUT_APM_BUS 10
+#define CLK_MOUT_APM_I3C 11
+#define CLK_DOUT_CLKCMU_CHUB_BUS 12
+#define CLK_DOUT_APM_BUS 13
+#define CLK_DOUT_APM_I3C 14
+#define CLK_GOUT_CLKCMU_CMGP_BUS 15
+#define CLK_GOUT_CLKCMU_CHUB_BUS 16
+#define CLK_GOUT_RTC_PCLK 17
+#define CLK_GOUT_TOP_RTC_PCLK 18
+#define CLK_GOUT_I3C_PCLK 19
+#define CLK_GOUT_I3C_SCLK 20
+#define CLK_GOUT_SPEEDY_PCLK 21
+#define CLK_GOUT_GPIO_ALIVE_PCLK 22
+#define CLK_GOUT_PMU_ALIVE_PCLK 23
+#define CLK_GOUT_SYSREG_APM_PCLK 24
+#define APM_NR_CLK 25
+
+/* CMU_CMGP */
+#define CLK_RCO_CMGP 1
+#define CLK_MOUT_CMGP_ADC 2
+#define CLK_MOUT_CMGP_USI0 3
+#define CLK_MOUT_CMGP_USI1 4
+#define CLK_DOUT_CMGP_ADC 5
+#define CLK_DOUT_CMGP_USI0 6
+#define CLK_DOUT_CMGP_USI1 7
+#define CLK_GOUT_CMGP_ADC_S0_PCLK 8
+#define CLK_GOUT_CMGP_ADC_S1_PCLK 9
+#define CLK_GOUT_CMGP_GPIO_PCLK 10
+#define CLK_GOUT_CMGP_USI0_IPCLK 11
+#define CLK_GOUT_CMGP_USI0_PCLK 12
+#define CLK_GOUT_CMGP_USI1_IPCLK 13
+#define CLK_GOUT_CMGP_USI1_PCLK 14
+#define CLK_GOUT_SYSREG_CMGP_PCLK 15
+#define CMGP_NR_CLK 16
/* CMU_HSI */
#define CLK_MOUT_HSI_BUS_USER 1
@@ -123,7 +171,9 @@
#define CLK_GOUT_MMC_EMBD_SDCLKIN 10
#define CLK_GOUT_SSS_ACLK 11
#define CLK_GOUT_SSS_PCLK 12
-#define CORE_NR_CLK 13
+#define CLK_GOUT_GPIO_CORE_PCLK 13
+#define CLK_GOUT_SYSREG_CORE_PCLK 14
+#define CORE_NR_CLK 15
/* CMU_DPU */
#define CLK_MOUT_DPU_USER 1
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
index 43927a1b9e94..235c7a00d379 100644
--- a/include/dt-bindings/clock/imx8mp-clock.h
+++ b/include/dt-bindings/clock/imx8mp-clock.h
@@ -117,7 +117,6 @@
#define IMX8MP_CLK_AUDIO_AHB 108
#define IMX8MP_CLK_MIPI_DSI_ESC_RX 109
#define IMX8MP_CLK_IPG_ROOT 110
-#define IMX8MP_CLK_IPG_AUDIO_ROOT 111
#define IMX8MP_CLK_DRAM_ALT 112
#define IMX8MP_CLK_DRAM_APB 113
#define IMX8MP_CLK_VPU_G1 114
diff --git a/include/dt-bindings/clock/ingenic,jz4760-cgu.h b/include/dt-bindings/clock/ingenic,jz4760-cgu.h
index 4bb2e19c4743..9fb04ebac6de 100644
--- a/include/dt-bindings/clock/ingenic,jz4760-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4760-cgu.h
@@ -50,5 +50,7 @@
#define JZ4760_CLK_LPCLK_DIV 41
#define JZ4760_CLK_TVE 42
#define JZ4760_CLK_LPCLK 43
+#define JZ4760_CLK_MDMA 44
+#define JZ4760_CLK_BDMA 45
#endif /* __DT_BINDINGS_CLOCK_JZ4760_CGU_H__ */
diff --git a/include/dt-bindings/clock/ingenic,jz4770-cgu.h b/include/dt-bindings/clock/ingenic,jz4770-cgu.h
index d68a7695a1f8..0b475e8ae321 100644
--- a/include/dt-bindings/clock/ingenic,jz4770-cgu.h
+++ b/include/dt-bindings/clock/ingenic,jz4770-cgu.h
@@ -54,5 +54,6 @@
#define JZ4770_CLK_OTG_PHY 45
#define JZ4770_CLK_EXT512 46
#define JZ4770_CLK_RTC 47
+#define JZ4770_CLK_BDMA 48
#endif /* __DT_BINDINGS_CLOCK_JZ4770_CGU_H__ */
diff --git a/include/dt-bindings/clock/microchip,lan966x.h b/include/dt-bindings/clock/microchip,lan966x.h
new file mode 100644
index 000000000000..6f9d43d76d5a
--- /dev/null
+++ b/include/dt-bindings/clock/microchip,lan966x.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 Microchip Inc.
+ *
+ * Author: Kavyasree Kotagiri <kavyasree.kotagiri@microchip.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_LAN966X_H
+#define _DT_BINDINGS_CLK_LAN966X_H
+
+#define GCK_ID_QSPI0 0
+#define GCK_ID_QSPI1 1
+#define GCK_ID_QSPI2 2
+#define GCK_ID_SDMMC0 3
+#define GCK_ID_PI 4
+#define GCK_ID_MCAN0 5
+#define GCK_ID_MCAN1 6
+#define GCK_ID_FLEXCOM0 7
+#define GCK_ID_FLEXCOM1 8
+#define GCK_ID_FLEXCOM2 9
+#define GCK_ID_FLEXCOM3 10
+#define GCK_ID_FLEXCOM4 11
+#define GCK_ID_TIMER 12
+#define GCK_ID_USB_REFCLK 13
+
+/* Gate clocks */
+#define GCK_GATE_UHPHS 14
+#define GCK_GATE_UDPHS 15
+#define GCK_GATE_MCRAMC 16
+#define GCK_GATE_HMATRIX 17
+
+#define N_CLOCKS 18
+
+#endif
diff --git a/include/dt-bindings/clock/mt7986-clk.h b/include/dt-bindings/clock/mt7986-clk.h
new file mode 100644
index 000000000000..5a9b169324b0
--- /dev/null
+++ b/include/dt-bindings/clock/mt7986-clk.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2021 MediaTek Inc.
+ * Author: Sam Shih <sam.shih@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MT7986_H
+#define _DT_BINDINGS_CLK_MT7986_H
+
+/* APMIXEDSYS */
+
+#define CLK_APMIXED_ARMPLL 0
+#define CLK_APMIXED_NET2PLL 1
+#define CLK_APMIXED_MMPLL 2
+#define CLK_APMIXED_SGMPLL 3
+#define CLK_APMIXED_WEDMCUPLL 4
+#define CLK_APMIXED_NET1PLL 5
+#define CLK_APMIXED_MPLL 6
+#define CLK_APMIXED_APLL2 7
+
+/* TOPCKGEN */
+
+#define CLK_TOP_XTAL 0
+#define CLK_TOP_XTAL_D2 1
+#define CLK_TOP_RTC_32K 2
+#define CLK_TOP_RTC_32P7K 3
+#define CLK_TOP_MPLL_D2 4
+#define CLK_TOP_MPLL_D4 5
+#define CLK_TOP_MPLL_D8 6
+#define CLK_TOP_MPLL_D8_D2 7
+#define CLK_TOP_MPLL_D3_D2 8
+#define CLK_TOP_MMPLL_D2 9
+#define CLK_TOP_MMPLL_D4 10
+#define CLK_TOP_MMPLL_D8 11
+#define CLK_TOP_MMPLL_D8_D2 12
+#define CLK_TOP_MMPLL_D3_D8 13
+#define CLK_TOP_MMPLL_U2PHY 14
+#define CLK_TOP_APLL2_D4 15
+#define CLK_TOP_NET1PLL_D4 16
+#define CLK_TOP_NET1PLL_D5 17
+#define CLK_TOP_NET1PLL_D5_D2 18
+#define CLK_TOP_NET1PLL_D5_D4 19
+#define CLK_TOP_NET1PLL_D8_D2 20
+#define CLK_TOP_NET1PLL_D8_D4 21
+#define CLK_TOP_NET2PLL_D4 22
+#define CLK_TOP_NET2PLL_D4_D2 23
+#define CLK_TOP_NET2PLL_D3_D2 24
+#define CLK_TOP_WEDMCUPLL_D5_D2 25
+#define CLK_TOP_NFI1X_SEL 26
+#define CLK_TOP_SPINFI_SEL 27
+#define CLK_TOP_SPI_SEL 28
+#define CLK_TOP_SPIM_MST_SEL 29
+#define CLK_TOP_UART_SEL 30
+#define CLK_TOP_PWM_SEL 31
+#define CLK_TOP_I2C_SEL 32
+#define CLK_TOP_PEXTP_TL_SEL 33
+#define CLK_TOP_EMMC_250M_SEL 34
+#define CLK_TOP_EMMC_416M_SEL 35
+#define CLK_TOP_F_26M_ADC_SEL 36
+#define CLK_TOP_DRAMC_SEL 37
+#define CLK_TOP_DRAMC_MD32_SEL 38
+#define CLK_TOP_SYSAXI_SEL 39
+#define CLK_TOP_SYSAPB_SEL 40
+#define CLK_TOP_ARM_DB_MAIN_SEL 41
+#define CLK_TOP_ARM_DB_JTSEL 42
+#define CLK_TOP_NETSYS_SEL 43
+#define CLK_TOP_NETSYS_500M_SEL 44
+#define CLK_TOP_NETSYS_MCU_SEL 45
+#define CLK_TOP_NETSYS_2X_SEL 46
+#define CLK_TOP_SGM_325M_SEL 47
+#define CLK_TOP_SGM_REG_SEL 48
+#define CLK_TOP_A1SYS_SEL 49
+#define CLK_TOP_CONN_MCUSYS_SEL 50
+#define CLK_TOP_EIP_B_SEL 51
+#define CLK_TOP_PCIE_PHY_SEL 52
+#define CLK_TOP_USB3_PHY_SEL 53
+#define CLK_TOP_F26M_SEL 54
+#define CLK_TOP_AUD_L_SEL 55
+#define CLK_TOP_A_TUNER_SEL 56
+#define CLK_TOP_U2U3_SEL 57
+#define CLK_TOP_U2U3_SYS_SEL 58
+#define CLK_TOP_U2U3_XHCI_SEL 59
+#define CLK_TOP_DA_U2_REFSEL 60
+#define CLK_TOP_DA_U2_CK_1P_SEL 61
+#define CLK_TOP_AP2CNN_HOST_SEL 62
+#define CLK_TOP_JTAG 63
+
+/* INFRACFG */
+
+#define CLK_INFRA_SYSAXI_D2 0
+#define CLK_INFRA_UART0_SEL 1
+#define CLK_INFRA_UART1_SEL 2
+#define CLK_INFRA_UART2_SEL 3
+#define CLK_INFRA_SPI0_SEL 4
+#define CLK_INFRA_SPI1_SEL 5
+#define CLK_INFRA_PWM1_SEL 6
+#define CLK_INFRA_PWM2_SEL 7
+#define CLK_INFRA_PWM_BSEL 8
+#define CLK_INFRA_PCIE_SEL 9
+#define CLK_INFRA_GPT_STA 10
+#define CLK_INFRA_PWM_HCK 11
+#define CLK_INFRA_PWM_STA 12
+#define CLK_INFRA_PWM1_CK 13
+#define CLK_INFRA_PWM2_CK 14
+#define CLK_INFRA_CQ_DMA_CK 15
+#define CLK_INFRA_EIP97_CK 16
+#define CLK_INFRA_AUD_BUS_CK 17
+#define CLK_INFRA_AUD_26M_CK 18
+#define CLK_INFRA_AUD_L_CK 19
+#define CLK_INFRA_AUD_AUD_CK 20
+#define CLK_INFRA_AUD_EG2_CK 21
+#define CLK_INFRA_DRAMC_26M_CK 22
+#define CLK_INFRA_DBG_CK 23
+#define CLK_INFRA_AP_DMA_CK 24
+#define CLK_INFRA_SEJ_CK 25
+#define CLK_INFRA_SEJ_13M_CK 26
+#define CLK_INFRA_THERM_CK 27
+#define CLK_INFRA_I2C0_CK 28
+#define CLK_INFRA_UART0_CK 29
+#define CLK_INFRA_UART1_CK 30
+#define CLK_INFRA_UART2_CK 31
+#define CLK_INFRA_NFI1_CK 32
+#define CLK_INFRA_SPINFI1_CK 33
+#define CLK_INFRA_NFI_HCK_CK 34
+#define CLK_INFRA_SPI0_CK 35
+#define CLK_INFRA_SPI1_CK 36
+#define CLK_INFRA_SPI0_HCK_CK 37
+#define CLK_INFRA_SPI1_HCK_CK 38
+#define CLK_INFRA_FRTC_CK 39
+#define CLK_INFRA_MSDC_CK 40
+#define CLK_INFRA_MSDC_HCK_CK 41
+#define CLK_INFRA_MSDC_133M_CK 42
+#define CLK_INFRA_MSDC_66M_CK 43
+#define CLK_INFRA_ADC_26M_CK 44
+#define CLK_INFRA_ADC_FRC_CK 45
+#define CLK_INFRA_FBIST2FPC_CK 46
+#define CLK_INFRA_IUSB_133_CK 47
+#define CLK_INFRA_IUSB_66M_CK 48
+#define CLK_INFRA_IUSB_SYS_CK 49
+#define CLK_INFRA_IUSB_CK 50
+#define CLK_INFRA_IPCIE_CK 51
+#define CLK_INFRA_IPCIE_PIPE_CK 52
+#define CLK_INFRA_IPCIER_CK 53
+#define CLK_INFRA_IPCIEB_CK 54
+#define CLK_INFRA_TRNG_CK 55
+
+/* SGMIISYS_0 */
+
+#define CLK_SGMII0_TX250M_EN 0
+#define CLK_SGMII0_RX250M_EN 1
+#define CLK_SGMII0_CDR_REF 2
+#define CLK_SGMII0_CDR_FB 3
+
+/* SGMIISYS_1 */
+
+#define CLK_SGMII1_TX250M_EN 0
+#define CLK_SGMII1_RX250M_EN 1
+#define CLK_SGMII1_CDR_REF 2
+#define CLK_SGMII1_CDR_FB 3
+
+/* ETHSYS */
+
+#define CLK_ETH_FE_EN 0
+#define CLK_ETH_GP2_EN 1
+#define CLK_ETH_GP1_EN 2
+#define CLK_ETH_WOCPU1_EN 3
+#define CLK_ETH_WOCPU0_EN 4
+
+#endif /* _DT_BINDINGS_CLK_MT7986_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8976.h b/include/dt-bindings/clock/qcom,gcc-msm8976.h
new file mode 100644
index 000000000000..51955fd49426
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-msm8976.h
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2016-2021, AngeloGioacchino Del Regno
+ * <angelogioacchino.delregno@somainline.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_8976_H
+#define _DT_BINDINGS_CLK_MSM_GCC_8976_H
+
+#define GPLL0 0
+#define GPLL2 1
+#define GPLL3 2
+#define GPLL4 3
+#define GPLL6 4
+#define GPLL0_CLK_SRC 5
+#define GPLL2_CLK_SRC 6
+#define GPLL3_CLK_SRC 7
+#define GPLL4_CLK_SRC 8
+#define GPLL6_CLK_SRC 9
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 10
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 11
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 12
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 13
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 14
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 15
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 16
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 17
+#define GCC_BLSP1_UART1_APPS_CLK 18
+#define GCC_BLSP1_UART2_APPS_CLK 19
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK 20
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK 21
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK 22
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK 23
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK 24
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK 25
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK 26
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK 27
+#define GCC_BLSP2_UART1_APPS_CLK 28
+#define GCC_BLSP2_UART2_APPS_CLK 29
+#define GCC_CAMSS_CCI_AHB_CLK 30
+#define GCC_CAMSS_CCI_CLK 31
+#define GCC_CAMSS_CPP_AHB_CLK 32
+#define GCC_CAMSS_CPP_AXI_CLK 33
+#define GCC_CAMSS_CPP_CLK 34
+#define GCC_CAMSS_CSI0_AHB_CLK 35
+#define GCC_CAMSS_CSI0_CLK 36
+#define GCC_CAMSS_CSI0PHY_CLK 37
+#define GCC_CAMSS_CSI0PIX_CLK 38
+#define GCC_CAMSS_CSI0RDI_CLK 39
+#define GCC_CAMSS_CSI1_AHB_CLK 40
+#define GCC_CAMSS_CSI1_CLK 41
+#define GCC_CAMSS_CSI1PHY_CLK 42
+#define GCC_CAMSS_CSI1PIX_CLK 43
+#define GCC_CAMSS_CSI1RDI_CLK 44
+#define GCC_CAMSS_CSI2_AHB_CLK 45
+#define GCC_CAMSS_CSI2_CLK 46
+#define GCC_CAMSS_CSI2PHY_CLK 47
+#define GCC_CAMSS_CSI2PIX_CLK 48
+#define GCC_CAMSS_CSI2RDI_CLK 49
+#define GCC_CAMSS_CSI_VFE0_CLK 50
+#define GCC_CAMSS_CSI_VFE1_CLK 51
+#define GCC_CAMSS_GP0_CLK 52
+#define GCC_CAMSS_GP1_CLK 53
+#define GCC_CAMSS_ISPIF_AHB_CLK 54
+#define GCC_CAMSS_JPEG0_CLK 55
+#define GCC_CAMSS_JPEG_AHB_CLK 56
+#define GCC_CAMSS_JPEG_AXI_CLK 57
+#define GCC_CAMSS_MCLK0_CLK 58
+#define GCC_CAMSS_MCLK1_CLK 59
+#define GCC_CAMSS_MCLK2_CLK 60
+#define GCC_CAMSS_MICRO_AHB_CLK 61
+#define GCC_CAMSS_CSI0PHYTIMER_CLK 62
+#define GCC_CAMSS_CSI1PHYTIMER_CLK 63
+#define GCC_CAMSS_AHB_CLK 64
+#define GCC_CAMSS_TOP_AHB_CLK 65
+#define GCC_CAMSS_VFE0_CLK 66
+#define GCC_CAMSS_VFE_AHB_CLK 67
+#define GCC_CAMSS_VFE_AXI_CLK 68
+#define GCC_CAMSS_VFE1_AHB_CLK 69
+#define GCC_CAMSS_VFE1_AXI_CLK 70
+#define GCC_CAMSS_VFE1_CLK 71
+#define GCC_DCC_CLK 72
+#define GCC_GP1_CLK 73
+#define GCC_GP2_CLK 74
+#define GCC_GP3_CLK 75
+#define GCC_MDSS_AHB_CLK 76
+#define GCC_MDSS_AXI_CLK 77
+#define GCC_MDSS_ESC0_CLK 78
+#define GCC_MDSS_ESC1_CLK 79
+#define GCC_MDSS_MDP_CLK 80
+#define GCC_MDSS_VSYNC_CLK 81
+#define GCC_MSS_CFG_AHB_CLK 82
+#define GCC_MSS_Q6_BIMC_AXI_CLK 83
+#define GCC_PDM2_CLK 84
+#define GCC_PRNG_AHB_CLK 85
+#define GCC_PDM_AHB_CLK 86
+#define GCC_RBCPR_GFX_AHB_CLK 87
+#define GCC_RBCPR_GFX_CLK 88
+#define GCC_SDCC1_AHB_CLK 89
+#define GCC_SDCC1_APPS_CLK 90
+#define GCC_SDCC1_ICE_CORE_CLK 91
+#define GCC_SDCC2_AHB_CLK 92
+#define GCC_SDCC2_APPS_CLK 93
+#define GCC_SDCC3_AHB_CLK 94
+#define GCC_SDCC3_APPS_CLK 95
+#define GCC_USB2A_PHY_SLEEP_CLK 96
+#define GCC_USB_HS_PHY_CFG_AHB_CLK 97
+#define GCC_USB_FS_AHB_CLK 98
+#define GCC_USB_FS_IC_CLK 99
+#define GCC_USB_FS_SYSTEM_CLK 100
+#define GCC_USB_HS_AHB_CLK 101
+#define GCC_USB_HS_SYSTEM_CLK 102
+#define GCC_VENUS0_AHB_CLK 103
+#define GCC_VENUS0_AXI_CLK 104
+#define GCC_VENUS0_CORE0_VCODEC0_CLK 105
+#define GCC_VENUS0_CORE1_VCODEC0_CLK 106
+#define GCC_VENUS0_VCODEC0_CLK 107
+#define GCC_APSS_AHB_CLK 108
+#define GCC_APSS_AXI_CLK 109
+#define GCC_BLSP1_AHB_CLK 110
+#define GCC_BLSP2_AHB_CLK 111
+#define GCC_BOOT_ROM_AHB_CLK 112
+#define GCC_CRYPTO_AHB_CLK 113
+#define GCC_CRYPTO_AXI_CLK 114
+#define GCC_CRYPTO_CLK 115
+#define GCC_CPP_TBU_CLK 116
+#define GCC_APSS_TCU_CLK 117
+#define GCC_JPEG_TBU_CLK 118
+#define GCC_MDP_RT_TBU_CLK 119
+#define GCC_MDP_TBU_CLK 120
+#define GCC_SMMU_CFG_CLK 121
+#define GCC_VENUS_1_TBU_CLK 122
+#define GCC_VENUS_TBU_CLK 123
+#define GCC_VFE1_TBU_CLK 124
+#define GCC_VFE_TBU_CLK 125
+#define GCC_APS_0_CLK 126
+#define GCC_APS_1_CLK 127
+#define APS_0_CLK_SRC 128
+#define APS_1_CLK_SRC 129
+#define APSS_AHB_CLK_SRC 130
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 131
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 132
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 133
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 134
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 135
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 136
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 137
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 138
+#define BLSP1_UART1_APPS_CLK_SRC 139
+#define BLSP1_UART2_APPS_CLK_SRC 140
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC 141
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC 142
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC 143
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC 144
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC 145
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC 146
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC 147
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC 148
+#define BLSP2_UART1_APPS_CLK_SRC 149
+#define BLSP2_UART2_APPS_CLK_SRC 150
+#define CCI_CLK_SRC 151
+#define CPP_CLK_SRC 152
+#define CSI0_CLK_SRC 153
+#define CSI1_CLK_SRC 154
+#define CSI2_CLK_SRC 155
+#define CAMSS_GP0_CLK_SRC 156
+#define CAMSS_GP1_CLK_SRC 157
+#define JPEG0_CLK_SRC 158
+#define MCLK0_CLK_SRC 159
+#define MCLK1_CLK_SRC 160
+#define MCLK2_CLK_SRC 161
+#define CSI0PHYTIMER_CLK_SRC 162
+#define CSI1PHYTIMER_CLK_SRC 163
+#define CAMSS_TOP_AHB_CLK_SRC 164
+#define VFE0_CLK_SRC 165
+#define VFE1_CLK_SRC 166
+#define CRYPTO_CLK_SRC 167
+#define GP1_CLK_SRC 168
+#define GP2_CLK_SRC 169
+#define GP3_CLK_SRC 170
+#define ESC0_CLK_SRC 171
+#define ESC1_CLK_SRC 172
+#define MDP_CLK_SRC 173
+#define VSYNC_CLK_SRC 174
+#define PDM2_CLK_SRC 175
+#define RBCPR_GFX_CLK_SRC 176
+#define SDCC1_APPS_CLK_SRC 177
+#define SDCC1_ICE_CORE_CLK_SRC 178
+#define SDCC2_APPS_CLK_SRC 179
+#define SDCC3_APPS_CLK_SRC 180
+#define USB_FS_IC_CLK_SRC 181
+#define USB_FS_SYSTEM_CLK_SRC 182
+#define USB_HS_SYSTEM_CLK_SRC 183
+#define VCODEC0_CLK_SRC 184
+#define GCC_MDSS_BYTE0_CLK_SRC 185
+#define GCC_MDSS_BYTE1_CLK_SRC 186
+#define GCC_MDSS_BYTE0_CLK 187
+#define GCC_MDSS_BYTE1_CLK 188
+#define GCC_MDSS_PCLK0_CLK_SRC 189
+#define GCC_MDSS_PCLK1_CLK_SRC 190
+#define GCC_MDSS_PCLK0_CLK 191
+#define GCC_MDSS_PCLK1_CLK 192
+#define GCC_GFX3D_CLK_SRC 193
+#define GCC_GFX3D_OXILI_CLK 194
+#define GCC_GFX3D_BIMC_CLK 195
+#define GCC_GFX3D_OXILI_AHB_CLK 196
+#define GCC_GFX3D_OXILI_AON_CLK 197
+#define GCC_GFX3D_OXILI_GMEM_CLK 198
+#define GCC_GFX3D_OXILI_TIMER_CLK 199
+#define GCC_GFX3D_TBU0_CLK 200
+#define GCC_GFX3D_TBU1_CLK 201
+#define GCC_GFX3D_TCU_CLK 202
+#define GCC_GFX3D_GTCU_AHB_CLK 203
+
+/* GCC block resets */
+#define RST_CAMSS_MICRO_BCR 0
+#define RST_USB_HS_BCR 1
+#define RST_QUSB2_PHY_BCR 2
+#define RST_USB2_HS_PHY_ONLY_BCR 3
+#define RST_USB_HS_PHY_CFG_AHB_BCR 4
+#define RST_USB_FS_BCR 5
+#define RST_CAMSS_CSI1PIX_BCR 6
+#define RST_CAMSS_CSI_VFE1_BCR 7
+#define RST_CAMSS_VFE1_BCR 8
+#define RST_CAMSS_CPP_BCR 9
+
+/* GDSCs */
+#define VENUS_GDSC 0
+#define VENUS_CORE0_GDSC 1
+#define VENUS_CORE1_GDSC 2
+#define MDSS_GDSC 3
+#define JPEG_GDSC 4
+#define VFE0_GDSC 5
+#define VFE1_GDSC 6
+#define CPP_GDSC 7
+#define OXILI_GX_GDSC 8
+#define OXILI_CX_GDSC 9
+
+#endif /* _DT_BINDINGS_CLK_MSM_GCC_8976_H */
diff --git a/include/dt-bindings/clock/sun20i-d1-ccu.h b/include/dt-bindings/clock/sun20i-d1-ccu.h
new file mode 100644
index 000000000000..e3ac53315e1a
--- /dev/null
+++ b/include/dt-bindings/clock/sun20i-d1-ccu.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN20I_D1_CCU_H_
+#define _DT_BINDINGS_CLK_SUN20I_D1_CCU_H_
+
+#define CLK_PLL_CPUX 0
+#define CLK_PLL_DDR0 1
+#define CLK_PLL_PERIPH0_4X 2
+#define CLK_PLL_PERIPH0_2X 3
+#define CLK_PLL_PERIPH0_800M 4
+#define CLK_PLL_PERIPH0 5
+#define CLK_PLL_PERIPH0_DIV3 6
+#define CLK_PLL_VIDEO0_4X 7
+#define CLK_PLL_VIDEO0_2X 8
+#define CLK_PLL_VIDEO0 9
+#define CLK_PLL_VIDEO1_4X 10
+#define CLK_PLL_VIDEO1_2X 11
+#define CLK_PLL_VIDEO1 12
+#define CLK_PLL_VE 13
+#define CLK_PLL_AUDIO0_4X 14
+#define CLK_PLL_AUDIO0_2X 15
+#define CLK_PLL_AUDIO0 16
+#define CLK_PLL_AUDIO1 17
+#define CLK_PLL_AUDIO1_DIV2 18
+#define CLK_PLL_AUDIO1_DIV5 19
+#define CLK_CPUX 20
+#define CLK_CPUX_AXI 21
+#define CLK_CPUX_APB 22
+#define CLK_PSI_AHB 23
+#define CLK_APB0 24
+#define CLK_APB1 25
+#define CLK_MBUS 26
+#define CLK_DE 27
+#define CLK_BUS_DE 28
+#define CLK_DI 29
+#define CLK_BUS_DI 30
+#define CLK_G2D 31
+#define CLK_BUS_G2D 32
+#define CLK_CE 33
+#define CLK_BUS_CE 34
+#define CLK_VE 35
+#define CLK_BUS_VE 36
+#define CLK_BUS_DMA 37
+#define CLK_BUS_MSGBOX0 38
+#define CLK_BUS_MSGBOX1 39
+#define CLK_BUS_MSGBOX2 40
+#define CLK_BUS_SPINLOCK 41
+#define CLK_BUS_HSTIMER 42
+#define CLK_AVS 43
+#define CLK_BUS_DBG 44
+#define CLK_BUS_PWM 45
+#define CLK_BUS_IOMMU 46
+#define CLK_DRAM 47
+#define CLK_MBUS_DMA 48
+#define CLK_MBUS_VE 49
+#define CLK_MBUS_CE 50
+#define CLK_MBUS_TVIN 51
+#define CLK_MBUS_CSI 52
+#define CLK_MBUS_G2D 53
+#define CLK_MBUS_RISCV 54
+#define CLK_BUS_DRAM 55
+#define CLK_MMC0 56
+#define CLK_MMC1 57
+#define CLK_MMC2 58
+#define CLK_BUS_MMC0 59
+#define CLK_BUS_MMC1 60
+#define CLK_BUS_MMC2 61
+#define CLK_BUS_UART0 62
+#define CLK_BUS_UART1 63
+#define CLK_BUS_UART2 64
+#define CLK_BUS_UART3 65
+#define CLK_BUS_UART4 66
+#define CLK_BUS_UART5 67
+#define CLK_BUS_I2C0 68
+#define CLK_BUS_I2C1 69
+#define CLK_BUS_I2C2 70
+#define CLK_BUS_I2C3 71
+#define CLK_SPI0 72
+#define CLK_SPI1 73
+#define CLK_BUS_SPI0 74
+#define CLK_BUS_SPI1 75
+#define CLK_EMAC_25M 76
+#define CLK_BUS_EMAC 77
+#define CLK_IR_TX 78
+#define CLK_BUS_IR_TX 79
+#define CLK_BUS_GPADC 80
+#define CLK_BUS_THS 81
+#define CLK_I2S0 82
+#define CLK_I2S1 83
+#define CLK_I2S2 84
+#define CLK_I2S2_ASRC 85
+#define CLK_BUS_I2S0 86
+#define CLK_BUS_I2S1 87
+#define CLK_BUS_I2S2 88
+#define CLK_SPDIF_TX 89
+#define CLK_SPDIF_RX 90
+#define CLK_BUS_SPDIF 91
+#define CLK_DMIC 92
+#define CLK_BUS_DMIC 93
+#define CLK_AUDIO_DAC 94
+#define CLK_AUDIO_ADC 95
+#define CLK_BUS_AUDIO 96
+#define CLK_USB_OHCI0 97
+#define CLK_USB_OHCI1 98
+#define CLK_BUS_OHCI0 99
+#define CLK_BUS_OHCI1 100
+#define CLK_BUS_EHCI0 101
+#define CLK_BUS_EHCI1 102
+#define CLK_BUS_OTG 103
+#define CLK_BUS_LRADC 104
+#define CLK_BUS_DPSS_TOP 105
+#define CLK_HDMI_24M 106
+#define CLK_HDMI_CEC_32K 107
+#define CLK_HDMI_CEC 108
+#define CLK_BUS_HDMI 109
+#define CLK_MIPI_DSI 110
+#define CLK_BUS_MIPI_DSI 111
+#define CLK_TCON_LCD0 112
+#define CLK_BUS_TCON_LCD0 113
+#define CLK_TCON_TV 114
+#define CLK_BUS_TCON_TV 115
+#define CLK_TVE 116
+#define CLK_BUS_TVE_TOP 117
+#define CLK_BUS_TVE 118
+#define CLK_TVD 119
+#define CLK_BUS_TVD_TOP 120
+#define CLK_BUS_TVD 121
+#define CLK_LEDC 122
+#define CLK_BUS_LEDC 123
+#define CLK_CSI_TOP 124
+#define CLK_CSI_MCLK 125
+#define CLK_BUS_CSI 126
+#define CLK_TPADC 127
+#define CLK_BUS_TPADC 128
+#define CLK_BUS_TZMA 129
+#define CLK_DSP 130
+#define CLK_BUS_DSP_CFG 131
+#define CLK_RISCV 132
+#define CLK_RISCV_AXI 133
+#define CLK_BUS_RISCV_CFG 134
+#define CLK_FANOUT_24M 135
+#define CLK_FANOUT_12M 136
+#define CLK_FANOUT_16M 137
+#define CLK_FANOUT_25M 138
+#define CLK_FANOUT_32K 139
+#define CLK_FANOUT_27M 140
+#define CLK_FANOUT_PCLK 141
+#define CLK_FANOUT0 142
+#define CLK_FANOUT1 143
+#define CLK_FANOUT2 144
+
+#endif /* _DT_BINDINGS_CLK_SUN20I_D1_CCU_H_ */
diff --git a/include/dt-bindings/clock/sun20i-d1-r-ccu.h b/include/dt-bindings/clock/sun20i-d1-r-ccu.h
new file mode 100644
index 000000000000..4c2697fd32b0
--- /dev/null
+++ b/include/dt-bindings/clock/sun20i-d1-r-ccu.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_CLK_SUN20I_D1_R_CCU_H_
+#define _DT_BINDINGS_CLK_SUN20I_D1_R_CCU_H_
+
+#define CLK_R_AHB 0
+
+#define CLK_BUS_R_TIMER 2
+#define CLK_BUS_R_TWD 3
+#define CLK_BUS_R_PPU 4
+#define CLK_R_IR_RX 5
+#define CLK_BUS_R_IR_RX 6
+#define CLK_BUS_R_RTC 7
+#define CLK_BUS_R_CPUCFG 8
+
+#endif /* _DT_BINDINGS_CLK_SUN20I_D1_R_CCU_H_ */
diff --git a/include/dt-bindings/clock/toshiba,tmpv770x.h b/include/dt-bindings/clock/toshiba,tmpv770x.h
new file mode 100644
index 000000000000..5fce713001fd
--- /dev/null
+++ b/include/dt-bindings/clock/toshiba,tmpv770x.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_CLOCK_TOSHIBA_TMPV770X_H_
+#define _DT_BINDINGS_CLOCK_TOSHIBA_TMPV770X_H_
+
+/* PLL */
+#define TMPV770X_PLL_PIPLL0 0
+#define TMPV770X_PLL_PIPLL1 1
+#define TMPV770X_PLL_PIDNNPLL 2
+#define TMPV770X_PLL_PIETHERPLL 3
+#define TMPV770X_PLL_PIDDRCPLL 4
+#define TMPV770X_PLL_PIVOIFPLL 5
+#define TMPV770X_PLL_PIIMGERPLL 6
+#define TMPV770X_NR_PLL 7
+
+/* Clocks */
+#define TMPV770X_CLK_PIPLL1_DIV1 0
+#define TMPV770X_CLK_PIPLL1_DIV2 1
+#define TMPV770X_CLK_PIPLL1_DIV4 2
+#define TMPV770X_CLK_PIDNNPLL_DIV1 3
+#define TMPV770X_CLK_DDRC_PHY_PLL0 4
+#define TMPV770X_CLK_DDRC_PHY_PLL1 5
+#define TMPV770X_CLK_D_PHYPLL 6
+#define TMPV770X_CLK_PHY_PCIEPLL 7
+#define TMPV770X_CLK_CA53CL0 8
+#define TMPV770X_CLK_CA53CL1 9
+#define TMPV770X_CLK_PISDMAC 10
+#define TMPV770X_CLK_PIPDMAC0 11
+#define TMPV770X_CLK_PIPDMAC1 12
+#define TMPV770X_CLK_PIWRAM 13
+#define TMPV770X_CLK_DDRC0 14
+#define TMPV770X_CLK_DDRC0_SCLK 15
+#define TMPV770X_CLK_DDRC0_NCLK 16
+#define TMPV770X_CLK_DDRC0_MCLK 17
+#define TMPV770X_CLK_DDRC0_APBCLK 18
+#define TMPV770X_CLK_DDRC1 19
+#define TMPV770X_CLK_DDRC1_SCLK 20
+#define TMPV770X_CLK_DDRC1_NCLK 21
+#define TMPV770X_CLK_DDRC1_MCLK 22
+#define TMPV770X_CLK_DDRC1_APBCLK 23
+#define TMPV770X_CLK_HOX 24
+#define TMPV770X_CLK_PCIE_MSTR 25
+#define TMPV770X_CLK_PCIE_AUX 26
+#define TMPV770X_CLK_PIINTC 27
+#define TMPV770X_CLK_PIETHER_BUS 28
+#define TMPV770X_CLK_PISPI0 29
+#define TMPV770X_CLK_PISPI1 30
+#define TMPV770X_CLK_PISPI2 31
+#define TMPV770X_CLK_PISPI3 32
+#define TMPV770X_CLK_PISPI4 33
+#define TMPV770X_CLK_PISPI5 34
+#define TMPV770X_CLK_PISPI6 35
+#define TMPV770X_CLK_PIUART0 36
+#define TMPV770X_CLK_PIUART1 37
+#define TMPV770X_CLK_PIUART2 38
+#define TMPV770X_CLK_PIUART3 39
+#define TMPV770X_CLK_PII2C0 40
+#define TMPV770X_CLK_PII2C1 41
+#define TMPV770X_CLK_PII2C2 42
+#define TMPV770X_CLK_PII2C3 43
+#define TMPV770X_CLK_PII2C4 44
+#define TMPV770X_CLK_PII2C5 45
+#define TMPV770X_CLK_PII2C6 46
+#define TMPV770X_CLK_PII2C7 47
+#define TMPV770X_CLK_PII2C8 48
+#define TMPV770X_CLK_PIGPIO 49
+#define TMPV770X_CLK_PIPGM 50
+#define TMPV770X_CLK_PIPCMIF 51
+#define TMPV770X_CLK_PIPCMIF_AUDIO_O 52
+#define TMPV770X_CLK_PIPCMIF_AUDIO_I 53
+#define TMPV770X_CLK_PICMPT0 54
+#define TMPV770X_CLK_PICMPT1 55
+#define TMPV770X_CLK_PITSC 56
+#define TMPV770X_CLK_PIUWDT 57
+#define TMPV770X_CLK_PISWDT 58
+#define TMPV770X_CLK_WDTCLK 59
+#define TMPV770X_CLK_PISUBUS_150M 60
+#define TMPV770X_CLK_PISUBUS_300M 61
+#define TMPV770X_CLK_PIPMU 62
+#define TMPV770X_CLK_PIGPMU 63
+#define TMPV770X_CLK_PITMU 64
+#define TMPV770X_CLK_WRCK 65
+#define TMPV770X_CLK_PIEMM 66
+#define TMPV770X_CLK_PIMISC 67
+#define TMPV770X_CLK_PIGCOMM 68
+#define TMPV770X_CLK_PIDCOMM 69
+#define TMPV770X_CLK_PICKMON 70
+#define TMPV770X_CLK_PIMBUS 71
+#define TMPV770X_CLK_SBUSCLK 72
+#define TMPV770X_CLK_DDR0_APBCLKCLK 73
+#define TMPV770X_CLK_DDR1_APBCLKCLK 74
+#define TMPV770X_CLK_DSP0_PBCLK 75
+#define TMPV770X_CLK_DSP1_PBCLK 76
+#define TMPV770X_CLK_DSP2_PBCLK 77
+#define TMPV770X_CLK_DSP3_PBCLK 78
+#define TMPV770X_CLK_DSVIIF0_APBCLK 79
+#define TMPV770X_CLK_VIIF0_APBCLK 80
+#define TMPV770X_CLK_VIIF0_CFGCLK 81
+#define TMPV770X_CLK_VIIF1_APBCLK 82
+#define TMPV770X_CLK_VIIF1_CFGCLK 83
+#define TMPV770X_CLK_VIIF2_APBCLK 84
+#define TMPV770X_CLK_VIIF2_CFGCLK 85
+#define TMPV770X_CLK_VIIF3_APBCLK 86
+#define TMPV770X_CLK_VIIF3_CFGCLK 87
+#define TMPV770X_CLK_VIIF4_APBCLK 88
+#define TMPV770X_CLK_VIIF4_CFGCLK 89
+#define TMPV770X_CLK_VIIF5_APBCLK 90
+#define TMPV770X_CLK_VIIF5_CFGCLK 91
+#define TMPV770X_CLK_VOIF_SBUSCLK 92
+#define TMPV770X_CLK_VOIF_PROCCLK 93
+#define TMPV770X_CLK_VOIF_DPHYCFGCLK 94
+#define TMPV770X_CLK_DNN0 95
+#define TMPV770X_CLK_STMAT 96
+#define TMPV770X_CLK_HWA0 97
+#define TMPV770X_CLK_AFFINE0 98
+#define TMPV770X_CLK_HAMAT 99
+#define TMPV770X_CLK_SMLDB 100
+#define TMPV770X_CLK_HWA0_ASYNC 101
+#define TMPV770X_CLK_HWA2 102
+#define TMPV770X_CLK_FLMAT 103
+#define TMPV770X_CLK_PYRAMID 104
+#define TMPV770X_CLK_HWA2_ASYNC 105
+#define TMPV770X_CLK_DSP0 106
+#define TMPV770X_CLK_VIIFBS0 107
+#define TMPV770X_CLK_VIIFBS0_L2ISP 108
+#define TMPV770X_CLK_VIIFBS0_L1ISP 109
+#define TMPV770X_CLK_VIIFBS0_PROC 110
+#define TMPV770X_CLK_VIIFBS1 111
+#define TMPV770X_CLK_VIIFBS2 112
+#define TMPV770X_CLK_VIIFOP_MBUS 113
+#define TMPV770X_CLK_VIIFOP0_PROC 114
+#define TMPV770X_CLK_PIETHER_2P5M 115
+#define TMPV770X_CLK_PIETHER_25M 116
+#define TMPV770X_CLK_PIETHER_50M 117
+#define TMPV770X_CLK_PIETHER_125M 118
+#define TMPV770X_CLK_VOIF0_DPHYCFG 119
+#define TMPV770X_CLK_VOIF0_PROC 120
+#define TMPV770X_CLK_VOIF0_SBUS 121
+#define TMPV770X_CLK_VOIF0_DSIREF 122
+#define TMPV770X_CLK_VOIF0_PIXEL 123
+#define TMPV770X_CLK_PIREFCLK 124
+#define TMPV770X_CLK_SBUS 125
+#define TMPV770X_CLK_BUSLCK 126
+#define TMPV770X_NR_CLK 127
+
+/* Reset */
+#define TMPV770X_RESET_PIETHER_2P5M 0
+#define TMPV770X_RESET_PIETHER_25M 1
+#define TMPV770X_RESET_PIETHER_50M 2
+#define TMPV770X_RESET_PIETHER_125M 3
+#define TMPV770X_RESET_HOX 4
+#define TMPV770X_RESET_PCIE_MSTR 5
+#define TMPV770X_RESET_PCIE_AUX 6
+#define TMPV770X_RESET_PIINTC 7
+#define TMPV770X_RESET_PIETHER_BUS 8
+#define TMPV770X_RESET_PISPI0 9
+#define TMPV770X_RESET_PISPI1 10
+#define TMPV770X_RESET_PISPI2 11
+#define TMPV770X_RESET_PISPI3 12
+#define TMPV770X_RESET_PISPI4 13
+#define TMPV770X_RESET_PISPI5 14
+#define TMPV770X_RESET_PISPI6 15
+#define TMPV770X_RESET_PIUART0 16
+#define TMPV770X_RESET_PIUART1 17
+#define TMPV770X_RESET_PIUART2 18
+#define TMPV770X_RESET_PIUART3 19
+#define TMPV770X_RESET_PII2C0 20
+#define TMPV770X_RESET_PII2C1 21
+#define TMPV770X_RESET_PII2C2 22
+#define TMPV770X_RESET_PII2C3 23
+#define TMPV770X_RESET_PII2C4 24
+#define TMPV770X_RESET_PII2C5 25
+#define TMPV770X_RESET_PII2C6 26
+#define TMPV770X_RESET_PII2C7 27
+#define TMPV770X_RESET_PII2C8 28
+#define TMPV770X_RESET_PIPCMIF 29
+#define TMPV770X_RESET_PICKMON 30
+#define TMPV770X_RESET_SBUSCLK 31
+#define TMPV770X_NR_RESET 32
+
+#endif /* _DT_BINDINGS_CLOCK_TOSHIBA_TMPV770X_H_ */
diff --git a/include/dt-bindings/iio/addac/adi,ad74413r.h b/include/dt-bindings/iio/addac/adi,ad74413r.h
new file mode 100644
index 000000000000..204f92bbd79f
--- /dev/null
+++ b/include/dt-bindings/iio/addac/adi,ad74413r.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_ADI_AD74413R_H
+#define _DT_BINDINGS_ADI_AD74413R_H
+
+#define CH_FUNC_HIGH_IMPEDANCE 0x0
+#define CH_FUNC_VOLTAGE_OUTPUT 0x1
+#define CH_FUNC_CURRENT_OUTPUT 0x2
+#define CH_FUNC_VOLTAGE_INPUT 0x3
+#define CH_FUNC_CURRENT_INPUT_EXT_POWER 0x4
+#define CH_FUNC_CURRENT_INPUT_LOOP_POWER 0x5
+#define CH_FUNC_RESISTANCE_INPUT 0x6
+#define CH_FUNC_DIGITAL_INPUT_LOGIC 0x7
+#define CH_FUNC_DIGITAL_INPUT_LOOP_POWER 0x8
+#define CH_FUNC_CURRENT_INPUT_EXT_POWER_HART 0x9
+#define CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART 0xA
+
+#define CH_FUNC_MIN CH_FUNC_HIGH_IMPEDANCE
+#define CH_FUNC_MAX CH_FUNC_CURRENT_INPUT_LOOP_POWER_HART
+
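+/*
+ * Example devicetree usage (a sketch; the "adi,ch-func" property name is
+ * assumed from the ad74413r channel binding and is not defined in this
+ * header):
+ *
+ *	channel@0 {
+ *		reg = <0>;
+ *		adi,ch-func = <CH_FUNC_VOLTAGE_OUTPUT>;
+ *	};
+ */
+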
+#endif /* _DT_BINDINGS_ADI_AD74413R_H */
diff --git a/include/dt-bindings/interconnect/qcom,msm8996.h b/include/dt-bindings/interconnect/qcom,msm8996.h
new file mode 100644
index 000000000000..a0b7c0ec7bed
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8996.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Qualcomm MSM8996 interconnect IDs
+ *
+ * Copyright (c) 2021 Yassine Oudjana <y.oudjana@protonmail.com>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8996_H
+
+/* A0NOC */
+#define MASTER_PCIE_0 0
+#define MASTER_PCIE_1 1
+#define MASTER_PCIE_2 2
+
+/* A1NOC */
+#define MASTER_CNOC_A1NOC 0
+#define MASTER_CRYPTO_CORE0 1
+#define MASTER_PNOC_A1NOC 2
+
+/* A2NOC */
+#define MASTER_USB3 0
+#define MASTER_IPA 1
+#define MASTER_UFS 2
+
+/* BIMC */
+#define MASTER_AMPSS_M0 0
+#define MASTER_GRAPHICS_3D 1
+#define MASTER_MNOC_BIMC 2
+#define MASTER_SNOC_BIMC 3
+#define SLAVE_EBI_CH0 4
+#define SLAVE_HMSS_L3 5
+#define SLAVE_BIMC_SNOC_0 6
+#define SLAVE_BIMC_SNOC_1 7
+
+/* CNOC */
+#define MASTER_SNOC_CNOC 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_CNOC_A1NOC 2
+#define SLAVE_CLK_CTL 3
+#define SLAVE_TCSR 4
+#define SLAVE_TLMM 5
+#define SLAVE_CRYPTO_0_CFG 6
+#define SLAVE_MPM 7
+#define SLAVE_PIMEM_CFG 8
+#define SLAVE_IMEM_CFG 9
+#define SLAVE_MESSAGE_RAM 10
+#define SLAVE_BIMC_CFG 11
+#define SLAVE_PMIC_ARB 12
+#define SLAVE_PRNG 13
+#define SLAVE_DCC_CFG 14
+#define SLAVE_RBCPR_MX 15
+#define SLAVE_QDSS_CFG 16
+#define SLAVE_RBCPR_CX 17
+#define SLAVE_QDSS_RBCPR_APU 18
+#define SLAVE_CNOC_MNOC_CFG 19
+#define SLAVE_SNOC_CFG 20
+#define SLAVE_SNOC_MPU_CFG 21
+#define SLAVE_EBI1_PHY_CFG 22
+#define SLAVE_A0NOC_CFG 23
+#define SLAVE_PCIE_1_CFG 24
+#define SLAVE_PCIE_2_CFG 25
+#define SLAVE_PCIE_0_CFG 26
+#define SLAVE_PCIE20_AHB2PHY 27
+#define SLAVE_A0NOC_MPU_CFG 28
+#define SLAVE_UFS_CFG 29
+#define SLAVE_A1NOC_CFG 30
+#define SLAVE_A1NOC_MPU_CFG 31
+#define SLAVE_A2NOC_CFG 32
+#define SLAVE_A2NOC_MPU_CFG 33
+#define SLAVE_SSC_CFG 34
+#define SLAVE_A0NOC_SMMU_CFG 35
+#define SLAVE_A1NOC_SMMU_CFG 36
+#define SLAVE_A2NOC_SMMU_CFG 37
+#define SLAVE_LPASS_SMMU_CFG 38
+#define SLAVE_CNOC_MNOC_MMSS_CFG 39
+
+/* MNOC */
+#define MASTER_CNOC_MNOC_CFG 0
+#define MASTER_CPP 1
+#define MASTER_JPEG 2
+#define MASTER_MDP_PORT0 3
+#define MASTER_MDP_PORT1 4
+#define MASTER_ROTATOR 5
+#define MASTER_VIDEO_P0 6
+#define MASTER_VFE 7
+#define MASTER_SNOC_VMEM 8
+#define MASTER_VIDEO_P0_OCMEM 9
+#define MASTER_CNOC_MNOC_MMSS_CFG 10
+#define SLAVE_MNOC_BIMC 11
+#define SLAVE_VMEM 12
+#define SLAVE_SERVICE_MNOC 13
+#define SLAVE_MMAGIC_CFG 14
+#define SLAVE_CPR_CFG 15
+#define SLAVE_MISC_CFG 16
+#define SLAVE_VENUS_THROTTLE_CFG 17
+#define SLAVE_VENUS_CFG 18
+#define SLAVE_VMEM_CFG 19
+#define SLAVE_DSA_CFG 20
+#define SLAVE_MMSS_CLK_CFG 21
+#define SLAVE_DSA_MPU_CFG 22
+#define SLAVE_MNOC_MPU_CFG 23
+#define SLAVE_DISPLAY_CFG 24
+#define SLAVE_DISPLAY_THROTTLE_CFG 25
+#define SLAVE_CAMERA_CFG 26
+#define SLAVE_CAMERA_THROTTLE_CFG 27
+#define SLAVE_GRAPHICS_3D_CFG 28
+#define SLAVE_SMMU_MDP_CFG 29
+#define SLAVE_SMMU_ROT_CFG 30
+#define SLAVE_SMMU_VENUS_CFG 31
+#define SLAVE_SMMU_CPP_CFG 32
+#define SLAVE_SMMU_JPEG_CFG 33
+#define SLAVE_SMMU_VFE_CFG 34
+
+/* PNOC */
+#define MASTER_SNOC_PNOC 0
+#define MASTER_SDCC_1 1
+#define MASTER_SDCC_2 2
+#define MASTER_SDCC_4 3
+#define MASTER_USB_HS 4
+#define MASTER_BLSP_1 5
+#define MASTER_BLSP_2 6
+#define MASTER_TSIF 7
+#define SLAVE_PNOC_A1NOC 8
+#define SLAVE_USB_HS 9
+#define SLAVE_SDCC_2 10
+#define SLAVE_SDCC_4 11
+#define SLAVE_TSIF 12
+#define SLAVE_BLSP_2 13
+#define SLAVE_SDCC_1 14
+#define SLAVE_BLSP_1 15
+#define SLAVE_PDM 16
+#define SLAVE_AHB2PHY 17
+
+/* SNOC */
+#define MASTER_HMSS 0
+#define MASTER_QDSS_BAM 1
+#define MASTER_SNOC_CFG 2
+#define MASTER_BIMC_SNOC_0 3
+#define MASTER_BIMC_SNOC_1 4
+#define MASTER_A0NOC_SNOC 5
+#define MASTER_A1NOC_SNOC 6
+#define MASTER_A2NOC_SNOC 7
+#define MASTER_QDSS_ETR 8
+#define SLAVE_A0NOC_SNOC 9
+#define SLAVE_A1NOC_SNOC 10
+#define SLAVE_A2NOC_SNOC 11
+#define SLAVE_HMSS 12
+#define SLAVE_LPASS 13
+#define SLAVE_USB3 14
+#define SLAVE_SNOC_BIMC 15
+#define SLAVE_SNOC_CNOC 16
+#define SLAVE_IMEM 17
+#define SLAVE_PIMEM 18
+#define SLAVE_SNOC_VMEM 19
+#define SLAVE_SNOC_PNOC 20
+#define SLAVE_QDSS_STM 21
+#define SLAVE_PCIE_0 22
+#define SLAVE_PCIE_1 23
+#define SLAVE_PCIE_2 24
+#define SLAVE_SERVICE_SNOC 25
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,qcm2290.h b/include/dt-bindings/interconnect/qcom,qcm2290.h
new file mode 100644
index 000000000000..6cbbb7fe0bd3
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,qcm2290.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* QCM2290 interconnect IDs */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_QCM2290_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_QCM2290_H
+
+/* BIMC */
+#define MASTER_APPSS_PROC 0
+#define MASTER_SNOC_BIMC_RT 1
+#define MASTER_SNOC_BIMC_NRT 2
+#define MASTER_SNOC_BIMC 3
+#define MASTER_TCU_0 4
+#define MASTER_GFX3D 5
+#define SLAVE_EBI1 6
+#define SLAVE_BIMC_SNOC 7
+
+/* CNOC */
+#define MASTER_SNOC_CNOC 0
+#define MASTER_QDSS_DAP 1
+#define SLAVE_BIMC_CFG 2
+#define SLAVE_CAMERA_NRT_THROTTLE_CFG 3
+#define SLAVE_CAMERA_RT_THROTTLE_CFG 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CRYPTO_0_CFG 7
+#define SLAVE_DISPLAY_CFG 8
+#define SLAVE_DISPLAY_THROTTLE_CFG 9
+#define SLAVE_GPU_CFG 10
+#define SLAVE_HWKM 11
+#define SLAVE_IMEM_CFG 12
+#define SLAVE_IPA_CFG 13
+#define SLAVE_LPASS 14
+#define SLAVE_MESSAGE_RAM 15
+#define SLAVE_PDM 16
+#define SLAVE_PIMEM_CFG 17
+#define SLAVE_PKA_WRAPPER 18
+#define SLAVE_PMIC_ARB 19
+#define SLAVE_PRNG 20
+#define SLAVE_QDSS_CFG 21
+#define SLAVE_QM_CFG 22
+#define SLAVE_QM_MPU_CFG 23
+#define SLAVE_QPIC 24
+#define SLAVE_QUP_0 25
+#define SLAVE_SDCC_1 26
+#define SLAVE_SDCC_2 27
+#define SLAVE_SNOC_CFG 28
+#define SLAVE_TCSR 29
+#define SLAVE_USB3 30
+#define SLAVE_VENUS_CFG 31
+#define SLAVE_VENUS_THROTTLE_CFG 32
+#define SLAVE_VSENSE_CTRL_CFG 33
+#define SLAVE_SERVICE_CNOC 34
+
+/* SNOC */
+#define MASTER_CRYPTO_CORE0 0
+#define MASTER_SNOC_CFG 1
+#define MASTER_TIC 2
+#define MASTER_ANOC_SNOC 3
+#define MASTER_BIMC_SNOC 4
+#define MASTER_PIMEM 5
+#define MASTER_QDSS_BAM 6
+#define MASTER_QUP_0 7
+#define MASTER_IPA 8
+#define MASTER_QDSS_ETR 9
+#define MASTER_SDCC_1 10
+#define MASTER_SDCC_2 11
+#define MASTER_QPIC 12
+#define MASTER_USB3_0 13
+#define SLAVE_APPSS 14
+#define SLAVE_SNOC_CNOC 15
+#define SLAVE_IMEM 16
+#define SLAVE_PIMEM 17
+#define SLAVE_SNOC_BIMC 18
+#define SLAVE_SERVICE_SNOC 19
+#define SLAVE_QDSS_STM 20
+#define SLAVE_TCU 21
+#define SLAVE_ANOC_SNOC 22
+
+/* QUP Virtual */
+#define MASTER_QUP_CORE_0 0
+#define SLAVE_QUP_CORE_0 1
+
+/* MMNRT Virtual */
+#define MASTER_CAMNOC_SF 0
+#define MASTER_VIDEO_P0 1
+#define MASTER_VIDEO_PROC 2
+#define SLAVE_SNOC_BIMC_NRT 3
+
+/* MMRT Virtual */
+#define MASTER_CAMNOC_HF 0
+#define MASTER_MDP0 1
+#define SLAVE_SNOC_BIMC_RT 2
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,sm8450.h b/include/dt-bindings/interconnect/qcom,sm8450.h
new file mode 100644
index 000000000000..8f3c5e1fb4c4
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,sm8450.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SM8450_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_SM8450_H
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_1 1
+#define MASTER_A1NOC_CFG 2
+#define MASTER_SDCC_4 3
+#define MASTER_UFS_MEM 4
+#define MASTER_USB3_0 5
+#define SLAVE_A1NOC_SNOC 6
+#define SLAVE_SERVICE_A1NOC 7
+
+#define MASTER_QDSS_BAM 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_2 2
+#define MASTER_A2NOC_CFG 3
+#define MASTER_CRYPTO 4
+#define MASTER_IPA 5
+#define MASTER_SENSORS_PROC 6
+#define MASTER_SP 7
+#define MASTER_QDSS_ETR 8
+#define MASTER_QDSS_ETR_1 9
+#define MASTER_SDCC_2 10
+#define SLAVE_A2NOC_SNOC 11
+#define SLAVE_SERVICE_A2NOC 12
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_GEM_NOC_CNOC 0
+#define MASTER_GEM_NOC_PCIE_SNOC 1
+#define SLAVE_AHB2PHY_SOUTH 2
+#define SLAVE_AHB2PHY_NORTH 3
+#define SLAVE_AOSS 4
+#define SLAVE_CAMERA_CFG 5
+#define SLAVE_CLK_CTL 6
+#define SLAVE_CDSP_CFG 7
+#define SLAVE_RBCPR_CX_CFG 8
+#define SLAVE_RBCPR_MMCX_CFG 9
+#define SLAVE_RBCPR_MXA_CFG 10
+#define SLAVE_RBCPR_MXC_CFG 11
+#define SLAVE_CRYPTO_0_CFG 12
+#define SLAVE_CX_RDPM 13
+#define SLAVE_DISPLAY_CFG 14
+#define SLAVE_GFX3D_CFG 15
+#define SLAVE_IMEM_CFG 16
+#define SLAVE_IPA_CFG 17
+#define SLAVE_IPC_ROUTER_CFG 18
+#define SLAVE_LPASS 19
+#define SLAVE_CNOC_MSS 20
+#define SLAVE_MX_RDPM 21
+#define SLAVE_PCIE_0_CFG 22
+#define SLAVE_PCIE_1_CFG 23
+#define SLAVE_PDM 24
+#define SLAVE_PIMEM_CFG 25
+#define SLAVE_PRNG 26
+#define SLAVE_QDSS_CFG 27
+#define SLAVE_QSPI_0 28
+#define SLAVE_QUP_0 29
+#define SLAVE_QUP_1 30
+#define SLAVE_QUP_2 31
+#define SLAVE_SDCC_2 32
+#define SLAVE_SDCC_4 33
+#define SLAVE_SPSS_CFG 34
+#define SLAVE_TCSR 35
+#define SLAVE_TLMM 36
+#define SLAVE_TME_CFG 37
+#define SLAVE_UFS_MEM_CFG 38
+#define SLAVE_USB3_0 39
+#define SLAVE_VENUS_CFG 40
+#define SLAVE_VSENSE_CTRL_CFG 41
+#define SLAVE_A1NOC_CFG 42
+#define SLAVE_A2NOC_CFG 43
+#define SLAVE_DDRSS_CFG 44
+#define SLAVE_CNOC_MNOC_CFG 45
+#define SLAVE_PCIE_ANOC_CFG 46
+#define SLAVE_SNOC_CFG 47
+#define SLAVE_IMEM 48
+#define SLAVE_PIMEM 49
+#define SLAVE_SERVICE_CNOC 50
+#define SLAVE_PCIE_0 51
+#define SLAVE_PCIE_1 52
+#define SLAVE_QDSS_STM 53
+#define SLAVE_TCU 54
+
+#define MASTER_GPU_TCU 0
+#define MASTER_SYS_TCU 1
+#define MASTER_APPSS_PROC 2
+#define MASTER_GFX3D 3
+#define MASTER_MSS_PROC 4
+#define MASTER_MNOC_HF_MEM_NOC 5
+#define MASTER_MNOC_SF_MEM_NOC 6
+#define MASTER_COMPUTE_NOC 7
+#define MASTER_ANOC_PCIE_GEM_NOC 8
+#define MASTER_SNOC_GC_MEM_NOC 9
+#define MASTER_SNOC_SF_MEM_NOC 10
+#define SLAVE_GEM_NOC_CNOC 11
+#define SLAVE_LLCC 12
+#define SLAVE_MEM_NOC_PCIE_SNOC 13
+#define MASTER_MNOC_HF_MEM_NOC_DISP 14
+#define MASTER_MNOC_SF_MEM_NOC_DISP 15
+#define MASTER_ANOC_PCIE_GEM_NOC_DISP 16
+#define SLAVE_LLCC_DISP 17
+
+#define MASTER_CNOC_LPASS_AG_NOC 0
+#define MASTER_LPASS_PROC 1
+#define SLAVE_LPASS_CORE_CFG 2
+#define SLAVE_LPASS_LPI_CFG 3
+#define SLAVE_LPASS_MPU_CFG 4
+#define SLAVE_LPASS_TOP_CFG 5
+#define SLAVE_LPASS_SNOC 6
+#define SLAVE_SERVICES_LPASS_AML_NOC 7
+#define SLAVE_SERVICE_LPASS_AG_NOC 8
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+#define MASTER_LLCC_DISP 2
+#define SLAVE_EBI1_DISP 3
+
+#define MASTER_CAMNOC_HF 0
+#define MASTER_CAMNOC_ICP 1
+#define MASTER_CAMNOC_SF 2
+#define MASTER_MDP 3
+#define MASTER_CNOC_MNOC_CFG 4
+#define MASTER_ROTATOR 5
+#define MASTER_CDSP_HCP 6
+#define MASTER_VIDEO 7
+#define MASTER_VIDEO_CV_PROC 8
+#define MASTER_VIDEO_PROC 9
+#define MASTER_VIDEO_V_PROC 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_MNOC_SF_MEM_NOC 12
+#define SLAVE_SERVICE_MNOC 13
+#define MASTER_MDP_DISP 14
+#define MASTER_ROTATOR_DISP 15
+#define SLAVE_MNOC_HF_MEM_NOC_DISP 16
+#define SLAVE_MNOC_SF_MEM_NOC_DISP 17
+
+#define MASTER_CDSP_NOC_CFG 0
+#define MASTER_CDSP_PROC 1
+#define SLAVE_CDSP_MEM_NOC 2
+#define SLAVE_SERVICE_NSP_NOC 3
+
+#define MASTER_PCIE_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define SLAVE_ANOC_PCIE_GEM_NOC 3
+#define SLAVE_SERVICE_PCIE_ANOC 4
+
+#define MASTER_GIC_AHB 0
+#define MASTER_A1NOC_SNOC 1
+#define MASTER_A2NOC_SNOC 2
+#define MASTER_LPASS_ANOC 3
+#define MASTER_SNOC_CFG 4
+#define MASTER_PIMEM 5
+#define MASTER_GIC 6
+#define SLAVE_SNOC_GEM_NOC_GC 7
+#define SLAVE_SNOC_GEM_NOC_SF 8
+#define SLAVE_SERVICE_SNOC 9
+
+#endif
diff --git a/include/dt-bindings/mailbox/qcom-ipcc.h b/include/dt-bindings/mailbox/qcom-ipcc.h
index eb91a6c05b71..9296d0bb5f34 100644
--- a/include/dt-bindings/mailbox/qcom-ipcc.h
+++ b/include/dt-bindings/mailbox/qcom-ipcc.h
@@ -8,6 +8,7 @@
/* Signal IDs for MPROC protocol */
#define IPCC_MPROC_SIGNAL_GLINK_QMP 0
+#define IPCC_MPROC_SIGNAL_TZ 1
#define IPCC_MPROC_SIGNAL_SMP2P 2
#define IPCC_MPROC_SIGNAL_PING 3
@@ -29,6 +30,7 @@
#define IPCC_CLIENT_PCIE1 14
#define IPCC_CLIENT_PCIE2 15
#define IPCC_CLIENT_SPSS 16
+#define IPCC_CLIENT_TME 23
#define IPCC_CLIENT_WPSS 24
#endif
diff --git a/include/dt-bindings/mux/ti-serdes.h b/include/dt-bindings/mux/ti-serdes.h
index d417b9268b16..d3116c52ab72 100644
--- a/include/dt-bindings/mux/ti-serdes.h
+++ b/include/dt-bindings/mux/ti-serdes.h
@@ -95,4 +95,26 @@
#define AM64_SERDES0_LANE0_PCIE0 0x0
#define AM64_SERDES0_LANE0_USB 0x1
+/* J721S2 */
+
+#define J721S2_SERDES0_LANE0_EDP_LANE0 0x0
+#define J721S2_SERDES0_LANE0_PCIE1_LANE0 0x1
+#define J721S2_SERDES0_LANE0_IP3_UNUSED 0x2
+#define J721S2_SERDES0_LANE0_IP4_UNUSED 0x3
+
+#define J721S2_SERDES0_LANE1_EDP_LANE1 0x0
+#define J721S2_SERDES0_LANE1_PCIE1_LANE1 0x1
+#define J721S2_SERDES0_LANE1_USB 0x2
+#define J721S2_SERDES0_LANE1_IP4_UNUSED 0x3
+
+#define J721S2_SERDES0_LANE2_EDP_LANE2 0x0
+#define J721S2_SERDES0_LANE2_PCIE1_LANE2 0x1
+#define J721S2_SERDES0_LANE2_IP3_UNUSED 0x2
+#define J721S2_SERDES0_LANE2_IP4_UNUSED 0x3
+
+#define J721S2_SERDES0_LANE3_EDP_LANE3 0x0
+#define J721S2_SERDES0_LANE3_PCIE1_LANE3 0x1
+#define J721S2_SERDES0_LANE3_USB 0x2
+#define J721S2_SERDES0_LANE3_IP4_UNUSED 0x3
+
#endif /* _DT_BINDINGS_MUX_TI_SERDES */
diff --git a/include/dt-bindings/phy/phy-cadence.h b/include/dt-bindings/phy/phy-cadence.h
index 24fdc9e11bd6..0671991208fc 100644
--- a/include/dt-bindings/phy/phy-cadence.h
+++ b/include/dt-bindings/phy/phy-cadence.h
@@ -6,11 +6,11 @@
#ifndef _DT_BINDINGS_CADENCE_SERDES_H
#define _DT_BINDINGS_CADENCE_SERDES_H
-/* Torrent */
-#define TORRENT_SERDES_NO_SSC 0
-#define TORRENT_SERDES_EXTERNAL_SSC 1
-#define TORRENT_SERDES_INTERNAL_SSC 2
+#define CDNS_SERDES_NO_SSC 0
+#define CDNS_SERDES_EXTERNAL_SSC 1
+#define CDNS_SERDES_INTERNAL_SSC 2
+/* Torrent */
#define CDNS_TORRENT_REFCLK_DRIVER 0
#define CDNS_TORRENT_DERIVED_REFCLK 1
#define CDNS_TORRENT_RECEIVED_REFCLK 2
@@ -18,5 +18,6 @@
/* Sierra */
#define CDNS_SIERRA_PLL_CMNLC 0
#define CDNS_SIERRA_PLL_CMNLC1 1
+#define CDNS_SIERRA_DERIVED_REFCLK 2
#endif /* _DT_BINDINGS_CADENCE_SERDES_H */
diff --git a/include/dt-bindings/phy/phy-imx8-pcie.h b/include/dt-bindings/phy/phy-imx8-pcie.h
new file mode 100644
index 000000000000..8bbe2d6538d8
--- /dev/null
+++ b/include/dt-bindings/phy/phy-imx8-pcie.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * This header provides constants for i.MX8 PCIe.
+ */
+
+#ifndef _DT_BINDINGS_IMX8_PCIE_H
+#define _DT_BINDINGS_IMX8_PCIE_H
+
+/* Reference clock PAD mode */
+#define IMX8_PCIE_REFCLK_PAD_UNUSED 0
+#define IMX8_PCIE_REFCLK_PAD_INPUT 1
+#define IMX8_PCIE_REFCLK_PAD_OUTPUT 2
+
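+/*
+ * Example devicetree usage (a sketch; the "fsl,refclk-pad-mode" property
+ * name is assumed from the i.MX8 PCIe PHY binding, not defined here):
+ *
+ *	fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_INPUT>;
+ */
+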
+#endif /* _DT_BINDINGS_IMX8_PCIE_H */
diff --git a/include/dt-bindings/phy/phy-lan966x-serdes.h b/include/dt-bindings/phy/phy-lan966x-serdes.h
new file mode 100644
index 000000000000..4330269a901e
--- /dev/null
+++ b/include/dt-bindings/phy/phy-lan966x-serdes.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+
+#ifndef __PHY_LAN966X_SERDES_H__
+#define __PHY_LAN966X_SERDES_H__
+
+#define CU(x) (x)
+#define CU_MAX CU(2)
+#define SERDES6G(x) (CU_MAX + 1 + (x))
+#define SERDES6G_MAX SERDES6G(3)
+#define RGMII(x) (SERDES6G_MAX + 1 + (x))
+#define RGMII_MAX RGMII(2)
+#define SERDES_MAX (RGMII_MAX + 1)
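+
+/*
+ * For reference, the macros above produce this contiguous index map:
+ * CU(0)..CU(2) = 0..2, SERDES6G(0)..SERDES6G(3) = 3..6,
+ * RGMII(0)..RGMII(2) = 7..9, SERDES_MAX = 10.
+ */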
+
+#endif
diff --git a/include/dt-bindings/reset/sun20i-d1-ccu.h b/include/dt-bindings/reset/sun20i-d1-ccu.h
new file mode 100644
index 000000000000..de9ff5203239
--- /dev/null
+++ b/include/dt-bindings/reset/sun20i-d1-ccu.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2020 huangzhenwei@allwinnertech.com
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN20I_D1_CCU_H_
+#define _DT_BINDINGS_RST_SUN20I_D1_CCU_H_
+
+#define RST_MBUS 0
+#define RST_BUS_DE 1
+#define RST_BUS_DI 2
+#define RST_BUS_G2D 3
+#define RST_BUS_CE 4
+#define RST_BUS_VE 5
+#define RST_BUS_DMA 6
+#define RST_BUS_MSGBOX0 7
+#define RST_BUS_MSGBOX1 8
+#define RST_BUS_MSGBOX2 9
+#define RST_BUS_SPINLOCK 10
+#define RST_BUS_HSTIMER 11
+#define RST_BUS_DBG 12
+#define RST_BUS_PWM 13
+#define RST_BUS_DRAM 14
+#define RST_BUS_MMC0 15
+#define RST_BUS_MMC1 16
+#define RST_BUS_MMC2 17
+#define RST_BUS_UART0 18
+#define RST_BUS_UART1 19
+#define RST_BUS_UART2 20
+#define RST_BUS_UART3 21
+#define RST_BUS_UART4 22
+#define RST_BUS_UART5 23
+#define RST_BUS_I2C0 24
+#define RST_BUS_I2C1 25
+#define RST_BUS_I2C2 26
+#define RST_BUS_I2C3 27
+#define RST_BUS_SPI0 28
+#define RST_BUS_SPI1 29
+#define RST_BUS_EMAC 30
+#define RST_BUS_IR_TX 31
+#define RST_BUS_GPADC 32
+#define RST_BUS_THS 33
+#define RST_BUS_I2S0 34
+#define RST_BUS_I2S1 35
+#define RST_BUS_I2S2 36
+#define RST_BUS_SPDIF 37
+#define RST_BUS_DMIC 38
+#define RST_BUS_AUDIO 39
+#define RST_USB_PHY0 40
+#define RST_USB_PHY1 41
+#define RST_BUS_OHCI0 42
+#define RST_BUS_OHCI1 43
+#define RST_BUS_EHCI0 44
+#define RST_BUS_EHCI1 45
+#define RST_BUS_OTG 46
+#define RST_BUS_LRADC 47
+#define RST_BUS_DPSS_TOP 48
+#define RST_BUS_HDMI_SUB 49
+#define RST_BUS_HDMI_MAIN 50
+#define RST_BUS_MIPI_DSI 51
+#define RST_BUS_TCON_LCD0 52
+#define RST_BUS_TCON_TV 53
+#define RST_BUS_LVDS0 54
+#define RST_BUS_TVE 55
+#define RST_BUS_TVE_TOP 56
+#define RST_BUS_TVD 57
+#define RST_BUS_TVD_TOP 58
+#define RST_BUS_LEDC 59
+#define RST_BUS_CSI 60
+#define RST_BUS_TPADC 61
+#define RST_DSP 62
+#define RST_BUS_DSP_CFG 63
+#define RST_BUS_DSP_DBG 64
+#define RST_BUS_RISCV_CFG 65
+
+#endif /* _DT_BINDINGS_RST_SUN20I_D1_CCU_H_ */
diff --git a/include/dt-bindings/reset/sun20i-d1-r-ccu.h b/include/dt-bindings/reset/sun20i-d1-r-ccu.h
new file mode 100644
index 000000000000..d93d6423d283
--- /dev/null
+++ b/include/dt-bindings/reset/sun20i-d1-r-ccu.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
+ */
+
+#ifndef _DT_BINDINGS_RST_SUN20I_D1_R_CCU_H_
+#define _DT_BINDINGS_RST_SUN20I_D1_R_CCU_H_
+
+#define RST_BUS_R_TIMER 0
+#define RST_BUS_R_TWD 1
+#define RST_BUS_R_PPU 2
+#define RST_BUS_R_IR_RX 3
+#define RST_BUS_R_RTC 4
+#define RST_BUS_R_CPUCFG 5
+
+#endif /* _DT_BINDINGS_RST_SUN20I_D1_R_CCU_H_ */
diff --git a/include/dt-bindings/reset/toshiba,tmpv770x.h b/include/dt-bindings/reset/toshiba,tmpv770x.h
new file mode 100644
index 000000000000..c1007acb1941
--- /dev/null
+++ b/include/dt-bindings/reset/toshiba,tmpv770x.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+
+#ifndef _DT_BINDINGS_RESET_TOSHIBA_TMPV770X_H_
+#define _DT_BINDINGS_RESET_TOSHIBA_TMPV770X_H_
+
+/* Reset */
+#define TMPV770X_RESET_PIETHER_2P5M 0
+#define TMPV770X_RESET_PIETHER_25M 1
+#define TMPV770X_RESET_PIETHER_50M 2
+#define TMPV770X_RESET_PIETHER_125M 3
+#define TMPV770X_RESET_HOX 4
+#define TMPV770X_RESET_PCIE_MSTR 5
+#define TMPV770X_RESET_PCIE_AUX 6
+#define TMPV770X_RESET_PIINTC 7
+#define TMPV770X_RESET_PIETHER_BUS 8
+#define TMPV770X_RESET_PISPI0 9
+#define TMPV770X_RESET_PISPI1 10
+#define TMPV770X_RESET_PISPI2 11
+#define TMPV770X_RESET_PISPI3 12
+#define TMPV770X_RESET_PISPI4 13
+#define TMPV770X_RESET_PISPI5 14
+#define TMPV770X_RESET_PISPI6 15
+#define TMPV770X_RESET_PIUART0 16
+#define TMPV770X_RESET_PIUART1 17
+#define TMPV770X_RESET_PIUART2 18
+#define TMPV770X_RESET_PIUART3 19
+#define TMPV770X_RESET_PII2C0 20
+#define TMPV770X_RESET_PII2C1 21
+#define TMPV770X_RESET_PII2C2 22
+#define TMPV770X_RESET_PII2C3 23
+#define TMPV770X_RESET_PII2C4 24
+#define TMPV770X_RESET_PII2C5 25
+#define TMPV770X_RESET_PII2C6 26
+#define TMPV770X_RESET_PII2C7 27
+#define TMPV770X_RESET_PII2C8 28
+#define TMPV770X_RESET_PIPCMIF 29
+#define TMPV770X_RESET_PICKMON 30
+#define TMPV770X_RESET_SBUSCLK 31
+#define TMPV770X_NR_RESET 32
+
+#endif /* _DT_BINDINGS_RESET_TOSHIBA_TMPV770X_H_ */
diff --git a/include/dt-bindings/sound/rt5640.h b/include/dt-bindings/sound/rt5640.h
index 154c9b4414f2..655f6946388a 100644
--- a/include/dt-bindings/sound/rt5640.h
+++ b/include/dt-bindings/sound/rt5640.h
@@ -16,6 +16,7 @@
#define RT5640_JD_SRC_GPIO2 4
#define RT5640_JD_SRC_GPIO3 5
#define RT5640_JD_SRC_GPIO4 6
+#define RT5640_JD_SRC_HDA_HEADER 7
#define RT5640_OVCD_SF_0P5 0
#define RT5640_OVCD_SF_0P75 1
diff --git a/include/dt-bindings/sound/tlv320adc3xxx.h b/include/dt-bindings/sound/tlv320adc3xxx.h
new file mode 100644
index 000000000000..ec988439da20
--- /dev/null
+++ b/include/dt-bindings/sound/tlv320adc3xxx.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Devicetree bindings definitions for tlv320adc3xxx driver.
+ *
+ * Copyright (C) 2021 Axis Communications AB
+ */
+#ifndef __DT_TLV320ADC3XXX_H
+#define __DT_TLV320ADC3XXX_H
+
+#define ADC3XXX_GPIO_DISABLED 0 /* I/O buffers powered down */
+#define ADC3XXX_GPIO_INPUT 1 /* Various non-GPIO inputs */
+#define ADC3XXX_GPIO_GPI 2 /* General purpose input */
+#define ADC3XXX_GPIO_GPO 3 /* General purpose output */
+#define ADC3XXX_GPIO_CLKOUT 4 /* Source set in reg. CLKOUT_MUX */
+#define ADC3XXX_GPIO_INT1 5 /* INT1 output */
+#define ADC3XXX_GPIO_INT2 6 /* INT2 output */
+/* value 7 is reserved */
+#define ADC3XXX_GPIO_SECONDARY_BCLK 8 /* Codec interface secondary BCLK */
+#define ADC3XXX_GPIO_SECONDARY_WCLK 9 /* Codec interface secondary WCLK */
+#define ADC3XXX_GPIO_ADC_MOD_CLK 10 /* Clock output for digital mics */
+/* values 11-15 reserved */
+
+#define ADC3XXX_MICBIAS_OFF 0 /* Micbias pin powered off */
+#define ADC3XXX_MICBIAS_2_0V 1 /* Micbias pin set to 2.0V */
+#define ADC3XXX_MICBIAS_2_5V 2 /* Micbias pin set to 2.5V */
+#define ADC3XXX_MICBIAS_AVDD 3 /* Use AVDD voltage for micbias pin */
+
+#endif /* __DT_TLV320ADC3XXX_H */
diff --git a/include/kunit/assert.h b/include/kunit/assert.h
index ad889b539ab3..ccbc36c0b02f 100644
--- a/include/kunit/assert.h
+++ b/include/kunit/assert.h
@@ -10,7 +10,7 @@
#define _KUNIT_ASSERT_H
#include <linux/err.h>
-#include <linux/kernel.h>
+#include <linux/printk.h>
struct kunit;
struct string_stream;
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 90f21898aad8..f9ed4c171d7b 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -13,13 +13,6 @@
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
#define ARMV8_PMU_MAX_COUNTER_PAIRS ((ARMV8_PMU_MAX_COUNTERS + 1) >> 1)
-DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
-
-static __always_inline bool kvm_arm_support_pmu_v3(void)
-{
- return static_branch_likely(&kvm_arm_pmu_available);
-}
-
#ifdef CONFIG_HW_PERF_EVENTS
struct kvm_pmc {
@@ -36,6 +29,13 @@ struct kvm_pmu {
struct irq_work overflow_work;
};
+DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);
+
+static __always_inline bool kvm_arm_support_pmu_v3(void)
+{
+ return static_branch_likely(&kvm_arm_pmu_available);
+}
+
#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
@@ -65,6 +65,11 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
struct kvm_pmu {
};
+static inline bool kvm_arm_support_pmu_v3(void)
+{
+ return false;
+}
+
#define kvm_arm_pmu_irq_initialized(v) (false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
u64 select_idx)
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index e602d848fc1a..bb30a6803d9f 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -5,9 +5,11 @@
#ifndef __KVM_ARM_VGIC_H
#define __KVM_ARM_VGIC_H
-#include <linux/kernel.h>
+#include <linux/bits.h>
#include <linux/kvm.h>
#include <linux/irqreturn.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/static_key.h>
#include <linux/types.h>
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index eaeb4b9255bc..6274758648e3 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -133,6 +133,7 @@ union acpi_subtable_headers {
struct acpi_subtable_header common;
struct acpi_hmat_structure hmat;
struct acpi_prmt_module_header prmt;
+ struct acpi_cedt_header cedt;
};
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
@@ -140,6 +141,9 @@ typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
const unsigned long end);
+typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
+ void *arg, const unsigned long end);
+
/* Debugger support */
struct acpi_debugger_ops {
@@ -216,6 +220,8 @@ static inline int acpi_debugger_notify_command_complete(void)
struct acpi_subtable_proc {
int id;
acpi_tbl_entry_handler handler;
+ acpi_tbl_entry_handler_arg handler_arg;
+ void *arg;
int count;
};
@@ -232,17 +238,31 @@ int acpi_locate_initial_tables (void);
void acpi_reserve_initial_tables (void);
void acpi_table_init_complete (void);
int acpi_table_init (void);
+
+#ifdef CONFIG_ACPI_TABLE_LIB
+#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
+#define __init_or_acpilib
+#define __initdata_or_acpilib
+#else
+#define EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_acpilib __init
+#define __initdata_or_acpilib __initdata
+#endif
+
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
-int __init acpi_table_parse_entries(char *id, unsigned long table_size,
- int entry_id,
- acpi_tbl_entry_handler handler,
- unsigned int max_entries);
-int __init acpi_table_parse_entries_array(char *id, unsigned long table_size,
- struct acpi_subtable_proc *proc, int proc_num,
- unsigned int max_entries);
+int __init_or_acpilib acpi_table_parse_entries(char *id,
+ unsigned long table_size, int entry_id,
+ acpi_tbl_entry_handler handler, unsigned int max_entries);
+int __init_or_acpilib acpi_table_parse_entries_array(char *id,
+ unsigned long table_size, struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries);
int acpi_table_parse_madt(enum acpi_madt_type id,
acpi_tbl_entry_handler handler,
unsigned int max_entries);
+int __init_or_acpilib
+acpi_table_parse_cedt(enum acpi_cedt_type id,
+ acpi_tbl_entry_handler_arg handler_arg, void *arg);
+
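+/*
+ * Usage sketch for the handler_arg variant (the handler and context names
+ * are illustrative): walk every CFMWS entry in the CEDT while passing a
+ * caller-provided cookie through to the callback.
+ *
+ *	static int handle_cfmws(union acpi_subtable_headers *header, void *arg,
+ *				const unsigned long end)
+ *	{
+ *		struct my_ctx *ctx = arg;
+ *		...
+ *		return 0;
+ *	}
+ *
+ *	acpi_table_parse_cedt(ACPI_CEDT_TYPE_CFMWS, handle_cfmws, &ctx);
+ */
+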
int acpi_parse_mcfg (struct acpi_table_header *header);
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
diff --git a/include/linux/aio.h b/include/linux/aio.h
index b83e68dd006f..86892a4fe7c8 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -20,8 +20,4 @@ static inline void kiocb_set_cancel_fn(struct kiocb *req,
kiocb_cancel_fn *cancel) { }
#endif /* CONFIG_AIO */
-/* for sysctl: */
-extern unsigned long aio_nr;
-extern unsigned long aio_max_nr;
-
#endif /* __LINUX__AIO_H */
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index edfcf7a14dcd..6c7f47846971 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -90,14 +90,8 @@ enum amba_vendor {
AMBA_VENDOR_ST = 0x80,
AMBA_VENDOR_QCOM = 0x51,
AMBA_VENDOR_LSI = 0xb6,
- AMBA_VENDOR_LINUX = 0xfe, /* This value is not official */
};
-/* This is used to generate pseudo-ID for AMBA device */
-#define AMBA_LINUX_ID(conf, rev, part) \
- (((conf) & 0xff) << 24 | ((rev) & 0xf) << 20 | \
- AMBA_VENDOR_LINUX << 12 | ((part) & 0xfff))
-
extern struct bus_type amba_bustype;
#define to_amba_device(d) container_of(d, struct amba_device, dev)
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 199e47e97d64..21292b5bbb55 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -324,12 +324,12 @@ enum {
ATA_LOG_NCQ_NON_DATA = 0x12,
ATA_LOG_NCQ_SEND_RECV = 0x13,
ATA_LOG_IDENTIFY_DEVICE = 0x30,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device log pages: */
ATA_LOG_SECURITY = 0x06,
ATA_LOG_SATA_SETTINGS = 0x08,
ATA_LOG_ZONED_INFORMATION = 0x09,
- ATA_LOG_CONCURRENT_POSITIONING_RANGES = 0x47,
/* Identify device SATA settings log:*/
ATA_LOG_DEVSLP_OFFSET = 0x30,
diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h
index fc51d45f106b..de21d9d24a95 100644
--- a/include/linux/auxiliary_bus.h
+++ b/include/linux/auxiliary_bus.h
@@ -11,12 +11,172 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
+/**
+ * DOC: DEVICE_LIFESPAN
+ *
+ * The registering driver is the entity that allocates memory for the
+ * auxiliary_device and registers it on the auxiliary bus. It is important to
+ * note that, as opposed to the platform bus, the registering driver is wholly
+ * responsible for the management of the memory used for the device object.
+ *
+ * To be clear, the memory for the auxiliary_device is freed in the release()
+ * callback defined by the registering driver. The registering driver should
+ * only call auxiliary_device_delete() and then auxiliary_device_uninit() when
+ * it is done with the device. The release() function is then called
+ * automatically if and when other code releases its reference to the device.
+ *
+ * A parent object, defined in the shared header file, contains the
+ * auxiliary_device. It also contains a pointer to the shared object(s), which
+ * also is defined in the shared header. Both the parent object and the shared
+ * object(s) are allocated by the registering driver. This layout allows the
+ * auxiliary_driver's registering module to perform a container_of() call to go
+ * from the pointer to the auxiliary_device, that is passed during the call to
+ * the auxiliary_driver's probe function, up to the parent object, and then
+ * have access to the shared object(s).
+ *
+ * The memory for the shared object(s) must have a lifespan equal to, or
+ * greater than, the lifespan of the memory for the auxiliary_device. The
+ * auxiliary_driver should only consider that the shared object is valid as
+ * long as the auxiliary_device is still registered on the auxiliary bus. It
+ * is up to the registering driver to manage (e.g. free or keep available) the
+ * memory for the shared object beyond the life of the auxiliary_device.
+ *
+ * The registering driver must unregister all auxiliary devices before its own
+ * driver.remove() is completed. An easy way to ensure this is to use the
+ * devm_add_action_or_reset() call to register a function against the parent
+ * device which unregisters the auxiliary device object(s).
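+ *
+ * A minimal sketch of that pattern (the function and variable names are
+ * illustrative, not part of this API):
+ *
+ * .. code-block:: c
+ *
+ *	static void my_aux_dev_unregister(void *data)
+ *	{
+ *		struct auxiliary_device *auxdev = data;
+ *
+ *		auxiliary_device_delete(auxdev);
+ *		auxiliary_device_uninit(auxdev);
+ *	}
+ *
+ *	// in the registering driver's probe():
+ *	ret = devm_add_action_or_reset(parent, my_aux_dev_unregister, my_aux_dev);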
+ *
+ * Finally, any operations which operate on the auxiliary devices must continue
+ * to function (if only to return an error) after the registering driver
+ * unregisters the auxiliary device.
+ */
+
+/**
+ * struct auxiliary_device - auxiliary device object.
+ * @dev: Device.
+ *       The release and parent fields of the device structure must be
+ *       filled in.
+ * @name: Match name found by the auxiliary device driver.
+ * @id: Unique identifier if multiple devices of the same name are exported.
+ *
+ * An auxiliary_device represents a part of its parent device's functionality.
+ * It is given a name that, combined with the registering driver's
+ * KBUILD_MODNAME, creates a match_name that is used for driver binding, and
+ * an id that, combined with the match_name, provides a unique name to
+ * register with the bus subsystem. For example, a driver registering an
+ * auxiliary device is
+ * named 'foo_mod.ko' and the subdevice is named 'foo_dev'. The match name is
+ * therefore 'foo_mod.foo_dev'.
+ *
+ * Registering an auxiliary_device is a three-step process.
+ *
+ * First, a 'struct auxiliary_device' needs to be defined or allocated for each
+ * sub-device desired. The name, id, dev.release, and dev.parent fields of
+ * this structure must be filled in as follows.
+ *
+ * The 'name' field is to be given a name that is recognized by the auxiliary
+ * driver. If two auxiliary_devices with the same match_name, e.g.
+ * "foo_mod.foo_dev", are registered onto the bus, they must have unique id
+ * values (e.g. "x" and "y") so that the registered devices' names are
+ * "foo_mod.foo_dev.x" and "foo_mod.foo_dev.y". If match_name + id are not
+ * unique, then device_add() fails and generates an error message.
+ *
+ * The auxiliary_device.dev.type.release or auxiliary_device.dev.release must
+ * be populated with a non-NULL pointer to successfully register the
+ * auxiliary_device. This release call is where resources associated with the
+ * auxiliary device must be free'ed. Because once the device is placed on the
+ * bus the parent driver can not tell what other code may have a reference to
+ * this data.
+ *
+ * The auxiliary_device.dev.parent should be set, typically to the
+ * registering driver's device.
+ *
+ * Second, call auxiliary_device_init(), which checks several aspects of the
+ * auxiliary_device struct and performs a device_initialize(). After this step
+ * completes, any error state must have a call to auxiliary_device_uninit() in
+ * its resolution path.
+ *
+ * The third and final step in registering an auxiliary_device is to perform a
+ * call to auxiliary_device_add(), which sets the name of the device and adds
+ * the device to the bus.
+ *
+ * .. code-block:: c
+ *
+ * #define MY_DEVICE_NAME "foo_dev"
+ *
+ * ...
+ *
+ * struct auxiliary_device *my_aux_dev = my_aux_dev_alloc(xxx);
+ *
+ * // Step 1:
+ * my_aux_dev->name = MY_DEVICE_NAME;
+ * my_aux_dev->id = my_unique_id_alloc(xxx);
+ * my_aux_dev->dev.release = my_aux_dev_release;
+ * my_aux_dev->dev.parent = my_dev;
+ *
+ * // Step 2:
+ * if (auxiliary_device_init(my_aux_dev))
+ * goto fail;
+ *
+ * // Step 3:
+ * if (auxiliary_device_add(my_aux_dev)) {
+ * auxiliary_device_uninit(my_aux_dev);
+ * goto fail;
+ * }
+ *
+ * ...
+ *
+ *
+ * Unregistering an auxiliary_device is a two-step process to mirror the
+ * register process. First call auxiliary_device_delete(), then call
+ * auxiliary_device_uninit().
+ *
+ * .. code-block:: c
+ *
+ * auxiliary_device_delete(my_dev->my_aux_dev);
+ * auxiliary_device_uninit(my_dev->my_aux_dev);
+ */
struct auxiliary_device {
struct device dev;
const char *name;
u32 id;
};
+/**
+ * struct auxiliary_driver - Definition of an auxiliary bus driver
+ * @probe: Called when a matching device is added to the bus.
+ * @remove: Called when device is removed from the bus.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ * @suspend: Called to put the device into a sleep mode, usually a low-power
+ *           state.
+ * @resume: Called to bring a device back from sleep mode.
+ * @name: Driver name.
+ * @driver: Core driver structure.
+ * @id_table: Table of devices this driver should match on the bus.
+ *
+ * Auxiliary drivers follow the standard driver model convention, where
+ * discovery/enumeration is handled by the core, and drivers provide probe()
+ * and remove() methods. They support power management and shutdown
+ * notifications using the standard conventions.
+ *
+ * Auxiliary drivers register themselves with the bus by calling
+ * auxiliary_driver_register(). The id_table contains the match_names of
+ * auxiliary devices that a driver can bind with.
+ *
+ * .. code-block:: c
+ *
+ * static const struct auxiliary_device_id my_auxiliary_id_table[] = {
+ * { .name = "foo_mod.foo_dev" },
+ * {},
+ * };
+ *
+ * MODULE_DEVICE_TABLE(auxiliary, my_auxiliary_id_table);
+ *
+ * struct auxiliary_driver my_drv = {
+ * .name = "myauxiliarydrv",
+ * .id_table = my_auxiliary_id_table,
+ * .probe = my_drv_probe,
+ * .remove = my_drv_remove
+ * };
+ */
struct auxiliary_driver {
int (*probe)(struct auxiliary_device *auxdev, const struct auxiliary_device_id *id);
void (*remove)(struct auxiliary_device *auxdev);
@@ -28,6 +188,16 @@ struct auxiliary_driver {
const struct auxiliary_device_id *id_table;
};
+static inline void *auxiliary_get_drvdata(struct auxiliary_device *auxdev)
+{
+ return dev_get_drvdata(&auxdev->dev);
+}
+
+static inline void auxiliary_set_drvdata(struct auxiliary_device *auxdev, void *data)
+{
+ dev_set_drvdata(&auxdev->dev, data);
+}
+
static inline struct auxiliary_device *to_auxiliary_dev(struct device *dev)
{
return container_of(dev, struct auxiliary_device, dev);
@@ -66,6 +236,10 @@ void auxiliary_driver_unregister(struct auxiliary_driver *auxdrv);
* Helper macro for auxiliary drivers which do not do anything special in
* module init/exit. This eliminates a lot of boilerplate. Each module may only
* use this macro once, and calling it replaces module_init() and module_exit()
+ *
+ * .. code-block:: c
+ *
+ * module_auxiliary_driver(my_drv);
*/
#define module_auxiliary_driver(__auxiliary_driver) \
module_driver(__auxiliary_driver, auxiliary_driver_register, auxiliary_driver_unregister)
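
A driver-side sketch tying the pieces above together, reusing the my_drv,
my_drv_probe() and my_drv_remove() names from the kernel-doc example and
assuming a hypothetical struct my_drv_state; it shows the new
auxiliary_set_drvdata()/auxiliary_get_drvdata() helpers pairing up across
probe and remove:

.. code-block:: c

  static int my_drv_probe(struct auxiliary_device *auxdev,
                          const struct auxiliary_device_id *id)
  {
          struct my_drv_state *state;

          state = kzalloc(sizeof(*state), GFP_KERNEL);
          if (!state)
                  return -ENOMEM;

          /* Stash driver state so my_drv_remove() can find it again. */
          auxiliary_set_drvdata(auxdev, state);
          return 0;
  }

  static void my_drv_remove(struct auxiliary_device *auxdev)
  {
          struct my_drv_state *state = auxiliary_get_drvdata(auxdev);

          kfree(state);
  }

  module_auxiliary_driver(my_drv);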
diff --git a/include/linux/bio.h b/include/linux/bio.h
index fe6bdfbbef66..117d7f248ac9 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -124,7 +124,7 @@ void __bio_advance(struct bio *, unsigned bytes);
/**
* bio_advance - increment/complete a bio by some number of bytes
* @bio: bio to advance
- * @bytes: number of bytes to complete
+ * @nbytes: number of bytes to complete
*
* This updates bi_sector, bi_size and bi_idx; if the number of bytes to
* complete doesn't align with a bvec boundary, then bv_len and bv_offset will
@@ -166,7 +166,7 @@ static inline void bio_advance(struct bio *bio, unsigned int nbytes)
*/
#define bio_for_each_bvec_all(bvl, bio, i) \
for (i = 0, bvl = bio_first_bvec_all(bio); \
- i < (bio)->bi_vcnt; i++, bvl++) \
+ i < (bio)->bi_vcnt; i++, bvl++)
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
@@ -260,6 +260,57 @@ static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
return &bio->bi_io_vec[bio->bi_vcnt - 1];
}
+/**
+ * struct folio_iter - State for iterating all folios in a bio.
+ * @folio: The current folio we're iterating. NULL after the last folio.
+ * @offset: The byte offset within the current folio.
+ * @length: The number of bytes in this iteration (will not cross folio
+ * boundary).
+ */
+struct folio_iter {
+ struct folio *folio;
+ size_t offset;
+ size_t length;
+ /* private: for use by the iterator */
+ size_t _seg_count;
+ int _i;
+};
+
+static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
+ int i)
+{
+ struct bio_vec *bvec = bio_first_bvec_all(bio) + i;
+
+ fi->folio = page_folio(bvec->bv_page);
+ fi->offset = bvec->bv_offset +
+ PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
+ fi->_seg_count = bvec->bv_len;
+ fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
+ fi->_i = i;
+}
+
+static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
+{
+ fi->_seg_count -= fi->length;
+ if (fi->_seg_count) {
+ fi->folio = folio_next(fi->folio);
+ fi->offset = 0;
+ fi->length = min(folio_size(fi->folio), fi->_seg_count);
+ } else if (fi->_i + 1 < bio->bi_vcnt) {
+ bio_first_folio(fi, bio, fi->_i + 1);
+ } else {
+ fi->folio = NULL;
+ }
+}
+
+/**
+ * bio_for_each_folio_all - Iterate over each folio in a bio.
+ * @fi: struct folio_iter which is updated for each folio.
+ * @bio: struct bio to iterate over.
+ */
+#define bio_for_each_folio_all(fi, bio) \
+ for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
+
enum bip_flags {
BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */
BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */
@@ -332,7 +383,7 @@ extern struct bio *bio_split(struct bio *bio, int sectors,
* @gfp: gfp mask
* @bs: bio set to allocate from
*
- * Returns a bio representing the next @sectors of @bio - if the bio is smaller
+ * Return: a bio representing the next @sectors of @bio - if the bio is smaller
* than @sectors, returns the original bio unchanged.
*/
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
@@ -409,7 +460,8 @@ extern void bio_uninit(struct bio *);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);
-extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
+int bio_add_page(struct bio *, struct page *, unsigned len, unsigned off);
+bool bio_add_folio(struct bio *, struct folio *, size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
unsigned int, unsigned int);
int bio_add_zone_append_page(struct bio *bio, struct page *page,
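
A sketch (not part of the patch) of the new folio interface: add one folio to
a freshly allocated bio, then walk it with bio_for_each_folio_all(). The
bio_alloc() call uses this tree's two-argument form, and 'folio' is assumed
to be a folio the caller already holds a reference to.

.. code-block:: c

  struct folio_iter fi;
  struct bio *bio;

  bio = bio_alloc(GFP_KERNEL, 1);
  if (!bio)
          return -ENOMEM;

  /* bio_add_folio() returns bool, unlike the int-returning bio_add_page(). */
  if (!bio_add_folio(bio, folio, folio_size(folio), 0)) {
          bio_put(bio);
          return -EIO;
  }

  /* Each step yields at most one folio's worth: fi.folio/fi.offset/fi.length. */
  bio_for_each_folio_all(fi, bio)
          pr_debug("folio chunk: offset %zu, %zu bytes\n", fi.offset, fi.length);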
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 4e035aca6f7e..6093fa6db260 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -41,6 +41,22 @@
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
+#define __scalar_type_to_unsigned_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (unsigned type)0
+
+#define __unsigned_scalar_typeof(x) typeof( \
+ _Generic((x), \
+ char: (unsigned char)0, \
+ __scalar_type_to_unsigned_cases(char), \
+ __scalar_type_to_unsigned_cases(short), \
+ __scalar_type_to_unsigned_cases(int), \
+ __scalar_type_to_unsigned_cases(long), \
+ __scalar_type_to_unsigned_cases(long long), \
+ default: (x)))
+
+#define __bf_cast_unsigned(type, x) ((__unsigned_scalar_typeof(type))(x))
+
#define __BF_FIELD_CHECK(_mask, _reg, _val, _pfx) \
({ \
BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
@@ -49,7 +65,8 @@
BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
_pfx "value too large for the field"); \
- BUILD_BUG_ON_MSG((_mask) > (typeof(_reg))~0ull, \
+ BUILD_BUG_ON_MSG(__bf_cast_unsigned(_mask, _mask) > \
+ __bf_cast_unsigned(_reg, ~0ull), \
_pfx "type of reg too small for mask"); \
__BUILD_BUG_ON_NOT_POWER_OF_2((_mask) + \
(1ULL << __bf_shf(_mask))); \
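
For illustration (not from the patch): the reworked check compares mask and
register in a common unsigned type, so a signed register type no longer trips
"type of reg too small for mask" through sign extension. MY_CTRL_MODE below
is a hypothetical field definition.

.. code-block:: c

  #define MY_CTRL_MODE GENMASK(7, 4) /* hypothetical 4-bit field */

  u32 reg = FIELD_PREP(MY_CTRL_MODE, 0x5); /* place 0x5 into bits 7:4 */
  u8 mode = FIELD_GET(MY_CTRL_MODE, reg);  /* read it back: 0x5 */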
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index a241dcf50f39..7dba0847510c 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -6,6 +6,7 @@
#include <linux/align.h>
#include <linux/bitops.h>
+#include <linux/find.h>
#include <linux/limits.h>
#include <linux/string.h>
#include <linux/types.h>
@@ -54,12 +55,6 @@ struct device;
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off) as above
- * bitmap_next_clear_region(map, &start, &end, nbits) Find next clear region
- * bitmap_next_set_region(map, &start, &end, nbits) Find next set region
- * bitmap_for_each_clear_region(map, rs, re, start, end)
- * Iterate over all clear regions
- * bitmap_for_each_set_region(map, rs, re, start, end)
- * Iterate over all set regions
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest
@@ -466,14 +461,6 @@ static inline void bitmap_replace(unsigned long *dst,
__bitmap_replace(dst, old, new, mask, nbits);
}
-static inline void bitmap_next_clear_region(unsigned long *bitmap,
- unsigned int *rs, unsigned int *re,
- unsigned int end)
-{
- *rs = find_next_zero_bit(bitmap, end, *rs);
- *re = find_next_bit(bitmap, end, *rs + 1);
-}
-
static inline void bitmap_next_set_region(unsigned long *bitmap,
unsigned int *rs, unsigned int *re,
unsigned int end)
@@ -482,25 +469,6 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
*re = find_next_zero_bit(bitmap, end, *rs + 1);
}
-/*
- * Bitmap region iterators. Iterates over the bitmap between [@start, @end).
- * @rs and @re should be integer variables and will be set to start and end
- * index of the current clear or set region.
- */
-#define bitmap_for_each_clear_region(bitmap, rs, re, start, end) \
- for ((rs) = (start), \
- bitmap_next_clear_region((bitmap), &(rs), &(re), (end)); \
- (rs) < (re); \
- (rs) = (re) + 1, \
- bitmap_next_clear_region((bitmap), &(rs), &(re), (end)))
-
-#define bitmap_for_each_set_region(bitmap, rs, re, start, end) \
- for ((rs) = (start), \
- bitmap_next_set_region((bitmap), &(rs), &(re), (end)); \
- (rs) < (re); \
- (rs) = (re) + 1, \
- bitmap_next_set_region((bitmap), &(rs), &(re), (end)))
-
/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
* @n: u64 value
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 5e62e2383b7f..7aaed501f768 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -32,40 +32,6 @@ extern unsigned long __sw_hweight64(__u64 w);
*/
#include <asm/bitops.h>
-#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
-
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_from(bit, addr, size) \
- for ((bit) = find_next_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
-
-#define for_each_clear_bit(bit, addr, size) \
- for ((bit) = find_first_zero_bit((addr), (size)); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
-
-/* same as for_each_clear_bit() but use bit as value to start with */
-#define for_each_clear_bit_from(bit, addr, size) \
- for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
- (bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
-
-/**
- * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
- * @start: bit offset to start search and to store the current iteration offset
- * @clump: location to store copy of current 8-bit clump
- * @bits: bitmap address to base the search on
- * @size: bitmap size in number of bits
- */
-#define for_each_set_clump8(start, clump, bits, size) \
- for ((start) = find_first_clump8(&(clump), (bits), (size)); \
- (start) < (size); \
- (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
-
static inline int get_bitmask_order(unsigned int count)
{
int order;
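
The removed iterators are not dropped from the kernel: in this series they
move to <linux/find.h>, which <linux/bitmap.h> now pulls in, so existing
usage is unchanged. A minimal sketch:

.. code-block:: c

  DECLARE_BITMAP(map, 64);
  unsigned int bit;

  bitmap_zero(map, 64);
  set_bit(3, map);
  set_bit(42, map);

  for_each_set_bit(bit, map, 64)
          pr_info("bit %u is set\n", bit); /* prints 3, then 42 */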
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 2949d9ac7484..d319ffa59354 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -4,7 +4,6 @@
#include <linux/blkdev.h>
#include <linux/sbitmap.h>
-#include <linux/srcu.h>
#include <linux/lockdep.h>
#include <linux/scatterlist.h>
#include <linux/prefetch.h>
@@ -100,7 +99,6 @@ struct request {
struct request *rq_next;
};
- struct gendisk *rq_disk;
struct block_device *part;
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
/* Time that the first bio started allocating this request. */
@@ -140,7 +138,7 @@ struct request {
unsigned short ioprio;
enum mq_rq_state state;
- refcount_t ref;
+ atomic_t ref;
unsigned long deadline;
@@ -218,6 +216,56 @@ static inline unsigned short req_get_ioprio(struct request *req)
#define rq_dma_dir(rq) \
(op_is_write(req_op(rq)) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)
+#define rq_list_add(listptr, rq) do { \
+ (rq)->rq_next = *(listptr); \
+ *(listptr) = rq; \
+} while (0)
+
+#define rq_list_pop(listptr) \
+({ \
+ struct request *__req = NULL; \
+ if ((listptr) && *(listptr)) { \
+ __req = *(listptr); \
+ *(listptr) = __req->rq_next; \
+ } \
+ __req; \
+})
+
+#define rq_list_peek(listptr) \
+({ \
+ struct request *__req = NULL; \
+ if ((listptr) && *(listptr)) \
+ __req = *(listptr); \
+ __req; \
+})
+
+#define rq_list_for_each(listptr, pos) \
+ for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos))
+
+#define rq_list_for_each_safe(listptr, pos, nxt) \
+ for (pos = rq_list_peek((listptr)), nxt = rq_list_next(pos); \
+ pos; pos = nxt, nxt = pos ? rq_list_next(pos) : NULL)
+
+#define rq_list_next(rq) (rq)->rq_next
+#define rq_list_empty(list) ((list) == (struct request *) NULL)
+
+/**
+ * rq_list_move() - move a struct request from one list to another
+ * @src: The source list @rq is currently in
+ * @dst: The destination list that @rq will be appended to
+ * @rq: The request to move
+ * @prev: The request preceding @rq in @src (NULL if @rq is the head)
+ */
+static inline void rq_list_move(struct request **src, struct request **dst,
+ struct request *rq, struct request *prev)
+{
+ if (prev)
+ prev->rq_next = rq->rq_next;
+ else
+ *src = rq->rq_next;
+ rq_list_add(dst, rq);
+}
+
enum blk_eh_timer_return {
BLK_EH_DONE, /* driver has completed the command */
BLK_EH_RESET_TIMER, /* reset timer and try again */
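
A sketch of the list helpers above (moved here from blkdev.h; see the removal
further down): requests are pushed LIFO onto a NULL-terminated singly linked
list. rq_a, rq_b and handle_one() are hypothetical.

.. code-block:: c

  struct request *rqlist = NULL, *rq;

  rq_list_add(&rqlist, rq_a);
  rq_list_add(&rqlist, rq_b); /* LIFO: the list head is now rq_b */

  rq_list_for_each(&rqlist, rq) /* walk without modifying the list */
          pr_debug("queued tag %d\n", rq->tag);

  while (!rq_list_empty(rqlist)) /* then drain it */
          handle_one(rq_list_pop(&rqlist));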
@@ -376,13 +424,6 @@ struct blk_mq_hw_ctx {
* q->unused_hctx_list.
*/
struct list_head hctx_list;
-
- /**
- * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
- * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
- * blk_mq_hw_ctx_size().
- */
- struct srcu_struct srcu[];
};
/**
@@ -479,8 +520,6 @@ struct blk_mq_queue_data {
bool last;
};
-typedef bool (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
- bool);
typedef bool (busy_tag_iter_fn)(struct request *, void *, bool);
/**
@@ -504,6 +543,14 @@ struct blk_mq_ops {
void (*commit_rqs)(struct blk_mq_hw_ctx *);
/**
+ * @queue_rqs: Queue a list of new requests. The driver is guaranteed
+ * that each request in the list belongs to the same queue. If the
+ * driver doesn't empty the @rqlist completely, then the rest will be
+ * queued individually by the block layer upon return.
+ */
+ void (*queue_rqs)(struct request **rqlist);
+
+ /**
* @get_budget: Reserve budget before queue request, once .queue_rq is
* run, it is driver's responsibility to release the
* reserved budget. Also we have to handle failure case
@@ -752,6 +799,17 @@ static inline void blk_mq_set_request_complete(struct request *rq)
WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
}
+/*
+ * Complete the request directly instead of deferring it to softirq or
+ * completing it on another CPU. Useful in preemptible context, as opposed
+ * to interrupt context.
+ */
+static inline void blk_mq_complete_request_direct(struct request *rq,
+ void (*complete)(struct request *rq))
+{
+ WRITE_ONCE(rq->state, MQ_RQ_COMPLETE);
+ complete(rq);
+}
+
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
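
How a driver might wire up the new ->queue_rqs hook, as a sketch (my_queue_rq()
and my_queue_one() are hypothetical): pop requests until the list is empty, or
stop early and let the block layer queue the remainder individually, per the
kernel-doc above.

.. code-block:: c

  static void my_queue_rqs(struct request **rqlist)
  {
          struct request *rq;

          while ((rq = rq_list_pop(rqlist))) {
                  if (my_queue_one(rq) != BLK_STS_OK) {
                          /* Put it back; the block layer queues the rest. */
                          rq_list_add(rqlist, rq);
                          break;
                  }
          }
  }

  static const struct blk_mq_ops my_mq_ops = {
          .queue_rq  = my_queue_rq, /* single-request path, still required */
          .queue_rqs = my_queue_rqs,
  };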
@@ -879,9 +937,6 @@ static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
rq->ioprio = bio_prio(bio);
-
- if (bio->bi_bdev)
- rq->rq_disk = bio->bi_bdev->bd_disk;
}
void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
@@ -917,10 +972,9 @@ int blk_rq_unmap_user(struct bio *);
int blk_rq_map_kern(struct request_queue *, struct request *, void *,
unsigned int, gfp_t);
int blk_rq_append_bio(struct request *rq, struct bio *bio);
-void blk_execute_rq_nowait(struct gendisk *, struct request *, int,
- rq_end_io_fn *);
-blk_status_t blk_execute_rq(struct gendisk *bd_disk, struct request *rq,
- int at_head);
+void blk_execute_rq_nowait(struct request *rq, bool at_head,
+ rq_end_io_fn *end_io);
+blk_status_t blk_execute_rq(struct request *rq, bool at_head);
struct req_iterator {
struct bvec_iter iter;
@@ -947,7 +1001,6 @@ struct req_iterator {
* blk_rq_pos() : the current sector
* blk_rq_bytes() : bytes left in the entire request
* blk_rq_cur_bytes() : bytes left in the current segment
- * blk_rq_err_bytes() : bytes left till the next error boundary
* blk_rq_sectors() : sectors left in the entire request
* blk_rq_cur_sectors() : sectors left in the current segment
* blk_rq_stats_sectors() : sectors of the entire request used for stats
@@ -971,8 +1024,6 @@ static inline int blk_rq_cur_bytes(const struct request *rq)
return bio_iovec(rq->bio).bv_len;
}
-unsigned int blk_rq_err_bytes(const struct request *rq);
-
static inline unsigned int blk_rq_sectors(const struct request *rq)
{
return blk_rq_bytes(rq) >> SECTOR_SHIFT;
@@ -1135,14 +1186,4 @@ static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
}
#endif /* CONFIG_BLK_DEV_ZONED */
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-# error "You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
-#endif
-#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-void rq_flush_dcache_pages(struct request *rq);
-#else
-static inline void rq_flush_dcache_pages(struct request *rq)
-{
-}
-#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
#endif /* BLK_MQ_H */
diff --git a/include/linux/blk-pm.h b/include/linux/blk-pm.h
index b80c65aba249..2580e05a8ab6 100644
--- a/include/linux/blk-pm.h
+++ b/include/linux/blk-pm.h
@@ -14,7 +14,7 @@ extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
-extern void blk_post_runtime_resume(struct request_queue *q, int err);
+extern void blk_post_runtime_resume(struct request_queue *q);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bd4370baccca..16b47035e4b0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -16,6 +16,7 @@
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sbitmap.h>
+#include <linux/srcu.h>
struct module;
struct request_queue;
@@ -44,7 +45,7 @@ struct blk_crypto_profile;
*/
#define BLKCG_MAX_POLS 6
-static inline int blk_validate_block_size(unsigned int bsize)
+static inline int blk_validate_block_size(unsigned long bsize)
{
if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
return -EINVAL;
@@ -267,7 +268,7 @@ struct request_queue {
int poll_nsec;
struct blk_stat_callback *poll_cb;
- struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
+ struct blk_rq_stat *poll_stat;
struct timer_list timeout;
struct work_struct timeout_work;
@@ -373,11 +374,18 @@ struct request_queue {
* devices that do not have multiple independent access ranges.
*/
struct blk_independent_access_ranges *ia_ranges;
+
+ /**
+ * @srcu: Sleepable RCU. Use as lock when the type of the request queue
+ * is blocking (BLK_MQ_F_BLOCKING). Must be the last member.
+ */
+ struct srcu_struct srcu[];
};
/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
#define QUEUE_FLAG_DYING 1 /* queue being torn down */
+#define QUEUE_FLAG_HAS_SRCU 2 /* SRCU is allocated */
#define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
@@ -397,7 +405,6 @@ struct request_queue {
#define QUEUE_FLAG_FUA 18 /* device supports FUA writes */
#define QUEUE_FLAG_DAX 19 /* device supports DAX */
#define QUEUE_FLAG_STATS 20 /* track IO start and completion times */
-#define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */
@@ -416,6 +423,7 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
+#define blk_queue_has_srcu(q) test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags)
#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
@@ -740,7 +748,8 @@ extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);
-extern void blk_set_queue_dying(struct request_queue *);
+
+void blk_mark_disk_dead(struct gendisk *disk);
#ifdef CONFIG_BLOCK
/*
@@ -1171,8 +1180,6 @@ int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned lo
bool blk_crypto_register(struct blk_crypto_profile *profile,
struct request_queue *q);
-void blk_crypto_unregister(struct request_queue *q);
-
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
@@ -1181,8 +1188,6 @@ static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
return true;
}
-static inline void blk_crypto_unregister(struct request_queue *q) { }
-
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
enum blk_unique_id {
@@ -1254,6 +1259,7 @@ unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
void disk_end_io_acct(struct gendisk *disk, unsigned int op,
unsigned long start_time);
+void bio_start_io_acct_time(struct bio *bio, unsigned long start_time);
unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
struct block_device *orig_bdev);
@@ -1335,33 +1341,4 @@ struct io_comp_batch {
#define DEFINE_IO_COMP_BATCH(name) struct io_comp_batch name = { }
-#define rq_list_add(listptr, rq) do { \
- (rq)->rq_next = *(listptr); \
- *(listptr) = rq; \
-} while (0)
-
-#define rq_list_pop(listptr) \
-({ \
- struct request *__req = NULL; \
- if ((listptr) && *(listptr)) { \
- __req = *(listptr); \
- *(listptr) = __req->rq_next; \
- } \
- __req; \
-})
-
-#define rq_list_peek(listptr) \
-({ \
- struct request *__req = NULL; \
- if ((listptr) && *(listptr)) \
- __req = *(listptr); \
- __req; \
-})
-
-#define rq_list_for_each(listptr, pos) \
- for (pos = rq_list_peek((listptr)); pos; pos = rq_list_next(pos)) \
-
-#define rq_list_next(rq) (rq)->rq_next
-#define rq_list_empty(list) ((list) == (struct request *) NULL)
-
#endif /* _LINUX_BLKDEV_H */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 6e947cd91152..fa517ae604ad 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -316,7 +316,12 @@ enum bpf_type_flag {
*/
MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS),
- __BPF_TYPE_LAST_FLAG = MEM_RDONLY,
+ /* MEM was "allocated" from a different helper, and cannot be mixed
+ * with regular non-MEM_ALLOC'ed MEM types.
+ */
+ MEM_ALLOC = BIT(2 + BPF_BASE_TYPE_BITS),
+
+ __BPF_TYPE_LAST_FLAG = MEM_ALLOC,
};
/* Max number of base types. */
@@ -400,7 +405,7 @@ enum bpf_return_type {
RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET,
RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK,
RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON,
- RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_ALLOC_MEM,
+ RET_PTR_TO_ALLOC_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_ALLOC | RET_PTR_TO_ALLOC_MEM,
RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID,
/* This must be the last entry. Its purpose is to ensure the enum is
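
For illustration only, assuming the base_type()/type_flag() helpers from
bpf_verifier.h: a composed type is a base type plus flag bits, so a check for
the new flag might look like this hypothetical predicate.

.. code-block:: c

  /* Sketch: does this register carry helper-allocated memory? */
  static bool is_alloc_mem(u32 reg_type)
  {
          return base_type(reg_type) == PTR_TO_MEM &&
                 (type_flag(reg_type) & MEM_ALLOC);
  }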
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 143401d4c9d9..e9993172f892 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -519,8 +519,8 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
-int check_ctx_reg(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno);
+int check_ptr_off_reg(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
u32 regno, u32 mem_size);
diff --git a/include/linux/byteorder/generic.h b/include/linux/byteorder/generic.h
index 4b13e0a3e15b..c9a4c96c9943 100644
--- a/include/linux/byteorder/generic.h
+++ b/include/linux/byteorder/generic.h
@@ -190,7 +190,7 @@ static inline void be64_add_cpu(__be64 *var, u64 val)
static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
{
- int i;
+ size_t i;
for (i = 0; i < len; i++)
dst[i] = cpu_to_be32(src[i]);
@@ -198,7 +198,7 @@ static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
{
- int i;
+ size_t i;
for (i = 0; i < len; i++)
dst[i] = be32_to_cpu(src[i]);
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 409d8c29bc4f..edf62eaa6285 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -35,6 +35,7 @@
#define CEPH_OPT_TCP_NODELAY (1<<4) /* TCP_NODELAY on TCP sockets */
#define CEPH_OPT_NOMSGSIGN (1<<5) /* don't sign msgs (msgr1) */
#define CEPH_OPT_ABORT_ON_FULL (1<<6) /* abort w/ ENOSPC when full */
+#define CEPH_OPT_RXBOUNCE (1<<7) /* double-buffer read data */
#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY)
@@ -295,13 +296,13 @@ extern bool libceph_compatible(void *data);
extern const char *ceph_msg_type_name(int type);
extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
-extern void *ceph_kvmalloc(size_t size, gfp_t flags);
+extern int ceph_parse_fsid(const char *str, struct ceph_fsid *fsid);
struct fs_parameter;
struct fc_log;
struct ceph_options *ceph_alloc_options(void);
int ceph_parse_mon_ips(const char *buf, size_t len, struct ceph_options *opt,
- struct fc_log *l);
+ struct fc_log *l, char delim);
int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
struct fc_log *l);
int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 0e6e9ad3c3bf..e7f2fb2fc207 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -383,6 +383,10 @@ struct ceph_connection_v2_info {
struct ceph_gcm_nonce in_gcm_nonce;
struct ceph_gcm_nonce out_gcm_nonce;
+ struct page **in_enc_pages;
+ int in_enc_page_cnt;
+ int in_enc_resid;
+ int in_enc_i;
struct page **out_enc_pages;
int out_enc_page_cnt;
int out_enc_resid;
@@ -457,6 +461,7 @@ struct ceph_connection {
struct ceph_msg *out_msg; /* sending message (== tail of
out_sent) */
+ struct page *bounce_page;
u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */
struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */
@@ -532,7 +537,7 @@ extern const char *ceph_pr_addr(const struct ceph_entity_addr *addr);
extern int ceph_parse_ips(const char *c, const char *end,
struct ceph_entity_addr *addr,
- int max_count, int *count);
+ int max_count, int *count, char delim);
extern int ceph_msgr_init(void);
extern void ceph_msgr_exit(void);
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
deleted file mode 100644
index 5f5730c1d324..000000000000
--- a/include/linux/cleancache.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_CLEANCACHE_H
-#define _LINUX_CLEANCACHE_H
-
-#include <linux/fs.h>
-#include <linux/exportfs.h>
-#include <linux/mm.h>
-
-#define CLEANCACHE_NO_POOL -1
-#define CLEANCACHE_NO_BACKEND -2
-#define CLEANCACHE_NO_BACKEND_SHARED -3
-
-#define CLEANCACHE_KEY_MAX 6
-
-/*
- * cleancache requires every file with a page in cleancache to have a
- * unique key unless/until the file is removed/truncated. For some
- * filesystems, the inode number is unique, but for "modern" filesystems
- * an exportable filehandle is required (see exportfs.h)
- */
-struct cleancache_filekey {
- union {
- ino_t ino;
- __u32 fh[CLEANCACHE_KEY_MAX];
- u32 key[CLEANCACHE_KEY_MAX];
- } u;
-};
-
-struct cleancache_ops {
- int (*init_fs)(size_t);
- int (*init_shared_fs)(uuid_t *uuid, size_t);
- int (*get_page)(int, struct cleancache_filekey,
- pgoff_t, struct page *);
- void (*put_page)(int, struct cleancache_filekey,
- pgoff_t, struct page *);
- void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
- void (*invalidate_inode)(int, struct cleancache_filekey);
- void (*invalidate_fs)(int);
-};
-
-extern int cleancache_register_ops(const struct cleancache_ops *ops);
-extern void __cleancache_init_fs(struct super_block *);
-extern void __cleancache_init_shared_fs(struct super_block *);
-extern int __cleancache_get_page(struct page *);
-extern void __cleancache_put_page(struct page *);
-extern void __cleancache_invalidate_page(struct address_space *, struct page *);
-extern void __cleancache_invalidate_inode(struct address_space *);
-extern void __cleancache_invalidate_fs(struct super_block *);
-
-#ifdef CONFIG_CLEANCACHE
-#define cleancache_enabled (1)
-static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
-{
- return mapping->host->i_sb->cleancache_poolid >= 0;
-}
-static inline bool cleancache_fs_enabled(struct page *page)
-{
- return cleancache_fs_enabled_mapping(page->mapping);
-}
-#else
-#define cleancache_enabled (0)
-#define cleancache_fs_enabled(_page) (0)
-#define cleancache_fs_enabled_mapping(_page) (0)
-#endif
-
-/*
- * The shim layer provided by these inline functions allows the compiler
- * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE
- * is disabled, to a single global variable check if CONFIG_CLEANCACHE
- * is enabled but no cleancache "backend" has dynamically enabled it,
- * and, for the most frequent cleancache ops, to a single global variable
- * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled
- * and a cleancache backend has dynamically enabled cleancache, but the
- * filesystem referenced by that cleancache op has not enabled cleancache.
- * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially
- * no measurable performance impact.
- */
-
-static inline void cleancache_init_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_init_fs(sb);
-}
-
-static inline void cleancache_init_shared_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_init_shared_fs(sb);
-}
-
-static inline int cleancache_get_page(struct page *page)
-{
- if (cleancache_enabled && cleancache_fs_enabled(page))
- return __cleancache_get_page(page);
- return -1;
-}
-
-static inline void cleancache_put_page(struct page *page)
-{
- if (cleancache_enabled && cleancache_fs_enabled(page))
- __cleancache_put_page(page);
-}
-
-static inline void cleancache_invalidate_page(struct address_space *mapping,
- struct page *page)
-{
- /* careful... page->mapping is NULL sometimes when this is called */
- if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
- __cleancache_invalidate_page(mapping, page);
-}
-
-static inline void cleancache_invalidate_inode(struct address_space *mapping)
-{
- if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping))
- __cleancache_invalidate_inode(mapping);
-}
-
-static inline void cleancache_invalidate_fs(struct super_block *sb)
-{
- if (cleancache_enabled)
- __cleancache_invalidate_fs(sb);
-}
-
-#endif /* _LINUX_CLEANCACHE_H */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index f59c875271a0..2faa6f7aa8a8 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -490,6 +490,13 @@ struct clk_hw *__clk_hw_register_gate(struct device *dev,
unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
+struct clk_hw *__devm_clk_hw_register_gate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags,
+ void __iomem *reg, u8 bit_idx,
+ u8 clk_gate_flags, spinlock_t *lock);
struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
@@ -544,6 +551,22 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
__clk_hw_register_gate((dev), NULL, (name), NULL, NULL, (parent_data), \
(flags), (reg), (bit_idx), \
(clk_gate_flags), (lock))
+/**
+ * devm_clk_hw_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define devm_clk_hw_register_gate(dev, name, parent_name, flags, reg, bit_idx,\
+ clk_gate_flags, lock) \
+ __devm_clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
void clk_unregister_gate(struct clk *clk);
void clk_hw_unregister_gate(struct clk_hw *hw);
int clk_gate_is_enabled(struct clk_hw *hw);
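
A usage sketch of the new devm_ variant; the argument list mirrors
clk_hw_register_gate(), and base, MY_GATE_REG and my_lock are hypothetical.

.. code-block:: c

  struct clk_hw *hw;

  hw = devm_clk_hw_register_gate(&pdev->dev, "my_gate", "my_parent",
                                 0, base + MY_GATE_REG, 3, 0, &my_lock);
  if (IS_ERR(hw))
          return PTR_ERR(hw);
  /* No clk_hw_unregister_gate() needed; devres handles teardown. */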
diff --git a/include/linux/clk/sunxi-ng.h b/include/linux/clk/sunxi-ng.h
index 3cd14acde0a1..cf32123b39f5 100644
--- a/include/linux/clk/sunxi-ng.h
+++ b/include/linux/clk/sunxi-ng.h
@@ -6,22 +6,7 @@
#ifndef _LINUX_CLK_SUNXI_NG_H_
#define _LINUX_CLK_SUNXI_NG_H_
-#include <linux/errno.h>
-
-#ifdef CONFIG_SUNXI_CCU
int sunxi_ccu_set_mmc_timing_mode(struct clk *clk, bool new_mode);
int sunxi_ccu_get_mmc_timing_mode(struct clk *clk);
-#else
-static inline int sunxi_ccu_set_mmc_timing_mode(struct clk *clk,
- bool new_mode)
-{
- return -ENOTSUPP;
-}
-
-static inline int sunxi_ccu_get_mmc_timing_mode(struct clk *clk)
-{
- return -ENOTSUPP;
-}
-#endif
#endif
diff --git a/drivers/comedi/drivers/comedi_8254.h b/include/linux/comedi/comedi_8254.h
index d8264417e53c..d8264417e53c 100644
--- a/drivers/comedi/drivers/comedi_8254.h
+++ b/include/linux/comedi/comedi_8254.h
diff --git a/drivers/comedi/drivers/8255.h b/include/linux/comedi/comedi_8255.h
index ceae3ca52e60..b2a5bc6b3a49 100644
--- a/drivers/comedi/drivers/8255.h
+++ b/include/linux/comedi/comedi_8255.h
@@ -1,14 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * module/8255.h
- * Header file for 8255
+ * comedi_8255.h
+ * Generic 8255 digital I/O subdevice support
*
* COMEDI - Linux Control and Measurement Device Interface
* Copyright (C) 1998 David A. Schleef <ds@schleef.org>
*/
-#ifndef _8255_H
-#define _8255_H
+#ifndef _COMEDI_8255_H
+#define _COMEDI_8255_H
#define I8255_SIZE 0x04
diff --git a/drivers/comedi/drivers/comedi_isadma.h b/include/linux/comedi/comedi_isadma.h
index 9d2b12db7e6e..9d2b12db7e6e 100644
--- a/drivers/comedi/drivers/comedi_isadma.h
+++ b/include/linux/comedi/comedi_isadma.h
diff --git a/drivers/comedi/comedi_pci.h b/include/linux/comedi/comedi_pci.h
index 4e069440cbdc..2fb50663e3ed 100644
--- a/drivers/comedi/comedi_pci.h
+++ b/include/linux/comedi/comedi_pci.h
@@ -11,8 +11,7 @@
#define _COMEDI_PCI_H
#include <linux/pci.h>
-
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
/*
* PCI Vendor IDs not in <linux/pci_ids.h>
diff --git a/drivers/comedi/comedi_pcmcia.h b/include/linux/comedi/comedi_pcmcia.h
index f2f6e779645b..a33dfb65b869 100644
--- a/drivers/comedi/comedi_pcmcia.h
+++ b/include/linux/comedi/comedi_pcmcia.h
@@ -12,8 +12,7 @@
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
-
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
struct pcmcia_device *comedi_to_pcmcia_dev(struct comedi_device *dev);
diff --git a/drivers/comedi/comedi_usb.h b/include/linux/comedi/comedi_usb.h
index 601e29d3891c..5d17dd425bd2 100644
--- a/drivers/comedi/comedi_usb.h
+++ b/include/linux/comedi/comedi_usb.h
@@ -10,8 +10,7 @@
#define _COMEDI_USB_H
#include <linux/usb.h>
-
-#include "comedidev.h"
+#include <linux/comedi/comedidev.h>
struct usb_interface *comedi_to_usb_interface(struct comedi_device *dev);
struct usb_device *comedi_to_usb_dev(struct comedi_device *dev);
diff --git a/drivers/comedi/comedidev.h b/include/linux/comedi/comedidev.h
index 0e1b95ef9a4d..0a1150900ef3 100644
--- a/drivers/comedi/comedidev.h
+++ b/include/linux/comedi/comedidev.h
@@ -15,8 +15,7 @@
#include <linux/spinlock_types.h>
#include <linux/rwsem.h>
#include <linux/kref.h>
-
-#include "comedi.h"
+#include <linux/comedi.h>
#define COMEDI_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))
#define COMEDI_VERSION_CODE COMEDI_VERSION(COMEDI_MAJORVERSION, \
diff --git a/drivers/comedi/comedilib.h b/include/linux/comedi/comedilib.h
index 0223c9cd9215..0223c9cd9215 100644
--- a/drivers/comedi/comedilib.h
+++ b/include/linux/comedi/comedilib.h
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 429dcebe2b99..0f7fd205ab7e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -117,14 +117,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
*/
#define __stringify_label(n) #n
-#define __annotate_reachable(c) ({ \
- asm volatile(__stringify_label(c) ":\n\t" \
- ".pushsection .discard.reachable\n\t" \
- ".long " __stringify_label(c) "b - .\n\t" \
- ".popsection\n\t" : : "i" (c)); \
-})
-#define annotate_reachable() __annotate_reachable(__COUNTER__)
-
#define __annotate_unreachable(c) ({ \
asm volatile(__stringify_label(c) ":\n\t" \
".pushsection .discard.unreachable\n\t" \
@@ -133,24 +125,21 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
})
#define annotate_unreachable() __annotate_unreachable(__COUNTER__)
-#define ASM_UNREACHABLE \
- "999:\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long 999b - .\n\t" \
+#define ASM_REACHABLE \
+ "998:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long 998b - .\n\t" \
".popsection\n\t"
/* Annotate a C jump table to allow objtool to follow the code flow */
#define __annotate_jump_table __section(".rodata..c_jump_table")
#else
-#define annotate_reachable()
#define annotate_unreachable()
+# define ASM_REACHABLE
#define __annotate_jump_table
#endif
-#ifndef ASM_UNREACHABLE
-# define ASM_UNREACHABLE
-#endif
#ifndef unreachable
# define unreachable() do { \
annotate_unreachable(); \
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 78fcd776b185..248a68c668b4 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -14,10 +14,6 @@ struct core_vma_metadata {
unsigned long dump_size;
};
-extern int core_uses_pid;
-extern char core_pattern[];
-extern unsigned int core_pipe_limit;
-
/*
* These are the only things you should do on a core-file: use only these
* functions to write out all the necessary info.
@@ -37,4 +33,10 @@ extern void do_coredump(const kernel_siginfo_t *siginfo);
static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
#endif
+#if defined(CONFIG_COREDUMP) && defined(CONFIG_SYSCTL)
+extern void validate_coredump_safety(void);
+#else
+static inline void validate_coredump_safety(void) {}
+#endif
+
#endif /* _LINUX_COREDUMP_H */
diff --git a/include/linux/counter.h b/include/linux/counter.h
index b7d0a00a61cf..1fe17f5adb09 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -38,64 +38,64 @@ enum counter_comp_type {
* @type: Counter component data type
* @name: device-specific component name
* @priv: component-relevant data
- * @action_read Synapse action mode read callback. The read value of the
+ * @action_read: Synapse action mode read callback. The read value of the
* respective Synapse action mode should be passed back via
* the action parameter.
- * @device_u8_read Device u8 component read callback. The read value of the
+ * @device_u8_read: Device u8 component read callback. The read value of the
* respective Device u8 component should be passed back via
* the val parameter.
- * @count_u8_read Count u8 component read callback. The read value of the
+ * @count_u8_read: Count u8 component read callback. The read value of the
* respective Count u8 component should be passed back via
* the val parameter.
- * @signal_u8_read Signal u8 component read callback. The read value of the
+ * @signal_u8_read: Signal u8 component read callback. The read value of the
* respective Signal u8 component should be passed back via
* the val parameter.
- * @device_u32_read Device u32 component read callback. The read value of
+ * @device_u32_read: Device u32 component read callback. The read value of
* the respective Device u32 component should be passed
* back via the val parameter.
- * @count_u32_read Count u32 component read callback. The read value of the
+ * @count_u32_read: Count u32 component read callback. The read value of the
* respective Count u32 component should be passed back via
* the val parameter.
- * @signal_u32_read Signal u32 component read callback. The read value of
+ * @signal_u32_read: Signal u32 component read callback. The read value of
* the respective Signal u32 component should be passed
* back via the val parameter.
- * @device_u64_read Device u64 component read callback. The read value of
+ * @device_u64_read: Device u64 component read callback. The read value of
* the respective Device u64 component should be passed
* back via the val parameter.
- * @count_u64_read Count u64 component read callback. The read value of the
+ * @count_u64_read: Count u64 component read callback. The read value of the
* respective Count u64 component should be passed back via
* the val parameter.
- * @signal_u64_read Signal u64 component read callback. The read value of
+ * @signal_u64_read: Signal u64 component read callback. The read value of
* the respective Signal u64 component should be passed
* back via the val parameter.
- * @action_write Synapse action mode write callback. The write value of
+ * @action_write: Synapse action mode write callback. The write value of
* the respective Synapse action mode is passed via the
* action parameter.
- * @device_u8_write Device u8 component write callback. The write value of
+ * @device_u8_write: Device u8 component write callback. The write value of
* the respective Device u8 component is passed via the val
* parameter.
- * @count_u8_write Count u8 component write callback. The write value of
+ * @count_u8_write: Count u8 component write callback. The write value of
* the respective Count u8 component is passed via the val
* parameter.
- * @signal_u8_write Signal u8 component write callback. The write value of
+ * @signal_u8_write: Signal u8 component write callback. The write value of
* the respective Signal u8 component is passed via the val
* parameter.
- * @device_u32_write Device u32 component write callback. The write value of
+ * @device_u32_write: Device u32 component write callback. The write value of
* the respective Device u32 component is passed via the
* val parameter.
- * @count_u32_write Count u32 component write callback. The write value of
+ * @count_u32_write: Count u32 component write callback. The write value of
* the respective Count u32 component is passed via the val
* parameter.
- * @signal_u32_write Signal u32 component write callback. The write value of
+ * @signal_u32_write: Signal u32 component write callback. The write value of
* the respective Signal u32 component is passed via the
* val parameter.
- * @device_u64_write Device u64 component write callback. The write value of
+ * @device_u64_write: Device u64 component write callback. The write value of
* the respective Device u64 component is passed via the
* val parameter.
- * @count_u64_write Count u64 component write callback. The write value of
+ * @count_u64_write: Count u64 component write callback. The write value of
* the respective Count u64 component is passed via the val
* parameter.
- * @signal_u64_write Signal u64 component write callback. The write value of
+ * @signal_u64_write: Signal u64 component write callback. The write value of
* the respective Signal u64 component is passed via the
* val parameter.
*/
@@ -314,8 +314,6 @@ struct counter_device {
struct counter_comp *ext;
size_t num_ext;
- void *priv;
-
struct device dev;
struct cdev chrdev;
struct list_head events_list;
@@ -329,10 +327,17 @@ struct counter_device {
struct mutex ops_exist_lock;
};
-int counter_register(struct counter_device *const counter);
+void *counter_priv(const struct counter_device *const counter);
+
+struct counter_device *counter_alloc(size_t sizeof_priv);
+void counter_put(struct counter_device *const counter);
+int counter_add(struct counter_device *const counter);
+
void counter_unregister(struct counter_device *const counter);
-int devm_counter_register(struct device *dev,
- struct counter_device *const counter);
+struct counter_device *devm_counter_alloc(struct device *dev,
+ size_t sizeof_priv);
+int devm_counter_add(struct device *dev,
+ struct counter_device *const counter);
void counter_push_event(struct counter_device *const counter, const u8 event,
const u8 channel);
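
A sketch of the reworked registration flow that replaces
devm_counter_register(): allocate the device with private data attached,
reach that data via counter_priv(), then add the device. struct my_priv and
my_counter_ops are hypothetical.

.. code-block:: c

  struct counter_device *counter;
  struct my_priv *priv;

  counter = devm_counter_alloc(dev, sizeof(*priv));
  if (!counter)
          return -ENOMEM;
  priv = counter_priv(counter); /* private data lives with the device */

  counter->name = "my_counter";
  counter->parent = dev;
  counter->ops = &my_counter_ops;
  /* ... fill in counts, signals, ext ... */

  return devm_counter_add(dev, counter);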
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 1e7399fc69c0..64dae70d31f5 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -123,6 +123,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
return 0;
}
+static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
+{
+ return 0;
+}
+
+static inline unsigned int cpumask_first_and(const struct cpumask *srcp1,
+ const struct cpumask *srcp2)
+{
+ return 0;
+}
+
static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
return 0;
@@ -167,7 +178,7 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p) {
- return cpumask_next_and(-1, src1p, src2p);
+ return cpumask_first_and(src1p, src2p);
}
static inline int cpumask_any_distribute(const struct cpumask *srcp)
@@ -196,6 +207,30 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
}
/**
+ * cpumask_first_zero - get the first unset cpu in a cpumask
+ * @srcp: the cpumask pointer
+ *
+ * Returns >= nr_cpu_ids if all cpus are set.
+ */
+static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
+{
+ return find_first_zero_bit(cpumask_bits(srcp), nr_cpumask_bits);
+}
+
+/**
+ * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
+ * @src1p: the first input
+ * @src2p: the second input
+ *
+ * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
+ */
+static inline
+unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
+{
+ return find_first_and_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), nr_cpumask_bits);
+}
+
+/**
* cpumask_last - get the last CPU in a cpumask
* @srcp: the cpumask pointer
*
@@ -586,15 +621,6 @@ static inline void cpumask_copy(struct cpumask *dstp,
#define cpumask_any(srcp) cpumask_first(srcp)
/**
- * cpumask_first_and - return the first cpu from *srcp1 & *srcp2
- * @src1p: the first input
- * @src2p: the second input
- *
- * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
- */
-#define cpumask_first_and(src1p, src2p) cpumask_next_and(-1, (src1p), (src2p))
-
-/**
* cpumask_any_and - pick a "random" cpu from *mask1 & *mask2
* @mask1: the first input cpumask
* @mask2: the second input cpumask
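
An illustration of the promoted helper (formerly a macro around
cpumask_next_and()); it now compiles down to a single-pass
find_first_and_bit(). my_affinity_mask is hypothetical.

.. code-block:: c

  unsigned int cpu;

  /* First CPU that is both online and in our affinity mask. */
  cpu = cpumask_first_and(cpu_online_mask, my_affinity_mask);
  if (cpu >= nr_cpu_ids)
          return -ENODEV; /* the two masks do not overlap */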
diff --git a/include/linux/cuda.h b/include/linux/cuda.h
index 45bfe9d61271..daf3e6f98444 100644
--- a/include/linux/cuda.h
+++ b/include/linux/cuda.h
@@ -12,7 +12,7 @@
#include <uapi/linux/cuda.h>
-extern int find_via_cuda(void);
+extern int __init find_via_cuda(void);
extern int cuda_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
extern void cuda_poll(void);
diff --git a/include/linux/damon.h b/include/linux/damon.h
index b4d4be3cc987..5e1e3a128b77 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -11,12 +11,19 @@
#include <linux/mutex.h>
#include <linux/time64.h>
#include <linux/types.h>
+#include <linux/random.h>
/* Minimal region size. Every damon_region is aligned by this. */
#define DAMON_MIN_REGION PAGE_SIZE
/* Max priority score for DAMON-based operation schemes */
#define DAMOS_MAX_SCORE (99)
+/* Get a random number in [l, r) */
+static inline unsigned long damon_rand(unsigned long l, unsigned long r)
+{
+ return l + prandom_u32_max(r - l);
+}
+
/**
* struct damon_addr_range - Represents an address region of [@start, @end).
* @start: Start address of the region (inclusive).
@@ -186,6 +193,22 @@ struct damos_watermarks {
};
/**
+ * struct damos_stat - Statistics on a given scheme.
+ * @nr_tried: Total number of regions that the scheme has been tried on.
+ * @sz_tried: Total size of the regions that the scheme has been tried on.
+ * @nr_applied: Total number of regions that the scheme was applied to.
+ * @sz_applied: Total size of the regions that the scheme was applied to.
+ * @qt_exceeds: Total number of times the scheme's quota has been exceeded.
+ */
+struct damos_stat {
+ unsigned long nr_tried;
+ unsigned long sz_tried;
+ unsigned long nr_applied;
+ unsigned long sz_applied;
+ unsigned long qt_exceeds;
+};
+
+/**
* struct damos - Represents a Data Access Monitoring-based Operation Scheme.
* @min_sz_region: Minimum size of target regions.
* @max_sz_region: Maximum size of target regions.
@@ -196,8 +219,7 @@ struct damos_watermarks {
* @action: &damo_action to be applied to the target regions.
* @quota: Control the aggressiveness of this scheme.
* @wmarks: Watermarks for automated (in)activation of this scheme.
- * @stat_count: Total number of regions that this scheme is applied.
- * @stat_sz: Total size of regions that this scheme is applied.
+ * @stat: Statistics of this scheme.
* @list: List head for siblings.
*
* For each aggregation interval, DAMON finds regions which fit in the
@@ -228,8 +250,7 @@ struct damos {
enum damos_action action;
struct damos_quota quota;
struct damos_watermarks wmarks;
- unsigned long stat_count;
- unsigned long stat_sz;
+ struct damos_stat stat;
struct list_head list;
};
@@ -274,7 +295,8 @@ struct damon_ctx;
* as an integer in [0, &DAMOS_MAX_SCORE].
* @apply_scheme is called from @kdamond when a region for user provided
* DAMON-based operation scheme is found. It should apply the scheme's action
- * to the region. This is not used for &DAMON_ARBITRARY_TARGET case.
+ * to the region and return the number of bytes of the region to which the
+ * action was successfully applied.
* @target_valid should check whether the target is still valid for the
* monitoring.
* @cleanup is called from @kdamond just before its termination.
@@ -288,8 +310,9 @@ struct damon_primitive {
int (*get_scheme_score)(struct damon_ctx *context,
struct damon_target *t, struct damon_region *r,
struct damos *scheme);
- int (*apply_scheme)(struct damon_ctx *context, struct damon_target *t,
- struct damon_region *r, struct damos *scheme);
+ unsigned long (*apply_scheme)(struct damon_ctx *context,
+ struct damon_target *t, struct damon_region *r,
+ struct damos *scheme);
bool (*target_valid)(void *target);
void (*cleanup)(struct damon_ctx *context);
};
@@ -392,14 +415,20 @@ struct damon_ctx {
struct list_head schemes;
};
-#define damon_next_region(r) \
- (container_of(r->list.next, struct damon_region, list))
+static inline struct damon_region *damon_next_region(struct damon_region *r)
+{
+ return container_of(r->list.next, struct damon_region, list);
+}
-#define damon_prev_region(r) \
- (container_of(r->list.prev, struct damon_region, list))
+static inline struct damon_region *damon_prev_region(struct damon_region *r)
+{
+ return container_of(r->list.prev, struct damon_region, list);
+}
-#define damon_last_region(t) \
- (list_last_entry(&t->regions_list, struct damon_region, list))
+static inline struct damon_region *damon_last_region(struct damon_target *t)
+{
+ return list_last_entry(&t->regions_list, struct damon_region, list);
+}
#define damon_for_each_region(r, t) \
list_for_each_entry(r, &t->regions_list, list)
@@ -422,9 +451,18 @@ struct damon_ctx {
#ifdef CONFIG_DAMON
struct damon_region *damon_new_region(unsigned long start, unsigned long end);
-inline void damon_insert_region(struct damon_region *r,
+
+/*
+ * Add a region between two other regions
+ */
+static inline void damon_insert_region(struct damon_region *r,
struct damon_region *prev, struct damon_region *next,
- struct damon_target *t);
+ struct damon_target *t)
+{
+ __list_add(&r->list, &prev->list, &next->list);
+ t->nr_regions++;
+}
+
void damon_add_region(struct damon_region *r, struct damon_target *t);
void damon_destroy_region(struct damon_region *r, struct damon_target *t);
@@ -461,34 +499,13 @@ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
#endif /* CONFIG_DAMON */
#ifdef CONFIG_DAMON_VADDR
-
-/* Monitoring primitives for virtual memory address spaces */
-void damon_va_init(struct damon_ctx *ctx);
-void damon_va_update(struct damon_ctx *ctx);
-void damon_va_prepare_access_checks(struct damon_ctx *ctx);
-unsigned int damon_va_check_accesses(struct damon_ctx *ctx);
bool damon_va_target_valid(void *t);
-void damon_va_cleanup(struct damon_ctx *ctx);
-int damon_va_apply_scheme(struct damon_ctx *context, struct damon_target *t,
- struct damon_region *r, struct damos *scheme);
-int damon_va_scheme_score(struct damon_ctx *context, struct damon_target *t,
- struct damon_region *r, struct damos *scheme);
void damon_va_set_primitives(struct damon_ctx *ctx);
-
#endif /* CONFIG_DAMON_VADDR */
#ifdef CONFIG_DAMON_PADDR
-
-/* Monitoring primitives for the physical memory address space */
-void damon_pa_prepare_access_checks(struct damon_ctx *ctx);
-unsigned int damon_pa_check_accesses(struct damon_ctx *ctx);
bool damon_pa_target_valid(void *t);
-int damon_pa_apply_scheme(struct damon_ctx *context, struct damon_target *t,
- struct damon_region *r, struct damos *scheme);
-int damon_pa_scheme_score(struct damon_ctx *context, struct damon_target *t,
- struct damon_region *r, struct damos *scheme);
void damon_pa_set_primitives(struct damon_ctx *ctx);
-
#endif /* CONFIG_DAMON_PADDR */
#endif /* _DAMON_H */
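
damon_rand() is now a shared static inline rather than a copy in each user; a
sketch of the usual pattern, picking a random sampling address inside a
region (my_sample_addr() is hypothetical):

.. code-block:: c

  /* Pick a random address in [r->ar.start, r->ar.end). */
  static unsigned long my_sample_addr(struct damon_region *r)
  {
          return damon_rand(r->ar.start, r->ar.end);
  }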
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 8623caa67388..9fc5f99a0ae2 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -6,14 +6,14 @@
#include <linux/mm.h>
#include <linux/radix-tree.h>
-/* Flag for synchronous flush */
-#define DAXDEV_F_SYNC (1UL << 0)
-
typedef unsigned long dax_entry_t;
+struct dax_device;
+struct gendisk;
struct iomap_ops;
+struct iomap_iter;
struct iomap;
-struct dax_device;
+
struct dax_operations {
/*
* direct_access: translate a device-relative
@@ -28,33 +28,18 @@ struct dax_operations {
*/
bool (*dax_supported)(struct dax_device *, struct block_device *, int,
sector_t, sector_t);
- /* copy_from_iter: required operation for fs-dax direct-i/o */
- size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t,
- struct iov_iter *);
- /* copy_to_iter: required operation for fs-dax direct-i/o */
- size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t,
- struct iov_iter *);
/* zero_page_range: required operation. Zero page range */
int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
};
#if IS_ENABLED(CONFIG_DAX)
-struct dax_device *alloc_dax(void *private, const char *host,
- const struct dax_operations *ops, unsigned long flags);
+struct dax_device *alloc_dax(void *private, const struct dax_operations *ops);
void put_dax(struct dax_device *dax_dev);
void kill_dax(struct dax_device *dax_dev);
void dax_write_cache(struct dax_device *dax_dev, bool wc);
bool dax_write_cache_enabled(struct dax_device *dax_dev);
-bool __dax_synchronous(struct dax_device *dax_dev);
-static inline bool dax_synchronous(struct dax_device *dax_dev)
-{
- return __dax_synchronous(dax_dev);
-}
-void __set_dax_synchronous(struct dax_device *dax_dev);
-static inline void set_dax_synchronous(struct dax_device *dax_dev)
-{
- __set_dax_synchronous(dax_dev);
-}
+bool dax_synchronous(struct dax_device *dax_dev);
+void set_dax_synchronous(struct dax_device *dax_dev);
/*
* Check if given mapping is supported by the file / underlying device.
*/
@@ -68,8 +53,8 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
return dax_synchronous(dax_dev);
}
#else
-static inline struct dax_device *alloc_dax(void *private, const char *host,
- const struct dax_operations *ops, unsigned long flags)
+static inline struct dax_device *alloc_dax(void *private,
+ const struct dax_operations *ops)
{
/*
* Callers should check IS_ENABLED(CONFIG_DAX) to know if this
@@ -104,48 +89,46 @@ static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
}
#endif
-struct writeback_control;
-int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
-#if IS_ENABLED(CONFIG_FS_DAX)
-bool generic_fsdax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t sectors);
-
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
- int blocksize, sector_t start, sector_t len);
+void set_dax_nocache(struct dax_device *dax_dev);
+void set_dax_nomc(struct dax_device *dax_dev);
+struct writeback_control;
+#if defined(CONFIG_BLOCK) && defined(CONFIG_FS_DAX)
+int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk);
+void dax_remove_host(struct gendisk *disk);
+struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
+ u64 *start_off);
static inline void fs_put_dax(struct dax_device *dax_dev)
{
put_dax(dax_dev);
}
-
-struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
-int dax_writeback_mapping_range(struct address_space *mapping,
- struct dax_device *dax_dev, struct writeback_control *wbc);
-
-struct page *dax_layout_busy_page(struct address_space *mapping);
-struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
-dax_entry_t dax_lock_page(struct page *page);
-void dax_unlock_page(struct page *page, dax_entry_t cookie);
#else
-#define generic_fsdax_supported NULL
-
-static inline bool dax_supported(struct dax_device *dax_dev,
- struct block_device *bdev, int blocksize, sector_t start,
- sector_t len)
+static inline int dax_add_host(struct dax_device *dax_dev, struct gendisk *disk)
{
- return false;
+ return 0;
}
-
-static inline void fs_put_dax(struct dax_device *dax_dev)
+static inline void dax_remove_host(struct gendisk *disk)
{
}
-
-static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
+static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev,
+ u64 *start_off)
{
return NULL;
}
+static inline void fs_put_dax(struct dax_device *dax_dev)
+{
+}
+#endif /* CONFIG_BLOCK && CONFIG_FS_DAX */
+#if IS_ENABLED(CONFIG_FS_DAX)
+int dax_writeback_mapping_range(struct address_space *mapping,
+ struct dax_device *dax_dev, struct writeback_control *wbc);
+
+struct page *dax_layout_busy_page(struct address_space *mapping);
+struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
+dax_entry_t dax_lock_page(struct page *page);
+void dax_unlock_page(struct page *page, dax_entry_t cookie);
+#else
static inline struct page *dax_layout_busy_page(struct address_space *mapping)
{
return NULL;
@@ -174,6 +157,11 @@ static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
}
#endif
+int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
+ const struct iomap_ops *ops);
+int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
+ const struct iomap_ops *ops);
+
#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
@@ -208,7 +196,6 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
-s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap);
static inline bool dax_mapping(struct address_space *mapping)
{
return mapping->host && IS_DAX(mapping->host);
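A minimal adoption sketch for the reworked dax registration (not part of this patch; all my_* names are hypothetical): alloc_dax() no longer takes a host name or DAXDEV_F_SYNC flags, synchronous-fault capability becomes an explicit call, and the gendisk association is made with dax_add_host().

#include <linux/dax.h>
#include <linux/err.h>
#include <linux/pfn_t.h>

long my_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
			  long nr_pages, void **kaddr, pfn_t *pfn);
int my_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
			   size_t nr_pages);

static const struct dax_operations my_dax_ops = {
	.direct_access = my_dax_direct_access,
	.zero_page_range = my_dax_zero_page_range,	/* required op */
};

static int my_attach_dax(void *drv_priv, struct gendisk *disk)
{
	struct dax_device *dax_dev;
	int rc;

	dax_dev = alloc_dax(drv_priv, &my_dax_ops);
	if (IS_ERR(dax_dev))
		return PTR_ERR(dax_dev);

	/* Synchronous-fault capability is now set explicitly. */
	set_dax_synchronous(dax_dev);

	/* Tie the dax_device to the disk for fs_dax_get_by_bdev(). */
	rc = dax_add_host(dax_dev, disk);
	if (rc) {
		kill_dax(dax_dev);
		put_dax(dax_dev);
		return rc;
	}
	return 0;
}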
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 9e23d33bb6f1..f5bba51480b2 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -61,16 +61,6 @@ extern const struct qstr empty_name;
extern const struct qstr slash_name;
extern const struct qstr dotdot_name;
-struct dentry_stat_t {
- long nr_dentry;
- long nr_unused;
- long age_limit; /* age in seconds */
- long want_pages; /* pages requested by system */
- long nr_negative; /* # of unused negative dentries */
- long dummy; /* Reserved for future use */
-};
-extern struct dentry_stat_t dentry_stat;
-
/*
* Try to keep struct dentry aligned on 64 byte cachelines (this will
* give reasonable cacheline footprint with larger lines without the
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index af7e6eb50283..3e03d010bd2e 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -9,18 +9,9 @@
#include <uapi/linux/taskstats.h>
-/*
- * Per-task flags relevant to delay accounting
- * maintained privately to avoid exhausting similar flags in sched.h:PF_*
- * Used to set current->delays->flags
- */
-#define DELAYACCT_PF_SWAPIN 0x00000001 /* I am doing a swapin */
-#define DELAYACCT_PF_BLKIO 0x00000002 /* I am waiting on IO */
-
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
raw_spinlock_t lock;
- unsigned int flags; /* Private per-task flags */
/* For each stat XXX, add following, aligned appropriately
*
@@ -37,13 +28,13 @@ struct task_delay_info {
* associated with the operation is added to XXX_delay.
* XXX_delay contains the accumulated delay time in nanoseconds.
*/
- u64 blkio_start; /* Shared by blkio, swapin */
+ u64 blkio_start;
u64 blkio_delay; /* wait for sync block io completion */
- u64 swapin_delay; /* wait for swapin block io completion */
+ u64 swapin_start;
+ u64 swapin_delay; /* wait for swapin */
u32 blkio_count; /* total count of the number of sync block */
/* io operations performed */
- u32 swapin_count; /* total count of the number of swapin block */
- /* io operations performed */
+ u32 swapin_count; /* total count of swapin */
u64 freepages_start;
u64 freepages_delay; /* wait for memory reclaim */
@@ -51,8 +42,12 @@ struct task_delay_info {
u64 thrashing_start;
u64 thrashing_delay; /* wait for thrashing page */
+ u64 compact_start;
+ u64 compact_delay; /* wait for memory compact */
+
u32 freepages_count; /* total count of memory reclaim */
u32 thrashing_count; /* total count of thrash waits */
+ u32 compact_count; /* total count of memory compact */
};
#endif
@@ -79,26 +74,10 @@ extern void __delayacct_freepages_start(void);
extern void __delayacct_freepages_end(void);
extern void __delayacct_thrashing_start(void);
extern void __delayacct_thrashing_end(void);
-
-static inline int delayacct_is_task_waiting_on_io(struct task_struct *p)
-{
- if (p->delays)
- return (p->delays->flags & DELAYACCT_PF_BLKIO);
- else
- return 0;
-}
-
-static inline void delayacct_set_flag(struct task_struct *p, int flag)
-{
- if (p->delays)
- p->delays->flags |= flag;
-}
-
-static inline void delayacct_clear_flag(struct task_struct *p, int flag)
-{
- if (p->delays)
- p->delays->flags &= ~flag;
-}
+extern void __delayacct_swapin_start(void);
+extern void __delayacct_swapin_end(void);
+extern void __delayacct_compact_start(void);
+extern void __delayacct_compact_end(void);
static inline void delayacct_tsk_init(struct task_struct *tsk)
{
@@ -123,7 +102,6 @@ static inline void delayacct_blkio_start(void)
if (!static_branch_unlikely(&delayacct_key))
return;
- delayacct_set_flag(current, DELAYACCT_PF_BLKIO);
if (current->delays)
__delayacct_blkio_start();
}
@@ -135,7 +113,6 @@ static inline void delayacct_blkio_end(struct task_struct *p)
if (p->delays)
__delayacct_blkio_end(p);
- delayacct_clear_flag(p, DELAYACCT_PF_BLKIO);
}
static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
@@ -147,33 +124,77 @@ static inline __u64 delayacct_blkio_ticks(struct task_struct *tsk)
static inline void delayacct_freepages_start(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_freepages_start();
}
static inline void delayacct_freepages_end(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_freepages_end();
}
static inline void delayacct_thrashing_start(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_thrashing_start();
}
static inline void delayacct_thrashing_end(void)
{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
if (current->delays)
__delayacct_thrashing_end();
}
+static inline void delayacct_swapin_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_swapin_start();
+}
+
+static inline void delayacct_swapin_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_swapin_end();
+}
+
+static inline void delayacct_compact_start(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_compact_start();
+}
+
+static inline void delayacct_compact_end(void)
+{
+ if (!static_branch_unlikely(&delayacct_key))
+ return;
+
+ if (current->delays)
+ __delayacct_compact_end();
+}
+
#else
-static inline void delayacct_set_flag(struct task_struct *p, int flag)
-{}
-static inline void delayacct_clear_flag(struct task_struct *p, int flag)
-{}
static inline void delayacct_init(void)
{}
static inline void delayacct_tsk_init(struct task_struct *tsk)
@@ -199,6 +220,14 @@ static inline void delayacct_thrashing_start(void)
{}
static inline void delayacct_thrashing_end(void)
{}
+static inline void delayacct_swapin_start(void)
+{}
+static inline void delayacct_swapin_end(void)
+{}
+static inline void delayacct_compact_start(void)
+{}
+static inline void delayacct_compact_end(void)
+{}
#endif /* CONFIG_TASK_DELAY_ACCT */
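A minimal call-site sketch (not part of this patch): swapin and compaction waits are now bracketed with their own start/end pairs instead of piggybacking on the removed DELAYACCT_PF_* flags; the wrapped operations in the comments are placeholders.

#include <linux/delayacct.h>

static void my_swapin_wait(void)
{
	delayacct_swapin_start();	/* no-op unless delayacct is enabled */
	/* ... wait for the swapin read to complete ... */
	delayacct_swapin_end();
}

static void my_compact_wait(void)
{
	delayacct_compact_start();
	/* ... run or wait for memory compaction ... */
	delayacct_compact_end();
}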
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a7df155ea49b..b26fecf6c8e8 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -147,8 +147,6 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn);
-typedef size_t (*dm_dax_copy_iter_fn)(struct dm_target *ti, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages);
@@ -200,8 +198,6 @@ struct target_type {
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
dm_dax_direct_access_fn direct_access;
- dm_dax_copy_iter_fn dax_copy_from_iter;
- dm_dax_copy_iter_fn dax_copy_to_iter;
dm_dax_zero_page_range_fn dax_zero_page_range;
/* For internal device-mapper use. */
diff --git a/include/linux/device.h b/include/linux/device.h
index e270cb740b9e..93459724dcde 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -45,6 +45,7 @@ struct iommu_ops;
struct iommu_group;
struct dev_pin_info;
struct dev_iommu;
+struct msi_device_data;
/**
* struct subsys_interface - interfaces to device functions
@@ -372,6 +373,20 @@ struct dev_links_info {
};
/**
+ * struct dev_msi_info - Device data related to MSI
+ * @domain: The MSI interrupt domain associated to the device
+ * @data: Pointer to MSI device data
+ */
+struct dev_msi_info {
+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
+ struct irq_domain *domain;
+#endif
+#ifdef CONFIG_GENERIC_MSI_IRQ
+ struct msi_device_data *data;
+#endif
+};
+
+/**
* struct device - The basic device structure
* @parent: The device's "parent" device, the device to which it is attached.
* In most cases, a parent device is some sort of bus or host
@@ -407,9 +422,7 @@ struct dev_links_info {
* @em_pd: device's energy model performance domain
* @pins: For device pin management.
* See Documentation/driver-api/pin-control.rst for details.
- * @msi_lock: Lock to protect MSI mask cache and mask register
- * @msi_list: Hosts MSI descriptors
- * @msi_domain: The generic MSI domain this device is using.
+ * @msi: MSI related data
* @numa_node: NUMA node this device is close to.
* @dma_ops: DMA mapping operations for this device.
* @dma_mask: Dma mask (if dma'ble device).
@@ -501,16 +514,10 @@ struct device {
struct em_perf_domain *em_pd;
#endif
-#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- struct irq_domain *msi_domain;
-#endif
#ifdef CONFIG_PINCTRL
struct dev_pin_info *pins;
#endif
-#ifdef CONFIG_GENERIC_MSI_IRQ
- raw_spinlock_t msi_lock;
- struct list_head msi_list;
-#endif
+ struct dev_msi_info msi;
#ifdef CONFIG_DMA_OPS
const struct dma_map_ops *dma_ops;
#endif
@@ -668,7 +675,7 @@ static inline void set_dev_node(struct device *dev, int node)
static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- return dev->msi_domain;
+ return dev->msi.domain;
#else
return NULL;
#endif
@@ -677,7 +684,7 @@ static inline struct irq_domain *dev_get_msi_domain(const struct device *dev)
static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
- dev->msi_domain = d;
+ dev->msi.domain = d;
#endif
}
diff --git a/include/linux/dma/qcom_adm.h b/include/linux/dma/qcom_adm.h
new file mode 100644
index 000000000000..af20df674f0c
--- /dev/null
+++ b/include/linux/dma/qcom_adm.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_DMA_QCOM_ADM_H
+#define __LINUX_DMA_QCOM_ADM_H
+
+#include <linux/types.h>
+
+struct qcom_adm_peripheral_config {
+ u32 crci;
+ u32 mux;
+};
+
+#endif /* __LINUX_DMA_QCOM_ADM_H */
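A minimal consumer sketch (not part of this patch; the CRCI value is hypothetical): with slave_id removed from struct dma_slave_config (see the dmaengine.h hunk below), ADM routing details travel through the generic peripheral_config pointer instead.

#include <linux/dmaengine.h>
#include <linux/dma/qcom_adm.h>

static int my_adm_slave_config(struct dma_chan *chan)
{
	/* Static, assuming the pointer may be retained past this call. */
	static struct qcom_adm_peripheral_config periph = {
		.crci = 1,	/* hypothetical CRCI for the peripheral */
		.mux = 0,
	};
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.peripheral_config = &periph,
		.peripheral_size = sizeof(periph),
	};

	return dmaengine_slave_config(chan, &cfg);
}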
diff --git a/include/linux/dma/xilinx_dpdma.h b/include/linux/dma/xilinx_dpdma.h
new file mode 100644
index 000000000000..02a4adf8921b
--- /dev/null
+++ b/include/linux/dma/xilinx_dpdma.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_DMA_XILINX_DPDMA_H
+#define __LINUX_DMA_XILINX_DPDMA_H
+
+#include <linux/types.h>
+
+struct xilinx_dpdma_peripheral_config {
+ bool video_group;
+};
+
+#endif /* __LINUX_DMA_XILINX_DPDMA_H */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 9000f3ffce8b..842d4f7ca752 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -50,6 +50,7 @@ enum dma_status {
*/
enum dma_transaction_type {
DMA_MEMCPY,
+ DMA_MEMCPY_SG,
DMA_XOR,
DMA_PQ,
DMA_XOR_VAL,
@@ -418,9 +419,6 @@ enum dma_slave_buswidth {
* @device_fc: Flow Controller Settings. Only valid for slave channels. Fill
* with 'true' if peripheral should be flow controller. Direction will be
* selected at Runtime.
- * @slave_id: Slave requester id. Only valid for slave channels. The dma
- * slave peripheral will have unique id as dma requester which need to be
- * pass as slave config.
* @peripheral_config: peripheral configuration for programming peripheral
* for dmaengine transfer
* @peripheral_size: peripheral configuration buffer size
@@ -448,7 +446,6 @@ struct dma_slave_config {
u32 src_port_window_size;
u32 dst_port_window_size;
bool device_fc;
- unsigned int slave_id;
void *peripheral_config;
size_t peripheral_size;
};
@@ -891,6 +888,11 @@ struct dma_device {
struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)(
struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
size_t len, unsigned long flags);
+ struct dma_async_tx_descriptor *(*device_prep_dma_memcpy_sg)(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags);
struct dma_async_tx_descriptor *(*device_prep_dma_xor)(
struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
unsigned int src_cnt, size_t len, unsigned long flags);
@@ -1051,6 +1053,20 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
len, flags);
}
+static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy_sg(
+ struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_memcpy_sg)
+ return NULL;
+
+ return chan->device->device_prep_dma_memcpy_sg(chan, dst_sg, dst_nents,
+ src_sg, src_nents,
+ flags);
+}
+
static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
enum dma_desc_metadata_mode mode)
{
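A minimal usage sketch for the new DMA_MEMCPY_SG capability (not part of this patch; error handling abbreviated, scatterlists assumed already DMA-mapped):

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int my_sg_copy(struct dma_chan *chan,
		      struct scatterlist *dst_sg, unsigned int dst_nents,
		      struct scatterlist *src_sg, unsigned int src_nents)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy_sg(chan, dst_sg, dst_nents,
					  src_sg, src_nents,
					  DMA_PREP_INTERRUPT);
	if (!tx)
		return -EIO;	/* channel lacks DMA_MEMCPY_SG support */

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}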
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
index 0aad774beaec..b1d26f9f1c9f 100644
--- a/include/linux/dnotify.h
+++ b/include/linux/dnotify.h
@@ -26,10 +26,9 @@ struct dnotify_struct {
FS_MODIFY | FS_MODIFY_CHILD |\
FS_ACCESS | FS_ACCESS_CHILD |\
FS_ATTRIB | FS_ATTRIB_CHILD |\
- FS_CREATE | FS_DN_RENAME |\
+ FS_CREATE | FS_RENAME |\
FS_MOVED_FROM | FS_MOVED_TO)
-extern int dir_notify_enable;
extern void dnotify_flush(struct file *, fl_owner_t);
extern int fcntl_dirnotify(int, struct file *, unsigned long);
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 0de9fb1fdc5a..ccd4d3f91c98 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -148,6 +148,52 @@ typedef struct {
u32 imagesize;
} efi_capsule_header_t;
+/* EFI_FIRMWARE_MANAGEMENT_CAPSULE_HEADER */
+struct efi_manage_capsule_header {
+ u32 ver;
+ u16 emb_drv_cnt;
+ u16 payload_cnt;
+ /*
+ * Variable-size array of the size given by the sum of
+ * emb_drv_cnt and payload_cnt.
+ */
+ u64 offset_list[];
+} __packed;
+
+/* EFI_FIRMWARE_MANAGEMENT_CAPSULE_IMAGE_HEADER */
+struct efi_manage_capsule_image_header {
+ u32 ver;
+ efi_guid_t image_type_id;
+ u8 image_index;
+ u8 reserved_bytes[3];
+ u32 image_size;
+ u32 vendor_code_size;
+ /* hw_ins was introduced in version 2 */
+ u64 hw_ins;
+ /* capsule_support was introduced in version 3 */
+ u64 capsule_support;
+} __packed;
+
+/* WIN_CERTIFICATE */
+struct win_cert {
+ u32 len;
+ u16 rev;
+ u16 cert_type;
+};
+
+/* WIN_CERTIFICATE_UEFI_GUID */
+struct win_cert_uefi_guid {
+ struct win_cert hdr;
+ efi_guid_t cert_type;
+ u8 cert_data[];
+};
+
+/* EFI_FIRMWARE_IMAGE_AUTHENTICATION */
+struct efi_image_auth {
+ u64 mon_count;
+ struct win_cert_uefi_guid auth_info;
+};
+
/*
* EFI capsule flags
*/
diff --git a/include/linux/elfcore-compat.h b/include/linux/elfcore-compat.h
index e272c3d452ce..54feb64e9b5d 100644
--- a/include/linux/elfcore-compat.h
+++ b/include/linux/elfcore-compat.h
@@ -43,6 +43,11 @@ struct compat_elf_prpsinfo
__compat_uid_t pr_uid;
__compat_gid_t pr_gid;
compat_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
+ /*
+ * The hard-coded 16 is derived from TASK_COMM_LEN, but it cannot be
+ * changed because the layout is exposed to userspace, so it must stay
+ * hard-coded here.
+ */
char pr_fname[16];
char pr_psargs[ELF_PRARGSZ];
};
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 957ebec35aad..746e081879a5 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -65,6 +65,11 @@ struct elf_prpsinfo
__kernel_gid_t pr_gid;
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
/* Lots missing */
+ /*
+ * The hard-coded 16 is derived from TASK_COMM_LEN, but it cannot be
+ * changed because the layout is exposed to userspace, so it must stay
+ * hard-coded here.
+ */
char pr_fname[16]; /* filename of executable */
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index a26f37a27167..11efc45de66a 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -111,7 +111,7 @@ struct ethtool_link_ext_state_info {
enum ethtool_link_ext_substate_bad_signal_integrity bad_signal_integrity;
enum ethtool_link_ext_substate_cable_issue cable_issue;
enum ethtool_link_ext_substate_module module;
- u8 __link_ext_substate;
+ u32 __link_ext_substate;
};
};
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 3260fe714846..fe848901fcc3 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -221,8 +221,6 @@ struct export_operations {
#define EXPORT_OP_NOATOMIC_ATTR (0x10) /* Filesystem cannot supply
atomic attribute updates
*/
-#define EXPORT_OP_SYNC_LOCKS (0x20) /* Filesystem can't do
- asychronous blocking locks */
unsigned long flags;
};
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h
index 616af2ea20f3..419cadcd7ff5 100644
--- a/include/linux/fanotify.h
+++ b/include/linux/fanotify.h
@@ -5,8 +5,6 @@
#include <linux/sysctl.h>
#include <uapi/linux/fanotify.h>
-extern struct ctl_table fanotify_table[]; /* for sysctl */
-
#define FAN_GROUP_FLAG(group, flag) \
((group)->fanotify_data.flags & (flag))
@@ -25,7 +23,7 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
#define FANOTIFY_CLASS_BITS (FAN_CLASS_NOTIF | FANOTIFY_PERM_CLASSES)
-#define FANOTIFY_FID_BITS (FAN_REPORT_FID | FAN_REPORT_DFID_NAME)
+#define FANOTIFY_FID_BITS (FAN_REPORT_DFID_NAME_TARGET)
#define FANOTIFY_INFO_MODES (FANOTIFY_FID_BITS | FAN_REPORT_PIDFD)
@@ -82,7 +80,8 @@ extern struct ctl_table fanotify_table[]; /* for sysctl */
* Directory entry modification events - reported only to directory
* where entry is modified and not to a watching parent.
*/
-#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE)
+#define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE | \
+ FAN_RENAME)
/* Events that can be reported with event->fd */
#define FANOTIFY_FD_EVENTS (FANOTIFY_PATH_EVENTS | FANOTIFY_PERM_EVENTS)
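A minimal userspace sketch for the new FAN_RENAME reporting (not part of this patch; assumes the FAN_RENAME and FAN_REPORT_DFID_NAME_TARGET uapi bits added alongside this change, error handling abbreviated):

#include <fcntl.h>
#include <sys/fanotify.h>

int watch_renames(const char *dir)
{
	int fd = fanotify_init(FAN_CLASS_NOTIF |
			       FAN_REPORT_DFID_NAME_TARGET, 0);
	if (fd < 0)
		return -1;

	/* One FAN_RENAME event carries both old and new entry info. */
	if (fanotify_mark(fd, FAN_MARK_ADD, FAN_RENAME | FAN_ONDIR,
			  AT_FDCWD, dir) < 0)
		return -1;

	return fd;	/* read fanotify_event_metadata records from fd */
}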
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 3da95842b207..02f362c661c8 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -262,7 +262,7 @@ struct fb_ops {
/* Draws a rectangle */
void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect);
- /* Copy data from area to another. Obsolete. */
+	/* Copy data from one area to another */
void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region);
	/* Draws an image to the display */
void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image);
diff --git a/include/linux/find.h b/include/linux/find.h
new file mode 100644
index 000000000000..5bb6db213bcb
--- /dev/null
+++ b/include/linux/find.h
@@ -0,0 +1,372 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_FIND_H_
+#define __LINUX_FIND_H_
+
+#ifndef __LINUX_BITMAP_H
+#error only <linux/bitmap.h> can be included directly
+#endif
+
+#include <linux/bitops.h>
+
+extern unsigned long _find_next_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long nbits,
+ unsigned long start, unsigned long invert, unsigned long le);
+extern unsigned long _find_first_bit(const unsigned long *addr, unsigned long size);
+extern unsigned long _find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size);
+extern unsigned long _find_first_zero_bit(const unsigned long *addr, unsigned long size);
+extern unsigned long _find_last_bit(const unsigned long *addr, unsigned long size);
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit(addr, NULL, size, offset, 0UL, 0);
+}
+#endif
+
+#ifndef find_next_and_bit
+/**
+ * find_next_and_bit - find the next set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the next set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_next_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr1 & *addr2 & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit(addr1, addr2, size, offset, 0UL, 0);
+}
+#endif
+
+#ifndef find_next_zero_bit
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number of the next zero bit.
+ * If no bits are zero, returns @size.
+ */
+static inline
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+ unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = *addr | ~GENMASK(size - 1, offset);
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_next_bit(addr, NULL, size, offset, ~0UL, 0);
+}
+#endif
+
+#ifndef find_first_bit
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first set bit.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_bit(addr, size);
+}
+#endif
+
+#ifndef find_first_and_bit
+/**
+ * find_first_and_bit - find the first set bit in both memory regions
+ * @addr1: The first address to base the search on
+ * @addr2: The second address to base the search on
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number of the first set bit in both memory regions.
+ * If no bits are set, returns @size.
+ */
+static inline
+unsigned long find_first_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr1 & *addr2 & GENMASK(size - 1, 0);
+
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_first_and_bit(addr1, addr2, size);
+}
+#endif
+
+#ifndef find_first_zero_bit
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first cleared bit.
+ * If no bits are zero, returns @size.
+ */
+static inline
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr | ~GENMASK(size - 1, 0);
+
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_first_zero_bit(addr, size);
+}
+#endif
+
+#ifndef find_last_bit
+/**
+ * find_last_bit - find the last set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The number of bits to search
+ *
+ * Returns the bit number of the last set bit, or @size if no bits are set.
+ */
+static inline
+unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *addr & GENMASK(size - 1, 0);
+
+ return val ? __fls(val) : size;
+ }
+
+ return _find_last_bit(addr, size);
+}
+#endif
+
+/**
+ * find_next_clump8 - find next 8-bit clump with set bits in a memory region
+ * @clump: location to store copy of found clump
+ * @addr: address to base the search on
+ * @size: bitmap size in number of bits
+ * @offset: bit offset at which to start searching
+ *
+ * Returns the bit offset for the next set clump; the found clump value is
+ * copied to the location pointed to by @clump. If no bits are set, returns @size.
+ */
+extern unsigned long find_next_clump8(unsigned long *clump,
+ const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+#define find_first_clump8(clump, bits, size) \
+ find_next_clump8((clump), (bits), (size), 0)
+
+#if defined(__LITTLE_ENDIAN)
+
+static inline unsigned long find_next_zero_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_zero_bit(addr, size, offset);
+}
+
+static inline unsigned long find_next_bit_le(const void *addr,
+ unsigned long size, unsigned long offset)
+{
+ return find_next_bit(addr, size, offset);
+}
+
+static inline unsigned long find_first_zero_bit_le(const void *addr,
+ unsigned long size)
+{
+ return find_first_zero_bit(addr, size);
+}
+
+#elif defined(__BIG_ENDIAN)
+
+#ifndef find_next_zero_bit_le
+static inline
+unsigned long find_next_zero_bit_le(const void *addr, unsigned
+ long size, unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *(const unsigned long *)addr;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = swab(val) | ~GENMASK(size - 1, offset);
+ return val == ~0UL ? size : ffz(val);
+ }
+
+ return _find_next_bit(addr, NULL, size, offset, ~0UL, 1);
+}
+#endif
+
+#ifndef find_next_bit_le
+static inline
+unsigned long find_next_bit_le(const void *addr, unsigned
+ long size, unsigned long offset)
+{
+ if (small_const_nbits(size)) {
+ unsigned long val = *(const unsigned long *)addr;
+
+ if (unlikely(offset >= size))
+ return size;
+
+ val = swab(val) & GENMASK(size - 1, offset);
+ return val ? __ffs(val) : size;
+ }
+
+ return _find_next_bit(addr, NULL, size, offset, 0UL, 1);
+}
+#endif
+
+#ifndef find_first_zero_bit_le
+#define find_first_zero_bit_le(addr, size) \
+ find_next_zero_bit_le((addr), (size), 0)
+#endif
+
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+#define for_each_set_bit(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), 0); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+ for ((bit) = find_next_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+#define for_each_clear_bit(bit, addr, size) \
+ for ((bit) = find_next_zero_bit((addr), (size), 0); \
+ (bit) < (size); \
+ (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_clear_bit() but use bit as value to start with */
+#define for_each_clear_bit_from(bit, addr, size) \
+ for ((bit) = find_next_zero_bit((addr), (size), (bit)); \
+ (bit) < (size); \
+ (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+
+/**
+ * for_each_set_bitrange - iterate over all set bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first set bit)
+ * @e: bit offset of end of current bitrange (first unset bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_bitrange(b, e, addr, size) \
+ for ((b) = find_next_bit((addr), (size), 0), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1); \
+ (b) < (size); \
+ (b) = find_next_bit((addr), (size), (e) + 1), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1))
+
+/**
+ * for_each_set_bitrange_from - iterate over all set bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first set bit); must be initialized
+ * @e: bit offset of end of current bitrange (first unset bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_bitrange_from(b, e, addr, size) \
+ for ((b) = find_next_bit((addr), (size), (b)), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1); \
+ (b) < (size); \
+ (b) = find_next_bit((addr), (size), (e) + 1), \
+ (e) = find_next_zero_bit((addr), (size), (b) + 1))
+
+/**
+ * for_each_clear_bitrange - iterate over all unset bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first unset bit)
+ * @e: bit offset of end of current bitrange (first set bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_clear_bitrange(b, e, addr, size) \
+ for ((b) = find_next_zero_bit((addr), (size), 0), \
+ (e) = find_next_bit((addr), (size), (b) + 1); \
+ (b) < (size); \
+ (b) = find_next_zero_bit((addr), (size), (e) + 1), \
+ (e) = find_next_bit((addr), (size), (b) + 1))
+
+/**
+ * for_each_clear_bitrange_from - iterate over all unset bit ranges [b; e)
+ * @b: bit offset of start of current bitrange (first set bit); must be initialized
+ * @e: bit offset of end of current bitrange (first unset bit)
+ * @addr: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_clear_bitrange_from(b, e, addr, size) \
+ for ((b) = find_next_zero_bit((addr), (size), (b)), \
+ (e) = find_next_bit((addr), (size), (b) + 1); \
+ (b) < (size); \
+ (b) = find_next_zero_bit((addr), (size), (e) + 1), \
+ (e) = find_next_bit((addr), (size), (b) + 1))
+
+/**
+ * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
+ * @start: bit offset to start search and to store the current iteration offset
+ * @clump: location to store copy of current 8-bit clump
+ * @bits: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_clump8(start, clump, bits, size) \
+ for ((start) = find_first_clump8(&(clump), (bits), (size)); \
+ (start) < (size); \
+ (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
+
+#endif /* __LINUX_FIND_H_ */
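A minimal sketch of the new range iterators (not part of this patch; the bitmap contents are hypothetical). Note that find.h refuses direct inclusion, so consumers go through <linux/bitmap.h>:

#include <linux/bitmap.h>
#include <linux/printk.h>

static void my_walk_ranges(const unsigned long *map, unsigned long nbits)
{
	unsigned long rs, re;	/* range start, end (exclusive) */

	/* Visits each maximal run of set bits exactly once. */
	for_each_set_bitrange(rs, re, map, nbits)
		pr_info("set range [%lu, %lu)\n", rs, re);
}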
diff --git a/include/linux/firmware/cirrus/cs_dsp.h b/include/linux/firmware/cirrus/cs_dsp.h
index 9ad9eaaaa552..38b4da3ddfe4 100644
--- a/include/linux/firmware/cirrus/cs_dsp.h
+++ b/include/linux/firmware/cirrus/cs_dsp.h
@@ -11,6 +11,11 @@
#ifndef __CS_DSP_H
#define __CS_DSP_H
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/list.h>
+#include <linux/regmap.h>
+
#define CS_ADSP2_REGION_0 BIT(0)
#define CS_ADSP2_REGION_1 BIT(1)
#define CS_ADSP2_REGION_2 BIT(2)
@@ -49,12 +54,14 @@ struct cs_dsp_region {
* struct cs_dsp_alg_region - Describes a logical algorithm region in DSP address space
* @list: List node for internal use
* @alg: Algorithm id
+ * @ver: Expected algorithm version
* @type: Memory region type
* @base: Address of region
*/
struct cs_dsp_alg_region {
struct list_head list;
unsigned int alg;
+ unsigned int ver;
int type;
unsigned int base;
};
@@ -69,8 +76,8 @@ struct cs_dsp_alg_region {
* @enabled: Flag indicating whether control is enabled
* @list: List node for internal use
* @cache: Cached value of the control
- * @offset: Offset of control within alg_region
- * @len: Length of the cached value
+ * @offset: Offset of control within alg_region in words
+ * @len: Length of the cached value in bytes
* @set: Flag indicating the value has been written by the user
* @flags: Bitfield of WMFW_CTL_FLAG_ control flags defined in wmfw.h
* @type: One of the WMFW_CTL_TYPE_ control types defined in wmfw.h
@@ -180,7 +187,8 @@ struct cs_dsp {
* struct cs_dsp_client_ops - client callbacks
* @control_add: Called under the pwr_lock when a control is created
* @control_remove: Called under the pwr_lock when a control is destroyed
- * @post_run: Called under the pwr_lock by cs_dsp_run()
+ * @pre_run: Called under the pwr_lock by cs_dsp_run() before the core is started
+ * @post_run: Called under the pwr_lock by cs_dsp_run() after the core is started
* @post_stop: Called under the pwr_lock by cs_dsp_stop()
* @watchdog_expired: Called when a watchdog expiry is detected
*
@@ -190,6 +198,7 @@ struct cs_dsp {
struct cs_dsp_client_ops {
int (*control_add)(struct cs_dsp_coeff_ctl *ctl);
void (*control_remove)(struct cs_dsp_coeff_ctl *ctl);
+ int (*pre_run)(struct cs_dsp *dsp);
int (*post_run)(struct cs_dsp *dsp);
void (*post_stop)(struct cs_dsp *dsp);
void (*watchdog_expired)(struct cs_dsp *dsp);
@@ -223,8 +232,10 @@ void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root);
void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp);
int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id);
-int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, const void *buf, size_t len);
-int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len);
+int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ const void *buf, size_t len);
+int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, unsigned int off,
+ void *buf, size_t len);
struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type,
unsigned int alg);
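A minimal sketch of the new word-offset parameter to the control accessors (not part of this patch; the control name, algorithm id and value packing are hypothetical):

#include <linux/firmware/cirrus/cs_dsp.h>
#include <linux/firmware/cirrus/wmfw.h>

static int my_patch_ctl_word(struct cs_dsp *dsp)
{
	struct cs_dsp_coeff_ctl *ctl;
	__be32 val = cpu_to_be32(1);	/* packing depends on the control */

	ctl = cs_dsp_get_ctl(dsp, "MY CONTROL", WMFW_CTL_TYPE_BYTES,
			     0x10001);
	if (!ctl)
		return -ENOENT;

	/* Write one 32-bit word at word offset 2 within the control. */
	return cs_dsp_coeff_write_ctrl(ctl, 2, &val, sizeof(val));
}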
diff --git a/include/linux/firmware/cirrus/wmfw.h b/include/linux/firmware/cirrus/wmfw.h
index a19bf7c6fc8b..74e5a4f6c13a 100644
--- a/include/linux/firmware/cirrus/wmfw.h
+++ b/include/linux/firmware/cirrus/wmfw.h
@@ -29,6 +29,7 @@
#define WMFW_CTL_TYPE_ACKED 0x1000 /* acked control */
#define WMFW_CTL_TYPE_HOSTEVENT 0x1001 /* event control */
#define WMFW_CTL_TYPE_HOST_BUFFER 0x1002 /* host buffer pointer */
+#define WMFW_CTL_TYPE_FWEVENT 0x1004 /* firmware event control */
struct wmfw_header {
char magic[4];
diff --git a/include/linux/firmware/xlnx-event-manager.h b/include/linux/firmware/xlnx-event-manager.h
new file mode 100644
index 000000000000..3f87c4929d21
--- /dev/null
+++ b/include/linux/firmware/xlnx-event-manager.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _FIRMWARE_XLNX_EVENT_MANAGER_H_
+#define _FIRMWARE_XLNX_EVENT_MANAGER_H_
+
+#include <linux/firmware/xlnx-zynqmp.h>
+
+#define CB_MAX_PAYLOAD_SIZE (4U) /* In payload, maximum 32 bytes */
+
+/************************** Exported Function *****************************/
+
+typedef void (*event_cb_func_t)(const u32 *payload, void *data);
+
+#if IS_REACHABLE(CONFIG_XLNX_EVENT_MANAGER)
+int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data);
+
+int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, event_cb_func_t cb_fun);
+#else
+static inline int xlnx_register_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, const bool wake,
+ event_cb_func_t cb_fun, void *data)
+{
+ return -ENODEV;
+}
+
+static inline int xlnx_unregister_event(const enum pm_api_cb_id cb_type, const u32 node_id,
+ const u32 event, event_cb_func_t cb_fun)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* _FIRMWARE_XLNX_EVENT_MANAGER_H_ */
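A minimal registration sketch (not part of this patch; the event mask and callback body are hypothetical):

#include <linux/firmware/xlnx-event-manager.h>
#include <linux/firmware/xlnx-zynqmp.h>

static void my_err_cb(const u32 *payload, void *data)
{
	/* payload[] carries up to CB_MAX_PAYLOAD_SIZE 32-bit words. */
}

static int my_register_pmc_err(void *priv)
{
	return xlnx_register_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1,
				   0x1 /* hypothetical event mask */,
				   false /* no wake */, my_err_cb, priv);
}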
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 47fd4e52a423..907cb01890cf 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2019 Xilinx
+ * Copyright (C) 2014-2021 Xilinx
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -64,8 +64,23 @@
#define XILINX_ZYNQMP_PM_FPGA_FULL 0x0U
#define XILINX_ZYNQMP_PM_FPGA_PARTIAL BIT(0)
+/*
+ * Node IDs for the Error Events.
+ */
+#define EVENT_ERROR_PMC_ERR1 (0x28100000U)
+#define EVENT_ERROR_PMC_ERR2 (0x28104000U)
+#define EVENT_ERROR_PSM_ERR1 (0x28108000U)
+#define EVENT_ERROR_PSM_ERR2 (0x2810C000U)
+
+enum pm_api_cb_id {
+ PM_INIT_SUSPEND_CB = 30,
+ PM_ACKNOWLEDGE_CB = 31,
+ PM_NOTIFY_CB = 32,
+};
+
enum pm_api_id {
PM_GET_API_VERSION = 1,
+ PM_REGISTER_NOTIFIER = 5,
PM_SYSTEM_SHUTDOWN = 12,
PM_REQUEST_NODE = 13,
PM_RELEASE_NODE = 14,
@@ -126,6 +141,8 @@ enum pm_ioctl_id {
/* Set healthy bit value */
IOCTL_SET_BOOT_HEALTH_STATUS = 17,
IOCTL_OSPI_MUX_SELECT = 21,
+ /* Register SGI to ATF */
+ IOCTL_REGISTER_SGI = 25,
};
enum pm_query_id {
@@ -427,6 +444,9 @@ int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param,
int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param,
u32 value);
int zynqmp_pm_load_pdi(const u32 src, const u64 address);
+int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable);
+int zynqmp_pm_feature(const u32 api_id);
#else
static inline int zynqmp_pm_get_api_version(u32 *version)
{
@@ -658,6 +678,17 @@ static inline int zynqmp_pm_load_pdi(const u32 src, const u64 address)
{
return -ENODEV;
}
+
+static inline int zynqmp_pm_register_notifier(const u32 node, const u32 event,
+ const u32 wake, const u32 enable)
+{
+ return -ENODEV;
+}
+
+static inline int zynqmp_pm_feature(const u32 api_id)
+{
+ return -ENODEV;
+}
#endif
#endif /* __FIRMWARE_ZYNQMP_H__ */
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h
index 6c3c28806ff1..223da48a6d18 100644
--- a/include/linux/fpga/fpga-bridge.h
+++ b/include/linux/fpga/fpga-bridge.h
@@ -23,6 +23,23 @@ struct fpga_bridge_ops {
};
/**
+ * struct fpga_bridge_info - collection of parameters for an FPGA Bridge
+ * @name: fpga bridge name
+ * @br_ops: pointer to structure of fpga bridge ops
+ * @priv: fpga bridge private data
+ *
+ * fpga_bridge_info contains parameters for the register function. These
+ * are separated into an info structure because some of them are optional
+ * and others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_bridge_info {
+ const char *name;
+ const struct fpga_bridge_ops *br_ops;
+ void *priv;
+};
+
+/**
* struct fpga_bridge - FPGA bridge structure
* @name: name of low level FPGA bridge
* @dev: FPGA bridge device
@@ -62,15 +79,10 @@ int of_fpga_bridge_get_to_list(struct device_node *np,
struct fpga_image_info *info,
struct list_head *bridge_list);
-struct fpga_bridge *fpga_bridge_create(struct device *dev, const char *name,
- const struct fpga_bridge_ops *br_ops,
- void *priv);
-void fpga_bridge_free(struct fpga_bridge *br);
-int fpga_bridge_register(struct fpga_bridge *br);
+struct fpga_bridge *
+fpga_bridge_register(struct device *parent, const char *name,
+ const struct fpga_bridge_ops *br_ops,
+ void *priv);
void fpga_bridge_unregister(struct fpga_bridge *br);
-struct fpga_bridge
-*devm_fpga_bridge_create(struct device *dev, const char *name,
- const struct fpga_bridge_ops *br_ops, void *priv);
-
#endif /* _LINUX_FPGA_BRIDGE_H */
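A minimal sketch of the collapsed create+register call (not part of this patch; my_enable_set and the platform wiring are hypothetical):

#include <linux/err.h>
#include <linux/fpga/fpga-bridge.h>
#include <linux/platform_device.h>

static int my_enable_set(struct fpga_bridge *bridge, bool enable)
{
	return 0;	/* toggle the real gating hardware here */
}

static const struct fpga_bridge_ops my_br_ops = {
	.enable_set = my_enable_set,
};

static int my_bridge_probe(struct platform_device *pdev)
{
	struct fpga_bridge *br;

	br = fpga_bridge_register(&pdev->dev, "my-bridge", &my_br_ops,
				  NULL /* priv */);
	if (IS_ERR(br))
		return PTR_ERR(br);

	platform_set_drvdata(pdev, br);
	return 0;
}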
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 474c1f506307..0f9468771bb9 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -106,6 +106,36 @@ struct fpga_image_info {
};
/**
+ * struct fpga_compat_id - id for compatibility check
+ *
+ * @id_h: high 64bit of the compat_id
+ * @id_l: low 64bit of the compat_id
+ */
+struct fpga_compat_id {
+ u64 id_h;
+ u64 id_l;
+};
+
+/**
+ * struct fpga_manager_info - collection of parameters for an FPGA Manager
+ * @name: fpga manager name
+ * @compat_id: FPGA manager id for compatibility check.
+ * @mops: pointer to structure of fpga manager ops
+ * @priv: fpga manager private data
+ *
+ * fpga_manager_info contains parameters for the register_full function.
+ * These are separated into an info structure because some are optional
+ * and others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_manager_info {
+ const char *name;
+ struct fpga_compat_id *compat_id;
+ const struct fpga_manager_ops *mops;
+ void *priv;
+};
+
+/**
* struct fpga_manager_ops - ops for low level fpga manager drivers
* @initial_header_size: Maximum number of bytes that should be passed into write_init
* @state: returns an enum value of the FPGA's state
@@ -144,17 +174,6 @@ struct fpga_manager_ops {
#define FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR BIT(4)
/**
- * struct fpga_compat_id - id for compatibility check
- *
- * @id_h: high 64bit of the compat_id
- * @id_l: low 64bit of the compat_id
- */
-struct fpga_compat_id {
- u64 id_h;
- u64 id_l;
-};
-
-/**
* struct fpga_manager - fpga manager structure
* @name: name of low level fpga manager
* @dev: fpga manager device
@@ -191,17 +210,18 @@ struct fpga_manager *fpga_mgr_get(struct device *dev);
void fpga_mgr_put(struct fpga_manager *mgr);
-struct fpga_manager *fpga_mgr_create(struct device *dev, const char *name,
- const struct fpga_manager_ops *mops,
- void *priv);
-void fpga_mgr_free(struct fpga_manager *mgr);
-int fpga_mgr_register(struct fpga_manager *mgr);
-void fpga_mgr_unregister(struct fpga_manager *mgr);
+struct fpga_manager *
+fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
-int devm_fpga_mgr_register(struct device *dev, struct fpga_manager *mgr);
+struct fpga_manager *
+fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv);
+void fpga_mgr_unregister(struct fpga_manager *mgr);
-struct fpga_manager *devm_fpga_mgr_create(struct device *dev, const char *name,
- const struct fpga_manager_ops *mops,
- void *priv);
+struct fpga_manager *
+devm_fpga_mgr_register_full(struct device *parent, const struct fpga_manager_info *info);
+struct fpga_manager *
+devm_fpga_mgr_register(struct device *parent, const char *name,
+ const struct fpga_manager_ops *mops, void *priv);
#endif /*_LINUX_FPGA_MGR_H */
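A minimal sketch of the info-based registration (not part of this patch; ops and compat id values are hypothetical, and compat_id is static on the assumption that the registered pointer must outlive the manager):

#include <linux/err.h>
#include <linux/fpga/fpga-mgr.h>

static int my_mgr_register(struct device *parent,
			   const struct fpga_manager_ops *mops, void *priv)
{
	static struct fpga_compat_id compat = { .id_h = 1, .id_l = 2 };
	struct fpga_manager_info info = {
		.name = "my-fpga-manager",
		.compat_id = &compat,	/* optional field */
		.mops = mops,
		.priv = priv,
	};
	struct fpga_manager *mgr;

	mgr = devm_fpga_mgr_register_full(parent, &info);
	return PTR_ERR_OR_ZERO(mgr);
}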
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
index 27cb706275db..3b87f232425c 100644
--- a/include/linux/fpga/fpga-region.h
+++ b/include/linux/fpga/fpga-region.h
@@ -7,6 +7,27 @@
#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-bridge.h>
+struct fpga_region;
+
+/**
+ * struct fpga_region_info - collection of parameters for an FPGA Region
+ * @mgr: fpga region manager
+ * @compat_id: FPGA region id for compatibility check.
+ * @priv: fpga region private data
+ * @get_bridges: optional function to get bridges to a list
+ *
+ * fpga_region_info contains parameters for the register_full function.
+ * These are separated into an info structure because some are optional
+ * and others could be added in the future. The info structure facilitates
+ * maintaining a stable API.
+ */
+struct fpga_region_info {
+ struct fpga_manager *mgr;
+ struct fpga_compat_id *compat_id;
+ void *priv;
+ int (*get_bridges)(struct fpga_region *region);
+};
+
/**
* struct fpga_region - FPGA Region structure
* @dev: FPGA Region device
@@ -37,15 +58,12 @@ struct fpga_region *fpga_region_class_find(
int fpga_region_program_fpga(struct fpga_region *region);
-struct fpga_region
-*fpga_region_create(struct device *dev, struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *));
-void fpga_region_free(struct fpga_region *region);
-int fpga_region_register(struct fpga_region *region);
-void fpga_region_unregister(struct fpga_region *region);
+struct fpga_region *
+fpga_region_register_full(struct device *parent, const struct fpga_region_info *info);
-struct fpga_region
-*devm_fpga_region_create(struct device *dev, struct fpga_manager *mgr,
- int (*get_bridges)(struct fpga_region *));
+struct fpga_region *
+fpga_region_register(struct device *parent, struct fpga_manager *mgr,
+ int (*get_bridges)(struct fpga_region *));
+void fpga_region_unregister(struct fpga_region *region);
#endif /* _FPGA_REGION_H */
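A minimal sketch mirroring the manager registration above (not part of this patch):

#include <linux/fpga/fpga-region.h>

static struct fpga_region *
my_region_register(struct device *parent, struct fpga_manager *mgr,
		   int (*get_bridges)(struct fpga_region *))
{
	struct fpga_region_info info = {
		.mgr = mgr,
		.get_bridges = get_bridges,	/* optional, may be NULL */
	};

	return fpga_region_register_full(parent, &info);
}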
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
index b07d88c92bb2..a631bac12220 100644
--- a/include/linux/frontswap.h
+++ b/include/linux/frontswap.h
@@ -7,31 +7,17 @@
#include <linux/bitops.h>
#include <linux/jump_label.h>
-/*
- * Return code to denote that requested number of
- * frontswap pages are unused(moved to page cache).
- * Used in shmem_unuse and try_to_unuse.
- */
-#define FRONTSWAP_PAGES_UNUSED 2
-
struct frontswap_ops {
void (*init)(unsigned); /* this swap type was just swapon'ed */
int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
- struct frontswap_ops *next; /* private pointer to next ops */
};
-extern void frontswap_register_ops(struct frontswap_ops *ops);
-extern void frontswap_shrink(unsigned long);
-extern unsigned long frontswap_curr_pages(void);
-extern void frontswap_writethrough(bool);
-#define FRONTSWAP_HAS_EXCLUSIVE_GETS
-extern void frontswap_tmem_exclusive_gets(bool);
+int frontswap_register_ops(const struct frontswap_ops *ops);
-extern bool __frontswap_test(struct swap_info_struct *, pgoff_t);
-extern void __frontswap_init(unsigned type, unsigned long *map);
+extern void frontswap_init(unsigned type, unsigned long *map);
extern int __frontswap_store(struct page *page);
extern int __frontswap_load(struct page *page);
extern void __frontswap_invalidate_page(unsigned, pgoff_t);
@@ -45,11 +31,6 @@ static inline bool frontswap_enabled(void)
return static_branch_unlikely(&frontswap_enabled_key);
}
-static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
-{
- return __frontswap_test(sis, offset);
-}
-
static inline void frontswap_map_set(struct swap_info_struct *p,
unsigned long *map)
{
@@ -68,11 +49,6 @@ static inline bool frontswap_enabled(void)
return false;
}
-static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
-{
- return false;
-}
-
static inline void frontswap_map_set(struct swap_info_struct *p,
unsigned long *map)
{
@@ -112,11 +88,4 @@ static inline void frontswap_invalidate_area(unsigned type)
__frontswap_invalidate_area(type);
}
-static inline void frontswap_init(unsigned type, unsigned long *map)
-{
-#ifdef CONFIG_FRONTSWAP
- __frontswap_init(type, map);
-#endif
-}
-
#endif /* _LINUX_FRONTSWAP_H */
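A minimal backend sketch (not part of this patch; the my_fs_* callbacks are hypothetical): ops tables are now const, the private "next" chaining pointer is gone, and registration can fail, so the result must be checked.

#include <linux/frontswap.h>
#include <linux/init.h>

struct page;

void my_fs_init(unsigned type);
int my_fs_store(unsigned type, pgoff_t offset, struct page *page);
int my_fs_load(unsigned type, pgoff_t offset, struct page *page);
void my_fs_invalidate_page(unsigned type, pgoff_t offset);
void my_fs_invalidate_area(unsigned type);

static const struct frontswap_ops my_frontswap_ops = {
	.init = my_fs_init,
	.store = my_fs_store,
	.load = my_fs_load,
	.invalidate_page = my_fs_invalidate_page,
	.invalidate_area = my_fs_invalidate_area,
};

static int __init my_backend_init(void)
{
	return frontswap_register_ops(&my_frontswap_ops);
}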
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 493b87e3616b..e2d892b201b0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -79,15 +79,8 @@ extern void __init inode_init_early(void);
extern void __init files_init(void);
extern void __init files_maxfiles_init(void);
-extern struct files_stat_struct files_stat;
extern unsigned long get_max_files(void);
extern unsigned int sysctl_nr_open;
-extern struct inodes_stat_t inodes_stat;
-extern int leases_enable, lease_break_time;
-extern int sysctl_protected_symlinks;
-extern int sysctl_protected_hardlinks;
-extern int sysctl_protected_fifos;
-extern int sysctl_protected_regular;
typedef __kernel_rwf_t rwf_t;
@@ -1221,13 +1214,13 @@ static inline int fcntl_setlk(unsigned int fd, struct file *file,
#if BITS_PER_LONG == 32
static inline int fcntl_getlk64(struct file *file, unsigned int cmd,
- struct flock64 __user *user)
+ struct flock64 *user)
{
return -EINVAL;
}
static inline int fcntl_setlk64(unsigned int fd, struct file *file,
- unsigned int cmd, struct flock64 __user *user)
+ unsigned int cmd, struct flock64 *user)
{
return -EACCES;
}
@@ -1490,7 +1483,7 @@ struct super_block {
#ifdef CONFIG_FS_VERITY
const struct fsverity_operations *s_vop;
#endif
-#ifdef CONFIG_UNICODE
+#if IS_ENABLED(CONFIG_UNICODE)
struct unicode_map *s_encoding;
__u16 s_encoding_flags;
#endif
@@ -1542,11 +1535,6 @@ struct super_block {
const struct dentry_operations *s_d_op; /* default d_op for dentries */
- /*
- * Saved pool identifier for cleancache (-1 means none)
- */
- int cleancache_poolid;
-
struct shrinker s_shrink; /* per-sb shrinker handle */
/* Number of inodes with nlink == 0 but still referenced */
@@ -2173,6 +2161,7 @@ struct super_operations {
#define S_ENCRYPTED (1 << 14) /* Encrypted file (using fs/crypto/) */
#define S_CASEFOLD (1 << 15) /* Casefolded file */
#define S_VERITY (1 << 16) /* Verity file (using fs/verity/) */
+#define S_KERNEL_FILE (1 << 17) /* File is in use by the kernel (eg. fs/cachefiles) */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -2342,6 +2331,8 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
* Used to detect that mark_inode_dirty() should not move
* inode between dirty lists.
*
+ * I_PINNING_FSCACHE_WB Inode is pinning an fscache object for writeback.
+ *
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
@@ -2364,6 +2355,7 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
#define I_CREATING (1 << 15)
#define I_DONTCACHE (1 << 16)
#define I_SYNC_QUEUED (1 << 17)
+#define I_PINNING_FSCACHE_WB (1 << 18)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2786,8 +2778,6 @@ static inline int filemap_fdatawait(struct address_space *mapping)
extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
loff_t lend);
-extern bool filemap_range_needs_writeback(struct address_space *,
- loff_t lstart, loff_t lend);
extern int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
extern int __filemap_fdatawrite_range(struct address_space *mapping,
@@ -3091,6 +3081,7 @@ extern void unlock_new_inode(struct inode *);
extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
extern void evict_inodes(struct super_block *sb);
+void dump_mapping(const struct address_space *);
/*
* Userspace may rely on the the inode number being non-zero. For example, glibc
@@ -3530,12 +3521,6 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos);
struct ctl_table;
-int proc_nr_files(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int proc_nr_dentry(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-int proc_nr_inodes(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
int __init list_bdev_fs_names(char *buf, size_t size);
#define __FMODE_EXEC ((__force int) FMODE_EXEC)
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index 6b54982fc5f3..13fa6f3df8e4 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -142,6 +142,8 @@ extern void put_fs_context(struct fs_context *fc);
extern int vfs_parse_fs_param_source(struct fs_context *fc,
struct fs_parameter *param);
extern void fc_drop_locked(struct fs_context *fc);
+int reconfigure_single(struct super_block *s,
+ int flags, void *data);
/*
* sget() wrappers to be called from the ->get_tree() op.
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 8d39491c5f9f..a174cedf4d90 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General filesystem caching backing cache interface
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* NOTE!!! See:
@@ -15,207 +15,34 @@
#define _LINUX_FSCACHE_CACHE_H
#include <linux/fscache.h>
-#include <linux/sched.h>
-#include <linux/workqueue.h>
-#define NR_MAXCACHES BITS_PER_LONG
-
-struct fscache_cache;
-struct fscache_cache_ops;
-struct fscache_object;
-struct fscache_operation;
-
-enum fscache_obj_ref_trace {
- fscache_obj_get_add_to_deps,
- fscache_obj_get_queue,
- fscache_obj_put_alloc_fail,
- fscache_obj_put_attach_fail,
- fscache_obj_put_drop_obj,
- fscache_obj_put_enq_dep,
- fscache_obj_put_queue,
- fscache_obj_put_work,
- fscache_obj_ref__nr_traces
+enum fscache_cache_trace;
+enum fscache_cookie_trace;
+enum fscache_access_trace;
+
+enum fscache_cache_state {
+ FSCACHE_CACHE_IS_NOT_PRESENT, /* No cache is present for this name */
+ FSCACHE_CACHE_IS_PREPARING, /* A cache is preparing to come live */
+ FSCACHE_CACHE_IS_ACTIVE, /* Attached cache is active and can be used */
+ FSCACHE_CACHE_GOT_IOERROR, /* Attached cache stopped on I/O error */
+ FSCACHE_CACHE_IS_WITHDRAWN, /* Attached cache is being withdrawn */
+#define NR__FSCACHE_CACHE_STATE (FSCACHE_CACHE_IS_WITHDRAWN + 1)
};
/*
- * cache tag definition
- */
-struct fscache_cache_tag {
- struct list_head link;
- struct fscache_cache *cache; /* cache referred to by this tag */
- unsigned long flags;
-#define FSCACHE_TAG_RESERVED 0 /* T if tag is reserved for a cache */
- atomic_t usage;
- char name[]; /* tag name */
-};
-
-/*
- * cache definition
+ * Cache cookie.
*/
struct fscache_cache {
const struct fscache_cache_ops *ops;
- struct fscache_cache_tag *tag; /* tag representing this cache */
- struct kobject *kobj; /* system representation of this cache */
- struct list_head link; /* link in list of caches */
- size_t max_index_size; /* maximum size of index data */
- char identifier[36]; /* cache label */
-
- /* node management */
- struct work_struct op_gc; /* operation garbage collector */
- struct list_head object_list; /* list of data/index objects */
- struct list_head op_gc_list; /* list of ops to be deleted */
- spinlock_t object_list_lock;
- spinlock_t op_gc_list_lock;
+ struct list_head cache_link; /* Link in cache list */
+ void *cache_priv; /* Private cache data (or NULL) */
+ refcount_t ref;
+	atomic_t n_volumes;		/* Number of active volumes */
+ atomic_t n_accesses; /* Number of in-progress accesses on the cache */
atomic_t object_count; /* no. of live objects in this cache */
- struct fscache_object *fsdef; /* object for the fsdef index */
- unsigned long flags;
-#define FSCACHE_IOERROR 0 /* cache stopped on I/O error */
-#define FSCACHE_CACHE_WITHDRAWN 1 /* cache has been withdrawn */
-};
-
-extern wait_queue_head_t fscache_cache_cleared_wq;
-
-/*
- * operation to be applied to a cache object
- * - retrieval initiation operations are done in the context of the process
- * that issued them, and not in an async thread pool
- */
-typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
-typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
-typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op);
-
-enum fscache_operation_state {
- FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */
- FSCACHE_OP_ST_INITIALISED, /* Op is initialised */
- FSCACHE_OP_ST_PENDING, /* Op is blocked from running */
- FSCACHE_OP_ST_IN_PROGRESS, /* Op is in progress */
- FSCACHE_OP_ST_COMPLETE, /* Op is complete */
- FSCACHE_OP_ST_CANCELLED, /* Op has been cancelled */
- FSCACHE_OP_ST_DEAD /* Op is now dead */
-};
-
-struct fscache_operation {
- struct work_struct work; /* record for async ops */
- struct list_head pend_link; /* link in object->pending_ops */
- struct fscache_object *object; /* object to be operated upon */
-
- unsigned long flags;
-#define FSCACHE_OP_TYPE 0x000f /* operation type */
-#define FSCACHE_OP_ASYNC 0x0001 /* - async op, processor may sleep for disk */
-#define FSCACHE_OP_MYTHREAD 0x0002 /* - processing is done by issuing thread, not pool */
-#define FSCACHE_OP_WAITING 4 /* cleared when op is woken */
-#define FSCACHE_OP_EXCLUSIVE 5 /* exclusive op, other ops must wait */
-#define FSCACHE_OP_DEC_READ_CNT 6 /* decrement object->n_reads on destruction */
-#define FSCACHE_OP_UNUSE_COOKIE 7 /* call fscache_unuse_cookie() on completion */
-#define FSCACHE_OP_KEEP_FLAGS 0x00f0 /* flags to keep when repurposing an op */
-
- enum fscache_operation_state state;
- atomic_t usage;
- unsigned debug_id; /* debugging ID */
-
- /* operation processor callback
- * - can be NULL if FSCACHE_OP_WAITING is going to be used to perform
- * the op in a non-pool thread */
- fscache_operation_processor_t processor;
-
- /* Operation cancellation cleanup (optional) */
- fscache_operation_cancel_t cancel;
-
- /* operation releaser */
- fscache_operation_release_t release;
-};
-
-extern atomic_t fscache_op_debug_id;
-extern void fscache_op_work_func(struct work_struct *work);
-
-extern void fscache_enqueue_operation(struct fscache_operation *);
-extern void fscache_op_complete(struct fscache_operation *, bool);
-extern void fscache_put_operation(struct fscache_operation *);
-extern void fscache_operation_init(struct fscache_cookie *,
- struct fscache_operation *,
- fscache_operation_processor_t,
- fscache_operation_cancel_t,
- fscache_operation_release_t);
-
-/*
- * data read operation
- */
-struct fscache_retrieval {
- struct fscache_operation op;
- struct fscache_cookie *cookie; /* The netfs cookie */
- struct address_space *mapping; /* netfs pages */
- fscache_rw_complete_t end_io_func; /* function to call on I/O completion */
- void *context; /* netfs read context (pinned) */
- struct list_head to_do; /* list of things to be done by the backend */
- atomic_t n_pages; /* number of pages to be retrieved */
-};
-
-typedef int (*fscache_page_retrieval_func_t)(struct fscache_retrieval *op,
- struct page *page,
- gfp_t gfp);
-
-typedef int (*fscache_pages_retrieval_func_t)(struct fscache_retrieval *op,
- struct list_head *pages,
- unsigned *nr_pages,
- gfp_t gfp);
-
-/**
- * fscache_get_retrieval - Get an extra reference on a retrieval operation
- * @op: The retrieval operation to get a reference on
- *
- * Get an extra reference on a retrieval operation.
- */
-static inline
-struct fscache_retrieval *fscache_get_retrieval(struct fscache_retrieval *op)
-{
- atomic_inc(&op->op.usage);
- return op;
-}
-
-/**
- * fscache_enqueue_retrieval - Enqueue a retrieval operation for processing
- * @op: The retrieval operation affected
- *
- * Enqueue a retrieval operation for processing by the FS-Cache thread pool.
- */
-static inline void fscache_enqueue_retrieval(struct fscache_retrieval *op)
-{
- fscache_enqueue_operation(&op->op);
-}
-
-/**
- * fscache_retrieval_complete - Record (partial) completion of a retrieval
- * @op: The retrieval operation affected
- * @n_pages: The number of pages to account for
- */
-static inline void fscache_retrieval_complete(struct fscache_retrieval *op,
- int n_pages)
-{
- if (atomic_sub_return_relaxed(n_pages, &op->n_pages) <= 0)
- fscache_op_complete(&op->op, false);
-}
-
-/**
- * fscache_put_retrieval - Drop a reference to a retrieval operation
- * @op: The retrieval operation affected
- *
- * Drop a reference to a retrieval operation.
- */
-static inline void fscache_put_retrieval(struct fscache_retrieval *op)
-{
- fscache_put_operation(&op->op);
-}
-
-/*
- * cached page storage work item
- * - used to do three things:
- * - batch writes to the cache
- * - do cache writes asynchronously
- * - defer writes until cache object lookup completion
- */
-struct fscache_storage {
- struct fscache_operation op;
- pgoff_t store_limit; /* don't write more than this */
+ unsigned int debug_id;
+ enum fscache_cache_state state;
+ char *name;
};
/*
@@ -225,341 +52,154 @@ struct fscache_cache_ops {
/* name of cache provider */
const char *name;
- /* allocate an object record for a cookie */
- struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
- struct fscache_cookie *cookie);
-
- /* look up the object for a cookie
- * - return -ETIMEDOUT to be requeued
- */
- int (*lookup_object)(struct fscache_object *object);
-
- /* finished looking up */
- void (*lookup_complete)(struct fscache_object *object);
-
- /* increment the usage count on this object (may fail if unmounting) */
- struct fscache_object *(*grab_object)(struct fscache_object *object,
- enum fscache_obj_ref_trace why);
+ /* Acquire a volume */
+ void (*acquire_volume)(struct fscache_volume *volume);
- /* pin an object in the cache */
- int (*pin_object)(struct fscache_object *object);
+ /* Free the cache's data attached to a volume */
+ void (*free_volume)(struct fscache_volume *volume);
- /* unpin an object in the cache */
- void (*unpin_object)(struct fscache_object *object);
+ /* Look up a cookie in the cache */
+ bool (*lookup_cookie)(struct fscache_cookie *cookie);
- /* check the consistency between the backing cache and the FS-Cache
- * cookie */
- int (*check_consistency)(struct fscache_operation *op);
+ /* Withdraw an object without any cookie access counts held */
+ void (*withdraw_cookie)(struct fscache_cookie *cookie);
- /* store the updated auxiliary data on an object */
- void (*update_object)(struct fscache_object *object);
+ /* Change the size of a data object */
+ void (*resize_cookie)(struct netfs_cache_resources *cres,
+ loff_t new_size);
/* Invalidate an object */
- void (*invalidate_object)(struct fscache_operation *op);
-
- /* discard the resources pinned by an object and effect retirement if
- * necessary */
- void (*drop_object)(struct fscache_object *object);
-
- /* dispose of a reference to an object */
- void (*put_object)(struct fscache_object *object,
- enum fscache_obj_ref_trace why);
-
- /* sync a cache */
- void (*sync_cache)(struct fscache_cache *cache);
-
- /* notification that the attributes of a non-index object (such as
- * i_size) have changed */
- int (*attr_changed)(struct fscache_object *object);
-
- /* reserve space for an object's data and associated metadata */
- int (*reserve_space)(struct fscache_object *object, loff_t i_size);
-
- /* request a backing block for a page be read or allocated in the
- * cache */
- fscache_page_retrieval_func_t read_or_alloc_page;
-
- /* request backing blocks for a list of pages be read or allocated in
- * the cache */
- fscache_pages_retrieval_func_t read_or_alloc_pages;
-
- /* request a backing block for a page be allocated in the cache so that
- * it can be written directly */
- fscache_page_retrieval_func_t allocate_page;
-
- /* request backing blocks for pages be allocated in the cache so that
- * they can be written directly */
- fscache_pages_retrieval_func_t allocate_pages;
-
- /* write a page to its backing block in the cache */
- int (*write_page)(struct fscache_storage *op, struct page *page);
-
- /* detach backing block from a page (optional)
- * - must release the cookie lock before returning
- * - may sleep
- */
- void (*uncache_page)(struct fscache_object *object,
- struct page *page);
-
- /* dissociate a cache from all the pages it was backing */
- void (*dissociate_pages)(struct fscache_cache *cache);
+ bool (*invalidate_cookie)(struct fscache_cookie *cookie);
- /* Begin a read operation for the netfs lib */
- int (*begin_read_operation)(struct netfs_read_request *rreq,
- struct fscache_retrieval *op);
-};
-
-extern struct fscache_cookie fscache_fsdef_index;
+ /* Begin an operation for the netfs lib */
+ bool (*begin_operation)(struct netfs_cache_resources *cres,
+ enum fscache_want_state want_state);
-/*
- * Event list for fscache_object::{event_mask,events}
- */
-enum {
- FSCACHE_OBJECT_EV_NEW_CHILD, /* T if object has a new child */
- FSCACHE_OBJECT_EV_PARENT_READY, /* T if object's parent is ready */
- FSCACHE_OBJECT_EV_UPDATE, /* T if object should be updated */
- FSCACHE_OBJECT_EV_INVALIDATE, /* T if cache requested object invalidation */
- FSCACHE_OBJECT_EV_CLEARED, /* T if accessors all gone */
- FSCACHE_OBJECT_EV_ERROR, /* T if fatal error occurred during processing */
- FSCACHE_OBJECT_EV_KILL, /* T if netfs relinquished or cache withdrew object */
- NR_FSCACHE_OBJECT_EVENTS
-};
-
-#define FSCACHE_OBJECT_EVENTS_MASK ((1UL << NR_FSCACHE_OBJECT_EVENTS) - 1)
-
-/*
- * States for object state machine.
- */
-struct fscache_transition {
- unsigned long events;
- const struct fscache_state *transit_to;
+ /* Prepare to write to a live cache object */
+ void (*prepare_to_write)(struct fscache_cookie *cookie);
};
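
As a rough illustration of the slimmed-down backend contract above, a cache backend now fills in just this handful of ops. The mycache_* handlers below are hypothetical placeholders, not part of this patch:

	static const struct fscache_cache_ops mycache_ops = {
		.name			= "mycache",
		.acquire_volume		= mycache_acquire_volume,
		.free_volume		= mycache_free_volume,
		.lookup_cookie		= mycache_lookup_cookie,
		.withdraw_cookie	= mycache_withdraw_cookie,
		.resize_cookie		= mycache_resize_cookie,
		.invalidate_cookie	= mycache_invalidate_cookie,
		.begin_operation	= mycache_begin_operation,
		.prepare_to_write	= mycache_prepare_to_write,
	};
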
-struct fscache_state {
- char name[24];
- char short_name[8];
- const struct fscache_state *(*work)(struct fscache_object *object,
- int event);
- const struct fscache_transition transitions[];
-};
+extern struct workqueue_struct *fscache_wq;
+extern wait_queue_head_t fscache_clearance_waiters;
/*
- * on-disk cache file or index handle
+ * out-of-line cache backend functions
*/
-struct fscache_object {
- const struct fscache_state *state; /* Object state machine state */
- const struct fscache_transition *oob_table; /* OOB state transition table */
- int debug_id; /* debugging ID */
- int n_children; /* number of child objects */
- int n_ops; /* number of extant ops on object */
- int n_obj_ops; /* number of object ops outstanding on object */
- int n_in_progress; /* number of ops in progress */
- int n_exclusive; /* number of exclusive ops queued or in progress */
- atomic_t n_reads; /* number of read ops in progress */
- spinlock_t lock; /* state and operations lock */
-
- unsigned long lookup_jif; /* time at which lookup started */
- unsigned long oob_event_mask; /* OOB events this object is interested in */
- unsigned long event_mask; /* events this object is interested in */
- unsigned long events; /* events to be processed by this object
- * (order is important - using fls) */
-
- unsigned long flags;
-#define FSCACHE_OBJECT_LOCK 0 /* T if object is busy being processed */
-#define FSCACHE_OBJECT_PENDING_WRITE 1 /* T if object has pending write */
-#define FSCACHE_OBJECT_WAITING 2 /* T if object is waiting on its parent */
-#define FSCACHE_OBJECT_IS_LIVE 3 /* T if object is not withdrawn or relinquished */
-#define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */
-#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
-#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
-#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
-#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
-
- struct list_head cache_link; /* link in cache->object_list */
- struct hlist_node cookie_link; /* link in cookie->backing_objects */
- struct fscache_cache *cache; /* cache that supplied this object */
- struct fscache_cookie *cookie; /* netfs's file/index object */
- struct fscache_object *parent; /* parent object */
- struct work_struct work; /* attention scheduling record */
- struct list_head dependents; /* FIFO of dependent objects */
- struct list_head dep_link; /* link in parent's dependents list */
- struct list_head pending_ops; /* unstarted operations on this object */
- pgoff_t store_limit; /* current storage limit */
- loff_t store_limit_l; /* current storage limit */
-};
-
-extern void fscache_object_init(struct fscache_object *, struct fscache_cookie *,
- struct fscache_cache *);
-extern void fscache_object_destroy(struct fscache_object *);
-
-extern void fscache_object_lookup_negative(struct fscache_object *object);
-extern void fscache_obtained_object(struct fscache_object *object);
-
-static inline bool fscache_object_is_live(struct fscache_object *object)
-{
- return test_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags);
-}
-
-static inline bool fscache_object_is_dying(struct fscache_object *object)
-{
- return !fscache_object_is_live(object);
-}
-
-static inline bool fscache_object_is_available(struct fscache_object *object)
-{
- return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
-}
+extern struct rw_semaphore fscache_addremove_sem;
+extern struct fscache_cache *fscache_acquire_cache(const char *name);
+extern void fscache_relinquish_cache(struct fscache_cache *cache);
+extern int fscache_add_cache(struct fscache_cache *cache,
+ const struct fscache_cache_ops *ops,
+ void *cache_priv);
+extern void fscache_withdraw_cache(struct fscache_cache *cache);
+extern void fscache_withdraw_volume(struct fscache_volume *volume);
+extern void fscache_withdraw_cookie(struct fscache_cookie *cookie);
-static inline bool fscache_cache_is_broken(struct fscache_object *object)
-{
- return test_bit(FSCACHE_IOERROR, &object->cache->flags);
-}
+extern void fscache_io_error(struct fscache_cache *cache);
-static inline bool fscache_object_is_active(struct fscache_object *object)
-{
- return fscache_object_is_available(object) &&
- fscache_object_is_live(object) &&
- !fscache_cache_is_broken(object);
-}
+extern void fscache_end_volume_access(struct fscache_volume *volume,
+ struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+
+extern struct fscache_cookie *fscache_get_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where);
+extern void fscache_put_cookie(struct fscache_cookie *cookie,
+ enum fscache_cookie_trace where);
+extern void fscache_end_cookie_access(struct fscache_cookie *cookie,
+ enum fscache_access_trace why);
+extern void fscache_cookie_lookup_negative(struct fscache_cookie *cookie);
+extern void fscache_resume_after_invalidation(struct fscache_cookie *cookie);
+extern void fscache_caching_failed(struct fscache_cookie *cookie);
+extern bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
+ enum fscache_want_state state);
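
Taken together, bringing a backend online reduces to roughly the following sketch, modelled on what a backend such as cachefiles does (error handling abbreviated; mycache_ops and my_priv are the hypothetical pieces from the ops-table sketch above):

	struct fscache_cache *cache;
	int ret;

	cache = fscache_acquire_cache("mycache");
	if (IS_ERR(cache))
		return PTR_ERR(cache);

	/* ... set up the backend's private state ... */

	ret = fscache_add_cache(cache, &mycache_ops, my_priv);
	if (ret < 0)
		fscache_relinquish_cache(cache);
	return ret;
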
/**
- * fscache_object_destroyed - Note destruction of an object in a cache
- * @cache: The cache from which the object came
+ * fscache_cookie_state - Read the state of a cookie
+ * @cookie: The cookie to query
*
- * Note the destruction and deallocation of an object record in a cache.
+ * Get the state of a cookie, imposing an ordering between the cookie contents
+ * and the state value. Paired with fscache_set_cookie_state().
*/
-static inline void fscache_object_destroyed(struct fscache_cache *cache)
+static inline
+enum fscache_cookie_state fscache_cookie_state(struct fscache_cookie *cookie)
{
- if (atomic_dec_and_test(&cache->object_count))
- wake_up_all(&fscache_cache_cleared_wq);
+ return smp_load_acquire(&cookie->state);
}
/**
- * fscache_object_lookup_error - Note an object encountered an error
- * @object: The object on which the error was encountered
+ * fscache_get_key - Get a pointer to the cookie key
+ * @cookie: The cookie to query
*
- * Note that an object encountered a fatal error (usually an I/O error) and
- * that it should be withdrawn as soon as possible.
+ * Return a pointer to where a cookie's key is stored.
*/
-static inline void fscache_object_lookup_error(struct fscache_object *object)
+static inline void *fscache_get_key(struct fscache_cookie *cookie)
{
- set_bit(FSCACHE_OBJECT_EV_ERROR, &object->events);
+ if (cookie->key_len <= sizeof(cookie->inline_key))
+ return cookie->inline_key;
+ else
+ return cookie->key;
}
-/**
- * fscache_set_store_limit - Set the maximum size to be stored in an object
- * @object: The object to set the maximum on
- * @i_size: The limit to set in bytes
- *
- * Set the maximum size an object is permitted to reach, implying the highest
- * byte that may be written. Intended to be called by the attr_changed() op.
- *
- * See Documentation/filesystems/caching/backend-api.rst for a complete
- * description.
- */
-static inline
-void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
+static inline struct fscache_cookie *fscache_cres_cookie(struct netfs_cache_resources *cres)
{
- object->store_limit_l = i_size;
- object->store_limit = i_size >> PAGE_SHIFT;
- if (i_size & ~PAGE_MASK)
- object->store_limit++;
+ return cres->cache_priv;
}
/**
- * fscache_end_io - End a retrieval operation on a page
- * @op: The FS-Cache operation covering the retrieval
- * @page: The page that was to be fetched
- * @error: The error code (0 if successful)
+ * fscache_count_object - Tell fscache that an object has been added
+ * @cache: The cache to account to
*
- * Note the end of an operation to retrieve a page, as covered by a particular
- * operation record.
+ * Tell fscache that an object has been added to the cache. This prevents the
+ * cache from tearing down the cache structure until the object is uncounted.
*/
-static inline void fscache_end_io(struct fscache_retrieval *op,
- struct page *page, int error)
+static inline void fscache_count_object(struct fscache_cache *cache)
{
- op->end_io_func(page, op->context, error);
-}
-
-static inline void __fscache_use_cookie(struct fscache_cookie *cookie)
-{
- atomic_inc(&cookie->n_active);
+ atomic_inc(&cache->object_count);
}
/**
- * fscache_use_cookie - Request usage of cookie attached to an object
- * @object: Object description
- *
- * Request usage of the cookie attached to an object. NULL is returned if the
- * relinquishment had reduced the cookie usage count to 0.
+ * fscache_uncount_object - Tell fscache that an object has been removed
+ * @cache: The cache to account to
+ *
+ * Tell fscache that an object has been removed from the cache and will no
+ * longer be accessed. After this point, the cache cookie may be destroyed.
*/
-static inline bool fscache_use_cookie(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
- return atomic_inc_not_zero(&cookie->n_active) != 0;
-}
-
-static inline bool __fscache_unuse_cookie(struct fscache_cookie *cookie)
-{
- return atomic_dec_and_test(&cookie->n_active);
-}
-
-static inline void __fscache_wake_unused_cookie(struct fscache_cookie *cookie)
+static inline void fscache_uncount_object(struct fscache_cache *cache)
{
- wake_up_var(&cookie->n_active);
+ if (atomic_dec_and_test(&cache->object_count))
+ wake_up_all(&fscache_clearance_waiters);
}
/**
- * fscache_unuse_cookie - Cease usage of cookie attached to an object
- * @object: Object description
- *
- * Cease usage of the cookie attached to an object. When the users count
- * reaches zero then the cookie relinquishment will be permitted to proceed.
- */
-static inline void fscache_unuse_cookie(struct fscache_object *object)
-{
- struct fscache_cookie *cookie = object->cookie;
- if (__fscache_unuse_cookie(cookie))
- __fscache_wake_unused_cookie(cookie);
-}
-
-/*
- * out-of-line cache backend functions
- */
-extern __printf(3, 4)
-void fscache_init_cache(struct fscache_cache *cache,
- const struct fscache_cache_ops *ops,
- const char *idfmt, ...);
-
-extern int fscache_add_cache(struct fscache_cache *cache,
- struct fscache_object *fsdef,
- const char *tagname);
-extern void fscache_withdraw_cache(struct fscache_cache *cache);
-
-extern void fscache_io_error(struct fscache_cache *cache);
-
-extern void fscache_mark_page_cached(struct fscache_retrieval *op,
- struct page *page);
-
-extern void fscache_mark_pages_cached(struct fscache_retrieval *op,
- struct pagevec *pagevec);
-
-extern bool fscache_object_sleep_till_congested(signed long *timeoutp);
-
-extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
- const void *data,
- uint16_t datalen,
- loff_t object_size);
-
-extern void fscache_object_retrying_stale(struct fscache_object *object);
-
-enum fscache_why_object_killed {
- FSCACHE_OBJECT_IS_STALE,
- FSCACHE_OBJECT_NO_SPACE,
- FSCACHE_OBJECT_WAS_RETIRED,
- FSCACHE_OBJECT_WAS_CULLED,
-};
-extern void fscache_object_mark_killed(struct fscache_object *object,
- enum fscache_why_object_killed why);
+ * fscache_wait_for_objects - Wait for all objects to be withdrawn
+ * @cache: The cache to query
+ *
+ * Wait for all extant objects in a cache to finish being withdrawn
+ * and go away.
+ */
+static inline void fscache_wait_for_objects(struct fscache_cache *cache)
+{
+ wait_event(fscache_clearance_waiters,
+ atomic_read(&cache->object_count) == 0);
+}
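
Teardown is then expected to be the mirror image: flag the cache as dead, withdraw the extant objects, and wait for the count maintained by fscache_count_object()/fscache_uncount_object() to drain. A sketch:

	fscache_withdraw_cache(cache);		/* Stop new accesses */
	/* ... withdraw each remaining volume and cookie ... */
	fscache_wait_for_objects(cache);	/* Wait for object_count -> 0 */
	fscache_relinquish_cache(cache);
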
+
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_read;
+extern atomic_t fscache_n_write;
+extern atomic_t fscache_n_no_write_space;
+extern atomic_t fscache_n_no_create_space;
+extern atomic_t fscache_n_culled;
+#define fscache_count_read() atomic_inc(&fscache_n_read)
+#define fscache_count_write() atomic_inc(&fscache_n_write)
+#define fscache_count_no_write_space() atomic_inc(&fscache_n_no_write_space)
+#define fscache_count_no_create_space() atomic_inc(&fscache_n_no_create_space)
+#define fscache_count_culled() atomic_inc(&fscache_n_culled)
+#else
+#define fscache_count_read() do {} while(0)
+#define fscache_count_write() do {} while(0)
+#define fscache_count_no_write_space() do {} while(0)
+#define fscache_count_no_create_space() do {} while(0)
+#define fscache_count_culled() do {} while(0)
+#endif
#endif /* _LINUX_FSCACHE_CACHE_H */
diff --git a/include/linux/fscache.h b/include/linux/fscache.h
index 3b2282c157f7..296c5f1d9f35 100644
--- a/include/linux/fscache.h
+++ b/include/linux/fscache.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* General filesystem caching interface
*
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* NOTE!!! See:
@@ -15,146 +15,126 @@
#define _LINUX_FSCACHE_H
#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/pagemap.h>
-#include <linux/pagevec.h>
-#include <linux/list_bl.h>
#include <linux/netfs.h>
+#include <linux/writeback.h>
#if defined(CONFIG_FSCACHE) || defined(CONFIG_FSCACHE_MODULE)
+#define __fscache_available (1)
#define fscache_available() (1)
+#define fscache_volume_valid(volume) (volume)
#define fscache_cookie_valid(cookie) (cookie)
+#define fscache_resources_valid(cres) ((cres)->cache_priv)
+#define fscache_cookie_enabled(cookie) (cookie && !test_bit(FSCACHE_COOKIE_DISABLED, &cookie->flags))
#else
+#define __fscache_available (0)
#define fscache_available() (0)
+#define fscache_volume_valid(volume) (0)
#define fscache_cookie_valid(cookie) (0)
+#define fscache_resources_valid(cres) (false)
+#define fscache_cookie_enabled(cookie) (0)
#endif
-
-/* pattern used to fill dead space in an index entry */
-#define FSCACHE_INDEX_DEADFILL_PATTERN 0x79
-
-struct pagevec;
-struct fscache_cache_tag;
struct fscache_cookie;
-struct fscache_netfs;
-struct netfs_read_request;
-
-typedef void (*fscache_rw_complete_t)(struct page *page,
- void *context,
- int error);
-
-/* result of index entry consultation */
-enum fscache_checkaux {
- FSCACHE_CHECKAUX_OKAY, /* entry okay as is */
- FSCACHE_CHECKAUX_NEEDS_UPDATE, /* entry requires update */
- FSCACHE_CHECKAUX_OBSOLETE, /* entry requires deletion */
-};
-/*
- * fscache cookie definition
- */
-struct fscache_cookie_def {
- /* name of cookie type */
- char name[16];
-
- /* cookie type */
- uint8_t type;
-#define FSCACHE_COOKIE_TYPE_INDEX 0
-#define FSCACHE_COOKIE_TYPE_DATAFILE 1
-
- /* select the cache into which to insert an entry in this index
- * - optional
- * - should return a cache identifier or NULL to cause the cache to be
- * inherited from the parent if possible or the first cache picked
- * for a non-index file if not
- */
- struct fscache_cache_tag *(*select_cache)(
- const void *parent_netfs_data,
- const void *cookie_netfs_data);
-
- /* consult the netfs about the state of an object
- * - this function can be absent if the index carries no state data
- * - the netfs data from the cookie being used as the target is
- * presented, as is the auxiliary data and the object size
- */
- enum fscache_checkaux (*check_aux)(void *cookie_netfs_data,
- const void *data,
- uint16_t datalen,
- loff_t object_size);
-
- /* get an extra reference on a read context
- * - this function can be absent if the completion function doesn't
- * require a context
- */
- void (*get_context)(void *cookie_netfs_data, void *context);
+#define FSCACHE_ADV_SINGLE_CHUNK 0x01 /* The object is a single chunk of data */
+#define FSCACHE_ADV_WRITE_CACHE 0x00 /* Do cache if written to locally */
+#define FSCACHE_ADV_WRITE_NOCACHE 0x02 /* Don't cache if written to locally */
- /* release an extra reference on a read context
- * - this function can be absent if the completion function doesn't
- * require a context
- */
- void (*put_context)(void *cookie_netfs_data, void *context);
+#define FSCACHE_INVAL_DIO_WRITE 0x01 /* Invalidate due to DIO write */
- /* indicate page that now have cache metadata retained
- * - this function should mark the specified page as now being cached
- * - the page will have been marked with PG_fscache before this is
- * called, so this is optional
- */
- void (*mark_page_cached)(void *cookie_netfs_data,
- struct address_space *mapping,
- struct page *page);
+enum fscache_want_state {
+ FSCACHE_WANT_PARAMS,
+ FSCACHE_WANT_WRITE,
+ FSCACHE_WANT_READ,
};
/*
- * fscache cached network filesystem type
- * - name, version and ops must be filled in before registration
- * - all other fields will be set during registration
+ * Data object state.
+ */
+enum fscache_cookie_state {
+ FSCACHE_COOKIE_STATE_QUIESCENT, /* The cookie is uncached */
+ FSCACHE_COOKIE_STATE_LOOKING_UP, /* The cache object is being looked up */
+ FSCACHE_COOKIE_STATE_CREATING, /* The cache object is being created */
+ FSCACHE_COOKIE_STATE_ACTIVE, /* The cache is active, readable and writable */
+ FSCACHE_COOKIE_STATE_INVALIDATING, /* The cache is being invalidated */
+ FSCACHE_COOKIE_STATE_FAILED, /* The cache failed, withdraw to clear */
+ FSCACHE_COOKIE_STATE_LRU_DISCARDING, /* The cookie is being discarded by the LRU */
+ FSCACHE_COOKIE_STATE_WITHDRAWING, /* The cookie is being withdrawn */
+ FSCACHE_COOKIE_STATE_RELINQUISHING, /* The cookie is being relinquished */
+ FSCACHE_COOKIE_STATE_DROPPED, /* The cookie has been dropped */
+#define FSCACHE_COOKIE_STATE__NR (FSCACHE_COOKIE_STATE_DROPPED + 1)
+} __attribute__((mode(byte)));
+
+/*
+ * Volume representation cookie.
*/
-struct fscache_netfs {
- uint32_t version; /* indexing version */
- const char *name; /* filesystem name */
- struct fscache_cookie *primary_index;
+struct fscache_volume {
+ refcount_t ref;
+ atomic_t n_cookies; /* Number of data cookies in volume */
+ atomic_t n_accesses; /* Number of cache accesses in progress */
+ unsigned int debug_id;
+ unsigned int key_hash; /* Hash of key string */
+ char *key; /* Volume ID, eg. "afs@example.com@1234" */
+ struct list_head proc_link; /* Link in /proc/fs/fscache/volumes */
+ struct hlist_bl_node hash_link; /* Link in hash table */
+ struct work_struct work;
+ struct fscache_cache *cache; /* The cache in which this resides */
+ void *cache_priv; /* Cache private data */
+ spinlock_t lock;
+ unsigned long flags;
+#define FSCACHE_VOLUME_RELINQUISHED 0 /* Volume is being cleaned up */
+#define FSCACHE_VOLUME_INVALIDATE 1 /* Volume was invalidated */
+#define FSCACHE_VOLUME_COLLIDED_WITH 2 /* Volume was collided with */
+#define FSCACHE_VOLUME_ACQUIRE_PENDING 3 /* Volume is waiting to complete acquisition */
+#define FSCACHE_VOLUME_CREATING 4 /* Volume is being created on disk */
+ u8 coherency_len; /* Length of the coherency data */
+ u8 coherency[]; /* Coherency data */
};
/*
- * data file or index object cookie
+ * Data file representation cookie.
* - a file will only appear in one cache
* - a request to cache a file may or may not be honoured, subject to
* constraints such as disk space
* - indices are created on disk just-in-time
*/
struct fscache_cookie {
- refcount_t ref; /* number of users of this cookie */
- atomic_t n_children; /* number of children of this cookie */
- atomic_t n_active; /* number of active users of netfs ptrs */
+ refcount_t ref;
+ atomic_t n_active; /* number of active users of cookie */
+ atomic_t n_accesses; /* Number of cache accesses in progress */
unsigned int debug_id;
+ unsigned int inval_counter; /* Number of invalidations made */
spinlock_t lock;
- spinlock_t stores_lock; /* lock on page store tree */
- struct hlist_head backing_objects; /* object(s) backing this file/index */
- const struct fscache_cookie_def *def; /* definition */
- struct fscache_cookie *parent; /* parent of this entry */
+ struct fscache_volume *volume; /* Parent volume of this file. */
+ void *cache_priv; /* Cache-side representation */
struct hlist_bl_node hash_link; /* Link in hash table */
struct list_head proc_link; /* Link in proc list */
- void *netfs_data; /* back pointer to netfs */
- struct radix_tree_root stores; /* pages to be stored on this cookie */
-#define FSCACHE_COOKIE_PENDING_TAG 0 /* pages tag: pending write to cache */
-#define FSCACHE_COOKIE_STORING_TAG 1 /* pages tag: writing to cache */
-
+ struct list_head commit_link; /* Link in commit queue */
+ struct work_struct work; /* Commit/relinq/withdraw work */
+ loff_t object_size; /* Size of the netfs object */
+ unsigned long unused_at; /* Time at which unused (jiffies) */
unsigned long flags;
-#define FSCACHE_COOKIE_LOOKING_UP 0 /* T if non-index cookie being looked up still */
-#define FSCACHE_COOKIE_NO_DATA_YET 1 /* T if new object with no cached data yet */
-#define FSCACHE_COOKIE_UNAVAILABLE 2 /* T if cookie is unavailable (error, etc) */
-#define FSCACHE_COOKIE_INVALIDATING 3 /* T if cookie is being invalidated */
-#define FSCACHE_COOKIE_RELINQUISHED 4 /* T if cookie has been relinquished */
-#define FSCACHE_COOKIE_ENABLED 5 /* T if cookie is enabled */
-#define FSCACHE_COOKIE_ENABLEMENT_LOCK 6 /* T if cookie is being en/disabled */
-#define FSCACHE_COOKIE_AUX_UPDATED 8 /* T if the auxiliary data was updated */
-#define FSCACHE_COOKIE_ACQUIRED 9 /* T if cookie is in use */
-#define FSCACHE_COOKIE_RELINQUISHING 10 /* T if cookie is being relinquished */
-
- u8 type; /* Type of object */
+#define FSCACHE_COOKIE_RELINQUISHED 0 /* T if cookie has been relinquished */
+#define FSCACHE_COOKIE_RETIRED 1 /* T if this cookie has retired on relinq */
+#define FSCACHE_COOKIE_IS_CACHING 2 /* T if this cookie is cached */
+#define FSCACHE_COOKIE_NO_DATA_TO_READ 3 /* T if this cookie has nothing to read */
+#define FSCACHE_COOKIE_NEEDS_UPDATE 4 /* T if attrs have been updated */
+#define FSCACHE_COOKIE_HAS_BEEN_CACHED 5 /* T if cookie needs withdraw-on-relinq */
+#define FSCACHE_COOKIE_DISABLED 6 /* T if cookie has been disabled */
+#define FSCACHE_COOKIE_LOCAL_WRITE 7 /* T if cookie has been modified locally */
+#define FSCACHE_COOKIE_NO_ACCESS_WAKE 8 /* T if no wake when n_accesses goes 0 */
+#define FSCACHE_COOKIE_DO_RELINQUISH 9 /* T if this cookie needs relinquishment */
+#define FSCACHE_COOKIE_DO_WITHDRAW 10 /* T if this cookie needs withdrawing */
+#define FSCACHE_COOKIE_DO_LRU_DISCARD 11 /* T if this cookie needs LRU discard */
+#define FSCACHE_COOKIE_DO_PREP_TO_WRITE 12 /* T if cookie needs write preparation */
+#define FSCACHE_COOKIE_HAVE_DATA 13 /* T if this cookie has data stored */
+#define FSCACHE_COOKIE_IS_HASHED 14 /* T if this cookie is hashed */
+
+ enum fscache_cookie_state state;
+ u8 advice; /* FSCACHE_ADV_* */
u8 key_len; /* Length of index key */
u8 aux_len; /* Length of auxiliary data */
- u32 key_hash; /* Hash of parent, type, key, len */
+ u32 key_hash; /* Hash of volume, key, len */
union {
void *key; /* Index key */
u8 inline_key[16]; /* - If the key is short enough */
@@ -165,11 +145,6 @@ struct fscache_cookie {
};
};
-static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
-{
- return fscache_cookie_valid(cookie) && test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags);
-}
-
/*
* slow-path functions for when there is actually caching available, and the
* netfs does actually have a valid token
@@ -177,699 +152,528 @@ static inline bool fscache_cookie_enabled(struct fscache_cookie *cookie)
* - these are undefined symbols when FS-Cache is not configured and the
* optimiser takes care of not using them
*/
-extern int __fscache_register_netfs(struct fscache_netfs *);
-extern void __fscache_unregister_netfs(struct fscache_netfs *);
-extern struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *);
-extern void __fscache_release_cache_tag(struct fscache_cache_tag *);
+extern struct fscache_volume *__fscache_acquire_volume(const char *, const char *,
+ const void *, size_t);
+extern void __fscache_relinquish_volume(struct fscache_volume *, const void *, bool);
extern struct fscache_cookie *__fscache_acquire_cookie(
- struct fscache_cookie *,
- const struct fscache_cookie_def *,
+ struct fscache_volume *,
+ u8,
const void *, size_t,
const void *, size_t,
- void *, loff_t, bool);
-extern void __fscache_relinquish_cookie(struct fscache_cookie *, const void *, bool);
-extern int __fscache_check_consistency(struct fscache_cookie *, const void *);
-extern void __fscache_update_cookie(struct fscache_cookie *, const void *);
-extern int __fscache_attr_changed(struct fscache_cookie *);
-extern void __fscache_invalidate(struct fscache_cookie *);
-extern void __fscache_wait_on_invalidate(struct fscache_cookie *);
-
-#ifdef FSCACHE_USE_NEW_IO_API
-extern int __fscache_begin_read_operation(struct netfs_read_request *, struct fscache_cookie *);
-#else
-extern int __fscache_read_or_alloc_page(struct fscache_cookie *,
- struct page *,
- fscache_rw_complete_t,
- void *,
- gfp_t);
-extern int __fscache_read_or_alloc_pages(struct fscache_cookie *,
- struct address_space *,
- struct list_head *,
- unsigned *,
- fscache_rw_complete_t,
- void *,
- gfp_t);
-extern int __fscache_alloc_page(struct fscache_cookie *, struct page *, gfp_t);
-extern int __fscache_write_page(struct fscache_cookie *, struct page *, loff_t, gfp_t);
-extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
-extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
-extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
-extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
- gfp_t);
-extern void __fscache_uncache_all_inode_pages(struct fscache_cookie *,
- struct inode *);
-extern void __fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages);
-#endif /* FSCACHE_USE_NEW_IO_API */
-
-extern void __fscache_disable_cookie(struct fscache_cookie *, const void *, bool);
-extern void __fscache_enable_cookie(struct fscache_cookie *, const void *, loff_t,
- bool (*)(void *), void *);
+ loff_t);
+extern void __fscache_use_cookie(struct fscache_cookie *, bool);
+extern void __fscache_unuse_cookie(struct fscache_cookie *, const void *, const loff_t *);
+extern void __fscache_relinquish_cookie(struct fscache_cookie *, bool);
+extern void __fscache_resize_cookie(struct fscache_cookie *, loff_t);
+extern void __fscache_invalidate(struct fscache_cookie *, const void *, loff_t, unsigned int);
+extern int __fscache_begin_read_operation(struct netfs_cache_resources *, struct fscache_cookie *);
+extern int __fscache_begin_write_operation(struct netfs_cache_resources *, struct fscache_cookie *);
+
+extern void __fscache_write_to_cache(struct fscache_cookie *, struct address_space *,
+ loff_t, size_t, loff_t, netfs_io_terminated_t, void *,
+ bool);
+extern void __fscache_clear_page_bits(struct address_space *, loff_t, size_t);
/**
- * fscache_register_netfs - Register a filesystem as desiring caching services
- * @netfs: The description of the filesystem
+ * fscache_acquire_volume - Register a volume as desiring caching services
+ * @volume_key: An identification string for the volume
+ * @cache_name: The name of the cache to use (or NULL for the default)
+ * @coherency_data: Piece of arbitrary coherency data to check (or NULL)
+ * @coherency_len: The size of the coherency data
*
- * Register a filesystem as desiring caching services if they're available.
+ * Register a volume as desiring caching services if they're available. The
+ * caller must provide an identifier for the volume and may also indicate which
+ * cache it should be in. If a preexisting volume entry is found in the cache,
+ * the coherency data must match, otherwise the entry will be invalidated.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Returns a volume cookie pointer on success; -ENOMEM if out of memory or
+ * -EBUSY if a cache volume of that name is already acquired. Note that NULL
+ * is a valid volume cookie pointer and can be returned if caching is refused.
*/
static inline
-int fscache_register_netfs(struct fscache_netfs *netfs)
+struct fscache_volume *fscache_acquire_volume(const char *volume_key,
+ const char *cache_name,
+ const void *coherency_data,
+ size_t coherency_len)
{
- if (fscache_available())
- return __fscache_register_netfs(netfs);
- else
- return 0;
+ if (!fscache_available())
+ return NULL;
+ return __fscache_acquire_volume(volume_key, cache_name,
+ coherency_data, coherency_len);
}
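
A network filesystem would typically acquire one volume cookie per superblock, for instance (the key string shown is purely illustrative):

	struct fscache_volume *volume;

	volume = fscache_acquire_volume("myfs,example.com,share1",
					NULL, NULL, 0);
	if (IS_ERR(volume))
		return PTR_ERR(volume);
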
/**
- * fscache_unregister_netfs - Indicate that a filesystem no longer desires
- * caching services
- * @netfs: The description of the filesystem
+ * fscache_relinquish_volume - Cease caching a volume
+ * @volume: The volume cookie
+ * @coherency_data: Piece of arbitrary coherency data to set (or NULL)
+ * @invalidate: True if the volume should be invalidated
*
- * Indicate that a filesystem no longer desires caching services for the
- * moment.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Indicate that a filesystem no longer desires caching services for a volume.
+ * The caller must have relinquished all file cookies prior to calling this.
+ * The stored coherency data is updated.
*/
static inline
-void fscache_unregister_netfs(struct fscache_netfs *netfs)
+void fscache_relinquish_volume(struct fscache_volume *volume,
+ const void *coherency_data,
+ bool invalidate)
{
- if (fscache_available())
- __fscache_unregister_netfs(netfs);
+ if (fscache_volume_valid(volume))
+ __fscache_relinquish_volume(volume, coherency_data, invalidate);
}
/**
- * fscache_lookup_cache_tag - Look up a cache tag
- * @name: The name of the tag to search for
+ * fscache_acquire_cookie - Acquire a cookie to represent a cache object
+ * @volume: The volume in which to locate/create this cookie
+ * @advice: Advice flags (FSCACHE_ADV_*)
+ * @index_key: The index key for this cookie
+ * @index_key_len: Size of the index key
+ * @aux_data: The auxiliary data for the cookie (may be NULL)
+ * @aux_data_len: Size of the auxiliary data buffer
+ * @object_size: The initial size of object
*
- * Acquire a specific cache referral tag that can be used to select a specific
- * cache in which to cache an index.
+ * Acquire a cookie to represent a data file within the given cache volume.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-struct fscache_cache_tag *fscache_lookup_cache_tag(const char *name)
+struct fscache_cookie *fscache_acquire_cookie(struct fscache_volume *volume,
+ u8 advice,
+ const void *index_key,
+ size_t index_key_len,
+ const void *aux_data,
+ size_t aux_data_len,
+ loff_t object_size)
{
- if (fscache_available())
- return __fscache_lookup_cache_tag(name);
- else
+ if (!fscache_volume_valid(volume))
return NULL;
+ return __fscache_acquire_cookie(volume, advice,
+ index_key, index_key_len,
+ aux_data, aux_data_len,
+ object_size);
}
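
One data-file cookie is then hung off each inode, keyed by some per-file identifier. A sketch, where fid and version stand in for whatever the netfs uses as its index key and coherency data:

	cookie = fscache_acquire_cookie(volume, 0,
					&fid, sizeof(fid),		/* index key */
					&version, sizeof(version),	/* aux data */
					i_size_read(inode));
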
/**
- * fscache_release_cache_tag - Release a cache tag
- * @tag: The tag to release
+ * fscache_use_cookie - Request usage of cookie attached to an object
+ * @object: Object description
+ * @will_modify: If cache is expected to be modified locally
*
- * Release a reference to a cache referral tag previously looked up.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Request usage of the cookie attached to an object. The caller should tell
+ * the cache if the object's contents are about to be modified locally so that
+ * the cache can apply the policy that has been set to handle this case.
*/
-static inline
-void fscache_release_cache_tag(struct fscache_cache_tag *tag)
+static inline void fscache_use_cookie(struct fscache_cookie *cookie,
+ bool will_modify)
{
- if (fscache_available())
- __fscache_release_cache_tag(tag);
+ if (fscache_cookie_valid(cookie))
+ __fscache_use_cookie(cookie, will_modify);
}
/**
- * fscache_acquire_cookie - Acquire a cookie to represent a cache object
- * @parent: The cookie that's to be the parent of this one
- * @def: A description of the cache object, including callback operations
- * @index_key: The index key for this cookie
- * @index_key_len: Size of the index key
- * @aux_data: The auxiliary data for the cookie (may be NULL)
- * @aux_data_len: Size of the auxiliary data buffer
- * @netfs_data: An arbitrary piece of data to be kept in the cookie to
- * represent the cache object to the netfs
- * @object_size: The initial size of object
- * @enable: Whether or not to enable a data cookie immediately
- *
- * This function is used to inform FS-Cache about part of an index hierarchy
- * that can be used to locate files. This is done by requesting a cookie for
- * each index in the path to the file.
+ * fscache_unuse_cookie - Cease usage of cookie attached to an object
+ * @object: Object description
+ * @aux_data: Updated auxiliary data (or NULL)
+ * @object_size: Revised size of the object (or NULL)
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Cease usage of the cookie attached to an object. When the users count
+ * reaches zero then the cookie relinquishment will be permitted to proceed.
*/
-static inline
-struct fscache_cookie *fscache_acquire_cookie(
- struct fscache_cookie *parent,
- const struct fscache_cookie_def *def,
- const void *index_key,
- size_t index_key_len,
- const void *aux_data,
- size_t aux_data_len,
- void *netfs_data,
- loff_t object_size,
- bool enable)
+static inline void fscache_unuse_cookie(struct fscache_cookie *cookie,
+ const void *aux_data,
+ const loff_t *object_size)
{
- if (fscache_cookie_valid(parent) && fscache_cookie_enabled(parent))
- return __fscache_acquire_cookie(parent, def,
- index_key, index_key_len,
- aux_data, aux_data_len,
- netfs_data, object_size, enable);
- else
- return NULL;
+ if (fscache_cookie_valid(cookie))
+ __fscache_unuse_cookie(cookie, aux_data, object_size);
}
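
These two calls are meant to bracket the period for which a file is in use, e.g. paired across open and last-close (a sketch; aux stands in for the netfs's coherency data):

	/* On open: */
	fscache_use_cookie(cookie, file->f_mode & FMODE_WRITE);

	/* On last close, handing back the final coherency state: */
	loff_t i_size = i_size_read(inode);
	fscache_unuse_cookie(cookie, &aux, &i_size);
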
/**
* fscache_relinquish_cookie - Return the cookie to the cache, maybe discarding
* it
* @cookie: The cookie being returned
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
* @retire: True if the cache object the cookie represents is to be discarded
*
* This function returns a cookie to the cache, forcibly discarding the
- * associated cache object if retire is set to true. The opportunity is
- * provided to update the auxiliary data in the cache before the object is
- * disconnected.
+ * associated cache object if retire is set to true.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_relinquish_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool retire)
+void fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
{
if (fscache_cookie_valid(cookie))
- __fscache_relinquish_cookie(cookie, aux_data, retire);
+ __fscache_relinquish_cookie(cookie, retire);
}
-/**
- * fscache_check_consistency - Request validation of a cache's auxiliary data
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- *
- * Request a consistency check from fscache, which passes the request to the
- * backing cache. The auxiliary data on the cookie will be updated first if
- * @aux_data is set.
- *
- * Returns 0 if consistent and -ESTALE if inconsistent. May also
- * return -ENOMEM and -ERESTARTSYS.
+/*
+ * Find the auxiliary data on a cookie.
*/
-static inline
-int fscache_check_consistency(struct fscache_cookie *cookie,
- const void *aux_data)
+static inline void *fscache_get_aux(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_check_consistency(cookie, aux_data);
+ if (cookie->aux_len <= sizeof(cookie->inline_aux))
+ return cookie->inline_aux;
else
- return 0;
+ return cookie->aux;
}
-/**
- * fscache_update_cookie - Request that a cache object be updated
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- *
- * Request an update of the index data for the cache object associated with the
- * cookie. The auxiliary data on the cookie will be updated first if @aux_data
- * is set.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+/*
+ * Update the auxiliary data on a cookie.
*/
static inline
-void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data)
+void fscache_update_aux(struct fscache_cookie *cookie,
+ const void *aux_data, const loff_t *object_size)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_update_cookie(cookie, aux_data);
+ void *p = fscache_get_aux(cookie);
+
+ if (aux_data && p)
+ memcpy(p, aux_data, cookie->aux_len);
+ if (object_size)
+ cookie->object_size = *object_size;
}
-/**
- * fscache_pin_cookie - Pin a data-storage cache object in its cache
- * @cookie: The cookie representing the cache object
- *
- * Permit data-storage cache objects to be pinned in the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_updates;
+#endif
+
static inline
-int fscache_pin_cookie(struct fscache_cookie *cookie)
+void __fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data,
+ const loff_t *object_size)
{
- return -ENOBUFS;
+#ifdef CONFIG_FSCACHE_STATS
+ atomic_inc(&fscache_n_updates);
+#endif
+ fscache_update_aux(cookie, aux_data, object_size);
+ smp_wmb();
+ set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &cookie->flags);
}
/**
- * fscache_pin_cookie - Unpin a data-storage cache object in its cache
+ * fscache_update_cookie - Request that a cache object be updated
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @object_size: The current size of the object (may be NULL)
*
- * Permit data-storage cache objects to be unpinned from the cache.
+ * Request an update of the index data for the cache object associated with the
+ * cookie. The auxiliary data on the cookie will be updated first if @aux_data
+ * is set and the object size will be updated and the object possibly trimmed
+ * if @object_size is set.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_unpin_cookie(struct fscache_cookie *cookie)
+void fscache_update_cookie(struct fscache_cookie *cookie, const void *aux_data,
+ const loff_t *object_size)
{
+ if (fscache_cookie_enabled(cookie))
+ __fscache_update_cookie(cookie, aux_data, object_size);
}
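
For example, after a server round trip that revalidates the file, a netfs might refresh the stored state like so (version again standing in for the netfs's coherency data):

	loff_t i_size = i_size_read(inode);
	fscache_update_cookie(cookie, &version, &i_size);
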
/**
- * fscache_attr_changed - Notify cache that an object's attributes changed
+ * fscache_resize_cookie - Request that a cache object be resized
* @cookie: The cookie representing the cache object
+ * @new_size: The new size of the object
*
- * Send a notification to the cache indicating that an object's attributes have
- * changed. This includes the data size. These attributes will be obtained
- * through the get_attr() cookie definition op.
+ * Request that the size of an object be changed.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
+ * See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-int fscache_attr_changed(struct fscache_cookie *cookie)
+void fscache_resize_cookie(struct fscache_cookie *cookie, loff_t new_size)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_attr_changed(cookie);
- else
- return -ENOBUFS;
+ if (fscache_cookie_enabled(cookie))
+ __fscache_resize_cookie(cookie, new_size);
}
/**
* fscache_invalidate - Notify cache that an object needs invalidation
* @cookie: The cookie representing the cache object
+ * @aux_data: The updated auxiliary data for the cookie (may be NULL)
+ * @size: The revised size of the object.
+ * @flags: Invalidation flags (FSCACHE_INVAL_*)
*
* Notify the cache that an object needs to be invalidated and that it
- * should abort any retrievals or stores it is doing on the cache. The object
- * is then marked non-caching until such time as the invalidation is complete.
- *
- * This can be called with spinlocks held.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-void fscache_invalidate(struct fscache_cookie *cookie)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_invalidate(cookie);
-}
-
-/**
- * fscache_wait_on_invalidate - Wait for invalidation to complete
- * @cookie: The cookie representing the cache object
+ * should abort any retrievals or stores it is doing on the cache. This
+ * increments inval_counter on the cookie, which can be used by the caller to
+ * reconsider I/O requests as they complete.
*
- * Wait for the invalidation of an object to complete.
+ * If @flags has FSCACHE_INVAL_DIO_WRITE set, this indicates that this is due
+ * to a direct I/O write and will cause caching to be disabled on this cookie
+ * until it is completely unused.
*
* See Documentation/filesystems/caching/netfs-api.rst for a complete
* description.
*/
static inline
-void fscache_wait_on_invalidate(struct fscache_cookie *cookie)
+void fscache_invalidate(struct fscache_cookie *cookie,
+ const void *aux_data, loff_t size, unsigned int flags)
{
- if (fscache_cookie_valid(cookie))
- __fscache_wait_on_invalidate(cookie);
+ if (fscache_cookie_enabled(cookie))
+ __fscache_invalidate(cookie, aux_data, size, flags);
}
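
So a netfs about to perform a direct I/O write, for instance, might issue (aux again being a stand-in for its coherency data):

	fscache_invalidate(cookie, &aux, i_size_read(inode),
			   FSCACHE_INVAL_DIO_WRITE);
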
/**
- * fscache_reserve_space - Reserve data space for a cached object
- * @cookie: The cookie representing the cache object
- * @i_size: The amount of space to be reserved
- *
- * Reserve an amount of space in the cache for the cache object attached to a
- * cookie so that a write to that object within the space can always be
- * honoured.
+ * fscache_operation_valid - Return the cache ops table if resources are usable
+ * @cres: The resources to check.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Returns a pointer to the operations table if usable or NULL if not.
*/
static inline
-int fscache_reserve_space(struct fscache_cookie *cookie, loff_t size)
+const struct netfs_cache_ops *fscache_operation_valid(const struct netfs_cache_resources *cres)
{
- return -ENOBUFS;
+ return fscache_resources_valid(cres) ? cres->ops : NULL;
}
-#ifdef FSCACHE_USE_NEW_IO_API
-
/**
* fscache_begin_read_operation - Begin a read operation for the netfs lib
- * @rreq: The read request being undertaken
+ * @cres: The cache resources for the read being performed
* @cookie: The cookie representing the cache object
*
- * Begin a read operation on behalf of the netfs helper library. @rreq
- * indicates the read request to which the operation state should be attached;
- * @cookie indicates the cache object that will be accessed.
+ * Begin a read operation on behalf of the netfs helper library. @cres
+ * indicates the cache resources to which the operation state should be
+ * attached; @cookie indicates the cache object that will be accessed.
*
* This is intended to be called from the ->begin_cache_operation() netfs lib
* operation as implemented by the network filesystem.
*
+ * @cres->inval_counter is set from @cookie->inval_counter for comparison at
+ * the end of the operation. This allows invalidation during the operation to
+ * be detected by the caller.
+ *
* Returns:
* * 0 - Success
* * -ENOBUFS - No caching available
* * Other error code from the cache, such as -ENOMEM.
*/
static inline
-int fscache_begin_read_operation(struct netfs_read_request *rreq,
+int fscache_begin_read_operation(struct netfs_cache_resources *cres,
struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_begin_read_operation(rreq, cookie);
+ if (fscache_cookie_enabled(cookie))
+ return __fscache_begin_read_operation(cres, cookie);
return -ENOBUFS;
}
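
A netfs wires this up from its ->begin_cache_operation() hook roughly as follows; the cookie-lookup helper is hypothetical:

	static int myfs_begin_cache_operation(struct netfs_read_request *rreq)
	{
		return fscache_begin_read_operation(&rreq->cache_resources,
						    myfs_inode_cookie(rreq->inode));
	}
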
-#else /* FSCACHE_USE_NEW_IO_API */
-
-/**
- * fscache_read_or_alloc_page - Read a page from the cache or allocate a block
- * in which to store it
- * @cookie: The cookie representing the cache object
- * @page: The netfs page to fill if possible
- * @end_io_func: The callback to invoke when and if the page is filled
- * @context: An arbitrary piece of data to pass on to end_io_func()
- * @gfp: The conditions under which memory allocation should be made
- *
- * Read a page from the cache, or if that's not possible make a potential
- * one-block reservation in the cache into which the page may be stored once
- * fetched from the server.
- *
- * If the page is not backed by the cache object, or if there's some reason
- * it can't be, -ENOBUFS will be returned and nothing more will be done for
- * that page.
- *
- * Else, if that page is backed by the cache, a read will be initiated directly
- * to the netfs's page and 0 will be returned by this function. The
- * end_io_func() callback will be invoked when the operation terminates on a
- * completion or failure. Note that the callback may be invoked before the
- * return.
- *
- * Else, if the page is unbacked, -ENODATA is returned and a block may have
- * been allocated in the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-int fscache_read_or_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_read_or_alloc_page(cookie, page, end_io_func,
- context, gfp);
- else
- return -ENOBUFS;
-}
-
/**
- * fscache_read_or_alloc_pages - Read pages from the cache and/or allocate
- * blocks in which to store them
- * @cookie: The cookie representing the cache object
- * @mapping: The netfs inode mapping to which the pages will be attached
- * @pages: A list of potential netfs pages to be filled
- * @nr_pages: Number of pages to be read and/or allocated
- * @end_io_func: The callback to invoke when and if each page is filled
- * @context: An arbitrary piece of data to pass on to end_io_func()
- * @gfp: The conditions under which memory allocation should be made
- *
- * Read a set of pages from the cache, or if that's not possible, attempt to
- * make a potential one-block reservation for each page in the cache into which
- * that page may be stored once fetched from the server.
- *
- * If some pages are not backed by the cache object, or if there's some
- * reason they can't be, -ENOBUFS will be returned and nothing more will be
- * done for those pages.
- *
- * Else, if some of the pages are backed by the cache, a read will be initiated
- * directly to the netfs's page and 0 will be returned by this function. The
- * end_io_func() callback will be invoked when the operation terminates on a
- * completion or failure. Note that the callback may be invoked before the
- * return.
- *
- * Else, if a page is unbacked, -ENODATA is returned and a block may have
- * been allocated in the cache.
- *
- * Because the function may want to return all of -ENOBUFS, -ENODATA and 0 in
- * regard to different pages, the return values are prioritised in that order.
- * Any pages submitted for reading are removed from the pages list.
+ * fscache_read - Start a read from the cache.
+ * @cres: The cache resources to use
+ * @start_pos: The beginning file offset in the cache file
+ * @iter: The buffer to fill - and also the length
+ * @read_hole: How to handle a hole in the data.
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-int fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
- struct address_space *mapping,
- struct list_head *pages,
- unsigned *nr_pages,
- fscache_rw_complete_t end_io_func,
- void *context,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_read_or_alloc_pages(cookie, mapping, pages,
- nr_pages, end_io_func,
- context, gfp);
- else
- return -ENOBUFS;
-}
-
-/**
- * fscache_alloc_page - Allocate a block in which to store a page
- * @cookie: The cookie representing the cache object
- * @page: The netfs page to allocate a page for
- * @gfp: The conditions under which memory allocation should be made
+ * Start a read from the cache. @cres indicates the cache object to read from
+ * and must be obtained by a call to fscache_begin_read_operation() beforehand.
*
- * Request allocation of a block in the cache in which to store a netfs page
- * without retrieving any contents from the cache.
+ * The data is read into the iterator, @iter, and that also indicates the size
+ * of the operation. @start_pos is the start position in the file, though if
+ * @read_hole is set appropriately, the cache can use SEEK_DATA to find the
+ * next piece of data, writing zeros for the hole into the iterator.
*
- * If the page is not backed by a file then -ENOBUFS will be returned and
- * nothing more will be done, and no reservation will be made.
+ * Upon termination of the operation, @term_func will be called and supplied
+ * with @term_func_priv plus the amount of data written, if successful, or the
+ * error code otherwise.
*
- * Else, a block will be allocated if one wasn't already, and 0 will be
- * returned
+ * @read_hole indicates how a partially populated region in the cache should be
+ * handled. It can be one of a number of settings:
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-int fscache_alloc_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_alloc_page(cookie, page, gfp);
- else
- return -ENOBUFS;
-}
-
-/**
- * fscache_readpages_cancel - Cancel read/alloc on pages
- * @cookie: The cookie representing the inode's cache object.
- * @pages: The netfs pages that we cancelled the read/alloc on in readpages()
+ * NETFS_READ_HOLE_IGNORE - Just try to read (may return a short read).
*
- * Uncache/unreserve the pages reserved earlier in readpages() via
- * fscache_read_or_alloc_pages() and similar. In the common case of a
- * successful cache read in readpages() this doesn't do anything. When the
- * underlying netfs's readahead fails, we need to clean up the pagelist
- * (unmark and uncache).
+ * NETFS_READ_HOLE_CLEAR - Seek for data, clearing the part of the buffer
+ * skipped over, then do as for IGNORE.
*
- * This function may sleep as it may have to clean up disk state.
+ * NETFS_READ_HOLE_FAIL - Give ENODATA if we encounter a hole.
*/
static inline
-void fscache_readpages_cancel(struct fscache_cookie *cookie,
- struct list_head *pages)
+int fscache_read(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ enum netfs_read_from_hole read_hole,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
{
- if (fscache_cookie_valid(cookie))
- __fscache_readpages_cancel(cookie, pages);
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+ return ops->read(cres, start_pos, iter, read_hole,
+ term_func, term_func_priv);
}
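
A minimal sketch (not part of this patch) of how a netfs might drive fscache_read(): the cache resources are assumed to have been attached with fscache_begin_read_operation() beforehand, and a NULL @term_func is passed for a synchronous-style call, modelled on other users of this API.

/* Hedged example: read one page from the cache into a bvec iterator.
 * Failing with -ENODATA on a hole lets the caller fall back to the server.
 */
static int example_read_one_page(struct netfs_cache_resources *cres,
				 struct page *page, loff_t pos)
{
	struct bio_vec bvec = {
		.bv_page	= page,
		.bv_offset	= 0,
		.bv_len		= PAGE_SIZE,
	};
	struct iov_iter iter;

	iov_iter_bvec(&iter, READ, &bvec, 1, PAGE_SIZE);
	return fscache_read(cres, pos, &iter, NETFS_READ_HOLE_FAIL,
			    NULL, NULL);
}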
/**
- * fscache_write_page - Request storage of a page in the cache
+ * fscache_begin_write_operation - Begin a write operation for the netfs lib
+ * @cres: The cache resources for the write being performed
* @cookie: The cookie representing the cache object
- * @page: The netfs page to store
- * @object_size: Updated size of object
- * @gfp: The conditions under which memory allocation should be made
*
- * Request the contents of the netfs page be written into the cache. This
- * request may be ignored if no cache block is currently allocated, in which
- * case it will return -ENOBUFS.
+ * Begin a write operation on behalf of the netfs helper library. @cres
+ * indicates the cache resources to which the operation state should be
+ * attached; @cookie indicates the cache object that will be accessed.
*
- * If a cache block was already allocated, a write will be initiated and 0 will
- * be returned. The PG_fscache_write page bit is set immediately and will then
- * be cleared at the completion of the write to indicate the success or failure
- * of the operation. Note that the completion may happen before the return.
+ * @cres->inval_counter is set from @cookie->inval_counter for comparison at
+ * the end of the operation. This allows invalidation during the operation to
+ * be detected by the caller.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Returns:
+ * * 0 - Success
+ * * -ENOBUFS - No caching available
+ * * Other error code from the cache, such as -ENOMEM.
*/
static inline
-int fscache_write_page(struct fscache_cookie *cookie,
- struct page *page,
- loff_t object_size,
- gfp_t gfp)
+int fscache_begin_write_operation(struct netfs_cache_resources *cres,
+ struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- return __fscache_write_page(cookie, page, object_size, gfp);
- else
- return -ENOBUFS;
+ if (fscache_cookie_enabled(cookie))
+ return __fscache_begin_write_operation(cres, cookie);
+ return -ENOBUFS;
}
/**
- * fscache_uncache_page - Indicate that caching is no longer required on a page
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that was being cached.
+ * fscache_write - Start a write to the cache.
+ * @cres: The cache resources to use
+ * @start_pos: The beginning file offset in the cache file
+ * @iter: The data to write - and also the length
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
*
- * Tell the cache that we no longer want a page to be cached and that it should
- * remove any knowledge of the netfs page it may have.
+ * Start a write to the cache. @cres indicates the cache object to write to and
+ * must be obtained by a call to fscache_begin_write_operation() beforehand.
*
- * Note that this cannot cancel any outstanding I/O operations between this
- * page and the cache.
+ * The data to be written is obtained from the iterator, @iter, and that also
+ * indicates the size of the operation. @start_pos is the start position in
+ * the file.
*
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
+ * Upon termination of the operation, @term_func will be called and supplied
+ * with @term_func_priv plus the amount of data written, if successful, or the
+ * error code otherwise.
*/
static inline
-void fscache_uncache_page(struct fscache_cookie *cookie,
- struct page *page)
+int fscache_write(struct netfs_cache_resources *cres,
+ loff_t start_pos,
+ struct iov_iter *iter,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv)
{
- if (fscache_cookie_valid(cookie))
- __fscache_uncache_page(cookie, page);
+ const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+ return ops->write(cres, start_pos, iter, term_func, term_func_priv);
}
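
By way of illustration only, the begin/write pair might be used like this; fscache_end_operation() and the synchronous NULL-@term_func call are assumptions modelled on other users of this API, not text from this patch.

/* Hedged example: copy one dirty page into the cache object behind @cookie. */
static int example_copy_page_to_cache(struct fscache_cookie *cookie,
				      struct page *page, loff_t pos)
{
	struct netfs_cache_resources cres = {};
	struct bio_vec bvec = {
		.bv_page	= page,
		.bv_offset	= 0,
		.bv_len		= PAGE_SIZE,
	};
	struct iov_iter iter;
	int ret;

	ret = fscache_begin_write_operation(&cres, cookie);
	if (ret < 0)
		return ret;	/* -ENOBUFS: no cache available */

	iov_iter_bvec(&iter, WRITE, &bvec, 1, PAGE_SIZE);
	ret = fscache_write(&cres, pos, &iter, NULL, NULL);
	fscache_end_operation(&cres);
	return ret;
}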
/**
- * fscache_check_page_write - Ask if a page is being writing to the cache
+ * fscache_clear_page_bits - Clear the PG_fscache bits from a set of pages
* @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- *
- * Ask the cache if a page is being written to the cache.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-bool fscache_check_page_write(struct fscache_cookie *cookie,
- struct page *page)
+ * @mapping: The netfs inode to use as the source
+ * @start: The start position in @mapping
+ * @len: The amount of data to unlock
+ * @caching: If PG_fscache has been set
+ *
+ * Clear the PG_fscache flag from a sequence of pages and wake up anyone who's
+ * waiting.
+ */
+static inline void fscache_clear_page_bits(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len,
+ bool caching)
{
- if (fscache_cookie_valid(cookie))
- return __fscache_check_page_write(cookie, page);
- return false;
+ if (caching)
+ __fscache_clear_page_bits(mapping, start, len);
}
/**
- * fscache_wait_on_page_write - Wait for a page to complete writing to the cache
+ * fscache_write_to_cache - Save a write to the cache and clear PG_fscache
* @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- *
- * Ask the cache to wake us up when a page is no longer being written to the
- * cache.
- *
- * See Documentation/filesystems/caching/netfs-api.rst for a complete
- * description.
- */
-static inline
-void fscache_wait_on_page_write(struct fscache_cookie *cookie,
- struct page *page)
+ * @mapping: The netfs inode to use as the source
+ * @start: The start position in @mapping
+ * @len: The amount of data to write back
+ * @i_size: The new size of the inode
+ * @term_func: The function to call upon completion
+ * @term_func_priv: The private data for @term_func
+ * @caching: If PG_fscache has been set
+ *
+ * Helper function for a netfs to write dirty data from an inode into the cache
+ * object that's backing it.
+ *
+ * @start and @len describe the range of the data. This does not need to be
+ * page-aligned, but to satisfy DIO requirements, the cache may expand it up to
+ * the page boundaries on either end. All the pages covering the range must be
+ * marked with PG_fscache.
+ *
+ * If given, @term_func will be called upon completion and supplied with
+ * @term_func_priv. Note that the PG_fscache flags will have been cleared by
+ * this point, so the netfs must retain its own pin on the mapping.
+ */
+static inline void fscache_write_to_cache(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ loff_t start, size_t len, loff_t i_size,
+ netfs_io_terminated_t term_func,
+ void *term_func_priv,
+ bool caching)
{
- if (fscache_cookie_valid(cookie))
- __fscache_wait_on_page_write(cookie, page);
-}
+ if (caching)
+ __fscache_write_to_cache(cookie, mapping, start, len, i_size,
+ term_func, term_func_priv, caching);
+ else if (term_func)
+ term_func(term_func_priv, -ENOBUFS, false);
-/**
- * fscache_maybe_release_page - Consider releasing a page, cancelling a store
- * @cookie: The cookie representing the cache object
- * @page: The netfs page that is being cached.
- * @gfp: The gfp flags passed to releasepage()
- *
- * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
- * releasepage() call. A storage request on the page may cancelled if it is
- * not currently being processed.
- *
- * The function returns true if the page no longer has a storage request on it,
- * and false if a storage request is left in place. If true is returned, the
- * page will have been passed to fscache_uncache_page(). If false is returned
- * the page cannot be freed yet.
- */
-static inline
-bool fscache_maybe_release_page(struct fscache_cookie *cookie,
- struct page *page,
- gfp_t gfp)
-{
- if (fscache_cookie_valid(cookie) && PageFsCache(page))
- return __fscache_maybe_release_page(cookie, page, gfp);
- return true;
}
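
To make the calling convention concrete, here is a hedged sketch of a termination handler and a writeback call site; example_cookie() stands in for the filesystem's own inode-to-cookie accessor and is not a real API.

/* Hedged example: write back [start, start + len) and log any cache error.
 * The PG_fscache bits on the covered pages are assumed to have been set.
 */
static void example_write_done(void *priv, ssize_t transferred_or_error,
			       bool was_async)
{
	struct inode *inode = priv;

	if (transferred_or_error < 0)
		pr_debug("cache write for inode %lu failed: %zd\n",
			 inode->i_ino, transferred_or_error);
}

static void example_write_back(struct inode *inode, loff_t start, size_t len,
			       bool caching)
{
	fscache_write_to_cache(example_cookie(inode), inode->i_mapping,
			       start, len, i_size_read(inode),
			       example_write_done, inode, caching);
}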
+#if __fscache_available
+extern int fscache_set_page_dirty(struct page *page, struct fscache_cookie *cookie);
+#else
+#define fscache_set_page_dirty(PAGE, COOKIE) (__set_page_dirty_nobuffers((PAGE)))
+#endif
+
/**
- * fscache_uncache_all_inode_pages - Uncache all an inode's pages
- * @cookie: The cookie representing the inode's cache object.
- * @inode: The inode to uncache pages from.
- *
- * Uncache all the pages in an inode that are marked PG_fscache, assuming them
- * to be associated with the given cookie.
+ * fscache_unpin_writeback - Unpin writeback resources
+ * @wbc: The writeback control
+ * @cookie: The cookie referring to the cache object
*
- * This function may sleep. It will wait for pages that are being written out
- * and will wait whilst the PG_fscache mark is removed by the cache.
+ * Unpin the writeback resources pinned by fscache_set_page_dirty(). This is
+ * intended to be called by the netfs's ->write_inode() method.
*/
-static inline
-void fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
- struct inode *inode)
+static inline void fscache_unpin_writeback(struct writeback_control *wbc,
+ struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie))
- __fscache_uncache_all_inode_pages(cookie, inode);
+ if (wbc->unpinned_fscache_wb)
+ fscache_unuse_cookie(cookie, NULL, NULL);
}
-#endif /* FSCACHE_USE_NEW_IO_API */
-
/**
- * fscache_disable_cookie - Disable a cookie
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- * @invalidate: Invalidate the backing object
- *
- * Disable a cookie from accepting further alloc, read, write, invalidate,
- * update or acquire operations. Outstanding operations can still be waited
- * upon and pages can still be uncached and the cookie relinquished.
- *
- * This will not return until all outstanding operations have completed.
+ * fscache_clear_inode_writeback - Clear writeback resources pinned by an inode
+ * @cookie: The cookie referring to the cache object
+ * @inode: The inode to clean up
+ * @aux: Auxiliary data to apply to the inode
*
- * If @invalidate is set, then the backing object will be invalidated and
- * detached, otherwise it will just be detached.
- *
- * If @aux_data is set, then auxiliary data will be updated from that.
+ * Clear any writeback resources held by an inode when the inode is evicted.
+ * This must be called before clear_inode() is called.
*/
-static inline
-void fscache_disable_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- bool invalidate)
+static inline void fscache_clear_inode_writeback(struct fscache_cookie *cookie,
+ struct inode *inode,
+ const void *aux)
{
- if (fscache_cookie_valid(cookie) && fscache_cookie_enabled(cookie))
- __fscache_disable_cookie(cookie, aux_data, invalidate);
+ if (inode->i_state & I_PINNING_FSCACHE_WB) {
+ loff_t i_size = i_size_read(inode);
+ fscache_unuse_cookie(cookie, aux, &i_size);
+ }
}
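
A hedged sketch of how these two hooks might sit in a netfs's superblock operations; example_cookie() is again a hypothetical accessor, not part of this patch.

static int example_write_inode(struct inode *inode,
			       struct writeback_control *wbc)
{
	/* Drop the writeback pin taken by fscache_set_page_dirty(). */
	fscache_unpin_writeback(wbc, example_cookie(inode));
	return 0;
}

static void example_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	/* Must run before clear_inode(). */
	fscache_clear_inode_writeback(example_cookie(inode), inode, NULL);
	clear_inode(inode);
}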
/**
- * fscache_enable_cookie - Reenable a cookie
- * @cookie: The cookie representing the cache object
- * @aux_data: The updated auxiliary data for the cookie (may be NULL)
- * @object_size: Current size of object
- * @can_enable: A function to permit enablement once lock is held
- * @data: Data for can_enable()
- *
- * Reenable a previously disabled cookie, allowing it to accept further alloc,
- * read, write, invalidate, update or acquire operations. An attempt will be
- * made to immediately reattach the cookie to a backing object. If @aux_data
- * is set, the auxiliary data attached to the cookie will be updated.
+ * fscache_note_page_release - Note that a netfs page got released
+ * @cookie: The cookie corresponding to the file
*
- * The can_enable() function is called (if not NULL) once the enablement lock
- * is held to rule on whether enablement is still permitted to go ahead.
+ * Note that a page that has been copied to the cache has been released. This
+ * means that future reads will need to look in the cache to see if it's there.
*/
static inline
-void fscache_enable_cookie(struct fscache_cookie *cookie,
- const void *aux_data,
- loff_t object_size,
- bool (*can_enable)(void *data),
- void *data)
+void fscache_note_page_release(struct fscache_cookie *cookie)
{
- if (fscache_cookie_valid(cookie) && !fscache_cookie_enabled(cookie))
- __fscache_enable_cookie(cookie, aux_data, object_size,
- can_enable, data);
+ /* If we've written data to the cache (HAVE_DATA) and there wasn't any
+ * data in the cache when we started (NO_DATA_TO_READ), it may no
+ * longer be true that we can skip reading from the cache - so clear
+ * the flag that causes reads to be skipped.
+ */
+ if (cookie &&
+ test_bit(FSCACHE_COOKIE_HAVE_DATA, &cookie->flags) &&
+ test_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags))
+ clear_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);
}
#endif /* _LINUX_FSCACHE_H */
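
Sketching a plausible ->releasepage() (hypothetical, modelled on in-tree netfs users): wait for any in-flight cache write, then note the release so that later reads consult the cache again.

static int example_releasepage(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;

	if (PageFsCache(page)) {
		/* Can only wait if the caller allows blocking and FS entry. */
		if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
			return 0;
		wait_on_page_fscache(page);
	}
	fscache_note_page_release(example_cookie(inode));
	return 1;
}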
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index e026f6c48b49..7b6c42bfb660 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -91,13 +91,13 @@ struct fsl_mc_resource {
/**
* struct fsl_mc_device_irq - MC object device message-based interrupt
- * @msi_desc: pointer to MSI descriptor allocated by fsl_mc_msi_alloc_descs()
+ * @virq: Linux virtual interrupt number
* @mc_dev: MC object device that owns this interrupt
* @dev_irq_index: device-relative IRQ index
* @resource: MC generic resource associated with the interrupt
*/
struct fsl_mc_device_irq {
- struct msi_desc *msi_desc;
+ unsigned int virq;
struct fsl_mc_device *mc_dev;
u8 dev_irq_index;
struct fsl_mc_resource resource;
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 787545e87eeb..bb8467cd11ae 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -144,16 +144,19 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
u32 fs_cookie = fsnotify_get_cookie();
__u32 old_dir_mask = FS_MOVED_FROM;
__u32 new_dir_mask = FS_MOVED_TO;
+ __u32 rename_mask = FS_RENAME;
const struct qstr *new_name = &moved->d_name;
- if (old_dir == new_dir)
- old_dir_mask |= FS_DN_RENAME;
-
if (isdir) {
old_dir_mask |= FS_ISDIR;
new_dir_mask |= FS_ISDIR;
+ rename_mask |= FS_ISDIR;
}
+ /* Event with information about both old and new parent+name */
+ fsnotify_name(rename_mask, moved, FSNOTIFY_EVENT_DENTRY,
+ old_dir, old_name, 0);
+
fsnotify_name(old_dir_mask, source, FSNOTIFY_EVENT_INODE,
old_dir, old_name, fs_cookie);
fsnotify_name(new_dir_mask, source, FSNOTIFY_EVENT_INODE,
@@ -222,16 +225,53 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode,
}
/*
+ * fsnotify_delete - @dentry was unlinked and unhashed
+ *
+ * Caller must make sure that dentry->d_name is stable.
+ *
+ * Note: unlike fsnotify_unlink(), we also have to pass the unlinked inode
+ * as this may be called after d_delete(), when the dentry may be negative.
+ */
+static inline void fsnotify_delete(struct inode *dir, struct inode *inode,
+ struct dentry *dentry)
+{
+ __u32 mask = FS_DELETE;
+
+ if (S_ISDIR(inode->i_mode))
+ mask |= FS_ISDIR;
+
+ fsnotify_name(mask, inode, FSNOTIFY_EVENT_INODE, dir, &dentry->d_name,
+ 0);
+}
+
+/**
+ * d_delete_notify - delete a dentry and call fsnotify_delete()
+ * @dir: The directory inode from which the dentry is being deleted
+ * @dentry: The dentry to delete
+ *
+ * This helper is used to guarantee that the unlinked inode cannot be found
+ * by lookup of this name after the fsnotify_delete() event has been delivered.
+ */
+static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
+{
+ struct inode *inode = d_inode(dentry);
+
+ ihold(inode);
+ d_delete(dentry);
+ fsnotify_delete(dir, inode, dentry);
+ iput(inode);
+}
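
A hedged sketch of the intended call pattern in a filesystem's unlink path; example_remove_name() is a stand-in for the fs-specific directory update.

static int example_unlink(struct inode *dir, struct dentry *dentry)
{
	int err = example_remove_name(dir, dentry);

	if (!err)
		d_delete_notify(dir, dentry);	/* not plain d_delete() */
	return err;
}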
+
+/*
* fsnotify_unlink - 'name' was unlinked
*
* Caller must make sure that dentry->d_name is stable.
*/
static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
{
- /* Expected to be called before d_delete() */
- WARN_ON_ONCE(d_is_negative(dentry));
+ if (WARN_ON_ONCE(d_is_negative(dentry)))
+ return;
- fsnotify_dirent(dir, dentry, FS_DELETE);
+ fsnotify_delete(dir, d_inode(dentry), dentry);
}
/*
@@ -255,10 +295,10 @@ static inline void fsnotify_mkdir(struct inode *dir, struct dentry *dentry)
*/
static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
{
- /* Expected to be called before d_delete() */
- WARN_ON_ONCE(d_is_negative(dentry));
+ if (WARN_ON_ONCE(d_is_negative(dentry)))
+ return;
- fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR);
+ fsnotify_delete(dir, d_inode(dentry), dentry);
}
/*
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 51ef2b079bfa..790c31844db5 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -63,7 +63,7 @@
*/
#define FS_EVENT_ON_CHILD 0x08000000
-#define FS_DN_RENAME 0x10000000 /* file renamed */
+#define FS_RENAME 0x10000000 /* File was renamed */
#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
#define FS_ISDIR 0x40000000 /* event occurred against dir */
#define FS_IN_ONESHOT 0x80000000 /* only send event once */
@@ -76,7 +76,7 @@
* The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event
* when a directory entry inside a child subdir changes.
*/
-#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE)
+#define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | FS_RENAME)
#define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \
FS_OPEN_EXEC_PERM)
@@ -101,7 +101,7 @@
/* Events that can be reported to backends */
#define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \
FS_EVENTS_POSS_ON_CHILD | \
- FS_DELETE_SELF | FS_MOVE_SELF | FS_DN_RENAME | \
+ FS_DELETE_SELF | FS_MOVE_SELF | \
FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \
FS_ERROR)
@@ -337,58 +337,69 @@ static inline struct fs_error_report *fsnotify_data_error_report(
}
}
+/*
+ * Index to merged marks iterator array that correlates to a type of watch.
+ * The type of watched object can be deduced from the iterator type, but not
+ * the other way around, because an event can match different watched objects
+ * of the same object type.
+ * For example, both parent and child are watching an object of type inode.
+ */
+enum fsnotify_iter_type {
+ FSNOTIFY_ITER_TYPE_INODE,
+ FSNOTIFY_ITER_TYPE_VFSMOUNT,
+ FSNOTIFY_ITER_TYPE_SB,
+ FSNOTIFY_ITER_TYPE_PARENT,
+ FSNOTIFY_ITER_TYPE_INODE2,
+ FSNOTIFY_ITER_TYPE_COUNT
+};
+
+/* The type of object that a mark is attached to */
enum fsnotify_obj_type {
+ FSNOTIFY_OBJ_TYPE_ANY = -1,
FSNOTIFY_OBJ_TYPE_INODE,
- FSNOTIFY_OBJ_TYPE_PARENT,
FSNOTIFY_OBJ_TYPE_VFSMOUNT,
FSNOTIFY_OBJ_TYPE_SB,
FSNOTIFY_OBJ_TYPE_COUNT,
FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT
};
-#define FSNOTIFY_OBJ_TYPE_INODE_FL (1U << FSNOTIFY_OBJ_TYPE_INODE)
-#define FSNOTIFY_OBJ_TYPE_PARENT_FL (1U << FSNOTIFY_OBJ_TYPE_PARENT)
-#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)
-#define FSNOTIFY_OBJ_TYPE_SB_FL (1U << FSNOTIFY_OBJ_TYPE_SB)
-#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1)
-
-static inline bool fsnotify_valid_obj_type(unsigned int type)
+static inline bool fsnotify_valid_obj_type(unsigned int obj_type)
{
- return (type < FSNOTIFY_OBJ_TYPE_COUNT);
+ return (obj_type < FSNOTIFY_OBJ_TYPE_COUNT);
}
struct fsnotify_iter_info {
- struct fsnotify_mark *marks[FSNOTIFY_OBJ_TYPE_COUNT];
+ struct fsnotify_mark *marks[FSNOTIFY_ITER_TYPE_COUNT];
unsigned int report_mask;
int srcu_idx;
};
static inline bool fsnotify_iter_should_report_type(
- struct fsnotify_iter_info *iter_info, int type)
+ struct fsnotify_iter_info *iter_info, int iter_type)
{
- return (iter_info->report_mask & (1U << type));
+ return (iter_info->report_mask & (1U << iter_type));
}
static inline void fsnotify_iter_set_report_type(
- struct fsnotify_iter_info *iter_info, int type)
+ struct fsnotify_iter_info *iter_info, int iter_type)
{
- iter_info->report_mask |= (1U << type);
+ iter_info->report_mask |= (1U << iter_type);
}
static inline void fsnotify_iter_set_report_type_mark(
- struct fsnotify_iter_info *iter_info, int type,
+ struct fsnotify_iter_info *iter_info, int iter_type,
struct fsnotify_mark *mark)
{
- iter_info->marks[type] = mark;
- iter_info->report_mask |= (1U << type);
+ iter_info->marks[iter_type] = mark;
+ iter_info->report_mask |= (1U << iter_type);
}
#define FSNOTIFY_ITER_FUNCS(name, NAME) \
static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \
struct fsnotify_iter_info *iter_info) \
{ \
- return (iter_info->report_mask & FSNOTIFY_OBJ_TYPE_##NAME##_FL) ? \
- iter_info->marks[FSNOTIFY_OBJ_TYPE_##NAME] : NULL; \
+ return (iter_info->report_mask & (1U << FSNOTIFY_ITER_TYPE_##NAME)) ? \
+ iter_info->marks[FSNOTIFY_ITER_TYPE_##NAME] : NULL; \
}
FSNOTIFY_ITER_FUNCS(inode, INODE)
@@ -396,8 +407,8 @@ FSNOTIFY_ITER_FUNCS(parent, PARENT)
FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
FSNOTIFY_ITER_FUNCS(sb, SB)
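
For clarity, the inode instantiation of the macro above expands to roughly:

static inline struct fsnotify_mark *fsnotify_iter_inode_mark(
		struct fsnotify_iter_info *iter_info)
{
	return (iter_info->report_mask & (1U << FSNOTIFY_ITER_TYPE_INODE)) ?
		iter_info->marks[FSNOTIFY_ITER_TYPE_INODE] : NULL;
}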
-#define fsnotify_foreach_obj_type(type) \
- for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++)
+#define fsnotify_foreach_iter_type(type) \
+ for (type = 0; type < FSNOTIFY_ITER_TYPE_COUNT; type++)
/*
* fsnotify_connp_t is what we embed in objects which connector can be attached
@@ -604,11 +615,11 @@ extern int fsnotify_get_conn_fsid(const struct fsnotify_mark_connector *conn,
__kernel_fsid_t *fsid);
/* attach the mark to the object */
extern int fsnotify_add_mark(struct fsnotify_mark *mark,
- fsnotify_connp_t *connp, unsigned int type,
+ fsnotify_connp_t *connp, unsigned int obj_type,
int allow_dups, __kernel_fsid_t *fsid);
extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
fsnotify_connp_t *connp,
- unsigned int type, int allow_dups,
+ unsigned int obj_type, int allow_dups,
__kernel_fsid_t *fsid);
/* attach the mark to the inode */
@@ -637,22 +648,23 @@ extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
extern void fsnotify_free_mark(struct fsnotify_mark *mark);
/* Wait until all marks queued for destruction are destroyed */
extern void fsnotify_wait_marks_destroyed(void);
-/* run all the marks in a group, and clear all of the marks attached to given object type */
-extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type);
+/* Clear all of the marks of a group attached to a given object type */
+extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group,
+ unsigned int obj_type);
/* run all the marks in a group, and clear all of the vfsmount marks */
static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL);
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT);
}
/* run all the marks in a group, and clear all of the inode marks */
static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE_FL);
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE);
}
/* run all the marks in a group, and clear all of the sb marks */
static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group)
{
- fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB_FL);
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB);
}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 74c410263113..6906a45bc761 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -39,57 +39,24 @@ struct partition_meta_info {
/**
* DOC: genhd capability flags
*
- * ``GENHD_FL_REMOVABLE`` (0x0001): indicates that the block device
- * gives access to removable media.
- * When set, the device remains present even when media is not
- * inserted.
- * Must not be set for devices which are removed entirely when the
+ * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
+ * removable media. When set, the device remains present even when media is not
+ * inserted. Shall not be set for devices which are removed entirely when the
* media is removed.
*
- * ``GENHD_FL_CD`` (0x0008): the block device is a CD-ROM-style
- * device.
- * Affects responses to the ``CDROM_GET_CAPABILITY`` ioctl.
+ * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
+ * doesn't appear in sysfs, and can't be opened from userspace or using
+ * blkdev_get*. Used for the underlying components of multipath devices.
*
- * ``GENHD_FL_SUPPRESS_PARTITION_INFO`` (0x0020): don't include
- * partition information in ``/proc/partitions`` or in the output of
- * printk_all_partitions().
- * Used for the null block device and some MMC devices.
+ * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
+ * scan for partitions from add_disk, and users can't add partitions manually.
*
- * ``GENHD_FL_EXT_DEVT`` (0x0040): the driver supports extended
- * dynamic ``dev_t``, i.e. it wants extended device numbers
- * (``BLOCK_EXT_MAJOR``).
- * This affects the maximum number of partitions.
- *
- * ``GENHD_FL_NATIVE_CAPACITY`` (0x0080): based on information in the
- * partition table, the device's capacity has been extended to its
- * native capacity; i.e. the device has hidden capacity used by one
- * of the partitions (this is a flag used so that native capacity is
- * only ever unlocked once).
- *
- * ``GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE`` (0x0100): event polling is
- * blocked whenever a writer holds an exclusive lock.
- *
- * ``GENHD_FL_NO_PART_SCAN`` (0x0200): partition scanning is disabled.
- * Used for loop devices in their default settings and some MMC
- * devices.
- *
- * ``GENHD_FL_HIDDEN`` (0x0400): the block device is hidden; it
- * doesn't produce events, doesn't appear in sysfs, and doesn't have
- * an associated ``bdev``.
- * Implies ``GENHD_FL_SUPPRESS_PARTITION_INFO`` and
- * ``GENHD_FL_NO_PART_SCAN``.
- * Used for multipath devices.
*/
-#define GENHD_FL_REMOVABLE 0x0001
-/* 2 is unused (used to be GENHD_FL_DRIVERFS) */
-/* 4 is unused (used to be GENHD_FL_MEDIA_CHANGE_NOTIFY) */
-#define GENHD_FL_CD 0x0008
-#define GENHD_FL_SUPPRESS_PARTITION_INFO 0x0020
-#define GENHD_FL_EXT_DEVT 0x0040
-#define GENHD_FL_NATIVE_CAPACITY 0x0080
-#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 0x0100
-#define GENHD_FL_NO_PART_SCAN 0x0200
-#define GENHD_FL_HIDDEN 0x0400
+enum {
+ GENHD_FL_REMOVABLE = 1 << 0,
+ GENHD_FL_HIDDEN = 1 << 1,
+ GENHD_FL_NO_PART = 1 << 2,
+};
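
As a hedged illustration of the new flag, a loop-style driver that wants no partition support at all might do something like this (the allocation and teardown details are elided):

static int example_add_disk(void)
{
	struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);

	if (!disk)
		return -ENOMEM;
	disk->flags |= GENHD_FL_NO_PART;	/* no scan, no user partitions */
	/* ... set disk_name, fops and capacity, then add_disk() ... */
	return 0;
}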
enum {
DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
@@ -101,6 +68,8 @@ enum {
DISK_EVENT_FLAG_POLL = 1 << 0,
/* Forward events to udev */
DISK_EVENT_FLAG_UEVENT = 1 << 1,
+ /* Block event polling when open for exclusive write */
+ DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE = 1 << 2,
};
struct disk_events;
@@ -115,13 +84,13 @@ struct blk_integrity {
};
struct gendisk {
- /* major, first_minor and minors are input parameters only,
- * don't use directly. Use disk_devt() and disk_max_parts().
+ /*
+ * major/first_minor/minors should not be set by any new driver, the
+ * block core will take care of allocating them automatically.
*/
- int major; /* major number of driver */
+ int major;
int first_minor;
- int minors; /* maximum number of minors, =1 for
- * disks that can't be partitioned. */
+ int minors;
char disk_name[DISK_NAME_LEN]; /* name of major driver */
@@ -140,6 +109,7 @@ struct gendisk {
#define GD_NEED_PART_SCAN 0
#define GD_READ_ONLY 1
#define GD_DEAD 2
+#define GD_NATIVE_CAPACITY 3
struct mutex open_mutex; /* open/close mutex */
unsigned open_partitions; /* number of open partitions */
@@ -184,19 +154,6 @@ static inline bool disk_live(struct gendisk *disk)
#define disk_to_cdi(disk) NULL
#endif
-static inline int disk_max_parts(struct gendisk *disk)
-{
- if (disk->flags & GENHD_FL_EXT_DEVT)
- return DISK_MAX_PARTS;
- return disk->minors;
-}
-
-static inline bool disk_part_scan_enabled(struct gendisk *disk)
-{
- return disk_max_parts(disk) > 1 &&
- !(disk->flags & GENHD_FL_NO_PART_SCAN);
-}
-
static inline dev_t disk_devt(struct gendisk *disk)
{
return MKDEV(disk->major, disk->first_minor);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 8fcc38467af6..80f63c862be5 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -302,7 +302,9 @@ struct vm_area_struct;
* lowest zone as a type of emergency reserve.
*
* %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
- * address.
+ * address. Note that kmalloc(..., GFP_DMA32) does not return DMA32 memory
+ * because the DMA32 kmalloc cache array is not implemented.
+ * (Reason: there are no such users in the kernel.)
*
* %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
* do not need to be directly accessible by the kernel but that cannot
@@ -598,9 +600,9 @@ struct page *alloc_pages(gfp_t gfp, unsigned int order);
struct folio *folio_alloc(gfp_t gfp, unsigned order);
extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
struct vm_area_struct *vma, unsigned long addr,
- int node, bool hugepage);
+ bool hugepage);
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
- alloc_pages_vma(gfp_mask, order, vma, addr, numa_node_id(), true)
+ alloc_pages_vma(gfp_mask, order, vma, addr, true)
#else
static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
{
@@ -610,14 +612,14 @@ static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
{
return __folio_alloc_node(gfp, order, numa_node_id());
}
-#define alloc_pages_vma(gfp_mask, order, vma, addr, node, false)\
+#define alloc_pages_vma(gfp_mask, order, vma, addr, false)\
alloc_pages(gfp_mask, order)
#define alloc_hugepage_vma(gfp_mask, vma, addr, order) \
alloc_pages(gfp_mask, order)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
#define alloc_page_vma(gfp_mask, vma, addr) \
- alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id(), false)
+ alloc_pages_vma(gfp_mask, 0, vma, addr, false)
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/include/linux/hash.h b/include/linux/hash.h
index ad6fa21d977b..38edaa08f862 100644
--- a/include/linux/hash.h
+++ b/include/linux/hash.h
@@ -62,10 +62,7 @@ static inline u32 __hash_32_generic(u32 val)
return val * GOLDEN_RATIO_32;
}
-#ifndef HAVE_ARCH_HASH_32
-#define hash_32 hash_32_generic
-#endif
-static inline u32 hash_32_generic(u32 val, unsigned int bits)
+static inline u32 hash_32(u32 val, unsigned int bits)
{
/* High bits are more random, so use them. */
return __hash_32(val) >> (32 - bits);
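
With the arch override gone, hash_32() is always the generic multiplicative hash; for example, folding a 32-bit key into a 256-bucket table index:

static unsigned int example_bucket(u32 key)
{
	return hash_32(key, 8);		/* uses the high bits; 0..255 */
}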
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index f280f33ff223..e4c18ba8d3bf 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -274,6 +274,15 @@ static inline int thp_nr_pages(struct page *page)
return 1;
}
+/**
+ * folio_test_pmd_mappable - Can we map this folio with a PMD?
+ * @folio: The folio to test
+ */
+static inline bool folio_test_pmd_mappable(struct folio *folio)
+{
+ return folio_order(folio) >= HPAGE_PMD_ORDER;
+}
+
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
@@ -339,6 +348,11 @@ static inline int thp_nr_pages(struct page *page)
return 1;
}
+static inline bool folio_test_pmd_mappable(struct folio *folio)
+{
+ return false;
+}
+
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
return false;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 00351ccb49a3..d1897a69c540 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -622,8 +622,8 @@ struct hstate {
#endif
#ifdef CONFIG_CGROUP_HUGETLB
/* cgroup control files */
- struct cftype cgroup_files_dfl[7];
- struct cftype cgroup_files_legacy[9];
+ struct cftype cgroup_files_dfl[8];
+ struct cftype cgroup_files_legacy[10];
#endif
char name[HSTATE_NAME_LEN];
};
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index ba025ae27882..379344828e78 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -36,6 +36,11 @@ enum hugetlb_memory_event {
HUGETLB_NR_MEMORY_EVENTS,
};
+struct hugetlb_cgroup_per_node {
+ /* hugetlb usage in pages over all hstates. */
+ unsigned long usage[HUGE_MAX_HSTATE];
+};
+
struct hugetlb_cgroup {
struct cgroup_subsys_state css;
@@ -57,6 +62,8 @@ struct hugetlb_cgroup {
/* Handle for "hugetlb.events.local" */
struct cgroup_file events_local_file[HUGE_MAX_HSTATE];
+
+ struct hugetlb_cgroup_per_node *nodeinfo[];
};
static inline struct hugetlb_cgroup *
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b823311eac79..fe2e0179ed51 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -1261,6 +1261,8 @@ struct hv_device {
struct vmbus_channel *channel;
struct kset *channels_kset;
+ struct device_dma_parameters dma_parms;
+ u64 dma_mask;
/* place holder to keep track of the dir for hv device in debugfs */
struct dentry *debug_dir;
@@ -1583,6 +1585,11 @@ struct hyperv_service_callback {
void (*callback)(void *context);
};
+struct hv_dma_range {
+ dma_addr_t dma;
+ u32 mapping_size;
+};
+
#define MAX_SRV_VER 0x7ffffff
extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, u32 buflen,
const int *fw_version, int fw_vercnt,
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 8420fe504927..2be4dd7e90a9 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -46,8 +46,10 @@ struct vlan_hdr {
* @h_vlan_encapsulated_proto: packet type ID or len
*/
struct vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
+ struct_group(addrs,
+ unsigned char h_dest[ETH_ALEN];
+ unsigned char h_source[ETH_ALEN];
+ );
__be16 h_vlan_proto;
__be16 h_vlan_TCI;
__be16 h_vlan_encapsulated_proto;
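
The struct_group() wrapper lets both addresses be copied with a single memcpy() whose bounds FORTIFY_SOURCE can check, instead of writing 2 * ETH_ALEN bytes through h_dest; a minimal sketch:

static void example_copy_eth_addrs(struct vlan_ethhdr *dst,
				   const struct vlan_ethhdr *src)
{
	memcpy(&dst->addrs, &src->addrs, sizeof(dst->addrs));
}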
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index ff15c61bf319..6564bdcdac66 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -17,11 +17,6 @@ struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;
-struct iio_buffer_block {
- u32 size;
- u32 bytes_used;
-};
-
/**
* enum iio_block_state - State of a struct iio_dma_buffer_block
* @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 324561b7a5e8..07025d6b3de1 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -103,15 +103,16 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
/**
* IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
* @_name: Attribute name ("_available" will be appended to the name)
+ * @_shared: Whether the attribute is shared between all channels
* @_e: Pointer to an iio_enum struct
*
* Creates a read only attribute which lists all the available enum items in a
* space separated list. This should usually be used together with IIO_ENUM()
*/
-#define IIO_ENUM_AVAILABLE(_name, _e) \
+#define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
{ \
.name = (_name "_available"), \
- .shared = IIO_SHARED_BY_TYPE, \
+ .shared = _shared, \
.read = iio_enum_available_read, \
.private = (uintptr_t)(_e), \
}
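
After this change a caller states the sharedness explicitly; keeping the old IIO_SHARED_BY_TYPE behaviour would look like this (example_mode_enum is a hypothetical struct iio_enum):

static const struct iio_chan_spec_ext_info example_ext_info[] = {
	IIO_ENUM("mode", IIO_SEPARATE, &example_mode_enum),
	IIO_ENUM_AVAILABLE("mode", IIO_SHARED_BY_TYPE, &example_mode_enum),
	{ }
};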
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 096f68dd2e0c..4c69b144677b 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -55,6 +55,7 @@ struct iio_trigger_ops {
* @attached_own_device:[INTERN] if we are using our own device as trigger,
* i.e. if we registered a poll function to the same
* device as the one providing the trigger.
+ * @reenable_work: [INTERN] work item used to ensure reenable can sleep.
**/
struct iio_trigger {
const struct iio_trigger_ops *ops;
@@ -74,6 +75,7 @@ struct iio_trigger {
unsigned long pool[BITS_TO_LONGS(CONFIG_IIO_CONSUMERS_PER_TRIGGER)];
struct mutex pool_lock;
bool attached_own_device;
+ struct work_struct reenable_work;
};
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 84b3f8175cc6..a7aa91f3a8dc 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -24,6 +24,7 @@ enum iio_event_info {
#define IIO_VAL_INT_PLUS_NANO 3
#define IIO_VAL_INT_PLUS_MICRO_DB 4
#define IIO_VAL_INT_MULTIPLE 5
+#define IIO_VAL_INT_64 6 /* 64-bit data, val is lower 32 bits */
#define IIO_VAL_FRACTIONAL 10
#define IIO_VAL_FRACTIONAL_LOG2 11
#define IIO_VAL_CHAR 12
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index 6a24905f6e1e..8d20caa1b268 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -7,11 +7,8 @@
#ifndef _LINUX_INOTIFY_H
#define _LINUX_INOTIFY_H
-#include <linux/sysctl.h>
#include <uapi/linux/inotify.h>
-extern struct ctl_table inotify_table[]; /* for sysctl */
-
#define ALL_INOTIFY_BITS (IN_ACCESS | IN_MODIFY | IN_ATTRIB | IN_CLOSE_WRITE | \
IN_CLOSE_NOWRITE | IN_OPEN | IN_MOVED_FROM | \
IN_MOVED_TO | IN_CREATE | IN_DELETE | \
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
index 57cceecbe37f..1b73bab7eeff 100644
--- a/include/linux/intel-svm.h
+++ b/include/linux/intel-svm.h
@@ -8,12 +8,6 @@
#ifndef __INTEL_SVM_H__
#define __INTEL_SVM_H__
-/* Values for rxwp in fault_cb callback */
-#define SVM_REQ_READ (1<<3)
-#define SVM_REQ_WRITE (1<<2)
-#define SVM_REQ_EXEC (1<<1)
-#define SVM_REQ_PRIV (1<<0)
-
/* Page Request Queue depth */
#define PRQ_ORDER 2
#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 1f22a30c0963..9367f1cb2e3c 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -329,7 +329,46 @@ extern int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);
-extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+extern int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
+ bool setaffinity);
+
+/**
+ * irq_update_affinity_hint - Update the affinity hint
+ * @irq: Interrupt to update
+ * @m: cpumask pointer (NULL to clear the hint)
+ *
+ * Updates the affinity hint, but does not change the affinity of the interrupt.
+ */
+static inline int
+irq_update_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+ return __irq_apply_affinity_hint(irq, m, false);
+}
+
+/**
+ * irq_set_affinity_and_hint - Update the affinity hint and apply the provided
+ * cpumask to the interrupt
+ * @irq: Interrupt to update
+ * @m: cpumask pointer (NULL to clear the hint)
+ *
+ * Updates the affinity hint and, if @m is not NULL, applies it as the
+ * affinity of that interrupt.
+ */
+static inline int
+irq_set_affinity_and_hint(unsigned int irq, const struct cpumask *m)
+{
+ return __irq_apply_affinity_hint(irq, m, true);
+}
+
+/*
+ * Deprecated. Use irq_update_affinity_hint() or irq_set_affinity_and_hint()
+ * instead.
+ */
+static inline int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+{
+ return irq_set_affinity_and_hint(irq, m);
+}
+
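
A hedged sketch of the intended migration for a driver that only publishes a hint; note that the hint mask must remain valid until the hint is cleared, since the core stores the pointer rather than copying it.

static void example_publish_hint(unsigned int irq, int cpu,
				 struct cpumask *mask /* long-lived */)
{
	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	irq_update_affinity_hint(irq, mask);	/* hint only, no setaffinity */
}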
extern int irq_update_affinity_desc(unsigned int irq,
struct irq_affinity_desc *affinity);
@@ -361,6 +400,18 @@ static inline int irq_can_set_affinity(unsigned int irq)
static inline int irq_select_affinity(unsigned int irq) { return 0; }
+static inline int irq_update_affinity_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
+static inline int irq_set_affinity_and_hint(unsigned int irq,
+ const struct cpumask *m)
+{
+ return -EINVAL;
+}
+
static inline int irq_set_affinity_hint(unsigned int irq,
const struct cpumask *m)
{
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 0a9dc40b7be8..14f7eaf1b443 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -99,55 +99,40 @@ struct io_cq {
struct io_context {
atomic_long_t refcount;
atomic_t active_ref;
- atomic_t nr_tasks;
+ unsigned short ioprio;
+
+#ifdef CONFIG_BLK_ICQ
/* all the fields below are protected by this lock */
spinlock_t lock;
- unsigned short ioprio;
-
struct radix_tree_root icq_tree;
struct io_cq __rcu *icq_hint;
struct hlist_head icq_list;
struct work_struct release_work;
+#endif /* CONFIG_BLK_ICQ */
};
-/**
- * get_io_context_active - get active reference on ioc
- * @ioc: ioc of interest
- *
- * Only iocs with active reference can issue new IOs. This function
- * acquires an active reference on @ioc. The caller must already have an
- * active reference on @ioc.
- */
-static inline void get_io_context_active(struct io_context *ioc)
-{
- WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
- WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
- atomic_long_inc(&ioc->refcount);
- atomic_inc(&ioc->active_ref);
-}
-
-static inline void ioc_task_link(struct io_context *ioc)
-{
- get_io_context_active(ioc);
-
- WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
- atomic_inc(&ioc->nr_tasks);
-}
-
struct task_struct;
#ifdef CONFIG_BLOCK
void put_io_context(struct io_context *ioc);
-void put_io_context_active(struct io_context *ioc);
void exit_io_context(struct task_struct *task);
-struct io_context *get_task_io_context(struct task_struct *task,
- gfp_t gfp_flags, int node);
+int __copy_io(unsigned long clone_flags, struct task_struct *tsk);
+static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+ if (!current->io_context)
+ return 0;
+ return __copy_io(clone_flags, tsk);
+}
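
Schematically, the fork path can now call copy_io() unconditionally; the inline fast path returns 0 without entering the slow path when the parent has no io_context (a sketch, not the actual kernel/fork.c hunk):

static int example_fork_io(unsigned long clone_flags, struct task_struct *p)
{
	return copy_io(clone_flags, p);	/* 0 immediately if no ioc */
}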
#else
struct io_context;
static inline void put_io_context(struct io_context *ioc) { }
static inline void exit_io_context(struct task_struct *task) { }
-#endif
+static inline int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+ return 0;
+}
+#endif /* CONFIG_BLOCK */
-#endif
+#endif /* IOCONTEXT_H */
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 6d1b08d0ae93..97a3a2edb585 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -141,6 +141,11 @@ struct iomap_page_ops {
#define IOMAP_NOWAIT (1 << 5) /* do not block */
#define IOMAP_OVERWRITE_ONLY (1 << 6) /* only pure overwrites allowed */
#define IOMAP_UNSHARE (1 << 7) /* unshare_file_range */
+#ifdef CONFIG_FS_DAX
+#define IOMAP_DAX (1 << 8) /* DAX mapping */
+#else
+#define IOMAP_DAX 0
+#endif /* CONFIG_FS_DAX */
struct iomap_ops {
/*
@@ -225,6 +230,7 @@ void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops);
int iomap_is_partially_uptodate(struct page *page, unsigned long from,
unsigned long count);
int iomap_releasepage(struct page *page, gfp_t gfp_mask);
+void iomap_invalidate_folio(struct folio *folio, size_t offset, size_t len);
void iomap_invalidatepage(struct page *page, unsigned int offset,
unsigned int len);
#ifdef CONFIG_MIGRATION
@@ -257,9 +263,11 @@ struct iomap_ioend {
struct list_head io_list; /* next ioend in chain */
u16 io_type;
u16 io_flags; /* IOMAP_F_* */
+ u32 io_folios; /* folios added to ioend */
struct inode *io_inode; /* file being written to */
size_t io_size; /* size of the extent */
loff_t io_offset; /* offset in the file */
+ sector_t io_sector; /* start sector of ioend */
struct bio *io_bio; /* bio being built */
struct bio io_inline_bio; /* MUST BE LAST! */
};
@@ -284,7 +292,7 @@ struct iomap_writeback_ops {
* Optional, allows the file system to discard state on a page where
* we failed to submit any I/O.
*/
- void (*discard_page)(struct page *page, loff_t fileoff);
+ void (*discard_folio)(struct folio *folio, loff_t pos);
};
struct iomap_writepage_ctx {
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d2f3435e7d17..de0c57a567c8 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -186,7 +186,7 @@ struct iommu_iotlb_gather {
unsigned long start;
unsigned long end;
size_t pgsize;
- struct page *freelist;
+ struct list_head freelist;
bool queued;
};
@@ -399,6 +399,7 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
{
*gather = (struct iommu_iotlb_gather) {
.start = ULONG_MAX,
+ .freelist = LIST_HEAD_INIT(gather->freelist),
};
}
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 71d8a2de6635..cea79cb9f26c 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -12,7 +12,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>
-#include <linux/atomic.h>
#include <linux/dma-mapping.h>
/* iova structure */
@@ -35,35 +34,6 @@ struct iova_rcache {
struct iova_cpu_rcache __percpu *cpu_rcaches;
};
-struct iova_domain;
-
-/* Call-Back from IOVA code into IOMMU drivers */
-typedef void (* iova_flush_cb)(struct iova_domain *domain);
-
-/* Destructor for per-entry data */
-typedef void (* iova_entry_dtor)(unsigned long data);
-
-/* Number of entries per Flush Queue */
-#define IOVA_FQ_SIZE 256
-
-/* Timeout (in ms) after which entries are flushed from the Flush-Queue */
-#define IOVA_FQ_TIMEOUT 10
-
-/* Flush Queue entry for defered flushing */
-struct iova_fq_entry {
- unsigned long iova_pfn;
- unsigned long pages;
- unsigned long data;
- u64 counter; /* Flush counter when this entrie was added */
-};
-
-/* Per-CPU Flush Queue structure */
-struct iova_fq {
- struct iova_fq_entry entries[IOVA_FQ_SIZE];
- unsigned head, tail;
- spinlock_t lock;
-};
-
/* holds all the iova translations for a domain */
struct iova_domain {
spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */
@@ -74,27 +44,9 @@ struct iova_domain {
unsigned long start_pfn; /* Lower limit for this domain */
unsigned long dma_32bit_pfn;
unsigned long max32_alloc_size; /* Size of last failed allocation */
- struct iova_fq __percpu *fq; /* Flush Queue */
-
- atomic64_t fq_flush_start_cnt; /* Number of TLB flushes that
- have been started */
-
- atomic64_t fq_flush_finish_cnt; /* Number of TLB flushes that
- have been finished */
-
struct iova anchor; /* rbtree lookup anchor */
- struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
-
- iova_flush_cb flush_cb; /* Call-Back function to flush IOMMU
- TLBs */
- iova_entry_dtor entry_dtor; /* IOMMU driver specific destructor for
- iova entry */
-
- struct timer_list fq_timer; /* Timer to regularily empty the
- flush-queues */
- atomic_t fq_timer_on; /* 1 when timer is active, 0
- when not */
+ struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE]; /* IOVA range caches */
struct hlist_node cpuhp_dead;
};
@@ -144,17 +96,12 @@ struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size,
bool size_aligned);
void free_iova_fast(struct iova_domain *iovad, unsigned long pfn,
unsigned long size);
-void queue_iova(struct iova_domain *iovad,
- unsigned long pfn, unsigned long pages,
- unsigned long data);
unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
unsigned long limit_pfn, bool flush_rcache);
struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
unsigned long pfn_hi);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn);
-int init_iova_flush_queue(struct iova_domain *iovad,
- iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
void put_iova_domain(struct iova_domain *iovad);
#else
@@ -189,12 +136,6 @@ static inline void free_iova_fast(struct iova_domain *iovad,
{
}
-static inline void queue_iova(struct iova_domain *iovad,
- unsigned long pfn, unsigned long pages,
- unsigned long data)
-{
-}
-
static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
unsigned long size,
unsigned long limit_pfn,
@@ -216,13 +157,6 @@ static inline void init_iova_domain(struct iova_domain *iovad,
{
}
-static inline int init_iova_flush_queue(struct iova_domain *iovad,
- iova_flush_cb flush_cb,
- iova_entry_dtor entry_dtor)
-{
- return -ENODEV;
-}
-
static inline struct iova *find_iova(struct iova_domain *iovad,
unsigned long pfn)
{
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 81cbf85f73de..12d91f0dedf9 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -615,7 +615,7 @@ struct rdists {
void __iomem *rd_base;
struct page *pend_page;
phys_addr_t phys_base;
- bool lpi_enabled;
+ u64 flags;
cpumask_t *vpe_table_mask;
void *vpe_l1_base;
} __percpu *rdist;
@@ -624,6 +624,7 @@ struct rdists {
u64 flags;
u32 gicd_typer;
u32 gicd_typer2;
+ int cpuhp_memreserve_state;
bool has_vlpis;
bool has_rvpeid;
bool has_direct_lpi;
@@ -632,6 +633,7 @@ struct rdists {
struct irq_domain;
struct fwnode_handle;
+int __init its_lpi_memreserve_init(void);
int its_cpu_init(void);
int its_init(struct fwnode_handle *handle, struct rdists *rdists,
struct irq_domain *domain);
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 553da4899f55..d476405802e9 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -131,7 +131,7 @@ struct irq_domain_ops {
#endif
};
-extern struct irq_domain_ops irq_generic_chip_ops;
+extern const struct irq_domain_ops irq_generic_chip_ops;
struct irq_domain_chip_generic;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index fd933c45281a..9c3ada74ffb1 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -594,7 +594,7 @@ struct transaction_s
*/
unsigned long t_log_start;
- /*
+ /*
* Number of buffers on the t_buffers list [j_list_lock, no locks
* needed for jbd2 thread]
*/
@@ -1295,7 +1295,7 @@ struct journal_s
* Clean-up after fast commit or full commit. JBD2 calls this function
* after every commit operation.
*/
- void (*j_fc_cleanup_callback)(struct journal_s *journal, int);
+ void (*j_fc_cleanup_callback)(struct journal_s *journal, int full, tid_t tid);
/**
* @j_fc_replay_callback:
@@ -1419,9 +1419,7 @@ extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
extern bool __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_free_buffer(struct journal_head *bh);
extern void jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
-extern void __journal_clean_data_list(transaction_t *transaction);
static inline void jbd2_file_log_bh(struct list_head *head, struct buffer_head *bh)
{
list_add_tail(&bh->b_assoc_buffers, head);
@@ -1486,9 +1484,6 @@ extern int jbd2_journal_write_metadata_buffer(transaction_t *transaction,
struct buffer_head **bh_out,
sector_t blocknr);
-/* Transaction locking */
-extern void __wait_on_journal (journal_t *);
-
/* Transaction cache support */
extern void jbd2_journal_destroy_transaction_cache(void);
extern int __init jbd2_journal_init_transaction_cache(void);
@@ -1543,6 +1538,8 @@ extern int jbd2_journal_flush(journal_t *journal, unsigned int flags);
extern void jbd2_journal_lock_updates (journal_t *);
extern void jbd2_journal_unlock_updates (journal_t *);
+void jbd2_journal_wait_updates(journal_t *);
+
extern journal_t * jbd2_journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
unsigned long long start, int len, int bsize);
@@ -1774,8 +1771,6 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
#define BJ_Reserved 4 /* Buffer is reserved for access by journal */
#define BJ_Types 5
-extern int jbd_blocks_per_page(struct inode *inode);
-
/* JBD uses a CRC32 checksum */
#define JBD_MAX_CHECKSUM_SIZE 4
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index fb78108d694e..4a45562d8893 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -475,12 +475,12 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
* allocations with real shadow memory. With KASAN vmalloc, the special
* case is unnecessary, as the work is handled in the generic case.
*/
-int kasan_module_alloc(void *addr, size_t size);
+int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_shadow(const struct vm_struct *vm);
#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 77755ac3e189..33f47a996513 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -1,4 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NOTE:
+ *
+ * This header combines a lot of unrelated stuff.
+ * The process of splitting up its content is in progress, while keeping
+ * backward compatibility. That's why it's highly recommended NOT to
+ * include this header inside another header file, especially under the
+ * generic or architectural include/ directories.
+ */
#ifndef _LINUX_KERNEL_H
#define _LINUX_KERNEL_H
@@ -187,7 +196,6 @@ static inline void might_fault(void) { }
#endif
void do_exit(long error_code) __noreturn;
-void complete_and_exit(struct completion *, long) __noreturn;
extern int num_to_str(char *buf, int size,
unsigned long long num, unsigned int width);
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 3ccce6f24548..861c4f0f8a29 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -6,7 +6,6 @@
#ifndef __LINUX_KERNFS_H
#define __LINUX_KERNFS_H
-#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -14,14 +13,18 @@
#include <linux/lockdep.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/types.h>
#include <linux/uidgid.h>
#include <linux/wait.h>
+#include <linux/rwsem.h>
struct file;
struct dentry;
struct iattr;
struct seq_file;
struct vm_area_struct;
+struct vm_operations_struct;
struct super_block;
struct file_system_type;
struct poll_table_struct;
@@ -197,6 +200,7 @@ struct kernfs_root {
struct list_head supers;
wait_queue_head_t deactivate_waitq;
+ struct rw_semaphore kernfs_rwsem;
};
struct kernfs_open_file {
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 4b5e3679a72c..f49e64222628 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -17,6 +17,8 @@
#include <linux/atomic.h>
#include <linux/static_key.h>
+extern unsigned long kfence_sample_interval;
+
/*
* We allocate an even number of pages, as it simplifies calculations to map
* address to metadata indices; effectively, the very first page serves as an
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index efd56f990a46..c7b47399b36a 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -19,10 +19,10 @@
#include <linux/list.h>
#include <linux/sysfs.h>
#include <linux/compiler.h>
+#include <linux/container_of.h>
#include <linux/spinlock.h>
#include <linux/kref.h>
#include <linux/kobject_ns.h>
-#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
@@ -66,7 +66,7 @@ struct kobject {
struct list_head entry;
struct kobject *parent;
struct kset *kset;
- struct kobj_type *ktype;
+ const struct kobj_type *ktype;
struct kernfs_node *sd; /* sysfs directory entry */
struct kref kref;
#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
@@ -90,13 +90,13 @@ static inline const char *kobject_name(const struct kobject *kobj)
return kobj->name;
}
-extern void kobject_init(struct kobject *kobj, struct kobj_type *ktype);
+extern void kobject_init(struct kobject *kobj, const struct kobj_type *ktype);
extern __printf(3, 4) __must_check
int kobject_add(struct kobject *kobj, struct kobject *parent,
const char *fmt, ...);
extern __printf(4, 5) __must_check
int kobject_init_and_add(struct kobject *kobj,
- struct kobj_type *ktype, struct kobject *parent,
+ const struct kobj_type *ktype, struct kobject *parent,
const char *fmt, ...);
extern void kobject_del(struct kobject *kobj);
@@ -117,23 +117,6 @@ extern void kobject_get_ownership(struct kobject *kobj,
kuid_t *uid, kgid_t *gid);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
-/**
- * kobject_has_children - Returns whether a kobject has children.
- * @kobj: the object to test
- *
- * This will return whether a kobject has other kobjects as children.
- *
- * It does NOT account for the presence of attribute files, only sub
- * directories. It also assumes there is no concurrent addition or
- * removal of such children, and thus relies on external locking.
- */
-static inline bool kobject_has_children(struct kobject *kobj)
-{
- WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
-
- return kobj->sd && kobj->sd->dir.subdirs;
-}
-
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
@@ -153,10 +136,9 @@ struct kobj_uevent_env {
};
struct kset_uevent_ops {
- int (* const filter)(struct kset *kset, struct kobject *kobj);
- const char *(* const name)(struct kset *kset, struct kobject *kobj);
- int (* const uevent)(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env);
+ int (* const filter)(struct kobject *kobj);
+ const char *(* const name)(struct kobject *kobj);
+ int (* const uevent)(struct kobject *kobj, struct kobj_uevent_env *env);
};
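With the kset argument gone from all three ops, implementations derive any
context they need from the kobject itself (e.g. kobj->kset). A minimal
sketch of a filter under the new signature; the names are hypothetical:

/* Sketch only: suppress uevents for kobjects that lost their ktype. */
static int example_uevent_filter(struct kobject *kobj)
{
	return get_ktype(kobj) != NULL;
}

static const struct kset_uevent_ops example_uevent_ops = {
	.filter = example_uevent_filter,
};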
struct kobj_attribute {
@@ -217,7 +199,7 @@ static inline void kset_put(struct kset *k)
kobject_put(&k->kobj);
}
-static inline struct kobj_type *get_ktype(struct kobject *kobj)
+static inline const struct kobj_type *get_ktype(struct kobject *kobj)
{
return kobj->ktype;
}
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 8c8f7a4d93af..19b884353b15 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -348,12 +348,6 @@ extern void opt_pre_handler(struct kprobe *p, struct pt_regs *regs);
DEFINE_INSN_CACHE_OPS(optinsn);
-#ifdef CONFIG_SYSCTL
-extern int sysctl_kprobes_optimization;
-extern int proc_kprobes_optimization_handler(struct ctl_table *table,
- int write, void *buffer,
- size_t *length, loff_t *ppos);
-#endif /* CONFIG_SYSCTL */
extern void wait_for_kprobe_optimizer(void);
#else /* !CONFIG_OPTPROBES */
static inline void wait_for_kprobe_optimizer(void) { }
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 346b0f269161..3df4ea04716f 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -33,7 +33,8 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
unsigned int cpu,
const char *namefmt);
-void set_kthread_struct(struct task_struct *p);
+void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk);
+bool set_kthread_struct(struct task_struct *p);
void kthread_set_per_cpu(struct task_struct *k, int cpu);
bool kthread_is_per_cpu(struct task_struct *k);
@@ -56,6 +57,31 @@ bool kthread_is_per_cpu(struct task_struct *k);
__k; \
})
+/**
+ * kthread_run_on_cpu - create and wake a cpu bound thread.
+ * @threadfn: the function to run until signal_pending(current).
+ * @data: data ptr for @threadfn.
+ * @cpu: The cpu on which the thread should be bound.
+ * @namefmt: printf-style name for the thread. Format is restricted
+ * to "name.*%u". Code fills in cpu number.
+ *
+ * Description: Convenient wrapper for kthread_create_on_cpu()
+ * followed by wake_up_process(). Returns the kthread or
+ * ERR_PTR(-ENOMEM).
+ */
+static inline struct task_struct *
+kthread_run_on_cpu(int (*threadfn)(void *data), void *data,
+ unsigned int cpu, const char *namefmt)
+{
+ struct task_struct *p;
+
+ p = kthread_create_on_cpu(threadfn, data, cpu, namefmt);
+ if (!IS_ERR(p))
+ wake_up_process(p);
+
+ return p;
+}
+
void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
@@ -70,6 +96,8 @@ void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
+void kthread_exit(long result) __noreturn;
+void kthread_complete_and_exit(struct completion *, long) __noreturn;
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
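Tying the kthread additions together, a hedged usage sketch: a per-CPU
worker started with the new kthread_run_on_cpu() wrapper that terminates
itself via kthread_exit(). All names below are illustrative, and error
handling is trimmed to the essentials:

/* Illustrative only; real code would manage task state while idle. */
static int example_worker(void *data)
{
	while (!kthread_should_stop())
		cond_resched();		/* per-cpu work would go here */

	kthread_exit(0);		/* explicit exit, never returns */
}

static int example_start(unsigned int cpu)
{
	struct task_struct *p;

	p = kthread_run_on_cpu(example_worker, NULL, cpu, "example/%u");
	return IS_ERR(p) ? PTR_ERR(p) : 0;
}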
diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h
index 120e5e90fa1d..906f899813dc 100644
--- a/include/linux/kvm_dirty_ring.h
+++ b/include/linux/kvm_dirty_ring.h
@@ -27,9 +27,9 @@ struct kvm_dirty_ring {
int index;
};
-#if (KVM_DIRTY_LOG_PAGE_OFFSET == 0)
+#ifndef CONFIG_HAVE_KVM_DIRTY_RING
/*
- * If KVM_DIRTY_LOG_PAGE_OFFSET not defined, kvm_dirty_ring.o should
+ * If CONFIG_HAVE_KVM_DIRTY_RING is not defined, kvm_dirty_ring.o should
* not be included as well, so define these nop functions for the arch.
*/
static inline u32 kvm_dirty_ring_get_rsvd_entries(void)
@@ -43,11 +43,6 @@ static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring,
return 0;
}
-static inline struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm)
-{
- return NULL;
-}
-
static inline int kvm_dirty_ring_reset(struct kvm *kvm,
struct kvm_dirty_ring *ring)
{
@@ -74,11 +69,10 @@ static inline bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
return true;
}
-#else /* KVM_DIRTY_LOG_PAGE_OFFSET == 0 */
+#else /* CONFIG_HAVE_KVM_DIRTY_RING */
u32 kvm_dirty_ring_get_rsvd_entries(void);
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);
-struct kvm_dirty_ring *kvm_dirty_ring_get(struct kvm *kvm);
/*
* called with kvm->slots_lock held, returns the number of
@@ -98,6 +92,6 @@ struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset);
void kvm_dirty_ring_free(struct kvm_dirty_ring *ring);
bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring);
-#endif /* KVM_DIRTY_LOG_PAGE_OFFSET == 0 */
+#endif /* CONFIG_HAVE_KVM_DIRTY_RING */
#endif /* KVM_DIRTY_RING_H */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c310648cc8f1..f11039944c08 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -29,6 +29,12 @@
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <linux/notifier.h>
+#include <linux/ftrace.h>
+#include <linux/hashtable.h>
+#include <linux/instrumentation.h>
+#include <linux/interval_tree.h>
+#include <linux/rbtree.h>
+#include <linux/xarray.h>
#include <asm/signal.h>
#include <linux/kvm.h>
@@ -151,6 +157,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQ_UNBLOCK 2
#define KVM_REQ_UNHALT 3
#define KVM_REQ_VM_DEAD (4 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_GPC_INVALIDATE (5 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQUEST_ARCH_BASE 8
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
@@ -304,13 +311,12 @@ struct kvm_vcpu {
u64 requests;
unsigned long guest_debug;
- int pre_pcpu;
- struct list_head blocked_vcpu_list;
-
struct mutex mutex;
struct kvm_run *run;
+#ifndef __KVM_HAVE_ARCH_WQP
struct rcuwait wait;
+#endif
struct pid __rcu *pid;
int sigset_active;
sigset_t sigset;
@@ -355,15 +361,20 @@ struct kvm_vcpu {
struct kvm_dirty_ring dirty_ring;
/*
- * The index of the most recently used memslot by this vCPU. It's ok
- * if this becomes stale due to memslot changes since we always check
- * it is a valid slot.
+ * The most recently used memslot by this vCPU and the slots generation
+ * for which it is valid.
+ * No wraparound protection is needed since generations won't overflow in
+ * thousands of years, even assuming 1M memslot operations per second.
*/
- int last_used_slot;
+ struct kvm_memory_slot *last_used_slot;
+ u64 last_used_slot_gen;
};
-/* must be called with irqs disabled */
-static __always_inline void guest_enter_irqoff(void)
+/*
+ * Start accounting time towards a guest.
+ * Must be called before entering guest context.
+ */
+static __always_inline void guest_timing_enter_irqoff(void)
{
/*
* This is running in ioctl context so it's safe to assume that it's the
@@ -372,7 +383,18 @@ static __always_inline void guest_enter_irqoff(void)
instrumentation_begin();
vtime_account_guest_enter();
instrumentation_end();
+}
+/*
+ * Enter guest context and enter an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_enter_irqoff(void)
+{
/*
* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode
@@ -388,16 +410,79 @@ static __always_inline void guest_enter_irqoff(void)
}
}
-static __always_inline void guest_exit_irqoff(void)
+/*
+ * Deprecated. Architectures should move to guest_timing_enter_irqoff() and
+ * guest_state_enter_irqoff().
+ */
+static __always_inline void guest_enter_irqoff(void)
+{
+ guest_timing_enter_irqoff();
+ guest_context_enter_irqoff();
+}
+
+/**
+ * guest_state_enter_irqoff - Fixup state when entering a guest
+ *
+ * Entry to a guest will enable interrupts, but the kernel state is interrupts
+ * disabled when this is invoked. Also tell RCU about it.
+ *
+ * 1) Trace interrupts on state
+ * 2) Invoke context tracking if enabled to adjust RCU state
+ * 3) Tell lockdep that interrupts are enabled
+ *
+ * Invoked from architecture specific code before entering a guest.
+ * Must be called with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_enter_irqoff() before this.
+ *
+ * Note: this is analogous to exit_to_user_mode().
+ */
+static __always_inline void guest_state_enter_irqoff(void)
+{
+ instrumentation_begin();
+ trace_hardirqs_on_prepare();
+ lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+ instrumentation_end();
+
+ guest_context_enter_irqoff();
+ lockdep_hardirqs_on(CALLER_ADDR0);
+}
+
+/*
+ * Exit guest context and exit an RCU extended quiescent state.
+ *
+ * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is
+ * unsafe to use any code which may directly or indirectly use RCU, tracing
+ * (including IRQ flag tracing), or lockdep. All code in this period must be
+ * non-instrumentable.
+ */
+static __always_inline void guest_context_exit_irqoff(void)
{
context_tracking_guest_exit();
+}
+/*
+ * Stop accounting time towards a guest.
+ * Must be called after exiting guest context.
+ */
+static __always_inline void guest_timing_exit_irqoff(void)
+{
instrumentation_begin();
/* Flush the guest cputime we spent on the guest */
vtime_account_guest_exit();
instrumentation_end();
}
+/*
+ * Deprecated. Architectures should move to guest_state_exit_irqoff() and
+ * guest_timing_exit_irqoff().
+ */
+static __always_inline void guest_exit_irqoff(void)
+{
+ guest_context_exit_irqoff();
+ guest_timing_exit_irqoff();
+}
+
static inline void guest_exit(void)
{
unsigned long flags;
@@ -407,6 +492,33 @@ static inline void guest_exit(void)
local_irq_restore(flags);
}
+/**
+ * guest_state_exit_irqoff - Establish state when returning from guest mode
+ *
+ * Entry from a guest disables interrupts, but guest mode is traced as
+ * interrupts enabled. Also with NO_HZ_FULL RCU might be idle.
+ *
+ * 1) Tell lockdep that interrupts are disabled
+ * 2) Invoke context tracking if enabled to reactivate RCU
+ * 3) Trace interrupts off state
+ *
+ * Invoked from architecture specific code after exiting a guest.
+ * Must be invoked with interrupts disabled and the caller must be
+ * non-instrumentable.
+ * The caller has to invoke guest_timing_exit_irqoff() after this.
+ *
+ * Note: this is analogous to enter_from_user_mode().
+ */
+static __always_inline void guest_state_exit_irqoff(void)
+{
+ lockdep_hardirqs_off(CALLER_ADDR0);
+ guest_context_exit_irqoff();
+
+ instrumentation_begin();
+ trace_hardirqs_off_finish();
+ instrumentation_end();
+}
+
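Putting the split helpers together, the call order that the kerneldoc above
prescribes for an architecture's vcpu-run path looks roughly like this; the
low-level guest entry is a placeholder, and this is a sketch of the
contract rather than any particular architecture's code:

/* Sketch of the documented ordering; interrupts stay disabled here. */
static void example_vcpu_run(struct kvm_vcpu *vcpu)
{
	guest_timing_enter_irqoff();	/* start guest time accounting */

	guest_state_enter_irqoff();	/* lockdep/tracing/RCU fixup */
	/* arch_enter_guest(vcpu);	   hypothetical low-level entry */
	guest_state_exit_irqoff();	/* undo the fixup on the way out */

	/* pending IRQs handled here still count as guest time */

	guest_timing_exit_irqoff();	/* stop guest time accounting */
}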
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
/*
@@ -424,7 +536,26 @@ static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
*/
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)
+/*
+ * Since at idle each memslot belongs to two memslot sets, it has to contain
+ * two embedded nodes for each data structure it forms a part of.
+ *
+ * Two memslot sets (one active and one inactive) are necessary so the VM
+ * continues to run on one memslot set while the other is being modified.
+ *
+ * These two memslot sets normally point to the same set of memslots.
+ * They can, however, be desynchronized when performing a memslot management
+ * operation by replacing the memslot to be modified by its copy.
+ * After the operation is complete, both memslot sets once again point to
+ * the same, common set of memslot data.
+ *
+ * The memslots themselves are independent of each other so they can be
+ * individually added or deleted.
+ */
struct kvm_memory_slot {
+ struct hlist_node id_node[2];
+ struct interval_tree_node hva_node[2];
+ struct rb_node gfn_node[2];
gfn_t base_gfn;
unsigned long npages;
unsigned long *dirty_bitmap;
@@ -435,7 +566,7 @@ struct kvm_memory_slot {
u16 as_id;
};
-static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot)
+static inline bool kvm_slot_dirty_track_enabled(const struct kvm_memory_slot *slot)
{
return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
}
@@ -469,6 +600,12 @@ struct kvm_hv_sint {
u32 sint;
};
+struct kvm_xen_evtchn {
+ u32 port;
+ u32 vcpu;
+ u32 priority;
+};
+
struct kvm_kernel_irq_routing_entry {
u32 gsi;
u32 type;
@@ -489,6 +626,7 @@ struct kvm_kernel_irq_routing_entry {
} msi;
struct kvm_s390_adapter_int adapter;
struct kvm_hv_sint hv_sint;
+ struct kvm_xen_evtchn xen_evtchn;
};
struct hlist_node link;
};
@@ -519,18 +657,21 @@ static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
}
#endif
-/*
- * Note:
- * memslots are not sorted by id anymore, please use id_to_memslot()
- * to get the memslot by its id.
- */
struct kvm_memslots {
u64 generation;
- /* The mapping table from slot id to the index in memslots[]. */
- short id_to_index[KVM_MEM_SLOTS_NUM];
- atomic_t last_used_slot;
- int used_slots;
- struct kvm_memory_slot memslots[];
+ atomic_long_t last_used_slot;
+ struct rb_root_cached hva_tree;
+ struct rb_root gfn_tree;
+ /*
+ * The mapping table from slot id to memslot.
+ *
+ * 7-bit bucket count matches the size of the old id to index array for
+ * 512 slots, while giving good performance with this slot count.
+ * Higher bucket counts bring only small performance improvements but
+ * always result in higher memory usage (even for lower memslot counts).
+ */
+ DECLARE_HASHTABLE(id_hash, 7);
+ int node_idx;
};
struct kvm {
@@ -551,14 +692,22 @@ struct kvm {
*/
struct mutex slots_arch_lock;
struct mm_struct *mm; /* userspace tied to this vm */
+ unsigned long nr_memslot_pages;
+ /* The two memslot sets - active and inactive (per address space) */
+ struct kvm_memslots __memslots[KVM_ADDRESS_SPACE_NUM][2];
+ /* The current active memslot set for each address space */
struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
- struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
+ struct xarray vcpu_array;
/* Used to wait for completion of MMU notifiers. */
spinlock_t mn_invalidate_lock;
unsigned long mn_active_invalidate_count;
struct rcuwait mn_memslots_update_rcuwait;
+ /* For management / invalidation of gfn_to_pfn_caches */
+ spinlock_t gpc_lock;
+ struct list_head gpc_list;
+
/*
* created_vcpus is protected by kvm->lock, and is incremented
* at the beginning of KVM_CREATE_VCPU. online_vcpus is only
@@ -701,19 +850,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
smp_rmb();
- return kvm->vcpus[i];
+ return xa_load(&kvm->vcpu_array, i);
}
-#define kvm_for_each_vcpu(idx, vcpup, kvm) \
- for (idx = 0; \
- idx < atomic_read(&kvm->online_vcpus) && \
- (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
- idx++)
+#define kvm_for_each_vcpu(idx, vcpup, kvm) \
+ xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
+ (atomic_read(&kvm->online_vcpus) - 1))
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
struct kvm_vcpu *vcpu = NULL;
- int i;
+ unsigned long i;
if (id < 0)
return NULL;
@@ -727,13 +874,12 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
return NULL;
}
-#define kvm_for_each_memslot(memslot, slots) \
- for (memslot = &slots->memslots[0]; \
- memslot < slots->memslots + slots->used_slots; memslot++) \
- if (WARN_ON_ONCE(!memslot->npages)) { \
- } else
+static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
+{
+ return vcpu->vcpu_idx;
+}
-void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);
+void kvm_destroy_vcpus(struct kvm *kvm);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
@@ -793,21 +939,124 @@ static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
return __kvm_memslots(vcpu->kvm, as_id);
}
+static inline bool kvm_memslots_empty(struct kvm_memslots *slots)
+{
+ return RB_EMPTY_ROOT(&slots->gfn_tree);
+}
+
+#define kvm_for_each_memslot(memslot, bkt, slots) \
+ hash_for_each(slots->id_hash, bkt, memslot, id_node[slots->node_idx]) \
+ if (WARN_ON_ONCE(!memslot->npages)) { \
+ } else
+
static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
- int index = slots->id_to_index[id];
struct kvm_memory_slot *slot;
+ int idx = slots->node_idx;
- if (index < 0)
- return NULL;
+ hash_for_each_possible(slots->id_hash, slot, id_node[idx], id) {
+ if (slot->id == id)
+ return slot;
+ }
+
+ return NULL;
+}
+
+/* Iterator used for walking memslots that overlap a gfn range. */
+struct kvm_memslot_iter {
+ struct kvm_memslots *slots;
+ struct rb_node *node;
+ struct kvm_memory_slot *slot;
+};
+
+static inline void kvm_memslot_iter_next(struct kvm_memslot_iter *iter)
+{
+ iter->node = rb_next(iter->node);
+ if (!iter->node)
+ return;
+
+ iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[iter->slots->node_idx]);
+}
+
+static inline void kvm_memslot_iter_start(struct kvm_memslot_iter *iter,
+ struct kvm_memslots *slots,
+ gfn_t start)
+{
+ int idx = slots->node_idx;
+ struct rb_node *tmp;
+ struct kvm_memory_slot *slot;
+
+ iter->slots = slots;
+
+ /*
+ * Find the so-called "upper bound" of a key - the first node that has
+ * its key strictly greater than the searched one (the start gfn in our case).
+ */
+ iter->node = NULL;
+ for (tmp = slots->gfn_tree.rb_node; tmp; ) {
+ slot = container_of(tmp, struct kvm_memory_slot, gfn_node[idx]);
+ if (start < slot->base_gfn) {
+ iter->node = tmp;
+ tmp = tmp->rb_left;
+ } else {
+ tmp = tmp->rb_right;
+ }
+ }
- slot = &slots->memslots[index];
+ /*
+ * Find the slot with the lowest gfn that can possibly intersect with
+ * the range, so we'll ideally have slot start <= range start
+ */
+ if (iter->node) {
+ /*
+ * A NULL previous node means that the very first slot
+ * already has a higher start gfn.
+ * In this case slot start > range start.
+ */
+ tmp = rb_prev(iter->node);
+ if (tmp)
+ iter->node = tmp;
+ } else {
+ /* a NULL node below means no slots */
+ iter->node = rb_last(&slots->gfn_tree);
+ }
- WARN_ON(slot->id != id);
- return slot;
+ if (iter->node) {
+ iter->slot = container_of(iter->node, struct kvm_memory_slot, gfn_node[idx]);
+
+ /*
+ * It is possible in the slot start < range start case that the
+ * found slot ends before or at range start (slot end <= range start)
+ * and so it does not overlap the requested range.
+ *
+ * In such non-overlapping case the next slot (if it exists) will
+ * already have slot start > range start, otherwise the logic above
+ * would have found it instead of the current slot.
+ */
+ if (iter->slot->base_gfn + iter->slot->npages <= start)
+ kvm_memslot_iter_next(iter);
+ }
+}
+
+static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end)
+{
+ if (!iter->node)
+ return false;
+
+ /*
+ * If this slot starts beyond or at the end of the range, so does
+ * every following one.
+ */
+ return iter->slot->base_gfn < end;
}
+/* Iterate over each memslot at least partially intersecting [start, end) range */
+#define kvm_for_each_memslot_in_gfn_range(iter, slots, start, end) \
+ for (kvm_memslot_iter_start(iter, slots, start); \
+ kvm_memslot_iter_is_valid(iter, end); \
+ kvm_memslot_iter_next(iter))
+
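A short usage sketch of the iterator defined above, walking every memslot
that at least partially intersects a gfn range (the helper name is
hypothetical):

/* Sketch: sum the pages of all memslots intersecting [start, end). */
static unsigned long example_count_range_pages(struct kvm_memslots *slots,
					       gfn_t start, gfn_t end)
{
	struct kvm_memslot_iter iter;
	unsigned long pages = 0;

	kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end)
		pages += iter.slot->npages;

	return pages;
}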
/*
* KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
* - create a new memory slot
@@ -833,11 +1082,10 @@ int __kvm_set_memory_region(struct kvm *kvm,
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
- struct kvm_memory_slot *memslot,
- const struct kvm_userspace_memory_region *mem,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
- const struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot *old,
const struct kvm_memory_slot *new,
enum kvm_mr_change change);
@@ -863,9 +1111,9 @@ void kvm_set_page_accessed(struct page *page);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable);
-kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
-kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
+kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn);
+kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
bool atomic, bool *async, bool write_fault,
bool *writable, hva_t *hva);
@@ -942,7 +1190,7 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
-void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn);
+void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
@@ -966,10 +1214,109 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
+/**
+ * kvm_gfn_to_pfn_cache_init - prepare a cached kernel mapping and HPA for a
+ * given guest physical address.
+ *
+ * @kvm: pointer to kvm instance.
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @vcpu: vCPU to be used for marking pages dirty and to be woken on
+ * invalidation.
+ * @guest_uses_pa: indicates that the resulting host physical PFN is used while
+ * @vcpu is IN_GUEST_MODE so invalidations should wake it.
+ * @kernel_map: requests a kernel virtual mapping (kmap / memremap).
+ * @gpa: guest physical address to map.
+ * @len: sanity check; the range being accessed must fit in a single page.
+ * @dirty: mark the cache dirty immediately.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * This primes a gfn_to_pfn_cache and links it into the @kvm's list for
+ * invalidations to be processed. Invalidation callbacks to @vcpu using
+ * %KVM_REQ_GPC_INVALIDATE will occur only for MMU notifiers, not for KVM
+ * memslot changes. Callers are required to use kvm_gfn_to_pfn_cache_check()
+ * to ensure that the cache is valid before accessing the target page.
+ */
+int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ struct kvm_vcpu *vcpu, bool guest_uses_pa,
+ bool kernel_map, gpa_t gpa, unsigned long len,
+ bool dirty);
+
+/**
+ * kvm_gfn_to_pfn_cache_check - check validity of a gfn_to_pfn_cache.
+ *
+ * @kvm: pointer to kvm instance.
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @gpa: current guest physical address to map.
+ * @len: sanity check; the range being accessed must fit in a single page.
+ * @dirty: mark the cache dirty immediately.
+ *
+ * @return: %true if the cache is still valid and the address matches.
+ * %false if the cache is not valid.
+ *
+ * Callers outside IN_GUEST_MODE context should hold a read lock on @gpc->lock
+ * while calling this function, and then continue to hold the lock until the
+ * access is complete.
+ *
+ * Callers in IN_GUEST_MODE may do so without locking, although they should
+ * still hold a read lock on kvm->srcu for the memslot checks.
+ */
+bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ gpa_t gpa, unsigned long len);
+
+/**
+ * kvm_gfn_to_pfn_cache_refresh - update a previously initialized cache.
+ *
+ * @kvm: pointer to kvm instance.
+ * @gpc: struct gfn_to_pfn_cache object.
+ * @gpa: updated guest physical address to map.
+ * @len: sanity check; the range being accessed must fit in a single page.
+ * @dirty: mark the cache dirty immediately.
+ *
+ * @return: 0 for success.
+ * -EINVAL for a mapping which would cross a page boundary.
+ * -EFAULT for an untranslatable guest physical address.
+ *
+ * This will attempt to refresh a gfn_to_pfn_cache. Note that a successful
+ * return from this function does not mean the page can be immediately
+ * accessed because it may have raced with an invalidation. Callers must
+ * still lock and check the cache status, as this function does not return
+ * with the lock still held to permit access.
+ */
+int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ gpa_t gpa, unsigned long len, bool dirty);
+
+/**
+ * kvm_gfn_to_pfn_cache_unmap - temporarily unmap a gfn_to_pfn_cache.
+ *
+ * @kvm: pointer to kvm instance.
+ * @gpc: struct gfn_to_pfn_cache object.
+ *
+ * This unmaps the referenced page and marks it dirty, if appropriate. The
+ * cache is left in the invalid state but at least the mapping from GPA to
+ * userspace HVA will remain cached and can be reused on a subsequent
+ * refresh.
+ */
+void kvm_gfn_to_pfn_cache_unmap(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+
+/**
+ * kvm_gfn_to_pfn_cache_destroy - destroy and unlink a gfn_to_pfn_cache.
+ *
+ * @kvm: pointer to kvm instance.
+ * @gpc: struct gfn_to_pfn_cache object.
+ *
+ * This removes a cache from the @kvm's list to be processed on MMU notifier
+ * invocation.
+ */
+void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+
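The locking rules spelled out above imply a check/refresh retry loop on the
consumer side: take the read lock, validate, and if the cache went stale,
drop the lock, refresh, and try again. A hedged sketch of that pattern,
with hypothetical names and a plain u64 payload:

/* Sketch: read through the cached mapping, retrying on invalidation. */
static int example_read_gpc(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
			    gpa_t gpa, u64 *val)
{
	read_lock(&gpc->lock);
	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpa, sizeof(*val))) {
		read_unlock(&gpc->lock);

		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa,
						 sizeof(*val), false))
			return -EFAULT;

		read_lock(&gpc->lock);
	}

	*val = *(u64 *)gpc->khva;	/* access only while valid + locked */
	read_unlock(&gpc->lock);
	return 0;
}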
void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
-void kvm_vcpu_block(struct kvm_vcpu *vcpu);
+void kvm_vcpu_halt(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
@@ -1152,6 +1499,20 @@ static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
#endif
}
+/*
+ * Wake a vCPU if necessary, but don't do any stats/metadata updates. Returns
+ * true if the vCPU was blocking and was awakened, false otherwise.
+ */
+static inline bool __kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
+{
+ return !!rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
+}
+
+static inline bool kvm_vcpu_is_blocking(struct kvm_vcpu *vcpu)
+{
+ return rcuwait_active(kvm_arch_vcpu_get_wait(vcpu));
+}
+
#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
* returns true if the virtual interrupt controller is initialized and
@@ -1166,6 +1527,16 @@ static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
}
#endif
+#ifdef CONFIG_GUEST_PERF_EVENTS
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu);
+
+void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void));
+void kvm_unregister_perf_callbacks(void);
+#else
+static inline void kvm_register_perf_callbacks(void *ign) {}
+static inline void kvm_unregister_perf_callbacks(void) {}
+#endif /* CONFIG_GUEST_PERF_EVENTS */
+
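A hedged note on the new perf hooks: an architecture selecting
CONFIG_GUEST_PERF_EVENTS would pair registration with teardown, passing a
PT interrupt handler only where one exists (NULL otherwise). The
surrounding function names are illustrative:

/* Illustrative init/teardown pairing for the perf callbacks. */
static int example_arch_init(void)
{
	kvm_register_perf_callbacks(NULL);	/* no PT intr handler */
	return 0;
}

static void example_arch_exit(void)
{
	kvm_unregister_perf_callbacks();
}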
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
@@ -1174,7 +1545,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
-bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);
struct kvm_irq_ack_notifier {
struct hlist_node link;
@@ -1205,25 +1575,15 @@ void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
/*
- * Returns a pointer to the memslot at slot_index if it contains gfn.
+ * Returns a pointer to the memslot if it contains gfn.
* Otherwise returns NULL.
*/
static inline struct kvm_memory_slot *
-try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn)
+try_get_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
- struct kvm_memory_slot *slot;
-
- if (slot_index < 0 || slot_index >= slots->used_slots)
+ if (!slot)
return NULL;
- /*
- * slot_index can come from vcpu->last_used_slot which is not kept
- * in sync with userspace-controllable memslot deletion. So use nospec
- * to prevent the CPU from speculating past the end of memslots[].
- */
- slot_index = array_index_nospec(slot_index, slots->used_slots);
- slot = &slots->memslots[slot_index];
-
if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages)
return slot;
else
@@ -1231,63 +1591,63 @@ try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn)
}
/*
- * Returns a pointer to the memslot that contains gfn and records the index of
- * the slot in index. Otherwise returns NULL.
+ * Returns a pointer to the memslot that contains gfn. Otherwise returns NULL.
*
- * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
+ * With "approx" set returns the memslot also when the address falls
+ * in a hole. In that case one of the memslots bordering the hole is
+ * returned.
*/
static inline struct kvm_memory_slot *
-search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index)
+search_memslots(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{
- int start = 0, end = slots->used_slots;
- struct kvm_memory_slot *memslots = slots->memslots;
struct kvm_memory_slot *slot;
-
- if (unlikely(!slots->used_slots))
- return NULL;
-
- while (start < end) {
- int slot = start + (end - start) / 2;
-
- if (gfn >= memslots[slot].base_gfn)
- end = slot;
- else
- start = slot + 1;
- }
-
- slot = try_get_memslot(slots, start, gfn);
- if (slot) {
- *index = start;
- return slot;
+ struct rb_node *node;
+ int idx = slots->node_idx;
+
+ slot = NULL;
+ for (node = slots->gfn_tree.rb_node; node; ) {
+ slot = container_of(node, struct kvm_memory_slot, gfn_node[idx]);
+ if (gfn >= slot->base_gfn) {
+ if (gfn < slot->base_gfn + slot->npages)
+ return slot;
+ node = node->rb_right;
+ } else
+ node = node->rb_left;
}
- return NULL;
+ return approx ? slot : NULL;
}
-/*
- * __gfn_to_memslot() and its descendants are here because it is called from
- * non-modular code in arch/powerpc/kvm/book3s_64_vio{,_hv}.c. gfn_to_memslot()
- * itself isn't here as an inline because that would bloat other code too much.
- */
static inline struct kvm_memory_slot *
-__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
+____gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn, bool approx)
{
struct kvm_memory_slot *slot;
- int slot_index = atomic_read(&slots->last_used_slot);
- slot = try_get_memslot(slots, slot_index, gfn);
+ slot = (struct kvm_memory_slot *)atomic_long_read(&slots->last_used_slot);
+ slot = try_get_memslot(slot, gfn);
if (slot)
return slot;
- slot = search_memslots(slots, gfn, &slot_index);
+ slot = search_memslots(slots, gfn, approx);
if (slot) {
- atomic_set(&slots->last_used_slot, slot_index);
+ atomic_long_set(&slots->last_used_slot, (unsigned long)slot);
return slot;
}
return NULL;
}
+/*
+ * __gfn_to_memslot() and its descendants are here to allow arch code to inline
+ * the lookups in hot paths. gfn_to_memslot() itself isn't here as an inline
+ * because that would bloat other code too much.
+ */
+static inline struct kvm_memory_slot *
+__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
+{
+ return ____gfn_to_memslot(slots, gfn, false);
+}
+
static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
@@ -1463,7 +1823,8 @@ struct _kvm_stats_desc {
STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_poll_fail_hist, \
HALT_POLL_HIST_COUNT), \
STATS_DESC_LOGHIST_TIME_NSEC(VCPU_GENERIC, halt_wait_hist, \
- HALT_POLL_HIST_COUNT)
+ HALT_POLL_HIST_COUNT), \
+ STATS_DESC_ICOUNTER(VCPU_GENERIC, blocking)
extern struct dentry *kvm_debugfs_dir;
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 234eab059839..dceac12c1ce5 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -19,6 +19,7 @@ struct kvm_memslots;
enum kvm_mr_change;
#include <linux/types.h>
+#include <linux/spinlock_types.h>
#include <asm/kvm_types.h>
@@ -53,6 +54,23 @@ struct gfn_to_hva_cache {
struct kvm_memory_slot *memslot;
};
+struct gfn_to_pfn_cache {
+ u64 generation;
+ gpa_t gpa;
+ unsigned long uhva;
+ struct kvm_memory_slot *memslot;
+ struct kvm_vcpu *vcpu;
+ struct list_head list;
+ rwlock_t lock;
+ void *khva;
+ kvm_pfn_t pfn;
+ bool active;
+ bool valid;
+ bool dirty;
+ bool kernel_map;
+ bool guest_uses_pa;
+};
+
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
* Memory caches are used to preallocate memory ahead of various MMU flows,
@@ -87,6 +105,7 @@ struct kvm_vcpu_stat_generic {
u64 halt_poll_success_hist[HALT_POLL_HIST_COUNT];
u64 halt_poll_fail_hist[HALT_POLL_HIST_COUNT];
u64 halt_wait_hist[HALT_POLL_HIST_COUNT];
+ u64 blocking;
};
#define KVM_STATS_NAME_SIZE 48
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 2a8404b26083..7f99b4d78822 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -39,25 +39,9 @@
* compile-time options: to be removed as soon as all the drivers are
* converted to the new debugging mechanism
*/
-#undef ATA_DEBUG /* debugging output */
-#undef ATA_VERBOSE_DEBUG /* yet more debugging output */
#undef ATA_IRQ_TRAP /* define to ack screaming irqs */
-#undef ATA_NDEBUG /* define to disable quick runtime checks */
-/* note: prints function name for you */
-#ifdef ATA_DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#ifdef ATA_VERBOSE_DEBUG
-#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-#else
-#define VPRINTK(fmt, args...)
-#endif /* ATA_VERBOSE_DEBUG */
-#else
-#define DPRINTK(fmt, args...)
-#define VPRINTK(fmt, args...)
-#endif /* ATA_DEBUG */
-
#define ata_print_version_once(dev, version) \
({ \
static bool __print_once; \
@@ -68,38 +52,6 @@
} \
})
-/* NEW: debug levels */
-#define HAVE_LIBATA_MSG 1
-
-enum {
- ATA_MSG_DRV = 0x0001,
- ATA_MSG_INFO = 0x0002,
- ATA_MSG_PROBE = 0x0004,
- ATA_MSG_WARN = 0x0008,
- ATA_MSG_MALLOC = 0x0010,
- ATA_MSG_CTL = 0x0020,
- ATA_MSG_INTR = 0x0040,
- ATA_MSG_ERR = 0x0080,
-};
-
-#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV)
-#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO)
-#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE)
-#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN)
-#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
-#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL)
-#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR)
-#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR)
-
-static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
-{
- if (dval < 0 || dval >= (sizeof(u32) * 8))
- return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
- if (!dval)
- return 0;
- return (1 << dval) - 1;
-}
-
/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON 0xfafbfcfdU
@@ -191,7 +143,7 @@ enum {
ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */
ATA_LFLAG_RST_ONCE = (1 << 9), /* limit recovery to one reset */
ATA_LFLAG_CHANGED = (1 << 10), /* LPM state changed on this link */
- ATA_LFLAG_NO_DB_DELAY = (1 << 11), /* no debounce delay on link resume */
+ ATA_LFLAG_NO_DEBOUNCE_DELAY = (1 << 11), /* no debounce delay on link resume */
/* struct ata_port flags */
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
@@ -428,6 +380,7 @@ enum {
ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */
ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */
ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */
+ ATA_HORKAGE_NO_LOG_DIR = (1 << 29), /* Do not read log directory */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
@@ -884,7 +837,6 @@ struct ata_port {
unsigned int hsm_task_state;
- u32 msg_enable;
struct list_head eh_done_q;
wait_queue_head_t eh_wait_q;
int eh_tries;
@@ -933,7 +885,8 @@ struct ata_port_operations {
void (*set_piomode)(struct ata_port *ap, struct ata_device *dev);
void (*set_dmamode)(struct ata_port *ap, struct ata_device *dev);
int (*set_mode)(struct ata_link *link, struct ata_device **r_failed_dev);
- unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf, u16 *id);
+ unsigned int (*read_id)(struct ata_device *dev, struct ata_taskfile *tf,
+ __le16 *id);
void (*dev_config)(struct ata_device *dev);
@@ -1160,13 +1113,15 @@ extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
+extern unsigned int ata_port_classify(struct ata_port *ap,
+ const struct ata_taskfile *tf);
extern void ata_dev_disable(struct ata_device *adev);
extern void ata_id_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
unsigned int ofs, unsigned int len);
extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
- struct ata_taskfile *tf, u16 *id);
+ struct ata_taskfile *tf, __le16 *id);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern u64 ata_qc_get_active(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
@@ -1432,6 +1387,12 @@ extern const struct attribute_group *ata_common_sdev_groups[];
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
.slave_configure = ata_scsi_slave_config
+#define ATA_SUBBASE_SHT_QD(drv_name, drv_qd) \
+ __ATA_BASE_SHT(drv_name), \
+ .can_queue = drv_qd, \
+ .tag_alloc_policy = BLK_TAG_ALLOC_RR, \
+ .slave_configure = ata_scsi_slave_config
+
#define ATA_BASE_SHT(drv_name) \
ATA_SUBBASE_SHT(drv_name), \
.sdev_groups = ata_common_sdev_groups
@@ -1443,6 +1404,11 @@ extern const struct attribute_group *ata_ncq_sdev_groups[];
ATA_SUBBASE_SHT(drv_name), \
.sdev_groups = ata_ncq_sdev_groups, \
.change_queue_depth = ata_scsi_change_queue_depth
+
+#define ATA_NCQ_SHT_QD(drv_name, drv_qd) \
+ ATA_SUBBASE_SHT_QD(drv_name, drv_qd), \
+ .sdev_groups = ata_ncq_sdev_groups, \
+ .change_queue_depth = ata_scsi_change_queue_depth
#endif
/*
@@ -1487,51 +1453,61 @@ static inline int sata_srst_pmp(struct ata_link *link)
return link->pmp;
}
-/*
- * printk helpers
- */
-__printf(3, 4)
-void ata_port_printk(const struct ata_port *ap, const char *level,
- const char *fmt, ...);
-__printf(3, 4)
-void ata_link_printk(const struct ata_link *link, const char *level,
- const char *fmt, ...);
-__printf(3, 4)
-void ata_dev_printk(const struct ata_device *dev, const char *level,
- const char *fmt, ...);
+#define ata_port_printk(level, ap, fmt, ...) \
+ pr_ ## level ("ata%u: " fmt, (ap)->print_id, ##__VA_ARGS__)
#define ata_port_err(ap, fmt, ...) \
- ata_port_printk(ap, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_port_printk(err, ap, fmt, ##__VA_ARGS__)
#define ata_port_warn(ap, fmt, ...) \
- ata_port_printk(ap, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_port_printk(warn, ap, fmt, ##__VA_ARGS__)
#define ata_port_notice(ap, fmt, ...) \
- ata_port_printk(ap, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_port_printk(notice, ap, fmt, ##__VA_ARGS__)
#define ata_port_info(ap, fmt, ...) \
- ata_port_printk(ap, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_port_printk(info, ap, fmt, ##__VA_ARGS__)
#define ata_port_dbg(ap, fmt, ...) \
- ata_port_printk(ap, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_port_printk(debug, ap, fmt, ##__VA_ARGS__)
+
+#define ata_link_printk(level, link, fmt, ...) \
+do { \
+ if (sata_pmp_attached((link)->ap) || \
+ (link)->ap->slave_link) \
+ pr_ ## level ("ata%u.%02u: " fmt, \
+ (link)->ap->print_id, \
+ (link)->pmp, \
+ ##__VA_ARGS__); \
+ else \
+ pr_ ## level ("ata%u: " fmt, \
+ (link)->ap->print_id, \
+ ##__VA_ARGS__); \
+} while (0)
#define ata_link_err(link, fmt, ...) \
- ata_link_printk(link, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_link_printk(err, link, fmt, ##__VA_ARGS__)
#define ata_link_warn(link, fmt, ...) \
- ata_link_printk(link, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_link_printk(warn, link, fmt, ##__VA_ARGS__)
#define ata_link_notice(link, fmt, ...) \
- ata_link_printk(link, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_link_printk(notice, link, fmt, ##__VA_ARGS__)
#define ata_link_info(link, fmt, ...) \
- ata_link_printk(link, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_link_printk(info, link, fmt, ##__VA_ARGS__)
#define ata_link_dbg(link, fmt, ...) \
- ata_link_printk(link, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_link_printk(debug, link, fmt, ##__VA_ARGS__)
+
+#define ata_dev_printk(level, dev, fmt, ...) \
+ pr_ ## level("ata%u.%02u: " fmt, \
+ (dev)->link->ap->print_id, \
+ (dev)->link->pmp + (dev)->devno, \
+ ##__VA_ARGS__)
#define ata_dev_err(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_ERR, fmt, ##__VA_ARGS__)
+ ata_dev_printk(err, dev, fmt, ##__VA_ARGS__)
#define ata_dev_warn(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_WARNING, fmt, ##__VA_ARGS__)
+ ata_dev_printk(warn, dev, fmt, ##__VA_ARGS__)
#define ata_dev_notice(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_NOTICE, fmt, ##__VA_ARGS__)
+ ata_dev_printk(notice, dev, fmt, ##__VA_ARGS__)
#define ata_dev_info(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
+ ata_dev_printk(info, dev, fmt, ##__VA_ARGS__)
#define ata_dev_dbg(dev, fmt, ...) \
- ata_dev_printk(dev, KERN_DEBUG, fmt, ##__VA_ARGS__)
+ ata_dev_printk(debug, dev, fmt, ##__VA_ARGS__)
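With the helpers now macros, the log level is resolved at preprocessing
time instead of being parsed from a KERN_* string at run time. An
expansion sketch for illustration:

/* For example: */
ata_dev_warn(dev, "qc timeout\n");
/* expands (per the macro above) to:
 * pr_warn("ata%u.%02u: qc timeout\n",
 *	   dev->link->ap->print_id,
 *	   dev->link->pmp + dev->devno);
 */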
void ata_print_version(const struct device *dev, const char *version);
@@ -2065,11 +2041,8 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
{
u8 status = ata_sff_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);
-#ifdef ATA_DEBUG
if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ)))
- ata_port_printk(ap, KERN_DEBUG, "abnormal Status 0x%X\n",
- status);
-#endif
+ ata_port_dbg(ap, "abnormal Status 0x%X\n", status);
return status;
}
diff --git a/include/linux/list.h b/include/linux/list.h
index 6636fc07f918..dd6c2041d09c 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -258,8 +258,7 @@ static inline void list_bulk_move_tail(struct list_head *head,
* @list: the entry to test
* @head: the head of the list
*/
-static inline int list_is_first(const struct list_head *list,
- const struct list_head *head)
+static inline int list_is_first(const struct list_head *list, const struct list_head *head)
{
return list->prev == head;
}
@@ -269,13 +268,22 @@ static inline int list_is_first(const struct list_head *list,
* @list: the entry to test
* @head: the head of the list
*/
-static inline int list_is_last(const struct list_head *list,
- const struct list_head *head)
+static inline int list_is_last(const struct list_head *list, const struct list_head *head)
{
return list->next == head;
}
/**
+ * list_is_head - tests whether @list is the list @head
+ * @list: the entry to test
+ * @head: the head of the list
+ */
+static inline int list_is_head(const struct list_head *list, const struct list_head *head)
+{
+ return list == head;
+}
+
+/**
* list_empty - tests whether a list is empty
* @head: the list to test.
*/
@@ -318,7 +326,7 @@ static inline void list_del_init_careful(struct list_head *entry)
static inline int list_empty_careful(const struct list_head *head)
{
struct list_head *next = smp_load_acquire(&head->next);
- return (next == head) && (next == head->prev);
+ return list_is_head(next, head) && (next == head->prev);
}
/**
@@ -393,10 +401,9 @@ static inline void list_cut_position(struct list_head *list,
{
if (list_empty(head))
return;
- if (list_is_singular(head) &&
- (head->next != entry && head != entry))
+ if (list_is_singular(head) && !list_is_head(entry, head) && (entry != head->next))
return;
- if (entry == head)
+ if (list_is_head(entry, head))
INIT_LIST_HEAD(list);
else
__list_cut_position(list, head, entry);
@@ -570,7 +577,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each(pos, head) \
- for (pos = (head)->next; pos != (head); pos = pos->next)
+ for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
/**
* list_for_each_continue - continue iteration over a list
@@ -580,7 +587,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* Continue to iterate over a list, continuing after the current position.
*/
#define list_for_each_continue(pos, head) \
- for (pos = pos->next; pos != (head); pos = pos->next)
+ for (pos = pos->next; !list_is_head(pos, (head)); pos = pos->next)
/**
* list_for_each_prev - iterate over a list backwards
@@ -588,7 +595,7 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each_prev(pos, head) \
- for (pos = (head)->prev; pos != (head); pos = pos->prev)
+ for (pos = (head)->prev; !list_is_head(pos, (head)); pos = pos->prev)
/**
* list_for_each_safe - iterate over a list safe against removal of list entry
@@ -597,8 +604,9 @@ static inline void list_splice_tail_init(struct list_head *list,
* @head: the head for your list.
*/
#define list_for_each_safe(pos, n, head) \
- for (pos = (head)->next, n = pos->next; pos != (head); \
- pos = n, n = pos->next)
+ for (pos = (head)->next, n = pos->next; \
+ !list_is_head(pos, (head)); \
+ pos = n, n = pos->next)
/**
* list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
@@ -608,7 +616,7 @@ static inline void list_splice_tail_init(struct list_head *list,
*/
#define list_for_each_prev_safe(pos, n, head) \
for (pos = (head)->prev, n = pos->prev; \
- pos != (head); \
+ !list_is_head(pos, (head)); \
pos = n, n = pos->prev)
/**
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index c4ae6506b8b3..fcef192e5e45 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -303,10 +303,15 @@ void nlmsvc_invalidate_all(void);
int nlmsvc_unlock_all_by_sb(struct super_block *sb);
int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
+static inline struct file *nlmsvc_file_file(struct nlm_file *file)
+{
+ return file->f_file[O_RDONLY] ?
+ file->f_file[O_RDONLY] : file->f_file[O_WRONLY];
+}
+
static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
{
- return locks_inode(file->f_file[O_RDONLY] ?
- file->f_file[O_RDONLY] : file->f_file[O_WRONLY]);
+ return locks_inode(nlmsvc_file_file(file));
}
static inline int __nlm_privileged_request4(const struct sockaddr *sap)
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index a5a724c308d8..819ec92dc2a8 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -80,7 +80,7 @@ LSM_HOOK(int, 0, sb_clone_mnt_opts, const struct super_block *oldsb,
unsigned long *set_kern_flags)
LSM_HOOK(int, 0, move_mount, const struct path *from_path,
const struct path *to_path)
-LSM_HOOK(int, 0, dentry_init_security, struct dentry *dentry,
+LSM_HOOK(int, -EOPNOTSUPP, dentry_init_security, struct dentry *dentry,
int mode, const struct qstr *name, const char **xattr_name,
void **ctx, u32 *ctxlen)
LSM_HOOK(int, 0, dentry_create_files_as, struct dentry *dentry, int mode,
diff --git a/include/linux/mc146818rtc.h b/include/linux/mc146818rtc.h
index 0661af17a758..808bb4cee230 100644
--- a/include/linux/mc146818rtc.h
+++ b/include/linux/mc146818rtc.h
@@ -123,7 +123,11 @@ struct cmos_rtc_board_info {
#define RTC_IO_EXTENT_USED RTC_IO_EXTENT
#endif /* ARCH_RTC_LOCATION */
-unsigned int mc146818_get_time(struct rtc_time *time);
+bool mc146818_does_rtc_work(void);
+int mc146818_get_time(struct rtc_time *time);
int mc146818_set_time(struct rtc_time *time);
+bool mc146818_avoid_UIP(void (*callback)(unsigned char seconds, void *param),
+ void *param);
+
#endif /* _MC146818RTC_H */
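A hedged sketch of the new UIP-avoidance helper: the callback runs once the
RTC is known not to be mid-update, with the current seconds value passed
in. The callback body, the context struct, and the assumption that register
access is safe inside the callback are all illustrative here:

/* Sketch: read an RTC register while no update is in progress. */
struct example_ctx {
	unsigned char minutes;
};

static void example_read_minutes(unsigned char seconds, void *param)
{
	struct example_ctx *ctx = param;

	ctx->minutes = CMOS_READ(RTC_MINUTES);
}

static int example_get_minutes(unsigned char *out)
{
	struct example_ctx ctx;

	if (!mc146818_avoid_UIP(example_read_minutes, &ctx))
		return -EIO;	/* UIP never cleared; RTC likely unusable */

	*out = ctx.minutes;
	return 0;
}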
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 9dc7cb239d21..50ad19662a32 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_MEMBLOCK_H
#define _LINUX_MEMBLOCK_H
-#ifdef __KERNEL__
/*
* Logical memory blocks.
@@ -605,6 +604,5 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end)
}
#endif
-#endif /* __KERNEL__ */
#endif /* _LINUX_MEMBLOCK_H */
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index e34112f6a369..0abbd685703b 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -33,6 +33,7 @@ enum memcg_stat_item {
MEMCG_SWAP = NR_VM_NODE_STAT_ITEMS,
MEMCG_SOCK,
MEMCG_PERCPU_B,
+ MEMCG_VMALLOC,
MEMCG_NR_STAT,
};
@@ -42,6 +43,7 @@ enum memcg_memory_event {
MEMCG_MAX,
MEMCG_OOM,
MEMCG_OOM_KILL,
+ MEMCG_OOM_GROUP_KILL,
MEMCG_SWAP_HIGH,
MEMCG_SWAP_MAX,
MEMCG_SWAP_FAIL,
@@ -217,7 +219,7 @@ struct obj_cgroup {
struct mem_cgroup *memcg;
atomic_t nr_charged_bytes;
union {
- struct list_head list;
+ struct list_head list; /* protected by objcg_lock */
struct rcu_head rcu;
};
};
@@ -313,7 +315,8 @@ struct mem_cgroup {
#ifdef CONFIG_MEMCG_KMEM
int kmemcg_id;
struct obj_cgroup __rcu *objcg;
- struct list_head objcg_list; /* list of inherited objcgs */
+ /* list of inherited objcgs, protected by objcg_lock */
+ struct list_head objcg_list;
#endif
MEMCG_PADDING(_pad2_);
@@ -943,6 +946,21 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
local_irq_restore(flags);
}
+static inline void mod_memcg_page_state(struct page *page,
+ int idx, int val)
+{
+ struct mem_cgroup *memcg;
+
+ if (mem_cgroup_disabled())
+ return;
+
+ rcu_read_lock();
+ memcg = page_memcg(page);
+ if (memcg)
+ mod_memcg_state(memcg, idx, val);
+ rcu_read_unlock();
+}
+
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
return READ_ONCE(memcg->vmstats.state[idx]);
@@ -1398,6 +1416,11 @@ static inline void mod_memcg_state(struct mem_cgroup *memcg,
{
}
+static inline void mod_memcg_page_state(struct page *page,
+ int idx, int val)
+{
+}
+
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, int idx)
{
return 0;
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 3c7595e81150..668389b4b53d 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -46,6 +46,7 @@ struct mempolicy {
unsigned short mode; /* See MPOL_* above */
unsigned short flags; /* See set_mempolicy() MPOL_F_* above */
nodemask_t nodes; /* interleave/bind/prefer */
+ int home_node; /* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */
union {
nodemask_t cpuset_mems_allowed; /* relative to these nodes */
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index c0e9d35889e8..1fafcc38acba 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -73,16 +73,6 @@ struct dev_pagemap_ops {
void (*page_free)(struct page *page);
/*
- * Transition the refcount in struct dev_pagemap to the dead state.
- */
- void (*kill)(struct dev_pagemap *pgmap);
-
- /*
- * Wait for refcount in struct dev_pagemap to be idle and reap it.
- */
- void (*cleanup)(struct dev_pagemap *pgmap);
-
- /*
* Used for private (un-addressable) device memory only. Must migrate
* the page back to a CPU accessible page.
*/
@@ -95,10 +85,14 @@ struct dev_pagemap_ops {
* struct dev_pagemap - metadata for ZONE_DEVICE mappings
* @altmap: pre-allocated/reserved memory for vmemmap allocations
* @ref: reference count that pins the devm_memremap_pages() mapping
- * @internal_ref: internal reference if @ref is not provided by the caller
- * @done: completion for @internal_ref
+ * @done: completion for @ref
* @type: memory type: see MEMORY_* in memory_hotplug.h
* @flags: PGMAP_* flags to specify detailed behavior
+ * @vmemmap_shift: structural definition of how the vmemmap page metadata
+ * is populated, specifically the metadata page order.
+ * A zero value (default) uses base pages as the vmemmap metadata
+ * representation. A bigger value will set up compound struct pages
+ * of the requested order value.
* @ops: method table
* @owner: an opaque pointer identifying the entity that manages this
* instance. Used by various helpers to make sure that no
@@ -109,11 +103,11 @@ struct dev_pagemap_ops {
*/
struct dev_pagemap {
struct vmem_altmap altmap;
- struct percpu_ref *ref;
- struct percpu_ref internal_ref;
+ struct percpu_ref ref;
struct completion done;
enum memory_type type;
unsigned int flags;
+ unsigned long vmemmap_shift;
const struct dev_pagemap_ops *ops;
void *owner;
int nr_range;
@@ -130,6 +124,11 @@ static inline struct vmem_altmap *pgmap_altmap(struct dev_pagemap *pgmap)
return NULL;
}
+static inline unsigned long pgmap_vmemmap_nr(struct dev_pagemap *pgmap)
+{
+ return 1 << pgmap->vmemmap_shift;
+}
+
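A quick worked example of the new field, assuming 4KiB base pages:
vmemmap_shift = 0 (the default) gives pgmap_vmemmap_nr() == 1, i.e. plain
base-page struct pages, while vmemmap_shift = 9 gives
pgmap_vmemmap_nr() == 512, i.e. one compound struct-page block per 2MiB of
device memory.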
#ifdef CONFIG_ZONE_DEVICE
void *memremap_pages(struct dev_pagemap *pgmap, int nid);
void memunmap_pages(struct dev_pagemap *pgmap);
@@ -191,7 +190,7 @@ static inline unsigned long memremap_compat_align(void)
static inline void put_dev_pagemap(struct dev_pagemap *pgmap)
{
if (pgmap)
- percpu_ref_put(pgmap->ref);
+ percpu_ref_put(&pgmap->ref);
}
#endif /* _LINUX_MEMREMAP_H_ */
diff --git a/include/linux/mfd/tps68470.h b/include/linux/mfd/tps68470.h
index ffe81127d91c..7807fa329db0 100644
--- a/include/linux/mfd/tps68470.h
+++ b/include/linux/mfd/tps68470.h
@@ -75,6 +75,17 @@
#define TPS68470_CLKCFG1_MODE_A_MASK GENMASK(1, 0)
#define TPS68470_CLKCFG1_MODE_B_MASK GENMASK(3, 2)
+#define TPS68470_CLKCFG2_DRV_STR_2MA 0x05
+#define TPS68470_PLL_OUTPUT_ENABLE 0x02
+#define TPS68470_CLK_SRC_XTAL BIT(0)
+#define TPS68470_PLLSWR_DEFAULT GENMASK(1, 0)
+#define TPS68470_OSC_EXT_CAP_DEFAULT 0x05
+
+#define TPS68470_OUTPUT_A_SHIFT 0x00
+#define TPS68470_OUTPUT_B_SHIFT 0x02
+#define TPS68470_CLK_SRC_SHIFT GENMASK(2, 0)
+#define TPS68470_OSC_EXT_CAP_SHIFT BIT(2)
+
#define TPS68470_GPIO_CTL_REG_A(x) (TPS68470_REG_GPCTL0A + (x) * 2)
#define TPS68470_GPIO_CTL_REG_B(x) (TPS68470_REG_GPCTL0B + (x) * 2)
#define TPS68470_GPIO_MODE_MASK GENMASK(1, 0)
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index a5cc4cdf9cc8..a5441ad33c74 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -730,16 +730,27 @@ void mhi_device_put(struct mhi_device *mhi_dev);
/**
* mhi_prepare_for_transfer - Setup UL and DL channels for data transfer.
- * Allocate and initialize the channel context and
- * also issue the START channel command to both
- * channels. Channels can be started only if both
- * host and device execution environments match and
- * channels are in a DISABLED state.
* @mhi_dev: Device associated with the channels
+ *
+ * Allocate and initialize the channel context and also issue the START channel
+ * command to both channels. Channels can be started only if both host and
+ * device execution environments match and channels are in a DISABLED state.
*/
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
/**
+ * mhi_prepare_for_transfer_autoqueue - Setup UL and DL channels with auto queue
+ * buffers for DL traffic
+ * @mhi_dev: Device associated with the channels
+ *
+ * Allocate and initialize the channel context and also issue the START channel
+ * command to both channels. Channels can be started only if both host and
+ * device execution environments match and channels are in a DISABLED state.
+ * The MHI core will automatically allocate and queue buffers for the DL traffic.
+ */
+int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev);
+
+/**
* mhi_unprepare_from_transfer - Reset UL and DL channels for data transfer.
* Issue the RESET channel command and let the
* device clean-up the context so no incoming
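A client driver that only consumes downlink data can now let the core manage its receive buffers; a hypothetical probe (function names illustrative):

    static int example_mhi_probe(struct mhi_device *mhi_dev,
                                 const struct mhi_device_id *id)
    {
            int ret;

            /* Start UL/DL channels; the core allocates and queues DL buffers. */
            ret = mhi_prepare_for_transfer_autoqueue(mhi_dev);
            if (ret)
                    return ret;

            /* Inbound data is then delivered via the driver's dl_xfer_cb(). */
            return 0;
    }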
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 4850cc5bf813..db96e10eb8da 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -40,6 +40,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page, int extra_count);
+void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep,
+ spinlock_t *ptl);
void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
void folio_migrate_copy(struct folio *newfolio, struct folio *folio);
int folio_migrate_mapping(struct address_space *mapping,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index de103949860f..213cc569b192 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -424,51 +424,6 @@ extern unsigned int kobjsize(const void *objp);
*/
extern pgprot_t protection_map[16];
-/**
- * enum fault_flag - Fault flag definitions.
- * @FAULT_FLAG_WRITE: Fault was a write fault.
- * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
- * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
- * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
- * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
- * @FAULT_FLAG_TRIED: The fault has been tried once.
- * @FAULT_FLAG_USER: The fault originated in userspace.
- * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
- * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
- * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
- *
- * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
- * whether we would allow page faults to retry by specifying these two
- * fault flags correctly. Currently there can be three legal combinations:
- *
- * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
- * this is the first try
- *
- * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
- * we've already tried at least once
- *
- * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
- *
- * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
- * be used. Note that page faults can be allowed to retry for multiple times,
- * in which case we'll have an initial fault with flags (a) then later on
- * continuous faults with flags (b). We should always try to detect pending
- * signals before a retry to make sure the continuous page faults can still be
- * interrupted if necessary.
- */
-enum fault_flag {
- FAULT_FLAG_WRITE = 1 << 0,
- FAULT_FLAG_MKWRITE = 1 << 1,
- FAULT_FLAG_ALLOW_RETRY = 1 << 2,
- FAULT_FLAG_RETRY_NOWAIT = 1 << 3,
- FAULT_FLAG_KILLABLE = 1 << 4,
- FAULT_FLAG_TRIED = 1 << 5,
- FAULT_FLAG_USER = 1 << 6,
- FAULT_FLAG_REMOTE = 1 << 7,
- FAULT_FLAG_INSTRUCTION = 1 << 8,
- FAULT_FLAG_INTERRUPTIBLE = 1 << 9,
-};
-
/*
* The default fault flags that should be used by most of the
* arch-specific page fault handlers.
@@ -577,6 +532,10 @@ enum page_entry_size {
*/
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
+ /**
+ * @close: Called when the VMA is being removed from the MM.
+ * Context: User context. May sleep. Caller holds mmap_lock.
+ */
void (*close)(struct vm_area_struct * area);
/* Called any time before splitting to check if it's allowed */
int (*may_split)(struct vm_area_struct *area, unsigned long addr);
@@ -714,6 +673,27 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
struct mmu_gather;
struct inode;
+static inline unsigned int compound_order(struct page *page)
+{
+ if (!PageHead(page))
+ return 0;
+ return page[1].compound_order;
+}
+
+/**
+ * folio_order - The allocation order of a folio.
+ * @folio: The folio.
+ *
+ * A folio is composed of 2^order pages. See get_order() for the definition
+ * of order.
+ *
+ * Return: The order of the folio.
+ */
+static inline unsigned int folio_order(struct folio *folio)
+{
+ return compound_order(&folio->page);
+}
+
#include <linux/huge_mm.h>
/*
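Hoisting compound_order() and folio_order() above the huge_mm.h include makes them visible to that header. Their semantics as a one-function sketch (a folio_size()-like helper, illustrative only): compound_order() returns 0 for tail and base pages, so this also works for single-page folios.

    static unsigned long example_folio_bytes(struct folio *folio)
    {
            /* An order-9 folio (a 2MB THP with 4K pages) gives PAGE_SIZE << 9. */
            return PAGE_SIZE << folio_order(folio);
    }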
@@ -840,19 +820,15 @@ static inline int page_mapcount(struct page *page)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int total_mapcount(struct page *page);
-int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
+int page_trans_huge_mapcount(struct page *page);
#else
static inline int total_mapcount(struct page *page)
{
return page_mapcount(page);
}
-static inline int page_trans_huge_mapcount(struct page *page,
- int *total_mapcount)
+static inline int page_trans_huge_mapcount(struct page *page)
{
- int mapcount = page_mapcount(page);
- if (total_mapcount)
- *total_mapcount = mapcount;
- return mapcount;
+ return page_mapcount(page);
}
#endif
@@ -913,27 +889,6 @@ static inline void destroy_compound_page(struct page *page)
compound_page_dtors[page[1].compound_dtor](page);
}
-static inline unsigned int compound_order(struct page *page)
-{
- if (!PageHead(page))
- return 0;
- return page[1].compound_order;
-}
-
-/**
- * folio_order - The allocation order of a folio.
- * @folio: The folio.
- *
- * A folio is composed of 2^order pages. See get_order() for the definition
- * of order.
- *
- * Return: The order of the folio.
- */
-static inline unsigned int folio_order(struct folio *folio)
-{
- return compound_order(&folio->page);
-}
-
static inline bool hpage_pincount_available(struct page *page)
{
/*
@@ -1244,6 +1199,26 @@ static inline void folio_put(struct folio *folio)
__put_page(&folio->page);
}
+/**
+ * folio_put_refs - Reduce the reference count on a folio.
+ * @folio: The folio.
+ * @refs: The amount to subtract from the folio's reference count.
+ *
+ * If the folio's reference count reaches zero, the memory will be
+ * released back to the page allocator and may be used by another
+ * allocation immediately. Do not access the memory or the struct folio
+ * after calling folio_put_refs() unless you can be sure that these weren't
+ * the last references.
+ *
+ * Context: May be called in process or interrupt context, but not in NMI
+ * context. May be called while holding a spinlock.
+ */
+static inline void folio_put_refs(struct folio *folio, int refs)
+{
+ if (folio_ref_sub_and_test(folio, refs))
+ __put_page(&folio->page);
+}
+
static inline void put_page(struct page *page)
{
struct folio *folio = page_folio(page);
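A sketch of the intended use: a caller that accumulated several references on one folio (say, one per page of a batch) can drop them with a single atomic subtraction instead of looping over folio_put():

    /* Illustrative helper, not part of this patch. */
    static void example_release_refs(struct folio *folio, int refs)
    {
            if (refs)
                    folio_put_refs(folio, refs);    /* one atomic sub-and-test */
    }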
@@ -1531,11 +1506,18 @@ static inline u8 page_kasan_tag(const struct page *page)
static inline void page_kasan_tag_set(struct page *page, u8 tag)
{
- if (kasan_enabled()) {
- tag ^= 0xff;
- page->flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
- page->flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
- }
+ unsigned long old_flags, flags;
+
+ if (!kasan_enabled())
+ return;
+
+ tag ^= 0xff;
+ old_flags = READ_ONCE(page->flags);
+ do {
+ flags = old_flags;
+ flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
+ flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
+ } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
}
static inline void page_kasan_tag_reset(struct page *page)
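The rewrite turns a racy read-modify-write of page->flags into the standard lock-free cmpxchg loop: snapshot the word, build the new value, and retry if another CPU moved it underneath us. The same pattern in isolation (a generic sketch, not from this patch):

    /* Atomically replace the bits under 'mask' in *word with 'val'. */
    static void example_set_field(unsigned long *word, unsigned long mask,
                                  unsigned long val)
    {
            unsigned long old = READ_ONCE(*word), new;

            do {
                    new = (old & ~mask) | (val & mask);
                    /* try_cmpxchg() refreshes 'old' on failure. */
            } while (!try_cmpxchg(word, &old, new));
    }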
@@ -1837,28 +1819,6 @@ static inline bool can_do_mlock(void) { return false; }
extern int user_shm_lock(size_t, struct ucounts *);
extern void user_shm_unlock(size_t, struct ucounts *);
-/*
- * Parameter block passed down to zap_pte_range in exceptional cases.
- */
-struct zap_details {
- struct address_space *zap_mapping; /* Check page->mapping if set */
- struct page *single_page; /* Locked page to be unmapped */
-};
-
-/*
- * We set details->zap_mappings when we want to unmap shared but keep private
- * pages. Return true if skip zapping this page, false otherwise.
- */
-static inline bool
-zap_skip_check_mapping(struct zap_details *details, struct page *page)
-{
- if (!details || !page)
- return false;
-
- return details->zap_mapping &&
- (details->zap_mapping != page_rmapping(page));
-}
-
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
@@ -1893,7 +1853,6 @@ extern void truncate_pagecache(struct inode *inode, loff_t new);
extern void truncate_setsize(struct inode *inode, loff_t newsize);
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
-int truncate_inode_page(struct address_space *mapping, struct page *page);
int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);
@@ -1904,7 +1863,6 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
extern int fixup_user_fault(struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
-void unmap_mapping_page(struct page *page);
void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
@@ -1925,7 +1883,6 @@ static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
BUG();
return -EFAULT;
}
-static inline void unmap_mapping_page(struct page *page) { }
static inline void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
@@ -1982,7 +1939,6 @@ int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
struct page **pages);
struct page *get_dump_page(unsigned long addr);
-extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
extern void do_invalidatepage(struct page *page, unsigned int offset,
unsigned int length);
@@ -2670,7 +2626,7 @@ static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
extern struct vm_area_struct *vma_merge(struct mm_struct *,
struct vm_area_struct *prev, unsigned long addr, unsigned long end,
unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
- struct mempolicy *, struct vm_userfaultfd_ctx);
+ struct mempolicy *, struct vm_userfaultfd_ctx, const char *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
unsigned long addr, int new_below);
@@ -3179,7 +3135,6 @@ int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
#endif
void drop_slab(void);
-void drop_slab_node(int nid);
#ifndef CONFIG_MMU
#define randomize_va_space 0
@@ -3232,6 +3187,7 @@ enum mf_flags {
MF_ACTION_REQUIRED = 1 << 1,
MF_MUST_KILL = 1 << 2,
MF_SOFT_OFFLINE = 1 << 3,
+ MF_UNPOISON = 1 << 4,
};
extern int memory_failure(unsigned long pfn, int flags);
extern void memory_failure_queue(unsigned long pfn, int flags);
@@ -3272,7 +3228,6 @@ enum mf_action_page_type {
MF_MSG_KERNEL_HIGH_ORDER,
MF_MSG_SLAB,
MF_MSG_DIFFERENT_COMPOUND,
- MF_MSG_POISONED_HUGE,
MF_MSG_HUGE,
MF_MSG_FREE_HUGE,
MF_MSG_NON_PMD_HUGE,
@@ -3287,7 +3242,6 @@ enum mf_action_page_type {
MF_MSG_CLEAN_LRU,
MF_MSG_TRUNCATED_LRU,
MF_MSG_BUDDY,
- MF_MSG_BUDDY_2ND,
MF_MSG_DAX,
MF_MSG_UNSPLIT_THP,
MF_MSG_UNKNOWN,
@@ -3416,5 +3370,16 @@ static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
return 0;
}
+#ifdef CONFIG_ANON_VMA_NAME
+int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
+ unsigned long len_in, const char *name);
+#else
+static inline int
+madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
+		      unsigned long len_in, const char *name)
+{
+ return 0;
+}
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
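Userspace reaches madvise_set_anon_name() via prctl(); a minimal sketch using the PR_SET_VMA / PR_SET_VMA_ANON_NAME interface this series wires up (constants assumed to match the series' uapi header):

    #include <stdlib.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_VMA
    #define PR_SET_VMA		0x53564d41	/* assumed from this series */
    #define PR_SET_VMA_ANON_NAME	0
    #endif

    int main(void)
    {
            size_t len = 1UL << 20;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            /* Shows up as [anon:thread cache] in /proc/<pid>/maps. */
            prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                  (unsigned long)p, len, "thread cache");
            return 0;
    }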
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index e2ec68b0515c..b725839dfe71 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -2,8 +2,10 @@
#ifndef LINUX_MM_INLINE_H
#define LINUX_MM_INLINE_H
+#include <linux/atomic.h>
#include <linux/huge_mm.h>
#include <linux/swap.h>
+#include <linux/string.h>
/**
* folio_is_file_lru - Should the folio be on a file LRU or anon LRU?
@@ -135,4 +137,138 @@ static __always_inline void del_page_from_lru_list(struct page *page,
{
lruvec_del_folio(lruvec, page_folio(page));
}
+
+#ifdef CONFIG_ANON_VMA_NAME
+/*
+ * mmap_lock should be read-locked when calling vma_anon_name() and while using
+ * the returned pointer.
+ */
+extern const char *vma_anon_name(struct vm_area_struct *vma);
+
+/*
+ * mmap_lock should be read-locked for orig_vma->vm_mm.
+ * mmap_lock should be write-locked for new_vma->vm_mm or new_vma should be
+ * isolated.
+ */
+extern void dup_vma_anon_name(struct vm_area_struct *orig_vma,
+ struct vm_area_struct *new_vma);
+
+/*
+ * mmap_lock should be write-locked or vma should have been isolated under
+ * write-locked mmap_lock protection.
+ */
+extern void free_vma_anon_name(struct vm_area_struct *vma);
+
+/* mmap_lock should be read-locked */
+static inline bool is_same_vma_anon_name(struct vm_area_struct *vma,
+ const char *name)
+{
+ const char *vma_name = vma_anon_name(vma);
+
+ /* either both NULL, or pointers to same string */
+ if (vma_name == name)
+ return true;
+
+ return name && vma_name && !strcmp(name, vma_name);
+}
+#else /* CONFIG_ANON_VMA_NAME */
+static inline const char *vma_anon_name(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+static inline void dup_vma_anon_name(struct vm_area_struct *orig_vma,
+ struct vm_area_struct *new_vma) {}
+static inline void free_vma_anon_name(struct vm_area_struct *vma) {}
+static inline bool is_same_vma_anon_name(struct vm_area_struct *vma,
+ const char *name)
+{
+ return true;
+}
+#endif /* CONFIG_ANON_VMA_NAME */
+
+static inline void init_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_set(&mm->tlb_flush_pending, 0);
+}
+
+static inline void inc_tlb_flush_pending(struct mm_struct *mm)
+{
+ atomic_inc(&mm->tlb_flush_pending);
+ /*
+ * The only time this value is relevant is when there are indeed pages
+ * to flush. And we'll only flush pages after changing them, which
+ * requires the PTL.
+ *
+ * So the ordering here is:
+ *
+ * atomic_inc(&mm->tlb_flush_pending);
+ * spin_lock(&ptl);
+ * ...
+ * set_pte_at();
+ * spin_unlock(&ptl);
+ *
+ * spin_lock(&ptl)
+ * mm_tlb_flush_pending();
+ * ....
+ * spin_unlock(&ptl);
+ *
+ * flush_tlb_range();
+ * atomic_dec(&mm->tlb_flush_pending);
+ *
+	 * Where the increment is constrained by the PTL unlock, it thus
+ * ensures that the increment is visible if the PTE modification is
+ * visible. After all, if there is no PTE modification, nobody cares
+ * about TLB flushes either.
+ *
+ * This very much relies on users (mm_tlb_flush_pending() and
+ * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
+ * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
+ * locks (PPC) the unlock of one doesn't order against the lock of
+ * another PTL.
+ *
+ * The decrement is ordered by the flush_tlb_range(), such that
+ * mm_tlb_flush_pending() will not return false unless all flushes have
+ * completed.
+ */
+}
+
+static inline void dec_tlb_flush_pending(struct mm_struct *mm)
+{
+ /*
+ * See inc_tlb_flush_pending().
+ *
+ * This cannot be smp_mb__before_atomic() because smp_mb() simply does
+ * not order against TLB invalidate completion, which is what we need.
+ *
+ * Therefore we must rely on tlb_flush_*() to guarantee order.
+ */
+ atomic_dec(&mm->tlb_flush_pending);
+}
+
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+ /*
+ * Must be called after having acquired the PTL; orders against that
+ * PTLs release and therefore ensures that if we observe the modified
+ * PTE we must also observe the increment from inc_tlb_flush_pending().
+ *
+ * That is, it only guarantees to return true if there is a flush
+ * pending for _this_ PTL.
+ */
+ return atomic_read(&mm->tlb_flush_pending);
+}
+
+static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
+{
+ /*
+ * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
+ * for which there is a TLB flush pending in order to guarantee
+ * we've seen both that PTE modification and the increment.
+ *
+ * (no requirement on actually still holding the PTL, that is irrelevant)
+ */
+ return atomic_read(&mm->tlb_flush_pending) > 1;
+}
+
#endif
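Spelled out as code, the protocol these helpers document is: publish the pending flush before taking the PTL, flush after releasing it, and only then decrement (a sketch mirroring the comment above):

    /* Unmap side (sketch). */
    inc_tlb_flush_pending(mm);
    spin_lock(ptl);
    set_pte_at(mm, addr, ptep, newpte);
    spin_unlock(ptl);
    flush_tlb_range(vma, start, end);
    dec_tlb_flush_pending(mm);

    /* Reader side (sketch): only meaningful while holding the same PTL. */
    spin_lock(ptl);
    if (mm_tlb_flush_pending(mm))
            flush_tlb_range(vma, start, end);   /* be conservative */
    spin_unlock(ptl);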
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 1ae3537c7920..5140e5feb486 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -5,6 +5,7 @@
#include <linux/mm_types_task.h>
#include <linux/auxvec.h>
+#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rbtree.h>
@@ -118,31 +119,6 @@ struct page {
atomic_long_t pp_frag_count;
};
};
- struct { /* slab, slob and slub */
- union {
- struct list_head slab_list;
- struct { /* Partial pages */
- struct page *next;
-#ifdef CONFIG_64BIT
- int pages; /* Nr of pages left */
-#else
- short int pages;
-#endif
- };
- };
- struct kmem_cache *slab_cache; /* not slob */
- /* Double-word boundary */
- void *freelist; /* first free object */
- union {
- void *s_mem; /* slab: first object */
- unsigned long counters; /* SLUB */
- struct { /* SLUB */
- unsigned inuse:16;
- unsigned objects:15;
- unsigned frozen:1;
- };
- };
- };
struct { /* Tail pages of compound page */
unsigned long compound_head; /* Bit zero is set */
@@ -206,9 +182,6 @@ struct page {
* which are currently stored here.
*/
unsigned int page_type;
-
- unsigned int active; /* SLAB */
- int units; /* SLOB */
};
/* Usage count. *DO NOT USE DIRECTLY*. See page_ref.h */
@@ -288,6 +261,7 @@ static_assert(sizeof(struct page) == sizeof(struct folio));
static_assert(offsetof(struct page, pg) == offsetof(struct folio, fl))
FOLIO_MATCH(flags, flags);
FOLIO_MATCH(lru, lru);
+FOLIO_MATCH(mapping, mapping);
FOLIO_MATCH(compound_head, lru);
FOLIO_MATCH(index, index);
FOLIO_MATCH(private, private);
@@ -386,6 +360,12 @@ struct vm_userfaultfd_ctx {
struct vm_userfaultfd_ctx {};
#endif /* CONFIG_USERFAULTFD */
+struct anon_vma_name {
+ struct kref kref;
+ /* The name needs to be at the end because it is dynamically sized. */
+ char name[];
+};
+
/*
* This struct describes a virtual memory area. There is one of these
* per VM-area/task. A VM area is any part of the process virtual memory
@@ -426,11 +406,19 @@ struct vm_area_struct {
/*
* For areas with an address space and backing store,
* linkage into the address_space->i_mmap interval tree.
+ *
+	 * For private anonymous mappings, a pointer to a null-terminated string
+ * containing the name given to the vma, or NULL if unnamed.
*/
- struct {
- struct rb_node rb;
- unsigned long rb_subtree_last;
- } shared;
+
+ union {
+ struct {
+ struct rb_node rb;
+ unsigned long rb_subtree_last;
+ } shared;
+		/* Serialized by mmap_lock. */
+ struct anon_vma_name *anon_name;
+ };
/*
* A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
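Since the name is a kref-counted object with a flexible array member, construction is a single allocation and sharing is a kref_get(); a sketch (helper name hypothetical, not part of this hunk):

    /* Hypothetical constructor for the kref-counted, flexible-array name. */
    static struct anon_vma_name *example_anon_name_alloc(const char *name)
    {
            size_t len = strlen(name) + 1;
            struct anon_vma_name *anon_name;

            anon_name = kmalloc(struct_size(anon_name, name, len), GFP_KERNEL);
            if (anon_name) {
                    kref_init(&anon_name->kref);	/* refcount = 1 */
                    memcpy(anon_name->name, name, len);
            }
            return anon_name;
    }

    /* Sharing between VMAs is then just a kref_get() on the same object. */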
@@ -632,7 +620,7 @@ struct mm_struct {
atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/* See flush_tlb_batched_pending() */
- bool tlb_flush_batched;
+ atomic_t tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
#ifdef CONFIG_PREEMPT_RT
@@ -677,90 +665,6 @@ extern void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm);
extern void tlb_finish_mmu(struct mmu_gather *tlb);
-static inline void init_tlb_flush_pending(struct mm_struct *mm)
-{
- atomic_set(&mm->tlb_flush_pending, 0);
-}
-
-static inline void inc_tlb_flush_pending(struct mm_struct *mm)
-{
- atomic_inc(&mm->tlb_flush_pending);
- /*
- * The only time this value is relevant is when there are indeed pages
- * to flush. And we'll only flush pages after changing them, which
- * requires the PTL.
- *
- * So the ordering here is:
- *
- * atomic_inc(&mm->tlb_flush_pending);
- * spin_lock(&ptl);
- * ...
- * set_pte_at();
- * spin_unlock(&ptl);
- *
- * spin_lock(&ptl)
- * mm_tlb_flush_pending();
- * ....
- * spin_unlock(&ptl);
- *
- * flush_tlb_range();
- * atomic_dec(&mm->tlb_flush_pending);
- *
- * Where the increment if constrained by the PTL unlock, it thus
- * ensures that the increment is visible if the PTE modification is
- * visible. After all, if there is no PTE modification, nobody cares
- * about TLB flushes either.
- *
- * This very much relies on users (mm_tlb_flush_pending() and
- * mm_tlb_flush_nested()) only caring about _specific_ PTEs (and
- * therefore specific PTLs), because with SPLIT_PTE_PTLOCKS and RCpc
- * locks (PPC) the unlock of one doesn't order against the lock of
- * another PTL.
- *
- * The decrement is ordered by the flush_tlb_range(), such that
- * mm_tlb_flush_pending() will not return false unless all flushes have
- * completed.
- */
-}
-
-static inline void dec_tlb_flush_pending(struct mm_struct *mm)
-{
- /*
- * See inc_tlb_flush_pending().
- *
- * This cannot be smp_mb__before_atomic() because smp_mb() simply does
- * not order against TLB invalidate completion, which is what we need.
- *
- * Therefore we must rely on tlb_flush_*() to guarantee order.
- */
- atomic_dec(&mm->tlb_flush_pending);
-}
-
-static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
-{
- /*
- * Must be called after having acquired the PTL; orders against that
- * PTLs release and therefore ensures that if we observe the modified
- * PTE we must also observe the increment from inc_tlb_flush_pending().
- *
- * That is, it only guarantees to return true if there is a flush
- * pending for _this_ PTL.
- */
- return atomic_read(&mm->tlb_flush_pending);
-}
-
-static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
-{
- /*
- * Similar to mm_tlb_flush_pending(), we must have acquired the PTL
- * for which there is a TLB flush pending in order to guarantee
- * we've seen both that PTE modification and the increment.
- *
- * (no requirement on actually still holding the PTL, that is irrelevant)
- */
- return atomic_read(&mm->tlb_flush_pending) > 1;
-}
-
struct vm_fault;
/**
@@ -875,4 +779,49 @@ typedef struct {
unsigned long val;
} swp_entry_t;
+/**
+ * enum fault_flag - Fault flag definitions.
+ * @FAULT_FLAG_WRITE: Fault was a write fault.
+ * @FAULT_FLAG_MKWRITE: Fault was mkwrite of existing PTE.
+ * @FAULT_FLAG_ALLOW_RETRY: Allow to retry the fault if blocked.
+ * @FAULT_FLAG_RETRY_NOWAIT: Don't drop mmap_lock and wait when retrying.
+ * @FAULT_FLAG_KILLABLE: The fault task is in SIGKILL killable region.
+ * @FAULT_FLAG_TRIED: The fault has been tried once.
+ * @FAULT_FLAG_USER: The fault originated in userspace.
+ * @FAULT_FLAG_REMOTE: The fault is not for current task/mm.
+ * @FAULT_FLAG_INSTRUCTION: The fault was during an instruction fetch.
+ * @FAULT_FLAG_INTERRUPTIBLE: The fault can be interrupted by non-fatal signals.
+ *
+ * About @FAULT_FLAG_ALLOW_RETRY and @FAULT_FLAG_TRIED: we can specify
+ * whether we would allow page faults to retry by specifying these two
+ * fault flags correctly. Currently there can be three legal combinations:
+ *
+ * (a) ALLOW_RETRY and !TRIED: this means the page fault allows retry, and
+ * this is the first try
+ *
+ * (b) ALLOW_RETRY and TRIED: this means the page fault allows retry, and
+ * we've already tried at least once
+ *
+ * (c) !ALLOW_RETRY and !TRIED: this means the page fault does not allow retry
+ *
+ * The unlisted combination (!ALLOW_RETRY && TRIED) is illegal and should never
+ * be used. Note that page faults can be allowed to retry multiple times,
+ * in which case we'll have an initial fault with flags (a) then later on
+ * continuous faults with flags (b). We should always try to detect pending
+ * signals before a retry to make sure the continuous page faults can still be
+ * interrupted if necessary.
+ */
+enum fault_flag {
+ FAULT_FLAG_WRITE = 1 << 0,
+ FAULT_FLAG_MKWRITE = 1 << 1,
+ FAULT_FLAG_ALLOW_RETRY = 1 << 2,
+ FAULT_FLAG_RETRY_NOWAIT = 1 << 3,
+ FAULT_FLAG_KILLABLE = 1 << 4,
+ FAULT_FLAG_TRIED = 1 << 5,
+ FAULT_FLAG_USER = 1 << 6,
+ FAULT_FLAG_REMOTE = 1 << 7,
+ FAULT_FLAG_INSTRUCTION = 1 << 8,
+ FAULT_FLAG_INTERRUPTIBLE = 1 << 9,
+};
+
#endif /* _LINUX_MM_TYPES_H */
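The legality rules in the comment reduce to one test; a sketch of a checker for the three valid ALLOW_RETRY/TRIED combinations:

    /* Sketch: reject the one illegal combination (!ALLOW_RETRY && TRIED). */
    static bool example_fault_flags_valid(enum fault_flag flags)
    {
            if (flags & FAULT_FLAG_ALLOW_RETRY)
                    return true;			/* cases (a) and (b) */
            return !(flags & FAULT_FLAG_TRIED);	/* case (c) */
    }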
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 936dc0b6c226..aed44e9b5d89 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1047,6 +1047,15 @@ static inline int is_highmem_idx(enum zone_type idx)
#endif
}
+#ifdef CONFIG_ZONE_DMA
+bool has_managed_dma(void);
+#else
+static inline bool has_managed_dma(void)
+{
+ return false;
+}
+#endif
+
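The helper lets callers skip GFP_DMA on systems where ZONE_DMA is configured but ended up with no managed pages; e.g. (illustrative):

    static void *example_alloc_low_buffer(size_t size)
    {
            /* Only constrain to ZONE_DMA when it can actually satisfy us. */
            gfp_t gfp = has_managed_dma() ? GFP_DMA : GFP_KERNEL;

            return kmalloc(size, gfp);
    }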
/**
* is_highmem - helper function to quickly check if a struct zone is a
* highmem zone or not. This is an attempt to keep references
diff --git a/include/linux/module.h b/include/linux/module.h
index c9f1200b2312..1e135fd5c076 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -290,7 +290,8 @@ extern typeof(name) __mod_##type##__##name##_device_table \
* files require multiple MODULE_FIRMWARE() specifiers */
#define MODULE_FIRMWARE(_firmware) MODULE_INFO(firmware, _firmware)
-#define MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, #ns)
+#define _MODULE_IMPORT_NS(ns) MODULE_INFO(import_ns, #ns)
+#define MODULE_IMPORT_NS(ns) _MODULE_IMPORT_NS(ns)
struct notifier_block;
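The added indirection matters when the namespace argument is itself a macro: with a single level, the # operator stringifies the macro's name instead of its value. A standalone demonstration of the difference (sketch; MODULE_INFO elided):

    #include <stdio.h>

    #define OLD_IMPORT(ns)	#ns		/* stringifies immediately */
    #define _NEW_IMPORT(ns)	#ns
    #define NEW_IMPORT(ns)	_NEW_IMPORT(ns)	/* argument expands first */

    #define MY_NS SOME_NAMESPACE

    int main(void)
    {
            printf("%s\n", OLD_IMPORT(MY_NS));	/* prints "MY_NS" */
            printf("%s\n", NEW_IMPORT(MY_NS));	/* prints "SOME_NAMESPACE" */
            return 0;
    }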
@@ -595,9 +596,9 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name);
-extern void __noreturn __module_put_and_exit(struct module *mod,
+extern void __noreturn __module_put_and_kthread_exit(struct module *mod,
long code);
-#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
+#define module_put_and_kthread_exit(code) __module_put_and_kthread_exit(THIS_MODULE, code)
#ifdef CONFIG_MODULE_UNLOAD
int module_refcount(struct module *mod);
@@ -790,7 +791,7 @@ static inline int unregister_module_notifier(struct notifier_block *nb)
return 0;
}
-#define module_put_and_exit(code) do_exit(code)
+#define module_put_and_kthread_exit(code) kthread_exit(code)
static inline void print_modules(void)
{
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 5d92a7e1a742..7f18a7555dff 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -113,9 +113,6 @@ extern void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list);
extern void mark_mounts_for_expiry(struct list_head *mounts);
extern dev_t name_to_dev_t(const char *name);
-
-extern unsigned int sysctl_mount_max;
-
extern bool path_is_mountpoint(const struct path *path);
extern void kern_unmount_array(struct vfsmount *mnt[], unsigned int num);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index e616f94c7c58..fc918a658d48 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -2,7 +2,23 @@
#ifndef LINUX_MSI_H
#define LINUX_MSI_H
-#include <linux/kobject.h>
+/*
+ * This header file contains MSI data structures and functions which are
+ * only relevant for:
+ * - Interrupt core code
+ * - PCI/MSI core code
+ * - MSI interrupt domain implementations
+ * - IOMMU, low level VFIO, NTB and other justified exceptions
+ * dealing with low level MSI details.
+ *
+ * Regular device drivers have no business with any of these functions and
+ * especially storing MSI descriptor pointers in random code is considered
+ * abuse. The only function which is relevant for drivers is msi_get_virq().
+ */
+
+#include <linux/cpumask.h>
+#include <linux/xarray.h>
+#include <linux/mutex.h>
#include <linux/list.h>
#include <asm/msi.h>
@@ -56,6 +72,8 @@ struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
+struct device_attribute;
+
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
@@ -69,62 +87,59 @@ typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
struct msi_msg *msg);
/**
- * platform_msi_desc - Platform device specific msi descriptor data
- * @msi_priv_data: Pointer to platform private data
- * @msi_index: The index of the MSI descriptor for multi MSI
- */
-struct platform_msi_desc {
- struct platform_msi_priv_data *msi_priv_data;
- u16 msi_index;
-};
-
-/**
- * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
- * @msi_index: The index of the MSI descriptor
+ * pci_msi_desc - PCI/MSI specific MSI descriptor data
+ *
+ * @msi_mask: [PCI MSI] MSI cached mask bits
+ * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits
+ * @is_msix: [PCI MSI/X] True if MSI-X
+ * @multiple: [PCI MSI/X] log2 num of messages allocated
+ * @multi_cap: [PCI MSI/X] log2 num of messages supported
+ * @can_mask: [PCI MSI/X] Masking supported?
+ * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
+ * @is_virtual:	[PCI MSI-X] True if the vector is virtual (not backed by a
+ *		hardware table entry)
+ * @default_irq: [PCI MSI/X] The default pre-assigned non-MSI irq
+ * @mask_pos: [PCI MSI] Mask register position
+ * @mask_base: [PCI MSI-X] Mask register base address
*/
-struct fsl_mc_msi_desc {
- u16 msi_index;
+struct pci_msi_desc {
+ union {
+ u32 msi_mask;
+ u32 msix_ctrl;
+ };
+ struct {
+ u8 is_msix : 1;
+ u8 multiple : 3;
+ u8 multi_cap : 3;
+ u8 can_mask : 1;
+ u8 is_64 : 1;
+ u8 is_virtual : 1;
+ unsigned default_irq;
+ } msi_attrib;
+ union {
+ u8 mask_pos;
+ void __iomem *mask_base;
+ };
};
-/**
- * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
- * @dev_index: TISCI device index
- */
-struct ti_sci_inta_msi_desc {
- u16 dev_index;
-};
+#define MSI_MAX_INDEX ((unsigned int)USHRT_MAX)
/**
* struct msi_desc - Descriptor structure for MSI based interrupts
- * @list: List head for management
* @irq: The base interrupt number
* @nvec_used: The number of vectors used
* @dev: Pointer to the device which uses this descriptor
* @msg: The last set MSI message cached for reuse
* @affinity: Optional pointer to a cpu affinity mask for this descriptor
+ * @sysfs_attr: Pointer to sysfs device attribute
*
* @write_msi_msg: Callback that may be called when the MSI message
* address or data changes
* @write_msi_msg_data: Data parameter for the callback.
*
- * @msi_mask: [PCI MSI] MSI cached mask bits
- * @msix_ctrl: [PCI MSI-X] MSI-X cached per vector control bits
- * @is_msix: [PCI MSI/X] True if MSI-X
- * @multiple: [PCI MSI/X] log2 num of messages allocated
- * @multi_cap: [PCI MSI/X] log2 num of messages supported
- * @maskbit: [PCI MSI/X] Mask-Pending bit supported?
- * @is_64: [PCI MSI/X] Address size: 0=32bit 1=64bit
- * @entry_nr: [PCI MSI/X] Entry which is described by this descriptor
- * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
- * @mask_pos: [PCI MSI] Mask register position
- * @mask_base: [PCI MSI-X] Mask register base address
- * @platform: [platform] Platform device specific msi descriptor data
- * @fsl_mc: [fsl-mc] FSL MC device specific msi descriptor data
- * @inta: [INTA] TISCI based INTA specific msi descriptor data
+ * @msi_index: Index of the msi descriptor
+ * @pci: PCI specific msi descriptor data
*/
struct msi_desc {
/* Shared device/bus type independent data */
- struct list_head list;
unsigned int irq;
unsigned int nvec_used;
struct device *dev;
@@ -133,61 +148,71 @@ struct msi_desc {
#ifdef CONFIG_IRQ_MSI_IOMMU
const void *iommu_cookie;
#endif
+#ifdef CONFIG_SYSFS
+ struct device_attribute *sysfs_attrs;
+#endif
void (*write_msi_msg)(struct msi_desc *entry, void *data);
void *write_msi_msg_data;
- union {
- /* PCI MSI/X specific data */
- struct {
- union {
- u32 msi_mask;
- u32 msix_ctrl;
- };
- struct {
- u8 is_msix : 1;
- u8 multiple : 3;
- u8 multi_cap : 3;
- u8 can_mask : 1;
- u8 is_64 : 1;
- u8 is_virtual : 1;
- u16 entry_nr;
- unsigned default_irq;
- } msi_attrib;
- union {
- u8 mask_pos;
- void __iomem *mask_base;
- };
- };
-
- /*
- * Non PCI variants add their data structure here. New
- * entries need to use a named structure. We want
- * proper name spaces for this. The PCI part is
- * anonymous for now as it would require an immediate
- * tree wide cleanup.
- */
- struct platform_msi_desc platform;
- struct fsl_mc_msi_desc fsl_mc;
- struct ti_sci_inta_msi_desc inta;
- };
+ u16 msi_index;
+ struct pci_msi_desc pci;
+};
+
+/*
+ * Filter values for the MSI descriptor iterators and accessor functions.
+ */
+enum msi_desc_filter {
+ /* All descriptors */
+ MSI_DESC_ALL,
+ /* Descriptors which have no interrupt associated */
+ MSI_DESC_NOTASSOCIATED,
+ /* Descriptors which have an interrupt associated */
+ MSI_DESC_ASSOCIATED,
};
-/* Helpers to hide struct msi_desc implementation details */
+/**
+ * msi_device_data - MSI per device data
+ * @properties: MSI properties which are interesting to drivers
+ * @platform_data: Platform-MSI specific data
+ * @mutex: Mutex protecting the MSI descriptor store
+ * @__store: Xarray for storing MSI descriptor pointers
+ * @__iter_idx:		Index used by the iterators to find the next entry
+ */
+struct msi_device_data {
+ unsigned long properties;
+ struct platform_msi_priv_data *platform_data;
+ struct mutex mutex;
+ struct xarray __store;
+ unsigned long __iter_idx;
+};
+
+int msi_setup_device_data(struct device *dev);
+
+unsigned int msi_get_virq(struct device *dev, unsigned int index);
+void msi_lock_descs(struct device *dev);
+void msi_unlock_descs(struct device *dev);
+
+struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter);
+struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter);
+
+/**
+ * msi_for_each_desc - Iterate the MSI descriptors
+ *
+ * @desc: struct msi_desc pointer used as iterator
+ * @dev: struct device pointer - device to iterate
+ * @filter: Filter for descriptor selection
+ *
+ * Notes:
+ * - The loop must be protected with a msi_lock_descs()/msi_unlock_descs()
+ * pair.
+ * - It is safe to remove a retrieved MSI descriptor in the loop.
+ */
+#define msi_for_each_desc(desc, dev, filter) \
+ for ((desc) = msi_first_desc((dev), (filter)); (desc); \
+ (desc) = msi_next_desc((dev), (filter)))
+
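Following the notes above, a walk over all descriptors that already have an interrupt looks like this (sketch):

    struct msi_desc *desc;

    msi_lock_descs(dev);
    msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
            /* desc->irq is valid for every descriptor selected here. */
            dev_info(dev, "index %u -> irq %u\n",
                     desc->msi_index, desc->irq);
    }
    msi_unlock_descs(dev);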
#define msi_desc_to_dev(desc) ((desc)->dev)
-#define dev_to_msi_list(dev) (&(dev)->msi_list)
-#define first_msi_entry(dev) \
- list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
-#define for_each_msi_entry(desc, dev) \
- list_for_each_entry((desc), dev_to_msi_list((dev)), list)
-#define for_each_msi_entry_safe(desc, tmp, dev) \
- list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
-#define for_each_msi_vector(desc, __irq, dev) \
- for_each_msi_entry((desc), (dev)) \
- if ((desc)->irq) \
- for (__irq = (desc)->irq; \
- __irq < ((desc)->irq + (desc)->nvec_used); \
- __irq++)
#ifdef CONFIG_IRQ_MSI_IOMMU
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
@@ -213,36 +238,33 @@ static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
#endif
#ifdef CONFIG_PCI_MSI
-#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev)
-#define for_each_pci_msi_entry(desc, pdev) \
- for_each_msi_entry((desc), &(pdev)->dev)
-
struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
-void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
#else /* CONFIG_PCI_MSI */
-static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
-{
- return NULL;
-}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif /* CONFIG_PCI_MSI */
-struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
- const struct irq_affinity_desc *affinity);
-void free_msi_entry(struct msi_desc *entry);
+int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc);
+void msi_free_msi_descs_range(struct device *dev, enum msi_desc_filter filter,
+ unsigned int first_index, unsigned int last_index);
+
+/**
+ * msi_free_msi_descs - Free MSI descriptors of a device
+ * @dev: Device to free the descriptors
+ */
+static inline void msi_free_msi_descs(struct device *dev)
+{
+ msi_free_msi_descs_range(dev, MSI_DESC_ALL, 0, MSI_MAX_INDEX);
+}
+
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
-const struct attribute_group **msi_populate_sysfs(struct device *dev);
-void msi_destroy_sysfs(struct device *dev,
- const struct attribute_group **msi_irq_groups);
-
/*
* The arch hooks to setup up msi irqs. Default functions are implemented
* as weak symbols so that they /can/ be overriden by architecture specific
@@ -256,25 +278,20 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
-#else
-static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
- WARN_ON_ONCE(1);
- return -ENODEV;
-}
-
-static inline void arch_teardown_msi_irqs(struct pci_dev *dev)
-{
- WARN_ON_ONCE(1);
-}
-#endif
+#ifdef CONFIG_SYSFS
+int msi_device_populate_sysfs(struct device *dev);
+void msi_device_destroy_sysfs(struct device *dev);
+#else /* CONFIG_SYSFS */
+static inline int msi_device_populate_sysfs(struct device *dev) { return 0; }
+static inline void msi_device_destroy_sysfs(struct device *dev) { }
+#endif /* !CONFIG_SYSFS */
+#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
/*
- * The restore hooks are still available as they are useful even
- * for fully irq domain based setups. Courtesy to XEN/X86.
+ * The restore hook is still available even for fully irq domain based
+ * setups. Courtesy to XEN/X86.
*/
-void arch_restore_msi_irqs(struct pci_dev *dev);
-void default_restore_msi_irqs(struct pci_dev *dev);
+bool arch_restore_msi_irqs(struct pci_dev *dev);
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
@@ -294,20 +311,17 @@ struct msi_domain_info;
 * @msi_free:		Domain specific function to free MSI interrupts
* @msi_check: Callback for verification of the domain/info/dev data
* @msi_prepare: Prepare the allocation of the interrupts in the domain
- * @msi_finish: Optional callback to finalize the allocation
* @set_desc: Set the msi descriptor for an interrupt
- * @handle_error: Optional error handler if the allocation fails
* @domain_alloc_irqs: Optional function to override the default allocation
* function.
* @domain_free_irqs: Optional function to override the default free
* function.
*
- * @get_hwirq, @msi_init and @msi_free are callbacks used by
- * msi_create_irq_domain() and related interfaces
+ * @get_hwirq, @msi_init and @msi_free are callbacks used by the underlying
+ * irqdomain.
*
- * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
- * are callbacks used by msi_domain_alloc_irqs() and related
- * interfaces which are based on msi_desc.
+ * @msi_check, @msi_prepare and @set_desc are callbacks used by
+ * msi_domain_alloc/free_irqs().
*
* @domain_alloc_irqs, @domain_free_irqs can be used to override the
* default allocation/free functions (__msi_domain_alloc/free_irqs). This
@@ -341,11 +355,8 @@ struct msi_domain_ops {
int (*msi_prepare)(struct irq_domain *domain,
struct device *dev, int nvec,
msi_alloc_info_t *arg);
- void (*msi_finish)(msi_alloc_info_t *arg, int retval);
void (*set_desc)(msi_alloc_info_t *arg,
struct msi_desc *desc);
- int (*handle_error)(struct irq_domain *domain,
- struct msi_desc *desc, int error);
int (*domain_alloc_irqs)(struct irq_domain *domain,
struct device *dev, int nvec);
void (*domain_free_irqs)(struct irq_domain *domain,
@@ -399,6 +410,14 @@ enum {
MSI_FLAG_MUST_REACTIVATE = (1 << 5),
/* Is level-triggered capable, using two messages */
MSI_FLAG_LEVEL_CAPABLE = (1 << 6),
+ /* Populate sysfs on alloc() and destroy it on free() */
+ MSI_FLAG_DEV_SYSFS = (1 << 7),
+ /* MSI-X entries must be contiguous */
+ MSI_FLAG_MSIX_CONTIGUOUS = (1 << 8),
+ /* Allocate simple MSI descriptors */
+ MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 9),
+ /* Free MSI descriptors */
+ MSI_FLAG_FREE_MSI_DESCS = (1 << 10),
};
int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
@@ -409,9 +428,12 @@ struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
struct irq_domain *parent);
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
int nvec);
+int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
+ int nvec);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
int nvec);
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
+void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
@@ -440,20 +462,17 @@ __platform_msi_create_device_domain(struct device *dev,
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)
-int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs);
-void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nvec);
+int platform_msi_device_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs);
+void platform_msi_device_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */
#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
-void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
-int pci_msi_domain_check_cap(struct irq_domain *domain,
- struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
bool pci_dev_has_special_msi_domain(struct pci_dev *pdev);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3213c7227b59..8b5a314db167 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2158,7 +2158,7 @@ struct net_device {
struct netdev_queue *_tx ____cacheline_aligned_in_smp;
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
- struct Qdisc *qdisc;
+ struct Qdisc __rcu *qdisc;
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
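With the __rcu annotation, lockless readers must use the RCU accessors rather than a plain load; writers holding RTNL pair this with rcu_assign_pointer()/rtnl_dereference(). A reader sketch:

    struct Qdisc *q;

    rcu_read_lock();
    q = rcu_dereference(dev->qdisc);
    if (q) {
            /* inspect q inside the read-side critical section */
    }
    rcu_read_unlock();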
@@ -2548,6 +2548,7 @@ struct packet_type {
struct net_device *);
bool (*id_match)(struct packet_type *ptype,
struct sock *sk);
+ struct net *af_packet_net;
void *af_packet_priv;
struct list_head list;
};
diff --git a/include/linux/netfs.h b/include/linux/netfs.h
index ca0683b9e3d1..614f22213e21 100644
--- a/include/linux/netfs.h
+++ b/include/linux/netfs.h
@@ -124,6 +124,7 @@ struct netfs_cache_resources {
void *cache_priv;
void *cache_priv2;
unsigned int debug_id; /* Cookie debug ID */
+ unsigned int inval_counter; /* object->inval_counter at begin_op */
};
/*
@@ -196,6 +197,15 @@ struct netfs_read_request_ops {
};
/*
+ * How to handle reading from a hole.
+ */
+enum netfs_read_from_hole {
+ NETFS_READ_HOLE_IGNORE,
+ NETFS_READ_HOLE_CLEAR,
+ NETFS_READ_HOLE_FAIL,
+};
+
+/*
* Table of operations for access to a cache. This is obtained by
* rreq->ops->begin_cache_operation().
*/
@@ -207,7 +217,7 @@ struct netfs_cache_ops {
int (*read)(struct netfs_cache_resources *cres,
loff_t start_pos,
struct iov_iter *iter,
- bool seek_data,
+ enum netfs_read_from_hole read_hole,
netfs_io_terminated_t term_func,
void *term_func_priv);
@@ -232,7 +242,15 @@ struct netfs_cache_ops {
* actually do.
*/
int (*prepare_write)(struct netfs_cache_resources *cres,
- loff_t *_start, size_t *_len, loff_t i_size);
+ loff_t *_start, size_t *_len, loff_t i_size,
+ bool no_space_allocated_yet);
+
+ /* Query the occupancy of the cache in a region, returning where the
+ * next chunk of data starts and how long it is.
+ */
+ int (*query_occupancy)(struct netfs_cache_resources *cres,
+ loff_t start, size_t len, size_t granularity,
+ loff_t *_data_start, size_t *_data_len);
};
struct readahead_control;
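Cache backends now receive an explicit hole policy instead of a bool; judging by the names, IGNORE leaves the buffer untouched, CLEAR zero-fills, and FAIL errors out. A caller sketch (assuming cres->ops was set up by ->begin_cache_operation()):

    /* Read from the cache, zero-filling any hole in the range (sketch). */
    ret = cres->ops->read(cres, start, &iter, NETFS_READ_HOLE_CLEAR,
                          NULL, NULL); /* term_func/term_func_priv omitted */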
diff --git a/include/linux/nfs.h b/include/linux/nfs.h
index 0dc7ad38a0da..b06375e88e58 100644
--- a/include/linux/nfs.h
+++ b/include/linux/nfs.h
@@ -36,14 +36,6 @@ static inline void nfs_copy_fh(struct nfs_fh *target, const struct nfs_fh *sourc
memcpy(target->data, source->data, source->size);
}
-
-/*
- * This is really a general kernel constant, but since nothing like
- * this is defined in the kernel headers, I have to do it here.
- */
-#define NFS_OFFSET_MAX ((__s64)((~(__u64)0) >> 1))
-
-
enum nfs3_stable_how {
NFS_UNSTABLE = 0,
NFS_DATA_SYNC = 1,
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 05f249f20f55..68f81d8d36de 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -61,7 +61,9 @@
struct nfs_access_entry {
struct rb_node rb_node;
struct list_head lru;
- const struct cred * cred;
+ kuid_t fsuid;
+ kgid_t fsgid;
+ struct group_info *group_info;
__u32 mask;
struct rcu_head rcu_head;
};
@@ -105,6 +107,7 @@ struct nfs_open_dir_context {
__u64 dup_cookie;
pgoff_t page_index;
signed char duped;
+ bool eof;
};
/*
@@ -275,7 +278,6 @@ struct nfs4_copy_state {
#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
#define NFS_INO_INVALIDATING (3) /* inode is being invalidated */
#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
-#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
#define NFS_INO_FORCE_READDIR (7) /* force readdirplus */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
@@ -396,7 +398,7 @@ extern int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fa
extern int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fattr *fattr);
extern int nfs_getattr(struct user_namespace *, const struct path *,
struct kstat *, u32, unsigned int);
-extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *);
+extern void nfs_access_add_cache(struct inode *, struct nfs_access_entry *, const struct cred *);
extern void nfs_access_set_mask(struct nfs_access_entry *, u32);
extern int nfs_permission(struct user_namespace *, struct inode *, int);
extern int nfs_open(struct inode *, struct file *);
@@ -533,8 +535,8 @@ extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh,
struct nfs_fattr *fattr);
extern int nfs_may_open(struct inode *inode, const struct cred *cred, int openflags);
extern void nfs_access_zap_cache(struct inode *inode);
-extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res,
- bool may_block);
+extern int nfs_access_get_cached(struct inode *inode, const struct cred *cred,
+ u32 *mask, bool may_block);
/*
* linux/fs/nfs/symlink.c
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 2a9acbfe00f0..ca0959e51e81 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -120,11 +120,6 @@ struct nfs_client {
* This is used to generate the mv0 callback address.
*/
char cl_ipaddr[48];
-
-#ifdef CONFIG_NFS_FSCACHE
- struct fscache_cookie *fscache; /* client index cache cookie */
-#endif
-
struct net *cl_net;
struct list_head pending_cb_stateids;
};
@@ -194,8 +189,8 @@ struct nfs_server {
struct nfs_auth_info auth_info; /* parsed auth flavors */
#ifdef CONFIG_NFS_FSCACHE
- struct nfs_fscache_key *fscache_key; /* unique key for superblock */
- struct fscache_cookie *fscache; /* superblock cookie */
+ struct fscache_volume *fscache; /* superblock cookie */
+ char *fscache_uniq; /* Uniquifier (or NULL) */
#endif
u32 pnfs_blksize; /* layout_blksize attr */
@@ -271,6 +266,8 @@ struct nfs_server {
#define NFS_CAP_ACLS (1U << 3)
#define NFS_CAP_ATOMIC_OPEN (1U << 4)
#define NFS_CAP_LGOPEN (1U << 5)
+#define NFS_CAP_CASE_INSENSITIVE (1U << 6)
+#define NFS_CAP_CASE_PRESERVING (1U << 7)
#define NFS_CAP_POSIX_LOCK (1U << 14)
#define NFS_CAP_UIDGID_NOMAP (1U << 15)
#define NFS_CAP_STATEID_NFSV41 (1U << 16)
@@ -287,5 +284,5 @@ struct nfs_server {
#define NFS_CAP_COPY_NOTIFY (1U << 27)
#define NFS_CAP_XATTR (1U << 28)
#define NFS_CAP_READ_PLUS (1U << 29)
-
+#define NFS_CAP_FS_LOCATIONS (1U << 30)
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 967a0098f0a9..728cb0c1f0b6 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1194,6 +1194,8 @@ struct nfs4_server_caps_res {
u32 has_links;
u32 has_symlinks;
u32 fh_expire_type;
+ u32 case_insensitive;
+ u32 case_preserving;
};
#define NFS4_PATHNAME_MAXCOMPONENTS 512
@@ -1737,7 +1739,7 @@ struct nfs_rpc_ops {
struct nfs_fh *, struct nfs_fattr *);
int (*lookupp) (struct inode *, struct nfs_fh *,
struct nfs_fattr *);
- int (*access) (struct inode *, struct nfs_access_entry *);
+ int (*access) (struct inode *, struct nfs_access_entry *, const struct cred *);
int (*readlink)(struct inode *, struct page *, unsigned int,
unsigned int);
int (*create) (struct inode *, struct dentry *,
@@ -1795,6 +1797,7 @@ struct nfs_rpc_ops {
struct nfs_server *(*create_server)(struct fs_context *);
struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *, rpc_authflavor_t);
+ int (*discover_trunking)(struct nfs_server *, struct nfs_fh *);
};
/*
diff --git a/include/linux/of.h b/include/linux/of.h
index ff143a027abc..2dc77430a91a 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -364,18 +364,12 @@ extern const struct of_device_id *of_match_node(
const struct of_device_id *matches, const struct device_node *node);
extern int of_modalias_node(struct device_node *node, char *modalias, int len);
extern void of_print_phandle_args(const char *msg, const struct of_phandle_args *args);
-extern struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name,
- int index);
-extern int of_parse_phandle_with_args(const struct device_node *np,
- const char *list_name, const char *cells_name, int index,
- struct of_phandle_args *out_args);
+extern int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name, const char *cells_name, int cell_count,
+ int index, struct of_phandle_args *out_args);
extern int of_parse_phandle_with_args_map(const struct device_node *np,
const char *list_name, const char *stem_name, int index,
struct of_phandle_args *out_args);
-extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
- const char *list_name, int cells_count, int index,
- struct of_phandle_args *out_args);
extern int of_count_phandle_with_args(const struct device_node *np,
const char *list_name, const char *cells_name);
@@ -416,130 +410,6 @@ extern int of_detach_node(struct device_node *);
#define of_match_ptr(_ptr) (_ptr)
-/**
- * of_property_read_u8_array - Find and read an array of u8 from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 8-bit value(s) from
- * it.
- *
- * dts entry of array should be like:
- * ``property = /bits/ 8 <0x50 0x60 0x70>;``
- *
- * Return: 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u8 value can be decoded.
- */
-static inline int of_property_read_u8_array(const struct device_node *np,
- const char *propname,
- u8 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u8_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u16_array - Find and read an array of u16 from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 16-bit value(s) from
- * it.
- *
- * dts entry of array should be like:
- * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;``
- *
- * Return: 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u16 value can be decoded.
- */
-static inline int of_property_read_u16_array(const struct device_node *np,
- const char *propname,
- u16 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u16_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u32_array - Find and read an array of 32 bit integers
- * from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 32-bit value(s) from
- * it.
- *
- * Return: 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u32 value can be decoded.
- */
-static inline int of_property_read_u32_array(const struct device_node *np,
- const char *propname,
- u32 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u32_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
-/**
- * of_property_read_u64_array - Find and read an array of 64 bit integers
- * from a property.
- *
- * @np: device node from which the property value is to be read.
- * @propname: name of the property to be searched.
- * @out_values: pointer to return value, modified only if return value is 0.
- * @sz: number of array elements to read
- *
- * Search for a property in a device node and read 64-bit value(s) from
- * it.
- *
- * Return: 0 on success, -EINVAL if the property does not exist,
- * -ENODATA if property does not have a value, and -EOVERFLOW if the
- * property data isn't large enough.
- *
- * The out_values is modified only if a valid u64 value can be decoded.
- */
-static inline int of_property_read_u64_array(const struct device_node *np,
- const char *propname,
- u64 *out_values, size_t sz)
-{
- int ret = of_property_read_variable_u64_array(np, propname, out_values,
- sz, 0);
- if (ret >= 0)
- return 0;
- else
- return ret;
-}
-
/*
* struct property *prop;
* const __be32 *p;
@@ -734,32 +604,6 @@ static inline int of_property_count_elems_of_size(const struct device_node *np,
return -ENOSYS;
}
-static inline int of_property_read_u8_array(const struct device_node *np,
- const char *propname, u8 *out_values, size_t sz)
-{
- return -ENOSYS;
-}
-
-static inline int of_property_read_u16_array(const struct device_node *np,
- const char *propname, u16 *out_values, size_t sz)
-{
- return -ENOSYS;
-}
-
-static inline int of_property_read_u32_array(const struct device_node *np,
- const char *propname,
- u32 *out_values, size_t sz)
-{
- return -ENOSYS;
-}
-
-static inline int of_property_read_u64_array(const struct device_node *np,
- const char *propname,
- u64 *out_values, size_t sz)
-{
- return -ENOSYS;
-}
-
static inline int of_property_read_u32_index(const struct device_node *np,
const char *propname, u32 index, u32 *out_value)
{
@@ -865,18 +709,12 @@ static inline int of_property_read_string_helper(const struct device_node *np,
return -ENOSYS;
}
-static inline struct device_node *of_parse_phandle(const struct device_node *np,
- const char *phandle_name,
- int index)
-{
- return NULL;
-}
-
-static inline int of_parse_phandle_with_args(const struct device_node *np,
- const char *list_name,
- const char *cells_name,
- int index,
- struct of_phandle_args *out_args)
+static inline int __of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int cell_count,
+ int index,
+ struct of_phandle_args *out_args)
{
return -ENOSYS;
}
@@ -890,13 +728,6 @@ static inline int of_parse_phandle_with_args_map(const struct device_node *np,
return -ENOSYS;
}
-static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
- const char *list_name, int cells_count, int index,
- struct of_phandle_args *out_args)
-{
- return -ENOSYS;
-}
-
static inline int of_count_phandle_with_args(const struct device_node *np,
const char *list_name,
const char *cells_name)
@@ -1078,6 +909,117 @@ static inline bool of_node_is_type(const struct device_node *np, const char *typ
}
/**
+ * of_parse_phandle - Resolve a phandle property to a device_node pointer
+ * @np: Pointer to device node holding phandle property
+ * @phandle_name: Name of property holding a phandle value
+ * @index: For properties holding a table of phandles, this is the index into
+ * the table
+ *
+ * Return: The device_node pointer with refcount incremented. Use
+ * of_node_put() on it when done.
+ */
+static inline struct device_node *of_parse_phandle(const struct device_node *np,
+ const char *phandle_name,
+ int index)
+{
+ struct of_phandle_args args;
+
+ if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
+ index, &args))
+ return NULL;
+
+ return args.np;
+}
+
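A minimal usage sketch of the wrapper above; the "clocks" property name and the surrounding driver context (np) are assumptions, not taken from this patch:

        struct device_node *clk_np;

        /* Resolve the first phandle in the "clocks" property. */
        clk_np = of_parse_phandle(np, "clocks", 0);
        if (clk_np) {
                /* ... use clk_np ... */
                of_node_put(clk_np);    /* drop the reference taken above */
        }
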
+/**
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name: property name that specifies phandles' arguments count
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful for parsing lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args; on error, returns an appropriate
+ * errno value.
+ *
+ * The caller is responsible for calling of_node_put() on the returned
+ * out_args->np pointer.
+ *
+ * Example::
+ *
+ * phandle1: node1 {
+ * #list-cells = <2>;
+ * };
+ *
+ * phandle2: node2 {
+ * #list-cells = <1>;
+ * };
+ *
+ * node3 {
+ * list = <&phandle1 1 2 &phandle2 3>;
+ * };
+ *
+ * To get a device_node for the ``node2`` node, you may call this:
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
+ */
+static inline int of_parse_phandle_with_args(const struct device_node *np,
+ const char *list_name,
+ const char *cells_name,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ int cell_count = -1;
+
+ /* If cells_name is NULL we assume a cell count of 0 */
+ if (!cells_name)
+ cell_count = 0;
+
+ return __of_parse_phandle_with_args(np, list_name, cells_name,
+ cell_count, index, out_args);
+}
+
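Building on the kernel-doc example above, a hedged caller sketch; node3 stands in for a node pointer obtained elsewhere:

        struct of_phandle_args args;
        int ret;

        ret = of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
        if (!ret) {
                /* args.np points to node2 and args.args[0] == 3 */
                of_node_put(args.np);   /* caller must drop the reference */
        }
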
+/**
+ * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
+ * @np: pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cell_count: number of argument cells following the phandle
+ * @index: index of a phandle to parse out
+ * @out_args: optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful for parsing lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args; on error, returns an appropriate
+ * errno value.
+ *
+ * The caller is responsible for calling of_node_put() on the returned
+ * out_args->np pointer.
+ *
+ * Example::
+ *
+ * phandle1: node1 {
+ * };
+ *
+ * phandle2: node2 {
+ * };
+ *
+ * node3 {
+ * list = <&phandle1 0 2 &phandle2 2 3>;
+ * };
+ *
+ * To get a device_node for the ``node2`` node, you may call this:
+ * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
+ */
+static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
+ const char *list_name,
+ int cell_count,
+ int index,
+ struct of_phandle_args *out_args)
+{
+ return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
+ index, out_args);
+}
+
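A matching caller sketch for the fixed-args variant, again assuming node3 was resolved elsewhere:

        struct of_phandle_args args;

        /* Every entry in "list" carries exactly two argument cells. */
        if (!of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args)) {
                /* args.np points to node2; args.args holds { 2, 3 } */
                of_node_put(args.np);
        }
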
+/**
* of_property_count_u8_elems - Count the number of u8 elements in a property
*
* @np: device node from which the property value is to be read.
@@ -1236,6 +1178,130 @@ static inline bool of_property_read_bool(const struct device_node *np,
return prop ? true : false;
}
+/**
+ * of_property_read_u8_array - Find and read an array of u8 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 8-bit value(s) from
+ * it.
+ *
+ * The dts entry for the array should look like:
+ * ``property = /bits/ 8 <0x50 0x60 0x70>;``
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * out_values is modified only if a valid u8 value can be decoded.
+ */
+static inline int of_property_read_u8_array(const struct device_node *np,
+ const char *propname,
+ u8 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u8_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u16_array - Find and read an array of u16 from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 16-bit value(s) from
+ * it.
+ *
+ * The dts entry for the array should look like:
+ * ``property = /bits/ 16 <0x5000 0x6000 0x7000>;``
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * out_values is modified only if a valid u16 value can be decoded.
+ */
+static inline int of_property_read_u16_array(const struct device_node *np,
+ const char *propname,
+ u16 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u16_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u32_array - Find and read an array of 32 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 32-bit value(s) from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * out_values is modified only if a valid u32 value can be decoded.
+ */
+static inline int of_property_read_u32_array(const struct device_node *np,
+ const char *propname,
+ u32 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u32_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
+/**
+ * of_property_read_u64_array - Find and read an array of 64 bit integers
+ * from a property.
+ *
+ * @np: device node from which the property value is to be read.
+ * @propname: name of the property to be searched.
+ * @out_values: pointer to return value, modified only if return value is 0.
+ * @sz: number of array elements to read
+ *
+ * Search for a property in a device node and read 64-bit value(s) from
+ * it.
+ *
+ * Return: 0 on success, -EINVAL if the property does not exist,
+ * -ENODATA if property does not have a value, and -EOVERFLOW if the
+ * property data isn't large enough.
+ *
+ * out_values is modified only if a valid u64 value can be decoded.
+ */
+static inline int of_property_read_u64_array(const struct device_node *np,
+ const char *propname,
+ u64 *out_values, size_t sz)
+{
+ int ret = of_property_read_variable_u64_array(np, propname, out_values,
+ sz, 0);
+ if (ret >= 0)
+ return 0;
+ else
+ return ret;
+}
+
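One hedged usage sketch covers the whole u8/u16/u32/u64 family; the property name and calling context are hypothetical:

        u32 vals[3];
        int ret;

        /* Reads exactly ARRAY_SIZE(vals) elements; a shorter property
         * fails with -EOVERFLOW and leaves vals untouched. */
        ret = of_property_read_u32_array(np, "example-values", vals,
                                         ARRAY_SIZE(vals));
        if (ret)
                return ret;
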
static inline int of_property_read_u8(const struct device_node *np,
const char *propname,
u8 *out_value)
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index cf48983d3c86..d69ad5bb1eb1 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -58,10 +58,9 @@ extern int of_flat_dt_is_compatible(unsigned long node, const char *name);
extern unsigned long of_get_flat_dt_root(void);
extern uint32_t of_get_flat_dt_phandle(unsigned long node);
-extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
- int depth, void *data);
-extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
- int depth, void *data);
+extern int early_init_dt_scan_chosen(char *cmdline);
+extern int early_init_dt_scan_memory(void);
+extern void early_init_dt_check_for_usable_mem_range(void);
extern int early_init_dt_scan_chosen_stdout(void);
extern void early_init_fdt_scan_reserved_mem(void);
extern void early_init_fdt_reserve_self(void);
@@ -69,8 +68,7 @@ extern void early_init_dt_add_memory_arch(u64 base, u64 size);
extern u64 dt_mem_next_cell(int s, const __be32 **cellp);
/* Early flat tree scan hooks */
-extern int early_init_dt_scan_root(unsigned long node, const char *uname,
- int depth, void *data);
+extern int early_init_dt_scan_root(void);
extern bool early_init_dt_scan(void *params);
extern bool early_init_dt_verify(void *params);
@@ -86,6 +84,7 @@ extern void unflatten_and_copy_device_tree(void);
extern void early_init_devtree(void *);
extern void early_get_first_memblock_info(void *, phys_addr_t *);
#else /* CONFIG_OF_EARLY_FLATTREE */
+static inline void early_init_dt_check_for_usable_mem_range(void) {}
static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; }
static inline void early_init_fdt_scan_reserved_mem(void) {}
static inline void early_init_fdt_reserve_self(void) {}
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index b5f14d581113..1c3b6e5c8bfd 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -68,9 +68,6 @@
* might lose their PG_swapbacked flag when they simply can be dropped (e.g. as
* a result of MADV_FREE).
*
- * PG_uptodate tells whether the page's contents is valid. When a read
- * completes, the page becomes uptodate, unless a disk I/O error happened.
- *
* PG_referenced, PG_reclaim are used for page reclaim for anonymous and
* file-backed pagecache (see mm/vmscan.c).
*
@@ -383,7 +380,7 @@ static __always_inline int TestClearPage##uname(struct page *page) \
TESTCLEARFLAG(uname, lname, policy)
#define TESTPAGEFLAG_FALSE(uname, lname) \
-static inline bool folio_test_##lname(const struct folio *folio) { return 0; } \
+static inline bool folio_test_##lname(const struct folio *folio) { return false; } \
static inline int Page##uname(const struct page *page) { return 0; }
#define SETPAGEFLAG_NOOP(uname, lname) \
@@ -522,7 +519,11 @@ PAGEFLAG_FALSE(Uncached, uncached)
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
+#define MAGIC_HWPOISON 0x48575053U /* HWPS */
+extern void SetPageHWPoisonTakenOff(struct page *page);
+extern void ClearPageHWPoisonTakenOff(struct page *page);
extern bool take_page_off_buddy(struct page *page);
+extern bool put_page_back_buddy(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison, hwpoison)
#define __PG_HWPOISON 0
@@ -615,6 +616,16 @@ TESTPAGEFLAG_FALSE(Ksm, ksm)
u64 stable_page_flags(struct page *page);
+/**
+ * folio_test_uptodate - Is this folio up to date?
+ * @folio: The folio.
+ *
+ * The uptodate flag is set on a folio when every byte in the folio is
+ * at least as new as the corresponding bytes on storage. Anonymous
+ * and CoW folios are always uptodate. If the folio is not uptodate,
+ * some of the bytes in it may be; see the is_partially_uptodate()
+ * address_space operation.
+ */
static inline bool folio_test_uptodate(struct folio *folio)
{
bool ret = test_bit(PG_uptodate, folio_flags(folio, 0));
@@ -909,43 +920,6 @@ extern bool is_free_buddy_page(struct page *page);
__PAGEFLAG(Isolated, isolated, PF_ANY);
-/*
- * If network-based swap is enabled, sl*b must keep track of whether pages
- * were allocated from pfmemalloc reserves.
- */
-static inline int PageSlabPfmemalloc(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- return PageActive(page);
-}
-
-/*
- * A version of PageSlabPfmemalloc() for opportunistic checks where the page
- * might have been freed under us and not be a PageSlab anymore.
- */
-static inline int __PageSlabPfmemalloc(struct page *page)
-{
- return PageActive(page);
-}
-
-static inline void SetPageSlabPfmemalloc(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- SetPageActive(page);
-}
-
-static inline void __ClearPageSlabPfmemalloc(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- __ClearPageActive(page);
-}
-
-static inline void ClearPageSlabPfmemalloc(struct page *page)
-{
- VM_BUG_ON_PAGE(!PageSlab(page), page);
- ClearPageActive(page);
-}
-
#ifdef CONFIG_MMU
#define __PG_MLOCKED (1UL << PG_mlocked)
#else
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index 83abf95e9fa7..4663dfed1293 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -13,7 +13,6 @@
* If there is not enough space to store Idle and Young bits in page flags, use
* page ext flags instead.
*/
-extern struct page_ext_operations page_idle_ops;
static inline bool folio_test_young(struct folio *folio)
{
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
new file mode 100644
index 000000000000..01e16c7696ec
--- /dev/null
+++ b/include/linux/page_table_check.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2021, Google LLC.
+ * Pasha Tatashin <pasha.tatashin@soleen.com>
+ */
+#ifndef __LINUX_PAGE_TABLE_CHECK_H
+#define __LINUX_PAGE_TABLE_CHECK_H
+
+#ifdef CONFIG_PAGE_TABLE_CHECK
+#include <linux/jump_label.h>
+
+extern struct static_key_true page_table_check_disabled;
+extern struct page_ext_operations page_table_check_ops;
+
+void __page_table_check_zero(struct page *page, unsigned int order);
+void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t pte);
+void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
+ pmd_t pmd);
+void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
+ pud_t pud);
+void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd);
+void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud);
+void __page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd);
+
+static inline void page_table_check_alloc(struct page *page, unsigned int order)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_zero(page, order);
+}
+
+static inline void page_table_check_free(struct page *page, unsigned int order)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_zero(page, order);
+}
+
+static inline void page_table_check_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t pte)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_clear(mm, addr, pte);
+}
+
+static inline void page_table_check_pmd_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pmd_clear(mm, addr, pmd);
+}
+
+static inline void page_table_check_pud_clear(struct mm_struct *mm,
+ unsigned long addr, pud_t pud)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pud_clear(mm, addr, pud);
+}
+
+static inline void page_table_check_pte_set(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_set(mm, addr, ptep, pte);
+}
+
+static inline void page_table_check_pmd_set(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp,
+ pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pmd_set(mm, addr, pmdp, pmd);
+}
+
+static inline void page_table_check_pud_set(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp,
+ pud_t pud)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pud_set(mm, addr, pudp, pud);
+}
+
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+ if (static_branch_likely(&page_table_check_disabled))
+ return;
+
+ __page_table_check_pte_clear_range(mm, addr, pmd);
+}
+
+#else
+
+static inline void page_table_check_alloc(struct page *page, unsigned int order)
+{
+}
+
+static inline void page_table_check_free(struct page *page, unsigned int order)
+{
+}
+
+static inline void page_table_check_pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t pte)
+{
+}
+
+static inline void page_table_check_pmd_clear(struct mm_struct *mm,
+ unsigned long addr, pmd_t pmd)
+{
+}
+
+static inline void page_table_check_pud_clear(struct mm_struct *mm,
+ unsigned long addr, pud_t pud)
+{
+}
+
+static inline void page_table_check_pte_set(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte)
+{
+}
+
+static inline void page_table_check_pmd_set(struct mm_struct *mm,
+ unsigned long addr, pmd_t *pmdp,
+ pmd_t pmd)
+{
+}
+
+static inline void page_table_check_pud_set(struct mm_struct *mm,
+ unsigned long addr, pud_t *pudp,
+ pud_t pud)
+{
+}
+
+static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
+ unsigned long addr,
+ pmd_t pmd)
+{
+}
+
+#endif /* CONFIG_PAGE_TABLE_CHECK */
+#endif /* __LINUX_PAGE_TABLE_CHECK_H */
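To illustrate how these hooks are meant to be consumed, here is a sketch of an architecture-side wrapper, loosely modeled on the converted architectures; __set_pte_at() is a placeholder for the arch's native helper, not part of this patch:

        static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                                      pte_t *ptep, pte_t pte)
        {
                /* no-op unless the page table check is enabled */
                page_table_check_pte_set(mm, addr, ptep, pte);
                __set_pte_at(mm, addr, ptep, pte);
        }
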
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index d150a9082b31..270bf5136c34 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -16,7 +16,7 @@
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>
-struct pagevec;
+struct folio_batch;
static inline bool mapping_empty(struct address_space *mapping)
{
@@ -511,15 +511,6 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
mapping_gfp_mask(mapping));
}
-/* Does this page contain this index? */
-static inline bool thp_contains(struct page *head, pgoff_t index)
-{
- /* HugeTLBfs indexes the page cache in units of hpage_size */
- if (PageHuge(head))
- return head->index == index;
- return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
-}
-
#define swapcache_index(folio) __page_file_index(&(folio)->page)
/**
@@ -600,8 +591,6 @@ static inline struct page *find_subpage(struct page *head, pgoff_t index)
return head + (index & (thp_nr_pages(head) - 1));
}
-unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
- pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
pgoff_t end, unsigned int nr_pages,
struct page **pages);
@@ -637,8 +626,10 @@ static inline struct page *grab_cache_page(struct address_space *mapping,
return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}
-extern struct page * read_cache_page(struct address_space *mapping,
- pgoff_t index, filler_t *filler, void *data);
+struct folio *read_cache_folio(struct address_space *, pgoff_t index,
+ filler_t *filler, void *data);
+struct page *read_cache_page(struct address_space *, pgoff_t index,
+ filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
@@ -650,6 +641,12 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
return read_cache_page(mapping, index, NULL, data);
}
+static inline struct folio *read_mapping_folio(struct address_space *mapping,
+ pgoff_t index, void *data)
+{
+ return read_cache_folio(mapping, index, NULL, data);
+}
+
/*
* Get index of the page within radix-tree (but not for hugetlb pages).
* (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
@@ -867,7 +864,7 @@ static inline int wait_on_page_locked_killable(struct page *page)
return folio_wait_locked_killable(page_folio(page));
}
-int put_and_wait_on_page_locked(struct page *page, int state);
+int folio_put_wait_locked(struct folio *folio, int state);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
@@ -883,11 +880,6 @@ static inline void __set_page_dirty(struct page *page,
}
void folio_account_cleaned(struct folio *folio, struct address_space *mapping,
struct bdi_writeback *wb);
-static inline void account_page_cleaned(struct page *page,
- struct address_space *mapping, struct bdi_writeback *wb)
-{
- return folio_account_cleaned(page_folio(page), mapping, wb);
-}
void __folio_cancel_dirty(struct folio *folio);
static inline void folio_cancel_dirty(struct folio *folio)
{
@@ -934,11 +926,18 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
pgoff_t index, gfp_t gfp);
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
pgoff_t index, gfp_t gfp);
-extern void delete_from_page_cache(struct page *page);
-extern void __delete_from_page_cache(struct page *page, void *shadow);
+void filemap_remove_folio(struct folio *folio);
+void delete_from_page_cache(struct page *page);
+void __filemap_remove_folio(struct folio *folio, void *shadow);
+static inline void __delete_from_page_cache(struct page *page, void *shadow)
+{
+ __filemap_remove_folio(page_folio(page), shadow);
+}
void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping,
- struct pagevec *pvec);
+ struct folio_batch *fbatch);
+int try_to_release_page(struct page *page, gfp_t gfp);
+bool filemap_release_folio(struct folio *folio, gfp_t gfp);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
int whence);
@@ -962,6 +961,35 @@ static inline int add_to_page_cache(struct page *page,
int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
pgoff_t index, gfp_t gfp, void **shadowp);
+bool filemap_range_has_writeback(struct address_space *mapping,
+ loff_t start_byte, loff_t end_byte);
+
+/**
+ * filemap_range_needs_writeback - check if range potentially needs writeback
+ * @mapping: address space within which to check
+ * @start_byte: offset in bytes where the range starts
+ * @end_byte: offset in bytes where the range ends (inclusive)
+ *
+ * Find at least one page in the supplied range. This is usually used to
+ * check whether direct writing in this range will trigger a writeback. Used
+ * by O_DIRECT read/write with IOCB_NOWAIT, to see if the caller needs to do
+ * filemap_write_and_wait_range() before proceeding.
+ *
+ * Return: %true if the caller should do filemap_write_and_wait_range() before
+ * doing O_DIRECT to a page in this range, %false otherwise.
+ */
+static inline bool filemap_range_needs_writeback(struct address_space *mapping,
+ loff_t start_byte,
+ loff_t end_byte)
+{
+ if (!mapping->nrpages)
+ return false;
+ if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
+ !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+ return false;
+ return filemap_range_has_writeback(mapping, start_byte, end_byte);
+}
+
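A sketch of the intended fast path; iocb, mapping, pos and count are assumed to come from a read/write iter context:

        if (iocb->ki_flags & IOCB_NOWAIT) {
                if (filemap_range_needs_writeback(mapping, pos, pos + count - 1))
                        return -EAGAIN; /* would block on writeback */
        } else {
                ret = filemap_write_and_wait_range(mapping, pos,
                                                   pos + count - 1);
                if (ret)
                        return ret;
        }
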
/**
* struct readahead_control - Describes a readahead request.
*
@@ -1001,7 +1029,7 @@ struct readahead_control {
void page_cache_ra_unbounded(struct readahead_control *,
unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
-void page_cache_async_ra(struct readahead_control *, struct page *,
+void page_cache_async_ra(struct readahead_control *, struct folio *,
unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
loff_t new_start, size_t new_len);
@@ -1048,7 +1076,7 @@ void page_cache_async_readahead(struct address_space *mapping,
struct page *page, pgoff_t index, unsigned long req_count)
{
DEFINE_READAHEAD(ractl, file, ra, mapping, index);
- page_cache_async_ra(&ractl, page, req_count);
+ page_cache_async_ra(&ractl, page_folio(page), req_count);
}
static inline struct folio *__readahead_folio(struct readahead_control *ractl)
@@ -1125,16 +1153,6 @@ static inline unsigned int __readahead_batch(struct readahead_control *rac,
VM_BUG_ON_PAGE(PageTail(page), page);
array[i++] = page;
rac->_batch_count += thp_nr_pages(page);
-
- /*
- * The page cache isn't using multi-index entries yet,
- * so the xas cursor needs to be manually moved to the
- * next index. This can be removed once the page cache
- * is converted.
- */
- if (PageHead(page))
- xas_set(&xas, rac->_index + rac->_batch_count);
-
if (i == array_sz)
break;
}
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h
index 7f3f19065a9f..67b1246f136b 100644
--- a/include/linux/pagevec.h
+++ b/include/linux/pagevec.h
@@ -15,8 +15,10 @@
#define PAGEVEC_SIZE 15
struct page;
+struct folio;
struct address_space;
+/* Layout must match folio_batch */
struct pagevec {
unsigned char nr;
bool percpu_pvec_drained;
@@ -25,7 +27,6 @@ struct pagevec {
void __pagevec_release(struct pagevec *pvec);
void __pagevec_lru_add(struct pagevec *pvec);
-void pagevec_remove_exceptionals(struct pagevec *pvec);
unsigned pagevec_lookup_range(struct pagevec *pvec,
struct address_space *mapping,
pgoff_t *start, pgoff_t end);
@@ -81,4 +82,69 @@ static inline void pagevec_release(struct pagevec *pvec)
__pagevec_release(pvec);
}
+/**
+ * struct folio_batch - A collection of folios.
+ *
+ * The folio_batch is used to amortise the cost of retrieving and
+ * operating on a set of folios. The order of folios in the batch may be
+ * significant (e.g. delete_from_page_cache_batch()). Some users of the
+ * folio_batch store "exceptional" entries in it which can be removed
+ * by calling folio_batch_remove_exceptionals().
+ */
+struct folio_batch {
+ unsigned char nr;
+ bool percpu_pvec_drained;
+ struct folio *folios[PAGEVEC_SIZE];
+};
+
+/* Layout must match pagevec */
+static_assert(sizeof(struct pagevec) == sizeof(struct folio_batch));
+static_assert(offsetof(struct pagevec, pages) ==
+ offsetof(struct folio_batch, folios));
+
+/**
+ * folio_batch_init() - Initialise a batch of folios
+ * @fbatch: The folio batch.
+ *
+ * A freshly initialised folio_batch contains zero folios.
+ */
+static inline void folio_batch_init(struct folio_batch *fbatch)
+{
+ fbatch->nr = 0;
+ fbatch->percpu_pvec_drained = false;
+}
+
+static inline unsigned int folio_batch_count(struct folio_batch *fbatch)
+{
+ return fbatch->nr;
+}
+
+static inline unsigned int fbatch_space(struct folio_batch *fbatch)
+{
+ return PAGEVEC_SIZE - fbatch->nr;
+}
+
+/**
+ * folio_batch_add() - Add a folio to a batch.
+ * @fbatch: The folio batch.
+ * @folio: The folio to add.
+ *
+ * The folio is added to the end of the batch.
+ * The batch must have previously been initialised using folio_batch_init().
+ *
+ * Return: The number of slots still available.
+ */
+static inline unsigned folio_batch_add(struct folio_batch *fbatch,
+ struct folio *folio)
+{
+ fbatch->folios[fbatch->nr++] = folio;
+ return fbatch_space(fbatch);
+}
+
+static inline void folio_batch_release(struct folio_batch *fbatch)
+{
+ pagevec_release((struct pagevec *)fbatch);
+}
+
+void folio_batch_remove_exceptionals(struct folio_batch *fbatch);
#endif /* _LINUX_PAGEVEC_H */
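A hedged sketch of the intended calling pattern; how each folio is obtained is left as a placeholder:

        struct folio_batch fbatch;
        struct folio *folio;

        folio_batch_init(&fbatch);
        /* ... obtain each folio from some lookup (placeholder) ... */
        if (!folio_batch_add(&fbatch, folio)) {
                /* batch is full: operate on fbatch.folios, then drop refs */
                folio_batch_release(&fbatch);
        }
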
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 18a75c8e615c..8253a5413d7c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -155,6 +155,15 @@ enum pci_interrupt_pin {
#define PCI_NUM_INTX 4
/*
+ * Reading from a device that doesn't respond typically returns ~0. A
+ * successful read from a device may also return ~0, so you need additional
+ * information to reliably identify errors.
+ */
+#define PCI_ERROR_RESPONSE (~0ULL)
+#define PCI_SET_ERROR_RESPONSE(val) (*(val) = ((typeof(*(val))) PCI_ERROR_RESPONSE))
+#define PCI_POSSIBLE_ERROR(val) ((val) == ((typeof(val)) PCI_ERROR_RESPONSE))
+
+/*
* pci_power_t values must match the bits in the Capabilities PME_Support
* and Control/Status PowerState fields in the Power Management capability.
*/
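A hedged sketch of how the new helper macros are meant to be used; pdev is an assumed struct pci_dev pointer:

        u32 id;

        pci_read_config_dword(pdev, PCI_VENDOR_ID, &id);
        if (PCI_POSSIBLE_ERROR(id)) {
                /* all-ones read: the device probably did not respond */
        }
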
@@ -425,7 +434,8 @@ struct pci_dev {
unsigned int ats_enabled:1; /* Address Translation Svc */
unsigned int pasid_enabled:1; /* Process Address Space ID */
unsigned int pri_enabled:1; /* Page Request Interface */
- unsigned int is_managed:1;
+ unsigned int is_managed:1; /* Managed via devres */
+ unsigned int is_msi_managed:1; /* MSI release via devres installed */
unsigned int needs_freset:1; /* Requires fundamental reset */
unsigned int state_saved:1;
unsigned int is_physfn:1;
@@ -455,6 +465,7 @@ struct pci_dev {
unsigned int link_active_reporting:1;/* Device capable of reporting link active */
unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
+ unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
@@ -473,7 +484,8 @@ struct pci_dev {
u8 ptm_granularity;
#endif
#ifdef CONFIG_PCI_MSI
- const struct attribute_group **msi_irq_groups;
+ void __iomem *msix_base;
+ raw_spinlock_t msi_lock;
#endif
struct pci_vpd vpd;
#ifdef CONFIG_PCIE_DPC
@@ -1775,7 +1787,10 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
struct pci_dev *from)
{ return NULL; }
-#define pci_dev_present(ids) (0)
+
+static inline int pci_dev_present(const struct pci_device_id *ids)
+{ return 0; }
+
#define no_pci_devices() (1)
#define pci_dev_put(dev) do { } while (0)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index b5248f27910e..aad54c666407 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1965,24 +1965,6 @@
#define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003
#define PCI_VENDOR_ID_MOXA 0x1393
-#define PCI_DEVICE_ID_MOXA_RC7000 0x0001
-#define PCI_DEVICE_ID_MOXA_CP102 0x1020
-#define PCI_DEVICE_ID_MOXA_CP102UL 0x1021
-#define PCI_DEVICE_ID_MOXA_CP102U 0x1022
-#define PCI_DEVICE_ID_MOXA_C104 0x1040
-#define PCI_DEVICE_ID_MOXA_CP104U 0x1041
-#define PCI_DEVICE_ID_MOXA_CP104JU 0x1042
-#define PCI_DEVICE_ID_MOXA_CP104EL 0x1043
-#define PCI_DEVICE_ID_MOXA_CT114 0x1140
-#define PCI_DEVICE_ID_MOXA_CP114 0x1141
-#define PCI_DEVICE_ID_MOXA_CP118U 0x1180
-#define PCI_DEVICE_ID_MOXA_CP118EL 0x1181
-#define PCI_DEVICE_ID_MOXA_CP132 0x1320
-#define PCI_DEVICE_ID_MOXA_CP132U 0x1321
-#define PCI_DEVICE_ID_MOXA_CP134U 0x1340
-#define PCI_DEVICE_ID_MOXA_C168 0x1680
-#define PCI_DEVICE_ID_MOXA_CP168U 0x1681
-#define PCI_DEVICE_ID_MOXA_CP168EL 0x1682
#define PCI_DEVICE_ID_MOXA_CP204J 0x2040
#define PCI_DEVICE_ID_MOXA_C218 0x2180
#define PCI_DEVICE_ID_MOXA_C320 0x3200
@@ -2636,8 +2618,8 @@
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
-#define PCI_DEVICE_ID_INTEL_PXH_1 0x032A
-#define PCI_DEVICE_ID_INTEL_PXHV 0x032C
+#define PCI_DEVICE_ID_INTEL_PXH_1 0x032a
+#define PCI_DEVICE_ID_INTEL_PXHV 0x032c
#define PCI_DEVICE_ID_INTEL_80332_0 0x0330
#define PCI_DEVICE_ID_INTEL_80332_1 0x0332
#define PCI_DEVICE_ID_INTEL_80333_0 0x0370
@@ -2655,14 +2637,14 @@
#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822
#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
-#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
-#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095E
+#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084f
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095e
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085
-#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F
+#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108f
#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
@@ -2756,12 +2738,6 @@
#define PCI_DEVICE_ID_INTEL_82801EB_11 0x24db
#define PCI_DEVICE_ID_INTEL_82801EB_12 0x24dc
#define PCI_DEVICE_ID_INTEL_82801EB_13 0x24dd
-#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
-#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2
-#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
-#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
-#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
-#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
#define PCI_DEVICE_ID_INTEL_82820_HB 0x2500
#define PCI_DEVICE_ID_INTEL_82820_UP_HB 0x2501
#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
@@ -2776,14 +2752,15 @@
#define PCI_DEVICE_ID_INTEL_82915G_IG 0x2582
#define PCI_DEVICE_ID_INTEL_82915GM_HB 0x2590
#define PCI_DEVICE_ID_INTEL_82915GM_IG 0x2592
-#define PCI_DEVICE_ID_INTEL_5000_ERR 0x25F0
-#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25F5
-#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25F6
-#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
-#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
-#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778
-#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27A0
-#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27A2
+#define PCI_DEVICE_ID_INTEL_ESB_1 0x25a1
+#define PCI_DEVICE_ID_INTEL_ESB_2 0x25a2
+#define PCI_DEVICE_ID_INTEL_ESB_4 0x25a4
+#define PCI_DEVICE_ID_INTEL_ESB_5 0x25a6
+#define PCI_DEVICE_ID_INTEL_ESB_9 0x25ab
+#define PCI_DEVICE_ID_INTEL_ESB_10 0x25ac
+#define PCI_DEVICE_ID_INTEL_5000_ERR 0x25f0
+#define PCI_DEVICE_ID_INTEL_5000_FBD0 0x25f5
+#define PCI_DEVICE_ID_INTEL_5000_FBD1 0x25f6
#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
@@ -2795,6 +2772,11 @@
#define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698
#define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b
#define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e
+#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
+#define PCI_DEVICE_ID_INTEL_82945G_IG 0x2772
+#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778
+#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27a0
+#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27a2
#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
@@ -2847,7 +2829,7 @@
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_QPI_PHY0 0x2c91
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MCR 0x2c98
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TAD 0x2c99
-#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9C
+#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_TEST 0x2c9c
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_CTRL 0x2ca0
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_ADDR 0x2ca1
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH0_RANK 0x2ca2
@@ -2959,16 +2941,16 @@
#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
-#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
-#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
-#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
-#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030
#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035
#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036
-#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
#define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031
#define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032
+#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
+#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
+#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
+#define PCI_DEVICE_ID_INTEL_5100_22 0x65f6
+#define PCI_DEVICE_ID_INTEL_IOAT_SCNB 0x65ff
#define PCI_DEVICE_ID_INTEL_82371SB_0 0x7000
#define PCI_DEVICE_ID_INTEL_82371SB_1 0x7010
#define PCI_DEVICE_ID_INTEL_82371SB_2 0x7020
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index ae4004e7957e..f1ec5ad1351c 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -94,10 +94,7 @@ extern const char * const pcpu_fc_names[PCPU_FC_NR];
extern enum pcpu_fc pcpu_chosen_fc;
-typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
- size_t align);
-typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
-typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
+typedef int (pcpu_fc_cpu_to_node_fn_t)(int cpu);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);
extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
@@ -111,15 +108,13 @@ extern void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
extern int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_t atom_size,
pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
- pcpu_fc_alloc_fn_t alloc_fn,
- pcpu_fc_free_fn_t free_fn);
+ pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
+void __init pcpu_populate_pte(unsigned long addr);
extern int __init pcpu_page_first_chunk(size_t reserved_size,
- pcpu_fc_alloc_fn_t alloc_fn,
- pcpu_fc_free_fn_t free_fn,
- pcpu_fc_populate_pte_fn_t populate_pte_fn);
+ pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn);
#endif
extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align) __alloc_size(1);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 474eaecd63ba..733649184b27 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -26,11 +26,13 @@
# include <asm/local64.h>
#endif
+#define PERF_GUEST_ACTIVE 0x01
+#define PERF_GUEST_USER 0x02
+
struct perf_guest_info_callbacks {
- int (*is_in_guest)(void);
- int (*is_user_mode)(void);
- unsigned long (*get_guest_ip)(void);
- void (*handle_intel_pt_intr)(void);
+ unsigned int (*state)(void);
+ unsigned long (*get_ip)(void);
+ unsigned int (*handle_intel_pt_intr)(void);
};
#ifdef CONFIG_HAVE_HW_BREAKPOINT
@@ -691,18 +693,6 @@ struct perf_event {
u64 total_time_running;
u64 tstamp;
- /*
- * timestamp shadows the actual context timing but it can
- * be safely used in NMI interrupt context. It reflects the
- * context time as it was when the event was last scheduled in,
- * or when ctx_sched_in failed to schedule the event because we
- * run out of PMC.
- *
- * ctx_time already accounts for ctx->timestamp. Therefore to
- * compute ctx_time for a sample, simply add perf_clock().
- */
- u64 shadow_ctx_time;
-
struct perf_event_attr attr;
u16 header_size;
u16 id_header_size;
@@ -850,6 +840,7 @@ struct perf_event_context {
*/
u64 time;
u64 timestamp;
+ u64 timeoffset;
/*
* These fields let us detect when two contexts have both
@@ -932,6 +923,8 @@ struct bpf_perf_event_data_kern {
struct perf_cgroup_info {
u64 time;
u64 timestamp;
+ u64 timeoffset;
+ int active;
};
struct perf_cgroup {
@@ -1251,9 +1244,32 @@ extern void perf_event_bpf_event(struct bpf_prog *prog,
enum perf_bpf_event_type type,
u16 flags);
-extern struct perf_guest_info_callbacks *perf_guest_cbs;
-extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
-extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
+#ifdef CONFIG_GUEST_PERF_EVENTS
+extern struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
+
+DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
+DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
+DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
+
+static inline unsigned int perf_guest_state(void)
+{
+ return static_call(__perf_guest_state)();
+}
+static inline unsigned long perf_guest_get_ip(void)
+{
+ return static_call(__perf_guest_get_ip)();
+}
+static inline unsigned int perf_guest_handle_intel_pt_intr(void)
+{
+ return static_call(__perf_guest_handle_intel_pt_intr)();
+}
+extern void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
+extern void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs);
+#else
+static inline unsigned int perf_guest_state(void) { return 0; }
+static inline unsigned long perf_guest_get_ip(void) { return 0; }
+static inline unsigned int perf_guest_handle_intel_pt_intr(void) { return 0; }
+#endif /* CONFIG_GUEST_PERF_EVENTS */
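A sketch of the registration side; the kvm_* callbacks are placeholders for a hypervisor's implementations, not defined by this patch:

        static struct perf_guest_info_callbacks kvm_guest_cbs = {
                .state                  = kvm_guest_state,
                .get_ip                 = kvm_guest_get_ip,
                .handle_intel_pt_intr   = kvm_handle_intel_pt_intr,
        };

        /* e.g. from the hypervisor module's init path: */
        perf_register_guest_info_callbacks(&kvm_guest_cbs);
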
extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
@@ -1497,11 +1513,6 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr) { }
static inline void
perf_bp_event(struct perf_event *event, void *data) { }
-static inline int perf_register_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
-static inline int perf_unregister_guest_info_callbacks
-(struct perf_guest_info_callbacks *callbacks) { return 0; }
-
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data);
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index e24d2c992b11..f4f4077b97aa 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -62,6 +62,7 @@ static inline unsigned long pte_index(unsigned long address)
{
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
+#define pte_index pte_index
#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
@@ -258,6 +259,14 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef __HAVE_ARCH_PTEP_CLEAR
+static inline void ptep_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_clear(mm, addr, ptep);
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 7c7e627503d2..07481bb87d4e 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -86,4 +86,9 @@ extern struct pid_namespace *task_active_pid_ns(struct task_struct *tsk);
void pidhash_init(void);
void pid_idr_init(void);
+static inline bool task_is_in_init_pid_ns(struct task_struct *tsk)
+{
+ return task_active_pid_ns(tsk) == &init_pid_ns;
+}
+
#endif /* _LINUX_PID_NS_H */
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index eee0e3948537..2422211d6a5a 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -91,6 +91,8 @@ struct pinctrl_map;
* configuration (eg. the currently selected mux function) drive values on
* the line. Use argument 1 to enable output mode, argument 0 to disable
* it.
+ * @PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS: this will configure the output impedance
+ * of the pin with the value passed as argument. The argument is in ohms.
* @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset
* @PIN_CONFIG_POWER_SOURCE: if the pin can select between different power
* supplies, the argument to this parameter (on a custom format) tells
@@ -129,6 +131,7 @@ enum pin_config_param {
PIN_CONFIG_MODE_PWM,
PIN_CONFIG_OUTPUT,
PIN_CONFIG_OUTPUT_ENABLE,
+ PIN_CONFIG_OUTPUT_IMPEDANCE_OHMS,
PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_POWER_SOURCE,
PIN_CONFIG_SKEW_DELAY,
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index fc5642431b92..c00c618ef290 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -238,10 +238,6 @@ void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
-extern unsigned int pipe_max_size;
-extern unsigned long pipe_user_pages_hard;
-extern unsigned long pipe_user_pages_soft;
-
/* Wait for a pipe to be readable/writable while dropping the pipe lock */
void pipe_wait_readable(struct pipe_inode_info *);
void pipe_wait_writable(struct pipe_inode_info *);
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index 174601554b06..f9c5ac80d59b 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -152,14 +152,6 @@ struct packet_stacked_data
};
#define PSD_POOL_SIZE 64
-struct pktcdvd_kobj
-{
- struct kobject kobj;
- struct pktcdvd_device *pd;
-};
-#define to_pktcdvdkobj(_k) \
- ((struct pktcdvd_kobj*)container_of(_k,struct pktcdvd_kobj,kobj))
-
struct pktcdvd_device
{
struct block_device *bdev; /* dev attached */
@@ -183,6 +175,8 @@ struct pktcdvd_device
spinlock_t lock; /* Serialize access to bio_queue */
struct rb_root bio_queue; /* Work queue of bios we need to handle */
int bio_queue_size; /* Number of nodes in bio_queue */
+ bool congested; /* Someone is waiting for bio_queue_size
+ * to drop. */
sector_t current_sector; /* Keep track of where the elevator is */
atomic_t scan_queue; /* Set to non-zero when pkt_handle_queue */
/* needs to be run. */
@@ -195,8 +189,6 @@ struct pktcdvd_device
int write_congestion_on;
struct device *dev; /* sysfs pktcdvd[0-7] dev */
- struct pktcdvd_kobj *kobj_stat; /* sysfs pktcdvd[0-7]/stat/ */
- struct pktcdvd_kobj *kobj_wqueue; /* sysfs pktcdvd[0-7]/write_queue/ */
struct dentry *dfs_d_root; /* debugfs: devname directory */
struct dentry *dfs_f_info; /* debugfs: info file */
diff --git a/include/linux/platform_data/ad5755.h b/include/linux/platform_data/ad5755.h
deleted file mode 100644
index e371e08f04bc..000000000000
--- a/include/linux/platform_data/ad5755.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2012 Analog Devices Inc.
- */
-#ifndef __LINUX_PLATFORM_DATA_AD5755_H__
-#define __LINUX_PLATFORM_DATA_AD5755_H__
-
-enum ad5755_mode {
- AD5755_MODE_VOLTAGE_0V_5V = 0,
- AD5755_MODE_VOLTAGE_0V_10V = 1,
- AD5755_MODE_VOLTAGE_PLUSMINUS_5V = 2,
- AD5755_MODE_VOLTAGE_PLUSMINUS_10V = 3,
- AD5755_MODE_CURRENT_4mA_20mA = 4,
- AD5755_MODE_CURRENT_0mA_20mA = 5,
- AD5755_MODE_CURRENT_0mA_24mA = 6,
-};
-
-enum ad5755_dc_dc_phase {
- AD5755_DC_DC_PHASE_ALL_SAME_EDGE = 0,
- AD5755_DC_DC_PHASE_A_B_SAME_EDGE_C_D_OPP_EDGE = 1,
- AD5755_DC_DC_PHASE_A_C_SAME_EDGE_B_D_OPP_EDGE = 2,
- AD5755_DC_DC_PHASE_90_DEGREE = 3,
-};
-
-enum ad5755_dc_dc_freq {
- AD5755_DC_DC_FREQ_250kHZ = 0,
- AD5755_DC_DC_FREQ_410kHZ = 1,
- AD5755_DC_DC_FREQ_650kHZ = 2,
-};
-
-enum ad5755_dc_dc_maxv {
- AD5755_DC_DC_MAXV_23V = 0,
- AD5755_DC_DC_MAXV_24V5 = 1,
- AD5755_DC_DC_MAXV_27V = 2,
- AD5755_DC_DC_MAXV_29V5 = 3,
-};
-
-enum ad5755_slew_rate {
- AD5755_SLEW_RATE_64k = 0,
- AD5755_SLEW_RATE_32k = 1,
- AD5755_SLEW_RATE_16k = 2,
- AD5755_SLEW_RATE_8k = 3,
- AD5755_SLEW_RATE_4k = 4,
- AD5755_SLEW_RATE_2k = 5,
- AD5755_SLEW_RATE_1k = 6,
- AD5755_SLEW_RATE_500 = 7,
- AD5755_SLEW_RATE_250 = 8,
- AD5755_SLEW_RATE_125 = 9,
- AD5755_SLEW_RATE_64 = 10,
- AD5755_SLEW_RATE_32 = 11,
- AD5755_SLEW_RATE_16 = 12,
- AD5755_SLEW_RATE_8 = 13,
- AD5755_SLEW_RATE_4 = 14,
- AD5755_SLEW_RATE_0_5 = 15,
-};
-
-enum ad5755_slew_step_size {
- AD5755_SLEW_STEP_SIZE_1 = 0,
- AD5755_SLEW_STEP_SIZE_2 = 1,
- AD5755_SLEW_STEP_SIZE_4 = 2,
- AD5755_SLEW_STEP_SIZE_8 = 3,
- AD5755_SLEW_STEP_SIZE_16 = 4,
- AD5755_SLEW_STEP_SIZE_32 = 5,
- AD5755_SLEW_STEP_SIZE_64 = 6,
- AD5755_SLEW_STEP_SIZE_128 = 7,
- AD5755_SLEW_STEP_SIZE_256 = 8,
-};
-
-/**
- * struct ad5755_platform_data - AD5755 DAC driver platform data
- * @ext_dc_dc_compenstation_resistor: Whether an external DC-DC converter
- * compensation register is used.
- * @dc_dc_phase: DC-DC converter phase.
- * @dc_dc_freq: DC-DC converter frequency.
- * @dc_dc_maxv: DC-DC maximum allowed boost voltage.
- * @dac.mode: The mode to be used for the DAC output.
- * @dac.ext_current_sense_resistor: Whether an external current sense resistor
- * is used.
- * @dac.enable_voltage_overrange: Whether to enable 20% voltage output overrange.
- * @dac.slew.enable: Whether to enable digital slew.
- * @dac.slew.rate: Slew rate of the digital slew.
- * @dac.slew.step_size: Slew step size of the digital slew.
- **/
-struct ad5755_platform_data {
- bool ext_dc_dc_compenstation_resistor;
- enum ad5755_dc_dc_phase dc_dc_phase;
- enum ad5755_dc_dc_freq dc_dc_freq;
- enum ad5755_dc_dc_maxv dc_dc_maxv;
-
- struct {
- enum ad5755_mode mode;
- bool ext_current_sense_resistor;
- bool enable_voltage_overrange;
- struct {
- bool enable;
- enum ad5755_slew_rate rate;
- enum ad5755_slew_step_size step_size;
- } slew;
- } dac[4];
-};
-
-#endif
diff --git a/include/linux/platform_data/bcm7038_wdt.h b/include/linux/platform_data/bcm7038_wdt.h
new file mode 100644
index 000000000000..e18cfd9ec8f9
--- /dev/null
+++ b/include/linux/platform_data/bcm7038_wdt.h
@@ -0,0 +1,8 @@
+#ifndef __BCM7038_WDT_PDATA_H
+#define __BCM7038_WDT_PDATA_H
+
+struct bcm7038_wdt_platform_data {
+ const char *clk_name;
+};
+
+#endif /* __BCM7038_WDT_PDATA_H */
diff --git a/include/linux/platform_data/clk-fch.h b/include/linux/platform_data/clk-fch.h
index b9f682459f08..11a2a23fd9b2 100644
--- a/include/linux/platform_data/clk-fch.h
+++ b/include/linux/platform_data/clk-fch.h
@@ -12,7 +12,7 @@
struct fch_clk_data {
void __iomem *base;
- u32 is_rv;
+ char *name;
};
#endif /* __CLK_FCH_H */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index e1e9402180b9..f7d2be686359 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -8,6 +8,7 @@
#ifndef _LINUX_PM_H
#define _LINUX_PM_H
+#include <linux/export.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
@@ -357,13 +358,47 @@ struct dev_pm_ops {
#define SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn)
#endif
+#define _DEFINE_DEV_PM_OPS(name, \
+ suspend_fn, resume_fn, \
+ runtime_suspend_fn, runtime_resume_fn, idle_fn) \
+const struct dev_pm_ops name = { \
+ SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ RUNTIME_PM_OPS(runtime_suspend_fn, runtime_resume_fn, idle_fn) \
+}
+
+#ifdef CONFIG_PM
+#define _EXPORT_DEV_PM_OPS(name, suspend_fn, resume_fn, runtime_suspend_fn, \
+ runtime_resume_fn, idle_fn, sec) \
+ _DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, runtime_suspend_fn, \
+ runtime_resume_fn, idle_fn); \
+ _EXPORT_SYMBOL(name, sec)
+#else
+#define _EXPORT_DEV_PM_OPS(name, suspend_fn, resume_fn, runtime_suspend_fn, \
+ runtime_resume_fn, idle_fn, sec) \
+static __maybe_unused _DEFINE_DEV_PM_OPS(__static_##name, suspend_fn, \
+ resume_fn, runtime_suspend_fn, \
+ runtime_resume_fn, idle_fn)
+#endif
+
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
* to RAM and hibernation.
+ *
+ * If the underlying dev_pm_ops struct symbol has to be exported, use
+ * EXPORT_SIMPLE_DEV_PM_OPS() or EXPORT_GPL_SIMPLE_DEV_PM_OPS() instead.
*/
#define DEFINE_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-static const struct dev_pm_ops name = { \
- SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+ _DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL)
+
+#define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ _EXPORT_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL, "")
+#define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+ _EXPORT_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL, "_gpl")
+
+/* Deprecated. Use DEFINE_SIMPLE_DEV_PM_OPS() instead. */
+#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+const struct dev_pm_ops __maybe_unused name = { \
+ SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
/*
@@ -378,20 +413,10 @@ static const struct dev_pm_ops name = { \
* suspend and "early" resume callback pointers, .suspend_late() and
* .resume_early(), to the same routines as .runtime_suspend() and
* .runtime_resume(), respectively (and analogously for hibernation).
+ *
+ * Deprecated. You most likely don't want this macro. Use
+ * DEFINE_RUNTIME_DEV_PM_OPS() instead.
*/
-#define DEFINE_UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
-static const struct dev_pm_ops name = { \
- SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
- RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
-}
-
-/* Deprecated. Use DEFINE_SIMPLE_DEV_PM_OPS() instead. */
-#define SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
-const struct dev_pm_ops __maybe_unused name = { \
- SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
-}
-
-/* Deprecated. Use DEFINE_UNIVERSAL_DEV_PM_OPS() instead. */
#define UNIVERSAL_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
const struct dev_pm_ops __maybe_unused name = { \
SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 016de5776b6d..9f09601c465a 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -22,6 +22,30 @@
usage_count */
#define RPM_AUTO 0x08 /* Use autosuspend_delay */
+/*
+ * Use this for defining a set of PM operations to be used in all situations
+ * (system suspend, hibernation or runtime PM).
+ *
+ * Note that the behaviour differs from the deprecated UNIVERSAL_DEV_PM_OPS()
+ * macro, which uses the provided callbacks for both runtime PM and system
+ * sleep, while DEFINE_RUNTIME_DEV_PM_OPS() uses pm_runtime_force_suspend()
+ * and pm_runtime_force_resume() for its system sleep callbacks.
+ *
+ * If the underlying dev_pm_ops struct symbol has to be exported, use
+ * EXPORT_RUNTIME_DEV_PM_OPS() or EXPORT_GPL_RUNTIME_DEV_PM_OPS() instead.
+ */
+#define DEFINE_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ _DEFINE_DEV_PM_OPS(name, pm_runtime_force_suspend, \
+ pm_runtime_force_resume, suspend_fn, \
+ resume_fn, idle_fn)
+
+#define EXPORT_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ _EXPORT_DEV_PM_OPS(name, pm_runtime_force_suspend, pm_runtime_force_resume, \
+ suspend_fn, resume_fn, idle_fn, "")
+#define EXPORT_GPL_RUNTIME_DEV_PM_OPS(name, suspend_fn, resume_fn, idle_fn) \
+ _EXPORT_DEV_PM_OPS(name, pm_runtime_force_suspend, pm_runtime_force_resume, \
+ suspend_fn, resume_fn, idle_fn, "_gpl")
+
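A sketch of a driver wiring this up; the foo_* callbacks and driver are hypothetical:

        static DEFINE_RUNTIME_DEV_PM_OPS(foo_pm_ops, foo_rt_suspend,
                                         foo_rt_resume, NULL);

        static struct platform_driver foo_driver = {
                .driver = {
                        .name   = "foo",
                        .pm     = pm_ptr(&foo_pm_ops),
                },
        };
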
#ifdef CONFIG_PM
extern struct workqueue_struct *pm_wq;
diff --git a/include/linux/pmu.h b/include/linux/pmu.h
index 52453a24a24f..c677442d007c 100644
--- a/include/linux/pmu.h
+++ b/include/linux/pmu.h
@@ -13,7 +13,7 @@
#include <uapi/linux/pmu.h>
-extern int find_via_pmu(void);
+extern int __init find_via_pmu(void);
extern int pmu_request(struct adb_request *req,
void (*done)(struct adb_request *), int nbytes, ...);
diff --git a/include/linux/poll.h b/include/linux/poll.h
index 1cdc32b1f1b0..a9e0e1c2d1f2 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -8,12 +8,10 @@
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
-#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <uapi/linux/poll.h>
#include <uapi/linux/eventpoll.h>
-extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
additional memory. */
#ifdef __clang__
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 9497f6b98339..1522df223c0f 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -183,10 +183,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
extern int printk_delay_msec;
extern int dmesg_restrict;
-extern int
-devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void *buf,
- size_t *lenp, loff_t *ppos);
-
extern void wake_up_klogd(void);
char *log_buf_addr_get(void);
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 069c7fd95396..81d6e4ec2294 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -110,7 +110,16 @@ extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops);
extern void proc_set_size(struct proc_dir_entry *, loff_t);
extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
-extern void *PDE_DATA(const struct inode *);
+
+/*
+ * Obtain the private data passed by the user through proc_create_data() or
+ * a related helper.
+ */
+static inline void *pde_data(const struct inode *inode)
+{
+ return inode->i_private;
+}
+
extern void *proc_get_parent_data(const struct inode *);
extern void proc_remove(struct proc_dir_entry *);
extern void remove_proc_entry(const char *, struct proc_dir_entry *);
@@ -178,12 +187,20 @@ static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
#define proc_create_seq(name, mode, parent, ops) ({NULL;})
#define proc_create_single(name, mode, parent, show) ({NULL;})
#define proc_create_single_data(name, mode, parent, show, data) ({NULL;})
-#define proc_create(name, mode, parent, proc_ops) ({NULL;})
-#define proc_create_data(name, mode, parent, proc_ops, data) ({NULL;})
+
+static inline struct proc_dir_entry *
+proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent,
+ const struct proc_ops *proc_ops)
+{ return NULL; }
+
+static inline struct proc_dir_entry *
+proc_create_data(const char *name, umode_t mode, struct proc_dir_entry *parent,
+ const struct proc_ops *proc_ops, void *data)
+{ return NULL; }
static inline void proc_set_size(struct proc_dir_entry *de, loff_t size) {}
static inline void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) {}
-static inline void *PDE_DATA(const struct inode *inode) {BUG(); return NULL;}
+static inline void *pde_data(const struct inode *inode) {BUG(); return NULL;}
static inline void *proc_get_parent_data(const struct inode *inode) { BUG(); return NULL; }
static inline void proc_remove(struct proc_dir_entry *de) {}
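As a hedged usage sketch (hypothetical foo_* names), the lower-case pde_data() slots into the usual single_open() pattern exactly where the old PDE_DATA() macro used to go:

static int foo_show(struct seq_file *m, void *v)
{
	int *value = m->private;	/* set via proc_create_data() */

	seq_printf(m, "%d\n", *value);
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_show, pde_data(inode));
}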
diff --git a/include/linux/profile.h b/include/linux/profile.h
index fd18ca96f557..11db1ec516e2 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -31,11 +31,6 @@ static inline int create_proc_profile(void)
}
#endif
-enum profile_type {
- PROFILE_TASK_EXIT,
- PROFILE_MUNMAP
-};
-
#ifdef CONFIG_PROFILING
extern int prof_on __read_mostly;
@@ -66,23 +61,6 @@ static inline void profile_hit(int type, void *ip)
struct task_struct;
struct mm_struct;
-/* task is in do_exit() */
-void profile_task_exit(struct task_struct * task);
-
-/* task is dead, free task struct ? Returns 1 if
- * the task was taken, 0 if the task should be freed.
- */
-int profile_handoff_task(struct task_struct * task);
-
-/* sys_munmap */
-void profile_munmap(unsigned long addr);
-
-int task_handoff_register(struct notifier_block * n);
-int task_handoff_unregister(struct notifier_block * n);
-
-int profile_event_register(enum profile_type, struct notifier_block * n);
-int profile_event_unregister(enum profile_type, struct notifier_block * n);
-
#else
#define prof_on 0
@@ -107,29 +85,6 @@ static inline void profile_hit(int type, void *ip)
return;
}
-static inline int task_handoff_register(struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int task_handoff_unregister(struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int profile_event_register(enum profile_type t, struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n)
-{
- return -ENOSYS;
-}
-
-#define profile_task_exit(a) do { } while (0)
-#define profile_handoff_task(a) (0)
-#define profile_munmap(a) do { } while (0)
#endif /* CONFIG_PROFILING */
diff --git a/include/linux/property.h b/include/linux/property.h
index 8355f99ebd47..7399a0b45f98 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -122,6 +122,8 @@ void fwnode_handle_put(struct fwnode_handle *fwnode);
int fwnode_irq_get(const struct fwnode_handle *fwnode, unsigned int index);
+void __iomem *fwnode_iomap(struct fwnode_handle *fwnode, int index);
+
unsigned int device_get_child_node_count(struct device *dev);
static inline bool device_property_read_bool(struct device *dev,
diff --git a/include/linux/psi.h b/include/linux/psi.h
index a70ca833c6d7..7f7d1d88c3bb 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -25,18 +25,17 @@ void psi_memstall_enter(unsigned long *flags);
void psi_memstall_leave(unsigned long *flags);
int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
-
-#ifdef CONFIG_CGROUPS
-int psi_cgroup_alloc(struct cgroup *cgrp);
-void psi_cgroup_free(struct cgroup *cgrp);
-void cgroup_move_task(struct task_struct *p, struct css_set *to);
-
struct psi_trigger *psi_trigger_create(struct psi_group *group,
char *buf, size_t nbytes, enum psi_res res);
-void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t);
+void psi_trigger_destroy(struct psi_trigger *t);
__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
poll_table *wait);
+
+#ifdef CONFIG_CGROUPS
+int psi_cgroup_alloc(struct cgroup *cgrp);
+void psi_cgroup_free(struct cgroup *cgrp);
+void cgroup_move_task(struct task_struct *p, struct css_set *to);
#endif
#else /* CONFIG_PSI */
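A rough sketch of the new trigger lifecycle (hypothetical foo_* helper; error handling is illustrative): create a trigger once, poll while it is open, and tear it down with psi_trigger_destroy(). The refcounted psi_trigger_replace() is gone.

static int foo_setup_trigger(struct psi_group *group, char *buf,
			     size_t nbytes, struct psi_trigger **slot)
{
	struct psi_trigger *t = psi_trigger_create(group, buf, nbytes, PSI_MEM);

	if (IS_ERR(t))
		return PTR_ERR(t);
	*slot = t;	/* later: psi_trigger_destroy(*slot) on release */
	return 0;
}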
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 516c0fe836fd..1a3cef26d129 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -141,9 +141,6 @@ struct psi_trigger {
* events to one per window
*/
u64 last_event_time;
-
- /* Refcounting to prevent premature destruction */
- struct kref refcount;
};
struct psi_group {
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 18ebd39c9487..fd692b4a41d5 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -91,7 +91,7 @@ extern bool qid_valid(struct kqid qid);
*
* When there is no mapping defined for the user-namespace, type,
* qid tuple an invalid kqid is returned. Callers are expected to
- * test for and handle handle invalid kqids being returned.
+ * test for and handle invalid kqids being returned.
* Invalid kqids may be tested for using qid_valid().
*/
static inline struct kqid make_kqid(struct user_namespace *from,
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 154e954b711d..d6e5a1feb947 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -81,7 +81,7 @@ struct raid6_calls {
void (*xor_syndrome)(int, int, int, size_t, void **);
int (*valid)(void); /* Returns 1 if this routine set is usable */
const char *name; /* Name of this routine set */
- int prefer; /* Has special performance attribute */
+ int priority; /* Relative priority ranking if non-zero */
};
/* Selected algorithm */
diff --git a/include/linux/ref_tracker.h b/include/linux/ref_tracker.h
index c11c9db5825c..60f3453be23e 100644
--- a/include/linux/ref_tracker.h
+++ b/include/linux/ref_tracker.h
@@ -4,6 +4,7 @@
#include <linux/refcount.h>
#include <linux/types.h>
#include <linux/spinlock.h>
+#include <linux/stackdepot.h>
struct ref_tracker;
@@ -26,6 +27,7 @@ static inline void ref_tracker_dir_init(struct ref_tracker_dir *dir,
spin_lock_init(&dir->lock);
dir->quarantine_avail = quarantine_count;
refcount_set(&dir->untracked, 1);
+ stack_depot_init();
}
void ref_tracker_dir_exit(struct ref_tracker_dir *dir);
diff --git a/include/linux/reset.h b/include/linux/reset.h
index db0e6115a2f6..8a21b5756c3e 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -455,6 +455,26 @@ static inline struct reset_control *of_reset_control_get_exclusive(
}
/**
+ * of_reset_control_get_optional_exclusive - Lookup and obtain an optional exclusive
+ * reference to a reset controller.
+ * @node: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Optional variant of of_reset_control_get_exclusive(). If the requested reset
+ * is not specified in the device tree, this function returns NULL instead of
+ * an error.
+ *
+ * Returns a struct reset_control or IS_ERR() condition containing errno.
+ *
+ * Use of id names is optional.
+ */
+static inline struct reset_control *of_reset_control_get_optional_exclusive(
+ struct device_node *node, const char *id)
+{
+ return __of_reset_control_get(node, id, 0, false, true, true);
+}
+
+/**
* of_reset_control_get_shared - Lookup and obtain a shared reference
* to a reset controller.
* @node: device to be reset by the controller
diff --git a/include/linux/rio_ids.h b/include/linux/rio_ids.h
index 4846f72759b2..c7e2f21dd5c1 100644
--- a/include/linux/rio_ids.h
+++ b/include/linux/rio_ids.h
@@ -9,18 +9,6 @@
#ifndef LINUX_RIO_IDS_H
#define LINUX_RIO_IDS_H
-#define RIO_VID_FREESCALE 0x0002
-#define RIO_DID_MPC8560 0x0003
-
-#define RIO_VID_TUNDRA 0x000d
-#define RIO_DID_TSI500 0x0500
-#define RIO_DID_TSI568 0x0568
-#define RIO_DID_TSI572 0x0572
-#define RIO_DID_TSI574 0x0574
-#define RIO_DID_TSI576 0x0578 /* Same ID as Tsi578 */
-#define RIO_DID_TSI577 0x0577
-#define RIO_DID_TSI578 0x0578
-
#define RIO_VID_IDT 0x0038
#define RIO_DID_IDT70K200 0x0310
#define RIO_DID_IDTCPS8 0x035c
@@ -33,7 +21,6 @@
#define RIO_DID_IDTCPS1616 0x0379
#define RIO_DID_IDTVPS1616 0x0377
#define RIO_DID_IDTSPS1616 0x0378
-#define RIO_DID_TSI721 0x80ab
#define RIO_DID_IDTRXS1632 0x80e5
#define RIO_DID_IDTRXS2448 0x80e6
diff --git a/include/linux/rwlock.h b/include/linux/rwlock.h
index 2c0ad417ce3c..8f416c5e929e 100644
--- a/include/linux/rwlock.h
+++ b/include/linux/rwlock.h
@@ -55,6 +55,12 @@ do { \
#define write_lock(lock) _raw_write_lock(lock)
#define read_lock(lock) _raw_read_lock(lock)
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define write_lock_nested(lock, subclass) _raw_write_lock_nested(lock, subclass)
+#else
+#define write_lock_nested(lock, subclass) _raw_write_lock(lock)
+#endif
+
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define read_lock_irqsave(lock, flags) \
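A short hedged sketch (hypothetical foo_merge(), assuming both locks belong to the same lock class) of what the new annotation is for: without the _nested() variant, lockdep would flag the second write acquisition as a potential deadlock.

static void foo_merge(rwlock_t *dst, rwlock_t *src)
{
	write_lock(dst);
	write_lock_nested(src, SINGLE_DEPTH_NESTING);
	/* ... move entries from src to dst ... */
	write_unlock(src);
	write_unlock(dst);
}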
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index f1db6f17c4fb..dceb0a59b692 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -17,6 +17,7 @@
void __lockfunc _raw_read_lock(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock(rwlock_t *lock) __acquires(lock);
+void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass) __acquires(lock);
void __lockfunc _raw_read_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_write_lock_bh(rwlock_t *lock) __acquires(lock);
void __lockfunc _raw_read_lock_irq(rwlock_t *lock) __acquires(lock);
@@ -209,6 +210,13 @@ static inline void __raw_write_lock(rwlock_t *lock)
LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
}
+static inline void __raw_write_lock_nested(rwlock_t *lock, int subclass)
+{
+ preempt_disable();
+ rwlock_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock);
+}
+
#endif /* !CONFIG_GENERIC_LOCKBREAK || CONFIG_DEBUG_LOCK_ALLOC */
static inline void __raw_write_unlock(rwlock_t *lock)
diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h
index 49c1f3842ed5..8544ff05e594 100644
--- a/include/linux/rwlock_rt.h
+++ b/include/linux/rwlock_rt.h
@@ -28,6 +28,7 @@ extern void rt_read_lock(rwlock_t *rwlock);
extern int rt_read_trylock(rwlock_t *rwlock);
extern void rt_read_unlock(rwlock_t *rwlock);
extern void rt_write_lock(rwlock_t *rwlock);
+extern void rt_write_lock_nested(rwlock_t *rwlock, int subclass);
extern int rt_write_trylock(rwlock_t *rwlock);
extern void rt_write_unlock(rwlock_t *rwlock);
@@ -83,6 +84,15 @@ static __always_inline void write_lock(rwlock_t *rwlock)
rt_write_lock(rwlock);
}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static __always_inline void write_lock_nested(rwlock_t *rwlock, int subclass)
+{
+ rt_write_lock_nested(rwlock, subclass);
+}
+#else
+#define write_lock_nested(lock, subclass) rt_write_lock(((void)(subclass), (lock)))
+#endif
+
static __always_inline void write_lock_bh(rwlock_t *rwlock)
{
local_bh_disable();
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index fc0357a6e19b..95df357ec009 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -416,6 +416,17 @@ static inline void sbitmap_queue_free(struct sbitmap_queue *sbq)
}
/**
+ * sbitmap_queue_recalculate_wake_batch() - Recalculate wake batch
+ * @sbq: Bitmap queue to recalculate wake batch.
+ * @users: Number of shares.
+ *
+ * Like sbitmap_queue_update_wake_batch(), this will recalculate the wake
+ * batch based on the depth. This interface is for HCTX shared tags or
+ * queue shared tags.
+ */
+void sbitmap_queue_recalculate_wake_batch(struct sbitmap_queue *sbq,
+ unsigned int users);
+
+/**
* sbitmap_queue_resize() - Resize a &struct sbitmap_queue.
* @sbq: Bitmap queue to resize.
* @depth: New number of bits to resize to.
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 266754a55327..7ff9d6386c12 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -69,10 +69,27 @@ struct sg_append_table {
* a valid sg entry, or whether it points to the start of a new scatterlist.
* Those low bits are there for everyone! (thanks mason :-)
*/
-#define sg_is_chain(sg) ((sg)->page_link & SG_CHAIN)
-#define sg_is_last(sg) ((sg)->page_link & SG_END)
-#define sg_chain_ptr(sg) \
- ((struct scatterlist *) ((sg)->page_link & ~(SG_CHAIN | SG_END)))
+#define SG_PAGE_LINK_MASK (SG_CHAIN | SG_END)
+
+static inline unsigned int __sg_flags(struct scatterlist *sg)
+{
+ return sg->page_link & SG_PAGE_LINK_MASK;
+}
+
+static inline struct scatterlist *sg_chain_ptr(struct scatterlist *sg)
+{
+ return (struct scatterlist *)(sg->page_link & ~SG_PAGE_LINK_MASK);
+}
+
+static inline bool sg_is_chain(struct scatterlist *sg)
+{
+ return __sg_flags(sg) & SG_CHAIN;
+}
+
+static inline bool sg_is_last(struct scatterlist *sg)
+{
+ return __sg_flags(sg) & SG_END;
+}
/**
* sg_assign_page - Assign a given page to an SG entry
@@ -92,7 +109,7 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
* In order for the low bit stealing approach to work, pages
* must be aligned at a 32-bit boundary as a minimum.
*/
- BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
+ BUG_ON((unsigned long)page & SG_PAGE_LINK_MASK);
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg_is_chain(sg));
#endif
@@ -126,7 +143,7 @@ static inline struct page *sg_page(struct scatterlist *sg)
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg_is_chain(sg));
#endif
- return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
+ return (struct page *)((sg)->page_link & ~SG_PAGE_LINK_MASK);
}
/**
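A hedged sketch (hypothetical helper; for_each_sg() is the more usual idiom) showing the converted helpers in use, now with type checking on the sg argument:

static unsigned int foo_sgl_len(struct scatterlist *sgl)
{
	struct scatterlist *sg;
	unsigned int len = 0;

	for (sg = sgl; sg; sg = sg_next(sg)) {
		len += sg->length;
		if (sg_is_last(sg))	/* now an inline bool, not a macro */
			break;
	}
	return len;
}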
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7a1f16df66e3..75ba8aa60248 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -274,8 +274,13 @@ struct task_group;
#define get_current_state() READ_ONCE(current->__state)
-/* Task command name length: */
-#define TASK_COMM_LEN 16
+/*
+ * Define the task command name length as enum, then it can be visible to
+ * BPF programs.
+ */
+enum {
+ TASK_COMM_LEN = 16,
+};
extern void scheduler_tick(void);
@@ -614,10 +619,6 @@ struct sched_dl_entity {
* task has to wait for a replenishment to be performed at the
* next firing of dl_timer.
*
- * @dl_boosted tells if we are boosted due to DI. If so we are
- * outside bandwidth enforcement mechanism (but only until we
- * exit the critical section);
- *
* @dl_yielded tells if task gave up the CPU before consuming
* all its available runtime during the last job.
*
@@ -991,8 +992,8 @@ struct task_struct {
/* CLONE_CHILD_CLEARTID: */
int __user *clear_child_tid;
- /* PF_IO_WORKER */
- void *pf_io_worker;
+ /* PF_KTHREAD | PF_IO_WORKER */
+ void *worker_private;
u64 utime;
u64 stime;
@@ -1679,7 +1680,6 @@ extern struct pid *cad_pid;
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user() noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* If unset the fpu must be initialized before use */
-#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
#define PF_FROZEN 0x00010000 /* Frozen for system suspend */
#define PF_KSWAPD 0x00020000 /* I am kswapd */
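In-kernel usage is unchanged by the enum conversion; the point is that an enum constant is emitted into BTF, so BPF programs can see it. A trivial sketch (hypothetical foo_log_comm()):

static void foo_log_comm(struct task_struct *p)
{
	char comm[TASK_COMM_LEN];

	get_task_comm(comm, p);
	pr_info("task: %s\n", comm);
}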
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index aca874d33fe6..aa5f09ca5bcf 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -214,6 +214,32 @@ static inline void fs_reclaim_acquire(gfp_t gfp_mask) { }
static inline void fs_reclaim_release(gfp_t gfp_mask) { }
#endif
+/* Any memory-allocation retry loop should use
+ * memalloc_retry_wait(), and pass the flags for the most
+ * constrained allocation attempt that might have failed.
+ * This provides useful documentation of where loops are,
+ * and a central place to fine tune the waiting as the MM
+ * implementation changes.
+ */
+static inline void memalloc_retry_wait(gfp_t gfp_flags)
+{
+ /* We use io_schedule_timeout because waiting for memory
+	 * typically includes waiting for dirty pages to be
+ * written out, which requires IO.
+ */
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ gfp_flags = current_gfp_context(gfp_flags);
+ if (gfpflags_allow_blocking(gfp_flags) &&
+ !(gfp_flags & __GFP_NORETRY))
+ /* Probably waited already, no need for much more */
+ io_schedule_timeout(1);
+ else
+ /* Probably didn't wait, and has now released a lock,
+ * so now is a good time to wait
+ */
+ io_schedule_timeout(HZ/50);
+}
+
/**
* might_alloc - Mark possible allocation sites
* @gfp_mask: gfp_t flags that would be used to allocate
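A minimal sketch of the retry-loop pattern the new helper centralizes (hypothetical foo_alloc() and struct foo; a real caller would usually bound the retries or opt into __GFP_NOFAIL deliberately):

static struct foo *foo_alloc(gfp_t gfp)
{
	struct foo *p;	/* struct foo is a stand-in */

	do {
		p = kzalloc(sizeof(*p), gfp);
		if (!p)
			/* pass the most constrained flags that just failed */
			memalloc_retry_wait(gfp);
	} while (!p);

	return p;
}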
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 33a50642cf41..b6ecb9fc4cd2 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -109,13 +109,9 @@ struct signal_struct {
/* thread group exit support */
int group_exit_code;
- /* overloaded:
- * - notify group_exit_task when ->count is equal to notify_count
- * - everyone except group_exit_task is stopped during signal delivery
- * of fatal signals, group_exit_task processes the signal.
- */
+	/* notify group_exec_task when notify_count is less than or equal to 0 */
int notify_count;
- struct task_struct *group_exit_task;
+ struct task_struct *group_exec_task;
/* thread group stop support, overloads group_exit_code too */
int group_stop_count;
@@ -256,7 +252,6 @@ struct signal_struct {
#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
-#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */
/*
* Pending notifications to parent.
*/
@@ -272,31 +267,25 @@ struct signal_struct {
static inline void signal_set_stop_flags(struct signal_struct *sig,
unsigned int flags)
{
- WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+ WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}
-/* If true, all threads except ->group_exit_task have pending SIGKILL */
-static inline int signal_group_exit(const struct signal_struct *sig)
-{
- return (sig->flags & SIGNAL_GROUP_EXIT) ||
- (sig->group_exit_task != NULL);
-}
-
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *task,
- sigset_t *mask, kernel_siginfo_t *info);
+extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
+ kernel_siginfo_t *info, enum pid_type *type);
static inline int kernel_dequeue_signal(void)
{
struct task_struct *task = current;
kernel_siginfo_t __info;
+ enum pid_type __type;
int ret;
spin_lock_irq(&task->sighand->siglock);
- ret = dequeue_signal(task, &task->blocked, &__info);
+ ret = dequeue_signal(task, &task->blocked, &__info, &__type);
spin_unlock_irq(&task->sighand->siglock);
return ret;
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 304f431178fd..c19dd5a2c05c 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -7,20 +7,8 @@
struct ctl_table;
#ifdef CONFIG_DETECT_HUNG_TASK
-
-#ifdef CONFIG_SMP
-extern unsigned int sysctl_hung_task_all_cpu_backtrace;
-#else
-#define sysctl_hung_task_all_cpu_backtrace 0
-#endif /* CONFIG_SMP */
-
-extern int sysctl_hung_task_check_count;
-extern unsigned int sysctl_hung_task_panic;
+/* used for hung_task and block/ */
extern unsigned long sysctl_hung_task_timeout_secs;
-extern unsigned long sysctl_hung_task_check_interval_secs;
-extern int sysctl_hung_task_warnings;
-int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
#else
/* Avoid need for ifdefs elsewhere in the code */
enum { sysctl_hung_task_timeout_secs = 0 };
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 058d7f371e25..e84e54d1b490 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -54,11 +54,12 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
-extern void sched_post_fork(struct task_struct *p,
- struct kernel_clone_args *kargs);
+extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs);
+extern void sched_post_fork(struct task_struct *p);
extern void sched_dead(struct task_struct *p);
void __noreturn do_task_dead(void);
+void __noreturn make_task_dead(int signr);
extern void proc_caches_init(void);
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 72dbb44a4573..88cc16444b43 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -209,7 +209,7 @@ static const struct file_operations __name ## _fops = { \
#define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
- return single_open(file, __name ## _show, PDE_DATA(inode)); \
+ return single_open(file, __name ## _show, pde_data(inode)); \
} \
\
static const struct proc_ops __name ## _proc_ops = { \
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 5db211f43b29..ff84a3ed10ea 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -104,8 +104,6 @@ struct uart_8250_port {
unsigned char ier;
unsigned char lcr;
unsigned char mcr;
- unsigned char mcr_mask; /* mask of user bits */
- unsigned char mcr_force; /* mask of forced bits */
unsigned char cur_iotype; /* Running I/O type */
unsigned int rpm_tx_active;
unsigned char canary; /* non-zero during system sleep
diff --git a/include/linux/serial_s3c.h b/include/linux/serial_s3c.h
index cf0de4a86640..f6c3323fc4c5 100644
--- a/include/linux/serial_s3c.h
+++ b/include/linux/serial_s3c.h
@@ -27,15 +27,6 @@
#define S3C2410_UERSTAT (0x14)
#define S3C2410_UFSTAT (0x18)
#define S3C2410_UMSTAT (0x1C)
-#define USI_CON (0xC4)
-#define USI_OPTION (0xC8)
-
-#define USI_CON_RESET (1<<0)
-#define USI_CON_RESET_MASK (1<<0)
-
-#define USI_OPTION_HWACG_CLKREQ_ON (1<<1)
-#define USI_OPTION_HWACG_CLKSTOP_ON (1<<2)
-#define USI_OPTION_HWACG_MASK (3<<1)
#define S3C2410_LCON_CFGMASK ((0xF<<3)|(0x3))
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 166158b6e917..e65b80ed09e7 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -83,8 +83,7 @@ extern void shmem_unlock_mapping(struct address_space *mapping);
extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
-extern int shmem_unuse(unsigned int type, bool frontswap,
- unsigned long *fs_pages_to_unuse);
+int shmem_unuse(unsigned int type);
extern bool shmem_is_huge(struct vm_area_struct *vma,
struct inode *inode, pgoff_t index);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bf11e1fbd69b..8a636e678902 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -318,7 +318,7 @@ enum skb_drop_reason {
SKB_DROP_REASON_NO_SOCKET,
SKB_DROP_REASON_PKT_TOO_SMALL,
SKB_DROP_REASON_TCP_CSUM,
- SKB_DROP_REASON_TCP_FILTER,
+ SKB_DROP_REASON_SOCKET_FILTER,
SKB_DROP_REASON_UDP_CSUM,
SKB_DROP_REASON_MAX,
};
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 367366f1d1ff..37bde99b74af 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -403,8 +403,7 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
if (size <= 16 * 1024 * 1024) return 24;
if (size <= 32 * 1024 * 1024) return 25;
- if ((IS_ENABLED(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 110000)
- && !IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
+ if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
else
BUG();
diff --git a/include/linux/soc/ti/ti_sci_inta_msi.h b/include/linux/soc/ti/ti_sci_inta_msi.h
index e3aa8b14612e..4dba2f2aff6f 100644
--- a/include/linux/soc/ti/ti_sci_inta_msi.h
+++ b/include/linux/soc/ti/ti_sci_inta_msi.h
@@ -18,6 +18,4 @@ struct irq_domain
struct irq_domain *parent);
int ti_sci_inta_msi_domain_alloc_irqs(struct device *dev,
struct ti_sci_resource *res);
-unsigned int ti_sci_inta_msi_get_virq(struct device *dev, u32 index);
-void ti_sci_inta_msi_domain_free_irqs(struct device *dev);
#endif /* __INCLUDE_LINUX_IRQCHIP_TI_SCI_INTA_H */
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 8a463b8fc12a..67e0d3e750b5 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -92,7 +92,7 @@
* firmware.
*/
struct sdw_intel_stream_params_data {
- struct snd_pcm_substream *substream;
+ int stream;
struct snd_soc_dai *dai;
struct snd_pcm_hw_params *hw_params;
int link_id;
@@ -105,7 +105,7 @@ struct sdw_intel_stream_params_data {
* firmware.
*/
struct sdw_intel_stream_free_data {
- struct snd_pcm_substream *substream;
+ int stream;
struct snd_soc_dai *dai;
int link_id;
};
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h
index d0d188861ad6..b8ba00ccccde 100644
--- a/include/linux/spinlock_api_up.h
+++ b/include/linux/spinlock_api_up.h
@@ -59,6 +59,7 @@
#define _raw_spin_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_read_lock(lock) __LOCK(lock)
#define _raw_write_lock(lock) __LOCK(lock)
+#define _raw_write_lock_nested(lock, subclass) __LOCK(lock)
#define _raw_spin_lock_bh(lock) __LOCK_BH(lock)
#define _raw_read_lock_bh(lock) __LOCK_BH(lock)
#define _raw_write_lock_bh(lock) __LOCK_BH(lock)
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index c34b55a6e554..17f992fe6355 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -19,6 +19,22 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t gfp_flags, bool can_alloc);
+/*
+ * Every user of stack depot has to call this during its own init when it's
+ * decided that it will be calling stack_depot_save() later.
+ *
+ * The alternative is to select STACKDEPOT_ALWAYS_INIT to have stack depot
+ * enabled as part of mm_init(), for subsystems where it's known at compile time
+ * that stack depot will be used.
+ */
+int stack_depot_init(void);
+
+#ifdef CONFIG_STACKDEPOT_ALWAYS_INIT
+static inline int stack_depot_early_init(void) { return stack_depot_init(); }
+#else
+static inline int stack_depot_early_init(void) { return 0; }
+#endif
+
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries, gfp_t gfp_flags);
@@ -30,13 +46,4 @@ int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
void stack_depot_print(depot_stack_handle_t stack);
-#ifdef CONFIG_STACKDEPOT
-int stack_depot_init(void);
-#else
-static inline int stack_depot_init(void)
-{
- return 0;
-}
-#endif /* CONFIG_STACKDEPOT */
-
#endif
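A hedged sketch of the new opt-in model (hypothetical foo subsystem): call stack_depot_init() once from your own init, then save stacks as before.

static int __init foo_init(void)
{
	return stack_depot_init();	/* opt in; may allocate the hash table */
}

static depot_stack_handle_t foo_capture(gfp_t gfp)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr, gfp);
}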
diff --git a/include/linux/stackleak.h b/include/linux/stackleak.h
index a59db2f08e76..ccaab2043fcd 100644
--- a/include/linux/stackleak.h
+++ b/include/linux/stackleak.h
@@ -23,11 +23,6 @@ static inline void stackleak_task_init(struct task_struct *t)
# endif
}
-#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
-int stack_erasing_sysctl(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos);
-#endif
-
#else /* !CONFIG_GCC_PLUGIN_STACKLEAK */
static inline void stackleak_task_init(struct task_struct *t) { }
#endif
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 4ba39e1403b2..7a22921c9db7 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -7,6 +7,7 @@
#include <linux/string.h>
#include <linux/types.h>
+struct device;
struct file;
struct task_struct;
@@ -100,6 +101,9 @@ char *kstrdup_quotable(const char *src, gfp_t gfp);
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
char *kstrdup_quotable_file(struct file *file, gfp_t gfp);
+char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n);
void kfree_strarray(char **array, size_t n);
+char **devm_kasprintf_strarray(struct device *dev, const char *prefix, size_t n);
+
#endif
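A rough usage sketch (hypothetical foo_make_names(); the exact generated name format, e.g. "line-0", "line-1", ..., is an assumption here, not spelled out by this hunk):

static int foo_make_names(void)
{
	char **names = kasprintf_strarray(GFP_KERNEL, "line", 4);

	if (!names)
		return -ENOMEM;
	/* ... hand names[0..3] to consumers ... */
	kfree_strarray(names, 4);
	return 0;
}

The devm_kasprintf_strarray() variant ties the array's lifetime to a struct device, so no explicit kfree_strarray() is needed on the success path.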
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 0ae28ae6caf2..f35c22b3355f 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -64,10 +64,9 @@ struct svc_serv_ops {
/* queue up a transport for servicing */
void (*svo_enqueue_xprt)(struct svc_xprt *);
- /* set up thread (or whatever) execution context */
- int (*svo_setup)(struct svc_serv *, struct svc_pool *, int);
-
- /* optional module to count when adding threads (pooled svcs only) */
+ /* optional module to count when adding threads.
+ * Thread function must call module_put_and_kthread_exit() to exit.
+ */
struct module *svo_module;
};
@@ -85,6 +84,7 @@ struct svc_serv {
struct svc_program * sv_program; /* RPC program */
struct svc_stat * sv_stats; /* RPC statistics */
spinlock_t sv_lock;
+ struct kref sv_refcnt;
unsigned int sv_nrthreads; /* # of server threads */
unsigned int sv_maxconn; /* max connections allowed or
* '0' causing max to be based
@@ -114,15 +114,43 @@ struct svc_serv {
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
-/*
- * We use sv_nrthreads as a reference count. svc_destroy() drops
- * this refcount, so we need to bump it up around operations that
- * change the number of threads. Horrible, but there it is.
- * Should be called with the "service mutex" held.
+/**
+ * svc_get() - increment reference count on a SUNRPC serv
+ * @serv: the svc_serv to have count incremented
+ *
+ * Returns: the svc_serv that was passed in.
+ */
+static inline struct svc_serv *svc_get(struct svc_serv *serv)
+{
+ kref_get(&serv->sv_refcnt);
+ return serv;
+}
+
+void svc_destroy(struct kref *);
+
+/**
+ * svc_put - decrement reference count on a SUNRPC serv
+ * @serv: the svc_serv to have count decremented
+ *
+ * When the reference count reaches zero, svc_destroy()
+ * is called to clean up and free the serv.
+ */
+static inline void svc_put(struct svc_serv *serv)
+{
+ kref_put(&serv->sv_refcnt, svc_destroy);
+}
+
+/**
+ * svc_put_not_last - decrement non-final reference count on SUNRPC serv
+ * @serv: the svc_serv to have count decremented
+ *
+ * Returns: %true if the refcount was decremented.
+ *
+ * If the refcount is 1, it is not decremented and instead failure is reported.
*/
-static inline void svc_get(struct svc_serv *serv)
+static inline bool svc_put_not_last(struct svc_serv *serv)
{
- serv->sv_nrthreads++;
+ return refcount_dec_not_one(&serv->sv_refcnt.refcount);
}
/*
@@ -469,29 +497,6 @@ struct svc_procedure {
};
/*
- * Mode for mapping cpus to pools.
- */
-enum {
- SVC_POOL_AUTO = -1, /* choose one of the others */
- SVC_POOL_GLOBAL, /* no mapping, just a single global pool
- * (legacy & UP mode) */
- SVC_POOL_PERCPU, /* one pool per cpu */
- SVC_POOL_PERNODE /* one pool per numa node */
-};
-
-struct svc_pool_map {
- int count; /* How many svc_servs use us */
- int mode; /* Note: int not enum to avoid
- * warnings about "enumeration value
- * not handled in switch" */
- unsigned int npools;
- unsigned int *pool_to; /* maps pool id to cpu or node */
- unsigned int *to_pool; /* maps cpu or node to pool id */
-};
-
-extern struct svc_pool_map svc_pool_map;
-
-/*
* Function prototypes.
*/
int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
@@ -501,20 +506,14 @@ struct svc_serv *svc_create(struct svc_program *, unsigned int,
const struct svc_serv_ops *);
struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv,
struct svc_pool *pool, int node);
-struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
- struct svc_pool *pool, int node);
void svc_rqst_replace_page(struct svc_rqst *rqstp,
struct page *page);
void svc_rqst_free(struct svc_rqst *);
void svc_exit_thread(struct svc_rqst *);
-unsigned int svc_pool_map_get(void);
-void svc_pool_map_put(void);
struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
const struct svc_serv_ops *);
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
-int svc_set_num_threads_sync(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
-void svc_destroy(struct svc_serv *);
void svc_shutdown_net(struct svc_serv *, struct net *);
int svc_process(struct svc_rqst *);
int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
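A hedged sketch of the new lifetime rules (hypothetical caller): every user takes a reference with svc_get() and drops it with svc_put(), which invokes svc_destroy() on the final put instead of the old sv_nrthreads bumping.

static void foo_with_serv(struct svc_serv *serv)
{
	svc_get(serv);
	/* ... safely dereference serv here ... */
	svc_put(serv);		/* may free serv via svc_destroy() */
}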
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 5785d909c321..300273ff40cc 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -430,15 +430,7 @@ struct platform_hibernation_ops {
#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
-extern void __register_nosave_region(unsigned long b, unsigned long e, int km);
-static inline void __init register_nosave_region(unsigned long b, unsigned long e)
-{
- __register_nosave_region(b, e, 0);
-}
-static inline void __init register_nosave_region_late(unsigned long b, unsigned long e)
-{
- __register_nosave_region(b, e, 1);
-}
+extern void register_nosave_region(unsigned long b, unsigned long e);
extern int swsusp_page_is_forbidden(struct page *);
extern void swsusp_set_page_free(struct page *);
extern void swsusp_unset_page_free(struct page *);
@@ -458,7 +450,6 @@ int pfn_is_nosave(unsigned long pfn);
int hibernate_quiet_exec(int (*func)(void *data), void *data);
#else /* CONFIG_HIBERNATION */
static inline void register_nosave_region(unsigned long b, unsigned long e) {}
-static inline void register_nosave_region_late(unsigned long b, unsigned long e) {}
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
static inline void swsusp_set_page_free(struct page *p) {}
static inline void swsusp_unset_page_free(struct page *p) {}
@@ -506,14 +497,14 @@ extern void ksys_sync_helper(void);
/* drivers/base/power/wakeup.c */
extern bool events_check_enabled;
-extern unsigned int pm_wakeup_irq;
extern suspend_state_t pm_suspend_target_state;
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
extern void pm_system_cancel_wakeup(void);
-extern void pm_wakeup_clear(bool reset);
+extern void pm_wakeup_clear(unsigned int irq_number);
extern void pm_system_irq_wakeup(unsigned int irq_number);
+extern unsigned int pm_wakeup_irq(void);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d1ea44b31f19..1d38d9475c4d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -514,7 +514,7 @@ extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
-extern bool reuse_swap_page(struct page *, int *);
+extern bool reuse_swap_page(struct page *);
extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
@@ -680,8 +680,8 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-#define reuse_swap_page(page, total_map_swapcount) \
- (page_trans_huge_mapcount(page, total_map_swapcount) == 1)
+#define reuse_swap_page(page) \
+ (page_trans_huge_mapcount(page) == 1)
static inline int try_to_free_swap(struct page *page)
{
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index e06febf62978..54078542134c 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -6,10 +6,7 @@
* these were static in swapfile.c but frontswap.c needs them and we don't
* want to expose them to the dozens of source files that include swap.h
*/
-extern spinlock_t swap_lock;
-extern struct plist_head swap_active_head;
extern struct swap_info_struct *swap_info[];
-extern int try_to_unuse(unsigned int, bool, unsigned long);
extern unsigned long generic_max_swapfile_size(void);
extern unsigned long max_swapfile_size(void);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 569272871375..f6c3638255d5 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -73,6 +73,9 @@ extern enum swiotlb_force swiotlb_force;
* @end: The end address of the swiotlb memory pool. Used to do a quick
* range check to see if the memory was in fact allocated by this
* API.
+ * @vaddr:	The virtual address of the swiotlb memory pool. The pool may be
+ *		remapped in the memory-encryption case, and this stores the
+ *		virtual address used for bounce-buffer operations.
* @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
* @end. For default swiotlb, this is command line adjustable via
* setup_io_tlb_npages.
@@ -92,6 +95,7 @@ extern enum swiotlb_force swiotlb_force;
struct io_tlb_mem {
phys_addr_t start;
phys_addr_t end;
+ void *vaddr;
unsigned long nslabs;
unsigned long used;
unsigned int index;
@@ -186,4 +190,6 @@ static inline bool is_swiotlb_for_alloc(struct device *dev)
}
#endif /* CONFIG_DMA_RESTRICTED_POOL */
+extern phys_addr_t swiotlb_unencrypted_base;
+
#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index be24056ac00f..48fabe36509e 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -337,8 +337,6 @@ enum {
NTB_CTRL_REQ_ID_EN = 1 << 0,
NTB_CTRL_LUT_EN = 1 << 0,
-
- NTB_PART_CTRL_ID_PROT_DIS = 1 << 0,
};
struct ntb_ctrl_regs {
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 528a478dbda8..819c0cb00b6d 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -1057,6 +1057,9 @@ asmlinkage long sys_landlock_add_rule(int ruleset_fd, enum landlock_rule_type ru
const void __user *rule_attr, __u32 flags);
asmlinkage long sys_landlock_restrict_self(int ruleset_fd, __u32 flags);
asmlinkage long sys_memfd_secret(unsigned int flags);
+asmlinkage long sys_set_mempolicy_home_node(unsigned long start, unsigned long len,
+ unsigned long home_node,
+ unsigned long flags);
/*
* Architecture-specific system calls
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 1fa2b69c6fc3..6353d6db69b2 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -38,12 +38,28 @@ struct ctl_table_header;
struct ctl_dir;
/* Keep the same order as in fs/proc/proc_sysctl.c */
-#define SYSCTL_ZERO ((void *)&sysctl_vals[0])
-#define SYSCTL_ONE ((void *)&sysctl_vals[1])
-#define SYSCTL_INT_MAX ((void *)&sysctl_vals[2])
+#define SYSCTL_NEG_ONE ((void *)&sysctl_vals[0])
+#define SYSCTL_ZERO ((void *)&sysctl_vals[1])
+#define SYSCTL_ONE ((void *)&sysctl_vals[2])
+#define SYSCTL_TWO ((void *)&sysctl_vals[3])
+#define SYSCTL_FOUR ((void *)&sysctl_vals[4])
+#define SYSCTL_ONE_HUNDRED ((void *)&sysctl_vals[5])
+#define SYSCTL_TWO_HUNDRED ((void *)&sysctl_vals[6])
+#define SYSCTL_ONE_THOUSAND ((void *)&sysctl_vals[7])
+#define SYSCTL_THREE_THOUSAND ((void *)&sysctl_vals[8])
+#define SYSCTL_INT_MAX ((void *)&sysctl_vals[9])
+
+/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
+#define SYSCTL_MAXOLDUID ((void *)&sysctl_vals[10])
extern const int sysctl_vals[];
+#define SYSCTL_LONG_ZERO ((void *)&sysctl_long_vals[0])
+#define SYSCTL_LONG_ONE ((void *)&sysctl_long_vals[1])
+#define SYSCTL_LONG_MAX ((void *)&sysctl_long_vals[2])
+
+extern const unsigned long sysctl_long_vals[];
+
typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer,
size_t *lenp, loff_t *ppos);
@@ -178,6 +194,20 @@ struct ctl_path {
#ifdef CONFIG_SYSCTL
+#define DECLARE_SYSCTL_BASE(_name, _table) \
+static struct ctl_table _name##_base_table[] = { \
+ { \
+ .procname = #_name, \
+ .mode = 0555, \
+ .child = _table, \
+ }, \
+ { }, \
+}
+
+extern int __register_sysctl_base(struct ctl_table *base_table);
+
+#define register_sysctl_base(_name) __register_sysctl_base(_name##_base_table)
+
void proc_sys_poll_notify(struct ctl_table_poll *poll);
extern void setup_sysctl_set(struct ctl_table_set *p,
@@ -198,8 +228,19 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
void unregister_sysctl_table(struct ctl_table_header * table);
-extern int sysctl_init(void);
+extern int sysctl_init_bases(void);
+extern void __register_sysctl_init(const char *path, struct ctl_table *table,
+ const char *table_name);
+#define register_sysctl_init(path, table) __register_sysctl_init(path, table, #table)
+extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
+
void do_sysctl_args(void);
+int do_proc_douintvec(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos,
+ int (*conv)(unsigned long *lvalp,
+ unsigned int *valp,
+ int write, void *data),
+ void *data);
extern int pwrsw_enabled;
extern int unaligned_enabled;
@@ -207,16 +248,28 @@ extern int unaligned_dump_stack;
extern int no_unaligned_warning;
extern struct ctl_table sysctl_mount_point[];
-extern struct ctl_table random_table[];
-extern struct ctl_table firmware_config_table[];
-extern struct ctl_table epoll_table[];
#else /* CONFIG_SYSCTL */
+
+#define DECLARE_SYSCTL_BASE(_name, _table)
+
+static inline int __register_sysctl_base(struct ctl_table *base_table)
+{
+ return 0;
+}
+
+#define register_sysctl_base(table) __register_sysctl_base(table)
+
static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
{
return NULL;
}
+static inline struct ctl_table_header *register_sysctl_mount_point(const char *path)
+{
+ return NULL;
+}
+
static inline struct ctl_table_header *register_sysctl_paths(
const struct ctl_path *path, struct ctl_table *table)
{
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 0b3704ad13c8..a6e201758ae9 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -180,6 +180,19 @@ static inline int cpu_to_mem(int cpu)
#endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */
+#if defined(topology_die_id) && defined(topology_die_cpumask)
+#define TOPOLOGY_DIE_SYSFS
+#endif
+#if defined(topology_cluster_id) && defined(topology_cluster_cpumask)
+#define TOPOLOGY_CLUSTER_SYSFS
+#endif
+#if defined(topology_book_id) && defined(topology_book_cpumask)
+#define TOPOLOGY_BOOK_SYSFS
+#endif
+#if defined(topology_drawer_id) && defined(topology_drawer_cpumask)
+#define TOPOLOGY_DRAWER_SYSFS
+#endif
+
#ifndef topology_physical_package_id
#define topology_physical_package_id(cpu) ((void)(cpu), -1)
#endif
@@ -192,6 +205,12 @@ static inline int cpu_to_mem(int cpu)
#ifndef topology_core_id
#define topology_core_id(cpu) ((void)(cpu), 0)
#endif
+#ifndef topology_book_id
+#define topology_book_id(cpu) ((void)(cpu), -1)
+#endif
+#ifndef topology_drawer_id
+#define topology_drawer_id(cpu) ((void)(cpu), -1)
+#endif
#ifndef topology_sibling_cpumask
#define topology_sibling_cpumask(cpu) cpumask_of(cpu)
#endif
@@ -204,6 +223,12 @@ static inline int cpu_to_mem(int cpu)
#ifndef topology_die_cpumask
#define topology_die_cpumask(cpu) cpumask_of(cpu)
#endif
+#ifndef topology_book_cpumask
+#define topology_book_cpumask(cpu) cpumask_of(cpu)
+#endif
+#ifndef topology_drawer_cpumask
+#define topology_drawer_cpumask(cpu) cpumask_of(cpu)
+#endif
#if defined(CONFIG_SCHED_SMT) && !defined(cpu_smt_mask)
static inline const struct cpumask *cpu_smt_mask(int cpu)
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 2d167ac3452c..70c069aef02c 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -172,6 +172,7 @@ enum trace_flag_type {
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
TRACE_FLAG_NMI = 0x40,
+ TRACE_FLAG_BH_OFF = 0x80,
};
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
@@ -782,6 +783,7 @@ enum {
FILTER_OTHER = 0,
FILTER_STATIC_STRING,
FILTER_DYN_STRING,
+ FILTER_RDYN_STRING,
FILTER_PTR_STRING,
FILTER_TRACE_FN,
FILTER_COMM,
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 2564b7434b4d..88c007ab5ebc 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -54,8 +54,7 @@ struct linux_binprm;
/*
* ptrace report for syscall entry and exit looks identical.
*/
-static inline int ptrace_report_syscall(struct pt_regs *regs,
- unsigned long message)
+static inline int ptrace_report_syscall(unsigned long message)
{
int ptrace = current->ptrace;
@@ -102,7 +101,7 @@ static inline int ptrace_report_syscall(struct pt_regs *regs,
static inline __must_check int tracehook_report_syscall_entry(
struct pt_regs *regs)
{
- return ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_ENTRY);
+ return ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_ENTRY);
}
/**
@@ -127,7 +126,7 @@ static inline void tracehook_report_syscall_exit(struct pt_regs *regs, int step)
if (step)
user_single_step_report(regs);
else
- ptrace_report_syscall(regs, PTRACE_EVENTMSG_SYSCALL_EXIT);
+ ptrace_report_syscall(PTRACE_EVENTMSG_SYSCALL_EXIT);
}
/**
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 5dbd7c5afac7..7b0a5d478ef6 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -122,33 +122,84 @@ struct tty_operations;
/**
* struct tty_struct - state associated with a tty while open
*
- * @flow.lock: lock for flow members
- * @flow.stopped: tty stopped/started by tty_stop/tty_start
- * @flow.tco_stopped: tty stopped/started by TCOOFF/TCOON ioctls (it has
- * precedense over @flow.stopped)
+ * @magic: magic value set early in @alloc_tty_struct to %TTY_MAGIC, for
+ * debugging purposes
+ * @kref: reference counting by tty_kref_get() and tty_kref_put(), reaching zero
+ * frees the structure
+ * @dev: class device or %NULL (e.g. ptys, serdev)
+ * @driver: &struct tty_driver operating this tty
+ * @ops: &struct tty_operations of @driver for this tty (open, close, etc.)
+ * @index: index of this tty (e.g. to construct @name like tty12)
+ * @ldisc_sem: protects line discipline changes (@ldisc) -- lock tty not pty
+ * @ldisc: the current line discipline for this tty (n_tty by default)
+ * @atomic_write_lock: protects against concurrent writers, i.e. locks
+ * @write_cnt, @write_buf and similar
+ * @legacy_mutex: leftover from history (BKL -> BTM -> @legacy_mutex),
+ * protecting several operations on this tty
+ * @throttle_mutex: protects against concurrent tty_throttle_safe() and
+ * tty_unthrottle_safe() (but not tty_unthrottle())
+ * @termios_rwsem: protects @termios and @termios_locked
+ * @winsize_mutex: protects @winsize
+ * @termios: termios for the current tty, copied from/to @driver.termios
+ * @termios_locked: locked termios (by %TIOCGLCKTRMIOS and %TIOCSLCKTRMIOS
+ * ioctls)
+ * @name: name of the tty constructed by tty_line_name() (e.g. ttyS3)
+ * @flags: bitwise OR of %TTY_THROTTLED, %TTY_IO_ERROR, ...
+ * @count: count of open processes, reaching zero cancels all the work for
+ * this tty and drops a @kref too (but does not free this tty)
+ * @winsize: size of the terminal "window" (cf. @winsize_mutex)
+ * @flow: flow settings grouped together, see also @flow.unused
+ * @flow.lock: lock for @flow members
+ * @flow.stopped: tty stopped/started by stop_tty()/start_tty()
+ * @flow.tco_stopped: tty stopped/started by %TCOOFF/%TCOON ioctls (it has
+ * precedence over @flow.stopped)
* @flow.unused: alignment for Alpha, so that no members other than @flow.* are
* modified by the same 64b word store. The @flow's __aligned is
* there for the very same reason.
- * @ctrl.lock: lock for ctrl members
+ * @ctrl: control settings grouped together, see also @ctrl.unused
+ * @ctrl.lock: lock for @ctrl members
* @ctrl.pgrp: process group of this tty (setpgrp(2))
* @ctrl.session: session of this tty (setsid(2)). Writes are protected by both
- * @ctrl.lock and legacy mutex, readers must use at least one of
+ * @ctrl.lock and @legacy_mutex, readers must use at least one of
* them.
- * @ctrl.pktstatus: packet mode status (bitwise OR of TIOCPKT_* constants)
+ * @ctrl.pktstatus: packet mode status (bitwise OR of %TIOCPKT_ constants)
* @ctrl.packet: packet mode enabled
+ * @ctrl.unused: alignment for Alpha, see @flow.unused for explanation
+ * @hw_stopped: not controlled by the tty layer, under @driver's control for CTS
+ * handling
+ * @receive_room: bytes permitted to feed to @ldisc without any being lost
+ * @flow_change: controls behavior of throttling, see tty_throttle_safe() and
+ * tty_unthrottle_safe()
+ * @link: link to another pty (master -> slave and vice versa)
+ * @fasync: state for %O_ASYNC (for %SIGIO); managed by fasync_helper()
+ * @write_wait: concurrent writers are waiting in this queue until they are
+ * allowed to write
+ * @read_wait: readers wait for data in this queue
+ * @hangup_work: normally a work to perform a hangup (do_tty_hangup()); while
+ * freeing the tty, (re)used to release_one_tty()
+ * @disc_data: pointer to @ldisc's private data (e.g. to &struct n_tty_data)
+ * @driver_data: pointer to @driver's private data (e.g. &struct uart_state)
+ * @files_lock: protects @tty_files list
+ * @tty_files: list of (re)openers of this tty (i.e. linked &struct
+ * tty_file_private)
+ * @closing: when set during close, n_tty processes only START & STOP chars
+ * @write_buf: temporary buffer used during tty_write() to copy user data to
+ * @write_cnt: count of bytes written in tty_write() to @write_buf
+ * @SAK_work: if the tty has a pending do_SAK, it is queued here
+ * @port: persistent storage for this device (i.e. &struct tty_port)
*
* All of the state associated with a tty while the tty is open. Persistent
- * storage for tty devices is referenced here as @port in struct tty_port.
+ * storage for tty devices is referenced here as @port and is documented in
+ * &struct tty_port.
*/
struct tty_struct {
int magic;
struct kref kref;
- struct device *dev; /* class device or NULL (e.g. ptys, serdev) */
+ struct device *dev;
struct tty_driver *driver;
const struct tty_operations *ops;
int index;
- /* Protects ldisc changes: Lock tty not pty */
struct ld_semaphore ldisc_sem;
struct tty_ldisc *ldisc;
@@ -157,12 +208,11 @@ struct tty_struct {
struct mutex throttle_mutex;
struct rw_semaphore termios_rwsem;
struct mutex winsize_mutex;
- /* Termios values are protected by the termios rwsem */
struct ktermios termios, termios_locked;
char name[64];
unsigned long flags;
int count;
- struct winsize winsize; /* winsize_mutex */
+ struct winsize winsize;
struct {
spinlock_t lock;
@@ -181,7 +231,7 @@ struct tty_struct {
} __aligned(sizeof(unsigned long)) ctrl;
int hw_stopped;
- unsigned int receive_room; /* Bytes free for queue */
+ unsigned int receive_room;
int flow_change;
struct tty_struct *link;
@@ -191,7 +241,7 @@ struct tty_struct {
struct work_struct hangup_work;
void *disc_data;
void *driver_data;
- spinlock_t files_lock; /* protects tty_files list */
+ spinlock_t files_lock;
struct list_head tty_files;
#define N_TTY_BUF_SIZE 4096
@@ -199,7 +249,6 @@ struct tty_struct {
int closing;
unsigned char *write_buf;
int write_cnt;
- /* If the tty has a pending do_SAK, queue it here - akpm */
struct work_struct SAK_work;
struct tty_port *port;
} __randomize_layout;
@@ -214,26 +263,72 @@ struct tty_file_private {
/* tty magic number */
#define TTY_MAGIC 0x5401
-/*
- * These bits are used in the flags field of the tty structure.
+/**
+ * DOC: TTY Struct Flags
+ *
+ * These bits are used in the :c:member:`tty_struct.flags` field.
*
* So that interrupts won't be able to mess up the queues,
* copy_to_cooked must be atomic with respect to itself, as must
* tty->write. Thus, you must use the inline functions set_bit() and
* clear_bit() to make things atomic.
+ *
+ * TTY_THROTTLED
+ * Driver input is throttled. The ldisc should call
+ * :c:member:`tty_driver.unthrottle()` in order to resume reception when
+ * it is ready to process more data (at threshold min).
+ *
+ * TTY_IO_ERROR
+ * If set, causes all subsequent userspace read/write calls on the tty to
+ *	fail, returning -%EIO. (There may also be no ldisc attached.)
+ *
+ * TTY_OTHER_CLOSED
+ * Device is a pty and the other side has closed.
+ *
+ * TTY_EXCLUSIVE
+ * Exclusive open mode (a single opener).
+ *
+ * TTY_DO_WRITE_WAKEUP
+ * If set, causes the driver to call the
+ * :c:member:`tty_ldisc_ops.write_wakeup()` method in order to resume
+ * transmission when it can accept more data to transmit.
+ *
+ * TTY_LDISC_OPEN
+ * Indicates that a line discipline is open. For debugging purposes only.
+ *
+ * TTY_PTY_LOCK
+ * A flag private to pty code to implement %TIOCSPTLCK/%TIOCGPTLCK logic.
+ *
+ * TTY_NO_WRITE_SPLIT
+ * Prevent driver from splitting up writes into smaller chunks (preserve
+ * write boundaries to driver).
+ *
+ * TTY_HUPPED
+ * The TTY was hung up. This is set post :c:member:`tty_driver.hangup()`.
+ *
+ * TTY_HUPPING
+ * The TTY is in the process of hanging up to abort potential readers.
+ *
+ * TTY_LDISC_CHANGING
+ * Line discipline for this TTY is being changed. I/O should not block
+ * when this is set. Use tty_io_nonblock() to check.
+ *
+ * TTY_LDISC_HALTED
+ * Line discipline for this TTY was stopped. No work should be queued to
+ * this ldisc.
*/
-#define TTY_THROTTLED 0 /* Call unthrottle() at threshold min */
-#define TTY_IO_ERROR 1 /* Cause an I/O error (may be no ldisc too) */
-#define TTY_OTHER_CLOSED 2 /* Other side (if any) has closed */
-#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
-#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
-#define TTY_LDISC_OPEN 11 /* Line discipline is open */
-#define TTY_PTY_LOCK 16 /* pty private */
-#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
-#define TTY_HUPPED 18 /* Post driver->hangup() */
-#define TTY_HUPPING 19 /* Hangup in progress */
-#define TTY_LDISC_CHANGING 20 /* Change pending - non-block IO */
-#define TTY_LDISC_HALTED 22 /* Line discipline is halted */
+#define TTY_THROTTLED 0
+#define TTY_IO_ERROR 1
+#define TTY_OTHER_CLOSED 2
+#define TTY_EXCLUSIVE 3
+#define TTY_DO_WRITE_WAKEUP 5
+#define TTY_LDISC_OPEN 11
+#define TTY_PTY_LOCK 16
+#define TTY_NO_WRITE_SPLIT 17
+#define TTY_HUPPED 18
+#define TTY_HUPPING 19
+#define TTY_LDISC_CHANGING 20
+#define TTY_LDISC_HALTED 22
static inline bool tty_io_nonblock(struct tty_struct *tty, struct file *file)
{
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 795b94ccdeb6..4841d8069c07 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -2,248 +2,350 @@
#ifndef _LINUX_TTY_DRIVER_H
#define _LINUX_TTY_DRIVER_H
-/*
- * This structure defines the interface between the low-level tty
- * driver and the tty routines. The following routines can be
- * defined; unless noted otherwise, they are optional, and can be
- * filled in with a null pointer.
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/cdev.h>
+#include <linux/termios.h>
+#include <linux/seq_file.h>
+
+struct tty_struct;
+struct tty_driver;
+struct serial_icounter_struct;
+struct serial_struct;
+
+/**
+ * struct tty_operations -- interface between driver and tty
+ *
+ * @lookup: ``struct tty_struct *()(struct tty_driver *self, struct file *,
+ * int idx)``
*
- * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx)
+ * Return the tty device corresponding to @idx, %NULL if there is not
+ * one currently in use and an %ERR_PTR value on error. Called under
+ * %tty_mutex (for now!)
*
- * Return the tty device corresponding to idx, NULL if there is not
- * one currently in use and an ERR_PTR value on error. Called under
- * tty_mutex (for now!)
+ * Optional method. Default behaviour is to use the @self->ttys array.
*
- * Optional method. Default behaviour is to use the ttys array
+ * @install: ``int ()(struct tty_driver *self, struct tty_struct *tty)``
*
- * int (*install)(struct tty_driver *self, struct tty_struct *tty)
+ * Install a new @tty into the @self's internal tables. Used in
+ * conjunction with @lookup and @remove methods.
*
- * Install a new tty into the tty driver internal tables. Used in
- * conjunction with lookup and remove methods.
+ * Optional method. Default behaviour is to use the @self->ttys array.
*
- * Optional method. Default behaviour is to use the ttys array
+ * @remove: ``void ()(struct tty_driver *self, struct tty_struct *tty)``
*
- * void (*remove)(struct tty_driver *self, struct tty_struct *tty)
+ *	Remove a closed @tty from @self's internal tables. Used in
+ *	conjunction with the @lookup and @install methods.
*
- * Remove a closed tty from the tty driver internal tables. Used in
- * conjunction with lookup and remove methods.
+ * Optional method. Default behaviour is to use the @self->ttys array.
*
- * Optional method. Default behaviour is to use the ttys array
+ * @open: ``int ()(struct tty_struct *tty, struct file *)``
*
- * int (*open)(struct tty_struct * tty, struct file * filp);
+ * This routine is called when a particular @tty device is opened. This
+ * routine is mandatory; if this routine is not filled in, the attempted
+ * open will fail with %ENODEV.
*
- * This routine is called when a particular tty device is opened.
- * This routine is mandatory; if this routine is not filled in,
- * the attempted open will fail with ENODEV.
+ * Required method. Called with tty lock held. May sleep.
*
- * Required method. Called with tty lock held.
+ * @close: ``void ()(struct tty_struct *tty, struct file *)``
*
- * void (*close)(struct tty_struct * tty, struct file * filp);
+ * This routine is called when a particular @tty device is closed. At the
+ * point of return from this call the driver must make no further ldisc
+ * calls of any kind.
*
- * This routine is called when a particular tty device is closed.
- * Note: called even if the corresponding open() failed.
+ * Remark: called even if the corresponding @open() failed.
*
- * Required method. Called with tty lock held.
+ * Required method. Called with tty lock held. May sleep.
*
- * void (*shutdown)(struct tty_struct * tty);
+ * @shutdown: ``void ()(struct tty_struct *tty)``
*
- * This routine is called under the tty lock when a particular tty device
- * is closed for the last time. It executes before the tty resources
- * are freed so may execute while another function holds a tty kref.
+ * This routine is called under the tty lock when a particular @tty device
+ * is closed for the last time. It executes before the @tty resources
+ * are freed so may execute while another function holds a @tty kref.
*
- * void (*cleanup)(struct tty_struct * tty);
+ * @cleanup: ``void ()(struct tty_struct *tty)``
*
- * This routine is called asynchronously when a particular tty device
+ * This routine is called asynchronously when a particular @tty device
* is closed for the last time freeing up the resources. This is
* actually the second part of shutdown for routines that might sleep.
*
+ * @write: ``int ()(struct tty_struct *tty, const unsigned char *buf,
+ * int count)``
*
- * int (*write)(struct tty_struct * tty,
- * const unsigned char *buf, int count);
- *
- * This routine is called by the kernel to write a series of
- * characters to the tty device. The characters may come from
- * user space or kernel space. This routine will return the
+ * This routine is called by the kernel to write a series (@count) of
+ * characters (@buf) to the @tty device. The characters may come from
+ * user space or kernel space. This routine will return the
* number of characters actually accepted for writing.
*
- * Optional: Required for writable devices.
+ * May occur in parallel in special cases. Because this includes panic
+ *	paths, drivers generally shouldn't try to do clever locking here.
*
- * int (*put_char)(struct tty_struct *tty, unsigned char ch);
+ * Optional: Required for writable devices. May not sleep.
*
- * This routine is called by the kernel to write a single
- * character to the tty device. If the kernel uses this routine,
- * it must call the flush_chars() routine (if defined) when it is
- * done stuffing characters into the driver. If there is no room
- * in the queue, the character is ignored.
+ * @put_char: ``int ()(struct tty_struct *tty, unsigned char ch)``
*
- * Optional: Kernel will use the write method if not provided.
+ * This routine is called by the kernel to write a single character @ch to
+ * the @tty device. If the kernel uses this routine, it must call the
+ * @flush_chars() routine (if defined) when it is done stuffing characters
+ * into the driver. If there is no room in the queue, the character is
+ * ignored.
*
- * Note: Do not call this function directly, call tty_put_char
+ * Optional: Kernel will use the @write method if not provided. Do not
+ * call this function directly, call tty_put_char().
*
- * void (*flush_chars)(struct tty_struct *tty);
+ * @flush_chars: ``void ()(struct tty_struct *tty)``
*
- * This routine is called by the kernel after it has written a
- * series of characters to the tty device using put_char().
+ * This routine is called by the kernel after it has written a
+ * series of characters to the tty device using @put_char().
*
- * Optional:
+ * Optional. Do not call this function directly, call
+ * tty_driver_flush_chars().
*
- * Note: Do not call this function directly, call tty_driver_flush_chars
- *
- * unsigned int (*write_room)(struct tty_struct *tty);
+ * @write_room: ``unsigned int ()(struct tty_struct *tty)``
*
- * This routine returns the numbers of characters the tty driver
- * will accept for queuing to be written. This number is subject
- * to change as output buffers get emptied, or if the output flow
+ *	This routine returns the number of characters the @tty driver
+ * will accept for queuing to be written. This number is subject
+ * to change as output buffers get emptied, or if the output flow
* control is acted.
*
- * Required if write method is provided else not needed.
+ * The ldisc is responsible for being intelligent about multi-threading of
+ *	@write_room/@write calls.
+ *
+ *	Required if the @write method is provided, else not needed. Do not
+ *	call this function directly, call tty_write_room().
+ *
+ * @chars_in_buffer: ``unsigned int ()(struct tty_struct *tty)``
+ *
+ * This routine returns the number of characters in the device private
+ * output queue. Used in tty_wait_until_sent() and for poll()
+ * implementation.
*
- * Note: Do not call this function directly, call tty_write_room
- *
- * int (*ioctl)(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
+ * Optional: if not provided, it is assumed there is no queue on the
+ * device. Do not call this function directly, call tty_chars_in_buffer().
*
- * This routine allows the tty driver to implement
- * device-specific ioctls. If the ioctl number passed in cmd
- * is not recognized by the driver, it should return ENOIOCTLCMD.
+ * @ioctl: ``int ()(struct tty_struct *tty, unsigned int cmd,
+ * unsigned long arg)``
*
- * Optional
+ * This routine allows the @tty driver to implement device-specific
+ * ioctls. If the ioctl number passed in @cmd is not recognized by the
+ * driver, it should return %ENOIOCTLCMD.
*
- * long (*compat_ioctl)(struct tty_struct *tty,,
- * unsigned int cmd, unsigned long arg);
+ * Optional.
*
- * implement ioctl processing for 32 bit process on 64 bit system
+ * @compat_ioctl: ``long ()(struct tty_struct *tty, unsigned int cmd,
+ * unsigned long arg)``
*
- * Optional
- *
- * void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
+ *	Implement ioctl processing for a 32-bit process on a 64-bit system.
*
- * This routine allows the tty driver to be notified when
- * device's termios settings have changed.
+ * Optional.
*
- * Optional: Called under the termios lock
+ * @set_termios: ``void ()(struct tty_struct *tty, struct ktermios *old)``
*
+ *	This routine allows the @tty driver to be notified when the device's
+ * termios settings have changed. New settings are in @tty->termios.
+ * Previous settings are passed in the @old argument.
*
- * void (*set_ldisc)(struct tty_struct *tty);
+ * The API is defined such that the driver should return the actual modes
+ * selected. This means that the driver is responsible for modifying any
+ * bits in @tty->termios it cannot fulfill to indicate the actual modes
+ * being used.
*
- * This routine allows the tty driver to be notified when the
- * device's termios settings have changed.
+ * Optional. Called under the @tty->termios_rwsem. May sleep.
*
- * Optional: Called under BKL (currently)
- *
- * void (*throttle)(struct tty_struct * tty);
+ * @set_ldisc: ``void ()(struct tty_struct *tty)``
*
- * This routine notifies the tty driver that input buffers for
- * the line discipline are close to full, and it should somehow
- * signal that no more characters should be sent to the tty.
+ * This routine allows the @tty driver to be notified when the device's
+ * line discipline is being changed. At the point this is done the
+ * discipline is not yet usable.
*
- * Optional: Always invoke via tty_throttle_safe(), called under the
- * termios lock.
- *
- * void (*unthrottle)(struct tty_struct * tty);
+ * Optional. Called under the @tty->ldisc_sem and @tty->termios_rwsem.
*
- * This routine notifies the tty drivers that it should signals
- * that characters can now be sent to the tty without fear of
- * overrunning the input buffers of the line disciplines.
- *
- * Optional: Always invoke via tty_unthrottle(), called under the
- * termios lock.
+ * @throttle: ``void ()(struct tty_struct *tty)``
*
- * void (*stop)(struct tty_struct *tty);
+ * This routine notifies the @tty driver that input buffers for the line
+ * discipline are close to full, and it should somehow signal that no more
+ * characters should be sent to the @tty.
*
- * This routine notifies the tty driver that it should stop
- * outputting characters to the tty device.
+ *	Serialization, including with @unthrottle(), is the job of the ldisc
+ * layer.
*
- * Called with ->flow.lock held. Serialized with start() method.
+ * Optional: Always invoke via tty_throttle_safe(). Called under the
+ * @tty->termios_rwsem.
*
- * Optional:
+ * @unthrottle: ``void ()(struct tty_struct *tty)``
*
- * Note: Call stop_tty not this method.
- *
- * void (*start)(struct tty_struct *tty);
+ * This routine notifies the @tty driver that it should signal that
+ * characters can now be sent to the @tty without fear of overrunning the
+ * input buffers of the line disciplines.
*
- * This routine notifies the tty driver that it resume sending
+ * Optional. Always invoke via tty_unthrottle(). Called under the
+ * @tty->termios_rwsem.
+ *
+ * @stop: ``void ()(struct tty_struct *tty)``
+ *
+ * This routine notifies the @tty driver that it should stop outputting
* characters to the tty device.
*
- * Called with ->flow.lock held. Serialized with stop() method.
+ * Called with @tty->flow.lock held. Serialized with @start() method.
+ *
+ * Optional. Always invoke via stop_tty().
+ *
+ * @start: ``void ()(struct tty_struct *tty)``
+ *
+ *	This routine notifies the @tty driver that it should resume sending
+ *	characters to the @tty device.
+ *
+ *	Called with @tty->flow.lock held. Serialized with the @stop() method.
+ *
+ * Optional. Always invoke via start_tty().
+ *
+ * @hangup: ``void ()(struct tty_struct *tty)``
+ *
+ * This routine notifies the @tty driver that it should hang up the @tty
+ * device.
*
- * Optional:
+ * Optional. Called with tty lock held.
*
- * Note: Call start_tty not this method.
- *
- * void (*hangup)(struct tty_struct *tty);
+ * @break_ctl: ``int ()(struct tty_struct *tty, int state)``
*
- * This routine notifies the tty driver that it should hang up the
- * tty device.
+ * This optional routine requests the @tty driver to turn on or off BREAK
+ * status on the RS-232 port. If @state is -1, then the BREAK status
+ * should be turned on; if @state is 0, then BREAK should be turned off.
*
- * Optional:
+ * If this routine is implemented, the high-level tty driver will handle
+ * the following ioctls: %TCSBRK, %TCSBRKP, %TIOCSBRK, %TIOCCBRK.
*
- * Called with tty lock held.
+ * If the driver sets %TTY_DRIVER_HARDWARE_BREAK in tty_alloc_driver(),
+ * then the interface will also be called with actual times and the
+ * hardware is expected to do the delay work itself. 0 and -1 are still
+ * used for on/off.
*
- * int (*break_ctl)(struct tty_struct *tty, int state);
+ *	Optional: Required for %TCSBRK/%TCSBRKP/etc. handling. May sleep.
*
- * This optional routine requests the tty driver to turn on or
- * off BREAK status on the RS-232 port. If state is -1,
- * then the BREAK status should be turned on; if state is 0, then
- * BREAK should be turned off.
+ * @flush_buffer: ``void ()(struct tty_struct *tty)``
*
- * If this routine is implemented, the high-level tty driver will
- * handle the following ioctls: TCSBRK, TCSBRKP, TIOCSBRK,
- * TIOCCBRK.
+ *	This routine discards the device's private output buffer. Invoked on
+ *	close and hangup, and to implement the %TCOFLUSH ioctl and similar.
*
- * If the driver sets TTY_DRIVER_HARDWARE_BREAK then the interface
- * will also be called with actual times and the hardware is expected
- * to do the delay work itself. 0 and -1 are still used for on/off.
+ * Optional: if not provided, it is assumed there is no queue on the
+ * device. Do not call this function directly, call
+ * tty_driver_flush_buffer().
*
- * Optional: Required for TCSBRK/BRKP/etc handling.
+ * @wait_until_sent: ``void ()(struct tty_struct *tty, int timeout)``
*
- * void (*wait_until_sent)(struct tty_struct *tty, int timeout);
- *
- * This routine waits until the device has written out all of the
- * characters in its transmitter FIFO.
+ * This routine waits until the device has written out all of the
+ *	characters in its transmitter FIFO, or until @timeout (in jiffies) is
+ * reached.
*
- * Optional: If not provided the device is assumed to have no FIFO
+ * Optional: If not provided, the device is assumed to have no FIFO.
+ * Usually correct to invoke via tty_wait_until_sent(). May sleep.
*
- * Note: Usually correct to call tty_wait_until_sent
+ * @send_xchar: ``void ()(struct tty_struct *tty, char ch)``
*
- * void (*send_xchar)(struct tty_struct *tty, char ch);
+ * This routine is used to send a high-priority XON/XOFF character (@ch)
+ * to the @tty device.
*
- * This routine is used to send a high-priority XON/XOFF
- * character to the device.
+ * Optional: If not provided, then the @write method is called under
+ * the @tty->atomic_write_lock to keep it serialized with the ldisc.
*
- * Optional: If not provided then the write method is called under
- * the atomic write lock to keep it serialized with the ldisc.
+ * @tiocmget: ``int ()(struct tty_struct *tty)``
*
- * int (*resize)(struct tty_struct *tty, struct winsize *ws)
+ * This routine is used to obtain the modem status bits from the @tty
+ * driver.
*
- * Called when a termios request is issued which changes the
- * requested terminal geometry.
+ * Optional: If not provided, then %ENOTTY is returned from the %TIOCMGET
+ * ioctl. Do not call this function directly, call tty_tiocmget().
+ *
+ * @tiocmset: ``int ()(struct tty_struct *tty,
+ * unsigned int set, unsigned int clear)``
+ *
+ * This routine is used to set the modem status bits to the @tty driver.
+ * First, @clear bits should be cleared, then @set bits set.
+ *
+ * Optional: If not provided, then %ENOTTY is returned from the %TIOCMSET
+ * ioctl. Do not call this function directly, call tty_tiocmset().
+ *
+ * @resize: ``int ()(struct tty_struct *tty, struct winsize *ws)``
+ *
+ * Called when a termios request is issued which changes the requested
+ * terminal geometry to @ws.
*
* Optional: the default action is to update the termios structure
* without error. This is usually the correct behaviour. Drivers should
- * not force errors here if they are not resizable objects (eg a serial
+ * not force errors here if they are not resizable objects (e.g. a serial
* line). See tty_do_resize() if you need to wrap the standard method
- * in your own logic - the usual case.
+ * in your own logic -- the usual case.
+ *
+ * @get_icount: ``int ()(struct tty_struct *tty,
+ * struct serial_icounter *icount)``
+ *
+ * Called when the @tty device receives a %TIOCGICOUNT ioctl. Passed a
+ * kernel structure @icount to complete.
+ *
+ * Optional: called only if provided, otherwise %ENOTTY will be returned.
*
- * int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount);
+ * @get_serial: ``int ()(struct tty_struct *tty, struct serial_struct *p)``
*
- * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel
- * structure to complete. This method is optional and will only be called
- * if provided (otherwise ENOTTY will be returned).
+ * Called when the @tty device receives a %TIOCGSERIAL ioctl. Passed a
+ * kernel structure @p (&struct serial_struct) to complete.
+ *
+ * Optional: called only if provided, otherwise %ENOTTY will be returned.
+ * Do not call this function directly, call tty_tiocgserial().
+ *
+ * @set_serial: ``int ()(struct tty_struct *tty, struct serial_struct *p)``
+ *
+ * Called when the @tty device receives a %TIOCSSERIAL ioctl. Passed a
+ * kernel structure @p (&struct serial_struct) to set the values from.
+ *
+ * Optional: called only if provided, otherwise %ENOTTY will be returned.
+ * Do not call this function directly, call tty_tiocsserial().
+ *
+ * @show_fdinfo: ``void ()(struct tty_struct *tty, struct seq_file *m)``
+ *
+ *	Called when the @tty device file descriptor receives an fdinfo request
+ * from VFS (to show in /proc/<pid>/fdinfo/). @m should be filled with
+ * information.
+ *
+ * Optional: called only if provided, otherwise nothing is written to @m.
+ * Do not call this function directly, call tty_show_fdinfo().
+ *
+ * @poll_init: ``int ()(struct tty_driver *driver, int line, char *options)``
+ *
+ * kgdboc support (Documentation/dev-tools/kgdb.rst). This routine is
+ * called to initialize the HW for later use by calling @poll_get_char or
+ * @poll_put_char.
+ *
+ * Optional: called only if provided, otherwise skipped as a non-polling
+ * driver.
+ *
+ * @poll_get_char: ``int ()(struct tty_driver *driver, int line)``
+ *
+ * kgdboc support (see @poll_init). @driver should read a character from a
+ * tty identified by @line and return it.
+ *
+ * Optional: called only if @poll_init provided.
+ *
+ * @poll_put_char: ``void ()(struct tty_driver *driver, int line, char ch)``
+ *
+ * kgdboc support (see @poll_init). @driver should write character @ch to
+ * a tty identified by @line.
+ *
+ * Optional: called only if @poll_init provided.
+ *
+ * @proc_show: ``int ()(struct seq_file *m, void *driver)``
+ *
+ * Driver @driver (cast to &struct tty_driver) can show additional info in
+ *	/proc/tty/driver/<driver_name>. It is enough to fill the information
+ * into @m.
+ *
+ * Optional: called only if provided, otherwise no /proc entry created.
+ *
+ * This structure defines the interface between the low-level tty driver and
+ * the tty routines. The routines above can be defined; unless noted
+ * otherwise, they are optional and can be filled in with a %NULL pointer.
*/
-
-#include <linux/export.h>
-#include <linux/fs.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-#include <linux/cdev.h>
-#include <linux/termios.h>
-#include <linux/seq_file.h>
-
-struct tty_struct;
-struct tty_driver;
-struct serial_icounter_struct;
-struct serial_struct;
-
struct tty_operations {
struct tty_struct * (*lookup)(struct tty_driver *driver,
struct file *filp, int idx);
@@ -288,26 +390,64 @@ struct tty_operations {
int (*poll_get_char)(struct tty_driver *driver, int line);
void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
#endif
- int (*proc_show)(struct seq_file *, void *);
+ int (*proc_show)(struct seq_file *m, void *driver);
} __randomize_layout;
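For illustration, a minimal set of hooks satisfying the mandatory @open/@close pair and the @write/@write_room coupling described above could be sketched as follows; all example_* names are hypothetical:

static int example_open(struct tty_struct *tty, struct file *filp)
{
	return 0;	/* required: without @open, open() fails with -ENODEV */
}

static void example_close(struct tty_struct *tty, struct file *filp)
{
	/* called even if the corresponding open() failed */
}

static int example_write(struct tty_struct *tty, const unsigned char *buf,
			 int count)
{
	return count;	/* pretend all characters were accepted */
}

static unsigned int example_write_room(struct tty_struct *tty)
{
	return 4096;	/* required once a @write hook is provided */
}

static const struct tty_operations example_ops = {
	.open		= example_open,
	.close		= example_close,
	.write		= example_write,
	.write_room	= example_write_room,
};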
+/**
+ * struct tty_driver -- driver for TTY devices
+ *
+ * @magic: set to %TTY_DRIVER_MAGIC in __tty_alloc_driver()
+ * @kref: reference counting. Reaching zero frees all the internals and the
+ * driver.
+ * @cdevs: allocated/registered character /dev devices
+ * @owner: module owning this driver. Drivers in use cannot be rmmod'ed.
+ * Automatically set by tty_alloc_driver().
+ * @driver_name: name of the driver used in /proc/tty
+ * @name: used for constructing /dev node name
+ * @name_base: used as a number base for constructing /dev node name
+ * @major: major /dev device number (zero for autoassignment)
+ * @minor_start: the first minor /dev device number
+ * @num: number of devices allocated
+ * @type: type of tty driver (%TTY_DRIVER_TYPE_)
+ * @subtype: subtype of tty driver (%SYSTEM_TYPE_, %PTY_TYPE_, %SERIAL_TYPE_)
+ * @init_termios: termios to set to each tty initially (e.g. %tty_std_termios)
+ * @flags: tty driver flags (%TTY_DRIVER_)
+ * @proc_entry: proc fs entry, used internally
+ * @other: driver of the linked tty; only used for the PTY driver
+ * @ttys: array of active &struct tty_struct, set by tty_standard_install()
+ * @ports: array of &struct tty_port; can be set during initialization by
+ * tty_port_link_device() and similar
+ * @termios: storage for termios at each TTY close for the next open
+ * @driver_state: pointer to driver's arbitrary data
+ * @ops: driver hooks for TTYs. Set them using tty_set_operations(). Use &struct
+ * tty_port helpers in them as much as possible.
+ * @tty_drivers: used internally to link tty_drivers together
+ *
+ * The usual handling of &struct tty_driver is to allocate it by
+ * tty_alloc_driver(), set up all the necessary members, and register it by
+ * tty_register_driver(). Finally, the driver is torn down by calling
+ * tty_unregister_driver() followed by tty_driver_kref_put().
+ *
+ * The fields required to be set before calling tty_register_driver() include
+ * @driver_name, @name, @type, @subtype, @init_termios, and @ops.
+ */
struct tty_driver {
- int magic; /* magic number for this structure */
- struct kref kref; /* Reference management */
+ int magic;
+ struct kref kref;
struct cdev **cdevs;
struct module *owner;
const char *driver_name;
const char *name;
- int name_base; /* offset of printed name */
- int major; /* major device number */
- int minor_start; /* start of minor device number */
- unsigned int num; /* number of devices allocated */
- short type; /* type of tty driver */
- short subtype; /* subtype of tty driver */
- struct ktermios init_termios; /* Initial termios */
- unsigned long flags; /* tty driver flags */
- struct proc_dir_entry *proc_entry; /* /proc fs entry */
- struct tty_driver *other; /* only used for the PTY driver */
+ int name_base;
+ int major;
+ int minor_start;
+ unsigned int num;
+ short type;
+ short subtype;
+ struct ktermios init_termios;
+ unsigned long flags;
+ struct proc_dir_entry *proc_entry;
+ struct tty_driver *other;
/*
* Pointer to the tty data structures
@@ -352,49 +492,53 @@ static inline void tty_set_operations(struct tty_driver *driver,
/* tty driver magic number */
#define TTY_DRIVER_MAGIC 0x5402
-/*
- * tty driver flags
- *
- * TTY_DRIVER_RESET_TERMIOS --- requests the tty layer to reset the
- * termios setting when the last process has closed the device.
- * Used for PTY's, in particular.
- *
- * TTY_DRIVER_REAL_RAW --- if set, indicates that the driver will
- * guarantee never to set any special character handling
- * flags if ((IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR ||
- * !INPCK)). That is, if there is no reason for the driver to
- * send notifications of parity and break characters up to the
- * line driver, it won't do so. This allows the line driver to
- * optimize for this case if this flag is set. (Note that there
- * is also a promise, if the above case is true, not to signal
- * overruns, either.)
- *
- * TTY_DRIVER_DYNAMIC_DEV --- if set, the individual tty devices need
- * to be registered with a call to tty_register_device() when the
- * device is found in the system and unregistered with a call to
- * tty_unregister_device() so the devices will be show up
- * properly in sysfs. If not set, driver->num entries will be
- * created by the tty core in sysfs when tty_register_driver() is
- * called. This is to be used by drivers that have tty devices
- * that can appear and disappear while the main tty driver is
- * registered with the tty core.
- *
- * TTY_DRIVER_DEVPTS_MEM -- don't use the standard arrays, instead
- * use dynamic memory keyed through the devpts filesystem. This
- * is only applicable to the pty driver.
- *
- * TTY_DRIVER_HARDWARE_BREAK -- hardware handles break signals. Pass
- * the requested timeout to the caller instead of using a simple
- * on/off interface.
- *
- * TTY_DRIVER_DYNAMIC_ALLOC -- do not allocate structures which are
- * needed per line for this driver as it would waste memory.
- * The driver will take care.
- *
- * TTY_DRIVER_UNNUMBERED_NODE -- do not create numbered /dev nodes. In
- * other words create /dev/ttyprintk and not /dev/ttyprintk0.
- * Applicable only when a driver for a single tty device is
- * being allocated.
+/**
+ * DOC: TTY Driver Flags
+ *
+ * TTY_DRIVER_RESET_TERMIOS
+ * Requests the tty layer to reset the termios setting when the last
+ * process has closed the device. Used for PTYs, in particular.
+ *
+ * TTY_DRIVER_REAL_RAW
+ * Indicates that the driver will guarantee not to set any special
+ * character handling flags if this is set for the tty:
+ *
+ * ``(IGNBRK || (!BRKINT && !PARMRK)) && (IGNPAR || !INPCK)``
+ *
+ * That is, if there is no reason for the driver to
+ * send notifications of parity and break characters up to the line
+ * driver, it won't do so. This allows the line driver to optimize for
+ * this case if this flag is set. (Note that there is also a promise, if
+ * the above case is true, not to signal overruns, either.)
+ *
+ * TTY_DRIVER_DYNAMIC_DEV
+ * The individual tty devices need to be registered with a call to
+ * tty_register_device() when the device is found in the system and
+ * unregistered with a call to tty_unregister_device() so the devices will
+ *	show up properly in sysfs. If not set, all &tty_driver.num entries
+ * will be created by the tty core in sysfs when tty_register_driver() is
+ * called. This is to be used by drivers that have tty devices that can
+ * appear and disappear while the main tty driver is registered with the
+ * tty core.
+ *
+ * TTY_DRIVER_DEVPTS_MEM
+ * Don't use the standard arrays (&tty_driver.ttys and
+ * &tty_driver.termios), instead use dynamic memory keyed through the
+ * devpts filesystem. This is only applicable to the PTY driver.
+ *
+ * TTY_DRIVER_HARDWARE_BREAK
+ * Hardware handles break signals. Pass the requested timeout to the
+ * &tty_operations.break_ctl instead of using a simple on/off interface.
+ *
+ * TTY_DRIVER_DYNAMIC_ALLOC
+ * Do not allocate structures which are needed per line for this driver
+ * (&tty_driver.ports) as it would waste memory. The driver will take
+ * care. This is only applicable to the PTY driver.
+ *
+ * TTY_DRIVER_UNNUMBERED_NODE
+ * Do not create numbered ``/dev`` nodes. For example, create
+ * ``/dev/ttyprintk`` and not ``/dev/ttyprintk0``. Applicable only when a
+ * driver for a single tty device is being allocated.
*/
#define TTY_DRIVER_INSTALLED 0x0001
#define TTY_DRIVER_RESET_TERMIOS 0x0002
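A hedged sketch of the registration sequence described in the &struct tty_driver comment, combining the required fields with the flags documented above (it reuses the hypothetical example_ops table from the earlier sketch; error handling abbreviated):

#include <linux/tty.h>
#include <linux/tty_driver.h>

static struct tty_driver *example_driver;

static int __init example_init(void)
{
	int ret;

	example_driver = tty_alloc_driver(1 /* lines */,
					  TTY_DRIVER_REAL_RAW |
					  TTY_DRIVER_DYNAMIC_DEV);
	if (IS_ERR(example_driver))
		return PTR_ERR(example_driver);

	/* the fields required before tty_register_driver() */
	example_driver->driver_name = "example";
	example_driver->name = "ttyEX";
	example_driver->type = TTY_DRIVER_TYPE_SERIAL;
	example_driver->subtype = SERIAL_TYPE_NORMAL;
	example_driver->init_termios = tty_std_termios;
	tty_set_operations(example_driver, &example_ops);

	ret = tty_register_driver(example_driver);
	if (ret)
		tty_driver_kref_put(example_driver);
	return ret;
}

static void __exit example_exit(void)
{
	tty_unregister_driver(example_driver);
	tty_driver_kref_put(example_driver);
}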
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index 9916acb5de49..483d41cbcbb7 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -17,7 +17,6 @@ int tty_insert_flip_string_fixed_flag(struct tty_port *port,
int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
size_t size);
void tty_flip_buffer_push(struct tty_port *port);
-void tty_schedule_flip(struct tty_port *port);
int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
static inline int tty_insert_flip_char(struct tty_port *port,
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index b85d84fb5f49..e85002b56752 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -4,127 +4,6 @@
struct tty_struct;
-/*
- * This structure defines the interface between the tty line discipline
- * implementation and the tty routines. The following routines can be
- * defined; unless noted otherwise, they are optional, and can be
- * filled in with a null pointer.
- *
- * int (*open)(struct tty_struct *);
- *
- * This function is called when the line discipline is associated
- * with the tty. The line discipline can use this as an
- * opportunity to initialize any state needed by the ldisc routines.
- *
- * void (*close)(struct tty_struct *);
- *
- * This function is called when the line discipline is being
- * shutdown, either because the tty is being closed or because
- * the tty is being changed to use a new line discipline
- *
- * void (*flush_buffer)(struct tty_struct *tty);
- *
- * This function instructs the line discipline to clear its
- * buffers of any input characters it may have queued to be
- * delivered to the user mode process.
- *
- * ssize_t (*read)(struct tty_struct * tty, struct file * file,
- * unsigned char * buf, size_t nr);
- *
- * This function is called when the user requests to read from
- * the tty. The line discipline will return whatever characters
- * it has buffered up for the user. If this function is not
- * defined, the user will receive an EIO error.
- *
- * ssize_t (*write)(struct tty_struct * tty, struct file * file,
- * const unsigned char * buf, size_t nr);
- *
- * This function is called when the user requests to write to the
- * tty. The line discipline will deliver the characters to the
- * low-level tty device for transmission, optionally performing
- * some processing on the characters first. If this function is
- * not defined, the user will receive an EIO error.
- *
- * int (*ioctl)(struct tty_struct * tty, struct file * file,
- * unsigned int cmd, unsigned long arg);
- *
- * This function is called when the user requests an ioctl which
- * is not handled by the tty layer or the low-level tty driver.
- * It is intended for ioctls which affect line discpline
- * operation. Note that the search order for ioctls is (1) tty
- * layer, (2) tty low-level driver, (3) line discpline. So a
- * low-level driver can "grab" an ioctl request before the line
- * discpline has a chance to see it.
- *
- * int (*compat_ioctl)(struct tty_struct * tty, struct file * file,
- * unsigned int cmd, unsigned long arg);
- *
- * Process ioctl calls from 32-bit process on 64-bit system
- *
- * NOTE: only ioctls that are neither "pointer to compatible
- * structure" nor tty-generic. Something private that takes
- * an integer or a pointer to wordsize-sensitive structure
- * belongs here, but most of ldiscs will happily leave
- * it NULL.
- *
- * void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
- *
- * This function notifies the line discpline that a change has
- * been made to the termios structure.
- *
- * int (*poll)(struct tty_struct * tty, struct file * file,
- * poll_table *wait);
- *
- * This function is called when a user attempts to select/poll on a
- * tty device. It is solely the responsibility of the line
- * discipline to handle poll requests.
- *
- * void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
- * char *fp, int count);
- *
- * This function is called by the low-level tty driver to send
- * characters received by the hardware to the line discpline for
- * processing. <cp> is a pointer to the buffer of input
- * character received by the device. <fp> is a pointer to a
- * pointer of flag bytes which indicate whether a character was
- * received with a parity error, etc. <fp> may be NULL to indicate
- * all data received is TTY_NORMAL.
- *
- * void (*write_wakeup)(struct tty_struct *);
- *
- * This function is called by the low-level tty driver to signal
- * that line discpline should try to send more characters to the
- * low-level driver for transmission. If the line discpline does
- * not have any more data to send, it can just return. If the line
- * discipline does have some data to send, please arise a tasklet
- * or workqueue to do the real data transfer. Do not send data in
- * this hook, it may leads to a deadlock.
- *
- * int (*hangup)(struct tty_struct *)
- *
- * Called on a hangup. Tells the discipline that it should
- * cease I/O to the tty driver. Can sleep. The driver should
- * seek to perform this action quickly but should wait until
- * any pending driver I/O is completed.
- *
- * void (*dcd_change)(struct tty_struct *tty, unsigned int status)
- *
- * Tells the discipline that the DCD pin has changed its status.
- * Used exclusively by the N_PPS (Pulse-Per-Second) line discipline.
- *
- * int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
- * char *fp, int count);
- *
- * This function is called by the low-level tty driver to send
- * characters received by the hardware to the line discpline for
- * processing. <cp> is a pointer to the buffer of input
- * character received by the device. <fp> is a pointer to a
- * pointer of flag bytes which indicate whether a character was
- * received with a parity error, etc. <fp> may be NULL to indicate
- * all data received is TTY_NORMAL.
- * If assigned, prefer this function for automatic flow control.
- */
-
#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/atomic.h>
@@ -176,7 +55,147 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
ldsem_down_write(sem, timeout)
#endif
-
+/**
+ * struct tty_ldisc_ops - ldisc operations
+ *
+ * @name: name of this ldisc rendered in /proc/tty/ldiscs
+ * @num: ``N_*`` number (%N_TTY, %N_HDLC, ...) reserved to this ldisc
+ *
+ * @open: [TTY] ``int ()(struct tty_struct *tty)``
+ *
+ * This function is called when the line discipline is associated with the
+ * @tty. No other call into the line discipline for this tty will occur
+ * until it completes successfully. It should initialize any state needed
+ * by the ldisc, and set @tty->receive_room to the maximum amount of data
+ * the line discipline is willing to accept from the driver with a single
+ * call to @receive_buf(). Returning an error will prevent the ldisc from
+ * being attached.
+ *
+ * Can sleep.
+ *
+ * @close: [TTY] ``void ()(struct tty_struct *tty)``
+ *
+ * This function is called when the line discipline is being shutdown,
+ * either because the @tty is being closed or because the @tty is being
+ * changed to use a new line discipline. At the point of execution no
+ * further users will enter the ldisc code for this tty.
+ *
+ * Can sleep.
+ *
+ * @flush_buffer: [TTY] ``void ()(struct tty_struct *tty)``
+ *
+ * This function instructs the line discipline to clear its buffers of any
+ * input characters it may have queued to be delivered to the user mode
+ * process. It may be called at any point between open and close.
+ *
+ * @read: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file,
+ * unsigned char *buf, size_t nr)``
+ *
+ * This function is called when the user requests to read from the @tty.
+ * The line discipline will return whatever characters it has buffered up
+ * for the user. If this function is not defined, the user will receive
+ * an %EIO error. Multiple read calls may occur in parallel and the ldisc
+ * must deal with serialization issues.
+ *
+ * Can sleep.
+ *
+ * @write: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file,
+ * const unsigned char *buf, size_t nr)``
+ *
+ * This function is called when the user requests to write to the @tty.
+ * The line discipline will deliver the characters to the low-level tty
+ * device for transmission, optionally performing some processing on the
+ * characters first. If this function is not defined, the user will
+ * receive an %EIO error.
+ *
+ * Can sleep.
+ *
+ * @ioctl: [TTY] ``int ()(struct tty_struct *tty, unsigned int cmd,
+ * unsigned long arg)``
+ *
+ * This function is called when the user requests an ioctl which is not
+ * handled by the tty layer or the low-level tty driver. It is intended
+ *	for ioctls which affect line discipline operation. Note that the
+ *	search order for ioctls is (1) tty layer, (2) tty low-level driver,
+ *	(3) line discipline. So a low-level driver can "grab" an ioctl request
+ *	before the line discipline has a chance to see it.
+ *
+ * @compat_ioctl: [TTY] ``int ()(struct tty_struct *tty, unsigned int cmd,
+ * unsigned long arg)``
+ *
+ * Process ioctl calls from 32-bit process on 64-bit system.
+ *
+ *	Note that this is used only for ioctls that are neither "pointer to
+ *	compatible structure" nor tty-generic. Something private that takes an
+ *	integer or a pointer to a wordsize-sensitive structure belongs here,
+ *	but most ldiscs will happily leave it %NULL.
+ *
+ * @set_termios: [TTY] ``void ()(struct tty_struct *tty, struct ktermios *old)``
+ *
+ *	This function notifies the line discipline that a change has been made
+ * to the termios structure.
+ *
+ * @poll: [TTY] ``int ()(struct tty_struct *tty, struct file *file,
+ * struct poll_table_struct *wait)``
+ *
+ * This function is called when a user attempts to select/poll on a @tty
+ * device. It is solely the responsibility of the line discipline to
+ * handle poll requests.
+ *
+ * @hangup: [TTY] ``void ()(struct tty_struct *tty)``
+ *
+ * Called on a hangup. Tells the discipline that it should cease I/O to
+ * the tty driver. The driver should seek to perform this action quickly
+ * but should wait until any pending driver I/O is completed. No further
+ * calls into the ldisc code will occur.
+ *
+ * Can sleep.
+ *
+ * @receive_buf: [DRV] ``void ()(struct tty_struct *tty,
+ * const unsigned char *cp, const char *fp, int count)``
+ *
+ * This function is called by the low-level tty driver to send characters
+ *	received by the hardware to the line discipline for processing. @cp is
+ *	a pointer to the buffer of input characters received by the device. @fp
+ * is a pointer to an array of flag bytes which indicate whether a
+ * character was received with a parity error, etc. @fp may be %NULL to
+ * indicate all data received is %TTY_NORMAL.
+ *
+ * @write_wakeup: [DRV] ``void ()(struct tty_struct *tty)``
+ *
+ *	This function is called by the low-level tty driver to signal that the
+ *	line discipline should try to send more characters to the low-level
+ *	driver for transmission. If the line discipline does not have any more
+ *	data to send, it can just return. If it does have some data to send,
+ *	please schedule a tasklet or workqueue to do the real data transfer.
+ *	Do not send data in this hook; it may lead to a deadlock.
+ *
+ * @dcd_change: [DRV] ``void ()(struct tty_struct *tty, unsigned int status)``
+ *
+ * Tells the discipline that the DCD pin has changed its status. Used
+ * exclusively by the %N_PPS (Pulse-Per-Second) line discipline.
+ *
+ * @receive_buf2: [DRV] ``int ()(struct tty_struct *tty,
+ * const unsigned char *cp, const char *fp, int count)``
+ *
+ * This function is called by the low-level tty driver to send characters
+ *	received by the hardware to the line discipline for processing. @cp is
+ *	a pointer to the buffer of input characters received by the device. @fp
+ * is a pointer to an array of flag bytes which indicate whether a
+ * character was received with a parity error, etc. @fp may be %NULL to
+ * indicate all data received is %TTY_NORMAL. If assigned, prefer this
+ * function for automatic flow control.
+ *
+ * @owner: module containing this ldisc (for reference counting)
+ *
+ * This structure defines the interface between the tty line discipline
+ * implementation and the tty routines. The above routines can be defined.
+ * Unless noted otherwise, they are optional, and can be filled in with a %NULL
+ * pointer.
+ *
+ * Hooks marked [TTY] are invoked from the TTY core, the [DRV] ones from the
+ * tty_driver side.
+ */
struct tty_ldisc_ops {
char *name;
int num;
@@ -184,31 +203,31 @@ struct tty_ldisc_ops {
/*
* The following routines are called from above.
*/
- int (*open)(struct tty_struct *);
- void (*close)(struct tty_struct *);
+ int (*open)(struct tty_struct *tty);
+ void (*close)(struct tty_struct *tty);
void (*flush_buffer)(struct tty_struct *tty);
ssize_t (*read)(struct tty_struct *tty, struct file *file,
unsigned char *buf, size_t nr,
void **cookie, unsigned long offset);
ssize_t (*write)(struct tty_struct *tty, struct file *file,
const unsigned char *buf, size_t nr);
- int (*ioctl)(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
- int (*compat_ioctl)(struct tty_struct *tty, struct file *file,
- unsigned int cmd, unsigned long arg);
+ int (*ioctl)(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
+ int (*compat_ioctl)(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg);
void (*set_termios)(struct tty_struct *tty, struct ktermios *old);
- __poll_t (*poll)(struct tty_struct *, struct file *,
- struct poll_table_struct *);
+ __poll_t (*poll)(struct tty_struct *tty, struct file *file,
+ struct poll_table_struct *wait);
void (*hangup)(struct tty_struct *tty);
/*
* The following routines are called from below.
*/
- void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
+ void (*receive_buf)(struct tty_struct *tty, const unsigned char *cp,
const char *fp, int count);
- void (*write_wakeup)(struct tty_struct *);
- void (*dcd_change)(struct tty_struct *, unsigned int);
- int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
+ void (*write_wakeup)(struct tty_struct *tty);
+ void (*dcd_change)(struct tty_struct *tty, unsigned int status);
+ int (*receive_buf2)(struct tty_struct *tty, const unsigned char *cp,
const char *fp, int count);
struct module *owner;
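To make the [TTY]/[DRV] split concrete, a skeletal line discipline might look like the sketch below. The example_* names are hypothetical, and it assumes the single-argument tty_register_ldisc() of this series, which takes the ``N_*`` number from @num:

#include <linux/tty.h>
#include <linux/tty_ldisc.h>
#include <linux/module.h>

static int example_ldisc_open(struct tty_struct *tty)
{
	/* advertise how much data one @receive_buf call may deliver */
	tty->receive_room = 65536;
	return 0;
}

static void example_ldisc_close(struct tty_struct *tty)
{
}

/* [DRV] hook: the driver hands received characters up for processing. */
static void example_receive_buf(struct tty_struct *tty,
				const unsigned char *cp, const char *fp,
				int count)
{
}

static struct tty_ldisc_ops example_ldisc_ops = {
	.owner		= THIS_MODULE,
	.name		= "example",
	.num		= N_NULL,	/* a real ldisc reserves its own N_* */
	.open		= example_ldisc_open,	/* [TTY] hooks */
	.close		= example_ldisc_close,
	.receive_buf	= example_receive_buf,
};

static int __init example_ldisc_init(void)
{
	return tty_register_ldisc(&example_ldisc_ops);
}

static void __exit example_ldisc_exit(void)
{
	tty_unregister_ldisc(&example_ldisc_ops);
}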
diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h
index 6e86e9e118b6..d3ea9ed0b98e 100644
--- a/include/linux/tty_port.h
+++ b/include/linux/tty_port.h
@@ -7,37 +7,33 @@
#include <linux/tty_buffer.h>
#include <linux/wait.h>
-/*
- * Port level information. Each device keeps its own port level information
- * so provide a common structure for those ports wanting to use common support
- * routines.
- *
- * The tty port has a different lifetime to the tty so must be kept apart.
- * In addition be careful as tty -> port mappings are valid for the life
- * of the tty object but in many cases port -> tty mappings are valid only
- * until a hangup so don't use the wrong path.
- */
-
struct attribute_group;
struct tty_driver;
struct tty_port;
struct tty_struct;
+/**
+ * struct tty_port_operations -- operations on tty_port
+ * @carrier_raised: return 1 if the carrier is raised on @port
+ * @dtr_rts: raise the DTR line if @raise is nonzero, otherwise lower DTR
+ * @shutdown: called when the last close completes or a hangup finishes IFF the
+ * port was initialized. Do not use to free resources. Turn off the device
+ * only. Called under the port mutex to serialize against @activate and
+ * @shutdown.
+ * @activate: called under the port mutex from tty_port_open(), serialized using
+ * the port mutex. Supposed to turn on the device.
+ *
+ * FIXME: long term getting the tty argument *out* of this would be good
+ * for consoles.
+ *
+ * @destruct: called on the final put of a port. Free resources, possibly incl.
+ * the port itself.
+ */
struct tty_port_operations {
- /* Return 1 if the carrier is raised */
int (*carrier_raised)(struct tty_port *port);
- /* Control the DTR line */
void (*dtr_rts)(struct tty_port *port, int raise);
- /* Called when the last close completes or a hangup finishes
- IFF the port was initialized. Do not use to free resources. Called
- under the port mutex to serialize against activate/shutdowns */
void (*shutdown)(struct tty_port *port);
- /* Called under the port mutex from tty_port_open, serialized using
- the port mutex */
- /* FIXME: long term getting the tty argument *out* of this would be
- good for consoles */
int (*activate)(struct tty_port *port, struct tty_struct *tty);
- /* Called on the final put of a port */
void (*destruct)(struct tty_port *port);
};
@@ -48,30 +44,77 @@ struct tty_port_client_operations {
extern const struct tty_port_client_operations tty_port_default_client_ops;
+/**
+ * struct tty_port -- port level information
+ *
+ * @buf: buffer for this port, locked internally
+ * @tty: back pointer to &struct tty_struct, valid only if the tty is open. Use
+ * tty_port_tty_get() to obtain it (and tty_kref_put() to release).
+ * @itty: internal back pointer to &struct tty_struct. Avoid this. It should be
+ * eliminated in the long term.
+ * @ops: tty port operations (like activate, shutdown), see &struct
+ * tty_port_operations
+ * @client_ops: tty port client operations (like receive_buf, write_wakeup).
+ * By default, tty_port_default_client_ops is used.
+ * @lock: lock protecting @tty
+ * @blocked_open: # of procs waiting for open in tty_port_block_til_ready()
+ * @count: usage count
+ * @open_wait: open waiters queue (waiting e.g. for a carrier)
+ * @delta_msr_wait: modem status change queue (waiting for MSR changes)
+ * @flags: user TTY flags (%ASYNC_)
+ * @iflags: internal flags (%TTY_PORT_)
+ * @console: when set, the port is a console
+ * @mutex: locking, for open, shutdown and other port operations
+ * @buf_mutex: @xmit_buf alloc lock
+ * @xmit_buf: optional xmit buffer used by some drivers
+ * @close_delay: delay in jiffies to wait when closing the port
+ * @closing_wait: delay in jiffies for output to be sent before closing
+ * @drain_delay: set to zero if no pure time-based drain is needed, else set
+ *	to the size of the FIFO
+ * @kref: reference counter. Reaching zero calls @ops->destruct() if non-%NULL
+ * or frees the port otherwise.
+ * @client_data: pointer to private data, for @client_ops
+ *
+ * Each device keeps its own port level information. &struct tty_port was
+ * introduced as a common structure for such information. As every TTY device
+ * shall have a backing tty_port structure, every driver can use these members.
+ *
+ * The tty port has a different lifetime to the tty, so it must be kept
+ * apart. In addition, be careful: tty -> port mappings are valid for the
+ * life of the tty object, but in many cases port -> tty mappings are valid
+ * only until a hangup, so don't use the wrong path.
+ *
+ * Tty port shall be initialized by tty_port_init() and shut down either by
+ * tty_port_destroy() (refcounting not used), or tty_port_put() (refcounting).
+ *
+ * There are a lot of helpers around &struct tty_port too. To name the most
+ * significant ones: tty_port_open(), tty_port_close() (or
+ * tty_port_close_start() and tty_port_close_end() separately if need be), and
+ * tty_port_hangup(). These call @ops->activate() and @ops->shutdown() as
+ * needed.
+ */
struct tty_port {
- struct tty_bufhead buf; /* Locked internally */
- struct tty_struct *tty; /* Back pointer */
- struct tty_struct *itty; /* internal back ptr */
- const struct tty_port_operations *ops; /* Port operations */
- const struct tty_port_client_operations *client_ops; /* Port client operations */
- spinlock_t lock; /* Lock protecting tty field */
- int blocked_open; /* Waiting to open */
- int count; /* Usage count */
- wait_queue_head_t open_wait; /* Open waiters */
- wait_queue_head_t delta_msr_wait; /* Modem status change */
- unsigned long flags; /* User TTY flags ASYNC_ */
- unsigned long iflags; /* Internal flags TTY_PORT_ */
- unsigned char console:1; /* port is a console */
- struct mutex mutex; /* Locking */
- struct mutex buf_mutex; /* Buffer alloc lock */
- unsigned char *xmit_buf; /* Optional buffer */
- unsigned int close_delay; /* Close port delay */
- unsigned int closing_wait; /* Delay for output */
- int drain_delay; /* Set to zero if no pure time
- based drain is needed else
- set to size of fifo */
- struct kref kref; /* Ref counter */
- void *client_data;
+ struct tty_bufhead buf;
+ struct tty_struct *tty;
+ struct tty_struct *itty;
+ const struct tty_port_operations *ops;
+ const struct tty_port_client_operations *client_ops;
+ spinlock_t lock;
+ int blocked_open;
+ int count;
+ wait_queue_head_t open_wait;
+ wait_queue_head_t delta_msr_wait;
+ unsigned long flags;
+ unsigned long iflags;
+ unsigned char console:1;
+ struct mutex mutex;
+ struct mutex buf_mutex;
+ unsigned char *xmit_buf;
+ unsigned int close_delay;
+ unsigned int closing_wait;
+ int drain_delay;
+ struct kref kref;
+ void *client_data;
};
/* tty_port::iflags bits -- use atomic bit ops */
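As a rough sketch of the lifecycle described above, a driver usually embeds one &struct tty_port per line, initializes it, and points its @ops at an activate/shutdown pair (the example_* names are hypothetical):

#include <linux/tty.h>
#include <linux/tty_port.h>

struct example_line {
	struct tty_port port;
};

static int example_activate(struct tty_port *port, struct tty_struct *tty)
{
	return 0;	/* power up / enable the hardware here */
}

static void example_shutdown(struct tty_port *port)
{
	/* quiesce the hardware; do not free resources here */
}

static const struct tty_port_operations example_port_ops = {
	.activate = example_activate,
	.shutdown = example_shutdown,
};

static void example_setup_line(struct example_line *line,
			       struct tty_driver *driver, unsigned int index)
{
	tty_port_init(&line->port);
	line->port.ops = &example_port_ops;
	/* make driver->ports[index] point at this port */
	tty_port_link_device(&line->port, driver, index);
}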
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 6350354f97e9..1198a2bfc9bf 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <linux/thread_info.h>
+#include <linux/mm_types.h>
#include <uapi/linux/uio.h>
struct page;
@@ -146,6 +147,12 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
+static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
+ size_t bytes, struct iov_iter *i)
+{
+ return copy_page_to_iter(&folio->page, offset, bytes, i);
+}
+
static __always_inline __must_check
size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
@@ -196,7 +203,7 @@ bool copy_from_iter_full_nocache(void *addr, size_t bytes, struct iov_iter *i)
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/*
* Note, users like pmem that depend on the stricter semantics of
- * copy_from_iter_flushcache() than copy_from_iter_nocache() must check for
+ * _copy_from_iter_flushcache() than _copy_from_iter_nocache() must check for
* IS_ENABLED(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) before assuming that the
* destination is flushed from the cache on return.
*/
@@ -211,24 +218,6 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
#define _copy_mc_to_iter _copy_to_iter
#endif
-static __always_inline __must_check
-size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
-{
- if (unlikely(!check_copy_size(addr, bytes, false)))
- return 0;
- else
- return _copy_from_iter_flushcache(addr, bytes, i);
-}
-
-static __always_inline __must_check
-size_t copy_mc_to_iter(void *addr, size_t bytes, struct iov_iter *i)
-{
- if (unlikely(!check_copy_size(addr, bytes, true)))
- return 0;
- else
- return _copy_mc_to_iter(addr, bytes, i);
-}
-
size_t iov_iter_zero(size_t bytes, struct iov_iter *);
unsigned long iov_iter_alignment(const struct iov_iter *i);
unsigned long iov_iter_gap_alignment(const struct iov_iter *i);
diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h
index c0d817de4df2..f4c8eaf4d012 100644
--- a/include/linux/unaligned/packed_struct.h
+++ b/include/linux/unaligned/packed_struct.h
@@ -1,7 +1,7 @@
#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H
#define _LINUX_UNALIGNED_PACKED_STRUCT_H
-#include <linux/kernel.h>
+#include <linux/types.h>
struct __una_u16 { u16 x; } __packed;
struct __una_u32 { u32 x; } __packed;
diff --git a/include/linux/unicode.h b/include/linux/unicode.h
index 74484d44c755..4d39e6e11a95 100644
--- a/include/linux/unicode.h
+++ b/include/linux/unicode.h
@@ -5,9 +5,52 @@
#include <linux/init.h>
#include <linux/dcache.h>
+struct utf8data;
+struct utf8data_table;
+
+#define UNICODE_MAJ_SHIFT 16
+#define UNICODE_MIN_SHIFT 8
+
+#define UNICODE_AGE(MAJ, MIN, REV) \
+ (((unsigned int)(MAJ) << UNICODE_MAJ_SHIFT) | \
+ ((unsigned int)(MIN) << UNICODE_MIN_SHIFT) | \
+ ((unsigned int)(REV)))
+
+static inline u8 unicode_major(unsigned int age)
+{
+ return (age >> UNICODE_MAJ_SHIFT) & 0xff;
+}
+
+static inline u8 unicode_minor(unsigned int age)
+{
+ return (age >> UNICODE_MIN_SHIFT) & 0xff;
+}
+
+static inline u8 unicode_rev(unsigned int age)
+{
+ return age & 0xff;
+}
+
+/*
+ * Two normalization forms are supported:
+ * 1) NFDI
+ * - Apply unicode normalization form NFD.
+ * - Remove any Default_Ignorable_Code_Point.
+ * 2) NFDICF
+ * - Apply unicode normalization form NFD.
+ * - Remove any Default_Ignorable_Code_Point.
+ * - Apply a full casefold (C + F).
+ */
+enum utf8_normalization {
+ UTF8_NFDI = 0,
+ UTF8_NFDICF,
+ UTF8_NMAX,
+};
+
struct unicode_map {
- const char *charset;
- int version;
+ unsigned int version;
+ const struct utf8data *ntab[UTF8_NMAX];
+ const struct utf8data_table *tables;
};
int utf8_validate(const struct unicode_map *um, const struct qstr *str);
@@ -30,7 +73,7 @@ int utf8_casefold(const struct unicode_map *um, const struct qstr *str,
int utf8_casefold_hash(const struct unicode_map *um, const void *salt,
struct qstr *str);
-struct unicode_map *utf8_load(const char *version);
+struct unicode_map *utf8_load(unsigned int version);
void utf8_unload(struct unicode_map *um);
#endif /* _LINUX_UNICODE_H */
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 7ccaa76a9a96..200b7b79acb5 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -875,15 +875,6 @@ extern struct usb_host_interface *usb_find_alt_setting(
unsigned int iface_num,
unsigned int alt_num);
-#if IS_REACHABLE(CONFIG_USB)
-int usb_for_each_port(void *data, int (*fn)(struct device *, void *));
-#else
-static inline int usb_for_each_port(void *data, int (*fn)(struct device *, void *))
-{
- return 0;
-}
-#endif
-
/* port claiming functions */
int usb_hub_claim_port(struct usb_device *hdev, unsigned port1,
struct usb_dev_state *owner);
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 1cffa34740b0..969e7dba6358 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -33,7 +33,6 @@
#ifndef __LINUX_USB_CH9_H
#define __LINUX_USB_CH9_H
-#include <linux/device.h>
#include <uapi/linux/usb/ch9.h>
/* USB 3.2 SuperSpeed Plus phy signaling rate generation and lane count */
@@ -45,6 +44,8 @@ enum usb_ssp_rate {
USB_SSP_GEN_2x2,
};
+struct device;
+
extern const char *usb_ep_type_string(int ep_type);
extern const char *usb_speed_string(enum usb_device_speed speed);
extern enum usb_device_speed usb_get_maximum_speed(struct device *dev);
diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h
index 031f148ab373..b5deafd91f67 100644
--- a/include/linux/usb/role.h
+++ b/include/linux/usb/role.h
@@ -92,6 +92,12 @@ fwnode_usb_role_switch_get(struct fwnode_handle *node)
static inline void usb_role_switch_put(struct usb_role_switch *sw) { }
static inline struct usb_role_switch *
+usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct usb_role_switch *
usb_role_switch_register(struct device *parent,
const struct usb_role_switch_desc *desc)
{
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index e2e44bb1dad8..7ba45a97eeae 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -305,16 +305,4 @@ void typec_partner_set_svdm_version(struct typec_partner *partner,
enum usb_pd_svdm_ver svdm_version);
int typec_get_negotiated_svdm_version(struct typec_port *port);
-#if IS_REACHABLE(CONFIG_TYPEC)
-int typec_link_port(struct device *port);
-void typec_unlink_port(struct device *port);
-#else
-static inline int typec_link_port(struct device *port)
-{
- return 0;
-}
-
-static inline void typec_unlink_port(struct device *port) { }
-#endif
-
#endif /* __LINUX_USB_TYPEC_H */
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index c3011ccda430..2de442ececae 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -64,6 +64,7 @@ struct vdpa_mgmt_dev;
* struct vdpa_device - representation of a vDPA device
* @dev: underlying device
* @dma_dev: the actual device that is performing DMA
+ * @driver_override: driver name to force a match
* @config: the configuration ops for this device.
* @cf_mutex: Protects get and set access to configuration layout.
* @index: device index
@@ -76,6 +77,7 @@ struct vdpa_mgmt_dev;
struct vdpa_device {
struct device dev;
struct device *dma_dev;
+ const char *driver_override;
const struct vdpa_config_ops *config;
struct mutex cf_mutex; /* Protects get/set config */
unsigned int index;
@@ -99,6 +101,7 @@ struct vdpa_dev_set_config {
struct {
u8 mac[ETH_ALEN];
u16 mtu;
+ u16 max_vq_pairs;
} net;
u64 mask;
};
@@ -155,7 +158,7 @@ struct vdpa_map_file {
* @vdev: vdpa device
* @idx: virtqueue index
* @state: pointer to returned state (last_avail_idx)
- * @get_vq_notification: Get the notification area for a virtqueue
+ * @get_vq_notification: Get the notification area for a virtqueue (optional)
* @vdev: vdpa device
* @idx: virtqueue index
* Returns the notifcation area
@@ -169,14 +172,17 @@ struct vdpa_map_file {
* for the device
* @vdev: vdpa device
* Returns virtqueue algin requirement
- * @get_features: Get virtio features supported by the device
+ * @get_device_features: Get virtio features supported by the device
* @vdev: vdpa device
* Returns the virtio features support by the
* device
- * @set_features: Set virtio features supported by the driver
+ * @set_driver_features: Set virtio features supported by the driver
* @vdev: vdpa device
 * @features: features supported by the driver
* Returns integer: success (0) or error (< 0)
+ * @get_driver_features: Get the virtio driver features in action
+ * @vdev: vdpa device
+ * Returns the virtio features accepted
* @set_config_cb: Set the config interrupt callback
* @vdev: vdpa device
* @cb: virtio-vdev interrupt callback structure
@@ -276,8 +282,9 @@ struct vdpa_config_ops {
/* Device ops */
u32 (*get_vq_align)(struct vdpa_device *vdev);
- u64 (*get_features)(struct vdpa_device *vdev);
- int (*set_features)(struct vdpa_device *vdev, u64 features);
+ u64 (*get_device_features)(struct vdpa_device *vdev);
+ int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
+ u64 (*get_driver_features)(struct vdpa_device *vdev);
void (*set_config_cb)(struct vdpa_device *vdev,
struct vdpa_callback *cb);
u16 (*get_vq_num_max)(struct vdpa_device *vdev);
@@ -385,23 +392,37 @@ static inline struct device *vdpa_get_dma_dev(struct vdpa_device *vdev)
static inline int vdpa_reset(struct vdpa_device *vdev)
{
const struct vdpa_config_ops *ops = vdev->config;
+ int ret;
+ mutex_lock(&vdev->cf_mutex);
vdev->features_valid = false;
- return ops->reset(vdev);
+ ret = ops->reset(vdev);
+ mutex_unlock(&vdev->cf_mutex);
+ return ret;
}
-static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features)
+static inline int vdpa_set_features(struct vdpa_device *vdev, u64 features, bool locked)
{
const struct vdpa_config_ops *ops = vdev->config;
+ int ret;
+
+ if (!locked)
+ mutex_lock(&vdev->cf_mutex);
vdev->features_valid = true;
- return ops->set_features(vdev, features);
+ ret = ops->set_driver_features(vdev, features);
+ if (!locked)
+ mutex_unlock(&vdev->cf_mutex);
+
+ return ret;
}
void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
void *buf, unsigned int len);
void vdpa_set_config(struct vdpa_device *dev, unsigned int offset,
const void *buf, unsigned int length);
+void vdpa_set_status(struct vdpa_device *vdev, u8 status);
+
/**
* struct vdpa_mgmtdev_ops - vdpa device ops
* @dev_add: Add a vdpa device using alloc and register
@@ -438,6 +459,8 @@ struct vdpa_mgmt_dev {
const struct virtio_device_id *id_table;
u64 config_attr_mask;
struct list_head list;
+ u64 supported_features;
+ u32 max_supported_vqs;
};
int vdpa_mgmtdev_register(struct vdpa_mgmt_dev *mdev);
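For illustration, a minimal sketch of feature negotiation with the renamed ops; the wrapper below is hypothetical, and the new `locked` argument tells vdpa_set_features() whether the caller already holds cf_mutex.

/* Hypothetical caller: offer only features both sides support. */
static int example_negotiate(struct vdpa_device *vdev, u64 wanted)
{
	u64 device_features = vdev->config->get_device_features(vdev);

	/* false: we do not hold cf_mutex, so the helper takes it */
	return vdpa_set_features(vdev, wanted & device_features, false);
}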
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 41edbc01ffa4..72292a62cd90 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -138,6 +138,7 @@ int virtio_finalize_features(struct virtio_device *dev);
int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
+void virtio_reset_device(struct virtio_device *dev);
size_t virtio_max_dma_size(struct virtio_device *vdev);
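A hedged sketch of the expected call site for the new helper: drivers generally reset the device before tearing down its virtqueues on removal. The remove callback below is illustrative, not taken from this patch.

static void example_remove(struct virtio_device *vdev)
{
	/* Quiesce the device so it stops touching the vrings... */
	virtio_reset_device(vdev);
	/* ...then it is safe to free the virtqueues. */
	vdev->config->del_vqs(vdev);
}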
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index a185cc75ff52..7b2363388bfa 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -98,6 +98,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_SPLIT_PAGE_FAILED,
THP_DEFERRED_SPLIT_PAGE,
THP_SPLIT_PMD,
+ THP_SCAN_EXCEED_NONE_PTE,
+ THP_SCAN_EXCEED_SWAP_PTE,
+ THP_SCAN_EXCEED_SHARED_PTE,
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
THP_SPLIT_PUD,
#endif
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 6e022cc712e6..880227b9f044 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -28,6 +28,13 @@ struct notifier_block; /* in notifier.h */
#define VM_MAP_PUT_PAGES 0x00000200 /* put pages and free array in vfree */
#define VM_NO_HUGE_VMAP 0x00000400 /* force PAGE_SIZE pte mapping */
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+ !defined(CONFIG_KASAN_VMALLOC)
+#define VM_DEFER_KMEMLEAK 0x00000800 /* defer kmemleak object creation */
+#else
+#define VM_DEFER_KMEMLEAK 0
+#endif
+
/*
* VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
*
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 3bfd487d1dd2..fec248ab1fec 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -68,6 +68,7 @@ struct writeback_control {
unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
+ unsigned unpinned_fscache_wb:1; /* Cleared I_PINNING_FSCACHE_WB */
/*
* When writeback IOs are bounced through async layers, only the
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index a91e3d90df8a..d6d5da6ed735 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -1581,6 +1581,24 @@ static inline void xas_set(struct xa_state *xas, unsigned long index)
}
/**
+ * xas_advance() - Skip over sibling entries.
+ * @xas: XArray operation state.
+ * @index: Index of last sibling entry.
+ *
+ * Move the operation state to refer to the last sibling entry.
+ * This is useful for loops that normally want to see sibling
+ * entries but sometimes want to skip them. Use xas_set() if you
+ * want to move to an index which is not part of this entry.
+ */
+static inline void xas_advance(struct xa_state *xas, unsigned long index)
+{
+ unsigned char shift = xas_is_node(xas) ? xas->xa_node->shift : 0;
+
+ xas->xa_index = index;
+ xas->xa_offset = (index >> shift) & XA_CHUNK_MASK;
+}
+
+/**
* xas_set_order() - Set up XArray operation state for a multislot entry.
* @xas: XArray operation state.
* @index: Target of the operation.
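To show the new helper in use, a minimal sketch that hops over the remaining sibling slots of a multi-order entry; the helper name and the assumption that the entry starts at xas->xa_index are illustrative.

/* Hypothetical sketch: skip the sibling slots of an order-`order` entry. */
static void skip_multislot(struct xa_state *xas, unsigned int order)
{
	unsigned long last = xas->xa_index | ((1UL << order) - 1);

	xas_advance(xas, last);	/* the next xas_next_entry() moves past it */
}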
diff --git a/include/net/9p/9p.h b/include/net/9p/9p.h
index 9c6ec78e47a5..24a509f559ee 100644
--- a/include/net/9p/9p.h
+++ b/include/net/9p/9p.h
@@ -551,6 +551,4 @@ struct p9_fcall {
int p9_errstr2errno(char *errstr, int len);
int p9_error_init(void);
-int p9_trans_fd_init(void);
-void p9_trans_fd_exit(void);
#endif /* NET_9P_H */
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index 15a4e6a9dbf7..ff842f963071 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -54,7 +54,7 @@ struct p9_trans_module {
void v9fs_register_trans(struct p9_trans_module *m);
void v9fs_unregister_trans(struct p9_trans_module *m);
-struct p9_trans_module *v9fs_get_trans_by_name(char *s);
+struct p9_trans_module *v9fs_get_trans_by_name(const char *s);
struct p9_trans_module *v9fs_get_default_trans(void);
void v9fs_put_trans(struct p9_trans_module *m);
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 78ea3e332688..59940e230b78 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -6,6 +6,8 @@
#define RTR_SOLICITATION_INTERVAL (4*HZ)
#define RTR_SOLICITATION_MAX_INTERVAL (3600*HZ) /* 1 hour */
+#define MIN_VALID_LIFETIME (2*3600) /* 2 hours */
+
#define TEMP_VALID_LIFETIME (7*86400)
#define TEMP_PREFERRED_LIFETIME (86400)
#define REGEN_MAX_RETRY (3)
@@ -107,8 +109,6 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
const struct in6_addr *daddr, unsigned int srcprefs,
struct in6_addr *saddr);
-int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
- u32 banned_flags);
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
u32 banned_flags);
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 526e49589197..8221af1811df 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -239,6 +239,7 @@ typedef struct ax25_dev {
#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
ax25_dama_info dama;
#endif
+ refcount_t refcount;
} ax25_dev;
typedef struct ax25_cb {
@@ -293,6 +294,17 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25)
}
}
+static inline void ax25_dev_hold(ax25_dev *ax25_dev)
+{
+ refcount_inc(&ax25_dev->refcount);
+}
+
+static inline void ax25_dev_put(ax25_dev *ax25_dev)
+{
+	if (refcount_dec_and_test(&ax25_dev->refcount))
+		kfree(ax25_dev);
+}
+
static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev)
{
skb->dev = dev;
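A sketch of the reference discipline the new helpers enable (not code from this patch): a lookup whose result escapes the list lock holds a reference, and the user drops it when done.

static int example_user(ax25_address *addr)
{
	ax25_dev *dev = ax25_addr_ax25dev(addr);	/* assumed to return a held ref */

	if (!dev)
		return -ENODEV;
	/* ... use dev safely, even if it is unregistered meanwhile ... */
	ax25_dev_put(dev);	/* kfree() happens on the last put */
	return 0;
}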
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index 38785d48baff..184105d68294 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -262,7 +262,7 @@ struct ad_system {
struct ad_bond_info {
struct ad_system system; /* 802.3ad system structure */
struct bond_3ad_stats stats;
- u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
+ atomic_t agg_select_timer; /* Timer to select aggregator after all adapters' handshakes */
u16 aggregator_identifier;
};
diff --git a/include/net/bonding.h b/include/net/bonding.h
index f6ae3a4baea4..83cfd2d70247 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -346,7 +346,7 @@ static inline bool bond_uses_primary(struct bonding *bond)
static inline struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
{
- struct slave *slave = rcu_dereference(bond->curr_active_slave);
+ struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave);
return bond_uses_primary(bond) && slave ? slave->dev : NULL;
}
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 57b3e4e7413b..85a5ba3772f5 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -1187,6 +1187,7 @@ void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
void dsa_switch_shutdown(struct dsa_switch *ds);
struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
+void dsa_flush_workqueue(void);
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
int dsa_switch_resume(struct dsa_switch *ds);
diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
index 14efa0ded75d..adab27ba1ecb 100644
--- a/include/net/dst_metadata.h
+++ b/include/net/dst_metadata.h
@@ -123,8 +123,20 @@ static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
memcpy(&new_md->u.tun_info, &md_dst->u.tun_info,
sizeof(struct ip_tunnel_info) + md_size);
+#ifdef CONFIG_DST_CACHE
+ /* Unclone the dst cache if there is one */
+ if (new_md->u.tun_info.dst_cache.cache) {
+ int ret;
+
+ ret = dst_cache_init(&new_md->u.tun_info.dst_cache, GFP_ATOMIC);
+ if (ret) {
+ metadata_dst_free(new_md);
+ return ERR_PTR(ret);
+ }
+ }
+#endif
+
skb_dst_drop(skb);
- dst_hold(&new_md->dst);
skb_dst_set(skb, &new_md->dst);
return new_md;
}
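Because the unclone path can now fail in dst_cache_init() and returns ERR_PTR() rather than NULL there, callers need an IS_ERR() check; a hedged fragment:

	struct metadata_dst *md_dst = tun_dst_unclone(skb);

	if (IS_ERR(md_dst))
		return PTR_ERR(md_dst);	/* sketch: propagate e.g. -ENOMEM */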
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 48cc5795ceda..63540be0fc34 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -117,8 +117,15 @@ int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);
static inline void fqdir_pre_exit(struct fqdir *fqdir)
{
- fqdir->high_thresh = 0; /* prevent creation of new frags */
- fqdir->dead = true;
+ /* Prevent creation of new frags.
+ * Pairs with READ_ONCE() in inet_frag_find().
+ */
+ WRITE_ONCE(fqdir->high_thresh, 0);
+
+ /* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
+ * and ip6frag_expire_frag_queue().
+ */
+ WRITE_ONCE(fqdir->dead, true);
}
void fqdir_exit(struct fqdir *fqdir);
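The WRITE_ONCE()/READ_ONCE() pairing makes these flags safe to read locklessly; a sketch of the reader side named in the comments (the surrounding logic is illustrative):

/* e.g. in inet_frag_find(): a zero threshold means pre-exit already ran */
if (!READ_ONCE(fqdir->high_thresh))
	return NULL;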
diff --git a/include/net/ip.h b/include/net/ip.h
index 81e23a102a0d..b51bae43b0dd 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -525,19 +525,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
{
struct iphdr *iph = ip_hdr(skb);
+ /* There have been many attacks based on the IPID; use the
+ * private generator as much as we can.
+ */
+ if (sk && inet_sk(sk)->inet_daddr) {
+ iph->id = htons(inet_sk(sk)->inet_id);
+ inet_sk(sk)->inet_id += segs;
+ return;
+ }
if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
- /* This is only to work around buggy Windows95/2000
- * VJ compression implementations. If the ID field
- * does not change, they drop every other packet in
- * a TCP stream using header compression.
- */
- if (sk && inet_sk(sk)->inet_daddr) {
- iph->id = htons(inet_sk(sk)->inet_id);
- inet_sk(sk)->inet_id += segs;
- } else {
- iph->id = 0;
- }
+ iph->id = 0;
} else {
+ /* Unfortunately we need the big hammer to get a suitable IPID */
__ip_select_ident(net, iph, segs);
}
}
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index a9a4ccc0cdb5..2048bc8748cb 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -190,14 +190,16 @@ struct fib6_info {
u32 fib6_metric;
u8 fib6_protocol;
u8 fib6_type;
+
+ u8 offload;
+ u8 trap;
+ u8 offload_failed;
+
u8 should_flush:1,
dst_nocount:1,
dst_nopolicy:1,
fib6_destroying:1,
- offload:1,
- trap:1,
- offload_failed:1,
- unused:1;
+ unused:4;
struct rcu_head rcu;
struct nexthop *nh;
@@ -282,7 +284,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
fn = rcu_dereference(f6i->fib6_node);
if (fn) {
- *cookie = fn->fn_sernum;
+ *cookie = READ_ONCE(fn->fn_sernum);
/* pairs with smp_wmb() in __fib6_update_sernum_upto_root() */
smp_rmb();
status = true;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 3afcb128e064..92eec13d1693 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -393,17 +393,20 @@ static inline void txopt_put(struct ipv6_txoptions *opt)
kfree_rcu(opt, rcu);
}
+#if IS_ENABLED(CONFIG_IPV6)
struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);
extern struct static_key_false_deferred ipv6_flowlabel_exclusive;
static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
__be32 label)
{
- if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key))
+ if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key) &&
+ READ_ONCE(sock_net(sk)->ipv6.flowlabel_has_excl))
return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT);
return NULL;
}
+#endif
struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
struct ip6_flowlabel *fl,
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
index 851029ecff13..0a4779175a52 100644
--- a/include/net/ipv6_frag.h
+++ b/include/net/ipv6_frag.h
@@ -67,7 +67,8 @@ ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
struct sk_buff *head;
rcu_read_lock();
- if (fq->q.fqdir->dead)
+ /* Paired with the WRITE_ONCE() in fqdir_pre_exit(). */
+ if (READ_ONCE(fq->q.fqdir->dead))
goto out_rcu_unlock;
spin_lock(&fq->q.lock);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 937389e04c8e..87419f7f5421 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -350,7 +350,8 @@ static inline struct neighbour *neigh_create(struct neigh_table *tbl,
return __neigh_create(tbl, pkey, dev, true);
}
void neigh_destroy(struct neighbour *neigh);
-int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
+int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
+ const bool immediate_ok);
int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
u32 nlmsg_pid);
void __neigh_set_probe_once(struct neighbour *neigh);
@@ -460,17 +461,24 @@ static inline struct neighbour * neigh_clone(struct neighbour *neigh)
#define neigh_hold(n) refcount_inc(&(n)->refcnt)
-static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+static __always_inline int neigh_event_send_probe(struct neighbour *neigh,
+ struct sk_buff *skb,
+ const bool immediate_ok)
{
unsigned long now = jiffies;
-
+
if (READ_ONCE(neigh->used) != now)
WRITE_ONCE(neigh->used, now);
- if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
- return __neigh_event_send(neigh, skb);
+ if (!(neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE)))
+ return __neigh_event_send(neigh, skb, immediate_ok);
return 0;
}
+static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+{
+ return neigh_event_send_probe(neigh, skb, true);
+}
+
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
{
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index a4b550380316..6bd7e5a85ce7 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -77,9 +77,10 @@ struct netns_ipv6 {
spinlock_t fib6_gc_lock;
unsigned int ip6_rt_gc_expire;
unsigned long ip6_rt_last_gc;
+ unsigned char flowlabel_has_excl;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
- unsigned int fib6_rules_require_fldissect;
bool fib6_has_custom_rules;
+ unsigned int fib6_rules_require_fldissect;
#ifdef CONFIG_IPV6_SUBTREES
unsigned int fib6_routes_require_src;
#endif
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index ebef45e821af..676cb8ea9e15 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -218,8 +218,10 @@ static inline int tcf_exts_init(struct tcf_exts *exts, struct net *net,
#ifdef CONFIG_NET_CLS_ACT
exts->type = 0;
exts->nr_actions = 0;
+ /* Note: we do not yet own a reference on net.
+ * This reference might be taken later from tcf_exts_get_net().
+ */
exts->net = net;
- netns_tracker_alloc(net, &exts->ns_tracker, GFP_KERNEL);
exts->actions = kcalloc(TCA_ACT_MAX_PRIO, sizeof(struct tc_action *),
GFP_KERNEL);
if (!exts->actions)
diff --git a/include/net/route.h b/include/net/route.h
index 4c858dcf1aa8..25404fc2b483 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -370,7 +370,7 @@ static inline struct neighbour *ip_neigh_gw4(struct net_device *dev,
{
struct neighbour *neigh;
- neigh = __ipv4_neigh_lookup_noref(dev, daddr);
+ neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr);
if (unlikely(!neigh))
neigh = __neigh_create(&arp_tbl, &daddr, dev, false);
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index c11dbac5abb2..472843eedbae 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -1244,6 +1244,7 @@ struct psched_ratecfg {
u64 rate_bytes_ps; /* bytes per second */
u32 mult;
u16 overhead;
+ u16 mpu;
u8 linklayer;
u8 shift;
};
@@ -1253,6 +1254,9 @@ static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
{
len += r->overhead;
+ if (len < r->mpu)
+ len = r->mpu;
+
if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
@@ -1275,6 +1279,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
res->rate = min_t(u64, r->rate_bytes_ps, ~0U);
res->overhead = r->overhead;
+ res->mpu = r->mpu;
res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
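A worked example of the new minimum-packet-unit clamp, under the assumption of a configured struct psched_ratecfg r with r.overhead == 0 and r.mpu == 64:

u64 t40 = psched_l2t_ns(&r, 40);	/* 40 bytes is billed as the 64-byte mpu */
u64 t64 = psched_l2t_ns(&r, 64);	/* hence t40 == t64 */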
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 44e442bf23f9..b9fc978fb2ca 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1369,6 +1369,7 @@ static inline bool tcp_checksum_complete(struct sk_buff *skb)
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
+#ifdef CONFIG_INET
void __sk_defer_free_flush(struct sock *sk);
static inline void sk_defer_free_flush(struct sock *sk)
@@ -1377,6 +1378,9 @@ static inline void sk_defer_free_flush(struct sock *sk)
return;
__sk_defer_free_flush(sk);
}
+#else
+static inline void sk_defer_free_flush(struct sock *sk) {}
+#endif
int tcp_filter(struct sock *sk, struct sk_buff *skb);
void tcp_set_state(struct sock *sk, int state);
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 0bdbc0d17d2f..d0337a41141c 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -358,7 +358,6 @@ TRACE_EVENT(aer_event,
EM ( MF_MSG_KERNEL_HIGH_ORDER, "high-order kernel page" ) \
EM ( MF_MSG_SLAB, "kernel slab page" ) \
EM ( MF_MSG_DIFFERENT_COMPOUND, "different compound page after locking" ) \
- EM ( MF_MSG_POISONED_HUGE, "huge page already hardware poisoned" ) \
EM ( MF_MSG_HUGE, "huge page" ) \
EM ( MF_MSG_FREE_HUGE, "free huge page" ) \
EM ( MF_MSG_NON_PMD_HUGE, "non-pmd-sized huge page" ) \
@@ -373,7 +372,6 @@ TRACE_EVENT(aer_event,
EM ( MF_MSG_CLEAN_LRU, "clean LRU page" ) \
EM ( MF_MSG_TRUNCATED_LRU, "already truncated LRU page" ) \
EM ( MF_MSG_BUDDY, "free buddy page" ) \
- EM ( MF_MSG_BUDDY_2ND, "free buddy page (2nd try)" ) \
EM ( MF_MSG_DAX, "dax page" ) \
EM ( MF_MSG_UNSPLIT_THP, "unsplit thp" ) \
EMe ( MF_MSG_UNKNOWN, "unknown page" )
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 465b0d0bdaf8..2e3843b761e8 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -276,6 +276,7 @@ enum ib_port_capability_mask2_bits {
IB_PORT_SWITCH_PORT_STATE_TABLE_SUP = 1 << 3,
IB_PORT_LINK_WIDTH_2X_SUP = 1 << 4,
IB_PORT_LINK_SPEED_HDR_SUP = 1 << 5,
+ IB_PORT_LINK_SPEED_NDR_SUP = 1 << 10,
};
#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
index fdb8633cbaff..fc16b826b2c1 100644
--- a/include/rdma/ib_smi.h
+++ b/include/rdma/ib_smi.h
@@ -144,5 +144,15 @@ ib_get_smp_direction(struct ib_smp *smp)
#define IB_NOTICE_TRAP_DR_NOTICE 0x80
#define IB_NOTICE_TRAP_DR_TRUNC 0x40
-
+/**
+ * ib_init_query_mad - Initialize query MAD.
+ * @mad: MAD to initialize.
+ */
+static inline void ib_init_query_mad(struct ib_smp *mad)
+{
+ mad->base_version = IB_MGMT_BASE_VERSION;
+ mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+ mad->class_version = 1;
+ mad->method = IB_MGMT_METHOD_GET;
+}
#endif /* IB_SMI_H */
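The helper centralizes the four-field initialization that callers previously open-coded; a hedged sketch of a typical query setup (the attribute chosen is illustrative):

struct ib_smp *in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);

if (!in_mad)
	return -ENOMEM;
ib_init_query_mad(in_mad);
in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;	/* illustrative attribute */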
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 6e9ad656ecb7..69d883f7fb41 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4749,6 +4749,23 @@ static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
return (u32)(v & IB_GRH_FLOWLABEL_MASK);
}
+/**
+ * rdma_get_udp_sport - Calculate the UDP source port based on the flow
+ * label. If the flow label is not defined in the GRH,
+ * calculate it based on lqpn/rqpn.
+ *
+ * @fl: flow label from GRH
+ * @lqpn: local qp number
+ * @rqpn: remote qp number
+ */
+static inline u16 rdma_get_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
+{
+ if (!fl)
+ fl = rdma_calc_flow_label(lqpn, rqpn);
+
+ return rdma_flow_label_to_udp_sport(fl);
+}
+
const struct ib_port_immutable*
ib_port_immutable_read(struct ib_device *dev, unsigned int port);
#endif /* IB_VERBS_H */
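A one-line usage sketch: RoCEv2 code can derive the UDP source port whether or not the GRH carried a flow label (the variable names are hypothetical):

u16 sport = rdma_get_udp_sport(grh->flow_label, qp->qp_num, remote_qpn);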
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 79e4903bd414..698f2032807b 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -356,6 +356,7 @@ enum sas_ha_state {
SAS_HA_DRAINING,
SAS_HA_ATA_EH_ACTIVE,
SAS_HA_FROZEN,
+ SAS_HA_RESUMING,
};
struct sas_ha_struct {
@@ -660,6 +661,7 @@ extern int sas_register_ha(struct sas_ha_struct *);
extern int sas_unregister_ha(struct sas_ha_struct *);
extern void sas_prep_resume_ha(struct sas_ha_struct *sas_ha);
extern void sas_resume_ha(struct sas_ha_struct *sas_ha);
+extern void sas_resume_ha_no_sync(struct sas_ha_struct *sas_ha);
extern void sas_suspend_ha(struct sas_ha_struct *sas_ha);
int sas_set_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates);
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 477a800a9543..6794d7322cbd 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -164,7 +164,7 @@ static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
{
struct request *rq = scsi_cmd_to_rq(cmd);
- return *(struct scsi_driver **)rq->rq_disk->private_data;
+ return *(struct scsi_driver **)rq->q->disk->private_data;
}
void scsi_done(struct scsi_cmnd *cmd);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index d1c6fc83b1e3..647c53b26105 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -275,9 +275,9 @@ scmd_printk(const char *, const struct scsi_cmnd *, const char *, ...);
do { \
struct request *__rq = scsi_cmd_to_rq((scmd)); \
\
- if (__rq->rq_disk) \
+ if (__rq->q->disk) \
sdev_dbg((scmd)->device, "[%s] " fmt, \
- __rq->rq_disk->disk_name, ##a); \
+ __rq->q->disk->disk_name, ##a); \
else \
sdev_dbg((scmd)->device, fmt, ##a); \
} while (0)
@@ -415,9 +415,8 @@ extern int scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
int retries, struct scsi_mode_data *data,
struct scsi_sense_hdr *);
extern int scsi_mode_select(struct scsi_device *sdev, int pf, int sp,
- int modepage, unsigned char *buffer, int len,
- int timeout, int retries,
- struct scsi_mode_data *data,
+ unsigned char *buffer, int len, int timeout,
+ int retries, struct scsi_mode_data *data,
struct scsi_sense_hdr *);
extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
int retries, struct scsi_sense_hdr *sshdr);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index ebe059badba0..72e1a347baa6 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -691,12 +691,6 @@ struct Scsi_Host {
/* ldm bits */
struct device shost_gendev, shost_dev;
- /*
- * The array size 3 provides space for one attribute group defined by
- * the SCSI core, one attribute group defined by the SCSI LLD and one
- * terminating NULL pointer.
- */
- const struct attribute_group *shost_dev_attr_groups[3];
/*
* Points to the transport data (if any) which is allocated
diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h
index d2cb9aeaf1f1..beac64e38b87 100644
--- a/include/scsi/scsi_ioctl.h
+++ b/include/scsi/scsi_ioctl.h
@@ -45,8 +45,8 @@ typedef struct scsi_fctargaddress {
int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev,
int cmd, bool ndelay);
-int scsi_ioctl(struct scsi_device *sdev, struct gendisk *disk, fmode_t mode,
- int cmd, void __user *arg);
+int scsi_ioctl(struct scsi_device *sdev, fmode_t mode, int cmd,
+ void __user *arg);
int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
bool scsi_cmd_allowed(unsigned char *cmd, fmode_t mode);
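With the gendisk argument dropped, a call site shrinks accordingly (sketch):

/* before: ret = scsi_ioctl(sdev, disk, mode, cmd, arg); */
ret = scsi_ioctl(sdev, mode, cmd, arg);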
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index 843cefb8efce..068e35d36557 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -29,10 +29,6 @@
* For utility and test programs see: http://sg.danny.cz/sg/sg3_utils.html
*/
-#ifdef __KERNEL__
-extern int sg_big_buff; /* for sysctl */
-#endif
-
typedef struct sg_iovec /* same structure as used by readv() Linux system */
{ /* call. It defines one scatter-gather element. */
diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h
index 1f1e3c6c9be1..bf7f9a9aeba0 100644
--- a/include/sound/cs35l41.h
+++ b/include/sound/cs35l41.h
@@ -10,6 +10,725 @@
#ifndef __CS35L41_H
#define __CS35L41_H
+#include <linux/regmap.h>
+
+#define CS35L41_FIRSTREG 0x00000000
+#define CS35L41_LASTREG 0x03804FE8
+#define CS35L41_DEVID 0x00000000
+#define CS35L41_REVID 0x00000004
+#define CS35L41_FABID 0x00000008
+#define CS35L41_RELID 0x0000000C
+#define CS35L41_OTPID 0x00000010
+#define CS35L41_SFT_RESET 0x00000020
+#define CS35L41_TEST_KEY_CTL 0x00000040
+#define CS35L41_USER_KEY_CTL 0x00000044
+#define CS35L41_OTP_MEM0 0x00000400
+#define CS35L41_OTP_MEM31 0x0000047C
+#define CS35L41_OTP_CTRL0 0x00000500
+#define CS35L41_OTP_CTRL1 0x00000504
+#define CS35L41_OTP_CTRL3 0x00000508
+#define CS35L41_OTP_CTRL4 0x0000050C
+#define CS35L41_OTP_CTRL5 0x00000510
+#define CS35L41_OTP_CTRL6 0x00000514
+#define CS35L41_OTP_CTRL7 0x00000518
+#define CS35L41_OTP_CTRL8 0x0000051C
+#define CS35L41_PWR_CTRL1 0x00002014
+#define CS35L41_PWR_CTRL2 0x00002018
+#define CS35L41_PWR_CTRL3 0x0000201C
+#define CS35L41_CTRL_OVRRIDE 0x00002020
+#define CS35L41_AMP_OUT_MUTE 0x00002024
+#define CS35L41_PROTECT_REL_ERR_IGN 0x00002034
+#define CS35L41_GPIO_PAD_CONTROL 0x0000242C
+#define CS35L41_JTAG_CONTROL 0x00002438
+#define CS35L41_PWRMGT_CTL 0x00002900
+#define CS35L41_WAKESRC_CTL 0x00002904
+#define CS35L41_PWRMGT_STS 0x00002908
+#define CS35L41_PLL_CLK_CTRL 0x00002C04
+#define CS35L41_DSP_CLK_CTRL 0x00002C08
+#define CS35L41_GLOBAL_CLK_CTRL 0x00002C0C
+#define CS35L41_DATA_FS_SEL 0x00002C10
+#define CS35L41_TST_FS_MON0 0x00002D10
+#define CS35L41_MDSYNC_EN 0x00003400
+#define CS35L41_MDSYNC_TX_ID 0x00003408
+#define CS35L41_MDSYNC_PWR_CTRL 0x0000340C
+#define CS35L41_MDSYNC_DATA_TX 0x00003410
+#define CS35L41_MDSYNC_TX_STATUS 0x00003414
+#define CS35L41_MDSYNC_DATA_RX 0x0000341C
+#define CS35L41_MDSYNC_RX_STATUS 0x00003420
+#define CS35L41_MDSYNC_ERR_STATUS 0x00003424
+#define CS35L41_MDSYNC_SYNC_PTE2 0x00003528
+#define CS35L41_MDSYNC_SYNC_PTE3 0x0000352C
+#define CS35L41_MDSYNC_SYNC_MSM_STATUS 0x0000353C
+#define CS35L41_BSTCVRT_VCTRL1 0x00003800
+#define CS35L41_BSTCVRT_VCTRL2 0x00003804
+#define CS35L41_BSTCVRT_PEAK_CUR 0x00003808
+#define CS35L41_BSTCVRT_SFT_RAMP 0x0000380C
+#define CS35L41_BSTCVRT_COEFF 0x00003810
+#define CS35L41_BSTCVRT_SLOPE_LBST 0x00003814
+#define CS35L41_BSTCVRT_SW_FREQ 0x00003818
+#define CS35L41_BSTCVRT_DCM_CTRL 0x0000381C
+#define CS35L41_BSTCVRT_DCM_MODE_FORCE 0x00003820
+#define CS35L41_BSTCVRT_OVERVOLT_CTRL 0x00003830
+#define CS35L41_VI_VOL_POL 0x00004000
+#define CS35L41_VIMON_SPKMON_RESYNC 0x00004100
+#define CS35L41_DTEMP_WARN_THLD 0x00004220
+#define CS35L41_DTEMP_CFG 0x00004224
+#define CS35L41_DTEMP_EN 0x00004308
+#define CS35L41_VPVBST_FS_SEL 0x00004400
+#define CS35L41_SP_ENABLES 0x00004800
+#define CS35L41_SP_RATE_CTRL 0x00004804
+#define CS35L41_SP_FORMAT 0x00004808
+#define CS35L41_SP_HIZ_CTRL 0x0000480C
+#define CS35L41_SP_FRAME_TX_SLOT 0x00004810
+#define CS35L41_SP_FRAME_RX_SLOT 0x00004820
+#define CS35L41_SP_TX_WL 0x00004830
+#define CS35L41_SP_RX_WL 0x00004840
+#define CS35L41_ASP_CONTROL4 0x00004854
+#define CS35L41_DAC_PCM1_SRC 0x00004C00
+#define CS35L41_ASP_TX1_SRC 0x00004C20
+#define CS35L41_ASP_TX2_SRC 0x00004C24
+#define CS35L41_ASP_TX3_SRC 0x00004C28
+#define CS35L41_ASP_TX4_SRC 0x00004C2C
+#define CS35L41_DSP1_RX1_SRC 0x00004C40
+#define CS35L41_DSP1_RX2_SRC 0x00004C44
+#define CS35L41_DSP1_RX3_SRC 0x00004C48
+#define CS35L41_DSP1_RX4_SRC 0x00004C4C
+#define CS35L41_DSP1_RX5_SRC 0x00004C50
+#define CS35L41_DSP1_RX6_SRC 0x00004C54
+#define CS35L41_DSP1_RX7_SRC 0x00004C58
+#define CS35L41_DSP1_RX8_SRC 0x00004C5C
+#define CS35L41_NGATE1_SRC 0x00004C60
+#define CS35L41_NGATE2_SRC 0x00004C64
+#define CS35L41_AMP_DIG_VOL_CTRL 0x00006000
+#define CS35L41_VPBR_CFG 0x00006404
+#define CS35L41_VBBR_CFG 0x00006408
+#define CS35L41_VPBR_STATUS 0x0000640C
+#define CS35L41_VBBR_STATUS 0x00006410
+#define CS35L41_OVERTEMP_CFG 0x00006414
+#define CS35L41_AMP_ERR_VOL 0x00006418
+#define CS35L41_VOL_STATUS_TO_DSP 0x00006450
+#define CS35L41_CLASSH_CFG 0x00006800
+#define CS35L41_WKFET_CFG 0x00006804
+#define CS35L41_NG_CFG 0x00006808
+#define CS35L41_AMP_GAIN_CTRL 0x00006C04
+#define CS35L41_DAC_MSM_CFG 0x00007400
+#define CS35L41_IRQ1_CFG 0x00010000
+#define CS35L41_IRQ1_STATUS 0x00010004
+#define CS35L41_IRQ1_STATUS1 0x00010010
+#define CS35L41_IRQ1_STATUS2 0x00010014
+#define CS35L41_IRQ1_STATUS3 0x00010018
+#define CS35L41_IRQ1_STATUS4 0x0001001C
+#define CS35L41_IRQ1_RAW_STATUS1 0x00010090
+#define CS35L41_IRQ1_RAW_STATUS2 0x00010094
+#define CS35L41_IRQ1_RAW_STATUS3 0x00010098
+#define CS35L41_IRQ1_RAW_STATUS4 0x0001009C
+#define CS35L41_IRQ1_MASK1 0x00010110
+#define CS35L41_IRQ1_MASK2 0x00010114
+#define CS35L41_IRQ1_MASK3 0x00010118
+#define CS35L41_IRQ1_MASK4 0x0001011C
+#define CS35L41_IRQ1_FRC1 0x00010190
+#define CS35L41_IRQ1_FRC2 0x00010194
+#define CS35L41_IRQ1_FRC3 0x00010198
+#define CS35L41_IRQ1_FRC4 0x0001019C
+#define CS35L41_IRQ1_EDGE1 0x00010210
+#define CS35L41_IRQ1_EDGE4 0x0001021C
+#define CS35L41_IRQ1_POL1 0x00010290
+#define CS35L41_IRQ1_POL2 0x00010294
+#define CS35L41_IRQ1_POL3 0x00010298
+#define CS35L41_IRQ1_POL4 0x0001029C
+#define CS35L41_IRQ1_DB3 0x00010318
+#define CS35L41_IRQ2_CFG 0x00010800
+#define CS35L41_IRQ2_STATUS 0x00010804
+#define CS35L41_IRQ2_STATUS1 0x00010810
+#define CS35L41_IRQ2_STATUS2 0x00010814
+#define CS35L41_IRQ2_STATUS3 0x00010818
+#define CS35L41_IRQ2_STATUS4 0x0001081C
+#define CS35L41_IRQ2_RAW_STATUS1 0x00010890
+#define CS35L41_IRQ2_RAW_STATUS2 0x00010894
+#define CS35L41_IRQ2_RAW_STATUS3 0x00010898
+#define CS35L41_IRQ2_RAW_STATUS4 0x0001089C
+#define CS35L41_IRQ2_MASK1 0x00010910
+#define CS35L41_IRQ2_MASK2 0x00010914
+#define CS35L41_IRQ2_MASK3 0x00010918
+#define CS35L41_IRQ2_MASK4 0x0001091C
+#define CS35L41_IRQ2_FRC1 0x00010990
+#define CS35L41_IRQ2_FRC2 0x00010994
+#define CS35L41_IRQ2_FRC3 0x00010998
+#define CS35L41_IRQ2_FRC4 0x0001099C
+#define CS35L41_IRQ2_EDGE1 0x00010A10
+#define CS35L41_IRQ2_EDGE4 0x00010A1C
+#define CS35L41_IRQ2_POL1 0x00010A90
+#define CS35L41_IRQ2_POL2 0x00010A94
+#define CS35L41_IRQ2_POL3 0x00010A98
+#define CS35L41_IRQ2_POL4 0x00010A9C
+#define CS35L41_IRQ2_DB3 0x00010B18
+#define CS35L41_GPIO_STATUS1 0x00011000
+#define CS35L41_GPIO1_CTRL1 0x00011008
+#define CS35L41_GPIO2_CTRL1 0x0001100C
+#define CS35L41_MIXER_NGATE_CFG 0x00012000
+#define CS35L41_MIXER_NGATE_CH1_CFG 0x00012004
+#define CS35L41_MIXER_NGATE_CH2_CFG 0x00012008
+#define CS35L41_DSP_MBOX_1 0x00013000
+#define CS35L41_DSP_MBOX_2 0x00013004
+#define CS35L41_DSP_MBOX_3 0x00013008
+#define CS35L41_DSP_MBOX_4 0x0001300C
+#define CS35L41_DSP_MBOX_5 0x00013010
+#define CS35L41_DSP_MBOX_6 0x00013014
+#define CS35L41_DSP_MBOX_7 0x00013018
+#define CS35L41_DSP_MBOX_8 0x0001301C
+#define CS35L41_DSP_VIRT1_MBOX_1 0x00013020
+#define CS35L41_DSP_VIRT1_MBOX_2 0x00013024
+#define CS35L41_DSP_VIRT1_MBOX_3 0x00013028
+#define CS35L41_DSP_VIRT1_MBOX_4 0x0001302C
+#define CS35L41_DSP_VIRT1_MBOX_5 0x00013030
+#define CS35L41_DSP_VIRT1_MBOX_6 0x00013034
+#define CS35L41_DSP_VIRT1_MBOX_7 0x00013038
+#define CS35L41_DSP_VIRT1_MBOX_8 0x0001303C
+#define CS35L41_DSP_VIRT2_MBOX_1 0x00013040
+#define CS35L41_DSP_VIRT2_MBOX_2 0x00013044
+#define CS35L41_DSP_VIRT2_MBOX_3 0x00013048
+#define CS35L41_DSP_VIRT2_MBOX_4 0x0001304C
+#define CS35L41_DSP_VIRT2_MBOX_5 0x00013050
+#define CS35L41_DSP_VIRT2_MBOX_6 0x00013054
+#define CS35L41_DSP_VIRT2_MBOX_7 0x00013058
+#define CS35L41_DSP_VIRT2_MBOX_8 0x0001305C
+#define CS35L41_CLOCK_DETECT_1 0x00014000
+#define CS35L41_TIMER1_CONTROL 0x00015000
+#define CS35L41_TIMER1_COUNT_PRESET 0x00015004
+#define CS35L41_TIMER1_START_STOP 0x0001500C
+#define CS35L41_TIMER1_STATUS 0x00015010
+#define CS35L41_TIMER1_COUNT_READBACK 0x00015014
+#define CS35L41_TIMER1_DSP_CLK_CFG 0x00015018
+#define CS35L41_TIMER1_DSP_CLK_STATUS 0x0001501C
+#define CS35L41_TIMER2_CONTROL 0x00015100
+#define CS35L41_TIMER2_COUNT_PRESET 0x00015104
+#define CS35L41_TIMER2_START_STOP 0x0001510C
+#define CS35L41_TIMER2_STATUS 0x00015110
+#define CS35L41_TIMER2_COUNT_READBACK 0x00015114
+#define CS35L41_TIMER2_DSP_CLK_CFG 0x00015118
+#define CS35L41_TIMER2_DSP_CLK_STATUS 0x0001511C
+#define CS35L41_DFT_JTAG_CONTROL 0x00016000
+#define CS35L41_DIE_STS1 0x00017040
+#define CS35L41_DIE_STS2 0x00017044
+#define CS35L41_TEMP_CAL1 0x00017048
+#define CS35L41_TEMP_CAL2 0x0001704C
+#define CS35L41_DSP1_XMEM_PACK_0 0x02000000
+#define CS35L41_DSP1_XMEM_PACK_3068 0x02002FF0
+#define CS35L41_DSP1_XMEM_UNPACK32_0 0x02400000
+#define CS35L41_DSP1_XMEM_UNPACK32_2046 0x02401FF8
+#define CS35L41_DSP1_TIMESTAMP_COUNT 0x025C0800
+#define CS35L41_DSP1_SYS_ID 0x025E0000
+#define CS35L41_DSP1_SYS_VERSION 0x025E0004
+#define CS35L41_DSP1_SYS_CORE_ID 0x025E0008
+#define CS35L41_DSP1_SYS_AHB_ADDR 0x025E000C
+#define CS35L41_DSP1_SYS_XSRAM_SIZE 0x025E0010
+#define CS35L41_DSP1_SYS_YSRAM_SIZE 0x025E0018
+#define CS35L41_DSP1_SYS_PSRAM_SIZE 0x025E0020
+#define CS35L41_DSP1_SYS_PM_BOOT_SIZE 0x025E0028
+#define CS35L41_DSP1_SYS_FEATURES 0x025E002C
+#define CS35L41_DSP1_SYS_FIR_FILTERS 0x025E0030
+#define CS35L41_DSP1_SYS_LMS_FILTERS 0x025E0034
+#define CS35L41_DSP1_SYS_XM_BANK_SIZE 0x025E0038
+#define CS35L41_DSP1_SYS_YM_BANK_SIZE 0x025E003C
+#define CS35L41_DSP1_SYS_PM_BANK_SIZE 0x025E0040
+#define CS35L41_DSP1_AHBM_WIN0_CTRL0 0x025E2000
+#define CS35L41_DSP1_AHBM_WIN0_CTRL1 0x025E2004
+#define CS35L41_DSP1_AHBM_WIN1_CTRL0 0x025E2008
+#define CS35L41_DSP1_AHBM_WIN1_CTRL1 0x025E200C
+#define CS35L41_DSP1_AHBM_WIN2_CTRL0 0x025E2010
+#define CS35L41_DSP1_AHBM_WIN2_CTRL1 0x025E2014
+#define CS35L41_DSP1_AHBM_WIN3_CTRL0 0x025E2018
+#define CS35L41_DSP1_AHBM_WIN3_CTRL1 0x025E201C
+#define CS35L41_DSP1_AHBM_WIN4_CTRL0 0x025E2020
+#define CS35L41_DSP1_AHBM_WIN4_CTRL1 0x025E2024
+#define CS35L41_DSP1_AHBM_WIN5_CTRL0 0x025E2028
+#define CS35L41_DSP1_AHBM_WIN5_CTRL1 0x025E202C
+#define CS35L41_DSP1_AHBM_WIN6_CTRL0 0x025E2030
+#define CS35L41_DSP1_AHBM_WIN6_CTRL1 0x025E2034
+#define CS35L41_DSP1_AHBM_WIN7_CTRL0 0x025E2038
+#define CS35L41_DSP1_AHBM_WIN7_CTRL1 0x025E203C
+#define CS35L41_DSP1_AHBM_WIN_DBG_CTRL0 0x025E2040
+#define CS35L41_DSP1_AHBM_WIN_DBG_CTRL1 0x025E2044
+#define CS35L41_DSP1_XMEM_UNPACK24_0 0x02800000
+#define CS35L41_DSP1_XMEM_UNPACK24_4093 0x02803FF4
+#define CS35L41_DSP1_CTRL_BASE 0x02B80000
+#define CS35L41_DSP1_CORE_SOFT_RESET 0x02B80010
+#define CS35L41_DSP1_DEBUG 0x02B80040
+#define CS35L41_DSP1_TIMER_CTRL 0x02B80048
+#define CS35L41_DSP1_STREAM_ARB_CTRL 0x02B80050
+#define CS35L41_DSP1_RX1_RATE 0x02B80080
+#define CS35L41_DSP1_RX2_RATE 0x02B80088
+#define CS35L41_DSP1_RX3_RATE 0x02B80090
+#define CS35L41_DSP1_RX4_RATE 0x02B80098
+#define CS35L41_DSP1_RX5_RATE 0x02B800A0
+#define CS35L41_DSP1_RX6_RATE 0x02B800A8
+#define CS35L41_DSP1_RX7_RATE 0x02B800B0
+#define CS35L41_DSP1_RX8_RATE 0x02B800B8
+#define CS35L41_DSP1_TX1_RATE 0x02B80280
+#define CS35L41_DSP1_TX2_RATE 0x02B80288
+#define CS35L41_DSP1_TX3_RATE 0x02B80290
+#define CS35L41_DSP1_TX4_RATE 0x02B80298
+#define CS35L41_DSP1_TX5_RATE 0x02B802A0
+#define CS35L41_DSP1_TX6_RATE 0x02B802A8
+#define CS35L41_DSP1_TX7_RATE 0x02B802B0
+#define CS35L41_DSP1_TX8_RATE 0x02B802B8
+#define CS35L41_DSP1_NMI_CTRL1 0x02B80480
+#define CS35L41_DSP1_NMI_CTRL2 0x02B80488
+#define CS35L41_DSP1_NMI_CTRL3 0x02B80490
+#define CS35L41_DSP1_NMI_CTRL4 0x02B80498
+#define CS35L41_DSP1_NMI_CTRL5 0x02B804A0
+#define CS35L41_DSP1_NMI_CTRL6 0x02B804A8
+#define CS35L41_DSP1_NMI_CTRL7 0x02B804B0
+#define CS35L41_DSP1_NMI_CTRL8 0x02B804B8
+#define CS35L41_DSP1_RESUME_CTRL 0x02B80500
+#define CS35L41_DSP1_IRQ1_CTRL 0x02B80508
+#define CS35L41_DSP1_IRQ2_CTRL 0x02B80510
+#define CS35L41_DSP1_IRQ3_CTRL 0x02B80518
+#define CS35L41_DSP1_IRQ4_CTRL 0x02B80520
+#define CS35L41_DSP1_IRQ5_CTRL 0x02B80528
+#define CS35L41_DSP1_IRQ6_CTRL 0x02B80530
+#define CS35L41_DSP1_IRQ7_CTRL 0x02B80538
+#define CS35L41_DSP1_IRQ8_CTRL 0x02B80540
+#define CS35L41_DSP1_IRQ9_CTRL 0x02B80548
+#define CS35L41_DSP1_IRQ10_CTRL 0x02B80550
+#define CS35L41_DSP1_IRQ11_CTRL 0x02B80558
+#define CS35L41_DSP1_IRQ12_CTRL 0x02B80560
+#define CS35L41_DSP1_IRQ13_CTRL 0x02B80568
+#define CS35L41_DSP1_IRQ14_CTRL 0x02B80570
+#define CS35L41_DSP1_IRQ15_CTRL 0x02B80578
+#define CS35L41_DSP1_IRQ16_CTRL 0x02B80580
+#define CS35L41_DSP1_IRQ17_CTRL 0x02B80588
+#define CS35L41_DSP1_IRQ18_CTRL 0x02B80590
+#define CS35L41_DSP1_IRQ19_CTRL 0x02B80598
+#define CS35L41_DSP1_IRQ20_CTRL 0x02B805A0
+#define CS35L41_DSP1_IRQ21_CTRL 0x02B805A8
+#define CS35L41_DSP1_IRQ22_CTRL 0x02B805B0
+#define CS35L41_DSP1_IRQ23_CTRL 0x02B805B8
+#define CS35L41_DSP1_SCRATCH1 0x02B805C0
+#define CS35L41_DSP1_SCRATCH2 0x02B805C8
+#define CS35L41_DSP1_SCRATCH3 0x02B805D0
+#define CS35L41_DSP1_SCRATCH4 0x02B805D8
+#define CS35L41_DSP1_CCM_CORE_CTRL 0x02BC1000
+#define CS35L41_DSP1_CCM_CLK_OVERRIDE 0x02BC1008
+#define CS35L41_DSP1_XM_MSTR_EN 0x02BC2000
+#define CS35L41_DSP1_XM_CORE_PRI 0x02BC2008
+#define CS35L41_DSP1_XM_AHB_PACK_PL_PRI 0x02BC2010
+#define CS35L41_DSP1_XM_AHB_UP_PL_PRI 0x02BC2018
+#define CS35L41_DSP1_XM_ACCEL_PL0_PRI 0x02BC2020
+#define CS35L41_DSP1_XM_NPL0_PRI 0x02BC2078
+#define CS35L41_DSP1_YM_MSTR_EN 0x02BC20C0
+#define CS35L41_DSP1_YM_CORE_PRI 0x02BC20C8
+#define CS35L41_DSP1_YM_AHB_PACK_PL_PRI 0x02BC20D0
+#define CS35L41_DSP1_YM_AHB_UP_PL_PRI 0x02BC20D8
+#define CS35L41_DSP1_YM_ACCEL_PL0_PRI 0x02BC20E0
+#define CS35L41_DSP1_YM_NPL0_PRI 0x02BC2138
+#define CS35L41_DSP1_PM_MSTR_EN 0x02BC2180
+#define CS35L41_DSP1_PM_PATCH0_ADDR 0x02BC2188
+#define CS35L41_DSP1_PM_PATCH0_EN 0x02BC218C
+#define CS35L41_DSP1_PM_PATCH0_DATA_LO 0x02BC2190
+#define CS35L41_DSP1_PM_PATCH0_DATA_HI 0x02BC2194
+#define CS35L41_DSP1_PM_PATCH1_ADDR 0x02BC2198
+#define CS35L41_DSP1_PM_PATCH1_EN 0x02BC219C
+#define CS35L41_DSP1_PM_PATCH1_DATA_LO 0x02BC21A0
+#define CS35L41_DSP1_PM_PATCH1_DATA_HI 0x02BC21A4
+#define CS35L41_DSP1_PM_PATCH2_ADDR 0x02BC21A8
+#define CS35L41_DSP1_PM_PATCH2_EN 0x02BC21AC
+#define CS35L41_DSP1_PM_PATCH2_DATA_LO 0x02BC21B0
+#define CS35L41_DSP1_PM_PATCH2_DATA_HI 0x02BC21B4
+#define CS35L41_DSP1_PM_PATCH3_ADDR 0x02BC21B8
+#define CS35L41_DSP1_PM_PATCH3_EN 0x02BC21BC
+#define CS35L41_DSP1_PM_PATCH3_DATA_LO 0x02BC21C0
+#define CS35L41_DSP1_PM_PATCH3_DATA_HI 0x02BC21C4
+#define CS35L41_DSP1_PM_PATCH4_ADDR 0x02BC21C8
+#define CS35L41_DSP1_PM_PATCH4_EN 0x02BC21CC
+#define CS35L41_DSP1_PM_PATCH4_DATA_LO 0x02BC21D0
+#define CS35L41_DSP1_PM_PATCH4_DATA_HI 0x02BC21D4
+#define CS35L41_DSP1_PM_PATCH5_ADDR 0x02BC21D8
+#define CS35L41_DSP1_PM_PATCH5_EN 0x02BC21DC
+#define CS35L41_DSP1_PM_PATCH5_DATA_LO 0x02BC21E0
+#define CS35L41_DSP1_PM_PATCH5_DATA_HI 0x02BC21E4
+#define CS35L41_DSP1_PM_PATCH6_ADDR 0x02BC21E8
+#define CS35L41_DSP1_PM_PATCH6_EN 0x02BC21EC
+#define CS35L41_DSP1_PM_PATCH6_DATA_LO 0x02BC21F0
+#define CS35L41_DSP1_PM_PATCH6_DATA_HI 0x02BC21F4
+#define CS35L41_DSP1_PM_PATCH7_ADDR 0x02BC21F8
+#define CS35L41_DSP1_PM_PATCH7_EN 0x02BC21FC
+#define CS35L41_DSP1_PM_PATCH7_DATA_LO 0x02BC2200
+#define CS35L41_DSP1_PM_PATCH7_DATA_HI 0x02BC2204
+#define CS35L41_DSP1_MPU_XM_ACCESS0 0x02BC3000
+#define CS35L41_DSP1_MPU_YM_ACCESS0 0x02BC3004
+#define CS35L41_DSP1_MPU_WNDW_ACCESS0 0x02BC3008
+#define CS35L41_DSP1_MPU_XREG_ACCESS0 0x02BC300C
+#define CS35L41_DSP1_MPU_YREG_ACCESS0 0x02BC3014
+#define CS35L41_DSP1_MPU_XM_ACCESS1 0x02BC3018
+#define CS35L41_DSP1_MPU_YM_ACCESS1 0x02BC301C
+#define CS35L41_DSP1_MPU_WNDW_ACCESS1 0x02BC3020
+#define CS35L41_DSP1_MPU_XREG_ACCESS1 0x02BC3024
+#define CS35L41_DSP1_MPU_YREG_ACCESS1 0x02BC302C
+#define CS35L41_DSP1_MPU_XM_ACCESS2 0x02BC3030
+#define CS35L41_DSP1_MPU_YM_ACCESS2 0x02BC3034
+#define CS35L41_DSP1_MPU_WNDW_ACCESS2 0x02BC3038
+#define CS35L41_DSP1_MPU_XREG_ACCESS2 0x02BC303C
+#define CS35L41_DSP1_MPU_YREG_ACCESS2 0x02BC3044
+#define CS35L41_DSP1_MPU_XM_ACCESS3 0x02BC3048
+#define CS35L41_DSP1_MPU_YM_ACCESS3 0x02BC304C
+#define CS35L41_DSP1_MPU_WNDW_ACCESS3 0x02BC3050
+#define CS35L41_DSP1_MPU_XREG_ACCESS3 0x02BC3054
+#define CS35L41_DSP1_MPU_YREG_ACCESS3 0x02BC305C
+#define CS35L41_DSP1_MPU_XM_VIO_ADDR 0x02BC3100
+#define CS35L41_DSP1_MPU_XM_VIO_STATUS 0x02BC3104
+#define CS35L41_DSP1_MPU_YM_VIO_ADDR 0x02BC3108
+#define CS35L41_DSP1_MPU_YM_VIO_STATUS 0x02BC310C
+#define CS35L41_DSP1_MPU_PM_VIO_ADDR 0x02BC3110
+#define CS35L41_DSP1_MPU_PM_VIO_STATUS 0x02BC3114
+#define CS35L41_DSP1_MPU_LOCK_CONFIG 0x02BC3140
+#define CS35L41_DSP1_MPU_WDT_RST_CTRL 0x02BC3180
+#define CS35L41_DSP1_STRMARB_MSTR0_CFG0 0x02BC5000
+#define CS35L41_DSP1_STRMARB_MSTR0_CFG1 0x02BC5004
+#define CS35L41_DSP1_STRMARB_MSTR0_CFG2 0x02BC5008
+#define CS35L41_DSP1_STRMARB_MSTR1_CFG0 0x02BC5010
+#define CS35L41_DSP1_STRMARB_MSTR1_CFG1 0x02BC5014
+#define CS35L41_DSP1_STRMARB_MSTR1_CFG2 0x02BC5018
+#define CS35L41_DSP1_STRMARB_MSTR2_CFG0 0x02BC5020
+#define CS35L41_DSP1_STRMARB_MSTR2_CFG1 0x02BC5024
+#define CS35L41_DSP1_STRMARB_MSTR2_CFG2 0x02BC5028
+#define CS35L41_DSP1_STRMARB_MSTR3_CFG0 0x02BC5030
+#define CS35L41_DSP1_STRMARB_MSTR3_CFG1 0x02BC5034
+#define CS35L41_DSP1_STRMARB_MSTR3_CFG2 0x02BC5038
+#define CS35L41_DSP1_STRMARB_MSTR4_CFG0 0x02BC5040
+#define CS35L41_DSP1_STRMARB_MSTR4_CFG1 0x02BC5044
+#define CS35L41_DSP1_STRMARB_MSTR4_CFG2 0x02BC5048
+#define CS35L41_DSP1_STRMARB_MSTR5_CFG0 0x02BC5050
+#define CS35L41_DSP1_STRMARB_MSTR5_CFG1 0x02BC5054
+#define CS35L41_DSP1_STRMARB_MSTR5_CFG2 0x02BC5058
+#define CS35L41_DSP1_STRMARB_MSTR6_CFG0 0x02BC5060
+#define CS35L41_DSP1_STRMARB_MSTR6_CFG1 0x02BC5064
+#define CS35L41_DSP1_STRMARB_MSTR6_CFG2 0x02BC5068
+#define CS35L41_DSP1_STRMARB_MSTR7_CFG0 0x02BC5070
+#define CS35L41_DSP1_STRMARB_MSTR7_CFG1 0x02BC5074
+#define CS35L41_DSP1_STRMARB_MSTR7_CFG2 0x02BC5078
+#define CS35L41_DSP1_STRMARB_TX0_CFG0 0x02BC5200
+#define CS35L41_DSP1_STRMARB_TX0_CFG1 0x02BC5204
+#define CS35L41_DSP1_STRMARB_TX1_CFG0 0x02BC5208
+#define CS35L41_DSP1_STRMARB_TX1_CFG1 0x02BC520C
+#define CS35L41_DSP1_STRMARB_TX2_CFG0 0x02BC5210
+#define CS35L41_DSP1_STRMARB_TX2_CFG1 0x02BC5214
+#define CS35L41_DSP1_STRMARB_TX3_CFG0 0x02BC5218
+#define CS35L41_DSP1_STRMARB_TX3_CFG1 0x02BC521C
+#define CS35L41_DSP1_STRMARB_TX4_CFG0 0x02BC5220
+#define CS35L41_DSP1_STRMARB_TX4_CFG1 0x02BC5224
+#define CS35L41_DSP1_STRMARB_TX5_CFG0 0x02BC5228
+#define CS35L41_DSP1_STRMARB_TX5_CFG1 0x02BC522C
+#define CS35L41_DSP1_STRMARB_TX6_CFG0 0x02BC5230
+#define CS35L41_DSP1_STRMARB_TX6_CFG1 0x02BC5234
+#define CS35L41_DSP1_STRMARB_TX7_CFG0 0x02BC5238
+#define CS35L41_DSP1_STRMARB_TX7_CFG1 0x02BC523C
+#define CS35L41_DSP1_STRMARB_RX0_CFG0 0x02BC5400
+#define CS35L41_DSP1_STRMARB_RX0_CFG1 0x02BC5404
+#define CS35L41_DSP1_STRMARB_RX1_CFG0 0x02BC5408
+#define CS35L41_DSP1_STRMARB_RX1_CFG1 0x02BC540C
+#define CS35L41_DSP1_STRMARB_RX2_CFG0 0x02BC5410
+#define CS35L41_DSP1_STRMARB_RX2_CFG1 0x02BC5414
+#define CS35L41_DSP1_STRMARB_RX3_CFG0 0x02BC5418
+#define CS35L41_DSP1_STRMARB_RX3_CFG1 0x02BC541C
+#define CS35L41_DSP1_STRMARB_RX4_CFG0 0x02BC5420
+#define CS35L41_DSP1_STRMARB_RX4_CFG1 0x02BC5424
+#define CS35L41_DSP1_STRMARB_RX5_CFG0 0x02BC5428
+#define CS35L41_DSP1_STRMARB_RX5_CFG1 0x02BC542C
+#define CS35L41_DSP1_STRMARB_RX6_CFG0 0x02BC5430
+#define CS35L41_DSP1_STRMARB_RX6_CFG1 0x02BC5434
+#define CS35L41_DSP1_STRMARB_RX7_CFG0 0x02BC5438
+#define CS35L41_DSP1_STRMARB_RX7_CFG1 0x02BC543C
+#define CS35L41_DSP1_STRMARB_IRQ0_CFG0 0x02BC5600
+#define CS35L41_DSP1_STRMARB_IRQ0_CFG1 0x02BC5604
+#define CS35L41_DSP1_STRMARB_IRQ0_CFG2 0x02BC5608
+#define CS35L41_DSP1_STRMARB_IRQ1_CFG0 0x02BC5610
+#define CS35L41_DSP1_STRMARB_IRQ1_CFG1 0x02BC5614
+#define CS35L41_DSP1_STRMARB_IRQ1_CFG2 0x02BC5618
+#define CS35L41_DSP1_STRMARB_IRQ2_CFG0 0x02BC5620
+#define CS35L41_DSP1_STRMARB_IRQ2_CFG1 0x02BC5624
+#define CS35L41_DSP1_STRMARB_IRQ2_CFG2 0x02BC5628
+#define CS35L41_DSP1_STRMARB_IRQ3_CFG0 0x02BC5630
+#define CS35L41_DSP1_STRMARB_IRQ3_CFG1 0x02BC5634
+#define CS35L41_DSP1_STRMARB_IRQ3_CFG2 0x02BC5638
+#define CS35L41_DSP1_STRMARB_IRQ4_CFG0 0x02BC5640
+#define CS35L41_DSP1_STRMARB_IRQ4_CFG1 0x02BC5644
+#define CS35L41_DSP1_STRMARB_IRQ4_CFG2 0x02BC5648
+#define CS35L41_DSP1_STRMARB_IRQ5_CFG0 0x02BC5650
+#define CS35L41_DSP1_STRMARB_IRQ5_CFG1 0x02BC5654
+#define CS35L41_DSP1_STRMARB_IRQ5_CFG2 0x02BC5658
+#define CS35L41_DSP1_STRMARB_IRQ6_CFG0 0x02BC5660
+#define CS35L41_DSP1_STRMARB_IRQ6_CFG1 0x02BC5664
+#define CS35L41_DSP1_STRMARB_IRQ6_CFG2 0x02BC5668
+#define CS35L41_DSP1_STRMARB_IRQ7_CFG0 0x02BC5670
+#define CS35L41_DSP1_STRMARB_IRQ7_CFG1 0x02BC5674
+#define CS35L41_DSP1_STRMARB_IRQ7_CFG2 0x02BC5678
+#define CS35L41_DSP1_STRMARB_RESYNC_MSK 0x02BC5A00
+#define CS35L41_DSP1_STRMARB_ERR_STATUS 0x02BC5A08
+#define CS35L41_DSP1_INTPCTL_RES_STATIC 0x02BC6000
+#define CS35L41_DSP1_INTPCTL_RES_DYN 0x02BC6004
+#define CS35L41_DSP1_INTPCTL_NMI_CTRL 0x02BC6008
+#define CS35L41_DSP1_INTPCTL_IRQ_INV 0x02BC6010
+#define CS35L41_DSP1_INTPCTL_IRQ_MODE 0x02BC6014
+#define CS35L41_DSP1_INTPCTL_IRQ_EN 0x02BC6018
+#define CS35L41_DSP1_INTPCTL_IRQ_MSK 0x02BC601C
+#define CS35L41_DSP1_INTPCTL_IRQ_FLUSH 0x02BC6020
+#define CS35L41_DSP1_INTPCTL_IRQ_MSKCLR 0x02BC6024
+#define CS35L41_DSP1_INTPCTL_IRQ_FRC 0x02BC6028
+#define CS35L41_DSP1_INTPCTL_IRQ_MSKSET 0x02BC602C
+#define CS35L41_DSP1_INTPCTL_IRQ_ERR 0x02BC6030
+#define CS35L41_DSP1_INTPCTL_IRQ_PEND 0x02BC6034
+#define CS35L41_DSP1_INTPCTL_IRQ_GEN 0x02BC6038
+#define CS35L41_DSP1_INTPCTL_TESTBITS 0x02BC6040
+#define CS35L41_DSP1_WDT_CONTROL 0x02BC7000
+#define CS35L41_DSP1_WDT_STATUS 0x02BC7008
+#define CS35L41_DSP1_YMEM_PACK_0 0x02C00000
+#define CS35L41_DSP1_YMEM_PACK_1532 0x02C017F0
+#define CS35L41_DSP1_YMEM_UNPACK32_0 0x03000000
+#define CS35L41_DSP1_YMEM_UNPACK32_1022 0x03000FF8
+#define CS35L41_DSP1_YMEM_UNPACK24_0 0x03400000
+#define CS35L41_DSP1_YMEM_UNPACK24_2045 0x03401FF4
+#define CS35L41_DSP1_PMEM_0 0x03800000
+#define CS35L41_DSP1_PMEM_5114 0x03804FE8
+
+/* test regs for emulation bringup */
+#define CS35L41_PLL_OVR 0x00003018
+#define CS35L41_BST_TEST_DUTY 0x00003900
+#define CS35L41_DIGPWM_IOCTRL 0x0000706C
+
+/* registers populated by OTP */
+#define CS35L41_OTP_TRIM_1 0x0000208c
+#define CS35L41_OTP_TRIM_2 0x00002090
+#define CS35L41_OTP_TRIM_3 0x00003010
+#define CS35L41_OTP_TRIM_4 0x0000300C
+#define CS35L41_OTP_TRIM_5 0x0000394C
+#define CS35L41_OTP_TRIM_6 0x00003950
+#define CS35L41_OTP_TRIM_7 0x00003954
+#define CS35L41_OTP_TRIM_8 0x00003958
+#define CS35L41_OTP_TRIM_9 0x0000395C
+#define CS35L41_OTP_TRIM_10 0x0000416C
+#define CS35L41_OTP_TRIM_11 0x00004160
+#define CS35L41_OTP_TRIM_12 0x00004170
+#define CS35L41_OTP_TRIM_13 0x00004360
+#define CS35L41_OTP_TRIM_14 0x00004448
+#define CS35L41_OTP_TRIM_15 0x0000444C
+#define CS35L41_OTP_TRIM_16 0x00006E30
+#define CS35L41_OTP_TRIM_17 0x00006E34
+#define CS35L41_OTP_TRIM_18 0x00006E38
+#define CS35L41_OTP_TRIM_19 0x00006E3C
+#define CS35L41_OTP_TRIM_20 0x00006E40
+#define CS35L41_OTP_TRIM_21 0x00006E44
+#define CS35L41_OTP_TRIM_22 0x00006E48
+#define CS35L41_OTP_TRIM_23 0x00006E4C
+#define CS35L41_OTP_TRIM_24 0x00006E50
+#define CS35L41_OTP_TRIM_25 0x00006E54
+#define CS35L41_OTP_TRIM_26 0x00006E58
+#define CS35L41_OTP_TRIM_27 0x00006E5C
+#define CS35L41_OTP_TRIM_28 0x00006E60
+#define CS35L41_OTP_TRIM_29 0x00006E64
+#define CS35L41_OTP_TRIM_30 0x00007418
+#define CS35L41_OTP_TRIM_31 0x0000741C
+#define CS35L41_OTP_TRIM_32 0x00007434
+#define CS35L41_OTP_TRIM_33 0x00007068
+#define CS35L41_OTP_TRIM_34 0x0000410C
+#define CS35L41_OTP_TRIM_35 0x0000400C
+#define CS35L41_OTP_TRIM_36 0x00002030
+
+#define CS35L41_MAX_CACHE_REG 36
+#define CS35L41_OTP_SIZE_WORDS 32
+#define CS35L41_NUM_OTP_ELEM 100
+
+#define CS35L41_VALID_PDATA 0x80000000
+#define CS35L41_NUM_SUPPLIES 2
+
+#define CS35L41_SCLK_MSTR_MASK 0x10
+#define CS35L41_SCLK_MSTR_SHIFT 4
+#define CS35L41_LRCLK_MSTR_MASK 0x01
+#define CS35L41_LRCLK_MSTR_SHIFT 0
+#define CS35L41_SCLK_INV_MASK 0x40
+#define CS35L41_SCLK_INV_SHIFT 6
+#define CS35L41_LRCLK_INV_MASK 0x04
+#define CS35L41_LRCLK_INV_SHIFT 2
+#define CS35L41_SCLK_FRC_MASK 0x20
+#define CS35L41_SCLK_FRC_SHIFT 5
+#define CS35L41_LRCLK_FRC_MASK 0x02
+#define CS35L41_LRCLK_FRC_SHIFT 1
+
+#define CS35L41_AMP_GAIN_PCM_MASK 0x3E0
+#define CS35L41_AMP_GAIN_ZC_MASK 0x0400
+#define CS35L41_AMP_GAIN_ZC_SHIFT 10
+
+#define CS35L41_BST_CTL_MASK 0xFF
+#define CS35L41_BST_CTL_SEL_MASK 0x03
+#define CS35L41_BST_CTL_SEL_REG 0x00
+#define CS35L41_BST_CTL_SEL_CLASSH 0x01
+#define CS35L41_BST_IPK_MASK 0x7F
+#define CS35L41_BST_IPK_SHIFT 0
+#define CS35L41_BST_LIM_MASK 0x4
+#define CS35L41_BST_LIM_SHIFT 2
+#define CS35L41_BST_K1_MASK 0x000000FF
+#define CS35L41_BST_K1_SHIFT 0
+#define CS35L41_BST_K2_MASK 0x0000FF00
+#define CS35L41_BST_K2_SHIFT 8
+#define CS35L41_BST_SLOPE_MASK 0x0000FF00
+#define CS35L41_BST_SLOPE_SHIFT 8
+#define CS35L41_BST_LBST_VAL_MASK 0x00000003
+#define CS35L41_BST_LBST_VAL_SHIFT 0
+
+#define CS35L41_TEMP_THLD_MASK 0x03
+#define CS35L41_VMON_IMON_VOL_MASK 0x07FF07FF
+#define CS35L41_PDM_MODE_MASK 0x01
+#define CS35L41_PDM_MODE_SHIFT 0
+
+#define CS35L41_CH_MEM_DEPTH_MASK 0x07
+#define CS35L41_CH_MEM_DEPTH_SHIFT 0
+#define CS35L41_CH_HDRM_CTL_MASK 0x007F0000
+#define CS35L41_CH_HDRM_CTL_SHIFT 16
+#define CS35L41_CH_REL_RATE_MASK 0xFF00
+#define CS35L41_CH_REL_RATE_SHIFT 8
+#define CS35L41_CH_WKFET_DLY_MASK 0x001C
+#define CS35L41_CH_WKFET_DLY_SHIFT 2
+#define CS35L41_CH_WKFET_THLD_MASK 0x0F00
+#define CS35L41_CH_WKFET_THLD_SHIFT 8
+
+#define CS35L41_HW_NG_SEL_MASK 0x3F00
+#define CS35L41_HW_NG_SEL_SHIFT 8
+#define CS35L41_HW_NG_DLY_MASK 0x0070
+#define CS35L41_HW_NG_DLY_SHIFT 4
+#define CS35L41_HW_NG_THLD_MASK 0x0007
+#define CS35L41_HW_NG_THLD_SHIFT 0
+
+#define CS35L41_DSP_NG_ENABLE_MASK 0x00010000
+#define CS35L41_DSP_NG_ENABLE_SHIFT 16
+#define CS35L41_DSP_NG_THLD_MASK 0x7
+#define CS35L41_DSP_NG_THLD_SHIFT 0
+#define CS35L41_DSP_NG_DELAY_MASK 0x0F00
+#define CS35L41_DSP_NG_DELAY_SHIFT 8
+
+#define CS35L41_ASP_FMT_MASK 0x0700
+#define CS35L41_ASP_FMT_SHIFT 8
+#define CS35L41_ASP_DOUT_HIZ_MASK 0x03
+#define CS35L41_ASP_DOUT_HIZ_SHIFT 0
+#define CS35L41_ASP_WIDTH_16 0x10
+#define CS35L41_ASP_WIDTH_24 0x18
+#define CS35L41_ASP_WIDTH_32 0x20
+#define CS35L41_ASP_WIDTH_TX_MASK 0xFF0000
+#define CS35L41_ASP_WIDTH_TX_SHIFT 16
+#define CS35L41_ASP_WIDTH_RX_MASK 0xFF000000
+#define CS35L41_ASP_WIDTH_RX_SHIFT 24
+#define CS35L41_ASP_RX1_SLOT_MASK 0x3F
+#define CS35L41_ASP_RX1_SLOT_SHIFT 0
+#define CS35L41_ASP_RX2_SLOT_MASK 0x3F00
+#define CS35L41_ASP_RX2_SLOT_SHIFT 8
+#define CS35L41_ASP_RX_WL_MASK 0x3F
+#define CS35L41_ASP_TX_WL_MASK 0x3F
+#define CS35L41_ASP_RX_WL_SHIFT 0
+#define CS35L41_ASP_TX_WL_SHIFT 0
+#define CS35L41_ASP_SOURCE_MASK 0x7F
+
+#define CS35L41_INPUT_SRC_ASPRX1 0x08
+#define CS35L41_INPUT_SRC_ASPRX2 0x09
+#define CS35L41_INPUT_SRC_VMON 0x18
+#define CS35L41_INPUT_SRC_IMON 0x19
+#define CS35L41_INPUT_SRC_CLASSH 0x21
+#define CS35L41_INPUT_SRC_VPMON 0x28
+#define CS35L41_INPUT_SRC_VBSTMON 0x29
+#define CS35L41_INPUT_SRC_TEMPMON 0x3A
+#define CS35L41_INPUT_SRC_RSVD 0x3B
+#define CS35L41_INPUT_DSP_TX1 0x32
+#define CS35L41_INPUT_DSP_TX2 0x33
+
+#define CS35L41_WR_PEND_STS_MASK 0x2
+
+#define CS35L41_PLL_CLK_SEL_MASK 0x07
+#define CS35L41_PLL_CLK_SEL_SHIFT 0
+#define CS35L41_PLL_CLK_EN_MASK 0x10
+#define CS35L41_PLL_CLK_EN_SHIFT 4
+#define CS35L41_PLL_OPENLOOP_MASK 0x0800
+#define CS35L41_PLL_OPENLOOP_SHIFT 11
+#define CS35L41_PLLSRC_SCLK 0
+#define CS35L41_PLLSRC_LRCLK 1
+#define CS35L41_PLLSRC_SELF 3
+#define CS35L41_PLLSRC_PDMCLK 4
+#define CS35L41_PLLSRC_MCLK 5
+#define CS35L41_PLLSRC_SWIRE 7
+#define CS35L41_REFCLK_FREQ_MASK 0x7E0
+#define CS35L41_REFCLK_FREQ_SHIFT 5
+
+#define CS35L41_GLOBAL_FS_MASK 0x1F
+#define CS35L41_GLOBAL_FS_SHIFT 0
+
+#define CS35L41_GLOBAL_EN_MASK 0x01
+#define CS35L41_GLOBAL_EN_SHIFT 0
+#define CS35L41_BST_EN_MASK 0x0030
+#define CS35L41_BST_EN_SHIFT 4
+#define CS35L41_BST_EN_DEFAULT 0x2
+#define CS35L41_AMP_EN_SHIFT 0
+#define CS35L41_AMP_EN_MASK 1
+
+#define CS35L41_PDN_DONE_MASK 0x00800000
+#define CS35L41_PDN_DONE_SHIFT 23
+#define CS35L41_PUP_DONE_MASK 0x01000000
+#define CS35L41_PUP_DONE_SHIFT 24
+
+#define CS35L36_PUP_DONE_IRQ_UNMASK 0x5F
+#define CS35L36_PUP_DONE_IRQ_MASK 0xBF
+
+#define CS35L41_AMP_SHORT_ERR 0x80000000
+#define CS35L41_BST_SHORT_ERR 0x0100
+#define CS35L41_TEMP_WARN 0x8000
+#define CS35L41_TEMP_ERR 0x00020000
+#define CS35L41_BST_OVP_ERR 0x40
+#define CS35L41_BST_DCM_UVP_ERR 0x80
+#define CS35L41_OTP_BOOT_DONE 0x02
+#define CS35L41_PLL_UNLOCK 0x10
+#define CS35L41_OTP_BOOT_ERR 0x80000000
+
+#define CS35L41_AMP_SHORT_ERR_RLS 0x02
+#define CS35L41_BST_SHORT_ERR_RLS 0x04
+#define CS35L41_BST_OVP_ERR_RLS 0x08
+#define CS35L41_BST_UVP_ERR_RLS 0x10
+#define CS35L41_TEMP_WARN_ERR_RLS 0x20
+#define CS35L41_TEMP_ERR_RLS 0x40
+
+#define CS35L41_INT1_MASK_DEFAULT 0x7FFCFE3F
+#define CS35L41_INT1_UNMASK_PUP 0xFEFFFFFF
+#define CS35L41_INT1_UNMASK_PDN 0xFF7FFFFF
+
+#define CS35L41_GPIO_DIR_MASK 0x80000000
+#define CS35L41_GPIO_DIR_SHIFT 31
+#define CS35L41_GPIO1_CTRL_MASK 0x00030000
+#define CS35L41_GPIO1_CTRL_SHIFT 16
+#define CS35L41_GPIO2_CTRL_MASK 0x07000000
+#define CS35L41_GPIO2_CTRL_SHIFT 24
+#define CS35L41_GPIO_CTRL_OPEN_INT 2
+#define CS35L41_GPIO_CTRL_ACTV_LO 4
+#define CS35L41_GPIO_CTRL_ACTV_HI 5
+#define CS35L41_GPIO_POL_MASK 0x1000
+#define CS35L41_GPIO_POL_SHIFT 12
+
+#define CS35L41_AMP_INV_PCM_SHIFT 14
+#define CS35L41_AMP_INV_PCM_MASK BIT(CS35L41_AMP_INV_PCM_SHIFT)
+#define CS35L41_AMP_PCM_VOL_SHIFT 3
+#define CS35L41_AMP_PCM_VOL_MASK (0x7FF << 3)
+#define CS35L41_AMP_PCM_VOL_MUTE 0x4CF
+
+#define CS35L41_CHIP_ID 0x35a40
+#define CS35L41R_CHIP_ID 0x35b40
+#define CS35L41_MTLREVID_MASK 0x0F
+#define CS35L41_REVID_A0 0xA0
+#define CS35L41_REVID_B0 0xB0
+#define CS35L41_REVID_B2 0xB2
+
+#define CS35L41_HALO_CORE_RESET 0x00000200
+
+#define CS35L41_FS1_WINDOW_MASK 0x000007FF
+#define CS35L41_FS2_WINDOW_MASK 0x00FFF800
+#define CS35L41_FS2_WINDOW_SHIFT 12
+
+#define CS35L41_SPI_MAX_FREQ 4000000
+#define CS35L41_REGSTRIDE 4
+
enum cs35l41_clk_ids {
CS35L41_CLKID_SCLK = 0,
CS35L41_CLKID_LRCLK = 1,
@@ -31,4 +750,31 @@ struct cs35l41_platform_data {
struct cs35l41_irq_cfg irq_config2;
};
+struct cs35l41_otp_packed_element_t {
+ u32 reg;
+ u8 shift;
+ u8 size;
+};
+
+struct cs35l41_otp_map_element_t {
+ u32 id;
+ u32 num_elements;
+ const struct cs35l41_otp_packed_element_t *map;
+ u32 bit_offset;
+ u32 word_offset;
+};
+
+extern struct regmap_config cs35l41_regmap_i2c;
+extern struct regmap_config cs35l41_regmap_spi;
+
+int cs35l41_test_key_unlock(struct device *dev, struct regmap *regmap);
+int cs35l41_test_key_lock(struct device *dev, struct regmap *regmap);
+int cs35l41_otp_unpack(struct device *dev, struct regmap *regmap);
+int cs35l41_register_errata_patch(struct device *dev, struct regmap *reg, unsigned int reg_revid);
+int cs35l41_set_channels(struct device *dev, struct regmap *reg,
+ unsigned int tx_num, unsigned int *tx_slot,
+ unsigned int rx_num, unsigned int *rx_slot);
+int cs35l41_boost_config(struct device *dev, struct regmap *regmap, int boost_ind, int boost_cap,
+ int boost_ipk);
+
#endif /* __CS35L41_H */
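To show how the newly exported symbols fit together, a minimal sketch of an I2C probe path; the probe function, its call ordering, and the revision passed to the errata helper are illustrative rather than taken from this header.

static int example_i2c_probe(struct i2c_client *client)
{
	struct device *dev = &client->dev;
	struct regmap *regmap;
	int ret;

	regmap = devm_regmap_init_i2c(client, &cs35l41_regmap_i2c);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	ret = cs35l41_otp_unpack(dev, regmap);	/* apply OTP trim data */
	if (ret)
		return ret;

	return cs35l41_register_errata_patch(dev, regmap, CS35L41_REVID_B2);
}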
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index 96666efddb39..38ea046e653c 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -60,7 +60,6 @@ struct dma_chan *snd_dmaengine_pcm_get_chan(struct snd_pcm_substream *substream)
 * @maxburst: Maximum number of words (note: words, as in units of the
 * src_addr_width member, not bytes) that can be sent to or received from the
 * DAI in one burst.
- * @slave_id: Slave requester id for the DMA channel.
* @filter_data: Custom DMA channel filter data, this will usually be used when
* requesting the DMA channel.
* @chan_name: Custom channel name to use when requesting DMA channel.
@@ -74,7 +73,6 @@ struct snd_dmaengine_dai_dma_data {
dma_addr_t addr;
enum dma_slave_buswidth addr_width;
u32 maxburst;
- unsigned int slave_id;
void *filter_data;
const char *chan_name;
unsigned int fifo_size;
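
[Annotation] With slave_id gone, DMA channel selection rests on filter_data or chan_name alone. A hedged sketch of filling the remaining fields; all values are illustrative:

static void init_dma_data_sketch(struct snd_dmaengine_dai_dma_data *dma,
				 dma_addr_t fifo_addr)
{
	dma->addr = fifo_addr;
	dma->addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma->maxburst = 4;		/* four words per burst */
	dma->chan_name = "tx";		/* channel name is illustrative */
}
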
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 0e45963bb767..82d9daa17851 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -8,7 +8,7 @@
#ifndef __SOUND_HDA_CODEC_H
#define __SOUND_HDA_CODEC_H
-#include <linux/kref.h>
+#include <linux/refcount.h>
#include <linux/mod_devicetable.h>
#include <sound/info.h>
#include <sound/control.h>
@@ -166,8 +166,8 @@ struct hda_pcm {
bool own_chmap; /* codec driver provides own channel maps */
/* private: */
struct hda_codec *codec;
- struct kref kref;
struct list_head list;
+ unsigned int disconnected:1;
};
/* codec information */
@@ -187,6 +187,8 @@ struct hda_codec {
/* PCM to create, set by patch_ops.build_pcms callback */
struct list_head pcm_list_head;
+ refcount_t pcm_ref;
+ wait_queue_head_t remove_sleep;
/* codec specific info */
void *spec;
@@ -420,7 +422,7 @@ void snd_hda_codec_cleanup_for_unbind(struct hda_codec *codec);
static inline void snd_hda_codec_pcm_get(struct hda_pcm *pcm)
{
- kref_get(&pcm->kref);
+ refcount_inc(&pcm->codec->pcm_ref);
}
void snd_hda_codec_pcm_put(struct hda_pcm *pcm);
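
[Annotation] The per-PCM kref becomes a single codec-wide refcount_t paired with a waitqueue, so codec removal can sleep until every PCM is released. A plausible shape of the put side, sketched from the declarations above rather than copied from the in-tree body:

static void hda_pcm_put_sketch(struct hda_pcm *pcm)
{
	struct hda_codec *codec = pcm->codec;

	/* Last reference: wake anyone blocked in codec removal. */
	if (refcount_dec_and_test(&codec->pcm_ref))
		wake_up(&codec->remove_sleep);
}
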
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 22af68b01426..6a90ce405e60 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -558,6 +558,7 @@ int snd_hdac_stream_set_params(struct hdac_stream *azx_dev,
void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start);
void snd_hdac_stream_clear(struct hdac_stream *azx_dev);
void snd_hdac_stream_stop(struct hdac_stream *azx_dev);
+void snd_hdac_stop_streams_and_chip(struct hdac_bus *bus);
void snd_hdac_stream_reset(struct hdac_stream *azx_dev);
void snd_hdac_stream_sync_trigger(struct hdac_stream *azx_dev, bool set,
unsigned int streams, unsigned int reg);
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index d4e31ea16aba..77123c3e4095 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -78,36 +78,35 @@ struct hdac_ext_stream {
container_of(s, struct hdac_ext_stream, hstream)
void snd_hdac_ext_stream_init(struct hdac_bus *bus,
- struct hdac_ext_stream *stream, int idx,
- int direction, int tag);
+ struct hdac_ext_stream *hext_stream, int idx,
+ int direction, int tag);
int snd_hdac_ext_stream_init_all(struct hdac_bus *bus, int start_idx,
- int num_stream, int dir);
+ int num_stream, int dir);
void snd_hdac_stream_free_all(struct hdac_bus *bus);
void snd_hdac_link_free_all(struct hdac_bus *bus);
struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream,
int type);
-void snd_hdac_ext_stream_release(struct hdac_ext_stream *azx_dev, int type);
+void snd_hdac_ext_stream_release(struct hdac_ext_stream *hext_stream, int type);
void snd_hdac_ext_stream_decouple_locked(struct hdac_bus *bus,
- struct hdac_ext_stream *azx_dev, bool decouple);
+ struct hdac_ext_stream *hext_stream, bool decouple);
void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
struct hdac_ext_stream *azx_dev, bool decouple);
-void snd_hdac_ext_stop_streams(struct hdac_bus *bus);
int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus,
- struct hdac_ext_stream *stream, u32 value);
+ struct hdac_ext_stream *hext_stream, u32 value);
int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus,
- struct hdac_ext_stream *stream);
+ struct hdac_ext_stream *hext_stream);
void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
bool enable, int index);
int snd_hdac_ext_stream_set_dpibr(struct hdac_bus *bus,
- struct hdac_ext_stream *stream, u32 value);
-int snd_hdac_ext_stream_set_lpib(struct hdac_ext_stream *stream, u32 value);
+ struct hdac_ext_stream *hext_stream, u32 value);
+int snd_hdac_ext_stream_set_lpib(struct hdac_ext_stream *hext_stream, u32 value);
-void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hstream);
-void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hstream);
-void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hstream);
-int snd_hdac_ext_link_stream_setup(struct hdac_ext_stream *stream, int fmt);
+void snd_hdac_ext_link_stream_start(struct hdac_ext_stream *hext_stream);
+void snd_hdac_ext_link_stream_clear(struct hdac_ext_stream *hext_stream);
+void snd_hdac_ext_link_stream_reset(struct hdac_ext_stream *hext_stream);
+int snd_hdac_ext_link_stream_setup(struct hdac_ext_stream *hext_stream, int fmt);
struct hdac_ext_link {
struct hdac_bus *bus;
diff --git a/include/sound/intel-nhlt.h b/include/sound/intel-nhlt.h
index d0574805865f..089a760d36eb 100644
--- a/include/sound/intel-nhlt.h
+++ b/include/sound/intel-nhlt.h
@@ -10,6 +10,14 @@
#include <linux/acpi.h>
+enum nhlt_link_type {
+ NHLT_LINK_HDA = 0,
+ NHLT_LINK_DSP = 1,
+ NHLT_LINK_DMIC = 2,
+ NHLT_LINK_SSP = 3,
+ NHLT_LINK_INVALID
+};
+
#if IS_ENABLED(CONFIG_ACPI) && IS_ENABLED(CONFIG_SND_INTEL_NHLT)
struct wav_fmt {
@@ -33,14 +41,6 @@ struct wav_fmt_ext {
u8 sub_fmt[16];
} __packed;
-enum nhlt_link_type {
- NHLT_LINK_HDA = 0,
- NHLT_LINK_DSP = 1,
- NHLT_LINK_DMIC = 2,
- NHLT_LINK_SSP = 3,
- NHLT_LINK_INVALID
-};
-
enum nhlt_device_type {
NHLT_DEVICE_BT = 0,
NHLT_DEVICE_DMIC = 1,
@@ -132,6 +132,12 @@ void intel_nhlt_free(struct nhlt_acpi_table *addr);
int intel_nhlt_get_dmic_geo(struct device *dev, struct nhlt_acpi_table *nhlt);
+bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt, u8 link_type);
+struct nhlt_specific_cfg *
+intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ u32 bus_id, u8 link_type, u8 vbps, u8 bps,
+ u8 num_ch, u32 rate, u8 dir, u8 dev_type);
+
#else
struct nhlt_acpi_table;
@@ -150,6 +156,21 @@ static inline int intel_nhlt_get_dmic_geo(struct device *dev,
{
return 0;
}
+
+static inline bool intel_nhlt_has_endpoint_type(struct nhlt_acpi_table *nhlt,
+ u8 link_type)
+{
+ return false;
+}
+
+static inline struct nhlt_specific_cfg *
+intel_nhlt_get_endpoint_blob(struct device *dev, struct nhlt_acpi_table *nhlt,
+ u32 bus_id, u8 link_type, u8 vbps, u8 bps,
+ u8 num_ch, u32 rate, u8 dir, u8 dev_type)
+{
+ return NULL;
+}
+
#endif
#endif
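
[Annotation] The two new helpers let a driver probe the ACPI NHLT table for a usable endpoint before committing to a DAI configuration. A hedged usage sketch; the bus id, bit depths, channel count, rate, direction and device-type values are illustrative, not mandated by the API:

static struct nhlt_specific_cfg *
dmic_blob_sketch(struct device *dev, struct nhlt_acpi_table *nhlt)
{
	if (!intel_nhlt_has_endpoint_type(nhlt, NHLT_LINK_DMIC))
		return NULL;

	/* 2-channel, 32-bit container with 32 valid bits, 48 kHz capture. */
	return intel_nhlt_get_endpoint_blob(dev, nhlt, 0, NHLT_LINK_DMIC,
					    32, 32, 2, 48000,
					    SNDRV_PCM_STREAM_CAPTURE, 1);
}
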
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 1051b84e8579..653dfffb3ac8 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -36,13 +36,6 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_CONTINUOUS 1 /* continuous no-DMA memory */
#define SNDRV_DMA_TYPE_DEV 2 /* generic device continuous */
#define SNDRV_DMA_TYPE_DEV_WC 5 /* continuous write-combined */
-#ifdef CONFIG_SND_DMA_SGBUF
-#define SNDRV_DMA_TYPE_DEV_SG 3 /* generic device SG-buffer */
-#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
-#else
-#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
-#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
-#endif
#ifdef CONFIG_GENERIC_ALLOCATOR
#define SNDRV_DMA_TYPE_DEV_IRAM 4 /* generic device iram-buffer */
#else
@@ -51,6 +44,13 @@ struct snd_dma_device {
#define SNDRV_DMA_TYPE_VMALLOC 7 /* vmalloc'ed buffer */
#define SNDRV_DMA_TYPE_NONCONTIG 8 /* non-coherent SG buffer */
#define SNDRV_DMA_TYPE_NONCOHERENT 9 /* non-coherent buffer */
+#ifdef CONFIG_SND_DMA_SGBUF
+#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_NONCONTIG
+#define SNDRV_DMA_TYPE_DEV_WC_SG 6 /* SG write-combined */
+#else
+#define SNDRV_DMA_TYPE_DEV_SG SNDRV_DMA_TYPE_DEV /* no SG-buf support */
+#define SNDRV_DMA_TYPE_DEV_WC_SG SNDRV_DMA_TYPE_DEV_WC
+#endif
/*
* info for buffer allocation
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 33451f8ff755..36da42cd0774 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -147,6 +147,9 @@ struct snd_pcm_ops {
#define SNDRV_PCM_FMTBIT_S24_BE _SNDRV_PCM_FMTBIT(S24_BE)
#define SNDRV_PCM_FMTBIT_U24_LE _SNDRV_PCM_FMTBIT(U24_LE)
#define SNDRV_PCM_FMTBIT_U24_BE _SNDRV_PCM_FMTBIT(U24_BE)
+// For S32/U32 formats, the 'msbits' hardware parameter is often used to convey the number of
+// valid bits in the most significant part of the sample. This covers so-called 'left-justified'
+// or 'right-padded' samples which carry fewer than 32 meaningful bits.
#define SNDRV_PCM_FMTBIT_S32_LE _SNDRV_PCM_FMTBIT(S32_LE)
#define SNDRV_PCM_FMTBIT_S32_BE _SNDRV_PCM_FMTBIT(S32_BE)
#define SNDRV_PCM_FMTBIT_U32_LE _SNDRV_PCM_FMTBIT(U32_LE)
@@ -614,6 +617,7 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream);
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream);
void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream);
unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream);
+unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream);
/**
* snd_pcm_stream_lock_irqsave - Lock the PCM stream
@@ -633,6 +637,20 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
unsigned long flags);
/**
+ * snd_pcm_stream_lock_irqsave_nested - Single-nested PCM stream locking
+ * @substream: PCM substream
+ * @flags: irq flags
+ *
+ * This locks the PCM stream like snd_pcm_stream_lock_irqsave() but with
+ * the single-depth lockdep subclass.
+ */
+#define snd_pcm_stream_lock_irqsave_nested(substream, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _snd_pcm_stream_lock_irqsave_nested(substream); \
+ } while (0)
+
+/**
* snd_pcm_group_for_each_entry - iterate over the linked substreams
* @s: the iterator
* @substream: the substream
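
[Annotation] The _nested variant exists for code that already holds one stream lock and must take a second, e.g. on a linked substream; the separate lockdep subclass keeps lockdep from flagging the second acquisition as recursive. A minimal sketch, assuming the two substreams are distinct:

static void lock_pair_sketch(struct snd_pcm_substream *a,
			     struct snd_pcm_substream *b)
{
	unsigned long flags_a, flags_b;

	snd_pcm_stream_lock_irqsave(a, flags_a);
	snd_pcm_stream_lock_irqsave_nested(b, flags_b);
	/* ... operate on both streams ... */
	snd_pcm_stream_unlock_irqrestore(b, flags_b);
	snd_pcm_stream_unlock_irqrestore(a, flags_a);
}
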
diff --git a/include/sound/rt5682s.h b/include/sound/rt5682s.h
index accfbc2dcdd2..f18d91308b9a 100644
--- a/include/sound/rt5682s.h
+++ b/include/sound/rt5682s.h
@@ -40,6 +40,7 @@ struct rt5682s_platform_data {
enum rt5682s_jd_src jd_src;
unsigned int dmic_clk_rate;
unsigned int dmic_delay;
+ unsigned int amic_delay;
bool dmic_clk_driving_high;
const char *dai_clk_names[RT5682S_DAI_NUM_CLKS];
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index a4317144ab62..a52080407b98 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -148,6 +148,8 @@ struct snd_soc_component_driver {
struct vm_area_struct *vma);
int (*ack)(struct snd_soc_component *component,
struct snd_pcm_substream *substream);
+ snd_pcm_sframes_t (*delay)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
const struct snd_compress_ops *compress_ops;
@@ -505,5 +507,7 @@ int snd_soc_pcm_component_pm_runtime_get(struct snd_soc_pcm_runtime *rtd,
void snd_soc_pcm_component_pm_runtime_put(struct snd_soc_pcm_runtime *rtd,
void *stream, int rollback);
int snd_soc_pcm_component_ack(struct snd_pcm_substream *substream);
+void snd_soc_pcm_component_delay(struct snd_pcm_substream *substream,
+ snd_pcm_sframes_t *cpu_delay, snd_pcm_sframes_t *codec_delay);
#endif /* __SOC_COMPONENT_H */
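
[Annotation] The new .delay callback lets a component report extra latency, which the core folds into the total via snd_soc_pcm_component_delay(). A sketch of a component advertising a constant FIFO depth; MY_FIFO_FRAMES is a made-up constant, and a real driver would read the depth from hardware:

#define MY_FIFO_FRAMES 64

static snd_pcm_sframes_t my_delay_sketch(struct snd_soc_component *component,
					 struct snd_pcm_substream *substream)
{
	return MY_FIFO_FRAMES;	/* additional latency, in frames */
}
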
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 0dcb361a98bb..bbd821d2df9c 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -208,8 +208,6 @@ int snd_soc_dai_startup(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream);
void snd_soc_dai_shutdown(struct snd_soc_dai *dai,
struct snd_pcm_substream *substream, int rollback);
-snd_pcm_sframes_t snd_soc_dai_delay(struct snd_soc_dai *dai,
- struct snd_pcm_substream *substream);
void snd_soc_dai_suspend(struct snd_soc_dai *dai);
void snd_soc_dai_resume(struct snd_soc_dai *dai);
int snd_soc_dai_compress_new(struct snd_soc_dai *dai,
@@ -238,6 +236,8 @@ int snd_soc_pcm_dai_trigger(struct snd_pcm_substream *substream, int cmd,
int rollback);
int snd_soc_pcm_dai_bespoke_trigger(struct snd_pcm_substream *substream,
int cmd);
+void snd_soc_pcm_dai_delay(struct snd_pcm_substream *substream,
+ snd_pcm_sframes_t *cpu_delay, snd_pcm_sframes_t *codec_delay);
int snd_soc_dai_compr_startup(struct snd_soc_dai *dai,
struct snd_compr_stream *cstream);
@@ -295,9 +295,9 @@ struct snd_soc_dai_ops {
unsigned int *rx_num, unsigned int *rx_slot);
int (*set_tristate)(struct snd_soc_dai *dai, int tristate);
- int (*set_sdw_stream)(struct snd_soc_dai *dai,
- void *stream, int direction);
- void *(*get_sdw_stream)(struct snd_soc_dai *dai, int direction);
+ int (*set_stream)(struct snd_soc_dai *dai,
+ void *stream, int direction);
+ void *(*get_stream)(struct snd_soc_dai *dai, int direction);
/*
* DAI digital mute - optional.
@@ -515,42 +515,42 @@ static inline void *snd_soc_dai_get_drvdata(struct snd_soc_dai *dai)
}
/**
- * snd_soc_dai_set_sdw_stream() - Configures a DAI for SDW stream operation
+ * snd_soc_dai_set_stream() - Configures a DAI for stream operation
* @dai: DAI
- * @stream: STREAM
+ * @stream: STREAM (opaque structure depending on DAI type)
* @direction: Stream direction(Playback/Capture)
- * SoundWire subsystem doesn't have a notion of direction and we reuse
+ * Some subsystems, such as SoundWire, don't have a notion of direction and we reuse
* the ASoC stream direction to configure sink/source ports.
 * Playback maps to source ports and Capture to sink ports.
*
* This should be invoked with NULL to clear the stream set previously.
* Returns 0 on success, a negative error code otherwise.
*/
-static inline int snd_soc_dai_set_sdw_stream(struct snd_soc_dai *dai,
- void *stream, int direction)
+static inline int snd_soc_dai_set_stream(struct snd_soc_dai *dai,
+ void *stream, int direction)
{
- if (dai->driver->ops->set_sdw_stream)
- return dai->driver->ops->set_sdw_stream(dai, stream, direction);
+ if (dai->driver->ops->set_stream)
+ return dai->driver->ops->set_stream(dai, stream, direction);
else
return -ENOTSUPP;
}
/**
- * snd_soc_dai_get_sdw_stream() - Retrieves SDW stream from DAI
+ * snd_soc_dai_get_stream() - Retrieves stream from DAI
* @dai: DAI
* @direction: Stream direction(Playback/Capture)
*
 * This routine only retrieves the stream that was previously configured
- * with snd_soc_dai_get_sdw_stream()
+ * with snd_soc_dai_set_stream()
*
 * Returns a pointer to the stream, or an ERR_PTR value, e.g.
 * ERR_PTR(-ENOTSUPP) if the callback is not supported.
*/
-static inline void *snd_soc_dai_get_sdw_stream(struct snd_soc_dai *dai,
- int direction)
+static inline void *snd_soc_dai_get_stream(struct snd_soc_dai *dai,
+ int direction)
{
- if (dai->driver->ops->get_sdw_stream)
- return dai->driver->ops->get_sdw_stream(dai, direction);
+ if (dai->driver->ops->get_stream)
+ return dai->driver->ops->get_stream(dai, direction);
else
return ERR_PTR(-ENOTSUPP);
}
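
[Annotation] Since the stream pointer is opaque, the renamed hooks work for any bus, not just SoundWire. A hedged sketch of a machine driver handing a stream handle to a DAI and reading it back; 'stream' would typically be e.g. a struct sdw_stream_runtime pointer:

static int attach_stream_sketch(struct snd_soc_dai *dai, void *stream)
{
	int ret;

	ret = snd_soc_dai_set_stream(dai, stream, SNDRV_PCM_STREAM_PLAYBACK);
	if (ret < 0 && ret != -ENOTSUPP)
		return ret;

	/* Retrieve what was set (or ERR_PTR(-ENOTSUPP)). */
	stream = snd_soc_dai_get_stream(dai, SNDRV_PCM_STREAM_PLAYBACK);
	return IS_ERR(stream) ? PTR_ERR(stream) : 0;
}
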
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index bc7af90099a8..75b92d883976 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -101,6 +101,8 @@ struct snd_soc_dpcm_runtime {
enum snd_soc_dpcm_state state;
int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
+
+ int be_start; /* refcount protected by BE stream pcm lock */
};
#define for_each_dpcm_fe(be, stream, _dpcm) \
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 8e6dd8a257c5..7a1650b303f1 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -893,8 +893,6 @@ struct snd_soc_card {
struct mutex pcm_mutex;
enum snd_soc_pcm_subclass pcm_subclass;
- spinlock_t dpcm_lock;
-
int (*probe)(struct snd_soc_card *card);
int (*late_probe)(struct snd_soc_card *card);
int (*remove)(struct snd_soc_card *card);
@@ -1213,6 +1211,7 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card,
const char *propname);
int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
const char *propname);
+int snd_soc_of_parse_pin_switches(struct snd_soc_card *card, const char *prop);
int snd_soc_of_get_slot_mask(struct device_node *np,
const char *prop_name,
unsigned int *mask);
diff --git a/include/sound/sof.h b/include/sound/sof.h
index 23b374311d16..813680ab9aad 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -17,6 +17,28 @@
struct snd_sof_dsp_ops;
+/**
+ * enum sof_fw_state - DSP firmware state definitions
+ * @SOF_FW_BOOT_NOT_STARTED: firmware boot is not yet started
+ * @SOF_FW_BOOT_PREPARE: preparing for boot (firmware loading for example)
+ * @SOF_FW_BOOT_IN_PROGRESS: firmware boot is in progress
+ * @SOF_FW_BOOT_FAILED: firmware boot failed
+ * @SOF_FW_BOOT_READY_FAILED: firmware booted but fw_ready op failed
+ * @SOF_FW_BOOT_READY_OK: firmware booted and fw_ready op passed
+ * @SOF_FW_BOOT_COMPLETE: firmware is booted up and functional
+ * @SOF_FW_CRASHED: firmware crashed after successful boot
+ */
+enum sof_fw_state {
+ SOF_FW_BOOT_NOT_STARTED = 0,
+ SOF_FW_BOOT_PREPARE,
+ SOF_FW_BOOT_IN_PROGRESS,
+ SOF_FW_BOOT_FAILED,
+ SOF_FW_BOOT_READY_FAILED,
+ SOF_FW_BOOT_READY_OK,
+ SOF_FW_BOOT_COMPLETE,
+ SOF_FW_CRASHED,
+};
+
/*
* SOF Platform data.
*/
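
[Annotation] A small sketch of how a caller might gate work on the state machine above; where the core stores the current state is an assumption, so only the predicate is shown:

static bool fw_usable_sketch(enum sof_fw_state state)
{
	/* IPC is only meaningful once the firmware answered fw_ready. */
	return state == SOF_FW_BOOT_READY_OK ||
	       state == SOF_FW_BOOT_COMPLETE;
}
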
diff --git a/include/sound/sof/dai-amd.h b/include/sound/sof/dai-amd.h
new file mode 100644
index 000000000000..90d09dbdd709
--- /dev/null
+++ b/include/sound/sof/dai-amd.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2021 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_DAI_AMD_H__
+#define __INCLUDE_SOUND_SOF_DAI_AMD_H__
+
+#include <sound/sof/header.h>
+
+/* ACP Configuration Request - SOF_IPC_DAI_AMD_CONFIG */
+struct sof_ipc_dai_acp_params {
+ struct sof_ipc_hdr hdr;
+
+ uint32_t fsync_rate; /* FSYNC frequency in Hz */
+ uint32_t tdm_slots;
+} __packed;
+#endif
diff --git a/include/sound/sof/dai-mediatek.h b/include/sound/sof/dai-mediatek.h
new file mode 100644
index 000000000000..62dd4720558d
--- /dev/null
+++ b/include/sound/sof/dai-mediatek.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/*
+ * Copyright(c) 2021 Mediatek Corporation. All rights reserved.
+ *
+ * Author: Bo Pan <bo.pan@mediatek.com>
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_DAI_MEDIATEK_H__
+#define __INCLUDE_SOUND_SOF_DAI_MEDIATEK_H__
+
+#include <sound/sof/header.h>
+
+struct sof_ipc_dai_mtk_afe_params {
+ struct sof_ipc_hdr hdr;
+ u32 channels;
+ u32 rate;
+ u32 format;
+ u32 stream_id;
+ u32 reserved[4]; /* reserved for future use */
+} __packed;
+
+#endif
+
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 9625f47557b8..59ee50ac7705 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -12,6 +12,8 @@
#include <sound/sof/header.h>
#include <sound/sof/dai-intel.h>
#include <sound/sof/dai-imx.h>
+#include <sound/sof/dai-amd.h>
+#include <sound/sof/dai-mediatek.h>
/*
* DAI Configuration.
@@ -50,12 +52,25 @@
#define SOF_DAI_FMT_INV_MASK 0x0f00
#define SOF_DAI_FMT_CLOCK_PROVIDER_MASK 0xf000
-/* DAI_CONFIG flags */
-#define SOF_DAI_CONFIG_FLAGS_MASK 0x3
-#define SOF_DAI_CONFIG_FLAGS_NONE (0 << 0) /**< DAI_CONFIG sent without stage information */
-#define SOF_DAI_CONFIG_FLAGS_HW_PARAMS (1 << 0) /**< DAI_CONFIG sent during hw_params stage */
-#define SOF_DAI_CONFIG_FLAGS_HW_FREE (2 << 0) /**< DAI_CONFIG sent during hw_free stage */
-#define SOF_DAI_CONFIG_FLAGS_RFU (3 << 0) /**< not used, reserved for future use */
+/*
+ * DAI_CONFIG flags. The four least-significant bits carry the command (HW_PARAMS, HW_FREE or
+ * PAUSE), indicating at which stage the IPC is sent. The next four bits add quirks to the
+ * above commands.
+ */
+#define SOF_DAI_CONFIG_FLAGS_CMD_MASK 0xF
+#define SOF_DAI_CONFIG_FLAGS_NONE 0 /**< DAI_CONFIG sent without stage information */
+#define SOF_DAI_CONFIG_FLAGS_HW_PARAMS BIT(0) /**< DAI_CONFIG sent during hw_params stage */
+#define SOF_DAI_CONFIG_FLAGS_HW_FREE BIT(1) /**< DAI_CONFIG sent during hw_free stage */
+/**< DAI_CONFIG sent during pause trigger. Only available from ABI 3.20 onwards */
+#define SOF_DAI_CONFIG_FLAGS_PAUSE BIT(2)
+#define SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT 4
+#define SOF_DAI_CONFIG_FLAGS_QUIRK_MASK (0xF << SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT)
+/*
+ * This should be used along with SOF_DAI_CONFIG_FLAGS_HW_PARAMS to indicate that pipeline
+ * stop/pause and DAI DMA stop/pause should happen in two steps. This change is only available
+ * from ABI 3.20 onwards.
+ */
+#define SOF_DAI_CONFIG_FLAGS_2_STEP_STOP BIT(0)
/** \brief Types of DAI */
enum sof_ipc_dai_type {
@@ -66,6 +81,10 @@ enum sof_ipc_dai_type {
SOF_DAI_INTEL_ALH, /**< Intel ALH */
SOF_DAI_IMX_SAI, /**< i.MX SAI */
SOF_DAI_IMX_ESAI, /**< i.MX ESAI */
+ SOF_DAI_AMD_BT, /**< AMD ACP BT */
+ SOF_DAI_AMD_SP, /**< AMD ACP SP */
+ SOF_DAI_AMD_DMIC, /**< AMD ACP DMIC */
+ SOF_DAI_MEDIATEK_AFE, /**< Mediatek AFE */
};
/* general purpose DAI configuration */
@@ -90,6 +109,10 @@ struct sof_ipc_dai_config {
struct sof_ipc_dai_alh_params alh;
struct sof_ipc_dai_esai_params esai;
struct sof_ipc_dai_sai_params sai;
+ struct sof_ipc_dai_acp_params acpbt;
+ struct sof_ipc_dai_acp_params acpsp;
+ struct sof_ipc_dai_acp_params acpdmic;
+ struct sof_ipc_dai_mtk_afe_params afe;
};
} __packed;
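
[Annotation] With commands in the low nibble and quirks in the next one, a DAI_CONFIG sent at hw_params with two-step stop is composed as below; this mirrors the layout described above rather than any particular in-tree call site:

u32 flags = SOF_DAI_CONFIG_FLAGS_HW_PARAMS |
	    (SOF_DAI_CONFIG_FLAGS_2_STEP_STOP <<
	     SOF_DAI_CONFIG_FLAGS_QUIRK_SHIFT);
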
diff --git a/include/sound/sof/debug.h b/include/sound/sof/debug.h
index 3ecb5793789d..38693e3fb514 100644
--- a/include/sound/sof/debug.h
+++ b/include/sound/sof/debug.h
@@ -19,6 +19,8 @@ enum sof_ipc_dbg_mem_zone {
SOF_IPC_MEM_ZONE_SYS_RUNTIME = 1, /**< System-runtime zone */
SOF_IPC_MEM_ZONE_RUNTIME = 2, /**< Runtime zone */
SOF_IPC_MEM_ZONE_BUFFER = 3, /**< Buffer zone */
+ SOF_IPC_MEM_ZONE_RUNTIME_SHARED = 4, /**< Runtime shared zone */
+ SOF_IPC_MEM_ZONE_SYS_SHARED = 5, /**< System shared zone */
};
/** ABI3.18 */
diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h
index 4c747c52e01b..b97a76bcb655 100644
--- a/include/sound/sof/header.h
+++ b/include/sound/sof/header.h
@@ -119,6 +119,7 @@
#define SOF_IPC_TRACE_DMA_POSITION SOF_CMD_TYPE(0x002)
#define SOF_IPC_TRACE_DMA_PARAMS_EXT SOF_CMD_TYPE(0x003)
#define SOF_IPC_TRACE_FILTER_UPDATE SOF_CMD_TYPE(0x004) /**< ABI3.17 */
+#define SOF_IPC_TRACE_DMA_FREE SOF_CMD_TYPE(0x005) /**< ABI3.20 */
/* debug */
#define SOF_IPC_DEBUG_MEM_USAGE SOF_CMD_TYPE(0x001)
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index a8e97f84b652..7660a7846586 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -21,6 +21,22 @@
#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+#undef __get_rel_dynamic_array
+#define __get_rel_dynamic_array(field) \
+ ((void *)(&__entry->__rel_loc_##field) + \
+ sizeof(__entry->__rel_loc_##field) + \
+ (__entry->__rel_loc_##field & 0xffff))
+
+#undef __get_rel_dynamic_array_len
+#define __get_rel_dynamic_array_len(field) \
+ ((__entry->__rel_loc_##field >> 16) & 0xffff)
+
+#undef __get_rel_str
+#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
+
+#undef __get_rel_bitmask
+#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
+
#undef __perf_count
#define __perf_count(c) (c)
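
[Annotation] The __rel_loc word packs a 16-bit offset in its low half (relative to the end of the rel_loc member itself) and a 16-bit length in its high half; the macros above simply undo that packing. The same decoding as a standalone sketch:

static inline void *rel_array_sketch(void *rel_loc_end, u32 rel_loc)
{
	return rel_loc_end + (rel_loc & 0xffff);	/* low 16 bits: offset */
}

static inline u32 rel_array_len_sketch(u32 rel_loc)
{
	return (rel_loc >> 16) & 0xffff;		/* high 16 bits: length */
}
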
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index a95daa4d4caa..27170e40e8c9 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -85,7 +85,7 @@ TRACE_EVENT(block_rq_requeue,
),
TP_fast_assign(
- __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
@@ -128,7 +128,7 @@ TRACE_EVENT(block_rq_complete,
),
TP_fast_assign(
- __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = nr_bytes >> 9;
__entry->error = blk_status_to_errno(error);
@@ -161,7 +161,7 @@ DECLARE_EVENT_CLASS(block_rq,
),
TP_fast_assign(
- __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->dev = rq->q->disk ? disk_devt(rq->q->disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
__entry->bytes = blk_rq_bytes(rq);
@@ -512,7 +512,7 @@ TRACE_EVENT(block_rq_remap,
),
TP_fast_assign(
- __entry->dev = disk_devt(rq->rq_disk);
+ __entry->dev = disk_devt(rq->q->disk);
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = blk_rq_sectors(rq);
__entry->old_dev = dev;
diff --git a/include/trace/events/cachefiles.h b/include/trace/events/cachefiles.h
index 920b6a303d60..c6f5aa74db89 100644
--- a/include/trace/events/cachefiles.h
+++ b/include/trace/events/cachefiles.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* CacheFiles tracepoints
*
- * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#undef TRACE_SYSTEM
@@ -19,9 +19,83 @@
#define __CACHEFILES_DECLARE_TRACE_ENUMS_ONCE_ONLY
enum cachefiles_obj_ref_trace {
- cachefiles_obj_put_wait_retry = fscache_obj_ref__nr_traces,
- cachefiles_obj_put_wait_timeo,
- cachefiles_obj_ref__nr_traces
+ cachefiles_obj_get_ioreq,
+ cachefiles_obj_new,
+ cachefiles_obj_put_alloc_fail,
+ cachefiles_obj_put_detach,
+ cachefiles_obj_put_ioreq,
+ cachefiles_obj_see_clean_commit,
+ cachefiles_obj_see_clean_delete,
+ cachefiles_obj_see_clean_drop_tmp,
+ cachefiles_obj_see_lookup_cookie,
+ cachefiles_obj_see_lookup_failed,
+ cachefiles_obj_see_withdraw_cookie,
+ cachefiles_obj_see_withdrawal,
+};
+
+enum fscache_why_object_killed {
+ FSCACHE_OBJECT_IS_STALE,
+ FSCACHE_OBJECT_IS_WEIRD,
+ FSCACHE_OBJECT_INVALIDATED,
+ FSCACHE_OBJECT_NO_SPACE,
+ FSCACHE_OBJECT_WAS_RETIRED,
+ FSCACHE_OBJECT_WAS_CULLED,
+ FSCACHE_VOLUME_IS_WEIRD,
+};
+
+enum cachefiles_coherency_trace {
+ cachefiles_coherency_check_aux,
+ cachefiles_coherency_check_content,
+ cachefiles_coherency_check_dirty,
+ cachefiles_coherency_check_len,
+ cachefiles_coherency_check_objsize,
+ cachefiles_coherency_check_ok,
+ cachefiles_coherency_check_type,
+ cachefiles_coherency_check_xattr,
+ cachefiles_coherency_set_fail,
+ cachefiles_coherency_set_ok,
+ cachefiles_coherency_vol_check_cmp,
+ cachefiles_coherency_vol_check_ok,
+ cachefiles_coherency_vol_check_xattr,
+ cachefiles_coherency_vol_set_fail,
+ cachefiles_coherency_vol_set_ok,
+};
+
+enum cachefiles_trunc_trace {
+ cachefiles_trunc_dio_adjust,
+ cachefiles_trunc_expand_tmpfile,
+ cachefiles_trunc_shrink,
+};
+
+enum cachefiles_prepare_read_trace {
+ cachefiles_trace_read_after_eof,
+ cachefiles_trace_read_found_hole,
+ cachefiles_trace_read_found_part,
+ cachefiles_trace_read_have_data,
+ cachefiles_trace_read_no_data,
+ cachefiles_trace_read_no_file,
+ cachefiles_trace_read_seek_error,
+ cachefiles_trace_read_seek_nxio,
+};
+
+enum cachefiles_error_trace {
+ cachefiles_trace_fallocate_error,
+ cachefiles_trace_getxattr_error,
+ cachefiles_trace_link_error,
+ cachefiles_trace_lookup_error,
+ cachefiles_trace_mkdir_error,
+ cachefiles_trace_notify_change_error,
+ cachefiles_trace_open_error,
+ cachefiles_trace_read_error,
+ cachefiles_trace_remxattr_error,
+ cachefiles_trace_rename_error,
+ cachefiles_trace_seek_error,
+ cachefiles_trace_setxattr_error,
+ cachefiles_trace_statfs_error,
+ cachefiles_trace_tmpfile_error,
+ cachefiles_trace_trunc_error,
+ cachefiles_trace_unlink_error,
+ cachefiles_trace_write_error,
};
#endif
@@ -31,21 +105,78 @@ enum cachefiles_obj_ref_trace {
*/
#define cachefiles_obj_kill_traces \
EM(FSCACHE_OBJECT_IS_STALE, "stale") \
+ EM(FSCACHE_OBJECT_IS_WEIRD, "weird") \
+ EM(FSCACHE_OBJECT_INVALIDATED, "inval") \
EM(FSCACHE_OBJECT_NO_SPACE, "no_space") \
EM(FSCACHE_OBJECT_WAS_RETIRED, "was_retired") \
- E_(FSCACHE_OBJECT_WAS_CULLED, "was_culled")
+ EM(FSCACHE_OBJECT_WAS_CULLED, "was_culled") \
+ E_(FSCACHE_VOLUME_IS_WEIRD, "volume_weird")
#define cachefiles_obj_ref_traces \
- EM(fscache_obj_get_add_to_deps, "GET add_to_deps") \
- EM(fscache_obj_get_queue, "GET queue") \
- EM(fscache_obj_put_alloc_fail, "PUT alloc_fail") \
- EM(fscache_obj_put_attach_fail, "PUT attach_fail") \
- EM(fscache_obj_put_drop_obj, "PUT drop_obj") \
- EM(fscache_obj_put_enq_dep, "PUT enq_dep") \
- EM(fscache_obj_put_queue, "PUT queue") \
- EM(fscache_obj_put_work, "PUT work") \
- EM(cachefiles_obj_put_wait_retry, "PUT wait_retry") \
- E_(cachefiles_obj_put_wait_timeo, "PUT wait_timeo")
+ EM(cachefiles_obj_get_ioreq, "GET ioreq") \
+ EM(cachefiles_obj_new, "NEW obj") \
+ EM(cachefiles_obj_put_alloc_fail, "PUT alloc_fail") \
+ EM(cachefiles_obj_put_detach, "PUT detach") \
+ EM(cachefiles_obj_put_ioreq, "PUT ioreq") \
+ EM(cachefiles_obj_see_clean_commit, "SEE clean_commit") \
+ EM(cachefiles_obj_see_clean_delete, "SEE clean_delete") \
+ EM(cachefiles_obj_see_clean_drop_tmp, "SEE clean_drop_tmp") \
+ EM(cachefiles_obj_see_lookup_cookie, "SEE lookup_cookie") \
+ EM(cachefiles_obj_see_lookup_failed, "SEE lookup_failed") \
+ EM(cachefiles_obj_see_withdraw_cookie, "SEE withdraw_cookie") \
+ E_(cachefiles_obj_see_withdrawal, "SEE withdrawal")
+
+#define cachefiles_coherency_traces \
+ EM(cachefiles_coherency_check_aux, "BAD aux ") \
+ EM(cachefiles_coherency_check_content, "BAD cont") \
+ EM(cachefiles_coherency_check_dirty, "BAD dirt") \
+ EM(cachefiles_coherency_check_len, "BAD len ") \
+ EM(cachefiles_coherency_check_objsize, "BAD osiz") \
+ EM(cachefiles_coherency_check_ok, "OK ") \
+ EM(cachefiles_coherency_check_type, "BAD type") \
+ EM(cachefiles_coherency_check_xattr, "BAD xatt") \
+ EM(cachefiles_coherency_set_fail, "SET fail") \
+ EM(cachefiles_coherency_set_ok, "SET ok ") \
+ EM(cachefiles_coherency_vol_check_cmp, "VOL BAD cmp ") \
+ EM(cachefiles_coherency_vol_check_ok, "VOL OK ") \
+ EM(cachefiles_coherency_vol_check_xattr,"VOL BAD xatt") \
+ EM(cachefiles_coherency_vol_set_fail, "VOL SET fail") \
+ E_(cachefiles_coherency_vol_set_ok, "VOL SET ok ")
+
+#define cachefiles_trunc_traces \
+ EM(cachefiles_trunc_dio_adjust, "DIOADJ") \
+ EM(cachefiles_trunc_expand_tmpfile, "EXPTMP") \
+ E_(cachefiles_trunc_shrink, "SHRINK")
+
+#define cachefiles_prepare_read_traces \
+ EM(cachefiles_trace_read_after_eof, "after-eof ") \
+ EM(cachefiles_trace_read_found_hole, "found-hole") \
+ EM(cachefiles_trace_read_found_part, "found-part") \
+ EM(cachefiles_trace_read_have_data, "have-data ") \
+ EM(cachefiles_trace_read_no_data, "no-data ") \
+ EM(cachefiles_trace_read_no_file, "no-file ") \
+ EM(cachefiles_trace_read_seek_error, "seek-error") \
+ E_(cachefiles_trace_read_seek_nxio, "seek-enxio")
+
+#define cachefiles_error_traces \
+ EM(cachefiles_trace_fallocate_error, "fallocate") \
+ EM(cachefiles_trace_getxattr_error, "getxattr") \
+ EM(cachefiles_trace_link_error, "link") \
+ EM(cachefiles_trace_lookup_error, "lookup") \
+ EM(cachefiles_trace_mkdir_error, "mkdir") \
+ EM(cachefiles_trace_notify_change_error, "notify_change") \
+ EM(cachefiles_trace_open_error, "open") \
+ EM(cachefiles_trace_read_error, "read") \
+ EM(cachefiles_trace_remxattr_error, "remxattr") \
+ EM(cachefiles_trace_rename_error, "rename") \
+ EM(cachefiles_trace_seek_error, "seek") \
+ EM(cachefiles_trace_setxattr_error, "setxattr") \
+ EM(cachefiles_trace_statfs_error, "statfs") \
+ EM(cachefiles_trace_tmpfile_error, "tmpfile") \
+ EM(cachefiles_trace_trunc_error, "trunc") \
+ EM(cachefiles_trace_unlink_error, "unlink") \
+ E_(cachefiles_trace_write_error, "write")
+
/*
* Export enum symbols via userspace.
@@ -57,6 +188,10 @@ enum cachefiles_obj_ref_trace {
cachefiles_obj_kill_traces;
cachefiles_obj_ref_traces;
+cachefiles_coherency_traces;
+cachefiles_trunc_traces;
+cachefiles_prepare_read_traces;
+cachefiles_error_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -69,12 +204,12 @@ cachefiles_obj_ref_traces;
TRACE_EVENT(cachefiles_ref,
- TP_PROTO(struct cachefiles_object *obj,
- struct fscache_cookie *cookie,
- enum cachefiles_obj_ref_trace why,
- int usage),
+ TP_PROTO(unsigned int object_debug_id,
+ unsigned int cookie_debug_id,
+ int usage,
+ enum cachefiles_obj_ref_trace why),
- TP_ARGS(obj, cookie, why, usage),
+ TP_ARGS(object_debug_id, cookie_debug_id, usage, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
@@ -85,8 +220,8 @@ TRACE_EVENT(cachefiles_ref,
),
TP_fast_assign(
- __entry->obj = obj->fscache.debug_id;
- __entry->cookie = cookie->debug_id;
+ __entry->obj = object_debug_id;
+ __entry->cookie = cookie_debug_id;
__entry->usage = usage;
__entry->why = why;
),
@@ -98,221 +233,440 @@ TRACE_EVENT(cachefiles_ref,
TRACE_EVENT(cachefiles_lookup,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
- struct inode *inode),
+ struct dentry *dir,
+ struct dentry *de),
- TP_ARGS(obj, de, inode),
+ TP_ARGS(obj, dir, de),
TP_STRUCT__entry(
__field(unsigned int, obj )
- __field(struct dentry *, de )
- __field(struct inode *, inode )
+ __field(short, error )
+ __field(unsigned long, dino )
+ __field(unsigned long, ino )
),
TP_fast_assign(
- __entry->obj = obj->fscache.debug_id;
- __entry->de = de;
- __entry->inode = inode;
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->dino = d_backing_inode(dir)->i_ino;
+ __entry->ino = (!IS_ERR(de) && d_backing_inode(de) ?
+ d_backing_inode(de)->i_ino : 0);
+ __entry->error = IS_ERR(de) ? PTR_ERR(de) : 0;
),
- TP_printk("o=%08x d=%p i=%p",
- __entry->obj, __entry->de, __entry->inode)
+ TP_printk("o=%08x dB=%lx B=%lx e=%d",
+ __entry->obj, __entry->dino, __entry->ino, __entry->error)
);
TRACE_EVENT(cachefiles_mkdir,
- TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de, int ret),
+ TP_PROTO(struct dentry *dir, struct dentry *subdir),
- TP_ARGS(obj, de, ret),
+ TP_ARGS(dir, subdir),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(struct dentry *, de )
- __field(int, ret )
+ __field(unsigned int, dir )
+ __field(unsigned int, subdir )
),
TP_fast_assign(
- __entry->obj = obj->fscache.debug_id;
- __entry->de = de;
- __entry->ret = ret;
+ __entry->dir = d_backing_inode(dir)->i_ino;
+ __entry->subdir = d_backing_inode(subdir)->i_ino;
),
- TP_printk("o=%08x d=%p r=%u",
- __entry->obj, __entry->de, __entry->ret)
+ TP_printk("dB=%x sB=%x",
+ __entry->dir,
+ __entry->subdir)
);
-TRACE_EVENT(cachefiles_create,
- TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de, int ret),
+TRACE_EVENT(cachefiles_tmpfile,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer),
- TP_ARGS(obj, de, ret),
+ TP_ARGS(obj, backer),
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(struct dentry *, de )
- __field(int, ret )
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
),
TP_fast_assign(
- __entry->obj = obj->fscache.debug_id;
- __entry->de = de;
- __entry->ret = ret;
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
),
- TP_printk("o=%08x d=%p r=%u",
- __entry->obj, __entry->de, __entry->ret)
+ TP_printk("o=%08x B=%x",
+ __entry->obj,
+ __entry->backer)
+ );
+
+TRACE_EVENT(cachefiles_link,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer),
+
+ TP_ARGS(obj, backer),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
+ ),
+
+ TP_printk("o=%08x B=%x",
+ __entry->obj,
+ __entry->backer)
);
TRACE_EVENT(cachefiles_unlink,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
+ ino_t ino,
enum fscache_why_object_killed why),
- TP_ARGS(obj, de, why),
+ TP_ARGS(obj, ino, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(unsigned int, obj )
- __field(struct dentry *, de )
+ __field(unsigned int, ino )
__field(enum fscache_why_object_killed, why )
),
TP_fast_assign(
- __entry->obj = obj ? obj->fscache.debug_id : UINT_MAX;
- __entry->de = de;
+ __entry->obj = obj ? obj->debug_id : UINT_MAX;
+ __entry->ino = ino;
__entry->why = why;
),
- TP_printk("o=%08x d=%p w=%s",
- __entry->obj, __entry->de,
+ TP_printk("o=%08x B=%x w=%s",
+ __entry->obj, __entry->ino,
__print_symbolic(__entry->why, cachefiles_obj_kill_traces))
);
TRACE_EVENT(cachefiles_rename,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
- struct dentry *to,
+ ino_t ino,
enum fscache_why_object_killed why),
- TP_ARGS(obj, de, to, why),
+ TP_ARGS(obj, ino, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(unsigned int, obj )
- __field(struct dentry *, de )
- __field(struct dentry *, to )
+ __field(unsigned int, ino )
__field(enum fscache_why_object_killed, why )
),
TP_fast_assign(
- __entry->obj = obj ? obj->fscache.debug_id : UINT_MAX;
- __entry->de = de;
- __entry->to = to;
+ __entry->obj = obj ? obj->debug_id : UINT_MAX;
+ __entry->ino = ino;
__entry->why = why;
),
- TP_printk("o=%08x d=%p t=%p w=%s",
- __entry->obj, __entry->de, __entry->to,
+ TP_printk("o=%08x B=%x w=%s",
+ __entry->obj, __entry->ino,
__print_symbolic(__entry->why, cachefiles_obj_kill_traces))
);
-TRACE_EVENT(cachefiles_mark_active,
+TRACE_EVENT(cachefiles_coherency,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de),
+ ino_t ino,
+ enum cachefiles_content content,
+ enum cachefiles_coherency_trace why),
- TP_ARGS(obj, de),
+ TP_ARGS(obj, ino, content, why),
/* Note that obj may be NULL */
TP_STRUCT__entry(
- __field(unsigned int, obj )
- __field(struct dentry *, de )
+ __field(unsigned int, obj )
+ __field(enum cachefiles_coherency_trace, why )
+ __field(enum cachefiles_content, content )
+ __field(u64, ino )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj->debug_id;
+ __entry->why = why;
+ __entry->content = content;
+ __entry->ino = ino;
+ ),
+
+ TP_printk("o=%08x %s B=%llx c=%u",
+ __entry->obj,
+ __print_symbolic(__entry->why, cachefiles_coherency_traces),
+ __entry->ino,
+ __entry->content)
+ );
+
+TRACE_EVENT(cachefiles_vol_coherency,
+ TP_PROTO(struct cachefiles_volume *volume,
+ ino_t ino,
+ enum cachefiles_coherency_trace why),
+
+ TP_ARGS(volume, ino, why),
+
+ /* Note that obj may be NULL */
+ TP_STRUCT__entry(
+ __field(unsigned int, vol )
+ __field(enum cachefiles_coherency_trace, why )
+ __field(u64, ino )
+ ),
+
+ TP_fast_assign(
+ __entry->vol = volume->vcookie->debug_id;
+ __entry->why = why;
+ __entry->ino = ino;
+ ),
+
+ TP_printk("V=%08x %s B=%llx",
+ __entry->vol,
+ __print_symbolic(__entry->why, cachefiles_coherency_traces),
+ __entry->ino)
+ );
+
+TRACE_EVENT(cachefiles_prep_read,
+ TP_PROTO(struct netfs_read_subrequest *sreq,
+ enum netfs_read_source source,
+ enum cachefiles_prepare_read_trace why,
+ ino_t cache_inode),
+
+ TP_ARGS(sreq, source, why, cache_inode),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, rreq )
+ __field(unsigned short, index )
+ __field(unsigned short, flags )
+ __field(enum netfs_read_source, source )
+ __field(enum cachefiles_prepare_read_trace, why )
+ __field(size_t, len )
+ __field(loff_t, start )
+ __field(unsigned int, netfs_inode )
+ __field(unsigned int, cache_inode )
),
TP_fast_assign(
- __entry->obj = obj->fscache.debug_id;
- __entry->de = de;
+ __entry->rreq = sreq->rreq->debug_id;
+ __entry->index = sreq->debug_index;
+ __entry->flags = sreq->flags;
+ __entry->source = source;
+ __entry->why = why;
+ __entry->len = sreq->len;
+ __entry->start = sreq->start;
+ __entry->netfs_inode = sreq->rreq->inode->i_ino;
+ __entry->cache_inode = cache_inode;
),
- TP_printk("o=%08x d=%p",
- __entry->obj, __entry->de)
+ TP_printk("R=%08x[%u] %s %s f=%02x s=%llx %zx ni=%x B=%x",
+ __entry->rreq, __entry->index,
+ __print_symbolic(__entry->source, netfs_sreq_sources),
+ __print_symbolic(__entry->why, cachefiles_prepare_read_traces),
+ __entry->flags,
+ __entry->start, __entry->len,
+ __entry->netfs_inode, __entry->cache_inode)
);
-TRACE_EVENT(cachefiles_wait_active,
+TRACE_EVENT(cachefiles_read,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
- struct cachefiles_object *xobj),
+ struct inode *backer,
+ loff_t start,
+ size_t len),
- TP_ARGS(obj, de, xobj),
+ TP_ARGS(obj, backer, start, len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(size_t, len )
+ __field(loff_t, start )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
+ __entry->start = start;
+ __entry->len = len;
+ ),
+
+ TP_printk("o=%08x B=%x s=%llx l=%zx",
+ __entry->obj,
+ __entry->backer,
+ __entry->start,
+ __entry->len)
+ );
+
+TRACE_EVENT(cachefiles_write,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct inode *backer,
+ loff_t start,
+ size_t len),
+
+ TP_ARGS(obj, backer, start, len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(size_t, len )
+ __field(loff_t, start )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
+ __entry->start = start;
+ __entry->len = len;
+ ),
+
+ TP_printk("o=%08x B=%x s=%llx l=%zx",
+ __entry->obj,
+ __entry->backer,
+ __entry->start,
+ __entry->len)
+ );
+
+TRACE_EVENT(cachefiles_trunc,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer,
+ loff_t from, loff_t to, enum cachefiles_trunc_trace why),
+
+ TP_ARGS(obj, backer, from, to, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(enum cachefiles_trunc_trace, why )
+ __field(loff_t, from )
+ __field(loff_t, to )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj->debug_id;
+ __entry->backer = backer->i_ino;
+ __entry->from = from;
+ __entry->to = to;
+ __entry->why = why;
+ ),
+
+ TP_printk("o=%08x B=%x %s l=%llx->%llx",
+ __entry->obj,
+ __entry->backer,
+ __print_symbolic(__entry->why, cachefiles_trunc_traces),
+ __entry->from,
+ __entry->to)
+ );
+
+TRACE_EVENT(cachefiles_mark_active,
+ TP_PROTO(struct cachefiles_object *obj,
+ struct inode *inode),
+
+ TP_ARGS(obj, inode),
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(unsigned int, obj )
- __field(unsigned int, xobj )
- __field(struct dentry *, de )
- __field(u16, flags )
- __field(u16, fsc_flags )
+ __field(ino_t, inode )
),
TP_fast_assign(
- __entry->obj = obj->fscache.debug_id;
- __entry->de = de;
- __entry->xobj = xobj->fscache.debug_id;
- __entry->flags = xobj->flags;
- __entry->fsc_flags = xobj->fscache.flags;
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->inode = inode->i_ino;
),
- TP_printk("o=%08x d=%p wo=%08x wf=%x wff=%x",
- __entry->obj, __entry->de, __entry->xobj,
- __entry->flags, __entry->fsc_flags)
+ TP_printk("o=%08x B=%lx",
+ __entry->obj, __entry->inode)
);
-TRACE_EVENT(cachefiles_mark_inactive,
+TRACE_EVENT(cachefiles_mark_failed,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
struct inode *inode),
- TP_ARGS(obj, de, inode),
+ TP_ARGS(obj, inode),
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(unsigned int, obj )
- __field(struct dentry *, de )
- __field(struct inode *, inode )
+ __field(ino_t, inode )
),
TP_fast_assign(
- __entry->obj = obj->fscache.debug_id;
- __entry->de = de;
- __entry->inode = inode;
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->inode = inode->i_ino;
),
- TP_printk("o=%08x d=%p i=%p",
- __entry->obj, __entry->de, __entry->inode)
+ TP_printk("o=%08x B=%lx",
+ __entry->obj, __entry->inode)
);
-TRACE_EVENT(cachefiles_mark_buried,
+TRACE_EVENT(cachefiles_mark_inactive,
TP_PROTO(struct cachefiles_object *obj,
- struct dentry *de,
- enum fscache_why_object_killed why),
+ struct inode *inode),
- TP_ARGS(obj, de, why),
+ TP_ARGS(obj, inode),
/* Note that obj may be NULL */
TP_STRUCT__entry(
__field(unsigned int, obj )
- __field(struct dentry *, de )
- __field(enum fscache_why_object_killed, why )
+ __field(ino_t, inode )
),
TP_fast_assign(
- __entry->obj = obj ? obj->fscache.debug_id : UINT_MAX;
- __entry->de = de;
- __entry->why = why;
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->inode = inode->i_ino;
),
- TP_printk("o=%08x d=%p w=%s",
- __entry->obj, __entry->de,
- __print_symbolic(__entry->why, cachefiles_obj_kill_traces))
+ TP_printk("o=%08x B=%lx",
+ __entry->obj, __entry->inode)
+ );
+
+TRACE_EVENT(cachefiles_vfs_error,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer,
+ int error, enum cachefiles_error_trace where),
+
+ TP_ARGS(obj, backer, error, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(enum cachefiles_error_trace, where )
+ __field(short, error )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->backer = backer->i_ino;
+ __entry->error = error;
+ __entry->where = where;
+ ),
+
+ TP_printk("o=%08x B=%x %s e=%d",
+ __entry->obj,
+ __entry->backer,
+ __print_symbolic(__entry->where, cachefiles_error_traces),
+ __entry->error)
+ );
+
+TRACE_EVENT(cachefiles_io_error,
+ TP_PROTO(struct cachefiles_object *obj, struct inode *backer,
+ int error, enum cachefiles_error_trace where),
+
+ TP_ARGS(obj, backer, error, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, obj )
+ __field(unsigned int, backer )
+ __field(enum cachefiles_error_trace, where )
+ __field(short, error )
+ ),
+
+ TP_fast_assign(
+ __entry->obj = obj ? obj->debug_id : 0;
+ __entry->backer = backer->i_ino;
+ __entry->error = error;
+ __entry->where = where;
+ ),
+
+ TP_printk("o=%08x B=%x %s e=%d",
+ __entry->obj,
+ __entry->backer,
+ __print_symbolic(__entry->where, cachefiles_error_traces),
+ __entry->error)
);
#endif /* _TRACE_CACHEFILES_H */
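
[Annotation] Each TRACE_EVENT() above generates a trace_<name>() call matching its TP_PROTO. A hedged sketch of emitting the new I/O error event from cache code; the surrounding fsync call is illustrative, not a real call site:

static int sync_and_trace_sketch(struct cachefiles_object *object,
				 struct file *file)
{
	int ret = vfs_fsync(file, 0);

	if (ret < 0)
		trace_cachefiles_io_error(object, file_inode(file), ret,
					  cachefiles_trace_write_error);
	return ret;
}
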
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index 54e5bf081171..7d48e7079e48 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -68,10 +68,9 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages,
TRACE_EVENT(mm_compaction_migratepages,
TP_PROTO(unsigned long nr_all,
- int migrate_rc,
- struct list_head *migratepages),
+ unsigned int nr_succeeded),
- TP_ARGS(nr_all, migrate_rc, migratepages),
+ TP_ARGS(nr_all, nr_succeeded),
TP_STRUCT__entry(
__field(unsigned long, nr_migrated)
@@ -79,23 +78,8 @@ TRACE_EVENT(mm_compaction_migratepages,
),
TP_fast_assign(
- unsigned long nr_failed = 0;
- struct list_head *page_lru;
-
- /*
- * migrate_pages() returns either a non-negative number
- * with the number of pages that failed migration, or an
- * error code, in which case we need to count the remaining
- * pages manually
- */
- if (migrate_rc >= 0)
- nr_failed = migrate_rc;
- else
- list_for_each(page_lru, migratepages)
- nr_failed++;
-
- __entry->nr_migrated = nr_all - nr_failed;
- __entry->nr_failed = nr_failed;
+ __entry->nr_migrated = nr_succeeded;
+ __entry->nr_failed = nr_all - nr_succeeded;
),
TP_printk("nr_migrated=%lu nr_failed=%lu",
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
index 2f422f4f1fb9..c79f1d4c39af 100644
--- a/include/trace/events/damon.h
+++ b/include/trace/events/damon.h
@@ -11,10 +11,10 @@
TRACE_EVENT(damon_aggregated,
- TP_PROTO(struct damon_target *t, struct damon_region *r,
- unsigned int nr_regions),
+ TP_PROTO(struct damon_target *t, unsigned int target_id,
+ struct damon_region *r, unsigned int nr_regions),
- TP_ARGS(t, r, nr_regions),
+ TP_ARGS(t, target_id, r, nr_regions),
TP_STRUCT__entry(
__field(unsigned long, target_id)
@@ -22,19 +22,22 @@ TRACE_EVENT(damon_aggregated,
__field(unsigned long, start)
__field(unsigned long, end)
__field(unsigned int, nr_accesses)
+ __field(unsigned int, age)
),
TP_fast_assign(
- __entry->target_id = t->id;
+ __entry->target_id = target_id;
__entry->nr_regions = nr_regions;
__entry->start = r->ar.start;
__entry->end = r->ar.end;
__entry->nr_accesses = r->nr_accesses;
+ __entry->age = r->age;
),
- TP_printk("target_id=%lu nr_regions=%u %lu-%lu: %u",
+ TP_printk("target_id=%lu nr_regions=%u %lu-%lu: %u %u",
__entry->target_id, __entry->nr_regions,
- __entry->start, __entry->end, __entry->nr_accesses)
+ __entry->start, __entry->end,
+ __entry->nr_accesses, __entry->age)
);
#endif /* _TRACE_DAMON_H */
diff --git a/include/trace/events/error_report.h b/include/trace/events/error_report.h
index 96f64bf218b2..a1922a800e6f 100644
--- a/include/trace/events/error_report.h
+++ b/include/trace/events/error_report.h
@@ -17,14 +17,16 @@
enum error_detector {
ERROR_DETECTOR_KFENCE,
- ERROR_DETECTOR_KASAN
+ ERROR_DETECTOR_KASAN,
+ ERROR_DETECTOR_WARN,
};
#endif /* __ERROR_REPORT_DECLARE_TRACE_ENUMS_ONCE_ONLY */
-#define error_detector_list \
+#define error_detector_list \
EM(ERROR_DETECTOR_KFENCE, "kfence") \
- EMe(ERROR_DETECTOR_KASAN, "kasan")
+ EM(ERROR_DETECTOR_KASAN, "kasan") \
+ EMe(ERROR_DETECTOR_WARN, "warning")
/* Always end the list with an EMe. */
#undef EM
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index f8cb916f3595..f701bb23f83c 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -540,17 +540,17 @@ TRACE_EVENT(f2fs_truncate_partial_nodes,
TRACE_EVENT(f2fs_file_write_iter,
- TP_PROTO(struct inode *inode, unsigned long offset,
- unsigned long length, int ret),
+ TP_PROTO(struct inode *inode, loff_t offset, size_t length,
+ ssize_t ret),
TP_ARGS(inode, offset, length, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(unsigned long, offset)
- __field(unsigned long, length)
- __field(int, ret)
+ __field(loff_t, offset)
+ __field(size_t, length)
+ __field(ssize_t, ret)
),
TP_fast_assign(
@@ -562,7 +562,7 @@ TRACE_EVENT(f2fs_file_write_iter,
),
TP_printk("dev = (%d,%d), ino = %lu, "
- "offset = %lu, length = %lu, written(err) = %d",
+ "offset = %lld, length = %zu, written(err) = %zd",
show_dev_ino(__entry),
__entry->offset,
__entry->length,
@@ -936,14 +936,14 @@ TRACE_EVENT(f2fs_fallocate,
TRACE_EVENT(f2fs_direct_IO_enter,
- TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
+ TP_PROTO(struct inode *inode, struct kiocb *iocb, long len, int rw),
- TP_ARGS(inode, offset, len, rw),
+ TP_ARGS(inode, iocb, len, rw),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(ino_t, ino)
- __field(loff_t, pos)
+ __field(struct kiocb *, iocb)
__field(unsigned long, len)
__field(int, rw)
),
@@ -951,15 +951,18 @@ TRACE_EVENT(f2fs_direct_IO_enter,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->pos = offset;
+ __entry->iocb = iocb;
__entry->len = len;
__entry->rw = rw;
),
- TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu rw = %d",
+ TP_printk("dev = (%d,%d), ino = %lu pos = %lld len = %lu ki_flags = %x ki_hint = %x ki_ioprio = %x rw = %d",
show_dev_ino(__entry),
- __entry->pos,
+ __entry->iocb->ki_pos,
__entry->len,
+ __entry->iocb->ki_flags,
+ __entry->iocb->ki_hint,
+ __entry->iocb->ki_ioprio,
__entry->rw)
);
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index c47b63db124e..46c89c1e460c 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -15,43 +15,45 @@
DECLARE_EVENT_CLASS(mm_filemap_op_page_cache,
- TP_PROTO(struct page *page),
+ TP_PROTO(struct folio *folio),
- TP_ARGS(page),
+ TP_ARGS(folio),
TP_STRUCT__entry(
__field(unsigned long, pfn)
__field(unsigned long, i_ino)
__field(unsigned long, index)
__field(dev_t, s_dev)
+ __field(unsigned char, order)
),
TP_fast_assign(
- __entry->pfn = page_to_pfn(page);
- __entry->i_ino = page->mapping->host->i_ino;
- __entry->index = page->index;
- if (page->mapping->host->i_sb)
- __entry->s_dev = page->mapping->host->i_sb->s_dev;
+ __entry->pfn = folio_pfn(folio);
+ __entry->i_ino = folio->mapping->host->i_ino;
+ __entry->index = folio->index;
+ if (folio->mapping->host->i_sb)
+ __entry->s_dev = folio->mapping->host->i_sb->s_dev;
else
- __entry->s_dev = page->mapping->host->i_rdev;
+ __entry->s_dev = folio->mapping->host->i_rdev;
+ __entry->order = folio_order(folio);
),
- TP_printk("dev %d:%d ino %lx page=%p pfn=0x%lx ofs=%lu",
+ TP_printk("dev %d:%d ino %lx pfn=0x%lx ofs=%lu order=%u",
MAJOR(__entry->s_dev), MINOR(__entry->s_dev),
__entry->i_ino,
- pfn_to_page(__entry->pfn),
__entry->pfn,
- __entry->index << PAGE_SHIFT)
+ __entry->index << PAGE_SHIFT,
+ __entry->order)
);
DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_delete_from_page_cache,
- TP_PROTO(struct page *page),
- TP_ARGS(page)
+ TP_PROTO(struct folio *folio),
+ TP_ARGS(folio)
);
DEFINE_EVENT(mm_filemap_op_page_cache, mm_filemap_add_to_page_cache,
- TP_PROTO(struct page *page),
- TP_ARGS(page)
+ TP_PROTO(struct folio *folio),
+ TP_ARGS(folio)
);
TRACE_EVENT(filemap_set_wb_err,
diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
index 446392f5ba83..cb3fb337e880 100644
--- a/include/trace/events/fscache.h
+++ b/include/trace/events/fscache.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/* FS-Cache tracepoints
*
- * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/
#undef TRACE_SYSTEM
@@ -19,65 +19,83 @@
#ifndef __FSCACHE_DECLARE_TRACE_ENUMS_ONCE_ONLY
#define __FSCACHE_DECLARE_TRACE_ENUMS_ONCE_ONLY
+enum fscache_cache_trace {
+ fscache_cache_collision,
+ fscache_cache_get_acquire,
+ fscache_cache_new_acquire,
+ fscache_cache_put_alloc_volume,
+ fscache_cache_put_cache,
+ fscache_cache_put_prep_failed,
+ fscache_cache_put_relinquish,
+ fscache_cache_put_volume,
+};
+
+enum fscache_volume_trace {
+ fscache_volume_collision,
+ fscache_volume_get_cookie,
+ fscache_volume_get_create_work,
+ fscache_volume_get_hash_collision,
+ fscache_volume_free,
+ fscache_volume_new_acquire,
+ fscache_volume_put_cookie,
+ fscache_volume_put_create_work,
+ fscache_volume_put_hash_collision,
+ fscache_volume_put_relinquish,
+ fscache_volume_see_create_work,
+ fscache_volume_see_hash_wake,
+ fscache_volume_wait_create_work,
+};
+
enum fscache_cookie_trace {
fscache_cookie_collision,
fscache_cookie_discard,
- fscache_cookie_get_acquire_parent,
fscache_cookie_get_attach_object,
- fscache_cookie_get_reacquire,
- fscache_cookie_get_register_netfs,
- fscache_cookie_put_acquire_nobufs,
- fscache_cookie_put_dup_netfs,
- fscache_cookie_put_relinquish,
+ fscache_cookie_get_end_access,
+ fscache_cookie_get_hash_collision,
+ fscache_cookie_get_inval_work,
+ fscache_cookie_get_lru,
+ fscache_cookie_get_use_work,
+ fscache_cookie_new_acquire,
+ fscache_cookie_put_hash_collision,
+ fscache_cookie_put_lru,
fscache_cookie_put_object,
- fscache_cookie_put_parent,
-};
-
-enum fscache_page_trace {
- fscache_page_cached,
- fscache_page_inval,
- fscache_page_maybe_release,
- fscache_page_radix_clear_store,
- fscache_page_radix_delete,
- fscache_page_radix_insert,
- fscache_page_radix_pend2store,
- fscache_page_radix_set_pend,
- fscache_page_uncache,
- fscache_page_write,
- fscache_page_write_end,
- fscache_page_write_end_pend,
- fscache_page_write_end_noc,
- fscache_page_write_wait,
- fscache_page_trace__nr
+ fscache_cookie_put_over_queued,
+ fscache_cookie_put_relinquish,
+ fscache_cookie_put_withdrawn,
+ fscache_cookie_put_work,
+ fscache_cookie_see_active,
+ fscache_cookie_see_lru_discard,
+ fscache_cookie_see_lru_do_one,
+ fscache_cookie_see_relinquish,
+ fscache_cookie_see_withdraw,
+ fscache_cookie_see_work,
};
-enum fscache_op_trace {
- fscache_op_cancel,
- fscache_op_cancel_all,
- fscache_op_cancelled,
- fscache_op_completed,
- fscache_op_enqueue_async,
- fscache_op_enqueue_mythread,
- fscache_op_gc,
- fscache_op_init,
- fscache_op_put,
- fscache_op_run,
- fscache_op_signal,
- fscache_op_submit,
- fscache_op_submit_ex,
- fscache_op_work,
- fscache_op_trace__nr
+enum fscache_active_trace {
+ fscache_active_use,
+ fscache_active_use_modify,
+ fscache_active_unuse,
};
-enum fscache_page_op_trace {
- fscache_page_op_alloc_one,
- fscache_page_op_attr_changed,
- fscache_page_op_check_consistency,
- fscache_page_op_invalidate,
- fscache_page_op_retr_multi,
- fscache_page_op_retr_one,
- fscache_page_op_write_one,
- fscache_page_op_trace__nr
+enum fscache_access_trace {
+ fscache_access_acquire_volume,
+ fscache_access_acquire_volume_end,
+ fscache_access_cache_pin,
+ fscache_access_cache_unpin,
+ fscache_access_invalidate_cookie,
+ fscache_access_invalidate_cookie_end,
+ fscache_access_io_end,
+ fscache_access_io_not_live,
+ fscache_access_io_read,
+ fscache_access_io_resize,
+ fscache_access_io_wait,
+ fscache_access_io_write,
+ fscache_access_lookup_cookie,
+ fscache_access_lookup_cookie_end,
+ fscache_access_lookup_cookie_end_failed,
+ fscache_access_relinquish_volume,
+ fscache_access_relinquish_volume_end,
+ fscache_access_unlive,
};
#endif
@@ -85,59 +103,79 @@ enum fscache_page_op_trace {
/*
* Declare tracing information enums and their string mappings for display.
*/
+#define fscache_cache_traces \
+ EM(fscache_cache_collision, "*COLLIDE*") \
+ EM(fscache_cache_get_acquire, "GET acq ") \
+ EM(fscache_cache_new_acquire, "NEW acq ") \
+ EM(fscache_cache_put_alloc_volume, "PUT alvol") \
+ EM(fscache_cache_put_cache, "PUT cache") \
+ EM(fscache_cache_put_prep_failed, "PUT pfail") \
+ EM(fscache_cache_put_relinquish, "PUT relnq") \
+ E_(fscache_cache_put_volume, "PUT vol ")
+
+#define fscache_volume_traces \
+ EM(fscache_volume_collision, "*COLLIDE*") \
+ EM(fscache_volume_get_cookie, "GET cook ") \
+ EM(fscache_volume_get_create_work, "GET creat") \
+ EM(fscache_volume_get_hash_collision, "GET hcoll") \
+ EM(fscache_volume_free, "FREE ") \
+ EM(fscache_volume_new_acquire, "NEW acq ") \
+ EM(fscache_volume_put_cookie, "PUT cook ") \
+ EM(fscache_volume_put_create_work, "PUT creat") \
+ EM(fscache_volume_put_hash_collision, "PUT hcoll") \
+ EM(fscache_volume_put_relinquish, "PUT relnq") \
+ EM(fscache_volume_see_create_work, "SEE creat") \
+ EM(fscache_volume_see_hash_wake, "SEE hwake") \
+ E_(fscache_volume_wait_create_work, "WAIT crea")
+
#define fscache_cookie_traces \
- EM(fscache_cookie_collision, "*COLLISION*") \
- EM(fscache_cookie_discard, "DISCARD") \
- EM(fscache_cookie_get_acquire_parent, "GET prn") \
- EM(fscache_cookie_get_attach_object, "GET obj") \
- EM(fscache_cookie_get_reacquire, "GET raq") \
- EM(fscache_cookie_get_register_netfs, "GET net") \
- EM(fscache_cookie_put_acquire_nobufs, "PUT nbf") \
- EM(fscache_cookie_put_dup_netfs, "PUT dnt") \
- EM(fscache_cookie_put_relinquish, "PUT rlq") \
- EM(fscache_cookie_put_object, "PUT obj") \
- E_(fscache_cookie_put_parent, "PUT prn")
-
-#define fscache_page_traces \
- EM(fscache_page_cached, "Cached ") \
- EM(fscache_page_inval, "InvalPg") \
- EM(fscache_page_maybe_release, "MayRels") \
- EM(fscache_page_uncache, "Uncache") \
- EM(fscache_page_radix_clear_store, "RxCStr ") \
- EM(fscache_page_radix_delete, "RxDel ") \
- EM(fscache_page_radix_insert, "RxIns ") \
- EM(fscache_page_radix_pend2store, "RxP2S ") \
- EM(fscache_page_radix_set_pend, "RxSPend ") \
- EM(fscache_page_write, "WritePg") \
- EM(fscache_page_write_end, "EndPgWr") \
- EM(fscache_page_write_end_pend, "EndPgWP") \
- EM(fscache_page_write_end_noc, "EndPgNC") \
- E_(fscache_page_write_wait, "WtOnWrt")
-
-#define fscache_op_traces \
- EM(fscache_op_cancel, "Cancel1") \
- EM(fscache_op_cancel_all, "CancelA") \
- EM(fscache_op_cancelled, "Canclld") \
- EM(fscache_op_completed, "Complet") \
- EM(fscache_op_enqueue_async, "EnqAsyn") \
- EM(fscache_op_enqueue_mythread, "EnqMyTh") \
- EM(fscache_op_gc, "GC ") \
- EM(fscache_op_init, "Init ") \
- EM(fscache_op_put, "Put ") \
- EM(fscache_op_run, "Run ") \
- EM(fscache_op_signal, "Signal ") \
- EM(fscache_op_submit, "Submit ") \
- EM(fscache_op_submit_ex, "SubmitX") \
- E_(fscache_op_work, "Work ")
-
-#define fscache_page_op_traces \
- EM(fscache_page_op_alloc_one, "Alloc1 ") \
- EM(fscache_page_op_attr_changed, "AttrChg") \
- EM(fscache_page_op_check_consistency, "CheckCn") \
- EM(fscache_page_op_invalidate, "Inval ") \
- EM(fscache_page_op_retr_multi, "RetrMul") \
- EM(fscache_page_op_retr_one, "Retr1 ") \
- E_(fscache_page_op_write_one, "Write1 ")
+ EM(fscache_cookie_collision, "*COLLIDE*") \
+ EM(fscache_cookie_discard, "DISCARD ") \
+ EM(fscache_cookie_get_attach_object, "GET attch") \
+ EM(fscache_cookie_get_hash_collision, "GET hcoll") \
+ EM(fscache_cookie_get_end_access, "GQ endac") \
+ EM(fscache_cookie_get_inval_work, "GQ inval") \
+ EM(fscache_cookie_get_lru, "GET lru ") \
+ EM(fscache_cookie_get_use_work, "GQ use ") \
+ EM(fscache_cookie_new_acquire, "NEW acq ") \
+ EM(fscache_cookie_put_hash_collision, "PUT hcoll") \
+ EM(fscache_cookie_put_lru, "PUT lru ") \
+ EM(fscache_cookie_put_object, "PUT obj ") \
+ EM(fscache_cookie_put_over_queued, "PQ overq") \
+ EM(fscache_cookie_put_relinquish, "PUT relnq") \
+ EM(fscache_cookie_put_withdrawn, "PUT wthdn") \
+ EM(fscache_cookie_put_work, "PQ work ") \
+ EM(fscache_cookie_see_active, "- activ") \
+ EM(fscache_cookie_see_lru_discard, "- x-lru") \
+ EM(fscache_cookie_see_lru_do_one, "- lrudo") \
+ EM(fscache_cookie_see_relinquish, "- x-rlq") \
+ EM(fscache_cookie_see_withdraw, "- x-wth") \
+ E_(fscache_cookie_see_work, "- work ")
+
+#define fscache_active_traces \
+ EM(fscache_active_use, "USE ") \
+ EM(fscache_active_use_modify, "USE-m ") \
+ E_(fscache_active_unuse, "UNUSE ")
+
+#define fscache_access_traces \
+ EM(fscache_access_acquire_volume, "BEGIN acq_vol") \
+ EM(fscache_access_acquire_volume_end, "END acq_vol") \
+ EM(fscache_access_cache_pin, "PIN cache ") \
+ EM(fscache_access_cache_unpin, "UNPIN cache ") \
+ EM(fscache_access_invalidate_cookie, "BEGIN inval ") \
+ EM(fscache_access_invalidate_cookie_end,"END inval ") \
+ EM(fscache_access_io_end, "END io ") \
+ EM(fscache_access_io_not_live, "END io_notl") \
+ EM(fscache_access_io_read, "BEGIN io_read") \
+ EM(fscache_access_io_resize, "BEGIN io_resz") \
+ EM(fscache_access_io_wait, "WAIT io ") \
+ EM(fscache_access_io_write, "BEGIN io_writ") \
+ EM(fscache_access_lookup_cookie, "BEGIN lookup ") \
+ EM(fscache_access_lookup_cookie_end, "END lookup ") \
+ EM(fscache_access_lookup_cookie_end_failed,"END lookupf") \
+ EM(fscache_access_relinquish_volume, "BEGIN rlq_vol") \
+ EM(fscache_access_relinquish_volume_end,"END rlq_vol") \
+ E_(fscache_access_unlive, "END unlive ")
/*
* Export enum symbols via userspace.
@@ -147,7 +185,10 @@ enum fscache_page_op_trace {
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
+fscache_cache_traces;
+fscache_volume_traces;
fscache_cookie_traces;
+fscache_access_traces;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -159,362 +200,297 @@ fscache_cookie_traces;
#define E_(a, b) { a, b }
-TRACE_EVENT(fscache_cookie,
- TP_PROTO(unsigned int cookie_debug_id,
- int ref,
- enum fscache_cookie_trace where),
+TRACE_EVENT(fscache_cache,
+ TP_PROTO(unsigned int cache_debug_id,
+ int usage,
+ enum fscache_cache_trace where),
- TP_ARGS(cookie_debug_id, ref, where),
+ TP_ARGS(cache_debug_id, usage, where),
TP_STRUCT__entry(
- __field(unsigned int, cookie )
- __field(enum fscache_cookie_trace, where )
- __field(int, ref )
+ __field(unsigned int, cache )
+ __field(int, usage )
+ __field(enum fscache_cache_trace, where )
),
TP_fast_assign(
- __entry->cookie = cookie_debug_id;
+ __entry->cache = cache_debug_id;
+ __entry->usage = usage;
__entry->where = where;
- __entry->ref = ref;
),
- TP_printk("%s c=%08x r=%d",
- __print_symbolic(__entry->where, fscache_cookie_traces),
- __entry->cookie, __entry->ref)
+ TP_printk("C=%08x %s r=%d",
+ __entry->cache,
+ __print_symbolic(__entry->where, fscache_cache_traces),
+ __entry->usage)
);
-TRACE_EVENT(fscache_netfs,
- TP_PROTO(struct fscache_netfs *netfs),
+TRACE_EVENT(fscache_volume,
+ TP_PROTO(unsigned int volume_debug_id,
+ int usage,
+ enum fscache_volume_trace where),
- TP_ARGS(netfs),
-
- TP_STRUCT__entry(
- __field(unsigned int, cookie )
- __array(char, name, 8 )
- ),
-
- TP_fast_assign(
- __entry->cookie = netfs->primary_index->debug_id;
- strncpy(__entry->name, netfs->name, 8);
- __entry->name[7] = 0;
- ),
-
- TP_printk("c=%08x n=%s",
- __entry->cookie, __entry->name)
- );
-
-TRACE_EVENT(fscache_acquire,
- TP_PROTO(struct fscache_cookie *cookie),
-
- TP_ARGS(cookie),
+ TP_ARGS(volume_debug_id, usage, where),
TP_STRUCT__entry(
- __field(unsigned int, cookie )
- __field(unsigned int, parent )
- __array(char, name, 8 )
- __field(int, p_ref )
- __field(int, p_n_children )
- __field(u8, p_flags )
+ __field(unsigned int, volume )
+ __field(int, usage )
+ __field(enum fscache_volume_trace, where )
),
TP_fast_assign(
- __entry->cookie = cookie->debug_id;
- __entry->parent = cookie->parent->debug_id;
- __entry->p_ref = refcount_read(&cookie->parent->ref);
- __entry->p_n_children = atomic_read(&cookie->parent->n_children);
- __entry->p_flags = cookie->parent->flags;
- memcpy(__entry->name, cookie->def->name, 8);
- __entry->name[7] = 0;
+ __entry->volume = volume_debug_id;
+ __entry->usage = usage;
+ __entry->where = where;
),
- TP_printk("c=%08x p=%08x pr=%d pc=%d pf=%02x n=%s",
- __entry->cookie, __entry->parent, __entry->p_ref,
- __entry->p_n_children, __entry->p_flags, __entry->name)
+ TP_printk("V=%08x %s u=%d",
+ __entry->volume,
+ __print_symbolic(__entry->where, fscache_volume_traces),
+ __entry->usage)
);
-TRACE_EVENT(fscache_relinquish,
- TP_PROTO(struct fscache_cookie *cookie, bool retire),
+TRACE_EVENT(fscache_cookie,
+ TP_PROTO(unsigned int cookie_debug_id,
+ int ref,
+ enum fscache_cookie_trace where),
- TP_ARGS(cookie, retire),
+ TP_ARGS(cookie_debug_id, ref, where),
TP_STRUCT__entry(
__field(unsigned int, cookie )
- __field(unsigned int, parent )
__field(int, ref )
- __field(int, n_children )
- __field(int, n_active )
- __field(u8, flags )
- __field(bool, retire )
+ __field(enum fscache_cookie_trace, where )
),
TP_fast_assign(
- __entry->cookie = cookie->debug_id;
- __entry->parent = cookie->parent->debug_id;
- __entry->ref = refcount_read(&cookie->ref);
- __entry->n_children = atomic_read(&cookie->n_children);
- __entry->n_active = atomic_read(&cookie->n_active);
- __entry->flags = cookie->flags;
- __entry->retire = retire;
+ __entry->cookie = cookie_debug_id;
+ __entry->ref = ref;
+ __entry->where = where;
),
- TP_printk("c=%08x r=%d p=%08x Nc=%d Na=%d f=%02x r=%u",
- __entry->cookie, __entry->ref,
- __entry->parent, __entry->n_children, __entry->n_active,
- __entry->flags, __entry->retire)
+ TP_printk("c=%08x %s r=%d",
+ __entry->cookie,
+ __print_symbolic(__entry->where, fscache_cookie_traces),
+ __entry->ref)
);
-TRACE_EVENT(fscache_enable,
- TP_PROTO(struct fscache_cookie *cookie),
+TRACE_EVENT(fscache_active,
+ TP_PROTO(unsigned int cookie_debug_id,
+ int ref,
+ int n_active,
+ int n_accesses,
+ enum fscache_active_trace why),
- TP_ARGS(cookie),
+ TP_ARGS(cookie_debug_id, ref, n_active, n_accesses, why),
TP_STRUCT__entry(
__field(unsigned int, cookie )
__field(int, ref )
- __field(int, n_children )
__field(int, n_active )
- __field(u8, flags )
+ __field(int, n_accesses )
+ __field(enum fscache_active_trace, why )
),
TP_fast_assign(
- __entry->cookie = cookie->debug_id;
- __entry->ref = refcount_read(&cookie->ref);
- __entry->n_children = atomic_read(&cookie->n_children);
- __entry->n_active = atomic_read(&cookie->n_active);
- __entry->flags = cookie->flags;
+ __entry->cookie = cookie_debug_id;
+ __entry->ref = ref;
+ __entry->n_active = n_active;
+ __entry->n_accesses = n_accesses;
+ __entry->why = why;
),
- TP_printk("c=%08x r=%d Nc=%d Na=%d f=%02x",
- __entry->cookie, __entry->ref,
- __entry->n_children, __entry->n_active, __entry->flags)
+ TP_printk("c=%08x %s r=%d a=%d c=%d",
+ __entry->cookie,
+ __print_symbolic(__entry->why, fscache_active_traces),
+ __entry->ref,
+ __entry->n_accesses,
+ __entry->n_active)
);
-TRACE_EVENT(fscache_disable,
- TP_PROTO(struct fscache_cookie *cookie),
+TRACE_EVENT(fscache_access_cache,
+ TP_PROTO(unsigned int cache_debug_id,
+ int ref,
+ int n_accesses,
+ enum fscache_access_trace why),
- TP_ARGS(cookie),
+ TP_ARGS(cache_debug_id, ref, n_accesses, why),
TP_STRUCT__entry(
- __field(unsigned int, cookie )
+ __field(unsigned int, cache )
__field(int, ref )
- __field(int, n_children )
- __field(int, n_active )
- __field(u8, flags )
+ __field(int, n_accesses )
+ __field(enum fscache_access_trace, why )
),
TP_fast_assign(
- __entry->cookie = cookie->debug_id;
- __entry->ref = refcount_read(&cookie->ref);
- __entry->n_children = atomic_read(&cookie->n_children);
- __entry->n_active = atomic_read(&cookie->n_active);
- __entry->flags = cookie->flags;
+ __entry->cache = cache_debug_id;
+ __entry->ref = ref;
+ __entry->n_accesses = n_accesses;
+ __entry->why = why;
),
- TP_printk("c=%08x r=%d Nc=%d Na=%d f=%02x",
- __entry->cookie, __entry->ref,
- __entry->n_children, __entry->n_active, __entry->flags)
+ TP_printk("C=%08x %s r=%d a=%d",
+ __entry->cache,
+ __print_symbolic(__entry->why, fscache_access_traces),
+ __entry->ref,
+ __entry->n_accesses)
);
-TRACE_EVENT(fscache_osm,
- TP_PROTO(struct fscache_object *object,
- const struct fscache_state *state,
- bool wait, bool oob, s8 event_num),
+TRACE_EVENT(fscache_access_volume,
+ TP_PROTO(unsigned int volume_debug_id,
+ unsigned int cookie_debug_id,
+ int ref,
+ int n_accesses,
+ enum fscache_access_trace why),
- TP_ARGS(object, state, wait, oob, event_num),
+ TP_ARGS(volume_debug_id, cookie_debug_id, ref, n_accesses, why),
TP_STRUCT__entry(
+ __field(unsigned int, volume )
__field(unsigned int, cookie )
- __field(unsigned int, object )
- __array(char, state, 8 )
- __field(bool, wait )
- __field(bool, oob )
- __field(s8, event_num )
+ __field(int, ref )
+ __field(int, n_accesses )
+ __field(enum fscache_access_trace, why )
),
TP_fast_assign(
- __entry->cookie = object->cookie->debug_id;
- __entry->object = object->debug_id;
- __entry->wait = wait;
- __entry->oob = oob;
- __entry->event_num = event_num;
- memcpy(__entry->state, state->short_name, 8);
+ __entry->volume = volume_debug_id;
+ __entry->cookie = cookie_debug_id;
+ __entry->ref = ref;
+ __entry->n_accesses = n_accesses;
+ __entry->why = why;
),
- TP_printk("c=%08x o=%08d %s %s%sev=%d",
+ TP_printk("V=%08x c=%08x %s r=%d a=%d",
+ __entry->volume,
__entry->cookie,
- __entry->object,
- __entry->state,
- __print_symbolic(__entry->wait,
- { true, "WAIT" },
- { false, "WORK" }),
- __print_symbolic(__entry->oob,
- { true, " OOB " },
- { false, " " }),
- __entry->event_num)
+ __print_symbolic(__entry->why, fscache_access_traces),
+ __entry->ref,
+ __entry->n_accesses)
);
-TRACE_EVENT(fscache_page,
- TP_PROTO(struct fscache_cookie *cookie, struct page *page,
- enum fscache_page_trace why),
+TRACE_EVENT(fscache_access,
+ TP_PROTO(unsigned int cookie_debug_id,
+ int ref,
+ int n_accesses,
+ enum fscache_access_trace why),
- TP_ARGS(cookie, page, why),
+ TP_ARGS(cookie_debug_id, ref, n_accesses, why),
TP_STRUCT__entry(
__field(unsigned int, cookie )
- __field(pgoff_t, page )
- __field(enum fscache_page_trace, why )
+ __field(int, ref )
+ __field(int, n_accesses )
+ __field(enum fscache_access_trace, why )
),
TP_fast_assign(
- __entry->cookie = cookie->debug_id;
- __entry->page = page->index;
- __entry->why = why;
+ __entry->cookie = cookie_debug_id;
+ __entry->ref = ref;
+ __entry->n_accesses = n_accesses;
+ __entry->why = why;
),
- TP_printk("c=%08x %s pg=%lx",
+ TP_printk("c=%08x %s r=%d a=%d",
__entry->cookie,
- __print_symbolic(__entry->why, fscache_page_traces),
- __entry->page)
- );
-
-TRACE_EVENT(fscache_check_page,
- TP_PROTO(struct fscache_cookie *cookie, struct page *page,
- void *val, int n),
-
- TP_ARGS(cookie, page, val, n),
-
- TP_STRUCT__entry(
- __field(unsigned int, cookie )
- __field(void *, page )
- __field(void *, val )
- __field(int, n )
- ),
-
- TP_fast_assign(
- __entry->cookie = cookie->debug_id;
- __entry->page = page;
- __entry->val = val;
- __entry->n = n;
- ),
-
- TP_printk("c=%08x pg=%p val=%p n=%d",
- __entry->cookie, __entry->page, __entry->val, __entry->n)
+ __print_symbolic(__entry->why, fscache_access_traces),
+ __entry->ref,
+ __entry->n_accesses)
);
-TRACE_EVENT(fscache_wake_cookie,
+TRACE_EVENT(fscache_acquire,
TP_PROTO(struct fscache_cookie *cookie),
TP_ARGS(cookie),
TP_STRUCT__entry(
__field(unsigned int, cookie )
+ __field(unsigned int, volume )
+ __field(int, v_ref )
+ __field(int, v_n_cookies )
),
TP_fast_assign(
__entry->cookie = cookie->debug_id;
+ __entry->volume = cookie->volume->debug_id;
+ __entry->v_ref = refcount_read(&cookie->volume->ref);
+ __entry->v_n_cookies = atomic_read(&cookie->volume->n_cookies);
),
- TP_printk("c=%08x", __entry->cookie)
- );
-
-TRACE_EVENT(fscache_op,
- TP_PROTO(struct fscache_cookie *cookie, struct fscache_operation *op,
- enum fscache_op_trace why),
-
- TP_ARGS(cookie, op, why),
-
- TP_STRUCT__entry(
- __field(unsigned int, cookie )
- __field(unsigned int, op )
- __field(enum fscache_op_trace, why )
- ),
-
- TP_fast_assign(
- __entry->cookie = cookie ? cookie->debug_id : 0;
- __entry->op = op->debug_id;
- __entry->why = why;
- ),
-
- TP_printk("c=%08x op=%08x %s",
- __entry->cookie, __entry->op,
- __print_symbolic(__entry->why, fscache_op_traces))
+ TP_printk("c=%08x V=%08x vr=%d vc=%d",
+ __entry->cookie,
+ __entry->volume, __entry->v_ref, __entry->v_n_cookies)
);
-TRACE_EVENT(fscache_page_op,
- TP_PROTO(struct fscache_cookie *cookie, struct page *page,
- struct fscache_operation *op, enum fscache_page_op_trace what),
+TRACE_EVENT(fscache_relinquish,
+ TP_PROTO(struct fscache_cookie *cookie, bool retire),
- TP_ARGS(cookie, page, op, what),
+ TP_ARGS(cookie, retire),
TP_STRUCT__entry(
__field(unsigned int, cookie )
- __field(unsigned int, op )
- __field(pgoff_t, page )
- __field(enum fscache_page_op_trace, what )
+ __field(unsigned int, volume )
+ __field(int, ref )
+ __field(int, n_active )
+ __field(u8, flags )
+ __field(bool, retire )
),
TP_fast_assign(
- __entry->cookie = cookie->debug_id;
- __entry->page = page ? page->index : 0;
- __entry->op = op->debug_id;
- __entry->what = what;
+ __entry->cookie = cookie->debug_id;
+ __entry->volume = cookie->volume->debug_id;
+ __entry->ref = refcount_read(&cookie->ref);
+ __entry->n_active = atomic_read(&cookie->n_active);
+ __entry->flags = cookie->flags;
+ __entry->retire = retire;
),
- TP_printk("c=%08x %s pg=%lx op=%08x",
- __entry->cookie,
- __print_symbolic(__entry->what, fscache_page_op_traces),
- __entry->page, __entry->op)
+ TP_printk("c=%08x V=%08x r=%d U=%d f=%02x rt=%u",
+ __entry->cookie, __entry->volume, __entry->ref,
+ __entry->n_active, __entry->flags, __entry->retire)
);
-TRACE_EVENT(fscache_wrote_page,
- TP_PROTO(struct fscache_cookie *cookie, struct page *page,
- struct fscache_operation *op, int ret),
+TRACE_EVENT(fscache_invalidate,
+ TP_PROTO(struct fscache_cookie *cookie, loff_t new_size),
- TP_ARGS(cookie, page, op, ret),
+ TP_ARGS(cookie, new_size),
TP_STRUCT__entry(
__field(unsigned int, cookie )
- __field(unsigned int, op )
- __field(pgoff_t, page )
- __field(int, ret )
+ __field(loff_t, new_size )
),
TP_fast_assign(
- __entry->cookie = cookie->debug_id;
- __entry->page = page->index;
- __entry->op = op->debug_id;
- __entry->ret = ret;
+ __entry->cookie = cookie->debug_id;
+ __entry->new_size = new_size;
),
- TP_printk("c=%08x pg=%lx op=%08x ret=%d",
- __entry->cookie, __entry->page, __entry->op, __entry->ret)
+ TP_printk("c=%08x sz=%llx",
+ __entry->cookie, __entry->new_size)
);
-TRACE_EVENT(fscache_gang_lookup,
- TP_PROTO(struct fscache_cookie *cookie, struct fscache_operation *op,
- void **results, int n, pgoff_t store_limit),
+TRACE_EVENT(fscache_resize,
+ TP_PROTO(struct fscache_cookie *cookie, loff_t new_size),
- TP_ARGS(cookie, op, results, n, store_limit),
+ TP_ARGS(cookie, new_size),
TP_STRUCT__entry(
__field(unsigned int, cookie )
- __field(unsigned int, op )
- __field(pgoff_t, results0 )
- __field(int, n )
- __field(pgoff_t, store_limit )
+ __field(loff_t, old_size )
+ __field(loff_t, new_size )
),
TP_fast_assign(
- __entry->cookie = cookie->debug_id;
- __entry->op = op->debug_id;
- __entry->results0 = results[0] ? ((struct page *)results[0])->index : (pgoff_t)-1;
- __entry->n = n;
- __entry->store_limit = store_limit;
+ __entry->cookie = cookie->debug_id;
+ __entry->old_size = cookie->object_size;
+ __entry->new_size = new_size;
),
- TP_printk("c=%08x op=%08x r0=%lx n=%d sl=%lx",
- __entry->cookie, __entry->op, __entry->results0, __entry->n,
- __entry->store_limit)
+ TP_printk("c=%08x os=%08llx sz=%08llx",
+ __entry->cookie,
+ __entry->old_size,
+ __entry->new_size)
);
#endif /* _TRACE_FSCACHE_H */
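
The EM()/E_() pairs declared above are deliberately expanded twice: once through TRACE_DEFINE_ENUM() so the enum values are visible to userspace tooling, and once as { value, string } pairs consumed by __print_symbolic(). A minimal standalone C sketch of that second expansion (illustrative values, not kernel code):

#include <stdio.h>

enum fscache_volume_trace {
	fscache_volume_collision,
	fscache_volume_get_cookie,
};

#define fscache_volume_traces \
	EM(fscache_volume_collision,  "*COLLIDE*") \
	E_(fscache_volume_get_cookie, "GET cook ")

/* Second expansion: build the value -> string table. */
#define EM(a, b) { a, b },
#define E_(a, b) { a, b }
static const struct { int val; const char *str; } volume_strs[] = {
	fscache_volume_traces
};
#undef EM
#undef E_

static const char *show(int v)
{
	for (size_t i = 0; i < sizeof(volume_strs) / sizeof(volume_strs[0]); i++)
		if (volume_strs[i].val == v)
			return volume_strs[i].str;
	return "?";
}

int main(void)
{
	/* Mirrors the TP_printk("V=%08x %s u=%d", ...) output format. */
	printf("V=%08x %s u=%d\n", 0x2a, show(fscache_volume_get_cookie), 2);
	return 0;
}
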
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index 72b4582322ff..29096fe12623 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -101,8 +101,9 @@ TRACE_EVENT(map,
__entry->size = size;
),
- TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu",
- __entry->iova, __entry->paddr, __entry->size
+ TP_printk("IOMMU: iova=0x%016llx - 0x%016llx paddr=0x%016llx size=%zu",
+ __entry->iova, __entry->iova + __entry->size, __entry->paddr,
+ __entry->size
)
);
@@ -124,8 +125,9 @@ TRACE_EVENT(unmap,
__entry->unmapped_size = unmapped_size;
),
- TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu",
- __entry->iova, __entry->size, __entry->unmapped_size
+ TP_printk("IOMMU: iova=0x%016llx - 0x%016llx size=%zu unmapped_size=%zu",
+ __entry->iova, __entry->iova + __entry->size,
+ __entry->size, __entry->unmapped_size
)
);
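
The reworked TP_printk lines derive the exclusive end of the mapped range from fields the event already records, rather than storing it separately. A standalone C sketch of the arithmetic (sample values assumed):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	unsigned long long iova = 0xffff800010000000ULL;	/* sample values */
	size_t size = 0x2000;

	/* iova + size is the exclusive end of the mapped range. */
	printf("IOMMU: iova=0x%016llx - 0x%016llx size=%zu\n",
	       iova, iova + size, size);
	return 0;
}
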
diff --git a/include/trace/events/libata.h b/include/trace/events/libata.h
index ab69434e2329..d4e631aa976f 100644
--- a/include/trace/events/libata.h
+++ b/include/trace/events/libata.h
@@ -132,9 +132,37 @@
ata_protocol_name(ATAPI_PROT_PIO), \
ata_protocol_name(ATAPI_PROT_DMA))
+#define ata_class_name(class) { class, #class }
+#define show_class_name(val) \
+ __print_symbolic(val, \
+ ata_class_name(ATA_DEV_UNKNOWN), \
+ ata_class_name(ATA_DEV_ATA), \
+ ata_class_name(ATA_DEV_ATA_UNSUP), \
+ ata_class_name(ATA_DEV_ATAPI), \
+ ata_class_name(ATA_DEV_ATAPI_UNSUP), \
+ ata_class_name(ATA_DEV_PMP), \
+ ata_class_name(ATA_DEV_PMP_UNSUP), \
+ ata_class_name(ATA_DEV_SEMB), \
+ ata_class_name(ATA_DEV_SEMB_UNSUP), \
+ ata_class_name(ATA_DEV_ZAC), \
+ ata_class_name(ATA_DEV_ZAC_UNSUP), \
+ ata_class_name(ATA_DEV_NONE))
+
+#define ata_sff_hsm_state_name(state) { state, #state }
+#define show_sff_hsm_state_name(val) \
+ __print_symbolic(val, \
+ ata_sff_hsm_state_name(HSM_ST_IDLE), \
+ ata_sff_hsm_state_name(HSM_ST_FIRST), \
+ ata_sff_hsm_state_name(HSM_ST), \
+ ata_sff_hsm_state_name(HSM_ST_LAST), \
+ ata_sff_hsm_state_name(HSM_ST_ERR))
+
const char *libata_trace_parse_status(struct trace_seq*, unsigned char);
#define __parse_status(s) libata_trace_parse_status(p, s)
+const char *libata_trace_parse_host_stat(struct trace_seq *, unsigned char);
+#define __parse_host_stat(s) libata_trace_parse_host_stat(p, s)
+
const char *libata_trace_parse_eh_action(struct trace_seq *, unsigned int);
#define __parse_eh_action(a) libata_trace_parse_eh_action(p, a)
@@ -144,11 +172,14 @@ const char *libata_trace_parse_eh_err_mask(struct trace_seq *, unsigned int);
const char *libata_trace_parse_qc_flags(struct trace_seq *, unsigned int);
#define __parse_qc_flags(f) libata_trace_parse_qc_flags(p, f)
+const char *libata_trace_parse_tf_flags(struct trace_seq *, unsigned int);
+#define __parse_tf_flags(f) libata_trace_parse_tf_flags(p, f)
+
const char *libata_trace_parse_subcmd(struct trace_seq *, unsigned char,
unsigned char, unsigned char);
#define __parse_subcmd(c,f,h) libata_trace_parse_subcmd(p, c, f, h)
-TRACE_EVENT(ata_qc_issue,
+DECLARE_EVENT_CLASS(ata_qc_issue_template,
TP_PROTO(struct ata_queued_cmd *qc),
@@ -207,6 +238,14 @@ TRACE_EVENT(ata_qc_issue,
__entry->dev)
);
+DEFINE_EVENT(ata_qc_issue_template, ata_qc_prep,
+ TP_PROTO(struct ata_queued_cmd *qc),
+ TP_ARGS(qc));
+
+DEFINE_EVENT(ata_qc_issue_template, ata_qc_issue,
+ TP_PROTO(struct ata_queued_cmd *qc),
+ TP_ARGS(qc));
+
DECLARE_EVENT_CLASS(ata_qc_complete_template,
TP_PROTO(struct ata_queued_cmd *qc),
@@ -275,6 +314,128 @@ DEFINE_EVENT(ata_qc_complete_template, ata_qc_complete_done,
TP_PROTO(struct ata_queued_cmd *qc),
TP_ARGS(qc));
+TRACE_EVENT(ata_tf_load,
+
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf),
+
+ TP_ARGS(ap, tf),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned char, cmd )
+ __field( unsigned char, dev )
+ __field( unsigned char, lbal )
+ __field( unsigned char, lbam )
+ __field( unsigned char, lbah )
+ __field( unsigned char, nsect )
+ __field( unsigned char, feature )
+ __field( unsigned char, hob_lbal )
+ __field( unsigned char, hob_lbam )
+ __field( unsigned char, hob_lbah )
+ __field( unsigned char, hob_nsect )
+ __field( unsigned char, hob_feature )
+ __field( unsigned char, proto )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ __entry->proto = tf->protocol;
+ __entry->cmd = tf->command;
+ __entry->dev = tf->device;
+ __entry->lbal = tf->lbal;
+ __entry->lbam = tf->lbam;
+ __entry->lbah = tf->lbah;
+ __entry->hob_lbal = tf->hob_lbal;
+ __entry->hob_lbam = tf->hob_lbam;
+ __entry->hob_lbah = tf->hob_lbah;
+ __entry->feature = tf->feature;
+ __entry->hob_feature = tf->hob_feature;
+ __entry->nsect = tf->nsect;
+ __entry->hob_nsect = tf->hob_nsect;
+ ),
+
+	TP_printk("ata_port=%u proto=%s cmd=%s%s "
+		  "tf=(%02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x)",
+ __entry->ata_port,
+ show_protocol_name(__entry->proto),
+ show_opcode_name(__entry->cmd),
+ __parse_subcmd(__entry->cmd, __entry->feature, __entry->hob_nsect),
+ __entry->cmd, __entry->feature, __entry->nsect,
+ __entry->lbal, __entry->lbam, __entry->lbah,
+ __entry->hob_feature, __entry->hob_nsect,
+ __entry->hob_lbal, __entry->hob_lbam, __entry->hob_lbah,
+ __entry->dev)
+);
+
+DECLARE_EVENT_CLASS(ata_exec_command_template,
+
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+
+ TP_ARGS(ap, tf, tag),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, tag )
+ __field( unsigned char, cmd )
+ __field( unsigned char, feature )
+ __field( unsigned char, hob_nsect )
+ __field( unsigned char, proto )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ __entry->tag = tag;
+ __entry->proto = tf->protocol;
+ __entry->cmd = tf->command;
+ __entry->feature = tf->feature;
+ __entry->hob_nsect = tf->hob_nsect;
+ ),
+
+ TP_printk("ata_port=%u tag=%d proto=%s cmd=%s%s",
+ __entry->ata_port, __entry->tag,
+ show_protocol_name(__entry->proto),
+ show_opcode_name(__entry->cmd),
+ __parse_subcmd(__entry->cmd, __entry->feature, __entry->hob_nsect))
+);
+
+DEFINE_EVENT(ata_exec_command_template, ata_exec_command,
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+ TP_ARGS(ap, tf, tag));
+
+DEFINE_EVENT(ata_exec_command_template, ata_bmdma_setup,
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+ TP_ARGS(ap, tf, tag));
+
+DEFINE_EVENT(ata_exec_command_template, ata_bmdma_start,
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+ TP_ARGS(ap, tf, tag));
+
+DEFINE_EVENT(ata_exec_command_template, ata_bmdma_stop,
+ TP_PROTO(struct ata_port *ap, const struct ata_taskfile *tf, unsigned int tag),
+ TP_ARGS(ap, tf, tag));
+
+TRACE_EVENT(ata_bmdma_status,
+
+ TP_PROTO(struct ata_port *ap, unsigned int host_stat),
+
+ TP_ARGS(ap, host_stat),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned char, host_stat )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ __entry->host_stat = host_stat;
+ ),
+
+ TP_printk("ata_port=%u host_stat=%s",
+ __entry->ata_port,
+ __parse_host_stat(__entry->host_stat))
+);
+
TRACE_EVENT(ata_eh_link_autopsy,
TP_PROTO(struct ata_device *dev, unsigned int eh_action, unsigned int eh_err_mask),
@@ -329,6 +490,259 @@ TRACE_EVENT(ata_eh_link_autopsy_qc,
__parse_eh_err_mask(__entry->eh_err_mask))
);
+DECLARE_EVENT_CLASS(ata_eh_action_template,
+
+ TP_PROTO(struct ata_link *link, unsigned int devno, unsigned int eh_action),
+
+ TP_ARGS(link, devno, eh_action),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, ata_dev )
+ __field( unsigned int, eh_action )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = link->ap->print_id;
+ __entry->ata_dev = link->pmp + devno;
+ __entry->eh_action = eh_action;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u eh_action=%s",
+ __entry->ata_port, __entry->ata_dev,
+ __parse_eh_action(__entry->eh_action))
+);
+
+DEFINE_EVENT(ata_eh_action_template, ata_eh_about_to_do,
+ TP_PROTO(struct ata_link *link, unsigned int devno, unsigned int eh_action),
+ TP_ARGS(link, devno, eh_action));
+
+DEFINE_EVENT(ata_eh_action_template, ata_eh_done,
+ TP_PROTO(struct ata_link *link, unsigned int devno, unsigned int eh_action),
+ TP_ARGS(link, devno, eh_action));
+
+DECLARE_EVENT_CLASS(ata_link_reset_begin_template,
+
+ TP_PROTO(struct ata_link *link, unsigned int *class, unsigned long deadline),
+
+ TP_ARGS(link, class, deadline),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __array( unsigned int, class, 2 )
+ __field( unsigned long, deadline )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = link->ap->print_id;
+		memcpy(__entry->class, class, 2 * sizeof(*class));
+ __entry->deadline = deadline;
+ ),
+
+ TP_printk("ata_port=%u deadline=%lu classes=[%s,%s]",
+ __entry->ata_port, __entry->deadline,
+ show_class_name(__entry->class[0]),
+ show_class_name(__entry->class[1]))
+);
+
+DEFINE_EVENT(ata_link_reset_begin_template, ata_link_hardreset_begin,
+ TP_PROTO(struct ata_link *link, unsigned int *class, unsigned long deadline),
+ TP_ARGS(link, class, deadline));
+
+DEFINE_EVENT(ata_link_reset_begin_template, ata_slave_hardreset_begin,
+ TP_PROTO(struct ata_link *link, unsigned int *class, unsigned long deadline),
+ TP_ARGS(link, class, deadline));
+
+DEFINE_EVENT(ata_link_reset_begin_template, ata_link_softreset_begin,
+ TP_PROTO(struct ata_link *link, unsigned int *class, unsigned long deadline),
+ TP_ARGS(link, class, deadline));
+
+DECLARE_EVENT_CLASS(ata_link_reset_end_template,
+
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+
+ TP_ARGS(link, class, rc),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __array( unsigned int, class, 2 )
+ __field( int, rc )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = link->ap->print_id;
+		memcpy(__entry->class, class, 2 * sizeof(*class));
+ __entry->rc = rc;
+ ),
+
+ TP_printk("ata_port=%u rc=%d class=[%s,%s]",
+ __entry->ata_port, __entry->rc,
+ show_class_name(__entry->class[0]),
+ show_class_name(__entry->class[1]))
+);
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_link_hardreset_end,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_slave_hardreset_end,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_link_softreset_end,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_link_postreset,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DEFINE_EVENT(ata_link_reset_end_template, ata_slave_postreset,
+ TP_PROTO(struct ata_link *link, unsigned int *class, int rc),
+ TP_ARGS(link, class, rc));
+
+DECLARE_EVENT_CLASS(ata_port_eh_begin_template,
+
+ TP_PROTO(struct ata_port *ap),
+
+ TP_ARGS(ap),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ ),
+
+ TP_printk("ata_port=%u", __entry->ata_port)
+);
+
+DEFINE_EVENT(ata_port_eh_begin_template, ata_std_sched_eh,
+ TP_PROTO(struct ata_port *ap),
+ TP_ARGS(ap));
+
+DEFINE_EVENT(ata_port_eh_begin_template, ata_port_freeze,
+ TP_PROTO(struct ata_port *ap),
+ TP_ARGS(ap));
+
+DEFINE_EVENT(ata_port_eh_begin_template, ata_port_thaw,
+ TP_PROTO(struct ata_port *ap),
+ TP_ARGS(ap));
+
+DECLARE_EVENT_CLASS(ata_sff_hsm_template,
+
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned char status),
+
+ TP_ARGS(qc, status),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, ata_dev )
+ __field( unsigned int, tag )
+ __field( unsigned int, qc_flags )
+ __field( unsigned int, protocol )
+ __field( unsigned int, hsm_state )
+ __field( unsigned char, dev_state )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = qc->ap->print_id;
+ __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
+ __entry->tag = qc->tag;
+ __entry->qc_flags = qc->flags;
+ __entry->protocol = qc->tf.protocol;
+ __entry->hsm_state = qc->ap->hsm_task_state;
+ __entry->dev_state = status;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u tag=%d proto=%s flags=%s task_state=%s dev_stat=0x%X",
+ __entry->ata_port, __entry->ata_dev, __entry->tag,
+ show_protocol_name(__entry->protocol),
+ __parse_qc_flags(__entry->qc_flags),
+ show_sff_hsm_state_name(__entry->hsm_state),
+ __entry->dev_state)
+);
+
+DEFINE_EVENT(ata_sff_hsm_template, ata_sff_hsm_state,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned char state),
+ TP_ARGS(qc, state));
+
+DEFINE_EVENT(ata_sff_hsm_template, ata_sff_hsm_command_complete,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned char state),
+ TP_ARGS(qc, state));
+
+DEFINE_EVENT(ata_sff_hsm_template, ata_sff_port_intr,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned char state),
+ TP_ARGS(qc, state));
+
+DECLARE_EVENT_CLASS(ata_transfer_data_template,
+
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned int offset, unsigned int count),
+
+ TP_ARGS(qc, offset, count),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned int, ata_dev )
+ __field( unsigned int, tag )
+ __field( unsigned int, flags )
+ __field( unsigned int, offset )
+ __field( unsigned int, bytes )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = qc->ap->print_id;
+ __entry->ata_dev = qc->dev->link->pmp + qc->dev->devno;
+ __entry->tag = qc->tag;
+ __entry->flags = qc->tf.flags;
+ __entry->offset = offset;
+ __entry->bytes = count;
+ ),
+
+ TP_printk("ata_port=%u ata_dev=%u tag=%d flags=%s offset=%u bytes=%u",
+ __entry->ata_port, __entry->ata_dev, __entry->tag,
+ __parse_tf_flags(__entry->flags),
+ __entry->offset, __entry->bytes)
+);
+
+DEFINE_EVENT(ata_transfer_data_template, ata_sff_pio_transfer_data,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned int offset, unsigned int count),
+ TP_ARGS(qc, offset, count));
+
+DEFINE_EVENT(ata_transfer_data_template, atapi_pio_transfer_data,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned int offset, unsigned int count),
+ TP_ARGS(qc, offset, count));
+
+DEFINE_EVENT(ata_transfer_data_template, atapi_send_cdb,
+ TP_PROTO(struct ata_queued_cmd *qc, unsigned int offset, unsigned int count),
+ TP_ARGS(qc, offset, count));
+
+DECLARE_EVENT_CLASS(ata_sff_template,
+
+ TP_PROTO(struct ata_port *ap),
+
+ TP_ARGS(ap),
+
+ TP_STRUCT__entry(
+ __field( unsigned int, ata_port )
+ __field( unsigned char, hsm_state )
+ ),
+
+ TP_fast_assign(
+ __entry->ata_port = ap->print_id;
+ __entry->hsm_state = ap->hsm_task_state;
+ ),
+
+ TP_printk("ata_port=%u task_state=%s",
+ __entry->ata_port,
+ show_sff_hsm_state_name(__entry->hsm_state))
+);
+
+DEFINE_EVENT(ata_sff_template, ata_sff_flush_pio_task,
+ TP_PROTO(struct ata_port *ap),
+ TP_ARGS(ap));
+
#endif /* _TRACE_LIBATA_H */
/* This part must be outside protection */
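
The ata_class_name()/ata_sff_hsm_state_name() helpers above use the preprocessor stringizing operator to pair each constant with its own spelling. A standalone sketch of the idiom, adapted to a plain lookup array instead of __print_symbolic() (the array form is illustrative; the kernel macros expand to { value, string } pairs):

#include <stdio.h>

enum { HSM_ST_IDLE, HSM_ST_FIRST, HSM_ST, HSM_ST_LAST, HSM_ST_ERR };

/* Adapted: index the array by the state value, name it via #state. */
#define ata_sff_hsm_state_name(state) [state] = #state

static const char *hsm_names[] = {
	ata_sff_hsm_state_name(HSM_ST_IDLE),
	ata_sff_hsm_state_name(HSM_ST_FIRST),
	ata_sff_hsm_state_name(HSM_ST),
	ata_sff_hsm_state_name(HSM_ST_LAST),
	ata_sff_hsm_state_name(HSM_ST_ERR),
};

int main(void)
{
	printf("task_state=%s\n", hsm_names[HSM_ST_LAST]);
	return 0;
}
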
diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
index 4d470bffd9f1..e6f4ebbb4c69 100644
--- a/include/trace/events/netfs.h
+++ b/include/trace/events/netfs.h
@@ -135,6 +135,7 @@ TRACE_EVENT(netfs_read,
__field(loff_t, start )
__field(size_t, len )
__field(enum netfs_read_trace, what )
+ __field(unsigned int, netfs_inode )
),
TP_fast_assign(
@@ -143,12 +144,14 @@ TRACE_EVENT(netfs_read,
__entry->start = start;
__entry->len = len;
__entry->what = what;
+ __entry->netfs_inode = rreq->inode->i_ino;
),
- TP_printk("R=%08x %s c=%08x s=%llx %zx",
+ TP_printk("R=%08x %s c=%08x ni=%x s=%llx %zx",
__entry->rreq,
__print_symbolic(__entry->what, netfs_read_traces),
__entry->cookie,
+ __entry->netfs_inode,
__entry->start, __entry->len)
);
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
index 3d7b432ca5f3..a2d9aa16a5d7 100644
--- a/include/trace/events/random.h
+++ b/include/trace/events/random.h
@@ -28,80 +28,71 @@ TRACE_EVENT(add_device_randomness,
);
DECLARE_EVENT_CLASS(random__mix_pool_bytes,
- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+ TP_PROTO(int bytes, unsigned long IP),
- TP_ARGS(pool_name, bytes, IP),
+ TP_ARGS(bytes, IP),
TP_STRUCT__entry(
- __field( const char *, pool_name )
__field( int, bytes )
__field(unsigned long, IP )
),
TP_fast_assign(
- __entry->pool_name = pool_name;
__entry->bytes = bytes;
__entry->IP = IP;
),
- TP_printk("%s pool: bytes %d caller %pS",
- __entry->pool_name, __entry->bytes, (void *)__entry->IP)
+ TP_printk("input pool: bytes %d caller %pS",
+ __entry->bytes, (void *)__entry->IP)
);
DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+ TP_PROTO(int bytes, unsigned long IP),
- TP_ARGS(pool_name, bytes, IP)
+ TP_ARGS(bytes, IP)
);
DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
- TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+ TP_PROTO(int bytes, unsigned long IP),
- TP_ARGS(pool_name, bytes, IP)
+ TP_ARGS(bytes, IP)
);
TRACE_EVENT(credit_entropy_bits,
- TP_PROTO(const char *pool_name, int bits, int entropy_count,
- unsigned long IP),
+ TP_PROTO(int bits, int entropy_count, unsigned long IP),
- TP_ARGS(pool_name, bits, entropy_count, IP),
+ TP_ARGS(bits, entropy_count, IP),
TP_STRUCT__entry(
- __field( const char *, pool_name )
__field( int, bits )
__field( int, entropy_count )
__field(unsigned long, IP )
),
TP_fast_assign(
- __entry->pool_name = pool_name;
__entry->bits = bits;
__entry->entropy_count = entropy_count;
__entry->IP = IP;
),
- TP_printk("%s pool: bits %d entropy_count %d caller %pS",
- __entry->pool_name, __entry->bits,
- __entry->entropy_count, (void *)__entry->IP)
+ TP_printk("input pool: bits %d entropy_count %d caller %pS",
+ __entry->bits, __entry->entropy_count, (void *)__entry->IP)
);
TRACE_EVENT(debit_entropy,
- TP_PROTO(const char *pool_name, int debit_bits),
+ TP_PROTO(int debit_bits),
- TP_ARGS(pool_name, debit_bits),
+ TP_ARGS( debit_bits),
TP_STRUCT__entry(
- __field( const char *, pool_name )
__field( int, debit_bits )
),
TP_fast_assign(
- __entry->pool_name = pool_name;
__entry->debit_bits = debit_bits;
),
- TP_printk("%s: debit_bits %d", __entry->pool_name,
- __entry->debit_bits)
+ TP_printk("input pool: debit_bits %d", __entry->debit_bits)
);
TRACE_EVENT(add_input_randomness,
@@ -170,36 +161,31 @@ DEFINE_EVENT(random__get_random_bytes, get_random_bytes_arch,
);
DECLARE_EVENT_CLASS(random__extract_entropy,
- TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
- unsigned long IP),
+ TP_PROTO(int nbytes, int entropy_count, unsigned long IP),
- TP_ARGS(pool_name, nbytes, entropy_count, IP),
+ TP_ARGS(nbytes, entropy_count, IP),
TP_STRUCT__entry(
- __field( const char *, pool_name )
__field( int, nbytes )
__field( int, entropy_count )
__field(unsigned long, IP )
),
TP_fast_assign(
- __entry->pool_name = pool_name;
__entry->nbytes = nbytes;
__entry->entropy_count = entropy_count;
__entry->IP = IP;
),
- TP_printk("%s pool: nbytes %d entropy_count %d caller %pS",
- __entry->pool_name, __entry->nbytes, __entry->entropy_count,
- (void *)__entry->IP)
+ TP_printk("input pool: nbytes %d entropy_count %d caller %pS",
+ __entry->nbytes, __entry->entropy_count, (void *)__entry->IP)
);
DEFINE_EVENT(random__extract_entropy, extract_entropy,
- TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
- unsigned long IP),
+ TP_PROTO(int nbytes, int entropy_count, unsigned long IP),
- TP_ARGS(pool_name, nbytes, entropy_count, IP)
+ TP_ARGS(nbytes, entropy_count, IP)
);
TRACE_EVENT(urandom_read,
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
index 3e042ca2cedb..a8a64b97504d 100644
--- a/include/trace/events/skb.h
+++ b/include/trace/events/skb.h
@@ -14,7 +14,7 @@
EM(SKB_DROP_REASON_NO_SOCKET, NO_SOCKET) \
EM(SKB_DROP_REASON_PKT_TOO_SMALL, PKT_TOO_SMALL) \
EM(SKB_DROP_REASON_TCP_CSUM, TCP_CSUM) \
- EM(SKB_DROP_REASON_TCP_FILTER, TCP_FILTER) \
+ EM(SKB_DROP_REASON_SOCKET_FILTER, SOCKET_FILTER) \
EM(SKB_DROP_REASON_UDP_CSUM, UDP_CSUM) \
EMe(SKB_DROP_REASON_MAX, MAX)
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 3a99358c262b..29982d60b68a 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -794,6 +794,9 @@ RPC_SHOW_SOCKET
RPC_SHOW_SOCK
+
+#include <trace/events/net_probe_common.h>
+
/*
* Now redefine the EM() and EMe() macros to map the enums to the strings
* that will be printed in the output.
@@ -816,27 +819,32 @@ DECLARE_EVENT_CLASS(xs_socket_event,
__field(unsigned int, socket_state)
__field(unsigned int, sock_state)
__field(unsigned long long, ino)
- __string(dstaddr,
- xprt->address_strings[RPC_DISPLAY_ADDR])
- __string(dstport,
- xprt->address_strings[RPC_DISPLAY_PORT])
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
struct inode *inode = SOCK_INODE(socket);
+ const struct sock *sk = socket->sk;
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
__entry->socket_state = socket->state;
__entry->sock_state = socket->sk->sk_state;
__entry->ino = (unsigned long long)inode->i_ino;
- __assign_str(dstaddr,
- xprt->address_strings[RPC_DISPLAY_ADDR]);
- __assign_str(dstport,
- xprt->address_strings[RPC_DISPLAY_PORT]);
+
),
TP_printk(
- "socket:[%llu] dstaddr=%s/%s "
+ "socket:[%llu] srcaddr=%pISpc dstaddr=%pISpc "
"state=%u (%s) sk_state=%u (%s)",
- __entry->ino, __get_str(dstaddr), __get_str(dstport),
+ __entry->ino,
+ __entry->saddr,
+ __entry->daddr,
__entry->socket_state,
rpc_show_socket_state(__entry->socket_state),
__entry->sock_state,
@@ -866,29 +874,33 @@ DECLARE_EVENT_CLASS(xs_socket_event_done,
__field(unsigned int, socket_state)
__field(unsigned int, sock_state)
__field(unsigned long long, ino)
- __string(dstaddr,
- xprt->address_strings[RPC_DISPLAY_ADDR])
- __string(dstport,
- xprt->address_strings[RPC_DISPLAY_PORT])
+ __array(__u8, saddr, sizeof(struct sockaddr_in6))
+ __array(__u8, daddr, sizeof(struct sockaddr_in6))
),
TP_fast_assign(
struct inode *inode = SOCK_INODE(socket);
+ const struct sock *sk = socket->sk;
+ const struct inet_sock *inet = inet_sk(sk);
+
+ memset(__entry->saddr, 0, sizeof(struct sockaddr_in6));
+ memset(__entry->daddr, 0, sizeof(struct sockaddr_in6));
+
+ TP_STORE_ADDR_PORTS(__entry, inet, sk);
+
__entry->socket_state = socket->state;
__entry->sock_state = socket->sk->sk_state;
__entry->ino = (unsigned long long)inode->i_ino;
__entry->error = error;
- __assign_str(dstaddr,
- xprt->address_strings[RPC_DISPLAY_ADDR]);
- __assign_str(dstport,
- xprt->address_strings[RPC_DISPLAY_PORT]);
),
TP_printk(
- "error=%d socket:[%llu] dstaddr=%s/%s "
+ "error=%d socket:[%llu] srcaddr=%pISpc dstaddr=%pISpc "
"state=%u (%s) sk_state=%u (%s)",
__entry->error,
- __entry->ino, __get_str(dstaddr), __get_str(dstport),
+ __entry->ino,
+ __entry->saddr,
+ __entry->daddr,
__entry->socket_state,
rpc_show_socket_state(__entry->socket_state),
__entry->sock_state,
@@ -953,7 +965,8 @@ TRACE_EVENT(rpc_socket_nospace,
{ BIT(XPRT_REMOVE), "REMOVE" }, \
{ BIT(XPRT_CONGESTED), "CONGESTED" }, \
{ BIT(XPRT_CWND_WAIT), "CWND_WAIT" }, \
- { BIT(XPRT_WRITE_SPACE), "WRITE_SPACE" })
+ { BIT(XPRT_WRITE_SPACE), "WRITE_SPACE" }, \
+ { BIT(XPRT_SND_IS_COOKIE), "SND_IS_COOKIE" })
DECLARE_EVENT_CLASS(rpc_xprt_lifetime_class,
TP_PROTO(
@@ -1150,8 +1163,11 @@ DECLARE_EVENT_CLASS(xprt_writelock_event,
__entry->task_id = -1;
__entry->client_id = -1;
}
- __entry->snd_task_id = xprt->snd_task ?
- xprt->snd_task->tk_pid : -1;
+ if (xprt->snd_task &&
+ !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
+ __entry->snd_task_id = xprt->snd_task->tk_pid;
+ else
+ __entry->snd_task_id = -1;
),
TP_printk(SUNRPC_TRACE_TASK_SPECIFIER
@@ -1196,8 +1212,12 @@ DECLARE_EVENT_CLASS(xprt_cong_event,
__entry->task_id = -1;
__entry->client_id = -1;
}
- __entry->snd_task_id = xprt->snd_task ?
- xprt->snd_task->tk_pid : -1;
+ if (xprt->snd_task &&
+ !test_bit(XPRT_SND_IS_COOKIE, &xprt->state))
+ __entry->snd_task_id = xprt->snd_task->tk_pid;
+ else
+ __entry->snd_task_id = -1;
+
__entry->cong = xprt->cong;
__entry->cwnd = xprt->cwnd;
__entry->wait = test_bit(XPRT_CWND_WAIT, &xprt->state);
@@ -1744,10 +1764,11 @@ TRACE_EVENT(svc_xprt_create_err,
const char *program,
const char *protocol,
struct sockaddr *sap,
+ size_t salen,
const struct svc_xprt *xprt
),
- TP_ARGS(program, protocol, sap, xprt),
+ TP_ARGS(program, protocol, sap, salen, xprt),
TP_STRUCT__entry(
__field(long, error)
@@ -1760,7 +1781,7 @@ TRACE_EVENT(svc_xprt_create_err,
__entry->error = PTR_ERR(xprt);
__assign_str(program, program);
__assign_str(protocol, protocol);
- memcpy(__entry->addr, sap, sizeof(__entry->addr));
+ memcpy(__entry->addr, sap, min(salen, sizeof(__entry->addr)));
),
TP_printk("addr=%pISpc program=%s protocol=%s error=%ld",
@@ -1768,7 +1789,7 @@ TRACE_EVENT(svc_xprt_create_err,
__entry->error)
);
-TRACE_EVENT(svc_xprt_do_enqueue,
+TRACE_EVENT(svc_xprt_enqueue,
TP_PROTO(struct svc_xprt *xprt, struct svc_rqst *rqst),
TP_ARGS(xprt, rqst),
@@ -1815,7 +1836,6 @@ DECLARE_EVENT_CLASS(svc_xprt_event,
), \
TP_ARGS(xprt))
-DEFINE_SVC_XPRT_EVENT(received);
DEFINE_SVC_XPRT_EVENT(no_write_space);
DEFINE_SVC_XPRT_EVENT(close);
DEFINE_SVC_XPRT_EVENT(detach);
@@ -1902,27 +1922,6 @@ TRACE_EVENT(svc_alloc_arg_err,
TP_printk("pages=%u", __entry->pages)
);
-TRACE_EVENT(svc_handle_xprt,
- TP_PROTO(struct svc_xprt *xprt, int len),
-
- TP_ARGS(xprt, len),
-
- TP_STRUCT__entry(
- __field(int, len)
- __field(unsigned long, flags)
- __string(addr, xprt->xpt_remotebuf)
- ),
-
- TP_fast_assign(
- __entry->len = len;
- __entry->flags = xprt->xpt_flags;
- __assign_str(addr, xprt->xpt_remotebuf);
- ),
-
- TP_printk("addr=%s len=%d flags=%s", __get_str(addr),
- __entry->len, show_svc_xprt_flags(__entry->flags))
-);
-
TRACE_EVENT(svc_stats_latency,
TP_PROTO(const struct svc_rqst *rqst),
@@ -2146,17 +2145,17 @@ DECLARE_EVENT_CLASS(svcsock_accept_class,
TP_STRUCT__entry(
__field(long, status)
__string(service, service)
- __array(unsigned char, addr, sizeof(struct sockaddr_in6))
+ __field(unsigned int, netns_ino)
),
TP_fast_assign(
__entry->status = status;
__assign_str(service, service);
- memcpy(__entry->addr, &xprt->xpt_local, sizeof(__entry->addr));
+ __entry->netns_ino = xprt->xpt_net->ns.inum;
),
- TP_printk("listener=%pISpc service=%s status=%ld",
- __entry->addr, __get_str(service), __entry->status
+ TP_printk("addr=listener service=%s status=%ld",
+ __get_str(service), __entry->status
)
);
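
Bounding the copy by min(salen, sizeof(__entry->addr)) matters because sap may point at a sockaddr shorter than sockaddr_in6; copying the destination size would read past the end of the source. A standalone sketch (min() is defined locally here as a stand-in for the kernel macro):

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned char addr[sizeof(struct sockaddr_in6)];
	struct sockaddr_in sin = { .sin_family = AF_INET };
	size_t salen = sizeof(sin);	/* caller-supplied source length */

	/* Copying sizeof(addr) bytes would overread the 16-byte
	 * sockaddr_in; bounding by salen stays within the source. */
	memset(addr, 0, sizeof(addr));
	memcpy(addr, &sin, min(salen, sizeof(addr)));
	printf("copied %zu bytes\n", min(salen, sizeof(addr)));
	return 0;
}
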
diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h
index d7fbbe551841..ca3f2767828a 100644
--- a/include/trace/events/thp.h
+++ b/include/trace/events/thp.h
@@ -8,24 +8,6 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-TRACE_EVENT(hugepage_invalidate,
-
- TP_PROTO(unsigned long addr, unsigned long pte),
- TP_ARGS(addr, pte),
- TP_STRUCT__entry(
- __field(unsigned long, addr)
- __field(unsigned long, pte)
- ),
-
- TP_fast_assign(
- __entry->addr = addr;
- __entry->pte = pte;
- ),
-
- TP_printk("hugepage invalidate at addr 0x%lx and pte = 0x%lx",
- __entry->addr, __entry->pte)
-);
-
TRACE_EVENT(hugepage_set_pmd,
TP_PROTO(unsigned long addr, unsigned long pmd),
@@ -65,23 +47,6 @@ TRACE_EVENT(hugepage_update,
TP_printk("hugepage update at addr 0x%lx and pte = 0x%lx clr = 0x%lx, set = 0x%lx", __entry->addr, __entry->pte, __entry->clr, __entry->set)
);
-TRACE_EVENT(hugepage_splitting,
-
- TP_PROTO(unsigned long addr, unsigned long pte),
- TP_ARGS(addr, pte),
- TP_STRUCT__entry(
- __field(unsigned long, addr)
- __field(unsigned long, pte)
- ),
-
- TP_fast_assign(
- __entry->addr = addr;
- __entry->pte = pte;
- ),
-
- TP_printk("hugepage splitting at addr 0x%lx and pte = 0x%lx",
- __entry->addr, __entry->pte)
-);
#endif /* _TRACE_THP_H */
diff --git a/include/trace/perf.h b/include/trace/perf.h
index dbc6c74defc3..5d48c46a3008 100644
--- a/include/trace/perf.h
+++ b/include/trace/perf.h
@@ -21,6 +21,23 @@
#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)
+#undef __get_rel_dynamic_array
+#define __get_rel_dynamic_array(field) \
+ ((void *)__entry + \
+ offsetof(typeof(*__entry), __rel_loc_##field) + \
+ sizeof(__entry->__rel_loc_##field) + \
+ (__entry->__rel_loc_##field & 0xffff))
+
+#undef __get_rel_dynamic_array_len
+#define __get_rel_dynamic_array_len(field) \
+ ((__entry->__rel_loc_##field >> 16) & 0xffff)
+
+#undef __get_rel_str
+#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
+
+#undef __get_rel_bitmask
+#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
+
#undef __perf_count
#define __perf_count(c) (__count = (c))
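
A __rel_loc word packs the payload length into its high 16 bits and, into the low 16, the payload's offset measured from just past the __rel_loc_##field member itself, which is what makes the reference position-independent. A standalone decode sketch mirroring __get_rel_dynamic_array() and __get_rel_dynamic_array_len() (the struct layout is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* A record with one rel_loc field followed by its payload. */
struct sample {
	uint32_t rel_loc;	/* high 16: length; low 16: offset past this field */
	char data[32];
};

int main(void)
{
	struct sample s;
	const char msg[] = "hello";

	/* Writer: payload starts at data[0], i.e. 0 bytes past rel_loc. */
	s.rel_loc = (uint32_t)sizeof(msg) << 16 | 0;
	memcpy(s.data, msg, sizeof(msg));

	/* Reader: mirrors __get_rel_dynamic_array()/_len(). */
	void *p = (char *)&s + offsetof(struct sample, rel_loc)
		  + sizeof(s.rel_loc) + (s.rel_loc & 0xffff);
	unsigned int len = (s.rel_loc >> 16) & 0xffff;

	printf("len=%u str=%s\n", len, (char *)p);
	return 0;
}
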
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 08810a463880..3d29919045af 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -108,6 +108,18 @@ TRACE_MAKE_SYSTEM_STR();
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) u32 __rel_loc_##item;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(char, item, -1)
+
#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args
@@ -116,7 +128,7 @@ TRACE_MAKE_SYSTEM_STR();
struct trace_event_raw_##name { \
struct trace_entry ent; \
tstruct \
- char __data[0]; \
+ char __data[]; \
}; \
\
static struct trace_event_class event_class_##name;
@@ -200,11 +212,23 @@ TRACE_MAKE_SYSTEM_STR();
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
+#undef __bitmask
+#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+
#undef __string_len
#define __string_len(item, src, len) __dynamic_array(char, item, -1)
-#undef __bitmask
-#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) u32 item;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -293,6 +317,20 @@ TRACE_MAKE_SYSTEM_STR();
#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))
+#undef __get_rel_dynamic_array
+#define __get_rel_dynamic_array(field) \
+ ((void *)__entry + \
+ offsetof(typeof(*__entry), __rel_loc_##field) + \
+ sizeof(__entry->__rel_loc_##field) + \
+ (__entry->__rel_loc_##field & 0xffff))
+
+#undef __get_rel_dynamic_array_len
+#define __get_rel_dynamic_array_len(field) \
+ ((__entry->__rel_loc_##field >> 16) & 0xffff)
+
+#undef __get_rel_str
+#define __get_rel_str(field) ((char *)__get_rel_dynamic_array(field))
+
#undef __get_bitmask
#define __get_bitmask(field) \
({ \
@@ -302,6 +340,15 @@ TRACE_MAKE_SYSTEM_STR();
trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
})
+#undef __get_rel_bitmask
+#define __get_rel_bitmask(field) \
+ ({ \
+ void *__bitmask = __get_rel_dynamic_array(field); \
+ unsigned int __bitmask_size; \
+ __bitmask_size = __get_rel_dynamic_array_len(field); \
+ trace_print_bitmask_seq(p, __bitmask, __bitmask_size); \
+ })
+
#undef __print_flags
#define __print_flags(flag, delim, flag_array...) \
({ \
@@ -471,6 +518,21 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \
#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(_type, _item, _len) { \
+ .type = "__rel_loc " #_type "[]", .name = #_item, \
+ .size = 4, .align = 4, \
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static struct trace_event_fields trace_event_fields_##call[] = { \
@@ -519,6 +581,22 @@ static struct trace_event_fields trace_event_fields_##call[] = { \
#undef __string_len
#define __string_len(item, src, len) __dynamic_array(char, item, (len) + 1)
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) \
+ __item_length = (len) * sizeof(type); \
+ __data_offsets->item = __data_size + \
+ offsetof(typeof(*entry), __data) - \
+ offsetof(typeof(*entry), __rel_loc_##item) - \
+ sizeof(u32); \
+ __data_offsets->item |= __item_length << 16; \
+ __data_size += __item_length;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, \
+ strlen((src) ? (const char *)(src) : "(null)") + 1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, (len) + 1)
/*
* __bitmask_size_in_bytes_raw is the number of bytes needed to hold
* num_possible_cpus().
@@ -542,6 +620,10 @@ static struct trace_event_fields trace_event_fields_##call[] = { \
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, \
__bitmask_size_in_longs(nr_bits))
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, \
+ __bitmask_size_in_longs(nr_bits))
+
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static inline notrace int trace_event_get_offsets_##call( \
@@ -706,6 +788,37 @@ static inline notrace int trace_event_get_offsets_##call( \
#define __assign_bitmask(dst, src, nr_bits) \
memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+#undef __rel_dynamic_array
+#define __rel_dynamic_array(type, item, len) \
+ __entry->__rel_loc_##item = __data_offsets.item;
+
+#undef __rel_string
+#define __rel_string(item, src) __rel_dynamic_array(char, item, -1)
+
+#undef __rel_string_len
+#define __rel_string_len(item, src, len) __rel_dynamic_array(char, item, -1)
+
+#undef __assign_rel_str
+#define __assign_rel_str(dst, src) \
+ strcpy(__get_rel_str(dst), (src) ? (const char *)(src) : "(null)");
+
+#undef __assign_rel_str_len
+#define __assign_rel_str_len(dst, src, len) \
+ do { \
+ memcpy(__get_rel_str(dst), (src), (len)); \
+ __get_rel_str(dst)[len] = '\0'; \
+ } while (0)
+
+#undef __rel_bitmask
+#define __rel_bitmask(item, nr_bits) __rel_dynamic_array(unsigned long, item, -1)
+
+#undef __get_rel_bitmask
+#define __get_rel_bitmask(field) (char *)__get_rel_dynamic_array(field)
+
+#undef __assign_rel_bitmask
+#define __assign_rel_bitmask(dst, src, nr_bits) \
+ memcpy(__get_rel_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))
+
#undef TP_fast_assign
#define TP_fast_assign(args...) args
@@ -770,6 +883,10 @@ static inline void ftrace_test_probe_##call(void) \
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
+#undef __get_rel_dynamic_array
+#undef __get_rel_dynamic_array_len
+#undef __get_rel_str
+#undef __get_rel_bitmask
#undef __print_array
#undef __print_hex_dump
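
On the writer side, __rel_dynamic_array() computes that relative offset as the distance from the end of the __rel_loc_##item member to the item's slot in __data[], so no absolute buffer addresses are ever stored. A standalone sketch of the computation under an assumed entry layout:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct entry {
	uint64_t ent;		/* stands in for struct trace_entry */
	uint32_t rel_loc_msg;	/* __rel_dynamic_array(char, msg, len) */
	uint32_t other;		/* another fixed field after it */
	char __data[];		/* variable-length area, as in the kernel */
};

int main(void)
{
	unsigned int data_size = 0;	/* msg is first item in __data */
	unsigned int item_len = 6;	/* strlen("hello") + 1 */

	/* Mirrors the macro: offset is measured from just past
	 * rel_loc_msg, here skipping over the 'other' field. */
	uint32_t off = data_size + offsetof(struct entry, __data)
		       - offsetof(struct entry, rel_loc_msg)
		       - sizeof(uint32_t);
	uint32_t rel_loc = item_len << 16 | off;

	printf("off=%u len=%u rel_loc=%#x\n", off, rel_loc >> 16, rel_loc);
	return 0;
}
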
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 4557a8b6086f..1c48b0ae3ba3 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -883,8 +883,11 @@ __SYSCALL(__NR_process_mrelease, sys_process_mrelease)
#define __NR_futex_waitv 449
__SYSCALL(__NR_futex_waitv, sys_futex_waitv)
+#define __NR_set_mempolicy_home_node 450
+__SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node)
+
#undef __NR_syscalls
-#define __NR_syscalls 450
+#define __NR_syscalls 451
/*
* 32 bit systems traditionally used different
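
Until libc grows a wrapper, the new call can be reached through syscall(2). A hedged sketch assuming the sys_set_mempolicy_home_node(start, len, home_node, flags) prototype added by this series; the zero arguments are placeholders, so the call is expected to fail harmlessly:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_set_mempolicy_home_node
#define __NR_set_mempolicy_home_node 450
#endif

int main(void)
{
	/* Placeholder range and node; a real caller passes an
	 * MPOL_BIND/MPOL_PREFERRED_MANY-policied VMA range. */
	long ret = syscall(__NR_set_mempolicy_home_node,
			   0UL /* start */, 0UL /* len */,
			   0UL /* home_node */, 0UL /* flags */);
	if (ret == -1)
		perror("set_mempolicy_home_node");
	return 0;
}
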
diff --git a/drivers/comedi/comedi.h b/include/uapi/linux/comedi.h
index b5d00a006dbb..7314e5ee0a1e 100644
--- a/drivers/comedi/comedi.h
+++ b/include/uapi/linux/comedi.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: LGPL-2.0+ */
+/* SPDX-License-Identifier: LGPL-2.0+ WITH Linux-syscall-note */
/*
* comedi.h
* header file for COMEDI user API
diff --git a/include/uapi/linux/cyclades.h b/include/uapi/linux/cyclades.h
new file mode 100644
index 000000000000..6225c5aebe06
--- /dev/null
+++ b/include/uapi/linux/cyclades.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _UAPI_LINUX_CYCLADES_H
+#define _UAPI_LINUX_CYCLADES_H
+
+#warning "Support for features provided by this header has been removed"
+#warning "Please consider updating your code"
+
+struct cyclades_monitor {
+ unsigned long int_count;
+ unsigned long char_count;
+ unsigned long char_max;
+ unsigned long char_last;
+};
+
+#define CYGETMON 0x435901
+#define CYGETTHRESH 0x435902
+#define CYSETTHRESH 0x435903
+#define CYGETDEFTHRESH 0x435904
+#define CYSETDEFTHRESH 0x435905
+#define CYGETTIMEOUT 0x435906
+#define CYSETTIMEOUT 0x435907
+#define CYGETDEFTIMEOUT 0x435908
+#define CYSETDEFTIMEOUT 0x435909
+#define CYSETRFLOW 0x43590a
+#define CYGETRFLOW 0x43590b
+#define CYSETRTSDTR_INV 0x43590c
+#define CYGETRTSDTR_INV 0x43590d
+#define CYZSETPOLLCYCLE 0x43590e
+#define CYZGETPOLLCYCLE 0x43590f
+#define CYGETCD1400VER 0x435910
+#define CYSETWAIT 0x435912
+#define CYGETWAIT 0x435913
+
+#endif /* _UAPI_LINUX_CYCLADES_H */
diff --git a/include/uapi/linux/fanotify.h b/include/uapi/linux/fanotify.h
index bd1932c2074d..e8ac38cc2fd6 100644
--- a/include/uapi/linux/fanotify.h
+++ b/include/uapi/linux/fanotify.h
@@ -28,6 +28,8 @@
#define FAN_EVENT_ON_CHILD 0x08000000 /* Interested in child events */
+#define FAN_RENAME 0x10000000 /* File was renamed */
+
#define FAN_ONDIR 0x40000000 /* Event occurred against dir */
/* helper events */
@@ -57,9 +59,13 @@
#define FAN_REPORT_FID 0x00000200 /* Report unique file id */
#define FAN_REPORT_DIR_FID 0x00000400 /* Report unique directory id */
#define FAN_REPORT_NAME 0x00000800 /* Report events with name */
+#define FAN_REPORT_TARGET_FID 0x00001000 /* Report dirent target id */
/* Convenience macro - FAN_REPORT_NAME requires FAN_REPORT_DIR_FID */
#define FAN_REPORT_DFID_NAME (FAN_REPORT_DIR_FID | FAN_REPORT_NAME)
+/* Convenience macro - FAN_REPORT_TARGET_FID requires all other FID flags */
+#define FAN_REPORT_DFID_NAME_TARGET (FAN_REPORT_DFID_NAME | \
+ FAN_REPORT_FID | FAN_REPORT_TARGET_FID)
/* Deprecated - do not use this in programs and do not add new flags here! */
#define FAN_ALL_INIT_FLAGS (FAN_CLOEXEC | FAN_NONBLOCK | \
@@ -128,6 +134,12 @@ struct fanotify_event_metadata {
#define FAN_EVENT_INFO_TYPE_PIDFD 4
#define FAN_EVENT_INFO_TYPE_ERROR 5
+/* Special info types for FAN_RENAME */
+#define FAN_EVENT_INFO_TYPE_OLD_DFID_NAME 10
+/* Reserved for FAN_EVENT_INFO_TYPE_OLD_DFID 11 */
+#define FAN_EVENT_INFO_TYPE_NEW_DFID_NAME 12
+/* Reserved for FAN_EVENT_INFO_TYPE_NEW_DFID 13 */
+
/* Variable length info record following event metadata */
struct fanotify_event_info_header {
__u8 info_type;
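A hedged sketch of wiring up the new flags: initialize with FAN_REPORT_DFID_NAME_TARGET and mark for FAN_RENAME (CAP_SYS_ADMIN required; the watched path is an arbitrary example):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/fanotify.h>

	int main(void)
	{
		int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_DFID_NAME_TARGET,
				       O_RDONLY);

		if (fd < 0) {
			perror("fanotify_init");
			return 1;
		}

		/* Watch the whole mount for renames of directory entries. */
		if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_FILESYSTEM,
				  FAN_RENAME | FAN_ONDIR, AT_FDCWD, "/tmp") < 0)
			perror("fanotify_mark");

		/* Rename events then carry FAN_EVENT_INFO_TYPE_OLD_DFID_NAME
		 * and FAN_EVENT_INFO_TYPE_NEW_DFID_NAME info records. */
		return 0;
	}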
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index a1dc3ee1d17c..d6ccee961891 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -187,6 +187,13 @@
*
* 7.35
* - add FOPEN_NOFLUSH
+ *
+ * 7.36
+ * - extend fuse_init_in with reserved fields, add FUSE_INIT_EXT init flag
+ * - add flags2 to fuse_init_in and fuse_init_out
+ * - add FUSE_SECURITY_CTX init flag
+ * - add security context to create, mkdir, symlink, and mknod requests
+ * - add FUSE_HAS_INODE_DAX, FUSE_ATTR_DAX
*/
#ifndef _LINUX_FUSE_H
@@ -222,7 +229,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 35
+#define FUSE_KERNEL_MINOR_VERSION 36
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -341,6 +348,11 @@ struct fuse_file_lock {
* write/truncate sgid is killed only if file has group
* execute permission. (Same as Linux VFS behavior).
* FUSE_SETXATTR_EXT: Server supports extended struct fuse_setxattr_in
+ * FUSE_INIT_EXT: extended fuse_init_in request
+ * FUSE_INIT_RESERVED: reserved, do not use
+ * FUSE_SECURITY_CTX: add security context to create, mkdir, symlink, and
+ * mknod
+ * FUSE_HAS_INODE_DAX: use per inode DAX
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -372,6 +384,11 @@ struct fuse_file_lock {
#define FUSE_SUBMOUNTS (1 << 27)
#define FUSE_HANDLE_KILLPRIV_V2 (1 << 28)
#define FUSE_SETXATTR_EXT (1 << 29)
+#define FUSE_INIT_EXT (1 << 30)
+#define FUSE_INIT_RESERVED (1 << 31)
+/* bits 32..63 get shifted down 32 bits into the flags2 field */
+#define FUSE_SECURITY_CTX (1ULL << 32)
+#define FUSE_HAS_INODE_DAX (1ULL << 33)
/**
* CUSE INIT request/reply flags
@@ -454,8 +471,10 @@ struct fuse_file_lock {
* fuse_attr flags
*
* FUSE_ATTR_SUBMOUNT: Object is a submount root
+ * FUSE_ATTR_DAX: Enable DAX for this file in per inode DAX mode
*/
#define FUSE_ATTR_SUBMOUNT (1 << 0)
+#define FUSE_ATTR_DAX (1 << 1)
/**
* Open flags
@@ -741,6 +760,8 @@ struct fuse_init_in {
uint32_t minor;
uint32_t max_readahead;
uint32_t flags;
+ uint32_t flags2;
+ uint32_t unused[11];
};
#define FUSE_COMPAT_INIT_OUT_SIZE 8
@@ -757,7 +778,8 @@ struct fuse_init_out {
uint32_t time_gran;
uint16_t max_pages;
uint16_t map_alignment;
- uint32_t unused[8];
+ uint32_t flags2;
+ uint32_t unused[7];
};
#define CUSE_INIT_INFO_MAX 4096
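A small sketch of how a fuse server might reassemble the full 64-bit feature mask from the split fields (flags2 is only meaningful when FUSE_INIT_EXT was negotiated; the helper name is illustrative):

	#include <stdint.h>
	#include <linux/fuse.h>

	static uint64_t fuse_full_init_flags(const struct fuse_init_in *in)
	{
		uint64_t flags = in->flags;

		if (flags & FUSE_INIT_EXT)	/* extended init request */
			flags |= (uint64_t)in->flags2 << 32;

		return flags;	/* bit 32 corresponds to FUSE_SECURITY_CTX */
	}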
@@ -865,9 +887,12 @@ struct fuse_dirent {
char name[];
};
-#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
-#define FUSE_DIRENT_ALIGN(x) \
+/* Align variable length records to 64bit boundary */
+#define FUSE_REC_ALIGN(x) \
(((x) + sizeof(uint64_t) - 1) & ~(sizeof(uint64_t) - 1))
+
+#define FUSE_NAME_OFFSET offsetof(struct fuse_dirent, name)
+#define FUSE_DIRENT_ALIGN(x) FUSE_REC_ALIGN(x)
#define FUSE_DIRENT_SIZE(d) \
FUSE_DIRENT_ALIGN(FUSE_NAME_OFFSET + (d)->namelen)
@@ -984,4 +1009,26 @@ struct fuse_syncfs_in {
uint64_t padding;
};
+/*
+ * For each security context, send a fuse_secctx with the size of the
+ * security context. fuse_secctx will be followed by the security context
+ * name, which in turn will be followed by the actual context label.
+ * fuse_secctx, name, context
+ */
+struct fuse_secctx {
+ uint32_t size;
+ uint32_t padding;
+};
+
+/*
+ * Contains information about how many fuse_secctx structures are being
+ * sent and the total size of all security contexts (including the
+ * size of fuse_secctx_header).
+ */
+struct fuse_secctx_header {
+ uint32_t size;
+ uint32_t nr_secctx;
+};
+
#endif /* _LINUX_FUSE_H */
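A hedged parsing sketch for the security-context blob (entry padding and alignment should be verified against the kernel implementation; the function name is illustrative):

	#include <stdint.h>
	#include <string.h>
	#include <linux/fuse.h>

	static void walk_secctx(const void *buf)
	{
		const struct fuse_secctx_header *hdr = buf;
		const char *p = (const char *)(hdr + 1);
		uint32_t i;

		for (i = 0; i < hdr->nr_secctx; i++) {
			const struct fuse_secctx *ctx = (const void *)p;
			const char *name = (const char *)(ctx + 1);	/* NUL-terminated */
			const char *value = name + strlen(name) + 1;	/* ctx->size bytes */

			/* consume name and value, then step to the next entry */
			(void)value;
			p = name + strlen(name) + 1 + ctx->size;
		}
	}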
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
index c750eac09fc9..a8f0ff75c430 100644
--- a/include/uapi/linux/idxd.h
+++ b/include/uapi/linux/idxd.h
@@ -28,6 +28,7 @@ enum idxd_scmd_stat {
IDXD_SCMD_WQ_NONE_CONFIGURED = 0x800d0000,
IDXD_SCMD_WQ_NO_SIZE = 0x800e0000,
IDXD_SCMD_WQ_NO_PRIV = 0x800f0000,
+ IDXD_SCMD_WQ_IRQ_ERR = 0x80100000,
};
#define IDXD_SCMD_SOFTERR_MASK 0x80000000
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index c45b5e9a9387..787f491f0d2a 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -70,6 +70,7 @@ enum {
IOSQE_IO_HARDLINK_BIT,
IOSQE_ASYNC_BIT,
IOSQE_BUFFER_SELECT_BIT,
+ IOSQE_CQE_SKIP_SUCCESS_BIT,
};
/*
@@ -87,6 +88,8 @@ enum {
#define IOSQE_ASYNC (1U << IOSQE_ASYNC_BIT)
/* select buffer from sqe->buf_group */
#define IOSQE_BUFFER_SELECT (1U << IOSQE_BUFFER_SELECT_BIT)
+/* don't post CQE if request succeeded */
+#define IOSQE_CQE_SKIP_SUCCESS (1U << IOSQE_CQE_SKIP_SUCCESS_BIT)
/*
* io_uring_setup() flags
@@ -289,6 +292,7 @@ struct io_uring_params {
#define IORING_FEAT_EXT_ARG (1U << 8)
#define IORING_FEAT_NATIVE_WORKERS (1U << 9)
#define IORING_FEAT_RSRC_TAGS (1U << 10)
+#define IORING_FEAT_CQE_SKIP (1U << 11)
/*
* io_uring_register(2) opcodes and arguments
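A hedged liburing sketch of the new flag: suppress success CQEs for a write linked ahead of an fsync, so only failures (or the fsync itself) post completions. Assumes the kernel advertises IORING_FEAT_CQE_SKIP; SQE exhaustion handling is elided:

	#include <liburing.h>

	static int write_then_fsync(struct io_uring *ring, int fd,
				    const void *buf, unsigned int len)
	{
		struct io_uring_sqe *sqe;

		sqe = io_uring_get_sqe(ring);
		io_uring_prep_write(sqe, fd, buf, len, 0);
		/* A successful write posts no CQE; an error still does. */
		sqe->flags |= IOSQE_IO_LINK | IOSQE_CQE_SKIP_SUCCESS;

		sqe = io_uring_get_sqe(ring);
		io_uring_prep_fsync(sqe, fd, 0);

		return io_uring_submit(ring);
	}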
diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h
index e1fb78b4bf09..3e330f368917 100644
--- a/include/uapi/linux/kfd_sysfs.h
+++ b/include/uapi/linux/kfd_sysfs.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT WITH Linux-syscall-note */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 1daa45268de2..5191b57e1562 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1131,6 +1131,9 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204
#define KVM_CAP_ARM_MTE 205
#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206
+#define KVM_CAP_VM_GPA_BITS 207
+#define KVM_CAP_XSAVE2 208
+#define KVM_CAP_SYS_ATTRIBUTES 209
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1162,11 +1165,20 @@ struct kvm_irq_routing_hv_sint {
__u32 sint;
};
+struct kvm_irq_routing_xen_evtchn {
+ __u32 port;
+ __u32 vcpu;
+ __u32 priority;
+};
+
+#define KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL ((__u32)(-1))
+
/* gsi routing entry types */
#define KVM_IRQ_ROUTING_IRQCHIP 1
#define KVM_IRQ_ROUTING_MSI 2
#define KVM_IRQ_ROUTING_S390_ADAPTER 3
#define KVM_IRQ_ROUTING_HV_SINT 4
+#define KVM_IRQ_ROUTING_XEN_EVTCHN 5
struct kvm_irq_routing_entry {
__u32 gsi;
@@ -1178,6 +1190,7 @@ struct kvm_irq_routing_entry {
struct kvm_irq_routing_msi msi;
struct kvm_irq_routing_s390_adapter adapter;
struct kvm_irq_routing_hv_sint hv_sint;
+ struct kvm_irq_routing_xen_evtchn xen_evtchn;
__u32 pad[8];
} u;
};
@@ -1208,6 +1221,7 @@ struct kvm_x86_mce {
#define KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL (1 << 1)
#define KVM_XEN_HVM_CONFIG_SHARED_INFO (1 << 2)
#define KVM_XEN_HVM_CONFIG_RUNSTATE (1 << 3)
+#define KVM_XEN_HVM_CONFIG_EVTCHN_2LEVEL (1 << 4)
struct kvm_xen_hvm_config {
__u32 flags;
@@ -2031,4 +2045,7 @@ struct kvm_stats_desc {
#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
+/* Available with KVM_CAP_XSAVE2 */
+#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave)
+
#endif /* __LINUX_KVM_H */
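A hedged sketch of the KVM_GET_XSAVE2 flow: KVM_CHECK_EXTENSION(KVM_CAP_XSAVE2) returns the buffer size the vCPU state needs, which may exceed sizeof(struct kvm_xsave):

	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	static struct kvm_xsave *get_xsave2(int vm_fd, int vcpu_fd)
	{
		int size = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2);
		struct kvm_xsave *xs;

		if (size < (int)sizeof(*xs))
			size = sizeof(*xs);	/* capability absent: v1 size */

		xs = calloc(1, size);
		if (xs && ioctl(vcpu_fd, KVM_GET_XSAVE2, xs) < 0) {
			free(xs);
			xs = NULL;
		}
		return xs;
	}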
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 35687dcb1a42..0425cd79af9a 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -6,6 +6,7 @@
#define AFFS_SUPER_MAGIC 0xadff
#define AFS_SUPER_MAGIC 0x5346414F
#define AUTOFS_SUPER_MAGIC 0x0187
+#define CEPH_SUPER_MAGIC 0x00c36400
#define CODA_SUPER_MAGIC 0x73757245
#define CRAMFS_MAGIC 0x28cd3d45 /* some random number */
#define CRAMFS_MAGIC_WEND 0x453dcd28 /* magic number with the wrong endianess */
@@ -43,6 +44,7 @@
#define MINIX3_SUPER_MAGIC 0x4d5a /* minix v3 fs, 60 char names */
#define MSDOS_SUPER_MAGIC 0x4d44 /* MD */
+#define EXFAT_SUPER_MAGIC 0x2011BAB0
#define NCP_SUPER_MAGIC 0x564c /* Guess, what 0x564c is :-) */
#define NFS_SUPER_MAGIC 0x6969
#define OCFS2_SUPER_MAGIC 0x7461636f
@@ -51,6 +53,7 @@
#define QNX6_SUPER_MAGIC 0x68191122 /* qnx6 fs detection */
#define AFS_FS_MAGIC 0x6B414653
+
#define REISERFS_SUPER_MAGIC 0x52654973 /* used by gcc */
/* used by file system utilities that
look at the superblock, etc. */
@@ -59,6 +62,9 @@
#define REISER2FS_JR_SUPER_MAGIC_STRING "ReIsEr3Fs"
#define SMB_SUPER_MAGIC 0x517B
+#define CIFS_SUPER_MAGIC 0xFF534D42 /* the first four bytes of SMB PDUs */
+#define SMB2_SUPER_MAGIC 0xFE534D42
+
#define CGROUP_SUPER_MAGIC 0x27e0eb
#define CGROUP2_SUPER_MAGIC 0x63677270
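The new magics can be matched against statfs(2)'s f_type; a minimal sketch:

	#include <stdio.h>
	#include <sys/vfs.h>

	#ifndef CEPH_SUPER_MAGIC
	#define CEPH_SUPER_MAGIC 0x00c36400
	#endif

	int main(int argc, char **argv)
	{
		struct statfs st;

		if (argc > 1 && statfs(argv[1], &st) == 0)
			printf("%s: %s\n", argv[1],
			       st.f_type == CEPH_SUPER_MAGIC ? "ceph" : "other");
		return 0;
	}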
diff --git a/include/uapi/linux/module.h b/include/uapi/linux/module.h
index 50d98ec5e866..03a33ffffcba 100644
--- a/include/uapi/linux/module.h
+++ b/include/uapi/linux/module.h
@@ -5,5 +5,6 @@
/* Flags for sys_finit_module: */
#define MODULE_INIT_IGNORE_MODVERSIONS 1
#define MODULE_INIT_IGNORE_VERMAGIC 2
+#define MODULE_INIT_COMPRESSED_FILE 4
#endif /* _UAPI_LINUX_MODULE_H */
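A hedged sketch of the new flag: hand finit_module() a still-compressed module image and let a kernel built with in-kernel module decompression unpack it:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <linux/module.h>

	int main(int argc, char **argv)
	{
		int fd;

		if (argc < 2)
			return 1;
		fd = open(argv[1], O_RDONLY | O_CLOEXEC);	/* e.g. foo.ko.xz */
		if (fd < 0)
			return 1;
		if (syscall(SYS_finit_module, fd, "",
			    MODULE_INIT_COMPRESSED_FILE) < 0)
			perror("finit_module");
		close(fd);
		return 0;
	}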
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 4b3395082d15..26071021e986 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -106,7 +106,7 @@ enum ip_conntrack_status {
IPS_NAT_CLASH = IPS_UNTRACKED,
#endif
- /* Conntrack got a helper explicitly attached via CT target. */
+ /* Conntrack got a helper explicitly attached (ruleset, ctnetlink). */
IPS_HELPER_BIT = 13,
IPS_HELPER = (1 << IPS_HELPER_BIT),
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index ff6ccbc6efe9..bee1a9ed6e66 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -301,23 +301,23 @@
#define PCI_SID_ESR_FIC 0x20 /* First In Chassis Flag */
#define PCI_SID_CHASSIS_NR 3 /* Chassis Number */
-/* Message Signalled Interrupt registers */
+/* Message Signaled Interrupt registers */
-#define PCI_MSI_FLAGS 2 /* Message Control */
+#define PCI_MSI_FLAGS 0x02 /* Message Control */
#define PCI_MSI_FLAGS_ENABLE 0x0001 /* MSI feature enabled */
#define PCI_MSI_FLAGS_QMASK 0x000e /* Maximum queue size available */
#define PCI_MSI_FLAGS_QSIZE 0x0070 /* Message queue size configured */
#define PCI_MSI_FLAGS_64BIT 0x0080 /* 64-bit addresses allowed */
#define PCI_MSI_FLAGS_MASKBIT 0x0100 /* Per-vector masking capable */
#define PCI_MSI_RFU 3 /* Rest of capability flags */
-#define PCI_MSI_ADDRESS_LO 4 /* Lower 32 bits */
-#define PCI_MSI_ADDRESS_HI 8 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
-#define PCI_MSI_DATA_32 8 /* 16 bits of data for 32-bit devices */
-#define PCI_MSI_MASK_32 12 /* Mask bits register for 32-bit devices */
-#define PCI_MSI_PENDING_32 16 /* Pending intrs for 32-bit devices */
-#define PCI_MSI_DATA_64 12 /* 16 bits of data for 64-bit devices */
-#define PCI_MSI_MASK_64 16 /* Mask bits register for 64-bit devices */
-#define PCI_MSI_PENDING_64 20 /* Pending intrs for 64-bit devices */
+#define PCI_MSI_ADDRESS_LO 0x04 /* Lower 32 bits */
+#define PCI_MSI_ADDRESS_HI 0x08 /* Upper 32 bits (if PCI_MSI_FLAGS_64BIT set) */
+#define PCI_MSI_DATA_32 0x08 /* 16 bits of data for 32-bit devices */
+#define PCI_MSI_MASK_32 0x0c /* Mask bits register for 32-bit devices */
+#define PCI_MSI_PENDING_32 0x10 /* Pending intrs for 32-bit devices */
+#define PCI_MSI_DATA_64 0x0c /* 16 bits of data for 64-bit devices */
+#define PCI_MSI_MASK_64 0x10 /* Mask bits register for 64-bit devices */
+#define PCI_MSI_PENDING_64 0x14 /* Pending intrs for 64-bit devices */
/* MSI-X registers (in MSI-X capability) */
#define PCI_MSIX_FLAGS 2 /* Message Control */
@@ -335,10 +335,10 @@
/* MSI-X Table entry format (in memory mapped by a BAR) */
#define PCI_MSIX_ENTRY_SIZE 16
-#define PCI_MSIX_ENTRY_LOWER_ADDR 0 /* Message Address */
-#define PCI_MSIX_ENTRY_UPPER_ADDR 4 /* Message Upper Address */
-#define PCI_MSIX_ENTRY_DATA 8 /* Message Data */
-#define PCI_MSIX_ENTRY_VECTOR_CTRL 12 /* Vector Control */
+#define PCI_MSIX_ENTRY_LOWER_ADDR 0x0 /* Message Address */
+#define PCI_MSIX_ENTRY_UPPER_ADDR 0x4 /* Message Upper Address */
+#define PCI_MSIX_ENTRY_DATA 0x8 /* Message Data */
+#define PCI_MSIX_ENTRY_VECTOR_CTRL 0xc /* Vector Control */
#define PCI_MSIX_ENTRY_CTRL_MASKBIT 0x00000001
/* CompactPCI Hotswap Register */
@@ -470,7 +470,7 @@
/* PCI Express capability registers */
-#define PCI_EXP_FLAGS 2 /* Capabilities register */
+#define PCI_EXP_FLAGS 0x02 /* Capabilities register */
#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */
#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */
#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */
@@ -484,7 +484,7 @@
#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
-#define PCI_EXP_DEVCAP 4 /* Device capabilities */
+#define PCI_EXP_DEVCAP 0x04 /* Device capabilities */
#define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */
#define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */
#define PCI_EXP_DEVCAP_EXT_TAG 0x00000020 /* Extended tags */
@@ -497,7 +497,7 @@
#define PCI_EXP_DEVCAP_PWR_VAL 0x03fc0000 /* Slot Power Limit Value */
#define PCI_EXP_DEVCAP_PWR_SCL 0x0c000000 /* Slot Power Limit Scale */
#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
-#define PCI_EXP_DEVCTL 8 /* Device Control */
+#define PCI_EXP_DEVCTL 0x08 /* Device Control */
#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
#define PCI_EXP_DEVCTL_NFERE 0x0002 /* Non-Fatal Error Reporting Enable */
#define PCI_EXP_DEVCTL_FERE 0x0004 /* Fatal Error Reporting Enable */
@@ -522,7 +522,7 @@
#define PCI_EXP_DEVCTL_READRQ_2048B 0x4000 /* 2048 Bytes */
#define PCI_EXP_DEVCTL_READRQ_4096B 0x5000 /* 4096 Bytes */
#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
-#define PCI_EXP_DEVSTA 10 /* Device Status */
+#define PCI_EXP_DEVSTA 0x0a /* Device Status */
#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */
#define PCI_EXP_DEVSTA_NFED 0x0002 /* Non-Fatal Error Detected */
#define PCI_EXP_DEVSTA_FED 0x0004 /* Fatal Error Detected */
@@ -530,7 +530,7 @@
#define PCI_EXP_DEVSTA_AUXPD 0x0010 /* AUX Power Detected */
#define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */
#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V1 12 /* v1 endpoints without link end here */
-#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
+#define PCI_EXP_LNKCAP 0x0c /* Link Capabilities */
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
@@ -549,7 +549,7 @@
#define PCI_EXP_LNKCAP_DLLLARC 0x00100000 /* Data Link Layer Link Active Reporting Capable */
#define PCI_EXP_LNKCAP_LBNC 0x00200000 /* Link Bandwidth Notification Capability */
#define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */
-#define PCI_EXP_LNKCTL 16 /* Link Control */
+#define PCI_EXP_LNKCTL 0x10 /* Link Control */
#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
#define PCI_EXP_LNKCTL_ASPM_L0S 0x0001 /* L0s Enable */
#define PCI_EXP_LNKCTL_ASPM_L1 0x0002 /* L1 Enable */
@@ -562,7 +562,7 @@
#define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */
#define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */
#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Link Autonomous Bandwidth Interrupt Enable */
-#define PCI_EXP_LNKSTA 18 /* Link Status */
+#define PCI_EXP_LNKSTA 0x12 /* Link Status */
#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */
#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */
@@ -582,7 +582,7 @@
#define PCI_EXP_LNKSTA_LBMS 0x4000 /* Link Bandwidth Management Status */
#define PCI_EXP_LNKSTA_LABS 0x8000 /* Link Autonomous Bandwidth Status */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V1 20 /* v1 endpoints with link end here */
-#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */
+#define PCI_EXP_SLTCAP 0x14 /* Slot Capabilities */
#define PCI_EXP_SLTCAP_ABP 0x00000001 /* Attention Button Present */
#define PCI_EXP_SLTCAP_PCP 0x00000002 /* Power Controller Present */
#define PCI_EXP_SLTCAP_MRLSP 0x00000004 /* MRL Sensor Present */
@@ -595,7 +595,7 @@
#define PCI_EXP_SLTCAP_EIP 0x00020000 /* Electromechanical Interlock Present */
#define PCI_EXP_SLTCAP_NCCS 0x00040000 /* No Command Completed Support */
#define PCI_EXP_SLTCAP_PSN 0xfff80000 /* Physical Slot Number */
-#define PCI_EXP_SLTCTL 24 /* Slot Control */
+#define PCI_EXP_SLTCTL 0x18 /* Slot Control */
#define PCI_EXP_SLTCTL_ABPE 0x0001 /* Attention Button Pressed Enable */
#define PCI_EXP_SLTCTL_PFDE 0x0002 /* Power Fault Detected Enable */
#define PCI_EXP_SLTCTL_MRLSCE 0x0004 /* MRL Sensor Changed Enable */
@@ -617,7 +617,7 @@
#define PCI_EXP_SLTCTL_EIC 0x0800 /* Electromechanical Interlock Control */
#define PCI_EXP_SLTCTL_DLLSCE 0x1000 /* Data Link Layer State Changed Enable */
#define PCI_EXP_SLTCTL_IBPD_DISABLE 0x4000 /* In-band PD disable */
-#define PCI_EXP_SLTSTA 26 /* Slot Status */
+#define PCI_EXP_SLTSTA 0x1a /* Slot Status */
#define PCI_EXP_SLTSTA_ABP 0x0001 /* Attention Button Pressed */
#define PCI_EXP_SLTSTA_PFD 0x0002 /* Power Fault Detected */
#define PCI_EXP_SLTSTA_MRLSC 0x0004 /* MRL Sensor Changed */
@@ -627,15 +627,15 @@
#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */
#define PCI_EXP_SLTSTA_EIS 0x0080 /* Electromechanical Interlock Status */
#define PCI_EXP_SLTSTA_DLLSC 0x0100 /* Data Link Layer State Changed */
-#define PCI_EXP_RTCTL 28 /* Root Control */
+#define PCI_EXP_RTCTL 0x1c /* Root Control */
#define PCI_EXP_RTCTL_SECEE 0x0001 /* System Error on Correctable Error */
#define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */
#define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */
#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
-#define PCI_EXP_RTCAP 30 /* Root Capabilities */
+#define PCI_EXP_RTCAP 0x1e /* Root Capabilities */
#define PCI_EXP_RTCAP_CRSVIS 0x0001 /* CRS Software Visibility capability */
-#define PCI_EXP_RTSTA 32 /* Root Status */
+#define PCI_EXP_RTSTA 0x20 /* Root Status */
#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
/*
@@ -646,7 +646,7 @@
* Use pcie_capability_read_word() and similar interfaces to use them
* safely.
*/
-#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
+#define PCI_EXP_DEVCAP2 0x24 /* Device Capabilities 2 */
#define PCI_EXP_DEVCAP2_COMP_TMOUT_DIS 0x00000010 /* Completion Timeout Disable supported */
#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */
#define PCI_EXP_DEVCAP2_ATOMIC_ROUTE 0x00000040 /* Atomic Op routing */
@@ -658,7 +658,7 @@
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
#define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */
-#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
+#define PCI_EXP_DEVCTL2 0x28 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
@@ -670,9 +670,9 @@
#define PCI_EXP_DEVCTL2_OBFF_MSGA_EN 0x2000 /* Enable OBFF Message type A */
#define PCI_EXP_DEVCTL2_OBFF_MSGB_EN 0x4000 /* Enable OBFF Message type B */
#define PCI_EXP_DEVCTL2_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
-#define PCI_EXP_DEVSTA2 42 /* Device Status 2 */
-#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints without link end here */
-#define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */
+#define PCI_EXP_DEVSTA2 0x2a /* Device Status 2 */
+#define PCI_CAP_EXP_RC_ENDPOINT_SIZEOF_V2 0x2c /* end of v2 EPs w/o link */
+#define PCI_EXP_LNKCAP2 0x2c /* Link Capabilities 2 */
#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5GT/s */
#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8GT/s */
@@ -680,7 +680,7 @@
#define PCI_EXP_LNKCAP2_SLS_32_0GB 0x00000020 /* Supported Speed 32GT/s */
#define PCI_EXP_LNKCAP2_SLS_64_0GB 0x00000040 /* Supported Speed 64GT/s */
#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
-#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
+#define PCI_EXP_LNKCTL2 0x30 /* Link Control 2 */
#define PCI_EXP_LNKCTL2_TLS 0x000f
#define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
#define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
@@ -691,12 +691,12 @@
#define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */
#define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */
#define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */
-#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
-#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
-#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
+#define PCI_EXP_LNKSTA2 0x32 /* Link Status 2 */
+#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 0x32 /* end of v2 EPs w/ link */
+#define PCI_EXP_SLTCAP2 0x34 /* Slot Capabilities 2 */
#define PCI_EXP_SLTCAP2_IBPD 0x00000001 /* In-band PD Disable Supported */
-#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
-#define PCI_EXP_SLTSTA2 58 /* Slot Status 2 */
+#define PCI_EXP_SLTCTL2 0x38 /* Slot Control 2 */
+#define PCI_EXP_SLTSTA2 0x3a /* Slot Status 2 */
/* Extended Capabilities (PCI-X 2.0 and Express) */
#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
@@ -742,7 +742,7 @@
#define PCI_EXT_CAP_MCAST_ENDPOINT_SIZEOF 40
/* Advanced Error Reporting */
-#define PCI_ERR_UNCOR_STATUS 4 /* Uncorrectable Error Status */
+#define PCI_ERR_UNCOR_STATUS 0x04 /* Uncorrectable Error Status */
#define PCI_ERR_UNC_UND 0x00000001 /* Undefined */
#define PCI_ERR_UNC_DLP 0x00000010 /* Data Link Protocol */
#define PCI_ERR_UNC_SURPDN 0x00000020 /* Surprise Down */
@@ -760,11 +760,11 @@
#define PCI_ERR_UNC_MCBTLP 0x00800000 /* MC blocked TLP */
#define PCI_ERR_UNC_ATOMEG 0x01000000 /* Atomic egress blocked */
#define PCI_ERR_UNC_TLPPRE 0x02000000 /* TLP prefix blocked */
-#define PCI_ERR_UNCOR_MASK 8 /* Uncorrectable Error Mask */
+#define PCI_ERR_UNCOR_MASK 0x08 /* Uncorrectable Error Mask */
/* Same bits as above */
-#define PCI_ERR_UNCOR_SEVER 12 /* Uncorrectable Error Severity */
+#define PCI_ERR_UNCOR_SEVER 0x0c /* Uncorrectable Error Severity */
/* Same bits as above */
-#define PCI_ERR_COR_STATUS 16 /* Correctable Error Status */
+#define PCI_ERR_COR_STATUS 0x10 /* Correctable Error Status */
#define PCI_ERR_COR_RCVR 0x00000001 /* Receiver Error Status */
#define PCI_ERR_COR_BAD_TLP 0x00000040 /* Bad TLP Status */
#define PCI_ERR_COR_BAD_DLLP 0x00000080 /* Bad DLLP Status */
@@ -773,20 +773,20 @@
#define PCI_ERR_COR_ADV_NFAT 0x00002000 /* Advisory Non-Fatal */
#define PCI_ERR_COR_INTERNAL 0x00004000 /* Corrected Internal */
#define PCI_ERR_COR_LOG_OVER 0x00008000 /* Header Log Overflow */
-#define PCI_ERR_COR_MASK 20 /* Correctable Error Mask */
+#define PCI_ERR_COR_MASK 0x14 /* Correctable Error Mask */
/* Same bits as above */
-#define PCI_ERR_CAP 24 /* Advanced Error Capabilities */
-#define PCI_ERR_CAP_FEP(x) ((x) & 31) /* First Error Pointer */
+#define PCI_ERR_CAP 0x18 /* Advanced Error Capabilities & Ctrl*/
+#define PCI_ERR_CAP_FEP(x) ((x) & 0x1f) /* First Error Pointer */
#define PCI_ERR_CAP_ECRC_GENC 0x00000020 /* ECRC Generation Capable */
#define PCI_ERR_CAP_ECRC_GENE 0x00000040 /* ECRC Generation Enable */
#define PCI_ERR_CAP_ECRC_CHKC 0x00000080 /* ECRC Check Capable */
#define PCI_ERR_CAP_ECRC_CHKE 0x00000100 /* ECRC Check Enable */
-#define PCI_ERR_HEADER_LOG 28 /* Header Log Register (16 bytes) */
-#define PCI_ERR_ROOT_COMMAND 44 /* Root Error Command */
+#define PCI_ERR_HEADER_LOG 0x1c /* Header Log Register (16 bytes) */
+#define PCI_ERR_ROOT_COMMAND 0x2c /* Root Error Command */
#define PCI_ERR_ROOT_CMD_COR_EN 0x00000001 /* Correctable Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_NONFATAL_EN 0x00000002 /* Non-Fatal Err Reporting Enable */
#define PCI_ERR_ROOT_CMD_FATAL_EN 0x00000004 /* Fatal Err Reporting Enable */
-#define PCI_ERR_ROOT_STATUS 48
+#define PCI_ERR_ROOT_STATUS 0x30
#define PCI_ERR_ROOT_COR_RCV 0x00000001 /* ERR_COR Received */
#define PCI_ERR_ROOT_MULTI_COR_RCV 0x00000002 /* Multiple ERR_COR */
#define PCI_ERR_ROOT_UNCOR_RCV 0x00000004 /* ERR_FATAL/NONFATAL */
@@ -795,52 +795,52 @@
#define PCI_ERR_ROOT_NONFATAL_RCV 0x00000020 /* Non-Fatal Received */
#define PCI_ERR_ROOT_FATAL_RCV 0x00000040 /* Fatal Received */
#define PCI_ERR_ROOT_AER_IRQ 0xf8000000 /* Advanced Error Interrupt Message Number */
-#define PCI_ERR_ROOT_ERR_SRC 52 /* Error Source Identification */
+#define PCI_ERR_ROOT_ERR_SRC 0x34 /* Error Source Identification */
/* Virtual Channel */
-#define PCI_VC_PORT_CAP1 4
+#define PCI_VC_PORT_CAP1 0x04
#define PCI_VC_CAP1_EVCC 0x00000007 /* extended VC count */
#define PCI_VC_CAP1_LPEVCC 0x00000070 /* low prio extended VC count */
#define PCI_VC_CAP1_ARB_SIZE 0x00000c00
-#define PCI_VC_PORT_CAP2 8
+#define PCI_VC_PORT_CAP2 0x08
#define PCI_VC_CAP2_32_PHASE 0x00000002
#define PCI_VC_CAP2_64_PHASE 0x00000004
#define PCI_VC_CAP2_128_PHASE 0x00000008
#define PCI_VC_CAP2_ARB_OFF 0xff000000
-#define PCI_VC_PORT_CTRL 12
+#define PCI_VC_PORT_CTRL 0x0c
#define PCI_VC_PORT_CTRL_LOAD_TABLE 0x00000001
-#define PCI_VC_PORT_STATUS 14
+#define PCI_VC_PORT_STATUS 0x0e
#define PCI_VC_PORT_STATUS_TABLE 0x00000001
-#define PCI_VC_RES_CAP 16
+#define PCI_VC_RES_CAP 0x10
#define PCI_VC_RES_CAP_32_PHASE 0x00000002
#define PCI_VC_RES_CAP_64_PHASE 0x00000004
#define PCI_VC_RES_CAP_128_PHASE 0x00000008
#define PCI_VC_RES_CAP_128_PHASE_TB 0x00000010
#define PCI_VC_RES_CAP_256_PHASE 0x00000020
#define PCI_VC_RES_CAP_ARB_OFF 0xff000000
-#define PCI_VC_RES_CTRL 20
+#define PCI_VC_RES_CTRL 0x14
#define PCI_VC_RES_CTRL_LOAD_TABLE 0x00010000
#define PCI_VC_RES_CTRL_ARB_SELECT 0x000e0000
#define PCI_VC_RES_CTRL_ID 0x07000000
#define PCI_VC_RES_CTRL_ENABLE 0x80000000
-#define PCI_VC_RES_STATUS 26
+#define PCI_VC_RES_STATUS 0x1a
#define PCI_VC_RES_STATUS_TABLE 0x00000001
#define PCI_VC_RES_STATUS_NEGO 0x00000002
#define PCI_CAP_VC_BASE_SIZEOF 0x10
-#define PCI_CAP_VC_PER_VC_SIZEOF 0x0C
+#define PCI_CAP_VC_PER_VC_SIZEOF 0x0c
/* Power Budgeting */
-#define PCI_PWR_DSR 4 /* Data Select Register */
-#define PCI_PWR_DATA 8 /* Data Register */
+#define PCI_PWR_DSR 0x04 /* Data Select Register */
+#define PCI_PWR_DATA 0x08 /* Data Register */
#define PCI_PWR_DATA_BASE(x) ((x) & 0xff) /* Base Power */
#define PCI_PWR_DATA_SCALE(x) (((x) >> 8) & 3) /* Data Scale */
#define PCI_PWR_DATA_PM_SUB(x) (((x) >> 10) & 7) /* PM Sub State */
#define PCI_PWR_DATA_PM_STATE(x) (((x) >> 13) & 3) /* PM State */
#define PCI_PWR_DATA_TYPE(x) (((x) >> 15) & 7) /* Type */
#define PCI_PWR_DATA_RAIL(x) (((x) >> 18) & 7) /* Power Rail */
-#define PCI_PWR_CAP 12 /* Capability */
+#define PCI_PWR_CAP 0x0c /* Capability */
#define PCI_PWR_CAP_BUDGET(x) ((x) & 1) /* Included in system budget */
-#define PCI_EXT_CAP_PWR_SIZEOF 16
+#define PCI_EXT_CAP_PWR_SIZEOF 0x10
/* Root Complex Event Collector Endpoint Association */
#define PCI_RCEC_RCIEP_BITMAP 4 /* Associated Bitmap for RCiEPs */
@@ -964,7 +964,7 @@
#define PCI_SRIOV_VFM_MI 0x1 /* Dormant.MigrateIn */
#define PCI_SRIOV_VFM_MO 0x2 /* Active.MigrateOut */
#define PCI_SRIOV_VFM_AV 0x3 /* Active.Available */
-#define PCI_EXT_CAP_SRIOV_SIZEOF 64
+#define PCI_EXT_CAP_SRIOV_SIZEOF 0x40
#define PCI_LTR_MAX_SNOOP_LAT 0x4
#define PCI_LTR_MAX_NOSNOOP_LAT 0x6
@@ -1017,12 +1017,12 @@
#define PCI_TPH_LOC_NONE 0x000 /* no location */
#define PCI_TPH_LOC_CAP 0x200 /* in capability */
#define PCI_TPH_LOC_MSIX 0x400 /* in MSI-X */
-#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* st table mask */
-#define PCI_TPH_CAP_ST_SHIFT 16 /* st table shift */
-#define PCI_TPH_BASE_SIZEOF 12 /* size with no st table */
+#define PCI_TPH_CAP_ST_MASK 0x07FF0000 /* ST table mask */
+#define PCI_TPH_CAP_ST_SHIFT 16 /* ST table shift */
+#define PCI_TPH_BASE_SIZEOF 0xc /* size with no ST table */
/* Downstream Port Containment */
-#define PCI_EXP_DPC_CAP 4 /* DPC Capability */
+#define PCI_EXP_DPC_CAP 0x04 /* DPC Capability */
#define PCI_EXP_DPC_IRQ 0x001F /* Interrupt Message Number */
#define PCI_EXP_DPC_CAP_RP_EXT 0x0020 /* Root Port Extensions */
#define PCI_EXP_DPC_CAP_POISONED_TLP 0x0040 /* Poisoned TLP Egress Blocking Supported */
@@ -1030,19 +1030,19 @@
#define PCI_EXP_DPC_RP_PIO_LOG_SIZE 0x0F00 /* RP PIO Log Size */
#define PCI_EXP_DPC_CAP_DL_ACTIVE 0x1000 /* ERR_COR signal on DL_Active supported */
-#define PCI_EXP_DPC_CTL 6 /* DPC control */
+#define PCI_EXP_DPC_CTL 0x06 /* DPC control */
#define PCI_EXP_DPC_CTL_EN_FATAL 0x0001 /* Enable trigger on ERR_FATAL message */
#define PCI_EXP_DPC_CTL_EN_NONFATAL 0x0002 /* Enable trigger on ERR_NONFATAL message */
#define PCI_EXP_DPC_CTL_INT_EN 0x0008 /* DPC Interrupt Enable */
-#define PCI_EXP_DPC_STATUS 8 /* DPC Status */
+#define PCI_EXP_DPC_STATUS 0x08 /* DPC Status */
#define PCI_EXP_DPC_STATUS_TRIGGER 0x0001 /* Trigger Status */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN 0x0006 /* Trigger Reason */
#define PCI_EXP_DPC_STATUS_INTERRUPT 0x0008 /* Interrupt Status */
#define PCI_EXP_DPC_RP_BUSY 0x0010 /* Root Port Busy */
#define PCI_EXP_DPC_STATUS_TRIGGER_RSN_EXT 0x0060 /* Trig Reason Extension */
-#define PCI_EXP_DPC_SOURCE_ID 10 /* DPC Source Identifier */
+#define PCI_EXP_DPC_SOURCE_ID 0x0A /* DPC Source Identifier */
#define PCI_EXP_DPC_RP_PIO_STATUS 0x0C /* RP PIO Status */
#define PCI_EXP_DPC_RP_PIO_MASK 0x10 /* RP PIO Mask */
@@ -1086,7 +1086,11 @@
/* Designated Vendor-Specific (DVSEC, PCI_EXT_CAP_ID_DVSEC) */
#define PCI_DVSEC_HEADER1 0x4 /* Designated Vendor-Specific Header1 */
+#define PCI_DVSEC_HEADER1_VID(x) ((x) & 0xffff)
+#define PCI_DVSEC_HEADER1_REV(x) (((x) >> 16) & 0xf)
+#define PCI_DVSEC_HEADER1_LEN(x) (((x) >> 20) & 0xfff)
#define PCI_DVSEC_HEADER2 0x8 /* Designated Vendor-Specific Header2 */
+#define PCI_DVSEC_HEADER2_ID(x) ((x) & 0xffff)
/* Data Link Feature */
#define PCI_DLF_CAP 0x04 /* Capabilities Register */
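A small sketch of decoding a DVSEC header with the new field macros; hdr1/hdr2 would come from config-space reads at PCI_DVSEC_HEADER1/PCI_DVSEC_HEADER2:

	#include <stdint.h>
	#include <stdio.h>
	#include <linux/pci_regs.h>

	static void dump_dvsec(uint32_t hdr1, uint32_t hdr2)
	{
		printf("DVSEC vendor %#x rev %u len %u id %#x\n",
		       PCI_DVSEC_HEADER1_VID(hdr1),
		       PCI_DVSEC_HEADER1_REV(hdr1),
		       PCI_DVSEC_HEADER1_LEN(hdr1),
		       PCI_DVSEC_HEADER2_ID(hdr2));
	}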
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index bd8860eeb291..82858b697c05 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -465,6 +465,8 @@ struct perf_event_attr {
/*
* User provided data if sigtrap=1, passed back to user via
* siginfo_t::si_perf_data, e.g. to permit user to identify the event.
+ * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be
+ * truncated accordingly on 32 bit architectures.
*/
__u64 sig_data;
};
@@ -1332,7 +1334,10 @@ union perf_mem_data_src {
/* hop level */
#define PERF_MEM_HOPS_0 0x01 /* remote core, same node */
-/* 2-7 available */
+#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */
+#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */
+#define PERF_MEM_HOPS_3 0x04 /* remote board */
+/* 5-7 available */
#define PERF_MEM_HOPS_SHIFT 43
#define PERF_MEM_S(a, s) \
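A small sketch of composing and decoding the new hop levels (the 3-bit field width is inferred from the defined values):

	#include <stdio.h>
	#include <linux/perf_event.h>

	int main(void)
	{
		__u64 dsrc = PERF_MEM_S(HOPS, 2);	/* remote socket, same board */
		__u64 hops = (dsrc >> PERF_MEM_HOPS_SHIFT) & 0x7;

		printf("hops field = %llu\n", (unsigned long long)hops);
		return 0;
	}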
diff --git a/include/uapi/linux/pfrut.h b/include/uapi/linux/pfrut.h
new file mode 100644
index 000000000000..42fa15f8310d
--- /dev/null
+++ b/include/uapi/linux/pfrut.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Platform Firmware Runtime Update header
+ *
+ * Copyright(c) 2021 Intel Corporation. All rights reserved.
+ */
+#ifndef __PFRUT_H__
+#define __PFRUT_H__
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define PFRUT_IOCTL_MAGIC 0xEE
+
+/**
+ * PFRU_IOC_SET_REV - _IOW(PFRUT_IOCTL_MAGIC, 0x01, unsigned int)
+ *
+ * Return:
+ * * 0 - success
+ * * -EFAULT - fail to read the revision id
+ * * -EINVAL - user provides an invalid revision id
+ *
+ * Set the Revision ID for Platform Firmware Runtime Update.
+ */
+#define PFRU_IOC_SET_REV _IOW(PFRUT_IOCTL_MAGIC, 0x01, unsigned int)
+
+/**
+ * PFRU_IOC_STAGE - _IOW(PFRUT_IOCTL_MAGIC, 0x02, unsigned int)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - stage phase returns invalid result
+ *
+ * Stage a capsule image from communication buffer and perform authentication.
+ */
+#define PFRU_IOC_STAGE _IOW(PFRUT_IOCTL_MAGIC, 0x02, unsigned int)
+
+/**
+ * PFRU_IOC_ACTIVATE - _IOW(PFRUT_IOCTL_MAGIC, 0x03, unsigned int)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - activate phase returns invalid result
+ *
+ * Activate a previously staged capsule image.
+ */
+#define PFRU_IOC_ACTIVATE _IOW(PFRUT_IOCTL_MAGIC, 0x03, unsigned int)
+
+/**
+ * PFRU_IOC_STAGE_ACTIVATE - _IOW(PFRUT_IOCTL_MAGIC, 0x04, unsigned int)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - stage/activate phase returns invalid result.
+ *
+ * Perform both stage and activation action.
+ */
+#define PFRU_IOC_STAGE_ACTIVATE _IOW(PFRUT_IOCTL_MAGIC, 0x04, unsigned int)
+
+/**
+ * PFRU_IOC_QUERY_CAP - _IOR(PFRUT_IOCTL_MAGIC, 0x05,
+ * struct pfru_update_cap_info)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - query phase returns invalid result
+ * * -EFAULT - the result fails to be copied to userspace
+ *
+ * Retrieve information on the Platform Firmware Runtime Update capability.
+ * The information is a struct pfru_update_cap_info.
+ */
+#define PFRU_IOC_QUERY_CAP _IOR(PFRUT_IOCTL_MAGIC, 0x05, struct pfru_update_cap_info)
+
+/**
+ * struct pfru_payload_hdr - Capsule file payload header.
+ *
+ * @sig: Signature of this capsule file.
+ * @hdr_version: Revision of this header structure.
+ * @hdr_size: Size of this header, including the OemHeader bytes.
+ * @hw_ver: The supported firmware version.
+ * @rt_ver: Version of the code injection image.
+ * @platform_id: A platform-specific GUID specifying the platform that
+ * this capsule image supports.
+ */
+struct pfru_payload_hdr {
+ __u32 sig;
+ __u32 hdr_version;
+ __u32 hdr_size;
+ __u32 hw_ver;
+ __u32 rt_ver;
+ __u8 platform_id[16];
+};
+
+enum pfru_dsm_status {
+ DSM_SUCCEED = 0,
+ DSM_FUNC_NOT_SUPPORT = 1,
+ DSM_INVAL_INPUT = 2,
+ DSM_HARDWARE_ERR = 3,
+ DSM_RETRY_SUGGESTED = 4,
+ DSM_UNKNOWN = 5,
+ DSM_FUNC_SPEC_ERR = 6,
+};
+
+/**
+ * struct pfru_update_cap_info - Runtime update capability information.
+ *
+ * @status: Indicator of whether this query succeeded.
+ * @update_cap: Bitmap to indicate whether the feature is supported.
+ * @code_type: A buffer containing an image type GUID.
+ * @fw_version: Platform firmware version.
+ * @code_rt_version: Code injection runtime version for anti-rollback.
+ * @drv_type: A buffer containing an image type GUID.
+ * @drv_rt_version: The version of the driver update runtime code.
+ * @drv_svn: The secure version number (SVN) of the driver update runtime code.
+ * @platform_id: A buffer containing a platform ID GUID.
+ * @oem_id: A buffer containing an OEM ID GUID.
+ * @oem_info_len: Length of the buffer containing the vendor specific information.
+ */
+struct pfru_update_cap_info {
+ __u32 status;
+ __u32 update_cap;
+
+ __u8 code_type[16];
+ __u32 fw_version;
+ __u32 code_rt_version;
+
+ __u8 drv_type[16];
+ __u32 drv_rt_version;
+ __u32 drv_svn;
+
+ __u8 platform_id[16];
+ __u8 oem_id[16];
+
+ __u32 oem_info_len;
+};
+
+/**
+ * struct pfru_com_buf_info - Communication buffer information.
+ *
+ * @status: Indicator of whether this query succeeded.
+ * @ext_status: Implementation specific query result.
+ * @addr_lo: Low 32bit physical address of the communication buffer to hold
+ * a runtime update package.
+ * @addr_hi: High 32bit physical address of the communication buffer to hold
+ * a runtime update package.
+ * @buf_size: Maximum size in bytes of the communication buffer.
+ */
+struct pfru_com_buf_info {
+ __u32 status;
+ __u32 ext_status;
+ __u64 addr_lo;
+ __u64 addr_hi;
+ __u32 buf_size;
+};
+
+/**
+ * struct pfru_updated_result - Platform firmware runtime update result information.
+ * @status: Indicator of whether this update succeeded.
+ * @ext_status: Implementation specific update result.
+ * @low_auth_time: Low 32bit value of image authentication time in nanoseconds.
+ * @high_auth_time: High 32bit value of image authentication time in nanoseconds.
+ * @low_exec_time: Low 32bit value of image execution time in nanoseconds.
+ * @high_exec_time: High 32bit value of image execution time in nanoseconds.
+ */
+struct pfru_updated_result {
+ __u32 status;
+ __u32 ext_status;
+ __u64 low_auth_time;
+ __u64 high_auth_time;
+ __u64 low_exec_time;
+ __u64 high_exec_time;
+};
+
+/**
+ * struct pfrt_log_data_info - Log Data from telemetry service.
+ * @status: Indicator of whether this update succeeded.
+ * @ext_status: Implementation specific update result.
+ * @chunk1_addr_lo: Low 32bit physical address of the telemetry data chunk1
+ * starting address.
+ * @chunk1_addr_hi: High 32bit physical address of the telemetry data chunk1
+ * starting address.
+ * @chunk2_addr_lo: Low 32bit physical address of the telemetry data chunk2
+ * starting address.
+ * @chunk2_addr_hi: High 32bit physical address of the telemetry data chunk2
+ * starting address.
+ * @max_data_size: Maximum supported size of data of all data chunks combined.
+ * @chunk1_size: Data size in bytes of the telemetry data chunk1 buffer.
+ * @chunk2_size: Data size in bytes of the telemetry data chunk2 buffer.
+ * @rollover_cnt: Number of times the telemetry data buffer has been
+ * overwritten since the last telemetry buffer reset.
+ * @reset_cnt: Number of times the telemetry service has reset, which
+ * results in the rollover count and data chunk buffers being reset.
+ */
+struct pfrt_log_data_info {
+ __u32 status;
+ __u32 ext_status;
+ __u64 chunk1_addr_lo;
+ __u64 chunk1_addr_hi;
+ __u64 chunk2_addr_lo;
+ __u64 chunk2_addr_hi;
+ __u32 max_data_size;
+ __u32 chunk1_size;
+ __u32 chunk2_size;
+ __u32 rollover_cnt;
+ __u32 reset_cnt;
+};
+
+/**
+ * struct pfrt_log_info - Telemetry log information.
+ * @log_level: The telemetry log level.
+ * @log_type: The telemetry log type (history and execution).
+ * @log_revid: The telemetry log revision id.
+ */
+struct pfrt_log_info {
+ __u32 log_level;
+ __u32 log_type;
+ __u32 log_revid;
+};
+
+/**
+ * PFRT_LOG_IOC_SET_INFO - _IOW(PFRUT_IOCTL_MAGIC, 0x06,
+ * struct pfrt_log_info)
+ *
+ * Return:
+ * * 0 - success
+ * * -EFAULT - fail to get the setting parameter
+ * * -EINVAL - fail to set the log level
+ *
+ * Set the PFRT log level and log type. The input information is
+ * a struct pfrt_log_info.
+ */
+#define PFRT_LOG_IOC_SET_INFO _IOW(PFRUT_IOCTL_MAGIC, 0x06, struct pfrt_log_info)
+
+/**
+ * PFRT_LOG_IOC_GET_INFO - _IOR(PFRUT_IOCTL_MAGIC, 0x07,
+ * struct pfrt_log_info)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - fail to get the log level
+ * * -EFAULT - fail to copy the result back to userspace
+ *
+ * Retrieve log level and log type of the telemetry. The information is
+ * a struct pfrt_log_info.
+ */
+#define PFRT_LOG_IOC_GET_INFO _IOR(PFRUT_IOCTL_MAGIC, 0x07, struct pfrt_log_info)
+
+/**
+ * PFRT_LOG_IOC_GET_DATA_INFO - _IOR(PFRUT_IOCTL_MAGIC, 0x08,
+ * struct pfrt_log_data_info)
+ *
+ * Return:
+ * * 0 - success
+ * * -EINVAL - fail to get the log buffer information
+ * * -EFAULT - fail to copy the log buffer information to userspace
+ *
+ * Retrieve data information about the telemetry. The information
+ * is a struct pfrt_log_data_info.
+ */
+#define PFRT_LOG_IOC_GET_DATA_INFO _IOR(PFRUT_IOCTL_MAGIC, 0x08, struct pfrt_log_data_info)
+
+#endif /* __PFRUT_H__ */
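A hedged sketch of the query ioctl; the device node path is an assumption (the driver registers a misc device), so adjust it to whatever the platform exposes:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/pfrut.h>

	int main(void)
	{
		struct pfru_update_cap_info cap;
		int fd = open("/dev/acpi_pfr_update0", O_RDWR);	/* path: assumption */

		if (fd < 0)
			return 1;
		if (ioctl(fd, PFRU_IOC_QUERY_CAP, &cap) == 0)
			printf("update_cap bitmap: %#x\n", cap.update_cap);
		close(fd);
		return 0;
	}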
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index bb73e9a0b24f..e998764f0262 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -272,4 +272,7 @@ struct prctl_mm_map {
# define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1
# define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2
+#define PR_SET_VMA 0x53564d41
+# define PR_SET_VMA_ANON_NAME 0
+
#endif /* _LINUX_PRCTL_H */
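A sketch of naming an anonymous mapping with the new prctl; on kernels built with anonymous-VMA naming support, the name appears in /proc/<pid>/maps as "[anon:heap-cache]":

	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_VMA
	#define PR_SET_VMA		0x53564d41
	#define PR_SET_VMA_ANON_NAME	0
	#endif

	int main(void)
	{
		size_t len = 4096;
		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
			  (unsigned long)p, len, "heap-cache") < 0)
			perror("PR_SET_VMA_ANON_NAME");
		return 0;
	}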
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index c7008d87f1a4..8cb3a6fef553 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -84,12 +84,11 @@ struct smc_diag_conninfo {
/* SMC_DIAG_LINKINFO */
struct smc_diag_linkinfo {
- __u8 link_id; /* link identifier */
- __u8 ibname[IB_DEVICE_NAME_MAX]; /* name of the RDMA device */
- __u8 ibport; /* RDMA device port number */
- __u8 gid[40]; /* local GID */
- __u8 peer_gid[40]; /* peer GID */
- __aligned_u64 net_cookie; /* RDMA device net namespace */
+ __u8 link_id; /* link identifier */
+ __u8 ibname[IB_DEVICE_NAME_MAX]; /* name of the RDMA device */
+ __u8 ibport; /* RDMA device port number */
+ __u8 gid[40]; /* local GID */
+ __u8 peer_gid[40]; /* peer GID */
};
struct smc_diag_lgrinfo {
diff --git a/include/uapi/linux/soundcard.h b/include/uapi/linux/soundcard.h
index f3b21f989872..ac1318793a86 100644
--- a/include/uapi/linux/soundcard.h
+++ b/include/uapi/linux/soundcard.h
@@ -1051,7 +1051,7 @@ typedef struct mixer_vol_table {
* the GPL version of OSS-4.x and build against that version
* of the header.
*
- * We redefine the extern keyword so that make headers_check
+ * We redefine the extern keyword so that usr/include/headers_check.pl
* does not complain about SEQ_USE_EXTBUF.
*/
#define SEQ_DECLAREBUF() SEQ_USE_EXTBUF()
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index ccbd08709321..12327d32378f 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -34,7 +34,7 @@
*/
-#define TASKSTATS_VERSION 10
+#define TASKSTATS_VERSION 11
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
* in linux/sched.h */
@@ -172,6 +172,10 @@ struct taskstats {
/* v10: 64-bit btime to avoid overflow */
__u64 ac_btime64; /* 64-bit begin time */
+
+ /* Delay waiting for memory compact */
+ __u64 compact_count;
+ __u64 compact_delay_total;
};
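With version 11, the average compaction delay can be derived from the two new counters; a small sketch (ts would come from a taskstats netlink reply):

	#include <stdio.h>
	#include <linux/taskstats.h>

	static void print_compact_delay(const struct taskstats *ts)
	{
		if (ts->version >= 11 && ts->compact_count)
			printf("avg compaction delay: %llu ns\n",
			       (unsigned long long)(ts->compact_delay_total /
						    ts->compact_count));
	}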
diff --git a/include/uapi/linux/uuid.h b/include/uapi/linux/uuid.h
index e5a7eecef7c3..c0f4bd9b040e 100644
--- a/include/uapi/linux/uuid.h
+++ b/include/uapi/linux/uuid.h
@@ -1,18 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* DO NOT USE in new code! This is solely for MEI due to legacy reasons */
/*
* UUID/GUID definition
*
* Copyright (C) 2010, Intel Corp.
* Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation;
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _UAPI_LINUX_UUID_H_
diff --git a/include/uapi/linux/vdpa.h b/include/uapi/linux/vdpa.h
index a252f06f9dfd..1061d8d2d09d 100644
--- a/include/uapi/linux/vdpa.h
+++ b/include/uapi/linux/vdpa.h
@@ -23,6 +23,9 @@ enum vdpa_command {
enum vdpa_attr {
VDPA_ATTR_UNSPEC,
+ /* Pad attribute for 64b alignment */
+ VDPA_ATTR_PAD = VDPA_ATTR_UNSPEC,
+
/* bus name (optional) + dev name together make the parent device handle */
VDPA_ATTR_MGMTDEV_BUS_NAME, /* string */
VDPA_ATTR_MGMTDEV_DEV_NAME, /* string */
@@ -40,6 +43,9 @@ enum vdpa_attr {
VDPA_ATTR_DEV_NET_CFG_MAX_VQP, /* u16 */
VDPA_ATTR_DEV_NET_CFG_MTU, /* u16 */
+ VDPA_ATTR_DEV_NEGOTIATED_FEATURES, /* u64 */
+ VDPA_ATTR_DEV_MGMTDEV_MAX_VQS, /* u32 */
+ VDPA_ATTR_DEV_SUPPORTED_FEATURES, /* u64 */
/* new attributes must be added above here */
VDPA_ATTR_MAX,
};
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
index 237e36a280cb..1ff357f0d72e 100644
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -16,6 +16,7 @@
#define VIRTIO_IOMMU_F_BYPASS 3
#define VIRTIO_IOMMU_F_PROBE 4
#define VIRTIO_IOMMU_F_MMIO 5
+#define VIRTIO_IOMMU_F_BYPASS_CONFIG 6
struct virtio_iommu_range_64 {
__le64 start;
@@ -36,6 +37,8 @@ struct virtio_iommu_config {
struct virtio_iommu_range_32 domain_range;
/* Probe buffer size */
__le32 probe_size;
+ __u8 bypass;
+ __u8 reserved[3];
};
/* Request types */
@@ -66,11 +69,14 @@ struct virtio_iommu_req_tail {
__u8 reserved[3];
};
+#define VIRTIO_IOMMU_ATTACH_F_BYPASS (1 << 0)
+
struct virtio_iommu_req_attach {
struct virtio_iommu_req_head head;
__le32 domain;
__le32 endpoint;
- __u8 reserved[8];
+ __le32 flags;
+ __u8 reserved[4];
struct virtio_iommu_req_tail tail;
};
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 00b309590499..371dfc4243b3 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -333,7 +333,18 @@ enum hl_server_type {
* HL_INFO_SYNC_MANAGER - Retrieve sync manager info per dcore
* HL_INFO_TOTAL_ENERGY - Retrieve total energy consumption
* HL_INFO_PLL_FREQUENCY - Retrieve PLL frequency
+ * HL_INFO_POWER - Retrieve power information
* HL_INFO_OPEN_STATS - Retrieve info regarding recent device open calls
+ * HL_INFO_DRAM_REPLACED_ROWS - Retrieve DRAM replaced rows info
+ * HL_INFO_DRAM_PENDING_ROWS - Retrieve DRAM pending rows num
+ * HL_INFO_LAST_ERR_OPEN_DEV_TIME - Retrieve timestamp of the last time the device was opened
+ * and a CS timeout or razwi error occurred.
+ * HL_INFO_CS_TIMEOUT_EVENT - Retrieve CS timeout timestamp and its related CS sequence number.
+ * HL_INFO_RAZWI_EVENT - Retrieve parameters of razwi:
+ * Timestamp of razwi.
+ * The address whose access caused the razwi.
+ * Razwi initiator.
+ * Razwi cause: a page fault or an MMU access error.
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@@ -353,8 +364,13 @@ enum hl_server_type {
#define HL_INFO_PLL_FREQUENCY 16
#define HL_INFO_POWER 17
#define HL_INFO_OPEN_STATS 18
+#define HL_INFO_DRAM_REPLACED_ROWS 21
+#define HL_INFO_DRAM_PENDING_ROWS 22
+#define HL_INFO_LAST_ERR_OPEN_DEV_TIME 23
+#define HL_INFO_CS_TIMEOUT_EVENT 24
+#define HL_INFO_RAZWI_EVENT 25
-#define HL_INFO_VERSION_MAX_LEN 128
+#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
/**
@@ -473,15 +489,27 @@ struct hl_info_pci_counters {
__u64 replay_cnt;
};
-#define HL_CLK_THROTTLE_POWER 0x1
-#define HL_CLK_THROTTLE_THERMAL 0x2
+enum hl_clk_throttling_type {
+ HL_CLK_THROTTLE_TYPE_POWER,
+ HL_CLK_THROTTLE_TYPE_THERMAL,
+ HL_CLK_THROTTLE_TYPE_MAX
+};
+
+/* clk_throttling_reason masks */
+#define HL_CLK_THROTTLE_POWER (1 << HL_CLK_THROTTLE_TYPE_POWER)
+#define HL_CLK_THROTTLE_THERMAL (1 << HL_CLK_THROTTLE_TYPE_THERMAL)
/**
* struct hl_info_clk_throttle - clock throttling reason
* @clk_throttling_reason: each bit represents a clk throttling reason
+ * @clk_throttling_timestamp_us: represents CPU timestamp in microseconds of the start-event
+ * @clk_throttling_duration_ns: the clock throttle time in nanosec
*/
struct hl_info_clk_throttle {
__u32 clk_throttling_reason;
+ __u32 pad;
+ __u64 clk_throttling_timestamp_us[HL_CLK_THROTTLE_TYPE_MAX];
+ __u64 clk_throttling_duration_ns[HL_CLK_THROTTLE_TYPE_MAX];
};
/**
@@ -559,6 +587,51 @@ struct hl_info_cs_counters {
__u64 ctx_validation_drop_cnt;
};
+/**
+ * struct hl_info_last_err_open_dev_time - last error boot information.
+ * @timestamp: timestamp of the last time the device was opened and an error occurred.
+ */
+struct hl_info_last_err_open_dev_time {
+ __s64 timestamp;
+};
+
+/**
+ * struct hl_info_cs_timeout_event - last CS timeout information.
+ * @timestamp: timestamp when last CS timeout event occurred.
+ * @seq: sequence number of last CS timeout event.
+ */
+struct hl_info_cs_timeout_event {
+ __s64 timestamp;
+ __u64 seq;
+};
+
+#define HL_RAZWI_PAGE_FAULT 0
+#define HL_RAZWI_MMU_ACCESS_ERROR 1
+
+/**
+ * struct hl_info_razwi_event - razwi information.
+ * @timestamp: timestamp of razwi.
+ * @addr: address whose access caused the razwi.
+ * @engine_id_1: engine id of the razwi initiator. If the initiator has no
+ *               engine id, this is set to U16_MAX.
+ * @engine_id_2: second possible engine id of the razwi initiator. A razwi may
+ *               have two candidate initiating engines, only one of which caused
+ *               it; in that case this holds the second candidate engine id,
+ *               otherwise it is set to U16_MAX.
+ * @no_engine_id: set to 1 if the razwi initiator has no engine id, otherwise 0.
+ * @error_type: cause of the razwi: page fault or MMU access error; set to
+ *              U8_MAX if unknown.
+ * @pad: padding to 64 bit.
+ */
+struct hl_info_razwi_event {
+ __s64 timestamp;
+ __u64 addr;
+ __u16 engine_id_1;
+ __u16 engine_id_2;
+ __u8 no_engine_id;
+ __u8 error_type;
+ __u8 pad[2];
+};
+
enum gaudi_dcores {
HL_GAUDI_WS_DCORE,
HL_GAUDI_WN_DCORE,
@@ -607,7 +680,10 @@ struct hl_info_args {
#define HL_MAX_CB_SIZE (0x200000 - 32)
/* Indicates whether the command buffer should be mapped to the device's MMU */
-#define HL_CB_FLAGS_MAP 0x1
+#define HL_CB_FLAGS_MAP 0x1
+
+/* Used with HL_CB_OP_INFO opcode to get the device va address for kernel mapped CB */
+#define HL_CB_FLAGS_GET_DEVICE_VA 0x2
struct hl_cb_in {
/* Handle of CB or 0 if we want to create one */
@@ -629,11 +705,16 @@ struct hl_cb_out {
/* Handle of CB */
__u64 cb_handle;
- /* Information about CB */
- struct {
- /* Usage count of CB */
- __u32 usage_cnt;
- __u32 pad;
+ union {
+ /* Information about CB */
+ struct {
+ /* Usage count of CB */
+ __u32 usage_cnt;
+ __u32 pad;
+ };
+
+ /* CB mapped address to device MMU */
+ __u64 device_va;
};
};
};
@@ -856,9 +937,17 @@ struct hl_cs_out {
/*
* SOB base address offset
- * Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY is set
+ * Valid only when HL_CS_FLAGS_RESERVE_SIGNALS_ONLY or HL_CS_FLAGS_SIGNAL is set
*/
__u32 sob_base_addr_offset;
+
+ /*
+ * Count of completed signals in SOB before current signal submission.
+ * Valid only when (HL_CS_FLAGS_ENCAP_SIGNALS & HL_CS_FLAGS_STAGED_SUBMISSION)
+ * or HL_CS_FLAGS_SIGNAL is set
+ */
+ __u16 sob_count_before_submission;
+ __u16 pad[3];
};
union hl_cs_args {
@@ -866,9 +955,10 @@ union hl_cs_args {
struct hl_cs_out out;
};
-#define HL_WAIT_CS_FLAGS_INTERRUPT 0x2
-#define HL_WAIT_CS_FLAGS_INTERRUPT_MASK 0xFFF00000
-#define HL_WAIT_CS_FLAGS_MULTI_CS 0x4
+#define HL_WAIT_CS_FLAGS_INTERRUPT 0x2
+#define HL_WAIT_CS_FLAGS_INTERRUPT_MASK 0xFFF00000
+#define HL_WAIT_CS_FLAGS_MULTI_CS 0x4
+#define HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ 0x10
#define HL_WAIT_MULTI_CS_LIST_MAX_LEN 32
@@ -888,14 +978,23 @@ struct hl_wait_cs_in {
};
struct {
- /* User address for completion comparison.
- * upon interrupt, driver will compare the value pointed
- * by this address with the supplied target value.
- * in order not to perform any comparison, set address
- * to all 1s.
- * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT is set
- */
- __u64 addr;
+ union {
+ /* User address for completion comparison.
+ * upon interrupt, driver will compare the value pointed
+ * by this address with the supplied target value.
+ * in order not to perform any comparison, set address
+ * to all 1s.
+ * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT is set
+ */
+ __u64 addr;
+
+ /* cq_counters_handle to a kernel mapped cb which contains
+ * cq counters.
+ * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ is set
+ */
+ __u64 cq_counters_handle;
+ };
+
/* Target value for completion comparison */
__u64 target;
};
@@ -911,14 +1010,27 @@ struct hl_wait_cs_in {
*/
__u32 flags;
- /* Multi CS API info- valid entries in multi-CS array */
- __u8 seq_arr_len;
- __u8 pad[3];
+ union {
+ struct {
+ /* Multi CS API info- valid entries in multi-CS array */
+ __u8 seq_arr_len;
+ __u8 pad[7];
+ };
+
+ /* Absolute timeout to wait for an interrupt in microseconds.
+ * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT is set
+ */
+ __u64 interrupt_timeout_us;
+ };
- /* Absolute timeout to wait for an interrupt in microseconds.
- * Relevant only when HL_WAIT_CS_FLAGS_INTERRUPT is set
+ /*
+ * cq counter offset inside the counters cb pointed by cq_counters_handle above.
+ * upon interrupt, driver will compare the value pointed
+ * by this address (cq_counters_handle + cq_counters_offset)
+ * with the supplied target value.
+ * relevant only when HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ is set
*/
- __u32 interrupt_timeout_us;
+ __u64 cq_counters_offset;
};
#define HL_WAIT_CS_STATUS_COMPLETED 0
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index 42b177655560..f6fde06db4b4 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -77,10 +77,12 @@ enum hns_roce_qp_cap_flags {
HNS_ROCE_QP_CAP_RQ_RECORD_DB = 1 << 0,
HNS_ROCE_QP_CAP_SQ_RECORD_DB = 1 << 1,
HNS_ROCE_QP_CAP_OWNER_DB = 1 << 2,
+ HNS_ROCE_QP_CAP_DIRECT_WQE = 1 << 5,
};
struct hns_roce_ib_create_qp_resp {
__aligned_u64 cap_flags;
+ __aligned_u64 dwqe_mmap_key;
};
struct hns_roce_ib_alloc_ucontext_resp {
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 5fbb79e30819..2d3e5df39a59 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -56,8 +56,10 @@
* *
****************************************************************************/
+#define AES_IEC958_STATUS_SIZE 24
+
struct snd_aes_iec958 {
- unsigned char status[24]; /* AES/IEC958 channel status bits */
+ unsigned char status[AES_IEC958_STATUS_SIZE]; /* AES/IEC958 channel status bits */
unsigned char subcode[147]; /* AES/IEC958 subcode bits */
unsigned char pad; /* nothing */
unsigned char dig_subframe[4]; /* AES/IEC958 subframe bits */
@@ -202,6 +204,11 @@ typedef int __bitwise snd_pcm_format_t;
#define SNDRV_PCM_FORMAT_S24_BE ((__force snd_pcm_format_t) 7) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_LE ((__force snd_pcm_format_t) 8) /* low three bytes */
#define SNDRV_PCM_FORMAT_U24_BE ((__force snd_pcm_format_t) 9) /* low three bytes */
+/*
+ * For S32/U32 formats, the 'msbits' hardware parameter is often used to convey the number of
+ * valid bits counted from the most significant bit. This covers so-called 'left-justified' or
+ * 'right-padded' samples that are narrower than 32 bits.
+ */
#define SNDRV_PCM_FORMAT_S32_LE ((__force snd_pcm_format_t) 10)
#define SNDRV_PCM_FORMAT_S32_BE ((__force snd_pcm_format_t) 11)
#define SNDRV_PCM_FORMAT_U32_LE ((__force snd_pcm_format_t) 12)
@@ -300,7 +307,7 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */
#define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */
#define SNDRV_PCM_INFO_EXPLICIT_SYNC 0x10000000 /* needs explicit sync of pointers and data */
-
+#define SNDRV_PCM_INFO_NO_REWINDS 0x20000000 /* hardware can only support monotonic changes of appl_ptr */
#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index 02b71a8deea4..b72fa385bebf 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -140,4 +140,9 @@
#define SOF_TKN_INTEL_HDA_RATE 1500
#define SOF_TKN_INTEL_HDA_CH 1501
+/* AFE */
+#define SOF_TKN_MEDIATEK_AFE_RATE 1600
+#define SOF_TKN_MEDIATEK_AFE_CH 1601
+#define SOF_TKN_MEDIATEK_AFE_FORMAT 1602
+
#endif
diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h
index 9ac5515b9bc2..7a7145395c09 100644
--- a/include/uapi/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
@@ -47,7 +47,13 @@ struct ioctl_gntdev_grant_ref {
/*
* Inserts the grant references into the mapping table of an instance
* of gntdev. N.B. This does not perform the mapping, which is deferred
- * until mmap() is called with @index as the offset.
+ * until mmap() is called with @index as the offset. @index should be
+ * considered opaque to userspace, with one exception: if no grant
+ * references have ever been inserted into the mapping table of this
+ * instance, @index will be set to 0. This is necessary to use gntdev
+ * with userspace APIs that expect a file descriptor that can be
+ * mmap()'d at offset 0, such as Wayland. If @count is set to 0, this
+ * ioctl will fail.
*/
#define IOCTL_GNTDEV_MAP_GRANT_REF \
_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index e93d4f0088c5..f78a6cc94f1a 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -26,6 +26,9 @@ extern struct balloon_stats balloon_stats;
void balloon_set_new_target(unsigned long target);
+int xen_alloc_ballooned_pages(unsigned int nr_pages, struct page **pages);
+void xen_free_ballooned_pages(unsigned int nr_pages, struct page **pages);
+
#ifdef CONFIG_XEN_BALLOON
void xen_balloon_init(void);
#else
diff --git a/include/xen/interface/io/usbif.h b/include/xen/interface/io/usbif.h
new file mode 100644
index 000000000000..a70e0b93178b
--- /dev/null
+++ b/include/xen/interface/io/usbif.h
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * usbif.h
+ *
+ * USB I/O interface for Xen guest OSes.
+ *
+ * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
+ * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_USBIF_H__
+#define __XEN_PUBLIC_IO_USBIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ * Detailed Interface Description
+ * ==============================
+ * The pvUSB interface uses a split driver design: a frontend driver in
+ * the guest and a backend driver in a driver domain (normally dom0) having
+ * access to the physical USB device(s) being passed to the guest.
+ *
+ * The frontend and backend drivers use XenStore to initiate the connection
+ * between them; the I/O activity is handled via two shared ring pages and an
+ * event channel. As the interface between frontend and backend is at the USB
+ * host connector level, multiple (up to 31) physical USB devices can be
+ * handled by a single connection.
+ *
+ * The Xen pvUSB device name is "qusb", so the frontend's XenStore entries are
+ * to be found under "device/qusb", while the backend's XenStore entries are
+ * under "backend/<guest-dom-id>/qusb".
+ *
+ * When a new pvUSB connection is established, the frontend needs to setup the
+ * two shared ring pages for communication and the event channel. The ring
+ * pages need to be made available to the backend via the grant table
+ * interface.
+ *
+ * One of the shared ring pages is used by the backend to inform the frontend
+ * about USB device plug events (device to be added or removed). This is the
+ * "conn-ring".
+ *
+ * The other ring page is used for USB I/O communication (requests and
+ * responses). This is the "urb-ring".
+ *
+ * Feature and Parameter Negotiation
+ * =================================
+ * The two halves of a Xen pvUSB driver utilize nodes within the XenStore to
+ * communicate capabilities and to negotiate operating parameters. This
+ * section enumerates these nodes which reside in the respective front and
+ * backend portions of the XenStore, following the XenBus convention.
+ *
+ * Any specified default value is in effect if the corresponding XenBus node
+ * is not present in the XenStore.
+ *
+ * XenStore nodes in sections marked "PRIVATE" are solely for use by the
+ * driver side whose XenBus tree contains them.
+ *
+ *****************************************************************************
+ * Backend XenBus Nodes
+ *****************************************************************************
+ *
+ *------------------ Backend Device Identification (PRIVATE) ------------------
+ *
+ * num-ports
+ * Values: unsigned [1...31]
+ *
+ * Number of ports for this (virtual) USB host connector.
+ *
+ * usb-ver
+ * Values: unsigned [1...2]
+ *
+ * USB version of this host connector: 1 = USB 1.1, 2 = USB 2.0.
+ *
+ * port/[1...31]
+ * Values: string
+ *
+ * Physical USB device connected to the given port, e.g. "3-1.5".
+ *
+ *****************************************************************************
+ * Frontend XenBus Nodes
+ *****************************************************************************
+ *
+ *----------------------- Request Transport Parameters -----------------------
+ *
+ * event-channel
+ * Values: unsigned
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * urb-ring-ref
+ * Values: unsigned
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer. This is the ring
+ * buffer for urb requests.
+ *
+ * conn-ring-ref
+ * Values: unsigned
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * the sole page in a single page sized ring buffer. This is the ring
+ * buffer for connection/disconnection requests.
+ *
+ * protocol
+ * Values: string (XEN_IO_PROTO_ABI_*)
+ * Default Value: XEN_IO_PROTO_ABI_NATIVE
+ *
+ * The machine ABI rules governing the format of all ring request and
+ * response structures.
+ *
+ * Protocol Description
+ * ====================
+ *
+ *-------------------------- USB device plug events --------------------------
+ *
+ * USB device plug events are sent via the "conn-ring" shared page. As only
+ * events are being sent, the respective requests from the frontend to the
+ * backend are just dummy ones.
+ * The events sent to the frontend have the following layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | portnum | speed | 4
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, event id (taken from the actual frontend dummy request)
+ * portnum - uint8_t, port number (1 ... 31)
+ * speed - uint8_t, device XENUSB_SPEED_*, XENUSB_SPEED_NONE == unplug
+ *
+ * The dummy request:
+ * 0 1 octet
+ * +----------------+----------------+
+ * | id | 2
+ * +----------------+----------------+
+ * id - uint16_t, guest supplied value (does not need to be unique)
+ *
+ *-------------------------- USB I/O request ---------------------------------
+ *
+ * A single USB I/O request on the "urb-ring" has the following layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | nr_buffer_segs | 4
+ * +----------------+----------------+----------------+----------------+
+ * | pipe | 8
+ * +----------------+----------------+----------------+----------------+
+ * | transfer_flags | buffer_length | 12
+ * +----------------+----------------+----------------+----------------+
+ * | request type specific | 16
+ * | data | 20
+ * +----------------+----------------+----------------+----------------+
+ * | seg[0] | 24
+ * | data | 28
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | seg[XENUSB_MAX_SEGMENTS_PER_REQUEST - 1] | 144
+ * | data | 148
+ * +----------------+----------------+----------------+----------------+
+ * In bit fields, bit number 0 is always the least significant bit; undefined
+ * bits must be zero.
+ * id - uint16_t, guest supplied value
+ * nr_buffer_segs - uint16_t, number of segment entries in seg[] array
+ * pipe - uint32_t, bit field with multiple information:
+ * bits 0-4: port request to send to
+ * bit 5: unlink request with specified id (cancel I/O) if set (see below)
+ * bit 7: direction (1 = read from device)
+ * bits 8-14: device number on port
+ * bits 15-18: endpoint of device
+ * bits 30-31: request type: 00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk
+ * transfer_flags - uint16_t, bit field with processing flags:
+ * bit 0: less data than specified allowed
+ * buffer_length - uint16_t, total length of data
+ * request type specific data - 8 bytes, see below
+ * seg[] - array with 8 byte elements, see below
+ *
+ * Request type specific data for isochronous request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | interval | start_frame | 4
+ * +----------------+----------------+----------------+----------------+
+ * | number_of_packets | nr_frame_desc_segs | 8
+ * +----------------+----------------+----------------+----------------+
+ * interval - uint16_t, time interval in msecs between frames
+ * start_frame - uint16_t, start frame number
+ * number_of_packets - uint16_t, number of packets to transfer
+ * nr_frame_desc_segs - uint16_t, number of seg[] frame descriptor elements
+ *
+ * Request type specific data for interrupt request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | interval | 0 | 4
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ * interval - uint16_t, polling interval in msecs
+ *
+ * Request type specific data for control request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | data of setup packet | 4
+ * | | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Request type specific data for bulk request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 4
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Request type specific data for unlink request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | unlink_id | 0 | 4
+ * +----------------+----------------+----------------+----------------+
+ * | 0 | 8
+ * +----------------+----------------+----------------+----------------+
+ * unlink_id - uint16_t, request id of request to terminate
+ *
+ * seg[] array element layout:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | gref | 4
+ * +----------------+----------------+----------------+----------------+
+ * | offset | length | 8
+ * +----------------+----------------+----------------+----------------+
+ * gref - uint32_t, grant reference of buffer page
+ * offset - uint16_t, offset of buffer start in page
+ * length - uint16_t, length of buffer in page
+ *
+ *-------------------------- USB I/O response --------------------------------
+ *
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | start_frame | 4
+ * +----------------+----------------+----------------+----------------+
+ * | status | 8
+ * +----------------+----------------+----------------+----------------+
+ * | actual_length | 12
+ * +----------------+----------------+----------------+----------------+
+ * | error_count | 16
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, id of the request this response belongs to
+ * start_frame - uint16_t, start frame of this response (iso requests only)
+ * status - int32_t, XENUSB_STATUS_* (non-iso requests)
+ * actual_length - uint32_t, actual size of data transferred
+ * error_count - uint32_t, number of errors (iso requests)
+ */
+
+enum xenusb_spec_version {
+ XENUSB_VER_UNKNOWN = 0,
+ XENUSB_VER_USB11,
+ XENUSB_VER_USB20,
+ XENUSB_VER_USB30, /* not supported yet */
+};
+
+/*
+ * USB pipe in xenusb_request
+ *
+ * - port number: bits 0-4
+ * (USB_MAXCHILDREN is 31)
+ *
+ * - operation flag: bit 5
+ * (0 = submit urb,
+ * 1 = unlink urb)
+ *
+ * - direction: bit 7
+ * (0 = Host-to-Device [Out]
+ * 1 = Device-to-Host [In])
+ *
+ * - device address: bits 8-14
+ *
+ * - endpoint: bits 15-18
+ *
+ * - pipe type: bits 30-31
+ * (00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk)
+ */
+
+#define XENUSB_PIPE_PORT_MASK 0x0000001f
+#define XENUSB_PIPE_UNLINK 0x00000020
+#define XENUSB_PIPE_DIR 0x00000080
+#define XENUSB_PIPE_DEV_MASK 0x0000007f
+#define XENUSB_PIPE_DEV_SHIFT 8
+#define XENUSB_PIPE_EP_MASK 0x0000000f
+#define XENUSB_PIPE_EP_SHIFT 15
+#define XENUSB_PIPE_TYPE_MASK 0x00000003
+#define XENUSB_PIPE_TYPE_SHIFT 30
+#define XENUSB_PIPE_TYPE_ISOC 0
+#define XENUSB_PIPE_TYPE_INT 1
+#define XENUSB_PIPE_TYPE_CTRL 2
+#define XENUSB_PIPE_TYPE_BULK 3
+
+#define xenusb_pipeportnum(pipe) ((pipe) & XENUSB_PIPE_PORT_MASK)
+#define xenusb_setportnum_pipe(pipe, portnum) ((pipe) | (portnum))
+
+#define xenusb_pipeunlink(pipe) ((pipe) & XENUSB_PIPE_UNLINK)
+#define xenusb_pipesubmit(pipe) (!xenusb_pipeunlink(pipe))
+#define xenusb_setunlink_pipe(pipe) ((pipe) | XENUSB_PIPE_UNLINK)
+
+#define xenusb_pipein(pipe) ((pipe) & XENUSB_PIPE_DIR)
+#define xenusb_pipeout(pipe) (!xenusb_pipein(pipe))
+
+#define xenusb_pipedevice(pipe) \
+ (((pipe) >> XENUSB_PIPE_DEV_SHIFT) & XENUSB_PIPE_DEV_MASK)
+
+#define xenusb_pipeendpoint(pipe) \
+ (((pipe) >> XENUSB_PIPE_EP_SHIFT) & XENUSB_PIPE_EP_MASK)
+
+#define xenusb_pipetype(pipe) \
+ (((pipe) >> XENUSB_PIPE_TYPE_SHIFT) & XENUSB_PIPE_TYPE_MASK)
+#define xenusb_pipeisoc(pipe) (xenusb_pipetype(pipe) == XENUSB_PIPE_TYPE_ISOC)
+#define xenusb_pipeint(pipe) (xenusb_pipetype(pipe) == XENUSB_PIPE_TYPE_INT)
+#define xenusb_pipectrl(pipe) (xenusb_pipetype(pipe) == XENUSB_PIPE_TYPE_CTRL)
+#define xenusb_pipebulk(pipe) (xenusb_pipetype(pipe) == XENUSB_PIPE_TYPE_BULK)
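As a worked illustration of the pipe layout and accessors above, a minimal sketch of composing and decoding a pipe value (illustrative only; handle_bulk_in() is a hypothetical helper, not part of this header):

	uint32_t pipe = 0;

	/* Build a bulk IN pipe: port 2, device 3, endpoint 1. */
	pipe = xenusb_setportnum_pipe(pipe, 2);		/* bits 0-4 */
	pipe |= 3u << XENUSB_PIPE_DEV_SHIFT;		/* bits 8-14 */
	pipe |= 1u << XENUSB_PIPE_EP_SHIFT;		/* bits 15-18 */
	pipe |= (uint32_t)XENUSB_PIPE_TYPE_BULK << XENUSB_PIPE_TYPE_SHIFT;
	pipe |= XENUSB_PIPE_DIR;			/* device-to-host */

	/* Backend-side decode using the same accessors. */
	if (xenusb_pipebulk(pipe) && xenusb_pipein(pipe))
		handle_bulk_in(xenusb_pipeportnum(pipe),
			       xenusb_pipedevice(pipe),
			       xenusb_pipeendpoint(pipe));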
+
+#define XENUSB_MAX_SEGMENTS_PER_REQUEST (16)
+#define XENUSB_MAX_PORTNR 31
+#define XENUSB_RING_SIZE 4096
+
+/*
+ * RING for transferring urbs.
+ */
+struct xenusb_request_segment {
+ grant_ref_t gref;
+ uint16_t offset;
+ uint16_t length;
+};
+
+struct xenusb_urb_request {
+ uint16_t id; /* request id */
+ uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
+
+ /* basic urb parameter */
+ uint32_t pipe;
+ uint16_t transfer_flags;
+#define XENUSB_SHORT_NOT_OK 0x0001
+ uint16_t buffer_length;
+ union {
+ uint8_t ctrl[8]; /* setup_packet (Ctrl) */
+
+ struct {
+ uint16_t interval; /* maximum (1024*8) in usb core */
+ uint16_t start_frame; /* start frame */
+ uint16_t number_of_packets; /* number of ISO packets */
+ uint16_t nr_frame_desc_segs; /* number of iso_frame_desc segments */
+ } isoc;
+
+ struct {
+ uint16_t interval; /* maximum (1024*8) in usb core */
+ uint16_t pad[3];
+ } intr;
+
+ struct {
+ uint16_t unlink_id; /* unlink request id */
+ uint16_t pad[3];
+ } unlink;
+
+ } u;
+
+ /* urb data segments */
+ struct xenusb_request_segment seg[XENUSB_MAX_SEGMENTS_PER_REQUEST];
+};
+
+struct xenusb_urb_response {
+ uint16_t id; /* request id */
+ uint16_t start_frame; /* start frame (ISO) */
+ int32_t status; /* status (non-ISO) */
+#define XENUSB_STATUS_OK 0
+#define XENUSB_STATUS_NODEV (-19)
+#define XENUSB_STATUS_INVAL (-22)
+#define XENUSB_STATUS_STALL (-32)
+#define XENUSB_STATUS_IOERROR (-71)
+#define XENUSB_STATUS_BABBLE (-75)
+#define XENUSB_STATUS_SHUTDOWN (-108)
+ int32_t actual_length; /* actual transfer length */
+ int32_t error_count; /* number of ISO errors */
+};
+
+DEFINE_RING_TYPES(xenusb_urb, struct xenusb_urb_request, struct xenusb_urb_response);
+#define XENUSB_URB_RING_SIZE __CONST_RING_SIZE(xenusb_urb, XENUSB_RING_SIZE)
+
+/*
+ * RING for notifying connect/disconnect events to frontend
+ */
+struct xenusb_conn_request {
+ uint16_t id;
+};
+
+struct xenusb_conn_response {
+ uint16_t id; /* request id */
+ uint8_t portnum; /* port number */
+ uint8_t speed; /* usb_device_speed */
+#define XENUSB_SPEED_NONE 0
+#define XENUSB_SPEED_LOW 1
+#define XENUSB_SPEED_FULL 2
+#define XENUSB_SPEED_HIGH 3
+};
+
+DEFINE_RING_TYPES(xenusb_conn, struct xenusb_conn_request, struct xenusb_conn_response);
+#define XENUSB_CONN_RING_SIZE __CONST_RING_SIZE(xenusb_conn, XENUSB_RING_SIZE)
+
+#endif /* __XEN_PUBLIC_IO_USBIF_H__ */
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h
index 5e9916939268..0ca23eca2a9c 100644
--- a/include/xen/interface/xen.h
+++ b/include/xen/interface/xen.h
@@ -722,6 +722,9 @@ struct dom0_vga_console_info {
uint32_t gbl_caps;
/* Mode attributes (offset 0x0, VESA command 0x4f01). */
uint16_t mode_attrs;
+ uint16_t pad;
+ /* high 32 bits of lfb_base */
+ uint32_t ext_lfb_base;
} vesa_lfb;
} u;
};
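With the new field, consumers of dom0_vga_console_info can assemble the full 64-bit framebuffer address from the two halves; a minimal sketch, assuming info points at a populated structure whose vesa_lfb variant carries the usual 32-bit lfb_base field:

	uint64_t lfb_base = info->u.vesa_lfb.lfb_base;	/* low 32 bits */

	lfb_base |= (uint64_t)info->u.vesa_lfb.ext_lfb_base << 32;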
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 9f031b5faa54..a99bab817523 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -52,7 +52,23 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
extern u64 xen_saved_max_mem_size;
#endif
+#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages);
void xen_free_unpopulated_pages(unsigned int nr_pages, struct page **pages);
+#include <linux/ioport.h>
+int arch_xen_unpopulated_init(struct resource **res);
+#else
+#include <xen/balloon.h>
+static inline int xen_alloc_unpopulated_pages(unsigned int nr_pages,
+ struct page **pages)
+{
+ return xen_alloc_ballooned_pages(nr_pages, pages);
+}
+static inline void xen_free_unpopulated_pages(unsigned int nr_pages,
+ struct page **pages)
+{
+ xen_free_ballooned_pages(nr_pages, pages);
+}
+#endif
#endif /* _XEN_XEN_H */
diff --git a/include/xen/xenbus_dev.h b/include/xen/xenbus_dev.h
index bbee8c6a349d..4dc45a51c044 100644
--- a/include/xen/xenbus_dev.h
+++ b/include/xen/xenbus_dev.h
@@ -1,6 +1,4 @@
/******************************************************************************
- * evtchn.h
- *
* Interface to /dev/xen/xenbus_backend.
*
* Copyright (c) 2011 Bastian Blank <waldi@debian.org>
diff --git a/init/Kconfig b/init/Kconfig
index f2ae41e6717f..e9119bf54b1f 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1414,7 +1414,6 @@ config LD_DEAD_CODE_DATA_ELIMINATION
config LD_ORPHAN_WARN
def_bool y
depends on ARCH_WANT_LD_ORPHAN_WARN
- depends on !LD_IS_LLD || LLD_VERSION >= 110000
depends on $(ld-option,--orphan-handling=warn)
config SYSCTL
@@ -1797,6 +1796,10 @@ config HAVE_PERF_EVENTS
help
See tools/perf/design.txt for details.
+config GUEST_PERF_EVENTS
+ bool
+ depends on HAVE_PERF_EVENTS
+
config PERF_USE_VMALLOC
bool
help
@@ -2274,6 +2277,19 @@ config MODULE_COMPRESS_ZSTD
endchoice
+config MODULE_DECOMPRESS
+ bool "Support in-kernel module decompression"
+ depends on MODULE_COMPRESS_GZIP || MODULE_COMPRESS_XZ
+ select ZLIB_INFLATE if MODULE_COMPRESS_GZIP
+ select XZ_DEC if MODULE_COMPRESS_XZ
+ help
+
+ Support for decompressing kernel modules by the kernel itself
+ instead of relying on userspace to perform this task. Useful when
+ a load-pinning security policy is enabled.
+
+ If unsure, say N.
+
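From user space, in-kernel decompression is requested through finit_module(); a minimal sketch, assuming the MODULE_INIT_COMPRESSED_FILE flag added alongside this option and an xz-compressed module on disk (error handling omitted):

	#include <fcntl.h>
	#include <linux/module.h>	/* MODULE_INIT_COMPRESSED_FILE (assumed) */
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/lib/modules/example.ko.xz", O_RDONLY);

		/* Pass the compressed image; the kernel decompresses it. */
		return syscall(SYS_finit_module, fd, "",
			       MODULE_INIT_COMPRESSED_FILE);
	}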
config MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
bool "Allow loading of modules with missing namespace imports"
help
diff --git a/init/Makefile b/init/Makefile
index 04eeee12c076..06326e304384 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -31,7 +31,7 @@ quiet_cmd_compile.h = CHK $@
cmd_compile.h = \
$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT_BUILD)" \
- "$(CONFIG_PREEMPT_RT)" $(CONFIG_CC_VERSION_TEXT) "$(LD)"
+ "$(CONFIG_PREEMPT_RT)" "$(CONFIG_CC_VERSION_TEXT)" "$(LD)"
include/generated/compile.h: FORCE
$(call cmd,compile.h)
diff --git a/init/main.c b/init/main.c
index bb984ed79de0..65fa2e41a9c0 100644
--- a/init/main.c
+++ b/init/main.c
@@ -834,12 +834,15 @@ static void __init mm_init(void)
init_mem_debugging_and_hardening();
kfence_alloc_pool();
report_meminit();
- stack_depot_init();
+ stack_depot_early_init();
mem_init();
mem_init_print_info();
- /* page_owner must be initialized after buddy is ready */
- page_ext_init_flatmem_late();
kmem_cache_init();
+ /*
+ * page_owner must be initialized after buddy is ready, and also after
+ * slab is ready so that stack_depot_init() works properly
+ */
+ page_ext_init_flatmem_late();
kmemleak_init();
pgtable_init();
debug_objects_mem_init();
diff --git a/ipc/sem.c b/ipc/sem.c
index 6693daf4fe11..0dbdb98fdf2d 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1964,6 +1964,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
*/
un = lookup_undo(ulp, semid);
if (un) {
+ spin_unlock(&ulp->lock);
kvfree(new);
goto success;
}
@@ -1976,9 +1977,8 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
ipc_assert_locked_object(&sma->sem_perm);
list_add(&new->list_id, &sma->list_id);
un = new;
-
-success:
spin_unlock(&ulp->lock);
+success:
sem_unlock(sma, -1);
out:
return un;
diff --git a/ipc/util.c b/ipc/util.c
index fa2d86ef3fb8..a2208d0f26b2 100644
--- a/ipc/util.c
+++ b/ipc/util.c
@@ -894,7 +894,7 @@ static int sysvipc_proc_open(struct inode *inode, struct file *file)
if (!iter)
return -ENOMEM;
- iter->iface = PDE_DATA(inode);
+ iter->iface = pde_data(inode);
iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
iter->pid_ns = get_pid_ns(task_active_pid_ns(current));
diff --git a/kernel/Makefile b/kernel/Makefile
index 186c49582f45..56f4ee97f328 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -67,6 +67,7 @@ obj-y += up.o
endif
obj-$(CONFIG_UID16) += uid16.o
obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_MODULE_DECOMPRESS) += module_decompress.o
obj-$(CONFIG_MODULE_SIG) += module_signing.o
obj-$(CONFIG_MODULE_SIG_FORMAT) += module_signature.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o
diff --git a/kernel/async.c b/kernel/async.c
index b8d7a663497f..b2c4ba5686ee 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -205,9 +205,6 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
atomic_inc(&entry_count);
spin_unlock_irqrestore(&async_lock, flags);
- /* mark that this task has queued an async job, used by module init */
- current->flags |= PF_USED_ASYNC;
-
/* schedule for execution */
queue_work_node(node, system_unbound_wq, &entry->work);
diff --git a/kernel/audit.c b/kernel/audit.c
index e4bbe2c70c26..7690c29d4ee4 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -541,20 +541,22 @@ static void kauditd_printk_skb(struct sk_buff *skb)
/**
 * kauditd_rehold_skb - Handle an audit record send failure in the hold queue
* @skb: audit record
+ * @error: error code (unused)
*
* Description:
* This should only be used by the kauditd_thread when it fails to flush the
* hold queue.
*/
-static void kauditd_rehold_skb(struct sk_buff *skb)
+static void kauditd_rehold_skb(struct sk_buff *skb, __always_unused int error)
{
- /* put the record back in the queue at the same place */
- skb_queue_head(&audit_hold_queue, skb);
+ /* put the record back in the queue */
+ skb_queue_tail(&audit_hold_queue, skb);
}
/**
* kauditd_hold_skb - Queue an audit record, waiting for auditd
* @skb: audit record
+ * @error: error code
*
* Description:
* Queue the audit record, waiting for an instance of auditd. When this
@@ -564,19 +566,31 @@ static void kauditd_rehold_skb(struct sk_buff *skb)
* and queue it, if we have room. If we want to hold on to the record, but we
* don't have room, record a record lost message.
*/
-static void kauditd_hold_skb(struct sk_buff *skb)
+static void kauditd_hold_skb(struct sk_buff *skb, int error)
{
/* at this point it is uncertain if we will ever send this to auditd so
* try to send the message via printk before we go any further */
kauditd_printk_skb(skb);
/* can we just silently drop the message? */
- if (!audit_default) {
- kfree_skb(skb);
- return;
+ if (!audit_default)
+ goto drop;
+
+ /* the hold queue is only for when the daemon goes away completely,
+ * not -EAGAIN failures; if we are in a -EAGAIN state requeue the
+ * record on the retry queue unless it's full, in which case drop it
+ */
+ if (error == -EAGAIN) {
+ if (!audit_backlog_limit ||
+ skb_queue_len(&audit_retry_queue) < audit_backlog_limit) {
+ skb_queue_tail(&audit_retry_queue, skb);
+ return;
+ }
+ audit_log_lost("kauditd retry queue overflow");
+ goto drop;
}
- /* if we have room, queue the message */
+ /* if we have room in the hold queue, queue the message */
if (!audit_backlog_limit ||
skb_queue_len(&audit_hold_queue) < audit_backlog_limit) {
skb_queue_tail(&audit_hold_queue, skb);
@@ -585,24 +599,32 @@ static void kauditd_hold_skb(struct sk_buff *skb)
/* we have no other options - drop the message */
audit_log_lost("kauditd hold queue overflow");
+drop:
kfree_skb(skb);
}
/**
* kauditd_retry_skb - Queue an audit record, attempt to send again to auditd
* @skb: audit record
+ * @error: error code (unused)
*
* Description:
* Not as serious as kauditd_hold_skb() as we still have a connected auditd,
 * but for some reason we are having problems sending audit records to it, so
* queue the given record and attempt to resend.
*/
-static void kauditd_retry_skb(struct sk_buff *skb)
+static void kauditd_retry_skb(struct sk_buff *skb, __always_unused int error)
{
- /* NOTE: because records should only live in the retry queue for a
- * short period of time, before either being sent or moved to the hold
- * queue, we don't currently enforce a limit on this queue */
- skb_queue_tail(&audit_retry_queue, skb);
+ if (!audit_backlog_limit ||
+ skb_queue_len(&audit_retry_queue) < audit_backlog_limit) {
+ skb_queue_tail(&audit_retry_queue, skb);
+ return;
+ }
+
+ /* we have to drop the record, send it via printk as a last effort */
+ kauditd_printk_skb(skb);
+ audit_log_lost("kauditd retry queue overflow");
+ kfree_skb(skb);
}
/**
@@ -640,7 +662,7 @@ static void auditd_reset(const struct auditd_connection *ac)
/* flush the retry queue to the hold queue, but don't touch the main
* queue since we need to process that normally for multicast */
while ((skb = skb_dequeue(&audit_retry_queue)))
- kauditd_hold_skb(skb);
+ kauditd_hold_skb(skb, -ECONNREFUSED);
}
/**
@@ -714,16 +736,18 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
struct sk_buff_head *queue,
unsigned int retry_limit,
void (*skb_hook)(struct sk_buff *skb),
- void (*err_hook)(struct sk_buff *skb))
+ void (*err_hook)(struct sk_buff *skb, int error))
{
int rc = 0;
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *skb_tail;
unsigned int failed = 0;
/* NOTE: kauditd_thread takes care of all our locking, we just use
* the netlink info passed to us (e.g. sk and portid) */
- while ((skb = skb_dequeue(queue))) {
+ skb_tail = skb_peek_tail(queue);
+ while ((skb != skb_tail) && (skb = skb_dequeue(queue))) {
/* call the skb_hook for each skb we touch */
if (skb_hook)
(*skb_hook)(skb);
@@ -731,7 +755,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
/* can we send to anyone via unicast? */
if (!sk) {
if (err_hook)
- (*err_hook)(skb);
+ (*err_hook)(skb, -ECONNREFUSED);
continue;
}
@@ -745,7 +769,7 @@ retry:
rc == -ECONNREFUSED || rc == -EPERM) {
sk = NULL;
if (err_hook)
- (*err_hook)(skb);
+ (*err_hook)(skb, rc);
if (rc == -EAGAIN)
rc = 0;
/* continue to drain the queue */
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index fce5d43a933f..a83928cbdcb7 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -185,7 +185,7 @@ static int audit_match_perm(struct audit_context *ctx, int mask)
case AUDITSC_EXECVE:
return mask & AUDIT_PERM_EXEC;
case AUDITSC_OPENAT2:
- return mask & ACC_MODE((u32)((struct open_how *)ctx->argv[2])->flags);
+ return mask & ACC_MODE((u32)ctx->openat2.flags);
default:
return 0;
}
diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c
index 06062370c3b8..9e4ecc990647 100644
--- a/kernel/bpf/bpf_lsm.c
+++ b/kernel/bpf/bpf_lsm.c
@@ -207,7 +207,7 @@ BTF_ID(func, bpf_lsm_socket_socketpair)
BTF_ID(func, bpf_lsm_syslog)
BTF_ID(func, bpf_lsm_task_alloc)
-BTF_ID(func, bpf_lsm_task_getsecid_subj)
+BTF_ID(func, bpf_lsm_current_getsecid_subj)
BTF_ID(func, bpf_lsm_task_getsecid_obj)
BTF_ID(func, bpf_lsm_task_prctl)
BTF_ID(func, bpf_lsm_task_setscheduler)
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 33bb8ae4a804..e16dafeb2450 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -5686,7 +5686,7 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env,
i, btf_type_str(t));
return -EINVAL;
}
- if (check_ctx_reg(env, reg, regno))
+ if (check_ptr_off_reg(env, reg, regno))
return -EINVAL;
} else if (is_kfunc && (reg->type == PTR_TO_BTF_ID || reg2btf_ids[reg->type])) {
const struct btf_type *reg_ref_t;
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 80da1db47c68..5a8d9f7467bf 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -648,12 +648,22 @@ static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
int opt;
opt = fs_parse(fc, bpf_fs_parameters, param, &result);
- if (opt < 0)
+ if (opt < 0) {
/* We might like to report bad mount options here, but
* traditionally we've ignored all mount options, so we'd
* better continue to ignore non-existing options for bpf.
*/
- return opt == -ENOPARAM ? 0 : opt;
+ if (opt == -ENOPARAM) {
+ opt = vfs_parse_fs_param_source(fc, param);
+ if (opt != -ENOPARAM)
+ return opt;
+
+ return 0;
+ }
+
+ if (opt < 0)
+ return opt;
+ }
switch (opt) {
case OPT_MODE:
diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index 638d7fd7b375..710ba9de12ce 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -104,7 +104,7 @@ static struct bpf_ringbuf *bpf_ringbuf_area_alloc(size_t data_sz, int numa_node)
}
rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,
- VM_ALLOC | VM_USERMAP, PAGE_KERNEL);
+ VM_MAP | VM_USERMAP, PAGE_KERNEL);
if (rb) {
kmemleak_not_leak(pages);
rb->pages = pages;
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 49e567209c6b..22c8ae94e4c1 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -472,13 +472,14 @@ BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
u32, size, u64, flags)
{
struct pt_regs *regs;
- long res;
+ long res = -EINVAL;
if (!try_get_task_stack(task))
return -EFAULT;
regs = task_pt_regs(task);
- res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
+ if (regs)
+ res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
put_task_stack(task);
return res;
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 4b6974a195c1..5e7edf913060 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -550,11 +550,12 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
static void notrace inc_misses_counter(struct bpf_prog *prog)
{
struct bpf_prog_stats *stats;
+ unsigned int flags;
stats = this_cpu_ptr(prog->stats);
- u64_stats_update_begin(&stats->syncp);
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
u64_stats_inc(&stats->misses);
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
}
/* The logic is similar to bpf_prog_run(), but with an explicit
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index bfb45381fb3f..a39eedecc93a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -570,6 +570,8 @@ static const char *reg_type_str(struct bpf_verifier_env *env,
if (type & MEM_RDONLY)
strncpy(prefix, "rdonly_", 16);
+ if (type & MEM_ALLOC)
+ strncpy(prefix, "alloc_", 16);
snprintf(env->type_str_buf, TYPE_STR_BUF_LEN, "%s%s%s",
prefix, str[base_type(type)], postfix);
@@ -616,7 +618,7 @@ static void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno)
static void mark_stack_slot_scratched(struct bpf_verifier_env *env, u32 spi)
{
- env->scratched_stack_slots |= 1UL << spi;
+ env->scratched_stack_slots |= 1ULL << spi;
}
static bool reg_scratched(const struct bpf_verifier_env *env, u32 regno)
@@ -637,14 +639,14 @@ static bool verifier_state_scratched(const struct bpf_verifier_env *env)
static void mark_verifier_state_clean(struct bpf_verifier_env *env)
{
env->scratched_regs = 0U;
- env->scratched_stack_slots = 0UL;
+ env->scratched_stack_slots = 0ULL;
}
/* Used for printing the entire verifier state. */
static void mark_verifier_state_scratched(struct bpf_verifier_env *env)
{
env->scratched_regs = ~0U;
- env->scratched_stack_slots = ~0UL;
+ env->scratched_stack_slots = ~0ULL;
}
/* The reg state of a pointer or a bounded scalar was saved when
@@ -3969,16 +3971,17 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
}
#endif
-int check_ctx_reg(struct bpf_verifier_env *env,
- const struct bpf_reg_state *reg, int regno)
+static int __check_ptr_off_reg(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno,
+ bool fixed_off_ok)
{
- /* Access to ctx or passing it to a helper is only allowed in
- * its original, unmodified form.
+ /* Access to this pointer-typed register or passing it to a helper
+ * is only allowed in its original, unmodified form.
*/
- if (reg->off) {
- verbose(env, "dereference of modified ctx ptr R%d off=%d disallowed\n",
- regno, reg->off);
+ if (!fixed_off_ok && reg->off) {
+ verbose(env, "dereference of modified %s ptr R%d off=%d disallowed\n",
+ reg_type_str(env, reg->type), regno, reg->off);
return -EACCES;
}
@@ -3986,13 +3989,20 @@ int check_ctx_reg(struct bpf_verifier_env *env,
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
- verbose(env, "variable ctx access var_off=%s disallowed\n", tn_buf);
+ verbose(env, "variable %s access var_off=%s disallowed\n",
+ reg_type_str(env, reg->type), tn_buf);
return -EACCES;
}
return 0;
}
+int check_ptr_off_reg(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno)
+{
+ return __check_ptr_off_reg(env, reg, regno, false);
+}
+
static int __check_buffer_access(struct bpf_verifier_env *env,
const char *buf_info,
const struct bpf_reg_state *reg,
@@ -4437,7 +4447,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
return -EACCES;
}
- err = check_ctx_reg(env, reg, regno);
+ err = check_ptr_off_reg(env, reg, regno);
if (err < 0)
return err;
@@ -5127,6 +5137,7 @@ static const struct bpf_reg_types mem_types = {
PTR_TO_MAP_KEY,
PTR_TO_MAP_VALUE,
PTR_TO_MEM,
+ PTR_TO_MEM | MEM_ALLOC,
PTR_TO_BUF,
},
};
@@ -5144,7 +5155,7 @@ static const struct bpf_reg_types int_ptr_types = {
static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
-static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM } };
+static const struct bpf_reg_types alloc_mem_types = { .types = { PTR_TO_MEM | MEM_ALLOC } };
static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
@@ -5244,12 +5255,6 @@ found:
kernel_type_name(btf_vmlinux, *arg_btf_id));
return -EACCES;
}
-
- if (!tnum_is_const(reg->var_off) || reg->var_off.value) {
- verbose(env, "R%d is a pointer to in-kernel struct with non-zero offset\n",
- regno);
- return -EACCES;
- }
}
return 0;
@@ -5304,10 +5309,33 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
if (err)
return err;
- if (type == PTR_TO_CTX) {
- err = check_ctx_reg(env, reg, regno);
+ switch ((u32)type) {
+ case SCALAR_VALUE:
+ /* Pointer types where reg offset is explicitly allowed: */
+ case PTR_TO_PACKET:
+ case PTR_TO_PACKET_META:
+ case PTR_TO_MAP_KEY:
+ case PTR_TO_MAP_VALUE:
+ case PTR_TO_MEM:
+ case PTR_TO_MEM | MEM_RDONLY:
+ case PTR_TO_MEM | MEM_ALLOC:
+ case PTR_TO_BUF:
+ case PTR_TO_BUF | MEM_RDONLY:
+ case PTR_TO_STACK:
+ /* Some of the argument types nevertheless require a
+ * zero register offset.
+ */
+ if (arg_type == ARG_PTR_TO_ALLOC_MEM)
+ goto force_off_check;
+ break;
+ /* All the rest must be rejected: */
+ default:
+force_off_check:
+ err = __check_ptr_off_reg(env, reg, regno,
+ type == PTR_TO_BTF_ID);
if (err < 0)
return err;
+ break;
}
skip_type_check:
@@ -9507,9 +9535,13 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
return 0;
}
- if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
- mark_reg_known_zero(env, regs, insn->dst_reg);
+ /* All special src_reg cases are listed below. From this point onwards
+ * we either succeed and assign a corresponding dst_reg->type after
+ * zeroing the offset, or fail and reject the program.
+ */
+ mark_reg_known_zero(env, regs, insn->dst_reg);
+ if (insn->src_reg == BPF_PSEUDO_BTF_ID) {
dst_reg->type = aux->btf_var.reg_type;
switch (base_type(dst_reg->type)) {
case PTR_TO_MEM:
@@ -9547,7 +9579,6 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
}
map = env->used_maps[aux->map_index];
- mark_reg_known_zero(env, regs, insn->dst_reg);
dst_reg->map_ptr = map;
if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
@@ -9651,7 +9682,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
return err;
}
- err = check_ctx_reg(env, &regs[ctx_reg], ctx_reg);
+ err = check_ptr_off_reg(env, &regs[ctx_reg], ctx_reg);
if (err < 0)
return err;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 77702e089d6a..a557eea7166f 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3643,6 +3643,12 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
cgroup_get(cgrp);
cgroup_kn_unlock(of->kn);
+ /* Allow only one trigger per file descriptor */
+ if (ctx->psi.trigger) {
+ cgroup_put(cgrp);
+ return -EBUSY;
+ }
+
psi = cgroup_ino(cgrp) == 1 ? &psi_system : &cgrp->psi;
new = psi_trigger_create(psi, buf, nbytes, res);
if (IS_ERR(new)) {
@@ -3650,8 +3656,7 @@ static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
return PTR_ERR(new);
}
- psi_trigger_replace(&ctx->psi.trigger, new);
-
+ smp_store_release(&ctx->psi.trigger, new);
cgroup_put(cgrp);
return nbytes;
@@ -3690,7 +3695,7 @@ static void cgroup_pressure_release(struct kernfs_open_file *of)
{
struct cgroup_file_ctx *ctx = of->priv;
- psi_trigger_replace(&ctx->psi.trigger, NULL);
+ psi_trigger_destroy(ctx->psi.trigger);
}
bool cgroup_psi_enabled(void)
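From user space the one-trigger-per-fd rule matches the documented usage pattern: open the pressure file, write a single trigger, then poll for POLLPRI; a minimal sketch (the cgroup path, threshold, and window are arbitrary examples):

	int fd = open("/sys/fs/cgroup/test/memory.pressure",
		      O_RDWR | O_NONBLOCK);

	/* Trigger on 150ms of "some" memory stall within a 1s window. */
	write(fd, "some 150000 1000000", sizeof("some 150000 1000000") - 1);

	struct pollfd pfd = { .fd = fd, .events = POLLPRI };
	poll(&pfd, 1, -1);

	/* With this change, a second trigger write on the same fd fails
	 * with -EBUSY instead of silently replacing the first trigger. */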
diff --git a/kernel/configs/debug.config b/kernel/configs/debug.config
new file mode 100644
index 000000000000..e9ffb0cc1eec
--- /dev/null
+++ b/kernel/configs/debug.config
@@ -0,0 +1,105 @@
+# This config is based on daily CI runs for enterprise Linux distros that
+# seek regressions in linux-next builds on different bare-metal and virtual
+# platforms. It can be used, for example, as follows:
+#
+# $ make ARCH=arm64 defconfig debug.config
+#
+# Keep alphabetically sorted inside each section.
+#
+# printk and dmesg options
+#
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_PRINTK_CALLER=y
+CONFIG_PRINTK_TIME=y
+CONFIG_SYMBOLIC_ERRNAME=y
+#
+# Compile-time checks and compiler options
+#
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_FRAME_WARN=2048
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+#
+# Generic Kernel Debugging Instruments
+#
+# CONFIG_UBSAN_ALIGNMENT is not set
+# CONFIG_UBSAN_DIV_ZERO is not set
+# CONFIG_UBSAN_TRAP is not set
+# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_FS_ALLOW_ALL=y
+CONFIG_DEBUG_IRQFLAGS=y
+CONFIG_UBSAN=y
+CONFIG_UBSAN_BOOL=y
+CONFIG_UBSAN_BOUNDS=y
+CONFIG_UBSAN_ENUM=y
+CONFIG_UBSAN_SHIFT=y
+CONFIG_UBSAN_UNREACHABLE=y
+#
+# Memory Debugging
+#
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is not set
+# CONFIG_DEBUG_RODATA_TEST is not set
+# CONFIG_DEBUG_WX is not set
+# CONFIG_KFENCE is not set
+# CONFIG_PAGE_POISONING is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_PAGE_EXTENSION=y
+CONFIG_PAGE_OWNER=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_VIRTUAL=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_PGFLAGS=y
+CONFIG_DEBUG_VM_RB=y
+CONFIG_DEBUG_VM_VMACACHE=y
+CONFIG_GENERIC_PTDUMP=y
+CONFIG_KASAN=y
+CONFIG_KASAN_GENERIC=y
+CONFIG_KASAN_INLINE=y
+CONFIG_KASAN_VMALLOC=y
+CONFIG_PTDUMP_DEBUGFS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_SLUB_DEBUG_ON=y
+#
+# Debug Oops, Lockups and Hangs
+#
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=0
+CONFIG_SOFTLOCKUP_DETECTOR=y
+#
+# Lock Debugging (spinlocks, mutexes, etc...)
+#
+# CONFIG_PROVE_RAW_LOCK_NESTING is not set
+CONFIG_PROVE_LOCKING=y
+#
+# Debug kernel data structures
+#
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+#
+# RCU Debugging
+#
+CONFIG_PROVE_RCU=y
+CONFIG_PROVE_RCU_LIST=y
+#
+# Tracers
+#
+CONFIG_BRANCH_PROFILE_NONE=y
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_FTRACE=y
+CONFIG_FUNCTION_TRACER=y
diff --git a/kernel/cred.c b/kernel/cred.c
index 473d17c431f3..933155c96922 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -665,21 +665,16 @@ EXPORT_SYMBOL(cred_fscmp);
int set_cred_ucounts(struct cred *new)
{
- struct task_struct *task = current;
- const struct cred *old = task->real_cred;
struct ucounts *new_ucounts, *old_ucounts = new->ucounts;
- if (new->user == old->user && new->user_ns == old->user_ns)
- return 0;
-
/*
* This optimization is needed because alloc_ucounts() uses locks
* for table lookups.
*/
- if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->euid))
+ if (old_ucounts->ns == new->user_ns && uid_eq(old_ucounts->uid, new->uid))
return 0;
- if (!(new_ucounts = alloc_ucounts(new->user_ns, new->euid)))
+ if (!(new_ucounts = alloc_ucounts(new->user_ns, new->uid)))
return -EAGAIN;
new->ucounts = new_ucounts;
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 51530d5b15a8..c5e8cea9e05f 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -100,19 +100,10 @@ void __delayacct_blkio_start(void)
*/
void __delayacct_blkio_end(struct task_struct *p)
{
- struct task_delay_info *delays = p->delays;
- u64 *total;
- u32 *count;
-
- if (p->delays->flags & DELAYACCT_PF_SWAPIN) {
- total = &delays->swapin_delay;
- count = &delays->swapin_count;
- } else {
- total = &delays->blkio_delay;
- count = &delays->blkio_count;
- }
-
- delayacct_end(&delays->lock, &delays->blkio_start, total, count);
+ delayacct_end(&p->delays->lock,
+ &p->delays->blkio_start,
+ &p->delays->blkio_delay,
+ &p->delays->blkio_count);
}
int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
@@ -164,10 +155,13 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay;
d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp;
+ tmp = d->compact_delay_total + tsk->delays->compact_delay;
+ d->compact_delay_total = (tmp < d->compact_delay_total) ? 0 : tmp;
d->blkio_count += tsk->delays->blkio_count;
d->swapin_count += tsk->delays->swapin_count;
d->freepages_count += tsk->delays->freepages_count;
d->thrashing_count += tsk->delays->thrashing_count;
+ d->compact_count += tsk->delays->compact_count;
raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
return 0;
@@ -179,8 +173,7 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk)
unsigned long flags;
raw_spin_lock_irqsave(&tsk->delays->lock, flags);
- ret = nsec_to_clock_t(tsk->delays->blkio_delay +
- tsk->delays->swapin_delay);
+ ret = nsec_to_clock_t(tsk->delays->blkio_delay);
raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
return ret;
}
@@ -210,3 +203,29 @@ void __delayacct_thrashing_end(void)
&current->delays->thrashing_delay,
&current->delays->thrashing_count);
}
+
+void __delayacct_swapin_start(void)
+{
+ current->delays->swapin_start = local_clock();
+}
+
+void __delayacct_swapin_end(void)
+{
+ delayacct_end(&current->delays->lock,
+ &current->delays->swapin_start,
+ &current->delays->swapin_delay,
+ &current->delays->swapin_count);
+}
+
+void __delayacct_compact_start(void)
+{
+ current->delays->compact_start = local_clock();
+}
+
+void __delayacct_compact_end(void)
+{
+ delayacct_end(&current->delays->lock,
+ &current->delays->compact_start,
+ &current->delays->compact_delay,
+ &current->delays->compact_count);
+}
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 4c6c5e0635e3..50f48e9e4598 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -75,15 +75,45 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
}
+static int dma_set_decrypted(struct device *dev, void *vaddr, size_t size)
+{
+ if (!force_dma_unencrypted(dev))
+ return 0;
+ return set_memory_decrypted((unsigned long)vaddr, 1 << get_order(size));
+}
+
+static int dma_set_encrypted(struct device *dev, void *vaddr, size_t size)
+{
+ int ret;
+
+ if (!force_dma_unencrypted(dev))
+ return 0;
+ ret = set_memory_encrypted((unsigned long)vaddr, 1 << get_order(size));
+ if (ret)
+ pr_warn_ratelimited("leaking DMA memory that can't be re-encrypted\n");
+ return ret;
+}
+
static void __dma_direct_free_pages(struct device *dev, struct page *page,
size_t size)
{
- if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
- swiotlb_free(dev, page, size))
+ if (swiotlb_free(dev, page, size))
return;
dma_free_contiguous(dev, page, size);
}
+static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)
+{
+ struct page *page = swiotlb_alloc(dev, size);
+
+ if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
+ swiotlb_free(dev, page, size);
+ return NULL;
+ }
+
+ return page;
+}
+
static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
gfp_t gfp)
{
@@ -93,18 +123,11 @@ static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
WARN_ON_ONCE(!PAGE_ALIGNED(size));
+ if (is_swiotlb_for_alloc(dev))
+ return dma_direct_alloc_swiotlb(dev, size);
+
gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
&phys_limit);
- if (IS_ENABLED(CONFIG_DMA_RESTRICTED_POOL) &&
- is_swiotlb_for_alloc(dev)) {
- page = swiotlb_alloc(dev, size);
- if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
- __dma_direct_free_pages(dev, page, size);
- return NULL;
- }
- return page;
- }
-
page = dma_alloc_contiguous(dev, size, gfp);
if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
dma_free_contiguous(dev, page, size);
@@ -133,6 +156,15 @@ again:
return page;
}
+/*
+ * Check if a potentially blocking operation needs to dip into the atomic
+ * pools for the given device/gfp.
+ */
+static bool dma_direct_use_pool(struct device *dev, gfp_t gfp)
+{
+ return !gfpflags_allow_blocking(gfp) && !is_swiotlb_for_alloc(dev);
+}
+
static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
@@ -140,6 +172,9 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
u64 phys_mask;
void *ret;
+ if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_DMA_COHERENT_POOL)))
+ return NULL;
+
gfp |= dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
&phys_mask);
page = dma_alloc_from_pool(dev, size, &ret, gfp, dma_coherent_ok);
@@ -149,64 +184,103 @@ static void *dma_direct_alloc_from_pool(struct device *dev, size_t size,
return ret;
}
+static void *dma_direct_alloc_no_mapping(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ struct page *page;
+
+ page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
+ if (!page)
+ return NULL;
+
+ /* remove any dirty cache lines on the kernel alias */
+ if (!PageHighMem(page))
+ arch_dma_prep_coherent(page, size);
+
+ /* return the page pointer as the opaque cookie */
+ *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
+ return page;
+}
+
void *dma_direct_alloc(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
+ bool remap = false, set_uncached = false;
struct page *page;
void *ret;
- int err;
size = PAGE_ALIGN(size);
if (attrs & DMA_ATTR_NO_WARN)
gfp |= __GFP_NOWARN;
if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
- !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev)) {
- page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
- if (!page)
- return NULL;
- /* remove any dirty cache lines on the kernel alias */
- if (!PageHighMem(page))
- arch_dma_prep_coherent(page, size);
- *dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
- /* return the page pointer as the opaque cookie */
- return page;
- }
+ !force_dma_unencrypted(dev) && !is_swiotlb_for_alloc(dev))
+ return dma_direct_alloc_no_mapping(dev, size, dma_handle, gfp);
- if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
- !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
- !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
- !dev_is_dma_coherent(dev) &&
- !is_swiotlb_for_alloc(dev))
- return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
+ if (!dev_is_dma_coherent(dev)) {
+ /*
+ * Fallback to the arch handler if it exists. This should
+ * eventually go away.
+ */
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
+ !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+ !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+ !is_swiotlb_for_alloc(dev))
+ return arch_dma_alloc(dev, size, dma_handle, gfp,
+ attrs);
- if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
- !dev_is_dma_coherent(dev))
- return dma_alloc_from_global_coherent(dev, size, dma_handle);
+ /*
+ * If there is a global pool, always allocate from it for
+ * non-coherent devices.
+ */
+ if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL))
+ return dma_alloc_from_global_coherent(dev, size,
+ dma_handle);
+
+ /*
+ * Otherwise remap if the architecture is asking for it. But
+ * given that remapping memory is a blocking operation we'll
+ * instead have to dip into the atomic pools.
+ */
+ remap = IS_ENABLED(CONFIG_DMA_DIRECT_REMAP);
+ if (remap) {
+ if (dma_direct_use_pool(dev, gfp))
+ return dma_direct_alloc_from_pool(dev, size,
+ dma_handle, gfp);
+ } else {
+ if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED))
+ return NULL;
+ set_uncached = true;
+ }
+ }
/*
- * Remapping or decrypting memory may block. If either is required and
- * we can't block, allocate the memory from the atomic pools.
- * If restricted DMA (i.e., is_swiotlb_for_alloc) is required, one must
- * set up another device coherent pool by shared-dma-pool and use
- * dma_alloc_from_dev_coherent instead.
+ * Decrypting memory may block, so allocate the memory from the atomic
+ * pools if we can't block.
*/
- if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
- !gfpflags_allow_blocking(gfp) &&
- (force_dma_unencrypted(dev) ||
- (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
- !dev_is_dma_coherent(dev))) &&
- !is_swiotlb_for_alloc(dev))
+ if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
/* we always manually zero the memory once we are done */
page = __dma_direct_alloc_pages(dev, size, gfp & ~__GFP_ZERO);
if (!page)
return NULL;
+ if (PageHighMem(page)) {
+ /*
+ * Depending on the cma= arguments and per-arch setup,
+ * dma_alloc_contiguous could return highmem pages.
+ * Without remapping there is no way to return them here, so
+ * log an error and fail.
+ */
+ if (!IS_ENABLED(CONFIG_DMA_REMAP)) {
+ dev_info(dev, "Rejecting highmem page from CMA.\n");
+ goto out_free_pages;
+ }
+ remap = true;
+ set_uncached = false;
+ }
- if ((IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
- !dev_is_dma_coherent(dev)) ||
- (IS_ENABLED(CONFIG_DMA_REMAP) && PageHighMem(page))) {
+ if (remap) {
/* remove any dirty cache lines on the kernel alias */
arch_dma_prep_coherent(page, size);
@@ -216,56 +290,27 @@ void *dma_direct_alloc(struct device *dev, size_t size,
__builtin_return_address(0));
if (!ret)
goto out_free_pages;
- if (force_dma_unencrypted(dev)) {
- err = set_memory_decrypted((unsigned long)ret,
- 1 << get_order(size));
- if (err)
- goto out_free_pages;
- }
- memset(ret, 0, size);
- goto done;
- }
-
- if (PageHighMem(page)) {
- /*
- * Depending on the cma= arguments and per-arch setup
- * dma_alloc_contiguous could return highmem pages.
- * Without remapping there is no way to return them here,
- * so log an error and fail.
- */
- dev_info(dev, "Rejecting highmem page from CMA.\n");
- goto out_free_pages;
- }
-
- ret = page_address(page);
- if (force_dma_unencrypted(dev)) {
- err = set_memory_decrypted((unsigned long)ret,
- 1 << get_order(size));
- if (err)
+ } else {
+ ret = page_address(page);
+ if (dma_set_decrypted(dev, ret, size))
goto out_free_pages;
}
memset(ret, 0, size);
- if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
- !dev_is_dma_coherent(dev)) {
+ if (set_uncached) {
arch_dma_prep_coherent(page, size);
ret = arch_dma_set_uncached(ret, size);
if (IS_ERR(ret))
goto out_encrypt_pages;
}
-done:
+
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
return ret;
out_encrypt_pages:
- if (force_dma_unencrypted(dev)) {
- err = set_memory_encrypted((unsigned long)page_address(page),
- 1 << get_order(size));
- /* If memory cannot be re-encrypted, it must be leaked */
- if (err)
- return NULL;
- }
+ if (dma_set_encrypted(dev, page_address(page), size))
+ return NULL;
out_free_pages:
__dma_direct_free_pages(dev, page, size);
return NULL;
@@ -304,13 +349,14 @@ void dma_direct_free(struct device *dev, size_t size,
dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
return;
- if (force_dma_unencrypted(dev))
- set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
-
- if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr))
+ if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
vunmap(cpu_addr);
- else if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
- arch_dma_clear_uncached(cpu_addr, size);
+ } else {
+ if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_CLEAR_UNCACHED))
+ arch_dma_clear_uncached(cpu_addr, size);
+ if (dma_set_encrypted(dev, cpu_addr, 1 << page_order))
+ return;
+ }
__dma_direct_free_pages(dev, dma_direct_to_page(dev, dma_addr), size);
}
@@ -321,9 +367,7 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
struct page *page;
void *ret;
- if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
- force_dma_unencrypted(dev) && !gfpflags_allow_blocking(gfp) &&
- !is_swiotlb_for_alloc(dev))
+ if (force_dma_unencrypted(dev) && dma_direct_use_pool(dev, gfp))
return dma_direct_alloc_from_pool(dev, size, dma_handle, gfp);
page = __dma_direct_alloc_pages(dev, size, gfp);
@@ -341,11 +385,8 @@ struct page *dma_direct_alloc_pages(struct device *dev, size_t size,
}
ret = page_address(page);
- if (force_dma_unencrypted(dev)) {
- if (set_memory_decrypted((unsigned long)ret,
- 1 << get_order(size)))
- goto out_free_pages;
- }
+ if (dma_set_decrypted(dev, ret, size))
+ goto out_free_pages;
memset(ret, 0, size);
*dma_handle = phys_to_dma_direct(dev, page_to_phys(page));
return page;
@@ -366,9 +407,8 @@ void dma_direct_free_pages(struct device *dev, size_t size,
dma_free_from_pool(dev, vaddr, size))
return;
- if (force_dma_unencrypted(dev))
- set_memory_encrypted((unsigned long)vaddr, 1 << page_order);
-
+ if (dma_set_encrypted(dev, vaddr, 1 << page_order))
+ return;
__dma_direct_free_pages(dev, page, size);
}
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index 5f84e6cdb78e..4d40dcce7604 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -203,7 +203,7 @@ static int __init dma_atomic_pool_init(void)
GFP_KERNEL);
if (!atomic_pool_kernel)
ret = -ENOMEM;
- if (IS_ENABLED(CONFIG_ZONE_DMA)) {
+ if (has_managed_dma()) {
atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
GFP_KERNEL | GFP_DMA);
if (!atomic_pool_dma)
@@ -226,7 +226,7 @@ static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
if (prev == NULL) {
if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
return atomic_pool_dma32;
- if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
+ if (atomic_pool_dma && (gfp & GFP_DMA))
return atomic_pool_dma;
return atomic_pool_kernel;
}
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 8e840fbbed7c..f1e7ea160b43 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -50,6 +50,7 @@
#include <asm/io.h>
#include <asm/dma.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/iommu-helper.h>
@@ -72,6 +73,8 @@ enum swiotlb_force swiotlb_force;
struct io_tlb_mem io_tlb_default_mem;
+phys_addr_t swiotlb_unencrypted_base;
+
/*
 * Max segment that we can provide which (if pages are contiguous) will
* not be bounced (unless SWIOTLB_FORCE is set).
@@ -156,6 +159,34 @@ static inline unsigned long nr_slots(u64 val)
}
/*
+ * Remap swiotlb memory in the unencrypted physical address space
+ * when swiotlb_unencrypted_base is set. (e.g. for Hyper-V AMD SEV-SNP
+ * Isolation VMs).
+ */
+#ifdef CONFIG_HAS_IOMEM
+static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
+{
+ void *vaddr = NULL;
+
+ if (swiotlb_unencrypted_base) {
+ phys_addr_t paddr = mem->start + swiotlb_unencrypted_base;
+
+ vaddr = memremap(paddr, bytes, MEMREMAP_WB);
+ if (!vaddr)
+ pr_err("Failed to map the unencrypted memory %pa size %lx.\n",
+ &paddr, bytes);
+ }
+
+ return vaddr;
+}
+#else
+static void *swiotlb_mem_remap(struct io_tlb_mem *mem, unsigned long bytes)
+{
+ return NULL;
+}
+#endif
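Setting swiotlb_unencrypted_base itself is left to platform code outside this diff; for Hyper-V SEV-SNP isolation VMs it would plausibly be a one-liner in the platform init path (hypothetical sketch, names as used by the Hyper-V code):

	/* Bounce buffers live above the shared GPA boundary. */
	if (hv_is_isolation_supported())
		swiotlb_unencrypted_base = ms_hyperv.shared_gpa_boundary;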
+
+/*
* Early SWIOTLB allocation may be too early to allow an architecture to
* perform the desired operations. This function allows the architecture to
* call SWIOTLB when the operations are possible. It needs to be called
@@ -172,7 +203,12 @@ void __init swiotlb_update_mem_attributes(void)
vaddr = phys_to_virt(mem->start);
bytes = PAGE_ALIGN(mem->nslabs << IO_TLB_SHIFT);
set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
- memset(vaddr, 0, bytes);
+
+ mem->vaddr = swiotlb_mem_remap(mem, bytes);
+ if (!mem->vaddr)
+ mem->vaddr = vaddr;
+
+ memset(mem->vaddr, 0, bytes);
}
static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
@@ -196,7 +232,17 @@ static void swiotlb_init_io_tlb_mem(struct io_tlb_mem *mem, phys_addr_t start,
mem->slots[i].orig_addr = INVALID_PHYS_ADDR;
mem->slots[i].alloc_size = 0;
}
+
+ /*
+ * If swiotlb_unencrypted_base is set, the bounce buffer memory will
+ * be remapped and cleared in swiotlb_update_mem_attributes.
+ */
+ if (swiotlb_unencrypted_base)
+ return;
+
memset(vaddr, 0, bytes);
+ mem->vaddr = vaddr;
+ return;
}
int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
@@ -371,7 +417,7 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
phys_addr_t orig_addr = mem->slots[index].orig_addr;
size_t alloc_size = mem->slots[index].alloc_size;
unsigned long pfn = PFN_DOWN(orig_addr);
- unsigned char *vaddr = phys_to_virt(tlb_addr);
+ unsigned char *vaddr = mem->vaddr + tlb_addr - mem->start;
unsigned int tlb_offset, orig_addr_offset;
if (orig_addr == INVALID_PHYS_ADDR)
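With a remapped pool, phys_to_virt(tlb_addr) would point at the encrypted direct-map alias, so the bounce path derives the address from mem->vaddr instead. The arithmetic is a plain offset into the pool:

	tlb_addr - mem->start    /* byte offset of the slot in the pool  */
	mem->vaddr + offset      /* the same slot via the active mapping */

When no remap happened, mem->vaddr was set to phys_to_virt(mem->start) and the expression degenerates to the old behaviour.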
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1362b9b770d8..6859229497b1 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -674,6 +674,23 @@ perf_event_set_state(struct perf_event *event, enum perf_event_state state)
WRITE_ONCE(event->state, state);
}
+/*
+ * UP store-release, load-acquire
+ */
+
+#define __store_release(ptr, val) \
+do { \
+ barrier(); \
+ WRITE_ONCE(*(ptr), (val)); \
+} while (0)
+
+#define __load_acquire(ptr) \
+({ \
+ __unqual_scalar_typeof(*(ptr)) ___p = READ_ONCE(*(ptr)); \
+ barrier(); \
+ ___p; \
+})
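These are uniprocessor-strength release/acquire: barrier() only constrains the compiler, which is enough because writer and reader execute on the same CPU (the writer holds ctx->lock; the reader is an NMI or IRQ on that CPU). A minimal sketch of the publish/consume pattern they enable, mirroring the cgroup time code below:

	/* writer, same CPU as any reader: */
	WRITE_ONCE(info->timeoffset, info->time - info->timestamp); /* data    */
	__store_release(&info->active, 1);                          /* publish */

	/* lock-free reader (e.g. NMI): */
	if (__load_acquire(&info->active))      /* saw the publish...       */
		now += READ_ONCE(info->timeoffset); /* ...so the data is valid */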
+
#ifdef CONFIG_CGROUP_PERF
static inline bool
@@ -719,34 +736,51 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event)
return t->time;
}
-static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
+static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
{
- struct perf_cgroup_info *info;
- u64 now;
-
- now = perf_clock();
+ struct perf_cgroup_info *t;
- info = this_cpu_ptr(cgrp->info);
+ t = per_cpu_ptr(event->cgrp->info, event->cpu);
+ if (!__load_acquire(&t->active))
+ return t->time;
+ now += READ_ONCE(t->timeoffset);
+ return now;
+}
- info->time += now - info->timestamp;
+static inline void __update_cgrp_time(struct perf_cgroup_info *info, u64 now, bool adv)
+{
+ if (adv)
+ info->time += now - info->timestamp;
info->timestamp = now;
+ /*
+ * see update_context_time()
+ */
+ WRITE_ONCE(info->timeoffset, info->time - info->timestamp);
}
-static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
+static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx, bool final)
{
struct perf_cgroup *cgrp = cpuctx->cgrp;
struct cgroup_subsys_state *css;
+ struct perf_cgroup_info *info;
if (cgrp) {
+ u64 now = perf_clock();
+
for (css = &cgrp->css; css; css = css->parent) {
cgrp = container_of(css, struct perf_cgroup, css);
- __update_cgrp_time(cgrp);
+ info = this_cpu_ptr(cgrp->info);
+
+ __update_cgrp_time(info, now, true);
+ if (final)
+ __store_release(&info->active, 0);
}
}
}
static inline void update_cgrp_time_from_event(struct perf_event *event)
{
+ struct perf_cgroup_info *info;
struct perf_cgroup *cgrp;
/*
@@ -760,8 +794,10 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
/*
* Do not update time when cgroup is not active
*/
- if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
- __update_cgrp_time(event->cgrp);
+ if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) {
+ info = this_cpu_ptr(event->cgrp->info);
+ __update_cgrp_time(info, perf_clock(), true);
+ }
}
static inline void
@@ -785,7 +821,8 @@ perf_cgroup_set_timestamp(struct task_struct *task,
for (css = &cgrp->css; css; css = css->parent) {
cgrp = container_of(css, struct perf_cgroup, css);
info = this_cpu_ptr(cgrp->info);
- info->timestamp = ctx->timestamp;
+ __update_cgrp_time(info, ctx->timestamp, false);
+ __store_release(&info->active, 1);
}
}
@@ -802,7 +839,7 @@ static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
*/
static void perf_cgroup_switch(struct task_struct *task, int mode)
{
- struct perf_cpu_context *cpuctx;
+ struct perf_cpu_context *cpuctx, *tmp;
struct list_head *list;
unsigned long flags;
@@ -813,7 +850,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
local_irq_save(flags);
list = this_cpu_ptr(&cgrp_cpuctx_list);
- list_for_each_entry(cpuctx, list, cgrp_cpuctx_entry) {
+ list_for_each_entry_safe(cpuctx, tmp, list, cgrp_cpuctx_entry) {
WARN_ON_ONCE(cpuctx->ctx.nr_cgroups == 0);
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
@@ -982,14 +1019,6 @@ out:
}
static inline void
-perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
-{
- struct perf_cgroup_info *t;
- t = per_cpu_ptr(event->cgrp->info, event->cpu);
- event->shadow_ctx_time = now - t->timestamp;
-}
-
-static inline void
perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
{
struct perf_cpu_context *cpuctx;
@@ -1066,7 +1095,8 @@ static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}
-static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
+static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
+ bool final)
{
}
@@ -1098,12 +1128,12 @@ perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}
-static inline void
-perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
+static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
+ return 0;
}
-static inline u64 perf_cgroup_event_time(struct perf_event *event)
+static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now)
{
return 0;
}
@@ -1525,22 +1555,59 @@ static void perf_unpin_context(struct perf_event_context *ctx)
/*
* Update the record of the current time in a context.
*/
-static void update_context_time(struct perf_event_context *ctx)
+static void __update_context_time(struct perf_event_context *ctx, bool adv)
{
u64 now = perf_clock();
- ctx->time += now - ctx->timestamp;
+ if (adv)
+ ctx->time += now - ctx->timestamp;
ctx->timestamp = now;
+
+ /*
+ * The above: time' = time + (now - timestamp), can be re-arranged
+ * into: time' = now + (time - timestamp), which gives a single value
+ * offset to compute future time without taking locks.
+ *
+ * See perf_event_time_now(), which can be used from NMI context where
+ * it's (obviously) not possible to acquire ctx->lock in order to read
+ * both the above values in a consistent manner.
+ */
+ WRITE_ONCE(ctx->timeoffset, ctx->time - ctx->timestamp);
+}
+
+static void update_context_time(struct perf_event_context *ctx)
+{
+ __update_context_time(ctx, true);
}
static u64 perf_event_time(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
+ if (unlikely(!ctx))
+ return 0;
+
if (is_cgroup_event(event))
return perf_cgroup_event_time(event);
- return ctx ? ctx->time : 0;
+ return ctx->time;
+}
+
+static u64 perf_event_time_now(struct perf_event *event, u64 now)
+{
+ struct perf_event_context *ctx = event->ctx;
+
+ if (unlikely(!ctx))
+ return 0;
+
+ if (is_cgroup_event(event))
+ return perf_cgroup_event_time_now(event, now);
+
+ if (!(__load_acquire(&ctx->is_active) & EVENT_TIME))
+ return ctx->time;
+
+ now += READ_ONCE(ctx->timeoffset);
+ return now;
}
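A quick worked example of the rearrangement: with time = 100 at timestamp = 60, the stored timeoffset is 40; a lock-free reader at now = 75 computes 75 + 40 = 115, exactly what the locked path would produce via 100 + (75 - 60). The single READ_ONCE of timeoffset is what makes this safe from NMI context, provided EVENT_TIME is still set (otherwise ctx->time is frozen and used directly).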
static enum event_type_t get_event_type(struct perf_event *event)
@@ -2350,7 +2417,7 @@ __perf_remove_from_context(struct perf_event *event,
if (ctx->is_active & EVENT_TIME) {
update_context_time(ctx);
- update_cgrp_time_from_cpuctx(cpuctx);
+ update_cgrp_time_from_cpuctx(cpuctx, false);
}
event_sched_out(event, cpuctx, ctx);
@@ -2361,6 +2428,9 @@ __perf_remove_from_context(struct perf_event *event,
list_del_event(event, ctx);
if (!ctx->nr_events && ctx->is_active) {
+ if (ctx == &cpuctx->ctx)
+ update_cgrp_time_from_cpuctx(cpuctx, true);
+
ctx->is_active = 0;
ctx->rotate_necessary = 0;
if (ctx->task) {
@@ -2392,7 +2462,11 @@ static void perf_remove_from_context(struct perf_event *event, unsigned long fla
* event_function_call() user.
*/
raw_spin_lock_irq(&ctx->lock);
- if (!ctx->is_active) {
+ /*
+ * Cgroup events are per-cpu events, and must IPI because of
+ * cgrp_cpuctx_list.
+ */
+ if (!ctx->is_active && !is_cgroup_event(event)) {
__perf_remove_from_context(event, __get_cpu_context(ctx),
ctx, (void *)flags);
raw_spin_unlock_irq(&ctx->lock);
@@ -2482,40 +2556,6 @@ void perf_event_disable_inatomic(struct perf_event *event)
irq_work_queue(&event->pending);
}
-static void perf_set_shadow_time(struct perf_event *event,
- struct perf_event_context *ctx)
-{
- /*
- * use the correct time source for the time snapshot
- *
- * We could get by without this by leveraging the
- * fact that to get to this function, the caller
- * has most likely already called update_context_time()
- * and update_cgrp_time_xx() and thus both timestamp
- * are identical (or very close). Given that tstamp is,
- * already adjusted for cgroup, we could say that:
- * tstamp - ctx->timestamp
- * is equivalent to
- * tstamp - cgrp->timestamp.
- *
- * Then, in perf_output_read(), the calculation would
- * work with no changes because:
- * - event is guaranteed scheduled in
- * - no scheduled out in between
- * - thus the timestamp would be the same
- *
- * But this is a bit hairy.
- *
- * So instead, we have an explicit cgroup call to remain
- * within the time source all along. We believe it
- * is cleaner and simpler to understand.
- */
- if (is_cgroup_event(event))
- perf_cgroup_set_shadow_time(event, event->tstamp);
- else
- event->shadow_ctx_time = event->tstamp - ctx->timestamp;
-}
-
#define MAX_INTERRUPTS (~0ULL)
static void perf_log_throttle(struct perf_event *event, int enable);
@@ -2556,8 +2596,6 @@ event_sched_in(struct perf_event *event,
perf_pmu_disable(event->pmu);
- perf_set_shadow_time(event, ctx);
-
perf_log_itrace_start(event);
if (event->pmu->add(event, PERF_EF_START)) {
@@ -2861,11 +2899,14 @@ perf_install_in_context(struct perf_event_context *ctx,
* perf_event_attr::disabled events will not run and can be initialized
* without IPI. Except when this is the first event for the context, in
* that case we need the magic of the IPI to set ctx->is_active.
+ * Similarly, cgroup events for the context also need the IPI to
+ * manipulate the cgrp_cpuctx_list.
*
* The IOC_ENABLE that is sure to follow the creation of a disabled
* event will issue the IPI and reprogram the hardware.
*/
- if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && ctx->nr_events) {
+ if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF &&
+ ctx->nr_events && !is_cgroup_event(event)) {
raw_spin_lock_irq(&ctx->lock);
if (ctx->task == TASK_TOMBSTONE) {
raw_spin_unlock_irq(&ctx->lock);
@@ -3197,6 +3238,15 @@ static int perf_event_modify_breakpoint(struct perf_event *bp,
return err;
}
+/*
+ * Copy event-type-independent attributes that may be modified.
+ */
+static void perf_event_modify_copy_attr(struct perf_event_attr *to,
+ const struct perf_event_attr *from)
+{
+ to->sig_data = from->sig_data;
+}
+
static int perf_event_modify_attr(struct perf_event *event,
struct perf_event_attr *attr)
{
@@ -3219,10 +3269,17 @@ static int perf_event_modify_attr(struct perf_event *event,
WARN_ON_ONCE(event->ctx->parent_ctx);
mutex_lock(&event->child_mutex);
+ /*
+ * Event-type-independent attributes must be copied before event-type
+ * modification, which will validate that final attributes match the
+ * source attributes after all relevant attributes have been copied.
+ */
+ perf_event_modify_copy_attr(&event->attr, attr);
err = func(event, attr);
if (err)
goto out;
list_for_each_entry(child, &event->child_list, child_list) {
+ perf_event_modify_copy_attr(&child->attr, attr);
err = func(child, attr);
if (err)
goto out;
@@ -3251,16 +3308,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
return;
}
- ctx->is_active &= ~event_type;
- if (!(ctx->is_active & EVENT_ALL))
- ctx->is_active = 0;
-
- if (ctx->task) {
- WARN_ON_ONCE(cpuctx->task_ctx != ctx);
- if (!ctx->is_active)
- cpuctx->task_ctx = NULL;
- }
-
/*
* Always update time if it was set; not only when it changes.
* Otherwise we can 'forget' to update time for any but the last
@@ -3274,7 +3321,22 @@ static void ctx_sched_out(struct perf_event_context *ctx,
if (is_active & EVENT_TIME) {
/* update (and stop) ctx time */
update_context_time(ctx);
- update_cgrp_time_from_cpuctx(cpuctx);
+ update_cgrp_time_from_cpuctx(cpuctx, ctx == &cpuctx->ctx);
+ /*
+ * CPU-release for the below ->is_active store,
+ * see __load_acquire() in perf_event_time_now()
+ */
+ barrier();
+ }
+
+ ctx->is_active &= ~event_type;
+ if (!(ctx->is_active & EVENT_ALL))
+ ctx->is_active = 0;
+
+ if (ctx->task) {
+ WARN_ON_ONCE(cpuctx->task_ctx != ctx);
+ if (!ctx->is_active)
+ cpuctx->task_ctx = NULL;
}
is_active ^= ctx->is_active; /* changed bits */
@@ -3711,13 +3773,19 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
return 0;
}
+/*
+ * Because the userpage is strictly per-event (there is no concept of context,
+ * so there cannot be a context indirection), every userpage must be updated
+ * when context time starts :-(
+ *
+ * IOW, we must not miss EVENT_TIME edges.
+ */
static inline bool event_update_userpage(struct perf_event *event)
{
if (likely(!atomic_read(&event->mmap_count)))
return false;
perf_event_update_time(event);
- perf_set_shadow_time(event, event->ctx);
perf_event_update_userpage(event);
return true;
@@ -3801,13 +3869,23 @@ ctx_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
int is_active = ctx->is_active;
- u64 now;
lockdep_assert_held(&ctx->lock);
if (likely(!ctx->nr_events))
return;
+ if (is_active ^ EVENT_TIME) {
+ /* start ctx time */
+ __update_context_time(ctx, false);
+ perf_cgroup_set_timestamp(task, ctx);
+ /*
+ * CPU-release for the below ->is_active store,
+ * see __load_acquire() in perf_event_time_now()
+ */
+ barrier();
+ }
+
ctx->is_active |= (event_type | EVENT_TIME);
if (ctx->task) {
if (!is_active)
@@ -3818,13 +3896,6 @@ ctx_sched_in(struct perf_event_context *ctx,
is_active ^= ctx->is_active; /* changed bits */
- if (is_active & EVENT_TIME) {
- /* start ctx time */
- now = perf_clock();
- ctx->timestamp = now;
- perf_cgroup_set_timestamp(task, ctx);
- }
-
/*
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
@@ -4418,6 +4489,18 @@ static inline u64 perf_event_count(struct perf_event *event)
return local64_read(&event->count) + atomic64_read(&event->child_count);
}
+static void calc_timer_values(struct perf_event *event,
+ u64 *now,
+ u64 *enabled,
+ u64 *running)
+{
+ u64 ctx_time;
+
+ *now = perf_clock();
+ ctx_time = perf_event_time_now(event, *now);
+ __perf_update_times(event, ctx_time, enabled, running);
+}
+
/*
* NMI-safe method to read a local event, that is an event that
* is:
@@ -4477,10 +4560,9 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
*value = local64_read(&event->count);
if (enabled || running) {
- u64 now = event->shadow_ctx_time + perf_clock();
- u64 __enabled, __running;
+ u64 __enabled, __running, __now;
- __perf_update_times(event, now, &__enabled, &__running);
+ calc_timer_values(event, &__now, &__enabled, &__running);
if (enabled)
*enabled = __enabled;
if (running)
@@ -5802,18 +5884,6 @@ static int perf_event_index(struct perf_event *event)
return event->pmu->event_idx(event);
}
-static void calc_timer_values(struct perf_event *event,
- u64 *now,
- u64 *enabled,
- u64 *running)
-{
- u64 ctx_time;
-
- *now = perf_clock();
- ctx_time = event->shadow_ctx_time + *now;
- __perf_update_times(event, ctx_time, enabled, running);
-}
-
static void perf_event_init_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
@@ -5938,6 +6008,8 @@ static void ring_buffer_attach(struct perf_event *event,
struct perf_buffer *old_rb = NULL;
unsigned long flags;
+ WARN_ON_ONCE(event->parent);
+
if (event->rb) {
/*
* Should be impossible, we set this when removing
@@ -5995,6 +6067,9 @@ static void ring_buffer_wakeup(struct perf_event *event)
{
struct perf_buffer *rb;
+ if (event->parent)
+ event = event->parent;
+
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
@@ -6008,6 +6083,9 @@ struct perf_buffer *ring_buffer_get(struct perf_event *event)
{
struct perf_buffer *rb;
+ if (event->parent)
+ event = event->parent;
+
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
@@ -6353,7 +6431,6 @@ accounting:
ring_buffer_attach(event, rb);
perf_event_update_time(event);
- perf_set_shadow_time(event, event->ctx);
perf_event_init_userpage(event);
perf_event_update_userpage(event);
} else {
@@ -6525,26 +6602,43 @@ static void perf_pending_event(struct irq_work *entry)
perf_swevent_put_recursion_context(rctx);
}
-/*
- * We assume there is only KVM supporting the callbacks.
- * Later on, we might change it to a list if there is
- * another virtualization implementation supporting the callbacks.
- */
-struct perf_guest_info_callbacks *perf_guest_cbs;
+#ifdef CONFIG_GUEST_PERF_EVENTS
+struct perf_guest_info_callbacks __rcu *perf_guest_cbs;
+
+DEFINE_STATIC_CALL_RET0(__perf_guest_state, *perf_guest_cbs->state);
+DEFINE_STATIC_CALL_RET0(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
+DEFINE_STATIC_CALL_RET0(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
-int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
- perf_guest_cbs = cbs;
- return 0;
+ if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs)))
+ return;
+
+ rcu_assign_pointer(perf_guest_cbs, cbs);
+ static_call_update(__perf_guest_state, cbs->state);
+ static_call_update(__perf_guest_get_ip, cbs->get_ip);
+
+ /* Implementing ->handle_intel_pt_intr is optional. */
+ if (cbs->handle_intel_pt_intr)
+ static_call_update(__perf_guest_handle_intel_pt_intr,
+ cbs->handle_intel_pt_intr);
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
-int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
+void perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
- perf_guest_cbs = NULL;
- return 0;
+ if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) != cbs))
+ return;
+
+ rcu_assign_pointer(perf_guest_cbs, NULL);
+ static_call_update(__perf_guest_state, (void *)&__static_call_return0);
+ static_call_update(__perf_guest_get_ip, (void *)&__static_call_return0);
+ static_call_update(__perf_guest_handle_intel_pt_intr,
+ (void *)&__static_call_return0);
+ synchronize_rcu();
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
+#endif
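KVM is the only expected registrant; under the new API its hookup would look roughly like this (sketch; the callback names are assumptions based on this series' KVM side):

static struct perf_guest_info_callbacks kvm_guest_cbs = {
	.state			= kvm_guest_state,
	.get_ip			= kvm_guest_get_ip,
	.handle_intel_pt_intr	= NULL,	/* optional, set on Intel PT hosts */
};

	/* module init / exit */
	perf_register_guest_info_callbacks(&kvm_guest_cbs);
	...
	perf_unregister_guest_info_callbacks(&kvm_guest_cbs);

The synchronize_rcu() in the unregister path guarantees no in-flight NMI still dereferences the callbacks once it returns, and the static calls drop back to return-0 stubs.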
static void
perf_output_sample_regs(struct perf_output_handle *handle,
@@ -6700,7 +6794,7 @@ static unsigned long perf_prepare_sample_aux(struct perf_event *event,
if (WARN_ON_ONCE(READ_ONCE(sampler->oncpu) != smp_processor_id()))
goto out;
- rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler);
+ rb = ring_buffer_get(sampler);
if (!rb)
goto out;
@@ -6766,7 +6860,7 @@ static void perf_aux_sample_output(struct perf_event *event,
if (WARN_ON_ONCE(!sampler || !data->aux_size))
return;
- rb = ring_buffer_get(sampler->parent ? sampler->parent : sampler);
+ rb = ring_buffer_get(sampler);
if (!rb)
return;
diff --git a/kernel/exit.c b/kernel/exit.c
index f702a6a63686..b00a25bb4ab9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -116,7 +116,7 @@ static void __exit_signal(struct task_struct *tsk)
* then notify it:
*/
if (sig->notify_count > 0 && !--sig->notify_count)
- wake_up_process(sig->group_exit_task);
+ wake_up_process(sig->group_exec_task);
if (tsk == sig->curr_target)
sig->curr_target = next_thread(tsk);
@@ -697,7 +697,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
/* mt-exec, de_thread() is waiting for group leader */
if (unlikely(tsk->signal->notify_count < 0))
- wake_up_process(tsk->signal->group_exit_task);
+ wake_up_process(tsk->signal->group_exec_task);
write_unlock_irq(&tasklist_lock);
list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
@@ -735,37 +735,22 @@ void __noreturn do_exit(long code)
struct task_struct *tsk = current;
int group_dead;
- /*
- * We can get here from a kernel oops, sometimes with preemption off.
- * Start by checking for critical errors.
- * Then fix up important state like USER_DS and preemption.
- * Then do everything else.
- */
-
WARN_ON(blk_needs_flush_plug(tsk));
- if (unlikely(in_interrupt()))
- panic("Aiee, killing interrupt handler!");
- if (unlikely(!tsk->pid))
- panic("Attempted to kill the idle task!");
-
/*
- * If do_exit is called because this processes oopsed, it's possible
+ * If make_task_dead() is called because this process oopsed, it's possible
* that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
* continuing. Amongst other possible reasons, this is to prevent
* mm_release()->clear_child_tid() from writing to a user-controlled
* kernel address.
+ *
+ * On up-to-date architectures force_uaccess_begin() is a no-op. On
+ * architectures that still have set_fs/get_fs, this, in addition to
+ * handling oopses, also covers kernel threads that run with
+ * set_fs(KERNEL_DS) by default.
*/
force_uaccess_begin();
- if (unlikely(in_atomic())) {
- pr_info("note: %s[%d] exited with preempt_count %d\n",
- current->comm, task_pid_nr(current),
- preempt_count());
- preempt_count_set(PREEMPT_ENABLED);
- }
-
- profile_task_exit(tsk);
kcov_task_exit(tsk);
coredump_task_exit(tsk);
@@ -773,17 +758,6 @@ void __noreturn do_exit(long code)
validate_creds_for_do_exit(tsk);
- /*
- * We're taking recursive faults here in do_exit. Safest is to just
- * leave this task alone and wait for reboot.
- */
- if (unlikely(tsk->flags & PF_EXITING)) {
- pr_alert("Fixing recursive fault but reboot is needed!\n");
- futex_exit_recursive(tsk);
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule();
- }
-
io_uring_files_cancel();
exit_signals(tsk); /* sets PF_EXITING */
@@ -882,16 +856,46 @@ void __noreturn do_exit(long code)
lockdep_free_task(tsk);
do_task_dead();
}
-EXPORT_SYMBOL_GPL(do_exit);
-void complete_and_exit(struct completion *comp, long code)
+void __noreturn make_task_dead(int signr)
{
- if (comp)
- complete(comp);
+ /*
+ * Take the task off the cpu after something catastrophic has
+ * happened.
+ *
+ * We can get here from a kernel oops, sometimes with preemption off.
+ * Start by checking for critical errors.
+ * Then fix up important state like USER_DS and preemption.
+ * Then do everything else.
+ */
+ struct task_struct *tsk = current;
- do_exit(code);
+ if (unlikely(in_interrupt()))
+ panic("Aiee, killing interrupt handler!");
+ if (unlikely(!tsk->pid))
+ panic("Attempted to kill the idle task!");
+
+ if (unlikely(in_atomic())) {
+ pr_info("note: %s[%d] exited with preempt_count %d\n",
+ current->comm, task_pid_nr(current),
+ preempt_count());
+ preempt_count_set(PREEMPT_ENABLED);
+ }
+
+ /*
+ * We're taking recursive faults here in make_task_dead. Safest is to just
+ * leave this task alone and wait for reboot.
+ */
+ if (unlikely(tsk->flags & PF_EXITING)) {
+ pr_alert("Fixing recursive fault but reboot is needed!\n");
+ futex_exit_recursive(tsk);
+ tsk->exit_state = EXIT_DEAD;
+ refcount_inc(&tsk->rcu_users);
+ do_task_dead();
+ }
+
+ do_exit(signr);
}
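The intended callers are the oops/BUG paths that used to call do_exit() directly; an architecture trap handler would now end with something like (hypothetical sketch):

static void __noreturn die(struct pt_regs *regs, const char *str)
{
	/* dump registers, notify the die chain, etc. */
	make_task_dead(SIGSEGV);	/* was: do_exit(SIGSEGV) */
}

This keeps the catastrophic-failure fixups (preempt count, recursive faults) out of the normal exit path.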
-EXPORT_SYMBOL(complete_and_exit);
SYSCALL_DEFINE1(exit, int, error_code)
{
@@ -907,17 +911,19 @@ do_group_exit(int exit_code)
{
struct signal_struct *sig = current->signal;
- BUG_ON(exit_code & 0x80); /* core dumps don't get here */
-
- if (signal_group_exit(sig))
+ if (sig->flags & SIGNAL_GROUP_EXIT)
exit_code = sig->group_exit_code;
+ else if (sig->group_exec_task)
+ exit_code = 0;
else if (!thread_group_empty(current)) {
struct sighand_struct *const sighand = current->sighand;
spin_lock_irq(&sighand->siglock);
- if (signal_group_exit(sig))
+ if (sig->flags & SIGNAL_GROUP_EXIT)
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
+ else if (sig->group_exec_task)
+ exit_code = 0;
else {
sig->group_exit_code = exit_code;
sig->flags = SIGNAL_GROUP_EXIT;
@@ -1012,7 +1018,8 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
return 0;
if (unlikely(wo->wo_flags & WNOWAIT)) {
- status = p->exit_code;
+ status = (p->signal->flags & SIGNAL_GROUP_EXIT)
+ ? p->signal->group_exit_code : p->exit_code;
get_task_struct(p);
read_unlock(&tasklist_lock);
sched_annotate_sleep();
diff --git a/kernel/fork.c b/kernel/fork.c
index 3244cc56b697..a024bf6254df 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -42,6 +42,7 @@
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/mm_inline.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
@@ -365,12 +366,14 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
*new = data_race(*orig);
INIT_LIST_HEAD(&new->anon_vma_chain);
new->vm_next = new->vm_prev = NULL;
+ dup_vma_anon_name(orig, new);
}
return new;
}
void vm_area_free(struct vm_area_struct *vma)
{
+ free_vma_anon_name(vma);
kmem_cache_free(vm_area_cachep, vma);
}
@@ -754,9 +757,7 @@ void __put_task_struct(struct task_struct *tsk)
delayacct_tsk_free(tsk);
put_signal_struct(tsk->signal);
sched_core_free(tsk);
-
- if (!profile_handoff_task(tsk))
- free_task(tsk);
+ free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);
@@ -950,7 +951,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->splice_pipe = NULL;
tsk->task_frag.page = NULL;
tsk->wake_q.next = NULL;
- tsk->pf_io_worker = NULL;
+ tsk->worker_private = NULL;
account_kernel_stack(tsk, 1);
@@ -1556,32 +1557,6 @@ out:
return error;
}
-static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
-{
-#ifdef CONFIG_BLOCK
- struct io_context *ioc = current->io_context;
- struct io_context *new_ioc;
-
- if (!ioc)
- return 0;
- /*
- * Share io context with parent, if CLONE_IO is set
- */
- if (clone_flags & CLONE_IO) {
- ioc_task_link(ioc);
- tsk->io_context = ioc;
- } else if (ioprio_valid(ioc->ioprio)) {
- new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
- if (unlikely(!new_ioc))
- return -ENOMEM;
-
- new_ioc->ioprio = ioc->ioprio;
- put_io_context(new_ioc);
- }
-#endif
- return 0;
-}
-
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
struct sighand_struct *sig;
@@ -2032,12 +2007,6 @@ static __latent_entropy struct task_struct *copy_process(
siginitsetinv(&p->blocked, sigmask(SIGKILL)|sigmask(SIGSTOP));
}
- /*
- * This _must_ happen before we call free_task(), i.e. before we jump
- * to any of the bad_fork_* labels. This is to avoid freeing
- * p->set_child_tid which is (ab)used as a kthread's data pointer for
- * kernel threads (PF_KTHREAD).
- */
p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? args->child_tid : NULL;
/*
* Clear TID on mm_release()?
@@ -2052,18 +2021,18 @@ static __latent_entropy struct task_struct *copy_process(
#ifdef CONFIG_PROVE_LOCKING
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
+ retval = copy_creds(p, clone_flags);
+ if (retval < 0)
+ goto bad_fork_free;
+
retval = -EAGAIN;
if (is_ucounts_overlimit(task_ucounts(p), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
if (p->real_cred->user != INIT_USER &&
!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
- goto bad_fork_free;
+ goto bad_fork_cleanup_count;
}
current->flags &= ~PF_NPROC_EXCEEDED;
- retval = copy_creds(p, clone_flags);
- if (retval < 0)
- goto bad_fork_free;
-
/*
* If multiple threads are within copy_process(), then this check
* triggers too late. This doesn't hurt, the check is only there
@@ -2118,12 +2087,16 @@ static __latent_entropy struct task_struct *copy_process(
p->io_context = NULL;
audit_set_context(p, NULL);
cgroup_fork(p);
+ if (p->flags & PF_KTHREAD) {
+ if (!set_kthread_struct(p))
+ goto bad_fork_cleanup_delayacct;
+ }
#ifdef CONFIG_NUMA
p->mempolicy = mpol_dup(p->mempolicy);
if (IS_ERR(p->mempolicy)) {
retval = PTR_ERR(p->mempolicy);
p->mempolicy = NULL;
- goto bad_fork_cleanup_threadgroup_lock;
+ goto bad_fork_cleanup_delayacct;
}
#endif
#ifdef CONFIG_CPUSETS
@@ -2294,6 +2267,17 @@ static __latent_entropy struct task_struct *copy_process(
goto bad_fork_put_pidfd;
/*
+ * Now that the cgroups are pinned, re-clone the parent cgroup and put
+ * the new task on the correct runqueue. All this *before* the task
+ * becomes visible.
+ *
+ * This isn't part of ->can_fork() because while the re-cloning is
+ * cgroup specific, it unconditionally needs to place the task on a
+ * runqueue.
+ */
+ sched_cgroup_fork(p, args);
+
+ /*
* From this point on we must avoid any synchronous user-space
* communication until we take the tasklist-lock. In particular, we do
* not want user-space to be able to predict the process start-time by
@@ -2350,10 +2334,6 @@ static __latent_entropy struct task_struct *copy_process(
goto bad_fork_cancel_cgroup;
}
- /* past the last point of failure */
- if (pidfile)
- fd_install(pidfd, pidfile);
-
init_task_pid_links(p);
if (likely(p->pid)) {
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -2402,8 +2382,11 @@ static __latent_entropy struct task_struct *copy_process(
syscall_tracepoint_update(p);
write_unlock_irq(&tasklist_lock);
+ if (pidfile)
+ fd_install(pidfd, pidfile);
+
proc_fork_connector(p);
- sched_post_fork(p, args);
+ sched_post_fork(p);
cgroup_post_fork(p, args);
perf_event_fork(p);
@@ -2460,8 +2443,8 @@ bad_fork_cleanup_policy:
lockdep_free_task(p);
#ifdef CONFIG_NUMA
mpol_put(p->mempolicy);
-bad_fork_cleanup_threadgroup_lock:
#endif
+bad_fork_cleanup_delayacct:
delayacct_tsk_free(p);
bad_fork_cleanup_count:
dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index 926c2bb752bc..51dd822a8060 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -1031,7 +1031,7 @@ static void futex_cleanup(struct task_struct *tsk)
* actually finished the futex cleanup. The worst case for this is that the
* waiter runs through the wait loop until the state becomes visible.
*
- * This is called from the recursive fault handling path in do_exit().
+ * This is called from the recursive fault handling path in make_task_dead().
*
* This is best effort. Either the futex exit code has run already or
* not. If the OWNER_DIED bit has been set on the futex then the waiter can
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 053447183ac5..04f4ebdc3cf5 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -4,7 +4,6 @@ menu "GCOV-based kernel profiling"
config GCOV_KERNEL
bool "Enable gcov-based kernel profiling"
depends on DEBUG_FS
- depends on !CC_IS_CLANG || CLANG_VERSION >= 110000
depends on !ARCH_WANTS_NO_INSTR || CC_HAS_NO_PROFILE_FN_ATTR
select CONSTRUCTORS
default n
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 9888e2bc8c76..52501e5f7655 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -63,7 +63,9 @@ static struct task_struct *watchdog_task;
* Should we dump all CPUs backtraces in a hung task event?
* Defaults to 0, can be changed via sysctl.
*/
-unsigned int __read_mostly sysctl_hung_task_all_cpu_backtrace;
+static unsigned int __read_mostly sysctl_hung_task_all_cpu_backtrace;
+#else
+#define sysctl_hung_task_all_cpu_backtrace 0
#endif /* CONFIG_SMP */
/*
@@ -222,11 +224,13 @@ static long hung_timeout_jiffies(unsigned long last_checked,
MAX_SCHEDULE_TIMEOUT;
}
+#ifdef CONFIG_SYSCTL
/*
* Process updating of timeout sysctl
*/
-int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+static int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
{
int ret;
@@ -241,6 +245,76 @@ int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
return ret;
}
+/*
+ * This is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs
+ * and hung_task_check_interval_secs
+ */
+static const unsigned long hung_task_timeout_max = (LONG_MAX / HZ);
+static struct ctl_table hung_task_sysctls[] = {
+#ifdef CONFIG_SMP
+ {
+ .procname = "hung_task_all_cpu_backtrace",
+ .data = &sysctl_hung_task_all_cpu_backtrace,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+#endif /* CONFIG_SMP */
+ {
+ .procname = "hung_task_panic",
+ .data = &sysctl_hung_task_panic,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "hung_task_check_count",
+ .data = &sysctl_hung_task_check_count,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ },
+ {
+ .procname = "hung_task_timeout_secs",
+ .data = &sysctl_hung_task_timeout_secs,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = proc_dohung_task_timeout_secs,
+ .extra2 = (void *)&hung_task_timeout_max,
+ },
+ {
+ .procname = "hung_task_check_interval_secs",
+ .data = &sysctl_hung_task_check_interval_secs,
+ .maxlen = sizeof(unsigned long),
+ .mode = 0644,
+ .proc_handler = proc_dohung_task_timeout_secs,
+ .extra2 = (void *)&hung_task_timeout_max,
+ },
+ {
+ .procname = "hung_task_warnings",
+ .data = &sysctl_hung_task_warnings,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_NEG_ONE,
+ },
+ {}
+};
+
+static void __init hung_task_sysctl_init(void)
+{
+ register_sysctl_init("kernel", hung_task_sysctls);
+}
+#else
+#define hung_task_sysctl_init() do { } while (0)
+#endif /* CONFIG_SYSCTL */
+
+
static atomic_t reset_hung_task = ATOMIC_INIT(0);
void reset_hung_task_detector(void)
@@ -310,6 +384,7 @@ static int __init hung_task_init(void)
pm_notifier(hungtask_pm_notify, 0);
watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");
+ hung_task_sysctl_init();
return 0;
}
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index 6f29bf4c8515..f0862eb6b506 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -451,7 +451,7 @@ static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
}
-struct irq_domain_ops irq_generic_chip_ops = {
+const struct irq_domain_ops irq_generic_chip_ops = {
.map = irq_map_generic_chip,
.unmap = irq_unmap_generic_chip,
.xlate = irq_domain_xlate_onetwocell,
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7405e384e5ed..f23ffd30385b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -486,7 +486,8 @@ int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
}
EXPORT_SYMBOL_GPL(irq_force_affinity);
-int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
+int __irq_apply_affinity_hint(unsigned int irq, const struct cpumask *m,
+ bool setaffinity)
{
unsigned long flags;
struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
@@ -495,12 +496,11 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
return -EINVAL;
desc->affinity_hint = m;
irq_put_desc_unlock(desc, flags);
- /* set the initial affinity to prevent every interrupt being on CPU0 */
- if (m)
+ if (m && setaffinity)
__irq_set_affinity(irq, m, false);
return 0;
}
-EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+EXPORT_SYMBOL_GPL(__irq_apply_affinity_hint);
static void irq_affinity_notify(struct work_struct *work)
{
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 7f350ae59c5f..2bdfce5edafd 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -14,12 +14,15 @@
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>
+#include <linux/sysfs.h>
#include <linux/pci.h>
#include "internals.h"
+static inline int msi_sysfs_create_group(struct device *dev);
+
/**
- * alloc_msi_entry - Allocate an initialized msi_desc
+ * msi_alloc_desc - Allocate an initialized msi_desc
* @dev: Pointer to the device for which this is allocated
* @nvec: The number of vectors used in this entry
* @affinity: Optional pointer to an affinity mask array size of @nvec
@@ -29,34 +32,134 @@
*
* Return: pointer to allocated &msi_desc on success or %NULL on failure
*/
-struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
- const struct irq_affinity_desc *affinity)
+static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
+ const struct irq_affinity_desc *affinity)
{
- struct msi_desc *desc;
+ struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
- desc = kzalloc(sizeof(*desc), GFP_KERNEL);
if (!desc)
return NULL;
- INIT_LIST_HEAD(&desc->list);
desc->dev = dev;
desc->nvec_used = nvec;
if (affinity) {
- desc->affinity = kmemdup(affinity,
- nvec * sizeof(*desc->affinity), GFP_KERNEL);
+ desc->affinity = kmemdup(affinity, nvec * sizeof(*desc->affinity), GFP_KERNEL);
if (!desc->affinity) {
kfree(desc);
return NULL;
}
}
-
return desc;
}
-void free_msi_entry(struct msi_desc *entry)
+static void msi_free_desc(struct msi_desc *desc)
{
- kfree(entry->affinity);
- kfree(entry);
+ kfree(desc->affinity);
+ kfree(desc);
+}
+
+static int msi_insert_desc(struct msi_device_data *md, struct msi_desc *desc, unsigned int index)
+{
+ int ret;
+
+ desc->msi_index = index;
+ ret = xa_insert(&md->__store, index, desc, GFP_KERNEL);
+ if (ret)
+ msi_free_desc(desc);
+ return ret;
+}
+
+/**
+ * msi_add_msi_desc - Allocate and initialize a MSI descriptor
+ * @dev: Pointer to the device for which the descriptor is allocated
+ * @init_desc: Pointer to an MSI descriptor to initialize the new descriptor
+ *
+ * Return: 0 on success or an appropriate failure code.
+ */
+int msi_add_msi_desc(struct device *dev, struct msi_desc *init_desc)
+{
+ struct msi_desc *desc;
+
+ lockdep_assert_held(&dev->msi.data->mutex);
+
+ desc = msi_alloc_desc(dev, init_desc->nvec_used, init_desc->affinity);
+ if (!desc)
+ return -ENOMEM;
+
+ /* Copy type specific data to the new descriptor. */
+ desc->pci = init_desc->pci;
+ return msi_insert_desc(dev->msi.data, desc, init_desc->msi_index);
+}
+
+/**
+ * msi_add_simple_msi_descs - Allocate and initialize MSI descriptors
+ * @dev: Pointer to the device for which the descriptors are allocated
+ * @index: Index for the first MSI descriptor
+ * @ndesc: Number of descriptors to allocate
+ *
+ * Return: 0 on success or an appropriate failure code.
+ */
+static int msi_add_simple_msi_descs(struct device *dev, unsigned int index, unsigned int ndesc)
+{
+ unsigned int idx, last = index + ndesc - 1;
+ struct msi_desc *desc;
+ int ret;
+
+ lockdep_assert_held(&dev->msi.data->mutex);
+
+ for (idx = index; idx <= last; idx++) {
+ desc = msi_alloc_desc(dev, 1, NULL);
+ if (!desc)
+ goto fail_mem;
+ ret = msi_insert_desc(dev->msi.data, desc, idx);
+ if (ret)
+ goto fail;
+ }
+ return 0;
+
+fail_mem:
+ ret = -ENOMEM;
+fail:
+ msi_free_msi_descs_range(dev, MSI_DESC_NOTASSOCIATED, index, last);
+ return ret;
+}
+
+static bool msi_desc_match(struct msi_desc *desc, enum msi_desc_filter filter)
+{
+ switch (filter) {
+ case MSI_DESC_ALL:
+ return true;
+ case MSI_DESC_NOTASSOCIATED:
+ return !desc->irq;
+ case MSI_DESC_ASSOCIATED:
+ return !!desc->irq;
+ }
+ WARN_ON_ONCE(1);
+ return false;
+}
+
+/**
+ * msi_free_msi_descs_range - Free MSI descriptors of a device
+ * @dev: Device to free the descriptors
+ * @filter: Descriptor state filter
+ * @first_index: Index to start freeing from
+ * @last_index: Last index to be freed
+ */
+void msi_free_msi_descs_range(struct device *dev, enum msi_desc_filter filter,
+ unsigned int first_index, unsigned int last_index)
+{
+ struct xarray *xa = &dev->msi.data->__store;
+ struct msi_desc *desc;
+ unsigned long idx;
+
+ lockdep_assert_held(&dev->msi.data->mutex);
+
+ xa_for_each_range(xa, idx, desc, first_index, last_index) {
+ if (msi_desc_match(desc, filter)) {
+ xa_erase(xa, idx);
+ msi_free_desc(desc);
+ }
+ }
}
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
@@ -72,139 +175,290 @@ void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
}
EXPORT_SYMBOL_GPL(get_cached_msi_msg);
-static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+static void msi_device_data_release(struct device *dev, void *res)
{
- struct msi_desc *entry;
- bool is_msix = false;
- unsigned long irq;
- int retval;
+ struct msi_device_data *md = res;
- retval = kstrtoul(attr->attr.name, 10, &irq);
- if (retval)
- return retval;
+ WARN_ON_ONCE(!xa_empty(&md->__store));
+ xa_destroy(&md->__store);
+ dev->msi.data = NULL;
+}
- entry = irq_get_msi_desc(irq);
- if (!entry)
- return -ENODEV;
+/**
+ * msi_setup_device_data - Setup MSI device data
+ * @dev: Device for which MSI device data should be set up
+ *
+ * Return: 0 on success, appropriate error code otherwise
+ *
+ * This can be called more than once for @dev. If the MSI device data is
+ * already allocated the call succeeds. The allocated memory is
+ * automatically released when the device is destroyed.
+ */
+int msi_setup_device_data(struct device *dev)
+{
+ struct msi_device_data *md;
+ int ret;
- if (dev_is_pci(dev))
- is_msix = entry->msi_attrib.is_msix;
+ if (dev->msi.data)
+ return 0;
- return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
+ md = devres_alloc(msi_device_data_release, sizeof(*md), GFP_KERNEL);
+ if (!md)
+ return -ENOMEM;
+
+ ret = msi_sysfs_create_group(dev);
+ if (ret) {
+ devres_free(md);
+ return ret;
+ }
+
+ xa_init(&md->__store);
+ mutex_init(&md->mutex);
+ dev->msi.data = md;
+ devres_add(dev, md);
+ return 0;
+}
+
+/**
+ * msi_lock_descs - Lock the MSI descriptor storage of a device
+ * @dev: Device to operate on
+ */
+void msi_lock_descs(struct device *dev)
+{
+ mutex_lock(&dev->msi.data->mutex);
+}
+EXPORT_SYMBOL_GPL(msi_lock_descs);
+
+/**
+ * msi_unlock_descs - Unlock the MSI descriptor storage of a device
+ * @dev: Device to operate on
+ */
+void msi_unlock_descs(struct device *dev)
+{
+ /* Invalidate the index which was cached by the iterator */
+ dev->msi.data->__iter_idx = MSI_MAX_INDEX;
+ mutex_unlock(&dev->msi.data->mutex);
+}
+EXPORT_SYMBOL_GPL(msi_unlock_descs);
+
+static struct msi_desc *msi_find_desc(struct msi_device_data *md, enum msi_desc_filter filter)
+{
+ struct msi_desc *desc;
+
+ xa_for_each_start(&md->__store, md->__iter_idx, desc, md->__iter_idx) {
+ if (msi_desc_match(desc, filter))
+ return desc;
+ }
+ md->__iter_idx = MSI_MAX_INDEX;
+ return NULL;
}
/**
- * msi_populate_sysfs - Populate msi_irqs sysfs entries for devices
- * @dev: The device(PCI, platform etc) who will get sysfs entries
+ * msi_first_desc - Get the first MSI descriptor of a device
+ * @dev: Device to operate on
+ * @filter: Descriptor state filter
+ *
+ * Must be called with the MSI descriptor mutex held, i.e. msi_lock_descs()
+ * must be invoked before the call.
*
- * Return attribute_group ** so that specific bus MSI can save it to
- * somewhere during initilizing msi irqs. If devices has no MSI irq,
- * return NULL; if it fails to populate sysfs, return ERR_PTR
+ * Return: Pointer to the first MSI descriptor matching the search
+ * criteria, NULL if none found.
*/
-const struct attribute_group **msi_populate_sysfs(struct device *dev)
-{
- const struct attribute_group **msi_irq_groups;
- struct attribute **msi_attrs, *msi_attr;
- struct device_attribute *msi_dev_attr;
- struct attribute_group *msi_irq_group;
- struct msi_desc *entry;
- int ret = -ENOMEM;
- int num_msi = 0;
- int count = 0;
- int i;
+struct msi_desc *msi_first_desc(struct device *dev, enum msi_desc_filter filter)
+{
+ struct msi_device_data *md = dev->msi.data;
- /* Determine how many msi entries we have */
- for_each_msi_entry(entry, dev)
- num_msi += entry->nvec_used;
- if (!num_msi)
+ if (WARN_ON_ONCE(!md))
return NULL;
- /* Dynamically create the MSI attributes for the device */
- msi_attrs = kcalloc(num_msi + 1, sizeof(void *), GFP_KERNEL);
- if (!msi_attrs)
- return ERR_PTR(-ENOMEM);
-
- for_each_msi_entry(entry, dev) {
- for (i = 0; i < entry->nvec_used; i++) {
- msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
- if (!msi_dev_attr)
- goto error_attrs;
- msi_attrs[count] = &msi_dev_attr->attr;
-
- sysfs_attr_init(&msi_dev_attr->attr);
- msi_dev_attr->attr.name = kasprintf(GFP_KERNEL, "%d",
- entry->irq + i);
- if (!msi_dev_attr->attr.name)
- goto error_attrs;
- msi_dev_attr->attr.mode = 0444;
- msi_dev_attr->show = msi_mode_show;
- ++count;
+ lockdep_assert_held(&md->mutex);
+
+ md->__iter_idx = 0;
+ return msi_find_desc(md, filter);
+}
+EXPORT_SYMBOL_GPL(msi_first_desc);
+
+/**
+ * msi_next_desc - Get the next MSI descriptor of a device
+ * @dev: Device to operate on
+ *
+ * The first invocation of msi_next_desc() has to be preceded by a
+ * successful invocation of msi_first_desc(). Consecutive invocations are
+ * only valid if the previous one was successful. All these operations have
+ * to be done within the same MSI mutex held region.
+ *
+ * Return: Pointer to the next MSI descriptor matching the search
+ * criteria, NULL if none found.
+ */
+struct msi_desc *msi_next_desc(struct device *dev, enum msi_desc_filter filter)
+{
+ struct msi_device_data *md = dev->msi.data;
+
+ if (WARN_ON_ONCE(!md))
+ return NULL;
+
+ lockdep_assert_held(&md->mutex);
+
+ if (md->__iter_idx >= (unsigned long)MSI_MAX_INDEX)
+ return NULL;
+
+ md->__iter_idx++;
+ return msi_find_desc(md, filter);
+}
+EXPORT_SYMBOL_GPL(msi_next_desc);
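msi_first_desc()/msi_next_desc() exist mainly to back an iterator macro; a sketch of how msi_for_each_desc() is plausibly built on them and used (macro shape assumed):

#define msi_for_each_desc(desc, dev, filter)				\
	for ((desc) = msi_first_desc((dev), (filter)); (desc);		\
	     (desc) = msi_next_desc((dev), (filter)))

	/* the walk must run under the descriptor mutex: */
	msi_lock_descs(dev);
	msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED)
		pr_debug("MSI index %u -> irq %u\n", desc->msi_index, desc->irq);
	msi_unlock_descs(dev);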
+
+/**
+ * msi_get_virq - Return Linux interrupt number of a MSI interrupt
+ * @dev: Device to operate on
+ * @index: MSI interrupt index to look for (0-based)
+ *
+ * Return: The Linux interrupt number on success (> 0), 0 if not found
+ */
+unsigned int msi_get_virq(struct device *dev, unsigned int index)
+{
+ struct msi_desc *desc;
+ unsigned int ret = 0;
+ bool pcimsi;
+
+ if (!dev->msi.data)
+ return 0;
+
+ pcimsi = dev_is_pci(dev) ? to_pci_dev(dev)->msi_enabled : false;
+
+ msi_lock_descs(dev);
+ desc = xa_load(&dev->msi.data->__store, pcimsi ? 0 : index);
+ if (desc && desc->irq) {
+ /*
+ * PCI-MSI has only one descriptor for multiple interrupts.
+ * PCI-MSIX and platform MSI use a descriptor per
+ * interrupt.
+ */
+ if (pcimsi) {
+ if (index < desc->nvec_used)
+ ret = desc->irq + index;
+ } else {
+ ret = desc->irq;
}
}
+ msi_unlock_descs(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(msi_get_virq);
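With the xarray lookup in place, per-bus queries collapse into thin wrappers; for PCI, pci_irq_vector() can plausibly be reduced to (sketch):

int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
{
	unsigned int irq = msi_get_virq(&dev->dev, nr);

	return irq ? irq : -EINVAL;
}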
- msi_irq_group = kzalloc(sizeof(*msi_irq_group), GFP_KERNEL);
- if (!msi_irq_group)
- goto error_attrs;
- msi_irq_group->name = "msi_irqs";
- msi_irq_group->attrs = msi_attrs;
+#ifdef CONFIG_SYSFS
+static struct attribute *msi_dev_attrs[] = {
+ NULL
+};
- msi_irq_groups = kcalloc(2, sizeof(void *), GFP_KERNEL);
- if (!msi_irq_groups)
- goto error_irq_group;
- msi_irq_groups[0] = msi_irq_group;
+static const struct attribute_group msi_irqs_group = {
+ .name = "msi_irqs",
+ .attrs = msi_dev_attrs,
+};
- ret = sysfs_create_groups(&dev->kobj, msi_irq_groups);
- if (ret)
- goto error_irq_groups;
-
- return msi_irq_groups;
-
-error_irq_groups:
- kfree(msi_irq_groups);
-error_irq_group:
- kfree(msi_irq_group);
-error_attrs:
- count = 0;
- msi_attr = msi_attrs[count];
- while (msi_attr) {
- msi_dev_attr = container_of(msi_attr, struct device_attribute, attr);
- kfree(msi_attr->name);
- kfree(msi_dev_attr);
- ++count;
- msi_attr = msi_attrs[count];
+static inline int msi_sysfs_create_group(struct device *dev)
+{
+ return devm_device_add_group(dev, &msi_irqs_group);
+}
+
+static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ /* MSI vs. MSIX is per device not per interrupt */
+ bool is_msix = dev_is_pci(dev) ? to_pci_dev(dev)->msix_enabled : false;
+
+ return sysfs_emit(buf, "%s\n", is_msix ? "msix" : "msi");
+}
+
+static void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc)
+{
+ struct device_attribute *attrs = desc->sysfs_attrs;
+ int i;
+
+ if (!attrs)
+ return;
+
+ desc->sysfs_attrs = NULL;
+ for (i = 0; i < desc->nvec_used; i++) {
+ if (attrs[i].show)
+ sysfs_remove_file_from_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
+ kfree(attrs[i].attr.name);
}
- kfree(msi_attrs);
- return ERR_PTR(ret);
+ kfree(attrs);
}
+static int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc)
+{
+ struct device_attribute *attrs;
+ int ret, i;
+
+ attrs = kcalloc(desc->nvec_used, sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENOMEM;
+
+ desc->sysfs_attrs = attrs;
+ for (i = 0; i < desc->nvec_used; i++) {
+ sysfs_attr_init(&attrs[i].attr);
+ attrs[i].attr.name = kasprintf(GFP_KERNEL, "%d", desc->irq + i);
+ if (!attrs[i].attr.name) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ attrs[i].attr.mode = 0444;
+ attrs[i].show = msi_mode_show;
+
+ ret = sysfs_add_file_to_group(&dev->kobj, &attrs[i].attr, msi_irqs_group.name);
+ if (ret) {
+ attrs[i].show = NULL;
+ goto fail;
+ }
+ }
+ return 0;
+
+fail:
+ msi_sysfs_remove_desc(dev, desc);
+ return ret;
+}
+
+#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
/**
- * msi_destroy_sysfs - Destroy msi_irqs sysfs entries for devices
- * @dev: The device(PCI, platform etc) who will remove sysfs entries
- * @msi_irq_groups: attribute_group for device msi_irqs entries
+ * msi_device_populate_sysfs - Populate msi_irqs sysfs entries for a device
+ * @dev: The device (PCI, platform etc) which will get sysfs entries
*/
-void msi_destroy_sysfs(struct device *dev, const struct attribute_group **msi_irq_groups)
-{
- struct device_attribute *dev_attr;
- struct attribute **msi_attrs;
- int count = 0;
-
- if (msi_irq_groups) {
- sysfs_remove_groups(&dev->kobj, msi_irq_groups);
- msi_attrs = msi_irq_groups[0]->attrs;
- while (msi_attrs[count]) {
- dev_attr = container_of(msi_attrs[count],
- struct device_attribute, attr);
- kfree(dev_attr->attr.name);
- kfree(dev_attr);
- ++count;
- }
- kfree(msi_attrs);
- kfree(msi_irq_groups[0]);
- kfree(msi_irq_groups);
+int msi_device_populate_sysfs(struct device *dev)
+{
+ struct msi_desc *desc;
+ int ret;
+
+ msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
+ if (desc->sysfs_attrs)
+ continue;
+ ret = msi_sysfs_populate_desc(dev, desc);
+ if (ret)
+ return ret;
}
+ return 0;
}
+/**
+ * msi_device_destroy_sysfs - Destroy msi_irqs sysfs entries for a device
+ * @dev: The device (PCI, platform etc) for which to remove
+ * sysfs entries
+ */
+void msi_device_destroy_sysfs(struct device *dev)
+{
+ struct msi_desc *desc;
+
+ msi_for_each_desc(desc, dev, MSI_DESC_ALL)
+ msi_sysfs_remove_desc(dev, desc);
+}
+#endif /* CONFIG_PCI_MSI_ARCH_FALLBACKS */
+#else /* CONFIG_SYSFS */
+static inline int msi_sysfs_create_group(struct device *dev) { return 0; }
+static inline int msi_sysfs_populate_desc(struct device *dev, struct msi_desc *desc) { return 0; }
+static inline void msi_sysfs_remove_desc(struct device *dev, struct msi_desc *desc) { }
+#endif /* !CONFIG_SYSFS */
+
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static inline void irq_chip_write_msi_msg(struct irq_data *data,
struct msi_msg *msg)
@@ -456,43 +710,38 @@ int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
}
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
- int virq, int nvec, msi_alloc_info_t *arg)
+ int virq_base, int nvec, msi_alloc_info_t *arg)
{
struct msi_domain_info *info = domain->host_data;
struct msi_domain_ops *ops = info->ops;
struct msi_desc *desc;
- int ret = 0;
+ int ret, virq;
- for_each_msi_entry(desc, dev) {
- /* Don't even try the multi-MSI brain damage. */
- if (WARN_ON(!desc->irq || desc->nvec_used != 1)) {
- ret = -EINVAL;
- break;
- }
+ msi_lock_descs(dev);
+ ret = msi_add_simple_msi_descs(dev, virq_base, nvec);
+ if (ret)
+ goto unlock;
- if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
- continue;
+ for (virq = virq_base; virq < virq_base + nvec; virq++) {
+ desc = xa_load(&dev->msi.data->__store, virq);
+ desc->irq = virq;
ops->set_desc(arg, desc);
- /* Assumes the domain mutex is held! */
- ret = irq_domain_alloc_irqs_hierarchy(domain, desc->irq, 1,
- arg);
+ ret = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
if (ret)
- break;
+ goto fail;
- irq_set_msi_desc_off(desc->irq, 0, desc);
- }
-
- if (ret) {
- /* Mop up the damage */
- for_each_msi_entry(desc, dev) {
- if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
- continue;
-
- irq_domain_free_irqs_common(domain, desc->irq, 1);
- }
+ irq_set_msi_desc(virq, desc);
}
+ msi_unlock_descs(dev);
+ return 0;
+fail:
+ for (--virq; virq >= virq_base; virq--)
+ irq_domain_free_irqs_common(domain, virq, 1);
+ msi_free_msi_descs_range(dev, MSI_DESC_ALL, virq_base, virq_base + nvec - 1);
+unlock:
+ msi_unlock_descs(dev);
return ret;
}
@@ -531,8 +780,59 @@ static bool msi_check_reservation_mode(struct irq_domain *domain,
* Checking the first MSI descriptor is sufficient. MSIX supports
* masking and MSI does so when the can_mask attribute is set.
*/
- desc = first_msi_entry(dev);
- return desc->msi_attrib.is_msix || desc->msi_attrib.can_mask;
+ desc = msi_first_desc(dev, MSI_DESC_ALL);
+ return desc->pci.msi_attrib.is_msix || desc->pci.msi_attrib.can_mask;
+}
+
+static int msi_handle_pci_fail(struct irq_domain *domain, struct msi_desc *desc,
+ int allocated)
+{
+ switch (domain->bus_token) {
+ case DOMAIN_BUS_PCI_MSI:
+ case DOMAIN_BUS_VMD_MSI:
+ if (IS_ENABLED(CONFIG_PCI_MSI))
+ break;
+ fallthrough;
+ default:
+ return -ENOSPC;
+ }
+
+ /* Let a failed PCI multi MSI allocation retry */
+ if (desc->nvec_used > 1)
+ return 1;
+
+ /* If there was a successful allocation let the caller know */
+ return allocated ? allocated : -ENOSPC;
+}
+
+#define VIRQ_CAN_RESERVE 0x01
+#define VIRQ_ACTIVATE 0x02
+#define VIRQ_NOMASK_QUIRK 0x04
+
+static int msi_init_virq(struct irq_domain *domain, int virq, unsigned int vflags)
+{
+ struct irq_data *irqd = irq_domain_get_irq_data(domain, virq);
+ int ret;
+
+ if (!(vflags & VIRQ_CAN_RESERVE)) {
+ irqd_clr_can_reserve(irqd);
+ if (vflags & VIRQ_NOMASK_QUIRK)
+ irqd_set_msi_nomask_quirk(irqd);
+ }
+
+ if (!(vflags & VIRQ_ACTIVATE))
+ return 0;
+
+ ret = irq_domain_activate_irq(irqd, vflags & VIRQ_CAN_RESERVE);
+ if (ret)
+ return ret;
+ /*
+ * If the interrupt uses reservation mode, clear the activated bit
+ * so request_irq() will assign the final vector.
+ */
+ if (vflags & VIRQ_CAN_RESERVE)
+ irqd_clr_activated(irqd);
+ return 0;
}
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
@@ -540,83 +840,103 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
{
struct msi_domain_info *info = domain->host_data;
struct msi_domain_ops *ops = info->ops;
- struct irq_data *irq_data;
- struct msi_desc *desc;
msi_alloc_info_t arg = { };
+ unsigned int vflags = 0;
+ struct msi_desc *desc;
+ int allocated = 0;
int i, ret, virq;
- bool can_reserve;
ret = msi_domain_prepare_irqs(domain, dev, nvec, &arg);
if (ret)
return ret;
- for_each_msi_entry(desc, dev) {
+ /*
+ * This flag is set by the PCI layer as we need to activate
+ * the MSI entries before the PCI layer enables MSI in the
+ * card. Otherwise the card latches a random msi message.
+ */
+ if (info->flags & MSI_FLAG_ACTIVATE_EARLY)
+ vflags |= VIRQ_ACTIVATE;
+
+ /*
+ * Interrupt can use a reserved vector and will not occupy
+ * a real device vector until the interrupt is requested.
+ */
+ if (msi_check_reservation_mode(domain, info, dev)) {
+ vflags |= VIRQ_CAN_RESERVE;
+ /*
+ * MSI affinity setting requires a special quirk (X86) when
+ * reservation mode is active.
+ */
+ if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
+ vflags |= VIRQ_NOMASK_QUIRK;
+ }
+
+ msi_for_each_desc(desc, dev, MSI_DESC_NOTASSOCIATED) {
ops->set_desc(&arg, desc);
virq = __irq_domain_alloc_irqs(domain, -1, desc->nvec_used,
dev_to_node(dev), &arg, false,
desc->affinity);
- if (virq < 0) {
- ret = -ENOSPC;
- if (ops->handle_error)
- ret = ops->handle_error(domain, desc, ret);
- if (ops->msi_finish)
- ops->msi_finish(&arg, ret);
- return ret;
- }
+ if (virq < 0)
+ return msi_handle_pci_fail(domain, desc, allocated);
for (i = 0; i < desc->nvec_used; i++) {
irq_set_msi_desc_off(virq, i, desc);
irq_debugfs_copy_devname(virq + i, dev);
+ ret = msi_init_virq(domain, virq + i, vflags);
+ if (ret)
+ return ret;
+ }
+ if (info->flags & MSI_FLAG_DEV_SYSFS) {
+ ret = msi_sysfs_populate_desc(dev, desc);
+ if (ret)
+ return ret;
}
+ allocated++;
}
+ return 0;
+}
- if (ops->msi_finish)
- ops->msi_finish(&arg, 0);
+static int msi_domain_add_simple_msi_descs(struct msi_domain_info *info,
+ struct device *dev,
+ unsigned int num_descs)
+{
+ if (!(info->flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS))
+ return 0;
- can_reserve = msi_check_reservation_mode(domain, info, dev);
+ return msi_add_simple_msi_descs(dev, 0, num_descs);
+}
- /*
- * This flag is set by the PCI layer as we need to activate
- * the MSI entries before the PCI layer enables MSI in the
- * card. Otherwise the card latches a random msi message.
- */
- if (!(info->flags & MSI_FLAG_ACTIVATE_EARLY))
- goto skip_activate;
-
- for_each_msi_vector(desc, i, dev) {
- if (desc->irq == i) {
- virq = desc->irq;
- dev_dbg(dev, "irq [%d-%d] for MSI\n",
- virq, virq + desc->nvec_used - 1);
- }
+/**
+ * msi_domain_alloc_irqs_descs_locked - Allocate interrupts from an MSI interrupt domain
+ * @domain: The domain to allocate from
+ * @dev: Pointer to device struct of the device for which the interrupts
+ * are allocated
+ * @nvec: The number of interrupts to allocate
+ *
+ * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
+ * pair. Use this for MSI irqdomains which implement their own vector
+ * allocation/free.
+ *
+ * Return: %0 on success or an error code.
+ */
+int msi_domain_alloc_irqs_descs_locked(struct irq_domain *domain, struct device *dev,
+ int nvec)
+{
+ struct msi_domain_info *info = domain->host_data;
+ struct msi_domain_ops *ops = info->ops;
+ int ret;
- irq_data = irq_domain_get_irq_data(domain, i);
- if (!can_reserve) {
- irqd_clr_can_reserve(irq_data);
- if (domain->flags & IRQ_DOMAIN_MSI_NOMASK_QUIRK)
- irqd_set_msi_nomask_quirk(irq_data);
- }
- ret = irq_domain_activate_irq(irq_data, can_reserve);
- if (ret)
- goto cleanup;
- }
+ lockdep_assert_held(&dev->msi.data->mutex);
-skip_activate:
- /*
- * If these interrupts use reservation mode, clear the activated bit
- * so request_irq() will assign the final vector.
- */
- if (can_reserve) {
- for_each_msi_vector(desc, i, dev) {
- irq_data = irq_domain_get_irq_data(domain, i);
- irqd_clr_activated(irq_data);
- }
- }
- return 0;
+ ret = msi_domain_add_simple_msi_descs(info, dev, nvec);
+ if (ret)
+ return ret;
-cleanup:
- msi_domain_free_irqs(domain, dev);
+ ret = ops->domain_alloc_irqs(domain, dev, nvec);
+ if (ret)
+ msi_domain_free_irqs_descs_locked(domain, dev);
return ret;
}
@@ -629,52 +949,78 @@ cleanup:
*
* Return: %0 on success or an error code.
*/
-int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
- int nvec)
+int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, int nvec)
{
- struct msi_domain_info *info = domain->host_data;
- struct msi_domain_ops *ops = info->ops;
+ int ret;
- return ops->domain_alloc_irqs(domain, dev, nvec);
+ msi_lock_descs(dev);
+ ret = msi_domain_alloc_irqs_descs_locked(domain, dev, nvec);
+ msi_unlock_descs(dev);
+ return ret;
}
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
{
- struct irq_data *irq_data;
+ struct msi_domain_info *info = domain->host_data;
+ struct irq_data *irqd;
struct msi_desc *desc;
int i;
- for_each_msi_vector(desc, i, dev) {
- irq_data = irq_domain_get_irq_data(domain, i);
- if (irqd_is_activated(irq_data))
- irq_domain_deactivate_irq(irq_data);
- }
-
- for_each_msi_entry(desc, dev) {
- /*
- * We might have failed to allocate an MSI early
- * enough that there is no IRQ associated to this
- * entry. If that's the case, don't do anything.
- */
- if (desc->irq) {
- irq_domain_free_irqs(desc->irq, desc->nvec_used);
- desc->irq = 0;
+ /* Only handle MSI entries which have an interrupt associated */
+ msi_for_each_desc(desc, dev, MSI_DESC_ASSOCIATED) {
+ /* Make sure all interrupts are deactivated */
+ for (i = 0; i < desc->nvec_used; i++) {
+ irqd = irq_domain_get_irq_data(domain, desc->irq + i);
+ if (irqd && irqd_is_activated(irqd))
+ irq_domain_deactivate_irq(irqd);
}
+
+ irq_domain_free_irqs(desc->irq, desc->nvec_used);
+ if (info->flags & MSI_FLAG_DEV_SYSFS)
+ msi_sysfs_remove_desc(dev, desc);
+ desc->irq = 0;
}
}
+static void msi_domain_free_msi_descs(struct msi_domain_info *info,
+ struct device *dev)
+{
+ if (info->flags & MSI_FLAG_FREE_MSI_DESCS)
+ msi_free_msi_descs(dev);
+}
+
/**
- * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated to @dev
+ * msi_domain_free_irqs_descs_locked - Free interrupts from an MSI interrupt @domain associated with @dev
 * @domain: The domain managing the interrupts
 * @dev: Pointer to device struct of the device for which the interrupts
 * are freed
+ *
+ * Must be invoked from within a msi_lock_descs() / msi_unlock_descs()
+ * pair. Use this for MSI irqdomains which implement their own vector
+ * allocation.
*/
-void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
+void msi_domain_free_irqs_descs_locked(struct irq_domain *domain, struct device *dev)
{
struct msi_domain_info *info = domain->host_data;
struct msi_domain_ops *ops = info->ops;
- return ops->domain_free_irqs(domain, dev);
+ lockdep_assert_held(&dev->msi.data->mutex);
+
+ ops->domain_free_irqs(domain, dev);
+ msi_domain_free_msi_descs(info, dev);
+}
+
+/**
+ * msi_domain_free_irqs - Free interrupts from an MSI interrupt @domain associated with @dev
+ * @domain: The domain managing the interrupts
+ * @dev: Pointer to device struct of the device for which the interrupts
+ * are freed
+ */
+void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev)
+{
+ msi_lock_descs(dev);
+ msi_domain_free_irqs_descs_locked(domain, dev);
+ msi_unlock_descs(dev);
}
/**
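
The *_descs_locked variants split the descriptor-mutex handling out of the plain msi_domain_alloc_irqs()/msi_domain_free_irqs() wrappers. A minimal sketch of how an irqdomain that manages its own vectors might use them, holding the lock across allocation and any follow-up descriptor work (the example_* names are hypothetical):

#include <linux/device.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>

static int example_setup_vectors(struct irq_domain *domain,
				 struct device *dev, int nvec)
{
	int ret;

	msi_lock_descs(dev);
	ret = msi_domain_alloc_irqs_descs_locked(domain, dev, nvec);
	if (!ret) {
		/* ... inspect or tweak descriptors under the same mutex ... */
	}
	msi_unlock_descs(dev);
	return ret;
}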
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index ee595ec09778..623b8136e9af 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -137,7 +137,7 @@ static inline int irq_select_affinity_usr(unsigned int irq)
static ssize_t write_irq_affinity(int type, struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
- unsigned int irq = (int)(long)PDE_DATA(file_inode(file));
+ unsigned int irq = (int)(long)pde_data(file_inode(file));
cpumask_var_t new_value;
int err;
@@ -190,12 +190,12 @@ static ssize_t irq_affinity_list_proc_write(struct file *file,
static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, irq_affinity_proc_show, PDE_DATA(inode));
+ return single_open(file, irq_affinity_proc_show, pde_data(inode));
}
static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
- return single_open(file, irq_affinity_list_proc_show, PDE_DATA(inode));
+ return single_open(file, irq_affinity_list_proc_show, pde_data(inode));
}
static const struct proc_ops irq_affinity_proc_ops = {
@@ -265,7 +265,7 @@ out:
static int default_affinity_open(struct inode *inode, struct file *file)
{
- return single_open(file, default_affinity_show, PDE_DATA(inode));
+ return single_open(file, default_affinity_show, pde_data(inode));
}
static const struct proc_ops default_affinity_proc_ops = {
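
pde_data() is the lower-case replacement for the old PDE_DATA() macro; it returns the private pointer handed to proc_create_data(). A sketch of the usual single_open() wiring, with a hypothetical stats structure:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct example_stats {
	unsigned long count;
};

static int example_proc_show(struct seq_file *m, void *v)
{
	struct example_stats *stats = m->private;	/* set by single_open() */

	seq_printf(m, "count: %lu\n", stats->count);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	/* pde_data() recovers the pointer passed to proc_create_data() */
	return single_open(file, example_proc_show, pde_data(inode));
}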
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 3011bc33a5ba..951c93216fc4 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -243,6 +243,7 @@ int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
ret = fn(data, namebuf, NULL, kallsyms_sym_address(i));
if (ret != 0)
return ret;
+ cond_resched();
}
return 0;
}
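
The added cond_resched() keeps a full walk over the (potentially huge) symbol table from monopolizing the CPU on non-preemptible kernels. The pattern in isolation, with a hypothetical visitor callback:

#include <linux/sched.h>

static int example_visit_table(int (*fn)(void *ctx, unsigned long idx),
			       void *ctx, unsigned long nr_entries)
{
	unsigned long i;
	int ret;

	for (i = 0; i < nr_entries; i++) {
		ret = fn(ctx, i);
		if (ret)
			return ret;
		cond_resched();		/* may sleep; process context only */
	}
	return 0;
}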
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 5a5d192a89ac..68480f731192 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -81,7 +81,7 @@ int kexec_should_crash(struct task_struct *p)
if (crash_kexec_post_notifiers)
return 0;
/*
- * There are 4 panic() calls in do_exit() path, each of which
+ * There are 4 panic() calls in the make_task_dead() path, each of which
* corresponds to each of these 4 conditions.
*/
if (in_interrupt() || !p->pid || is_global_init(p) || panic_on_oops)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 21eccc961bba..94cab8c9ce56 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -48,6 +48,9 @@
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
+#if !defined(CONFIG_OPTPROBES) || !defined(CONFIG_SYSCTL)
+#define kprobe_sysctls_init() do { } while (0)
+#endif
static int kprobes_initialized;
/* kprobe_table can be accessed by
@@ -938,10 +941,10 @@ static void unoptimize_all_kprobes(void)
}
static DEFINE_MUTEX(kprobe_sysctl_mutex);
-int sysctl_kprobes_optimization;
-int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
- void *buffer, size_t *length,
- loff_t *ppos)
+static int sysctl_kprobes_optimization;
+static int proc_kprobes_optimization_handler(struct ctl_table *table,
+ int write, void *buffer,
+ size_t *length, loff_t *ppos)
{
int ret;
@@ -957,6 +960,24 @@ int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
return ret;
}
+
+static struct ctl_table kprobe_sysctls[] = {
+ {
+ .procname = "kprobes-optimization",
+ .data = &sysctl_kprobes_optimization,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_kprobes_optimization_handler,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {}
+};
+
+static void __init kprobe_sysctls_init(void)
+{
+ register_sysctl_init("debug", kprobe_sysctls);
+}
#endif /* CONFIG_SYSCTL */
/* Put a breakpoint for a probe. */
@@ -2584,6 +2605,7 @@ static int __init init_kprobes(void)
err = register_module_notifier(&kprobe_module_nb);
kprobes_initialized = (err == 0);
+ kprobe_sysctls_init();
return err;
}
early_initcall(init_kprobes);
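
register_sysctl_init() is the pattern this series uses to move sysctls next to the code they control: a static table, registered once at boot, never unregistered. A sketch with hypothetical names, mirroring the kprobes table above:

#include <linux/sysctl.h>

static int example_enabled;

static struct ctl_table example_sysctls[] = {
	{
		.procname	= "example-enabled",
		.data		= &example_enabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{}
};

static void __init example_sysctls_init(void)
{
	/* appears as /proc/sys/debug/example-enabled */
	register_sysctl_init("debug", example_sysctls);
}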
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 7113003fab63..38c6dd822da8 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -52,6 +52,7 @@ struct kthread_create_info
struct kthread {
unsigned long flags;
unsigned int cpu;
+ int result;
int (*threadfn)(void *);
void *data;
mm_segment_t oldfs;
@@ -60,6 +61,8 @@ struct kthread {
#ifdef CONFIG_BLK_CGROUP
struct cgroup_subsys_state *blkcg_css;
#endif
+ /* To store the full name if task comm is truncated. */
+ char *full_name;
};
enum KTHREAD_BITS {
@@ -71,7 +74,7 @@ enum KTHREAD_BITS {
static inline struct kthread *to_kthread(struct task_struct *k)
{
WARN_ON(!(k->flags & PF_KTHREAD));
- return (__force void *)k->set_child_tid;
+ return k->worker_private;
}
/*
@@ -79,7 +82,7 @@ static inline struct kthread *to_kthread(struct task_struct *k)
*
* Per construction; when:
*
- * (p->flags & PF_KTHREAD) && p->set_child_tid
+ * (p->flags & PF_KTHREAD) && p->worker_private
*
* the task is both a kthread and struct kthread is persistent. However
 * PF_KTHREAD on its own is not, kernel_thread() can exec() (See umh.c and
@@ -87,26 +90,41 @@ static inline struct kthread *to_kthread(struct task_struct *k)
*/
static inline struct kthread *__to_kthread(struct task_struct *p)
{
- void *kthread = (__force void *)p->set_child_tid;
+ void *kthread = p->worker_private;
if (kthread && !(p->flags & PF_KTHREAD))
kthread = NULL;
return kthread;
}
-void set_kthread_struct(struct task_struct *p)
+void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
- struct kthread *kthread;
+ struct kthread *kthread = to_kthread(tsk);
- if (__to_kthread(p))
+ if (!kthread || !kthread->full_name) {
+ __get_task_comm(buf, buf_size, tsk);
return;
+ }
+
+ strscpy_pad(buf, kthread->full_name, buf_size);
+}
+
+bool set_kthread_struct(struct task_struct *p)
+{
+ struct kthread *kthread;
+
+ if (WARN_ON_ONCE(to_kthread(p)))
+ return false;
kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
- /*
- * We abuse ->set_child_tid to avoid the new member and because it
- * can't be wrongly copied by copy_process(). We also rely on fact
- * that the caller can't exec, so PF_KTHREAD can't be cleared.
- */
- p->set_child_tid = (__force void __user *)kthread;
+ if (!kthread)
+ return false;
+
+ init_completion(&kthread->exited);
+ init_completion(&kthread->parked);
+ p->vfork_done = &kthread->exited;
+
+ p->worker_private = kthread;
+ return true;
}
void free_kthread_struct(struct task_struct *k)
@@ -114,13 +132,17 @@ void free_kthread_struct(struct task_struct *k)
struct kthread *kthread;
/*
- * Can be NULL if this kthread was created by kernel_thread()
- * or if kmalloc() in kthread() failed.
+ * Can be NULL if kmalloc() in set_kthread_struct() failed.
*/
kthread = to_kthread(k);
+ if (!kthread)
+ return;
+
#ifdef CONFIG_BLK_CGROUP
- WARN_ON_ONCE(kthread && kthread->blkcg_css);
+ WARN_ON_ONCE(kthread->blkcg_css);
#endif
+ k->worker_private = NULL;
+ kfree(kthread->full_name);
kfree(kthread);
}
@@ -268,6 +290,44 @@ void kthread_parkme(void)
}
EXPORT_SYMBOL_GPL(kthread_parkme);
+/**
+ * kthread_exit - Cause the current kthread to return @result to kthread_stop().
+ * @result: The integer value to return to kthread_stop().
+ *
+ * While kthread_exit() can be called directly, it exists so that
+ * functions which do some additional work in non-modular code, such as
+ * module_put_and_kthread_exit(), can be implemented.
+ *
+ * Does not return.
+ */
+void __noreturn kthread_exit(long result)
+{
+ struct kthread *kthread = to_kthread(current);
+ kthread->result = result;
+ do_exit(0);
+}
+
+/**
+ * kthread_complete_and_exit - Exit the current kthread.
+ * @comp: Completion to complete
+ * @code: The integer value to return to kthread_stop().
+ *
+ * If present, complete @comp and then return @code to kthread_stop().
+ *
+ * A kernel thread whose module may be removed after the completion of
+ * @comp can use this function to exit safely.
+ *
+ * Does not return.
+ */
+void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
+{
+ if (comp)
+ complete(comp);
+
+ kthread_exit(code);
+}
+EXPORT_SYMBOL(kthread_complete_and_exit);
+
static int kthread(void *_create)
{
static const struct sched_param param = { .sched_priority = 0 };
@@ -279,27 +339,17 @@ static int kthread(void *_create)
struct kthread *self;
int ret;
- set_kthread_struct(current);
self = to_kthread(current);
/* If user was SIGKILLed, I release the structure. */
done = xchg(&create->done, NULL);
if (!done) {
kfree(create);
- do_exit(-EINTR);
- }
-
- if (!self) {
- create->result = ERR_PTR(-ENOMEM);
- complete(done);
- do_exit(-ENOMEM);
+ kthread_exit(-EINTR);
}
self->threadfn = threadfn;
self->data = data;
- init_completion(&self->exited);
- init_completion(&self->parked);
- current->vfork_done = &self->exited;
/*
* The new thread inherited kthreadd's priority and CPU mask. Reset
@@ -326,7 +376,7 @@ static int kthread(void *_create)
__kthread_parkme(self);
ret = threadfn(data);
}
- do_exit(ret);
+ kthread_exit(ret);
}
/* called from kernel_clone() to get node information for about to be created task */
@@ -406,12 +456,22 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
task = create->result;
if (!IS_ERR(task)) {
char name[TASK_COMM_LEN];
+ va_list aq;
+ int len;
/*
* task is already visible to other tasks, so updating
* COMM must be protected.
*/
- vsnprintf(name, sizeof(name), namefmt, args);
+ va_copy(aq, args);
+ len = vsnprintf(name, sizeof(name), namefmt, aq);
+ va_end(aq);
+ if (len >= TASK_COMM_LEN) {
+ struct kthread *kthread = to_kthread(task);
+
+ /* leave it truncated when out of memory. */
+ kthread->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
+ }
set_task_comm(task, name);
}
kfree(create);
@@ -523,6 +583,7 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
to_kthread(p)->cpu = cpu;
return p;
}
+EXPORT_SYMBOL(kthread_create_on_cpu);
void kthread_set_per_cpu(struct task_struct *k, int cpu)
{
@@ -627,7 +688,7 @@ EXPORT_SYMBOL_GPL(kthread_park);
* instead of calling wake_up_process(): the thread will exit without
* calling threadfn().
*
- * If threadfn() may call do_exit() itself, the caller must ensure
+ * If threadfn() may call kthread_exit() itself, the caller must ensure
* task_struct can't go away.
*
* Returns the result of threadfn(), or %-EINTR if wake_up_process()
@@ -646,7 +707,7 @@ int kthread_stop(struct task_struct *k)
kthread_unpark(k);
wake_up_process(k);
wait_for_completion(&kthread->exited);
- ret = k->exit_code;
+ ret = kthread->result;
put_task_struct(k);
trace_sched_kthread_stop_ret(ret);
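
With the exit value now stored in struct kthread rather than task->exit_code, kthread_stop() returns whatever the thread passed to kthread_exit() (or returned from threadfn()). A sketch of the stop-driven flow, all names hypothetical:

#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... one unit of work ... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	kthread_exit(-ECANCELED);	/* lands in struct kthread::result */
}

static int example_run_and_stop(void)
{
	struct task_struct *t = kthread_run(example_threadfn, NULL, "example");

	if (IS_ERR(t))
		return PTR_ERR(t);
	return kthread_stop(t);		/* returns -ECANCELED */
}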
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 335d988bd811..585494ec464f 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -862,14 +862,11 @@ static void klp_init_object_early(struct klp_patch *patch,
list_add_tail(&obj->node, &patch->obj_list);
}
-static int klp_init_patch_early(struct klp_patch *patch)
+static void klp_init_patch_early(struct klp_patch *patch)
{
struct klp_object *obj;
struct klp_func *func;
- if (!patch->objs)
- return -EINVAL;
-
INIT_LIST_HEAD(&patch->list);
INIT_LIST_HEAD(&patch->obj_list);
kobject_init(&patch->kobj, &klp_ktype_patch);
@@ -879,20 +876,12 @@ static int klp_init_patch_early(struct klp_patch *patch)
init_completion(&patch->finish);
klp_for_each_object_static(patch, obj) {
- if (!obj->funcs)
- return -EINVAL;
-
klp_init_object_early(patch, obj);
klp_for_each_func_static(obj, func) {
klp_init_func_early(obj, func);
}
}
-
- if (!try_module_get(patch->mod))
- return -ENODEV;
-
- return 0;
}
static int klp_init_patch(struct klp_patch *patch)
@@ -1024,10 +1013,17 @@ err:
int klp_enable_patch(struct klp_patch *patch)
{
int ret;
+ struct klp_object *obj;
- if (!patch || !patch->mod)
+ if (!patch || !patch->mod || !patch->objs)
return -EINVAL;
+ klp_for_each_object_static(patch, obj) {
+ if (!obj->funcs)
+ return -EINVAL;
+ }
+
if (!is_livepatch_module(patch->mod)) {
pr_err("module %s is not marked as a livepatch module\n",
patch->mod->name);
@@ -1051,12 +1047,13 @@ int klp_enable_patch(struct klp_patch *patch)
return -EINVAL;
}
- ret = klp_init_patch_early(patch);
- if (ret) {
+ if (!try_module_get(patch->mod)) {
mutex_unlock(&klp_mutex);
- return ret;
+ return -ENODEV;
}
+ klp_init_patch_early(patch);
+
ret = klp_init_patch(patch);
if (ret)
goto err;
diff --git a/kernel/livepatch/shadow.c b/kernel/livepatch/shadow.c
index e5c9fb295ba9..c2e724d97ddf 100644
--- a/kernel/livepatch/shadow.c
+++ b/kernel/livepatch/shadow.c
@@ -272,12 +272,12 @@ void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor)
EXPORT_SYMBOL_GPL(klp_shadow_free);
/**
- * klp_shadow_free_all() - detach and free all <*, id> shadow variables
+ * klp_shadow_free_all() - detach and free all <_, id> shadow variables
* @id: data identifier
* @dtor: custom callback that can be used to unregister the variable
* and/or free data that the shadow variable points to (optional)
*
- * This function releases the memory for all <*, id> shadow variable
+ * This function releases the memory for all <_, id> shadow variable
* instances, callers should stop referencing them accordingly.
*/
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
@@ -288,7 +288,7 @@ void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor)
spin_lock_irqsave(&klp_shadow_lock, flags);
- /* Delete all <*, id> from hash */
+ /* Delete all <_, id> from hash */
hash_for_each(klp_shadow_hash, i, shadow, node) {
if (klp_shadow_match(shadow, shadow->obj, id))
klp_shadow_free_struct(shadow, dtor);
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 4a882f83aeb9..f8a0212189ca 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -3462,7 +3462,7 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
u16 chain_hlock = chain_hlocks[chain->base + i];
unsigned int class_idx = chain_hlock_class_idx(chain_hlock);
- return lock_classes + class_idx - 1;
+ return lock_classes + class_idx;
}
/*
@@ -3530,7 +3530,7 @@ static void print_chain_keys_chain(struct lock_chain *chain)
hlock_id = chain_hlocks[chain->base + i];
chain_key = print_chain_key_iteration(hlock_id, chain_key);
- print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id) - 1);
+ print_lock_name(lock_classes + chain_hlock_class_idx(hlock_id));
printk("\n");
}
}
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c
index b562f9289372..7f49baaa4979 100644
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -300,6 +300,16 @@ void __lockfunc _raw_write_lock(rwlock_t *lock)
__raw_write_lock(lock);
}
EXPORT_SYMBOL(_raw_write_lock);
+
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
+#define __raw_write_lock_nested(lock, subclass) __raw_write_lock(((void)(subclass), (lock)))
+#endif
+
+void __lockfunc _raw_write_lock_nested(rwlock_t *lock, int subclass)
+{
+ __raw_write_lock_nested(lock, subclass);
+}
+EXPORT_SYMBOL(_raw_write_lock_nested);
#endif
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
diff --git a/kernel/locking/spinlock_rt.c b/kernel/locking/spinlock_rt.c
index 9e396a09fe0f..48a19ed8486d 100644
--- a/kernel/locking/spinlock_rt.c
+++ b/kernel/locking/spinlock_rt.c
@@ -239,6 +239,18 @@ void __sched rt_write_lock(rwlock_t *rwlock)
}
EXPORT_SYMBOL(rt_write_lock);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __sched rt_write_lock_nested(rwlock_t *rwlock, int subclass)
+{
+ rtlock_might_resched();
+ rwlock_acquire(&rwlock->dep_map, subclass, 0, _RET_IP_);
+ rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
+ rcu_read_lock();
+ migrate_disable();
+}
+EXPORT_SYMBOL(rt_write_lock_nested);
+#endif
+
void __sched rt_read_unlock(rwlock_t *rwlock)
{
rwlock_release(&rwlock->dep_map, _RET_IP_);
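
Both the non-RT and RT implementations back the new write_lock_nested() API, which tells lockdep that taking a second rwlock of the same class is intentional. A sketch with two hypothetical locks sharing one class:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock_a);
static DEFINE_RWLOCK(example_lock_b);	/* same lock class as _a */

static void example_double_write_lock(void)
{
	write_lock(&example_lock_a);
	/* Without the nesting annotation, lockdep would report a
	 * (false positive) deadlock on the second acquisition. */
	write_lock_nested(&example_lock_b, SINGLE_DEPTH_NESTING);
	/* ... critical section covering both locks ... */
	write_unlock(&example_lock_b);
	write_unlock(&example_lock_a);
}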
diff --git a/kernel/module-internal.h b/kernel/module-internal.h
index 33783abc377b..8c381c99062f 100644
--- a/kernel/module-internal.h
+++ b/kernel/module-internal.h
@@ -23,9 +23,28 @@ struct load_info {
#ifdef CONFIG_KALLSYMS
unsigned long mod_kallsyms_init_off;
#endif
+#ifdef CONFIG_MODULE_DECOMPRESS
+ struct page **pages;
+ unsigned int max_pages;
+ unsigned int used_pages;
+#endif
struct {
unsigned int sym, str, mod, vers, info, pcpu;
} index;
};
extern int mod_verify_sig(const void *mod, struct load_info *info);
+
+#ifdef CONFIG_MODULE_DECOMPRESS
+int module_decompress(struct load_info *info, const void *buf, size_t size);
+void module_decompress_cleanup(struct load_info *info);
+#else
+static inline int module_decompress(struct load_info *info,
+ const void *buf, size_t size)
+{
+ return -EOPNOTSUPP;
+}
+static inline void module_decompress_cleanup(struct load_info *info)
+{
+}
+#endif
diff --git a/kernel/module.c b/kernel/module.c
index 84a9141a5e15..46a5c2ed1928 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -337,12 +337,12 @@ static inline void add_taint_module(struct module *mod, unsigned flag,
* A thread that wants to hold a reference to a module only while it
* is running can call this to safely exit. nfsd and lockd use this.
*/
-void __noreturn __module_put_and_exit(struct module *mod, long code)
+void __noreturn __module_put_and_kthread_exit(struct module *mod, long code)
{
module_put(mod);
- do_exit(code);
+ kthread_exit(code);
}
-EXPORT_SYMBOL(__module_put_and_exit);
+EXPORT_SYMBOL(__module_put_and_kthread_exit);
/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
@@ -958,7 +958,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
}
}
- /* Stop the machine so refcounts can't move and disable module. */
ret = try_stop_module(mod, flags, &forced);
if (ret != 0)
goto out;
@@ -2884,12 +2883,13 @@ static int module_sig_check(struct load_info *info, int flags)
const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
const char *reason;
const void *mod = info->hdr;
-
+ bool mangled_module = flags & (MODULE_INIT_IGNORE_MODVERSIONS |
+ MODULE_INIT_IGNORE_VERMAGIC);
/*
- * Require flags == 0, as a module with version information
- * removed is no longer the module that was signed
+ * Do not allow mangled modules as a module with version information
+ * removed is no longer the module that was signed.
*/
- if (flags == 0 &&
+ if (!mangled_module &&
info->len > markerlen &&
memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
/* We truncate the module to discard the signature */
@@ -3174,9 +3174,12 @@ out:
return err;
}
-static void free_copy(struct load_info *info)
+static void free_copy(struct load_info *info, int flags)
{
- vfree(info->hdr);
+ if (flags & MODULE_INIT_COMPRESSED_FILE)
+ module_decompress_cleanup(info);
+ else
+ vfree(info->hdr);
}
static int rewrite_section_headers(struct load_info *info, int flags)
@@ -3722,12 +3725,6 @@ static noinline int do_init_module(struct module *mod)
}
freeinit->module_init = mod->init_layout.base;
- /*
- * We want to find out whether @mod uses async during init. Clear
- * PF_USED_ASYNC. async_schedule*() will set it.
- */
- current->flags &= ~PF_USED_ASYNC;
-
do_mod_ctors(mod);
/* Start the module */
if (mod->init != NULL)
@@ -3753,22 +3750,13 @@ static noinline int do_init_module(struct module *mod)
/*
* We need to finish all async code before the module init sequence
- * is done. This has potential to deadlock. For example, a newly
- * detected block device can trigger request_module() of the
- * default iosched from async probing task. Once userland helper
- * reaches here, async_synchronize_full() will wait on the async
- * task waiting on request_module() and deadlock.
+ * is done. This has potential to deadlock if synchronous module
+ * loading is requested from async (which is not allowed!).
*
- * This deadlock is avoided by perfomring async_synchronize_full()
- * iff module init queued any async jobs. This isn't a full
- * solution as it will deadlock the same if module loading from
- * async jobs nests more than once; however, due to the various
- * constraints, this hack seems to be the best option for now.
- * Please refer to the following thread for details.
- *
- * http://thread.gmane.org/gmane.linux.kernel/1420814
+ * See commit 0fdff3ec6d87 ("async, kmod: warn on synchronous
+ * request_module() from async workers") for more details.
*/
- if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
+ if (!mod->async_probe_requested)
async_synchronize_full();
ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
@@ -4125,7 +4113,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
}
/* Get rid of temporary copy. */
- free_copy(info);
+ free_copy(info, flags);
/* Done! */
trace_module_load(mod);
@@ -4174,7 +4162,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
module_deallocate(mod, info);
free_copy:
- free_copy(info);
+ free_copy(info, flags);
return err;
}
@@ -4201,7 +4189,8 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
struct load_info info = { };
- void *hdr = NULL;
+ void *buf = NULL;
+ int len;
int err;
err = may_init_module();
@@ -4211,15 +4200,24 @@ SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
- |MODULE_INIT_IGNORE_VERMAGIC))
+ |MODULE_INIT_IGNORE_VERMAGIC
+ |MODULE_INIT_COMPRESSED_FILE))
return -EINVAL;
- err = kernel_read_file_from_fd(fd, 0, &hdr, INT_MAX, NULL,
+ len = kernel_read_file_from_fd(fd, 0, &buf, INT_MAX, NULL,
READING_MODULE);
- if (err < 0)
- return err;
- info.hdr = hdr;
- info.len = err;
+ if (len < 0)
+ return len;
+
+ if (flags & MODULE_INIT_COMPRESSED_FILE) {
+ err = module_decompress(&info, buf, len);
+ vfree(buf); /* compressed data is no longer needed */
+ if (err)
+ return err;
+ } else {
+ info.hdr = buf;
+ info.len = len;
+ }
return load_module(&info, uargs, flags);
}
@@ -4499,6 +4497,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
mod, kallsyms_symbol_value(sym));
if (ret != 0)
goto out;
+
+ cond_resched();
}
}
out:
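
From userspace, a compressed module can now be handed to the kernel directly by setting MODULE_INIT_COMPRESSED_FILE in the finit_module(2) flags. A minimal sketch; the flag value mirrors the UAPI definition, and error handling is trimmed:

/* cc -o loadmod loadmod.c && ./loadmod module.ko.gz */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef MODULE_INIT_COMPRESSED_FILE
#define MODULE_INIT_COMPRESSED_FILE 4	/* from uapi/linux/module.h */
#endif

int main(int argc, char **argv)
{
	int fd, ret;

	if (argc != 2)
		return 1;
	fd = open(argv[1], O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return 1;
	ret = syscall(SYS_finit_module, fd, "", MODULE_INIT_COMPRESSED_FILE);
	if (ret)
		perror("finit_module");
	close(fd);
	return ret ? 1 : 0;
}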
diff --git a/kernel/module_decompress.c b/kernel/module_decompress.c
new file mode 100644
index 000000000000..ffef98a20320
--- /dev/null
+++ b/kernel/module_decompress.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2021 Google LLC.
+ */
+
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/kobject.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/vmalloc.h>
+
+#include "module-internal.h"
+
+static int module_extend_max_pages(struct load_info *info, unsigned int extent)
+{
+ struct page **new_pages;
+
+ new_pages = kvmalloc_array(info->max_pages + extent,
+ sizeof(info->pages), GFP_KERNEL);
+ if (!new_pages)
+ return -ENOMEM;
+
+ memcpy(new_pages, info->pages, info->max_pages * sizeof(info->pages));
+ kvfree(info->pages);
+ info->pages = new_pages;
+ info->max_pages += extent;
+
+ return 0;
+}
+
+static struct page *module_get_next_page(struct load_info *info)
+{
+ struct page *page;
+ int error;
+
+ if (info->max_pages == info->used_pages) {
+ error = module_extend_max_pages(info, info->used_pages);
+ if (error)
+ return ERR_PTR(error);
+ }
+
+ page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+ if (!page)
+ return ERR_PTR(-ENOMEM);
+
+ info->pages[info->used_pages++] = page;
+ return page;
+}
+
+#ifdef CONFIG_MODULE_COMPRESS_GZIP
+#include <linux/zlib.h>
+#define MODULE_COMPRESSION gzip
+#define MODULE_DECOMPRESS_FN module_gzip_decompress
+
+/*
+ * Calculate length of the header which consists of signature, header
+ * flags, time stamp and operating system ID (10 bytes total), plus
+ * an optional filename.
+ */
+static size_t module_gzip_header_len(const u8 *buf, size_t size)
+{
+ const u8 signature[] = { 0x1f, 0x8b, 0x08 };
+ size_t len = 10;
+
+ if (size < len || memcmp(buf, signature, sizeof(signature)))
+ return 0;
+
+ if (buf[3] & 0x08) {
+ do {
+ /*
+ * If we can't find the end of the file name we must
+ * be dealing with a corrupted file.
+ */
+ if (len == size)
+ return 0;
+ } while (buf[len++] != '\0');
+ }
+
+ return len;
+}
+
+static ssize_t module_gzip_decompress(struct load_info *info,
+ const void *buf, size_t size)
+{
+ struct z_stream_s s = { 0 };
+ size_t new_size = 0;
+ size_t gzip_hdr_len;
+ ssize_t retval;
+ int rc;
+
+ gzip_hdr_len = module_gzip_header_len(buf, size);
+ if (!gzip_hdr_len) {
+ pr_err("not a gzip compressed module\n");
+ return -EINVAL;
+ }
+
+ s.next_in = buf + gzip_hdr_len;
+ s.avail_in = size - gzip_hdr_len;
+
+ s.workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
+ if (!s.workspace)
+ return -ENOMEM;
+
+ rc = zlib_inflateInit2(&s, -MAX_WBITS);
+ if (rc != Z_OK) {
+ pr_err("failed to initialize decompressor: %d\n", rc);
+ retval = -EINVAL;
+ goto out;
+ }
+
+ do {
+ struct page *page = module_get_next_page(info);
+ if (IS_ERR(page)) {
+ retval = PTR_ERR(page);
+ goto out_inflate_end;
+ }
+
+ s.next_out = kmap(page);
+ s.avail_out = PAGE_SIZE;
+ rc = zlib_inflate(&s, 0);
+ kunmap(page);
+
+ new_size += PAGE_SIZE - s.avail_out;
+ } while (rc == Z_OK);
+
+ if (rc != Z_STREAM_END) {
+ pr_err("decompression failed with status %d\n", rc);
+ retval = -EINVAL;
+ goto out_inflate_end;
+ }
+
+ retval = new_size;
+
+out_inflate_end:
+ zlib_inflateEnd(&s);
+out:
+ kfree(s.workspace);
+ return retval;
+}
+#elif defined(CONFIG_MODULE_COMPRESS_XZ)
+#include <linux/xz.h>
+#define MODULE_COMPRESSION xz
+#define MODULE_DECOMPRESS_FN module_xz_decompress
+
+static ssize_t module_xz_decompress(struct load_info *info,
+ const void *buf, size_t size)
+{
+ static const u8 signature[] = { 0xfd, '7', 'z', 'X', 'Z', 0 };
+ struct xz_dec *xz_dec;
+ struct xz_buf xz_buf;
+ enum xz_ret xz_ret;
+ size_t new_size = 0;
+ ssize_t retval;
+
+ if (size < sizeof(signature) ||
+ memcmp(buf, signature, sizeof(signature))) {
+ pr_err("not an xz compressed module\n");
+ return -EINVAL;
+ }
+
+ xz_dec = xz_dec_init(XZ_DYNALLOC, (u32)-1);
+ if (!xz_dec)
+ return -ENOMEM;
+
+ xz_buf.in_size = size;
+ xz_buf.in = buf;
+ xz_buf.in_pos = 0;
+
+ do {
+ struct page *page = module_get_next_page(info);
+ if (IS_ERR(page)) {
+ retval = PTR_ERR(page);
+ goto out;
+ }
+
+ xz_buf.out = kmap(page);
+ xz_buf.out_pos = 0;
+ xz_buf.out_size = PAGE_SIZE;
+ xz_ret = xz_dec_run(xz_dec, &xz_buf);
+ kunmap(page);
+
+ new_size += xz_buf.out_pos;
+ } while (xz_buf.out_pos == PAGE_SIZE && xz_ret == XZ_OK);
+
+ if (xz_ret != XZ_STREAM_END) {
+ pr_err("decompression failed with status %d\n", xz_ret);
+ retval = -EINVAL;
+ goto out;
+ }
+
+ retval = new_size;
+
+out:
+ xz_dec_end(xz_dec);
+ return retval;
+}
+#else
+#error "Unexpected configuration for CONFIG_MODULE_DECOMPRESS"
+#endif
+
+int module_decompress(struct load_info *info, const void *buf, size_t size)
+{
+ unsigned int n_pages;
+ ssize_t data_size;
+ int error;
+
+ /*
+ * Start with number of pages twice as big as needed for
+ * compressed data.
+ */
+ n_pages = DIV_ROUND_UP(size, PAGE_SIZE) * 2;
+ error = module_extend_max_pages(info, n_pages);
+
+ data_size = MODULE_DECOMPRESS_FN(info, buf, size);
+ if (data_size < 0) {
+ error = data_size;
+ goto err;
+ }
+
+ info->hdr = vmap(info->pages, info->used_pages, VM_MAP, PAGE_KERNEL);
+ if (!info->hdr) {
+ error = -ENOMEM;
+ goto err;
+ }
+
+ info->len = data_size;
+ return 0;
+
+err:
+ module_decompress_cleanup(info);
+ return error;
+}
+
+void module_decompress_cleanup(struct load_info *info)
+{
+ int i;
+
+ if (info->hdr)
+ vunmap(info->hdr);
+
+ for (i = 0; i < info->used_pages; i++)
+ __free_page(info->pages[i]);
+
+ kvfree(info->pages);
+
+ info->pages = NULL;
+ info->max_pages = info->used_pages = 0;
+}
+
+#ifdef CONFIG_SYSFS
+static ssize_t compression_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%s\n", __stringify(MODULE_COMPRESSION));
+}
+static struct kobj_attribute module_compression_attr = __ATTR_RO(compression);
+
+static int __init module_decompress_sysfs_init(void)
+{
+ int error;
+
+ error = sysfs_create_file(&module_kset->kobj,
+ &module_compression_attr.attr);
+ if (error)
+ pr_warn("Failed to create 'compression' attribute");
+
+ return 0;
+}
+late_initcall(module_decompress_sysfs_init);
+#endif
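
The sysfs attribute registered above lets userspace discover which algorithm the kernel can decompress, so a loader can decide between handing over the compressed file and decompressing it itself. A small userspace probe, assuming the attribute lands at /sys/module/compression (the module_kset kobject):

#include <stdio.h>

int main(void)
{
	char algo[16] = "";
	FILE *f = fopen("/sys/module/compression", "r");

	if (!f) {
		puts("no in-kernel module decompression");
		return 1;
	}
	if (fscanf(f, "%15s", algo) == 1)
		printf("kernel decompresses %s modules\n", algo);
	fclose(f);
	return 0;
}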
diff --git a/kernel/panic.c b/kernel/panic.c
index cefd7d82366f..55b50e052ec3 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -32,6 +32,7 @@
#include <linux/bug.h>
#include <linux/ratelimit.h>
#include <linux/debugfs.h>
+#include <trace/events/error_report.h>
#include <asm/sections.h>
#define PANIC_TIMER_STEP 100
@@ -533,26 +534,9 @@ void oops_enter(void)
trigger_all_cpu_backtrace();
}
-/*
- * 64-bit random ID for oopses:
- */
-static u64 oops_id;
-
-static int init_oops_id(void)
-{
- if (!oops_id)
- get_random_bytes(&oops_id, sizeof(oops_id));
- else
- oops_id++;
-
- return 0;
-}
-late_initcall(init_oops_id);
-
static void print_oops_end_marker(void)
{
- init_oops_id();
- pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
+ pr_warn("---[ end trace %016llx ]---\n", 0ULL);
}
/*
@@ -609,6 +593,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
print_irqtrace_events(current);
print_oops_end_marker();
+ trace_error_report_end(ERROR_DETECTOR_WARN, (unsigned long)caller);
/* Just a warning, don't kill lockdep. */
add_taint(taint, LOCKDEP_STILL_OK);
diff --git a/kernel/params.c b/kernel/params.c
index 8299bd764e42..5b92310425c5 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -926,9 +926,9 @@ static const struct sysfs_ops module_sysfs_ops = {
.store = module_attr_store,
};
-static int uevent_filter(struct kset *kset, struct kobject *kobj)
+static int uevent_filter(struct kobject *kobj)
{
- struct kobj_type *ktype = get_ktype(kobj);
+ const struct kobj_type *ktype = get_ktype(kobj);
if (ktype == &module_ktype)
return 1;
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 44169f3081fd..7e646079fbeb 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -504,7 +504,10 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
- return pm_wakeup_irq ? sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA;
+ if (!pm_wakeup_irq())
+ return -ENODATA;
+
+ return sprintf(buf, "%u\n", pm_wakeup_irq());
}
power_attr_ro(pm_wakeup_irq);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index b7e7798637b8..11b570fcf049 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -134,7 +134,7 @@ int freeze_processes(void)
if (!pm_freezing)
atomic_inc(&system_freezing_cnt);
- pm_wakeup_clear(true);
+ pm_wakeup_clear(0);
pr_info("Freezing user space processes ... ");
pm_freezing = true;
error = try_to_freeze_tasks(true);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f7a986078213..330d49937692 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -978,8 +978,7 @@ static void memory_bm_recycle(struct memory_bitmap *bm)
* Register a range of page frames the contents of which should not be saved
* during hibernation (to be used in the early initialization code).
*/
-void __init __register_nosave_region(unsigned long start_pfn,
- unsigned long end_pfn, int use_kmalloc)
+void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pfn)
{
struct nosave_region *region;
@@ -995,18 +994,12 @@ void __init __register_nosave_region(unsigned long start_pfn,
goto Report;
}
}
- if (use_kmalloc) {
- /* During init, this shouldn't fail */
- region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
- BUG_ON(!region);
- } else {
- /* This allocation cannot fail */
- region = memblock_alloc(sizeof(struct nosave_region),
- SMP_CACHE_BYTES);
- if (!region)
- panic("%s: Failed to allocate %zu bytes\n", __func__,
- sizeof(struct nosave_region));
- }
+ /* This allocation cannot fail */
+ region = memblock_alloc(sizeof(struct nosave_region),
+ SMP_CACHE_BYTES);
+ if (!region)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct nosave_region));
region->start_pfn = start_pfn;
region->end_pfn = end_pfn;
list_add_tail(&region->list, &nosave_regions);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 80cc1f0f502b..6fcdee7e87a5 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -136,8 +136,6 @@ static void s2idle_loop(void)
break;
}
- pm_wakeup_clear(false);
-
s2idle_enter();
}
diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
index 105df4dfc783..52571dcad768 100644
--- a/kernel/power/wakelock.c
+++ b/kernel/power/wakelock.c
@@ -39,23 +39,20 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
{
struct rb_node *node;
struct wakelock *wl;
- char *str = buf;
- char *end = buf + PAGE_SIZE;
+ int len = 0;
mutex_lock(&wakelocks_lock);
for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
wl = rb_entry(node, struct wakelock, node);
if (wl->ws->active == show_active)
- str += scnprintf(str, end - str, "%s ", wl->name);
+ len += sysfs_emit_at(buf, len, "%s ", wl->name);
}
- if (str > buf)
- str--;
- str += scnprintf(str, end - str, "\n");
+ len += sysfs_emit_at(buf, len, "\n");
mutex_unlock(&wakelocks_lock);
- return (str - buf);
+ return len;
}
#if CONFIG_PM_WAKELOCKS_LIMIT > 0
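
sysfs_emit_at() takes the running offset and refuses to write past the one-page sysfs buffer, which is what makes the manual str/end pointer arithmetic above removable. The pattern in a hypothetical show callback:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t example_names_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	static const char * const names[] = { "alpha", "beta", "gamma" };
	int len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(names); i++)
		len += sysfs_emit_at(buf, len, "%s ", names[i]);
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}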
diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
index d118739874c0..f5b388e810b9 100644
--- a/kernel/printk/Makefile
+++ b/kernel/printk/Makefile
@@ -2,5 +2,8 @@
obj-y = printk.o
obj-$(CONFIG_PRINTK) += printk_safe.o
obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
-obj-$(CONFIG_PRINTK) += printk_ringbuffer.o
obj-$(CONFIG_PRINTK_INDEX) += index.o
+
+obj-$(CONFIG_PRINTK) += printk_support.o
+printk_support-y := printk_ringbuffer.o
+printk_support-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/kernel/printk/internal.h b/kernel/printk/internal.h
index 9f3ed2fdb721..d947ca6c84f9 100644
--- a/kernel/printk/internal.h
+++ b/kernel/printk/internal.h
@@ -4,6 +4,14 @@
*/
#include <linux/percpu.h>
+#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
+void __init printk_sysctl_init(void);
+int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos);
+#else
+#define printk_sysctl_init() do { } while (0)
+#endif
+
#ifdef CONFIG_PRINTK
/* Flags for a single printk record. */
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 155229f0cf0f..82abfaf3c2aa 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -171,7 +171,7 @@ static int __init control_devkmsg(char *str)
__setup("printk.devkmsg=", control_devkmsg);
char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
-
+#if defined(CONFIG_PRINTK) && defined(CONFIG_SYSCTL)
int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
@@ -210,6 +210,7 @@ int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
return 0;
}
+#endif /* CONFIG_PRINTK && CONFIG_SYSCTL */
/* Number of registered extended console drivers. */
static int nr_ext_console_drivers;
@@ -3211,6 +3212,7 @@ static int __init printk_late_init(void)
ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
console_cpu_notify, NULL);
WARN_ON(ret < 0);
+ printk_sysctl_init();
return 0;
}
late_initcall(printk_late_init);
diff --git a/kernel/printk/sysctl.c b/kernel/printk/sysctl.c
new file mode 100644
index 000000000000..c228343eeb97
--- /dev/null
+++ b/kernel/printk/sysctl.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * sysctl.c: General linux system control interface
+ */
+
+#include <linux/sysctl.h>
+#include <linux/printk.h>
+#include <linux/capability.h>
+#include <linux/ratelimit.h>
+#include "internal.h"
+
+static const int ten_thousand = 10000;
+
+static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+}
+
+static struct ctl_table printk_sysctls[] = {
+ {
+ .procname = "printk",
+ .data = &console_loglevel,
+ .maxlen = 4*sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "printk_ratelimit",
+ .data = &printk_ratelimit_state.interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
+ .procname = "printk_ratelimit_burst",
+ .data = &printk_ratelimit_state.burst,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "printk_delay",
+ .data = &printk_delay_msec,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = (void *)&ten_thousand,
+ },
+ {
+ .procname = "printk_devkmsg",
+ .data = devkmsg_log_str,
+ .maxlen = DEVKMSG_STR_MAX_SIZE,
+ .mode = 0644,
+ .proc_handler = devkmsg_sysctl_set_loglvl,
+ },
+ {
+ .procname = "dmesg_restrict",
+ .data = &dmesg_restrict,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax_sysadmin,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {
+ .procname = "kptr_restrict",
+ .data = &kptr_restrict,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax_sysadmin,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+ {}
+};
+
+void __init printk_sysctl_init(void)
+{
+ register_sysctl_init("kernel", printk_sysctls);
+}
diff --git a/kernel/profile.c b/kernel/profile.c
index eb9c7f0f5ac5..37640a0bd8a3 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -133,79 +133,6 @@ int __ref profile_init(void)
return -ENOMEM;
}
-/* Profile event notifications */
-
-static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
-static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
-static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
-
-void profile_task_exit(struct task_struct *task)
-{
- blocking_notifier_call_chain(&task_exit_notifier, 0, task);
-}
-
-int profile_handoff_task(struct task_struct *task)
-{
- int ret;
- ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
- return (ret == NOTIFY_OK) ? 1 : 0;
-}
-
-void profile_munmap(unsigned long addr)
-{
- blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
-}
-
-int task_handoff_register(struct notifier_block *n)
-{
- return atomic_notifier_chain_register(&task_free_notifier, n);
-}
-EXPORT_SYMBOL_GPL(task_handoff_register);
-
-int task_handoff_unregister(struct notifier_block *n)
-{
- return atomic_notifier_chain_unregister(&task_free_notifier, n);
-}
-EXPORT_SYMBOL_GPL(task_handoff_unregister);
-
-int profile_event_register(enum profile_type type, struct notifier_block *n)
-{
- int err = -EINVAL;
-
- switch (type) {
- case PROFILE_TASK_EXIT:
- err = blocking_notifier_chain_register(
- &task_exit_notifier, n);
- break;
- case PROFILE_MUNMAP:
- err = blocking_notifier_chain_register(
- &munmap_notifier, n);
- break;
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(profile_event_register);
-
-int profile_event_unregister(enum profile_type type, struct notifier_block *n)
-{
- int err = -EINVAL;
-
- switch (type) {
- case PROFILE_TASK_EXIT:
- err = blocking_notifier_chain_unregister(
- &task_exit_notifier, n);
- break;
- case PROFILE_MUNMAP:
- err = blocking_notifier_chain_unregister(
- &munmap_notifier, n);
- break;
- }
-
- return err;
-}
-EXPORT_SYMBOL_GPL(profile_event_unregister);
-
#if defined(CONFIG_SMP) && defined(CONFIG_PROC_FS)
/*
* Each cpu has a pair of open-addressed hashtables for pending
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index f8589bf8d7dc..eea265082e97 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -419,8 +419,6 @@ static int ptrace_attach(struct task_struct *task, long request,
if (task->ptrace)
goto unlock_tasklist;
- if (seize)
- flags |= PT_SEIZED;
task->ptrace = flags;
ptrace_link(task, current);
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index 33ea446101b3..422f7e4cc08d 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -2031,9 +2031,8 @@ static int rcutorture_booster_init(unsigned int cpu)
mutex_lock(&boost_mutex);
rcu_torture_disable_rt_throttle();
VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
- boost_tasks[cpu] = kthread_create_on_node(rcu_torture_boost, NULL,
- cpu_to_node(cpu),
- "rcu_torture_boost");
+ boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
+ cpu, "rcu_torture_boost_%u");
if (IS_ERR(boost_tasks[cpu])) {
retval = PTR_ERR(boost_tasks[cpu]);
VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
@@ -2042,8 +2041,6 @@ static int rcutorture_booster_init(unsigned int cpu)
mutex_unlock(&boost_mutex);
return retval;
}
- kthread_bind(boost_tasks[cpu], cpu);
- wake_up_process(boost_tasks[cpu]);
mutex_unlock(&boost_mutex);
return 0;
}
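
kthread_run_on_cpu() collapses the old create/bind/wake sequence into one helper; the name format must contain a conversion for the CPU number, as with the "%u" above. A sketch with hypothetical names:

#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_percpu_threadfn(void *data)
{
	while (!kthread_should_stop()) {
		/* ... per-CPU work, already bound to the right CPU ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

static struct task_struct *example_start_on(unsigned int cpu)
{
	/* kthread_create_on_node() + kthread_bind() + wake_up_process()
	 * in a single call */
	return kthread_run_on_cpu(example_percpu_threadfn, NULL,
				  cpu, "example_worker_%u");
}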
diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h
index 84f1d91604cc..d64f0b1d8cd3 100644
--- a/kernel/rcu/tasks.h
+++ b/kernel/rcu/tasks.h
@@ -123,7 +123,7 @@ static struct rcu_tasks rt_name = \
.call_func = call, \
.rtpcpu = &rt_name ## __percpu, \
.name = n, \
- .percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS), \
+ .percpu_enqueue_shift = ilog2(CONFIG_NR_CPUS) + 1, \
.percpu_enqueue_lim = 1, \
.percpu_dequeue_lim = 1, \
.barrier_q_mutex = __MUTEX_INITIALIZER(rt_name.barrier_q_mutex), \
@@ -216,6 +216,7 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
int cpu;
unsigned long flags;
int lim;
+ int shift;
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
if (rcu_task_enqueue_lim < 0) {
@@ -229,7 +230,10 @@ static void cblist_init_generic(struct rcu_tasks *rtp)
if (lim > nr_cpu_ids)
lim = nr_cpu_ids;
- WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids / lim));
+ shift = ilog2(nr_cpu_ids / lim);
+ if (((nr_cpu_ids - 1) >> shift) >= lim)
+ shift++;
+ WRITE_ONCE(rtp->percpu_enqueue_shift, shift);
WRITE_ONCE(rtp->percpu_dequeue_lim, lim);
smp_store_release(&rtp->percpu_enqueue_lim, lim);
for_each_possible_cpu(cpu) {
@@ -298,7 +302,7 @@ static void call_rcu_tasks_generic(struct rcu_head *rhp, rcu_callback_t func,
if (unlikely(needadjust)) {
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
if (rtp->percpu_enqueue_lim != nr_cpu_ids) {
- WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids));
+ WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids) + 1);
WRITE_ONCE(rtp->percpu_dequeue_lim, nr_cpu_ids);
smp_store_release(&rtp->percpu_enqueue_lim, nr_cpu_ids);
pr_info("Switching %s to per-CPU callback queuing.\n", rtp->name);
@@ -413,7 +417,7 @@ static int rcu_tasks_need_gpcb(struct rcu_tasks *rtp)
if (rcu_task_cb_adjust && ncbs <= rcu_task_collapse_lim) {
raw_spin_lock_irqsave(&rtp->cbs_gbl_lock, flags);
if (rtp->percpu_enqueue_lim > 1) {
- WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids));
+ WRITE_ONCE(rtp->percpu_enqueue_shift, ilog2(nr_cpu_ids) + 1);
smp_store_release(&rtp->percpu_enqueue_lim, 1);
rtp->percpu_dequeue_gpseq = get_state_synchronize_rcu();
pr_info("Starting switch %s to CPU-0 callback queuing.\n", rtp->name);
diff --git a/kernel/resource.c b/kernel/resource.c
index 5ad3eba619ba..9c08d6e9eef2 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -99,7 +99,7 @@ enum { MAX_IORES_LEVEL = 5 };
static void *r_start(struct seq_file *m, loff_t *pos)
__acquires(resource_lock)
{
- struct resource *p = PDE_DATA(file_inode(m->file));
+ struct resource *p = pde_data(file_inode(m->file));
loff_t l = 0;
read_lock(&resource_lock);
for (p = p->child; p && l < *pos; p = r_next(m, p, &l))
@@ -115,7 +115,7 @@ static void r_stop(struct seq_file *m, void *v)
static int r_show(struct seq_file *m, void *v)
{
- struct resource *root = PDE_DATA(file_inode(m->file));
+ struct resource *root = pde_data(file_inode(m->file));
struct resource *r = v, *p;
unsigned long long start, end;
int width = root->end < 0x10000 ? 4 : 8;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 83872f95a1ea..9745613d531c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4424,6 +4424,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
init_entity_runnable_average(&p->se);
+
#ifdef CONFIG_SCHED_INFO
if (likely(sched_info_on()))
memset(&p->sched_info, 0, sizeof(p->sched_info));
@@ -4439,18 +4440,23 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
return 0;
}
-void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
{
unsigned long flags;
-#ifdef CONFIG_CGROUP_SCHED
- struct task_group *tg;
-#endif
+ /*
+ * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
+ * required yet, but lockdep gets upset if rules are violated.
+ */
raw_spin_lock_irqsave(&p->pi_lock, flags);
#ifdef CONFIG_CGROUP_SCHED
- tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
- struct task_group, css);
- p->sched_task_group = autogroup_task_group(p, tg);
+ if (1) {
+ struct task_group *tg;
+ tg = container_of(kargs->cset->subsys[cpu_cgrp_id],
+ struct task_group, css);
+ tg = autogroup_task_group(p, tg);
+ p->sched_task_group = tg;
+ }
#endif
rseq_migrate(p);
/*
@@ -4461,7 +4467,10 @@ void sched_post_fork(struct task_struct *p, struct kernel_clone_args *kargs)
if (p->sched_class->task_fork)
p->sched_class->task_fork(p);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
+void sched_post_fork(struct task_struct *p)
+{
uclamp_post_fork(p);
}
@@ -5822,8 +5831,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
}
if (schedstat_enabled() && rq->core->core_forceidle_count) {
- if (cookie)
- rq->core->core_forceidle_start = rq_clock(rq->core);
+ rq->core->core_forceidle_start = rq_clock(rq->core);
rq->core->core_forceidle_occupation = occ;
}
@@ -8219,9 +8227,7 @@ int __cond_resched_lock(spinlock_t *lock)
if (spin_needbreak(lock) || resched) {
spin_unlock(lock);
- if (resched)
- preempt_schedule_common();
- else
+ if (!_cond_resched())
cpu_relax();
ret = 1;
spin_lock(lock);
@@ -8239,9 +8245,7 @@ int __cond_resched_rwlock_read(rwlock_t *lock)
if (rwlock_needbreak(lock) || resched) {
read_unlock(lock);
- if (resched)
- preempt_schedule_common();
- else
+ if (!_cond_resched())
cpu_relax();
ret = 1;
read_lock(lock);
@@ -8259,9 +8263,7 @@ int __cond_resched_rwlock_write(rwlock_t *lock)
if (rwlock_needbreak(lock) || resched) {
write_unlock(lock);
- if (resched)
- preempt_schedule_common();
- else
+ if (!_cond_resched())
cpu_relax();
ret = 1;
write_lock(lock);
@@ -8642,14 +8644,6 @@ void __init init_idle(struct task_struct *idle, int cpu)
__sched_fork(0, idle);
- /*
- * The idle task doesn't need the kthread struct to function, but it
- * is dressed up as a per-CPU kthread and thus needs to play the part
- * if we want to avoid special-casing it in code that deals with per-CPU
- * kthreads.
- */
- set_kthread_struct(idle);
-
raw_spin_lock_irqsave(&idle->pi_lock, flags);
raw_spin_rq_lock(rq);
@@ -9469,6 +9463,14 @@ void __init sched_init(void)
enter_lazy_tlb(&init_mm, current);
/*
+ * The idle task doesn't need the kthread struct to function, but it
+ * is dressed up as a per-CPU kthread and thus needs to play the part
+ * if we want to avoid special-casing it in code that deals with per-CPU
+ * kthreads.
+ */
+ WARN_ON(!set_kthread_struct(current));
+
+ /*
* Make us the idle thread. Technically, schedule() should not be
* called from this thread, however somewhere below it might be,
* but because we are the idle thread, we just pick up running again
diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c
index 1fb45672ec85..c8746a9a7ada 100644
--- a/kernel/sched/core_sched.c
+++ b/kernel/sched/core_sched.c
@@ -277,7 +277,7 @@ void __sched_core_account_forceidle(struct rq *rq)
rq_i = cpu_rq(i);
p = rq_i->core_pick ?: rq_i->curr;
- if (!p->core_cookie)
+ if (p == rq_i->idle)
continue;
__schedstat_add(p->stats.core_forceidle_sum, delta);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 095b0aa378df..5146163bfabb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3028,9 +3028,11 @@ enqueue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
static inline void
dequeue_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- u32 divider = get_pelt_divider(&se->avg);
sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
- cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+ sub_positive(&cfs_rq->avg.load_sum, se_weight(se) * se->avg.load_sum);
+ /* See update_cfs_rq_load_avg() */
+ cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+ cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
}
#else
static inline void
@@ -3381,7 +3383,6 @@ void set_task_rq_fair(struct sched_entity *se,
se->avg.last_update_time = n_last_update_time;
}
-
/*
* When on migration a sched_entity joins/leaves the PELT hierarchy, we need to
* propagate its contribution. The key to this propagation is the invariant
@@ -3449,15 +3450,14 @@ void set_task_rq_fair(struct sched_entity *se,
* XXX: only do this for the part of runnable > running ?
*
*/
-
static inline void
update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
- long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
- u32 divider;
+ long delta_sum, delta_avg = gcfs_rq->avg.util_avg - se->avg.util_avg;
+ u32 new_sum, divider;
/* Nothing to update */
- if (!delta)
+ if (!delta_avg)
return;
/*
@@ -3466,23 +3466,30 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
*/
divider = get_pelt_divider(&cfs_rq->avg);
+
/* Set new sched_entity's utilization */
se->avg.util_avg = gcfs_rq->avg.util_avg;
- se->avg.util_sum = se->avg.util_avg * divider;
+ new_sum = se->avg.util_avg * divider;
+ delta_sum = (long)new_sum - (long)se->avg.util_sum;
+ se->avg.util_sum = new_sum;
/* Update parent cfs_rq utilization */
- add_positive(&cfs_rq->avg.util_avg, delta);
- cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
+ add_positive(&cfs_rq->avg.util_avg, delta_avg);
+ add_positive(&cfs_rq->avg.util_sum, delta_sum);
+
+ /* See update_cfs_rq_load_avg() */
+ cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
+ cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
}
static inline void
update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
- long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
- u32 divider;
+ long delta_sum, delta_avg = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
+ u32 new_sum, divider;
/* Nothing to update */
- if (!delta)
+ if (!delta_avg)
return;
/*
@@ -3493,19 +3500,25 @@ update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cf
/* Set new sched_entity's runnable */
se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
- se->avg.runnable_sum = se->avg.runnable_avg * divider;
+ new_sum = se->avg.runnable_avg * divider;
+ delta_sum = (long)new_sum - (long)se->avg.runnable_sum;
+ se->avg.runnable_sum = new_sum;
/* Update parent cfs_rq runnable */
- add_positive(&cfs_rq->avg.runnable_avg, delta);
- cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+ add_positive(&cfs_rq->avg.runnable_avg, delta_avg);
+ add_positive(&cfs_rq->avg.runnable_sum, delta_sum);
+ /* See update_cfs_rq_load_avg() */
+ cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
+ cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
}
static inline void
update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
{
- long delta, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
+ long delta_avg, running_sum, runnable_sum = gcfs_rq->prop_runnable_sum;
unsigned long load_avg;
u64 load_sum = 0;
+ s64 delta_sum;
u32 divider;
if (!runnable_sum)
@@ -3532,7 +3545,7 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
* assuming all tasks are equally runnable.
*/
if (scale_load_down(gcfs_rq->load.weight)) {
- load_sum = div_s64(gcfs_rq->avg.load_sum,
+ load_sum = div_u64(gcfs_rq->avg.load_sum,
scale_load_down(gcfs_rq->load.weight));
}
@@ -3549,19 +3562,22 @@ update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
running_sum = se->avg.util_sum >> SCHED_CAPACITY_SHIFT;
runnable_sum = max(runnable_sum, running_sum);
- load_sum = (s64)se_weight(se) * runnable_sum;
- load_avg = div_s64(load_sum, divider);
-
- se->avg.load_sum = runnable_sum;
+ load_sum = se_weight(se) * runnable_sum;
+ load_avg = div_u64(load_sum, divider);
- delta = load_avg - se->avg.load_avg;
- if (!delta)
+ delta_avg = load_avg - se->avg.load_avg;
+ if (!delta_avg)
return;
- se->avg.load_avg = load_avg;
+ delta_sum = load_sum - (s64)se_weight(se) * se->avg.load_sum;
- add_positive(&cfs_rq->avg.load_avg, delta);
- cfs_rq->avg.load_sum = cfs_rq->avg.load_avg * divider;
+ se->avg.load_sum = runnable_sum;
+ se->avg.load_avg = load_avg;
+ add_positive(&cfs_rq->avg.load_avg, delta_avg);
+ add_positive(&cfs_rq->avg.load_sum, delta_sum);
+ /* See update_cfs_rq_load_avg() */
+ cfs_rq->avg.load_sum = max_t(u32, cfs_rq->avg.load_sum,
+ cfs_rq->avg.load_avg * PELT_MIN_DIVIDER);
}
static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum)
@@ -3652,7 +3668,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum
*
* cfs_rq->avg is used for task_h_load() and update_cfs_share() for example.
*
- * Returns true if the load decayed or we removed load.
+ * Return: true if the load decayed or we removed load.
*
* Since both these conditions indicate a changed cfs_rq->avg.load we should
* call update_tg_load_avg() when this function returns true.
@@ -3677,15 +3693,32 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
r = removed_load;
sub_positive(&sa->load_avg, r);
- sa->load_sum = sa->load_avg * divider;
+ sub_positive(&sa->load_sum, r * divider);
+ /* See sa->util_sum below */
+ sa->load_sum = max_t(u32, sa->load_sum, sa->load_avg * PELT_MIN_DIVIDER);
r = removed_util;
sub_positive(&sa->util_avg, r);
- sa->util_sum = sa->util_avg * divider;
+ sub_positive(&sa->util_sum, r * divider);
+ /*
+ * Because of rounding, se->util_sum might end up being +1 more than
+ * cfs->util_sum. Although this is not a problem by itself, detaching
+ * a lot of tasks with the rounding problem between two updates of
+ * util_avg (~1ms) can make cfs->util_sum become null whereas
+ * cfs->util_avg is not.
+ * Check that util_sum is still above its lower bound for the new
+ * util_avg. Given that period_contrib might have moved since the last
+ * sync, we are only sure that util_sum must be above or equal to
+ * util_avg * the minimum possible divider.
+ */
+ sa->util_sum = max_t(u32, sa->util_sum, sa->util_avg * PELT_MIN_DIVIDER);
r = removed_runnable;
sub_positive(&sa->runnable_avg, r);
- sa->runnable_sum = sa->runnable_avg * divider;
+ sub_positive(&sa->runnable_sum, r * divider);
+ /* See sa->util_sum above */
+ sa->runnable_sum = max_t(u32, sa->runnable_sum,
+ sa->runnable_avg * PELT_MIN_DIVIDER);
/*
* removed_runnable is the unweighted version of removed_load so we
@@ -3772,17 +3805,18 @@ static void attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
*/
static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- /*
- * cfs_rq->avg.period_contrib can be used for both cfs_rq and se.
- * See ___update_load_avg() for details.
- */
- u32 divider = get_pelt_divider(&cfs_rq->avg);
-
dequeue_load_avg(cfs_rq, se);
sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
- cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * divider;
+ sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
+ /* See update_cfs_rq_load_avg() */
+ cfs_rq->avg.util_sum = max_t(u32, cfs_rq->avg.util_sum,
+ cfs_rq->avg.util_avg * PELT_MIN_DIVIDER);
+
sub_positive(&cfs_rq->avg.runnable_avg, se->avg.runnable_avg);
- cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * divider;
+ sub_positive(&cfs_rq->avg.runnable_sum, se->avg.runnable_sum);
+ /* See update_cfs_rq_load_avg() */
+ cfs_rq->avg.runnable_sum = max_t(u32, cfs_rq->avg.runnable_sum,
+ cfs_rq->avg.runnable_avg * PELT_MIN_DIVIDER);
add_tg_cfs_propagate(cfs_rq, -se->avg.load_sum);
@@ -8539,6 +8573,8 @@ group_type group_classify(unsigned int imbalance_pct,
*
* If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings
* of @dst_cpu are idle and @sg has lower priority.
+ *
+ * Return: true if @dst_cpu can pull tasks, false otherwise.
*/
static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds,
struct sg_lb_stats *sgs,
@@ -8614,6 +8650,7 @@ sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
* @env: The load balancing environment.
+ * @sds: Load-balancing data with statistics of the local group.
* @group: sched_group whose statistics are to be updated.
* @sgs: variable to hold the statistics for this group.
* @sg_status: Holds flag indicating the status of the sched_group
@@ -9421,12 +9458,11 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
/**
* find_busiest_group - Returns the busiest group within the sched_domain
* if there is an imbalance.
+ * @env: The load balancing environment.
*
* Also calculates the amount of runnable load which should be moved
* to restore balance.
*
- * @env: The load balancing environment.
- *
* Return: - The busiest group if imbalance exists.
*/
static struct sched_group *find_busiest_group(struct lb_env *env)
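
The fair.c hunks above repeat one pattern: subtract the entity's exact contribution from the cfs_rq sum with sub_positive(), then clamp the sum to avg * PELT_MIN_DIVIDER rather than recomputing it from the average. A standalone sketch of that clamp, not part of the patch (the struct, the numbers and the sub_positive() stand-in below are all simplified for illustration):

#include <stdio.h>

#define LOAD_AVG_MAX     47742
#define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)

/* Simplified stand-in for the kernel's sub_positive(): never underflow. */
#define sub_positive(ptr, val) do {				\
	*(ptr) = (*(ptr) > (val)) ? *(ptr) - (val) : 0;	\
} while (0)

struct sched_avg_demo {
	unsigned long util_avg;
	unsigned long util_sum;
};

static void remove_util(struct sched_avg_demo *sa,
			unsigned long r_avg, unsigned long r_sum)
{
	sub_positive(&sa->util_avg, r_avg);
	sub_positive(&sa->util_sum, r_sum);
	/* Re-establish the lower bound instead of recomputing the sum. */
	if (sa->util_sum < sa->util_avg * PELT_MIN_DIVIDER)
		sa->util_sum = sa->util_avg * PELT_MIN_DIVIDER;
}

int main(void)
{
	struct sched_avg_demo sa = {
		.util_avg = 10,
		.util_sum = 10UL * LOAD_AVG_MAX,
	};

	/* Removed sum slightly too large (rounding): the clamp repairs it. */
	remove_util(&sa, 4, 4UL * LOAD_AVG_MAX + 9000);
	printf("avg=%lu sum=%lu floor=%lu\n", sa.util_avg, sa.util_sum,
	       sa.util_avg * PELT_MIN_DIVIDER);
	return 0;
}
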
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index b5add64d9698..3d2825408e3a 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -147,11 +147,11 @@
#endif
#ifdef CONFIG_RSEQ
-#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK \
+#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK \
(MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ \
- | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
+ | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)
#else
-#define MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
+#define MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK 0
#endif
#define MEMBARRIER_CMD_BITMASK \
@@ -159,7 +159,8 @@
| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED \
| MEMBARRIER_CMD_PRIVATE_EXPEDITED \
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
- | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
+ | MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
+ | MEMBARRIER_PRIVATE_EXPEDITED_RSEQ_BITMASK)
static void ipi_mb(void *info)
{
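
The membarrier hunks fix what MEMBARRIER_CMD_QUERY advertises: the old RSEQ bitmask referenced a nonexistent flag name, and the RSEQ commands were missing from MEMBARRIER_CMD_BITMASK, so the query result under-reported kernel support. A standalone illustration of that failure mode, using invented demo flag values (the real ones live in include/uapi/linux/membarrier.h):

#include <stdio.h>

#define DEMO_CMD_PRIVATE_EXPEDITED_RSEQ          (1 << 0)
#define DEMO_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ (1 << 1)

/* Before the fix: the RSEQ commands were absent from the query mask. */
#define DEMO_CMD_BITMASK_OLD 0
/* After the fix: both RSEQ commands are reported as supported. */
#define DEMO_CMD_BITMASK_NEW (DEMO_CMD_PRIVATE_EXPEDITED_RSEQ | \
			      DEMO_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ)

int main(void)
{
	int wanted = DEMO_CMD_PRIVATE_EXPEDITED_RSEQ;

	/* Userspace probes support by ANDing the QUERY result with a flag. */
	printf("old query reports rseq: %d\n", !!(DEMO_CMD_BITMASK_OLD & wanted));
	printf("new query reports rseq: %d\n", !!(DEMO_CMD_BITMASK_NEW & wanted));
	return 0;
}
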
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index e06071bf3472..c336f5f481bc 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -37,9 +37,11 @@ update_irq_load_avg(struct rq *rq, u64 running)
}
#endif
+#define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)
+
static inline u32 get_pelt_divider(struct sched_avg *avg)
{
- return LOAD_AVG_MAX - 1024 + avg->period_contrib;
+ return PELT_MIN_DIVIDER + avg->period_contrib;
}
static inline void cfs_se_util_change(struct sched_avg *avg)
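
Since period_contrib is always below 1024 (one PELT period), get_pelt_divider() ranges from PELT_MIN_DIVIDER up to LOAD_AVG_MAX - 1, which is what makes PELT_MIN_DIVIDER a safe lower bound for the *_sum clamps in the fair.c hunks. A standalone range check mirroring the helper outside the kernel:

#include <assert.h>

#define LOAD_AVG_MAX     47742
#define PELT_MIN_DIVIDER (LOAD_AVG_MAX - 1024)

/* Standalone mirror of get_pelt_divider() for the range check. */
static unsigned int pelt_divider(unsigned int period_contrib)
{
	return PELT_MIN_DIVIDER + period_contrib;
}

int main(void)
{
	unsigned int pc;

	/* period_contrib never reaches a full 1024us PELT period. */
	for (pc = 0; pc < 1024; pc++)
		assert(pelt_divider(pc) >= PELT_MIN_DIVIDER &&
		       pelt_divider(pc) < LOAD_AVG_MAX);
	return 0;
}
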
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index a679613a7cb7..e14358178849 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1082,44 +1082,6 @@ int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
return 0;
}
-static int psi_io_show(struct seq_file *m, void *v)
-{
- return psi_show(m, &psi_system, PSI_IO);
-}
-
-static int psi_memory_show(struct seq_file *m, void *v)
-{
- return psi_show(m, &psi_system, PSI_MEM);
-}
-
-static int psi_cpu_show(struct seq_file *m, void *v)
-{
- return psi_show(m, &psi_system, PSI_CPU);
-}
-
-static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *))
-{
- if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
- return -EPERM;
-
- return single_open(file, psi_show, NULL);
-}
-
-static int psi_io_open(struct inode *inode, struct file *file)
-{
- return psi_open(file, psi_io_show);
-}
-
-static int psi_memory_open(struct inode *inode, struct file *file)
-{
- return psi_open(file, psi_memory_show);
-}
-
-static int psi_cpu_open(struct inode *inode, struct file *file)
-{
- return psi_open(file, psi_cpu_show);
-}
-
struct psi_trigger *psi_trigger_create(struct psi_group *group,
char *buf, size_t nbytes, enum psi_res res)
{
@@ -1162,7 +1124,6 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
t->event = 0;
t->last_event_time = 0;
init_waitqueue_head(&t->event_wait);
- kref_init(&t->refcount);
mutex_lock(&group->trigger_lock);
@@ -1191,15 +1152,19 @@ struct psi_trigger *psi_trigger_create(struct psi_group *group,
return t;
}
-static void psi_trigger_destroy(struct kref *ref)
+void psi_trigger_destroy(struct psi_trigger *t)
{
- struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
- struct psi_group *group = t->group;
+ struct psi_group *group;
struct task_struct *task_to_destroy = NULL;
- if (static_branch_likely(&psi_disabled))
+ /*
+ * We do not check psi_disabled since it might have been disabled after
+ * the trigger got created.
+ */
+ if (!t)
return;
+ group = t->group;
/*
* Wakeup waiters to stop polling. Can happen if cgroup is deleted
* from under a polling process.
@@ -1235,9 +1200,9 @@ static void psi_trigger_destroy(struct kref *ref)
mutex_unlock(&group->trigger_lock);
/*
- * Wait for both *trigger_ptr from psi_trigger_replace and
- * poll_task RCUs to complete their read-side critical sections
- * before destroying the trigger and optionally the poll_task
+ * Wait for psi_schedule_poll_work RCU to complete its read-side
+ * critical section before destroying the trigger and optionally the
+ * poll_task.
*/
synchronize_rcu();
/*
@@ -1254,18 +1219,6 @@ static void psi_trigger_destroy(struct kref *ref)
kfree(t);
}
-void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
-{
- struct psi_trigger *old = *trigger_ptr;
-
- if (static_branch_likely(&psi_disabled))
- return;
-
- rcu_assign_pointer(*trigger_ptr, new);
- if (old)
- kref_put(&old->refcount, psi_trigger_destroy);
-}
-
__poll_t psi_trigger_poll(void **trigger_ptr,
struct file *file, poll_table *wait)
{
@@ -1275,27 +1228,57 @@ __poll_t psi_trigger_poll(void **trigger_ptr,
if (static_branch_likely(&psi_disabled))
return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
- rcu_read_lock();
-
- t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
- if (!t) {
- rcu_read_unlock();
+ t = smp_load_acquire(trigger_ptr);
+ if (!t)
return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
- }
- kref_get(&t->refcount);
-
- rcu_read_unlock();
poll_wait(file, &t->event_wait, wait);
if (cmpxchg(&t->event, 1, 0) == 1)
ret |= EPOLLPRI;
- kref_put(&t->refcount, psi_trigger_destroy);
-
return ret;
}
+#ifdef CONFIG_PROC_FS
+static int psi_io_show(struct seq_file *m, void *v)
+{
+ return psi_show(m, &psi_system, PSI_IO);
+}
+
+static int psi_memory_show(struct seq_file *m, void *v)
+{
+ return psi_show(m, &psi_system, PSI_MEM);
+}
+
+static int psi_cpu_show(struct seq_file *m, void *v)
+{
+ return psi_show(m, &psi_system, PSI_CPU);
+}
+
+static int psi_open(struct file *file, int (*psi_show)(struct seq_file *, void *))
+{
+ if (file->f_mode & FMODE_WRITE && !capable(CAP_SYS_RESOURCE))
+ return -EPERM;
+
+ return single_open(file, psi_show, NULL);
+}
+
+static int psi_io_open(struct inode *inode, struct file *file)
+{
+ return psi_open(file, psi_io_show);
+}
+
+static int psi_memory_open(struct inode *inode, struct file *file)
+{
+ return psi_open(file, psi_memory_show);
+}
+
+static int psi_cpu_open(struct inode *inode, struct file *file)
+{
+ return psi_open(file, psi_cpu_show);
+}
+
static ssize_t psi_write(struct file *file, const char __user *user_buf,
size_t nbytes, enum psi_res res)
{
@@ -1316,14 +1299,24 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
buf[buf_size - 1] = '\0';
- new = psi_trigger_create(&psi_system, buf, nbytes, res);
- if (IS_ERR(new))
- return PTR_ERR(new);
-
seq = file->private_data;
+
/* Take seq->lock to protect seq->private from concurrent writes */
mutex_lock(&seq->lock);
- psi_trigger_replace(&seq->private, new);
+
+ /* Allow only one trigger per file descriptor */
+ if (seq->private) {
+ mutex_unlock(&seq->lock);
+ return -EBUSY;
+ }
+
+ new = psi_trigger_create(&psi_system, buf, nbytes, res);
+ if (IS_ERR(new)) {
+ mutex_unlock(&seq->lock);
+ return PTR_ERR(new);
+ }
+
+ smp_store_release(&seq->private, new);
mutex_unlock(&seq->lock);
return nbytes;
@@ -1358,7 +1351,7 @@ static int psi_fop_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
- psi_trigger_replace(&seq->private, NULL);
+ psi_trigger_destroy(seq->private);
return single_release(inode, file);
}
@@ -1400,3 +1393,5 @@ static int __init psi_proc_init(void)
return 0;
}
module_init(psi_proc_init);
+
+#endif /* CONFIG_PROC_FS */
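
From userspace, the resulting trigger lifecycle is: one trigger per open file descriptor, created by a single write() and destroyed when the descriptor is released; a second write() on the same descriptor now fails with EBUSY. A usage sketch following the documented trigger format from Documentation/accounting/psi.rst:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Trigger: 150ms of "some" memory stall within any 1s window. */
	const char trig[] = "some 150000 1000000";
	struct pollfd pfd;
	int fd;

	fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
	if (fd < 0 || write(fd, trig, strlen(trig) + 1) < 0) {
		perror("psi trigger");
		return 1;
	}

	/* A second write() on this fd would now return -1 with errno EBUSY. */

	pfd.fd = fd;
	pfd.events = POLLPRI;
	if (poll(&pfd, 1, 10000) > 0 && (pfd.revents & POLLPRI))
		puts("memory pressure event");

	close(fd);	/* releasing the fd destroys the trigger */
	return 0;
}
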
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 4d8f44a17727..db10e73d06e0 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -29,6 +29,9 @@
#include <linux/syscalls.h>
#include <linux/sysctl.h>
+/* Not exposed in headers: strictly internal use only. */
+#define SECCOMP_MODE_DEAD (SECCOMP_MODE_FILTER + 1)
+
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
#include <asm/syscall.h>
#endif
@@ -1010,6 +1013,7 @@ static void __secure_computing_strict(int this_syscall)
#ifdef SECCOMP_DEBUG
dump_stack();
#endif
+ current->seccomp.mode = SECCOMP_MODE_DEAD;
seccomp_log(this_syscall, SIGKILL, SECCOMP_RET_KILL_THREAD, true);
do_exit(SIGKILL);
}
@@ -1261,6 +1265,7 @@ static int __seccomp_filter(int this_syscall, const struct seccomp_data *sd,
case SECCOMP_RET_KILL_THREAD:
case SECCOMP_RET_KILL_PROCESS:
default:
+ current->seccomp.mode = SECCOMP_MODE_DEAD;
seccomp_log(this_syscall, SIGSYS, action, true);
/* Dump core only if this is the last remaining thread. */
if (action != SECCOMP_RET_KILL_THREAD ||
@@ -1309,6 +1314,11 @@ int __secure_computing(const struct seccomp_data *sd)
return 0;
case SECCOMP_MODE_FILTER:
return __seccomp_filter(this_syscall, sd, false);
+ /* Surviving SECCOMP_RET_KILL_* must be proactively impossible. */
+ case SECCOMP_MODE_DEAD:
+ WARN_ON_ONCE(1);
+ do_exit(SIGKILL);
+ return -1;
default:
BUG();
}
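
SECCOMP_MODE_DEAD turns "this task must never issue another syscall" into an enforced invariant: the mode is flipped before the killing signal is delivered, so a task that somehow survives and re-enters __secure_computing() warns and exits instead of running the syscall. A simplified standalone sketch of that state machine (all names below are demo stand-ins, not kernel symbols):

#include <stdio.h>
#include <stdlib.h>

enum demo_mode { MODE_DISABLED, MODE_STRICT, MODE_FILTER, MODE_DEAD };

static enum demo_mode mode = MODE_FILTER;

/* Stand-in for the fatal path: mark DEAD before delivering SIGKILL. */
static void demo_kill(void)
{
	mode = MODE_DEAD;
	/* seccomp_log(...); do_exit(SIGKILL); */
}

static int demo_secure_computing(void)
{
	switch (mode) {
	case MODE_DISABLED:
		return 0;
	case MODE_FILTER:
		demo_kill();	/* the filter returned SECCOMP_RET_KILL_* */
		return -1;
	case MODE_DEAD:
		/* Surviving the kill must be impossible: warn and die. */
		fprintf(stderr, "WARN: syscall entry after seccomp kill\n");
		abort();
	default:
		abort();
	}
}

int main(void)
{
	demo_secure_computing();	/* "kills" the task: mode goes DEAD */
	demo_secure_computing();	/* a second entry trips the DEAD check */
	return 0;
}
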
diff --git a/kernel/signal.c b/kernel/signal.c
index dfcee3888b00..9b04631acde8 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -626,7 +626,8 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
*
* All callers have to hold the siglock.
*/
-int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
+int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
+ kernel_siginfo_t *info, enum pid_type *type)
{
bool resched_timer = false;
int signr;
@@ -634,8 +635,10 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
+ *type = PIDTYPE_PID;
signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
if (!signr) {
+ *type = PIDTYPE_TGID;
signr = __dequeue_signal(&tsk->signal->shared_pending,
mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
@@ -903,8 +906,8 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
struct task_struct *t;
sigset_t flush;
- if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
- if (!(signal->flags & SIGNAL_GROUP_EXIT))
+ if (signal->flags & SIGNAL_GROUP_EXIT) {
+ if (signal->core_state)
return sig == SIGKILL;
/*
* The process is in the middle of dying, nothing to do.
@@ -1029,7 +1032,7 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
* then start taking the whole group down immediately.
*/
if (sig_fatal(p, sig) &&
- !(signal->flags & SIGNAL_GROUP_EXIT) &&
+ (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
!sigismember(&t->real_blocked, sig) &&
(sig == SIGKILL || !p->ptrace)) {
/*
@@ -1339,9 +1342,10 @@ force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
}
/*
* Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
- * debugging to leave init killable.
+ * debugging to leave init killable. But HANDLER_EXIT is always fatal.
*/
- if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
+ if (action->sa.sa_handler == SIG_DFL &&
+ (!t->ptrace || (handler == HANDLER_EXIT)))
t->signal->flags &= ~SIGNAL_UNKILLABLE;
ret = send_signal(sig, info, t, PIDTYPE_PID);
spin_unlock_irqrestore(&t->sighand->siglock, flags);
@@ -1820,6 +1824,7 @@ int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
* force_sig_seccomp - signals the task to allow in-process syscall emulation
* @syscall: syscall number to send to userland
* @reason: filter-supplied reason code to send to userland (via si_errno)
+ * @force_coredump: true to trigger a coredump
*
* Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
*/
@@ -2383,7 +2388,8 @@ static bool do_signal_stop(int signr)
WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
- unlikely(signal_group_exit(sig)))
+ unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
+ unlikely(sig->group_exec_task))
return false;
/*
* There is no group stop already in progress. We must
@@ -2544,7 +2550,7 @@ static void do_freezer_trap(void)
freezable_schedule();
}
-static int ptrace_signal(int signr, kernel_siginfo_t *info)
+static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
{
/*
* We do not check sig_kernel_stop(signr) but set this marker
@@ -2584,8 +2590,9 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info)
}
/* If the (new) signal is now blocked, requeue it. */
- if (sigismember(&current->blocked, signr)) {
- send_signal(signr, info, current, PIDTYPE_PID);
+ if (sigismember(&current->blocked, signr) ||
+ fatal_signal_pending(current)) {
+ send_signal(signr, info, current, type);
signr = 0;
}
@@ -2684,18 +2691,20 @@ relock:
goto relock;
}
- /* Has this task already been marked for death? */
- if (signal_group_exit(signal)) {
- ksig->info.si_signo = signr = SIGKILL;
- sigdelset(&current->pending.signal, SIGKILL);
- trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
- &sighand->action[SIGKILL - 1]);
- recalc_sigpending();
- goto fatal;
- }
-
for (;;) {
struct k_sigaction *ka;
+ enum pid_type type;
+
+ /* Has this task already been marked for death? */
+ if ((signal->flags & SIGNAL_GROUP_EXIT) ||
+ signal->group_exec_task) {
+ ksig->info.si_signo = signr = SIGKILL;
+ sigdelset(&current->pending.signal, SIGKILL);
+ trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
+ &sighand->action[SIGKILL - 1]);
+ recalc_sigpending();
+ goto fatal;
+ }
if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
do_signal_stop(0))
@@ -2728,16 +2737,18 @@ relock:
* so that the instruction pointer in the signal stack
* frame points to the faulting instruction.
*/
+ type = PIDTYPE_PID;
signr = dequeue_synchronous_signal(&ksig->info);
if (!signr)
- signr = dequeue_signal(current, &current->blocked, &ksig->info);
+ signr = dequeue_signal(current, &current->blocked,
+ &ksig->info, &type);
if (!signr)
break; /* will return 0 */
if (unlikely(current->ptrace) && (signr != SIGKILL) &&
!(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
- signr = ptrace_signal(signr, &ksig->info);
+ signr = ptrace_signal(signr, &ksig->info, type);
if (!signr)
continue;
}
@@ -2863,13 +2874,13 @@ out:
}
/**
- * signal_delivered -
+ * signal_delivered - called after signal delivery to update blocked signals
* @ksig: kernel signal struct
* @stepping: nonzero if debugger single-step or block-step in use
*
* This function should be called when a signal has successfully been
* delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
- * is always blocked, and the signal itself is blocked unless %SA_NODEFER
+ * is always blocked), and the signal itself is blocked unless %SA_NODEFER
* is set in @ksig->ka.sa.sa_flags. Tracing is notified.
*/
static void signal_delivered(struct ksignal *ksig, int stepping)
@@ -2942,7 +2953,7 @@ void exit_signals(struct task_struct *tsk)
*/
cgroup_threadgroup_change_begin(tsk);
- if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
+ if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
tsk->flags |= PF_EXITING;
cgroup_threadgroup_change_end(tsk);
return;
@@ -3562,6 +3573,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
ktime_t *to = NULL, timeout = KTIME_MAX;
struct task_struct *tsk = current;
sigset_t mask = *which;
+ enum pid_type type;
int sig, ret = 0;
if (ts) {
@@ -3578,7 +3590,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
signotset(&mask);
spin_lock_irq(&tsk->sighand->siglock);
- sig = dequeue_signal(tsk, &mask, info);
+ sig = dequeue_signal(tsk, &mask, info, &type);
if (!sig && timeout) {
/*
* None ready, temporarily unblock those we're interested
@@ -3597,7 +3609,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
spin_lock_irq(&tsk->sighand->siglock);
__set_task_blocked(tsk, &tsk->real_blocked);
sigemptyset(&tsk->real_blocked);
- sig = dequeue_signal(tsk, &mask, info);
+ sig = dequeue_signal(tsk, &mask, info, &type);
}
spin_unlock_irq(&tsk->sighand->siglock);
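
The new out-parameter records which queue a signal came from so that ptrace_signal() can requeue it to the right one: process-directed signals sit on the shared queue (PIDTYPE_TGID), thread-directed signals on the per-thread queue (PIDTYPE_PID), and as the dequeue_signal() hunk above shows, the private queue is checked first. A userspace demo of the two queues:

#define _GNU_SOURCE
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t set;
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigaddset(&set, SIGUSR2);
	pthread_sigmask(SIG_BLOCK, &set, NULL);

	kill(getpid(), SIGUSR1);		/* shared queue: PIDTYPE_TGID */
	pthread_kill(pthread_self(), SIGUSR2);	/* private queue: PIDTYPE_PID */

	sigwait(&set, &sig);
	printf("first dequeued: %d\n", sig);	/* private queue is tried first */
	sigwait(&set, &sig);
	printf("then: %d\n", sig);
	return 0;
}
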
diff --git a/kernel/stackleak.c b/kernel/stackleak.c
index ce161a8e8d97..ddb5a7f48d69 100644
--- a/kernel/stackleak.c
+++ b/kernel/stackleak.c
@@ -16,11 +16,13 @@
#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
#include <linux/jump_label.h>
#include <linux/sysctl.h>
+#include <linux/init.h>
static DEFINE_STATIC_KEY_FALSE(stack_erasing_bypass);
-int stack_erasing_sysctl(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
+#ifdef CONFIG_SYSCTL
+static int stack_erasing_sysctl(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
{
int ret = 0;
int state = !static_branch_unlikely(&stack_erasing_bypass);
@@ -42,13 +44,33 @@ int stack_erasing_sysctl(struct ctl_table *table, int write,
state ? "enabled" : "disabled");
return ret;
}
+static struct ctl_table stackleak_sysctls[] = {
+ {
+ .procname = "stack_erasing",
+ .data = NULL,
+ .maxlen = sizeof(int),
+ .mode = 0600,
+ .proc_handler = stack_erasing_sysctl,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {}
+};
+
+static int __init stackleak_sysctls_init(void)
+{
+ register_sysctl_init("kernel", stackleak_sysctls);
+ return 0;
+}
+late_initcall(stackleak_sysctls_init);
+#endif /* CONFIG_SYSCTL */
#define skip_erasing() static_branch_unlikely(&stack_erasing_bypass)
#else
#define skip_erasing() false
#endif /* CONFIG_STACKLEAK_RUNTIME_DISABLE */
-asmlinkage void notrace stackleak_erase(void)
+asmlinkage void noinstr stackleak_erase(void)
{
/* It would be nice not to have 'kstack_ptr' and 'boundary' on stack */
unsigned long kstack_ptr = current->lowest_stack;
@@ -102,9 +124,8 @@ asmlinkage void notrace stackleak_erase(void)
/* Reset the 'lowest_stack' value for the next syscall */
current->lowest_stack = current_top_of_stack() - THREAD_SIZE/64;
}
-NOKPROBE_SYMBOL(stackleak_erase);
-void __used __no_caller_saved_registers notrace stackleak_track_stack(void)
+void __used __no_caller_saved_registers noinstr stackleak_track_stack(void)
{
unsigned long sp = current_stack_pointer;
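
The knob itself is unchanged for userspace: it is still kernel.stack_erasing, mode 0600, so reading it needs privilege. A minimal reader, assuming a kernel built with CONFIG_STACKLEAK_RUNTIME_DISABLE:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/stack_erasing", "r");
	int enabled;

	if (!f) {
		perror("stack_erasing");
		return 1;
	}
	if (fscanf(f, "%d", &enabled) == 1)
		printf("stack erasing %s\n", enabled ? "enabled" : "disabled");
	fclose(f);
	return 0;
}
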
diff --git a/kernel/sys.c b/kernel/sys.c
index 8fdac0d90504..97dc9e5d6bf9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -220,7 +220,6 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
niceval = MAX_NICE;
rcu_read_lock();
- read_lock(&tasklist_lock);
switch (which) {
case PRIO_PROCESS:
if (who)
@@ -235,9 +234,11 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
pgrp = find_vpid(who);
else
pgrp = task_pgrp(current);
+ read_lock(&tasklist_lock);
do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
error = set_one_prio(p, niceval, error);
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
+ read_unlock(&tasklist_lock);
break;
case PRIO_USER:
uid = make_kuid(cred->user_ns, who);
@@ -249,16 +250,15 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
if (!user)
goto out_unlock; /* No processes for this user */
}
- do_each_thread(g, p) {
+ for_each_process_thread(g, p) {
if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
error = set_one_prio(p, niceval, error);
- } while_each_thread(g, p);
+ }
if (!uid_eq(uid, cred->uid))
free_uid(user); /* For find_user() */
break;
}
out_unlock:
- read_unlock(&tasklist_lock);
rcu_read_unlock();
out:
return error;
@@ -283,7 +283,6 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
return -EINVAL;
rcu_read_lock();
- read_lock(&tasklist_lock);
switch (which) {
case PRIO_PROCESS:
if (who)
@@ -301,11 +300,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
pgrp = find_vpid(who);
else
pgrp = task_pgrp(current);
+ read_lock(&tasklist_lock);
do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
niceval = nice_to_rlimit(task_nice(p));
if (niceval > retval)
retval = niceval;
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
+ read_unlock(&tasklist_lock);
break;
case PRIO_USER:
uid = make_kuid(cred->user_ns, who);
@@ -317,19 +318,18 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
if (!user)
goto out_unlock; /* No processes for this user */
}
- do_each_thread(g, p) {
+ for_each_process_thread(g, p) {
if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
niceval = nice_to_rlimit(task_nice(p));
if (niceval > retval)
retval = niceval;
}
- } while_each_thread(g, p);
+ }
if (!uid_eq(uid, cred->uid))
free_uid(user); /* for find_user() */
break;
}
out_unlock:
- read_unlock(&tasklist_lock);
rcu_read_unlock();
return retval;
@@ -472,6 +472,16 @@ static int set_user(struct cred *new)
if (!new_user)
return -EAGAIN;
+ free_uid(new->user);
+ new->user = new_user;
+ return 0;
+}
+
+static void flag_nproc_exceeded(struct cred *new)
+{
+ if (new->ucounts == current_ucounts())
+ return;
+
/*
* We don't fail in case of NPROC limit excess here because too many
* poorly written programs don't check set*uid() return code, assuming
@@ -480,15 +490,10 @@ static int set_user(struct cred *new)
* failure to the execve() stage.
*/
if (is_ucounts_overlimit(new->ucounts, UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC)) &&
- new_user != INIT_USER &&
- !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
+ new->user != INIT_USER)
current->flags |= PF_NPROC_EXCEEDED;
else
current->flags &= ~PF_NPROC_EXCEEDED;
-
- free_uid(new->user);
- new->user = new_user;
- return 0;
}
/*
@@ -563,6 +568,7 @@ long __sys_setreuid(uid_t ruid, uid_t euid)
if (retval < 0)
goto error;
+ flag_nproc_exceeded(new);
return commit_creds(new);
error:
@@ -625,6 +631,7 @@ long __sys_setuid(uid_t uid)
if (retval < 0)
goto error;
+ flag_nproc_exceeded(new);
return commit_creds(new);
error:
@@ -704,6 +711,7 @@ long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
if (retval < 0)
goto error;
+ flag_nproc_exceeded(new);
return commit_creds(new);
error:
@@ -2261,6 +2269,66 @@ int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)
+#ifdef CONFIG_ANON_VMA_NAME
+
+#define ANON_VMA_NAME_MAX_LEN 80
+#define ANON_VMA_NAME_INVALID_CHARS "\\`$[]"
+
+static inline bool is_valid_name_char(char ch)
+{
+ /* printable ascii characters, excluding ANON_VMA_NAME_INVALID_CHARS */
+ return ch > 0x1f && ch < 0x7f &&
+ !strchr(ANON_VMA_NAME_INVALID_CHARS, ch);
+}
+
+static int prctl_set_vma(unsigned long opt, unsigned long addr,
+ unsigned long size, unsigned long arg)
+{
+ struct mm_struct *mm = current->mm;
+ const char __user *uname;
+ char *name, *pch;
+ int error;
+
+ switch (opt) {
+ case PR_SET_VMA_ANON_NAME:
+ uname = (const char __user *)arg;
+ if (uname) {
+ name = strndup_user(uname, ANON_VMA_NAME_MAX_LEN);
+
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ for (pch = name; *pch != '\0'; pch++) {
+ if (!is_valid_name_char(*pch)) {
+ kfree(name);
+ return -EINVAL;
+ }
+ }
+ } else {
+ /* Reset the name */
+ name = NULL;
+ }
+
+ mmap_write_lock(mm);
+ error = madvise_set_anon_name(mm, addr, size, name);
+ mmap_write_unlock(mm);
+ kfree(name);
+ break;
+ default:
+ error = -EINVAL;
+ }
+
+ return error;
+}
+
+#else /* CONFIG_ANON_VMA_NAME */
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+ unsigned long size, unsigned long arg)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_ANON_VMA_NAME */
+
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
unsigned long, arg4, unsigned long, arg5)
{
@@ -2530,6 +2598,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
error = sched_core_share_pid(arg2, arg3, arg4, arg5);
break;
#endif
+ case PR_SET_VMA:
+ error = prctl_set_vma(arg2, arg3, arg4, arg5);
+ break;
default:
error = -EINVAL;
break;
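
From userspace the new interface is prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, size, name), with a NULL name clearing the label. A usage sketch; the #ifndef fallback values below match the uapi constants added alongside this series, and the mapping name is an arbitrary example:

#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
#define PR_SET_VMA           0x53564d41
#define PR_SET_VMA_ANON_NAME 0
#endif

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* The region then shows up as [anon:demo buffer] in /proc/self/maps. */
	if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		  (unsigned long)p, len, (unsigned long)"demo buffer"))
		perror("PR_SET_VMA_ANON_NAME (needs CONFIG_ANON_VMA_NAME)");

	return 0;
}
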
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index d1944258cfc0..a492f159624f 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -297,6 +297,7 @@ COND_SYSCALL(get_mempolicy);
COND_SYSCALL(set_mempolicy);
COND_SYSCALL(migrate_pages);
COND_SYSCALL(move_pages);
+COND_SYSCALL(set_mempolicy_home_node);
COND_SYSCALL(perf_event_open);
COND_SYSCALL(accept4);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index d7ed1dffa426..5ae443b2882e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -20,7 +20,6 @@
*/
#include <linux/module.h>
-#include <linux/aio.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
@@ -50,7 +49,6 @@
#include <linux/times.h>
#include <linux/limits.h>
#include <linux/dcache.h>
-#include <linux/dnotify.h>
#include <linux/syscalls.h>
#include <linux/vmstat.h>
#include <linux/nfs_fs.h>
@@ -58,19 +56,15 @@
#include <linux/reboot.h>
#include <linux/ftrace.h>
#include <linux/perf_event.h>
-#include <linux/kprobes.h>
-#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/kmod.h>
#include <linux/capability.h>
#include <linux/binfmts.h>
#include <linux/sched/sysctl.h>
-#include <linux/sched/coredump.h>
#include <linux/kexec.h>
#include <linux/bpf.h>
#include <linux/mount.h>
#include <linux/userfaultfd_k.h>
-#include <linux/coredump.h>
#include <linux/latencytop.h>
#include <linux/pid.h>
#include <linux/delayacct.h>
@@ -97,64 +91,21 @@
#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_LOCK_STAT)
#include <linux/lockdep.h>
#endif
-#ifdef CONFIG_CHR_DEV_SG
-#include <scsi/sg.h>
-#endif
-#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
-#include <linux/stackleak.h>
-#endif
-#ifdef CONFIG_LOCKUP_DETECTOR
-#include <linux/nmi.h>
-#endif
#if defined(CONFIG_SYSCTL)
/* Constants used for minimum and maximum */
-#ifdef CONFIG_LOCKUP_DETECTOR
-static int sixty = 60;
-#endif
-
-static int __maybe_unused neg_one = -1;
-static int __maybe_unused two = 2;
-static int __maybe_unused four = 4;
-static unsigned long zero_ul;
-static unsigned long one_ul = 1;
-static unsigned long long_max = LONG_MAX;
-static int one_hundred = 100;
-static int two_hundred = 200;
-static int one_thousand = 1000;
-#ifdef CONFIG_PRINTK
-static int ten_thousand = 10000;
-#endif
+
#ifdef CONFIG_PERF_EVENTS
-static int six_hundred_forty_kb = 640 * 1024;
+static const int six_hundred_forty_kb = 640 * 1024;
#endif
/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
-static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
-
-/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
-static int maxolduid = 65535;
-static int minolduid;
+static const unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
-static int ngroups_max = NGROUPS_MAX;
+static const int ngroups_max = NGROUPS_MAX;
static const int cap_last_cap = CAP_LAST_CAP;
-/*
- * This is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs
- * and hung_task_check_interval_secs
- */
-#ifdef CONFIG_DETECT_HUNG_TASK
-static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
-#endif
-
-#ifdef CONFIG_INOTIFY_USER
-#include <linux/inotify.h>
-#endif
-#ifdef CONFIG_FANOTIFY
-#include <linux/fanotify.h>
-#endif
-
#ifdef CONFIG_PROC_SYSCTL
/**
@@ -191,8 +142,8 @@ int sysctl_legacy_va_layout;
#endif
#ifdef CONFIG_COMPACTION
-static int min_extfrag_threshold;
-static int max_extfrag_threshold = 1000;
+/* min_extfrag_threshold is SYSCTL_ZERO */
+static const int max_extfrag_threshold = 1000;
#endif
#endif /* CONFIG_SYSCTL */
@@ -803,12 +754,12 @@ static int __do_proc_douintvec(void *tbl_data, struct ctl_table *table,
return do_proc_douintvec_r(i, buffer, lenp, ppos, conv, data);
}
-static int do_proc_douintvec(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos,
- int (*conv)(unsigned long *lvalp,
- unsigned int *valp,
- int write, void *data),
- void *data)
+int do_proc_douintvec(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos,
+ int (*conv)(unsigned long *lvalp,
+ unsigned int *valp,
+ int write, void *data),
+ void *data)
{
return __do_proc_douintvec(table->data, table, write,
buffer, lenp, ppos, conv, data);
@@ -937,17 +888,6 @@ static int proc_taint(struct ctl_table *table, int write,
return err;
}
-#ifdef CONFIG_PRINTK
-static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- if (write && !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-}
-#endif
-
/**
* struct do_proc_dointvec_minmax_conv_param - proc_dointvec_minmax() range checking structure
* @min: pointer to minimum allowable value
@@ -1143,67 +1083,6 @@ int proc_dou8vec_minmax(struct ctl_table *table, int write,
}
EXPORT_SYMBOL_GPL(proc_dou8vec_minmax);
-static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
- unsigned int *valp,
- int write, void *data)
-{
- if (write) {
- unsigned int val;
-
- val = round_pipe_size(*lvalp);
- if (val == 0)
- return -EINVAL;
-
- *valp = val;
- } else {
- unsigned int val = *valp;
- *lvalp = (unsigned long) val;
- }
-
- return 0;
-}
-
-static int proc_dopipe_max_size(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- return do_proc_douintvec(table, write, buffer, lenp, ppos,
- do_proc_dopipe_max_size_conv, NULL);
-}
-
-static void validate_coredump_safety(void)
-{
-#ifdef CONFIG_COREDUMP
- if (suid_dumpable == SUID_DUMP_ROOT &&
- core_pattern[0] != '/' && core_pattern[0] != '|') {
- printk(KERN_WARNING
-"Unsafe core_pattern used with fs.suid_dumpable=2.\n"
-"Pipe handler or fully qualified core dump path required.\n"
-"Set kernel.core_pattern before fs.suid_dumpable.\n"
- );
- }
-#endif
-}
-
-static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
- if (!error)
- validate_coredump_safety();
- return error;
-}
-
-#ifdef CONFIG_COREDUMP
-static int proc_dostring_coredump(struct ctl_table *table, int write,
- void *buffer, size_t *lenp, loff_t *ppos)
-{
- int error = proc_dostring(table, write, buffer, lenp, ppos);
- if (!error)
- validate_coredump_safety();
- return error;
-}
-#endif
-
#ifdef CONFIG_MAGIC_SYSRQ
static int sysrq_sysctl_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
@@ -1266,10 +1145,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table,
err = proc_get_long(&p, &left, &val, &neg,
proc_wspace_sep,
sizeof(proc_wspace_sep), NULL);
- if (err)
+ if (err || neg) {
+ err = -EINVAL;
break;
- if (neg)
- continue;
+ }
+
val = convmul * val / convdiv;
if ((min && val < *min) || (max && val > *max)) {
err = -EINVAL;
@@ -1927,29 +1807,6 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec,
},
-#ifdef CONFIG_COREDUMP
- {
- .procname = "core_uses_pid",
- .data = &core_uses_pid,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "core_pattern",
- .data = core_pattern,
- .maxlen = CORENAME_MAX_SIZE,
- .mode = 0644,
- .proc_handler = proc_dostring_coredump,
- },
- {
- .procname = "core_pipe_limit",
- .data = &core_pipe_limit,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-#endif
#ifdef CONFIG_PROC_SYSCTL
{
.procname = "tainted",
@@ -1963,7 +1820,7 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &neg_one,
+ .extra1 = SYSCTL_NEG_ONE,
.extra2 = SYSCTL_ONE,
},
#endif
@@ -2130,15 +1987,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dostring,
},
#endif
-#ifdef CONFIG_CHR_DEV_SG
- {
- .procname = "sg-big-buff",
- .data = &sg_big_buff,
- .maxlen = sizeof (int),
- .mode = 0444,
- .proc_handler = proc_dointvec,
- },
-#endif
#ifdef CONFIG_BSD_PROCESS_ACCT
{
.procname = "acct",
@@ -2174,30 +2022,18 @@ static struct ctl_table kern_table[] = {
.proc_handler = sysctl_max_threads,
},
{
- .procname = "random",
- .mode = 0555,
- .child = random_table,
- },
- {
.procname = "usermodehelper",
.mode = 0555,
.child = usermodehelper_table,
},
-#ifdef CONFIG_FW_LOADER_USER_HELPER
- {
- .procname = "firmware_config",
- .mode = 0555,
- .child = firmware_config_table,
- },
-#endif
{
.procname = "overflowuid",
.data = &overflowuid,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &minolduid,
- .extra2 = &maxolduid,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_MAXOLDUID,
},
{
.procname = "overflowgid",
@@ -2205,8 +2041,8 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &minolduid,
- .extra2 = &maxolduid,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_MAXOLDUID,
},
#ifdef CONFIG_S390
{
@@ -2251,66 +2087,9 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = proc_doulongvec_minmax,
},
-#if defined CONFIG_PRINTK
- {
- .procname = "printk",
- .data = &console_loglevel,
- .maxlen = 4*sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "printk_ratelimit",
- .data = &printk_ratelimit_state.interval,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_jiffies,
- },
- {
- .procname = "printk_ratelimit_burst",
- .data = &printk_ratelimit_state.burst,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
- {
- .procname = "printk_delay",
- .data = &printk_delay_msec,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &ten_thousand,
- },
- {
- .procname = "printk_devkmsg",
- .data = devkmsg_log_str,
- .maxlen = DEVKMSG_STR_MAX_SIZE,
- .mode = 0644,
- .proc_handler = devkmsg_sysctl_set_loglvl,
- },
- {
- .procname = "dmesg_restrict",
- .data = &dmesg_restrict,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax_sysadmin,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "kptr_restrict",
- .data = &kptr_restrict,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax_sysadmin,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &two,
- },
-#endif
{
.procname = "ngroups_max",
- .data = &ngroups_max,
+ .data = (void *)&ngroups_max,
.maxlen = sizeof (int),
.mode = 0444,
.proc_handler = proc_dointvec,
@@ -2322,96 +2101,6 @@ static struct ctl_table kern_table[] = {
.mode = 0444,
.proc_handler = proc_dointvec,
},
-#if defined(CONFIG_LOCKUP_DETECTOR)
- {
- .procname = "watchdog",
- .data = &watchdog_user_enabled,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_watchdog,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "watchdog_thresh",
- .data = &watchdog_thresh,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_watchdog_thresh,
- .extra1 = SYSCTL_ZERO,
- .extra2 = &sixty,
- },
- {
- .procname = "nmi_watchdog",
- .data = &nmi_watchdog_user_enabled,
- .maxlen = sizeof(int),
- .mode = NMI_WATCHDOG_SYSCTL_PERM,
- .proc_handler = proc_nmi_watchdog,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "watchdog_cpumask",
- .data = &watchdog_cpumask_bits,
- .maxlen = NR_CPUS,
- .mode = 0644,
- .proc_handler = proc_watchdog_cpumask,
- },
-#ifdef CONFIG_SOFTLOCKUP_DETECTOR
- {
- .procname = "soft_watchdog",
- .data = &soft_watchdog_user_enabled,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_soft_watchdog,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "softlockup_panic",
- .data = &softlockup_panic,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
-#ifdef CONFIG_SMP
- {
- .procname = "softlockup_all_cpu_backtrace",
- .data = &sysctl_softlockup_all_cpu_backtrace,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
-#endif /* CONFIG_SMP */
-#endif
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
- {
- .procname = "hardlockup_panic",
- .data = &hardlockup_panic,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
-#ifdef CONFIG_SMP
- {
- .procname = "hardlockup_all_cpu_backtrace",
- .data = &sysctl_hardlockup_all_cpu_backtrace,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
-#endif /* CONFIG_SMP */
-#endif
-#endif
-
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86)
{
.procname = "unknown_nmi_panic",
@@ -2514,60 +2203,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
-#ifdef CONFIG_DETECT_HUNG_TASK
-#ifdef CONFIG_SMP
- {
- .procname = "hung_task_all_cpu_backtrace",
- .data = &sysctl_hung_task_all_cpu_backtrace,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
-#endif /* CONFIG_SMP */
- {
- .procname = "hung_task_panic",
- .data = &sysctl_hung_task_panic,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
- {
- .procname = "hung_task_check_count",
- .data = &sysctl_hung_task_check_count,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = SYSCTL_ZERO,
- },
- {
- .procname = "hung_task_timeout_secs",
- .data = &sysctl_hung_task_timeout_secs,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = proc_dohung_task_timeout_secs,
- .extra2 = &hung_task_timeout_max,
- },
- {
- .procname = "hung_task_check_interval_secs",
- .data = &sysctl_hung_task_check_interval_secs,
- .maxlen = sizeof(unsigned long),
- .mode = 0644,
- .proc_handler = proc_dohung_task_timeout_secs,
- .extra2 = &hung_task_timeout_max,
- },
- {
- .procname = "hung_task_warnings",
- .data = &sysctl_hung_task_warnings,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &neg_one,
- },
-#endif
#ifdef CONFIG_RT_MUTEXES
{
.procname = "max_lock_depth",
@@ -2627,7 +2262,7 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = perf_cpu_time_max_percent_handler,
.extra1 = SYSCTL_ZERO,
- .extra2 = &one_hundred,
+ .extra2 = SYSCTL_ONE_HUNDRED,
},
{
.procname = "perf_event_max_stack",
@@ -2636,7 +2271,7 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = perf_event_max_stack_handler,
.extra1 = SYSCTL_ZERO,
- .extra2 = &six_hundred_forty_kb,
+ .extra2 = (void *)&six_hundred_forty_kb,
},
{
.procname = "perf_event_max_contexts_per_stack",
@@ -2645,7 +2280,7 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = perf_event_max_stack_handler,
.extra1 = SYSCTL_ZERO,
- .extra2 = &one_thousand,
+ .extra2 = SYSCTL_ONE_THOUSAND,
},
#endif
{
@@ -2676,7 +2311,7 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = bpf_unpriv_handler,
.extra1 = SYSCTL_ZERO,
- .extra2 = &two,
+ .extra2 = SYSCTL_TWO,
},
{
.procname = "bpf_stats_enabled",
@@ -2708,17 +2343,6 @@ static struct ctl_table kern_table[] = {
.extra2 = SYSCTL_INT_MAX,
},
#endif
-#ifdef CONFIG_STACKLEAK_RUNTIME_DISABLE
- {
- .procname = "stack_erasing",
- .data = NULL,
- .maxlen = sizeof(int),
- .mode = 0600,
- .proc_handler = stack_erasing_sysctl,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
- },
-#endif
{ }
};
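
The deletions in this file follow one pattern: writable per-file static ints that exist only to be pointed at by .extra1/.extra2 are replaced with shared read-only SYSCTL_* constants. A simplified standalone sketch of the pattern (demo names only; the real constants are built from sysctl_vals[] in include/linux/sysctl.h):

#include <stdio.h>

static const int sysctl_vals_demo[] = { 0, 1, 2, 4, 100, 200, 1000 };

#define DEMO_SYSCTL_ZERO        ((void *)&sysctl_vals_demo[0])
#define DEMO_SYSCTL_ONE_HUNDRED ((void *)&sysctl_vals_demo[4])

struct demo_ctl_table {
	const char *procname;
	void *extra1;	/* minimum bound, read through the pointer */
	void *extra2;	/* maximum bound */
};

/* One shared read-only constant replaces a writable static per file. */
static struct demo_ctl_table demo_table[] = {
	{
		.procname = "dirty_ratio",
		.extra1   = DEMO_SYSCTL_ZERO,
		.extra2   = DEMO_SYSCTL_ONE_HUNDRED,
	},
	{ }
};

int main(void)
{
	printf("%s: min=%d max=%d\n", demo_table[0].procname,
	       *(const int *)demo_table[0].extra1,
	       *(const int *)demo_table[0].extra2);
	return 0;
}
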
@@ -2730,7 +2354,7 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = overcommit_policy_handler,
.extra1 = SYSCTL_ZERO,
- .extra2 = &two,
+ .extra2 = SYSCTL_TWO,
},
{
.procname = "panic_on_oom",
@@ -2739,7 +2363,7 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = &two,
+ .extra2 = SYSCTL_TWO,
},
{
.procname = "oom_kill_allocating_task",
@@ -2784,7 +2408,7 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = dirty_background_ratio_handler,
.extra1 = SYSCTL_ZERO,
- .extra2 = &one_hundred,
+ .extra2 = SYSCTL_ONE_HUNDRED,
},
{
.procname = "dirty_background_bytes",
@@ -2792,7 +2416,7 @@ static struct ctl_table vm_table[] = {
.maxlen = sizeof(dirty_background_bytes),
.mode = 0644,
.proc_handler = dirty_background_bytes_handler,
- .extra1 = &one_ul,
+ .extra1 = SYSCTL_LONG_ONE,
},
{
.procname = "dirty_ratio",
@@ -2801,7 +2425,7 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = dirty_ratio_handler,
.extra1 = SYSCTL_ZERO,
- .extra2 = &one_hundred,
+ .extra2 = SYSCTL_ONE_HUNDRED,
},
{
.procname = "dirty_bytes",
@@ -2809,7 +2433,7 @@ static struct ctl_table vm_table[] = {
.maxlen = sizeof(vm_dirty_bytes),
.mode = 0644,
.proc_handler = dirty_bytes_handler,
- .extra1 = &dirty_bytes_min,
+ .extra1 = (void *)&dirty_bytes_min,
},
{
.procname = "dirty_writeback_centisecs",
@@ -2841,7 +2465,7 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = SYSCTL_ZERO,
- .extra2 = &two_hundred,
+ .extra2 = SYSCTL_TWO_HUNDRED,
},
#ifdef CONFIG_HUGETLB_PAGE
{
@@ -2898,7 +2522,7 @@ static struct ctl_table vm_table[] = {
.mode = 0200,
.proc_handler = drop_caches_sysctl_handler,
.extra1 = SYSCTL_ONE,
- .extra2 = &four,
+ .extra2 = SYSCTL_FOUR,
},
#ifdef CONFIG_COMPACTION
{
@@ -2915,7 +2539,7 @@ static struct ctl_table vm_table[] = {
.mode = 0644,
.proc_handler = compaction_proactiveness_sysctl_handler,
.extra1 = SYSCTL_ZERO,
- .extra2 = &one_hundred,
+ .extra2 = SYSCTL_ONE_HUNDRED,
},
{